return 1;
}
- buf->flags |= PIPE_BUF_FLAG_LRU;
return 0;
}
struct pipe_buffer *buf)
{
page_cache_release(buf->page);
- buf->page = NULL;
- buf->flags &= ~PIPE_BUF_FLAG_LRU;
}
static int page_cache_pipe_buf_pin(struct pipe_inode_info *info,
if ((sd->flags & SPLICE_F_MOVE) && this_len == PAGE_CACHE_SIZE) {
/*
* If steal succeeds, buf->page is now pruned from the vm
- * side (LRU and page cache) and we can reuse it. The page
- * will also be looked on successful return.
+ * side (page cache) and we can reuse it. The page will also
+ * be locked on successful return.
*/
if (buf->ops->steal(info, buf))
goto find_page;
page = buf->page;
+ page_cache_get(page);
+
+ /*
+ * page must be on the LRU for adding to the pagecache.
+ * Check this without grabbing the zone lock, if it isn't
+ * then do grab the zone lock, recheck, and add if necessary.
+ */
+ if (!PageLRU(page)) {
+ struct zone *zone = page_zone(page);
+
+ spin_lock_irq(&zone->lru_lock);
+ if (!PageLRU(page)) {
+ SetPageLRU(page);
+ add_page_to_inactive_list(zone, page);
+ }
+ spin_unlock_irq(&zone->lru_lock);
+ }
+
if (add_to_page_cache(page, mapping, index, gfp_mask)) {
+ page_cache_release(page);
unlock_page(page);
goto find_page;
}
-
- page_cache_get(page);
-
- if (!(buf->flags & PIPE_BUF_FLAG_LRU))
- lru_cache_add(page);
} else {
find_page:
page = find_lock_page(mapping, index);
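For reference, the SPLICE_F_MOVE fast path of pipe_to_file() reads roughly as follows once the hunk above is applied. This is only a sketch reconstructed from the diff context: the locals (page, mapping, index, gfp_mask, sd, buf) and the find_page slow path are assumed to be the ones already present in the function, and PageLRU()/SetPageLRU()/page_zone()/add_page_to_inactive_list() are the existing mm helpers that the patch itself calls.

	if ((sd->flags & SPLICE_F_MOVE) && this_len == PAGE_CACHE_SIZE) {
		/*
		 * Try to steal the pipe buffer's page. On success it has
		 * been pruned from the page cache and comes back locked,
		 * so it can be reused directly.
		 */
		if (buf->ops->steal(info, buf))
			goto find_page;

		page = buf->page;
		page_cache_get(page);

		/*
		 * The page must be on the LRU before it is re-added to the
		 * pagecache: test PageLRU() locklessly first, and only take
		 * zone->lru_lock (and recheck under it) when it is not.
		 */
		if (!PageLRU(page)) {
			struct zone *zone = page_zone(page);

			spin_lock_irq(&zone->lru_lock);
			if (!PageLRU(page)) {
				SetPageLRU(page);
				add_page_to_inactive_list(zone, page);
			}
			spin_unlock_irq(&zone->lru_lock);
		}

		if (add_to_page_cache(page, mapping, index, gfp_mask)) {
			/* drop the extra reference taken above and retry */
			page_cache_release(page);
			unlock_page(page);
			goto find_page;
		}
	} else {
		/* find_page: slow path via find_lock_page(), unchanged */
	}

With the unlocked PageLRU() test, zone->lru_lock is only taken when the stolen page is not already on the LRU and actually has to be added back.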
#define PIPE_BUFFERS (16)
-#define PIPE_BUF_FLAG_LRU 0x01 /* page is on the LRU */
-#define PIPE_BUF_FLAG_ATOMIC 0x02 /* was atomically mapped */
-#define PIPE_BUF_FLAG_GIFT 0x04 /* page is a gift */
+#define PIPE_BUF_FLAG_ATOMIC 0x01 /* was atomically mapped */
+#define PIPE_BUF_FLAG_GIFT 0x02 /* page is a gift */
struct pipe_buffer {
struct page *page;