The previous patch allowed us to clean up most of the issues with pages
marked as private_2 when calling ceph_readpages. However, there seems to
be a case in the error-path cleanup in start_read that still triggers
this from time to time. I've only seen it a couple of times.
BUG: Bad page state in process petabucket  pfn:335b82
page:ffffea000cd6e080 count:0 mapcount:0 mapping: (null) index:0x0
page flags: 0x200000000001000(private_2)
Call Trace:
 [<ffffffff81563442>] dump_stack+0x46/0x58
 [<ffffffff8112c7f7>] bad_page+0xc7/0x120
 [<ffffffff8112cd9e>] free_pages_prepare+0x10e/0x120
 [<ffffffff8112e580>] free_hot_cold_page+0x40/0x160
 [<ffffffff81132427>] __put_single_page+0x27/0x30
 [<ffffffff81132d95>] put_page+0x25/0x40
 [<ffffffffa02cb409>] ceph_readpages+0x2e9/0x6f0 [ceph]
 [<ffffffff811313cf>] __do_page_cache_readahead+0x1af/0x260
Signed-off-by: Milosz Tanski <milosz@adfin.com>
Signed-off-by: Sage Weil <sage@inktank.com>
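For context, the failure comes from the error path in start_read(): when
add_to_page_cache_lru() fails, the page is released while fscache still has
it marked (fscache keeps its mark in PG_private_2), and the allocator later
reports the bad page state shown above. A minimal sketch of the fixed error
path is below; it is abbreviated for illustration (the surrounding loop and
error bookkeeping are omitted), and the authoritative change is the diff
that follows.

	if (add_to_page_cache_lru(page, &inode->i_data, page->index,
				  GFP_NOFS)) {
		/*
		 * Drop the fscache mark before giving up our reference,
		 * otherwise PG_private_2 is still set when the page is freed.
		 */
		ceph_fscache_uncache_page(inode, page);
		page_cache_release(page);
		dout("start_read %p add_to_page_cache failed %p\n",
		     inode, page);
		/* ... abbreviated: bail out of the readahead batch ... */
	}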
 		     page->index);
 		if (add_to_page_cache_lru(page, &inode->i_data, page->index,
 					  GFP_NOFS)) {
+			ceph_fscache_uncache_page(inode, page);
 			page_cache_release(page);
 			dout("start_read %p add_to_page_cache failed %p\n",
 			     inode, page);

 	fscache_invalidate(ceph_inode(inode)->fscache);
 }

+static inline void ceph_fscache_uncache_page(struct inode *inode,
+					     struct page *page)
+{
+	struct ceph_inode_info *ci = ceph_inode(inode);
+	return fscache_uncache_page(ci->fscache, page);
+}
+
 static inline int ceph_release_fscache_page(struct page *page, gfp_t gfp)
 {
 	struct inode* inode = page->mapping->host;

 {
 }

-static inline void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci)
+static inline void ceph_fscache_uncache_page(struct inode *inode,
+					     struct page *pages)
 {
 }

+static inline void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci)
+{
+}
+
 static inline int ceph_release_fscache_page(struct page *page, gfp_t gfp)
 {
 	return 1;