/*
 * linux/fs/nfs/read.c
 *
 * Block I/O for NFS
 *
 * Partial copy of Linus' read cache modifications to fs/nfs/file.c
 * modified for async RPC by okir@monad.swb.de
 */

#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/module.h>

#include "nfs4_fs.h"
#include "internal.h"
#include "iostat.h"
#include "fscache.h"
#include "pnfs.h"

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

static const struct nfs_pgio_completion_ops nfs_async_read_completion_ops;
static const struct nfs_rw_ops nfs_rw_read_ops;

static struct kmem_cache *nfs_rdata_cachep;

static struct nfs_pgio_header *nfs_readhdr_alloc(void)
{
	return kmem_cache_zalloc(nfs_rdata_cachep, GFP_KERNEL);
}

static void nfs_readhdr_free(struct nfs_pgio_header *rhdr)
{
	kmem_cache_free(nfs_rdata_cachep, rhdr);
}

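/*
 * Zero-fill @page and mark it up to date, covering the case where
 * nfs_page_length() reports no valid bytes (read entirely past EOF).
 */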
static
int nfs_return_empty_page(struct page *page)
{
	zero_user(page, 0, PAGE_SIZE);
	SetPageUptodate(page);
	unlock_page(page);
	return 0;
}

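/*
 * Initialize a pageio descriptor for reads.  When a pNFS layout
 * driver is active (and @force_mds is not set), its pg_read_ops are
 * used; otherwise I/O goes through the MDS via nfs_pgio_rw_ops.
 */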
void nfs_pageio_init_read(struct nfs_pageio_descriptor *pgio,
			  struct inode *inode, bool force_mds,
			  const struct nfs_pgio_completion_ops *compl_ops)
{
	struct nfs_server *server = NFS_SERVER(inode);
	const struct nfs_pageio_ops *pg_ops = &nfs_pgio_rw_ops;

#ifdef CONFIG_NFS_V4_1
	if (server->pnfs_curr_ld && !force_mds)
		pg_ops = server->pnfs_curr_ld->pg_read_ops;
#endif
	nfs_pageio_init(pgio, inode, pg_ops, compl_ops, &nfs_rw_read_ops,
			server->rsize, 0);
}
EXPORT_SYMBOL_GPL(nfs_pageio_init_read);

void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio)
{
	struct nfs_pgio_mirror *mirror;

	if (pgio->pg_ops && pgio->pg_ops->pg_cleanup)
		pgio->pg_ops->pg_cleanup(pgio);

	pgio->pg_ops = &nfs_pgio_rw_ops;

	/* read path should never have more than one mirror */
	WARN_ON_ONCE(pgio->pg_mirror_count != 1);

	mirror = &pgio->pg_mirrors[0];
	mirror->pg_bsize = NFS_SERVER(pgio->pg_inode)->rsize;
}
EXPORT_SYMBOL_GPL(nfs_pageio_reset_read_mds);

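/*
 * Drop a read request.  Once the whole page group has synced on
 * PG_UNLOCKPAGE, an up-to-date page is handed to fscache and then
 * unlocked.
 */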
static void nfs_readpage_release(struct nfs_page *req)
{
	struct inode *inode = d_inode(req->wb_context->dentry);

	dprintk("NFS: read done (%s/%llu %d@%lld)\n", inode->i_sb->s_id,
		(unsigned long long)NFS_FILEID(inode), req->wb_bytes,
		(long long)req_offset(req));

	if (nfs_page_group_sync_on_bit(req, PG_UNLOCKPAGE)) {
		if (PageUptodate(req->wb_page))
			nfs_readpage_to_fscache(inode, req->wb_page, 0);

		unlock_page(req->wb_page);
	}
	nfs_release_request(req);
}

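/*
 * Read a single page asynchronously.  The tail of the page beyond the
 * valid length is zeroed up front, so nfs_read_completion() only has
 * to zero ranges that a request covered but the server never returned.
 */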
int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
		       struct page *page)
{
	struct nfs_page	*new;
	unsigned int len;
	struct nfs_pageio_descriptor pgio;
	struct nfs_pgio_mirror *pgm;

	len = nfs_page_length(page);
	if (len == 0)
		return nfs_return_empty_page(page);
	new = nfs_create_request(ctx, page, NULL, 0, len);
	if (IS_ERR(new)) {
		unlock_page(page);
		return PTR_ERR(new);
	}
	if (len < PAGE_SIZE)
		zero_user_segment(page, len, PAGE_SIZE);

	nfs_pageio_init_read(&pgio, inode, false,
			     &nfs_async_read_completion_ops);
	if (!nfs_pageio_add_request(&pgio, new)) {
		nfs_list_remove_request(new);
		nfs_readpage_release(new);
	}
	nfs_pageio_complete(&pgio);

	/* It doesn't make sense to do mirrored reads! */
	WARN_ON_ONCE(pgio.pg_mirror_count != 1);

	pgm = &pgio.pg_mirrors[0];
	NFS_I(inode)->read_io += pgm->pg_bytes_written;

	return pgio.pg_error < 0 ? pgio.pg_error : 0;
}

static void nfs_page_group_set_uptodate(struct nfs_page *req)
{
	if (nfs_page_group_sync_on_bit(req, PG_UPTODATE))
		SetPageUptodate(req->wb_page);
}

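/*
 * Per-header completion: walk the remaining requests, zero whatever
 * falls past the server-reported good_bytes at EOF, and mark page
 * groups up to date only when all of their bytes arrived intact.
 */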
static void nfs_read_completion(struct nfs_pgio_header *hdr)
{
	unsigned long bytes = 0;

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
		goto out;
	while (!list_empty(&hdr->pages)) {
		struct nfs_page *req = nfs_list_entry(hdr->pages.next);
		struct page *page = req->wb_page;
		unsigned long start = req->wb_pgbase;
		unsigned long end = req->wb_pgbase + req->wb_bytes;

		if (test_bit(NFS_IOHDR_EOF, &hdr->flags)) {
			/* note: regions of the page not covered by a
			 * request are zeroed in nfs_readpage_async /
			 * readpage_async_filler */
			if (bytes > hdr->good_bytes) {
				/* nothing in this request was good, so zero
				 * the full extent of the request */
				zero_user_segment(page, start, end);
			} else if (hdr->good_bytes - bytes < req->wb_bytes) {
				/* part of this request has good bytes, but
				 * not all. zero the bad bytes */
				start += hdr->good_bytes - bytes;
				WARN_ON(start < req->wb_pgbase);
				zero_user_segment(page, start, end);
			}
		}
		bytes += req->wb_bytes;
		if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
			if (bytes <= hdr->good_bytes)
				nfs_page_group_set_uptodate(req);
		} else
			nfs_page_group_set_uptodate(req);
		nfs_list_remove_request(req);
		nfs_readpage_release(req);
	}
out:
	hdr->release(hdr);
}

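/*
 * Prepare the READ RPC.  Reads backed by a swapfile carry
 * NFS_RPC_SWAPFLAGS so the RPC layer treats them as swap I/O.
 */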
static void nfs_initiate_read(struct nfs_pgio_header *hdr,
			      struct rpc_message *msg,
			      const struct nfs_rpc_ops *rpc_ops,
			      struct rpc_task_setup *task_setup_data, int how)
{
	struct inode *inode = hdr->inode;
	int swap_flags = IS_SWAPFILE(inode) ? NFS_RPC_SWAPFLAGS : 0;

	task_setup_data->flags |= swap_flags;
	rpc_ops->read_setup(hdr, msg);
}

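/*
 * Error cleanup: release every queued request; via
 * nfs_readpage_release() this also unlocks the pages so that waiting
 * readers are not left hanging.
 */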
static void
nfs_async_read_error(struct list_head *head)
{
	struct nfs_page	*req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_readpage_release(req);
	}
}

static const struct nfs_pgio_completion_ops nfs_async_read_completion_ops = {
	.error_cleanup = nfs_async_read_error,
	.completion = nfs_read_completion,
};

/*
 * This is the callback from RPC telling us whether a reply was
 * received or some error occurred (timeout or socket shutdown).
 */
static int nfs_readpage_done(struct rpc_task *task,
			     struct nfs_pgio_header *hdr,
			     struct inode *inode)
{
	int status = NFS_PROTO(inode)->read_done(task, hdr);
	if (status != 0)
		return status;

	nfs_add_stats(inode, NFSIOS_SERVERREADBYTES, hdr->res.count);

	if (task->tk_status == -ESTALE) {
		set_bit(NFS_INO_STALE, &NFS_I(inode)->flags);
		nfs_mark_for_revalidate(inode);
	}
	return 0;
}

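/*
 * Deal with a short read.  If the server made no progress at all the
 * header is failed with -EIO; otherwise the request arguments are
 * advanced past the bytes received and the RPC is restarted.
 */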
static void nfs_readpage_retry(struct rpc_task *task,
			       struct nfs_pgio_header *hdr)
{
	struct nfs_pgio_args *argp = &hdr->args;
	struct nfs_pgio_res  *resp = &hdr->res;

	/* This is a short read! */
	nfs_inc_stats(hdr->inode, NFSIOS_SHORTREAD);
	/* Has the server at least made some progress? */
	if (resp->count == 0) {
		nfs_set_pgio_error(hdr, -EIO, argp->offset);
		return;
	}

	/* For non rpc-based layout drivers, retry-through-MDS */
	if (!task->tk_ops) {
		hdr->pnfs_error = -EAGAIN;
		return;
	}

	/* Yes, so retry the read at the end of the hdr */
	hdr->mds_offset += resp->count;
	argp->offset += resp->count;
	argp->pgbase += resp->count;
	argp->count -= resp->count;
	rpc_restart_call_prepare(task);
}

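/*
 * Digest the READ result.  EOF before the end of the I/O trims
 * good_bytes down to the returned range; a short read without EOF is
 * handed to nfs_readpage_retry().
 */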
static void nfs_readpage_result(struct rpc_task *task,
				struct nfs_pgio_header *hdr)
{
	if (hdr->res.eof) {
		loff_t bound;

		bound = hdr->args.offset + hdr->res.count;
		spin_lock(&hdr->lock);
		if (bound < hdr->io_start + hdr->good_bytes) {
			set_bit(NFS_IOHDR_EOF, &hdr->flags);
			clear_bit(NFS_IOHDR_ERROR, &hdr->flags);
			hdr->good_bytes = bound - hdr->io_start;
		}
		spin_unlock(&hdr->lock);
	} else if (hdr->res.count < hdr->args.count)
		nfs_readpage_retry(task, hdr);
}

/*
 * Read a page over NFS.
 * We read the page synchronously in the following case:
 *  -	The error flag is set for this page. This happens only when a
 *	previous async read operation failed.
 */
int nfs_readpage(struct file *file, struct page *page)
{
	struct nfs_open_context *ctx;
	struct inode *inode = page_file_mapping(page)->host;
	int		error;

	dprintk("NFS: nfs_readpage (%p %ld@%lu)\n",
		page, PAGE_SIZE, page_file_index(page));
	nfs_inc_stats(inode, NFSIOS_VFSREADPAGE);
	nfs_add_stats(inode, NFSIOS_READPAGES, 1);

	/*
	 * Try to flush any pending writes to the file..
	 *
	 * NOTE! Because we own the page lock, there cannot
	 * be any new pending writes generated at this point
	 * for this page (other pages can be written to).
	 */
	error = nfs_wb_page(inode, page);
	if (error)
		goto out_unlock;
	if (PageUptodate(page))
		goto out_unlock;

	error = -ESTALE;
	if (NFS_STALE(inode))
		goto out_unlock;

	if (file == NULL) {
		error = -EBADF;
		ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
		if (ctx == NULL)
			goto out_unlock;
	} else
		ctx = get_nfs_open_context(nfs_file_open_context(file));

	if (!IS_SYNC(inode)) {
		error = nfs_readpage_from_fscache(ctx, inode, page);
		if (error == 0)
			goto out;
	}

	error = nfs_readpage_async(ctx, inode, page);

out:
	put_nfs_open_context(ctx);
	return error;
out_unlock:
	unlock_page(page);
	return error;
}

struct nfs_readdesc {
	struct nfs_pageio_descriptor *pgio;
	struct nfs_open_context *ctx;
};

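/*
 * Callback for read_cache_pages(): wrap each page in an nfs_page
 * request and queue it on the pageio descriptor carried in @data.
 */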
static int
readpage_async_filler(void *data, struct page *page)
{
	struct nfs_readdesc *desc = (struct nfs_readdesc *)data;
	struct nfs_page *new;
	unsigned int len;
	int error;

	len = nfs_page_length(page);
	if (len == 0)
		return nfs_return_empty_page(page);

	new = nfs_create_request(desc->ctx, page, NULL, 0, len);
	if (IS_ERR(new))
		goto out_error;

	if (len < PAGE_SIZE)
		zero_user_segment(page, len, PAGE_SIZE);
	if (!nfs_pageio_add_request(desc->pgio, new)) {
		nfs_list_remove_request(new);
		nfs_readpage_release(new);
		error = desc->pgio->pg_error;
		goto out;
	}
	return 0;
out_error:
	error = PTR_ERR(new);
	unlock_page(page);
out:
	return error;
}

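/*
 * Readahead entry point.  Pages are served from fscache where
 * possible; the rest are batched through a single pageio descriptor
 * so they can be coalesced into rsize-sized RPCs.
 */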
int nfs_readpages(struct file *filp, struct address_space *mapping,
		struct list_head *pages, unsigned nr_pages)
{
	struct nfs_pageio_descriptor pgio;
	struct nfs_pgio_mirror *pgm;
	struct nfs_readdesc desc = {
		.pgio = &pgio,
	};
	struct inode *inode = mapping->host;
	unsigned long npages;
	int ret = -ESTALE;

	dprintk("NFS: nfs_readpages (%s/%Lu %d)\n",
			inode->i_sb->s_id,
			(unsigned long long)NFS_FILEID(inode),
			nr_pages);
	nfs_inc_stats(inode, NFSIOS_VFSREADPAGES);

	if (NFS_STALE(inode))
		goto out;

	if (filp == NULL) {
		desc.ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
		if (desc.ctx == NULL)
			return -EBADF;
	} else
		desc.ctx = get_nfs_open_context(nfs_file_open_context(filp));

	/* attempt to read as many of the pages as possible from the cache
	 * - this returns -ENOBUFS immediately if the cookie is negative
	 */
	ret = nfs_readpages_from_fscache(desc.ctx, inode, mapping,
					 pages, &nr_pages);
	if (ret == 0)
		goto read_complete; /* all pages were read */

	nfs_pageio_init_read(&pgio, inode, false,
			     &nfs_async_read_completion_ops);

	ret = read_cache_pages(mapping, pages, readpage_async_filler, &desc);
	nfs_pageio_complete(&pgio);

	/* It doesn't make sense to do mirrored reads! */
	WARN_ON_ONCE(pgio.pg_mirror_count != 1);

	pgm = &pgio.pg_mirrors[0];
	NFS_I(inode)->read_io += pgm->pg_bytes_written;
	npages = (pgm->pg_bytes_written + PAGE_SIZE - 1) >> PAGE_SHIFT;
	nfs_add_stats(inode, NFSIOS_READPAGES, npages);
read_complete:
	put_nfs_open_context(desc.ctx);
out:
	return ret;
}

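/*
 * Create / destroy the slab cache behind nfs_readhdr_alloc() and
 * nfs_readhdr_free().
 */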
int __init nfs_init_readpagecache(void)
{
	nfs_rdata_cachep = kmem_cache_create("nfs_read_data",
					     sizeof(struct nfs_pgio_header),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (nfs_rdata_cachep == NULL)
		return -ENOMEM;

	return 0;
}

void nfs_destroy_readpagecache(void)
{
	kmem_cache_destroy(nfs_rdata_cachep);
}

static const struct nfs_rw_ops nfs_rw_read_ops = {
	.rw_mode		= FMODE_READ,
	.rw_alloc_header	= nfs_readhdr_alloc,
	.rw_free_header		= nfs_readhdr_free,
	.rw_done		= nfs_readpage_done,
	.rw_result		= nfs_readpage_result,
	.rw_initiate		= nfs_initiate_read,
};