struct file *file = iocb->ki_filp;
struct address_space *mapping = file->f_mapping;
ssize_t retval;
- size_t write_len = 0;
+ size_t write_len;
+ pgoff_t end = 0; /* silence gcc */
/*
* If it's a write, unmap all mmappings of the file up-front. This
* will cause any pte dirty bits to be propagated into the pageframes
* for the subsequent filemap_write_and_wait().
*/
if (rw == WRITE) {
write_len = iov_length(iov, nr_segs);
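+ /* index of the last page the write touches; used by both invalidation passes below */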
+ end = (offset + write_len - 1) >> PAGE_CACHE_SHIFT;
if (mapping_mapped(mapping))
unmap_mapping_range(mapping, offset, write_len, 0);
}
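+ /*
+ * Write back all dirty pagecache for the file and wait for it to
+ * complete, so the direct I/O below operates on up-to-date on-disk data.
+ */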
retval = filemap_write_and_wait(mapping);
- if (retval == 0) {
- retval = mapping->a_ops->direct_IO(rw, iocb, iov,
- offset, nr_segs);
- if (rw == WRITE && mapping->nrpages) {
- pgoff_t end = (offset + write_len - 1)
- >> PAGE_CACHE_SHIFT;
- int err = invalidate_inode_pages2_range(mapping,
+ if (retval)
+ goto out;
+
+ /*
+ * After a write we want buffered reads to be sure to go to disk to get
+ * the new data. We invalidate clean cached pages from the region we're
+ * about to write. We do this *before* the write so that we can return
+ * -EIO without clobbering -EIOCBQUEUED from ->direct_IO().
+ */
+ if (rw == WRITE && mapping->nrpages) {
+ retval = invalidate_inode_pages2_range(mapping,
offset >> PAGE_CACHE_SHIFT, end);
- if (err)
- retval = err;
- }
+ if (retval)
+ goto out;
}
+
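+ /*
+ * Note that ->direct_IO() may return the number of bytes transferred,
+ * a negative error, or -EIOCBQUEUED if an async request was queued, so
+ * we can't simply bail out here on any non-zero return value.
+ */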
+ retval = mapping->a_ops->direct_IO(rw, iocb, iov, offset, nr_segs);
+
+ /*
+ * Finally, try again to invalidate clean pages which might have been
+ * faulted in by get_user_pages() if the source of the write was an
+ * mmap()ed region of the file we're writing. That's a pretty crazy
+ * thing to do, so we don't support it 100%. If this invalidation
+ * fails and we have -EIOCBQUEUED we ignore the failure.
+ */
+ if (rw == WRITE && mapping->nrpages) {
+ int err = invalidate_inode_pages2_range(mapping,
+ offset >> PAGE_CACHE_SHIFT, end);
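+ /* don't let an invalidation failure clobber -EIOCBQUEUED or a prior error */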
+ if (err && retval >= 0)
+ retval = err;
+ }
+out:
return retval;
}