struct iomap iomap = { 0 };
unsigned flags = 0;
int error, major = 0;
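+ /* VM_FAULT_* status to hand back while the entry stays locked */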
+ int locked_status = 0;
void *entry;

error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap);
if (error)
goto unlock_entry;
if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
error = -EIO; /* fs corruption? */
- goto unlock_entry;
+ goto finish_iomap;
}
sector = dax_iomap_sector(&iomap, pos);

if (vmf->cow_page) {
switch (iomap.type) {
case IOMAP_MAPPED:
error = copy_user_dax(iomap.bdev, sector, PAGE_SIZE,
vmf->cow_page, vaddr);
break;
}
if (error)
- goto unlock_entry;
+ goto finish_iomap;
if (!radix_tree_exceptional_entry(entry)) {
vmf->page = entry;
- return VM_FAULT_LOCKED;
+ locked_status = VM_FAULT_LOCKED;
+ } else {
+ vmf->entry = entry;
+ locked_status = VM_FAULT_DAX_LOCKED;
}
- vmf->entry = entry;
- return VM_FAULT_DAX_LOCKED;
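+ /* The COW page is set up; still give ->iomap_end a chance to run. */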
+ goto finish_iomap;
}
switch (iomap.type) {
case IOMAP_MAPPED:
error = dax_insert_mapping(mapping, iomap.bdev, sector,
PAGE_SIZE, &entry, vma, vmf);
break;
case IOMAP_UNWRITTEN:
case IOMAP_HOLE:
- if (!(vmf->flags & FAULT_FLAG_WRITE))
- return dax_load_hole(mapping, entry, vmf);
+ if (!(vmf->flags & FAULT_FLAG_WRITE)) {
+ locked_status = dax_load_hole(mapping, entry, vmf);
+ break;
+ }
/*FALLTHRU*/
default:
WARN_ON_ONCE(1);
error = -EIO;
break;
}
+ finish_iomap:
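+ /* Let the filesystem undo whatever ->iomap_begin() set up. */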
+ if (ops->iomap_end) {
+ if (error) {
+ /* keep previous error */
+ ops->iomap_end(inode, pos, PAGE_SIZE, 0, flags,
+ &iomap);
+ } else {
+ error = ops->iomap_end(inode, pos, PAGE_SIZE,
+ PAGE_SIZE, flags, &iomap);
+ }
+ }
unlock_entry:
- put_locked_mapping_entry(mapping, vmf->pgoff, entry);
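+ /*
+ * A locked page (VM_FAULT_LOCKED) or DAX entry (VM_FAULT_DAX_LOCKED)
+ * is handed to the caller, who then owns the lock; drop the entry
+ * lock here only when nothing is handed over or when we failed late.
+ */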
+ if (!locked_status || error)
+ put_locked_mapping_entry(mapping, vmf->pgoff, entry);
out:
if (error == -ENOMEM)
return VM_FAULT_OOM | major;
/* -EBUSY is fine, somebody else faulted on the same PTE */
if (error < 0 && error != -EBUSY)
return VM_FAULT_SIGBUS | major;
+ if (locked_status) {
+ WARN_ON_ONCE(error); /* -EBUSY from ops->iomap_end? */
+ return locked_status;
+ }
return VM_FAULT_NOPAGE | major;
}
EXPORT_SYMBOL_GPL(dax_iomap_fault);