if (vma->vm_ops && vma->vm_ops->pfn_mkwrite) {
struct vm_fault vmf2 = {
.page = NULL,
- .pgoff = linear_page_index(vma, vmf->address),
+ .pgoff = vmf->pgoff,
.address = vmf->address,
.flags = FAULT_FLAG_WRITE | FAULT_FLAG_MKWRITE,
};
* released depending on flags and vma->vm_ops->fault() return value.
* See filemap_fault() and __lock_page_or_retry().
*/
-static int __do_fault(struct vm_fault *vmf, pgoff_t pgoff,
- struct page *cow_page, struct page **page, void **entry)
+static int __do_fault(struct vm_fault *vmf, struct page *cow_page,
+ struct page **page, void **entry)
{
struct vm_area_struct *vma = vmf->vma;
struct vm_fault vmf2;
int ret;
vmf2.address = vmf->address;
- vmf2.pgoff = pgoff;
+ vmf2.pgoff = vmf->pgoff;
vmf2.flags = vmf->flags;
vmf2.page = NULL;
vmf2.gfp_mask = __get_fault_gfp_mask(vma);
* fault_around_bytes value (and therefore to page order). This way it's
* easier to guarantee that we don't cross page table boundaries.
*/
-static int do_fault_around(struct vm_fault *vmf, pgoff_t start_pgoff)
+static int do_fault_around(struct vm_fault *vmf)
{
unsigned long address = vmf->address, nr_pages, mask;
+ pgoff_t start_pgoff = vmf->pgoff;
pgoff_t end_pgoff;
int off, ret = 0;
return ret;
}
-static int do_read_fault(struct vm_fault *vmf, pgoff_t pgoff)
+static int do_read_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct page *fault_page;
* something).
*/
if (vma->vm_ops->map_pages && fault_around_bytes >> PAGE_SHIFT > 1) {
- ret = do_fault_around(vmf, pgoff);
+ ret = do_fault_around(vmf);
if (ret)
return ret;
}
- ret = __do_fault(vmf, pgoff, NULL, &fault_page, NULL);
+ ret = __do_fault(vmf, NULL, &fault_page, NULL);
if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
return ret;
return ret;
}
-static int do_cow_fault(struct vm_fault *vmf, pgoff_t pgoff)
+static int do_cow_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct page *fault_page, *new_page;
return VM_FAULT_OOM;
}
- ret = __do_fault(vmf, pgoff, new_page, &fault_page, &fault_entry);
+ ret = __do_fault(vmf, new_page, &fault_page, &fault_entry);
if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
goto uncharge_out;
unlock_page(fault_page);
put_page(fault_page);
} else {
- dax_unlock_mapping_entry(vma->vm_file->f_mapping, pgoff);
+ dax_unlock_mapping_entry(vma->vm_file->f_mapping, vmf->pgoff);
}
if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
goto uncharge_out;
return ret;
}
-static int do_shared_fault(struct vm_fault *vmf, pgoff_t pgoff)
+static int do_shared_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct page *fault_page;
int dirtied = 0;
int ret, tmp;
- ret = __do_fault(vmf, pgoff, NULL, &fault_page, NULL);
+ ret = __do_fault(vmf, NULL, &fault_page, NULL);
if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
return ret;
static int do_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
- pgoff_t pgoff = linear_page_index(vma, vmf->address);
/* The VMA was not fully populated on mmap() or missing VM_DONTEXPAND */
if (!vma->vm_ops->fault)
return VM_FAULT_SIGBUS;
if (!(vmf->flags & FAULT_FLAG_WRITE))
- return do_read_fault(vmf, pgoff);
+ return do_read_fault(vmf);
if (!(vma->vm_flags & VM_SHARED))
- return do_cow_fault(vmf, pgoff);
- return do_shared_fault(vmf, pgoff);
+ return do_cow_fault(vmf);
+ return do_shared_fault(vmf);
}
static int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
.vma = vma,
.address = address & PAGE_MASK,
.flags = flags,
+ .pgoff = linear_page_index(vma, address),
};
struct mm_struct *mm = vma->vm_mm;
pgd_t *pgd;
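
For context (not part of the patch above): once __handle_mm_fault() fills in vmf->pgoff, every path below it can rely on that value instead of recomputing linear_page_index(). A minimal sketch of a ->fault handler consuming vmf->pgoff follows; the ->fault prototype here still takes the VMA separately, as it did at the time of this series, and example_fault(), struct example_dev, and its pages[] array are purely hypothetical names for illustration.

#include <linux/mm.h>

/*
 * Illustrative sketch only -- not part of the patch. struct example_dev and
 * example_fault() are hypothetical. The handler indexes its backing store
 * with the vmf->pgoff that __handle_mm_fault() now initializes, rather than
 * recomputing linear_page_index().
 */
struct example_dev {
	unsigned long nr_pages;
	struct page **pages;		/* pre-allocated backing pages */
};

static int example_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct example_dev *dev = vma->vm_private_data;

	if (vmf->pgoff >= dev->nr_pages)
		return VM_FAULT_SIGBUS;	/* fault beyond the backing store */

	vmf->page = dev->pages[vmf->pgoff];	/* page backing this file offset */
	get_page(vmf->page);			/* the core MM drops this reference later */
	return 0;
}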