mm/hmm: convert to use vm_fault_t
Author:     Souptick Joarder <jrdr.linux@gmail.com>
AuthorDate: Tue, 12 Mar 2019 06:28:10 +0000 (23:28 -0700)
Commit:     Linus Torvalds <torvalds@linux-foundation.org>
CommitDate: Tue, 12 Mar 2019 17:04:00 +0000 (10:04 -0700)
Convert to use the vm_fault_t type as the return type for the fault handler.

kbuild reported a warning during testing of
*mm-create-the-new-vm_fault_t-type.patch*, available at the link below:
https://patchwork.kernel.org/patch/10752741/

  kernel/memremap.c:46:34: warning: incorrect type in return expression
                           (different base types)
  kernel/memremap.c:46:34: expected restricted vm_fault_t
  kernel/memremap.c:46:34: got int
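
The root cause: vm_fault_t is a sparse "__bitwise" restricted type, so
returning a plain int (for example an errno value) where the VM_FAULT_*
codes are expected triggers the warning above.  A minimal hypothetical
sketch of the pattern (example_fault() and its body are made up for
illustration; this is not the code at kernel/memremap.c:46):

  #include <linux/mm.h>	/* vm_fault_t and the VM_FAULT_* codes */

  static vm_fault_t example_fault(struct vm_fault *vmf)
  {
  	struct page *page = NULL;

  	/* ... look up or migrate the backing page ... */

  	if (!page) {
  		/* Returning -EFAULT here is what sparse flags:
  		 * "expected restricted vm_fault_t ... got int".
  		 * The fix is a VM_FAULT_* code instead: */
  		return VM_FAULT_SIGBUS;
  	}

  	vmf->page = page;
  	return 0;	/* success */
  }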

This patch fixes the warning, and hmm_devmem_fault() is also converted
to return vm_fault_t to avoid further warnings.
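
For drivers implementing the hmm_devmem_ops ->fault callback, this is a
one-line signature change plus returning VM_FAULT_* codes instead of
errnos on failure.  A hedged sketch of a converted callback
(my_devmem_fault() and its body are illustrative, not from this patch;
only the signature follows the converted prototypes in the diff below):

  static vm_fault_t my_devmem_fault(struct hmm_devmem *devmem,
  				    struct vm_area_struct *vma,
  				    unsigned long addr,
  				    const struct page *page,
  				    unsigned int flags,
  				    pmd_t *pmdp)
  {
  	bool migrated = false;

  	/* ... migrate the device page back to system memory ... */

  	if (!migrated)
  		return VM_FAULT_SIGBUS;	/* was typically an errno, e.g. -ENOMEM */

  	return 0;	/* fault handled */
  }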

[sfr@canb.auug.org.au: drm/nouveau/dmem: update for struct hmm_devmem_ops member change]
Link: http://lkml.kernel.org/r/20190220174407.753d94e5@canb.auug.org.au
Link: http://lkml.kernel.org/r/20190110145900.GA1317@jordon-HP-15-Notebook-PC
Signed-off-by: Souptick Joarder <jrdr.linux@gmail.com>
Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
Reviewed-by: Jérôme Glisse <jglisse@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
drivers/gpu/drm/nouveau/nouveau_dmem.c
include/linux/hmm.h
mm/hmm.c

index 8be7a83ced9b5351e194c0bf0b98abc39df50eb3..aa9fec80492d167f720a07ee58f8e0196d858c3a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -261,7 +261,7 @@ static const struct migrate_vma_ops nouveau_dmem_fault_migrate_ops = {
        .finalize_and_map       = nouveau_dmem_fault_finalize_and_map,
 };
 
-static int
+static vm_fault_t
 nouveau_dmem_fault(struct hmm_devmem *devmem,
                   struct vm_area_struct *vma,
                   unsigned long addr,
index 66f9ebbb1df3ffb91120bcaae4987ced0781e98e..ad50b7b4f141ce7eeb22c46c3d2ef61c672919a8 100644
--- a/include/linux/hmm.h
+++ b/include/linux/hmm.h
@@ -468,7 +468,7 @@ struct hmm_devmem_ops {
         * Note that mmap semaphore is held in read mode at least when this
         * callback occurs, hence the vma is valid upon callback entry.
         */
-       int (*fault)(struct hmm_devmem *devmem,
+       vm_fault_t (*fault)(struct hmm_devmem *devmem,
                     struct vm_area_struct *vma,
                     unsigned long addr,
                     const struct page *page,
@@ -511,7 +511,7 @@ struct hmm_devmem_ops {
  * chunk, as an optimization. It must, however, prioritize the faulting address
  * over all the others.
  */
-typedef int (*dev_page_fault_t)(struct vm_area_struct *vma,
+typedef vm_fault_t (*dev_page_fault_t)(struct vm_area_struct *vma,
                                unsigned long addr,
                                const struct page *page,
                                unsigned int flags,
index a04e4b81061012b003c260d8e3cabc388947090c..fe1cd87e49acc94641eaf7178dc07e5c4306e408 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -990,7 +990,7 @@ static void hmm_devmem_ref_kill(struct percpu_ref *ref)
        percpu_ref_kill(ref);
 }
 
-static int hmm_devmem_fault(struct vm_area_struct *vma,
+static vm_fault_t hmm_devmem_fault(struct vm_area_struct *vma,
                            unsigned long addr,
                            const struct page *page,
                            unsigned int flags,