drm/i915: Simplify guard logic for setup_scratch_page()
author Chris Wilson <chris@chris-wilson.co.uk>
Mon, 29 Jan 2018 10:28:40 +0000 (10:28 +0000)
committer Chris Wilson <chris@chris-wilson.co.uk>
Mon, 29 Jan 2018 15:37:53 +0000 (15:37 +0000)
Older gcc complains that it can't follow the guards and thinks that
addr may be used uninitialised.

In the process, we can simplify down to one loop:
add/remove: 0/0 grow/shrink: 0/1 up/down: 0/-131 (-131)
Function                                     old     new   delta
setup_scratch_page                           545     414    -131

Reported-by: Geert Uytterhoeven <geert@linux-m68k.org>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180129102840.19901-1-chris@chris-wilson.co.uk
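
[Editorial note, not part of the original commit: the shape of the rewrite
is a single loop that first attempts the 64K scratch size and, on any
failure, retries once with a plain 4K page before giving up. Below is a
hypothetical, standalone userspace sketch of that fallback-loop pattern;
the names (setup_scratch, SIZE_64K, SIZE_4K) are invented for illustration
and this is not the driver code, which follows in the diff.]

    #define _POSIX_C_SOURCE 200112L
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define SIZE_64K (64 * 1024)
    #define SIZE_4K  (4 * 1024)

    /* Try a 64K scratch allocation first, fall back to 4K on failure. */
    static void *setup_scratch(size_t *out_size)
    {
            size_t size = SIZE_64K;

            do {
                    void *buf;

                    /* size-aligned allocation, zeroed like __GFP_ZERO */
                    if (posix_memalign(&buf, size, size) == 0) {
                            memset(buf, 0, size);
                            *out_size = size;
                            return buf;
                    }

                    /* the smallest size also failed: give up */
                    if (size == SIZE_4K)
                            return NULL;

                    /* retry with a single 4K page */
                    size = SIZE_4K;
            } while (1);
    }

    int main(void)
    {
            size_t size;
            void *scratch = setup_scratch(&size);

            if (!scratch)
                    return 1;

            printf("allocated %zu byte scratch page\n", size);
            free(scratch);
            return 0;
    }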
drivers/gpu/drm/i915/i915_gem_gtt.c

index be227512430aba9dbb57f05928b61da62206ef44..b65426c0457d4dbd929f7702d10324ebb2a952d5 100644
@@ -543,9 +543,7 @@ static void fill_page_dma_32(struct i915_address_space *vm,
 static int
 setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
 {
-       struct page *page = NULL;
-       dma_addr_t addr;
-       int order;
+       unsigned long size;
 
        /*
         * In order to utilize 64K pages for an object with a size < 2M, we will
@@ -559,48 +557,47 @@ setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
         * TODO: we should really consider write-protecting the scratch-page and
         * sharing between ppgtt
         */
+       size = I915_GTT_PAGE_SIZE_4K;
        if (i915_vm_is_48bit(vm) &&
            HAS_PAGE_SIZES(vm->i915, I915_GTT_PAGE_SIZE_64K)) {
-               order = get_order(I915_GTT_PAGE_SIZE_64K);
-               page = alloc_pages(gfp | __GFP_ZERO | __GFP_NOWARN, order);
-               if (page) {
-                       addr = dma_map_page(vm->dma, page, 0,
-                                           I915_GTT_PAGE_SIZE_64K,
-                                           PCI_DMA_BIDIRECTIONAL);
-                       if (unlikely(dma_mapping_error(vm->dma, addr))) {
-                               __free_pages(page, order);
-                               page = NULL;
-                       }
-
-                       if (!IS_ALIGNED(addr, I915_GTT_PAGE_SIZE_64K)) {
-                               dma_unmap_page(vm->dma, addr,
-                                              I915_GTT_PAGE_SIZE_64K,
-                                              PCI_DMA_BIDIRECTIONAL);
-                               __free_pages(page, order);
-                               page = NULL;
-                       }
-               }
+               size = I915_GTT_PAGE_SIZE_64K;
+               gfp |= __GFP_NOWARN;
        }
+       gfp |= __GFP_ZERO | __GFP_RETRY_MAYFAIL;
 
-       if (!page) {
-               order = 0;
-               page = alloc_page(gfp | __GFP_ZERO);
+       do {
+               int order = get_order(size);
+               struct page *page;
+               dma_addr_t addr;
+
+               page = alloc_pages(gfp, order);
                if (unlikely(!page))
-                       return -ENOMEM;
+                       goto skip;
 
-               addr = dma_map_page(vm->dma, page, 0, PAGE_SIZE,
+               addr = dma_map_page(vm->dma, page, 0, size,
                                    PCI_DMA_BIDIRECTIONAL);
-               if (unlikely(dma_mapping_error(vm->dma, addr))) {
-                       __free_page(page);
-                       return -ENOMEM;
-               }
-       }
+               if (unlikely(dma_mapping_error(vm->dma, addr)))
+                       goto free_page;
 
-       vm->scratch_page.page = page;
-       vm->scratch_page.daddr = addr;
-       vm->scratch_page.order = order;
+               if (unlikely(!IS_ALIGNED(addr, size)))
+                       goto unmap_page;
 
-       return 0;
+               vm->scratch_page.page = page;
+               vm->scratch_page.daddr = addr;
+               vm->scratch_page.order = order;
+               return 0;
+
+unmap_page:
+               dma_unmap_page(vm->dma, addr, size, PCI_DMA_BIDIRECTIONAL);
+free_page:
+               __free_pages(page, order);
+skip:
+               if (size == I915_GTT_PAGE_SIZE_4K)
+                       return -ENOMEM;
+
+               size = I915_GTT_PAGE_SIZE_4K;
+               gfp &= ~__GFP_NOWARN;
+       } while (1);
 }
 
 static void cleanup_scratch_page(struct i915_address_space *vm)