drm/i915: Use a cached mapping for the physical HWS
author Chris Wilson <chris@chris-wilson.co.uk>
Mon, 3 Sep 2018 15:23:04 +0000 (16:23 +0100)
committer Chris Wilson <chris@chris-wilson.co.uk>
Mon, 3 Sep 2018 16:55:59 +0000 (17:55 +0100)
Older gens use a physical address for the hardware status page, for which
we use cache-coherent writes. As the writes land in the CPU cache, we can
use a normal WB-mapped page to read the HWS, which we use for our seqno
tracking.
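For reference, the read side then remains an ordinary cached load; roughly
(the helper name below is illustrative, not part of this patch):

	/*
	 * Sketch of the read side: the HWS is a plain WB page, so reading
	 * a status dword (e.g. the seqno) is just a cached load from the
	 * CPU's point of view.
	 */
	static inline u32 hws_read_dword(struct intel_engine_cs *engine, int reg)
	{
		u32 *hws = engine->status_page.page_addr;

		return READ_ONCE(hws[reg]);
	}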

Anecdotally, I observed lost breadcrumb writes into the HWS on i965gm,
which so far have not recurred with this patch. How reliable that
evidence is remains to be seen.

v2: Explicitly pass the expected physical address to the hw
v3: Also remember the wild writes we once had for HWS above 4G.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Daniel Vetter <daniel@ffwll.ch>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Reviewed-by: Daniel Vetter <daniel@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20180903152304.31589-2-chris@chris-wilson.co.uk
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/intel_engine_cs.c
drivers/gpu/drm/i915/intel_ringbuffer.c

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 9771f39d99b3e6a9c37bea0570314faeb5c79089..5a4da5b723fdc5c3f937d4c50939d2aa8ccae577 100644
@@ -1666,7 +1666,6 @@ struct drm_i915_private {
        struct intel_engine_cs *engine_class[MAX_ENGINE_CLASS + 1]
                                            [MAX_ENGINE_INSTANCE + 1];
 
-       struct drm_dma_handle *status_page_dmah;
        struct resource mch_res;
 
        /* protects the irq masks */
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 292eae19fce22b065ad9099c5023b1c2e646894a..10cd051ba29eebfc5d14c1ca7779784e4705aa73 100644
@@ -532,11 +532,11 @@ void intel_engine_cleanup_scratch(struct intel_engine_cs *engine)
 
 static void cleanup_status_page(struct intel_engine_cs *engine)
 {
-       struct drm_dma_handle *dmah;
+       if (HWS_NEEDS_PHYSICAL(engine->i915)) {
+               void *addr = fetch_and_zero(&engine->status_page.page_addr);
 
-       dmah = fetch_and_zero(&engine->i915->status_page_dmah);
-       if (dmah)
-               drm_pci_free(&engine->i915->drm, dmah);
+               __free_page(virt_to_page(addr));
+       }
 
        i915_vma_unpin_and_release(&engine->status_page.vma,
                                   I915_VMA_RELEASE_MAP);
@@ -605,17 +605,18 @@ err:
 
 static int init_phys_status_page(struct intel_engine_cs *engine)
 {
-       struct drm_i915_private *dev_priv = engine->i915;
-
-       GEM_BUG_ON(engine->id != RCS);
+       struct page *page;
 
-       dev_priv->status_page_dmah =
-               drm_pci_alloc(&dev_priv->drm, PAGE_SIZE, PAGE_SIZE);
-       if (!dev_priv->status_page_dmah)
+       /*
+        * Though the HWS register does support 36bit addresses, historically
+        * we have had hangs and corruption reported due to wild writes if
+        * the HWS is placed above 4G.
+        */
+       page = alloc_page(GFP_KERNEL | __GFP_DMA32 | __GFP_ZERO);
+       if (!page)
                return -ENOMEM;
 
-       engine->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
-       memset(engine->status_page.page_addr, 0, PAGE_SIZE);
+       engine->status_page.page_addr = page_address(page);
 
        return 0;
 }
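The cleanup path above takes the pointer with fetch_and_zero() before freeing
the page; that i915 helper is roughly the following take-and-clear macro
(shown as a sketch for readability, not part of this diff):

	#define fetch_and_zero(ptr) ({			\
		typeof(*ptr) __T = *(ptr);		\
		*(ptr) = (typeof(*ptr))0;		\
		__T;					\
	})

i.e. it reads the current value, writes zero back, and evaluates to the old
value (it is not atomic).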
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 44432677160ce00171c3f818544e711f82c34361..86604dd1c5a54a6ee3d6fb90ba77d502f40f93c0 100644
@@ -344,11 +344,14 @@ gen7_render_ring_flush(struct i915_request *rq, u32 mode)
 static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
 {
        struct drm_i915_private *dev_priv = engine->i915;
+       struct page *page = virt_to_page(engine->status_page.page_addr);
+       phys_addr_t phys = PFN_PHYS(page_to_pfn(page));
        u32 addr;
 
-       addr = dev_priv->status_page_dmah->busaddr;
+       addr = lower_32_bits(phys);
        if (INTEL_GEN(dev_priv) >= 4)
-               addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
+               addr |= (phys >> 28) & 0xf0;
+
        I915_WRITE(HWS_PGA, addr);
 }