drm: move read_domains and write_domain into i915
author    Christian König <ckoenig.leichtzumerken@gmail.com>
          Fri, 16 Feb 2018 12:43:38 +0000 (13:43 +0100)
committer Chris Wilson <chris@chris-wilson.co.uk>
          Fri, 16 Feb 2018 14:12:48 +0000 (14:12 +0000)
i915 is the only driver that uses the read_domains and write_domain fields
in the drm_gem_object structure, so for every other driver they are just
wasted memory.

Move the fields into drm_i915_gem_object instead and patch the i915 code
with the following sed commands:

sed -i "s/obj->base.read_domains/obj->read_domains/g" drivers/gpu/drm/i915/*.c drivers/gpu/drm/i915/*/*.c
sed -i "s/obj->base.write_domain/obj->write_domain/g" drivers/gpu/drm/i915/*.c drivers/gpu/drm/i915/*/*.c

The change is only compile tested.

v2: move fields around as suggested by Chris.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20180216124338.9087-1-christian.koenig@amd.com
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
16 files changed:
drivers/gpu/drm/i915/gvt/dmabuf.c
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_clflush.c
drivers/gpu/drm/i915/i915_gem_dmabuf.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_gem_internal.c
drivers/gpu/drm/i915/i915_gem_object.h
drivers/gpu/drm/i915/i915_gem_stolen.c
drivers/gpu/drm/i915/i915_gem_userptr.c
drivers/gpu/drm/i915/i915_gpu_error.c
drivers/gpu/drm/i915/selftests/huge_gem_object.c
drivers/gpu/drm/i915/selftests/huge_pages.c
drivers/gpu/drm/i915/selftests/i915_gem_context.c
drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
include/drm/drm_gem.h

diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
index 2ab584f97dfb6ac1c96c8d89132e5859bff4dd0b..de3e076dcb312344191376f246e0f2b61f4de37e 100644
--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -162,8 +162,8 @@ static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
                info->size << PAGE_SHIFT);
        i915_gem_object_init(obj, &intel_vgpu_gem_ops);
 
-       obj->base.read_domains = I915_GEM_DOMAIN_GTT;
-       obj->base.write_domain = 0;
+       obj->read_domains = I915_GEM_DOMAIN_GTT;
+       obj->write_domain = 0;
        if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
                unsigned int tiling_mode = 0;
                unsigned int stride = 0;
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 3560eb567ca727b4f2f867c4597a5f1a80df7370..0cbe154e517d3987811ff388e4483c578b2c1311 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -150,8 +150,8 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
                   get_global_flag(obj),
                   get_pin_mapped_flag(obj),
                   obj->base.size / 1024,
-                  obj->base.read_domains,
-                  obj->base.write_domain,
+                  obj->read_domains,
+                  obj->write_domain,
                   i915_cache_level_str(dev_priv, obj->cache_level),
                   obj->mm.dirty ? " dirty" : "",
                   obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index fc68b35854df946e1e6c41616e513b229bc614c3..f530cd2477249f72284b9e924b26e2d07763a2f9 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -240,8 +240,8 @@ err_phys:
 
 static void __start_cpu_write(struct drm_i915_gem_object *obj)
 {
-       obj->base.read_domains = I915_GEM_DOMAIN_CPU;
-       obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+       obj->read_domains = I915_GEM_DOMAIN_CPU;
+       obj->write_domain = I915_GEM_DOMAIN_CPU;
        if (cpu_write_needs_clflush(obj))
                obj->cache_dirty = true;
 }
@@ -257,7 +257,7 @@ __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
                obj->mm.dirty = false;
 
        if (needs_clflush &&
-           (obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
+           (obj->read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
            !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
                drm_clflush_sg(pages);
 
@@ -703,10 +703,10 @@ flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
        struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
        struct i915_vma *vma;
 
-       if (!(obj->base.write_domain & flush_domains))
+       if (!(obj->write_domain & flush_domains))
                return;
 
-       switch (obj->base.write_domain) {
+       switch (obj->write_domain) {
        case I915_GEM_DOMAIN_GTT:
                i915_gem_flush_ggtt_writes(dev_priv);
 
@@ -731,7 +731,7 @@ flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
                break;
        }
 
-       obj->base.write_domain = 0;
+       obj->write_domain = 0;
 }
 
 static inline int
@@ -831,7 +831,7 @@ int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
         * anyway again before the next pread happens.
         */
        if (!obj->cache_dirty &&
-           !(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
+           !(obj->read_domains & I915_GEM_DOMAIN_CPU))
                *needs_clflush = CLFLUSH_BEFORE;
 
 out:
@@ -890,7 +890,7 @@ int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
                 * Same trick applies to invalidate partially written
                 * cachelines read before writing.
                 */
-               if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
+               if (!(obj->read_domains & I915_GEM_DOMAIN_CPU))
                        *needs_clflush |= CLFLUSH_BEFORE;
        }
 
@@ -2391,8 +2391,8 @@ static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
         * wasn't in the GTT, there shouldn't be any way it could have been in
         * a GPU cache
         */
-       GEM_BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
-       GEM_BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
+       GEM_BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
+       GEM_BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
 
        st = kmalloc(sizeof(*st), GFP_KERNEL);
        if (st == NULL)
@@ -3703,7 +3703,7 @@ static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj)
        flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
        if (obj->cache_dirty)
                i915_gem_clflush_object(obj, I915_CLFLUSH_FORCE);
-       obj->base.write_domain = 0;
+       obj->write_domain = 0;
 }
 
 void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj)
@@ -3740,7 +3740,7 @@ i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write)
        if (ret)
                return ret;
 
-       if (obj->base.write_domain == I915_GEM_DOMAIN_WC)
+       if (obj->write_domain == I915_GEM_DOMAIN_WC)
                return 0;
 
        /* Flush and acquire obj->pages so that we are coherent through
@@ -3761,17 +3761,17 @@ i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write)
         * coherent writes from the GPU, by effectively invalidating the
         * WC domain upon first access.
         */
-       if ((obj->base.read_domains & I915_GEM_DOMAIN_WC) == 0)
+       if ((obj->read_domains & I915_GEM_DOMAIN_WC) == 0)
                mb();
 
        /* It should now be out of any other write domains, and we can update
         * the domain values for our changes.
         */
-       GEM_BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_WC) != 0);
-       obj->base.read_domains |= I915_GEM_DOMAIN_WC;
+       GEM_BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_WC) != 0);
+       obj->read_domains |= I915_GEM_DOMAIN_WC;
        if (write) {
-               obj->base.read_domains = I915_GEM_DOMAIN_WC;
-               obj->base.write_domain = I915_GEM_DOMAIN_WC;
+               obj->read_domains = I915_GEM_DOMAIN_WC;
+               obj->write_domain = I915_GEM_DOMAIN_WC;
                obj->mm.dirty = true;
        }
 
@@ -3803,7 +3803,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
        if (ret)
                return ret;
 
-       if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
+       if (obj->write_domain == I915_GEM_DOMAIN_GTT)
                return 0;
 
        /* Flush and acquire obj->pages so that we are coherent through
@@ -3824,17 +3824,17 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
         * coherent writes from the GPU, by effectively invalidating the
         * GTT domain upon first access.
         */
-       if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
+       if ((obj->read_domains & I915_GEM_DOMAIN_GTT) == 0)
                mb();
 
        /* It should now be out of any other write domains, and we can update
         * the domain values for our changes.
         */
-       GEM_BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
-       obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
+       GEM_BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
+       obj->read_domains |= I915_GEM_DOMAIN_GTT;
        if (write) {
-               obj->base.read_domains = I915_GEM_DOMAIN_GTT;
-               obj->base.write_domain = I915_GEM_DOMAIN_GTT;
+               obj->read_domains = I915_GEM_DOMAIN_GTT;
+               obj->write_domain = I915_GEM_DOMAIN_GTT;
                obj->mm.dirty = true;
        }
 
@@ -4146,7 +4146,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
        /* It should now be out of any other write domains, and we can update
         * the domain values for our changes.
         */
-       obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
+       obj->read_domains |= I915_GEM_DOMAIN_GTT;
 
        return vma;
 
@@ -4199,15 +4199,15 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
        flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
 
        /* Flush the CPU cache if it's still invalid. */
-       if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
+       if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
                i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
-               obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
+               obj->read_domains |= I915_GEM_DOMAIN_CPU;
        }
 
        /* It should now be out of any other write domains, and we can update
         * the domain values for our changes.
         */
-       GEM_BUG_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
+       GEM_BUG_ON(obj->write_domain & ~I915_GEM_DOMAIN_CPU);
 
        /* If we're writing through the CPU, then the GPU read domains will
         * need to be invalidated at next use.
@@ -4643,8 +4643,8 @@ i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size)
 
        i915_gem_object_init(obj, &i915_gem_object_ops);
 
-       obj->base.write_domain = I915_GEM_DOMAIN_CPU;
-       obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+       obj->write_domain = I915_GEM_DOMAIN_CPU;
+       obj->read_domains = I915_GEM_DOMAIN_CPU;
 
        if (HAS_LLC(dev_priv))
                /* On some devices, we can have the GPU use the LLC (the CPU
@@ -5702,7 +5702,7 @@ i915_gem_object_create_from_data(struct drm_i915_private *dev_priv,
        if (IS_ERR(obj))
                return obj;
 
-       GEM_BUG_ON(obj->base.write_domain != I915_GEM_DOMAIN_CPU);
+       GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);
 
        file = obj->base.filp;
        offset = 0;
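
A pattern worth making explicit before the remaining hunks: all of the
set_to_*_domain paths above preserve the same invariant, namely that
write_domain is either zero or a single domain bit that is also set in
read_domains. As a hypothetical assertion (assert_domain_invariant is not
in the tree, purely illustrative) it would read:

static inline void
assert_domain_invariant(const struct drm_i915_gem_object *obj)
{
	/* the unique write domain is at most one set bit... */
	GEM_BUG_ON(obj->write_domain & (obj->write_domain - 1));

	/* ...and a cache being written through must also be readable */
	GEM_BUG_ON(obj->write_domain & ~obj->read_domains);
}
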
diff --git a/drivers/gpu/drm/i915/i915_gem_clflush.c b/drivers/gpu/drm/i915/i915_gem_clflush.c
index b9b53ac141766bd941d5fcb677801f5ce14e37db..f5c570d35b2a2dd75fbcad4f4205e84e1e58f811 100644
--- a/drivers/gpu/drm/i915/i915_gem_clflush.c
+++ b/drivers/gpu/drm/i915/i915_gem_clflush.c
@@ -177,7 +177,7 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
        } else if (obj->mm.pages) {
                __i915_do_clflush(obj);
        } else {
-               GEM_BUG_ON(obj->base.write_domain != I915_GEM_DOMAIN_CPU);
+               GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);
        }
 
        obj->cache_dirty = false;
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 864439a214c83c32d663e49bf05c50cd96f3ffb3..69a7aec49e84e06d70b5800af653d951d522049a 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -330,8 +330,8 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
         * write-combined buffer or a delay through the chipset for GTT
         * writes that do require us to treat GTT as a separate cache domain.)
         */
-       obj->base.read_domains = I915_GEM_DOMAIN_GTT;
-       obj->base.write_domain = 0;
+       obj->read_domains = I915_GEM_DOMAIN_GTT;
+       obj->write_domain = 0;
 
        return &obj->base;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index ed6e9db51e675c321522a9578dcd2c5f05d0386d..51f3c32c64bf9f327832e796ce4e8c4a5ca3bef4 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1073,7 +1073,7 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
        u32 *cmd;
        int err;
 
-       GEM_BUG_ON(vma->obj->base.write_domain & I915_GEM_DOMAIN_CPU);
+       GEM_BUG_ON(vma->obj->write_domain & I915_GEM_DOMAIN_CPU);
 
        obj = i915_gem_batch_pool_get(&eb->engine->batch_pool, PAGE_SIZE);
        if (IS_ERR(obj))
@@ -1861,16 +1861,16 @@ void i915_vma_move_to_active(struct i915_vma *vma,
        i915_gem_active_set(&vma->last_read[idx], req);
        list_move_tail(&vma->vm_link, &vma->vm->active_list);
 
-       obj->base.write_domain = 0;
+       obj->write_domain = 0;
        if (flags & EXEC_OBJECT_WRITE) {
-               obj->base.write_domain = I915_GEM_DOMAIN_RENDER;
+               obj->write_domain = I915_GEM_DOMAIN_RENDER;
 
                if (intel_fb_obj_invalidate(obj, ORIGIN_CS))
                        i915_gem_active_set(&obj->frontbuffer_write, req);
 
-               obj->base.read_domains = 0;
+               obj->read_domains = 0;
        }
-       obj->base.read_domains |= I915_GEM_GPU_DOMAINS;
+       obj->read_domains |= I915_GEM_GPU_DOMAINS;
 
        if (flags & EXEC_OBJECT_NEEDS_FENCE)
                i915_gem_active_set(&vma->last_fence, req);
diff --git a/drivers/gpu/drm/i915/i915_gem_internal.c b/drivers/gpu/drm/i915/i915_gem_internal.c
index 8301c06c952fd890d5ae3f5e943ab080be51abe5..0d0144b2104cb3264b2048a1569bc598fde532f3 100644
--- a/drivers/gpu/drm/i915/i915_gem_internal.c
+++ b/drivers/gpu/drm/i915/i915_gem_internal.c
@@ -201,8 +201,8 @@ i915_gem_object_create_internal(struct drm_i915_private *i915,
        drm_gem_private_object_init(&i915->drm, &obj->base, size);
        i915_gem_object_init(obj, &i915_gem_object_internal_ops);
 
-       obj->base.read_domains = I915_GEM_DOMAIN_CPU;
-       obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+       obj->read_domains = I915_GEM_DOMAIN_CPU;
+       obj->write_domain = I915_GEM_DOMAIN_CPU;
 
        cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
        i915_gem_object_set_cache_coherency(obj, cache_level);
diff --git a/drivers/gpu/drm/i915/i915_gem_object.h b/drivers/gpu/drm/i915/i915_gem_object.h
index 05e89e1c0a088b6cc2ca9e28f0d4ddd606726f97..ca2b3b62569d1475de2370fd5cffc138aaf17a3f 100644
--- a/drivers/gpu/drm/i915/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/i915_gem_object.h
@@ -148,6 +148,21 @@ struct drm_i915_gem_object {
 #define I915_BO_CACHE_COHERENT_FOR_WRITE BIT(1)
        unsigned int cache_dirty:1;
 
+       /**
+        * @read_domains: Read memory domains.
+        *
+        * These monitor which caches contain read/write data related to the
+        * object. When transitioning from one set of domains to another,
+        * the driver is called to ensure that caches are suitably flushed and
+        * invalidated.
+        */
+       u16 read_domains;
+
+       /**
+        * @write_domain: Corresponding unique write memory domain.
+        */
+       u16 write_domain;
+
        atomic_t frontbuffer_bits;
        unsigned int frontbuffer_ggtt_origin; /* write once */
        struct i915_gem_active frontbuffer_write;
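
For reference, the fields shrink from uint32_t in drm_gem_object to u16 on
the move. That is safe because the complete set of domain bits defined in
include/uapi/drm/i915_drm.h occupies only the low byte:

#define I915_GEM_DOMAIN_CPU		0x00000001
#define I915_GEM_DOMAIN_RENDER		0x00000002
#define I915_GEM_DOMAIN_SAMPLER		0x00000004
#define I915_GEM_DOMAIN_COMMAND		0x00000008
#define I915_GEM_DOMAIN_INSTRUCTION	0x00000010
#define I915_GEM_DOMAIN_VERTEX		0x00000020
#define I915_GEM_DOMAIN_GTT		0x00000040
#define I915_GEM_DOMAIN_WC		0x00000080

Even the combined I915_GEM_GPU_DOMAINS mask (render, sampler, command,
instruction and vertex) is only 0x3e, comfortably within 16 bits.
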
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index f18da9e2be8e03b2c70f19e89ad67fdfe3127f40..62aa67960bf4ceabcb7bed46e51175b12989604f 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -516,7 +516,7 @@ _i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
        i915_gem_object_init(obj, &i915_gem_object_stolen_ops);
 
        obj->stolen = stolen;
-       obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
+       obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
        cache_level = HAS_LLC(dev_priv) ? I915_CACHE_LLC : I915_CACHE_NONE;
        i915_gem_object_set_cache_coherency(obj, cache_level);
 
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 1f9d24021cbbd617d16e10743f96ce80cbfd8bf9..d596a8302ca3cca529977a10ebc02965855525af 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -798,8 +798,8 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
 
        drm_gem_private_object_init(dev, &obj->base, args->user_size);
        i915_gem_object_init(obj, &i915_gem_userptr_ops);
-       obj->base.read_domains = I915_GEM_DOMAIN_CPU;
-       obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+       obj->read_domains = I915_GEM_DOMAIN_CPU;
+       obj->write_domain = I915_GEM_DOMAIN_CPU;
        i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
 
        obj->userptr.ptr = args->user_ptr;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 161d9103a65e1bb602d64cbf3c5b4db82c215dbf..65c0bef73ee5af252125fc6f1e6216d8817f39b9 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1021,8 +1021,8 @@ static void capture_bo(struct drm_i915_error_buffer *err,
        err->engine = __active_get_engine_id(&obj->frontbuffer_write);
 
        err->gtt_offset = vma->node.start;
-       err->read_domains = obj->base.read_domains;
-       err->write_domain = obj->base.write_domain;
+       err->read_domains = obj->read_domains;
+       err->write_domain = obj->write_domain;
        err->fence_reg = vma->fence ? vma->fence->id : -1;
        err->tiling = i915_gem_object_get_tiling(obj);
        err->dirty = obj->mm.dirty;
diff --git a/drivers/gpu/drm/i915/selftests/huge_gem_object.c b/drivers/gpu/drm/i915/selftests/huge_gem_object.c
index a2632df3917353b246a1316f3322811476c24418..391f3d9ffdf1ab652530121cb6d439b987039a8a 100644
--- a/drivers/gpu/drm/i915/selftests/huge_gem_object.c
+++ b/drivers/gpu/drm/i915/selftests/huge_gem_object.c
@@ -129,8 +129,8 @@ huge_gem_object(struct drm_i915_private *i915,
        drm_gem_private_object_init(&i915->drm, &obj->base, dma_size);
        i915_gem_object_init(obj, &huge_ops);
 
-       obj->base.read_domains = I915_GEM_DOMAIN_CPU;
-       obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+       obj->read_domains = I915_GEM_DOMAIN_CPU;
+       obj->write_domain = I915_GEM_DOMAIN_CPU;
        cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
        i915_gem_object_set_cache_coherency(obj, cache_level);
        obj->scratch = phys_size;
diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/selftests/huge_pages.c
index 2ea69394f42839f8ede1cc9475cd9ee9c16ba983..52b1bd17bf46c105ce15bf589b0b7b6e16eaaede 100644
--- a/drivers/gpu/drm/i915/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/selftests/huge_pages.c
@@ -178,8 +178,8 @@ huge_pages_object(struct drm_i915_private *i915,
        drm_gem_private_object_init(&i915->drm, &obj->base, size);
        i915_gem_object_init(obj, &huge_page_ops);
 
-       obj->base.write_domain = I915_GEM_DOMAIN_CPU;
-       obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+       obj->write_domain = I915_GEM_DOMAIN_CPU;
+       obj->read_domains = I915_GEM_DOMAIN_CPU;
        obj->cache_level = I915_CACHE_NONE;
 
        obj->mm.page_mask = page_mask;
@@ -329,8 +329,8 @@ fake_huge_pages_object(struct drm_i915_private *i915, u64 size, bool single)
        else
                i915_gem_object_init(obj, &fake_ops);
 
-       obj->base.write_domain = I915_GEM_DOMAIN_CPU;
-       obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+       obj->write_domain = I915_GEM_DOMAIN_CPU;
+       obj->read_domains = I915_GEM_DOMAIN_CPU;
        obj->cache_level = I915_CACHE_NONE;
 
        return obj;
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
index 56a803d11916e8142d04623876a81d48d03b1a25..6da2a2f29c5483cb8829d800012b0836428a0f3f 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
@@ -215,8 +215,8 @@ static int cpu_fill(struct drm_i915_gem_object *obj, u32 value)
        }
 
        i915_gem_obj_finish_shmem_access(obj);
-       obj->base.read_domains = I915_GEM_DOMAIN_GTT | I915_GEM_DOMAIN_CPU;
-       obj->base.write_domain = 0;
+       obj->read_domains = I915_GEM_DOMAIN_GTT | I915_GEM_DOMAIN_CPU;
+       obj->write_domain = 0;
        return 0;
 }
 
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 89b6ca9b14a7f34170f2004f371252b8a4fe10e3..f7dc926f4ef1fa610e5415d0b2771b30fcbdbbe5 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -113,8 +113,8 @@ fake_dma_object(struct drm_i915_private *i915, u64 size)
        drm_gem_private_object_init(&i915->drm, &obj->base, size);
        i915_gem_object_init(obj, &fake_ops);
 
-       obj->base.write_domain = I915_GEM_DOMAIN_CPU;
-       obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+       obj->write_domain = I915_GEM_DOMAIN_CPU;
+       obj->read_domains = I915_GEM_DOMAIN_CPU;
        obj->cache_level = I915_CACHE_NONE;
 
        /* Preallocate the "backing storage" */
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
index 9c55c2acaa2b366dc5b93f068f66575aa3177a64..3583b98a1718139eb07dbda254af71987a45602b 100644
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -115,21 +115,6 @@ struct drm_gem_object {
         */
        int name;
 
-       /**
-        * @read_domains:
-        *
-        * Read memory domains. These monitor which caches contain read/write data
-        * related to the object. When transitioning from one set of domains
-        * to another, the driver is called to ensure that caches are suitably
-        * flushed and invalidated.
-        */
-       uint32_t read_domains;
-
-       /**
-        * @write_domain: Corresponding unique write memory domain.
-        */
-       uint32_t write_domain;
-
        /**
         * @dma_buf:
         *
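
Net effect: every GEM object in every other driver sheds eight bytes
(modulo padding) of state that only i915 ever consulted, while i915 keeps
the same information in four bytes, two u16s packed next to its existing
flag bits; presumably this packing is the "move fields around" referred to
in v2.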