info->size << PAGE_SHIFT);
i915_gem_object_init(obj, &intel_vgpu_gem_ops);
- obj->base.read_domains = I915_GEM_DOMAIN_GTT;
- obj->base.write_domain = 0;
+ obj->read_domains = I915_GEM_DOMAIN_GTT;
+ obj->write_domain = 0;
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
unsigned int tiling_mode = 0;
unsigned int stride = 0;
get_global_flag(obj),
get_pin_mapped_flag(obj),
obj->base.size / 1024,
- obj->base.read_domains,
- obj->base.write_domain,
+ obj->read_domains,
+ obj->write_domain,
i915_cache_level_str(dev_priv, obj->cache_level),
obj->mm.dirty ? " dirty" : "",
obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
static void __start_cpu_write(struct drm_i915_gem_object *obj)
{
- obj->base.read_domains = I915_GEM_DOMAIN_CPU;
- obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+ obj->read_domains = I915_GEM_DOMAIN_CPU;
+ obj->write_domain = I915_GEM_DOMAIN_CPU;
if (cpu_write_needs_clflush(obj))
obj->cache_dirty = true;
}
obj->mm.dirty = false;
if (needs_clflush &&
- (obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
+ (obj->read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
drm_clflush_sg(pages);
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
struct i915_vma *vma;
- if (!(obj->base.write_domain & flush_domains))
+ if (!(obj->write_domain & flush_domains))
return;
- switch (obj->base.write_domain) {
+ switch (obj->write_domain) {
case I915_GEM_DOMAIN_GTT:
i915_gem_flush_ggtt_writes(dev_priv);
break;
}
- obj->base.write_domain = 0;
+ obj->write_domain = 0;
}
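
The dispatch above works because the write domain is unique: at most one bit is ever set in write_domain (the kernel-doc added later in this patch calls it the "corresponding unique write memory domain"), so the function can switch on it directly and clear it on the way out while leaving read_domains intact. A minimal standalone model of that flow, with printouts standing in for the real flush machinery; struct model_obj and the function name are illustrative, only the GTT case appears in this hunk, and the driver's other cases are elided:

#include <stdint.h>
#include <stdio.h>

#define I915_GEM_DOMAIN_GTT 0x00000040 /* uapi value from i915_drm.h */

struct model_obj { uint16_t read_domains, write_domain; };

void model_flush_write_domain(struct model_obj *obj, uint16_t flush_domains)
{
	if (!(obj->write_domain & flush_domains))
		return;

	switch (obj->write_domain) {
	case I915_GEM_DOMAIN_GTT:
		printf("flush GGTT writes\n"); /* i915_gem_flush_ggtt_writes() */
		break;
	default:
		/* the driver handles its other write domains here */
		printf("flush domain %#x\n", (unsigned int)obj->write_domain);
		break;
	}

	/* cached reads stay valid, so read_domains is untouched */
	obj->write_domain = 0;
}
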
static inline int
* anyway again before the next pread happens.
*/
if (!obj->cache_dirty &&
- !(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
+ !(obj->read_domains & I915_GEM_DOMAIN_CPU))
*needs_clflush = CLFLUSH_BEFORE;
out:
* Same trick applies to invalidate partially written
* cachelines read before writing.
*/
- if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
+ if (!(obj->read_domains & I915_GEM_DOMAIN_CPU))
*needs_clflush |= CLFLUSH_BEFORE;
}
* wasn't in the GTT, there shouldn't be any way it could have been in
* a GPU cache
*/
- GEM_BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
- GEM_BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
+ GEM_BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
+ GEM_BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
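
The two assertions check a freshly allocated object against the aggregate GPU mask. For reference, a sketch of the uapi domain bits involved and the mask they compose; the values are from include/uapi/drm/i915_drm.h and the mask mirrors the driver's I915_GEM_GPU_DOMAINS:

#define I915_GEM_DOMAIN_CPU         0x00000001
#define I915_GEM_DOMAIN_RENDER      0x00000002
#define I915_GEM_DOMAIN_SAMPLER     0x00000004
#define I915_GEM_DOMAIN_COMMAND     0x00000008
#define I915_GEM_DOMAIN_INSTRUCTION 0x00000010
#define I915_GEM_DOMAIN_VERTEX      0x00000020
#define I915_GEM_DOMAIN_GTT         0x00000040
#define I915_GEM_DOMAIN_WC          0x00000080

/* every domain backed by a GPU cache; pages that were never bound
 * into the GTT cannot be in any of them, hence the GEM_BUG_ON()s */
#define I915_GEM_GPU_DOMAINS (I915_GEM_DOMAIN_RENDER | \
			      I915_GEM_DOMAIN_SAMPLER | \
			      I915_GEM_DOMAIN_COMMAND | \
			      I915_GEM_DOMAIN_INSTRUCTION | \
			      I915_GEM_DOMAIN_VERTEX)
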
st = kmalloc(sizeof(*st), GFP_KERNEL);
if (st == NULL)
flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
if (obj->cache_dirty)
i915_gem_clflush_object(obj, I915_CLFLUSH_FORCE);
- obj->base.write_domain = 0;
+ obj->write_domain = 0;
}
void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj)
if (ret)
return ret;
- if (obj->base.write_domain == I915_GEM_DOMAIN_WC)
+ if (obj->write_domain == I915_GEM_DOMAIN_WC)
return 0;
/* Serialise direct access to this object with the barriers for
 * coherent writes from the GPU, by effectively invalidating the
 * WC domain upon first access.
 */
- if ((obj->base.read_domains & I915_GEM_DOMAIN_WC) == 0)
+ if ((obj->read_domains & I915_GEM_DOMAIN_WC) == 0)
mb();
/* It should now be out of any other write domains, and we can update
* the domain values for our changes.
*/
- GEM_BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_WC) != 0);
- obj->base.read_domains |= I915_GEM_DOMAIN_WC;
+ GEM_BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_WC) != 0);
+ obj->read_domains |= I915_GEM_DOMAIN_WC;
if (write) {
- obj->base.read_domains = I915_GEM_DOMAIN_WC;
- obj->base.write_domain = I915_GEM_DOMAIN_WC;
+ obj->read_domains = I915_GEM_DOMAIN_WC;
+ obj->write_domain = I915_GEM_DOMAIN_WC;
obj->mm.dirty = true;
}
if (ret)
return ret;
- if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
+ if (obj->write_domain == I915_GEM_DOMAIN_GTT)
return 0;
/* Serialise direct access to this object with the barriers for
 * coherent writes from the GPU, by effectively invalidating the
 * GTT domain upon first access.
 */
- if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
+ if ((obj->read_domains & I915_GEM_DOMAIN_GTT) == 0)
mb();
/* It should now be out of any other write domains, and we can update
* the domain values for our changes.
*/
- GEM_BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
- obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
+ GEM_BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
+ obj->read_domains |= I915_GEM_DOMAIN_GTT;
if (write) {
- obj->base.read_domains = I915_GEM_DOMAIN_GTT;
- obj->base.write_domain = I915_GEM_DOMAIN_GTT;
+ obj->read_domains = I915_GEM_DOMAIN_GTT;
+ obj->write_domain = I915_GEM_DOMAIN_GTT;
obj->mm.dirty = true;
}
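
The WC and GTT paths above are instances of one transition pattern: flush the previous writer, add the new domain to the read set, and on a write collapse the read set so the new domain becomes the sole owner and the object is marked dirty. A condensed standalone model of that pattern (model_set_domain is an illustrative name, not a driver function):

#include <stdbool.h>
#include <stdint.h>

struct model_obj { uint16_t read_domains, write_domain; bool dirty; };

void model_set_domain(struct model_obj *obj, uint16_t domain, bool write)
{
	/* the driver runs flush_write_domain(obj, ~domain) first */
	obj->read_domains |= domain;		/* reading adds a domain... */
	if (write) {
		obj->read_domains = domain;	/* ...writing makes it exclusive */
		obj->write_domain = domain;
		obj->dirty = true;
	}
}
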
/* It should now be out of any other write domains, and we can update
* the domain values for our changes.
*/
- obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
+ obj->read_domains |= I915_GEM_DOMAIN_GTT;
return vma;
flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
/* Flush the CPU cache if it's still invalid. */
- if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
+ if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
- obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
+ obj->read_domains |= I915_GEM_DOMAIN_CPU;
}
/* It should now be out of any other write domains, and we can update
* the domain values for our changes.
*/
- GEM_BUG_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
+ GEM_BUG_ON(obj->write_domain & ~I915_GEM_DOMAIN_CPU);
/* If we're writing through the CPU, then the GPU read domains will
* need to be invalidated at next use.
i915_gem_object_init(obj, &i915_gem_object_ops);
- obj->base.write_domain = I915_GEM_DOMAIN_CPU;
- obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+ obj->write_domain = I915_GEM_DOMAIN_CPU;
+ obj->read_domains = I915_GEM_DOMAIN_CPU;
if (HAS_LLC(dev_priv))
/* On some devices, we can have the GPU use the LLC (the CPU
if (IS_ERR(obj))
return obj;
- GEM_BUG_ON(obj->base.write_domain != I915_GEM_DOMAIN_CPU);
+ GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);
file = obj->base.filp;
offset = 0;
} else if (obj->mm.pages) {
__i915_do_clflush(obj);
} else {
- GEM_BUG_ON(obj->base.write_domain != I915_GEM_DOMAIN_CPU);
+ GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);
}
obj->cache_dirty = false;
* write-combined buffer or a delay through the chipset for GTT
* writes that do require us to treat GTT as a separate cache domain.)
*/
- obj->base.read_domains = I915_GEM_DOMAIN_GTT;
- obj->base.write_domain = 0;
+ obj->read_domains = I915_GEM_DOMAIN_GTT;
+ obj->write_domain = 0;
return &obj->base;
u32 *cmd;
int err;
- GEM_BUG_ON(vma->obj->base.write_domain & I915_GEM_DOMAIN_CPU);
+ GEM_BUG_ON(vma->obj->write_domain & I915_GEM_DOMAIN_CPU);
obj = i915_gem_batch_pool_get(&eb->engine->batch_pool, PAGE_SIZE);
if (IS_ERR(obj))
i915_gem_active_set(&vma->last_read[idx], req);
list_move_tail(&vma->vm_link, &vma->vm->active_list);
- obj->base.write_domain = 0;
+ obj->write_domain = 0;
if (flags & EXEC_OBJECT_WRITE) {
- obj->base.write_domain = I915_GEM_DOMAIN_RENDER;
+ obj->write_domain = I915_GEM_DOMAIN_RENDER;
if (intel_fb_obj_invalidate(obj, ORIGIN_CS))
i915_gem_active_set(&obj->frontbuffer_write, req);
- obj->base.read_domains = 0;
+ obj->read_domains = 0;
}
- obj->base.read_domains |= I915_GEM_GPU_DOMAINS;
+ obj->read_domains |= I915_GEM_GPU_DOMAINS;
if (flags & EXEC_OBJECT_NEEDS_FENCE)
i915_gem_active_set(&vma->last_fence, req);
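
The execbuf bookkeeping above runs in the opposite direction to the CPU paths: queuing a batch makes the object readable through every GPU cache, and EXEC_OBJECT_WRITE additionally makes the render domain the exclusive writer. A small self-checking replay of the two cases; the model_ names are illustrative, the domain values come from the uapi header, and GPU_DOMAINS mirrors the driver mask:

#include <assert.h>
#include <stdint.h>

#define DOMAIN_CPU    0x01
#define DOMAIN_RENDER 0x02
#define GPU_DOMAINS   0x3e		/* render|sampler|command|instruction|vertex */
#define OBJ_WRITE     (1u << 2)	/* EXEC_OBJECT_WRITE */

static void model_move_to_active(uint16_t *rd, uint16_t *wr, unsigned int flags)
{
	*wr = 0;
	if (flags & OBJ_WRITE) {
		*wr = DOMAIN_RENDER;
		*rd = 0;	/* stale read domains are superseded by the write */
	}
	*rd |= GPU_DOMAINS;	/* the GPU may now read through any of its caches */
}

int main(void)
{
	uint16_t rd = DOMAIN_CPU, wr = DOMAIN_CPU;

	model_move_to_active(&rd, &wr, OBJ_WRITE);
	assert(rd == GPU_DOMAINS && wr == DOMAIN_RENDER);

	model_move_to_active(&rd, &wr, 0);
	assert(rd == GPU_DOMAINS && wr == 0);
	return 0;
}
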
drm_gem_private_object_init(&i915->drm, &obj->base, size);
i915_gem_object_init(obj, &i915_gem_object_internal_ops);
- obj->base.read_domains = I915_GEM_DOMAIN_CPU;
- obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+ obj->read_domains = I915_GEM_DOMAIN_CPU;
+ obj->write_domain = I915_GEM_DOMAIN_CPU;
cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
i915_gem_object_set_cache_coherency(obj, cache_level);
#define I915_BO_CACHE_COHERENT_FOR_WRITE BIT(1)
unsigned int cache_dirty:1;
+ /**
+ * @read_domains: Read memory domains.
+ *
+ * These monitor which caches contain read/write data related to the
+ * object. When transitioning from one set of domains to another,
+ * the driver is called to ensure that caches are suitably flushed and
+ * invalidated.
+ */
+ u16 read_domains;
+
+ /**
+ * @write_domain: Corresponding unique write memory domain.
+ */
+ u16 write_domain;
+
atomic_t frontbuffer_bits;
unsigned int frontbuffer_ggtt_origin; /* write once */
struct i915_gem_active frontbuffer_write;
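
Note the type change that comes with the move: the fields were uint32_t in struct drm_gem_object but become u16 here. Every domain flag defined in the uapi header fits in the low byte (the widest is I915_GEM_DOMAIN_WC, 0x80), so the narrowing loses nothing and keeps struct drm_i915_gem_object a little tighter. A compile-time sketch of that claim; I915_GEM_DOMAIN_ALL is a hypothetical helper, not a uapi macro:

#include <stdint.h>

#define I915_GEM_DOMAIN_ALL 0xff /* union of all domain bits in i915_drm.h today */

_Static_assert((I915_GEM_DOMAIN_ALL & ~(uint32_t)UINT16_MAX) == 0,
	       "every i915 memory domain must fit in a u16");
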
i915_gem_object_init(obj, &i915_gem_object_stolen_ops);
obj->stolen = stolen;
- obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
+ obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
cache_level = HAS_LLC(dev_priv) ? I915_CACHE_LLC : I915_CACHE_NONE;
i915_gem_object_set_cache_coherency(obj, cache_level);
drm_gem_private_object_init(dev, &obj->base, args->user_size);
i915_gem_object_init(obj, &i915_gem_userptr_ops);
- obj->base.read_domains = I915_GEM_DOMAIN_CPU;
- obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+ obj->read_domains = I915_GEM_DOMAIN_CPU;
+ obj->write_domain = I915_GEM_DOMAIN_CPU;
i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
obj->userptr.ptr = args->user_ptr;
err->engine = __active_get_engine_id(&obj->frontbuffer_write);
err->gtt_offset = vma->node.start;
- err->read_domains = obj->base.read_domains;
- err->write_domain = obj->base.write_domain;
+ err->read_domains = obj->read_domains;
+ err->write_domain = obj->write_domain;
err->fence_reg = vma->fence ? vma->fence->id : -1;
err->tiling = i915_gem_object_get_tiling(obj);
err->dirty = obj->mm.dirty;
drm_gem_private_object_init(&i915->drm, &obj->base, dma_size);
i915_gem_object_init(obj, &huge_ops);
- obj->base.read_domains = I915_GEM_DOMAIN_CPU;
- obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+ obj->read_domains = I915_GEM_DOMAIN_CPU;
+ obj->write_domain = I915_GEM_DOMAIN_CPU;
cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
i915_gem_object_set_cache_coherency(obj, cache_level);
obj->scratch = phys_size;
drm_gem_private_object_init(&i915->drm, &obj->base, size);
i915_gem_object_init(obj, &huge_page_ops);
- obj->base.write_domain = I915_GEM_DOMAIN_CPU;
- obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+ obj->write_domain = I915_GEM_DOMAIN_CPU;
+ obj->read_domains = I915_GEM_DOMAIN_CPU;
obj->cache_level = I915_CACHE_NONE;
obj->mm.page_mask = page_mask;
else
i915_gem_object_init(obj, &fake_ops);
- obj->base.write_domain = I915_GEM_DOMAIN_CPU;
- obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+ obj->write_domain = I915_GEM_DOMAIN_CPU;
+ obj->read_domains = I915_GEM_DOMAIN_CPU;
obj->cache_level = I915_CACHE_NONE;
return obj;
}
i915_gem_obj_finish_shmem_access(obj);
- obj->base.read_domains = I915_GEM_DOMAIN_GTT | I915_GEM_DOMAIN_CPU;
- obj->base.write_domain = 0;
+ obj->read_domains = I915_GEM_DOMAIN_GTT | I915_GEM_DOMAIN_CPU;
+ obj->write_domain = 0;
return 0;
}
drm_gem_private_object_init(&i915->drm, &obj->base, size);
i915_gem_object_init(obj, &fake_ops);
- obj->base.write_domain = I915_GEM_DOMAIN_CPU;
- obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+ obj->write_domain = I915_GEM_DOMAIN_CPU;
+ obj->read_domains = I915_GEM_DOMAIN_CPU;
obj->cache_level = I915_CACHE_NONE;
/* Preallocate the "backing storage" */
*/
int name;
- /**
- * @read_domains:
- *
- * Read memory domains. These monitor which caches contain read/write data
- * related to the object. When transitioning from one set of domains
- * to another, the driver is called to ensure that caches are suitably
- * flushed and invalidated.
- */
- uint32_t read_domains;
-
- /**
- * @write_domain: Corresponding unique write memory domain.
- */
- uint32_t write_domain;
-
/**
* @dma_buf:
*