return err;
}
-static struct i915_vma *
-gpu_write_dw(struct i915_vma *vma, u64 offset, u32 val)
-{
- struct drm_i915_private *i915 = vma->vm->i915;
- const int gen = INTEL_GEN(i915);
- unsigned int count = vma->size >> PAGE_SHIFT;
- struct drm_i915_gem_object *obj;
- struct i915_vma *batch;
- unsigned int size;
- u32 *cmd;
- int n;
- int err;
-
- size = (1 + 4 * count) * sizeof(u32);
- size = round_up(size, PAGE_SIZE);
- obj = i915_gem_object_create_internal(i915, size);
- if (IS_ERR(obj))
- return ERR_CAST(obj);
-
- cmd = i915_gem_object_pin_map(obj, I915_MAP_WC);
- if (IS_ERR(cmd)) {
- err = PTR_ERR(cmd);
- goto err;
- }
-
- offset += vma->node.start;
-
- for (n = 0; n < count; n++) {
- if (gen >= 8) {
- *cmd++ = MI_STORE_DWORD_IMM_GEN4;
- *cmd++ = lower_32_bits(offset);
- *cmd++ = upper_32_bits(offset);
- *cmd++ = val;
- } else if (gen >= 4) {
- *cmd++ = MI_STORE_DWORD_IMM_GEN4 |
- (gen < 6 ? MI_USE_GGTT : 0);
- *cmd++ = 0;
- *cmd++ = offset;
- *cmd++ = val;
- } else {
- *cmd++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
- *cmd++ = offset;
- *cmd++ = val;
- }
-
- offset += PAGE_SIZE;
- }
-
- *cmd = MI_BATCH_BUFFER_END;
- intel_gt_chipset_flush(vma->vm->gt);
-
- i915_gem_object_unpin_map(obj);
-
- batch = i915_vma_instance(obj, vma->vm, NULL);
- if (IS_ERR(batch)) {
- err = PTR_ERR(batch);
- goto err;
- }
-
- err = i915_vma_pin(batch, 0, 0, PIN_USER);
- if (err)
- goto err;
-
- return batch;
-
-err:
- i915_gem_object_put(obj);
-
- return ERR_PTR(err);
-}
-
static int gpu_write(struct i915_vma *vma,
struct i915_gem_context *ctx,
struct intel_engine_cs *engine,
- u32 dword,
- u32 value)
+ u32 dw,
+ u32 val)
{
- struct i915_request *rq;
- struct i915_vma *batch;
int err;
- GEM_BUG_ON(!intel_engine_can_store_dword(engine));
-
- batch = gpu_write_dw(vma, dword * sizeof(u32), value);
- if (IS_ERR(batch))
- return PTR_ERR(batch);
-
- rq = igt_request_alloc(ctx, engine);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto err_batch;
- }
-
- i915_vma_lock(batch);
- err = i915_vma_move_to_active(batch, rq, 0);
- i915_vma_unlock(batch);
- if (err)
- goto err_request;
-
- i915_vma_lock(vma);
- err = i915_gem_object_set_to_gtt_domain(vma->obj, false);
- if (err == 0)
- err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
- i915_vma_unlock(vma);
+ i915_gem_object_lock(vma->obj);
+ err = i915_gem_object_set_to_gtt_domain(vma->obj, true);
+ i915_gem_object_unlock(vma->obj);
if (err)
- goto err_request;
-
- err = engine->emit_bb_start(rq,
- batch->node.start, batch->node.size,
- 0);
-err_request:
- if (err)
- i915_request_skip(rq, err);
- i915_request_add(rq);
-err_batch:
- i915_vma_unpin(batch);
- i915_vma_close(batch);
- i915_vma_put(batch);
+ return err;
- return err;
+ return igt_gpu_fill_dw(vma, ctx, engine, dw * sizeof(u32),
+ vma->size >> PAGE_SHIFT, val);
}
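For context (not part of the patch): huge_pages.c drives gpu_write() once per dword slot and verifies each pass from the CPU. A minimal caller sketch, assuming obj, vma, ctx and engine are set up by the surrounding test and max_dwords is a hypothetical bound:

	unsigned int n;
	int err = 0;

	for (n = 0; n < max_dwords; n++) {
		/* GPU writes n + 1 into dword n of every page of the object */
		err = gpu_write(vma, ctx, engine, n, n + 1);
		if (err)
			break;

		/* then the CPU checks that every page saw the store */
		err = cpu_check(obj, n, n + 1);
		if (err)
			break;
	}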
static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
return err;
}
-static struct i915_vma *
-gpu_fill_dw(struct i915_vma *vma, u64 offset, unsigned long count, u32 value)
-{
- struct drm_i915_gem_object *obj;
- const int gen = INTEL_GEN(vma->vm->i915);
- unsigned long n, size;
- u32 *cmd;
- int err;
-
- size = (4 * count + 1) * sizeof(u32);
- size = round_up(size, PAGE_SIZE);
- obj = i915_gem_object_create_internal(vma->vm->i915, size);
- if (IS_ERR(obj))
- return ERR_CAST(obj);
-
- cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
- if (IS_ERR(cmd)) {
- err = PTR_ERR(cmd);
- goto err;
- }
-
- GEM_BUG_ON(offset + (count - 1) * PAGE_SIZE > vma->node.size);
- offset += vma->node.start;
-
- for (n = 0; n < count; n++) {
- if (gen >= 8) {
- *cmd++ = MI_STORE_DWORD_IMM_GEN4;
- *cmd++ = lower_32_bits(offset);
- *cmd++ = upper_32_bits(offset);
- *cmd++ = value;
- } else if (gen >= 4) {
- *cmd++ = MI_STORE_DWORD_IMM_GEN4 |
- (gen < 6 ? MI_USE_GGTT : 0);
- *cmd++ = 0;
- *cmd++ = offset;
- *cmd++ = value;
- } else {
- *cmd++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
- *cmd++ = offset;
- *cmd++ = value;
- }
- offset += PAGE_SIZE;
- }
- *cmd = MI_BATCH_BUFFER_END;
- i915_gem_object_flush_map(obj);
- i915_gem_object_unpin_map(obj);
-
- vma = i915_vma_instance(obj, vma->vm, NULL);
- if (IS_ERR(vma)) {
- err = PTR_ERR(vma);
- goto err;
- }
-
- err = i915_vma_pin(vma, 0, 0, PIN_USER);
- if (err)
- goto err;
-
- return vma;
-
-err:
- i915_gem_object_put(obj);
- return ERR_PTR(err);
-}
-
static unsigned long real_page_count(struct drm_i915_gem_object *obj)
{
return huge_gem_object_phys_size(obj) >> PAGE_SHIFT;
unsigned int dw)
{
struct i915_address_space *vm = ctx->vm ?: &engine->gt->ggtt->vm;
- struct i915_request *rq;
struct i915_vma *vma;
- struct i915_vma *batch;
- unsigned int flags;
int err;
GEM_BUG_ON(obj->base.size > vm->total);
return PTR_ERR(vma);
i915_gem_object_lock(obj);
- err = i915_gem_object_set_to_gtt_domain(obj, false);
+ err = i915_gem_object_set_to_gtt_domain(obj, true);
i915_gem_object_unlock(obj);
if (err)
return err;
if (err)
return err;
- /* Within the GTT the huge objects maps every page onto
+ /*
+ * Within the GTT the huge object maps every page onto
* its 1024 real pages (using phys_pfn = dma_pfn % 1024).
* We set the nth dword within the page using the nth
* mapping via the GTT - this should exercise the GTT mapping
* whilst checking that each context provides a unique view
* into the object.
*/
- batch = gpu_fill_dw(vma,
- (dw * real_page_count(obj)) << PAGE_SHIFT |
- (dw * sizeof(u32)),
- real_page_count(obj),
- dw);
- if (IS_ERR(batch)) {
- err = PTR_ERR(batch);
- goto err_vma;
- }
-
- rq = igt_request_alloc(ctx, engine);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto err_batch;
- }
-
- flags = 0;
- if (INTEL_GEN(vm->i915) <= 5)
- flags |= I915_DISPATCH_SECURE;
-
- err = engine->emit_bb_start(rq,
- batch->node.start, batch->node.size,
- flags);
- if (err)
- goto err_request;
-
- i915_vma_lock(batch);
- err = i915_vma_move_to_active(batch, rq, 0);
- i915_vma_unlock(batch);
- if (err)
- goto skip_request;
-
- i915_vma_lock(vma);
- err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
- i915_vma_unlock(vma);
- if (err)
- goto skip_request;
-
- i915_request_add(rq);
-
- i915_vma_unpin(batch);
- i915_vma_close(batch);
- i915_vma_put(batch);
-
+ err = igt_gpu_fill_dw(vma,
+ ctx,
+ engine,
+ (dw * real_page_count(obj)) << PAGE_SHIFT |
+ (dw * sizeof(u32)),
+ real_page_count(obj),
+ dw);
i915_vma_unpin(vma);
- return 0;
-
-skip_request:
- i915_request_skip(rq, err);
-err_request:
- i915_request_add(rq);
-err_batch:
- i915_vma_unpin(batch);
- i915_vma_put(batch);
-err_vma:
- i915_vma_unpin(vma);
return err;
}
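To make the comment above concrete, a worked example with illustrative values, assuming PAGE_SHIFT == 12 and real_page_count(obj) == 4: for dw == 2 the call computes

	/*
	 * offset = (dw * real_page_count(obj)) << PAGE_SHIFT | (dw * sizeof(u32))
	 *        = (2 * 4) << 12 | (2 * 4)
	 *        = 0x8008
	 *
	 * so the first store hits byte 8 (dword 2) of page 8 in the GTT
	 * view; igt_gpu_fill_dw() then steps by PAGE_SIZE for each of the
	 * real_page_count(obj) == 4 stores, covering pages 8..11.
	 */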
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_pm.h"
#include "gt/intel_context.h"
+#include "i915_vma.h"
+#include "i915_drv.h"
#include "i915_request.h"
return rq;
}
+
+struct i915_vma *
+igt_emit_store_dw(struct i915_vma *vma,
+ u64 offset,
+ unsigned long count,
+ u32 val)
+{
+ struct drm_i915_gem_object *obj;
+ const int gen = INTEL_GEN(vma->vm->i915);
+ unsigned long n, size;
+ u32 *cmd;
+ int err;
+
+ size = (4 * count + 1) * sizeof(u32);
+ size = round_up(size, PAGE_SIZE);
+ obj = i915_gem_object_create_internal(vma->vm->i915, size);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ cmd = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ if (IS_ERR(cmd)) {
+ err = PTR_ERR(cmd);
+ goto err;
+ }
+
+ GEM_BUG_ON(offset + (count - 1) * PAGE_SIZE > vma->node.size);
+ offset += vma->node.start;
+
+ for (n = 0; n < count; n++) {
+ if (gen >= 8) {
+ *cmd++ = MI_STORE_DWORD_IMM_GEN4;
+ *cmd++ = lower_32_bits(offset);
+ *cmd++ = upper_32_bits(offset);
+ *cmd++ = val;
+ } else if (gen >= 4) {
+ *cmd++ = MI_STORE_DWORD_IMM_GEN4 |
+ (gen < 6 ? MI_USE_GGTT : 0);
+ *cmd++ = 0;
+ *cmd++ = offset;
+ *cmd++ = val;
+ } else {
+ *cmd++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
+ *cmd++ = offset;
+ *cmd++ = val;
+ }
+ offset += PAGE_SIZE;
+ }
+ *cmd = MI_BATCH_BUFFER_END;
+
+ i915_gem_object_flush_map(obj);
+ i915_gem_object_unpin_map(obj);
+
+ intel_gt_chipset_flush(vma->vm->gt);
+
+ vma = i915_vma_instance(obj, vma->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ goto err;
+
+ return vma;
+
+err:
+ i915_gem_object_put(obj);
+ return ERR_PTR(err);
+}
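As an illustration of the command stream igt_emit_store_dw() builds (made-up values): on gen8+, with count == 2, val == 0xdeadbeef, 4K pages and a start address (offset plus vma->node.start) of 0x10000, the batch contains:

	/*
	 * MI_STORE_DWORD_IMM_GEN4, 0x00010000, 0x00000000, 0xdeadbeef,
	 * MI_STORE_DWORD_IMM_GEN4, 0x00011000, 0x00000000, 0xdeadbeef,
	 * MI_BATCH_BUFFER_END
	 */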
+
+int igt_gpu_fill_dw(struct i915_vma *vma,
+ struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine,
+ u64 offset,
+ unsigned long count,
+ u32 val)
+{
+ struct i915_address_space *vm = ctx->vm ?: &engine->gt->ggtt->vm;
+ struct i915_request *rq;
+ struct i915_vma *batch;
+ unsigned int flags;
+ int err;
+
+ GEM_BUG_ON(vma->size > vm->total);
+ GEM_BUG_ON(!intel_engine_can_store_dword(engine));
+ GEM_BUG_ON(!i915_vma_is_pinned(vma));
+
+ batch = igt_emit_store_dw(vma, offset, count, val);
+ if (IS_ERR(batch))
+ return PTR_ERR(batch);
+
+ rq = igt_request_alloc(ctx, engine);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_batch;
+ }
+
+ flags = 0;
+ if (INTEL_GEN(vm->i915) <= 5)
+ flags |= I915_DISPATCH_SECURE;
+
+ err = engine->emit_bb_start(rq,
+ batch->node.start, batch->node.size,
+ flags);
+ if (err)
+ goto err_request;
+
+ i915_vma_lock(batch);
+ err = i915_vma_move_to_active(batch, rq, 0);
+ i915_vma_unlock(batch);
+ if (err)
+ goto skip_request;
+
+ i915_vma_lock(vma);
+ err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ i915_vma_unlock(vma);
+ if (err)
+ goto skip_request;
+
+ i915_request_add(rq);
+
+ i915_vma_unpin(batch);
+ i915_vma_close(batch);
+ i915_vma_put(batch);
+
+ return 0;
+
+skip_request:
+ i915_request_skip(rq, err);
+err_request:
+ i915_request_add(rq);
+err_batch:
+ i915_vma_unpin(batch);
+ i915_vma_put(batch);
+ return err;
+}
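A minimal usage sketch for the new helper (hypothetical values; note the GEM_BUG_ON above requires the target vma to be pinned before the call):

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		return err;

	/* write 0xc0ffee into dword 0 of every page of the vma */
	err = igt_gpu_fill_dw(vma, ctx, engine, 0 /* offset */,
			      vma->size >> PAGE_SHIFT, 0xc0ffee);

	i915_vma_unpin(vma);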
#ifndef __IGT_GEM_UTILS_H__
#define __IGT_GEM_UTILS_H__
+#include <linux/types.h>
+
struct i915_request;
struct i915_gem_context;
struct intel_engine_cs;
+struct i915_vma;
struct i915_request *
igt_request_alloc(struct i915_gem_context *ctx, struct intel_engine_cs *engine);
+struct i915_vma *
+igt_emit_store_dw(struct i915_vma *vma,
+ u64 offset,
+ unsigned long count,
+ u32 val);
+
+int igt_gpu_fill_dw(struct i915_vma *vma,
+ struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine,
+ u64 offset,
+ unsigned long count,
+ u32 val);
+
#endif /* __IGT_GEM_UTILS_H__ */