From 18851edfa7dbe156cfc5a73bf009d8a4def84bb5 Mon Sep 17 00:00:00 2001
From: Matthew Auld <matthew.auld@intel.com>
Date: Sat, 10 Aug 2019 11:50:08 +0100
Subject: [PATCH] drm/i915/selftests: move gpu-write-dw into utils

Using the gpu to write some dword over a number of pages is rather
useful, and we already have two copies of such a thing; we don't want
a third, so move it to utils. There is probably some other stuff
also...

Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20190810105008.14320-1-chris@chris-wilson.co.uk
---
 .../gpu/drm/i915/gem/selftests/huge_pages.c   | 120 ++--------------
 .../drm/i915/gem/selftests/i915_gem_context.c | 134 ++---------------
 .../drm/i915/gem/selftests/igt_gem_utils.c    | 135 ++++++++++++++++++
 .../drm/i915/gem/selftests/igt_gem_utils.h    |  16 +++
 4 files changed, 169 insertions(+), 236 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index 6cbd4a668c9a..8de83c6d81f5 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -879,126 +879,22 @@ out_object_put:
 	return err;
 }
 
-static struct i915_vma *
-gpu_write_dw(struct i915_vma *vma, u64 offset, u32 val)
-{
-	struct drm_i915_private *i915 = vma->vm->i915;
-	const int gen = INTEL_GEN(i915);
-	unsigned int count = vma->size >> PAGE_SHIFT;
-	struct drm_i915_gem_object *obj;
-	struct i915_vma *batch;
-	unsigned int size;
-	u32 *cmd;
-	int n;
-	int err;
-
-	size = (1 + 4 * count) * sizeof(u32);
-	size = round_up(size, PAGE_SIZE);
-	obj = i915_gem_object_create_internal(i915, size);
-	if (IS_ERR(obj))
-		return ERR_CAST(obj);
-
-	cmd = i915_gem_object_pin_map(obj, I915_MAP_WC);
-	if (IS_ERR(cmd)) {
-		err = PTR_ERR(cmd);
-		goto err;
-	}
-
-	offset += vma->node.start;
-
-	for (n = 0; n < count; n++) {
-		if (gen >= 8) {
-			*cmd++ = MI_STORE_DWORD_IMM_GEN4;
-			*cmd++ = lower_32_bits(offset);
-			*cmd++ = upper_32_bits(offset);
-			*cmd++ = val;
-		} else if (gen >= 4) {
-			*cmd++ = MI_STORE_DWORD_IMM_GEN4 |
-				(gen < 6 ? MI_USE_GGTT : 0);
-			*cmd++ = 0;
-			*cmd++ = offset;
-			*cmd++ = val;
-		} else {
-			*cmd++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
-			*cmd++ = offset;
-			*cmd++ = val;
-		}
-
-		offset += PAGE_SIZE;
-	}
-
-	*cmd = MI_BATCH_BUFFER_END;
-	intel_gt_chipset_flush(vma->vm->gt);
-
-	i915_gem_object_unpin_map(obj);
-
-	batch = i915_vma_instance(obj, vma->vm, NULL);
-	if (IS_ERR(batch)) {
-		err = PTR_ERR(batch);
-		goto err;
-	}
-
-	err = i915_vma_pin(batch, 0, 0, PIN_USER);
-	if (err)
-		goto err;
-
-	return batch;
-
-err:
-	i915_gem_object_put(obj);
-
-	return ERR_PTR(err);
-}
-
 static int gpu_write(struct i915_vma *vma,
 		     struct i915_gem_context *ctx,
 		     struct intel_engine_cs *engine,
-		     u32 dword,
-		     u32 value)
+		     u32 dw,
+		     u32 val)
 {
-	struct i915_request *rq;
-	struct i915_vma *batch;
 	int err;
 
-	GEM_BUG_ON(!intel_engine_can_store_dword(engine));
-
-	batch = gpu_write_dw(vma, dword * sizeof(u32), value);
-	if (IS_ERR(batch))
-		return PTR_ERR(batch);
-
-	rq = igt_request_alloc(ctx, engine);
-	if (IS_ERR(rq)) {
-		err = PTR_ERR(rq);
-		goto err_batch;
-	}
-
-	i915_vma_lock(batch);
-	err = i915_vma_move_to_active(batch, rq, 0);
-	i915_vma_unlock(batch);
-	if (err)
-		goto err_request;
-
-	i915_vma_lock(vma);
-	err = i915_gem_object_set_to_gtt_domain(vma->obj, false);
-	if (err == 0)
-		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
-	i915_vma_unlock(vma);
+	i915_gem_object_lock(vma->obj);
+	err = i915_gem_object_set_to_gtt_domain(vma->obj, true);
+	i915_gem_object_unlock(vma->obj);
 	if (err)
-		goto err_request;
-
-	err = engine->emit_bb_start(rq,
-				    batch->node.start, batch->node.size,
-				    0);
-err_request:
-	if (err)
-		i915_request_skip(rq, err);
-	i915_request_add(rq);
-err_batch:
-	i915_vma_unpin(batch);
-	i915_vma_close(batch);
-	i915_vma_put(batch);
+		return err;
 
-	return err;
+	return igt_gpu_fill_dw(vma, ctx, engine, dw * sizeof(u32),
+			       vma->size >> PAGE_SHIFT, val);
 }
 
 static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
index c24430352a38..dd87e6cd612e 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
@@ -156,70 +156,6 @@ out_unlock:
 	return err;
 }
 
-static struct i915_vma *
-gpu_fill_dw(struct i915_vma *vma, u64 offset, unsigned long count, u32 value)
-{
-	struct drm_i915_gem_object *obj;
-	const int gen = INTEL_GEN(vma->vm->i915);
-	unsigned long n, size;
-	u32 *cmd;
-	int err;
-
-	size = (4 * count + 1) * sizeof(u32);
-	size = round_up(size, PAGE_SIZE);
-	obj = i915_gem_object_create_internal(vma->vm->i915, size);
-	if (IS_ERR(obj))
-		return ERR_CAST(obj);
-
-	cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
-	if (IS_ERR(cmd)) {
-		err = PTR_ERR(cmd);
-		goto err;
-	}
-
-	GEM_BUG_ON(offset + (count - 1) * PAGE_SIZE > vma->node.size);
-	offset += vma->node.start;
-
-	for (n = 0; n < count; n++) {
-		if (gen >= 8) {
-			*cmd++ = MI_STORE_DWORD_IMM_GEN4;
-			*cmd++ = lower_32_bits(offset);
-			*cmd++ = upper_32_bits(offset);
-			*cmd++ = value;
-		} else if (gen >= 4) {
-			*cmd++ = MI_STORE_DWORD_IMM_GEN4 |
-				(gen < 6 ? MI_USE_GGTT : 0);
-			*cmd++ = 0;
-			*cmd++ = offset;
-			*cmd++ = value;
-		} else {
-			*cmd++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
-			*cmd++ = offset;
-			*cmd++ = value;
-		}
-		offset += PAGE_SIZE;
-	}
-	*cmd = MI_BATCH_BUFFER_END;
-	i915_gem_object_flush_map(obj);
-	i915_gem_object_unpin_map(obj);
-
-	vma = i915_vma_instance(obj, vma->vm, NULL);
-	if (IS_ERR(vma)) {
-		err = PTR_ERR(vma);
-		goto err;
-	}
-
-	err = i915_vma_pin(vma, 0, 0, PIN_USER);
-	if (err)
-		goto err;
-
-	return vma;
-
-err:
-	i915_gem_object_put(obj);
-	return ERR_PTR(err);
-}
-
 static unsigned long real_page_count(struct drm_i915_gem_object *obj)
 {
 	return huge_gem_object_phys_size(obj) >> PAGE_SHIFT;
@@ -236,10 +172,7 @@ static int gpu_fill(struct drm_i915_gem_object *obj,
 		    unsigned int dw)
 {
 	struct i915_address_space *vm = ctx->vm ?: &engine->gt->ggtt->vm;
-	struct i915_request *rq;
 	struct i915_vma *vma;
-	struct i915_vma *batch;
-	unsigned int flags;
 	int err;
 
 	GEM_BUG_ON(obj->base.size > vm->total);
@@ -250,7 +183,7 @@ static int gpu_fill(struct drm_i915_gem_object *obj,
 		return PTR_ERR(vma);
 
 	i915_gem_object_lock(obj);
-	err = i915_gem_object_set_to_gtt_domain(obj, false);
+	err = i915_gem_object_set_to_gtt_domain(obj, true);
 	i915_gem_object_unlock(obj);
 	if (err)
 		return err;
@@ -259,70 +192,23 @@ static int gpu_fill(struct drm_i915_gem_object *obj,
 	if (err)
 		return err;
 
-	/* Within the GTT the huge objects maps every page onto
+	/*
+	 * Within the GTT the huge objects maps every page onto
 	 * its 1024 real pages (using phys_pfn = dma_pfn % 1024).
 	 * We set the nth dword within the page using the nth
 	 * mapping via the GTT - this should exercise the GTT mapping
 	 * whilst checking that each context provides a unique view
 	 * into the object.
 	 */
-	batch = gpu_fill_dw(vma,
-			    (dw * real_page_count(obj)) << PAGE_SHIFT |
-			    (dw * sizeof(u32)),
-			    real_page_count(obj),
-			    dw);
-	if (IS_ERR(batch)) {
-		err = PTR_ERR(batch);
-		goto err_vma;
-	}
-
-	rq = igt_request_alloc(ctx, engine);
-	if (IS_ERR(rq)) {
-		err = PTR_ERR(rq);
-		goto err_batch;
-	}
-
-	flags = 0;
-	if (INTEL_GEN(vm->i915) <= 5)
-		flags |= I915_DISPATCH_SECURE;
-
-	err = engine->emit_bb_start(rq,
-				    batch->node.start, batch->node.size,
-				    flags);
-	if (err)
-		goto err_request;
-
-	i915_vma_lock(batch);
-	err = i915_vma_move_to_active(batch, rq, 0);
-	i915_vma_unlock(batch);
-	if (err)
-		goto skip_request;
-
-	i915_vma_lock(vma);
-	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
-	i915_vma_unlock(vma);
-	if (err)
-		goto skip_request;
-
-	i915_request_add(rq);
-
-	i915_vma_unpin(batch);
-	i915_vma_close(batch);
-	i915_vma_put(batch);
-
+	err = igt_gpu_fill_dw(vma,
+			      ctx,
+			      engine,
+			      (dw * real_page_count(obj)) << PAGE_SHIFT |
+			      (dw * sizeof(u32)),
+			      real_page_count(obj),
+			      dw);
 	i915_vma_unpin(vma);
-	return 0;
-
-skip_request:
-	i915_request_skip(rq, err);
-err_request:
-	i915_request_add(rq);
-err_batch:
-	i915_vma_unpin(batch);
-	i915_vma_put(batch);
-err_vma:
-	i915_vma_unpin(vma);
 	return err;
 }
 
diff --git a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
index 93c636aeae73..42e1e9c58f63 100644
--- a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
+++ b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
@@ -9,6 +9,8 @@
 #include "gem/i915_gem_context.h"
 #include "gem/i915_gem_pm.h"
 #include "gt/intel_context.h"
+#include "i915_vma.h"
+#include "i915_drv.h"
 
 #include "i915_request.h"
 
@@ -32,3 +34,136 @@ igt_request_alloc(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
 
 	return rq;
 }
+
+struct i915_vma *
+igt_emit_store_dw(struct i915_vma *vma,
+		  u64 offset,
+		  unsigned long count,
+		  u32 val)
+{
+	struct drm_i915_gem_object *obj;
+	const int gen = INTEL_GEN(vma->vm->i915);
+	unsigned long n, size;
+	u32 *cmd;
+	int err;
+
+	size = (4 * count + 1) * sizeof(u32);
+	size = round_up(size, PAGE_SIZE);
+	obj = i915_gem_object_create_internal(vma->vm->i915, size);
+	if (IS_ERR(obj))
+		return ERR_CAST(obj);
+
+	cmd = i915_gem_object_pin_map(obj, I915_MAP_WC);
+	if (IS_ERR(cmd)) {
+		err = PTR_ERR(cmd);
+		goto err;
+	}
+
+	GEM_BUG_ON(offset + (count - 1) * PAGE_SIZE > vma->node.size);
+	offset += vma->node.start;
+
+	for (n = 0; n < count; n++) {
+		if (gen >= 8) {
+			*cmd++ = MI_STORE_DWORD_IMM_GEN4;
+			*cmd++ = lower_32_bits(offset);
+			*cmd++ = upper_32_bits(offset);
+			*cmd++ = val;
+		} else if (gen >= 4) {
+			*cmd++ = MI_STORE_DWORD_IMM_GEN4 |
+				(gen < 6 ? MI_USE_GGTT : 0);
+			*cmd++ = 0;
+			*cmd++ = offset;
+			*cmd++ = val;
+		} else {
+			*cmd++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
+			*cmd++ = offset;
+			*cmd++ = val;
+		}
+		offset += PAGE_SIZE;
+	}
+	*cmd = MI_BATCH_BUFFER_END;
+	i915_gem_object_unpin_map(obj);
+
+	vma = i915_vma_instance(obj, vma->vm, NULL);
+	if (IS_ERR(vma)) {
+		err = PTR_ERR(vma);
+		goto err;
+	}
+
+	err = i915_vma_pin(vma, 0, 0, PIN_USER);
+	if (err)
+		goto err;
+
+	return vma;
+
+err:
+	i915_gem_object_put(obj);
+	return ERR_PTR(err);
+}
+
+int igt_gpu_fill_dw(struct i915_vma *vma,
+		    struct i915_gem_context *ctx,
+		    struct intel_engine_cs *engine,
+		    u64 offset,
+		    unsigned long count,
+		    u32 val)
+{
+	struct i915_address_space *vm = ctx->vm ?: &engine->gt->ggtt->vm;
+	struct i915_request *rq;
+	struct i915_vma *batch;
+	unsigned int flags;
+	int err;
+
+	GEM_BUG_ON(vma->size > vm->total);
+	GEM_BUG_ON(!intel_engine_can_store_dword(engine));
+	GEM_BUG_ON(!i915_vma_is_pinned(vma));
+
+	batch = igt_emit_store_dw(vma, offset, count, val);
+	if (IS_ERR(batch))
+		return PTR_ERR(batch);
+
+	rq = igt_request_alloc(ctx, engine);
+	if (IS_ERR(rq)) {
+		err = PTR_ERR(rq);
+		goto err_batch;
+	}
+
+	flags = 0;
+	if (INTEL_GEN(vm->i915) <= 5)
+		flags |= I915_DISPATCH_SECURE;
+
+	err = engine->emit_bb_start(rq,
+				    batch->node.start, batch->node.size,
+				    flags);
+	if (err)
+		goto err_request;
+
+	i915_vma_lock(batch);
+	err = i915_vma_move_to_active(batch, rq, 0);
+	i915_vma_unlock(batch);
+	if (err)
+		goto skip_request;
+
+	i915_vma_lock(vma);
+	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+	i915_vma_unlock(vma);
+	if (err)
+		goto skip_request;
+
+	i915_request_add(rq);
+
+	i915_vma_unpin(batch);
+	i915_vma_close(batch);
+	i915_vma_put(batch);
+
+	return 0;
+
+skip_request:
+	i915_request_skip(rq, err);
+err_request:
+	i915_request_add(rq);
err_batch:
+	i915_vma_unpin(batch);
+	i915_vma_put(batch);
+	return err;
+}
diff --git a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.h b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.h
index 0f17251cf75d..361a7ef866b0 100644
--- a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.h
+++ b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.h
@@ -7,11 +7,27 @@
 #ifndef __IGT_GEM_UTILS_H__
 #define __IGT_GEM_UTILS_H__
 
+#include <linux/types.h>
+
 struct i915_request;
 struct i915_gem_context;
 struct intel_engine_cs;
+struct i915_vma;
 
 struct i915_request *
 igt_request_alloc(struct i915_gem_context *ctx, struct intel_engine_cs *engine);
 
+struct i915_vma *
+igt_emit_store_dw(struct i915_vma *vma,
+		  u64 offset,
+		  unsigned long count,
+		  u32 val);
+
+int igt_gpu_fill_dw(struct i915_vma *vma,
+		    struct i915_gem_context *ctx,
+		    struct intel_engine_cs *engine,
+		    u64 offset,
+		    unsigned long count,
+		    u32 val);
+
 #endif /* __IGT_GEM_UTILS_H__ */
-- 
2.30.2
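
For reference, a minimal sketch of how a caller might drive the new helper
once this patch is applied, mirroring the converted gpu_fill() above. The
function name example_fill() and the 0xdeadbeef value are illustrative
only; the object, context and engine are assumed to be set up by the
surrounding selftest, and the engine must be able to store dwords, as
asserted inside igt_gpu_fill_dw():

static int example_fill(struct drm_i915_gem_object *obj,
			struct i915_gem_context *ctx,
			struct intel_engine_cs *engine)
{
	struct i915_address_space *vm = ctx->vm ?: &engine->gt->ggtt->vm;
	struct i915_vma *vma;
	int err;

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	/* igt_gpu_fill_dw() asserts that the vma is already pinned. */
	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		return err;

	/* Write 0xdeadbeef into dword 0 of every page of the object. */
	err = igt_gpu_fill_dw(vma, ctx, engine,
			      0 /* offset */,
			      obj->base.size >> PAGE_SHIFT /* count */,
			      0xdeadbeef /* val */);

	i915_vma_unpin(vma);
	return err;
}

The count follows the gpu_write() conversion in huge_pages.c, which passes
vma->size >> PAGE_SHIFT so that one dword is written per page, each store
landing PAGE_SIZE beyond the last.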