#include "i915_drv.h"
const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops = {
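+	/* Tell the GEM core this object is backed by io memory, not struct pages */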
+ .flags = I915_GEM_OBJECT_HAS_IOMEM,
+
.get_pages = i915_gem_object_get_pages_buddy,
.put_pages = i915_gem_object_put_pages_buddy,
.release = i915_gem_object_release_memory_region,
};
+/* XXX: push these io_map helpers behind an obj->ops vfunc? */
+void __iomem *
+i915_gem_object_lmem_io_map_page(struct drm_i915_gem_object *obj,
+ unsigned long n)
+{
+ resource_size_t offset;
+
+ offset = i915_gem_object_get_dma_address(obj, n);
+
+ return io_mapping_map_wc(&obj->mm.region->iomap, offset, PAGE_SIZE);
+}
+
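+/*
+ * As i915_gem_object_lmem_io_map_page(), but returns an atomic mapping;
+ * the caller must not sleep until the mapping is released again with
+ * io_mapping_unmap_atomic().
+ */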
+void __iomem *
+i915_gem_object_lmem_io_map_page_atomic(struct drm_i915_gem_object *obj,
+ unsigned long n)
+{
+ resource_size_t offset;
+
+ offset = i915_gem_object_get_dma_address(obj, n);
+
+ return io_mapping_map_atomic_wc(&obj->mm.region->iomap, offset);
+}
+
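+/*
+ * Map @size bytes starting from page @n of the object. A single WC
+ * mapping can only cover one physically contiguous range, hence the
+ * contiguous-object assertion below.
+ */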
+void __iomem *
+i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj,
+ unsigned long n,
+ unsigned long size)
+{
+ resource_size_t offset;
+
+ GEM_BUG_ON(!i915_gem_object_is_contiguous(obj));
+
+ offset = i915_gem_object_get_dma_address(obj, n);
+
+ return io_mapping_map_wc(&obj->mm.region->iomap, offset, size);
+}
+
bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj)
{
	return obj->ops == &i915_gem_lmem_obj_ops;
}
extern const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops;
+void __iomem *i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj,
+ unsigned long n, unsigned long size);
+void __iomem *i915_gem_object_lmem_io_map_page(struct drm_i915_gem_object *obj,
+ unsigned long n);
+void __iomem *
+i915_gem_object_lmem_io_map_page_atomic(struct drm_i915_gem_object *obj,
+ unsigned long n);
+
bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj);
struct drm_i915_gem_object *
struct drm_i915_gem_object_ops {
unsigned int flags;
#define I915_GEM_OBJECT_HAS_STRUCT_PAGE BIT(0)
-#define I915_GEM_OBJECT_IS_SHRINKABLE BIT(1)
-#define I915_GEM_OBJECT_IS_PROXY BIT(2)
-#define I915_GEM_OBJECT_NO_GGTT BIT(3)
-#define I915_GEM_OBJECT_ASYNC_CANCEL BIT(4)
+#define I915_GEM_OBJECT_HAS_IOMEM BIT(1)
+#define I915_GEM_OBJECT_IS_SHRINKABLE BIT(2)
+#define I915_GEM_OBJECT_IS_PROXY BIT(3)
+#define I915_GEM_OBJECT_NO_GGTT BIT(4)
+#define I915_GEM_OBJECT_ASYNC_CANCEL BIT(5)
/* Interface between the GEM object and its backing storage.
* get_pages() is called once prior to the use of the associated set
#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
+#include "i915_gem_lmem.h"
void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages,
rcu_read_unlock();
}
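+/*
+ * Undo i915_gem_object_map(): the unmap call must match how the pages
+ * were mapped, i.e. io_mapping for lmem, vunmap/kunmap for system memory.
+ */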
+static void unmap_object(struct drm_i915_gem_object *obj, void *ptr)
+{
+ if (i915_gem_object_is_lmem(obj))
+ io_mapping_unmap((void __force __iomem *)ptr);
+ else if (is_vmalloc_addr(ptr))
+ vunmap(ptr);
+ else
+ kunmap(kmap_to_page(ptr));
+}
+
struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
{
i915_gem_object_make_unshrinkable(obj);
if (obj->mm.mapping) {
- void *ptr;
-
- ptr = page_mask_bits(obj->mm.mapping);
- if (is_vmalloc_addr(ptr))
- vunmap(ptr);
- else
- kunmap(kmap_to_page(ptr));
-
+ unmap_object(obj, page_mask_bits(obj->mm.mapping));
obj->mm.mapping = NULL;
}
}
/* The 'mapping' part of i915_gem_object_pin_map() below */
-static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
+static void *i915_gem_object_map(struct drm_i915_gem_object *obj,
enum i915_map_type type)
{
unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
pgprot_t pgprot;
void *addr;
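+	/*
+	 * lmem objects have no struct pages to kmap or vmap; the only way
+	 * to map them on the CPU is write-combined, through the io aperture.
+	 */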
+ if (i915_gem_object_is_lmem(obj)) {
+ void __iomem *io;
+
+ if (type != I915_MAP_WC)
+ return NULL;
+
+ io = i915_gem_object_lmem_io_map(obj, 0, obj->base.size);
+ return (void __force *)io;
+ }
+
/* A single page can always be kmapped */
if (n_pages == 1 && type == I915_MAP_WB)
return kmap(sg_page(sgt->sgl));
enum i915_map_type type)
{
enum i915_map_type has_type;
+ unsigned int flags;
bool pinned;
void *ptr;
int err;
- if (unlikely(!i915_gem_object_has_struct_page(obj)))
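+	/* pin_map() now accepts iomem-backed (lmem) objects as well as struct-page ones */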
+ flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE | I915_GEM_OBJECT_HAS_IOMEM;
+ if (!i915_gem_object_type_has(obj, flags))
return ERR_PTR(-ENXIO);
err = mutex_lock_interruptible(&obj->mm.lock);
goto err_unpin;
}
- if (is_vmalloc_addr(ptr))
- vunmap(ptr);
- else
- kunmap(kmap_to_page(ptr));
+ unmap_object(obj, ptr);
ptr = obj->mm.mapping = NULL;
}
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
+#include "gem/i915_gem_object_blt.h"
#include "gem/selftests/mock_context.h"
#include "gt/intel_gt.h"
+#include "selftests/igt_flush_test.h"
#include "selftests/i915_random.h"
static void close_objects(struct intel_memory_region *mem,
return err;
}
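+/*
+ * Fill an lmem object from the GPU with the blitter, then overwrite
+ * random ranges through a CPU WC mapping and sample dwords back to
+ * verify that the CPU writes landed.
+ */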
+static int igt_lmem_write_cpu(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct drm_i915_gem_object *obj;
+ I915_RND_STATE(prng);
+ IGT_TIMEOUT(end_time);
+ u32 bytes[] = {
+ 0, /* rng placeholder */
+ sizeof(u32),
+ sizeof(u64),
+		64, /* cacheline */
+ PAGE_SIZE,
+ PAGE_SIZE - sizeof(u32),
+ PAGE_SIZE - sizeof(u64),
+ PAGE_SIZE - 64,
+ };
+ u32 *vaddr;
+ u32 sz;
+ u32 i;
+ int *order;
+ int count;
+ int err;
+
+ if (!HAS_ENGINE(i915, BCS0))
+ return 0;
+
+ sz = round_up(prandom_u32_state(&prng) % SZ_32M, PAGE_SIZE);
+ sz = max_t(u32, 2 * PAGE_SIZE, sz);
+
+ obj = i915_gem_object_create_lmem(i915, sz, I915_BO_ALLOC_CONTIGUOUS);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ if (IS_ERR(vaddr)) {
+ err = PTR_ERR(vaddr);
+ goto out_put;
+ }
+
+ /* Put the pages into a known state -- from the gpu for added fun */
+ err = i915_gem_object_fill_blt(obj, i915->engine[BCS0]->kernel_context,
+ 0xdeadbeaf);
+ if (err)
+ goto out_unpin;
+
+ i915_gem_object_lock(obj);
+ err = i915_gem_object_set_to_wc_domain(obj, true);
+ i915_gem_object_unlock(obj);
+ if (err)
+ goto out_unpin;
+
+ count = ARRAY_SIZE(bytes);
+ order = i915_random_order(count * count, &prng);
+ if (!order) {
+ err = -ENOMEM;
+ goto out_unpin;
+ }
+
+ /* We want to throw in a random width/align */
+ bytes[0] = igt_random_offset(&prng, 0, PAGE_SIZE, sizeof(u32),
+ sizeof(u32));
+
+ i = 0;
+ do {
+ u32 offset;
+ u32 align;
+ u32 dword;
+ u32 size;
+ u32 val;
+
+ size = bytes[order[i] % count];
+ i = (i + 1) % (count * count);
+
+ align = bytes[order[i] % count];
+ i = (i + 1) % (count * count);
+
+ align = max_t(u32, sizeof(u32), rounddown_pow_of_two(align));
+
+ offset = igt_random_offset(&prng, 0, obj->base.size,
+ size, align);
+
+ val = prandom_u32_state(&prng);
+ memset32(vaddr + offset / sizeof(u32), val ^ 0xdeadbeaf,
+ size / sizeof(u32));
+
+ /*
+ * Sample random dw -- don't waste precious time reading every
+ * single dw.
+ */
+ dword = igt_random_offset(&prng, offset,
+ offset + size,
+ sizeof(u32), sizeof(u32));
+ dword /= sizeof(u32);
+ if (vaddr[dword] != (val ^ 0xdeadbeaf)) {
+ pr_err("%s vaddr[%u]=%u, val=%u, size=%u, align=%u, offset=%u\n",
+ __func__, dword, vaddr[dword], val ^ 0xdeadbeaf,
+ size, align, offset);
+ err = -EINVAL;
+ break;
+ }
+	} while (!__igt_timeout(end_time, NULL));
+
+	kfree(order);
+
+out_unpin:
+ i915_gem_object_unpin_map(obj);
+out_put:
+ i915_gem_object_put(obj);
+
+ return err;
+}
+
int intel_memory_region_mock_selftests(void)
{
static const struct i915_subtest tests[] = {
int intel_memory_region_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(igt_lmem_create),
+ SUBTEST(igt_lmem_write_cpu),
};
if (!HAS_LMEM(i915)) {