{
unsigned int needs_clflush;
struct page *page;
- u32 *map;
+ void *map;
+ u32 *cpu;
int err;
err = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
if (err)
return err;
page = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
map = kmap_atomic(page);
+ cpu = map + offset_in_page(offset);
- if (needs_clflush & CLFLUSH_BEFORE) {
- mb();
- clflush(map+offset_in_page(offset) / sizeof(*map));
- mb();
- }
+ if (needs_clflush & CLFLUSH_BEFORE)
+ drm_clflush_virt_range(cpu, sizeof(*cpu));
- map[offset_in_page(offset) / sizeof(*map)] = v;
+ *cpu = v;
- if (needs_clflush & CLFLUSH_AFTER) {
- mb();
- clflush(map+offset_in_page(offset) / sizeof(*map));
- mb();
- }
+ if (needs_clflush & CLFLUSH_AFTER)
+ drm_clflush_virt_range(cpu, sizeof(*cpu));
kunmap_atomic(map);
-
i915_gem_obj_finish_shmem_access(obj);
+
return 0;
}
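The open-coded mb(); clflush(addr); mb(); pattern being dropped here duplicates what drm_clflush_virt_range() already does, and the helper additionally rounds the address down to a cacheline boundary and flushes every line that the byte range overlaps, so the caller no longer has to reason about alignment. As a rough sketch (not the actual implementation in drivers/gpu/drm/drm_cache.c, which reads the line size from boot_cpu_data.x86_clflush_size and prefers clflushopt where available; the fixed 64-byte line below is an assumption for illustration), the x86 path amounts to:

#include <asm/barrier.h>       /* mb() */
#include <asm/special_insns.h> /* clflush() */

/*
 * Minimal sketch of drm_clflush_virt_range() on x86: flush every
 * cacheline touched by [addr, addr + length), with a memory barrier
 * on either side of the flushes.
 */
static void sketch_clflush_virt_range(void *addr, unsigned long length)
{
	const unsigned long line = 64; /* assumed cacheline size */
	void *end = addr + length;

	/* round down so partial lines at the start are covered */
	addr = (void *)((unsigned long)addr & ~(line - 1));

	mb(); /* order prior stores before the flushes */
	for (; addr < end; addr += line)
		clflush(addr); /* writeback + invalidate one line */
	mb(); /* flushes globally visible before we continue */
}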
{
unsigned int needs_clflush;
struct page *page;
- u32 *map;
+ void *map;
+ u32 *cpu;
int err;
err = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
if (err)
return err;
page = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
map = kmap_atomic(page);
+ cpu = map + offset_in_page(offset);
- if (needs_clflush & CLFLUSH_BEFORE) {
- mb();
- clflush(map+offset_in_page(offset) / sizeof(*map));
- mb();
- }
+ if (needs_clflush & CLFLUSH_BEFORE)
+ drm_clflush_virt_range(cpu, sizeof(*cpu));
- *v = map[offset_in_page(offset) / sizeof(*map)];
+ *v = *cpu;
kunmap_atomic(map);
i915_gem_obj_finish_shmem_access(obj);
+
return 0;
}
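Assuming the two bodies above are the CPU-domain setter and getter of the coherency selftest (called cpu_set() and cpu_get() below; those names and the obj/offset parameters are assumptions, not taken from the hunks), a caller would round-trip a value through them like this:

/*
 * Hypothetical round-trip check in the style of the selftest: write a
 * value through the CPU domain, read it back, and compare.
 */
static int check_cpu_roundtrip(struct drm_i915_gem_object *obj,
			       unsigned long offset)
{
	u32 readback;
	int err;

	err = cpu_set(obj, offset, 0xdeadbeef); /* write via CPU domain */
	if (err)
		return err;

	err = cpu_get(obj, offset, &readback); /* read back, flushed first */
	if (err)
		return err;

	return readback == 0xdeadbeef ? 0 : -EINVAL;
}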