	return ret;
}

+static void
+shmem_clflush_swizzled_range(char *addr, unsigned long length,
+			     bool swizzled)
+{
+	if (swizzled) {
+		unsigned long start = (unsigned long) addr;
+		unsigned long end = (unsigned long) addr + length;
+
+		/* For swizzling simply ensure that we always flush both
+		 * channels. Lame, but simple and it works. Swizzled
+		 * pwrite/pread is far from a hotpath - current userspace
+		 * doesn't use it at all. */
+		start = round_down(start, 128);
+		end = round_up(end, 128);
+
+		drm_clflush_virt_range((void *)start, end - start);
+	} else {
+		drm_clflush_virt_range(addr, length);
+	}
+}
+
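Why 128 bytes: bit17 swizzling swaps data between the two memory channels at
64-byte cacheline granularity, so the bytes backing a given range may sit in
either half of each 128-byte pair. Rounding the flush out to 128-byte
boundaries covers both candidate cachelines without computing the swizzled
addresses. A minimal userspace sketch of that arithmetic, with stand-ins for
the kernel's round_down()/round_up() macros (hypothetical names, illustration
only):

#include <assert.h>

/* Illustration only: local stand-ins for round_down()/round_up(). */
#define ROUND_DOWN(x, a) ((x) & ~((unsigned long)(a) - 1))
#define ROUND_UP(x, a)   ROUND_DOWN((x) + (a) - 1, (a))

int main(void)
{
	unsigned long addr = 0x1234543;		/* arbitrary, unaligned */
	unsigned long length = 70;
	unsigned long start = ROUND_DOWN(addr, 128);
	unsigned long end = ROUND_UP(addr + length, 128);

	/* The widened span contains the request and both 64-byte
	 * halves of every 128-byte block it touches. */
	assert(start <= addr && addr + length <= end);
	assert(start % 128 == 0 && end % 128 == 0);
	return 0;
}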
/* Only difference to the fast-path function is that this can handle bit17
* and uses non-atomic copy and kmap functions. */
static int
	vaddr = kmap(page);
	if (needs_clflush)
-		drm_clflush_virt_range(vaddr + shmem_page_offset,
-				       page_length);
+		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
+					     page_length,
+					     page_do_bit17_swizzling);

	if (page_do_bit17_swizzling)
		ret = __copy_to_user_swizzled(user_data,
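For context on why the helper matters here: the swizzled copy path reads each
64-byte cacheline from its partner slot in the 128-byte pair (offset XOR 64),
which is not necessarily the cacheline a naive flush of the requested range
would have covered. A hedged, self-contained sketch of that access pattern
(not the kernel's __copy_to_user_swizzled(), just the same walk in plain C):

#include <string.h>

/* Sketch: copy 'length' bytes that logically start at 'offset' inside a
 * bit17-swizzled page, fetching each 64-byte cacheline from its partner
 * slot (offset ^ 64). This is why the flush above must span both halves
 * of every 128-byte block. */
static void copy_from_swizzled(char *dst, const char *src_page,
			       unsigned long offset, unsigned long length)
{
	while (length > 0) {
		unsigned long cacheline_end = (offset + 64) & ~63UL;
		unsigned long n = cacheline_end - offset;

		if (n > length)
			n = length;
		memcpy(dst, src_page + (offset ^ 64), n);
		dst += n;
		offset += n;
		length -= n;
	}
}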
	int ret;

	vaddr = kmap(page);
-	if (needs_clflush_before)
-		drm_clflush_virt_range(vaddr + shmem_page_offset,
-				       page_length);
+	if (needs_clflush_before || page_do_bit17_swizzling)
+		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
+					     page_length,
+					     page_do_bit17_swizzling);
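The `|| page_do_bit17_swizzling` is the substantive fix in this hunk: the
write lands at swizzled cacheline addresses, and partial-write detection for
a swizzled page would have to happen at 128-byte rather than 64-byte
granularity, which the caller's needs_clflush_before hint does not account
for. Rather than track that, a swizzled page is now always flushed before
the copy. Restated as a stand-alone predicate (hypothetical helper, for
illustration only):

#include <stdbool.h>

/* Hypothetical restatement of the new flush-before-write condition. */
static inline bool flush_before_write(bool needs_clflush_before,
				      bool page_do_bit17_swizzling)
{
	/* A swizzled page is always flushed first: the copy below
	 * touches partner cachelines (offset ^ 64) that the caller's
	 * needs_clflush_before hint knows nothing about. */
	return needs_clflush_before || page_do_bit17_swizzling;
}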
	if (page_do_bit17_swizzling)
		ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
						user_data,
						page_length);
	else
		ret = __copy_from_user(vaddr + shmem_page_offset,
				       user_data,
				       page_length);
	if (needs_clflush_after)
-		drm_clflush_virt_range(vaddr + shmem_page_offset,
-				       page_length);
+		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
+					     page_length,
+					     page_do_bit17_swizzling);

	kunmap(page);

	return ret;
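Taken together, the hunks route every inline clflush in the slow pread/pwrite
paths through shmem_clflush_swizzled_range(), so a bit17-swizzled object
always has both 64-byte halves of each 128-byte block flushed, and the pwrite
path additionally flushes before the copy whenever swizzling is in effect.
The fast paths need no change: they are only taken when no swizzling fixup is
required, where a plain drm_clflush_virt_range() of the exact range remains
correct.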