drm/ttm: Implement vm_operations_struct.access v2
author: Felix Kuehling <Felix.Kuehling@amd.com>
Thu, 13 Jul 2017 21:01:16 +0000 (17:01 -0400)
committer: Alex Deucher <alexander.deucher@amd.com>
Tue, 25 Jul 2017 20:29:02 +0000 (16:29 -0400)
Allows gdb to access contents of user mode mapped BOs. System memory
is handled by TTM using kmap. Other memory pools require a new driver
callback in ttm_bo_driver.

v2:
* kmap only one page at a time
* swap in BO if needed
* make driver callback more generic to handle private memory pools
* document callback return value
* WARN_ON -> WARN_ON_ONCE

Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Michel Dänzer <michel.daenzer@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/ttm/ttm_bo_vm.c
include/drm/ttm/ttm_bo_driver.h

index b442d12f2f7d64819faff9eace7bdc51fa199f89..a01e5c90fd87cb548990757bf4bd17b3978f6ab9 100644 (file)
@@ -294,10 +294,87 @@ static void ttm_bo_vm_close(struct vm_area_struct *vma)
        vma->vm_private_data = NULL;
 }
 
+/* Copy @len bytes between @buf and the BO's backing pages, starting at
+ * byte @offset into the BO, kmapping one page at a time so no large
+ * extra virtual address mapping is needed.
+ *
+ * Returns @len on success, or a negative errno from ttm_bo_kmap().
+ */
+static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo,
+                                unsigned long offset,
+                                void *buf, int len, int write)
+{
+       unsigned long page = offset >> PAGE_SHIFT;
+       unsigned long bytes_left = len;
+       int ret;
+
+       /* Copy a page at a time, that way no extra virtual address
+        * mapping is needed
+        */
+       offset -= page << PAGE_SHIFT;   /* reduce offset to within the first page */
+       do {
+               unsigned long bytes = min(bytes_left, PAGE_SIZE - offset);
+               struct ttm_bo_kmap_obj map;
+               void *ptr;
+               bool is_iomem;
+
+               ret = ttm_bo_kmap(bo, page, 1, &map);
+               if (ret)
+                       return ret;
+
+               ptr = (uint8_t *)ttm_kmap_obj_virtual(&map, &is_iomem) + offset;
+               /* Caller only uses this for SYSTEM/TT memory, which kmap
+                * returns as regular kernel virtual addresses, never iomem.
+                */
+               WARN_ON_ONCE(is_iomem);
+               if (write)
+                       memcpy(ptr, buf, bytes);
+               else
+                       memcpy(buf, ptr, bytes);
+               ttm_bo_kunmap(&map);
+
+               page++;
+               bytes_left -= bytes;
+               offset = 0;     /* subsequent pages are copied from their start */
+       } while (bytes_left);
+
+       return len;
+}
+
+/* vm_operations_struct.access handler: lets ptrace users (e.g. gdb)
+ * read/write the contents of a user-mode mapped BO.
+ *
+ * Returns the number of bytes copied on success, -EIO for out-of-range
+ * accesses or unsupported memory pools, or a negative errno on failure.
+ */
+static int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
+                           void *buf, int len, int write)
+{
+       unsigned long offset = addr - vma->vm_start;
+       struct ttm_buffer_object *bo = vma->vm_private_data;
+       int ret;
+
+       /* Reject empty requests and accesses beyond the end of the BO */
+       if (len < 1 || (offset + len) >> PAGE_SHIFT > bo->num_pages)
+               return -EIO;
+
+       ret = ttm_bo_reserve(bo, true, false, NULL);
+       if (ret)
+               return ret;
+
+       switch (bo->mem.mem_type) {
+       case TTM_PL_SYSTEM:
+               if (unlikely(bo->ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
+                       ret = ttm_tt_swapin(bo->ttm);
+                       if (unlikely(ret != 0))
+                               /* break, don't return: the BO is still
+                                * reserved and must be unreserved below
+                                */
+                               break;
+               }
+               /* fall through */
+       case TTM_PL_TT:
+               ret = ttm_bo_vm_access_kmap(bo, offset, buf, len, write);
+               break;
+       default:
+               /* Private memory pools (e.g. VRAM) are handled by the driver */
+               if (bo->bdev->driver->access_memory)
+                       ret = bo->bdev->driver->access_memory(
+                               bo, offset, buf, len, write);
+               else
+                       ret = -EIO;
+       }
+
+       ttm_bo_unreserve(bo);
+
+       return ret;
+}
+
 static const struct vm_operations_struct ttm_bo_vm_ops = {
        .fault = ttm_bo_vm_fault,
        .open = ttm_bo_vm_open,
-       .close = ttm_bo_vm_close
+       .close = ttm_bo_vm_close,
+       /* .access enables ptrace (e.g. gdb) access to mapped BO contents */
+       .access = ttm_bo_vm_access
 };
 
 static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
index 990d529f823c6e751c0fdb60130fe1b924b83416..d30850e079361e6f2fa049e5c26e298a77769a89 100644 (file)
@@ -472,6 +472,23 @@ struct ttm_bo_driver {
         */
        unsigned long (*io_mem_pfn)(struct ttm_buffer_object *bo,
                                    unsigned long page_offset);
+
+       /**
+        * Read/write memory buffers for ptrace access
+        *
+        * @bo: the BO to access
+        * @offset: the offset from the start of the BO
+        * @buf: pointer to source/destination buffer
+        * @len: number of bytes to copy
+        * @write: whether to read (0) from or write (non-0) to the BO
+        *
+        * If successful, this function should return the number of
+        * bytes copied, -EIO otherwise. If the number of bytes
+        * returned is < len, the function may be called again with
+        * the remainder of the buffer to copy.
+        */
+       int (*access_memory)(struct ttm_buffer_object *bo, unsigned long offset,
+                            void *buf, int len, int write);
 };
 
 /**