drm/vmwgfx: Protect from excessive execbuf kernel memory allocations v3
author: Thomas Hellstrom <thellstrom@vmware.com>
Wed, 12 Dec 2018 10:52:08 +0000 (11:52 +0100)
committer: Thomas Hellstrom <thellstrom@vmware.com>
Thu, 13 Dec 2018 12:04:25 +0000 (13:04 +0100)
With the new validation code, a malicious user-space app could
potentially submit command streams with enough buffer-object and resource
references in them to have the resulting allocated validation nodes and
relocations make the kernel run out of GFP_KERNEL memory.

Protect from this by having the validation code reserve TTM graphics
memory when allocating.

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Deepak Rawat <drawat@vmware.com>
---
v2: Removed leftover debug printouts

drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
drivers/gpu/drm/vmwgfx/vmwgfx_validation.h

index 61a84b958d671671cb56c37b6e44a589989d026b..d7a2dfb8ee9b1101ed99a6ca6fe28e7000a8391c 100644 (file)
@@ -49,6 +49,8 @@
 
 #define VMWGFX_REPO "In Tree"
 
+#define VMWGFX_VALIDATION_MEM_GRAN (16*PAGE_SIZE)
+
 
 /**
  * Fully encoded drm commands. Might move to vmw_drm.h
@@ -918,7 +920,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
                spin_unlock(&dev_priv->cap_lock);
        }
 
-
+       vmw_validation_mem_init_ttm(dev_priv, VMWGFX_VALIDATION_MEM_GRAN);
        ret = vmw_kms_init(dev_priv);
        if (unlikely(ret != 0))
                goto out_no_kms;
index 59f614225bcd72b8a84a5de42813359e9a2760d0..aca974b14b550b11d02e7d3d83ac77fc60e4f439 100644 (file)
@@ -606,6 +606,9 @@ struct vmw_private {
 
        struct vmw_cmdbuf_man *cman;
        DECLARE_BITMAP(irqthread_pending, VMW_IRQTHREAD_MAX);
+
+       /* Validation memory reservation */
+       struct vmw_validation_mem vvm;
 };
 
 static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
@@ -846,6 +849,8 @@ extern int vmw_ttm_global_init(struct vmw_private *dev_priv);
 extern void vmw_ttm_global_release(struct vmw_private *dev_priv);
 extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);
 
+extern void vmw_validation_mem_init_ttm(struct vmw_private *dev_priv,
+                                       size_t gran);
 /**
  * TTM buffer object driver - vmwgfx_ttm_buffer.c
  */
index 260650bb5560aab65c2b4e149e0a1a26a2e62b19..f2d13a72c05d3ef1785422b8b5285536453aecee 100644 (file)
@@ -3835,6 +3835,8 @@ int vmw_execbuf_process(struct drm_file *file_priv,
        struct sync_file *sync_file = NULL;
        DECLARE_VAL_CONTEXT(val_ctx, &sw_context->res_ht, 1);
 
+       vmw_validation_set_val_mem(&val_ctx, &dev_priv->vvm);
+
        if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
                out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
                if (out_fence_fd < 0) {
index 7b1e5a5cbd2c7758aadf70f784f4ce9c629ca1db..f882470467218bef553223b72ab1e0420523a514 100644 (file)
@@ -96,3 +96,39 @@ void vmw_ttm_global_release(struct vmw_private *dev_priv)
        drm_global_item_unref(&dev_priv->bo_global_ref.ref);
        drm_global_item_unref(&dev_priv->mem_global_ref);
 }
+
+/*
+ * struct vmw_validation_mem callback
+ *
+ * Account @size bytes of validation memory against the global TTM memory
+ * accounting object. Returns 0 on success or a negative error code on
+ * failure.
+ */
+static int vmw_vmt_reserve(struct vmw_validation_mem *m, size_t size)
+{
+       /* Non-interruptible; waiting on the GPU is allowed (no_wait_gpu == false) */
+       static struct ttm_operation_ctx ctx = {.interruptible = false,
+                                              .no_wait_gpu = false};
+       /*
+        * Safe because vmw_validation_mem_init_ttm() guarantees @m is the
+        * vvm member embedded in a struct vmw_private.
+        */
+       struct vmw_private *dev_priv = container_of(m, struct vmw_private, vvm);
+
+       return ttm_mem_global_alloc(vmw_mem_glob(dev_priv), size, &ctx);
+}
+
+/*
+ * struct vmw_validation_mem callback
+ *
+ * Return @size bytes of previously reserved validation memory to the
+ * global TTM memory accounting object.
+ */
+static void vmw_vmt_unreserve(struct vmw_validation_mem *m, size_t size)
+{
+       struct vmw_private *dev_priv = container_of(m, struct vmw_private, vvm);
+
+       ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
+}
+
+/**
+ * vmw_validation_mem_init_ttm - Interface the validation memory tracker
+ * to ttm.
+ * @dev_priv: Pointer to struct vmw_private. We take a vmw_private rather
+ * than a struct vmw_validation_mem to make sure the callbacks' assumption
+ * that the struct vmw_validation_mem is embedded in a struct vmw_private
+ * holds true.
+ * @gran: The recommended allocation granularity
+ */
+void vmw_validation_mem_init_ttm(struct vmw_private *dev_priv, size_t gran)
+{
+       dev_priv->vvm = (struct vmw_validation_mem) {
+               .reserve_mem = vmw_vmt_reserve,
+               .unreserve_mem = vmw_vmt_unreserve,
+               .gran = gran,
+       };
+}
index 184025fa938e78fd0372d255b1db552bdb95a88d..f116f092e00bcbf33a4c73a12f504a2580435e6c 100644 (file)
@@ -104,11 +104,25 @@ void *vmw_validation_mem_alloc(struct vmw_validation_context *ctx,
                return NULL;
 
        if (ctx->mem_size_left < size) {
-               struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+               struct page *page;
 
+               if (ctx->vm && ctx->vm_size_left < PAGE_SIZE) {
+                       int ret = ctx->vm->reserve_mem(ctx->vm, ctx->vm->gran);
+
+                       if (ret)
+                               return NULL;
+
+                       ctx->vm_size_left += ctx->vm->gran;
+                       ctx->total_mem += ctx->vm->gran;
+               }
+
+               page = alloc_page(GFP_KERNEL | __GFP_ZERO);
                if (!page)
                        return NULL;
 
+               if (ctx->vm)
+                       ctx->vm_size_left -= PAGE_SIZE;
+
                list_add_tail(&page->lru, &ctx->page_list);
                ctx->page_address = page_address(page);
                ctx->mem_size_left = PAGE_SIZE;
@@ -138,6 +152,11 @@ static void vmw_validation_mem_free(struct vmw_validation_context *ctx)
        }
 
        ctx->mem_size_left = 0;
+       if (ctx->vm && ctx->total_mem) {
+               ctx->vm->unreserve_mem(ctx->vm, ctx->total_mem);
+               ctx->total_mem = 0;
+               ctx->vm_size_left = 0;
+       }
 }
 
 /**
index b57e3292c386b28067a924a33f94dd614ad43190..3b396fea40d7182c6621e40045c77fa0ff50ebab 100644 (file)
 #include <linux/ww_mutex.h>
 #include <drm/ttm/ttm_execbuf_util.h>
 
+/**
+ * struct vmw_validation_mem - Custom interface to provide memory reservations
+ * for the validation code.
+ * @reserve_mem: Callback to reserve memory. Returns 0 on success or a
+ * negative error code on failure.
+ * @unreserve_mem: Callback to unreserve previously reserved memory.
+ * @gran: Reservation granularity. Contains a hint about how much memory
+ * should be reserved in each call to @reserve_mem(). A slow implementation
+ * may want reservation to be done in large batches.
+ */
+struct vmw_validation_mem {
+       int (*reserve_mem)(struct vmw_validation_mem *m, size_t size);
+       void (*unreserve_mem)(struct vmw_validation_mem *m, size_t size);
+       size_t gran;
+};
+
 /**
  * struct vmw_validation_context - Per command submission validation context
  * @ht: Hash table used to find resource- or buffer object duplicates
  * buffer objects
  * @mem_size_left: Free memory left in the last page in @page_list
  * @page_address: Kernel virtual address of the last page in @page_list
+ * @vm: A pointer to the memory reservation interface or NULL if no
+ * memory reservation is needed.
+ * @vm_size_left: Amount of reserved memory that so far has not been allocated.
+ * @total_mem: Amount of reserved memory.
  */
 struct vmw_validation_context {
        struct drm_open_hash *ht;
@@ -59,6 +78,9 @@ struct vmw_validation_context {
        unsigned int merge_dups;
        unsigned int mem_size_left;
        u8 *page_address;
+       struct vmw_validation_mem *vm;
+       size_t vm_size_left;
+       size_t total_mem;
 };
 
 struct vmw_buffer_object;
@@ -101,6 +123,21 @@ vmw_validation_has_bos(struct vmw_validation_context *ctx)
        return !list_empty(&ctx->bo_list);
 }
 
+/**
+ * vmw_validation_set_val_mem - Register a validation mem object for
+ * validation memory reservation
+ * @ctx: The validation context
+ * @vm: Pointer to a struct vmw_validation_mem
+ *
+ * Must be set before the first attempt to allocate validation memory.
+ * Only the pointer is stored, so @vm must remain valid for the lifetime
+ * of @ctx.
+ */
+static inline void
+vmw_validation_set_val_mem(struct vmw_validation_context *ctx,
+                          struct vmw_validation_mem *vm)
+{
+       ctx->vm = vm;
+}
+
 /**
  * vmw_validation_set_ht - Register a hash table for duplicate finding
  * @ctx: The validation context