{
	struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
-	i915_vma_unpin(ppgtt->vma);
	i915_vma_destroy(ppgtt->vma);
	gen6_ppgtt_free_pd(ppgtt);
	return vma;
}
-static int gen6_ppgtt_pin(struct i915_hw_ppgtt *base)
+int gen6_ppgtt_pin(struct i915_hw_ppgtt *base)
{
	struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
+	/*
+	 * Work around the limited maximum vma->pin_count and the aliasing_ppgtt
+	 * which will be pinned into every active context.
+	 * (When vma->pin_count becomes atomic, I expect we will naturally
+	 * need a larger, unpacked type and kill this redundancy.)
+	 */
+	if (ppgtt->pin_count++)
+		return 0;
+
	/*
	 * PPGTT PDEs reside in the GGTT and consist of 512 entries. The
	 * allocator works in address space sizes, so it's multiplied by page
			    PIN_GLOBAL | PIN_HIGH);
}
+void gen6_ppgtt_unpin(struct i915_hw_ppgtt *base)
+{
+	struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
+
+	GEM_BUG_ON(!ppgtt->pin_count);
+	if (--ppgtt->pin_count)
+		return;
+
+	i915_vma_unpin(ppgtt->vma);
+}
+
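For readers outside i915: the pin_count added above is a plain reference count wrapped around an expensive pin operation, so the first user pays for the real GGTT pin, later users only bump the count, and only the last unpin releases it. Below is a minimal standalone sketch of that pattern, not driver code; struct fake_pd, fake_pd_pin() and fake_pd_unpin() are illustrative names, not i915 API.

#include <assert.h>
#include <stdio.h>

struct fake_pd {
	unsigned int pin_count;	/* users currently holding a pin */
	int ggtt_pinned;	/* stands in for the real vma pin in the GGTT */
};

static int fake_pd_pin(struct fake_pd *pd)
{
	if (pd->pin_count++)	/* already pinned, just take another reference */
		return 0;

	pd->ggtt_pinned = 1;	/* first user performs the actual pin */
	return 0;
}

static void fake_pd_unpin(struct fake_pd *pd)
{
	assert(pd->pin_count);
	if (--pd->pin_count)	/* other users still hold it */
		return;

	pd->ggtt_pinned = 0;	/* last user drops the actual pin */
}

int main(void)
{
	struct fake_pd pd = { 0 };

	fake_pd_pin(&pd);	/* context A becomes active */
	fake_pd_pin(&pd);	/* context B becomes active */
	fake_pd_unpin(&pd);	/* A idles; still pinned for B */
	printf("pinned=%d count=%u\n", pd.ggtt_pinned, pd.pin_count);
	fake_pd_unpin(&pd);	/* B idles; now really unpinned */
	printf("pinned=%d count=%u\n", pd.ggtt_pinned, pd.pin_count);
	return 0;
}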
static struct i915_hw_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915)
{
	struct i915_ggtt * const ggtt = &i915->ggtt;
	if (err)
		goto err_vma;
-	err = gen6_ppgtt_pin(&ppgtt->base);
-	if (err)
-		goto err_pd;
-
-	DRM_DEBUG_DRIVER("Allocated pde space (%lldM) at GTT entry: %llx\n",
-			 ppgtt->vma->node.size >> 20,
-			 ppgtt->vma->node.start / PAGE_SIZE);
-
-	DRM_DEBUG_DRIVER("Adding PPGTT at offset %x\n",
-			 ppgtt->base.pd.base.ggtt_offset << 10);
-
	return &ppgtt->base;
-err_pd:
-	gen6_ppgtt_free_pd(ppgtt);
err_vma:
	i915_vma_destroy(ppgtt->vma);
err_scratch:
	struct i915_vma *vma;
	gen6_pte_t __iomem *pd_addr;
+
+	unsigned int pin_count;
};
#define __to_gen6_ppgtt(base) container_of(base, struct gen6_hw_ppgtt, base)
	kref_put(&ppgtt->ref, i915_ppgtt_release);
}
+int gen6_ppgtt_pin(struct i915_hw_ppgtt *base);
+void gen6_ppgtt_unpin(struct i915_hw_ppgtt *base);
+
void i915_check_and_clear_faults(struct drm_i915_private *dev_priv);
void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv);
void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv);
	__i915_gem_object_release_unless_active(ce->state->obj);
}
+static int __context_pin_ppgtt(struct i915_gem_context *ctx)
+{
+	struct i915_hw_ppgtt *ppgtt;
+	int err = 0;
+
+	ppgtt = ctx->ppgtt ?: ctx->i915->mm.aliasing_ppgtt;
+	if (ppgtt)
+		err = gen6_ppgtt_pin(ppgtt);
+
+	return err;
+}
+
+static void __context_unpin_ppgtt(struct i915_gem_context *ctx)
+{
+	struct i915_hw_ppgtt *ppgtt;
+
+	ppgtt = ctx->ppgtt ?: ctx->i915->mm.aliasing_ppgtt;
+	if (ppgtt)
+		gen6_ppgtt_unpin(ppgtt);
+}
+
static int __context_pin(struct intel_context *ce)
{
	struct i915_vma *vma;
static void intel_ring_context_unpin(struct intel_context *ce)
{
+	__context_unpin_ppgtt(ce->gem_context);
	__context_unpin(ce);
	i915_gem_context_put(ce->gem_context);
	if (err)
		goto err;
+	err = __context_pin_ppgtt(ce->gem_context);
+	if (err)
+		goto err_unpin;
+
	i915_gem_context_get(ctx);
	/* One ringbuffer to rule them all */
	return ce;
+err_unpin:
+	__context_unpin(ce);
err:
	ce->pin_count = 0;
	return ERR_PTR(err);