return container_of(uc, struct intel_gt, uc);
}
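+/*
+ * GuC and HuC are embedded in intel_gt via intel_uc, so the owning GT
+ * can be recovered directly with container_of() instead of bouncing
+ * through drm_i915_private.
+ */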
+static inline struct intel_gt *guc_to_gt(struct intel_guc *guc)
+{
+ return container_of(guc, struct intel_gt, uc.guc);
+}
+
+static inline struct intel_gt *huc_to_gt(struct intel_huc *huc)
+{
+ return container_of(huc, struct intel_gt, uc.huc);
+}
+
void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915);
void intel_gt_init_hw(struct drm_i915_private *i915);
*
*/
+#include "gt/intel_gt.h"
#include "intel_guc.h"
#include "intel_guc_ads.h"
#include "intel_guc_submission.h"
static void gen8_guc_raise_irq(struct intel_guc *guc)
{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct intel_gt *gt = guc_to_gt(guc);
- I915_WRITE(GUC_SEND_INTERRUPT, GUC_SEND_TRIGGER);
+ intel_uncore_write(gt->uncore, GUC_SEND_INTERRUPT, GUC_SEND_TRIGGER);
}
static void gen11_guc_raise_irq(struct intel_guc *guc)
{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct intel_gt *gt = guc_to_gt(guc);
- I915_WRITE(GEN11_GUC_HOST_INTERRUPT, 0);
+ intel_uncore_write(gt->uncore, GEN11_GUC_HOST_INTERRUPT, 0);
}
static inline i915_reg_t guc_send_reg(struct intel_guc *guc, u32 i)
void intel_guc_init_send_regs(struct intel_guc *guc)
{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct intel_gt *gt = guc_to_gt(guc);
enum forcewake_domains fw_domains = 0;
unsigned int i;
- if (INTEL_GEN(dev_priv) >= 11) {
+ if (INTEL_GEN(gt->i915) >= 11) {
guc->send_regs.base =
i915_mmio_reg_offset(GEN11_SOFT_SCRATCH(0));
guc->send_regs.count = GEN11_SOFT_SCRATCH_COUNT;
}
for (i = 0; i < guc->send_regs.count; i++) {
- fw_domains |= intel_uncore_forcewake_for_reg(&dev_priv->uncore,
+ fw_domains |= intel_uncore_forcewake_for_reg(gt->uncore,
guc_send_reg(guc, i),
FW_REG_READ | FW_REG_WRITE);
}
int intel_guc_init(struct intel_guc *guc)
{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct intel_gt *gt = guc_to_gt(guc);
int ret;
ret = intel_uc_fw_init(&guc->fw);
goto err_ads;
/* We need to notify the guc whenever we change the GGTT */
- i915_ggtt_enable_guc(dev_priv);
+ i915_ggtt_enable_guc(gt->ggtt);
return 0;
void intel_guc_fini(struct intel_guc *guc)
{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct intel_gt *gt = guc_to_gt(guc);
- i915_ggtt_disable_guc(dev_priv);
+ i915_ggtt_disable_guc(gt->ggtt);
intel_guc_ct_fini(&guc->ct);
*/
void intel_guc_init_params(struct intel_guc *guc)
{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
u32 params[GUC_CTL_MAX_DWORDS];
int i;
* they are power context saved so it's ok to release forcewake
* when we are done here and take it again at xfer time.
*/
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_BLITTER);
+ intel_uncore_forcewake_get(uncore, FORCEWAKE_BLITTER);
- I915_WRITE(SOFT_SCRATCH(0), 0);
+ intel_uncore_write(uncore, SOFT_SCRATCH(0), 0);
for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
- I915_WRITE(SOFT_SCRATCH(1 + i), params[i]);
+ intel_uncore_write(uncore, SOFT_SCRATCH(1 + i), params[i]);
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_BLITTER);
+ intel_uncore_forcewake_put(uncore, FORCEWAKE_BLITTER);
}
int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len,
int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
u32 *response_buf, u32 response_buf_size)
{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
- struct intel_uncore *uncore = &dev_priv->uncore;
+ struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
u32 status;
int i;
int ret;
int count = min(response_buf_size, guc->send_regs.count - 1);
for (i = 0; i < count; i++)
- response_buf[i] = I915_READ(guc_send_reg(guc, i + 1));
+ response_buf[i] = intel_uncore_read(uncore,
+ guc_send_reg(guc, i + 1));
}
/* Use data from the GuC response as our return value */
*/
int intel_guc_suspend(struct intel_guc *guc)
{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
int ret;
u32 status;
u32 action[] = {
* in progress so we need to take care of that ourselves as well.
*/
- I915_WRITE(SOFT_SCRATCH(14), INTEL_GUC_SLEEP_STATE_INVALID_MASK);
+ intel_uncore_write(uncore, SOFT_SCRATCH(14),
+ INTEL_GUC_SLEEP_STATE_INVALID_MASK);
ret = intel_guc_send(guc, action, ARRAY_SIZE(action));
if (ret)
return ret;
- ret = __intel_wait_for_register(&dev_priv->uncore, SOFT_SCRATCH(14),
+ ret = __intel_wait_for_register(uncore, SOFT_SCRATCH(14),
INTEL_GUC_SLEEP_STATE_INVALID_MASK,
0, 0, 10, &status);
if (ret)
*/
struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct intel_gt *gt = guc_to_gt(guc);
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
u64 flags;
int ret;
- obj = i915_gem_object_create_shmem(dev_priv, size);
+ obj = i915_gem_object_create_shmem(gt->i915, size);
if (IS_ERR(obj))
return ERR_CAST(obj);
- vma = i915_vma_instance(obj, &dev_priv->ggtt.vm, NULL);
+ vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
if (IS_ERR(vma))
goto err;
* Alex Dai <yu.dai@intel.com>
*/
+#include "gt/intel_gt.h"
#include "intel_guc_fw.h"
#include "i915_drv.h"
static void guc_prepare_xfer(struct intel_guc *guc)
{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct intel_gt *gt = guc_to_gt(guc);
+ struct intel_uncore *uncore = gt->uncore;
+ u32 shim_flags = GUC_DISABLE_SRAM_INIT_TO_ZEROES |
+ GUC_ENABLE_READ_CACHE_LOGIC |
+ GUC_ENABLE_MIA_CACHING |
+ GUC_ENABLE_READ_CACHE_FOR_SRAM_DATA |
+ GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA |
+ GUC_ENABLE_MIA_CLOCK_GATING;
/* Must program this register before loading the ucode with DMA */
- I915_WRITE(GUC_SHIM_CONTROL, GUC_DISABLE_SRAM_INIT_TO_ZEROES |
- GUC_ENABLE_READ_CACHE_LOGIC |
- GUC_ENABLE_MIA_CACHING |
- GUC_ENABLE_READ_CACHE_FOR_SRAM_DATA |
- GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA |
- GUC_ENABLE_MIA_CLOCK_GATING);
-
- if (IS_GEN9_LP(dev_priv))
- I915_WRITE(GEN9LP_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
+ intel_uncore_write(uncore, GUC_SHIM_CONTROL, shim_flags);
+
+ if (IS_GEN9_LP(gt->i915))
+ intel_uncore_write(uncore, GEN9LP_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
else
- I915_WRITE(GEN9_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
+ intel_uncore_write(uncore, GEN9_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
- if (IS_GEN(dev_priv, 9)) {
+ if (IS_GEN(gt->i915, 9)) {
/* DOP Clock Gating Enable for GuC clocks */
- I915_WRITE(GEN7_MISCCPCTL, (GEN8_DOP_CLOCK_GATE_GUC_ENABLE |
- I915_READ(GEN7_MISCCPCTL)));
+ intel_uncore_rmw(uncore, GEN7_MISCCPCTL,
+ 0, GEN8_DOP_CLOCK_GATE_GUC_ENABLE);
/* allows for 5us (in 10ns units) before GT can go to RC6 */
- I915_WRITE(GUC_ARAT_C6DIS, 0x1FF);
+ intel_uncore_write(uncore, GUC_ARAT_C6DIS, 0x1FF);
}
}
/* Copy RSA signature from the fw image to HW for verification */
static void guc_xfer_rsa(struct intel_guc *guc)
{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
struct intel_uc_fw *fw = &guc->fw;
struct sg_table *pages = fw->obj->mm.pages;
u32 rsa[UOS_RSA_SCRATCH_COUNT];
rsa, sizeof(rsa), fw->rsa_offset);
for (i = 0; i < UOS_RSA_SCRATCH_COUNT; i++)
- I915_WRITE(UOS_RSA_SCRATCH(i), rsa[i]);
+ intel_uncore_write(uncore, UOS_RSA_SCRATCH(i), rsa[i]);
}
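+/*
+ * The low-level firmware-load helpers below only touch registers, so
+ * they take the uncore directly instead of the GuC.
+ */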
-static bool guc_xfer_completed(struct intel_guc *guc, u32 *status)
+static bool guc_xfer_completed(struct intel_uncore *uncore, u32 *status)
{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
-
/* Did we complete the xfer? */
- *status = I915_READ(DMA_CTRL);
+ *status = intel_uncore_read(uncore, DMA_CTRL);
return !(*status & START_DMA);
}
* This is used for polling the GuC status in a wait_for()
* loop below.
*/
-static inline bool guc_ready(struct intel_guc *guc, u32 *status)
+static inline bool guc_ready(struct intel_uncore *uncore, u32 *status)
{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
- u32 val = I915_READ(GUC_STATUS);
+ u32 val = intel_uncore_read(uncore, GUC_STATUS);
u32 uk_val = val & GS_UKERNEL_MASK;
*status = val;
((val & GS_MIA_CORE_STATE) && (uk_val == GS_UKERNEL_LAPIC_DONE));
}
-static int guc_wait_ucode(struct intel_guc *guc)
+static int guc_wait_ucode(struct intel_uncore *uncore)
{
- struct drm_i915_private *i915 = guc_to_i915(guc);
u32 status;
int ret;
* (Higher levels of the driver may decide to reset the GuC and
* attempt the ucode load again if this happens.)
*/
- ret = wait_for(guc_ready(guc, &status), 100);
+ ret = wait_for(guc_ready(uncore, &status), 100);
DRM_DEBUG_DRIVER("GuC status %#x\n", status);
if ((status & GS_BOOTROM_MASK) == GS_BOOTROM_RSA_FAILED) {
if ((status & GS_UKERNEL_MASK) == GS_UKERNEL_EXCEPTION) {
DRM_ERROR("GuC firmware exception. EIP: %#x\n",
- intel_uncore_read(&i915->uncore, SOFT_SCRATCH(13)));
+ intel_uncore_read(uncore, SOFT_SCRATCH(13)));
ret = -ENXIO;
}
- if (ret == 0 && !guc_xfer_completed(guc, &status)) {
+ if (ret == 0 && !guc_xfer_completed(uncore, &status)) {
DRM_ERROR("GuC is ready, but the xfer %08x is incomplete\n",
status);
ret = -ENXIO;
*/
static int guc_xfer_ucode(struct intel_guc *guc)
{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
struct intel_uc_fw *guc_fw = &guc->fw;
unsigned long offset;
* The header plus uCode will be copied to WOPCM via DMA, excluding any
* other components
*/
- I915_WRITE(DMA_COPY_SIZE, guc_fw->header_size + guc_fw->ucode_size);
+ intel_uncore_write(uncore, DMA_COPY_SIZE,
+ guc_fw->header_size + guc_fw->ucode_size);
/* Set the source address for the new blob */
offset = intel_uc_fw_ggtt_offset(guc_fw) + guc_fw->header_offset;
- I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset));
- I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF);
+ intel_uncore_write(uncore, DMA_ADDR_0_LOW, lower_32_bits(offset));
+ intel_uncore_write(uncore, DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF);
/*
* Set the DMA destination. Current uCode expects the code to be
* loaded at 8k; locations below this are used for the stack.
*/
- I915_WRITE(DMA_ADDR_1_LOW, 0x2000);
- I915_WRITE(DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);
+ intel_uncore_write(uncore, DMA_ADDR_1_LOW, 0x2000);
+ intel_uncore_write(uncore, DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);
/* Finally start the DMA */
- I915_WRITE(DMA_CTRL, _MASKED_BIT_ENABLE(UOS_MOVE | START_DMA));
+ intel_uncore_write(uncore, DMA_CTRL,
+ _MASKED_BIT_ENABLE(UOS_MOVE | START_DMA));
- return guc_wait_ucode(guc);
+ return guc_wait_ucode(uncore);
}
/*
* Load the GuC firmware blob into the MinuteIA.
static int guc_fw_xfer(struct intel_uc_fw *guc_fw)
{
struct intel_guc *guc = container_of(guc_fw, struct intel_guc, fw);
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
int ret;
GEM_BUG_ON(guc_fw->type != INTEL_UC_FW_TYPE_GUC);
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
guc_prepare_xfer(guc);
ret = guc_xfer_ucode(guc);
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
return ret;
}
#include "gt/intel_context.h"
#include "gt/intel_engine_pm.h"
+#include "gt/intel_gt.h"
#include "gt/intel_lrc_reg.h"
#include "intel_guc_submission.h"
static bool __doorbell_valid(struct intel_guc *guc, u16 db_id)
{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
GEM_BUG_ON(db_id >= GUC_NUM_DOORBELLS);
- return I915_READ(GEN8_DRBREGL(db_id)) & GEN8_DRB_VALID;
+ return intel_uncore_read(uncore, GEN8_DRBREGL(db_id)) & GEN8_DRB_VALID;
}
static void __init_doorbell(struct intel_guc_client *client)
guc_stage_desc_pool_destroy(guc);
}
-static void guc_interrupts_capture(struct drm_i915_private *dev_priv)
+static void guc_interrupts_capture(struct intel_gt *gt)
{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
+ struct intel_rps *rps = &gt->i915->gt_pm.rps;
+ struct intel_uncore *uncore = gt->uncore;
struct intel_engine_cs *engine;
enum intel_engine_id id;
int irqs;
* to GuC
*/
irqs = _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING);
- for_each_engine(engine, dev_priv, id)
+ for_each_engine(engine, gt->i915, id)
ENGINE_WRITE(engine, RING_MODE_GEN7, irqs);
/* route USER_INTERRUPT to Host, all others are sent to GuC. */
irqs = GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
/* These three registers have the same bit definitions */
- I915_WRITE(GUC_BCS_RCS_IER, ~irqs);
- I915_WRITE(GUC_VCS2_VCS1_IER, ~irqs);
- I915_WRITE(GUC_WD_VECS_IER, ~irqs);
+ intel_uncore_write(uncore, GUC_BCS_RCS_IER, ~irqs);
+ intel_uncore_write(uncore, GUC_VCS2_VCS1_IER, ~irqs);
+ intel_uncore_write(uncore, GUC_WD_VECS_IER, ~irqs);
/*
* The REDIRECT_TO_GUC bit of the PMINTRMSK register directs all
rps->pm_intrmsk_mbz &= ~GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
}
-static void guc_interrupts_release(struct drm_i915_private *dev_priv)
+static void guc_interrupts_release(struct intel_gt *gt)
{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
+ struct intel_rps *rps = &gt->i915->gt_pm.rps;
+ struct intel_uncore *uncore = gt->uncore;
struct intel_engine_cs *engine;
enum intel_engine_id id;
int irqs;
*/
irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER);
irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING);
- for_each_engine(engine, dev_priv, id)
+ for_each_engine(engine, gt->i915, id)
ENGINE_WRITE(engine, RING_MODE_GEN7, irqs);
/* route all GT interrupts to the host */
- I915_WRITE(GUC_BCS_RCS_IER, 0);
- I915_WRITE(GUC_VCS2_VCS1_IER, 0);
- I915_WRITE(GUC_WD_VECS_IER, 0);
+ intel_uncore_write(uncore, GUC_BCS_RCS_IER, 0);
+ intel_uncore_write(uncore, GUC_VCS2_VCS1_IER, 0);
+ intel_uncore_write(uncore, GUC_WD_VECS_IER, 0);
rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
rps->pm_intrmsk_mbz &= ~ARAT_EXPIRED_INTRMSK;
int intel_guc_submission_enable(struct intel_guc *guc)
{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct intel_gt *gt = guc_to_gt(guc);
struct intel_engine_cs *engine;
enum intel_engine_id id;
int err;
return err;
/* Take over from manual control of ELSP (execlists) */
- guc_interrupts_capture(dev_priv);
+ guc_interrupts_capture(gt);
- for_each_engine(engine, dev_priv, id) {
+ for_each_engine(engine, gt->i915, id) {
engine->set_default_submission = guc_set_default_submission;
engine->set_default_submission(engine);
}
void intel_guc_submission_disable(struct intel_guc *guc)
{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct intel_gt *gt = guc_to_gt(guc);
- GEM_BUG_ON(dev_priv->gt.awake); /* GT should be parked first */
+ GEM_BUG_ON(gt->awake); /* GT should be parked first */
- guc_interrupts_release(dev_priv);
+ guc_interrupts_release(gt);
guc_clients_disable(guc);
}
#include <linux/types.h>
+#include "gt/intel_gt.h"
#include "intel_huc.h"
#include "i915_drv.h"
static int intel_huc_rsa_data_create(struct intel_huc *huc)
{
- struct drm_i915_private *i915 = huc_to_i915(huc);
- struct intel_guc *guc = &i915->gt.uc.guc;
+ struct intel_gt *gt = huc_to_gt(huc);
+ struct intel_guc *guc = &gt->uc.guc;
struct i915_vma *vma;
void *vaddr;
*/
int intel_huc_auth(struct intel_huc *huc)
{
- struct drm_i915_private *i915 = huc_to_i915(huc);
- struct intel_guc *guc = &i915->gt.uc.guc;
+ struct intel_gt *gt = huc_to_gt(huc);
+ struct intel_guc *guc = &gt->uc.guc;
int ret;
if (huc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS)
}
/* Check authentication status, it should be done by now */
- ret = __intel_wait_for_register(&i915->uncore,
+ ret = __intel_wait_for_register(gt->uncore,
huc->status.reg,
huc->status.mask,
huc->status.value,
*/
int intel_huc_check_status(struct intel_huc *huc)
{
- struct drm_i915_private *dev_priv = huc_to_i915(huc);
+ struct intel_gt *gt = huc_to_gt(huc);
intel_wakeref_t wakeref;
- bool status = false;
+ u32 status = 0;
- if (!HAS_HUC(dev_priv))
+ if (!intel_uc_is_using_huc(&gt->uc))
return -ENODEV;
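+ /* Read the raw status under the wakeref; apply mask/value outside it */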
- with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
- status = (I915_READ(huc->status.reg) & huc->status.mask) ==
- huc->status.value;
+ with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref)
+ status = intel_uncore_read(gt->uncore, huc->status.reg);
- return status;
+ return (status & huc->status.mask) == huc->status.value;
}
* Copyright © 2014-2018 Intel Corporation
*/
+#include "gt/intel_gt.h"
#include "intel_huc_fw.h"
#include "i915_drv.h"
static int huc_xfer_ucode(struct intel_huc *huc)
{
struct intel_uc_fw *huc_fw = &huc->fw;
- struct drm_i915_private *dev_priv = huc_to_i915(huc);
- struct intel_uncore *uncore = &dev_priv->uncore;
+ struct intel_uncore *uncore = huc_to_gt(huc)->uncore;
unsigned long offset = 0;
u32 size;
int ret;
*/
static void guc_clear_mmio_msg(struct intel_guc *guc)
{
- intel_uncore_write(&guc_to_i915(guc)->uncore, SOFT_SCRATCH(15), 0);
+ intel_uncore_write(guc_to_gt(guc)->uncore, SOFT_SCRATCH(15), 0);
}
static void guc_get_mmio_msg(struct intel_guc *guc)
spin_lock_irq(&guc->irq_lock);
- val = intel_uncore_read(&guc_to_i915(guc)->uncore, SOFT_SCRATCH(15));
+ val = intel_uncore_read(guc_to_gt(guc)->uncore, SOFT_SCRATCH(15));
guc->mmio_msg |= val & guc->msg_enabled_mask;
/*
return 0;
}
-void i915_ggtt_enable_guc(struct drm_i915_private *i915)
+void i915_ggtt_enable_guc(struct i915_ggtt *ggtt)
{
- struct i915_ggtt *ggtt = &i915->ggtt;
-
GEM_BUG_ON(ggtt->invalidate != gen6_ggtt_invalidate);
ggtt->invalidate = guc_ggtt_invalidate;
ggtt->invalidate(ggtt);
}
-void i915_ggtt_disable_guc(struct drm_i915_private *i915)
+void i915_ggtt_disable_guc(struct i915_ggtt *ggtt)
{
- struct i915_ggtt *ggtt = &i915->ggtt;
-
/* XXX Temporary pardon for error unload */
if (ggtt->invalidate == gen6_ggtt_invalidate)
return;
int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv);
int i915_ggtt_init_hw(struct drm_i915_private *dev_priv);
int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv);
-void i915_ggtt_enable_guc(struct drm_i915_private *i915);
-void i915_ggtt_disable_guc(struct drm_i915_private *i915);
+void i915_ggtt_enable_guc(struct i915_ggtt *ggtt);
+void i915_ggtt_disable_guc(struct i915_ggtt *ggtt);
int i915_init_ggtt(struct drm_i915_private *dev_priv);
void i915_ggtt_driver_release(struct drm_i915_private *dev_priv);
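
For readers unfamiliar with the pattern, here is a minimal userspace
sketch of the container_of() upcast that the new guc_to_gt()/huc_to_gt()
helpers rely on. The struct names are illustrative stand-ins, not the
real i915 types:

#include <stddef.h>
#include <stdio.h>

/* Same idea as the kernel macro, minus the type-checking extras. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct guc { int fw_loaded; };        /* stand-in for struct intel_guc */
struct uc { struct guc guc; };        /* stand-in for struct intel_uc */
struct gt { int id; struct uc uc; };  /* stand-in for struct intel_gt */

static struct gt *guc_to_gt(struct guc *guc)
{
	/* Walk back from the embedded member to its enclosing struct. */
	return container_of(guc, struct gt, uc.guc);
}

int main(void)
{
	struct gt gt = { .id = 42 };

	printf("gt id: %d\n", guc_to_gt(&gt.uc.guc)->id); /* prints 42 */
	return 0;
}

Because uc.guc is embedded by value rather than pointed to, the offset
is a compile-time constant and the conversion costs a single pointer
subtraction.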