len += copy_len;
gma += copy_len;
}
- return 0;
+ return len;
}
ret = copy_gma_to_hva(s->vgpu, s->vgpu->gtt.ggtt_mm,
gma, gma + bb_size,
dst);
- if (ret) {
+ if (ret < 0) {
gvt_err("fail to copy guest ring buffer\n");
goto unmap_src;
}
static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
- int ring_id = workload->ring_id;
- struct i915_gem_context *shadow_ctx = vgpu->shadow_ctx;
- struct intel_ring *ring = shadow_ctx->engine[ring_id].ring;
unsigned long gma_head, gma_tail, gma_top, guest_rb_size;
- unsigned int copy_len = 0;
+ u32 *cs;
int ret;
guest_rb_size = _RING_CTL_BUF_SIZE(workload->rb_ctl);
gma_top = workload->rb_start + guest_rb_size;
/* allocate shadow ring buffer */
- ret = intel_ring_begin(workload->req, workload->rb_len / 4);
- if (ret)
- return ret;
+ cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32));
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
/* get shadow ring buffer va */
- workload->shadow_ring_buffer_va = ring->vaddr + ring->tail;
+ workload->shadow_ring_buffer_va = cs;
/* head > tail --> copy head <-> top */
if (gma_head > gma_tail) {
ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm,
- gma_head, gma_top,
- workload->shadow_ring_buffer_va);
- if (ret) {
+ gma_head, gma_top, cs);
+ if (ret < 0) {
gvt_err("fail to copy guest ring buffer\n");
return ret;
}
- copy_len = gma_top - gma_head;
+ cs += ret / sizeof(u32);
gma_head = workload->rb_start;
}
/* copy head or start <-> tail */
- ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm,
- gma_head, gma_tail,
- workload->shadow_ring_buffer_va + copy_len);
- if (ret) {
+ ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm, gma_head, gma_tail, cs);
+ if (ret < 0) {
gvt_err("fail to copy guest ring buffer\n");
return ret;
}
- ring->tail += workload->rb_len;
- intel_ring_advance(ring);
+ cs += ret / sizeof(u32);
+ intel_ring_advance(workload->req, cs);
return 0;
}
wa_ctx->workload->vgpu->gtt.ggtt_mm,
guest_gma, guest_gma + ctx_size,
map);
- if (ret) {
+ if (ret < 0) {
gvt_err("fail to copy guest indirect ctx\n");
goto unmap_src;
}
mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
{
struct drm_i915_private *dev_priv = req->i915;
- struct intel_ring *ring = req->ring;
struct intel_engine_cs *engine = req->engine;
enum intel_engine_id id;
- u32 flags = hw_flags | MI_MM_SPACE_GTT;
+ u32 *cs, flags = hw_flags | MI_MM_SPACE_GTT;
const int num_rings =
/* Use an extended w/a on ivb+ if signalling from other rings */
i915.semaphores ?
if (INTEL_GEN(dev_priv) >= 7)
len += 2 + (num_rings ? 4*num_rings + 6 : 0);
- ret = intel_ring_begin(req, len);
- if (ret)
- return ret;
+ cs = intel_ring_begin(req, len);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
if (INTEL_GEN(dev_priv) >= 7) {
- intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
if (num_rings) {
struct intel_engine_cs *signaller;
- intel_ring_emit(ring,
- MI_LOAD_REGISTER_IMM(num_rings));
+ *cs++ = MI_LOAD_REGISTER_IMM(num_rings);
for_each_engine(signaller, dev_priv, id) {
if (signaller == engine)
continue;
- intel_ring_emit_reg(ring,
- RING_PSMI_CTL(signaller->mmio_base));
- intel_ring_emit(ring,
- _MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
+ *cs++ = i915_mmio_reg_offset(
+ RING_PSMI_CTL(signaller->mmio_base));
+ *cs++ = _MASKED_BIT_ENABLE(
+ GEN6_PSMI_SLEEP_MSG_DISABLE);
}
}
}
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_emit(ring, MI_SET_CONTEXT);
- intel_ring_emit(ring,
- i915_ggtt_offset(req->ctx->engine[RCS].state) | flags);
+ *cs++ = MI_NOOP;
+ *cs++ = MI_SET_CONTEXT;
+ *cs++ = i915_ggtt_offset(req->ctx->engine[RCS].state) | flags;
/*
* w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
* WaMiSetContext_Hang:snb,ivb,vlv
*/
- intel_ring_emit(ring, MI_NOOP);
+ *cs++ = MI_NOOP;
if (INTEL_GEN(dev_priv) >= 7) {
if (num_rings) {
struct intel_engine_cs *signaller;
i915_reg_t last_reg = {}; /* keep gcc quiet */
- intel_ring_emit(ring,
- MI_LOAD_REGISTER_IMM(num_rings));
+ *cs++ = MI_LOAD_REGISTER_IMM(num_rings);
for_each_engine(signaller, dev_priv, id) {
if (signaller == engine)
continue;
last_reg = RING_PSMI_CTL(signaller->mmio_base);
- intel_ring_emit_reg(ring, last_reg);
- intel_ring_emit(ring,
- _MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
+ *cs++ = i915_mmio_reg_offset(last_reg);
+ *cs++ = _MASKED_BIT_DISABLE(
+ GEN6_PSMI_SLEEP_MSG_DISABLE);
}
/* Insert a delay before the next switch! */
- intel_ring_emit(ring,
- MI_STORE_REGISTER_MEM |
- MI_SRM_LRM_GLOBAL_GTT);
- intel_ring_emit_reg(ring, last_reg);
- intel_ring_emit(ring,
- i915_ggtt_offset(engine->scratch));
- intel_ring_emit(ring, MI_NOOP);
+ *cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
+ *cs++ = i915_mmio_reg_offset(last_reg);
+ *cs++ = i915_ggtt_offset(engine->scratch);
+ *cs++ = MI_NOOP;
}
- intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
}
- intel_ring_advance(ring);
+ intel_ring_advance(req, cs);
-	return ret;
+	return 0;
}
static int remap_l3(struct drm_i915_gem_request *req, int slice)
{
- u32 *remap_info = req->i915->l3_parity.remap_info[slice];
- struct intel_ring *ring = req->ring;
- int i, ret;
+ u32 *cs, *remap_info = req->i915->l3_parity.remap_info[slice];
+ int i;
if (!remap_info)
return 0;
- ret = intel_ring_begin(req, GEN7_L3LOG_SIZE/4 * 2 + 2);
- if (ret)
- return ret;
+ cs = intel_ring_begin(req, GEN7_L3LOG_SIZE/4 * 2 + 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
/*
* Note: We do not worry about the concurrent register cacheline hang
* here because no other code should access these registers other than
* at initialization time.
*/
- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4));
+ *cs++ = MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4);
for (i = 0; i < GEN7_L3LOG_SIZE/4; i++) {
- intel_ring_emit_reg(ring, GEN7_L3LOG(slice, i));
- intel_ring_emit(ring, remap_info[i]);
+ *cs++ = i915_mmio_reg_offset(GEN7_L3LOG(slice, i));
+ *cs++ = remap_info[i];
}
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
+ *cs++ = MI_NOOP;
+ intel_ring_advance(req, cs);
return 0;
}
static int
i915_reset_gen7_sol_offsets(struct drm_i915_gem_request *req)
{
- struct intel_ring *ring = req->ring;
- int ret, i;
+ u32 *cs;
+ int i;
if (!IS_GEN7(req->i915) || req->engine->id != RCS) {
DRM_DEBUG("sol reset is gen7/rcs only\n");
return -EINVAL;
}
- ret = intel_ring_begin(req, 4 * 3);
- if (ret)
- return ret;
+ cs = intel_ring_begin(req, 4 * 3);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
for (i = 0; i < 4; i++) {
- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
- intel_ring_emit_reg(ring, GEN7_SO_WRITE_OFFSET(i));
- intel_ring_emit(ring, 0);
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = i915_mmio_reg_offset(GEN7_SO_WRITE_OFFSET(i));
+ *cs++ = 0;
}
- intel_ring_advance(ring);
+ intel_ring_advance(req, cs);
return 0;
}
struct drm_i915_private *dev_priv = params->request->i915;
u64 exec_start, exec_len;
int instp_mode;
- u32 instp_mask;
+ u32 instp_mask, *cs;
int ret;
ret = i915_gem_execbuffer_move_to_gpu(params->request, vmas);
if (params->engine->id == RCS &&
instp_mode != dev_priv->relative_constants_mode) {
- struct intel_ring *ring = params->request->ring;
-
- ret = intel_ring_begin(params->request, 4);
- if (ret)
- return ret;
-
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
- intel_ring_emit_reg(ring, INSTPM);
- intel_ring_emit(ring, instp_mask << 16 | instp_mode);
- intel_ring_advance(ring);
+ cs = intel_ring_begin(params->request, 4);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = MI_NOOP;
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = i915_mmio_reg_offset(INSTPM);
+ *cs++ = instp_mask << 16 | instp_mode;
+ intel_ring_advance(params->request, cs);
dev_priv->relative_constants_mode = instp_mode;
}
unsigned entry,
dma_addr_t addr)
{
- struct intel_ring *ring = req->ring;
struct intel_engine_cs *engine = req->engine;
- int ret;
+ u32 *cs;
BUG_ON(entry >= 4);
- ret = intel_ring_begin(req, 6);
- if (ret)
- return ret;
+ cs = intel_ring_begin(req, 6);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
- intel_ring_emit_reg(ring, GEN8_RING_PDP_UDW(engine, entry));
- intel_ring_emit(ring, upper_32_bits(addr));
- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
- intel_ring_emit_reg(ring, GEN8_RING_PDP_LDW(engine, entry));
- intel_ring_emit(ring, lower_32_bits(addr));
- intel_ring_advance(ring);
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(engine, entry));
+ *cs++ = upper_32_bits(addr);
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, entry));
+ *cs++ = lower_32_bits(addr);
+ intel_ring_advance(req, cs);
return 0;
}
static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_request *req)
{
- struct intel_ring *ring = req->ring;
struct intel_engine_cs *engine = req->engine;
+ u32 *cs;
int ret;
/* NB: TLBs must be flushed and invalidated before a switch */
if (ret)
return ret;
- ret = intel_ring_begin(req, 6);
- if (ret)
- return ret;
+ cs = intel_ring_begin(req, 6);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
- intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(engine));
- intel_ring_emit(ring, PP_DIR_DCLV_2G);
- intel_ring_emit_reg(ring, RING_PP_DIR_BASE(engine));
- intel_ring_emit(ring, get_pd_offset(ppgtt));
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
+ *cs++ = MI_LOAD_REGISTER_IMM(2);
+ *cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine));
+ *cs++ = PP_DIR_DCLV_2G;
+ *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
+ *cs++ = get_pd_offset(ppgtt);
+ *cs++ = MI_NOOP;
+ intel_ring_advance(req, cs);
return 0;
}
static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_request *req)
{
- struct intel_ring *ring = req->ring;
struct intel_engine_cs *engine = req->engine;
+ u32 *cs;
int ret;
/* NB: TLBs must be flushed and invalidated before a switch */
if (ret)
return ret;
- ret = intel_ring_begin(req, 6);
- if (ret)
- return ret;
-
- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
- intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(engine));
- intel_ring_emit(ring, PP_DIR_DCLV_2G);
- intel_ring_emit_reg(ring, RING_PP_DIR_BASE(engine));
- intel_ring_emit(ring, get_pd_offset(ppgtt));
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
+ cs = intel_ring_begin(req, 6);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = MI_LOAD_REGISTER_IMM(2);
+ *cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine));
+ *cs++ = PP_DIR_DCLV_2G;
+ *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
+ *cs++ = get_pd_offset(ppgtt);
+ *cs++ = MI_NOOP;
+ intel_ring_advance(req, cs);
/* XXX: RCS is the only one to auto invalidate the TLBs? */
if (engine->id != RCS) {
struct intel_ring *ring = request->ring;
struct intel_timeline *timeline = request->timeline;
struct drm_i915_gem_request *prev;
+ u32 *cs;
int err;
lockdep_assert_held(&request->i915->drm.struct_mutex);
* GPU processing the request, we never over-estimate the
* position of the ring's HEAD.
*/
- err = intel_ring_begin(request, engine->emit_breadcrumb_sz);
- GEM_BUG_ON(err);
- request->postfix = ring->tail;
- ring->tail += engine->emit_breadcrumb_sz * sizeof(u32);
+ cs = intel_ring_begin(request, engine->emit_breadcrumb_sz);
+ GEM_BUG_ON(IS_ERR(cs));
+ request->postfix = intel_ring_offset(request, cs);
/* Seal the request and mark it as pending execution. Note that
* we may inspect this state, without holding any locks, during
struct drm_i915_gem_request *req,
uint32_t flags)
{
- struct intel_ring *ring = req->ring;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- u32 flip_mask;
- int ret;
+ u32 flip_mask, *cs;
- ret = intel_ring_begin(req, 6);
- if (ret)
- return ret;
+ cs = intel_ring_begin(req, 6);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
/* Can't queue multiple flips, so wait for the previous
* one to finish before executing the next.
flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
else
flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
- intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_emit(ring, MI_DISPLAY_FLIP |
- MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
- intel_ring_emit(ring, fb->pitches[0]);
- intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
- intel_ring_emit(ring, 0); /* aux display base address, unused */
+ *cs++ = MI_WAIT_FOR_EVENT | flip_mask;
+ *cs++ = MI_NOOP;
+ *cs++ = MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane);
+ *cs++ = fb->pitches[0];
+ *cs++ = intel_crtc->flip_work->gtt_offset;
+ *cs++ = 0; /* aux display base address, unused */
return 0;
}
struct drm_i915_gem_request *req,
uint32_t flags)
{
- struct intel_ring *ring = req->ring;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- u32 flip_mask;
- int ret;
+ u32 flip_mask, *cs;
- ret = intel_ring_begin(req, 6);
- if (ret)
- return ret;
+ cs = intel_ring_begin(req, 6);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
if (intel_crtc->plane)
flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
else
flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
- intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
- MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
- intel_ring_emit(ring, fb->pitches[0]);
- intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
- intel_ring_emit(ring, MI_NOOP);
+ *cs++ = MI_WAIT_FOR_EVENT | flip_mask;
+ *cs++ = MI_NOOP;
+ *cs++ = MI_DISPLAY_FLIP_I915 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane);
+ *cs++ = fb->pitches[0];
+ *cs++ = intel_crtc->flip_work->gtt_offset;
+ *cs++ = MI_NOOP;
return 0;
}
struct drm_i915_gem_request *req,
uint32_t flags)
{
- struct intel_ring *ring = req->ring;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- uint32_t pf, pipesrc;
- int ret;
+ u32 pf, pipesrc, *cs;
- ret = intel_ring_begin(req, 4);
- if (ret)
- return ret;
+ cs = intel_ring_begin(req, 4);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
/* i965+ uses the linear or tiled offsets from the
* Display Registers (which do not change across a page-flip)
* so we need only reprogram the base address.
*/
- intel_ring_emit(ring, MI_DISPLAY_FLIP |
- MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
- intel_ring_emit(ring, fb->pitches[0]);
- intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset |
- intel_fb_modifier_to_tiling(fb->modifier));
+ *cs++ = MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane);
+ *cs++ = fb->pitches[0];
+ *cs++ = intel_crtc->flip_work->gtt_offset |
+ intel_fb_modifier_to_tiling(fb->modifier);
/* XXX Enabling the panel-fitter across page-flip is so far
* untested on non-native modes, so ignore it for now.
*/
pf = 0;
pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
- intel_ring_emit(ring, pf | pipesrc);
+ *cs++ = pf | pipesrc;
return 0;
}
struct drm_i915_gem_request *req,
uint32_t flags)
{
- struct intel_ring *ring = req->ring;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- uint32_t pf, pipesrc;
- int ret;
+ u32 pf, pipesrc, *cs;
- ret = intel_ring_begin(req, 4);
- if (ret)
- return ret;
+ cs = intel_ring_begin(req, 4);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
- intel_ring_emit(ring, MI_DISPLAY_FLIP |
- MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
- intel_ring_emit(ring, fb->pitches[0] |
- intel_fb_modifier_to_tiling(fb->modifier));
- intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
+ *cs++ = MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane);
+ *cs++ = fb->pitches[0] | intel_fb_modifier_to_tiling(fb->modifier);
+ *cs++ = intel_crtc->flip_work->gtt_offset;
/* Contrary to the suggestions in the documentation,
* "Enable Panel Fitter" does not seem to be required when page
*/
pf = 0;
pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
- intel_ring_emit(ring, pf | pipesrc);
+ *cs++ = pf | pipesrc;
return 0;
}
uint32_t flags)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_ring *ring = req->ring;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- uint32_t plane_bit = 0;
+ u32 *cs, plane_bit = 0;
int len, ret;
switch (intel_crtc->plane) {
if (ret)
return ret;
- ret = intel_ring_begin(req, len);
- if (ret)
- return ret;
+ cs = intel_ring_begin(req, len);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
/* Unmask the flip-done completion message. Note that the bspec says that
* we should do this for both the BCS and RCS, and that we must not unmask
* to zero does lead to lockups within MI_DISPLAY_FLIP.
*/
if (req->engine->id == RCS) {
- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
- intel_ring_emit_reg(ring, DERRMR);
- intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
- DERRMR_PIPEB_PRI_FLIP_DONE |
- DERRMR_PIPEC_PRI_FLIP_DONE));
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = i915_mmio_reg_offset(DERRMR);
+ *cs++ = ~(DERRMR_PIPEA_PRI_FLIP_DONE |
+ DERRMR_PIPEB_PRI_FLIP_DONE |
+ DERRMR_PIPEC_PRI_FLIP_DONE);
if (IS_GEN8(dev_priv))
- intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8 |
- MI_SRM_LRM_GLOBAL_GTT);
+ *cs++ = MI_STORE_REGISTER_MEM_GEN8 |
+ MI_SRM_LRM_GLOBAL_GTT;
else
- intel_ring_emit(ring, MI_STORE_REGISTER_MEM |
- MI_SRM_LRM_GLOBAL_GTT);
- intel_ring_emit_reg(ring, DERRMR);
- intel_ring_emit(ring,
- i915_ggtt_offset(req->engine->scratch) + 256);
+ *cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
+ *cs++ = i915_mmio_reg_offset(DERRMR);
+ *cs++ = i915_ggtt_offset(req->engine->scratch) + 256;
if (IS_GEN8(dev_priv)) {
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, MI_NOOP);
+ *cs++ = 0;
+ *cs++ = MI_NOOP;
}
}
- intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
- intel_ring_emit(ring, fb->pitches[0] |
- intel_fb_modifier_to_tiling(fb->modifier));
- intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
- intel_ring_emit(ring, (MI_NOOP));
+ *cs++ = MI_DISPLAY_FLIP_I915 | plane_bit;
+ *cs++ = fb->pitches[0] | intel_fb_modifier_to_tiling(fb->modifier);
+ *cs++ = intel_crtc->flip_work->gtt_offset;
+ *cs++ = MI_NOOP;
return 0;
}
{
struct intel_engine_cs *engine = request->engine;
struct intel_context *ce = &request->ctx->engine[engine->id];
+ u32 *cs;
int ret;
GEM_BUG_ON(!ce->pin_count);
goto err;
}
- ret = intel_ring_begin(request, 0);
- if (ret)
+ cs = intel_ring_begin(request, 0);
+ if (IS_ERR(cs)) {
+ ret = PTR_ERR(cs);
goto err_unreserve;
+ }
if (!ce->initialised) {
ret = engine->init_context(request);
static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
{
- int ret, i;
- struct intel_ring *ring = req->ring;
struct i915_workarounds *w = &req->i915->workarounds;
+ u32 *cs;
+ int ret, i;
if (w->count == 0)
return 0;
if (ret)
return ret;
- ret = intel_ring_begin(req, w->count * 2 + 2);
- if (ret)
- return ret;
+ cs = intel_ring_begin(req, w->count * 2 + 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
+ *cs++ = MI_LOAD_REGISTER_IMM(w->count);
for (i = 0; i < w->count; i++) {
- intel_ring_emit_reg(ring, w->reg[i].addr);
- intel_ring_emit(ring, w->reg[i].value);
+ *cs++ = i915_mmio_reg_offset(w->reg[i].addr);
+ *cs++ = w->reg[i].value;
}
- intel_ring_emit(ring, MI_NOOP);
+ *cs++ = MI_NOOP;
- intel_ring_advance(ring);
+ intel_ring_advance(req, cs);
ret = req->engine->emit_flush(req, EMIT_BARRIER);
if (ret)
static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
{
struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt;
- struct intel_ring *ring = req->ring;
struct intel_engine_cs *engine = req->engine;
const int num_lri_cmds = GEN8_LEGACY_PDPES * 2;
- int i, ret;
+ u32 *cs;
+ int i;
- ret = intel_ring_begin(req, num_lri_cmds * 2 + 2);
- if (ret)
- return ret;
+ cs = intel_ring_begin(req, num_lri_cmds * 2 + 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_lri_cmds));
+ *cs++ = MI_LOAD_REGISTER_IMM(num_lri_cmds);
for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
- intel_ring_emit_reg(ring, GEN8_RING_PDP_UDW(engine, i));
- intel_ring_emit(ring, upper_32_bits(pd_daddr));
- intel_ring_emit_reg(ring, GEN8_RING_PDP_LDW(engine, i));
- intel_ring_emit(ring, lower_32_bits(pd_daddr));
+ *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(engine, i));
+ *cs++ = upper_32_bits(pd_daddr);
+ *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, i));
+ *cs++ = lower_32_bits(pd_daddr);
}
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
+ *cs++ = MI_NOOP;
+ intel_ring_advance(req, cs);
return 0;
}
u64 offset, u32 len,
unsigned int dispatch_flags)
{
- struct intel_ring *ring = req->ring;
bool ppgtt = !(dispatch_flags & I915_DISPATCH_SECURE);
+ u32 *cs;
int ret;
/* Don't rely in hw updating PDPs, specially in lite-restore.
req->ctx->ppgtt->pd_dirty_rings &= ~intel_engine_flag(req->engine);
}
- ret = intel_ring_begin(req, 4);
- if (ret)
- return ret;
+ cs = intel_ring_begin(req, 4);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
/* FIXME(BDW): Address space and security selectors. */
- intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 |
- (ppgtt<<8) |
- (dispatch_flags & I915_DISPATCH_RS ?
- MI_BATCH_RESOURCE_STREAMER : 0));
- intel_ring_emit(ring, lower_32_bits(offset));
- intel_ring_emit(ring, upper_32_bits(offset));
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
+ *cs++ = MI_BATCH_BUFFER_START_GEN8 | (ppgtt << 8) | (dispatch_flags &
+ I915_DISPATCH_RS ? MI_BATCH_RESOURCE_STREAMER : 0);
+ *cs++ = lower_32_bits(offset);
+ *cs++ = upper_32_bits(offset);
+ *cs++ = MI_NOOP;
+ intel_ring_advance(req, cs);
return 0;
}
static int gen8_emit_flush(struct drm_i915_gem_request *request, u32 mode)
{
- struct intel_ring *ring = request->ring;
- u32 cmd;
- int ret;
+ u32 cmd, *cs;
- ret = intel_ring_begin(request, 4);
- if (ret)
- return ret;
+ cs = intel_ring_begin(request, 4);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
cmd = MI_FLUSH_DW + 1;
cmd |= MI_INVALIDATE_BSD;
}
- intel_ring_emit(ring, cmd);
- intel_ring_emit(ring,
- I915_GEM_HWS_SCRATCH_ADDR |
- MI_FLUSH_DW_USE_GTT);
- intel_ring_emit(ring, 0); /* upper addr */
- intel_ring_emit(ring, 0); /* value */
- intel_ring_advance(ring);
+ *cs++ = cmd;
+ *cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
+ *cs++ = 0; /* upper addr */
+ *cs++ = 0; /* value */
+ intel_ring_advance(request, cs);
return 0;
}
static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
u32 mode)
{
- struct intel_ring *ring = request->ring;
struct intel_engine_cs *engine = request->engine;
u32 scratch_addr =
i915_ggtt_offset(engine->scratch) + 2 * CACHELINE_BYTES;
bool vf_flush_wa = false, dc_flush_wa = false;
- u32 flags = 0;
- int ret;
+ u32 *cs, flags = 0;
int len;
flags |= PIPE_CONTROL_CS_STALL;
if (dc_flush_wa)
len += 12;
- ret = intel_ring_begin(request, len);
- if (ret)
- return ret;
+ cs = intel_ring_begin(request, len);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
if (vf_flush_wa) {
- intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, 0);
+ *cs++ = GFX_OP_PIPE_CONTROL(6);
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
}
if (dc_flush_wa) {
- intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
- intel_ring_emit(ring, PIPE_CONTROL_DC_FLUSH_ENABLE);
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, 0);
+ *cs++ = GFX_OP_PIPE_CONTROL(6);
+ *cs++ = PIPE_CONTROL_DC_FLUSH_ENABLE;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
}
- intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
- intel_ring_emit(ring, flags);
- intel_ring_emit(ring, scratch_addr);
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, 0);
+ *cs++ = GFX_OP_PIPE_CONTROL(6);
+ *cs++ = flags;
+ *cs++ = scratch_addr;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
if (dc_flush_wa) {
- intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
- intel_ring_emit(ring, PIPE_CONTROL_CS_STALL);
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, 0);
+ *cs++ = GFX_OP_PIPE_CONTROL(6);
+ *cs++ = PIPE_CONTROL_CS_STALL;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
}
- intel_ring_advance(ring);
+ intel_ring_advance(request, cs);
return 0;
}
* used as a workaround for not being allowed to do lite
* restore with HEAD==TAIL (WaIdleLiteRestore).
*/
-static void gen8_emit_wa_tail(struct drm_i915_gem_request *request, u32 *out)
+static void gen8_emit_wa_tail(struct drm_i915_gem_request *request, u32 *cs)
{
- *out++ = MI_NOOP;
- *out++ = MI_NOOP;
- request->wa_tail = intel_ring_offset(request->ring, out);
+ *cs++ = MI_NOOP;
+ *cs++ = MI_NOOP;
+ request->wa_tail = intel_ring_offset(request, cs);
}
-static void gen8_emit_breadcrumb(struct drm_i915_gem_request *request,
- u32 *out)
+static void gen8_emit_breadcrumb(struct drm_i915_gem_request *request, u32 *cs)
{
/* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
BUILD_BUG_ON(I915_GEM_HWS_INDEX_ADDR & (1 << 5));
- *out++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW;
- *out++ = intel_hws_seqno_address(request->engine) | MI_FLUSH_DW_USE_GTT;
- *out++ = 0;
- *out++ = request->global_seqno;
- *out++ = MI_USER_INTERRUPT;
- *out++ = MI_NOOP;
- request->tail = intel_ring_offset(request->ring, out);
+ *cs++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW;
+ *cs++ = intel_hws_seqno_address(request->engine) | MI_FLUSH_DW_USE_GTT;
+ *cs++ = 0;
+ *cs++ = request->global_seqno;
+ *cs++ = MI_USER_INTERRUPT;
+ *cs++ = MI_NOOP;
+ request->tail = intel_ring_offset(request, cs);
- gen8_emit_wa_tail(request, out);
+ gen8_emit_wa_tail(request, cs);
}
static const int gen8_emit_breadcrumb_sz = 6 + WA_TAIL_DWORDS;
static void gen8_emit_breadcrumb_render(struct drm_i915_gem_request *request,
- u32 *out)
+ u32 *cs)
{
/* We're using qword write, seqno should be aligned to 8 bytes. */
BUILD_BUG_ON(I915_GEM_HWS_INDEX & 1);
* need a prior CS_STALL, which is emitted by the flush
* following the batch.
*/
- *out++ = GFX_OP_PIPE_CONTROL(6);
- *out++ = (PIPE_CONTROL_GLOBAL_GTT_IVB |
- PIPE_CONTROL_CS_STALL |
- PIPE_CONTROL_QW_WRITE);
- *out++ = intel_hws_seqno_address(request->engine);
- *out++ = 0;
- *out++ = request->global_seqno;
+ *cs++ = GFX_OP_PIPE_CONTROL(6);
+ *cs++ = PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_QW_WRITE;
+ *cs++ = intel_hws_seqno_address(request->engine);
+ *cs++ = 0;
+ *cs++ = request->global_seqno;
/* We're thrashing one dword of HWS. */
- *out++ = 0;
- *out++ = MI_USER_INTERRUPT;
- *out++ = MI_NOOP;
- request->tail = intel_ring_offset(request->ring, out);
+ *cs++ = 0;
+ *cs++ = MI_USER_INTERRUPT;
+ *cs++ = MI_NOOP;
+ request->tail = intel_ring_offset(request, cs);
- gen8_emit_wa_tail(request, out);
+ gen8_emit_wa_tail(request, cs);
}
static const int gen8_emit_breadcrumb_render_sz = 8 + WA_TAIL_DWORDS;
static int emit_mocs_control_table(struct drm_i915_gem_request *req,
const struct drm_i915_mocs_table *table)
{
- struct intel_ring *ring = req->ring;
enum intel_engine_id engine = req->engine->id;
unsigned int index;
- int ret;
+ u32 *cs;
if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES))
return -ENODEV;
- ret = intel_ring_begin(req, 2 + 2 * GEN9_NUM_MOCS_ENTRIES);
- if (ret)
- return ret;
+ cs = intel_ring_begin(req, 2 + 2 * GEN9_NUM_MOCS_ENTRIES);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES));
+ *cs++ = MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES);
for (index = 0; index < table->size; index++) {
- intel_ring_emit_reg(ring, mocs_register(engine, index));
- intel_ring_emit(ring, table->table[index].control_value);
+ *cs++ = i915_mmio_reg_offset(mocs_register(engine, index));
+ *cs++ = table->table[index].control_value;
}
/*
* that value to all the used entries.
*/
for (; index < GEN9_NUM_MOCS_ENTRIES; index++) {
- intel_ring_emit_reg(ring, mocs_register(engine, index));
- intel_ring_emit(ring, table->table[0].control_value);
+ *cs++ = i915_mmio_reg_offset(mocs_register(engine, index));
+ *cs++ = table->table[0].control_value;
}
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
+ *cs++ = MI_NOOP;
+ intel_ring_advance(req, cs);
return 0;
}
static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
const struct drm_i915_mocs_table *table)
{
- struct intel_ring *ring = req->ring;
unsigned int i;
- int ret;
+ u32 *cs;
if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES))
return -ENODEV;
- ret = intel_ring_begin(req, 2 + GEN9_NUM_MOCS_ENTRIES);
- if (ret)
- return ret;
+ cs = intel_ring_begin(req, 2 + GEN9_NUM_MOCS_ENTRIES);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
- intel_ring_emit(ring,
- MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES / 2));
+ *cs++ = MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES / 2);
for (i = 0; i < table->size/2; i++) {
- intel_ring_emit_reg(ring, GEN9_LNCFCMOCS(i));
- intel_ring_emit(ring, l3cc_combine(table, 2*i, 2*i+1));
+ *cs++ = i915_mmio_reg_offset(GEN9_LNCFCMOCS(i));
+ *cs++ = l3cc_combine(table, 2 * i, 2 * i + 1);
}
if (table->size & 0x01) {
/* Odd table size - 1 left over */
- intel_ring_emit_reg(ring, GEN9_LNCFCMOCS(i));
- intel_ring_emit(ring, l3cc_combine(table, 2*i, 0));
+ *cs++ = i915_mmio_reg_offset(GEN9_LNCFCMOCS(i));
+ *cs++ = l3cc_combine(table, 2 * i, 0);
i++;
}
* they are reserved by the hardware.
*/
for (; i < GEN9_NUM_MOCS_ENTRIES / 2; i++) {
- intel_ring_emit_reg(ring, GEN9_LNCFCMOCS(i));
- intel_ring_emit(ring, l3cc_combine(table, 0, 0));
+ *cs++ = i915_mmio_reg_offset(GEN9_LNCFCMOCS(i));
+ *cs++ = l3cc_combine(table, 0, 0);
}
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
+ *cs++ = MI_NOOP;
+ intel_ring_advance(req, cs);
return 0;
}
{
struct drm_i915_private *dev_priv = overlay->i915;
struct drm_i915_gem_request *req;
- struct intel_ring *ring;
- int ret;
+ u32 *cs;
WARN_ON(overlay->active);
WARN_ON(IS_I830(dev_priv) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE));
if (IS_ERR(req))
return PTR_ERR(req);
- ret = intel_ring_begin(req, 4);
- if (ret) {
+ cs = intel_ring_begin(req, 4);
+ if (IS_ERR(cs)) {
i915_add_request_no_flush(req);
- return ret;
+ return PTR_ERR(cs);
}
overlay->active = true;
if (IS_I830(dev_priv))
i830_overlay_clock_gating(dev_priv, false);
- ring = req->ring;
- intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
- intel_ring_emit(ring, overlay->flip_addr | OFC_UPDATE);
- intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
+ *cs++ = MI_OVERLAY_FLIP | MI_OVERLAY_ON;
+ *cs++ = overlay->flip_addr | OFC_UPDATE;
+ *cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
+ *cs++ = MI_NOOP;
+ intel_ring_advance(req, cs);
return intel_overlay_do_wait_request(overlay, req, NULL);
}
{
struct drm_i915_private *dev_priv = overlay->i915;
struct drm_i915_gem_request *req;
- struct intel_ring *ring;
u32 flip_addr = overlay->flip_addr;
- u32 tmp;
- int ret;
+ u32 tmp, *cs;
WARN_ON(!overlay->active);
if (IS_ERR(req))
return PTR_ERR(req);
- ret = intel_ring_begin(req, 2);
- if (ret) {
+ cs = intel_ring_begin(req, 2);
+ if (IS_ERR(cs)) {
i915_add_request_no_flush(req);
- return ret;
+ return PTR_ERR(cs);
}
- ring = req->ring;
- intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
- intel_ring_emit(ring, flip_addr);
- intel_ring_advance(ring);
+ *cs++ = MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE;
+ *cs++ = flip_addr;
+ intel_ring_advance(req, cs);
intel_overlay_flip_prepare(overlay, vma);
static int intel_overlay_off(struct intel_overlay *overlay)
{
struct drm_i915_gem_request *req;
- struct intel_ring *ring;
- u32 flip_addr = overlay->flip_addr;
- int ret;
+ u32 *cs, flip_addr = overlay->flip_addr;
WARN_ON(!overlay->active);
if (IS_ERR(req))
return PTR_ERR(req);
- ret = intel_ring_begin(req, 6);
- if (ret) {
+ cs = intel_ring_begin(req, 6);
+ if (IS_ERR(cs)) {
i915_add_request_no_flush(req);
- return ret;
+ return PTR_ERR(cs);
}
- ring = req->ring;
-
/* wait for overlay to go idle */
- intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
- intel_ring_emit(ring, flip_addr);
- intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+ *cs++ = MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE;
+ *cs++ = flip_addr;
+ *cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
/* turn overlay off */
- intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
- intel_ring_emit(ring, flip_addr);
- intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+ *cs++ = MI_OVERLAY_FLIP | MI_OVERLAY_OFF;
+ *cs++ = flip_addr;
+ *cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
- intel_ring_advance(ring);
+ intel_ring_advance(req, cs);
intel_overlay_flip_prepare(overlay, NULL);
static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
{
struct drm_i915_private *dev_priv = overlay->i915;
+ u32 *cs;
int ret;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
if (I915_READ(ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) {
/* synchronous slowpath */
struct drm_i915_gem_request *req;
- struct intel_ring *ring;
req = alloc_request(overlay);
if (IS_ERR(req))
return PTR_ERR(req);
- ret = intel_ring_begin(req, 2);
- if (ret) {
+ cs = intel_ring_begin(req, 2);
+ if (IS_ERR(cs)) {
i915_add_request_no_flush(req);
- return ret;
+ return PTR_ERR(cs);
}
- ring = req->ring;
- intel_ring_emit(ring,
- MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
+ *cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
+ *cs++ = MI_NOOP;
+ intel_ring_advance(req, cs);
ret = intel_overlay_do_wait_request(overlay, req,
intel_overlay_release_old_vid_tail);
static int
gen2_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
- struct intel_ring *ring = req->ring;
- u32 cmd;
- int ret;
+ u32 cmd, *cs;
cmd = MI_FLUSH;
if (mode & EMIT_INVALIDATE)
cmd |= MI_READ_FLUSH;
- ret = intel_ring_begin(req, 2);
- if (ret)
- return ret;
+ cs = intel_ring_begin(req, 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
- intel_ring_emit(ring, cmd);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
+ *cs++ = cmd;
+ *cs++ = MI_NOOP;
+ intel_ring_advance(req, cs);
return 0;
}
static int
gen4_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
- struct intel_ring *ring = req->ring;
- u32 cmd;
- int ret;
+ u32 cmd, *cs;
/*
* read/write caches:
cmd |= MI_INVALIDATE_ISP;
}
- ret = intel_ring_begin(req, 2);
- if (ret)
- return ret;
+ cs = intel_ring_begin(req, 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
- intel_ring_emit(ring, cmd);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
+ *cs++ = cmd;
+ *cs++ = MI_NOOP;
+ intel_ring_advance(req, cs);
return 0;
}
static int
intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
{
- struct intel_ring *ring = req->ring;
u32 scratch_addr =
i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
- int ret;
-
- ret = intel_ring_begin(req, 6);
- if (ret)
- return ret;
-
- intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
- intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
- PIPE_CONTROL_STALL_AT_SCOREBOARD);
- intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
- intel_ring_emit(ring, 0); /* low dword */
- intel_ring_emit(ring, 0); /* high dword */
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
-
- ret = intel_ring_begin(req, 6);
- if (ret)
- return ret;
-
- intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
- intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
- intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
+ u32 *cs;
+
+ cs = intel_ring_begin(req, 6);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = GFX_OP_PIPE_CONTROL(5);
+ *cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
+ *cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
+ *cs++ = 0; /* low dword */
+ *cs++ = 0; /* high dword */
+ *cs++ = MI_NOOP;
+ intel_ring_advance(req, cs);
+
+ cs = intel_ring_begin(req, 6);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = GFX_OP_PIPE_CONTROL(5);
+ *cs++ = PIPE_CONTROL_QW_WRITE;
+ *cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = MI_NOOP;
+ intel_ring_advance(req, cs);
return 0;
}
static int
gen6_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
- struct intel_ring *ring = req->ring;
u32 scratch_addr =
i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
- u32 flags = 0;
+ u32 *cs, flags = 0;
int ret;
/* Force SNB workarounds for PIPE_CONTROL flushes */
flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
}
- ret = intel_ring_begin(req, 4);
- if (ret)
- return ret;
+ cs = intel_ring_begin(req, 4);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
- intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
- intel_ring_emit(ring, flags);
- intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
- intel_ring_emit(ring, 0);
- intel_ring_advance(ring);
+ *cs++ = GFX_OP_PIPE_CONTROL(4);
+ *cs++ = flags;
+ *cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
+ *cs++ = 0;
+ intel_ring_advance(req, cs);
return 0;
}
static int
gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
{
- struct intel_ring *ring = req->ring;
- int ret;
+ u32 *cs;
- ret = intel_ring_begin(req, 4);
- if (ret)
- return ret;
+ cs = intel_ring_begin(req, 4);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
- intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
- intel_ring_emit(ring,
- PIPE_CONTROL_CS_STALL |
- PIPE_CONTROL_STALL_AT_SCOREBOARD);
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, 0);
- intel_ring_advance(ring);
+ *cs++ = GFX_OP_PIPE_CONTROL(4);
+ *cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
+ *cs++ = 0;
+ *cs++ = 0;
+ intel_ring_advance(req, cs);
return 0;
}
static int
gen7_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
- struct intel_ring *ring = req->ring;
u32 scratch_addr =
i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
- u32 flags = 0;
- int ret;
+ u32 *cs, flags = 0;
/*
* Ensure that any following seqno writes only happen when the render
gen7_render_ring_cs_stall_wa(req);
}
- ret = intel_ring_begin(req, 4);
- if (ret)
- return ret;
+ cs = intel_ring_begin(req, 4);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
- intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
- intel_ring_emit(ring, flags);
- intel_ring_emit(ring, scratch_addr);
- intel_ring_emit(ring, 0);
- intel_ring_advance(ring);
+ *cs++ = GFX_OP_PIPE_CONTROL(4);
+ *cs++ = flags;
+ *cs++ = scratch_addr;
+ *cs++ = 0;
+ intel_ring_advance(req, cs);
return 0;
}
gen8_emit_pipe_control(struct drm_i915_gem_request *req,
u32 flags, u32 scratch_addr)
{
- struct intel_ring *ring = req->ring;
- int ret;
+ u32 *cs;
- ret = intel_ring_begin(req, 6);
- if (ret)
- return ret;
+ cs = intel_ring_begin(req, 6);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
- intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
- intel_ring_emit(ring, flags);
- intel_ring_emit(ring, scratch_addr);
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, 0);
- intel_ring_advance(ring);
+ *cs++ = GFX_OP_PIPE_CONTROL(6);
+ *cs++ = flags;
+ *cs++ = scratch_addr;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+ intel_ring_advance(req, cs);
return 0;
}
static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
{
- struct intel_ring *ring = req->ring;
struct i915_workarounds *w = &req->i915->workarounds;
+ u32 *cs;
int ret, i;
if (w->count == 0)
if (ret)
return ret;
- ret = intel_ring_begin(req, (w->count * 2 + 2));
- if (ret)
- return ret;
+ cs = intel_ring_begin(req, (w->count * 2 + 2));
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
+ *cs++ = MI_LOAD_REGISTER_IMM(w->count);
for (i = 0; i < w->count; i++) {
- intel_ring_emit_reg(ring, w->reg[i].addr);
- intel_ring_emit(ring, w->reg[i].value);
+ *cs++ = i915_mmio_reg_offset(w->reg[i].addr);
+ *cs++ = w->reg[i].value;
}
- intel_ring_emit(ring, MI_NOOP);
+ *cs++ = MI_NOOP;
- intel_ring_advance(ring);
+ intel_ring_advance(req, cs);
ret = req->engine->emit_flush(req, EMIT_BARRIER);
if (ret)
i915_vma_unpin_and_release(&dev_priv->semaphore);
}
-static u32 *gen8_rcs_signal(struct drm_i915_gem_request *req, u32 *out)
+static u32 *gen8_rcs_signal(struct drm_i915_gem_request *req, u32 *cs)
{
struct drm_i915_private *dev_priv = req->i915;
struct intel_engine_cs *waiter;
if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
continue;
- *out++ = GFX_OP_PIPE_CONTROL(6);
- *out++ = (PIPE_CONTROL_GLOBAL_GTT_IVB |
- PIPE_CONTROL_QW_WRITE |
- PIPE_CONTROL_CS_STALL);
- *out++ = lower_32_bits(gtt_offset);
- *out++ = upper_32_bits(gtt_offset);
- *out++ = req->global_seqno;
- *out++ = 0;
- *out++ = (MI_SEMAPHORE_SIGNAL |
- MI_SEMAPHORE_TARGET(waiter->hw_id));
- *out++ = 0;
+ *cs++ = GFX_OP_PIPE_CONTROL(6);
+ *cs++ = PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_QW_WRITE |
+ PIPE_CONTROL_CS_STALL;
+ *cs++ = lower_32_bits(gtt_offset);
+ *cs++ = upper_32_bits(gtt_offset);
+ *cs++ = req->global_seqno;
+ *cs++ = 0;
+ *cs++ = MI_SEMAPHORE_SIGNAL |
+ MI_SEMAPHORE_TARGET(waiter->hw_id);
+ *cs++ = 0;
}
- return out;
+ return cs;
}
-static u32 *gen8_xcs_signal(struct drm_i915_gem_request *req, u32 *out)
+static u32 *gen8_xcs_signal(struct drm_i915_gem_request *req, u32 *cs)
{
struct drm_i915_private *dev_priv = req->i915;
struct intel_engine_cs *waiter;
if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
continue;
- *out++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW;
- *out++ = lower_32_bits(gtt_offset) | MI_FLUSH_DW_USE_GTT;
- *out++ = upper_32_bits(gtt_offset);
- *out++ = req->global_seqno;
- *out++ = (MI_SEMAPHORE_SIGNAL |
- MI_SEMAPHORE_TARGET(waiter->hw_id));
- *out++ = 0;
+ *cs++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW;
+ *cs++ = lower_32_bits(gtt_offset) | MI_FLUSH_DW_USE_GTT;
+ *cs++ = upper_32_bits(gtt_offset);
+ *cs++ = req->global_seqno;
+ *cs++ = MI_SEMAPHORE_SIGNAL |
+ MI_SEMAPHORE_TARGET(waiter->hw_id);
+ *cs++ = 0;
}
- return out;
+ return cs;
}
-static u32 *gen6_signal(struct drm_i915_gem_request *req, u32 *out)
+static u32 *gen6_signal(struct drm_i915_gem_request *req, u32 *cs)
{
struct drm_i915_private *dev_priv = req->i915;
struct intel_engine_cs *engine;
mbox_reg = req->engine->semaphore.mbox.signal[engine->hw_id];
if (i915_mmio_reg_valid(mbox_reg)) {
- *out++ = MI_LOAD_REGISTER_IMM(1);
- *out++ = i915_mmio_reg_offset(mbox_reg);
- *out++ = req->global_seqno;
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = i915_mmio_reg_offset(mbox_reg);
+ *cs++ = req->global_seqno;
num_rings++;
}
}
if (num_rings & 1)
- *out++ = MI_NOOP;
+ *cs++ = MI_NOOP;
- return out;
+ return cs;
}
static void i9xx_submit_request(struct drm_i915_gem_request *request)
I915_WRITE_TAIL(request->engine, request->tail);
}
-static void i9xx_emit_breadcrumb(struct drm_i915_gem_request *req,
- u32 *out)
+static void i9xx_emit_breadcrumb(struct drm_i915_gem_request *req, u32 *cs)
{
- *out++ = MI_STORE_DWORD_INDEX;
- *out++ = I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT;
- *out++ = req->global_seqno;
- *out++ = MI_USER_INTERRUPT;
+ *cs++ = MI_STORE_DWORD_INDEX;
+ *cs++ = I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT;
+ *cs++ = req->global_seqno;
+ *cs++ = MI_USER_INTERRUPT;
- req->tail = intel_ring_offset(req->ring, out);
+ req->tail = intel_ring_offset(req, cs);
}
static const int i9xx_emit_breadcrumb_sz = 4;
* Update the mailbox registers in the *other* rings with the current seqno.
* This acts like a signal in the canonical semaphore.
*/
-static void gen6_sema_emit_breadcrumb(struct drm_i915_gem_request *req,
- u32 *out)
+static void gen6_sema_emit_breadcrumb(struct drm_i915_gem_request *req, u32 *cs)
{
return i9xx_emit_breadcrumb(req,
- req->engine->semaphore.signal(req, out));
+ req->engine->semaphore.signal(req, cs));
}
static void gen8_render_emit_breadcrumb(struct drm_i915_gem_request *req,
- u32 *out)
+ u32 *cs)
{
struct intel_engine_cs *engine = req->engine;
if (engine->semaphore.signal)
- out = engine->semaphore.signal(req, out);
-
- *out++ = GFX_OP_PIPE_CONTROL(6);
- *out++ = (PIPE_CONTROL_GLOBAL_GTT_IVB |
- PIPE_CONTROL_CS_STALL |
- PIPE_CONTROL_QW_WRITE);
- *out++ = intel_hws_seqno_address(engine);
- *out++ = 0;
- *out++ = req->global_seqno;
+ cs = engine->semaphore.signal(req, cs);
+
+ *cs++ = GFX_OP_PIPE_CONTROL(6);
+ *cs++ = PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_QW_WRITE;
+ *cs++ = intel_hws_seqno_address(engine);
+ *cs++ = 0;
+ *cs++ = req->global_seqno;
/* We're thrashing one dword of HWS. */
- *out++ = 0;
- *out++ = MI_USER_INTERRUPT;
- *out++ = MI_NOOP;
+ *cs++ = 0;
+ *cs++ = MI_USER_INTERRUPT;
+ *cs++ = MI_NOOP;
- req->tail = intel_ring_offset(req->ring, out);
+ req->tail = intel_ring_offset(req, cs);
}
static const int gen8_render_emit_breadcrumb_sz = 8;
gen8_ring_sync_to(struct drm_i915_gem_request *req,
struct drm_i915_gem_request *signal)
{
- struct intel_ring *ring = req->ring;
struct drm_i915_private *dev_priv = req->i915;
u64 offset = GEN8_WAIT_OFFSET(req->engine, signal->engine->id);
struct i915_hw_ppgtt *ppgtt;
- int ret;
+ u32 *cs;
- ret = intel_ring_begin(req, 4);
- if (ret)
- return ret;
+ cs = intel_ring_begin(req, 4);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
- intel_ring_emit(ring,
- MI_SEMAPHORE_WAIT |
- MI_SEMAPHORE_GLOBAL_GTT |
- MI_SEMAPHORE_SAD_GTE_SDD);
- intel_ring_emit(ring, signal->global_seqno);
- intel_ring_emit(ring, lower_32_bits(offset));
- intel_ring_emit(ring, upper_32_bits(offset));
- intel_ring_advance(ring);
+ *cs++ = MI_SEMAPHORE_WAIT | MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_SAD_GTE_SDD;
+ *cs++ = signal->global_seqno;
+ *cs++ = lower_32_bits(offset);
+ *cs++ = upper_32_bits(offset);
+ intel_ring_advance(req, cs);
/* When the !RCS engines idle waiting upon a semaphore, they lose their
* pagetables and we must reload them before executing the batch.
gen6_ring_sync_to(struct drm_i915_gem_request *req,
struct drm_i915_gem_request *signal)
{
- struct intel_ring *ring = req->ring;
u32 dw1 = MI_SEMAPHORE_MBOX |
MI_SEMAPHORE_COMPARE |
MI_SEMAPHORE_REGISTER;
u32 wait_mbox = signal->engine->semaphore.mbox.wait[req->engine->hw_id];
- int ret;
+ u32 *cs;
WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);
- ret = intel_ring_begin(req, 4);
- if (ret)
- return ret;
+ cs = intel_ring_begin(req, 4);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
- intel_ring_emit(ring, dw1 | wait_mbox);
+ *cs++ = dw1 | wait_mbox;
/* Throughout all of the GEM code, seqno passed implies our current
* seqno is >= the last seqno executed. However for hardware the
* comparison is strictly greater than.
*/
- intel_ring_emit(ring, signal->global_seqno - 1);
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
+ *cs++ = signal->global_seqno - 1;
+ *cs++ = 0;
+ *cs++ = MI_NOOP;
+ intel_ring_advance(req, cs);
return 0;
}
static int
bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
- struct intel_ring *ring = req->ring;
- int ret;
+ u32 *cs;
- ret = intel_ring_begin(req, 2);
- if (ret)
- return ret;
+ cs = intel_ring_begin(req, 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
- intel_ring_emit(ring, MI_FLUSH);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
+ *cs++ = MI_FLUSH;
+ *cs++ = MI_NOOP;
+ intel_ring_advance(req, cs);
return 0;
}
u64 offset, u32 length,
unsigned int dispatch_flags)
{
- struct intel_ring *ring = req->ring;
- int ret;
+ u32 *cs;
- ret = intel_ring_begin(req, 2);
- if (ret)
- return ret;
+ cs = intel_ring_begin(req, 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
- intel_ring_emit(ring,
- MI_BATCH_BUFFER_START |
- MI_BATCH_GTT |
- (dispatch_flags & I915_DISPATCH_SECURE ?
- 0 : MI_BATCH_NON_SECURE_I965));
- intel_ring_emit(ring, offset);
- intel_ring_advance(ring);
+ *cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT | (dispatch_flags &
+ I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965);
+ *cs++ = offset;
+ intel_ring_advance(req, cs);
return 0;
}
u64 offset, u32 len,
unsigned int dispatch_flags)
{
- struct intel_ring *ring = req->ring;
- u32 cs_offset = i915_ggtt_offset(req->engine->scratch);
- int ret;
+ u32 *cs, cs_offset = i915_ggtt_offset(req->engine->scratch);
- ret = intel_ring_begin(req, 6);
- if (ret)
- return ret;
+ cs = intel_ring_begin(req, 6);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
/* Evict the invalid PTE TLBs */
- intel_ring_emit(ring, COLOR_BLT_CMD | BLT_WRITE_RGBA);
- intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096);
- intel_ring_emit(ring, I830_TLB_ENTRIES << 16 | 4); /* load each page */
- intel_ring_emit(ring, cs_offset);
- intel_ring_emit(ring, 0xdeadbeef);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
+ *cs++ = COLOR_BLT_CMD | BLT_WRITE_RGBA;
+ *cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096;
+ *cs++ = I830_TLB_ENTRIES << 16 | 4; /* load each page */
+ *cs++ = cs_offset;
+ *cs++ = 0xdeadbeef;
+ *cs++ = MI_NOOP;
+ intel_ring_advance(req, cs);
if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) {
if (len > I830_BATCH_LIMIT)
return -ENOSPC;
- ret = intel_ring_begin(req, 6 + 2);
- if (ret)
- return ret;
+ cs = intel_ring_begin(req, 6 + 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
/* Blit the batch (which has now all relocs applied) to the
* stable batch scratch bo area (so that the CS never
* stumbles over its tlb invalidation bug) ...
*/
- intel_ring_emit(ring, SRC_COPY_BLT_CMD | BLT_WRITE_RGBA);
- intel_ring_emit(ring,
- BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096);
- intel_ring_emit(ring, DIV_ROUND_UP(len, 4096) << 16 | 4096);
- intel_ring_emit(ring, cs_offset);
- intel_ring_emit(ring, 4096);
- intel_ring_emit(ring, offset);
-
- intel_ring_emit(ring, MI_FLUSH);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
+ *cs++ = SRC_COPY_BLT_CMD | BLT_WRITE_RGBA;
+ *cs++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096;
+ *cs++ = DIV_ROUND_UP(len, 4096) << 16 | 4096;
+ *cs++ = cs_offset;
+ *cs++ = 4096;
+ *cs++ = offset;
+
+ *cs++ = MI_FLUSH;
+ *cs++ = MI_NOOP;
+ intel_ring_advance(req, cs);
/* ... and execute it. */
offset = cs_offset;
}
- ret = intel_ring_begin(req, 2);
- if (ret)
- return ret;
+ cs = intel_ring_begin(req, 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
- intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
- intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
- 0 : MI_BATCH_NON_SECURE));
- intel_ring_advance(ring);
+ *cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
+ *cs++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ? 0 :
+ MI_BATCH_NON_SECURE);
+ intel_ring_advance(req, cs);
return 0;
}
u64 offset, u32 len,
unsigned int dispatch_flags)
{
- struct intel_ring *ring = req->ring;
- int ret;
+ u32 *cs;
- ret = intel_ring_begin(req, 2);
- if (ret)
- return ret;
+ cs = intel_ring_begin(req, 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
- intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
- intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
- 0 : MI_BATCH_NON_SECURE));
- intel_ring_advance(ring);
+ *cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
+ *cs++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ? 0 :
+ MI_BATCH_NON_SECURE);
+ intel_ring_advance(req, cs);
return 0;
}
static int ring_request_alloc(struct drm_i915_gem_request *request)
{
- int ret;
+ u32 *cs;
GEM_BUG_ON(!request->ctx->engine[request->engine->id].pin_count);
GEM_BUG_ON(!request->engine->buffer);
request->ring = request->engine->buffer;
- ret = intel_ring_begin(request, 0);
- if (ret)
- return ret;
+ cs = intel_ring_begin(request, 0);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
request->reserved_space -= LEGACY_REQUEST_SIZE;
return 0;
return 0;
}
-int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
+u32 *intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
{
struct intel_ring *ring = req->ring;
int remain_actual = ring->size - ring->tail;
int bytes = num_dwords * sizeof(u32);
int total_bytes, wait_bytes;
bool need_wrap = false;
+ u32 *cs;
total_bytes = bytes + req->reserved_space;
if (wait_bytes > ring->space) {
int ret = wait_for_space(req, wait_bytes);
if (unlikely(ret))
- return ret;
+ return ERR_PTR(ret);
}
if (unlikely(need_wrap)) {
ring->space -= remain_actual;
}
+ GEM_BUG_ON(ring->tail > ring->size - bytes);
+ cs = ring->vaddr + ring->tail;
+ ring->tail += bytes;
ring->space -= bytes;
GEM_BUG_ON(ring->space < 0);
- GEM_DEBUG_EXEC(ring->advance = ring->tail + bytes);
- return 0;
+
+ return cs;
}
/* Align the ring tail to a cacheline boundary */
int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
{
- struct intel_ring *ring = req->ring;
int num_dwords =
- (ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
- int ret;
+ (req->ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
+ u32 *cs;
if (num_dwords == 0)
return 0;
num_dwords = CACHELINE_BYTES / sizeof(uint32_t) - num_dwords;
- ret = intel_ring_begin(req, num_dwords);
- if (ret)
- return ret;
+ cs = intel_ring_begin(req, num_dwords);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
while (num_dwords--)
- intel_ring_emit(ring, MI_NOOP);
+ *cs++ = MI_NOOP;
- intel_ring_advance(ring);
+ intel_ring_advance(req, cs);
return 0;
}
static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
- struct intel_ring *ring = req->ring;
- uint32_t cmd;
- int ret;
+ u32 cmd, *cs;
- ret = intel_ring_begin(req, 4);
- if (ret)
- return ret;
+ cs = intel_ring_begin(req, 4);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
cmd = MI_FLUSH_DW;
if (INTEL_GEN(req->i915) >= 8)
if (mode & EMIT_INVALIDATE)
cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
- intel_ring_emit(ring, cmd);
- intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
+ *cs++ = cmd;
+ *cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
if (INTEL_GEN(req->i915) >= 8) {
- intel_ring_emit(ring, 0); /* upper addr */
- intel_ring_emit(ring, 0); /* value */
+ *cs++ = 0; /* upper addr */
+ *cs++ = 0; /* value */
} else {
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, MI_NOOP);
+ *cs++ = 0;
+ *cs++ = MI_NOOP;
}
- intel_ring_advance(ring);
+ intel_ring_advance(req, cs);
return 0;
}
u64 offset, u32 len,
unsigned int dispatch_flags)
{
- struct intel_ring *ring = req->ring;
bool ppgtt = USES_PPGTT(req->i915) &&
!(dispatch_flags & I915_DISPATCH_SECURE);
- int ret;
+ u32 *cs;
- ret = intel_ring_begin(req, 4);
- if (ret)
- return ret;
+ cs = intel_ring_begin(req, 4);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
/* FIXME(BDW): Address space and security selectors. */
- intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8) |
- (dispatch_flags & I915_DISPATCH_RS ?
- MI_BATCH_RESOURCE_STREAMER : 0));
- intel_ring_emit(ring, lower_32_bits(offset));
- intel_ring_emit(ring, upper_32_bits(offset));
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
+ *cs++ = MI_BATCH_BUFFER_START_GEN8 | (ppgtt << 8) | (dispatch_flags &
+ I915_DISPATCH_RS ? MI_BATCH_RESOURCE_STREAMER : 0);
+ *cs++ = lower_32_bits(offset);
+ *cs++ = upper_32_bits(offset);
+ *cs++ = MI_NOOP;
+ intel_ring_advance(req, cs);
return 0;
}
u64 offset, u32 len,
unsigned int dispatch_flags)
{
- struct intel_ring *ring = req->ring;
- int ret;
+ u32 *cs;
- ret = intel_ring_begin(req, 2);
- if (ret)
- return ret;
+ cs = intel_ring_begin(req, 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
- intel_ring_emit(ring,
- MI_BATCH_BUFFER_START |
- (dispatch_flags & I915_DISPATCH_SECURE ?
- 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW) |
- (dispatch_flags & I915_DISPATCH_RS ?
- MI_BATCH_RESOURCE_STREAMER : 0));
+ *cs++ = MI_BATCH_BUFFER_START | (dispatch_flags & I915_DISPATCH_SECURE ?
+ 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW) |
+ (dispatch_flags & I915_DISPATCH_RS ?
+ MI_BATCH_RESOURCE_STREAMER : 0);
/* bit0-7 is the length on GEN6+ */
- intel_ring_emit(ring, offset);
- intel_ring_advance(ring);
+ *cs++ = offset;
+ intel_ring_advance(req, cs);
return 0;
}
u64 offset, u32 len,
unsigned int dispatch_flags)
{
- struct intel_ring *ring = req->ring;
- int ret;
+ u32 *cs;
- ret = intel_ring_begin(req, 2);
- if (ret)
- return ret;
+ cs = intel_ring_begin(req, 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
- intel_ring_emit(ring,
- MI_BATCH_BUFFER_START |
- (dispatch_flags & I915_DISPATCH_SECURE ?
- 0 : MI_BATCH_NON_SECURE_I965));
+ *cs++ = MI_BATCH_BUFFER_START | (dispatch_flags & I915_DISPATCH_SECURE ?
+ 0 : MI_BATCH_NON_SECURE_I965);
/* bit0-7 is the length on GEN6+ */
- intel_ring_emit(ring, offset);
- intel_ring_advance(ring);
+ *cs++ = offset;
+ intel_ring_advance(req, cs);
return 0;
}
static int gen6_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
- struct intel_ring *ring = req->ring;
- uint32_t cmd;
- int ret;
+ u32 cmd, *cs;
- ret = intel_ring_begin(req, 4);
- if (ret)
- return ret;
+ cs = intel_ring_begin(req, 4);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
cmd = MI_FLUSH_DW;
if (INTEL_GEN(req->i915) >= 8)
*/
if (mode & EMIT_INVALIDATE)
cmd |= MI_INVALIDATE_TLB;
- intel_ring_emit(ring, cmd);
- intel_ring_emit(ring,
- I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
+ *cs++ = cmd;
+ *cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
if (INTEL_GEN(req->i915) >= 8) {
- intel_ring_emit(ring, 0); /* upper addr */
- intel_ring_emit(ring, 0); /* value */
+ *cs++ = 0; /* upper addr */
+ *cs++ = 0; /* value */
} else {
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, MI_NOOP);
+ *cs++ = 0;
+ *cs++ = MI_NOOP;
}
- intel_ring_advance(ring);
+ intel_ring_advance(req, cs);
return 0;
}
u32 head;
u32 tail;
- GEM_DEBUG_DECL(u32 advance);
int space;
int size;
#define I915_DISPATCH_PINNED BIT(1)
#define I915_DISPATCH_RS BIT(2)
void (*emit_breadcrumb)(struct drm_i915_gem_request *req,
- u32 *out);
+ u32 *cs);
int emit_breadcrumb_sz;
/* Pass the request to the hardware queue (e.g. directly into
/* AKA wait() */
int (*sync_to)(struct drm_i915_gem_request *req,
struct drm_i915_gem_request *signal);
- u32 *(*signal)(struct drm_i915_gem_request *req, u32 *out);
+ u32 *(*signal)(struct drm_i915_gem_request *req, u32 *cs);
} semaphore;
/* Execlists */
void intel_legacy_submission_resume(struct drm_i915_private *dev_priv);
-int __must_check intel_ring_begin(struct drm_i915_gem_request *req, int n);
int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);
-static inline void intel_ring_emit(struct intel_ring *ring, u32 data)
-{
- *(uint32_t *)(ring->vaddr + ring->tail) = data;
- ring->tail += 4;
-}
+u32 __must_check *intel_ring_begin(struct drm_i915_gem_request *req, int n);
-static inline void intel_ring_emit_reg(struct intel_ring *ring, i915_reg_t reg)
-{
- intel_ring_emit(ring, i915_mmio_reg_offset(reg));
-}
-
-static inline void intel_ring_advance(struct intel_ring *ring)
+static inline void
+intel_ring_advance(struct drm_i915_gem_request *req, u32 *cs)
{
/* Dummy function.
*
* reserved for the command packet (i.e. the value passed to
* intel_ring_begin()).
*/
- GEM_DEBUG_BUG_ON(ring->tail != ring->advance);
+ GEM_BUG_ON((req->ring->vaddr + req->ring->tail) != cs);
}
-static inline u32 intel_ring_offset(struct intel_ring *ring, void *addr)
+static inline u32
+intel_ring_offset(struct drm_i915_gem_request *req, void *addr)
{
/* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
- u32 offset = addr - ring->vaddr;
- return offset & (ring->size - 1);
+ u32 offset = addr - req->ring->vaddr;
+ GEM_BUG_ON(offset > req->ring->size);
+ return offset & (req->ring->size - 1);
}
int __intel_ring_space(int head, int tail, int size);
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct drm_i915_gem_request *rq;
struct i915_vma *vma;
+ u32 *cs;
int err;
err = i915_gem_object_set_to_gtt_domain(obj, true);
return PTR_ERR(rq);
}
- err = intel_ring_begin(rq, 4);
- if (err) {
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs)) {
__i915_add_request(rq, false);
i915_vma_unpin(vma);
- return err;
+ return PTR_ERR(cs);
}
if (INTEL_GEN(i915) >= 8) {
- intel_ring_emit(rq->ring, MI_STORE_DWORD_IMM_GEN4 | 1 << 22);
- intel_ring_emit(rq->ring, lower_32_bits(i915_ggtt_offset(vma) + offset));
- intel_ring_emit(rq->ring, upper_32_bits(i915_ggtt_offset(vma) + offset));
- intel_ring_emit(rq->ring, v);
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | 1 << 22;
+ *cs++ = lower_32_bits(i915_ggtt_offset(vma) + offset);
+ *cs++ = upper_32_bits(i915_ggtt_offset(vma) + offset);
+ *cs++ = v;
} else if (INTEL_GEN(i915) >= 4) {
- intel_ring_emit(rq->ring, MI_STORE_DWORD_IMM_GEN4 | 1 << 22);
- intel_ring_emit(rq->ring, 0);
- intel_ring_emit(rq->ring, i915_ggtt_offset(vma) + offset);
- intel_ring_emit(rq->ring, v);
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | 1 << 22;
+ *cs++ = 0;
+ *cs++ = i915_ggtt_offset(vma) + offset;
+ *cs++ = v;
} else {
- intel_ring_emit(rq->ring, MI_STORE_DWORD_IMM | 1 << 22);
- intel_ring_emit(rq->ring, i915_ggtt_offset(vma) + offset);
- intel_ring_emit(rq->ring, v);
- intel_ring_emit(rq->ring, MI_NOOP);
+ *cs++ = MI_STORE_DWORD_IMM | 1 << 22;
+ *cs++ = i915_ggtt_offset(vma) + offset;
+ *cs++ = v;
+ *cs++ = MI_NOOP;
}
- intel_ring_advance(rq->ring);
+ intel_ring_advance(rq, cs);
i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
i915_vma_unpin(vma);