}
mutex_unlock(&dev_priv->drm.struct_mutex);
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put_unchecked(dev_priv);
}
static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
_clear_vgpu_fence(vgpu);
mutex_unlock(&dev_priv->drm.struct_mutex);
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put_unchecked(dev_priv);
return 0;
out_free_fence:
gvt_vgpu_err("Failed to alloc fences\n");
vgpu->fence.regs[i] = NULL;
}
mutex_unlock(&dev_priv->drm.struct_mutex);
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put_unchecked(dev_priv);
return -ENOSPC;
}
intel_runtime_pm_get(dev_priv);
_clear_vgpu_fence(vgpu);
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put_unchecked(dev_priv);
}
/**
static inline void mmio_hw_access_post(struct drm_i915_private *dev_priv)
{
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put_unchecked(dev_priv);
}
/**
}
}
spin_unlock_bh(&scheduler->mmio_context_lock);
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put_unchecked(dev_priv);
mutex_unlock(&vgpu->gvt->sched_lock);
}
intel_uncore_forcewake_put(gvt->dev_priv,
FORCEWAKE_ALL);
- intel_runtime_pm_put(gvt->dev_priv);
+ intel_runtime_pm_put_unchecked(gvt->dev_priv);
if (ret && (vgpu_is_vm_unhealthy(ret)))
enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
}
mutex_lock(&dev_priv->drm.struct_mutex);
ret = intel_gvt_scan_and_shadow_workload(workload);
mutex_unlock(&dev_priv->drm.struct_mutex);
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put_unchecked(dev_priv);
}
if (ret && (vgpu_is_vm_unhealthy(ret))) {
}
}
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put_unchecked(dev_priv);
return 0;
}
intel_runtime_pm_get(i915);
gpu = i915_capture_gpu_state(i915);
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put_unchecked(i915);
if (IS_ERR(gpu))
return PTR_ERR(gpu);
seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put_unchecked(dev_priv);
return ret;
}
intel_engine_get_instdone(dev_priv->engine[RCS], &instdone);
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put_unchecked(dev_priv);
if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer))
seq_printf(m, "Hangcheck active, timer fires in %dms\n",
else
err = ironlake_drpc_info(m);
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put_unchecked(dev_priv);
return err;
}
}
mutex_unlock(&fbc->lock);
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put_unchecked(dev_priv);
return 0;
}
seq_puts(m, "Currently: disabled\n");
}
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put_unchecked(dev_priv);
return 0;
}
sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put_unchecked(dev_priv);
seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));
seq_printf(m, "GFX power: %ld\n", gfx);
seq_printf(m, "Total power: %ld\n", chipset + gfx);
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put_unchecked(dev_priv);
return 0;
}
mutex_unlock(&dev_priv->pcu_lock);
out:
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put_unchecked(dev_priv);
return ret;
}
if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
seq_puts(m, "L-shaped memory detected\n");
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put_unchecked(dev_priv);
return 0;
}
act_freq = intel_get_cagf(dev_priv,
I915_READ(GEN6_RPSTAT1));
}
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put_unchecked(dev_priv);
}
seq_printf(m, "RPS enabled? %d\n", rps->enabled);
intel_runtime_pm_get(dev_priv);
seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put_unchecked(dev_priv);
return 0;
}
for (i = 0; i < 16; i++)
seq_printf(m, "\t%2d: \t0x%x\n", i, I915_READ(SOFT_SCRATCH(i)));
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put_unchecked(dev_priv);
return 0;
}
dev_priv->psr.last_exit);
}
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put_unchecked(dev_priv);
return 0;
}
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put_unchecked(dev_priv);
return ret;
}
intel_runtime_pm_get(dev_priv);
if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power)) {
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put_unchecked(dev_priv);
return -ENODEV;
}
power = I915_READ(MCH_SECP_NRG_STTS);
power = (1000000 * power) >> units; /* convert to uJ */
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put_unchecked(dev_priv);
seq_printf(m, "%llu", power);
seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put_unchecked(dev_priv);
return 0;
}
drm_connector_list_iter_end(&conn_iter);
mutex_unlock(&dev->mode_config.mutex);
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put_unchecked(dev_priv);
return 0;
}
for_each_engine(engine, dev_priv, id)
intel_engine_dump(engine, &p, "%s\n", engine->name);
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put_unchecked(dev_priv);
return 0;
}
dev_priv->wm.distrust_bios_wm = true;
dev_priv->ipc_enabled = enable;
intel_enable_ipc(dev_priv);
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put_unchecked(dev_priv);
return len;
}
i915_gem_drain_freed_objects(i915);
out:
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put_unchecked(i915);
return ret;
}
snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put_unchecked(dev_priv);
*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put_unchecked(dev_priv);
return 0;
}
gen10_sseu_device_status(dev_priv, &sseu);
}
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put_unchecked(dev_priv);
i915_print_sseu_info(m, false, &sseu);
return 0;
intel_uncore_forcewake_user_put(i915);
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put_unchecked(i915);
return 0;
}
__i915_printk(i915, i915_error_injected() ? KERN_DEBUG : KERN_ERR, \
fmt, ##__VA_ARGS__)
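+/*
+ * Cookie identifying a single runtime-pm wakeref acquisition; debug
+ * builds use it to match each intel_runtime_pm_get*() with its put.
+ */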
+typedef depot_stack_handle_t intel_wakeref_t;
+
enum hpd_pin {
HPD_NONE = 0,
HPD_TV = HPD_NONE, /* TV is known to be unreliable */
intel_display_power_put(i915, POWER_DOMAIN_GT_IRQ);
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put_unchecked(i915);
return i915->gt.epoch;
}
POSTING_READ_FW(RING_HEAD(RENDER_RING_BASE));
spin_unlock_irq(&dev_priv->uncore.lock);
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put_unchecked(dev_priv);
}
static void
i915_vma_unpin(vma);
}
out_unlock:
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put_unchecked(i915);
mutex_unlock(&i915->drm.struct_mutex);
return ret;
i915_vma_unpin(vma);
}
out_rpm:
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put_unchecked(i915);
out_unlock:
mutex_unlock(&i915->drm.struct_mutex);
return ret;
err_unlock:
mutex_unlock(&dev->struct_mutex);
err_rpm:
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put_unchecked(dev_priv);
i915_gem_object_unpin_pages(obj);
err:
switch (ret) {
wmb();
out:
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put_unchecked(i915);
}
void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
if (on)
cond_resched();
}
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put_unchecked(i915);
}
static void i915_gem_flush_free_objects(struct drm_i915_private *i915)
intel_engines_sanitize(i915, false);
intel_uncore_forcewake_put(i915, FORCEWAKE_ALL);
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put_unchecked(i915);
i915_gem_contexts_lost(i915);
mutex_unlock(&i915->drm.struct_mutex);
if (WARN_ON(!intel_engines_are_idle(i915)))
i915_gem_set_wedged(i915); /* no hope, discard everything */
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put_unchecked(i915);
return 0;
err_unlock:
mutex_unlock(&i915->drm.struct_mutex);
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put_unchecked(i915);
return ret;
}
eb_release_vmas(&eb);
mutex_unlock(&dev->struct_mutex);
err_rpm:
- intel_runtime_pm_put(eb.i915);
+ intel_runtime_pm_put_unchecked(eb.i915);
i915_gem_context_put(eb.ctx);
err_destroy:
eb_destroy(&eb);
*/
if (intel_runtime_pm_get_if_in_use(fence->i915)) {
fence_write(fence, vma);
- intel_runtime_pm_put(fence->i915);
+ intel_runtime_pm_put_unchecked(fence->i915);
}
if (vma) {
intel_runtime_pm_get(i915);
vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put_unchecked(i915);
vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
intel_runtime_pm_get(i915);
vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put_unchecked(i915);
}
static int aliasing_gtt_bind_vma(struct i915_vma *vma,
if (flags & I915_VMA_GLOBAL_BIND) {
intel_runtime_pm_get(i915);
vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put_unchecked(i915);
}
return 0;
if (vma->flags & I915_VMA_GLOBAL_BIND) {
intel_runtime_pm_get(i915);
vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put_unchecked(i915);
}
if (vma->flags & I915_VMA_LOCAL_BIND) {
}
if (flags & I915_SHRINK_BOUND)
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put_unchecked(i915);
i915_retire_requests(i915);
I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND |
I915_SHRINK_ACTIVE);
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put_unchecked(i915);
return freed;
}
I915_SHRINK_ACTIVE |
I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND);
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put_unchecked(i915);
}
shrinker_unlock(i915, unlock);
freed_pages = i915_gem_shrink(i915, -1UL, NULL,
I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND);
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put_unchecked(i915);
/* Because we may be allocating inside our own driver, we cannot
* assert that there are no objects with pinned pages that are not
I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND |
I915_SHRINK_VMAPS);
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put_unchecked(i915);
/* We also want to clear any cached iomaps as they wrap vmap */
list_for_each_entry_safe(vma, next,
wake_up_all(&dev_priv->gpu_error.reset_queue);
out:
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put_unchecked(dev_priv);
}
/* Called from drm generic code, passed 'crtc' which
free_oa_buffer(dev_priv);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put_unchecked(dev_priv);
if (stream->ctx)
oa_put_render_ctx_id(stream);
put_oa_config(dev_priv, stream->oa_config);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put_unchecked(dev_priv);
err_config:
if (stream->ctx)
if (fw)
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put_unchecked(dev_priv);
}
static void
intel_runtime_pm_get_if_in_use(dev_priv)) {
val = intel_get_cagf(dev_priv,
I915_READ_NOTRACE(GEN6_RPSTAT1));
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put_unchecked(dev_priv);
}
add_sample_mult(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_ACT],
if (intel_runtime_pm_get_if_in_use(i915)) {
val = __get_rc6(i915);
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put_unchecked(i915);
/*
* If we are coming back from being runtime suspended we must
intel_runtime_pm_get(dev_priv);
res = intel_rc6_residency_us(dev_priv, reg);
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put_unchecked(dev_priv);
return DIV_ROUND_CLOSEST_ULL(res, 1000);
}
}
mutex_unlock(&dev_priv->pcu_lock);
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put_unchecked(dev_priv);
return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}
val > rps->max_freq ||
val < rps->min_freq_softlimit) {
mutex_unlock(&dev_priv->pcu_lock);
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put_unchecked(dev_priv);
return -EINVAL;
}
mutex_unlock(&dev_priv->pcu_lock);
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put_unchecked(dev_priv);
return ret ?: count;
}
val > rps->max_freq ||
val > rps->max_freq_softlimit) {
mutex_unlock(&dev_priv->pcu_lock);
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put_unchecked(dev_priv);
return -EINVAL;
}
mutex_unlock(&dev_priv->pcu_lock);
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put_unchecked(dev_priv);
return ret ?: count;
}
err:
atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put_unchecked(dev_priv);
return vma;
}
#include <linux/i2c.h>
#include <linux/hdmi.h>
#include <linux/sched/clock.h>
+#include <linux/stackdepot.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include <drm/drm_crtc.h>
atomic_dec(&i915->runtime_pm.wakeref_count);
}
-void intel_runtime_pm_get(struct drm_i915_private *i915);
-bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *i915);
-void intel_runtime_pm_get_noresume(struct drm_i915_private *i915);
-void intel_runtime_pm_put(struct drm_i915_private *i915);
+intel_wakeref_t intel_runtime_pm_get(struct drm_i915_private *i915);
+intel_wakeref_t intel_runtime_pm_get_if_in_use(struct drm_i915_private *i915);
+intel_wakeref_t intel_runtime_pm_get_noresume(struct drm_i915_private *i915);
+
+void intel_runtime_pm_put_unchecked(struct drm_i915_private *i915);
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+void intel_runtime_pm_put(struct drm_i915_private *i915, intel_wakeref_t wref);
+#else
+#define intel_runtime_pm_put(i915, wref) intel_runtime_pm_put_unchecked(i915)
+#endif
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
void print_intel_runtime_pm_wakeref(struct drm_i915_private *i915,
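Taken together, the declarations above define the new contract: every acquisition returns an intel_wakeref_t cookie, the checked intel_runtime_pm_put() verifies that cookie when CONFIG_DRM_I915_DEBUG_RUNTIME_PM is enabled and discards it otherwise, and intel_runtime_pm_put_unchecked() keeps the old unbalanced behaviour for the call sites converted mechanically throughout this patch. A minimal sketch of a fully converted caller; show_status() is a hypothetical helper, not part of the patch:

	static int show_status(struct drm_i915_private *i915)
	{
		intel_wakeref_t wakeref;

		wakeref = intel_runtime_pm_get(i915);

		/* ... access hardware while the device is held awake ... */

		intel_runtime_pm_put(i915, wakeref); /* cookie checked in debug builds */
		return 0;
	}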
if (INTEL_GEN(dev_priv) > 2 && !(I915_READ_MODE(engine) & MODE_IDLE))
idle = false;
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put_unchecked(dev_priv);
return idle;
}
if (intel_runtime_pm_get_if_in_use(engine->i915)) {
intel_engine_print_registers(engine, m);
- intel_runtime_pm_put(engine->i915);
+ intel_runtime_pm_put_unchecked(engine->i915);
} else {
drm_printf(m, "\tDevice is asleep; skipping register dump\n");
}
ifbdev->vma = vma;
ifbdev->vma_flags = flags;
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put_unchecked(dev_priv);
mutex_unlock(&dev->struct_mutex);
vga_switcheroo_client_fb_set(pdev, info);
return 0;
out_unpin:
intel_unpin_fb_vma(vma, flags);
out_unlock:
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put_unchecked(dev_priv);
mutex_unlock(&dev->struct_mutex);
return ret;
}
*/
intel_runtime_pm_get(dev_priv);
guc_action_flush_log_complete(guc);
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put_unchecked(dev_priv);
}
int intel_guc_log_create(struct intel_guc_log *log)
ret = guc_action_control_log(guc, GUC_LOG_LEVEL_IS_VERBOSE(level),
GUC_LOG_LEVEL_IS_ENABLED(level),
GUC_LOG_LEVEL_TO_VERBOSITY(level));
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put_unchecked(dev_priv);
if (ret) {
DRM_DEBUG_DRIVER("guc_log_control action failed %d\n", ret);
goto out_unlock;
intel_runtime_pm_get(i915);
guc_action_flush_log(guc);
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put_unchecked(i915);
/* GuC would have updated log buffer by now, so capture it */
guc_log_capture_logs(log);
dev_priv->display.hpd_irq_setup(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put_unchecked(dev_priv);
}
bool intel_encoder_hotplug(struct intel_encoder *encoder,
intel_runtime_pm_get(dev_priv);
status = I915_READ(HUC_STATUS2) & HUC_FW_VERIFIED;
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put_unchecked(dev_priv);
return status;
}
ret = scale_hw_to_user(connector, hw_level, bd->props.max_brightness);
drm_modeset_unlock(&dev->mode_config.connection_mutex);
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put_unchecked(dev_priv);
return ret;
}
spin_lock_init(&rpm->debug.lock);
}
-static noinline void
+static noinline depot_stack_handle_t
track_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
{
struct i915_runtime_pm *rpm = &i915->runtime_pm;
assert_rpm_wakelock_held(i915);
if (!HAS_RUNTIME_PM(i915))
- return;
+ return -1;
stack = __save_depot_stack();
if (!stack)
- return;
+ return -1;
spin_lock_irqsave(&rpm->debug.lock, flags);
if (stacks) {
stacks[rpm->debug.count++] = stack;
rpm->debug.owners = stacks;
+ } else {
+ stack = -1;
}
spin_unlock_irqrestore(&rpm->debug.lock, flags);
+
+ return stack;
+}
+
+static void cancel_intel_runtime_pm_wakeref(struct drm_i915_private *i915,
+ depot_stack_handle_t stack)
+{
+ struct i915_runtime_pm *rpm = &i915->runtime_pm;
+ unsigned long flags, n;
+ bool found = false;
+
+ if (unlikely(stack == -1))
+ return;
+
+ spin_lock_irqsave(&rpm->debug.lock, flags);
+ for (n = rpm->debug.count; n--; ) {
+ if (rpm->debug.owners[n] == stack) {
+ memmove(rpm->debug.owners + n,
+ rpm->debug.owners + n + 1,
+ (--rpm->debug.count - n) * sizeof(stack));
+ found = true;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&rpm->debug.lock, flags);
+
+ if (WARN(!found,
+ "Unmatched wakeref (tracking %lu), count %u\n",
+ rpm->debug.count, atomic_read(&rpm->wakeref_count))) {
+ char *buf;
+
+ buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!buf)
+ return;
+
+ __print_depot_stack(stack, buf, PAGE_SIZE, 2);
+ DRM_DEBUG_DRIVER("wakeref %x from\n%s", stack, buf);
+
+ stack = READ_ONCE(rpm->debug.last_release);
+ if (stack) {
+ __print_depot_stack(stack, buf, PAGE_SIZE, 2);
+ DRM_DEBUG_DRIVER("wakeref last released at\n%s", buf);
+ }
+
+ kfree(buf);
+ }
}
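In short, track_intel_runtime_pm_wakeref() appends the acquisition's stack-depot handle to a growable array and hands it back as the cookie, while cancel_intel_runtime_pm_wakeref() memmove-removes the matching entry and warns when none is found. A self-contained userspace model of the same bookkeeping, with invented names and a plain counter standing in for the stack depot:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	struct wakeref_debug {
		unsigned int *owners;	/* cookies of outstanding acquisitions */
		unsigned long count;
		unsigned int next;	/* stand-in for the stack-depot handle */
	};

	static unsigned int debug_track(struct wakeref_debug *d)
	{
		unsigned int cookie = ++d->next;
		unsigned int *owners;

		owners = realloc(d->owners, (d->count + 1) * sizeof(*owners));
		if (!owners)
			return -1;	/* untracked, like the kernel fallback */

		owners[d->count++] = cookie;
		d->owners = owners;
		return cookie;
	}

	static void debug_cancel(struct wakeref_debug *d, unsigned int cookie)
	{
		unsigned long n;

		if (cookie == (unsigned int)-1)
			return;

		for (n = d->count; n--; ) {
			if (d->owners[n] == cookie) {
				memmove(d->owners + n, d->owners + n + 1,
					(--d->count - n) * sizeof(cookie));
				return;
			}
		}
		fprintf(stderr, "Unmatched wakeref %u (tracking %lu)\n",
			cookie, d->count);
	}

	int main(void)
	{
		struct wakeref_debug d = { 0 };
		unsigned int a = debug_track(&d);
		unsigned int b = debug_track(&d);

		debug_cancel(&d, a);
		debug_cancel(&d, b);
		debug_cancel(&d, b);	/* unmatched: triggers the warning */

		free(d.owners);
		return 0;
	}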
static int cmphandle(const void *_a, const void *_b)
{
}
-static void track_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
+static depot_stack_handle_t
+track_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
{
atomic_inc(&i915->runtime_pm.wakeref_count);
assert_rpm_wakelock_held(i915);
+ return -1;
}
static void untrack_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
mutex_unlock(&power_domains->lock);
if (!is_enabled)
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put_unchecked(dev_priv);
return is_enabled;
}
mutex_unlock(&power_domains->lock);
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put_unchecked(dev_priv);
}
#define I830_PIPES_POWER_DOMAINS ( \
void intel_power_domains_fini_hw(struct drm_i915_private *dev_priv)
{
/* Keep the power well enabled, but cancel its rpm wakeref. */
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put_unchecked(dev_priv);
/* Remove the refcount we took to keep power well support disabled. */
if (!i915_modparams.disable_power_well)
*
* Any runtime pm reference obtained by this function must have a symmetric
* call to intel_runtime_pm_put() to release the reference again.
+ *
+ * Returns: the wakeref cookie to pass to intel_runtime_pm_put()
*/
-void intel_runtime_pm_get(struct drm_i915_private *i915)
+intel_wakeref_t intel_runtime_pm_get(struct drm_i915_private *i915)
{
struct pci_dev *pdev = i915->drm.pdev;
struct device *kdev = &pdev->dev;
ret = pm_runtime_get_sync(kdev);
WARN_ONCE(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);
- track_intel_runtime_pm_wakeref(i915);
+ return track_intel_runtime_pm_wakeref(i915);
}
/**
* Any runtime pm reference obtained by this function must have a symmetric
* call to intel_runtime_pm_put() to release the reference again.
*
- * Returns: True if the wakeref was acquired, or False otherwise.
+ * Returns: the wakeref cookie to pass to intel_runtime_pm_put(); it
+ * evaluates as True if the wakeref was acquired, or False otherwise.
*/
-bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *i915)
+intel_wakeref_t intel_runtime_pm_get_if_in_use(struct drm_i915_private *i915)
{
if (IS_ENABLED(CONFIG_PM)) {
struct pci_dev *pdev = i915->drm.pdev;
* atm to the late/early system suspend/resume handlers.
*/
if (pm_runtime_get_if_in_use(kdev) <= 0)
- return false;
+ return 0;
}
- track_intel_runtime_pm_wakeref(i915);
-
- return true;
+ return track_intel_runtime_pm_wakeref(i915);
}
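Because a zero cookie is returned only when the device was not in use (the untracked fallback is -1, which is non-zero), callers keep the familiar conditional shape and merely store the cookie instead of a bool, as the PMU hunks earlier in this patch do. A sketch with a hypothetical read_sample() helper:

	static u64 read_sample(struct drm_i915_private *i915)
	{
		intel_wakeref_t wakeref;
		u64 val;

		wakeref = intel_runtime_pm_get_if_in_use(i915);
		if (!wakeref)
			return 0;	/* device asleep; nothing to read */

		val = I915_READ_NOTRACE(GEN6_RPSTAT1);
		intel_runtime_pm_put(i915, wakeref);
		return val;
	}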
/**
*
* Any runtime pm reference obtained by this function must have a symmetric
* call to intel_runtime_pm_put() to release the reference again.
+ *
+ * Returns: the wakeref cookie to pass to intel_runtime_pm_put()
*/
-void intel_runtime_pm_get_noresume(struct drm_i915_private *i915)
+intel_wakeref_t intel_runtime_pm_get_noresume(struct drm_i915_private *i915)
{
struct pci_dev *pdev = i915->drm.pdev;
struct device *kdev = &pdev->dev;
assert_rpm_wakelock_held(i915);
pm_runtime_get_noresume(kdev);
- track_intel_runtime_pm_wakeref(i915);
+ return track_intel_runtime_pm_wakeref(i915);
}
/**
* intel_runtime_pm_get() and might power down the corresponding
* hardware block right away if this is the last reference.
*/
-void intel_runtime_pm_put(struct drm_i915_private *i915)
+void intel_runtime_pm_put_unchecked(struct drm_i915_private *i915)
{
struct pci_dev *pdev = i915->drm.pdev;
struct device *kdev = &pdev->dev;
pm_runtime_put_autosuspend(kdev);
}
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+void intel_runtime_pm_put(struct drm_i915_private *i915, intel_wakeref_t wref)
+{
+ cancel_intel_runtime_pm_wakeref(i915, wref);
+ intel_runtime_pm_put_unchecked(i915);
+}
+#endif
+
/**
* intel_runtime_pm_enable - enable runtime pm
* @i915: i915 device instance
reg->val = I915_READ8(entry->offset_ldw);
else
ret = -EINVAL;
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put_unchecked(dev_priv);
return ret;
}
err = i915_subtests(tests, ctx);
out_unlock:
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put_unchecked(dev_priv);
mutex_unlock(&dev_priv->drm.struct_mutex);
mock_file_free(dev_priv, file);
i915_request_add(rq);
}
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put_unchecked(i915);
return err;
}
*/
trash_stolen(i915);
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put_unchecked(i915);
}
static int pm_prepare(struct drm_i915_private *i915)
i915_gem_suspend_gtt_mappings(i915);
i915_gem_suspend_late(i915);
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put_unchecked(i915);
}
static void pm_hibernate(struct drm_i915_private *i915)
i915_gem_freeze(i915);
i915_gem_freeze_late(i915);
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put_unchecked(i915);
}
static void pm_resume(struct drm_i915_private *i915)
i915_gem_sanitize(i915);
i915_gem_resume(i915);
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put_unchecked(i915);
}
static int igt_gem_suspend(void *arg)
}
}
unlock:
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put_unchecked(i915);
mutex_unlock(&i915->drm.struct_mutex);
kfree(offsets);
return err;
}
out_unlock:
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put_unchecked(i915);
mutex_unlock(&i915->drm.struct_mutex);
mock_file_free(i915, file);
return err;
intel_runtime_pm_get(i915);
err = gpu_fill(obj, ctx, engine, dw);
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put_unchecked(i915);
if (err) {
pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
ndwords, dw, max_dwords(obj),
intel_runtime_pm_get(i915);
err = gpu_fill(obj, ctx, engine, dw);
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put_unchecked(i915);
if (err) {
pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
ndwords, dw, max_dwords(obj),
count, RUNTIME_INFO(i915)->num_rings);
out_rpm:
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put_unchecked(i915);
out_unlock:
if (end_live_test(&t))
err = -EIO;
if (igt_flush_test(i915, I915_WAIT_LOCKED))
err = -EIO;
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put_unchecked(i915);
mutex_unlock(&i915->drm.struct_mutex);
kernel_context_close(ctx);
}
if (drm_mm_node_allocated(&hole))
drm_mm_remove_node(&hole);
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put_unchecked(i915);
mutex_unlock(&i915->drm.struct_mutex);
return err;
intel_runtime_pm_get(i915);
vm->insert_entries(vm, &mock_vma, I915_CACHE_NONE, 0);
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put_unchecked(i915);
}
count = n;
kfree(order);
out_remove:
ggtt->vm.clear_range(&ggtt->vm, tmp.start, tmp.size);
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put_unchecked(i915);
drm_mm_remove_node(&tmp);
out_unpin:
i915_gem_object_unpin_pages(obj);
}
out_unlock:
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put_unchecked(i915);
mutex_unlock(&i915->drm.struct_mutex);
i915_gem_object_unpin_pages(obj);
out:
if (!i915->gt.active_requests++) {
intel_runtime_pm_get(i915);
i915_gem_unpark(i915);
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put_unchecked(i915);
}
mutex_unlock(&i915->drm.struct_mutex);
cancel_delayed_work_sync(&i915->gt.retire_work);
mutex_lock(&i915->drm.struct_mutex);
intel_runtime_pm_get(i915);
err = make_obj_busy(obj);
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put_unchecked(i915);
mutex_unlock(&i915->drm.struct_mutex);
if (err) {
pr_err("[loop %d] Failed to busy the object\n", loop);
}
out_unlock:
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put_unchecked(i915);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
i915_vma_unpin(batch);
i915_vma_put(batch);
out_unlock:
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put_unchecked(i915);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
i915_vma_unpin(batch);
i915_vma_put(batch);
out_unlock:
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put_unchecked(i915);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
i915_request_put(request[id]);
}
out_unlock:
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put_unchecked(i915);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
guc_clients_create(guc);
guc_clients_enable(guc);
unlock:
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put_unchecked(dev_priv);
mutex_unlock(&dev_priv->drm.struct_mutex);
return err;
}
guc_client_free(clients[i]);
}
unlock:
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put_unchecked(dev_priv);
mutex_unlock(&dev_priv->drm.struct_mutex);
return err;
}
i915_reset(i915, ALL_ENGINES, NULL);
GEM_BUG_ON(test_bit(I915_RESET_HANDOFF, &i915->gpu_error.flags));
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put_unchecked(i915);
mutex_unlock(&i915->drm.struct_mutex);
igt_global_reset_unlock(i915);
force_reset(i915);
unlock:
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put_unchecked(i915);
mutex_unlock(&i915->drm.struct_mutex);
igt_global_reset_unlock(i915);
mutex_unlock(&i915->drm.struct_mutex);
i915_modparams.enable_hangcheck = saved_hangcheck;
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put_unchecked(i915);
return err;
}
igt_spinner_fini(&spin);
err_unlock:
igt_flush_test(i915, I915_WAIT_LOCKED);
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put_unchecked(i915);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
igt_spinner_fini(&spin_hi);
err_unlock:
igt_flush_test(i915, I915_WAIT_LOCKED);
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put_unchecked(i915);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
igt_spinner_fini(&spin_hi);
err_unlock:
igt_flush_test(i915, I915_WAIT_LOCKED);
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put_unchecked(i915);
mutex_unlock(&i915->drm.struct_mutex);
return err;
igt_spinner_fini(&spin_hi);
err_unlock:
igt_flush_test(i915, I915_WAIT_LOCKED);
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put_unchecked(i915);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
err_batch:
i915_gem_object_put(smoke.batch);
err_unlock:
- intel_runtime_pm_put(smoke.i915);
+ intel_runtime_pm_put_unchecked(smoke.i915);
mutex_unlock(&smoke.i915->drm.struct_mutex);
kfree(smoke.contexts);
intel_runtime_pm_get(engine->i915);
rq = i915_request_alloc(engine, ctx);
- intel_runtime_pm_put(engine->i915);
+ intel_runtime_pm_put_unchecked(engine->i915);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_pin;
else
rq = i915_request_alloc(engine, ctx);
- intel_runtime_pm_put(engine->i915);
+ intel_runtime_pm_put_unchecked(engine->i915);
kernel_context_close(ctx);
intel_runtime_pm_get(i915);
err = reset(engine);
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put_unchecked(i915);
if (want_spin) {
igt_spinner_end(&spin);
out:
reference_lists_fini(i915, &lists);
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put_unchecked(i915);
igt_global_reset_unlock(i915);
return ok ? 0 : -ESRCH;
err:
reference_lists_fini(i915, &lists);
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put_unchecked(i915);
igt_global_reset_unlock(i915);
kernel_context_close(ctx);