static void guc_log_capture_logs(struct intel_guc_log *log)
{
struct intel_guc *guc = log_to_guc(log);
struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ intel_wakeref_t wakeref;
guc_read_update_log_buffer(log);
/*
 * Generally the device is expected to be active only at this
 * time, so get/put should be really quick.
 */
- intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(dev_priv);
guc_action_flush_log_complete(guc);
- intel_runtime_pm_put_unchecked(dev_priv);
+ intel_runtime_pm_put(dev_priv, wakeref);
}
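Every hunk in this patch has the same shape: intel_runtime_pm_get() now returns an intel_wakeref_t cookie, and intel_runtime_pm_put() consumes it, letting the wakeref tracking pair each put with the get that produced it; the old intel_runtime_pm_put_unchecked() bypassed that pairing. A minimal sketch of the pattern (the body between get and put is illustrative only):

	intel_wakeref_t wakeref;

	wakeref = intel_runtime_pm_get(dev_priv);	/* returns a tracking cookie */
	/* ... touch hardware that requires the device to stay awake ... */
	intel_runtime_pm_put(dev_priv, wakeref);	/* pairs this put with its get */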
int intel_guc_log_set_level(struct intel_guc_log *log, u32 level)
{
struct intel_guc *guc = log_to_guc(log);
struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ intel_wakeref_t wakeref;
int ret;
BUILD_BUG_ON(GUC_LOG_VERBOSITY_MIN != 0);
goto out_unlock;
}
- intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(dev_priv);
ret = guc_action_control_log(guc, GUC_LOG_LEVEL_IS_VERBOSE(level),
GUC_LOG_LEVEL_IS_ENABLED(level),
GUC_LOG_LEVEL_TO_VERBOSITY(level));
- intel_runtime_pm_put_unchecked(dev_priv);
+ intel_runtime_pm_put(dev_priv, wakeref);
if (ret) {
DRM_DEBUG_DRIVER("guc_log_control action failed %d\n", ret);
goto out_unlock;
void intel_guc_log_relay_flush(struct intel_guc_log *log)
{
struct intel_guc *guc = log_to_guc(log);
struct drm_i915_private *i915 = guc_to_i915(guc);
+ intel_wakeref_t wakeref;
/*
 * Before initiating the forceful flush, wait for any pending/ongoing
 * flush to complete, otherwise the forceful flush may not actually happen.
 */
flush_work(&log->relay.flush_work);
- intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(i915);
guc_action_flush_log(guc);
- intel_runtime_pm_put_unchecked(i915);
+ intel_runtime_pm_put(i915, wakeref);
/* GuC would have updated log buffer by now, so capture it */
guc_log_capture_logs(log);
}
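The same get/call/put sequence can also be written with the driver's scoped helper, with_intel_runtime_pm(), added around the same series; the form below is recalled from the i915 headers and should be treated as approximate rather than quoted:

	/* Scoped form: the wakeref is taken on entry and released on exit. */
	intel_wakeref_t wakeref;

	with_intel_runtime_pm(i915, wakeref)
		guc_action_flush_log(guc);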
int intel_huc_check_status(struct intel_huc *huc)
{
struct drm_i915_private *dev_priv = huc_to_i915(huc);
+ intel_wakeref_t wakeref;
bool status;
if (!HAS_HUC(dev_priv))
return -ENODEV;
- intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(dev_priv);
status = I915_READ(HUC_STATUS2) & HUC_FW_VERIFIED;
- intel_runtime_pm_put_unchecked(dev_priv);
+ intel_runtime_pm_put(dev_priv, wakeref);
return status;
}
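Note the mixed return convention here: a negative errno for missing hardware, otherwise the HUC_STATUS2 verification bit narrowed through 'bool status' to 0 or 1. A hypothetical caller (the debug print is an assumption, not quoted from the driver):

	int ret = intel_huc_check_status(huc);

	if (ret < 0)
		return ret;	/* e.g. -ENODEV on platforms without a HuC */

	/* ret is 0 or 1 at this point */
	DRM_DEBUG_DRIVER("HuC firmware verified: %s\n", ret ? "yes" : "no");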