diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ ... @@ kgd2kfd_probe()
	kfd->pdev = pdev;
	kfd->init_complete = false;
	kfd->kfd2kgd = f2g;
+	atomic_set(&kfd->compute_profile, 0);

	mutex_init(&kfd->doorbell_mutex);
	memset(&kfd->doorbell_available_index, 0,
@@ ... @@ kgd2kfd_set_sram_ecc_flag()
		atomic_inc(&kfd->sram_ecc_flag);
}
+/* Take a compute-profile reference: the first active user switches the
+ * GPU to the compute power profile.
+ */
+void kfd_inc_compute_active(struct kfd_dev *kfd)
+{
+	if (atomic_inc_return(&kfd->compute_profile) == 1)
+		amdgpu_amdkfd_set_compute_idle(kfd->kgd, false);
+}
+
+/* Drop a compute-profile reference: the last user returns the GPU to the
+ * idle profile. Warn if the count ever goes negative.
+ */
+void kfd_dec_compute_active(struct kfd_dev *kfd)
+{
+	int count = atomic_dec_return(&kfd->compute_profile);
+
+	if (count == 0)
+		amdgpu_amdkfd_set_compute_idle(kfd->kgd, true);
+	WARN_ONCE(count < 0, "Compute profile ref. count error");
+}
+
#if defined(CONFIG_DEBUG_FS)
/* This function will send a package to HIQ to hang the HWS
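The two helpers above put the profile switch behind a per-device reference count: the first kfd_inc_compute_active() call moves the GPU into the compute power profile through amdgpu_amdkfd_set_compute_idle(), the last kfd_dec_compute_active() call moves it back, and WARN_ONCE() flags an unbalanced decrement. The sketch below is a minimal user-space model of that behavior using C11 atomics; set_compute_idle(), inc_compute_active(), and dec_compute_active() are illustrative stand-ins for the kernel symbols, not real API.

/* User-space model of the refcount-to-profile pattern (not kernel code).
 * Only the 0 -> 1 and 1 -> 0 transitions touch the power profile.
 */
#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int compute_profile = 0;

static void set_compute_idle(bool idle)
{
	/* stand-in for amdgpu_amdkfd_set_compute_idle() */
	printf("power profile: %s\n", idle ? "idle" : "compute");
}

static void inc_compute_active(void)
{
	/* atomic_fetch_add() returns the old value, so old == 0 means 0 -> 1 */
	if (atomic_fetch_add(&compute_profile, 1) == 0)
		set_compute_idle(false);
}

static void dec_compute_active(void)
{
	int old = atomic_fetch_sub(&compute_profile, 1);

	if (old == 1)		/* 1 -> 0: last user gone */
		set_compute_idle(true);
	assert(old > 0);	/* underflow would mean unbalanced calls */
}

int main(void)
{
	inc_compute_active();	/* first user: switch to compute profile */
	inc_compute_active();	/* second user: no profile change */
	dec_compute_active();	/* one user still active: no change */
	dec_compute_active();	/* last user: back to idle */
	return 0;
}

Note that atomic_fetch_add()/atomic_fetch_sub() return the old value, whereas the kernel's atomic_inc_return()/atomic_dec_return() return the new one; the transition checks above are adjusted accordingly.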
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ ... @@ register_process()
	retval = dqm->asic_ops.update_qpd(dqm, qpd);

-	if (dqm->processes_count++ == 0)
-		amdgpu_amdkfd_set_compute_idle(dqm->dev->kgd, false);
+	dqm->processes_count++;
+	kfd_inc_compute_active(dqm->dev);

	dqm_unlock(dqm);
		if (qpd == cur->qpd) {
			list_del(&cur->list);
			kfree(cur);
-			if (--dqm->processes_count == 0)
-				amdgpu_amdkfd_set_compute_idle(
-					dqm->dev->kgd, true);
+			dqm->processes_count--;
+			kfd_dec_compute_active(dqm->dev);
			goto out;
		}
	}
@@ ... @@ process_termination_nocpsch()
			list_del(&cur->list);
			kfree(cur);
			dqm->processes_count--;
+			kfd_dec_compute_active(dqm->dev);
			break;
		}
	}
@@ ... @@ process_termination_cpsch()
			list_del(&cur->list);
			kfree(cur);
			dqm->processes_count--;
+			kfd_dec_compute_active(dqm->dev);
			break;
		}
	}
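With these hunks, the unregister path and both process-teardown hunks above drop the same device-level reference that the register path takes, instead of toggling the power profile directly at each site. A small illustrative sketch of that pairing invariant, using hypothetical fake_* types and helpers rather than the real kfd_dev/device_queue_manager structures:

/* Sketch of the invariant the call sites above maintain (illustrative only):
 * every path that changes the per-dqm process count makes the matching
 * inc/dec call, so the device-level refcount tracks the number of
 * registered processes.
 */
#include <assert.h>

struct fake_dev { int compute_profile; };                  /* stands in for kfd_dev */
struct fake_dqm { struct fake_dev *dev; int processes_count; };

static void fake_inc_compute_active(struct fake_dev *d) { d->compute_profile++; }
static void fake_dec_compute_active(struct fake_dev *d) { d->compute_profile--; }

static void fake_register_process(struct fake_dqm *dqm)
{
	dqm->processes_count++;
	fake_inc_compute_active(dqm->dev);
	assert(dqm->dev->compute_profile == dqm->processes_count);
}

static void fake_unregister_process(struct fake_dqm *dqm)
{
	dqm->processes_count--;
	fake_dec_compute_active(dqm->dev);
	assert(dqm->dev->compute_profile == dqm->processes_count);
}

int main(void)
{
	struct fake_dev dev = { 0 };
	struct fake_dqm dqm = { &dev, 0 };

	fake_register_process(&dqm);
	fake_register_process(&dqm);
	fake_unregister_process(&dqm);
	fake_unregister_process(&dqm);
	assert(dev.compute_profile == 0);	/* balanced across all paths */
	return 0;
}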
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ ... @@ struct kfd_dev {
	/* SRAM ECC flag */
	atomic_t sram_ecc_flag;
+
+	/* Compute Profile ref. count */
+	atomic_t compute_profile;
};

enum kfd_mempool {
@@ ... @@
bool kfd_is_locked(void);
+/* Compute profile */
+void kfd_inc_compute_active(struct kfd_dev *dev);
+void kfd_dec_compute_active(struct kfd_dev *dev);
+
/* Debugfs */
#if defined(CONFIG_DEBUG_FS)