/* KFD */
struct amdgpu_kfd_dev kfd;
+ /* UMC: per-device unified-memory-controller state; wraps the old
+ * bare umc_funcs pointer (removed below) as adev->umc.funcs */
+ struct amdgpu_umc umc;
+
/* display related functionality */
struct amdgpu_display_manager dm;
const struct amdgpu_nbio_funcs *nbio_funcs;
const struct amdgpu_df_funcs *df_funcs;
- const struct amdgpu_umc_funcs *umc_funcs;
/* delayed work_func for deferring clockgating during resume */
struct delayed_work delayed_init_work;
switch (info->head.block) {
case AMDGPU_RAS_BLOCK__UMC:
- if (adev->umc_funcs->query_ras_error_count)
- adev->umc_funcs->query_ras_error_count(adev, &err_data);
+ if (adev->umc.funcs->query_ras_error_count) /* callback is optional — guard NULL */
+ adev->umc.funcs->query_ras_error_count(adev, &err_data);
break;
default:
break;
void *ras_error_status);
};
+/* Per-device UMC state, embedded in struct amdgpu_device as adev->umc. */
+struct amdgpu_umc {
+ /* max error count in one ras query call */
+ uint32_t max_ras_err_cnt_per_query;
+ /* IP-version-specific callbacks (e.g. umc_v6_1_funcs); individual
+ * entries may be NULL, so callers check before invoking */
+ const struct amdgpu_umc_funcs *funcs;
+};
+
#endif
{
struct ras_err_data err_data = {0, 0};
kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
- if (adev->umc_funcs->query_ras_error_count)
- adev->umc_funcs->query_ras_error_count(adev, &err_data);
+ if (adev->umc.funcs->query_ras_error_count) /* same NULL-guarded call, now via adev->umc wrapper */
+ adev->umc.funcs->query_ras_error_count(adev, &err_data);
amdgpu_ras_reset_gpu(adev, 0);
return AMDGPU_RAS_UE;
}
{
switch (adev->asic_type) {
case CHIP_VEGA20:
- adev->umc_funcs = &umc_v6_1_funcs;
+ /* NOTE(review): instance count * channel count — presumably one
+ * error slot per UMC channel per query; confirm against umc_v6_1 */
+ adev->umc.max_ras_err_cnt_per_query =
+ UMC_V6_1_UMC_INSTANCE_NUM * UMC_V6_1_CHANNEL_INSTANCE_NUM;
+ adev->umc.funcs = &umc_v6_1_funcs;
break;
default:
break;