#ifndef __AMDGPU_UMC_H__
#define __AMDGPU_UMC_H__
+/* implement 64 bits REG operations via 32 bits interface */
+#define RREG64_UMC(reg) (RREG32(reg) | \
+ ((uint64_t)RREG32((reg) + 1) << 32))
+#define WREG64_UMC(reg, v) \
+ do { \
+ WREG32((reg), lower_32_bits(v)); \
+ WREG32((reg) + 1, upper_32_bits(v)); \
+ } while (0)
+
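As a sanity check on the pattern above, here is a minimal standalone sketch
(not part of the patch) of the same 64-bit-over-32-bit access scheme. The
rreg32()/wreg32() helpers and the regs[] array are hypothetical stand-ins for
the real RREG32()/WREG32() MMIO accessors; note that offsets are in dword
units, so (reg) + 1 addresses the upper half of the 64-bit register.

#include <stdint.h>
#include <stdio.h>

static uint32_t regs[2];	/* fake dword-indexed register file */

static uint32_t rreg32(uint32_t reg) { return regs[reg]; }
static void wreg32(uint32_t reg, uint32_t v) { regs[reg] = v; }

#define lower_32_bits(v)	((uint32_t)((v) & 0xffffffffULL))
#define upper_32_bits(v)	((uint32_t)((v) >> 32))

#define rreg64_umc(reg)	(rreg32(reg) | \
	((uint64_t)rreg32((reg) + 1) << 32))
#define wreg64_umc(reg, v) \
	do { \
		wreg32((reg), lower_32_bits(v)); \
		wreg32((reg) + 1, upper_32_bits(v)); \
	} while (0)

int main(void)
{
	wreg64_umc(0, 0x1122334455667788ULL);
	/* prints 0x1122334455667788: the two halves recombine correctly */
	printf("0x%llx\n", (unsigned long long)rreg64_umc(0));
	return 0;
}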
/*
 * void (*func)(struct amdgpu_device *adev, struct ras_err_data *err_data,
 *		uint32_t umc_reg_offset, uint32_t channel_index)
 */
/* check for SRAM correctable error;
 * MCUMC_STATUS is a 64 bit register */
- mc_umc_status = RREG64(mc_umc_status_addr + umc_reg_offset);
+ mc_umc_status = RREG64_UMC(mc_umc_status_addr + umc_reg_offset);
if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, ErrorCodeExt) == 6 &&
    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)
	*error_count += 1;

uint32_t mc_umc_status_addr =
	SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0);
/* check the MCUMC_STATUS */
- mc_umc_status = RREG64(mc_umc_status_addr + umc_reg_offset);
+ mc_umc_status = RREG64_UMC(mc_umc_status_addr + umc_reg_offset);
if ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
    (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1 ||
    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1))
	*error_count += 1;
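REG_GET_FIELD() works the same on the value whether it was assembled from one
64-bit read or two 32-bit reads: it is just a mask-and-shift on the integer.
A hedged sketch follows; the generic reg_get_field() helper and the Val bit
position (bit 63, as in the usual MCA status layout) are assumptions for
illustration, not the generated-header definitions.

#include <stdint.h>

/* illustrative stand-ins for the generated <FIELD>_MASK/__SHIFT values */
#define STATUS__VAL__SHIFT	63
#define STATUS__VAL_MASK	0x8000000000000000ULL

#define reg_get_field(val, mask, shift)	(((val) & (mask)) >> (shift))

/* returns 1 when the status register holds a valid logged error */
static inline int status_valid(uint64_t mc_umc_status)
{
	return reg_get_field(mc_umc_status,
			     STATUS__VAL_MASK, STATUS__VAL__SHIFT) == 1;
}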
/* skip error address process if -ENOMEM */
if (!err_data->err_addr) {
/* clear umc status */
- WREG64(mc_umc_status_addr + umc_reg_offset, 0x0ULL);
+ WREG64_UMC(mc_umc_status_addr + umc_reg_offset, 0x0ULL);
return;
}
- mc_umc_status = RREG64(mc_umc_status_addr + umc_reg_offset);
+ mc_umc_status = RREG64_UMC(mc_umc_status_addr + umc_reg_offset);
/* calculate error address if ue/ce error is detected */
if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, AddrV) == 1) {
	/* error address extraction elided in this excerpt */
}
/* clear umc status */
- WREG64(mc_umc_status_addr + umc_reg_offset, 0x0ULL);
+ WREG64_UMC(mc_umc_status_addr + umc_reg_offset, 0x0ULL);
}
static void umc_v6_1_query_ras_error_address(struct amdgpu_device *adev,