r = amdgpu_virt_request_full_gpu(adev, true);
if (r)
return -EAGAIN;
+
+ /* query the reg access mode at the very beginning */
+ amdgpu_virt_init_reg_access_mode(adev);
}
adev->pm.pp_feature = amdgpu_pp_feature_mask;
return clk;
}
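+
+/*
+ * Detect which register access modes the host exposes to this VF, using the
+ * per-ASIC virt callback when one is provided.
+ */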
+void amdgpu_virt_init_reg_access_mode(struct amdgpu_device *adev)
+{
+ struct amdgpu_virt *virt = &adev->virt;
+
+ if (virt->ops && virt->ops->init_reg_access_mode)
+ virt->ops->init_reg_access_mode(adev);
+}
+
+bool amdgpu_virt_support_psp_prg_ih_reg(struct amdgpu_device *adev)
+{
+ bool ret = false;
+ struct amdgpu_virt *virt = &adev->virt;
+
+ if (amdgpu_sriov_vf(adev)
+ && (virt->reg_access_mode & AMDGPU_VIRT_REG_ACCESS_PSP_PRG_IH))
+ ret = true;
+
+ return ret;
+}
+
+bool amdgpu_virt_support_rlc_prg_reg(struct amdgpu_device *adev)
+{
+ bool ret = false;
+ struct amdgpu_virt *virt = &adev->virt;
+
+ if (amdgpu_sriov_vf(adev)
+ && (virt->reg_access_mode & AMDGPU_VIRT_REG_ACCESS_RLC)
+ && !(amdgpu_sriov_runtime(adev)))
+ ret = true;
+
+ return ret;
+}
+
+bool amdgpu_virt_support_skip_setting(struct amdgpu_device *adev)
+{
+ bool ret = false;
+ struct amdgpu_virt *virt = &adev->virt;
+
+ if (amdgpu_sriov_vf(adev)
+ && (virt->reg_access_mode & AMDGPU_VIRT_REG_SKIP_SETTING))
+ ret = true;
+
+ return ret;
+}
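
(Illustrative sketch, not part of this patch: a caller in an IP block could use
these helpers to pick a register programming path. The function name and
arguments below are hypothetical.)

	static void example_program_reg(struct amdgpu_device *adev, u32 reg, u32 value)
	{
		/* the host already programs this register on behalf of the VF */
		if (amdgpu_virt_support_skip_setting(adev))
			return;

		/* otherwise fall back to direct MMIO access from the guest */
		WREG32(reg, value);
	}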
uint64_t data[AMDGPU_VF_ERROR_ENTRY_SIZE];
};
+/* Register access modes; which ones are usable depends on the firmware features */
+#define AMDGPU_VIRT_REG_ACCESS_LEGACY (1 << 0) /* direct MMIO access */
+#define AMDGPU_VIRT_REG_ACCESS_PSP_PRG_IH (1 << 1) /* programmed via PSP */
+#define AMDGPU_VIRT_REG_ACCESS_RLC (1 << 2) /* programmed via RLC */
+#define AMDGPU_VIRT_REG_SKIP_SETTING (1 << 3) /* skip setting the register */
+
/**
* struct amdgpu_virt_ops - amdgpu device virt operations
*/
void (*trans_msg)(struct amdgpu_device *adev, u32 req, u32 data1, u32 data2, u32 data3);
int (*get_pp_clk)(struct amdgpu_device *adev, u32 type, char *buf);
int (*force_dpm_level)(struct amdgpu_device *adev, u32 level);
+ void (*init_reg_access_mode)(struct amdgpu_device *adev);
};
/*
uint32_t gim_feature;
/* protect DPM events to GIM */
struct mutex dpm_mutex;
+ uint32_t reg_access_mode; /* bitmask of AMDGPU_VIRT_REG_* modes */
};
#define amdgpu_sriov_enabled(adev) \
uint32_t amdgpu_virt_get_sclk(struct amdgpu_device *adev, bool lowest);
uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, bool lowest);
+void amdgpu_virt_init_reg_access_mode(struct amdgpu_device *adev);
+bool amdgpu_virt_support_psp_prg_ih_reg(struct amdgpu_device *adev);
+bool amdgpu_virt_support_rlc_prg_reg(struct amdgpu_device *adev);
+bool amdgpu_virt_support_skip_setting(struct amdgpu_device *adev);
+
#endif
#include "nbio/nbio_6_1_sh_mask.h"
#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
+#include "mp/mp_9_0_offset.h"
#include "soc15.h"
#include "vega10_ih.h"
#include "soc15_common.h"
amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
}
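+
+/*
+ * Read the RLC and PSP SOS firmware versions from the registers where they
+ * are reported and enable each register access mode that the detected
+ * firmware level supports, starting from legacy direct MMIO.
+ */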
+static void xgpu_ai_init_reg_access_mode(struct amdgpu_device *adev)
+{
+ uint32_t rlc_fw_ver = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_6);
+ uint32_t sos_fw_ver = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_58);
+
+ adev->virt.reg_access_mode = AMDGPU_VIRT_REG_ACCESS_LEGACY;
+
+ if (rlc_fw_ver >= 0x5d)
+ adev->virt.reg_access_mode |= AMDGPU_VIRT_REG_ACCESS_RLC;
+
+ if (sos_fw_ver >= 0x80455)
+ adev->virt.reg_access_mode |= AMDGPU_VIRT_REG_ACCESS_PSP_PRG_IH;
+
+ if (sos_fw_ver >= 0x8045b)
+ adev->virt.reg_access_mode |= AMDGPU_VIRT_REG_SKIP_SETTING;
+}
+
const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
.req_full_gpu = xgpu_ai_request_full_gpu_access,
.rel_full_gpu = xgpu_ai_release_full_gpu_access,
.trans_msg = xgpu_ai_mailbox_trans_msg,
.get_pp_clk = xgpu_ai_get_pp_clk,
.force_dpm_level = xgpu_ai_force_dpm_level,
+ .init_reg_access_mode = xgpu_ai_init_reg_access_mode,
};