r = amdgpu_virt_request_full_gpu(adev, true);
if (r)
return -EAGAIN;
-
- /* query the reg access mode at the very beginning */
- amdgpu_virt_init_reg_access_mode(adev);
}
adev->pm.pp_feature = amdgpu_pp_feature_mask;
return clk;
}
-
-void amdgpu_virt_init_reg_access_mode(struct amdgpu_device *adev)
-{
- struct amdgpu_virt *virt = &adev->virt;
-
- if (virt->ops && virt->ops->init_reg_access_mode)
- virt->ops->init_reg_access_mode(adev);
-}
-
-bool amdgpu_virt_support_psp_prg_ih_reg(struct amdgpu_device *adev)
-{
- bool ret = false;
- struct amdgpu_virt *virt = &adev->virt;
-
- if (amdgpu_sriov_vf(adev)
- && (virt->reg_access_mode & AMDGPU_VIRT_REG_ACCESS_PSP_PRG_IH))
- ret = true;
-
- return ret;
-}
-
-bool amdgpu_virt_support_rlc_prg_reg(struct amdgpu_device *adev)
-{
- bool ret = false;
- struct amdgpu_virt *virt = &adev->virt;
-
- if (amdgpu_sriov_vf(adev)
- && (virt->reg_access_mode & AMDGPU_VIRT_REG_ACCESS_RLC)
- && !(amdgpu_sriov_runtime(adev)))
- ret = true;
-
- return ret;
-}
-
-bool amdgpu_virt_support_skip_setting(struct amdgpu_device *adev)
-{
- bool ret = false;
- struct amdgpu_virt *virt = &adev->virt;
-
- if (amdgpu_sriov_vf(adev)
- && (virt->reg_access_mode & AMDGPU_VIRT_REG_SKIP_SEETING))
- ret = true;
-
- return ret;
-}
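Every helper removed above has the same shape: check amdgpu_sriov_vf(), then test one bit of virt->reg_access_mode. Because the (also removed) xgpu_ai_init_reg_access_mode() set every bit unconditionally, each predicate collapses to the plain VF check that callers adopt in the rest of this patch. A minimal stand-alone model of that collapse; the names are borrowed from the kernel for illustration only, and this is not kernel code:

#include <assert.h>
#include <stdbool.h>

#define REG_ACCESS_LEGACY	(1 << 0)
#define REG_ACCESS_PSP_PRG_IH	(1 << 1)
#define REG_ACCESS_RLC		(1 << 2)
#define REG_SKIP_SETTING	(1 << 3)	/* spelled "SEETING" in the removed code */

struct vdev {
	bool sriov_vf;			/* amdgpu_sriov_vf() stand-in */
	unsigned int reg_access_mode;
};

static bool support_skip_setting(const struct vdev *d)
{
	return d->sriov_vf && (d->reg_access_mode & REG_SKIP_SETTING);
}

int main(void)
{
	/* Mirrors the removed xgpu_ai_init_reg_access_mode(): every mode
	 * bit is set whenever the device runs as a VF. */
	struct vdev vf = {
		.sriov_vf = true,
		.reg_access_mode = REG_ACCESS_LEGACY | REG_ACCESS_PSP_PRG_IH |
				   REG_ACCESS_RLC | REG_SKIP_SETTING,
	};
	struct vdev bare_metal = { .sriov_vf = false };

	/* With an unconditional mask the predicate carries no information
	 * beyond the VF check itself, hence the cleanup. */
	assert(support_skip_setting(&vf) == vf.sriov_vf);
	assert(support_skip_setting(&bare_metal) == bare_metal.sriov_vf);
	return 0;
}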
uint64_t data[AMDGPU_VF_ERROR_ENTRY_SIZE];
};
-/* According to the fw feature, some new reg access modes are supported */
-#define AMDGPU_VIRT_REG_ACCESS_LEGACY (1 << 0) /* directly mmio */
-#define AMDGPU_VIRT_REG_ACCESS_PSP_PRG_IH (1 << 1) /* by PSP */
-#define AMDGPU_VIRT_REG_ACCESS_RLC (1 << 2) /* by RLC */
-#define AMDGPU_VIRT_REG_SKIP_SEETING (1 << 3) /* Skip setting reg */
-
/**
* struct amdgpu_virt_ops - amdgpu device virt operations
*/
void (*trans_msg)(struct amdgpu_device *adev, u32 req, u32 data1, u32 data2, u32 data3);
int (*get_pp_clk)(struct amdgpu_device *adev, u32 type, char *buf);
int (*force_dpm_level)(struct amdgpu_device *adev, u32 level);
- void (*init_reg_access_mode)(struct amdgpu_device *adev);
};
/*
void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev);
uint32_t amdgpu_virt_get_sclk(struct amdgpu_device *adev, bool lowest);
uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, bool lowest);
-
-void amdgpu_virt_init_reg_access_mode(struct amdgpu_device *adev);
-bool amdgpu_virt_support_psp_prg_ih_reg(struct amdgpu_device *adev);
-bool amdgpu_virt_support_rlc_prg_reg(struct amdgpu_device *adev);
-bool amdgpu_virt_support_skip_setting(struct amdgpu_device *adev);
-
#endif
{
switch (adev->asic_type) {
case CHIP_VEGA10:
- if (!amdgpu_virt_support_skip_setting(adev)) {
- soc15_program_register_sequence(adev,
- golden_settings_gc_9_0,
- ARRAY_SIZE(golden_settings_gc_9_0));
- soc15_program_register_sequence(adev,
- golden_settings_gc_9_0_vg10,
- ARRAY_SIZE(golden_settings_gc_9_0_vg10));
- }
+ soc15_program_register_sequence(adev,
+ golden_settings_gc_9_0,
+ ARRAY_SIZE(golden_settings_gc_9_0));
+ soc15_program_register_sequence(adev,
+ golden_settings_gc_9_0_vg10,
+ ARRAY_SIZE(golden_settings_gc_9_0_vg10));
break;
case CHIP_VEGA12:
soc15_program_register_sequence(adev,
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- gfx_v9_0_init_golden_registers(adev);
+ if (!amdgpu_sriov_vf(adev))
+ gfx_v9_0_init_golden_registers(adev);
gfx_v9_0_constants_init(adev);
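Hoisting the guard this way trades a test inside one switch case for a single check at the call site. VEGA10 was the only case that carried the skip-setting test, and it is presumably the only gfx9 part driven under SR-IOV here, so behavior is unchanged while the switch body loses its virtualization special case. A toy model of the hoist (hypothetical names, compiles stand-alone):

#include <assert.h>
#include <stdbool.h>

enum asic { VEGA10, VEGA12 };

static int writes;			/* counts golden-register writes */
static void program_seq(void) { writes++; }

/* After the patch: no VF checks inside the per-ASIC switch. */
static void init_golden_registers(enum asic a)
{
	switch (a) {
	case VEGA10: program_seq(); program_seq(); break;
	case VEGA12: program_seq(); break;
	}
}

/* After the patch: one hoisted guard at the call site. */
static void hw_init(enum asic a, bool sriov_vf)
{
	if (!sriov_vf)
		init_golden_registers(a);
}

int main(void)
{
	hw_init(VEGA10, true);
	assert(writes == 0);		/* VF: golden init fully skipped */
	hw_init(VEGA10, false);
	assert(writes == 2);		/* bare metal: both sequences run */
	return 0;
}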
switch (adev->asic_type) {
case CHIP_VEGA10:
- if (amdgpu_virt_support_skip_setting(adev))
+ if (amdgpu_sriov_vf(adev))
break;
/* fall through */
case CHIP_VEGA20:
WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
- if (amdgpu_virt_support_skip_setting(adev))
+ if (amdgpu_sriov_vf(adev))
return;
/* Set default page address. */
{
uint32_t tmp;
- if (amdgpu_virt_support_skip_setting(adev))
+ if (amdgpu_sriov_vf(adev))
return;
/* Setup L2 cache */
static void mmhub_v1_0_disable_identity_aperture(struct amdgpu_device *adev)
{
- if (amdgpu_virt_support_skip_setting(adev))
+ if (amdgpu_sriov_vf(adev))
return;
WREG32_SOC15(MMHUB, 0, mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
0);
WREG32_SOC15(MMHUB, 0, mmMC_VM_MX_L1_TLB_CNTL, tmp);
- if (!amdgpu_virt_support_skip_setting(adev)) {
+ if (!amdgpu_sriov_vf(adev)) {
/* Setup L2 cache */
tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
{
u32 tmp;
- if (amdgpu_virt_support_skip_setting(adev))
+ if (amdgpu_sriov_vf(adev))
return;
tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL);
amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
}
-static void xgpu_ai_init_reg_access_mode(struct amdgpu_device *adev)
-{
- adev->virt.reg_access_mode = AMDGPU_VIRT_REG_ACCESS_LEGACY;
-
- /* Enable L1 security reg access mode by defaul, as non-security VF
- * will no longer be supported.
- */
- adev->virt.reg_access_mode |= AMDGPU_VIRT_REG_ACCESS_RLC;
-
- adev->virt.reg_access_mode |= AMDGPU_VIRT_REG_ACCESS_PSP_PRG_IH;
-
- adev->virt.reg_access_mode |= AMDGPU_VIRT_REG_SKIP_SEETING;
-}
-
const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
.req_full_gpu = xgpu_ai_request_full_gpu_access,
.rel_full_gpu = xgpu_ai_release_full_gpu_access,
.trans_msg = xgpu_ai_mailbox_trans_msg,
.get_pp_clk = xgpu_ai_get_pp_clk,
.force_dpm_level = xgpu_ai_force_dpm_level,
- .init_reg_access_mode = xgpu_ai_init_reg_access_mode,
};
{
switch (adev->asic_type) {
case CHIP_VEGA10:
- if (!amdgpu_virt_support_skip_setting(adev)) {
- soc15_program_register_sequence(adev,
- golden_settings_sdma_4,
- ARRAY_SIZE(golden_settings_sdma_4));
- soc15_program_register_sequence(adev,
- golden_settings_sdma_vg10,
- ARRAY_SIZE(golden_settings_sdma_vg10));
- }
+ soc15_program_register_sequence(adev,
+ golden_settings_sdma_4,
+ ARRAY_SIZE(golden_settings_sdma_4));
+ soc15_program_register_sequence(adev,
+ golden_settings_sdma_vg10,
+ ARRAY_SIZE(golden_settings_sdma_vg10));
break;
case CHIP_VEGA12:
soc15_program_register_sequence(adev,
adev->powerplay.pp_funcs->set_powergating_by_smu)
amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_SDMA, false);
- sdma_v4_0_init_golden_registers(adev);
+ if (!amdgpu_sriov_vf(adev))
+ sdma_v4_0_init_golden_registers(adev);
r = sdma_v4_0_start(adev);
int i;
struct amdgpu_ring *ring;
- /* Two reasons to skip
- * 1, Host driver already programmed them
- * 2, To avoid registers program violations in SR-IOV
- */
- if (!amdgpu_virt_support_skip_setting(adev)) {
+ /* sdma/ih doorbell ranges are programmed by the hypervisor */
+ if (!amdgpu_sriov_vf(adev)) {
for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring;
adev->nbio_funcs->sdma_doorbell_range(adev, i,
ring->use_doorbell, ring->doorbell_index,
adev->doorbell_index.sdma_doorbell_range);
}
- }
- adev->nbio_funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
+ adev->nbio_funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
adev->irq.ih.doorbell_index);
+ }
}
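With this hunk both doorbell apertures, the per-instance SDMA ranges and the IH range, become host-only setup: the hypervisor has already partitioned the doorbell BAR before the VF boots, so the guest must not reprogram it. A small stand-alone sketch of the range assignment the VF now skips; the instance count and slot size are made up for illustration:

#include <stdbool.h>
#include <stdio.h>

#define NUM_SDMA_INSTANCES	2	/* hypothetical */
#define SDMA_DOORBELL_RANGE	4	/* hypothetical slots per instance */

/* Stand-in for adev->nbio_funcs->sdma_doorbell_range(): in the driver
 * this programs a doorbell-range register with an offset and size; here
 * we just print the slice each instance would receive. */
static void sdma_doorbell_range(int instance, bool use_doorbell,
				int index, int range)
{
	if (use_doorbell)
		printf("sdma%d: doorbell offset=%d size=%d\n",
		       instance, index, range);
}

int main(void)
{
	bool sriov_vf = true;		/* assumption: running as a VF */

	/* After this patch the whole block is host-only: on a VF the
	 * hypervisor has already carved up the doorbell aperture. */
	if (!sriov_vf) {
		for (int i = 0; i < NUM_SDMA_INSTANCES; i++)
			sdma_doorbell_range(i, true,
					    i * SDMA_DOORBELL_RANGE,
					    SDMA_DOORBELL_RANGE);
	}
	return 0;
}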
static int soc15_common_hw_init(void *handle)
} \
} while (0)
+#define AMDGPU_VIRT_SUPPORT_RLC_PRG_REG(a) (amdgpu_sriov_vf((a)) && !amdgpu_sriov_runtime((a)))
#define WREG32_RLC(reg, value) \
do { \
- if (amdgpu_virt_support_rlc_prg_reg(adev)) { \
+ if (AMDGPU_VIRT_SUPPORT_RLC_PRG_REG(adev)) { \
uint32_t i = 0; \
uint32_t retries = 50000; \
uint32_t r0 = adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG0_BASE_IDX] + mmSCRATCH_REG0; \
#define WREG32_SOC15_RLC_SHADOW(ip, inst, reg, value) \
do { \
uint32_t target_reg = adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg;\
- if (amdgpu_virt_support_rlc_prg_reg(adev)) { \
+ if (AMDGPU_VIRT_SUPPORT_RLC_PRG_REG(adev)) { \
uint32_t r2 = adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG2; \
uint32_t r3 = adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG3; \
uint32_t grbm_cntl = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_CNTL_BASE_IDX] + mmGRBM_GFX_CNTL; \
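Both macros above fall back to RLC-assisted programming when the VF may not touch GC registers directly. The full WREG32_RLC body is truncated in this hunk; assuming the usual soc15 flow, the driver posts the value in SCRATCH_REG0 and the register offset with a busy bit in SCRATCH_REG1, rings RLC_SPARE_INT, then polls (the 50000-retry cap is visible above) until the RLC firmware clears the busy bit. A stand-alone model of that handshake, with a synchronous stand-in for the firmware:

#include <stdint.h>
#include <stdio.h>

static uint32_t scratch0, scratch1;	/* stand-ins for SCRATCH_REG0/1 */
static uint32_t regs[256];		/* stand-in register file */

/* Stand-in for the RLC firmware: apply the posted write, then clear
 * bit 31 of SCRATCH_REG1 to acknowledge. Real hardware does this
 * asynchronously after RLC_SPARE_INT is rung. */
static void rlc_firmware_tick(void)
{
	if (scratch1 & 0x80000000) {
		regs[scratch1 & 0xff] = scratch0;
		scratch1 &= ~0x80000000;
	}
}

/* The handshake: post value and offset, ring the doorbell, poll. */
static int rlc_write(uint32_t reg, uint32_t value)
{
	uint32_t retries = 50000;

	scratch0 = value;
	scratch1 = reg | 0x80000000;
	rlc_firmware_tick();		/* models "WREG32(spare_int, 1)" */
	while ((scratch1 & 0x80000000) && --retries)
		;			/* the driver udelay()s between polls */
	if (!retries) {
		fprintf(stderr, "rlcg program reg:0x%05x failed!\n", reg);
		return -1;
	}
	return 0;
}

int main(void)
{
	rlc_write(0x42, 0xcafe);
	printf("regs[0x42] = 0x%x\n", regs[0x42]);
	return 0;
}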
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 1);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 1);
- if (amdgpu_virt_support_psp_prg_ih_reg(adev)) {
+ if (amdgpu_sriov_vf(adev)) {
if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) {
DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
return;
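This and the following hunks all apply one idiom: on a VF the IH ring-buffer control registers are PSP-protected, so every enable/disable toggle goes through psp_reg_program() with a PSP_REG_* index instead of a direct WREG32, while bare metal keeps the MMIO path. A stand-alone sketch of the guarded write; psp_reg_program_stub() is a simplified stand-in, not the real PSP interface:

#include <stdbool.h>
#include <stdio.h>

static unsigned int ih_rb_cntl;		/* stand-in for mmIH_RB_CNTL */

/* Stand-in for psp_reg_program(): ask the PSP to write a protected
 * register on the VF's behalf; nonzero return means failure. */
static int psp_reg_program_stub(unsigned int value)
{
	ih_rb_cntl = value;
	return 0;
}

static int ih_toggle_ring(bool sriov_vf, bool enable)
{
	unsigned int v = enable ? (ih_rb_cntl | 1) : (ih_rb_cntl & ~1u);

	if (sriov_vf) {			/* VF: protected, go through PSP */
		if (psp_reg_program_stub(v)) {
			fprintf(stderr, "PSP program IH_RB_CNTL failed!\n");
			return -1;
		}
	} else {			/* bare metal: direct MMIO write */
		ih_rb_cntl = v;
	}
	return 0;
}

int main(void)
{
	ih_toggle_ring(true, true);
	printf("IH_RB_CNTL = 0x%x\n", ih_rb_cntl);
	return 0;
}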
ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1,
RB_ENABLE, 1);
- if (amdgpu_virt_support_psp_prg_ih_reg(adev)) {
+ if (amdgpu_sriov_vf(adev)) {
if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1,
ih_rb_cntl)) {
DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n");
ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING2,
RB_ENABLE, 1);
- if (amdgpu_virt_support_psp_prg_ih_reg(adev)) {
+ if (amdgpu_sriov_vf(adev)) {
if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2,
ih_rb_cntl)) {
DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n");
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 0);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 0);
- if (amdgpu_virt_support_psp_prg_ih_reg(adev)) {
+ if (amdgpu_sriov_vf(adev)) {
if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) {
DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
return;
ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1,
RB_ENABLE, 0);
- if (amdgpu_virt_support_psp_prg_ih_reg(adev)) {
+ if (amdgpu_sriov_vf(adev)) {
if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1,
ih_rb_cntl)) {
DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n");
ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING2,
RB_ENABLE, 0);
- if (amdgpu_virt_support_psp_prg_ih_reg(adev)) {
+ if (amdgpu_sriov_vf(adev)) {
if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2,
ih_rb_cntl)) {
DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n");
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RPTR_REARM,
!!adev->irq.msi_enabled);
- if (amdgpu_virt_support_psp_prg_ih_reg(adev)) {
+ if (amdgpu_sriov_vf(adev)) {
if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) {
DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
return -ETIMEDOUT;
WPTR_OVERFLOW_ENABLE, 0);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
RB_FULL_DRAIN_ENABLE, 1);
- if (amdgpu_virt_support_psp_prg_ih_reg(adev)) {
+ if (amdgpu_sriov_vf(adev)) {
if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1,
ih_rb_cntl)) {
DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n");
ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
ih_rb_cntl = vega10_ih_rb_cntl(ih, ih_rb_cntl);
- if (amdgpu_virt_support_psp_prg_ih_reg(adev)) {
+ if (amdgpu_sriov_vf(adev)) {
if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2,
ih_rb_cntl)) {
DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n");