mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
if (limit && limit < mc->real_vram_size)
mc->real_vram_size = limit;
+
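+ /* Without XGMI, the FB aperture covers just this GPU's local VRAM. */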
+ if (mc->xgmi.num_physical_nodes == 0) {
+ mc->fb_start = mc->vram_start;
+ mc->fb_end = mc->vram_end;
+ }
dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
mc->mc_vram_size >> 20, mc->vram_start,
mc->vram_end, mc->real_vram_size >> 20);
/* VCE doesn't like it when BOs cross a 4GB segment, so align
* the GART base on a 4GB boundary as well.
*/
- size_bf = mc->vram_start;
- size_af = adev->gmc.mc_mask + 1 - ALIGN(mc->vram_end + 1, four_gb);
+ size_bf = mc->fb_start;
+ size_af = adev->gmc.mc_mask + 1 - ALIGN(mc->fb_end + 1, four_gb);
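+ /* The GART has to fit entirely below or above the FB aperture. */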
if (mc->gart_size > max(size_bf, size_af)) {
dev_warn(adev->dev, "limiting GART\n");
const uint64_t sixteen_gb_mask = ~(sixteen_gb - 1);
u64 size_af, size_bf;
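+ /* Measure the 16GB-aligned holes on either side of the FB and GART apertures. */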
- if (mc->vram_start > mc->gart_start) {
- size_bf = (mc->vram_start & sixteen_gb_mask) -
+ if (mc->fb_start > mc->gart_start) {
+ size_bf = (mc->fb_start & sixteen_gb_mask) -
ALIGN(mc->gart_end + 1, sixteen_gb);
- size_af = mc->mc_mask + 1 - ALIGN(mc->vram_end + 1, sixteen_gb);
+ size_af = mc->mc_mask + 1 - ALIGN(mc->fb_end + 1, sixteen_gb);
} else {
- size_bf = mc->vram_start & sixteen_gb_mask;
+ size_bf = mc->fb_start & sixteen_gb_mask;
size_af = (mc->gart_start & sixteen_gb_mask) -
- ALIGN(mc->vram_end + 1, sixteen_gb);
+ ALIGN(mc->fb_end + 1, sixteen_gb);
}
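+ /* Put the AGP aperture into whichever hole is larger. */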
if (size_bf > size_af) {
- mc->agp_start = mc->vram_start > mc->gart_start ?
+ mc->agp_start = mc->fb_start > mc->gart_start ?
mc->gart_end + 1 : 0;
mc->agp_size = size_bf;
} else {
- mc->agp_start = (mc->vram_start > mc->gart_start ?
- mc->vram_end : mc->gart_end) + 1,
+ mc->agp_start = (mc->fb_start > mc->gart_start ?
+ mc->fb_end : mc->gart_end) + 1,
mc->agp_size = size_af;
}
u64 base = 0;
if (!amdgpu_sriov_vf(adev))
base = mmhub_v1_0_get_fb_location(adev);
+ /* add the xgmi offset of the physical node */
+ base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
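+ /* VRAM goes at the hardware FB base; GART and AGP are then fitted around it. */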
amdgpu_gmc_vram_location(adev, &adev->gmc, base);
amdgpu_gmc_gart_location(adev, mc);
if (!amdgpu_sriov_vf(adev))
amdgpu_gmc_agp_location(adev, mc);
/* base offset of vram pages */
adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev);
+
+ /* XXX: add the xgmi offset of the physical node? */
+ adev->vm_manager.vram_base_offset +=
+ adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
}
u64 mmhub_v1_0_get_fb_location(struct amdgpu_device *adev)
{
u64 base = RREG32_SOC15(MMHUB, 0, mmMC_VM_FB_LOCATION_BASE);
+ u64 top = RREG32_SOC15(MMHUB, 0, mmMC_VM_FB_LOCATION_TOP);
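+
+ /* The FB_BASE/FB_TOP register fields are in 16MB units (1 << 24); shift up to byte addresses. */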
base &= MC_VM_FB_LOCATION_BASE__FB_BASE_MASK;
base <<= 24;
+ top &= MC_VM_FB_LOCATION_TOP__FB_TOP_MASK;
+ top <<= 24;
+
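+ /* Cache the FB aperture so the GMC code can place GART and AGP around it. */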
+ adev->gmc.fb_start = base;
+ adev->gmc.fb_end = top;
+
return base;
}