u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
{
WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
- WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_TT &&
- !amdgpu_gtt_mgr_has_gart_addr(&bo->tbo.mem));
WARN_ON_ONCE(!ww_mutex_is_locked(&bo->tbo.resv->lock) &&
!bo->pin_count);
+ WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
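The two-line warning that needed amdgpu_gtt_mgr_has_gart_addr() collapses into a single check on mem.start: the GTT manager now leaves AMDGPU_BO_INVALID_OFFSET there until the buffer actually receives a GART address. A minimal sketch of that invariant, using a hypothetical helper name that is not part of the patch:

/* Hypothetical helper, for illustration only: under the new rule a BO has
 * a usable GPU offset exactly when mem.start holds a real offset, no matter
 * whether it sits in VRAM or in bound GTT.
 */
static inline bool amdgpu_bo_has_gpu_offset(struct amdgpu_bo *bo)
{
	return bo->tbo.mem.start != AMDGPU_BO_INVALID_OFFSET;
}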
static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
				    struct drm_mm_node *mm_node,
				    struct ttm_mem_reg *mem)
{
uint64_t addr = 0;
- if (mem->mem_type != TTM_PL_TT || amdgpu_gtt_mgr_has_gart_addr(mem)) {
+ if (mm_node->start != AMDGPU_BO_INVALID_OFFSET) {
addr = mm_node->start << PAGE_SHIFT;
addr += bo->bdev->man[mem->mem_type].gpu_offset;
}
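This helper converts a drm_mm_node into a GPU address. The new test keys off the node itself rather than the memory type: a GTT node that has not been bound yet carries AMDGPU_BO_INVALID_OFFSET as its start, so no address is produced for it. A rough illustration of the arithmetic, with invented values:

/* Illustration only, values are made up:
 *   mm_node->start                = 0x200               (node start, in pages)
 *   man[mem->mem_type].gpu_offset = 0x0000008000000000  (aperture base)
 *
 *   addr = (0x200 << PAGE_SHIFT) + 0x0000008000000000
 *        = 0x0000008000200000    (with PAGE_SHIFT == 12)
 */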
/* Map only what needs to be accessed. Map src to window 0 and
* dst to window 1
*/
- if (src->mem->mem_type == TTM_PL_TT &&
- !amdgpu_gtt_mgr_has_gart_addr(src->mem)) {
+ if (src->mem->start == AMDGPU_BO_INVALID_OFFSET) {
r = amdgpu_map_buffer(src->bo, src->mem,
PFN_UP(cur_size + src_page_offset),
src_node_start, 0, ring,
from += src_page_offset;
}
- if (dst->mem->mem_type == TTM_PL_TT &&
- !amdgpu_gtt_mgr_has_gart_addr(dst->mem)) {
+ if (dst->mem->start == AMDGPU_BO_INVALID_OFFSET) {
r = amdgpu_map_buffer(dst->bo, dst->mem,
PFN_UP(cur_size + dst_page_offset),
dst_node_start, 1, ring,
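In the buffer-copy path the same idea applies to both directions: a source or destination that still has no GART address is mapped through a per-copy GART window (window 0 for the source, window 1 for the destination) before the copy is submitted, and the test for "needs mapping" is now simply whether mem->start is still invalid. A hedged sketch of the surrounding flow, since the excerpt elides the tail of both calls; the error label and exact line layout are assumptions:

	if (src->mem->start == AMDGPU_BO_INVALID_OFFSET) {
		/* Map the pages of the unbound BO into GART window 0 and
		 * get the GPU address of that mapping back in 'from'.
		 */
		r = amdgpu_map_buffer(src->bo, src->mem,
				      PFN_UP(cur_size + src_page_offset),
				      src_node_start, 0, ring, &from);
		if (r)
			goto error;

		/* amdgpu_map_buffer returns the start of the mapped page,
		 * so add the offset into that page back in.
		 */
		from += src_page_offset;
	}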
uint64_t flags;
int r;
- if (bo->mem.mem_type != TTM_PL_TT ||
- amdgpu_gtt_mgr_has_gart_addr(&bo->mem))
+ if (bo->mem.start != AMDGPU_BO_INVALID_OFFSET)
return 0;
- /* allocate GTT space */
+ /* allocate GART space */
tmp = bo->mem;
tmp.mm_node = NULL;
placement.num_placement = 1;
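The last hunk is in the function that allocates GART space for a GTT buffer on demand (amdgpu_ttm_alloc_gart() in current kernels): if mem.start already holds a valid offset the work was done earlier and it returns immediately, otherwise it builds a one-entry placement and asks the GTT manager for real GART space. A hedged sketch of an assumed caller, e.g. a pin path, showing why the early-out matters:

	/* Assumed caller sketch, not taken from the patch: validate the BO,
	 * make sure it really has a GART address, then it is safe to ask
	 * for the GPU offset without tripping the WARN_ON_ONCE() above.
	 */
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (r)
		return r;

	r = amdgpu_ttm_alloc_gart(&bo->tbo);	/* no-op if already bound */
	if (r)
		return r;

	gpu_addr = amdgpu_bo_gpu_offset(bo);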