*/
static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo)
{
- return drm_vma_node_offset_addr(&bo->tbo.vma_node);
+ return drm_vma_node_offset_addr(&bo->tbo.base.vma_node);
}
/**
*/
u64 drm_gem_vram_mmap_offset(struct drm_gem_vram_object *gbo)
{
- return drm_vma_node_offset_addr(&gbo->bo.vma_node);
+ return drm_vma_node_offset_addr(&gbo->bo.base.vma_node);
}
EXPORT_SYMBOL(drm_gem_vram_mmap_offset);
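The driver-side helpers in these hunks (amdgpu above; qxl, radeon and virtio below) all make the same one-line change: the mmap offset is still read with drm_vma_node_offset_addr(), only the node now lives in the embedded GEM object. For context, a minimal user-space sketch of how such an offset is consumed; drm_fd, bo_size and mmap_offset are placeholders assumed to come from the driver's own ioctls and are not defined by this patch.

#include <stdint.h>
#include <sys/mman.h>

/* Map a buffer object using the offset the driver derived from
 * drm_vma_node_offset_addr(); the value is passed verbatim as the
 * mmap() offset on the DRM file descriptor.
 */
static void *map_bo(int drm_fd, size_t bo_size, uint64_t mmap_offset)
{
	return mmap(NULL, bo_size, PROT_READ | PROT_WRITE,
		    MAP_SHARED, drm_fd, (off_t)mmap_offset);
}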
gem = drm_gem_object_lookup(file_priv, handle);
if (gem) {
struct nouveau_bo *bo = nouveau_gem_object(gem);
- *poffset = drm_vma_node_offset_addr(&bo->bo.vma_node);
+ *poffset = drm_vma_node_offset_addr(&bo->bo.base.vma_node);
drm_gem_object_put_unlocked(gem);
return 0;
}
}
rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
- rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.vma_node);
+ rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.base.vma_node);
rep->tile_mode = nvbo->mode;
rep->tile_flags = nvbo->contig ? 0 : NOUVEAU_GEM_TILE_NONCONTIG;
if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI)
static inline u64 qxl_bo_mmap_offset(struct qxl_bo *bo)
{
- return drm_vma_node_offset_addr(&bo->tbo.vma_node);
+ return drm_vma_node_offset_addr(&bo->tbo.base.vma_node);
}
static inline int qxl_bo_wait(struct qxl_bo *bo, u32 *mem_type,
*/
static inline u64 radeon_bo_mmap_offset(struct radeon_bo *bo)
{
- return drm_vma_node_offset_addr(&bo->tbo.vma_node);
+ return drm_vma_node_offset_addr(&bo->tbo.base.vma_node);
}
extern int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
- drm_vma_offset_remove(&bdev->vma_manager, &bo->vma_node);
+ drm_vma_offset_remove(&bdev->vma_manager, &bo->base.vma_node);
ttm_mem_io_lock(man, false);
ttm_mem_io_free_vm(bo);
ttm_mem_io_unlock(man);
* struct elements we want to use regardless.
*/
reservation_object_init(&bo->base._resv);
+ drm_vma_node_reset(&bo->base.vma_node);
}
atomic_inc(&bo->bdev->glob->bo_count);
- drm_vma_node_reset(&bo->vma_node);
/*
* For ttm_bo_type_device buffers, allocate
*/
if (bo->type == ttm_bo_type_device ||
bo->type == ttm_bo_type_sg)
- ret = drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node,
+ ret = drm_vma_offset_add(&bdev->vma_manager, &bo->base.vma_node,
bo->mem.num_pages);
/* passed reservation objects should already be locked,
{
struct ttm_bo_device *bdev = bo->bdev;
- drm_vma_node_unmap(&bo->vma_node, bdev->dev_mapping);
+ drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping);
ttm_mem_io_free_vm(bo);
}
INIT_LIST_HEAD(&fbo->base.io_reserve_lru);
mutex_init(&fbo->base.wu_mutex);
fbo->base.moving = NULL;
- drm_vma_node_reset(&fbo->base.vma_node);
+ drm_vma_node_reset(&fbo->base.base.vma_node);
atomic_set(&fbo->base.cpu_writers, 0);
kref_init(&fbo->base.list_kref);
}
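The double ".base" that starts appearing here (fbo->base.base.vma_node above, vbo->base.base.vma_node and res->backup->base.base.vma_node in the vmwgfx hunks below) falls out of two levels of embedding: these structures embed a TTM buffer object, and the TTM buffer object now embeds the GEM object that owns the vma_node. A rough sketch of the nesting with placeholder names; the real types are drm_gem_object, ttm_buffer_object and the BO structs referenced in the hunks.

#include <drm/drm_vma_manager.h>

struct gem_like    { struct drm_vma_offset_node vma_node; }; /* owns the node           */
struct ttm_bo_like { struct gem_like base; };                /* bo->base.vma_node       */
struct drv_bo_like { struct ttm_bo_like base; };             /* vbo->base.base.vma_node */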
page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
- vma->vm_pgoff - drm_vma_node_start(&bo->vma_node);
+ vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node);
page_last = vma_pages(vma) + vma->vm_pgoff -
- drm_vma_node_start(&bo->vma_node);
+ drm_vma_node_start(&bo->base.vma_node);
if (unlikely(page_offset >= bo->num_pages)) {
ret = VM_FAULT_SIGBUS;
} else if (unlikely(!page)) {
break;
}
- page->index = drm_vma_node_start(&bo->vma_node) +
+ page->index = drm_vma_node_start(&bo->base.vma_node) +
page_offset;
pfn = page_to_pfn(page);
}
node = drm_vma_offset_lookup_locked(&bdev->vma_manager, offset, pages);
if (likely(node)) {
- bo = container_of(node, struct ttm_buffer_object, vma_node);
+ bo = container_of(node, struct ttm_buffer_object,
+ base.vma_node);
bo = ttm_bo_get_unless_zero(bo);
}
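The lookup side keeps the container_of() pattern; since offsetof() resolves through nested members, only the member path changes from vma_node to base.vma_node. A toy illustration, not taken from the patch, with stand-in structs for drm_gem_object and ttm_buffer_object:

#include <linux/kernel.h>
#include <drm/drm_vma_manager.h>

struct toy_gem { struct drm_vma_offset_node vma_node; };
struct toy_bo  { unsigned long flags; struct toy_gem base; };

/* Recover the outer BO from a node that sits inside its embedded GEM base. */
static struct toy_bo *toy_bo_from_node(struct drm_vma_offset_node *node)
{
	return container_of(node, struct toy_bo, base.vma_node);
}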
static inline u64 virtio_gpu_object_mmap_offset(struct virtio_gpu_object *bo)
{
- return drm_vma_node_offset_addr(&bo->tbo.vma_node);
+ return drm_vma_node_offset_addr(&bo->tbo.base.vma_node);
}
static inline int virtio_gpu_object_reserve(struct virtio_gpu_object *bo,
int virtgpu_gem_prime_mmap(struct drm_gem_object *obj,
struct vm_area_struct *vma)
{
- struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
-
- bo->gem_base.vma_node.vm_node.start = bo->tbo.vma_node.vm_node.start;
return drm_gem_prime_mmap(obj, vma);
}
goto out_no_bo;
rep->handle = handle;
- rep->map_handle = drm_vma_node_offset_addr(&vbo->base.vma_node);
+ rep->map_handle = drm_vma_node_offset_addr(&vbo->base.base.vma_node);
rep->cur_gmr_id = handle;
rep->cur_gmr_offset = 0;
if (ret != 0)
return -EINVAL;
- *offset = drm_vma_node_offset_addr(&out_buf->base.vma_node);
+ *offset = drm_vma_node_offset_addr(&out_buf->base.base.vma_node);
vmw_bo_unreference(&out_buf);
return 0;
}
rep->backup_size = res->backup_size;
if (res->backup) {
rep->buffer_map_handle =
- drm_vma_node_offset_addr(&res->backup->base.vma_node);
+ drm_vma_node_offset_addr(&res->backup->base.base.vma_node);
rep->buffer_size = res->backup->base.num_pages * PAGE_SIZE;
rep->buffer_handle = backup_handle;
} else {
rep->crep.backup_size = srf->res.backup_size;
rep->crep.buffer_handle = backup_handle;
rep->crep.buffer_map_handle =
- drm_vma_node_offset_addr(&srf->res.backup->base.vma_node);
+ drm_vma_node_offset_addr(&srf->res.backup->base.base.vma_node);
rep->crep.buffer_size = srf->res.backup->base.num_pages * PAGE_SIZE;
rep->creq.version = drm_vmw_gb_surface_v1;
* @ddestroy: List head for the delayed destroy list.
* @swap: List head for swap LRU list.
* @moving: Fence set when BO is moving
- * @vma_node: Address space manager node.
* @offset: The current GPU offset, which can have different meanings
* depending on the memory type. For SYSTEM type memory, it should be 0.
* @cur_placement: Hint of current placement.
*/
struct dma_fence *moving;
-
- struct drm_vma_offset_node vma_node;
-
unsigned priority;
/**