int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
		       u32 ip_instance, u32 ring,
		       struct amdgpu_ring **out_ring);
+void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes);
void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain);
bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages);
/* Report how many bytes have really been moved for the last command
 * submission. This can result in a debt that can stop buffer migrations
* temporarily.
*/
-static void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev,
-					 u64 num_bytes)
+void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes)
{
	spin_lock(&adev->mm_stats.lock);
	adev->mm_stats.accum_us -= bytes_to_us(adev, num_bytes);
	spin_unlock(&adev->mm_stats.lock);
}
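For context on the debt: bytes_to_us() converts the moved byte count into
microseconds of budget, so a large move drives accum_us negative, and the CS
ioctl stops granting migration bandwidth until the balance recovers over time.
A minimal sketch of such a conversion, assuming the limit is kept as
log2_max_MBps (that field name is an assumption, not taken from this patch):

/* Illustrative sketch only, not the exact helper from amdgpu_cs.c.
 * Assumes the throttle limit is stored as the log2 of a MB/s rate, so
 * the conversion from bytes to microseconds is a plain shift: at 1 MB/s
 * the driver moves roughly one byte per microsecond.
 */
static s64 bytes_to_us(struct amdgpu_device *adev, u64 bytes)
{
	/* No configured limit means nothing is charged. */
	if (!adev->mm_stats.log2_max_MBps)
		return 0;

	return bytes >> adev->mm_stats.log2_max_MBps;
}

Keeping the limit as a power of two makes the per-submission charge a single
shift under the spinlock.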
	struct amdgpu_bo *bo;
	enum ttm_bo_type type;
	unsigned long page_align;
+	u64 initial_bytes_moved;
	size_t acc_size;
	int r;
	if (!resv) {
		bool locked;

		locked = ww_mutex_trylock(&bo->tbo.ttm_resv.lock);
		WARN_ON(!locked);
	}
+
+	initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
	r = ttm_bo_init(&adev->mman.bdev, &bo->tbo, size, type,
			&bo->placement, page_align, !kernel, NULL,
			acc_size, sg, resv ? resv : &bo->tbo.ttm_resv,
			&amdgpu_ttm_bo_destroy);
+	amdgpu_cs_report_moved_bytes(adev,
+		atomic64_read(&adev->num_bytes_moved) - initial_bytes_moved);
+
	if (unlikely(r != 0)) {
		if (!resv)
			ww_mutex_unlock(&bo->tbo.resv->lock);
		return r;
	}
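The bracketing above is the whole pattern: sample the global num_bytes_moved
counter, run the call that may migrate buffers, and charge the delta to the
throttle. Note that the charge happens before the error check, since moves
that already happened consumed bandwidth even if ttm_bo_init() ultimately
failed. A minimal sketch of the same pattern around an arbitrary operation,
where do_ttm_operation() is a hypothetical placeholder:

/* Sketch of the bracketing pattern used above. do_ttm_operation() is a
 * placeholder, not a real TTM entry point.
 */
static int amdgpu_charged_op(struct amdgpu_device *adev)
{
	u64 before = atomic64_read(&adev->num_bytes_moved);
	int r = do_ttm_operation();

	/* Charge the delta even on failure: moves that already happened
	 * cost memory bandwidth regardless of the final return code.
	 */
	amdgpu_cs_report_moved_bytes(adev,
		atomic64_read(&adev->num_bytes_moved) - before);
	return r;
}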