 	return usage;
 }
 
+/**
+ * amdgpu_vram_mgr_virt_start - update virtual start address
+ *
+ * @mem: ttm_mem_reg to update
+ * @node: just allocated node
+ *
+ * Calculate a virtual BO start address to easily check if everything is CPU
+ * accessible.
+ */
+static void amdgpu_vram_mgr_virt_start(struct ttm_mem_reg *mem,
+				       struct drm_mm_node *node)
+{
+	unsigned long start;
+
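+	/* Push the virtual start just far enough that a window of
+	 * mem->num_pages pages beginning at mem->start still covers the
+	 * end of this node.
+	 */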
+	start = node->start + node->size;
+	if (start > mem->num_pages)
+		start -= mem->num_pages;
+	else
+		start = 0;
+	mem->start = max(mem->start, start);
+}
+
 /**
  * amdgpu_vram_mgr_new - allocate new ranges
  *
 	pages_left = mem->num_pages;
 
 	spin_lock(&mgr->lock);
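+	/* First pass: grab blocks of the largest power of two that still
+	 * fits in the remaining size; fall through to the loop below on
+	 * failure or once less than pages_per_node pages are left.
+	 */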
-	for (i = 0; i < num_nodes; ++i) {
+	for (i = 0; pages_left >= pages_per_node; ++i) {
+		unsigned long pages = rounddown_pow_of_two(pages_left);
+
+		r = drm_mm_insert_node_in_range(mm, &nodes[i], pages,
+						pages_per_node, 0,
+						place->fpfn, lpfn,
+						mode);
+		if (unlikely(r))
+			break;
+
+		usage += nodes[i].size << PAGE_SHIFT;
+		vis_usage += amdgpu_vram_mgr_vis_size(adev, &nodes[i]);
+		amdgpu_vram_mgr_virt_start(mem, &nodes[i]);
+		pages_left -= pages;
+	}
+
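+	/* Second pass: allocate whatever is left in chunks of at most
+	 * pages_per_node pages.
+	 */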
+	for (; pages_left; ++i) {
 		unsigned long pages = min(pages_left, pages_per_node);
 		uint32_t alignment = mem->page_alignment;
-		unsigned long start;
 
 		if (pages == pages_per_node)
 			alignment = pages_per_node;
 
 		r = drm_mm_insert_node_in_range(mm, &nodes[i],
 						pages, alignment, 0,
 						place->fpfn, lpfn,
 						mode);
 		if (unlikely(r))
 			goto error;
 
 		usage += nodes[i].size << PAGE_SHIFT;
 		vis_usage += amdgpu_vram_mgr_vis_size(adev, &nodes[i]);
-
-		/* Calculate a virtual BO start address to easily check if
-		 * everything is CPU accessible.
-		 */
-		start = nodes[i].start + nodes[i].size;
-		if (start > mem->num_pages)
-			start -= mem->num_pages;
-		else
-			start = 0;
-		mem->start = max(mem->start, start);
+		amdgpu_vram_mgr_virt_start(mem, &nodes[i]);
 		pages_left -= pages;
 	}
 	spin_unlock(&mgr->lock);