*/
uint64_t src;
+ /**
+	 * @job: job to use for hw submission
+ */
+ struct amdgpu_job *job;
+
/**
* @ib: indirect buffer to fill with commands
*/
struct amdgpu_ib *ib;
+ /**
+ * @num_dw_left: number of dw left for the IB
+ */
+ unsigned int num_dw_left;
+
/**
* @func: Function which actually does the update
*/
uint64_t flags);
};
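+
+/**
+ * struct amdgpu_vm_update_funcs - backend interface for VM page table updates
+ *
+ * @prepare: sync to the relevant fences and reserve command space
+ * @update: write @count entries, starting at @pe, into a PD/PT
+ * @commit: submit/flush the accumulated update, optionally returning a fence
+ */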
+struct amdgpu_vm_update_funcs {
+	int (*prepare)(struct amdgpu_vm_update_params *p, void *owner,
+ struct dma_fence *exclusive);
+ int (*update)(struct amdgpu_vm_update_params *p,
+ struct amdgpu_bo *bo, uint64_t pe, uint64_t addr,
+ unsigned count, uint32_t incr, uint64_t flags);
+ int (*commit)(struct amdgpu_vm_update_params *p,
+ struct dma_fence **fence);
+};
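+
+/*
+ * A backend is used by calling prepare() once, update() as often as needed
+ * and commit() at the end, e.g. (sketch, init and error handling omitted):
+ *
+ *	vm->update_funcs->prepare(&params, owner, exclusive);
+ *	vm->update_funcs->update(&params, bo, pe, addr, count, incr, flags);
+ *	vm->update_funcs->commit(&params, &fence);
+ */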
+
struct amdgpu_vm {
/* tree of virtual addresses mapped */
struct rb_root_cached va;
struct amdgpu_vmid *reserved_vmid[AMDGPU_MAX_VMHUBS];
/* Flag to indicate if VM tables are updated by CPU or GPU (SDMA) */
- bool use_cpu_for_update;
+ bool use_cpu_for_update;
+
+ /* Functions to use for VM table updates */
+ const struct amdgpu_vm_update_funcs *update_funcs;
/* Flag to indicate ATS support from PTE for GFX9 */
bool pte_support_ats;
#define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr)))
#define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
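+/* Table update backends: direct CPU writes vs. commands on the SDMA ring */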
+extern const struct amdgpu_vm_update_funcs amdgpu_vm_cpu_funcs;
+extern const struct amdgpu_vm_update_funcs amdgpu_vm_sdma_funcs;
+
void amdgpu_vm_manager_init(struct amdgpu_device *adev);
void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
bool clear);
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
struct amdgpu_bo *bo, bool evicted);
+uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
struct amdgpu_bo *bo);
struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "amdgpu_vm.h"
+#include "amdgpu_object.h"
+#include "amdgpu_trace.h"
+
+/**
+ * amdgpu_vm_cpu_prepare - prepare page table update with the CPU
+ *
+ * @p: see amdgpu_vm_update_params definition
+ * @owner: owner we need to sync to
+ * @exclusive: exclusive move fence we need to sync to
+ *
+ * Returns:
+ * Negative errno, 0 for success.
+ */
+static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p, void *owner,
+ struct dma_fence *exclusive)
+{
+ int r;
+
+ /* Wait for PT BOs to be idle. PTs share the same resv. object
+ * as the root PD BO
+ */
+ r = amdgpu_bo_sync_wait(p->vm->root.base.bo, owner, true);
+ if (unlikely(r))
+ return r;
+
+ /* Wait for any BO move to be completed */
+ if (exclusive) {
+ r = dma_fence_wait(exclusive, true);
+ if (unlikely(r))
+ return r;
+ }
+
+ return 0;
+}
+
+/**
+ * amdgpu_vm_cpu_update - helper to update page tables via CPU
+ *
+ * @p: see amdgpu_vm_update_params definition
+ * @bo: PD/PT to update
+ * @pe: kmap addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: hw access flags
+ *
+ * Write @count PT/PD entries directly.
+ *
+ * Returns:
+ * Always 0.
+ */
+static int amdgpu_vm_cpu_update(struct amdgpu_vm_update_params *p,
+ struct amdgpu_bo *bo, uint64_t pe,
+ uint64_t addr, unsigned count, uint32_t incr,
+ uint64_t flags)
+{
+ unsigned int i;
+ uint64_t value;
+
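+	/* @pe is a byte offset into the BO, turn it into a CPU pointer
+	 * using the kernel mapping of the BO.
+	 */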
+ pe += (unsigned long)amdgpu_bo_kptr(bo);
+
+ trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
+
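+	/* Entries backed by system memory are translated through the GART */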
+ for (i = 0; i < count; i++) {
+ value = p->pages_addr ?
+ amdgpu_vm_map_gart(p->pages_addr, addr) :
+ addr;
+ amdgpu_gmc_set_pte_pde(p->adev, (void *)(uintptr_t)pe,
+ i, value, flags);
+ addr += incr;
+ }
+ return 0;
+}
+
+/**
+ * amdgpu_vm_cpu_commit - commit page table update to the HW
+ *
+ * @p: see amdgpu_vm_update_params definition
+ * @fence: unused
+ *
+ * Make sure that the hardware sees the page table updates.
+ *
+ * Returns:
+ * Always 0.
+ */
+static int amdgpu_vm_cpu_commit(struct amdgpu_vm_update_params *p,
+ struct dma_fence **fence)
+{
+	/* Order the CPU page table writes, then flush the HDP cache so the
+	 * GPU sees them
+	 */
+ mb();
+ amdgpu_asic_flush_hdp(p->adev, NULL);
+ return 0;
+}
+
+const struct amdgpu_vm_update_funcs amdgpu_vm_cpu_funcs = {
+ .prepare = amdgpu_vm_cpu_prepare,
+ .update = amdgpu_vm_cpu_update,
+ .commit = amdgpu_vm_cpu_commit
+};
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "amdgpu_vm.h"
+#include "amdgpu_job.h"
+#include "amdgpu_object.h"
+#include "amdgpu_trace.h"
+
+#define AMDGPU_VM_SDMA_MIN_NUM_DW 256u
+#define AMDGPU_VM_SDMA_MAX_NUM_DW (16u * 1024u)
+
+/**
+ * amdgpu_vm_sdma_prepare - prepare SDMA command submission
+ *
+ * @p: see amdgpu_vm_update_params definition
+ * @owner: owner we need to sync to
+ * @exclusive: exclusive move fence we need to sync to
+ *
+ * Returns:
+ * Negative errno, 0 for success.
+ */
+static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
+ void *owner, struct dma_fence *exclusive)
+{
+ struct amdgpu_bo *root = p->vm->root.base.bo;
+ unsigned int ndw = AMDGPU_VM_SDMA_MIN_NUM_DW;
+ int r;
+
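+	/* ndw counts dwords while amdgpu_job_alloc_with_ib() takes bytes */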
+ r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4, &p->job);
+ if (r)
+ return r;
+
+ r = amdgpu_sync_fence(p->adev, &p->job->sync, exclusive, false);
+ if (r)
+ return r;
+
+ r = amdgpu_sync_resv(p->adev, &p->job->sync, root->tbo.resv,
+ owner, false);
+ if (r)
+ return r;
+
+ p->num_dw_left = ndw;
+ p->ib = &p->job->ibs[0];
+ return 0;
+}
+
+/**
+ * amdgpu_vm_sdma_commit - commit SDMA command submission
+ *
+ * @p: see amdgpu_vm_update_params definition
+ * @fence: resulting fence
+ *
+ * Returns:
+ * Negative errno, 0 for success.
+ */
+static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
+ struct dma_fence **fence)
+{
+ struct amdgpu_bo *root = p->vm->root.base.bo;
+ struct amdgpu_ring *ring;
+ struct dma_fence *f;
+ int r;
+
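+	/* The VM entity is scheduled on an SDMA ring, recover it from the rq */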
+ ring = container_of(p->vm->entity.rq->sched, struct amdgpu_ring, sched);
+
+ WARN_ON(p->ib->length_dw == 0);
+ amdgpu_ring_pad_ib(ring, p->ib);
+ WARN_ON(p->ib->length_dw > p->num_dw_left);
+ r = amdgpu_job_submit(p->job, &p->vm->entity,
+ AMDGPU_FENCE_OWNER_VM, &f);
+ if (r)
+ goto error;
+
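+	/* Add the fence as shared to the root PD so following ops can sync */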
+ amdgpu_bo_fence(root, f, true);
+ if (fence)
+ swap(*fence, f);
+ dma_fence_put(f);
+ return 0;
+
+error:
+ amdgpu_job_free(p->job);
+ return r;
+}
+
+/**
+ * amdgpu_vm_sdma_copy_ptes - copy the PTEs from mapping
+ *
+ * @p: see amdgpu_vm_update_params definition
+ * @bo: PD/PT to update
+ * @pe: addr of the page entry
+ * @count: number of page entries to copy
+ *
+ * Traces the parameters and calls the DMA function to copy the PTEs.
+ */
+static void amdgpu_vm_sdma_copy_ptes(struct amdgpu_vm_update_params *p,
+ struct amdgpu_bo *bo, uint64_t pe,
+ unsigned count)
+{
+ uint64_t src = p->ib->gpu_addr;
+
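+	/* The source values were staged at the tail of the IB, see
+	 * amdgpu_vm_sdma_update().
+	 */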
+ src += p->num_dw_left * 4;
+
+ pe += amdgpu_bo_gpu_offset(bo);
+ trace_amdgpu_vm_copy_ptes(pe, src, count);
+
+ amdgpu_vm_copy_pte(p->adev, p->ib, pe, src, count);
+}
+
+/**
+ * amdgpu_vm_sdma_set_ptes - helper to call the right asic function
+ *
+ * @p: see amdgpu_vm_update_params definition
+ * @bo: PD/PT to update
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: hw access flags
+ *
+ * Traces the parameters and calls the right asic functions
+ * to setup the page table using the DMA.
+ */
+static void amdgpu_vm_sdma_set_ptes(struct amdgpu_vm_update_params *p,
+ struct amdgpu_bo *bo, uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint64_t flags)
+{
+ pe += amdgpu_bo_gpu_offset(bo);
+ trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
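+	/* Inline writes are cheaper for a few entries, the PTE_PDE command
+	 * wins for larger runs.
+	 */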
+ if (count < 3) {
+ amdgpu_vm_write_pte(p->adev, p->ib, pe, addr | flags,
+ count, incr);
+ } else {
+ amdgpu_vm_set_pte_pde(p->adev, p->ib, pe, addr,
+ count, incr, flags);
+ }
+}
+
+/**
+ * amdgpu_vm_sdma_update - execute VM update
+ *
+ * @p: see amdgpu_vm_update_params definition
+ * @bo: PD/PT to update
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: hw access flags
+ *
+ * Reserve space in the IB, set up the mapping buffer on demand and write
+ * commands to the IB.
+ *
+ * Returns:
+ * Negative errno, 0 for success.
+ */
+static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
+ struct amdgpu_bo *bo, uint64_t pe,
+ uint64_t addr, unsigned count, uint32_t incr,
+ uint64_t flags)
+{
+ unsigned int i, ndw, nptes;
+ uint64_t *pte;
+ int r;
+
+ do {
+ ndw = p->num_dw_left;
+ ndw -= p->ib->length_dw;
+
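+		/* Flush and start a new job when the IB runs out of space */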
+ if (ndw < 32) {
+ r = amdgpu_vm_sdma_commit(p, NULL);
+ if (r)
+ return r;
+
+ /* estimate how many dw we need */
+ ndw = 32;
+ if (p->pages_addr)
+ ndw += count * 2;
+ ndw = max(ndw, AMDGPU_VM_SDMA_MIN_NUM_DW);
+ ndw = min(ndw, AMDGPU_VM_SDMA_MAX_NUM_DW);
+
+ r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4, &p->job);
+ if (r)
+ return r;
+
+ p->num_dw_left = ndw;
+ p->ib = &p->job->ibs[0];
+ }
+
+ if (!p->pages_addr) {
+ /* set page commands needed */
+ if (bo->shadow)
+ amdgpu_vm_sdma_set_ptes(p, bo->shadow, pe, addr,
+ count, incr, flags);
+ amdgpu_vm_sdma_set_ptes(p, bo, pe, addr, count,
+ incr, flags);
+ return 0;
+ }
+
+ /* copy commands needed */
+ ndw -= p->adev->vm_manager.vm_pte_funcs->copy_pte_num_dw *
+ (bo->shadow ? 2 : 1);
+
+ /* for padding */
+ ndw -= 7;
+
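+		/* Each staged PTE value takes two dwords */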
+ nptes = min(count, ndw / 2);
+
+ /* Put the PTEs at the end of the IB. */
+ p->num_dw_left -= nptes * 2;
+ pte = (uint64_t *)&(p->ib->ptr[p->num_dw_left]);
+ for (i = 0; i < nptes; ++i, addr += incr) {
+ pte[i] = amdgpu_vm_map_gart(p->pages_addr, addr);
+ pte[i] |= flags;
+ }
+
+ if (bo->shadow)
+ amdgpu_vm_sdma_copy_ptes(p, bo->shadow, pe, nptes);
+ amdgpu_vm_sdma_copy_ptes(p, bo, pe, nptes);
+
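+		/* Advance by 8 bytes per written entry */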
+ pe += nptes * 8;
+ count -= nptes;
+ } while (count);
+
+ return 0;
+}
+
+const struct amdgpu_vm_update_funcs amdgpu_vm_sdma_funcs = {
+ .prepare = amdgpu_vm_sdma_prepare,
+ .update = amdgpu_vm_sdma_update,
+ .commit = amdgpu_vm_sdma_commit
+};