return gpe;
}
+/*
+ * NPU2 ATS
+ */
+/* Maximum possible number of ATSD MMIO registers per NPU */
+#define NV_NMMU_ATSD_REGS 8
+
+/* An NPU descriptor, valid for POWER9 only */
+struct npu {
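+ /* First index into the per-context npdev[][] table for this NPU */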
+ int index;
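+ /* Windows ioremap'ed from the "ibm,mmio-atsd" device tree property */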
+ __be64 *mmio_atsd_regs[NV_NMMU_ATSD_REGS];
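+ /* Number of mapped entries in mmio_atsd_regs[] */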
+ unsigned int mmio_atsd_count;
+
+ /* Bitmask for MMIO register usage */
+ unsigned long mmio_atsd_usage;
+
+ /* Do we need to explicitly flush the nest mmu? */
+ bool nmmu_flush;
+};
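+
+/*
+ * ATSD registers are handed out by get_mmio_atsd_reg(), which picks a
+ * free slot tracked in mmio_atsd_usage and returns its index, or a
+ * negative value when none is currently available.
+ */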
+
/* Maximum number of nvlinks per npu */
#define NV_MAX_LINKS 6
int i, j;
struct npu *npu;
struct pci_dev *npdev;
- struct pnv_phb *nphb;
for (i = 0; i <= max_npu2_index; i++) {
mmio_atsd_reg[i].reg = -1;
if (!npdev)
continue;
- nphb = pci_bus_to_host(npdev->bus)->private_data;
- npu = &nphb->npu;
+ npu = pci_bus_to_host(npdev->bus)->npu;
mmio_atsd_reg[i].npu = npu;
mmio_atsd_reg[i].reg = get_mmio_atsd_reg(npu);
while (mmio_atsd_reg[i].reg < 0) {
struct pnv_phb *nphb;
struct npu *npu;
struct npu_context *npu_context;
+ struct pci_controller *hose;
/*
* At present we don't support GPUs connected to multiple NPUs and I'm
return ERR_PTR(-EINVAL);
}
- nphb = pci_bus_to_host(npdev->bus)->private_data;
- npu = &nphb->npu;
+ hose = pci_bus_to_host(npdev->bus);
+ nphb = hose->private_data;
+ npu = hose->npu;
/*
* Setup the NPU context table for a particular GPU. These need to be
*/
WRITE_ONCE(npu_context->npdev[npu->index][nvlink_index], npdev);
- if (!nphb->npu.nmmu_flush) {
+ if (!npu->nmmu_flush) {
/*
* If we're not explicitly flushing ourselves we need to mark
* the thread for global flushes
struct pci_dev *npdev = pnv_pci_get_npu_dev(gpdev, 0);
struct device_node *nvlink_dn;
u32 nvlink_index;
+ struct pci_controller *hose;
if (WARN_ON(!npdev))
return;
if (!firmware_has_feature(FW_FEATURE_OPAL))
return;
- nphb = pci_bus_to_host(npdev->bus)->private_data;
- npu = &nphb->npu;
+ hose = pci_bus_to_host(npdev->bus);
+ nphb = hose->private_data;
+ npu = hose->npu;
nvlink_dn = of_parse_phandle(npdev->dev.of_node, "ibm,nvlink", 0);
if (WARN_ON(of_property_read_u32(nvlink_dn, "ibm,npu-link-index",
&nvlink_index)))
struct pci_dev *gpdev;
static int npu_index;
uint64_t rc = 0;
+ struct pci_controller *hose = phb->hose;
+ struct npu *npu;
+ int ret;
+
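+ /*
+ * Allocate the NPU descriptor; it is attached to the PHB's
+ * pci_controller as hose->npu once set up below.
+ */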
+ npu = kzalloc(sizeof(*npu), GFP_KERNEL);
+ if (!npu)
+ return -ENOMEM;
- phb->npu.nmmu_flush =
- of_property_read_bool(phb->hose->dn, "ibm,nmmu-flush");
+ npu->nmmu_flush = of_property_read_bool(hose->dn, "ibm,nmmu-flush");
for_each_child_of_node(phb->hose->dn, dn) {
gpdev = pnv_pci_get_gpu_dev(get_pci_dev(dn));
if (gpdev) {
}
}
- for (i = 0; !of_property_read_u64_index(phb->hose->dn, "ibm,mmio-atsd",
+ /* Never map more windows than mmio_atsd_regs[] can hold */
+ for (i = 0; i < ARRAY_SIZE(npu->mmio_atsd_regs) &&
+ !of_property_read_u64_index(hose->dn, "ibm,mmio-atsd",
i, &mmio_atsd); i++)
- phb->npu.mmio_atsd_regs[i] = ioremap(mmio_atsd, 32);
+ npu->mmio_atsd_regs[i] = ioremap(mmio_atsd, 32);
- pr_info("NPU%lld: Found %d MMIO ATSD registers", phb->opal_id, i);
- phb->npu.mmio_atsd_count = i;
- phb->npu.mmio_atsd_usage = 0;
+ pr_info("NPU%d: Found %d MMIO ATSD registers", hose->global_number, i);
+ npu->mmio_atsd_count = i;
+ npu->mmio_atsd_usage = 0;
npu_index++;
- if (WARN_ON(npu_index >= NV_MAX_NPUS))
- return -ENOSPC;
+ if (WARN_ON(npu_index >= NV_MAX_NPUS)) {
+ ret = -ENOSPC;
+ goto fail_exit;
+ }
max_npu2_index = npu_index;
- phb->npu.index = npu_index;
+ npu->index = npu_index;
+ hose->npu = npu;
return 0;
+
+fail_exit:
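+ /* Unmap any ATSD registers mapped above before freeing the descriptor */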
+ for (i = 0; i < npu->mmio_atsd_count; ++i)
+ iounmap(npu->mmio_atsd_regs[i]);
+
+ kfree(npu);
+
+ return ret;
}