1 From ab6a07d577dbd45d00a1738a6b5a28a6666be754 Mon Sep 17 00:00:00 2001
2 From: Hou Zhiqiang <Zhiqiang.Hou@nxp.com>
3 Date: Tue, 25 Jun 2019 09:09:07 +0000
4 Subject: [PATCH] PCI: mobiveil: Refactor Mobiveil PCIe Host Bridge IP driver
6 Refactor the Mobiveil PCIe Host Bridge IP driver to make
7 it easier to add support for both RC and EP mode drivers.
8 This patch moves the Mobiveil driver to a new directory
9 'drivers/pci/controller/mobiveil' and refactors it according
10 to the RC and EP abstraction.
12 Signed-off-by: Hou Zhiqiang <Zhiqiang.Hou@nxp.com>
13 Reviewed-by: Minghuan Lian <Minghuan.Lian@nxp.com>
14 Reviewed-by: Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
17 drivers/pci/controller/Kconfig | 11 +-
18 drivers/pci/controller/Makefile | 2 +-
19 drivers/pci/controller/mobiveil/Kconfig | 24 +
20 drivers/pci/controller/mobiveil/Makefile | 4 +
21 .../pci/controller/mobiveil/pcie-mobiveil-host.c | 611 +++++++++++++
22 .../pci/controller/mobiveil/pcie-mobiveil-plat.c | 59 ++
23 drivers/pci/controller/mobiveil/pcie-mobiveil.c | 227 +++++
24 drivers/pci/controller/mobiveil/pcie-mobiveil.h | 189 ++++
25 drivers/pci/controller/pcie-mobiveil.c | 964 ---------------------
26 10 files changed, 1117 insertions(+), 976 deletions(-)
27 create mode 100644 drivers/pci/controller/mobiveil/Kconfig
28 create mode 100644 drivers/pci/controller/mobiveil/Makefile
29 create mode 100644 drivers/pci/controller/mobiveil/pcie-mobiveil-host.c
30 create mode 100644 drivers/pci/controller/mobiveil/pcie-mobiveil-plat.c
31 create mode 100644 drivers/pci/controller/mobiveil/pcie-mobiveil.c
32 create mode 100644 drivers/pci/controller/mobiveil/pcie-mobiveil.h
33 delete mode 100644 drivers/pci/controller/pcie-mobiveil.c
37 @@ -12502,7 +12502,7 @@ M: Hou Zhiqiang <Zhiqiang.Hou@nxp.com>
38 L: linux-pci@vger.kernel.org
40 F: Documentation/devicetree/bindings/pci/mobiveil-pcie.txt
41 -F: drivers/pci/controller/pcie-mobiveil.c
42 +F: drivers/pci/controller/mobiveil/pcie-mobiveil*
44 PCI DRIVER FOR MVEBU (Marvell Armada 370 and Armada XP SOC support)
45 M: Thomas Petazzoni <thomas.petazzoni@bootlin.com>
46 --- a/drivers/pci/controller/Kconfig
47 +++ b/drivers/pci/controller/Kconfig
48 @@ -241,16 +241,6 @@ config PCIE_MEDIATEK
49 Say Y here if you want to enable PCIe controller support on
53 - bool "Mobiveil AXI PCIe controller"
54 - depends on ARCH_ZYNQMP || COMPILE_TEST
56 - depends on PCI_MSI_IRQ_DOMAIN
58 - Say Y here if you want to enable support for the Mobiveil AXI PCIe
59 - Soft IP. It has up to 8 outbound and inbound windows
60 - for address translation and it is a PCIe Gen4 IP.
62 config PCIE_TANGO_SMP8759
63 bool "Tango SMP8759 PCIe controller (DANGEROUS)"
64 depends on ARCH_TANGO && PCI_MSI && OF
65 @@ -289,4 +279,5 @@ config PCI_HYPERV_INTERFACE
66 have a common interface with the Hyper-V PCI frontend driver.
68 source "drivers/pci/controller/dwc/Kconfig"
69 +source "drivers/pci/controller/mobiveil/Kconfig"
71 --- a/drivers/pci/controller/Makefile
72 +++ b/drivers/pci/controller/Makefile
73 @@ -27,11 +27,11 @@ obj-$(CONFIG_PCIE_ROCKCHIP) += pcie-rock
74 obj-$(CONFIG_PCIE_ROCKCHIP_EP) += pcie-rockchip-ep.o
75 obj-$(CONFIG_PCIE_ROCKCHIP_HOST) += pcie-rockchip-host.o
76 obj-$(CONFIG_PCIE_MEDIATEK) += pcie-mediatek.o
77 -obj-$(CONFIG_PCIE_MOBIVEIL) += pcie-mobiveil.o
78 obj-$(CONFIG_PCIE_TANGO_SMP8759) += pcie-tango.o
79 obj-$(CONFIG_VMD) += vmd.o
80 # pcie-hisi.o quirks are needed even without CONFIG_PCIE_DW
85 # The following drivers are for devices that use the generic ACPI
87 +++ b/drivers/pci/controller/mobiveil/Kconfig
89 +# SPDX-License-Identifier: GPL-2.0
91 +menu "Mobiveil PCIe Core Support"
97 +config PCIE_MOBIVEIL_HOST
99 + depends on PCI_MSI_IRQ_DOMAIN
100 + select PCIE_MOBIVEIL
102 +config PCIE_MOBIVEIL_PLAT
103 + bool "Mobiveil AXI PCIe controller"
104 + depends on ARCH_ZYNQMP || COMPILE_TEST
106 + select PCIE_MOBIVEIL_HOST
108 + Say Y here if you want to enable support for the Mobiveil AXI PCIe
109 + Soft IP. It has up to 8 outbound and inbound windows
110 + for address translation and it is a PCIe Gen4 IP.
114 +++ b/drivers/pci/controller/mobiveil/Makefile
116 +# SPDX-License-Identifier: GPL-2.0
117 +obj-$(CONFIG_PCIE_MOBIVEIL) += pcie-mobiveil.o
118 +obj-$(CONFIG_PCIE_MOBIVEIL_HOST) += pcie-mobiveil-host.o
119 +obj-$(CONFIG_PCIE_MOBIVEIL_PLAT) += pcie-mobiveil-plat.o
121 +++ b/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c
123 +// SPDX-License-Identifier: GPL-2.0
125 + * PCIe host controller driver for Mobiveil PCIe Host controller
127 + * Copyright (c) 2018 Mobiveil Inc.
128 + * Copyright 2019 NXP
130 + * Author: Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
131 + * Refactor: Zhiqiang Hou <Zhiqiang.Hou@nxp.com>
134 +#include <linux/init.h>
135 +#include <linux/interrupt.h>
136 +#include <linux/irq.h>
137 +#include <linux/irqchip/chained_irq.h>
138 +#include <linux/irqdomain.h>
139 +#include <linux/kernel.h>
140 +#include <linux/module.h>
141 +#include <linux/msi.h>
142 +#include <linux/of_address.h>
143 +#include <linux/of_irq.h>
144 +#include <linux/of_platform.h>
145 +#include <linux/of_pci.h>
146 +#include <linux/pci.h>
147 +#include <linux/platform_device.h>
148 +#include <linux/slab.h>
150 +#include "pcie-mobiveil.h"
152 +static bool mobiveil_pcie_valid_device(struct pci_bus *bus, unsigned int devfn)
154 + struct mobiveil_pcie *pcie = bus->sysdata;
156 + /* Only one device down on each root port */
157 + if ((bus->number == pcie->rp.root_bus_nr) && (devfn > 0))
161 + * Do not read more than one device on the bus directly
164 + if ((bus->primary == pcie->rp.root_bus_nr) && (PCI_SLOT(devfn) > 0))
171 + * mobiveil_pcie_map_bus - routine to get the configuration base of either
172 + * root port or endpoint
174 +static void __iomem *mobiveil_pcie_map_bus(struct pci_bus *bus,
175 + unsigned int devfn, int where)
177 + struct mobiveil_pcie *pcie = bus->sysdata;
180 + if (!mobiveil_pcie_valid_device(bus, devfn))
183 + /* RC config access */
184 + if (bus->number == pcie->rp.root_bus_nr)
185 + return pcie->csr_axi_slave_base + where;
188 + * EP config access (in Config/APIO space)
189 + * Program PEX Address base (31..16 bits) with appropriate value
190 + * (BDF) in PAB_AXI_AMAP_PEX_WIN_L0 Register.
191 + * Relies on pci_lock serialization
193 + value = bus->number << PAB_BUS_SHIFT |
194 + PCI_SLOT(devfn) << PAB_DEVICE_SHIFT |
195 + PCI_FUNC(devfn) << PAB_FUNCTION_SHIFT;
197 + csr_writel(pcie, value, PAB_AXI_AMAP_PEX_WIN_L(WIN_NUM_0));
199 + return pcie->rp.config_axi_slave_base + where;
202 +static struct pci_ops mobiveil_pcie_ops = {
203 + .map_bus = mobiveil_pcie_map_bus,
204 + .read = pci_generic_config_read,
205 + .write = pci_generic_config_write,
208 +static void mobiveil_pcie_isr(struct irq_desc *desc)
210 + struct irq_chip *chip = irq_desc_get_chip(desc);
211 + struct mobiveil_pcie *pcie = irq_desc_get_handler_data(desc);
212 + struct device *dev = &pcie->pdev->dev;
213 + struct mobiveil_msi *msi = &pcie->rp.msi;
214 + u32 msi_data, msi_addr_lo, msi_addr_hi;
215 + u32 intr_status, msi_status;
216 + unsigned long shifted_status;
217 + u32 bit, virq, val, mask;
220 + * The core provides a single interrupt for both INTx/MSI messages.
221 + * So we'll read both INTx and MSI status
224 + chained_irq_enter(chip, desc);
226 + /* read INTx status */
227 + val = csr_readl(pcie, PAB_INTP_AMBA_MISC_STAT);
228 + mask = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
229 + intr_status = val & mask;
232 + if (intr_status & PAB_INTP_INTX_MASK) {
233 + shifted_status = csr_readl(pcie, PAB_INTP_AMBA_MISC_STAT);
234 + shifted_status &= PAB_INTP_INTX_MASK;
235 + shifted_status >>= PAB_INTX_START;
237 + for_each_set_bit(bit, &shifted_status, PCI_NUM_INTX) {
238 + virq = irq_find_mapping(pcie->rp.intx_domain,
241 + generic_handle_irq(virq);
243 + dev_err_ratelimited(dev, "unexpected IRQ, INT%d\n",
246 + /* clear interrupt handled */
247 + csr_writel(pcie, 1 << (PAB_INTX_START + bit),
248 + PAB_INTP_AMBA_MISC_STAT);
251 + shifted_status = csr_readl(pcie,
252 + PAB_INTP_AMBA_MISC_STAT);
253 + shifted_status &= PAB_INTP_INTX_MASK;
254 + shifted_status >>= PAB_INTX_START;
255 + } while (shifted_status != 0);
258 + /* read extra MSI status register */
259 + msi_status = readl_relaxed(pcie->apb_csr_base + MSI_STATUS_OFFSET);
261 + /* handle MSI interrupts */
262 + while (msi_status & 1) {
263 + msi_data = readl_relaxed(pcie->apb_csr_base + MSI_DATA_OFFSET);
266 + * MSI_STATUS_OFFSET register gets updated to zero
267 + * once we pop not only the MSI data but also address
268 + * from MSI hardware FIFO. So keeping these following
271 + msi_addr_lo = readl_relaxed(pcie->apb_csr_base +
272 + MSI_ADDR_L_OFFSET);
273 + msi_addr_hi = readl_relaxed(pcie->apb_csr_base +
274 + MSI_ADDR_H_OFFSET);
275 + dev_dbg(dev, "MSI registers, data: %08x, addr: %08x:%08x\n",
276 + msi_data, msi_addr_hi, msi_addr_lo);
278 + virq = irq_find_mapping(msi->dev_domain, msi_data);
280 + generic_handle_irq(virq);
282 + msi_status = readl_relaxed(pcie->apb_csr_base +
283 + MSI_STATUS_OFFSET);
286 + /* Clear the interrupt status */
287 + csr_writel(pcie, intr_status, PAB_INTP_AMBA_MISC_STAT);
288 + chained_irq_exit(chip, desc);
291 +static int mobiveil_pcie_parse_dt(struct mobiveil_pcie *pcie)
293 + struct device *dev = &pcie->pdev->dev;
294 + struct platform_device *pdev = pcie->pdev;
295 + struct device_node *node = dev->of_node;
296 + struct resource *res;
298 + /* map config resource */
299 + res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
300 + "config_axi_slave");
301 + pcie->rp.config_axi_slave_base = devm_pci_remap_cfg_resource(dev, res);
302 + if (IS_ERR(pcie->rp.config_axi_slave_base))
303 + return PTR_ERR(pcie->rp.config_axi_slave_base);
304 + pcie->rp.ob_io_res = res;
306 + /* map csr resource */
307 + res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
309 + pcie->csr_axi_slave_base = devm_pci_remap_cfg_resource(dev, res);
310 + if (IS_ERR(pcie->csr_axi_slave_base))
311 + return PTR_ERR(pcie->csr_axi_slave_base);
312 + pcie->pcie_reg_base = res->start;
314 + /* read the number of windows requested */
315 + if (of_property_read_u32(node, "apio-wins", &pcie->apio_wins))
316 + pcie->apio_wins = MAX_PIO_WINDOWS;
318 + if (of_property_read_u32(node, "ppio-wins", &pcie->ppio_wins))
319 + pcie->ppio_wins = MAX_PIO_WINDOWS;
324 +static void mobiveil_pcie_enable_msi(struct mobiveil_pcie *pcie)
326 + phys_addr_t msg_addr = pcie->pcie_reg_base;
327 + struct mobiveil_msi *msi = &pcie->rp.msi;
329 + msi->num_of_vectors = PCI_NUM_MSI;
330 + msi->msi_pages_phys = (phys_addr_t)msg_addr;
332 + writel_relaxed(lower_32_bits(msg_addr),
333 + pcie->apb_csr_base + MSI_BASE_LO_OFFSET);
334 + writel_relaxed(upper_32_bits(msg_addr),
335 + pcie->apb_csr_base + MSI_BASE_HI_OFFSET);
336 + writel_relaxed(4096, pcie->apb_csr_base + MSI_SIZE_OFFSET);
337 + writel_relaxed(1, pcie->apb_csr_base + MSI_ENABLE_OFFSET);
340 +static int mobiveil_host_init(struct mobiveil_pcie *pcie)
342 + u32 value, pab_ctrl, type;
343 + struct resource_entry *win;
345 + /* setup bus numbers */
346 + value = csr_readl(pcie, PCI_PRIMARY_BUS);
347 + value &= 0xff000000;
348 + value |= 0x00ff0100;
349 + csr_writel(pcie, value, PCI_PRIMARY_BUS);
352 + * program Bus Master Enable Bit in Command Register in PAB Config
355 + value = csr_readl(pcie, PCI_COMMAND);
356 + value |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER;
357 + csr_writel(pcie, value, PCI_COMMAND);
360 + * program PIO Enable Bit to 1 (and PEX PIO Enable to 1) in PAB_CTRL
363 + pab_ctrl = csr_readl(pcie, PAB_CTRL);
364 + pab_ctrl |= (1 << AMBA_PIO_ENABLE_SHIFT) | (1 << PEX_PIO_ENABLE_SHIFT);
365 + csr_writel(pcie, pab_ctrl, PAB_CTRL);
368 + * program PIO Enable Bit to 1 and Config Window Enable Bit to 1 in
369 + * PAB_AXI_PIO_CTRL Register
371 + value = csr_readl(pcie, PAB_AXI_PIO_CTRL);
372 + value |= APIO_EN_MASK;
373 + csr_writel(pcie, value, PAB_AXI_PIO_CTRL);
375 + /* Enable PCIe PIO master */
376 + value = csr_readl(pcie, PAB_PEX_PIO_CTRL);
377 + value |= 1 << PIO_ENABLE_SHIFT;
378 + csr_writel(pcie, value, PAB_PEX_PIO_CTRL);
381 + * we'll program one outbound window for config reads and
382 + * another default inbound window for all the upstream traffic
383 + * rest of the outbound windows will be configured according to
384 + * the "ranges" field defined in device tree
387 + /* config outbound translation window */
388 + program_ob_windows(pcie, WIN_NUM_0, pcie->rp.ob_io_res->start, 0,
389 + CFG_WINDOW_TYPE, resource_size(pcie->rp.ob_io_res));
391 + /* memory inbound translation window */
392 + program_ib_windows(pcie, WIN_NUM_0, 0, 0, MEM_WINDOW_TYPE, IB_WIN_SIZE);
394 + /* Get the I/O and memory ranges from DT */
395 + resource_list_for_each_entry(win, &pcie->resources) {
396 + if (resource_type(win->res) == IORESOURCE_MEM) {
397 + type = MEM_WINDOW_TYPE;
398 + } else if (resource_type(win->res) == IORESOURCE_IO) {
399 + type = IO_WINDOW_TYPE;
400 + } else if (resource_type(win->res) == IORESOURCE_BUS) {
401 + pcie->rp.root_bus_nr = win->res->start;
407 + /* configure outbound translation window */
408 + program_ob_windows(pcie, pcie->ob_wins_configured,
410 + win->res->start - win->offset,
411 + type, resource_size(win->res));
414 + /* fixup for PCIe class register */
415 + value = csr_readl(pcie, PAB_INTP_AXI_PIO_CLASS);
417 + value |= (PCI_CLASS_BRIDGE_PCI << 16);
418 + csr_writel(pcie, value, PAB_INTP_AXI_PIO_CLASS);
423 +static void mobiveil_mask_intx_irq(struct irq_data *data)
425 + struct irq_desc *desc = irq_to_desc(data->irq);
426 + struct mobiveil_pcie *pcie;
427 + unsigned long flags;
428 + u32 mask, shifted_val;
430 + pcie = irq_desc_get_chip_data(desc);
431 + mask = 1 << ((data->hwirq + PAB_INTX_START) - 1);
432 + raw_spin_lock_irqsave(&pcie->rp.intx_mask_lock, flags);
433 + shifted_val = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
434 + shifted_val &= ~mask;
435 + csr_writel(pcie, shifted_val, PAB_INTP_AMBA_MISC_ENB);
436 + raw_spin_unlock_irqrestore(&pcie->rp.intx_mask_lock, flags);
439 +static void mobiveil_unmask_intx_irq(struct irq_data *data)
441 + struct irq_desc *desc = irq_to_desc(data->irq);
442 + struct mobiveil_pcie *pcie;
443 + unsigned long flags;
444 + u32 shifted_val, mask;
446 + pcie = irq_desc_get_chip_data(desc);
447 + mask = 1 << ((data->hwirq + PAB_INTX_START) - 1);
448 + raw_spin_lock_irqsave(&pcie->rp.intx_mask_lock, flags);
449 + shifted_val = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
450 + shifted_val |= mask;
451 + csr_writel(pcie, shifted_val, PAB_INTP_AMBA_MISC_ENB);
452 + raw_spin_unlock_irqrestore(&pcie->rp.intx_mask_lock, flags);
455 +static struct irq_chip intx_irq_chip = {
456 + .name = "mobiveil_pcie:intx",
457 + .irq_enable = mobiveil_unmask_intx_irq,
458 + .irq_disable = mobiveil_mask_intx_irq,
459 + .irq_mask = mobiveil_mask_intx_irq,
460 + .irq_unmask = mobiveil_unmask_intx_irq,
463 +/* routine to setup the INTx related data */
464 +static int mobiveil_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
465 + irq_hw_number_t hwirq)
467 + irq_set_chip_and_handler(irq, &intx_irq_chip, handle_level_irq);
468 + irq_set_chip_data(irq, domain->host_data);
473 +/* INTx domain operations structure */
474 +static const struct irq_domain_ops intx_domain_ops = {
475 + .map = mobiveil_pcie_intx_map,
478 +static struct irq_chip mobiveil_msi_irq_chip = {
479 + .name = "Mobiveil PCIe MSI",
480 + .irq_mask = pci_msi_mask_irq,
481 + .irq_unmask = pci_msi_unmask_irq,
484 +static struct msi_domain_info mobiveil_msi_domain_info = {
485 + .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
486 + MSI_FLAG_PCI_MSIX),
487 + .chip = &mobiveil_msi_irq_chip,
490 +static void mobiveil_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
492 + struct mobiveil_pcie *pcie = irq_data_get_irq_chip_data(data);
493 + phys_addr_t addr = pcie->pcie_reg_base + (data->hwirq * sizeof(int));
495 + msg->address_lo = lower_32_bits(addr);
496 + msg->address_hi = upper_32_bits(addr);
497 + msg->data = data->hwirq;
499 + dev_dbg(&pcie->pdev->dev, "msi#%d address_hi %#x address_lo %#x\n",
500 + (int)data->hwirq, msg->address_hi, msg->address_lo);
503 +static int mobiveil_msi_set_affinity(struct irq_data *irq_data,
504 + const struct cpumask *mask, bool force)
509 +static struct irq_chip mobiveil_msi_bottom_irq_chip = {
510 + .name = "Mobiveil MSI",
511 + .irq_compose_msi_msg = mobiveil_compose_msi_msg,
512 + .irq_set_affinity = mobiveil_msi_set_affinity,
515 +static int mobiveil_irq_msi_domain_alloc(struct irq_domain *domain,
517 + unsigned int nr_irqs, void *args)
519 + struct mobiveil_pcie *pcie = domain->host_data;
520 + struct mobiveil_msi *msi = &pcie->rp.msi;
523 + WARN_ON(nr_irqs != 1);
524 + mutex_lock(&msi->lock);
526 + bit = find_first_zero_bit(msi->msi_irq_in_use, msi->num_of_vectors);
527 + if (bit >= msi->num_of_vectors) {
528 + mutex_unlock(&msi->lock);
532 + set_bit(bit, msi->msi_irq_in_use);
534 + mutex_unlock(&msi->lock);
536 + irq_domain_set_info(domain, virq, bit, &mobiveil_msi_bottom_irq_chip,
537 + domain->host_data, handle_level_irq, NULL, NULL);
541 +static void mobiveil_irq_msi_domain_free(struct irq_domain *domain,
543 + unsigned int nr_irqs)
545 + struct irq_data *d = irq_domain_get_irq_data(domain, virq);
546 + struct mobiveil_pcie *pcie = irq_data_get_irq_chip_data(d);
547 + struct mobiveil_msi *msi = &pcie->rp.msi;
549 + mutex_lock(&msi->lock);
551 + if (!test_bit(d->hwirq, msi->msi_irq_in_use))
552 + dev_err(&pcie->pdev->dev, "trying to free unused MSI#%lu\n",
555 + __clear_bit(d->hwirq, msi->msi_irq_in_use);
557 + mutex_unlock(&msi->lock);
559 +static const struct irq_domain_ops msi_domain_ops = {
560 + .alloc = mobiveil_irq_msi_domain_alloc,
561 + .free = mobiveil_irq_msi_domain_free,
564 +static int mobiveil_allocate_msi_domains(struct mobiveil_pcie *pcie)
566 + struct device *dev = &pcie->pdev->dev;
567 + struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node);
568 + struct mobiveil_msi *msi = &pcie->rp.msi;
570 + mutex_init(&msi->lock);
571 + msi->dev_domain = irq_domain_add_linear(NULL, msi->num_of_vectors,
572 + &msi_domain_ops, pcie);
573 + if (!msi->dev_domain) {
574 + dev_err(dev, "failed to create IRQ domain\n");
578 + msi->msi_domain = pci_msi_create_irq_domain(fwnode,
579 + &mobiveil_msi_domain_info,
581 + if (!msi->msi_domain) {
582 + dev_err(dev, "failed to create MSI domain\n");
583 + irq_domain_remove(msi->dev_domain);
590 +static int mobiveil_pcie_init_irq_domain(struct mobiveil_pcie *pcie)
592 + struct device *dev = &pcie->pdev->dev;
593 + struct device_node *node = dev->of_node;
597 + pcie->rp.intx_domain = irq_domain_add_linear(node, PCI_NUM_INTX,
598 + &intx_domain_ops, pcie);
600 + if (!pcie->rp.intx_domain) {
601 + dev_err(dev, "Failed to get a INTx IRQ domain\n");
605 + raw_spin_lock_init(&pcie->rp.intx_mask_lock);
608 + ret = mobiveil_allocate_msi_domains(pcie);
615 +static int mobiveil_pcie_interrupt_init(struct mobiveil_pcie *pcie)
617 + struct device *dev = &pcie->pdev->dev;
618 + struct resource *res;
621 + if (pcie->rp.ops->interrupt_init)
622 + return pcie->rp.ops->interrupt_init(pcie);
624 + /* map MSI config resource */
625 + res = platform_get_resource_byname(pcie->pdev, IORESOURCE_MEM,
627 + pcie->apb_csr_base = devm_pci_remap_cfg_resource(dev, res);
628 + if (IS_ERR(pcie->apb_csr_base))
629 + return PTR_ERR(pcie->apb_csr_base);
631 + /* setup MSI hardware registers */
632 + mobiveil_pcie_enable_msi(pcie);
634 + pcie->rp.irq = platform_get_irq(pcie->pdev, 0);
635 + if (pcie->rp.irq <= 0) {
636 + dev_err(dev, "failed to map IRQ: %d\n", pcie->rp.irq);
640 + /* initialize the IRQ domains */
641 + ret = mobiveil_pcie_init_irq_domain(pcie);
643 + dev_err(dev, "Failed creating IRQ Domain\n");
647 + irq_set_chained_handler_and_data(pcie->rp.irq,
648 + mobiveil_pcie_isr, pcie);
650 + /* Enable interrupts */
651 + csr_writel(pcie, (PAB_INTP_INTX_MASK | PAB_INTP_MSI_MASK),
652 + PAB_INTP_AMBA_MISC_ENB);
657 +int mobiveil_pcie_host_probe(struct mobiveil_pcie *pcie)
659 + struct pci_bus *bus;
660 + struct pci_bus *child;
661 + struct pci_host_bridge *bridge = pcie->bridge;
662 + struct device *dev = &pcie->pdev->dev;
663 + resource_size_t iobase;
666 + INIT_LIST_HEAD(&pcie->resources);
668 + ret = mobiveil_pcie_parse_dt(pcie);
670 + dev_err(dev, "Parsing DT failed, ret: %x\n", ret);
674 + /* parse the host bridge base addresses from the device tree file */
675 + ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
676 + &pcie->resources, &iobase);
678 + dev_err(dev, "Getting bridge resources failed\n");
683 + * configure all inbound and outbound windows and prepare the RC for
686 + ret = mobiveil_host_init(pcie);
688 + dev_err(dev, "Failed to initialize host\n");
692 + ret = mobiveil_pcie_interrupt_init(pcie);
694 + dev_err(dev, "Interrupt init failed\n");
698 + ret = devm_request_pci_bus_resources(dev, &pcie->resources);
702 + /* Initialize bridge */
703 + list_splice_init(&pcie->resources, &bridge->windows);
704 + bridge->dev.parent = dev;
705 + bridge->sysdata = pcie;
706 + bridge->busnr = pcie->rp.root_bus_nr;
707 + bridge->ops = &mobiveil_pcie_ops;
708 + bridge->map_irq = of_irq_parse_and_map_pci;
709 + bridge->swizzle_irq = pci_common_swizzle;
711 + ret = mobiveil_bringup_link(pcie);
713 + dev_info(dev, "link bring-up failed\n");
717 + /* setup the kernel resources for the newly added PCIe root bus */
718 + ret = pci_scan_root_bus_bridge(bridge);
724 + pci_assign_unassigned_bus_resources(bus);
725 + list_for_each_entry(child, &bus->children, node)
726 + pcie_bus_configure_settings(child);
727 + pci_bus_add_devices(bus);
731 + pci_free_resource_list(&pcie->resources);
735 +++ b/drivers/pci/controller/mobiveil/pcie-mobiveil-plat.c
737 +// SPDX-License-Identifier: GPL-2.0
739 + * PCIe host controller driver for Mobiveil PCIe Host controller
741 + * Copyright (c) 2018 Mobiveil Inc.
742 + * Copyright 2019 NXP
744 + * Author: Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
745 + * Refactor: Zhiqiang Hou <Zhiqiang.Hou@nxp.com>
748 +#include <linux/init.h>
749 +#include <linux/kernel.h>
750 +#include <linux/module.h>
751 +#include <linux/of_pci.h>
752 +#include <linux/pci.h>
753 +#include <linux/platform_device.h>
754 +#include <linux/slab.h>
756 +#include "pcie-mobiveil.h"
758 +static int mobiveil_pcie_probe(struct platform_device *pdev)
760 + struct mobiveil_pcie *pcie;
761 + struct pci_host_bridge *bridge;
762 + struct device *dev = &pdev->dev;
764 + bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
768 + pcie = pci_host_bridge_priv(bridge);
772 + return mobiveil_pcie_host_probe(pcie);
775 +static const struct of_device_id mobiveil_pcie_of_match[] = {
776 + {.compatible = "mbvl,gpex40-pcie",},
780 +MODULE_DEVICE_TABLE(of, mobiveil_pcie_of_match);
782 +static struct platform_driver mobiveil_pcie_driver = {
783 + .probe = mobiveil_pcie_probe,
785 + .name = "mobiveil-pcie",
786 + .of_match_table = mobiveil_pcie_of_match,
787 + .suppress_bind_attrs = true,
791 +builtin_platform_driver(mobiveil_pcie_driver);
793 +MODULE_LICENSE("GPL v2");
794 +MODULE_DESCRIPTION("Mobiveil PCIe host controller driver");
795 +MODULE_AUTHOR("Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>");
797 +++ b/drivers/pci/controller/mobiveil/pcie-mobiveil.c
799 +// SPDX-License-Identifier: GPL-2.0
801 + * PCIe host controller driver for Mobiveil PCIe Host controller
803 + * Copyright (c) 2018 Mobiveil Inc.
804 + * Copyright 2019 NXP
806 + * Author: Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
807 + * Refactor: Zhiqiang Hou <Zhiqiang.Hou@nxp.com>
810 +#include <linux/delay.h>
811 +#include <linux/init.h>
812 +#include <linux/kernel.h>
813 +#include <linux/pci.h>
814 +#include <linux/platform_device.h>
816 +#include "pcie-mobiveil.h"
819 + * mobiveil_pcie_sel_page - routine to access paged register
821 + * Registers whose address greater than PAGED_ADDR_BNDRY (0xc00) are paged,
822 + * for this scheme to work extracted higher 6 bits of the offset will be
823 + * written to pg_sel field of PAB_CTRL register and rest of the lower 10
824 + * bits enabled with PAGED_ADDR_BNDRY are used as offset of the register.
826 +static void mobiveil_pcie_sel_page(struct mobiveil_pcie *pcie, u8 pg_idx)
830 + val = readl(pcie->csr_axi_slave_base + PAB_CTRL);
831 + val &= ~(PAGE_SEL_MASK << PAGE_SEL_SHIFT);
832 + val |= (pg_idx & PAGE_SEL_MASK) << PAGE_SEL_SHIFT;
834 + writel(val, pcie->csr_axi_slave_base + PAB_CTRL);
837 +static void *mobiveil_pcie_comp_addr(struct mobiveil_pcie *pcie, u32 off)
839 + if (off < PAGED_ADDR_BNDRY) {
840 + /* For directly accessed registers, clear the pg_sel field */
841 + mobiveil_pcie_sel_page(pcie, 0);
842 + return pcie->csr_axi_slave_base + off;
845 + mobiveil_pcie_sel_page(pcie, OFFSET_TO_PAGE_IDX(off));
846 + return pcie->csr_axi_slave_base + OFFSET_TO_PAGE_ADDR(off);
849 +static int mobiveil_pcie_read(void __iomem *addr, int size, u32 *val)
851 + if ((uintptr_t)addr & (size - 1)) {
853 + return PCIBIOS_BAD_REGISTER_NUMBER;
858 + *val = readl(addr);
861 + *val = readw(addr);
864 + *val = readb(addr);
868 + return PCIBIOS_BAD_REGISTER_NUMBER;
871 + return PCIBIOS_SUCCESSFUL;
874 +static int mobiveil_pcie_write(void __iomem *addr, int size, u32 val)
876 + if ((uintptr_t)addr & (size - 1))
877 + return PCIBIOS_BAD_REGISTER_NUMBER;
890 + return PCIBIOS_BAD_REGISTER_NUMBER;
893 + return PCIBIOS_SUCCESSFUL;
896 +u32 csr_read(struct mobiveil_pcie *pcie, u32 off, size_t size)
902 + addr = mobiveil_pcie_comp_addr(pcie, off);
904 + ret = mobiveil_pcie_read(addr, size, &val);
906 + dev_err(&pcie->pdev->dev, "read CSR address failed\n");
911 +void csr_write(struct mobiveil_pcie *pcie, u32 val, u32 off, size_t size)
916 + addr = mobiveil_pcie_comp_addr(pcie, off);
918 + ret = mobiveil_pcie_write(addr, size, val);
920 + dev_err(&pcie->pdev->dev, "write CSR address failed\n");
923 +bool mobiveil_pcie_link_up(struct mobiveil_pcie *pcie)
925 + if (pcie->ops->link_up)
926 + return pcie->ops->link_up(pcie);
928 + return (csr_readl(pcie, LTSSM_STATUS) &
929 + LTSSM_STATUS_L0_MASK) == LTSSM_STATUS_L0;
932 +void program_ib_windows(struct mobiveil_pcie *pcie, int win_num, u64 cpu_addr,
933 + u64 pci_addr, u32 type, u64 size)
936 + u64 size64 = ~(size - 1);
938 + if (win_num >= pcie->ppio_wins) {
939 + dev_err(&pcie->pdev->dev,
940 + "ERROR: max inbound windows reached !\n");
944 + value = csr_readl(pcie, PAB_PEX_AMAP_CTRL(win_num));
945 + value &= ~(AMAP_CTRL_TYPE_MASK << AMAP_CTRL_TYPE_SHIFT | WIN_SIZE_MASK);
946 + value |= type << AMAP_CTRL_TYPE_SHIFT | 1 << AMAP_CTRL_EN_SHIFT |
947 + (lower_32_bits(size64) & WIN_SIZE_MASK);
948 + csr_writel(pcie, value, PAB_PEX_AMAP_CTRL(win_num));
950 + csr_writel(pcie, upper_32_bits(size64),
951 + PAB_EXT_PEX_AMAP_SIZEN(win_num));
953 + csr_writel(pcie, lower_32_bits(cpu_addr),
954 + PAB_PEX_AMAP_AXI_WIN(win_num));
955 + csr_writel(pcie, upper_32_bits(cpu_addr),
956 + PAB_EXT_PEX_AMAP_AXI_WIN(win_num));
958 + csr_writel(pcie, lower_32_bits(pci_addr),
959 + PAB_PEX_AMAP_PEX_WIN_L(win_num));
960 + csr_writel(pcie, upper_32_bits(pci_addr),
961 + PAB_PEX_AMAP_PEX_WIN_H(win_num));
963 + pcie->ib_wins_configured++;
967 + * routine to program the outbound windows
969 +void program_ob_windows(struct mobiveil_pcie *pcie, int win_num, u64 cpu_addr,
970 + u64 pci_addr, u32 type, u64 size)
973 + u64 size64 = ~(size - 1);
975 + if (win_num >= pcie->apio_wins) {
976 + dev_err(&pcie->pdev->dev,
977 + "ERROR: max outbound windows reached !\n");
982 + * program Enable Bit to 1, Type Bit to (00) base 2, AXI Window Size Bit
983 + * to 4 KB in PAB_AXI_AMAP_CTRL register
985 + value = csr_readl(pcie, PAB_AXI_AMAP_CTRL(win_num));
986 + value &= ~(WIN_TYPE_MASK << WIN_TYPE_SHIFT | WIN_SIZE_MASK);
987 + value |= 1 << WIN_ENABLE_SHIFT | type << WIN_TYPE_SHIFT |
988 + (lower_32_bits(size64) & WIN_SIZE_MASK);
989 + csr_writel(pcie, value, PAB_AXI_AMAP_CTRL(win_num));
991 + csr_writel(pcie, upper_32_bits(size64), PAB_EXT_AXI_AMAP_SIZE(win_num));
994 + * program AXI window base with appropriate value in
995 + * PAB_AXI_AMAP_AXI_WIN0 register
997 + csr_writel(pcie, lower_32_bits(cpu_addr) & (~AXI_WINDOW_ALIGN_MASK),
998 + PAB_AXI_AMAP_AXI_WIN(win_num));
999 + csr_writel(pcie, upper_32_bits(cpu_addr),
1000 + PAB_EXT_AXI_AMAP_AXI_WIN(win_num));
1002 + csr_writel(pcie, lower_32_bits(pci_addr),
1003 + PAB_AXI_AMAP_PEX_WIN_L(win_num));
1004 + csr_writel(pcie, upper_32_bits(pci_addr),
1005 + PAB_AXI_AMAP_PEX_WIN_H(win_num));
1007 + pcie->ob_wins_configured++;
1010 +int mobiveil_bringup_link(struct mobiveil_pcie *pcie)
1014 + /* check if the link is up or not */
1015 + for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
1016 + if (mobiveil_pcie_link_up(pcie))
1019 + usleep_range(LINK_WAIT_MIN, LINK_WAIT_MAX);
1022 + dev_err(&pcie->pdev->dev, "link never came up\n");
1024 + return -ETIMEDOUT;
1027 +++ b/drivers/pci/controller/mobiveil/pcie-mobiveil.h
1029 +/* SPDX-License-Identifier: GPL-2.0 */
1031 + * PCIe host controller driver for Mobiveil PCIe Host controller
1033 + * Copyright (c) 2018 Mobiveil Inc.
1034 + * Copyright 2019 NXP
1036 + * Author: Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
1037 + * Refactor: Zhiqiang Hou <Zhiqiang.Hou@nxp.com>
1040 +#ifndef _PCIE_MOBIVEIL_H
1041 +#define _PCIE_MOBIVEIL_H
1043 +#include <linux/pci.h>
1044 +#include <linux/irq.h>
1045 +#include <linux/msi.h>
1046 +#include "../../pci.h"
1048 +/* register offsets and bit positions */
1051 + * translation tables are grouped into windows, each window registers are
1052 + * grouped into blocks of 4 or 16 registers each
1054 +#define PAB_REG_BLOCK_SIZE 16
1055 +#define PAB_EXT_REG_BLOCK_SIZE 4
1057 +#define PAB_REG_ADDR(offset, win) \
1058 + (offset + (win * PAB_REG_BLOCK_SIZE))
1059 +#define PAB_EXT_REG_ADDR(offset, win) \
1060 + (offset + (win * PAB_EXT_REG_BLOCK_SIZE))
1062 +#define LTSSM_STATUS 0x0404
1063 +#define LTSSM_STATUS_L0_MASK 0x3f
1064 +#define LTSSM_STATUS_L0 0x2d
1066 +#define PAB_CTRL 0x0808
1067 +#define AMBA_PIO_ENABLE_SHIFT 0
1068 +#define PEX_PIO_ENABLE_SHIFT 1
1069 +#define PAGE_SEL_SHIFT 13
1070 +#define PAGE_SEL_MASK 0x3f
1071 +#define PAGE_LO_MASK 0x3ff
1072 +#define PAGE_SEL_OFFSET_SHIFT 10
1074 +#define PAB_AXI_PIO_CTRL 0x0840
1075 +#define APIO_EN_MASK 0xf
1077 +#define PAB_PEX_PIO_CTRL 0x08c0
1078 +#define PIO_ENABLE_SHIFT 0
1080 +#define PAB_INTP_AMBA_MISC_ENB 0x0b0c
1081 +#define PAB_INTP_AMBA_MISC_STAT 0x0b1c
1082 +#define PAB_INTP_INTX_MASK 0x01e0
1083 +#define PAB_INTP_MSI_MASK 0x8
1085 +#define PAB_AXI_AMAP_CTRL(win) PAB_REG_ADDR(0x0ba0, win)
1086 +#define WIN_ENABLE_SHIFT 0
1087 +#define WIN_TYPE_SHIFT 1
1088 +#define WIN_TYPE_MASK 0x3
1089 +#define WIN_SIZE_MASK 0xfffffc00
1091 +#define PAB_EXT_AXI_AMAP_SIZE(win) PAB_EXT_REG_ADDR(0xbaf0, win)
1093 +#define PAB_EXT_AXI_AMAP_AXI_WIN(win) PAB_EXT_REG_ADDR(0x80a0, win)
1094 +#define PAB_AXI_AMAP_AXI_WIN(win) PAB_REG_ADDR(0x0ba4, win)
1095 +#define AXI_WINDOW_ALIGN_MASK 3
1097 +#define PAB_AXI_AMAP_PEX_WIN_L(win) PAB_REG_ADDR(0x0ba8, win)
1098 +#define PAB_BUS_SHIFT 24
1099 +#define PAB_DEVICE_SHIFT 19
1100 +#define PAB_FUNCTION_SHIFT 16
1102 +#define PAB_AXI_AMAP_PEX_WIN_H(win) PAB_REG_ADDR(0x0bac, win)
1103 +#define PAB_INTP_AXI_PIO_CLASS 0x474
1105 +#define PAB_PEX_AMAP_CTRL(win) PAB_REG_ADDR(0x4ba0, win)
1106 +#define AMAP_CTRL_EN_SHIFT 0
1107 +#define AMAP_CTRL_TYPE_SHIFT 1
1108 +#define AMAP_CTRL_TYPE_MASK 3
1110 +#define PAB_EXT_PEX_AMAP_SIZEN(win) PAB_EXT_REG_ADDR(0xbef0, win)
1111 +#define PAB_EXT_PEX_AMAP_AXI_WIN(win) PAB_EXT_REG_ADDR(0xb4a0, win)
1112 +#define PAB_PEX_AMAP_AXI_WIN(win) PAB_REG_ADDR(0x4ba4, win)
1113 +#define PAB_PEX_AMAP_PEX_WIN_L(win) PAB_REG_ADDR(0x4ba8, win)
1114 +#define PAB_PEX_AMAP_PEX_WIN_H(win) PAB_REG_ADDR(0x4bac, win)
1116 +/* starting offset of INTX bits in status register */
1117 +#define PAB_INTX_START 5
1119 +/* supported number of MSI interrupts */
1120 +#define PCI_NUM_MSI 16
1122 +/* MSI registers */
1123 +#define MSI_BASE_LO_OFFSET 0x04
1124 +#define MSI_BASE_HI_OFFSET 0x08
1125 +#define MSI_SIZE_OFFSET 0x0c
1126 +#define MSI_ENABLE_OFFSET 0x14
1127 +#define MSI_STATUS_OFFSET 0x18
1128 +#define MSI_DATA_OFFSET 0x20
1129 +#define MSI_ADDR_L_OFFSET 0x24
1130 +#define MSI_ADDR_H_OFFSET 0x28
1132 +/* outbound and inbound window definitions */
1133 +#define WIN_NUM_0 0
1134 +#define WIN_NUM_1 1
1135 +#define CFG_WINDOW_TYPE 0
1136 +#define IO_WINDOW_TYPE 1
1137 +#define MEM_WINDOW_TYPE 2
1138 +#define IB_WIN_SIZE ((u64)256 * 1024 * 1024 * 1024)
1139 +#define MAX_PIO_WINDOWS 8
1141 +/* Parameters for the waiting for link up routine */
1142 +#define LINK_WAIT_MAX_RETRIES 10
1143 +#define LINK_WAIT_MIN 90000
1144 +#define LINK_WAIT_MAX 100000
1146 +#define PAGED_ADDR_BNDRY 0xc00
1147 +#define OFFSET_TO_PAGE_ADDR(off) \
1148 + ((off & PAGE_LO_MASK) | PAGED_ADDR_BNDRY)
1149 +#define OFFSET_TO_PAGE_IDX(off) \
1150 + ((off >> PAGE_SEL_OFFSET_SHIFT) & PAGE_SEL_MASK)
1152 +struct mobiveil_pcie;
1154 +struct mobiveil_msi { /* MSI information */
1155 + struct mutex lock; /* protect bitmap variable */
1156 + struct irq_domain *msi_domain;
1157 + struct irq_domain *dev_domain;
1158 + phys_addr_t msi_pages_phys;
1159 + int num_of_vectors;
1160 + DECLARE_BITMAP(msi_irq_in_use, PCI_NUM_MSI);
1163 +struct mobiveil_rp_ops {
1164 + int (*interrupt_init)(struct mobiveil_pcie *pcie);
1169 + void __iomem *config_axi_slave_base; /* endpoint config base */
1170 + struct resource *ob_io_res;
1171 + struct mobiveil_rp_ops *ops;
1173 + raw_spinlock_t intx_mask_lock;
1174 + struct irq_domain *intx_domain;
1175 + struct mobiveil_msi msi;
1178 +struct mobiveil_pab_ops {
1179 + int (*link_up)(struct mobiveil_pcie *pcie);
1182 +struct mobiveil_pcie {
1183 + struct platform_device *pdev;
1184 + struct list_head resources;
1185 + void __iomem *csr_axi_slave_base; /* PAB registers base */
1186 + phys_addr_t pcie_reg_base; /* Physical PCIe Controller Base */
1187 + void __iomem *apb_csr_base; /* MSI register base */
1190 + u32 ob_wins_configured; /* configured outbound windows */
1191 + u32 ib_wins_configured; /* configured inbound windows */
1192 + const struct mobiveil_pab_ops *ops;
1193 + struct root_port rp;
1194 + struct pci_host_bridge *bridge;
1197 +int mobiveil_pcie_host_probe(struct mobiveil_pcie *pcie);
1198 +bool mobiveil_pcie_link_up(struct mobiveil_pcie *pcie);
1199 +int mobiveil_bringup_link(struct mobiveil_pcie *pcie);
1200 +void program_ob_windows(struct mobiveil_pcie *pcie, int win_num, u64 cpu_addr,
1201 + u64 pci_addr, u32 type, u64 size);
1202 +void program_ib_windows(struct mobiveil_pcie *pcie, int win_num, u64 cpu_addr,
1203 + u64 pci_addr, u32 type, u64 size);
1204 +u32 csr_read(struct mobiveil_pcie *pcie, u32 off, size_t size);
1205 +void csr_write(struct mobiveil_pcie *pcie, u32 val, u32 off, size_t size);
1207 +static inline u32 csr_readl(struct mobiveil_pcie *pcie, u32 off)
1209 + return csr_read(pcie, off, 0x4);
1212 +static inline void csr_writel(struct mobiveil_pcie *pcie, u32 val, u32 off)
1214 + csr_write(pcie, val, off, 0x4);
1217 +#endif /* _PCIE_MOBIVEIL_H */
1218 --- a/drivers/pci/controller/pcie-mobiveil.c
1221 -// SPDX-License-Identifier: GPL-2.0
1223 - * PCIe host controller driver for Mobiveil PCIe Host controller
1225 - * Copyright (c) 2018 Mobiveil Inc.
1226 - * Author: Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
1229 -#include <linux/delay.h>
1230 -#include <linux/init.h>
1231 -#include <linux/interrupt.h>
1232 -#include <linux/irq.h>
1233 -#include <linux/irqchip/chained_irq.h>
1234 -#include <linux/irqdomain.h>
1235 -#include <linux/kernel.h>
1236 -#include <linux/module.h>
1237 -#include <linux/msi.h>
1238 -#include <linux/of_address.h>
1239 -#include <linux/of_irq.h>
1240 -#include <linux/of_platform.h>
1241 -#include <linux/of_pci.h>
1242 -#include <linux/pci.h>
1243 -#include <linux/platform_device.h>
1244 -#include <linux/slab.h>
1246 -#include "../pci.h"
1248 -/* register offsets and bit positions */
1251 - * translation tables are grouped into windows, each window registers are
1252 - * grouped into blocks of 4 or 16 registers each
1254 -#define PAB_REG_BLOCK_SIZE 16
1255 -#define PAB_EXT_REG_BLOCK_SIZE 4
1257 -#define PAB_REG_ADDR(offset, win) \
1258 - (offset + (win * PAB_REG_BLOCK_SIZE))
1259 -#define PAB_EXT_REG_ADDR(offset, win) \
1260 - (offset + (win * PAB_EXT_REG_BLOCK_SIZE))
1262 -#define LTSSM_STATUS 0x0404
1263 -#define LTSSM_STATUS_L0_MASK 0x3f
1264 -#define LTSSM_STATUS_L0 0x2d
1266 -#define PAB_CTRL 0x0808
1267 -#define AMBA_PIO_ENABLE_SHIFT 0
1268 -#define PEX_PIO_ENABLE_SHIFT 1
1269 -#define PAGE_SEL_SHIFT 13
1270 -#define PAGE_SEL_MASK 0x3f
1271 -#define PAGE_LO_MASK 0x3ff
1272 -#define PAGE_SEL_OFFSET_SHIFT 10
1274 -#define PAB_AXI_PIO_CTRL 0x0840
1275 -#define APIO_EN_MASK 0xf
1277 -#define PAB_PEX_PIO_CTRL 0x08c0
1278 -#define PIO_ENABLE_SHIFT 0
1280 -#define PAB_INTP_AMBA_MISC_ENB 0x0b0c
1281 -#define PAB_INTP_AMBA_MISC_STAT 0x0b1c
1282 -#define PAB_INTP_INTX_MASK 0x01e0
1283 -#define PAB_INTP_MSI_MASK 0x8
1285 -#define PAB_AXI_AMAP_CTRL(win) PAB_REG_ADDR(0x0ba0, win)
1286 -#define WIN_ENABLE_SHIFT 0
1287 -#define WIN_TYPE_SHIFT 1
1288 -#define WIN_TYPE_MASK 0x3
1289 -#define WIN_SIZE_MASK 0xfffffc00
1291 -#define PAB_EXT_AXI_AMAP_SIZE(win) PAB_EXT_REG_ADDR(0xbaf0, win)
1293 -#define PAB_EXT_AXI_AMAP_AXI_WIN(win) PAB_EXT_REG_ADDR(0x80a0, win)
1294 -#define PAB_AXI_AMAP_AXI_WIN(win) PAB_REG_ADDR(0x0ba4, win)
1295 -#define AXI_WINDOW_ALIGN_MASK 3
1297 -#define PAB_AXI_AMAP_PEX_WIN_L(win) PAB_REG_ADDR(0x0ba8, win)
1298 -#define PAB_BUS_SHIFT 24
1299 -#define PAB_DEVICE_SHIFT 19
1300 -#define PAB_FUNCTION_SHIFT 16
1302 -#define PAB_AXI_AMAP_PEX_WIN_H(win) PAB_REG_ADDR(0x0bac, win)
1303 -#define PAB_INTP_AXI_PIO_CLASS 0x474
1305 -#define PAB_PEX_AMAP_CTRL(win) PAB_REG_ADDR(0x4ba0, win)
1306 -#define AMAP_CTRL_EN_SHIFT 0
1307 -#define AMAP_CTRL_TYPE_SHIFT 1
1308 -#define AMAP_CTRL_TYPE_MASK 3
1310 -#define PAB_EXT_PEX_AMAP_SIZEN(win) PAB_EXT_REG_ADDR(0xbef0, win)
1311 -#define PAB_EXT_PEX_AMAP_AXI_WIN(win) PAB_EXT_REG_ADDR(0xb4a0, win)
1312 -#define PAB_PEX_AMAP_AXI_WIN(win) PAB_REG_ADDR(0x4ba4, win)
1313 -#define PAB_PEX_AMAP_PEX_WIN_L(win) PAB_REG_ADDR(0x4ba8, win)
1314 -#define PAB_PEX_AMAP_PEX_WIN_H(win) PAB_REG_ADDR(0x4bac, win)
1316 -/* starting offset of INTX bits in status register */
1317 -#define PAB_INTX_START 5
1319 -/* supported number of MSI interrupts */
1320 -#define PCI_NUM_MSI 16
1322 -/* MSI registers */
1323 -#define MSI_BASE_LO_OFFSET 0x04
1324 -#define MSI_BASE_HI_OFFSET 0x08
1325 -#define MSI_SIZE_OFFSET 0x0c
1326 -#define MSI_ENABLE_OFFSET 0x14
1327 -#define MSI_STATUS_OFFSET 0x18
1328 -#define MSI_DATA_OFFSET 0x20
1329 -#define MSI_ADDR_L_OFFSET 0x24
1330 -#define MSI_ADDR_H_OFFSET 0x28
1332 -/* outbound and inbound window definitions */
1333 -#define WIN_NUM_0 0
1334 -#define WIN_NUM_1 1
1335 -#define CFG_WINDOW_TYPE 0
1336 -#define IO_WINDOW_TYPE 1
1337 -#define MEM_WINDOW_TYPE 2
1338 -#define IB_WIN_SIZE ((u64)256 * 1024 * 1024 * 1024)
1339 -#define MAX_PIO_WINDOWS 8
1341 -/* Parameters for the waiting for link up routine */
1342 -#define LINK_WAIT_MAX_RETRIES 10
1343 -#define LINK_WAIT_MIN 90000
1344 -#define LINK_WAIT_MAX 100000
1346 -#define PAGED_ADDR_BNDRY 0xc00
1347 -#define OFFSET_TO_PAGE_ADDR(off) \
1348 - ((off & PAGE_LO_MASK) | PAGED_ADDR_BNDRY)
1349 -#define OFFSET_TO_PAGE_IDX(off) \
1350 - ((off >> PAGE_SEL_OFFSET_SHIFT) & PAGE_SEL_MASK)
1352 -struct mobiveil_msi { /* MSI information */
1353 - struct mutex lock; /* protect bitmap variable */
1354 - struct irq_domain *msi_domain;
1355 - struct irq_domain *dev_domain;
1356 - phys_addr_t msi_pages_phys;
1357 - int num_of_vectors;
1358 - DECLARE_BITMAP(msi_irq_in_use, PCI_NUM_MSI);
1361 -struct mobiveil_pcie {
1362 - struct platform_device *pdev;
1363 - struct list_head resources;
1364 - void __iomem *config_axi_slave_base; /* endpoint config base */
1365 - void __iomem *csr_axi_slave_base; /* root port config base */
1366 - void __iomem *apb_csr_base; /* MSI register base */
1367 - phys_addr_t pcie_reg_base; /* Physical PCIe Controller Base */
1368 - struct irq_domain *intx_domain;
1369 - raw_spinlock_t intx_mask_lock;
1373 - int ob_wins_configured; /* configured outbound windows */
1374 - int ib_wins_configured; /* configured inbound windows */
1375 - struct resource *ob_io_res;
1377 - struct mobiveil_msi msi;
1381 - * mobiveil_pcie_sel_page - routine to access paged register
1383 - * Registers whose address greater than PAGED_ADDR_BNDRY (0xc00) are paged,
1384 - * for this scheme to work extracted higher 6 bits of the offset will be
1385 - * written to pg_sel field of PAB_CTRL register and rest of the lower 10
1386 - * bits enabled with PAGED_ADDR_BNDRY are used as offset of the register.
1388 -static void mobiveil_pcie_sel_page(struct mobiveil_pcie *pcie, u8 pg_idx)
1392 - val = readl(pcie->csr_axi_slave_base + PAB_CTRL);
1393 - val &= ~(PAGE_SEL_MASK << PAGE_SEL_SHIFT);
1394 - val |= (pg_idx & PAGE_SEL_MASK) << PAGE_SEL_SHIFT;
1396 - writel(val, pcie->csr_axi_slave_base + PAB_CTRL);
1399 -static void *mobiveil_pcie_comp_addr(struct mobiveil_pcie *pcie, u32 off)
1401 - if (off < PAGED_ADDR_BNDRY) {
1402 - /* For directly accessed registers, clear the pg_sel field */
1403 - mobiveil_pcie_sel_page(pcie, 0);
1404 - return pcie->csr_axi_slave_base + off;
1407 - mobiveil_pcie_sel_page(pcie, OFFSET_TO_PAGE_IDX(off));
1408 - return pcie->csr_axi_slave_base + OFFSET_TO_PAGE_ADDR(off);
1411 -static int mobiveil_pcie_read(void __iomem *addr, int size, u32 *val)
1413 - if ((uintptr_t)addr & (size - 1)) {
1415 - return PCIBIOS_BAD_REGISTER_NUMBER;
1420 - *val = readl(addr);
1423 - *val = readw(addr);
1426 - *val = readb(addr);
1430 - return PCIBIOS_BAD_REGISTER_NUMBER;
1433 - return PCIBIOS_SUCCESSFUL;
1436 -static int mobiveil_pcie_write(void __iomem *addr, int size, u32 val)
1438 - if ((uintptr_t)addr & (size - 1))
1439 - return PCIBIOS_BAD_REGISTER_NUMBER;
1443 - writel(val, addr);
1446 - writew(val, addr);
1449 - writeb(val, addr);
1452 - return PCIBIOS_BAD_REGISTER_NUMBER;
1455 - return PCIBIOS_SUCCESSFUL;
1458 -static u32 csr_read(struct mobiveil_pcie *pcie, u32 off, size_t size)
1464 - addr = mobiveil_pcie_comp_addr(pcie, off);
1466 - ret = mobiveil_pcie_read(addr, size, &val);
1468 - dev_err(&pcie->pdev->dev, "read CSR address failed\n");
1473 -static void csr_write(struct mobiveil_pcie *pcie, u32 val, u32 off, size_t size)
1478 - addr = mobiveil_pcie_comp_addr(pcie, off);
1480 - ret = mobiveil_pcie_write(addr, size, val);
1482 - dev_err(&pcie->pdev->dev, "write CSR address failed\n");
1485 -static u32 csr_readl(struct mobiveil_pcie *pcie, u32 off)
1487 - return csr_read(pcie, off, 0x4);
1490 -static void csr_writel(struct mobiveil_pcie *pcie, u32 val, u32 off)
1492 - csr_write(pcie, val, off, 0x4);
1495 -static bool mobiveil_pcie_link_up(struct mobiveil_pcie *pcie)
1497 - return (csr_readl(pcie, LTSSM_STATUS) &
1498 - LTSSM_STATUS_L0_MASK) == LTSSM_STATUS_L0;
1501 -static bool mobiveil_pcie_valid_device(struct pci_bus *bus, unsigned int devfn)
1503 - struct mobiveil_pcie *pcie = bus->sysdata;
1505 - /* Only one device down on each root port */
1506 - if ((bus->number == pcie->root_bus_nr) && (devfn > 0))
1510 - * Do not read more than one device on the bus directly
1513 - if ((bus->primary == pcie->root_bus_nr) && (PCI_SLOT(devfn) > 0))
1520 - * mobiveil_pcie_map_bus - routine to get the configuration base of either
1521 - * root port or endpoint
1523 -static void __iomem *mobiveil_pcie_map_bus(struct pci_bus *bus,
1524 - unsigned int devfn, int where)
1526 - struct mobiveil_pcie *pcie = bus->sysdata;
1529 - if (!mobiveil_pcie_valid_device(bus, devfn))
1532 - /* RC config access */
1533 - if (bus->number == pcie->root_bus_nr)
1534 - return pcie->csr_axi_slave_base + where;
1537 - * EP config access (in Config/APIO space)
1538 - * Program PEX Address base (31..16 bits) with appropriate value
1539 - * (BDF) in PAB_AXI_AMAP_PEX_WIN_L0 Register.
1540 - * Relies on pci_lock serialization
1542 - value = bus->number << PAB_BUS_SHIFT |
1543 - PCI_SLOT(devfn) << PAB_DEVICE_SHIFT |
1544 - PCI_FUNC(devfn) << PAB_FUNCTION_SHIFT;
1546 - csr_writel(pcie, value, PAB_AXI_AMAP_PEX_WIN_L(WIN_NUM_0));
1548 - return pcie->config_axi_slave_base + where;
1551 -static struct pci_ops mobiveil_pcie_ops = {
1552 - .map_bus = mobiveil_pcie_map_bus,
1553 - .read = pci_generic_config_read,
1554 - .write = pci_generic_config_write,
1557 -static void mobiveil_pcie_isr(struct irq_desc *desc)
1559 - struct irq_chip *chip = irq_desc_get_chip(desc);
1560 - struct mobiveil_pcie *pcie = irq_desc_get_handler_data(desc);
1561 - struct device *dev = &pcie->pdev->dev;
1562 - struct mobiveil_msi *msi = &pcie->msi;
1563 - u32 msi_data, msi_addr_lo, msi_addr_hi;
1564 - u32 intr_status, msi_status;
1565 - unsigned long shifted_status;
1566 - u32 bit, virq, val, mask;
1569 - * The core provides a single interrupt for both INTx/MSI messages.
1570 - * So we'll read both INTx and MSI status
1573 - chained_irq_enter(chip, desc);
1575 - /* read INTx status */
1576 - val = csr_readl(pcie, PAB_INTP_AMBA_MISC_STAT);
1577 - mask = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
1578 - intr_status = val & mask;
1581 - if (intr_status & PAB_INTP_INTX_MASK) {
1582 - shifted_status = csr_readl(pcie, PAB_INTP_AMBA_MISC_STAT);
1583 - shifted_status &= PAB_INTP_INTX_MASK;
1584 - shifted_status >>= PAB_INTX_START;
1586 - for_each_set_bit(bit, &shifted_status, PCI_NUM_INTX) {
1587 - virq = irq_find_mapping(pcie->intx_domain,
1590 - generic_handle_irq(virq);
1592 - dev_err_ratelimited(dev, "unexpected IRQ, INT%d\n",
1595 - /* clear interrupt handled */
1596 - csr_writel(pcie, 1 << (PAB_INTX_START + bit),
1597 - PAB_INTP_AMBA_MISC_STAT);
1600 - shifted_status = csr_readl(pcie,
1601 - PAB_INTP_AMBA_MISC_STAT);
1602 - shifted_status &= PAB_INTP_INTX_MASK;
1603 - shifted_status >>= PAB_INTX_START;
1604 - } while (shifted_status != 0);
1607 - /* read extra MSI status register */
1608 - msi_status = readl_relaxed(pcie->apb_csr_base + MSI_STATUS_OFFSET);
1610 - /* handle MSI interrupts */
1611 - while (msi_status & 1) {
1612 - msi_data = readl_relaxed(pcie->apb_csr_base + MSI_DATA_OFFSET);
1615 - * MSI_STATUS_OFFSET register gets updated to zero
1616 - * once we pop not only the MSI data but also address
1617 - * from MSI hardware FIFO. So keeping these following
1618 - * two dummy reads.
1620 - msi_addr_lo = readl_relaxed(pcie->apb_csr_base +
1621 - MSI_ADDR_L_OFFSET);
1622 - msi_addr_hi = readl_relaxed(pcie->apb_csr_base +
1623 - MSI_ADDR_H_OFFSET);
1624 - dev_dbg(dev, "MSI registers, data: %08x, addr: %08x:%08x\n",
1625 - msi_data, msi_addr_hi, msi_addr_lo);
1627 - virq = irq_find_mapping(msi->dev_domain, msi_data);
1629 - generic_handle_irq(virq);
1631 - msi_status = readl_relaxed(pcie->apb_csr_base +
1632 - MSI_STATUS_OFFSET);
1635 - /* Clear the interrupt status */
1636 - csr_writel(pcie, intr_status, PAB_INTP_AMBA_MISC_STAT);
1637 - chained_irq_exit(chip, desc);
1640 -static int mobiveil_pcie_parse_dt(struct mobiveil_pcie *pcie)
1642 - struct device *dev = &pcie->pdev->dev;
1643 - struct platform_device *pdev = pcie->pdev;
1644 - struct device_node *node = dev->of_node;
1645 - struct resource *res;
1647 - /* map config resource */
1648 - res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
1649 - "config_axi_slave");
1650 - pcie->config_axi_slave_base = devm_pci_remap_cfg_resource(dev, res);
1651 - if (IS_ERR(pcie->config_axi_slave_base))
1652 - return PTR_ERR(pcie->config_axi_slave_base);
1653 - pcie->ob_io_res = res;
1655 - /* map csr resource */
1656 - res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
1658 - pcie->csr_axi_slave_base = devm_pci_remap_cfg_resource(dev, res);
1659 - if (IS_ERR(pcie->csr_axi_slave_base))
1660 - return PTR_ERR(pcie->csr_axi_slave_base);
1661 - pcie->pcie_reg_base = res->start;
1663 - /* map MSI config resource */
1664 - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "apb_csr");
1665 - pcie->apb_csr_base = devm_pci_remap_cfg_resource(dev, res);
1666 - if (IS_ERR(pcie->apb_csr_base))
1667 - return PTR_ERR(pcie->apb_csr_base);
1669 - /* read the number of windows requested */
1670 - if (of_property_read_u32(node, "apio-wins", &pcie->apio_wins))
1671 - pcie->apio_wins = MAX_PIO_WINDOWS;
1673 - if (of_property_read_u32(node, "ppio-wins", &pcie->ppio_wins))
1674 - pcie->ppio_wins = MAX_PIO_WINDOWS;
1676 - pcie->irq = platform_get_irq(pdev, 0);
1677 - if (pcie->irq <= 0) {
1678 - dev_err(dev, "failed to map IRQ: %d\n", pcie->irq);
1685 -static void program_ib_windows(struct mobiveil_pcie *pcie, int win_num,
1686 - u64 cpu_addr, u64 pci_addr, u32 type, u64 size)
1689 - u64 size64 = ~(size - 1);
1691 - if (win_num >= pcie->ppio_wins) {
1692 - dev_err(&pcie->pdev->dev,
1693 - "ERROR: max inbound windows reached !\n");
1697 - value = csr_readl(pcie, PAB_PEX_AMAP_CTRL(win_num));
1698 - value &= ~(AMAP_CTRL_TYPE_MASK << AMAP_CTRL_TYPE_SHIFT | WIN_SIZE_MASK);
1699 - value |= type << AMAP_CTRL_TYPE_SHIFT | 1 << AMAP_CTRL_EN_SHIFT |
1700 - (lower_32_bits(size64) & WIN_SIZE_MASK);
1701 - csr_writel(pcie, value, PAB_PEX_AMAP_CTRL(win_num));
1703 - csr_writel(pcie, upper_32_bits(size64),
1704 - PAB_EXT_PEX_AMAP_SIZEN(win_num));
1706 - csr_writel(pcie, lower_32_bits(cpu_addr),
1707 - PAB_PEX_AMAP_AXI_WIN(win_num));
1708 - csr_writel(pcie, upper_32_bits(cpu_addr),
1709 - PAB_EXT_PEX_AMAP_AXI_WIN(win_num));
1711 - csr_writel(pcie, lower_32_bits(pci_addr),
1712 - PAB_PEX_AMAP_PEX_WIN_L(win_num));
1713 - csr_writel(pcie, upper_32_bits(pci_addr),
1714 - PAB_PEX_AMAP_PEX_WIN_H(win_num));
1716 - pcie->ib_wins_configured++;
1720 - * routine to program the outbound windows
1722 -static void program_ob_windows(struct mobiveil_pcie *pcie, int win_num,
1723 - u64 cpu_addr, u64 pci_addr, u32 type, u64 size)
1726 - u64 size64 = ~(size - 1);
1728 - if (win_num >= pcie->apio_wins) {
1729 - dev_err(&pcie->pdev->dev,
1730 - "ERROR: max outbound windows reached !\n");
1735 - * program Enable Bit to 1, Type Bit to (00) base 2, AXI Window Size Bit
1736 - * to 4 KB in PAB_AXI_AMAP_CTRL register
1738 - value = csr_readl(pcie, PAB_AXI_AMAP_CTRL(win_num));
1739 - value &= ~(WIN_TYPE_MASK << WIN_TYPE_SHIFT | WIN_SIZE_MASK);
1740 - value |= 1 << WIN_ENABLE_SHIFT | type << WIN_TYPE_SHIFT |
1741 - (lower_32_bits(size64) & WIN_SIZE_MASK);
1742 - csr_writel(pcie, value, PAB_AXI_AMAP_CTRL(win_num));
1744 - csr_writel(pcie, upper_32_bits(size64), PAB_EXT_AXI_AMAP_SIZE(win_num));
1747 - * program AXI window base with appropriate value in
1748 - * PAB_AXI_AMAP_AXI_WIN0 register
1750 - csr_writel(pcie, lower_32_bits(cpu_addr) & (~AXI_WINDOW_ALIGN_MASK),
1751 - PAB_AXI_AMAP_AXI_WIN(win_num));
1752 - csr_writel(pcie, upper_32_bits(cpu_addr),
1753 - PAB_EXT_AXI_AMAP_AXI_WIN(win_num));
1755 - csr_writel(pcie, lower_32_bits(pci_addr),
1756 - PAB_AXI_AMAP_PEX_WIN_L(win_num));
1757 - csr_writel(pcie, upper_32_bits(pci_addr),
1758 - PAB_AXI_AMAP_PEX_WIN_H(win_num));
1760 - pcie->ob_wins_configured++;
1763 -static int mobiveil_bringup_link(struct mobiveil_pcie *pcie)
1767 - /* check if the link is up or not */
1768 - for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
1769 - if (mobiveil_pcie_link_up(pcie))
1772 - usleep_range(LINK_WAIT_MIN, LINK_WAIT_MAX);
1775 - dev_err(&pcie->pdev->dev, "link never came up\n");
1777 - return -ETIMEDOUT;
1780 -static void mobiveil_pcie_enable_msi(struct mobiveil_pcie *pcie)
1782 - phys_addr_t msg_addr = pcie->pcie_reg_base;
1783 - struct mobiveil_msi *msi = &pcie->msi;
1785 - pcie->msi.num_of_vectors = PCI_NUM_MSI;
1786 - msi->msi_pages_phys = (phys_addr_t)msg_addr;
1788 - writel_relaxed(lower_32_bits(msg_addr),
1789 - pcie->apb_csr_base + MSI_BASE_LO_OFFSET);
1790 - writel_relaxed(upper_32_bits(msg_addr),
1791 - pcie->apb_csr_base + MSI_BASE_HI_OFFSET);
1792 - writel_relaxed(4096, pcie->apb_csr_base + MSI_SIZE_OFFSET);
1793 - writel_relaxed(1, pcie->apb_csr_base + MSI_ENABLE_OFFSET);
1796 -static int mobiveil_host_init(struct mobiveil_pcie *pcie)
1798 - u32 value, pab_ctrl, type;
1799 - struct resource_entry *win;
1801 - /* setup bus numbers */
1802 - value = csr_readl(pcie, PCI_PRIMARY_BUS);
1803 - value &= 0xff000000;
1804 - value |= 0x00ff0100;
1805 - csr_writel(pcie, value, PCI_PRIMARY_BUS);
1808 - * program Bus Master Enable Bit in Command Register in PAB Config
1811 - value = csr_readl(pcie, PCI_COMMAND);
1812 - value |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER;
1813 - csr_writel(pcie, value, PCI_COMMAND);
1816 - * program PIO Enable Bit to 1 (and PEX PIO Enable to 1) in PAB_CTRL
1819 - pab_ctrl = csr_readl(pcie, PAB_CTRL);
1820 - pab_ctrl |= (1 << AMBA_PIO_ENABLE_SHIFT) | (1 << PEX_PIO_ENABLE_SHIFT);
1821 - csr_writel(pcie, pab_ctrl, PAB_CTRL);
1823 - csr_writel(pcie, (PAB_INTP_INTX_MASK | PAB_INTP_MSI_MASK),
1824 - PAB_INTP_AMBA_MISC_ENB);
1827 - * program PIO Enable Bit to 1 and Config Window Enable Bit to 1 in
1828 - * PAB_AXI_PIO_CTRL Register
1830 - value = csr_readl(pcie, PAB_AXI_PIO_CTRL);
1831 - value |= APIO_EN_MASK;
1832 - csr_writel(pcie, value, PAB_AXI_PIO_CTRL);
1834 - /* Enable PCIe PIO master */
1835 - value = csr_readl(pcie, PAB_PEX_PIO_CTRL);
1836 - value |= 1 << PIO_ENABLE_SHIFT;
1837 - csr_writel(pcie, value, PAB_PEX_PIO_CTRL);
1840 - * we'll program one outbound window for config reads and
1841 - * another default inbound window for all the upstream traffic
1842 - * rest of the outbound windows will be configured according to
1843 - * the "ranges" field defined in device tree
1846 - /* config outbound translation window */
1847 - program_ob_windows(pcie, WIN_NUM_0, pcie->ob_io_res->start, 0,
1848 - CFG_WINDOW_TYPE, resource_size(pcie->ob_io_res));
1850 - /* memory inbound translation window */
1851 - program_ib_windows(pcie, WIN_NUM_0, 0, 0, MEM_WINDOW_TYPE, IB_WIN_SIZE);
1853 - /* Get the I/O and memory ranges from DT */
1854 - resource_list_for_each_entry(win, &pcie->resources) {
1855 - if (resource_type(win->res) == IORESOURCE_MEM)
1856 - type = MEM_WINDOW_TYPE;
1857 - else if (resource_type(win->res) == IORESOURCE_IO)
1858 - type = IO_WINDOW_TYPE;
1862 - /* configure outbound translation window */
1863 - program_ob_windows(pcie, pcie->ob_wins_configured,
1865 - win->res->start - win->offset,
1866 - type, resource_size(win->res));
1869 - /* fixup for PCIe class register */
1870 - value = csr_readl(pcie, PAB_INTP_AXI_PIO_CLASS);
1872 - value |= (PCI_CLASS_BRIDGE_PCI << 16);
1873 - csr_writel(pcie, value, PAB_INTP_AXI_PIO_CLASS);
1875 - /* setup MSI hardware registers */
1876 - mobiveil_pcie_enable_msi(pcie);
1881 -static void mobiveil_mask_intx_irq(struct irq_data *data)
1883 - struct irq_desc *desc = irq_to_desc(data->irq);
1884 - struct mobiveil_pcie *pcie;
1885 - unsigned long flags;
1886 - u32 mask, shifted_val;
1888 - pcie = irq_desc_get_chip_data(desc);
1889 - mask = 1 << ((data->hwirq + PAB_INTX_START) - 1);
1890 - raw_spin_lock_irqsave(&pcie->intx_mask_lock, flags);
1891 - shifted_val = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
1892 - shifted_val &= ~mask;
1893 - csr_writel(pcie, shifted_val, PAB_INTP_AMBA_MISC_ENB);
1894 - raw_spin_unlock_irqrestore(&pcie->intx_mask_lock, flags);
1897 -static void mobiveil_unmask_intx_irq(struct irq_data *data)
1899 - struct irq_desc *desc = irq_to_desc(data->irq);
1900 - struct mobiveil_pcie *pcie;
1901 - unsigned long flags;
1902 - u32 shifted_val, mask;
1904 - pcie = irq_desc_get_chip_data(desc);
1905 - mask = 1 << ((data->hwirq + PAB_INTX_START) - 1);
1906 - raw_spin_lock_irqsave(&pcie->intx_mask_lock, flags);
1907 - shifted_val = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
1908 - shifted_val |= mask;
1909 - csr_writel(pcie, shifted_val, PAB_INTP_AMBA_MISC_ENB);
1910 - raw_spin_unlock_irqrestore(&pcie->intx_mask_lock, flags);
1913 -static struct irq_chip intx_irq_chip = {
1914 - .name = "mobiveil_pcie:intx",
1915 - .irq_enable = mobiveil_unmask_intx_irq,
1916 - .irq_disable = mobiveil_mask_intx_irq,
1917 - .irq_mask = mobiveil_mask_intx_irq,
1918 - .irq_unmask = mobiveil_unmask_intx_irq,
1921 -/* routine to setup the INTx related data */
1922 -static int mobiveil_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
1923 - irq_hw_number_t hwirq)
1925 - irq_set_chip_and_handler(irq, &intx_irq_chip, handle_level_irq);
1926 - irq_set_chip_data(irq, domain->host_data);
1931 -/* INTx domain operations structure */
1932 -static const struct irq_domain_ops intx_domain_ops = {
1933 - .map = mobiveil_pcie_intx_map,
1936 -static struct irq_chip mobiveil_msi_irq_chip = {
1937 - .name = "Mobiveil PCIe MSI",
1938 - .irq_mask = pci_msi_mask_irq,
1939 - .irq_unmask = pci_msi_unmask_irq,
1942 -static struct msi_domain_info mobiveil_msi_domain_info = {
1943 - .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
1944 - MSI_FLAG_PCI_MSIX),
1945 - .chip = &mobiveil_msi_irq_chip,
1948 -static void mobiveil_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
1950 - struct mobiveil_pcie *pcie = irq_data_get_irq_chip_data(data);
1951 - phys_addr_t addr = pcie->pcie_reg_base + (data->hwirq * sizeof(int));
1953 - msg->address_lo = lower_32_bits(addr);
1954 - msg->address_hi = upper_32_bits(addr);
1955 - msg->data = data->hwirq;
1957 - dev_dbg(&pcie->pdev->dev, "msi#%d address_hi %#x address_lo %#x\n",
1958 - (int)data->hwirq, msg->address_hi, msg->address_lo);
1961 -static int mobiveil_msi_set_affinity(struct irq_data *irq_data,
1962 - const struct cpumask *mask, bool force)
1967 -static struct irq_chip mobiveil_msi_bottom_irq_chip = {
1968 - .name = "Mobiveil MSI",
1969 - .irq_compose_msi_msg = mobiveil_compose_msi_msg,
1970 - .irq_set_affinity = mobiveil_msi_set_affinity,
1973 -static int mobiveil_irq_msi_domain_alloc(struct irq_domain *domain,
1974 - unsigned int virq,
1975 - unsigned int nr_irqs, void *args)
1977 - struct mobiveil_pcie *pcie = domain->host_data;
1978 - struct mobiveil_msi *msi = &pcie->msi;
1979 - unsigned long bit;
1981 - WARN_ON(nr_irqs != 1);
1982 - mutex_lock(&msi->lock);
1984 - bit = find_first_zero_bit(msi->msi_irq_in_use, msi->num_of_vectors);
1985 - if (bit >= msi->num_of_vectors) {
1986 - mutex_unlock(&msi->lock);
1990 - set_bit(bit, msi->msi_irq_in_use);
1992 - mutex_unlock(&msi->lock);
1994 - irq_domain_set_info(domain, virq, bit, &mobiveil_msi_bottom_irq_chip,
1995 - domain->host_data, handle_level_irq, NULL, NULL);
1999 -static void mobiveil_irq_msi_domain_free(struct irq_domain *domain,
2000 - unsigned int virq,
2001 - unsigned int nr_irqs)
2003 - struct irq_data *d = irq_domain_get_irq_data(domain, virq);
2004 - struct mobiveil_pcie *pcie = irq_data_get_irq_chip_data(d);
2005 - struct mobiveil_msi *msi = &pcie->msi;
2007 - mutex_lock(&msi->lock);
2009 - if (!test_bit(d->hwirq, msi->msi_irq_in_use))
2010 - dev_err(&pcie->pdev->dev, "trying to free unused MSI#%lu\n",
2013 - __clear_bit(d->hwirq, msi->msi_irq_in_use);
2015 - mutex_unlock(&msi->lock);
2017 -static const struct irq_domain_ops msi_domain_ops = {
2018 - .alloc = mobiveil_irq_msi_domain_alloc,
2019 - .free = mobiveil_irq_msi_domain_free,
2022 -static int mobiveil_allocate_msi_domains(struct mobiveil_pcie *pcie)
2024 - struct device *dev = &pcie->pdev->dev;
2025 - struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node);
2026 - struct mobiveil_msi *msi = &pcie->msi;
2028 - mutex_init(&pcie->msi.lock);
2029 - msi->dev_domain = irq_domain_add_linear(NULL, msi->num_of_vectors,
2030 - &msi_domain_ops, pcie);
2031 - if (!msi->dev_domain) {
2032 - dev_err(dev, "failed to create IRQ domain\n");
2036 - msi->msi_domain = pci_msi_create_irq_domain(fwnode,
2037 - &mobiveil_msi_domain_info,
2039 - if (!msi->msi_domain) {
2040 - dev_err(dev, "failed to create MSI domain\n");
2041 - irq_domain_remove(msi->dev_domain);
2048 -static int mobiveil_pcie_init_irq_domain(struct mobiveil_pcie *pcie)
2050 - struct device *dev = &pcie->pdev->dev;
2051 - struct device_node *node = dev->of_node;
2055 - pcie->intx_domain = irq_domain_add_linear(node, PCI_NUM_INTX,
2056 - &intx_domain_ops, pcie);
2058 - if (!pcie->intx_domain) {
2059 - dev_err(dev, "Failed to get a INTx IRQ domain\n");
2063 - raw_spin_lock_init(&pcie->intx_mask_lock);
2066 - ret = mobiveil_allocate_msi_domains(pcie);
2073 -static int mobiveil_pcie_probe(struct platform_device *pdev)
2075 - struct mobiveil_pcie *pcie;
2076 - struct pci_bus *bus;
2077 - struct pci_bus *child;
2078 - struct pci_host_bridge *bridge;
2079 - struct device *dev = &pdev->dev;
2080 - resource_size_t iobase;
2083 - /* allocate the PCIe port */
2084 - bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
2088 - pcie = pci_host_bridge_priv(bridge);
2090 - pcie->pdev = pdev;
2092 - ret = mobiveil_pcie_parse_dt(pcie);
2094 - dev_err(dev, "Parsing DT failed, ret: %x\n", ret);
2098 - INIT_LIST_HEAD(&pcie->resources);
2100 - /* parse the host bridge base addresses from the device tree file */
2101 - ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
2102 - &pcie->resources, &iobase);
2104 - dev_err(dev, "Getting bridge resources failed\n");
2109 - * configure all inbound and outbound windows and prepare the RC for
2112 - ret = mobiveil_host_init(pcie);
2114 - dev_err(dev, "Failed to initialize host\n");
2118 - /* initialize the IRQ domains */
2119 - ret = mobiveil_pcie_init_irq_domain(pcie);
2121 - dev_err(dev, "Failed creating IRQ Domain\n");
2125 - irq_set_chained_handler_and_data(pcie->irq, mobiveil_pcie_isr, pcie);
2127 - ret = devm_request_pci_bus_resources(dev, &pcie->resources);
2131 - /* Initialize bridge */
2132 - list_splice_init(&pcie->resources, &bridge->windows);
2133 - bridge->dev.parent = dev;
2134 - bridge->sysdata = pcie;
2135 - bridge->busnr = pcie->root_bus_nr;
2136 - bridge->ops = &mobiveil_pcie_ops;
2137 - bridge->map_irq = of_irq_parse_and_map_pci;
2138 - bridge->swizzle_irq = pci_common_swizzle;
2140 - ret = mobiveil_bringup_link(pcie);
2142 - dev_info(dev, "link bring-up failed\n");
2146 - /* setup the kernel resources for the newly added PCIe root bus */
2147 - ret = pci_scan_root_bus_bridge(bridge);
2151 - bus = bridge->bus;
2153 - pci_assign_unassigned_bus_resources(bus);
2154 - list_for_each_entry(child, &bus->children, node)
2155 - pcie_bus_configure_settings(child);
2156 - pci_bus_add_devices(bus);
2160 - pci_free_resource_list(&pcie->resources);
2164 -static const struct of_device_id mobiveil_pcie_of_match[] = {
2165 - {.compatible = "mbvl,gpex40-pcie",},
2169 -MODULE_DEVICE_TABLE(of, mobiveil_pcie_of_match);
2171 -static struct platform_driver mobiveil_pcie_driver = {
2172 - .probe = mobiveil_pcie_probe,
2174 - .name = "mobiveil-pcie",
2175 - .of_match_table = mobiveil_pcie_of_match,
2176 - .suppress_bind_attrs = true,
2180 -builtin_platform_driver(mobiveil_pcie_driver);
2182 -MODULE_LICENSE("GPL v2");
2183 -MODULE_DESCRIPTION("Mobiveil PCIe host controller driver");
2184 -MODULE_AUTHOR("Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>");