acbf2fe35aec86f1a2591d10f0781b9a471736c0
[openwrt/staging/xback.git] /
1 From ab6a07d577dbd45d00a1738a6b5a28a6666be754 Mon Sep 17 00:00:00 2001
2 From: Hou Zhiqiang <Zhiqiang.Hou@nxp.com>
3 Date: Tue, 25 Jun 2019 09:09:07 +0000
4 Subject: [PATCH] PCI: mobiveil: Refactor Mobiveil PCIe Host Bridge IP driver
5
6 Refactor the Mobiveil PCIe Host Bridge IP driver to make
7 it easier to add support for both RC and EP mode drivers.
8 This patch moves the Mobiveil driver to a new directory
9 'drivers/pci/controller/mobiveil' and refactors it according
10 to the RC and EP abstraction.
11
12 Signed-off-by: Hou Zhiqiang <Zhiqiang.Hou@nxp.com>
13 Reviewed-by: Minghuan Lian <Minghuan.Lian@nxp.com>
14 Reviewed-by: Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
15 ---
16 MAINTAINERS | 2 +-
17 drivers/pci/controller/Kconfig | 11 +-
18 drivers/pci/controller/Makefile | 2 +-
19 drivers/pci/controller/mobiveil/Kconfig | 24 +
20 drivers/pci/controller/mobiveil/Makefile | 4 +
21 .../pci/controller/mobiveil/pcie-mobiveil-host.c | 611 +++++++++++++
22 .../pci/controller/mobiveil/pcie-mobiveil-plat.c | 59 ++
23 drivers/pci/controller/mobiveil/pcie-mobiveil.c | 227 +++++
24 drivers/pci/controller/mobiveil/pcie-mobiveil.h | 189 ++++
25 drivers/pci/controller/pcie-mobiveil.c | 964 ---------------------
26 10 files changed, 1117 insertions(+), 976 deletions(-)
27 create mode 100644 drivers/pci/controller/mobiveil/Kconfig
28 create mode 100644 drivers/pci/controller/mobiveil/Makefile
29 create mode 100644 drivers/pci/controller/mobiveil/pcie-mobiveil-host.c
30 create mode 100644 drivers/pci/controller/mobiveil/pcie-mobiveil-plat.c
31 create mode 100644 drivers/pci/controller/mobiveil/pcie-mobiveil.c
32 create mode 100644 drivers/pci/controller/mobiveil/pcie-mobiveil.h
33 delete mode 100644 drivers/pci/controller/pcie-mobiveil.c
34
35 --- a/MAINTAINERS
36 +++ b/MAINTAINERS
37 @@ -12503,7 +12503,7 @@ M: Hou Zhiqiang <Zhiqiang.Hou@nxp.com>
38 L: linux-pci@vger.kernel.org
39 S: Supported
40 F: Documentation/devicetree/bindings/pci/mobiveil-pcie.txt
41 -F: drivers/pci/controller/pcie-mobiveil.c
42 +F: drivers/pci/controller/mobiveil/pcie-mobiveil*
43
44 PCI DRIVER FOR MVEBU (Marvell Armada 370 and Armada XP SOC support)
45 M: Thomas Petazzoni <thomas.petazzoni@bootlin.com>
46 --- a/drivers/pci/controller/Kconfig
47 +++ b/drivers/pci/controller/Kconfig
48 @@ -241,16 +241,6 @@ config PCIE_MEDIATEK
49 Say Y here if you want to enable PCIe controller support on
50 MediaTek SoCs.
51
52 -config PCIE_MOBIVEIL
53 - bool "Mobiveil AXI PCIe controller"
54 - depends on ARCH_ZYNQMP || COMPILE_TEST
55 - depends on OF
56 - depends on PCI_MSI_IRQ_DOMAIN
57 - help
58 - Say Y here if you want to enable support for the Mobiveil AXI PCIe
59 - Soft IP. It has up to 8 outbound and inbound windows
60 - for address translation and it is a PCIe Gen4 IP.
61 -
62 config PCIE_TANGO_SMP8759
63 bool "Tango SMP8759 PCIe controller (DANGEROUS)"
64 depends on ARCH_TANGO && PCI_MSI && OF
65 @@ -289,4 +279,5 @@ config PCI_HYPERV_INTERFACE
66 have a common interface with the Hyper-V PCI frontend driver.
67
68 source "drivers/pci/controller/dwc/Kconfig"
69 +source "drivers/pci/controller/mobiveil/Kconfig"
70 endmenu
71 --- a/drivers/pci/controller/Makefile
72 +++ b/drivers/pci/controller/Makefile
73 @@ -27,11 +27,11 @@ obj-$(CONFIG_PCIE_ROCKCHIP) += pcie-rock
74 obj-$(CONFIG_PCIE_ROCKCHIP_EP) += pcie-rockchip-ep.o
75 obj-$(CONFIG_PCIE_ROCKCHIP_HOST) += pcie-rockchip-host.o
76 obj-$(CONFIG_PCIE_MEDIATEK) += pcie-mediatek.o
77 -obj-$(CONFIG_PCIE_MOBIVEIL) += pcie-mobiveil.o
78 obj-$(CONFIG_PCIE_TANGO_SMP8759) += pcie-tango.o
79 obj-$(CONFIG_VMD) += vmd.o
80 # pcie-hisi.o quirks are needed even without CONFIG_PCIE_DW
81 obj-y += dwc/
82 +obj-y += mobiveil/
83
84
85 # The following drivers are for devices that use the generic ACPI
86 --- /dev/null
87 +++ b/drivers/pci/controller/mobiveil/Kconfig
88 @@ -0,0 +1,24 @@
89 +# SPDX-License-Identifier: GPL-2.0
90 +
91 +menu "Mobiveil PCIe Core Support"
92 + depends on PCI
93 +
94 +config PCIE_MOBIVEIL
95 + bool
96 +
97 +config PCIE_MOBIVEIL_HOST
98 + bool
99 + depends on PCI_MSI_IRQ_DOMAIN
100 + select PCIE_MOBIVEIL
101 +
102 +config PCIE_MOBIVEIL_PLAT
103 + bool "Mobiveil AXI PCIe controller"
104 + depends on ARCH_ZYNQMP || COMPILE_TEST
105 + depends on OF
106 + select PCIE_MOBIVEIL_HOST
107 + help
108 + Say Y here if you want to enable support for the Mobiveil AXI PCIe
109 + Soft IP. It has up to 8 outbound and inbound windows
110 + for address translation and it is a PCIe Gen4 IP.
111 +
112 +endmenu
113 --- /dev/null
114 +++ b/drivers/pci/controller/mobiveil/Makefile
115 @@ -0,0 +1,4 @@
116 +# SPDX-License-Identifier: GPL-2.0
117 +obj-$(CONFIG_PCIE_MOBIVEIL) += pcie-mobiveil.o
118 +obj-$(CONFIG_PCIE_MOBIVEIL_HOST) += pcie-mobiveil-host.o
119 +obj-$(CONFIG_PCIE_MOBIVEIL_PLAT) += pcie-mobiveil-plat.o
120 --- /dev/null
121 +++ b/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c
122 @@ -0,0 +1,611 @@
123 +// SPDX-License-Identifier: GPL-2.0
124 +/*
125 + * PCIe host controller driver for Mobiveil PCIe Host controller
126 + *
127 + * Copyright (c) 2018 Mobiveil Inc.
128 + * Copyright 2019 NXP
129 + *
130 + * Author: Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
131 + * Refactor: Zhiqiang Hou <Zhiqiang.Hou@nxp.com>
132 + */
133 +
134 +#include <linux/init.h>
135 +#include <linux/interrupt.h>
136 +#include <linux/irq.h>
137 +#include <linux/irqchip/chained_irq.h>
138 +#include <linux/irqdomain.h>
139 +#include <linux/kernel.h>
140 +#include <linux/module.h>
141 +#include <linux/msi.h>
142 +#include <linux/of_address.h>
143 +#include <linux/of_irq.h>
144 +#include <linux/of_platform.h>
145 +#include <linux/of_pci.h>
146 +#include <linux/pci.h>
147 +#include <linux/platform_device.h>
148 +#include <linux/slab.h>
149 +
150 +#include "pcie-mobiveil.h"
151 +
152 +static bool mobiveil_pcie_valid_device(struct pci_bus *bus, unsigned int devfn)
153 +{
154 + struct mobiveil_pcie *pcie = bus->sysdata;
155 +
156 + /* Only one device down on each root port */
157 + if ((bus->number == pcie->rp.root_bus_nr) && (devfn > 0))
158 + return false;
159 +
160 + /*
161 + * Do not read more than one device on the bus directly
162 + * attached to RC
163 + */
164 + if ((bus->primary == pcie->rp.root_bus_nr) && (PCI_SLOT(devfn) > 0))
165 + return false;
166 +
167 + return true;
168 +}
169 +
170 +/*
171 + * mobiveil_pcie_map_bus - routine to get the configuration base of either
172 + * root port or endpoint
173 + */
174 +static void __iomem *mobiveil_pcie_map_bus(struct pci_bus *bus,
175 + unsigned int devfn, int where)
176 +{
177 + struct mobiveil_pcie *pcie = bus->sysdata;
178 + u32 value;
179 +
180 + if (!mobiveil_pcie_valid_device(bus, devfn))
181 + return NULL;
182 +
183 + /* RC config access */
184 + if (bus->number == pcie->rp.root_bus_nr)
185 + return pcie->csr_axi_slave_base + where;
186 +
187 + /*
188 + * EP config access (in Config/APIO space)
189 + * Program PEX Address base (31..16 bits) with appropriate value
190 + * (BDF) in PAB_AXI_AMAP_PEX_WIN_L0 Register.
191 + * Relies on pci_lock serialization
192 + */
193 + value = bus->number << PAB_BUS_SHIFT |
194 + PCI_SLOT(devfn) << PAB_DEVICE_SHIFT |
195 + PCI_FUNC(devfn) << PAB_FUNCTION_SHIFT;
196 +
197 + csr_writel(pcie, value, PAB_AXI_AMAP_PEX_WIN_L(WIN_NUM_0));
198 +
199 + return pcie->rp.config_axi_slave_base + where;
200 +}
201 +
202 +static struct pci_ops mobiveil_pcie_ops = {
203 + .map_bus = mobiveil_pcie_map_bus,
204 + .read = pci_generic_config_read,
205 + .write = pci_generic_config_write,
206 +};
207 +
208 +static void mobiveil_pcie_isr(struct irq_desc *desc)
209 +{
210 + struct irq_chip *chip = irq_desc_get_chip(desc);
211 + struct mobiveil_pcie *pcie = irq_desc_get_handler_data(desc);
212 + struct device *dev = &pcie->pdev->dev;
213 + struct mobiveil_msi *msi = &pcie->rp.msi;
214 + u32 msi_data, msi_addr_lo, msi_addr_hi;
215 + u32 intr_status, msi_status;
216 + unsigned long shifted_status;
217 + u32 bit, virq, val, mask;
218 +
219 + /*
220 + * The core provides a single interrupt for both INTx/MSI messages.
221 + * So we'll read both INTx and MSI status
222 + */
223 +
224 + chained_irq_enter(chip, desc);
225 +
226 + /* read INTx status */
227 + val = csr_readl(pcie, PAB_INTP_AMBA_MISC_STAT);
228 + mask = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
229 + intr_status = val & mask;
230 +
231 + /* Handle INTx */
232 + if (intr_status & PAB_INTP_INTX_MASK) {
233 + shifted_status = csr_readl(pcie, PAB_INTP_AMBA_MISC_STAT);
234 + shifted_status &= PAB_INTP_INTX_MASK;
235 + shifted_status >>= PAB_INTX_START;
236 + do {
237 + for_each_set_bit(bit, &shifted_status, PCI_NUM_INTX) {
238 + virq = irq_find_mapping(pcie->rp.intx_domain,
239 + bit + 1);
240 + if (virq)
241 + generic_handle_irq(virq);
242 + else
243 + dev_err_ratelimited(dev, "unexpected IRQ, INT%d\n",
244 + bit);
245 +
246 + /* clear interrupt handled */
247 + csr_writel(pcie, 1 << (PAB_INTX_START + bit),
248 + PAB_INTP_AMBA_MISC_STAT);
249 + }
250 +
251 + shifted_status = csr_readl(pcie,
252 + PAB_INTP_AMBA_MISC_STAT);
253 + shifted_status &= PAB_INTP_INTX_MASK;
254 + shifted_status >>= PAB_INTX_START;
255 + } while (shifted_status != 0);
256 + }
257 +
258 + /* read extra MSI status register */
259 + msi_status = readl_relaxed(pcie->apb_csr_base + MSI_STATUS_OFFSET);
260 +
261 + /* handle MSI interrupts */
262 + while (msi_status & 1) {
263 + msi_data = readl_relaxed(pcie->apb_csr_base + MSI_DATA_OFFSET);
264 +
265 + /*
266 + * MSI_STATUS_OFFSET register gets updated to zero
267 + * once we pop not only the MSI data but also address
268 + * from MSI hardware FIFO. So keeping these following
269 + * two dummy reads.
270 + */
271 + msi_addr_lo = readl_relaxed(pcie->apb_csr_base +
272 + MSI_ADDR_L_OFFSET);
273 + msi_addr_hi = readl_relaxed(pcie->apb_csr_base +
274 + MSI_ADDR_H_OFFSET);
275 + dev_dbg(dev, "MSI registers, data: %08x, addr: %08x:%08x\n",
276 + msi_data, msi_addr_hi, msi_addr_lo);
277 +
278 + virq = irq_find_mapping(msi->dev_domain, msi_data);
279 + if (virq)
280 + generic_handle_irq(virq);
281 +
282 + msi_status = readl_relaxed(pcie->apb_csr_base +
283 + MSI_STATUS_OFFSET);
284 + }
285 +
286 + /* Clear the interrupt status */
287 + csr_writel(pcie, intr_status, PAB_INTP_AMBA_MISC_STAT);
288 + chained_irq_exit(chip, desc);
289 +}
290 +
291 +static int mobiveil_pcie_parse_dt(struct mobiveil_pcie *pcie)
292 +{
293 + struct device *dev = &pcie->pdev->dev;
294 + struct platform_device *pdev = pcie->pdev;
295 + struct device_node *node = dev->of_node;
296 + struct resource *res;
297 +
298 + /* map config resource */
299 + res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
300 + "config_axi_slave");
301 + pcie->rp.config_axi_slave_base = devm_pci_remap_cfg_resource(dev, res);
302 + if (IS_ERR(pcie->rp.config_axi_slave_base))
303 + return PTR_ERR(pcie->rp.config_axi_slave_base);
304 + pcie->rp.ob_io_res = res;
305 +
306 + /* map csr resource */
307 + res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
308 + "csr_axi_slave");
309 + pcie->csr_axi_slave_base = devm_pci_remap_cfg_resource(dev, res);
310 + if (IS_ERR(pcie->csr_axi_slave_base))
311 + return PTR_ERR(pcie->csr_axi_slave_base);
312 + pcie->pcie_reg_base = res->start;
313 +
314 + /* read the number of windows requested */
315 + if (of_property_read_u32(node, "apio-wins", &pcie->apio_wins))
316 + pcie->apio_wins = MAX_PIO_WINDOWS;
317 +
318 + if (of_property_read_u32(node, "ppio-wins", &pcie->ppio_wins))
319 + pcie->ppio_wins = MAX_PIO_WINDOWS;
320 +
321 + return 0;
322 +}
323 +
324 +static void mobiveil_pcie_enable_msi(struct mobiveil_pcie *pcie)
325 +{
326 + phys_addr_t msg_addr = pcie->pcie_reg_base;
327 + struct mobiveil_msi *msi = &pcie->rp.msi;
328 +
329 + msi->num_of_vectors = PCI_NUM_MSI;
330 + msi->msi_pages_phys = (phys_addr_t)msg_addr;
331 +
332 + writel_relaxed(lower_32_bits(msg_addr),
333 + pcie->apb_csr_base + MSI_BASE_LO_OFFSET);
334 + writel_relaxed(upper_32_bits(msg_addr),
335 + pcie->apb_csr_base + MSI_BASE_HI_OFFSET);
336 + writel_relaxed(4096, pcie->apb_csr_base + MSI_SIZE_OFFSET);
337 + writel_relaxed(1, pcie->apb_csr_base + MSI_ENABLE_OFFSET);
338 +}
339 +
340 +static int mobiveil_host_init(struct mobiveil_pcie *pcie)
341 +{
342 + u32 value, pab_ctrl, type;
343 + struct resource_entry *win;
344 +
345 + /* setup bus numbers */
346 + value = csr_readl(pcie, PCI_PRIMARY_BUS);
347 + value &= 0xff000000;
348 + value |= 0x00ff0100;
349 + csr_writel(pcie, value, PCI_PRIMARY_BUS);
350 +
351 + /*
352 + * program Bus Master Enable Bit in Command Register in PAB Config
353 + * Space
354 + */
355 + value = csr_readl(pcie, PCI_COMMAND);
356 + value |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER;
357 + csr_writel(pcie, value, PCI_COMMAND);
358 +
359 + /*
360 + * program PIO Enable Bit to 1 (and PEX PIO Enable to 1) in PAB_CTRL
361 + * register
362 + */
363 + pab_ctrl = csr_readl(pcie, PAB_CTRL);
364 + pab_ctrl |= (1 << AMBA_PIO_ENABLE_SHIFT) | (1 << PEX_PIO_ENABLE_SHIFT);
365 + csr_writel(pcie, pab_ctrl, PAB_CTRL);
366 +
367 + /*
368 + * program PIO Enable Bit to 1 and Config Window Enable Bit to 1 in
369 + * PAB_AXI_PIO_CTRL Register
370 + */
371 + value = csr_readl(pcie, PAB_AXI_PIO_CTRL);
372 + value |= APIO_EN_MASK;
373 + csr_writel(pcie, value, PAB_AXI_PIO_CTRL);
374 +
375 + /* Enable PCIe PIO master */
376 + value = csr_readl(pcie, PAB_PEX_PIO_CTRL);
377 + value |= 1 << PIO_ENABLE_SHIFT;
378 + csr_writel(pcie, value, PAB_PEX_PIO_CTRL);
379 +
380 + /*
381 + * we'll program one outbound window for config reads and
382 + * another default inbound window for all the upstream traffic
383 + * rest of the outbound windows will be configured according to
384 + * the "ranges" field defined in device tree
385 + */
386 +
387 + /* config outbound translation window */
388 + program_ob_windows(pcie, WIN_NUM_0, pcie->rp.ob_io_res->start, 0,
389 + CFG_WINDOW_TYPE, resource_size(pcie->rp.ob_io_res));
390 +
391 + /* memory inbound translation window */
392 + program_ib_windows(pcie, WIN_NUM_0, 0, 0, MEM_WINDOW_TYPE, IB_WIN_SIZE);
393 +
394 + /* Get the I/O and memory ranges from DT */
395 + resource_list_for_each_entry(win, &pcie->resources) {
396 + if (resource_type(win->res) == IORESOURCE_MEM) {
397 + type = MEM_WINDOW_TYPE;
398 + } else if (resource_type(win->res) == IORESOURCE_IO) {
399 + type = IO_WINDOW_TYPE;
400 + } else if (resource_type(win->res) == IORESOURCE_BUS) {
401 + pcie->rp.root_bus_nr = win->res->start;
402 + continue;
403 + } else {
404 + continue;
405 + }
406 +
407 + /* configure outbound translation window */
408 + program_ob_windows(pcie, pcie->ob_wins_configured,
409 + win->res->start,
410 + win->res->start - win->offset,
411 + type, resource_size(win->res));
412 + }
413 +
414 + /* fixup for PCIe class register */
415 + value = csr_readl(pcie, PAB_INTP_AXI_PIO_CLASS);
416 + value &= 0xff;
417 + value |= (PCI_CLASS_BRIDGE_PCI << 16);
418 + csr_writel(pcie, value, PAB_INTP_AXI_PIO_CLASS);
419 +
420 + return 0;
421 +}
422 +
423 +static void mobiveil_mask_intx_irq(struct irq_data *data)
424 +{
425 + struct irq_desc *desc = irq_to_desc(data->irq);
426 + struct mobiveil_pcie *pcie;
427 + unsigned long flags;
428 + u32 mask, shifted_val;
429 +
430 + pcie = irq_desc_get_chip_data(desc);
431 + mask = 1 << ((data->hwirq + PAB_INTX_START) - 1);
432 + raw_spin_lock_irqsave(&pcie->rp.intx_mask_lock, flags);
433 + shifted_val = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
434 + shifted_val &= ~mask;
435 + csr_writel(pcie, shifted_val, PAB_INTP_AMBA_MISC_ENB);
436 + raw_spin_unlock_irqrestore(&pcie->rp.intx_mask_lock, flags);
437 +}
438 +
439 +static void mobiveil_unmask_intx_irq(struct irq_data *data)
440 +{
441 + struct irq_desc *desc = irq_to_desc(data->irq);
442 + struct mobiveil_pcie *pcie;
443 + unsigned long flags;
444 + u32 shifted_val, mask;
445 +
446 + pcie = irq_desc_get_chip_data(desc);
447 + mask = 1 << ((data->hwirq + PAB_INTX_START) - 1);
448 + raw_spin_lock_irqsave(&pcie->rp.intx_mask_lock, flags);
449 + shifted_val = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
450 + shifted_val |= mask;
451 + csr_writel(pcie, shifted_val, PAB_INTP_AMBA_MISC_ENB);
452 + raw_spin_unlock_irqrestore(&pcie->rp.intx_mask_lock, flags);
453 +}
454 +
455 +static struct irq_chip intx_irq_chip = {
456 + .name = "mobiveil_pcie:intx",
457 + .irq_enable = mobiveil_unmask_intx_irq,
458 + .irq_disable = mobiveil_mask_intx_irq,
459 + .irq_mask = mobiveil_mask_intx_irq,
460 + .irq_unmask = mobiveil_unmask_intx_irq,
461 +};
462 +
463 +/* routine to setup the INTx related data */
464 +static int mobiveil_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
465 + irq_hw_number_t hwirq)
466 +{
467 + irq_set_chip_and_handler(irq, &intx_irq_chip, handle_level_irq);
468 + irq_set_chip_data(irq, domain->host_data);
469 +
470 + return 0;
471 +}
472 +
473 +/* INTx domain operations structure */
474 +static const struct irq_domain_ops intx_domain_ops = {
475 + .map = mobiveil_pcie_intx_map,
476 +};
477 +
478 +static struct irq_chip mobiveil_msi_irq_chip = {
479 + .name = "Mobiveil PCIe MSI",
480 + .irq_mask = pci_msi_mask_irq,
481 + .irq_unmask = pci_msi_unmask_irq,
482 +};
483 +
484 +static struct msi_domain_info mobiveil_msi_domain_info = {
485 + .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
486 + MSI_FLAG_PCI_MSIX),
487 + .chip = &mobiveil_msi_irq_chip,
488 +};
489 +
490 +static void mobiveil_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
491 +{
492 + struct mobiveil_pcie *pcie = irq_data_get_irq_chip_data(data);
493 + phys_addr_t addr = pcie->pcie_reg_base + (data->hwirq * sizeof(int));
494 +
495 + msg->address_lo = lower_32_bits(addr);
496 + msg->address_hi = upper_32_bits(addr);
497 + msg->data = data->hwirq;
498 +
499 + dev_dbg(&pcie->pdev->dev, "msi#%d address_hi %#x address_lo %#x\n",
500 + (int)data->hwirq, msg->address_hi, msg->address_lo);
501 +}
502 +
503 +static int mobiveil_msi_set_affinity(struct irq_data *irq_data,
504 + const struct cpumask *mask, bool force)
505 +{
506 + return -EINVAL;
507 +}
508 +
509 +static struct irq_chip mobiveil_msi_bottom_irq_chip = {
510 + .name = "Mobiveil MSI",
511 + .irq_compose_msi_msg = mobiveil_compose_msi_msg,
512 + .irq_set_affinity = mobiveil_msi_set_affinity,
513 +};
514 +
515 +static int mobiveil_irq_msi_domain_alloc(struct irq_domain *domain,
516 + unsigned int virq,
517 + unsigned int nr_irqs, void *args)
518 +{
519 + struct mobiveil_pcie *pcie = domain->host_data;
520 + struct mobiveil_msi *msi = &pcie->rp.msi;
521 + unsigned long bit;
522 +
523 + WARN_ON(nr_irqs != 1);
524 + mutex_lock(&msi->lock);
525 +
526 + bit = find_first_zero_bit(msi->msi_irq_in_use, msi->num_of_vectors);
527 + if (bit >= msi->num_of_vectors) {
528 + mutex_unlock(&msi->lock);
529 + return -ENOSPC;
530 + }
531 +
532 + set_bit(bit, msi->msi_irq_in_use);
533 +
534 + mutex_unlock(&msi->lock);
535 +
536 + irq_domain_set_info(domain, virq, bit, &mobiveil_msi_bottom_irq_chip,
537 + domain->host_data, handle_level_irq, NULL, NULL);
538 + return 0;
539 +}
540 +
541 +static void mobiveil_irq_msi_domain_free(struct irq_domain *domain,
542 + unsigned int virq,
543 + unsigned int nr_irqs)
544 +{
545 + struct irq_data *d = irq_domain_get_irq_data(domain, virq);
546 + struct mobiveil_pcie *pcie = irq_data_get_irq_chip_data(d);
547 + struct mobiveil_msi *msi = &pcie->rp.msi;
548 +
549 + mutex_lock(&msi->lock);
550 +
551 + if (!test_bit(d->hwirq, msi->msi_irq_in_use))
552 + dev_err(&pcie->pdev->dev, "trying to free unused MSI#%lu\n",
553 + d->hwirq);
554 + else
555 + __clear_bit(d->hwirq, msi->msi_irq_in_use);
556 +
557 + mutex_unlock(&msi->lock);
558 +}
559 +static const struct irq_domain_ops msi_domain_ops = {
560 + .alloc = mobiveil_irq_msi_domain_alloc,
561 + .free = mobiveil_irq_msi_domain_free,
562 +};
563 +
564 +static int mobiveil_allocate_msi_domains(struct mobiveil_pcie *pcie)
565 +{
566 + struct device *dev = &pcie->pdev->dev;
567 + struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node);
568 + struct mobiveil_msi *msi = &pcie->rp.msi;
569 +
570 + mutex_init(&msi->lock);
571 + msi->dev_domain = irq_domain_add_linear(NULL, msi->num_of_vectors,
572 + &msi_domain_ops, pcie);
573 + if (!msi->dev_domain) {
574 + dev_err(dev, "failed to create IRQ domain\n");
575 + return -ENOMEM;
576 + }
577 +
578 + msi->msi_domain = pci_msi_create_irq_domain(fwnode,
579 + &mobiveil_msi_domain_info,
580 + msi->dev_domain);
581 + if (!msi->msi_domain) {
582 + dev_err(dev, "failed to create MSI domain\n");
583 + irq_domain_remove(msi->dev_domain);
584 + return -ENOMEM;
585 + }
586 +
587 + return 0;
588 +}
589 +
590 +static int mobiveil_pcie_init_irq_domain(struct mobiveil_pcie *pcie)
591 +{
592 + struct device *dev = &pcie->pdev->dev;
593 + struct device_node *node = dev->of_node;
594 + int ret;
595 +
596 + /* setup INTx */
597 + pcie->rp.intx_domain = irq_domain_add_linear(node, PCI_NUM_INTX,
598 + &intx_domain_ops, pcie);
599 +
600 + if (!pcie->rp.intx_domain) {
601 + dev_err(dev, "Failed to get a INTx IRQ domain\n");
602 + return -ENOMEM;
603 + }
604 +
605 + raw_spin_lock_init(&pcie->rp.intx_mask_lock);
606 +
607 + /* setup MSI */
608 + ret = mobiveil_allocate_msi_domains(pcie);
609 + if (ret)
610 + return ret;
611 +
612 + return 0;
613 +}
614 +
615 +static int mobiveil_pcie_interrupt_init(struct mobiveil_pcie *pcie)
616 +{
617 + struct device *dev = &pcie->pdev->dev;
618 + struct resource *res;
619 + int ret;
620 +
621 + if (pcie->rp.ops->interrupt_init)
622 + return pcie->rp.ops->interrupt_init(pcie);
623 +
624 + /* map MSI config resource */
625 + res = platform_get_resource_byname(pcie->pdev, IORESOURCE_MEM,
626 + "apb_csr");
627 + pcie->apb_csr_base = devm_pci_remap_cfg_resource(dev, res);
628 + if (IS_ERR(pcie->apb_csr_base))
629 + return PTR_ERR(pcie->apb_csr_base);
630 +
631 + /* setup MSI hardware registers */
632 + mobiveil_pcie_enable_msi(pcie);
633 +
634 + pcie->rp.irq = platform_get_irq(pcie->pdev, 0);
635 + if (pcie->rp.irq <= 0) {
636 + dev_err(dev, "failed to map IRQ: %d\n", pcie->rp.irq);
637 + return -ENODEV;
638 + }
639 +
640 + /* initialize the IRQ domains */
641 + ret = mobiveil_pcie_init_irq_domain(pcie);
642 + if (ret) {
643 + dev_err(dev, "Failed creating IRQ Domain\n");
644 + return ret;
645 + }
646 +
647 + irq_set_chained_handler_and_data(pcie->rp.irq,
648 + mobiveil_pcie_isr, pcie);
649 +
650 + /* Enable interrupts */
651 + csr_writel(pcie, (PAB_INTP_INTX_MASK | PAB_INTP_MSI_MASK),
652 + PAB_INTP_AMBA_MISC_ENB);
653 +
654 + return 0;
655 +}
656 +
657 +int mobiveil_pcie_host_probe(struct mobiveil_pcie *pcie)
658 +{
659 + struct pci_bus *bus;
660 + struct pci_bus *child;
661 + struct pci_host_bridge *bridge = pcie->bridge;
662 + struct device *dev = &pcie->pdev->dev;
663 + resource_size_t iobase;
664 + int ret;
665 +
666 + INIT_LIST_HEAD(&pcie->resources);
667 +
668 + ret = mobiveil_pcie_parse_dt(pcie);
669 + if (ret) {
670 + dev_err(dev, "Parsing DT failed, ret: %x\n", ret);
671 + return ret;
672 + }
673 +
674 + /* parse the host bridge base addresses from the device tree file */
675 + ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
676 + &pcie->resources, &iobase);
677 + if (ret) {
678 + dev_err(dev, "Getting bridge resources failed\n");
679 + return ret;
680 + }
681 +
682 + /*
683 + * configure all inbound and outbound windows and prepare the RC for
684 + * config access
685 + */
686 + ret = mobiveil_host_init(pcie);
687 + if (ret) {
688 + dev_err(dev, "Failed to initialize host\n");
689 + goto error;
690 + }
691 +
692 + ret = mobiveil_pcie_interrupt_init(pcie);
693 + if (ret) {
694 + dev_err(dev, "Interrupt init failed\n");
695 + goto error;
696 + }
697 +
698 + ret = devm_request_pci_bus_resources(dev, &pcie->resources);
699 + if (ret)
700 + goto error;
701 +
702 + /* Initialize bridge */
703 + list_splice_init(&pcie->resources, &bridge->windows);
704 + bridge->dev.parent = dev;
705 + bridge->sysdata = pcie;
706 + bridge->busnr = pcie->rp.root_bus_nr;
707 + bridge->ops = &mobiveil_pcie_ops;
708 + bridge->map_irq = of_irq_parse_and_map_pci;
709 + bridge->swizzle_irq = pci_common_swizzle;
710 +
711 + ret = mobiveil_bringup_link(pcie);
712 + if (ret) {
713 + dev_info(dev, "link bring-up failed\n");
714 + goto error;
715 + }
716 +
717 + /* setup the kernel resources for the newly added PCIe root bus */
718 + ret = pci_scan_root_bus_bridge(bridge);
719 + if (ret)
720 + goto error;
721 +
722 + bus = bridge->bus;
723 +
724 + pci_assign_unassigned_bus_resources(bus);
725 + list_for_each_entry(child, &bus->children, node)
726 + pcie_bus_configure_settings(child);
727 + pci_bus_add_devices(bus);
728 +
729 + return 0;
730 +error:
731 + pci_free_resource_list(&pcie->resources);
732 + return ret;
733 +}
734 --- /dev/null
735 +++ b/drivers/pci/controller/mobiveil/pcie-mobiveil-plat.c
736 @@ -0,0 +1,59 @@
737 +// SPDX-License-Identifier: GPL-2.0
738 +/*
739 + * PCIe host controller driver for Mobiveil PCIe Host controller
740 + *
741 + * Copyright (c) 2018 Mobiveil Inc.
742 + * Copyright 2019 NXP
743 + *
744 + * Author: Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
745 + * Refactor: Zhiqiang Hou <Zhiqiang.Hou@nxp.com>
746 + */
747 +
748 +#include <linux/init.h>
749 +#include <linux/kernel.h>
750 +#include <linux/module.h>
751 +#include <linux/of_pci.h>
752 +#include <linux/pci.h>
753 +#include <linux/platform_device.h>
754 +#include <linux/slab.h>
755 +
756 +#include "pcie-mobiveil.h"
757 +
758 +static int mobiveil_pcie_probe(struct platform_device *pdev)
759 +{
760 + struct mobiveil_pcie *pcie;
761 + struct pci_host_bridge *bridge;
762 + struct device *dev = &pdev->dev;
763 +
764 + bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
765 + if (!bridge)
766 + return -ENOMEM;
767 +
768 + pcie = pci_host_bridge_priv(bridge);
769 +
770 + pcie->pdev = pdev;
771 +
772 + return mobiveil_pcie_host_probe(pcie);
773 +}
774 +
775 +static const struct of_device_id mobiveil_pcie_of_match[] = {
776 + {.compatible = "mbvl,gpex40-pcie",},
777 + {},
778 +};
779 +
780 +MODULE_DEVICE_TABLE(of, mobiveil_pcie_of_match);
781 +
782 +static struct platform_driver mobiveil_pcie_driver = {
783 + .probe = mobiveil_pcie_probe,
784 + .driver = {
785 + .name = "mobiveil-pcie",
786 + .of_match_table = mobiveil_pcie_of_match,
787 + .suppress_bind_attrs = true,
788 + },
789 +};
790 +
791 +builtin_platform_driver(mobiveil_pcie_driver);
792 +
793 +MODULE_LICENSE("GPL v2");
794 +MODULE_DESCRIPTION("Mobiveil PCIe host controller driver");
795 +MODULE_AUTHOR("Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>");
796 --- /dev/null
797 +++ b/drivers/pci/controller/mobiveil/pcie-mobiveil.c
798 @@ -0,0 +1,227 @@
799 +// SPDX-License-Identifier: GPL-2.0
800 +/*
801 + * PCIe host controller driver for Mobiveil PCIe Host controller
802 + *
803 + * Copyright (c) 2018 Mobiveil Inc.
804 + * Copyright 2019 NXP
805 + *
806 + * Author: Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
807 + * Refactor: Zhiqiang Hou <Zhiqiang.Hou@nxp.com>
808 + */
809 +
810 +#include <linux/delay.h>
811 +#include <linux/init.h>
812 +#include <linux/kernel.h>
813 +#include <linux/pci.h>
814 +#include <linux/platform_device.h>
815 +
816 +#include "pcie-mobiveil.h"
817 +
818 +/*
819 + * mobiveil_pcie_sel_page - routine to access paged register
820 + *
821 + * Registers whose address greater than PAGED_ADDR_BNDRY (0xc00) are paged,
822 + * for this scheme to work extracted higher 6 bits of the offset will be
823 + * written to pg_sel field of PAB_CTRL register and rest of the lower 10
824 + * bits enabled with PAGED_ADDR_BNDRY are used as offset of the register.
825 + */
826 +static void mobiveil_pcie_sel_page(struct mobiveil_pcie *pcie, u8 pg_idx)
827 +{
828 + u32 val;
829 +
830 + val = readl(pcie->csr_axi_slave_base + PAB_CTRL);
831 + val &= ~(PAGE_SEL_MASK << PAGE_SEL_SHIFT);
832 + val |= (pg_idx & PAGE_SEL_MASK) << PAGE_SEL_SHIFT;
833 +
834 + writel(val, pcie->csr_axi_slave_base + PAB_CTRL);
835 +}
836 +
837 +static void *mobiveil_pcie_comp_addr(struct mobiveil_pcie *pcie, u32 off)
838 +{
839 + if (off < PAGED_ADDR_BNDRY) {
840 + /* For directly accessed registers, clear the pg_sel field */
841 + mobiveil_pcie_sel_page(pcie, 0);
842 + return pcie->csr_axi_slave_base + off;
843 + }
844 +
845 + mobiveil_pcie_sel_page(pcie, OFFSET_TO_PAGE_IDX(off));
846 + return pcie->csr_axi_slave_base + OFFSET_TO_PAGE_ADDR(off);
847 +}
848 +
849 +static int mobiveil_pcie_read(void __iomem *addr, int size, u32 *val)
850 +{
851 + if ((uintptr_t)addr & (size - 1)) {
852 + *val = 0;
853 + return PCIBIOS_BAD_REGISTER_NUMBER;
854 + }
855 +
856 + switch (size) {
857 + case 4:
858 + *val = readl(addr);
859 + break;
860 + case 2:
861 + *val = readw(addr);
862 + break;
863 + case 1:
864 + *val = readb(addr);
865 + break;
866 + default:
867 + *val = 0;
868 + return PCIBIOS_BAD_REGISTER_NUMBER;
869 + }
870 +
871 + return PCIBIOS_SUCCESSFUL;
872 +}
873 +
874 +static int mobiveil_pcie_write(void __iomem *addr, int size, u32 val)
875 +{
876 + if ((uintptr_t)addr & (size - 1))
877 + return PCIBIOS_BAD_REGISTER_NUMBER;
878 +
879 + switch (size) {
880 + case 4:
881 + writel(val, addr);
882 + break;
883 + case 2:
884 + writew(val, addr);
885 + break;
886 + case 1:
887 + writeb(val, addr);
888 + break;
889 + default:
890 + return PCIBIOS_BAD_REGISTER_NUMBER;
891 + }
892 +
893 + return PCIBIOS_SUCCESSFUL;
894 +}
895 +
896 +u32 csr_read(struct mobiveil_pcie *pcie, u32 off, size_t size)
897 +{
898 + void *addr;
899 + u32 val;
900 + int ret;
901 +
902 + addr = mobiveil_pcie_comp_addr(pcie, off);
903 +
904 + ret = mobiveil_pcie_read(addr, size, &val);
905 + if (ret)
906 + dev_err(&pcie->pdev->dev, "read CSR address failed\n");
907 +
908 + return val;
909 +}
910 +
911 +void csr_write(struct mobiveil_pcie *pcie, u32 val, u32 off, size_t size)
912 +{
913 + void *addr;
914 + int ret;
915 +
916 + addr = mobiveil_pcie_comp_addr(pcie, off);
917 +
918 + ret = mobiveil_pcie_write(addr, size, val);
919 + if (ret)
920 + dev_err(&pcie->pdev->dev, "write CSR address failed\n");
921 +}
922 +
923 +bool mobiveil_pcie_link_up(struct mobiveil_pcie *pcie)
924 +{
925 + if (pcie->ops->link_up)
926 + return pcie->ops->link_up(pcie);
927 +
928 + return (csr_readl(pcie, LTSSM_STATUS) &
929 + LTSSM_STATUS_L0_MASK) == LTSSM_STATUS_L0;
930 +}
931 +
932 +void program_ib_windows(struct mobiveil_pcie *pcie, int win_num, u64 cpu_addr,
933 + u64 pci_addr, u32 type, u64 size)
934 +{
935 + u32 value;
936 + u64 size64 = ~(size - 1);
937 +
938 + if (win_num >= pcie->ppio_wins) {
939 + dev_err(&pcie->pdev->dev,
940 + "ERROR: max inbound windows reached !\n");
941 + return;
942 + }
943 +
944 + value = csr_readl(pcie, PAB_PEX_AMAP_CTRL(win_num));
945 + value &= ~(AMAP_CTRL_TYPE_MASK << AMAP_CTRL_TYPE_SHIFT | WIN_SIZE_MASK);
946 + value |= type << AMAP_CTRL_TYPE_SHIFT | 1 << AMAP_CTRL_EN_SHIFT |
947 + (lower_32_bits(size64) & WIN_SIZE_MASK);
948 + csr_writel(pcie, value, PAB_PEX_AMAP_CTRL(win_num));
949 +
950 + csr_writel(pcie, upper_32_bits(size64),
951 + PAB_EXT_PEX_AMAP_SIZEN(win_num));
952 +
953 + csr_writel(pcie, lower_32_bits(cpu_addr),
954 + PAB_PEX_AMAP_AXI_WIN(win_num));
955 + csr_writel(pcie, upper_32_bits(cpu_addr),
956 + PAB_EXT_PEX_AMAP_AXI_WIN(win_num));
957 +
958 + csr_writel(pcie, lower_32_bits(pci_addr),
959 + PAB_PEX_AMAP_PEX_WIN_L(win_num));
960 + csr_writel(pcie, upper_32_bits(pci_addr),
961 + PAB_PEX_AMAP_PEX_WIN_H(win_num));
962 +
963 + pcie->ib_wins_configured++;
964 +}
965 +
966 +/*
967 + * routine to program the outbound windows
968 + */
969 +void program_ob_windows(struct mobiveil_pcie *pcie, int win_num, u64 cpu_addr,
970 + u64 pci_addr, u32 type, u64 size)
971 +{
972 + u32 value;
973 + u64 size64 = ~(size - 1);
974 +
975 + if (win_num >= pcie->apio_wins) {
976 + dev_err(&pcie->pdev->dev,
977 + "ERROR: max outbound windows reached !\n");
978 + return;
979 + }
980 +
981 + /*
982 + * program Enable Bit to 1, Type Bit to (00) base 2, AXI Window Size Bit
983 + * to 4 KB in PAB_AXI_AMAP_CTRL register
984 + */
985 + value = csr_readl(pcie, PAB_AXI_AMAP_CTRL(win_num));
986 + value &= ~(WIN_TYPE_MASK << WIN_TYPE_SHIFT | WIN_SIZE_MASK);
987 + value |= 1 << WIN_ENABLE_SHIFT | type << WIN_TYPE_SHIFT |
988 + (lower_32_bits(size64) & WIN_SIZE_MASK);
989 + csr_writel(pcie, value, PAB_AXI_AMAP_CTRL(win_num));
990 +
991 + csr_writel(pcie, upper_32_bits(size64), PAB_EXT_AXI_AMAP_SIZE(win_num));
992 +
993 + /*
994 + * program AXI window base with appropriate value in
995 + * PAB_AXI_AMAP_AXI_WIN0 register
996 + */
997 + csr_writel(pcie, lower_32_bits(cpu_addr) & (~AXI_WINDOW_ALIGN_MASK),
998 + PAB_AXI_AMAP_AXI_WIN(win_num));
999 + csr_writel(pcie, upper_32_bits(cpu_addr),
1000 + PAB_EXT_AXI_AMAP_AXI_WIN(win_num));
1001 +
1002 + csr_writel(pcie, lower_32_bits(pci_addr),
1003 + PAB_AXI_AMAP_PEX_WIN_L(win_num));
1004 + csr_writel(pcie, upper_32_bits(pci_addr),
1005 + PAB_AXI_AMAP_PEX_WIN_H(win_num));
1006 +
1007 + pcie->ob_wins_configured++;
1008 +}
1009 +
1010 +int mobiveil_bringup_link(struct mobiveil_pcie *pcie)
1011 +{
1012 + int retries;
1013 +
1014 + /* check if the link is up or not */
1015 + for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
1016 + if (mobiveil_pcie_link_up(pcie))
1017 + return 0;
1018 +
1019 + usleep_range(LINK_WAIT_MIN, LINK_WAIT_MAX);
1020 + }
1021 +
1022 + dev_err(&pcie->pdev->dev, "link never came up\n");
1023 +
1024 + return -ETIMEDOUT;
1025 +}
1026 --- /dev/null
1027 +++ b/drivers/pci/controller/mobiveil/pcie-mobiveil.h
1028 @@ -0,0 +1,189 @@
1029 +/* SPDX-License-Identifier: GPL-2.0 */
1030 +/*
1031 + * PCIe host controller driver for Mobiveil PCIe Host controller
1032 + *
1033 + * Copyright (c) 2018 Mobiveil Inc.
1034 + * Copyright 2019 NXP
1035 + *
1036 + * Author: Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
1037 + * Refactor: Zhiqiang Hou <Zhiqiang.Hou@nxp.com>
1038 + */
1039 +
1040 +#ifndef _PCIE_MOBIVEIL_H
1041 +#define _PCIE_MOBIVEIL_H
1042 +
1043 +#include <linux/pci.h>
1044 +#include <linux/irq.h>
1045 +#include <linux/msi.h>
1046 +#include "../../pci.h"
1047 +
1048 +/* register offsets and bit positions */
1049 +
1050 +/*
1051 + * translation tables are grouped into windows, each window registers are
1052 + * grouped into blocks of 4 or 16 registers each
1053 + */
1054 +#define PAB_REG_BLOCK_SIZE 16
1055 +#define PAB_EXT_REG_BLOCK_SIZE 4
1056 +
1057 +#define PAB_REG_ADDR(offset, win) \
1058 + (offset + (win * PAB_REG_BLOCK_SIZE))
1059 +#define PAB_EXT_REG_ADDR(offset, win) \
1060 + (offset + (win * PAB_EXT_REG_BLOCK_SIZE))
1061 +
1062 +#define LTSSM_STATUS 0x0404
1063 +#define LTSSM_STATUS_L0_MASK 0x3f
1064 +#define LTSSM_STATUS_L0 0x2d
1065 +
1066 +#define PAB_CTRL 0x0808
1067 +#define AMBA_PIO_ENABLE_SHIFT 0
1068 +#define PEX_PIO_ENABLE_SHIFT 1
1069 +#define PAGE_SEL_SHIFT 13
1070 +#define PAGE_SEL_MASK 0x3f
1071 +#define PAGE_LO_MASK 0x3ff
1072 +#define PAGE_SEL_OFFSET_SHIFT 10
1073 +
1074 +#define PAB_AXI_PIO_CTRL 0x0840
1075 +#define APIO_EN_MASK 0xf
1076 +
1077 +#define PAB_PEX_PIO_CTRL 0x08c0
1078 +#define PIO_ENABLE_SHIFT 0
1079 +
1080 +#define PAB_INTP_AMBA_MISC_ENB 0x0b0c
1081 +#define PAB_INTP_AMBA_MISC_STAT 0x0b1c
1082 +#define PAB_INTP_INTX_MASK 0x01e0
1083 +#define PAB_INTP_MSI_MASK 0x8
1084 +
1085 +#define PAB_AXI_AMAP_CTRL(win) PAB_REG_ADDR(0x0ba0, win)
1086 +#define WIN_ENABLE_SHIFT 0
1087 +#define WIN_TYPE_SHIFT 1
1088 +#define WIN_TYPE_MASK 0x3
1089 +#define WIN_SIZE_MASK 0xfffffc00
1090 +
1091 +#define PAB_EXT_AXI_AMAP_SIZE(win) PAB_EXT_REG_ADDR(0xbaf0, win)
1092 +
1093 +#define PAB_EXT_AXI_AMAP_AXI_WIN(win) PAB_EXT_REG_ADDR(0x80a0, win)
1094 +#define PAB_AXI_AMAP_AXI_WIN(win) PAB_REG_ADDR(0x0ba4, win)
1095 +#define AXI_WINDOW_ALIGN_MASK 3
1096 +
1097 +#define PAB_AXI_AMAP_PEX_WIN_L(win) PAB_REG_ADDR(0x0ba8, win)
1098 +#define PAB_BUS_SHIFT 24
1099 +#define PAB_DEVICE_SHIFT 19
1100 +#define PAB_FUNCTION_SHIFT 16
1101 +
1102 +#define PAB_AXI_AMAP_PEX_WIN_H(win) PAB_REG_ADDR(0x0bac, win)
1103 +#define PAB_INTP_AXI_PIO_CLASS 0x474
1104 +
1105 +#define PAB_PEX_AMAP_CTRL(win) PAB_REG_ADDR(0x4ba0, win)
1106 +#define AMAP_CTRL_EN_SHIFT 0
1107 +#define AMAP_CTRL_TYPE_SHIFT 1
1108 +#define AMAP_CTRL_TYPE_MASK 3
1109 +
1110 +#define PAB_EXT_PEX_AMAP_SIZEN(win) PAB_EXT_REG_ADDR(0xbef0, win)
1111 +#define PAB_EXT_PEX_AMAP_AXI_WIN(win) PAB_EXT_REG_ADDR(0xb4a0, win)
1112 +#define PAB_PEX_AMAP_AXI_WIN(win) PAB_REG_ADDR(0x4ba4, win)
1113 +#define PAB_PEX_AMAP_PEX_WIN_L(win) PAB_REG_ADDR(0x4ba8, win)
1114 +#define PAB_PEX_AMAP_PEX_WIN_H(win) PAB_REG_ADDR(0x4bac, win)
1115 +
1116 +/* starting offset of INTX bits in status register */
1117 +#define PAB_INTX_START 5
1118 +
1119 +/* supported number of MSI interrupts */
1120 +#define PCI_NUM_MSI 16
1121 +
1122 +/* MSI registers */
1123 +#define MSI_BASE_LO_OFFSET 0x04
1124 +#define MSI_BASE_HI_OFFSET 0x08
1125 +#define MSI_SIZE_OFFSET 0x0c
1126 +#define MSI_ENABLE_OFFSET 0x14
1127 +#define MSI_STATUS_OFFSET 0x18
1128 +#define MSI_DATA_OFFSET 0x20
1129 +#define MSI_ADDR_L_OFFSET 0x24
1130 +#define MSI_ADDR_H_OFFSET 0x28
1131 +
1132 +/* outbound and inbound window definitions */
1133 +#define WIN_NUM_0 0
1134 +#define WIN_NUM_1 1
1135 +#define CFG_WINDOW_TYPE 0
1136 +#define IO_WINDOW_TYPE 1
1137 +#define MEM_WINDOW_TYPE 2
1138 +#define IB_WIN_SIZE ((u64)256 * 1024 * 1024 * 1024)
1139 +#define MAX_PIO_WINDOWS 8
1140 +
1141 +/* Parameters for the waiting for link up routine */
1142 +#define LINK_WAIT_MAX_RETRIES 10
1143 +#define LINK_WAIT_MIN 90000
1144 +#define LINK_WAIT_MAX 100000
1145 +
1146 +#define PAGED_ADDR_BNDRY 0xc00
1147 +#define OFFSET_TO_PAGE_ADDR(off) \
1148 + ((off & PAGE_LO_MASK) | PAGED_ADDR_BNDRY)
1149 +#define OFFSET_TO_PAGE_IDX(off) \
1150 + ((off >> PAGE_SEL_OFFSET_SHIFT) & PAGE_SEL_MASK)
1151 +
1152 +struct mobiveil_pcie;
1153 +
1154 +struct mobiveil_msi { /* MSI information */
1155 + struct mutex lock; /* protect bitmap variable */
1156 + struct irq_domain *msi_domain;
1157 + struct irq_domain *dev_domain;
1158 + phys_addr_t msi_pages_phys;
1159 + int num_of_vectors;
1160 + DECLARE_BITMAP(msi_irq_in_use, PCI_NUM_MSI);
1161 +};
1162 +
1163 +struct mobiveil_rp_ops {
1164 + int (*interrupt_init)(struct mobiveil_pcie *pcie);
1165 +};
1166 +
1167 +struct root_port {
1168 + u8 root_bus_nr;
1169 + void __iomem *config_axi_slave_base; /* endpoint config base */
1170 + struct resource *ob_io_res;
1171 + struct mobiveil_rp_ops *ops;
1172 + int irq;
1173 + raw_spinlock_t intx_mask_lock;
1174 + struct irq_domain *intx_domain;
1175 + struct mobiveil_msi msi;
1176 +};
1177 +
1178 +struct mobiveil_pab_ops {
1179 + int (*link_up)(struct mobiveil_pcie *pcie);
1180 +};
1181 +
1182 +struct mobiveil_pcie {
1183 + struct platform_device *pdev;
1184 + struct list_head resources;
1185 + void __iomem *csr_axi_slave_base; /* PAB registers base */
1186 + phys_addr_t pcie_reg_base; /* Physical PCIe Controller Base */
1187 + void __iomem *apb_csr_base; /* MSI register base */
1188 + u32 apio_wins;
1189 + u32 ppio_wins;
1190 + u32 ob_wins_configured; /* configured outbound windows */
1191 + u32 ib_wins_configured; /* configured inbound windows */
1192 + const struct mobiveil_pab_ops *ops;
1193 + struct root_port rp;
1194 + struct pci_host_bridge *bridge;
1195 +};
1196 +
1197 +int mobiveil_pcie_host_probe(struct mobiveil_pcie *pcie);
1198 +bool mobiveil_pcie_link_up(struct mobiveil_pcie *pcie);
1199 +int mobiveil_bringup_link(struct mobiveil_pcie *pcie);
1200 +void program_ob_windows(struct mobiveil_pcie *pcie, int win_num, u64 cpu_addr,
1201 + u64 pci_addr, u32 type, u64 size);
1202 +void program_ib_windows(struct mobiveil_pcie *pcie, int win_num, u64 cpu_addr,
1203 + u64 pci_addr, u32 type, u64 size);
1204 +u32 csr_read(struct mobiveil_pcie *pcie, u32 off, size_t size);
1205 +void csr_write(struct mobiveil_pcie *pcie, u32 val, u32 off, size_t size);
1206 +
1207 +static inline u32 csr_readl(struct mobiveil_pcie *pcie, u32 off)
1208 +{
1209 + return csr_read(pcie, off, 0x4);
1210 +}
1211 +
1212 +static inline void csr_writel(struct mobiveil_pcie *pcie, u32 val, u32 off)
1213 +{
1214 + csr_write(pcie, val, off, 0x4);
1215 +}
1216 +
1217 +#endif /* _PCIE_MOBIVEIL_H */
1218 --- a/drivers/pci/controller/pcie-mobiveil.c
1219 +++ /dev/null
1220 @@ -1,964 +0,0 @@
1221 -// SPDX-License-Identifier: GPL-2.0
1222 -/*
1223 - * PCIe host controller driver for Mobiveil PCIe Host controller
1224 - *
1225 - * Copyright (c) 2018 Mobiveil Inc.
1226 - * Author: Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
1227 - */
1228 -
1229 -#include <linux/delay.h>
1230 -#include <linux/init.h>
1231 -#include <linux/interrupt.h>
1232 -#include <linux/irq.h>
1233 -#include <linux/irqchip/chained_irq.h>
1234 -#include <linux/irqdomain.h>
1235 -#include <linux/kernel.h>
1236 -#include <linux/module.h>
1237 -#include <linux/msi.h>
1238 -#include <linux/of_address.h>
1239 -#include <linux/of_irq.h>
1240 -#include <linux/of_platform.h>
1241 -#include <linux/of_pci.h>
1242 -#include <linux/pci.h>
1243 -#include <linux/platform_device.h>
1244 -#include <linux/slab.h>
1245 -
1246 -#include "../pci.h"
1247 -
1248 -/* register offsets and bit positions */
1249 -
1250 -/*
1251 - * translation tables are grouped into windows, each window registers are
1252 - * grouped into blocks of 4 or 16 registers each
1253 - */
1254 -#define PAB_REG_BLOCK_SIZE 16
1255 -#define PAB_EXT_REG_BLOCK_SIZE 4
1256 -
1257 -#define PAB_REG_ADDR(offset, win) \
1258 - (offset + (win * PAB_REG_BLOCK_SIZE))
1259 -#define PAB_EXT_REG_ADDR(offset, win) \
1260 - (offset + (win * PAB_EXT_REG_BLOCK_SIZE))
1261 -
1262 -#define LTSSM_STATUS 0x0404
1263 -#define LTSSM_STATUS_L0_MASK 0x3f
1264 -#define LTSSM_STATUS_L0 0x2d
1265 -
1266 -#define PAB_CTRL 0x0808
1267 -#define AMBA_PIO_ENABLE_SHIFT 0
1268 -#define PEX_PIO_ENABLE_SHIFT 1
1269 -#define PAGE_SEL_SHIFT 13
1270 -#define PAGE_SEL_MASK 0x3f
1271 -#define PAGE_LO_MASK 0x3ff
1272 -#define PAGE_SEL_OFFSET_SHIFT 10
1273 -
1274 -#define PAB_AXI_PIO_CTRL 0x0840
1275 -#define APIO_EN_MASK 0xf
1276 -
1277 -#define PAB_PEX_PIO_CTRL 0x08c0
1278 -#define PIO_ENABLE_SHIFT 0
1279 -
1280 -#define PAB_INTP_AMBA_MISC_ENB 0x0b0c
1281 -#define PAB_INTP_AMBA_MISC_STAT 0x0b1c
1282 -#define PAB_INTP_INTX_MASK 0x01e0
1283 -#define PAB_INTP_MSI_MASK 0x8
1284 -
1285 -#define PAB_AXI_AMAP_CTRL(win) PAB_REG_ADDR(0x0ba0, win)
1286 -#define WIN_ENABLE_SHIFT 0
1287 -#define WIN_TYPE_SHIFT 1
1288 -#define WIN_TYPE_MASK 0x3
1289 -#define WIN_SIZE_MASK 0xfffffc00
1290 -
1291 -#define PAB_EXT_AXI_AMAP_SIZE(win) PAB_EXT_REG_ADDR(0xbaf0, win)
1292 -
1293 -#define PAB_EXT_AXI_AMAP_AXI_WIN(win) PAB_EXT_REG_ADDR(0x80a0, win)
1294 -#define PAB_AXI_AMAP_AXI_WIN(win) PAB_REG_ADDR(0x0ba4, win)
1295 -#define AXI_WINDOW_ALIGN_MASK 3
1296 -
1297 -#define PAB_AXI_AMAP_PEX_WIN_L(win) PAB_REG_ADDR(0x0ba8, win)
1298 -#define PAB_BUS_SHIFT 24
1299 -#define PAB_DEVICE_SHIFT 19
1300 -#define PAB_FUNCTION_SHIFT 16
1301 -
1302 -#define PAB_AXI_AMAP_PEX_WIN_H(win) PAB_REG_ADDR(0x0bac, win)
1303 -#define PAB_INTP_AXI_PIO_CLASS 0x474
1304 -
1305 -#define PAB_PEX_AMAP_CTRL(win) PAB_REG_ADDR(0x4ba0, win)
1306 -#define AMAP_CTRL_EN_SHIFT 0
1307 -#define AMAP_CTRL_TYPE_SHIFT 1
1308 -#define AMAP_CTRL_TYPE_MASK 3
1309 -
1310 -#define PAB_EXT_PEX_AMAP_SIZEN(win) PAB_EXT_REG_ADDR(0xbef0, win)
1311 -#define PAB_EXT_PEX_AMAP_AXI_WIN(win) PAB_EXT_REG_ADDR(0xb4a0, win)
1312 -#define PAB_PEX_AMAP_AXI_WIN(win) PAB_REG_ADDR(0x4ba4, win)
1313 -#define PAB_PEX_AMAP_PEX_WIN_L(win) PAB_REG_ADDR(0x4ba8, win)
1314 -#define PAB_PEX_AMAP_PEX_WIN_H(win) PAB_REG_ADDR(0x4bac, win)
1315 -
1316 -/* starting offset of INTX bits in status register */
1317 -#define PAB_INTX_START 5
1318 -
1319 -/* supported number of MSI interrupts */
1320 -#define PCI_NUM_MSI 16
1321 -
1322 -/* MSI registers */
1323 -#define MSI_BASE_LO_OFFSET 0x04
1324 -#define MSI_BASE_HI_OFFSET 0x08
1325 -#define MSI_SIZE_OFFSET 0x0c
1326 -#define MSI_ENABLE_OFFSET 0x14
1327 -#define MSI_STATUS_OFFSET 0x18
1328 -#define MSI_DATA_OFFSET 0x20
1329 -#define MSI_ADDR_L_OFFSET 0x24
1330 -#define MSI_ADDR_H_OFFSET 0x28
1331 -
1332 -/* outbound and inbound window definitions */
1333 -#define WIN_NUM_0 0
1334 -#define WIN_NUM_1 1
1335 -#define CFG_WINDOW_TYPE 0
1336 -#define IO_WINDOW_TYPE 1
1337 -#define MEM_WINDOW_TYPE 2
1338 -#define IB_WIN_SIZE ((u64)256 * 1024 * 1024 * 1024)
1339 -#define MAX_PIO_WINDOWS 8
1340 -
1341 -/* Parameters for the waiting for link up routine */
1342 -#define LINK_WAIT_MAX_RETRIES 10
1343 -#define LINK_WAIT_MIN 90000
1344 -#define LINK_WAIT_MAX 100000
1345 -
1346 -#define PAGED_ADDR_BNDRY 0xc00
1347 -#define OFFSET_TO_PAGE_ADDR(off) \
1348 - ((off & PAGE_LO_MASK) | PAGED_ADDR_BNDRY)
1349 -#define OFFSET_TO_PAGE_IDX(off) \
1350 - ((off >> PAGE_SEL_OFFSET_SHIFT) & PAGE_SEL_MASK)
1351 -
1352 -struct mobiveil_msi { /* MSI information */
1353 - struct mutex lock; /* protect bitmap variable */
1354 - struct irq_domain *msi_domain;
1355 - struct irq_domain *dev_domain;
1356 - phys_addr_t msi_pages_phys;
1357 - int num_of_vectors;
1358 - DECLARE_BITMAP(msi_irq_in_use, PCI_NUM_MSI);
1359 -};
1360 -
1361 -struct mobiveil_pcie {
1362 - struct platform_device *pdev;
1363 - struct list_head resources;
1364 - void __iomem *config_axi_slave_base; /* endpoint config base */
1365 - void __iomem *csr_axi_slave_base; /* root port config base */
1366 - void __iomem *apb_csr_base; /* MSI register base */
1367 - phys_addr_t pcie_reg_base; /* Physical PCIe Controller Base */
1368 - struct irq_domain *intx_domain;
1369 - raw_spinlock_t intx_mask_lock;
1370 - int irq;
1371 - int apio_wins;
1372 - int ppio_wins;
1373 - int ob_wins_configured; /* configured outbound windows */
1374 - int ib_wins_configured; /* configured inbound windows */
1375 - struct resource *ob_io_res;
1376 - char root_bus_nr;
1377 - struct mobiveil_msi msi;
1378 -};
1379 -
1380 -/*
1381 - * mobiveil_pcie_sel_page - routine to access paged register
1382 - *
1383 - * Registers whose address greater than PAGED_ADDR_BNDRY (0xc00) are paged,
1384 - * for this scheme to work extracted higher 6 bits of the offset will be
1385 - * written to pg_sel field of PAB_CTRL register and rest of the lower 10
1386 - * bits enabled with PAGED_ADDR_BNDRY are used as offset of the register.
1387 - */
1388 -static void mobiveil_pcie_sel_page(struct mobiveil_pcie *pcie, u8 pg_idx)
1389 -{
1390 - u32 val;
1391 -
1392 - val = readl(pcie->csr_axi_slave_base + PAB_CTRL);
1393 - val &= ~(PAGE_SEL_MASK << PAGE_SEL_SHIFT);
1394 - val |= (pg_idx & PAGE_SEL_MASK) << PAGE_SEL_SHIFT;
1395 -
1396 - writel(val, pcie->csr_axi_slave_base + PAB_CTRL);
1397 -}
1398 -
1399 -static void *mobiveil_pcie_comp_addr(struct mobiveil_pcie *pcie, u32 off)
1400 -{
1401 - if (off < PAGED_ADDR_BNDRY) {
1402 - /* For directly accessed registers, clear the pg_sel field */
1403 - mobiveil_pcie_sel_page(pcie, 0);
1404 - return pcie->csr_axi_slave_base + off;
1405 - }
1406 -
1407 - mobiveil_pcie_sel_page(pcie, OFFSET_TO_PAGE_IDX(off));
1408 - return pcie->csr_axi_slave_base + OFFSET_TO_PAGE_ADDR(off);
1409 -}
1410 -
1411 -static int mobiveil_pcie_read(void __iomem *addr, int size, u32 *val)
1412 -{
1413 - if ((uintptr_t)addr & (size - 1)) {
1414 - *val = 0;
1415 - return PCIBIOS_BAD_REGISTER_NUMBER;
1416 - }
1417 -
1418 - switch (size) {
1419 - case 4:
1420 - *val = readl(addr);
1421 - break;
1422 - case 2:
1423 - *val = readw(addr);
1424 - break;
1425 - case 1:
1426 - *val = readb(addr);
1427 - break;
1428 - default:
1429 - *val = 0;
1430 - return PCIBIOS_BAD_REGISTER_NUMBER;
1431 - }
1432 -
1433 - return PCIBIOS_SUCCESSFUL;
1434 -}
1435 -
1436 -static int mobiveil_pcie_write(void __iomem *addr, int size, u32 val)
1437 -{
1438 - if ((uintptr_t)addr & (size - 1))
1439 - return PCIBIOS_BAD_REGISTER_NUMBER;
1440 -
1441 - switch (size) {
1442 - case 4:
1443 - writel(val, addr);
1444 - break;
1445 - case 2:
1446 - writew(val, addr);
1447 - break;
1448 - case 1:
1449 - writeb(val, addr);
1450 - break;
1451 - default:
1452 - return PCIBIOS_BAD_REGISTER_NUMBER;
1453 - }
1454 -
1455 - return PCIBIOS_SUCCESSFUL;
1456 -}
1457 -
1458 -static u32 csr_read(struct mobiveil_pcie *pcie, u32 off, size_t size)
1459 -{
1460 - void *addr;
1461 - u32 val;
1462 - int ret;
1463 -
1464 - addr = mobiveil_pcie_comp_addr(pcie, off);
1465 -
1466 - ret = mobiveil_pcie_read(addr, size, &val);
1467 - if (ret)
1468 - dev_err(&pcie->pdev->dev, "read CSR address failed\n");
1469 -
1470 - return val;
1471 -}
1472 -
1473 -static void csr_write(struct mobiveil_pcie *pcie, u32 val, u32 off, size_t size)
1474 -{
1475 - void *addr;
1476 - int ret;
1477 -
1478 - addr = mobiveil_pcie_comp_addr(pcie, off);
1479 -
1480 - ret = mobiveil_pcie_write(addr, size, val);
1481 - if (ret)
1482 - dev_err(&pcie->pdev->dev, "write CSR address failed\n");
1483 -}
1484 -
1485 -static u32 csr_readl(struct mobiveil_pcie *pcie, u32 off)
1486 -{
1487 - return csr_read(pcie, off, 0x4);
1488 -}
1489 -
1490 -static void csr_writel(struct mobiveil_pcie *pcie, u32 val, u32 off)
1491 -{
1492 - csr_write(pcie, val, off, 0x4);
1493 -}
1494 -
1495 -static bool mobiveil_pcie_link_up(struct mobiveil_pcie *pcie)
1496 -{
1497 - return (csr_readl(pcie, LTSSM_STATUS) &
1498 - LTSSM_STATUS_L0_MASK) == LTSSM_STATUS_L0;
1499 -}
1500 -
1501 -static bool mobiveil_pcie_valid_device(struct pci_bus *bus, unsigned int devfn)
1502 -{
1503 - struct mobiveil_pcie *pcie = bus->sysdata;
1504 -
1505 - /* Only one device down on each root port */
1506 - if ((bus->number == pcie->root_bus_nr) && (devfn > 0))
1507 - return false;
1508 -
1509 - /*
1510 - * Do not read more than one device on the bus directly
1511 - * attached to RC
1512 - */
1513 - if ((bus->primary == pcie->root_bus_nr) && (PCI_SLOT(devfn) > 0))
1514 - return false;
1515 -
1516 - return true;
1517 -}
1518 -
1519 -/*
1520 - * mobiveil_pcie_map_bus - routine to get the configuration base of either
1521 - * root port or endpoint
1522 - */
1523 -static void __iomem *mobiveil_pcie_map_bus(struct pci_bus *bus,
1524 - unsigned int devfn, int where)
1525 -{
1526 - struct mobiveil_pcie *pcie = bus->sysdata;
1527 - u32 value;
1528 -
1529 - if (!mobiveil_pcie_valid_device(bus, devfn))
1530 - return NULL;
1531 -
1532 - /* RC config access */
1533 - if (bus->number == pcie->root_bus_nr)
1534 - return pcie->csr_axi_slave_base + where;
1535 -
1536 - /*
1537 - * EP config access (in Config/APIO space)
1538 - * Program PEX Address base (31..16 bits) with appropriate value
1539 - * (BDF) in PAB_AXI_AMAP_PEX_WIN_L0 Register.
1540 - * Relies on pci_lock serialization
1541 - */
1542 - value = bus->number << PAB_BUS_SHIFT |
1543 - PCI_SLOT(devfn) << PAB_DEVICE_SHIFT |
1544 - PCI_FUNC(devfn) << PAB_FUNCTION_SHIFT;
1545 -
1546 - csr_writel(pcie, value, PAB_AXI_AMAP_PEX_WIN_L(WIN_NUM_0));
1547 -
1548 - return pcie->config_axi_slave_base + where;
1549 -}
1550 -
1551 -static struct pci_ops mobiveil_pcie_ops = {
1552 - .map_bus = mobiveil_pcie_map_bus,
1553 - .read = pci_generic_config_read,
1554 - .write = pci_generic_config_write,
1555 -};
1556 -
1557 -static void mobiveil_pcie_isr(struct irq_desc *desc)
1558 -{
1559 - struct irq_chip *chip = irq_desc_get_chip(desc);
1560 - struct mobiveil_pcie *pcie = irq_desc_get_handler_data(desc);
1561 - struct device *dev = &pcie->pdev->dev;
1562 - struct mobiveil_msi *msi = &pcie->msi;
1563 - u32 msi_data, msi_addr_lo, msi_addr_hi;
1564 - u32 intr_status, msi_status;
1565 - unsigned long shifted_status;
1566 - u32 bit, virq, val, mask;
1567 -
1568 - /*
1569 - * The core provides a single interrupt for both INTx/MSI messages.
1570 - * So we'll read both INTx and MSI status
1571 - */
1572 -
1573 - chained_irq_enter(chip, desc);
1574 -
1575 - /* read INTx status */
1576 - val = csr_readl(pcie, PAB_INTP_AMBA_MISC_STAT);
1577 - mask = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
1578 - intr_status = val & mask;
1579 -
1580 - /* Handle INTx */
1581 - if (intr_status & PAB_INTP_INTX_MASK) {
1582 - shifted_status = csr_readl(pcie, PAB_INTP_AMBA_MISC_STAT);
1583 - shifted_status &= PAB_INTP_INTX_MASK;
1584 - shifted_status >>= PAB_INTX_START;
1585 - do {
1586 - for_each_set_bit(bit, &shifted_status, PCI_NUM_INTX) {
1587 - virq = irq_find_mapping(pcie->intx_domain,
1588 - bit + 1);
1589 - if (virq)
1590 - generic_handle_irq(virq);
1591 - else
1592 - dev_err_ratelimited(dev, "unexpected IRQ, INT%d\n",
1593 - bit);
1594 -
1595 - /* clear interrupt handled */
1596 - csr_writel(pcie, 1 << (PAB_INTX_START + bit),
1597 - PAB_INTP_AMBA_MISC_STAT);
1598 - }
1599 -
1600 - shifted_status = csr_readl(pcie,
1601 - PAB_INTP_AMBA_MISC_STAT);
1602 - shifted_status &= PAB_INTP_INTX_MASK;
1603 - shifted_status >>= PAB_INTX_START;
1604 - } while (shifted_status != 0);
1605 - }
1606 -
1607 - /* read extra MSI status register */
1608 - msi_status = readl_relaxed(pcie->apb_csr_base + MSI_STATUS_OFFSET);
1609 -
1610 - /* handle MSI interrupts */
1611 - while (msi_status & 1) {
1612 - msi_data = readl_relaxed(pcie->apb_csr_base + MSI_DATA_OFFSET);
1613 -
1614 - /*
1615 - * MSI_STATUS_OFFSET register gets updated to zero
1616 - * once we pop not only the MSI data but also address
1617 - * from MSI hardware FIFO. So keeping these following
1618 - * two dummy reads.
1619 - */
1620 - msi_addr_lo = readl_relaxed(pcie->apb_csr_base +
1621 - MSI_ADDR_L_OFFSET);
1622 - msi_addr_hi = readl_relaxed(pcie->apb_csr_base +
1623 - MSI_ADDR_H_OFFSET);
1624 - dev_dbg(dev, "MSI registers, data: %08x, addr: %08x:%08x\n",
1625 - msi_data, msi_addr_hi, msi_addr_lo);
1626 -
1627 - virq = irq_find_mapping(msi->dev_domain, msi_data);
1628 - if (virq)
1629 - generic_handle_irq(virq);
1630 -
1631 - msi_status = readl_relaxed(pcie->apb_csr_base +
1632 - MSI_STATUS_OFFSET);
1633 - }
1634 -
1635 - /* Clear the interrupt status */
1636 - csr_writel(pcie, intr_status, PAB_INTP_AMBA_MISC_STAT);
1637 - chained_irq_exit(chip, desc);
1638 -}
1639 -
1640 -static int mobiveil_pcie_parse_dt(struct mobiveil_pcie *pcie)
1641 -{
1642 - struct device *dev = &pcie->pdev->dev;
1643 - struct platform_device *pdev = pcie->pdev;
1644 - struct device_node *node = dev->of_node;
1645 - struct resource *res;
1646 -
1647 - /* map config resource */
1648 - res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
1649 - "config_axi_slave");
1650 - pcie->config_axi_slave_base = devm_pci_remap_cfg_resource(dev, res);
1651 - if (IS_ERR(pcie->config_axi_slave_base))
1652 - return PTR_ERR(pcie->config_axi_slave_base);
1653 - pcie->ob_io_res = res;
1654 -
1655 - /* map csr resource */
1656 - res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
1657 - "csr_axi_slave");
1658 - pcie->csr_axi_slave_base = devm_pci_remap_cfg_resource(dev, res);
1659 - if (IS_ERR(pcie->csr_axi_slave_base))
1660 - return PTR_ERR(pcie->csr_axi_slave_base);
1661 - pcie->pcie_reg_base = res->start;
1662 -
1663 - /* map MSI config resource */
1664 - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "apb_csr");
1665 - pcie->apb_csr_base = devm_pci_remap_cfg_resource(dev, res);
1666 - if (IS_ERR(pcie->apb_csr_base))
1667 - return PTR_ERR(pcie->apb_csr_base);
1668 -
1669 - /* read the number of windows requested */
1670 - if (of_property_read_u32(node, "apio-wins", &pcie->apio_wins))
1671 - pcie->apio_wins = MAX_PIO_WINDOWS;
1672 -
1673 - if (of_property_read_u32(node, "ppio-wins", &pcie->ppio_wins))
1674 - pcie->ppio_wins = MAX_PIO_WINDOWS;
1675 -
1676 - pcie->irq = platform_get_irq(pdev, 0);
1677 - if (pcie->irq <= 0) {
1678 - dev_err(dev, "failed to map IRQ: %d\n", pcie->irq);
1679 - return -ENODEV;
1680 - }
1681 -
1682 - return 0;
1683 -}
1684 -
1685 -static void program_ib_windows(struct mobiveil_pcie *pcie, int win_num,
1686 - u64 cpu_addr, u64 pci_addr, u32 type, u64 size)
1687 -{
1688 - u32 value;
1689 - u64 size64 = ~(size - 1);
1690 -
1691 - if (win_num >= pcie->ppio_wins) {
1692 - dev_err(&pcie->pdev->dev,
1693 - "ERROR: max inbound windows reached !\n");
1694 - return;
1695 - }
1696 -
1697 - value = csr_readl(pcie, PAB_PEX_AMAP_CTRL(win_num));
1698 - value &= ~(AMAP_CTRL_TYPE_MASK << AMAP_CTRL_TYPE_SHIFT | WIN_SIZE_MASK);
1699 - value |= type << AMAP_CTRL_TYPE_SHIFT | 1 << AMAP_CTRL_EN_SHIFT |
1700 - (lower_32_bits(size64) & WIN_SIZE_MASK);
1701 - csr_writel(pcie, value, PAB_PEX_AMAP_CTRL(win_num));
1702 -
1703 - csr_writel(pcie, upper_32_bits(size64),
1704 - PAB_EXT_PEX_AMAP_SIZEN(win_num));
1705 -
1706 - csr_writel(pcie, lower_32_bits(cpu_addr),
1707 - PAB_PEX_AMAP_AXI_WIN(win_num));
1708 - csr_writel(pcie, upper_32_bits(cpu_addr),
1709 - PAB_EXT_PEX_AMAP_AXI_WIN(win_num));
1710 -
1711 - csr_writel(pcie, lower_32_bits(pci_addr),
1712 - PAB_PEX_AMAP_PEX_WIN_L(win_num));
1713 - csr_writel(pcie, upper_32_bits(pci_addr),
1714 - PAB_PEX_AMAP_PEX_WIN_H(win_num));
1715 -
1716 - pcie->ib_wins_configured++;
1717 -}
1718 -
1719 -/*
1720 - * routine to program the outbound windows
1721 - */
1722 -static void program_ob_windows(struct mobiveil_pcie *pcie, int win_num,
1723 - u64 cpu_addr, u64 pci_addr, u32 type, u64 size)
1724 -{
1725 - u32 value;
1726 - u64 size64 = ~(size - 1);
1727 -
1728 - if (win_num >= pcie->apio_wins) {
1729 - dev_err(&pcie->pdev->dev,
1730 - "ERROR: max outbound windows reached !\n");
1731 - return;
1732 - }
1733 -
1734 - /*
1735 - * program Enable Bit to 1, Type Bit to (00) base 2, AXI Window Size Bit
1736 - * to 4 KB in PAB_AXI_AMAP_CTRL register
1737 - */
1738 - value = csr_readl(pcie, PAB_AXI_AMAP_CTRL(win_num));
1739 - value &= ~(WIN_TYPE_MASK << WIN_TYPE_SHIFT | WIN_SIZE_MASK);
1740 - value |= 1 << WIN_ENABLE_SHIFT | type << WIN_TYPE_SHIFT |
1741 - (lower_32_bits(size64) & WIN_SIZE_MASK);
1742 - csr_writel(pcie, value, PAB_AXI_AMAP_CTRL(win_num));
1743 -
1744 - csr_writel(pcie, upper_32_bits(size64), PAB_EXT_AXI_AMAP_SIZE(win_num));
1745 -
1746 - /*
1747 - * program AXI window base with appropriate value in
1748 - * PAB_AXI_AMAP_AXI_WIN0 register
1749 - */
1750 - csr_writel(pcie, lower_32_bits(cpu_addr) & (~AXI_WINDOW_ALIGN_MASK),
1751 - PAB_AXI_AMAP_AXI_WIN(win_num));
1752 - csr_writel(pcie, upper_32_bits(cpu_addr),
1753 - PAB_EXT_AXI_AMAP_AXI_WIN(win_num));
1754 -
1755 - csr_writel(pcie, lower_32_bits(pci_addr),
1756 - PAB_AXI_AMAP_PEX_WIN_L(win_num));
1757 - csr_writel(pcie, upper_32_bits(pci_addr),
1758 - PAB_AXI_AMAP_PEX_WIN_H(win_num));
1759 -
1760 - pcie->ob_wins_configured++;
1761 -}
1762 -
1763 -static int mobiveil_bringup_link(struct mobiveil_pcie *pcie)
1764 -{
1765 - int retries;
1766 -
1767 - /* check if the link is up or not */
1768 - for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
1769 - if (mobiveil_pcie_link_up(pcie))
1770 - return 0;
1771 -
1772 - usleep_range(LINK_WAIT_MIN, LINK_WAIT_MAX);
1773 - }
1774 -
1775 - dev_err(&pcie->pdev->dev, "link never came up\n");
1776 -
1777 - return -ETIMEDOUT;
1778 -}
1779 -
1780 -static void mobiveil_pcie_enable_msi(struct mobiveil_pcie *pcie)
1781 -{
1782 - phys_addr_t msg_addr = pcie->pcie_reg_base;
1783 - struct mobiveil_msi *msi = &pcie->msi;
1784 -
1785 - pcie->msi.num_of_vectors = PCI_NUM_MSI;
1786 - msi->msi_pages_phys = (phys_addr_t)msg_addr;
1787 -
1788 - writel_relaxed(lower_32_bits(msg_addr),
1789 - pcie->apb_csr_base + MSI_BASE_LO_OFFSET);
1790 - writel_relaxed(upper_32_bits(msg_addr),
1791 - pcie->apb_csr_base + MSI_BASE_HI_OFFSET);
1792 - writel_relaxed(4096, pcie->apb_csr_base + MSI_SIZE_OFFSET);
1793 - writel_relaxed(1, pcie->apb_csr_base + MSI_ENABLE_OFFSET);
1794 -}
1795 -
1796 -static int mobiveil_host_init(struct mobiveil_pcie *pcie)
1797 -{
1798 - u32 value, pab_ctrl, type;
1799 - struct resource_entry *win;
1800 -
1801 - /* setup bus numbers */
1802 - value = csr_readl(pcie, PCI_PRIMARY_BUS);
1803 - value &= 0xff000000;
1804 - value |= 0x00ff0100;
1805 - csr_writel(pcie, value, PCI_PRIMARY_BUS);
1806 -
1807 - /*
1808 - * program Bus Master Enable Bit in Command Register in PAB Config
1809 - * Space
1810 - */
1811 - value = csr_readl(pcie, PCI_COMMAND);
1812 - value |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER;
1813 - csr_writel(pcie, value, PCI_COMMAND);
1814 -
1815 - /*
1816 - * program PIO Enable Bit to 1 (and PEX PIO Enable to 1) in PAB_CTRL
1817 - * register
1818 - */
1819 - pab_ctrl = csr_readl(pcie, PAB_CTRL);
1820 - pab_ctrl |= (1 << AMBA_PIO_ENABLE_SHIFT) | (1 << PEX_PIO_ENABLE_SHIFT);
1821 - csr_writel(pcie, pab_ctrl, PAB_CTRL);
1822 -
1823 - csr_writel(pcie, (PAB_INTP_INTX_MASK | PAB_INTP_MSI_MASK),
1824 - PAB_INTP_AMBA_MISC_ENB);
1825 -
1826 - /*
1827 - * program PIO Enable Bit to 1 and Config Window Enable Bit to 1 in
1828 - * PAB_AXI_PIO_CTRL Register
1829 - */
1830 - value = csr_readl(pcie, PAB_AXI_PIO_CTRL);
1831 - value |= APIO_EN_MASK;
1832 - csr_writel(pcie, value, PAB_AXI_PIO_CTRL);
1833 -
1834 - /* Enable PCIe PIO master */
1835 - value = csr_readl(pcie, PAB_PEX_PIO_CTRL);
1836 - value |= 1 << PIO_ENABLE_SHIFT;
1837 - csr_writel(pcie, value, PAB_PEX_PIO_CTRL);
1838 -
1839 - /*
1840 - * we'll program one outbound window for config reads and
1841 - * another default inbound window for all the upstream traffic
1842 - * rest of the outbound windows will be configured according to
1843 - * the "ranges" field defined in device tree
1844 - */
1845 -
1846 - /* config outbound translation window */
1847 - program_ob_windows(pcie, WIN_NUM_0, pcie->ob_io_res->start, 0,
1848 - CFG_WINDOW_TYPE, resource_size(pcie->ob_io_res));
1849 -
1850 - /* memory inbound translation window */
1851 - program_ib_windows(pcie, WIN_NUM_0, 0, 0, MEM_WINDOW_TYPE, IB_WIN_SIZE);
1852 -
1853 - /* Get the I/O and memory ranges from DT */
1854 - resource_list_for_each_entry(win, &pcie->resources) {
1855 - if (resource_type(win->res) == IORESOURCE_MEM)
1856 - type = MEM_WINDOW_TYPE;
1857 - else if (resource_type(win->res) == IORESOURCE_IO)
1858 - type = IO_WINDOW_TYPE;
1859 - else
1860 - continue;
1861 -
1862 - /* configure outbound translation window */
1863 - program_ob_windows(pcie, pcie->ob_wins_configured,
1864 - win->res->start,
1865 - win->res->start - win->offset,
1866 - type, resource_size(win->res));
1867 - }
1868 -
1869 - /* fixup for PCIe class register */
1870 - value = csr_readl(pcie, PAB_INTP_AXI_PIO_CLASS);
1871 - value &= 0xff;
1872 - value |= (PCI_CLASS_BRIDGE_PCI << 16);
1873 - csr_writel(pcie, value, PAB_INTP_AXI_PIO_CLASS);
1874 -
1875 - /* setup MSI hardware registers */
1876 - mobiveil_pcie_enable_msi(pcie);
1877 -
1878 - return 0;
1879 -}
1880 -
1881 -static void mobiveil_mask_intx_irq(struct irq_data *data)
1882 -{
1883 - struct irq_desc *desc = irq_to_desc(data->irq);
1884 - struct mobiveil_pcie *pcie;
1885 - unsigned long flags;
1886 - u32 mask, shifted_val;
1887 -
1888 - pcie = irq_desc_get_chip_data(desc);
1889 - mask = 1 << ((data->hwirq + PAB_INTX_START) - 1);
1890 - raw_spin_lock_irqsave(&pcie->intx_mask_lock, flags);
1891 - shifted_val = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
1892 - shifted_val &= ~mask;
1893 - csr_writel(pcie, shifted_val, PAB_INTP_AMBA_MISC_ENB);
1894 - raw_spin_unlock_irqrestore(&pcie->intx_mask_lock, flags);
1895 -}
1896 -
1897 -static void mobiveil_unmask_intx_irq(struct irq_data *data)
1898 -{
1899 - struct irq_desc *desc = irq_to_desc(data->irq);
1900 - struct mobiveil_pcie *pcie;
1901 - unsigned long flags;
1902 - u32 shifted_val, mask;
1903 -
1904 - pcie = irq_desc_get_chip_data(desc);
1905 - mask = 1 << ((data->hwirq + PAB_INTX_START) - 1);
1906 - raw_spin_lock_irqsave(&pcie->intx_mask_lock, flags);
1907 - shifted_val = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
1908 - shifted_val |= mask;
1909 - csr_writel(pcie, shifted_val, PAB_INTP_AMBA_MISC_ENB);
1910 - raw_spin_unlock_irqrestore(&pcie->intx_mask_lock, flags);
1911 -}
1912 -
1913 -static struct irq_chip intx_irq_chip = {
1914 - .name = "mobiveil_pcie:intx",
1915 - .irq_enable = mobiveil_unmask_intx_irq,
1916 - .irq_disable = mobiveil_mask_intx_irq,
1917 - .irq_mask = mobiveil_mask_intx_irq,
1918 - .irq_unmask = mobiveil_unmask_intx_irq,
1919 -};
1920 -
1921 -/* routine to setup the INTx related data */
1922 -static int mobiveil_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
1923 - irq_hw_number_t hwirq)
1924 -{
1925 - irq_set_chip_and_handler(irq, &intx_irq_chip, handle_level_irq);
1926 - irq_set_chip_data(irq, domain->host_data);
1927 -
1928 - return 0;
1929 -}
1930 -
1931 -/* INTx domain operations structure */
1932 -static const struct irq_domain_ops intx_domain_ops = {
1933 - .map = mobiveil_pcie_intx_map,
1934 -};
1935 -
1936 -static struct irq_chip mobiveil_msi_irq_chip = {
1937 - .name = "Mobiveil PCIe MSI",
1938 - .irq_mask = pci_msi_mask_irq,
1939 - .irq_unmask = pci_msi_unmask_irq,
1940 -};
1941 -
1942 -static struct msi_domain_info mobiveil_msi_domain_info = {
1943 - .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
1944 - MSI_FLAG_PCI_MSIX),
1945 - .chip = &mobiveil_msi_irq_chip,
1946 -};
1947 -
1948 -static void mobiveil_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
1949 -{
1950 - struct mobiveil_pcie *pcie = irq_data_get_irq_chip_data(data);
1951 - phys_addr_t addr = pcie->pcie_reg_base + (data->hwirq * sizeof(int));
1952 -
1953 - msg->address_lo = lower_32_bits(addr);
1954 - msg->address_hi = upper_32_bits(addr);
1955 - msg->data = data->hwirq;
1956 -
1957 - dev_dbg(&pcie->pdev->dev, "msi#%d address_hi %#x address_lo %#x\n",
1958 - (int)data->hwirq, msg->address_hi, msg->address_lo);
1959 -}
1960 -
1961 -static int mobiveil_msi_set_affinity(struct irq_data *irq_data,
1962 - const struct cpumask *mask, bool force)
1963 -{
1964 - return -EINVAL;
1965 -}
1966 -
1967 -static struct irq_chip mobiveil_msi_bottom_irq_chip = {
1968 - .name = "Mobiveil MSI",
1969 - .irq_compose_msi_msg = mobiveil_compose_msi_msg,
1970 - .irq_set_affinity = mobiveil_msi_set_affinity,
1971 -};
1972 -
1973 -static int mobiveil_irq_msi_domain_alloc(struct irq_domain *domain,
1974 - unsigned int virq,
1975 - unsigned int nr_irqs, void *args)
1976 -{
1977 - struct mobiveil_pcie *pcie = domain->host_data;
1978 - struct mobiveil_msi *msi = &pcie->msi;
1979 - unsigned long bit;
1980 -
1981 - WARN_ON(nr_irqs != 1);
1982 - mutex_lock(&msi->lock);
1983 -
1984 - bit = find_first_zero_bit(msi->msi_irq_in_use, msi->num_of_vectors);
1985 - if (bit >= msi->num_of_vectors) {
1986 - mutex_unlock(&msi->lock);
1987 - return -ENOSPC;
1988 - }
1989 -
1990 - set_bit(bit, msi->msi_irq_in_use);
1991 -
1992 - mutex_unlock(&msi->lock);
1993 -
1994 - irq_domain_set_info(domain, virq, bit, &mobiveil_msi_bottom_irq_chip,
1995 - domain->host_data, handle_level_irq, NULL, NULL);
1996 - return 0;
1997 -}
1998 -
1999 -static void mobiveil_irq_msi_domain_free(struct irq_domain *domain,
2000 - unsigned int virq,
2001 - unsigned int nr_irqs)
2002 -{
2003 - struct irq_data *d = irq_domain_get_irq_data(domain, virq);
2004 - struct mobiveil_pcie *pcie = irq_data_get_irq_chip_data(d);
2005 - struct mobiveil_msi *msi = &pcie->msi;
2006 -
2007 - mutex_lock(&msi->lock);
2008 -
2009 - if (!test_bit(d->hwirq, msi->msi_irq_in_use))
2010 - dev_err(&pcie->pdev->dev, "trying to free unused MSI#%lu\n",
2011 - d->hwirq);
2012 - else
2013 - __clear_bit(d->hwirq, msi->msi_irq_in_use);
2014 -
2015 - mutex_unlock(&msi->lock);
2016 -}
2017 -static const struct irq_domain_ops msi_domain_ops = {
2018 - .alloc = mobiveil_irq_msi_domain_alloc,
2019 - .free = mobiveil_irq_msi_domain_free,
2020 -};
2021 -
2022 -static int mobiveil_allocate_msi_domains(struct mobiveil_pcie *pcie)
2023 -{
2024 - struct device *dev = &pcie->pdev->dev;
2025 - struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node);
2026 - struct mobiveil_msi *msi = &pcie->msi;
2027 -
2028 - mutex_init(&pcie->msi.lock);
2029 - msi->dev_domain = irq_domain_add_linear(NULL, msi->num_of_vectors,
2030 - &msi_domain_ops, pcie);
2031 - if (!msi->dev_domain) {
2032 - dev_err(dev, "failed to create IRQ domain\n");
2033 - return -ENOMEM;
2034 - }
2035 -
2036 - msi->msi_domain = pci_msi_create_irq_domain(fwnode,
2037 - &mobiveil_msi_domain_info,
2038 - msi->dev_domain);
2039 - if (!msi->msi_domain) {
2040 - dev_err(dev, "failed to create MSI domain\n");
2041 - irq_domain_remove(msi->dev_domain);
2042 - return -ENOMEM;
2043 - }
2044 -
2045 - return 0;
2046 -}
2047 -
2048 -static int mobiveil_pcie_init_irq_domain(struct mobiveil_pcie *pcie)
2049 -{
2050 - struct device *dev = &pcie->pdev->dev;
2051 - struct device_node *node = dev->of_node;
2052 - int ret;
2053 -
2054 - /* setup INTx */
2055 - pcie->intx_domain = irq_domain_add_linear(node, PCI_NUM_INTX,
2056 - &intx_domain_ops, pcie);
2057 -
2058 - if (!pcie->intx_domain) {
2059 - dev_err(dev, "Failed to get a INTx IRQ domain\n");
2060 - return -ENOMEM;
2061 - }
2062 -
2063 - raw_spin_lock_init(&pcie->intx_mask_lock);
2064 -
2065 - /* setup MSI */
2066 - ret = mobiveil_allocate_msi_domains(pcie);
2067 - if (ret)
2068 - return ret;
2069 -
2070 - return 0;
2071 -}
2072 -
2073 -static int mobiveil_pcie_probe(struct platform_device *pdev)
2074 -{
2075 - struct mobiveil_pcie *pcie;
2076 - struct pci_bus *bus;
2077 - struct pci_bus *child;
2078 - struct pci_host_bridge *bridge;
2079 - struct device *dev = &pdev->dev;
2080 - resource_size_t iobase;
2081 - int ret;
2082 -
2083 - /* allocate the PCIe port */
2084 - bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
2085 - if (!bridge)
2086 - return -ENOMEM;
2087 -
2088 - pcie = pci_host_bridge_priv(bridge);
2089 -
2090 - pcie->pdev = pdev;
2091 -
2092 - ret = mobiveil_pcie_parse_dt(pcie);
2093 - if (ret) {
2094 - dev_err(dev, "Parsing DT failed, ret: %x\n", ret);
2095 - return ret;
2096 - }
2097 -
2098 - INIT_LIST_HEAD(&pcie->resources);
2099 -
2100 - /* parse the host bridge base addresses from the device tree file */
2101 - ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
2102 - &pcie->resources, &iobase);
2103 - if (ret) {
2104 - dev_err(dev, "Getting bridge resources failed\n");
2105 - return ret;
2106 - }
2107 -
2108 - /*
2109 - * configure all inbound and outbound windows and prepare the RC for
2110 - * config access
2111 - */
2112 - ret = mobiveil_host_init(pcie);
2113 - if (ret) {
2114 - dev_err(dev, "Failed to initialize host\n");
2115 - goto error;
2116 - }
2117 -
2118 - /* initialize the IRQ domains */
2119 - ret = mobiveil_pcie_init_irq_domain(pcie);
2120 - if (ret) {
2121 - dev_err(dev, "Failed creating IRQ Domain\n");
2122 - goto error;
2123 - }
2124 -
2125 - irq_set_chained_handler_and_data(pcie->irq, mobiveil_pcie_isr, pcie);
2126 -
2127 - ret = devm_request_pci_bus_resources(dev, &pcie->resources);
2128 - if (ret)
2129 - goto error;
2130 -
2131 - /* Initialize bridge */
2132 - list_splice_init(&pcie->resources, &bridge->windows);
2133 - bridge->dev.parent = dev;
2134 - bridge->sysdata = pcie;
2135 - bridge->busnr = pcie->root_bus_nr;
2136 - bridge->ops = &mobiveil_pcie_ops;
2137 - bridge->map_irq = of_irq_parse_and_map_pci;
2138 - bridge->swizzle_irq = pci_common_swizzle;
2139 -
2140 - ret = mobiveil_bringup_link(pcie);
2141 - if (ret) {
2142 - dev_info(dev, "link bring-up failed\n");
2143 - goto error;
2144 - }
2145 -
2146 - /* setup the kernel resources for the newly added PCIe root bus */
2147 - ret = pci_scan_root_bus_bridge(bridge);
2148 - if (ret)
2149 - goto error;
2150 -
2151 - bus = bridge->bus;
2152 -
2153 - pci_assign_unassigned_bus_resources(bus);
2154 - list_for_each_entry(child, &bus->children, node)
2155 - pcie_bus_configure_settings(child);
2156 - pci_bus_add_devices(bus);
2157 -
2158 - return 0;
2159 -error:
2160 - pci_free_resource_list(&pcie->resources);
2161 - return ret;
2162 -}
2163 -
2164 -static const struct of_device_id mobiveil_pcie_of_match[] = {
2165 - {.compatible = "mbvl,gpex40-pcie",},
2166 - {},
2167 -};
2168 -
2169 -MODULE_DEVICE_TABLE(of, mobiveil_pcie_of_match);
2170 -
2171 -static struct platform_driver mobiveil_pcie_driver = {
2172 - .probe = mobiveil_pcie_probe,
2173 - .driver = {
2174 - .name = "mobiveil-pcie",
2175 - .of_match_table = mobiveil_pcie_of_match,
2176 - .suppress_bind_attrs = true,
2177 - },
2178 -};
2179 -
2180 -builtin_platform_driver(mobiveil_pcie_driver);
2181 -
2182 -MODULE_LICENSE("GPL v2");
2183 -MODULE_DESCRIPTION("Mobiveil PCIe host controller driver");
2184 -MODULE_AUTHOR("Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>");