unsigned long irq_flags, const char * devname,
void *dev_id)
{
- unsigned int irq = virt_irq_create_mapping(ist);
+ unsigned int irq = irq_create_mapping(NULL, ist, 0);
if (irq == NO_IRQ)
return -EINVAL;
- irq = irq_offset_up(irq);
-
return request_irq(irq, handler,
irq_flags, devname, dev_id);
}
void ibmebus_free_irq(struct ibmebus_dev *dev, u32 ist, void *dev_id)
{
- unsigned int irq = virt_irq_create_mapping(ist);
+ unsigned int irq = irq_find_mapping(NULL, ist);
- irq = irq_offset_up(irq);
free_irq(irq, dev_id);
-
- return;
}
EXPORT_SYMBOL(ibmebus_free_irq);
* to reduce code space and undefined function references.
*/
+#undef DEBUG
+
#include <linux/module.h>
#include <linux/threads.h>
#include <linux/kernel_stat.h>
#include <linux/cpumask.h>
#include <linux/profile.h>
#include <linux/bitops.h>
-#include <linux/pci.h>
+#include <linux/list.h>
+#include <linux/radix-tree.h>
+#include <linux/mutex.h>
+#include <linux/bootmem.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/prom.h>
#include <asm/ptrace.h>
#include <asm/machdep.h>
+#include <asm/udbg.h>
#ifdef CONFIG_PPC_ISERIES
#include <asm/paca.h>
#endif
EXPORT_SYMBOL(irq_desc);
int distribute_irqs = 1;
-u64 ppc64_interrupt_controller;
#endif /* CONFIG_PPC64 */
int show_interrupts(struct seq_file *p, void *v)
void do_IRQ(struct pt_regs *regs)
{
- int irq;
+ unsigned int irq;
#ifdef CONFIG_IRQSTACKS
struct thread_info *curtp, *irqtp;
#endif
*/
irq = ppc_md.get_irq(regs);
- if (irq >= 0) {
+ if (irq != NO_IRQ && irq != NO_IRQ_IGNORE) {
#ifdef CONFIG_IRQSTACKS
/* Switch to the irq stack to handle this */
curtp = current_thread_info();
} else
#endif
generic_handle_irq(irq, regs);
- } else if (irq != -2)
+ } else if (irq != NO_IRQ_IGNORE)
/* That's not SMP safe ... but who cares ? */
ppc_spurious_interrupts++;
#endif
}
-#ifdef CONFIG_PPC64
-/*
- * Virtual IRQ mapping code, used on systems with XICS interrupt controllers.
- */
-
-#define UNDEFINED_IRQ 0xffffffff
-unsigned int virt_irq_to_real_map[NR_IRQS];
-
-/*
- * Don't use virtual irqs 0, 1, 2 for devices.
- * The pcnet32 driver considers interrupt numbers < 2 to be invalid,
- * and 2 is the XICS IPI interrupt.
- * We limit virtual irqs to __irq_offet_value less than virt_irq_max so
- * that when we offset them we don't end up with an interrupt
- * number >= virt_irq_max.
- */
-#define MIN_VIRT_IRQ 3
-
-unsigned int virt_irq_max;
-static unsigned int max_virt_irq;
-static unsigned int nr_virt_irqs;
-
-void
-virt_irq_init(void)
-{
- int i;
-
- if ((virt_irq_max == 0) || (virt_irq_max > (NR_IRQS - 1)))
- virt_irq_max = NR_IRQS - 1;
- max_virt_irq = virt_irq_max - __irq_offset_value;
- nr_virt_irqs = max_virt_irq - MIN_VIRT_IRQ + 1;
-
- for (i = 0; i < NR_IRQS; i++)
- virt_irq_to_real_map[i] = UNDEFINED_IRQ;
-}
-
-/* Create a mapping for a real_irq if it doesn't already exist.
- * Return the virtual irq as a convenience.
- */
-int virt_irq_create_mapping(unsigned int real_irq)
-{
- unsigned int virq, first_virq;
- static int warned;
-
- if (ppc64_interrupt_controller == IC_OPEN_PIC)
- return real_irq; /* no mapping for openpic (for now) */
-
- if (ppc64_interrupt_controller == IC_CELL_PIC)
- return real_irq; /* no mapping for iic either */
-
- /* don't map interrupts < MIN_VIRT_IRQ */
- if (real_irq < MIN_VIRT_IRQ) {
- virt_irq_to_real_map[real_irq] = real_irq;
- return real_irq;
- }
-
- /* map to a number between MIN_VIRT_IRQ and max_virt_irq */
- virq = real_irq;
- if (virq > max_virt_irq)
- virq = (virq % nr_virt_irqs) + MIN_VIRT_IRQ;
-
- /* search for this number or a free slot */
- first_virq = virq;
- while (virt_irq_to_real_map[virq] != UNDEFINED_IRQ) {
- if (virt_irq_to_real_map[virq] == real_irq)
- return virq;
- if (++virq > max_virt_irq)
- virq = MIN_VIRT_IRQ;
- if (virq == first_virq)
- goto nospace; /* oops, no free slots */
- }
-
- virt_irq_to_real_map[virq] = real_irq;
- return virq;
-
- nospace:
- if (!warned) {
- printk(KERN_CRIT "Interrupt table is full\n");
- printk(KERN_CRIT "Increase virt_irq_max (currently %d) "
- "in your kernel sources and rebuild.\n", virt_irq_max);
- warned = 1;
- }
- return NO_IRQ;
-}
-
-/*
- * In most cases will get a hit on the very first slot checked in the
- * virt_irq_to_real_map. Only when there are a large number of
- * IRQs will this be expensive.
- */
-unsigned int real_irq_to_virt_slowpath(unsigned int real_irq)
-{
- unsigned int virq;
- unsigned int first_virq;
-
- virq = real_irq;
-
- if (virq > max_virt_irq)
- virq = (virq % nr_virt_irqs) + MIN_VIRT_IRQ;
-
- first_virq = virq;
-
- do {
- if (virt_irq_to_real_map[virq] == real_irq)
- return virq;
-
- virq++;
-
- if (virq >= max_virt_irq)
- virq = 0;
-
- } while (first_virq != virq);
-
- return NO_IRQ;
-
-}
-#endif /* CONFIG_PPC64 */
#ifdef CONFIG_IRQSTACKS
struct thread_info *softirq_ctx[NR_CPUS] __read_mostly;
}
EXPORT_SYMBOL(do_softirq);
+
+/*
+ * IRQ controller and virtual interrupts
+ */
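+
+/*
+ * Rough usage sketch of the API below (as the Cell iic/spider code later
+ * in this patch uses it): a platform PIC driver allocates an irq_host
+ * with irq_alloc_host() and a set of irq_host_ops, obtains Linux virtual
+ * interrupt numbers with irq_create_mapping(host, hwirq, flags) (or
+ * indirectly via irq_of_parse_and_map()), and translates hardware numbers
+ * read from the PIC back to virqs in its get_irq() hook using
+ * irq_linear_revmap() or irq_radix_revmap().
+ */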
+
+#ifdef CONFIG_PPC_MERGE
+
+static LIST_HEAD(irq_hosts);
+static DEFINE_SPINLOCK(irq_big_lock);
+
+struct irq_map_entry irq_map[NR_IRQS];
+static unsigned int irq_virq_count = NR_IRQS;
+static struct irq_host *irq_default_host;
+
+struct irq_host *irq_alloc_host(unsigned int revmap_type,
+ unsigned int revmap_arg,
+ struct irq_host_ops *ops,
+ irq_hw_number_t inval_irq)
+{
+ struct irq_host *host;
+ unsigned int size = sizeof(struct irq_host);
+ unsigned int i;
+ unsigned int *rmap;
+ unsigned long flags;
+
+ /* Allocate structure and revmap table if using linear mapping */
+ if (revmap_type == IRQ_HOST_MAP_LINEAR)
+ size += revmap_arg * sizeof(unsigned int);
+ if (mem_init_done)
+ host = kzalloc(size, GFP_KERNEL);
+ else {
+ host = alloc_bootmem(size);
+ if (host)
+ memset(host, 0, size);
+ }
+ if (host == NULL)
+ return NULL;
+
+ /* Fill structure */
+ host->revmap_type = revmap_type;
+ host->inval_irq = inval_irq;
+ host->ops = ops;
+
+ spin_lock_irqsave(&irq_big_lock, flags);
+
+ /* If it's a legacy controller, check for duplicates and
+ * mark it as allocated (we use the irq 0 host pointer for that).
+ */
+ if (revmap_type == IRQ_HOST_MAP_LEGACY) {
+ if (irq_map[0].host != NULL) {
+ spin_unlock_irqrestore(&irq_big_lock, flags);
+ /* If we are in early boot, we can't free the structure,
+ * too bad...
+ * This will be fixed once the slab allocator is made
+ * available early, instead of the current cruft.
+ */
+ if (mem_init_done)
+ kfree(host);
+ return NULL;
+ }
+ irq_map[0].host = host;
+ }
+
+ list_add(&host->link, &irq_hosts);
+ spin_unlock_irqrestore(&irq_big_lock, flags);
+
+ /* Additional setup per revmap type */
+ switch(revmap_type) {
+ case IRQ_HOST_MAP_LEGACY:
+ /* 0 is always the invalid number for legacy */
+ host->inval_irq = 0;
+ /* set ourselves up as the host for all legacy interrupts */
+ for (i = 1; i < NUM_ISA_INTERRUPTS; i++) {
+ irq_map[i].hwirq = 0;
+ smp_wmb();
+ irq_map[i].host = host;
+ smp_wmb();
+
+ /* Clear some flags */
+ get_irq_desc(i)->status
+ &= ~(IRQ_NOREQUEST | IRQ_LEVEL);
+
+ /* Legacy flags are left at their defaults at this point;
+ * one can then use irq_create_mapping() to
+ * explicitly change them
+ */
+ ops->map(host, i, i, 0);
+ }
+ break;
+ case IRQ_HOST_MAP_LINEAR:
+ rmap = (unsigned int *)(host + 1);
+ for (i = 0; i < revmap_arg; i++)
+ rmap[i] = NO_IRQ;
+ host->revmap_data.linear.size = revmap_arg;
+ smp_wmb();
+ host->revmap_data.linear.revmap = rmap;
+ break;
+ default:
+ break;
+ }
+
+ pr_debug("irq: Allocated host of type %d @0x%p\n", revmap_type, host);
+
+ return host;
+}
+
+struct irq_host *irq_find_host(struct device_node *node)
+{
+ struct irq_host *h, *found = NULL;
+ unsigned long flags;
+
+ /* We might want to match the legacy controller last since
+ * it might potentially be set to match all interrupts in
+ * the absence of a device node. This isn't a problem so far,
+ * though...
+ */
+ spin_lock_irqsave(&irq_big_lock, flags);
+ list_for_each_entry(h, &irq_hosts, link)
+ if (h->ops->match == NULL || h->ops->match(h, node)) {
+ found = h;
+ break;
+ }
+ spin_unlock_irqrestore(&irq_big_lock, flags);
+ return found;
+}
+EXPORT_SYMBOL_GPL(irq_find_host);
+
+void irq_set_default_host(struct irq_host *host)
+{
+ pr_debug("irq: Default host set to @0x%p\n", host);
+
+ irq_default_host = host;
+}
+
+void irq_set_virq_count(unsigned int count)
+{
+ pr_debug("irq: Trying to set virq count to %d\n", count);
+
+ BUG_ON(count < NUM_ISA_INTERRUPTS);
+ if (count < NR_IRQS)
+ irq_virq_count = count;
+}
+
+unsigned int irq_create_mapping(struct irq_host *host,
+ irq_hw_number_t hwirq,
+ unsigned int flags)
+{
+ unsigned int virq, hint;
+
+ pr_debug("irq: irq_create_mapping(0x%p, 0x%lx, 0x%x)\n",
+ host, hwirq, flags);
+
+ /* Look for the default host if necessary */
+ if (host == NULL)
+ host = irq_default_host;
+ if (host == NULL) {
+ printk(KERN_WARNING "irq_create_mapping called for"
+ " NULL host, hwirq=%lx\n", hwirq);
+ WARN_ON(1);
+ return NO_IRQ;
+ }
+ pr_debug("irq: -> using host @%p\n", host);
+
+ /* Check if a mapping already exists; if it does, call
+ * host->ops->map() to update the flags
+ */
+ virq = irq_find_mapping(host, hwirq);
+ if (virq != NO_IRQ) {
+ pr_debug("irq: -> existing mapping on virq %d\n", virq);
+ host->ops->map(host, virq, hwirq, flags);
+ return virq;
+ }
+
+ /* Get a virtual interrupt number */
+ if (host->revmap_type == IRQ_HOST_MAP_LEGACY) {
+ /* Handle legacy */
+ virq = (unsigned int)hwirq;
+ if (virq == 0 || virq >= NUM_ISA_INTERRUPTS)
+ return NO_IRQ;
+ return virq;
+ } else {
+ /* Allocate a virtual interrupt number */
+ hint = hwirq % irq_virq_count;
+ virq = irq_alloc_virt(host, 1, hint);
+ if (virq == NO_IRQ) {
+ pr_debug("irq: -> virq allocation failed\n");
+ return NO_IRQ;
+ }
+ }
+ pr_debug("irq: -> obtained virq %d\n", virq);
+
+ /* Clear some flags */
+ get_irq_desc(virq)->status &= ~(IRQ_NOREQUEST | IRQ_LEVEL);
+
+ /* map it */
+ if (host->ops->map(host, virq, hwirq, flags)) {
+ pr_debug("irq: -> mapping failed, freeing\n");
+ irq_free_virt(virq, 1);
+ return NO_IRQ;
+ }
+ smp_wmb();
+ irq_map[virq].hwirq = hwirq;
+ smp_mb();
+ return virq;
+}
+EXPORT_SYMBOL_GPL(irq_create_mapping);
+
+unsigned int irq_create_of_mapping(struct device_node *controller,
+ u32 *intspec, unsigned int intsize)
+{
+ struct irq_host *host;
+ irq_hw_number_t hwirq;
+ unsigned int flags = IRQ_TYPE_NONE;
+
+ if (controller == NULL)
+ host = irq_default_host;
+ else
+ host = irq_find_host(controller);
+ if (host == NULL)
+ return NO_IRQ;
+
+ /* If host has no translation, then we assume interrupt line */
+ if (host->ops->xlate == NULL)
+ hwirq = intspec[0];
+ else {
+ if (host->ops->xlate(host, controller, intspec, intsize,
+ &hwirq, &flags))
+ return NO_IRQ;
+ }
+
+ return irq_create_mapping(host, hwirq, flags);
+}
+EXPORT_SYMBOL_GPL(irq_create_of_mapping);
+
+unsigned int irq_of_parse_and_map(struct device_node *dev, int index)
+{
+ struct of_irq oirq;
+
+ if (of_irq_map_one(dev, index, &oirq))
+ return NO_IRQ;
+
+ return irq_create_of_mapping(oirq.controller, oirq.specifier,
+ oirq.size);
+}
+EXPORT_SYMBOL_GPL(irq_of_parse_and_map);
+
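+/* Typical driver usage (illustrative sketch; handler, flags, name and
+ * dev are the driver's own):
+ *	virq = irq_of_parse_and_map(np, 0);
+ *	if (virq != NO_IRQ)
+ *		err = request_irq(virq, handler, flags, name, dev);
+ */
+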
+void irq_dispose_mapping(unsigned int virq)
+{
+ struct irq_host *host = irq_map[virq].host;
+ irq_hw_number_t hwirq;
+ unsigned long flags;
+
+ WARN_ON (host == NULL);
+ if (host == NULL)
+ return;
+
+ /* Never unmap legacy interrupts */
+ if (host->revmap_type == IRQ_HOST_MAP_LEGACY)
+ return;
+
+ /* remove chip and handler */
+ set_irq_chip_and_handler(virq, NULL, NULL);
+
+ /* Make sure it's completed */
+ synchronize_irq(virq);
+
+ /* Tell the PIC about it */
+ if (host->ops->unmap)
+ host->ops->unmap(host, virq);
+ smp_mb();
+
+ /* Clear reverse map */
+ hwirq = irq_map[virq].hwirq;
+ switch(host->revmap_type) {
+ case IRQ_HOST_MAP_LINEAR:
+ if (hwirq < host->revmap_data.linear.size)
+ host->revmap_data.linear.revmap[hwirq] = NO_IRQ;
+ break;
+ case IRQ_HOST_MAP_TREE:
+ /* Check if radix tree allocated yet */
+ if (host->revmap_data.tree.gfp_mask == 0)
+ break;
+ /* XXX The radix tree is not SMP safe! Remove this lock when it
+ * becomes safe and use some RCU sync to make sure everything is
+ * ok before we can re-use that map entry
+ */
+ spin_lock_irqsave(&irq_big_lock, flags);
+ radix_tree_delete(&host->revmap_data.tree, hwirq);
+ spin_unlock_irqrestore(&irq_big_lock, flags);
+ break;
+ }
+
+ /* Destroy map */
+ smp_mb();
+ irq_map[virq].hwirq = host->inval_irq;
+
+ /* Set some flags */
+ get_irq_desc(virq)->status |= IRQ_NOREQUEST;
+
+ /* Free it */
+ irq_free_virt(virq, 1);
+}
+EXPORT_SYMBOL_GPL(irq_dispose_mapping);
+
+unsigned int irq_find_mapping(struct irq_host *host,
+ irq_hw_number_t hwirq)
+{
+ unsigned int i;
+ unsigned int hint = hwirq % irq_virq_count;
+
+ /* Look for the default host if necessary */
+ if (host == NULL)
+ host = irq_default_host;
+ if (host == NULL)
+ return NO_IRQ;
+
+ /* legacy -> bail early */
+ if (host->revmap_type == IRQ_HOST_MAP_LEGACY)
+ return hwirq;
+
+ /* Slow path does a linear search of the map */
+ if (hint < NUM_ISA_INTERRUPTS)
+ hint = NUM_ISA_INTERRUPTS;
+ i = hint;
+ do {
+ if (irq_map[i].host == host &&
+ irq_map[i].hwirq == hwirq)
+ return i;
+ i++;
+ if (i >= irq_virq_count)
+ i = NUM_ISA_INTERRUPTS;
+ } while(i != hint);
+ return NO_IRQ;
+}
+EXPORT_SYMBOL_GPL(irq_find_mapping);
+
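+/* Note: irq_find_mapping() above is the generic (slow, linear-scan)
+ * lookup; interrupt controllers that resolve a source on every
+ * interrupt are expected to use irq_linear_revmap() or
+ * irq_radix_revmap() below, which fall back to this function only on
+ * a reverse-map miss.
+ */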
+
+unsigned int irq_radix_revmap(struct irq_host *host,
+ irq_hw_number_t hwirq)
+{
+ struct radix_tree_root *tree;
+ struct irq_map_entry *ptr;
+ unsigned int virq;
+ unsigned long flags;
+
+ WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE);
+
+ /* Check if the radix tree exists yet. We test the value of
+ * the gfp_mask for that. Sneaky but it saves another int in the
+ * structure. If not, we fall back to the slow path
+ */
+ tree = &host->revmap_data.tree;
+ if (tree->gfp_mask == 0)
+ return irq_find_mapping(host, hwirq);
+
+ /* XXX Current radix trees are NOT SMP safe !!! Remove this lock
+ * when that is fixed (when Nick's patch gets in)
+ */
+ spin_lock_irqsave(&irq_big_lock, flags);
+
+ /* Now try to resolve */
+ ptr = radix_tree_lookup(tree, hwirq);
+ /* Found it, return */
+ if (ptr) {
+ virq = ptr - irq_map;
+ goto bail;
+ }
+
+ /* If not there, try to insert it */
+ virq = irq_find_mapping(host, hwirq);
+ if (virq != NO_IRQ)
+ radix_tree_insert(tree, virq, &irq_map[virq]);
+ bail:
+ spin_unlock_irqrestore(&irq_big_lock, flags);
+ return virq;
+}
+
+unsigned int irq_linear_revmap(struct irq_host *host,
+ irq_hw_number_t hwirq)
+{
+ unsigned int *revmap;
+
+ WARN_ON(host->revmap_type != IRQ_HOST_MAP_LINEAR);
+
+ /* Check revmap bounds */
+ if (unlikely(hwirq >= host->revmap_data.linear.size))
+ return irq_find_mapping(host, hwirq);
+
+ /* Check if revmap was allocated */
+ revmap = host->revmap_data.linear.revmap;
+ if (unlikely(revmap == NULL))
+ return irq_find_mapping(host, hwirq);
+
+ /* Fill up revmap with slow path if no mapping found */
+ if (unlikely(revmap[hwirq] == NO_IRQ))
+ revmap[hwirq] = irq_find_mapping(host, hwirq);
+
+ return revmap[hwirq];
+}
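+
+/* Example (sketch): a PIC's get_irq() hook reads a hardware source
+ * number from its registers and calls irq_linear_revmap(host, hw) to
+ * obtain the Linux virq; the first lookup for a given hw number falls
+ * back to irq_find_mapping() and caches the result in the revmap array.
+ */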
+
+unsigned int irq_alloc_virt(struct irq_host *host,
+ unsigned int count,
+ unsigned int hint)
+{
+ unsigned long flags;
+ unsigned int i, j, found = NO_IRQ;
+ unsigned int limit = irq_virq_count - count;
+
+ if (count == 0 || count > (irq_virq_count - NUM_ISA_INTERRUPTS))
+ return NO_IRQ;
+
+ spin_lock_irqsave(&irq_big_lock, flags);
+
+ /* Use hint for 1 interrupt if any */
+ if (count == 1 && hint >= NUM_ISA_INTERRUPTS &&
+ hint < irq_virq_count && irq_map[hint].host == NULL) {
+ found = hint;
+ goto hint_found;
+ }
+
+ /* Look for count consecutive numbers in the allocatable
+ * (non-legacy) space
+ */
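+ /* Note: when a used slot is found at j, i is bumped to j + 1; because
+ * the inner loop bound (i + count) moves with i, the scan keeps going
+ * until count consecutive free slots follow the last conflict, and i
+ * then indexes the first of them.
+ */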
+ for (i = NUM_ISA_INTERRUPTS; i <= limit; ) {
+ for (j = i; j < (i + count); j++)
+ if (irq_map[j].host != NULL) {
+ i = j + 1;
+ continue;
+ }
+ found = i;
+ break;
+ }
+ if (found == NO_IRQ) {
+ spin_unlock_irqrestore(&irq_big_lock, flags);
+ return NO_IRQ;
+ }
+ hint_found:
+ for (i = found; i < (found + count); i++) {
+ irq_map[i].hwirq = host->inval_irq;
+ smp_wmb();
+ irq_map[i].host = host;
+ }
+ spin_unlock_irqrestore(&irq_big_lock, flags);
+ return found;
+}
+
+void irq_free_virt(unsigned int virq, unsigned int count)
+{
+ unsigned long flags;
+ unsigned int i;
+
+ WARN_ON (virq < NUM_ISA_INTERRUPTS);
+ WARN_ON (count == 0 || (virq + count) > irq_virq_count);
+
+ spin_lock_irqsave(&irq_big_lock, flags);
+ for (i = virq; i < (virq + count); i++) {
+ struct irq_host *host;
+
+ if (i < NUM_ISA_INTERRUPTS ||
+ (virq + count) > irq_virq_count)
+ continue;
+
+ host = irq_map[i].host;
+ irq_map[i].hwirq = host->inval_irq;
+ smp_wmb();
+ irq_map[i].host = NULL;
+ }
+ spin_unlock_irqrestore(&irq_big_lock, flags);
+}
+
+void irq_early_init(void)
+{
+ unsigned int i;
+
+ for (i = 0; i < NR_IRQS; i++)
+ get_irq_desc(i)->status |= IRQ_NOREQUEST;
+}
+
+/* We need to create the radix trees late */
+static int irq_late_init(void)
+{
+ struct irq_host *h;
+ unsigned long flags;
+
+ spin_lock_irqsave(&irq_big_lock, flags);
+ list_for_each_entry(h, &irq_hosts, link) {
+ if (h->revmap_type == IRQ_HOST_MAP_TREE)
+ INIT_RADIX_TREE(&h->revmap_data.tree, GFP_ATOMIC);
+ }
+ spin_unlock_irqrestore(&irq_big_lock, flags);
+
+ return 0;
+}
+arch_initcall(irq_late_init);
+
+#endif /* CONFIG_PPC_MERGE */
+
#ifdef CONFIG_PCI_MSI
int pci_enable_msi(struct pci_dev * pdev)
{
struct device_node *np;
unsigned int speed;
unsigned int clock;
+ int irq_check_parent;
phys_addr_t taddr;
} legacy_serial_infos[MAX_LEGACY_SERIAL_PORTS];
static unsigned int legacy_serial_count;
static int __init add_legacy_port(struct device_node *np, int want_index,
int iotype, phys_addr_t base,
phys_addr_t taddr, unsigned long irq,
- upf_t flags)
+ upf_t flags, int irq_check_parent)
{
u32 *clk, *spd, clock = BASE_BAUD * 16;
int index;
if (legacy_serial_infos[index].np != 0) {
/* if we still have some room, move it, else override */
if (legacy_serial_count < MAX_LEGACY_SERIAL_PORTS) {
- printk(KERN_INFO "Moved legacy port %d -> %d\n",
+ printk(KERN_DEBUG "Moved legacy port %d -> %d\n",
index, legacy_serial_count);
legacy_serial_ports[legacy_serial_count] =
legacy_serial_ports[index];
legacy_serial_infos[index];
legacy_serial_count++;
} else {
- printk(KERN_INFO "Replacing legacy port %d\n", index);
+ printk(KERN_DEBUG "Replacing legacy port %d\n", index);
}
}
legacy_serial_infos[index].np = of_node_get(np);
legacy_serial_infos[index].clock = clock;
legacy_serial_infos[index].speed = spd ? *spd : 0;
+ legacy_serial_infos[index].irq_check_parent = irq_check_parent;
- printk(KERN_INFO "Found legacy serial port %d for %s\n",
+ printk(KERN_DEBUG "Found legacy serial port %d for %s\n",
index, np->full_name);
- printk(KERN_INFO " %s=%llx, taddr=%llx, irq=%lx, clk=%d, speed=%d\n",
+ printk(KERN_DEBUG " %s=%llx, taddr=%llx, irq=%lx, clk=%d, speed=%d\n",
(iotype == UPIO_PORT) ? "port" : "mem",
(unsigned long long)base, (unsigned long long)taddr, irq,
legacy_serial_ports[index].uartclk,
/* Add port, irq will be dealt with later. We passed a translated
* IO port value. It will be fixed up later along with the irq
*/
- return add_legacy_port(np, -1, UPIO_MEM, addr, addr, NO_IRQ, flags);
+ return add_legacy_port(np, -1, UPIO_MEM, addr, addr, NO_IRQ, flags, 0);
}
static int __init add_legacy_isa_port(struct device_node *np,
/* Add port, irq will be dealt with later */
return add_legacy_port(np, index, UPIO_PORT, reg[1], taddr,
- NO_IRQ, UPF_BOOT_AUTOCONF);
+ NO_IRQ, UPF_BOOT_AUTOCONF, 0);
}
/* Add port, irq will be dealt with later. We passed a translated
* IO port value. It will be fixed up later along with the irq
*/
- return add_legacy_port(np, index, iotype, base, addr, NO_IRQ, UPF_BOOT_AUTOCONF);
+ return add_legacy_port(np, index, iotype, base, addr, NO_IRQ,
+ UPF_BOOT_AUTOCONF, np != pci_dev);
}
#endif
struct device_node *np,
struct plat_serial8250_port *port)
{
+ unsigned int virq;
+
DBG("fixup_port_irq(%d)\n", index);
- /* Check for interrupts in that node */
- if (np->n_intrs > 0) {
- port->irq = np->intrs[0].line;
- DBG(" port %d (%s), irq=%d\n",
- index, np->full_name, port->irq);
- return;
+ virq = irq_of_parse_and_map(np, 0);
+ if (virq == NO_IRQ && legacy_serial_infos[index].irq_check_parent) {
+ np = of_get_parent(np);
+ if (np == NULL)
+ return;
+ virq = irq_of_parse_and_map(np, 0);
+ of_node_put(np);
}
-
- /* Check for interrupts in the parent */
- np = of_get_parent(np);
- if (np == NULL)
+ if (virq == NO_IRQ)
return;
- if (np->n_intrs > 0) {
- port->irq = np->intrs[0].line;
- DBG(" port %d (%s), irq=%d\n",
- index, np->full_name, port->irq);
- }
- of_node_put(np);
+ port->irq = virq;
}
static void __init fixup_port_pio(int index,
/* XXX FIXME - update OF device tree node interrupt property */
}
+#ifdef CONFIG_PPC_MERGE
+/* XXX This is a copy of the ppc64 version. This is temporary until we start
+ * merging the two PCI layers
+ */
+/*
+ * Reads the interrupt pin to determine if the interrupt is used by the card.
+ * If it is, then it gets the interrupt line from Open Firmware and sets it
+ * in the pci_dev and in the PCI_INTERRUPT_LINE config register.
+ */
+int pci_read_irq_line(struct pci_dev *pci_dev)
+{
+ struct of_irq oirq;
+ unsigned int virq;
+
+ DBG("Try to map irq for %s...\n", pci_name(pci_dev));
+
+ if (of_irq_map_pci(pci_dev, &oirq)) {
+ DBG(" -> failed !\n");
+ return -1;
+ }
+
+ DBG(" -> got one, spec %d cells (0x%08x...) on %s\n",
+ oirq.size, oirq.specifier[0], oirq.controller->full_name);
+
+ virq = irq_create_of_mapping(oirq.controller, oirq.specifier, oirq.size);
+ if (virq == NO_IRQ) {
+ DBG(" -> failed to map !\n");
+ return -1;
+ }
+ pci_dev->irq = virq;
+ pci_write_config_byte(pci_dev, PCI_INTERRUPT_LINE, virq);
+
+ return 0;
+}
+EXPORT_SYMBOL(pci_read_irq_line);
+#endif /* CONFIG_PPC_MERGE */
+
int pcibios_enable_device(struct pci_dev *dev, int mask)
{
u16 cmd, old_cmd;
} else {
dev->hdr_type = PCI_HEADER_TYPE_NORMAL;
dev->rom_base_reg = PCI_ROM_ADDRESS;
+ /* Maybe do a default OF mapping here */
dev->irq = NO_IRQ;
- if (node->n_intrs > 0) {
- dev->irq = node->intrs[0].line;
- pci_write_config_byte(dev, PCI_INTERRUPT_LINE,
- dev->irq);
- }
}
pci_parse_of_addrs(node, dev);
*/
int pci_read_irq_line(struct pci_dev *pci_dev)
{
- u8 intpin;
- struct device_node *node;
-
- pci_read_config_byte(pci_dev, PCI_INTERRUPT_PIN, &intpin);
- if (intpin == 0)
- return 0;
+ struct of_irq oirq;
+ unsigned int virq;
- node = pci_device_to_OF_node(pci_dev);
- if (node == NULL)
- return -1;
+ DBG("Try to map irq for %s...\n", pci_name(pci_dev));
- if (node->n_intrs == 0)
+ if (of_irq_map_pci(pci_dev, &oirq)) {
+ DBG(" -> failed !\n");
return -1;
+ }
- pci_dev->irq = node->intrs[0].line;
+ DBG(" -> got one, spec %d cells (0x%08x...) on %s\n",
+ oirq.size, oirq.specifier[0], oirq.controller->full_name);
- pci_write_config_byte(pci_dev, PCI_INTERRUPT_LINE, pci_dev->irq);
+ virq = irq_create_of_mapping(oirq.controller, oirq.specifier, oirq.size);
+ if (virq == NO_IRQ) {
+ DBG(" -> failed to map !\n");
+ return -1;
+ }
+ pci_dev->irq = virq;
+ pci_write_config_byte(pci_dev, PCI_INTERRUPT_LINE, virq);
return 0;
}
#include <linux/module.h>
#include <linux/kexec.h>
#include <linux/debugfs.h>
+#include <linux/irq.h>
#include <asm/prom.h>
#include <asm/rtas.h>
/* export that to outside world */
struct device_node *of_chosen;
-struct device_node *dflt_interrupt_controller;
-int num_interrupt_controllers;
-
-/*
- * Wrapper for allocating memory for various data that needs to be
- * attached to device nodes as they are processed at boot or when
- * added to the device tree later (e.g. DLPAR). At boot there is
- * already a region reserved so we just increment *mem_start by size;
- * otherwise we call kmalloc.
- */
-static void * prom_alloc(unsigned long size, unsigned long *mem_start)
-{
- unsigned long tmp;
-
- if (!mem_start)
- return kmalloc(size, GFP_KERNEL);
-
- tmp = *mem_start;
- *mem_start += size;
- return (void *)tmp;
-}
-
-/*
- * Find the device_node with a given phandle.
- */
-static struct device_node * find_phandle(phandle ph)
-{
- struct device_node *np;
-
- for (np = allnodes; np != 0; np = np->allnext)
- if (np->linux_phandle == ph)
- return np;
- return NULL;
-}
-
-/*
- * Find the interrupt parent of a node.
- */
-static struct device_node * __devinit intr_parent(struct device_node *p)
-{
- phandle *parp;
-
- parp = (phandle *) get_property(p, "interrupt-parent", NULL);
- if (parp == NULL)
- return p->parent;
- p = find_phandle(*parp);
- if (p != NULL)
- return p;
- /*
- * On a powermac booted with BootX, we don't get to know the
- * phandles for any nodes, so find_phandle will return NULL.
- * Fortunately these machines only have one interrupt controller
- * so there isn't in fact any ambiguity. -- paulus
- */
- if (num_interrupt_controllers == 1)
- p = dflt_interrupt_controller;
- return p;
-}
-
-/*
- * Find out the size of each entry of the interrupts property
- * for a node.
- */
-int __devinit prom_n_intr_cells(struct device_node *np)
-{
- struct device_node *p;
- unsigned int *icp;
-
- for (p = np; (p = intr_parent(p)) != NULL; ) {
- icp = (unsigned int *)
- get_property(p, "#interrupt-cells", NULL);
- if (icp != NULL)
- return *icp;
- if (get_property(p, "interrupt-controller", NULL) != NULL
- || get_property(p, "interrupt-map", NULL) != NULL) {
- printk("oops, node %s doesn't have #interrupt-cells\n",
- p->full_name);
- return 1;
- }
- }
-#ifdef DEBUG_IRQ
- printk("prom_n_intr_cells failed for %s\n", np->full_name);
-#endif
- return 1;
-}
-
-/*
- * Map an interrupt from a device up to the platform interrupt
- * descriptor.
- */
-static int __devinit map_interrupt(unsigned int **irq, struct device_node **ictrler,
- struct device_node *np, unsigned int *ints,
- int nintrc)
-{
- struct device_node *p, *ipar;
- unsigned int *imap, *imask, *ip;
- int i, imaplen, match;
- int newintrc = 0, newaddrc = 0;
- unsigned int *reg;
- int naddrc;
-
- reg = (unsigned int *) get_property(np, "reg", NULL);
- naddrc = prom_n_addr_cells(np);
- p = intr_parent(np);
- while (p != NULL) {
- if (get_property(p, "interrupt-controller", NULL) != NULL)
- /* this node is an interrupt controller, stop here */
- break;
- imap = (unsigned int *)
- get_property(p, "interrupt-map", &imaplen);
- if (imap == NULL) {
- p = intr_parent(p);
- continue;
- }
- imask = (unsigned int *)
- get_property(p, "interrupt-map-mask", NULL);
- if (imask == NULL) {
- printk("oops, %s has interrupt-map but no mask\n",
- p->full_name);
- return 0;
- }
- imaplen /= sizeof(unsigned int);
- match = 0;
- ipar = NULL;
- while (imaplen > 0 && !match) {
- /* check the child-interrupt field */
- match = 1;
- for (i = 0; i < naddrc && match; ++i)
- match = ((reg[i] ^ imap[i]) & imask[i]) == 0;
- for (; i < naddrc + nintrc && match; ++i)
- match = ((ints[i-naddrc] ^ imap[i]) & imask[i]) == 0;
- imap += naddrc + nintrc;
- imaplen -= naddrc + nintrc;
- /* grab the interrupt parent */
- ipar = find_phandle((phandle) *imap++);
- --imaplen;
- if (ipar == NULL && num_interrupt_controllers == 1)
- /* cope with BootX not giving us phandles */
- ipar = dflt_interrupt_controller;
- if (ipar == NULL) {
- printk("oops, no int parent %x in map of %s\n",
- imap[-1], p->full_name);
- return 0;
- }
- /* find the parent's # addr and intr cells */
- ip = (unsigned int *)
- get_property(ipar, "#interrupt-cells", NULL);
- if (ip == NULL) {
- printk("oops, no #interrupt-cells on %s\n",
- ipar->full_name);
- return 0;
- }
- newintrc = *ip;
- ip = (unsigned int *)
- get_property(ipar, "#address-cells", NULL);
- newaddrc = (ip == NULL)? 0: *ip;
- imap += newaddrc + newintrc;
- imaplen -= newaddrc + newintrc;
- }
- if (imaplen < 0) {
- printk("oops, error decoding int-map on %s, len=%d\n",
- p->full_name, imaplen);
- return 0;
- }
- if (!match) {
-#ifdef DEBUG_IRQ
- printk("oops, no match in %s int-map for %s\n",
- p->full_name, np->full_name);
-#endif
- return 0;
- }
- p = ipar;
- naddrc = newaddrc;
- nintrc = newintrc;
- ints = imap - nintrc;
- reg = ints - naddrc;
- }
- if (p == NULL) {
-#ifdef DEBUG_IRQ
- printk("hmmm, int tree for %s doesn't have ctrler\n",
- np->full_name);
-#endif
- return 0;
- }
- *irq = ints;
- *ictrler = p;
- return nintrc;
-}
-
-static unsigned char map_isa_senses[4] = {
- IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE,
- IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE,
- IRQ_SENSE_EDGE | IRQ_POLARITY_NEGATIVE,
- IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE
-};
-
-static unsigned char map_mpic_senses[4] = {
- IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE,
- IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE,
- /* 2 seems to be used for the 8259 cascade... */
- IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE,
- IRQ_SENSE_EDGE | IRQ_POLARITY_NEGATIVE,
-};
-
-static int __devinit finish_node_interrupts(struct device_node *np,
- unsigned long *mem_start,
- int measure_only)
-{
- unsigned int *ints;
- int intlen, intrcells, intrcount;
- int i, j, n, sense;
- unsigned int *irq, virq;
- struct device_node *ic;
- int trace = 0;
-
- //#define TRACE(fmt...) do { if (trace) { printk(fmt); mdelay(1000); } } while(0)
-#define TRACE(fmt...)
-
- if (!strcmp(np->name, "smu-doorbell"))
- trace = 1;
-
- TRACE("Finishing SMU doorbell ! num_interrupt_controllers = %d\n",
- num_interrupt_controllers);
-
- if (num_interrupt_controllers == 0) {
- /*
- * Old machines just have a list of interrupt numbers
- * and no interrupt-controller nodes.
- */
- ints = (unsigned int *) get_property(np, "AAPL,interrupts",
- &intlen);
- /* XXX old interpret_pci_props looked in parent too */
- /* XXX old interpret_macio_props looked for interrupts
- before AAPL,interrupts */
- if (ints == NULL)
- ints = (unsigned int *) get_property(np, "interrupts",
- &intlen);
- if (ints == NULL)
- return 0;
-
- np->n_intrs = intlen / sizeof(unsigned int);
- np->intrs = prom_alloc(np->n_intrs * sizeof(np->intrs[0]),
- mem_start);
- if (!np->intrs)
- return -ENOMEM;
- if (measure_only)
- return 0;
-
- for (i = 0; i < np->n_intrs; ++i) {
- np->intrs[i].line = *ints++;
- np->intrs[i].sense = IRQ_SENSE_LEVEL
- | IRQ_POLARITY_NEGATIVE;
- }
- return 0;
- }
-
- ints = (unsigned int *) get_property(np, "interrupts", &intlen);
- TRACE("ints=%p, intlen=%d\n", ints, intlen);
- if (ints == NULL)
- return 0;
- intrcells = prom_n_intr_cells(np);
- intlen /= intrcells * sizeof(unsigned int);
- TRACE("intrcells=%d, new intlen=%d\n", intrcells, intlen);
- np->intrs = prom_alloc(intlen * sizeof(*(np->intrs)), mem_start);
- if (!np->intrs)
- return -ENOMEM;
-
- if (measure_only)
- return 0;
-
- intrcount = 0;
- for (i = 0; i < intlen; ++i, ints += intrcells) {
- n = map_interrupt(&irq, &ic, np, ints, intrcells);
- TRACE("map, irq=%d, ic=%p, n=%d\n", irq, ic, n);
- if (n <= 0)
- continue;
-
- /* don't map IRQ numbers under a cascaded 8259 controller */
- if (ic && device_is_compatible(ic, "chrp,iic")) {
- np->intrs[intrcount].line = irq[0];
- sense = (n > 1)? (irq[1] & 3): 3;
- np->intrs[intrcount].sense = map_isa_senses[sense];
- } else {
- virq = virt_irq_create_mapping(irq[0]);
- TRACE("virq=%d\n", virq);
-#ifdef CONFIG_PPC64
- if (virq == NO_IRQ) {
- printk(KERN_CRIT "Could not allocate interrupt"
- " number for %s\n", np->full_name);
- continue;
- }
-#endif
- np->intrs[intrcount].line = irq_offset_up(virq);
- sense = (n > 1)? (irq[1] & 3): 1;
-
- /* Apple uses bits in there in a different way, let's
- * only keep the real sense bit on macs
- */
- if (machine_is(powermac))
- sense &= 0x1;
- np->intrs[intrcount].sense = map_mpic_senses[sense];
- }
-
-#ifdef CONFIG_PPC64
- /* We offset irq numbers for the u3 MPIC by 128 in PowerMac */
- if (machine_is(powermac) && ic && ic->parent) {
- char *name = get_property(ic->parent, "name", NULL);
- if (name && !strcmp(name, "u3"))
- np->intrs[intrcount].line += 128;
- else if (!(name && (!strcmp(name, "mac-io") ||
- !strcmp(name, "u4"))))
- /* ignore other cascaded controllers, such as
- the k2-sata-root */
- break;
- }
-#endif /* CONFIG_PPC64 */
- if (n > 2) {
- printk("hmmm, got %d intr cells for %s:", n,
- np->full_name);
- for (j = 0; j < n; ++j)
- printk(" %d", irq[j]);
- printk("\n");
- }
- ++intrcount;
- }
- np->n_intrs = intrcount;
-
- return 0;
-}
-
-static int __devinit finish_node(struct device_node *np,
- unsigned long *mem_start,
- int measure_only)
-{
- struct device_node *child;
- int rc = 0;
-
- rc = finish_node_interrupts(np, mem_start, measure_only);
- if (rc)
- goto out;
-
- for (child = np->child; child != NULL; child = child->sibling) {
- rc = finish_node(child, mem_start, measure_only);
- if (rc)
- goto out;
- }
-out:
- return rc;
-}
-
-static void __init scan_interrupt_controllers(void)
-{
- struct device_node *np;
- int n = 0;
- char *name, *ic;
- int iclen;
-
- for (np = allnodes; np != NULL; np = np->allnext) {
- ic = get_property(np, "interrupt-controller", &iclen);
- name = get_property(np, "name", NULL);
- /* checking iclen makes sure we don't get a false
- match on /chosen.interrupt_controller */
- if ((name != NULL
- && strcmp(name, "interrupt-controller") == 0)
- || (ic != NULL && iclen == 0
- && strcmp(name, "AppleKiwi"))) {
- if (n == 0)
- dflt_interrupt_controller = np;
- ++n;
- }
- }
- num_interrupt_controllers = n;
-}
-
-/**
- * finish_device_tree is called once things are running normally
- * (i.e. with text and data mapped to the address they were linked at).
- * It traverses the device tree and fills in some of the additional,
- * fields in each node like {n_}addrs and {n_}intrs, the virt interrupt
- * mapping is also initialized at this point.
- */
-void __init finish_device_tree(void)
-{
- unsigned long start, end, size = 0;
-
- DBG(" -> finish_device_tree\n");
-
-#ifdef CONFIG_PPC64
- /* Initialize virtual IRQ map */
- virt_irq_init();
-#endif
- scan_interrupt_controllers();
-
- /*
- * Finish device-tree (pre-parsing some properties etc...)
- * We do this in 2 passes. One with "measure_only" set, which
- * will only measure the amount of memory needed, then we can
- * allocate that memory, and call finish_node again. However,
- * we must be careful as most routines will fail nowadays when
- * prom_alloc() returns 0, so we must make sure our first pass
- * doesn't start at 0. We pre-initialize size to 16 for that
- * reason and then remove those additional 16 bytes
- */
- size = 16;
- finish_node(allnodes, &size, 1);
- size -= 16;
-
- if (0 == size)
- end = start = 0;
- else
- end = start = (unsigned long)__va(lmb_alloc(size, 128));
-
- finish_node(allnodes, &end, 0);
- BUG_ON(end != start + size);
-
- DBG(" <- finish_device_tree\n");
-}
-
static inline char *find_flat_dt_string(u32 offset)
{
return ((char *)initial_boot_params) +
}
EXPORT_SYMBOL(prom_n_size_cells);
-/**
- * Work out the sense (active-low level / active-high edge)
- * of each interrupt from the device tree.
- */
-void __init prom_get_irq_senses(unsigned char *senses, int off, int max)
-{
- struct device_node *np;
- int i, j;
-
- /* default to level-triggered */
- memset(senses, IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE, max - off);
-
- for (np = allnodes; np != 0; np = np->allnext) {
- for (j = 0; j < np->n_intrs; j++) {
- i = np->intrs[j].line;
- if (i >= off && i < max)
- senses[i-off] = np->intrs[j].sense;
- }
- }
-}
-
/**
* Construct and return a list of the device_nodes with a given name.
*/
node->deadprops = NULL;
}
}
- kfree(node->intrs);
kfree(node->full_name);
kfree(node->data);
kfree(node);
#ifdef CONFIG_PPC_PSERIES
/*
* Fix up the uninitialized fields in a new device node:
- * name, type, n_addrs, addrs, n_intrs, intrs, and pci-specific fields
- *
- * A lot of boot-time code is duplicated here, because functions such
- * as finish_node_interrupts, interpret_pci_props, etc. cannot use the
- * slab allocator.
- *
- * This should probably be split up into smaller chunks.
+ * name, type and pci-specific fields
*/
static int of_finish_dynamic_node(struct device_node *node)
switch (action) {
case PSERIES_RECONFIG_ADD:
err = of_finish_dynamic_node(node);
- if (!err)
- finish_node(node, NULL, 0);
if (err < 0) {
printk(KERN_ERR "finish_node returned %d\n", err);
err = NOTIFY_BAD;
struct device_node *node;
struct pci_controller *phb;
unsigned int index;
- unsigned int root_size_cells = 0;
- unsigned int *opprop = NULL;
struct device_node *root = of_find_node_by_path("/");
- if (ppc64_interrupt_controller == IC_OPEN_PIC) {
- opprop = (unsigned int *)get_property(root,
- "platform-open-pic", NULL);
- }
-
- root_size_cells = prom_n_size_cells(root);
-
index = 0;
-
for (node = of_get_next_child(root, NULL);
node != NULL;
node = of_get_next_child(root, node)) {
setup_phb(node, phb);
pci_process_bridge_OF_ranges(phb, node, 0);
pci_setup_phb_io(phb, index == 0);
-#ifdef CONFIG_PPC_PSERIES
- /* XXX This code need serious fixing ... --BenH */
- if (ppc64_interrupt_controller == IC_OPEN_PIC && pSeries_mpic) {
- int addr = root_size_cells * (index + 2) - 1;
- mpic_assign_isu(pSeries_mpic, index, opprop[addr]);
- }
-#endif
index++;
}
ppc_md.init_early();
find_legacy_serial_ports();
- finish_device_tree();
smp_setup_cpu_maps();
/*
* Fill the ppc64_caches & systemcfg structures with informations
- * retrieved from the device-tree. Need to be called before
- * finish_device_tree() since the later requires some of the
- * informations filled up here to properly parse the interrupt tree.
+ * retrieved from the device-tree.
*/
initialize_cache_info();
+ /*
+ * Initialize irq remapping subsystem
+ */
+ irq_early_init();
+
#ifdef CONFIG_PPC_RTAS
/*
* Initialize RTAS if available
*/
find_legacy_serial_ports();
- /*
- * "Finish" the device-tree, that is do the actual parsing of
- * some of the properties like the interrupt map
- */
- finish_device_tree();
-
/*
* Initialize xmon
*/
printk("-----------------------------------------------------\n");
printk("ppc64_pft_size = 0x%lx\n", ppc64_pft_size);
- printk("ppc64_interrupt_controller = 0x%ld\n",
- ppc64_interrupt_controller);
printk("physicalMemorySize = 0x%lx\n", lmb_phys_mem_size());
printk("ppc64_caches.dcache_line_size = 0x%x\n",
ppc64_caches.dline_size);
{
struct vio_dev *viodev;
unsigned int *unit_address;
- unsigned int *irq_p;
/* we need the 'device_type' property, in order to match with drivers */
if (of_node->type == NULL) {
viodev->dev.platform_data = of_node_get(of_node);
- viodev->irq = NO_IRQ;
- irq_p = (unsigned int *)get_property(of_node, "interrupts", NULL);
- if (irq_p) {
- int virq = virt_irq_create_mapping(*irq_p);
- if (virq == NO_IRQ) {
- printk(KERN_ERR "Unable to allocate interrupt "
- "number for %s\n", of_node->full_name);
- } else
- viodev->irq = irq_offset_up(virq);
- }
+ viodev->irq = irq_of_parse_and_map(of_node, 0);
snprintf(viodev->dev.bus_id, BUS_ID_SIZE, "%x", *unit_address);
viodev->name = of_node->name;
/*
* Cell Internal Interrupt Controller
*
+ * Copyright (C) 2006 Benjamin Herrenschmidt (benh@kernel.crashing.org)
+ * IBM, Corp.
+ *
* (C) Copyright IBM Deutschland Entwicklung GmbH 2005
*
* Author: Arnd Bergmann <arndb@de.ibm.com>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/types.h>
+#include <linux/ioport.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/ptrace.h>
+#include <asm/machdep.h>
#include "interrupt.h"
#include "cbe_regs.h"
u8 target_id;
u8 eoi_stack[16];
int eoi_ptr;
+ struct irq_host *host;
};
static DEFINE_PER_CPU(struct iic, iic);
+#define IIC_NODE_COUNT 2
+static struct irq_host *iic_hosts[IIC_NODE_COUNT];
+
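+/* Layout of the IIC hardware interrupt number space, as encoded by
+ * iic_pending_to_hwnum() below and the IIC_IRQ_* values: for external
+ * sources (0x00-0x3f) bits 5..4 carry the interrupt class and bits 3..0
+ * the unit, while IPIs live at IIC_IRQ_IPI0 | (prio >> 4).
+ */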
+/* Convert between "pending" bits and hw irq number */
+static irq_hw_number_t iic_pending_to_hwnum(struct cbe_iic_pending_bits bits)
+{
+ unsigned char unit = bits.source & 0xf;
+
+ if (bits.flags & CBE_IIC_IRQ_IPI)
+ return IIC_IRQ_IPI0 | (bits.prio >> 4);
+ else if (bits.class <= 3)
+ return (bits.class << 4) | unit;
+ else
+ return IIC_IRQ_INVALID;
+}
static void iic_mask(unsigned int irq)
{
.eoi = iic_eoi,
};
-/* XXX All of this has to be reworked completely. We need to assign a real
- * interrupt numbers to the external interrupts and remove all the hard coded
- * interrupt maps (rely on the device-tree whenever possible).
- *
- * Basically, my scheme is to define the "pendings" bits to be the HW interrupt
- * number (ignoring the data and flags here). That means we can sort-of split
- * external sources based on priority, and we can use request_irq() on pretty
- * much anything.
- *
- * For spider or axon, they have their own interrupt space. spider will just have
- * local "hardward" interrupts 0...xx * node stride. The node stride is not
- * necessary (separate interrupt chips will have separate HW number space), but
- * will allow to be compatible with existing device-trees.
- *
- * All of thise little world will get a standard remapping scheme to map those HW
- * numbers into the linux flat irq number space.
-*/
-static int iic_external_get_irq(struct cbe_iic_pending_bits pending)
-{
- int irq;
- unsigned char node, unit;
-
- node = pending.source >> 4;
- unit = pending.source & 0xf;
- irq = -1;
-
- /*
- * This mapping is specific to the Cell Broadband
- * Engine. We might need to get the numbers
- * from the device tree to support future CPUs.
- */
- switch (unit) {
- case 0x00:
- case 0x0b:
- /*
- * One of these units can be connected
- * to an external interrupt controller.
- */
- if (pending.class != 2)
- break;
- /* TODO: We might want to silently ignore cascade interrupts
- * when no cascade handler exist yet
- */
- irq = IIC_EXT_CASCADE + node * IIC_NODE_STRIDE;
- break;
- case 0x01 ... 0x04:
- case 0x07 ... 0x0a:
- /*
- * These units are connected to the SPEs
- */
- if (pending.class > 2)
- break;
- irq = IIC_SPE_OFFSET
- + pending.class * IIC_CLASS_STRIDE
- + node * IIC_NODE_STRIDE
- + unit;
- break;
- }
- if (irq == -1)
- printk(KERN_WARNING "Unexpected interrupt class %02x, "
- "source %02x, prio %02x, cpu %02x\n", pending.class,
- pending.source, pending.prio, smp_processor_id());
- return irq;
-}
-
/* Get an IRQ number from the pending state register of the IIC */
-int iic_get_irq(struct pt_regs *regs)
-{
- struct iic *iic;
- int irq;
- struct cbe_iic_pending_bits pending;
-
- iic = &__get_cpu_var(iic);
- *(unsigned long *) &pending =
- in_be64((unsigned long __iomem *) &iic->regs->pending_destr);
- iic->eoi_stack[++iic->eoi_ptr] = pending.prio;
- BUG_ON(iic->eoi_ptr > 15);
-
- irq = -1;
- if (pending.flags & CBE_IIC_IRQ_VALID) {
- if (pending.flags & CBE_IIC_IRQ_IPI) {
- irq = IIC_IPI_OFFSET + (pending.prio >> 4);
-/*
- if (irq > 0x80)
- printk(KERN_WARNING "Unexpected IPI prio %02x"
- "on CPU %02x\n", pending.prio,
- smp_processor_id());
-*/
- } else {
- irq = iic_external_get_irq(pending);
- }
- }
- return irq;
-}
-
-/* hardcoded part to be compatible with older firmware */
-
-static int __init setup_iic_hardcoded(void)
-{
- struct device_node *np;
- int nodeid, cpu;
- unsigned long regs;
- struct iic *iic;
-
- for_each_possible_cpu(cpu) {
- iic = &per_cpu(iic, cpu);
- nodeid = cpu/2;
-
- for (np = of_find_node_by_type(NULL, "cpu");
- np;
- np = of_find_node_by_type(np, "cpu")) {
- if (nodeid == *(int *)get_property(np, "node-id", NULL))
- break;
- }
-
- if (!np) {
- printk(KERN_WARNING "IIC: CPU %d not found\n", cpu);
- iic->regs = NULL;
- iic->target_id = 0xff;
- return -ENODEV;
- }
-
- regs = *(long *)get_property(np, "iic", NULL);
-
- /* hack until we have decided on the devtree info */
- regs += 0x400;
- if (cpu & 1)
- regs += 0x20;
-
- printk(KERN_INFO "IIC for CPU %d at %lx\n", cpu, regs);
- iic->regs = ioremap(regs, sizeof(struct cbe_iic_thread_regs));
- iic->target_id = (nodeid << 4) + ((cpu & 1) ? 0xf : 0xe);
- iic->eoi_stack[0] = 0xff;
- }
-
- return 0;
-}
-
-static int __init setup_iic(void)
+static unsigned int iic_get_irq(struct pt_regs *regs)
{
- struct device_node *dn;
- unsigned long *regs;
- char *compatible;
- unsigned *np, found = 0;
- struct iic *iic = NULL;
-
- for (dn = NULL; (dn = of_find_node_by_name(dn, "interrupt-controller"));) {
- compatible = (char *)get_property(dn, "compatible", NULL);
-
- if (!compatible) {
- printk(KERN_WARNING "no compatible property found !\n");
- continue;
- }
-
- if (strstr(compatible, "IBM,CBEA-Internal-Interrupt-Controller"))
- regs = (unsigned long *)get_property(dn,"reg", NULL);
- else
- continue;
-
- if (!regs)
- printk(KERN_WARNING "IIC: no reg property\n");
-
- np = (unsigned int *)get_property(dn, "ibm,interrupt-server-ranges", NULL);
-
- if (!np) {
- printk(KERN_WARNING "IIC: CPU association not found\n");
- iic->regs = NULL;
- iic->target_id = 0xff;
- return -ENODEV;
- }
-
- iic = &per_cpu(iic, np[0]);
- iic->regs = ioremap(regs[0], sizeof(struct cbe_iic_thread_regs));
- iic->target_id = ((np[0] & 2) << 3) + ((np[0] & 1) ? 0xf : 0xe);
- iic->eoi_stack[0] = 0xff;
- printk("IIC for CPU %d at %lx mapped to %p\n", np[0], regs[0], iic->regs);
-
- iic = &per_cpu(iic, np[1]);
- iic->regs = ioremap(regs[2], sizeof(struct cbe_iic_thread_regs));
- iic->target_id = ((np[1] & 2) << 3) + ((np[1] & 1) ? 0xf : 0xe);
- iic->eoi_stack[0] = 0xff;
-
- printk("IIC for CPU %d at %lx mapped to %p\n", np[1], regs[2], iic->regs);
-
- found++;
- }
-
- if (found)
- return 0;
- else
- return -ENODEV;
+ struct cbe_iic_pending_bits pending;
+ struct iic *iic;
+
+ iic = &__get_cpu_var(iic);
+ *(unsigned long *) &pending =
+ in_be64((unsigned long __iomem *) &iic->regs->pending_destr);
+ iic->eoi_stack[++iic->eoi_ptr] = pending.prio;
+ BUG_ON(iic->eoi_ptr > 15);
+ if (pending.flags & CBE_IIC_IRQ_VALID)
+ return irq_linear_revmap(iic->host,
+ iic_pending_to_hwnum(pending));
+ return NO_IRQ;
}
#ifdef CONFIG_SMP
/* Use the highest interrupt priorities for IPI */
static inline int iic_ipi_to_irq(int ipi)
{
- return IIC_IPI_OFFSET + IIC_NUM_IPIS - 1 - ipi;
+ return IIC_IRQ_IPI0 + IIC_NUM_IPIS - 1 - ipi;
}
static inline int iic_irq_to_ipi(int irq)
{
- return IIC_NUM_IPIS - 1 - (irq - IIC_IPI_OFFSET);
+ return IIC_NUM_IPIS - 1 - (irq - IIC_IRQ_IPI0);
}
void iic_setup_cpu(void)
}
EXPORT_SYMBOL_GPL(iic_get_target_id);
+struct irq_host *iic_get_irq_host(int node)
+{
+ if (node < 0 || node >= IIC_NODE_COUNT)
+ return NULL;
+ return iic_hosts[node];
+}
+EXPORT_SYMBOL_GPL(iic_get_irq_host);
+
+
static irqreturn_t iic_ipi_action(int irq, void *dev_id, struct pt_regs *regs)
{
- smp_message_recv(iic_irq_to_ipi(irq), regs);
+ int ipi = (int)(long)dev_id;
+
+ smp_message_recv(ipi, regs);
+
return IRQ_HANDLED;
}
static void iic_request_ipi(int ipi, const char *name)
{
- int irq;
+ int node, virq;
- irq = iic_ipi_to_irq(ipi);
-
- /* IPIs are marked IRQF_DISABLED as they must run with irqs
- * disabled */
- set_irq_chip_and_handler(irq, &iic_chip, handle_percpu_irq);
- request_irq(irq, iic_ipi_action, IRQF_DISABLED, name, NULL);
+ for (node = 0; node < IIC_NODE_COUNT; node++) {
+ char *rname;
+ if (iic_hosts[node] == NULL)
+ continue;
+ virq = irq_create_mapping(iic_hosts[node],
+ iic_ipi_to_irq(ipi), 0);
+ if (virq == NO_IRQ) {
+ printk(KERN_ERR
+ "iic: failed to map IPI %s on node %d\n",
+ name, node);
+ continue;
+ }
+ rname = kzalloc(strlen(name) + 16, GFP_KERNEL);
+ if (rname)
+ sprintf(rname, "%s node %d", name, node);
+ else
+ rname = (char *)name;
+ if (request_irq(virq, iic_ipi_action, IRQF_DISABLED,
+ rname, (void *)(long)ipi))
+ printk(KERN_ERR
+ "iic: failed to request IPI %s on node %d\n",
+ name, node);
+ }
}
void iic_request_IPIs(void)
iic_request_ipi(PPC_MSG_DEBUGGER_BREAK, "IPI-debug");
#endif /* CONFIG_DEBUGGER */
}
+
#endif /* CONFIG_SMP */
-static void __init iic_setup_builtin_handlers(void)
+
+static int iic_host_match(struct irq_host *h, struct device_node *node)
+{
+ return h->host_data != NULL && node == h->host_data;
+}
+
+static int iic_host_map(struct irq_host *h, unsigned int virq,
+ irq_hw_number_t hw, unsigned int flags)
+{
+ if (hw < IIC_IRQ_IPI0)
+ set_irq_chip_and_handler(virq, &iic_chip, handle_fasteoi_irq);
+ else
+ set_irq_chip_and_handler(virq, &iic_chip, handle_percpu_irq);
+ return 0;
+}
+
+static int iic_host_xlate(struct irq_host *h, struct device_node *ct,
+ u32 *intspec, unsigned int intsize,
+ irq_hw_number_t *out_hwirq, unsigned int *out_flags)
+
{
- int be, isrc;
+ /* Currently, we don't translate anything. That needs to be fixed as
+ * we get better-defined device trees. iic interrupts have to be
+ * explicitly mapped by whoever needs them
+ */
+ return -ENODEV;
+}
+
+static struct irq_host_ops iic_host_ops = {
+ .match = iic_host_match,
+ .map = iic_host_map,
+ .xlate = iic_host_xlate,
+};
+
+static void __init init_one_iic(unsigned int hw_cpu, unsigned long addr,
+ struct irq_host *host)
+{
+ /* XXX FIXME: should locate the Linux CPU number from the HW CPU
+ * number properly. We are lucky for now
+ */
+ struct iic *iic = &per_cpu(iic, hw_cpu);
- /* XXX FIXME: Assume two threads per BE are present */
- for (be=0; be < num_present_cpus() / 2; be++) {
- int irq;
+ iic->regs = ioremap(addr, sizeof(struct cbe_iic_thread_regs));
+ BUG_ON(iic->regs == NULL);
- /* setup SPE chip and handlers */
- for (isrc = 0; isrc < IIC_CLASS_STRIDE * 3; isrc++) {
- irq = IIC_NODE_STRIDE * be + IIC_SPE_OFFSET + isrc;
- set_irq_chip_and_handler(irq, &iic_chip, handle_fasteoi_irq);
+ iic->target_id = ((hw_cpu & 2) << 3) | ((hw_cpu & 1) ? 0xf : 0xe);
+ iic->eoi_stack[0] = 0xff;
+ iic->host = host;
+ out_be64(&iic->regs->prio, 0);
+
+ printk(KERN_INFO "IIC for CPU %d at %lx mapped to %p, target id 0x%x\n",
+ hw_cpu, addr, iic->regs, iic->target_id);
+}
+
+static int __init setup_iic(void)
+{
+ struct device_node *dn;
+ struct resource r0, r1;
+ struct irq_host *host;
+ int found = 0;
+ u32 *np;
+
+ for (dn = NULL;
+ (dn = of_find_node_by_name(dn, "interrupt-controller")) != NULL;) {
+ if (!device_is_compatible(dn,
+ "IBM,CBEA-Internal-Interrupt-Controller"))
+ continue;
+ np = (u32 *)get_property(dn, "ibm,interrupt-server-ranges",
+ NULL);
+ if (np == NULL) {
+ printk(KERN_WARNING "IIC: CPU association not found\n");
+ of_node_put(dn);
+ return -ENODEV;
}
- /* setup cascade chip */
- irq = IIC_EXT_CASCADE + be * IIC_NODE_STRIDE;
- set_irq_chip_and_handler(irq, &iic_chip, handle_fasteoi_irq);
+ if (of_address_to_resource(dn, 0, &r0) ||
+ of_address_to_resource(dn, 1, &r1)) {
+ printk(KERN_WARNING "IIC: Can't resolve addresses\n");
+ of_node_put(dn);
+ return -ENODEV;
+ }
+ host = NULL;
+ if (found < IIC_NODE_COUNT) {
+ host = irq_alloc_host(IRQ_HOST_MAP_LINEAR,
+ IIC_SOURCE_COUNT,
+ &iic_host_ops,
+ IIC_IRQ_INVALID);
+ iic_hosts[found] = host;
+ BUG_ON(iic_hosts[found] == NULL);
+ iic_hosts[found]->host_data = of_node_get(dn);
+ found++;
+ }
+ init_one_iic(np[0], r0.start, host);
+ init_one_iic(np[1], r1.start, host);
}
+
+ if (found)
+ return 0;
+ else
+ return -ENODEV;
}
void __init iic_init_IRQ(void)
{
- int cpu, irq_offset;
- struct iic *iic;
-
+ /* Discover and initialize iics */
if (setup_iic() < 0)
- setup_iic_hardcoded();
+ panic("IIC: Failed to initialize !\n");
- irq_offset = 0;
- for_each_possible_cpu(cpu) {
- iic = &per_cpu(iic, cpu);
- if (iic->regs)
- out_be64(&iic->regs->prio, 0xff);
- }
- iic_setup_builtin_handlers();
+ /* Set master interrupt handling function */
+ ppc_md.get_irq = iic_get_irq;
+ /* Enable on current CPU */
+ iic_setup_cpu();
}
*/
enum {
- IIC_EXT_OFFSET = 0x00, /* Start of south bridge IRQs */
- IIC_EXT_CASCADE = 0x20, /* There is no interrupt 32 on spider */
- IIC_NUM_EXT = 0x40, /* Number of south bridge IRQs */
- IIC_SPE_OFFSET = 0x40, /* Start of SPE interrupts */
- IIC_CLASS_STRIDE = 0x10, /* SPE IRQs per class */
- IIC_IPI_OFFSET = 0x70, /* Start of IPI IRQs */
- IIC_NUM_IPIS = 0x10, /* IRQs reserved for IPI */
- IIC_NODE_STRIDE = 0x80, /* Total IRQs per node */
+ IIC_IRQ_INVALID = 0xff,
+ IIC_IRQ_MAX = 0x3f,
+ IIC_IRQ_EXT_IOIF0 = 0x20,
+ IIC_IRQ_EXT_IOIF1 = 0x2b,
+ IIC_IRQ_IPI0 = 0x40,
+ IIC_NUM_IPIS = 0x10, /* IRQs reserved for IPI */
+ IIC_SOURCE_COUNT = 0x50,
};
extern void iic_init_IRQ(void);
-extern int iic_get_irq(struct pt_regs *regs);
extern void iic_cause_IPI(int cpu, int mesg);
extern void iic_request_IPIs(void);
extern void iic_setup_cpu(void);
extern u8 iic_get_target_id(int cpu);
+extern struct irq_host *iic_get_irq_host(int node);
extern void spider_init_IRQ(void);
printk("*** %04x : %s\n", hex, s ? s : "");
}
+static void __init cell_pcibios_fixup(void)
+{
+ struct pci_dev *dev = NULL;
+
+ for_each_pci_dev(dev)
+ pci_read_irq_line(dev);
+}
+
static void __init cell_init_irq(void)
{
iic_init_IRQ();
cell_init_iommu();
- ppc64_interrupt_controller = IC_CELL_PIC;
-
DBG(" <- cell_init_early()\n");
}
.check_legacy_ioport = cell_check_legacy_ioport,
.progress = cell_progress,
.init_IRQ = cell_init_irq,
- .get_irq = iic_get_irq,
-
+ .pcibios_fixup = cell_pcibios_fixup,
#ifdef CONFIG_KEXEC
.machine_kexec = default_machine_kexec,
.machine_kexec_prepare = default_machine_kexec_prepare,
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/ioport.h>
#include <asm/pgtable.h>
#include <asm/prom.h>
REISWAITEN = 0x508, /* Reissue Wait Control*/
};
-static void __iomem *spider_pics[4];
+#define SPIDER_CHIP_COUNT 4
+#define SPIDER_SRC_COUNT 64
+#define SPIDER_IRQ_INVALID 63
-static void __iomem *spider_get_pic(int irq)
-{
- int node = irq / IIC_NODE_STRIDE;
- irq %= IIC_NODE_STRIDE;
-
- if (irq >= IIC_EXT_OFFSET &&
- irq < IIC_EXT_OFFSET + IIC_NUM_EXT &&
- spider_pics)
- return spider_pics[node];
- return NULL;
-}
+struct spider_pic {
+ struct irq_host *host;
+ struct device_node *of_node;
+ void __iomem *regs;
+ unsigned int node_id;
+};
+static struct spider_pic spider_pics[SPIDER_CHIP_COUNT];
-static int spider_get_nr(unsigned int irq)
+static struct spider_pic *spider_virq_to_pic(unsigned int virq)
{
- return (irq % IIC_NODE_STRIDE) - IIC_EXT_OFFSET;
+ return irq_map[virq].host->host_data;
}
-static void __iomem *spider_get_irq_config(int irq)
+static void __iomem *spider_get_irq_config(struct spider_pic *pic,
+ unsigned int src)
{
- void __iomem *pic;
- pic = spider_get_pic(irq);
- return pic + TIR_CFGA + 8 * spider_get_nr(irq);
+ return pic->regs + TIR_CFGA + 8 * src;
}
-static void spider_unmask_irq(unsigned int irq)
+static void spider_unmask_irq(unsigned int virq)
{
- int nodeid = (irq / IIC_NODE_STRIDE) * 0x10;
- void __iomem *cfg = spider_get_irq_config(irq);
- irq = spider_get_nr(irq);
+ struct spider_pic *pic = spider_virq_to_pic(virq);
+ void __iomem *cfg = spider_get_irq_config(pic, irq_map[virq].hwirq);
- /* FIXME: Most of that is configuration and has nothing to do with enabling/disable,
- * besides, it's also partially bogus.
+ /* We use no locking as we should be covered by the descriptor lock
+ * for access to individual source configuration registers
+ */
- out_be32(cfg, (in_be32(cfg) & ~0xf0)| 0x3107000eu | nodeid);
- out_be32(cfg + 4, in_be32(cfg + 4) | 0x00020000u | irq);
+ out_be32(cfg, in_be32(cfg) | 0x30000000u);
}
-static void spider_mask_irq(unsigned int irq)
+static void spider_mask_irq(unsigned int virq)
{
- void __iomem *cfg = spider_get_irq_config(irq);
- irq = spider_get_nr(irq);
+ struct spider_pic *pic = spider_virq_to_pic(virq);
+ void __iomem *cfg = spider_get_irq_config(pic, irq_map[virq].hwirq);
+ /* We use no locking as we should be covered by the descriptor lock
+ * for access to individual source configuration registers
+ */
out_be32(cfg, in_be32(cfg) & ~0x30000000u);
}
-static void spider_ack_irq(unsigned int irq)
+static void spider_ack_irq(unsigned int virq)
{
- /* Should reset edge detection logic but we don't configure any edge interrupt
- * at the moment.
+ struct spider_pic *pic = spider_virq_to_pic(virq);
+ unsigned int src = irq_map[virq].hwirq;
+
+ /* Reset edge detection logic if necessary
*/
+ if (get_irq_desc(virq)->status & IRQ_LEVEL)
+ return;
+
+ /* Only interrupts 47 to 50 can be set to edge */
+ if (src < 47 || src > 50)
+ return;
+
+ /* Perform the clear of the edge logic */
+ out_be32(pic->regs + TIR_EDC, 0x100 | (src & 0xf));
}
static struct irq_chip spider_pic = {
.ack = spider_ack_irq,
};
-static int spider_get_irq(int node)
+static int spider_host_match(struct irq_host *h, struct device_node *node)
+{
+ struct spider_pic *pic = h->host_data;
+ return node == pic->of_node;
+}
+
+static int spider_host_map(struct irq_host *h, unsigned int virq,
+ irq_hw_number_t hw, unsigned int flags)
{
- unsigned long cs;
- void __iomem *regs = spider_pics[node];
+ unsigned int sense = flags & IRQ_TYPE_SENSE_MASK;
+ struct spider_pic *pic = h->host_data;
+ void __iomem *cfg = spider_get_irq_config(pic, hw);
+ int level = 0;
+ u32 ic;
+
+ /* Note that only level high is supported for most interrupts */
+ if (sense != IRQ_TYPE_NONE && sense != IRQ_TYPE_LEVEL_HIGH &&
+ (hw < 47 || hw > 50))
+ return -EINVAL;
+
+ /* Decode sense type */
+ switch(sense) {
+ case IRQ_TYPE_EDGE_RISING:
+ ic = 0x3;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ ic = 0x2;
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ ic = 0x0;
+ level = 1;
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ case IRQ_TYPE_NONE:
+ ic = 0x1;
+ level = 1;
+ break;
+ default:
+ return -EINVAL;
+ }
- cs = in_be32(regs + TIR_CS) >> 24;
+ /* Configure the source. One gross hack that was there before and
+ * that I've kept around is the priority to the BE which I set to
+ * be the same as the interrupt source number. I don't know wether
+ * that's supposed to make any kind of sense however, we'll have to
+ * decide that, but for now, I'm not changing the behaviour.
+ */
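+ /* (The cfg + 4 write keeps the values the old code used: 0x2 << 16
+ * is the old 0x00020000u and the low byte is the source number.)
+ */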
+ out_be32(cfg, (ic << 24) | (0x7 << 16) | (pic->node_id << 4) | 0xe);
+ out_be32(cfg + 4, (0x2 << 16) | (hw & 0xff));
- if (cs == 63)
- return -1;
- else
- return cs;
+ if (level)
+ get_irq_desc(virq)->status |= IRQ_LEVEL;
+ set_irq_chip_and_handler(virq, &spider_pic, handle_level_irq);
+ return 0;
+}
+
+static int spider_host_xlate(struct irq_host *h, struct device_node *ct,
+ u32 *intspec, unsigned int intsize,
+ irq_hw_number_t *out_hwirq, unsigned int *out_flags)
+{
+ /* Spider interrupts have 2 cells, first is the interrupt source,
+ * second, well, I don't know for sure yet ... We mask the top bits
+ * because old device-trees encode a node number in there
+ */
+ *out_hwirq = intspec[0] & 0x3f;
+ *out_flags = IRQ_TYPE_LEVEL_HIGH;
+ return 0;
}
+static struct irq_host_ops spider_host_ops = {
+ .match = spider_host_match,
+ .map = spider_host_map,
+ .xlate = spider_host_xlate,
+};
+
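+/* A device hanging off this PIC now gets its interrupt the generic way,
+ * roughly (sketch only, the names below are illustrative):
+ *
+ *	unsigned int virq = irq_of_parse_and_map(np, 0);
+ *	if (virq != NO_IRQ)
+ *		request_irq(virq, handler, 0, "mydev", dev);
+ *
+ * irq_create_of_mapping() selects this host via spider_host_match(),
+ * decodes the specifier with spider_host_xlate() and configures the
+ * source in spider_host_map().
+ */
+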
static void spider_irq_cascade(unsigned int irq, struct irq_desc *desc,
struct pt_regs *regs)
{
- int node = (int)(long)desc->handler_data;
- int cascade_irq;
+ struct spider_pic *pic = desc->handler_data;
+ unsigned int cs, virq;
- cascade_irq = spider_get_irq(node);
- generic_handle_irq(cascade_irq, regs);
+ cs = in_be32(pic->regs + TIR_CS) >> 24;
+ if (cs == SPIDER_IRQ_INVALID)
+ virq = NO_IRQ;
+ else
+ virq = irq_linear_revmap(pic->host, cs);
+ if (virq != NO_IRQ)
+ generic_handle_irq(virq, regs);
desc->chip->eoi(irq);
}
-/* hardcoded part to be compatible with older firmware */
+/* For hooking up the cascade we have a problem. Our device-tree is
+ * crap and we don't know on which BE iic interrupt we are hooked on,
+ * at least not the "standard" way. We can reconstitute it based on two
+ * pieces of information though: which BE node we are connected to and
+ * whether we are connected to IOIF0 or IOIF1. Right now, we really only care
+ * about the IBM cell blade and we know that its firmware gives us an
+ * interrupt-map property which is pretty strange.
+ */
+static unsigned int __init spider_find_cascade_and_node(struct spider_pic *pic)
+{
+ unsigned int virq;
+ u32 *imap, *tmp;
+ int imaplen, intsize, unit;
+ struct device_node *iic;
+ struct irq_host *iic_host;
+
+#if 0 /* Enable that when we have a way to retrieve the node as well */
+ /* First, we check whether we have a real "interrupts" in the device
+ * tree in case the device-tree is ever fixed
+ */
+ struct of_irq oirq;
+ if (of_irq_map_one(pic->of_node, 0, &oirq) == 0) {
+ virq = irq_create_of_mapping(oirq.controller, oirq.specifier,
+ oirq.size);
+ goto bail;
+ }
+#endif
+
+ /* Now do the horrible hacks */
+ tmp = (u32 *)get_property(pic->of_node, "#interrupt-cells", NULL);
+ if (tmp == NULL)
+ return NO_IRQ;
+ intsize = *tmp;
+ imap = (u32 *)get_property(pic->of_node, "interrupt-map", &imaplen);
+ if (imap == NULL || imaplen < (intsize + 1))
+ return NO_IRQ;
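+ /* This assumes each interrupt-map entry is laid out as
+ * <child intspec (intsize cells)> <parent phandle> <parent intspec>
+ * with no unit address cells, so the iic phandle sits right after
+ * the child specifier; we only look at the first entry.
+ */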
+ iic = of_find_node_by_phandle(imap[intsize]);
+ if (iic == NULL)
+ return NO_IRQ;
+ imap += intsize + 1;
+ tmp = (u32 *)get_property(iic, "#interrupt-cells", NULL);
+ if (tmp == NULL)
+ return NO_IRQ;
+ intsize = *tmp;
+ /* Assume unit is last entry of interrupt specifier */
+ unit = imap[intsize - 1];
+ /* Ok, we have a unit, now let's try to get the node */
+ tmp = (u32 *)get_property(iic, "ibm,interrupt-server-ranges", NULL);
+ if (tmp == NULL) {
+ of_node_put(iic);
+ return NO_IRQ;
+ }
+ /* ugly as hell but works for now */
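+ /* (presumably because each BE node shows up as two interrupt
+ * servers, one per HW thread, so the first server number is
+ * twice the node id)
+ */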
+ pic->node_id = (*tmp) >> 1;
+ of_node_put(iic);
+
+ /* Ok, now let's get cracking. You may ask me why I just didn't match
+ * the iic host from the iic OF node, but that way I'm still compatible
+ * with really old firmwares for which we don't have such a node
+ */
+ iic_host = iic_get_irq_host(pic->node_id);
+ if (iic_host == NULL)
+ return NO_IRQ;
+ /* Manufacture an IIC interrupt number of class 2 */
+ virq = irq_create_mapping(iic_host, 0x20 | unit, 0);
+ if (virq == NO_IRQ)
+ printk(KERN_ERR "spider_pic: failed to map cascade !");
+ return virq;
+}
+
-static void __init spider_init_one(int node, unsigned long addr)
+static void __init spider_init_one(struct device_node *of_node, int chip,
+ unsigned long addr)
{
- int n, irq;
+ struct spider_pic *pic = &spider_pics[chip];
+ int i, virq;
- spider_pics[node] = ioremap(addr, 0x800);
- if (spider_pics[node] == NULL)
+ /* Map registers */
+ pic->regs = ioremap(addr, 0x1000);
+ if (pic->regs == NULL)
panic("spider_pic: can't map registers !");
- printk(KERN_INFO "spider_pic: mapped for node %d, addr: 0x%lx mapped to %p\n",
- node, addr, spider_pics[node]);
+ /* Allocate a host */
+ pic->host = irq_alloc_host(IRQ_HOST_MAP_LINEAR, SPIDER_SRC_COUNT,
+ &spider_host_ops, SPIDER_IRQ_INVALID);
+ if (pic->host == NULL)
+ panic("spider_pic: can't allocate irq host !");
+ pic->host->host_data = pic;
- for (n = 0; n < IIC_NUM_EXT; n++) {
- if (n == IIC_EXT_CASCADE)
- continue;
- irq = n + IIC_EXT_OFFSET + node * IIC_NODE_STRIDE;
- set_irq_chip_and_handler(irq, &spider_pic, handle_level_irq);
- get_irq_desc(irq)->status |= IRQ_LEVEL;
+ /* Fill out other bits */
+ pic->of_node = of_node_get(of_node);
+
+ /* Go through all sources and disable them */
+ for (i = 0; i < SPIDER_SRC_COUNT; i++) {
+ void __iomem *cfg = pic->regs + TIR_CFGA + 8 * i;
+ out_be32(cfg, in_be32(cfg) & ~0x30000000u);
}
/* do not mask any interrupts because of level */
- out_be32(spider_pics[node] + TIR_MSK, 0x0);
-
- /* disable edge detection clear */
- /* out_be32(spider_pics[node] + TIR_EDC, 0x0); */
+ out_be32(pic->regs + TIR_MSK, 0x0);
/* enable interrupt packets to be output */
- out_be32(spider_pics[node] + TIR_PIEN,
- in_be32(spider_pics[node] + TIR_PIEN) | 0x1);
+ out_be32(pic->regs + TIR_PIEN, in_be32(pic->regs + TIR_PIEN) | 0x1);
- /* Hook up cascade */
- irq = IIC_EXT_CASCADE + node * IIC_NODE_STRIDE;
- set_irq_data(irq, (void *)(long)node);
- set_irq_chained_handler(irq, spider_irq_cascade);
+ /* Hook up the cascade interrupt to the iic and nodeid */
+ virq = spider_find_cascade_and_node(pic);
+ if (virq == NO_IRQ)
+ return;
+ set_irq_data(virq, pic);
+ set_irq_chained_handler(virq, spider_irq_cascade);
+
+ printk(KERN_INFO "spider_pic: node %d, addr: 0x%lx %s\n",
+ pic->node_id, addr, of_node->full_name);
/* Enable the interrupt detection enable bit. Do this last! */
- out_be32(spider_pics[node] + TIR_DEN,
- in_be32(spider_pics[node] + TIR_DEN) | 0x1);
+ out_be32(pic->regs + TIR_DEN, in_be32(pic->regs + TIR_DEN) | 0x1);
}
void __init spider_init_IRQ(void)
{
- unsigned long *spider_reg;
+ struct resource r;
struct device_node *dn;
- char *compatible;
- int node = 0;
-
- /* XXX node numbers are totally bogus. We _hope_ we get the device nodes in the right
- * order here but that's definitely not guaranteed, we need to get the node from the
- * device tree instead. There is currently no proper property for it (but our whole
- * device-tree is bogus anyway) so all we can do is pray or maybe test the address
- * and deduce the node-id
+ int chip = 0;
+
+ /* XXX node numbers are totally bogus. We _hope_ we get the device
+ * nodes in the right order here but that's definitely not guaranteed,
+ * we need to get the node from the device tree instead.
+ * There is currently no proper property for it (but our whole
+ * device-tree is bogus anyway) so all we can do is pray or maybe test
+ * the address and deduce the node-id
*/
- for (dn = NULL; (dn = of_find_node_by_name(dn, "interrupt-controller"));) {
- compatible = (char *)get_property(dn, "compatible", NULL);
-
- if (!compatible)
- continue;
-
- if (strstr(compatible, "CBEA,platform-spider-pic"))
- spider_reg = (unsigned long *)get_property(dn, "reg", NULL);
- else if (strstr(compatible, "sti,platform-spider-pic") && (node < 2)) {
- static long hard_coded_pics[] = { 0x24000008000, 0x34000008000 };
- spider_reg = &hard_coded_pics[node];
+ for (dn = NULL;
+ (dn = of_find_node_by_name(dn, "interrupt-controller"));) {
+ if (device_is_compatible(dn, "CBEA,platform-spider-pic")) {
+ if (of_address_to_resource(dn, 0, &r)) {
+ printk(KERN_WARNING "spider-pic: Failed\n");
+ continue;
+ }
+ } else if (device_is_compatible(dn, "sti,platform-spider-pic")
+ && (chip < 2)) {
+ static long hard_coded_pics[] =
+ { 0x24000008000, 0x34000008000 };
+ r.start = hard_coded_pics[chip];
} else
continue;
-
- if (spider_reg == NULL)
- printk(KERN_ERR "spider_pic: No address for node %d\n", node);
-
- spider_init_one(node, *spider_reg);
- node++;
+ spider_init_one(dn, chip++, r.start);
}
}
return stat ? IRQ_HANDLED : IRQ_NONE;
}
-static int
-spu_request_irqs(struct spu *spu)
+static int spu_request_irqs(struct spu *spu)
{
- int ret;
- int irq_base;
-
- irq_base = IIC_NODE_STRIDE * spu->node + IIC_SPE_OFFSET;
-
- snprintf(spu->irq_c0, sizeof (spu->irq_c0), "spe%02d.0", spu->number);
- ret = request_irq(irq_base + spu->isrc,
- spu_irq_class_0, IRQF_DISABLED, spu->irq_c0, spu);
- if (ret)
- goto out;
-
- snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1", spu->number);
- ret = request_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc,
- spu_irq_class_1, IRQF_DISABLED, spu->irq_c1, spu);
- if (ret)
- goto out1;
+ int ret = 0;
- snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2", spu->number);
- ret = request_irq(irq_base + 2*IIC_CLASS_STRIDE + spu->isrc,
- spu_irq_class_2, IRQF_DISABLED, spu->irq_c2, spu);
- if (ret)
- goto out2;
- goto out;
+ if (spu->irqs[0] != NO_IRQ) {
+ snprintf(spu->irq_c0, sizeof (spu->irq_c0), "spe%02d.0",
+ spu->number);
+ ret = request_irq(spu->irqs[0], spu_irq_class_0,
+ IRQF_DISABLED,
+ spu->irq_c0, spu);
+ if (ret)
+ goto bail0;
+ }
+ if (spu->irqs[1] != NO_IRQ) {
+ snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1",
+ spu->number);
+ ret = request_irq(spu->irqs[1], spu_irq_class_1,
+ IRQF_DISABLED,
+ spu->irq_c1, spu);
+ if (ret)
+ goto bail1;
+ }
+ if (spu->irqs[2] != NO_IRQ) {
+ snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2",
+ spu->number);
+ ret = request_irq(spu->irqs[2], spu_irq_class_2,
+ IRQF_DISABLED,
+ spu->irq_c2, spu);
+ if (ret)
+ goto bail2;
+ }
+ return 0;
-out2:
- free_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc, spu);
-out1:
- free_irq(irq_base + spu->isrc, spu);
-out:
+bail2:
+ if (spu->irqs[1] != NO_IRQ)
+ free_irq(spu->irqs[1], spu);
+bail1:
+ if (spu->irqs[0] != NO_IRQ)
+ free_irq(spu->irqs[0], spu);
+bail0:
return ret;
}
-static void
-spu_free_irqs(struct spu *spu)
+static void spu_free_irqs(struct spu *spu)
{
- int irq_base;
-
- irq_base = IIC_NODE_STRIDE * spu->node + IIC_SPE_OFFSET;
-
- free_irq(irq_base + spu->isrc, spu);
- free_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc, spu);
- free_irq(irq_base + 2*IIC_CLASS_STRIDE + spu->isrc, spu);
+ if (spu->irqs[0] != NO_IRQ)
+ free_irq(spu->irqs[0], spu);
+ if (spu->irqs[1] != NO_IRQ)
+ free_irq(spu->irqs[1], spu);
+ if (spu->irqs[2] != NO_IRQ)
+ free_irq(spu->irqs[2], spu);
}
static LIST_HEAD(spu_list);
iounmap((u8 __iomem *)spu->local_store);
}
+/* This function shall be abstracted for HV platforms */
+static int __init spu_map_interrupts(struct spu *spu, struct device_node *np)
+{
+ struct irq_host *host;
+ unsigned int isrc;
+ u32 *tmp;
+
+ host = iic_get_irq_host(spu->node);
+ if (host == NULL)
+ return -ENODEV;
+
+ /* Get the interrupt source from the device-tree */
+ tmp = (u32 *)get_property(np, "isrc", NULL);
+ if (!tmp)
+ return -ENODEV;
+ spu->isrc = isrc = tmp[0];
+
+ /* Now map interrupts of all 3 classes */
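+ /* The IIC hardware number encodes the class in the upper nibble
+ * and the source in the low bits: (class << 4) | isrc.
+ */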
+ spu->irqs[0] = irq_create_mapping(host, 0x00 | isrc, 0);
+ spu->irqs[1] = irq_create_mapping(host, 0x10 | isrc, 0);
+ spu->irqs[2] = irq_create_mapping(host, 0x20 | isrc, 0);
+
+ /* Right now, we only fail if class 2 failed */
+ return spu->irqs[2] == NO_IRQ ? -EINVAL : 0;
+}
+
static int __init spu_map_device(struct spu *spu, struct device_node *node)
{
char *prop;
int ret;
ret = -ENODEV;
- prop = get_property(node, "isrc", NULL);
- if (!prop)
- goto out;
- spu->isrc = *(unsigned int *)prop;
-
spu->name = get_property(node, "name", NULL);
if (!spu->name)
goto out;
return ret;
}
- sysdev_create_file(&spu->sysdev, &attr_isrc);
+ if (spu->isrc != 0)
+ sysdev_create_file(&spu->sysdev, &attr_isrc);
sysfs_add_device_to_node(&spu->sysdev, spu->nid);
return 0;
spu->nid = of_node_to_nid(spe);
if (spu->nid == -1)
spu->nid = 0;
+ ret = spu_map_interrupts(spu, spe);
+ if (ret)
+ goto out_unmap;
spin_lock_init(&spu->register_lock);
spu_mfc_sdr_set(spu, mfspr(SPRN_SDR1));
spu_mfc_sr1_set(spu, 0x33);
#include <asm/machdep.h>
#include <asm/sections.h>
#include <asm/pci-bridge.h>
-#include <asm/open_pic.h>
#include <asm/grackle.h>
#include <asm/rtas.h>
chrp_pcibios_fixup(void)
{
struct pci_dev *dev = NULL;
- struct device_node *np;
- /* PCI interrupts are controlled by the OpenPIC */
- for_each_pci_dev(dev) {
- np = pci_device_to_OF_node(dev);
- if ((np != 0) && (np->n_intrs > 0) && (np->intrs[0].line != 0))
- dev->irq = np->intrs[0].line;
- pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
- }
+ for_each_pci_dev(dev)
+ pci_read_irq_line(dev);
}
#define PRG_CL_RESET_VALID 0x00010000
int _chrp_type;
EXPORT_SYMBOL(_chrp_type);
-struct mpic *chrp_mpic;
+static struct mpic *chrp_mpic;
/* Used for doing CHRP event-scans */
DEFINE_PER_CPU(struct timer_list, heartbeat_timer);
jiffies + event_scan_interval);
}
-void chrp_8259_cascade(unsigned int irq, struct irq_desc *desc,
- struct pt_regs *regs)
+static void chrp_8259_cascade(unsigned int irq, struct irq_desc *desc,
+ struct pt_regs *regs)
{
- unsigned int max = 100;
-
- while(max--) {
- int irq = i8259_irq(regs);
- if (max == 99)
- desc->chip->eoi(irq);
- if (irq < 0)
- break;
- generic_handle_irq(irq, regs);
- };
+ unsigned int cascade_irq = i8259_irq(regs);
+ if (cascade_irq != NO_IRQ)
+ generic_handle_irq(cascade_irq, regs);
+ desc->chip->eoi(irq);
}
/*
static void __init chrp_find_openpic(void)
{
struct device_node *np, *root;
- int len, i, j, irq_count;
+ int len, i, j;
int isu_size, idu_size;
unsigned int *iranges, *opprop = NULL;
int oplen = 0;
unsigned long opaddr;
int na = 1;
- unsigned char init_senses[NR_IRQS - NUM_8259_INTERRUPTS];
- np = find_type_devices("open-pic");
+ np = of_find_node_by_type(NULL, "open-pic");
if (np == NULL)
return;
- root = find_path_device("/");
+ root = of_find_node_by_path("/");
if (root) {
opprop = (unsigned int *) get_property
(root, "platform-open-pic", &oplen);
oplen /= na * sizeof(unsigned int);
} else {
struct resource r;
- if (of_address_to_resource(np, 0, &r))
- return;
+ if (of_address_to_resource(np, 0, &r)) {
+ goto bail;
+ }
opaddr = r.start;
oplen = 0;
}
printk(KERN_INFO "OpenPIC at %lx\n", opaddr);
- irq_count = NR_IRQS - NUM_ISA_INTERRUPTS - 4; /* leave room for IPIs */
- prom_get_irq_senses(init_senses, NUM_ISA_INTERRUPTS, NR_IRQS - 4);
- /* i8259 cascade is always positive level */
- init_senses[0] = IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE;
-
iranges = (unsigned int *) get_property(np, "interrupt-ranges", &len);
if (iranges == NULL)
len = 0; /* non-distributed mpic */
if (len > 1)
isu_size = iranges[3];
- chrp_mpic = mpic_alloc(opaddr, MPIC_PRIMARY,
- isu_size, NUM_ISA_INTERRUPTS, irq_count,
- NR_IRQS - 4, init_senses, irq_count,
- " MPIC ");
+ chrp_mpic = mpic_alloc(np, opaddr, MPIC_PRIMARY,
+ isu_size, 0, " MPIC ");
if (chrp_mpic == NULL) {
printk(KERN_ERR "Failed to allocate MPIC structure\n");
- return;
+ goto bail;
}
-
j = na - 1;
for (i = 1; i < len; ++i) {
iranges += 2;
}
mpic_init(chrp_mpic);
- set_irq_chained_handler(NUM_ISA_INTERRUPTS, chrp_8259_cascade);
+ ppc_md.get_irq = mpic_get_irq;
+ bail:
+ of_node_put(root);
+ of_node_put(np);
}
#if defined(CONFIG_VT) && defined(CONFIG_INPUT_ADBHID) && defined(XMON)
};
#endif
-void __init chrp_init_IRQ(void)
+static void __init chrp_find_8259(void)
{
- struct device_node *np;
+ struct device_node *np, *pic = NULL;
unsigned long chrp_int_ack = 0;
-#if defined(CONFIG_VT) && defined(CONFIG_INPUT_ADBHID) && defined(XMON)
- struct device_node *kbd;
-#endif
+ unsigned int cascade_irq;
+ /* Look for cascade */
+ for_each_node_by_type(np, "interrupt-controller")
+ if (device_is_compatible(np, "chrp,iic")) {
+ pic = np;
+ break;
+ }
+ /* If the 8259 wasn't found in the device-tree we can't just give
+ * up: a pegasos claims to be chrp but doesn't have a proper
+ * interrupt tree. So only bail out if we also found an MPIC.
+ */
+ if (pic == NULL && chrp_mpic != NULL) {
+ printk(KERN_ERR "i8259: Not found in device-tree"
+ " assuming no legacy interrupts\n");
+ return;
+ }
+
+ /* Look for intack. In a perfect world, we would look for it on
+ * the ISA bus that holds the 8259 but heh... this works too. If
+ * we ever see a problem, we can try to re-use the pSeries code here.
+ * Also, Pegasos-type platforms don't have a proper node to start
+ * from anyway
+ */
for (np = find_devices("pci"); np != NULL; np = np->next) {
unsigned int *addrp = (unsigned int *)
get_property(np, "8259-interrupt-acknowledge", NULL);
break;
}
if (np == NULL)
- printk(KERN_ERR "Cannot find PCI interrupt acknowledge address\n");
+ printk(KERN_WARNING "Cannot find PCI interrupt acknowledge"
+ " address, polling\n");
+
+ i8259_init(pic, chrp_int_ack);
+ if (ppc_md.get_irq == NULL)
+ ppc_md.get_irq = i8259_irq;
+ if (chrp_mpic != NULL) {
+ cascade_irq = irq_of_parse_and_map(pic, 0);
+ if (cascade_irq == NO_IRQ)
+ printk(KERN_ERR "i8259: failed to map cascade irq\n");
+ else
+ set_irq_chained_handler(cascade_irq,
+ chrp_8259_cascade);
+ }
+}
+void __init chrp_init_IRQ(void)
+{
+#if defined(CONFIG_VT) && defined(CONFIG_INPUT_ADBHID) && defined(XMON)
+ struct device_node *kbd;
+#endif
chrp_find_openpic();
-
- i8259_init(chrp_int_ack, 0);
+ chrp_find_8259();
if (_chrp_type == _CHRP_Pegasos)
ppc_md.get_irq = i8259_irq;
DMA_MODE_READ = 0x44;
DMA_MODE_WRITE = 0x48;
isa_io_base = CHRP_ISA_IO_BASE; /* default value */
- ppc_do_canonicalize_irqs = 1;
-
- /* Assume we have an 8259... */
- __irq_offset_value = NUM_ISA_INTERRUPTS;
return 1;
}
.init = chrp_init2,
.show_cpuinfo = chrp_show_cpuinfo,
.init_IRQ = chrp_init_IRQ,
- .get_irq = mpic_get_irq,
.pcibios_fixup = chrp_pcibios_fixup,
.restart = rtas_restart,
.power_off = rtas_power_off,
#include <asm/smp.h>
#include <asm/residual.h>
#include <asm/time.h>
-#include <asm/open_pic.h>
#include <asm/machdep.h>
#include <asm/smp.h>
#include <asm/mpic.h>
printk(KERN_ERR "pci_event_handler: NULL event received\n");
}
-/*
- * This is called by init_IRQ. set in ppc_md.init_IRQ by iSeries_setup.c
- * It must be called before the bus walk.
- */
-void __init iSeries_init_IRQ(void)
-{
- /* Register PCI event handler and open an event path */
- int ret;
-
- ret = HvLpEvent_registerHandler(HvLpEvent_Type_PciIo,
- &pci_event_handler);
- if (ret == 0) {
- ret = HvLpEvent_openPath(HvLpEvent_Type_PciIo, 0);
- if (ret != 0)
- printk(KERN_ERR "iseries_init_IRQ: open event path "
- "failed with rc 0x%x\n", ret);
- } else
- printk(KERN_ERR "iseries_init_IRQ: register handler "
- "failed with rc 0x%x\n", ret);
-}
-
#define REAL_IRQ_TO_SUBBUS(irq) (((irq) >> 14) & 0xff)
#define REAL_IRQ_TO_BUS(irq) ((((irq) >> 6) & 0xff) + 1)
#define REAL_IRQ_TO_IDSEL(irq) ((((irq) >> 3) & 7) + 1)
{
u32 bus, dev_id, function, mask;
const u32 sub_bus = 0;
- unsigned int rirq = virt_irq_to_real_map[irq];
+ unsigned int rirq = (unsigned int)irq_map[irq].hwirq;
/* The IRQ has already been locked by the caller */
bus = REAL_IRQ_TO_BUS(rirq);
{
u32 bus, dev_id, function, mask;
const u32 sub_bus = 0;
- unsigned int rirq = virt_irq_to_real_map[irq];
+ unsigned int rirq = (unsigned int)irq_map[irq].hwirq;
bus = REAL_IRQ_TO_BUS(rirq);
function = REAL_IRQ_TO_FUNC(rirq);
{
u32 bus, dev_id, function, mask;
const u32 sub_bus = 0;
- unsigned int rirq = virt_irq_to_real_map[irq];
+ unsigned int rirq = (unsigned int)irq_map[irq].hwirq;
/* irq should be locked by the caller */
bus = REAL_IRQ_TO_BUS(rirq);
{
u32 bus, dev_id, function, mask;
const u32 sub_bus = 0;
- unsigned int rirq = virt_irq_to_real_map[irq];
+ unsigned int rirq = (unsigned int)irq_map[irq].hwirq;
/* The IRQ has already been locked by the caller */
bus = REAL_IRQ_TO_BUS(rirq);
static void iseries_end_IRQ(unsigned int irq)
{
- unsigned int rirq = virt_irq_to_real_map[irq];
+ unsigned int rirq = (unsigned int)irq_map[irq].hwirq;
HvCallPci_eoi(REAL_IRQ_TO_BUS(rirq), REAL_IRQ_TO_SUBBUS(rirq),
(REAL_IRQ_TO_IDSEL(rirq) << 4) + REAL_IRQ_TO_FUNC(rirq));
int __init iSeries_allocate_IRQ(HvBusNumber bus,
HvSubBusNumber sub_bus, u32 bsubbus)
{
- int virtirq;
unsigned int realirq;
u8 idsel = ISERIES_GET_DEVICE_FROM_SUBBUS(bsubbus);
u8 function = ISERIES_GET_FUNCTION_FROM_SUBBUS(bsubbus);
realirq = (((((sub_bus << 8) + (bus - 1)) << 3) + (idsel - 1)) << 3)
+ function;
- virtirq = virt_irq_create_mapping(realirq);
- set_irq_chip_and_handler(virtirq, &iseries_pic, handle_fasteoi_irq);
- return virtirq;
+
+ return irq_create_mapping(NULL, realirq, IRQ_TYPE_NONE);
}
#endif /* CONFIG_PCI */
/*
* Get the next pending IRQ.
*/
-int iSeries_get_irq(struct pt_regs *regs)
+unsigned int iSeries_get_irq(struct pt_regs *regs)
{
- /* -2 means ignore this interrupt */
- int irq = -2;
+ int irq = NO_IRQ_IGNORE;
#ifdef CONFIG_SMP
if (get_lppaca()->int_dword.fields.ipi_cnt) {
}
spin_unlock(&pending_irqs_lock);
if (irq >= NR_IRQS)
- irq = -2;
+ irq = NO_IRQ_IGNORE;
}
#endif
return irq;
}
+
+static int iseries_irq_host_map(struct irq_host *h, unsigned int virq,
+ irq_hw_number_t hw, unsigned int flags)
+{
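+ /* These interrupts are EOId through HvCallPci_eoi (see
+ * iseries_end_IRQ), hence the fasteoi flow handler.
+ */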
+ set_irq_chip_and_handler(virq, &iseries_pic, handle_fasteoi_irq);
+
+ return 0;
+}
+
+static struct irq_host_ops iseries_irq_host_ops = {
+ .map = iseries_irq_host_map,
+};
+
+/*
+ * This is called by init_IRQ. set in ppc_md.init_IRQ by iSeries_setup.c
+ * It must be called before the bus walk.
+ */
+void __init iSeries_init_IRQ(void)
+{
+ /* Register PCI event handler and open an event path */
+ struct irq_host *host;
+ int ret;
+
+ /*
+ * The Hypervisor only allows us up to 256 interrupt
+ * sources (the irq number is passed in a u8).
+ */
+ irq_set_virq_count(256);
+
+ /* Create irq host. No need for a revmap since HV will give us
+ * back our virtual irq number
+ */
+ host = irq_alloc_host(IRQ_HOST_MAP_NOMAP, 0, &iseries_irq_host_ops, 0);
+ BUG_ON(host == NULL);
+ irq_set_default_host(host);
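+ /* (With a NOMAP host the number handed out by irq_create_mapping()
+ * is the one the HV gives back in events, so no reverse map of any
+ * kind is needed.)
+ */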
+
+ ret = HvLpEvent_registerHandler(HvLpEvent_Type_PciIo,
+ &pci_event_handler);
+ if (ret == 0) {
+ ret = HvLpEvent_openPath(HvLpEvent_Type_PciIo, 0);
+ if (ret != 0)
+ printk(KERN_ERR "iseries_init_IRQ: open event path "
+ "failed with rc 0x%x\n", ret);
+ } else
+ printk(KERN_ERR "iseries_init_IRQ: register handler "
+ "failed with rc 0x%x\n", ret);
+}
+
extern void iSeries_init_IRQ(void);
extern int iSeries_allocate_IRQ(HvBusNumber, HvSubBusNumber, u32);
extern void iSeries_activate_IRQs(void);
-extern int iSeries_get_irq(struct pt_regs *);
+extern unsigned int iSeries_get_irq(struct pt_regs *);
#endif /* _ISERIES_IRQ_H */
{
DBG(" -> iSeries_init_early()\n");
- ppc64_interrupt_controller = IC_ISERIES;
-
#if defined(CONFIG_BLK_DEV_INITRD)
/*
* If the init RAM disk has been configured and there is
powerpc_firmware_features |= FW_FEATURE_ISERIES;
powerpc_firmware_features |= FW_FEATURE_LPAR;
- /*
- * The Hypervisor only allows us up to 256 interrupt
- * sources (the irq number is passed in a u8).
- */
- virt_irq_max = 255;
-
hpte_init_iSeries();
return 1;
int maple_pci_get_legacy_ide_irq(struct pci_dev *pdev, int channel)
{
struct device_node *np;
- int irq = channel ? 15 : 14;
+ unsigned int defirq = channel ? 15 : 14;
+ unsigned int irq;
if (pdev->vendor != PCI_VENDOR_ID_AMD ||
pdev->device != PCI_DEVICE_ID_AMD_8111_IDE)
- return irq;
+ return defirq;
np = pci_device_to_OF_node(pdev);
if (np == NULL)
- return irq;
- if (np->n_intrs < 2)
- return irq;
- return np->intrs[channel & 0x1].line;
+ return defirq;
+ irq = irq_of_parse_and_map(np, channel & 0x1);
+ if (irq == NO_IRQ) {
+ printk("Failed to map onboard IDE interrupt for channel %d\n",
+ channel);
+ return defirq;
+ }
+ return irq;
}
/* XXX: To remove once all firmwares are ok */
{
DBG(" -> maple_init_early\n");
- /* Setup interrupt mapping options */
- ppc64_interrupt_controller = IC_OPEN_PIC;
-
iommu_init_early_dart();
DBG(" <- maple_init_early\n");
}
-
-static __init void maple_init_IRQ(void)
+/*
+ * This is almost identical to pSeries and CHRP. We need to make this
+ * code generic at some point, with appropriate bits in the device-tree to
+ * identify the presence of an HT APIC
+ */
+static void __init maple_init_IRQ(void)
{
- struct device_node *root;
+ struct device_node *root, *np, *mpic_node = NULL;
unsigned int *opprop;
- unsigned long opic_addr;
+ unsigned long openpic_addr = 0;
+ int naddr, n, i, opplen, has_isus = 0;
struct mpic *mpic;
- unsigned char senses[128];
- int n;
+ unsigned int flags = MPIC_PRIMARY;
- DBG(" -> maple_init_IRQ\n");
+ /* Locate MPIC in the device-tree. Note that there is a bug
+ * in the Maple device-tree where the type of the controller is
+ * open-pic and not interrupt-controller
+ */
+ for_each_node_by_type(np, "open-pic") {
+ mpic_node = np;
+ break;
+ }
+ if (mpic_node == NULL) {
+ printk(KERN_ERR
+ "Failed to locate the MPIC interrupt controller\n");
+ return;
+ }
- /* XXX: Non standard, replace that with a proper openpic/mpic node
- * in the device-tree. Find the Open PIC if present */
+ /* Find address list in /platform-open-pic */
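+ /* The property is a list of addresses, naddr cells each: the first
+ * one is the MPIC register block, any further entries are ISU
+ * blocks, hence has_isus below.
+ */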
root = of_find_node_by_path("/");
- opprop = (unsigned int *) get_property(root,
- "platform-open-pic", NULL);
- if (opprop == 0)
- panic("OpenPIC not found !\n");
-
- n = prom_n_addr_cells(root);
- for (opic_addr = 0; n > 0; --n)
- opic_addr = (opic_addr << 32) + *opprop++;
+ naddr = prom_n_addr_cells(root);
+ opprop = (unsigned int *) get_property(root, "platform-open-pic",
+ &opplen);
+ if (opprop != 0) {
+ openpic_addr = of_read_number(opprop, naddr);
+ has_isus = (opplen > naddr);
+ printk(KERN_DEBUG "OpenPIC addr: %lx, has ISUs: %d\n",
+ openpic_addr, has_isus);
+ }
of_node_put(root);
- /* Obtain sense values from device-tree */
- prom_get_irq_senses(senses, 0, 128);
+ BUG_ON(openpic_addr == 0);
- mpic = mpic_alloc(opic_addr,
- MPIC_PRIMARY | MPIC_BIG_ENDIAN |
- MPIC_BROKEN_U3 | MPIC_WANTS_RESET,
- 0, 0, 128, 128, senses, 128, "U3-MPIC");
+ /* Check for a big endian MPIC */
+ if (get_property(np, "big-endian", NULL) != NULL)
+ flags |= MPIC_BIG_ENDIAN;
+
+ /* XXX Maple specific bits */
+ flags |= MPIC_BROKEN_U3 | MPIC_WANTS_RESET;
+
+ /* Setup the openpic driver. More device-tree junk: we hard-code no
+ * ISUs for now. I'll have to revisit some of this with the folks doing
+ * the firmware for those machines
+ */
+ mpic = mpic_alloc(mpic_node, openpic_addr, flags,
+ /*has_isus ? 16 :*/ 0, 0, " MPIC ");
BUG_ON(mpic == NULL);
- mpic_init(mpic);
- DBG(" <- maple_init_IRQ\n");
+ /* Add ISUs */
+ opplen /= sizeof(u32);
+ for (n = 0, i = naddr; i < opplen; i += naddr, n++) {
+ unsigned long isuaddr = of_read_number(opprop + i, naddr);
+ mpic_assign_isu(mpic, n, isuaddr);
+ }
+
+ /* All ISUs are setup, complete initialization */
+ mpic_init(mpic);
+ ppc_md.get_irq = mpic_get_irq;
+ of_node_put(mpic_node);
}
static void __init maple_progress(char *s, unsigned short hex)
.setup_arch = maple_setup_arch,
.init_early = maple_init_early,
.init_IRQ = maple_init_IRQ,
- .get_irq = mpic_get_irq,
.pcibios_fixup = maple_pcibios_fixup,
.pci_get_legacy_ide_irq = maple_pci_get_legacy_ide_irq,
.restart = maple_restart,
{
u32 val;
+ bootx_dt_add_prop("linux,bootx", NULL, 0, mem_end);
+
if (bootx_info->kernelParamsOffset) {
char *args = (char *)((unsigned long)bootx_info) +
bootx_info->kernelParamsOffset;
if (!strcmp(namep, "/chosen")) {
DBG(" detected /chosen ! adding properties names !\n");
- bootx_dt_add_string("linux,platform", mem_end);
+ bootx_dt_add_string("linux,bootx", mem_end);
bootx_dt_add_string("linux,stdout-path", mem_end);
bootx_dt_add_string("linux,initrd-start", mem_end);
bootx_dt_add_string("linux,initrd-end", mem_end);
host->speed = KW_I2C_MODE_25KHZ;
break;
}
- if (np->n_intrs > 0)
- host->irq = np->intrs[0].line;
- else
- host->irq = NO_IRQ;
+ host->irq = irq_of_parse_and_map(np, 0);
+ if (host->irq == NO_IRQ)
+ printk(KERN_WARNING
+ "low_i2c: Failed to map interrupt for %s\n",
+ np->full_name);
host->base = ioremap((*addrp), 0x1000);
if (host->base == NULL) {
#include <asm/machdep.h>
#include <asm/nvram.h>
+#include "pmac.h"
+
#define DEBUG
#ifdef DEBUG
// XXX Turn that into a sem
static DEFINE_SPINLOCK(nv_lock);
-extern int pmac_newworld;
-extern int system_running;
-
static int (*core99_write_bank)(int bank, u8* datas);
static int (*core99_erase_bank)(int bank);
static struct pci_controller *u3_agp;
static struct pci_controller *u4_pcie;
static struct pci_controller *u3_ht;
+#define has_second_ohare 0
+#else
+static int has_second_ohare;
#endif /* CONFIG_PPC64 */
extern u8 pci_cache_line_size;
early_write_config_word(hose, bus, devfn, PCI_BRIDGE_CONTROL, val);
}
+static void __init init_second_ohare(void)
+{
+ struct device_node *np = of_find_node_by_name(NULL, "pci106b,7");
+ unsigned char bus, devfn;
+ unsigned short cmd;
+
+ if (np == NULL)
+ return;
+
+ /* This must run before we initialize the PICs since the second
+ * ohare hosts a PIC that will be accessed there.
+ */
+ if (pci_device_from_OF_node(np, &bus, &devfn) == 0) {
+ struct pci_controller* hose =
+ pci_find_hose_for_OF_device(np);
+ if (!hose) {
+ printk(KERN_ERR "Can't find PCI hose for OHare2 !\n");
+ return;
+ }
+ early_read_config_word(hose, bus, devfn, PCI_COMMAND, &cmd);
+ cmd |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER;
+ cmd &= ~PCI_COMMAND_IO;
+ early_write_config_word(hose, bus, devfn, PCI_COMMAND, cmd);
+ }
+ has_second_ohare = 1;
+}
+
/*
* Some Apple desktop machines have a NEC PD720100A USB2 controller
* on the motherboard. Open Firmware, on these, will disable the
" EHCI, fixing up...\n");
data &= ~1UL;
early_write_config_dword(hose, bus, devfn, 0xe4, data);
- early_write_config_byte(hose, bus,
- devfn | 2, PCI_INTERRUPT_LINE,
- nec->intrs[0].line);
}
}
}
return 0;
}
-static void __init pcibios_fixup_OF_interrupts(void)
+void __init pmac_pcibios_fixup(void)
{
struct pci_dev* dev = NULL;
- /*
- * Open Firmware often doesn't initialize the
- * PCI_INTERRUPT_LINE config register properly, so we
- * should find the device node and apply the interrupt
- * obtained from the OF device-tree
- */
for_each_pci_dev(dev) {
- struct device_node *node;
- node = pci_device_to_OF_node(dev);
- /* this is the node, see if it has interrupts */
- if (node && node->n_intrs > 0)
- dev->irq = node->intrs[0].line;
- pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
+ /* Read interrupt from the device-tree */
+ pci_read_irq_line(dev);
+
+ /* Fixup interrupt for the modem/ethernet combo controller
+ * on machines with a second ohare chip.
+ * The number in the device tree (27) is bogus (correct for
+ * the ethernet-only board but not the combo ethernet/modem
+ * board). The real interrupt is 28 on the second controller
+ * -> 28+32 = 60.
+ */
+ if (has_second_ohare &&
+ dev->vendor == PCI_VENDOR_ID_DEC &&
+ dev->device == PCI_DEVICE_ID_DEC_TULIP_PLUS)
+ dev->irq = irq_create_mapping(NULL, 60, 0);
}
}
-void __init pmac_pcibios_fixup(void)
-{
- /* Fixup interrupts according to OF tree */
- pcibios_fixup_OF_interrupts();
-}
-
#ifdef CONFIG_PPC64
static void __init pmac_fixup_phb_resources(void)
{
#else /* CONFIG_PPC64 */
init_p2pbridge();
+ init_second_ohare();
fixup_nec_usb2();
/* We are still having some issues with the Xserve G4, enabling
static int macio_do_gpio_irq_enable(struct pmf_function *func)
{
- if (func->node->n_intrs < 1)
+ unsigned int irq = irq_of_parse_and_map(func->node, 0);
+ if (irq == NO_IRQ)
return -EINVAL;
-
- return request_irq(func->node->intrs[0].line, macio_gpio_irq, 0,
- func->node->name, func);
+ return request_irq(irq, macio_gpio_irq, 0, func->node->name, func);
}
static int macio_do_gpio_irq_disable(struct pmf_function *func)
{
- if (func->node->n_intrs < 1)
+ unsigned int irq = irq_of_parse_and_map(func->node, 0);
+ if (irq == NO_IRQ)
return -EINVAL;
-
- free_irq(func->node->intrs[0].line, func);
+ free_irq(irq, func);
return 0;
}
static DEFINE_SPINLOCK(pmac_pic_lock);
-#define GATWICK_IRQ_POOL_SIZE 10
-static struct interrupt_info gatwick_int_pool[GATWICK_IRQ_POOL_SIZE];
-
#define NR_MASK_WORDS ((NR_IRQS + 31) / 32)
static unsigned long ppc_lost_interrupts[NR_MASK_WORDS];
static unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
static int pmac_irq_cascade = -1;
+static struct irq_host *pmac_pic_host;
static void __pmac_retrigger(unsigned int irq_nr)
{
}
}
-static void pmac_mask_and_ack_irq(unsigned int irq_nr)
+static void pmac_mask_and_ack_irq(unsigned int virq)
{
- unsigned long bit = 1UL << (irq_nr & 0x1f);
- int i = irq_nr >> 5;
+ unsigned int src = irq_map[virq].hwirq;
+ unsigned long bit = 1UL << (src & 0x1f);
+ int i = src >> 5;
unsigned long flags;
- if ((unsigned)irq_nr >= max_irqs)
- return;
-
spin_lock_irqsave(&pmac_pic_lock, flags);
- __clear_bit(irq_nr, ppc_cached_irq_mask);
- if (__test_and_clear_bit(irq_nr, ppc_lost_interrupts))
+ __clear_bit(src, ppc_cached_irq_mask);
+ if (__test_and_clear_bit(src, ppc_lost_interrupts))
atomic_dec(&ppc_n_lost_interrupts);
out_le32(&pmac_irq_hw[i]->enable, ppc_cached_irq_mask[i]);
out_le32(&pmac_irq_hw[i]->ack, bit);
spin_unlock_irqrestore(&pmac_pic_lock, flags);
}
-static void pmac_ack_irq(unsigned int irq_nr)
+static void pmac_ack_irq(unsigned int virq)
{
- unsigned long bit = 1UL << (irq_nr & 0x1f);
- int i = irq_nr >> 5;
+ unsigned int src = irq_map[virq].hwirq;
+ unsigned long bit = 1UL << (src & 0x1f);
+ int i = src >> 5;
unsigned long flags;
- if ((unsigned)irq_nr >= max_irqs)
- return;
-
spin_lock_irqsave(&pmac_pic_lock, flags);
- if (__test_and_clear_bit(irq_nr, ppc_lost_interrupts))
+ if (__test_and_clear_bit(src, ppc_lost_interrupts))
atomic_dec(&ppc_n_lost_interrupts);
out_le32(&pmac_irq_hw[i]->ack, bit);
(void)in_le32(&pmac_irq_hw[i]->ack);
/* When an irq gets requested for the first client, if it's an
* edge interrupt, we clear any previous one on the controller
*/
-static unsigned int pmac_startup_irq(unsigned int irq_nr)
+static unsigned int pmac_startup_irq(unsigned int virq)
{
unsigned long flags;
- unsigned long bit = 1UL << (irq_nr & 0x1f);
- int i = irq_nr >> 5;
+ unsigned int src = irq_map[virq].hwirq;
+ unsigned long bit = 1UL << (src & 0x1f);
+ int i = src >> 5;
spin_lock_irqsave(&pmac_pic_lock, flags);
- if ((irq_desc[irq_nr].status & IRQ_LEVEL) == 0)
+ if ((irq_desc[virq].status & IRQ_LEVEL) == 0)
out_le32(&pmac_irq_hw[i]->ack, bit);
- __set_bit(irq_nr, ppc_cached_irq_mask);
- __pmac_set_irq_mask(irq_nr, 0);
+ __set_bit(src, ppc_cached_irq_mask);
+ __pmac_set_irq_mask(src, 0);
spin_unlock_irqrestore(&pmac_pic_lock, flags);
return 0;
}
-static void pmac_mask_irq(unsigned int irq_nr)
+static void pmac_mask_irq(unsigned int virq)
{
unsigned long flags;
+ unsigned int src = irq_map[virq].hwirq;
spin_lock_irqsave(&pmac_pic_lock, flags);
- __clear_bit(irq_nr, ppc_cached_irq_mask);
- __pmac_set_irq_mask(irq_nr, 0);
+ __clear_bit(src, ppc_cached_irq_mask);
+ __pmac_set_irq_mask(src, 0);
spin_unlock_irqrestore(&pmac_pic_lock, flags);
}
-static void pmac_unmask_irq(unsigned int irq_nr)
+static void pmac_unmask_irq(unsigned int virq)
{
unsigned long flags;
+ unsigned int src = irq_map[virq].hwirq;
spin_lock_irqsave(&pmac_pic_lock, flags);
- __set_bit(irq_nr, ppc_cached_irq_mask);
- __pmac_set_irq_mask(irq_nr, 0);
+ __set_bit(src, ppc_cached_irq_mask);
+ __pmac_set_irq_mask(src, 0);
spin_unlock_irqrestore(&pmac_pic_lock, flags);
}
-static int pmac_retrigger(unsigned int irq_nr)
+static int pmac_retrigger(unsigned int virq)
{
unsigned long flags;
spin_lock_irqsave(&pmac_pic_lock, flags);
- __pmac_retrigger(irq_nr);
+ __pmac_retrigger(irq_map[virq].hwirq);
spin_unlock_irqrestore(&pmac_pic_lock, flags);
return 1;
}
return rc;
}
-static int pmac_get_irq(struct pt_regs *regs)
+static unsigned int pmac_pic_get_irq(struct pt_regs *regs)
{
int irq;
unsigned long bits = 0;
/* IPI's are a hack on the powersurge -- Cort */
if ( smp_processor_id() != 0 ) {
psurge_smp_message_recv(regs);
- return -2; /* ignore, already handled */
+ return NO_IRQ_IGNORE; /* ignore, already handled */
}
#endif /* CONFIG_SMP */
spin_lock_irqsave(&pmac_pic_lock, flags);
break;
}
spin_unlock_irqrestore(&pmac_pic_lock, flags);
-
- return irq;
-}
-
-/* This routine will fix some missing interrupt values in the device tree
- * on the gatwick mac-io controller used by some PowerBooks
- *
- * Walking of OF nodes could use a bit more fixing up here, but it's not
- * very important as this is all boot time code on static portions of the
- * device-tree.
- *
- * However, the modifications done to "intrs" will have to be removed and
- * replaced with proper updates of the "interrupts" properties or
- * AAPL,interrupts, yet to be decided, once the dynamic parsing is there.
- */
-static void __init pmac_fix_gatwick_interrupts(struct device_node *gw,
- int irq_base)
-{
- struct device_node *node;
- int count;
-
- memset(gatwick_int_pool, 0, sizeof(gatwick_int_pool));
- count = 0;
- for (node = NULL; (node = of_get_next_child(gw, node)) != NULL;) {
- /* Fix SCC */
- if ((strcasecmp(node->name, "escc") == 0) && node->child) {
- if (node->child->n_intrs < 3) {
- node->child->intrs = &gatwick_int_pool[count];
- count += 3;
- }
- node->child->n_intrs = 3;
- node->child->intrs[0].line = 15+irq_base;
- node->child->intrs[1].line = 4+irq_base;
- node->child->intrs[2].line = 5+irq_base;
- printk(KERN_INFO "irq: fixed SCC on gatwick"
- " (%d,%d,%d)\n",
- node->child->intrs[0].line,
- node->child->intrs[1].line,
- node->child->intrs[2].line);
- }
- /* Fix media-bay & left SWIM */
- if (strcasecmp(node->name, "media-bay") == 0) {
- struct device_node* ya_node;
-
- if (node->n_intrs == 0)
- node->intrs = &gatwick_int_pool[count++];
- node->n_intrs = 1;
- node->intrs[0].line = 29+irq_base;
- printk(KERN_INFO "irq: fixed media-bay on gatwick"
- " (%d)\n", node->intrs[0].line);
-
- ya_node = node->child;
- while(ya_node) {
- if (strcasecmp(ya_node->name, "floppy") == 0) {
- if (ya_node->n_intrs < 2) {
- ya_node->intrs = &gatwick_int_pool[count];
- count += 2;
- }
- ya_node->n_intrs = 2;
- ya_node->intrs[0].line = 19+irq_base;
- ya_node->intrs[1].line = 1+irq_base;
- printk(KERN_INFO "irq: fixed floppy on second controller (%d,%d)\n",
- ya_node->intrs[0].line, ya_node->intrs[1].line);
- }
- if (strcasecmp(ya_node->name, "ata4") == 0) {
- if (ya_node->n_intrs < 2) {
- ya_node->intrs = &gatwick_int_pool[count];
- count += 2;
- }
- ya_node->n_intrs = 2;
- ya_node->intrs[0].line = 14+irq_base;
- ya_node->intrs[1].line = 3+irq_base;
- printk(KERN_INFO "irq: fixed ide on second controller (%d,%d)\n",
- ya_node->intrs[0].line, ya_node->intrs[1].line);
- }
- ya_node = ya_node->sibling;
- }
- }
- }
- if (count > 10) {
- printk("WARNING !! Gatwick interrupt pool overflow\n");
- printk(" GATWICK_IRQ_POOL_SIZE = %d\n", GATWICK_IRQ_POOL_SIZE);
- printk(" requested = %d\n", count);
- }
-}
-
-/*
- * The PowerBook 3400/2400/3500 can have a combo ethernet/modem
- * card which includes an ohare chip that acts as a second interrupt
- * controller. If we find this second ohare, set it up and fix the
- * interrupt value in the device tree for the ethernet chip.
- */
-static void __init enable_second_ohare(struct device_node *np)
-{
- unsigned char bus, devfn;
- unsigned short cmd;
- struct device_node *ether;
-
- /* This code doesn't strictly belong here, it could be part of
- * either the PCI initialisation or the feature code. It's kept
- * here for historical reasons.
- */
- if (pci_device_from_OF_node(np, &bus, &devfn) == 0) {
- struct pci_controller* hose =
- pci_find_hose_for_OF_device(np);
- if (!hose) {
- printk(KERN_ERR "Can't find PCI hose for OHare2 !\n");
- return;
- }
- early_read_config_word(hose, bus, devfn, PCI_COMMAND, &cmd);
- cmd |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER;
- cmd &= ~PCI_COMMAND_IO;
- early_write_config_word(hose, bus, devfn, PCI_COMMAND, cmd);
- }
-
- /* Fix interrupt for the modem/ethernet combo controller. The number
- * in the device tree (27) is bogus (correct for the ethernet-only
- * board but not the combo ethernet/modem board).
- * The real interrupt is 28 on the second controller -> 28+32 = 60.
- */
- ether = of_find_node_by_name(NULL, "pci1011,14");
- if (ether && ether->n_intrs > 0) {
- ether->intrs[0].line = 60;
- printk(KERN_INFO "irq: Fixed ethernet IRQ to %d\n",
- ether->intrs[0].line);
- }
- of_node_put(ether);
+ if (unlikely(irq < 0))
+ return NO_IRQ;
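+ /* The hardware gave us the source number; the linear revmap turns
+ * it back into the virq set up in pmac_pic_host_map().
+ */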
+ return irq_linear_revmap(pmac_pic_host, irq);
}
#ifdef CONFIG_XMON
.name = "cascade",
};
+static int pmac_pic_host_match(struct irq_host *h, struct device_node *node)
+{
+ /* We match all, we don't always have a node anyway */
+ return 1;
+}
+
+static int pmac_pic_host_map(struct irq_host *h, unsigned int virq,
+ irq_hw_number_t hw, unsigned int flags)
+{
+ struct irq_desc *desc = get_irq_desc(virq);
+ int level;
+
+ if (hw >= max_irqs)
+ return -EINVAL;
+
+ /* Mark level interrupts, set delayed disable for edge ones and set
+ * handlers
+ */
+ level = !!(level_mask[hw >> 5] & (1UL << (hw & 0x1f)));
+ if (level)
+ desc->status |= IRQ_LEVEL;
+ else
+ desc->status |= IRQ_DELAYED_DISABLE;
+ set_irq_chip_and_handler(virq, &pmac_pic, level ?
+ handle_level_irq : handle_edge_irq);
+ return 0;
+}
+
+static int pmac_pic_host_xlate(struct irq_host *h, struct device_node *ct,
+ u32 *intspec, unsigned int intsize,
+ irq_hw_number_t *out_hwirq,
+ unsigned int *out_flags)
+{
+ *out_hwirq = *intspec;
+ return 0;
+}
+
+static struct irq_host_ops pmac_pic_host_ops = {
+ .match = pmac_pic_host_match,
+ .map = pmac_pic_host_map,
+ .xlate = pmac_pic_host_xlate,
+};
+
static void __init pmac_pic_probe_oldstyle(void)
{
int i;
struct resource r;
/* Set our get_irq function */
- ppc_md.get_irq = pmac_get_irq;
+ ppc_md.get_irq = pmac_pic_get_irq;
/*
* Find the interrupt controller type & node
if (slave) {
max_irqs = 64;
level_mask[1] = OHARE_LEVEL_MASK;
- enable_second_ohare(slave);
}
} else if ((master = of_find_node_by_name(NULL, "mac-io")) != NULL) {
max_irqs = max_real_irqs = 64;
max_irqs = 128;
level_mask[2] = HEATHROW_LEVEL_MASK;
level_mask[3] = 0;
- pmac_fix_gatwick_interrupts(slave, max_real_irqs);
}
}
BUG_ON(master == NULL);
- /* Mark level interrupts and set handlers */
- for (i = 0; i < max_irqs; i++) {
- int level = !!(level_mask[i >> 5] & (1UL << (i & 0x1f)));
- if (level)
- irq_desc[i].status |= IRQ_LEVEL;
- else
- irq_desc[i].status |= IRQ_DELAYED_DISABLE;
- set_irq_chip_and_handler(i, &pmac_pic, level ?
- handle_level_irq : handle_edge_irq);
- }
+ /*
+ * Allocate an irq host
+ */
+ pmac_pic_host = irq_alloc_host(IRQ_HOST_MAP_LINEAR, max_irqs,
+ &pmac_pic_host_ops,
+ max_irqs);
+ BUG_ON(pmac_pic_host == NULL);
+ irq_set_default_host(pmac_pic_host);
/* Get addresses of first controller if we have a node for it */
BUG_ON(of_address_to_resource(master, 0, &r));
pmac_irq_hw[i++] =
(volatile struct pmac_irq_hw __iomem *)
(addr + 0x10);
- pmac_irq_cascade = slave->intrs[0].line;
+ pmac_irq_cascade = irq_of_parse_and_map(slave, 0);
printk(KERN_INFO "irq: Found slave Apple PIC %s for %d irqs"
" cascade: %d\n", slave->full_name,
out_le32(&pmac_irq_hw[i]->enable, 0);
/* Hookup cascade irq */
- if (slave)
+ if (slave && pmac_irq_cascade != NO_IRQ)
setup_irq(pmac_irq_cascade, &gatwick_cascade_action);
printk(KERN_INFO "irq: System has %d possible interrupts\n", max_irqs);
#ifdef CONFIG_XMON
- setup_irq(20, &xmon_action);
+ setup_irq(irq_create_mapping(NULL, 20, 0), &xmon_action);
#endif
}
#endif /* CONFIG_PPC32 */
struct pt_regs *regs)
{
struct mpic *mpic = desc->handler_data;
- unsigned int max = 100;
- while(max--) {
- int cascade_irq = mpic_get_one_irq(mpic, regs);
- if (max == 99)
- desc->chip->eoi(irq);
- if (irq < 0)
- break;
+ unsigned int cascade_irq = mpic_get_one_irq(mpic, regs);
+ if (cascade_irq != NO_IRQ)
generic_handle_irq(cascade_irq, regs);
- };
+ desc->chip->eoi(irq);
}
static void __init pmac_pic_setup_mpic_nmi(struct mpic *mpic)
int nmi_irq;
pswitch = of_find_node_by_name(NULL, "programmer-switch");
- if (pswitch && pswitch->n_intrs) {
- nmi_irq = pswitch->intrs[0].line;
- mpic_irq_set_priority(nmi_irq, 9);
- setup_irq(nmi_irq, &xmon_action);
+ if (pswitch) {
+ nmi_irq = irq_of_parse_and_map(pswitch, 0);
+ if (nmi_irq != NO_IRQ) {
+ mpic_irq_set_priority(nmi_irq, 9);
+ setup_irq(nmi_irq, &xmon_action);
+ }
+ of_node_put(pswitch);
}
- of_node_put(pswitch);
#endif /* defined(CONFIG_XMON) && defined(CONFIG_PPC32) */
}
static struct mpic * __init pmac_setup_one_mpic(struct device_node *np,
int master)
{
- unsigned char senses[128];
- int offset = master ? 0 : 128;
- int count = master ? 128 : 124;
const char *name = master ? " MPIC 1 " : " MPIC 2 ";
struct resource r;
struct mpic *mpic;
pmac_call_feature(PMAC_FTR_ENABLE_MPIC, np, 0, 0);
- prom_get_irq_senses(senses, offset, offset + count);
-
flags |= MPIC_WANTS_RESET;
if (get_property(np, "big-endian", NULL))
flags |= MPIC_BIG_ENDIAN;
if (master && (flags & MPIC_BIG_ENDIAN))
flags |= MPIC_BROKEN_U3;
- mpic = mpic_alloc(r.start, flags, 0, offset, count, master ? 252 : 0,
- senses, count, name);
+ mpic = mpic_alloc(np, r.start, flags, 0, 0, name);
if (mpic == NULL)
return NULL;
{
struct mpic *mpic1, *mpic2;
struct device_node *np, *master = NULL, *slave = NULL;
+ unsigned int cascade;
/* We can have up to 2 MPICs cascaded */
for (np = NULL; (np = of_find_node_by_type(np, "open-pic"))
of_node_put(master);
/* No slave, let's go out */
- if (slave == NULL || slave->n_intrs < 1)
+ if (slave == NULL)
+ return 0;
+
+ /* Get/Map slave interrupt */
+ cascade = irq_of_parse_and_map(slave, 0);
+ if (cascade == NO_IRQ) {
+ printk(KERN_ERR "Failed to map cascade IRQ\n");
return 0;
+ }
mpic2 = pmac_setup_one_mpic(slave, 0);
if (mpic2 == NULL) {
of_node_put(slave);
return 0;
}
- set_irq_data(slave->intrs[0].line, mpic2);
- set_irq_chained_handler(slave->intrs[0].line, pmac_u3_cascade);
+ set_irq_data(cascade, mpic2);
+ set_irq_chained_handler(cascade, pmac_u3_cascade);
of_node_put(slave);
return 0;
void __init pmac_pic_init(void)
{
+ unsigned int flags = 0;
+
+ /* We configure the OF parsing based on our oldworld vs. newworld
+ * platform type and whether we were booted by BootX.
+ */
+#ifdef CONFIG_PPC32
+ if (!pmac_newworld)
+ flags |= OF_IMAP_OLDWORLD_MAC;
+ if (get_property(of_chosen, "linux,bootx", NULL) != NULL)
+ flags |= OF_IMAP_NO_PHANDLE;
+ of_irq_map_init(flags);
+#endif /* CONFIG_PPC32 */
+
/* We first try to detect Apple's new Core99 chipset, since mac-io
* is quite different on those machines and contains an IBM MPIC2.
*/
/* This used to be passed by the PMU driver but that link got
* broken with the new driver model. We use this tweak for now...
+ * We really want to do things differently though...
*/
static int pmacpic_find_viaint(void)
{
np = of_find_node_by_name(NULL, "via-pmu");
if (np == NULL)
goto not_found;
- viaint = np->intrs[0].line;
+ viaint = irq_of_parse_and_map(np, 0);
#endif /* CONFIG_ADB_PMU */
not_found:
struct rtc_time;
+extern int pmac_newworld;
+
extern long pmac_time_init(void);
extern unsigned long pmac_get_boot_time(void);
extern void pmac_get_rtc_time(struct rtc_time *);
udbg_adb_init(!!strstr(cmd_line, "btextdbg"));
#ifdef CONFIG_PPC64
- /* Setup interrupt mapping options */
- ppc64_interrupt_controller = IC_OPEN_PIC;
-
iommu_init_early_dart();
#endif
}
/* #define DEBUG */
-static void request_ras_irqs(struct device_node *np, char *propname,
+
+static void request_ras_irqs(struct device_node *np,
irqreturn_t (*handler)(int, void *, struct pt_regs *),
const char *name)
{
- unsigned int *ireg, len, i;
- int virq, n_intr;
-
- ireg = (unsigned int *)get_property(np, propname, &len);
- if (ireg == NULL)
- return;
- n_intr = prom_n_intr_cells(np);
- len /= n_intr * sizeof(*ireg);
-
- for (i = 0; i < len; i++) {
- virq = virt_irq_create_mapping(*ireg);
- if (virq == NO_IRQ) {
- printk(KERN_ERR "Unable to allocate interrupt "
- "number for %s\n", np->full_name);
- return;
+ int i, index, count = 0;
+ struct of_irq oirq;
+ u32 *opicprop;
+ unsigned int opicplen;
+ unsigned int virqs[16];
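+ /* We cap at 16 interrupts per node, the size of the virqs array,
+ * which should be plenty for the RAS event sources.
+ */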
+
+ /* Check for obsolete "open-pic-interrupt" property. If present, then
+ * map those interrupts using the default interrupt host and default
+ * trigger
+ */
+ opicprop = (u32 *)get_property(np, "open-pic-interrupt", &opicplen);
+ if (opicprop) {
+ opicplen /= sizeof(u32);
+ for (i = 0; i < opicplen; i++) {
+ if (count > 15)
+ break;
+ virqs[count] = irq_create_mapping(NULL, *(opicprop++),
+ IRQ_TYPE_NONE);
+ if (virqs[count] == NO_IRQ)
+ printk(KERN_ERR "Unable to allocate interrupt "
+ "number for %s\n", np->full_name);
+ else
+ count++;
+
}
- if (request_irq(irq_offset_up(virq), handler, 0, name, NULL)) {
+ }
+ /* Else use normal interrupt tree parsing */
+ else {
+ /* First try to do a proper OF tree parsing */
+ for (index = 0; of_irq_map_one(np, index, &oirq) == 0;
+ index++) {
+ if (count > 15)
+ break;
+ virqs[count] = irq_create_of_mapping(oirq.controller,
+ oirq.specifier,
+ oirq.size);
+ if (virqs[count] == NO_IRQ)
+ printk(KERN_ERR "Unable to allocate interrupt "
+ "number for %s\n", np->full_name);
+ else
+ count++;
+ }
+ }
+
+ /* Now request them */
+ for (i = 0; i < count; i++) {
+ if (request_irq(virqs[i], handler, 0, name, NULL)) {
printk(KERN_ERR "Unable to request interrupt %d for "
- "%s\n", irq_offset_up(virq), np->full_name);
+ "%s\n", virqs[i], np->full_name);
return;
}
- ireg += n_intr;
}
}
/* Internal Errors */
np = of_find_node_by_path("/event-sources/internal-errors");
if (np != NULL) {
- request_ras_irqs(np, "open-pic-interrupt", ras_error_interrupt,
- "RAS_ERROR");
- request_ras_irqs(np, "interrupts", ras_error_interrupt,
- "RAS_ERROR");
+ request_ras_irqs(np, ras_error_interrupt, "RAS_ERROR");
of_node_put(np);
}
/* EPOW Events */
np = of_find_node_by_path("/event-sources/epow-events");
if (np != NULL) {
- request_ras_irqs(np, "open-pic-interrupt", ras_epow_interrupt,
- "RAS_EPOW");
- request_ras_irqs(np, "interrupts", ras_epow_interrupt,
- "RAS_EPOW");
+ request_ras_irqs(np, ras_epow_interrupt, "RAS_EPOW");
of_node_put(np);
}
status = rtas_call(ras_check_exception_token, 6, 1, NULL,
RAS_VECTOR_OFFSET,
- virt_irq_to_real(irq_offset_down(irq)),
+ irq_map[irq].hwirq,
RTAS_EPOW_WARNING | RTAS_POWERMGM_EVENTS,
critical, __pa(&ras_log_buf),
rtas_get_error_log_max());
status = rtas_call(ras_check_exception_token, 6, 1, NULL,
RAS_VECTOR_OFFSET,
- virt_irq_to_real(irq_offset_down(irq)),
+ irq_map[irq].hwirq,
RTAS_INTERNAL_ERROR, 1 /*Time Critical */,
__pa(&ras_log_buf),
rtas_get_error_log_max());
#define DBG(fmt...)
#endif
+/* Move these declarations to a header at some point */
+extern void smp_init_pseries_mpic(void);
+extern void smp_init_pseries_xics(void);
extern void find_udbg_vterm(void);
int fwnmi_active; /* TRUE if an FWNMI handler is present */
static void pseries_shared_idle_sleep(void);
static void pseries_dedicated_idle_sleep(void);
-struct mpic *pSeries_mpic;
+static struct device_node *pSeries_mpic_node;
static void pSeries_show_cpuinfo(struct seq_file *m)
{
fwnmi_active = 1;
}
-void pSeries_8259_cascade(unsigned int irq, struct irq_desc *desc,
+void pseries_8259_cascade(unsigned int irq, struct irq_desc *desc,
struct pt_regs *regs)
{
- unsigned int max = 100;
-
- while(max--) {
- int cascade_irq = i8259_irq(regs);
- if (max == 99)
- desc->chip->eoi(irq);
- if (cascade_irq < 0)
- break;
+ unsigned int cascade_irq = i8259_irq(regs);
+ if (cascade_irq != NO_IRQ)
generic_handle_irq(cascade_irq, regs);
- };
+ desc->chip->eoi(irq);
}
-static void __init pSeries_init_mpic(void)
+static void __init pseries_mpic_init_IRQ(void)
{
+ struct device_node *np, *old, *cascade = NULL;
unsigned int *addrp;
- struct device_node *np;
unsigned long intack = 0;
-
- /* All ISUs are setup, complete initialization */
- mpic_init(pSeries_mpic);
-
- /* Check what kind of cascade ACK we have */
- if (!(np = of_find_node_by_name(NULL, "pci"))
- || !(addrp = (unsigned int *)
- get_property(np, "8259-interrupt-acknowledge", NULL)))
- printk(KERN_ERR "Cannot find pci to get ack address\n");
- else
- intack = addrp[prom_n_addr_cells(np)-1];
- of_node_put(np);
-
- /* Setup the legacy interrupts & controller */
- i8259_init(intack, 0);
-
- /* Hook cascade to mpic */
- set_irq_chained_handler(NUM_ISA_INTERRUPTS, pSeries_8259_cascade);
-}
-
-static void __init pSeries_setup_mpic(void)
-{
unsigned int *opprop;
unsigned long openpic_addr = 0;
- unsigned char senses[NR_IRQS - NUM_ISA_INTERRUPTS];
- struct device_node *root;
- int irq_count;
+ unsigned int cascade_irq;
+ int naddr, n, i, opplen;
+ struct mpic *mpic;
- /* Find the Open PIC if present */
- root = of_find_node_by_path("/");
- opprop = (unsigned int *) get_property(root, "platform-open-pic", NULL);
+ np = of_find_node_by_path("/");
+ naddr = prom_n_addr_cells(np);
+ opprop = (unsigned int *) get_property(np, "platform-open-pic", &opplen);
if (opprop != 0) {
- int n = prom_n_addr_cells(root);
-
- for (openpic_addr = 0; n > 0; --n)
- openpic_addr = (openpic_addr << 32) + *opprop++;
+ openpic_addr = of_read_number(opprop, naddr);
printk(KERN_DEBUG "OpenPIC addr: %lx\n", openpic_addr);
}
- of_node_put(root);
+ of_node_put(np);
BUG_ON(openpic_addr == 0);
- /* Get the sense values from OF */
- prom_get_irq_senses(senses, NUM_ISA_INTERRUPTS, NR_IRQS);
-
/* Setup the openpic driver */
- irq_count = NR_IRQS - NUM_ISA_INTERRUPTS - 4; /* leave room for IPIs */
- pSeries_mpic = mpic_alloc(openpic_addr, MPIC_PRIMARY,
- 16, 16, irq_count, /* isu size, irq offset, irq count */
- NR_IRQS - 4, /* ipi offset */
- senses, irq_count, /* sense & sense size */
- " MPIC ");
+ mpic = mpic_alloc(pSeries_mpic_node, openpic_addr,
+ MPIC_PRIMARY,
+ 16, 250, /* isu size, irq count */
+ " MPIC ");
+ BUG_ON(mpic == NULL);
+
+ /* Add ISUs */
+ opplen /= sizeof(u32);
+ for (n = 0, i = naddr; i < opplen; i += naddr, n++) {
+ unsigned long isuaddr = of_read_number(opprop + i, naddr);
+ mpic_assign_isu(mpic, n, isuaddr);
+ }
+
+ /* All ISUs are setup, complete initialization */
+ mpic_init(mpic);
+
+ /* Look for cascade */
+ for_each_node_by_type(np, "interrupt-controller")
+ if (device_is_compatible(np, "chrp,iic")) {
+ cascade = np;
+ break;
+ }
+ if (cascade == NULL)
+ return;
+
+ cascade_irq = irq_of_parse_and_map(cascade, 0);
+ if (cascade_irq == NO_IRQ) {
+ printk(KERN_ERR "xics: failed to map cascade interrupt");
+ return;
+ }
+
+ /* Check ACK type */
+ for (old = of_node_get(cascade); old != NULL ; old = np) {
+ np = of_get_parent(old);
+ of_node_put(old);
+ if (np == NULL)
+ break;
+ if (strcmp(np->name, "pci") != 0)
+ continue;
+ addrp = (u32 *)get_property(np, "8259-interrupt-acknowledge",
+ NULL);
+ if (addrp == NULL)
+ continue;
+ naddr = prom_n_addr_cells(np);
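+ /* Address cells are most-significant-word first, so the last
+ * cell is the low 32 bits and the one before it, if any, the
+ * high 32 bits.
+ */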
+ intack = addrp[naddr-1];
+ if (naddr > 1)
+ intack |= ((unsigned long)addrp[naddr-2]) << 32;
+ }
+ if (intack)
+ printk(KERN_DEBUG "mpic: PCI 8259 intack at 0x%016lx\n",
+ intack);
+ i8259_init(cascade, intack);
+ of_node_put(cascade);
+ set_irq_chained_handler(cascade_irq, pseries_8259_cascade);
}
static void pseries_lpar_enable_pmcs(void)
get_lppaca()->pmcregs_in_use = 1;
}
-static void __init pSeries_setup_arch(void)
+#ifdef CONFIG_KEXEC
+static void pseries_kexec_cpu_down_mpic(int crash_shutdown, int secondary)
{
- /* Fixup ppc_md depending on the type of interrupt controller */
- if (ppc64_interrupt_controller == IC_OPEN_PIC) {
- ppc_md.init_IRQ = pSeries_init_mpic;
- ppc_md.get_irq = mpic_get_irq;
- /* Allocate the mpic now, so that find_and_init_phbs() can
- * fill the ISUs */
- pSeries_setup_mpic();
- } else
- ppc_md.init_IRQ = xics_init_IRQ;
+ mpic_teardown_this_cpu(secondary);
+}
+static void pseries_kexec_cpu_down_xics(int crash_shutdown, int secondary)
+{
+ /* Don't risk a hypervisor call if we're crashing */
+ if (firmware_has_feature(FW_FEATURE_SPLPAR) && !crash_shutdown) {
+ unsigned long vpa = __pa(get_lppaca());
+
+ if (unregister_vpa(hard_smp_processor_id(), vpa)) {
+ printk("VPA deregistration of cpu %u (hw_cpu_id %d) "
+ "failed\n", smp_processor_id(),
+ hard_smp_processor_id());
+ }
+ }
+ xics_teardown_cpu(secondary);
+}
+#endif /* CONFIG_KEXEC */
+
+static void __init pseries_discover_pic(void)
+{
+ struct device_node *np;
+ char *typep;
+
+ for (np = NULL; (np = of_find_node_by_name(np,
+ "interrupt-controller"));) {
+ typep = (char *)get_property(np, "compatible", NULL);
+ if (strstr(typep, "open-pic")) {
+ pSeries_mpic_node = of_node_get(np);
+ ppc_md.init_IRQ = pseries_mpic_init_IRQ;
+ ppc_md.get_irq = mpic_get_irq;
+#ifdef CONFIG_KEXEC
+ ppc_md.kexec_cpu_down = pseries_kexec_cpu_down_mpic;
+#endif
#ifdef CONFIG_SMP
- smp_init_pSeries();
+ smp_init_pseries_mpic();
#endif
+ return;
+ } else if (strstr(typep, "ppc-xicp")) {
+ ppc_md.init_IRQ = xics_init_IRQ;
+#ifdef CONFIG_KEXEC
+ ppc_md.kexec_cpu_down = pseries_kexec_cpu_down_xics;
+#endif
+#ifdef CONFIG_SMP
+ smp_init_pseries_xics();
+#endif
+ return;
+ }
+ }
+ printk(KERN_ERR "pSeries_discover_pic: failed to recognize"
+ " interrupt-controller\n");
+}
+
+static void __init pSeries_setup_arch(void)
+{
+ /* Discover PIC type and setup ppc_md accordingly */
+ pseries_discover_pic();
+
/* openpic global configuration register (64-bit format). */
/* openpic Interrupt Source Unit pointer (64-bit format). */
/* python0 facility area (mmio) (64-bit format) REAL address. */
}
arch_initcall(pSeries_init_panel);
-static void __init pSeries_discover_pic(void)
-{
- struct device_node *np;
- char *typep;
-
- /*
- * Setup interrupt mapping options that are needed for finish_device_tree
- * to properly parse the OF interrupt tree & do the virtual irq mapping
- */
- __irq_offset_value = NUM_ISA_INTERRUPTS;
- ppc64_interrupt_controller = IC_INVALID;
- for (np = NULL; (np = of_find_node_by_name(np, "interrupt-controller"));) {
- typep = (char *)get_property(np, "compatible", NULL);
- if (strstr(typep, "open-pic")) {
- ppc64_interrupt_controller = IC_OPEN_PIC;
- break;
- } else if (strstr(typep, "ppc-xicp")) {
- ppc64_interrupt_controller = IC_PPC_XIC;
- break;
- }
- }
- if (ppc64_interrupt_controller == IC_INVALID)
- printk("pSeries_discover_pic: failed to recognize"
- " interrupt-controller\n");
-
-}
-
static void pSeries_mach_cpu_die(void)
{
local_irq_disable();
iommu_init_early_pSeries();
- pSeries_discover_pic();
-
DBG(" <- pSeries_init_early()\n");
}
return PCI_PROBE_NORMAL;
}
-#ifdef CONFIG_KEXEC
-static void pseries_kexec_cpu_down(int crash_shutdown, int secondary)
-{
- /* Don't risk a hypervisor call if we're crashing */
- if (firmware_has_feature(FW_FEATURE_SPLPAR) && !crash_shutdown) {
- unsigned long vpa = __pa(get_lppaca());
-
- if (unregister_vpa(hard_smp_processor_id(), vpa)) {
- printk("VPA deregistration of cpu %u (hw_cpu_id %d) "
- "failed\n", smp_processor_id(),
- hard_smp_processor_id());
- }
- }
-
- if (ppc64_interrupt_controller == IC_OPEN_PIC)
- mpic_teardown_this_cpu(secondary);
- else
- xics_teardown_cpu(secondary);
-}
-#endif
-
define_machine(pseries) {
.name = "pSeries",
.probe = pSeries_probe,
.system_reset_exception = pSeries_system_reset_exception,
.machine_check_exception = pSeries_machine_check_exception,
#ifdef CONFIG_KEXEC
- .kexec_cpu_down = pseries_kexec_cpu_down,
.machine_kexec = default_machine_kexec,
.machine_kexec_prepare = default_machine_kexec_prepare,
.machine_crash_shutdown = default_machine_crash_shutdown,
#endif
/* This is called very early */
-void __init smp_init_pSeries(void)
+static void __init smp_init_pseries(void)
{
int i;
DBG(" -> smp_init_pSeries()\n");
- switch (ppc64_interrupt_controller) {
-#ifdef CONFIG_MPIC
- case IC_OPEN_PIC:
- smp_ops = &pSeries_mpic_smp_ops;
- break;
-#endif
-#ifdef CONFIG_XICS
- case IC_PPC_XIC:
- smp_ops = &pSeries_xics_smp_ops;
- break;
-#endif
- default:
- panic("Invalid interrupt controller");
- }
-
#ifdef CONFIG_HOTPLUG_CPU
smp_ops->cpu_disable = pSeries_cpu_disable;
smp_ops->cpu_die = pSeries_cpu_die;
DBG(" <- smp_init_pSeries()\n");
}
+#ifdef CONFIG_MPIC
+void __init smp_init_pseries_mpic(void)
+{
+ smp_ops = &pSeries_mpic_smp_ops;
+
+ smp_init_pseries();
+}
+#endif
+
+void __init smp_init_pseries_xics(void)
+{
+ smp_ops = &pSeries_xics_smp_ops;
+
+ smp_init_pseries();
+}
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
+
+#undef DEBUG
+
#include <linux/types.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/radix-tree.h>
#include <linux/cpu.h>
+
#include <asm/firmware.h>
#include <asm/prom.h>
#include <asm/io.h>
#include "xics.h"
-/* This is used to map real irq numbers to virtual */
-static struct radix_tree_root irq_map = RADIX_TREE_INIT(GFP_ATOMIC);
-
#define XICS_IPI 2
#define XICS_IRQ_SPURIOUS 0
static struct xics_ipl __iomem *xics_per_cpu[NR_CPUS];
-static int xics_irq_8259_cascade = 0;
-static int xics_irq_8259_cascade_real = 0;
static unsigned int default_server = 0xFF;
static unsigned int default_distrib_server = 0;
static unsigned int interrupt_server_size = 8;
+static struct irq_host *xics_host;
+
/*
* XICS only has a single IPI, so encode the messages per CPU
*/
/* Direct HW low level accessors */
-static inline int direct_xirr_info_get(int n_cpu)
+static inline unsigned int direct_xirr_info_get(int n_cpu)
{
return in_be32(&xics_per_cpu[n_cpu]->xirr.word);
}
return plpar_hcall(H_XIRR, 0, 0, 0, 0, xirr_ret, &dummy, &dummy);
}
-static inline int lpar_xirr_info_get(int n_cpu)
+static inline unsigned int lpar_xirr_info_get(int n_cpu)
{
unsigned long lpar_rc;
unsigned long return_value;
lpar_rc = plpar_xirr(&return_value);
if (lpar_rc != H_SUCCESS)
panic(" bad return code xirr - rc = %lx \n", lpar_rc);
- return (int)return_value;
+ return (unsigned int)return_value;
}
static inline void lpar_xirr_info_set(int n_cpu, int value)
#ifdef CONFIG_SMP
-static int get_irq_server(unsigned int irq)
+static int get_irq_server(unsigned int virq)
{
unsigned int server;
/* For the moment only implement delivery to all cpus or one cpu */
- cpumask_t cpumask = irq_desc[irq].affinity;
+ cpumask_t cpumask = irq_desc[virq].affinity;
cpumask_t tmp = CPU_MASK_NONE;
if (!distribute_irqs)
}
#else
-static int get_irq_server(unsigned int irq)
+static int get_irq_server(unsigned int virq)
{
return default_server;
}
int call_status;
unsigned int server;
- irq = virt_irq_to_real(irq_offset_down(virq));
- WARN_ON(irq == NO_IRQ);
- if (irq == XICS_IPI || irq == NO_IRQ)
+ pr_debug("xics: unmask virq %d\n", virq);
+
+ irq = (unsigned int)irq_map[virq].hwirq;
+ pr_debug(" -> map to hwirq 0x%x\n", irq);
+ if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
return;
server = get_irq_server(virq);
{
unsigned int irq;
- irq = virt_irq_to_real(irq_offset_down(virq));
- WARN_ON(irq == NO_IRQ);
- if (irq != NO_IRQ)
- xics_mask_real_irq(irq);
+ pr_debug("xics: mask virq %d\n", virq);
+
+ irq = (unsigned int)irq_map[virq].hwirq;
+ if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
+ return;
+ xics_mask_real_irq(irq);
}
-static void xics_set_irq_revmap(unsigned int virq)
+static unsigned int xics_startup(unsigned int virq)
{
unsigned int irq;
- irq = irq_offset_down(virq);
- if (radix_tree_insert(&irq_map, virt_irq_to_real(irq),
- &virt_irq_to_real_map[irq]) == -ENOMEM)
- printk(KERN_CRIT "Out of memory creating real -> virtual"
- " IRQ mapping for irq %u (real 0x%x)\n",
- virq, virt_irq_to_real(irq));
-}
+ /* force a reverse mapping of the interrupt so it gets in the cache */
+ irq = (unsigned int)irq_map[virq].hwirq;
+ irq_radix_revmap(xics_host, irq);
-static unsigned int xics_startup(unsigned int virq)
-{
- xics_set_irq_revmap(virq);
+ /* unmask it */
xics_unmask_irq(virq);
return 0;
}
-static unsigned int real_irq_to_virt(unsigned int real_irq)
-{
- unsigned int *ptr;
-
- ptr = radix_tree_lookup(&irq_map, real_irq);
- if (ptr == NULL)
- return NO_IRQ;
- return ptr - virt_irq_to_real_map;
-}
-
-static void xics_eoi_direct(unsigned int irq)
+static void xics_eoi_direct(unsigned int virq)
{
int cpu = smp_processor_id();
+ unsigned int irq = (unsigned int)irq_map[virq].hwirq;
iosync();
- direct_xirr_info_set(cpu, ((0xff << 24) |
- (virt_irq_to_real(irq_offset_down(irq)))));
+ direct_xirr_info_set(cpu, (0xff << 24) | irq);
}
-static void xics_eoi_lpar(unsigned int irq)
+static void xics_eoi_lpar(unsigned int virq)
{
int cpu = smp_processor_id();
+ unsigned int irq = (unsigned int)irq_map[virq].hwirq;
iosync();
- lpar_xirr_info_set(cpu, ((0xff << 24) |
- (virt_irq_to_real(irq_offset_down(irq)))));
-
+ lpar_xirr_info_set(cpu, (0xff << 24) | irq);
}
-static inline int xics_remap_irq(int vec)
+static inline unsigned int xics_remap_irq(unsigned int vec)
{
- int irq;
+ unsigned int irq;
vec &= 0x00ffffff;
if (vec == XICS_IRQ_SPURIOUS)
return NO_IRQ;
-
- irq = real_irq_to_virt(vec);
- if (irq == NO_IRQ)
- irq = real_irq_to_virt_slowpath(vec);
+ irq = irq_radix_revmap(xics_host, vec);
if (likely(irq != NO_IRQ))
- return irq_offset_up(irq);
+ return irq;
printk(KERN_ERR "Interrupt %u (real) is invalid,"
" disabling it.\n", vec);
return NO_IRQ;
}
-static int xics_get_irq_direct(struct pt_regs *regs)
+static unsigned int xics_get_irq_direct(struct pt_regs *regs)
{
unsigned int cpu = smp_processor_id();
return xics_remap_irq(direct_xirr_info_get(cpu));
}
-static int xics_get_irq_lpar(struct pt_regs *regs)
+static unsigned int xics_get_irq_lpar(struct pt_regs *regs)
{
unsigned int cpu = smp_processor_id();
unsigned long newmask;
cpumask_t tmp = CPU_MASK_NONE;
- irq = virt_irq_to_real(irq_offset_down(virq));
- if (irq == XICS_IPI || irq == NO_IRQ)
+ irq = (unsigned int)irq_map[virq].hwirq;
+ if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
return;
status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq);
}
}
+void xics_setup_cpu(void)
+{
+ int cpu = smp_processor_id();
+
+ xics_set_cpu_priority(cpu, 0xff);
+
+ /*
+ * Put the calling processor into the GIQ. This is really only
+ * necessary from a secondary thread as the OF start-cpu interface
+ * performs this function for us on primary threads.
+ *
+ * XXX: undo of teardown on kexec needs this too, as may hotplug
+ */
+ rtas_set_indicator(GLOBAL_INTERRUPT_QUEUE,
+ (1UL << interrupt_server_size) - 1 - default_distrib_server, 1);
+}
+
+
static struct irq_chip xics_pic_direct = {
.typename = " XICS ",
.startup = xics_startup,
};
-void xics_setup_cpu(void)
+static int xics_host_match(struct irq_host *h, struct device_node *node)
{
- int cpu = smp_processor_id();
+ /* IBM machines have interrupt parents of various funky types for things
+ * like vdevices, events, etc... The trick we use here is to match
+ * everything here except the legacy 8259 which is compatible "chrp,iic"
+ */
+ return !device_is_compatible(node, "chrp,iic");
+}
- xics_set_cpu_priority(cpu, 0xff);
+static int xics_host_map_direct(struct irq_host *h, unsigned int virq,
+ irq_hw_number_t hw, unsigned int flags)
+{
+ unsigned int sense = flags & IRQ_TYPE_SENSE_MASK;
- /*
- * Put the calling processor into the GIQ. This is really only
- * necessary from a secondary thread as the OF start-cpu interface
- * performs this function for us on primary threads.
- *
- * XXX: undo of teardown on kexec needs this too, as may hotplug
+ pr_debug("xics: map_direct virq %d, hwirq 0x%lx, flags: 0x%x\n",
+ virq, hw, flags);
+
+ if (sense && sense != IRQ_TYPE_LEVEL_LOW)
+ printk(KERN_WARNING "xics: using unsupported sense 0x%x"
+ " for irq %d (h: 0x%lx)\n", flags, virq, hw);
+
+ get_irq_desc(virq)->status |= IRQ_LEVEL;
+ set_irq_chip_and_handler(virq, &xics_pic_direct, handle_fasteoi_irq);
+ return 0;
+}
+
+static int xics_host_map_lpar(struct irq_host *h, unsigned int virq,
+ irq_hw_number_t hw, unsigned int flags)
+{
+ unsigned int sense = flags & IRQ_TYPE_SENSE_MASK;
+
+ pr_debug("xics: map_lpar virq %d, hwirq 0x%lx, flags: 0x%x\n",
+ virq, hw, flags);
+
+ if (sense && sense != IRQ_TYPE_LEVEL_LOW)
+ printk(KERN_WARNING "xics: using unsupported sense 0x%x"
+ " for irq %d (h: 0x%lx)\n", flags, virq, hw);
+
+ get_irq_desc(virq)->status |= IRQ_LEVEL;
+ set_irq_chip_and_handler(virq, &xics_pic_lpar, handle_fasteoi_irq);
+ return 0;
+}
+
+static int xics_host_xlate(struct irq_host *h, struct device_node *ct,
+ u32 *intspec, unsigned int intsize,
+ irq_hw_number_t *out_hwirq, unsigned int *out_flags)
+
+{
+ /* Current xics implementation translates everything
+ * to level. It is not technically right for MSIs but this
+ * is irrelevant at this point. We might get smarter in the future
*/
- rtas_set_indicator(GLOBAL_INTERRUPT_QUEUE,
- (1UL << interrupt_server_size) - 1 - default_distrib_server, 1);
+ *out_hwirq = intspec[0];
+ *out_flags = IRQ_TYPE_LEVEL_LOW;
+
+ return 0;
+}
+
+static struct irq_host_ops xics_host_direct_ops = {
+ .match = xics_host_match,
+ .map = xics_host_map_direct,
+ .xlate = xics_host_xlate,
+};
+
+static struct irq_host_ops xics_host_lpar_ops = {
+ .match = xics_host_match,
+ .map = xics_host_map_lpar,
+ .xlate = xics_host_xlate,
+};
+
+static void __init xics_init_host(void)
+{
+ struct irq_host_ops *ops;
+
+ if (firmware_has_feature(FW_FEATURE_LPAR))
+ ops = &xics_host_lpar_ops;
+ else
+ ops = &xics_host_direct_ops;
+ xics_host = irq_alloc_host(IRQ_HOST_MAP_TREE, 0, ops,
+ XICS_IRQ_SPURIOUS);
+ BUG_ON(xics_host == NULL);
+ irq_set_default_host(xics_host);
}
-void xics_init_IRQ(void)
+static void __init xics_map_one_cpu(int hw_id, unsigned long addr,
+ unsigned long size)
{
+#ifdef CONFIG_SMP
int i;
- unsigned long intr_size = 0;
- struct device_node *np;
- uint *ireg, ilen, indx = 0;
- unsigned long intr_base = 0;
- struct xics_interrupt_node {
- unsigned long addr;
- unsigned long size;
- } intnodes[NR_CPUS];
- struct irq_chip *chip;
- ppc64_boot_msg(0x20, "XICS Init");
+ /* This may look gross but it's good enough for now, we don't quite
+ * have a hard -> linux processor id matching.
+ */
+ for_each_possible_cpu(i) {
+ if (!cpu_present(i))
+ continue;
+ if (hw_id == get_hard_smp_processor_id(i)) {
+ xics_per_cpu[i] = ioremap(addr, size);
+ return;
+ }
+ }
+#else
+ if (hw_id != 0)
+ return;
+ xics_per_cpu[0] = ioremap(addr, size);
+#endif /* CONFIG_SMP */
+}
- ibm_get_xive = rtas_token("ibm,get-xive");
- ibm_set_xive = rtas_token("ibm,set-xive");
- ibm_int_on = rtas_token("ibm,int-on");
- ibm_int_off = rtas_token("ibm,int-off");
+static void __init xics_init_one_node(struct device_node *np,
+ unsigned int *indx)
+{
+ unsigned int ilen;
+ u32 *ireg;
- np = of_find_node_by_type(NULL, "PowerPC-External-Interrupt-Presentation");
- if (!np)
- panic("xics_init_IRQ: can't find interrupt presentation");
+ /* This code makes the theoretically broken assumption that the interrupt
+ * server numbers are the same as the hard CPU numbers.
+ * This happens to be the case so far but we are playing with fire...
+ * should be fixed one of these days. -BenH.
+ */
+ ireg = (u32 *)get_property(np, "ibm,interrupt-server-ranges", NULL);
-nextnode:
- ireg = (uint *)get_property(np, "ibm,interrupt-server-ranges", NULL);
+ /* Does that ever happen? We'll know soon enough... but even good old
+ * f80 does have that property ..
+ */
+ WARN_ON(ireg == NULL);
if (ireg) {
/*
* set node starting index for this node
*/
- indx = *ireg;
+ *indx = *ireg;
}
-
- ireg = (uint *)get_property(np, "reg", &ilen);
+ ireg = (u32 *)get_property(np, "reg", &ilen);
if (!ireg)
panic("xics_init_IRQ: can't find interrupt reg property");
- while (ilen) {
- intnodes[indx].addr = (unsigned long)*ireg++ << 32;
- ilen -= sizeof(uint);
- intnodes[indx].addr |= *ireg++;
- ilen -= sizeof(uint);
- intnodes[indx].size = (unsigned long)*ireg++ << 32;
- ilen -= sizeof(uint);
- intnodes[indx].size |= *ireg++;
- ilen -= sizeof(uint);
- indx++;
- if (indx >= NR_CPUS) break;
+ while (ilen >= (4 * sizeof(u32))) {
+ unsigned long addr, size;
+
+ /* XXX Use proper OF parsing code here !!! */
+ addr = (unsigned long)*ireg++ << 32;
+ ilen -= sizeof(u32);
+ addr |= *ireg++;
+ ilen -= sizeof(u32);
+ size = (unsigned long)*ireg++ << 32;
+ ilen -= sizeof(u32);
+ size |= *ireg++;
+ ilen -= sizeof(u32);
+ xics_map_one_cpu(*indx, addr, size);
+ (*indx)++;
+ }
+}
+
+
+static void __init xics_setup_8259_cascade(void)
+{
+ struct device_node *np, *old, *found = NULL;
+ int cascade, naddr;
+ u32 *addrp;
+ unsigned long intack = 0;
+
+ for_each_node_by_type(np, "interrupt-controller")
+ if (device_is_compatible(np, "chrp,iic")) {
+ found = np;
+ break;
+ }
+ if (found == NULL) {
+ printk(KERN_DEBUG "xics: no ISA interrupt controller\n");
+ return;
}
+ cascade = irq_of_parse_and_map(found, 0);
+ if (cascade == NO_IRQ) {
+ printk(KERN_ERR "xics: failed to map cascade interrupt");
+ return;
+ }
+ pr_debug("xics: cascade mapped to irq %d\n", cascade);
+
+ for (old = of_node_get(found); old != NULL ; old = np) {
+ np = of_get_parent(old);
+ of_node_put(old);
+ if (np == NULL)
+ break;
+ if (strcmp(np->name, "pci") != 0)
+ continue;
+ addrp = (u32 *)get_property(np, "8259-interrupt-acknowledge", NULL);
+ if (addrp == NULL)
+ continue;
+ naddr = prom_n_addr_cells(np);
+ intack = addrp[naddr-1];
+ if (naddr > 1)
+ intack |= ((unsigned long)addrp[naddr-2]) << 32;
+ }
+ if (intack)
+ printk(KERN_DEBUG "xics: PCI 8259 intack at 0x%016lx\n", intack);
+ i8259_init(found, intack);
+ of_node_put(found);
+ set_irq_chained_handler(cascade, pseries_8259_cascade);
+}
- np = of_find_node_by_type(np, "PowerPC-External-Interrupt-Presentation");
- if ((indx < NR_CPUS) && np) goto nextnode;
+void __init xics_init_IRQ(void)
+{
+ int i;
+ struct device_node *np;
+ u32 *ireg, ilen, indx = 0;
+ int found = 0;
+
+ ppc64_boot_msg(0x20, "XICS Init");
+
+ ibm_get_xive = rtas_token("ibm,get-xive");
+ ibm_set_xive = rtas_token("ibm,set-xive");
+ ibm_int_on = rtas_token("ibm,int-on");
+ ibm_int_off = rtas_token("ibm,int-off");
+
+ for_each_node_by_type(np, "PowerPC-External-Interrupt-Presentation") {
+ found = 1;
+ if (firmware_has_feature(FW_FEATURE_LPAR))
+ break;
+ xics_init_one_node(np, &indx);
+ }
+ if (found == 0)
+ return;
+
+ xics_init_host();
/* Find the server numbers for the boot cpu. */
for (np = of_find_node_by_type(NULL, "cpu");
np;
np = of_find_node_by_type(np, "cpu")) {
- ireg = (uint *)get_property(np, "reg", &ilen);
+ ireg = (u32 *)get_property(np, "reg", &ilen);
if (ireg && ireg[0] == get_hard_smp_processor_id(boot_cpuid)) {
- ireg = (uint *)get_property(np, "ibm,ppc-interrupt-gserver#s",
- &ilen);
+ ireg = (u32 *)get_property(np,
+ "ibm,ppc-interrupt-gserver#s",
+ &ilen);
i = ilen / sizeof(int);
if (ireg && i > 0) {
default_server = ireg[0];
- default_distrib_server = ireg[i-1]; /* take last element */
+ /* take last element */
+ default_distrib_server = ireg[i-1];
}
- ireg = (uint *)get_property(np,
+ ireg = (u32 *)get_property(np,
"ibm,interrupt-server#-size", NULL);
if (ireg)
interrupt_server_size = *ireg;
}
of_node_put(np);
- intr_base = intnodes[0].addr;
- intr_size = intnodes[0].size;
-
- if (firmware_has_feature(FW_FEATURE_LPAR)) {
- ppc_md.get_irq = xics_get_irq_lpar;
- chip = &xics_pic_lpar;
- } else {
-#ifdef CONFIG_SMP
- for_each_possible_cpu(i) {
- int hard_id;
-
- /* FIXME: Do this dynamically! --RR */
- if (!cpu_present(i))
- continue;
-
- hard_id = get_hard_smp_processor_id(i);
- xics_per_cpu[i] = ioremap(intnodes[hard_id].addr,
- intnodes[hard_id].size);
- }
-#else
- xics_per_cpu[0] = ioremap(intr_base, intr_size);
-#endif /* CONFIG_SMP */
+ if (firmware_has_feature(FW_FEATURE_LPAR))
+ ppc_md.get_irq = xics_get_irq_lpar;
+ else
ppc_md.get_irq = xics_get_irq_direct;
- chip = &xics_pic_direct;
-
- }
-
- for (i = irq_offset_value(); i < NR_IRQS; ++i) {
- /* All IRQs on XICS are level for now. MSI code may want to modify
- * that for reporting purposes
- */
- get_irq_desc(i)->status |= IRQ_LEVEL;
- set_irq_chip_and_handler(i, chip, handle_fasteoi_irq);
- }
xics_setup_cpu();
- ppc64_boot_msg(0x21, "XICS Done");
-}
+ xics_setup_8259_cascade();
-static int xics_setup_8259_cascade(void)
-{
- struct device_node *np;
- uint *ireg;
-
- np = of_find_node_by_type(NULL, "interrupt-controller");
- if (np == NULL) {
- printk(KERN_WARNING "xics: no ISA interrupt controller\n");
- xics_irq_8259_cascade_real = -1;
- xics_irq_8259_cascade = -1;
- return 0;
- }
-
- ireg = (uint *) get_property(np, "interrupts", NULL);
- if (!ireg)
- panic("xics_init_IRQ: can't find ISA interrupts property");
-
- xics_irq_8259_cascade_real = *ireg;
- xics_irq_8259_cascade = irq_offset_up
- (virt_irq_create_mapping(xics_irq_8259_cascade_real));
- i8259_init(0, 0);
- of_node_put(np);
-
- xics_set_irq_revmap(xics_irq_8259_cascade);
- set_irq_chained_handler(xics_irq_8259_cascade, pSeries_8259_cascade);
-
- return 0;
+ ppc64_boot_msg(0x21, "XICS Done");
}
-arch_initcall(xics_setup_8259_cascade);
#ifdef CONFIG_SMP
void xics_request_IPIs(void)
{
- virt_irq_to_real_map[XICS_IPI] = XICS_IPI;
+ unsigned int ipi;
+
+ ipi = irq_create_mapping(xics_host, XICS_IPI, 0);
+ BUG_ON(ipi == NO_IRQ);
/*
* IPIs are marked IRQF_DISABLED as they must run with irqs
* disabled
*/
- set_irq_handler(irq_offset_up(XICS_IPI), handle_percpu_irq);
+ set_irq_handler(ipi, handle_percpu_irq);
if (firmware_has_feature(FW_FEATURE_LPAR))
- request_irq(irq_offset_up(XICS_IPI), xics_ipi_action_lpar,
- SA_INTERRUPT, "IPI", NULL);
+ request_irq(ipi, xics_ipi_action_lpar, IRQF_DISABLED,
+ "IPI", NULL);
else
- request_irq(irq_offset_up(XICS_IPI), xics_ipi_action_direct,
- SA_INTERRUPT, "IPI", NULL);
+ request_irq(ipi, xics_ipi_action_direct, IRQF_DISABLED,
+ "IPI", NULL);
}
#endif /* CONFIG_SMP */
void xics_teardown_cpu(int secondary)
{
- struct irq_desc *desc = get_irq_desc(irq_offset_up(XICS_IPI));
int cpu = smp_processor_id();
+ unsigned int ipi;
+ struct irq_desc *desc;
- xics_set_cpu_priority(cpu, 0);
+ xics_set_cpu_priority(cpu, 0);
/*
* we need to EOI the IPI if we got here from kexec down IPI
* should we be flagging idle loop instead?
* or creating some task to be scheduled?
*/
+
+ ipi = irq_find_mapping(xics_host, XICS_IPI);
+ if (ipi == XICS_IRQ_SPURIOUS)
+ return;
+ desc = get_irq_desc(ipi);
if (desc->chip && desc->chip->eoi)
desc->chip->eoi(XICS_IPI);
*/
if (secondary)
rtas_set_indicator(GLOBAL_INTERRUPT_QUEUE,
- (1UL << interrupt_server_size) - 1 -
- default_distrib_server, 0);
+ (1UL << interrupt_server_size) - 1 -
+ default_distrib_server, 0);
}
#ifdef CONFIG_HOTPLUG_CPU
unsigned long flags;
/* We cant set affinity on ISA interrupts */
- if (virq < irq_offset_value())
+ if (virq < NUM_ISA_INTERRUPTS)
continue;
-
- desc = get_irq_desc(virq);
- irq = virt_irq_to_real(irq_offset_down(virq));
-
+ if (irq_map[virq].host != xics_host)
+ continue;
+ irq = (unsigned int)irq_map[virq].hwirq;
/* We need to get IPIs still. */
- if (irq == XICS_IPI || irq == NO_IRQ)
+ if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
continue;
+ desc = get_irq_desc(virq);
/* We only need to migrate enabled IRQS */
if (desc == NULL || desc->chip == NULL
extern struct xics_ipi_struct xics_ipi_message[NR_CPUS] __cacheline_aligned;
struct irq_desc;
-extern void pSeries_8259_cascade(unsigned int irq, struct irq_desc *desc,
+extern void pseries_8259_cascade(unsigned int irq, struct irq_desc *desc,
struct pt_regs *regs);
#endif /* _POWERPC_KERNEL_XICS_H */
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
+#undef DEBUG
+
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
#include <asm/io.h>
#include <asm/i8259.h>
+#include <asm/prom.h>
static volatile void __iomem *pci_intack; /* RO, gives us the irq vector */
static DEFINE_SPINLOCK(i8259_lock);
-static int i8259_pic_irq_offset;
+static struct device_node *i8259_node;
+static struct irq_host *i8259_host;
/*
* Acknowledge the IRQ using either the PCI host bridge's interrupt
* which is called. It should be noted that polling is broken on some
* IBM and Motorola PReP boxes so we must use the int-ack feature on them.
*/
-int i8259_irq(struct pt_regs *regs)
+unsigned int i8259_irq(struct pt_regs *regs)
{
int irq;
-
- spin_lock(&i8259_lock);
+ int lock = 0;
/* Either int-ack or poll for the IRQ */
if (pci_intack)
irq = readb(pci_intack);
else {
+ spin_lock(&i8259_lock);
+ lock = 1;
+
/* Perform an interrupt acknowledge cycle on controller 1. */
outb(0x0C, 0x20); /* prepare for poll */
irq = inb(0x20) & 7;
if (!pci_intack)
outb(0x0B, 0x20); /* ISR register */
if(~inb(0x20) & 0x80)
- irq = -1;
- }
+ irq = NO_IRQ;
+ } else if (irq == 0xff)
+ irq = NO_IRQ;
- spin_unlock(&i8259_lock);
- return irq + i8259_pic_irq_offset;
+ if (lock)
+ spin_unlock(&i8259_lock);
+ return irq;
}
static void i8259_mask_and_ack_irq(unsigned int irq_nr)
unsigned long flags;
spin_lock_irqsave(&i8259_lock, flags);
- irq_nr -= i8259_pic_irq_offset;
if (irq_nr > 7) {
cached_A1 |= 1 << (irq_nr-8);
inb(0xA1); /* DUMMY */
{
unsigned long flags;
+ pr_debug("i8259_mask_irq(%d)\n", irq_nr);
+
spin_lock_irqsave(&i8259_lock, flags);
- irq_nr -= i8259_pic_irq_offset;
if (irq_nr < 8)
cached_21 |= 1 << irq_nr;
else
{
unsigned long flags;
+ pr_debug("i8259_unmask_irq(%d)\n", irq_nr);
+
spin_lock_irqsave(&i8259_lock, flags);
- irq_nr -= i8259_pic_irq_offset;
if (irq_nr < 8)
cached_21 &= ~(1 << irq_nr);
else
.flags = IORESOURCE_BUSY,
};
-static struct irqaction i8259_irqaction = {
- .handler = no_action,
- .flags = IRQF_DISABLED,
- .mask = CPU_MASK_NONE,
- .name = "82c59 secondary cascade",
+static int i8259_host_match(struct irq_host *h, struct device_node *node)
+{
+ return i8259_node == NULL || i8259_node == node;
+}
+
+static int i8259_host_map(struct irq_host *h, unsigned int virq,
+ irq_hw_number_t hw, unsigned int flags)
+{
+ pr_debug("i8259_host_map(%d, 0x%lx)\n", virq, hw);
+
+ /* We block the internal cascade */
+ if (hw == 2)
+ get_irq_desc(virq)->status |= IRQ_NOREQUEST;
+
+ /* We use the level stuff only for now, we might want to
+ * be more cautious here but that works for now
+ */
+ get_irq_desc(virq)->status |= IRQ_LEVEL;
+ set_irq_chip_and_handler(virq, &i8259_pic, handle_level_irq);
+ return 0;
+}
+
+static void i8259_host_unmap(struct irq_host *h, unsigned int virq)
+{
+ /* Make sure irq is masked in hardware */
+ i8259_mask_irq(virq);
+
+ /* remove chip and handler */
+ set_irq_chip_and_handler(virq, NULL, NULL);
+
+ /* Make sure it's completed */
+ synchronize_irq(virq);
+}
+
+static int i8259_host_xlate(struct irq_host *h, struct device_node *ct,
+ u32 *intspec, unsigned int intsize,
+ irq_hw_number_t *out_hwirq, unsigned int *out_flags)
+{
+ static unsigned char map_isa_senses[4] = {
+ IRQ_TYPE_LEVEL_LOW,
+ IRQ_TYPE_LEVEL_HIGH,
+ IRQ_TYPE_EDGE_FALLING,
+ IRQ_TYPE_EDGE_RISING,
+ };
+
+ *out_hwirq = intspec[0];
+ if (intsize > 1 && intspec[1] < 4)
+ *out_flags = map_isa_senses[intspec[1]];
+ else
+ *out_flags = IRQ_TYPE_NONE;
+
+ return 0;
+}
+
+static struct irq_host_ops i8259_host_ops = {
+ .match = i8259_host_match,
+ .map = i8259_host_map,
+ .unmap = i8259_host_unmap,
+ .xlate = i8259_host_xlate,
};
-/*
- * i8259_init()
- * intack_addr - PCI interrupt acknowledge (real) address which will return
- * the active irq from the 8259
+/****
+ * i8259_init - Initialize the legacy controller
+ * @node: device node of the legacy PIC (can be NULL, but then, it will match
+ * all interrupts, so beware)
+ * @intack_addr: PCI interrupt acknowledge (real) address which will return
+ * the active irq from the 8259
*/
-void __init i8259_init(unsigned long intack_addr, int offset)
+void i8259_init(struct device_node *node, unsigned long intack_addr)
{
unsigned long flags;
- int i;
+ /* initialize the controller */
spin_lock_irqsave(&i8259_lock, flags);
- i8259_pic_irq_offset = offset;
+
+ /* Mask all first */
+ outb(0xff, 0xA1);
+ outb(0xff, 0x21);
/* init master interrupt controller */
outb(0x11, 0x20); /* Start init sequence */
outb(0x02, 0xA1); /* edge triggered, Cascade (slave) on IRQ2 */
outb(0x01, 0xA1); /* Select 8086 mode */
+ /* That thing is slow */
+ udelay(100);
+
/* always read ISR */
outb(0x0B, 0x20);
outb(0x0B, 0xA0);
- /* Mask all interrupts */
+ /* Unmask the internal cascade */
+ cached_21 &= ~(1 << 2);
+
+ /* Set interrupt masks */
outb(cached_A1, 0xA1);
outb(cached_21, 0x21);
spin_unlock_irqrestore(&i8259_lock, flags);
- for (i = 0; i < NUM_ISA_INTERRUPTS; ++i) {
- set_irq_chip_and_handler(offset + i, &i8259_pic,
- handle_level_irq);
- irq_desc[offset + i].status |= IRQ_LEVEL;
+ /* create a legacy host */
+ if (node)
+ i8259_node = of_node_get(node);
+ i8259_host = irq_alloc_host(IRQ_HOST_MAP_LEGACY, 0, &i8259_host_ops, 0);
+ if (i8259_host == NULL) {
+ printk(KERN_ERR "i8259: failed to allocate irq host !\n");
+ return;
}
/* reserve our resources */
- setup_irq(offset + 2, &i8259_irqaction);
+ /* XXX should we continue doing that ? it seems to cause problems
+ * with further requesting of PCI IO resources for that range...
+ * need to look into it.
+ */
request_resource(&ioport_resource, &pic1_iores);
request_resource(&ioport_resource, &pic2_iores);
request_resource(&ioport_resource, &pic_edgectrl_iores);
if (intack_addr != 0)
pci_intack = ioremap(intack_addr, 1);
+ printk(KERN_INFO "i8259 legacy interrupt controller initialized\n");
}
#endif /* CONFIG_MPIC_BROKEN_U3 */
+#define mpic_irq_to_hw(virq) ((unsigned int)irq_map[virq].hwirq)
+
/* Find an mpic associated with a given linux interrupt */
static struct mpic *mpic_find(unsigned int irq, unsigned int *is_ipi)
{
- struct mpic *mpic = mpics;
-
- while(mpic) {
- /* search IPIs first since they may override the main interrupts */
- if (irq >= mpic->ipi_offset && irq < (mpic->ipi_offset + 4)) {
- if (is_ipi)
- *is_ipi = 1;
- return mpic;
- }
- if (irq >= mpic->irq_offset &&
- irq < (mpic->irq_offset + mpic->irq_count)) {
- if (is_ipi)
- *is_ipi = 0;
- return mpic;
- }
- mpic = mpic -> next;
- }
- return NULL;
+ unsigned int src = mpic_irq_to_hw(irq);
+
+ if (irq < NUM_ISA_INTERRUPTS)
+ return NULL;
+ if (is_ipi)
+ *is_ipi = (src >= MPIC_VEC_IPI_0 && src <= MPIC_VEC_IPI_3);
+
+ return irq_desc[irq].chip_data;
}
/* Convert a cpu mask from logical to physical cpu numbers. */
#ifdef CONFIG_SMP
static irqreturn_t mpic_ipi_action(int irq, void *dev_id, struct pt_regs *regs)
{
- struct mpic *mpic = dev_id;
-
- smp_message_recv(irq - mpic->ipi_offset, regs);
+ smp_message_recv(mpic_irq_to_hw(irq) - MPIC_VEC_IPI_0, regs);
return IRQ_HANDLED;
}
#endif /* CONFIG_SMP */
{
unsigned int loops = 100000;
struct mpic *mpic = mpic_from_irq(irq);
- unsigned int src = irq - mpic->irq_offset;
+ unsigned int src = mpic_irq_to_hw(irq);
DBG("%p: %s: enable_irq: %d (src %d)\n", mpic, mpic->name, irq, src);
{
unsigned int loops = 100000;
struct mpic *mpic = mpic_from_irq(irq);
- unsigned int src = irq - mpic->irq_offset;
+ unsigned int src = mpic_irq_to_hw(irq);
DBG("%s: disable_irq: %d (src %d)\n", mpic->name, irq, src);
static void mpic_unmask_ht_irq(unsigned int irq)
{
struct mpic *mpic = mpic_from_irq(irq);
- unsigned int src = irq - mpic->irq_offset;
+ unsigned int src = mpic_irq_to_hw(irq);
mpic_unmask_irq(irq);
static unsigned int mpic_startup_ht_irq(unsigned int irq)
{
struct mpic *mpic = mpic_from_irq(irq);
- unsigned int src = irq - mpic->irq_offset;
+ unsigned int src = mpic_irq_to_hw(irq);
mpic_unmask_irq(irq);
mpic_startup_ht_interrupt(mpic, src, irq_desc[irq].status);
static void mpic_shutdown_ht_irq(unsigned int irq)
{
struct mpic *mpic = mpic_from_irq(irq);
- unsigned int src = irq - mpic->irq_offset;
+ unsigned int src = mpic_irq_to_hw(irq);
mpic_shutdown_ht_interrupt(mpic, src, irq_desc[irq].status);
mpic_mask_irq(irq);
static void mpic_end_ht_irq(unsigned int irq)
{
struct mpic *mpic = mpic_from_irq(irq);
- unsigned int src = irq - mpic->irq_offset;
+ unsigned int src = mpic_irq_to_hw(irq);
#ifdef DEBUG_IRQ
DBG("%s: end_irq: %d\n", mpic->name, irq);
static void mpic_unmask_ipi(unsigned int irq)
{
struct mpic *mpic = mpic_from_ipi(irq);
- unsigned int src = irq - mpic->ipi_offset;
+ unsigned int src = mpic_irq_to_hw(irq) - MPIC_VEC_IPI_0;
DBG("%s: enable_ipi: %d (ipi %d)\n", mpic->name, irq, src);
mpic_ipi_write(src, mpic_ipi_read(src) & ~MPIC_VECPRI_MASK);
static void mpic_set_affinity(unsigned int irq, cpumask_t cpumask)
{
struct mpic *mpic = mpic_from_irq(irq);
+ unsigned int src = mpic_irq_to_hw(irq);
cpumask_t tmp;
cpus_and(tmp, cpumask, cpu_online_map);
- mpic_irq_write(irq - mpic->irq_offset, MPIC_IRQ_DESTINATION,
+ mpic_irq_write(src, MPIC_IRQ_DESTINATION,
mpic_physmask(cpus_addr(tmp)[0]));
}
+static unsigned int mpic_flags_to_vecpri(unsigned int flags, int *level)
+{
+ unsigned int vecpri;
+
+ /* Now convert sense value */
+ switch(flags & IRQ_TYPE_SENSE_MASK) {
+ case IRQ_TYPE_EDGE_RISING:
+ vecpri = MPIC_VECPRI_SENSE_EDGE |
+ MPIC_VECPRI_POLARITY_POSITIVE;
+ *level = 0;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ vecpri = MPIC_VECPRI_SENSE_EDGE |
+ MPIC_VECPRI_POLARITY_NEGATIVE;
+ *level = 0;
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ vecpri = MPIC_VECPRI_SENSE_LEVEL |
+ MPIC_VECPRI_POLARITY_POSITIVE;
+ *level = 1;
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ default:
+ vecpri = MPIC_VECPRI_SENSE_LEVEL |
+ MPIC_VECPRI_POLARITY_NEGATIVE;
+ *level = 1;
+ }
+ return vecpri;
+}
+
static struct irq_chip mpic_irq_chip = {
.mask = mpic_mask_irq,
.unmask = mpic_unmask_irq,
#endif /* CONFIG_MPIC_BROKEN_U3 */
+static int mpic_host_match(struct irq_host *h, struct device_node *node)
+{
+ struct mpic *mpic = h->host_data;
+
+ /* Exact match, unless mpic node is NULL */
+ return mpic->of_node == NULL || mpic->of_node == node;
+}
+
+static int mpic_host_map(struct irq_host *h, unsigned int virq,
+ irq_hw_number_t hw, unsigned int flags)
+{
+ struct irq_desc *desc = get_irq_desc(virq);
+ struct irq_chip *chip;
+ struct mpic *mpic = h->host_data;
+ unsigned int vecpri = MPIC_VECPRI_SENSE_LEVEL |
+ MPIC_VECPRI_POLARITY_NEGATIVE;
+ int level;
+
+ pr_debug("mpic: map virq %d, hwirq 0x%lx, flags: 0x%x\n",
+ virq, hw, flags);
+
+ if (hw == MPIC_VEC_SPURRIOUS)
+ return -EINVAL;
+#ifdef CONFIG_SMP
+ else if (hw >= MPIC_VEC_IPI_0) {
+ WARN_ON(!(mpic->flags & MPIC_PRIMARY));
+
+ pr_debug("mpic: mapping as IPI\n");
+ set_irq_chip_data(virq, mpic);
+ set_irq_chip_and_handler(virq, &mpic->hc_ipi,
+ handle_percpu_irq);
+ return 0;
+ }
+#endif /* CONFIG_SMP */
+
+ if (hw >= mpic->irq_count)
+ return -EINVAL;
+
+ /* If no sense provided, check default sense array */
+ if (((flags & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_NONE) &&
+ mpic->senses && hw < mpic->senses_count)
+ flags |= mpic->senses[hw];
+
+ vecpri = mpic_flags_to_vecpri(flags, &level);
+ if (level)
+ desc->status |= IRQ_LEVEL;
+ chip = &mpic->hc_irq;
+
+#ifdef CONFIG_MPIC_BROKEN_U3
+ /* Check for HT interrupts, override vecpri */
+ if (mpic_is_ht_interrupt(mpic, hw)) {
+ vecpri &= ~(MPIC_VECPRI_SENSE_MASK |
+ MPIC_VECPRI_POLARITY_MASK);
+ vecpri |= MPIC_VECPRI_POLARITY_POSITIVE;
+ chip = &mpic->hc_ht_irq;
+ }
+#endif
+
+ /* Reconfigure irq */
+ vecpri |= MPIC_VECPRI_MASK | hw | (8 << MPIC_VECPRI_PRIORITY_SHIFT);
+ mpic_irq_write(hw, MPIC_IRQ_VECTOR_PRI, vecpri);
+
+ pr_debug("mpic: mapping as IRQ\n");
+
+ set_irq_chip_data(virq, mpic);
+ set_irq_chip_and_handler(virq, chip, handle_fasteoi_irq);
+ return 0;
+}
+
+static int mpic_host_xlate(struct irq_host *h, struct device_node *ct,
+ u32 *intspec, unsigned int intsize,
+ irq_hw_number_t *out_hwirq, unsigned int *out_flags)
+
+{
+ static unsigned char map_mpic_senses[4] = {
+ IRQ_TYPE_EDGE_RISING,
+ IRQ_TYPE_LEVEL_LOW,
+ IRQ_TYPE_LEVEL_HIGH,
+ IRQ_TYPE_EDGE_FALLING,
+ };
+
+ *out_hwirq = intspec[0];
+ if (intsize > 1 && intspec[1] < 4)
+ *out_flags = map_mpic_senses[intspec[1]];
+ else
+ *out_flags = IRQ_TYPE_NONE;
+
+ return 0;
+}
+
+static struct irq_host_ops mpic_host_ops = {
+ .match = mpic_host_match,
+ .map = mpic_host_map,
+ .xlate = mpic_host_xlate,
+};
+
/*
* Exported functions
*/
-
-struct mpic * __init mpic_alloc(unsigned long phys_addr,
+struct mpic * __init mpic_alloc(struct device_node *node,
+ unsigned long phys_addr,
unsigned int flags,
unsigned int isu_size,
- unsigned int irq_offset,
unsigned int irq_count,
- unsigned int ipi_offset,
- unsigned char *senses,
- unsigned int senses_count,
const char *name)
{
struct mpic *mpic;
if (mpic == NULL)
return NULL;
-
memset(mpic, 0, sizeof(struct mpic));
mpic->name = name;
+ mpic->of_node = node ? of_node_get(node) : NULL;
+ mpic->irqhost = irq_alloc_host(IRQ_HOST_MAP_LINEAR, 256,
+ &mpic_host_ops,
+ MPIC_VEC_SPURRIOUS);
+ if (mpic->irqhost == NULL) {
+ of_node_put(node);
+ return NULL;
+ }
+
+ mpic->irqhost->host_data = mpic;
mpic->hc_irq = mpic_irq_chip;
mpic->hc_irq.typename = name;
if (flags & MPIC_PRIMARY)
mpic->hc_ht_irq.set_affinity = mpic_set_affinity;
#endif /* CONFIG_MPIC_BROKEN_U3 */
#ifdef CONFIG_SMP
- mpic->hc_ipi.typename = name;
mpic->hc_ipi = mpic_ipi_chip;
+ mpic->hc_ipi.typename = name;
#endif /* CONFIG_SMP */
mpic->flags = flags;
mpic->isu_size = isu_size;
- mpic->irq_offset = irq_offset;
mpic->irq_count = irq_count;
- mpic->ipi_offset = ipi_offset;
mpic->num_sources = 0; /* so far */
- mpic->senses = senses;
- mpic->senses_count = senses_count;
/* Map the global registers */
mpic->gregs = ioremap(phys_addr + MPIC_GREG_BASE, 0x1000);
mpic->next = mpics;
mpics = mpic;
- if (flags & MPIC_PRIMARY)
+ if (flags & MPIC_PRIMARY) {
mpic_primary = mpic;
+ irq_set_default_host(mpic->irqhost);
+ }
return mpic;
}
mpic->num_sources = isu_first + mpic->isu_size;
}
+void __init mpic_set_default_senses(struct mpic *mpic, u8 *senses, int count)
+{
+ mpic->senses = senses;
+ mpic->senses_count = count;
+}
+
void __init mpic_init(struct mpic *mpic)
{
int i;
BUG_ON(mpic->num_sources == 0);
+ WARN_ON(mpic->num_sources > MPIC_VEC_IPI_0);
+
+ /* Sanitize source count */
+ if (mpic->num_sources > MPIC_VEC_IPI_0)
+ mpic->num_sources = MPIC_VEC_IPI_0;
printk(KERN_INFO "mpic: Initializing for %d sources\n", mpic->num_sources);
MPIC_VECPRI_MASK |
(10 << MPIC_VECPRI_PRIORITY_SHIFT) |
(MPIC_VEC_IPI_0 + i));
-#ifdef CONFIG_SMP
- if (!(mpic->flags & MPIC_PRIMARY))
- continue;
- set_irq_chip_data(mpic->ipi_offset+i, mpic);
- set_irq_chip_and_handler(mpic->ipi_offset+i,
- &mpic->hc_ipi,
- handle_percpu_irq);
-#endif /* CONFIG_SMP */
}
/* Initialize interrupt sources */
for (i = 0; i < mpic->num_sources; i++) {
/* start with vector = source number, and masked */
u32 vecpri = MPIC_VECPRI_MASK | i | (8 << MPIC_VECPRI_PRIORITY_SHIFT);
- int level = 0;
+ int level = 1;
- /* if it's an IPI, we skip it */
- if ((mpic->irq_offset + i) >= (mpic->ipi_offset + i) &&
- (mpic->irq_offset + i) < (mpic->ipi_offset + i + 4))
- continue;
-
/* do senses munging */
- if (mpic->senses && i < mpic->senses_count) {
- if (mpic->senses[i] & IRQ_SENSE_LEVEL)
- vecpri |= MPIC_VECPRI_SENSE_LEVEL;
- if (mpic->senses[i] & IRQ_POLARITY_POSITIVE)
- vecpri |= MPIC_VECPRI_POLARITY_POSITIVE;
- } else
+ if (mpic->senses && i < mpic->senses_count)
+ vecpri = mpic_flags_to_vecpri(mpic->senses[i],
+ &level);
+ else
vecpri |= MPIC_VECPRI_SENSE_LEVEL;
- /* remember if it was a level interrupts */
- level = (vecpri & MPIC_VECPRI_SENSE_LEVEL);
-
/* deal with broken U3 */
if (mpic->flags & MPIC_BROKEN_U3) {
#ifdef CONFIG_MPIC_BROKEN_U3
mpic_irq_write(i, MPIC_IRQ_VECTOR_PRI, vecpri);
mpic_irq_write(i, MPIC_IRQ_DESTINATION,
1 << hard_smp_processor_id());
-
- /* init linux descriptors */
- if (i < mpic->irq_count) {
- struct irq_chip *chip = &mpic->hc_irq;
-
- irq_desc[mpic->irq_offset+i].status |=
- level ? IRQ_LEVEL : 0;
-#ifdef CONFIG_MPIC_BROKEN_U3
- if (mpic_is_ht_interrupt(mpic, i))
- chip = &mpic->hc_ht_irq;
-#endif /* CONFIG_MPIC_BROKEN_U3 */
- set_irq_chip_data(mpic->irq_offset+i, mpic);
- set_irq_chip_and_handler(mpic->irq_offset+i, chip,
- handle_fasteoi_irq);
- }
}
/* Init spurrious vector */
{
int is_ipi;
struct mpic *mpic = mpic_find(irq, &is_ipi);
+ unsigned int src = mpic_irq_to_hw(irq);
unsigned long flags;
u32 reg;
spin_lock_irqsave(&mpic_lock, flags);
if (is_ipi) {
- reg = mpic_ipi_read(irq - mpic->ipi_offset) &
+ reg = mpic_ipi_read(src - MPIC_VEC_IPI_0) &
~MPIC_VECPRI_PRIORITY_MASK;
- mpic_ipi_write(irq - mpic->ipi_offset,
+ mpic_ipi_write(src - MPIC_VEC_IPI_0,
reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT));
} else {
- reg = mpic_irq_read(irq - mpic->irq_offset,MPIC_IRQ_VECTOR_PRI)
+ reg = mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI)
& ~MPIC_VECPRI_PRIORITY_MASK;
- mpic_irq_write(irq - mpic->irq_offset, MPIC_IRQ_VECTOR_PRI,
+ mpic_irq_write(src, MPIC_IRQ_VECTOR_PRI,
reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT));
}
spin_unlock_irqrestore(&mpic_lock, flags);
{
int is_ipi;
struct mpic *mpic = mpic_find(irq, &is_ipi);
+ unsigned int src = mpic_irq_to_hw(irq);
unsigned long flags;
u32 reg;
spin_lock_irqsave(&mpic_lock, flags);
if (is_ipi)
- reg = mpic_ipi_read(irq - mpic->ipi_offset);
+ reg = mpic_ipi_read(src - MPIC_VEC_IPI_0);
else
- reg = mpic_irq_read(irq - mpic->irq_offset, MPIC_IRQ_VECTOR_PRI);
+ reg = mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI);
spin_unlock_irqrestore(&mpic_lock, flags);
return (reg & MPIC_VECPRI_PRIORITY_MASK) >> MPIC_VECPRI_PRIORITY_SHIFT;
}
mpic_physmask(cpu_mask & cpus_addr(cpu_online_map)[0]));
}
-int mpic_get_one_irq(struct mpic *mpic, struct pt_regs *regs)
+unsigned int mpic_get_one_irq(struct mpic *mpic, struct pt_regs *regs)
{
- u32 irq;
+ u32 src;
- irq = mpic_cpu_read(MPIC_CPU_INTACK) & MPIC_VECPRI_VECTOR_MASK;
+ src = mpic_cpu_read(MPIC_CPU_INTACK) & MPIC_VECPRI_VECTOR_MASK;
#ifdef DEBUG_LOW
- DBG("%s: get_one_irq(): %d\n", mpic->name, irq);
-#endif
- if (unlikely(irq == MPIC_VEC_SPURRIOUS))
- return -1;
- if (irq < MPIC_VEC_IPI_0) {
-#ifdef DEBUG_IRQ
- DBG("%s: irq %d\n", mpic->name, irq + mpic->irq_offset);
+ DBG("%s: get_one_irq(): %d\n", mpic->name, src);
#endif
- return irq + mpic->irq_offset;
- }
-#ifdef DEBUG_IPI
- DBG("%s: ipi %d !\n", mpic->name, irq - MPIC_VEC_IPI_0);
-#endif
- return irq - MPIC_VEC_IPI_0 + mpic->ipi_offset;
+ if (unlikely(src == MPIC_VEC_SPURRIOUS))
+ return NO_IRQ;
+ return irq_linear_revmap(mpic->irqhost, src);
}
-int mpic_get_irq(struct pt_regs *regs)
+unsigned int mpic_get_irq(struct pt_regs *regs)
{
struct mpic *mpic = mpic_primary;
void mpic_request_ipis(void)
{
struct mpic *mpic = mpic_primary;
-
+ int i;
+ static char *ipi_names[] = {
+ "IPI0 (call function)",
+ "IPI1 (reschedule)",
+ "IPI2 (unused)",
+ "IPI3 (debugger break)",
+ };
BUG_ON(mpic == NULL);
-
- printk("requesting IPIs ... \n");
- /*
- * IPIs are marked IRQF_DISABLED as they must run with irqs
- * disabled
- */
- request_irq(mpic->ipi_offset+0, mpic_ipi_action, IRQF_DISABLED,
- "IPI0 (call function)", mpic);
- request_irq(mpic->ipi_offset+1, mpic_ipi_action, IRQF_DISABLED,
- "IPI1 (reschedule)", mpic);
- request_irq(mpic->ipi_offset+2, mpic_ipi_action, IRQF_DISABLED,
- "IPI2 (unused)", mpic);
- request_irq(mpic->ipi_offset+3, mpic_ipi_action, IRQF_DISABLED,
- "IPI3 (debugger break)", mpic);
-
- printk("IPIs requested... \n");
+ printk(KERN_INFO "mpic: requesting IPIs ... \n");
+
+ for (i = 0; i < 4; i++) {
+ unsigned int vipi = irq_create_mapping(mpic->irqhost,
+ MPIC_VEC_IPI_0 + i, 0);
+ if (vipi == NO_IRQ) {
+ printk(KERN_ERR "Failed to map IPI %d\n", i);
+ break;
+ }
+ request_irq(vipi, mpic_ipi_action, IRQF_DISABLED,
+ ipi_names[i], mpic);
+ }
}
void smp_mpic_message_pass(int target, int msg)
hp->inbuf_end = hp->inbuf;
hp->state = HVSI_CLOSED;
hp->vtermno = *vtermno;
- hp->virq = virt_irq_create_mapping(irq[0]);
+ hp->virq = irq_create_mapping(NULL, irq[0], 0);
if (hp->virq == NO_IRQ) {
printk(KERN_ERR "%s: couldn't create irq mapping for 0x%x\n",
- __FUNCTION__, hp->virq);
+ __FUNCTION__, irq[0]);
continue;
- } else
- hp->virq = irq_offset_up(hp->virq);
+ }
hvsi_count++;
}
{
struct device_node *adbs;
struct resource r;
+ unsigned int irq;
adbs = find_compatible_devices("adb", "chrp,adb0");
if (adbs == 0)
return -ENXIO;
-#if 0
- { int i = 0;
-
- printk("macio_adb_init: node = %p, addrs =", adbs->node);
- while(!of_address_to_resource(adbs, i, &r))
- printk(" %x(%x)", r.start, r.end - r.start);
- printk(", intrs =");
- for (i = 0; i < adbs->n_intrs; ++i)
- printk(" %x", adbs->intrs[i].line);
- printk("\n"); }
-#endif
if (of_address_to_resource(adbs, 0, &r))
return -ENXIO;
adb = ioremap(r.start, sizeof(struct adb_regs));
out_8(&adb->active_lo.r, 0xff);
out_8(&adb->autopoll.r, APE);
- if (request_irq(adbs->intrs[0].line, macio_adb_interrupt,
- 0, "ADB", (void *)0)) {
- printk(KERN_ERR "ADB: can't get irq %d\n",
- adbs->intrs[0].line);
+ irq = irq_of_parse_and_map(adbs, 0);
+ if (request_irq(irq, macio_adb_interrupt, 0, "ADB", (void *)0)) {
+ printk(KERN_ERR "ADB: can't get irq %d\n", irq);
return -EAGAIN;
}
out_8(&adb->intr_enb.r, DFB | TAG);
static int macio_resource_quirks(struct device_node *np, struct resource *res,
int index)
{
- if (res->flags & IORESOURCE_MEM) {
- /* Grand Central has too large resource 0 on some machines */
- if (index == 0 && !strcmp(np->name, "gc"))
- res->end = res->start + 0x1ffff;
+ /* Only quirks for memory resources for now */
+ if ((res->flags & IORESOURCE_MEM) == 0)
+ return 0;
+
+ /* Grand Central has too large resource 0 on some machines */
+ if (index == 0 && !strcmp(np->name, "gc"))
+ res->end = res->start + 0x1ffff;
- /* Airport has bogus resource 2 */
- if (index >= 2 && !strcmp(np->name, "radio"))
- return 1;
+ /* Airport has bogus resource 2 */
+ if (index >= 2 && !strcmp(np->name, "radio"))
+ return 1;
#ifndef CONFIG_PPC64
- /* DBDMAs may have bogus sizes */
- if ((res->start & 0x0001f000) == 0x00008000)
- res->end = res->start + 0xff;
+ /* DBDMAs may have bogus sizes */
+ if ((res->start & 0x0001f000) == 0x00008000)
+ res->end = res->start + 0xff;
#endif /* CONFIG_PPC64 */
- /* ESCC parent eats child resources. We could have added a
- * level of hierarchy, but I don't really feel the need
- * for it
- */
- if (!strcmp(np->name, "escc"))
- return 1;
-
- /* ESCC has bogus resources >= 3 */
- if (index >= 3 && !(strcmp(np->name, "ch-a") &&
- strcmp(np->name, "ch-b")))
- return 1;
-
- /* Media bay has too many resources, keep only first one */
- if (index > 0 && !strcmp(np->name, "media-bay"))
- return 1;
-
- /* Some older IDE resources have bogus sizes */
- if (!(strcmp(np->name, "IDE") && strcmp(np->name, "ATA") &&
- strcmp(np->type, "ide") && strcmp(np->type, "ata"))) {
- if (index == 0 && (res->end - res->start) > 0xfff)
- res->end = res->start + 0xfff;
- if (index == 1 && (res->end - res->start) > 0xff)
- res->end = res->start + 0xff;
- }
+ /* ESCC parent eats child resources. We could have added a
+ * level of hierarchy, but I don't really feel the need
+ * for it
+ */
+ if (!strcmp(np->name, "escc"))
+ return 1;
+
+ /* ESCC has bogus resources >= 3 */
+ if (index >= 3 && !(strcmp(np->name, "ch-a") &&
+ strcmp(np->name, "ch-b")))
+ return 1;
+
+ /* Media bay has too many resources, keep only first one */
+ if (index > 0 && !strcmp(np->name, "media-bay"))
+ return 1;
+
+ /* Some older IDE resources have bogus sizes */
+ if (!(strcmp(np->name, "IDE") && strcmp(np->name, "ATA") &&
+ strcmp(np->type, "ide") && strcmp(np->type, "ata"))) {
+ if (index == 0 && (res->end - res->start) > 0xfff)
+ res->end = res->start + 0xfff;
+ if (index == 1 && (res->end - res->start) > 0xff)
+ res->end = res->start + 0xff;
}
return 0;
}
+static void macio_create_fixup_irq(struct macio_dev *dev, int index,
+ unsigned int line)
+{
+ unsigned int irq;
-static void macio_setup_interrupts(struct macio_dev *dev)
+ irq = irq_create_mapping(NULL, line, 0);
+ if (irq != NO_IRQ) {
+ dev->interrupt[index].start = irq;
+ dev->interrupt[index].flags = IORESOURCE_IRQ;
+ dev->interrupt[index].name = dev->ofdev.dev.bus_id;
+ }
+ if (dev->n_interrupts <= index)
+ dev->n_interrupts = index + 1;
+}
+
+static void macio_add_missing_resources(struct macio_dev *dev)
{
struct device_node *np = dev->ofdev.node;
- int i,j;
+ unsigned int irq_base;
+
+ /* Gatwick has some missing interrupts on child nodes */
+ if (dev->bus->chip->type != macio_gatwick)
+ return;
- /* For now, we use pre-parsed entries in the device-tree for
- * interrupt routing and addresses, but we should change that
- * to dynamically parsed entries and so get rid of most of the
- * clutter in struct device_node
+ /* irq_base is always 64 on gatwick. I have no cleaner way to get
+ * that value from here at this point
*/
- for (i = j = 0; i < np->n_intrs; i++) {
+ irq_base = 64;
+
+ /* Fix SCC */
+ if (strcmp(np->name, "ch-a") == 0) {
+ macio_create_fixup_irq(dev, 0, 15 + irq_base);
+ macio_create_fixup_irq(dev, 1, 4 + irq_base);
+ macio_create_fixup_irq(dev, 2, 5 + irq_base);
+ printk(KERN_INFO "macio: fixed SCC irqs on gatwick\n");
+ }
+
+ /* Fix media-bay */
+ if (strcmp(np->name, "media-bay") == 0) {
+ macio_create_fixup_irq(dev, 0, 29 + irq_base);
+ printk(KERN_INFO "macio: fixed media-bay irq on gatwick\n");
+ }
+
+ /* Fix left media bay children */
+ if (dev->media_bay != NULL && strcmp(np->name, "floppy") == 0) {
+ macio_create_fixup_irq(dev, 0, 19 + irq_base);
+ macio_create_fixup_irq(dev, 1, 1 + irq_base);
+ printk(KERN_INFO "macio: fixed left floppy irqs\n");
+ }
+ if (dev->media_bay != NULL && strcasecmp(np->name, "ata4") == 0) {
+ macio_create_fixup_irq(dev, 0, 14 + irq_base);
+ macio_create_fixup_irq(dev, 1, 3 + irq_base);
+ printk(KERN_INFO "macio: fixed left ide irqs\n");
+ }
+}
+
+static void macio_setup_interrupts(struct macio_dev *dev)
+{
+ struct device_node *np = dev->ofdev.node;
+ unsigned int irq;
+ int i = 0, j = 0;
+
+ for (;;) {
struct resource *res = &dev->interrupt[j];
if (j >= MACIO_DEV_COUNT_IRQS)
break;
- res->start = np->intrs[i].line;
- res->flags = IORESOURCE_IO;
- if (np->intrs[j].sense)
- res->flags |= IORESOURCE_IRQ_LOWLEVEL;
- else
- res->flags |= IORESOURCE_IRQ_HIGHEDGE;
+ irq = irq_of_parse_and_map(np, i++);
+ if (irq == NO_IRQ)
+ break;
+ res->start = irq;
+ res->flags = IORESOURCE_IRQ;
res->name = dev->ofdev.dev.bus_id;
- if (macio_resource_quirks(np, res, i))
+ if (macio_resource_quirks(np, res, i - 1)) {
memset(res, 0, sizeof(struct resource));
- else
+ continue;
+ } else
j++;
}
dev->n_interrupts = j;
/* Setup interrupts & resources */
macio_setup_interrupts(dev);
macio_setup_resources(dev, parent_res);
+ macio_add_missing_resources(dev);
/* Register with core */
if (of_device_register(&dev->ofdev) != 0) {
smu->doorbell = *data;
if (smu->doorbell < 0x50)
smu->doorbell += 0x50;
- if (np->n_intrs > 0)
- smu->db_irq = np->intrs[0].line;
+ smu->db_irq = irq_of_parse_and_map(np, 0);
of_node_put(np);
smu->msg = *data;
if (smu->msg < 0x50)
smu->msg += 0x50;
- if (np->n_intrs > 0)
- smu->msg_irq = np->intrs[0].line;
+ smu->msg_irq = irq_of_parse_and_map(np, 0);
of_node_put(np);
} while(0);
static volatile unsigned char __iomem *via;
static DEFINE_SPINLOCK(cuda_lock);
-#ifdef CONFIG_MAC
-#define CUDA_IRQ IRQ_MAC_ADB
-#define eieio()
-#else
-#define CUDA_IRQ vias->intrs[0].line
-#endif
-
/* VIA registers - spaced 0x200 bytes apart */
#define RS 0x200 /* skip between registers */
#define B 0 /* B-side data */
static int __init via_cuda_start(void)
{
+ unsigned int irq;
+
if (via == NULL)
return -ENODEV;
- if (request_irq(CUDA_IRQ, cuda_interrupt, 0, "ADB", cuda_interrupt)) {
- printk(KERN_ERR "cuda_init: can't get irq %d\n", CUDA_IRQ);
+#ifdef CONFIG_MAC
+ irq = IRQ_MAC_ADB;
+#else /* CONFIG_MAC */
+ irq = irq_of_parse_and_map(vias, 0);
+ if (irq == NO_IRQ) {
+ printk(KERN_ERR "via-cuda: can't map interrupts for %s\n",
+ vias->full_name);
+ return -ENODEV;
+ }
+#endif /* CONFIG_MAC */
+
+ if (request_irq(irq, cuda_interrupt, 0, "ADB", cuda_interrupt)) {
+ printk(KERN_ERR "via-cuda: can't request irq %d\n", irq);
return -EAGAIN;
}
#include <asm/backlight.h>
#endif
-#ifdef CONFIG_PPC32
-#include <asm/open_pic.h>
-#endif
-
#include "via-pmu-event.h"
/* Some compile options */
static int pmu_has_adb;
static struct device_node *gpio_node;
static unsigned char __iomem *gpio_reg = NULL;
-static int gpio_irq = -1;
+static int gpio_irq = NO_IRQ;
static int gpio_irq_enabled = -1;
static volatile int pmu_suspended = 0;
static spinlock_t pmu_lock;
*/
static int __init via_pmu_start(void)
{
+ unsigned int irq;
+
if (vias == NULL)
return -ENODEV;
batt_req.complete = 1;
-#ifndef CONFIG_PPC_MERGE
- if (pmu_kind == PMU_KEYLARGO_BASED)
- openpic_set_irq_priority(vias->intrs[0].line,
- OPENPIC_PRIORITY_DEFAULT + 1);
-#endif
-
- if (request_irq(vias->intrs[0].line, via_pmu_interrupt, 0, "VIA-PMU",
- (void *)0)) {
- printk(KERN_ERR "VIA-PMU: can't get irq %d\n",
- vias->intrs[0].line);
- return -EAGAIN;
+ irq = irq_of_parse_and_map(vias, 0);
+ if (irq == NO_IRQ) {
+ printk(KERN_ERR "via-pmu: can't map interruptn");
+ return -ENODEV;
+ }
+ if (request_irq(irq, via_pmu_interrupt, 0, "VIA-PMU", (void *)0)) {
+ printk(KERN_ERR "via-pmu: can't request irq %d\n", irq);
+ return -ENODEV;
}
if (pmu_kind == PMU_KEYLARGO_BASED) {
if (gpio_node == NULL)
gpio_node = of_find_node_by_name(NULL,
"pmu-interrupt");
- if (gpio_node && gpio_node->n_intrs > 0)
- gpio_irq = gpio_node->intrs[0].line;
+ if (gpio_node)
+ gpio_irq = irq_of_parse_and_map(gpio_node, 0);
- if (gpio_irq != -1) {
+ if (gpio_irq != NO_IRQ) {
if (request_irq(gpio_irq, gpio1_interrupt, 0,
"GPIO1 ADB", (void *)0))
printk(KERN_ERR "pmu: can't get irq %d"
}
rc = request_irq(mp->tx_dma_intr, mace_txdma_intr, 0, "MACE-txdma", dev);
if (rc) {
- printk(KERN_ERR "MACE: can't get irq %d\n", mace->intrs[1].line);
+ printk(KERN_ERR "MACE: can't get irq %d\n", mp->tx_dma_intr);
goto err_free_irq;
}
rc = request_irq(mp->rx_dma_intr, mace_rxdma_intr, 0, "MACE-rxdma", dev);
if (rc) {
- printk(KERN_ERR "MACE: can't get irq %d\n", mace->intrs[2].line);
+ printk(KERN_ERR "MACE: can't get irq %d\n", mp->rx_dma_intr);
goto err_free_tx_irq;
}
uap->flags &= ~PMACZILOG_FLAG_HAS_DMA;
goto no_dma;
}
- uap->tx_dma_irq = np->intrs[1].line;
- uap->rx_dma_irq = np->intrs[2].line;
+ uap->tx_dma_irq = irq_of_parse_and_map(np, 1);
+ uap->rx_dma_irq = irq_of_parse_and_map(np, 2);
}
no_dma:
* Init remaining bits of "port" structure
*/
uap->port.iotype = UPIO_MEM;
- uap->port.irq = np->intrs[0].line;
+ uap->port.irq = irq_of_parse_and_map(np, 0);
uap->port.uartclk = ZS_CLOCK;
uap->port.fifosize = 1;
uap->port.ops = &pmz_pops;
#include <linux/irq.h>
+#ifdef CONFIG_PPC_MERGE
+extern void i8259_init(struct device_node *node, unsigned long intack_addr);
+extern unsigned int i8259_irq(struct pt_regs *regs);
+#else
extern void i8259_init(unsigned long intack_addr, int offset);
extern int i8259_irq(struct pt_regs *regs);
+#endif
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_I8259_H */
* 2 of the License, or (at your option) any later version.
*/
+#include <linux/config.h>
#include <linux/threads.h>
+#include <linux/list.h>
+#include <linux/radix-tree.h>
#include <asm/types.h>
#include <asm/atomic.h>
-/* this number is used when no interrupt has been assigned */
-#define NO_IRQ (-1)
-
-/*
- * These constants are used for passing information about interrupt
- * signal polarity and level/edge sensing to the low-level PIC chip
- * drivers.
- */
-#define IRQ_SENSE_MASK 0x1
-#define IRQ_SENSE_LEVEL 0x1 /* interrupt on active level */
-#define IRQ_SENSE_EDGE 0x0 /* interrupt triggered by edge */
-
-#define IRQ_POLARITY_MASK 0x2
-#define IRQ_POLARITY_POSITIVE 0x2 /* high level or low->high edge */
-#define IRQ_POLARITY_NEGATIVE 0x0 /* low level or high->low edge */
#define get_irq_desc(irq) (&irq_desc[(irq)])
#define for_each_irq(i) \
for ((i) = 0; (i) < NR_IRQS; ++(i))
-#ifdef CONFIG_PPC64
+extern atomic_t ppc_n_lost_interrupts;
-/*
- * Maximum number of interrupt sources that we can handle.
+#ifdef CONFIG_PPC_MERGE
+
+/* This number is used when no interrupt has been assigned */
+#define NO_IRQ (0)
+
+/* This is a special irq number to return from get_irq() to tell that
+ * no interrupt happened _and_ ignore it (don't count it as bad). Some
+ * platforms like iSeries rely on that.
*/
+#define NO_IRQ_IGNORE ((unsigned int)-1)
+
+/* Total number of virqs in the platform (make it a CONFIG_* option ?) */
#define NR_IRQS 512
-/* Interrupt numbers are virtual in case they are sparsely
- * distributed by the hardware.
+/* Number of irqs reserved for the legacy controller */
+#define NUM_ISA_INTERRUPTS 16
+
+/* This type is the placeholder for a hardware interrupt number. It has to
+ * be big enough to enclose whatever representation is used by a given
+ * platform.
+ */
+typedef unsigned long irq_hw_number_t;
+
+/* Interrupt controller "host" data structure. This could be defined as a
+ * irq domain controller. That is, it handles the mapping between hardware
+ * and virtual interrupt numbers for a given interrupt domain. The host
+ * structure is generally created by the PIC code for a given PIC instance
+ * (though a host can cover more than one PIC if they have a flat number
+ * model). It's the host callbacks that are responsible for setting the
+ * irq_chip on a given irq_desc after it's been mapped.
+ *
+ * The host code and data structures are fairly agnostic to the fact that
+ * we use an open firmware device-tree. We do have references to struct
+ * device_node in two places: in irq_find_host() to find the host matching
+ * a given interrupt controller node, and of course as an argument to its
+ * counterpart host->ops->match() callback. However, those are treated as
+ * generic pointers by the core and the fact that it's actually a device-node
+ * pointer is purely a convention between callers and implementation. This
+ * code could thus be used on other architectures by replacing those two
+ * by some sort of arch-specific void * "token" used to identify interrupt
+ * controllers.
+ */
+struct irq_host;
+struct radix_tree_root;
+
+/* Functions below are provided by the host and called whenever a new mapping
+ * is created or an old mapping is disposed. The host can then proceed to
+ * whatever internal data structures management is required. It also needs
+ * to setup the irq_desc when returning from map().
+ */
+struct irq_host_ops {
+ /* Match an interrupt controller device node to a host, returns
+ * 1 on a match
+ */
+ int (*match)(struct irq_host *h, struct device_node *node);
+
+ /* Create or update a mapping between a virtual irq number and a hw
+ * irq number. This can be called several times for the same mapping
+ * but with different flags, though unmap shall always be called
+ * before the virq->hw mapping is changed.
+ */
+ int (*map)(struct irq_host *h, unsigned int virq,
+ irq_hw_number_t hw, unsigned int flags);
+
+ /* Dispose of such a mapping */
+ void (*unmap)(struct irq_host *h, unsigned int virq);
+
+ /* Translate device-tree interrupt specifier from raw format coming
+ * from the firmware to an irq_hw_number_t (interrupt line number) and
+ * trigger flags that can be passed to irq_create_mapping().
+ * If no translation is provided, raw format is assumed to be one cell
+ * for interrupt line and default sense.
+ */
+ int (*xlate)(struct irq_host *h, struct device_node *ctrler,
+ u32 *intspec, unsigned int intsize,
+ irq_hw_number_t *out_hwirq, unsigned int *out_flags);
+};
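
For illustration only, here is a minimal sketch of an irq_host_ops implementation for a hypothetical PIC driver; the my_pic_* names and my_pic_irq_chip are invented, not part of this patch. It assumes the generic set_irq_chip_and_handler()/handle_level_irq helpers from <linux/irq.h> and that the controller's device node is kept in host_data (see the allocation sketch after irq_alloc_host() below):

#include <linux/irq.h>
#include <asm/irq.h>

static struct irq_chip my_pic_irq_chip;		/* mask/unmask/eoi ops elided */
static struct irq_host *my_pic_host;

/* Claim this host when the core looks up our controller node */
static int my_pic_host_match(struct irq_host *h, struct device_node *node)
{
	return h->host_data == node;
}

/* Attach the chip and a flow handler to a freshly created virq */
static int my_pic_host_map(struct irq_host *h, unsigned int virq,
			   irq_hw_number_t hw, unsigned int flags)
{
	set_irq_chip_and_handler(virq, &my_pic_irq_chip, handle_level_irq);
	return 0;
}

/* One-cell interrupt specifier, default sense */
static int my_pic_host_xlate(struct irq_host *h, struct device_node *ct,
			     u32 *intspec, unsigned int intsize,
			     irq_hw_number_t *out_hwirq, unsigned int *out_flags)
{
	*out_hwirq = intspec[0];
	*out_flags = IRQ_TYPE_NONE;
	return 0;
}

static struct irq_host_ops my_pic_host_ops = {
	.match	= my_pic_host_match,
	.map	= my_pic_host_map,
	.xlate	= my_pic_host_xlate,
};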
+
+struct irq_host {
+ struct list_head link;
+
+ /* type of reverse mapping technique */
+ unsigned int revmap_type;
+#define IRQ_HOST_MAP_LEGACY 0 /* legacy 8259, gets irqs 1..15 */
+#define IRQ_HOST_MAP_NOMAP 1 /* no fast reverse mapping */
+#define IRQ_HOST_MAP_LINEAR 2 /* linear map of interrupts */
+#define IRQ_HOST_MAP_TREE 3 /* radix tree */
+ union {
+ struct {
+ unsigned int size;
+ unsigned int *revmap;
+ } linear;
+ struct radix_tree_root tree;
+ } revmap_data;
+ struct irq_host_ops *ops;
+ void *host_data;
+ irq_hw_number_t inval_irq;
+};
+
+/* The main irq map itself is an array of NR_IRQS entries containing the
+ * associated host and hwirq number. An entry with a host of NULL is free.
+ * An entry can be allocated if it's free; the allocator first sets hwirq
+ * to the host's invalid irq number and then fills in the host pointer.
+ */
+struct irq_map_entry {
+ irq_hw_number_t hwirq;
+ struct irq_host *host;
+};
+
+extern struct irq_map_entry irq_map[NR_IRQS];
+
+
+/***
+ * irq_alloc_host - Allocate a new irq_host data structure
+ * @revmap_type: type of reverse mapping to use
+ * @revmap_arg: for IRQ_HOST_MAP_LINEAR only: size of the map
+ * @ops: map/unmap host callbacks
+ * @inval_irq: provide a hw number in that host space that is always invalid
+ *
+ * Allocates and initializes an irq_host structure. Note that in the case of
+ * IRQ_HOST_MAP_LEGACY, the map() callback will be called before this returns
+ * for all legacy interrupts except 0 (which is always the invalid irq for
+ * a legacy controller). For an IRQ_HOST_MAP_LINEAR, the map is allocated by
+ * this call as well. For an IRQ_HOST_MAP_TREE, the radix tree will be allocated
+ * later during boot automatically (the reverse mapping will use the slow path
+ * until that happens).
+ */
+extern struct irq_host *irq_alloc_host(unsigned int revmap_type,
+ unsigned int revmap_arg,
+ struct irq_host_ops *ops,
+ irq_hw_number_t inval_irq);
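
Continuing the hypothetical PIC sketch above, allocating its host could look roughly like this; since the prototype takes no device-node argument, the node is remembered in host_data so the match() callback can identify the host later:

static void __init my_pic_init(struct device_node *node)
{
	/* Linear reverse map covering 64 hardware sources; hwirq 0 is
	 * treated as the always-invalid number on this controller. */
	my_pic_host = irq_alloc_host(IRQ_HOST_MAP_LINEAR, 64,
				     &my_pic_host_ops, 0);
	if (my_pic_host == NULL)
		panic("my_pic: failed to allocate irq host!");

	/* Remember our controller node for my_pic_host_match() */
	my_pic_host->host_data = node;
}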
+
+
+/***
+ * irq_find_host - Locates a host for a given device node
+ * @node: device-tree node of the interrupt controller
+ */
+extern struct irq_host *irq_find_host(struct device_node *node);
+
+
+/***
+ * irq_set_default_host - Set a "default" host
+ * @host: default host pointer
+ *
+ * For convenience, it's possible to set a "default" host that will be used
+ * whenever NULL is passed to irq_create_mapping(). It makes life easier for
+ * platforms that want to manipulate a few hard coded interrupt numbers that
+ * aren't properly represented in the device-tree.
+ */
+extern void irq_set_default_host(struct irq_host *host);
+
+
+/***
+ * irq_set_virq_count - Set the maximum number of virt irqs
+ * @count: number of linux virtual irqs, capped at NR_IRQS
+ *
+ * This is mainly for use by platforms like iSeries that want to program
+ * the virtual irq number into the controller to avoid the reverse mapping
+ */
+extern void irq_set_virq_count(unsigned int count);
+
+
+/***
+ * irq_create_mapping - Map a hardware interrupt into linux virq space
+ * @host: host owning this hardware interrupt or NULL for default host
+ * @hwirq: hardware irq number in that host space
+ * @flags: flags passed to the controller; contains the trigger type among
+ * others. Use the IRQ_TYPE_* values defined in include/linux/irq.h
+ *
+ * Only one mapping per hardware interrupt is permitted. Returns a linux
+ * virq number. The flags can be used to provide sense information to the
+ * controller (typically extracted from the device-tree). If no information
+ * is passed, the controller defaults will apply (for example, xics can only
+ * do edge so flags are irrelevant for some pseries specific irqs).
+ *
+ * The device-tree generally contains the trigger info in an encoding that is
+ * specific to a given type of controller. In that case, you can directly use
+ * host->ops->xlate() to translate that.
+ *
+ * It is recommended that new PICs that don't have existing OF bindings choose
+ * to use a representation of triggers identical to linux.
+ */
+extern unsigned int irq_create_mapping(struct irq_host *host,
+ irq_hw_number_t hwirq,
+ unsigned int flags);
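
As a rough usage sketch, a driver that knows the hardware source number of its device on the hypothetical PIC above could do the following; my_device_handler, my_dev and source number 5 are invented for illustration:

#include <linux/interrupt.h>

static irqreturn_t my_device_handler(int irq, void *dev_id,
				     struct pt_regs *regs)
{
	return IRQ_HANDLED;
}

static int my_device_setup_irq(void *my_dev)
{
	/* Map hardware source 5; flags 0 lets the controller apply its
	 * default trigger settings. */
	unsigned int virq = irq_create_mapping(my_pic_host, 5, 0);

	if (virq == NO_IRQ)
		return -EINVAL;

	/* The returned virq is a normal linux interrupt number */
	return request_irq(virq, my_device_handler, 0, "my-device", my_dev);
}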
+
+
+/***
+ * irq_dispose_mapping - Unmap an interrupt
+ * @virq: linux virq number of the interrupt to unmap
*/
-extern unsigned int virt_irq_to_real_map[NR_IRQS];
+extern void irq_dispose_mapping(unsigned int virq);
-/* The maximum virtual IRQ number that we support. This
- * can be set by the platform and will be reduced by the
- * value of __irq_offset_value. It defaults to and is
- * capped by (NR_IRQS - 1).
+/***
+ * irq_find_mapping - Find a linux virq from a hw irq number.
+ * @host: host owning this hardware interrupt
+ * @hwirq: hardware irq number in that host space
+ *
+ * This is a slow path, for use by generic code. It's expected that an
+ * irq controller implementation directly calls the appropriate low level
+ * mapping function.
*/
-extern unsigned int virt_irq_max;
+extern unsigned int irq_find_mapping(struct irq_host *host,
+ irq_hw_number_t hwirq);
-/* Create a mapping for a real_irq if it doesn't already exist.
- * Return the virtual irq as a convenience.
+
+/***
+ * irq_radix_revmap - Find a linux virq from a hw irq number.
+ * @host: host owning this hardware interrupt
+ * @hwirq: hardware irq number in that host space
+ *
+ * This is a fast path, for use by irq controller code that uses radix tree
+ * revmaps
+ */
+extern unsigned int irq_radix_revmap(struct irq_host *host,
+ irq_hw_number_t hwirq);
+
+/***
+ * irq_linear_revmap - Find a linux virq from a hw irq number.
+ * @host: host owning this hardware interrupt
+ * @hwirq: hardware irq number in that host space
+ *
+ * This is a fast path, for use by irq controller code that uses linear
+ * revmaps. It falls back to the slow path if the revmap doesn't exist
+ * yet and will create the revmap entry with appropriate locking.
+ */
+
+extern unsigned int irq_linear_revmap(struct irq_host *host,
+ irq_hw_number_t hwirq);
+
+
+
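
As a sketch, the ppc_md.get_irq() path of the hypothetical PIC could use the linear fast path like this; my_pic_read_vector() and MY_PIC_SPURIOUS_VECTOR stand in for controller-specific register access and are assumptions, not part of this patch:

#define MY_PIC_SPURIOUS_VECTOR	0xff		/* illustrative value */

static irq_hw_number_t my_pic_read_vector(void);	/* reads the ack/vector register */

static unsigned int my_pic_get_irq(struct pt_regs *regs)
{
	irq_hw_number_t hw = my_pic_read_vector();

	/* Nothing pending: tell do_IRQ() there is nothing to handle */
	if (hw == MY_PIC_SPURIOUS_VECTOR)
		return NO_IRQ;

	/* Fast reverse map; falls back to the slow path (and fills the
	 * linear map entry) the first time a source fires. */
	return irq_linear_revmap(my_pic_host, hw);
}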
+/***
+ * irq_alloc_virt - Allocate virtual irq numbers
+ * @host: host owning these new virtual irqs
+ * @count: number of consecutive numbers to allocate
+ * @hint: pass a hint number, the allocator will try to use a 1:1 mapping
+ *
+ * This is a low level function that is used internally by irq_create_mapping()
+ * and that can be used by some irq controller implementations for things
+ * like allocating ranges of numbers for MSIs. The revmaps are left untouched.
*/
-int virt_irq_create_mapping(unsigned int real_irq);
-void virt_irq_init(void);
+extern unsigned int irq_alloc_virt(struct irq_host *host,
+ unsigned int count,
+ unsigned int hint);
+
+/***
+ * irq_free_virt - Free virtual irq numbers
+ * @virq: virtual irq number of the first interrupt to free
+ * @count: number of interrupts to free
+ *
+ * This function is the opposite of irq_alloc_virt. It will not clear the
+ * reverse maps; this should be done beforehand by unmapping the interrupts.
+ * In fact, all interrupts covered by the range being freed should have been
+ * unmapped prior to calling this.
+ */
+extern void irq_free_virt(unsigned int virq, unsigned int count);
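
A hedged fragment of how a controller might batch-allocate virqs, e.g. for a block of MSIs; the mapping and irq_chip setup in between is controller specific and only indicated by comments:

static int my_pic_setup_msi_block(irq_hw_number_t first_hw)
{
	/* Reserve four consecutive virqs; hint 0 means no preferred
	 * 1:1 placement. */
	unsigned int base = irq_alloc_virt(my_pic_host, 4, 0);

	if (base == NO_IRQ)
		return -ENOSPC;

	/* ... controller-specific code would now associate base .. base + 3
	 * with first_hw .. first_hw + 3 and set up the irq_chip for each ... */

	return base;
}

static void my_pic_teardown_msi_block(unsigned int base)
{
	/* All four interrupts must already have been unmapped */
	irq_free_virt(base, 4);
}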
+
+
+/* -- OF helpers -- */
+
+/* irq_create_of_mapping - Map a hardware interrupt into linux virq space
+ * @controller: Device node of the interrupt controller
+ * @intspec: Interrupt specifier from the device-tree
+ * @intsize: Size of the interrupt specifier from the device-tree
+ *
+ * This function is identical to irq_create_mapping except that it takes
+ * as input information straight from the device-tree (typically the results
+ * of the of_irq_map_*() functions).
+ */
+extern unsigned int irq_create_of_mapping(struct device_node *controller,
+ u32 *intspec, unsigned int intsize);
+
+
+/* irq_of_parse_and_map - Parse and Map an interrupt into linux virq space
+ * @device: Device node of the device whose interrupt is to be mapped
+ * @index: Index of the interrupt to map
+ *
+ * This function is a wrapper that chains of_irq_map_one() and
+ * irq_create_of_mapping() to make things easier for callers.
+ */
+extern unsigned int irq_of_parse_and_map(struct device_node *dev, int index);
+
+/* -- End OF helpers -- */
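
For reference, the typical driver-side pattern, which the macio and sound driver hunks further below follow, looks roughly like this; np, my_device_handler and my_dev are placeholders:

	unsigned int virq = irq_of_parse_and_map(np, 0);

	if (virq == NO_IRQ)
		return -ENODEV;

	if (request_irq(virq, my_device_handler, 0, "my-device", my_dev))
		return -EBUSY;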
-static inline unsigned int virt_irq_to_real(unsigned int virt_irq)
+/***
+ * irq_early_init - Init irq remapping subsystem
+ */
+extern void irq_early_init(void);
+
+static __inline__ int irq_canonicalize(int irq)
{
- return virt_irq_to_real_map[virt_irq];
+ return irq;
}
-extern unsigned int real_irq_to_virt_slowpath(unsigned int real_irq);
+
+#else /* CONFIG_PPC_MERGE */
+
+/* This number is used when no interrupt has been assigned */
+#define NO_IRQ (-1)
+#define NO_IRQ_IGNORE (-2)
+
/*
- * List of interrupt controllers.
+ * These constants are used for passing information about interrupt
+ * signal polarity and level/edge sensing to the low-level PIC chip
+ * drivers.
*/
-#define IC_INVALID 0
-#define IC_OPEN_PIC 1
-#define IC_PPC_XIC 2
-#define IC_CELL_PIC 3
-#define IC_ISERIES 4
+#define IRQ_SENSE_MASK 0x1
+#define IRQ_SENSE_LEVEL 0x1 /* interrupt on active level */
+#define IRQ_SENSE_EDGE 0x0 /* interrupt triggered by edge */
-extern u64 ppc64_interrupt_controller;
+#define IRQ_POLARITY_MASK 0x2
+#define IRQ_POLARITY_POSITIVE 0x2 /* high level or low->high edge */
+#define IRQ_POLARITY_NEGATIVE 0x0 /* low level or high->low edge */
-#else /* 32-bit */
#if defined(CONFIG_40x)
#include <asm/ibm4xx.h>
#endif /* CONFIG_8260 */
-#endif
+#endif /* Whatever way too big #ifdef */
-#ifndef CONFIG_PPC_MERGE
#define NR_MASK_WORDS ((NR_IRQS + 31) / 32)
/* pedantic: these are long because they are used with set_bit --RR */
extern unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
-#endif
-
-extern atomic_t ppc_n_lost_interrupts;
-
-#define virt_irq_create_mapping(x) (x)
-
-#endif
/*
* Because many systems have two overlapping names spaces for
irq = 9;
return irq;
}
+#endif /* CONFIG_PPC_MERGE */
extern int distribute_irqs;
void (*show_percpuinfo)(struct seq_file *m, int i);
void (*init_IRQ)(void);
- int (*get_irq)(struct pt_regs *);
+ unsigned int (*get_irq)(struct pt_regs *);
#ifdef CONFIG_KEXEC
void (*kexec_cpu_down)(int crash_shutdown, int secondary);
#endif
/* The instance data of a given MPIC */
struct mpic
{
+ /* The device node of the interrupt controller */
+ struct device_node *of_node;
+
+ /* The remapper for this MPIC */
+ struct irq_host *irqhost;
+
/* The "linux" controller struct */
struct irq_chip hc_irq;
#ifdef CONFIG_MPIC_BROKEN_U3
unsigned int isu_size;
unsigned int isu_shift;
unsigned int isu_mask;
- /* Offset of irq vector numbers */
- unsigned int irq_offset;
unsigned int irq_count;
- /* Offset of ipi vector numbers */
- unsigned int ipi_offset;
/* Number of sources */
unsigned int num_sources;
/* Number of CPUs */
unsigned int num_cpus;
- /* senses array */
+ /* default senses array */
unsigned char *senses;
unsigned int senses_count;
* The values in the array start at the first source of the MPIC,
* that is senses[0] correspond to linux irq "irq_offset".
*/
-extern struct mpic *mpic_alloc(unsigned long phys_addr,
+extern struct mpic *mpic_alloc(struct device_node *node,
+ unsigned long phys_addr,
unsigned int flags,
unsigned int isu_size,
- unsigned int irq_offset,
unsigned int irq_count,
- unsigned int ipi_offset,
- unsigned char *senses,
- unsigned int senses_num,
const char *name);
/* Assign ISUs, to call before mpic_init()
extern void mpic_assign_isu(struct mpic *mpic, unsigned int isu_num,
unsigned long phys_addr);
+/* Set default sense codes
+ *
+ * @mpic: controller
+ * @senses: array of sense codes
+ * @count: size of above array
+ *
+ * Optionally provide an array (indexed by hardware interrupt number
+ * for this MPIC) of default sense codes for the chip. Those are linux
+ * sense codes (IRQ_TYPE_*).
+ *
+ * The driver takes ownership of the pointer; do not free or reuse it
+ * afterwards. __init only.
+ */
+extern void mpic_set_default_senses(struct mpic *mpic, u8 *senses, int count);
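
A hedged sketch of how platform setup code might use the reworked mpic_alloc() and mpic_set_default_senses(); the device node, register address, flag choice and sense table below are illustrative assumptions, not taken from this patch:

	struct mpic *mpic;

	/* np: the MPIC's device node, paddr: physical address of its
	 * register block (both obtained from the device-tree). */
	mpic = mpic_alloc(np, paddr,
			  MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN,
			  0, 0, " MPIC     ");
	BUG_ON(mpic == NULL);

	/* Optional: default sense codes, indexed by hardware irq number */
	mpic_set_default_senses(mpic, my_board_senses, my_board_senses_count);

	mpic_init(mpic);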
+
+
/* Initialize the controller. After this has been called, none of the above
* should be called again for this mpic
*/
void smp_mpic_message_pass(int target, int msg);
/* Fetch interrupt from a given mpic */
-extern int mpic_get_one_irq(struct mpic *mpic, struct pt_regs *regs);
+extern unsigned int mpic_get_one_irq(struct mpic *mpic, struct pt_regs *regs);
/* This one gets to the primary mpic */
-extern int mpic_get_irq(struct pt_regs *regs);
+extern unsigned int mpic_get_irq(struct pt_regs *regs);
/* Set the EPIC clock ratio */
void mpic_set_clk_ratio(struct mpic *mpic, u32 clock_ratio);
/* Enable/Disable EPIC serial interrupt mode */
void mpic_set_serial_int(struct mpic *mpic, int enable);
-/* global mpic for pSeries */
-extern struct mpic *pSeries_mpic;
-
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_MPIC_H */
typedef u32 phandle;
typedef u32 ihandle;
-struct interrupt_info {
- int line;
- int sense; /* +ve/-ve logic, edge or level, etc. */
-};
-
struct property {
char *name;
int length;
char *type;
phandle node;
phandle linux_phandle;
- int n_intrs;
- struct interrupt_info *intrs;
char *full_name;
struct property *properties;
struct list_head sched_list;
int number;
int nid;
+ unsigned int irqs[3];
u32 isrc;
u32 node;
u64 flags;
static void get_irq(struct device_node * np, int *irqptr)
{
- *irqptr = -1;
- if (!np)
- return;
- if (np->n_intrs != 1)
- return;
- *irqptr = np->intrs[0].line;
+ *irqptr = irq_of_parse_and_map(np, 0);
}
/* 0x4 is outenable, 0x1 is out, thus 4 or 5 */
if (strncmp(np->name, "i2s-", 4))
return 0;
- if (np->n_intrs != 3)
+ if (macio_irq_count(macio) != 3)
return 0;
dev = kzalloc(sizeof(struct i2sbus_dev), GFP_KERNEL);
snprintf(dev->rnames[i], sizeof(dev->rnames[i]), rnames[i], np->name);
}
for (i=0;i<3;i++) {
- if (request_irq(np->intrs[i].line, ints[i], 0, dev->rnames[i], dev))
+ if (request_irq(macio_irq(macio, i), ints[i], 0,
+ dev->rnames[i], dev))
goto err;
- dev->interrupts[i] = np->intrs[i].line;
+ dev->interrupts[i] = macio_irq(macio, i);
}
for (i=0;i<3;i++) {
*gpio_pol = *pp;
else
*gpio_pol = 1;
- if (np->n_intrs > 0)
- return np->intrs[0].line;
-
- return 0;
+ return irq_of_parse_and_map(np, 0);
}
static inline void
* other info if necessary (early AWACS we want to read chip ids)
*/
- if (of_get_address(io, 2, NULL, NULL) == NULL || io->n_intrs < 3) {
+ if (of_get_address(io, 2, NULL, NULL) == NULL) {
/* OK - maybe we need to use the 'awacs' node (on earlier
* machines).
*/
if (awacs_node) {
io = awacs_node ;
- if (of_get_address(io, 2, NULL, NULL) == NULL ||
- io->n_intrs < 3) {
+ if (of_get_address(io, 2, NULL, NULL) == NULL) {
printk("dmasound_pmac: can't use %s\n",
io->full_name);
return -ENODEV;
if (awacs_revision == AWACS_SCREAMER && awacs)
awacs_recalibrate();
- awacs_irq = io->intrs[0].line;
- awacs_tx_irq = io->intrs[1].line;
- awacs_rx_irq = io->intrs[2].line;
+ awacs_irq = irq_of_parse_and_map(io, 0);
+ awacs_tx_irq = irq_of_parse_and_map(io, 1);
+ awacs_rx_irq = irq_of_parse_and_map(io, 2);
/* Hack for legacy crap that will be killed someday */
awacs_node = io;
struct snd_pmac *chip;
struct device_node *np;
int i, err;
+ unsigned int irq;
unsigned long ctrl_addr, txdma_addr, rxdma_addr;
static struct snd_device_ops ops = {
.dev_free = snd_pmac_dev_free,
if (chip->is_k2) {
static char *rnames[] = {
"Sound Control", "Sound DMA" };
- if (np->n_intrs < 3) {
- err = -ENODEV;
- goto __error;
- }
for (i = 0; i < 2; i ++) {
if (of_address_to_resource(np->parent, i,
&chip->rsrc[i])) {
} else {
static char *rnames[] = {
"Sound Control", "Sound Tx DMA", "Sound Rx DMA" };
- if (np->n_intrs < 3) {
- err = -ENODEV;
- goto __error;
- }
for (i = 0; i < 3; i ++) {
if (of_address_to_resource(np, i,
&chip->rsrc[i])) {
chip->playback.dma = ioremap(txdma_addr, 0x100);
chip->capture.dma = ioremap(rxdma_addr, 0x100);
if (chip->model <= PMAC_BURGUNDY) {
- if (request_irq(np->intrs[0].line, snd_pmac_ctrl_intr, 0,
+ irq = irq_of_parse_and_map(np, 0);
+ if (request_irq(irq, snd_pmac_ctrl_intr, 0,
"PMac", (void*)chip)) {
- snd_printk(KERN_ERR "pmac: unable to grab IRQ %d\n", np->intrs[0].line);
+ snd_printk(KERN_ERR "pmac: unable to grab IRQ %d\n",
+ irq);
err = -EBUSY;
goto __error;
}
- chip->irq = np->intrs[0].line;
+ chip->irq = irq;
}
- if (request_irq(np->intrs[1].line, snd_pmac_tx_intr, 0,
- "PMac Output", (void*)chip)) {
- snd_printk(KERN_ERR "pmac: unable to grab IRQ %d\n", np->intrs[1].line);
+ irq = irq_of_parse_and_map(np, 1);
+ if (request_irq(irq, snd_pmac_tx_intr, 0, "PMac Output", (void*)chip)){
+ snd_printk(KERN_ERR "pmac: unable to grab IRQ %d\n", irq);
err = -EBUSY;
goto __error;
}
- chip->tx_irq = np->intrs[1].line;
- if (request_irq(np->intrs[2].line, snd_pmac_rx_intr, 0,
- "PMac Input", (void*)chip)) {
- snd_printk(KERN_ERR "pmac: unable to grab IRQ %d\n", np->intrs[2].line);
+ chip->tx_irq = irq;
+ irq = irq_of_parse_and_map(np, 2);
+ if (request_irq(irq, snd_pmac_rx_intr, 0, "PMac Input", (void*)chip)) {
+ snd_printk(KERN_ERR "pmac: unable to grab IRQ %d\n", irq);
err = -EBUSY;
goto __error;
}
- chip->rx_irq = np->intrs[2].line;
+ chip->rx_irq = irq;
snd_pmac_sound_feature(chip, 1);
DBG("(I) GPIO device %s found, offset: %x, active state: %d !\n",
device, gp->addr, gp->active_state);
- return (node->n_intrs > 0) ? node->intrs[0].line : 0;
+ return irq_of_parse_and_map(node, 0);
}
/* reset audio */
&mix->line_mute, 1);
irq = tumbler_find_device("headphone-detect",
NULL, &mix->hp_detect, 0);
- if (irq < 0)
+ if (irq <= NO_IRQ)
irq = tumbler_find_device("headphone-detect",
NULL, &mix->hp_detect, 1);
- if (irq < 0)
+ if (irq <= NO_IRQ)
irq = tumbler_find_device("keywest-gpio15",
NULL, &mix->hp_detect, 1);
mix->headphone_irq = irq;
irq = tumbler_find_device("line-output-detect",
NULL, &mix->line_detect, 0);
- if (irq < 0)
+ if (irq <= NO_IRQ)
irq = tumbler_find_device("line-output-detect",
NULL, &mix->line_detect, 1);
mix->lineout_irq = irq;