static struct irq_chip xen_dynamic_chip;
static struct irq_chip xen_percpu_chip;
static struct irq_chip xen_pirq_chip;
+static void enable_dynirq(struct irq_data *data);
+static void disable_dynirq(struct irq_data *data);
+
+/* Get info for IRQ */
+static struct irq_info *info_for_irq(unsigned irq)
+{
+ return irq_get_handler_data(irq);
+}
-/* Constructor for packed IRQ information. */
-static struct irq_info mk_unbound_info(void)
+/* Constructors for packed IRQ information. */
+static void xen_irq_info_common_init(struct irq_info *info,
+ unsigned irq,
+ enum xen_irq_type type,
+ unsigned short evtchn,
+ unsigned short cpu)
{
- return (struct irq_info) { .type = IRQT_UNBOUND };
+
+ BUG_ON(info->type != IRQT_UNBOUND && info->type != type);
+
+ info->type = type;
+ info->irq = irq;
+ info->evtchn = evtchn;
+ info->cpu = cpu;
+
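+ /* Record the event channel -> IRQ mapping used by the upcall handler. */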
+ evtchn_to_irq[evtchn] = irq;
}
-static struct irq_info mk_evtchn_info(unsigned short evtchn)
+static void xen_irq_info_evtchn_init(unsigned irq,
+ unsigned short evtchn)
{
- return (struct irq_info) { .type = IRQT_EVTCHN, .evtchn = evtchn,
- .cpu = 0 };
+ struct irq_info *info = info_for_irq(irq);
+
+ xen_irq_info_common_init(info, irq, IRQT_EVTCHN, evtchn, 0);
}
-static struct irq_info mk_ipi_info(unsigned short evtchn, enum ipi_vector ipi)
+static void xen_irq_info_ipi_init(unsigned cpu,
+ unsigned irq,
+ unsigned short evtchn,
+ enum ipi_vector ipi)
{
- return (struct irq_info) { .type = IRQT_IPI, .evtchn = evtchn,
- .cpu = 0, .u.ipi = ipi };
+ struct irq_info *info = info_for_irq(irq);
+
+ xen_irq_info_common_init(info, irq, IRQT_IPI, evtchn, 0);
+
+ info->u.ipi = ipi;
+
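+ /* Remember which IRQ delivers this IPI vector on this CPU. */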
+ per_cpu(ipi_to_irq, cpu)[ipi] = irq;
}
-static struct irq_info mk_virq_info(unsigned short evtchn, unsigned short virq)
+static void xen_irq_info_virq_init(unsigned cpu,
+ unsigned irq,
+ unsigned short evtchn,
+ unsigned short virq)
{
- return (struct irq_info) { .type = IRQT_VIRQ, .evtchn = evtchn,
- .cpu = 0, .u.virq = virq };
+ struct irq_info *info = info_for_irq(irq);
+
+ xen_irq_info_common_init(info, irq, IRQT_VIRQ, evtchn, 0);
+
+ info->u.virq = virq;
+
+ per_cpu(virq_to_irq, cpu)[virq] = irq;
}
-static struct irq_info mk_pirq_info(unsigned short evtchn, unsigned short pirq,
- unsigned short gsi, unsigned short vector)
+static void xen_irq_info_pirq_init(unsigned irq,
+ unsigned short evtchn,
+ unsigned short pirq,
+ unsigned short gsi,
+ unsigned short vector,
+ uint16_t domid,
+ unsigned char flags)
{
- return (struct irq_info) { .type = IRQT_PIRQ, .evtchn = evtchn,
- .cpu = 0,
- .u.pirq = { .pirq = pirq, .gsi = gsi, .vector = vector } };
+ struct irq_info *info = info_for_irq(irq);
+
+ xen_irq_info_common_init(info, irq, IRQT_PIRQ, evtchn, 0);
+
+ info->u.pirq.pirq = pirq;
+ info->u.pirq.gsi = gsi;
+ info->u.pirq.vector = vector;
+ info->u.pirq.domid = domid;
+ info->u.pirq.flags = flags;
}
/*
put_cpu();
}
-static int get_nr_hw_irqs(void)
+static void xen_irq_init(unsigned irq)
{
- int ret = 1;
+ struct irq_info *info;
+ struct irq_desc *desc = irq_to_desc(irq);
-#ifdef CONFIG_X86_IO_APIC
- ret = get_nr_irqs_gsi();
+#ifdef CONFIG_SMP
+ /* By default all event channels notify CPU#0. */
+ cpumask_copy(desc->irq_data.affinity, cpumask_of(0));
#endif
- return ret;
-}
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (info == NULL)
+ panic("Unable to allocate metadata for IRQ%d\n", irq);
-static int find_unbound_pirq(int type)
-{
- int rc, i;
- struct physdev_get_free_pirq op_get_free_pirq;
- op_get_free_pirq.type = type;
+ info->type = IRQT_UNBOUND;
- rc = HYPERVISOR_physdev_op(PHYSDEVOP_get_free_pirq, &op_get_free_pirq);
- if (!rc)
- return op_get_free_pirq.pirq;
+ irq_set_handler_data(irq, info);
- for (i = 0; i < nr_irqs; i++) {
- if (pirq_to_irq[i] < 0)
- return i;
- }
- return -1;
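+ /* Keep every Xen IRQ on a global list so it can be found and
+ * reinitialised across suspend/resume. */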
+ list_add_tail(&info->list, &xen_irq_list_head);
}
-static int find_unbound_irq(void)
+static int __must_check xen_allocate_irq_dynamic(void)
{
- struct irq_data *data;
- int irq, res;
- int bottom = get_nr_hw_irqs();
- int top = nr_irqs-1;
-
- if (bottom == nr_irqs)
- goto no_irqs;
+ int first = 0;
+ int irq;
- /* This loop starts from the top of IRQ space and goes down.
- * We need this b/c if we have a PCI device in a Xen PV guest
- * we do not have an IO-APIC (though the backend might have them)
- * mapped in. To not have a collision of physical IRQs with the Xen
- * event channels start at the top of the IRQ space for virtual IRQs.
+#ifdef CONFIG_X86_IO_APIC
+ /*
+ * For an HVM guest or domain 0 which see "real" (emulated or
+ * actual respectively) GSIs we allocate dynamic IRQs
+ * e.g. those corresponding to event channels or MSIs
+ * etc. from the range above those "real" GSIs to avoid
+ * collisions.
*/
- for (irq = top; irq > bottom; irq--) {
- data = irq_get_irq_data(irq);
- /* only 15->0 have init'd desc; handle irq > 16 */
- if (!data)
- break;
- if (data->chip == &no_irq_chip)
- break;
- if (data->chip != &xen_dynamic_chip)
- continue;
- if (irq_info[irq].type == IRQT_UNBOUND)
- return irq;
- }
-
- if (irq == bottom)
- goto no_irqs;
+ if (xen_initial_domain() || xen_hvm_domain())
+ first = get_nr_irqs_gsi();
+#endif
- res = irq_alloc_desc_at(irq, -1);
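+ /* Allocate the first free descriptor at or above 'first'. */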
+ irq = irq_alloc_desc_from(first, -1);
- if (WARN_ON(res != irq))
- return -1;
+ xen_irq_init(irq);
return irq;
-
-no_irqs:
- panic("No available IRQ to bind to: increase nr_irqs!\n");
}
-static bool identity_mapped_irq(unsigned irq)
+static int __must_check xen_allocate_irq_gsi(unsigned gsi)
{
- /* identity map all the hardware irqs */
- return irq < get_nr_hw_irqs();
+ int irq;
+
+ /*
+ * A PV guest has no concept of a GSI (since it has no ACPI
+ * nor access to/knowledge of the physical APICs). Therefore
+ * all IRQs are dynamically allocated from the entire IRQ
+ * space.
+ */
+ if (xen_pv_domain() && !xen_initial_domain())
+ return xen_allocate_irq_dynamic();
+
+ /* Legacy IRQ descriptors are already allocated by the arch. */
+ if (gsi < NR_IRQS_LEGACY)
+ irq = gsi;
+ else
+ irq = irq_alloc_desc_at(gsi, -1);
+
+ xen_irq_init(irq);
+
+ return irq;
}
-static void pirq_unmask_notify(int irq)
+static void xen_free_irq(unsigned irq)
{
- struct physdev_eoi eoi = { .irq = pirq_from_irq(irq) };
+ struct irq_info *info = irq_get_handler_data(irq);
- if (unlikely(pirq_needs_eoi(irq))) {
- int rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
- WARN_ON(rc);
- }
+ list_del(&info->list);
+
+ irq_set_handler_data(irq, NULL);
+
+ kfree(info);
+
+ /* Legacy IRQ descriptors are managed by the arch. */
+ if (irq < NR_IRQS_LEGACY)
+ return;
+
+ irq_free_desc(irq);
}
static void pirq_query_unmask(int irq)
{
struct physdev_irq_status_query irq_status;
return desc && desc->action == NULL;
}
-static unsigned int startup_pirq(unsigned int irq)
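+/*
+ * Ack a PIRQ by clearing its pending event channel; if Xen requires an
+ * explicit EOI for this PIRQ (pirq_needs_eoi), issue PHYSDEVOP_eoi too.
+ */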
+static void eoi_pirq(struct irq_data *data)
+{
+ int evtchn = evtchn_from_irq(data->irq);
+ struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) };
+ int rc = 0;
+
+ irq_move_irq(data);
+
+ if (VALID_EVTCHN(evtchn))
+ clear_evtchn(evtchn);
+
+ if (pirq_needs_eoi(data->irq)) {
+ rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
+ WARN_ON(rc);
+ }
+}
+
+static void mask_ack_pirq(struct irq_data *data)
+{
+ disable_dynirq(data);
+ eoi_pirq(data);
+}
+
+static unsigned int __startup_pirq(unsigned int irq)
{
struct evtchn_bind_pirq bind_pirq;
struct irq_info *info = info_for_irq(irq);
info->evtchn = 0;
}
-static void enable_pirq(unsigned int irq)
+static void enable_pirq(struct irq_data *data)
{
- startup_pirq(irq);
+ startup_pirq(data);
}
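+/* A PIRQ is backed by an event channel, so masking that channel disables it. */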
-static void disable_pirq(unsigned int irq)
+static void disable_pirq(struct irq_data *data)
{
-}
-
-static void ack_pirq(unsigned int irq)
-{
- int evtchn = evtchn_from_irq(irq);
-
- move_native_irq(irq);
-
- if (VALID_EVTCHN(evtchn)) {
- mask_evtchn(evtchn);
- clear_evtchn(evtchn);
- }
-}
-
-static void end_pirq(unsigned int irq)
-{
- int evtchn = evtchn_from_irq(irq);
- struct irq_desc *desc = irq_to_desc(irq);
-
- if (WARN_ON(!desc))
- return;
-
- if ((desc->status & (IRQ_DISABLED|IRQ_PENDING)) ==
- (IRQ_DISABLED|IRQ_PENDING)) {
- shutdown_pirq(irq);
- } else if (VALID_EVTCHN(evtchn)) {
- unmask_evtchn(evtchn);
- pirq_unmask_notify(irq);
- }
+ disable_dynirq(data);
}
static int find_irq_by_gsi(unsigned gsi)
goto out; /* XXX need refcount? */
}
- /* If we are a PV guest, we don't have GSIs (no ACPI passed). Therefore
- * we are using the !xen_initial_domain() to drop in the function.*/
- if (identity_mapped_irq(gsi) || (!xen_initial_domain() &&
- xen_pv_domain())) {
- irq = gsi;
- irq_alloc_desc_at(irq, -1);
- } else
- irq = find_unbound_irq();
-
- set_irq_chip_and_handler_name(irq, &xen_pirq_chip,
- handle_level_irq, name);
+ irq = xen_allocate_irq_gsi(gsi);
+ if (irq < 0)
+ goto out;
irq_op.irq = irq;
irq_op.vector = 0;
goto out;
}
- irq_info[irq] = mk_pirq_info(0, pirq, gsi, irq_op.vector);
- irq_info[irq].u.pirq.flags |= shareable ? PIRQ_SHAREABLE : 0;
- pirq_to_irq[pirq] = irq;
+ xen_irq_info_pirq_init(irq, 0, pirq, gsi, irq_op.vector, DOMID_SELF,
+ shareable ? PIRQ_SHAREABLE : 0);
+
+ pirq_query_unmask(irq);
+ /* We try to use the handler with the appropriate semantic for the
+ * type of interrupt: if the interrupt doesn't need an eoi
+ * (pirq_needs_eoi returns false), we treat it like an edge
+ * triggered interrupt so we use handle_edge_irq.
+ * As a matter of fact this only happens when the corresponding
+ * physical interrupt is edge triggered or an msi.
+ *
+ * On the other hand if the interrupt needs an eoi (pirq_needs_eoi
+ * returns true) we treat it like a level triggered interrupt so we
+ * use handle_fasteoi_irq like the native code does for this kind of
+ * interrupts.
+ * Depending on the Xen version, pirq_needs_eoi might return true
+ * not only for level triggered interrupts but for edge triggered
+ * interrupts too. In any case Xen always honors the eoi mechanism,
+ * not injecting any more pirqs of the same kind if the first one
+ * hasn't received an eoi yet. Therefore using the fasteoi handler
+ * is the right choice either way.
+ */
+ if (pirq_needs_eoi(irq))
+ irq_set_chip_and_handler_name(irq, &xen_pirq_chip,
+ handle_fasteoi_irq, name);
+ else
+ irq_set_chip_and_handler_name(irq, &xen_pirq_chip,
+ handle_edge_irq, name);
+
out:
spin_unlock(&irq_mapping_update_lock);
}
#ifdef CONFIG_PCI_MSI
-#include <linux/msi.h>
-#include "../pci/msi.h"
-
-void xen_allocate_pirq_msi(char *name, int *irq, int *pirq, int alloc)
+int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc)
{
- spin_lock(&irq_mapping_update_lock);
-
- if (alloc & XEN_ALLOC_IRQ) {
- *irq = find_unbound_irq();
- if (*irq == -1)
- goto out;
- }
-
- if (alloc & XEN_ALLOC_PIRQ) {
- *pirq = find_unbound_pirq(MAP_PIRQ_TYPE_MSI);
- if (*pirq == -1)
- goto out;
- }
+ int rc;
+ struct physdev_get_free_pirq op_get_free_pirq;
- set_irq_chip_and_handler_name(*irq, &xen_pirq_chip,
- handle_level_irq, name);
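+ /* Ask the hypervisor for an unused PIRQ that can carry an MSI. */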
+ op_get_free_pirq.type = MAP_PIRQ_TYPE_MSI;
+ rc = HYPERVISOR_physdev_op(PHYSDEVOP_get_free_pirq, &op_get_free_pirq);
- irq_info[*irq] = mk_pirq_info(0, *pirq, 0, 0);
- pirq_to_irq[*pirq] = *irq;
+ WARN_ONCE(rc == -ENOSYS,
+ "hypervisor does not support the PHYSDEVOP_get_free_pirq interface\n");
-out:
- spin_unlock(&irq_mapping_update_lock);
+ return rc ? -1 : op_get_free_pirq.pirq;
}
-int xen_create_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int type)
+int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
+ int pirq, int vector, const char *name,
+ domid_t domid)
{
- int irq = -1;
- struct physdev_map_pirq map_irq;
- int rc;
- int pos;
- u32 table_offset, bir;
-
- memset(&map_irq, 0, sizeof(map_irq));
- map_irq.domid = DOMID_SELF;
- map_irq.type = MAP_PIRQ_TYPE_MSI;
- map_irq.index = -1;
- map_irq.pirq = -1;
- map_irq.bus = dev->bus->number;
- map_irq.devfn = dev->devfn;
-
- if (type == PCI_CAP_ID_MSIX) {
- pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
-
- pci_read_config_dword(dev, msix_table_offset_reg(pos),
- &table_offset);
- bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
-
- map_irq.table_base = pci_resource_start(dev, bir);
- map_irq.entry_nr = msidesc->msi_attrib.entry_nr;
- }
+ int irq, ret;
spin_lock(&irq_mapping_update_lock);
if (irq == -1)
goto out;
- rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
- if (rc) {
- printk(KERN_WARNING "xen map irq failed %d\n", rc);
-
- irq_free_desc(irq);
-
- irq = -1;
- goto out;
- }
- irq_info[irq] = mk_pirq_info(0, map_irq.pirq, 0, map_irq.index);
-
- set_irq_chip_and_handler_name(irq, &xen_pirq_chip,
- handle_level_irq,
- (type == PCI_CAP_ID_MSIX) ? "msi-x":"msi");
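+ /* MSIs behave as edge-triggered interrupts, so use the edge handler. */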
+ irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_edge_irq,
+ name);
+ xen_irq_info_pirq_init(irq, 0, pirq, 0, vector, domid, 0);
+ ret = irq_set_msi_desc(irq, msidesc);
+ if (ret < 0)
+ goto error_irq;
out:
spin_unlock(&irq_mapping_update_lock);
return irq;
return rc;
}
-int xen_vector_from_irq(unsigned irq)
+int xen_irq_from_pirq(unsigned pirq)
{
- return vector_from_irq(irq);
-}
+ int irq;
-int xen_gsi_from_irq(unsigned irq)
-{
- return gsi_from_irq(irq);
+ struct irq_info *info;
+
+ spin_lock(&irq_mapping_update_lock);
+
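+ /* Scan the global IRQ list for the entry bound to this PIRQ. */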
+ list_for_each_entry(info, &xen_irq_list_head, list) {
+ if (info == NULL || info->type != IRQT_PIRQ)
+ continue;
+ irq = info->irq;
+ if (info->u.pirq.pirq == pirq)
+ goto out;
+ }
+ irq = -1;
+out:
+ spin_unlock(&irq_mapping_update_lock);
+
+ return irq;
}
-int xen_irq_from_pirq(unsigned pirq)
-{
- return pirq_to_irq[pirq];
-}
-
+
+int xen_pirq_from_irq(unsigned irq)
+{
+ return pirq_from_irq(irq);
+}
+EXPORT_SYMBOL_GPL(xen_pirq_from_irq);
int bind_evtchn_to_irq(unsigned int evtchn)
{
int irq;
irq = evtchn_to_irq[evtchn];
if (irq == -1) {
- irq = find_unbound_irq();
+ irq = xen_allocate_irq_dynamic();
+ if (irq == -1)
+ goto out;
- set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
- handle_fasteoi_irq, "event");
+ irq_set_chip_and_handler_name(irq, &xen_dynamic_chip,
+ handle_edge_irq, "event");
- evtchn_to_irq[evtchn] = irq;
- irq_info[irq] = mk_evtchn_info(evtchn);
+ xen_irq_info_evtchn_init(irq, evtchn);
}
+out:
spin_unlock(&irq_mapping_update_lock);
return irq;
unsigned long irqflags,
const char *devname, void *dev_id)
{
- unsigned int irq;
- int retval;
+ int irq, retval;
irq = bind_evtchn_to_irq(evtchn);
+ if (irq < 0)
+ return irq;
retval = request_irq(irq, handler, irqflags, devname, dev_id);
if (retval != 0) {
unbind_from_irq(irq);
irq_handler_t handler,
unsigned long irqflags, const char *devname, void *dev_id)
{
- unsigned int irq;
- int retval;
+ int irq, retval;
irq = bind_virq_to_irq(virq, cpu);
+ if (irq < 0)
+ return irq;
retval = request_irq(irq, handler, irqflags, devname, dev_id);
if (retval != 0) {
unbind_from_irq(irq);
wmb();
#endif
pending_words = xchg(&vcpu_info->evtchn_pending_sel, 0);
- while (pending_words != 0) {
+
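+ /*
+ * Resume the scan where the previous pass stopped so that busy
+ * low-numbered event channels cannot starve the others.
+ */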
+ start_word_idx = __this_cpu_read(current_word_idx);
+ start_bit_idx = __this_cpu_read(current_bit_idx);
+
+ word_idx = start_word_idx;
+
+ for (i = 0; pending_words != 0; i++) {
unsigned long pending_bits;
- int word_idx = __ffs(pending_words);
- pending_words &= ~(1UL << word_idx);
+ unsigned long words;
+
+ words = MASK_LSBS(pending_words, word_idx);
+
+ /*
+ * If we masked out all events, wrap to beginning.
+ */
+ if (words == 0) {
+ word_idx = 0;
+ bit_idx = 0;
+ continue;
+ }
+ word_idx = __ffs(words);
+
+ pending_bits = active_evtchns(cpu, s, word_idx);
+ bit_idx = 0; /* usually scan entire word from start */
+ if (word_idx == start_word_idx) {
+ /* We scan the starting word in two parts */
+ if (i == 0)
+ /* 1st time: start in the middle */
+ bit_idx = start_bit_idx;
+ else
+ /* 2nd time: mask bits done already */
+ bit_idx &= (1UL << start_bit_idx) - 1;
+ }
- while ((pending_bits = active_evtchns(cpu, s, word_idx)) != 0) {
- int bit_idx = __ffs(pending_bits);
- int port = (word_idx * BITS_PER_LONG) + bit_idx;
- int irq = evtchn_to_irq[port];
+ do {
+ unsigned long bits;
+ int port, irq;
struct irq_desc *desc;
- mask_evtchn(port);
- clear_evtchn(port);
+ bits = MASK_LSBS(pending_bits, bit_idx);
+
+ /* If we masked out all events, move on. */
+ if (bits == 0)
+ break;
+
+ bit_idx = __ffs(bits);
+
+ /* Process port. */
+ port = (word_idx * BITS_PER_LONG) + bit_idx;
+ irq = evtchn_to_irq[port];
if (irq != -1) {
desc = irq_to_desc(irq);
if (desc)
mask_evtchn(evtchn);
}
-static void ack_dynirq(unsigned int irq)
+static void ack_dynirq(struct irq_data *data)
{
- int evtchn = evtchn_from_irq(irq);
+ int evtchn = evtchn_from_irq(data->irq);
- move_masked_irq(irq);
+ irq_move_irq(data);
if (VALID_EVTCHN(evtchn))
- unmask_evtchn(evtchn);
+ clear_evtchn(evtchn);
+}
+
+static void mask_ack_dynirq(struct irq_data *data)
+{
+ disable_dynirq(data);
+ ack_dynirq(data);
}

-static int retrigger_dynirq(unsigned int irq)
+static int retrigger_dynirq(struct irq_data *data)
{
- int evtchn = evtchn_from_irq(irq);
+ int evtchn = evtchn_from_irq(data->irq);
struct shared_info *sh = HYPERVISOR_shared_info;
int ret = 0;
xen_poll_irq_timeout(irq, 0 /* no timeout */);
}
+/* Check whether the IRQ line is shared with other guests. */
+int xen_test_irq_shared(int irq)
+{
+ struct irq_info *info = info_for_irq(irq);
+ struct physdev_irq_status_query irq_status = { .irq = info->u.pirq.pirq };
+
+ if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
+ return 0;
+ return !(irq_status.flags & XENIRQSTAT_shared);
+}
+EXPORT_SYMBOL_GPL(xen_test_irq_shared);
+
void xen_irq_resume(void)
{
- unsigned int cpu, irq, evtchn;
- struct irq_desc *desc;
+ unsigned int cpu, evtchn;
+ struct irq_info *info;
init_evtchn_cpu_bindings();
}
static struct irq_chip xen_dynamic_chip __read_mostly = {
- .name = "xen-dyn",
+ .name = "xen-dyn",
- .disable = disable_dynirq,
- .mask = disable_dynirq,
- .unmask = enable_dynirq,
+ .irq_disable = disable_dynirq,
+ .irq_mask = disable_dynirq,
+ .irq_unmask = enable_dynirq,
- .eoi = ack_dynirq,
- .set_affinity = set_affinity_irq,
- .retrigger = retrigger_dynirq,
+ .irq_ack = ack_dynirq,
+ .irq_mask_ack = mask_ack_dynirq,
+
+ .irq_set_affinity = set_affinity_irq,
+ .irq_retrigger = retrigger_dynirq,
};
static struct irq_chip xen_pirq_chip __read_mostly = {
- .name = "xen-pirq",
-
- .startup = startup_pirq,
- .shutdown = shutdown_pirq,
+ .name = "xen-pirq",
- .enable = enable_pirq,
- .unmask = enable_pirq,
+ .irq_startup = startup_pirq,
+ .irq_shutdown = shutdown_pirq,
- --
+ .irq_enable = enable_pirq,
- -- .irq_unmask = enable_pirq,
- --
+ .irq_disable = disable_pirq,
- -- .irq_mask = disable_pirq,
- -- .irq_ack = ack_pirq,
- .disable = disable_pirq,
- .mask = disable_pirq,
+ +++ .irq_mask = disable_dynirq,
+ +++ .irq_unmask = enable_dynirq,
+ ++
- .ack = ack_pirq,
- .end = end_pirq,
+ +++ .irq_ack = eoi_pirq,
+ +++ .irq_eoi = eoi_pirq,
+ +++ .irq_mask_ack = mask_ack_pirq,
- .set_affinity = set_affinity_irq,
+ .irq_set_affinity = set_affinity_irq,
- .retrigger = retrigger_dynirq,
+ .irq_retrigger = retrigger_dynirq,
};
static struct irq_chip xen_percpu_chip __read_mostly = {