This adds irq-flags tracing (irqtrace) support for lockdep on ARM: the Kconfig now advertises TRACE_IRQFLAGS_SUPPORT, the SVC and USR IRQ entry/return paths call trace_hardirqs_off/trace_hardirqs_on, and the CPU interrupt mask handling moves out of <asm/system.h> into a new <asm/irqflags.h>, with the IRQ primitives renamed to their raw_* forms so the generic <linux/irqflags.h> can wrap them.
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
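
For context (not part of the diff below): the rename to raw_local_irq_*()
matters because the generic <linux/irqflags.h> layers the lockdep tracing
calls on top of the raw architecture primitives when irq-flags tracing is
enabled.  Roughly, as a simplified sketch of those generic wrappers:

	#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT	/* simplified sketch */
	#define local_irq_disable() \
		do { raw_local_irq_disable(); trace_hardirqs_off(); } while (0)
	#define local_irq_enable() \
		do { trace_hardirqs_on(); raw_local_irq_enable(); } while (0)
	#define local_irq_save(flags) \
		do { raw_local_irq_save(flags); trace_hardirqs_off(); } while (0)
	#define local_irq_restore(flags)				\
		do {							\
			if (raw_irqs_disabled_flags(flags)) {		\
				raw_local_irq_restore(flags);		\
				trace_hardirqs_off();			\
			} else {					\
				trace_hardirqs_on();			\
				raw_local_irq_restore(flags);		\
			}						\
		} while (0)
	#else
	#define local_irq_disable()	 raw_local_irq_disable()
	#define local_irq_enable()	 raw_local_irq_enable()
	#define local_irq_save(flags)	 raw_local_irq_save(flags)
	#define local_irq_restore(flags) raw_local_irq_restore(flags)
	#endif

This is why the architecture must also provide raw_irqs_disabled_flags()
and declare TRACE_IRQFLAGS_SUPPORT.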
bool
default y
+config TRACE_IRQFLAGS_SUPPORT
+ bool
+ default y
+
config HARDIRQS_SW_RESEND
bool
default y
__irq_svc:
svc_entry
+#ifdef CONFIG_TRACE_IRQFLAGS
+ bl trace_hardirqs_off @ irqs are off on exception entry
+#endif
#ifdef CONFIG_PREEMPT
get_thread_info tsk
ldr r8, [tsk, #TI_PREEMPT] @ get preempt count
#endif
ldr r0, [sp, #S_PSR] @ irqs are already disabled
msr spsr_cxsf, r0
+#ifdef CONFIG_TRACE_IRQFLAGS
+ tst r0, #PSR_I_BIT @ will irqs be enabled on return?
+ bleq trace_hardirqs_on @ yes: tell lockdep before returning
+#endif
ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
.ltorg
__irq_usr:
usr_entry
+#ifdef CONFIG_TRACE_IRQFLAGS
+ bl trace_hardirqs_off @ irqs are off on exception entry
+#endif
get_thread_info tsk
#ifdef CONFIG_PREEMPT
ldr r8, [tsk, #TI_PREEMPT] @ get preempt count
teq r0, r7
strne r0, [r0, -r0]
#endif
+#ifdef CONFIG_TRACE_IRQFLAGS
+ bl trace_hardirqs_on @ irqs are re-enabled on return to user
+#endif
mov why, #0
b ret_to_user
--- /dev/null
+#ifndef __ASM_ARM_IRQFLAGS_H
+#define __ASM_ARM_IRQFLAGS_H
+
+#ifdef __KERNEL__
+
+#include <asm/ptrace.h>
+
+/*
+ * CPU interrupt mask handling.
+ */
+#if __LINUX_ARM_ARCH__ >= 6
+
+#define raw_local_irq_save(x) \
+ ({ \
+ __asm__ __volatile__( \
+ "mrs %0, cpsr @ local_irq_save\n" \
+ "cpsid i" \
+ : "=r" (x) : : "memory", "cc"); \
+ })
+
+#define raw_local_irq_enable() __asm__("cpsie i @ __sti" : : : "memory", "cc")
+#define raw_local_irq_disable() __asm__("cpsid i @ __cli" : : : "memory", "cc")
+#define local_fiq_enable() __asm__("cpsie f @ __stf" : : : "memory", "cc")
+#define local_fiq_disable() __asm__("cpsid f @ __clf" : : : "memory", "cc")
+
+#else
+
+/*
+ * Save the current interrupt enable state & disable IRQs
+ */
+#define raw_local_irq_save(x) \
+ ({ \
+ unsigned long temp; \
+ (void) (&temp == &x); \
+ __asm__ __volatile__( \
+ "mrs %0, cpsr @ local_irq_save\n" \
+" orr %1, %0, #128\n" \
+" msr cpsr_c, %1" \
+ : "=r" (x), "=r" (temp) \
+ : \
+ : "memory", "cc"); \
+ })
+
+/*
+ * Enable IRQs
+ */
+#define raw_local_irq_enable() \
+ ({ \
+ unsigned long temp; \
+ __asm__ __volatile__( \
+ "mrs %0, cpsr @ local_irq_enable\n" \
+" bic %0, %0, #128\n" \
+" msr cpsr_c, %0" \
+ : "=r" (temp) \
+ : \
+ : "memory", "cc"); \
+ })
+
+/*
+ * Disable IRQs
+ */
+#define raw_local_irq_disable() \
+ ({ \
+ unsigned long temp; \
+ __asm__ __volatile__( \
+ "mrs %0, cpsr @ local_irq_disable\n" \
+" orr %0, %0, #128\n" \
+" msr cpsr_c, %0" \
+ : "=r" (temp) \
+ : \
+ : "memory", "cc"); \
+ })
+
+/*
+ * Enable FIQs
+ */
+#define local_fiq_enable() \
+ ({ \
+ unsigned long temp; \
+ __asm__ __volatile__( \
+ "mrs %0, cpsr @ stf\n" \
+" bic %0, %0, #64\n" \
+" msr cpsr_c, %0" \
+ : "=r" (temp) \
+ : \
+ : "memory", "cc"); \
+ })
+
+/*
+ * Disable FIQs
+ */
+#define local_fiq_disable() \
+ ({ \
+ unsigned long temp; \
+ __asm__ __volatile__( \
+ "mrs %0, cpsr @ clf\n" \
+" orr %0, %0, #64\n" \
+" msr cpsr_c, %0" \
+ : "=r" (temp) \
+ : \
+ : "memory", "cc"); \
+ })
+
+#endif
+
+/*
+ * Save the current interrupt enable state.
+ */
+#define raw_local_save_flags(x) \
+ ({ \
+ __asm__ __volatile__( \
+ "mrs %0, cpsr @ local_save_flags" \
+ : "=r" (x) : : "memory", "cc"); \
+ })
+
+/*
+ * restore saved IRQ & FIQ state
+ */
+#define raw_local_irq_restore(x) \
+ __asm__ __volatile__( \
+ "msr cpsr_c, %0 @ local_irq_restore\n" \
+ : \
+ : "r" (x) \
+ : "memory", "cc")
+
+#define raw_irqs_disabled_flags(flags) \
+({ \
+ (int)((flags) & PSR_I_BIT); \
+})
+
+#endif
+#endif
{
}
-/*
- * CPU interrupt mask handling.
- */
-#if __LINUX_ARM_ARCH__ >= 6
-
-#define local_irq_save(x) \
- ({ \
- __asm__ __volatile__( \
- "mrs %0, cpsr @ local_irq_save\n" \
- "cpsid i" \
- : "=r" (x) : : "memory", "cc"); \
- })
-
-#define local_irq_enable() __asm__("cpsie i @ __sti" : : : "memory", "cc")
-#define local_irq_disable() __asm__("cpsid i @ __cli" : : : "memory", "cc")
-#define local_fiq_enable() __asm__("cpsie f @ __stf" : : : "memory", "cc")
-#define local_fiq_disable() __asm__("cpsid f @ __clf" : : : "memory", "cc")
-
-#else
-
-/*
- * Save the current interrupt enable state & disable IRQs
- */
-#define local_irq_save(x) \
- ({ \
- unsigned long temp; \
- (void) (&temp == &x); \
- __asm__ __volatile__( \
- "mrs %0, cpsr @ local_irq_save\n" \
-" orr %1, %0, #128\n" \
-" msr cpsr_c, %1" \
- : "=r" (x), "=r" (temp) \
- : \
- : "memory", "cc"); \
- })
-
-/*
- * Enable IRQs
- */
-#define local_irq_enable() \
- ({ \
- unsigned long temp; \
- __asm__ __volatile__( \
- "mrs %0, cpsr @ local_irq_enable\n" \
-" bic %0, %0, #128\n" \
-" msr cpsr_c, %0" \
- : "=r" (temp) \
- : \
- : "memory", "cc"); \
- })
-
-/*
- * Disable IRQs
- */
-#define local_irq_disable() \
- ({ \
- unsigned long temp; \
- __asm__ __volatile__( \
- "mrs %0, cpsr @ local_irq_disable\n" \
-" orr %0, %0, #128\n" \
-" msr cpsr_c, %0" \
- : "=r" (temp) \
- : \
- : "memory", "cc"); \
- })
-
-/*
- * Enable FIQs
- */
-#define local_fiq_enable() \
- ({ \
- unsigned long temp; \
- __asm__ __volatile__( \
- "mrs %0, cpsr @ stf\n" \
-" bic %0, %0, #64\n" \
-" msr cpsr_c, %0" \
- : "=r" (temp) \
- : \
- : "memory", "cc"); \
- })
-
-/*
- * Disable FIQs
- */
-#define local_fiq_disable() \
- ({ \
- unsigned long temp; \
- __asm__ __volatile__( \
- "mrs %0, cpsr @ clf\n" \
-" orr %0, %0, #64\n" \
-" msr cpsr_c, %0" \
- : "=r" (temp) \
- : \
- : "memory", "cc"); \
- })
-
-#endif
-
-/*
- * Save the current interrupt enable state.
- */
-#define local_save_flags(x) \
- ({ \
- __asm__ __volatile__( \
- "mrs %0, cpsr @ local_save_flags" \
- : "=r" (x) : : "memory", "cc"); \
- })
-
-/*
- * restore saved IRQ & FIQ state
- */
-#define local_irq_restore(x) \
- __asm__ __volatile__( \
- "msr cpsr_c, %0 @ local_irq_restore\n" \
- : \
- : "r" (x) \
- : "memory", "cc")
-
-#define irqs_disabled() \
-({ \
- unsigned long flags; \
- local_save_flags(flags); \
- (int)(flags & PSR_I_BIT); \
-})
+#include <linux/irqflags.h>
#ifdef CONFIG_SMP