"paca->soft_enabled" is used as a flag to mask some of interrupts.
Currently supported flags values and their details:
soft_enabled MSR[EE]
0 0 Disabled (PMI and HMI not masked)
1 1 Enabled
"paca->soft_enabled" is initialized to 1 to mark the interrupts as
enabled. arch_local_irq_disable() will toggle the value when
interrupts need to be disabled. At this point, the interrupts are not
actually disabled; instead, the interrupt vector has code to check for
the flag and mask the interrupt when it occurs. By "mask it", it
updates paca->irq_happened and returns. arch_local_irq_restore() is
called to re-enable interrupts, which checks and replays interrupts if
any occurred.
Now, as mentioned, the current logic does not mask "performance
monitoring interrupts" and PMIs are implemented as NMIs. But this
patchset depends on local_irq_* for a successful local_* update.
Meaning, mask all possible interrupts during a local_* update and
replay them after the update.
So the idea here is to reverse the "paca->soft_enabled" logic. New
values and details:
soft_enabled MSR[EE]
1 0 Disabled (PMI and HMI not masked)
0 1 Enabled
The reason for this change is to create a foundation for a third mask
value "0x2" for "soft_enabled" to add support to mask PMIs. When
->soft_enabled is set to a value of "3", PMI interrupts are masked and
when set to a value of "1", PMIs are not masked. This patch also
extends soft_enabled as an interrupt disable mask.
Current flags are renamed from IRQ_{EN,DIS}ABLED to
IRQS_ENABLED and IRQS_DISABLED.
The patch also fixes the ptrace call to force the user to see the
softe value to be always 1. The reason being, even though userspace
has no business knowing about softe, it is part of pt_regs. Likewise
in the signal context.
Signed-off-by: Madhavan Srinivasan <maddy@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
#define __SOFTEN_TEST(h, vec) \
lbz r10,PACASOFTIRQEN(r13); \
- cmpwi r10,IRQS_DISABLED; \
+ andi. r10,r10,IRQS_DISABLED; \
li r10,SOFTEN_VALUE_##vec; \
- beq masked_##h##interrupt
+ bne masked_##h##interrupt
#define _SOFTEN_TEST(h, vec) __SOFTEN_TEST(h, vec)
/*
* flags for paca->soft_enabled
*/
-#define IRQS_ENABLED 1
-#define IRQS_DISABLED 0
+#define IRQS_ENABLED 0
+#define IRQS_DISABLED 1
#endif /* CONFIG_PPC64 */
*/
static inline notrace void soft_enabled_set(unsigned long mask)
{
+#ifdef CONFIG_TRACE_IRQFLAGS
+ /*
+ * mask must always include LINUX bit if any are set, and
+ * interrupts don't get replayed until the Linux interrupt is
+ * unmasked. This could be changed to replay partial unmasks
+ * in future, which would allow Linux masks to nest inside
+ * other masks, among other things. For now, be very dumb and
+ * simple.
+ */
+ WARN_ON(mask && !(mask & IRQS_DISABLED));
+#endif
+
asm volatile(
"stb %0,%1(13)"
:
: "memory");
}
-static inline notrace unsigned long soft_enabled_set_return(unsigned long enable)
+static inline notrace unsigned long soft_enabled_set_return(unsigned long mask)
{
unsigned long flags;
+#ifdef CONFIG_TRACE_IRQFLAGS
+ WARN_ON(mask && !(mask & IRQS_DISABLED));
+#endif
+
asm volatile(
"lbz %0,%1(13); stb %2,%1(13)"
: "=&r" (flags)
: "i" (offsetof(struct paca_struct, soft_enabled)),
- "r" (enable)
+ "r" (mask)
: "memory");
return flags;
static inline bool arch_irqs_disabled_flags(unsigned long flags)
{
- return flags == IRQS_DISABLED;
+ return flags & IRQS_DISABLED;
}
static inline bool arch_irqs_disabled(void)
#define hard_irq_disable() do { \
unsigned long flags; \
__hard_irq_disable(); \
- flags = soft_enabled_set_return(IRQS_DISABLED); \
+ flags = soft_enabled_set_return(IRQS_DISABLED);\
local_paca->irq_happened |= PACA_IRQ_HARD_DIS; \
if (!arch_irqs_disabled_flags(flags)) \
trace_hardirqs_off(); \
static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
{
- return (regs->softe == IRQS_DISABLED);
+ return (regs->softe & IRQS_DISABLED);
}
extern bool prep_irq_for_idle(void);
#define RECONCILE_IRQ_STATE(__rA, __rB) \
lbz __rA,PACASOFTIRQEN(r13); \
lbz __rB,PACAIRQHAPPENED(r13); \
- cmpwi cr0,__rA,IRQS_DISABLED;\
+ andi. __rA,__rA,IRQS_DISABLED;\
li __rA,IRQS_DISABLED; \
ori __rB,__rB,PACA_IRQ_HARD_DIS; \
stb __rB,PACAIRQHAPPENED(r13); \
- beq 44f; \
+ bne 44f; \
stb __rA,PACASOFTIRQEN(r13); \
TRACE_DISABLE_INTS; \
44:
*/
#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_BUG)
lbz r10,PACASOFTIRQEN(r13)
- xori r10,r10,IRQS_ENABLED
-1: tdnei r10,0
+1: tdnei r10,IRQS_ENABLED
EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
#endif
beq+ restore
/* Check that preempt_count() == 0 and interrupts are enabled */
lwz r8,TI_PREEMPT(r9)
- cmpwi cr1,r8,0
+ cmpwi cr0,r8,0
+ bne restore
ld r0,SOFTE(r1)
- cmpdi r0,IRQS_DISABLED
- crandc eq,cr1*4+eq,eq
+ andi. r0,r0,IRQS_DISABLED
bne restore
/*
*/
ld r5,SOFTE(r1)
lbz r6,PACASOFTIRQEN(r13)
- cmpwi cr0,r5,IRQS_DISABLED
- beq .Lrestore_irq_off
+ andi. r5,r5,IRQS_DISABLED
+ bne .Lrestore_irq_off
/* We are enabling, were we already enabled ? Yes, just return */
- cmpwi cr0,r6,IRQS_ENABLED
+ andi. r6,r6,IRQS_DISABLED
beq cr0,.Ldo_restore
/*
li r0,0
mtcr r0
-#ifdef CONFIG_BUG
+#ifdef CONFIG_BUG
/* There is no way it is acceptable to get here with interrupts enabled,
* check it with the asm equivalent of WARN_ON
*/
lbz r0,PACASOFTIRQEN(r13)
-1: tdnei r0,IRQS_DISABLED
+1: tdeqi r0,IRQS_ENABLED
EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
#endif
-
+
/* Hard-disable interrupts */
mfmsr r6
rldicl r7,r6,48,1
ld r5,SOFTE(r1)
/* Interrupts had better not already be enabled... */
- twnei r6,IRQS_DISABLED
+ tweqi r6,IRQS_ENABLED
- cmpwi cr0,r5,IRQS_DISABLED
- beq 1f
+ andi. r6,r5,IRQS_DISABLED
+ bne 1f
TRACE_ENABLE_INTS
stb r5,PACASOFTIRQEN(r13)
#define PROLOG_ADDITION_MASKABLE_GEN(n) \
lbz r10,PACASOFTIRQEN(r13); /* are irqs soft-disabled ? */ \
- cmpwi cr0,r10,IRQS_DISABLED; /* yes -> go out of line */ \
- beq masked_interrupt_book3e_##n
+ andi. r10,r10,IRQS_DISABLED; /* yes -> go out of line */ \
+ bne masked_interrupt_book3e_##n
#define PROLOG_ADDITION_2REGS_GEN(n) \
std r14,PACA_EXGEN+EX_R14(r13); \
return 0;
}
-notrace void arch_local_irq_restore(unsigned long en)
+notrace void arch_local_irq_restore(unsigned long mask)
{
unsigned char irq_happened;
unsigned int replay;
/* Write the new soft-enabled value */
- soft_enabled_set(en);
- if (en == IRQS_DISABLED)
+ soft_enabled_set(mask);
+ if (mask) {
+#ifdef CONFIG_TRACE_IRQFLAGS
+ /*
+ * mask must always include LINUX bit if any
+ * are set, and interrupts don't get replayed until
+ * the Linux interrupt is unmasked. This could be
+ * changed to replay partial unmasks in future,
+ * which would allow Linux masks to nest inside
+ * other masks, among other things. For now, be very
+ * dumb and simple.
+ */
+ WARN_ON(!(mask & IRQS_DISABLED));
+#endif
return;
+ }
+
/*
* From this point onward, we can take interrupts, preempt,
* etc... unless we got hard-disabled. We check if an event
*/
static inline int perf_intr_is_nmi(struct pt_regs *regs)
{
- return (regs->softe == IRQS_DISABLED);
+ return (regs->softe & IRQS_DISABLED);
}
/*