*
* 2006-09-15 Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
* Add printing support for MCA/INIT.
+ *
+ * 2007-04-27 Russ Anderson <rja@sgi.com>
+ * Support multiple cpus going through OS_MCA in the same event.
*/
#include <linux/types.h>
#include <linux/init.h>
#endif
/* Used by mca_asm.S */
-u32 ia64_mca_serialize;
DEFINE_PER_CPU(u64, ia64_mca_data); /* == __per_cpu_mca[smp_processor_id()] */
DEFINE_PER_CPU(u64, ia64_mca_per_cpu_pte); /* PTE to map per-CPU area */
DEFINE_PER_CPU(u64, ia64_mca_pal_pte); /* PTE to map PAL code */
		goto no_mod;
	}

+	if (r13 != sos->prev_IA64_KR_CURRENT) {
+		msg = "inconsistent previous current and r13";
+		goto no_mod;
+	}
+
	if (!mca_recover_range(ms->pmsa_iip)) {
-		if (r13 != sos->prev_IA64_KR_CURRENT) {
-			msg = "inconsistent previous current and r13";
-			goto no_mod;
-		}
		if ((r12 - r13) >= KERNEL_STACK_SIZE) {
			msg = "inconsistent r12 and r13";
			goto no_mod;
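/*
 * Aside: a minimal user-space sketch of the consistency test above.  On
 * ia64 the kernel keeps the current task pointer in r13, at the base of
 * that task's kernel stack, so a sane stack pointer (r12) must lie within
 * KERNEL_STACK_SIZE bytes above r13.  KSTACK_SIZE and the function name
 * below are invented for illustration; they are not part of the patch.
 */
#include <stdbool.h>
#include <stdint.h>

#define KSTACK_SIZE (1UL << 14)		/* assumed stack size for the sketch */

static bool stack_regs_consistent(uint64_t r12, uint64_t r13)
{
	/* Unsigned subtraction also rejects r12 < r13: the difference
	 * wraps to a huge value and fails the range test, exactly like
	 * the (r12 - r13) >= KERNEL_STACK_SIZE check above. */
	return (r12 - r13) < KSTACK_SIZE;
}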
* further MCA logging is enabled by clearing logs.
* Monarch also has the duty of sending wakeup-IPIs to pull the
* slave processors out of rendezvous spinloop.
+ *
+ * If multiple processors call into OS_MCA, the first will become
+ * the monarch. Subsequent cpus will be recorded in the mca_cpu
+ * bitmask. After the first monarch has processed its MCA, it
+ * will wake up the next cpu in the mca_cpu bitmask and then go
+ * into the rendezvous loop. When all processors have serviced
+ * their MCA, the last monarch frees up the rest of the processors.
*/
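/*
 * Aside: a minimal user-space model of the election and handoff described
 * above, using C11 atomics in place of the kernel's atomic_t/cpumask_t.
 * All model_* names are invented for this sketch; it is not part of the
 * patch.  Two simplifications: the model publishes its bitmask bit before
 * bumping the count so an exiting monarch can never miss a newcomer (the
 * kernel's rendezvous sequencing covers that window there), and it does
 * not park finished monarchs until the last one clears monarch_cpu.
 */
#include <stdatomic.h>
#include <stdbool.h>

#define MODEL_NR_CPUS 8

static atomic_int model_mca_count;
static atomic_bool model_mca_cpu[MODEL_NR_CPUS];	/* stands in for cpumask_t */
static atomic_int model_monarch_cpu = -1;

/* Every cpu that takes an MCA calls this; it returns when it is this
 * cpu's turn to act as monarch. */
static void model_mca_enter(int cpu)
{
	atomic_store(&model_mca_cpu[cpu], true);	/* publish before counting */
	if (atomic_fetch_add(&model_mca_count, 1) == 0) {
		atomic_store(&model_mca_cpu[cpu], false);
		atomic_store(&model_monarch_cpu, cpu);	/* first in wins */
		return;
	}
	while (atomic_load(&model_mca_cpu[cpu]))
		;					/* spin until woken */
}

/* The acting monarch calls this when done: hand off to the next queued
 * cpu, or, if none remain, release everyone by clearing monarch_cpu. */
static void model_mca_exit(void)
{
	if (atomic_fetch_sub(&model_mca_count, 1) > 1) {
		for (int i = 0; i < MODEL_NR_CPUS; i++) {
			if (atomic_load(&model_mca_cpu[i])) {
				atomic_store(&model_monarch_cpu, i);
				atomic_store(&model_mca_cpu[i], false);	/* wake next */
				return;
			}
		}
	}
	atomic_store(&model_monarch_cpu, -1);	/* last one out frees the rest */
}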
void
ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
		 struct ia64_sal_os_state *sos)
{
	int cpu = smp_processor_id();
	struct task_struct *previous_current;
	struct ia64_mca_notify_die nd =
		{ .sos = sos, .monarch_cpu = &monarch_cpu };
+	static atomic_t mca_count;
+	static cpumask_t mca_cpu;

+	if (atomic_add_return(1, &mca_count) == 1) {
+		monarch_cpu = cpu;		/* first cpu in becomes the monarch */
+		sos->monarch = 1;
+	} else {
+		cpu_set(cpu, mca_cpu);		/* queue this cpu for a later monarch turn */
+		sos->monarch = 0;
+	}
	mprintk(KERN_INFO "Entered OS MCA handler. PSP=%lx cpu=%d "
		"monarch=%ld\n", sos->proc_state_param, cpu, sos->monarch);

	previous_current = ia64_mca_modify_original_stack(regs, sw, sos, "MCA");
-	monarch_cpu = cpu;
+
	if (notify_die(DIE_MCA_MONARCH_ENTER, "MCA", regs, (long)&nd, 0, 0)
			== NOTIFY_STOP)
		ia64_mca_spin(__FUNCTION__);
-	ia64_wait_for_slaves(cpu, "MCA");
+	if (sos->monarch) {
+		ia64_wait_for_slaves(cpu, "MCA");
+	} else {
+		ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_CONCURRENT_MCA;
+		while (cpu_isset(cpu, mca_cpu))
+			cpu_relax();	/* spin until monarch wakes us */
+		ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
+	}
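/*
 * Aside: the rendezvous checkin states a cpu can advertise, sketched as an
 * enum.  The names mirror the IA64_MCA_RENDEZ_CHECKIN_* definitions in
 * mca.h (CONCURRENT_MCA being the state this patch introduces); the
 * MODEL_ spelling is invented for this sketch and is not part of the patch.
 */
enum model_rendez_checkin {
	MODEL_CHECKIN_NOTDONE,		/* not checked in to any handler */
	MODEL_CHECKIN_DONE,		/* parked in SAL rendezvous */
	MODEL_CHECKIN_INIT,		/* checked in to the INIT handler */
	MODEL_CHECKIN_CONCURRENT_MCA,	/* in OS_MCA, waiting its turn as monarch */
};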
	/* Wakeup all the processors which are spinning in the rendezvous loop.
	 * They will leave SAL, then spin in the OS with interrupts disabled
			== NOTIFY_STOP)
		ia64_mca_spin(__FUNCTION__);
+
+	if (atomic_dec_return(&mca_count) > 0) {
+		int i;
+
+		/* wake up the next monarch cpu,
+		 * and put this cpu in the rendez loop.
+		 */
+		ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_CONCURRENT_MCA;
+		for_each_online_cpu(i) {
+			if (cpu_isset(i, mca_cpu)) {
+				monarch_cpu = i;
+				cpu_clear(i, mca_cpu);	/* wake next cpu */
+				while (monarch_cpu != -1)
+					cpu_relax();	/* spin until last cpu leaves */
+				ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
+				set_curr_task(cpu, previous_current);
+				return;
+			}
+		}
+	}
	set_curr_task(cpu, previous_current);
	monarch_cpu = -1;
}
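/*
 * Aside: driving the model_mca_enter()/model_mca_exit() sketch from the
 * earlier comment block with pthreads, to show that the "monarch" section
 * runs strictly one cpu at a time.  Compile together with that sketch;
 * the names are invented and this is not part of the patch.
 */
#include <pthread.h>
#include <stdio.h>

static void *model_cpu_thread(void *arg)
{
	int cpu = (int)(long)arg;

	model_mca_enter(cpu);		/* blocks until it is our turn */
	printf("cpu %d acting as monarch\n", cpu);	/* serialized section */
	model_mca_exit();
	return NULL;
}

int main(void)
{
	pthread_t t[MODEL_NR_CPUS];

	for (long i = 0; i < MODEL_NR_CPUS; i++)
		pthread_create(&t[i], NULL, model_cpu_thread, (void *)i);
	for (int i = 0; i < MODEL_NR_CPUS; i++)
		pthread_join(t[i], NULL);
	return 0;
}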