kvm_debug("Delivering SYSCALL @ pc %#lx\n", arch->pc);
kvm_change_c0_guest_cause(cop0, (0xff),
- (T_SYSCALL << CAUSEB_EXCCODE));
+ (EXCCODE_SYS << CAUSEB_EXCCODE));
/* Set PC to the exception entry point */
arch->pc = KVM_GUEST_KSEG0 + 0x180;
}
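For reference, the guest Cause update in this hunk is assumed to be a plain read-modify-write of the ExcCode field (Cause bits 6:2). The helper below is a minimal user-space sketch of that semantics, not the kernel's kvm_change_c0_guest_cause() macro; the constants simply mirror the architectural MIPS encodings (CAUSEB_EXCCODE = 2, syscall ExcCode = 8).

#include <stdio.h>

#define CAUSEB_EXCCODE	2	/* ExcCode occupies Cause[6:2] */
#define EXCCODE_SYS	8	/* architectural syscall exception code */

/* Assumed semantics: clear the bits named in 'change', then set 'val' within them. */
static unsigned long change_cause(unsigned long cause, unsigned long change,
				  unsigned long val)
{
	return (cause & ~change) | (val & change);
}

int main(void)
{
	unsigned long cause = 0;

	/* Mirrors kvm_change_c0_guest_cause(cop0, 0xff, EXCCODE_SYS << CAUSEB_EXCCODE) */
	cause = change_cause(cause, 0xff, EXCCODE_SYS << CAUSEB_EXCCODE);
	printf("ExcCode = %lu\n", (cause >> CAUSEB_EXCCODE) & 0x1f);
	return 0;
}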
kvm_change_c0_guest_cause(cop0, (0xff),
- (T_TLB_LD_MISS << CAUSEB_EXCCODE));
+ (EXCCODE_TLBL << CAUSEB_EXCCODE));
/* setup badvaddr, context and entryhi registers for the guest */
kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
}
kvm_change_c0_guest_cause(cop0, (0xff),
- (T_TLB_LD_MISS << CAUSEB_EXCCODE));
+ (EXCCODE_TLBL << CAUSEB_EXCCODE));
/* setup badvaddr, context and entryhi registers for the guest */
kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
}
kvm_change_c0_guest_cause(cop0, (0xff),
- (T_TLB_ST_MISS << CAUSEB_EXCCODE));
+ (EXCCODE_TLBS << CAUSEB_EXCCODE));
/* setup badvaddr, context and entryhi registers for the guest */
kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
}
kvm_change_c0_guest_cause(cop0, (0xff),
- (T_TLB_ST_MISS << CAUSEB_EXCCODE));
+ (EXCCODE_TLBS << CAUSEB_EXCCODE));
/* setup badvaddr, context and entryhi registers for the guest */
kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
arch->pc = KVM_GUEST_KSEG0 + 0x180;
}
- kvm_change_c0_guest_cause(cop0, (0xff), (T_TLB_MOD << CAUSEB_EXCCODE));
+ kvm_change_c0_guest_cause(cop0, (0xff),
+ (EXCCODE_MOD << CAUSEB_EXCCODE));
/* setup badvaddr, context and entryhi registers for the guest */
kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
arch->pc = KVM_GUEST_KSEG0 + 0x180;
kvm_change_c0_guest_cause(cop0, (0xff),
- (T_COP_UNUSABLE << CAUSEB_EXCCODE));
+ (EXCCODE_CPU << CAUSEB_EXCCODE));
kvm_change_c0_guest_cause(cop0, (CAUSEF_CE), (0x1 << CAUSEB_CE));
return EMULATE_DONE;
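The second kvm_change_c0_guest_cause() call above additionally fills the Cause.CE field so the guest can see which coprocessor raised the Coprocessor Unusable exception; writing 1 there names coprocessor 1, the FPU. The guest-side privilege check later reads the same field back (the (cause & CAUSEF_CE) >> CAUSEB_CE test further down). A small sketch of that extraction, assuming the architectural field position (CE in Cause[29:28]):

#define CAUSEB_CE	28			/* CE field: Cause[29:28] */
#define CAUSEF_CE	(0x3UL << CAUSEB_CE)

/* Which coprocessor triggered the Coprocessor Unusable exception? (1 == CP1/FPU) */
static unsigned int cause_ce(unsigned long cause)
{
	return (cause & CAUSEF_CE) >> CAUSEB_CE;
}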
kvm_debug("Delivering RI @ pc %#lx\n", arch->pc);
kvm_change_c0_guest_cause(cop0, (0xff),
- (T_RES_INST << CAUSEB_EXCCODE));
+ (EXCCODE_RI << CAUSEB_EXCCODE));
/* Set PC to the exception entry point */
arch->pc = KVM_GUEST_KSEG0 + 0x180;
kvm_debug("Delivering BP @ pc %#lx\n", arch->pc);
kvm_change_c0_guest_cause(cop0, (0xff),
- (T_BREAK << CAUSEB_EXCCODE));
+ (EXCCODE_BP << CAUSEB_EXCCODE));
/* Set PC to the exception entry point */
arch->pc = KVM_GUEST_KSEG0 + 0x180;
kvm_debug("Delivering TRAP @ pc %#lx\n", arch->pc);
kvm_change_c0_guest_cause(cop0, (0xff),
- (T_TRAP << CAUSEB_EXCCODE));
+ (EXCCODE_TR << CAUSEB_EXCCODE));
/* Set PC to the exception entry point */
arch->pc = KVM_GUEST_KSEG0 + 0x180;
kvm_debug("Delivering MSAFPE @ pc %#lx\n", arch->pc);
kvm_change_c0_guest_cause(cop0, (0xff),
- (T_MSAFPE << CAUSEB_EXCCODE));
+ (EXCCODE_MSAFPE << CAUSEB_EXCCODE));
/* Set PC to the exception entry point */
arch->pc = KVM_GUEST_KSEG0 + 0x180;
kvm_debug("Delivering FPE @ pc %#lx\n", arch->pc);
kvm_change_c0_guest_cause(cop0, (0xff),
- (T_FPE << CAUSEB_EXCCODE));
+ (EXCCODE_FPE << CAUSEB_EXCCODE));
/* Set PC to the exception entry point */
arch->pc = KVM_GUEST_KSEG0 + 0x180;
kvm_debug("Delivering MSADIS @ pc %#lx\n", arch->pc);
kvm_change_c0_guest_cause(cop0, (0xff),
- (T_MSADIS << CAUSEB_EXCCODE));
+ (EXCCODE_MSADIS << CAUSEB_EXCCODE));
/* Set PC to the exception entry point */
arch->pc = KVM_GUEST_KSEG0 + 0x180;
if (usermode) {
switch (exccode) {
- case T_INT:
- case T_SYSCALL:
- case T_BREAK:
- case T_RES_INST:
- case T_TRAP:
- case T_MSAFPE:
- case T_FPE:
- case T_MSADIS:
+ case EXCCODE_INT:
+ case EXCCODE_SYS:
+ case EXCCODE_BP:
+ case EXCCODE_RI:
+ case EXCCODE_TR:
+ case EXCCODE_MSAFPE:
+ case EXCCODE_FPE:
+ case EXCCODE_MSADIS:
break;
- case T_COP_UNUSABLE:
+ case EXCCODE_CPU:
if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 0)
er = EMULATE_PRIV_FAIL;
break;
- case T_TLB_MOD:
+ case EXCCODE_MOD:
break;
- case T_TLB_LD_MISS:
+ case EXCCODE_TLBL:
/*
* If we are accessing Guest kernel space, then send an
* address error exception to the guest
kvm_debug("%s: LD MISS @ %#lx\n", __func__,
badvaddr);
cause &= ~0xff;
- cause |= (T_ADDR_ERR_LD << CAUSEB_EXCCODE);
+ cause |= (EXCCODE_ADEL << CAUSEB_EXCCODE);
er = EMULATE_PRIV_FAIL;
}
break;
- case T_TLB_ST_MISS:
+ case EXCCODE_TLBS:
/*
* If we are accessing Guest kernel space, then send an
* address error exception to the guest
kvm_debug("%s: ST MISS @ %#lx\n", __func__,
badvaddr);
cause &= ~0xff;
- cause |= (T_ADDR_ERR_ST << CAUSEB_EXCCODE);
+ cause |= (EXCCODE_ADES << CAUSEB_EXCCODE);
er = EMULATE_PRIV_FAIL;
}
break;
- case T_ADDR_ERR_ST:
+ case EXCCODE_ADES:
kvm_debug("%s: address error ST @ %#lx\n", __func__,
badvaddr);
if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
cause &= ~0xff;
- cause |= (T_TLB_ST_MISS << CAUSEB_EXCCODE);
+ cause |= (EXCCODE_TLBS << CAUSEB_EXCCODE);
}
er = EMULATE_PRIV_FAIL;
break;
- case T_ADDR_ERR_LD:
+ case EXCCODE_ADEL:
kvm_debug("%s: address error LD @ %#lx\n", __func__,
badvaddr);
if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
cause &= ~0xff;
- cause |= (T_TLB_LD_MISS << CAUSEB_EXCCODE);
+ cause |= (EXCCODE_TLBL << CAUSEB_EXCCODE);
}
er = EMULATE_PRIV_FAIL;
break;
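In the user-mode privilege check above, the ExcCode in the host-supplied cause value is rewritten before the exception is reinjected: a user-mode access to guest kernel space is downgraded from a TLB refill (TLBL/TLBS) to an address error (ADEL/ADES), while an address error on the commpage is promoted back to a TLB miss. A minimal sketch of that field rewrite, assuming the standard MIPS encodings (ExcCode in Cause[6:2], AdEL = 4):

#define CAUSEB_EXCCODE	2
#define EXCCODE_ADEL	4	/* address error on load/fetch */

/* Clear the low Cause bits the KVM code masks with ~0xff, then install a new ExcCode. */
static unsigned long set_exccode(unsigned long cause, unsigned int exccode)
{
	cause &= ~0xff;
	cause |= (unsigned long)exccode << CAUSEB_EXCCODE;
	return cause;
}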
(va & VPN2_MASK) |
(kvm_read_c0_guest_entryhi(vcpu->arch.cop0) & ASID_MASK));
if (index < 0) {
- if (exccode == T_TLB_LD_MISS) {
+ if (exccode == EXCCODE_TLBL) {
er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu);
- } else if (exccode == T_TLB_ST_MISS) {
+ } else if (exccode == EXCCODE_TLBS) {
er = kvm_mips_emulate_tlbmiss_st(cause, opc, run, vcpu);
} else {
kvm_err("%s: invalid exc code: %d\n", __func__,
* exception to the guest
*/
if (!TLB_IS_VALID(*tlb, va)) {
- if (exccode == T_TLB_LD_MISS) {
+ if (exccode == EXCCODE_TLBL) {
er = kvm_mips_emulate_tlbinv_ld(cause, opc, run,
vcpu);
- } else if (exccode == T_TLB_ST_MISS) {
+ } else if (exccode == EXCCODE_TLBS) {
er = kvm_mips_emulate_tlbinv_st(cause, opc, run,
vcpu);
} else {
}
switch (exccode) {
- case T_INT:
- kvm_debug("[%d]T_INT @ %p\n", vcpu->vcpu_id, opc);
+ case EXCCODE_INT:
+ kvm_debug("[%d]EXCCODE_INT @ %p\n", vcpu->vcpu_id, opc);
++vcpu->stat.int_exits;
trace_kvm_exit(vcpu, INT_EXITS);
ret = RESUME_GUEST;
break;
- case T_COP_UNUSABLE:
- kvm_debug("T_COP_UNUSABLE: @ PC: %p\n", opc);
+ case EXCCODE_CPU:
+ kvm_debug("EXCCODE_CPU: @ PC: %p\n", opc);
++vcpu->stat.cop_unusable_exits;
trace_kvm_exit(vcpu, COP_UNUSABLE_EXITS);
ret = RESUME_HOST;
break;
- case T_TLB_MOD:
+ case EXCCODE_MOD:
++vcpu->stat.tlbmod_exits;
trace_kvm_exit(vcpu, TLBMOD_EXITS);
ret = kvm_mips_callbacks->handle_tlb_mod(vcpu);
break;
- case T_TLB_ST_MISS:
+ case EXCCODE_TLBS:
kvm_debug("TLB ST fault: cause %#x, status %#lx, PC: %p, BadVaddr: %#lx\n",
cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc,
badvaddr);
ret = kvm_mips_callbacks->handle_tlb_st_miss(vcpu);
break;
- case T_TLB_LD_MISS:
+ case EXCCODE_TLBL:
kvm_debug("TLB LD fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
cause, opc, badvaddr);
ret = kvm_mips_callbacks->handle_tlb_ld_miss(vcpu);
break;
- case T_ADDR_ERR_ST:
+ case EXCCODE_ADES:
++vcpu->stat.addrerr_st_exits;
trace_kvm_exit(vcpu, ADDRERR_ST_EXITS);
ret = kvm_mips_callbacks->handle_addr_err_st(vcpu);
break;
- case T_ADDR_ERR_LD:
+ case EXCCODE_ADEL:
++vcpu->stat.addrerr_ld_exits;
trace_kvm_exit(vcpu, ADDRERR_LD_EXITS);
ret = kvm_mips_callbacks->handle_addr_err_ld(vcpu);
break;
- case T_SYSCALL:
+ case EXCCODE_SYS:
++vcpu->stat.syscall_exits;
trace_kvm_exit(vcpu, SYSCALL_EXITS);
ret = kvm_mips_callbacks->handle_syscall(vcpu);
break;
- case T_RES_INST:
+ case EXCCODE_RI:
++vcpu->stat.resvd_inst_exits;
trace_kvm_exit(vcpu, RESVD_INST_EXITS);
ret = kvm_mips_callbacks->handle_res_inst(vcpu);
break;
- case T_BREAK:
+ case EXCCODE_BP:
++vcpu->stat.break_inst_exits;
trace_kvm_exit(vcpu, BREAK_INST_EXITS);
ret = kvm_mips_callbacks->handle_break(vcpu);
break;
- case T_TRAP:
+ case EXCCODE_TR:
++vcpu->stat.trap_inst_exits;
trace_kvm_exit(vcpu, TRAP_INST_EXITS);
ret = kvm_mips_callbacks->handle_trap(vcpu);
break;
- case T_MSAFPE:
+ case EXCCODE_MSAFPE:
++vcpu->stat.msa_fpe_exits;
trace_kvm_exit(vcpu, MSA_FPE_EXITS);
ret = kvm_mips_callbacks->handle_msa_fpe(vcpu);
break;
- case T_FPE:
+ case EXCCODE_FPE:
++vcpu->stat.fpe_exits;
trace_kvm_exit(vcpu, FPE_EXITS);
ret = kvm_mips_callbacks->handle_fpe(vcpu);
break;
- case T_MSADIS:
+ case EXCCODE_MSADIS:
++vcpu->stat.msa_disabled_exits;
trace_kvm_exit(vcpu, MSA_DISABLED_EXITS);
ret = kvm_mips_callbacks->handle_msa_disabled(vcpu);