KVM: PPC: Use ccr field in pt_regs struct embedded in vcpu struct
authorPaul Mackerras <paulus@ozlabs.org>
Mon, 8 Oct 2018 05:30:58 +0000 (16:30 +1100)
committerMichael Ellerman <mpe@ellerman.id.au>
Tue, 9 Oct 2018 05:04:27 +0000 (16:04 +1100)
When the 'regs' field was added to struct kvm_vcpu_arch, the code
was changed to use several of the fields inside regs (e.g., gpr, lr,
etc.) but not the ccr field, because the ccr field in struct pt_regs
is 64 bits on 64-bit platforms, but the cr field in kvm_vcpu_arch is
only 32 bits.  This changes the code to use the regs.ccr field
instead of cr, and changes the assembly code on 64-bit platforms to
use 64-bit loads and stores instead of 32-bit ones.

Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
13 files changed:
arch/powerpc/include/asm/kvm_book3s.h
arch/powerpc/include/asm/kvm_book3s_64.h
arch/powerpc/include/asm/kvm_booke.h
arch/powerpc/include/asm/kvm_host.h
arch/powerpc/kernel/asm-offsets.c
arch/powerpc/kvm/book3s_emulate.c
arch/powerpc/kvm/book3s_hv.c
arch/powerpc/kvm/book3s_hv_rmhandlers.S
arch/powerpc/kvm/book3s_hv_tm.c
arch/powerpc/kvm/book3s_hv_tm_builtin.c
arch/powerpc/kvm/book3s_pr.c
arch/powerpc/kvm/bookehv_interrupts.S
arch/powerpc/kvm/emulate_loadstore.c

index 83a9aa3cf689172648d234da87ec7d4e95add69d..dd18d8174504f5550538a6536aff04f7f6cd38ea 100644 (file)
@@ -301,12 +301,12 @@ static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
 
 static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
 {
-       vcpu->arch.cr = val;
+       vcpu->arch.regs.ccr = val;
 }
 
 static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
 {
-       return vcpu->arch.cr;
+       return vcpu->arch.regs.ccr;
 }
 
 static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val)
index af25aaa24090e5a39c6d2c481c318929b3eae975..5c0e2d9a7e15149dd1354fd75036a7d7e73daabf 100644 (file)
@@ -483,7 +483,7 @@ static inline u64 sanitize_msr(u64 msr)
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 static inline void copy_from_checkpoint(struct kvm_vcpu *vcpu)
 {
-       vcpu->arch.cr  = vcpu->arch.cr_tm;
+       vcpu->arch.regs.ccr  = vcpu->arch.cr_tm;
        vcpu->arch.regs.xer = vcpu->arch.xer_tm;
        vcpu->arch.regs.link  = vcpu->arch.lr_tm;
        vcpu->arch.regs.ctr = vcpu->arch.ctr_tm;
@@ -500,7 +500,7 @@ static inline void copy_from_checkpoint(struct kvm_vcpu *vcpu)
 
 static inline void copy_to_checkpoint(struct kvm_vcpu *vcpu)
 {
-       vcpu->arch.cr_tm  = vcpu->arch.cr;
+       vcpu->arch.cr_tm  = vcpu->arch.regs.ccr;
        vcpu->arch.xer_tm = vcpu->arch.regs.xer;
        vcpu->arch.lr_tm  = vcpu->arch.regs.link;
        vcpu->arch.ctr_tm = vcpu->arch.regs.ctr;
index d513e3ed1c659c711d4a68e5db5924f720c66833..f0cef625f17ce0afcb1d08510d27a43fce77f6fc 100644 (file)
@@ -46,12 +46,12 @@ static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
 
 static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
 {
-       vcpu->arch.cr = val;
+       vcpu->arch.regs.ccr = val;
 }
 
 static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
 {
-       return vcpu->arch.cr;
+       return vcpu->arch.regs.ccr;
 }
 
 static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val)
index a3d4f61a409d89f5f0e209fe68daed173b8e14c8..c9cc42f73b3c57b92a74a8214582266d4917c270 100644 (file)
@@ -538,8 +538,6 @@ struct kvm_vcpu_arch {
        ulong tar;
 #endif
 
-       u32 cr;
-
 #ifdef CONFIG_PPC_BOOK3S
        ulong hflags;
        ulong guest_owned_ext;
index 89cf15566c4e80ba4e81d500e1d86f204e1241b2..7c3738d890e8b21d93581c09ce46620afe88cef9 100644 (file)
@@ -438,7 +438,7 @@ int main(void)
 #ifdef CONFIG_PPC_BOOK3S
        OFFSET(VCPU_TAR, kvm_vcpu, arch.tar);
 #endif
-       OFFSET(VCPU_CR, kvm_vcpu, arch.cr);
+       OFFSET(VCPU_CR, kvm_vcpu, arch.regs.ccr);
        OFFSET(VCPU_PC, kvm_vcpu, arch.regs.nip);
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
        OFFSET(VCPU_MSR, kvm_vcpu, arch.shregs.msr);
@@ -695,7 +695,7 @@ int main(void)
 #endif /* CONFIG_PPC_BOOK3S_64 */
 
 #else /* CONFIG_PPC_BOOK3S */
-       OFFSET(VCPU_CR, kvm_vcpu, arch.cr);
+       OFFSET(VCPU_CR, kvm_vcpu, arch.regs.ccr);
        OFFSET(VCPU_XER, kvm_vcpu, arch.regs.xer);
        OFFSET(VCPU_LR, kvm_vcpu, arch.regs.link);
        OFFSET(VCPU_CTR, kvm_vcpu, arch.regs.ctr);
index 36b11c5a0dbb968444da85b230cbc7020f69f583..2654df220d05487cfd27394b0bb8360e2ab11c20 100644 (file)
@@ -110,7 +110,7 @@ static inline void kvmppc_copyto_vcpu_tm(struct kvm_vcpu *vcpu)
        vcpu->arch.ctr_tm = vcpu->arch.regs.ctr;
        vcpu->arch.tar_tm = vcpu->arch.tar;
        vcpu->arch.lr_tm = vcpu->arch.regs.link;
-       vcpu->arch.cr_tm = vcpu->arch.cr;
+       vcpu->arch.cr_tm = vcpu->arch.regs.ccr;
        vcpu->arch.xer_tm = vcpu->arch.regs.xer;
        vcpu->arch.vrsave_tm = vcpu->arch.vrsave;
 }
@@ -129,7 +129,7 @@ static inline void kvmppc_copyfrom_vcpu_tm(struct kvm_vcpu *vcpu)
        vcpu->arch.regs.ctr = vcpu->arch.ctr_tm;
        vcpu->arch.tar = vcpu->arch.tar_tm;
        vcpu->arch.regs.link = vcpu->arch.lr_tm;
-       vcpu->arch.cr = vcpu->arch.cr_tm;
+       vcpu->arch.regs.ccr = vcpu->arch.cr_tm;
        vcpu->arch.regs.xer = vcpu->arch.xer_tm;
        vcpu->arch.vrsave = vcpu->arch.vrsave_tm;
 }
@@ -141,7 +141,7 @@ static void kvmppc_emulate_treclaim(struct kvm_vcpu *vcpu, int ra_val)
        uint64_t texasr;
 
        /* CR0 = 0 | MSR[TS] | 0 */
-       vcpu->arch.cr = (vcpu->arch.cr & ~(CR0_MASK << CR0_SHIFT)) |
+       vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & ~(CR0_MASK << CR0_SHIFT)) |
                (((guest_msr & MSR_TS_MASK) >> (MSR_TS_S_LG - 1))
                 << CR0_SHIFT);
 
@@ -220,7 +220,7 @@ void kvmppc_emulate_tabort(struct kvm_vcpu *vcpu, int ra_val)
        tm_abort(ra_val);
 
        /* CR0 = 0 | MSR[TS] | 0 */
-       vcpu->arch.cr = (vcpu->arch.cr & ~(CR0_MASK << CR0_SHIFT)) |
+       vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & ~(CR0_MASK << CR0_SHIFT)) |
                (((guest_msr & MSR_TS_MASK) >> (MSR_TS_S_LG - 1))
                 << CR0_SHIFT);
 
@@ -494,8 +494,8 @@ int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
 
                        if (!(kvmppc_get_msr(vcpu) & MSR_PR)) {
                                preempt_disable();
-                               vcpu->arch.cr = (CR0_TBEGIN_FAILURE |
-                                 (vcpu->arch.cr & ~(CR0_MASK << CR0_SHIFT)));
+                               vcpu->arch.regs.ccr = (CR0_TBEGIN_FAILURE |
+                                 (vcpu->arch.regs.ccr & ~(CR0_MASK << CR0_SHIFT)));
 
                                vcpu->arch.texasr = (TEXASR_FS | TEXASR_EXACT |
                                        (((u64)(TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT))
index deaca0135abad5ff5c33c8136d9d85a09f9c741a..3d99f4f410f982e58eb43574eaa0e0f037a4476f 100644 (file)
@@ -410,8 +410,8 @@ static void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
               vcpu->arch.shregs.sprg0, vcpu->arch.shregs.sprg1);
        pr_err("sprg2 = %.16llx sprg3 = %.16llx\n",
               vcpu->arch.shregs.sprg2, vcpu->arch.shregs.sprg3);
-       pr_err("cr = %.8x  xer = %.16lx  dsisr = %.8x\n",
-              vcpu->arch.cr, vcpu->arch.regs.xer, vcpu->arch.shregs.dsisr);
+       pr_err("cr = %.8lx  xer = %.16lx  dsisr = %.8x\n",
+              vcpu->arch.regs.ccr, vcpu->arch.regs.xer, vcpu->arch.shregs.dsisr);
        pr_err("dar = %.16llx\n", vcpu->arch.shregs.dar);
        pr_err("fault dar = %.16lx dsisr = %.8x\n",
               vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
index ea84696b0a3cbc3d6b181700a3ba27ca6257e647..9cea23866378b69a0be0485d45f3e7b637eaa4a4 100644 (file)
@@ -1092,7 +1092,7 @@ BEGIN_FTR_SECTION
 END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
 
        ld      r5, VCPU_LR(r4)
-       lwz     r6, VCPU_CR(r4)
+       ld      r6, VCPU_CR(r4)
        mtlr    r5
        mtcr    r6
 
@@ -1280,7 +1280,7 @@ kvmppc_interrupt_hv:
        std     r3, VCPU_GPR(R12)(r9)
        /* CR is in the high half of r12 */
        srdi    r4, r12, 32
-       stw     r4, VCPU_CR(r9)
+       std     r4, VCPU_CR(r9)
 BEGIN_FTR_SECTION
        ld      r3, HSTATE_CFAR(r13)
        std     r3, VCPU_CFAR(r9)
index 008285058f9b554616cb52fdc53a85843826ae23..888e2609e3f156322cac1ac0bde0529db7193359 100644 (file)
@@ -130,7 +130,7 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
                        return RESUME_GUEST;
                }
                /* Set CR0 to indicate previous transactional state */
-               vcpu->arch.cr = (vcpu->arch.cr & 0x0fffffff) |
+               vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |
                        (((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 28);
                /* L=1 => tresume, L=0 => tsuspend */
                if (instr & (1 << 21)) {
@@ -174,7 +174,7 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
                copy_from_checkpoint(vcpu);
 
                /* Set CR0 to indicate previous transactional state */
-               vcpu->arch.cr = (vcpu->arch.cr & 0x0fffffff) |
+               vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |
                        (((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 28);
                vcpu->arch.shregs.msr &= ~MSR_TS_MASK;
                return RESUME_GUEST;
@@ -204,7 +204,7 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
                copy_to_checkpoint(vcpu);
 
                /* Set CR0 to indicate previous transactional state */
-               vcpu->arch.cr = (vcpu->arch.cr & 0x0fffffff) |
+               vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |
                        (((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 28);
                vcpu->arch.shregs.msr = msr | MSR_TS_S;
                return RESUME_GUEST;
index b2c7c6fca4f96e5a315371e39ec9d25dc220da8d..3cf5863bc06e8513d5cb7d359f401846ba07aab5 100644 (file)
@@ -89,7 +89,8 @@ int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu)
                if (instr & (1 << 21))
                        vcpu->arch.shregs.msr = (msr & ~MSR_TS_MASK) | MSR_TS_T;
                /* Set CR0 to 0b0010 */
-               vcpu->arch.cr = (vcpu->arch.cr & 0x0fffffff) | 0x20000000;
+               vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |
+                       0x20000000;
                return 1;
        }
 
@@ -105,5 +106,5 @@ void kvmhv_emulate_tm_rollback(struct kvm_vcpu *vcpu)
        vcpu->arch.shregs.msr &= ~MSR_TS_MASK;  /* go to N state */
        vcpu->arch.regs.nip = vcpu->arch.tfhar;
        copy_from_checkpoint(vcpu);
-       vcpu->arch.cr = (vcpu->arch.cr & 0x0fffffff) | 0xa0000000;
+       vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) | 0xa0000000;
 }
index 059683e4e67a2ed6b1601dccb1dc9ab6a14460a8..4efd65d9e8280622ef29e24b0fff6f95eddff58a 100644 (file)
@@ -167,7 +167,7 @@ void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu)
        svcpu->gpr[11] = vcpu->arch.regs.gpr[11];
        svcpu->gpr[12] = vcpu->arch.regs.gpr[12];
        svcpu->gpr[13] = vcpu->arch.regs.gpr[13];
-       svcpu->cr  = vcpu->arch.cr;
+       svcpu->cr  = vcpu->arch.regs.ccr;
        svcpu->xer = vcpu->arch.regs.xer;
        svcpu->ctr = vcpu->arch.regs.ctr;
        svcpu->lr  = vcpu->arch.regs.link;
@@ -249,7 +249,7 @@ void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu)
        vcpu->arch.regs.gpr[11] = svcpu->gpr[11];
        vcpu->arch.regs.gpr[12] = svcpu->gpr[12];
        vcpu->arch.regs.gpr[13] = svcpu->gpr[13];
-       vcpu->arch.cr  = svcpu->cr;
+       vcpu->arch.regs.ccr  = svcpu->cr;
        vcpu->arch.regs.xer = svcpu->xer;
        vcpu->arch.regs.ctr = svcpu->ctr;
        vcpu->arch.regs.link  = svcpu->lr;
index 81bd8a07aa51f0c393cab9081e77bd316b1f6536..051af7d973275ae9294e60f62d45f7932186cdce 100644 (file)
         */
        PPC_LL  r4, PACACURRENT(r13)
        PPC_LL  r4, (THREAD + THREAD_KVM_VCPU)(r4)
-       stw     r10, VCPU_CR(r4)
+       PPC_STL r10, VCPU_CR(r4)
        PPC_STL r11, VCPU_GPR(R4)(r4)
        PPC_STL r5, VCPU_GPR(R5)(r4)
        PPC_STL r6, VCPU_GPR(R6)(r4)
@@ -292,7 +292,7 @@ _GLOBAL(kvmppc_handler_\intno\()_\srr1)
        PPC_STL r4, VCPU_GPR(R4)(r11)
        PPC_LL  r4, THREAD_NORMSAVE(0)(r10)
        PPC_STL r5, VCPU_GPR(R5)(r11)
-       stw     r13, VCPU_CR(r11)
+       PPC_STL r13, VCPU_CR(r11)
        mfspr   r5, \srr0
        PPC_STL r3, VCPU_GPR(R10)(r11)
        PPC_LL  r3, THREAD_NORMSAVE(2)(r10)
@@ -319,7 +319,7 @@ _GLOBAL(kvmppc_handler_\intno\()_\srr1)
        PPC_STL r4, VCPU_GPR(R4)(r11)
        PPC_LL  r4, GPR9(r8)
        PPC_STL r5, VCPU_GPR(R5)(r11)
-       stw     r9, VCPU_CR(r11)
+       PPC_STL r9, VCPU_CR(r11)
        mfspr   r5, \srr0
        PPC_STL r3, VCPU_GPR(R8)(r11)
        PPC_LL  r3, GPR10(r8)
@@ -643,7 +643,7 @@ lightweight_exit:
        PPC_LL  r3, VCPU_LR(r4)
        PPC_LL  r5, VCPU_XER(r4)
        PPC_LL  r6, VCPU_CTR(r4)
-       lwz     r7, VCPU_CR(r4)
+       PPC_LL  r7, VCPU_CR(r4)
        PPC_LL  r8, VCPU_PC(r4)
        PPC_LD(r9, VCPU_SHARED_MSR, r11)
        PPC_LL  r0, VCPU_GPR(R0)(r4)
index 75dce1ef3bc83473e23689ded4c7bfdf72637a7e..f91b1309a0a861688c9c794f691eacaf2e8351c2 100644 (file)
@@ -117,7 +117,6 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
 
        emulated = EMULATE_FAIL;
        vcpu->arch.regs.msr = vcpu->arch.shared->msr;
-       vcpu->arch.regs.ccr = vcpu->arch.cr;
        if (analyse_instr(&op, &vcpu->arch.regs, inst) == 0) {
                int type = op.type & INSTR_TYPE_MASK;
                int size = GETSIZE(op.type);