KVM: arm/arm64: Get rid of vcpu->arch.irq_lines
author    Christoffer Dall <christoffer.dall@linaro.org>  Thu, 3 Aug 2017 10:09:05 +0000 (12:09 +0200)
committer Marc Zyngier <marc.zyngier@arm.com>             Mon, 19 Mar 2018 10:53:10 +0000 (10:53 +0000)
We currently have a separate read-modify-write of HCR_EL2 on entry to
the guest for the sole purpose of setting the VF and VI bits, if they
are pending. Since this is rarely the case (only when using the
userspace IRQ chip and interrupts are in flight), let's get rid of this
operation and instead modify the bits in vcpu->arch.hcr[_el2] directly
when needed.

Acked-by: Marc Zyngier <marc.zyngier@arm.com>
Reviewed-by: Andrew Jones <drjones@redhat.com>
Reviewed-by: Julien Thierry <julien.thierry@arm.com>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
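
The net effect: the virtual IRQ/FIQ line state now lives directly in the
HCR image that is written to hardware on guest entry. A minimal sketch of
the resulting pattern (illustrative only; set_virtual_irq_line() is a
made-up name, while vcpu_hcr(), HCR_VI and the atomic bitops are the ones
used in the hunks below):

    /* Toggle the virtual IRQ line by flipping HCR_VI in the vcpu's
     * HCR image, as vcpu_interrupt_line() does after this patch. The
     * bit is picked up by the plain write of vcpu->arch.hcr[_el2] to
     * HCR/HCR_EL2 in __activate_traps(); no extra RMW on entry.
     */
    static bool set_virtual_irq_line(struct kvm_vcpu *vcpu, bool level)
    {
            unsigned long *hcr = vcpu_hcr(vcpu);
            int bit = __ffs(HCR_VI);        /* bit index of the VI flag */

            /* Atomic test-and-modify; returns the previous line level. */
            return level ? test_and_set_bit(bit, hcr)
                         : test_and_clear_bit(bit, hcr);
    }

This works because arm's u32 hcr and arm64's u64 hcr_el2 each match the
native unsigned long width on their architecture, so the cast in
vcpu_hcr() lets the generic atomic bitops operate on the field in place.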
arch/arm/include/asm/kvm_emulate.h
arch/arm/include/asm/kvm_host.h
arch/arm/kvm/emulate.c
arch/arm/kvm/hyp/switch.c
arch/arm64/include/asm/kvm_emulate.h
arch/arm64/include/asm/kvm_host.h
arch/arm64/kvm/hyp/switch.c
arch/arm64/kvm/inject_fault.c
virt/kvm/arm/arm.c
virt/kvm/arm/mmu.c

index 9003bd19cb701852184a7f3ca0edb7743fa47eea..e27caa4b47a1bff6e9644a82e7ad2e2b3a6acb1c 100644 (file)
@@ -92,14 +92,9 @@ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
        vcpu->arch.hcr = HCR_GUEST_MASK;
 }
 
-static inline unsigned long vcpu_get_hcr(const struct kvm_vcpu *vcpu)
+static inline unsigned long *vcpu_hcr(const struct kvm_vcpu *vcpu)
 {
-       return vcpu->arch.hcr;
-}
-
-static inline void vcpu_set_hcr(struct kvm_vcpu *vcpu, unsigned long hcr)
-{
-       vcpu->arch.hcr = hcr;
+       return (unsigned long *)&vcpu->arch.hcr;
 }
 
 static inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
index 248b930563e5a68ed870632290b0b0cad7158cb2..6137195ab815a6dc6a4d2c53fbf2e17aa9f0bacc 100644 (file)
@@ -155,9 +155,6 @@ struct kvm_vcpu_arch {
        /* HYP trapping configuration */
        u32 hcr;
 
-       /* Interrupt related fields */
-       u32 irq_lines;          /* IRQ and FIQ levels */
-
        /* Exception Information */
        struct kvm_vcpu_fault_info fault;
 
index cdff963f133a189e848929d831a5e88b9fb270d7..fa501bf437f3bb9301b1024e65112d85114cb993 100644 (file)
@@ -174,5 +174,5 @@ unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu)
  */
 void kvm_inject_vabt(struct kvm_vcpu *vcpu)
 {
-       vcpu_set_hcr(vcpu, vcpu_get_hcr(vcpu) | HCR_VA);
+       *vcpu_hcr(vcpu) |= HCR_VA;
 }
index ae45ae96aac28bbd4395865c83d59664cead4838..e86679daddff9f144ff4e999f298f21a5714b6fa 100644 (file)
@@ -44,7 +44,7 @@ static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu, u32 *fpexc_host)
                isb();
        }
 
-       write_sysreg(vcpu->arch.hcr | vcpu->arch.irq_lines, HCR);
+       write_sysreg(vcpu->arch.hcr, HCR);
        /* Trap on AArch32 cp15 c15 accesses (EL1 or EL0) */
        write_sysreg(HSTR_T(15), HSTR);
        write_sysreg(HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11), HCPTR);
index 4610bc81809705166ccdff2d36a2ba2dd9811696..9ee316b962c8d555533562e3ef060421f9d2b619 100644 (file)
@@ -69,14 +69,9 @@ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
                vcpu->arch.hcr_el2 |= HCR_TID3;
 }
 
-static inline unsigned long vcpu_get_hcr(struct kvm_vcpu *vcpu)
+static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu)
 {
-       return vcpu->arch.hcr_el2;
-}
-
-static inline void vcpu_set_hcr(struct kvm_vcpu *vcpu, unsigned long hcr)
-{
-       vcpu->arch.hcr_el2 = hcr;
+       return (unsigned long *)&vcpu->arch.hcr_el2;
 }
 
 static inline void vcpu_set_vsesr(struct kvm_vcpu *vcpu, u64 vsesr)
index 618cfee7206a3d37058a40180301428d3587ba47..b027a7f025d43499d969873984f6cb9a142b432a 100644 (file)
@@ -272,9 +272,6 @@ struct kvm_vcpu_arch {
        /* IO related fields */
        struct kvm_decode mmio_decode;
 
-       /* Interrupt related fields */
-       u64 irq_lines;          /* IRQ and FIQ levels */
-
        /* Cache some mmu pages needed inside spinlock regions */
        struct kvm_mmu_memory_cache mmu_page_cache;
 
index 4117717548b044ed1fec0d6f2acf44f1a642fcb7..80bf38ccc8a4ba21437dc6ec5701784f670d5556 100644 (file)
@@ -178,12 +178,6 @@ static void __hyp_text __vgic_save_state(struct kvm_vcpu *vcpu)
 
 static void __hyp_text __vgic_restore_state(struct kvm_vcpu *vcpu)
 {
-       u64 val;
-
-       val = read_sysreg(hcr_el2);
-       val |= vcpu->arch.irq_lines;
-       write_sysreg(val, hcr_el2);
-
        if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
                __vgic_v3_restore_state(vcpu);
        else
index 60666a05694490dfc3fc070019cdf64c1794d4d8..c1e179d34e6a844a8e601fcc9699584b46bac4ff 100644 (file)
@@ -167,7 +167,7 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu)
 static void pend_guest_serror(struct kvm_vcpu *vcpu, u64 esr)
 {
        vcpu_set_vsesr(vcpu, esr);
-       vcpu_set_hcr(vcpu, vcpu_get_hcr(vcpu) | HCR_VSE);
+       *vcpu_hcr(vcpu) |= HCR_VSE;
 }
 
 /**
index 932e61858c5592e5809cd55308d3814a7a658569..49d13510e9c2829f915327187f13b1ab615a7f20 100644 (file)
@@ -420,7 +420,8 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
  */
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
 {
-       return ((!!v->arch.irq_lines || kvm_vgic_vcpu_pending_irq(v))
+       bool irq_lines = *vcpu_hcr(v) & (HCR_VI | HCR_VF);
+       return ((irq_lines || kvm_vgic_vcpu_pending_irq(v))
                && !v->arch.power_off && !v->arch.pause);
 }
 
@@ -814,18 +815,18 @@ static int vcpu_interrupt_line(struct kvm_vcpu *vcpu, int number, bool level)
 {
        int bit_index;
        bool set;
-       unsigned long *ptr;
+       unsigned long *hcr;
 
        if (number == KVM_ARM_IRQ_CPU_IRQ)
                bit_index = __ffs(HCR_VI);
        else /* KVM_ARM_IRQ_CPU_FIQ */
                bit_index = __ffs(HCR_VF);
 
-       ptr = (unsigned long *)&vcpu->arch.irq_lines;
+       hcr = vcpu_hcr(vcpu);
        if (level)
-               set = test_and_set_bit(bit_index, ptr);
+               set = test_and_set_bit(bit_index, hcr);
        else
-               set = test_and_clear_bit(bit_index, ptr);
+               set = test_and_clear_bit(bit_index, hcr);
 
        /*
         * If we didn't change anything, no need to wake up or kick other CPUs
index ec62d1cccab7c92dddb4eea86f2be22f7d4d0adc..9ebff8e530f98b5cc21a3922a4beece4982cd901 100644 (file)
@@ -2035,7 +2035,7 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
  */
 void kvm_set_way_flush(struct kvm_vcpu *vcpu)
 {
-       unsigned long hcr = vcpu_get_hcr(vcpu);
+       unsigned long hcr = *vcpu_hcr(vcpu);
 
        /*
         * If this is the first time we do a S/W operation
@@ -2050,7 +2050,7 @@ void kvm_set_way_flush(struct kvm_vcpu *vcpu)
                trace_kvm_set_way_flush(*vcpu_pc(vcpu),
                                        vcpu_has_cache_enabled(vcpu));
                stage2_flush_vm(vcpu->kvm);
-               vcpu_set_hcr(vcpu, hcr | HCR_TVM);
+               *vcpu_hcr(vcpu) = hcr | HCR_TVM;
        }
 }
 
@@ -2068,7 +2068,7 @@ void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled)
 
        /* Caches are now on, stop trapping VM ops (until a S/W op) */
        if (now_enabled)
-               vcpu_set_hcr(vcpu, vcpu_get_hcr(vcpu) & ~HCR_TVM);
+               *vcpu_hcr(vcpu) &= ~HCR_TVM;
 
        trace_kvm_toggle_cache(*vcpu_pc(vcpu), was_enabled, now_enabled);
 }
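
For reference, the entry-path simplification this buys on arm64, sketched
from the hunks above (surrounding __vgic_restore_state() and
__activate_traps() code elided):

    /* Before: every guest entry did an extra read-modify-write of
     * HCR_EL2 in __vgic_restore_state() just to merge in the lines. */
    val = read_sysreg(hcr_el2);
    val |= vcpu->arch.irq_lines;
    write_sysreg(val, hcr_el2);

    /* After: the pending VI/VF bits already live in the HCR image, so
     * the existing single write of vcpu->arch.hcr_el2 on entry covers
     * them (mirroring the 32-bit write_sysreg(vcpu->arch.hcr, HCR)). */
    write_sysreg(vcpu->arch.hcr_el2, hcr_el2);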