KVM: arm64: pmu: Fix chained SW_INCR counters
author Eric Auger <eric.auger@redhat.com>
Fri, 24 Jan 2020 14:25:34 +0000 (15:25 +0100)
committer Marc Zyngier <maz@kernel.org>
Tue, 28 Jan 2020 12:50:33 +0000 (12:50 +0000)
At the moment a SW_INCR counter always overflows at the 32-bit
boundary, regardless of whether the (n+1)th counter is programmed
as a CHAIN counter.

Check whether the SW_INCR counter is a 64-bit (chained) counter and,
if so, implement the 64-bit logic.

Fixes: 80f393a23be6 ("KVM: arm/arm64: Support chained PMU counters")
Signed-off-by: Eric Auger <eric.auger@redhat.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20200124142535.29386-4-eric.auger@redhat.com
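
For reference, here is a minimal sketch of the chained SW_INCR
semantics the patch implements. It uses hypothetical stand-ins
(evcntr[], ovsset, sw_incr(), the chained flag) in plain C rather
than KVM's __vcpu_sys_reg() accessors and kvm_pmu_pmc_is_chained():

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical stand-ins for the vCPU PMU state: one 32-bit value
     * per event counter (PMEVCNTR<n>_EL0) and the overflow-status
     * bitmap (PMOVSSET_EL0). */
    static uint32_t evcntr[31];
    static uint64_t ovsset;

    /* Software-increment counter i; when the pair <i, i + 1> is
     * chained, propagate the 32-bit carry into the high counter
     * instead of flagging an overflow on the low one. */
    static void sw_incr(int i, bool chained)
    {
            if (++evcntr[i])
                    return;         /* no wrap on the low 32 bits */

            if (chained) {
                    /* carry into the high counter; the pair only
                     * overflows (bit i + 1) if the high half wraps too */
                    if (!++evcntr[i + 1])
                            ovsset |= 1ULL << (i + 1);
            } else {
                    /* unchained: a 32-bit wrap overflows counter i */
                    ovsset |= 1ULL << i;
            }
    }

In other words, an unchained counter flags its own overflow bit on a
32-bit wrap, while a chained pair carries into the odd (high) counter
and only flags bit i + 1 when the high half wraps as well.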
virt/kvm/arm/pmu.c

index 9f605e0b8dd7489ab438ab834618c3a0730ea786..560db62821374ef24a7f84a9a727db4d658166b2 100644
@@ -477,28 +477,45 @@ static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
  */
 void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
 {
+       struct kvm_pmu *pmu = &vcpu->arch.pmu;
        int i;
-       u64 type, enable, reg;
-
-       if (val == 0)
-               return;
 
        if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E))
                return;
 
-       enable = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
+       /* Weed out disabled counters */
+       val &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
+
        for (i = 0; i < ARMV8_PMU_CYCLE_IDX; i++) {
+               u64 type, reg;
+
                if (!(val & BIT(i)))
                        continue;
-               type = __vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i)
-                      & ARMV8_PMU_EVTYPE_EVENT;
-               if ((type == ARMV8_PMUV3_PERFCTR_SW_INCR)
-                   && (enable & BIT(i))) {
-                       reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1;
+
+               /* PMSWINC only applies to ... SW_INC! */
+               type = __vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i);
+               type &= ARMV8_PMU_EVTYPE_EVENT;
+               if (type != ARMV8_PMUV3_PERFCTR_SW_INCR)
+                       continue;
+
+               /* increment this SW_INCR counter */
+               reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1;
+               reg = lower_32_bits(reg);
+               __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) = reg;
+
+               if (reg) /* no overflow on the low part */
+                       continue;
+
+               if (kvm_pmu_pmc_is_chained(&pmu->pmc[i])) {
+                       /* increment the high counter */
+                       reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i + 1) + 1;
                        reg = lower_32_bits(reg);
-                       __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) = reg;
-                       if (!reg)
-                               __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i);
+                       __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i + 1) = reg;
+                       if (!reg) /* mark overflow on the high counter */
+                               __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i + 1);
+               } else {
+                       /* mark overflow on the low counter */
+                       __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i);
                }
        }
 }