ACPI_EDGE_SENSITIVE, &res[2]);
}
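+/*
+ * Platforms whose PMCG implementation needs model-specific handling.
+ * acpi_match_platform_list() compares the IORT table header's OEM ID
+ * and OEM table ID against each entry and applies the predicate to
+ * its OEM revision; greater_than_or_equal 0 matches all HIP08
+ * firmware revisions.
+ */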
+static struct acpi_platform_list pmcg_plat_info[] __initdata = {
+ /* HiSilicon Hip08 Platform */
+ {"HISI ", "HIP08 ", 0, ACPI_SIG_IORT, greater_than_or_equal,
+ "Erratum #162001800", IORT_SMMU_V3_PMCG_HISI_HIP08},
+ { }
+};
+
static int __init arm_smmu_v3_pmcg_add_platdata(struct platform_device *pdev)
{
- u32 model = IORT_SMMU_V3_PMCG_GENERIC;
+ u32 model;
+ int idx;
+
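+ /* Look up the IORT OEM information; fall back to the generic model */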
+ idx = acpi_match_platform_list(pmcg_plat_info);
+ if (idx >= 0)
+ model = pmcg_plat_info[idx].data;
+ else
+ model = IORT_SMMU_V3_PMCG_GENERIC;
return platform_device_add_data(pdev, &model, sizeof(model));
}
*/
#include <linux/acpi.h>
+#include <linux/acpi_iort.h>
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/cpuhotplug.h>
#define SMMU_PMCG_PA_SHIFT 12
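+/* Event counter registers are read-only, so a start value cannot be programmed */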
+#define SMMU_PMCG_EVCNTR_RDONLY BIT(0)
+
static int cpuhp_state_num;
struct smmu_pmu {
void __iomem *reg_base;
void __iomem *reloc_base;
u64 counter_mask;
+ u32 options;
bool global_filter;
u32 global_filter_span;
u32 global_filter_sid;
u32 idx = hwc->idx;
u64 new;
- /*
- * We limit the max period to half the max counter value of the counter
- * size, so that even in the case of extreme interrupt latency the
- * counter will (hopefully) not wrap past its initial value.
- */
- new = smmu_pmu->counter_mask >> 1;
+ if (smmu_pmu->options & SMMU_PMCG_EVCNTR_RDONLY) {
+ /*
+ * On platforms that require this quirk, if the counter starts at a
+ * value below half of the counter range and then wraps, the current
+ * overflow handling logic may not work. Such platforms are expected
+ * to implement the full 64 counter bits, making that possibility
+ * remote (e.g. HiSilicon HIP08).
+ */
+ new = smmu_pmu_counter_get_value(smmu_pmu, idx);
+ } else {
+ /*
+ * We limit the max period to half of the max counter value so
+ * that even in the case of extreme interrupt latency the counter
+ * will (hopefully) not wrap past its initial value.
+ */
+ new = smmu_pmu->counter_mask >> 1;
+ smmu_pmu_counter_set_value(smmu_pmu, idx, new);
+ }
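+ /* Record the starting value; the read/update path accumulates deltas from it */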
local64_set(&hwc->prev_count, new);
- smmu_pmu_counter_set_value(smmu_pmu, idx, new);
}
static void smmu_pmu_set_event_filter(struct perf_event *event,
smmu_pmu->reloc_base + SMMU_PMCG_OVSCLR0);
}
+static void smmu_pmu_get_acpi_options(struct smmu_pmu *smmu_pmu)
+{
+ u32 model;
+
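+ /* Model code attached by arm_smmu_v3_pmcg_add_platdata() in the IORT code */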
+ model = *(u32 *)dev_get_platdata(smmu_pmu->dev);
+
+ switch (model) {
+ case IORT_SMMU_V3_PMCG_HISI_HIP08:
+ /* HiSilicon Erratum 162001800 */
+ smmu_pmu->options |= SMMU_PMCG_EVCNTR_RDONLY;
+ break;
+ }
+
+ dev_notice(smmu_pmu->dev, "option mask 0x%x\n", smmu_pmu->options);
+}
+
static int smmu_pmu_probe(struct platform_device *pdev)
{
struct smmu_pmu *smmu_pmu;
return -EINVAL;
}
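+ /* Apply model-specific quirks before the PMU is registered */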
+ smmu_pmu_get_acpi_options(smmu_pmu);
+
/* Pick one CPU to be the preferred one to use */
smmu_pmu->on_cpu = raw_smp_processor_id();
WARN_ON(irq_set_affinity_hint(smmu_pmu->irq,