PT_CAP(psb_periods, 1, CPUID_EBX, 0xffff0000),
};
-static u32 pt_cap_get(enum pt_capabilities cap)
+u32 intel_pt_validate_hw_cap(enum pt_capabilities cap)
{
struct pt_cap_desc *cd = &pt_caps[cap];
u32 c = pt_pmu.caps[cd->leaf * PT_CPUID_REGS_NUM + cd->reg];
	unsigned int shift = __ffs(cd->mask);

	return (c & cd->mask) >> shift;
}
+EXPORT_SYMBOL_GPL(intel_pt_validate_hw_cap);
static ssize_t pt_cap_show(struct device *cdev,
			   struct device_attribute *attr,
			   char *buf)
{
	struct dev_ext_attribute *ea =
		container_of(attr, struct dev_ext_attribute, attr);
	enum pt_capabilities cap = (long)ea->var;
- return snprintf(buf, PAGE_SIZE, "%x\n", pt_cap_get(cap));
+ return snprintf(buf, PAGE_SIZE, "%x\n", intel_pt_validate_hw_cap(cap));
}
static struct attribute_group pt_cap_group __ro_after_init = {
return false;
if (config & RTIT_CTL_CYC_PSB) {
- if (!pt_cap_get(PT_CAP_psb_cyc))
+ if (!intel_pt_validate_hw_cap(PT_CAP_psb_cyc))
return false;
- allowed = pt_cap_get(PT_CAP_psb_periods);
+ allowed = intel_pt_validate_hw_cap(PT_CAP_psb_periods);
requested = (config & RTIT_CTL_PSB_FREQ) >>
RTIT_CTL_PSB_FREQ_OFFSET;
if (requested && (!(allowed & BIT(requested))))
return false;
- allowed = pt_cap_get(PT_CAP_cycle_thresholds);
+ allowed = intel_pt_validate_hw_cap(PT_CAP_cycle_thresholds);
requested = (config & RTIT_CTL_CYC_THRESH) >>
RTIT_CTL_CYC_THRESH_OFFSET;
if (requested && (!(allowed & BIT(requested))))
* Spec says that setting mtc period bits while mtc bit in
* CPUID is 0 will #GP, so better safe than sorry.
*/
- if (!pt_cap_get(PT_CAP_mtc))
+ if (!intel_pt_validate_hw_cap(PT_CAP_mtc))
return false;
- allowed = pt_cap_get(PT_CAP_mtc_periods);
+ allowed = intel_pt_validate_hw_cap(PT_CAP_mtc_periods);
if (!allowed)
return false;
}
if (config & RTIT_CTL_PWR_EVT_EN &&
- !pt_cap_get(PT_CAP_power_event_trace))
+ !intel_pt_validate_hw_cap(PT_CAP_power_event_trace))
return false;
if (config & RTIT_CTL_PTW) {
- if (!pt_cap_get(PT_CAP_ptwrite))
+ if (!intel_pt_validate_hw_cap(PT_CAP_ptwrite))
return false;
/* FUPonPTW without PTW doesn't make sense */
 * In case of single-entry ToPA, always put the self-referencing END
* link as the 2nd entry in the table
*/
- if (!pt_cap_get(PT_CAP_topa_multiple_entries)) {
+ if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries)) {
TOPA_ENTRY(topa, 1)->base = topa->phys >> TOPA_SHIFT;
TOPA_ENTRY(topa, 1)->end = 1;
}
topa->offset = last->offset + last->size;
buf->last = topa;
- if (!pt_cap_get(PT_CAP_topa_multiple_entries))
+ if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries))
return;
BUG_ON(last->last != TENTS_PER_PAGE - 1);
static bool topa_table_full(struct topa *topa)
{
/* single-entry ToPA is a special case */
- if (!pt_cap_get(PT_CAP_topa_multiple_entries))
+ if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries))
return !!topa->last;
return topa->last == TENTS_PER_PAGE - 1;
TOPA_ENTRY(topa, -1)->base = page_to_phys(p) >> TOPA_SHIFT;
TOPA_ENTRY(topa, -1)->size = order;
- if (!buf->snapshot && !pt_cap_get(PT_CAP_topa_multiple_entries)) {
+ if (!buf->snapshot &&
+ !intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries)) {
TOPA_ENTRY(topa, -1)->intr = 1;
TOPA_ENTRY(topa, -1)->stop = 1;
}
topa->table[i].intr ? 'I' : ' ',
topa->table[i].stop ? 'S' : ' ',
*(u64 *)&topa->table[i]);
- if ((pt_cap_get(PT_CAP_topa_multiple_entries) &&
+ if ((intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries) &&
topa->table[i].stop) ||
topa->table[i].end)
break;
* means we are already losing data; need to let the decoder
* know.
*/
- if (!pt_cap_get(PT_CAP_topa_multiple_entries) ||
+ if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries) ||
buf->output_off == sizes(TOPA_ENTRY(buf->cur, buf->cur_idx)->size)) {
perf_aux_output_flag(&pt->handle,
PERF_AUX_FLAG_TRUNCATED);
* Also on single-entry ToPA implementations, interrupt will come
* before the output reaches its output region's boundary.
*/
- if (!pt_cap_get(PT_CAP_topa_multiple_entries) && !buf->snapshot &&
+ if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries) &&
+ !buf->snapshot &&
pt_buffer_region_size(buf) - buf->output_off <= TOPA_PMI_MARGIN) {
void *head = pt_buffer_region(buf);
/* single entry ToPA is handled by marking all regions STOP=1 INT=1 */
- if (!pt_cap_get(PT_CAP_topa_multiple_entries))
+ if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries))
return 0;
/* clear STOP and INT from current entry */
pt_buffer_setup_topa_index(buf);
/* link last table to the first one, unless we're double buffering */
- if (pt_cap_get(PT_CAP_topa_multiple_entries)) {
+ if (intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries)) {
TOPA_ENTRY(buf->last, -1)->base = buf->first->phys >> TOPA_SHIFT;
TOPA_ENTRY(buf->last, -1)->end = 1;
}
struct pt_filters *filters;
int node = event->cpu == -1 ? -1 : cpu_to_node(event->cpu);
- if (!pt_cap_get(PT_CAP_num_address_ranges))
+ if (!intel_pt_validate_hw_cap(PT_CAP_num_address_ranges))
return 0;
filters = kzalloc_node(sizeof(struct pt_filters), GFP_KERNEL, node);
return -EINVAL;
}
- if (++range > pt_cap_get(PT_CAP_num_address_ranges))
+ if (++range > intel_pt_validate_hw_cap(PT_CAP_num_address_ranges))
return -EOPNOTSUPP;
}
if (ret)
return ret;
- if (!pt_cap_get(PT_CAP_topa_output)) {
+ if (!intel_pt_validate_hw_cap(PT_CAP_topa_output)) {
pr_warn("ToPA output is not supported on this CPU\n");
return -ENODEV;
}
- if (!pt_cap_get(PT_CAP_topa_multiple_entries))
+ if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries))
pt_pmu.pmu.capabilities =
PERF_PMU_CAP_AUX_NO_SG | PERF_PMU_CAP_AUX_SW_DOUBLEBUF;
pt_pmu.pmu.addr_filters_sync = pt_event_addr_filters_sync;
pt_pmu.pmu.addr_filters_validate = pt_event_addr_filters_validate;
pt_pmu.pmu.nr_addr_filters =
- pt_cap_get(PT_CAP_num_address_ranges);
+ intel_pt_validate_hw_cap(PT_CAP_num_address_ranges);
ret = perf_pmu_register(&pt_pmu.pmu, "intel_pt", -1);