Commit 0fdf1bb

mrutland-arm authored and willdeacon committed
arm64: perf: Avoid PMXEV* indirection
Currently we access the counter registers and their respective type
registers indirectly. This requires us to write to PMSELR, issue an ISB,
then access the relevant PMXEV* registers. This is unfortunate, because:

* Under virtualization, accessing one register requires two traps to
  the hypervisor, even though we could access the register directly with
  a single trap.

* We have to issue an ISB which we could otherwise avoid the cost of.

* When we use NMIs, the NMI handler will have to save/restore the select
  register in case the code it preempted was attempting to access a
  counter or its type register.

We can avoid these issues by directly accessing the relevant registers.
This patch adds helpers to do so.

In armv8pmu_enable_event() we still need the ISB to prevent the PE from
reordering the write to the PMINTENSET_EL1 register. If the interrupt is
enabled before we disable the counter and the new event is configured,
we might get an interrupt triggered by the previously programmed event
overflowing, but which we wrongly attribute to the event that we are
enabling. Execute an ISB after we disable the counter.

In the process, remove the comment that refers to the ARMv7 PMU.

[Julien T.: Don't inline read/write functions to avoid big code-size
increase, remove unused read_pmevtypern function, fix counter index
issue.]
[Alexandru E.: Removed comment, removed trailing semicolons in macros,
added ISB]

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Julien Thierry <julien.thierry@arm.com>
Signed-off-by: Alexandru Elisei <alexandru.elisei@arm.com>
Tested-by: Sumit Garg <sumit.garg@linaro.org> (Developerbox)
Cc: Julien Thierry <julien.thierry.kdev@gmail.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Link: https://lore.kernel.org/r/20200924110706.254996-3-alexandru.elisei@arm.com
Signed-off-by: Will Deacon <will@kernel.org>
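To make the indirection concrete, here is a rough sketch of the two access patterns. This is illustrative only: read_counter_indirect() and read_counter_direct() are made-up names, not code from this patch; write_sysreg()/read_sysreg()/isb() and read_pmevcntrn() are as used in the diff below.

/*
 * Old, indirect pattern: select a counter via PMSELR_EL0, synchronize,
 * then access the banked PMXEV* alias. Under virtualization each
 * system-register access can trap, so one logical read costs two traps
 * plus an ISB.
 */
static u64 read_counter_indirect(u32 counter)
{
	write_sysreg(counter, pmselr_el0);	/* trap 1: select */
	isb();					/* order the select against the access */
	return read_sysreg(pmxevcntr_el0);	/* trap 2: access */
}

/*
 * New, direct pattern: one access to the numbered register, a single
 * trap under virtualization, no ISB, and no PMSELR_EL0 state for an
 * NMI handler to save/restore.
 */
static u64 read_counter_direct(u32 counter)
{
	return read_pmevcntrn(counter);
}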
1 parent 490d7b7 commit 0fdf1bb

1 file changed

arch/arm64/kernel/perf_event.c

Lines changed: 85 additions & 14 deletions
@@ -371,6 +371,73 @@ static inline bool armv8pmu_event_is_chained(struct perf_event *event)
 #define ARMV8_IDX_TO_COUNTER(x)	\
 	(((x) - ARMV8_IDX_COUNTER0) & ARMV8_PMU_COUNTER_MASK)
 
+/*
+ * This code is really good
+ */
+
+#define PMEVN_CASE(n, case_macro) \
+	case n: case_macro(n); break
+
+#define PMEVN_SWITCH(x, case_macro)			\
+	do {						\
+		switch (x) {				\
+		PMEVN_CASE(0, case_macro);		\
+		PMEVN_CASE(1, case_macro);		\
+		PMEVN_CASE(2, case_macro);		\
+		PMEVN_CASE(3, case_macro);		\
+		PMEVN_CASE(4, case_macro);		\
+		PMEVN_CASE(5, case_macro);		\
+		PMEVN_CASE(6, case_macro);		\
+		PMEVN_CASE(7, case_macro);		\
+		PMEVN_CASE(8, case_macro);		\
+		PMEVN_CASE(9, case_macro);		\
+		PMEVN_CASE(10, case_macro);		\
+		PMEVN_CASE(11, case_macro);		\
+		PMEVN_CASE(12, case_macro);		\
+		PMEVN_CASE(13, case_macro);		\
+		PMEVN_CASE(14, case_macro);		\
+		PMEVN_CASE(15, case_macro);		\
+		PMEVN_CASE(16, case_macro);		\
+		PMEVN_CASE(17, case_macro);		\
+		PMEVN_CASE(18, case_macro);		\
+		PMEVN_CASE(19, case_macro);		\
+		PMEVN_CASE(20, case_macro);		\
+		PMEVN_CASE(21, case_macro);		\
+		PMEVN_CASE(22, case_macro);		\
+		PMEVN_CASE(23, case_macro);		\
+		PMEVN_CASE(24, case_macro);		\
+		PMEVN_CASE(25, case_macro);		\
+		PMEVN_CASE(26, case_macro);		\
+		PMEVN_CASE(27, case_macro);		\
+		PMEVN_CASE(28, case_macro);		\
+		PMEVN_CASE(29, case_macro);		\
+		PMEVN_CASE(30, case_macro);		\
+		default: WARN(1, "Invalid PMEV* index\n");	\
+		}					\
+	} while (0)
+
+#define RETURN_READ_PMEVCNTRN(n) \
+	return read_sysreg(pmevcntr##n##_el0)
+static unsigned long read_pmevcntrn(int n)
+{
+	PMEVN_SWITCH(n, RETURN_READ_PMEVCNTRN);
+	return 0;
+}
+
+#define WRITE_PMEVCNTRN(n) \
+	write_sysreg(val, pmevcntr##n##_el0)
+static void write_pmevcntrn(int n, unsigned long val)
+{
+	PMEVN_SWITCH(n, WRITE_PMEVCNTRN);
+}
+
+#define WRITE_PMEVTYPERN(n) \
+	write_sysreg(val, pmevtyper##n##_el0)
+static void write_pmevtypern(int n, unsigned long val)
+{
+	PMEVN_SWITCH(n, WRITE_PMEVTYPERN);
+}
+
 static inline u32 armv8pmu_pmcr_read(void)
 {
 	return read_sysreg(pmcr_el0);
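The switch/token-pasting machinery above exists because system-register names like pmevcntr3_el0 must be literal at compile time (mrs/msr take no runtime index), so a runtime counter number has to be dispatched to one of 31 direct accesses. Roughly what the preprocessor generates for read_pmevcntrn(), abridged here to three of the 31 cases:

static unsigned long read_pmevcntrn(int n)
{
	switch (n) {
	case 0: return read_sysreg(pmevcntr0_el0); break;
	case 1: return read_sysreg(pmevcntr1_el0); break;
	/* ... cases 2 through 29 ... */
	case 30: return read_sysreg(pmevcntr30_el0); break;
	default: WARN(1, "Invalid PMEV* index\n");
	}
	return 0;
}

This is also why the helpers are deliberately not inlined (per Julien's note in the commit message): expanding a 31-way switch at every call site would be a sizeable code-size increase.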
@@ -393,17 +460,11 @@ static inline int armv8pmu_counter_has_overflowed(u32 pmnc, int idx)
 	return pmnc & BIT(ARMV8_IDX_TO_COUNTER(idx));
 }
 
-static inline void armv8pmu_select_counter(int idx)
+static inline u64 armv8pmu_read_evcntr(int idx)
 {
 	u32 counter = ARMV8_IDX_TO_COUNTER(idx);
-	write_sysreg(counter, pmselr_el0);
-	isb();
-}
 
-static inline u64 armv8pmu_read_evcntr(int idx)
-{
-	armv8pmu_select_counter(idx);
-	return read_sysreg(pmxevcntr_el0);
+	return read_pmevcntrn(counter);
 }
 
 static inline u64 armv8pmu_read_hw_counter(struct perf_event *event)
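A side note on types: read_pmevcntrn() returns unsigned long, which arm64's LP64 ABI makes 64 bits wide, so it converts losslessly to armv8pmu_read_evcntr()'s u64 return. A compile-time check of that assumption could look like this (illustrative only; armv8pmu_read_evcntr_checked() is a made-up name, not in the patch):

#include <linux/build_bug.h>

static inline u64 armv8pmu_read_evcntr_checked(int idx)
{
	u32 counter = ARMV8_IDX_TO_COUNTER(idx);

	/* arm64 is LP64: unsigned long and u64 are both 64 bits wide. */
	BUILD_BUG_ON(sizeof(unsigned long) != sizeof(u64));
	return read_pmevcntrn(counter);
}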
@@ -471,8 +532,9 @@ static u64 armv8pmu_read_counter(struct perf_event *event)
 
 static inline void armv8pmu_write_evcntr(int idx, u64 value)
 {
-	armv8pmu_select_counter(idx);
-	write_sysreg(value, pmxevcntr_el0);
+	u32 counter = ARMV8_IDX_TO_COUNTER(idx);
+
+	write_pmevcntrn(counter, value);
 }
 
 static inline void armv8pmu_write_hw_counter(struct perf_event *event,
@@ -503,9 +565,10 @@ static void armv8pmu_write_counter(struct perf_event *event, u64 value)
 
 static inline void armv8pmu_write_evtype(int idx, u32 val)
 {
-	armv8pmu_select_counter(idx);
+	u32 counter = ARMV8_IDX_TO_COUNTER(idx);
+
 	val &= ARMV8_PMU_EVTYPE_MASK;
-	write_sysreg(val, pmxevtyper_el0);
+	write_pmevtypern(counter, val);
 }
 
 static inline void armv8pmu_write_event_type(struct perf_event *event)
@@ -525,7 +588,10 @@ static inline void armv8pmu_write_event_type(struct perf_event *event)
 		armv8pmu_write_evtype(idx - 1, hwc->config_base);
 		armv8pmu_write_evtype(idx, chain_evt);
 	} else {
-		armv8pmu_write_evtype(idx, hwc->config_base);
+		if (idx == ARMV8_IDX_CYCLE_COUNTER)
+			write_sysreg(hwc->config_base, pmccfiltr_el0);
+		else
+			armv8pmu_write_evtype(idx, hwc->config_base);
 	}
 }
 
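The new else-branch is needed because the direct pmevtyperN_el0 accessors only cover event counters 0-30. Under the old indirection the cycle counter worked implicitly: ARMV8_IDX_TO_COUNTER() maps ARMV8_IDX_CYCLE_COUNTER to 31, and with PMSELR_EL0.SEL == 31 the PMXEVTYPER_EL0 alias reaches PMCCFILTR_EL0. With direct accessors that aliasing is gone, hence the explicit split (the hunk above, annotated):

if (idx == ARMV8_IDX_CYCLE_COUNTER)
	/* The cycle counter has no PMEVTYPERn; write its filter register. */
	write_sysreg(hwc->config_base, pmccfiltr_el0);
else
	/* Event counters 0-30 go through the direct pmevtyperN_el0 path. */
	armv8pmu_write_evtype(idx, hwc->config_base);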
@@ -564,6 +630,11 @@ static inline void armv8pmu_enable_event_counter(struct perf_event *event)
 static inline void armv8pmu_disable_counter(u32 mask)
 {
 	write_sysreg(mask, pmcntenclr_el0);
+	/*
+	 * Make sure the effects of disabling the counter are visible before we
+	 * start configuring the event.
+	 */
+	isb();
 }
 
 static inline void armv8pmu_disable_event_counter(struct perf_event *event)
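The window this ISB closes shows up in armv8pmu_enable_event(), part of which is quoted in the next hunk. An abridged, annotated sketch of the ordering (helper names as in this file; the comments are explanatory, not from the source):

static void armv8pmu_enable_event(struct perf_event *event)
{
	/*
	 * Stop the old event. The ISB in armv8pmu_disable_counter()
	 * guarantees counting has stopped before the type register is
	 * rewritten below; otherwise the old event could overflow and
	 * raise an interrupt that gets attributed to the new event.
	 */
	armv8pmu_disable_event_counter(event);

	/* Program the new event... */
	armv8pmu_write_event_type(event);

	/* ...then unmask its interrupt and start counting. */
	armv8pmu_enable_event_irq(event);
	armv8pmu_enable_event_counter(event);
}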
@@ -636,7 +707,7 @@ static void armv8pmu_enable_event(struct perf_event *event)
 	armv8pmu_disable_event_counter(event);
 
 	/*
-	 * Set event (if destined for PMNx counters).
+	 * Set event.
 	 */
 	armv8pmu_write_event_type(event);
 