Commit f76b130

Julien Thierry authored and Will Deacon committed

arm_pmu: Introduce pmu_irq_ops

Currently the PMU interrupt can either be a normal irq or a percpu
irq. Supporting NMI will introduce two cases for each existing one. It
becomes a mess of 'if's when managing the interrupt.

Define sets of callbacks for operations commonly done on the
interrupt. The appropriate set of callbacks is selected at interrupt
request time, which simplifies enabling, disabling, and freeing the
interrupt.

Signed-off-by: Julien Thierry <julien.thierry@arm.com>
Signed-off-by: Alexandru Elisei <alexandru.elisei@arm.com>
Tested-by: Sumit Garg <sumit.garg@linaro.org> (Developerbox)
Cc: Julien Thierry <julien.thierry.kdev@gmail.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Link: https://lore.kernel.org/r/20200924110706.254996-7-alexandru.elisei@arm.com
Signed-off-by: Will Deacon <will@kernel.org>
1 parent 95e92e4 commit f76b130
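
The core of the patch is a small ops table: the flavour of the interrupt
is decided once, when it is requested, and a pointer to the matching
callback set is stored per CPU, so every later enable/disable/free is a
single indirect call. As a standalone illustration of that pattern, here
is a minimal userspace C sketch; the printf bodies, the irq number, and
the percpu flag are stand-ins for illustration, not the kernel
implementation:

#include <stdio.h>

struct pmu_irq_ops {
	void (*enable_pmuirq)(unsigned int irq);
	void (*disable_pmuirq)(unsigned int irq);
};

/* Stand-ins for enable_irq()/disable_irq_nosync() on a normal irq... */
static void enable_normal(unsigned int irq)
{
	printf("enable_irq(%u)\n", irq);
}

static void disable_normal(unsigned int irq)
{
	printf("disable_irq_nosync(%u)\n", irq);
}

/* ...and for enable_percpu_irq()/disable_percpu_irq() on a percpu one. */
static void enable_percpu(unsigned int irq)
{
	printf("enable_percpu_irq(%u, IRQ_TYPE_NONE)\n", irq);
}

static void disable_percpu(unsigned int irq)
{
	printf("disable_percpu_irq(%u)\n", irq);
}

static const struct pmu_irq_ops pmuirq_ops = {
	.enable_pmuirq = enable_normal,
	.disable_pmuirq = disable_normal,
};

static const struct pmu_irq_ops percpu_pmuirq_ops = {
	.enable_pmuirq = enable_percpu,
	.disable_pmuirq = disable_percpu,
};

int main(void)
{
	/* Decided once, at request time, as armpmu_request_irq() does. */
	int percpu = 1;
	const struct pmu_irq_ops *ops = percpu ? &percpu_pmuirq_ops : &pmuirq_ops;

	/*
	 * Call sites dispatch through the stored pointer; no repeated
	 * irq_is_percpu_devid()-style checks remain here.
	 */
	ops->enable_pmuirq(23);
	ops->disable_pmuirq(23);
	return 0;
}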

1 file changed: drivers/perf/arm_pmu.c
Lines changed: 74 additions & 16 deletions
@@ -26,8 +26,46 @@
 
 #include <asm/irq_regs.h>
 
+static int armpmu_count_irq_users(const int irq);
+
+struct pmu_irq_ops {
+	void (*enable_pmuirq)(unsigned int irq);
+	void (*disable_pmuirq)(unsigned int irq);
+	void (*free_pmuirq)(unsigned int irq, int cpu, void __percpu *devid);
+};
+
+static void armpmu_free_pmuirq(unsigned int irq, int cpu, void __percpu *devid)
+{
+	free_irq(irq, per_cpu_ptr(devid, cpu));
+}
+
+static const struct pmu_irq_ops pmuirq_ops = {
+	.enable_pmuirq = enable_irq,
+	.disable_pmuirq = disable_irq_nosync,
+	.free_pmuirq = armpmu_free_pmuirq
+};
+
+static void armpmu_enable_percpu_pmuirq(unsigned int irq)
+{
+	enable_percpu_irq(irq, IRQ_TYPE_NONE);
+}
+
+static void armpmu_free_percpu_pmuirq(unsigned int irq, int cpu,
+				      void __percpu *devid)
+{
+	if (armpmu_count_irq_users(irq) == 1)
+		free_percpu_irq(irq, devid);
+}
+
+static const struct pmu_irq_ops percpu_pmuirq_ops = {
+	.enable_pmuirq = armpmu_enable_percpu_pmuirq,
+	.disable_pmuirq = disable_percpu_irq,
+	.free_pmuirq = armpmu_free_percpu_pmuirq
+};
+
 static DEFINE_PER_CPU(struct arm_pmu *, cpu_armpmu);
 static DEFINE_PER_CPU(int, cpu_irq);
+static DEFINE_PER_CPU(const struct pmu_irq_ops *, cpu_irq_ops);
 
 static inline u64 arm_pmu_event_max_period(struct perf_event *event)
 {
@@ -544,25 +582,42 @@ static int armpmu_count_irq_users(const int irq)
 	return count;
 }
 
+static const struct pmu_irq_ops *armpmu_find_irq_ops(int irq)
+{
+	const struct pmu_irq_ops *ops = NULL;
+	int cpu;
+
+	for_each_possible_cpu(cpu) {
+		if (per_cpu(cpu_irq, cpu) != irq)
+			continue;
+
+		ops = per_cpu(cpu_irq_ops, cpu);
+		if (ops)
+			break;
+	}
+
+	return ops;
+}
+
 void armpmu_free_irq(int irq, int cpu)
 {
 	if (per_cpu(cpu_irq, cpu) == 0)
 		return;
 	if (WARN_ON(irq != per_cpu(cpu_irq, cpu)))
 		return;
 
-	if (!irq_is_percpu_devid(irq))
-		free_irq(irq, per_cpu_ptr(&cpu_armpmu, cpu));
-	else if (armpmu_count_irq_users(irq) == 1)
-		free_percpu_irq(irq, &cpu_armpmu);
+	per_cpu(cpu_irq_ops, cpu)->free_pmuirq(irq, cpu, &cpu_armpmu);
 
 	per_cpu(cpu_irq, cpu) = 0;
+	per_cpu(cpu_irq_ops, cpu) = NULL;
 }
 
 int armpmu_request_irq(int irq, int cpu)
 {
 	int err = 0;
 	const irq_handler_t handler = armpmu_dispatch_irq;
+	const struct pmu_irq_ops *irq_ops;
+
 	if (!irq)
 		return 0;
 
@@ -584,15 +639,26 @@ int armpmu_request_irq(int irq, int cpu)
 		irq_set_status_flags(irq, IRQ_NOAUTOEN);
 		err = request_irq(irq, handler, irq_flags, "arm-pmu",
 				  per_cpu_ptr(&cpu_armpmu, cpu));
+
+		irq_ops = &pmuirq_ops;
 	} else if (armpmu_count_irq_users(irq) == 0) {
 		err = request_percpu_irq(irq, handler, "arm-pmu",
 					 &cpu_armpmu);
+
+		irq_ops = &percpu_pmuirq_ops;
+	} else {
+		/* Per cpudevid irq was already requested by another CPU */
+		irq_ops = armpmu_find_irq_ops(irq);
+
+		if (WARN_ON(!irq_ops))
+			err = -EINVAL;
 	}
 
 	if (err)
 		goto err_out;
 
 	per_cpu(cpu_irq, cpu) = irq;
+	per_cpu(cpu_irq_ops, cpu) = irq_ops;
 	return 0;
 
 err_out:
@@ -625,12 +691,8 @@ static int arm_perf_starting_cpu(unsigned int cpu, struct hlist_node *node)
 	per_cpu(cpu_armpmu, cpu) = pmu;
 
 	irq = armpmu_get_cpu_irq(pmu, cpu);
-	if (irq) {
-		if (irq_is_percpu_devid(irq))
-			enable_percpu_irq(irq, IRQ_TYPE_NONE);
-		else
-			enable_irq(irq);
-	}
+	if (irq)
+		per_cpu(cpu_irq_ops, cpu)->enable_pmuirq(irq);
 
 	return 0;
 }
@@ -644,12 +706,8 @@ static int arm_perf_teardown_cpu(unsigned int cpu, struct hlist_node *node)
 		return 0;
 
 	irq = armpmu_get_cpu_irq(pmu, cpu);
-	if (irq) {
-		if (irq_is_percpu_devid(irq))
-			disable_percpu_irq(irq);
-		else
-			disable_irq_nosync(irq);
-	}
+	if (irq)
+		per_cpu(cpu_irq_ops, cpu)->disable_pmuirq(irq);
 
 	per_cpu(cpu_armpmu, cpu) = NULL;
 
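A note on why the refactoring pays off, given the NMI motivation in the
commit message: once every call site dispatches through cpu_irq_ops,
adding an interrupt flavour means adding one callback table and one
selection branch in armpmu_request_irq(); the enable, disable, and free
call sites stay untouched. Continuing the userspace sketch above (the
NMI naming is hypothetical here, not something this commit adds):

/*
 * One new table covers a third flavour; ops->enable_pmuirq() and
 * ops->disable_pmuirq() callers are unchanged.
 */
static void enable_nmi(unsigned int irq)
{
	printf("enable NMI-backed PMU interrupt %u\n", irq);
}

static void disable_nmi(unsigned int irq)
{
	printf("disable NMI-backed PMU interrupt %u\n", irq);
}

static const struct pmu_irq_ops pmunmi_ops = {
	.enable_pmuirq = enable_nmi,
	.disable_pmuirq = disable_nmi,
};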