Skip to content

Commit 95e92e4

Browse files
Julien Thierry authored and willdeacon committed
KVM: arm64: pmu: Make overflow handler NMI safe
kvm_vcpu_kick() is not NMI safe. When the overflow handler is called from
NMI context, defer waking the vcpu to an irq_work queue.

A vcpu can be freed while it's not running by kvm_destroy_vm(). Prevent
running the irq_work for a non-existent vcpu by calling irq_work_sync() on
the PMU destroy path.

[Alexandru E.: Added irq_work_sync()]

Signed-off-by: Julien Thierry <julien.thierry@arm.com>
Signed-off-by: Alexandru Elisei <alexandru.elisei@arm.com>
Tested-by: Sumit Garg <sumit.garg@linaro.org> (Developerbox)
Cc: Julien Thierry <julien.thierry.kdev@gmail.com>
Cc: Marc Zyngier <marc.zyngier@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: James Morse <james.morse@arm.com>
Cc: Suzuki K Pouloze <suzuki.poulose@arm.com>
Cc: kvm@vger.kernel.org
Cc: kvmarm@lists.cs.columbia.edu
Link: https://lore.kernel.org/r/20200924110706.254996-6-alexandru.elisei@arm.com
Signed-off-by: Will Deacon <will@kernel.org>
1 parent 05ab728 commit 95e92e4

2 files changed

Lines changed: 26 additions & 1 deletion

File tree

arch/arm64/kvm/pmu-emul.c

Lines changed: 25 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -269,6 +269,7 @@ void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu)
269269

270270
for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++)
271271
kvm_pmu_release_perf_event(&pmu->pmc[i]);
272+
irq_work_sync(&vcpu->arch.pmu.overflow_work);
272273
}
273274

274275
u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
@@ -433,6 +434,22 @@ void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu)
433434
kvm_pmu_update_state(vcpu);
434435
}
435436

437+
/**
438+
* When perf interrupt is an NMI, we cannot safely notify the vcpu corresponding
439+
* to the event.
440+
* This is why we need a callback to do it once outside of the NMI context.
441+
*/
442+
static void kvm_pmu_perf_overflow_notify_vcpu(struct irq_work *work)
443+
{
444+
struct kvm_vcpu *vcpu;
445+
struct kvm_pmu *pmu;
446+
447+
pmu = container_of(work, struct kvm_pmu, overflow_work);
448+
vcpu = kvm_pmc_to_vcpu(pmu->pmc);
449+
450+
kvm_vcpu_kick(vcpu);
451+
}
452+
436453
/**
437454
* When the perf event overflows, set the overflow status and inform the vcpu.
438455
*/
@@ -465,7 +482,11 @@ static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
465482

466483
if (kvm_pmu_overflow_status(vcpu)) {
467484
kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
468-
kvm_vcpu_kick(vcpu);
485+
486+
if (!in_nmi())
487+
kvm_vcpu_kick(vcpu);
488+
else
489+
irq_work_queue(&vcpu->arch.pmu.overflow_work);
469490
}
470491

471492
cpu_pmu->pmu.start(perf_event, PERF_EF_RELOAD);
@@ -764,6 +785,9 @@ static int kvm_arm_pmu_v3_init(struct kvm_vcpu *vcpu)
764785
return ret;
765786
}
766787

788+
init_irq_work(&vcpu->arch.pmu.overflow_work,
789+
kvm_pmu_perf_overflow_notify_vcpu);
790+
767791
vcpu->arch.pmu.created = true;
768792
return 0;
769793
}

include/kvm/arm_pmu.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -27,6 +27,7 @@ struct kvm_pmu {
2727
bool ready;
2828
bool created;
2929
bool irq_level;
30+
struct irq_work overflow_work;
3031
};
3132

3233
#define kvm_arm_pmu_v3_ready(v) ((v)->arch.pmu.ready)

0 commit comments

Comments (0)