Skip to content

Commit 29cf0f5

Browse files
Ben Gardon authored and Paolo Bonzini (bonzini) committed
kvm: x86/mmu: NX largepage recovery for TDP MMU
When KVM maps a largepage backed region at a lower level in order to make it executable (i.e. NX large page shattering), it reduces the TLB performance of that region. In order to avoid making this degradation permanent, KVM must periodically reclaim shattered NX largepages by zapping them and allowing them to be rebuilt in the page fault handler.

With this patch, the TDP MMU does not respect KVM's rate limiting on reclaim. It traverses the entire TDP structure every time. This will be addressed in a future patch.

Tested by running kvm-unit-tests and KVM selftests on an Intel Haswell machine. This series introduced no new failures.

This series can be viewed in Gerrit at: https://linux-review.googlesource.com/c/virt/kvm/kvm/+/2538

Signed-off-by: Ben Gardon <bgardon@google.com>
Message-Id: <20201014182700.2888246-21-bgardon@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
1 parent daa5b6c commit 29cf0f5

3 files changed

Lines changed: 18 additions & 4 deletions

File tree

arch/x86/kvm/mmu/mmu.c

Lines changed: 9 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -776,7 +776,7 @@ static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
776776
kvm_mmu_gfn_disallow_lpage(slot, gfn);
777777
}
778778

779-
static void account_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp)
779+
void account_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp)
780780
{
781781
if (sp->lpage_disallowed)
782782
return;
@@ -804,7 +804,7 @@ static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
804804
kvm_mmu_gfn_allow_lpage(slot, gfn);
805805
}
806806

807-
static void unaccount_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp)
807+
void unaccount_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp)
808808
{
809809
--kvm->stat.nx_lpage_splits;
810810
sp->lpage_disallowed = false;
@@ -5988,8 +5988,13 @@ static void kvm_recover_nx_lpages(struct kvm *kvm)
59885988
struct kvm_mmu_page,
59895989
lpage_disallowed_link);
59905990
WARN_ON_ONCE(!sp->lpage_disallowed);
5991-
kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
5992-
WARN_ON_ONCE(sp->lpage_disallowed);
5991+
if (sp->tdp_mmu_page)
5992+
kvm_tdp_mmu_zap_gfn_range(kvm, sp->gfn,
5993+
sp->gfn + KVM_PAGES_PER_HPAGE(sp->role.level));
5994+
else {
5995+
kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
5996+
WARN_ON_ONCE(sp->lpage_disallowed);
5997+
}
59935998

59945999
if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
59956000
kvm_mmu_commit_zap_page(kvm, &invalid_list);

arch/x86/kvm/mmu/mmu_internal.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -143,4 +143,7 @@ bool is_nx_huge_page_enabled(void);
143143

144144
void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc);
145145

146+
void account_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp);
147+
void unaccount_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp);
148+
146149
#endif /* __KVM_X86_MMU_INTERNAL_H */

arch/x86/kvm/mmu/tdp_mmu.c

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -273,6 +273,9 @@ static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
273273

274274
list_del(&sp->link);
275275

276+
if (sp->lpage_disallowed)
277+
unaccount_huge_nx_page(kvm, sp);
278+
276279
for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
277280
old_child_spte = READ_ONCE(*(pt + i));
278281
WRITE_ONCE(*(pt + i), 0);
@@ -571,6 +574,9 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
571574
!shadow_accessed_mask);
572575

573576
trace_kvm_mmu_get_page(sp, true);
577+
if (huge_page_disallowed && req_level >= iter.level)
578+
account_huge_nx_page(vcpu->kvm, sp);
579+
574580
tdp_mmu_set_spte(vcpu->kvm, &iter, new_spte);
575581
}
576582
}

0 commit comments

Comments (0)