Skip to content

Commit 46044f7

Browse files
Ben Gardon authored and bonzini (Paolo Bonzini) committed
kvm: x86/mmu: Support write protection for nesting in tdp MMU
To support nested virtualization, KVM will sometimes need to write protect pages which are part of a shadowed paging structure or are not writable in the shadowed paging structure. Add a function to write protect GFN mappings for this purpose. Tested by running kvm-unit-tests and KVM selftests on an Intel Haswell machine. This series introduced no new failures. This series can be viewed in Gerrit at: https://linux-review.googlesource.com/c/virt/kvm/kvm/+/2538 Signed-off-by: Ben Gardon <bgardon@google.com> Message-Id: <20201014182700.2888246-18-bgardon@google.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
1 parent 1488199 commit 46044f7

3 files changed

Lines changed: 57 additions & 0 deletions

File tree

arch/x86/kvm/mmu/mmu.c

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1299,6 +1299,10 @@ bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
12991299
write_protected |= __rmap_write_protect(kvm, rmap_head, true);
13001300
}
13011301

1302+
if (kvm->arch.tdp_mmu_enabled)
1303+
write_protected |=
1304+
kvm_tdp_mmu_write_protect_gfn(kvm, slot, gfn);
1305+
13021306
return write_protected;
13031307
}
13041308

arch/x86/kvm/mmu/tdp_mmu.c

Lines changed: 50 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1078,3 +1078,53 @@ void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
10781078
kvm_mmu_put_root(kvm, root);
10791079
}
10801080
}
1081+
1082+
/*
1083+
* Removes write access on the last level SPTE mapping this GFN and unsets the
1084+
* SPTE_MMU_WRITABLE bit to ensure future writes continue to be intercepted.
1085+
* Returns true if an SPTE was set and a TLB flush is needed.
1086+
*/
1087+
static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
1088+
gfn_t gfn)
1089+
{
1090+
struct tdp_iter iter;
1091+
u64 new_spte;
1092+
bool spte_set = false;
1093+
1094+
tdp_root_for_each_leaf_pte(iter, root, gfn, gfn + 1) {
1095+
if (!is_writable_pte(iter.old_spte))
1096+
break;
1097+
1098+
new_spte = iter.old_spte &
1099+
~(PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE);
1100+
1101+
tdp_mmu_set_spte(kvm, &iter, new_spte);
1102+
spte_set = true;
1103+
}
1104+
1105+
return spte_set;
1106+
}
1107+
1108+
/*
1109+
* Removes write access on the last level SPTE mapping this GFN and unsets the
1110+
* SPTE_MMU_WRITABLE bit to ensure future writes continue to be intercepted.
1111+
* Returns true if an SPTE was set and a TLB flush is needed.
1112+
*/
1113+
bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
1114+
struct kvm_memory_slot *slot, gfn_t gfn)
1115+
{
1116+
struct kvm_mmu_page *root;
1117+
int root_as_id;
1118+
bool spte_set = false;
1119+
1120+
lockdep_assert_held(&kvm->mmu_lock);
1121+
for_each_tdp_mmu_root(kvm, root) {
1122+
root_as_id = kvm_mmu_page_as_id(root);
1123+
if (root_as_id != slot->as_id)
1124+
continue;
1125+
1126+
spte_set |= write_protect_gfn(kvm, root, gfn);
1127+
}
1128+
return spte_set;
1129+
}
1130+

arch/x86/kvm/mmu/tdp_mmu.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -40,4 +40,7 @@ void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
4040
bool kvm_tdp_mmu_slot_set_dirty(struct kvm *kvm, struct kvm_memory_slot *slot);
4141
void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
4242
const struct kvm_memory_slot *slot);
43+
44+
bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
45+
struct kvm_memory_slot *slot, gfn_t gfn);
4346
#endif /* __KVM_X86_MMU_TDP_MMU_H */

0 commit comments

Comments (0)