Skip to content

Commit 95fb5b0

Browse files
Ben Gardon authored and bonzini committed
kvm: x86/mmu: Support MMIO in the TDP MMU
In order to support MMIO, KVM must be able to walk the TDP paging structures to find mappings for a given GFN. Support this walk for the TDP MMU. Tested by running kvm-unit-tests and KVM selftests on an Intel Haswell machine. This series introduced no new failures. This series can be viewed in Gerrit at: https://linux-review.googlesource.com/c/virt/kvm/kvm/+/2538 v2: Thanks to Dan Carpenter and kernel test robot for finding that root was used uninitialized in get_mmio_spte. Signed-off-by: Ben Gardon <bgardon@google.com> Reported-by: kernel test robot <lkp@intel.com> Reported-by: Dan Carpenter <dan.carpenter@oracle.com> Message-Id: <20201014182700.2888246-19-bgardon@google.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
1 parent 46044f7 commit 95fb5b0

3 files changed

Lines changed: 72 additions & 21 deletions

File tree

arch/x86/kvm/mmu/mmu.c

Lines changed: 49 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -3479,54 +3479,82 @@ static bool mmio_info_in_cache(struct kvm_vcpu *vcpu, u64 addr, bool direct)
34793479
return vcpu_match_mmio_gva(vcpu, addr);
34803480
}
34813481

3482-
/* return true if reserved bit is detected on spte. */
3483-
static bool
3484-
walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
3482+
/*
3483+
* Return the level of the lowest level SPTE added to sptes.
3484+
* That SPTE may be non-present.
3485+
*/
3486+
static int get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes)
34853487
{
34863488
struct kvm_shadow_walk_iterator iterator;
3487-
u64 sptes[PT64_ROOT_MAX_LEVEL], spte = 0ull;
3488-
struct rsvd_bits_validate *rsvd_check;
3489-
int root, leaf;
3490-
bool reserved = false;
3489+
int leaf = vcpu->arch.mmu->root_level;
3490+
u64 spte;
34913491

3492-
rsvd_check = &vcpu->arch.mmu->shadow_zero_check;
34933492

34943493
walk_shadow_page_lockless_begin(vcpu);
34953494

3496-
for (shadow_walk_init(&iterator, vcpu, addr),
3497-
leaf = root = iterator.level;
3495+
for (shadow_walk_init(&iterator, vcpu, addr);
34983496
shadow_walk_okay(&iterator);
34993497
__shadow_walk_next(&iterator, spte)) {
3498+
leaf = iterator.level;
35003499
spte = mmu_spte_get_lockless(iterator.sptep);
35013500

35023501
sptes[leaf - 1] = spte;
3503-
leaf--;
35043502

35053503
if (!is_shadow_present_pte(spte))
35063504
break;
35073505

3506+
}
3507+
3508+
walk_shadow_page_lockless_end(vcpu);
3509+
3510+
return leaf;
3511+
}
3512+
3513+
/* return true if reserved bit is detected on spte. */
3514+
static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
3515+
{
3516+
u64 sptes[PT64_ROOT_MAX_LEVEL];
3517+
struct rsvd_bits_validate *rsvd_check;
3518+
int root = vcpu->arch.mmu->root_level;
3519+
int leaf;
3520+
int level;
3521+
bool reserved = false;
3522+
3523+
if (!VALID_PAGE(vcpu->arch.mmu->root_hpa)) {
3524+
*sptep = 0ull;
3525+
return reserved;
3526+
}
3527+
3528+
if (is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa))
3529+
leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes);
3530+
else
3531+
leaf = get_walk(vcpu, addr, sptes);
3532+
3533+
rsvd_check = &vcpu->arch.mmu->shadow_zero_check;
3534+
3535+
for (level = root; level >= leaf; level--) {
3536+
if (!is_shadow_present_pte(sptes[level - 1]))
3537+
break;
35083538
/*
35093539
* Use a bitwise-OR instead of a logical-OR to aggregate the
35103540
* reserved bit and EPT's invalid memtype/XWR checks to avoid
35113541
* adding a Jcc in the loop.
35123542
*/
3513-
reserved |= __is_bad_mt_xwr(rsvd_check, spte) |
3514-
__is_rsvd_bits_set(rsvd_check, spte, iterator.level);
3543+
reserved |= __is_bad_mt_xwr(rsvd_check, sptes[level - 1]) |
3544+
__is_rsvd_bits_set(rsvd_check, sptes[level - 1],
3545+
level);
35153546
}
35163547

3517-
walk_shadow_page_lockless_end(vcpu);
3518-
35193548
if (reserved) {
35203549
pr_err("%s: detect reserved bits on spte, addr 0x%llx, dump hierarchy:\n",
35213550
__func__, addr);
3522-
while (root > leaf) {
3551+
for (level = root; level >= leaf; level--)
35233552
pr_err("------ spte 0x%llx level %d.\n",
3524-
sptes[root - 1], root);
3525-
root--;
3526-
}
3553+
sptes[level - 1], level);
35273554
}
35283555

3529-
*sptep = spte;
3556+
*sptep = sptes[leaf - 1];
3557+
35303558
return reserved;
35313559
}
35323560

@@ -3538,7 +3566,7 @@ static int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct)
35383566
if (mmio_info_in_cache(vcpu, addr, direct))
35393567
return RET_PF_EMULATE;
35403568

3541-
reserved = walk_shadow_page_get_mmio_spte(vcpu, addr, &spte);
3569+
reserved = get_mmio_spte(vcpu, addr, &spte);
35423570
if (WARN_ON(reserved))
35433571
return -EINVAL;
35443572

arch/x86/kvm/mmu/tdp_mmu.c

Lines changed: 21 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,10 @@
77
#include "tdp_mmu.h"
88
#include "spte.h"
99

10+
#ifdef CONFIG_X86_64
1011
static bool __read_mostly tdp_mmu_enabled = false;
12+
module_param_named(tdp_mmu, tdp_mmu_enabled, bool, 0644);
13+
#endif
1114

1215
static bool is_tdp_mmu_enabled(void)
1316
{
@@ -1128,3 +1131,21 @@ bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
11281131
return spte_set;
11291132
}
11301133

1134+
/*
1135+
* Return the level of the lowest level SPTE added to sptes.
1136+
* That SPTE may be non-present.
1137+
*/
1138+
int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes)
1139+
{
1140+
struct tdp_iter iter;
1141+
struct kvm_mmu *mmu = vcpu->arch.mmu;
1142+
int leaf = vcpu->arch.mmu->shadow_root_level;
1143+
gfn_t gfn = addr >> PAGE_SHIFT;
1144+
1145+
tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
1146+
leaf = iter.level;
1147+
sptes[leaf - 1] = iter.old_spte;
1148+
}
1149+
1150+
return leaf;
1151+
}

arch/x86/kvm/mmu/tdp_mmu.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -43,4 +43,6 @@ void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
4343

4444
bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
4545
struct kvm_memory_slot *slot, gfn_t gfn);
46+
47+
int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes);
4648
#endif /* __KVM_X86_MMU_TDP_MMU_H */

0 commit comments

Comments (0)