KVM: MMU: Fix SMP shadow instantiation race

There is a race where VCPU0 is shadowing a pagetable entry while VCPU1
is updating it, which results in a stale shadow copy.

Fix that by comparing the contents of the cached guest pte with the
current guest pte after write-protecting the guest pagetable.

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
Author:    Marcelo Tosatti, 2007-12-11 19:12:27 -05:00
Committer: Avi Kivity
Commit:    7819026eef
Parent:    1d07543414
2 changed files with 29 additions and 12 deletions
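A note on the pattern the patch uses, with a small illustration. The fix amounts to a recheck-after-write-protect: the fault path caches the guest pte it walked, write-protects the guest page table when it instantiates a new shadow page, then re-reads the guest pte and bails out if another VCPU changed it in between. The user-space sketch below only illustrates that ordering; every identifier in it (guest_pte, read_guest_pte, write_protect_guest_table, shadow_pte_still_valid) is invented for the example and is not KVM code.

/*
 * Minimal user-space sketch of the recheck pattern (not kernel code):
 * cache a guest PTE, "write-protect" the guest page table, then re-read
 * the PTE and abort the shadow instantiation if another writer changed
 * it in the meantime. All identifiers below are invented for this example.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t pt_element_t;

/* Stand-in for a guest page-table entry that another VCPU may update. */
static volatile pt_element_t guest_pte = 0x1000 | 0x1;  /* frame | present */

/* Models kvm_read_guest() fetching the current guest PTE from guest memory. */
static pt_element_t read_guest_pte(void)
{
        return guest_pte;
}

/* Models rmap_write_protect(); after this, further guest writes would fault. */
static void write_protect_guest_table(void)
{
}

/*
 * True if it is still safe to shadow cached_pte: the guest entry did not
 * change between walking it and write-protecting its page table.
 */
static bool shadow_pte_still_valid(pt_element_t cached_pte)
{
        write_protect_guest_table();
        return read_guest_pte() == cached_pte;
}

int main(void)
{
        pt_element_t cached = read_guest_pte();

        /* Simulate the racing VCPU updating the entry after the walk. */
        guest_pte = 0x2000 | 0x1;

        if (!shadow_pte_still_valid(cached))
                printf("stale cached pte, abort and retry the fault\n");
        else
                printf("cached pte still current, shadow it\n");
        return 0;
}

In the patch itself the same check appears in FNAME(fetch): after kvm_mmu_get_page() reports a freshly created shadow page, kvm_read_guest() re-reads the pte at walker->pte_gpa[level - 2] and the function returns NULL on a mismatch, so the fault is simply retried.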

mmu.c

@@ -681,7 +681,8 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
                                              unsigned level,
                                              int metaphysical,
                                              unsigned access,
-                                             u64 *parent_pte)
+                                             u64 *parent_pte,
+                                             bool *new_page)
 {
         union kvm_mmu_page_role role;
         unsigned index;
@@ -720,6 +721,8 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
         vcpu->mmu.prefetch_page(vcpu, sp);
         if (!metaphysical)
                 rmap_write_protect(vcpu->kvm, gfn);
+        if (new_page)
+                *new_page = 1;
         return sp;
 }

@@ -993,7 +996,8 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
                                 >> PAGE_SHIFT;
                         new_table = kvm_mmu_get_page(vcpu, pseudo_gfn,
                                                      v, level - 1,
-                                                     1, ACC_ALL, &table[index]);
+                                                     1, ACC_ALL, &table[index],
+                                                     NULL);
                         if (!new_table) {
                                 pgprintk("nonpaging_map: ENOMEM\n");
                                 return -ENOMEM;
@@ -1059,7 +1063,7 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)

                 ASSERT(!VALID_PAGE(root));
                 sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
-                                      PT64_ROOT_LEVEL, 0, ACC_ALL, NULL);
+                                      PT64_ROOT_LEVEL, 0, ACC_ALL, NULL, NULL);
                 root = __pa(sp->spt);
                 ++sp->root_count;
                 vcpu->mmu.root_hpa = root;
@@ -1080,7 +1084,7 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
                         root_gfn = 0;
                 sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
                                       PT32_ROOT_LEVEL, !is_paging(vcpu),
-                                      ACC_ALL, NULL);
+                                      ACC_ALL, NULL, NULL);
                 root = __pa(sp->spt);
                 ++sp->root_count;
                 vcpu->mmu.pae_root[i] = root | PT_PRESENT_MASK;

paging_tmpl.h

@@ -65,7 +65,8 @@
 struct guest_walker {
         int level;
         gfn_t table_gfn[PT_MAX_FULL_LEVELS];
-        pt_element_t pte;
+        pt_element_t ptes[PT_MAX_FULL_LEVELS];
+        gpa_t pte_gpa[PT_MAX_FULL_LEVELS];
         unsigned pt_access;
         unsigned pte_access;
         gfn_t gfn;
@@ -150,6 +151,7 @@ walk:
                 pte_gpa = gfn_to_gpa(table_gfn);
                 pte_gpa += index * sizeof(pt_element_t);
                 walker->table_gfn[walker->level - 1] = table_gfn;
+                walker->pte_gpa[walker->level - 1] = pte_gpa;
                 pgprintk("%s: table_gfn[%d] %lx\n", __FUNCTION__,
                          walker->level - 1, table_gfn);

@@ -180,6 +182,8 @@ walk:

                 pte_access = pt_access & FNAME(gpte_access)(vcpu, pte);

+                walker->ptes[walker->level - 1] = pte;
+
                 if (walker->level == PT_PAGE_TABLE_LEVEL) {
                         walker->gfn = gpte_to_gfn(pte);
                         break;
@@ -209,9 +213,9 @@ walk:
                         goto walk;
                 pte |= PT_DIRTY_MASK;
                 kvm_mmu_pte_write(vcpu, pte_gpa, (u8 *)&pte, sizeof(pte));
+                walker->ptes[walker->level - 1] = pte;
         }

-        walker->pte = pte;
         walker->pt_access = pt_access;
         walker->pte_access = pte_access;
         pgprintk("%s: pte %llx pte_access %x pt_access %x\n",
@@ -268,7 +272,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
         u64 *shadow_ent;
         unsigned access = walker->pt_access;

-        if (!is_present_pte(walker->pte))
+        if (!is_present_pte(walker->ptes[walker->level - 1]))
                 return NULL;

         shadow_addr = vcpu->mmu.root_hpa;
@@ -285,6 +289,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
                 u64 shadow_pte;
                 int metaphysical;
                 gfn_t table_gfn;
+                bool new_page = 0;

                 shadow_ent = ((u64 *)__va(shadow_addr)) + index;
                 if (is_shadow_present_pte(*shadow_ent)) {
@@ -300,16 +305,23 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
                 if (level - 1 == PT_PAGE_TABLE_LEVEL
                     && walker->level == PT_DIRECTORY_LEVEL) {
                         metaphysical = 1;
-                        if (!is_dirty_pte(walker->pte))
+                        if (!is_dirty_pte(walker->ptes[level - 1]))
                                 access &= ~ACC_WRITE_MASK;
-                        table_gfn = gpte_to_gfn(walker->pte);
+                        table_gfn = gpte_to_gfn(walker->ptes[level - 1]);
                 } else {
                         metaphysical = 0;
                         table_gfn = walker->table_gfn[level - 2];
                 }
                 shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
                                                metaphysical, access,
-                                               shadow_ent);
+                                               shadow_ent, &new_page);
+                if (new_page && !metaphysical) {
+                        pt_element_t curr_pte;
+                        kvm_read_guest(vcpu->kvm, walker->pte_gpa[level - 2],
+                                       &curr_pte, sizeof(curr_pte));
+                        if (curr_pte != walker->ptes[level - 2])
+                                return NULL;
+                }
                 shadow_addr = __pa(shadow_page->spt);
                 shadow_pte = shadow_addr | PT_PRESENT_MASK | PT_ACCESSED_MASK
                         | PT_WRITABLE_MASK | PT_USER_MASK;
@@ -317,7 +329,8 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
         }

         mmu_set_spte(vcpu, shadow_ent, access, walker->pte_access & access,
-                     user_fault, write_fault, walker->pte & PT_DIRTY_MASK,
+                     user_fault, write_fault,
+                     walker->ptes[walker->level-1] & PT_DIRTY_MASK,
                      ptwrite, walker->gfn);

         return shadow_ent;
@@ -382,7 +395,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
         /*
          * mmio: emulate if accessible, otherwise its a guest fault.
          */
-        if (is_io_pte(*shadow_pte))
+        if (shadow_pte && is_io_pte(*shadow_pte))
                 return 1;

         ++vcpu->stat.pf_fixed;