pkgsrc/sysutils/xenkernel46/patches/patch-XSA247
bouyer f73734a7f4 Apply patches from upstream, fixing security issues XSA246 up to XSA251.
Also update patch-XSA240 from upstream, fixing issues in linear page table
handling introduced by the original XSA240 patch.
Bump PKGREVISION
2017-12-15 14:00:44 +00:00

$NetBSD: patch-XSA247,v 1.1 2017/12/15 14:00:44 bouyer Exp $

From 6208d2d761ca4cec3560322222532c4a5ba1b375 Mon Sep 17 00:00:00 2001
From: George Dunlap <george.dunlap@citrix.com>
Date: Fri, 10 Nov 2017 16:53:54 +0000
Subject: [PATCH 1/2] p2m: Always check to see if removing a p2m entry actually
worked

The PoD zero-check functions speculatively remove memory from the p2m,
then check to see if it's completely zeroed, before putting it in the
cache.

Unfortunately, the p2m_set_entry() calls may fail if the underlying
pagetable structure needs to change and the domain has exhausted its
p2m memory pool: for instance, if we're removing a 2MiB region out of
a 1GiB entry (in the p2m_pod_zero_check_superpage() case), or a 4k
region out of a 2MiB or larger entry (in the p2m_pod_zero_check()
case); and the return value is not checked.

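As a rough illustration, here is a self-contained toy model of that
unchecked-return pattern (hypothetical names such as set_entry_invalid()
and pool_pages; this is not Xen code):

    /* Splitting a large entry needs a fresh page table from the
     * domain's p2m pool, so the removal can fail when the pool is
     * exhausted -- exactly the failure described above. */
    #include <stdbool.h>
    #include <stdio.h>

    static int pool_pages;        /* free pages in the p2m memory pool */

    static int set_entry_invalid(bool needs_split)
    {
        if ( needs_split && pool_pages == 0 )
            return -1;            /* pool exhausted: entry left unchanged */
        if ( needs_split )
            pool_pages--;
        return 0;
    }

    int main(void)
    {
        pool_pages = 0;           /* the domain's pool is exhausted */

        /* Buggy pattern: the return value is dropped, so the frame
         * would be cached although the mapping was never removed. */
        set_entry_invalid(true);
        printf("buggy: frame cached, guest mapping still live\n");

        /* Fixed pattern: bail out when the removal did not happen. */
        if ( set_entry_invalid(true) )
            printf("fixed: removal failed, frame not cached\n");

        return 0;
    }
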
The underlying mfn will then be added into the PoD cache, and at some
point mapped into another location in the p2m. If the guest
afterwards balloons out this memory, it will be freed to the hypervisor
and potentially reused by another domain, in spite of the fact that
the original domain still has writable mappings to it.

There are several places where p2m_set_entry() shouldn't be able to
fail, as it is guaranteed to write an entry of the same order that
succeeded before. Add a backstop of crashing the domain just in case,
and an ASSERT_UNREACHABLE() to flag up the broken assumption on debug
builds.

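A minimal sketch of that backstop idiom, with stand-ins for the Xen
primitives (ASSERT_UNREACHABLE and domain_crash are the real Xen names;
everything else here is illustrative, not Xen code):

    #include <assert.h>
    #include <stdio.h>

    /* In Xen, ASSERT_UNREACHABLE() expands to a real assertion only on
     * debug builds; modelled here with the standard NDEBUG convention. */
    #ifdef NDEBUG
    #define ASSERT_UNREACHABLE() ((void)0)
    #else
    #define ASSERT_UNREACHABLE() assert(!"unreachable")
    #endif

    static void domain_crash(const char *d)
    {
        printf("crashing domain %s\n", d);  /* stand-in: kill the guest */
    }

    /* Writes an entry at an order that already succeeded once, so it
     * "cannot fail"; forced to fail here to exercise the backstop. */
    static int set_entry_same_order(void)
    {
        return -1;
    }

    int main(void)
    {
        if ( set_entry_same_order() )
        {
            ASSERT_UNREACHABLE();   /* debug builds: abort loudly here */
            domain_crash("d1");     /* release builds: contain the damage */
        }
        return 0;
    }
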
While we're here, use PAGE_ORDER_2M rather than a magic constant.

This is part of XSA-247.

Reported-by: George Dunlap <george.dunlap@citrix.com>
Signed-off-by: George Dunlap <george.dunlap@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
---
v4:
- Removed some trailing whitespace
v3:
- Reformat reset clause to be more compact
- Make sure to set map[i] = NULL when unmapping in case we need to bail
v2:
- Crash a domain if a p2m_set_entry we think cannot fail fails anyway.
---
xen/arch/x86/mm/p2m-pod.c | 76 +++++++++++++++++++++++++++++++++++++----------
1 file changed, 60 insertions(+), 16 deletions(-)
diff --git a/xen/arch/x86/mm/p2m-pod.c b/xen/arch/x86/mm/p2m-pod.c
index 519b80cc3d..b1f0abe02d 100644
--- xen/arch/x86/mm/p2m-pod.c.orig
+++ xen/arch/x86/mm/p2m-pod.c
@@ -729,8 +729,9 @@ p2m_pod_zero_check_superpage(struct p2m_domain *p2m, unsigned long gfn)
}
/* Try to remove the page, restoring old mapping if it fails. */
- p2m_set_entry(p2m, gfn, _mfn(INVALID_MFN), PAGE_ORDER_2M,
- p2m_populate_on_demand, p2m->default_access);
+ if ( p2m_set_entry(p2m, gfn, _mfn(INVALID_MFN), PAGE_ORDER_2M,
+ p2m_populate_on_demand, p2m->default_access) )
+ goto out;
/* Make none of the MFNs are used elsewhere... for example, mapped
* via the grant table interface, or by qemu. Allow one refcount for
@@ -786,9 +787,18 @@ p2m_pod_zero_check_superpage(struct p2m_domain *p2m, unsigned long gfn)
ret = SUPERPAGE_PAGES;
out_reset:
- if ( reset )
- p2m_set_entry(p2m, gfn, mfn0, 9, type0, p2m->default_access);
-
+ /*
+ * This p2m_set_entry() call shouldn't be able to fail, since the same order
+ * on the same gfn succeeded above. If that turns out to be false, crashing
+ * the domain should be the safest way of making sure we don't leak memory.
+ */
+ if ( reset && p2m_set_entry(p2m, gfn, mfn0, PAGE_ORDER_2M,
+ type0, p2m->default_access) )
+ {
+ ASSERT_UNREACHABLE();
+ domain_crash(d);
+ }
+
out:
gfn_unlock(p2m, gfn, SUPERPAGE_ORDER);
return ret;
@@ -845,19 +855,30 @@ p2m_pod_zero_check(struct p2m_domain *p2m, unsigned long *gfns, int count)
}
/* Try to remove the page, restoring old mapping if it fails. */
- p2m_set_entry(p2m, gfns[i], _mfn(INVALID_MFN), PAGE_ORDER_4K,
- p2m_populate_on_demand, p2m->default_access);
+ if ( p2m_set_entry(p2m, gfns[i], _mfn(INVALID_MFN), PAGE_ORDER_4K,
+ p2m_populate_on_demand, p2m->default_access) )
+ goto skip;
/* See if the page was successfully unmapped. (Allow one refcount
* for being allocated to a domain.) */
if ( (mfn_to_page(mfns[i])->count_info & PGC_count_mask) > 1 )
{
+ /*
+ * If the previous p2m_set_entry call succeeded, this one shouldn't
+ * be able to fail. If it does, crashing the domain should be safe.
+ */
+ if ( p2m_set_entry(p2m, gfns[i], mfns[i], PAGE_ORDER_4K,
+ types[i], p2m->default_access) )
+ {
+ ASSERT_UNREACHABLE();
+ domain_crash(d);
+ goto out_unmap;
+ }
+
+ skip:
unmap_domain_page(map[i]);
map[i] = NULL;
- p2m_set_entry(p2m, gfns[i], mfns[i], PAGE_ORDER_4K,
- types[i], p2m->default_access);
-
continue;
}
}
@@ -874,12 +895,25 @@ p2m_pod_zero_check(struct p2m_domain *p2m, unsigned long *gfns, int count)
unmap_domain_page(map[i]);
- /* See comment in p2m_pod_zero_check_superpage() re gnttab
- * check timing. */
- if ( j < PAGE_SIZE/sizeof(*map[i]) )
+ map[i] = NULL;
+
+ /*
+ * See comment in p2m_pod_zero_check_superpage() re gnttab
+ * check timing.
+ */
+ if ( j < (PAGE_SIZE / sizeof(*map[i])) )
{
- p2m_set_entry(p2m, gfns[i], mfns[i], PAGE_ORDER_4K,
- types[i], p2m->default_access);
+ /*
+ * If the previous p2m_set_entry call succeeded, this one shouldn't
+ * be able to fail. If it does, crashing the domain should be safe.
+ */
+ if ( p2m_set_entry(p2m, gfns[i], mfns[i], PAGE_ORDER_4K,
+ types[i], p2m->default_access) )
+ {
+ ASSERT_UNREACHABLE();
+ domain_crash(d);
+ goto out_unmap;
+ }
}
else
{
@@ -903,7 +937,17 @@ p2m_pod_zero_check(struct p2m_domain *p2m, unsigned long *gfns, int count)
p2m->pod.entry_count++;
}
}
-
+
+ return;
+
+out_unmap:
+ /*
+ * Something went wrong, probably crashing the domain. Unmap
+ * everything and return.
+ */
+ for ( i = 0; i < count; i++ )
+ if ( map[i] )
+ unmap_domain_page(map[i]);
}
#define POD_SWEEP_LIMIT 1024
--
2.15.0

From d65a029d34e3d6157c87ac343dc8eefa1b12818e Mon Sep 17 00:00:00 2001
From: George Dunlap <george.dunlap@citrix.com>
Date: Fri, 10 Nov 2017 16:53:55 +0000
Subject: [PATCH 2/2] p2m: Check return value of p2m_set_entry() when
decreasing reservation

If the entire range specified to p2m_pod_decrease_reservation() is marked
populate-on-demand, then it will make a single p2m_set_entry() call,
reducing its PoD entry count.

Unfortunately, in the right circumstances, this p2m_set_entry() call
may fail. In that case, repeated calls to decrease_reservation() may
cause p2m->pod.entry_count to fall below zero, potentially tripping
over BUG_ON()s to the contrary.

Instead, check to see if the entry succeeded, and return false if not.
The caller will then call guest_remove_page() on the gfns, which will
return -EINVAL upon finding no valid memory there to return.

Unfortunately if the order > 0, the entry may have partially changed.
A domain_crash() is probably the safest thing in that case.

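A self-contained toy model of the underflow (hypothetical names; this
is not Xen code):

    #include <stdio.h>

    static long entry_count = 1;    /* one outstanding PoD entry */

    static int set_entry(void)
    {
        return -1;                  /* always fails in this toy */
    }

    static void decrease_reservation_buggy(void)
    {
        set_entry();                /* failure ignored... */
        entry_count--;              /* ...but counted anyway */
    }

    int main(void)
    {
        decrease_reservation_buggy();
        decrease_reservation_buggy();   /* count falls to -1 */
        if ( entry_count < 0 )
            printf("BUG_ON(p2m->pod.entry_count < 0) would trip\n");
        return 0;
    }
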
Other p2m_set_entry() calls in the same function should be fine,
because they are writing the entry at its current order. Nonetheless,
check the return value and crash if our assumption turns out to be
wrong.

This is part of XSA-247.

Reported-by: George Dunlap <george.dunlap@citrix.com>
Signed-off-by: George Dunlap <george.dunlap@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
---
v2: Crash the domain if we're not sure it's safe (or if we think it
can't happen)
---
xen/arch/x86/mm/p2m-pod.c | 42 +++++++++++++++++++++++++++++++++---------
1 file changed, 33 insertions(+), 9 deletions(-)
diff --git a/xen/arch/x86/mm/p2m-pod.c b/xen/arch/x86/mm/p2m-pod.c
index b1f0abe02d..9324f16c91 100644
--- xen/arch/x86/mm/p2m-pod.c.orig
+++ xen/arch/x86/mm/p2m-pod.c
@@ -559,11 +559,23 @@ recount:
if ( !nonpod )
{
- /* All PoD: Mark the whole region invalid and tell caller
- * we're done. */
- p2m_set_entry(p2m, gpfn, _mfn(INVALID_MFN), order, p2m_invalid,
- p2m->default_access);
- p2m->pod.entry_count-=(1<<order);
+ /*
+ * All PoD: Mark the whole region invalid and tell caller
+ * we're done.
+ */
+ if ( p2m_set_entry(p2m, gpfn, _mfn(INVALID_MFN), order, p2m_invalid,
+ p2m->default_access) )
+ {
+ /*
+ * If this fails, we can't tell how much of the range was changed.
+ * Best to crash the domain unless we're sure a partial change is
+ * impossible.
+ */
+ if ( order != 0 )
+ domain_crash(d);
+ goto out_unlock;
+ }
+ p2m->pod.entry_count -= 1UL << order;
BUG_ON(p2m->pod.entry_count < 0);
ret = 1;
goto out_entry_check;
@@ -595,8 +607,14 @@ recount:
mfn = p2m->get_entry(p2m, gpfn + i, &t, &a, 0, NULL, NULL);
if ( t == p2m_populate_on_demand )
{
- p2m_set_entry(p2m, gpfn + i, _mfn(INVALID_MFN), 0, p2m_invalid,
- p2m->default_access);
+ /* This shouldn't be able to fail */
+ if ( p2m_set_entry(p2m, gpfn + i, _mfn(INVALID_MFN), 0,
+ p2m_invalid, p2m->default_access) )
+ {
+ ASSERT_UNREACHABLE();
+ domain_crash(d);
+ goto out_unlock;
+ }
p2m->pod.entry_count--;
BUG_ON(p2m->pod.entry_count < 0);
pod--;
@@ -609,8 +627,14 @@ recount:
page = mfn_to_page(mfn);
- p2m_set_entry(p2m, gpfn + i, _mfn(INVALID_MFN), 0, p2m_invalid,
- p2m->default_access);
+ /* This shouldn't be able to fail */
+ if ( p2m_set_entry(p2m, gpfn + i, _mfn(INVALID_MFN), 0,
+ p2m_invalid, p2m->default_access) )
+ {
+ ASSERT_UNREACHABLE();
+ domain_crash(d);
+ goto out_unlock;
+ }
set_gpfn_from_mfn(mfn_x(mfn), INVALID_M2P_ENTRY);
p2m_pod_cache_add(p2m, page, 0);
--
2.15.0