Changes since Xen 4.6.5: mostly bug fixes, including security fixes for XSA-206 and XSA-211 through XSA-244. PKGREVISION is set to 1 to account for the fact that this is not a stock Xen 4.6.6. Note that, unlike upstream, pv-linear-pt defaults to true, so that NetBSD PV guests (including dom0) will continue to boot without changes to boot.cfg.
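With a stock hypervisor, the same behaviour has to be requested explicitly on the Xen command line in boot.cfg. A minimal sketch of such an entry, assuming a typical NetBSD dom0 setup (the /netbsd and /xen.gz paths and the dom0_mem value are illustrative, not taken from this package):

	menu=Xen:load /netbsd console=pc;multiboot /xen.gz dom0_mem=512M pv-linear-pt=true

With this package's default of pv-linear-pt=true, no such addition is needed.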
$NetBSD: patch-XSA231,v 1.1 2017/10/17 10:57:34 bouyer Exp $

From: George Dunlap <george.dunlap@citrix.com>
Subject: xen/mm: make sure node is less than MAX_NUMNODES

The output of MEMF_get_node(memflags) can be as large as nodeid_t can
hold (currently 255).  This value is then used as an index into arrays
of size MAX_NUMNODES (64 on x86, 1 on ARM); it can be passed in by an
untrusted guest (via memory_exchange and increase_reservation) and is
not currently bounds-checked.

Check the value in page_alloc.c before using it, and also check the
value in the hypercall call sites and return -EINVAL if appropriate.
Don't permit domains other than the hardware or control domain to
allocate node-constrained memory.

This is XSA-231.

Reported-by: Matthew Daley <mattd@bugfuzz.com>
Signed-off-by: George Dunlap <george.dunlap@citrix.com>
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>

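Background note (not part of the upstream change): the node is taken from the
guest-supplied mem_flags field, in which the public interface encodes it above
the address-bits byte. The following standalone C sketch shows why the decoded
value can reach 255; the two XENMEMF_* macros are copied from Xen's public
memory.h, while MAX_NUMNODES_X86 is only an illustrative stand-in for the
hypervisor's MAX_NUMNODES on x86.

#include <stdio.h>

/* Node encoding used by the guest-visible memory interface
 * (xen/include/public/memory.h): the node lives in bits 8 and up,
 * and the decoder masks the result to 8 bits, so it can be 0..255. */
#define XENMEMF_node(x)     (((x) + 1) << 8)
#define XENMEMF_get_node(x) ((((x) >> 8) - 1) & 0xffu)

#define MAX_NUMNODES_X86 64   /* illustrative bound from the text above */

int main(void)
{
    unsigned int mem_flags = XENMEMF_node(200);       /* guest-chosen value */
    unsigned int node = XENMEMF_get_node(mem_flags);  /* decodes back to 200 */

    /* Before this patch the decoded node flowed into per-node arrays
     * unchecked; propagate_node() below now rejects such requests with
     * -EINVAL at the memory_exchange/increase_reservation call sites. */
    printf("node=%u, within MAX_NUMNODES: %s\n", node,
           node < MAX_NUMNODES_X86 ? "yes" : "no");
    return 0;
}
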
--- xen/common/memory.c.orig
+++ xen/common/memory.c
@@ -390,6 +390,31 @@ static void decrease_reservation(struct
     a->nr_done = i;
 }
 
+static bool_t propagate_node(unsigned int xmf, unsigned int *memflags)
+{
+    const struct domain *currd = current->domain;
+
+    BUILD_BUG_ON(XENMEMF_get_node(0) != NUMA_NO_NODE);
+    BUILD_BUG_ON(MEMF_get_node(0) != NUMA_NO_NODE);
+
+    if ( XENMEMF_get_node(xmf) == NUMA_NO_NODE )
+        return 1;
+
+    if ( is_hardware_domain(currd) || is_control_domain(currd) )
+    {
+        if ( XENMEMF_get_node(xmf) >= MAX_NUMNODES )
+            return 0;
+
+        *memflags |= MEMF_node(XENMEMF_get_node(xmf));
+        if ( xmf & XENMEMF_exact_node_request )
+            *memflags |= MEMF_exact_node;
+    }
+    else if ( xmf & XENMEMF_exact_node_request )
+        return 0;
+
+    return 1;
+}
+
 static long memory_exchange(XEN_GUEST_HANDLE_PARAM(xen_memory_exchange_t) arg)
 {
     struct xen_memory_exchange exch;
@@ -462,6 +487,12 @@ static long memory_exchange(XEN_GUEST_HA
         }
     }
 
+    if ( unlikely(!propagate_node(exch.out.mem_flags, &memflags)) )
+    {
+        rc = -EINVAL;
+        goto fail_early;
+    }
+
     d = rcu_lock_domain_by_any_id(exch.in.domid);
     if ( d == NULL )
     {
@@ -480,7 +511,6 @@ static long memory_exchange(XEN_GUEST_HA
                                 d,
                                 XENMEMF_get_address_bits(exch.out.mem_flags) ? :
                                 (BITS_PER_LONG+PAGE_SHIFT)));
-    memflags |= MEMF_node(XENMEMF_get_node(exch.out.mem_flags));
 
     for ( i = (exch.nr_exchanged >> in_chunk_order);
           i < (exch.in.nr_extents >> in_chunk_order);
@@ -834,12 +864,8 @@ static int construct_memop_from_reservat
         }
         read_unlock(&d->vnuma_rwlock);
     }
-    else
-    {
-        a->memflags |= MEMF_node(XENMEMF_get_node(r->mem_flags));
-        if ( r->mem_flags & XENMEMF_exact_node_request )
-            a->memflags |= MEMF_exact_node;
-    }
+    else if ( unlikely(!propagate_node(r->mem_flags, &a->memflags)) )
+        return -EINVAL;
 
     return 0;
 }
--- xen/common/page_alloc.c.orig
+++ xen/common/page_alloc.c
@@ -711,9 +711,13 @@ static struct page_info *alloc_heap_page
         if ( node >= MAX_NUMNODES )
            node = cpu_to_node(smp_processor_id());
     }
+    else if ( unlikely(node >= MAX_NUMNODES) )
+    {
+        ASSERT_UNREACHABLE();
+        return NULL;
+    }
     first_node = node;
 
-    ASSERT(node < MAX_NUMNODES);
     ASSERT(zone_lo <= zone_hi);
     ASSERT(zone_hi < NR_ZONES);
 