pkgsrc/sysutils/xenkernel46/patches/patch-XSA241
commit f73734a7f4 (bouyer, 2017-12-15 14:00:44 +00:00):
Apply patches from upstream, fixing security issues XSA246 up to XSA251.
Also update patch-XSA240 from upstream, fixing issues in linear page table
handling introduced by the original XSA240 patch.
Bump PKGREVISION.

$NetBSD: patch-XSA241,v 1.2 2017/12/15 14:00:44 bouyer Exp $
x86: don't store possibly stale TLB flush time stamp
While the timing window is extremely narrow, it is theoretically
possible for an update to the TLB flush clock and a subsequent flush
IPI to happen between the read and write parts of the update of the
per-page stamp. Exclude this possibility by disabling interrupts
across the update, preventing the IPI from being serviced in the middle.
This is XSA-241.
Reported-by: Jann Horn <jannh@google.com>
Suggested-by: George Dunlap <george.dunlap@citrix.com>
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: George Dunlap <george.dunlap@citrix.com>
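
The race is easiest to see with concrete numbers. The following standalone
C sketch is illustrative only, not Xen code: the clock values are made up,
the two-CPU interleaving is simulated sequentially on one thread, and
cpu_flush_time / page_stamp stand in for Xen's per-CPU tlbflush_time and
page->tlbflush_timestamp. It shows how a stale stamp gets stored when the
flush IPI lands between the read and the write of the per-page stamp:

#include <assert.h>
#include <stdint.h>

static uint32_t tlbflush_clock = 5; /* global virtual TLB clock         */
static uint32_t cpu_flush_time = 5; /* this CPU's last-flush time stamp */
static uint32_t page_stamp;         /* page->tlbflush_timestamp         */

int main(void)
{
    /* Unfixed sequence: read the clock first... */
    uint32_t t = tlbflush_clock;

    /* ...window: another CPU bumps the clock and sends a flush IPI,
     * which this CPU services, recording that it flushed at time 6... */
    tlbflush_clock = 6;
    cpu_flush_time = 6;

    /* ...then the stale value read earlier is stored into the page. */
    page_stamp = t;

    /* A later NEED_FLUSH()-style comparison now sees a flush (at 6)
     * that is newer than the page's stamp (5) and skips a TLB flush
     * that is actually still required. */
    assert(cpu_flush_time > page_stamp);

    /* With interrupts disabled across the read and the write, the IPI
     * cannot be serviced between them, so the stored stamp can never
     * be older than a flush this CPU has already performed. */
    return 0;
}

The page_set_tlbflush_timestamp() helper added below implements exactly this
interrupt-disabled read/write pair on x86; the ARM variant needs no such
protection because its tlbflush_current_time() is a constant 0.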
--- xen/arch/arm/smp.c.orig
+++ xen/arch/arm/smp.c
@@ -1,4 +1,5 @@
 #include <xen/config.h>
+#include <xen/mm.h>
 #include <asm/system.h>
 #include <asm/smp.h>
 #include <asm/cpregs.h>
--- xen/arch/x86/mm.c.orig
+++ xen/arch/x86/mm.c
@@ -2440,7 +2440,7 @@ static int _put_final_page_type(struct p
          */
         if ( !(shadow_mode_enabled(page_get_owner(page)) &&
                (page->count_info & PGC_page_table)) )
-            page->tlbflush_timestamp = tlbflush_current_time();
+            page_set_tlbflush_timestamp(page);
         wmb();
         page->u.inuse.type_info--;
     }
@@ -2510,7 +2510,7 @@
             if ( (!ptpg || !PGT_type_equal(x, ptpg->u.inuse.type_info)) &&
                  !(shadow_mode_enabled(page_get_owner(page)) &&
                    (page->count_info & PGC_page_table)) )
-                page->tlbflush_timestamp = tlbflush_current_time();
+                page_set_tlbflush_timestamp(page);
         }
 
         if ( likely((y = cmpxchg(&page->u.inuse.type_info, x, nx)) == x) )
--- xen/arch/x86/mm/shadow/common.c.orig
+++ xen/arch/x86/mm/shadow/common.c
@@ -1464,7 +1464,7 @@ void shadow_free(struct domain *d, mfn_t
          * TLBs when we reuse the page. Because the destructors leave the
          * contents of the pages in place, we can delay TLB flushes until
          * just before the allocator hands the page out again. */
-        sp->tlbflush_timestamp = tlbflush_current_time();
+        page_set_tlbflush_timestamp(sp);
         perfc_decr(shadow_alloc_count);
         page_list_add_tail(sp, &d->arch.paging.shadow.freelist);
         sp = next;
--- xen/common/page_alloc.c.orig
+++ xen/common/page_alloc.c
@@ -960,7 +960,7 @@ static void free_heap_pages(
         /* If a page has no owner it will need no safety TLB flush. */
         pg[i].u.free.need_tlbflush = (page_get_owner(&pg[i]) != NULL);
         if ( pg[i].u.free.need_tlbflush )
-            pg[i].tlbflush_timestamp = tlbflush_current_time();
+            page_set_tlbflush_timestamp(&pg[i]);
 
         /* This page is not a guest frame any more. */
         page_set_owner(&pg[i], NULL); /* set_gpfn_from_mfn snoops pg owner */
--- xen/include/asm-arm/flushtlb.h.orig
+++ xen/include/asm-arm/flushtlb.h
@@ -12,6 +12,11 @@ static inline void tlbflush_filter(cpuma
 
 #define tlbflush_current_time() (0)
 
+static inline void page_set_tlbflush_timestamp(struct page_info *page)
+{
+    page->tlbflush_timestamp = tlbflush_current_time();
+}
+
 #if defined(CONFIG_ARM_32)
 # include <asm/arm32/flushtlb.h>
 #elif defined(CONFIG_ARM_64)
--- xen/include/asm-x86/flushtlb.h.orig
+++ xen/include/asm-x86/flushtlb.h
@@ -23,6 +23,20 @@ DECLARE_PER_CPU(u32, tlbflush_time);
 
 #define tlbflush_current_time() tlbflush_clock
 
+static inline void page_set_tlbflush_timestamp(struct page_info *page)
+{
+    /*
+     * Prevent storing a stale time stamp, which could happen if an update
+     * to tlbflush_clock plus a subsequent flush IPI happen between the
+     * reading of tlbflush_clock and the writing of the struct page_info
+     * field.
+     */
+    ASSERT(local_irq_is_enabled());
+    local_irq_disable();
+    page->tlbflush_timestamp = tlbflush_current_time();
+    local_irq_enable();
+}
+
 /*
  * @cpu_stamp is the timestamp at last TLB flush for the CPU we are testing.
  * @lastuse_stamp is a timestamp taken when the PFN we are testing was last