update to 4.1.5

This integrates fixes for all vulnerabilities that had previously been
patched in pkgsrc.
Among the roughly 50 bug fixes and improvements since Xen 4.1.4:
 * ACPI APEI/ERST now working on production systems
 * Bug fixes for other low-level system state handling
 * Support for xz-compressed Dom0 and DomU kernels (see the sketch below)
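
For illustration, a minimal sketch of how the new xz support can be used for a
guest kernel, assuming an xm-style configuration; the kernel path, guest name,
and disk/vif entries are made up for this example and are not part of the commit:

  xz -9 -k /export/xen/netbsd-XEN3_DOMU        # keep the original, produce netbsd-XEN3_DOMU.xz

  # hypothetical guest config, e.g. /usr/pkg/etc/xen/demo
  kernel = "/export/xen/netbsd-XEN3_DOMU.xz"   # the domain builder can now decompress xz images
  memory = 512
  name = "demo"
  disk = [ 'file:/export/xen/demo.img,0x1,w' ]
  vif = [ 'bridge=bridge0' ]
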
drochner 2013-05-03 16:48:37 +00:00
parent d814cbdfc8
commit 1e646464a4
31 changed files with 1185 additions and 432 deletions

@@ -1,10 +1,9 @@
# $NetBSD: Makefile,v 1.20 2013/04/19 14:02:45 bouyer Exp $
# $NetBSD: Makefile,v 1.21 2013/05/03 16:48:37 drochner Exp $
#
VERSION= 4.1.4
VERSION= 4.1.5
DISTNAME= xen-${VERSION}
PKGNAME= xenkernel41-${VERSION}
PKGREVISION= 2
CATEGORIES= sysutils
MASTER_SITES= http://bits.xensource.com/oss-xen/release/${VERSION}/

@@ -1,16 +1,22 @@
$NetBSD: distinfo,v 1.16 2013/04/19 14:02:45 bouyer Exp $
$NetBSD: distinfo,v 1.17 2013/05/03 16:48:37 drochner Exp $
SHA1 (xen-4.1.4.tar.gz) = d5f1e9c9eeb96202dd827c196750530ffc64baab
RMD160 (xen-4.1.4.tar.gz) = e3cb379954c985354dfd7dfbed15eae43e73254d
Size (xen-4.1.4.tar.gz) = 10387283 bytes
SHA1 (patch-CVE-2012-5511_2) = a345d28d4a6dcc4bf203243f49d66b5479fdbf14
SHA1 (patch-CVE-2012-5634) = 2992ee4972ec733a80fa3841d12a70a9076625c0
SHA1 (patch-CVE-2013-1917-1) = 3ebd5e8c30e962e1dcb0e8cae642a583a6d160e9
SHA1 (patch-CVE-2013-1917-2) = 3b33b3430ac984cefb86617bbcf0b22e5b21427c
SHA1 (patch-CVE-2013-1917-3) = cf188803c62eb3b2fb722edc11980bd0731ab242
SHA1 (patch-CVE-2013-1920) = 116d04d095f1bd5296576bbb4c23b18c5ac628bf
SHA1 (patch-CVE-2013-1964-1) = f3f17d292677b1f9a6520543cf65c61910ed65f0
SHA1 (patch-CVE-2013-1964-2) = e8d05eb615c13608cb57c70d74cd8cdba80ba14a
SHA1 (xen-4.1.5.tar.gz) = 38f098cdbcf4612a6e059e6ad332e68bbfc8bf4d
RMD160 (xen-4.1.5.tar.gz) = 265d6a9faee6cf9314f4ed647604f7b43c327f52
Size (xen-4.1.5.tar.gz) = 10421420 bytes
SHA1 (patch-CVE-2013-1918_1) = 7403c3cc0b6481edf581591885843ee24154da06
SHA1 (patch-CVE-2013-1918_10) = 3aa6a519013fa3275ad389533e9ebcf0f29e24b7
SHA1 (patch-CVE-2013-1918_11) = 57ddcc8afcab390a1ac027a6a063677c89310662
SHA1 (patch-CVE-2013-1918_12) = 3d768316139ea189219de4dff13fc1190fbe27a2
SHA1 (patch-CVE-2013-1918_13) = bccb34626942b17ed0097977d5a16adcf7acd746
SHA1 (patch-CVE-2013-1918_2) = b5a5ddf9549ba4064f587fa6769730158a165bd6
SHA1 (patch-CVE-2013-1918_3) = bd6b95c3c359638f1cb95bb9b4119836cb421fea
SHA1 (patch-CVE-2013-1918_4) = e6e6648cdf81e543f5c410b1083b97bdd9a08ea6
SHA1 (patch-CVE-2013-1918_5) = 0bc2755b024d14d53e83b47621f6a550538b5347
SHA1 (patch-CVE-2013-1918_6) = 027711424053ebae1093ff7d4be2353113612b5c
SHA1 (patch-CVE-2013-1918_7) = 77414ec5283278433a15a96e91ed5842326370b9
SHA1 (patch-CVE-2013-1918_8) = 1abd13678a24365ab651483fb3e3feeb2c0248ce
SHA1 (patch-CVE-2013-1918_9) = 28a34dda25693501c78043f550009dba53fa9e62
SHA1 (patch-CVE-2013-1952) = b8976b41cc0520993f3c424030f7c9aa8a9be1f3
SHA1 (patch-Config.mk) = a43ed1b3304d6383dc093acd128a7f373d0ca266
SHA1 (patch-xen_Makefile) = d1c7e4860221f93d90818f45a77748882486f92b
SHA1 (patch-xen_arch_x86_Rules.mk) = 6b9b4bfa28924f7d3f6c793a389f1a7ac9d228e2

@@ -1,16 +0,0 @@
$NetBSD: patch-CVE-2012-5511_2,v 1.2 2013/01/17 19:37:55 drochner Exp $
see http://lists.xen.org/archives/html/xen-devel/2013-01/msg01193.html
--- xen/arch/x86/mm/paging.c.orig 2012-12-18 12:54:25.000000000 +0000
+++ xen/arch/x86/mm/paging.c
@@ -534,7 +534,8 @@ int paging_log_dirty_range(struct domain
size = ((nr + BITS_PER_LONG - 1) / BITS_PER_LONG) * sizeof (long);
rv = 0;
- for ( off = 0; !rv && off < size; off += sizeof zeroes )
+ off = 0;
+ while ( !rv && off < size )
{
int todo = min(size - off, (int) PAGE_SIZE);
if ( copy_to_guest_offset(dirty_bitmap, off, zeroes, todo) )

@@ -1,15 +0,0 @@
$NetBSD: patch-CVE-2012-5634,v 1.1 2013/01/17 19:37:55 drochner Exp $
see http://lists.xen.org/archives/html/xen-devel/2013-01/msg00445.html
--- xen/drivers/passthrough/vtd/intremap.c.orig 2012-12-18 12:54:27.000000000 +0000
+++ xen/drivers/passthrough/vtd/intremap.c
@@ -499,7 +499,7 @@ static void set_msi_source_id(struct pci
set_ire_sid(ire, SVT_VERIFY_BUS, SQ_ALL_16,
(bus << 8) | pdev->bus);
else if ( pdev_type(bus, devfn) == DEV_TYPE_LEGACY_PCI_BRIDGE )
- set_ire_sid(ire, SVT_VERIFY_BUS, SQ_ALL_16,
+ set_ire_sid(ire, SVT_VERIFY_SID_SQ, SQ_ALL_16,
PCI_BDF2(bus, devfn));
}
break;

@@ -1,21 +0,0 @@
$NetBSD: patch-CVE-2013-1917-1,v 1.1 2013/04/19 14:02:45 bouyer Exp $
http://lists.xen.org/archives/html/xen-announce/2013-04/msg00005.html
--- xen/arch/x86/acpi/suspend.c.orig
+++ xen/arch/x86/acpi/suspend.c
@@ -81,8 +81,12 @@ void restore_rest_processor_state(void)
}
#else /* !defined(CONFIG_X86_64) */
- if ( supervisor_mode_kernel && cpu_has_sep )
- wrmsr(MSR_IA32_SYSENTER_ESP, &this_cpu(init_tss).esp1, 0);
+ if ( cpu_has_sep )
+ {
+ wrmsr(MSR_IA32_SYSENTER_CS, 0, 0);
+ if ( supervisor_mode_kernel )
+ wrmsr(MSR_IA32_SYSENTER_ESP, &this_cpu(init_tss).esp1, 0);
+ }
#endif
/* Maybe load the debug registers. */

@@ -1,19 +0,0 @@
$NetBSD: patch-CVE-2013-1917-2,v 1.1 2013/04/19 14:02:45 bouyer Exp $
http://lists.xen.org/archives/html/xen-announce/2013-04/msg00005.html
--- xen/arch/x86/cpu/common.c.orig
+++ xen/arch/x86/cpu/common.c
@@ -715,8 +715,11 @@ void __cpuinit cpu_init(void)
#if defined(CONFIG_X86_32)
t->ss0 = __HYPERVISOR_DS;
t->esp0 = get_stack_bottom();
- if ( supervisor_mode_kernel && cpu_has_sep )
+ if ( cpu_has_sep ) {
+ wrmsr(MSR_IA32_SYSENTER_CS, 0, 0);
+ if ( supervisor_mode_kernel )
wrmsr(MSR_IA32_SYSENTER_ESP, &t->esp1, 0);
+ }
#elif defined(CONFIG_X86_64)
/* Bottom-of-stack must be 16-byte aligned! */
BUG_ON((get_stack_bottom() & 15) != 0);

@@ -1,21 +0,0 @@
$NetBSD: patch-CVE-2013-1917-3,v 1.1 2013/04/19 14:02:45 bouyer Exp $
http://lists.xen.org/archives/html/xen-announce/2013-04/msg00005.html
--- xen/arch/x86/x86_64/entry.S.orig
+++ xen/arch/x86/x86_64/entry.S
@@ -287,7 +287,14 @@ sysenter_eflags_saved:
movl $3,UREGS_cs(%rsp) /* ring 3 null cs */
movq VCPU_sysenter_addr(%rbx),%rax
setne %cl
+ testl $X86_EFLAGS_NT,UREGS_eflags(%rsp)
leaq VCPU_trap_bounce(%rbx),%rdx
+UNLIKELY_START(nz, sysenter_nt_set)
+ pushfq
+ andl $~X86_EFLAGS_NT,(%rsp)
+ popfq
+ xorl %eax,%eax
+UNLIKELY_END(sysenter_nt_set)
testq %rax,%rax
leal (,%rcx,TBF_INTERRUPT),%ecx
UNLIKELY_START(z, sysenter_gpf)

@@ -0,0 +1,24 @@
$NetBSD: patch-CVE-2013-1918_1,v 1.1 2013/05/03 16:48:37 drochner Exp $
see http://lists.xen.org/archives/html/xen-announce/2013-05/msg00000.html
--- xen/include/xen/domain.h.orig 2013-04-23 16:44:20.000000000 +0000
+++ xen/include/xen/domain.h
@@ -15,7 +15,7 @@ struct vcpu *alloc_vcpu(
int boot_vcpu(
struct domain *d, int vcpuid, vcpu_guest_context_u ctxt);
struct vcpu *alloc_dom0_vcpu0(void);
-void vcpu_reset(struct vcpu *v);
+int vcpu_reset(struct vcpu *);
struct xen_domctl_getdomaininfo;
void getdomaininfo(struct domain *d, struct xen_domctl_getdomaininfo *info);
@@ -57,7 +57,7 @@ void arch_dump_vcpu_info(struct vcpu *v)
void arch_dump_domain_info(struct domain *d);
-void arch_vcpu_reset(struct vcpu *v);
+int arch_vcpu_reset(struct vcpu *);
bool_t domctl_lock_acquire(void);
void domctl_lock_release(void);

@@ -0,0 +1,21 @@
$NetBSD: patch-CVE-2013-1918_10,v 1.1 2013/05/03 16:48:37 drochner Exp $
--- xen/arch/x86/traps.c.orig 2013-04-23 16:44:20.000000000 +0000
+++ xen/arch/x86/traps.c
@@ -2317,8 +2317,15 @@ static int emulate_privileged_op(struct
rc = new_guest_cr3(gmfn_to_mfn(v->domain, compat_cr3_to_pfn(*reg)));
#endif
domain_unlock(v->domain);
- if ( rc == 0 ) /* not okay */
+ switch ( rc )
+ {
+ case 0:
+ break;
+ case -EAGAIN: /* retry after preemption */
+ goto skip;
+ default: /* not okay */
goto fail;
+ }
break;
case 4: /* Write CR4 */

@@ -0,0 +1,261 @@
$NetBSD: patch-CVE-2013-1918_11,v 1.1 2013/05/03 16:48:37 drochner Exp $
--- xen/arch/x86/domain.c.orig 2013-05-03 13:27:23.000000000 +0000
+++ xen/arch/x86/domain.c
@@ -70,8 +70,6 @@ void (*dead_idle) (void) __read_mostly =
static void paravirt_ctxt_switch_from(struct vcpu *v);
static void paravirt_ctxt_switch_to(struct vcpu *v);
-static void vcpu_destroy_pagetables(struct vcpu *v);
-
static void continue_idle_domain(struct vcpu *v)
{
reset_stack_and_jump(idle_loop);
@@ -678,6 +676,7 @@ int arch_set_info_guest(
{
struct domain *d = v->domain;
unsigned long cr3_pfn = INVALID_MFN;
+ struct page_info *cr3_page;
unsigned long flags, cr4;
int i, rc = 0, compat;
@@ -817,72 +816,103 @@ int arch_set_info_guest(
if ( rc != 0 )
return rc;
+ set_bit(_VPF_in_reset, &v->pause_flags);
+
if ( !compat )
- {
cr3_pfn = gmfn_to_mfn(d, xen_cr3_to_pfn(c.nat->ctrlreg[3]));
+#ifdef __x86_64__
+ else
+ cr3_pfn = gmfn_to_mfn(d, compat_cr3_to_pfn(c.cmp->ctrlreg[3]));
+#endif
+ cr3_page = mfn_to_page(cr3_pfn);
- if ( !mfn_valid(cr3_pfn) ||
- (paging_mode_refcounts(d)
- ? !get_page(mfn_to_page(cr3_pfn), d)
- : !get_page_and_type(mfn_to_page(cr3_pfn), d,
- PGT_base_page_table)) )
- {
- destroy_gdt(v);
- return -EINVAL;
- }
+ if ( !mfn_valid(cr3_pfn) || !get_page(cr3_page, d) )
+ {
+ cr3_page = NULL;
+ rc = -EINVAL;
+ }
+ else if ( paging_mode_refcounts(d) )
+ /* nothing */;
+ else if ( cr3_page == v->arch.old_guest_table )
+ {
+ v->arch.old_guest_table = NULL;
+ put_page(cr3_page);
+ }
+ else
+ {
+ /*
+ * Since v->arch.guest_table{,_user} are both NULL, this effectively
+ * is just a call to put_old_guest_table().
+ */
+ if ( !compat )
+ rc = vcpu_destroy_pagetables(v);
+ if ( !rc )
+ rc = get_page_type_preemptible(cr3_page,
+ !compat ? PGT_root_page_table
+ : PGT_l3_page_table);
+ if ( rc == -EINTR )
+ rc = -EAGAIN;
+ }
+ if ( rc )
+ /* handled below */;
+ else if ( !compat )
+ {
v->arch.guest_table = pagetable_from_pfn(cr3_pfn);
#ifdef __x86_64__
if ( c.nat->ctrlreg[1] )
{
cr3_pfn = gmfn_to_mfn(d, xen_cr3_to_pfn(c.nat->ctrlreg[1]));
+ cr3_page = mfn_to_page(cr3_pfn);
- if ( !mfn_valid(cr3_pfn) ||
- (paging_mode_refcounts(d)
- ? !get_page(mfn_to_page(cr3_pfn), d)
- : !get_page_and_type(mfn_to_page(cr3_pfn), d,
- PGT_base_page_table)) )
+ if ( !mfn_valid(cr3_pfn) || !get_page(cr3_page, d) )
{
- cr3_pfn = pagetable_get_pfn(v->arch.guest_table);
- v->arch.guest_table = pagetable_null();
- if ( paging_mode_refcounts(d) )
- put_page(mfn_to_page(cr3_pfn));
- else
- put_page_and_type(mfn_to_page(cr3_pfn));
- destroy_gdt(v);
- return -EINVAL;
+ cr3_page = NULL;
+ rc = -EINVAL;
+ }
+ else if ( !paging_mode_refcounts(d) )
+ {
+ rc = get_page_type_preemptible(cr3_page, PGT_root_page_table);
+ switch ( rc )
+ {
+ case -EINTR:
+ rc = -EAGAIN;
+ case -EAGAIN:
+ v->arch.old_guest_table =
+ pagetable_get_page(v->arch.guest_table);
+ v->arch.guest_table = pagetable_null();
+ break;
+ }
}
- v->arch.guest_table_user = pagetable_from_pfn(cr3_pfn);
+ if ( !rc )
+ v->arch.guest_table_user = pagetable_from_pfn(cr3_pfn);
}
else if ( !(flags & VGCF_in_kernel) )
{
- destroy_gdt(v);
- return -EINVAL;
+ cr3_page = NULL;
+ rc = -EINVAL;
}
}
else
{
l4_pgentry_t *l4tab;
- cr3_pfn = gmfn_to_mfn(d, compat_cr3_to_pfn(c.cmp->ctrlreg[3]));
-
- if ( !mfn_valid(cr3_pfn) ||
- (paging_mode_refcounts(d)
- ? !get_page(mfn_to_page(cr3_pfn), d)
- : !get_page_and_type(mfn_to_page(cr3_pfn), d,
- PGT_l3_page_table)) )
- {
- destroy_gdt(v);
- return -EINVAL;
- }
-
l4tab = __va(pagetable_get_paddr(v->arch.guest_table));
*l4tab = l4e_from_pfn(
cr3_pfn, _PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED);
#endif
}
+ if ( rc )
+ {
+ if ( cr3_page )
+ put_page(cr3_page);
+ destroy_gdt(v);
+ return rc;
+ }
+
+ clear_bit(_VPF_in_reset, &v->pause_flags);
if ( v->vcpu_id == 0 )
update_domain_wallclock_time(d);
@@ -904,17 +934,16 @@ int arch_set_info_guest(
#undef c
}
-void arch_vcpu_reset(struct vcpu *v)
+int arch_vcpu_reset(struct vcpu *v)
{
if ( !is_hvm_vcpu(v) )
{
destroy_gdt(v);
- vcpu_destroy_pagetables(v);
- }
- else
- {
- vcpu_end_shutdown_deferral(v);
+ return vcpu_destroy_pagetables(v);
}
+
+ vcpu_end_shutdown_deferral(v);
+ return 0;
}
/*
@@ -1917,63 +1946,6 @@ static int relinquish_memory(
return ret;
}
-static void vcpu_destroy_pagetables(struct vcpu *v)
-{
- struct domain *d = v->domain;
- unsigned long pfn;
-
-#ifdef __x86_64__
- if ( is_pv_32on64_vcpu(v) )
- {
- pfn = l4e_get_pfn(*(l4_pgentry_t *)
- __va(pagetable_get_paddr(v->arch.guest_table)));
-
- if ( pfn != 0 )
- {
- if ( paging_mode_refcounts(d) )
- put_page(mfn_to_page(pfn));
- else
- put_page_and_type(mfn_to_page(pfn));
- }
-
- l4e_write(
- (l4_pgentry_t *)__va(pagetable_get_paddr(v->arch.guest_table)),
- l4e_empty());
-
- v->arch.cr3 = 0;
- return;
- }
-#endif
-
- pfn = pagetable_get_pfn(v->arch.guest_table);
- if ( pfn != 0 )
- {
- if ( paging_mode_refcounts(d) )
- put_page(mfn_to_page(pfn));
- else
- put_page_and_type(mfn_to_page(pfn));
- v->arch.guest_table = pagetable_null();
- }
-
-#ifdef __x86_64__
- /* Drop ref to guest_table_user (from MMUEXT_NEW_USER_BASEPTR) */
- pfn = pagetable_get_pfn(v->arch.guest_table_user);
- if ( pfn != 0 )
- {
- if ( !is_pv_32bit_vcpu(v) )
- {
- if ( paging_mode_refcounts(d) )
- put_page(mfn_to_page(pfn));
- else
- put_page_and_type(mfn_to_page(pfn));
- }
- v->arch.guest_table_user = pagetable_null();
- }
-#endif
-
- v->arch.cr3 = 0;
-}
-
int domain_relinquish_resources(struct domain *d)
{
int ret;
@@ -1992,7 +1964,9 @@ int domain_relinquish_resources(struct d
for_each_vcpu ( d, v )
{
/* Drop the in-use references to page-table bases. */
- vcpu_destroy_pagetables(v);
+ ret = vcpu_destroy_pagetables(v);
+ if ( ret )
+ return ret;
/*
* Relinquish GDT mappings. No need for explicit unmapping of the

@@ -0,0 +1,19 @@
$NetBSD: patch-CVE-2013-1918_12,v 1.1 2013/05/03 16:48:37 drochner Exp $
--- xen/arch/x86/hvm/vlapic.c.orig 2013-04-23 16:44:20.000000000 +0000
+++ xen/arch/x86/hvm/vlapic.c
@@ -252,10 +252,13 @@ static void vlapic_init_sipi_action(unsi
{
case APIC_DM_INIT: {
bool_t fpu_initialised;
+ int rc;
+
domain_lock(target->domain);
/* Reset necessary VCPU state. This does not include FPU state. */
fpu_initialised = target->fpu_initialised;
- vcpu_reset(target);
+ rc = vcpu_reset(target);
+ ASSERT(!rc);
target->fpu_initialised = fpu_initialised;
vlapic_reset(vcpu_vlapic(target));
domain_unlock(target->domain);

@@ -0,0 +1,17 @@
$NetBSD: patch-CVE-2013-1918_13,v 1.1 2013/05/03 16:48:37 drochner Exp $
--- xen/arch/x86/hvm/hvm.c.orig 2013-04-23 16:44:20.000000000 +0000
+++ xen/arch/x86/hvm/hvm.c
@@ -3083,8 +3083,11 @@ static void hvm_s3_suspend(struct domain
for_each_vcpu ( d, v )
{
+ int rc;
+
vlapic_reset(vcpu_vlapic(v));
- vcpu_reset(v);
+ rc = vcpu_reset(v);
+ ASSERT(!rc);
}
vpic_reset(d);

@@ -0,0 +1,14 @@
$NetBSD: patch-CVE-2013-1918_2,v 1.1 2013/05/03 16:48:37 drochner Exp $
--- xen/include/xen/sched.h.orig 2013-04-23 16:44:20.000000000 +0000
+++ xen/include/xen/sched.h
@@ -597,6 +597,9 @@ extern struct domain *domain_list;
/* VCPU is blocked on memory-event ring. */
#define _VPF_mem_event 4
#define VPF_mem_event (1UL<<_VPF_mem_event)
+ /* VCPU is being reset. */
+#define _VPF_in_reset 7
+#define VPF_in_reset (1UL<<_VPF_in_reset)
static inline int vcpu_runnable(struct vcpu *v)
{

@@ -0,0 +1,12 @@
$NetBSD: patch-CVE-2013-1918_3,v 1.1 2013/05/03 16:48:38 drochner Exp $
--- xen/include/asm-x86/domain.h.orig 2013-04-23 16:44:20.000000000 +0000
+++ xen/include/asm-x86/domain.h
@@ -405,6 +405,7 @@ struct arch_vcpu
pagetable_t guest_table_user; /* (MFN) x86/64 user-space pagetable */
#endif
pagetable_t guest_table; /* (MFN) guest notion of cr3 */
+ struct page_info *old_guest_table; /* partially destructed pagetable */
/* guest_table holds a ref to the page, and also a type-count unless
* shadow refcounts are in use */
pagetable_t shadow_table[4]; /* (MFN) shadow(s) of guest */

@@ -0,0 +1,12 @@
$NetBSD: patch-CVE-2013-1918_4,v 1.1 2013/05/03 16:48:38 drochner Exp $
--- xen/include/asm-x86/mm.h.orig 2013-05-03 13:46:46.000000000 +0000
+++ xen/include/asm-x86/mm.h
@@ -555,6 +555,7 @@ void audit_domains(void);
int new_guest_cr3(unsigned long pfn);
void make_cr3(struct vcpu *v, unsigned long mfn);
void update_cr3(struct vcpu *v);
+int vcpu_destroy_pagetables(struct vcpu *);
void propagate_page_fault(unsigned long addr, u16 error_code);
void *do_page_walk(struct vcpu *v, unsigned long addr);

@@ -0,0 +1,15 @@
$NetBSD: patch-CVE-2013-1918_5,v 1.1 2013/05/03 16:48:38 drochner Exp $
--- xen/common/compat/domain.c.orig 2013-04-23 16:44:20.000000000 +0000
+++ xen/common/compat/domain.c
@@ -52,6 +52,10 @@ int compat_vcpu_op(int cmd, int vcpuid,
rc = boot_vcpu(d, vcpuid, cmp_ctxt);
domain_unlock(d);
+ if ( rc == -EAGAIN )
+ rc = hypercall_create_continuation(__HYPERVISOR_vcpu_op, "iih",
+ cmd, vcpuid, arg);
+
xfree(cmp_ctxt);
break;
}

@@ -0,0 +1,28 @@
$NetBSD: patch-CVE-2013-1918_6,v 1.1 2013/05/03 16:48:38 drochner Exp $
--- xen/common/domctl.c.orig 2013-05-03 13:37:03.000000000 +0000
+++ xen/common/domctl.c
@@ -286,8 +286,10 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domc
if ( guest_handle_is_null(op->u.vcpucontext.ctxt) )
{
- vcpu_reset(v);
- ret = 0;
+ ret = vcpu_reset(v);
+ if ( ret == -EAGAIN )
+ ret = hypercall_create_continuation(
+ __HYPERVISOR_domctl, "h", u_domctl);
goto svc_out;
}
@@ -316,6 +318,10 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domc
domain_pause(d);
ret = arch_set_info_guest(v, c);
domain_unpause(d);
+
+ if ( ret == -EAGAIN )
+ ret = hypercall_create_continuation(
+ __HYPERVISOR_domctl, "h", u_domctl);
}
svc_out:

@@ -0,0 +1,51 @@
$NetBSD: patch-CVE-2013-1918_7,v 1.1 2013/05/03 16:48:38 drochner Exp $
--- xen/common/domain.c.orig 2013-05-03 13:28:00.000000000 +0000
+++ xen/common/domain.c
@@ -770,14 +770,18 @@ int boot_vcpu(struct domain *d, int vcpu
return arch_set_info_guest(v, ctxt);
}
-void vcpu_reset(struct vcpu *v)
+int vcpu_reset(struct vcpu *v)
{
struct domain *d = v->domain;
+ int rc;
vcpu_pause(v);
domain_lock(d);
- arch_vcpu_reset(v);
+ set_bit(_VPF_in_reset, &v->pause_flags);
+ rc = arch_vcpu_reset(v);
+ if ( rc )
+ goto out_unlock;
set_bit(_VPF_down, &v->pause_flags);
@@ -793,9 +797,13 @@ void vcpu_reset(struct vcpu *v)
#endif
cpus_clear(v->cpu_affinity_tmp);
clear_bit(_VPF_blocked, &v->pause_flags);
+ clear_bit(_VPF_in_reset, &v->pause_flags);
+ out_unlock:
domain_unlock(v->domain);
vcpu_unpause(v);
+
+ return rc;
}
@@ -834,6 +842,11 @@ long do_vcpu_op(int cmd, int vcpuid, XEN
domain_unlock(d);
xfree(ctxt);
+
+ if ( rc == -EAGAIN )
+ rc = hypercall_create_continuation(__HYPERVISOR_vcpu_op, "iih",
+ cmd, vcpuid, arg);
+
break;
case VCPUOP_up:

@@ -0,0 +1,48 @@
$NetBSD: patch-CVE-2013-1918_8,v 1.1 2013/05/03 16:48:38 drochner Exp $
--- xen/arch/x86/x86_64/compat/mm.c.orig 2013-05-03 13:37:44.000000000 +0000
+++ xen/arch/x86/x86_64/compat/mm.c
@@ -222,6 +222,13 @@ int compat_mmuext_op(XEN_GUEST_HANDLE(mm
int rc = 0;
XEN_GUEST_HANDLE(mmuext_op_t) nat_ops;
+ if ( unlikely(count == MMU_UPDATE_PREEMPTED) &&
+ likely(guest_handle_is_null(cmp_uops)) )
+ {
+ set_xen_guest_handle(nat_ops, NULL);
+ return do_mmuext_op(nat_ops, count, pdone, foreigndom);
+ }
+
preempt_mask = count & MMU_UPDATE_PREEMPTED;
count ^= preempt_mask;
@@ -319,17 +326,23 @@ int compat_mmuext_op(XEN_GUEST_HANDLE(mm
: mcs->call.args[1];
unsigned int left = arg1 & ~MMU_UPDATE_PREEMPTED;
- BUG_ON(left == arg1);
+ BUG_ON(left == arg1 && left != i);
BUG_ON(left > count);
guest_handle_add_offset(nat_ops, i - left);
guest_handle_subtract_offset(cmp_uops, left);
left = 1;
- BUG_ON(!hypercall_xlat_continuation(&left, 0x01, nat_ops, cmp_uops));
- BUG_ON(left != arg1);
- if (!test_bit(_MCSF_in_multicall, &mcs->flags))
- regs->_ecx += count - i;
+ if ( arg1 != MMU_UPDATE_PREEMPTED )
+ {
+ BUG_ON(!hypercall_xlat_continuation(&left, 0x01, nat_ops,
+ cmp_uops));
+ if ( !test_bit(_MCSF_in_multicall, &mcs->flags) )
+ regs->_ecx += count - i;
+ else
+ mcs->compat_call.args[1] += count - i;
+ }
else
- mcs->compat_call.args[1] += count - i;
+ BUG_ON(hypercall_xlat_continuation(&left, 0));
+ BUG_ON(left != arg1);
}
else
BUG_ON(err > 0);

@@ -0,0 +1,593 @@
$NetBSD: patch-CVE-2013-1918_9,v 1.1 2013/05/03 16:48:38 drochner Exp $
--- xen/arch/x86/mm.c.orig 2013-05-03 13:38:09.000000000 +0000
+++ xen/arch/x86/mm.c
@@ -1183,7 +1183,16 @@ static int put_page_from_l3e(l3_pgentry_
#endif
if ( unlikely(partial > 0) )
+ {
+ ASSERT(preemptible >= 0);
return __put_page_type(l3e_get_page(l3e), preemptible);
+ }
+
+ if ( preemptible < 0 )
+ {
+ current->arch.old_guest_table = l3e_get_page(l3e);
+ return 0;
+ }
return put_page_and_type_preemptible(l3e_get_page(l3e), preemptible);
}
@@ -1196,7 +1205,17 @@ static int put_page_from_l4e(l4_pgentry_
(l4e_get_pfn(l4e) != pfn) )
{
if ( unlikely(partial > 0) )
+ {
+ ASSERT(preemptible >= 0);
return __put_page_type(l4e_get_page(l4e), preemptible);
+ }
+
+ if ( preemptible < 0 )
+ {
+ current->arch.old_guest_table = l4e_get_page(l4e);
+ return 0;
+ }
+
return put_page_and_type_preemptible(l4e_get_page(l4e), preemptible);
}
return 1;
@@ -1486,12 +1505,17 @@ static int alloc_l3_table(struct page_in
if ( rc < 0 && rc != -EAGAIN && rc != -EINTR )
{
MEM_LOG("Failure in alloc_l3_table: entry %d", i);
+ if ( i )
+ {
+ page->nr_validated_ptes = i;
+ page->partial_pte = 0;
+ current->arch.old_guest_table = page;
+ }
while ( i-- > 0 )
{
if ( !is_guest_l3_slot(i) )
continue;
unadjust_guest_l3e(pl3e[i], d);
- put_page_from_l3e(pl3e[i], pfn, 0, 0);
}
}
@@ -1521,22 +1545,24 @@ static int alloc_l4_table(struct page_in
page->nr_validated_ptes = i;
page->partial_pte = partial ?: 1;
}
- else if ( rc == -EINTR )
+ else if ( rc < 0 )
{
+ if ( rc != -EINTR )
+ MEM_LOG("Failure in alloc_l4_table: entry %d", i);
if ( i )
{
page->nr_validated_ptes = i;
page->partial_pte = 0;
- rc = -EAGAIN;
+ if ( rc == -EINTR )
+ rc = -EAGAIN;
+ else
+ {
+ if ( current->arch.old_guest_table )
+ page->nr_validated_ptes++;
+ current->arch.old_guest_table = page;
+ }
}
}
- else if ( rc < 0 )
- {
- MEM_LOG("Failure in alloc_l4_table: entry %d", i);
- while ( i-- > 0 )
- if ( is_guest_l4_slot(d, i) )
- put_page_from_l4e(pl4e[i], pfn, 0, 0);
- }
if ( rc < 0 )
return rc;
@@ -1966,7 +1992,7 @@ static int mod_l3_entry(l3_pgentry_t *pl
pae_flush_pgd(pfn, pgentry_ptr_to_slot(pl3e), nl3e);
}
- put_page_from_l3e(ol3e, pfn, 0, 0);
+ put_page_from_l3e(ol3e, pfn, 0, -preemptible);
return rc;
}
@@ -2029,7 +2055,7 @@ static int mod_l4_entry(l4_pgentry_t *pl
return -EFAULT;
}
- put_page_from_l4e(ol4e, pfn, 0, 0);
+ put_page_from_l4e(ol4e, pfn, 0, -preemptible);
return rc;
}
@@ -2187,7 +2213,15 @@ static int alloc_page_type(struct page_i
PRtype_info ": caf=%08lx taf=%" PRtype_info,
page_to_mfn(page), get_gpfn_from_mfn(page_to_mfn(page)),
type, page->count_info, page->u.inuse.type_info);
- page->u.inuse.type_info = 0;
+ if ( page != current->arch.old_guest_table )
+ page->u.inuse.type_info = 0;
+ else
+ {
+ ASSERT((page->u.inuse.type_info &
+ (PGT_count_mask | PGT_validated)) == 1);
+ get_page_light(page);
+ page->u.inuse.type_info |= PGT_partial;
+ }
}
else
{
@@ -2725,49 +2759,150 @@ static void put_superpage(unsigned long
#endif
+static int put_old_guest_table(struct vcpu *v)
+{
+ int rc;
+
+ if ( !v->arch.old_guest_table )
+ return 0;
+
+ switch ( rc = put_page_and_type_preemptible(v->arch.old_guest_table, 1) )
+ {
+ case -EINTR:
+ case -EAGAIN:
+ return -EAGAIN;
+ }
+
+ v->arch.old_guest_table = NULL;
+
+ return rc;
+}
+
+int vcpu_destroy_pagetables(struct vcpu *v)
+{
+ unsigned long mfn = pagetable_get_pfn(v->arch.guest_table);
+ struct page_info *page;
+ int rc = put_old_guest_table(v);
+
+ if ( rc )
+ return rc;
+
+#ifdef __x86_64__
+ if ( is_pv_32on64_vcpu(v) )
+ mfn = l4e_get_pfn(*(l4_pgentry_t *)mfn_to_virt(mfn));
+#endif
+
+ if ( mfn )
+ {
+ page = mfn_to_page(mfn);
+ if ( paging_mode_refcounts(v->domain) )
+ put_page(page);
+ else
+ rc = put_page_and_type_preemptible(page, 1);
+ }
+
+#ifdef __x86_64__
+ if ( is_pv_32on64_vcpu(v) )
+ {
+ if ( !rc )
+ l4e_write(
+ (l4_pgentry_t *)__va(pagetable_get_paddr(v->arch.guest_table)),
+ l4e_empty());
+ }
+ else
+#endif
+ if ( !rc )
+ {
+ v->arch.guest_table = pagetable_null();
+
+#ifdef __x86_64__
+ /* Drop ref to guest_table_user (from MMUEXT_NEW_USER_BASEPTR) */
+ mfn = pagetable_get_pfn(v->arch.guest_table_user);
+ if ( mfn )
+ {
+ page = mfn_to_page(mfn);
+ if ( paging_mode_refcounts(v->domain) )
+ put_page(page);
+ else
+ rc = put_page_and_type_preemptible(page, 1);
+ }
+ if ( !rc )
+ v->arch.guest_table_user = pagetable_null();
+#endif
+ }
+
+ v->arch.cr3 = 0;
+
+ return rc;
+}
int new_guest_cr3(unsigned long mfn)
{
struct vcpu *curr = current;
struct domain *d = curr->domain;
- int okay;
+ int rc;
unsigned long old_base_mfn;
#ifdef __x86_64__
if ( is_pv_32on64_domain(d) )
{
- okay = paging_mode_refcounts(d)
- ? 0 /* Old code was broken, but what should it be? */
- : mod_l4_entry(
+ rc = paging_mode_refcounts(d)
+ ? -EINVAL /* Old code was broken, but what should it be? */
+ : mod_l4_entry(
__va(pagetable_get_paddr(curr->arch.guest_table)),
l4e_from_pfn(
mfn,
(_PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED)),
- pagetable_get_pfn(curr->arch.guest_table), 0, 0, curr) == 0;
- if ( unlikely(!okay) )
+ pagetable_get_pfn(curr->arch.guest_table), 0, 1, curr);
+ switch ( rc )
{
+ case 0:
+ break;
+ case -EINTR:
+ case -EAGAIN:
+ return -EAGAIN;
+ default:
MEM_LOG("Error while installing new compat baseptr %lx", mfn);
- return 0;
+ return rc;
}
invalidate_shadow_ldt(curr, 0);
write_ptbase(curr);
- return 1;
+ return 0;
}
#endif
- okay = paging_mode_refcounts(d)
- ? get_page_from_pagenr(mfn, d)
- : !get_page_and_type_from_pagenr(mfn, PGT_root_page_table, d, 0, 0);
- if ( unlikely(!okay) )
+ rc = put_old_guest_table(curr);
+ if ( unlikely(rc) )
+ return rc;
+
+ old_base_mfn = pagetable_get_pfn(curr->arch.guest_table);
+ /*
+ * This is particularly important when getting restarted after the
+ * previous attempt got preempted in the put-old-MFN phase.
+ */
+ if ( old_base_mfn == mfn )
{
- MEM_LOG("Error while installing new baseptr %lx", mfn);
+ write_ptbase(curr);
return 0;
}
- invalidate_shadow_ldt(curr, 0);
+ rc = paging_mode_refcounts(d)
+ ? (get_page_from_pagenr(mfn, d) ? 0 : -EINVAL)
+ : get_page_and_type_from_pagenr(mfn, PGT_root_page_table, d, 0, 1);
+ switch ( rc )
+ {
+ case 0:
+ break;
+ case -EINTR:
+ case -EAGAIN:
+ return -EAGAIN;
+ default:
+ MEM_LOG("Error while installing new baseptr %lx", mfn);
+ return rc;
+ }
- old_base_mfn = pagetable_get_pfn(curr->arch.guest_table);
+ invalidate_shadow_ldt(curr, 0);
curr->arch.guest_table = pagetable_from_pfn(mfn);
update_cr3(curr);
@@ -2776,13 +2911,25 @@ int new_guest_cr3(unsigned long mfn)
if ( likely(old_base_mfn != 0) )
{
+ struct page_info *page = mfn_to_page(old_base_mfn);
+
if ( paging_mode_refcounts(d) )
- put_page(mfn_to_page(old_base_mfn));
+ put_page(page);
else
- put_page_and_type(mfn_to_page(old_base_mfn));
+ switch ( rc = put_page_and_type_preemptible(page, 1) )
+ {
+ case -EINTR:
+ rc = -EAGAIN;
+ case -EAGAIN:
+ curr->arch.old_guest_table = page;
+ break;
+ default:
+ BUG_ON(rc);
+ break;
+ }
}
- return 1;
+ return rc;
}
static struct domain *get_pg_owner(domid_t domid)
@@ -2911,12 +3058,29 @@ long do_mmuext_op(
unsigned int foreigndom)
{
struct mmuext_op op;
- int rc = 0, i = 0, okay;
unsigned long type;
- unsigned int done = 0;
+ unsigned int i = 0, done = 0;
struct vcpu *curr = current;
struct domain *d = curr->domain;
struct domain *pg_owner;
+ int okay, rc = put_old_guest_table(curr);
+
+ if ( unlikely(rc) )
+ {
+ if ( likely(rc == -EAGAIN) )
+ rc = hypercall_create_continuation(
+ __HYPERVISOR_mmuext_op, "hihi", uops, count, pdone,
+ foreigndom);
+ return rc;
+ }
+
+ if ( unlikely(count == MMU_UPDATE_PREEMPTED) &&
+ likely(guest_handle_is_null(uops)) )
+ {
+ /* See the curr->arch.old_guest_table related
+ * hypercall_create_continuation() below. */
+ return (int)foreigndom;
+ }
if ( unlikely(count & MMU_UPDATE_PREEMPTED) )
{
@@ -2941,7 +3105,7 @@ long do_mmuext_op(
for ( i = 0; i < count; i++ )
{
- if ( hypercall_preempt_check() )
+ if ( curr->arch.old_guest_table || hypercall_preempt_check() )
{
rc = -EAGAIN;
break;
@@ -3001,21 +3165,17 @@ long do_mmuext_op(
page = mfn_to_page(mfn);
if ( (rc = xsm_memory_pin_page(d, page)) != 0 )
- {
- put_page_and_type(page);
okay = 0;
- break;
- }
-
- if ( unlikely(test_and_set_bit(_PGT_pinned,
- &page->u.inuse.type_info)) )
+ else if ( unlikely(test_and_set_bit(_PGT_pinned,
+ &page->u.inuse.type_info)) )
{
MEM_LOG("Mfn %lx already pinned", mfn);
- put_page_and_type(page);
okay = 0;
- break;
}
+ if ( unlikely(!okay) )
+ goto pin_drop;
+
/* A page is dirtied when its pin status is set. */
paging_mark_dirty(pg_owner, mfn);
@@ -3029,7 +3189,13 @@ long do_mmuext_op(
&page->u.inuse.type_info));
spin_unlock(&pg_owner->page_alloc_lock);
if ( drop_ref )
- put_page_and_type(page);
+ {
+ pin_drop:
+ if ( type == PGT_l1_page_table )
+ put_page_and_type(page);
+ else
+ curr->arch.old_guest_table = page;
+ }
}
break;
@@ -3059,7 +3225,17 @@ long do_mmuext_op(
break;
}
- put_page_and_type(page);
+ switch ( rc = put_page_and_type_preemptible(page, 1) )
+ {
+ case -EINTR:
+ case -EAGAIN:
+ curr->arch.old_guest_table = page;
+ rc = 0;
+ break;
+ default:
+ BUG_ON(rc);
+ break;
+ }
put_page(page);
/* A page is dirtied when its pin status is cleared. */
@@ -3069,7 +3245,8 @@ long do_mmuext_op(
}
case MMUEXT_NEW_BASEPTR:
- okay = new_guest_cr3(gmfn_to_mfn(d, op.arg1.mfn));
+ rc = new_guest_cr3(gmfn_to_mfn(d, op.arg1.mfn));
+ okay = !rc;
break;
#ifdef __x86_64__
@@ -3077,29 +3254,55 @@ long do_mmuext_op(
unsigned long old_mfn, mfn;
mfn = gmfn_to_mfn(d, op.arg1.mfn);
+ old_mfn = pagetable_get_pfn(curr->arch.guest_table_user);
+ /*
+ * This is particularly important when getting restarted after the
+ * previous attempt got preempted in the put-old-MFN phase.
+ */
+ if ( old_mfn == mfn )
+ break;
+
if ( mfn != 0 )
{
if ( paging_mode_refcounts(d) )
okay = get_page_from_pagenr(mfn, d);
else
- okay = !get_page_and_type_from_pagenr(
- mfn, PGT_root_page_table, d, 0, 0);
+ {
+ rc = get_page_and_type_from_pagenr(
+ mfn, PGT_root_page_table, d, 0, 1);
+ okay = !rc;
+ }
if ( unlikely(!okay) )
{
- MEM_LOG("Error while installing new mfn %lx", mfn);
+ if ( rc == -EINTR )
+ rc = -EAGAIN;
+ else if ( rc != -EAGAIN )
+ MEM_LOG("Error while installing new mfn %lx", mfn);
break;
}
}
- old_mfn = pagetable_get_pfn(curr->arch.guest_table_user);
curr->arch.guest_table_user = pagetable_from_pfn(mfn);
if ( old_mfn != 0 )
{
+ struct page_info *page = mfn_to_page(old_mfn);
+
if ( paging_mode_refcounts(d) )
- put_page(mfn_to_page(old_mfn));
+ put_page(page);
else
- put_page_and_type(mfn_to_page(old_mfn));
+ switch ( rc = put_page_and_type_preemptible(page, 1) )
+ {
+ case -EINTR:
+ rc = -EAGAIN;
+ case -EAGAIN:
+ curr->arch.old_guest_table = page;
+ okay = 0;
+ break;
+ default:
+ BUG_ON(rc);
+ break;
+ }
}
break;
@@ -3338,9 +3541,27 @@ long do_mmuext_op(
}
if ( rc == -EAGAIN )
+ {
+ ASSERT(i < count);
rc = hypercall_create_continuation(
__HYPERVISOR_mmuext_op, "hihi",
uops, (count - i) | MMU_UPDATE_PREEMPTED, pdone, foreigndom);
+ }
+ else if ( curr->arch.old_guest_table )
+ {
+ XEN_GUEST_HANDLE(void) null;
+
+ ASSERT(rc || i == count);
+ set_xen_guest_handle(null, NULL);
+ /*
+ * In order to have a way to communicate the final return value to
+ * our continuation, we pass this in place of "foreigndom", building
+ * on the fact that this argument isn't needed anymore.
+ */
+ rc = hypercall_create_continuation(
+ __HYPERVISOR_mmuext_op, "hihi", null,
+ MMU_UPDATE_PREEMPTED, null, rc);
+ }
put_pg_owner(pg_owner);
@@ -3367,11 +3588,28 @@ long do_mmu_update(
void *va;
unsigned long gpfn, gmfn, mfn;
struct page_info *page;
- int rc = 0, okay = 1, i = 0;
- unsigned int cmd, done = 0, pt_dom;
- struct vcpu *v = current;
+ unsigned int cmd, i = 0, done = 0, pt_dom;
+ struct vcpu *curr = current, *v = curr;
struct domain *d = v->domain, *pt_owner = d, *pg_owner;
struct domain_mmap_cache mapcache;
+ int rc = put_old_guest_table(curr), okay = 1;
+
+ if ( unlikely(rc) )
+ {
+ if ( likely(rc == -EAGAIN) )
+ rc = hypercall_create_continuation(
+ __HYPERVISOR_mmu_update, "hihi", ureqs, count, pdone,
+ foreigndom);
+ return rc;
+ }
+
+ if ( unlikely(count == MMU_UPDATE_PREEMPTED) &&
+ likely(guest_handle_is_null(ureqs)) )
+ {
+ /* See the curr->arch.old_guest_table related
+ * hypercall_create_continuation() below. */
+ return (int)foreigndom;
+ }
if ( unlikely(count & MMU_UPDATE_PREEMPTED) )
{
@@ -3420,7 +3658,7 @@ long do_mmu_update(
for ( i = 0; i < count; i++ )
{
- if ( hypercall_preempt_check() )
+ if ( curr->arch.old_guest_table || hypercall_preempt_check() )
{
rc = -EAGAIN;
break;
@@ -3685,9 +3923,27 @@ long do_mmu_update(
}
if ( rc == -EAGAIN )
+ {
+ ASSERT(i < count);
rc = hypercall_create_continuation(
__HYPERVISOR_mmu_update, "hihi",
ureqs, (count - i) | MMU_UPDATE_PREEMPTED, pdone, foreigndom);
+ }
+ else if ( curr->arch.old_guest_table )
+ {
+ XEN_GUEST_HANDLE(void) null;
+
+ ASSERT(rc || i == count);
+ set_xen_guest_handle(null, NULL);
+ /*
+ * In order to have a way to communicate the final return value to
+ * our continuation, we pass this in place of "foreigndom", building
+ * on the fact that this argument isn't needed anymore.
+ */
+ rc = hypercall_create_continuation(
+ __HYPERVISOR_mmu_update, "hihi", null,
+ MMU_UPDATE_PREEMPTED, null, rc);
+ }
put_pg_owner(pg_owner);

@@ -1,23 +0,0 @@
$NetBSD: patch-CVE-2013-1920,v 1.1 2013/04/19 14:02:45 bouyer Exp $
http://lists.xen.org/archives/html/xen-announce/2013-04/msg00000.html
--- xen/common/event_channel.c.orig
+++ xen/common/event_channel.c
@@ -104,7 +104,6 @@ static int get_free_port(struct domain *
if ( unlikely(chn == NULL) )
return -ENOMEM;
memset(chn, 0, EVTCHNS_PER_BUCKET * sizeof(*chn));
- bucket_from_port(d, port) = chn;
for ( i = 0; i < EVTCHNS_PER_BUCKET; i++ )
{
@@ -117,6 +116,8 @@ static int get_free_port(struct domain *
}
}
+ bucket_from_port(d, port) = chn;
+
return port;
}

@@ -0,0 +1,43 @@
$NetBSD: patch-CVE-2013-1952,v 1.1 2013/05/03 16:48:38 drochner Exp $
see http://lists.xen.org/archives/html/xen-announce/2013-05/msg00001.html
--- xen/drivers/passthrough/vtd/intremap.c.orig 2013-04-23 16:44:20.000000000 +0000
+++ xen/drivers/passthrough/vtd/intremap.c
@@ -477,16 +477,15 @@ static void set_msi_source_id(struct pci
type = pdev_type(bus, devfn);
switch ( type )
{
+ case DEV_TYPE_PCIe_ENDPOINT:
case DEV_TYPE_PCIe_BRIDGE:
case DEV_TYPE_PCIe2PCI_BRIDGE:
- case DEV_TYPE_LEGACY_PCI_BRIDGE:
- break;
-
- case DEV_TYPE_PCIe_ENDPOINT:
set_ire_sid(ire, SVT_VERIFY_SID_SQ, SQ_ALL_16, PCI_BDF2(bus, devfn));
break;
case DEV_TYPE_PCI:
+ case DEV_TYPE_LEGACY_PCI_BRIDGE:
+ /* case DEV_TYPE_PCI2PCIe_BRIDGE: */
ret = find_upstream_bridge(&bus, &devfn, &secbus);
if ( ret == 0 ) /* integrated PCI device */
{
@@ -498,10 +497,15 @@ static void set_msi_source_id(struct pci
if ( pdev_type(bus, devfn) == DEV_TYPE_PCIe2PCI_BRIDGE )
set_ire_sid(ire, SVT_VERIFY_BUS, SQ_ALL_16,
(bus << 8) | pdev->bus);
- else if ( pdev_type(bus, devfn) == DEV_TYPE_LEGACY_PCI_BRIDGE )
+ else
set_ire_sid(ire, SVT_VERIFY_SID_SQ, SQ_ALL_16,
PCI_BDF2(bus, devfn));
}
+ else
+ dprintk(XENLOG_WARNING VTDPREFIX,
+ "d%d: no upstream bridge for %02x:%02x.%u\n",
+ pdev->domain->domain_id,
+ bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
break;
default:

@@ -1,151 +0,0 @@
$NetBSD: patch-CVE-2013-1964-1,v 1.1 2013/04/19 14:02:45 bouyer Exp $
http://lists.xen.org/archives/html/xen-announce/2013-04/msg00006.html
--- xen/common/grant_table.c.orig
+++ xen/common/grant_table.c
@@ -598,7 +598,7 @@ __gnttab_map_grant_ref(
act->start = 0;
act->length = PAGE_SIZE;
act->is_sub_page = 0;
- act->trans_dom = rd->domain_id;
+ act->trans_domain = rd;
act->trans_gref = op->ref;
}
}
@@ -1629,11 +1629,10 @@ __release_grant_for_copy(
struct active_grant_entry *act;
unsigned long r_frame;
uint16_t *status;
- domid_t trans_domid;
grant_ref_t trans_gref;
int released_read;
int released_write;
- struct domain *trans_dom;
+ struct domain *td;
released_read = 0;
released_write = 0;
@@ -1647,15 +1646,13 @@ __release_grant_for_copy(
if (rd->grant_table->gt_version == 1)
{
status = &sha->flags;
- trans_domid = rd->domain_id;
- /* Shut the compiler up. This'll never be used, because
- trans_domid == rd->domain_id, but gcc doesn't know that. */
- trans_gref = 0x1234567;
+ td = rd;
+ trans_gref = gref;
}
else
{
status = &status_entry(rd->grant_table, gref);
- trans_domid = act->trans_dom;
+ td = act->trans_domain;
trans_gref = act->trans_gref;
}
@@ -1683,21 +1680,16 @@ __release_grant_for_copy(
spin_unlock(&rd->grant_table->lock);
- if ( trans_domid != rd->domain_id )
+ if ( td != rd )
{
- if ( released_write || released_read )
- {
- trans_dom = rcu_lock_domain_by_id(trans_domid);
- if ( trans_dom != NULL )
- {
- /* Recursive calls, but they're tail calls, so it's
- okay. */
- if ( released_write )
- __release_grant_for_copy(trans_dom, trans_gref, 0);
- else if ( released_read )
- __release_grant_for_copy(trans_dom, trans_gref, 1);
- }
- }
+ /* Recursive calls, but they're tail calls, so it's
+ okay. */
+ if ( released_write )
+ __release_grant_for_copy(td, trans_gref, 0);
+ else if ( released_read )
+ __release_grant_for_copy(td, trans_gref, 1);
+
+ rcu_unlock_domain(td);
}
}
@@ -1734,7 +1726,7 @@ __acquire_grant_for_copy(
uint32_t old_pin;
domid_t trans_domid;
grant_ref_t trans_gref;
- struct domain *rrd;
+ struct domain *td;
unsigned long gfn;
unsigned long grant_frame;
unsigned trans_page_off;
@@ -1788,8 +1780,8 @@ __acquire_grant_for_copy(
status) ) != GNTST_okay )
goto unlock_out;
- trans_domid = ld->domain_id;
- trans_gref = 0;
+ td = rd;
+ trans_gref = gref;
if ( sha2 && (shah->flags & GTF_type_mask) == GTF_transitive )
{
if ( !allow_transitive )
@@ -1811,14 +1803,15 @@ __acquire_grant_for_copy(
that you don't need to go out of your way to avoid it
in the guest. */
- rrd = rcu_lock_domain_by_id(trans_domid);
- if ( rrd == NULL )
+ /* We need to leave the rrd locked during the grant copy */
+ td = rcu_lock_domain_by_id(trans_domid);
+ if ( td == NULL )
PIN_FAIL(unlock_out_clear, GNTST_general_error,
"transitive grant referenced bad domain %d\n",
trans_domid);
spin_unlock(&rd->grant_table->lock);
- rc = __acquire_grant_for_copy(rrd, trans_gref, rd,
+ rc = __acquire_grant_for_copy(td, trans_gref, rd,
readonly, &grant_frame,
&trans_page_off, &trans_length,
0, &ignore);
@@ -1826,6 +1819,7 @@ __acquire_grant_for_copy(
spin_lock(&rd->grant_table->lock);
if ( rc != GNTST_okay ) {
__fixup_status_for_copy_pin(act, status);
+ rcu_unlock_domain(td);
spin_unlock(&rd->grant_table->lock);
return rc;
}
@@ -1837,6 +1831,7 @@ __acquire_grant_for_copy(
if ( act->pin != old_pin )
{
__fixup_status_for_copy_pin(act, status);
+ rcu_unlock_domain(td);
spin_unlock(&rd->grant_table->lock);
return __acquire_grant_for_copy(rd, gref, ld, readonly,
frame, page_off, length,
@@ -1848,7 +1843,7 @@ __acquire_grant_for_copy(
sub-page, but we always treat it as one because that
blocks mappings of transitive grants. */
is_sub_page = 1;
- *owning_domain = rrd;
+ *owning_domain = td;
act->gfn = -1ul;
}
else if ( sha1 )
@@ -1894,7 +1889,7 @@ __acquire_grant_for_copy(
act->is_sub_page = is_sub_page;
act->start = trans_page_off;
act->length = trans_length;
- act->trans_dom = trans_domid;
+ act->trans_domain = td;
act->trans_gref = trans_gref;
act->frame = grant_frame;
}

@@ -1,15 +0,0 @@
$NetBSD: patch-CVE-2013-1964-2,v 1.1 2013/04/19 14:02:46 bouyer Exp $
http://lists.xen.org/archives/html/xen-announce/2013-04/msg00006.html
--- xen/include/xen/grant_table.h.orig
+++ xen/include/xen/grant_table.h
@@ -32,7 +32,7 @@
struct active_grant_entry {
u32 pin; /* Reference count information. */
domid_t domid; /* Domain being granted access. */
- domid_t trans_dom;
+ struct domain *trans_domain;
uint32_t trans_gref;
unsigned long frame; /* Frame being granted. */
unsigned long gfn; /* Guest's idea of the frame being granted. */

@@ -1,11 +1,10 @@
# $NetBSD: Makefile,v 1.32 2013/04/29 11:22:42 sborrill Exp $
# $NetBSD: Makefile,v 1.33 2013/05/03 16:48:38 drochner Exp $
#
# VERSION is set in version.mk as it is shared with other packages
.include "version.mk"
DISTNAME= xen-${VERSION}
PKGNAME= xentools41-${VERSION}
PKGREVISION= 4
CATEGORIES= sysutils
MASTER_SITES= http://bits.xensource.com/oss-xen/release/${VERSION}/

@@ -1,18 +1,15 @@
$NetBSD: distinfo,v 1.28 2013/04/19 14:03:51 bouyer Exp $
$NetBSD: distinfo,v 1.29 2013/05/03 16:48:38 drochner Exp $
SHA1 (ipxe-git-v1.0.0.tar.gz) = da052c8de5f3485fe0253c19cf52ed6d72528485
RMD160 (ipxe-git-v1.0.0.tar.gz) = dcd9b6eaafa1ce05c1ebf2a15f2f73ad7a8c5547
Size (ipxe-git-v1.0.0.tar.gz) = 1996881 bytes
SHA1 (xen-4.1.4.tar.gz) = d5f1e9c9eeb96202dd827c196750530ffc64baab
RMD160 (xen-4.1.4.tar.gz) = e3cb379954c985354dfd7dfbed15eae43e73254d
Size (xen-4.1.4.tar.gz) = 10387283 bytes
SHA1 (xen-4.1.5.tar.gz) = 38f098cdbcf4612a6e059e6ad332e68bbfc8bf4d
RMD160 (xen-4.1.5.tar.gz) = 265d6a9faee6cf9314f4ed647604f7b43c327f52
Size (xen-4.1.5.tar.gz) = 10421420 bytes
SHA1 (patch-.._.._ipxe_src_arch_i386_include_librm.h) = 4549ac641b112321b4731a918d85219c3fce6808
SHA1 (patch-.._.._ipxe_src_core_settings.c) = 240ff973757403b983f12b2cbed826584c4a8aba
SHA1 (patch-.._.._ipxe_src_net_tls.c) = c0cfbc2ab2b92c659c146601c4f80d58c951ca62
SHA1 (patch-.._Config.mk) = 9b971a41f67bb3974d3a4459bb9d96fbbd636c96
SHA1 (patch-CVE-2012-6075) = 9de84238489875d94245d4f6ce3689629bb318ee
SHA1 (patch-CVE-2013-0215-1) = 61149c756c6b9314980368cadb09437c64205199
SHA1 (patch-CVE-2013-0215-2) = 44a86ef7fa85a212fda95e73ef8aefb98af1cc39
SHA1 (patch-aa) = 9b53ba4a809dad7a1de34c8fa0dbe493d7256ada
SHA1 (patch-ab) = 0906a5ec3a7450fc987b01289e2560e60966d00d
SHA1 (patch-ac) = c3cc5335a1d6b066307c5f03fe72f513a9eb2bdb
@@ -51,7 +48,6 @@ SHA1 (patch-ioemu-qemu-xen_hw_pt-graphics.c) = 3c03404f1d711c667559a1332e717a5f1
SHA1 (patch-ioemu-qemu-xen_hw_pt-msi.c) = 2dcebc65f591988bb95dea74c3b21f7066154a9f
SHA1 (patch-ioemu-qemu-xen_hw_pt-msi.h) = d1bb1a8ad90d6577056f11df96f5469ffe74a3b0
SHA1 (patch-ioemu-qemu-xen_xen-hooks.mak) = a00d9a9fd0fbb9fd89788b9dfaf5b389a28d47e2
SHA1 (patch-libcx_xc__dom__boot.c) = 0507c2d7fe194f2d11a367fb1840b5d36da66cb1
SHA1 (patch-libxl_libxl_create.c) = 02b661ca684609939c6ef762c0ddd1c5e62ad4d0
SHA1 (patch-libxl_libxl_internal.h) = e126e5e998117903f0c66cc370d350c504ed33d9
SHA1 (patch-ocaml_Makefile.rules) = 104f9d40186e5e4ca6a2e6359bbb369c3c91d1dc

@@ -1,34 +0,0 @@
$NetBSD: patch-CVE-2012-6075,v 1.1 2013/01/20 15:21:55 drochner Exp $
see http://lists.xen.org/archives/html/xen-devel/2013-01/msg01070.html
--- ioemu-qemu-xen/hw/e1000.c.orig 2012-11-13 18:25:17.000000000 +0000
+++ ioemu-qemu-xen/hw/e1000.c
@@ -55,6 +55,11 @@ static int debugflags = DBGBIT(TXERR) |
#define REG_IOADDR 0x0
#define REG_IODATA 0x4
+/* this is the size past which hardware will drop packets when setting LPE=0 */
+#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
+/* this is the size past which hardware will drop packets when setting LPE=1 */
+#define MAXIMUM_ETHERNET_LPE_SIZE 16384
+
/*
* HW models:
* E1000_DEV_ID_82540EM works with Windows and Linux
@@ -628,6 +633,15 @@ e1000_receive(void *opaque, const uint8_
return;
}
+ /* Discard oversized packets if !LPE and !SBP. */
+ if ((size > MAXIMUM_ETHERNET_LPE_SIZE ||
+ (size > MAXIMUM_ETHERNET_VLAN_SIZE
+ && !(s->mac_reg[RCTL] & E1000_RCTL_LPE)))
+ && !(s->mac_reg[RCTL] & E1000_RCTL_SBP)) {
+ DBGOUT(RX, "packet too large for applicable LPE/VLAN size\n");
+ return;
+ }
+
if (!receive_filter(s, buf, size))
return;

@@ -1,30 +0,0 @@
$NetBSD: patch-CVE-2013-0215-1,v 1.1 2013/04/19 14:03:51 bouyer Exp $
http://lists.xen.org/archives/html/xen-announce/2013-02/msg00005.html
--- ocaml/libs/xb/partial.ml.orig
+++ ocaml/libs/xb/partial.ml
@@ -27,8 +27,15 @@ external header_size: unit -> int = "stub_header_size"
external header_of_string_internal: string -> int * int * int * int
= "stub_header_of_string"
+let xenstore_payload_max = 4096 (* xen/include/public/io/xs_wire.h *)
+
let of_string s =
let tid, rid, opint, dlen = header_of_string_internal s in
+ (* A packet which is bigger than xenstore_payload_max is illegal.
+ This will leave the guest connection is a bad state and will
+ be hard to recover from without restarting the connection
+ (ie rebooting the guest) *)
+ let dlen = min xenstore_payload_max dlen in
{
tid = tid;
rid = rid;
@@ -38,6 +45,7 @@ let of_string s =
}
let append pkt s sz =
+ if pkt.len > 4096 then failwith "Buffer.add: cannot grow buffer";
Buffer.add_string pkt.buf (String.sub s 0 sz)
let to_complete pkt =

@@ -1,47 +0,0 @@
$NetBSD: patch-CVE-2013-0215-2,v 1.1 2013/04/19 14:03:52 bouyer Exp $
http://lists.xen.org/archives/html/xen-announce/2013-02/msg00005.html
--- ocaml/libs/xb/xs_ring_stubs.c.orig
+++ ocaml/libs/xb/xs_ring_stubs.c
@@ -39,21 +39,23 @@ static int xs_ring_read(struct mmap_interface *interface,
char *buffer, int len)
{
struct xenstore_domain_interface *intf = interface->addr;
- XENSTORE_RING_IDX cons, prod;
+ XENSTORE_RING_IDX cons, prod; /* offsets only */
int to_read;
- cons = intf->req_cons;
- prod = intf->req_prod;
+ cons = *(volatile uint32*)&intf->req_cons;
+ prod = *(volatile uint32*)&intf->req_prod;
xen_mb();
if (prod == cons)
return 0;
- if (MASK_XENSTORE_IDX(prod) > MASK_XENSTORE_IDX(cons))
+ cons = MASK_XENSTORE_IDX(cons);
+ prod = MASK_XENSTORE_IDX(prod);
+ if (prod > cons)
to_read = prod - cons;
else
- to_read = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(cons);
+ to_read = XENSTORE_RING_SIZE - cons;
if (to_read < len)
len = to_read;
- memcpy(buffer, intf->req + MASK_XENSTORE_IDX(cons), len);
+ memcpy(buffer, intf->req + cons, len);
xen_mb();
intf->req_cons += len;
return len;
@@ -66,8 +68,8 @@ static int xs_ring_write(struct mmap_interface *interface,
XENSTORE_RING_IDX cons, prod;
int can_write;
- cons = intf->rsp_cons;
- prod = intf->rsp_prod;
+ cons = *(volatile uint32*)&intf->rsp_cons;
+ prod = *(volatile uint32*)&intf->rsp_prod;
xen_mb();
if ( (prod - cons) >= XENSTORE_RING_SIZE )
return 0;

@@ -1,13 +0,0 @@
$NetBSD: patch-libcx_xc__dom__boot.c,v 1.1 2013/04/11 19:57:53 joerg Exp $
--- libxc/xc_dom_boot.c.orig 2013-03-25 10:12:30.000000000 +0000
+++ libxc/xc_dom_boot.c
@@ -265,7 +265,7 @@ int xc_dom_boot_image(struct xc_dom_imag
return rc;
/* let the vm run */
- memset(ctxt, 0, sizeof(ctxt));
+ memset(ctxt, 0, sizeof(*ctxt));
if ( (rc = dom->arch_hooks->vcpu(dom, ctxt)) != 0 )
return rc;
xc_dom_unmap_all(dom);

@@ -1,6 +1,6 @@
# $NetBSD: version.mk,v 1.5 2013/04/29 11:22:42 sborrill Exp $
# $NetBSD: version.mk,v 1.6 2013/05/03 16:48:38 drochner Exp $
# Version number is used by xentools41 and xenstoretools
VERSION= 4.1.4
VERSION= 4.1.5
VERSION_IPXE= 1.0.0