| author    | Ian Delaney <idella4@gentoo.org> | 2013-10-02 17:22:28 +0000 |
|-----------|----------------------------------|---------------------------|
| committer | Ian Delaney <idella4@gentoo.org> | 2013-10-02 17:22:28 +0000 |
| commit    | ab8e2c49df61b19712509a979e95f3f4b5edeb92 | |
| tree      | 1c0bb970d6e57dbf89106ffb5af2c1b03f740f69 | /app-emulation/xen/files |
| parent    | Fix class interface setup to be compatible with glib-2.36. Remove old alterna... | |
Add security patches to 4.3.0 from Bug #486354; 4.2.2 excluded (for now) due to one security patch failing
(Portage version: 2.2.0/cvs/Linux x86_64, signed Manifest commit with key 0xB8072B0D)
Diffstat (limited to 'app-emulation/xen/files')
 app-emulation/xen/files/xen-CVE-2013-1442-XSA-62.patch |  46 ++++++
 app-emulation/xen/files/xen-CVE-2013-4355-XSA-63.patch | 171 ++++++++++++++++++
 app-emulation/xen/files/xen-CVE-2013-4356-XSA-64.patch |  57 ++++++++
 app-emulation/xen/files/xen-CVE-2013-4361-XSA-66.patch |  23 ++++
 4 files changed, 297 insertions(+), 0 deletions(-)
diff --git a/app-emulation/xen/files/xen-CVE-2013-1442-XSA-62.patch b/app-emulation/xen/files/xen-CVE-2013-1442-XSA-62.patch
new file mode 100644
index 000000000000..3bb432762a2c
--- /dev/null
+++ b/app-emulation/xen/files/xen-CVE-2013-1442-XSA-62.patch
@@ -0,0 +1,46 @@
+x86/xsave: initialize extended register state when guests enable it
+
+Till now, when setting previously unset bits in XCR0 we wouldn't touch
+the active register state, thus leaving in the newly enabled registers
+whatever a prior user of it left there, i.e. potentially leaking
+information between guests.
+
+This is CVE-2013-1442 / XSA-62.
+
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
+
+--- a/xen/arch/x86/xstate.c
++++ b/xen/arch/x86/xstate.c
+@@ -307,6 +307,7 @@ int validate_xstate(u64 xcr0, u64 xcr0_a
+ int handle_xsetbv(u32 index, u64 new_bv)
+ {
+     struct vcpu *curr = current;
++    u64 mask;
+ 
+     if ( index != XCR_XFEATURE_ENABLED_MASK )
+         return -EOPNOTSUPP;
+@@ -320,9 +321,23 @@ int handle_xsetbv(u32 index, u64 new_bv)
+     if ( !set_xcr0(new_bv) )
+         return -EFAULT;
+ 
++    mask = new_bv & ~curr->arch.xcr0_accum;
+     curr->arch.xcr0 = new_bv;
+     curr->arch.xcr0_accum |= new_bv;
+ 
++    mask &= curr->fpu_dirtied ? ~XSTATE_FP_SSE : XSTATE_NONLAZY;
++    if ( mask )
++    {
++        unsigned long cr0 = read_cr0();
++
++        clts();
++        if ( curr->fpu_dirtied )
++            asm ( "stmxcsr %0" : "=m" (curr->arch.xsave_area->fpu_sse.mxcsr) );
++        xrstor(curr, mask);
++        if ( cr0 & X86_CR0_TS )
++            write_cr0(cr0);
++    }
++
+     return 0;
+ }
+ 
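The crux of the XSA-62 fix is the new `mask` computation: only XCR0 bits being enabled for the *first* time (never present in `xcr0_accum`) can still hold another context's register contents, and exactly those components get reloaded with clean state via `xrstor`. Below is a minimal, self-contained model of that bookkeeping; the `XSTATE_*` values are illustrative assumptions, not Xen's real definitions.

```c
/* Standalone model of the XSA-62 mask logic.  XSTATE_* bit values are
 * assumed for illustration; only the set arithmetic matters. */
#include <stdint.h>
#include <stdio.h>

#define XSTATE_FP_SSE 0x3ULL   /* x87 + SSE components (assumed) */
#define XSTATE_YMM    0x4ULL   /* AVX component (assumed) */

static uint64_t xcr0_accum = XSTATE_FP_SSE;  /* every bit ever enabled */

int main(void)
{
    uint64_t new_bv = XSTATE_FP_SSE | XSTATE_YMM;  /* guest enables AVX */

    /* Bits set now but never set before: the registers they cover may
     * still hold a previous user's data and must be sanitized (in Xen,
     * by xrstor'ing clean state for precisely this mask). */
    uint64_t mask = new_bv & ~xcr0_accum;
    xcr0_accum |= new_bv;

    printf("components to sanitize: %#llx\n", (unsigned long long)mask);
    return 0;
}
```

Without that mask step, the newly enabled registers would simply become architecturally visible with whatever the previous vCPU left in them.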
diff --git a/app-emulation/xen/files/xen-CVE-2013-4355-XSA-63.patch b/app-emulation/xen/files/xen-CVE-2013-4355-XSA-63.patch
new file mode 100644
index 000000000000..5134650e2f88
--- /dev/null
+++ b/app-emulation/xen/files/xen-CVE-2013-4355-XSA-63.patch
@@ -0,0 +1,171 @@
+x86: properly handle hvm_copy_from_guest_{phys,virt}() errors
+
+Ignoring them generally implies using uninitialized data and, in all
+cases dealt with here, potentially leaking hypervisor stack contents to
+guests.
+
+This is XSA-63.
+
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+Reviewed-by: Tim Deegan <tim@xen.org>
+Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
+
+--- a/xen/arch/x86/hvm/hvm.c
++++ b/xen/arch/x86/hvm/hvm.c
+@@ -2308,11 +2308,7 @@ void hvm_task_switch(
+ 
+     rc = hvm_copy_from_guest_virt(
+         &tss, prev_tr.base, sizeof(tss), PFEC_page_present);
+-    if ( rc == HVMCOPY_bad_gva_to_gfn )
+-        goto out;
+-    if ( rc == HVMCOPY_gfn_paged_out )
+-        goto out;
+-    if ( rc == HVMCOPY_gfn_shared )
++    if ( rc != HVMCOPY_okay )
+         goto out;
+ 
+     eflags = regs->eflags;
+@@ -2357,13 +2353,11 @@ void hvm_task_switch(
+ 
+     rc = hvm_copy_from_guest_virt(
+         &tss, tr.base, sizeof(tss), PFEC_page_present);
+-    if ( rc == HVMCOPY_bad_gva_to_gfn )
+-        goto out;
+-    if ( rc == HVMCOPY_gfn_paged_out )
+-        goto out;
+-    /* Note: this could be optimised, if the callee functions knew we want RO
+-     * access */
+-    if ( rc == HVMCOPY_gfn_shared )
++    /*
++     * Note: The HVMCOPY_gfn_shared case could be optimised, if the callee
++     * functions knew we want RO access.
++     */
++    if ( rc != HVMCOPY_okay )
+         goto out;
+ 
+ 
+--- a/xen/arch/x86/hvm/intercept.c
++++ b/xen/arch/x86/hvm/intercept.c
+@@ -87,17 +87,28 @@ static int hvm_mmio_access(struct vcpu *
+     {
+         for ( i = 0; i < p->count; i++ )
+         {
+-            int ret;
+-
+-            ret = hvm_copy_from_guest_phys(&data,
+-                                           p->data + (sign * i * p->size),
+-                                           p->size);
+-            if ( (ret == HVMCOPY_gfn_paged_out) ||
+-                 (ret == HVMCOPY_gfn_shared) )
++            switch ( hvm_copy_from_guest_phys(&data,
++                                              p->data + sign * i * p->size,
++                                              p->size) )
+             {
++            case HVMCOPY_okay:
++                break;
++            case HVMCOPY_gfn_paged_out:
++            case HVMCOPY_gfn_shared:
+                 rc = X86EMUL_RETRY;
+                 break;
++            case HVMCOPY_bad_gfn_to_mfn:
++                data = ~0;
++                break;
++            case HVMCOPY_bad_gva_to_gfn:
++                ASSERT(0);
++                /* fall through */
++            default:
++                rc = X86EMUL_UNHANDLEABLE;
++                break;
+             }
++            if ( rc != X86EMUL_OKAY )
++                break;
+             rc = write_handler(v, p->addr + (sign * i * p->size), p->size,
+                                data);
+             if ( rc != X86EMUL_OKAY )
+@@ -165,8 +176,28 @@ static int process_portio_intercept(port
+         for ( i = 0; i < p->count; i++ )
+         {
+             data = 0;
+-            (void)hvm_copy_from_guest_phys(&data, p->data + sign*i*p->size,
+-                                           p->size);
++            switch ( hvm_copy_from_guest_phys(&data,
++                                              p->data + sign * i * p->size,
++                                              p->size) )
++            {
++            case HVMCOPY_okay:
++                break;
++            case HVMCOPY_gfn_paged_out:
++            case HVMCOPY_gfn_shared:
++                rc = X86EMUL_RETRY;
++                break;
++            case HVMCOPY_bad_gfn_to_mfn:
++                data = ~0;
++                break;
++            case HVMCOPY_bad_gva_to_gfn:
++                ASSERT(0);
++                /* fall through */
++            default:
++                rc = X86EMUL_UNHANDLEABLE;
++                break;
++            }
++            if ( rc != X86EMUL_OKAY )
++                break;
+             rc = action(IOREQ_WRITE, p->addr, p->size, &data);
+             if ( rc != X86EMUL_OKAY )
+                 break;
+--- a/xen/arch/x86/hvm/io.c
++++ b/xen/arch/x86/hvm/io.c
+@@ -340,14 +340,24 @@ static int dpci_ioport_write(uint32_t mp
+         data = p->data;
+         if ( p->data_is_ptr )
+         {
+-            int ret;
+-
+-            ret = hvm_copy_from_guest_phys(&data,
+-                                           p->data + (sign * i * p->size),
+-                                           p->size);
+-            if ( (ret == HVMCOPY_gfn_paged_out) &&
+-                 (ret == HVMCOPY_gfn_shared) )
++            switch ( hvm_copy_from_guest_phys(&data,
++                                              p->data + sign * i * p->size,
++                                              p->size) )
++            {
++            case HVMCOPY_okay:
++                break;
++            case HVMCOPY_gfn_paged_out:
++            case HVMCOPY_gfn_shared:
+                 return X86EMUL_RETRY;
++            case HVMCOPY_bad_gfn_to_mfn:
++                data = ~0;
++                break;
++            case HVMCOPY_bad_gva_to_gfn:
++                ASSERT(0);
++                /* fall through */
++            default:
++                return X86EMUL_UNHANDLEABLE;
++            }
+         }
+ 
+         switch ( p->size )
+--- a/xen/arch/x86/hvm/vmx/realmode.c
++++ b/xen/arch/x86/hvm/vmx/realmode.c
+@@ -39,7 +39,9 @@ static void realmode_deliver_exception(
+ 
+  again:
+     last_byte = (vector * 4) + 3;
+-    if ( idtr->limit < last_byte )
++    if ( idtr->limit < last_byte ||
++         hvm_copy_from_guest_phys(&cs_eip, idtr->base + vector * 4, 4) !=
++         HVMCOPY_okay )
+     {
+         /* Software interrupt? */
+         if ( insn_len != 0 )
+@@ -64,8 +66,6 @@ static void realmode_deliver_exception(
+         }
+     }
+ 
+-    (void)hvm_copy_from_guest_phys(&cs_eip, idtr->base + vector * 4, 4);
+-
+     frame[0] = regs->eip + insn_len;
+     frame[1] = csr->sel;
+     frame[2] = regs->eflags & ~X86_EFLAGS_RF;
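Each hunk of XSA-63 replaces a partial error check with the same exhaustive switch, so no `hvm_copy_from_guest_phys()` outcome can leave `data` holding hypervisor stack garbage. Below is a compilable sketch of that pattern; the enum names mirror the patch, but the copy routine is a stub and the surrounding code is a stand-in, not Xen's.

```c
/* Sketch of XSA-63's exhaustive error handling around a guest-physical
 * copy.  The copy function is a stub; in Xen it walks the P2M. */
#include <assert.h>
#include <stdint.h>
#include <string.h>

enum hvmcopy { HVMCOPY_okay, HVMCOPY_bad_gva_to_gfn, HVMCOPY_bad_gfn_to_mfn,
               HVMCOPY_gfn_paged_out, HVMCOPY_gfn_shared };
enum emul { X86EMUL_OKAY, X86EMUL_RETRY, X86EMUL_UNHANDLEABLE };

/* Stub standing in for Xen's hvm_copy_from_guest_phys(). */
static enum hvmcopy copy_from_guest_phys(void *buf, uint64_t gpa, size_t len)
{
    memset(buf, 0, len);               /* pretend the page was readable */
    return HVMCOPY_okay;
}

static enum emul read_guest(uint64_t gpa, uint64_t *data)
{
    *data = 0;
    switch ( copy_from_guest_phys(data, gpa, sizeof(*data)) )
    {
    case HVMCOPY_okay:
        return X86EMUL_OKAY;
    case HVMCOPY_gfn_paged_out:        /* transient: let the caller retry */
    case HVMCOPY_gfn_shared:
        return X86EMUL_RETRY;
    case HVMCOPY_bad_gfn_to_mfn:       /* physmap hole: reads as all-ones */
        *data = ~0ULL;
        return X86EMUL_OKAY;
    default:                           /* bad_gva_to_gfn can't happen for a
                                        * physical-address copy */
        assert(0);
        return X86EMUL_UNHANDLEABLE;
    }
}

int main(void)
{
    uint64_t v;
    return read_guest(0x1000, &v) == X86EMUL_OKAY ? 0 : 1;
}
```

The important cases are the previously ignored ones: `bad_gfn_to_mfn` now yields a defined all-ones value instead of whatever was on the stack, and anything unexpected fails the emulation outright.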
diff --git a/app-emulation/xen/files/xen-CVE-2013-4356-XSA-64.patch b/app-emulation/xen/files/xen-CVE-2013-4356-XSA-64.patch
new file mode 100644
index 000000000000..9b3a7cbe1bed
--- /dev/null
+++ b/app-emulation/xen/files/xen-CVE-2013-4356-XSA-64.patch
@@ -0,0 +1,57 @@
+commit 95a0770282ea2a03f7bc48c6656d5fc79bae0599
+Author: Tim Deegan <tim@xen.org>
+Date:   Thu Sep 12 14:16:28 2013 +0100
+
+    x86/mm/shadow: Fix initialization of PV shadow L4 tables.
+
+    Shadowed PV L4 tables must have the same Xen mappings as their
+    unshadowed equivalent.  This is done by copying the Xen entries
+    verbatim from the idle pagetable, and then using guest_l4_slot()
+    in the SHADOW_FOREACH_L4E() iterator to avoid touching those entries.
+
+    adc5afbf1c70ef55c260fb93e4b8ce5ccb918706 (x86: support up to 16Tb)
+    changed the definition of ROOT_PAGETABLE_XEN_SLOTS to extend right to
+    the top of the address space, which causes the shadow code to
+    copy Xen mappings into guest-kernel-address slots too.
+
+    In the common case, all those slots are zero in the idle pagetable,
+    and no harm is done.  But if any slot above #271 is non-zero, Xen will
+    crash when that slot is later cleared (it attempts to drop
+    shadow-pagetable refcounts on its own L4 pagetables).
+
+    Fix by using the new ROOT_PAGETABLE_PV_XEN_SLOTS when appropriate.
+    Monitor pagetables need the full Xen mappings, so they keep using the
+    old name (with its new semantics).
+
+    This is XSA-64.
+
+    Signed-off-by: Tim Deegan <tim@xen.org>
+    Reviewed-by: Jan Beulich <jbeulich@suse.com>
+
+    Xen 4.3.x and xen-unstable are vulnerable.
+
+diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
+index 4c4c2ba..3fed0b6 100644
+--- a/xen/arch/x86/mm/shadow/multi.c
++++ b/xen/arch/x86/mm/shadow/multi.c
+@@ -1433,15 +1433,19 @@ void sh_install_xen_entries_in_l4(struct vcpu *v, mfn_t gl4mfn, mfn_t sl4mfn)
+ {
+     struct domain *d = v->domain;
+     shadow_l4e_t *sl4e;
++    unsigned int slots;
+ 
+     sl4e = sh_map_domain_page(sl4mfn);
+     ASSERT(sl4e != NULL);
+     ASSERT(sizeof (l4_pgentry_t) == sizeof (shadow_l4e_t));
+ 
+     /* Copy the common Xen mappings from the idle domain */
++    slots = (shadow_mode_external(d)
++             ? ROOT_PAGETABLE_XEN_SLOTS
++             : ROOT_PAGETABLE_PV_XEN_SLOTS);
+     memcpy(&sl4e[ROOT_PAGETABLE_FIRST_XEN_SLOT],
+            &idle_pg_table[ROOT_PAGETABLE_FIRST_XEN_SLOT],
+-           ROOT_PAGETABLE_XEN_SLOTS * sizeof(l4_pgentry_t));
++           slots * sizeof(l4_pgentry_t));
+ 
+     /* Install the per-domain mappings for this domain */
+     sl4e[shadow_l4_table_offset(PERDOMAIN_VIRT_START)] =
diff --git a/app-emulation/xen/files/xen-CVE-2013-4361-XSA-66.patch b/app-emulation/xen/files/xen-CVE-2013-4361-XSA-66.patch
new file mode 100644
index 000000000000..1d9f25abae1f
--- /dev/null
+++ b/app-emulation/xen/files/xen-CVE-2013-4361-XSA-66.patch
@@ -0,0 +1,23 @@
+x86: properly set up fbld emulation operand address
+
+This is CVE-2013-4361 / XSA-66.
+
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+Acked-by: Ian Jackson <ian.jackson@eu.citrix.com>
+
+--- a/xen/arch/x86/x86_emulate/x86_emulate.c
++++ b/xen/arch/x86/x86_emulate/x86_emulate.c
+@@ -3156,11 +3156,11 @@ x86_emulate(
+         break;
+     case 4: /* fbld m80dec */
+         ea.bytes = 10;
+-        dst = ea;
++        src = ea;
+         if ( (rc = ops->read(src.mem.seg, src.mem.off,
+                              &src.val, src.bytes, ctxt)) != 0 )
+             goto done;
+-        emulate_fpu_insn_memdst("fbld", src.val);
++        emulate_fpu_insn_memsrc("fbld", src.val);
+         break;
+     case 5: /* fild m64i */
+         ea.bytes = 8;
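XSA-64's entire fix is the choice of how many idle-pagetable slots to copy when seeding a shadow L4. Here is a standalone model of that selection; the slot constants and the 512-entry layout are illustrative stand-ins for Xen's config.h values.

```c
/* Model of XSA-64's slot-count selection when seeding a shadow L4.
 * Constants are illustrative; Xen's real values live in config.h. */
#include <stdint.h>
#include <string.h>

#define L4_ENTRIES                     512
#define ROOT_PAGETABLE_FIRST_XEN_SLOT  256
#define ROOT_PAGETABLE_XEN_SLOTS       256  /* full Xen half (assumed) */
#define ROOT_PAGETABLE_PV_XEN_SLOTS     16  /* PV-visible slots (assumed) */

typedef uint64_t l4e_t;

/* Monitor tables (external/HVM mode) need all of Xen's mappings; PV
 * shadows must not touch guest-kernel slots, or Xen later tries to drop
 * shadow refcounts on its own pagetables when those slots are cleared. */
static void install_xen_entries(l4e_t sl4e[L4_ENTRIES],
                                const l4e_t idle[L4_ENTRIES], int external)
{
    unsigned int slots = external ? ROOT_PAGETABLE_XEN_SLOTS
                                  : ROOT_PAGETABLE_PV_XEN_SLOTS;

    memcpy(&sl4e[ROOT_PAGETABLE_FIRST_XEN_SLOT],
           &idle[ROOT_PAGETABLE_FIRST_XEN_SLOT],
           slots * sizeof(l4e_t));
}

int main(void)
{
    static l4e_t shadow[L4_ENTRIES], idle[L4_ENTRIES];
    install_xen_entries(shadow, idle, 0 /* PV shadow */);
    return 0;
}
```

XSA-66, finally, is an operand-binding bug: `fbld` only reads memory, so its decoded memory operand must be bound to `src` (and emulated with the memory-source form), not `dst`. A toy reconstruction of the corrected binding; the structures are simplified stand-ins for the emulator's.

```c
/* Toy model of the XSA-66 operand-binding fix.  The buggy code did
 * `dst = ea` and then read through the stale `src` descriptor. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct operand {
    uint64_t off;        /* guest offset the operand refers to */
    unsigned int bytes;  /* operand size */
    uint8_t val[16];     /* bytes fetched by the emulator */
};

static const uint8_t guest_mem[64] = { 0x99, 0x88, 0x77 };  /* fake memory */

static int read_mem(uint64_t off, void *buf, unsigned int bytes)
{
    if ( off + bytes > sizeof(guest_mem) )
        return -1;
    memcpy(buf, &guest_mem[off], bytes);
    return 0;
}

int main(void)
{
    struct operand src = { 0 }, ea = { .off = 0, .bytes = 10 };

    src = ea;  /* the fix: bind the decoded memory operand to src */
    if ( read_mem(src.off, src.val, src.bytes) != 0 )
        return 1;
    printf("fbld fetches %u bytes, first byte %#x\n", src.bytes, src.val[0]);
    return 0;
}
```

With the old `dst = ea` binding, `src.off` and `src.bytes` were whatever decode had left behind, so the emulator read the wrong memory and then ran the store form of the instruction.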