author     Tomáš Mózes <hydrapolic@gmail.com>   2024-01-04 12:34:35 +0100
committer  Tomáš Mózes <hydrapolic@gmail.com>   2024-01-04 12:34:35 +0100
commit     899e5378b706299ec647f9dafc556fc9cef5da55 (patch)
tree       7c76226ad8fe721e89c2ec4cd7a515d41dd8f79a
parent     Xen 4.16.6-pre-patchset-0 (diff)
Xen 4.16.6-pre-patchset-1
Signed-off-by: Tomáš Mózes <hydrapolic@gmail.com>
31 files changed, 352 insertions, 56 deletions
diff --git a/0001-xen-arm-page-Handle-cache-flush-of-an-element-at-the.patch b/0001-xen-arm-page-Handle-cache-flush-of-an-element-at-the.patch
index 8da66a5..3988d89 100644
--- a/0001-xen-arm-page-Handle-cache-flush-of-an-element-at-the.patch
+++ b/0001-xen-arm-page-Handle-cache-flush-of-an-element-at-the.patch
@@ -1,7 +1,7 @@
 From d720c2310a7ac8878c01fe9d9fdc13f43cb266b3 Mon Sep 17 00:00:00 2001
 From: Stefano Stabellini <stefano.stabellini@amd.com>
 Date: Tue, 5 Sep 2023 14:34:28 +0200
-Subject: [PATCH 01/27] xen/arm: page: Handle cache flush of an element at the
+Subject: [PATCH 01/30] xen/arm: page: Handle cache flush of an element at the
  top of the address space
 
 The region that needs to be cleaned/invalidated may be at the top
@@ -107,5 +107,5 @@ index c6f9fb0d4e..eff5883ef8 100644
      dsb(sy);           /* So we know the flushes happen before continuing */
      /* ARM callers assume that dcache_* functions cannot fail. */
 --
-2.42.0
+2.43.0
diff --git a/0002-x86-AMD-extend-Zenbleed-check-to-models-good-ucode-i.patch b/0002-x86-AMD-extend-Zenbleed-check-to-models-good-ucode-i.patch
index d315be0..c5b53d6 100644
--- a/0002-x86-AMD-extend-Zenbleed-check-to-models-good-ucode-i.patch
+++ b/0002-x86-AMD-extend-Zenbleed-check-to-models-good-ucode-i.patch
@@ -1,7 +1,7 @@
 From 08539e8315fdae5f5bfd655d53ed35fd2922fe6c Mon Sep 17 00:00:00 2001
 From: Jan Beulich <jbeulich@suse.com>
 Date: Wed, 23 Aug 2023 09:26:36 +0200
-Subject: [PATCH 02/27] x86/AMD: extend Zenbleed check to models "good" ucode
+Subject: [PATCH 02/30] x86/AMD: extend Zenbleed check to models "good" ucode
  isn't known for
 
 Reportedly the AMD Custom APU 0405 found on SteamDeck, models 0x90 and
@@ -44,5 +44,5 @@ index 60c6d88edf..a591038757 100644
     rdmsrl(MSR_AMD64_DE_CFG, val);
 --
-2.42.0
+2.43.0
diff --git a/0003-x86-spec-ctrl-Fix-confusion-between-SPEC_CTRL_EXIT_T.patch b/0003-x86-spec-ctrl-Fix-confusion-between-SPEC_CTRL_EXIT_T.patch
index ecd9c37..ba38b0f 100644
--- a/0003-x86-spec-ctrl-Fix-confusion-between-SPEC_CTRL_EXIT_T.patch
+++ b/0003-x86-spec-ctrl-Fix-confusion-between-SPEC_CTRL_EXIT_T.patch
@@ -1,7 +1,7 @@
 From 1e52cdf07cdf52e5d99957c3ecbddf5b1feda963 Mon Sep 17 00:00:00 2001
 From: Andrew Cooper <andrew.cooper3@citrix.com>
 Date: Tue, 12 Sep 2023 15:06:49 +0100
-Subject: [PATCH 03/27] x86/spec-ctrl: Fix confusion between
+Subject: [PATCH 03/30] x86/spec-ctrl: Fix confusion between
  SPEC_CTRL_EXIT_TO_XEN{,_IST}
 
 c/s 3fffaf9c13e9 ("x86/entry: Avoid using alternatives in NMI/#MC paths")
@@ -70,5 +70,5 @@ index b61a5571ae..f5110616e4 100644
  * Requires %rbx=stack_end
  * Clobbers %rax, %rcx, %rdx
 --
-2.42.0
+2.43.0
diff --git a/0004-x86-spec-ctrl-Fold-DO_SPEC_CTRL_EXIT_TO_XEN-into-it-.patch b/0004-x86-spec-ctrl-Fold-DO_SPEC_CTRL_EXIT_TO_XEN-into-it-.patch
index 52d055c..db1eaa5 100644
--- a/0004-x86-spec-ctrl-Fold-DO_SPEC_CTRL_EXIT_TO_XEN-into-it-.patch
+++ b/0004-x86-spec-ctrl-Fold-DO_SPEC_CTRL_EXIT_TO_XEN-into-it-.patch
@@ -1,7 +1,7 @@
 From afa5b17f385372226de6b0862f12ab39fda16b5c Mon Sep 17 00:00:00 2001
 From: Andrew Cooper <andrew.cooper3@citrix.com>
 Date: Tue, 12 Sep 2023 17:03:16 +0100
-Subject: [PATCH 04/27] x86/spec-ctrl: Fold DO_SPEC_CTRL_EXIT_TO_XEN into it's
+Subject: [PATCH 04/30] x86/spec-ctrl: Fold DO_SPEC_CTRL_EXIT_TO_XEN into it's
  single user
 
 With the SPEC_CTRL_EXIT_TO_XEN{,_IST} confusion fixed, it's now obvious that
@@ -81,5 +81,5 @@ index f5110616e4..251c30eee5 100644
 #endif /* __ASSEMBLY__ */
 --
-2.42.0
+2.43.0
diff --git a/0005-x86-spec-ctrl-Turn-the-remaining-SPEC_CTRL_-ENTRY-EX.patch b/0005-x86-spec-ctrl-Turn-the-remaining-SPEC_CTRL_-ENTRY-EX.patch
index ceca67a..a1b56bd 100644
--- a/0005-x86-spec-ctrl-Turn-the-remaining-SPEC_CTRL_-ENTRY-EX.patch
+++ b/0005-x86-spec-ctrl-Turn-the-remaining-SPEC_CTRL_-ENTRY-EX.patch
@@ -1,7 +1,7 @@
 From 353e876a9dd5a93d0bf8819e77613c33db0de97b Mon Sep 17 00:00:00 2001
 From: Andrew Cooper <andrew.cooper3@citrix.com>
 Date: Fri, 1 Sep 2023 11:38:44 +0100
-Subject: [PATCH 05/27] x86/spec-ctrl: Turn the remaining
+Subject: [PATCH 05/30] x86/spec-ctrl: Turn the remaining
  SPEC_CTRL_{ENTRY,EXIT}_* into asm macros
 
 These have grown more complex over time, with some already having been
@@ -79,5 +79,5 @@ index 251c30eee5..94ed5dc880 100644
 /*
  * Use in IST interrupt/exception context. May interrupt Xen or PV context.
 --
-2.42.0
+2.43.0
diff --git a/0006-x86-spec-ctrl-Improve-all-SPEC_CTRL_-ENTER-EXIT-_-co.patch b/0006-x86-spec-ctrl-Improve-all-SPEC_CTRL_-ENTER-EXIT-_-co.patch
index 1784fe1..57a816b 100644
--- a/0006-x86-spec-ctrl-Improve-all-SPEC_CTRL_-ENTER-EXIT-_-co.patch
+++ b/0006-x86-spec-ctrl-Improve-all-SPEC_CTRL_-ENTER-EXIT-_-co.patch
@@ -1,7 +1,7 @@
 From 6cc49c355e952f4ff564c6b817e7eff57c5a02c7 Mon Sep 17 00:00:00 2001
 From: Andrew Cooper <andrew.cooper3@citrix.com>
 Date: Wed, 30 Aug 2023 20:11:50 +0100
-Subject: [PATCH 06/27] x86/spec-ctrl: Improve all SPEC_CTRL_{ENTER,EXIT}_*
+Subject: [PATCH 06/30] x86/spec-ctrl: Improve all SPEC_CTRL_{ENTER,EXIT}_*
  comments
 
 ... to better explain how they're used.
@@ -102,5 +102,5 @@ index 94ed5dc880..9c397f7cbd 100644
 #endif /* __ASSEMBLY__ */
 --
-2.42.0
+2.43.0
diff --git a/0007-x86-entry-Adjust-restore_all_xen-to-hold-stack_end-i.patch b/0007-x86-entry-Adjust-restore_all_xen-to-hold-stack_end-i.patch
index 5d586b1..79b5eec 100644
--- a/0007-x86-entry-Adjust-restore_all_xen-to-hold-stack_end-i.patch
+++ b/0007-x86-entry-Adjust-restore_all_xen-to-hold-stack_end-i.patch
@@ -1,7 +1,7 @@
 From 19aca8f32778f289112fc8db2ee547cdf29c81ca Mon Sep 17 00:00:00 2001
 From: Andrew Cooper <andrew.cooper3@citrix.com>
 Date: Wed, 13 Sep 2023 13:48:16 +0100
-Subject: [PATCH 07/27] x86/entry: Adjust restore_all_xen to hold stack_end in
+Subject: [PATCH 07/30] x86/entry: Adjust restore_all_xen to hold stack_end in
  %r14
 
 All other SPEC_CTRL_{ENTRY,EXIT}_* helpers hold stack_end in %r14. Adjust it
@@ -70,5 +70,5 @@ index 9c397f7cbd..3e745813cf 100644
     wrmsr
 --
-2.42.0
+2.43.0
diff --git a/0008-x86-entry-Track-the-IST-ness-of-an-entry-for-the-exi.patch b/0008-x86-entry-Track-the-IST-ness-of-an-entry-for-the-exi.patch
index c88a8b1..3714bfd 100644
--- a/0008-x86-entry-Track-the-IST-ness-of-an-entry-for-the-exi.patch
+++ b/0008-x86-entry-Track-the-IST-ness-of-an-entry-for-the-exi.patch
@@ -1,7 +1,7 @@
 From 8064cbdbef79e328fad5158beeaf1c45bd0f5bd3 Mon Sep 17 00:00:00 2001
 From: Andrew Cooper <andrew.cooper3@citrix.com>
 Date: Wed, 13 Sep 2023 12:20:12 +0100
-Subject: [PATCH 08/27] x86/entry: Track the IST-ness of an entry for the exit
+Subject: [PATCH 08/30] x86/entry: Track the IST-ness of an entry for the exit
  paths
 
 Use %r12 to hold an ist_exit boolean. This register is zero elsewhere in the
@@ -105,5 +105,5 @@ index 266c0a0990..671e3b3fd5 100644
     jne ret_from_intr
 --
-2.42.0
+2.43.0
diff --git a/0009-x86-spec-ctrl-Issue-VERW-during-IST-exit-to-Xen.patch b/0009-x86-spec-ctrl-Issue-VERW-during-IST-exit-to-Xen.patch
index 4396aa9..fbcab99 100644
--- a/0009-x86-spec-ctrl-Issue-VERW-during-IST-exit-to-Xen.patch
+++ b/0009-x86-spec-ctrl-Issue-VERW-during-IST-exit-to-Xen.patch
@@ -1,7 +1,7 @@
 From 3e51782ebb088fde39fdcfa30d002baddd1a9e06 Mon Sep 17 00:00:00 2001
 From: Andrew Cooper <andrew.cooper3@citrix.com>
 Date: Wed, 13 Sep 2023 13:53:33 +0100
-Subject: [PATCH 09/27] x86/spec-ctrl: Issue VERW during IST exit to Xen
+Subject: [PATCH 09/30] x86/spec-ctrl: Issue VERW during IST exit to Xen
 
 There is a corner case where e.g. an NMI hitting an exit-to-guest path after
 SPEC_CTRL_EXIT_TO_* would have run the entire NMI handler *after* the VERW
@@ -85,5 +85,5 @@ index 3e745813cf..8a816b8cf6 100644
 #endif /* __ASSEMBLY__ */
 --
-2.42.0
+2.43.0
diff --git a/0010-x86-amd-Introduce-is_zen-1-2-_uarch-predicates.patch b/0010-x86-amd-Introduce-is_zen-1-2-_uarch-predicates.patch
index c9cf0df..cc3c93e 100644
--- a/0010-x86-amd-Introduce-is_zen-1-2-_uarch-predicates.patch
+++ b/0010-x86-amd-Introduce-is_zen-1-2-_uarch-predicates.patch
@@ -1,7 +1,7 @@
 From a5857f1eca17a609119ae928c9fa73bb0996ddd9 Mon Sep 17 00:00:00 2001
 From: Andrew Cooper <andrew.cooper3@citrix.com>
 Date: Fri, 15 Sep 2023 12:13:51 +0100
-Subject: [PATCH 10/27] x86/amd: Introduce is_zen{1,2}_uarch() predicates
+Subject: [PATCH 10/30] x86/amd: Introduce is_zen{1,2}_uarch() predicates
 
 We already have 3 cases using STIBP as a Zen1/2 heuristic, and are about to
 introduce a 4th. Wrap the heuristic into a pair of predicates rather than
@@ -87,5 +87,5 @@ index a82382e6bf..7fe1e19217 100644
 int cpu_has_amd_erratum(const struct cpuinfo_x86 *, int, ...);
 --
-2.42.0
+2.43.0
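The STIBP heuristic named in that commit message lends itself to a small predicate; the upstream definitions are not shown in this hunk, so the following is a hedged sketch only, with names and bodies to be treated as assumptions:

    /* Zen1 and Zen2 are both family 17h; the presence of AMD_STIBP is used
     * as the heuristic to tell them apart (Zen2 has it, Zen1 does not).
     * Sketch only -- the real predicates live in Xen's headers, and both
     * assume the caller already established AMD family 17h. */
    static inline bool is_zen1_uarch(void)
    {
        return !boot_cpu_has(X86_FEATURE_AMD_STIBP);
    }

    static inline bool is_zen2_uarch(void)
    {
        return boot_cpu_has(X86_FEATURE_AMD_STIBP);
    }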
diff --git a/0011-x86-spec-ctrl-Mitigate-the-Zen1-DIV-leakage.patch b/0011-x86-spec-ctrl-Mitigate-the-Zen1-DIV-leakage.patch
index cce4fa7..8157b99 100644
--- a/0011-x86-spec-ctrl-Mitigate-the-Zen1-DIV-leakage.patch
+++ b/0011-x86-spec-ctrl-Mitigate-the-Zen1-DIV-leakage.patch
@@ -1,7 +1,7 @@
 From de751c3d906d17b2e25ee429f81b17a689c7c6c0 Mon Sep 17 00:00:00 2001
 From: Andrew Cooper <andrew.cooper3@citrix.com>
 Date: Wed, 30 Aug 2023 20:24:25 +0100
-Subject: [PATCH 11/27] x86/spec-ctrl: Mitigate the Zen1 DIV leakage
+Subject: [PATCH 11/30] x86/spec-ctrl: Mitigate the Zen1 DIV leakage
 
 In the Zen1 microarchitecure, there is one divider in the pipeline which
 services uops from both threads. In the case of #DE, the latched result from
@@ -232,5 +232,5 @@ index 8a816b8cf6..0e69971f66 100644
 .endm
 --
-2.42.0
+2.43.0
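The hunks above are truncated, so the shape of the mitigation is not visible here; upstream it works by scrubbing the shared divider state with a dummy division on the exit-to-guest paths. A minimal C-level sketch of that idea, using GCC-style inline assembly and an invented function name:

    /* Sketch only: overwrite the divider's latched quotient/remainder with
     * innocuous values before returning to a guest.  The real mitigation is
     * implemented in Xen's assembly exit macros, not as a C helper. */
    static inline void scrub_div_state(void)
    {
        unsigned int quo, rem;

        /* 1 / 1 cannot fault and replaces the latched result. */
        asm volatile ( "divl %4"
                       : "=a" (quo), "=d" (rem)
                       : "0" (1), "1" (0), "r" (1u) );
    }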
diff --git a/0012-x86-shadow-defer-releasing-of-PV-s-top-level-shadow-.patch b/0012-x86-shadow-defer-releasing-of-PV-s-top-level-shadow-.patch
index b374a11..fa4386c 100644
--- a/0012-x86-shadow-defer-releasing-of-PV-s-top-level-shadow-.patch
+++ b/0012-x86-shadow-defer-releasing-of-PV-s-top-level-shadow-.patch
@@ -1,7 +1,7 @@
 From c450a4bc11e97eabe97dcefe06f510d7acea8d6d Mon Sep 17 00:00:00 2001
 From: Jan Beulich <JBeulich@suse.com>
 Date: Wed, 20 Sep 2023 10:34:24 +0100
-Subject: [PATCH 12/27] x86/shadow: defer releasing of PV's top-level shadow
+Subject: [PATCH 12/30] x86/shadow: defer releasing of PV's top-level shadow
  reference
 
 sh_set_toplevel_shadow() re-pinning the top-level shadow we may be
@@ -449,5 +449,5 @@ index e25f9604d8..302ae97fc6 100644
     struct xen_domctl_shadow_op *sc,
     XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
 --
-2.42.0
+2.43.0
diff --git a/0013-tools-xenstored-domain_entry_fix-Handle-conflicting-.patch b/0013-tools-xenstored-domain_entry_fix-Handle-conflicting-.patch
index 963818d..cfb6b85 100644
--- a/0013-tools-xenstored-domain_entry_fix-Handle-conflicting-.patch
+++ b/0013-tools-xenstored-domain_entry_fix-Handle-conflicting-.patch
@@ -1,7 +1,7 @@
 From 3382512b9f5e0d8cf37709d7cb47389d2ce8e624 Mon Sep 17 00:00:00 2001
 From: Julien Grall <jgrall@amazon.com>
 Date: Fri, 22 Sep 2023 11:32:16 +0100
-Subject: [PATCH 13/27] tools/xenstored: domain_entry_fix(): Handle conflicting
+Subject: [PATCH 13/30] tools/xenstored: domain_entry_fix(): Handle conflicting
  transaction
 
 The function domain_entry_fix() will be initially called to check if the
@@ -61,5 +61,5 @@ index ddd49eddfa..a3475284ea 100644
     return domid_is_unprivileged(domid) ? cnt : 0;
 }
 --
-2.42.0
+2.43.0
diff --git a/0014-iommu-amd-vi-flush-IOMMU-TLB-when-flushing-the-DTE.patch b/0014-iommu-amd-vi-flush-IOMMU-TLB-when-flushing-the-DTE.patch
index 9642714..156de74 100644
--- a/0014-iommu-amd-vi-flush-IOMMU-TLB-when-flushing-the-DTE.patch
+++ b/0014-iommu-amd-vi-flush-IOMMU-TLB-when-flushing-the-DTE.patch
@@ -1,7 +1,7 @@
 From 35217b78048e91a0f4d0f14b31a474cc59ec1388 Mon Sep 17 00:00:00 2001
 From: Roger Pau Monne <roger.pau@citrix.com>
 Date: Tue, 13 Jun 2023 15:01:05 +0200
-Subject: [PATCH 14/27] iommu/amd-vi: flush IOMMU TLB when flushing the DTE
+Subject: [PATCH 14/30] iommu/amd-vi: flush IOMMU TLB when flushing the DTE
 MIME-Version: 1.0
 Content-Type: text/plain; charset=UTF-8
 Content-Transfer-Encoding: 8bit
@@ -182,5 +182,5 @@ index e5e0f00402..7b6dbf546a 100644
     if ( amd_iommu_reserve_domain_unity_map(
 --
-2.42.0
+2.43.0
diff --git a/0015-libfsimage-xfs-Remove-dead-code.patch b/0015-libfsimage-xfs-Remove-dead-code.patch
index 93b6e2d..8a3df28 100644
--- a/0015-libfsimage-xfs-Remove-dead-code.patch
+++ b/0015-libfsimage-xfs-Remove-dead-code.patch
@@ -1,7 +1,7 @@
 From d51a2a1843b612b03f764703159a0946fe026750 Mon Sep 17 00:00:00 2001
 From: Alejandro Vallejo <alejandro.vallejo@cloud.com>
 Date: Thu, 14 Sep 2023 13:22:50 +0100
-Subject: [PATCH 15/27] libfsimage/xfs: Remove dead code
+Subject: [PATCH 15/30] libfsimage/xfs: Remove dead code
 
 xfs_info.agnolog (and related code) and XFS_INO_AGBNO_BITS are dead code
 that serve no purpose.
@@ -67,5 +67,5 @@ index d735a88e55..2800699f59 100644
     xfs.btnode_ptr0_off =
         ((xfs.bsize - sizeof(xfs_btree_block_t)) /
 --
-2.42.0
+2.43.0
diff --git a/0016-libfsimage-xfs-Amend-mask32lo-to-allow-the-value-32.patch b/0016-libfsimage-xfs-Amend-mask32lo-to-allow-the-value-32.patch
index a6b4527..6ad8532 100644
--- a/0016-libfsimage-xfs-Amend-mask32lo-to-allow-the-value-32.patch
+++ b/0016-libfsimage-xfs-Amend-mask32lo-to-allow-the-value-32.patch
@@ -1,7 +1,7 @@
 From 7d520b8d4ec7495f1ef1e4343a4f705a363e0c9c Mon Sep 17 00:00:00 2001
 From: Alejandro Vallejo <alejandro.vallejo@cloud.com>
 Date: Thu, 14 Sep 2023 13:22:51 +0100
-Subject: [PATCH 16/27] libfsimage/xfs: Amend mask32lo() to allow the value 32
+Subject: [PATCH 16/30] libfsimage/xfs: Amend mask32lo() to allow the value 32
 
 agblklog could plausibly be 32, but that would overflow this shift.
 Perform the shift as ULL and cast to u32 at the end instead.
@@ -29,5 +29,5 @@ index 2800699f59..4720bb4505 100644
 #define XFS_INO_MASK(k)    ((xfs_uint32_t)((1ULL << (k)) - 1))
 #define XFS_INO_OFFSET_BITS    xfs.inopblog
 --
-2.42.0
+2.43.0
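The overflow this fixes is worth seeing in isolation: shifting a 32-bit value by 32 is undefined behaviour in C, so the mask has to be computed in 64 bits, exactly as the XFS_INO_MASK() definition in the hunk above does. A standalone illustration:

    #include <stdint.h>

    /* Mask of the low k bits, valid for 0 <= k <= 32.  (1u << 32) - 1 would
     * be undefined behaviour (shift count equals the type width), whereas
     * 1ULL << 32 == 0x100000000 is well defined and narrows correctly. */
    static inline uint32_t mask32lo(unsigned int k)
    {
        return (uint32_t)((1ULL << k) - 1);   /* mask32lo(32) == 0xffffffff */
    }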
diff --git a/0017-libfsimage-xfs-Sanity-check-the-superblock-during-mo.patch b/0017-libfsimage-xfs-Sanity-check-the-superblock-during-mo.patch
index 4694ea1..50cf496 100644
--- a/0017-libfsimage-xfs-Sanity-check-the-superblock-during-mo.patch
+++ b/0017-libfsimage-xfs-Sanity-check-the-superblock-during-mo.patch
@@ -1,7 +1,7 @@
 From 2de503f8fd0d07401e92abed1097ceb5fd1801f6 Mon Sep 17 00:00:00 2001
 From: Alejandro Vallejo <alejandro.vallejo@cloud.com>
 Date: Thu, 14 Sep 2023 13:22:52 +0100
-Subject: [PATCH 17/27] libfsimage/xfs: Sanity-check the superblock during
+Subject: [PATCH 17/30] libfsimage/xfs: Sanity-check the superblock during
  mounts
 
 Sanity-check the XFS superblock for wellformedness at the mount handler.
@@ -133,5 +133,5 @@ index 40699281e4..b87e37d3d7 100644
 /* those are from xfs_btree.h */
 --
-2.42.0
+2.43.0
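The body of that patch is elided in this diff, so the following is only a sketch of the kind of wellformedness checks a mount-time validator performs; the bounds are illustrative assumptions (the standard XFS limits), not the exact checks in the patch:

    #include <stdint.h>

    /* Minimal stand-in for the handful of on-disk superblock fields used
     * below; the real structure has many more. */
    typedef struct {
        uint32_t sb_magicnum;
        uint8_t  sb_blocklog;   /* log2 of the filesystem block size */
        uint8_t  sb_inodelog;   /* log2 of the inode size */
    } xfs_sb_min_t;

    #define XFS_SB_MAGIC 0x58465342u   /* "XFSB" */

    static int xfs_sb_looks_sane(const xfs_sb_min_t *sb)
    {
        if ( sb->sb_magicnum != XFS_SB_MAGIC )
            return 0;
        /* log2 fields feed later shifts such as
         * 1 << (sb_blocklog + sb_dirblklog), so out-of-range values must
         * be rejected before they can overflow anything. */
        if ( sb->sb_blocklog < 9 || sb->sb_blocklog > 16 )
            return 0;
        if ( sb->sb_inodelog < 8 || sb->sb_inodelog > 11 )
            return 0;
        return 1;
    }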
diff --git a/0018-libfsimage-xfs-Add-compile-time-check-to-libfsimage.patch b/0018-libfsimage-xfs-Add-compile-time-check-to-libfsimage.patch
index 2f2d7ca..288e5b6 100644
--- a/0018-libfsimage-xfs-Add-compile-time-check-to-libfsimage.patch
+++ b/0018-libfsimage-xfs-Add-compile-time-check-to-libfsimage.patch
@@ -1,7 +1,7 @@
 From 766126159ee963cdc16ba9cb2b0ca54b98bc148f Mon Sep 17 00:00:00 2001
 From: Alejandro Vallejo <alejandro.vallejo@cloud.com>
 Date: Thu, 14 Sep 2023 13:22:53 +0100
-Subject: [PATCH 18/27] libfsimage/xfs: Add compile-time check to libfsimage
+Subject: [PATCH 18/30] libfsimage/xfs: Add compile-time check to libfsimage
 
 Adds the common tools include folder to the -I compile flags of
 libfsimage. This allows us to use:
@@ -57,5 +57,5 @@ index e4eb7e1ee2..4a8dd6f239 100644
     xfs.dirbsize = 1 << (super.sb_blocklog + super.sb_dirblklog);
     xfs.inopblog = super.sb_blocklog - super.sb_inodelog;
 --
-2.42.0
+2.43.0
diff --git a/0019-tools-pygrub-Remove-unnecessary-hypercall.patch b/0019-tools-pygrub-Remove-unnecessary-hypercall.patch
index f345a6f..6c9e270 100644
--- a/0019-tools-pygrub-Remove-unnecessary-hypercall.patch
+++ b/0019-tools-pygrub-Remove-unnecessary-hypercall.patch
@@ -1,7 +1,7 @@
 From 3d760a3bb9b55e5dd45534cac3cdb561a57f2ee0 Mon Sep 17 00:00:00 2001
 From: Alejandro Vallejo <alejandro.vallejo@cloud.com>
 Date: Mon, 25 Sep 2023 18:32:21 +0100
-Subject: [PATCH 19/27] tools/pygrub: Remove unnecessary hypercall
+Subject: [PATCH 19/30] tools/pygrub: Remove unnecessary hypercall
 
 There's a hypercall being issued in order to determine whether PV64 is
 supported, but since Xen 4.3 that's strictly true so it's not required.
@@ -56,5 +56,5 @@ index ce7ab0eb8c..ce4e07d3e8 100755
         cfg["ramdisk"] = "/platform/i86pc/amd64/boot_archive"
     elif fs.file_exists("/platform/i86xpv/kernel/unix"):
 --
-2.42.0
+2.43.0
diff --git a/0020-tools-pygrub-Small-refactors.patch b/0020-tools-pygrub-Small-refactors.patch
index 5b24800..937cf4c 100644
--- a/0020-tools-pygrub-Small-refactors.patch
+++ b/0020-tools-pygrub-Small-refactors.patch
@@ -1,7 +1,7 @@
 From 4f46a077fde520dcdc466da611d7abd124f260f8 Mon Sep 17 00:00:00 2001
 From: Alejandro Vallejo <alejandro.vallejo@cloud.com>
 Date: Mon, 25 Sep 2023 18:32:22 +0100
-Subject: [PATCH 20/27] tools/pygrub: Small refactors
+Subject: [PATCH 20/30] tools/pygrub: Small refactors
 
 Small tidy up to ensure output_directory always has a trailing '/' to
 ease concatenating paths and that `output` can only be a filename or None.
@@ -61,5 +61,5 @@ index ce4e07d3e8..1042c05b86 100755
     else:
         fd = os.open(output, os.O_WRONLY)
 --
-2.42.0
+2.43.0
diff --git a/0021-tools-pygrub-Open-the-output-files-earlier.patch b/0021-tools-pygrub-Open-the-output-files-earlier.patch
index 7eb13b8..6cc4627 100644
--- a/0021-tools-pygrub-Open-the-output-files-earlier.patch
+++ b/0021-tools-pygrub-Open-the-output-files-earlier.patch
@@ -1,7 +1,7 @@
 From d01f651da05b77714f0f172501993121b77039a7 Mon Sep 17 00:00:00 2001
 From: Alejandro Vallejo <alejandro.vallejo@cloud.com>
 Date: Mon, 25 Sep 2023 18:32:23 +0100
-Subject: [PATCH 21/27] tools/pygrub: Open the output files earlier
+Subject: [PATCH 21/30] tools/pygrub: Open the output files earlier
 
 This patch allows pygrub to get ahold of every RW file descriptor it needs
 early on. A later patch will clamp the filesystem it can access so it can't
@@ -101,5 +101,5 @@ index 1042c05b86..91e2ec2ab1 100755
     args = None
     if chosencfg["args"]:
 --
-2.42.0
+2.43.0
diff --git a/0022-tools-libfsimage-Export-a-new-function-to-preload-al.patch b/0022-tools-libfsimage-Export-a-new-function-to-preload-al.patch
index 3128eef..6b96640 100644
--- a/0022-tools-libfsimage-Export-a-new-function-to-preload-al.patch
+++ b/0022-tools-libfsimage-Export-a-new-function-to-preload-al.patch
@@ -1,7 +1,7 @@
 From c1159b5ed4ad7fadc5c650f749b072da9a78fb13 Mon Sep 17 00:00:00 2001
 From: Alejandro Vallejo <alejandro.vallejo@cloud.com>
 Date: Mon, 25 Sep 2023 18:32:24 +0100
-Subject: [PATCH 22/27] tools/libfsimage: Export a new function to preload all
+Subject: [PATCH 22/30] tools/libfsimage: Export a new function to preload all
  plugins
 
 This is work required in order to let pygrub operate in highly deprivileged
@@ -122,5 +122,5 @@ index 2ebbbe35df..92fbf2851f 100644
       METH_VARARGS|METH_KEYWORDS, fsimage_open__doc__ },
     { "getbootstring", (PyCFunction)fsimage_getbootstring,
 --
-2.42.0
+2.43.0
diff --git a/0023-tools-pygrub-Deprivilege-pygrub.patch b/0023-tools-pygrub-Deprivilege-pygrub.patch
index 8885b99..dc872e1 100644
--- a/0023-tools-pygrub-Deprivilege-pygrub.patch
+++ b/0023-tools-pygrub-Deprivilege-pygrub.patch
@@ -1,7 +1,7 @@
 From 1395852e1bc352bf727d18ebe33426e279cdc967 Mon Sep 17 00:00:00 2001
 From: Alejandro Vallejo <alejandro.vallejo@cloud.com>
 Date: Mon, 25 Sep 2023 18:32:25 +0100
-Subject: [PATCH 23/27] tools/pygrub: Deprivilege pygrub
+Subject: [PATCH 23/30] tools/pygrub: Deprivilege pygrub
 
 Introduce a --runas=<uid> flag to deprivilege pygrub on Linux and *BSDs.
 It also implicitly creates a chroot env where it drops a deprivileged forked
@@ -303,5 +303,5 @@ index 91e2ec2ab1..7cea496ade 100755
     args = None
 --
-2.42.0
+2.43.0
diff --git a/0024-libxl-add-support-for-running-bootloader-in-restrict.patch b/0024-libxl-add-support-for-running-bootloader-in-restrict.patch
index 89d7299..a7c4cd3 100644
--- a/0024-libxl-add-support-for-running-bootloader-in-restrict.patch
+++ b/0024-libxl-add-support-for-running-bootloader-in-restrict.patch
@@ -1,7 +1,7 @@
 From 5182683fffa6b1d4c940203bbb85bb054558c137 Mon Sep 17 00:00:00 2001
 From: Roger Pau Monne <roger.pau@citrix.com>
 Date: Mon, 25 Sep 2023 14:30:20 +0200
-Subject: [PATCH 24/27] libxl: add support for running bootloader in restricted
+Subject: [PATCH 24/30] libxl: add support for running bootloader in restricted
  mode
 MIME-Version: 1.0
 Content-Type: text/plain; charset=UTF-8
@@ -247,5 +247,5 @@ index cc27c72ecf..8415d1feed 100644
 /*
 --
-2.42.0
+2.43.0
diff --git a/0025-libxl-limit-bootloader-execution-in-restricted-mode.patch b/0025-libxl-limit-bootloader-execution-in-restricted-mode.patch
index 1b5fef6..93d6d8b 100644
--- a/0025-libxl-limit-bootloader-execution-in-restricted-mode.patch
+++ b/0025-libxl-limit-bootloader-execution-in-restricted-mode.patch
@@ -1,7 +1,7 @@
 From a157b71cf530603d794d16eca3dd92ce83d4d55f Mon Sep 17 00:00:00 2001
 From: Roger Pau Monne <roger.pau@citrix.com>
 Date: Thu, 28 Sep 2023 12:22:35 +0200
-Subject: [PATCH 25/27] libxl: limit bootloader execution in restricted mode
+Subject: [PATCH 25/30] libxl: limit bootloader execution in restricted mode
 MIME-Version: 1.0
 Content-Type: text/plain; charset=UTF-8
 Content-Transfer-Encoding: 8bit
@@ -154,5 +154,5 @@ index 8415d1feed..a9581289f4 100644
     int nargs, argsspace;
     const char **args;
 --
-2.42.0
+2.43.0
diff --git a/0026-x86-svm-Fix-asymmetry-with-AMD-DR-MASK-context-switc.patch b/0026-x86-svm-Fix-asymmetry-with-AMD-DR-MASK-context-switc.patch
index 97f95f3..6bc6827 100644
--- a/0026-x86-svm-Fix-asymmetry-with-AMD-DR-MASK-context-switc.patch
+++ b/0026-x86-svm-Fix-asymmetry-with-AMD-DR-MASK-context-switc.patch
@@ -1,7 +1,7 @@
 From 3c22a9bf8703a297431ac5ad110e6d523758eae1 Mon Sep 17 00:00:00 2001
 From: Andrew Cooper <andrew.cooper3@citrix.com>
 Date: Tue, 26 Sep 2023 20:06:57 +0100
-Subject: [PATCH 26/27] x86/svm: Fix asymmetry with AMD DR MASK context
+Subject: [PATCH 26/30] x86/svm: Fix asymmetry with AMD DR MASK context
  switching
 
 The handling of MSR_DR{0..3}_MASK is asymmetric between PV and HVM guests.
@@ -100,5 +100,5 @@ index f7992ff230..a142a63dd8 100644
 {
     wrmsrl(MSR_AMD64_DR0_ADDRESS_MASK, curr->arch.msrs->dr_mask[0]);
 --
-2.42.0
+2.43.0
diff --git a/0027-x86-pv-Correct-the-auditing-of-guest-breakpoint-addr.patch b/0027-x86-pv-Correct-the-auditing-of-guest-breakpoint-addr.patch
index 353a5e3..01f2f1d 100644
--- a/0027-x86-pv-Correct-the-auditing-of-guest-breakpoint-addr.patch
+++ b/0027-x86-pv-Correct-the-auditing-of-guest-breakpoint-addr.patch
@@ -1,7 +1,7 @@
 From 29efce0f8f10e381417a61f2f9988b40d4f6bcf0 Mon Sep 17 00:00:00 2001
 From: Andrew Cooper <andrew.cooper3@citrix.com>
 Date: Tue, 26 Sep 2023 20:06:57 +0100
-Subject: [PATCH 27/27] x86/pv: Correct the auditing of guest breakpoint
+Subject: [PATCH 27/30] x86/pv: Correct the auditing of guest breakpoint
  addresses
 MIME-Version: 1.0
 Content-Type: text/plain; charset=UTF-8
@@ -82,5 +82,5 @@ index c57914efc6..cc29826524 100644
 void activate_debugregs(const struct vcpu *);
 --
-2.42.0
+2.43.0
diff --git a/0028-iommu-amd-vi-use-correct-level-for-quarantine-domain.patch b/0028-iommu-amd-vi-use-correct-level-for-quarantine-domain.patch
new file mode 100644
index 0000000..968ee32
--- /dev/null
+++ b/0028-iommu-amd-vi-use-correct-level-for-quarantine-domain.patch
@@ -0,0 +1,64 @@
+From 6fea3835d91aaa048f66036c34c25532e2dd2b67 Mon Sep 17 00:00:00 2001
+From: Roger Pau Monne <roger.pau@citrix.com>
+Date: Wed, 11 Oct 2023 13:14:21 +0200
+Subject: [PATCH 28/30] iommu/amd-vi: use correct level for quarantine domain
+ page tables
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+The current setup of the quarantine page tables assumes that the quarantine
+domain (dom_io) has been initialized with an address width of
+DEFAULT_DOMAIN_ADDRESS_WIDTH (48).
+
+However dom_io being a PV domain gets the AMD-Vi IOMMU page tables levels based
+on the maximum (hot pluggable) RAM address, and hence on systems with no RAM
+above the 512GB mark only 3 page-table levels are configured in the IOMMU.
+
+On systems without RAM above the 512GB boundary amd_iommu_quarantine_init()
+will setup page tables for the scratch page with 4 levels, while the IOMMU will
+be configured to use 3 levels only. The page destined to be used as level 1,
+and to contain a directory of PTEs ends up being the address in a PTE itself,
+and thus level 1 page becomes the leaf page. Without the level mismatch it's
+level 0 page that should be the leaf page instead.
+
+The level 1 page won't be used as such, and hence it's not possible to use it
+to gain access to other memory on the system. However that page is not cleared
+in amd_iommu_quarantine_init() as part of re-initialization of the device
+quarantine page tables, and hence data on the level 1 page can be leaked
+between device usages.
+
+Fix this by making sure the paging levels setup by amd_iommu_quarantine_init()
+match the number configured on the IOMMUs.
+
+Note that IVMD regions are not affected by this issue, as those areas are
+mapped taking the configured paging levels into account.
+
+This is XSA-445 / CVE-2023-46835
+
+Fixes: ea38867831da ('x86 / iommu: set up a scratch page in the quarantine domain')
+Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+(cherry picked from commit fe1e4668b373ec4c1e5602e75905a9fa8cc2be3f)
+---
+ xen/drivers/passthrough/amd/iommu_map.c | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+diff --git a/xen/drivers/passthrough/amd/iommu_map.c b/xen/drivers/passthrough/amd/iommu_map.c
+index cf6f01b633..1b414a413b 100644
+--- a/xen/drivers/passthrough/amd/iommu_map.c
++++ b/xen/drivers/passthrough/amd/iommu_map.c
+@@ -654,9 +654,7 @@ static int fill_qpt(union amd_iommu_pte *this, unsigned int level,
+ int amd_iommu_quarantine_init(struct pci_dev *pdev, bool scratch_page)
+ {
+     struct domain_iommu *hd = dom_iommu(dom_io);
+-    unsigned long end_gfn =
+-        1ul << (DEFAULT_DOMAIN_ADDRESS_WIDTH - PAGE_SHIFT);
+-    unsigned int level = amd_iommu_get_paging_mode(end_gfn);
++    unsigned int level = hd->arch.amd.paging_mode;
+     unsigned int req_id = get_dma_requestor_id(pdev->seg, pdev->sbdf.bdf);
+     const struct ivrs_mappings *ivrs_mappings = get_ivrs_mappings(pdev->seg);
+     int rc;
+-- 
+2.43.0
+
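The 48-bit/512GB numbers in that commit message follow from simple arithmetic: with 4K pages a leaf covers 12 bits and each page-table level resolves a further 9 bits, so n levels cover 12 + 9n bits. A worked illustration (this is not Xen's amd_iommu_get_paging_mode(), just the underlying arithmetic):

    /* Smallest number of page-table levels covering addr_bits of address
     * space, assuming 4K pages (12 bits) and 9 bits per level. */
    static unsigned int levels_for_width(unsigned int addr_bits)
    {
        unsigned int levels = 0;

        while ( 12 + 9 * levels < addr_bits )
            levels++;

        return levels;
    }

    /* levels_for_width(48) == 4 -- DEFAULT_DOMAIN_ADDRESS_WIDTH
     * levels_for_width(39) == 3 -- the 512GB boundary, hence the mismatch */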
diff --git a/0029-x86-spec-ctrl-Remove-conditional-IRQs-on-ness-for-IN.patch b/0029-x86-spec-ctrl-Remove-conditional-IRQs-on-ness-for-IN.patch
new file mode 100644
index 0000000..7b371bc
--- /dev/null
+++ b/0029-x86-spec-ctrl-Remove-conditional-IRQs-on-ness-for-IN.patch
@@ -0,0 +1,113 @@
+From 4dfe95177b948d1f3ed27a801f603ed7f1bc36e8 Mon Sep 17 00:00:00 2001
+From: Andrew Cooper <andrew.cooper3@citrix.com>
+Date: Thu, 26 Oct 2023 14:37:38 +0100
+Subject: [PATCH 29/30] x86/spec-ctrl: Remove conditional IRQs-on-ness for INT
+ $0x80/0x82 paths
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Before speculation defences, some paths in Xen could genuinely get away with
+being IRQs-on at entry. But XPTI invalidated this property on most paths, and
+attempting to maintain it on the remaining paths was a mistake.
+
+Fast forward, and DO_SPEC_CTRL_COND_IBPB (protection for AMD BTC/SRSO) is not
+IRQ-safe, running with IRQs enabled in some cases. The other actions taken on
+these paths happen to be IRQ-safe.
+
+Make entry_int82() and int80_direct_trap() unconditionally Interrupt Gates
+rather than Trap Gates. Remove the conditional re-adjustment of
+int80_direct_trap() in smp_prepare_cpus(), and have entry_int82() explicitly
+enable interrupts when safe to do so.
+
+In smp_prepare_cpus(), with the conditional re-adjustment removed, the
+clearing of pv_cr3 is the only remaining action gated on XPTI, and it is out
+of place anyway, repeating work already done by smp_prepare_boot_cpu(). Drop
+the entire if() condition to avoid leaving an incorrect vestigial remnant.
+
+Also drop comments which make incorrect statements about when its safe to
+enable interrupts.
+
+This is XSA-446 / CVE-2023-46836
+
+Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Reviewed-by: Roger Pau Monné <roger.pau@citrix.com>
+(cherry picked from commit a48bb129f1b9ff55c22cf6d2b589247c8ba3b10e)
+---
+ xen/arch/x86/pv/traps.c            |  4 ++--
+ xen/arch/x86/smpboot.c             | 14 --------------
+ xen/arch/x86/x86_64/compat/entry.S |  2 ++
+ xen/arch/x86/x86_64/entry.S        |  1 -
+ 4 files changed, 4 insertions(+), 17 deletions(-)
+
+diff --git a/xen/arch/x86/pv/traps.c b/xen/arch/x86/pv/traps.c
+index 1e05a9f1cd..2fc24136c9 100644
+--- a/xen/arch/x86/pv/traps.c
++++ b/xen/arch/x86/pv/traps.c
+@@ -149,11 +149,11 @@ void __init pv_trap_init(void)
+ #ifdef CONFIG_PV32
+     /* The 32-on-64 hypercall vector is only accessible from ring 1. */
+     _set_gate(idt_table + HYPERCALL_VECTOR,
+-              SYS_DESC_trap_gate, 1, entry_int82);
++              SYS_DESC_irq_gate, 1, entry_int82);
+ #endif
+ 
+     /* Fast trap for int80 (faster than taking the #GP-fixup path). */
+-    _set_gate(idt_table + LEGACY_SYSCALL_VECTOR, SYS_DESC_trap_gate, 3,
++    _set_gate(idt_table + LEGACY_SYSCALL_VECTOR, SYS_DESC_irq_gate, 3,
+               &int80_direct_trap);
+ 
+     open_softirq(NMI_SOFTIRQ, nmi_softirq);
+diff --git a/xen/arch/x86/smpboot.c b/xen/arch/x86/smpboot.c
+index 0694173173..4a10a1869a 100644
+--- a/xen/arch/x86/smpboot.c
++++ b/xen/arch/x86/smpboot.c
+@@ -1167,20 +1167,6 @@ void __init smp_prepare_cpus(void)
+ 
+     stack_base[0] = (void *)((unsigned long)stack_start & ~(STACK_SIZE - 1));
+ 
+-    if ( opt_xpti_hwdom || opt_xpti_domu )
+-    {
+-        get_cpu_info()->pv_cr3 = 0;
+-
+-#ifdef CONFIG_PV
+-        /*
+-         * All entry points which may need to switch page tables have to start
+-         * with interrupts off. Re-write what pv_trap_init() has put there.
+-         */
+-        _set_gate(idt_table + LEGACY_SYSCALL_VECTOR, SYS_DESC_irq_gate, 3,
+-                  &int80_direct_trap);
+-#endif
+-    }
+-
+     set_nr_sockets();
+ 
+     socket_cpumask = xzalloc_array(cpumask_t *, nr_sockets);
+diff --git a/xen/arch/x86/x86_64/compat/entry.S b/xen/arch/x86/x86_64/compat/entry.S
+index b86d38d1c5..253bb1688c 100644
+--- a/xen/arch/x86/x86_64/compat/entry.S
++++ b/xen/arch/x86/x86_64/compat/entry.S
+@@ -21,6 +21,8 @@ ENTRY(entry_int82)
+         SPEC_CTRL_ENTRY_FROM_PV /* Req: %rsp=regs/cpuinfo, %rdx=0, Clob: acd */
+         /* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
+ 
++        sti
++
+         CR4_PV32_RESTORE
+ 
+         GET_CURRENT(bx)
+diff --git a/xen/arch/x86/x86_64/entry.S b/xen/arch/x86/x86_64/entry.S
+index 88ff5c150f..837a31b405 100644
+--- a/xen/arch/x86/x86_64/entry.S
++++ b/xen/arch/x86/x86_64/entry.S
+@@ -327,7 +327,6 @@ ENTRY(sysenter_entry)
+ #ifdef CONFIG_XEN_SHSTK
+         ALTERNATIVE "", "setssbsy", X86_FEATURE_XEN_SHSTK
+ #endif
+-        /* sti could live here when we don't switch page tables below. */
+         pushq $FLAT_USER_SS
+         pushq $0
+         pushfq
+-- 
+2.43.0
+
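The trap-gate/interrupt-gate distinction doing the work in that patch is architectural: the only behavioural difference between the two x86 IDT gate types is what the CPU does to EFLAGS.IF on entry. The type codes below are the architectural values, shown for illustration (Xen's SYS_DESC_* constants encode the same distinction):

    #define IDT_TYPE_IRQ_GATE  14   /* interrupt gate: CPU clears IF on entry */
    #define IDT_TYPE_TRAP_GATE 15   /* trap gate: IF is left unchanged */

With an interrupt gate, the handler runs IRQs-off until it issues an explicit sti, which is exactly what entry_int82() gains above.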
diff --git a/0030-xen-arm-page-Avoid-pointer-overflow-on-cache-clean-i.patch b/0030-xen-arm-page-Avoid-pointer-overflow-on-cache-clean-i.patch
new file mode 100644
index 0000000..592d2c5
--- /dev/null
+++ b/0030-xen-arm-page-Avoid-pointer-overflow-on-cache-clean-i.patch
@@ -0,0 +1,119 @@
+From e7c3d6ceaf73120098f9213fd12f79fd50e8e588 Mon Sep 17 00:00:00 2001
+From: Michal Orzel <michal.orzel@amd.com>
+Date: Tue, 12 Dec 2023 14:53:13 +0100
+Subject: [PATCH 30/30] xen/arm: page: Avoid pointer overflow on cache clean &
+ invalidate
+
+On Arm32, after cleaning and invalidating the last dcache line of the top
+domheap page i.e. VA = 0xfffff000 (as a result of flushing the page to
+RAM), we end up adding the value of a dcache line size to the pointer
+once again, which results in a pointer arithmetic overflow (with 64B line
+size, operation 0xffffffc0 + 0x40 overflows to 0x0). Such behavior is
+undefined and given the wide range of compiler versions we support, it is
+difficult to determine what could happen in such scenario.
+
+Modify clean_and_invalidate_dcache_va_range() as well as
+clean_dcache_va_range() and invalidate_dcache_va_range() due to similarity
+of handling to prevent pointer arithmetic overflow. Modify the loops to
+use an additional variable to store the index of the next cacheline.
+Add an assert to prevent passing a region that wraps around which is
+illegal and would end up in a page fault anyway (region 0-2MB is
+unmapped). Lastly, return early if size passed is 0.
+
+Note that on Arm64, we don't have this problem given that the max VA
+space we support is 48-bits.
+
+This is XSA-447 / CVE-2023-46837.
+
+Signed-off-by: Michal Orzel <michal.orzel@amd.com>
+Reviewed-by: Julien Grall <jgrall@amazon.com>
+master commit: 190b7f49af6487a9665da63d43adc9d9a5fbd01e
+master date: 2023-12-12 14:01:00 +0100
+---
+ xen/include/asm-arm/page.h | 35 ++++++++++++++++++++++++++++-------
+ 1 file changed, 28 insertions(+), 7 deletions(-)
+
+diff --git a/xen/include/asm-arm/page.h b/xen/include/asm-arm/page.h
+index eff5883ef8..b6784417ed 100644
+--- a/xen/include/asm-arm/page.h
++++ b/xen/include/asm-arm/page.h
+@@ -153,6 +153,13 @@ static inline size_t read_dcache_line_bytes(void)
+ static inline int invalidate_dcache_va_range(const void *p, unsigned long size)
+ {
+     size_t cacheline_mask = dcache_line_bytes - 1;
++    unsigned long idx = 0;
++
++    if ( !size )
++        return 0;
++
++    /* Passing a region that wraps around is illegal */
++    ASSERT(((uintptr_t)p + size - 1) >= (uintptr_t)p);
+ 
+     dsb(sy);           /* So the CPU issues all writes to the range */
+ 
+@@ -165,11 +172,11 @@
+     }
+ 
+     for ( ; size >= dcache_line_bytes;
+-            p += dcache_line_bytes, size -= dcache_line_bytes )
+-        asm volatile (__invalidate_dcache_one(0) : : "r" (p));
++            idx += dcache_line_bytes, size -= dcache_line_bytes )
++        asm volatile (__invalidate_dcache_one(0) : : "r" (p + idx));
+ 
+     if ( size > 0 )
+-        asm volatile (__clean_and_invalidate_dcache_one(0) : : "r" (p));
++        asm volatile (__clean_and_invalidate_dcache_one(0) : : "r" (p + idx));
+ 
+     dsb(sy);           /* So we know the flushes happen before continuing */
+ 
+@@ -179,14 +186,21 @@ static inline int invalidate_dcache_va_range(const void *p, unsigned long size)
+ static inline int clean_dcache_va_range(const void *p, unsigned long size)
+ {
+     size_t cacheline_mask = dcache_line_bytes - 1;
++    unsigned long idx = 0;
++
++    if ( !size )
++        return 0;
++
++    /* Passing a region that wraps around is illegal */
++    ASSERT(((uintptr_t)p + size - 1) >= (uintptr_t)p);
+ 
+     dsb(sy);           /* So the CPU issues all writes to the range */
+     size += (uintptr_t)p & cacheline_mask;
+     size = (size + cacheline_mask) & ~cacheline_mask;
+     p = (void *)((uintptr_t)p & ~cacheline_mask);
+     for ( ; size >= dcache_line_bytes;
+-            p += dcache_line_bytes, size -= dcache_line_bytes )
+-        asm volatile (__clean_dcache_one(0) : : "r" (p));
++            idx += dcache_line_bytes, size -= dcache_line_bytes )
++        asm volatile (__clean_dcache_one(0) : : "r" (p + idx));
+     dsb(sy);           /* So we know the flushes happen before continuing */
+     /* ARM callers assume that dcache_* functions cannot fail. */
+     return 0;
+@@ -196,14 +210,21 @@ static inline int clean_and_invalidate_dcache_va_range
+                            (const void *p, unsigned long size)
+ {
+     size_t cacheline_mask = dcache_line_bytes - 1;
++    unsigned long idx = 0;
++
++    if ( !size )
++        return 0;
++
++    /* Passing a region that wraps around is illegal */
++    ASSERT(((uintptr_t)p + size - 1) >= (uintptr_t)p);
+ 
+     dsb(sy);           /* So the CPU issues all writes to the range */
+     size += (uintptr_t)p & cacheline_mask;
+     size = (size + cacheline_mask) & ~cacheline_mask;
+     p = (void *)((uintptr_t)p & ~cacheline_mask);
+     for ( ; size >= dcache_line_bytes;
+-            p += dcache_line_bytes, size -= dcache_line_bytes )
+-        asm volatile (__clean_and_invalidate_dcache_one(0) : : "r" (p));
++            idx += dcache_line_bytes, size -= dcache_line_bytes )
++        asm volatile (__clean_and_invalidate_dcache_one(0) : : "r" (p + idx));
+     dsb(sy);           /* So we know the flushes happen before continuing */
+     /* ARM callers assume that dcache_* functions cannot fail. */
+     return 0;
+-- 
+2.43.0
+
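The failure mode fixed by that patch is easy to reproduce in miniature: a pointer-increment loop over a range ending at the top of a 32-bit address space forms an out-of-range pointer on its final increment (0xffffffc0 + 0x40 wraps to 0), which is undefined behaviour. Indexing from a fixed base, as the patch does, never forms such a pointer; a sketch with a hypothetical per-line flush helper standing in for the __clean_dcache_one() assembly:

    /* Hypothetical helper: flushes the single cache line containing va. */
    extern void clean_one_line(const void *va);

    static void clean_range(const char *p, unsigned long size,
                            unsigned long line)
    {
        unsigned long idx = 0;

        /* The loop variable is the byte offset, so p + idx is only ever
         * computed for offsets inside the range -- no overflow. */
        for ( ; size >= line; idx += line, size -= line )
            clean_one_line(p + idx);
    }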
@@ -1,6 +1,6 @@
-Xen upstream patchset #0 for 4.16.6-pre
+Xen upstream patchset #1 for 4.16.6-pre
 
 Containing patches from
 RELEASE-4.16.5 (177c7edf16099ff4d804f4ed4d698233b372f334)
 to
-staging-4.16 (29efce0f8f10e381417a61f2f9988b40d4f6bcf0)
+staging-4.16 (e7c3d6ceaf73120098f9213fd12f79fd50e8e588)