Diffstat (limited to '0048-x86-p2m-truly-free-paging-pool-memory-for-dying-doma.patch')
-rw-r--r--  0048-x86-p2m-truly-free-paging-pool-memory-for-dying-doma.patch  115
1 file changed, 0 insertions, 115 deletions
diff --git a/0048-x86-p2m-truly-free-paging-pool-memory-for-dying-doma.patch b/0048-x86-p2m-truly-free-paging-pool-memory-for-dying-doma.patch
deleted file mode 100644
index 280b6d8..0000000
--- a/0048-x86-p2m-truly-free-paging-pool-memory-for-dying-doma.patch
+++ /dev/null
@@ -1,115 +0,0 @@
-From 7f055b011a657f8f16b0df242301efb312058eea Mon Sep 17 00:00:00 2001
-From: =?UTF-8?q?Roger=20Pau=20Monn=C3=A9?= <roger.pau@citrix.com>
-Date: Tue, 11 Oct 2022 15:08:42 +0200
-Subject: [PATCH 048/126] x86/p2m: truly free paging pool memory for dying
- domains
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
-
-Modify {hap,shadow}_free to free the page immediately if the domain is
-dying, so that pages don't accumulate in the pool when
-{shadow,hap}_final_teardown() get called. This is to limit the amount of
-work which needs to be done there (in a non-preemptable manner).
-
-Note the call to shadow_free() in shadow_free_p2m_page() is moved after
-increasing total_pages, so that the decrease done in shadow_free() in
-case the domain is dying doesn't underflow the counter, even if just for
-a short interval.
-
-This is part of CVE-2022-33746 / XSA-410.
-
-Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
-Signed-off-by: Jan Beulich <jbeulich@suse.com>
-Acked-by: Tim Deegan <tim@xen.org>
-master commit: f50a2c0e1d057c00d6061f40ae24d068226052ad
-master date: 2022-10-11 14:23:51 +0200
----
- xen/arch/x86/mm/hap/hap.c | 12 ++++++++++++
- xen/arch/x86/mm/shadow/common.c | 28 +++++++++++++++++++++++++---
- 2 files changed, 37 insertions(+), 3 deletions(-)
-
-diff --git a/xen/arch/x86/mm/hap/hap.c b/xen/arch/x86/mm/hap/hap.c
-index 787991233e53..aef2297450e1 100644
---- a/xen/arch/x86/mm/hap/hap.c
-+++ b/xen/arch/x86/mm/hap/hap.c
-@@ -265,6 +265,18 @@ static void hap_free(struct domain *d, mfn_t mfn)
-
- ASSERT(paging_locked_by_me(d));
-
-+ /*
-+ * For dying domains, actually free the memory here. This way less work is
-+ * left to hap_final_teardown(), which cannot easily have preemption checks
-+ * added.
-+ */
-+ if ( unlikely(d->is_dying) )
-+ {
-+ free_domheap_page(pg);
-+ d->arch.paging.hap.total_pages--;
-+ return;
-+ }
-+
- d->arch.paging.hap.free_pages++;
- page_list_add_tail(pg, &d->arch.paging.hap.freelist);
- }
-diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
-index 9ad7e5a88650..366956c146aa 100644
---- a/xen/arch/x86/mm/shadow/common.c
-+++ b/xen/arch/x86/mm/shadow/common.c
-@@ -1184,6 +1184,7 @@ mfn_t shadow_alloc(struct domain *d,
- void shadow_free(struct domain *d, mfn_t smfn)
- {
- struct page_info *next = NULL, *sp = mfn_to_page(smfn);
-+ bool dying = ACCESS_ONCE(d->is_dying);
- struct page_list_head *pin_list;
- unsigned int pages;
- u32 shadow_type;
-@@ -1226,11 +1227,32 @@ void shadow_free(struct domain *d, mfn_t smfn)
- * just before the allocator hands the page out again. */
- page_set_tlbflush_timestamp(sp);
- perfc_decr(shadow_alloc_count);
-- page_list_add_tail(sp, &d->arch.paging.shadow.freelist);
-+
-+ /*
-+ * For dying domains, actually free the memory here. This way less
-+ * work is left to shadow_final_teardown(), which cannot easily have
-+ * preemption checks added.
-+ */
-+ if ( unlikely(dying) )
-+ {
-+ /*
-+ * The backpointer field (sh.back) used by shadow code aliases the
-+ * domain owner field, unconditionally clear it here to avoid
-+ * free_domheap_page() attempting to parse it.
-+ */
-+ page_set_owner(sp, NULL);
-+ free_domheap_page(sp);
-+ }
-+ else
-+ page_list_add_tail(sp, &d->arch.paging.shadow.freelist);
-+
- sp = next;
- }
-
-- d->arch.paging.shadow.free_pages += pages;
-+ if ( unlikely(dying) )
-+ d->arch.paging.shadow.total_pages -= pages;
-+ else
-+ d->arch.paging.shadow.free_pages += pages;
- }
-
- /* Divert a page from the pool to be used by the p2m mapping.
-@@ -1300,9 +1322,9 @@ shadow_free_p2m_page(struct domain *d, struct page_info *pg)
- * paging lock) and the log-dirty code (which always does). */
- paging_lock_recursive(d);
-
-- shadow_free(d, page_to_mfn(pg));
- d->arch.paging.shadow.p2m_pages--;
- d->arch.paging.shadow.total_pages++;
-+ shadow_free(d, page_to_mfn(pg));
-
- paging_unlock(d);
- }
---
-2.37.4
-
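
The removed patch is small enough that its two ideas can be sketched in isolation: (1) when the domain is dying, free paging-pool pages back to the system allocator immediately instead of parking them on the pool freelist, so the non-preemptable final-teardown path has less to do; (2) adjust total_pages before calling the free routine, so the decrement taken on the dying path cannot transiently underflow the counter. Below is a minimal standalone C sketch of that pattern. All identifiers (pool, pool_free, sys_free_page, pool_return_diverted_page) are hypothetical stand-ins that only mirror the structure of hap_free()/shadow_free() and shadow_free_p2m_page(); they are not Xen's API.

/*
 * Hypothetical sketch of the dying-domain fast-free pattern.
 * Not Xen code: names and types are invented for illustration.
 */
#include <stdbool.h>
#include <stdlib.h>

struct page {
    struct page *next;          /* freelist linkage */
};

struct pool {
    bool dying;                 /* mirrors d->is_dying */
    unsigned long total_pages;  /* pages owned by the pool */
    unsigned long free_pages;   /* pages sitting on the freelist */
    struct page *freelist;
};

static void sys_free_page(struct page *pg)
{
    free(pg);                   /* stands in for free_domheap_page() */
}

static void pool_free(struct pool *p, struct page *pg)
{
    /*
     * Dying pool: release the page to the system allocator right away
     * instead of recycling it through the freelist, shrinking the pool
     * so final teardown has less work left to do.
     */
    if (p->dying) {
        sys_free_page(pg);
        p->total_pages--;
        return;
    }

    /* Normal path: keep the page for reuse via the freelist. */
    pg->next = p->freelist;
    p->freelist = pg;
    p->free_pages++;
}

/*
 * Mirrors the reordering in shadow_free_p2m_page(): grow total_pages
 * *before* calling pool_free(), so the decrement taken on the dying
 * path cannot underflow the counter, even briefly.
 */
static void pool_return_diverted_page(struct pool *p, struct page *pg)
{
    p->total_pages++;
    pool_free(p, pg);
}

In the real patch the same split shows up twice: hap_free() takes only the fast-free branch (its freelist handling is unchanged otherwise), while shadow_free() additionally has to clear the page owner before freeing, because the shadow backpointer field aliases it.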