Diffstat (limited to '0047-x86-p2m-refuse-new-allocations-for-dying-domains.patch')
-rw-r--r--  0047-x86-p2m-refuse-new-allocations-for-dying-domains.patch | 100
1 files changed, 0 insertions, 100 deletions
diff --git a/0047-x86-p2m-refuse-new-allocations-for-dying-domains.patch b/0047-x86-p2m-refuse-new-allocations-for-dying-domains.patch
deleted file mode 100644
index 880b68d..0000000
--- a/0047-x86-p2m-refuse-new-allocations-for-dying-domains.patch
+++ /dev/null
@@ -1,100 +0,0 @@
-From 4f9b535194f70582863f2a78f113547d8822b2b9 Mon Sep 17 00:00:00 2001
-From: =?UTF-8?q?Roger=20Pau=20Monn=C3=A9?= <roger.pau@citrix.com>
-Date: Tue, 11 Oct 2022 15:08:28 +0200
-Subject: [PATCH 047/126] x86/p2m: refuse new allocations for dying domains
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
-
-This will in particular prevent any attempts to add entries to the p2m,
-once - in a subsequent change - non-root entries have been removed.
-
-This is part of CVE-2022-33746 / XSA-410.
-
-Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
-Signed-off-by: Jan Beulich <jbeulich@suse.com>
-Acked-by: Tim Deegan <tim@xen.org>
-master commit: ff600a8cf8e36f8ecbffecf96a035952e022ab87
-master date: 2022-10-11 14:23:22 +0200
----
- xen/arch/x86/mm/hap/hap.c | 5 ++++-
- xen/arch/x86/mm/shadow/common.c | 18 ++++++++++++++----
- 2 files changed, 18 insertions(+), 5 deletions(-)
-
-diff --git a/xen/arch/x86/mm/hap/hap.c b/xen/arch/x86/mm/hap/hap.c
-index d75dc2b9ed3d..787991233e53 100644
---- a/xen/arch/x86/mm/hap/hap.c
-+++ b/xen/arch/x86/mm/hap/hap.c
-@@ -245,6 +245,9 @@ static struct page_info *hap_alloc(struct domain *d)
-
- ASSERT(paging_locked_by_me(d));
-
-+ if ( unlikely(d->is_dying) )
-+ return NULL;
-+
- pg = page_list_remove_head(&d->arch.paging.hap.freelist);
- if ( unlikely(!pg) )
- return NULL;
-@@ -281,7 +284,7 @@ static struct page_info *hap_alloc_p2m_page(struct domain *d)
- d->arch.paging.hap.p2m_pages++;
- ASSERT(!page_get_owner(pg) && !(pg->count_info & PGC_count_mask));
- }
-- else if ( !d->arch.paging.p2m_alloc_failed )
-+ else if ( !d->arch.paging.p2m_alloc_failed && !d->is_dying )
- {
- d->arch.paging.p2m_alloc_failed = 1;
- dprintk(XENLOG_ERR, "d%i failed to allocate from HAP pool\n",
-diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
-index fc4f7f78ce43..9ad7e5a88650 100644
---- a/xen/arch/x86/mm/shadow/common.c
-+++ b/xen/arch/x86/mm/shadow/common.c
-@@ -938,6 +938,10 @@ static bool __must_check _shadow_prealloc(struct domain *d, unsigned int pages)
- if ( d->arch.paging.shadow.free_pages >= pages )
- return true;
-
-+ if ( unlikely(d->is_dying) )
-+ /* No reclaim when the domain is dying, teardown will take care of it. */
-+ return false;
-+
- /* Shouldn't have enabled shadows if we've no vcpus. */
- ASSERT(d->vcpu && d->vcpu[0]);
-
-@@ -990,7 +994,7 @@ static bool __must_check _shadow_prealloc(struct domain *d, unsigned int pages)
- d->arch.paging.shadow.free_pages,
- d->arch.paging.shadow.p2m_pages);
-
-- ASSERT(d->is_dying);
-+ ASSERT_UNREACHABLE();
-
- guest_flush_tlb_mask(d, d->dirty_cpumask);
-
-@@ -1004,10 +1008,13 @@ static bool __must_check _shadow_prealloc(struct domain *d, unsigned int pages)
- * to avoid freeing shadows that the caller is currently working on. */
- bool shadow_prealloc(struct domain *d, unsigned int type, unsigned int count)
- {
-- bool ret = _shadow_prealloc(d, shadow_size(type) * count);
-+ bool ret;
-
-- if ( !ret && !d->is_dying &&
-- (!d->is_shutting_down || d->shutdown_code != SHUTDOWN_crash) )
-+ if ( unlikely(d->is_dying) )
-+ return false;
-+
-+ ret = _shadow_prealloc(d, shadow_size(type) * count);
-+ if ( !ret && (!d->is_shutting_down || d->shutdown_code != SHUTDOWN_crash) )
- /*
- * Failing to allocate memory required for shadow usage can only result in
- * a domain crash, do it here rather that relying on every caller to do it.
-@@ -1235,6 +1242,9 @@ shadow_alloc_p2m_page(struct domain *d)
- {
- struct page_info *pg = NULL;
-
-+ if ( unlikely(d->is_dying) )
-+ return NULL;
-+
- /* This is called both from the p2m code (which never holds the
- * paging lock) and the log-dirty code (which always does). */
- paging_lock_recursive(d);
---
-2.37.4
-
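For context, the pattern the dropped patch applied is straightforward: each paging-pool allocation path (hap_alloc(), hap_alloc_p2m_page(), _shadow_prealloc(), shadow_prealloc(), shadow_alloc_p2m_page()) gains an early d->is_dying check, so a dying domain can never grow its p2m/shadow pool again once teardown has started freeing it. Below is a minimal standalone C sketch of that gating pattern; the struct layout and names are simplified stand-ins for illustration, not the real Xen definitions.

/*
 * Minimal standalone sketch of the gating pattern in the patch above:
 * the allocator refuses to hand out pages once the domain is dying,
 * even while its free list is non-empty.
 *
 * struct fields and function names here are simplified stand-ins.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct page_info { struct page_info *next; };

struct domain {
    bool is_dying;                 /* set once domain destruction begins */
    struct page_info *freelist;    /* head of the paging pool free list */
};

static struct page_info *pool_alloc(struct domain *d)
{
    struct page_info *pg;

    /* Refuse new allocations for dying domains (the core of the fix). */
    if (d->is_dying)
        return NULL;

    pg = d->freelist;
    if (pg)
        d->freelist = pg->next;

    return pg;
}

int main(void)
{
    struct page_info page = { .next = NULL };
    struct domain d = { .is_dying = false, .freelist = &page };

    printf("alive: %p\n", (void *)pool_alloc(&d));  /* hands out the page */

    d.freelist = &page;  /* refill the pool... */
    d.is_dying = true;   /* ...but mark the domain dying */
    printf("dying: %p\n", (void *)pool_alloc(&d));  /* NULL despite free pages */

    return 0;
}

The design point mirrors the patch itself: the check lives inside the allocators rather than at each caller, so every existing and future call site is covered, and _shadow_prealloc()'s reclaim path can treat a dying domain as impossible (hence the ASSERT(d->is_dying) to ASSERT_UNREACHABLE() change in the hunk above).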