From 364cc6703e42a167e223662998592c26a315fd36 Mon Sep 17 00:00:00 2001
From: Florian Schmaus
Date: Wed, 9 Nov 2022 09:54:09 +0100
Subject: Xen 4.15.4-pre-patchset-2

Signed-off-by: Florian Schmaus
---
 ...-Populate-pages-for-GICv2-mapping-in-p2m_.patch | 169 +++++++++++++++++++++
 1 file changed, 169 insertions(+)
 create mode 100644 0069-xen-arm-p2m-Populate-pages-for-GICv2-mapping-in-p2m_.patch

diff --git a/0069-xen-arm-p2m-Populate-pages-for-GICv2-mapping-in-p2m_.patch b/0069-xen-arm-p2m-Populate-pages-for-GICv2-mapping-in-p2m_.patch
new file mode 100644
index 0000000..67cdb7a
--- /dev/null
+++ b/0069-xen-arm-p2m-Populate-pages-for-GICv2-mapping-in-p2m_.patch
@@ -0,0 +1,169 @@
+From f8915cd5dbe0f51e9bb31a54fe40600b839dd707 Mon Sep 17 00:00:00 2001
+From: Henry Wang
+Date: Tue, 25 Oct 2022 09:21:12 +0000
+Subject: [PATCH 069/126] xen/arm: p2m: Populate pages for GICv2 mapping in
+ p2m_init()
+
+Hardware using GICv2 needs to create a P2M mapping of 8KB GICv2 area
+when the domain is created. Considering the worst case of page tables
+which requires 6 P2M pages as the two pages will be consecutive but not
+necessarily in the same L3 page table and keep a buffer, populate 16
+pages as the default value to the P2M pages pool in p2m_init() at the
+domain creation stage to satisfy the GICv2 requirement. For GICv3, the
+above-mentioned P2M mapping is not necessary, but since the allocated
+16 pages here would not be lost, hence populate these pages
+unconditionally.
+
+With the default 16 P2M pages populated, there would be a case that
+failures would happen in the domain creation with P2M pages already in
+use. To properly free the P2M for this case, firstly support the
+optionally preemption of p2m_teardown(), then call p2m_teardown() and
+p2m_set_allocation(d, 0, NULL) non-preemptively in p2m_final_teardown().
+As non-preemptive p2m_teardown() should only return 0, use a
+BUG_ON to confirm that.
+
+Since p2m_final_teardown() is called either after
+domain_relinquish_resources() where relinquish_p2m_mapping() has been
+called, or from failure path of domain_create()/arch_domain_create()
+where mappings that require p2m_put_l3_page() should never be created,
+relinquish_p2m_mapping() is not added in p2m_final_teardown(), add
+in-code comments to refer this.
+
+Fixes: cbea5a1149ca ("xen/arm: Allocate and free P2M pages from the P2M pool")
+Suggested-by: Julien Grall
+Signed-off-by: Henry Wang
+Reviewed-by: Julien Grall
+Reviewed-by: Bertrand Marquis
+(cherry picked from commit: c7cff1188802646eaa38e918e5738da0e84949be)
+---
+ xen/arch/arm/domain.c     |  2 +-
+ xen/arch/arm/p2m.c        | 34 ++++++++++++++++++++++++++++++++--
+ xen/include/asm-arm/p2m.h | 14 ++++++++++----
+ 3 files changed, 43 insertions(+), 7 deletions(-)
+
+diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c
+index a5ffd952ecd0..b11359b8cca3 100644
+--- a/xen/arch/arm/domain.c
++++ b/xen/arch/arm/domain.c
+@@ -1041,7 +1041,7 @@ int domain_relinquish_resources(struct domain *d)
+             return ret;
+ 
+     PROGRESS(p2m):
+-        ret = p2m_teardown(d);
++        ret = p2m_teardown(d, true);
+         if ( ret )
+             return ret;
+ 
+diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
+index 25eb1d84cbc1..f6012f2a538f 100644
+--- a/xen/arch/arm/p2m.c
++++ b/xen/arch/arm/p2m.c
+@@ -1664,7 +1664,7 @@ static void p2m_free_vmid(struct domain *d)
+     spin_unlock(&vmid_alloc_lock);
+ }
+ 
+-int p2m_teardown(struct domain *d)
++int p2m_teardown(struct domain *d, bool allow_preemption)
+ {
+     struct p2m_domain *p2m = p2m_get_hostp2m(d);
+     unsigned long count = 0;
+@@ -1672,6 +1672,9 @@ int p2m_teardown(struct domain *d)
+     unsigned int i;
+     int rc = 0;
+ 
++    if ( page_list_empty(&p2m->pages) )
++        return 0;
++
+     p2m_write_lock(p2m);
+ 
+     /*
+@@ -1695,7 +1698,7 @@ int p2m_teardown(struct domain *d)
+         p2m_free_page(p2m->domain, pg);
+         count++;
+         /* Arbitrarily preempt every 512 iterations */
+-        if ( !(count % 512) && hypercall_preempt_check() )
++        if ( allow_preemption && !(count % 512) && hypercall_preempt_check() )
+         {
+             rc = -ERESTART;
+             break;
+@@ -1715,7 +1718,20 @@ void p2m_final_teardown(struct domain *d)
+     if ( !p2m->domain )
+         return;
+ 
++    /*
++     * No need to call relinquish_p2m_mapping() here because
++     * p2m_final_teardown() is called either after domain_relinquish_resources()
++     * where relinquish_p2m_mapping() has been called, or from failure path of
++     * domain_create()/arch_domain_create() where mappings that require
++     * p2m_put_l3_page() should never be created. For the latter case, also see
++     * comment on top of the p2m_set_entry() for more info.
++     */
++
++    BUG_ON(p2m_teardown(d, false));
+     ASSERT(page_list_empty(&p2m->pages));
++
++    while ( p2m_teardown_allocation(d) == -ERESTART )
++        continue; /* No preemption support here */
+     ASSERT(page_list_empty(&d->arch.paging.p2m_freelist));
+ 
+     if ( p2m->root )
+@@ -1782,6 +1798,20 @@ int p2m_init(struct domain *d)
+     if ( rc )
+         return rc;
+ 
++    /*
++     * Hardware using GICv2 needs to create a P2M mapping of 8KB GICv2 area
++     * when the domain is created. Considering the worst case for page
++     * tables and keep a buffer, populate 16 pages to the P2M pages pool here.
++     * For GICv3, the above-mentioned P2M mapping is not necessary, but since
++     * the allocated 16 pages here would not be lost, hence populate these
++     * pages unconditionally.
++     */
++    spin_lock(&d->arch.paging.lock);
++    rc = p2m_set_allocation(d, 16, NULL);
++    spin_unlock(&d->arch.paging.lock);
++    if ( rc )
++        return rc;
++
+     return 0;
+ }
+ 
+diff --git a/xen/include/asm-arm/p2m.h b/xen/include/asm-arm/p2m.h
+index 18675b234570..ea7ca41d82b2 100644
+--- a/xen/include/asm-arm/p2m.h
++++ b/xen/include/asm-arm/p2m.h
+@@ -194,14 +194,18 @@ int p2m_init(struct domain *d);
+ 
+ /*
+  * The P2M resources are freed in two parts:
+- * - p2m_teardown() will be called when relinquish the resources. It
+- *   will free large resources (e.g. intermediate page-tables) that
+- *   requires preemption.
++ * - p2m_teardown() will be called preemptively when relinquish the
++ *   resources, in which case it will free large resources (e.g. intermediate
++ *   page-tables) that requires preemption.
+  * - p2m_final_teardown() will be called when domain struct is been
+  *   freed. This *cannot* be preempted and therefore one small
+  *   resources should be freed here.
++ *  Note that p2m_final_teardown() will also call p2m_teardown(), to properly
++ *  free the P2M when failures happen in the domain creation with P2M pages
++ *  already in use. In this case p2m_teardown() is called non-preemptively and
++ *  p2m_teardown() will always return 0.
+  */
+-int p2m_teardown(struct domain *d);
++int p2m_teardown(struct domain *d, bool allow_preemption);
+ void p2m_final_teardown(struct domain *d);
+ 
+ /*
+@@ -266,6 +270,8 @@ mfn_t p2m_get_entry(struct p2m_domain *p2m, gfn_t gfn,
+ /*
+  * Direct set a p2m entry: only for use by the P2M code.
+  * The P2M write lock should be taken.
++ * TODO: Add a check in __p2m_set_entry() to avoid creating a mapping in
++ * arch_domain_create() that requires p2m_put_l3_page() to be called.
+  */
+ int p2m_set_entry(struct p2m_domain *p2m,
+                   gfn_t sgfn,
+-- 
+2.37.4
+