Commit fa6dc087 authored by David Vrabel, committed by Julien Grall

page_alloc: assert IRQs are enabled in heap alloc/free

Heap pages can only be safely allocated and freed with interrupts
enabled, as doing so may require a TLB flush, which may send IPIs (on x86).

Normally spinlock debugging would catch calls from the incorrect
context, but not from stop_machine_run() action functions, as these are
called with spinlock debugging disabled.

Enhance the assertions in alloc_xenheap_pages() and
alloc_domheap_pages() to check that interrupts are enabled. For
consistency, the same asserts are used when freeing heap pages.

As an exception, when only 1 PCPU is online, allocations are permitted
with interrupts disabled, since any TLB flushes would be local only.
This is necessary during early boot.
Signed-off-by: David Vrabel <dvrabel@amazon.co.uk>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
parent fbd24455
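
For illustration only, a minimal sketch of the context check the new macro encodes, written as a standalone predicate. The helper name alloc_context_ok() is hypothetical; in_irq(), local_irq_is_enabled() and num_online_cpus() are the existing Xen helpers used by ASSERT_ALLOC_CONTEXT() in the diff below.

/*
 * Hypothetical predicate mirroring the condition asserted by
 * ASSERT_ALLOC_CONTEXT(): heap pages may only be allocated or freed
 * outside IRQ context, and with IRQs enabled unless only one PCPU is
 * online (in which case any TLB flush is local and needs no IPI).
 */
static bool alloc_context_ok(void)
{
    return !in_irq() &&
           (local_irq_is_enabled() || num_online_cpus() <= 1);
}
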
@@ -162,6 +162,13 @@
 static char __initdata opt_badpage[100] = "";
 string_param("badpage", opt_badpage);
 
+/*
+ * Heap allocations may need TLB flushes which may require IRQs to be
+ * enabled (except when only 1 PCPU is online).
+ */
+#define ASSERT_ALLOC_CONTEXT() \
+    ASSERT(!in_irq() && (local_irq_is_enabled() || num_online_cpus() <= 1))
+
 /*
  * no-bootscrub -> Free pages are not zeroed during boot.
  */
@@ -2160,7 +2167,7 @@ void *alloc_xenheap_pages(unsigned int order, unsigned int memflags)
 {
     struct page_info *pg;
 
-    ASSERT(!in_irq());
+    ASSERT_ALLOC_CONTEXT();
 
     pg = alloc_heap_pages(MEMZONE_XEN, MEMZONE_XEN,
                           order, memflags | MEMF_no_scrub, NULL);
@@ -2173,7 +2180,7 @@ void *alloc_xenheap_pages(unsigned int order, unsigned int memflags)
 
 void free_xenheap_pages(void *v, unsigned int order)
 {
-    ASSERT(!in_irq());
+    ASSERT_ALLOC_CONTEXT();
 
     if ( v == NULL )
         return;
@@ -2202,7 +2209,7 @@ void *alloc_xenheap_pages(unsigned int order, unsigned int memflags)
     struct page_info *pg;
     unsigned int i;
 
-    ASSERT(!in_irq());
+    ASSERT_ALLOC_CONTEXT();
 
     if ( xenheap_bits && (memflags >> _MEMF_bits) > xenheap_bits )
         memflags &= ~MEMF_bits(~0U);
@@ -2224,7 +2231,7 @@ void free_xenheap_pages(void *v, unsigned int order)
     struct page_info *pg;
     unsigned int i;
 
-    ASSERT(!in_irq());
+    ASSERT_ALLOC_CONTEXT();
 
     if ( v == NULL )
         return;
@@ -2249,7 +2256,7 @@ void init_domheap_pages(paddr_t ps, paddr_t pe)
 {
     mfn_t smfn, emfn;
 
-    ASSERT(!in_irq());
+    ASSERT_ALLOC_CONTEXT();
 
     smfn = maddr_to_mfn(round_pgup(ps));
     emfn = maddr_to_mfn(round_pgdown(pe));
@@ -2369,7 +2376,7 @@ struct page_info *alloc_domheap_pages(
     unsigned int bits = memflags >> _MEMF_bits, zone_hi = NR_ZONES - 1;
     unsigned int dma_zone;
 
-    ASSERT(!in_irq());
+    ASSERT_ALLOC_CONTEXT();
 
     bits = domain_clamp_alloc_bitsize(memflags & MEMF_no_owner ? NULL : d,
                                       bits ? : (BITS_PER_LONG+PAGE_SHIFT));
@@ -2419,7 +2426,7 @@ void free_domheap_pages(struct page_info *pg, unsigned int order)
     unsigned int i;
     bool drop_dom_ref;
 
-    ASSERT(!in_irq());
+    ASSERT_ALLOC_CONTEXT();
 
     if ( unlikely(is_xen_heap_page(pg)) )
     {
@@ -2738,7 +2745,7 @@ int __init acquire_domstatic_pages(struct domain *d, mfn_t smfn,
 {
     struct page_info *pg;
 
-    ASSERT(!in_irq());
+    ASSERT_ALLOC_CONTEXT();
 
     pg = acquire_staticmem_pages(smfn, nr_mfns, memflags);
     if ( !pg )