Commit 99266e31 authored by Juergen Gross, committed by Andrew Cooper

xen/sched: fix latent races accessing vcpu->dirty_cpu

The dirty_cpu field of struct vcpu denotes which CPU still holds data
of a vcpu. All accesses to this field should be atomic, as the vcpu
could just be running and the field is accessed without any lock held
in most cases. Especially sync_local_execstate() and context_switch()
running concurrently for the same vcpu risk failing.

There are some instances where accesses are not done atomically, and,
even worse, where multiple accesses are made when a single one would
be mandated.

Correct that in order to avoid potential problems.

Add some assertions to verify dirty_cpu is handled properly.
Signed-off-by: Juergen Gross <jgross@suse.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
parent b492c65d
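
As background, the access pattern the patch enforces can be sketched in
plain C: every access to dirty_cpu goes through a single volatile load or
store, and code that needs the value more than once snapshots it into a
local variable first. This is only an illustrative model, not Xen's actual
code: Xen's read_atomic()/write_atomic() are type-generic macros, and the
fixed-type helpers, the vcpu_stub type, and the VCPU_CPU_CLEAN value below
are assumptions made for the example.

    #include <limits.h>
    #include <stdio.h>

    /*
     * Simplified, fixed-type stand-ins for Xen's type-generic
     * read_atomic()/write_atomic() macros: a volatile access forces the
     * compiler to emit exactly one load/store, so the value cannot be
     * re-read or torn behind the caller's back.
     */
    static unsigned int read_atomic_u(const unsigned int *p)
    {
        return *(const volatile unsigned int *)p;
    }

    static void write_atomic_u(unsigned int *p, unsigned int val)
    {
        *(volatile unsigned int *)p = val;
    }

    #define VCPU_CPU_CLEAN UINT_MAX  /* assumed "not dirty anywhere" marker */

    struct vcpu_stub {               /* hypothetical stand-in for struct vcpu */
        unsigned int dirty_cpu;
    };

    static void sync_example(struct vcpu_stub *v)
    {
        /*
         * Snapshot dirty_cpu exactly once.  Re-reading v->dirty_cpu when
         * issuing the flush could observe a different value than the one
         * just tested - the multiple-access bug the patch removes.
         */
        unsigned int dirty_cpu = read_atomic_u(&v->dirty_cpu);

        if ( dirty_cpu != VCPU_CPU_CLEAN )
            printf("would send flush IPI to CPU %u\n", dirty_cpu);
    }

    int main(void)
    {
        struct vcpu_stub v;

        write_atomic_u(&v.dirty_cpu, 3);              /* state lives on CPU 3 */
        sync_example(&v);
        write_atomic_u(&v.dirty_cpu, VCPU_CPU_CLEAN); /* state fully synced */
        sync_example(&v);

        return 0;
    }

Without the snapshot, the compiler is free to reload v->dirty_cpu between
the test and its later use, so the two reads may see different values.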
@@ -183,7 +183,7 @@ void startup_cpu_idle_loop(void)
 
     ASSERT(is_idle_vcpu(v));
     cpumask_set_cpu(v->processor, v->domain->dirty_cpumask);
-    v->dirty_cpu = v->processor;
+    write_atomic(&v->dirty_cpu, v->processor);
 
     reset_stack_and_jump(idle_loop);
 }
@@ -1769,6 +1769,7 @@ static void __context_switch(void)
 
     if ( !is_idle_domain(pd) )
     {
+        ASSERT(read_atomic(&p->dirty_cpu) == cpu);
         memcpy(&p->arch.user_regs, stack_regs, CTXT_SWITCH_STACK_BYTES);
         vcpu_save_fpu(p);
         pd->arch.ctxt_switch->from(p);
@@ -1832,7 +1833,7 @@ void context_switch(struct vcpu *prev, struct vcpu *next)
 {
     unsigned int cpu = smp_processor_id();
     const struct domain *prevd = prev->domain, *nextd = next->domain;
-    unsigned int dirty_cpu = next->dirty_cpu;
+    unsigned int dirty_cpu = read_atomic(&next->dirty_cpu);
 
     ASSERT(prev != next);
     ASSERT(local_irq_is_enabled());
@@ -1844,6 +1845,7 @@ void context_switch(struct vcpu *prev, struct vcpu *next)
     {
         /* Remote CPU calls __sync_local_execstate() from flush IPI handler. */
        flush_mask(cpumask_of(dirty_cpu), FLUSH_VCPU_STATE);
+        ASSERT(!vcpu_cpu_dirty(next));
     }
 
     _update_runstate_area(prev);
@@ -1956,13 +1958,17 @@ void sync_local_execstate(void)
 
 void sync_vcpu_execstate(struct vcpu *v)
 {
-    if ( v->dirty_cpu == smp_processor_id() )
+    unsigned int dirty_cpu = read_atomic(&v->dirty_cpu);
+
+    if ( dirty_cpu == smp_processor_id() )
         sync_local_execstate();
-    else if ( vcpu_cpu_dirty(v) )
+    else if ( is_vcpu_dirty_cpu(dirty_cpu) )
     {
         /* Remote CPU calls __sync_local_execstate() from flush IPI handler. */
-        flush_mask(cpumask_of(v->dirty_cpu), FLUSH_VCPU_STATE);
+        flush_mask(cpumask_of(dirty_cpu), FLUSH_VCPU_STATE);
     }
+    ASSERT(!is_vcpu_dirty_cpu(dirty_cpu) ||
+           read_atomic(&v->dirty_cpu) != dirty_cpu);
 }
 
 static int relinquish_memory(
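
The new assertion at the end of sync_vcpu_execstate() permits exactly two
outcomes once a previously dirty vcpu has been synced or flushed: either
the vcpu is now clean, or it has since been rescheduled and is dirty on a
different CPU. That is why it compares against the snapshot instead of
asserting the vcpu is clean. A toy model of the two legal cases, assuming
a clean vcpu is marked by a sentinel value in dirty_cpu (the sentinel's
exact value here is an assumption, as in the earlier sketch):

    #include <assert.h>
    #include <limits.h>

    #define VCPU_CPU_CLEAN UINT_MAX           /* assumed sentinel, as above */

    static int is_vcpu_dirty_cpu_model(unsigned int cpu)
    {
        return cpu != VCPU_CPU_CLEAN;
    }

    int main(void)
    {
        unsigned int snapshot = 3;  /* dirty_cpu as sampled before the flush */
        unsigned int now;

        /* Legal outcome 1: the flush completed and the vcpu is clean. */
        now = VCPU_CPU_CLEAN;
        assert(!is_vcpu_dirty_cpu_model(snapshot) || now != snapshot);

        /* Legal outcome 2: the vcpu ran again and is dirty on another CPU. */
        now = 5;
        assert(!is_vcpu_dirty_cpu_model(snapshot) || now != snapshot);

        /*
         * Still dirty on the snapshot CPU would mean the flush failed;
         * that is what the new ASSERT in sync_vcpu_execstate() catches.
         */
        return 0;
    }
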
@@ -316,7 +316,7 @@ static void dump_domains(unsigned char key)
                vcpu_info(v, evtchn_upcall_pending),
                !vcpu_event_delivery_is_enabled(v));
         if ( vcpu_cpu_dirty(v) )
-            printk("dirty_cpu=%u", v->dirty_cpu);
+            printk("dirty_cpu=%u", read_atomic(&v->dirty_cpu));
         printk("\n");
         printk("    pause_count=%d pause_flags=%lx\n",
                atomic_read(&v->pause_count), v->pause_flags);
@@ -844,7 +844,7 @@ static inline bool is_vcpu_dirty_cpu(unsigned int cpu)
 
 static inline bool vcpu_cpu_dirty(const struct vcpu *v)
 {
-    return is_vcpu_dirty_cpu(v->dirty_cpu);
+    return is_vcpu_dirty_cpu(read_atomic(&v->dirty_cpu));
 }
 
 void vcpu_block(void);