Commit c24536b6 authored by Jan Beulich

replace d->nr_pirqs sized arrays with radix tree

With this it is questionable whether retaining struct domain's
nr_pirqs is actually necessary - the value now only serves for bounds
checking, and this boundary could easily be nr_irqs.

Note that ia64, the build of which is broken currently anyway, is only
being partially fixed up.

v2: adjustments for split setup/teardown of translation data

v3: re-sync with radix tree implementation changes
Signed-off-by: Jan Beulich <jbeulich@novell.com>
parent eebfd58f
......@@ -155,13 +155,13 @@ void hvm_isa_irq_deassert(struct domain *d, unsigned int isa_irq)
/* dummy */
}
int msixtbl_pt_register(struct domain *d, int pirq, uint64_t gtable)
int msixtbl_pt_register(struct domain *d, struct pirq *pirq, uint64_t gtable)
{
/* dummy */
return -ENOSYS;
}
void msixtbl_pt_unregister(struct domain *d, int pirq)
void msixtbl_pt_unregister(struct domain *d, struct pirq *pirq)
{
/* dummy */
}
......@@ -65,8 +65,11 @@ static long __do_pirq_guest_eoi(struct domain *d, int pirq)
{
if ( pirq < 0 || pirq >= NR_IRQS )
return -EINVAL;
if ( d->arch.pirq_eoi_map )
evtchn_unmask(d->pirq_to_evtchn[pirq]);
if ( d->arch.pirq_eoi_map ) {
spin_lock(&d->event_lock);
evtchn_unmask(pirq_to_evtchn(d, pirq));
spin_unlock(&d->event_lock);
}
return pirq_guest_eoi(d, pirq);
}
......
......@@ -363,15 +363,17 @@ void __do_IRQ_guest(int irq)
irq_desc_t *desc = &irq_desc[irq];
irq_guest_action_t *action = (irq_guest_action_t *)desc->action;
struct domain *d;
struct pirq *pirq;
int i, already_pending = 0;
for ( i = 0; i < action->nr_guests; i++ )
{
d = action->guest[i];
pirq = pirq_info(d, irq);
if ( (action->ack_type != ACKTYPE_NONE) &&
!test_and_set_bit(irq, &d->pirq_mask) )
!test_and_set_bool(pirq->masked) )
action->in_flight++;
if ( hvm_do_IRQ_dpci(d, irq) )
if ( hvm_do_IRQ_dpci(d, pirq) )
{
if ( action->ack_type == ACKTYPE_NONE )
{
......@@ -379,7 +381,7 @@ void __do_IRQ_guest(int irq)
desc->status |= IRQ_INPROGRESS; /* cleared during hvm eoi */
}
}
else if ( send_guest_pirq(d, irq) &&
else if ( send_guest_pirq(d, pirq) &&
(action->ack_type == ACKTYPE_NONE) )
{
already_pending++;
......@@ -423,26 +425,23 @@ static int pirq_acktype(int irq)
return ACKTYPE_NONE;
}
int pirq_guest_eoi(struct domain *d, int irq)
int pirq_guest_eoi(struct domain *d, struct pirq *pirq)
{
irq_desc_t *desc;
irq_guest_action_t *action;
if ( (irq < 0) || (irq >= NR_IRQS) )
return -EINVAL;
desc = &irq_desc[irq];
spin_lock_irq(&desc->lock);
action = (irq_guest_action_t *)desc->action;
if ( action->ack_type == ACKTYPE_NONE )
{
ASSERT(!test_bit(irq, d->pirq_mask));
ASSERT(!pirq->masked);
stop_timer(&irq_guest_eoi_timer[irq]);
_irq_guest_eoi(desc);
}
if ( test_and_clear_bit(irq, &d->pirq_mask) && (--action->in_flight == 0) )
if ( test_and_clear_bool(pirq->masked) && (--action->in_flight == 0) )
{
ASSERT(action->ack_type == ACKTYPE_UNMASK);
desc->handler->end(irq);
......@@ -455,24 +454,28 @@ int pirq_guest_eoi(struct domain *d, int irq)
int pirq_guest_unmask(struct domain *d)
{
int irq;
unsigned int pirq = 0, n, i;
struct pirq *pirqs[16];
shared_info_t *s = d->shared_info;
for ( irq = find_first_bit(d->pirq_mask, NR_IRQS);
irq < NR_IRQS;
irq = find_next_bit(d->pirq_mask, NR_IRQS, irq+1) )
{
if ( !test_bit(d->pirq_to_evtchn[irq], &s->evtchn_mask[0]) )
pirq_guest_eoi(d, irq);
}
do {
n = radix_tree_gang_lookup(&d->pirq_tree, (void **)pirqs, pirq,
ARRAY_SIZE(pirqs));
for ( i = 0; i < n; ++i )
{
pirq = pirqs[i]->pirq;
if ( pirqs[i]->masked &&
!test_bit(pirqs[i]->evtchn, &s->evtchn_mask[0]) )
pirq_guest_eoi(d, pirqs[i]);
}
} while ( ++pirq < d->nr_pirqs && n == ARRAY_SIZE(pirqs) );
return 0;
}
int pirq_guest_bind(struct vcpu *v, int irq, int will_share)
int pirq_guest_bind(struct vcpu *v, struct pirq *pirq, int will_share)
{
irq_desc_t *desc = &irq_desc[irq];
irq_desc_t *desc = &irq_desc[pirq->pirq];
irq_guest_action_t *action;
unsigned long flags;
int rc = 0;
......@@ -492,7 +495,7 @@ int pirq_guest_bind(struct vcpu *v, int irq, int will_share)
{
gdprintk(XENLOG_INFO,
"Cannot bind IRQ %d to guest. In use by '%s'.\n",
irq, desc->action->name);
pirq->pirq, desc->action->name);
rc = -EBUSY;
goto out;
}
......@@ -502,7 +505,7 @@ int pirq_guest_bind(struct vcpu *v, int irq, int will_share)
{
gdprintk(XENLOG_INFO,
"Cannot bind IRQ %d to guest. Out of memory.\n",
irq);
pirq->pirq);
rc = -ENOMEM;
goto out;
}
......@@ -515,7 +518,7 @@ int pirq_guest_bind(struct vcpu *v, int irq, int will_share)
desc->depth = 0;
desc->status |= IRQ_GUEST;
desc->status &= ~IRQ_DISABLED;
desc->handler->startup(irq);
desc->handler->startup(pirq->pirq);
/* Attempt to bind the interrupt target to the correct CPU. */
#if 0 /* FIXME CONFIG_SMP ??? */
......@@ -528,7 +531,7 @@ int pirq_guest_bind(struct vcpu *v, int irq, int will_share)
{
gdprintk(XENLOG_INFO,
"Cannot bind IRQ %d to guest. Will not share with others.\n",
irq);
pirq->pirq);
rc = -EBUSY;
goto out;
}
......@@ -537,7 +540,7 @@ int pirq_guest_bind(struct vcpu *v, int irq, int will_share)
{
gdprintk(XENLOG_INFO,
"Cannot bind IRQ %d to guest. Already at max share.\n",
irq);
pirq->pirq);
rc = -EBUSY;
goto out;
}
......@@ -545,16 +548,16 @@ int pirq_guest_bind(struct vcpu *v, int irq, int will_share)
action->guest[action->nr_guests++] = v->domain;
if ( action->ack_type != ACKTYPE_NONE )
set_pirq_eoi(v->domain, irq);
set_pirq_eoi(v->domain, pirq->pirq);
else
clear_pirq_eoi(v->domain, irq);
clear_pirq_eoi(v->domain, pirq->pirq);
out:
spin_unlock_irqrestore(&desc->lock, flags);
return rc;
}
void pirq_guest_unbind(struct domain *d, int irq)
void pirq_guest_unbind(struct domain *d, int irq, struct pirq *pirq)
{
irq_desc_t *desc = &irq_desc[irq];
irq_guest_action_t *action;
......@@ -572,7 +575,7 @@ void pirq_guest_unbind(struct domain *d, int irq)
action->nr_guests--;
if ( action->ack_type == ACKTYPE_UNMASK )
if ( test_and_clear_bit(irq, &d->pirq_mask) &&
if ( test_and_clear_bool(pirq->masked) &&
(--action->in_flight == 0) )
desc->handler->end(irq);
......
......@@ -42,6 +42,7 @@
#include <xen/cpuidle.h>
#include <xen/trace.h>
#include <xen/sched-if.h>
#include <xen/irq.h>
#include <asm/cache.h>
#include <asm/io.h>
#include <asm/hpet.h>
......
......@@ -591,25 +591,9 @@ int arch_domain_create(struct domain *d, unsigned int domcr_flags)
share_xen_page_with_guest(
virt_to_page(d->shared_info), d, XENSHARE_writable);
d->arch.pirq_irq = xmalloc_array(int, d->nr_pirqs);
if ( !d->arch.pirq_irq )
goto fail;
memset(d->arch.pirq_irq, 0,
d->nr_pirqs * sizeof(*d->arch.pirq_irq));
if ( (rc = init_domain_irq_mapping(d)) != 0 )
goto fail;
if ( is_hvm_domain(d) )
{
d->arch.pirq_emuirq = xmalloc_array(int, d->nr_pirqs);
if ( !d->arch.pirq_emuirq )
goto fail;
for (i = 0; i < d->nr_pirqs; i++)
d->arch.pirq_emuirq[i] = IRQ_UNBOUND;
}
if ( (rc = iommu_domain_init(d)) != 0 )
goto fail;
......@@ -643,8 +627,6 @@ int arch_domain_create(struct domain *d, unsigned int domcr_flags)
fail:
d->is_dying = DOMDYING_dead;
vmce_destroy_msr(d);
xfree(d->arch.pirq_irq);
xfree(d->arch.pirq_emuirq);
cleanup_domain_irq_mapping(d);
free_xenheap_page(d->shared_info);
if ( paging_initialised )
......@@ -697,8 +679,6 @@ void arch_domain_destroy(struct domain *d)
#endif
free_xenheap_page(d->shared_info);
xfree(d->arch.pirq_irq);
xfree(d->arch.pirq_emuirq);
cleanup_domain_irq_mapping(d);
}
......
......@@ -249,32 +249,36 @@ void hvm_migrate_timers(struct vcpu *v)
pt_migrate(v);
}
void hvm_migrate_pirqs(struct vcpu *v)
static int hvm_migrate_pirq(struct domain *d, struct hvm_pirq_dpci *pirq_dpci,
void *arg)
{
int pirq, irq;
struct irq_desc *desc;
struct domain *d = v->domain;
struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
if ( !iommu_enabled || (hvm_irq_dpci == NULL) )
return;
struct vcpu *v = arg;
spin_lock(&d->event_lock);
for ( pirq = find_first_bit(hvm_irq_dpci->mapping, d->nr_pirqs);
pirq < d->nr_pirqs;
pirq = find_next_bit(hvm_irq_dpci->mapping, d->nr_pirqs, pirq + 1) )
if ( (pirq_dpci->flags & HVM_IRQ_DPCI_MACH_MSI) &&
(pirq_dpci->gmsi.dest_vcpu_id == v->vcpu_id) )
{
if ( !(hvm_irq_dpci->mirq[pirq].flags & HVM_IRQ_DPCI_MACH_MSI) ||
(hvm_irq_dpci->mirq[pirq].gmsi.dest_vcpu_id != v->vcpu_id) )
continue;
desc = domain_spin_lock_irq_desc(v->domain, pirq, NULL);
if (!desc)
continue;
irq = desc - irq_desc;
ASSERT(MSI_IRQ(irq));
struct irq_desc *desc =
pirq_spin_lock_irq_desc(d, dpci_pirq(pirq_dpci), NULL);
if ( !desc )
return 0;
ASSERT(MSI_IRQ(desc - irq_desc));
irq_set_affinity(desc, cpumask_of(v->processor));
spin_unlock_irq(&desc->lock);
}
return 0;
}
/*
 * Re-target all of a vCPU's passed-through MSI pirqs after the vCPU has
 * migrated to a new pCPU.  Walks the domain's dpci pirqs under the event
 * lock, applying hvm_migrate_pirq() to each.  No-op unless the IOMMU is
 * enabled and the domain actually has dpci state.
 */
void hvm_migrate_pirqs(struct vcpu *v)
{
    struct domain *currd = v->domain;

    if ( iommu_enabled && currd->arch.hvm_domain.irq.dpci )
    {
        /* event_lock protects the dpci pirq state during iteration. */
        spin_lock(&currd->event_lock);
        pt_pirq_iterate(currd, hvm_migrate_pirq, v);
        spin_unlock(&currd->event_lock);
    }
}
......
......@@ -31,7 +31,9 @@
/* Must be called with hvm_domain->irq_lock hold */
static void assert_irq(struct domain *d, unsigned ioapic_gsi, unsigned pic_irq)
{
int pirq = domain_emuirq_to_pirq(d, ioapic_gsi);
struct pirq *pirq =
pirq_info(d, domain_emuirq_to_pirq(d, ioapic_gsi));
if ( hvm_domain_use_pirq(d, pirq) )
{
send_guest_pirq(d, pirq);
......@@ -44,7 +46,9 @@ static void assert_irq(struct domain *d, unsigned ioapic_gsi, unsigned pic_irq)
/* Must be called with hvm_domain->irq_lock hold */
static void deassert_irq(struct domain *d, unsigned isa_irq)
{
int pirq = domain_emuirq_to_pirq(d, isa_irq);
struct pirq *pirq =
pirq_info(d, domain_emuirq_to_pirq(d, isa_irq));
if ( !hvm_domain_use_pirq(d, pirq) )
vpic_irq_negative_edge(d, isa_irq);
}
......
......@@ -26,6 +26,7 @@
#include <xen/xenoprof.h>
#include <xen/hvm/save.h>
#include <xen/sched.h>
#include <xen/irq.h>
#include <asm/apic.h>
#include <asm/hvm/vlapic.h>
#include <asm/hvm/vpmu.h>
......
......@@ -111,11 +111,10 @@ int vmsi_deliver(
return 1;
}
int vmsi_deliver_pirq(struct domain *d, int pirq)
int vmsi_deliver_pirq(struct domain *d, const struct hvm_pirq_dpci *pirq_dpci)
{
struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
uint32_t flags = hvm_irq_dpci->mirq[pirq].gmsi.gflags;
int vector = hvm_irq_dpci->mirq[pirq].gmsi.gvec;
uint32_t flags = pirq_dpci->gmsi.gflags;
int vector = pirq_dpci->gmsi.gvec;
uint8_t dest = (uint8_t)flags;
uint8_t dest_mode = !!(flags & VMSI_DM_MASK);
uint8_t delivery_mode = (flags & VMSI_DELIV_MASK)
......@@ -127,11 +126,7 @@ int vmsi_deliver_pirq(struct domain *d, int pirq)
"vector=%x trig_mode=%x\n",
dest, dest_mode, delivery_mode, vector, trig_mode);
if ( !(hvm_irq_dpci->mirq[pirq].flags & HVM_IRQ_DPCI_GUEST_MSI) )
{
gdprintk(XENLOG_WARNING, "pirq %x not msi \n", pirq);
return 0;
}
ASSERT(pirq_dpci->flags & HVM_IRQ_DPCI_GUEST_MSI);
vmsi_deliver(d, vector, dest, dest_mode, delivery_mode, trig_mode);
return 1;
......@@ -361,7 +356,7 @@ static void del_msixtbl_entry(struct msixtbl_entry *entry)
call_rcu(&entry->rcu, free_msixtbl_entry);
}
int msixtbl_pt_register(struct domain *d, int pirq, uint64_t gtable)
int msixtbl_pt_register(struct domain *d, struct pirq *pirq, uint64_t gtable)
{
struct irq_desc *irq_desc;
struct msi_desc *msi_desc;
......@@ -370,6 +365,7 @@ int msixtbl_pt_register(struct domain *d, int pirq, uint64_t gtable)
int r = -EINVAL;
ASSERT(spin_is_locked(&pcidevs_lock));
ASSERT(spin_is_locked(&d->event_lock));
/*
* xmalloc() with irq_disabled causes the failure of check_lock()
......@@ -379,7 +375,7 @@ int msixtbl_pt_register(struct domain *d, int pirq, uint64_t gtable)
if ( !new_entry )
return -ENOMEM;
irq_desc = domain_spin_lock_irq_desc(d, pirq, NULL);
irq_desc = pirq_spin_lock_irq_desc(d, pirq, NULL);
if ( !irq_desc )
{
xfree(new_entry);
......@@ -416,7 +412,7 @@ out:
return r;
}
void msixtbl_pt_unregister(struct domain *d, int pirq)
void msixtbl_pt_unregister(struct domain *d, struct pirq *pirq)
{
struct irq_desc *irq_desc;
struct msi_desc *msi_desc;
......@@ -424,8 +420,9 @@ void msixtbl_pt_unregister(struct domain *d, int pirq)
struct msixtbl_entry *entry;
ASSERT(spin_is_locked(&pcidevs_lock));
ASSERT(spin_is_locked(&d->event_lock));
irq_desc = domain_spin_lock_irq_desc(d, pirq, NULL);
irq_desc = pirq_spin_lock_irq_desc(d, pirq, NULL);
if ( !irq_desc )
return;
......
......@@ -22,6 +22,7 @@
#include <xen/config.h>
#include <xen/sched.h>
#include <xen/xenoprof.h>
#include <xen/irq.h>
#include <asm/system.h>
#include <asm/regs.h>
#include <asm/types.h>
......
This diff is collapsed.
......@@ -252,20 +252,28 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_HANDLE(void) arg)
{
case PHYSDEVOP_eoi: {
struct physdev_eoi eoi;
struct pirq *pirq;
ret = -EFAULT;
if ( copy_from_guest(&eoi, arg, 1) != 0 )
break;
ret = -EINVAL;
if ( eoi.irq >= v->domain->nr_pirqs )
break;
spin_lock(&v->domain->event_lock);
pirq = pirq_info(v->domain, eoi.irq);
if ( !pirq ) {
spin_unlock(&v->domain->event_lock);
break;
}
if ( !is_hvm_domain(v->domain) &&
v->domain->arch.pv_domain.pirq_eoi_map )
evtchn_unmask(v->domain->pirq_to_evtchn[eoi.irq]);
evtchn_unmask(pirq->evtchn);
if ( !is_hvm_domain(v->domain) ||
domain_pirq_to_emuirq(v->domain, eoi.irq) == IRQ_PT )
ret = pirq_guest_eoi(v->domain, eoi.irq);
else
ret = 0;
pirq->arch.hvm.emuirq == IRQ_PT )
pirq_guest_eoi(v->domain, pirq);
spin_unlock(&v->domain->event_lock);
ret = 0;
break;
}
......@@ -558,11 +566,23 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_HANDLE(void) arg)
break;
spin_lock(&d->event_lock);
out.pirq = get_free_pirq(d, out.type, 0);
d->arch.pirq_irq[out.pirq] = PIRQ_ALLOCATED;
ret = get_free_pirq(d, out.type, 0);
if ( ret >= 0 )
{
struct pirq *info = pirq_get_info(d, ret);
if ( info )
info->arch.irq = PIRQ_ALLOCATED;
else
ret = -ENOMEM;
}
spin_unlock(&d->event_lock);
ret = copy_to_guest(arg, &out, 1) ? -EFAULT : 0;
if ( ret >= 0 )
{
out.pirq = ret;
ret = copy_to_guest(arg, &out, 1) ? -EFAULT : 0;
}
rcu_unlock_domain(d);
break;
......
......@@ -21,6 +21,7 @@
#include <xen/acpi.h>
#include <xen/cpu.h>
#include <xen/pmstat.h>
#include <xen/irq.h>
#include <asm/current.h>
#include <public/platform.h>
#include <acpi/cpufreq/processor_perf.h>
......
......@@ -293,13 +293,7 @@ struct domain *domain_create(
if ( d->nr_pirqs > nr_irqs )
d->nr_pirqs = nr_irqs;
d->pirq_to_evtchn = xmalloc_array(u16, d->nr_pirqs);
d->pirq_mask = xmalloc_array(
unsigned long, BITS_TO_LONGS(d->nr_pirqs));
if ( (d->pirq_to_evtchn == NULL) || (d->pirq_mask == NULL) )
goto fail;
memset(d->pirq_to_evtchn, 0, d->nr_pirqs * sizeof(*d->pirq_to_evtchn));
bitmap_zero(d->pirq_mask, d->nr_pirqs);
radix_tree_init(&d->pirq_tree);
if ( evtchn_init(d) != 0 )
goto fail;
......@@ -349,6 +343,7 @@ struct domain *domain_create(
{
evtchn_destroy(d);
evtchn_destroy_final(d);
radix_tree_destroy(&d->pirq_tree, free_pirq_struct);
}
if ( init_status & INIT_rangeset )
rangeset_domain_destroy(d);
......@@ -356,8 +351,6 @@ struct domain *domain_create(
watchdog_domain_destroy(d);
if ( init_status & INIT_xsm )
xsm_free_security_domain(d);
xfree(d->pirq_mask);
xfree(d->pirq_to_evtchn);
free_cpumask_var(d->domain_dirty_cpumask);
free_domain_struct(d);
return NULL;
......@@ -683,8 +676,7 @@ static void complete_domain_destroy(struct rcu_head *head)
evtchn_destroy_final(d);
xfree(d->pirq_mask);
xfree(d->pirq_to_evtchn);
radix_tree_destroy(&d->pirq_tree, free_pirq_struct);
xsm_free_security_domain(d);
free_cpumask_var(d->domain_dirty_cpumask);
......@@ -966,6 +958,35 @@ long vm_assist(struct domain *p, unsigned int cmd, unsigned int type)
return -ENOSYS;
}
/*
 * Look up the struct pirq for @pirq in @d's radix tree, allocating and
 * inserting a fresh one on first use.
 * Returns NULL if allocation or tree insertion fails; an entry that could
 * not be inserted is freed again before returning.
 */
struct pirq *pirq_get_info(struct domain *d, int pirq)
{
    struct pirq *info = pirq_info(d, pirq);

    if ( info != NULL )
        return info;

    info = alloc_pirq_struct(d);
    if ( info == NULL )
        return NULL;

    info->pirq = pirq;
    /* Insertion can fail (e.g. -ENOMEM); undo the allocation in that case. */
    if ( radix_tree_insert(&d->pirq_tree, pirq, info) )
    {
        free_pirq_struct(info);
        info = NULL;
    }

    return info;
}
/* RCU callback: actually release a struct pirq once a grace period passed. */
static void _free_pirq_struct(struct rcu_head *head)
{
    struct pirq *pirq = container_of(head, struct pirq, rcu_head);

    xfree(pirq);
}
/*
 * Schedule a struct pirq for freeing.  Deferred via RCU so that lockless
 * readers (radix tree gang lookups) holding a reference across a grace
 * period never see freed memory.  Takes void * so it can double as the
 * radix_tree_destroy() node destructor.
 */
void free_pirq_struct(void *ptr)
{
    struct pirq *info = ptr;

    call_rcu(&info->rcu_head, _free_pirq_struct);
}
struct migrate_info {
long (*func)(void *data);
void *data;
......
......@@ -325,6 +325,7 @@ static long evtchn_bind_pirq(evtchn_bind_pirq_t *bind)
struct evtchn *chn;
struct domain *d = current->domain;
struct vcpu *v = d->vcpu[0];
struct pirq *info;
int port, pirq = bind->pirq;
long rc;
......@@ -336,7 +337,7 @@ static long evtchn_bind_pirq(evtchn_bind_pirq_t *bind)
spin_lock(&d->event_lock);
if ( d->pirq_to_evtchn[pirq] != 0 )
if ( pirq_to_evtchn(d, pirq) != 0 )
ERROR_EXIT(-EEXIST);
if ( (port = get_free_port(d)) < 0 )
......@@ -344,14 +345,18 @@ static long evtchn_bind_pirq(evtchn_bind_pirq_t *bind)
chn = evtchn_from_port(d, port);
d->pirq_to_evtchn[pirq] = port;
info = pirq_get_info(d, pirq);
if ( !info )
ERROR_EXIT(-ENOMEM);
info->evtchn = port;
rc = (!is_hvm_domain(d)
? pirq_guest_bind(
v, pirq, !!(bind->flags & BIND_PIRQ__WILL_SHARE))
? pirq_guest_bind(v, info,
!!(bind->flags & BIND_PIRQ__WILL_SHARE))
: 0);
if ( rc != 0 )
{
d->pirq_to_evtchn[pirq] = 0;
info->evtchn = 0;
pirq_cleanup_check(info, d);
goto out;
}
......@@ -404,12 +409,18 @@ static long __evtchn_close(struct domain *d1, int port1)
case ECS_UNBOUND:
break;
case ECS_PIRQ:
case ECS_PIRQ: {
struct pirq *pirq = pirq_info(d1, chn1->u.pirq.irq);
if ( !pirq )
break;
if ( !is_hvm_domain(d1) )
pirq_guest_unbind(d1, chn1->u.pirq.irq);
d1->pirq_to_evtchn[chn1->u.pirq.irq] = 0;
pirq_guest_unbind(d1, pirq);
pirq->evtchn = 0;
pirq_cleanup_check(pirq, d1);
unlink_pirq_port(chn1, d1->vcpu[chn1->notify_vcpu_id]);
break;
}
case ECS_VIRQ:
for_each_vcpu ( d1, v )
......@@ -659,9 +670,9 @@ void send_guest_global_virq(struct domain *d, int virq)
spin_unlock_irqrestore(&v->virq_lock, flags);
}
int send_guest_pirq(struct domain *d, int pirq)
int send_guest_pirq(struct domain *d, const struct pirq *pirq)
{
int port = d->pirq_to_evtchn[pirq];
int port;
struct evtchn *chn;
/*
......@@ -670,7 +681,7 @@ int send_guest_pirq(struct domain *d, int pirq)
* HVM guests: Port is legitimately zero when the guest disables the
* emulated interrupt/evtchn.
*/
if ( port == 0 )
if ( pirq == NULL || (port = pirq->evtchn) == 0 )
{
BUG_ON(!is_hvm_domain(d));
return 0;
......@@ -812,13 +823,10 @@ int evtchn_unmask(unsigned int port)
struct domain *d = current->domain;
struct vcpu *v;
spin_lock(&d->event_lock);
ASSERT(spin_is_locked(&d->event_lock));
if ( unlikely(!port_is_valid(d, port)) )
{
spin_unlock(&d->event_lock);
return -EINVAL;
}
v = d->vcpu[evtchn_from_port(d, port)->notify_vcpu_id];
......@@ -834,8 +842,6 @@ int evtchn_unmask(unsigned int port)
vcpu_mark_events_pending(v);
}
spin_unlock(&d->event_lock);
return 0;
}
......@@ -960,7 +966,9 @@ long do_event_channel_op(int cmd, XEN_GUEST_HANDLE(void) arg)
struct evtchn_unmask unmask;
if ( copy_from_guest(&unmask, arg, 1) != 0 )
return -EFAULT;
spin_lock(&current->domain->event_lock);
rc = evtchn_unmask(unmask.port);
spin_unlock(&current->domain->event_lock);
break;
}
......
This diff is collapsed.
......@@ -243,12 +243,28 @@ out:
return ret;
}
/*
 * Per-pirq teardown helper (pt_pirq_iterate() callback signature; @arg is
 * unused): unbind the guest from the pirq, stop its EOI timer if one was
 * armed, and free every dev/intx/gsi link hanging off it.
 * Always returns 0 so iteration continues over all pirqs.
 */
static int pci_clean_dpci_irq(struct domain *d,
                              struct hvm_pirq_dpci *pirq_dpci, void *arg)
{
    struct dev_intx_gsi_link *link, *next;

    pirq_guest_unbind(d, dpci_pirq(pirq_dpci));

    if ( pt_irq_need_timer(pirq_dpci->flags) )
        kill_timer(&pirq_dpci->timer);

    /* _safe variant: each node is freed while walking the list. */
    list_for_each_entry_safe ( link, next, &pirq_dpci->digl_list, list )
    {
        list_del(&link->list);
        xfree(link);
    }

    return 0;
}