Commit 4aad5428 authored by Keir Fraser

VT-d: Share VT-d code between x86 and IA64

Declare arch-dependent functions in vtd.h, and implement them for x86.
Signed-off-by: Weidong Han <weidong.han@intel.com>
parent f342e863
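
The effect of the split is that the shared VT-d files (iommu.c, dmar.c, utils.c) now reach the architecture only through the hooks declared in vtd.h (page-table allocation, domain-page mapping, cache-flush granularity, and the irq.dpci accessors), with the x86 definitions collected in the new x86/vtd.c below. Purely as an illustration of that interface, and not part of this commit, another port such as IA64 would supply its own definitions of these hooks. The sketch below covers only the memory-related ones, and every ia64_*() helper in it is a made-up placeholder, not a real Xen API.

/*
 * Illustrative sketch only, not part of this commit: roughly what a
 * non-x86 backend for the vtd.h hooks could look like.  Every
 * ia64_*() call below is an assumed placeholder, not a real Xen API.
 */
#include <xen/types.h>
#include <xen/iommu.h>
#include "../iommu.h"
#include "../vtd.h"

void *map_vtd_domain_page(u64 maddr)
{
    return ia64_map_iommu_page(maddr);      /* placeholder mapping primitive */
}

void unmap_vtd_domain_page(void *va)
{
    ia64_unmap_iommu_page(va);
}

/* Allocate and zero one page for VT-d tables; return its machine address. */
u64 alloc_pgtable_maddr(void)
{
    return ia64_alloc_zeroed_iommu_page();  /* placeholder allocator */
}

void free_pgtable_maddr(u64 maddr)
{
    if ( maddr != 0 )
        ia64_free_iommu_page(maddr);
}

unsigned int get_clflush_size(void)
{
    /* The common code only needs a granularity to step flushes by. */
    return ia64_iommu_cache_line_size();    /* placeholder constant */
}

Whatever the real IA64 code ends up doing, the benefit of the split is that the common VT-d files stop touching x86-only state such as d->arch.hvm_domain.irq.dpci or cpuid_ebx() directly.
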
@@ -25,7 +25,7 @@ static void pt_irq_time_out(void *data)
{
struct hvm_mirq_dpci_mapping *irq_map = data;
unsigned int guest_gsi, machine_gsi = 0;
struct hvm_irq_dpci *dpci = irq_map->dom->arch.hvm_domain.irq.dpci;
struct hvm_irq_dpci *dpci = domain_get_irq_dpci(irq_map->dom);
struct dev_intx_gsi_link *digl;
uint32_t device, intx;
@@ -49,7 +49,7 @@ static void pt_irq_time_out(void *data)
int pt_irq_create_bind_vtd(
struct domain *d, xen_domctl_bind_pt_irq_t *pt_irq_bind)
{
struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
struct hvm_irq_dpci *hvm_irq_dpci = domain_get_irq_dpci(d);
uint32_t machine_gsi, guest_gsi;
uint32_t device, intx, link;
struct dev_intx_gsi_link *digl;
@@ -65,11 +65,8 @@ int pt_irq_create_bind_vtd(
for ( int i = 0; i < NR_IRQS; i++ )
INIT_LIST_HEAD(&hvm_irq_dpci->mirq[i].digl_list);
if ( cmpxchg((unsigned long *)&d->arch.hvm_domain.irq.dpci,
0, (unsigned long)hvm_irq_dpci) != 0 )
if ( domain_set_irq_dpci(d, hvm_irq_dpci) == 0 )
xfree(hvm_irq_dpci);
hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
}
machine_gsi = pt_irq_bind->machine_irq;
@@ -116,7 +113,7 @@ int pt_irq_create_bind_vtd(
int pt_irq_destroy_bind_vtd(
struct domain *d, xen_domctl_bind_pt_irq_t *pt_irq_bind)
{
struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
struct hvm_irq_dpci *hvm_irq_dpci = domain_get_irq_dpci(d);
uint32_t machine_gsi, guest_gsi;
uint32_t device, intx, link;
struct list_head *digl_list, *tmp;
@@ -133,14 +130,15 @@ int pt_irq_destroy_bind_vtd(
hvm_irq_dpci->link_cnt[link]--;
gdprintk(XENLOG_INFO,
"pt_irq_destroy_bind_vtd: machine_gsi=%d, guest_gsi=%d, device=%d, intx=%d.\n",
machine_gsi, guest_gsi, device, intx);
memset(&hvm_irq_dpci->girq[guest_gsi], 0, sizeof(struct hvm_girq_dpci_mapping));
"pt_irq_destroy_bind_vtd: machine_gsi=%d "
"guest_gsi=%d, device=%d, intx=%d.\n",
machine_gsi, guest_gsi, device, intx);
memset(&hvm_irq_dpci->girq[guest_gsi], 0,
sizeof(struct hvm_girq_dpci_mapping));
/* clear the mirq info */
if ( hvm_irq_dpci->mirq[machine_gsi].valid )
{
list_for_each_safe ( digl_list, tmp,
&hvm_irq_dpci->mirq[machine_gsi].digl_list )
{
@@ -174,10 +172,10 @@ int pt_irq_destroy_bind_vtd(
int hvm_do_IRQ_dpci(struct domain *d, unsigned int mirq)
{
struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
struct hvm_irq_dpci *dpci = domain_get_irq_dpci(d);
if ( !iommu_enabled || (d == dom0) || (hvm_irq->dpci == NULL) ||
!hvm_irq->dpci->mirq[mirq].valid )
if ( !iommu_enabled || (d == dom0) || !dpci ||
!dpci->mirq[mirq].valid )
return 0;
/*
@@ -186,58 +184,18 @@ int hvm_do_IRQ_dpci(struct domain *d, unsigned int mirq)
* this case the guest may not pick up the interrupt (e.g., masked at the
* PIC) and we need to detect that.
*/
set_bit(mirq, hvm_irq->dpci->dirq_mask);
set_timer(&hvm_irq->dpci->hvm_timer[irq_to_vector(mirq)],
set_bit(mirq, dpci->dirq_mask);
set_timer(&dpci->hvm_timer[irq_to_vector(mirq)],
NOW() + PT_IRQ_TIME_OUT);
vcpu_kick(d->vcpu[0]);
return 1;
}
static void hvm_dpci_isairq_eoi(struct domain *d, unsigned int isairq)
{
struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
struct hvm_irq_dpci *dpci = hvm_irq->dpci;
struct dev_intx_gsi_link *digl, *tmp;
int i;
ASSERT(isairq < NR_ISAIRQS);
if ( !iommu_enabled || !dpci ||
!test_bit(isairq, dpci->isairq_map) )
return;
/* Multiple mirq may be mapped to one isa irq */
for ( i = 0; i < NR_IRQS; i++ )
{
if ( !dpci->mirq[i].valid )
continue;
list_for_each_entry_safe ( digl, tmp,
&dpci->mirq[i].digl_list, list )
{
if ( hvm_irq->pci_link.route[digl->link] == isairq )
{
hvm_pci_intx_deassert(d, digl->device, digl->intx);
spin_lock(&dpci->dirq_lock);
if ( --dpci->mirq[i].pending == 0 )
{
spin_unlock(&dpci->dirq_lock);
gdprintk(XENLOG_INFO VTDPREFIX,
"hvm_dpci_isairq_eoi:: mirq = %x\n", i);
stop_timer(&dpci->hvm_timer[irq_to_vector(i)]);
pirq_guest_eoi(d, i);
}
else
spin_unlock(&dpci->dirq_lock);
}
}
}
}
void hvm_dpci_eoi(struct domain *d, unsigned int guest_gsi,
union vioapic_redir_entry *ent)
{
struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
struct hvm_irq_dpci *hvm_irq_dpci = domain_get_irq_dpci(d);
uint32_t device, intx, machine_gsi;
if ( !iommu_enabled || (hvm_irq_dpci == NULL) ||
......
@@ -58,7 +58,7 @@ int assign_device(struct domain *d, u8 bus, u8 devfn)
void iommu_domain_destroy(struct domain *d)
{
struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
struct hvm_irq_dpci *hvm_irq_dpci = domain_get_irq_dpci(d);
uint32_t i;
struct hvm_iommu *hd = domain_hvm_iommu(d);
struct list_head *ioport_list, *digl_list, *tmp;
......
subdir-$(x86) += x86
obj-y += iommu.o
obj-y += dmar.o
obj-y += utils.o
......
@@ -32,6 +32,7 @@
#include "../pci_regs.h"
#include "msi.h"
#include "extern.h"
#include "vtd.h"
#define domain_iommu_domid(d) ((d)->arch.hvm_domain.hvm_iommu.iommu_domid)
@@ -158,11 +159,11 @@ struct iommu_flush *iommu_get_flush(struct iommu *iommu)
return &(iommu->intel->flush);
}
unsigned int x86_clflush_size;
unsigned int clflush_size;
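/* Flush a byte range from the CPU caches, one cache line (clflush_size bytes) at a time. */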
void clflush_cache_range(void *adr, int size)
{
int i;
for ( i = 0; i < size; i += x86_clflush_size )
for ( i = 0; i < size; i += clflush_size )
clflush(adr + i);
}
@@ -172,10 +173,15 @@ static void __iommu_flush_cache(struct iommu *iommu, void *addr, int size)
clflush_cache_range(addr, size);
}
#define iommu_flush_cache_entry(iommu, addr) \
__iommu_flush_cache(iommu, addr, 8)
#define iommu_flush_cache_page(iommu, addr) \
__iommu_flush_cache(iommu, addr, PAGE_SIZE_4K)
void iommu_flush_cache_entry(struct iommu *iommu, void *addr)
{
__iommu_flush_cache(iommu, addr, 8);
}
void iommu_flush_cache_page(struct iommu *iommu, void *addr)
{
__iommu_flush_cache(iommu, addr, PAGE_SIZE_4K);
}
int nr_iommus;
/* context entry handling */
@@ -1954,7 +1960,7 @@ int iommu_setup(void)
INIT_LIST_HEAD(&hd->pdev_list);
/* setup clflush size */
x86_clflush_size = ((cpuid_ebx(1) >> 8) & 0xff) * 8;
clflush_size = get_clflush_size();
/* Allocate IO page directory page for the domain. */
drhd = list_entry(acpi_drhd_units.next, typeof(*drhd), list);
......
@@ -42,4 +42,13 @@ struct IO_APIC_route_remap_entry {
};
};
unsigned int get_clflush_size(void);
u64 alloc_pgtable_maddr(void);
void free_pgtable_maddr(u64 maddr);
void *map_vtd_domain_page(u64 maddr);
void unmap_vtd_domain_page(void *va);
void iommu_flush_cache_entry(struct iommu *iommu, void *addr);
void iommu_flush_cache_page(struct iommu *iommu, void *addr);
#endif // _VTD_H_
/*
* Copyright (c) 2008, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59 Temple
* Place - Suite 330, Boston, MA 02111-1307 USA.
*
* Copyright (C) Allen Kay <allen.m.kay@intel.com>
* Copyright (C) Weidong Han <weidong.han@intel.com>
*/
#include <xen/sched.h>
#include <xen/domain_page.h>
#include <xen/iommu.h>
#include "../iommu.h"
#include "../dmar.h"
#include "../vtd.h"
void *map_vtd_domain_page(u64 maddr)
{
return map_domain_page(maddr >> PAGE_SHIFT_4K);
}
void unmap_vtd_domain_page(void *va)
{
unmap_domain_page(va);
}
/* Allocate page table, return its machine address */
u64 alloc_pgtable_maddr(void)
{
struct page_info *pg;
u64 *vaddr;
struct acpi_drhd_unit *drhd;
struct iommu *iommu;
pg = alloc_domheap_page(NULL, 0);
if ( !pg )    /* allocation can fail; report it as maddr 0 */
return 0;
vaddr = map_domain_page(page_to_mfn(pg));
if ( !vaddr )
return 0;
memset(vaddr, 0, PAGE_SIZE);
drhd = list_entry(acpi_drhd_units.next, typeof(*drhd), list);
iommu = drhd->iommu;
iommu_flush_cache_page(iommu, vaddr);
unmap_domain_page(vaddr);
return page_to_maddr(pg);
}
void free_pgtable_maddr(u64 maddr)
{
if ( maddr != 0 )
free_domheap_page(maddr_to_page(maddr));
}
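/* CPUID leaf 1, EBX bits 15:8 report the CLFLUSH line size in 8-byte units. */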
unsigned int get_clflush_size(void)
{
return ((cpuid_ebx(1) >> 8) & 0xff) * 8;
}
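/*
 * Accessors that keep the x86-specific field d->arch.hvm_domain.irq.dpci
 * out of the shared VT-d code.
 */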
struct hvm_irq_dpci *domain_get_irq_dpci(struct domain *domain)
{
if ( !domain )
return NULL;
return domain->arch.hvm_domain.irq.dpci;
}
int domain_set_irq_dpci(struct domain *domain, struct hvm_irq_dpci *dpci)
{
if ( !domain || !dpci )
return 0;
domain->arch.hvm_domain.irq.dpci = dpci;
return 1;
}
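/*
 * On EOI of an ISA IRQ, deassert the guest PCI INTx lines routed to it
 * and, once no injected instances remain pending, EOI the machine IRQ.
 */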
void hvm_dpci_isairq_eoi(struct domain *d, unsigned int isairq)
{
struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
struct hvm_irq_dpci *dpci = domain_get_irq_dpci(d);
struct dev_intx_gsi_link *digl, *tmp;
int i;
ASSERT(isairq < NR_ISAIRQS);
if ( !vtd_enabled || !dpci ||
!test_bit(isairq, dpci->isairq_map) )
return;
/* Multiple mirq may be mapped to one isa irq */
for ( i = 0; i < NR_IRQS; i++ )
{
if ( !dpci->mirq[i].valid )
continue;
list_for_each_entry_safe ( digl, tmp,
&dpci->mirq[i].digl_list, list )
{
if ( hvm_irq->pci_link.route[digl->link] == isairq )
{
hvm_pci_intx_deassert(d, digl->device, digl->intx);
spin_lock(&dpci->dirq_lock);
if ( --dpci->mirq[i].pending == 0 )
{
spin_unlock(&dpci->dirq_lock);
gdprintk(XENLOG_INFO VTDPREFIX,
"hvm_dpci_isairq_eoi:: mirq = %x\n", i);
stop_timer(&dpci->hvm_timer[irq_to_vector(i)]);
pirq_guest_eoi(d, i);
}
else
spin_unlock(&dpci->dirq_lock);
}
}
}
}
@@ -98,6 +98,9 @@ void io_apic_write_remap_rte(unsigned int apic,
struct qi_ctrl *iommu_qi_ctrl(struct iommu *iommu);
struct ir_ctrl *iommu_ir_ctrl(struct iommu *iommu);
struct iommu_flush *iommu_get_flush(struct iommu *iommu);
void hvm_dpci_isairq_eoi(struct domain *d, unsigned int isairq);
struct hvm_irq_dpci *domain_get_irq_dpci(struct domain *domain);
int domain_set_irq_dpci(struct domain *domain, struct hvm_irq_dpci *dpci);
#define PT_IRQ_TIME_OUT MILLISECS(8)
#define VTDPREFIX "[VT-D]"
......