Commit a43d6abb authored by Keir Fraser

x86 iommu: Define vendor-neutral interface for access to IOMMU.

Signed-off-by: Wei Wang <wei.wang2@amd.com>
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
parent 280e2b12
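
Below is a minimal, self-contained sketch (not part of the patch) of the dispatch pattern this commit introduces: each vendor driver exports a struct iommu_ops, the generic layer selects one at domain-initialisation time based on the CPU vendor, and the common wrappers forward calls through hd->platform_ops. The stub vendor ops, the vendor_is_intel flag, and main() are illustrative stand-ins only; the real names and checks appear in the diff that follows.

/*
 * Sketch of the vendor-neutral IOMMU dispatch added by this commit.
 * Stubs replace the real Intel/AMD drivers; vendor_is_intel stands in
 * for the boot_cpu_data.x86_vendor check done in iommu_domain_init().
 */
#include <stdio.h>

struct domain;                                   /* opaque stand-in */

struct iommu_ops {
    int  (*init)(struct domain *d);
    int  (*assign_device)(struct domain *d, unsigned char bus,
                          unsigned char devfn);
    void (*teardown)(struct domain *d);
    int  (*map_page)(struct domain *d, unsigned long gfn, unsigned long mfn);
    int  (*unmap_page)(struct domain *d, unsigned long gfn);
};

struct hvm_iommu { struct iommu_ops *platform_ops; };

static int stub_init(struct domain *d) { (void)d; return 0; }
static int stub_map(struct domain *d, unsigned long gfn, unsigned long mfn)
{
    (void)d;
    printf("map gfn 0x%lx -> mfn 0x%lx\n", gfn, mfn);
    return 0;
}

/* Stand-ins for the intel_iommu_ops / amd_iommu_ops exported by the drivers. */
static struct iommu_ops intel_ops = { .init = stub_init, .map_page = stub_map };
static struct iommu_ops amd_ops   = { .init = stub_init, .map_page = stub_map };

static int iommu_enabled = 1;
static struct hvm_iommu dom_iommu;               /* per-domain in the real code */

/* Generic layer: pick the vendor ops once, then dispatch through them. */
static int iommu_domain_init(struct domain *d, int vendor_is_intel)
{
    dom_iommu.platform_ops = vendor_is_intel ? &intel_ops : &amd_ops;
    return dom_iommu.platform_ops->init(d);
}

static int iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn)
{
    if ( !iommu_enabled || !dom_iommu.platform_ops )
        return 0;                                /* no-op, as in the patch */
    return dom_iommu.platform_ops->map_page(d, gfn, mfn);
}

int main(void)
{
    iommu_domain_init(NULL, 1);                  /* pretend the boot CPU is Intel */
    return iommu_map_page(NULL, 0x1000, 0x2000);
}
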
@@ -530,7 +530,7 @@ long arch_do_domctl(
u8 bus, devfn;
ret = -EINVAL;
if ( !vtd_enabled )
if ( !iommu_enabled )
break;
bus = (domctl->u.assign_device.machine_bdf >> 16) & 0xff;
@@ -553,7 +553,7 @@ long arch_do_domctl(
u8 bus, devfn;
ret = -EINVAL;
if ( !vtd_enabled )
if ( !iommu_enabled )
break;
if ( unlikely((d = get_domain_by_id(domctl->domain)) == NULL) )
@@ -589,9 +589,9 @@ long arch_do_domctl(
if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
break;
bind = &(domctl->u.bind_pt_irq);
if (vtd_enabled)
if ( iommu_enabled )
ret = pt_irq_create_bind_vtd(d, bind);
if (ret < 0)
if ( ret < 0 )
gdprintk(XENLOG_ERR, "pt_irq_create_bind failed!\n");
rcu_unlock_domain(d);
}
@@ -6,6 +6,7 @@ obj-y += i8254.o
obj-y += instrlen.o
obj-y += intercept.o
obj-y += io.o
obj-y += iommu.o
obj-y += irq.o
obj-y += mtrr.o
obj-y += platform.o
/*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59 Temple
* Place - Suite 330, Boston, MA 02111-1307 USA.
*/
#include <xen/init.h>
#include <xen/irq.h>
#include <xen/spinlock.h>
#include <xen/sched.h>
#include <xen/xmalloc.h>
#include <xen/domain_page.h>
#include <asm/delay.h>
#include <asm/string.h>
#include <asm/mm.h>
#include <asm/iommu.h>
#include <asm/hvm/vmx/intel-iommu.h>
extern struct iommu_ops intel_iommu_ops;
extern struct iommu_ops amd_iommu_ops;
int iommu_domain_init(struct domain *domain)
{
struct hvm_iommu *hd = domain_hvm_iommu(domain);
spin_lock_init(&hd->mapping_lock);
spin_lock_init(&hd->iommu_list_lock);
INIT_LIST_HEAD(&hd->pdev_list);
INIT_LIST_HEAD(&hd->g2m_ioport_list);
if ( !iommu_enabled )
return 0;
switch ( boot_cpu_data.x86_vendor )
{
case X86_VENDOR_INTEL:
hd->platform_ops = &intel_iommu_ops;
break;
case X86_VENDOR_AMD:
hd->platform_ops = &amd_iommu_ops;
break;
default:
BUG();
}
return hd->platform_ops->init(domain);
}
int assign_device(struct domain *d, u8 bus, u8 devfn)
{
struct hvm_iommu *hd = domain_hvm_iommu(d);
if ( !iommu_enabled || !hd->platform_ops)
return 0;
return hd->platform_ops->assign_device(d, bus, devfn);
}
void iommu_domain_destroy(struct domain *d)
{
struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
uint32_t i;
struct hvm_iommu *hd = domain_hvm_iommu(d);
struct list_head *ioport_list, *digl_list, *tmp;
struct g2m_ioport *ioport;
struct dev_intx_gsi_link *digl;
if ( !iommu_enabled || !hd->platform_ops)
return;
if ( hvm_irq_dpci != NULL )
{
for ( i = 0; i < NR_IRQS; i++ )
{
if ( !hvm_irq_dpci->mirq[i].valid )
continue;
pirq_guest_unbind(d, i);
kill_timer(&hvm_irq_dpci->hvm_timer[irq_to_vector(i)]);
list_for_each_safe ( digl_list, tmp,
&hvm_irq_dpci->mirq[i].digl_list )
{
digl = list_entry(digl_list,
struct dev_intx_gsi_link, list);
list_del(&digl->list);
xfree(digl);
}
}
d->arch.hvm_domain.irq.dpci = NULL;
xfree(hvm_irq_dpci);
}
if ( hd )
{
list_for_each_safe ( ioport_list, tmp, &hd->g2m_ioport_list )
{
ioport = list_entry(ioport_list, struct g2m_ioport, list);
list_del(&ioport->list);
xfree(ioport);
}
}
return hd->platform_ops->teardown(d);
}
int iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn)
{
struct hvm_iommu *hd = domain_hvm_iommu(d);
if ( !iommu_enabled || !hd->platform_ops)
return 0;
return hd->platform_ops->map_page(d, gfn, mfn);
}
int iommu_unmap_page(struct domain *d, unsigned long gfn)
{
struct hvm_iommu *hd = domain_hvm_iommu(d);
if ( !iommu_enabled || !hd->platform_ops)
return 0;
return hd->platform_ops->unmap_page(d, gfn);
}
@@ -89,12 +89,14 @@ int __init get_iommu_capabilities(u8 bus, u8 dev, u8 func, u8 cap_ptr,
u32 cap_header, cap_range;
u64 mmio_bar;
#if HACK_BIOS_SETTINGS
/* remove it when BIOS available */
write_pci_config(bus, dev, func,
cap_ptr + PCI_CAP_MMIO_BAR_HIGH_OFFSET, 0x00000000);
write_pci_config(bus, dev, func,
cap_ptr + PCI_CAP_MMIO_BAR_LOW_OFFSET, 0x40000001);
/* remove it when BIOS available */
#endif
mmio_bar = (u64)read_pci_config(bus, dev, func,
cap_ptr + PCI_CAP_MMIO_BAR_HIGH_OFFSET) << 32;
@@ -51,19 +51,17 @@ static void __init init_cleanup(void)
{
struct amd_iommu *iommu;
dprintk(XENLOG_ERR, "AMD IOMMU: %s()\n", __FUNCTION__);
for_each_amd_iommu(iommu) {
for_each_amd_iommu ( iommu )
unmap_iommu_mmio_region(iommu);
}
}
static void __init deallocate_iommu_table_struct(
struct table_struct *table)
struct table_struct *table)
{
if (table->buffer) {
if ( table->buffer )
{
free_xenheap_pages(table->buffer,
get_order_from_bytes(table->alloc_size));
get_order_from_bytes(table->alloc_size));
table->buffer = NULL;
}
}
@@ -76,11 +74,10 @@ static void __init deallocate_iommu_resources(struct amd_iommu *iommu)
static void __init detect_cleanup(void)
{
struct amd_iommu *iommu;
struct amd_iommu *iommu, *next;
dprintk(XENLOG_ERR, "AMD IOMMU: %s()\n", __FUNCTION__);
for_each_amd_iommu(iommu) {
list_for_each_entry_safe ( iommu, next, &amd_iommu_head, list )
{
list_del(&iommu->list);
deallocate_iommu_resources(iommu);
xfree(iommu);
@@ -91,19 +88,21 @@ static int requestor_id_from_bdf(int bdf)
{
/* HACK - HACK */
/* account for possible 'aliasing' by parent device */
return bdf;
return bdf;
}
static int __init allocate_iommu_table_struct(struct table_struct *table,
const char *name)
const char *name)
{
table->buffer = (void *) alloc_xenheap_pages(
get_order_from_bytes(table->alloc_size));
if ( !table->buffer ) {
if ( !table->buffer )
{
dprintk(XENLOG_ERR, "AMD IOMMU: Error allocating %s\n", name);
return -ENOMEM;
}
memset(table->buffer, 0, table->alloc_size);
return 0;
@@ -114,32 +113,32 @@ static int __init allocate_iommu_resources(struct amd_iommu *iommu)
/* allocate 'device table' on a 4K boundary */
iommu->dev_table.alloc_size =
PAGE_ALIGN(((iommu->last_downstream_bus + 1) *
IOMMU_DEV_TABLE_ENTRIES_PER_BUS) *
IOMMU_DEV_TABLE_ENTRY_SIZE);
IOMMU_DEV_TABLE_ENTRIES_PER_BUS) *
IOMMU_DEV_TABLE_ENTRY_SIZE);
iommu->dev_table.entries =
iommu->dev_table.alloc_size / IOMMU_DEV_TABLE_ENTRY_SIZE;
if (allocate_iommu_table_struct(&iommu->dev_table,
"Device Table") != 0)
if ( allocate_iommu_table_struct(&iommu->dev_table,
"Device Table") != 0 )
goto error_out;
/* allocate 'command buffer' in power of 2 increments of 4K */
iommu->cmd_buffer_tail = 0;
iommu->cmd_buffer.alloc_size =
PAGE_SIZE << get_order_from_bytes(
PAGE_ALIGN(amd_iommu_cmd_buffer_entries *
IOMMU_CMD_BUFFER_ENTRY_SIZE));
PAGE_ALIGN(amd_iommu_cmd_buffer_entries *
IOMMU_CMD_BUFFER_ENTRY_SIZE));
iommu->cmd_buffer.entries =
iommu->cmd_buffer.entries =
iommu->cmd_buffer.alloc_size / IOMMU_CMD_BUFFER_ENTRY_SIZE;
if ( allocate_iommu_table_struct(&iommu->cmd_buffer,
"Command Buffer") != 0 )
"Command Buffer") != 0 )
goto error_out;
return 0;
error_out:
error_out:
deallocate_iommu_resources(iommu);
return -ENOMEM;
}
@@ -149,7 +148,8 @@ int iommu_detect_callback(u8 bus, u8 dev, u8 func, u8 cap_ptr)
struct amd_iommu *iommu;
iommu = (struct amd_iommu *) xmalloc(struct amd_iommu);
if ( !iommu ) {
if ( !iommu )
{
dprintk(XENLOG_ERR, "AMD IOMMU: Error allocating amd_iommu\n");
return -ENOMEM;
}
@@ -170,7 +170,7 @@ int iommu_detect_callback(u8 bus, u8 dev, u8 func, u8 cap_ptr)
return 0;
error_out:
error_out:
xfree(iommu);
return -ENODEV;
}
@@ -180,11 +180,12 @@ static int __init amd_iommu_init(void)
struct amd_iommu *iommu;
unsigned long flags;
for_each_amd_iommu(iommu) {
for_each_amd_iommu ( iommu )
{
spin_lock_irqsave(&iommu->lock, flags);
/* register IOMMU data structures in MMIO space */
if (map_iommu_mmio_region(iommu) != 0)
if ( map_iommu_mmio_region(iommu) != 0 )
goto error_out;
register_iommu_dev_table_in_mmio_space(iommu);
register_iommu_cmd_buffer_in_mmio_space(iommu);
@@ -200,7 +201,7 @@ static int __init amd_iommu_init(void)
return 0;
error_out:
error_out:
init_cleanup();
return -ENODEV;
}
@@ -209,13 +210,16 @@ struct amd_iommu *find_iommu_for_device(int bus, int devfn)
{
struct amd_iommu *iommu;
for_each_amd_iommu(iommu) {
if ( bus == iommu->root_bus ) {
if ( devfn >= iommu->first_devfn &&
devfn <= iommu->last_devfn )
for_each_amd_iommu ( iommu )
{
if ( bus == iommu->root_bus )
{
if ( (devfn >= iommu->first_devfn) &&
(devfn <= iommu->last_devfn) )
return iommu;
}
else if ( bus <= iommu->last_downstream_bus ) {
else if ( bus <= iommu->last_downstream_bus )
{
if ( iommu->downstream_bus_present[bus] )
return iommu;
}
@@ -238,16 +242,21 @@ void amd_iommu_setup_domain_device(
dte = iommu->dev_table.buffer +
(requestor_id * IOMMU_DEV_TABLE_ENTRY_SIZE);
spin_lock_irqsave(&iommu->lock, flags);
if ( !amd_iommu_is_dte_page_translation_valid((u32 *)dte) )
{
spin_lock_irqsave(&iommu->lock, flags);
amd_iommu_set_dev_table_entry((u32 *)dte,
root_ptr, hd->domain_id, hd->paging_mode);
amd_iommu_set_dev_table_entry(
(u32 *)dte,
root_ptr, hd->domain_id, hd->paging_mode);
invalidate_dev_table_entry(iommu, requestor_id);
flush_command_buffer(iommu);
dprintk(XENLOG_INFO, "AMD IOMMU: Set DTE req_id:%x, "
"root_ptr:%"PRIx64", domain_id:%d, paging_mode:%d\n",
requestor_id, root_ptr, hd->domain_id, hd->paging_mode);
dprintk(XENLOG_INFO, "AMD IOMMU: Set DTE req_id:%x, "
"root_ptr:%"PRIx64", domain_id:%d, paging_mode:%d\n",
requestor_id, root_ptr, hd->domain_id, hd->paging_mode);
spin_unlock_irqrestore(&iommu->lock, flags);
spin_unlock_irqrestore(&iommu->lock, flags);
}
}
void __init amd_iommu_setup_dom0_devices(void)
@@ -259,13 +268,16 @@ void __init amd_iommu_setup_dom0_devices(void)
u32 l;
int req_id, bdf;
for ( bus = 0; bus < 256; bus++ ) {
for ( dev = 0; dev < 32; dev++ ) {
for ( func = 0; func < 8; func++ ) {
for ( bus = 0; bus < 256; bus++ )
{
for ( dev = 0; dev < 32; dev++ )
{
for ( func = 0; func < 8; func++ )
{
l = read_pci_config(bus, dev, func, PCI_VENDOR_ID);
/* some broken boards return 0 or ~0 if a slot is empty: */
if ( l == 0xffffffff || l == 0x00000000 ||
l == 0x0000ffff || l == 0xffff0000 )
l == 0x0000ffff || l == 0xffff0000 )
continue;
pdev = xmalloc(struct pci_dev);
@@ -288,29 +300,33 @@ int amd_iommu_detect(void)
{
unsigned long i;
if ( !enable_amd_iommu ) {
if ( !enable_amd_iommu )
{
printk("AMD IOMMU: Disabled\n");
return 0;
}
INIT_LIST_HEAD(&amd_iommu_head);
if ( scan_for_iommu(iommu_detect_callback) != 0 ) {
if ( scan_for_iommu(iommu_detect_callback) != 0 )
{
dprintk(XENLOG_ERR, "AMD IOMMU: Error detection\n");
goto error_out;
}
if ( !iommu_found() ) {
if ( !iommu_found() )
{
printk("AMD IOMMU: Not found!\n");
return 0;
}
if ( amd_iommu_init() != 0 ) {
if ( amd_iommu_init() != 0 )
{
dprintk(XENLOG_ERR, "AMD IOMMU: Error initialization\n");
goto error_out;
}
if ( amd_iommu_domain_init(dom0) != 0 )
if ( iommu_domain_init(dom0) != 0 )
goto error_out;
/* setup 1:1 page table for dom0 */
@@ -320,21 +336,31 @@ int amd_iommu_detect(void)
amd_iommu_setup_dom0_devices();
return 0;
error_out:
detect_cleanup();
return -ENODEV;
error_out:
detect_cleanup();
return -ENODEV;
}
static int allocate_domain_resources(struct hvm_iommu *hd)
{
/* allocate root table */
hd->root_table = (void *)alloc_xenheap_page();
unsigned long flags;
spin_lock_irqsave(&hd->mapping_lock, flags);
if ( !hd->root_table )
return -ENOMEM;
memset((u8*)hd->root_table, 0, PAGE_SIZE);
{
hd->root_table = (void *)alloc_xenheap_page();
if ( !hd->root_table )
goto error_out;
memset((u8*)hd->root_table, 0, PAGE_SIZE);
}
spin_unlock_irqrestore(&hd->mapping_lock, flags);
return 0;
error_out:
spin_unlock_irqrestore(&hd->mapping_lock, flags);
return -ENOMEM;
}
static int get_paging_mode(unsigned long entries)
@@ -346,7 +372,8 @@ static int get_paging_mode(unsigned long entries)
if ( entries > max_page )
entries = max_page;
while ( entries > PTE_PER_TABLE_SIZE ) {
while ( entries > PTE_PER_TABLE_SIZE )
{
entries = PTE_PER_TABLE_ALIGN(entries) >> PTE_PER_TABLE_SHIFT;
++level;
if ( level > 6 )
@@ -362,14 +389,11 @@ int amd_iommu_domain_init(struct domain *domain)
{
struct hvm_iommu *hd = domain_hvm_iommu(domain);
spin_lock_init(&hd->mapping_lock);
spin_lock_init(&hd->iommu_list_lock);
INIT_LIST_HEAD(&hd->pdev_list);
/* allocate page directory */
if ( allocate_domain_resources(hd) != 0 ) {
dprintk(XENLOG_ERR, "AMD IOMMU: %s()\n", __FUNCTION__);
goto error_out;
if ( allocate_domain_resources(hd) != 0 )
{
deallocate_domain_resources(hd);
return -ENOMEM;
}
if ( is_hvm_domain(domain) )
@@ -380,10 +404,168 @@ int amd_iommu_domain_init(struct domain *domain)
hd->domain_id = domain->domain_id;
return 0;
}
error_out:
deallocate_domain_resources(hd);
return -ENOMEM;
static void amd_iommu_disable_domain_device(
struct domain *domain, struct amd_iommu *iommu, u16 requestor_id)
{
void *dte;
unsigned long flags;
dte = iommu->dev_table.buffer +
(requestor_id * IOMMU_DEV_TABLE_ENTRY_SIZE);
if ( amd_iommu_is_dte_page_translation_valid((u32 *)dte) )
{
spin_lock_irqsave(&iommu->lock, flags);
memset (dte, 0, IOMMU_DEV_TABLE_ENTRY_SIZE);
invalidate_dev_table_entry(iommu, requestor_id);
flush_command_buffer(iommu);
dprintk(XENLOG_INFO , "AMD IOMMU: disable DTE 0x%x,"
" domain_id:%d, paging_mode:%d\n",
requestor_id, domain_hvm_iommu(domain)->domain_id,
domain_hvm_iommu(domain)->paging_mode);
spin_unlock_irqrestore(&iommu->lock, flags);
}
}
extern void pdev_flr(u8 bus, u8 devfn);
static int reassign_device( struct domain *source, struct domain *target,
u8 bus, u8 devfn)
{
struct hvm_iommu *source_hd = domain_hvm_iommu(source);
struct hvm_iommu *target_hd = domain_hvm_iommu(target);
struct pci_dev *pdev;
struct amd_iommu *iommu;
int req_id, bdf;
unsigned long flags;
for_each_pdev( source, pdev )
{
if ( (pdev->bus != bus) || (pdev->devfn != devfn) )
continue;
pdev->bus = bus;
pdev->devfn = devfn;
bdf = (bus << 8) | devfn;
req_id = requestor_id_from_bdf(bdf);
iommu = find_iommu_for_device(bus, devfn);
if ( iommu )
{
amd_iommu_disable_domain_device(source, iommu, req_id);
/* Move pci device from the source domain to target domain. */
spin_lock_irqsave(&source_hd->iommu_list_lock, flags);
spin_lock_irqsave(&target_hd->iommu_list_lock, flags);
list_move(&pdev->list, &target_hd->pdev_list);
spin_unlock_irqrestore(&target_hd->iommu_list_lock, flags);
spin_unlock_irqrestore(&source_hd->iommu_list_lock, flags);
amd_iommu_setup_domain_device(target, iommu, req_id);
gdprintk(XENLOG_INFO ,
"AMD IOMMU: reassign %x:%x.%x domain %d -> domain %d\n",
bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
source->domain_id, target->domain_id);
}
else
{
gdprintk(XENLOG_ERR , "AMD IOMMU: fail to find iommu."
" %x:%x.%x cannot be assigned to domain %d\n",
bus, PCI_SLOT(devfn), PCI_FUNC(devfn), target->domain_id);
return -ENODEV;
}
break;
}
return 0;
}
int amd_iommu_assign_device(struct domain *d, u8 bus, u8 devfn)
{
pdev_flr(bus, devfn);
return reassign_device(dom0, d, bus, devfn);
}
static void release_domain_devices(struct domain *d)
{
struct hvm_iommu *hd = domain_hvm_iommu(d);
struct pci_dev *pdev;
while ( !list_empty(&hd->pdev_list) )
{
pdev = list_entry(hd->pdev_list.next, typeof(*pdev), list);
pdev_flr(pdev->bus, pdev->devfn);
gdprintk(XENLOG_INFO ,
"AMD IOMMU: release devices %x:%x.%x\n",
pdev->bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
reassign_device(d, dom0, pdev->bus, pdev->devfn);
}
}
static void deallocate_next_page_table(void *table, unsigned long index,
int level)
{
unsigned long next_index;
void *next_table, *pde;
int next_level;
pde = table + (index * IOMMU_PAGE_TABLE_ENTRY_SIZE);
next_table = amd_iommu_get_vptr_from_page_table_entry((u32 *)pde);
if ( next_table )
{
next_level = level - 1;
if ( next_level > 1 )
{
next_index = 0;
do
{
deallocate_next_page_table(next_table,
next_index, next_level);
++next_index;
} while (next_index < PTE_PER_TABLE_SIZE);
}
free_xenheap_page(next_table);
}
}
static void deallocate_iommu_page_tables(struct domain *d)
{
unsigned long index;
struct hvm_iommu *hd = domain_hvm_iommu(d);
if ( hd ->root_table )
{
index = 0;
do
{
deallocate_next_page_table(hd->root_table,
index, hd->paging_mode);
++index;
} while ( index < PTE_PER_TABLE_SIZE );
free_xenheap_page(hd ->root_table);
}
hd ->root_table = NULL;
}
void amd_iommu_domain_destroy(struct domain *d)
{
if ( !amd_iommu_enabled )
return;
deallocate_iommu_page_tables(d);
release_domain_devices(d);
}
struct iommu_ops amd_iommu_ops = {
.init = amd_iommu_domain_init,
.assign_device = amd_iommu_assign_device,
.teardown = amd_iommu_domain_destroy,
.map_page = amd_iommu_map_page,
.unmap_page = amd_iommu_unmap_page,
};
@@ -94,6 +94,46 @@ static void enable_intr_window(struct vcpu *v, struct hvm_intack intack)
vmcb->general1_intercepts |= GENERAL1_INTERCEPT_VINTR;
}
static void svm_dirq_assist(struct vcpu *v)
{
unsigned int irq;
uint32_t device, intx;
struct domain *d = v->domain;
struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
struct dev_intx_gsi_link *digl;
if ( !amd_iommu_enabled || (v->vcpu_id != 0) || (hvm_irq_dpci == NULL) )
return;
for ( irq = find_first_bit(hvm_irq_dpci->dirq_mask, NR_IRQS);
irq < NR_IRQS;
irq = find_next_bit(hvm_irq_dpci->dirq_mask, NR_IRQS, irq + 1) )
{
stop_timer(&hvm_irq_dpci->hvm_timer[irq_to_vector(irq)]);
clear_bit(irq, &hvm_irq_dpci->dirq_mask);
list_for_each_entry ( digl, &hvm_irq_dpci->mirq[irq].digl_list, list )
{
device = digl->device;
intx = digl->intx;
hvm_pci_intx_assert(d, device, intx);
spin_lock(&hvm_irq_dpci->dirq_lock);
hvm_irq_dpci->mirq[irq].pending++;
spin_unlock(&hvm_irq_dpci->dirq_lock);
}
/*
* Set a timer to see if the guest can finish the interrupt or not. For
* example, the guest OS may unmask the PIC during boot, before the
* guest driver is loaded. hvm_pci_intx_assert() may succeed, but the
* guest will never deal with the irq, then the physical interrupt line
* will never be deasserted.
*/
set_timer(&hvm_irq_dpci->hvm_timer[irq_to_vector(irq)],
NOW() + PT_IRQ_TIME_OUT);
}
}
asmlinkage void svm_intr_assist(void)
{
struct vcpu *v = current;
@@ -102,6 +142,7 @@ asmlinkage void svm_intr_assist(void)
/* Crank the handle on interrupt state. */
pt_update_irq(v);
svm_dirq_assist(v);
do {
intack = hvm_vcpu_has_pending_irq(v);
@@ -458,7 +458,7 @@ void vioapic_update_EOI(struct domain *d, int vector)
ent->fields.remote_irr = 0;
if ( vtd_enabled )
if ( iommu_enabled )
{
spin_unlock(&d->arch.hvm_domain.irq_lock);
hvm_dpci_eoi(current->domain, gsi, ent);
@@ -1047,7 +1047,7 @@ static void free_iommu(struct iommu *iommu)
agaw = 64; \
agaw; })
int iommu_domain_init(struct domain *domain)
int intel_iommu_domain_init(struct domain *domain)
{
struct hvm_iommu *hd = domain_hvm_iommu(domain);
struct iommu *iommu = NULL;
@@ -1056,11 +1056,6 @@ int iommu_domain_init(struct domain *domain)
unsigned long sagaw;
struct acpi_drhd_unit *drhd;
spin_lock_init(&hd->mapping_lock);
spin_lock_init(&hd->iommu_list_lock);
INIT_LIST_HEAD(&hd->pdev_list);
INIT_LIST_HEAD(&hd->g2m_ioport_list);
if ( !vtd_enabled || list_empty(&acpi_drhd_units) )
return 0;
@@ -1550,7 +1545,8 @@ static int domain_context_mapped(struct pci_dev *pdev)
return 0;
}
int iommu_map_page(struct domain *d, paddr_t gfn, paddr_t mfn)
int intel_iommu_map_page(
struct domain *d, unsigned long gfn, unsigned long mfn)
{
struct acpi_drhd_unit *drhd;
struct iommu *iommu;
@@ -1566,12 +1562,12 @@ int iommu_map_page(struct domain *d, paddr_t gfn, paddr_t mfn)
return 0;
#endif
pg = addr_to_dma_page(d, gfn << PAGE_SHIFT_4K);
pg = addr_to_dma_page(d, (paddr_t)gfn << PAGE_SHIFT_4K);
if ( !pg )
return -ENOMEM;
pte = (struct dma_pte *)map_domain_page(page_to_mfn(pg));
pte += gfn & LEVEL_MASK;
dma_set_pte_addr(*pte, mfn << PAGE_SHIFT_4K);
dma_set_pte_addr(*pte, (paddr_t)mfn << PAGE_SHIFT_4K);
dma_set_pte_prot(*pte, DMA_PTE_READ | DMA_PTE_WRITE);
iommu_flush_cache_entry(iommu, pte);
unmap_domain_page(pte);
@@ -1581,7 +1577,7 @@ int iommu_map_page(struct domain *d, paddr_t gfn, paddr_t mfn)
iommu = drhd->iommu;
if ( cap_caching_mode(iommu->cap) )
iommu_flush_iotlb_psi(iommu, domain_iommu_domid(d),
gfn << PAGE_SHIFT_4K, 1, 0);
(paddr_t)gfn << PAGE_SHIFT_4K, 1, 0);
else if ( cap_rwbf(iommu->cap) )
iommu_flush_write_buffer(iommu);
}
@@ -1589,7 +1585,7 @@ int iommu_map_page(struct domain *d, paddr_t gfn, paddr_t mfn)
return 0;
}
int iommu_unmap_page(struct domain *d, dma_addr_t gfn)
int intel_iommu_unmap_page(struct domain *d, unsigned long gfn)
{
struct acpi_drhd_unit *drhd;
struct iommu *iommu;
@@ -1603,12 +1599,12 @@ int iommu_unmap_page(struct domain *d, dma_addr_t gfn)
return 0;
#endif
dma_pte_clear_one(d, gfn << PAGE_SHIFT_4K);
dma_pte_clear_one(d, (paddr_t)gfn << PAGE_SHIFT_4K);
return 0;
}
int iommu_page_mapping(struct domain *domain, dma_addr_t iova,
int iommu_page_mapping(struct domain *domain, paddr_t iova,
void *hpa, size_t size, int prot)
{
struct acpi_drhd_unit *drhd;
@@ -1655,14 +1651,14 @@ int iommu_page_mapping(struct domain *domain, dma_addr_t iova,
return 0;
}
int iommu_page_unmapping(struct domain *domain, dma_addr_t addr, size_t size)
int iommu_page_unmapping(struct domain *domain, paddr_t addr, size_t size)
{
dma_pte_clear_range(domain, addr, addr + size);
return 0;
}
void iommu_flush(struct domain *d, dma_addr_t gfn, u64 *p2m_entry)
void iommu_flush(struct domain *d, unsigned long gfn, u64 *p2m_entry)
{
struct acpi_drhd_unit *drhd;
struct iommu *iommu = NULL;
@@ -1673,7 +1669,7 @@ void iommu_flush(struct domain *d, dma_addr_t gfn, u64 *p2m_entry)
iommu = drhd->iommu;
if ( cap_caching_mode(iommu->cap) )
iommu_flush_iotlb_psi(iommu, domain_iommu_domid(d),
gfn << PAGE_SHIFT_4K, 1, 0);
(paddr_t)gfn << PAGE_SHIFT_4K, 1, 0);
else if ( cap_rwbf(iommu->cap) )
iommu_flush_write_buffer(iommu);
}
@@ -1921,7 +1917,7 @@ int device_assigned(u8 bus, u8 devfn)
return 1;
}
int assign_device(struct domain *d, u8 bus, u8 devfn)
int intel_iommu_assign_device(struct domain *d, u8 bus, u8 devfn)
{
struct acpi_rmrr_unit *rmrr;
struct pci_dev *pdev;
@@ -2151,6 +2147,14 @@ int iommu_resume(void)
return 0;
}
struct iommu_ops intel_iommu_ops = {
.init = intel_iommu_domain_init,
.assign_device = intel_iommu_assign_device,
.teardown = iommu_domain_teardown,
.map_page = intel_iommu_map_page,
.unmap_page = intel_iommu_unmap_page,
};
/*
* Local variables:
* mode: C
@@ -141,7 +141,7 @@ int hvm_do_IRQ_dpci(struct domain *d, unsigned int mirq)
{
struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
if ( !vtd_enabled || (d == dom0) || (hvm_irq->dpci == NULL) ||
if ( !iommu_enabled || (d == dom0) || (hvm_irq->dpci == NULL) ||
!hvm_irq->dpci->mirq[mirq].valid )
return 0;
@@ -167,7 +167,7 @@ static void hvm_dpci_isairq_eoi(struct domain *d, unsigned int isairq)
int i;
ASSERT(isairq < NR_ISAIRQS);
if ( !vtd_enabled || !dpci ||
if ( !iommu_enabled || !dpci ||
!test_bit(isairq, dpci->isairq_map) )
return;
@@ -205,7 +205,7 @@ void hvm_dpci_eoi(struct domain *d, unsigned int guest_gsi,
struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
uint32_t device, intx, machine_gsi;
if ( !vtd_enabled || (hvm_irq_dpci == NULL) ||
if ( !iommu_enabled || (hvm_irq_dpci == NULL) ||
(guest_gsi >= NR_ISAIRQS &&
!hvm_irq_dpci->girq[guest_gsi].valid) )
return;
@@ -235,50 +235,3 @@ void hvm_dpci_eoi(struct domain *d, unsigned int guest_gsi,
else
spin_unlock(&hvm_irq_dpci->dirq_lock);
}
void iommu_domain_destroy(struct domain *d)
{
struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
uint32_t i;
struct hvm_iommu *hd = domain_hvm_iommu(d);
struct list_head *ioport_list, *digl_list, *tmp;
struct g2m_ioport *ioport;
struct dev_intx_gsi_link *digl;
if ( !vtd_enabled )
return;
if ( hvm_irq_dpci != NULL )
{
for ( i = 0; i < NR_IRQS; i++ )
if ( hvm_irq_dpci->mirq[i].valid )
{
pirq_guest_unbind(d, i);
kill_timer(&hvm_irq_dpci->hvm_timer[irq_to_vector(i)]);
list_for_each_safe ( digl_list, tmp,
&hvm_irq_dpci->mirq[i].digl_list )
{
digl = list_entry(digl_list,
struct dev_intx_gsi_link, list);
list_del(&digl->list);
xfree(digl);
}
}
d->arch.hvm_domain.irq.dpci = NULL;
xfree(hvm_irq_dpci);
}
if ( hd )
{
list_for_each_safe ( ioport_list, tmp, &hd->g2m_ioport_list )
{
ioport = list_entry(ioport_list, struct g2m_ioport, list);
list_del(&ioport->list);
xfree(ioport);
}
}
iommu_domain_teardown(d);
}
@@ -255,8 +255,21 @@ set_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn, p2m_type_t p2mt)
/* level 1 entry */
paging_write_p2m_entry(d, gfn, p2m_entry, table_mfn, entry_content, 1);
if ( vtd_enabled && (p2mt == p2m_mmio_direct) && is_hvm_domain(d) )
iommu_flush(d, gfn, (u64*)p2m_entry);
if ( iommu_enabled && is_hvm_domain(d) )
{
if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
{
if ( (p2mt == p2m_mmio_direct) )
iommu_flush(d, gfn, (u64*)p2m_entry);
}
else if ( boot_cpu_data.x86_vendor == X86_VENDOR_AMD )
{
if ( p2mt == p2m_ram_rw )
iommu_map_page(d, gfn, mfn_x(mfn));
else
iommu_unmap_page(d, gfn);
}
}
/* Success */
rv = 1;
@@ -48,6 +48,9 @@ struct hvm_iommu {
int domain_id;
int paging_mode;
void *root_table;
/* iommu_ops */
struct iommu_ops *platform_ops;
};
#endif // __ASM_X86_HVM_IOMMU_H__
@@ -263,6 +263,10 @@
#define IOMMU_INV_IOMMU_PAGES_ADDR_HIGH_MASK 0xFFFFFFFF
#define IOMMU_INV_IOMMU_PAGES_ADDR_HIGH_SHIFT 0
/* INVALIDATE_DEVTAB_ENTRY command */
#define IOMMU_INV_DEVTAB_ENTRY_DEVICE_ID_MASK 0x0000FFFF
#define IOMMU_INV_DEVTAB_ENTRY_DEVICE_ID_SHIFT 0
/* Event Log */
#define IOMMU_EVENT_LOG_BASE_LOW_OFFSET 0x10
#define IOMMU_EVENT_LOG_BASE_HIGH_OFFSET 0x14
@@ -415,5 +419,6 @@
#define IOMMU_PAGE_TABLE_LEVEL_4 4
#define IOMMU_IO_WRITE_ENABLED 1
#define IOMMU_IO_READ_ENABLED 1
#define HACK_BIOS_SETTINGS 0
#endif /* _ASM_X86_64_AMD_IOMMU_DEFS_H */
@@ -27,13 +27,15 @@
list_for_each_entry(amd_iommu, \
&amd_iommu_head, list)
#define for_each_pdev(domain, pdev) \
list_for_each_entry(pdev, \
&(domain->arch.hvm_domain.hvm_iommu.pdev_list), list)
#define DMA_32BIT_MASK 0x00000000ffffffffULL
#define PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) & PAGE_MASK)
#define PAGE_SHIFT_4K (12)
#define PAGE_SIZE_4K (1UL << PAGE_SHIFT_4K)
#define PAGE_MASK_4K (((u64)-1) << PAGE_SHIFT_4K)
typedef int (*iommu_detect_callback_ptr_t)(u8 bus, u8 dev, u8 func, u8 cap_ptr);
typedef int (*iommu_detect_callback_ptr_t)(
u8 bus, u8 dev, u8 func, u8 cap_ptr);
/* amd-iommu-detect functions */
int __init scan_for_iommu(iommu_detect_callback_ptr_t iommu_detect_callback);
@@ -49,16 +51,20 @@ void __init register_iommu_cmd_buffer_in_mmio_space(struct amd_iommu *iommu);
void __init enable_iommu(struct amd_iommu *iommu);
/* mapping functions */
int amd_iommu_map_page(struct domain *d, unsigned long gfn,
unsigned long mfn);
int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn);
int amd_iommu_unmap_page(struct domain *d, unsigned long gfn);
void *amd_iommu_get_vptr_from_page_table_entry(u32 *entry);
/* device table functions */
void amd_iommu_set_dev_table_entry(u32 *dte,
u64 root_ptr, u16 domain_id, u8 paging_mode);
int amd_iommu_is_dte_page_translation_valid(u32 *entry);
void invalidate_dev_table_entry(struct amd_iommu *iommu,
u16 devic_id);
/* send cmd to iommu */
int send_iommu_command(struct amd_iommu *iommu, u32 cmd[]);
void flush_command_buffer(struct amd_iommu *iommu);
/* iommu domain functions */
int amd_iommu_domain_init(struct domain *domain);
@@ -422,8 +422,6 @@ struct poll_info {
#define VTD_PAGE_TABLE_LEVEL_3 3
#define VTD_PAGE_TABLE_LEVEL_4 4
typedef paddr_t dma_addr_t;
#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
#define MAX_IOMMUS 32
#define MAX_IOMMU_REGS 0xc0
@@ -447,8 +445,10 @@ struct ir_ctrl {
};
struct iommu_flush {
int (*context)(void *iommu, u16 did, u16 source_id, u8 function_mask, u64 type, int non_present_entry_flush);
int (*iotlb)(void *iommu, u16 did, u64 addr, unsigned int size_order, u64 type, int non_present_entry_flush);
int (*context)(void *iommu, u16 did, u16 source_id,
u8 function_mask, u64 type, int non_present_entry_flush);
int (*iotlb)(void *iommu, u16 did, u64 addr, unsigned int size_order,
u64 type, int non_present_entry_flush);
};
struct intel_iommu {
@@ -28,7 +28,9 @@
#include <public/domctl.h>
extern int vtd_enabled;
extern int amd_iommu_enabled;
#define iommu_enabled ( amd_iommu_enabled || vtd_enabled )
#define domain_hvm_iommu(d) (&d->arch.hvm_domain.hvm_iommu)
#define domain_vmx_iommu(d) (&d->arch.hvm_domain.hvm_iommu.vmx_iommu)
#define iommu_qi_ctrl(iommu) (&(iommu->intel.qi_ctrl));
@@ -72,9 +74,9 @@ int iommu_domain_init(struct domain *d);
void iommu_domain_destroy(struct domain *d);
int device_assigned(u8 bus, u8 devfn);
int assign_device(struct domain *d, u8 bus, u8 devfn);
int iommu_map_page(struct domain *d, dma_addr_t gfn, dma_addr_t mfn);
int iommu_unmap_page(struct domain *d, dma_addr_t gfn);
void iommu_flush(struct domain *d, dma_addr_t gfn, u64 *p2m_entry);
int iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn);
int iommu_unmap_page(struct domain *d, unsigned long gfn);
void iommu_flush(struct domain *d, unsigned long gfn, u64 *p2m_entry);
void iommu_set_pgd(struct domain *d);
void iommu_domain_teardown(struct domain *d);
int hvm_do_IRQ_dpci(struct domain *d, unsigned int irq);
@@ -89,4 +91,12 @@ void io_apic_write_remap_rte(unsigned int apic,
#define PT_IRQ_TIME_OUT MILLISECS(8)
#define VTDPREFIX "[VT-D]"
struct iommu_ops {
int (*init)(struct domain *d);
int (*assign_device)(struct domain *d, u8 bus, u8 devfn);
void (*teardown)(struct domain *d);
int (*map_page)(struct domain *d, unsigned long gfn, unsigned long mfn);
int (*unmap_page)(struct domain *d, unsigned long gfn);
};
#endif /* _IOMMU_H_ */