Commit 4905b35c authored by Julien Grall, committed by Jan Beulich

iommu: introduce arch specific code

Currently the structure hvm_iommu (xen/include/xen/hvm/iommu.h) contains
x86-specific fields.

This patch creates:
    - an arch_hvm_iommu structure, which will contain architecture-dependent
    fields
    - arch_iommu_domain_{init,destroy} functions to execute arch-specific
    code during domain creation/destruction

Also move iommu_use_hap_pt and domain_hvm_iommu into asm-x86/iommu.h.
Signed-off-by: Julien Grall <julien.grall@linaro.org>
Acked-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Aravind Gopalakrishnan <Aravind.Gopalakrishnan@amd.com>
Tested-by: Aravind Gopalakrishnan <Aravind.Gopalakrishnan@amd.com>
parent 5979e0c9
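The heart of the change is a header split: the x86-only fields leave the
common struct hvm_iommu and land in a new struct arch_hvm_iommu, so every
user in the hunks below is rewritten from hd->field to hd->arch.field. A
minimal sketch of the resulting layout, assembled from the fields the hunks
actually touch (field order and comments are illustrative, not the verbatim
header contents):

    /* xen/include/asm-x86/iommu.h (sketch): x86-specific per-domain state */
    struct arch_hvm_iommu
    {
        /* VT-d */
        u64 pgd_maddr;                    /* I/O page directory machine address */
        int agaw;                         /* adjusted guest address width */
        u64 iommu_bitmap;                 /* IOMMUs this domain uses */
        struct list_head mapped_rmrrs;

        /* AMD IOMMU */
        int paging_mode;
        struct page_info *root_table;
        struct guest_iommu *g_iommu;

        spinlock_t mapping_lock;          /* I/O page table lock */
        struct list_head g2m_ioport_list; /* guest-to-machine ioport mapping */
    };

    /* xen/include/xen/hvm/iommu.h (sketch): the common structure now wraps
     * the arch member next to the architecture-independent ops pointer */
    struct hvm_iommu
    {
        struct arch_hvm_iommu arch;
        const struct iommu_ops *platform_ops;
    };

Generic code keeps touching only the common members (e.g. hd->platform_ops),
while each architecture supplies its own arch_hvm_iommu together with the new
arch_iommu_domain_{init,destroy} hooks.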
@@ -752,7 +752,7 @@ long arch_do_domctl(
                    "ioport_map:add: dom%d gport=%x mport=%x nr=%x\n",
                    d->domain_id, fgp, fmp, np);
-            list_for_each_entry(g2m_ioport, &hd->g2m_ioport_list, list)
+            list_for_each_entry(g2m_ioport, &hd->arch.g2m_ioport_list, list)
                 if (g2m_ioport->mport == fmp )
                 {
                     g2m_ioport->gport = fgp;
@@ -771,7 +771,7 @@ long arch_do_domctl(
                 g2m_ioport->gport = fgp;
                 g2m_ioport->mport = fmp;
                 g2m_ioport->np = np;
-                list_add_tail(&g2m_ioport->list, &hd->g2m_ioport_list);
+                list_add_tail(&g2m_ioport->list, &hd->arch.g2m_ioport_list);
             }
             if ( !ret )
                 ret = ioports_permit_access(d, fmp, fmp + np - 1);
@@ -786,7 +786,7 @@ long arch_do_domctl(
             printk(XENLOG_G_INFO
                    "ioport_map:remove: dom%d gport=%x mport=%x nr=%x\n",
                    d->domain_id, fgp, fmp, np);
-            list_for_each_entry(g2m_ioport, &hd->g2m_ioport_list, list)
+            list_for_each_entry(g2m_ioport, &hd->arch.g2m_ioport_list, list)
                 if ( g2m_ioport->mport == fmp )
                 {
                     list_del(&g2m_ioport->list);
@@ -355,7 +355,7 @@ int dpci_ioport_intercept(ioreq_t *p)
     unsigned int s = 0, e = 0;
     int rc;

-    list_for_each_entry( g2m_ioport, &hd->g2m_ioport_list, list )
+    list_for_each_entry( g2m_ioport, &hd->arch.g2m_ioport_list, list )
     {
         s = g2m_ioport->gport;
         e = s + g2m_ioport->np;
@@ -228,7 +228,8 @@ static void tboot_gen_domain_integrity(const uint8_t key[TB_KEY_SIZE],
         if ( !is_idle_domain(d) )
         {
             struct hvm_iommu *hd = domain_hvm_iommu(d);
-            update_iommu_mac(&ctx, hd->pgd_maddr, agaw_to_level(hd->agaw));
+            update_iommu_mac(&ctx, hd->arch.pgd_maddr,
+                             agaw_to_level(hd->arch.agaw));
         }
     }
@@ -60,12 +60,12 @@ static uint16_t guest_bdf(struct domain *d, uint16_t machine_bdf)
 static inline struct guest_iommu *domain_iommu(struct domain *d)
 {
-    return domain_hvm_iommu(d)->g_iommu;
+    return domain_hvm_iommu(d)->arch.g_iommu;
 }

 static inline struct guest_iommu *vcpu_iommu(struct vcpu *v)
 {
-    return domain_hvm_iommu(v->domain)->g_iommu;
+    return domain_hvm_iommu(v->domain)->arch.g_iommu;
 }

 static void guest_iommu_enable(struct guest_iommu *iommu)
@@ -886,7 +886,7 @@ int guest_iommu_init(struct domain* d)
     guest_iommu_reg_init(iommu);
     iommu->domain = d;
-    hd->g_iommu = iommu;
+    hd->arch.g_iommu = iommu;

     tasklet_init(&iommu->cmd_buffer_tasklet,
                  guest_iommu_process_command, (unsigned long)d);
@@ -907,7 +907,7 @@ void guest_iommu_destroy(struct domain *d)
     tasklet_kill(&iommu->cmd_buffer_tasklet);
     xfree(iommu);

-    domain_hvm_iommu(d)->g_iommu = NULL;
+    domain_hvm_iommu(d)->arch.g_iommu = NULL;
 }

 static int guest_iommu_mmio_range(struct vcpu *v, unsigned long addr)
@@ -344,7 +344,7 @@ static int iommu_update_pde_count(struct domain *d, unsigned long pt_mfn,
     struct hvm_iommu *hd = domain_hvm_iommu(d);
     bool_t ok = 0;

-    ASSERT( spin_is_locked(&hd->mapping_lock) && pt_mfn );
+    ASSERT( spin_is_locked(&hd->arch.mapping_lock) && pt_mfn );

     next_level = merge_level - 1;
@@ -398,7 +398,7 @@ static int iommu_merge_pages(struct domain *d, unsigned long pt_mfn,
     unsigned long first_mfn;
     struct hvm_iommu *hd = domain_hvm_iommu(d);

-    ASSERT( spin_is_locked(&hd->mapping_lock) && pt_mfn );
+    ASSERT( spin_is_locked(&hd->arch.mapping_lock) && pt_mfn );

     table = map_domain_page(pt_mfn);
     pde = table + pfn_to_pde_idx(gfn, merge_level);
@@ -448,8 +448,8 @@ static int iommu_pde_from_gfn(struct domain *d, unsigned long pfn,
     struct page_info *table;
     struct hvm_iommu *hd = domain_hvm_iommu(d);

-    table = hd->root_table;
-    level = hd->paging_mode;
+    table = hd->arch.root_table;
+    level = hd->arch.paging_mode;

     BUG_ON( table == NULL || level < IOMMU_PAGING_MODE_LEVEL_1 ||
             level > IOMMU_PAGING_MODE_LEVEL_6 );
@@ -557,11 +557,11 @@ static int update_paging_mode(struct domain *d, unsigned long gfn)
     unsigned long old_root_mfn;
     struct hvm_iommu *hd = domain_hvm_iommu(d);

-    level = hd->paging_mode;
-    old_root = hd->root_table;
+    level = hd->arch.paging_mode;
+    old_root = hd->arch.root_table;
     offset = gfn >> (PTE_PER_TABLE_SHIFT * (level - 1));

-    ASSERT(spin_is_locked(&hd->mapping_lock) && is_hvm_domain(d));
+    ASSERT(spin_is_locked(&hd->arch.mapping_lock) && is_hvm_domain(d));

     while ( offset >= PTE_PER_TABLE_SIZE )
     {
@@ -587,8 +587,8 @@ static int update_paging_mode(struct domain *d, unsigned long gfn)
         if ( new_root != NULL )
         {
-            hd->paging_mode = level;
-            hd->root_table = new_root;
+            hd->arch.paging_mode = level;
+            hd->arch.root_table = new_root;

             if ( !spin_is_locked(&pcidevs_lock) )
                 AMD_IOMMU_DEBUG("%s Try to access pdev_list "
@@ -613,9 +613,9 @@ static int update_paging_mode(struct domain *d, unsigned long gfn)
                 /* valid = 0 only works for dom0 passthrough mode */
                 amd_iommu_set_root_page_table((u32 *)device_entry,
-                                              page_to_maddr(hd->root_table),
+                                              page_to_maddr(hd->arch.root_table),
                                               d->domain_id,
-                                              hd->paging_mode, 1);
+                                              hd->arch.paging_mode, 1);

                 amd_iommu_flush_device(iommu, req_id);
                 bdf += pdev->phantom_stride;
@@ -638,14 +638,14 @@ int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn,
     unsigned long pt_mfn[7];
     unsigned int merge_level;

-    BUG_ON( !hd->root_table );
+    BUG_ON( !hd->arch.root_table );

     if ( iommu_use_hap_pt(d) )
         return 0;

     memset(pt_mfn, 0, sizeof(pt_mfn));

-    spin_lock(&hd->mapping_lock);
+    spin_lock(&hd->arch.mapping_lock);

     /* Since HVM domain is initialized with 2 level IO page table,
      * we might need a deeper page table for lager gfn now */
@@ -653,7 +653,7 @@ int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn,
     {
         if ( update_paging_mode(d, gfn) )
         {
-            spin_unlock(&hd->mapping_lock);
+            spin_unlock(&hd->arch.mapping_lock);
             AMD_IOMMU_DEBUG("Update page mode failed gfn = %lx\n", gfn);
             domain_crash(d);
             return -EFAULT;
@@ -662,7 +662,7 @@ int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn,
     if ( iommu_pde_from_gfn(d, gfn, pt_mfn) || (pt_mfn[1] == 0) )
     {
-        spin_unlock(&hd->mapping_lock);
+        spin_unlock(&hd->arch.mapping_lock);
         AMD_IOMMU_DEBUG("Invalid IO pagetable entry gfn = %lx\n", gfn);
         domain_crash(d);
         return -EFAULT;
@@ -684,7 +684,7 @@ int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn,
         amd_iommu_flush_pages(d, gfn, 0);

         for ( merge_level = IOMMU_PAGING_MODE_LEVEL_2;
-              merge_level <= hd->paging_mode; merge_level++ )
+              merge_level <= hd->arch.paging_mode; merge_level++ )
         {
             if ( pt_mfn[merge_level] == 0 )
                 break;
@@ -697,7 +697,7 @@ int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn,
             if ( iommu_merge_pages(d, pt_mfn[merge_level], gfn,
                                    flags, merge_level) )
             {
-                spin_unlock(&hd->mapping_lock);
+                spin_unlock(&hd->arch.mapping_lock);
                 AMD_IOMMU_DEBUG("Merge iommu page failed at level %d, "
                                 "gfn = %lx mfn = %lx\n", merge_level, gfn, mfn);
                 domain_crash(d);
@@ -706,7 +706,7 @@ int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn,
     }

 out:
-    spin_unlock(&hd->mapping_lock);
+    spin_unlock(&hd->arch.mapping_lock);
     return 0;
 }
@@ -715,14 +715,14 @@ int amd_iommu_unmap_page(struct domain *d, unsigned long gfn)
     unsigned long pt_mfn[7];
     struct hvm_iommu *hd = domain_hvm_iommu(d);

-    BUG_ON( !hd->root_table );
+    BUG_ON( !hd->arch.root_table );

     if ( iommu_use_hap_pt(d) )
         return 0;

     memset(pt_mfn, 0, sizeof(pt_mfn));

-    spin_lock(&hd->mapping_lock);
+    spin_lock(&hd->arch.mapping_lock);

     /* Since HVM domain is initialized with 2 level IO page table,
      * we might need a deeper page table for lager gfn now */
@@ -730,7 +730,7 @@ int amd_iommu_unmap_page(struct domain *d, unsigned long gfn)
     {
         if ( update_paging_mode(d, gfn) )
         {
-            spin_unlock(&hd->mapping_lock);
+            spin_unlock(&hd->arch.mapping_lock);
             AMD_IOMMU_DEBUG("Update page mode failed gfn = %lx\n", gfn);
             domain_crash(d);
             return -EFAULT;
@@ -739,7 +739,7 @@ int amd_iommu_unmap_page(struct domain *d, unsigned long gfn)
     if ( iommu_pde_from_gfn(d, gfn, pt_mfn) || (pt_mfn[1] == 0) )
     {
-        spin_unlock(&hd->mapping_lock);
+        spin_unlock(&hd->arch.mapping_lock);
         AMD_IOMMU_DEBUG("Invalid IO pagetable entry gfn = %lx\n", gfn);
         domain_crash(d);
         return -EFAULT;
@@ -747,7 +747,7 @@ int amd_iommu_unmap_page(struct domain *d, unsigned long gfn)
     /* mark PTE as 'page not present' */
     clear_iommu_pte_present(pt_mfn[1], gfn);
-    spin_unlock(&hd->mapping_lock);
+    spin_unlock(&hd->arch.mapping_lock);

     amd_iommu_flush_pages(d, gfn, 0);
@@ -792,13 +792,13 @@ void amd_iommu_share_p2m(struct domain *d)
     pgd_mfn = pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d)));
     p2m_table = mfn_to_page(mfn_x(pgd_mfn));

-    if ( hd->root_table != p2m_table )
+    if ( hd->arch.root_table != p2m_table )
     {
-        free_amd_iommu_pgtable(hd->root_table);
-        hd->root_table = p2m_table;
+        free_amd_iommu_pgtable(hd->arch.root_table);
+        hd->arch.root_table = p2m_table;

         /* When sharing p2m with iommu, paging mode = 4 */
-        hd->paging_mode = IOMMU_PAGING_MODE_LEVEL_4;
+        hd->arch.paging_mode = IOMMU_PAGING_MODE_LEVEL_4;

         AMD_IOMMU_DEBUG("Share p2m table with iommu: p2m table = %#lx\n",
                         mfn_x(pgd_mfn));
     }
@@ -120,7 +120,8 @@ static void amd_iommu_setup_domain_device(
     struct hvm_iommu *hd = domain_hvm_iommu(domain);

-    BUG_ON( !hd->root_table || !hd->paging_mode || !iommu->dev_table.buffer );
+    BUG_ON( !hd->arch.root_table || !hd->arch.paging_mode ||
+            !iommu->dev_table.buffer );

     if ( iommu_passthrough && is_hardware_domain(domain) )
         valid = 0;
@@ -138,8 +139,8 @@ static void amd_iommu_setup_domain_device(
     {
         /* bind DTE to domain page-tables */
         amd_iommu_set_root_page_table(
-            (u32 *)dte, page_to_maddr(hd->root_table), domain->domain_id,
-            hd->paging_mode, valid);
+            (u32 *)dte, page_to_maddr(hd->arch.root_table), domain->domain_id,
+            hd->arch.paging_mode, valid);

         if ( pci_ats_device(iommu->seg, bus, pdev->devfn) &&
              iommu_has_cap(iommu, PCI_CAP_IOTLB_SHIFT) )
@@ -151,8 +152,8 @@ static void amd_iommu_setup_domain_device(
                         "root table = %#"PRIx64", "
                         "domain = %d, paging mode = %d\n",
                         req_id, pdev->type,
-                        page_to_maddr(hd->root_table),
-                        domain->domain_id, hd->paging_mode);
+                        page_to_maddr(hd->arch.root_table),
+                        domain->domain_id, hd->arch.paging_mode);
     }

     spin_unlock_irqrestore(&iommu->lock, flags);
@@ -226,17 +227,17 @@ int __init amd_iov_detect(void)
 static int allocate_domain_resources(struct hvm_iommu *hd)
 {
     /* allocate root table */
-    spin_lock(&hd->mapping_lock);
-    if ( !hd->root_table )
+    spin_lock(&hd->arch.mapping_lock);
+    if ( !hd->arch.root_table )
     {
-        hd->root_table = alloc_amd_iommu_pgtable();
-        if ( !hd->root_table )
+        hd->arch.root_table = alloc_amd_iommu_pgtable();
+        if ( !hd->arch.root_table )
         {
-            spin_unlock(&hd->mapping_lock);
+            spin_unlock(&hd->arch.mapping_lock);
             return -ENOMEM;
         }
     }
-    spin_unlock(&hd->mapping_lock);
+    spin_unlock(&hd->arch.mapping_lock);
     return 0;
 }
@@ -263,14 +264,14 @@ static int amd_iommu_domain_init(struct domain *d)
     /* allocate page directroy */
     if ( allocate_domain_resources(hd) != 0 )
     {
-        if ( hd->root_table )
-            free_domheap_page(hd->root_table);
+        if ( hd->arch.root_table )
+            free_domheap_page(hd->arch.root_table);
         return -ENOMEM;
     }

     /* For pv and dom0, stick with get_paging_mode(max_page)
      * For HVM dom0, use 2 level page table at first */
-    hd->paging_mode = is_hvm_domain(d) ?
+    hd->arch.paging_mode = is_hvm_domain(d) ?
                       IOMMU_PAGING_MODE_LEVEL_2 :
                       get_paging_mode(max_page);
@@ -333,7 +334,7 @@ void amd_iommu_disable_domain_device(struct domain *domain,
         AMD_IOMMU_DEBUG("Disable: device id = %#x, "
                         "domain = %d, paging mode = %d\n",
                         req_id, domain->domain_id,
-                        domain_hvm_iommu(domain)->paging_mode);
+                        domain_hvm_iommu(domain)->arch.paging_mode);
     }
     spin_unlock_irqrestore(&iommu->lock, flags);
@@ -373,7 +374,7 @@ static int reassign_device(struct domain *source, struct domain *target,
     /* IO page tables might be destroyed after pci-detach the last device
      * In this case, we have to re-allocate root table for next pci-attach.*/
-    if ( t->root_table == NULL )
+    if ( t->arch.root_table == NULL )
         allocate_domain_resources(t);

     amd_iommu_setup_domain_device(target, iommu, devfn, pdev);
@@ -455,13 +456,13 @@ static void deallocate_iommu_page_tables(struct domain *d)
     if ( iommu_use_hap_pt(d) )
         return;

-    spin_lock(&hd->mapping_lock);
-    if ( hd->root_table )
+    spin_lock(&hd->arch.mapping_lock);
+    if ( hd->arch.root_table )
     {
-        deallocate_next_page_table(hd->root_table, hd->paging_mode);
-        hd->root_table = NULL;
+        deallocate_next_page_table(hd->arch.root_table, hd->arch.paging_mode);
+        hd->arch.root_table = NULL;
     }
-    spin_unlock(&hd->mapping_lock);
+    spin_unlock(&hd->arch.mapping_lock);
 }
@@ -592,11 +593,11 @@ static void amd_dump_p2m_table(struct domain *d)
 {
     struct hvm_iommu *hd = domain_hvm_iommu(d);

-    if ( !hd->root_table )
+    if ( !hd->arch.root_table )
         return;

-    printk("p2m table has %d levels\n", hd->paging_mode);
-    amd_dump_p2m_table_level(hd->root_table, hd->paging_mode, 0, 0);
+    printk("p2m table has %d levels\n", hd->arch.paging_mode);
+    amd_dump_p2m_table_level(hd->arch.root_table, hd->arch.paging_mode, 0, 0);
 }

 const struct iommu_ops amd_iommu_ops = {
@@ -117,10 +117,11 @@ static void __init parse_iommu_param(char *s)
 int iommu_domain_init(struct domain *d)
 {
     struct hvm_iommu *hd = domain_hvm_iommu(d);
+    int ret = 0;

-    spin_lock_init(&hd->mapping_lock);
-    INIT_LIST_HEAD(&hd->g2m_ioport_list);
-    INIT_LIST_HEAD(&hd->mapped_rmrrs);
+    ret = arch_iommu_domain_init(d);
+    if ( ret )
+        return ret;

     if ( !iommu_enabled )
         return 0;
@@ -188,9 +189,7 @@ void iommu_teardown(struct domain *d)
 void iommu_domain_destroy(struct domain *d)
 {
-    struct hvm_iommu *hd  = domain_hvm_iommu(d);
-    struct list_head *ioport_list, *tmp;
-    struct g2m_ioport *ioport;
+    struct hvm_iommu *hd = domain_hvm_iommu(d);

     if ( !iommu_enabled || !hd->platform_ops )
         return;
@@ -198,12 +197,7 @@ void iommu_domain_destroy(struct domain *d)
     if ( need_iommu(d) )
         iommu_teardown(d);

-    list_for_each_safe ( ioport_list, tmp, &hd->g2m_ioport_list )
-    {
-        ioport = list_entry(ioport_list, struct g2m_ioport, list);
-        list_del(&ioport->list);
-        xfree(ioport);
-    }
+    arch_iommu_domain_destroy(d);
 }

 int iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn,
@@ -254,16 +254,16 @@ static u64 addr_to_dma_page_maddr(struct domain *domain, u64 addr, int alloc)
     struct acpi_drhd_unit *drhd;
     struct pci_dev *pdev;
     struct hvm_iommu *hd = domain_hvm_iommu(domain);
-    int addr_width = agaw_to_width(hd->agaw);
+    int addr_width = agaw_to_width(hd->arch.agaw);
     struct dma_pte *parent, *pte = NULL;
-    int level = agaw_to_level(hd->agaw);
+    int level = agaw_to_level(hd->arch.agaw);
     int offset;
     u64 pte_maddr = 0, maddr;
     u64 *vaddr = NULL;

     addr &= (((u64)1) << addr_width) - 1;
-    ASSERT(spin_is_locked(&hd->mapping_lock));
-    if ( hd->pgd_maddr == 0 )
+    ASSERT(spin_is_locked(&hd->arch.mapping_lock));
+    if ( hd->arch.pgd_maddr == 0 )
     {
         /*
          * just get any passthrough device in the domainr - assume user
@@ -271,11 +271,11 @@ static u64 addr_to_dma_page_maddr(struct domain *domain, u64 addr, int alloc)
          */
         pdev = pci_get_pdev_by_domain(domain, -1, -1, -1);
         drhd = acpi_find_matched_drhd_unit(pdev);
-        if ( !alloc || ((hd->pgd_maddr = alloc_pgtable_maddr(drhd, 1)) == 0) )
+        if ( !alloc || ((hd->arch.pgd_maddr = alloc_pgtable_maddr(drhd, 1)) == 0) )
             goto out;
     }

-    parent = (struct dma_pte *)map_vtd_domain_page(hd->pgd_maddr);
+    parent = (struct dma_pte *)map_vtd_domain_page(hd->arch.pgd_maddr);
     while ( level > 1 )
     {
         offset = address_level_offset(addr, level);
@@ -585,7 +585,7 @@ static void __intel_iommu_iotlb_flush(struct domain *d, unsigned long gfn,
     {
         iommu = drhd->iommu;

-        if ( !test_bit(iommu->index, &hd->iommu_bitmap) )
+        if ( !test_bit(iommu->index, &hd->arch.iommu_bitmap) )
             continue;

         flush_dev_iotlb = find_ats_dev_drhd(iommu) ? 1 : 0;
@@ -626,12 +626,12 @@ static void dma_pte_clear_one(struct domain *domain, u64 addr)
     struct dma_pte *page = NULL, *pte = NULL;
     u64 pg_maddr;

-    spin_lock(&hd->mapping_lock);
+    spin_lock(&hd->arch.mapping_lock);
     /* get last level pte */
     pg_maddr = addr_to_dma_page_maddr(domain, addr, 0);
     if ( pg_maddr == 0 )
     {
-        spin_unlock(&hd->mapping_lock);
+        spin_unlock(&hd->arch.mapping_lock);
         return;
     }
@@ -640,13 +640,13 @@
     if ( !dma_pte_present(*pte) )
     {
-        spin_unlock(&hd->mapping_lock);
+        spin_unlock(&hd->arch.mapping_lock);
         unmap_vtd_domain_page(page);
         return;
     }

     dma_clear_pte(*pte);
-    spin_unlock(&hd->mapping_lock);
+    spin_unlock(&hd->arch.mapping_lock);

     iommu_flush_cache_entry(pte, sizeof(struct dma_pte));

     if ( !this_cpu(iommu_dont_flush_iotlb) )
@@ -1237,7 +1237,7 @@ static int intel_iommu_domain_init(struct domain *d)
 {
     struct hvm_iommu *hd = domain_hvm_iommu(d);

-    hd->agaw = width_to_agaw(DEFAULT_DOMAIN_ADDRESS_WIDTH);
+    hd->arch.agaw = width_to_agaw(DEFAULT_DOMAIN_ADDRESS_WIDTH);

     return 0;
 }
@@ -1334,16 +1334,16 @@ int domain_context_mapping_one(
     }
     else
     {
-        spin_lock(&hd->mapping_lock);
+        spin_lock(&hd->arch.mapping_lock);

         /* Ensure we have pagetables allocated down to leaf PTE. */
-        if ( hd->pgd_maddr == 0 )
+        if ( hd->arch.pgd_maddr == 0 )
         {
             addr_to_dma_page_maddr(domain, 0, 1);
-            if ( hd->pgd_maddr == 0 )
+            if ( hd->arch.pgd_maddr == 0 )
             {
             nomem:
-                spin_unlock(&hd->mapping_lock);
+                spin_unlock(&hd->arch.mapping_lock);
                 spin_unlock(&iommu->lock);
                 unmap_vtd_domain_page(context_entries);
                 return -ENOMEM;
@@ -1351,7 +1351,7 @@ int domain_context_mapping_one(
         }

         /* Skip top levels of page tables for 2- and 3-level DRHDs. */
-        pgd_maddr = hd->pgd_maddr;
+        pgd_maddr = hd->arch.pgd_maddr;
         for ( agaw = level_to_agaw(4);
               agaw != level_to_agaw(iommu->nr_pt_levels);
               agaw-- )
@@ -1369,7 +1369,7 @@ int domain_context_mapping_one(
         else
             context_set_translation_type(*context, CONTEXT_TT_MULTI_LEVEL);

-        spin_unlock(&hd->mapping_lock);
+        spin_unlock(&hd->arch.mapping_lock);
     }

     if ( context_set_domain_id(context, domain, iommu) )
@@ -1395,7 +1395,7 @@ int domain_context_mapping_one(
         iommu_flush_iotlb_dsi(iommu, 0, 1, flush_dev_iotlb);
     }

-    set_bit(iommu->index, &hd->iommu_bitmap);
+    set_bit(iommu->index, &hd->arch.iommu_bitmap);

     unmap_vtd_domain_page(context_entries);
@@ -1638,7 +1638,7 @@ static int domain_context_unmap(
     struct hvm_iommu *hd = domain_hvm_iommu(domain);
     int iommu_domid;

-    clear_bit(iommu->index, &hd->iommu_bitmap);
+    clear_bit(iommu->index, &hd->arch.iommu_bitmap);

     iommu_domid = domain_iommu_domid(domain, iommu);
     if ( iommu_domid == -1 )
@@ -1695,7 +1695,7 @@ static void iommu_domain_teardown(struct domain *d)
     if ( list_empty(&acpi_drhd_units) )
         return;

-    list_for_each_entry_safe ( mrmrr, tmp, &hd->mapped_rmrrs, list )
+    list_for_each_entry_safe ( mrmrr, tmp, &hd->arch.mapped_rmrrs, list )
     {
         list_del(&mrmrr->list);
         xfree(mrmrr);
@@ -1704,10 +1704,10 @@ static void iommu_domain_teardown(struct domain *d)
     if ( iommu_use_hap_pt(d) )
         return;

-    spin_lock(&hd->mapping_lock);
-    iommu_free_pagetable(hd->pgd_maddr, agaw_to_level(hd->agaw));
-    hd->pgd_maddr = 0;
-    spin_unlock(&hd->mapping_lock);
+    spin_lock(&hd->arch.mapping_lock);
+    iommu_free_pagetable(hd->arch.pgd_maddr, agaw_to_level(hd->arch.agaw));
+    hd->arch.pgd_maddr = 0;
+    spin_unlock(&hd->arch.mapping_lock);
 }

 static int intel_iommu_map_page(
@@ -1726,12 +1726,12 @@ static int intel_iommu_map_page(
     if ( iommu_passthrough && is_hardware_domain(d) )
         return 0;

-    spin_lock(&hd->mapping_lock);
+    spin_lock(&hd->arch.mapping_lock);

     pg_maddr = addr_to_dma_page_maddr(d, (paddr_t)gfn << PAGE_SHIFT_4K, 1);
     if ( pg_maddr == 0 )
     {
-        spin_unlock(&hd->mapping_lock);
+        spin_unlock(&hd->arch.mapping_lock);
         return -ENOMEM;
     }
     page = (struct dma_pte *)map_vtd_domain_page(pg_maddr);
@@ -1748,14 +1748,14 @@
     if ( old.val == new.val )
     {
-        spin_unlock(&hd->mapping_lock);
+        spin_unlock(&hd->arch.mapping_lock);
         unmap_vtd_domain_page(page);
         return 0;
     }

     *pte = new;

     iommu_flush_cache_entry(pte, sizeof(struct dma_pte));
-    spin_unlock(&hd->mapping_lock);
+    spin_unlock(&hd->arch.mapping_lock);
     unmap_vtd_domain_page(page);

     if ( !this_cpu(iommu_dont_flush_iotlb) )
@@ -1789,7 +1789,7 @@ void iommu_pte_flush(struct domain *d, u64 gfn, u64 *pte,
     for_each_drhd_unit ( drhd )
     {
         iommu = drhd->iommu;
-        if ( !test_bit(iommu->index, &hd->iommu_bitmap) )
+        if ( !test_bit(iommu->index, &hd->arch.iommu_bitmap) )
             continue;

         flush_dev_iotlb = find_ats_dev_drhd(iommu) ? 1 : 0;
@@ -1830,7 +1830,7 @@ static void iommu_set_pgd(struct domain *d)
         return;

     pgd_mfn = pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d)));
-    hd->pgd_maddr = pagetable_get_paddr(pagetable_from_mfn(pgd_mfn));
+    hd->arch.pgd_maddr = pagetable_get_paddr(pagetable_from_mfn(pgd_mfn));
 }

 static int rmrr_identity_mapping(struct domain *d,
@@ -1845,10 +1845,10 @@
     ASSERT(rmrr->base_address < rmrr->end_address);

     /*
-     * No need to acquire hd->mapping_lock: Both insertion and removal
+     * No need to acquire hd->arch.mapping_lock: Both insertion and removal
      * get done while holding pcidevs_lock.
      */
-    list_for_each_entry( mrmrr, &hd->mapped_rmrrs, list )
+    list_for_each_entry( mrmrr, &hd->arch.mapped_rmrrs, list )
     {
         if ( mrmrr->base == rmrr->base_address &&
              mrmrr->end == rmrr->end_address )
@@ -1877,7 +1877,7 @@
     mrmrr->base = rmrr->base_address;
     mrmrr->end = rmrr->end_address;
     mrmrr->count = 1;
-    list_add_tail(&mrmrr->list, &hd->mapped_rmrrs);
+    list_add_tail(&mrmrr->list, &hd->arch.mapped_rmrrs);

     return 0;
 }
@@ -1964,7 +1964,7 @@ static int intel_iommu_remove_device(u8 devfn, struct pci_dev *pdev)
      * get done while holding pcidevs_lock.
      */
     ASSERT(spin_is_locked(&pcidevs_lock));
-    list_for_each_entry_safe ( mrmrr, tmp, &hd->mapped_rmrrs, list )
+    list_for_each_entry_safe ( mrmrr, tmp, &hd->arch.mapped_rmrrs, list )
     {
         unsigned long base_pfn, end_pfn;
@@ -2458,8 +2458,8 @@ static void vtd_dump_p2m_table(struct domain *d)
         return;

     hd = domain_hvm_iommu(d);
-    printk("p2m table has %d levels\n", agaw_to_level(hd->agaw));
-    vtd_dump_p2m_table_level(hd->pgd_maddr, agaw_to_level(hd->agaw), 0, 0);
+    printk("p2m table has %d levels\n", agaw_to_level(hd->arch.agaw));
+    vtd_dump_p2m_table_level(hd->arch.pgd_maddr, agaw_to_level(hd->arch.agaw), 0, 0);
 }

 const struct iommu_ops intel_iommu_ops = {
@@ -110,6 +110,31 @@ void __hwdom_init arch_iommu_check_autotranslated_hwdom(struct domain *d)
         panic("Presently, iommu must be enabled for PVH hardware domain\n");
 }

+int arch_iommu_domain_init(struct domain *d)
+{
+    struct hvm_iommu *hd = domain_hvm_iommu(d);
+
+    spin_lock_init(&hd->arch.mapping_lock);
+    INIT_LIST_HEAD(&hd->arch.g2m_ioport_list);
+    INIT_LIST_HEAD(&hd->arch.mapped_rmrrs);
+
+    return 0;
+}
+
+void arch_iommu_domain_destroy(struct domain *d)
+{
+    struct hvm_iommu *hd = domain_hvm_iommu(d);
+    struct list_head *ioport_list, *tmp;
+    struct g2m_ioport *ioport;
+
+    list_for_each_safe ( ioport_list, tmp, &hd->arch.g2m_ioport_list )
+    {
+        ioport = list_entry(ioport_list, struct g2m_ioport, list);
+        list_del(&ioport->list);
+        xfree(ioport);
+    }
+}