Commit cf44ba98 authored by Roger Pau Monné, committed by Jan Beulich

iommu: rename iommu_dom0_strict and iommu_passthrough

To iommu_hwdom_strict and iommu_hwdom_passthrough respectively, which
are more descriptive of their usage. Also change their type from bool_t
to bool.

No functional change.
Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Reviewed-by: Wei Liu <wei.liu2@citrix.com>
Acked-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
parent 1dfb8e6e
@@ -1426,7 +1426,8 @@ int memory_add(unsigned long spfn, unsigned long epfn, unsigned int pxm)
     if ( ret )
         goto destroy_m2p;
-    if ( iommu_enabled && !iommu_passthrough && !need_iommu(hardware_domain) )
+    if ( iommu_enabled && !iommu_hwdom_passthrough &&
+         !need_iommu(hardware_domain) )
     {
         for ( i = spfn; i < epfn; i++ )
             if ( iommu_map_page(hardware_domain, i, i, IOMMUF_readable|IOMMUF_writable) )
@@ -1062,7 +1062,7 @@ static void __init amd_iommu_init_cleanup(void)
     radix_tree_destroy(&ivrs_maps, xfree);
     iommu_enabled = 0;
-    iommu_passthrough = 0;
+    iommu_hwdom_passthrough = false;
     iommu_intremap = 0;
     iommuv2_enabled = 0;
 }
@@ -121,7 +121,7 @@ static void amd_iommu_setup_domain_device(
     BUG_ON( !hd->arch.root_table || !hd->arch.paging_mode ||
             !iommu->dev_table.buffer );
-    if ( iommu_passthrough && is_hardware_domain(domain) )
+    if ( iommu_hwdom_passthrough && is_hardware_domain(domain) )
         valid = 0;
     if ( ats_enabled )
@@ -256,7 +256,7 @@ static void __hwdom_init amd_iommu_hwdom_init(struct domain *d)
     if ( allocate_domain_resources(dom_iommu(d)) )
         BUG();
-    if ( !iommu_passthrough && !need_iommu(d) )
+    if ( !iommu_hwdom_passthrough && !need_iommu(d) )
     {
         int rc = 0;
@@ -52,15 +52,16 @@ custom_param("iommu", parse_iommu_param);
 bool_t __initdata iommu_enable = 1;
 bool_t __read_mostly iommu_enabled;
 bool_t __read_mostly force_iommu;
-bool_t __hwdom_initdata iommu_dom0_strict;
 bool_t __read_mostly iommu_verbose;
 bool_t __read_mostly iommu_workaround_bios_bug;
 bool_t __read_mostly iommu_igfx = 1;
-bool_t __read_mostly iommu_passthrough;
 bool_t __read_mostly iommu_snoop = 1;
 bool_t __read_mostly iommu_qinval = 1;
 bool_t __read_mostly iommu_intremap = 1;
+bool __hwdom_initdata iommu_hwdom_strict;
+bool __read_mostly iommu_hwdom_passthrough;
 /*
  * In the current implementation of VT-d posted interrupts, in some extreme
  * cases, the per cpu list which saves the blocked vCPU will be very long,
@@ -121,9 +122,9 @@ static int __init parse_iommu_param(const char *s)
         else if ( !strncmp(s, "amd-iommu-perdev-intremap", ss - s) )
             amd_iommu_perdev_intremap = val;
         else if ( !strncmp(s, "dom0-passthrough", ss - s) )
-            iommu_passthrough = val;
+            iommu_hwdom_passthrough = val;
         else if ( !strncmp(s, "dom0-strict", ss - s) )
-            iommu_dom0_strict = val;
+            iommu_hwdom_strict = val;
         else if ( !strncmp(s, "sharept", ss - s) )
             iommu_hap_pt_share = val;
         else
@@ -158,10 +159,10 @@ static void __hwdom_init check_hwdom_reqs(struct domain *d)
     arch_iommu_check_autotranslated_hwdom(d);
-    if ( iommu_passthrough )
+    if ( iommu_hwdom_passthrough )
         panic("Dom0 uses paging translated mode, dom0-passthrough must not be enabled\n");
-    iommu_dom0_strict = 1;
+    iommu_hwdom_strict = true;
 }

 void __hwdom_init iommu_hwdom_init(struct domain *d)
@@ -174,7 +175,7 @@ void __hwdom_init iommu_hwdom_init(struct domain *d)
         return;
     register_keyhandler('o', &iommu_dump_p2m_table, "dump iommu p2m table", 0);
-    d->need_iommu = !!iommu_dom0_strict;
+    d->need_iommu = iommu_hwdom_strict;
     if ( need_iommu(d) && !iommu_use_hap_pt(d) )
     {
         struct page_info *page;
@@ -370,8 +371,8 @@ int __init iommu_setup(void)
     int rc = -ENODEV;
     bool_t force_intremap = force_iommu && iommu_intremap;
-    if ( iommu_dom0_strict )
-        iommu_passthrough = 0;
+    if ( iommu_hwdom_strict )
+        iommu_hwdom_passthrough = false;
     if ( iommu_enable )
     {
@@ -392,15 +393,15 @@ int __init iommu_setup(void)
     if ( !iommu_enabled )
     {
         iommu_snoop = 0;
-        iommu_passthrough = 0;
-        iommu_dom0_strict = 0;
+        iommu_hwdom_passthrough = false;
+        iommu_hwdom_strict = false;
     }
     printk("I/O virtualisation %sabled\n", iommu_enabled ? "en" : "dis");
     if ( iommu_enabled )
     {
         printk(" - Dom0 mode: %s\n",
-               iommu_passthrough ? "Passthrough" :
-               iommu_dom0_strict ? "Strict" : "Relaxed");
+               iommu_hwdom_passthrough ? "Passthrough" :
+               iommu_hwdom_strict ? "Strict" : "Relaxed");
         printk("Interrupt remapping %sabled\n", iommu_intremap ? "en" : "dis");
         tasklet_init(&iommu_pt_cleanup_tasklet, iommu_free_pagetables, 0);
     }
@@ -1304,7 +1304,7 @@ static void __hwdom_init intel_iommu_hwdom_init(struct domain *d)
 {
     struct acpi_drhd_unit *drhd;
-    if ( !iommu_passthrough && is_pv_domain(d) )
+    if ( !iommu_hwdom_passthrough && is_pv_domain(d) )
     {
         /* Set up 1:1 page table for hardware domain. */
         vtd_set_hwdom_mapping(d);
@@ -1391,7 +1391,7 @@ int domain_context_mapping_one(
         return res;
     }
-    if ( iommu_passthrough && is_hardware_domain(domain) )
+    if ( iommu_hwdom_passthrough && is_hardware_domain(domain) )
     {
         context_set_translation_type(*context, CONTEXT_TT_PASS_THRU);
         agaw = level_to_agaw(iommu->nr_pt_levels);
@@ -1781,7 +1781,7 @@ static int __must_check intel_iommu_map_page(struct domain *d,
         return 0;
     /* Do nothing if hardware domain and iommu supports pass thru. */
-    if ( iommu_passthrough && is_hardware_domain(d) )
+    if ( iommu_hwdom_passthrough && is_hardware_domain(d) )
         return 0;
     spin_lock(&hd->arch.mapping_lock);
@@ -1826,7 +1826,7 @@ static int __must_check intel_iommu_unmap_page(struct domain *d,
                                                unsigned long gfn)
 {
     /* Do nothing if hardware domain and iommu supports pass thru. */
-    if ( iommu_passthrough && is_hardware_domain(d) )
+    if ( iommu_hwdom_passthrough && is_hardware_domain(d) )
         return 0;
     return dma_pte_clear_one(d, (paddr_t)gfn << PAGE_SHIFT_4K);
@@ -2269,8 +2269,8 @@ int __init intel_vtd_setup(void)
         if ( iommu_snoop && !ecap_snp_ctl(iommu->ecap) )
             iommu_snoop = 0;
-        if ( iommu_passthrough && !ecap_pass_thru(iommu->ecap) )
-            iommu_passthrough = 0;
+        if ( iommu_hwdom_passthrough && !ecap_pass_thru(iommu->ecap) )
+            iommu_hwdom_passthrough = false;
         if ( iommu_qinval && !ecap_queued_inval(iommu->ecap) )
             iommu_qinval = 0;
@@ -2308,7 +2308,7 @@ int __init intel_vtd_setup(void)
 #define P(p,s) printk("Intel VT-d %s %senabled.\n", s, (p)? "" : "not ")
     P(iommu_snoop, "Snoop Control");
-    P(iommu_passthrough, "Dom0 DMA Passthrough");
+    P(iommu_hwdom_passthrough, "Dom0 DMA Passthrough");
     P(iommu_qinval, "Queued Invalidation");
     P(iommu_intremap, "Interrupt Remapping");
     P(iommu_intpost, "Posted Interrupt");
@@ -2330,7 +2330,7 @@ int __init intel_vtd_setup(void)
  error:
     iommu_enabled = 0;
     iommu_snoop = 0;
-    iommu_passthrough = 0;
+    iommu_hwdom_passthrough = false;
     iommu_qinval = 0;
     iommu_intremap = 0;
     iommu_intpost = 0;
@@ -103,7 +103,7 @@ void __hwdom_init vtd_set_hwdom_mapping(struct domain *d)
          * If dom0-strict mode is enabled then exclude conventional RAM
          * and let the common code map dom0's pages.
          */
-        if ( iommu_dom0_strict &&
+        if ( iommu_hwdom_strict &&
              page_is_ram_type(pfn, RAM_TYPE_CONVENTIONAL) )
            continue;
@@ -29,13 +29,15 @@
 #include <asm/iommu.h>
 extern bool_t iommu_enable, iommu_enabled;
-extern bool_t force_iommu, iommu_dom0_strict, iommu_verbose;
-extern bool_t iommu_workaround_bios_bug, iommu_igfx, iommu_passthrough;
+extern bool_t force_iommu, iommu_verbose;
+extern bool_t iommu_workaround_bios_bug, iommu_igfx;
 extern bool_t iommu_snoop, iommu_qinval, iommu_intremap, iommu_intpost;
 extern bool_t iommu_hap_pt_share;
 extern bool_t iommu_debug;
 extern bool_t amd_iommu_perdev_intremap;
+extern bool iommu_hwdom_strict, iommu_hwdom_passthrough;
 extern unsigned int iommu_dev_iotlb_timeout;
 int iommu_setup(void);
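The bool_t to bool conversion is presumably also why the explicit "!!" in
"d->need_iommu = !!iommu_dom0_strict" above can simply be dropped: a C99
bool only ever holds 0 or 1 (any non-zero value assigned to it is
normalised to 1), whereas the old plain-integer bool_t keeps whatever raw
value was stored and so needs a defensive "!!" at the point of use. A
minimal standalone sketch of the difference, independent of the Xen
sources and with bool_t approximated as an unsigned char:

    #include <stdbool.h>
    #include <stdio.h>

    typedef unsigned char bool_t;   /* rough stand-in for the old-style boolean */

    int main(void)
    {
        bool_t old_style = 4;       /* plain byte: keeps the raw value 4 */
        bool   new_style = 4;       /* C99 _Bool: assignment normalises to 1 */

        /* old_style needs !! (or a comparison) to yield a clean 0/1;
         * new_style is already guaranteed to be 0 or 1. */
        printf("bool_t holds %d, bool holds %d\n", (int)old_style, (int)new_style);
        return 0;
    }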