Commit 66a9274c authored by Roger Pau Monné, committed by Jan Beulich

iommu: make iommu_inclusive_mapping a suboption of dom0-iommu

Introduce a new dom0-iommu=map-inclusive generic option that
supersedes iommu_inclusive_mapping. The previous behavior is preserved
and the option should only be enabled by default on Intel hardware.
Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
Reviewed-by: Paul Durrant <paul.durrant@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Acked-by: Julien Grall <julien.grall@arm.com>
Acked-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
parent 391266f0
......@@ -682,7 +682,7 @@ Flag that makes a dom0 use shadow paging. Only works when "pvh" is
enabled.
### dom0-iommu
> `= List of [ passthrough | strict ]`
> `= List of [ passthrough | strict | map-inclusive ]`
This list of booleans controls the iommu usage by Dom0:
......@@ -696,6 +696,14 @@ This list of booleans controls the iommu usage by Dom0:
`true` for a PVH Dom0 and any attempt to overwrite it from the command line
is ignored.
* `map-inclusive`: sets up DMA remapping for all the non-RAM regions below 4GB
except for unusable ranges. Use this to work around firmware issues providing
incorrect RMRR/IVMD entries. Rather than only mapping RAM pages for IOMMU
accesses for Dom0, with this option all pages up to 4GB, not marked as
unusable in the E820 table, will get a mapping established. Note that this
option is only applicable to a PV Dom0 and is enabled by default on Intel
hardware.
### dom0\_ioports\_disable (x86)
> `= List of <hex>-<hex>`
......@@ -1233,6 +1241,9 @@ wait descriptor timed out', try increasing this value.
### iommu\_inclusive\_mapping (VT-d)
> `= <boolean>`
**WARNING: This command line option is deprecated, and superseded by
_dom0-iommu=map-inclusive_ - using both options in combination is undefined.**
> Default: `true`
Use this to work around firmware issues providing incorrect RMRR entries.
......
......@@ -253,6 +253,10 @@ static void __hwdom_init amd_iommu_hwdom_init(struct domain *d)
unsigned long i;
const struct amd_iommu *iommu;
/* Inclusive IOMMU mappings are disabled by default on AMD hardware. */
if ( iommu_hwdom_inclusive == -1 )
iommu_hwdom_inclusive = 0;
if ( allocate_domain_resources(dom_iommu(d)) )
BUG();
......
......@@ -73,3 +73,7 @@ int arch_iommu_populate_page_table(struct domain *d)
/* The IOMMU shares the p2m with the CPU */
return -ENOSYS;
}
/*
 * Arch hook invoked from the common iommu_hwdom_init() path.
 * Intentionally empty here: judging by the surrounding code ("the IOMMU
 * shares the p2m with the CPU") this is the ARM stub, where no extra
 * hardware-domain mappings need to be established.
 */
void __hwdom_init arch_iommu_hwdom_init(struct domain *d)
{
}
......@@ -2727,6 +2727,11 @@ static int arm_smmu_iommu_domain_init(struct domain *d)
/*
 * Hardware-domain initialisation hook for the SMMU driver.
 *
 * The generic dom0-iommu map-inclusive option has no effect on ARM:
 * warn if the admin explicitly enabled it, then force it off so the
 * common code never acts on it.
 */
static void __hwdom_init arm_smmu_iommu_hwdom_init(struct domain *d)
{
    bool user_enabled = (iommu_hwdom_inclusive == 1);

    if ( user_enabled )
        printk(XENLOG_WARNING
               "map-inclusive dom0-iommu option is not supported on ARM\n");

    /* Set to false options not supported on ARM. */
    iommu_hwdom_inclusive = 0;
}
static void arm_smmu_iommu_domain_teardown(struct domain *d)
......
......@@ -61,6 +61,7 @@ bool_t __read_mostly iommu_intremap = 1;
bool __hwdom_initdata iommu_hwdom_strict;
bool __read_mostly iommu_hwdom_passthrough;
int8_t __hwdom_initdata iommu_hwdom_inclusive = -1;
/*
* In the current implementation of VT-d posted interrupts, in some extreme
......@@ -152,6 +153,8 @@ static int __init parse_dom0_iommu_param(const char *s)
iommu_hwdom_passthrough = val;
else if ( (val = parse_boolean("strict", s, ss)) >= 0 )
iommu_hwdom_strict = val;
else if ( (val = parse_boolean("map-inclusive", s, ss)) >= 0 )
iommu_hwdom_inclusive = val;
else
rc = -EINVAL;
......@@ -232,6 +235,16 @@ void __hwdom_init iommu_hwdom_init(struct domain *d)
}
hd->platform_ops->hwdom_init(d);
ASSERT(iommu_hwdom_inclusive != -1);
if ( iommu_hwdom_inclusive && !is_pv_domain(d) )
{
printk(XENLOG_WARNING
"IOMMU inclusive mappings are only supported on PV Dom0\n");
iommu_hwdom_inclusive = 0;
}
arch_iommu_hwdom_init(d);
}
void iommu_teardown(struct domain *d)
......
......@@ -99,6 +99,4 @@ void pci_vtd_quirk(const struct pci_dev *);
bool_t platform_supports_intremap(void);
bool_t platform_supports_x2apic(void);
void vtd_set_hwdom_mapping(struct domain *d);
#endif // _VTD_EXTERN_H_
......@@ -1304,11 +1304,9 @@ static void __hwdom_init intel_iommu_hwdom_init(struct domain *d)
{
struct acpi_drhd_unit *drhd;
if ( !iommu_hwdom_passthrough && is_pv_domain(d) )
{
/* Set up 1:1 page table for hardware domain. */
vtd_set_hwdom_mapping(d);
}
/* Inclusive mappings are enabled by default on Intel hardware for PV. */
if ( iommu_hwdom_inclusive == -1 )
iommu_hwdom_inclusive = is_pv_domain(d);
setup_hwdom_pci_devices(d, setup_hwdom_device);
setup_hwdom_rmrr(d);
......
......@@ -25,7 +25,6 @@
#include <xen/irq.h>
#include <xen/numa.h>
#include <asm/fixmap.h>
#include <asm/setup.h>
#include "../iommu.h"
#include "../dmar.h"
#include "../vtd.h"
......@@ -35,8 +34,7 @@
* iommu_inclusive_mapping: when set, all memory below 4GB is included in dom0
* 1:1 iommu mappings except xen and unusable regions.
*/
static bool_t __hwdom_initdata iommu_inclusive_mapping = 1;
boolean_param("iommu_inclusive_mapping", iommu_inclusive_mapping);
boolean_param("iommu_inclusive_mapping", iommu_hwdom_inclusive);
void *map_vtd_domain_page(u64 maddr)
{
......@@ -63,57 +61,3 @@ void flush_all_cache()
wbinvd();
}
/*
 * Establish the hardware domain's 1:1 (identity) VT-d mappings.
 *
 * Walks every pdx up to the larger of max_pdx and the 4GB boundary and
 * decides, per pfn, whether to install an identity mapping:
 *  - by default only conventional RAM is mapped, leaving reserved
 *    regions to be covered by RMRRs;
 *  - with iommu_inclusive_mapping set, every pfn below 4GB not marked
 *    unusable in the E820 is mapped as well.
 */
void __hwdom_init vtd_set_hwdom_mapping(struct domain *d)
{
    unsigned long i, top, max_pfn;

    /* Only the hardware domain may get this identity map. */
    BUG_ON(!is_hardware_domain(d));

    /* Last pfn below the 4GB boundary. */
    max_pfn = (GB(4) >> PAGE_SHIFT) - 1;
    /* Cover at least up to 4GB, or further if RAM exists above it. */
    top = max(max_pdx, pfn_to_pdx(max_pfn) + 1);

    for ( i = 0; i < top; i++ )
    {
        unsigned long pfn = pdx_to_pfn(i);
        bool map;
        int rc;

        /*
         * Set up 1:1 mapping for dom0. Default to include only
         * conventional RAM areas and let RMRRs include needed reserved
         * regions. When set, the inclusive mapping additionally maps in
         * every pfn up to 4GB except those that fall in unusable ranges.
         */
        if ( pfn > max_pfn && !mfn_valid(_mfn(pfn)) )
            continue;

        if ( iommu_inclusive_mapping && pfn <= max_pfn )
            map = !page_is_ram_type(pfn, RAM_TYPE_UNUSABLE);
        else
            map = page_is_ram_type(pfn, RAM_TYPE_CONVENTIONAL);

        if ( !map )
            continue;

        /* Exclude Xen bits */
        if ( xen_in_range(pfn) )
            continue;

        /*
         * If dom0-strict mode is enabled then exclude conventional RAM
         * and let the common code map dom0's pages.
         */
        if ( iommu_hwdom_strict &&
             page_is_ram_type(pfn, RAM_TYPE_CONVENTIONAL) )
            continue;

        rc = iommu_map_page(d, pfn, pfn, IOMMUF_readable|IOMMUF_writable);
        if ( rc )
            printk(XENLOG_WARNING VTDPREFIX " d%d: IOMMU mapping failed: %d\n",
                   d->domain_id, rc);

        /* Yield every 2^20 iterations so pending softirqs get serviced. */
        if (!(i & 0xfffff))
            process_pending_softirqs();
    }
}
......@@ -20,6 +20,8 @@
#include <xen/softirq.h>
#include <xsm/xsm.h>
#include <asm/setup.h>
void iommu_update_ire_from_apic(
unsigned int apic, unsigned int reg, unsigned int value)
{
......@@ -132,6 +134,63 @@ void arch_iommu_domain_destroy(struct domain *d)
{
}
/*
 * Build the hardware domain's identity IOMMU mappings (x86).
 *
 * A no-op in passthrough mode or for a non-PV hardware domain. Otherwise
 * every pdx up to max(max_pdx, 4GB boundary) is considered: conventional
 * RAM is mapped 1:1 by default (reserved regions are left to RMRRs), and
 * with dom0-iommu=map-inclusive every non-unusable pfn below 4GB is
 * mapped as well.
 */
void __hwdom_init arch_iommu_hwdom_init(struct domain *d)
{
    unsigned long pdx, limit, pfn_4g;

    BUG_ON(!is_hardware_domain(d));

    if ( iommu_hwdom_passthrough || !is_pv_domain(d) )
        return;

    /* Last pfn below the 4GB boundary. */
    pfn_4g = (GB(4) >> PAGE_SHIFT) - 1;
    limit = max(max_pdx, pfn_to_pdx(pfn_4g) + 1);

    for ( pdx = 0; pdx < limit; pdx++ )
    {
        unsigned long pfn = pdx_to_pfn(pdx);
        bool want_map;
        int rc;

        /* Skip invalid frames above the 4GB boundary. */
        if ( pfn > pfn_4g && !mfn_valid(_mfn(pfn)) )
            continue;

        /*
         * Default: map conventional RAM only and let RMRRs cover needed
         * reserved regions. In inclusive mode additionally map every pfn
         * below 4GB that the E820 does not mark unusable.
         */
        want_map = (iommu_hwdom_inclusive && pfn <= pfn_4g)
                   ? !page_is_ram_type(pfn, RAM_TYPE_UNUSABLE)
                   : page_is_ram_type(pfn, RAM_TYPE_CONVENTIONAL);

        /* Never expose Xen's own memory to device DMA. */
        if ( !want_map || xen_in_range(pfn) )
            continue;

        /*
         * In dom0-strict mode conventional RAM is excluded here; the
         * common code maps dom0's own pages instead.
         */
        if ( iommu_hwdom_strict &&
             page_is_ram_type(pfn, RAM_TYPE_CONVENTIONAL) )
            continue;

        rc = iommu_map_page(d, pfn, pfn, IOMMUF_readable|IOMMUF_writable);
        if ( rc )
            printk(XENLOG_WARNING " d%d: IOMMU mapping failed: %d\n",
                   d->domain_id, rc);

        /* Yield every 2^20 iterations so pending softirqs get serviced. */
        if ( !(pdx & 0xfffff) )
            process_pending_softirqs();
    }
}
/*
* Local variables:
* mode: C
......
......@@ -37,6 +37,7 @@ extern bool_t iommu_debug;
extern bool_t amd_iommu_perdev_intremap;
extern bool iommu_hwdom_strict, iommu_hwdom_passthrough;
extern int8_t iommu_hwdom_inclusive;
extern unsigned int iommu_dev_iotlb_timeout;
......@@ -51,6 +52,7 @@ void arch_iommu_domain_destroy(struct domain *d);
int arch_iommu_domain_init(struct domain *d);
int arch_iommu_populate_page_table(struct domain *d);
void arch_iommu_check_autotranslated_hwdom(struct domain *d);
void arch_iommu_hwdom_init(struct domain *d);
int iommu_construct(struct domain *d);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment