Commit 4ccbb9c3 authored by Jan Beulich

IOMMU: iommu_intpost is x86/HVM-only

Provide a #define for all other cases.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Reviewed-by: Paul Durrant <paul@xen.org>
parent 5f62fdcb
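
The pattern applied throughout this change is worth spelling out: the flag remains a real, writable variable only in builds where the feature can exist (x86 with HVM), and every other build gets a constant-false #define, so read-only users compile unchanged while assignments are wrapped in #ifndef. A minimal standalone sketch of the idea (FEATURE_X, feature_enabled and parse_cmdline are illustrative names, not Xen identifiers):

```c
#include <stdbool.h>
#include <stdio.h>

#ifdef FEATURE_X
/* Builds with the feature: a real, writable flag. */
static bool feature_enabled;
#else
/*
 * All other builds: a compile-time constant.  Readers still compile,
 * and the compiler can discard any code guarded by the flag.
 */
# define feature_enabled false
#endif

static void parse_cmdline(bool val)
{
#ifndef feature_enabled
    /* Writes need a guard: assigning to the #define would not compile. */
    feature_enabled = val;
#else
    (void)val;            /* the flag is a constant here; nothing to set */
#endif
}

int main(void)
{
    parse_cmdline(true);
    /* Readers need no guard at all. */
    printf("feature %sabled\n", feature_enabled ? "en" : "dis");
    return 0;
}
```

Compiled without FEATURE_X, the conditional in main() folds to a constant, which is why the read-only call sites in the hunks below need no #ifdef, while every assignment gains a `#ifndef iommu_intpost` guard.
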
@@ -1309,6 +1309,8 @@ boolean (e.g. `iommu=no`) can override this and leave the IOMMUs disabled.
This option depends on `intremap`, and is disabled by default due to some
corner cases in the implementation which have yet to be resolved.
This option is only valid on x86, and only in builds of Xen with HVM support.
* The `crash-disable` boolean controls disabling IOMMU functionality (DMAR/IR/QI)
before switching to a crash kernel. This option is inactive by default and
is for compatibility with older kdump kernels only. Modern kernels copy
......
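
As the documentation hunk above notes, `intremap`, `intpost` and `crash-disable` are boolean sub-options of the top-level `iommu=` parameter. A hedged illustration of what a Xen boot line might look like on a capable x86/HVM build (exact boot-loader syntax varies; this is an example, not a recommendation):

```
# Enable interrupt remapping and, on top of it, posted interrupts
# (`intpost` depends on `intremap` and is x86/HVM-only):
iommu=intremap,intpost

# For older kdump kernels only: tear down IOMMU functionality (DMAR/IR/QI)
# before switching to the crash kernel:
iommu=crash-disable
```
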
@@ -43,14 +43,6 @@ bool __read_mostly iommu_hwdom_passthrough;
bool __hwdom_initdata iommu_hwdom_inclusive;
int8_t __hwdom_initdata iommu_hwdom_reserved = -1;
/*
* In the current implementation of VT-d posted interrupts, in some extreme
* cases, the per cpu list which saves the blocked vCPU will be very long,
* and this will affect the interrupt latency, so let this feature off by
* default until we find a good solution to resolve it.
*/
bool_t __read_mostly iommu_intpost;
#ifndef iommu_hap_pt_share
bool __read_mostly iommu_hap_pt_share = true;
#endif
@@ -93,8 +85,10 @@ static int __init parse_iommu_param(const char *s)
else if ( (val = parse_boolean("intremap", s, ss)) >= 0 )
iommu_intremap = val ? iommu_intremap_full : iommu_intremap_off;
#endif
#ifndef iommu_intpost
else if ( (val = parse_boolean("intpost", s, ss)) >= 0 )
iommu_intpost = val;
#endif
#ifdef CONFIG_KEXEC
else if ( (val = parse_boolean("crash-disable", s, ss)) >= 0 )
iommu_crash_disable = val;
@@ -486,8 +480,10 @@ int __init iommu_setup(void)
panic("Couldn't enable %s and iommu=required/force\n",
!iommu_enabled ? "IOMMU" : "Interrupt Remapping");
#ifndef iommu_intpost
if ( !iommu_intremap )
iommu_intpost = 0;
iommu_intpost = false;
#endif
printk("I/O virtualisation %sabled\n", iommu_enabled ? "en" : "dis");
if ( !iommu_enabled )
@@ -563,10 +559,13 @@ void iommu_crash_shutdown(void)
if ( iommu_enabled )
iommu_get_ops()->crash_shutdown();
iommu_enabled = iommu_intpost = 0;
iommu_enabled = false;
#ifndef iommu_intremap
iommu_intremap = iommu_intremap_off;
#endif
#ifndef iommu_intpost
iommu_intpost = false;
#endif
}
int iommu_get_reserved_device_memory(iommu_grdm_t *func, void *ctxt)
......
@@ -2297,13 +2297,15 @@ static int __init vtd_setup(void)
if ( iommu_intremap && !ecap_intr_remap(iommu->ecap) )
iommu_intremap = iommu_intremap_off;
#ifndef iommu_intpost
/*
* We cannot use posted interrupt if X86_FEATURE_CX16 is
* not supported, since we count on this feature to
* atomically update 16-byte IRTE in posted format.
*/
if ( !cap_intr_post(iommu->cap) || !iommu_intremap || !cpu_has_cx16 )
iommu_intpost = 0;
iommu_intpost = false;
#endif
if ( !vtd_ept_page_compatible(iommu) )
clear_iommu_hap_pt_share();
@@ -2330,7 +2332,9 @@ static int __init vtd_setup(void)
P(iommu_hwdom_passthrough, "Dom0 DMA Passthrough");
P(iommu_qinval, "Queued Invalidation");
P(iommu_intremap, "Interrupt Remapping");
#ifndef iommu_intpost
P(iommu_intpost, "Posted Interrupt");
#endif
P(iommu_hap_pt_share, "Shared EPT tables");
#undef P
@@ -2348,7 +2352,9 @@ static int __init vtd_setup(void)
iommu_hwdom_passthrough = false;
iommu_qinval = 0;
iommu_intremap = iommu_intremap_off;
iommu_intpost = 0;
#ifndef iommu_intpost
iommu_intpost = false;
#endif
return ret;
}
......
@@ -29,6 +29,16 @@ struct iommu_ops __read_mostly iommu_ops;
enum iommu_intremap __read_mostly iommu_intremap = iommu_intremap_full;
#ifndef iommu_intpost
/*
* In the current implementation of VT-d posted interrupts, in some extreme
* cases, the per cpu list which saves the blocked vCPU will be very long,
* and this will affect the interrupt latency, so let this feature off by
* default until we find a good solution to resolve it.
*/
bool __read_mostly iommu_intpost;
#endif
int __init iommu_hardware_setup(void)
{
struct IO_APIC_route_entry **ioapic_entries = NULL;
......
@@ -54,7 +54,7 @@ static inline bool_t dfn_eq(dfn_t x, dfn_t y)
extern bool_t iommu_enable, iommu_enabled;
extern bool force_iommu, iommu_quarantine, iommu_verbose, iommu_igfx;
extern bool_t iommu_snoop, iommu_qinval, iommu_intpost;
extern bool_t iommu_snoop, iommu_qinval;
#ifdef CONFIG_X86
extern enum __packed iommu_intremap {
@@ -74,6 +74,12 @@ extern enum __packed iommu_intremap {
# define iommu_intremap false
#endif
#if defined(CONFIG_X86) && defined(CONFIG_HVM)
extern bool iommu_intpost;
#else
# define iommu_intpost false
#endif
#if defined(CONFIG_IOMMU_FORCE_PT_SHARE)
#define iommu_hap_pt_share true
#elif defined(CONFIG_HVM)
......
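
The header hunk above is what makes the rest of the change work: x86/HVM builds see `iommu_intpost` as a normal extern bool, while every other configuration sees a constant-false macro, so read-only callers stay completely unguarded. A hedged sketch of what common code effectively sees on a non-x86/HVM build (`setup_posted_irte()` is a made-up placeholder, not a Xen function):

```c
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for what xen/iommu.h provides on builds without x86/HVM. */
#define iommu_intpost false

static void setup_posted_irte(void)
{
    puts("programming posted-format IRTEs");   /* never reached on such builds */
}

int main(void)
{
    /*
     * No #ifdef needed at the call site: the condition is a compile-time
     * constant false, so the compiler drops the call entirely.
     */
    if ( iommu_intpost )
        setup_posted_irte();

    printf("Posted Interrupt: %sabled\n", iommu_intpost ? "en" : "dis");
    return 0;
}
```
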