Commit 548a932a authored by Andrew Cooper

x86/spec-ctrl: Infrastructure to use VERW to flush pipeline buffers


Three synthetic features are introduced, as we need individual control of
each, depending on circumstances.  A later change will enable them at
appropriate points.

The verw_sel field doesn't strictly need to live in struct cpu_info.  It lives
there because there is a convenient hole it can fill, and it reduces the
complexity of the SPEC_CTRL_EXIT_TO_{PV,HVM} assembly by avoiding the need for
any temporary stack maintenance.

This is part of XSA-297, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130, CVE-2019-11091.
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
parent d4f6116c
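
A note on the encoding relied on below: only the memory-operand form of VERW triggers the microarchitectural flush, which is why the new assembly reads verw_sel through an %rsp-relative memory operand and the C helpers use an "m" constraint. The following is a minimal sketch outside of Xen, assuming x86-64 and GCC/Clang inline asm; verw_flush() and the reuse of %ss (any writeable data selector works) are illustrative choices, not part of the patch:

    /* verw_demo.c - illustration only, not Xen code.  The register form of
     * VERW performs the same segment check but does not flush the buffers;
     * only the memory-operand encoding does. */
    #include <stdint.h>

    static void verw_flush(void)
    {
        uint16_t sel;

        /* %ss is guaranteed to name a writeable data descriptor, loosely
         * mirroring Xen's choice of __HYPERVISOR_DS32. */
        asm volatile ( "mov %%ss, %0" : "=r" (sel) );

        /* "m", not "r": the flush only happens for the memory form. */
        asm volatile ( "verw %0" :: "m" (sel) : "cc" );
    }

    int main(void)
    {
        verw_flush();
        return 0;
    }

Xen itself caches __HYPERVISOR_DS32 in cpu_info.verw_sel, so the exit-to-guest paths can issue the VERW with a single %rsp-relative operand, as the hunks below show.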
--- a/xen/arch/x86/x86_64/asm-offsets.c
+++ b/xen/arch/x86/x86_64/asm-offsets.c
@@ -110,6 +110,7 @@ void __dummy__(void)
     BLANK();
 
     OFFSET(CPUINFO_guest_cpu_user_regs, struct cpu_info, guest_cpu_user_regs);
+    OFFSET(CPUINFO_verw_sel, struct cpu_info, verw_sel);
     OFFSET(CPUINFO_current_vcpu, struct cpu_info, current_vcpu);
     OFFSET(CPUINFO_cr4, struct cpu_info, cr4);
     OFFSET(CPUINFO_xen_cr3, struct cpu_info, xen_cr3);
--- a/xen/include/asm-x86/cpufeatures.h
+++ b/xen/include/asm-x86/cpufeatures.h
@@ -32,3 +32,6 @@ XEN_CPUFEATURE(SC_RSB_PV, (FSCAPINTS+0)*32+18) /* RSB overwrite needed for
 XEN_CPUFEATURE(SC_RSB_HVM, (FSCAPINTS+0)*32+19) /* RSB overwrite needed for HVM */
 XEN_CPUFEATURE(SC_MSR_IDLE, (FSCAPINTS+0)*32+21) /* (SC_MSR_PV || SC_MSR_HVM) && default_xen_spec_ctrl */
 XEN_CPUFEATURE(XEN_LBR, (FSCAPINTS+0)*32+22) /* Xen uses MSR_DEBUGCTL.LBR */
+XEN_CPUFEATURE(SC_VERW_PV, (FSCAPINTS+0)*32+23) /* VERW used by Xen for PV */
+XEN_CPUFEATURE(SC_VERW_HVM, (FSCAPINTS+0)*32+24) /* VERW used by Xen for HVM */
+XEN_CPUFEATURE(SC_VERW_IDLE, (FSCAPINTS+0)*32+25) /* VERW used by Xen for idle */
--- a/xen/include/asm-x86/current.h
+++ b/xen/include/asm-x86/current.h
@@ -38,6 +38,7 @@ struct vcpu;
 struct cpu_info {
     struct cpu_user_regs guest_cpu_user_regs;
     unsigned int processor_id;
+    unsigned int verw_sel;
     struct vcpu *current_vcpu;
     unsigned long per_cpu_offset;
     unsigned long cr4;
--- a/xen/include/asm-x86/spec_ctrl.h
+++ b/xen/include/asm-x86/spec_ctrl.h
@@ -61,6 +61,13 @@ static inline void init_shadow_spec_ctrl_state(void)
     info->shadow_spec_ctrl = 0;
     info->xen_spec_ctrl = default_xen_spec_ctrl;
     info->spec_ctrl_flags = default_spec_ctrl_flags;
+
+    /*
+     * For least latency, the VERW selector should be a writeable data
+     * descriptor resident in the cache. __HYPERVISOR_DS32 shares a cache
+     * line with __HYPERVISOR_CS, so is expected to be very cache-hot.
+     */
+    info->verw_sel = __HYPERVISOR_DS32;
 }
 
 /* WARNING! `ret`, `call *`, `jmp *` not safe after this call. */
@@ -81,6 +88,22 @@ static always_inline void spec_ctrl_enter_idle(struct cpu_info *info)
     alternative_input("", "wrmsr", X86_FEATURE_SC_MSR_IDLE,
                       "a" (val), "c" (MSR_SPEC_CTRL), "d" (0));
     barrier();
+
+    /*
+     * Microarchitectural Store Buffer Data Sampling:
+     *
+     * On vulnerable systems, store buffer entries are statically partitioned
+     * between active threads. When entering idle, our store buffer entries
+     * are re-partitioned to allow the other threads to use them.
+     *
+     * Flush the buffers to ensure that no sensitive data of ours can be
+     * leaked by a sibling after it gets our store buffer entries.
+     *
+     * Note: VERW must be encoded with a memory operand, as it is only that
+     * form which causes a flush.
+     */
+    alternative_input("", "verw %[sel]", X86_FEATURE_SC_VERW_IDLE,
+                      [sel] "m" (info->verw_sel));
 }
 
 /* WARNING! `ret`, `call *`, `jmp *` not safe before this call. */
@@ -99,6 +122,17 @@ static always_inline void spec_ctrl_exit_idle(struct cpu_info *info)
     alternative_input("", "wrmsr", X86_FEATURE_SC_MSR_IDLE,
                       "a" (val), "c" (MSR_SPEC_CTRL), "d" (0));
     barrier();
+
+    /*
+     * Microarchitectural Store Buffer Data Sampling:
+     *
+     * On vulnerable systems, store buffer entries are statically partitioned
+     * between active threads. When exiting idle, the other threads store
+     * buffer entries are re-partitioned to give us some.
+     *
+     * We now have store buffer entries with stale data from sibling threads.
+     * A flush if necessary will be performed on the return to guest path.
+     */
 }
 
 #endif /* __ASSEMBLY__ */
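
For orientation, here is a hedged sketch of how the two idle helpers above might bracket a halt once a later change (per the commit message) sets X86_FEATURE_SC_VERW_IDLE; example_idle() and the bare "sti; hlt" are illustrative assumptions, and the fragment only builds inside the Xen tree:

    #include <asm/current.h>    /* get_cpu_info(), struct cpu_info */
    #include <asm/spec_ctrl.h>  /* spec_ctrl_enter_idle(), spec_ctrl_exit_idle() */

    static void example_idle(void)   /* hypothetical idle entry point */
    {
        struct cpu_info *info = get_cpu_info();

        /* Flush our store buffer entries before they are re-partitioned to
         * the sibling thread (VERW is patched in when SC_VERW_IDLE is set). */
        spec_ctrl_enter_idle(info);

        asm volatile ( "sti; hlt" ::: "memory" );

        /* Stale sibling data may now sit in our store buffer entries; the
         * flush, if needed, happens on the return-to-guest path instead. */
        spec_ctrl_exit_idle(info);
    }

The asymmetry is deliberate: entering idle flushes our own data before the sibling can inherit it, while exiting idle defers the flush of the sibling's stale data to SPEC_CTRL_EXIT_TO_{PV,HVM} below.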
--- a/xen/include/asm-x86/spec_ctrl_asm.h
+++ b/xen/include/asm-x86/spec_ctrl_asm.h
@@ -241,12 +241,16 @@
 /* Use when exiting to PV guest context. */
 #define SPEC_CTRL_EXIT_TO_PV \
     ALTERNATIVE "", \
-        DO_SPEC_CTRL_EXIT_TO_GUEST, X86_FEATURE_SC_MSR_PV
+        DO_SPEC_CTRL_EXIT_TO_GUEST, X86_FEATURE_SC_MSR_PV; \
+    ALTERNATIVE "", __stringify(verw CPUINFO_verw_sel(%rsp)), \
+        X86_FEATURE_SC_VERW_PV
 
 /* Use when exiting to HVM guest context. */
 #define SPEC_CTRL_EXIT_TO_HVM \
     ALTERNATIVE "", \
-        DO_SPEC_CTRL_EXIT_TO_GUEST, X86_FEATURE_SC_MSR_HVM
+        DO_SPEC_CTRL_EXIT_TO_GUEST, X86_FEATURE_SC_MSR_HVM; \
+    ALTERNATIVE "", __stringify(verw CPUINFO_verw_sel(%rsp)), \
+        X86_FEATURE_SC_VERW_HVM
 
 /*
  * Use in IST interrupt/exception context. May interrupt Xen or PV context.
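
Finally, since the three synthetic caps are introduced here but deliberately left unused, here is a hedged sketch of what the promised later enablement could look like; the opt_md_clear_* knobs and example_enable_verw_caps() are assumed names, and only setup_force_cpu_cap() and the X86_FEATURE_SC_VERW_* features are taken from Xen and this patch:

    /* Sketch of code that would live alongside the existing speculation
     * control setup (e.g. xen/arch/x86/spec_ctrl.c, which already pulls in
     * the required headers); the option names are assumptions. */
    static bool opt_md_clear_pv = true, opt_md_clear_hvm = true;

    static void example_enable_verw_caps(void)   /* hypothetical */
    {
        if ( opt_md_clear_pv )
            setup_force_cpu_cap(X86_FEATURE_SC_VERW_PV);
        if ( opt_md_clear_hvm )
            setup_force_cpu_cap(X86_FEATURE_SC_VERW_HVM);

        /* Idle-time flushing only matters if some guest type is being
         * protected at all. */
        if ( opt_md_clear_pv || opt_md_clear_hvm )
            setup_force_cpu_cap(X86_FEATURE_SC_VERW_IDLE);
    }

Once a cap is forced, the alternatives framework patches the corresponding VERW into the NOP'd slots added above at boot time.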