untrusted comment: verify with openbsd-64-base.pub
RWQq6XmS4eDAcbbz7TR4/6ehlfYTKckJ+j0NXXnCMXLE0xcP1oJ1LPWB4Nk8K0pnFO2ss1Iy14Dg0IKkYr6+kI7OBewyIdE88Q4=

OpenBSD 6.4 errata 018, May 29, 2019:

Intel CPUs are vulnerable to cross-privilege side-channel attacks known
as Microarchitectural Data Sampling (MDS).

Apply by doing:
    signify -Vep /etc/signify/openbsd-64-base.pub -x 018_mds.patch.sig \
        -m - | (cd /usr/src && patch -p0)

Install updated CPU microcode, where available, with fw_update(1);
new microcode makes the VERW instruction flush the affected buffers.
Then rebuild and install a new kernel:
    fw_update
    KK=`sysctl -n kern.osversion | cut -d# -f1`
    cd /usr/src/sys/arch/`machine`/compile/$KK
    make obj
    make config
    make
    make install

After rebooting, affected machines report the chosen mitigation in
dmesg(8), e.g. "cpu0: using VERW MDS workaround".

Index: sys/arch/amd64/amd64/cpu.c
===================================================================
RCS file: /cvs/src/sys/arch/amd64/amd64/cpu.c,v
retrieving revision 1.129
diff -u -p -r1.129 cpu.c
--- sys/arch/amd64/amd64/cpu.c	4 Oct 2018 05:00:40 -0000	1.129
+++ sys/arch/amd64/amd64/cpu.c	26 May 2019 02:13:14 -0000
@@ -141,6 +141,7 @@ struct cpu_softc {
 
 void	replacesmap(void);
 void	replacemeltdown(void);
+void	replacemds(void);
 
 extern long _stac;
 extern long _clac;
@@ -185,6 +186,129 @@ replacemeltdown(void)
 	splx(s);
 }
 
+void
+replacemds(void)
+{
+	static int replacedone = 0;
+	extern long mds_handler_bdw, mds_handler_ivb, mds_handler_skl;
+	extern long mds_handler_skl_sse, mds_handler_skl_avx;
+	extern long mds_handler_silvermont, mds_handler_knights;
+	struct cpu_info *ci = &cpu_info_primary;
+	CPU_INFO_ITERATOR cii;
+	void *handler = NULL, *vmm_handler = NULL;
+	const char *type;
+	int has_verw, s;
+
+	/* ci_mds_tmp must be 32byte aligned for AVX instructions */
+	CTASSERT((offsetof(struct cpu_info, ci_mds_tmp) -
+		  offsetof(struct cpu_info, ci_PAGEALIGN)) % 32 == 0);
+
+	if (replacedone)
+		return;
+	replacedone = 1;
+
+	if (strcmp(cpu_vendor, "GenuineIntel") != 0 ||
+	    ((ci->ci_feature_sefflags_edx & SEFF0EDX_ARCH_CAP) &&
+	     (rdmsr(MSR_ARCH_CAPABILITIES) & ARCH_CAPABILITIES_MDS_NO))) {
+		/* Unaffected, nop out the handling code */
+		has_verw = 0;
+	} else if (ci->ci_feature_sefflags_edx & SEFF0EDX_MD_CLEAR) {
+		/* new firmware, use VERW */
+		has_verw = 1;
+	} else {
+		int family = ci->ci_family;
+		int model = ci->ci_model;
+		int stepping = CPUID2STEPPING(ci->ci_signature);
+
+		has_verw = 0;
+		if (family == 0x6 &&
+		    (model == 0x2e || model == 0x1e || model == 0x1f ||
+		     model == 0x1a || model == 0x2f || model == 0x25 ||
+		     model == 0x2c || model == 0x2d || model == 0x2a ||
+		     model == 0x3e || model == 0x3a)) {
+			/* Nehalem, SandyBridge, IvyBridge */
+			handler = vmm_handler = &mds_handler_ivb;
+			type = "IvyBridge";
+			CPU_INFO_FOREACH(cii, ci) {
+				ci->ci_mds_buf = malloc(672, M_DEVBUF,
+				    M_WAITOK);
+				memset(ci->ci_mds_buf, 0, 16);
+			}
+		} else if (family == 0x6 &&
+		    (model == 0x3f || model == 0x3c || model == 0x45 ||
+		     model == 0x46 || model == 0x56 || model == 0x4f ||
+		     model == 0x47 || model == 0x3d)) {
+			/* Haswell and Broadwell */
+			handler = vmm_handler = &mds_handler_bdw;
+			type = "Broadwell";
+			CPU_INFO_FOREACH(cii, ci) {
+				ci->ci_mds_buf = malloc(1536, M_DEVBUF,
+				    M_WAITOK);
+			}
+		} else if (family == 0x6 &&
+		    ((model == 0x55 && stepping <= 5) || model == 0x4e ||
+		    model == 0x5e || (model == 0x8e && stepping <= 0xb) ||
+		    (model == 0x9e && stepping <= 0xc))) {
+			/*
+			 * Skylake, KabyLake, CoffeeLake, WhiskeyLake,
+			 * CascadeLake
+			 */
+			/* XXX mds_handler_skl_avx512 */
+			if (xgetbv(0) & XCR0_AVX) {
+				handler = &mds_handler_skl_avx;
+				type = "Skylake AVX";
+			} else {
+				handler = &mds_handler_skl_sse;
+				type = "Skylake SSE";
+			}
+			vmm_handler = &mds_handler_skl;
+			CPU_INFO_FOREACH(cii, ci) {
+				vaddr_t b64;
+				b64 = (vaddr_t)malloc(6 * 1024 + 64 + 63,
+				    M_DEVBUF, M_WAITOK);
+				ci->ci_mds_buf = (void *)((b64 + 63) & ~63);
+				memset(ci->ci_mds_buf, 0, 64);
+			}
+		} else if (family == 0x6 &&
+		    (model == 0x37 || model == 0x4a || model == 0x4c ||
+		     model == 0x4d || model == 0x5a || model == 0x5d ||
+		     model == 0x6e || model == 0x65 || model == 0x75)) {
+			/* Silvermont, Airmont */
+			handler = vmm_handler = &mds_handler_silvermont;
+			type = "Silvermont";
+			CPU_INFO_FOREACH(cii, ci) {
+				ci->ci_mds_buf = malloc(256, M_DEVBUF,
+				    M_WAITOK);
+				memset(ci->ci_mds_buf, 0, 16);
+			}
+		} else if (family == 0x6 && (model == 0x85 || model == 0x57)) {
+			handler = vmm_handler = &mds_handler_knights;
+			type = "KnightsLanding";
+			CPU_INFO_FOREACH(cii, ci) {
+				vaddr_t b64;
+				b64 = (vaddr_t)malloc(1152 + 63, M_DEVBUF,
+				    M_WAITOK);
+				ci->ci_mds_buf = (void *)((b64 + 63) & ~63);
+			}
+		}
+	}
+
+	if (handler != NULL) {
+		printf("cpu0: using %s MDS workaround\n", type);
+		s = splhigh();
+		codepatch_call(CPTAG_MDS, handler);
+		codepatch_call(CPTAG_MDS_VMM, vmm_handler);
+		splx(s);
+	} else if (has_verw)
+		printf("cpu0: using %s MDS workaround\n", "VERW");
+	else {
+		s = splhigh();
+		codepatch_nop(CPTAG_MDS);
+		codepatch_nop(CPTAG_MDS_VMM);
+		splx(s);
+	}
+}
+
 #ifdef MULTIPROCESSOR
 int mp_cpu_start(struct cpu_info *);
 void mp_cpu_start_cleanup(struct cpu_info *);
@@ -910,6 +1034,9 @@ extern vector Xsyscall_meltdown, Xsyscal
 void
 cpu_init_msrs(struct cpu_info *ci)
 {
+	uint64_t msr;
+	int family;
+
 	wrmsr(MSR_STAR,
 	    ((uint64_t)GSEL(GCODE_SEL, SEL_KPL) << 32) |
 	    ((uint64_t)GSEL(GUCODE32_SEL, SEL_UPL) << 48));
@@ -921,6 +1048,16 @@ cpu_init_msrs(struct cpu_info *ci)
 	wrmsr(MSR_FSBASE, 0);
 	wrmsr(MSR_GSBASE, (u_int64_t)ci);
 	wrmsr(MSR_KERNELGSBASE, 0);
+
+	family = ci->ci_family;
+	if (strcmp(cpu_vendor, "GenuineIntel") == 0 &&
+	    (family > 6 || (family == 6 && ci->ci_model >= 0xd)) &&
+	    rdmsr_safe(MSR_MISC_ENABLE, &msr) == 0 &&
+	    (msr & MISC_ENABLE_FAST_STRINGS) == 0) {
+		msr |= MISC_ENABLE_FAST_STRINGS;
+		wrmsr(MSR_MISC_ENABLE, msr);
+		DPRINTF("%s: enabled fast strings\n", ci->ci_dev->dv_xname);
+	}
 }
 
 void
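
Background (illustrative, not from the patch): replacemds() drives the
kernel's codepatch machinery to install one of the handlers, or
nothing, into the return-to-userspace paths in locore.S below.  A rough
sketch of what codepatch_call() and codepatch_nop() achieve, assuming a
hypothetical simplified interface; the real implementation lives in
sys/arch/amd64/amd64/codepatch.c:

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    /*
     * Hypothetical sketch: rewrite a tagged code region either into a
     * call to the chosen MDS handler or into NOPs (unaffected CPUs).
     */
    void
    codepatch_sketch(uint8_t *start, size_t len, void *handler)
    {
            if (handler != NULL) {
                    /* "call handler" (e8 rel32), pad the rest with NOPs */
                    int32_t rel = (int32_t)((uint8_t *)handler -
                        (start + 5));

                    start[0] = 0xe8;
                    memcpy(&start[1], &rel, sizeof(rel));
                    memset(&start[5], 0x90, len - 5);
            } else {
                    memset(start, 0x90, len);
            }
    }
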
Index: sys/arch/amd64/amd64/genassym.cf
===================================================================
RCS file: /cvs/src/sys/arch/amd64/amd64/genassym.cf,v
retrieving revision 1.39
diff -u -p -r1.39 genassym.cf
--- sys/arch/amd64/amd64/genassym.cf	4 Oct 2018 05:00:40 -0000	1.39
+++ sys/arch/amd64/amd64/genassym.cf	26 May 2019 02:13:14 -0000
@@ -129,6 +129,8 @@ member	CPU_INFO_KERN_CR3	ci_kern_cr3
 member	CPU_INFO_USER_CR3	ci_user_cr3
 member	CPU_INFO_KERN_RSP	ci_kern_rsp
 member	CPU_INFO_INTR_RSP	ci_intr_rsp
+member	CPU_INFO_MDS_BUF	ci_mds_buf
+member	CPU_INFO_MDS_TMP	ci_mds_tmp
 
 export	CPUF_USERSEGS
 export	CPUF_USERXSTATE
Index: sys/arch/amd64/amd64/identcpu.c
===================================================================
RCS file: /cvs/src/sys/arch/amd64/amd64/identcpu.c,v
retrieving revision 1.109
diff -u -p -r1.109 identcpu.c
--- sys/arch/amd64/amd64/identcpu.c	4 Oct 2018 05:00:40 -0000	1.109
+++ sys/arch/amd64/amd64/identcpu.c	26 May 2019 02:13:14 -0000
@@ -207,6 +207,8 @@ const struct {
 }, cpu_seff0_edxfeatures[] = {
 	{ SEFF0EDX_AVX512_4FNNIW, "AVX512FNNIW" },
 	{ SEFF0EDX_AVX512_4FMAPS, "AVX512FMAPS" },
+	{ SEFF0EDX_MD_CLEAR,	"MD_CLEAR" },
+	{ SEFF0EDX_TSXFA,	"TSXFA" },
 	{ SEFF0EDX_IBRS,	"IBRS,IBPB" },
 	{ SEFF0EDX_STIBP,	"STIBP" },
 	{ SEFF0EDX_L1DF,	"L1DF" },
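
Background (illustrative, not from the patch): MD_CLEAR is CPUID leaf
7, subleaf 0, EDX bit 10 (the SEFF0EDX_MD_CLEAR value 0x00000400 in the
specialreg.h hunk below).  A minimal userland check, assuming a
compiler that provides <cpuid.h>:

    #include <stdio.h>
    #include <cpuid.h>

    int
    main(void)
    {
            unsigned int eax, ebx, ecx, edx;

            /* leaf 7, subleaf 0: structured extended feature flags */
            if (__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx) &&
                (edx & 0x00000400))         /* SEFF0EDX_MD_CLEAR */
                    printf("MD_CLEAR: VERW flushes CPU buffers\n");
            else
                    printf("no MD_CLEAR: software sequence needed\n");
            return 0;
    }
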
Index: sys/arch/amd64/amd64/locore.S
===================================================================
RCS file: /cvs/src/sys/arch/amd64/amd64/locore.S,v
retrieving revision 1.111
diff -u -p -r1.111 locore.S
--- sys/arch/amd64/amd64/locore.S	7 Oct 2018 22:43:06 -0000	1.111
+++ sys/arch/amd64/amd64/locore.S	26 May 2019 02:13:14 -0000
@@ -662,8 +662,6 @@ IDTVEC_NOALIGN(syscall)
 .Lsyscall_restore_registers:
 	RET_STACK_REFILL_WITH_RCX
 
-	movq	TF_RDI(%rsp),%rdi
-	movq	TF_RSI(%rsp),%rsi
 	movq	TF_R8(%rsp),%r8
 	movq	TF_R9(%rsp),%r9
 	movq	TF_R10(%rsp),%r10
@@ -671,6 +669,14 @@ IDTVEC_NOALIGN(syscall)
 	movq	TF_R13(%rsp),%r13
 	movq	TF_R14(%rsp),%r14
 	movq	TF_R15(%rsp),%r15
+
+	CODEPATCH_START
+	movw	%ds,TF_R8(%rsp)
+	verw	TF_R8(%rsp)
+	CODEPATCH_END(CPTAG_MDS)
+
+	movq	TF_RDI(%rsp),%rdi
+	movq	TF_RSI(%rsp),%rsi
 	movq	TF_RBP(%rsp),%rbp
 	movq	TF_RBX(%rsp),%rbx
 
@@ -829,8 +835,6 @@ intr_user_exit_post_ast:
 .Lintr_restore_registers:
 	RET_STACK_REFILL_WITH_RCX
 
-	movq	TF_RDI(%rsp),%rdi
-	movq	TF_RSI(%rsp),%rsi
 	movq	TF_R8(%rsp),%r8
 	movq	TF_R9(%rsp),%r9
 	movq	TF_R10(%rsp),%r10
@@ -838,6 +842,14 @@ intr_user_exit_post_ast:
 	movq	TF_R13(%rsp),%r13
 	movq	TF_R14(%rsp),%r14
 	movq	TF_R15(%rsp),%r15
+
+	CODEPATCH_START
+	movw	%ds,TF_R8(%rsp)
+	verw	TF_R8(%rsp)
+	CODEPATCH_END(CPTAG_MDS)
+
+	movq	TF_RDI(%rsp),%rdi
+	movq	TF_RSI(%rsp),%rsi
 	movq	TF_RBP(%rsp),%rbp
 	movq	TF_RBX(%rsp),%rbx
 
@@ -1141,6 +1153,28 @@ ENTRY(pagezero)
 	jne     1b
 	sfence
 	RETGUARD_CHECK(pagezero, r11)
+	ret
+
+/* int rdmsr_safe(u_int msr, uint64_t *data) */
+ENTRY(rdmsr_safe)
+	RETGUARD_SETUP(rdmsr_safe, r10)
+
+	movl	%edi,	%ecx	/* u_int msr */
+	.globl	rdmsr_safe_fault
+rdmsr_safe_fault:
+	rdmsr
+	salq	$32, %rdx
+	movl	%eax, %eax
+	orq	%rdx, %rax
+	movq	%rax, (%rsi)	/* *data */
+	xorq	%rax, %rax
+
+	RETGUARD_CHECK(rdmsr_safe, r10)
+	ret
+
+NENTRY(rdmsr_resume)
+	movl	$0x1, %eax
+	RETGUARD_CHECK(rdmsr_safe, r10)
 	ret
 
 #if NXEN > 0
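
Background (illustrative, not from the patch): rdmsr_safe() pairs with
the #GP handler change in vector.S below; a faulting rdmsr resumes at
rdmsr_resume and the call returns nonzero instead of crashing the
kernel.  Roughly how the cpu_init_msrs() hunk above uses it, shown as a
hypothetical standalone function:

    /* sketch, kernel context: enable fast strings if the MSR exists */
    void
    enable_fast_strings_sketch(void)
    {
            uint64_t msr;

            /*
             * rdmsr_safe() returns 0 and fills *data on success,
             * nonzero if the rdmsr faulted (MSR not implemented).
             */
            if (rdmsr_safe(MSR_MISC_ENABLE, &msr) == 0 &&
                (msr & MISC_ENABLE_FAST_STRINGS) == 0)
                    wrmsr(MSR_MISC_ENABLE,
                        msr | MISC_ENABLE_FAST_STRINGS);
    }
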
Index: sys/arch/amd64/amd64/mainbus.c
===================================================================
RCS file: /cvs/src/sys/arch/amd64/amd64/mainbus.c,v
retrieving revision 1.45
diff -u -p -r1.45 mainbus.c
--- sys/arch/amd64/amd64/mainbus.c	22 Sep 2018 17:41:52 -0000	1.45
+++ sys/arch/amd64/amd64/mainbus.c	26 May 2019 02:13:14 -0000
@@ -73,6 +73,8 @@
 #include <machine/efifbvar.h>
 #endif
 
+void	replacemds(void);
+
 int	mainbus_match(struct device *, void *, void *);
 void	mainbus_attach(struct device *, struct device *, void *);
 
@@ -204,6 +206,9 @@ mainbus_attach(struct device *parent, st
 
 		config_found(self, &caa, mainbus_print);
 	}
+
+	/* All CPUs are attached, handle MDS */
+	replacemds();
 
 #if NACPI > 0
 	if (!acpi_hasprocfvs)
Index: sys/arch/amd64/amd64/mds.S
===================================================================
RCS file: sys/arch/amd64/amd64/mds.S
diff -N sys/arch/amd64/amd64/mds.S
--- /dev/null	1 Jan 1970 00:00:00 -0000
+++ sys/arch/amd64/amd64/mds.S	26 May 2019 02:13:14 -0000
@@ -0,0 +1,193 @@
+/*	$OpenBSD$	*/
+/*
+ * Copyright (c) 2019 Philip Guenther <guenther@openbsd.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+/*
+ * ASM sequences for mitigating MDS on different Intel CPU models, taken from
+ *	https://software.intel.com/security-software-guidance/insights/deep-dive-intel-analysis-microarchitectural-data-sampling
+ * and adjusted to fit OpenBSD style and kernel usage.
+ * Some naming inspired by FreeBSD's usage of these sequences.
+ */
+
+#include "assym.h"
+
+#include <machine/asm.h>
+#include <machine/specialreg.h>
+
+ENTRY(mds_handler_ivb)
+	RETGUARD_SETUP(mds_handler_ivb, r11)
+	movq	CPUVAR(MDS_BUF),%rax
+	movdqa	%xmm0,CPUVAR(MDS_TMP)
+	pxor	%xmm0,%xmm0
+
+	lfence
+	orpd	(%rax),%xmm0
+	orpd	(%rax),%xmm0
+	mfence
+	movl	$40,%ecx
+	addq	$16,%rax
+1:	movntdq	%xmm0,(%rax)
+	addq	$16,%rax
+	decl	%ecx
+	jnz	1b
+	mfence
+
+	movdqa	CPUVAR(MDS_TMP),%xmm0
+	RETGUARD_CHECK(mds_handler_ivb, r11)
+	retq
+END(mds_handler_ivb)
+
+ENTRY(mds_handler_bdw)
+	RETGUARD_SETUP(mds_handler_bdw, r11)
+	movq	CPUVAR(MDS_BUF),%rax
+	movdqa	%xmm0,CPUVAR(MDS_TMP)
+	pxor	%xmm0,%xmm0
+
+	movq	%rax,%rdi
+	movq	%rax,%rsi
+	movl	$40,%ecx
+1:	movntdq	%xmm0,(%rax)
+	addq	$16,%rax
+	decl	%ecx
+	jnz	1b
+	mfence
+	movl	$1536,%ecx
+	rep movsb
+	lfence
+
+	movdqa	CPUVAR(MDS_TMP),%xmm0
+	RETGUARD_CHECK(mds_handler_bdw, r11)
+	retq
+END(mds_handler_bdw)
+
+ENTRY(mds_handler_skl)
+	xorl	%ecx,%ecx
+	xgetbv
+	testb	$XCR0_AVX,%al
+	jne	mds_handler_skl_avx
+	jmp	mds_handler_skl_sse
+END(mds_handler_skl)
+
+ENTRY(mds_handler_skl_sse)
+	RETGUARD_SETUP(mds_handler_skl_sse, r11)
+	movq	CPUVAR(MDS_BUF),%rax
+	leaq	64(%rax),%rdi
+	movdqa	%xmm0,CPUVAR(MDS_TMP)
+	pxor	%xmm0,%xmm0
+
+	lfence
+	orpd	(%rax),%xmm0
+	orpd	(%rax),%xmm0
+	xorl	%eax,%eax
+1:	clflushopt	5376(%rdi,%rax,8)
+	addl	$8,%eax
+	cmpl	$8*12,%eax
+	jb	1b
+	sfence
+	movl	$6144,%ecx
+	xorl	%eax,%eax
+	rep stosb
+	mfence
+
+	movdqa	CPUVAR(MDS_TMP),%xmm0
+	RETGUARD_CHECK(mds_handler_skl_sse, r11)
+	retq
+END(mds_handler_skl_sse)
+
+ENTRY(mds_handler_skl_avx)
+	RETGUARD_SETUP(mds_handler_skl_avx, r11)
+	movq	CPUVAR(MDS_BUF),%rax
+	leaq	64(%rax),%rdi
+	vmovdqa	%ymm0,CPUVAR(MDS_TMP)
+	vpxor	%ymm0,%ymm0,%ymm0
+
+	lfence
+	vorpd	(%rax),%ymm0,%ymm0
+	vorpd	(%rax),%ymm0,%ymm0
+	xorl	%eax,%eax
+1:	clflushopt	5376(%rdi,%rax,8)
+	addl	$8,%eax
+	cmpl	$8*12,%eax
+	jb	1b
+	sfence
+	movl	$6144,%ecx
+	xorl	%eax,%eax
+	rep stosb
+	mfence
+
+	vmovdqa	CPUVAR(MDS_TMP),%ymm0
+	RETGUARD_CHECK(mds_handler_skl_avx, r11)
+	retq
+END(mds_handler_skl_avx)
+
+/* we don't support AVX512 yet */
+#if 0
+ENTRY(mds_handler_skl_avx512)
+	RETGUARD_SETUP(mds_handler_skl_avx512, r11)
+	movq	CPUVAR(MDS_BUF),%rax
+	leaq	64(%rax),%rdi
+	vmovdqa64	%zmm0,CPUVAR(MDS_TMP)
+	vpxor	%zmm0,%zmm0,%zmm0
+
+	lfence
+	vorpd	(%rax),%zmm0,%zmm0
+	vorpd	(%rax),%zmm0,%zmm0
+	xorl	%eax,%eax
+1:	clflushopt	5376(%rdi,%rax,8)
+	addl	$8,%eax
+	cmpl	$8*12,%eax
+	jb	1b
+	sfence
+	movl	$6144,%ecx
+	xorl	%eax,%eax
+	rep stosb
+	mfence
+
+	vmovdqa64	CPUVAR(MDS_TMP),%zmm0
+	RETGUARD_CHECK(mds_handler_skl_avx512, r11)
+	retq
+END(mds_handler_skl_avx512)
+#endif
+
+ENTRY(mds_handler_silvermont)
+	RETGUARD_SETUP(mds_handler_silvermont, r11)
+	movq	CPUVAR(MDS_BUF),%rax
+	movdqa	%xmm0,CPUVAR(MDS_TMP)
+	pxor	%xmm0,%xmm0
+
+	movl	$16,%ecx
+1:	movntdq	%xmm0,(%rax)
+	addq	$16,%rax
+	decl	%ecx
+	jnz	1b
+	mfence
+
+	movdqa	CPUVAR(MDS_TMP),%xmm0
+	RETGUARD_CHECK(mds_handler_silvermont, r11)
+	retq
+END(mds_handler_silvermont)
+
+ENTRY(mds_handler_knights)
+	RETGUARD_SETUP(mds_handler_knights, r11)
+	movq	CPUVAR(MDS_BUF),%rdi
+	xorl	%eax,%eax
+	movl	$16,%ecx
+	rep stosq
+	movl	$128,%ecx
+	rep stosq
+	mfence
+	RETGUARD_CHECK(mds_handler_knights, r11)
+	retq
+END(mds_handler_knights)
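
Background (illustrative, not from the patch): replacemds() patches the
syscall/interrupt return paths with the SSE or AVX Skylake variant
chosen once at boot, but hands vmm(4) the dispatching mds_handler_skl,
presumably because XCR0 at that point need not match its boot-time
value.  In rough C, treating the asm entry points above as functions:

    /* hypothetical prototypes for the asm entry points */
    extern void mds_handler_skl_avx(void);
    extern void mds_handler_skl_sse(void);

    void
    mds_handler_skl_sketch(void)
    {
            /* xgetbv(0) reads XCR0; pick the variant live */
            if (xgetbv(0) & XCR0_AVX)
                    mds_handler_skl_avx();
            else
                    mds_handler_skl_sse();
    }
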
Index: sys/arch/amd64/amd64/vector.S
===================================================================
RCS file: /cvs/src/sys/arch/amd64/amd64/vector.S,v
retrieving revision 1.77
diff -u -p -r1.77 vector.S
--- sys/arch/amd64/amd64/vector.S	7 Oct 2018 22:43:06 -0000	1.77
+++ sys/arch/amd64/amd64/vector.S	26 May 2019 02:13:15 -0000
@@ -263,11 +263,12 @@ IDTVEC(trap0c)
 	TRAP(T_STKFLT)
 
 /*
- * The #GP (general protection fault) handler has a couple weird cases
+ * The #GP (general protection fault) handler has a few weird cases
  * to handle:
  *  - trapping in iretq to userspace and
  *  - trapping in xrstor in the kernel.
- * We detect both of these by examining the %rip in the iretq_frame.
+ *  - trapping when invalid MSRs are read in rdmsr_safe
+ * We detect these by examining the %rip in the iretq_frame.
  * Handling them is done by updating %rip in the iretq_frame to point
  * to a stub handler of some sort and then iretq'ing to it.  For the
  * iretq fault we resume in a stub which acts like we got a fresh #GP.
@@ -287,9 +288,17 @@ IDTVEC(trap0d)
 	leaq	xsetbv_fault(%rip),%rcx
 	cmpq	%rcx,%rdx
 	je	.Lhandle_xsetbv
+	leaq	rdmsr_safe_fault(%rip),%rcx
+	cmpq	%rcx,%rdx
+	je	.Lhandle_rdmsr_safe
 	popq	%rcx
 	popq	%rdx
 	TRAP(T_PROTFLT)
+
+.Lhandle_rdmsr_safe:
+	/* rdmsr faulted; just resume in rdmsr_resume */
+	leaq	rdmsr_resume(%rip),%rcx
+	jmp	1f
 
 .Lhandle_xrstor:
 	/* xrstor faulted; just resume in xrstor_resume */
Index: sys/arch/amd64/amd64/vmm.c
===================================================================
RCS file: /cvs/src/sys/arch/amd64/amd64/vmm.c,v
retrieving revision 1.221.2.1
diff -u -p -r1.221.2.1 vmm.c
--- sys/arch/amd64/amd64/vmm.c	26 Mar 2019 19:50:56 -0000	1.221.2.1
+++ sys/arch/amd64/amd64/vmm.c	26 May 2019 02:13:15 -0000
@@ -6057,7 +6057,7 @@ vmm_handle_cpuid(struct vcpu *vcpu)
 			*rax = 0;	/* Highest subleaf supported */
 			*rbx = curcpu()->ci_feature_sefflags_ebx & VMM_SEFF0EBX_MASK;
 			*rcx = curcpu()->ci_feature_sefflags_ecx & VMM_SEFF0ECX_MASK;
-			*rdx = 0;
+			*rdx = curcpu()->ci_feature_sefflags_edx & VMM_SEFF0EDX_MASK;
 		} else {
 			/* Unsupported subleaf */
 			DPRINTF("%s: function 0x07 (SEFF) unsupported subleaf "
Index: sys/arch/amd64/amd64/vmm_support.S
===================================================================
RCS file: /cvs/src/sys/arch/amd64/amd64/vmm_support.S,v
retrieving revision 1.14
diff -u -p -r1.14 vmm_support.S
--- sys/arch/amd64/amd64/vmm_support.S	18 Sep 2018 16:02:08 -0000	1.14
+++ sys/arch/amd64/amd64/vmm_support.S	26 May 2019 02:13:15 -0000
@@ -18,6 +18,7 @@
 #include "assym.h"
 #include <machine/param.h>
 #include <machine/asm.h>
+#include <machine/codepatch.h>
 #include <machine/psl.h>
 #include <machine/specialreg.h>
 
@@ -246,6 +247,19 @@ skip_init:
 	pushq	%rbp
 	pushq	%rbx
 	pushq	%rsi		/* Guest Regs Pointer */
+
+	/*
+	 * XXX this MDS mitigation and the L1TF mitigation are believed
+	 * XXX to overlap in some cases, but Intel hasn't provided the
+	 * XXX information yet to make the correct choices.
+	 */
+	CODEPATCH_START
+	subq	$8, %rsp
+	movw	%ds, (%rsp)
+	verw	(%rsp)
+	addq	$8, %rsp
+	CODEPATCH_END(CPTAG_MDS_VMM)
+	movq	(%rsp),%rsi	/* reload now that it's mucked with */
 
 	movq	$VMCS_HOST_IA32_RSP, %rdi
 	movq	%rsp, %rax
Index: sys/arch/amd64/conf/Makefile.amd64
===================================================================
RCS file: /cvs/src/sys/arch/amd64/conf/Makefile.amd64,v
retrieving revision 1.104
diff -u -p -r1.104 Makefile.amd64
--- sys/arch/amd64/conf/Makefile.amd64	12 Sep 2018 04:34:59 -0000	1.104
+++ sys/arch/amd64/conf/Makefile.amd64	26 May 2019 02:13:15 -0000
@@ -161,7 +161,7 @@ cleandir: clean
 depend obj:
 
 locore0.o: ${_machdir}/${_mach}/locore0.S assym.h
-mutex.o vector.o copy.o spl.o: assym.h
+mutex.o vector.o copy.o spl.o mds.o: assym.h
 mptramp.o acpi_wakecode.o vmm_support.o: assym.h
 
 hardlink-obsd:
Index: sys/arch/amd64/conf/files.amd64
===================================================================
RCS file: /cvs/src/sys/arch/amd64/conf/files.amd64,v
retrieving revision 1.100
diff -u -p -r1.100 files.amd64
--- sys/arch/amd64/conf/files.amd64	21 Aug 2018 18:06:12 -0000	1.100
+++ sys/arch/amd64/conf/files.amd64	26 May 2019 02:13:15 -0000
@@ -32,6 +32,7 @@ file	arch/amd64/amd64/cacheinfo.c
 file	arch/amd64/amd64/vector.S
 file	arch/amd64/amd64/copy.S
 file	arch/amd64/amd64/spl.S
+file	arch/amd64/amd64/mds.S
 
 file	arch/amd64/amd64/intr.c
 file	arch/amd64/amd64/bus_space.c
Index: sys/arch/amd64/include/codepatch.h
===================================================================
RCS file: /cvs/src/sys/arch/amd64/include/codepatch.h,v
retrieving revision 1.8
diff -u -p -r1.8 codepatch.h
--- sys/arch/amd64/include/codepatch.h	4 Oct 2018 05:00:40 -0000	1.8
+++ sys/arch/amd64/include/codepatch.h	26 May 2019 02:13:15 -0000
@@ -59,6 +59,8 @@ void codepatch_disable(void);
 #define CPTAG_XSAVE		5
 #define CPTAG_MELTDOWN_NOP	6
 #define CPTAG_PCID_SET_REUSE	7
+#define CPTAG_MDS		8
+#define CPTAG_MDS_VMM		9
 
 /*
  * As stac/clac SMAP instructions are 3 bytes, we want the fastest
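
Background (illustrative, not from the patch): each CODEPATCH_START /
CODEPATCH_END(tag) pair in the assembly records its region in a
link-time table that codepatch_call() and codepatch_nop() walk at boot.
Conceptually an entry looks something like this hypothetical struct
(the real definition lives in this header):

    /* hypothetical shape of one codepatch table entry */
    struct codepatch_entry {
            vaddr_t         addr;   /* start of the patchable region */
            uint16_t        len;    /* region length in bytes */
            uint16_t        tag;    /* CPTAG_MDS, CPTAG_MDS_VMM, ... */
    };
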
Index: sys/arch/amd64/include/cpu.h
===================================================================
RCS file: /cvs/src/sys/arch/amd64/include/cpu.h,v
retrieving revision 1.127
diff -u -p -r1.127 cpu.h
--- sys/arch/amd64/include/cpu.h	21 Aug 2018 19:04:40 -0000	1.127
+++ sys/arch/amd64/include/cpu.h	26 May 2019 02:13:15 -0000
@@ -116,6 +116,10 @@ struct cpu_info {
 	u_int64_t ci_intr_rsp;	/* U<-->K trampoline stack */
 	u_int64_t ci_user_cr3;	/* U-K page table */
 
+	/* bits for mitigating Micro-architectural Data Sampling */
+	char		ci_mds_tmp[32];		/* 32byte aligned */
+	void		*ci_mds_buf;
+
 	struct pcb *ci_curpcb;
 	struct pcb *ci_idle_pcb;
 
Index: sys/arch/amd64/include/cpu_full.h
===================================================================
RCS file: /cvs/src/sys/arch/amd64/include/cpu_full.h,v
retrieving revision 1.4
diff -u -p -r1.4 cpu_full.h
--- sys/arch/amd64/include/cpu_full.h	12 Sep 2018 07:00:51 -0000	1.4
+++ sys/arch/amd64/include/cpu_full.h	26 May 2019 02:13:15 -0000
@@ -1,6 +1,6 @@
 /*	$OpenBSD: cpu_full.h,v 1.4 2018/09/12 07:00:51 guenther Exp $	*/
 /*
- * Copyright (c) Philip Guenther <guenther@openbsd.org>
+ * Copyright (c) 2018 Philip Guenther <guenther@openbsd.org>
  *
  * Permission to use, copy, modify, and distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
Index: sys/arch/amd64/include/cpufunc.h
===================================================================
RCS file: /cvs/src/sys/arch/amd64/include/cpufunc.h,v
retrieving revision 1.31
diff -u -p -r1.31 cpufunc.h
--- sys/arch/amd64/include/cpufunc.h	4 Oct 2018 05:00:40 -0000	1.31
+++ sys/arch/amd64/include/cpufunc.h	26 May 2019 02:13:15 -0000
@@ -353,6 +353,8 @@ void cpu_ucode_apply(struct cpu_info *);
 struct cpu_info_full;
 void cpu_enter_pages(struct cpu_info_full *);
 
+int rdmsr_safe(u_int msr, uint64_t *);
+
 #endif /* _KERNEL */
 
 #endif /* !_MACHINE_CPUFUNC_H_ */
Index: sys/arch/amd64/include/specialreg.h
===================================================================
RCS file: /cvs/src/sys/arch/amd64/include/specialreg.h,v
retrieving revision 1.81
diff -u -p -r1.81 specialreg.h
--- sys/arch/amd64/include/specialreg.h	11 Sep 2018 07:13:23 -0000	1.81
+++ sys/arch/amd64/include/specialreg.h	26 May 2019 02:13:15 -0000
@@ -220,6 +220,8 @@
 /* SEFF EDX bits */
 #define SEFF0EDX_AVX512_4FNNIW	0x00000004 /* AVX-512 neural network insns */
 #define SEFF0EDX_AVX512_4FMAPS	0x00000008 /* AVX-512 mult accum single prec */
+#define SEFF0EDX_MD_CLEAR	0x00000400 /* Microarch Data Clear */
+#define SEFF0EDX_TSXFA		0x00002000 /* TSX Forced Abort */
 #define SEFF0EDX_IBRS		0x04000000 /* IBRS / IBPB Speculation Control */
 #define SEFF0EDX_STIBP		0x08000000 /* STIBP Speculation Control */
 #define SEFF0EDX_L1DF		0x10000000 /* L1D_FLUSH */
@@ -372,6 +374,7 @@
 #define ARCH_CAPABILITIES_RSBA		(1 << 2)	/* RSB Alternate */
 #define ARCH_CAPABILITIES_SKIP_L1DFL_VMENTRY	(1 << 3)
 #define ARCH_CAPABILITIES_SSB_NO	(1 << 4)	/* Spec St Byp safe */
+#define ARCH_CAPABILITIES_MDS_NO	(1 << 5) /* microarch data-sampling */
 #define MSR_FLUSH_CMD		0x10b
 #define FLUSH_CMD_L1D_FLUSH	(1ULL << 0)
 #define	MSR_BBL_CR_ADDR		0x116	/* PII+ only */
Index: sys/arch/amd64/include/vmmvar.h
===================================================================
RCS file: /cvs/src/sys/arch/amd64/include/vmmvar.h,v
retrieving revision 1.59
diff -u -p -r1.59 vmmvar.h
--- sys/arch/amd64/include/vmmvar.h	20 Sep 2018 14:32:59 -0000	1.59
+++ sys/arch/amd64/include/vmmvar.h	26 May 2019 02:13:15 -0000
@@ -608,6 +608,9 @@ struct vm_rwregs_params {
     SEFF0EBX_AVX512BW | SEFF0EBX_AVX512VL)
 #define VMM_SEFF0ECX_MASK ~(SEFF0ECX_AVX512VBMI)
 
+/* EDX mask contains the bits to include */
+#define VMM_SEFF0EDX_MASK (SEFF0EDX_MD_CLEAR)
+
 /*
  * Extended function flags - copy from host minus:
  * 0x80000001  EDX:RDTSCP Support