[4.9,11/24] ARM: KVM: invalidate BTB on guest exit for Cortex-A12/A17

Message ID 20181031135713.2873-12-dave.long@linaro.org
State Superseded
Series V4.9 backport of 32-bit arm spectre patches

Commit Message

David Long Oct. 31, 2018, 1:57 p.m. UTC
From: Marc Zyngier <marc.zyngier@arm.com>


Commit 3f7e8e2e1ebda787f156ce46e3f0a9ce2833fa4f upstream.

In order to avoid aliasing attacks against the branch predictor,
let's invalidate the BTB on guest exit. This is made complicated
by the fact that we cannot take a branch before invalidating the
BTB.

We only apply this to A12 and A17, which are the only two ARM
cores on which this is useful.
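
As a rough illustration (not part of the patch), the fall-through trick
can be modelled in C: each of the eight 32-bit vector slots executes
"add sp, sp, #1", so by the time control reaches the BPIALL sequence the
low bits of SP identify the entry without a single branch having been
taken. A hypothetical sketch:

    /*
     * Illustration only: Reset is the first slot and falls through
     * seven adds (value 7); FIQ is the last (a nop) and falls through
     * none (value 0), matching the vect_br 0..7 table in the patch.
     */
    enum hyp_entry { HYP_FIQ = 0, HYP_IRQ, HYP_HVC, HYP_DABT,
                     HYP_PABT, HYP_SVC, HYP_UNDEF, HYP_RESET };

    static enum hyp_entry decode_entry(unsigned long sp)
    {
        /* SP was 8-byte aligned on entry, so sp & 7 is the slot count */
        return (enum hyp_entry)(sp & 7);
    }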

Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
Boot-tested-by: Tony Lindgren <tony@atomide.com>
Reviewed-by: Tony Lindgren <tony@atomide.com>
Signed-off-by: David A. Long <dave.long@linaro.org>

---
 arch/arm/include/asm/kvm_asm.h |  2 -
 arch/arm/include/asm/kvm_mmu.h | 17 ++++++++-
 arch/arm/kvm/hyp/hyp-entry.S   | 69 ++++++++++++++++++++++++++++++++++
 3 files changed, 85 insertions(+), 3 deletions(-)

-- 
2.17.1
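
For context, the kvm_mmu.h hunk below keys the vector selection off
read_cpuid_part(), so only Cortex-A12/A17 pay for the hardened vector.
A caller-side sketch (illustrative only; install_hyp_vector() is a
made-up name, not from this series):

    static unsigned long install_hyp_vector(void)
    {
        /*
         * Hypothetical sketch: the KVM init path hands this address
         * to HYP. On Cortex-A12/A17 kvm_get_hyp_vector() returns
         * __kvm_hyp_vector_bp_inv (BTB invalidation on guest exit);
         * everywhere else, the plain __kvm_hyp_vector.
         */
        return (unsigned long)kvm_get_hyp_vector();
    }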

Comments

Marc Zyngier Nov. 5, 2018, 9:13 a.m. UTC | #1
David,

On 31/10/18 13:57, David Long wrote:
> From: Marc Zyngier <marc.zyngier@arm.com>
>
> Commit 3f7e8e2e1ebda787f156ce46e3f0a9ce2833fa4f upstream.
>
> In order to avoid aliasing attacks against the branch predictor,
> let's invalidate the BTB on guest exit. This is made complicated
> by the fact that we cannot take a branch before invalidating the
> BTB.
>
> We only apply this to A12 and A17, which are the only two ARM
> cores on which this is useful.
>
> Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
> Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
> Boot-tested-by: Tony Lindgren <tony@atomide.com>
> Reviewed-by: Tony Lindgren <tony@atomide.com>
> Signed-off-by: David A. Long <dave.long@linaro.org>
> ---
>  arch/arm/include/asm/kvm_asm.h |  2 -
>  arch/arm/include/asm/kvm_mmu.h | 17 ++++++++-
>  arch/arm/kvm/hyp/hyp-entry.S   | 69 ++++++++++++++++++++++++++++++++++
>  3 files changed, 85 insertions(+), 3 deletions(-)
>

[...]

> diff --git a/arch/arm/kvm/hyp/hyp-entry.S b/arch/arm/kvm/hyp/hyp-entry.S
> index 96beb53934c9..de242d9598c6 100644
> --- a/arch/arm/kvm/hyp/hyp-entry.S
> +++ b/arch/arm/kvm/hyp/hyp-entry.S
> @@ -71,6 +71,66 @@ __kvm_hyp_vector:
>  	W(b)	hyp_irq
>  	W(b)	hyp_fiq
>  
> +#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
> +	.align 5
> +__kvm_hyp_vector_bp_inv:
> +	.global __kvm_hyp_vector_bp_inv
> +
> +	/*
> +	 * We encode the exception entry in the bottom 3 bits of
> +	 * SP, and we have to guarantee to be 8 bytes aligned.
> +	 */
> +	W(add)	sp, sp, #1	/* Reset 	  7 */
> +	W(add)	sp, sp, #1	/* Undef	  6 */
> +	W(add)	sp, sp, #1	/* Syscall	  5 */
> +	W(add)	sp, sp, #1	/* Prefetch abort 4 */
> +	W(add)	sp, sp, #1	/* Data abort	  3 */
> +	W(add)	sp, sp, #1	/* HVC		  2 */
> +	W(add)	sp, sp, #1	/* IRQ		  1 */
> +	W(nop)			/* FIQ		  0 */
> +
> +	mcr	p15, 0, r0, c7, c5, 6	/* BPIALL */
> +	isb
> +
> +#ifdef CONFIG_THUMB2_KERNEL
> +	/*
> +	 * Yet another silly hack: Use VPIDR as a temp register.
> +	 * Thumb2 is really a pain, as SP cannot be used with most
> +	 * of the bitwise instructions. The vect_br macro ensures
> +	 * things gets cleaned-up.
> +	 */
> +	mcr	p15, 4, r0, c0, c0, 0	/* VPIDR */
> +	mov	r0, sp
> +	and	r0, r0, #7
> +	sub	sp, sp, r0
> +	push	{r1, r2}
> +	mov	r1, r0
> +	mrc	p15, 4, r0, c0, c0, 0	/* VPIDR */
> +	mrc	p15, 0, r2, c0, c0, 0	/* MIDR  */
> +	mcr	p15, 4, r2, c0, c0, 0	/* VPIDR */
> +#endif
> +
> +.macro vect_br val, targ
> +ARM(	eor	sp, sp, #\val	)
> +ARM(	tst	sp, #7		)
> +ARM(	eorne	sp, sp, #\val	)
> +
> +THUMB(	cmp	r1, #\val	)
> +THUMB(	popeq	{r1, r2}	)
> +
> +	beq	\targ
> +.endm
> +
> +	vect_br	0, hyp_fiq
> +	vect_br	1, hyp_irq
> +	vect_br	2, hyp_hvc
> +	vect_br	3, hyp_dabt
> +	vect_br	4, hyp_pabt
> +	vect_br	5, hyp_svc
> +	vect_br	6, hyp_undef
> +	vect_br	7, hyp_reset
> +#endif
> +
>  .macro invalid_vector label, cause
>  	.align
>  \label:	mov	r0, #\cause
> @@ -132,6 +192,14 @@ hyp_hvc:
>  	beq	1f
>  
>  	push	{lr}
> +	/*
> +	 * Pushing r2 here is just a way of keeping the stack aligned to
> +	 * 8 bytes on any path that can trigger a HYP exception. Here,
> +	 * we may well be about to jump into the guest, and the guest
> +	 * exit would otherwise be badly decoded by our fancy
> +	 * "decode-exception-without-a-branch" code...
> +	 */
> +	push	{r2, lr}
> +
>  	mov	lr, r0
>  	mov	r0, r1
> @@ -142,6 +210,7 @@ THUMB(	orr	lr, #1)
>  	blx	lr			@ Call the HYP function
>  
>  	pop	{lr}
> +	pop	{r2, lr}

I don't see how this can work. This clearly isn't the right resolution
for merging 3f7e8e2e1ebda787f156ce46e3f0a9ce2833fa4f, as it contradicts
the very comment you are merging here.

I wouldn't be surprised if the crash you're observing is due to
this problem (unaligned stack, bad decoding of the vector, branch to the
wrong handler, HYP on fire).
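
To make that failure mode concrete, a sketch (my reading of the
sequence, not verified on hardware): with both "push {lr}" and
"push {r2, lr}" left in, 12 bytes are pushed, so HYP runs -- and may
enter the guest -- with SP misaligned by 4. The fall-through adds in
__kvm_hyp_vector_bp_inv then decode the wrong slot:

    /*
     * Hypothetical model: slot value seen by vect_br when SP was
     * misaligned by "off" bytes at exception entry.
     */
    static unsigned int decoded_slot(unsigned int off, unsigned int slot)
    {
        return (off + slot) & 7;
    }

    /*
     * decoded_slot(4, 2): an HVC-triggered exit (slot 2) decodes as 6
     * and branches to hyp_undef instead of hyp_hvc.
     */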

	M.
-- 
Jazz is not dead. It just smells funny...
David Long Nov. 7, 2018, 2:22 a.m. UTC | #2
On 11/5/18 4:13 AM, Marc Zyngier wrote:
> David,
>
> On 31/10/18 13:57, David Long wrote:
>> From: Marc Zyngier <marc.zyngier@arm.com>
>>
>> Commit 3f7e8e2e1ebda787f156ce46e3f0a9ce2833fa4f upstream.
>>
>> [...]
>
> I don't see how this can work. This clearly isn't the right resolution
> for merging 3f7e8e2e1ebda787f156ce46e3f0a9ce2833fa4f, as it contradicts
> the very comment you are merging here.
>
> I wouldn't be surprised if the crash you're observing is due to
> this problem (unaligned stack, bad decoding of the vector, branch to the
> wrong handler, HYP on fire).
>
> 	M.

Thanks, I see the problem now.  I removed the redundant (and 
asymmetrical) push/pop of r2 and it passes kvm-unit-tests without 
regressions.  I'll send out a v2 patch soon.

-dl
David Long Nov. 7, 2018, 2:23 a.m. UTC | #3
On 11/5/18 4:13 AM, Marc Zyngier wrote:
> David,
>
> [...]
>
> I wouldn't be surprised if the crash you're observing is due to
> this problem (unaligned stack, bad decoding of the vector, branch to the
> wrong handler, HYP on fire).
>
> 	M.

Sorry, I meant I removed the "lr" push/pop.

-dl
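
Assuming v2 is exactly what is described above -- drop the lr-only
push/pop and keep the 8-byte "push {r2, lr}"/"pop {r2, lr}" pair -- the
HVC call path preserves the alignment invariant the decoder depends on.
A sketch of the accounting (not the actual v2 diff):

    static void hvc_call_path(unsigned long sp)
    {
        /* sp & 7 == 0 on entry to hyp_hvc */
        sp -= 8;        /* push	{r2, lr} */
        /*
         * blx lr: if the called HYP function enters the guest, a
         * later guest exit re-enters __kvm_hyp_vector_bp_inv with
         * sp & 7 == 0, so the fall-through decode stays valid.
         */
        sp += 8;        /* pop	{r2, lr} */
    }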
Patch

diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h
index 8ef05381984b..24f3ec7c9fbe 100644
--- a/arch/arm/include/asm/kvm_asm.h
+++ b/arch/arm/include/asm/kvm_asm.h
@@ -61,8 +61,6 @@ struct kvm_vcpu;
 extern char __kvm_hyp_init[];
 extern char __kvm_hyp_init_end[];
 
-extern char __kvm_hyp_vector[];
-
 extern void __kvm_flush_vm_context(void);
 extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
 extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index e2f05cedaf97..625edef2a54f 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -248,7 +248,22 @@ static inline int kvm_read_guest_lock(struct kvm *kvm,
 
 static inline void *kvm_get_hyp_vector(void)
 {
-	return kvm_ksym_ref(__kvm_hyp_vector);
+	switch(read_cpuid_part()) {
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+	case ARM_CPU_PART_CORTEX_A12:
+	case ARM_CPU_PART_CORTEX_A17:
+	{
+		extern char __kvm_hyp_vector_bp_inv[];
+		return kvm_ksym_ref(__kvm_hyp_vector_bp_inv);
+	}
+
+#endif
+	default:
+	{
+		extern char __kvm_hyp_vector[];
+		return kvm_ksym_ref(__kvm_hyp_vector);
+	}
+	}
 }
 
 static inline int kvm_map_vectors(void)
diff --git a/arch/arm/kvm/hyp/hyp-entry.S b/arch/arm/kvm/hyp/hyp-entry.S
index 96beb53934c9..de242d9598c6 100644
--- a/arch/arm/kvm/hyp/hyp-entry.S
+++ b/arch/arm/kvm/hyp/hyp-entry.S
@@ -71,6 +71,66 @@ __kvm_hyp_vector:
 	W(b)	hyp_irq
 	W(b)	hyp_fiq
 
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+	.align 5
+__kvm_hyp_vector_bp_inv:
+	.global __kvm_hyp_vector_bp_inv
+
+	/*
+	 * We encode the exception entry in the bottom 3 bits of
+	 * SP, and we have to guarantee to be 8 bytes aligned.
+	 */
+	W(add)	sp, sp, #1	/* Reset 	  7 */
+	W(add)	sp, sp, #1	/* Undef	  6 */
+	W(add)	sp, sp, #1	/* Syscall	  5 */
+	W(add)	sp, sp, #1	/* Prefetch abort 4 */
+	W(add)	sp, sp, #1	/* Data abort	  3 */
+	W(add)	sp, sp, #1	/* HVC		  2 */
+	W(add)	sp, sp, #1	/* IRQ		  1 */
+	W(nop)			/* FIQ		  0 */
+
+	mcr	p15, 0, r0, c7, c5, 6	/* BPIALL */
+	isb
+
+#ifdef CONFIG_THUMB2_KERNEL
+	/*
+	 * Yet another silly hack: Use VPIDR as a temp register.
+	 * Thumb2 is really a pain, as SP cannot be used with most
+	 * of the bitwise instructions. The vect_br macro ensures
+	 * things gets cleaned-up.
+	 */
+	mcr	p15, 4, r0, c0, c0, 0	/* VPIDR */
+	mov	r0, sp
+	and	r0, r0, #7
+	sub	sp, sp, r0
+	push	{r1, r2}
+	mov	r1, r0
+	mrc	p15, 4, r0, c0, c0, 0	/* VPIDR */
+	mrc	p15, 0, r2, c0, c0, 0	/* MIDR  */
+	mcr	p15, 4, r2, c0, c0, 0	/* VPIDR */
+#endif
+
+.macro vect_br val, targ
+ARM(	eor	sp, sp, #\val	)
+ARM(	tst	sp, #7		)
+ARM(	eorne	sp, sp, #\val	)
+
+THUMB(	cmp	r1, #\val	)
+THUMB(	popeq	{r1, r2}	)
+
+	beq	\targ
+.endm
+
+	vect_br	0, hyp_fiq
+	vect_br	1, hyp_irq
+	vect_br	2, hyp_hvc
+	vect_br	3, hyp_dabt
+	vect_br	4, hyp_pabt
+	vect_br	5, hyp_svc
+	vect_br	6, hyp_undef
+	vect_br	7, hyp_reset
+#endif
+
 .macro invalid_vector label, cause
 	.align
 \label:	mov	r0, #\cause
@@ -132,6 +192,14 @@ hyp_hvc:
 	beq	1f
 
 	push	{lr}
+	/*
+	 * Pushing r2 here is just a way of keeping the stack aligned to
+	 * 8 bytes on any path that can trigger a HYP exception. Here,
+	 * we may well be about to jump into the guest, and the guest
+	 * exit would otherwise be badly decoded by our fancy
+	 * "decode-exception-without-a-branch" code...
+	 */
+	push	{r2, lr}
 
 	mov	lr, r0
 	mov	r0, r1
@@ -142,6 +210,7 @@ THUMB(	orr	lr, #1)
 	blx	lr			@ Call the HYP function
 
 	pop	{lr}
+	pop	{r2, lr}
 1:	eret
 
 guest_trap:
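
The ARM-mode vect_br sequence above is easier to follow as a C model
(illustration only; the real code works on SP directly, since ARM mode,
unlike Thumb2, allows SP in bitwise instructions):

    #include <stdbool.h>

    static bool vect_br(unsigned long *sp, unsigned long val)
    {
        *sp ^= val;             /* eor	sp, sp, #val */
        if (*sp & 7) {          /* tst	sp, #7       */
            *sp ^= val;         /* eorne: restore SP, no match */
            return false;       /* fall through to the next vect_br */
        }
        return true;            /* beq taken: SP is 8-byte aligned again */
    }

The Thumb2 variant instead compares the saved low bits in r1
(cmp r1, #val) and pops the scratch registers on a match, which is why
the entry code stashes sp & 7 in r1 beforehand.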