[AArch64] Add more cfi annotations to tlsdesc entry points

Message ID 584AB057.5050605@arm.com
State New

Commit Message

Szabolcs Nagy Dec. 9, 2016, 1:23 p.m. UTC
Backtracing through _dl_tlsdesc_resolve_rela was broken because the offset
of x30 from the CFA was not in the debug info.

Add enough annotation that backtracing from the dynamic linker through the
tlsdesc entry points works and the debugger shows registers correctly.

(manually tested with gdb.)

2016-12-09  Szabolcs Nagy  <szabolcs.nagy@arm.com>

	* sysdeps/aarch64/dl-tlsdesc.S (_dl_tlsdesc_dynamic): Add cfi
	annotation.
	(_dl_tlsdesc_resolve_rela, _dl_tlsdesc_resolve_hold): Likewise.
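
The general shape of the added annotations, sketched with a stand-in FRAME
size (the exact frame sizes and offsets are in the patch below): without a
cfi_rel_offset rule for x30, the unwinder cannot find the return address
that the stp pushed, so backtraces through these entry points break.

	stp	x29, x30, [sp, #-FRAME]!	/* FRAME is a stand-in size */
	cfi_adjust_cfa_offset (FRAME)		/* CFA is now sp + FRAME */
	cfi_rel_offset (x29, 0)			/* saved x29 is at sp + 0 */
	cfi_rel_offset (x30, 8)			/* saved x30 is at sp + 8 */
	...
	ldp	x29, x30, [sp], #FRAME
	cfi_adjust_cfa_offset (-FRAME)
	cfi_restore (x29)			/* live values are current again */
	cfi_restore (x30)
	RET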

Comments

Richard Henderson Dec. 9, 2016, 5:38 p.m. UTC | #1
On 12/09/2016 05:23 AM, Szabolcs Nagy wrote:
>  	stp	x1,  x2, [sp, #32+16*0]
> +	cfi_rel_offset (x1, 32)
> +	cfi_rel_offset (x2, 32+8)
>  	stp	x3,  x4, [sp, #32+16*1]
> +	cfi_rel_offset (x3, 32+16)
> +	cfi_rel_offset (x4, 32+24)

FWIW, you'll produce equivalent but smaller unwind info if you put all of the
cfi_rel_offsets together (constrained by the point in the assembly at which
the original register values change).  So

	stp	x1, x2, [sp, #32+16*0]
	stp	x3, x4, [sp, #32+16*1]
	cfi_rel_offset (x1, 32)
	cfi_rel_offset (x2, 32+8)
	cfi_rel_offset (x3, 32+16)
	cfi_rel_offset (x4, 32+24)

>  	stp	 x5,  x6, [sp, #-16*NSAVEXREGPAIRS]!
>  	cfi_adjust_cfa_offset (16*NSAVEXREGPAIRS)
> +	cfi_rel_offset (x5, 0)
> +	cfi_rel_offset (x6, 8)
>  	stp	 x7,  x8, [sp, #16*1]
> +	cfi_rel_offset (x7, 16)
> +	cfi_rel_offset (x8, 16+8)
>  	stp	 x9, x10, [sp, #16*2]
> +	cfi_rel_offset (x9, 16*2)
> +	cfi_rel_offset (x10, 16*2+8)
>  	stp	x11, x12, [sp, #16*3]
> +	cfi_rel_offset (x11, 16*3)
> +	cfi_rel_offset (x12, 16*3+8)
>  	stp	x13, x14, [sp, #16*4]
> +	cfi_rel_offset (x13, 16*4)
> +	cfi_rel_offset (x14, 16*4+8)
>  	stp	x15, x16, [sp, #16*5]
> +	cfi_rel_offset (x15, 16*5)
> +	cfi_rel_offset (x16, 16*5+8)
>  	stp	x17, x18, [sp, #16*6]
> +	cfi_rel_offset (x17, 16*6)
> +	cfi_rel_offset (x18, 16*6+8)

Likewise.  But of course you can't move the cfi_adjust_cfa_offset.
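
Concretely, a minimal sketch of that constraint, using the same macros and
registers as the patch (NSAVEXREGPAIRS is the macro already defined in
dl-tlsdesc.S): the CFA rule has to be correct at every instruction, so the
adjustment stays glued to the sp-modifying store, while the register-save
annotations can be grouped later as long as nothing in between clobbers the
saved registers.

	stp	 x5,  x6, [sp, #-16*NSAVEXREGPAIRS]!
	cfi_adjust_cfa_offset (16*NSAVEXREGPAIRS)	/* sp changed here */
	stp	 x7,  x8, [sp, #16*1]
	cfi_rel_offset (x5, 0)		/* x5..x8 still hold their */
	cfi_rel_offset (x6, 8)		/* original values here    */
	cfi_rel_offset (x7, 16)
	cfi_rel_offset (x8, 16+8)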


r~
Florian Weimer Dec. 13, 2016, 3:05 p.m. UTC | #2
On 12/09/2016 06:38 PM, Richard Henderson wrote:
> On 12/09/2016 05:23 AM, Szabolcs Nagy wrote:
>>  	stp	x1,  x2, [sp, #32+16*0]
>> +	cfi_rel_offset (x1, 32)
>> +	cfi_rel_offset (x2, 32+8)
>>  	stp	x3,  x4, [sp, #32+16*1]
>> +	cfi_rel_offset (x3, 32+16)
>> +	cfi_rel_offset (x4, 32+24)
>
> FWIW, you'll produce equivalent, but smaller unwind info if you put all of the
> cfi_rel_offsets together (constrained by the assembly for when the original
> register values change.  So
>
> 	stp	x1, x2, [sp, #32+16*0]
> 	stp	x3, x4, [sp, #32+16*1]
> 	cfi_rel_offset (x1, 32)
> 	cfi_rel_offset (x2, 32+8)
> 	cfi_rel_offset (x3, 32+16)
> 	cfi_rel_offset (x4, 32+24)

Will this cause problems with async cancellation?

Thanks,
Florian
Richard Henderson Dec. 13, 2016, 3:11 p.m. UTC | #3
On 12/13/2016 07:05 AM, Florian Weimer wrote:
> On 12/09/2016 06:38 PM, Richard Henderson wrote:
>> On 12/09/2016 05:23 AM, Szabolcs Nagy wrote:
>>>      stp    x1,  x2, [sp, #32+16*0]
>>> +    cfi_rel_offset (x1, 32)
>>> +    cfi_rel_offset (x2, 32+8)
>>>      stp    x3,  x4, [sp, #32+16*1]
>>> +    cfi_rel_offset (x3, 32+16)
>>> +    cfi_rel_offset (x4, 32+24)
>>
>> FWIW, you'll produce equivalent, but smaller unwind info if you put all of the
>> cfi_rel_offsets together (constrained by the assembly for when the original
>> register values change.  So
>>
>>     stp    x1, x2, [sp, #32+16*0]
>>     stp    x3, x4, [sp, #32+16*1]
>>     cfi_rel_offset (x1, 32)
>>     cfi_rel_offset (x2, 32+8)
>>     cfi_rel_offset (x3, 32+16)
>>     cfi_rel_offset (x4, 32+24)
>
> Will this cause problems with async cancellation?

No, because the registers have not been modified.  All this does is eliminate 
extra "advance" opcodes.

r~

Patch

diff --git a/sysdeps/aarch64/dl-tlsdesc.S b/sysdeps/aarch64/dl-tlsdesc.S
index 9e557dd..40a19d4 100644
--- a/sysdeps/aarch64/dl-tlsdesc.S
+++ b/sysdeps/aarch64/dl-tlsdesc.S
@@ -176,6 +176,8 @@  _dl_tlsdesc_dynamic:
 # define NSAVEXREGPAIRS 2
 	stp	x29, x30, [sp,#-(32+16*NSAVEXREGPAIRS)]!
 	cfi_adjust_cfa_offset (32+16*NSAVEXREGPAIRS)
+	cfi_rel_offset (x29, 0)
+	cfi_rel_offset (x30, 8)
 	mov	x29, sp
 	DELOUSE (0)
 
@@ -183,7 +185,11 @@  _dl_tlsdesc_dynamic:
 	   into slow path we will save additional registers.  */
 
 	stp	x1,  x2, [sp, #32+16*0]
+	cfi_rel_offset (x1, 32)
+	cfi_rel_offset (x2, 32+8)
 	stp	x3,  x4, [sp, #32+16*1]
+	cfi_rel_offset (x3, 32+16)
+	cfi_rel_offset (x4, 32+24)
 
 	mrs	x4, tpidr_el0
 	/* The ldar here happens after the load from [x0] at the call site
@@ -213,6 +219,8 @@  _dl_tlsdesc_dynamic:
 
 	ldp	x29, x30, [sp], #(32+16*NSAVEXREGPAIRS)
 	cfi_adjust_cfa_offset (-32-16*NSAVEXREGPAIRS)
+	cfi_restore (x29)
+	cfi_restore (x30)
 # undef NSAVEXREGPAIRS
 	RET
 2:
@@ -224,12 +232,26 @@  _dl_tlsdesc_dynamic:
 # define NSAVEXREGPAIRS 7
 	stp	 x5,  x6, [sp, #-16*NSAVEXREGPAIRS]!
 	cfi_adjust_cfa_offset (16*NSAVEXREGPAIRS)
+	cfi_rel_offset (x5, 0)
+	cfi_rel_offset (x6, 8)
 	stp	 x7,  x8, [sp, #16*1]
+	cfi_rel_offset (x7, 16)
+	cfi_rel_offset (x8, 16+8)
 	stp	 x9, x10, [sp, #16*2]
+	cfi_rel_offset (x9, 16*2)
+	cfi_rel_offset (x10, 16*2+8)
 	stp	x11, x12, [sp, #16*3]
+	cfi_rel_offset (x11, 16*3)
+	cfi_rel_offset (x12, 16*3+8)
 	stp	x13, x14, [sp, #16*4]
+	cfi_rel_offset (x13, 16*4)
+	cfi_rel_offset (x14, 16*4+8)
 	stp	x15, x16, [sp, #16*5]
+	cfi_rel_offset (x15, 16*5)
+	cfi_rel_offset (x16, 16*5+8)
 	stp	x17, x18, [sp, #16*6]
+	cfi_rel_offset (x17, 16*6)
+	cfi_rel_offset (x18, 16*6+8)
 
 	SAVE_Q_REGISTERS
 
@@ -268,18 +290,41 @@  _dl_tlsdesc_dynamic:
 	.align 2
 _dl_tlsdesc_resolve_rela:
 #define	NSAVEXREGPAIRS 9
+	/* The tlsdesc PLT entry pushes x2 and x3 to the stack.  */
+	cfi_adjust_cfa_offset (16)
+	cfi_rel_offset (x2, 0)
+	cfi_rel_offset (x3, 8)
 	stp	x29, x30, [sp, #-(32+16*NSAVEXREGPAIRS)]!
 	cfi_adjust_cfa_offset (32+16*NSAVEXREGPAIRS)
+	cfi_rel_offset (x29, 0)
+	cfi_rel_offset (x30, 8)
 	mov	x29, sp
 	stp	 x1,  x4, [sp, #32+16*0]
+	cfi_rel_offset (x1, 32)
+	cfi_rel_offset (x4, 32+8)
 	stp	 x5,  x6, [sp, #32+16*1]
+	cfi_rel_offset (x5, 32+16)
+	cfi_rel_offset (x6, 32+16+8)
 	stp	 x7,  x8, [sp, #32+16*2]
+	cfi_rel_offset (x7, 32+16*2)
+	cfi_rel_offset (x8, 32+16*2+8)
 	stp	 x9, x10, [sp, #32+16*3]
+	cfi_rel_offset (x9, 32+16*3)
+	cfi_rel_offset (x10, 32+16*3+8)
 	stp	x11, x12, [sp, #32+16*4]
+	cfi_rel_offset (x11, 32+16*4)
+	cfi_rel_offset (x12, 32+16*4+8)
 	stp	x13, x14, [sp, #32+16*5]
+	cfi_rel_offset (x13, 32+16*5)
+	cfi_rel_offset (x14, 32+16*5+8)
 	stp	x15, x16, [sp, #32+16*6]
+	cfi_rel_offset (x15, 32+16*6)
+	cfi_rel_offset (x16, 32+16*6+8)
 	stp	x17, x18, [sp, #32+16*7]
+	cfi_rel_offset (x17, 32+16*7)
+	cfi_rel_offset (x18, 32+16*7+8)
 	str	x0,       [sp, #32+16*8]
+	cfi_rel_offset (x0, 32+16*8)
 
 	SAVE_Q_REGISTERS
 
@@ -304,6 +349,8 @@  _dl_tlsdesc_resolve_rela:
 	ldp	x17, x18, [sp, #32+16*7]
 	ldp	x29, x30, [sp], #(32+16*NSAVEXREGPAIRS)
 	cfi_adjust_cfa_offset (-32-16*NSAVEXREGPAIRS)
+	cfi_restore (x29)
+	cfi_restore (x30)
 	ldp	x2, x3, [sp], #16
 	cfi_adjust_cfa_offset (-16)
 	RET
@@ -332,17 +379,38 @@  _dl_tlsdesc_resolve_hold:
 1:
 	stp	x29, x30, [sp, #-(32+16*NSAVEXREGPAIRS)]!
 	cfi_adjust_cfa_offset (32+16*NSAVEXREGPAIRS)
+	cfi_rel_offset (x29, 0)
+	cfi_rel_offset (x30, 8)
 	mov	x29, sp
 	stp	 x1,  x2, [sp, #32+16*0]
+	cfi_rel_offset (x1, 32)
+	cfi_rel_offset (x2, 32+8)
 	stp	 x3,  x4, [sp, #32+16*1]
+	cfi_rel_offset (x3, 32+16)
+	cfi_rel_offset (x4, 32+16+8)
 	stp	 x5,  x6, [sp, #32+16*2]
+	cfi_rel_offset (x5, 32+16*2)
+	cfi_rel_offset (x6, 32+16*2+8)
 	stp	 x7,  x8, [sp, #32+16*3]
+	cfi_rel_offset (x7, 32+16*3)
+	cfi_rel_offset (x8, 32+16*3+8)
 	stp	 x9, x10, [sp, #32+16*4]
+	cfi_rel_offset (x9, 32+16*4)
+	cfi_rel_offset (x10, 32+16*4+8)
 	stp	x11, x12, [sp, #32+16*5]
+	cfi_rel_offset (x11, 32+16*5)
+	cfi_rel_offset (x12, 32+16*5+8)
 	stp	x13, x14, [sp, #32+16*6]
+	cfi_rel_offset (x13, 32+16*6)
+	cfi_rel_offset (x14, 32+16*6+8)
 	stp	x15, x16, [sp, #32+16*7]
+	cfi_rel_offset (x15, 32+16*7)
+	cfi_rel_offset (x16, 32+16*7+8)
 	stp	x17, x18, [sp, #32+16*8]
+	cfi_rel_offset (x17, 32+16*8)
+	cfi_rel_offset (x18, 32+16*8+8)
 	str	x0,       [sp, #32+16*9]
+	cfi_rel_offset (x0, 32+16*9)
 
 	SAVE_Q_REGISTERS
 
@@ -367,6 +435,8 @@  _dl_tlsdesc_resolve_hold:
 	ldp	x17, x18, [sp, #32+16*8]
 	ldp	x29, x30, [sp], #(32+16*NSAVEXREGPAIRS)
 	cfi_adjust_cfa_offset (-32-16*NSAVEXREGPAIRS)
+	cfi_restore (x29)
+	cfi_restore (x30)
 	RET
 	cfi_endproc
 	.size	_dl_tlsdesc_resolve_hold, .-_dl_tlsdesc_resolve_hold