@@ -173,15 +173,7 @@ SYM_FUNC_START_LOCAL_NOALIGN(.Lrelocated)
/*
* Adjust our own GOT
*/
- leal _got(%ebx), %edx
- leal _egot(%ebx), %ecx
-1:
- cmpl %ecx, %edx
- jae 2f
- addl %ebx, (%edx)
- addl $4, %edx
- jmp 1b
-2:
+ call .Ladjust_got
/*
* Do the extraction, and jump to the new kernel..
@@ -211,6 +203,38 @@ SYM_FUNC_START_LOCAL_NOALIGN(.Lrelocated)
jmp *%eax
SYM_FUNC_END(.Lrelocated)
+/*
+ * Adjust the global offset table
+ *
+ * The relocation base address is passed in EBX. If the kernel is being
+ * relocated to a new address, this function must be called after the kernel
+ * has been copied to the new location. We keep track of the relocation address
+ * so that it can be backed out if this function is called repeatedly.
+ */
+
+SYM_FUNC_START_LOCAL(.Ladjust_got)
+ /* Get the new relocation base address */
+ movl %ebx, %eax
+ /* Back out the previously applied relocation base, if any */
+ subl got_relocation_base(%ebx), %eax
+ /* Update the stored base so it now equals the new address (EBX) */
+ addl %eax, got_relocation_base(%ebx)
+
+ leal _got(%ebx), %edx
+ leal _egot(%ebx), %ecx
+1:
+ cmpl %ecx, %edx
+ jae 2f
+ addl %eax, (%edx)
+ addl $4, %edx
+ jmp 1b
+2:
+ ret
+SYM_FUNC_END(.Ladjust_got)
+
+ .data
+SYM_DATA_LOCAL(got_relocation_base, .long 0)
+
/*
* Stack and heap for uncompression
*/
Save the new relocation address so that this function can be called
safely multiple times.

Signed-off-by: Arvind Sankar <nivedita@alum.mit.edu>
---
 arch/x86/boot/compressed/head_32.S | 42 +++++++++++++++++++++++-------
 1 file changed, 33 insertions(+), 9 deletions(-)