
x86_64: Change .weak to SYM_FUNC_START_WEAK for arch/x86/lib/mem*_64.S

Message ID 20201029180525.1797645-1-maskray@google.com
State Accepted
Commit 4d6ffa27b8e5116c0abb318790fd01d4e12d75e6
Series x86_64: Change .weak to SYM_FUNC_START_WEAK for arch/x86/lib/mem*_64.S

Commit Message

Fangrui Song Oct. 29, 2020, 6:05 p.m. UTC
Commit 393f203f5fd5 ("x86_64: kasan: add interceptors for
memset/memmove/memcpy functions") added .weak directives to
arch/x86/lib/mem*_64.S instead of changing the existing SYM_FUNC_START_*
macros. This can lead to the assembly snippet `.weak memcpy ... .globl
memcpy` which will produce a STB_WEAK memcpy with GNU as but STB_GLOBAL
memcpy with LLVM's integrated assembler before LLVM 12. LLVM 12 (since
https://reviews.llvm.org/D90108) will error on such an overridden symbol
binding.

Use the appropriate SYM_FUNC_START_WEAK instead.

Fixes: 393f203f5fd5 ("x86_64: kasan: add interceptors for memset/memmove/memcpy functions")
Reported-by: Sami Tolvanen <samitolvanen@google.com>
Signed-off-by: Fangrui Song <maskray@google.com>
Cc: <stable@vger.kernel.org>
---
 arch/x86/lib/memcpy_64.S  | 4 +---
 arch/x86/lib/memmove_64.S | 4 +---
 arch/x86/lib/memset_64.S  | 4 +---
 3 files changed, 3 insertions(+), 9 deletions(-)
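
For illustration, a minimal standalone reproducer of the directive
sequence described above (a hypothetical test.S, not taken from the
kernel sources):

    # test.S
    .weak  memcpy
    .globl memcpy          # second directive overrides the requested binding
    memcpy:
            ret

    # GNU as:               readelf -s test.o reports memcpy as WEAK
    # LLVM IAS before 12:   reports memcpy as GLOBAL
    # LLVM 12:              errors on the overridden binding (see D90108)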

Comments

Sami Tolvanen Oct. 29, 2020, 6:42 p.m. UTC | #1
On Thu, Oct 29, 2020 at 11:05 AM 'Fangrui Song' via Clang Built Linux
<clang-built-linux@googlegroups.com> wrote:
>
> Commit 393f203f5fd5 ("x86_64: kasan: add interceptors for
> memset/memmove/memcpy functions") added .weak directives to
> arch/x86/lib/mem*_64.S instead of changing the existing SYM_FUNC_START_*
> macros. This can lead to the assembly snippet `.weak memcpy ... .globl
> memcpy` which will produce a STB_WEAK memcpy with GNU as but STB_GLOBAL
> memcpy with LLVM's integrated assembler before LLVM 12. LLVM 12 (since
> https://reviews.llvm.org/D90108) will error on such an overridden symbol
> binding.
>
> Use the appropriate SYM_FUNC_START_WEAK instead.
>
> Fixes: 393f203f5fd5 ("x86_64: kasan: add interceptors for memset/memmove/memcpy functions")
> Reported-by: Sami Tolvanen <samitolvanen@google.com>
> Signed-off-by: Fangrui Song <maskray@google.com>
> Cc: <stable@vger.kernel.org>
> ---
>  arch/x86/lib/memcpy_64.S  | 4 +---
>  arch/x86/lib/memmove_64.S | 4 +---
>  arch/x86/lib/memset_64.S  | 4 +---
>  3 files changed, 3 insertions(+), 9 deletions(-)


Thank you for fixing this! I tested the patch with gcc 9.3 and clang
12, both with and without Clang's integrated assembler, and the kernel
builds and boots with all compilers again.

Tested-by: Sami Tolvanen <samitolvanen@google.com>


Sami
Nick Desaulniers Oct. 29, 2020, 6:58 p.m. UTC | #2
On Thu, Oct 29, 2020 at 11:05 AM 'Fangrui Song' via Clang Built Linux
<clang-built-linux@googlegroups.com> wrote:
>
> Commit 393f203f5fd5 ("x86_64: kasan: add interceptors for
> memset/memmove/memcpy functions") added .weak directives to
> arch/x86/lib/mem*_64.S instead of changing the existing SYM_FUNC_START_*
> macros. This can lead to the assembly snippet `.weak memcpy ... .globl
> memcpy` which will produce a STB_WEAK memcpy with GNU as but STB_GLOBAL
> memcpy with LLVM's integrated assembler before LLVM 12. LLVM 12 (since
> https://reviews.llvm.org/D90108) will error on such an overridden symbol
> binding.
>
> Use the appropriate SYM_FUNC_START_WEAK instead.
>
> Fixes: 393f203f5fd5 ("x86_64: kasan: add interceptors for memset/memmove/memcpy functions")
> Reported-by: Sami Tolvanen <samitolvanen@google.com>
> Signed-off-by: Fangrui Song <maskray@google.com>
> Cc: <stable@vger.kernel.org>
> ---
>  arch/x86/lib/memcpy_64.S  | 4 +---
>  arch/x86/lib/memmove_64.S | 4 +---
>  arch/x86/lib/memset_64.S  | 4 +---
>  3 files changed, 3 insertions(+), 9 deletions(-)
>
> diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
> index 037faac46b0c..1e299ac73c86 100644
> --- a/arch/x86/lib/memcpy_64.S
> +++ b/arch/x86/lib/memcpy_64.S
> @@ -16,8 +16,6 @@
>   * to a jmp to memcpy_erms which does the REP; MOVSB mem copy.
>   */
>
> -.weak memcpy
> -
>  /*
>   * memcpy - Copy a memory block.
>   *
> @@ -30,7 +28,7 @@
>   * rax original destination
>   */
>  SYM_FUNC_START_ALIAS(__memcpy)
> -SYM_FUNC_START_LOCAL(memcpy)
> +SYM_FUNC_START_WEAK(memcpy)
>         ALTERNATIVE_2 "jmp memcpy_orig", "", X86_FEATURE_REP_GOOD, \
>                       "jmp memcpy_erms", X86_FEATURE_ERMS

Thanks for the patch.  This exposes a lack of symmetry in the
assembler subroutine for memcpy: memmove and memset use SYM_FUNC_START
for their double-underscore-prefixed symbols and SYM_FUNC_START_WEAK
for the non-prefixed symbols, with no SYM_FUNC_START_ALIAS left after
your patch.  It's also curious to me why you removed
SYM_FUNC_START_ALIAS for memmove and memset, but not for memcpy.  Can
we sort that out so that they all follow the same convention?
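
For reference, the macro pairings after this patch, as read from the
patch itself:

    memcpy_64.S:   SYM_FUNC_START_ALIAS(__memcpy) + SYM_FUNC_START_WEAK(memcpy)
    memmove_64.S:  SYM_FUNC_START_WEAK(memmove)   + SYM_FUNC_START(__memmove)
    memset_64.S:   SYM_FUNC_START_WEAK(memset)    + SYM_FUNC_START(__memset)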

Before your patch, with GNU `as` I see:
$ llvm-readelf -s arch/x86/lib/memcpy_64.o | grep -e memcpy -e __memcpy
    16: 0000000000000000    26 FUNC    GLOBAL DEFAULT     4 __memcpy
    17: 0000000000000000    26 FUNC    WEAK   DEFAULT     4 memcpy
$ llvm-readelf -s arch/x86/lib/memmove_64.o| grep -e memmove -e __memmove
    13: 0000000000000000   409 FUNC    WEAK   DEFAULT     1 memmove
    14: 0000000000000000   409 FUNC    GLOBAL DEFAULT     1 __memmove
$ llvm-readelf -s arch/x86/lib/memset_64.o| grep -e memset -e __memset
    15: 0000000000000000    47 FUNC    WEAK   DEFAULT     1 memset
    16: 0000000000000000    47 FUNC    GLOBAL DEFAULT     1 __memset

After your patch, with GNU `as` I see:
$ llvm-readelf -s arch/x86/lib/memcpy_64.o | grep -e memcpy -e __memcpy
    16: 0000000000000000    26 FUNC    GLOBAL DEFAULT     4 __memcpy
    17: 0000000000000000    26 FUNC    WEAK   DEFAULT     4 memcpy
$ llvm-readelf -s arch/x86/lib/memmove_64.o| grep -e memmove -e __memmove
    13: 0000000000000000   409 FUNC    WEAK   DEFAULT     1 memmove
    14: 0000000000000000   409 FUNC    GLOBAL DEFAULT     1 __memmove
$ llvm-readelf -s arch/x86/lib/memset_64.o| grep -e memset -e __memset
    15: 0000000000000000    47 FUNC    WEAK   DEFAULT     1 memset
    16: 0000000000000000    47 FUNC    GLOBAL DEFAULT     1 __memset

So in that sense, your patch introduces no functional change and simply
resolves the ambiguity of repeatedly defining a symbol with different
bindings.  I guess we can save uncovering why memcpy doesn't follow
the same convention for another day.  With that:

Reviewed-by: Nick Desaulniers <ndesaulniers@google.com>

Tested-by: Nick Desaulniers <ndesaulniers@google.com>


And thanks for the patch!

>
> diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
> index 7ff00ea64e4f..41902fe8b859 100644
> --- a/arch/x86/lib/memmove_64.S
> +++ b/arch/x86/lib/memmove_64.S
> @@ -24,9 +24,7 @@
>   * Output:
>   * rax: dest
>   */
> -.weak memmove
> -
> -SYM_FUNC_START_ALIAS(memmove)
> +SYM_FUNC_START_WEAK(memmove)
>  SYM_FUNC_START(__memmove)
>
>         mov %rdi, %rax
> diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
> index 9ff15ee404a4..0bfd26e4ca9e 100644
> --- a/arch/x86/lib/memset_64.S
> +++ b/arch/x86/lib/memset_64.S
> @@ -6,8 +6,6 @@
>  #include <asm/alternative-asm.h>
>  #include <asm/export.h>
>
> -.weak memset
> -
>  /*
>   * ISO C memset - set a memory block to a byte value. This function uses fast
>   * string to get better performance than the original function. The code is
> @@ -19,7 +17,7 @@
>   *
>   * rax   original destination
>   */
> -SYM_FUNC_START_ALIAS(memset)
> +SYM_FUNC_START_WEAK(memset)
>  SYM_FUNC_START(__memset)
>         /*
>          * Some CPUs support enhanced REP MOVSB/STOSB feature. It is recommended
> --
> 2.29.1.341.ge80a0c044ae-goog
>
> --

-- 
Thanks,
~Nick Desaulniers
Jiri Slaby Oct. 30, 2020, 9:48 a.m. UTC | #3
On 29. 10. 20, 19:05, Fangrui Song wrote:
> Commit 393f203f5fd5 ("x86_64: kasan: add interceptors for
> memset/memmove/memcpy functions") added .weak directives to
> arch/x86/lib/mem*_64.S instead of changing the existing SYM_FUNC_START_*
> macros.


There were no SYM_FUNC_START_* macros in 2015.

> This can lead to the assembly snippet `.weak memcpy ... .globl
> memcpy`


SYM_FUNC_START_LOCAL(memcpy)
does not add .globl, so I don't understand the rationale.
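
(For context, a simplified sketch of what the linkage.h macros emit,
with alignment and symbol-type bookkeeping omitted:

    SYM_FUNC_START(name)        ->  .globl name ; name:
    SYM_FUNC_START_LOCAL(name)  ->  name:              (no binding directive)
    SYM_FUNC_START_WEAK(name)   ->  .weak name ; name:

so the old `.weak memcpy` + SYM_FUNC_START_LOCAL(memcpy) pair never
emitted a .globl for memcpy; the `.weak ... .globl` sequence only arose
for memmove/memset via SYM_FUNC_START_ALIAS.)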

> which will produce a STB_WEAK memcpy with GNU as but STB_GLOBAL
> memcpy with LLVM's integrated assembler before LLVM 12. LLVM 12 (since
> https://reviews.llvm.org/D90108) will error on such an overridden symbol
> binding.
> 
> Use the appropriate SYM_FUNC_START_WEAK instead.
> 
> Fixes: 393f203f5fd5 ("x86_64: kasan: add interceptors for memset/memmove/memcpy functions")
> Reported-by: Sami Tolvanen <samitolvanen@google.com>
> Signed-off-by: Fangrui Song <maskray@google.com>
> Cc: <stable@vger.kernel.org>
> ---
>   arch/x86/lib/memcpy_64.S  | 4 +---
>   arch/x86/lib/memmove_64.S | 4 +---
>   arch/x86/lib/memset_64.S  | 4 +---
>   3 files changed, 3 insertions(+), 9 deletions(-)
> 
> diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
> index 037faac46b0c..1e299ac73c86 100644
> --- a/arch/x86/lib/memcpy_64.S
> +++ b/arch/x86/lib/memcpy_64.S
> @@ -16,8 +16,6 @@
>    * to a jmp to memcpy_erms which does the REP; MOVSB mem copy.
>    */
>   
> -.weak memcpy
> -
>   /*
>    * memcpy - Copy a memory block.
>    *
> @@ -30,7 +28,7 @@
>    * rax original destination
>    */
>   SYM_FUNC_START_ALIAS(__memcpy)
> -SYM_FUNC_START_LOCAL(memcpy)
> +SYM_FUNC_START_WEAK(memcpy)
>   	ALTERNATIVE_2 "jmp memcpy_orig", "", X86_FEATURE_REP_GOOD, \
>   		      "jmp memcpy_erms", X86_FEATURE_ERMS
>   
> diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
> index 7ff00ea64e4f..41902fe8b859 100644
> --- a/arch/x86/lib/memmove_64.S
> +++ b/arch/x86/lib/memmove_64.S
> @@ -24,9 +24,7 @@
>    * Output:
>    * rax: dest
>    */
> -.weak memmove
> -
> -SYM_FUNC_START_ALIAS(memmove)
> +SYM_FUNC_START_WEAK(memmove)
>   SYM_FUNC_START(__memmove)
>   
>   	mov %rdi, %rax
> diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
> index 9ff15ee404a4..0bfd26e4ca9e 100644
> --- a/arch/x86/lib/memset_64.S
> +++ b/arch/x86/lib/memset_64.S
> @@ -6,8 +6,6 @@
>   #include <asm/alternative-asm.h>
>   #include <asm/export.h>
>   
> -.weak memset
> -
>   /*
>    * ISO C memset - set a memory block to a byte value. This function uses fast
>    * string to get better performance than the original function. The code is
> @@ -19,7 +17,7 @@
>    *
>    * rax   original destination
>    */
> -SYM_FUNC_START_ALIAS(memset)
> +SYM_FUNC_START_WEAK(memset)
>   SYM_FUNC_START(__memset)
>   	/*
>   	 * Some CPUs support enhanced REP MOVSB/STOSB feature. It is recommended
> 



-- 
js
suse labs
Fangrui Song Oct. 30, 2020, 4:08 p.m. UTC | #4
On Fri, Oct 30, 2020 at 2:48 AM Jiri Slaby <jirislaby@kernel.org> wrote:
>
> On 29. 10. 20, 19:05, Fangrui Song wrote:
> > Commit 393f203f5fd5 ("x86_64: kasan: add interceptors for
> > memset/memmove/memcpy functions") added .weak directives to
> > arch/x86/lib/mem*_64.S instead of changing the existing SYM_FUNC_START_*
> > macros.
>
> There were no SYM_FUNC_START_* macros in 2015.


include/linux/linkage.h had WEAK in 2015 and WEAK should have been
used instead. Do I just need to fix the description?
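
(For reference, the 2015-era include/linux/linkage.h helper was roughly:

    #define WEAK(name)      \
            .weak name ASM_NL \
            name:

a rough sketch; see the historical header for the exact definition.)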

> > This can lead to the assembly snippet `.weak memcpy ... .globl
> > memcpy`
>
> SYM_FUNC_START_LOCAL(memcpy)
> does not add .globl, so I don't understand the rationale.


There is no problem using .weak together with SYM_FUNC_START_LOCAL; it
just looks suspicious, so I changed it, too.
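
Concretely, the old memcpy_64.S sequence was

    .weak memcpy
    ...
    SYM_FUNC_START_ALIAS(__memcpy)
    SYM_FUNC_START_LOCAL(memcpy)

i.e. memcpy was marked weak by a bare directive while the macro treated
it as local; the patch folds both into a single SYM_FUNC_START_WEAK(memcpy).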

> > which will produce a STB_WEAK memcpy with GNU as but STB_GLOBAL
> > memcpy with LLVM's integrated assembler before LLVM 12. LLVM 12 (since
> > https://reviews.llvm.org/D90108) will error on such an overridden symbol
> > binding.
> >
> > Use the appropriate SYM_FUNC_START_WEAK instead.
> >
> > Fixes: 393f203f5fd5 ("x86_64: kasan: add interceptors for memset/memmove/memcpy functions")
> > Reported-by: Sami Tolvanen <samitolvanen@google.com>
> > Signed-off-by: Fangrui Song <maskray@google.com>
> > Cc: <stable@vger.kernel.org>
> > ---
> >   arch/x86/lib/memcpy_64.S  | 4 +---
> >   arch/x86/lib/memmove_64.S | 4 +---
> >   arch/x86/lib/memset_64.S  | 4 +---
> >   3 files changed, 3 insertions(+), 9 deletions(-)
> >
> > diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
> > index 037faac46b0c..1e299ac73c86 100644
> > --- a/arch/x86/lib/memcpy_64.S
> > +++ b/arch/x86/lib/memcpy_64.S
> > @@ -16,8 +16,6 @@
> >    * to a jmp to memcpy_erms which does the REP; MOVSB mem copy.
> >    */
> >
> > -.weak memcpy
> > -
> >   /*
> >    * memcpy - Copy a memory block.
> >    *
> > @@ -30,7 +28,7 @@
> >    * rax original destination
> >    */
> >   SYM_FUNC_START_ALIAS(__memcpy)
> > -SYM_FUNC_START_LOCAL(memcpy)
> > +SYM_FUNC_START_WEAK(memcpy)
> >       ALTERNATIVE_2 "jmp memcpy_orig", "", X86_FEATURE_REP_GOOD, \
> >                     "jmp memcpy_erms", X86_FEATURE_ERMS
> >
> > diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
> > index 7ff00ea64e4f..41902fe8b859 100644
> > --- a/arch/x86/lib/memmove_64.S
> > +++ b/arch/x86/lib/memmove_64.S
> > @@ -24,9 +24,7 @@
> >    * Output:
> >    * rax: dest
> >    */
> > -.weak memmove
> > -
> > -SYM_FUNC_START_ALIAS(memmove)
> > +SYM_FUNC_START_WEAK(memmove)
> >   SYM_FUNC_START(__memmove)
> >
> >       mov %rdi, %rax
> > diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
> > index 9ff15ee404a4..0bfd26e4ca9e 100644
> > --- a/arch/x86/lib/memset_64.S
> > +++ b/arch/x86/lib/memset_64.S
> > @@ -6,8 +6,6 @@
> >   #include <asm/alternative-asm.h>
> >   #include <asm/export.h>
> >
> > -.weak memset
> > -
> >   /*
> >    * ISO C memset - set a memory block to a byte value. This function uses fast
> >    * string to get better performance than the original function. The code is
> > @@ -19,7 +17,7 @@
> >    *
> >    * rax   original destination
> >    */
> > -SYM_FUNC_START_ALIAS(memset)
> > +SYM_FUNC_START_WEAK(memset)
> >   SYM_FUNC_START(__memset)
> >       /*
> >        * Some CPUs support enhanced REP MOVSB/STOSB feature. It is recommended
> >
>
>
> --
> js
> suse labs




-- 
宋方睿
Jiri Slaby Nov. 2, 2020, 8:23 a.m. UTC | #5
On 30. 10. 20, 17:08, Fāng-ruì Sòng wrote:
> On Fri, Oct 30, 2020 at 2:48 AM Jiri Slaby <jirislaby@kernel.org> wrote:
>>
>> On 29. 10. 20, 19:05, Fangrui Song wrote:
>>> Commit 393f203f5fd5 ("x86_64: kasan: add interceptors for
>>> memset/memmove/memcpy functions") added .weak directives to
>>> arch/x86/lib/mem*_64.S instead of changing the existing SYM_FUNC_START_*
>>> macros.
>>
>> There were no SYM_FUNC_START_* macros in 2015.
> 
> include/linux/linkage.h had WEAK in 2015 and WEAK should have been
> used instead. Do I just need to fix the description?
> 
>>> This can lead to the assembly snippet `.weak memcpy ... .globl
>>> memcpy`
>>
>> SYM_FUNC_START_LOCAL(memcpy)
>> does not add .globl, so I don't understand the rationale.
> 
> There is no problem using .weak together with SYM_FUNC_START_LOCAL; it
> just looks suspicious, so I changed it, too.


So the commit log doesn't correspond to your changes ;). You need to 
update it and describe the difference from memmove/memset properly.

>>> which will produce a STB_WEAK memcpy with GNU as but STB_GLOBAL
>>> memcpy with LLVM's integrated assembler before LLVM 12. LLVM 12 (since
>>> https://reviews.llvm.org/D90108) will error on such an overridden symbol
>>> binding.
>>>
>>> Use the appropriate SYM_FUNC_START_WEAK instead.
>>>
>>> Fixes: 393f203f5fd5 ("x86_64: kasan: add interceptors for memset/memmove/memcpy functions")
>>> Reported-by: Sami Tolvanen <samitolvanen@google.com>
>>> Signed-off-by: Fangrui Song <maskray@google.com>
>>> Cc: <stable@vger.kernel.org>
>>> ---
>>>    arch/x86/lib/memcpy_64.S  | 4 +---
>>>    arch/x86/lib/memmove_64.S | 4 +---
>>>    arch/x86/lib/memset_64.S  | 4 +---
>>>    3 files changed, 3 insertions(+), 9 deletions(-)
>>>
>>> diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
>>> index 037faac46b0c..1e299ac73c86 100644
>>> --- a/arch/x86/lib/memcpy_64.S
>>> +++ b/arch/x86/lib/memcpy_64.S
>>> @@ -16,8 +16,6 @@
>>>     * to a jmp to memcpy_erms which does the REP; MOVSB mem copy.
>>>     */
>>>
>>> -.weak memcpy
>>> -
>>>    /*
>>>     * memcpy - Copy a memory block.
>>>     *
>>> @@ -30,7 +28,7 @@
>>>     * rax original destination
>>>     */
>>>    SYM_FUNC_START_ALIAS(__memcpy)
>>> -SYM_FUNC_START_LOCAL(memcpy)
>>> +SYM_FUNC_START_WEAK(memcpy)
>>>        ALTERNATIVE_2 "jmp memcpy_orig", "", X86_FEATURE_REP_GOOD, \
>>>                      "jmp memcpy_erms", X86_FEATURE_ERMS
>>>
>>> diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
>>> index 7ff00ea64e4f..41902fe8b859 100644
>>> --- a/arch/x86/lib/memmove_64.S
>>> +++ b/arch/x86/lib/memmove_64.S
>>> @@ -24,9 +24,7 @@
>>>     * Output:
>>>     * rax: dest
>>>     */
>>> -.weak memmove
>>> -
>>> -SYM_FUNC_START_ALIAS(memmove)
>>> +SYM_FUNC_START_WEAK(memmove)
>>>    SYM_FUNC_START(__memmove)
>>>
>>>        mov %rdi, %rax
>>> diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
>>> index 9ff15ee404a4..0bfd26e4ca9e 100644
>>> --- a/arch/x86/lib/memset_64.S
>>> +++ b/arch/x86/lib/memset_64.S
>>> @@ -6,8 +6,6 @@
>>>    #include <asm/alternative-asm.h>
>>>    #include <asm/export.h>
>>>
>>> -.weak memset
>>> -
>>>    /*
>>>     * ISO C memset - set a memory block to a byte value. This function uses fast
>>>     * string to get better performance than the original function. The code is
>>> @@ -19,7 +17,7 @@
>>>     *
>>>     * rax   original destination
>>>     */
>>> -SYM_FUNC_START_ALIAS(memset)
>>> +SYM_FUNC_START_WEAK(memset)
>>>    SYM_FUNC_START(__memset)
>>>        /*
>>>         * Some CPUs support enhanced REP MOVSB/STOSB feature. It is recommended
>>>
>>
>>
>> --
>> js
>> suse labs
> 
> 
> 



-- 
js

Patch

diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
index 037faac46b0c..1e299ac73c86 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -16,8 +16,6 @@ 
  * to a jmp to memcpy_erms which does the REP; MOVSB mem copy.
  */
 
-.weak memcpy
-
 /*
  * memcpy - Copy a memory block.
  *
@@ -30,7 +28,7 @@ 
  * rax original destination
  */
 SYM_FUNC_START_ALIAS(__memcpy)
-SYM_FUNC_START_LOCAL(memcpy)
+SYM_FUNC_START_WEAK(memcpy)
 	ALTERNATIVE_2 "jmp memcpy_orig", "", X86_FEATURE_REP_GOOD, \
 		      "jmp memcpy_erms", X86_FEATURE_ERMS
 
diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
index 7ff00ea64e4f..41902fe8b859 100644
--- a/arch/x86/lib/memmove_64.S
+++ b/arch/x86/lib/memmove_64.S
@@ -24,9 +24,7 @@ 
  * Output:
  * rax: dest
  */
-.weak memmove
-
-SYM_FUNC_START_ALIAS(memmove)
+SYM_FUNC_START_WEAK(memmove)
 SYM_FUNC_START(__memmove)
 
 	mov %rdi, %rax
diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
index 9ff15ee404a4..0bfd26e4ca9e 100644
--- a/arch/x86/lib/memset_64.S
+++ b/arch/x86/lib/memset_64.S
@@ -6,8 +6,6 @@ 
 #include <asm/alternative-asm.h>
 #include <asm/export.h>
 
-.weak memset
-
 /*
  * ISO C memset - set a memory block to a byte value. This function uses fast
  * string to get better performance than the original function. The code is
@@ -19,7 +17,7 @@ 
  *
  * rax   original destination
  */
-SYM_FUNC_START_ALIAS(memset)
+SYM_FUNC_START_WEAK(memset)
 SYM_FUNC_START(__memset)
 	/*
 	 * Some CPUs support enhanced REP MOVSB/STOSB feature. It is recommended