@@ -23,7 +23,7 @@ long int
__syscall_cancel (__syscall_arg_t nr, __syscall_arg_t a1,
__syscall_arg_t a2, __syscall_arg_t a3,
__syscall_arg_t a4, __syscall_arg_t a5,
- __syscall_arg_t a6)
+ __syscall_arg_t a6 __SYSCALL_CANCEL7_ARG_DEF)
{
pthread_t self = (pthread_t) THREAD_SELF;
struct pthread *pd = (struct pthread *) self;
@@ -33,7 +33,8 @@ __syscall_cancel (__syscall_arg_t nr, __syscall_arg_t a1,
if (pd->cancelhandling & CANCELSTATE_BITMASK)
{
INTERNAL_SYSCALL_DECL (err);
- result = INTERNAL_SYSCALL_NCS_CALL (nr, err, a1, a2, a3, a4, a5, a6);
+ result = INTERNAL_SYSCALL_NCS_CALL (nr, err, a1, a2, a3, a4, a5, a6
+ __SYSCALL_CANCEL7_ARG7);
if (INTERNAL_SYSCALL_ERROR_P (result, err))
return -INTERNAL_SYSCALL_ERRNO (result, err);
return result;
@@ -42,7 +43,7 @@ __syscall_cancel (__syscall_arg_t nr, __syscall_arg_t a1,
/* Call the arch-specific entry points that contains the globals markers
to be checked by SIGCANCEL handler. */
result = __syscall_cancel_arch (&pd->cancelhandling, nr, a1, a2, a3, a4, a5,
- a6);
+ a6 __SYSCALL_CANCEL7_ARG7);
if ((result == -EINTR)
&& (pd->cancelhandling & CANCELED_BITMASK)
@@ -331,7 +331,8 @@ __do_cancel (void)
extern long int __syscall_cancel_arch (volatile int *, __syscall_arg_t nr,
__syscall_arg_t arg1, __syscall_arg_t arg2, __syscall_arg_t arg3,
- __syscall_arg_t arg4, __syscall_arg_t arg5, __syscall_arg_t arg6);
+ __syscall_arg_t arg4, __syscall_arg_t arg5, __syscall_arg_t arg6
+ __SYSCALL_CANCEL7_ARG_DEF);
libc_hidden_proto (__syscall_cancel_arch);
extern _Noreturn void __syscall_do_cancel (void)
@@ -35,7 +35,7 @@
# define READ_THREAD_POINTER() (__builtin_thread_pointer ())
#else
/* Note: rd must be $v1 to be ABI-conformant. */
-# if __mips_isa_rev >= 2
+# if defined __mips_isa_rev && __mips_isa_rev >= 2
# define READ_THREAD_POINTER() \
({ void *__result; \
asm volatile ("rdhwr\t%0, $29" : "=v" (__result)); \
@@ -121,29 +121,50 @@ typedef long int __syscall_arg_t;
# define __SSC(__x) ((__syscall_arg_t) (__x))
#endif
+/* Adjust both the __syscall_cancel and the SYSCALL_CANCEL macro to support
+   7 arguments instead of the default 6 (currently only mips32).  It avoids
+   requiring each architecture to provide 7-argument
+   {INTERNAL,INLINE}_SYSCALL macros.  */
+#ifdef HAVE_CANCELABLE_SYSCALL_WITH_7_ARGS
+# define __SYSCALL_CANCEL7_ARG_DEF , __syscall_arg_t arg7
+# define __SYSCALL_CANCEL7_ARG , 0
+# define __SYSCALL_CANCEL7_ARG7 , arg7
+#else
+# define __SYSCALL_CANCEL7_ARG_DEF
+# define __SYSCALL_CANCEL7_ARG
+# define __SYSCALL_CANCEL7_ARG7
+#endif
+
long int __syscall_cancel (__syscall_arg_t nr, __syscall_arg_t arg1,
__syscall_arg_t arg2, __syscall_arg_t arg3,
__syscall_arg_t arg4, __syscall_arg_t arg5,
- __syscall_arg_t arg6);
+ __syscall_arg_t arg6 __SYSCALL_CANCEL7_ARG_DEF);
libc_hidden_proto (__syscall_cancel);
#define __SYSCALL_CANCEL0(name) \
- (__syscall_cancel)(__NR_##name, 0, 0, 0, 0, 0, 0)
+ (__syscall_cancel)(__NR_##name, 0, 0, 0, 0, 0, 0 \
+ __SYSCALL_CANCEL7_ARG)
#define __SYSCALL_CANCEL1(name, a1) \
- (__syscall_cancel)(__NR_##name, __SSC(a1), 0, 0, 0, 0, 0)
+ (__syscall_cancel)(__NR_##name, __SSC(a1), 0, 0, 0, 0, 0 \
+ __SYSCALL_CANCEL7_ARG)
#define __SYSCALL_CANCEL2(name, a1, a2) \
- (__syscall_cancel)(__NR_##name, __SSC(a1), __SSC(a2), 0, 0, 0, 0)
+ (__syscall_cancel)(__NR_##name, __SSC(a1), __SSC(a2), 0, 0, 0, 0 \
+ __SYSCALL_CANCEL7_ARG)
#define __SYSCALL_CANCEL3(name, a1, a2, a3) \
- (__syscall_cancel)(__NR_##name, __SSC(a1), __SSC(a2), __SSC(a3), 0, 0, 0)
+ (__syscall_cancel)(__NR_##name, __SSC(a1), __SSC(a2), __SSC(a3), 0, 0, 0 \
+ __SYSCALL_CANCEL7_ARG)
#define __SYSCALL_CANCEL4(name, a1, a2, a3, a4) \
(__syscall_cancel)(__NR_##name, __SSC(a1), __SSC(a2), __SSC(a3), \
- __SSC(a4), 0, 0)
+ __SSC(a4), 0, 0 __SYSCALL_CANCEL7_ARG)
#define __SYSCALL_CANCEL5(name, a1, a2, a3, a4, a5) \
(__syscall_cancel)(__NR_##name, __SSC(a1), __SSC(a2), __SSC(a3), \
- __SSC(a4), __SSC(a5), 0)
+ __SSC(a4), __SSC(a5), 0 __SYSCALL_CANCEL7_ARG)
#define __SYSCALL_CANCEL6(name, a1, a2, a3, a4, a5, a6) \
(__syscall_cancel)(__NR_##name, __SSC(a1), __SSC(a2), __SSC(a3), \
- __SSC(a4), __SSC(a5), __SSC(a6))
+ __SSC(a4), __SSC(a5), __SSC(a6) __SYSCALL_CANCEL7_ARG)
+#define __SYSCALL_CANCEL7(name, a1, a2, a3, a4, a5, a6, a7) \
+ (__syscall_cancel)(__NR_##name, __SSC(a1), __SSC(a2), __SSC(a3), \
+ __SSC(a4), __SSC(a5), __SSC(a6), __SSC(a7))
#define __SYSCALL_CANCEL_NARGS_X(a,b,c,d,e,f,g,h,n,...) n
#define __SYSCALL_CANCEL_NARGS(...) \
new file mode 100644
@@ -0,0 +1,128 @@
+/* Cancellable syscall wrapper. Linux/mips32 version.
+ Copyright (C) 2019 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <sysdep.h>
+#include <sys/asm.h>
+
+/* long int __syscall_cancel_arch (int *cancelhandling,
+ __syscall_arg_t nr,
+ __syscall_arg_t arg1,
+ __syscall_arg_t arg2,
+ __syscall_arg_t arg3,
+ __syscall_arg_t arg4,
+ __syscall_arg_t arg5,
+ __syscall_arg_t arg6,
+ __syscall_arg_t arg7) */
+
+#define FRAME_SIZE 56
+
+NESTED (__syscall_cancel_arch, FRAME_SIZE, fp)
+ .mask 0xc0070000,-SZREG
+ .fmask 0x00000000,0
+
+ PTR_ADDIU sp, -FRAME_SIZE
+ cfi_def_cfa_offset (FRAME_SIZE)
+
+ sw fp,48(sp)
+ sw ra,52(sp)
+ sw s2,44(sp)
+ sw s1,40(sp)
+ sw s0,36(sp)
+#ifdef __PIC__
+ .cprestore 16
+#endif
+ cfi_offset (31, -4)
+ cfi_offset (30, -8)
+ cfi_offset (18, -12)
+ cfi_offset (17, -16)
+ cfi_offset (16, -20)
+ move fp,sp
+ cfi_def_cfa_register (30)
+
+ .globl __syscall_cancel_arch_start
+ .type __syscall_cancel_arch_start, @function
+__syscall_cancel_arch_start:
+
+ lw v0,0(a0)
+ andi v0,v0,0x4
+ bne v0,zero,2f
+
+ addiu sp,sp,-16
+ addiu v0,sp,16
+ sw v0,24(fp)
+
+ move s0,a1
+ move a0,a2
+ move a1,a3
+ lw a2,72(fp)
+ lw a3,76(fp)
+ lw v0,84(fp)
+ lw s1,80(fp)
+ lw s2,88(fp)
+
+ .set noreorder
+ subu sp, 32
+ sw s1, 16(sp)
+ sw v0, 20(sp)
+ sw s2, 24(sp)
+ move v0, $16
+ syscall
+
+ .globl __syscall_cancel_arch_end
+ .type __syscall_cancel_arch_end, @function
+__syscall_cancel_arch_end:
+ addiu sp, 32
+ .set reorder
+
+ beq a3,zero,1f
+ subu v0,zero,v0
+1:
+ move sp,fp
+ cfi_remember_state
+ cfi_def_cfa_register (29)
+ lw ra,52(fp)
+ lw fp,48(sp)
+ lw s2,44(sp)
+ lw s1,40(sp)
+ lw s0,36(sp)
+ .set noreorder
+ .set nomacro
+ jr ra
+ addiu sp,sp,FRAME_SIZE
+
+ .set macro
+ .set reorder
+
+ cfi_def_cfa_offset (0)
+ cfi_restore (16)
+ cfi_restore (17)
+ cfi_restore (18)
+ cfi_restore (30)
+ cfi_restore (31)
+
+2:
+ cfi_restore_state
+#ifdef __PIC__
+ PTR_LA t9, __syscall_do_cancel
+ jalr t9
+#else
+ jal __syscall_do_cancel
+#endif
+
+END (__syscall_cancel_arch)
+libc_hidden_def (__syscall_cancel_arch)
@@ -18,6 +18,10 @@
#ifndef _LINUX_MIPS_MIPS32_SYSDEP_H
#define _LINUX_MIPS_MIPS32_SYSDEP_H 1
+/* mips32 has cancellable syscalls with 7 arguments (currently only
+   sync_file_range).  */
+#define HAVE_CANCELABLE_SYSCALL_WITH_7_ARGS 1
+
/* There is some commonality. */
#include <sysdeps/unix/sysv/linux/mips/sysdep.h>
#include <sysdeps/unix/sysv/linux/sysdep.h>
new file mode 100644
@@ -0,0 +1,114 @@
+/* Cancellable syscall wrapper. Linux/mips64 version.
+ Copyright (C) 2019 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <sysdep.h>
+#include <sys/asm.h>
+
+/* long int __syscall_cancel_arch (int *cancelhandling,
+ __syscall_arg_t nr,
+ __syscall_arg_t arg1,
+ __syscall_arg_t arg2,
+ __syscall_arg_t arg3,
+ __syscall_arg_t arg4,
+ __syscall_arg_t arg5,
+ __syscall_arg_t arg6,
+ __syscall_arg_t arg7) */
+
+#define FRAME_SIZE 32
+#define ADDIU LONG_ADDIU
+#define ADDU LONG_ADDU
+#define SUBU LONG_SUBU
+#define LOAD_L LONG_L
+
+NESTED (__syscall_cancel_arch, FRAME_SIZE, fp)
+ .mask 0x90010000, -SZREG
+ .fmask 0x00000000, 0
+ ADDIU sp, sp, -32
+ cfi_def_cfa_offset (32)
+ sd gp, 16(sp)
+ cfi_offset (gp, -16)
+ lui gp, %hi(%neg(%gp_rel(__syscall_cancel_arch)))
+ ADDU gp, gp, t9
+ sd ra, 24(sp)
+ sd s0, 8(sp)
+ cfi_offset (ra, -8)
+ cfi_offset (s0, -24)
+ ADDIU gp, gp, %lo(%neg(%gp_rel(__syscall_cancel_arch)))
+
+ .global __syscall_cancel_arch_start
+ .type __syscall_cancel_arch_start,%function
+__syscall_cancel_arch_start:
+
+ lw v0, 0(a0)
+ andi v0, v0, 0x4
+ .set noreorder
+ .set nomacro
+ bne v0, zero, 2f
+ move s0, a1
+ .set macro
+ .set reorder
+
+ move a0, a2
+ move a1, a3
+ move a2, a4
+ move a3, a5
+ move a4, a6
+ move a5, a7
+
+ .set noreorder
+ move v0, s0
+ syscall
+ .set reorder
+
+ .global __syscall_cancel_arch_end
+ .type __syscall_cancel_arch_end,%function
+__syscall_cancel_arch_end:
+
+ .set noreorder
+ .set nomacro
+ bnel a3, zero, 1f
+ SUBU v0, zero, v0
+ .set macro
+ .set reorder
+
+1:
+ ld ra, 24(sp)
+ ld gp, 16(sp)
+ ld s0, 8(sp)
+
+ .set noreorder
+ .set nomacro
+ jr ra
+ ADDIU sp, sp, 32
+ .set macro
+ .set reorder
+
+ cfi_remember_state
+ cfi_def_cfa_offset (0)
+ cfi_restore (s0)
+ cfi_restore (gp)
+ cfi_restore (ra)
+
+ .align 3
+2:
+ cfi_restore_state
+ LOAD_L t9, %got_disp(__syscall_do_cancel)(gp)
+ .reloc 3f, R_MIPS_JALR, __syscall_do_cancel
+3: jalr t9
+END (__syscall_cancel_arch)
+libc_hidden_def (__syscall_cancel_arch)
@@ -40,7 +40,8 @@ long int
__syscall_cancel_arch (volatile int *ch, __syscall_arg_t nr,
__syscall_arg_t a1, __syscall_arg_t a2,
__syscall_arg_t a3, __syscall_arg_t a4,
- __syscall_arg_t a5, __syscall_arg_t a6)
+ __syscall_arg_t a5, __syscall_arg_t a6
+ __SYSCALL_CANCEL7_ARG_DEF)
{
#define ADD_LABEL(__label) \
asm volatile ( \
@@ -53,7 +54,8 @@ __syscall_cancel_arch (volatile int *ch, __syscall_arg_t nr,
__syscall_do_cancel();
INTERNAL_SYSCALL_DECL(err);
- long int result = INTERNAL_SYSCALL_NCS (nr, err, 6, a1, a2, a3, a4, a5, a6);
+ long int result = INTERNAL_SYSCALL_NCS_CALL (nr, err, a1, a2, a3, a4, a5,
+ a6 __SYSCALL_CANCEL7_ARG7);
ADD_LABEL ("__syscall_cancel_arch_end");
if (__glibc_unlikely (INTERNAL_SYSCALL_ERROR_P (result, err)))
return -INTERNAL_SYSCALL_ERRNO (result, err);