@@ -10,7 +10,7 @@
#include "compiler.h"
#include "crt.h"
-#if !defined(_ABIO32)
+#if !defined(_ABIO32) && !defined(_ABIN32) && !defined(_ABI64)
#error Unsupported MIPS ABI
#endif
@@ -32,11 +32,32 @@
* - the arguments are cast to long and assigned into the target registers
* which are then simply passed as registers to the asm code, so that we
* don't have to experience issues with register constraints.
+ *
+ * Syscalls for MIPS ABIs N32 and N64, same as ABI O32 with the following differences:
+ * - arguments are in a0, a1, a2, a3, t0, t1, t2, t3.
+ * t0..t3 are also known as a4..a7.
+ * - stack is 16-byte aligned
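+ *   - no stack room needs to be reserved for arguments around the syscall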
*/
+#if defined(_ABIO32)
+
#define _NOLIBC_SYSCALL_CLOBBERLIST \
"memory", "cc", "at", "v1", "hi", "lo", \
"t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "t8", "t9"
+#define _NOLIBC_SYSCALL_STACK_RESERVE "addiu $sp, $sp, -32\n"
+#define _NOLIBC_SYSCALL_STACK_UNRESERVE "addiu $sp, $sp, 32\n"
+
+#else /* _ABIN32 || _ABI64 */
+
+/* binutils, GCC and clang disagree about register aliases, so use raw numbers:
+ * $10/$11 = a6/a7, $12..$15 = t0..t3 (a.k.a. t4..t7), $24/$25 = t8/t9. */
+#define _NOLIBC_SYSCALL_CLOBBERLIST \
+ "memory", "cc", "at", "v1", \
+ "10", "11", "12", "13", "14", "15", "24", "25"
+
+#define _NOLIBC_SYSCALL_STACK_RESERVE
+#define _NOLIBC_SYSCALL_STACK_UNRESERVE
+
+#endif /* _ABIO32 */
#define my_syscall0(num) \
({ \
@@ -44,9 +65,9 @@
register long _arg4 __asm__ ("a3"); \
\
__asm__ volatile ( \
- "addiu $sp, $sp, -32\n" \
+ _NOLIBC_SYSCALL_STACK_RESERVE \
"syscall\n" \
- "addiu $sp, $sp, 32\n" \
+ _NOLIBC_SYSCALL_STACK_UNRESERVE \
: "=r"(_num), "=r"(_arg4) \
: "r"(_num) \
: _NOLIBC_SYSCALL_CLOBBERLIST \
@@ -61,9 +82,9 @@
register long _arg4 __asm__ ("a3"); \
\
__asm__ volatile ( \
- "addiu $sp, $sp, -32\n" \
+ _NOLIBC_SYSCALL_STACK_RESERVE \
"syscall\n" \
- "addiu $sp, $sp, 32\n" \
+ _NOLIBC_SYSCALL_STACK_UNRESERVE \
: "=r"(_num), "=r"(_arg4) \
: "0"(_num), \
"r"(_arg1) \
@@ -80,9 +101,9 @@
register long _arg4 __asm__ ("a3"); \
\
__asm__ volatile ( \
- "addiu $sp, $sp, -32\n" \
+ _NOLIBC_SYSCALL_STACK_RESERVE \
"syscall\n" \
- "addiu $sp, $sp, 32\n" \
+ _NOLIBC_SYSCALL_STACK_UNRESERVE \
: "=r"(_num), "=r"(_arg4) \
: "0"(_num), \
"r"(_arg1), "r"(_arg2) \
@@ -100,9 +121,9 @@
register long _arg4 __asm__ ("a3"); \
\
__asm__ volatile ( \
- "addiu $sp, $sp, -32\n" \
+ _NOLIBC_SYSCALL_STACK_RESERVE \
"syscall\n" \
- "addiu $sp, $sp, 32\n" \
+ _NOLIBC_SYSCALL_STACK_UNRESERVE \
: "=r"(_num), "=r"(_arg4) \
: "0"(_num), \
"r"(_arg1), "r"(_arg2), "r"(_arg3) \
@@ -120,9 +141,9 @@
register long _arg4 __asm__ ("a3") = (long)(arg4); \
\
__asm__ volatile ( \
- "addiu $sp, $sp, -32\n" \
+ _NOLIBC_SYSCALL_STACK_RESERVE \
"syscall\n" \
- "addiu $sp, $sp, 32\n" \
+ _NOLIBC_SYSCALL_STACK_UNRESERVE \
: "=r" (_num), "=r"(_arg4) \
: "0"(_num), \
"r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4) \
@@ -131,6 +152,8 @@
_arg4 ? -_num : _num; \
})
+#if defined(_ABIO32)
+
#define my_syscall5(num, arg1, arg2, arg3, arg4, arg5) \
({ \
register long _num __asm__ ("v0") = (num); \
@@ -141,10 +164,10 @@
register long _arg5 = (long)(arg5); \
\
__asm__ volatile ( \
- "addiu $sp, $sp, -32\n" \
+ _NOLIBC_SYSCALL_STACK_RESERVE \
"sw %7, 16($sp)\n" \
"syscall\n" \
- "addiu $sp, $sp, 32\n" \
+ _NOLIBC_SYSCALL_STACK_UNRESERVE \
: "=r" (_num), "=r"(_arg4) \
: "0"(_num), \
"r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5) \
@@ -164,11 +187,53 @@
register long _arg6 = (long)(arg6); \
\
__asm__ volatile ( \
- "addiu $sp, $sp, -32\n" \
+ _NOLIBC_SYSCALL_STACK_RESERVE \
"sw %7, 16($sp)\n" \
"sw %8, 20($sp)\n" \
"syscall\n" \
- "addiu $sp, $sp, 32\n" \
+ _NOLIBC_SYSCALL_STACK_UNRESERVE \
+ : "=r" (_num), "=r"(_arg4) \
+ : "0"(_num), \
+ "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5), \
+ "r"(_arg6) \
+ : _NOLIBC_SYSCALL_CLOBBERLIST \
+ ); \
+ _arg4 ? -_num : _num; \
+})
+
+#else /* _ABIN32 || _ABI64 */
+
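+/* on N32/N64 the 5th and 6th arguments are passed in registers $8 and $9
+ * (a4/a5) instead of on the stack, hence these dedicated variants */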
+#define my_syscall5(num, arg1, arg2, arg3, arg4, arg5) \
+({ \
+ register long _num __asm__ ("v0") = (num); \
+ register long _arg1 __asm__ ("$4") = (long)(arg1); \
+ register long _arg2 __asm__ ("$5") = (long)(arg2); \
+ register long _arg3 __asm__ ("$6") = (long)(arg3); \
+ register long _arg4 __asm__ ("$7") = (long)(arg4); \
+ register long _arg5 __asm__ ("$8") = (long)(arg5); \
+ \
+ __asm__ volatile ( \
+ "syscall\n" \
+ : "=r" (_num), "=r"(_arg4) \
+ : "0"(_num), \
+ "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5) \
+ : _NOLIBC_SYSCALL_CLOBBERLIST \
+ ); \
+ _arg4 ? -_num : _num; \
+})
+
+#define my_syscall6(num, arg1, arg2, arg3, arg4, arg5, arg6) \
+({ \
+ register long _num __asm__ ("v0") = (num); \
+ register long _arg1 __asm__ ("$4") = (long)(arg1); \
+ register long _arg2 __asm__ ("$5") = (long)(arg2); \
+ register long _arg3 __asm__ ("$6") = (long)(arg3); \
+ register long _arg4 __asm__ ("$7") = (long)(arg4); \
+ register long _arg5 __asm__ ("$8") = (long)(arg5); \
+ register long _arg6 __asm__ ("$9") = (long)(arg6); \
+ \
+ __asm__ volatile ( \
+ "syscall\n" \
: "=r" (_num), "=r"(_arg4) \
: "0"(_num), \
"r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5), \
@@ -178,15 +243,25 @@
_arg4 ? -_num : _num; \
})
+#endif /* _ABIO32 */
+
/* startup code, note that it's called __start on MIPS */
void __start(void);
void __attribute__((weak, noreturn)) __nolibc_entrypoint __no_stack_protector __start(void)
{
__asm__ volatile (
"move $a0, $sp\n" /* save stack pointer to $a0, as arg1 of _start_c */
+#if defined(_ABIO32)
"addiu $sp, $sp, -16\n" /* the callee expects to save a0..a3 there */
+#endif /* _ABIO32 */
"lui $t9, %hi(_start_c)\n" /* ABI requires current function address in $t9 */
"ori $t9, %lo(_start_c)\n"
+#if defined(_ABI64)
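+	/* %hi/%lo above only cover bits 31..0; also load bits 63..32 of
+	 * _start_c and merge them so that $t9 holds the full 64-bit address */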
+ "lui $t0, %highest(_start_c)\n"
+ "ori $t0, %higher(_start_c)\n"
+ "dsll $t0, 0x20\n"
+ "or $t9, $t0\n"
+#endif /* _ABI64 */
"jalr $t9\n" /* transfer to c runtime */
);
__nolibc_entrypoint_epilogue();
@@ -53,6 +53,10 @@ ARCH_ppc64 = powerpc
ARCH_ppc64le = powerpc
ARCH_mips32le = mips
ARCH_mips32be = mips
+ARCH_mipsn32le = mips
+ARCH_mipsn32be = mips
+ARCH_mips64le = mips
+ARCH_mips64be = mips
ARCH_riscv32 = riscv
ARCH_riscv64 = riscv
ARCH_s390x = s390
@@ -69,6 +73,10 @@ IMAGE_arm = arch/arm/boot/zImage
IMAGE_armthumb = arch/arm/boot/zImage
IMAGE_mips32le = vmlinuz
IMAGE_mips32be = vmlinuz
+IMAGE_mipsn32le = vmlinuz
+IMAGE_mipsn32be = vmlinuz
+IMAGE_mips64le = vmlinuz
+IMAGE_mips64be = vmlinuz
IMAGE_ppc = vmlinux
IMAGE_ppc64 = vmlinux
IMAGE_ppc64le = arch/powerpc/boot/zImage
@@ -93,6 +101,10 @@ DEFCONFIG_arm = multi_v7_defconfig
DEFCONFIG_armthumb = multi_v7_defconfig
DEFCONFIG_mips32le = malta_defconfig
DEFCONFIG_mips32be = malta_defconfig generic/eb.config
+DEFCONFIG_mipsn32le = malta_defconfig generic/64r2.config
+DEFCONFIG_mipsn32be = malta_defconfig generic/64r6.config generic/eb.config
+DEFCONFIG_mips64le = malta_defconfig generic/64r6.config
+DEFCONFIG_mips64be = malta_defconfig generic/64r2.config generic/eb.config
DEFCONFIG_ppc = pmac32_defconfig
DEFCONFIG_ppc64 = powernv_be_defconfig
DEFCONFIG_ppc64le = powernv_defconfig
@@ -124,6 +136,10 @@ QEMU_ARCH_arm = arm
QEMU_ARCH_armthumb = arm
QEMU_ARCH_mips32le = mipsel # works with malta_defconfig
QEMU_ARCH_mips32be = mips
+QEMU_ARCH_mipsn32le = mips64el
+QEMU_ARCH_mipsn32be = mips64
+QEMU_ARCH_mips64le = mips64el
+QEMU_ARCH_mips64be = mips64
QEMU_ARCH_ppc = ppc
QEMU_ARCH_ppc64 = ppc64
QEMU_ARCH_ppc64le = ppc64
@@ -139,6 +155,8 @@ QEMU_ARCH_m68k = m68k
QEMU_ARCH = $(QEMU_ARCH_$(XARCH))
QEMU_ARCH_USER_ppc64le = ppc64le
+QEMU_ARCH_USER_mipsn32le = mipsn32el
+QEMU_ARCH_USER_mipsn32be = mipsn32
QEMU_ARCH_USER = $(or $(QEMU_ARCH_USER_$(XARCH)),$(QEMU_ARCH_$(XARCH)))
QEMU_BIOS_DIR = /usr/share/edk2/
@@ -157,6 +175,10 @@ QEMU_ARGS_arm = -M virt -append "panic=-1 $(TEST:%=NOLIBC_TEST=%)"
QEMU_ARGS_armthumb = -M virt -append "panic=-1 $(TEST:%=NOLIBC_TEST=%)"
QEMU_ARGS_mips32le = -M malta -append "panic=-1 $(TEST:%=NOLIBC_TEST=%)"
QEMU_ARGS_mips32be = -M malta -append "panic=-1 $(TEST:%=NOLIBC_TEST=%)"
+QEMU_ARGS_mipsn32le = -M malta -cpu 5KEc -append "panic=-1 $(TEST:%=NOLIBC_TEST=%)"
+QEMU_ARGS_mipsn32be = -M malta -cpu I6400 -append "panic=-1 $(TEST:%=NOLIBC_TEST=%)"
+QEMU_ARGS_mips64le = -M malta -cpu I6400 -append "panic=-1 $(TEST:%=NOLIBC_TEST=%)"
+QEMU_ARGS_mips64be = -M malta -cpu 5KEc -append "panic=-1 $(TEST:%=NOLIBC_TEST=%)"
QEMU_ARGS_ppc = -M g3beige -append "console=ttyS0 panic=-1 $(TEST:%=NOLIBC_TEST=%)"
QEMU_ARGS_ppc64 = -M powernv -append "console=hvc0 panic=-1 $(TEST:%=NOLIBC_TEST=%)"
QEMU_ARGS_ppc64le = -M powernv -append "console=hvc0 panic=-1 $(TEST:%=NOLIBC_TEST=%)"
@@ -191,6 +213,10 @@ CFLAGS_s390x = -m64
CFLAGS_s390 = -m31
CFLAGS_mips32le = -EL -mabi=32 -fPIC
CFLAGS_mips32be = -EB -mabi=32
+CFLAGS_mipsn32le = -EL -mabi=n32 -fPIC -march=mips64r2
+CFLAGS_mipsn32be = -EB -mabi=n32 -march=mips64r6
+CFLAGS_mips64le = -EL -mabi=64 -march=mips64r6
+CFLAGS_mips64be = -EB -mabi=64 -march=mips64r2
CFLAGS_sparc32 = $(call cc-option,-m32)
ifeq ($(origin XARCH),command line)
CFLAGS_XARCH = $(CFLAGS_$(XARCH))
@@ -20,7 +20,7 @@ llvm=
all_archs=(
i386 x86_64
arm64 arm armthumb
- mips32le mips32be
+ mips32le mips32be mipsn32le mipsn32be mips64le mips64be
ppc ppc64 ppc64le
riscv32 riscv64
s390x s390