@@ -12,5 +12,7 @@ static inline void enable_copy_safe_slow(void)
__must_check unsigned long
copy_safe_slow(void *dst, const void *src, size_t cnt);
+__must_check unsigned long
+copy_safe_fast(void *dst, const void *src, size_t cnt);
#endif /* _COPY_SAFE_H_ */
@@ -25,7 +25,7 @@ void enable_copy_safe_slow(void)
*
* We only call into the slow version on systems that have trouble
* actually do machine check recovery. Everyone else can just
- * use memcpy().
+ * use copy_safe_fast().
*
* Return 0 for success, or number of bytes not copied if there was an
* exception.
@@ -34,8 +34,7 @@ __must_check unsigned long copy_safe(void *dst, const void *src, size_t cnt)
{
	if (static_branch_unlikely(&copy_safe_slow_key))
return copy_safe_slow(dst, src, cnt);
- memcpy(dst, src, cnt);
- return 0;
+ return copy_safe_fast(dst, src, cnt);
}
EXPORT_SYMBOL_GPL(copy_safe);
@@ -2,7 +2,9 @@
/* Copyright(c) 2016-2020 Intel Corporation. All rights reserved. */
#include <linux/linkage.h>
+#include <asm/alternative-asm.h>
#include <asm/copy_safe_test.h>
+#include <asm/cpufeatures.h>
#include <asm/export.h>
#include <asm/asm.h>
@@ -120,4 +122,42 @@ EXPORT_SYMBOL_GPL(copy_safe_slow)
_ASM_EXTABLE(.L_write_leading_bytes, .E_leading_bytes)
_ASM_EXTABLE(.L_write_words, .E_write_words)
_ASM_EXTABLE(.L_write_trailing_bytes, .E_trailing_bytes)
+
+/*
+ * copy_safe_fast - memory copy with exception handling
+ *
+ * Fast string copy + fault / exception handling. If the CPU does
+ * support machine check exception recovery, but does not support
+ * recovering from fast-string exceptions then this CPU needs to be
+ * added to the copy_safe_slow_key set of quirks. Otherwise, absent any
+ * machine check recovery support this version should be no slower than
+ * standard memcpy.
+ */
+SYM_FUNC_START(copy_safe_fast)
+ ALTERNATIVE "jmp copy_safe_slow", "", X86_FEATURE_ERMS
+ movq %rdi, %rax
+ movq %rdx, %rcx
+.L_copy:
+ rep movsb
+ /* Copy successful. Return zero */
+ xorl %eax, %eax
+ ret
+SYM_FUNC_END(copy_safe_fast)
+EXPORT_SYMBOL_GPL(copy_safe_fast)
+
+ .section .fixup, "ax"
+.E_copy:
+ /*
+ * On fault %rcx is updated such that the copy instruction could
+ * optionally be restarted at the fault position, i.e. it
+ * contains 'bytes remaining'. A non-zero return indicates error
+ * to copy_safe() users, or indicate short transfers to
+ * user-copy routines.
+ */
+ movq %rcx, %rax
+ ret
+
+ .previous
+
+ _ASM_EXTABLE_FAULT(.L_copy, .E_copy)
#endif
@@ -533,6 +533,7 @@ static const char *uaccess_safe_builtin[] = {
"__ubsan_handle_shift_out_of_bounds",
/* misc */
"csum_partial_copy_generic",
+ "copy_safe_fast",
"copy_safe_slow",
"copy_safe_slow_handle_tail",
"ftrace_likely_update", /* CONFIG_TRACE_BRANCH_PROFILING */