/* arch/x86/include/asm/string_64.h */
#ifndef _ASM_X86_STRING_64_H
#define _ASM_X86_STRING_64_H

#ifdef __KERNEL__
#include <linux/jump_label.h>

/* Written 2002 by Andi Kleen */

/* Only used for special circumstances. Stolen from i386/string.h */
static __always_inline void *__inline_memcpy(void *to, const void *from, size_t n)
{
	unsigned long d0, d1, d2;
	asm volatile("rep ; movsl\n\t"
		     "testb $2,%b4\n\t"
		     "je 1f\n\t"
		     "movsw\n"
		     "1:\ttestb $1,%b4\n\t"
		     "je 2f\n\t"
		     "movsb\n"
		     "2:"
		     : "=&c" (d0), "=&D" (d1), "=&S" (d2)
		     : "0" (n / 4), "q" (n), "1" ((long)to), "2" ((long)from)
		     : "memory");
	return to;
}
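/*
 * A plain-C sketch of what the asm above does, for reference only; the
 * helper name is made up and not part of this header. "rep ; movsl" copies
 * n/4 32-bit words, then bit 1 of n selects a trailing "movsw" and bit 0 a
 * final "movsb".
 */
static __always_inline void *__inline_memcpy_c_sketch(void *to, const void *from, size_t n)
{
	unsigned char *d = to;
	const unsigned char *s = from;
	size_t i = 0;

	for (; i < (n / 4) * 4; i += 4)		/* rep ; movsl */
		*(unsigned int *)(d + i) = *(const unsigned int *)(s + i);
	if (n & 2) {				/* movsw */
		*(unsigned short *)(d + i) = *(const unsigned short *)(s + i);
		i += 2;
	}
	if (n & 1)				/* movsb */
		d[i] = s[i];
	return to;
}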

/* Even with __builtin_ the compiler may decide to use the out-of-line
   function. */

#define __HAVE_ARCH_MEMCPY 1
extern void *memcpy(void *to, const void *from, size_t len);
extern void *__memcpy(void *to, const void *from, size_t len);

#ifndef CONFIG_KMEMCHECK
#if (__GNUC__ == 4 && __GNUC_MINOR__ < 3) || __GNUC__ < 4
#define memcpy(dst, src, len)					\
({								\
	size_t __len = (len);					\
	void *__ret;						\
	if (__builtin_constant_p(len) && __len >= 64)		\
		__ret = __memcpy((dst), (src), __len);		\
	else							\
		__ret = __builtin_memcpy((dst), (src), __len);	\
	__ret;							\
})
#endif
#else
/*
 * kmemcheck becomes very happy if we use the REP instructions unconditionally,
 * because it means that we know both memory operands in advance.
 */
#define memcpy(dst, src, len) __inline_memcpy((dst), (src), (len))
#endif
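/*
 * Illustrative note, not part of this header: with the old-GCC macro above,
 * only a copy whose length is a compile-time constant of at least 64 bytes
 * is routed to the out-of-line __memcpy(); everything else goes through
 * __builtin_memcpy(), which the compiler is free to inline:
 *
 *	memcpy(dst, src, 256);	constant and >= 64  -> __memcpy()
 *	memcpy(dst, src, 8);	small constant      -> __builtin_memcpy()
 *	memcpy(dst, src, n);	size not constant   -> __builtin_memcpy()
 *
 * Under CONFIG_KMEMCHECK every memcpy() instead expands to __inline_memcpy(),
 * so kmemcheck always sees the same "rep ; movs" based copy.
 */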

#define __HAVE_ARCH_MEMSET
void *memset(void *s, int c, size_t n);
void *__memset(void *s, int c, size_t n);

#define __HAVE_ARCH_MEMMOVE
void *memmove(void *dest, const void *src, size_t count);
void *__memmove(void *dest, const void *src, size_t count);

int memcmp(const void *cs, const void *ct, size_t count);
size_t strlen(const char *s);
char *strcpy(char *dest, const char *src);
char *strcat(char *dest, const char *src);
int strcmp(const char *cs, const char *ct);

#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)

/*
 * Files that are not instrumented (e.g. mm/slub.c) should use the
 * non-instrumented versions of the mem* functions.
 */

#undef memcpy
#define memcpy(dst, src, len) __memcpy(dst, src, len)
#define memmove(dst, src, len) __memmove(dst, src, len)
#define memset(s, c, n) __memset(s, c, n)
#endif
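/*
 * Illustrative note, not part of this header: a file excluded from KASAN
 * instrumentation in its Makefile, e.g.
 *
 *	KASAN_SANITIZE_slub.o := n
 *
 * is compiled without -fsanitize=kernel-address, so __SANITIZE_ADDRESS__ is
 * not defined there and the redefinitions above send its memcpy()/memmove()/
 * memset() calls straight to the uninstrumented __memcpy()/__memmove()/
 * __memset() variants.
 */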

__must_check int memcpy_mcsafe_unrolled(void *dst, const void *src, size_t cnt);
DECLARE_STATIC_KEY_FALSE(mcsafe_key);

/**
 * memcpy_mcsafe - copy memory with indication if a machine check happened
 *
 * @dst:	destination address
 * @src:	source address
 * @cnt:	number of bytes to copy
 *
 * Low-level memory copy function that catches machine checks.
 * We only call into the "safe" function on systems that can
 * actually do machine check recovery. Everyone else can just
 * use memcpy().
 *
 * Return 0 for success, -EFAULT for failure.
 */
static __always_inline __must_check int
memcpy_mcsafe(void *dst, const void *src, size_t cnt)
{
#ifdef CONFIG_X86_MCE
	if (static_branch_unlikely(&mcsafe_key))
		return memcpy_mcsafe_unrolled(dst, src, cnt);
	else
#endif
		memcpy(dst, src, cnt);
	return 0;
}
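/*
 * Illustrative usage sketch, not part of this header: a caller copying from
 * memory that may raise recoverable machine checks (e.g. persistent memory)
 * can test the return value instead of consuming possibly poisoned data.
 * The names buf, pmem_src and len are hypothetical.
 *
 *	if (memcpy_mcsafe(buf, pmem_src, len))
 *		return -EIO;
 */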

#endif /* __KERNEL__ */

#endif /* _ASM_X86_STRING_64_H */