/* include/linux/compiler.h */
#ifndef __LINUX_COMPILER_H
#define __LINUX_COMPILER_H

#ifndef __ASSEMBLY__

#ifdef __CHECKER__
# define __user __attribute__((noderef, address_space(1)))
# define __kernel __attribute__((address_space(0)))
# define __safe __attribute__((safe))
# define __force __attribute__((force))
# define __nocast __attribute__((nocast))
# define __iomem __attribute__((noderef, address_space(2)))
# define __must_hold(x) __attribute__((context(x,1,1)))
# define __acquires(x) __attribute__((context(x,0,1)))
# define __releases(x) __attribute__((context(x,1,0)))
# define __acquire(x) __context__(x,1)
# define __release(x) __context__(x,-1)
# define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
# define __percpu __attribute__((noderef, address_space(3)))
# define __pmem __attribute__((noderef, address_space(5)))
#ifdef CONFIG_SPARSE_RCU_POINTER
# define __rcu __attribute__((noderef, address_space(4)))
#else /* CONFIG_SPARSE_RCU_POINTER */
# define __rcu
#endif /* CONFIG_SPARSE_RCU_POINTER */
# define __private __attribute__((noderef))
extern void __chk_user_ptr(const volatile void __user *);
extern void __chk_io_ptr(const volatile void __iomem *);
# define ACCESS_PRIVATE(p, member) (*((typeof((p)->member) __force *) &(p)->member))
#else /* __CHECKER__ */
# define __user
# define __kernel
# define __safe
# define __force
# define __nocast
# define __iomem
# define __chk_user_ptr(x) (void)0
# define __chk_io_ptr(x) (void)0
# define __builtin_warning(x, y...) (1)
# define __must_hold(x)
# define __acquires(x)
# define __releases(x)
# define __acquire(x) (void)0
# define __release(x) (void)0
# define __cond_lock(x,c) (c)
# define __percpu
# define __rcu
# define __pmem
# define __private
# define ACCESS_PRIVATE(p, member) ((p)->member)
#endif /* __CHECKER__ */
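
/*
 * Usage sketch (illustrative, not part of the original header): how the
 * sparse annotations above are typically written.  The function and lock
 * names here are hypothetical.
 *
 *    // __user pointers may not be dereferenced directly; sparse warns
 *    // unless they go through copy_from_user() and friends.
 *    static int example_get(int __user *uptr, int *kval)
 *    {
 *            return copy_from_user(kval, uptr, sizeof(*kval)) ? -EFAULT : 0;
 *    }
 *
 *    // __acquires()/__releases() describe the lock-context change, so
 *    // sparse can flag unbalanced locking.
 *    static void example_lock(spinlock_t *lock) __acquires(lock)
 *    {
 *            spin_lock(lock);
 *    }
 *
 *    static void example_unlock(spinlock_t *lock) __releases(lock)
 *    {
 *            spin_unlock(lock);
 *    }
 */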

/* Indirect macros required for expanded argument pasting, e.g. __LINE__. */
#define ___PASTE(a,b) a##b
#define __PASTE(a,b) ___PASTE(a,b)

#ifdef __KERNEL__

#ifdef __GNUC__
#include <linux/compiler-gcc.h>
#endif

#if defined(CC_USING_HOTPATCH) && !defined(__CHECKER__)
#define notrace __attribute__((hotpatch(0,0)))
#else
#define notrace __attribute__((no_instrument_function))
#endif

/*
 * The Intel compiler also defines __GNUC__, so we override any
 * implementations coming from the header file included above.
 */
#ifdef __INTEL_COMPILER
# include <linux/compiler-intel.h>
#endif

/*
 * Clang also defines __GNUC__, so we likewise override any implementations
 * coming from the header files included above.
 */
#ifdef __clang__
#include <linux/compiler-clang.h>
#endif

/*
 * Generic compiler-dependent macros required for the kernel build go below
 * this comment.  Actual compiler- and compiler-version-specific
 * implementations come from the header files above.
 */

struct ftrace_branch_data {
        const char *func;
        const char *file;
        unsigned line;
        union {
                struct {
                        unsigned long correct;
                        unsigned long incorrect;
                };
                struct {
                        unsigned long miss;
                        unsigned long hit;
                };
                unsigned long miss_hit[2];
        };
};

/*
 * Note: DISABLE_BRANCH_PROFILING can be used by special lowlevel code
 * to disable branch tracing on a per file basis.
 */
#if defined(CONFIG_TRACE_BRANCH_PROFILING) \
    && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);

#define likely_notrace(x) __builtin_expect(!!(x), 1)
#define unlikely_notrace(x) __builtin_expect(!!(x), 0)

#define __branch_check__(x, expect) ({ \
                int ______r; \
                static struct ftrace_branch_data \
                        __attribute__((__aligned__(4))) \
                        __attribute__((section("_ftrace_annotated_branch"))) \
                        ______f = { \
                        .func = __func__, \
                        .file = __FILE__, \
                        .line = __LINE__, \
                }; \
                ______r = likely_notrace(x); \
                ftrace_likely_update(&______f, ______r, expect); \
                ______r; \
        })

/*
 * Using __builtin_constant_p(x) to ignore cases where the return
 * value is always the same.  This idea is taken from a similar patch
 * written by Daniel Walker.
 */
# ifndef likely
#  define likely(x) (__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 1))
# endif
# ifndef unlikely
#  define unlikely(x) (__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 0))
# endif

#ifdef CONFIG_PROFILE_ALL_BRANCHES
/*
 * "Define 'is'", Bill Clinton
 * "Define 'if'", Steven Rostedt
 */
#define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
#define __trace_if(cond) \
        if (__builtin_constant_p(!!(cond)) ? !!(cond) : \
        ({ \
                int ______r; \
                static struct ftrace_branch_data \
                        __attribute__((__aligned__(4))) \
                        __attribute__((section("_ftrace_branch"))) \
                        ______f = { \
                                .func = __func__, \
                                .file = __FILE__, \
                                .line = __LINE__, \
                        }; \
                ______r = !!(cond); \
                ______f.miss_hit[______r]++; \
                ______r; \
        }))
#endif /* CONFIG_PROFILE_ALL_BRANCHES */

#else
# define likely(x) __builtin_expect(!!(x), 1)
# define unlikely(x) __builtin_expect(!!(x), 0)
#endif
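
/*
 * Usage sketch (illustrative only): likely()/unlikely() annotate the
 * expected direction of a branch so the compiler can lay out the hot path
 * straight-line; the branch profiling above can check whether such
 * annotations are actually right.
 *
 *    if (unlikely(!ptr))
 *            return -ENOMEM;
 *    if (likely(cnt > 0))
 *            process(ptr, cnt);      // 'process' is hypothetical
 */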

/* Optimization barrier */
#ifndef barrier
# define barrier() __memory_barrier()
#endif

#ifndef barrier_data
# define barrier_data(ptr) barrier()
#endif

/* Unreachable code */
#ifndef unreachable
# define unreachable() do { } while (1)
#endif

#ifndef RELOC_HIDE
# define RELOC_HIDE(ptr, off) \
  ({ unsigned long __ptr; \
     __ptr = (unsigned long) (ptr); \
     (typeof(ptr)) (__ptr + (off)); })
#endif

#ifndef OPTIMIZER_HIDE_VAR
#define OPTIMIZER_HIDE_VAR(var) barrier()
#endif

/* Not-quite-unique ID. */
#ifndef __UNIQUE_ID
# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
#endif
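
/*
 * Illustrative expansion (not from the original header): via the indirect
 * pasting macros above, __UNIQUE_ID(foo) used on line 42 expands to the
 * identifier __UNIQUE_ID_foo42.  It is only "not quite" unique because two
 * expansions with the same prefix on the same line number still collide.
 *
 *    static int __UNIQUE_ID(counter);  // line 42 -> __UNIQUE_ID_counter42
 */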

#include <uapi/linux/types.h>

#define __READ_ONCE_SIZE \
({ \
        switch (size) { \
        case 1: *(__u8 *)res = *(volatile __u8 *)p; break; \
        case 2: *(__u16 *)res = *(volatile __u16 *)p; break; \
        case 4: *(__u32 *)res = *(volatile __u32 *)p; break; \
        case 8: *(__u64 *)res = *(volatile __u64 *)p; break; \
        default: \
                barrier(); \
                __builtin_memcpy((void *)res, (const void *)p, size); \
                barrier(); \
        } \
})

static __always_inline
void __read_once_size(const volatile void *p, void *res, int size)
{
        __READ_ONCE_SIZE;
}

#ifdef CONFIG_KASAN
/*
 * This function is not 'inline' because __no_sanitize_address conflicts
 * with inlining.  Attempting to inline it may cause a build failure.
 * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
 * '__maybe_unused' allows us to avoid defined-but-not-used warnings.
 */
static __no_sanitize_address __maybe_unused
void __read_once_size_nocheck(const volatile void *p, void *res, int size)
{
        __READ_ONCE_SIZE;
}
#else
static __always_inline
void __read_once_size_nocheck(const volatile void *p, void *res, int size)
{
        __READ_ONCE_SIZE;
}
#endif

static __always_inline void __write_once_size(volatile void *p, void *res, int size)
{
        switch (size) {
        case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
        case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
        case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
        case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
        default:
                barrier();
                __builtin_memcpy((void *)p, (const void *)res, size);
                barrier();
        }
}

/*
 * Prevent the compiler from merging or refetching reads or writes.  The
 * compiler is also forbidden from reordering successive instances of
 * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the
 * compiler is aware of some particular ordering.  One way to make the
 * compiler aware of ordering is to put the two invocations of READ_ONCE,
 * WRITE_ONCE or ACCESS_ONCE() in different C statements.
 *
 * In contrast to ACCESS_ONCE these two macros will also work on aggregate
 * data types like structs or unions.  If the size of the accessed data
 * type exceeds the word size of the machine (e.g., 32 bits or 64 bits)
 * READ_ONCE() and WRITE_ONCE() will fall back to memcpy().  There are at
 * least two memcpy()s: one for the __builtin_memcpy() and one for the
 * macro's copy of the variable into the union '__u' allocated on the stack.
 *
 * Their two major use cases are: (1) Mediating communication between
 * process-level code and irq/NMI handlers, all running on the same CPU,
 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
 * mutilate accesses that either do not require ordering or that interact
 * with an explicit memory barrier or atomic instruction that provides the
 * required ordering.
 */

#define __READ_ONCE(x, check) \
({ \
        union { typeof(x) __val; char __c[1]; } __u; \
        if (check) \
                __read_once_size(&(x), __u.__c, sizeof(x)); \
        else \
                __read_once_size_nocheck(&(x), __u.__c, sizeof(x)); \
        __u.__val; \
})
#define READ_ONCE(x) __READ_ONCE(x, 1)

/*
 * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need
 * to hide memory access from KASAN.
 */
#define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0)

#define WRITE_ONCE(x, val) \
({ \
        union { typeof(x) __val; char __c[1]; } __u = \
                { .__val = (__force typeof(x)) (val) }; \
        __write_once_size(&(x), __u.__c, sizeof(x)); \
        __u.__val; \
})
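
/*
 * Usage sketch (illustrative, not from the original header): a flag shared
 * between process context and an interrupt handler.  Without READ_ONCE()/
 * WRITE_ONCE() the compiler could fuse, tear, or hoist the accesses, e.g.
 * reading 'need_work' once and spinning on a stale value.  'need_work' is
 * a hypothetical variable.
 *
 *    static int need_work;
 *
 *    // irq handler:
 *    WRITE_ONCE(need_work, 1);
 *
 *    // process context:
 *    while (!READ_ONCE(need_work))
 *            cpu_relax();
 */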

/**
 * smp_cond_acquire() - Spin wait for cond with ACQUIRE ordering
 * @cond: boolean expression to wait for
 *
 * Equivalent to using smp_load_acquire() on the condition variable but employs
 * the control dependency of the wait to reduce the barrier on many platforms.
 *
 * The control dependency provides a LOAD->STORE order, the additional RMB
 * provides LOAD->LOAD order, together they provide LOAD->{LOAD,STORE} order,
 * aka. ACQUIRE.
 */
#define smp_cond_acquire(cond) do { \
        while (!(cond)) \
                cpu_relax(); \
        smp_rmb(); /* ctrl + rmb := acquire */ \
} while (0)
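
/*
 * Usage sketch (illustrative only): wait until a producer publishes data.
 * The ACQUIRE ordering guarantees that writes made before the producer's
 * release store are visible after the wait.  'data' and 'ready' are
 * hypothetical variables.
 *
 *    // producer:
 *    data = 42;
 *    smp_store_release(&ready, 1);
 *
 *    // consumer:
 *    smp_cond_acquire(READ_ONCE(ready));
 *    BUG_ON(data != 42);       // cannot fire: ordered by the acquire
 */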

#endif /* __KERNEL__ */

#endif /* __ASSEMBLY__ */

#ifdef __KERNEL__
/*
 * Allow us to mark functions as 'deprecated' and have gcc emit a nice
 * warning for each use, in hopes of speeding the function's removal.
 * Usage is:
 *		int __deprecated foo(void)
 */
#ifndef __deprecated
# define __deprecated /* unimplemented */
#endif

#ifdef MODULE
#define __deprecated_for_modules __deprecated
#else
#define __deprecated_for_modules
#endif

#ifndef __must_check
#define __must_check
#endif

#ifndef CONFIG_ENABLE_MUST_CHECK
#undef __must_check
#define __must_check
#endif
#ifndef CONFIG_ENABLE_WARN_DEPRECATED
#undef __deprecated
#undef __deprecated_for_modules
#define __deprecated
#define __deprecated_for_modules
#endif

/*
 * Allow us to avoid 'defined but not used' warnings on functions and data,
 * as well as force them to be emitted to the assembly file.
 *
 * As of gcc 3.4, static functions that are not marked with attribute((used))
 * may be elided from the assembly file.  As of gcc 3.4, static data not so
 * marked will not be elided, but this may change in a future gcc version.
 *
 * NOTE: Because distributions shipped with a backported unit-at-a-time
 * compiler in gcc 3.3, we must define __used to be __attribute__((used))
 * for gcc >=3.3 instead of 3.4.
 *
 * In prior versions of gcc, such functions and data would be emitted, but
 * would be warned about except with attribute((unused)).
 *
 * Mark functions that are referenced only in inline assembly as __used so
 * the code is emitted even though it appears to be unreferenced.
 */
#ifndef __used
# define __used /* unimplemented */
#endif

#ifndef __maybe_unused
# define __maybe_unused /* unimplemented */
#endif

#ifndef __always_unused
# define __always_unused /* unimplemented */
#endif

#ifndef noinline
#define noinline
#endif

/*
 * Rather than using noinline to prevent stack consumption, use
 * noinline_for_stack instead, for documentation reasons.
 */
#define noinline_for_stack noinline

#ifndef __always_inline
#define __always_inline inline
#endif

#endif /* __KERNEL__ */

/*
 * From the GCC manual:
 *
 * Many functions do not examine any values except their arguments,
 * and have no effects except the return value.  Basically this is
 * just slightly more strict class than the `pure' attribute above,
 * since function is not allowed to read global memory.
 *
 * Note that a function that has pointer arguments and examines the
 * data pointed to must _not_ be declared `const'.  Likewise, a
 * function that calls a non-`const' function usually must not be
 * `const'.  It does not make sense for a `const' function to return
 * `void'.
 */
#ifndef __attribute_const__
# define __attribute_const__ /* unimplemented */
#endif
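
/*
 * Illustrative example (not from the original header): a 'const' function
 * reads nothing but its arguments and has no side effects, so the compiler
 * may fold repeated calls with the same argument into one.  The function
 * below is hypothetical.
 *
 *    static int __attribute_const__ triple(int x)
 *    {
 *            return x * 3;     // no global reads, no side effects
 *    }
 */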

/*
 * Tell gcc if a function is cold.  The compiler will assume any path
 * directly leading to the call is unlikely.
 */

#ifndef __cold
#define __cold
#endif

/* Simple shorthand for a section definition */
#ifndef __section
# define __section(S) __attribute__ ((__section__(#S)))
#endif
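
/*
 * Usage sketch (illustrative only): place an object in a named ELF
 * section, e.g. for a table that a linker script collects.  The section
 * name below is hypothetical; note that __section(S) stringifies its
 * argument, so no quotes are needed.
 *
 *    static const char example_tag[] __section(.example.tags) = "demo";
 */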

#ifndef __visible
#define __visible
#endif

/*
 * Assume alignment of return value.
 */
#ifndef __assume_aligned
#define __assume_aligned(a, ...)
#endif

/* Are two types/vars the same type (ignoring qualifiers)? */
#ifndef __same_type
# define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
#endif

/* Is this type a native word size -- useful for atomic operations */
#ifndef __native_word
# define __native_word(t) (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
#endif

/* Compile time object size, -1 for unknown */
#ifndef __compiletime_object_size
# define __compiletime_object_size(obj) -1
#endif
#ifndef __compiletime_warning
# define __compiletime_warning(message)
#endif
#ifndef __compiletime_error
# define __compiletime_error(message)
/*
 * Sparse complains of variable sized arrays due to the temporary variable in
 * __compiletime_assert.  Unfortunately we can't just expand it out to make
 * sparse see a constant array size without breaking compiletime_assert on old
 * versions of GCC (e.g. 4.2.4), so hide the array from sparse altogether.
 */
# ifndef __CHECKER__
#  define __compiletime_error_fallback(condition) \
        do { ((void)sizeof(char[1 - 2 * condition])); } while (0)
# endif
#endif
#ifndef __compiletime_error_fallback
# define __compiletime_error_fallback(condition) do { } while (0)
#endif

#define __compiletime_assert(condition, msg, prefix, suffix) \
        do { \
                bool __cond = !(condition); \
                extern void prefix ## suffix(void) __compiletime_error(msg); \
                if (__cond) \
                        prefix ## suffix(); \
                __compiletime_error_fallback(__cond); \
        } while (0)

#define _compiletime_assert(condition, msg, prefix, suffix) \
        __compiletime_assert(condition, msg, prefix, suffix)

/**
 * compiletime_assert - break build and emit msg if condition is false
 * @condition: a compile-time constant condition to check
 * @msg:       a message to emit if condition is false
 *
 * In the tradition of POSIX assert, this macro will break the build if the
 * supplied condition is *false*, emitting the supplied error message if the
 * compiler has support to do so.
 */
#define compiletime_assert(condition, msg) \
        _compiletime_assert(condition, msg, __compiletime_assert_, __LINE__)

#define compiletime_assert_atomic_type(t) \
        compiletime_assert(__native_word(t), \
                "Need native word sized stores/loads for atomicity.")

/*
 * Prevent the compiler from merging or refetching accesses.  The compiler
 * is also forbidden from reordering successive instances of ACCESS_ONCE(),
 * but only when the compiler is aware of some particular ordering.  One way
 * to make the compiler aware of ordering is to put the two invocations of
 * ACCESS_ONCE() in different C statements.
 *
 * ACCESS_ONCE will only work on scalar types.  For union types, ACCESS_ONCE
 * on a union member will work as long as the size of the member matches the
 * size of the union and the size is smaller than word size.
 *
 * The major use cases of ACCESS_ONCE used to be (1) Mediating communication
 * between process-level code and irq/NMI handlers, all running on the same CPU,
 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
 * mutilate accesses that either do not require ordering or that interact
 * with an explicit memory barrier or atomic instruction that provides the
 * required ordering.
 *
 * If possible use READ_ONCE()/WRITE_ONCE() instead.
 */
#define __ACCESS_ONCE(x) ({ \
        __maybe_unused typeof(x) __var = (__force typeof(x)) 0; \
        (volatile typeof(x) *)&(x); })
#define ACCESS_ONCE(x) (*__ACCESS_ONCE(x))
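
/*
 * Usage sketch (illustrative only): ACCESS_ONCE() yields a volatile
 * lvalue, so it works for both reads and writes of scalars; new code
 * should prefer the READ_ONCE()/WRITE_ONCE() pair above.  'shared_flag'
 * is hypothetical.
 *
 *    while (!ACCESS_ONCE(shared_flag))
 *            cpu_relax();
 *    ACCESS_ONCE(shared_flag) = 0;   // write through the volatile lvalue
 */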

/**
 * lockless_dereference() - safely load a pointer for later dereference
 * @p: The pointer to load
 *
 * Similar to rcu_dereference(), but for situations where the pointed-to
 * object's lifetime is managed by something other than RCU.  That
 * "something other" might be reference counting or simple immortality.
 */
#define lockless_dereference(p) \
({ \
        typeof(p) _________p1 = READ_ONCE(p); \
        smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
        (_________p1); \
})
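
/*
 * Usage sketch (illustrative only): consume a pointer published with
 * smp_store_release().  The dependency barrier orders the pointer load
 * against later dereferences through it.  'global_cfg' and 'use()' are
 * hypothetical.
 *
 *    // publisher:
 *    struct cfg *c = kmalloc(sizeof(*c), GFP_KERNEL);
 *    c->val = 1;
 *    smp_store_release(&global_cfg, c);
 *
 *    // reader (no lock; the object is never freed):
 *    struct cfg *r = lockless_dereference(global_cfg);
 *    if (r)
 *            use(r->val);
 */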

/* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */
#ifdef CONFIG_KPROBES
# define __kprobes __attribute__((__section__(".kprobes.text")))
# define nokprobe_inline __always_inline
#else
# define __kprobes
# define nokprobe_inline inline
#endif
#endif /* __LINUX_COMPILER_H */