arch/x86/include/asm/barrier.h
#ifndef _ASM_X86_BARRIER_H
#define _ASM_X86_BARRIER_H

#include <asm/alternative.h>
#include <asm/nops.h>

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 */

#ifdef CONFIG_X86_32
/*
 * Some non-Intel clones support out-of-order stores. wmb() ceases to be
 * a nop for these.
 */
#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
#else
#define mb() asm volatile("mfence" ::: "memory")
#define rmb() asm volatile("lfence" ::: "memory")
#define wmb() asm volatile("sfence" ::: "memory")
#endif
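
/*
 * Illustrative sketch, not part of the original header: as the comment
 * above notes, even on UP a driver writing a command and then a "go"
 * flag into memory shared with a device needs wmb() so the device can
 * never observe the flag before the command. The structure and field
 * names here are hypothetical.
 *
 *	shared->cmd = CMD_START;
 *	wmb();
 *	shared->go = 1;
 */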

#ifdef CONFIG_X86_PPRO_FENCE
#define dma_rmb() rmb()
#else
#define dma_rmb() barrier()
#endif
#define dma_wmb() barrier()
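
/*
 * Illustrative sketch with a hypothetical descriptor layout: dma_wmb()
 * orders the CPU's writes to a DMA descriptor before the write that
 * hands ownership to the device, and dma_rmb() orders the ownership
 * check before reads of the descriptor contents.
 *
 *	Publish a descriptor:
 *		desc->addr = buf_dma;
 *		desc->len = buf_len;
 *		dma_wmb();
 *		desc->status = DESC_HW_OWNED;
 *
 *	Reap a completed descriptor:
 *		if (!(READ_ONCE(desc->status) & DESC_HW_OWNED)) {
 *			dma_rmb();
 *			process(desc->addr, desc->len);
 *		}
 */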

#ifdef CONFIG_SMP
#define smp_mb() mb()
#define smp_rmb() dma_rmb()
#define smp_wmb() barrier()
#define smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)
#else /* !SMP */
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); barrier(); } while (0)
#endif /* SMP */
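
/*
 * Illustrative sketch: smp_store_mb() is a store followed by a full
 * barrier, the classic "set state, then re-check the condition" pattern
 * in sleep/wake-up code ("condition" is a hypothetical stand-in here).
 * On SMP the xchg() provides the full barrier; on UP a compiler barrier
 * suffices.
 *
 *	smp_store_mb(current->state, TASK_INTERRUPTIBLE);
 *	if (!condition)
 *		schedule();
 */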

#define read_barrier_depends() do { } while (0)
#define smp_read_barrier_depends() do { } while (0)

#if defined(CONFIG_X86_PPRO_FENCE)

/*
 * With this option we cannot assume the strong x86 TSO memory model and
 * must fall back to full barriers.
 */

#define smp_store_release(p, v)					\
do {								\
	compiletime_assert_atomic_type(*p);			\
	smp_mb();						\
	WRITE_ONCE(*p, v);					\
} while (0)

#define smp_load_acquire(p)					\
({								\
	typeof(*p) ___p1 = READ_ONCE(*p);			\
	compiletime_assert_atomic_type(*p);			\
	smp_mb();						\
	___p1;							\
})

#else /* regular x86 TSO memory ordering */

#define smp_store_release(p, v)					\
do {								\
	compiletime_assert_atomic_type(*p);			\
	barrier();						\
	WRITE_ONCE(*p, v);					\
} while (0)

#define smp_load_acquire(p)					\
({								\
	typeof(*p) ___p1 = READ_ONCE(*p);			\
	compiletime_assert_atomic_type(*p);			\
	barrier();						\
	___p1;							\
})

#endif
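
/*
 * Illustrative sketch with hypothetical "data"/"ready" variables: a
 * release store pairs with an acquire load, so a consumer that observes
 * ready == 1 is guaranteed to also observe data == 42. Under regular
 * x86 TSO both sides compile to a plain store/load plus a compiler
 * barrier; no fence instruction is emitted.
 *
 *	CPU 0 (producer):
 *		data = 42;
 *		smp_store_release(&ready, 1);
 *
 *	CPU 1 (consumer):
 *		while (!smp_load_acquire(&ready))
 *			cpu_relax();
 *		r = data;	(guaranteed: r == 42)
 */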

/* Atomic operations are already serializing on x86 */
#define smp_mb__before_atomic() barrier()
#define smp_mb__after_atomic() barrier()
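
/*
 * Illustrative sketch with a hypothetical refcounted object: because
 * x86 LOCK-prefixed read-modify-write instructions are already fully
 * ordered, only a compiler barrier is needed around atomic ops. The
 * store to obj->dead cannot be reordered past the atomic_dec().
 *
 *	obj->dead = 1;
 *	smp_mb__before_atomic();
 *	atomic_dec(&obj->ref_count);
 */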

#endif /* _ASM_X86_BARRIER_H */