#ifndef _ASM_X86_BARRIER_H
#define _ASM_X86_BARRIER_H

#include <asm/alternative.h>
/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 */

#ifdef CONFIG_X86_32
/*
 * Some non-Intel clones support out of order store. wmb() ceases to be a
 * nop for these.
 */
#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
#else
#define mb()	asm volatile("mfence" ::: "memory")
#define rmb()	asm volatile("lfence" ::: "memory")
#define wmb()	asm volatile("sfence" ::: "memory")
#endif
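
/*
 * Note on the definitions above: on 32-bit kernels the safe default is a
 * LOCKed add to the top of the stack, which acts as a full barrier on any
 * x86 CPU; alternative() patches in MFENCE/LFENCE/SFENCE at boot when the
 * CPU advertises the corresponding SSE feature bits (X86_FEATURE_XMM2 /
 * X86_FEATURE_XMM).  64-bit CPUs always have the fence instructions, so
 * they are used directly.
 */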

#ifdef CONFIG_X86_PPRO_FENCE
#define dma_rmb()	rmb()
#else
#define dma_rmb()	barrier()
#endif
#define dma_wmb()	barrier()
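
/*
 * Illustrative sketch only (struct desc, DEVICE_OWN and the data fields
 * are made-up names, not part of this header): the typical use of
 * dma_rmb()/dma_wmb() with a descriptor ring in cache-coherent memory
 * shared with a device.
 *
 *	if (desc->status != DEVICE_OWN) {
 *		dma_rmb();			// don't read data before we own it
 *		read_data = desc->data;
 *		desc->data = write_data;
 *		dma_wmb();			// publish data before handing back
 *		desc->status = DEVICE_OWN;
 *	}
 *
 * On x86 these expand to barrier() (or rmb() with CONFIG_X86_PPRO_FENCE),
 * since coherent DMA accesses are already ordered by the hardware; only
 * compiler reordering has to be prevented.
 */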

#define __smp_mb()	mb()
#define __smp_rmb()	dma_rmb()
#define __smp_wmb()	barrier()
#define __smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)
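
/*
 * __smp_store_mb() relies on xchg(): XCHG with a memory operand is
 * implicitly LOCKed on x86, so the single instruction is both the store
 * and a full memory barrier.
 */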

#if defined(CONFIG_X86_PPRO_FENCE)

/*
 * For this option x86 doesn't have a strong TSO memory
 * model and we should fall back to full barriers.
 */

#define __smp_store_release(p, v)				\
do {								\
	compiletime_assert_atomic_type(*p);			\
	__smp_mb();						\
	WRITE_ONCE(*p, v);					\
} while (0)

#define __smp_load_acquire(p)					\
({								\
	typeof(*p) ___p1 = READ_ONCE(*p);			\
	compiletime_assert_atomic_type(*p);			\
	__smp_mb();						\
	___p1;							\
})

#else /* regular x86 TSO memory ordering */

#define __smp_store_release(p, v)				\
do {								\
	compiletime_assert_atomic_type(*p);			\
	barrier();						\
	WRITE_ONCE(*p, v);					\
} while (0)

#define __smp_load_acquire(p)					\
({								\
	typeof(*p) ___p1 = READ_ONCE(*p);			\
	compiletime_assert_atomic_type(*p);			\
	barrier();						\
	___p1;							\
})

#endif
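
/*
 * Illustrative sketch (data, ready and do_something() are made-up names;
 * the smp_store_release()/smp_load_acquire() wrappers are provided by
 * asm-generic/barrier.h, included below, on top of the macros above):
 *
 *	producer:				consumer:
 *		data = 42;			if (smp_load_acquire(&ready))
 *		smp_store_release(&ready, 1);		do_something(data);
 *
 * x86's TSO ordering already gives plain stores release semantics and
 * plain loads acquire semantics, so in the non-PPRO case only barrier()
 * is needed to keep the compiler from reordering.
 */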

/* Atomic operations are already serializing on x86 */
#define __smp_mb__before_atomic()	barrier()
#define __smp_mb__after_atomic()	barrier()
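
/*
 * Even the non-value-returning atomics (e.g. atomic_inc()) use LOCK
 * prefixed instructions on x86 and therefore already imply a full memory
 * barrier, so these helpers only have to stop compiler reordering.
 */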

#include <asm-generic/barrier.h>

#endif /* _ASM_X86_BARRIER_H */