/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2006 by Ralf Baechle (ralf@linux-mips.org)
 */
#ifndef __ASM_BARRIER_H
#define __ASM_BARRIER_H

/*
 * read_barrier_depends - Flush all pending reads that subsequent reads
 * depend on.
 *
 * No data-dependent reads from memory-like regions are ever reordered
 * over this barrier. All reads preceding this primitive are guaranteed
 * to access memory (but not necessarily other CPUs' caches) before any
 * reads following this primitive that depend on the data returned by
 * any of the preceding reads. This primitive is much lighter weight than
 * rmb() on most CPUs, and is never heavier weight than rmb().
 *
 * These ordering constraints are respected by both the local CPU
 * and the compiler.
 *
 * Ordering is not guaranteed by anything other than these primitives,
 * not even by data dependencies. See the documentation for
 * memory_barrier() for examples and URLs to more information.
 *
 * For example, the following code would force ordering (the initial
 * value of "a" is zero, "b" is one, and "p" is "&a"):
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	b = 2;
 *	memory_barrier();
 *	p = &b;				q = p;
 *					read_barrier_depends();
 *					d = *q;
 * </programlisting>
 *
 * because the read of "*q" depends on the read of "p" and these
 * two reads are separated by a read_barrier_depends(). However,
 * the following code, with the same initial values for "a" and "b":
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	a = 2;
 *	memory_barrier();
 *	b = 3;				y = b;
 *					read_barrier_depends();
 *					x = a;
 * </programlisting>
 *
 * does not enforce ordering, since there is no data dependency between
 * the read of "a" and the read of "b". Therefore, on some CPUs, such
 * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
 * in cases like this where there are no data dependencies.
 */

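/*
 * MIPS, unlike Alpha, is assumed to keep data-dependent loads ordered in
 * hardware, so both variants below expand to nothing.
 */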
#define read_barrier_depends()		do { } while(0)
#define smp_read_barrier_depends()	do { } while(0)

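/*
 * __sync() issues the SYNC instruction, which acts as a full ordering
 * barrier for loads and stores. ".set mips2" lets the assembler accept
 * SYNC even when building for an older ISA, since the instruction first
 * appeared with MIPS II; where the CPU has no SYNC the macro is a no-op.
 */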
#ifdef CONFIG_CPU_HAS_SYNC
#define __sync()				\
	__asm__ __volatile__(			\
		".set push\n\t"			\
		".set noreorder\n\t"		\
		".set mips2\n\t"		\
		"sync\n\t"			\
		".set pop"			\
		: /* no output */		\
		: /* no input */		\
		: "memory")
#else
#define __sync()	do { } while(0)
#endif

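/*
 * __fast_iob() performs a throw-away uncached load from the KSEG1 window;
 * the idea is that the read cannot complete until previously posted writes
 * have drained, which is why fast_iob() below pairs it with __sync().
 */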
#define __fast_iob()				\
	__asm__ __volatile__(			\
		".set push\n\t"			\
		".set noreorder\n\t"		\
		"lw $0,%0\n\t"			\
		"nop\n\t"			\
		".set pop"			\
		: /* no output */		\
		: "m" (*(int *)CKSEG1)		\
		: "memory")

#define fast_wmb()	__sync()
#define fast_rmb()	__sync()
#define fast_mb()	__sync()
#define fast_iob()				\
	do {					\
		__sync();			\
		__fast_iob();			\
	} while (0)

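/*
 * Systems with an external write buffer (CONFIG_CPU_HAS_WB) drain it with
 * wbflush() for full barriers and I/O ordering; everything else uses the
 * SYNC-based fast_*() variants above.
 */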
#ifdef CONFIG_CPU_HAS_WB

#include <asm/wbflush.h>

#define wmb()		fast_wmb()
#define rmb()		fast_rmb()
#define mb()		wbflush()
#define iob()		wbflush()

#else /* !CONFIG_CPU_HAS_WB */

#define wmb()		fast_wmb()
#define rmb()		fast_rmb()
#define mb()		fast_mb()
#define iob()		fast_iob()

#endif /* !CONFIG_CPU_HAS_WB */

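/*
 * On weakly ordered SMP systems the smp_*() barriers below must emit a
 * real SYNC; otherwise the empty string leaves only the compiler barrier
 * implied by the "memory" clobber.
 */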
#if defined(CONFIG_WEAK_ORDERING) && defined(CONFIG_SMP)
#define __WEAK_ORDERING_MB	" sync \n"
#else
#define __WEAK_ORDERING_MB	" \n"
#endif
#if defined(CONFIG_WEAK_REORDERING_BEYOND_LLSC) && defined(CONFIG_SMP)
#define __WEAK_LLSC_MB		" sync \n"
#else
#define __WEAK_LLSC_MB		" \n"
#endif

#define smp_mb()	__asm__ __volatile__(__WEAK_ORDERING_MB : : :"memory")
#define smp_rmb()	__asm__ __volatile__(__WEAK_ORDERING_MB : : :"memory")
#define smp_wmb()	__asm__ __volatile__(__WEAK_ORDERING_MB : : :"memory")

#define set_mb(var, value) \
	do { var = value; smp_mb(); } while (0)

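/*
 * Usage sketch: a typical producer/consumer pairing of the primitives
 * above, in the style of the examples earlier in this file ("data" and
 * "ready" are hypothetical shared variables, both initially zero):
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	data = 42;
 *	smp_wmb();
 *	ready = 1;			while (!ready)
 *						;
 *					smp_rmb();
 *					d = data;
 * </programlisting>
 *
 * smp_wmb() orders the store to "data" before the store to "ready", and
 * smp_rmb() orders the load of "ready" before the load of "data", so CPU 1
 * cannot observe ready == 1 and then read a stale zero from "data".
 */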
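/*
 * Barriers for use around ll/sc sequences: when the core can reorder memory
 * accesses beyond a completed ll/sc pair (CONFIG_WEAK_REORDERING_BEYOND_LLSC
 * on SMP) these emit a SYNC, otherwise they are plain compiler barriers.
 */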
#define smp_llsc_mb()	__asm__ __volatile__(__WEAK_LLSC_MB : : :"memory")
#define smp_llsc_rmb()	__asm__ __volatile__(__WEAK_LLSC_MB : : :"memory")
#define smp_llsc_wmb()	__asm__ __volatile__(__WEAK_LLSC_MB : : :"memory")

#endif /* __ASM_BARRIER_H */