#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H

#include <linux/kernel.h>
#include <asm/segment.h>
#include <asm/cpufeature.h>
#include <asm/cmpxchg.h>

#ifdef __KERNEL__

struct task_struct;	/* one of the stranger aspects of C forward declarations.. */
extern struct task_struct * FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next));

/*
 * Saving eflags is important. It switches not only IOPL between tasks,
 * it also protects other tasks from NT leaking through sysenter etc.
 */
#define switch_to(prev,next,last) do {				\
	unsigned long esi,edi;					\
	asm volatile("pushfl\n\t"	/* Save flags */	\
		     "pushl %%ebp\n\t"				\
		     "movl %%esp,%0\n\t"	/* save ESP */	\
		     "movl %5,%%esp\n\t"	/* restore ESP */	\
		     "movl $1f,%1\n\t"		/* save EIP */	\
		     "pushl %6\n\t"		/* restore EIP */	\
		     "jmp __switch_to\n"			\
		     "1:\t"					\
		     "popl %%ebp\n\t"				\
		     "popfl"					\
		     :"=m" (prev->thread.esp),"=m" (prev->thread.eip),	\
		      "=a" (last),"=S" (esi),"=D" (edi)		\
		     :"m" (next->thread.esp),"m" (next->thread.eip),	\
		      "2" (prev), "d" (next));			\
} while (0)
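/*
 * A rough sketch of how the operands above line up, derived only from the
 * asm constraints themselves:
 *
 *   %0/%1  - prev->thread.esp / prev->thread.eip: where the outgoing
 *            task's stack pointer and resume address are saved;
 *   %5/%6  - next->thread.esp / next->thread.eip: restored for the
 *            incoming task;
 *   "2" (prev), "=a" (last) - prev goes in via %eax and the value left
 *            in %eax by __switch_to() comes back out as 'last', so the
 *            caller learns which task it actually switched from;
 *   "d" (next) - next is passed in %edx, matching the FASTCALL
 *            convention of __switch_to().
 *
 * %esi/%edi appear only as dummy outputs so the compiler treats them as
 * clobbered across the switch.
 */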

#define _set_base(addr,base) do { unsigned long __pr; \
__asm__ __volatile__ ("movw %%dx,%1\n\t" \
	"rorl $16,%%edx\n\t" \
	"movb %%dl,%2\n\t" \
	"movb %%dh,%3" \
	:"=&d" (__pr) \
	:"m" (*((addr)+2)), \
	 "m" (*((addr)+4)), \
	 "m" (*((addr)+7)), \
	 "0" (base) \
	); } while(0)

#define _set_limit(addr,limit) do { unsigned long __lr; \
__asm__ __volatile__ ("movw %%dx,%1\n\t" \
	"rorl $16,%%edx\n\t" \
	"movb %2,%%dh\n\t" \
	"andb $0xf0,%%dh\n\t" \
	"orb %%dh,%%dl\n\t" \
	"movb %%dl,%2" \
	:"=&d" (__lr) \
	:"m" (*(addr)), \
	 "m" (*((addr)+6)), \
	 "0" (limit) \
	); } while(0)
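/*
 * For orientation (this is the standard x86 descriptor layout, not
 * anything specific to this file): a segment descriptor is 8 bytes, with
 * base bits 0-15 at byte offsets 2-3, base bits 16-23 at offset 4 and
 * base bits 24-31 at offset 7; the limit occupies bytes 0-1 plus the low
 * nibble of byte 6.  That is why the two macros above write to (addr)+2,
 * +4, +7 and to (addr), +6 respectively.
 */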

#define set_base(ldt,base) _set_base( ((char *)&(ldt)) , (base) )
#define set_limit(ldt,limit) _set_limit( ((char *)&(ldt)) , ((limit)-1) )

/*
 * Load a segment. Fall back on loading the zero
 * segment if something goes wrong..
 */
#define loadsegment(seg,value)			\
	asm volatile("\n"			\
		"1:\t"				\
		"mov %0,%%" #seg "\n"		\
		"2:\n"				\
		".section .fixup,\"ax\"\n"	\
		"3:\t"				\
		"pushl $0\n\t"			\
		"popl %%" #seg "\n\t"		\
		"jmp 2b\n"			\
		".previous\n"			\
		".section __ex_table,\"a\"\n\t"	\
		".align 4\n\t"			\
		".long 1b,3b\n"			\
		".previous"			\
		: :"rm" (value))

/*
 * Save a segment register away
 */
#define savesegment(seg, value) \
	asm volatile("mov %%" #seg ",%0":"=rm" (value))

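/*
 * Hypothetical usage sketch (the variable name and the choice of %fs are
 * illustrative only, not taken from any particular caller):
 *
 *	unsigned int old_fs;
 *	savesegment(fs, old_fs);	-- stash the current %fs selector
 *	loadsegment(fs, 0);		-- load a new one; a bad selector
 *					   falls back to the null segment
 *
 * loadsegment() relies on the __ex_table fixup above: if the "mov" at
 * label 1: faults, execution resumes at 3:, which push/pops 0 into the
 * segment register and jumps back to 2:.
 */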

static inline void native_clts(void)
{
	asm volatile ("clts");
}

static inline unsigned long native_read_cr0(void)
{
	unsigned long val;
	asm volatile("movl %%cr0,%0\n\t" :"=r" (val));
	return val;
}

static inline void native_write_cr0(unsigned long val)
{
	asm volatile("movl %0,%%cr0": :"r" (val));
}

static inline unsigned long native_read_cr2(void)
{
	unsigned long val;
	asm volatile("movl %%cr2,%0\n\t" :"=r" (val));
	return val;
}

static inline void native_write_cr2(unsigned long val)
{
	asm volatile("movl %0,%%cr2": :"r" (val));
}

static inline unsigned long native_read_cr3(void)
{
	unsigned long val;
	asm volatile("movl %%cr3,%0\n\t" :"=r" (val));
	return val;
}

static inline void native_write_cr3(unsigned long val)
{
	asm volatile("movl %0,%%cr3": :"r" (val));
}

static inline unsigned long native_read_cr4(void)
{
	unsigned long val;
	asm volatile("movl %%cr4,%0\n\t" :"=r" (val));
	return val;
}

static inline unsigned long native_read_cr4_safe(void)
{
	unsigned long val;
	/* This could fault if %cr4 does not exist */
	asm("1: movl %%cr4, %0		\n"
		"2:			\n"
		".section __ex_table,\"a\"	\n"
		".long 1b,2b		\n"
		".previous		\n"
		: "=r" (val): "0" (0));
	return val;
}
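/*
 * Note on the fixup above: on CPUs that predate CR4 the read may fault.
 * The __ex_table entry makes the fault resume at label 2:, and because
 * the output register is pre-loaded with 0 (the "0" (0) input), the
 * function then simply returns 0 on such CPUs.
 */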

static inline void native_write_cr4(unsigned long val)
{
	asm volatile("movl %0,%%cr4": :"r" (val));
}

static inline void native_wbinvd(void)
{
	asm volatile("wbinvd": : :"memory");
}

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define read_cr0()	(native_read_cr0())
#define write_cr0(x)	(native_write_cr0(x))
#define read_cr2()	(native_read_cr2())
#define write_cr2(x)	(native_write_cr2(x))
#define read_cr3()	(native_read_cr3())
#define write_cr3(x)	(native_write_cr3(x))
#define read_cr4()	(native_read_cr4())
#define read_cr4_safe()	(native_read_cr4_safe())
#define write_cr4(x)	(native_write_cr4(x))
#define wbinvd()	(native_wbinvd())

/* Clear the 'TS' bit */
#define clts()		(native_clts())

#endif	/* CONFIG_PARAVIRT */

/* Set the 'TS' bit */
#define stts() write_cr0(8 | read_cr0())
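/*
 * The 8 above is CR0.TS (bit 3).  clts() clears it with the dedicated
 * instruction, while stts() sets it by read-modify-writing CR0; setting
 * TS re-arms the "device not available" trap used for lazy FPU state
 * switching.
 */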

#endif	/* __KERNEL__ */

static inline unsigned long get_limit(unsigned long segment)
{
	unsigned long __limit;
	__asm__("lsll %1,%0"
		:"=r" (__limit):"r" (segment));
	return __limit+1;
}
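/*
 * lsl returns the segment limit in bytes (the CPU scales page-granular
 * limits), i.e. the highest valid offset, so the +1 turns it into the
 * segment size.
 */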

#define nop() __asm__ __volatile__ ("nop")

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 *
 * For now, "wmb()" doesn't actually do anything, as all
 * Intel CPUs follow what Intel calls a *Processor Order*,
 * in which all writes are seen in program order even
 * outside the CPU.
 *
 * I expect future Intel CPUs to have a weaker ordering,
 * but I'd also expect them to finally get their act together
 * and add some real memory barriers if so.
 *
 * Some non-Intel clones support out-of-order stores; wmb()
 * ceases to be a nop for these.
 */


/*
 * Actually only lfence would be needed for mb() because all stores done
 * by the kernel should already be ordered. But keep a full barrier for now.
 */

#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)

224 | ||
225 | /** | |
226 | * read_barrier_depends - Flush all pending reads that subsequents reads | |
227 | * depend on. | |
228 | * | |
229 | * No data-dependent reads from memory-like regions are ever reordered | |
230 | * over this barrier. All reads preceding this primitive are guaranteed | |
231 | * to access memory (but not necessarily other CPUs' caches) before any | |
232 | * reads following this primitive that depend on the data return by | |
233 | * any of the preceding reads. This primitive is much lighter weight than | |
234 | * rmb() on most CPUs, and is never heavier weight than is | |
235 | * rmb(). | |
236 | * | |
237 | * These ordering constraints are respected by both the local CPU | |
238 | * and the compiler. | |
239 | * | |
240 | * Ordering is not guaranteed by anything other than these primitives, | |
241 | * not even by data dependencies. See the documentation for | |
242 | * memory_barrier() for examples and URLs to more information. | |
243 | * | |
244 | * For example, the following code would force ordering (the initial | |
245 | * value of "a" is zero, "b" is one, and "p" is "&a"): | |
246 | * | |
247 | * <programlisting> | |
248 | * CPU 0 CPU 1 | |
249 | * | |
250 | * b = 2; | |
251 | * memory_barrier(); | |
252 | * p = &b; q = p; | |
253 | * read_barrier_depends(); | |
254 | * d = *q; | |
255 | * </programlisting> | |
256 | * | |
257 | * because the read of "*q" depends on the read of "p" and these | |
258 | * two reads are separated by a read_barrier_depends(). However, | |
259 | * the following code, with the same initial values for "a" and "b": | |
260 | * | |
261 | * <programlisting> | |
262 | * CPU 0 CPU 1 | |
263 | * | |
264 | * a = 2; | |
265 | * memory_barrier(); | |
266 | * b = 3; y = b; | |
267 | * read_barrier_depends(); | |
268 | * x = a; | |
269 | * </programlisting> | |
270 | * | |
271 | * does not enforce ordering, since there is no data dependency between | |
272 | * the read of "a" and the read of "b". Therefore, on some CPUs, such | |
273 | * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb() | |
d6e05edc | 274 | * in cases like this where there are no data dependencies. |
1da177e4 LT |
275 | **/ |
276 | ||
277 | #define read_barrier_depends() do { } while(0) | |
278 | ||
#ifdef CONFIG_X86_OOSTORE
/* Actually there are no out-of-order-store capable CPUs for now that do
   SSE, but make it a possibility already. */
#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
#else
#define wmb()	__asm__ __volatile__ ("": : :"memory")
#endif

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while(0)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif
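/*
 * set_mb(var, value) assigns and then acts as a full barrier.  On SMP it
 * uses xchg(), which carries an implicit lock prefix on x86 and so orders
 * the store against surrounding memory accesses; on UP a compiler barrier
 * is enough.  Illustrative use (the variable name is made up, and readers
 * still need their own ordering on the other side):
 *
 *	set_mb(flag, 1);
 */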

#include <linux/irqflags.h>

/*
 * disable hlt during certain critical i/o operations
 */
#define HAVE_DISABLE_HLT
void disable_hlt(void);
void enable_hlt(void);

extern int es7000_plat;
void cpu_idle_wait(void);

extern unsigned long arch_align_stack(unsigned long sp);
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);

void default_idle(void);

#endif