#ifndef __ASM_SH_SYSTEM_H
#define __ASM_SH_SYSTEM_H

/*
 * Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima
 * Copyright (C) 2002 Paul Mundt
 */

#include <linux/irqflags.h>
#include <linux/compiler.h>
#include <asm/types.h>
#include <asm/ptrace.h>

/*
 * switch_to(prev, next, last) should switch from task "prev" to task
 * "next", with "last" receiving the task we actually switched away from.
 */

#define switch_to(prev, next, last) do {				\
	struct task_struct *__last;					\
	register unsigned long *__ts1 __asm__ ("r1") = &prev->thread.sp; \
	register unsigned long *__ts2 __asm__ ("r2") = &prev->thread.pc; \
	register unsigned long *__ts4 __asm__ ("r4") = (unsigned long *)prev; \
	register unsigned long *__ts5 __asm__ ("r5") = (unsigned long *)next; \
	register unsigned long *__ts6 __asm__ ("r6") = &next->thread.sp; \
	register unsigned long __ts7 __asm__ ("r7") = next->thread.pc;	\
	__asm__ __volatile__ (".balign 4\n\t"				\
			      "stc.l	gbr, @-r15\n\t"			\
			      "sts.l	pr, @-r15\n\t"			\
			      "mov.l	r8, @-r15\n\t"			\
			      "mov.l	r9, @-r15\n\t"			\
			      "mov.l	r10, @-r15\n\t"			\
			      "mov.l	r11, @-r15\n\t"			\
			      "mov.l	r12, @-r15\n\t"			\
			      "mov.l	r13, @-r15\n\t"			\
			      "mov.l	r14, @-r15\n\t"			\
			      "mov.l	r15, @r1	! save SP\n\t"	\
			      "mov.l	@r6, r15	! change to new stack\n\t" \
			      "mova	1f, %0\n\t"			\
			      "mov.l	%0, @r2		! save PC\n\t"	\
			      "mov.l	2f, %0\n\t"			\
			      "jmp	@%0		! call __switch_to\n\t" \
			      " lds	r7, pr		!  with return to new PC\n\t" \
			      ".balign	4\n"				\
			      "2:\n\t"					\
			      ".long	__switch_to\n"			\
			      "1:\n\t"					\
			      "mov.l	@r15+, r14\n\t"			\
			      "mov.l	@r15+, r13\n\t"			\
			      "mov.l	@r15+, r12\n\t"			\
			      "mov.l	@r15+, r11\n\t"			\
			      "mov.l	@r15+, r10\n\t"			\
			      "mov.l	@r15+, r9\n\t"			\
			      "mov.l	@r15+, r8\n\t"			\
			      "lds.l	@r15+, pr\n\t"			\
			      "ldc.l	@r15+, gbr\n\t"			\
			      : "=z" (__last)				\
			      : "r" (__ts1), "r" (__ts2), "r" (__ts4),	\
				"r" (__ts5), "r" (__ts6), "r" (__ts7)	\
			      : "r3", "t");				\
	last = __last;							\
} while (0)

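/*
 * Illustrative sketch only (not part of this header): the scheduler's
 * context-switch path invokes the macro roughly like this, with "prev"
 * and "next" being the outgoing and incoming tasks.
 */
#if 0
static void example_context_switch(struct task_struct *prev,
				   struct task_struct *next)
{
	struct task_struct *last;

	switch_to(prev, next, last);
	/* "last" now holds the task we actually switched away from */
}
#endif
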
/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible.
 *
 * TODO: fill this in!
 */
static inline void sched_cacheflush(void)
{
}

#ifdef CONFIG_CPU_SH4A
#define __icbi()			\
{					\
	unsigned long __addr;		\
	__addr = 0xa8000000;		\
	__asm__ __volatile__(		\
		"icbi %0\n\t"		\
		: /* no output */	\
		: "m" (__m(__addr)));	\
}
#endif

/*
 * A brief note on ctrl_barrier(), the control register write barrier.
 *
 * Legacy SH cores typically require a sequence of 8 nops after
 * modification of a control register in order for the changes to take
 * effect. On newer cores (like the sh4a and sh5) this is accomplished
 * with icbi.
 *
 * Also note that on sh4a in the icbi case we can forego a synco for the
 * write barrier, as it's not necessary for control registers.
 *
 * Historically we have only done this type of barrier for the MMUCR, but
 * it's also necessary for the CCR, so we make it generic here instead.
 */
#ifdef CONFIG_CPU_SH4A
#define mb()		__asm__ __volatile__ ("synco": : :"memory")
#define rmb()		mb()
#define wmb()		__asm__ __volatile__ ("synco": : :"memory")
#define ctrl_barrier()	__icbi()
#define read_barrier_depends()	do { } while(0)
#else
#define mb()		__asm__ __volatile__ ("": : :"memory")
#define rmb()		mb()
#define wmb()		__asm__ __volatile__ ("": : :"memory")
#define ctrl_barrier()	__asm__ __volatile__ ("nop;nop;nop;nop;nop;nop;nop;nop")
#define read_barrier_depends()	do { } while(0)
#endif
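
/*
 * Illustrative sketch only: ctrl_barrier() is what makes a control
 * register update take effect before the next dependent operation.
 * Assuming the ctrl_outl()/MMUCR definitions from the surrounding SH
 * headers, a caller would look roughly like this:
 */
#if 0
static inline void example_update_mmucr(unsigned long val)
{
	ctrl_outl(val, MMUCR);	/* store to the control register */
	ctrl_barrier();		/* 8 nops on legacy parts, icbi on sh4a */
}
#endif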

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while(0)
#endif

#define set_mb(var, value) do { xchg(&var, value); } while (0)

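/*
 * Illustrative sketch only: the classic producer/consumer pairing these
 * barriers exist for; "example_data" and "example_flag" are hypothetical
 * shared variables.
 */
#if 0
static int example_data;
static volatile int example_flag;

static void example_producer(int v)
{
	example_data = v;
	smp_wmb();		/* publish the data before the flag */
	example_flag = 1;
}

static int example_consumer(void)
{
	while (!example_flag)
		;		/* wait for the flag */
	smp_rmb();		/* order the flag read before the data read */
	return example_data;
}
#endif
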
/*
 * Jump to the P2 (uncached) area.
 * When manipulating the TLB or caches, we need to run from the P2 area,
 * where instruction fetches bypass the cache.
 */
#define jump_to_P2()				\
do {						\
	unsigned long __dummy;			\
	__asm__ __volatile__(			\
		"mov.l	1f, %0\n\t"		\
		"or	%1, %0\n\t"		\
		"jmp	@%0\n\t"		\
		" nop\n\t"			\
		".balign 4\n"			\
		"1:	.long 2f\n"		\
		"2:"				\
		: "=&r" (__dummy)		\
		: "r" (0x20000000));		\
} while (0)

145 | ||
146 | /* | |
147 | * Back to P1 area. | |
148 | */ | |
149 | #define back_to_P1() \ | |
150 | do { \ | |
151 | unsigned long __dummy; \ | |
29847622 | 152 | ctrl_barrier(); \ |
1da177e4 | 153 | __asm__ __volatile__( \ |
1da177e4 LT |
154 | "mov.l 1f, %0\n\t" \ |
155 | "jmp @%0\n\t" \ | |
156 | " nop\n\t" \ | |
157 | ".balign 4\n" \ | |
158 | "1: .long 2f\n" \ | |
159 | "2:" \ | |
160 | : "=&r" (__dummy)); \ | |
161 | } while (0) | |
162 | ||
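/*
 * Illustrative sketch only: the usual pattern is to enter P2, perform
 * the uncached cache/TLB accesses, and then return to P1. The body of
 * the example is a stand-in for the real work.
 */
#if 0
static inline void example_cache_op(void)
{
	jump_to_P2();
	/* ... poke CCR / cache address arrays via uncached mappings ... */
	back_to_P1();
}
#endif
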
static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
{
	unsigned long flags, retval;

	local_irq_save(flags);
	retval = *m;
	*m = val;
	local_irq_restore(flags);
	return retval;
}

static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val)
{
	unsigned long flags, retval;

	local_irq_save(flags);
	retval = *m;
	*m = val & 0xff;
	local_irq_restore(flags);
	return retval;
}

extern void __xchg_called_with_bad_pointer(void);

#define __xchg(ptr, x, size)				\
({							\
	unsigned long __xchg__res;			\
	volatile void *__xchg_ptr = (ptr);		\
	switch (size) {					\
	case 4:						\
		__xchg__res = xchg_u32(__xchg_ptr, x);	\
		break;					\
	case 1:						\
		__xchg__res = xchg_u8(__xchg_ptr, x);	\
		break;					\
	default:					\
		__xchg_called_with_bad_pointer();	\
		__xchg__res = x;			\
		break;					\
	}						\
							\
	__xchg__res;					\
})

#define xchg(ptr,x)	\
	((__typeof__(*(ptr)))__xchg((ptr),(unsigned long)(x), sizeof(*(ptr))))

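/*
 * Illustrative sketch only: xchg() returning the old value makes a
 * trivial test-and-set lock; "example_lock" is a hypothetical variable.
 */
#if 0
static volatile unsigned int example_lock;

static inline void example_acquire(void)
{
	while (xchg(&example_lock, 1) != 0)
		;	/* old value was 1, someone else holds the lock */
}

static inline void example_release(void)
{
	example_lock = 0;
}
#endif
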
static inline unsigned long __cmpxchg_u32(volatile int *m, unsigned long old,
	unsigned long new)
{
	__u32 retval;
	unsigned long flags;

	local_irq_save(flags);
	retval = *m;
	if (retval == old)
		*m = new;
	local_irq_restore(flags);	/* implies memory barrier */
	return retval;
}

/*
 * This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid cmpxchg().
 */
extern void __cmpxchg_called_with_bad_pointer(void);

#define __HAVE_ARCH_CMPXCHG 1

static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
	unsigned long new, int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32(ptr, old, new);
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr,o,n)						\
({									\
	__typeof__(*(ptr)) _o_ = (o);					\
	__typeof__(*(ptr)) _n_ = (n);					\
	(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,	\
				       (unsigned long)_n_, sizeof(*(ptr))); \
})

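/*
 * Illustrative sketch only: the canonical cmpxchg() retry loop, shown
 * here as a hypothetical lock-free counter increment.
 */
#if 0
static inline void example_atomic_inc(volatile int *counter)
{
	int old;

	do {
		old = *counter;
	} while (cmpxchg(counter, old, old + 1) != old);
}
#endif
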
extern void die(const char *str, struct pt_regs *regs, long err) __attribute__ ((noreturn));

extern void *set_exception_table_vec(unsigned int vec, void *handler);

static inline void *set_exception_table_evt(unsigned int evt, void *handler)
{
	return set_exception_table_vec(evt >> 5, handler);
}

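/*
 * Illustrative note: exception event codes are spaced 0x20 apart, which
 * is why the evt -> vector conversion above is a right shift by 5.
 * Hypothetical usage, with "example_trap_handler" as a stand-in:
 */
#if 0
extern void example_trap_handler(void);

static void example_install_handler(void)
{
	/* 0x1e0 is an arbitrary event code used purely for illustration */
	set_exception_table_evt(0x1e0, example_trap_handler);
}
#endif
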
/*
 * SH-2A has both 16 and 32-bit opcodes, so the instruction size has to
 * be determined by checking the encoding; everything else uses fixed
 * 16-bit opcodes.
 */
#ifdef CONFIG_CPU_SH2A
extern unsigned int instruction_size(unsigned int insn);
#else
#define instruction_size(insn)	(2)
#endif

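/*
 * Illustrative sketch only: a hypothetical helper that computes the
 * address of the next instruction, e.g. for single-stepping.
 */
#if 0
static inline unsigned long example_next_pc(struct pt_regs *regs)
{
	unsigned int insn = *(unsigned short *)regs->pc;

	return regs->pc + instruction_size(insn);
}
#endif
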
/* XXX
 * disable hlt during certain critical I/O operations
 */
#define HAVE_DISABLE_HLT
void disable_hlt(void);
void enable_hlt(void);

#define arch_align_stack(x) (x)

#endif /* __ASM_SH_SYSTEM_H */