/*
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 */
#ifndef __PPC_SYSTEM_H
#define __PPC_SYSTEM_H

#include <linux/config.h>
#include <linux/kernel.h>

#include <asm/atomic.h>
#include <asm/hw_irq.h>

/*
 * Memory barrier.
 * The sync instruction guarantees that all memory accesses initiated
 * by this processor have been performed (with respect to all other
 * mechanisms that access memory).  The eieio instruction is a barrier
 * providing an ordering (separately) for (a) cacheable stores and (b)
 * loads and stores to non-cacheable memory (e.g. I/O devices).
 *
 * mb() prevents loads and stores being reordered across this point.
 * rmb() prevents loads being reordered across this point.
 * wmb() prevents stores being reordered across this point.
 * read_barrier_depends() prevents data-dependent loads being reordered
 * across this point (nop on PPC).
 *
 * We can use the eieio instruction for wmb, but since it doesn't
 * give any ordering guarantees about loads, we have to use the
 * stronger but slower sync instruction for mb and rmb.
 *
 * An illustrative producer/consumer sketch follows the smp_*
 * variants below.
 */
#define mb()   __asm__ __volatile__ ("sync" : : : "memory")
#define rmb()  __asm__ __volatile__ ("sync" : : : "memory")
#define wmb()  __asm__ __volatile__ ("eieio" : : : "memory")
#define read_barrier_depends()  do { } while(0)

#define set_mb(var, value)  do { var = value; mb(); } while (0)
#define set_wmb(var, value) do { var = value; wmb(); } while (0)

#ifdef CONFIG_SMP
#define smp_mb()    mb()
#define smp_rmb()   rmb()
#define smp_wmb()   wmb()
#define smp_read_barrier_depends()  read_barrier_depends()
#else
#define smp_mb()    barrier()
#define smp_rmb()   barrier()
#define smp_wmb()   barrier()
#define smp_read_barrier_depends()  do { } while(0)
#endif /* CONFIG_SMP */

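/*
 * Illustrative sketch (not part of the original header): the classic
 * producer/consumer pairing of smp_wmb() with smp_rmb().  The names
 * example_buf, example_ready, example_produce and example_consume are
 * hypothetical and exist only for this example.
 */
#if 0
static int example_buf;
static volatile int example_ready;

static void example_produce(int v)
{
        example_buf = v;        /* store the payload first */
        smp_wmb();              /* order the payload before the flag */
        example_ready = 1;      /* publish */
}

static int example_consume(void)
{
        while (!example_ready)  /* wait for the flag */
                ;
        smp_rmb();              /* order the flag read before the payload read */
        return example_buf;
}
#endif /* 0 */
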
#ifdef __KERNEL__
struct task_struct;
struct pt_regs;

extern void print_backtrace(unsigned long *);
extern void show_regs(struct pt_regs *regs);
extern void flush_instruction_cache(void);
extern void hard_reset_now(void);
extern void poweroff_now(void);
#ifdef CONFIG_6xx
extern long _get_L2CR(void);
extern long _get_L3CR(void);
extern void _set_L2CR(unsigned long);
extern void _set_L3CR(unsigned long);
#else
#define _get_L2CR()     0L
#define _get_L3CR()     0L
#define _set_L2CR(val)  do { } while(0)
#define _set_L3CR(val)  do { } while(0)
#endif
extern void via_cuda_init(void);
extern void pmac_nvram_init(void);
extern void read_rtc_time(void);
extern void pmac_find_display(void);
extern void giveup_fpu(struct task_struct *);
extern void enable_kernel_fp(void);
extern void enable_kernel_altivec(void);
extern void giveup_altivec(struct task_struct *);
extern void load_up_altivec(struct task_struct *);
extern void giveup_spe(struct task_struct *);
extern void load_up_spe(struct task_struct *);
extern int fix_alignment(struct pt_regs *);
extern void cvt_fd(float *from, double *to, unsigned long *fpscr);
extern void cvt_df(double *from, float *to, unsigned long *fpscr);
extern int call_rtas(const char *, int, int, unsigned long *, ...);
extern void cacheable_memzero(void *p, unsigned int nb);
extern int do_page_fault(struct pt_regs *, unsigned long, unsigned long);
extern void bad_page_fault(struct pt_regs *, unsigned long, int);
extern void die(const char *, struct pt_regs *, long);

struct device_node;
extern void note_scsi_host(struct device_node *, void *);

extern struct task_struct *__switch_to(struct task_struct *,
        struct task_struct *);
#define switch_to(prev, next, last) ((last) = __switch_to((prev), (next)))

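/*
 * Illustrative sketch (not part of the original header): how a
 * scheduler core typically invokes switch_to().  Passing prev as the
 * third argument means that once the new task resumes here on its own
 * stack, "last" names the task that was actually running before it.
 * The function name example_context_switch is hypothetical.
 */
#if 0
static void example_context_switch(struct task_struct *prev,
                                   struct task_struct *next)
{
        switch_to(prev, next, prev);
        /* from here on, prev is the task we switched away from */
}
#endif /* 0 */
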
struct thread_struct;
extern struct task_struct *_switch(struct thread_struct *prev,
        struct thread_struct *next);

extern unsigned int rtas_data;

/*
 * Atomically exchange *p with val: lwarx loads and reserves the word,
 * stwcx. stores only if the reservation still holds, and we retry on
 * failure.  Returns the previous value of *p.
 */
static __inline__ unsigned long
xchg_u32(volatile void *p, unsigned long val)
{
        unsigned long prev;

        __asm__ __volatile__ ("\n\
1:      lwarx   %0,0,%2 \n"
        PPC405_ERR77(0,%2)
"       stwcx.  %3,0,%2 \n\
        bne-    1b"
        : "=&r" (prev), "=m" (*(volatile unsigned long *)p)
        : "r" (p), "r" (val), "m" (*(volatile unsigned long *)p)
        : "cc", "memory");

        return prev;
}

/*
 * This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid xchg().
 */
extern void __xchg_called_with_bad_pointer(void);

#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
#define tas(ptr) (xchg((ptr),1))

static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
        switch (size) {
        case 4:
                return (unsigned long) xchg_u32(ptr, x);
#if 0   /* xchg_u64 doesn't exist on 32-bit PPC */
        case 8:
                return (unsigned long) xchg_u64(ptr, x);
#endif  /* 0 */
        }
        __xchg_called_with_bad_pointer();
        return x;
}

extern inline void *xchg_ptr(void *m, void *val)
{
        return (void *) xchg_u32(m, (unsigned long) val);
}

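/*
 * Illustrative sketch (not part of the original header): using xchg()
 * as an atomic test-and-set to build a trivial busy-wait lock (tas()
 * is the same operation with the value fixed at 1).  The names
 * example_lock, example_acquire and example_release are hypothetical.
 */
#if 0
static volatile unsigned long example_lock;     /* 0 = free, 1 = held */

static void example_acquire(void)
{
        while (xchg(&example_lock, 1))  /* spin until the old value was 0 */
                ;
}

static void example_release(void)
{
        mb();                           /* order the critical section before the release */
        example_lock = 0;
}
#endif /* 0 */
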
#define __HAVE_ARCH_CMPXCHG 1

/*
 * Atomic compare-and-exchange: if *p == old, store new and return old;
 * otherwise return the value actually found.  On SMP the trailing sync
 * orders a successful update before subsequent memory accesses.
 */
static __inline__ unsigned long
__cmpxchg_u32(volatile unsigned int *p, unsigned int old, unsigned int new)
{
        unsigned int prev;

        __asm__ __volatile__ ("\n\
1:      lwarx   %0,0,%2 \n\
        cmpw    0,%0,%3 \n\
        bne     2f \n"
        PPC405_ERR77(0,%2)
"       stwcx.  %4,0,%2 \n\
        bne-    1b\n"
#ifdef CONFIG_SMP
"       sync\n"
#endif /* CONFIG_SMP */
"2:"
        : "=&r" (prev), "=m" (*p)
        : "r" (p), "r" (old), "r" (new), "m" (*p)
        : "cc", "memory");

        return prev;
}

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg().  */
extern void __cmpxchg_called_with_bad_pointer(void);

static __inline__ unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
        switch (size) {
        case 4:
                return __cmpxchg_u32(ptr, old, new);
#if 0   /* we don't have __cmpxchg_u64 on 32-bit PPC */
        case 8:
                return __cmpxchg_u64(ptr, old, new);
#endif  /* 0 */
        }
        __cmpxchg_called_with_bad_pointer();
        return old;
}

#define cmpxchg(ptr,o,n)                                                \
  ({                                                                    \
        __typeof__(*(ptr)) _o_ = (o);                                   \
        __typeof__(*(ptr)) _n_ = (n);                                   \
        (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,       \
                                       (unsigned long)_n_, sizeof(*(ptr))); \
  })

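/*
 * Illustrative sketch (not part of the original header): the usual
 * read/modify/cmpxchg retry loop, here adding to a shared counter
 * without locks.  The names example_counter and example_add are
 * hypothetical.
 */
#if 0
static unsigned int example_counter;

static void example_add(unsigned int delta)
{
        unsigned int old, new;

        do {
                old = example_counter;
                new = old + delta;
                /* retry if another CPU changed the counter meanwhile */
        } while (cmpxchg(&example_counter, old, new) != old);
}
#endif /* 0 */
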
#define arch_align_stack(x) (x)

#endif /* __KERNEL__ */
#endif /* __PPC_SYSTEM_H */