/* spitfire.h: SpitFire/BlackBird/Cheetah inline MMU operations.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 */

#ifndef _SPARC64_SPITFIRE_H
#define _SPARC64_SPITFIRE_H

#ifdef CONFIG_SPARC64

#include <asm/asi.h>

/* The following register addresses are accessible via ASI_DMMU
 * and ASI_IMMU; that is, there is a distinct and unique copy of
 * each of these registers for each TLB.
 */
#define TSB_TAG_TARGET		0x0000000000000000 /* All chips				*/
#define TLB_SFSR		0x0000000000000018 /* All chips				*/
#define TSB_REG			0x0000000000000028 /* All chips				*/
#define TLB_TAG_ACCESS		0x0000000000000030 /* All chips				*/
#define VIRT_WATCHPOINT		0x0000000000000038 /* All chips				*/
#define PHYS_WATCHPOINT		0x0000000000000040 /* All chips				*/
#define TSB_EXTENSION_P		0x0000000000000048 /* Ultra-III and later		*/
#define TSB_EXTENSION_S		0x0000000000000050 /* Ultra-III and later, D-TLB only	*/
#define TSB_EXTENSION_N		0x0000000000000058 /* Ultra-III and later		*/
#define TLB_TAG_ACCESS_EXT	0x0000000000000060 /* Ultra-III+ and later		*/
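
/* For example (an illustrative sketch only, not used elsewhere in this
 * header), the D-MMU copy of the SFSR can be read with:
 *
 *	unsigned long dsfsr;
 *
 *	__asm__ __volatile__("ldxa [%1] %2, %0"
 *			     : "=r" (dsfsr)
 *			     : "r" (TLB_SFSR), "i" (ASI_DMMU));
 */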

/* These registers only exist as one entity, and are accessed
 * via ASI_DMMU only.
 */
#define PRIMARY_CONTEXT		0x0000000000000008
#define SECONDARY_CONTEXT	0x0000000000000010
#define DMMU_SFAR		0x0000000000000020
#define VIRT_WATCHPOINT		0x0000000000000038
#define PHYS_WATCHPOINT		0x0000000000000040
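
/* For example (an illustrative sketch only), the current primary
 * context number can be read with:
 *
 *	unsigned long ctx;
 *
 *	__asm__ __volatile__("ldxa [%1] %2, %0"
 *			     : "=r" (ctx)
 *			     : "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
 */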

#define SPITFIRE_HIGHEST_LOCKED_TLBENT	(64 - 1)
#define CHEETAH_HIGHEST_LOCKED_TLBENT	(16 - 1)

#define L1DCACHE_SIZE		0x4000

#define SUN4V_CHIP_INVALID	0x00
#define SUN4V_CHIP_NIAGARA1	0x01
#define SUN4V_CHIP_NIAGARA2	0x02
#define SUN4V_CHIP_NIAGARA3	0x03
#define SUN4V_CHIP_NIAGARA4	0x04
#define SUN4V_CHIP_NIAGARA5	0x05
#define SUN4V_CHIP_SPARC_M6	0x06
#define SUN4V_CHIP_SPARC_M7	0x07
#define SUN4V_CHIP_SPARC64X	0x8a
#define SUN4V_CHIP_UNKNOWN	0xff

#ifndef __ASSEMBLY__

enum ultra_tlb_layout {
	spitfire = 0,
	cheetah = 1,
	cheetah_plus = 2,
	hypervisor = 3,
};

extern enum ultra_tlb_layout tlb_type;

extern int sun4v_chip_type;
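
/* Illustrative sketch only (not part of this header): the direct TLB
 * and cache diagnostics below exist only on pre-sun4v chips, so
 * callers are expected to branch on tlb_type first, e.g.
 *
 *	if (tlb_type == spitfire)
 *		data = spitfire_get_dtlb_data(entry);
 *	else if (tlb_type == cheetah || tlb_type == cheetah_plus)
 *		data = cheetah_get_ldtlb_data(entry);
 *	else
 *		data = 0;	(hypervisor: no direct TLB access)
 */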

extern int cheetah_pcache_forced_on;
void cheetah_enable_pcache(void);

#define sparc64_highest_locked_tlbent()	\
	(tlb_type == spitfire ?		\
	 SPITFIRE_HIGHEST_LOCKED_TLBENT : \
	 CHEETAH_HIGHEST_LOCKED_TLBENT)
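
/* Illustrative sketch only: a hypothetical dump of the locked D-TLB
 * entries on a pre-sun4v chip could look like
 *
 *	int i;
 *
 *	for (i = 0; i <= sparc64_highest_locked_tlbent(); i++)
 *		printk("dtlb[%d]: tag[%016lx] data[%016lx]\n", i,
 *		       spitfire_get_dtlb_tag(i),
 *		       spitfire_get_dtlb_data(i));
 *
 * using the spitfire accessors defined further down (the cheetah
 * locked-TLB accessors would be used on cheetah-class chips).
 */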

extern int num_kernel_image_mappings;

/* The data cache is write through, so this just invalidates the
 * specified line.
 */
static inline void spitfire_put_dcache_tag(unsigned long addr, unsigned long tag)
{
	__asm__ __volatile__("stxa %0, [%1] %2\n\t"
			     "membar #Sync"
			     : /* No outputs */
			     : "r" (tag), "r" (addr), "i" (ASI_DCACHE_TAG));
}
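
/* Illustrative sketch only: the whole L1 D-cache can be invalidated by
 * writing a zero tag to every line (this assumes the 32-byte D-cache
 * line size of these chips):
 *
 *	unsigned long addr;
 *
 *	for (addr = 0; addr < L1DCACHE_SIZE; addr += 0x20)
 *		spitfire_put_dcache_tag(addr, 0x0);
 */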

/* The instruction cache lines are flushed with this, but note that
 * it does not flush the pipeline.  It is possible for a line to be
 * flushed while stale instructions remain in the pipeline; a flush
 * instruction (to any address) issued after the line is invalidated
 * is sufficient to handle this.
 */
static inline void spitfire_put_icache_tag(unsigned long addr, unsigned long tag)
{
	__asm__ __volatile__("stxa %0, [%1] %2\n\t"
			     "membar #Sync"
			     : /* No outputs */
			     : "r" (tag), "r" (addr), "i" (ASI_IC_TAG));
}
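
/* Illustrative sketch only: after invalidating a line, one flush
 * instruction to any mapped address drains the pipeline, e.g.
 *
 *	spitfire_put_icache_tag(addr, 0x0);
 *	__asm__ __volatile__("flush %%g6" : : : "memory");
 *
 * (%g6 holds the thread pointer, a conveniently mapped address.)
 */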

static inline unsigned long spitfire_get_dtlb_data(int entry)
{
	unsigned long data;

	__asm__ __volatile__("ldxa [%1] %2, %0"
			     : "=r" (data)
			     : "r" (entry << 3), "i" (ASI_DTLB_DATA_ACCESS));

	/* Clear TTE diag bits. */
	data &= ~0x0003fe0000000000UL;

	return data;
}

static inline unsigned long spitfire_get_dtlb_tag(int entry)
{
	unsigned long tag;

	__asm__ __volatile__("ldxa [%1] %2, %0"
			     : "=r" (tag)
			     : "r" (entry << 3), "i" (ASI_DTLB_TAG_READ));
	return tag;
}

static inline void spitfire_put_dtlb_data(int entry, unsigned long data)
{
	__asm__ __volatile__("stxa %0, [%1] %2\n\t"
			     "membar #Sync"
			     : /* No outputs */
			     : "r" (data), "r" (entry << 3),
			       "i" (ASI_DTLB_DATA_ACCESS));
}

static inline unsigned long spitfire_get_itlb_data(int entry)
{
	unsigned long data;

	__asm__ __volatile__("ldxa [%1] %2, %0"
			     : "=r" (data)
			     : "r" (entry << 3), "i" (ASI_ITLB_DATA_ACCESS));

	/* Clear TTE diag bits. */
	data &= ~0x0003fe0000000000UL;

	return data;
}

static inline unsigned long spitfire_get_itlb_tag(int entry)
{
	unsigned long tag;

	__asm__ __volatile__("ldxa [%1] %2, %0"
			     : "=r" (tag)
			     : "r" (entry << 3), "i" (ASI_ITLB_TAG_READ));
	return tag;
}

static inline void spitfire_put_itlb_data(int entry, unsigned long data)
{
	__asm__ __volatile__("stxa %0, [%1] %2\n\t"
			     "membar #Sync"
			     : /* No outputs */
			     : "r" (data), "r" (entry << 3),
			       "i" (ASI_ITLB_DATA_ACCESS));
}

static inline void spitfire_flush_dtlb_nucleus_page(unsigned long page)
{
	__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
			     "membar #Sync"
			     : /* No outputs */
			     : "r" (page | 0x20), "i" (ASI_DMMU_DEMAP));
}

static inline void spitfire_flush_itlb_nucleus_page(unsigned long page)
{
	__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
			     "membar #Sync"
			     : /* No outputs */
			     : "r" (page | 0x20), "i" (ASI_IMMU_DEMAP));
}

/* Cheetah has "all non-locked" tlb flushes. */
static inline void cheetah_flush_dtlb_all(void)
{
	__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
			     "membar #Sync"
			     : /* No outputs */
			     : "r" (0x80), "i" (ASI_DMMU_DEMAP));
}

static inline void cheetah_flush_itlb_all(void)
{
	__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
			     "membar #Sync"
			     : /* No outputs */
			     : "r" (0x80), "i" (ASI_IMMU_DEMAP));
}
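
/* Illustrative sketch only: blowing away every non-locked translation
 * on a cheetah-class chip means demapping both the D and I TLBs:
 *
 *	cheetah_flush_dtlb_all();
 *	cheetah_flush_itlb_all();
 */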

/* Cheetah has a 4-TLB layout, so direct access is a bit different.
 * The first two TLBs are fully associative, hold 16 entries each, and
 * are used only for locked and >8K sized translations.  One exists for
 * data accesses and one for instruction accesses.
 *
 * The third TLB is for data accesses to 8K non-locked translations, is
 * 2-way associative, and holds 512 entries.  The fourth TLB is for
 * instruction accesses to 8K non-locked translations, is 2-way
 * associative, and holds 128 entries.
 *
 * Cheetah has some bug where bogus data can be returned from
 * ASI_{D,I}TLB_DATA_ACCESS loads; doing the load twice fixes
 * the problem for me. -DaveM
 */
static inline unsigned long cheetah_get_ldtlb_data(int entry)
{
	unsigned long data;

	__asm__ __volatile__("ldxa [%1] %2, %%g0\n\t"
			     "ldxa [%1] %2, %0"
			     : "=r" (data)
			     : "r" ((0 << 16) | (entry << 3)),
			       "i" (ASI_DTLB_DATA_ACCESS));

	return data;
}

static inline unsigned long cheetah_get_litlb_data(int entry)
{
	unsigned long data;

	__asm__ __volatile__("ldxa [%1] %2, %%g0\n\t"
			     "ldxa [%1] %2, %0"
			     : "=r" (data)
			     : "r" ((0 << 16) | (entry << 3)),
			       "i" (ASI_ITLB_DATA_ACCESS));

	return data;
}

static inline unsigned long cheetah_get_ldtlb_tag(int entry)
{
	unsigned long tag;

	__asm__ __volatile__("ldxa [%1] %2, %0"
			     : "=r" (tag)
			     : "r" ((0 << 16) | (entry << 3)),
			       "i" (ASI_DTLB_TAG_READ));

	return tag;
}

static inline unsigned long cheetah_get_litlb_tag(int entry)
{
	unsigned long tag;

	__asm__ __volatile__("ldxa [%1] %2, %0"
			     : "=r" (tag)
			     : "r" ((0 << 16) | (entry << 3)),
			       "i" (ASI_ITLB_TAG_READ));

	return tag;
}
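
/* Illustrative sketch only: walking the 16-entry locked D-TLB
 * described in the layout comment above:
 *
 *	int i;
 *
 *	for (i = 0; i <= CHEETAH_HIGHEST_LOCKED_TLBENT; i++)
 *		printk("ldtlb[%d]: tag[%016lx] data[%016lx]\n", i,
 *		       cheetah_get_ldtlb_tag(i),
 *		       cheetah_get_ldtlb_data(i));
 */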

static inline void cheetah_put_ldtlb_data(int entry, unsigned long data)
{
	__asm__ __volatile__("stxa %0, [%1] %2\n\t"
			     "membar #Sync"
			     : /* No outputs */
			     : "r" (data),
			       "r" ((0 << 16) | (entry << 3)),
			       "i" (ASI_DTLB_DATA_ACCESS));
}

static inline void cheetah_put_litlb_data(int entry, unsigned long data)
{
	__asm__ __volatile__("stxa %0, [%1] %2\n\t"
			     "membar #Sync"
			     : /* No outputs */
			     : "r" (data),
			       "r" ((0 << 16) | (entry << 3)),
			       "i" (ASI_ITLB_DATA_ACCESS));
}

static inline unsigned long cheetah_get_dtlb_data(int entry, int tlb)
{
	unsigned long data;

	__asm__ __volatile__("ldxa [%1] %2, %%g0\n\t"
			     "ldxa [%1] %2, %0"
			     : "=r" (data)
			     : "r" ((tlb << 16) | (entry << 3)), "i" (ASI_DTLB_DATA_ACCESS));

	return data;
}

static inline unsigned long cheetah_get_dtlb_tag(int entry, int tlb)
{
	unsigned long tag;

	__asm__ __volatile__("ldxa [%1] %2, %0"
			     : "=r" (tag)
			     : "r" ((tlb << 16) | (entry << 3)), "i" (ASI_DTLB_TAG_READ));
	return tag;
}
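
/* The diagnostic access address used by these accessors encodes the
 * TLB number shifted into bits 16 and up (tlb << 16) and the entry
 * index, scaled by eight, in the low bits (entry << 3).  Illustrative
 * sketch only, with a hypothetical entry/tlb pair chosen by the caller:
 *
 *	unsigned long tag  = cheetah_get_dtlb_tag(entry, tlb);
 *	unsigned long data = cheetah_get_dtlb_data(entry, tlb);
 */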

static inline void cheetah_put_dtlb_data(int entry, unsigned long data, int tlb)
{
	__asm__ __volatile__("stxa %0, [%1] %2\n\t"
			     "membar #Sync"
			     : /* No outputs */
			     : "r" (data),
			       "r" ((tlb << 16) | (entry << 3)),
			       "i" (ASI_DTLB_DATA_ACCESS));
}

static inline unsigned long cheetah_get_itlb_data(int entry)
{
	unsigned long data;

	__asm__ __volatile__("ldxa [%1] %2, %%g0\n\t"
			     "ldxa [%1] %2, %0"
			     : "=r" (data)
			     : "r" ((2 << 16) | (entry << 3)),
			       "i" (ASI_ITLB_DATA_ACCESS));

	return data;
}

static inline unsigned long cheetah_get_itlb_tag(int entry)
{
	unsigned long tag;

	__asm__ __volatile__("ldxa [%1] %2, %0"
			     : "=r" (tag)
			     : "r" ((2 << 16) | (entry << 3)), "i" (ASI_ITLB_TAG_READ));
	return tag;
}

static inline void cheetah_put_itlb_data(int entry, unsigned long data)
{
	__asm__ __volatile__("stxa %0, [%1] %2\n\t"
			     "membar #Sync"
			     : /* No outputs */
			     : "r" (data), "r" ((2 << 16) | (entry << 3)),
			       "i" (ASI_ITLB_DATA_ACCESS));
}

#endif /* !(__ASSEMBLY__) */
#endif /* CONFIG_SPARC64 */
#endif /* !(_SPARC64_SPITFIRE_H) */