/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 Waldorf GMBH
 * Copyright (C) 1995, 1996, 1997, 1998, 1999, 2001, 2002, 2003 Ralf Baechle
 * Copyright (C) 1996 Paul M. Antoine
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#ifndef _ASM_PROCESSOR_H
#define _ASM_PROCESSOR_H

#include <linux/cpumask.h>
#include <linux/threads.h>

#include <asm/cachectl.h>
#include <asm/cpu.h>
#include <asm/cpu-info.h>
#include <asm/mipsregs.h>
#include <asm/prefetch.h>

/*
 * Return the current instruction pointer ("program counter").
 */
#define current_text_addr() ({ __label__ _l; _l: &&_l;})
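
/*
 * Illustrative note (not part of the original header): current_text_addr()
 * relies on the GCC "labels as values" extension; it declares a local label
 * at the point of expansion and yields its address via &&_l.  A hypothetical
 * debug use:
 *
 *        printk("executing near %p\n", current_text_addr());
 */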

/*
 * System setup and hardware flags..
 */

extern unsigned int vced_count, vcei_count;

/*
 * MIPS does have an arch_pick_mmap_layout()
 */
#define HAVE_ARCH_PICK_MMAP_LAYOUT 1

/*
 * A special page (the vdso) is mapped into all processes at the very
 * top of the virtual memory space.
 */
#define SPECIAL_PAGES_SIZE PAGE_SIZE

#ifdef CONFIG_32BIT
#ifdef CONFIG_KVM_GUEST
/* User space process size is limited to 1GB in KVM Guest Mode */
#define TASK_SIZE 0x3fff8000UL
#else
/*
 * User space process size: 2GB. This is hardcoded into a few places,
 * so don't change it unless you know what you are doing.
 */
#define TASK_SIZE 0x7fff8000UL
#endif

#ifdef __KERNEL__
#define STACK_TOP_MAX TASK_SIZE
#endif

#define TASK_IS_32BIT_ADDR 1

#endif

#ifdef CONFIG_64BIT
/*
 * User space process size: 1TB. This is hardcoded into a few places,
 * so don't change it unless you know what you are doing. TASK_SIZE
 * is limited to 1TB by the R4000 architecture; R10000 and better can
 * support 16TB; the architectural reserve for future expansion is
 * 8192EB ...
 */
#define TASK_SIZE32 0x7fff8000UL
#define TASK_SIZE64 0x10000000000UL
#define TASK_SIZE (test_thread_flag(TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE64)

#ifdef __KERNEL__
#define STACK_TOP_MAX TASK_SIZE64
#endif


#define TASK_SIZE_OF(tsk) \
        (test_tsk_thread_flag(tsk, TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE64)

#define TASK_IS_32BIT_ADDR test_thread_flag(TIF_32BIT_ADDR)

#endif

#define STACK_TOP ((TASK_SIZE & PAGE_MASK) - SPECIAL_PAGES_SIZE)

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE / 3)
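
/*
 * Worked example (illustrative, not in the original header), assuming a
 * 32-bit kernel with 4 KiB pages:
 *
 *        TASK_SIZE          = 0x7fff8000
 *        STACK_TOP          = (0x7fff8000 & PAGE_MASK) - PAGE_SIZE = 0x7fff7000
 *        TASK_UNMAPPED_BASE = PAGE_ALIGN(0x7fff8000 / 3)           = 0x2aaa8000
 *
 * i.e. the user stack grows down from just below the vdso page at the top
 * of the address space, while (with the legacy mmap layout) mmap() searches
 * upward from roughly one third of TASK_SIZE.
 */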


#define NUM_FPU_REGS 32

typedef __u64 fpureg_t;

/*
 * It would be nice to add some more fields for emulator statistics, but there
 * are a number of fixed offsets in offset.h and elsewhere that would have to
 * be recalculated by hand. So the additional information will be private to
 * the FPU emulator for now. See asm-mips/fpu_emulator.h.
 */

struct mips_fpu_struct {
        fpureg_t fpr[NUM_FPU_REGS];
        unsigned int fcr31;
};

#define NUM_DSP_REGS 6

typedef __u32 dspreg_t;

struct mips_dsp_state {
        dspreg_t dspr[NUM_DSP_REGS];
        unsigned int dspcontrol;
};

#define INIT_CPUMASK { \
        {0,} \
}

struct mips3264_watch_reg_state {
        /* The width of watchlo is 32 in a 32 bit kernel and 64 in a
           64 bit kernel. We use unsigned long as it has the same
           property. */
        unsigned long watchlo[NUM_WATCH_REGS];
        /* Only the mask and IRW bits from watchhi. */
        u16 watchhi[NUM_WATCH_REGS];
};
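
/*
 * Illustrative note (not in the original header): on MIPS32/MIPS64 the
 * WatchHi W, R and I enable bits live in bits 2:0 and the address mask in
 * bits 11:3, so the per-register state kept here fits comfortably in a u16.
 */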

union mips_watch_reg_state {
        struct mips3264_watch_reg_state mips3264;
};

#if defined(CONFIG_CPU_CAVIUM_OCTEON)

struct octeon_cop2_state {
        /* DMFC2 rt, 0x0201 */
        unsigned long cop2_crc_iv;
        /* DMFC2 rt, 0x0202 (Set with DMTC2 rt, 0x1202) */
        unsigned long cop2_crc_length;
        /* DMFC2 rt, 0x0200 (set with DMTC2 rt, 0x4200) */
        unsigned long cop2_crc_poly;
        /* DMFC2 rt, 0x0402; DMFC2 rt, 0x040A */
        unsigned long cop2_llm_dat[2];
        /* DMFC2 rt, 0x0084 */
        unsigned long cop2_3des_iv;
        /* DMFC2 rt, 0x0080; DMFC2 rt, 0x0081; DMFC2 rt, 0x0082 */
        unsigned long cop2_3des_key[3];
        /* DMFC2 rt, 0x0088 (Set with DMTC2 rt, 0x0098) */
        unsigned long cop2_3des_result;
        /* DMFC2 rt, 0x0111 (FIXME: Read Pass1 Errata) */
        unsigned long cop2_aes_inp0;
        /* DMFC2 rt, 0x0102; DMFC2 rt, 0x0103 */
        unsigned long cop2_aes_iv[2];
        /* DMFC2 rt, 0x0104; DMFC2 rt, 0x0105; DMFC2 rt, 0x0106; DMFC2
         * rt, 0x0107 */
        unsigned long cop2_aes_key[4];
        /* DMFC2 rt, 0x0110 */
        unsigned long cop2_aes_keylen;
        /* DMFC2 rt, 0x0100; DMFC2 rt, 0x0101 */
        unsigned long cop2_aes_result[2];
        /* DMFC2 rt, 0x0240; DMFC2 rt, 0x0241; DMFC2 rt, 0x0242; DMFC2
         * rt, 0x0243; DMFC2 rt, 0x0244; DMFC2 rt, 0x0245; DMFC2 rt,
         * 0x0246; DMFC2 rt, 0x0247; DMFC2 rt, 0x0248; DMFC2 rt,
         * 0x0249; DMFC2 rt, 0x024A; DMFC2 rt, 0x024B; DMFC2 rt,
         * 0x024C; DMFC2 rt, 0x024D; DMFC2 rt, 0x024E - Pass2 */
        unsigned long cop2_hsh_datw[15];
        /* DMFC2 rt, 0x0250; DMFC2 rt, 0x0251; DMFC2 rt, 0x0252; DMFC2
         * rt, 0x0253; DMFC2 rt, 0x0254; DMFC2 rt, 0x0255; DMFC2 rt,
         * 0x0256; DMFC2 rt, 0x0257 - Pass2 */
        unsigned long cop2_hsh_ivw[8];
        /* DMFC2 rt, 0x0258; DMFC2 rt, 0x0259 - Pass2 */
        unsigned long cop2_gfm_mult[2];
        /* DMFC2 rt, 0x025E - Pass2 */
        unsigned long cop2_gfm_poly;
        /* DMFC2 rt, 0x025A; DMFC2 rt, 0x025B - Pass2 */
        unsigned long cop2_gfm_result[2];
};
#define COP2_INIT \
        .cp2 = {0,},

struct octeon_cvmseg_state {
        unsigned long cvmseg[CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE]
                [cpu_dcache_line_size() / sizeof(unsigned long)];
};
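
/*
 * Illustrative note (not in the original header): CVMSEG is Octeon's
 * per-core local scratchpad memory.  CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE is
 * given in 128-byte cache lines and cpu_dcache_line_size() is a constant
 * 128 on Octeon, so on a 64-bit kernel each line contributes
 * 128 / sizeof(unsigned long) = 16 array entries, i.e. the per-thread save
 * area above is CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE * 128 bytes.
 */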

#elif defined(CONFIG_CPU_XLP)
struct nlm_cop2_state {
        u64 rx[4];
        u64 tx[4];
        u32 tx_msg_status;
        u32 rx_msg_status;
};

#define COP2_INIT \
        .cp2 = {{0}, {0}, 0, 0},
#else
#define COP2_INIT
#endif

typedef struct {
        unsigned long seg;
} mm_segment_t;

#define ARCH_MIN_TASKALIGN 8

struct mips_abi;

/*
 * If you change thread_struct remember to change the #defines below too!
 */
struct thread_struct {
        /* Saved main processor registers. */
        unsigned long reg16;
        unsigned long reg17, reg18, reg19, reg20, reg21, reg22, reg23;
        unsigned long reg29, reg30, reg31;

        /* Saved cp0 stuff. */
        unsigned long cp0_status;

        /* Saved fpu/fpu emulator stuff. */
        struct mips_fpu_struct fpu;
#ifdef CONFIG_MIPS_MT_FPAFF
        /* Emulated instruction count */
        unsigned long emulated_fp;
        /* Saved per-thread scheduler affinity mask */
        cpumask_t user_cpus_allowed;
#endif /* CONFIG_MIPS_MT_FPAFF */

        /* Saved state of the DSP ASE, if available. */
        struct mips_dsp_state dsp;

        /* Saved watch register state, if available. */
        union mips_watch_reg_state watch;

        /* Other stuff associated with the thread. */
        unsigned long cp0_badvaddr;     /* Last user fault */
        unsigned long cp0_baduaddr;     /* Last kernel fault accessing USEG */
        unsigned long error_code;
#ifdef CONFIG_CPU_CAVIUM_OCTEON
        struct octeon_cop2_state cp2 __attribute__ ((__aligned__(128)));
        struct octeon_cvmseg_state cvmseg __attribute__ ((__aligned__(128)));
#endif
#ifdef CONFIG_CPU_XLP
        struct nlm_cop2_state cp2;
#endif
        struct mips_abi *abi;
};
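
/*
 * Illustrative note (not in the original header): reg16-reg23 hold the MIPS
 * callee-saved registers $16-$23 ($s0-$s7), reg29 is $29 ($sp), reg30 is
 * $30 ($s8/$fp) and reg31 is $31 ($ra).  These are what the context-switch
 * path has to preserve across resume(); a task's caller-saved registers
 * live in the pt_regs frame on its kernel stack instead.
 */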

#ifdef CONFIG_MIPS_MT_FPAFF
#define FPAFF_INIT \
        .emulated_fp = 0, \
        .user_cpus_allowed = INIT_CPUMASK,
#else
#define FPAFF_INIT
#endif /* CONFIG_MIPS_MT_FPAFF */

#define INIT_THREAD { \
        /* \
         * Saved main processor registers \
         */ \
        .reg16 = 0, \
        .reg17 = 0, \
        .reg18 = 0, \
        .reg19 = 0, \
        .reg20 = 0, \
        .reg21 = 0, \
        .reg22 = 0, \
        .reg23 = 0, \
        .reg29 = 0, \
        .reg30 = 0, \
        .reg31 = 0, \
        /* \
         * Saved cp0 stuff \
         */ \
        .cp0_status = 0, \
        /* \
         * Saved FPU/FPU emulator stuff \
         */ \
        .fpu = { \
                .fpr = {0,}, \
                .fcr31 = 0, \
        }, \
        /* \
         * FPU affinity state (null if not FPAFF) \
         */ \
        FPAFF_INIT \
        /* \
         * Saved DSP stuff \
         */ \
        .dsp = { \
                .dspr = {0, }, \
                .dspcontrol = 0, \
        }, \
        /* \
         * saved watch register stuff \
         */ \
        .watch = {{{0,},},}, \
        /* \
         * Other stuff associated with the process \
         */ \
        .cp0_badvaddr = 0, \
        .cp0_baduaddr = 0, \
        .error_code = 0, \
        /* \
         * Platform specific cop2 registers (null if no COP2) \
         */ \
        COP2_INIT \
}

struct task_struct;

/* Free all resources held by a thread. */
#define release_thread(thread) do { } while(0)

extern unsigned long thread_saved_pc(struct task_struct *tsk);

/*
 * Do necessary setup to start up a newly executed thread.
 */
extern void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp);

unsigned long get_wchan(struct task_struct *p);

#define __KSTK_TOS(tsk) ((unsigned long)task_stack_page(tsk) + \
                         THREAD_SIZE - 32 - sizeof(struct pt_regs))
#define task_pt_regs(tsk) ((struct pt_regs *)__KSTK_TOS(tsk))
#define KSTK_EIP(tsk) (task_pt_regs(tsk)->cp0_epc)
#define KSTK_ESP(tsk) (task_pt_regs(tsk)->regs[29])
#define KSTK_STATUS(tsk) (task_pt_regs(tsk)->cp0_status)
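
/*
 * Illustrative sketch (not part of the original header): the accessors above
 * locate the pt_regs frame saved at the top of a task's kernel stack and
 * pull the user-mode context out of it, e.g. in a hypothetical debug helper:
 *
 *        struct pt_regs *regs = task_pt_regs(tsk);
 *        unsigned long user_pc = KSTK_EIP(tsk);   (same as regs->cp0_epc)
 *        unsigned long user_sp = KSTK_ESP(tsk);   (same as regs->regs[29], i.e. $sp)
 */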

#define cpu_relax() barrier()

/*
 * return_address() is a replacement for __builtin_return_address(count)
 * which on certain architectures cannot reasonably be implemented in GCC
 * (MIPS, Alpha) or is unusable with -fomit-frame-pointer (i386).
 * Note that __builtin_return_address(x>=1) is forbidden because GCC
 * aborts compilation on some CPUs. It's simply not possible to unwind
 * some CPUs' stack frames.
 *
 * __builtin_return_address works only for non-leaf functions. We avoid the
 * overhead of a function call by forcing the compiler to save the return
 * address register on the stack.
 */
#define return_address() ({__asm__ __volatile__("":::"$31");__builtin_return_address(0);})
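
/*
 * Illustrative note (not in the original header): the empty asm marks $31
 * (ra) as clobbered, which forces the compiler to save the return address
 * to the stack just as it would for a non-leaf function, so that
 *
 *        void *caller = return_address();
 *
 * reliably yields the same value as __builtin_return_address(0) without
 * needing a frame-pointer walk.
 */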

#ifdef CONFIG_CPU_HAS_PREFETCH

#define ARCH_HAS_PREFETCH
#define prefetch(x) __builtin_prefetch((x), 0, 1)

#define ARCH_HAS_PREFETCHW
#define prefetchw(x) __builtin_prefetch((x), 1, 1)
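
/*
 * Illustrative use (not part of the original header): __builtin_prefetch()
 * compiles down to the MIPS pref instruction on CPUs that implement it.
 * The second argument selects read (0) vs. write (1) intent, the third a
 * low degree of temporal locality.  A typical pattern is to warm the cache
 * one element ahead while walking a linked list:
 *
 *        for (p = head; p; p = p->next) {
 *                if (p->next)
 *                        prefetch(p->next);
 *                process(p);
 *        }
 */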

/*
 * See Documentation/scheduler/sched-arch.txt; prevents deadlock on SMP
 * systems.
 */
#define __ARCH_WANT_UNLOCKED_CTXSW

#endif

#endif /* _ASM_PROCESSOR_H */