#ifndef _ASM_IA64_PROCESSOR_H
#define _ASM_IA64_PROCESSOR_H

/*
 * Copyright (C) 1998-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
 *
 * 11/24/98	S.Eranian	added ia64_set_iva()
 * 12/03/99	D. Mosberger	implement thread_saved_pc() via kernel unwind API
 * 06/16/00	A. Mallick	added csd/ssd/tssd for ia32 support
 */

#include <asm/intrinsics.h>
#include <asm/kregs.h>
#include <asm/ptrace.h>
#include <asm/ustack.h>

#define IA64_NUM_DBG_REGS	8
/*
 * Limits for PMC and PMD are set to less than the maximum architected
 * values, but should be sufficient for a while.
 */
#define IA64_NUM_PMC_REGS	64
#define IA64_NUM_PMD_REGS	64

#define DEFAULT_MAP_BASE	__IA64_UL_CONST(0x2000000000000000)
#define DEFAULT_TASK_SIZE	__IA64_UL_CONST(0xa000000000000000)

/*
 * TASK_SIZE really is a mis-nomer.  It really is the maximum user
 * space address (plus one).  On IA-64, there are five regions of 2TB
 * each (assuming 8KB page size), for a total of 8TB of user virtual
 * address space.
 */
#define TASK_SIZE		(current->thread.task_size)

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE	(current->thread.map_base)

#define IA64_THREAD_FPH_VALID	(__IA64_UL(1) << 0)	/* floating-point high state valid? */
#define IA64_THREAD_DBG_VALID	(__IA64_UL(1) << 1)	/* debug registers valid? */
#define IA64_THREAD_PM_VALID	(__IA64_UL(1) << 2)	/* performance registers valid? */
#define IA64_THREAD_UAC_NOPRINT	(__IA64_UL(1) << 3)	/* don't log unaligned accesses */
#define IA64_THREAD_UAC_SIGBUS	(__IA64_UL(1) << 4)	/* generate SIGBUS on unaligned acc. */
#define IA64_THREAD_MIGRATION	(__IA64_UL(1) << 5)	/* require migration sync at ctx sw */
#define IA64_THREAD_FPEMU_NOPRINT (__IA64_UL(1) << 6)	/* don't log any fpswa faults */
#define IA64_THREAD_FPEMU_SIGFPE  (__IA64_UL(1) << 7)	/* send a SIGFPE for fpswa faults */

#define IA64_THREAD_UAC_SHIFT	3
#define IA64_THREAD_UAC_MASK	(IA64_THREAD_UAC_NOPRINT | IA64_THREAD_UAC_SIGBUS)
#define IA64_THREAD_FPEMU_SHIFT	6
#define IA64_THREAD_FPEMU_MASK	(IA64_THREAD_FPEMU_NOPRINT | IA64_THREAD_FPEMU_SIGFPE)

/*
 * This shift should be large enough to represent 1000000000/itc_freq with
 * good accuracy while being small enough for 10*1000000000<<IA64_NSEC_PER_CYC_SHIFT
 * to fit in 64 bits (this gives enough slack to represent 10 seconds worth
 * of time as a scaled number).
 */
#define IA64_NSEC_PER_CYC_SHIFT	30
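
/*
 * Worked example (illustrative numbers, not from any particular CPU):
 * with itc_freq = 400000000 (a 400 MHz ITC),
 *
 *	nsec_per_cyc = (1000000000 << 30) / 400000000 = 2684354560 (2.5 << 30)
 *
 * and a cycle count converts to nanoseconds with one multiply and one shift:
 *
 *	nsec = (cycles * nsec_per_cyc) >> IA64_NSEC_PER_CYC_SHIFT;
 */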

#ifndef __ASSEMBLY__

#include <linux/cache.h>
#include <linux/compiler.h>
#include <linux/threads.h>
#include <linux/types.h>

#include <asm/fpu.h>
#include <asm/page.h>
#include <asm/percpu.h>
#include <asm/rse.h>
#include <asm/unwind.h>
#include <asm/atomic.h>
#ifdef CONFIG_NUMA
#include <asm/nodedata.h>
#endif

/* the PSR, expressed as bitfields (cf. the flat IA64_PSR_* masks in <asm/kregs.h>) for more efficient access: */
struct ia64_psr {
	__u64 reserved0 : 1;
	__u64 be : 1;
	__u64 up : 1;
	__u64 ac : 1;
	__u64 mfl : 1;
	__u64 mfh : 1;
	__u64 reserved1 : 7;
	__u64 ic : 1;
	__u64 i : 1;
	__u64 pk : 1;
	__u64 reserved2 : 1;
	__u64 dt : 1;
	__u64 dfl : 1;
	__u64 dfh : 1;
	__u64 sp : 1;
	__u64 pp : 1;
	__u64 di : 1;
	__u64 si : 1;
	__u64 db : 1;
	__u64 lp : 1;
	__u64 tb : 1;
	__u64 rt : 1;
	__u64 reserved3 : 4;
	__u64 cpl : 2;
	__u64 is : 1;
	__u64 mc : 1;
	__u64 it : 1;
	__u64 id : 1;
	__u64 da : 1;
	__u64 dd : 1;
	__u64 ss : 1;
	__u64 ri : 2;
	__u64 ed : 1;
	__u64 bn : 1;
	__u64 reserved4 : 19;
};
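
/*
 * This layout is normally overlaid on a saved PSR value rather than
 * instantiated directly; see the ia64_psr() macro in <asm/ptrace.h>,
 * used below in KSTK_EIP() as ia64_psr(_regs)->ri.
 */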

/*
 * CPU type, hardware bug flags, and per-CPU state.  Frequently used
 * state comes earlier:
 */
struct cpuinfo_ia64 {
	__u32 softirq_pending;
	__u64 itm_delta;	/* # of clock cycles between clock ticks */
	__u64 itm_next;		/* interval timer match value to use for next clock tick */
	__u64 nsec_per_cyc;	/* (1000000000<<IA64_NSEC_PER_CYC_SHIFT)/itc_freq */
	__u64 unimpl_va_mask;	/* mask of unimplemented virtual address bits (from PAL) */
	__u64 unimpl_pa_mask;	/* mask of unimplemented physical address bits (from PAL) */
	__u64 itc_freq;		/* frequency of ITC counter */
	__u64 proc_freq;	/* frequency of processor */
	__u64 cyc_per_usec;	/* itc_freq/1000000 */
	__u64 ptce_base;
	__u32 ptce_count[2];
	__u32 ptce_stride[2];
	struct task_struct *ksoftirqd;	/* kernel softirq daemon for this CPU */

#ifdef CONFIG_SMP
	__u64 loops_per_jiffy;
	int cpu;
	__u32 socket_id;	/* physical processor socket id */
	__u16 core_id;		/* core id */
	__u16 thread_id;	/* thread id */
	__u16 num_log;		/* Total number of logical processors on
				 * this socket that were successfully booted */
	__u8  cores_per_socket;	/* Cores per processor socket */
	__u8  threads_per_core;	/* Threads per core */
#endif

	/* CPUID-derived information: */
	__u64 ppn;
	__u64 features;
	__u8 number;
	__u8 revision;
	__u8 model;
	__u8 family;
	__u8 archrev;
	char vendor[16];

#ifdef CONFIG_NUMA
	struct ia64_node_data *node_data;
#endif
};

DECLARE_PER_CPU(struct cpuinfo_ia64, cpu_info);

/*
 * The "local" data variable.  It refers to the per-CPU data of the currently executing
 * CPU, much like "current" points to the per-task data of the currently executing task.
 * Do not use the address of local_cpu_data, since it will be different from
 * cpu_data(smp_processor_id())!
 */
#define local_cpu_data		(&__ia64_per_cpu_var(cpu_info))
#define cpu_data(cpu)		(&per_cpu(cpu_info, cpu))
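
/*
 * Usage sketch (illustrative): from preemption-disabled code, this CPU's
 * ITC frequency is local_cpu_data->itc_freq; another CPU's is
 * cpu_data(cpu)->itc_freq.
 */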
extern void print_cpu_info (struct cpuinfo_ia64 *);

typedef struct {
	unsigned long seg;
} mm_segment_t;

#define SET_UNALIGN_CTL(task,value)								\
({												\
	(task)->thread.flags = (((task)->thread.flags & ~IA64_THREAD_UAC_MASK)			\
				| (((value) << IA64_THREAD_UAC_SHIFT) & IA64_THREAD_UAC_MASK));\
	0;											\
})
#define GET_UNALIGN_CTL(task,addr)								\
({												\
	put_user(((task)->thread.flags & IA64_THREAD_UAC_MASK) >> IA64_THREAD_UAC_SHIFT,	\
		 (int __user *) (addr));							\
})

#define SET_FPEMU_CTL(task,value)								\
({												\
	(task)->thread.flags = (((task)->thread.flags & ~IA64_THREAD_FPEMU_MASK)		\
			  | (((value) << IA64_THREAD_FPEMU_SHIFT) & IA64_THREAD_FPEMU_MASK));	\
	0;											\
})
#define GET_FPEMU_CTL(task,addr)								\
({												\
	put_user(((task)->thread.flags & IA64_THREAD_FPEMU_MASK) >> IA64_THREAD_FPEMU_SHIFT,	\
		 (int __user *) (addr));							\
})
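
/*
 * These pairs back the generic prctl() controls PR_SET_UNALIGN/PR_GET_UNALIGN
 * and PR_SET_FPEMU/PR_GET_FPEMU (see kernel/sys.c).  A user-space sketch
 * (illustrative, assuming <sys/prctl.h>):
 *
 *	prctl(PR_SET_UNALIGN, PR_UNALIGN_SIGBUS);	// SIGBUS instead of fixing up
 *	prctl(PR_SET_FPEMU, PR_FPEMU_SIGFPE);		// SIGFPE instead of fpswa emulation
 */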

#ifdef CONFIG_IA32_SUPPORT
struct desc_struct {
	unsigned int a, b;
};

#define desc_empty(desc)		(!((desc)->a + (desc)->b))
#define desc_equal(desc1, desc2)	(((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b))

#define GDT_ENTRY_TLS_ENTRIES	3
#define GDT_ENTRY_TLS_MIN	6
#define GDT_ENTRY_TLS_MAX	(GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)

#define TLS_SIZE		(GDT_ENTRY_TLS_ENTRIES * 8)

struct partial_page_list;
#endif

struct thread_struct {
	__u32 flags;			/* various thread flags (see IA64_THREAD_*) */
	/* writing on_ustack is performance-critical, so it's worth spending 8 bits on it... */
	__u8 on_ustack;			/* executing on user-stacks? */
	__u8 pad[3];
	__u64 ksp;			/* kernel stack pointer */
	__u64 map_base;			/* base address for get_unmapped_area() */
	__u64 task_size;		/* limit for task size */
	__u64 rbs_bot;			/* the base address for the RBS */
	int last_fph_cpu;		/* CPU that may hold the contents of f32-f127 */

#ifdef CONFIG_IA32_SUPPORT
	__u64 eflag;			/* IA32 EFLAGS reg */
	__u64 fsr;			/* IA32 floating pt status reg */
	__u64 fcr;			/* IA32 floating pt control reg */
	__u64 fir;			/* IA32 fp except. instr. reg */
	__u64 fdr;			/* IA32 fp except. data reg */
	__u64 old_k1;			/* old value of ar.k1 */
	__u64 old_iob;			/* old IOBase value */
	struct partial_page_list *ppl;	/* partial page list for 4K page size issue */
	/* cached TLS descriptors. */
	struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];

# define INIT_THREAD_IA32	.eflag =	0,			\
				.fsr =		0,			\
				.fcr =		0x17800000037fULL,	\
				.fir =		0,			\
				.fdr =		0,			\
				.old_k1 =	0,			\
				.old_iob =	0,			\
				.ppl =		NULL,
#else
# define INIT_THREAD_IA32
#endif /* CONFIG_IA32_SUPPORT */
#ifdef CONFIG_PERFMON
	__u64 pmcs[IA64_NUM_PMC_REGS];
	__u64 pmds[IA64_NUM_PMD_REGS];
	void *pfm_context;		     /* pointer to detailed PMU context */
	unsigned long pfm_needs_checking;    /* when >0, pending perfmon work on kernel exit */
# define INIT_THREAD_PM		.pmcs =			{0UL, },	\
				.pmds =			{0UL, },	\
				.pfm_context =		NULL,		\
				.pfm_needs_checking =	0UL,
#else
# define INIT_THREAD_PM
#endif
	__u64 dbr[IA64_NUM_DBG_REGS];
	__u64 ibr[IA64_NUM_DBG_REGS];
	struct ia64_fpreg fph[96];	/* saved/loaded on demand */
};

#define INIT_THREAD {						\
	.flags =	0,					\
	.on_ustack =	0,					\
	.ksp =		0,					\
	.map_base =	DEFAULT_MAP_BASE,			\
	.rbs_bot =	STACK_TOP - DEFAULT_USER_STACK_SIZE,	\
	.task_size =	DEFAULT_TASK_SIZE,			\
	.last_fph_cpu =	-1,					\
	INIT_THREAD_IA32					\
	INIT_THREAD_PM						\
	.dbr =		{0, },					\
	.ibr =		{0, },					\
	.fph =		{{{{0}}}, }				\
}

#define start_thread(regs,new_ip,new_sp) do {							\
	set_fs(USER_DS);									\
	regs->cr_ipsr = ((regs->cr_ipsr | (IA64_PSR_BITS_TO_SET | IA64_PSR_CPL))		\
			 & ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_RI | IA64_PSR_IS));		\
	regs->cr_iip = new_ip;									\
	regs->ar_rsc = 0xf;		/* eager mode, privilege level 3 */			\
	regs->ar_rnat = 0;									\
	regs->ar_bspstore = current->thread.rbs_bot;						\
	regs->ar_fpsr = FPSR_DEFAULT;								\
	regs->loadrs = 0;									\
	regs->r8 = current->mm->dumpable;	/* set "don't zap registers" flag */		\
	regs->r12 = new_sp - 16;		/* allocate 16 byte scratch area */		\
	if (unlikely(!current->mm->dumpable)) {							\
		/*										\
		 * Zap scratch regs to avoid leaking bits between processes with different	\
		 * uid/privileges.								\
		 */										\
		regs->ar_pfs = 0; regs->b0 = 0; regs->pr = 0;					\
		regs->r1 = 0; regs->r9 = 0; regs->r11 = 0; regs->r13 = 0; regs->r15 = 0;	\
	}											\
} while (0)

/* Forward declarations, a strange C thing... */
struct mm_struct;
struct task_struct;

/*
 * Free all resources held by a thread.  This is called after the
 * parent of DEAD_TASK has collected the exit status of the task via
 * wait().
 */
#define release_thread(dead_task)

/* Prepare to copy thread state - unlazy all lazy status */
#define prepare_to_copy(tsk)	do { } while (0)

/*
 * This is the mechanism for creating a new kernel thread.
 *
 * NOTE 1: Only a kernel-only process (i.e., the swapper or direct
 * descendants that haven't done an "execve()") should use this: it
 * will work within a system call from a "real" process, but the
 * process memory space will not be freed until both the parent and
 * the child have exited.
 *
 * NOTE 2: This MUST NOT be an inlined function.  Otherwise, we get
 * into trouble in init/main.c when the child thread returns to
 * do_basic_setup() and the timing is such that free_initmem() has
 * been called already.
 */
extern pid_t kernel_thread (int (*fn)(void *), void *arg, unsigned long flags);

/* Get wait channel for task P.  */
extern unsigned long get_wchan (struct task_struct *p);

/* Return instruction pointer of blocked task TSK.  */
#define KSTK_EIP(tsk)					\
  ({							\
	struct pt_regs *_regs = task_pt_regs(tsk);	\
	_regs->cr_iip + ia64_psr(_regs)->ri;		\
  })

/* Return stack pointer of blocked task TSK.  */
#define KSTK_ESP(tsk)  ((tsk)->thread.ksp)

extern void ia64_getreg_unknown_kr (void);
extern void ia64_setreg_unknown_kr (void);

#define ia64_get_kr(regnum)					\
({								\
	unsigned long r = 0;					\
								\
	switch (regnum) {					\
	    case 0: r = ia64_getreg(_IA64_REG_AR_KR0); break;	\
	    case 1: r = ia64_getreg(_IA64_REG_AR_KR1); break;	\
	    case 2: r = ia64_getreg(_IA64_REG_AR_KR2); break;	\
	    case 3: r = ia64_getreg(_IA64_REG_AR_KR3); break;	\
	    case 4: r = ia64_getreg(_IA64_REG_AR_KR4); break;	\
	    case 5: r = ia64_getreg(_IA64_REG_AR_KR5); break;	\
	    case 6: r = ia64_getreg(_IA64_REG_AR_KR6); break;	\
	    case 7: r = ia64_getreg(_IA64_REG_AR_KR7); break;	\
	    default: ia64_getreg_unknown_kr(); break;		\
	}							\
	r;							\
})

#define ia64_set_kr(regnum, r)					\
({								\
	switch (regnum) {					\
	    case 0: ia64_setreg(_IA64_REG_AR_KR0, r); break;	\
	    case 1: ia64_setreg(_IA64_REG_AR_KR1, r); break;	\
	    case 2: ia64_setreg(_IA64_REG_AR_KR2, r); break;	\
	    case 3: ia64_setreg(_IA64_REG_AR_KR3, r); break;	\
	    case 4: ia64_setreg(_IA64_REG_AR_KR4, r); break;	\
	    case 5: ia64_setreg(_IA64_REG_AR_KR5, r); break;	\
	    case 6: ia64_setreg(_IA64_REG_AR_KR6, r); break;	\
	    case 7: ia64_setreg(_IA64_REG_AR_KR7, r); break;	\
	    default: ia64_setreg_unknown_kr(); break;		\
	}							\
})

/*
 * The following three macros can't be inline functions because we don't have struct
 * task_struct at this point.
 */

/*
 * Return TRUE if task T owns the fph partition of the CPU we're running on.
 * Must be called from code that has preemption disabled.
 */
#define ia64_is_local_fpu_owner(t)								\
({												\
	struct task_struct *__ia64_islfo_task = (t);						\
	(__ia64_islfo_task->thread.last_fph_cpu == smp_processor_id()				\
	 && __ia64_islfo_task == (struct task_struct *) ia64_get_kr(IA64_KR_FPU_OWNER));	\
})

/*
 * Mark task T as owning the fph partition of the CPU we're running on.
 * Must be called from code that has preemption disabled.
 */
#define ia64_set_local_fpu_owner(t) do {						\
	struct task_struct *__ia64_slfo_task = (t);					\
	__ia64_slfo_task->thread.last_fph_cpu = smp_processor_id();			\
	ia64_set_kr(IA64_KR_FPU_OWNER, (unsigned long) __ia64_slfo_task);		\
} while (0)

/* Mark the fph partition of task T as being invalid on all CPUs.  */
#define ia64_drop_fpu(t)	((t)->thread.last_fph_cpu = -1)
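
/*
 * Sketch of the lazy fph protocol these helpers implement (assuming a
 * disabled-FP-register fault handler along the lines of the one in
 * arch/ia64/kernel/traps.c):
 *
 *	if (ia64_is_local_fpu_owner(current)) {
 *		ia64_fph_enable();	// f32-f127 are still live on this CPU
 *	} else {
 *		// save the previous owner's f32-f127, load ours, then:
 *		ia64_set_local_fpu_owner(current);
 *	}
 */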

extern void __ia64_init_fpu (void);
extern void __ia64_save_fpu (struct ia64_fpreg *fph);
extern void __ia64_load_fpu (struct ia64_fpreg *fph);
extern void ia64_save_debug_regs (unsigned long *save_area);
extern void ia64_load_debug_regs (unsigned long *save_area);

#ifdef CONFIG_IA32_SUPPORT
extern void ia32_save_state (struct task_struct *task);
extern void ia32_load_state (struct task_struct *task);
#endif

#define ia64_fph_enable()	do { ia64_rsm(IA64_PSR_DFH); ia64_srlz_d(); } while (0)
#define ia64_fph_disable()	do { ia64_ssm(IA64_PSR_DFH); ia64_srlz_d(); } while (0)

/* load fp 0.0 into fph */
static inline void
ia64_init_fpu (void) {
	ia64_fph_enable();
	__ia64_init_fpu();
	ia64_fph_disable();
}

/* save f32-f127 at FPH */
static inline void
ia64_save_fpu (struct ia64_fpreg *fph) {
	ia64_fph_enable();
	__ia64_save_fpu(fph);
	ia64_fph_disable();
}

/* load f32-f127 from FPH */
static inline void
ia64_load_fpu (struct ia64_fpreg *fph) {
	ia64_fph_enable();
	__ia64_load_fpu(fph);
	ia64_fph_disable();
}

static inline __u64
ia64_clear_ic (void)
{
	__u64 psr;
	psr = ia64_getreg(_IA64_REG_PSR);
	ia64_stop();
	ia64_rsm(IA64_PSR_I | IA64_PSR_IC);
	ia64_srlz_i();
	return psr;
}

/*
 * Restore the psr.
 */
static inline void
ia64_set_psr (__u64 psr)
{
	ia64_stop();
	ia64_setreg(_IA64_REG_PSR_L, psr);
	ia64_srlz_d();
}

/*
 * Insert a translation into an instruction and/or data translation
 * register.
 */
static inline void
ia64_itr (__u64 target_mask, __u64 tr_num,
	  __u64 vmaddr, __u64 pte,
	  __u64 log_page_size)
{
	ia64_setreg(_IA64_REG_CR_ITIR, (log_page_size << 2));
	ia64_setreg(_IA64_REG_CR_IFA, vmaddr);
	ia64_stop();
	if (target_mask & 0x1)
		ia64_itri(tr_num, pte);
	if (target_mask & 0x2)
		ia64_itrd(tr_num, pte);
}
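
/*
 * Typical pairing (sketch, after the pattern used in arch/ia64 setup code):
 * pin a translation with interruption collection disabled, then restore PSR:
 *
 *	psr = ia64_clear_ic();
 *	ia64_itr(0x2, IA64_TR_PERCPU_DATA, va, pte, PERCPU_PAGE_SHIFT);
 *	ia64_set_psr(psr);
 *	ia64_srlz_i();
 */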

/*
 * Insert a translation into the instruction and/or data translation
 * cache.
 */
static inline void
ia64_itc (__u64 target_mask, __u64 vmaddr, __u64 pte,
	  __u64 log_page_size)
{
	ia64_setreg(_IA64_REG_CR_ITIR, (log_page_size << 2));
	ia64_setreg(_IA64_REG_CR_IFA, vmaddr);
	ia64_stop();
	/* as per EAS2.6, itc must be the last instruction in an instruction group */
	if (target_mask & 0x1)
		ia64_itci(pte);
	if (target_mask & 0x2)
		ia64_itcd(pte);
}

/*
 * Purge a range of addresses from instruction and/or data translation
 * register(s).
 */
static inline void
ia64_ptr (__u64 target_mask, __u64 vmaddr, __u64 log_size)
{
	if (target_mask & 0x1)
		ia64_ptri(vmaddr, (log_size << 2));
	if (target_mask & 0x2)
		ia64_ptrd(vmaddr, (log_size << 2));
}

/* Set the interrupt vector address.  The address must be suitably aligned (32KB).  */
static inline void
ia64_set_iva (void *ivt_addr)
{
	ia64_setreg(_IA64_REG_CR_IVA, (__u64) ivt_addr);
	ia64_srlz_i();
}

/* Set the page table address and control bits.  */
static inline void
ia64_set_pta (__u64 pta)
{
	/* Note: srlz.i implies srlz.d */
	ia64_setreg(_IA64_REG_CR_PTA, pta);
	ia64_srlz_i();
}

static inline void
ia64_eoi (void)
{
	ia64_setreg(_IA64_REG_CR_EOI, 0);
	ia64_srlz_d();
}

#define cpu_relax()	ia64_hint(ia64_hint_pause)

static inline int
ia64_get_irr(unsigned int vector)
{
	unsigned int reg = vector / 64;
	unsigned int bit = vector % 64;
	u64 irr = 0;	/* vectors are 0-255, so reg is always 0-3 */

	switch (reg) {
	case 0: irr = ia64_getreg(_IA64_REG_CR_IRR0); break;
	case 1: irr = ia64_getreg(_IA64_REG_CR_IRR1); break;
	case 2: irr = ia64_getreg(_IA64_REG_CR_IRR2); break;
	case 3: irr = ia64_getreg(_IA64_REG_CR_IRR3); break;
	}

	return test_bit(bit, &irr);
}

static inline void
ia64_set_lrr0 (unsigned long val)
{
	ia64_setreg(_IA64_REG_CR_LRR0, val);
	ia64_srlz_d();
}

static inline void
ia64_set_lrr1 (unsigned long val)
{
	ia64_setreg(_IA64_REG_CR_LRR1, val);
	ia64_srlz_d();
}

/*
 * Given the address to which a spill occurred, return the unat bit
 * number that corresponds to this address.
 */
static inline __u64
ia64_unat_pos (void *spill_addr)
{
	return ((__u64) spill_addr >> 3) & 0x3f;
}

/*
 * Set the NaT bit of an integer register which was spilled at address
 * SPILL_ADDR.  UNAT is the mask to be updated.
 */
static inline void
ia64_set_unat (__u64 *unat, void *spill_addr, unsigned long nat)
{
	__u64 bit = ia64_unat_pos(spill_addr);
	__u64 mask = 1UL << bit;

	*unat = (*unat & ~mask) | (nat << bit);
}
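
/*
 * Example (illustrative address): a spill to 0xe000000000001238 gives
 * ia64_unat_pos() == (0x1238 >> 3) & 0x3f == 7, so bit 7 of the UNAT
 * collection holds that register's NaT bit.
 */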

/*
 * Return saved PC of a blocked thread.
 * Note that the only way T can block is through a call to schedule() -> switch_to().
 */
static inline unsigned long
thread_saved_pc (struct task_struct *t)
{
	struct unw_frame_info info;
	unsigned long ip;

	unw_init_from_blocked_task(&info, t);
	if (unw_unwind(&info) < 0)
		return 0;
	unw_get_ip(&info, &ip);
	return ip;
}

/*
 * Get the current instruction/program counter value.
 */
#define current_text_addr() \
	({ void *_pc; _pc = (void *)ia64_getreg(_IA64_REG_IP); _pc; })

static inline __u64
ia64_get_ivr (void)
{
	__u64 r;
	ia64_srlz_d();
	r = ia64_getreg(_IA64_REG_CR_IVR);
	ia64_srlz_d();
	return r;
}

static inline void
ia64_set_dbr (__u64 regnum, __u64 value)
{
	__ia64_set_dbr(regnum, value);
#ifdef CONFIG_ITANIUM
	ia64_srlz_d();
#endif
}

static inline __u64
ia64_get_dbr (__u64 regnum)
{
	__u64 retval;

	retval = __ia64_get_dbr(regnum);
#ifdef CONFIG_ITANIUM
	ia64_srlz_d();
#endif
	return retval;
}

static inline __u64
ia64_rotr (__u64 w, __u64 n)
{
	return (w >> n) | (w << (64 - n));
}

#define ia64_rotl(w,n)	ia64_rotr((w), (64) - (n))
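
/*
 * Example: ia64_rotr(0x00ff, 8) == 0xff00000000000000; ia64_rotl() simply
 * rewrites a left-rotate by n as a right-rotate by 64 - n.
 */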

/*
 * Take a mapped kernel address and return the equivalent address
 * in the region 7 identity mapped virtual area.
 */
static inline void *
ia64_imva (void *addr)
{
	void *result;
	result = (void *) ia64_tpa(addr);
	return __va(result);
}

#define ARCH_HAS_PREFETCH
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH
#define PREFETCH_STRIDE			L1_CACHE_BYTES

static inline void
prefetch (const void *x)
{
	ia64_lfetch(ia64_lfhint_none, x);
}

static inline void
prefetchw (const void *x)
{
	ia64_lfetch_excl(ia64_lfhint_none, x);
}

#define spin_lock_prefetch(x)	prefetchw(x)

extern unsigned long boot_option_idle_override;

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_IA64_PROCESSOR_H */