Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* $Id: processor.h,v 1.83 2002/02/10 06:04:33 davem Exp $ |
2 | * include/asm-sparc64/processor.h | |
3 | * | |
4 | * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu) | |
5 | */ | |
6 | ||
7 | #ifndef __ASM_SPARC64_PROCESSOR_H | |
8 | #define __ASM_SPARC64_PROCESSOR_H | |
9 | ||
10 | /* | |
11 | * Sparc64 implementation of macro that returns current | |
12 | * instruction pointer ("program counter"). | |
13 | */ | |
14 | #define current_text_addr() ({ void *pc; __asm__("rd %%pc, %0" : "=r" (pc)); pc; }) | |
15 | ||
1da177e4 LT |
16 | #include <asm/asi.h> |
17 | #include <asm/a.out.h> | |
18 | #include <asm/pstate.h> | |
19 | #include <asm/ptrace.h> | |
1da177e4 LT |
20 | #include <asm/page.h> |
21 | ||
22 | /* The sparc has no problems with write protection */ | |
23 | #define wp_works_ok 1 | |
24 | #define wp_works_ok__is_a_macro /* for versions in ksyms.c */ | |
25 | ||
26 | /* | |
27 | * User lives in his very own context, and cannot reference us. Note | |
28 | * that TASK_SIZE is a misnomer, it really gives maximum user virtual | |
29 | * address that the kernel will allocate out. | |
74bf4312 DM |
30 | * |
31 | * XXX No longer using virtual page tables, kill this upper limit... | |
1da177e4 LT |
32 | */ |
33 | #define VA_BITS 44 | |
34 | #ifndef __ASSEMBLY__ | |
35 | #define VPTE_SIZE (1UL << (VA_BITS - PAGE_SHIFT + 3)) | |
36 | #else | |
37 | #define VPTE_SIZE (1 << (VA_BITS - PAGE_SHIFT + 3)) | |
38 | #endif | |
39 | #define TASK_SIZE ((unsigned long)-VPTE_SIZE) | |
40 | ||
1da177e4 LT |
41 | #ifndef __ASSEMBLY__ |
42 | ||
43 | typedef struct { | |
44 | unsigned char seg; | |
45 | } mm_segment_t; | |
46 | ||
47 | /* The Sparc processor specific thread struct. */ | |
48 | /* XXX This should die, everything can go into thread_info now. */ | |
49 | struct thread_struct { | |
50 | #ifdef CONFIG_DEBUG_SPINLOCK | |
51 | /* How many spinlocks held by this thread. | |
52 | * Used with spin lock debugging to catch tasks | |
53 | * sleeping illegally with locks held. | |
54 | */ | |
55 | int smp_lock_count; | |
56 | unsigned int smp_lock_pc; | |
57 | #else | |
58 | int dummy; /* f'in gcc bug... */ | |
59 | #endif | |
60 | }; | |
61 | ||
62 | #endif /* !(__ASSEMBLY__) */ | |
63 | ||
64 | #ifndef CONFIG_DEBUG_SPINLOCK | |
65 | #define INIT_THREAD { \ | |
66 | 0, \ | |
67 | } | |
68 | #else /* CONFIG_DEBUG_SPINLOCK */ | |
69 | #define INIT_THREAD { \ | |
70 | /* smp_lock_count, smp_lock_pc, */ \ | |
71 | 0, 0, \ | |
72 | } | |
73 | #endif /* !(CONFIG_DEBUG_SPINLOCK) */ | |
74 | ||
75 | #ifndef __ASSEMBLY__ | |
76 | ||
77 | #include <linux/types.h> | |
78 | ||
79 | /* Return saved PC of a blocked thread. */ | |
80 | struct task_struct; | |
81 | extern unsigned long thread_saved_pc(struct task_struct *); | |
82 | ||
83 | /* On Uniprocessor, even in RMO processes see TSO semantics */ | |
84 | #ifdef CONFIG_SMP | |
85 | #define TSTATE_INITIAL_MM TSTATE_TSO | |
86 | #else | |
87 | #define TSTATE_INITIAL_MM TSTATE_RMO | |
88 | #endif | |
89 | ||
90 | /* Do necessary setup to start up a newly executed thread. */ | |
91 | #define start_thread(regs, pc, sp) \ | |
92 | do { \ | |
0f05da6d DM |
93 | unsigned long __asi = ASI_PNF; \ |
94 | regs->tstate = (regs->tstate & (TSTATE_CWP)) | (TSTATE_INITIAL_MM|TSTATE_IE) | (__asi << 24UL); \ | |
1da177e4 LT |
95 | regs->tpc = ((pc & (~3)) - 4); \ |
96 | regs->tnpc = regs->tpc + 4; \ | |
97 | regs->y = 0; \ | |
98 | set_thread_wstate(1 << 3); \ | |
99 | if (current_thread_info()->utraps) { \ | |
100 | if (*(current_thread_info()->utraps) < 2) \ | |
101 | kfree(current_thread_info()->utraps); \ | |
102 | else \ | |
103 | (*(current_thread_info()->utraps))--; \ | |
104 | current_thread_info()->utraps = NULL; \ | |
105 | } \ | |
106 | __asm__ __volatile__( \ | |
107 | "stx %%g0, [%0 + %2 + 0x00]\n\t" \ | |
108 | "stx %%g0, [%0 + %2 + 0x08]\n\t" \ | |
109 | "stx %%g0, [%0 + %2 + 0x10]\n\t" \ | |
110 | "stx %%g0, [%0 + %2 + 0x18]\n\t" \ | |
111 | "stx %%g0, [%0 + %2 + 0x20]\n\t" \ | |
112 | "stx %%g0, [%0 + %2 + 0x28]\n\t" \ | |
113 | "stx %%g0, [%0 + %2 + 0x30]\n\t" \ | |
114 | "stx %%g0, [%0 + %2 + 0x38]\n\t" \ | |
115 | "stx %%g0, [%0 + %2 + 0x40]\n\t" \ | |
116 | "stx %%g0, [%0 + %2 + 0x48]\n\t" \ | |
117 | "stx %%g0, [%0 + %2 + 0x50]\n\t" \ | |
118 | "stx %%g0, [%0 + %2 + 0x58]\n\t" \ | |
119 | "stx %%g0, [%0 + %2 + 0x60]\n\t" \ | |
120 | "stx %%g0, [%0 + %2 + 0x68]\n\t" \ | |
121 | "stx %1, [%0 + %2 + 0x70]\n\t" \ | |
122 | "stx %%g0, [%0 + %2 + 0x78]\n\t" \ | |
123 | "wrpr %%g0, (1 << 3), %%wstate\n\t" \ | |
124 | : \ | |
125 | : "r" (regs), "r" (sp - sizeof(struct reg_window) - STACK_BIAS), \ | |
126 | "i" ((const unsigned long)(&((struct pt_regs *)0)->u_regs[0]))); \ | |
127 | } while (0) | |
128 | ||
129 | #define start_thread32(regs, pc, sp) \ | |
130 | do { \ | |
0f05da6d | 131 | unsigned long __asi = ASI_PNF; \ |
1da177e4 LT |
132 | pc &= 0x00000000ffffffffUL; \ |
133 | sp &= 0x00000000ffffffffUL; \ | |
0f05da6d | 134 | regs->tstate = (regs->tstate & (TSTATE_CWP))|(TSTATE_INITIAL_MM|TSTATE_IE|TSTATE_AM) | (__asi << 24UL); \ |
1da177e4 LT |
135 | regs->tpc = ((pc & (~3)) - 4); \ |
136 | regs->tnpc = regs->tpc + 4; \ | |
137 | regs->y = 0; \ | |
138 | set_thread_wstate(2 << 3); \ | |
139 | if (current_thread_info()->utraps) { \ | |
140 | if (*(current_thread_info()->utraps) < 2) \ | |
141 | kfree(current_thread_info()->utraps); \ | |
142 | else \ | |
143 | (*(current_thread_info()->utraps))--; \ | |
144 | current_thread_info()->utraps = NULL; \ | |
145 | } \ | |
146 | __asm__ __volatile__( \ | |
147 | "stx %%g0, [%0 + %2 + 0x00]\n\t" \ | |
148 | "stx %%g0, [%0 + %2 + 0x08]\n\t" \ | |
149 | "stx %%g0, [%0 + %2 + 0x10]\n\t" \ | |
150 | "stx %%g0, [%0 + %2 + 0x18]\n\t" \ | |
151 | "stx %%g0, [%0 + %2 + 0x20]\n\t" \ | |
152 | "stx %%g0, [%0 + %2 + 0x28]\n\t" \ | |
153 | "stx %%g0, [%0 + %2 + 0x30]\n\t" \ | |
154 | "stx %%g0, [%0 + %2 + 0x38]\n\t" \ | |
155 | "stx %%g0, [%0 + %2 + 0x40]\n\t" \ | |
156 | "stx %%g0, [%0 + %2 + 0x48]\n\t" \ | |
157 | "stx %%g0, [%0 + %2 + 0x50]\n\t" \ | |
158 | "stx %%g0, [%0 + %2 + 0x58]\n\t" \ | |
159 | "stx %%g0, [%0 + %2 + 0x60]\n\t" \ | |
160 | "stx %%g0, [%0 + %2 + 0x68]\n\t" \ | |
161 | "stx %1, [%0 + %2 + 0x70]\n\t" \ | |
162 | "stx %%g0, [%0 + %2 + 0x78]\n\t" \ | |
163 | "wrpr %%g0, (2 << 3), %%wstate\n\t" \ | |
164 | : \ | |
165 | : "r" (regs), "r" (sp - sizeof(struct reg_window32)), \ | |
166 | "i" ((const unsigned long)(&((struct pt_regs *)0)->u_regs[0]))); \ | |
167 | } while (0) | |
168 | ||
169 | /* Free all resources held by a thread. */ | |
170 | #define release_thread(tsk) do { } while (0) | |
171 | ||
172 | /* Prepare to copy thread state - unlazy all lazy status */ | |
173 | #define prepare_to_copy(tsk) do { } while (0) | |
174 | ||
175 | extern pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags); | |
176 | ||
177 | extern unsigned long get_wchan(struct task_struct *task); | |
178 | ||
26ecbdea AV |
179 | #define task_pt_regs(tsk) (task_thread_info(tsk)->kregs) |
180 | #define KSTK_EIP(tsk) (task_pt_regs(tsk)->tpc) | |
181 | #define KSTK_ESP(tsk) (task_pt_regs(tsk)->u_regs[UREG_FP]) | |
1da177e4 LT |
182 | |
183 | #define cpu_relax() barrier() | |
184 | ||
7049e680 DM |
185 | /* Prefetch support. This is tuned for UltraSPARC-III and later. |
186 | * UltraSPARC-I will treat these as nops, and UltraSPARC-II has | |
187 | * a shallower prefetch queue than later chips. | |
188 | */ | |
189 | #define ARCH_HAS_PREFETCH | |
190 | #define ARCH_HAS_PREFETCHW | |
191 | #define ARCH_HAS_SPINLOCK_PREFETCH | |
192 | ||
193 | static inline void prefetch(const void *x) | |
194 | { | |
195 | /* We do not use the read prefetch mnemonic because that | |
196 | * prefetches into the prefetch-cache which only is accessible | |
197 | * by floating point operations in UltraSPARC-III and later. | |
198 | * By contrast, "#one_write" prefetches into the L2 cache | |
199 | * in shared state. | |
200 | */ | |
201 | __asm__ __volatile__("prefetch [%0], #one_write" | |
202 | : /* no outputs */ | |
203 | : "r" (x)); | |
204 | } | |
205 | ||
206 | static inline void prefetchw(const void *x) | |
207 | { | |
208 | /* The most optimal prefetch to use for writes is | |
209 | * "#n_writes". This brings the cacheline into the | |
210 | * L2 cache in "owned" state. | |
211 | */ | |
212 | __asm__ __volatile__("prefetch [%0], #n_writes" | |
213 | : /* no outputs */ | |
214 | : "r" (x)); | |
215 | } | |
216 | ||
217 | #define spin_lock_prefetch(x) prefetchw(x) | |
218 | ||
a91690dd DM |
219 | #define HAVE_ARCH_PICK_MMAP_LAYOUT |
220 | ||
1da177e4 LT |
221 | #endif /* !(__ASSEMBLY__) */ |
222 | ||
223 | #endif /* !(__ASM_SPARC64_PROCESSOR_H) */ |