/*
 * Linux/PA-RISC Project (http://www.parisc-linux.org/)
 *
 * System call entry code / Linux gateway page
 * Copyright (c) Matthew Wilcox 1999 <willy@bofh.ai>
 * Licensed under the GNU GPL.
 * thanks to Philipp Rumpf, Mike Shaver and various others
 * sorry about the wall, puffin..
 */

/*
How does the Linux gateway page on PA-RISC work?
------------------------------------------------
The Linux gateway page on PA-RISC is "special".
It actually has PAGE_GATEWAY bits set (this is linux terminology; in parisc
terminology it's Execute, promote to PL0) in the page map.  So anything
executing on this page executes with kernel level privilege (there's more to it
than that: to have this happen, you also have to use a branch with a ,gate
completer to activate the privilege promotion).  The upshot is that everything
that runs on the gateway page runs at kernel privilege but with the current
user process address space (although you have access to kernel space via %sr2).
For the 0x100 syscall entry, we redo the space registers to point to the kernel
address space (preserving the user address space in %sr3), move to wide mode if
required, save the user registers and branch into the kernel syscall entry
point.  For all the other functions, we execute at kernel privilege but don't
flip address spaces.  The basic upshot of this is that these code snippets are
executed atomically (because the kernel can't be pre-empted) and they may
perform architecturally forbidden (to PL3) operations (like setting control
registers).
*/


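/*
 * Illustrative sketch (not part of this file, and not a verbatim copy of any
 * particular libc): userspace reaches the 0x100 entry below with a plain
 * branch through %sr2, passing the syscall number in %r20 (here a
 * hypothetical __NR_getpid) and arguments in %r26..%r21; the result comes
 * back in %r28.  Roughly, in GCC inline asm:
 *
 *	register unsigned long ret asm("r28");
 *
 *	asm volatile ("ble 0x100(%%sr2, %%r0)\n\t"
 *		      "ldi %1, %%r20"		// delay slot: syscall number
 *		      : "=r" (ret)
 *		      : "i" (__NR_getpid)
 *		      : "r1", "r2", "r20", "r21", "r22", "r23", "r24",
 *			"r25", "r26", "r27", "r29", "r31", "memory");
 */
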
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/errno.h>
#include <asm/page.h>
#include <asm/psw.h>
#include <asm/thread_info.h>
#include <asm/assembly.h>
#include <asm/processor.h>
#include <asm/cache.h>

#include <linux/linkage.h>

	/* We fill the empty parts of the gateway page with
	 * something that will kill the kernel or a
	 * userspace application.
	 */
#define KILL_INSN	break	0,0

	.level		LEVEL

	.text

	.import syscall_exit,code
	.import syscall_exit_rfi,code

	/* Linux gateway page is aliased to virtual page 0 in the kernel
	 * address space. Since it is a gateway page it cannot be
	 * dereferenced, so null pointers will still fault. We start
	 * the actual entry point at 0x100. We put break instructions
	 * at the beginning of the page to trap null indirect function
	 * pointers.
	 */

	.align PAGE_SIZE
ENTRY(linux_gateway_page)

	/* ADDRESS 0x00 to 0xb0 = 176 bytes / 4 bytes per insn = 44 insns */
	.rept 44
	KILL_INSN
	.endr

	/* ADDRESS 0xb0 to 0xb8, lws uses two insns for entry */
	/* Light-weight-syscall entry must always be located at 0xb0 */
	/* WARNING: Keep this number updated with table size changes */
#define __NR_lws_entries (2)

lws_entry:
	gate	lws_start, %r0		/* increase privilege */
	depi	3, 31, 2, %r31		/* Ensure we return into user mode. */

	/* Fill from 0xb8 to 0xe0 */
	.rept 10
	KILL_INSN
	.endr

	/* This function MUST be located at 0xe0 for glibc's threading
	mechanism to work. DO NOT MOVE THIS CODE EVER! */
set_thread_pointer:
	gate	.+8, %r0		/* increase privilege */
	depi	3, 31, 2, %r31		/* Ensure we return into user mode. */
	be	0(%sr7,%r31)		/* return to user space */
	mtctl	%r26, %cr27		/* move arg0 to the control register */

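/*
 * Illustrative sketch (an assumption modeled on the entry above, not a
 * verbatim libc excerpt): a userspace thread library typically reaches
 * set_thread_pointer with a ble through %sr2 and loads the new thread
 * pointer into %r26 from the branch delay slot, e.g. with a hypothetical
 * set_tp() wrapper:
 *
 *	static inline void set_tp(void *tp)
 *	{
 *		asm volatile ("ble 0xe0(%%sr2, %%r0)\n\t"
 *			      "copy %0, %%r26"	// delay slot: arg0 for cr27
 *			      : : "r" (tp) : "r26", "r31");
 *	}
 */
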
	/* Increase the chance of trapping if random jumps occur to this
	address, fill from 0xf0 to 0x100 */
	.rept 4
	KILL_INSN
	.endr

	/* This address must remain fixed at 0x100 for glibc's syscalls to work */
	.align 256
linux_gateway_entry:
	gate	.+8, %r0			/* become privileged */
	mtsp	%r0,%sr4			/* get kernel space into sr4 */
	mtsp	%r0,%sr5			/* get kernel space into sr5 */
	mtsp	%r0,%sr6			/* get kernel space into sr6 */
	mfsp	%sr7,%r1			/* save user sr7 */
	mtsp	%r1,%sr3			/* and store it in sr3 */

#ifdef CONFIG_64BIT
	/* for now we can *always* set the W bit on entry to the syscall
	 * since we don't support wide userland processes.  We could
	 * also save the current SM other than in r0 and restore it on
	 * exit from the syscall, and also use that value to know
	 * whether to do narrow or wide syscalls. -PB
	 */
	ssm	PSW_SM_W, %r1
	extrd,u	%r1,PSW_W_BIT,1,%r1
	/* sp must be aligned on 4, so deposit the W bit setting into
	 * the bottom of sp temporarily */
	or,ev	%r1,%r30,%r30
	b,n	1f
	/* The top halves of argument registers must be cleared on syscall
	 * entry from narrow executable.
	 */
	depdi	0, 31, 32, %r26
	depdi	0, 31, 32, %r25
	depdi	0, 31, 32, %r24
	depdi	0, 31, 32, %r23
	depdi	0, 31, 32, %r22
	depdi	0, 31, 32, %r21
1:
#endif
	mfctl	%cr30,%r1
	xor	%r1,%r30,%r30			/* ye olde xor trick */
	xor	%r1,%r30,%r1
	xor	%r1,%r30,%r30

	ldo	THREAD_SZ_ALGN+FRAME_SIZE(%r30),%r30	/* set up kernel stack */

	/* N.B.: It is critical that we don't set sr7 to 0 until r30
	 *       contains a valid kernel stack pointer. It is also
	 *       critical that we don't start using the kernel stack
	 *       until after sr7 has been set to 0.
	 */

	mtsp	%r0,%sr7			/* get kernel space into sr7 */
	STREGM	%r1,FRAME_SIZE(%r30)		/* save r1 (usp) here for now */
	mfctl	%cr30,%r1			/* get task ptr in %r1 */
	LDREG	TI_TASK(%r1),%r1

	/* Save some registers for sigcontext and potential task
	   switch (see entry.S for the details of which ones are
	   saved/restored).  TASK_PT_PSW is zeroed so we can see whether
	   a process is on a syscall or not.  For an interrupt the real
	   PSW value is stored.  This is needed for gdb and sys_ptrace. */
	STREG	%r0,  TASK_PT_PSW(%r1)
	STREG	%r2,  TASK_PT_GR2(%r1)		/* preserve rp */
	STREG	%r19, TASK_PT_GR19(%r1)

	LDREGM	-FRAME_SIZE(%r30), %r2		/* get users sp back */
#ifdef CONFIG_64BIT
	extrd,u	%r2,63,1,%r19			/* W hidden in bottom bit */
#if 0
	xor	%r19,%r2,%r2			/* clear bottom bit */
	depd,z	%r19,1,1,%r19
	std	%r19,TASK_PT_PSW(%r1)
#endif
#endif
	STREG	%r2,  TASK_PT_GR30(%r1)		/* ... and save it */

	STREG	%r20, TASK_PT_GR20(%r1)		/* Syscall number */
	STREG	%r21, TASK_PT_GR21(%r1)
	STREG	%r22, TASK_PT_GR22(%r1)
	STREG	%r23, TASK_PT_GR23(%r1)		/* 4th argument */
	STREG	%r24, TASK_PT_GR24(%r1)		/* 3rd argument */
	STREG	%r25, TASK_PT_GR25(%r1)		/* 2nd argument */
	STREG	%r26, TASK_PT_GR26(%r1)		/* 1st argument */
	STREG	%r27, TASK_PT_GR27(%r1)		/* user dp */
	STREG	%r28, TASK_PT_GR28(%r1)		/* return value 0 */
	STREG	%r0, TASK_PT_ORIG_R28(%r1)	/* don't prohibit restarts */
	STREG	%r29, TASK_PT_GR29(%r1)		/* return value 1 */
	STREG	%r31, TASK_PT_GR31(%r1)		/* preserve syscall return ptr */

	ldo	TASK_PT_FR0(%r1), %r27		/* save fpregs from the kernel */
	save_fp	%r27				/* or potential task switch */

	mfctl	%cr11, %r27			/* i.e. SAR */
	STREG	%r27, TASK_PT_SAR(%r1)

	loadgp

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29			/* Reference param save area */
	copy	%r19,%r2			/* W bit back to r2 */
#else
	/* no need to save these on stack in wide mode because the first 8
	 * args are passed in registers */
	stw	%r22, -52(%r30)			/* 5th argument */
	stw	%r21, -56(%r30)			/* 6th argument */
#endif

	/* Are we being ptraced? */
	mfctl	%cr30, %r1
	LDREG	TI_FLAGS(%r1),%r1
	ldi	_TIF_SYSCALL_TRACE_MASK, %r19
	and,COND(=) %r1, %r19, %r0
	b,n	.Ltracesys

	/* Note!  We cannot use the syscall table that is mapped
	nearby since the gateway page is mapped execute-only. */

#ifdef CONFIG_64BIT
	ldil	L%sys_call_table, %r1
	or,=	%r2,%r2,%r2
	addil	L%(sys_call_table64-sys_call_table), %r1
	ldo	R%sys_call_table(%r1), %r19
	or,=	%r2,%r2,%r2
	ldo	R%sys_call_table64(%r1), %r19
#else
	ldil	L%sys_call_table, %r1
	ldo	R%sys_call_table(%r1), %r19
#endif
	comiclr,>>	__NR_Linux_syscalls, %r20, %r0
	b,n	.Lsyscall_nosys

	LDREGX	%r20(%r19), %r19

	/* If this is a sys_rt_sigreturn call, and the signal was received
	 * when not in_syscall, then we want to return via syscall_exit_rfi,
	 * not syscall_exit.  Signal no. in r20, in_syscall in r25 (see
	 * trampoline code in signal.c).
	 */
	ldi	__NR_rt_sigreturn,%r2
	comb,=	%r2,%r20,.Lrt_sigreturn
.Lin_syscall:
	ldil	L%syscall_exit,%r2
	be	0(%sr7,%r19)
	ldo	R%syscall_exit(%r2),%r2
.Lrt_sigreturn:
	comib,<> 0,%r25,.Lin_syscall
	ldil	L%syscall_exit_rfi,%r2
	be	0(%sr7,%r19)
	ldo	R%syscall_exit_rfi(%r2),%r2

	/* Note!  Because we are not running where we were linked, any
	calls to functions external to this file must be indirect.  To
	be safe, we apply the opposite rule to functions within this
	file, with local labels given to them to ensure correctness. */

.Lsyscall_nosys:
syscall_nosys:
	ldil	L%syscall_exit,%r1
	be	R%syscall_exit(%sr7,%r1)
	ldo	-ENOSYS(%r0),%r28		/* set errno */


	/* Warning! This trace code is a virtual duplicate of the code above so be
	 * sure to maintain both! */
.Ltracesys:
tracesys:
	/* Need to save more registers so the debugger can see where we
	 * are.  This saves only the lower 8 bits of PSW, so that the C
	 * bit is still clear on syscalls, and the D bit is set if this
	 * full register save path has been executed.  We check the D
	 * bit on syscall_return_rfi to determine which registers to
	 * restore.  An interrupt results in a full PSW saved with the
	 * C bit set, a non-straced syscall entry results in C and D clear
	 * in the saved PSW.
	 */
	ldo	-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1	/* get task ptr */
	LDREG	TI_TASK(%r1), %r1
	ssm	0,%r2
	STREG	%r2,TASK_PT_PSW(%r1)		/* Lower 8 bits only!! */
	mfsp	%sr0,%r2
	STREG	%r2,TASK_PT_SR0(%r1)
	mfsp	%sr1,%r2
	STREG	%r2,TASK_PT_SR1(%r1)
	mfsp	%sr2,%r2
	STREG	%r2,TASK_PT_SR2(%r1)
	mfsp	%sr3,%r2
	STREG	%r2,TASK_PT_SR3(%r1)
	STREG	%r2,TASK_PT_SR4(%r1)
	STREG	%r2,TASK_PT_SR5(%r1)
	STREG	%r2,TASK_PT_SR6(%r1)
	STREG	%r2,TASK_PT_SR7(%r1)
	STREG	%r2,TASK_PT_IASQ0(%r1)
	STREG	%r2,TASK_PT_IASQ1(%r1)
	LDREG	TASK_PT_GR31(%r1),%r2
	STREG	%r2,TASK_PT_IAOQ0(%r1)
	ldo	4(%r2),%r2
	STREG	%r2,TASK_PT_IAOQ1(%r1)
	ldo	TASK_REGS(%r1),%r2
	/* reg_save %r2 */
	STREG	%r3,PT_GR3(%r2)
	STREG	%r4,PT_GR4(%r2)
	STREG	%r5,PT_GR5(%r2)
	STREG	%r6,PT_GR6(%r2)
	STREG	%r7,PT_GR7(%r2)
	STREG	%r8,PT_GR8(%r2)
	STREG	%r9,PT_GR9(%r2)
	STREG	%r10,PT_GR10(%r2)
	STREG	%r11,PT_GR11(%r2)
	STREG	%r12,PT_GR12(%r2)
	STREG	%r13,PT_GR13(%r2)
	STREG	%r14,PT_GR14(%r2)
	STREG	%r15,PT_GR15(%r2)
	STREG	%r16,PT_GR16(%r2)
	STREG	%r17,PT_GR17(%r2)
	STREG	%r18,PT_GR18(%r2)
	/* Finished saving things for the debugger */

	copy	%r2,%r26
	ldil	L%do_syscall_trace_enter,%r1
	ldil	L%tracesys_next,%r2
	be	R%do_syscall_trace_enter(%sr7,%r1)
	ldo	R%tracesys_next(%r2),%r2

tracesys_next:
	/* do_syscall_trace_enter either returned the syscallno, or -1L,
	 * so we skip restoring the PT_GR20 below, since we pulled it from
	 * task->thread.regs.gr[20] above.
	 */
	copy	%ret0,%r20
	ldil	L%sys_call_table,%r1
	ldo	R%sys_call_table(%r1), %r19

	ldo	-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1	/* get task ptr */
	LDREG	TI_TASK(%r1), %r1
	LDREG	TASK_PT_GR26(%r1), %r26		/* Restore the users args */
	LDREG	TASK_PT_GR25(%r1), %r25
	LDREG	TASK_PT_GR24(%r1), %r24
	LDREG	TASK_PT_GR23(%r1), %r23
	LDREG	TASK_PT_GR22(%r1), %r22
	LDREG	TASK_PT_GR21(%r1), %r21
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29			/* Reference param save area */
#else
	stw	%r22, -52(%r30)			/* 5th argument */
	stw	%r21, -56(%r30)			/* 6th argument */
#endif

	comiclr,>>=	__NR_Linux_syscalls, %r20, %r0
	b,n	.Lsyscall_nosys

	LDREGX	%r20(%r19), %r19

	/* If this is a sys_rt_sigreturn call, and the signal was received
	 * when not in_syscall, then we want to return via syscall_exit_rfi,
	 * not syscall_exit.  Signal no. in r20, in_syscall in r25 (see
	 * trampoline code in signal.c).
	 */
	ldi	__NR_rt_sigreturn,%r2
	comb,=	%r2,%r20,.Ltrace_rt_sigreturn
.Ltrace_in_syscall:
	ldil	L%tracesys_exit,%r2
	be	0(%sr7,%r19)
	ldo	R%tracesys_exit(%r2),%r2

	/* Do *not* call this function on the gateway page, because it
	makes a direct call to syscall_trace. */

tracesys_exit:
	ldo	-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1	/* get task ptr */
	LDREG	TI_TASK(%r1), %r1
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29			/* Reference param save area */
#endif
	ldo	TASK_REGS(%r1),%r26
	bl	do_syscall_trace_exit,%r2
	STREG	%r28,TASK_PT_GR28(%r1)		/* save return value now */
	ldo	-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1	/* get task ptr */
	LDREG	TI_TASK(%r1), %r1
	LDREG	TASK_PT_GR28(%r1), %r28		/* Restore return val. */

	ldil	L%syscall_exit,%r1
	be,n	R%syscall_exit(%sr7,%r1)

.Ltrace_rt_sigreturn:
	comib,<> 0,%r25,.Ltrace_in_syscall
	ldil	L%tracesys_sigexit,%r2
	be	0(%sr7,%r19)
	ldo	R%tracesys_sigexit(%r2),%r2

tracesys_sigexit:
	ldo	-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1	/* get task ptr */
	LDREG	TI_TASK(%r1), %r1
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29			/* Reference param save area */
#endif
	bl	do_syscall_trace_exit,%r2
	ldo	TASK_REGS(%r1),%r26

	ldil	L%syscall_exit_rfi,%r1
	be,n	R%syscall_exit_rfi(%sr7,%r1)


	/*********************************************************
		32/64-bit Light-Weight-Syscall ABI

		* - Indicates a hint for userspace inline asm
		implementations.

		Syscall number (caller-saves)
		- %r20
		* In asm clobber.

		Argument registers (caller-saves)
		- %r26, %r25, %r24, %r23, %r22
		* In asm input.

		Return registers (caller-saves)
		- %r28 (return), %r21 (errno)
		* In asm output.

		Caller-saves registers
		- %r1, %r27, %r29
		- %r2 (return pointer)
		- %r31 (ble link register)
		* In asm clobber.

		Callee-saves registers
		- %r3-%r18
		- %r30 (stack pointer)
		* Not in asm clobber.

		If userspace is 32-bit:
		Callee-saves registers
		- %r19 (32-bit PIC register)

		Differences from 32-bit calling convention:
		- Syscall number in %r20
		- Additional argument register %r22 (arg4)
		- Callee-saves %r19.

		If userspace is 64-bit:
		Callee-saves registers
		- %r27 (64-bit PIC register)

		Differences from 64-bit calling convention:
		- Syscall number in %r20
		- Additional argument register %r22 (arg4)
		- Callee-saves %r27.

		Error codes returned by entry path:

		ENOSYS - r20 was an invalid LWS number.

	*********************************************************/
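
/*
 * Illustrative sketch of a caller that follows the ABI above (an assumption
 * built from the documented register usage, not a verbatim libc excerpt).
 * LWS index 0 is the 32-bit compare-and-swap; the old memory word comes back
 * in %r28 and the (negative) error code in %r21.  A hypothetical lws_cas32()
 * wrapper:
 *
 *	static inline long lws_cas32(volatile int *mem, int oldval, int newval,
 *				     int *prev)
 *	{
 *		register unsigned long a0 asm("r26") = (unsigned long) mem;
 *		register unsigned long a1 asm("r25") = (unsigned long) oldval;
 *		register unsigned long a2 asm("r24") = (unsigned long) newval;
 *		register unsigned long r28 asm("r28");
 *		register long r21 asm("r21");
 *
 *		asm volatile ("ble 0xb0(%%sr2, %%r0)\n\t"
 *			      "ldi 0, %%r20"	// delay slot: LWS entry 0
 *			      : "=r" (r28), "=r" (r21)
 *			      : "r" (a0), "r" (a1), "r" (a2)
 *			      : "r1", "r2", "r20", "r22", "r23", "r27",
 *				"r29", "r31", "memory");
 *		*prev = (int) r28;
 *		return r21;	// 0 on success, -EAGAIN / -EFAULT / ...
 *	}
 */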
lws_start:

#ifdef CONFIG_64BIT
	/* FIXME: If we are a 64-bit kernel just
	 *        turn this on unconditionally.
	 */
	ssm	PSW_SM_W, %r1
	extrd,u	%r1,PSW_W_BIT,1,%r1
	/* sp must be aligned on 4, so deposit the W bit setting into
	 * the bottom of sp temporarily */
	or,ev	%r1,%r30,%r30

	/* Clip LWS number to a 32-bit value always */
	depdi	0, 31, 32, %r20
#endif

	/* Is the lws entry number valid? */
	comiclr,>>	__NR_lws_entries, %r20, %r0
	b,n	lws_exit_nosys

	/* WARNING: Trashing sr2 and sr3 */
	mfsp	%sr7,%r1			/* get userspace into sr3 */
	mtsp	%r1,%sr3
	mtsp	%r0,%sr2			/* get kernel space into sr2 */

	/* Load table start */
	ldil	L%lws_table, %r1
	ldo	R%lws_table(%r1), %r28		/* Scratch use of r28 */
	LDREGX	%r20(%sr2,r28), %r21		/* Scratch use of r21 */

	/* Jump to lws, lws table pointers already relocated */
	be,n	0(%sr2,%r21)

lws_exit_nosys:
	ldo	-ENOSYS(%r0),%r21		/* set errno */
	/* Fall through: Return to userspace */

lws_exit:
#ifdef CONFIG_64BIT
	/* decide whether to reset the wide mode bit
	 *
	 * For a syscall, the W bit is stored in the lowest bit
	 * of sp.  Extract it and reset W if it is zero */
	extrd,u,*<>	%r30,63,1,%r1
	rsm	PSW_SM_W, %r0
	/* now reset the lowest bit of sp if it was set */
	xor	%r30,%r1,%r30
#endif
	be,n	0(%sr7, %r31)



	/***************************************************
		Implementing CAS as an atomic operation:

		%r26 - Address to examine
		%r25 - Old value to check (old)
		%r24 - New value to set (new)
		%r28 - Return prev through this register.
		%r21 - Kernel error code

		If debugging is DISabled:

		%r21 has the following meanings:

		EAGAIN - CAS is busy, ldcw failed, try again.
		EFAULT - Read or write failed.

		If debugging is enabled:

		EDEADLOCK - CAS called recursively.
		EAGAIN && r28 == 1 - CAS is busy. Lock contended.
		EAGAIN && r28 == 2 - CAS is busy. ldcw failed.
		EFAULT - Read or write failed.

		Scratch: r20, r28, r1

	****************************************************/
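
/*
 * Usage note (sketch): -EAGAIN only means the hashed lock below was busy, so
 * callers are expected to spin in userspace and retry, e.g. with a wrapper
 * like the hypothetical lws_cas32() sketched after the LWS ABI block above:
 *
 *	int prev;
 *	long err;
 *	do {
 *		err = lws_cas32(mem, oldval, newval, &prev);
 *	} while (err == -EAGAIN);
 *	// err == 0: prev holds the previous value of *mem
 *	// err == -EFAULT: the user address could not be read or written
 */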

	/* Do not enable LWS debugging */
#define ENABLE_LWS_DEBUG 0

	/* ELF64 Process entry path */
lws_compare_and_swap64:
#ifdef CONFIG_64BIT
	b,n	lws_compare_and_swap
#else
	/* If we are not a 64-bit kernel, then we don't
	 * have 64-bit input registers, and calling
	 * the 64-bit LWS CAS returns ENOSYS.
	 */
	b,n	lws_exit_nosys
#endif

	/* ELF32 Process entry path */
lws_compare_and_swap32:
#ifdef CONFIG_64BIT
	/* Clip all the input registers */
	depdi	0, 31, 32, %r26
	depdi	0, 31, 32, %r25
	depdi	0, 31, 32, %r24
#endif

lws_compare_and_swap:
	/* Load start of lock table */
	ldil	L%lws_lock_start, %r20
	ldo	R%lws_lock_start(%r20), %r28

	/* Extract four bits from r26 and hash lock (Bits 4-7) */
	extru	%r26, 27, 4, %r20

	/* Find lock to use, the hash is either one of 0 to
	   15, multiplied by 16 (keep it 16-byte aligned)
	   and add to the lock table offset. */
	shlw	%r20, 4, %r20
	add	%r20, %r28, %r20

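/*
 * Equivalent C sketch of the lock selection done above (illustrative only):
 * bits 4..7 of the user address pick one of the 16 locks, and each lock slot
 * in lws_lock_start is 16 bytes wide, i.e.
 *
 *	lock = (char *) lws_lock_start
 *	       + (((unsigned long) addr >> 4) & 0xf) * 16;
 */
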
# if ENABLE_LWS_DEBUG
	/*
		DEBUG, check for deadlock!
		If the thread register values are the same
		then we were the one that locked it last and
		this is a recursive call that will deadlock.
		We *must* give up this call and fail.
	*/
	ldw	4(%sr2,%r20), %r28		/* Load thread register */
	/* WARNING: If cr27 cycles to the same value we have problems */
	mfctl	%cr27, %r21			/* Get current thread register */
	cmpb,<>,n	%r21, %r28, cas_lock	/* Called recursive? */
	b	lws_exit			/* Return error! */
	ldo	-EDEADLOCK(%r0), %r21
cas_lock:
	cmpb,=,n	%r0, %r28, cas_nocontend /* Is nobody using it? */
	ldo	1(%r0), %r28			/* 1st case */
	b	lws_exit			/* Contended... */
	ldo	-EAGAIN(%r0), %r21		/* Spin in userspace */
cas_nocontend:
# endif
/* ENABLE_LWS_DEBUG */

	LDCW	0(%sr2,%r20), %r28		/* Try to acquire the lock */
	cmpb,<>,n	%r0, %r28, cas_action	/* Did we get it? */
cas_wouldblock:
	ldo	2(%r0), %r28			/* 2nd case */
	b	lws_exit			/* Contended... */
	ldo	-EAGAIN(%r0), %r21		/* Spin in userspace */

	/*
		prev = *addr;
		if ( prev == old )
			*addr = new;
		return prev;
	*/

	/* NOTES:
		This all works because intr_do_signal
		and schedule both check the return iasq
		and see that we are on the kernel page
		so this process is never scheduled off
		or is ever sent any signal of any sort,
		thus it is wholly atomic from userspace's
		perspective.
	*/
cas_action:
#if defined CONFIG_SMP && ENABLE_LWS_DEBUG
	/* DEBUG */
	mfctl	%cr27, %r1
	stw	%r1, 4(%sr2,%r20)
#endif
	/* The load and store could fail */
1:	ldw	0(%sr3,%r26), %r28
	sub,<>	%r28, %r25, %r0
2:	stw	%r24, 0(%sr3,%r26)
	/* Free lock */
	stw	%r20, 0(%sr2,%r20)
#if ENABLE_LWS_DEBUG
	/* Clear thread register indicator */
	stw	%r0, 4(%sr2,%r20)
#endif
	/* Return to userspace, set no error */
	b	lws_exit
	copy	%r0, %r21

3:
	/* Error occurred on load or store */
	/* Free lock */
	stw	%r20, 0(%sr2,%r20)
#if ENABLE_LWS_DEBUG
	stw	%r0, 4(%sr2,%r20)
#endif
	b	lws_exit
	ldo	-EFAULT(%r0),%r21	/* set errno */
	nop
	nop
	nop
	nop

	/* Two exception table entries, one for the load,
	   the other for the store. Either return -EFAULT.
	   Each of the entries must be relocated. */
	ASM_EXCEPTIONTABLE_ENTRY(1b-linux_gateway_page, 3b-linux_gateway_page)
	ASM_EXCEPTIONTABLE_ENTRY(2b-linux_gateway_page, 3b-linux_gateway_page)


	/* Make sure nothing else is placed on this page */
	.align PAGE_SIZE
END(linux_gateway_page)
ENTRY(end_linux_gateway_page)

	/* Relocate symbols assuming linux_gateway_page is mapped
	   to virtual address 0x0 */

#define LWS_ENTRY(_name_) ASM_ULONG_INSN	(lws_##_name_ - linux_gateway_page)

	.section .rodata,"a"

	.align 8
	/* Light-weight-syscall table */
	/* Start of lws table. */
ENTRY(lws_table)
	LWS_ENTRY(compare_and_swap32)	/* 0 - ELF32 Atomic compare and swap */
	LWS_ENTRY(compare_and_swap64)	/* 1 - ELF64 Atomic compare and swap */
END(lws_table)
	/* End of lws table */

	.align 8
ENTRY(sys_call_table)
#include "syscall_table.S"
END(sys_call_table)

#ifdef CONFIG_64BIT
	.align 8
ENTRY(sys_call_table64)
#define SYSCALL_TABLE_64BIT
#include "syscall_table.S"
END(sys_call_table64)
#endif

	/*
		All light-weight-syscall atomic operations
		will use this set of locks

		NOTE: The lws_lock_start symbol must be
		at least 16-byte aligned for safe use
		with ldcw.
	*/
	.section .data
	.align	L1_CACHE_BYTES
ENTRY(lws_lock_start)
	/* lws locks */
	.rept 16
	/* Keep locks aligned at 16-bytes */
	.word 1
	.word 0
	.word 0
	.word 0
	.endr
END(lws_lock_start)
	.previous

	.end