[PARISC] Replace some calls to bl with b,l or bv to use longer offsets
[deliverable/linux.git] / arch / parisc / kernel / entry.S
1 /*
2 * Linux/PA-RISC Project (http://www.parisc-linux.org/)
3 *
4 * kernel entry points (interruptions, system call wrappers)
5 * Copyright (C) 1999,2000 Philipp Rumpf
6 * Copyright (C) 1999 SuSE GmbH Nuernberg
7 * Copyright (C) 2000 Hewlett-Packard (John Marvin)
8 * Copyright (C) 1999 Hewlett-Packard (Frank Rowand)
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2, or (at your option)
13 * any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23 */
24
25 #include <linux/config.h>
26 #include <asm/asm-offsets.h>
27
28 /* we have the following possibilities to act on an interruption:
29 * - handle in assembly and use shadowed registers only
30 * - save registers to kernel stack and handle in assembly or C */
31
32
33 #include <asm/psw.h>
34 #include <asm/assembly.h> /* for LDREG/STREG defines */
35 #include <asm/pgtable.h>
36 #include <asm/signal.h>
37 #include <asm/unistd.h>
38 #include <asm/thread_info.h>
39
40 #ifdef __LP64__
41 #define CMPIB cmpib,*
42 #define CMPB cmpb,*
43 #define COND(x) *x
44
45 .level 2.0w
46 #else
47 #define CMPIB cmpib,
48 #define CMPB cmpb,
49 #define COND(x) x
50
51 .level 2.0
52 #endif
53
54 .import pa_dbit_lock,data
55
56	/* space_to_prot macro creates a prot id from a space id */
57
	/* Two variants: when SPACEID_SHIFT is 0 a single shift-and-deposit
	 * (depd,z) forms the protection id directly; otherwise the space id
	 * must be extracted (extrd,u) from its shifted position first. */
58	#if (SPACEID_SHIFT) == 0
59	.macro  space_to_prot spc prot
60	depd,z  \spc,62,31,\prot
61	.endm
62	#else
63	.macro  space_to_prot spc prot
64	extrd,u \spc,(64 - (SPACEID_SHIFT)),32,\prot
65	.endm
66	#endif
67
68	/* Switch to virtual mapping, trashing only %r1 */
69	.macro  virt_map
70	/* pcxt_ssm_bug */
	/* Interrupts off; zero sr4/sr5 so kernel accesses hit space 0 */
71	rsm     PSW_SM_I, %r0   /* barrier for "Relied upon Translation */
72	mtsp    %r0, %sr4
73	mtsp    %r0, %sr5
74	mfsp    %sr7, %r1
75	or,=    %r0,%r1,%r0     /* Only save sr7 in sr3 if sr7 != 0 */
76	mtsp    %r1, %sr3
77	tovirt_r1 %r29
78	load32  KERNEL_PSW, %r1
79
80	rsm     PSW_SM_QUIET,%r0        /* second "heavy weight" ctl op */
81	mtsp    %r0, %sr6
82	mtsp    %r0, %sr7
83	mtctl   %r0, %cr17      /* Clear IIASQ tail */
84	mtctl   %r0, %cr17      /* Clear IIASQ head */
85	mtctl   %r1, %ipsw
	/* Queue the local label 4: as the rfir continuation point; the
	 * rfir below then resumes there with KERNEL_PSW in effect. */
86	load32  4f, %r1
87	mtctl   %r1, %cr18      /* Set IIAOQ tail */
88	ldo     4(%r1), %r1
89	mtctl   %r1, %cr18      /* Set IIAOQ head */
90	rfir
91	nop
92	4:
93	.endm
94
95 /*
96 * The "get_stack" macros are responsible for determining the
97 * kernel stack value.
98 *
99 * For Faults:
100 * If sr7 == 0
101 * Already using a kernel stack, so call the
102 * get_stack_use_r30 macro to push a pt_regs structure
103 * on the stack, and store registers there.
104 * else
105 * Need to set up a kernel stack, so call the
106 * get_stack_use_cr30 macro to set up a pointer
107 * to the pt_regs structure contained within the
108 * task pointer pointed to by cr30. Set the stack
109 * pointer to point to the end of the task structure.
110 *
111 * For Interrupts:
112 * If sr7 == 0
113 * Already using a kernel stack, check to see if r30
114 * is already pointing to the per processor interrupt
115 * stack. If it is, call the get_stack_use_r30 macro
116 * to push a pt_regs structure on the stack, and store
117 * registers there. Otherwise, call get_stack_use_cr31
118 * to get a pointer to the base of the interrupt stack
119 * and push a pt_regs structure on that stack.
120 * else
121 * Need to set up a kernel stack, so call the
122 * get_stack_use_cr30 macro to set up a pointer
123 * to the pt_regs structure contained within the
124 * task pointer pointed to by cr30. Set the stack
125 * pointer to point to the end of the task structure.
126 * N.B: We don't use the interrupt stack for the
127 * first interrupt from userland, because signals/
128 * resched's are processed when returning to userland,
129 * and we can sleep in those cases.
130 *
131 * Note that we use shadowed registers for temps until
132 * we can save %r26 and %r29. %r26 is used to preserve
133 * %r8 (a shadowed register) which temporarily contained
134 * either the fault type ("code") or the eirr. We need
135 * to use a non-shadowed register to carry the value over
136 * the rfir in virt_map. We use %r26 since this value winds
137 * up being passed as the argument to either do_cpu_irq_mask
138 * or handle_interruption. %r29 is used to hold a pointer
139 * the register save area, and once again, it needs to
140 * be a non-shadowed register so that it survives the rfir.
141 *
142 * N.B. TASK_SZ_ALGN and PT_SZ_ALGN include space for a stack frame.
143 */
144
145	.macro  get_stack_use_cr30
146
147	/* we save the registers in the task struct */
148
	/* On exit: %r29 = physical address of the task's pt_regs area,
	 * %r30 = new kernel stack pointer (end of the thread area).
	 * %r9 is used as scratch; %r30/%r29/%r26 are preserved into pt_regs. */
149	mfctl   %cr30, %r1
150	tophys  %r1,%r9
151	LDREG   TI_TASK(%r9), %r1       /* thread_info -> task_struct */
152	tophys  %r1,%r9
153	ldo     TASK_REGS(%r9),%r9
154	STREG   %r30, PT_GR30(%r9)
155	STREG   %r29,PT_GR29(%r9)
156	STREG   %r26,PT_GR26(%r9)
157	copy    %r9,%r29
158	mfctl   %cr30, %r1
159	ldo     THREAD_SZ_ALGN(%r1), %r30
160	.endm
161
162	.macro  get_stack_use_r30
163
164	/* we put a struct pt_regs on the stack and save the registers there */
165
	/* Already on a kernel stack: push a pt_regs frame at the current
	 * %r30; %r29 ends up as the physical pointer to that frame. */
166	tophys  %r30,%r9
167	STREG   %r30,PT_GR30(%r9)
168	ldo	PT_SZ_ALGN(%r30),%r30
169	STREG   %r29,PT_GR29(%r9)
170	STREG   %r26,PT_GR26(%r9)
171	copy    %r9,%r29
172	.endm
173
	/* Undo the frame set up above: restore %r1, %r30 and (last, since
	 * it is the base pointer in use) %r29 from pt_regs. */
174	.macro  rest_stack
175	LDREG   PT_GR1(%r29), %r1
176	LDREG   PT_GR30(%r29),%r30
177	LDREG   PT_GR29(%r29),%r29
178	.endm
179
180	/* default interruption handler
181	 * (calls traps.c:handle_interruption) */
	/* The ldi executes in the branch delay slot, so %r8 carries the
	 * trap code into intr_save.  .align 32 pads each vector entry. */
182	.macro	def code
183	b	intr_save
184	ldi     \code, %r8
185	.align	32
186	.endm
187
188	/* Interrupt interruption handler
189	 * (calls irq.c:do_cpu_irq_mask) */
	/* Delay slot captures the interrupted %sr7 in %r16 for intr_extint. */
190	.macro	extint code
191	b	intr_extint
192	mfsp    %sr7,%r16
193	.align	32
194	.endm
195
196	.import	os_hpmc, code
197
198	/* HPMC handler */
	/* High-Priority Machine Check entry: firmware checks/patches the
	 * checksum, handler address and length words below. */
199	.macro	hpmc code
200	nop			/* must be a NOP, will be patched later */
201	load32	PA(os_hpmc), %r3
202	bv,n	0(%r3)
203	nop
204	.word	0		/* checksum (will be patched) */
205	.word	PA(os_hpmc)	/* address of handler */
206	.word	0		/* length of handler */
207	.endm
208
209 /*
210 * Performance Note: Instructions will be moved up into
211 * this part of the code later on, once we are sure
212 * that the tlb miss handlers are close to final form.
213 */
214
215	/* Register definitions for tlb miss handler macros */
216
	/* va/spc are shadowed registers, so they survive until the handler
	 * saves state or executes rfir.  Note the \code argument is unused
	 * by these vector macros; the handler is selected by the branch. */
217	va  = r8	/* virtual address for which the trap occurred */
218	spc = r24	/* space for which the trap occurred */
219
220	#ifndef __LP64__
221
222	/*
223	 * itlb miss interruption handler (parisc 1.1 - 32 bit)
224	 */
225
	/* Faulting space/offset come from the PC queue; the mfctl of %pcoq
	 * executes in the branch delay slot. */
226	.macro	itlb_11 code
227
228	mfctl	%pcsq, spc
229	b	itlb_miss_11
230	mfctl	%pcoq, va
231
232	.align		32
233	.endm
234	#endif
235
236	/*
237	 * itlb miss interruption handler (parisc 2.0)
238	 */
239
240	.macro	itlb_20 code
241	mfctl	%pcsq, spc
242	#ifdef __LP64__
243	b       itlb_miss_20w
244	#else
245	b	itlb_miss_20
246	#endif
247	mfctl	%pcoq, va
248
249	.align		32
250	.endm
251
252	#ifndef __LP64__
253	/*
254	 * naitlb miss interruption handler (parisc 1.1 - 32 bit)
255	 *
256	 * Note: naitlb misses will be treated
257	 * as an ordinary itlb miss for now.
258	 * However, note that naitlb misses
259	 * have the faulting address in the
260	 * IOR/ISR.
261	 */
262
263	.macro	naitlb_11 code
264
265	mfctl	%isr,spc
266	b	itlb_miss_11
267	mfctl 	%ior,va
268	/* FIXME: If user causes a naitlb miss, the priv level may not be in
269	 * lower bits of va, where the itlb miss handler is expecting them
270	 */
271
272	.align		32
273	.endm
274	#endif
275
276	/*
277	 * naitlb miss interruption handler (parisc 2.0)
278	 *
279	 * Note: naitlb misses will be treated
280	 * as an ordinary itlb miss for now.
281	 * However, note that naitlb misses
282	 * have the faulting address in the
283	 * IOR/ISR.
284	 */
285
286	.macro	naitlb_20 code
287
288	mfctl	%isr,spc
289	#ifdef __LP64__
290	b       itlb_miss_20w
291	#else
292	b	itlb_miss_20
293	#endif
294	mfctl 	%ior,va
295	/* FIXME: If user causes a naitlb miss, the priv level may not be in
296	 * lower bits of va, where the itlb miss handler is expecting them
297	 */
298
299	.align		32
300	.endm
301
302	#ifndef __LP64__
303	/*
304	 * dtlb miss interruption handler (parisc 1.1 - 32 bit)
305	 */
306
	/* Data-side faults deliver space/address via ISR/IOR; the second
	 * mfctl runs in the branch delay slot.  \code is unused here. */
307	.macro	dtlb_11 code
308
309	mfctl	%isr, spc
310	b	dtlb_miss_11
311	mfctl	%ior, va
312
313	.align		32
314	.endm
315	#endif
316
317	/*
318	 * dtlb miss interruption handler (parisc 2.0)
319	 */
320
321	.macro	dtlb_20 code
322
323	mfctl	%isr, spc
324	#ifdef __LP64__
325	b       dtlb_miss_20w
326	#else
327	b	dtlb_miss_20
328	#endif
329	mfctl	%ior, va
330
331	.align		32
332	.endm
333
334	#ifndef __LP64__
335	/* nadtlb miss interruption handler (parisc 1.1 - 32 bit) */
336
337	.macro	nadtlb_11 code
338
339	mfctl	%isr,spc
340	b       nadtlb_miss_11
341	mfctl	%ior,va
342
343	.align		32
344	.endm
345	#endif
346
347	/* nadtlb miss interruption handler (parisc 2.0) */
348
349	.macro	nadtlb_20 code
350
351	mfctl	%isr,spc
352	#ifdef __LP64__
353	b       nadtlb_miss_20w
354	#else
355	b       nadtlb_miss_20
356	#endif
357	mfctl	%ior,va
358
359	.align		32
360	.endm
361
362	#ifndef __LP64__
363	/*
364	 * dirty bit trap interruption handler (parisc 1.1 - 32 bit)
365	 */
366
367	.macro	dbit_11 code
368
369	mfctl	%isr,spc
370	b	dbit_trap_11
371	mfctl	%ior,va
372
373	.align		32
374	.endm
375	#endif
376
377	/*
378	 * dirty bit trap interruption handler (parisc 2.0)
379	 */
380
381	.macro	dbit_20 code
382
383	mfctl	%isr,spc
384	#ifdef __LP64__
385	b       dbit_trap_20w
386	#else
387	b	dbit_trap_20
388	#endif
389	mfctl	%ior,va
390
391	.align		32
392	.endm
393
394	/* The following are simple 32 vs 64 bit instruction
395	 * abstractions for the macros */
	/* PA bit positions count from the MSB, so on LP64 the same logical
	 * bit of the low word sits at 32+\start in a 64-bit register. */
396	.macro	EXTR	reg1,start,length,reg2
397	#ifdef __LP64__
398	extrd,u	\reg1,32+\start,\length,\reg2
399	#else
400	extrw,u	\reg1,\start,\length,\reg2
401	#endif
402	.endm
403
404	.macro	DEP	reg1,start,length,reg2
405	#ifdef __LP64__
406	depd	\reg1,32+\start,\length,\reg2
407	#else
408	depw	\reg1,\start,\length,\reg2
409	#endif
410	.endm
411
412	.macro	DEPI	val,start,length,reg
413	#ifdef __LP64__
414	depdi	\val,32+\start,\length,\reg
415	#else
416	depwi	\val,\start,\length,\reg
417	#endif
418	.endm
419
420	/* In LP64, the space contains part of the upper 32 bits of the
421	 * fault.  We have to extract this and place it in the va,
422	 * zeroing the corresponding bits in the space register */
	/* Expands to nothing on 32-bit kernels. */
423	.macro	space_adjust	spc,va,tmp
424	#ifdef __LP64__
425	extrd,u	\spc,63,SPACEID_SHIFT,\tmp
426	depd	%r0,63,SPACEID_SHIFT,\spc
427	depd	\tmp,31,SPACEID_SHIFT,\va
428	#endif
429	.endm
430
431	.import swapper_pg_dir,code
432
433	/* Get the pgd.  For faults on space zero (kernel space), this
434	 * is simply swapper_pg_dir.  For user space faults, the
435	 * pgd is stored in %cr25 */
	/* The or,COND(=) nullifies the mfctl when \spc == 0 (kernel),
	 * leaving the swapper_pg_dir address loaded above; otherwise the
	 * user pgd from %cr25 overwrites it. */
436	.macro  get_pgd spc,reg
437	ldil    L%PA(swapper_pg_dir),\reg
438	ldo     R%PA(swapper_pg_dir)(\reg),\reg
439	or,COND(=) %r0,\spc,%r0
440	mfctl	%cr25,\reg
441	.endm
442
443	/*
444		space_check(spc,tmp,fault)
445
446		spc - The space we saw the fault with.
447		tmp - The place to store the current space.
448		fault - Function to call on failure.
449
450		Only allow faults on different spaces from the
451		currently active one if we're the kernel
452
453	*/
	/* Nullification chain: if \spc == 0 the copy is skipped so \tmp
	 * stays 0 and the cmpb is nullified (fault allowed).  If we are
	 * executing as kernel (\tmp == 0) the cmpb is also nullified.
	 * Otherwise fault out when the fault space differs from \tmp. */
454	.macro  space_check spc,tmp,fault
455	mfsp    %sr7,\tmp
456	or,COND(<>) %r0,\spc,%r0        /* user may execute gateway page
457					 * as kernel, so defeat the space
458					 * check if it is */
459	copy    \spc,\tmp
460	or,COND(=) %r0,\tmp,%r0         /* nullify if executing as kernel */
461	cmpb,COND(<>),n \tmp,\spc,\fault
462	.endm
463
464	/* Look up a PTE in a 2-Level scheme (faulting at each
465	 * level if the entry isn't present
466	 *
467	 * NOTE: we use ldw even for LP64, since the short pointers
468	 * can address up to 1TB
469	 */
	/* In:  \pmd = pgd/pmd base, \va = faulting address.
	 * Out: \pte = pte value (branches to \fault if any level absent);
	 * clobbers \index and %r9. */
470	.macro	L2_ptep	pmd,pte,index,va,fault
471	#if PT_NLEVELS == 3
472	EXTR	\va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
473	#else
474	EXTR	\va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
475	#endif
476	DEP     %r0,31,PAGE_SHIFT,\pmd  /* clear offset */
477	copy	%r0,\pte
478	ldw,s	\index(\pmd),\pmd
479	bb,>=,n	\pmd,_PxD_PRESENT_BIT,\fault
480	DEP	%r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
481	copy	\pmd,%r9
482	#ifdef __LP64__
483	shld	%r9,PxD_VALUE_SHIFT,\pmd
484	#else
485	shlw	%r9,PxD_VALUE_SHIFT,\pmd
486	#endif
487	EXTR	\va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
488	DEP	%r0,31,PAGE_SHIFT,\pmd  /* clear offset */
489	shladd	\index,BITS_PER_PTE_ENTRY,\pmd,\pmd
490	LDREG	%r0(\pmd),\pte		/* pmd is now pte */
491	bb,>=,n \pte,_PAGE_PRESENT_BIT,\fault
492	.endm
493
494	/* Look up PTE in a 3-Level scheme.
495	 *
496	 * Here we implement a Hybrid L2/L3 scheme: we allocate the
497	 * first pmd adjacent to the pgd.  This means that we can
498	 * subtract a constant offset to get to it.  The pmd and pgd
499	 * sizes are arranged so that a single pmd covers 4GB (giving
500	 * a full LP64 process access to 8TB) so our lookups are
501	 * effectively L2 for the first 4GB of the kernel (i.e. for
502	 * all ILP32 processes and all the kernel for machines with
503	 * under 4GB of memory) */
	/* Each extrd,u,*= \va,31,32,%r0 extracts the upper word of \va and
	 * nullifies the following insn when it is zero: for va < 4GB the
	 * whole pgd-level walk is skipped and the final (*<>-guarded) ldo
	 * instead applies the constant offset to reach the adjacent pmd. */
504	.macro	L3_ptep pgd,pte,index,va,fault
505	extrd,u	\va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
506	copy	%r0,\pte
507	extrd,u,*=	\va,31,32,%r0
508	ldw,s	\index(\pgd),\pgd
509	extrd,u,*=	\va,31,32,%r0
510	bb,>=,n	\pgd,_PxD_PRESENT_BIT,\fault
511	extrd,u,*=	\va,31,32,%r0
512	shld	\pgd,PxD_VALUE_SHIFT,\index
513	extrd,u,*=	\va,31,32,%r0
514	copy	\index,\pgd
515	extrd,u,*<>	\va,31,32,%r0
516	ldo	ASM_PGD_PMD_OFFSET(\pgd),\pgd
517	L2_ptep	\pgd,\pte,\index,\va,\fault
518	.endm
519
520	/* Set the _PAGE_ACCESSED bit of the PTE.  Be clever and
521	 * don't needlessly dirty the cache line if it was already set */
	/* and,COND(<>) nullifies the store when _PAGE_ACCESSED is already
	 * set in \pte, avoiding a needless write to the pte cache line. */
522	.macro	update_ptep	ptep,pte,tmp,tmp1
523	ldi	_PAGE_ACCESSED,\tmp1
524	or	\tmp1,\pte,\tmp
525	and,COND(<>)	\tmp1,\pte,%r0
526	STREG	\tmp,0(\ptep)
527	.endm
528
529	/* Set the dirty bit (and accessed bit).  No need to be
530	 * clever, this is only used from the dirty fault */
531	.macro	update_dirty	ptep,pte,tmp
532	ldi	_PAGE_ACCESSED|_PAGE_DIRTY,\tmp
533	or	\tmp,\pte,\pte
534	STREG	\pte,0(\ptep)
535	.endm
536
537	/* Convert the pte and prot to tlb insertion values.  How
538	 * this happens is quite subtle, read below */
539	.macro	make_insert_tlb	spc,pte,prot
540	space_to_prot   \spc \prot        /* create prot id from space */
541	/* The following is the real subtlety.  This is depositing
542	 * T <-> _PAGE_REFTRAP
543	 * D <-> _PAGE_DIRTY
544	 * B <-> _PAGE_DMB (memory break)
545	 *
546	 * Then incredible subtlety: The access rights are
547	 * _PAGE_GATEWAY _PAGE_EXEC _PAGE_READ
548	 * See 3-14 of the parisc 2.0 manual
549	 *
550	 * Finally, _PAGE_READ goes in the top bit of PL1 (so we
551	 * trigger an access rights trap in user space if the user
552	 * tries to read an unreadable page */
553	depd            \pte,8,7,\prot
554
555	/* PAGE_USER indicates the page can be read with user privileges,
556	 * so deposit X1|11 to PL1|PL2 (remember the upper bit of PL1
557	 * contains _PAGE_READ */
	/* ,*= nullifies the following deposit when the tested pte bit is
	 * clear, so each deposit only happens for USER/GATEWAY pages. */
558	extrd,u,*=      \pte,_PAGE_USER_BIT+32,1,%r0
559	depdi		7,11,3,\prot
560	/* If we're a gateway page, drop PL2 back to zero for promotion
561	 * to kernel privilege (so we can execute the page as kernel).
562	 * Any privilege promotion page always denies read and write */
563	extrd,u,*= 	\pte,_PAGE_GATEWAY_BIT+32,1,%r0
564	depd		%r0,11,2,\prot	/* If Gateway, Set PL2 to 0 */
565
566	/* Get rid of prot bits and convert to page addr for iitlbt */
567
568	depd		%r0,63,PAGE_SHIFT,\pte
569	extrd,u		\pte,56,32,\pte
570	.endm
571
572	/* Identical macro to make_insert_tlb above, except it
573	 * makes the tlb entry for the differently formatted pa11
574	 * insertion instructions */
575	.macro	make_insert_tlb_11	spc,pte,prot
576	zdep	\spc,30,15,\prot
577	dep	\pte,8,7,\prot
578	extru,=	\pte,_PAGE_NO_CACHE_BIT,1,%r0
579	depi	1,12,1,\prot
580	extru,=	\pte,_PAGE_USER_BIT,1,%r0
581	depi	7,11,3,\prot   /* Set for user space (1 rsvd for read) */
582	extru,= 	\pte,_PAGE_GATEWAY_BIT,1,%r0
583	depi	0,11,2,\prot	/* If Gateway, Set PL2 to 0 */
584
585	/* Get rid of prot bits and convert to page addr for iitlba */
586
587	depi	0,31,12,\pte
588	extru	\pte,24,25,\pte
589
590	.endm
591
592	/* This is for ILP32 PA2.0 only.  The TLB insertion needs
593	 * to extend into I/O space if the address is 0xfXXXXXXX
594	 * so we extend the f's into the top word of the pte in
595	 * this case */
	/* addi,<> nullifies the sign-extension unless the extracted nibble
	 * is 0xf (i.e. \tmp == -1, so 1+\tmp == 0). */
596	.macro f_extend	pte,tmp
597	extrd,s \pte,42,4,\tmp
598	addi,<>	1,\tmp,%r0
599	extrd,s	\pte,63,25,\pte
600	.endm
601
602	/* The alias region is an 8MB aligned 16MB to do clear and
603	 * copy user pages at addresses congruent with the user
604	 * virtual address.
605	 *
606	 * To use the alias page, you set %r26 up with the to TLB
607	 * entry (identifying the physical page) and %r23 up with
608	 * the from tlb entry (or nothing if only a to entry---for
609	 * clear_user_page_asm) */
	/* Faults to \fault unless the access is a kernel (space 0) fault
	 * inside TMPALIAS_MAP_START's region; otherwise builds a fixed
	 * DIRTY|WRITE|READ \prot and picks the pte from %r23 or %r26. */
610	.macro	do_alias	spc,tmp,tmp1,va,pte,prot,fault
611	cmpib,COND(<>),n 0,\spc,\fault
612	ldil    L%(TMPALIAS_MAP_START),\tmp
613	#if defined(__LP64__) && (TMPALIAS_MAP_START >= 0x80000000)
614	/* on LP64, ldi will sign extend into the upper 32 bits,
615	 * which is behaviour we don't want */
616	depdi	0,31,32,\tmp
617	#endif
618	copy    \va,\tmp1
619	DEPI    0,31,23,\tmp1
620	cmpb,COND(<>),n	\tmp,\tmp1,\fault
621	ldi	(_PAGE_DIRTY|_PAGE_WRITE|_PAGE_READ),\prot
622	depd,z	\prot,8,7,\prot
623	/*
624	 * OK, it is in the temp alias region, check whether "from" or "to".
625	 * Check "subtle" note in pacache.S re: r23/r26.
626	 */
	/* One va bit selects the half of the alias region: the ,= extract
	 * nullifies the "tr" or (skipping %r23) for the "to" half, while
	 * the always-true "tr" condition otherwise nullifies the %r26 or. */
627	#ifdef __LP64__
628	extrd,u,*=	\va,41,1,%r0
629	#else
630	extrw,u,=	\va,9,1,%r0
631	#endif
632	or,COND(tr)	%r23,%r0,\pte
633	or		%r26,%r0,\pte
634	.endm
635
636
637 /*
638 * Align fault_vector_20 on 4K boundary so that both
639 * fault_vector_11 and fault_vector_20 are on the
640 * same page. This is only necessary as long as we
641 * write protect the kernel text, which we may stop
642 * doing once we use large page translations to cover
643 * the static part of the kernel address space.
644 */
645
646	.export fault_vector_20
647
648	.text
649
650	.align 4096
651
	/* PA 2.0 interruption vector table: one 32-byte entry per
	 * interruption 1-31 (each macro ends with .align 32).  Dedicated
	 * handlers: HPMC (1), external interrupt (4), itlb miss (6),
	 * dtlb miss (15), nadtlb miss (17), dirty-bit trap (20); all
	 * other vectors funnel into intr_save via the def macro. */
652	fault_vector_20:
653	/* First vector is invalid (0) */
654	.ascii	"cows can fly"
655	.byte 0
656	.align 32
657
658	hpmc		 1
659	def		 2
660	def		 3
661	extint		 4
662	def		 5
663	itlb_20		 6
664	def		 7
665	def		 8
666	def              9
667	def		10
668	def		11
669	def		12
670	def		13
671	def		14
672	dtlb_20		15
673	#if 0
674	naitlb_20	16
675	#else
676	def             16
677	#endif
678	nadtlb_20	17
679	def		18
680	def		19
681	dbit_20		20
682	def		21
683	def		22
684	def		23
685	def		24
686	def		25
687	def		26
688	def		27
689	def		28
690	def		29
691	def		30
692	def		31
693
694	#ifndef __LP64__
695
696	.export fault_vector_11
697
698	.align 2048
699
	/* PA 1.1 (32-bit only) interruption vector table; same layout as
	 * fault_vector_20 above but using the pa11 handler macros. */
700	fault_vector_11:
701	/* First vector is invalid (0) */
702	.ascii	"cows can fly"
703	.byte 0
704	.align 32
705
706	hpmc		 1
707	def		 2
708	def		 3
709	extint		 4
710	def		 5
711	itlb_11		 6
712	def		 7
713	def		 8
714	def              9
715	def		10
716	def		11
717	def		12
718	def		13
719	def		14
720	dtlb_11		15
721	#if 0
722	naitlb_11	16
723	#else
724	def             16
725	#endif
726	nadtlb_11	17
727	def		18
728	def		19
729	dbit_11		20
730	def		21
731	def		22
732	def		23
733	def		24
734	def		25
735	def		26
736	def		27
737	def		28
738	def		29
739	def		30
740	def		31
741
742	#endif
743
744 .import handle_interruption,code
745 .import do_cpu_irq_mask,code
746
747 /*
748 * r26 = function to be called
749 * r25 = argument to pass in
750 * r24 = flags for do_fork()
751 *
752 * Kernel threads don't ever return, so they don't need
753 * a true register context. We just save away the arguments
754 * for copy_thread/ret_ to properly set up the child.
755 */
756
757	#define CLONE_VM 0x100	/* Must agree with <linux/sched.h> */
758	#define CLONE_UNTRACED 0x00800000
759
760	.export __kernel_thread, code
761	.import do_fork
	/* Build a pt_regs frame on the stack holding the thread function
	 * (%r26) and its argument (%r25), then call do_fork with
	 * CLONE_VM|CLONE_UNTRACED or'd into the caller's flags (%r24). */
762	__kernel_thread:
763	STREG	%r2, -RP_OFFSET(%r30)
764
765	copy	%r30, %r1
766	ldo	PT_SZ_ALGN(%r30),%r30
767	#ifdef __LP64__
768	/* Yo, function pointers in wide mode are little structs... -PB */
769	ldd	24(%r26), %r2
770	STREG	%r2, PT_GR27(%r1)	/* Store childs %dp */
771	ldd	16(%r26), %r26
772
773	STREG	%r22, PT_GR22(%r1)	/* save r22 (arg5) */
774	copy	%r0, %r22		/* user_tid */
775	#endif
776	STREG	%r26, PT_GR26(%r1)  /* Store function & argument for child */
777	STREG	%r25, PT_GR25(%r1)
778	ldil	L%CLONE_UNTRACED, %r26
779	ldo	CLONE_VM(%r26), %r26   /* Force CLONE_VM since only init_mm */
780	or	%r26, %r24, %r26      /* will have kernel mappings.	 */
781	ldi	1, %r25			/* stack_start, signals kernel thread */
782	stw	%r0, -52(%r30)	     	/* user_tid */
783	#ifdef __LP64__
784	ldo	-16(%r30),%r29		/* Reference param save area */
785	#endif
	/* Delay slot passes the pt_regs frame pointer as an argument. */
786	BL	do_fork, %r2
787	copy	%r1, %r24		/* pt_regs */
788
789	/* Parent Returns here */
790
	/* Pop the frame and return to the caller; do_fork's result is
	 * still in %r28. */
791	LDREG	-PT_SZ_ALGN-RP_OFFSET(%r30), %r2
792	ldo	-PT_SZ_ALGN(%r30), %r30
793	bv	%r0(%r2)
794	nop
795
796 /*
797 * Child Returns here
798 *
799 * copy_thread moved args from temp save area set up above
800 * into task save area.
801 */
802
803	.export ret_from_kernel_thread
	/* First code run by a new kernel thread: call schedule_tail, fetch
	 * the thread function (TASK_PT_GR26) and its argument (TASK_PT_GR25)
	 * saved by __kernel_thread/copy_thread, invoke the function, and if
	 * it ever returns, exit with status 0 via sys_exit. */
804	ret_from_kernel_thread:
805
806	/* Call schedule_tail first though */
807	BL	schedule_tail, %r2
808	nop
809
810	LDREG	TI_TASK-THREAD_SZ_ALGN(%r30), %r1
811	LDREG	TASK_PT_GR25(%r1), %r26
812	#ifdef __LP64__
813	LDREG	TASK_PT_GR27(%r1), %r27
814	LDREG	TASK_PT_GR22(%r1), %r22
815	#endif
816	LDREG	TASK_PT_GR26(%r1), %r1
	/* Call the thread function; delay slot moves the return link from
	 * %r31 (set by ble) into the standard %r2. */
817	ble	0(%sr7, %r1)
818	copy	%r31, %r2
819
820	#ifdef __LP64__
821	ldo	-16(%r30),%r29		/* Reference param save area */
822	loadgp				/* Thread could have been in a module */
823	#endif
	/* The final ldi executes in the delay slot of either branch form,
	 * so sys_exit is always called with exit code 0 in %r26. */
824	#ifndef CONFIG_64BIT
825	b	sys_exit
826	#else
827	load32	sys_exit, %r1
828	bv	%r0(%r1)
829	#endif
830	ldi	0, %r26
831
832	.import sys_execve, code
833	.export __execve, code
	/* Kernel-internal execve: stash the return pointer (%r15) and the
	 * old stack base (%r16), push a pt_regs frame, and call sys_execve
	 * with that frame.  On success (%r28 == 0) enter the new image via
	 * intr_return; on failure restore and return the error in %r28. */
834	__execve:
835	copy	%r2, %r15
836	copy	%r30, %r16
837	ldo	PT_SZ_ALGN(%r30), %r30
838	STREG	%r26, PT_GR26(%r16)
839	STREG	%r25, PT_GR25(%r16)
840	STREG	%r24, PT_GR24(%r16)
841	#ifdef __LP64__
842	ldo	-16(%r30),%r29		/* Reference param save area */
843	#endif
844	BL	sys_execve, %r2
845	copy	%r16, %r26
846
847	cmpib,=,n 0,%r28,intr_return /* forward */
848
849	/* yes, this will trap and die. */
850	copy	%r15, %r2
851	copy	%r16, %r30
852	bv	%r0(%r2)
853	nop
854
855	.align 4
856
857	/*
858	 * struct task_struct *_switch_to(struct task_struct *prev,
859	 *	struct task_struct *next)
860	 *
861	 * switch kernel stacks and return prev */
862	.export	_switch_to, code
	/* prev in %r26, next in %r25.  Saves callee-saved state and the
	 * resume PC/SP into prev's thread, then loads next's KPC/KSP and
	 * branches to next's saved resume point; the delay slot installs
	 * next's thread_info into %cr30. */
863	_switch_to:
864	STREG	 %r2, -RP_OFFSET(%r30)
865
866	callee_save
867
868	load32	_switch_to_ret, %r2
869
870	STREG	%r2, TASK_PT_KPC(%r26)
871	LDREG	TASK_PT_KPC(%r25), %r2
872
873	STREG	%r30, TASK_PT_KSP(%r26)
874	LDREG	TASK_PT_KSP(%r25), %r30
875	LDREG	TASK_THREAD_INFO(%r25), %r25
876	bv	%r0(%r2)
877	mtctl   %r25,%cr30
878
	/* Resume point for a task switched out above: restore callee-saved
	 * regs and return prev (still in %r26) to the caller in %r28. */
879	_switch_to_ret:
880	mtctl	%r0, %cr0		/* Needed for single stepping */
881	callee_rest
882
883	LDREG	-RP_OFFSET(%r30), %r2
884	bv	%r0(%r2)
885	copy	%r26, %r28
886
887 /*
888 * Common rfi return path for interruptions, kernel execve, and
889 * sys_rt_sigreturn (sometimes). The sys_rt_sigreturn syscall will
890 * return via this path if the signal was received when the process
891 * was running; if the process was blocked on a syscall then the
892 * normal syscall_exit path is used. All syscalls for traced
893	 * processes exit via intr_restore.
894 *
895 * XXX If any syscalls that change a processes space id ever exit
896 * this way, then we will need to copy %sr3 in to PT_SR[3..7], and
897 * adjust IASQ[0..1].
898 *
899 */
900
901	.align 4096
902
903	.export syscall_exit_rfi
	/* Entry to the common rfi return path: sanitize the saved IAOQ and
	 * PSW in the task's pt_regs (the user may have modified them via
	 * sigcontext), then fall through to intr_return below. */
904	syscall_exit_rfi:
905	mfctl   %cr30,%r16
906	LDREG	TI_TASK(%r16), %r16	/* thread_info -> task_struct */
907	ldo	TASK_REGS(%r16),%r16
908	/* Force iaoq to userspace, as the user has had access to our current
909	 * context via sigcontext. Also Filter the PSW for the same reason.
910	 */
	/* depi 3,31,2 sets the two low (privilege) bits of each IAOQ word
	 * to 3, i.e. lowest privilege = user mode. */
911	LDREG	PT_IAOQ0(%r16),%r19
912	depi	3,31,2,%r19
913	STREG	%r19,PT_IAOQ0(%r16)
914	LDREG	PT_IAOQ1(%r16),%r19
915	depi	3,31,2,%r19
916	STREG	%r19,PT_IAOQ1(%r16)
917	LDREG   PT_PSW(%r16),%r19
918	load32	USER_PSW_MASK,%r1
919	#ifdef __LP64__
920	load32	USER_PSW_HI_MASK,%r20
921	depd    %r20,31,32,%r1
922	#endif
923	and     %r19,%r1,%r19 /* Mask out bits that user shouldn't play with */
924	load32	USER_PSW,%r1
925	or      %r19,%r1,%r19 /* Make sure default USER_PSW bits are set */
926	STREG   %r19,PT_PSW(%r16)
927
928	/*
929	 * If we aren't being traced, we never saved space registers
930	 * (we don't store them in the sigcontext), so set them
931	 * to "proper" values now (otherwise we'll wind up restoring
932	 * whatever was last stored in the task structure, which might
933	 * be inconsistent if an interrupt occurred while on the gateway
934	 * page) Note that we may be "trashing" values the user put in
935	 * them, but we don't support the user changing them.
936	 */
937
938	STREG   %r0,PT_SR2(%r16)
939	mfsp    %sr3,%r19
940	STREG   %r19,PT_SR0(%r16)
941	STREG   %r19,PT_SR1(%r16)
942	STREG   %r19,PT_SR3(%r16)
943	STREG   %r19,PT_SR4(%r16)
944	STREG   %r19,PT_SR5(%r16)
945	STREG   %r19,PT_SR6(%r16)
946	STREG   %r19,PT_SR7(%r16)
947
	/* Common exit: check pending softirqs, reschedule and signals in
	 * turn (each helper branches back into this chain), then restore
	 * the full context from pt_regs (%r16) and rfi back. */
948	intr_return:
949	/* NOTE: Need to enable interrupts in case we schedule. */
950	ssm     PSW_SM_I, %r0
951
952	/* Check for software interrupts */
953
954	.import irq_stat,data
955
956	load32	irq_stat,%r19
957	#ifdef CONFIG_SMP
958	mfctl   %cr30,%r1
959	ldw	TI_CPU(%r1),%r1 /* get cpu # - int */
960	/* shift left ____cacheline_aligned (aka L1_CACHE_BYTES) amount
961	** irq_stat[] is defined using ____cacheline_aligned.
962	*/
963	#ifdef __LP64__
964	shld	%r1, 6, %r20
965	#else
966	shlw	%r1, 5, %r20
967	#endif
968	add     %r19,%r20,%r19	/* now have &irq_stat[smp_processor_id()] */
969	#endif /* CONFIG_SMP */
970
971	LDREG   IRQSTAT_SIRQ_PEND(%r19),%r20    /* hardirq.h: unsigned long */
972	cmpib,<>,n 0,%r20,intr_do_softirq /* forward */
973
974	intr_check_resched:
975
976	/* check for reschedule */
977	mfctl   %cr30,%r1
978	LDREG   TI_FLAGS(%r1),%r19	/* sched.h: TIF_NEED_RESCHED */
979	bb,<,n	%r19,31-TIF_NEED_RESCHED,intr_do_resched /* forward */
980
981	intr_check_sig:
982	/* As above */
983	mfctl   %cr30,%r1
984	LDREG	TI_FLAGS(%r1),%r19	/* sched.h: TIF_SIGPENDING */
985	bb,<,n %r19, 31-TIF_SIGPENDING, intr_do_signal /* forward */
986
987	intr_restore:
988	copy            %r16,%r29
989	ldo             PT_FR31(%r29),%r1
990	rest_fp         %r1
991	rest_general    %r29
992
993	/* inverse of virt_map */
994	pcxt_ssm_bug
995	rsm             PSW_SM_QUIET,%r0	/* prepare for rfi */
996	tophys_r1       %r29
997
998	/* Restore space id's and special cr's from PT_REGS
999	 * structure pointed to by r29
1000	 */
1001	rest_specials	%r29
1002
1003	/* IMPORTANT: rest_stack restores r29 last (we are using it)!
1004	 * It also restores r1 and r30.
1005	 */
1006	rest_stack
1007
1008	rfi
	/* NOTE(review): the nops after rfi are never executed; they appear
	 * to be padding/slot filling after the context switch back. */
1009	nop
1010	nop
1011	nop
1012	nop
1013	nop
1014	nop
1015	nop
1016	nop
1017
1018	.import do_softirq,code
	/* Run pending softirqs, then re-enter the exit chain at the
	 * reschedule check.  The LP64 delay slot sets up the reference
	 * parameter save area required by the 64-bit calling convention. */
1019	intr_do_softirq:
1020	BL      do_softirq,%r2
1021	#ifdef __LP64__
1022	ldo     -16(%r30),%r29          /* Reference param save area */
1023	#else
1024	nop
1025	#endif
1026	b       intr_check_resched
1027	nop
1028
1029	.import schedule,code
1030	intr_do_resched:
1031	/* Only do reschedule if we are returning to user space */
	/* A non-zero IASQ means we were interrupted in kernel space:
	 * skip the reschedule and restore directly. */
1032	LDREG	PT_IASQ0(%r16), %r20
1033	CMPIB= 0,%r20,intr_restore /* backward */
1034	nop
1035	LDREG	PT_IASQ1(%r16), %r20
1036	CMPIB= 0,%r20,intr_restore /* backward */
1037	nop
1038
1039	#ifdef __LP64__
1040	ldo	-16(%r30),%r29		/* Reference param save area */
1041	#endif
1042
	/* %r2 is pointed at intr_check_sig so schedule() "returns" there,
	 * re-running the signal check.  The 64-bit path uses load32+bv so
	 * the branch reaches schedule regardless of offset range; the ldo
	 * completing %r2 executes in the delay slot of either branch. */
1043	ldil	L%intr_check_sig, %r2
1044	#ifndef CONFIG_64BIT
1045	b	schedule
1046	#else
1047	load32	schedule, %r20
1048	bv	%r0(%r20)
1049	#endif
1050	ldo	R%intr_check_sig(%r2), %r2
1051
1052
1053	.import do_signal,code
1054	intr_do_signal:
1055	/*
1056		This check is critical to having LWS
1057		working. The IASQ is zero on the gateway
1058		page and we cannot deliver any signals until
1059		we get off the gateway page.
1060
1061		Only do signals if we are returning to user space
1062	*/
1063	LDREG	PT_IASQ0(%r16), %r20
1064	CMPIB= 0,%r20,intr_restore /* backward */
1065	nop
1066	LDREG	PT_IASQ1(%r16), %r20
1067	CMPIB= 0,%r20,intr_restore /* backward */
1068	nop
1069
	/* do_signal(oldset=NULL, regs, in_syscall=0); oldset is loaded in
	 * the BL delay slot.  Afterwards re-check for more signals. */
1070	copy	%r0, %r24			/* unsigned long in_syscall */
1071	copy	%r16, %r25			/* struct pt_regs *regs */
1072	#ifdef __LP64__
1073	ldo	-16(%r30),%r29			/* Reference param save area */
1074	#endif
1075
1076	BL	do_signal,%r2
1077	copy	%r0, %r26			/* sigset_t *oldset = NULL */
1078
1079	b	intr_check_sig
1080	nop
1081
1082	/*
1083	 * External interrupts.
1084	 */
1085
	/* %r16 holds the interrupted %sr7 (loaded in the extint macro's
	 * delay slot); zero means we were already running in the kernel. */
1086	intr_extint:
1087	CMPIB=,n 0,%r16,1f
1088	get_stack_use_cr30
1089	b,n 3f
1090
1091	1:
1092	#if 0  /* Interrupt Stack support not working yet! */
1093	mfctl	%cr31,%r1
1094	copy	%r30,%r17
1095	/* FIXME! depi below has hardcoded idea of interrupt stack size (32k)*/
1096	#ifdef __LP64__
1097	depdi	0,63,15,%r17
1098	#else
1099	depi	0,31,15,%r17
1100	#endif
1101	CMPB=,n	%r1,%r17,2f
1102	get_stack_use_cr31
1103	b,n 3f
1104	#endif
1105	2:
1106	get_stack_use_r30
1107
	/* Common path: save state, switch to virtual mode, and tail-call
	 * do_cpu_irq_mask(pt_regs) with %r2 aimed at intr_return. */
1108	3:
1109	save_specials	%r29
1110	virt_map
1111	save_general	%r29
1112
1113	ldo	PT_FR0(%r29), %r24
1114	save_fp	%r24
1115
1116	loadgp
1117
1118	copy	%r29, %r26	/* arg0 is pt_regs */
1119	copy	%r29, %r16	/* save pt_regs */
1120
1121	ldil	L%intr_return, %r2
1122
1123	#ifdef __LP64__
1124	ldo	-16(%r30),%r29	/* Reference param save area */
1125	#endif
1126
1127	b	do_cpu_irq_mask
1128	ldo	R%intr_return(%r2), %r2	/* return to intr_return, not here */
1129
1130
1131	/* Generic interruptions (illegal insn, unaligned, page fault, etc) */
1132
1133	.export intr_save, code /* for os_hpmc */
1134
	/* Trap code arrives in %r8 (shadowed, set by the def macro) and is
	 * moved to %r26; state is saved to pt_regs and control passes to
	 * handle_interruption(code, regs) with %r2 aimed at intr_check_sig. */
1135	intr_save:
1136	mfsp    %sr7,%r16
1137	CMPIB=,n 0,%r16,1f
1138	get_stack_use_cr30
1139	b	2f
1140	copy    %r8,%r26
1141
1142	1:
1143	get_stack_use_r30
1144	copy    %r8,%r26
1145
1146	2:
1147	save_specials	%r29
1148
1149	/* If this trap is a itlb miss, skip saving/adjusting isr/ior */
1150
1151	/*
1152	 * FIXME: 1) Use a #define for the hardwired "6" below (and in
1153	 *       traps.c.
1154	 *       2) Once we start executing code above 4 Gb, we need
1155	 *          to adjust iasq/iaoq here in the same way we
1156	 *          adjust isr/ior below.
1157	 */
1158
1159	CMPIB=,n        6,%r26,skip_save_ior
1160
1161
1162	mfctl           %cr20, %r16 /* isr */
1163	nop		/* serialize mfctl on PA 2.0 to avoid 4 cycle penalty */
1164	mfctl           %cr21, %r17 /* ior */
1165
1166
1167	#ifdef __LP64__
1168	/*
1169	 * If the interrupted code was running with W bit off (32 bit),
1170	 * clear the b bits (bits 0 & 1) in the ior.
1171	 * save_specials left ipsw value in r8 for us to test.
1172	 */
1173	extrd,u,*<>     %r8,PSW_W_BIT,1,%r0
1174	depdi           0,1,2,%r17
1175
1176	/*
1177	 * FIXME: This code has hardwired assumptions about the split
1178	 *        between space bits and offset bits. This will change
1179	 *        when we allow alternate page sizes.
1180	 */
1181
1182	/* adjust isr/ior. */
1183
1184	extrd,u         %r16,63,7,%r1    /* get high bits from isr for ior */
1185	depd            %r1,31,7,%r17    /* deposit them into ior */
1186	depdi           0,63,7,%r16      /* clear them from isr */
1187	#endif
1188	STREG           %r16, PT_ISR(%r29)
1189	STREG           %r17, PT_IOR(%r29)
1190
1191
1192	skip_save_ior:
1193	virt_map
1194	save_general	%r29
1195
1196	ldo		PT_FR0(%r29), %r25
1197	save_fp		%r25
1198
1199	loadgp
1200
1201	copy		%r29, %r25	/* arg1 is pt_regs */
1202	#ifdef __LP64__
1203	ldo		-16(%r30),%r29	/* Reference param save area */
1204	#endif
1205
1206	ldil		L%intr_check_sig, %r2
1207	copy		%r25, %r16	/* save pt_regs */
1208
1209	b		handle_interruption
1210	ldo		R%intr_check_sig(%r2), %r2
1211
1212
1213 /*
1214 * Note for all tlb miss handlers:
1215 *
1216 * cr24 contains a pointer to the kernel address space
1217 * page directory.
1218 *
1219 * cr25 contains a pointer to the current user address
1220 * space page directory.
1221 *
1222 * sr3 will contain the space id of the user address space
1223 * of the current running thread while that thread is
1224 * running in the kernel.
1225 */
1226
1227 /*
1228 * register number allocations. Note that these are all
1229 * in the shadowed registers
1230 */
1231
1232 t0 = r1 /* temporary register 0 */
1233 va = r8 /* virtual address for which the trap occurred */
1234 t1 = r9 /* temporary register 1 */
1235 pte = r16 /* pte/phys page # */
1236 prot = r17 /* prot bits */
1237 spc = r24 /* space for which the trap occurred */
1238 ptp = r25 /* page directory/page table pointer */
1239
1240 #ifdef __LP64__
1241
/*
 * dtlb_miss_20w: data TLB miss handler, PA 2.0 wide (64-bit) mode.
 * Walks the page table via L3_ptep, marks the PTE accessed
 * (update_ptep), builds the insert value and loads it with idtlbt,
 * then returns from interruption (rfir).  All work is done in the
 * shadowed registers aliased above (t0/t1/pte/prot/spc/ptp/va).
 */
1242 dtlb_miss_20w:
1243 space_adjust spc,va,t0
1244 get_pgd spc,ptp
1245 space_check spc,t0,dtlb_fault
1246
1247 L3_ptep ptp,pte,t0,va,dtlb_check_alias_20w
1248
1249 update_ptep ptp,pte,t0,t1
1250
1251 make_insert_tlb spc,pte,prot
1252
1253 idtlbt pte,prot
1254
1255 rfir
1256 nop
1257
/* No PTE found: check for the temporary alias region; do_alias either
 * builds the alias translation or branches to dtlb_fault. */
1258 dtlb_check_alias_20w:
1259 do_alias spc,t0,t1,va,pte,prot,dtlb_fault
1260
1261 idtlbt pte,prot
1262
1263 rfir
1264 nop
1265
/* Non-access data TLB miss (wide mode): same walk as dtlb_miss_20w,
 * but a missing translation goes to the flush/emulate path instead
 * of faulting to C. */
1266 nadtlb_miss_20w:
1267 space_adjust spc,va,t0
1268 get_pgd spc,ptp
1269 space_check spc,t0,nadtlb_fault
1270
1271 L3_ptep ptp,pte,t0,va,nadtlb_check_flush_20w
1272
1273 update_ptep ptp,pte,t0,t1
1274
1275 make_insert_tlb spc,pte,prot
1276
1277 idtlbt pte,prot
1278
1279 rfir
1280 nop
1281
/* PTE with only the FLUSH bit set: insert a "flush only" translation
 * so the flush insn can complete; anything else is emulated below. */
1282 nadtlb_check_flush_20w:
1283 bb,>=,n pte,_PAGE_FLUSH_BIT,nadtlb_emulate
1284
1285 /* Insert a "flush only" translation */
1286
1287 depdi,z 7,7,3,prot
1288 depdi 1,10,1,prot
1289
1290 /* Get rid of prot bits and convert to page addr for idtlbt */
1291
1292 depdi 0,63,12,pte
1293 extrd,u pte,56,52,pte
1294 idtlbt pte,prot
1295
1296 rfir
1297 nop
1298
1299 #else
1300
/*
 * dtlb_miss_11: data TLB miss handler for PA 1.1 (32-bit) CPUs.
 * PA 1.1 has separate address/protection TLB inserts (idtlba/idtlbp),
 * issued through %sr1 loaded with the faulting space id.
 */
1301 dtlb_miss_11:
1302 get_pgd spc,ptp
1303
1304 space_check spc,t0,dtlb_fault
1305
1306 L2_ptep ptp,pte,t0,va,dtlb_check_alias_11
1307
1308 update_ptep ptp,pte,t0,t1
1309
1310 make_insert_tlb_11 spc,pte,prot
1311
1312 mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */
1313 mtsp spc,%sr1
1314
1315 idtlba pte,(%sr1,va)
1316 idtlbp prot,(%sr1,va)
1317
1318 mtsp t0, %sr1 /* Restore sr1 */
1319
1320 rfir
1321 nop
1322
/* No PTE: hand-rolled temporary-alias check (PA 1.1 has no do_alias
 * path here); a kernel-space fault on the alias range inserts a
 * dirty/write/read mapping to the "from" or "to" page. */
1323 dtlb_check_alias_11:
1324
1325 /* Check to see if fault is in the temporary alias region */
1326
1327 cmpib,<>,n 0,spc,dtlb_fault /* forward */
1328 ldil L%(TMPALIAS_MAP_START),t0
1329 copy va,t1
1330 depwi 0,31,23,t1
1331 cmpb,<>,n t0,t1,dtlb_fault /* forward */
1332 ldi (_PAGE_DIRTY|_PAGE_WRITE|_PAGE_READ),prot
1333 depw,z prot,8,7,prot
1334
1335 /*
1336 * OK, it is in the temp alias region, check whether "from" or "to".
1337 * Check "subtle" note in pacache.S re: r23/r26.
1338 */
1339
1340 extrw,u,= va,9,1,r0
1341 or,tr %r23,%r0,pte /* If "from" use "from" page */
1342 or %r26,%r0,pte /* else "to", use "to" page */
1343
1344 idtlba pte,(va)
1345 idtlbp prot,(va)
1346
1347 rfir
1348 nop
1349
/* Non-access data TLB miss, PA 1.1: missing translations go to the
 * flush/emulate path rather than faulting to C. */
1350 nadtlb_miss_11:
1351 get_pgd spc,ptp
1352
1353 space_check spc,t0,nadtlb_fault
1354
1355 L2_ptep ptp,pte,t0,va,nadtlb_check_flush_11
1356
1357 update_ptep ptp,pte,t0,t1
1358
1359 make_insert_tlb_11 spc,pte,prot
1360
1361
1362 mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */
1363 mtsp spc,%sr1
1364
1365 idtlba pte,(%sr1,va)
1366 idtlbp prot,(%sr1,va)
1367
1368 mtsp t0, %sr1 /* Restore sr1 */
1369
1370 rfir
1371 nop
1372
/* FLUSH-only PTE (PA 1.1): insert a "flush only" translation,
 * otherwise emulate the instruction in nadtlb_emulate. */
1373 nadtlb_check_flush_11:
1374 bb,>=,n pte,_PAGE_FLUSH_BIT,nadtlb_emulate
1375
1376 /* Insert a "flush only" translation */
1377
1378 zdepi 7,7,3,prot
1379 depi 1,10,1,prot
1380
1381 /* Get rid of prot bits and convert to page addr for idtlba */
1382
1383 depi 0,31,12,pte
1384 extru pte,24,25,pte
1385
1386 mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */
1387 mtsp spc,%sr1
1388
1389 idtlba pte,(%sr1,va)
1390 idtlbp prot,(%sr1,va)
1391
1392 mtsp t0, %sr1 /* Restore sr1 */
1393
1394 rfir
1395 nop
1396
/* dtlb_miss_20: PA 2.0 CPU running a 32-bit kernel — combined idtlbt
 * insert, with f_extend widening the physical address first. */
1397 dtlb_miss_20:
1398 space_adjust spc,va,t0
1399 get_pgd spc,ptp
1400 space_check spc,t0,dtlb_fault
1401
1402 L2_ptep ptp,pte,t0,va,dtlb_check_alias_20
1403
1404 update_ptep ptp,pte,t0,t1
1405
1406 make_insert_tlb spc,pte,prot
1407
1408 f_extend pte,t0
1409
1410 idtlbt pte,prot
1411
1412 rfir
1413 nop
1414
/* Temporary-alias fallback for dtlb_miss_20 (via do_alias macro). */
1415 dtlb_check_alias_20:
1416 do_alias spc,t0,t1,va,pte,prot,dtlb_fault
1417
1418 idtlbt pte,prot
1419
1420 rfir
1421 nop
1422
/* Non-access data TLB miss, PA 2.0 narrow kernel. */
1423 nadtlb_miss_20:
1424 get_pgd spc,ptp
1425
1426 space_check spc,t0,nadtlb_fault
1427
1428 L2_ptep ptp,pte,t0,va,nadtlb_check_flush_20
1429
1430 update_ptep ptp,pte,t0,t1
1431
1432 make_insert_tlb spc,pte,prot
1433
1434 f_extend pte,t0
1435
1436 idtlbt pte,prot
1437
1438 rfir
1439 nop
1440
/* FLUSH-only PTE (PA 2.0 narrow): insert "flush only" translation. */
1441 nadtlb_check_flush_20:
1442 bb,>=,n pte,_PAGE_FLUSH_BIT,nadtlb_emulate
1443
1444 /* Insert a "flush only" translation */
1445
1446 depdi,z 7,7,3,prot
1447 depdi 1,10,1,prot
1448
1449 /* Get rid of prot bits and convert to page addr for idtlbt */
1450
1451 depdi 0,63,12,pte
1452 extrd,u pte,56,32,pte
1453 idtlbt pte,prot
1454
1455 rfir
1456 nop
1457 #endif
1458
1459 nadtlb_emulate:
1460
1461 /*
1462 * Non access misses can be caused by fdc,fic,pdc,lpa,probe and
1463 * probei instructions. We don't want to fault for these
1464 * instructions (not only does it not make sense, it can cause
1465 * deadlocks, since some flushes are done with the mmap
1466 * semaphore held). If the translation doesn't exist, we can't
1467 * insert a translation, so have to emulate the side effects
1468 * of the instruction. Since we don't insert a translation
1469 * we can get a lot of faults during a flush loop, so it makes
1470 * sense to try to do it here with minimum overhead. We only
1471 * emulate fdc,fic,pdc,probew,prober instructions whose base
1472 * and index registers are not shadowed. We defer everything
1473 * else to the "slow" path.
1474 */
1475
1476 mfctl %cr19,%r9 /* Get iir */
1477
1478 /* PA 2.0 Arch Ref. Book pg 382 has a good description of the insn bits.
1479 Checks for fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw */
1480
1481 /* Checks for fdc,fdce,pdc,"fic,4f" only */
1482 ldi 0x280,%r16
1483 and %r9,%r16,%r17
1484 cmpb,<>,n %r16,%r17,nadtlb_probe_check
1485 bb,>=,n %r9,26,nadtlb_nullify /* m bit not set, just nullify */
/* Emulate base-register modification: %r8 (argument to get_register,
 * loaded in the BL delay slot) selects the register; get_register
 * returns its value in %r1, or -1 for a shadowed register. */
1486 BL get_register,%r25
1487 extrw,u %r9,15,5,%r8 /* Get index register # */
1488 CMPIB=,n -1,%r1,nadtlb_fault /* have to use slow path */
1489 copy %r1,%r24
1490 BL get_register,%r25
1491 extrw,u %r9,10,5,%r8 /* Get base register # */
1492 CMPIB=,n -1,%r1,nadtlb_fault /* have to use slow path */
1493 BL set_register,%r25
1494 add,l %r1,%r24,%r1 /* doesn't affect c/b bits */
1495
/* Nullify the faulting instruction by setting PSW_N in the saved
 * ipsw, so rfir skips it. */
1496 nadtlb_nullify:
1497 mfctl %ipsw,%r8
1498 ldil L%PSW_N,%r9
1499 or %r8,%r9,%r8 /* Set PSW_N */
1500 mtctl %r8,%ipsw
1501
1502 rfir
1503 nop
1504
1505 /*
1506 When there is no translation for the probe address then we
1507 must nullify the insn and return zero in the target register.
1508 This will indicate to the calling code that it does not have
1509 write/read privileges to this address.
1510
1511 This should technically work for prober and probew in PA 1.1,
1512 and also probe,r and probe,w in PA 2.0
1513
1514 WARNING: USE ONLY NON-SHADOW REGISTERS WITH PROBE INSN!
1515 THE SLOW-PATH EMULATION HAS NOT BEEN WRITTEN YET.
1516
1517 */
1518 nadtlb_probe_check:
1519 ldi 0x80,%r16
1520 and %r9,%r16,%r17
1521 cmpb,<>,n %r16,%r17,nadtlb_fault /* Must be probe,[rw]*/
1522 BL get_register,%r25 /* Find the target register */
1523 extrw,u %r9,31,5,%r8 /* Get target register */
1524 CMPIB=,n -1,%r1,nadtlb_fault /* have to use slow path */
1525 BL set_register,%r25
1526 copy %r0,%r1 /* Write zero to target register */
1527 b nadtlb_nullify /* Nullify return insn */
1528 nop
1529
1530
1531 #ifdef __LP64__
/*
 * Instruction TLB miss handlers.  Same page-table walks as the data
 * side, but a failed lookup goes straight to itlb_fault (no alias or
 * non-access handling) and the insert uses the instruction-TLB forms
 * (iitlbt / iitlba+iitlbp).
 */
1532 itlb_miss_20w:
1533
1534 /*
1535 * I miss is a little different, since we allow users to fault
1536 * on the gateway page which is in the kernel address space.
1537 */
1538
1539 space_adjust spc,va,t0
1540 get_pgd spc,ptp
1541 space_check spc,t0,itlb_fault
1542
1543 L3_ptep ptp,pte,t0,va,itlb_fault
1544
1545 update_ptep ptp,pte,t0,t1
1546
1547 make_insert_tlb spc,pte,prot
1548
1549 iitlbt pte,prot
1550
1551 rfir
1552 nop
1553
1554 #else
1555
/* PA 1.1: split iitlba/iitlbp inserts through %sr1. */
1556 itlb_miss_11:
1557 get_pgd spc,ptp
1558
1559 space_check spc,t0,itlb_fault
1560
1561 L2_ptep ptp,pte,t0,va,itlb_fault
1562
1563 update_ptep ptp,pte,t0,t1
1564
1565 make_insert_tlb_11 spc,pte,prot
1566
1567 mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */
1568 mtsp spc,%sr1
1569
1570 iitlba pte,(%sr1,va)
1571 iitlbp prot,(%sr1,va)
1572
1573 mtsp t0, %sr1 /* Restore sr1 */
1574
1575 rfir
1576 nop
1577
/* PA 2.0 narrow kernel: f_extend widens pte before the insert. */
1578 itlb_miss_20:
1579 get_pgd spc,ptp
1580
1581 space_check spc,t0,itlb_fault
1582
1583 L2_ptep ptp,pte,t0,va,itlb_fault
1584
1585 update_ptep ptp,pte,t0,t1
1586
1587 make_insert_tlb spc,pte,prot
1588
1589 f_extend pte,t0
1590
1591 iitlbt pte,prot
1592
1593 rfir
1594 nop
1595
1596 #endif
1597
1598 #ifdef __LP64__
1599
/*
 * Dirty-bit traps: first write to a clean page.  Re-walk the page
 * table, set the dirty bit (update_dirty) and re-insert the
 * translation.  On SMP the PTE update is serialized with the global
 * pa_dbit_lock ldcw spinlock, taken only for user faults (spc != 0).
 */
1600 dbit_trap_20w:
1601 space_adjust spc,va,t0
1602 get_pgd spc,ptp
1603 space_check spc,t0,dbit_fault
1604
1605 L3_ptep ptp,pte,t0,va,dbit_fault
1606
1607 #ifdef CONFIG_SMP
1608 CMPIB=,n 0,spc,dbit_nolock_20w
1609 load32 PA(pa_dbit_lock),t0
1610
/* ldcw returns 0 while the lock is held elsewhere; spin until it
 * yields the (nonzero) lock token. */
1611 dbit_spin_20w:
1612 ldcw 0(t0),t1
1613 cmpib,= 0,t1,dbit_spin_20w
1614 nop
1615
1616 dbit_nolock_20w:
1617 #endif
1618 update_dirty ptp,pte,t1
1619
1620 make_insert_tlb spc,pte,prot
1621
1622 idtlbt pte,prot
1623 #ifdef CONFIG_SMP
1624 CMPIB=,n 0,spc,dbit_nounlock_20w
1625 ldi 1,t1
1626 stw t1,0(t0) /* release pa_dbit_lock */
1627
1628 dbit_nounlock_20w:
1629 #endif
1630
1631 rfir
1632 nop
1633 #else
1634
/* Dirty-bit trap, PA 1.1: split idtlba/idtlbp insert via %sr1. */
1635 dbit_trap_11:
1636
1637 get_pgd spc,ptp
1638
1639 space_check spc,t0,dbit_fault
1640
1641 L2_ptep ptp,pte,t0,va,dbit_fault
1642
1643 #ifdef CONFIG_SMP
1644 CMPIB=,n 0,spc,dbit_nolock_11
1645 load32 PA(pa_dbit_lock),t0
1646
1647 dbit_spin_11:
1648 ldcw 0(t0),t1
1649 cmpib,= 0,t1,dbit_spin_11
1650 nop
1651
1652 dbit_nolock_11:
1653 #endif
1654 update_dirty ptp,pte,t1
1655
1656 make_insert_tlb_11 spc,pte,prot
1657
1658 mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */
1659 mtsp spc,%sr1
1660
1661 idtlba pte,(%sr1,va)
1662 idtlbp prot,(%sr1,va)
1663
1664 mtsp t1, %sr1 /* Restore sr1 */
1665 #ifdef CONFIG_SMP
1666 CMPIB=,n 0,spc,dbit_nounlock_11
1667 ldi 1,t1
1668 stw t1,0(t0) /* release pa_dbit_lock */
1669
1670 dbit_nounlock_11:
1671 #endif
1672
1673 rfir
1674 nop
1675
/* Dirty-bit trap, PA 2.0 narrow kernel. */
1676 dbit_trap_20:
1677 get_pgd spc,ptp
1678
1679 space_check spc,t0,dbit_fault
1680
1681 L2_ptep ptp,pte,t0,va,dbit_fault
1682
1683 #ifdef CONFIG_SMP
1684 CMPIB=,n 0,spc,dbit_nolock_20
1685 load32 PA(pa_dbit_lock),t0
1686
1687 dbit_spin_20:
1688 ldcw 0(t0),t1
1689 cmpib,= 0,t1,dbit_spin_20
1690 nop
1691
1692 dbit_nolock_20:
1693 #endif
1694 update_dirty ptp,pte,t1
1695
1696 make_insert_tlb spc,pte,prot
1697
1698 f_extend pte,t1
1699
1700 idtlbt pte,prot
1701
1702 #ifdef CONFIG_SMP
1703 CMPIB=,n 0,spc,dbit_nounlock_20
1704 ldi 1,t1
1705 stw t1,0(t0) /* release pa_dbit_lock */
1706
1707 dbit_nounlock_20:
1708 #endif
1709
1710 rfir
1711 nop
1712 #endif
1713
1714 .import handle_interruption,code
1715
/*
 * Slow-path fault exits.  Each stub branches to intr_save with the
 * interruption code loaded into %r8 in the branch delay slot; the
 * C handler (handle_interruption) is eventually invoked from there.
 */
1716 kernel_bad_space:
1717 b intr_save
1718 ldi 31,%r8 /* Use an unused code */
1719
1720 dbit_fault:
1721 b intr_save
1722 ldi 20,%r8
1723
1724 itlb_fault:
1725 b intr_save
1726 ldi 6,%r8
1727
1728 nadtlb_fault:
1729 b intr_save
1730 ldi 17,%r8
1731
1732 dtlb_fault:
1733 b intr_save
1734 ldi 15,%r8
1735
1736 /* Register saving semantics for system calls:
1737
1738 %r1 clobbered by system call macro in userspace
1739 %r2 saved in PT_REGS by gateway page
1740 %r3 - %r18 preserved by C code (saved by signal code)
1741 %r19 - %r20 saved in PT_REGS by gateway page
1742 %r21 - %r22 non-standard syscall args
1743 stored in kernel stack by gateway page
1744 %r23 - %r26 arg3-arg0, saved in PT_REGS by gateway page
1745 %r27 - %r30 saved in PT_REGS by gateway page
1746 %r31 syscall return pointer
1747 */
1748
1749 /* Floating point registers (FIXME: what do we do with these?)
1750
1751 %fr0 - %fr3 status/exception, not preserved
1752 %fr4 - %fr7 arguments
1753 %fr8 - %fr11 not preserved by C code
1754 %fr12 - %fr21 preserved by C code
1755 %fr22 - %fr31 not preserved by C code
1756 */
1757
/* Save callee-saved registers %r3-%r18 into the pt_regs struct
 * pointed to by \regs (counterpart of reg_restore below). */
1758 .macro reg_save regs
1759 STREG %r3, PT_GR3(\regs)
1760 STREG %r4, PT_GR4(\regs)
1761 STREG %r5, PT_GR5(\regs)
1762 STREG %r6, PT_GR6(\regs)
1763 STREG %r7, PT_GR7(\regs)
1764 STREG %r8, PT_GR8(\regs)
1765 STREG %r9, PT_GR9(\regs)
1766 STREG %r10,PT_GR10(\regs)
1767 STREG %r11,PT_GR11(\regs)
1768 STREG %r12,PT_GR12(\regs)
1769 STREG %r13,PT_GR13(\regs)
1770 STREG %r14,PT_GR14(\regs)
1771 STREG %r15,PT_GR15(\regs)
1772 STREG %r16,PT_GR16(\regs)
1773 STREG %r17,PT_GR17(\regs)
1774 STREG %r18,PT_GR18(\regs)
1775 .endm
1776
/* Reload callee-saved registers %r3-%r18 from the pt_regs struct
 * pointed to by \regs (counterpart of reg_save above). */
1777 .macro reg_restore regs
1778 LDREG PT_GR3(\regs), %r3
1779 LDREG PT_GR4(\regs), %r4
1780 LDREG PT_GR5(\regs), %r5
1781 LDREG PT_GR6(\regs), %r6
1782 LDREG PT_GR7(\regs), %r7
1783 LDREG PT_GR8(\regs), %r8
1784 LDREG PT_GR9(\regs), %r9
1785 LDREG PT_GR10(\regs),%r10
1786 LDREG PT_GR11(\regs),%r11
1787 LDREG PT_GR12(\regs),%r12
1788 LDREG PT_GR13(\regs),%r13
1789 LDREG PT_GR14(\regs),%r14
1790 LDREG PT_GR15(\regs),%r15
1791 LDREG PT_GR16(\regs),%r16
1792 LDREG PT_GR17(\regs),%r17
1793 LDREG PT_GR18(\regs),%r18
1794 .endm
1795
1796 .export sys_fork_wrapper
1797 .export child_return
/*
 * sys_fork_wrapper: fork is implemented as sys_clone(SIGCHLD, usp,
 * regs, 0).  Saves r3-r18 and cr27 into pt_regs first, since the
 * child will resume through child_return/wrapper_exit below.
 */
1798 sys_fork_wrapper:
1799 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
1800 ldo TASK_REGS(%r1),%r1
1801 reg_save %r1
1802 mfctl %cr27, %r3
1803 STREG %r3, PT_CR27(%r1)
1804
1805 STREG %r2,-RP_OFFSET(%r30)
1806 ldo FRAME_SIZE(%r30),%r30
1807 #ifdef __LP64__
1808 ldo -16(%r30),%r29 /* Reference param save area */
1809 #endif
1810
1811 /* These are call-clobbered registers and therefore
1812 also syscall-clobbered (we hope). */
1813 STREG %r2,PT_GR19(%r1) /* save for child */
1814 STREG %r30,PT_GR21(%r1)
1815
1816 LDREG PT_GR30(%r1),%r25 /* arg1 = user sp */
1817 copy %r1,%r24 /* arg2 = pt_regs */
1818 BL sys_clone,%r2
1819 ldi SIGCHLD,%r26 /* arg0 = SIGCHLD (in delay slot) */
1820
1821 LDREG -RP_OFFSET-FRAME_SIZE(%r30),%r2
/* Common exit for the fork/clone/vfork wrappers: pop the frame,
 * restore cr27 and r3-r18 from pt_regs, and return via saved rp. */
1822 wrapper_exit:
1823 ldo -FRAME_SIZE(%r30),%r30 /* get the stackframe */
1824 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
1825 ldo TASK_REGS(%r1),%r1 /* get pt regs */
1826
1827 LDREG PT_CR27(%r1), %r3
1828 mtctl %r3, %cr27
1829 reg_restore %r1
1830
1831 /* strace expects syscall # to be preserved in r20 */
1832 ldi __NR_fork,%r20
1833 bv %r0(%r2)
1834 STREG %r20,PT_GR20(%r1)
1835
1836 /* Set the return value for the child */
/* First code the child runs: call schedule_tail, reload the saved
 * return point from PT_GR19, and exit with a zero return value. */
1837 child_return:
1838 BL schedule_tail, %r2
1839 nop
1840
1841 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE-FRAME_SIZE(%r30), %r1
1842 LDREG TASK_PT_GR19(%r1),%r2
1843 b wrapper_exit
1844 copy %r0,%r28 /* child's syscall return = 0 (delay slot) */
1845
1846
1847 .export sys_clone_wrapper
/*
 * sys_clone_wrapper: save r3-r18 and cr27 into pt_regs, pass the
 * pt_regs pointer as an extra argument to sys_clone, then exit
 * through wrapper_exit (shared with sys_fork_wrapper above).
 */
1848 sys_clone_wrapper:
1849 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
1850 ldo TASK_REGS(%r1),%r1 /* get pt regs */
1851 reg_save %r1
1852 mfctl %cr27, %r3
1853 STREG %r3, PT_CR27(%r1)
1854
1855 STREG %r2,-RP_OFFSET(%r30)
1856 ldo FRAME_SIZE(%r30),%r30
1857 #ifdef __LP64__
1858 ldo -16(%r30),%r29 /* Reference param save area */
1859 #endif
1860
1861 STREG %r2,PT_GR19(%r1) /* save for child */
1862 STREG %r30,PT_GR21(%r1)
1863 BL sys_clone,%r2
1864 copy %r1,%r24 /* pt_regs arg (delay slot) */
1865
1866 b wrapper_exit
1867 LDREG -RP_OFFSET-FRAME_SIZE(%r30),%r2
1868
1869 .export sys_vfork_wrapper
/*
 * sys_vfork_wrapper: like sys_clone_wrapper, but calls sys_vfork
 * with the pt_regs pointer as its first argument (%r26).
 */
1870 sys_vfork_wrapper:
1871 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
1872 ldo TASK_REGS(%r1),%r1 /* get pt regs */
1873 reg_save %r1
1874 mfctl %cr27, %r3
1875 STREG %r3, PT_CR27(%r1)
1876
1877 STREG %r2,-RP_OFFSET(%r30)
1878 ldo FRAME_SIZE(%r30),%r30
1879 #ifdef __LP64__
1880 ldo -16(%r30),%r29 /* Reference param save area */
1881 #endif
1882
1883 STREG %r2,PT_GR19(%r1) /* save for child */
1884 STREG %r30,PT_GR21(%r1)
1885
1886 BL sys_vfork,%r2
1887 copy %r1,%r26 /* pt_regs arg (delay slot) */
1888
1889 b wrapper_exit
1890 LDREG -RP_OFFSET-FRAME_SIZE(%r30),%r2
1891
1892
/*
 * execve_wrapper: common body for sys_execve / sys32_execve wrappers.
 * Passes the task's pt_regs pointer to \execve; on success the callee
 * arranges the new program state, so on a successful return we branch
 * through %r19 (saved rp) and otherwise fall into error_\execve.
 */
1893 .macro execve_wrapper execve
1894 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
1895 ldo TASK_REGS(%r1),%r1 /* get pt regs */
1896
1897 /*
1898 * Do we need to save/restore r3-r18 here?
1899 * I don't think so. why would new thread need old
1900 * threads registers?
1901 */
1902
1903 /* %arg0 - %arg3 are already saved for us. */
1904
1905 STREG %r2,-RP_OFFSET(%r30)
1906 ldo FRAME_SIZE(%r30),%r30
1907 #ifdef __LP64__
1908 ldo -16(%r30),%r29 /* Reference param save area */
1909 #endif
1910 BL \execve,%r2
1911 copy %r1,%arg0 /* pt_regs arg (delay slot) */
1912
1913 ldo -FRAME_SIZE(%r30),%r30
1914 LDREG -RP_OFFSET(%r30),%r2
1915
1916 /* If exec succeeded we need to load the args */
1917
/* %r28 in (-1024, 0] range means an errno-style failure. */
1918 ldo -1024(%r0),%r1
1919 cmpb,>>= %r28,%r1,error_\execve
1920 copy %r2,%r19
1921
1922 error_\execve:
1923 bv %r0(%r19)
1924 nop
1925 .endm
1926
1927 .export sys_execve_wrapper
1928 .import sys_execve
1929
1930 sys_execve_wrapper:
1931 execve_wrapper sys_execve
1932
1933 #ifdef __LP64__
1934 .export sys32_execve_wrapper
1935 .import sys32_execve
1936
1937 sys32_execve_wrapper:
1938 execve_wrapper sys32_execve
1939 #endif
1940
1941 .export sys_rt_sigreturn_wrapper
/*
 * sys_rt_sigreturn_wrapper: calls sys_rt_sigreturn with the task's
 * pt_regs, then restores r3-r18 from pt_regs (the C code rewrote
 * them from the sigcontext) and returns via the saved rp.
 */
1942 sys_rt_sigreturn_wrapper:
1943 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26
1944 ldo TASK_REGS(%r26),%r26 /* get pt regs */
1945 /* Don't save regs, we are going to restore them from sigcontext. */
1946 STREG %r2, -RP_OFFSET(%r30)
1947 #ifdef __LP64__
1948 ldo FRAME_SIZE(%r30), %r30
1949 BL sys_rt_sigreturn,%r2
1950 ldo -16(%r30),%r29 /* Reference param save area */
1951 #else
1952 BL sys_rt_sigreturn,%r2
1953 ldo FRAME_SIZE(%r30), %r30 /* frame push done in delay slot */
1954 #endif
1955
1956 ldo -FRAME_SIZE(%r30), %r30
1957 LDREG -RP_OFFSET(%r30), %r2
1958
1959 /* FIXME: I think we need to restore a few more things here. */
1960 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
1961 ldo TASK_REGS(%r1),%r1 /* get pt regs */
1962 reg_restore %r1
1963
1964 /* If the signal was received while the process was blocked on a
1965 * syscall, then r2 will take us to syscall_exit; otherwise r2 will
1966 * take us to syscall_exit_rfi and on to intr_return.
1967 */
1968 bv %r0(%r2)
1969 LDREG PT_GR28(%r1),%r28 /* reload original r28 for syscall_exit */
1970
1971 .export sys_sigaltstack_wrapper
/*
 * sys_sigaltstack_wrapper: fetch the user stack pointer from pt_regs
 * (PT_GR30) into the third argument and call do_sigaltstack.
 */
1972 sys_sigaltstack_wrapper:
1973 /* Get the user stack pointer */
1974 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
1975 ldo TASK_REGS(%r1),%r24 /* get pt regs */
1976 LDREG TASK_PT_GR30(%r24),%r24
1977 STREG %r2, -RP_OFFSET(%r30)
1978 #ifdef __LP64__
1979 ldo FRAME_SIZE(%r30), %r30
1980 b,l do_sigaltstack,%r2 /* b,l reaches farther than bl */
1981 ldo -16(%r30),%r29 /* Reference param save area */
1982 #else
1983 bl do_sigaltstack,%r2
1984 ldo FRAME_SIZE(%r30), %r30 /* frame push done in delay slot */
1985 #endif
1986
1987 ldo -FRAME_SIZE(%r30), %r30
1988 LDREG -RP_OFFSET(%r30), %r2
1989 bv %r0(%r2)
1990 nop
1991
1992 #ifdef __LP64__
1993 .export sys32_sigaltstack_wrapper
/* 32-bit-compat variant: same shape, calls do_sigaltstack32. */
1994 sys32_sigaltstack_wrapper:
1995 /* Get the user stack pointer */
1996 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r24
1997 LDREG TASK_PT_GR30(%r24),%r24
1998 STREG %r2, -RP_OFFSET(%r30)
1999 ldo FRAME_SIZE(%r30), %r30
2000 b,l do_sigaltstack32,%r2
2001 ldo -16(%r30),%r29 /* Reference param save area */
2002
2003 ldo -FRAME_SIZE(%r30), %r30
2004 LDREG -RP_OFFSET(%r30), %r2
2005 bv %r0(%r2)
2006 nop
2007 #endif
2008
2009 .export sys_rt_sigsuspend_wrapper
/*
 * sys_rt_sigsuspend_wrapper: save r3-r18 into pt_regs (the signal
 * code may need them for the sigcontext), call sys_rt_sigsuspend,
 * restore them afterwards and return via the saved rp.
 */
2010 sys_rt_sigsuspend_wrapper:
2011 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
2012 ldo TASK_REGS(%r1),%r24
2013 reg_save %r24
2014
2015 STREG %r2, -RP_OFFSET(%r30)
2016 #ifdef __LP64__
2017 ldo FRAME_SIZE(%r30), %r30
2018 b,l sys_rt_sigsuspend,%r2 /* b,l reaches farther than bl */
2019 ldo -16(%r30),%r29 /* Reference param save area */
2020 #else
2021 bl sys_rt_sigsuspend,%r2
2022 ldo FRAME_SIZE(%r30), %r30 /* frame push done in delay slot */
2023 #endif
2024
2025 ldo -FRAME_SIZE(%r30), %r30
2026 LDREG -RP_OFFSET(%r30), %r2
2027
2028 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
2029 ldo TASK_REGS(%r1),%r1
2030 reg_restore %r1
2031
2032 bv %r0(%r2)
2033 nop
2034
2035 .export syscall_exit
/*
 * syscall_exit: common syscall return path.  Stores the return value
 * into pt_regs, then falls through the check labels below: pending
 * softirqs -> syscall_do_softirq, need-resched -> syscall_do_resched,
 * pending signal -> syscall_do_signal, else syscall_restore.
 */
2036 syscall_exit:
2037
2038 /* NOTE: HP-UX syscalls also come through here
2039 * after hpux_syscall_exit fixes up return
2040 * values. */
2041
2042 /* NOTE: Not all syscalls exit this way. rt_sigreturn will exit
2043 * via syscall_exit_rfi if the signal was received while the process
2044 * was running.
2045 */
2046
2047 /* save return value now */
2048
2049 mfctl %cr30, %r1
2050 LDREG TI_TASK(%r1),%r1
2051 STREG %r28,TASK_PT_GR28(%r1)
2052
2053 #ifdef CONFIG_HPUX
2054
2055 /* <linux/personality.h> cannot be easily included */
2056 #define PER_HPUX 0x10
2057 LDREG TASK_PERSONALITY(%r1),%r19
2058
2059 /* We can't use "CMPIB<> PER_HPUX" since "im5" field is sign extended */
2060 ldo -PER_HPUX(%r19), %r19
2061 CMPIB<>,n 0,%r19,1f
2062
2063 /* Save other hpux returns if personality is PER_HPUX */
2064 STREG %r22,TASK_PT_GR22(%r1)
2065 STREG %r29,TASK_PT_GR29(%r1)
2066 1:
2067
2068 #endif /* CONFIG_HPUX */
2069
2070 /* Seems to me that dp could be wrong here, if the syscall involved
2071 * calling a module, and nothing got round to restoring dp on return.
2072 */
2073 loadgp
2074
2075 syscall_check_bh:
2076
2077 /* Check for software interrupts */
2078
2079 .import irq_stat,data
2080
2081 load32 irq_stat,%r19
2082
2083 #ifdef CONFIG_SMP
2084 /* sched.h: int processor */
2085 /* %r26 is used as scratch register to index into irq_stat[] */
2086 ldw TI_CPU-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26 /* cpu # */
2087
2088 /* shift left ____cacheline_aligned (aka L1_CACHE_BYTES) bits */
2089 #ifdef __LP64__
2090 shld %r26, 6, %r20
2091 #else
2092 shlw %r26, 5, %r20
2093 #endif
2094 add %r19,%r20,%r19 /* now have &irq_stat[smp_processor_id()] */
2095 #endif /* CONFIG_SMP */
2096
2097 LDREG IRQSTAT_SIRQ_PEND(%r19),%r20 /* hardirq.h: unsigned long */
2098 cmpib,<>,n 0,%r20,syscall_do_softirq /* forward */
2099
2100 syscall_check_resched:
2101
2102 /* check for reschedule */
2103
2104 LDREG TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19 /* long */
2105 bb,<,n %r19, 31-TIF_NEED_RESCHED, syscall_do_resched /* forward */
2106
2107 syscall_check_sig:
2108 LDREG TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19 /* get ti flags */
2109 bb,<,n %r19, 31-TIF_SIGPENDING, syscall_do_signal /* forward */
2110
/*
 * syscall_restore: fast return to user space.  Ptraced tasks are
 * diverted to syscall_restore_rfi; otherwise restore FP regs, SAR,
 * the caller-visible GRs and the space registers, then branch back
 * to the user's syscall return point with be.
 */
2111 syscall_restore:
2112 /* Are we being ptraced? */
2113 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
2114
2115 LDREG TASK_PTRACE(%r1), %r19
2116 bb,< %r19,31,syscall_restore_rfi
2117 nop
2118
2119 ldo TASK_PT_FR31(%r1),%r19 /* reload fpregs */
2120 rest_fp %r19
2121
2122 LDREG TASK_PT_SAR(%r1),%r19 /* restore SAR */
2123 mtsar %r19
2124
2125 LDREG TASK_PT_GR2(%r1),%r2 /* restore user rp */
2126 LDREG TASK_PT_GR19(%r1),%r19
2127 LDREG TASK_PT_GR20(%r1),%r20
2128 LDREG TASK_PT_GR21(%r1),%r21
2129 LDREG TASK_PT_GR22(%r1),%r22
2130 LDREG TASK_PT_GR23(%r1),%r23
2131 LDREG TASK_PT_GR24(%r1),%r24
2132 LDREG TASK_PT_GR25(%r1),%r25
2133 LDREG TASK_PT_GR26(%r1),%r26
2134 LDREG TASK_PT_GR27(%r1),%r27 /* restore user dp */
2135 LDREG TASK_PT_GR28(%r1),%r28 /* syscall return value */
2136 LDREG TASK_PT_GR29(%r1),%r29
2137 LDREG TASK_PT_GR31(%r1),%r31 /* restore syscall rp */
2138
2139 /* NOTE: We use rsm/ssm pair to make this operation atomic */
2140 rsm PSW_SM_I, %r0
2141 LDREG TASK_PT_GR30(%r1),%r30 /* restore user sp */
2142 mfsp %sr3,%r1 /* Get users space id */
2143 mtsp %r1,%sr7 /* Restore sr7 */
2144 ssm PSW_SM_I, %r0
2145
2146 /* Set sr2 to zero for userspace syscalls to work. */
2147 mtsp %r0,%sr2
2148 mtsp %r1,%sr4 /* Restore sr4 */
2149 mtsp %r1,%sr5 /* Restore sr5 */
2150 mtsp %r1,%sr6 /* Restore sr6 */
2151
2152 depi 3,31,2,%r31 /* ensure return to user mode. */
2153
2154 #ifdef __LP64__
2155 /* decide whether to reset the wide mode bit
2156 *
2157 * For a syscall, the W bit is stored in the lowest bit
2158 * of sp. Extract it and reset W if it is zero */
2159 extrd,u,*<> %r30,63,1,%r1
2160 rsm PSW_SM_W, %r0
2161 /* now reset the lowest bit of sp if it was set */
2162 xor %r30,%r1,%r30
2163 #endif
2164 be,n 0(%sr3,%r31) /* return to user space */
2165
2166 /* We have to return via an RFI, so that PSW T and R bits can be set
2167 * appropriately.
2168 * This sets up pt_regs so we can return via intr_restore, which is not
2169 * the most efficient way of doing things, but it works.
2170 */
2171 syscall_restore_rfi:
2172 ldo -1(%r0),%r2 /* Set recovery cntr to -1 */
2173 mtctl %r2,%cr0 /* for immediate trap */
2174 LDREG TASK_PT_PSW(%r1),%r2 /* Get old PSW */
2175 ldi 0x0b,%r20 /* Create new PSW */
2176 depi -1,13,1,%r20 /* C, Q, D, and I bits */
2177
2178 /* The values of PA_SINGLESTEP_BIT and PA_BLOCKSTEP_BIT are
2179 * set in include/linux/ptrace.h and converted to PA bitmap
2180 * numbers in asm-offsets.c */
2181
2182 /* if ((%r19.PA_SINGLESTEP_BIT)) { %r20.27=1} */
2183 extru,= %r19,PA_SINGLESTEP_BIT,1,%r0
2184 depi -1,27,1,%r20 /* R bit */
2185
2186 /* if ((%r19.PA_BLOCKSTEP_BIT)) { %r20.7=1} */
2187 extru,= %r19,PA_BLOCKSTEP_BIT,1,%r0
2188 depi -1,7,1,%r20 /* T bit */
2189
2190 STREG %r20,TASK_PT_PSW(%r1)
2191
2192 /* Always store space registers, since sr3 can be changed (e.g. fork) */
2193
2194 mfsp %sr3,%r25
2195 STREG %r25,TASK_PT_SR3(%r1)
2196 STREG %r25,TASK_PT_SR4(%r1)
2197 STREG %r25,TASK_PT_SR5(%r1)
2198 STREG %r25,TASK_PT_SR6(%r1)
2199 STREG %r25,TASK_PT_SR7(%r1)
2200 STREG %r25,TASK_PT_IASQ0(%r1)
2201 STREG %r25,TASK_PT_IASQ1(%r1)
2202
2203 /* XXX W bit??? */
2204 /* Now if old D bit is clear, it means we didn't save all registers
2205 * on syscall entry, so do that now. This only happens on TRACEME
2206 * calls, or if someone attached to us while we were on a syscall.
2207 * We could make this more efficient by not saving r3-r18, but
2208 * then we wouldn't be able to use the common intr_restore path.
2209 * It is only for traced processes anyway, so performance is not
2210 * an issue.
2211 */
2212 bb,< %r2,30,pt_regs_ok /* Branch if D set */
2213 ldo TASK_REGS(%r1),%r25 /* delay slot: %r25 = pt_regs either way */
2214 reg_save %r25 /* Save r3 to r18 */
2215
2216 /* Save the current sr */
2217 mfsp %sr0,%r2
2218 STREG %r2,TASK_PT_SR0(%r1)
2219
2220 /* Save the scratch sr */
2221 mfsp %sr1,%r2
2222 STREG %r2,TASK_PT_SR1(%r1)
2223
2224 /* sr2 should be set to zero for userspace syscalls */
2225 STREG %r0,TASK_PT_SR2(%r1)
2226
/* Set the return IAOQ from the saved syscall rp (user mode forced)
 * and hand off to intr_restore with %r16 = pt_regs. */
2227 pt_regs_ok:
2228 LDREG TASK_PT_GR31(%r1),%r2
2229 depi 3,31,2,%r2 /* ensure return to user mode. */
2230 STREG %r2,TASK_PT_IAOQ0(%r1)
2231 ldo 4(%r2),%r2
2232 STREG %r2,TASK_PT_IAOQ1(%r1)
2233 copy %r25,%r16
2234 b intr_restore
2235 nop
2236
2237 .import do_softirq,code
/* Run pending softirqs, then re-run the exit checks from the top. */
2238 syscall_do_softirq:
2239 BL do_softirq,%r2
2240 nop
2241 /* NOTE: We enable I-bit incase we schedule later,
2242 * and we might be going back to userspace if we were
2243 * traced. */
2244 b syscall_check_resched
2245 ssm PSW_SM_I, %r0 /* do_softirq returns with I bit off */
2246
2247 .import schedule,code
/* Call schedule(), then restart the whole exit-check sequence. */
2248 syscall_do_resched:
2249 BL schedule,%r2
2250 #ifdef __LP64__
2251 ldo -16(%r30),%r29 /* Reference param save area */
2252 #else
2253 nop
2254 #endif
2255 b syscall_check_bh /* if resched, we start over again */
2256 nop
2257
2258 .import do_signal,code
/* Deliver pending signals: do_signal(oldset=NULL, regs, in_syscall=1),
 * with r3-r18 saved/restored around the call for the sigcontext. */
2259 syscall_do_signal:
2260 /* Save callee-save registers (for sigcontext).
2261 FIXME: After this point the process structure should be
2262 consistent with all the relevant state of the process
2263 before the syscall. We need to verify this. */
2264 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
2265 ldo TASK_REGS(%r1), %r25 /* struct pt_regs *regs */
2266 reg_save %r25
2267
2268 ldi 1, %r24 /* unsigned long in_syscall */
2269
2270 #ifdef __LP64__
2271 ldo -16(%r30),%r29 /* Reference param save area */
2272 #endif
2273 BL do_signal,%r2
2274 copy %r0, %r26 /* sigset_t *oldset = NULL */
2275
2276 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
2277 ldo TASK_REGS(%r1), %r20 /* reload pt_regs */
2278 reg_restore %r20
2279
2280 b,n syscall_check_sig
2281
2282 /*
2283 * get_register is used by the non access tlb miss handlers to
2284 * copy the value of the general register specified in r8 into
2285 * r1. This routine can't be used for shadowed registers, since
2286 * the rfir will restore the original value. So, for the shadowed
2287 * registers we put a -1 into r1 to indicate that the register
2288 * should not be used (the register being copied could also have
2289 * a -1 in it, but that is OK, it just means that we will have
2290 * to use the slow path instead).
2291 */
2292
/* blr indexes into the table below: each entry is two instructions
 * (bv back to the caller via %r25, with the copy/ldi in the delay
 * slot), so entry N handles register N. */
2293 get_register:
2294 blr %r8,%r0
2295 nop
2296 bv %r0(%r25) /* r0 */
2297 copy %r0,%r1
2298 bv %r0(%r25) /* r1 - shadowed */
2299 ldi -1,%r1
2300 bv %r0(%r25) /* r2 */
2301 copy %r2,%r1
2302 bv %r0(%r25) /* r3 */
2303 copy %r3,%r1
2304 bv %r0(%r25) /* r4 */
2305 copy %r4,%r1
2306 bv %r0(%r25) /* r5 */
2307 copy %r5,%r1
2308 bv %r0(%r25) /* r6 */
2309 copy %r6,%r1
2310 bv %r0(%r25) /* r7 */
2311 copy %r7,%r1
2312 bv %r0(%r25) /* r8 - shadowed */
2313 ldi -1,%r1
2314 bv %r0(%r25) /* r9 - shadowed */
2315 ldi -1,%r1
2316 bv %r0(%r25) /* r10 */
2317 copy %r10,%r1
2318 bv %r0(%r25) /* r11 */
2319 copy %r11,%r1
2320 bv %r0(%r25) /* r12 */
2321 copy %r12,%r1
2322 bv %r0(%r25) /* r13 */
2323 copy %r13,%r1
2324 bv %r0(%r25) /* r14 */
2325 copy %r14,%r1
2326 bv %r0(%r25) /* r15 */
2327 copy %r15,%r1
2328 bv %r0(%r25) /* r16 - shadowed */
2329 ldi -1,%r1
2330 bv %r0(%r25) /* r17 - shadowed */
2331 ldi -1,%r1
2332 bv %r0(%r25) /* r18 */
2333 copy %r18,%r1
2334 bv %r0(%r25) /* r19 */
2335 copy %r19,%r1
2336 bv %r0(%r25) /* r20 */
2337 copy %r20,%r1
2338 bv %r0(%r25) /* r21 */
2339 copy %r21,%r1
2340 bv %r0(%r25) /* r22 */
2341 copy %r22,%r1
2342 bv %r0(%r25) /* r23 */
2343 copy %r23,%r1
2344 bv %r0(%r25) /* r24 - shadowed */
2345 ldi -1,%r1
2346 bv %r0(%r25) /* r25 - shadowed */
2347 ldi -1,%r1
2348 bv %r0(%r25) /* r26 */
2349 copy %r26,%r1
2350 bv %r0(%r25) /* r27 */
2351 copy %r27,%r1
2352 bv %r0(%r25) /* r28 */
2353 copy %r28,%r1
2354 bv %r0(%r25) /* r29 */
2355 copy %r29,%r1
2356 bv %r0(%r25) /* r30 */
2357 copy %r30,%r1
2358 bv %r0(%r25) /* r31 */
2359 copy %r31,%r1
2360
2361 /*
2362 * set_register is used by the non access tlb miss handlers to
2363 * copy the value of r1 into the general register specified in
2364 * r8.
2365 */
2366
/* Same two-instructions-per-entry blr jump table as get_register,
 * but copying %r1 out to the register selected by %r8. */
2367 set_register:
2368 blr %r8,%r0
2369 nop
2370 bv %r0(%r25) /* r0 (silly, but it is a place holder) */
2371 copy %r1,%r0
2372 bv %r0(%r25) /* r1 */
2373 copy %r1,%r1
2374 bv %r0(%r25) /* r2 */
2375 copy %r1,%r2
2376 bv %r0(%r25) /* r3 */
2377 copy %r1,%r3
2378 bv %r0(%r25) /* r4 */
2379 copy %r1,%r4
2380 bv %r0(%r25) /* r5 */
2381 copy %r1,%r5
2382 bv %r0(%r25) /* r6 */
2383 copy %r1,%r6
2384 bv %r0(%r25) /* r7 */
2385 copy %r1,%r7
2386 bv %r0(%r25) /* r8 */
2387 copy %r1,%r8
2388 bv %r0(%r25) /* r9 */
2389 copy %r1,%r9
2390 bv %r0(%r25) /* r10 */
2391 copy %r1,%r10
2392 bv %r0(%r25) /* r11 */
2393 copy %r1,%r11
2394 bv %r0(%r25) /* r12 */
2395 copy %r1,%r12
2396 bv %r0(%r25) /* r13 */
2397 copy %r1,%r13
2398 bv %r0(%r25) /* r14 */
2399 copy %r1,%r14
2400 bv %r0(%r25) /* r15 */
2401 copy %r1,%r15
2402 bv %r0(%r25) /* r16 */
2403 copy %r1,%r16
2404 bv %r0(%r25) /* r17 */
2405 copy %r1,%r17
2406 bv %r0(%r25) /* r18 */
2407 copy %r1,%r18
2408 bv %r0(%r25) /* r19 */
2409 copy %r1,%r19
2410 bv %r0(%r25) /* r20 */
2411 copy %r1,%r20
2412 bv %r0(%r25) /* r21 */
2413 copy %r1,%r21
2414 bv %r0(%r25) /* r22 */
2415 copy %r1,%r22
2416 bv %r0(%r25) /* r23 */
2417 copy %r1,%r23
2418 bv %r0(%r25) /* r24 */
2419 copy %r1,%r24
2420 bv %r0(%r25) /* r25 */
2421 copy %r1,%r25
2422 bv %r0(%r25) /* r26 */
2423 copy %r1,%r26
2424 bv %r0(%r25) /* r27 */
2425 copy %r1,%r27
2426 bv %r0(%r25) /* r28 */
2427 copy %r1,%r28
2428 bv %r0(%r25) /* r29 */
2429 copy %r1,%r29
2430 bv %r0(%r25) /* r30 */
2431 copy %r1,%r30
2432 bv %r0(%r25) /* r31 */
2433 copy %r1,%r31
This page took 0.125614 seconds and 5 git commands to generate.