MIPS: Implement HAVE_CONTEXT_TRACKING.
arch/mips/kernel/unaligned.c
1/*
2 * Handle unaligned accesses by emulation.
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 1996, 1998, 1999, 2002 by Ralf Baechle
9 * Copyright (C) 1999 Silicon Graphics, Inc.
10 *
11 * This file contains exception handler for address error exception with the
12 * special capability to execute faulting instructions in software. The
13 * handler does not try to handle the case when the program counter points
14 * to an address not aligned to a word boundary.
15 *
16 * Putting data to unaligned addresses is a bad practice even on Intel where
17 * only the performance is affected. Much worse is that such code is non-
18 * portable. Due to several programs that die on MIPS due to alignment
19 * problems I decided to implement this handler anyway though I originally
20 * didn't intend to do this at all for user code.
21 *
22 * For now I enable fixing of address errors by default to make life easier.
23 * I however intend to disable this sometime in the future when the alignment
24 * problems with user programs have been fixed. For programmers this is the
25 * right way to go.
26 *
27 * Fixing address errors is a per process option. The option is inherited
28 * across fork(2) and execve(2) calls. If you really want to use the
29 * option in your user programs - I discourage the use of the software
30 * emulation strongly - use the following code in your userland stuff:
31 *
32 * #include <sys/sysmips.h>
33 *
34 * ...
35 * sysmips(MIPS_FIXADE, x);
36 * ...
37 *
38 * The argument x is 0 to disable software emulation; any other value enables it.
39 *
40 * Below is a little program to play around with this feature.
41 *
42 * #include <stdio.h>
43 * #include <sys/sysmips.h>
44 *
45 * struct foo {
46 * unsigned char bar[8];
47 * };
48 *
49 * main(int argc, char *argv[])
50 * {
51 * struct foo x = {0, 1, 2, 3, 4, 5, 6, 7};
52 * unsigned int *p = (unsigned int *) (x.bar + 3);
53 * int i;
54 *
55 * if (argc > 1)
56 * sysmips(MIPS_FIXADE, atoi(argv[1]));
57 *
58 * printf("*p = %08lx\n", *p);
59 *
60 * *p = 0xdeadface;
61 *
62 * for(i = 0; i <= 7; i++)
63 * printf("%02x ", x.bar[i]);
64 * printf("\n");
65 * }
66 *
67 * Coprocessor loads are not supported; I think this case is unimportant
68 * in practice.
69 *
70 * TODO: Handle ndc (attempted store to doubleword in uncached memory)
71 * exception for the R6000.
72 * A store crossing a page boundary might be executed only partially.
73 * Undo the partial store in this case.
74 */
75#include <linux/context_tracking.h>
76#include <linux/mm.h>
77#include <linux/signal.h>
78#include <linux/smp.h>
79#include <linux/sched.h>
80#include <linux/debugfs.h>
81#include <linux/perf_event.h>
82
83#include <asm/asm.h>
84#include <asm/branch.h>
85#include <asm/byteorder.h>
86#include <asm/cop2.h>
87#include <asm/fpu.h>
88#include <asm/fpu_emulator.h>
89#include <asm/inst.h>
90#include <asm/uaccess.h>
91#include <asm/fpu.h>
92#include <asm/fpu_emulator.h>
93
94#define STR(x) __STR(x)
95#define __STR(x) #x
96
97enum {
98 UNALIGNED_ACTION_QUIET,
99 UNALIGNED_ACTION_SIGNAL,
100 UNALIGNED_ACTION_SHOW,
101};
102#ifdef CONFIG_DEBUG_FS
103static u32 unaligned_instructions;
104static u32 unaligned_action;
105#else
106#define unaligned_action UNALIGNED_ACTION_QUIET
107#endif
108extern void show_registers(struct pt_regs *regs);
109
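/*
 * The LoadHW/LoadW/.../StoreDW macros below open-code an unaligned access
 * with byte (lb/sb) or partial-word (lwl/lwr, ldl/ldr, swl/swr, sdl/sdr)
 * instructions.  Every instruction that may fault gets an __ex_table entry
 * pointing at a .fixup stub that writes -EFAULT into 'res', so a bad user
 * pointer is reported to the caller instead of taking an unhandled fault.
 * The big- and little-endian variants differ only in the byte offsets used.
 */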
110#ifdef __BIG_ENDIAN
111#define LoadHW(addr, value, res) \
112 __asm__ __volatile__ (".set\tnoat\n" \
113 "1:\tlb\t%0, 0(%2)\n" \
114 "2:\tlbu\t$1, 1(%2)\n\t" \
115 "sll\t%0, 0x8\n\t" \
116 "or\t%0, $1\n\t" \
117 "li\t%1, 0\n" \
118 "3:\t.set\tat\n\t" \
119 ".insn\n\t" \
120 ".section\t.fixup,\"ax\"\n\t" \
121 "4:\tli\t%1, %3\n\t" \
122 "j\t3b\n\t" \
123 ".previous\n\t" \
124 ".section\t__ex_table,\"a\"\n\t" \
125 STR(PTR)"\t1b, 4b\n\t" \
126 STR(PTR)"\t2b, 4b\n\t" \
127 ".previous" \
128 : "=&r" (value), "=r" (res) \
129 : "r" (addr), "i" (-EFAULT));
130
131#define LoadW(addr, value, res) \
132 __asm__ __volatile__ ( \
133 "1:\tlwl\t%0, (%2)\n" \
134 "2:\tlwr\t%0, 3(%2)\n\t" \
135 "li\t%1, 0\n" \
136 "3:\n\t" \
137 ".insn\n\t" \
138 ".section\t.fixup,\"ax\"\n\t" \
139 "4:\tli\t%1, %3\n\t" \
140 "j\t3b\n\t" \
141 ".previous\n\t" \
142 ".section\t__ex_table,\"a\"\n\t" \
143 STR(PTR)"\t1b, 4b\n\t" \
144 STR(PTR)"\t2b, 4b\n\t" \
145 ".previous" \
146 : "=&r" (value), "=r" (res) \
147 : "r" (addr), "i" (-EFAULT));
148
149#define LoadHWU(addr, value, res) \
150 __asm__ __volatile__ ( \
151 ".set\tnoat\n" \
152 "1:\tlbu\t%0, 0(%2)\n" \
153 "2:\tlbu\t$1, 1(%2)\n\t" \
154 "sll\t%0, 0x8\n\t" \
155 "or\t%0, $1\n\t" \
156 "li\t%1, 0\n" \
157 "3:\n\t" \
158 ".insn\n\t" \
159 ".set\tat\n\t" \
160 ".section\t.fixup,\"ax\"\n\t" \
161 "4:\tli\t%1, %3\n\t" \
162 "j\t3b\n\t" \
163 ".previous\n\t" \
164 ".section\t__ex_table,\"a\"\n\t" \
165 STR(PTR)"\t1b, 4b\n\t" \
166 STR(PTR)"\t2b, 4b\n\t" \
167 ".previous" \
168 : "=&r" (value), "=r" (res) \
169 : "r" (addr), "i" (-EFAULT));
170
171#define LoadWU(addr, value, res) \
172 __asm__ __volatile__ ( \
173 "1:\tlwl\t%0, (%2)\n" \
174 "2:\tlwr\t%0, 3(%2)\n\t" \
175 "dsll\t%0, %0, 32\n\t" \
176 "dsrl\t%0, %0, 32\n\t" \
177 "li\t%1, 0\n" \
178 "3:\n\t" \
179 ".insn\n\t" \
180 "\t.section\t.fixup,\"ax\"\n\t" \
181 "4:\tli\t%1, %3\n\t" \
182 "j\t3b\n\t" \
183 ".previous\n\t" \
184 ".section\t__ex_table,\"a\"\n\t" \
185 STR(PTR)"\t1b, 4b\n\t" \
186 STR(PTR)"\t2b, 4b\n\t" \
187 ".previous" \
188 : "=&r" (value), "=r" (res) \
189 : "r" (addr), "i" (-EFAULT));
190
191#define LoadDW(addr, value, res) \
192 __asm__ __volatile__ ( \
193 "1:\tldl\t%0, (%2)\n" \
194 "2:\tldr\t%0, 7(%2)\n\t" \
195 "li\t%1, 0\n" \
196 "3:\n\t" \
197 ".insn\n\t" \
198 "\t.section\t.fixup,\"ax\"\n\t" \
199 "4:\tli\t%1, %3\n\t" \
200 "j\t3b\n\t" \
201 ".previous\n\t" \
202 ".section\t__ex_table,\"a\"\n\t" \
203 STR(PTR)"\t1b, 4b\n\t" \
204 STR(PTR)"\t2b, 4b\n\t" \
205 ".previous" \
206 : "=&r" (value), "=r" (res) \
207 : "r" (addr), "i" (-EFAULT));
208
209#define StoreHW(addr, value, res) \
210 __asm__ __volatile__ ( \
211 ".set\tnoat\n" \
212 "1:\tsb\t%1, 1(%2)\n\t" \
213 "srl\t$1, %1, 0x8\n" \
214 "2:\tsb\t$1, 0(%2)\n\t" \
215 ".set\tat\n\t" \
216 "li\t%0, 0\n" \
217 "3:\n\t" \
218 ".insn\n\t" \
219 ".section\t.fixup,\"ax\"\n\t" \
220 "4:\tli\t%0, %3\n\t" \
221 "j\t3b\n\t" \
222 ".previous\n\t" \
223 ".section\t__ex_table,\"a\"\n\t" \
224 STR(PTR)"\t1b, 4b\n\t" \
225 STR(PTR)"\t2b, 4b\n\t" \
226 ".previous" \
227 : "=r" (res) \
228 : "r" (value), "r" (addr), "i" (-EFAULT));
229
230#define StoreW(addr, value, res) \
231 __asm__ __volatile__ ( \
232 "1:\tswl\t%1,(%2)\n" \
233 "2:\tswr\t%1, 3(%2)\n\t" \
234 "li\t%0, 0\n" \
235 "3:\n\t" \
236 ".insn\n\t" \
237 ".section\t.fixup,\"ax\"\n\t" \
238 "4:\tli\t%0, %3\n\t" \
239 "j\t3b\n\t" \
240 ".previous\n\t" \
241 ".section\t__ex_table,\"a\"\n\t" \
242 STR(PTR)"\t1b, 4b\n\t" \
243 STR(PTR)"\t2b, 4b\n\t" \
244 ".previous" \
245 : "=r" (res) \
246 : "r" (value), "r" (addr), "i" (-EFAULT));
247
248#define StoreDW(addr, value, res) \
249 __asm__ __volatile__ ( \
250 "1:\tsdl\t%1,(%2)\n" \
251 "2:\tsdr\t%1, 7(%2)\n\t" \
252 "li\t%0, 0\n" \
253 "3:\n\t" \
254 ".insn\n\t" \
255 ".section\t.fixup,\"ax\"\n\t" \
256 "4:\tli\t%0, %3\n\t" \
257 "j\t3b\n\t" \
258 ".previous\n\t" \
259 ".section\t__ex_table,\"a\"\n\t" \
260 STR(PTR)"\t1b, 4b\n\t" \
261 STR(PTR)"\t2b, 4b\n\t" \
262 ".previous" \
263 : "=r" (res) \
264 : "r" (value), "r" (addr), "i" (-EFAULT));
265#endif
266
267#ifdef __LITTLE_ENDIAN
268#define LoadHW(addr, value, res) \
269 __asm__ __volatile__ (".set\tnoat\n" \
270 "1:\tlb\t%0, 1(%2)\n" \
271 "2:\tlbu\t$1, 0(%2)\n\t" \
272 "sll\t%0, 0x8\n\t" \
273 "or\t%0, $1\n\t" \
274 "li\t%1, 0\n" \
275 "3:\t.set\tat\n\t" \
276 ".insn\n\t" \
277 ".section\t.fixup,\"ax\"\n\t" \
278 "4:\tli\t%1, %3\n\t" \
279 "j\t3b\n\t" \
280 ".previous\n\t" \
281 ".section\t__ex_table,\"a\"\n\t" \
282 STR(PTR)"\t1b, 4b\n\t" \
283 STR(PTR)"\t2b, 4b\n\t" \
284 ".previous" \
285 : "=&r" (value), "=r" (res) \
286 : "r" (addr), "i" (-EFAULT));
287
288#define LoadW(addr, value, res) \
289 __asm__ __volatile__ ( \
290 "1:\tlwl\t%0, 3(%2)\n" \
291 "2:\tlwr\t%0, (%2)\n\t" \
292 "li\t%1, 0\n" \
293 "3:\n\t" \
294 ".insn\n\t" \
295 ".section\t.fixup,\"ax\"\n\t" \
296 "4:\tli\t%1, %3\n\t" \
297 "j\t3b\n\t" \
298 ".previous\n\t" \
299 ".section\t__ex_table,\"a\"\n\t" \
300 STR(PTR)"\t1b, 4b\n\t" \
301 STR(PTR)"\t2b, 4b\n\t" \
302 ".previous" \
303 : "=&r" (value), "=r" (res) \
304 : "r" (addr), "i" (-EFAULT));
305
306#define LoadHWU(addr, value, res) \
307 __asm__ __volatile__ ( \
308 ".set\tnoat\n" \
309 "1:\tlbu\t%0, 1(%2)\n" \
310 "2:\tlbu\t$1, 0(%2)\n\t" \
311 "sll\t%0, 0x8\n\t" \
312 "or\t%0, $1\n\t" \
313 "li\t%1, 0\n" \
314 "3:\n\t" \
315 ".insn\n\t" \
316 ".set\tat\n\t" \
317 ".section\t.fixup,\"ax\"\n\t" \
318 "4:\tli\t%1, %3\n\t" \
319 "j\t3b\n\t" \
320 ".previous\n\t" \
321 ".section\t__ex_table,\"a\"\n\t" \
322 STR(PTR)"\t1b, 4b\n\t" \
323 STR(PTR)"\t2b, 4b\n\t" \
324 ".previous" \
325 : "=&r" (value), "=r" (res) \
326 : "r" (addr), "i" (-EFAULT));
327
328#define LoadWU(addr, value, res) \
329 __asm__ __volatile__ ( \
330 "1:\tlwl\t%0, 3(%2)\n" \
331 "2:\tlwr\t%0, (%2)\n\t" \
332 "dsll\t%0, %0, 32\n\t" \
333 "dsrl\t%0, %0, 32\n\t" \
334 "li\t%1, 0\n" \
335 "3:\n\t" \
336 ".insn\n\t" \
337 "\t.section\t.fixup,\"ax\"\n\t" \
338 "4:\tli\t%1, %3\n\t" \
339 "j\t3b\n\t" \
340 ".previous\n\t" \
341 ".section\t__ex_table,\"a\"\n\t" \
342 STR(PTR)"\t1b, 4b\n\t" \
343 STR(PTR)"\t2b, 4b\n\t" \
344 ".previous" \
345 : "=&r" (value), "=r" (res) \
346 : "r" (addr), "i" (-EFAULT));
347
348#define LoadDW(addr, value, res) \
349 __asm__ __volatile__ ( \
350 "1:\tldl\t%0, 7(%2)\n" \
351 "2:\tldr\t%0, (%2)\n\t" \
352 "li\t%1, 0\n" \
353 "3:\n\t" \
354 ".insn\n\t" \
355 "\t.section\t.fixup,\"ax\"\n\t" \
356 "4:\tli\t%1, %3\n\t" \
357 "j\t3b\n\t" \
358 ".previous\n\t" \
359 ".section\t__ex_table,\"a\"\n\t" \
360 STR(PTR)"\t1b, 4b\n\t" \
361 STR(PTR)"\t2b, 4b\n\t" \
362 ".previous" \
363 : "=&r" (value), "=r" (res) \
364 : "r" (addr), "i" (-EFAULT));
365
366#define StoreHW(addr, value, res) \
367 __asm__ __volatile__ ( \
368 ".set\tnoat\n" \
369 "1:\tsb\t%1, 0(%2)\n\t" \
370 "srl\t$1,%1, 0x8\n" \
371 "2:\tsb\t$1, 1(%2)\n\t" \
372 ".set\tat\n\t" \
373 "li\t%0, 0\n" \
374 "3:\n\t" \
375 ".insn\n\t" \
376 ".section\t.fixup,\"ax\"\n\t" \
377 "4:\tli\t%0, %3\n\t" \
378 "j\t3b\n\t" \
379 ".previous\n\t" \
380 ".section\t__ex_table,\"a\"\n\t" \
381 STR(PTR)"\t1b, 4b\n\t" \
382 STR(PTR)"\t2b, 4b\n\t" \
383 ".previous" \
384 : "=r" (res) \
385 : "r" (value), "r" (addr), "i" (-EFAULT));
386
387#define StoreW(addr, value, res) \
388 __asm__ __volatile__ ( \
389 "1:\tswl\t%1, 3(%2)\n" \
390 "2:\tswr\t%1, (%2)\n\t" \
391 "li\t%0, 0\n" \
392 "3:\n\t" \
393 ".insn\n\t" \
394 ".section\t.fixup,\"ax\"\n\t" \
395 "4:\tli\t%0, %3\n\t" \
396 "j\t3b\n\t" \
397 ".previous\n\t" \
398 ".section\t__ex_table,\"a\"\n\t" \
399 STR(PTR)"\t1b, 4b\n\t" \
400 STR(PTR)"\t2b, 4b\n\t" \
401 ".previous" \
402 : "=r" (res) \
403 : "r" (value), "r" (addr), "i" (-EFAULT));
404
405#define StoreDW(addr, value, res) \
406 __asm__ __volatile__ ( \
407 "1:\tsdl\t%1, 7(%2)\n" \
408 "2:\tsdr\t%1, (%2)\n\t" \
409 "li\t%0, 0\n" \
410 "3:\n\t" \
411 ".insn\n\t" \
412 ".section\t.fixup,\"ax\"\n\t" \
413 "4:\tli\t%0, %3\n\t" \
414 "j\t3b\n\t" \
415 ".previous\n\t" \
416 ".section\t__ex_table,\"a\"\n\t" \
417 STR(PTR)"\t1b, 4b\n\t" \
418 STR(PTR)"\t2b, 4b\n\t" \
419 ".previous" \
420 : "=r" (res) \
421 : "r" (value), "r" (addr), "i" (-EFAULT));
422#endif
423
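/*
 * Emulate a classic MIPS32/MIPS64 load or store that raised an address
 * error.  On success the saved EPC is advanced past the instruction
 * (branch delay slots are handled via compute_return_epc()); on a fault
 * during emulation the original EPC and $31 are restored before a signal
 * is delivered.
 */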
424static void emulate_load_store_insn(struct pt_regs *regs,
425 void __user *addr, unsigned int __user *pc)
426{
427 union mips_instruction insn;
428 unsigned long value;
429 unsigned int res;
430 unsigned long origpc;
431 unsigned long orig31;
432 void __user *fault_addr = NULL;
433
434 origpc = (unsigned long)pc;
435 orig31 = regs->regs[31];
436
437 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
438
439 /*
440 * This load never faults.
441 */
442 __get_user(insn.word, pc);
443
444 switch (insn.i_format.opcode) {
445 /*
446 * These are instructions that a compiler doesn't generate. We
447 * can assume therefore that the code is MIPS-aware and
448 * really buggy. Emulating these instructions would break the
449 * semantics anyway.
450 */
451 case ll_op:
452 case lld_op:
453 case sc_op:
454 case scd_op:
455
456 /*
457 * For these instructions the only way to create an address
458 * error is an attempted access to kernel/supervisor address
459 * space.
460 */
461 case ldl_op:
462 case ldr_op:
463 case lwl_op:
464 case lwr_op:
465 case sdl_op:
466 case sdr_op:
467 case swl_op:
468 case swr_op:
469 case lb_op:
470 case lbu_op:
471 case sb_op:
472 goto sigbus;
473
474 /*
475 * The remaining opcodes are the ones that are really of
476 * interest.
477 */
478 case lh_op:
479 if (!access_ok(VERIFY_READ, addr, 2))
480 goto sigbus;
481
482 LoadHW(addr, value, res);
483 if (res)
484 goto fault;
485 compute_return_epc(regs);
486 regs->regs[insn.i_format.rt] = value;
487 break;
488
489 case lw_op:
490 if (!access_ok(VERIFY_READ, addr, 4))
491 goto sigbus;
492
493 LoadW(addr, value, res);
494 if (res)
495 goto fault;
496 compute_return_epc(regs);
497 regs->regs[insn.i_format.rt] = value;
498 break;
499
500 case lhu_op:
501 if (!access_ok(VERIFY_READ, addr, 2))
502 goto sigbus;
503
504 LoadHWU(addr, value, res);
505 if (res)
506 goto fault;
507 compute_return_epc(regs);
508 regs->regs[insn.i_format.rt] = value;
509 break;
510
511 case lwu_op:
512#ifdef CONFIG_64BIT
513 /*
514 * A 32-bit kernel might be running on a 64-bit processor. But
515 * if we're on a 32-bit processor and an i-cache incoherency
516 * or race makes us see a 64-bit instruction here the sdl/sdr
517 * would blow up, so for now we don't handle unaligned 64-bit
518 * instructions on 32-bit kernels.
519 */
520 if (!access_ok(VERIFY_READ, addr, 4))
521 goto sigbus;
522
523 LoadWU(addr, value, res);
524 if (res)
525 goto fault;
526 compute_return_epc(regs);
527 regs->regs[insn.i_format.rt] = value;
528 break;
529#endif /* CONFIG_64BIT */
530
531 /* Cannot handle 64-bit instructions in 32-bit kernel */
532 goto sigill;
533
534 case ld_op:
535#ifdef CONFIG_64BIT
536 /*
537 * A 32-bit kernel might be running on a 64-bit processor. But
538 * if we're on a 32-bit processor and an i-cache incoherency
539 * or race makes us see a 64-bit instruction here the sdl/sdr
540 * would blow up, so for now we don't handle unaligned 64-bit
541 * instructions on 32-bit kernels.
542 */
543 if (!access_ok(VERIFY_READ, addr, 8))
544 goto sigbus;
545
546 LoadDW(addr, value, res);
547 if (res)
548 goto fault;
549 compute_return_epc(regs);
550 regs->regs[insn.i_format.rt] = value;
551 break;
552#endif /* CONFIG_64BIT */
553
554 /* Cannot handle 64-bit instructions in 32-bit kernel */
555 goto sigill;
556
557 case sh_op:
558 if (!access_ok(VERIFY_WRITE, addr, 2))
559 goto sigbus;
560
561 compute_return_epc(regs);
562 value = regs->regs[insn.i_format.rt];
563 StoreHW(addr, value, res);
564 if (res)
565 goto fault;
566 break;
567
568 case sw_op:
569 if (!access_ok(VERIFY_WRITE, addr, 4))
570 goto sigbus;
571
572 compute_return_epc(regs);
573 value = regs->regs[insn.i_format.rt];
574 StoreW(addr, value, res);
575 if (res)
576 goto fault;
577 break;
578
579 case sd_op:
580#ifdef CONFIG_64BIT
581 /*
582 * A 32-bit kernel might be running on a 64-bit processor. But
583 * if we're on a 32-bit processor and an i-cache incoherency
584 * or race makes us see a 64-bit instruction here the sdl/sdr
585 * would blow up, so for now we don't handle unaligned 64-bit
586 * instructions on 32-bit kernels.
587 */
588 if (!access_ok(VERIFY_WRITE, addr, 8))
589 goto sigbus;
590
591 compute_return_epc(regs);
592 value = regs->regs[insn.i_format.rt];
593 StoreDW(addr, value, res);
594 if (res)
595 goto fault;
596 break;
597#endif /* CONFIG_64BIT */
598
599 /* Cannot handle 64-bit instructions in 32-bit kernel */
600 goto sigill;
601
602 case lwc1_op:
603 case ldc1_op:
604 case swc1_op:
605 case sdc1_op:
606 die_if_kernel("Unaligned FP access in kernel code", regs);
607 BUG_ON(!used_math());
608 BUG_ON(!is_fpu_owner());
609
610 lose_fpu(1); /* Save FPU state for the emulator. */
611 res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
612 &fault_addr);
613 own_fpu(1); /* Restore FPU state. */
614
615 /* Signal if something went wrong. */
616 process_fpemu_return(res, fault_addr);
617
618 if (res == 0)
619 break;
620 return;
621
622 /*
623 * COP2 is available to the implementor for application-specific use.
624 * It's up to applications to register a notifier chain and do
625 * whatever they have to do, including possible sending of signals.
626 */
627 case lwc2_op:
628 cu2_notifier_call_chain(CU2_LWC2_OP, regs);
629 break;
630
631 case ldc2_op:
632 cu2_notifier_call_chain(CU2_LDC2_OP, regs);
633 break;
634
635 case swc2_op:
636 cu2_notifier_call_chain(CU2_SWC2_OP, regs);
637 break;
638
639 case sdc2_op:
640 cu2_notifier_call_chain(CU2_SDC2_OP, regs);
641 break;
642
643 default:
644 /*
645 * Pheeee... We encountered a yet-unknown instruction or
646 * cache coherence problem. Die sucker, die ...
647 */
648 goto sigill;
649 }
650
651#ifdef CONFIG_DEBUG_FS
652 unaligned_instructions++;
653#endif
654
655 return;
656
657fault:
658 /* roll back jump/branch */
659 regs->cp0_epc = origpc;
660 regs->regs[31] = orig31;
661 /* Did we have an exception handler installed? */
662 if (fixup_exception(regs))
663 return;
664
665 die_if_kernel("Unhandled kernel unaligned access", regs);
666 force_sig(SIGSEGV, current);
667
668 return;
669
670sigbus:
671 die_if_kernel("Unhandled kernel unaligned access", regs);
672 force_sig(SIGBUS, current);
673
674 return;
675
676sigill:
677 die_if_kernel
678 ("Unhandled kernel unaligned access or invalid instruction", regs);
679 force_sig(SIGILL, current);
680}
681
682/* Recode table from 16-bit register notation to 32-bit GPR. */
683const int reg16to32[] = { 16, 17, 2, 3, 4, 5, 6, 7 };
684
685/* Recode table from 16-bit STORE register notation to 32-bit GPR. */
686const int reg16to32st[] = { 0, 17, 2, 3, 4, 5, 6, 7 };
687
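/*
 * microMIPS variant: instructions mix 16-bit and 32-bit encodings, so the
 * faulting instruction (and the following one, needed by mm_isBranchInstr()
 * for branch handling) is first reassembled from 16-bit halves before
 * dispatching on the opcode.  'contpc' holds the address at which execution
 * continues on success.
 */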
688void emulate_load_store_microMIPS(struct pt_regs *regs, void __user * addr)
689{
690 unsigned long value;
691 unsigned int res;
692 int i;
693 unsigned int reg = 0, rvar;
694 unsigned long orig31;
695 u16 __user *pc16;
696 u16 halfword;
697 unsigned int word;
698 unsigned long origpc, contpc;
699 union mips_instruction insn;
700 struct mm_decoded_insn mminsn;
701 void __user *fault_addr = NULL;
702
703 origpc = regs->cp0_epc;
704 orig31 = regs->regs[31];
705
706 mminsn.micro_mips_mode = 1;
707
708 /*
709 * This load never faults.
710 */
711 pc16 = (unsigned short __user *)msk_isa16_mode(regs->cp0_epc);
712 __get_user(halfword, pc16);
713 pc16++;
714 contpc = regs->cp0_epc + 2;
715 word = ((unsigned int)halfword << 16);
716 mminsn.pc_inc = 2;
717
718 if (!mm_insn_16bit(halfword)) {
719 __get_user(halfword, pc16);
720 pc16++;
721 contpc = regs->cp0_epc + 4;
722 mminsn.pc_inc = 4;
723 word |= halfword;
724 }
725 mminsn.insn = word;
726
727 if (get_user(halfword, pc16))
728 goto fault;
729 mminsn.next_pc_inc = 2;
730 word = ((unsigned int)halfword << 16);
731
732 if (!mm_insn_16bit(halfword)) {
733 pc16++;
734 if (get_user(halfword, pc16))
735 goto fault;
736 mminsn.next_pc_inc = 4;
737 word |= halfword;
738 }
739 mminsn.next_insn = word;
740
741 insn = (union mips_instruction)(mminsn.insn);
742 if (mm_isBranchInstr(regs, mminsn, &contpc))
743 insn = (union mips_instruction)(mminsn.next_insn);
744
745 /* Parse instruction to find what to do */
746
747 switch (insn.mm_i_format.opcode) {
748
749 case mm_pool32a_op:
750 switch (insn.mm_x_format.func) {
751 case mm_lwxs_op:
752 reg = insn.mm_x_format.rd;
753 goto loadW;
754 }
755
756 goto sigbus;
757
758 case mm_pool32b_op:
759 switch (insn.mm_m_format.func) {
760 case mm_lwp_func:
761 reg = insn.mm_m_format.rd;
762 if (reg == 31)
763 goto sigbus;
764
765 if (!access_ok(VERIFY_READ, addr, 8))
766 goto sigbus;
767
768 LoadW(addr, value, res);
769 if (res)
770 goto fault;
771 regs->regs[reg] = value;
772 addr += 4;
773 LoadW(addr, value, res);
774 if (res)
775 goto fault;
776 regs->regs[reg + 1] = value;
777 goto success;
778
779 case mm_swp_func:
780 reg = insn.mm_m_format.rd;
781 if (reg == 31)
782 goto sigbus;
783
784 if (!access_ok(VERIFY_WRITE, addr, 8))
785 goto sigbus;
786
787 value = regs->regs[reg];
788 StoreW(addr, value, res);
789 if (res)
790 goto fault;
791 addr += 4;
792 value = regs->regs[reg + 1];
793 StoreW(addr, value, res);
794 if (res)
795 goto fault;
796 goto success;
797
798 case mm_ldp_func:
799#ifdef CONFIG_64BIT
800 reg = insn.mm_m_format.rd;
801 if (reg == 31)
802 goto sigbus;
803
804 if (!access_ok(VERIFY_READ, addr, 16))
805 goto sigbus;
806
807 LoadDW(addr, value, res);
808 if (res)
809 goto fault;
810 regs->regs[reg] = value;
811 addr += 8;
812 LoadDW(addr, value, res);
813 if (res)
814 goto fault;
815 regs->regs[reg + 1] = value;
816 goto success;
817#endif /* CONFIG_64BIT */
818
819 goto sigill;
820
821 case mm_sdp_func:
822#ifdef CONFIG_64BIT
823 reg = insn.mm_m_format.rd;
824 if (reg == 31)
825 goto sigbus;
826
827 if (!access_ok(VERIFY_WRITE, addr, 16))
828 goto sigbus;
829
830 value = regs->regs[reg];
831 StoreDW(addr, value, res);
832 if (res)
833 goto fault;
834 addr += 8;
835 value = regs->regs[reg + 1];
836 StoreDW(addr, value, res);
837 if (res)
838 goto fault;
839 goto success;
840#endif /* CONFIG_64BIT */
841
842 goto sigill;
843
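		/*
		 * LWM32/SWM32: the low four bits of 'rd' give the number of
		 * registers transferred starting at $16 (a count of 9 means
		 * $16-$23 plus $30), and bit 4 adds $31 (ra) to the list.
		 */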
844 case mm_lwm32_func:
845 reg = insn.mm_m_format.rd;
846 rvar = reg & 0xf;
847 if ((rvar > 9) || !reg)
848 goto sigill;
849 if (reg & 0x10) {
850 if (!access_ok
851 (VERIFY_READ, addr, 4 * (rvar + 1)))
852 goto sigbus;
853 } else {
854 if (!access_ok(VERIFY_READ, addr, 4 * rvar))
855 goto sigbus;
856 }
857 if (rvar == 9)
858 rvar = 8;
859 for (i = 16; rvar; rvar--, i++) {
860 LoadW(addr, value, res);
861 if (res)
862 goto fault;
863 addr += 4;
864 regs->regs[i] = value;
865 }
866 if ((reg & 0xf) == 9) {
867 LoadW(addr, value, res);
868 if (res)
869 goto fault;
870 addr += 4;
871 regs->regs[30] = value;
872 }
873 if (reg & 0x10) {
874 LoadW(addr, value, res);
875 if (res)
876 goto fault;
877 regs->regs[31] = value;
878 }
879 goto success;
880
881 case mm_swm32_func:
882 reg = insn.mm_m_format.rd;
883 rvar = reg & 0xf;
884 if ((rvar > 9) || !reg)
885 goto sigill;
886 if (reg & 0x10) {
887 if (!access_ok
888 (VERIFY_WRITE, addr, 4 * (rvar + 1)))
889 goto sigbus;
890 } else {
891 if (!access_ok(VERIFY_WRITE, addr, 4 * rvar))
892 goto sigbus;
893 }
894 if (rvar == 9)
895 rvar = 8;
896 for (i = 16; rvar; rvar--, i++) {
897 value = regs->regs[i];
898 StoreW(addr, value, res);
899 if (res)
900 goto fault;
901 addr += 4;
902 }
903 if ((reg & 0xf) == 9) {
904 value = regs->regs[30];
905 StoreW(addr, value, res);
906 if (res)
907 goto fault;
908 addr += 4;
909 }
910 if (reg & 0x10) {
911 value = regs->regs[31];
912 StoreW(addr, value, res);
913 if (res)
914 goto fault;
915 }
916 goto success;
917
918 case mm_ldm_func:
919#ifdef CONFIG_64BIT
920 reg = insn.mm_m_format.rd;
921 rvar = reg & 0xf;
922 if ((rvar > 9) || !reg)
923 goto sigill;
924 if (reg & 0x10) {
925 if (!access_ok
926 (VERIFY_READ, addr, 8 * (rvar + 1)))
927 goto sigbus;
928 } else {
929 if (!access_ok(VERIFY_READ, addr, 8 * rvar))
930 goto sigbus;
931 }
932 if (rvar == 9)
933 rvar = 8;
934
935 for (i = 16; rvar; rvar--, i++) {
936 LoadDW(addr, value, res);
937 if (res)
938 goto fault;
939 addr += 4;
940 regs->regs[i] = value;
941 }
942 if ((reg & 0xf) == 9) {
943 LoadDW(addr, value, res);
944 if (res)
945 goto fault;
946 addr += 8;
947 regs->regs[30] = value;
948 }
949 if (reg & 0x10) {
950 LoadDW(addr, value, res);
951 if (res)
952 goto fault;
953 regs->regs[31] = value;
954 }
955 goto success;
956#endif /* CONFIG_64BIT */
957
958 goto sigill;
959
960 case mm_sdm_func:
961#ifdef CONFIG_64BIT
962 reg = insn.mm_m_format.rd;
963 rvar = reg & 0xf;
964 if ((rvar > 9) || !reg)
965 goto sigill;
966 if (reg & 0x10) {
967 if (!access_ok
968 (VERIFY_WRITE, addr, 8 * (rvar + 1)))
969 goto sigbus;
970 } else {
971 if (!access_ok(VERIFY_WRITE, addr, 8 * rvar))
972 goto sigbus;
973 }
974 if (rvar == 9)
975 rvar = 8;
976
977 for (i = 16; rvar; rvar--, i++) {
978 value = regs->regs[i];
979 StoreDW(addr, value, res);
980 if (res)
981 goto fault;
982 addr += 8;
983 }
984 if ((reg & 0xf) == 9) {
985 value = regs->regs[30];
986 StoreDW(addr, value, res);
987 if (res)
988 goto fault;
989 addr += 8;
990 }
991 if (reg & 0x10) {
992 value = regs->regs[31];
993 StoreDW(addr, value, res);
994 if (res)
995 goto fault;
996 }
997 goto success;
998#endif /* CONFIG_64BIT */
999
1000 goto sigill;
1001
1002 /* LWC2, SWC2, LDC2, SDC2 are not serviced */
1003 }
1004
1005 goto sigbus;
1006
1007 case mm_pool32c_op:
1008 switch (insn.mm_m_format.func) {
1009 case mm_lwu_func:
1010 reg = insn.mm_m_format.rd;
1011 goto loadWU;
1012 }
1013
1014 /* LL,SC,LLD,SCD are not serviced */
1015 goto sigbus;
1016
1017 case mm_pool32f_op:
1018 switch (insn.mm_x_format.func) {
1019 case mm_lwxc1_func:
1020 case mm_swxc1_func:
1021 case mm_ldxc1_func:
1022 case mm_sdxc1_func:
1023 goto fpu_emul;
1024 }
1025
1026 goto sigbus;
1027
1028 case mm_ldc132_op:
1029 case mm_sdc132_op:
1030 case mm_lwc132_op:
1031 case mm_swc132_op:
1032fpu_emul:
1033 /* roll back jump/branch */
1034 regs->cp0_epc = origpc;
1035 regs->regs[31] = orig31;
1036
1037 die_if_kernel("Unaligned FP access in kernel code", regs);
1038 BUG_ON(!used_math());
1039 BUG_ON(!is_fpu_owner());
1040
1041 lose_fpu(1); /* save the FPU state for the emulator */
1042 res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
1043 &fault_addr);
1044 own_fpu(1); /* restore FPU state */
1045
1046 /* If something went wrong, signal */
1047 process_fpemu_return(res, fault_addr);
1048
1049 if (res == 0)
1050 goto success;
1051 return;
1052
1053 case mm_lh32_op:
1054 reg = insn.mm_i_format.rt;
1055 goto loadHW;
1056
1057 case mm_lhu32_op:
1058 reg = insn.mm_i_format.rt;
1059 goto loadHWU;
1060
1061 case mm_lw32_op:
1062 reg = insn.mm_i_format.rt;
1063 goto loadW;
1064
1065 case mm_sh32_op:
1066 reg = insn.mm_i_format.rt;
1067 goto storeHW;
1068
1069 case mm_sw32_op:
1070 reg = insn.mm_i_format.rt;
1071 goto storeW;
1072
1073 case mm_ld32_op:
1074 reg = insn.mm_i_format.rt;
1075 goto loadDW;
1076
1077 case mm_sd32_op:
1078 reg = insn.mm_i_format.rt;
1079 goto storeDW;
1080
1081 case mm_pool16c_op:
1082 switch (insn.mm16_m_format.func) {
1083 case mm_lwm16_op:
1084 reg = insn.mm16_m_format.rlist;
1085 rvar = reg + 1;
1086 if (!access_ok(VERIFY_READ, addr, 4 * rvar))
1087 goto sigbus;
1088
1089 for (i = 16; rvar; rvar--, i++) {
1090 LoadW(addr, value, res);
1091 if (res)
1092 goto fault;
1093 addr += 4;
1094 regs->regs[i] = value;
1095 }
1096 LoadW(addr, value, res);
1097 if (res)
1098 goto fault;
1099 regs->regs[31] = value;
1100
1101 goto success;
1102
1103 case mm_swm16_op:
1104 reg = insn.mm16_m_format.rlist;
1105 rvar = reg + 1;
1106 if (!access_ok(VERIFY_WRITE, addr, 4 * rvar))
1107 goto sigbus;
1108
1109 for (i = 16; rvar; rvar--, i++) {
1110 value = regs->regs[i];
1111 StoreW(addr, value, res);
1112 if (res)
1113 goto fault;
1114 addr += 4;
1115 }
1116 value = regs->regs[31];
1117 StoreW(addr, value, res);
1118 if (res)
1119 goto fault;
1120
1121 goto success;
1122
1123 }
1124
1125 goto sigbus;
1126
1127 case mm_lhu16_op:
1128 reg = reg16to32[insn.mm16_rb_format.rt];
1129 goto loadHWU;
1130
1131 case mm_lw16_op:
1132 reg = reg16to32[insn.mm16_rb_format.rt];
1133 goto loadW;
1134
1135 case mm_sh16_op:
1136 reg = reg16to32st[insn.mm16_rb_format.rt];
1137 goto storeHW;
1138
1139 case mm_sw16_op:
1140 reg = reg16to32st[insn.mm16_rb_format.rt];
1141 goto storeW;
1142
1143 case mm_lwsp16_op:
1144 reg = insn.mm16_r5_format.rt;
1145 goto loadW;
1146
1147 case mm_swsp16_op:
1148 reg = insn.mm16_r5_format.rt;
1149 goto storeW;
1150
1151 case mm_lwgp16_op:
1152 reg = reg16to32[insn.mm16_r3_format.rt];
1153 goto loadW;
1154
1155 default:
1156 goto sigill;
1157 }
1158
1159loadHW:
1160 if (!access_ok(VERIFY_READ, addr, 2))
1161 goto sigbus;
1162
1163 LoadHW(addr, value, res);
1164 if (res)
1165 goto fault;
1166 regs->regs[reg] = value;
1167 goto success;
1168
1169loadHWU:
1170 if (!access_ok(VERIFY_READ, addr, 2))
1171 goto sigbus;
1172
1173 LoadHWU(addr, value, res);
1174 if (res)
1175 goto fault;
1176 regs->regs[reg] = value;
1177 goto success;
1178
1179loadW:
1180 if (!access_ok(VERIFY_READ, addr, 4))
1181 goto sigbus;
1182
1183 LoadW(addr, value, res);
1184 if (res)
1185 goto fault;
1186 regs->regs[reg] = value;
1187 goto success;
1188
1189loadWU:
1190#ifdef CONFIG_64BIT
1191 /*
1192 * A 32-bit kernel might be running on a 64-bit processor. But
1193 * if we're on a 32-bit processor and an i-cache incoherency
1194 * or race makes us see a 64-bit instruction here the sdl/sdr
1195 * would blow up, so for now we don't handle unaligned 64-bit
1196 * instructions on 32-bit kernels.
1197 */
1198 if (!access_ok(VERIFY_READ, addr, 4))
1199 goto sigbus;
1200
1201 LoadWU(addr, value, res);
1202 if (res)
1203 goto fault;
1204 regs->regs[reg] = value;
1205 goto success;
1206#endif /* CONFIG_64BIT */
1207
1208 /* Cannot handle 64-bit instructions in 32-bit kernel */
1209 goto sigill;
1210
1211loadDW:
1212#ifdef CONFIG_64BIT
1213 /*
1214 * A 32-bit kernel might be running on a 64-bit processor. But
1215 * if we're on a 32-bit processor and an i-cache incoherency
1216 * or race makes us see a 64-bit instruction here the sdl/sdr
1217 * would blow up, so for now we don't handle unaligned 64-bit
1218 * instructions on 32-bit kernels.
1219 */
1220 if (!access_ok(VERIFY_READ, addr, 8))
1221 goto sigbus;
1222
1223 LoadDW(addr, value, res);
1224 if (res)
1225 goto fault;
1226 regs->regs[reg] = value;
1227 goto success;
1228#endif /* CONFIG_64BIT */
1229
1230 /* Cannot handle 64-bit instructions in 32-bit kernel */
1231 goto sigill;
1232
1233storeHW:
1234 if (!access_ok(VERIFY_WRITE, addr, 2))
1235 goto sigbus;
1236
1237 value = regs->regs[reg];
1238 StoreHW(addr, value, res);
1239 if (res)
1240 goto fault;
1241 goto success;
1242
1243storeW:
1244 if (!access_ok(VERIFY_WRITE, addr, 4))
1245 goto sigbus;
1246
1247 value = regs->regs[reg];
1248 StoreW(addr, value, res);
1249 if (res)
1250 goto fault;
1251 goto success;
1252
1253storeDW:
1254#ifdef CONFIG_64BIT
1255 /*
1256 * A 32-bit kernel might be running on a 64-bit processor. But
1257 * if we're on a 32-bit processor and an i-cache incoherency
1258 * or race makes us see a 64-bit instruction here the sdl/sdr
1259 * would blow up, so for now we don't handle unaligned 64-bit
1260 * instructions on 32-bit kernels.
1261 */
1262 if (!access_ok(VERIFY_WRITE, addr, 8))
1263 goto sigbus;
1264
1265 value = regs->regs[reg];
1266 StoreDW(addr, value, res);
1267 if (res)
1268 goto fault;
1269 goto success;
1270#endif /* CONFIG_64BIT */
1271
1272 /* Cannot handle 64-bit instructions in 32-bit kernel */
1273 goto sigill;
1274
1275success:
1276 regs->cp0_epc = contpc; /* advance or branch */
1277
1278#ifdef CONFIG_DEBUG_FS
1279 unaligned_instructions++;
1280#endif
1281 return;
1282
1283fault:
1284 /* roll back jump/branch */
1285 regs->cp0_epc = origpc;
1286 regs->regs[31] = orig31;
1287 /* Did we have an exception handler installed? */
1288 if (fixup_exception(regs))
1289 return;
1290
1291 die_if_kernel("Unhandled kernel unaligned access", regs);
1292 force_sig(SIGSEGV, current);
1293
1294 return;
1295
1296sigbus:
1297 die_if_kernel("Unhandled kernel unaligned access", regs);
1298 force_sig(SIGBUS, current);
1299
1300 return;
1301
1302sigill:
1303 die_if_kernel
1304 ("Unhandled kernel unaligned access or invalid instruction", regs);
1305 force_sig(SIGILL, current);
1306}
1307
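/*
 * MIPS16e variant: the 16-bit instruction (plus an optional EXTEND prefix)
 * is re-read from the faulting EPC, the target register is mapped through
 * the reg16to32/reg16to32st tables, and the access is replayed with the
 * same load/store helpers as above.
 */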
1308static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user * addr)
1309{
1310 unsigned long value;
1311 unsigned int res;
1312 int reg;
1313 unsigned long orig31;
1314 u16 __user *pc16;
1315 unsigned long origpc;
1316 union mips16e_instruction mips16inst, oldinst;
1317
1318 origpc = regs->cp0_epc;
1319 orig31 = regs->regs[31];
1320 pc16 = (unsigned short __user *)msk_isa16_mode(origpc);
1321 /*
1322 * This load never faults.
1323 */
1324 __get_user(mips16inst.full, pc16);
1325 oldinst = mips16inst;
1326
1327 /* skip EXTEND instruction */
1328 if (mips16inst.ri.opcode == MIPS16e_extend_op) {
1329 pc16++;
1330 __get_user(mips16inst.full, pc16);
1331 } else if (delay_slot(regs)) {
1332 /* skip jump instructions */
1333 /* JAL/JALX are 32 bits but have OPCODE in first short int */
1334 if (mips16inst.ri.opcode == MIPS16e_jal_op)
1335 pc16++;
1336 pc16++;
1337 if (get_user(mips16inst.full, pc16))
1338 goto sigbus;
1339 }
1340
1341 switch (mips16inst.ri.opcode) {
1342 case MIPS16e_i64_op: /* I64 or RI64 instruction */
1343 switch (mips16inst.i64.func) { /* I64/RI64 func field check */
1344 case MIPS16e_ldpc_func:
1345 case MIPS16e_ldsp_func:
1346 reg = reg16to32[mips16inst.ri64.ry];
1347 goto loadDW;
1348
1349 case MIPS16e_sdsp_func:
1350 reg = reg16to32[mips16inst.ri64.ry];
1351 goto writeDW;
1352
1353 case MIPS16e_sdrasp_func:
1354 reg = 29; /* GPRSP */
1355 goto writeDW;
1356 }
1357
1358 goto sigbus;
1359
1360 case MIPS16e_swsp_op:
1361 case MIPS16e_lwpc_op:
1362 case MIPS16e_lwsp_op:
1363 reg = reg16to32[mips16inst.ri.rx];
1364 break;
1365
1366 case MIPS16e_i8_op:
1367 if (mips16inst.i8.func != MIPS16e_swrasp_func)
1368 goto sigbus;
1369 reg = 29; /* GPRSP */
1370 break;
1371
1372 default:
1373 reg = reg16to32[mips16inst.rri.ry];
1374 break;
1375 }
1376
1377 switch (mips16inst.ri.opcode) {
1378
1379 case MIPS16e_lb_op:
1380 case MIPS16e_lbu_op:
1381 case MIPS16e_sb_op:
1382 goto sigbus;
1383
1384 case MIPS16e_lh_op:
1385 if (!access_ok(VERIFY_READ, addr, 2))
1386 goto sigbus;
1387
1388 LoadHW(addr, value, res);
1389 if (res)
1390 goto fault;
1391 MIPS16e_compute_return_epc(regs, &oldinst);
1392 regs->regs[reg] = value;
1393 break;
1394
1395 case MIPS16e_lhu_op:
1396 if (!access_ok(VERIFY_READ, addr, 2))
1397 goto sigbus;
1398
1399 LoadHWU(addr, value, res);
1400 if (res)
1401 goto fault;
1402 MIPS16e_compute_return_epc(regs, &oldinst);
1403 regs->regs[reg] = value;
1404 break;
1405
1406 case MIPS16e_lw_op:
1407 case MIPS16e_lwpc_op:
1408 case MIPS16e_lwsp_op:
1409 if (!access_ok(VERIFY_READ, addr, 4))
1410 goto sigbus;
1411
1412 LoadW(addr, value, res);
1413 if (res)
1414 goto fault;
1415 MIPS16e_compute_return_epc(regs, &oldinst);
1416 regs->regs[reg] = value;
1417 break;
1418
1419 case MIPS16e_lwu_op:
1420#ifdef CONFIG_64BIT
1421 /*
1422 * A 32-bit kernel might be running on a 64-bit processor. But
1423 * if we're on a 32-bit processor and an i-cache incoherency
1424 * or race makes us see a 64-bit instruction here the sdl/sdr
1425 * would blow up, so for now we don't handle unaligned 64-bit
1426 * instructions on 32-bit kernels.
1427 */
1428 if (!access_ok(VERIFY_READ, addr, 4))
1429 goto sigbus;
1430
1431 LoadWU(addr, value, res);
1432 if (res)
1433 goto fault;
1434 MIPS16e_compute_return_epc(regs, &oldinst);
1435 regs->regs[reg] = value;
1436 break;
1437#endif /* CONFIG_64BIT */
1438
1439 /* Cannot handle 64-bit instructions in 32-bit kernel */
1440 goto sigill;
1441
1442 case MIPS16e_ld_op:
1443loadDW:
1444#ifdef CONFIG_64BIT
1445 /*
1446 * A 32-bit kernel might be running on a 64-bit processor. But
1447 * if we're on a 32-bit processor and an i-cache incoherency
1448 * or race makes us see a 64-bit instruction here the sdl/sdr
1449 * would blow up, so for now we don't handle unaligned 64-bit
1450 * instructions on 32-bit kernels.
1451 */
1452 if (!access_ok(VERIFY_READ, addr, 8))
1453 goto sigbus;
1454
1455 LoadDW(addr, value, res);
1456 if (res)
1457 goto fault;
1458 MIPS16e_compute_return_epc(regs, &oldinst);
1459 regs->regs[reg] = value;
1460 break;
1461#endif /* CONFIG_64BIT */
1462
1463 /* Cannot handle 64-bit instructions in 32-bit kernel */
1464 goto sigill;
1465
1466 case MIPS16e_sh_op:
1467 if (!access_ok(VERIFY_WRITE, addr, 2))
1468 goto sigbus;
1469
1470 MIPS16e_compute_return_epc(regs, &oldinst);
1471 value = regs->regs[reg];
1472 StoreHW(addr, value, res);
1473 if (res)
1474 goto fault;
1475 break;
1476
1477 case MIPS16e_sw_op:
1478 case MIPS16e_swsp_op:
1479 case MIPS16e_i8_op: /* actually - MIPS16e_swrasp_func */
1480 if (!access_ok(VERIFY_WRITE, addr, 4))
1481 goto sigbus;
1482
1483 MIPS16e_compute_return_epc(regs, &oldinst);
1484 value = regs->regs[reg];
1485 StoreW(addr, value, res);
1486 if (res)
1487 goto fault;
1488 break;
1489
1490 case MIPS16e_sd_op:
1491writeDW:
1492#ifdef CONFIG_64BIT
1493 /*
1494 * A 32-bit kernel might be running on a 64-bit processor. But
1495 * if we're on a 32-bit processor and an i-cache incoherency
1496 * or race makes us see a 64-bit instruction here the sdl/sdr
1497 * would blow up, so for now we don't handle unaligned 64-bit
1498 * instructions on 32-bit kernels.
1499 */
1500 if (!access_ok(VERIFY_WRITE, addr, 8))
1501 goto sigbus;
1502
1503 MIPS16e_compute_return_epc(regs, &oldinst);
1504 value = regs->regs[reg];
1505 StoreDW(addr, value, res);
1506 if (res)
1507 goto fault;
1508 break;
1509#endif /* CONFIG_64BIT */
1510
1511 /* Cannot handle 64-bit instructions in 32-bit kernel */
1512 goto sigill;
1513
1514 default:
1515 /*
1516 * Pheeee... We encountered a yet-unknown instruction or
1517 * cache coherence problem. Die sucker, die ...
1518 */
1519 goto sigill;
1520 }
1521
1522#ifdef CONFIG_DEBUG_FS
1523 unaligned_instructions++;
1524#endif
1525
1526 return;
1527
1528fault:
1529 /* roll back jump/branch */
1530 regs->cp0_epc = origpc;
1531 regs->regs[31] = orig31;
1532 /* Did we have an exception handler installed? */
1533 if (fixup_exception(regs))
1534 return;
1535
1536 die_if_kernel("Unhandled kernel unaligned access", regs);
1537 force_sig(SIGSEGV, current);
1538
1539 return;
1540
1541sigbus:
1542 die_if_kernel("Unhandled kernel unaligned access", regs);
1543 force_sig(SIGBUS, current);
1544
1545 return;
1546
1547sigill:
1548 die_if_kernel
1549 ("Unhandled kernel unaligned access or invalid instruction", regs);
1550 force_sig(SIGILL, current);
1551}
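/*
 * do_ade() is the C entry point for the address error exception.  The
 * exception_enter()/exception_exit() pair informs the context tracking
 * code that user mode may have been interrupted, which is what the
 * HAVE_CONTEXT_TRACKING support added by this commit is about; the
 * previous context state is restored on exit.
 */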
1552asmlinkage void do_ade(struct pt_regs *regs)
1553{
1554 enum ctx_state prev_state;
1555 unsigned int __user *pc;
1556 mm_segment_t seg;
1557
1558 prev_state = exception_enter();
1559 perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS,
1560 1, regs, regs->cp0_badvaddr);
1561 /*
1562 * Did we catch a fault trying to load an instruction?
1563 */
1564 if (regs->cp0_badvaddr == regs->cp0_epc)
1565 goto sigbus;
1566
1567 if (user_mode(regs) && !test_thread_flag(TIF_FIXADE))
1568 goto sigbus;
1569 if (unaligned_action == UNALIGNED_ACTION_SIGNAL)
1570 goto sigbus;
1571
1572 /*
1573 * Do branch emulation only if we didn't forward the exception.
1574 * This is all so very ugly ...
1575 */
1576
1577 /*
1578 * Are we running in microMIPS mode?
1579 */
1580 if (get_isa16_mode(regs->cp0_epc)) {
1581 /*
1582 * Did we catch a fault trying to load an instruction in
1583 * 16-bit mode?
1584 */
1585 if (regs->cp0_badvaddr == msk_isa16_mode(regs->cp0_epc))
1586 goto sigbus;
1587 if (unaligned_action == UNALIGNED_ACTION_SHOW)
1588 show_registers(regs);
1589
1590 if (cpu_has_mmips) {
1591 seg = get_fs();
1592 if (!user_mode(regs))
1593 set_fs(KERNEL_DS);
1594 emulate_load_store_microMIPS(regs,
1595 (void __user *)regs->cp0_badvaddr);
1596 set_fs(seg);
1597
1598 return;
1599 }
1600
1601 if (cpu_has_mips16) {
1602 seg = get_fs();
1603 if (!user_mode(regs))
1604 set_fs(KERNEL_DS);
1605 emulate_load_store_MIPS16e(regs,
1606 (void __user *)regs->cp0_badvaddr);
1607 set_fs(seg);
1608
1609 return;
1610 }
1611
1612 goto sigbus;
1613 }
1614
1615 if (unaligned_action == UNALIGNED_ACTION_SHOW)
1616 show_registers(regs);
1617 pc = (unsigned int __user *)exception_epc(regs);
1618
1619 seg = get_fs();
1620 if (!user_mode(regs))
1621 set_fs(KERNEL_DS);
1622 emulate_load_store_insn(regs, (void __user *)regs->cp0_badvaddr, pc);
1623 set_fs(seg);
1624
1625 return;
1626
1627sigbus:
1628 die_if_kernel("Kernel unaligned instruction access", regs);
1629 force_sig(SIGBUS, current);
1630
1631 /*
1632 * XXX On return from the signal handler we should advance the epc
1633 */
1634 exception_exit(prev_state);
1635}
1636
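/*
 * With CONFIG_DEBUG_FS two files are created in the MIPS debugfs
 * directory: 'unaligned_instructions' counts emulated accesses and
 * 'unaligned_action' selects the UNALIGNED_ACTION_* behaviour
 * (0 = quiet, 1 = signal, 2 = show registers).
 */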
1637#ifdef CONFIG_DEBUG_FS
1638extern struct dentry *mips_debugfs_dir;
1639static int __init debugfs_unaligned(void)
1640{
1641 struct dentry *d;
1642
1643 if (!mips_debugfs_dir)
1644 return -ENODEV;
1645 d = debugfs_create_u32("unaligned_instructions", S_IRUGO,
1646 mips_debugfs_dir, &unaligned_instructions);
1647 if (!d)
1648 return -ENOMEM;
1649 d = debugfs_create_u32("unaligned_action", S_IRUGO | S_IWUSR,
1650 mips_debugfs_dir, &unaligned_action);
1651 if (!d)
1652 return -ENOMEM;
1653 return 0;
1654}
1655__initcall(debugfs_unaligned);
1656#endif