MIPS: kernel: unaligned: Add EVA instruction wrappers
arch/mips/kernel/unaligned.c
1/*
2 * Handle unaligned accesses by emulation.
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 1996, 1998, 1999, 2002 by Ralf Baechle
9 * Copyright (C) 1999 Silicon Graphics, Inc.
10 * Copyright (C) 2014 Imagination Technologies Ltd.
11 *
12 * This file contains the exception handler for the address error exception,
13 * with the special capability to execute faulting instructions in software.
14 * The handler does not try to handle the case when the program counter
15 * points to an address that is not aligned to a word boundary.
16 *
17 * Putting data at unaligned addresses is bad practice even on Intel, where
18 * only performance is affected. Much worse is that such code is not
19 * portable. Because several programs died on MIPS due to alignment
20 * problems, I decided to implement this handler anyway, though I originally
21 * didn't intend to do this at all for user code.
22 *
23 * For now I enable fixing of address errors by default to make life easier.
24 * However, I intend to disable this at some point in the future, once the
25 * alignment problems in user programs have been fixed. For programmers this
26 * is the right way to go.
27 *
28 * Fixing address errors is a per-process option. The option is inherited
29 * across fork(2) and execve(2) calls. If you really want to use this
30 * option in your user programs - though I strongly discourage relying on
31 * the software emulation - use the following code in your userland stuff:
32 *
33 * #include <sys/sysmips.h>
34 *
35 * ...
36 * sysmips(MIPS_FIXADE, x);
37 * ...
38 *
39 * The argument x is 0 to disable software emulation; any other value enables it.
40 *
41 * Below is a little program to play around with this feature.
42 *
43 * #include <stdio.h>
44 * #include <sys/sysmips.h>
45 *
46 * struct foo {
47 * unsigned char bar[8];
48 * };
49 *
50 * int main(int argc, char *argv[])
51 * {
52 * struct foo x = {0, 1, 2, 3, 4, 5, 6, 7};
53 * unsigned int *p = (unsigned int *) (x.bar + 3);
54 * int i;
55 *
56 * if (argc > 1)
57 * sysmips(MIPS_FIXADE, atoi(argv[1]));
58 *
59 * printf("*p = %08x\n", *p);
60 *
61 * *p = 0xdeadface;
62 *
63 * for (i = 0; i <= 7; i++)
64 * printf("%02x ", x.bar[i]);
65 * printf("\n");
66 * }
67 *
68 * Coprocessor loads are not supported; I think this case is unimportant
69 * in practice.
70 *
71 * TODO: Handle ndc (attempted store to doubleword in uncached memory)
72 * exception for the R6000.
73 * A store crossing a page boundary might be executed only partially.
74 * Undo the partial store in this case.
75 */
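/*
 * As far as this file is concerned, the MIPS_FIXADE setting shows up as the
 * per-thread TIF_FIXADE flag: do_ade() at the bottom checks it before
 * emulating a faulting user access, and the fixups themselves are done by
 * the emulate_load_store_*() helpers using the load/store macros below.
 */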
76#include <linux/context_tracking.h>
77#include <linux/mm.h>
78#include <linux/signal.h>
79#include <linux/smp.h>
80#include <linux/sched.h>
81#include <linux/debugfs.h>
82#include <linux/perf_event.h>
83
84#include <asm/asm.h>
85#include <asm/branch.h>
86#include <asm/byteorder.h>
87#include <asm/cop2.h>
88#include <asm/fpu.h>
89#include <asm/fpu_emulator.h>
90#include <asm/inst.h>
91#include <asm/uaccess.h>
92#include <asm/fpu.h>
93#include <asm/fpu_emulator.h>
94
95#define STR(x) __STR(x)
96#define __STR(x) #x
97
98enum {
99 UNALIGNED_ACTION_QUIET,
100 UNALIGNED_ACTION_SIGNAL,
101 UNALIGNED_ACTION_SHOW,
102};
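/*
 * What the actions mean in do_ade(): QUIET fixes the access up silently,
 * SIGNAL skips the fixup and lets the process take SIGBUS, SHOW fixes it up
 * but dumps the registers first.  With CONFIG_DEBUG_FS the counter is
 * exported read-only and the action read-write by debugfs_unaligned() at
 * the end of this file - presumably as <debugfs>/mips/unaligned_instructions
 * and <debugfs>/mips/unaligned_action, assuming mips_debugfs_dir is the
 * usual "mips" directory.
 */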
103#ifdef CONFIG_DEBUG_FS
104static u32 unaligned_instructions;
105static u32 unaligned_action;
106#else
107#define unaligned_action UNALIGNED_ACTION_QUIET
108#endif
109extern void show_registers(struct pt_regs *regs);
110
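/*
 * The LoadHW/LoadW/LoadDW and StoreHW/StoreW/StoreDW macros below emulate
 * one unaligned access with a pair of partial accesses (lb/lbu for
 * halfwords, lwl/lwr or ldl/ldr and swl/swr or sdl/sdr for words and
 * doublewords).  Every instruction that may fault gets a __ex_table entry
 * pointing at a .fixup stub, so a bad user pointer leaves -EFAULT in 'res'
 * instead of turning into an unhandled fault.  The user_lb/user_lwl/user_sb
 * style wrappers are the EVA instruction wrappers this change is about: on
 * an EVA kernel they are expected to expand to the user-mode EVA forms
 * (lbe, lwle, swre, ...) so user addresses are accessed with user
 * privileges, and to plain lb/lwl/sb otherwise.  A typical call site:
 *
 *	unsigned long value;
 *	unsigned int res;
 *
 *	LoadW(addr, value, res);
 *	if (res)
 *		goto fault;
 *	compute_return_epc(regs);
 *	regs->regs[insn.i_format.rt] = value;
 */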
111#ifdef __BIG_ENDIAN
112#define LoadHW(addr, value, res) \
113 __asm__ __volatile__ (".set\tnoat\n" \
114 "1:\t"user_lb("%0", "0(%2)")"\n" \
115 "2:\t"user_lbu("$1", "1(%2)")"\n\t" \
116 "sll\t%0, 0x8\n\t" \
117 "or\t%0, $1\n\t" \
118 "li\t%1, 0\n" \
119 "3:\t.set\tat\n\t" \
120 ".insn\n\t" \
121 ".section\t.fixup,\"ax\"\n\t" \
122 "4:\tli\t%1, %3\n\t" \
123 "j\t3b\n\t" \
124 ".previous\n\t" \
125 ".section\t__ex_table,\"a\"\n\t" \
126 STR(PTR)"\t1b, 4b\n\t" \
127 STR(PTR)"\t2b, 4b\n\t" \
128 ".previous" \
129 : "=&r" (value), "=r" (res) \
130 : "r" (addr), "i" (-EFAULT));
131
132#define LoadW(addr, value, res) \
133 __asm__ __volatile__ ( \
134 "1:\t"user_lwl("%0", "(%2)")"\n" \
135 "2:\t"user_lwr("%0", "3(%2)")"\n\t" \
136 "li\t%1, 0\n" \
137 "3:\n\t" \
138 ".insn\n\t" \
139 ".section\t.fixup,\"ax\"\n\t" \
140 "4:\tli\t%1, %3\n\t" \
141 "j\t3b\n\t" \
142 ".previous\n\t" \
143 ".section\t__ex_table,\"a\"\n\t" \
144 STR(PTR)"\t1b, 4b\n\t" \
145 STR(PTR)"\t2b, 4b\n\t" \
146 ".previous" \
147 : "=&r" (value), "=r" (res) \
148 : "r" (addr), "i" (-EFAULT));
149
150#define LoadHWU(addr, value, res) \
151 __asm__ __volatile__ ( \
152 ".set\tnoat\n" \
153 "1:\t"user_lbu("%0", "0(%2)")"\n" \
154 "2:\t"user_lbu("$1", "1(%2)")"\n\t" \
155 "sll\t%0, 0x8\n\t" \
156 "or\t%0, $1\n\t" \
157 "li\t%1, 0\n" \
158 "3:\n\t" \
159 ".insn\n\t" \
160 ".set\tat\n\t" \
161 ".section\t.fixup,\"ax\"\n\t" \
162 "4:\tli\t%1, %3\n\t" \
163 "j\t3b\n\t" \
164 ".previous\n\t" \
165 ".section\t__ex_table,\"a\"\n\t" \
166 STR(PTR)"\t1b, 4b\n\t" \
167 STR(PTR)"\t2b, 4b\n\t" \
168 ".previous" \
169 : "=&r" (value), "=r" (res) \
170 : "r" (addr), "i" (-EFAULT));
171
172#define LoadWU(addr, value, res) \
173 __asm__ __volatile__ ( \
174 "1:\t"user_lwl("%0", "(%2)")"\n" \
175 "2:\t"user_lwr("%0", "3(%2)")"\n\t" \
176 "dsll\t%0, %0, 32\n\t" \
177 "dsrl\t%0, %0, 32\n\t" \
178 "li\t%1, 0\n" \
179 "3:\n\t" \
180 ".insn\n\t" \
181 "\t.section\t.fixup,\"ax\"\n\t" \
182 "4:\tli\t%1, %3\n\t" \
183 "j\t3b\n\t" \
184 ".previous\n\t" \
185 ".section\t__ex_table,\"a\"\n\t" \
186 STR(PTR)"\t1b, 4b\n\t" \
187 STR(PTR)"\t2b, 4b\n\t" \
188 ".previous" \
189 : "=&r" (value), "=r" (res) \
190 : "r" (addr), "i" (-EFAULT));
191
192#define LoadDW(addr, value, res) \
193 __asm__ __volatile__ ( \
194 "1:\tldl\t%0, (%2)\n" \
195 "2:\tldr\t%0, 7(%2)\n\t" \
196 "li\t%1, 0\n" \
197 "3:\n\t" \
198 ".insn\n\t" \
199 "\t.section\t.fixup,\"ax\"\n\t" \
200 "4:\tli\t%1, %3\n\t" \
201 "j\t3b\n\t" \
202 ".previous\n\t" \
203 ".section\t__ex_table,\"a\"\n\t" \
204 STR(PTR)"\t1b, 4b\n\t" \
205 STR(PTR)"\t2b, 4b\n\t" \
206 ".previous" \
207 : "=&r" (value), "=r" (res) \
208 : "r" (addr), "i" (-EFAULT));
209
210#define StoreHW(addr, value, res) \
211 __asm__ __volatile__ ( \
212 ".set\tnoat\n" \
213 "1:\t"user_sb("%1", "1(%2)")"\n" \
214 "srl\t$1, %1, 0x8\n" \
215 "2:\t"user_sb("$1", "0(%2)")"\n" \
216 ".set\tat\n\t" \
217 "li\t%0, 0\n" \
218 "3:\n\t" \
219 ".insn\n\t" \
220 ".section\t.fixup,\"ax\"\n\t" \
221 "4:\tli\t%0, %3\n\t" \
222 "j\t3b\n\t" \
223 ".previous\n\t" \
224 ".section\t__ex_table,\"a\"\n\t" \
225 STR(PTR)"\t1b, 4b\n\t" \
226 STR(PTR)"\t2b, 4b\n\t" \
227 ".previous" \
228 : "=r" (res) \
229 : "r" (value), "r" (addr), "i" (-EFAULT));
230
231#define StoreW(addr, value, res) \
232 __asm__ __volatile__ ( \
233 "1:\t"user_swl("%1", "(%2)")"\n" \
234 "2:\t"user_swr("%1", "3(%2)")"\n\t" \
235 "li\t%0, 0\n" \
236 "3:\n\t" \
237 ".insn\n\t" \
238 ".section\t.fixup,\"ax\"\n\t" \
239 "4:\tli\t%0, %3\n\t" \
240 "j\t3b\n\t" \
241 ".previous\n\t" \
242 ".section\t__ex_table,\"a\"\n\t" \
243 STR(PTR)"\t1b, 4b\n\t" \
244 STR(PTR)"\t2b, 4b\n\t" \
245 ".previous" \
246 : "=r" (res) \
247 : "r" (value), "r" (addr), "i" (-EFAULT));
248
249#define StoreDW(addr, value, res) \
250 __asm__ __volatile__ ( \
251 "1:\tsdl\t%1,(%2)\n" \
252 "2:\tsdr\t%1, 7(%2)\n\t" \
253 "li\t%0, 0\n" \
254 "3:\n\t" \
255 ".insn\n\t" \
256 ".section\t.fixup,\"ax\"\n\t" \
257 "4:\tli\t%0, %3\n\t" \
258 "j\t3b\n\t" \
259 ".previous\n\t" \
260 ".section\t__ex_table,\"a\"\n\t" \
261 STR(PTR)"\t1b, 4b\n\t" \
262 STR(PTR)"\t2b, 4b\n\t" \
263 ".previous" \
264 : "=r" (res) \
265 : "r" (value), "r" (addr), "i" (-EFAULT));
266#endif
267
268#ifdef __LITTLE_ENDIAN
269#define LoadHW(addr, value, res) \
270 __asm__ __volatile__ (".set\tnoat\n" \
271 "1:\t"user_lb("%0", "1(%2)")"\n" \
272 "2:\t"user_lbu("$1", "0(%2)")"\n\t" \
273 "sll\t%0, 0x8\n\t" \
274 "or\t%0, $1\n\t" \
275 "li\t%1, 0\n" \
276 "3:\t.set\tat\n\t" \
277 ".insn\n\t" \
278 ".section\t.fixup,\"ax\"\n\t" \
279 "4:\tli\t%1, %3\n\t" \
280 "j\t3b\n\t" \
281 ".previous\n\t" \
282 ".section\t__ex_table,\"a\"\n\t" \
283 STR(PTR)"\t1b, 4b\n\t" \
284 STR(PTR)"\t2b, 4b\n\t" \
285 ".previous" \
286 : "=&r" (value), "=r" (res) \
287 : "r" (addr), "i" (-EFAULT));
288
289#define LoadW(addr, value, res) \
290 __asm__ __volatile__ ( \
291 "1:\t"user_lwl("%0", "3(%2)")"\n" \
292 "2:\t"user_lwr("%0", "(%2)")"\n\t" \
293 "li\t%1, 0\n" \
294 "3:\n\t" \
295 ".insn\n\t" \
296 ".section\t.fixup,\"ax\"\n\t" \
297 "4:\tli\t%1, %3\n\t" \
298 "j\t3b\n\t" \
299 ".previous\n\t" \
300 ".section\t__ex_table,\"a\"\n\t" \
301 STR(PTR)"\t1b, 4b\n\t" \
302 STR(PTR)"\t2b, 4b\n\t" \
303 ".previous" \
304 : "=&r" (value), "=r" (res) \
305 : "r" (addr), "i" (-EFAULT));
306
307#define LoadHWU(addr, value, res) \
308 __asm__ __volatile__ ( \
309 ".set\tnoat\n" \
310 "1:\t"user_lbu("%0", "1(%2)")"\n" \
311 "2:\t"user_lbu("$1", "0(%2)")"\n\t" \
312 "sll\t%0, 0x8\n\t" \
313 "or\t%0, $1\n\t" \
314 "li\t%1, 0\n" \
315 "3:\n\t" \
316 ".insn\n\t" \
317 ".set\tat\n\t" \
318 ".section\t.fixup,\"ax\"\n\t" \
319 "4:\tli\t%1, %3\n\t" \
320 "j\t3b\n\t" \
321 ".previous\n\t" \
322 ".section\t__ex_table,\"a\"\n\t" \
323 STR(PTR)"\t1b, 4b\n\t" \
324 STR(PTR)"\t2b, 4b\n\t" \
325 ".previous" \
326 : "=&r" (value), "=r" (res) \
327 : "r" (addr), "i" (-EFAULT));
328
329#define LoadWU(addr, value, res) \
330 __asm__ __volatile__ ( \
331 "1:\t"user_lwl("%0", "3(%2)")"\n" \
332 "2:\t"user_lwr("%0", "(%2)")"\n\t" \
333 "dsll\t%0, %0, 32\n\t" \
334 "dsrl\t%0, %0, 32\n\t" \
335 "li\t%1, 0\n" \
336 "3:\n\t" \
337 ".insn\n\t" \
338 "\t.section\t.fixup,\"ax\"\n\t" \
339 "4:\tli\t%1, %3\n\t" \
340 "j\t3b\n\t" \
341 ".previous\n\t" \
342 ".section\t__ex_table,\"a\"\n\t" \
343 STR(PTR)"\t1b, 4b\n\t" \
344 STR(PTR)"\t2b, 4b\n\t" \
345 ".previous" \
346 : "=&r" (value), "=r" (res) \
347 : "r" (addr), "i" (-EFAULT));
348
349#define LoadDW(addr, value, res) \
350 __asm__ __volatile__ ( \
351 "1:\tldl\t%0, 7(%2)\n" \
352 "2:\tldr\t%0, (%2)\n\t" \
353 "li\t%1, 0\n" \
354 "3:\n\t" \
355 ".insn\n\t" \
356 "\t.section\t.fixup,\"ax\"\n\t" \
357 "4:\tli\t%1, %3\n\t" \
358 "j\t3b\n\t" \
359 ".previous\n\t" \
360 ".section\t__ex_table,\"a\"\n\t" \
361 STR(PTR)"\t1b, 4b\n\t" \
362 STR(PTR)"\t2b, 4b\n\t" \
363 ".previous" \
364 : "=&r" (value), "=r" (res) \
365 : "r" (addr), "i" (-EFAULT));
366
367#define StoreHW(addr, value, res) \
368 __asm__ __volatile__ ( \
369 ".set\tnoat\n" \
370 "1:\t"user_sb("%1", "0(%2)")"\n" \
371 "srl\t$1,%1, 0x8\n" \
372 "2:\t"user_sb("$1", "1(%2)")"\n" \
373 ".set\tat\n\t" \
374 "li\t%0, 0\n" \
375 "3:\n\t" \
376 ".insn\n\t" \
377 ".section\t.fixup,\"ax\"\n\t" \
378 "4:\tli\t%0, %3\n\t" \
379 "j\t3b\n\t" \
380 ".previous\n\t" \
381 ".section\t__ex_table,\"a\"\n\t" \
382 STR(PTR)"\t1b, 4b\n\t" \
383 STR(PTR)"\t2b, 4b\n\t" \
384 ".previous" \
385 : "=r" (res) \
386 : "r" (value), "r" (addr), "i" (-EFAULT));
387
388#define StoreW(addr, value, res) \
389 __asm__ __volatile__ ( \
390 "1:\t"user_swl("%1", "3(%2)")"\n" \
391 "2:\t"user_swr("%1", "(%2)")"\n\t" \
392 "li\t%0, 0\n" \
393 "3:\n\t" \
394 ".insn\n\t" \
395 ".section\t.fixup,\"ax\"\n\t" \
396 "4:\tli\t%0, %3\n\t" \
397 "j\t3b\n\t" \
398 ".previous\n\t" \
399 ".section\t__ex_table,\"a\"\n\t" \
400 STR(PTR)"\t1b, 4b\n\t" \
401 STR(PTR)"\t2b, 4b\n\t" \
402 ".previous" \
403 : "=r" (res) \
404 : "r" (value), "r" (addr), "i" (-EFAULT));
405
406#define StoreDW(addr, value, res) \
407 __asm__ __volatile__ ( \
408 "1:\tsdl\t%1, 7(%2)\n" \
409 "2:\tsdr\t%1, (%2)\n\t" \
410 "li\t%0, 0\n" \
411 "3:\n\t" \
412 ".insn\n\t" \
413 ".section\t.fixup,\"ax\"\n\t" \
414 "4:\tli\t%0, %3\n\t" \
415 "j\t3b\n\t" \
416 ".previous\n\t" \
417 ".section\t__ex_table,\"a\"\n\t" \
418 STR(PTR)"\t1b, 4b\n\t" \
419 STR(PTR)"\t2b, 4b\n\t" \
420 ".previous" \
421 : "=r" (res) \
422 : "r" (value), "r" (addr), "i" (-EFAULT));
423#endif
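/*
 * The only difference between the big- and little-endian blocks above is
 * the byte offsets: lwl/ldl are given the address of the most significant
 * byte of the unaligned word and lwr/ldr the address of the least
 * significant one, so a big-endian kernel uses offsets 0 and 3 (or 7) while
 * a little-endian kernel swaps them.  The halfword macros likewise swap
 * which of the two bytes is loaded with the sign-extending lb.
 */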
424
425static void emulate_load_store_insn(struct pt_regs *regs,
426 void __user *addr, unsigned int __user *pc)
427{
428 union mips_instruction insn;
429 unsigned long value;
430 unsigned int res;
431 unsigned long origpc;
432 unsigned long orig31;
433 void __user *fault_addr = NULL;
434
435 origpc = (unsigned long)pc;
436 orig31 = regs->regs[31];
437
438 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
439
440 /*
441 * This load never faults.
442 */
443 __get_user(insn.word, pc);
444
445 switch (insn.i_format.opcode) {
446 /*
447 * These are instructions that a compiler doesn't generate. We
448 * can assume therefore that the code is MIPS-aware and
449 * really buggy. Emulating these instructions would break the
450 * semantics anyway.
451 */
452 case ll_op:
453 case lld_op:
454 case sc_op:
455 case scd_op:
456
457 /*
458 * For these instructions the only way to create an address
459 * error is an attempted access to kernel/supervisor address
460 * space.
461 */
462 case ldl_op:
463 case ldr_op:
464 case lwl_op:
465 case lwr_op:
466 case sdl_op:
467 case sdr_op:
468 case swl_op:
469 case swr_op:
470 case lb_op:
471 case lbu_op:
472 case sb_op:
473 goto sigbus;
474
475 /*
476 * The remaining opcodes are the ones that are really of
477 * interest.
478 */
479 case lh_op:
480 if (!access_ok(VERIFY_READ, addr, 2))
481 goto sigbus;
482
483 LoadHW(addr, value, res);
484 if (res)
485 goto fault;
486 compute_return_epc(regs);
487 regs->regs[insn.i_format.rt] = value;
488 break;
489
490 case lw_op:
491 if (!access_ok(VERIFY_READ, addr, 4))
492 goto sigbus;
493
494 LoadW(addr, value, res);
495 if (res)
496 goto fault;
497 compute_return_epc(regs);
498 regs->regs[insn.i_format.rt] = value;
499 break;
500
501 case lhu_op:
502 if (!access_ok(VERIFY_READ, addr, 2))
503 goto sigbus;
504
505 LoadHWU(addr, value, res);
506 if (res)
507 goto fault;
508 compute_return_epc(regs);
509 regs->regs[insn.i_format.rt] = value;
510 break;
511
512 case lwu_op:
513#ifdef CONFIG_64BIT
514 /*
515 * A 32-bit kernel might be running on a 64-bit processor. But
516 * if we're on a 32-bit processor and an i-cache incoherency
517 * or race makes us see a 64-bit instruction here the sdl/sdr
518 * would blow up, so for now we don't handle unaligned 64-bit
519 * instructions on 32-bit kernels.
520 */
521 if (!access_ok(VERIFY_READ, addr, 4))
522 goto sigbus;
523
524 LoadWU(addr, value, res);
525 if (res)
526 goto fault;
527 compute_return_epc(regs);
528 regs->regs[insn.i_format.rt] = value;
529 break;
530#endif /* CONFIG_64BIT */
531
532 /* Cannot handle 64-bit instructions in 32-bit kernel */
533 goto sigill;
534
535 case ld_op:
536#ifdef CONFIG_64BIT
537 /*
538 * A 32-bit kernel might be running on a 64-bit processor. But
539 * if we're on a 32-bit processor and an i-cache incoherency
540 * or race makes us see a 64-bit instruction here the sdl/sdr
541 * would blow up, so for now we don't handle unaligned 64-bit
542 * instructions on 32-bit kernels.
543 */
544 if (!access_ok(VERIFY_READ, addr, 8))
545 goto sigbus;
546
547 LoadDW(addr, value, res);
548 if (res)
549 goto fault;
550 compute_return_epc(regs);
551 regs->regs[insn.i_format.rt] = value;
552 break;
553#endif /* CONFIG_64BIT */
554
555 /* Cannot handle 64-bit instructions in 32-bit kernel */
556 goto sigill;
557
558 case sh_op:
559 if (!access_ok(VERIFY_WRITE, addr, 2))
560 goto sigbus;
561
562 compute_return_epc(regs);
563 value = regs->regs[insn.i_format.rt];
564 StoreHW(addr, value, res);
565 if (res)
566 goto fault;
567 break;
568
569 case sw_op:
570 if (!access_ok(VERIFY_WRITE, addr, 4))
571 goto sigbus;
572
573 compute_return_epc(regs);
574 value = regs->regs[insn.i_format.rt];
575 StoreW(addr, value, res);
576 if (res)
577 goto fault;
578 break;
579
580 case sd_op:
581#ifdef CONFIG_64BIT
582 /*
583 * A 32-bit kernel might be running on a 64-bit processor. But
584 * if we're on a 32-bit processor and an i-cache incoherency
585 * or race makes us see a 64-bit instruction here the sdl/sdr
586 * would blow up, so for now we don't handle unaligned 64-bit
587 * instructions on 32-bit kernels.
588 */
589 if (!access_ok(VERIFY_WRITE, addr, 8))
590 goto sigbus;
591
592 compute_return_epc(regs);
593 value = regs->regs[insn.i_format.rt];
594 StoreDW(addr, value, res);
595 if (res)
596 goto fault;
597 break;
598#endif /* CONFIG_64BIT */
599
600 /* Cannot handle 64-bit instructions in 32-bit kernel */
601 goto sigill;
602
603 case lwc1_op:
604 case ldc1_op:
605 case swc1_op:
606 case sdc1_op:
607 die_if_kernel("Unaligned FP access in kernel code", regs);
608 BUG_ON(!used_math());
609 BUG_ON(!is_fpu_owner());
610
611 lose_fpu(1); /* Save FPU state for the emulator. */
612 res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
613 &fault_addr);
614 own_fpu(1); /* Restore FPU state. */
615
616 /* Signal if something went wrong. */
617 process_fpemu_return(res, fault_addr);
618
619 if (res == 0)
620 break;
621 return;
622
623 /*
624 * COP2 is available to the implementor for application-specific use.
625 * It's up to applications to register a notifier chain and do
626 * whatever they have to do, including possibly sending signals.
627 */
628 case lwc2_op:
629 cu2_notifier_call_chain(CU2_LWC2_OP, regs);
630 break;
631
632 case ldc2_op:
633 cu2_notifier_call_chain(CU2_LDC2_OP, regs);
634 break;
635
636 case swc2_op:
637 cu2_notifier_call_chain(CU2_SWC2_OP, regs);
638 break;
639
640 case sdc2_op:
641 cu2_notifier_call_chain(CU2_SDC2_OP, regs);
642 break;
643
644 default:
645 /*
646 * Pheeee... We encountered a yet unknown instruction or
647 * cache coherence problem. Die sucker, die ...
648 */
649 goto sigill;
650 }
651
652#ifdef CONFIG_DEBUG_FS
653 unaligned_instructions++;
654#endif
655
656 return;
657
658fault:
659 /* roll back jump/branch */
660 regs->cp0_epc = origpc;
661 regs->regs[31] = orig31;
662 /* Did we have an exception handler installed? */
663 if (fixup_exception(regs))
664 return;
665
666 die_if_kernel("Unhandled kernel unaligned access", regs);
667 force_sig(SIGSEGV, current);
668
669 return;
670
671sigbus:
672 die_if_kernel("Unhandled kernel unaligned access", regs);
673 force_sig(SIGBUS, current);
674
675 return;
676
677sigill:
678 die_if_kernel
679 ("Unhandled kernel unaligned access or invalid instruction", regs);
680 force_sig(SIGILL, current);
681}
682
683/* Recode table from 16-bit register notation to 32-bit GPR. */
684const int reg16to32[] = { 16, 17, 2, 3, 4, 5, 6, 7 };
685
686/* Recode table from 16-bit STORE register notation to 32-bit GPR. */
687const int reg16to32st[] = { 0, 17, 2, 3, 4, 5, 6, 7 };
688
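/*
 * MIPS16e and microMIPS encode most registers in a 3-bit field.  reg16to32[]
 * maps that encoding to the real GPR numbers $16, $17, $2..$7; reg16to32st[]
 * is the variant used by the 16-bit store formats, where encoding 0 names
 * $0 so a zero can be stored directly.
 */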
689static void emulate_load_store_microMIPS(struct pt_regs *regs,
690 void __user *addr)
691{
692 unsigned long value;
693 unsigned int res;
694 int i;
695 unsigned int reg = 0, rvar;
696 unsigned long orig31;
697 u16 __user *pc16;
698 u16 halfword;
699 unsigned int word;
700 unsigned long origpc, contpc;
701 union mips_instruction insn;
702 struct mm_decoded_insn mminsn;
703 void __user *fault_addr = NULL;
704
705 origpc = regs->cp0_epc;
706 orig31 = regs->regs[31];
707
708 mminsn.micro_mips_mode = 1;
709
710 /*
711 * This load never faults.
712 */
713 pc16 = (unsigned short __user *)msk_isa16_mode(regs->cp0_epc);
714 __get_user(halfword, pc16);
715 pc16++;
716 contpc = regs->cp0_epc + 2;
717 word = ((unsigned int)halfword << 16);
718 mminsn.pc_inc = 2;
719
720 if (!mm_insn_16bit(halfword)) {
721 __get_user(halfword, pc16);
722 pc16++;
723 contpc = regs->cp0_epc + 4;
724 mminsn.pc_inc = 4;
725 word |= halfword;
726 }
727 mminsn.insn = word;
728
729 if (get_user(halfword, pc16))
730 goto fault;
731 mminsn.next_pc_inc = 2;
732 word = ((unsigned int)halfword << 16);
733
734 if (!mm_insn_16bit(halfword)) {
735 pc16++;
736 if (get_user(halfword, pc16))
737 goto fault;
738 mminsn.next_pc_inc = 4;
739 word |= halfword;
740 }
741 mminsn.next_insn = word;
742
743 insn = (union mips_instruction)(mminsn.insn);
744 if (mm_isBranchInstr(regs, mminsn, &contpc))
745 insn = (union mips_instruction)(mminsn.next_insn);
746
747 /* Parse instruction to find what to do */
748
749 switch (insn.mm_i_format.opcode) {
750
751 case mm_pool32a_op:
752 switch (insn.mm_x_format.func) {
753 case mm_lwxs_op:
754 reg = insn.mm_x_format.rd;
755 goto loadW;
756 }
757
758 goto sigbus;
759
760 case mm_pool32b_op:
761 switch (insn.mm_m_format.func) {
762 case mm_lwp_func:
763 reg = insn.mm_m_format.rd;
764 if (reg == 31)
765 goto sigbus;
766
767 if (!access_ok(VERIFY_READ, addr, 8))
768 goto sigbus;
769
770 LoadW(addr, value, res);
771 if (res)
772 goto fault;
773 regs->regs[reg] = value;
774 addr += 4;
775 LoadW(addr, value, res);
776 if (res)
777 goto fault;
778 regs->regs[reg + 1] = value;
779 goto success;
780
781 case mm_swp_func:
782 reg = insn.mm_m_format.rd;
783 if (reg == 31)
784 goto sigbus;
785
786 if (!access_ok(VERIFY_WRITE, addr, 8))
787 goto sigbus;
788
789 value = regs->regs[reg];
790 StoreW(addr, value, res);
791 if (res)
792 goto fault;
793 addr += 4;
794 value = regs->regs[reg + 1];
795 StoreW(addr, value, res);
796 if (res)
797 goto fault;
798 goto success;
799
800 case mm_ldp_func:
801#ifdef CONFIG_64BIT
802 reg = insn.mm_m_format.rd;
803 if (reg == 31)
804 goto sigbus;
805
806 if (!access_ok(VERIFY_READ, addr, 16))
807 goto sigbus;
808
809 LoadDW(addr, value, res);
810 if (res)
811 goto fault;
812 regs->regs[reg] = value;
813 addr += 8;
814 LoadDW(addr, value, res);
815 if (res)
816 goto fault;
817 regs->regs[reg + 1] = value;
818 goto success;
819#endif /* CONFIG_64BIT */
820
821 goto sigill;
822
823 case mm_sdp_func:
824#ifdef CONFIG_64BIT
825 reg = insn.mm_m_format.rd;
826 if (reg == 31)
827 goto sigbus;
828
829 if (!access_ok(VERIFY_WRITE, addr, 16))
830 goto sigbus;
831
832 value = regs->regs[reg];
833 StoreDW(addr, value, res);
834 if (res)
835 goto fault;
836 addr += 8;
837 value = regs->regs[reg + 1];
838 StoreDW(addr, value, res);
839 if (res)
840 goto fault;
841 goto success;
842#endif /* CONFIG_64BIT */
843
844 goto sigill;
845
846 case mm_lwm32_func:
847 reg = insn.mm_m_format.rd;
848 rvar = reg & 0xf;
849 if ((rvar > 9) || !reg)
850 goto sigill;
851 if (reg & 0x10) {
852 if (!access_ok
853 (VERIFY_READ, addr, 4 * (rvar + 1)))
854 goto sigbus;
855 } else {
856 if (!access_ok(VERIFY_READ, addr, 4 * rvar))
857 goto sigbus;
858 }
859 if (rvar == 9)
860 rvar = 8;
861 for (i = 16; rvar; rvar--, i++) {
862 LoadW(addr, value, res);
863 if (res)
864 goto fault;
865 addr += 4;
866 regs->regs[i] = value;
867 }
868 if ((reg & 0xf) == 9) {
869 LoadW(addr, value, res);
870 if (res)
871 goto fault;
872 addr += 4;
873 regs->regs[30] = value;
874 }
875 if (reg & 0x10) {
876 LoadW(addr, value, res);
877 if (res)
878 goto fault;
879 regs->regs[31] = value;
880 }
881 goto success;
882
883 case mm_swm32_func:
884 reg = insn.mm_m_format.rd;
885 rvar = reg & 0xf;
886 if ((rvar > 9) || !reg)
887 goto sigill;
888 if (reg & 0x10) {
889 if (!access_ok
890 (VERIFY_WRITE, addr, 4 * (rvar + 1)))
891 goto sigbus;
892 } else {
893 if (!access_ok(VERIFY_WRITE, addr, 4 * rvar))
894 goto sigbus;
895 }
896 if (rvar == 9)
897 rvar = 8;
898 for (i = 16; rvar; rvar--, i++) {
899 value = regs->regs[i];
900 StoreW(addr, value, res);
901 if (res)
902 goto fault;
903 addr += 4;
904 }
905 if ((reg & 0xf) == 9) {
906 value = regs->regs[30];
907 StoreW(addr, value, res);
908 if (res)
909 goto fault;
910 addr += 4;
911 }
912 if (reg & 0x10) {
913 value = regs->regs[31];
914 StoreW(addr, value, res);
915 if (res)
916 goto fault;
917 }
918 goto success;
919
920 case mm_ldm_func:
921#ifdef CONFIG_64BIT
922 reg = insn.mm_m_format.rd;
923 rvar = reg & 0xf;
924 if ((rvar > 9) || !reg)
925 goto sigill;
926 if (reg & 0x10) {
927 if (!access_ok
928 (VERIFY_READ, addr, 8 * (rvar + 1)))
929 goto sigbus;
930 } else {
931 if (!access_ok(VERIFY_READ, addr, 8 * rvar))
932 goto sigbus;
933 }
934 if (rvar == 9)
935 rvar = 8;
936
937 for (i = 16; rvar; rvar--, i++) {
938 LoadDW(addr, value, res);
939 if (res)
940 goto fault;
941 addr += 4;
942 regs->regs[i] = value;
943 }
944 if ((reg & 0xf) == 9) {
945 LoadDW(addr, value, res);
946 if (res)
947 goto fault;
948 addr += 8;
949 regs->regs[30] = value;
950 }
951 if (reg & 0x10) {
952 LoadDW(addr, value, res);
953 if (res)
954 goto fault;
955 regs->regs[31] = value;
956 }
957 goto success;
958#endif /* CONFIG_64BIT */
959
960 goto sigill;
961
962 case mm_sdm_func:
963#ifdef CONFIG_64BIT
964 reg = insn.mm_m_format.rd;
965 rvar = reg & 0xf;
966 if ((rvar > 9) || !reg)
967 goto sigill;
968 if (reg & 0x10) {
969 if (!access_ok
970 (VERIFY_WRITE, addr, 8 * (rvar + 1)))
971 goto sigbus;
972 } else {
973 if (!access_ok(VERIFY_WRITE, addr, 8 * rvar))
974 goto sigbus;
975 }
976 if (rvar == 9)
977 rvar = 8;
978
979 for (i = 16; rvar; rvar--, i++) {
980 value = regs->regs[i];
981 StoreDW(addr, value, res);
982 if (res)
983 goto fault;
984 addr += 8;
985 }
986 if ((reg & 0xf) == 9) {
987 value = regs->regs[30];
988 StoreDW(addr, value, res);
989 if (res)
990 goto fault;
991 addr += 8;
992 }
993 if (reg & 0x10) {
994 value = regs->regs[31];
995 StoreDW(addr, value, res);
996 if (res)
997 goto fault;
998 }
999 goto success;
1000#endif /* CONFIG_64BIT */
1001
1002 goto sigill;
1003
1004 /* LWC2, SWC2, LDC2, SDC2 are not serviced */
1005 }
1006
1007 goto sigbus;
1008
1009 case mm_pool32c_op:
1010 switch (insn.mm_m_format.func) {
1011 case mm_lwu_func:
1012 reg = insn.mm_m_format.rd;
1013 goto loadWU;
1014 }
1015
1016 /* LL,SC,LLD,SCD are not serviced */
1017 goto sigbus;
1018
1019 case mm_pool32f_op:
1020 switch (insn.mm_x_format.func) {
1021 case mm_lwxc1_func:
1022 case mm_swxc1_func:
1023 case mm_ldxc1_func:
1024 case mm_sdxc1_func:
1025 goto fpu_emul;
1026 }
1027
1028 goto sigbus;
1029
1030 case mm_ldc132_op:
1031 case mm_sdc132_op:
1032 case mm_lwc132_op:
1033 case mm_swc132_op:
1034fpu_emul:
1035 /* roll back jump/branch */
1036 regs->cp0_epc = origpc;
1037 regs->regs[31] = orig31;
1038
1039 die_if_kernel("Unaligned FP access in kernel code", regs);
1040 BUG_ON(!used_math());
1041 BUG_ON(!is_fpu_owner());
1042
1043 lose_fpu(1); /* save the FPU state for the emulator */
1044 res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
1045 &fault_addr);
1046 own_fpu(1); /* restore FPU state */
1047
1048 /* If something went wrong, signal */
1049 process_fpemu_return(res, fault_addr);
1050
1051 if (res == 0)
1052 goto success;
1053 return;
1054
1055 case mm_lh32_op:
1056 reg = insn.mm_i_format.rt;
1057 goto loadHW;
1058
1059 case mm_lhu32_op:
1060 reg = insn.mm_i_format.rt;
1061 goto loadHWU;
1062
1063 case mm_lw32_op:
1064 reg = insn.mm_i_format.rt;
1065 goto loadW;
1066
1067 case mm_sh32_op:
1068 reg = insn.mm_i_format.rt;
1069 goto storeHW;
1070
1071 case mm_sw32_op:
1072 reg = insn.mm_i_format.rt;
1073 goto storeW;
1074
1075 case mm_ld32_op:
1076 reg = insn.mm_i_format.rt;
1077 goto loadDW;
1078
1079 case mm_sd32_op:
1080 reg = insn.mm_i_format.rt;
1081 goto storeDW;
1082
1083 case mm_pool16c_op:
1084 switch (insn.mm16_m_format.func) {
1085 case mm_lwm16_op:
1086 reg = insn.mm16_m_format.rlist;
1087 rvar = reg + 1;
1088 if (!access_ok(VERIFY_READ, addr, 4 * rvar))
1089 goto sigbus;
1090
1091 for (i = 16; rvar; rvar--, i++) {
1092 LoadW(addr, value, res);
1093 if (res)
1094 goto fault;
1095 addr += 4;
1096 regs->regs[i] = value;
1097 }
1098 LoadW(addr, value, res);
1099 if (res)
1100 goto fault;
1101 regs->regs[31] = value;
1102
1103 goto success;
1104
1105 case mm_swm16_op:
1106 reg = insn.mm16_m_format.rlist;
1107 rvar = reg + 1;
1108 if (!access_ok(VERIFY_WRITE, addr, 4 * rvar))
1109 goto sigbus;
1110
1111 for (i = 16; rvar; rvar--, i++) {
1112 value = regs->regs[i];
1113 StoreW(addr, value, res);
1114 if (res)
1115 goto fault;
1116 addr += 4;
1117 }
1118 value = regs->regs[31];
1119 StoreW(addr, value, res);
1120 if (res)
1121 goto fault;
1122
1123 goto success;
1124
1125 }
1126
1127 goto sigbus;
1128
1129 case mm_lhu16_op:
1130 reg = reg16to32[insn.mm16_rb_format.rt];
1131 goto loadHWU;
1132
1133 case mm_lw16_op:
1134 reg = reg16to32[insn.mm16_rb_format.rt];
1135 goto loadW;
1136
1137 case mm_sh16_op:
1138 reg = reg16to32st[insn.mm16_rb_format.rt];
1139 goto storeHW;
1140
1141 case mm_sw16_op:
1142 reg = reg16to32st[insn.mm16_rb_format.rt];
1143 goto storeW;
1144
1145 case mm_lwsp16_op:
1146 reg = insn.mm16_r5_format.rt;
1147 goto loadW;
1148
1149 case mm_swsp16_op:
1150 reg = insn.mm16_r5_format.rt;
1151 goto storeW;
1152
1153 case mm_lwgp16_op:
1154 reg = reg16to32[insn.mm16_r3_format.rt];
1155 goto loadW;
1156
1157 default:
1158 goto sigill;
1159 }
1160
1161loadHW:
1162 if (!access_ok(VERIFY_READ, addr, 2))
1163 goto sigbus;
1164
1165 LoadHW(addr, value, res);
1166 if (res)
1167 goto fault;
1168 regs->regs[reg] = value;
1169 goto success;
1170
1171loadHWU:
1172 if (!access_ok(VERIFY_READ, addr, 2))
1173 goto sigbus;
1174
1175 LoadHWU(addr, value, res);
1176 if (res)
1177 goto fault;
1178 regs->regs[reg] = value;
1179 goto success;
1180
1181loadW:
1182 if (!access_ok(VERIFY_READ, addr, 4))
1183 goto sigbus;
1184
1185 LoadW(addr, value, res);
1186 if (res)
1187 goto fault;
1188 regs->regs[reg] = value;
1189 goto success;
1190
1191loadWU:
1192#ifdef CONFIG_64BIT
1193 /*
1194 * A 32-bit kernel might be running on a 64-bit processor. But
1195 * if we're on a 32-bit processor and an i-cache incoherency
1196 * or race makes us see a 64-bit instruction here the sdl/sdr
1197 * would blow up, so for now we don't handle unaligned 64-bit
1198 * instructions on 32-bit kernels.
1199 */
1200 if (!access_ok(VERIFY_READ, addr, 4))
1201 goto sigbus;
1202
1203 LoadWU(addr, value, res);
1204 if (res)
1205 goto fault;
1206 regs->regs[reg] = value;
1207 goto success;
1208#endif /* CONFIG_64BIT */
1209
1210 /* Cannot handle 64-bit instructions in 32-bit kernel */
1211 goto sigill;
1212
1213loadDW:
1214#ifdef CONFIG_64BIT
1215 /*
1216 * A 32-bit kernel might be running on a 64-bit processor. But
1217 * if we're on a 32-bit processor and an i-cache incoherency
1218 * or race makes us see a 64-bit instruction here the sdl/sdr
1219 * would blow up, so for now we don't handle unaligned 64-bit
1220 * instructions on 32-bit kernels.
1221 */
1222 if (!access_ok(VERIFY_READ, addr, 8))
1223 goto sigbus;
1224
1225 LoadDW(addr, value, res);
1226 if (res)
1227 goto fault;
1228 regs->regs[reg] = value;
1229 goto success;
1230#endif /* CONFIG_64BIT */
1231
1232 /* Cannot handle 64-bit instructions in 32-bit kernel */
1233 goto sigill;
1234
1235storeHW:
1236 if (!access_ok(VERIFY_WRITE, addr, 2))
1237 goto sigbus;
1238
1239 value = regs->regs[reg];
1240 StoreHW(addr, value, res);
1241 if (res)
1242 goto fault;
1243 goto success;
1244
1245storeW:
1246 if (!access_ok(VERIFY_WRITE, addr, 4))
1247 goto sigbus;
1248
1249 value = regs->regs[reg];
1250 StoreW(addr, value, res);
1251 if (res)
1252 goto fault;
1253 goto success;
1254
1255storeDW:
1256#ifdef CONFIG_64BIT
1257 /*
1258 * A 32-bit kernel might be running on a 64-bit processor. But
1259 * if we're on a 32-bit processor and an i-cache incoherency
1260 * or race makes us see a 64-bit instruction here the sdl/sdr
1261 * would blow up, so for now we don't handle unaligned 64-bit
1262 * instructions on 32-bit kernels.
1263 */
1264 if (!access_ok(VERIFY_WRITE, addr, 8))
1265 goto sigbus;
1266
1267 value = regs->regs[reg];
1268 StoreDW(addr, value, res);
1269 if (res)
1270 goto fault;
1271 goto success;
1272#endif /* CONFIG_64BIT */
1273
1274 /* Cannot handle 64-bit instructions in 32-bit kernel */
1275 goto sigill;
1276
1277success:
1278 regs->cp0_epc = contpc; /* advance or branch */
1279
1280#ifdef CONFIG_DEBUG_FS
1281 unaligned_instructions++;
1282#endif
1283 return;
1284
1285fault:
1286 /* roll back jump/branch */
1287 regs->cp0_epc = origpc;
1288 regs->regs[31] = orig31;
1289 /* Did we have an exception handler installed? */
1290 if (fixup_exception(regs))
1291 return;
1292
1293 die_if_kernel("Unhandled kernel unaligned access", regs);
1294 force_sig(SIGSEGV, current);
1295
1296 return;
1297
1298sigbus:
1299 die_if_kernel("Unhandled kernel unaligned access", regs);
1300 force_sig(SIGBUS, current);
1301
1302 return;
1303
1304sigill:
1305 die_if_kernel
1306 ("Unhandled kernel unaligned access or invalid instruction", regs);
1307 force_sig(SIGILL, current);
1308}
1309
1310static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user * addr)
1311{
1312 unsigned long value;
1313 unsigned int res;
1314 int reg;
1315 unsigned long orig31;
1316 u16 __user *pc16;
1317 unsigned long origpc;
1318 union mips16e_instruction mips16inst, oldinst;
1319
1320 origpc = regs->cp0_epc;
1321 orig31 = regs->regs[31];
1322 pc16 = (unsigned short __user *)msk_isa16_mode(origpc);
1323 /*
1324 * This load never faults.
1325 */
1326 __get_user(mips16inst.full, pc16);
1327 oldinst = mips16inst;
1328
1329 /* skip EXTEND instruction */
1330 if (mips16inst.ri.opcode == MIPS16e_extend_op) {
1331 pc16++;
1332 __get_user(mips16inst.full, pc16);
1333 } else if (delay_slot(regs)) {
1334 /* skip jump instructions */
1335 /* JAL/JALX are 32 bits but have OPCODE in first short int */
1336 if (mips16inst.ri.opcode == MIPS16e_jal_op)
1337 pc16++;
1338 pc16++;
1339 if (get_user(mips16inst.full, pc16))
1340 goto sigbus;
1341 }
1342
1343 switch (mips16inst.ri.opcode) {
1344 case MIPS16e_i64_op: /* I64 or RI64 instruction */
1345 switch (mips16inst.i64.func) { /* I64/RI64 func field check */
1346 case MIPS16e_ldpc_func:
1347 case MIPS16e_ldsp_func:
1348 reg = reg16to32[mips16inst.ri64.ry];
1349 goto loadDW;
1350
1351 case MIPS16e_sdsp_func:
1352 reg = reg16to32[mips16inst.ri64.ry];
1353 goto writeDW;
1354
1355 case MIPS16e_sdrasp_func:
1356 reg = 29; /* GPRSP */
1357 goto writeDW;
1358 }
1359
1360 goto sigbus;
1361
1362 case MIPS16e_swsp_op:
1363 case MIPS16e_lwpc_op:
1364 case MIPS16e_lwsp_op:
1365 reg = reg16to32[mips16inst.ri.rx];
1366 break;
1367
1368 case MIPS16e_i8_op:
1369 if (mips16inst.i8.func != MIPS16e_swrasp_func)
1370 goto sigbus;
1371 reg = 29; /* GPRSP */
1372 break;
1373
1374 default:
1375 reg = reg16to32[mips16inst.rri.ry];
1376 break;
1377 }
1378
1379 switch (mips16inst.ri.opcode) {
1380
1381 case MIPS16e_lb_op:
1382 case MIPS16e_lbu_op:
1383 case MIPS16e_sb_op:
1384 goto sigbus;
1385
1386 case MIPS16e_lh_op:
1387 if (!access_ok(VERIFY_READ, addr, 2))
1388 goto sigbus;
1389
1390 LoadHW(addr, value, res);
1391 if (res)
1392 goto fault;
1393 MIPS16e_compute_return_epc(regs, &oldinst);
1394 regs->regs[reg] = value;
1395 break;
1396
1397 case MIPS16e_lhu_op:
1398 if (!access_ok(VERIFY_READ, addr, 2))
1399 goto sigbus;
1400
1401 LoadHWU(addr, value, res);
1402 if (res)
1403 goto fault;
1404 MIPS16e_compute_return_epc(regs, &oldinst);
1405 regs->regs[reg] = value;
1406 break;
1407
1408 case MIPS16e_lw_op:
1409 case MIPS16e_lwpc_op:
1410 case MIPS16e_lwsp_op:
1411 if (!access_ok(VERIFY_READ, addr, 4))
1412 goto sigbus;
1413
1414 LoadW(addr, value, res);
1415 if (res)
1416 goto fault;
1417 MIPS16e_compute_return_epc(regs, &oldinst);
1418 regs->regs[reg] = value;
1419 break;
1420
1421 case MIPS16e_lwu_op:
1422#ifdef CONFIG_64BIT
1423 /*
1424 * A 32-bit kernel might be running on a 64-bit processor. But
1425 * if we're on a 32-bit processor and an i-cache incoherency
1426 * or race makes us see a 64-bit instruction here the sdl/sdr
1427 * would blow up, so for now we don't handle unaligned 64-bit
1428 * instructions on 32-bit kernels.
1429 */
1430 if (!access_ok(VERIFY_READ, addr, 4))
1431 goto sigbus;
1432
1433 LoadWU(addr, value, res);
1434 if (res)
1435 goto fault;
1436 MIPS16e_compute_return_epc(regs, &oldinst);
1437 regs->regs[reg] = value;
1438 break;
1439#endif /* CONFIG_64BIT */
1440
1441 /* Cannot handle 64-bit instructions in 32-bit kernel */
1442 goto sigill;
1443
1444 case MIPS16e_ld_op:
1445loadDW:
1446#ifdef CONFIG_64BIT
1447 /*
1448 * A 32-bit kernel might be running on a 64-bit processor. But
1449 * if we're on a 32-bit processor and an i-cache incoherency
1450 * or race makes us see a 64-bit instruction here the sdl/sdr
1451 * would blow up, so for now we don't handle unaligned 64-bit
1452 * instructions on 32-bit kernels.
1453 */
1454 if (!access_ok(VERIFY_READ, addr, 8))
1455 goto sigbus;
1456
1457 LoadDW(addr, value, res);
1458 if (res)
1459 goto fault;
1460 MIPS16e_compute_return_epc(regs, &oldinst);
1461 regs->regs[reg] = value;
1462 break;
1463#endif /* CONFIG_64BIT */
1464
1465 /* Cannot handle 64-bit instructions in 32-bit kernel */
1466 goto sigill;
1467
1468 case MIPS16e_sh_op:
1469 if (!access_ok(VERIFY_WRITE, addr, 2))
1470 goto sigbus;
1471
1472 MIPS16e_compute_return_epc(regs, &oldinst);
1473 value = regs->regs[reg];
1474 StoreHW(addr, value, res);
1475 if (res)
1476 goto fault;
1477 break;
1478
1479 case MIPS16e_sw_op:
1480 case MIPS16e_swsp_op:
1481 case MIPS16e_i8_op: /* actually - MIPS16e_swrasp_func */
1482 if (!access_ok(VERIFY_WRITE, addr, 4))
1483 goto sigbus;
1484
1485 MIPS16e_compute_return_epc(regs, &oldinst);
1486 value = regs->regs[reg];
1487 StoreW(addr, value, res);
1488 if (res)
1489 goto fault;
1490 break;
1491
1492 case MIPS16e_sd_op:
1493writeDW:
1494#ifdef CONFIG_64BIT
1495 /*
1496 * A 32-bit kernel might be running on a 64-bit processor. But
1497 * if we're on a 32-bit processor and an i-cache incoherency
1498 * or race makes us see a 64-bit instruction here the sdl/sdr
1499 * would blow up, so for now we don't handle unaligned 64-bit
1500 * instructions on 32-bit kernels.
1501 */
1502 if (!access_ok(VERIFY_WRITE, addr, 8))
1503 goto sigbus;
1504
1505 MIPS16e_compute_return_epc(regs, &oldinst);
1506 value = regs->regs[reg];
1507 StoreDW(addr, value, res);
1508 if (res)
1509 goto fault;
1510 break;
1511#endif /* CONFIG_64BIT */
1512
1513 /* Cannot handle 64-bit instructions in 32-bit kernel */
1514 goto sigill;
1515
1516 default:
1517 /*
1518 * Pheeee... We encountered a yet unknown instruction or
1519 * cache coherence problem. Die sucker, die ...
1520 */
1521 goto sigill;
1522 }
1523
1524#ifdef CONFIG_DEBUG_FS
1525 unaligned_instructions++;
1526#endif
1527
1528 return;
1529
1530fault:
1531 /* roll back jump/branch */
1532 regs->cp0_epc = origpc;
1533 regs->regs[31] = orig31;
1534 /* Did we have an exception handler installed? */
1535 if (fixup_exception(regs))
1536 return;
1537
1538 die_if_kernel("Unhandled kernel unaligned access", regs);
1539 force_sig(SIGSEGV, current);
1540
1541 return;
1542
1543sigbus:
1544 die_if_kernel("Unhandled kernel unaligned access", regs);
1545 force_sig(SIGBUS, current);
1546
1547 return;
1548
1549sigill:
1550 die_if_kernel
1551 ("Unhandled kernel unaligned access or invalid instruction", regs);
1552 force_sig(SIGILL, current);
1553}
1554
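/*
 * Top-level address error handler.  do_ade() only hands the access to one
 * of the emulators above when that is safe: the fault must not be on the
 * instruction fetch itself (cp0_badvaddr == EPC means the PC itself is
 * unaligned), a user-mode task must have asked for fixups via TIF_FIXADE,
 * and unaligned_action must not demand a signal.  For kernel-mode faults
 * the address limit is temporarily raised with set_fs(KERNEL_DS) around the
 * emulate_load_store_*() call so the user-access macros accept kernel
 * pointers too.
 */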
1555asmlinkage void do_ade(struct pt_regs *regs)
1556{
1557 enum ctx_state prev_state;
1558 unsigned int __user *pc;
1559 mm_segment_t seg;
1560
1561 prev_state = exception_enter();
1562 perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS,
1563 1, regs, regs->cp0_badvaddr);
1564 /*
1565 * Did we catch a fault trying to load an instruction?
1566 */
1567 if (regs->cp0_badvaddr == regs->cp0_epc)
1568 goto sigbus;
1569
1570 if (user_mode(regs) && !test_thread_flag(TIF_FIXADE))
1571 goto sigbus;
1572 if (unaligned_action == UNALIGNED_ACTION_SIGNAL)
1573 goto sigbus;
1574
1575 /*
1576 * Do branch emulation only if we didn't forward the exception.
1577 * This is all still rather ugly ...
1578 */
1579
1580 /*
1581 * Are we running in microMIPS mode?
1582 */
1583 if (get_isa16_mode(regs->cp0_epc)) {
1584 /*
1585 * Did we catch a fault trying to load an instruction in
1586 * 16-bit mode?
1587 */
1588 if (regs->cp0_badvaddr == msk_isa16_mode(regs->cp0_epc))
1589 goto sigbus;
1590 if (unaligned_action == UNALIGNED_ACTION_SHOW)
1591 show_registers(regs);
1592
1593 if (cpu_has_mmips) {
1594 seg = get_fs();
1595 if (!user_mode(regs))
1596 set_fs(KERNEL_DS);
1597 emulate_load_store_microMIPS(regs,
1598 (void __user *)regs->cp0_badvaddr);
1599 set_fs(seg);
1600
1601 return;
1602 }
1603
1604 if (cpu_has_mips16) {
1605 seg = get_fs();
1606 if (!user_mode(regs))
1607 set_fs(KERNEL_DS);
1608 emulate_load_store_MIPS16e(regs,
1609 (void __user *)regs->cp0_badvaddr);
1610 set_fs(seg);
1611
1612 return;
1613 }
1614
1615 goto sigbus;
1616 }
1617
1618 if (unaligned_action == UNALIGNED_ACTION_SHOW)
1619 show_registers(regs);
1620 pc = (unsigned int __user *)exception_epc(regs);
1621
1622 seg = get_fs();
1623 if (!user_mode(regs))
1624 set_fs(KERNEL_DS);
1625 emulate_load_store_insn(regs, (void __user *)regs->cp0_badvaddr, pc);
1626 set_fs(seg);
1627
1628 return;
1629
1630sigbus:
1631 die_if_kernel("Kernel unaligned instruction access", regs);
1632 force_sig(SIGBUS, current);
1633
1634 /*
1635 * XXX On return from the signal handler we should advance the epc
1636 */
1637 exception_exit(prev_state);
1638}
1639
1640#ifdef CONFIG_DEBUG_FS
1641extern struct dentry *mips_debugfs_dir;
1642static int __init debugfs_unaligned(void)
1643{
1644 struct dentry *d;
1645
1646 if (!mips_debugfs_dir)
1647 return -ENODEV;
1648 d = debugfs_create_u32("unaligned_instructions", S_IRUGO,
1649 mips_debugfs_dir, &unaligned_instructions);
1650 if (!d)
1651 return -ENOMEM;
1652 d = debugfs_create_u32("unaligned_action", S_IRUGO | S_IWUSR,
1653 mips_debugfs_dir, &unaligned_action);
1654 if (!d)
1655 return -ENOMEM;
1656 return 0;
1657}
1658__initcall(debugfs_unaligned);
1659#endif