MIPS: KVM: Check MSA presence at uasm time
arch/mips/kvm/entry.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Generation of main entry point for the guest, exception handling.
 *
 * Copyright (C) 2012 MIPS Technologies, Inc.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 *
 * Copyright (C) 2016 Imagination Technologies Ltd.
 */

#include <linux/kvm_host.h>
#include <asm/msa.h>
#include <asm/setup.h>
#include <asm/uasm.h>

/* Register names */
#define ZERO		0
#define AT		1
#define V0		2
#define V1		3
#define A0		4
#define A1		5

#if _MIPS_SIM == _MIPS_SIM_ABI32
#define T0		8
#define T1		9
#define T2		10
#define T3		11
#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */

#if _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32
#define T0		12
#define T1		13
#define T2		14
#define T3		15
#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32 */

#define S0		16
#define S1		17
#define T9		25
#define K0		26
#define K1		27
#define GP		28
#define SP		29
#define RA		31

/* Some CP0 registers */
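/*
 * Each C0_* define below expands to the (register, select) pair taken as the
 * final operands of the uasm CP0 accessors, e.g. uasm_i_mfc0(&p, K0,
 * C0_STATUS) reads CP0 register 12, select 0 into K0.
 */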
#define C0_HWRENA	7, 0
#define C0_BADVADDR	8, 0
#define C0_ENTRYHI	10, 0
#define C0_STATUS	12, 0
#define C0_CAUSE	13, 0
#define C0_EPC		14, 0
#define C0_EBASE	15, 1
#define C0_CONFIG5	16, 5
#define C0_DDATA_LO	28, 3
#define C0_ERROREPC	30, 0

#define CALLFRAME_SIZ	32

enum label_id {
	label_fpu_1 = 1,
	label_msa_1,
	label_return_to_host,
	label_kernel_asid,
};

UASM_L_LA(_fpu_1)
UASM_L_LA(_msa_1)
UASM_L_LA(_return_to_host)
UASM_L_LA(_kernel_asid)

static void *kvm_mips_build_enter_guest(void *addr);
static void *kvm_mips_build_ret_from_exit(void *addr);
static void *kvm_mips_build_ret_to_guest(void *addr);
static void *kvm_mips_build_ret_to_host(void *addr);

/**
 * kvm_mips_build_vcpu_run() - Assemble function to start running a guest VCPU.
 * @addr: Address to start writing code.
 *
 * Assemble the start of the vcpu_run function to run a guest VCPU. The function
 * conforms to the following prototype:
 *
 * int vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu);
 *
 * The exit from the guest and return to the caller is handled by the code
 * generated by kvm_mips_build_ret_to_host().
 *
 * Returns: Next address after end of written function.
 */
void *kvm_mips_build_vcpu_run(void *addr)
{
	u32 *p = addr;
	unsigned int i;

	/*
	 * A0: run
	 * A1: vcpu
	 */

	/* k0/k1 not being used in host kernel context */
	uasm_i_addiu(&p, K1, SP, -(int)sizeof(struct pt_regs));
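	/*
	 * Only the registers the C caller expects to be preserved are saved
	 * into the pt_regs frame just allocated: s0-s7, gp, sp, s8/fp and ra
	 * (t8/t9 and k0/k1 are skipped by the jump from 24 to 28 below).
	 */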
	for (i = 16; i < 32; ++i) {
		if (i == 24)
			i = 28;
		UASM_i_SW(&p, i, offsetof(struct pt_regs, regs[i]), K1);
	}

	/* Save hi/lo */
	uasm_i_mflo(&p, V0);
	UASM_i_SW(&p, V0, offsetof(struct pt_regs, lo), K1);
	uasm_i_mfhi(&p, V1);
	UASM_i_SW(&p, V1, offsetof(struct pt_regs, hi), K1);

	/* Save host status */
	uasm_i_mfc0(&p, V0, C0_STATUS);
	UASM_i_SW(&p, V0, offsetof(struct pt_regs, cp0_status), K1);

	/* Save DDATA_LO, will be used to store pointer to vcpu */
	uasm_i_mfc0(&p, V1, C0_DDATA_LO);
	UASM_i_SW(&p, V1, offsetof(struct pt_regs, cp0_epc), K1);

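	/*
	 * CP0_DDATA_LO is used as a scratch register to carry the vcpu pointer
	 * across guest execution; kvm_mips_build_exit() reads it back to find
	 * the vcpu on every exit. Its host value, saved above, is stashed in
	 * the otherwise unused cp0_epc slot of the pt_regs frame.
	 */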
	/* DDATA_LO has pointer to vcpu */
	uasm_i_mtc0(&p, A1, C0_DDATA_LO);

	/* Offset into vcpu->arch */
	uasm_i_addiu(&p, K1, A1, offsetof(struct kvm_vcpu, arch));

	/*
	 * Save the host stack to VCPU, used for exception processing
	 * when we exit from the Guest
	 */
	UASM_i_SW(&p, SP, offsetof(struct kvm_vcpu_arch, host_stack), K1);

	/* Save the kernel gp as well */
	UASM_i_SW(&p, GP, offsetof(struct kvm_vcpu_arch, host_gp), K1);

	/*
	 * Setup status register for running the guest in UM, interrupts
	 * are disabled
	 */
	UASM_i_LA(&p, K0, ST0_EXL | KSU_USER | ST0_BEV);
	uasm_i_mtc0(&p, K0, C0_STATUS);
	uasm_i_ehb(&p);

	/* load up the new EBASE */
	UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, guest_ebase), K1);
	uasm_i_mtc0(&p, K0, C0_EBASE);

	/*
	 * Now that the new EBASE has been loaded, unset BEV, set
	 * interrupt mask as it was but make sure that timer interrupts
	 * are enabled
	 */
	uasm_i_addiu(&p, K0, ZERO, ST0_EXL | KSU_USER | ST0_IE);
	uasm_i_andi(&p, V0, V0, ST0_IM);
	uasm_i_or(&p, K0, K0, V0);
	uasm_i_mtc0(&p, K0, C0_STATUS);
	uasm_i_ehb(&p);

	p = kvm_mips_build_enter_guest(p);

	return p;
}

/**
 * kvm_mips_build_enter_guest() - Assemble code to resume guest execution.
 * @addr: Address to start writing code.
 *
 * Assemble the code to resume guest execution. This code is common between the
 * initial entry into the guest from the host, and returning from the exit
 * handler back to the guest.
 *
 * Returns: Next address after end of written function.
 */
static void *kvm_mips_build_enter_guest(void *addr)
{
	u32 *p = addr;
	unsigned int i;
	struct uasm_label labels[2];
	struct uasm_reloc relocs[2];
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	/* Set Guest EPC */
	UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, pc), K1);
	uasm_i_mtc0(&p, T0, C0_EPC);

	/* Set the ASID for the Guest Kernel */
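	/*
	 * The guest kernel ASID is used unless the guest CP0_Status indicates
	 * user mode with ERL and EXL both clear. Note that the first addiu
	 * below sits in the bnez delay slot, so T1 is preloaded with the
	 * kernel ASID array and only overwritten with the user ASID array on
	 * the fall-through (user mode) path.
	 */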
	UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, cop0), K1);
	UASM_i_LW(&p, T0, offsetof(struct mips_coproc, reg[MIPS_CP0_STATUS][0]),
		  T0);
	uasm_i_andi(&p, T0, T0, KSU_USER | ST0_ERL | ST0_EXL);
	uasm_i_xori(&p, T0, T0, KSU_USER);
	uasm_il_bnez(&p, &r, T0, label_kernel_asid);
	uasm_i_addiu(&p, T1, K1,
		     offsetof(struct kvm_vcpu_arch, guest_kernel_asid));
	/* else user */
	uasm_i_addiu(&p, T1, K1,
		     offsetof(struct kvm_vcpu_arch, guest_user_asid));
	uasm_l_kernel_asid(&l, p);

	/* t1: contains the base of the ASID array, need to get the cpu id */
	/* smp_processor_id */
	UASM_i_LW(&p, T2, offsetof(struct thread_info, cpu), GP);
	/* x4 */
	uasm_i_sll(&p, T2, T2, 2);
	UASM_i_ADDU(&p, T3, T1, T2);
	UASM_i_LW(&p, K0, 0, T3);
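	/*
	 * K0 now holds this CPU's guest ASID; mask it down to the hardware
	 * ASID field before writing it to EntryHi.
	 */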
#ifdef CONFIG_MIPS_ASID_BITS_VARIABLE
	/* x sizeof(struct cpuinfo_mips)/4 */
	uasm_i_addiu(&p, T3, ZERO, sizeof(struct cpuinfo_mips)/4);
	uasm_i_mul(&p, T2, T2, T3);

	UASM_i_LA_mostly(&p, AT, (long)&cpu_data[0].asid_mask);
	UASM_i_ADDU(&p, AT, AT, T2);
	UASM_i_LW(&p, T2, uasm_rel_lo((long)&cpu_data[0].asid_mask), AT);
	uasm_i_and(&p, K0, K0, T2);
#else
	uasm_i_andi(&p, K0, K0, MIPS_ENTRYHI_ASID);
#endif
	uasm_i_mtc0(&p, K0, C0_ENTRYHI);
	uasm_i_ehb(&p);

	/* Disable RDHWR access */
	uasm_i_mtc0(&p, ZERO, C0_HWRENA);

	/* load the guest context from VCPU and return */
	for (i = 1; i < 32; ++i) {
		/* Guest k0/k1 loaded later */
		if (i == K0 || i == K1)
			continue;
		UASM_i_LW(&p, i, offsetof(struct kvm_vcpu_arch, gprs[i]), K1);
	}

	/* Restore hi/lo */
	UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, hi), K1);
	uasm_i_mthi(&p, K0);

	UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, lo), K1);
	uasm_i_mtlo(&p, K0);

	/* Restore the guest's k0/k1 registers */
	UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, gprs[K0]), K1);
	UASM_i_LW(&p, K1, offsetof(struct kvm_vcpu_arch, gprs[K1]), K1);

	/* Jump to guest */
	uasm_i_eret(&p);

	uasm_resolve_relocs(relocs, labels);

	return p;
}

/**
 * kvm_mips_build_exception() - Assemble first level guest exception handler.
 * @addr: Address to start writing code.
 *
 * Assemble exception vector code for guest execution. The generated vector will
 * jump to the common exception handler generated by kvm_mips_build_exit().
 *
 * Returns: Next address after end of written function.
 */
void *kvm_mips_build_exception(void *addr)
{
	u32 *p = addr;

	/* Save guest k0 */
	uasm_i_mtc0(&p, K0, C0_ERROREPC);
	uasm_i_ehb(&p);

	/* Get EBASE */
	uasm_i_mfc0(&p, K0, C0_EBASE);
	/* Get rid of CPUNum */
	uasm_i_srl(&p, K0, K0, 10);
	uasm_i_sll(&p, K0, K0, 10);
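	/*
	 * K0 now holds the guest exception base address: EBASE bits 9:0 are
	 * the CPU number, so shifting them out leaves the base that the k1
	 * scratch slot and the common handler are placed relative to.
	 */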
	/* Save k1 @ offset 0x3000 */
	UASM_i_SW(&p, K1, 0x3000, K0);

	/* Exception handler is installed @ offset 0x2000 */
	uasm_i_addiu(&p, K0, K0, 0x2000);
	/* Jump to the function */
	uasm_i_jr(&p, K0);
	uasm_i_nop(&p);

	return p;
}

/**
 * kvm_mips_build_exit() - Assemble common guest exit handler.
 * @addr: Address to start writing code.
 *
 * Assemble the generic guest exit handling code. This is called by the
 * exception vectors (generated by kvm_mips_build_exception()), and calls
 * kvm_mips_handle_exit(), then either resumes the guest or returns to the host
 * depending on the return value.
 *
 * Returns: Next address after end of written function.
 */
void *kvm_mips_build_exit(void *addr)
{
	u32 *p = addr;
	unsigned int i;
	struct uasm_label labels[3];
	struct uasm_reloc relocs[3];
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	/*
	 * Generic Guest exception handler. We end up here when the guest
	 * does something that causes a trap to kernel mode.
	 */

	/* Get the VCPU pointer from DDATA_LO */
	uasm_i_mfc0(&p, K1, C0_DDATA_LO);
	uasm_i_addiu(&p, K1, K1, offsetof(struct kvm_vcpu, arch));

	/* Start saving Guest context to VCPU */
	for (i = 0; i < 32; ++i) {
		/* Guest k0/k1 saved later */
		if (i == K0 || i == K1)
			continue;
		UASM_i_SW(&p, i, offsetof(struct kvm_vcpu_arch, gprs[i]), K1);
	}

	/* We need to save hi/lo and restore them on the way out */
	uasm_i_mfhi(&p, T0);
	UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, hi), K1);

	uasm_i_mflo(&p, T0);
	UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, lo), K1);

	/* Finally save guest k0/k1 to VCPU */
	uasm_i_mfc0(&p, T0, C0_ERROREPC);
	UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, gprs[K0]), K1);

	/* Get GUEST k1 and save it in VCPU */
	uasm_i_addiu(&p, T1, ZERO, ~0x2ff);
	uasm_i_mfc0(&p, T0, C0_EBASE);
	uasm_i_and(&p, T0, T0, T1);
	UASM_i_LW(&p, T0, 0x3000, T0);
	UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, gprs[K1]), K1);

	/* Now that context has been saved, we can use other registers */

	/* Restore vcpu */
	uasm_i_mfc0(&p, A1, C0_DDATA_LO);
	uasm_i_move(&p, S1, A1);

	/* Restore run (vcpu->run) */
	UASM_i_LW(&p, A0, offsetof(struct kvm_vcpu, run), A1);
	/* Save pointer to run in s0, will be saved by the compiler */
	uasm_i_move(&p, S0, A0);

	/*
	 * Save Host level EPC, BadVaddr and Cause to VCPU, useful to process
	 * the exception
	 */
	uasm_i_mfc0(&p, K0, C0_EPC);
	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, pc), K1);

	uasm_i_mfc0(&p, K0, C0_BADVADDR);
	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, host_cp0_badvaddr),
		  K1);

	uasm_i_mfc0(&p, K0, C0_CAUSE);
	uasm_i_sw(&p, K0, offsetof(struct kvm_vcpu_arch, host_cp0_cause), K1);

	/* Now restore the host state just enough to run the handlers */

	/* Switch EBASE to the one used by Linux */
	/* load up the host EBASE */
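	/*
	 * Status.BEV is set (below) before EBASE is rewritten so that any
	 * exception taken while the exception base is being switched uses the
	 * fixed bootstrap vectors rather than a half-updated EBASE.
	 */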
	uasm_i_mfc0(&p, V0, C0_STATUS);

	uasm_i_lui(&p, AT, ST0_BEV >> 16);
	uasm_i_or(&p, K0, V0, AT);

	uasm_i_mtc0(&p, K0, C0_STATUS);
	uasm_i_ehb(&p);

	UASM_i_LA_mostly(&p, K0, (long)&ebase);
	UASM_i_LW(&p, K0, uasm_rel_lo((long)&ebase), K0);
	uasm_i_mtc0(&p, K0, C0_EBASE);

	if (raw_cpu_has_fpu) {
		/*
		 * If FPU is enabled, save FCR31 and clear it so that later
		 * ctc1's don't trigger FPE for pending exceptions.
		 */
		uasm_i_lui(&p, AT, ST0_CU1 >> 16);
		uasm_i_and(&p, V1, V0, AT);
		uasm_il_beqz(&p, &r, V1, label_fpu_1);
		uasm_i_nop(&p);
		uasm_i_cfc1(&p, T0, 31);
		uasm_i_sw(&p, T0, offsetof(struct kvm_vcpu_arch, fpu.fcr31),
			  K1);
		uasm_i_ctc1(&p, ZERO, 31);
		uasm_l_fpu_1(&l, p);
	}

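	/*
	 * As with the FPU block above, the MSA context handling is only
	 * assembled at all if the host CPU has MSA: cpu_has_msa is evaluated
	 * here at code-generation (uasm) time, not on every guest exit.
	 */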
	if (cpu_has_msa) {
		/*
		 * If MSA is enabled, save MSACSR and clear it so that later
		 * instructions don't trigger MSAFPE for pending exceptions.
		 */
		uasm_i_mfc0(&p, T0, C0_CONFIG5);
		uasm_i_ext(&p, T0, T0, 27, 1); /* MIPS_CONF5_MSAEN */
		uasm_il_beqz(&p, &r, T0, label_msa_1);
		uasm_i_nop(&p);
		uasm_i_cfcmsa(&p, T0, MSA_CSR);
		uasm_i_sw(&p, T0, offsetof(struct kvm_vcpu_arch, fpu.msacsr),
			  K1);
		uasm_i_ctcmsa(&p, MSA_CSR, ZERO);
		uasm_l_msa_1(&l, p);
	}

	/* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
	uasm_i_addiu(&p, AT, ZERO, ~(ST0_EXL | KSU_USER | ST0_IE));
	uasm_i_and(&p, V0, V0, AT);
	uasm_i_lui(&p, AT, ST0_CU0 >> 16);
	uasm_i_or(&p, V0, V0, AT);
	uasm_i_mtc0(&p, V0, C0_STATUS);
	uasm_i_ehb(&p);

	/* Load up host GP */
	UASM_i_LW(&p, GP, offsetof(struct kvm_vcpu_arch, host_gp), K1);

	/* Need a stack before we can jump to "C" */
	UASM_i_LW(&p, SP, offsetof(struct kvm_vcpu_arch, host_stack), K1);

	/* Saved host state */
	uasm_i_addiu(&p, SP, SP, -(int)sizeof(struct pt_regs));

	/*
	 * XXXKYMA do we need to load the host ASID, maybe not because the
	 * kernel entries are marked GLOBAL, need to verify
	 */

	/* Restore host DDATA_LO */
	UASM_i_LW(&p, K0, offsetof(struct pt_regs, cp0_epc), SP);
	uasm_i_mtc0(&p, K0, C0_DDATA_LO);

	/* Restore RDHWR access */
	UASM_i_LA_mostly(&p, K0, (long)&hwrena);
	uasm_i_lw(&p, K0, uasm_rel_lo((long)&hwrena), K0);
	uasm_i_mtc0(&p, K0, C0_HWRENA);

	/* Jump to handler */
	/*
	 * XXXKYMA: not sure if this is safe, how large is the stack??
	 * Now jump to the kvm_mips_handle_exit() to see if we can deal
	 * with this in the kernel
	 */
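	/*
	 * The addiu in the jalr delay slot below carves out the CALLFRAME_SIZ
	 * argument save area required by the calling convention, so the stack
	 * frame exists before kvm_mips_handle_exit() starts executing.
	 */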
	UASM_i_LA(&p, T9, (unsigned long)kvm_mips_handle_exit);
	uasm_i_jalr(&p, RA, T9);
	uasm_i_addiu(&p, SP, SP, -CALLFRAME_SIZ);

	uasm_resolve_relocs(relocs, labels);

	p = kvm_mips_build_ret_from_exit(p);

	return p;
}

/**
 * kvm_mips_build_ret_from_exit() - Assemble guest exit return handler.
 * @addr: Address to start writing code.
 *
 * Assemble the code to handle the return from kvm_mips_handle_exit(), either
 * resuming the guest or returning to the host depending on the return value.
 *
 * Returns: Next address after end of written function.
 */
static void *kvm_mips_build_ret_from_exit(void *addr)
{
	u32 *p = addr;
	struct uasm_label labels[2];
	struct uasm_reloc relocs[2];
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	/* Return from handler; make sure interrupts are disabled */
	uasm_i_di(&p, ZERO);
	uasm_i_ehb(&p);

	/*
	 * XXXKYMA: k0/k1 could have been blown away if we processed
	 * an exception while we were handling the exception from the
	 * guest; reload k1
	 */

	uasm_i_move(&p, K1, S1);
	uasm_i_addiu(&p, K1, K1, offsetof(struct kvm_vcpu, arch));

	/*
	 * Check the return value; it should tell us if we are returning to
	 * the host (handle I/O etc.) or resuming the guest
	 */
	uasm_i_andi(&p, T0, V0, RESUME_HOST);
	uasm_il_bnez(&p, &r, T0, label_return_to_host);
	uasm_i_nop(&p);

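	/*
	 * RESUME_GUEST falls through to the guest-resume code assembled
	 * directly below; RESUME_HOST branches over it to the return-to-host
	 * code assembled at label_return_to_host.
	 */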
	p = kvm_mips_build_ret_to_guest(p);

	uasm_l_return_to_host(&l, p);
	p = kvm_mips_build_ret_to_host(p);

	uasm_resolve_relocs(relocs, labels);

	return p;
}

/**
 * kvm_mips_build_ret_to_guest() - Assemble code to return to the guest.
 * @addr: Address to start writing code.
 *
 * Assemble the code to handle return from the guest exit handler
 * (kvm_mips_handle_exit()) back to the guest.
 *
 * Returns: Next address after end of written function.
 */
static void *kvm_mips_build_ret_to_guest(void *addr)
{
	u32 *p = addr;

	/* Put the saved pointer to vcpu (s1) back into the DDATA_LO Register */
	uasm_i_mtc0(&p, S1, C0_DDATA_LO);

	/* Load up the Guest EBASE to minimize the window where BEV is set */
	UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, guest_ebase), K1);

	/* Switch EBASE back to the one used by KVM */
	uasm_i_mfc0(&p, V1, C0_STATUS);
	uasm_i_lui(&p, AT, ST0_BEV >> 16);
	uasm_i_or(&p, K0, V1, AT);
	uasm_i_mtc0(&p, K0, C0_STATUS);
	uasm_i_ehb(&p);
	uasm_i_mtc0(&p, T0, C0_EBASE);

	/* Setup status register for running guest in UM */
	uasm_i_ori(&p, V1, V1, ST0_EXL | KSU_USER | ST0_IE);
	UASM_i_LA(&p, AT, ~(ST0_CU0 | ST0_MX));
	uasm_i_and(&p, V1, V1, AT);
	uasm_i_mtc0(&p, V1, C0_STATUS);
	uasm_i_ehb(&p);

	p = kvm_mips_build_enter_guest(p);

	return p;
}

/**
 * kvm_mips_build_ret_to_host() - Assemble code to return to the host.
 * @addr: Address to start writing code.
 *
 * Assemble the code to handle return from the guest exit handler
 * (kvm_mips_handle_exit()) back to the host, i.e. to the caller of the vcpu_run
 * function generated by kvm_mips_build_vcpu_run().
 *
 * Returns: Next address after end of written function.
 */
static void *kvm_mips_build_ret_to_host(void *addr)
{
	u32 *p = addr;
	unsigned int i;

	/* EBASE is already pointing to Linux */
	UASM_i_LW(&p, K1, offsetof(struct kvm_vcpu_arch, host_stack), K1);
	uasm_i_addiu(&p, K1, K1, -(int)sizeof(struct pt_regs));

	/* Restore host DDATA_LO */
	UASM_i_LW(&p, K0, offsetof(struct pt_regs, cp0_epc), K1);
	uasm_i_mtc0(&p, K0, C0_DDATA_LO);

	/*
	 * r2/v0 is the return code, shift it down by 2 (arithmetic)
	 * to recover the err code
	 */
	uasm_i_sra(&p, K0, V0, 2);
	uasm_i_move(&p, V0, K0);

	/* Load context saved on the host stack */
	for (i = 16; i < 31; ++i) {
		if (i == 24)
			i = 28;
		UASM_i_LW(&p, i, offsetof(struct pt_regs, regs[i]), K1);
	}

	UASM_i_LW(&p, K0, offsetof(struct pt_regs, hi), K1);
	uasm_i_mthi(&p, K0);

	UASM_i_LW(&p, K0, offsetof(struct pt_regs, lo), K1);
	uasm_i_mtlo(&p, K0);

	/* Restore RDHWR access */
	UASM_i_LA_mostly(&p, K0, (long)&hwrena);
	uasm_i_lw(&p, K0, uasm_rel_lo((long)&hwrena), K0);
	uasm_i_mtc0(&p, K0, C0_HWRENA);

	/* Restore RA, which is the address we will return to */
	UASM_i_LW(&p, RA, offsetof(struct pt_regs, regs[RA]), K1);
	uasm_i_jr(&p, RA);
	uasm_i_nop(&p);

	return p;
}
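
/*
 * Putting the builders together (an illustrative sketch only; the real caller
 * lives in mips.c, and the allocation, offsets and cache flushing details are
 * the caller's business): given a block of memory "gebase" reserved as the
 * guest exception base, the arch code is expected to emit a vector with
 * kvm_mips_build_exception() at each exception vector offset, the common exit
 * handler at gebase + 0x2000 with kvm_mips_build_exit(), and the run function
 * with kvm_mips_build_vcpu_run(), roughly:
 *
 *	void *p = gebase + 0x2000;
 *
 *	p = kvm_mips_build_exit(p);
 *	vcpu_run = p;
 *	p = kvm_mips_build_vcpu_run(p);
 *
 * where "vcpu_run" stands for wherever the caller records the generated entry
 * point.
 */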