/* arch/mips/kernel/vpe.c — "MIPS: APRP: Split VPE loader into separate files." */
1 /*
2 * Copyright (C) 2004, 2005 MIPS Technologies, Inc. All rights reserved.
3 *
4 * This program is free software; you can distribute it and/or modify it
5 * under the terms of the GNU General Public License (Version 2) as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
11 * for more details.
12 *
13 * You should have received a copy of the GNU General Public License along
14 * with this program; if not, write to the Free Software Foundation, Inc.,
15 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
16 */
17
18 /*
19 * VPE support module
20 *
21 * Provides support for loading a MIPS SP program on VPE1.
22 * The SP environment is rather simple, no tlb's. It needs to be relocatable
23 * (or partially linked). You should initialise your stack in the startup
24 * code. This loader looks for the symbol __start and sets up
25 * execution to resume from there. The MIPS SDE kit contains suitable examples.
26 *
27 * To load and run, simply cat a SP 'program file' to /dev/vpe1.
28 * i.e cat spapp >/dev/vpe1.
29 */
30 #include <linux/kernel.h>
31 #include <linux/device.h>
32 #include <linux/fs.h>
33 #include <linux/init.h>
34 #include <asm/uaccess.h>
35 #include <linux/slab.h>
36 #include <linux/list.h>
37 #include <linux/vmalloc.h>
38 #include <linux/elf.h>
39 #include <linux/seq_file.h>
40 #include <linux/syscalls.h>
41 #include <linux/moduleloader.h>
42 #include <linux/interrupt.h>
43 #include <linux/poll.h>
44 #include <linux/bootmem.h>
45 #include <asm/mipsregs.h>
46 #include <asm/mipsmtregs.h>
47 #include <asm/cacheflush.h>
48 #include <linux/atomic.h>
49 #include <asm/cpu.h>
50 #include <asm/mips_mt.h>
51 #include <asm/processor.h>
52 #include <asm/vpe.h>
53
/* Fallback when the architecture does not define a "small data" section
   flag; used below when categorising sections in layout_sections(). */
#ifndef ARCH_SHF_SMALL
#define ARCH_SHF_SMALL 0
#endif

/* If this is set, the section belongs in the init part of the module */
#define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1))
60
/* Global loader state: the lists of known VPEs and TCs plus their locks. */
struct vpe_control vpecontrol = {
	.vpe_list_lock = __SPIN_LOCK_UNLOCKED(vpe_list_lock),
	.vpe_list = LIST_HEAD_INIT(vpecontrol.vpe_list),
	.tc_list_lock = __SPIN_LOCK_UNLOCKED(tc_list_lock),
	.tc_list = LIST_HEAD_INIT(vpecontrol.tc_list)
};
67
/* get the vpe associated with this minor */
/*
 * NOTE(review): the "minor" argument is currently ignored -- the list is
 * searched for the entry whose minor is VPE_MODULE_MINOR, matching the
 * single-device assumption also made in vpe_open() and alloc_vpe().
 * Confirm before relying on multi-VPE lookup.
 */
struct vpe *get_vpe(int minor)
{
	struct vpe *res, *v;

	/* the loader only makes sense on an MT-capable core */
	if (!cpu_has_mipsmt)
		return NULL;

	res = NULL;
	spin_lock(&vpecontrol.vpe_list_lock);
	list_for_each_entry(v, &vpecontrol.vpe_list, list) {
		if (v->minor == VPE_MODULE_MINOR) {
			res = v;
			break;
		}
	}
	spin_unlock(&vpecontrol.vpe_list_lock);

	return res;
}
88
89 /* get the vpe associated with this minor */
90 struct tc *get_tc(int index)
91 {
92 struct tc *res, *t;
93
94 res = NULL;
95 spin_lock(&vpecontrol.tc_list_lock);
96 list_for_each_entry(t, &vpecontrol.tc_list, list) {
97 if (t->index == index) {
98 res = t;
99 break;
100 }
101 }
102 spin_unlock(&vpecontrol.tc_list_lock);
103
104 return res;
105 }
106
107 /* allocate a vpe and associate it with this minor (or index) */
108 struct vpe *alloc_vpe(int minor)
109 {
110 struct vpe *v;
111
112 if ((v = kzalloc(sizeof(struct vpe), GFP_KERNEL)) == NULL)
113 return NULL;
114
115 INIT_LIST_HEAD(&v->tc);
116 spin_lock(&vpecontrol.vpe_list_lock);
117 list_add_tail(&v->list, &vpecontrol.vpe_list);
118 spin_unlock(&vpecontrol.vpe_list_lock);
119
120 INIT_LIST_HEAD(&v->notify);
121 v->minor = VPE_MODULE_MINOR;
122
123 return v;
124 }
125
126 /* allocate a tc. At startup only tc0 is running, all other can be halted. */
127 struct tc *alloc_tc(int index)
128 {
129 struct tc *tc;
130
131 if ((tc = kzalloc(sizeof(struct tc), GFP_KERNEL)) == NULL)
132 goto out;
133
134 INIT_LIST_HEAD(&tc->tc);
135 tc->index = index;
136
137 spin_lock(&vpecontrol.tc_list_lock);
138 list_add_tail(&tc->list, &vpecontrol.tc_list);
139 spin_unlock(&vpecontrol.tc_list_lock);
140
141 out:
142 return tc;
143 }
144
145 /* clean up and free everything */
146 void release_vpe(struct vpe *v)
147 {
148 list_del(&v->list);
149 if (v->load_addr)
150 release_progmem(v);
151 kfree(v);
152 }
153
/* Find some VPE program space */
/*
 * With CONFIG_MIPS_VPE_LOADER_TOM the program runs from the memory just
 * above what Linux manages; otherwise a plain kernel allocation is used.
 * NOTE(review): the TOM path does not bound "len" against the memory
 * actually present above max_low_pfn -- verify callers keep it small.
 */
void *alloc_progmem(unsigned long len)
{
	void *addr;

#ifdef CONFIG_MIPS_VPE_LOADER_TOM
	/*
	 * This means you must tell Linux to use less memory than you
	 * physically have, for example by passing a mem= boot argument.
	 */
	addr = pfn_to_kaddr(max_low_pfn);
	memset(addr, 0, len);
#else
	/* simple grab some mem for now */
	addr = kzalloc(len, GFP_KERNEL);
#endif

	return addr;
}
173
/* Release memory obtained from alloc_progmem(). In the TOM
   (take-over-memory) configuration the space is not kernel-managed,
   so there is nothing to free. */
void release_progmem(void *ptr)
{
#ifndef CONFIG_MIPS_VPE_LOADER_TOM
	kfree(ptr);
#endif
}
180
181 /* Update size with this section: return offset. */
182 static long get_offset(unsigned long *size, Elf_Shdr * sechdr)
183 {
184 long ret;
185
186 ret = ALIGN(*size, sechdr->sh_addralign ? : 1);
187 *size = ret + sechdr->sh_size;
188 return ret;
189 }
190
/* Lay out the SHF_ALLOC sections in a way not dissimilar to how ld
   might -- code, read-only data, read-write data, small data. Tally
   sizes, and place the offsets into sh_entsize fields: high bit means it
   belongs in init. */
static void layout_sections(struct module *mod, const Elf_Ehdr * hdr,
			    Elf_Shdr * sechdrs, const char *secstrings)
{
	/*
	 * Each masks[m] pair is (flags that must be set, flags that must
	 * be clear); one pass is made per category so sections of the
	 * same kind end up contiguous in the image.
	 */
	static unsigned long const masks[][2] = {
		/* NOTE: all executable code must be the first section
		 * in this array; otherwise modify the text_size
		 * finder in the two loops below */
		{SHF_EXECINSTR | SHF_ALLOC, ARCH_SHF_SMALL},
		{SHF_ALLOC, SHF_WRITE | ARCH_SHF_SMALL},
		{SHF_WRITE | SHF_ALLOC, ARCH_SHF_SMALL},
		{ARCH_SHF_SMALL | SHF_ALLOC, 0}
	};
	unsigned int m, i;

	/* sh_entsize == ~0UL marks a section as not yet placed */
	for (i = 0; i < hdr->e_shnum; i++)
		sechdrs[i].sh_entsize = ~0UL;

	for (m = 0; m < ARRAY_SIZE(masks); ++m) {
		for (i = 0; i < hdr->e_shnum; ++i) {
			Elf_Shdr *s = &sechdrs[i];

			// || strncmp(secstrings + s->sh_name, ".init", 5) == 0)
			if ((s->sh_flags & masks[m][0]) != masks[m][0]
			    || (s->sh_flags & masks[m][1])
			    || s->sh_entsize != ~0UL)
				continue;
			/* record the section's offset within the image */
			s->sh_entsize =
				get_offset((unsigned long *)&mod->core_size, s);
		}

		/* after the first (executable) pass, everything laid out
		   so far is text */
		if (m == 0)
			mod->core_text_size = mod->core_size;

	}
}
230
231
/* from module-elf32.c, but subverted a little */

/*
 * A HI16 relocation cannot be resolved until its partnering LO16 is
 * seen (the carry out of the low half is needed), so pending HI16s are
 * queued on this singly-linked list and fixed up in apply_r_mips_lo16().
 */
struct mips_hi16 {
	struct mips_hi16 *next;	/* next pending HI16 */
	Elf32_Addr *addr;	/* instruction word to patch */
	Elf32_Addr value;	/* symbol value for this relocation */
};

static struct mips_hi16 *mips_hi16_list;
/* the loaded program's gp: offset within its 64K window and the
   absolute address, captured from the _gp symbol in simplify_symbols() */
static unsigned int gp_offs, gp_addr;
242
/* R_MIPS_NONE: no-op relocation, nothing to patch. */
static int apply_r_mips_none(struct module *me, uint32_t *location,
	Elf32_Addr v)
{
	return 0;
}
248
249 static int apply_r_mips_gprel16(struct module *me, uint32_t *location,
250 Elf32_Addr v)
251 {
252 int rel;
253
254 if( !(*location & 0xffff) ) {
255 rel = (int)v - gp_addr;
256 }
257 else {
258 /* .sbss + gp(relative) + offset */
259 /* kludge! */
260 rel = (int)(short)((int)v + gp_offs +
261 (int)(short)(*location & 0xffff) - gp_addr);
262 }
263
264 if( (rel > 32768) || (rel < -32768) ) {
265 printk(KERN_DEBUG "VPE loader: apply_r_mips_gprel16: "
266 "relative address 0x%x out of range of gp register\n",
267 rel);
268 return -ENOEXEC;
269 }
270
271 *location = (*location & 0xffff0000) | (rel & 0xffff);
272
273 return 0;
274 }
275
276 static int apply_r_mips_pc16(struct module *me, uint32_t *location,
277 Elf32_Addr v)
278 {
279 int rel;
280 rel = (((unsigned int)v - (unsigned int)location));
281 rel >>= 2; // because the offset is in _instructions_ not bytes.
282 rel -= 1; // and one instruction less due to the branch delay slot.
283
284 if( (rel > 32768) || (rel < -32768) ) {
285 printk(KERN_DEBUG "VPE loader: "
286 "apply_r_mips_pc16: relative address out of range 0x%x\n", rel);
287 return -ENOEXEC;
288 }
289
290 *location = (*location & 0xffff0000) | (rel & 0xffff);
291
292 return 0;
293 }
294
295 static int apply_r_mips_32(struct module *me, uint32_t *location,
296 Elf32_Addr v)
297 {
298 *location += v;
299
300 return 0;
301 }
302
/* R_MIPS_26: patch the 26-bit jump-target field of a j/jal instruction. */
static int apply_r_mips_26(struct module *me, uint32_t *location,
	Elf32_Addr v)
{
	/* jump targets must be word-aligned */
	if (v % 4) {
		printk(KERN_DEBUG "VPE loader: apply_r_mips_26 "
		       " unaligned relocation\n");
		return -ENOEXEC;
	}

	/*
	 * Not desperately convinced this is a good check of an overflow condition
	 * anyway. But it gets in the way of handling undefined weak symbols which
	 * we want to set to zero.
	 * if ((v & 0xf0000000) != (((unsigned long)location + 4) & 0xf0000000)) {
	 * printk(KERN_ERR
	 * "module %s: relocation overflow\n",
	 * me->name);
	 * return -ENOEXEC;
	 * }
	 */

	/* add the word-target (v >> 2) into the existing 26-bit field */
	*location = (*location & ~0x03ffffff) |
		((*location + (v >> 2)) & 0x03ffffff);
	return 0;
}
328
/* R_MIPS_HI16: upper half of a HI16/LO16 pair; deferred until the LO16
   arrives. Returns -ENOMEM if the bookkeeping node can't be allocated. */
static int apply_r_mips_hi16(struct module *me, uint32_t *location,
	Elf32_Addr v)
{
	struct mips_hi16 *n;

	/*
	 * We cannot relocate this one now because we don't know the value of
	 * the carry we need to add. Save the information, and let LO16 do the
	 * actual relocation.
	 */
	n = kmalloc(sizeof *n, GFP_KERNEL);
	if (!n)
		return -ENOMEM;

	/* push onto the pending-HI16 list consumed by apply_r_mips_lo16() */
	n->addr = location;
	n->value = v;
	n->next = mips_hi16_list;
	mips_hi16_list = n;

	return 0;
}
350
/* R_MIPS_LO16: lower half of a HI16/LO16 pair. First resolves every
   pending HI16 queued by apply_r_mips_hi16() (which needed this LO16's
   addend for the carry), then patches the LO16 itself. */
static int apply_r_mips_lo16(struct module *me, uint32_t *location,
	Elf32_Addr v)
{
	unsigned long insnlo = *location;
	Elf32_Addr val, vallo;
	struct mips_hi16 *l, *next;

	/* Sign extend the addend we extract from the lo insn. */
	vallo = ((insnlo & 0xffff) ^ 0x8000) - 0x8000;

	if (mips_hi16_list != NULL) {

		l = mips_hi16_list;
		while (l != NULL) {
			unsigned long insn;

			/*
			 * The value for the HI16 had best be the same.
			 */
			if (v != l->value) {
				printk(KERN_DEBUG "VPE loader: "
				       "apply_r_mips_lo16/hi16: \t"
				       "inconsistent value information\n");
				goto out_free;
			}

			/*
			 * Do the HI16 relocation. Note that we actually don't
			 * need to know anything about the LO16 itself, except
			 * where to find the low 16 bits of the addend needed
			 * by the LO16.
			 */
			insn = *l->addr;
			val = ((insn & 0xffff) << 16) + vallo;
			val += v;

			/*
			 * Account for the sign extension that will happen in
			 * the low bits.
			 */
			val = ((val >> 16) + ((val & 0x8000) != 0)) & 0xffff;

			insn = (insn & ~0xffff) | val;
			*l->addr = insn;

			next = l->next;
			kfree(l);
			l = next;
		}

		mips_hi16_list = NULL;
	}

	/*
	 * Ok, we're done with the HI16 relocs. Now deal with the LO16.
	 */
	val = v + vallo;
	insnlo = (insnlo & ~0xffff) | (val & 0xffff);
	*location = insnlo;

	return 0;

out_free:
	/* free the rest of the pending list so the nodes are neither
	   applied with stale values nor leaked */
	while (l != NULL) {
		next = l->next;
		kfree(l);
		l = next;
	}
	mips_hi16_list = NULL;

	return -ENOEXEC;
}
423
/* Dispatch table for REL relocations, indexed by ELF32_R_TYPE().
   Types with no handler (gaps in the designated initializers, e.g.
   R_MIPS_16, R_MIPS_REL32) are NULL. */
static int (*reloc_handlers[]) (struct module *me, uint32_t *location,
	Elf32_Addr v) = {
	[R_MIPS_NONE] = apply_r_mips_none,
	[R_MIPS_32] = apply_r_mips_32,
	[R_MIPS_26] = apply_r_mips_26,
	[R_MIPS_HI16] = apply_r_mips_hi16,
	[R_MIPS_LO16] = apply_r_mips_lo16,
	[R_MIPS_GPREL16] = apply_r_mips_gprel16,
	[R_MIPS_PC16] = apply_r_mips_pc16
};

/* Human-readable names matching reloc_handlers, for error reporting. */
static char *rstrs[] = {
	[R_MIPS_NONE] = "MIPS_NONE",
	[R_MIPS_32] = "MIPS_32",
	[R_MIPS_26] = "MIPS_26",
	[R_MIPS_HI16] = "MIPS_HI16",
	[R_MIPS_LO16] = "MIPS_LO16",
	[R_MIPS_GPREL16] = "MIPS_GPREL16",
	[R_MIPS_PC16] = "MIPS_PC16"
};
444
445 static int apply_relocations(Elf32_Shdr *sechdrs,
446 const char *strtab,
447 unsigned int symindex,
448 unsigned int relsec,
449 struct module *me)
450 {
451 Elf32_Rel *rel = (void *) sechdrs[relsec].sh_addr;
452 Elf32_Sym *sym;
453 uint32_t *location;
454 unsigned int i;
455 Elf32_Addr v;
456 int res;
457
458 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
459 Elf32_Word r_info = rel[i].r_info;
460
461 /* This is where to make the change */
462 location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
463 + rel[i].r_offset;
464 /* This is the symbol it is referring to */
465 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
466 + ELF32_R_SYM(r_info);
467
468 if (!sym->st_value) {
469 printk(KERN_DEBUG "%s: undefined weak symbol %s\n",
470 me->name, strtab + sym->st_name);
471 /* just print the warning, dont barf */
472 }
473
474 v = sym->st_value;
475
476 res = reloc_handlers[ELF32_R_TYPE(r_info)](me, location, v);
477 if( res ) {
478 char *r = rstrs[ELF32_R_TYPE(r_info)];
479 printk(KERN_WARNING "VPE loader: .text+0x%x "
480 "relocation type %s for symbol \"%s\" failed\n",
481 rel[i].r_offset, r ? r : "UNKNOWN",
482 strtab + sym->st_name);
483 return res;
484 }
485 }
486
487 return 0;
488 }
489
490 static inline void save_gp_address(unsigned int secbase, unsigned int rel)
491 {
492 gp_addr = secbase + rel;
493 gp_offs = gp_addr - (secbase & 0xffff0000);
494 }
495 /* end module-elf32.c */
496
497
498
/* Change all symbols so that sh_value encodes the pointer directly. */
static void simplify_symbols(Elf_Shdr * sechdrs,
			     unsigned int symindex,
			     const char *strtab,
			     const char *secstrings,
			     unsigned int nsecs, struct module *mod)
{
	Elf_Sym *sym = (void *)sechdrs[symindex].sh_addr;
	unsigned long secbase, bssbase = 0;
	unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
	int size;

	/* find the .bss section for COMMON symbols */
	for (i = 0; i < nsecs; i++) {
		if (strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) == 0) {
			bssbase = sechdrs[i].sh_addr;
			break;
		}
	}

	/* entry 0 is the ELF null symbol; skip it */
	for (i = 1; i < n; i++) {
		switch (sym[i].st_shndx) {
		case SHN_COMMON:
			/* Allocate space for the symbol in the .bss section.
			   st_value is currently size.
			   We want it to have the address of the symbol. */

			size = sym[i].st_value;
			sym[i].st_value = bssbase;

			bssbase += size;
			break;

		case SHN_ABS:
			/* Don't need to do anything */
			break;

		case SHN_UNDEF:
			/* ret = -ENOENT; */
			break;

		case SHN_MIPS_SCOMMON:
			printk(KERN_DEBUG "simplify_symbols: ignoring SHN_MIPS_SCOMMON "
			       "symbol <%s> st_shndx %d\n", strtab + sym[i].st_name,
			       sym[i].st_shndx);
			// .sbss section
			break;

		default:
			/* ordinary symbol: rebase on its section's load address */
			secbase = sechdrs[sym[i].st_shndx].sh_addr;

			/* capture _gp for later R_MIPS_GPREL16 handling */
			if (strncmp(strtab + sym[i].st_name, "_gp", 3) == 0) {
				save_gp_address(secbase, sym[i].st_value);
			}

			sym[i].st_value += secbase;
			break;
		}
	}
}
559
#ifdef DEBUG_ELFLOADER
/* Debug helper: print the name and value of every symbol-table entry. */
static void dump_elfsymbols(Elf_Shdr * sechdrs, unsigned int symindex,
			    const char *strtab, struct module *mod)
{
	Elf_Sym *sym = (void *)sechdrs[symindex].sh_addr;
	unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);

	printk(KERN_DEBUG "dump_elfsymbols: n %d\n", n);
	for (i = 1; i < n; i++) {
		printk(KERN_DEBUG " i %d name <%s> 0x%x\n", i,
		       strtab + sym[i].st_name, sym[i].st_value);
	}
}
#endif
574
575 static int find_vpe_symbols(struct vpe * v, Elf_Shdr * sechdrs,
576 unsigned int symindex, const char *strtab,
577 struct module *mod)
578 {
579 Elf_Sym *sym = (void *)sechdrs[symindex].sh_addr;
580 unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
581
582 for (i = 1; i < n; i++) {
583 if (strcmp(strtab + sym[i].st_name, "__start") == 0) {
584 v->__start = sym[i].st_value;
585 }
586
587 if (strcmp(strtab + sym[i].st_name, "vpe_shared") == 0) {
588 v->shared_ptr = (void *)sym[i].st_value;
589 }
590 }
591
592 if ( (v->__start == 0) || (v->shared_ptr == NULL))
593 return -1;
594
595 return 0;
596 }
597
/*
 * Allocates a VPE with some program code space(the load address), copies the
 * contents of the program (p)buffer performing relocatations/etc, free's it
 * when finished.
 *
 * ET_REL objects are laid out, copied and relocated; ET_EXEC images have
 * their PT_LOAD segments copied to their linked physical addresses.
 * Returns 0 on success, -ENOEXEC/-ENOMEM on failure.
 */
static int vpe_elfload(struct vpe * v)
{
	Elf_Ehdr *hdr;
	Elf_Shdr *sechdrs;
	long err = 0;
	char *secstrings, *strtab = NULL;
	unsigned int len, i, symindex = 0, strindex = 0, relocate = 0;
	struct module mod; // so we can re-use the relocations code

	memset(&mod, 0, sizeof(struct module));
	strcpy(mod.name, "VPE loader");

	hdr = (Elf_Ehdr *) v->pbuffer;
	len = v->plen;

	/* Sanity checks against insmoding binaries or wrong arch,
	   weird elf version */
	if (memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0
	    || (hdr->e_type != ET_REL && hdr->e_type != ET_EXEC)
	    || !elf_check_arch(hdr)
	    || hdr->e_shentsize != sizeof(*sechdrs)) {
		printk(KERN_WARNING
		       "VPE loader: program wrong arch or weird elf version\n");

		return -ENOEXEC;
	}

	/* relocatable object -> lay out and relocate below */
	if (hdr->e_type == ET_REL)
		relocate = 1;

	/* the section header table must lie fully inside the buffer */
	if (len < hdr->e_shoff + hdr->e_shnum * sizeof(Elf_Shdr)) {
		printk(KERN_ERR "VPE loader: program length %u truncated\n",
		       len);

		return -ENOEXEC;
	}

	/* Convenience variables */
	sechdrs = (void *)hdr + hdr->e_shoff;
	secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
	sechdrs[0].sh_addr = 0;

	/* And these should exist, but gcc whinges if we don't init them */
	symindex = strindex = 0;

	if (relocate) {
		for (i = 1; i < hdr->e_shnum; i++) {
			/* every non-NOBITS section's data must fit in the buffer */
			if (sechdrs[i].sh_type != SHT_NOBITS
			    && len < sechdrs[i].sh_offset + sechdrs[i].sh_size) {
				printk(KERN_ERR "VPE program length %u truncated\n",
				       len);
				return -ENOEXEC;
			}

			/* Mark all sections sh_addr with their address in the
			   temporary image. */
			sechdrs[i].sh_addr = (size_t) hdr + sechdrs[i].sh_offset;

			/* Internal symbols and strings. */
			if (sechdrs[i].sh_type == SHT_SYMTAB) {
				symindex = i;
				strindex = sechdrs[i].sh_link;
				strtab = (char *)hdr + sechdrs[strindex].sh_offset;
			}
		}
		/* compute per-section offsets (stored in sh_entsize) */
		layout_sections(&mod, hdr, sechdrs, secstrings);
	}

	v->load_addr = alloc_progmem(mod.core_size);
	if (!v->load_addr)
		return -ENOMEM;

	pr_info("VPE loader: loading to %p\n", v->load_addr);

	if (relocate) {
		/* copy SHF_ALLOC sections into the program image */
		for (i = 0; i < hdr->e_shnum; i++) {
			void *dest;

			if (!(sechdrs[i].sh_flags & SHF_ALLOC))
				continue;

			dest = v->load_addr + sechdrs[i].sh_entsize;

			if (sechdrs[i].sh_type != SHT_NOBITS)
				memcpy(dest, (void *)sechdrs[i].sh_addr,
				       sechdrs[i].sh_size);
			/* Update sh_addr to point to copy in image. */
			sechdrs[i].sh_addr = (unsigned long)dest;

			printk(KERN_DEBUG " section sh_name %s sh_addr 0x%x\n",
			       secstrings + sechdrs[i].sh_name, sechdrs[i].sh_addr);
		}

		/* Fix up syms, so that st_value is a pointer to location. */
		simplify_symbols(sechdrs, symindex, strtab, secstrings,
				 hdr->e_shnum, &mod);

		/* Now do relocations. */
		for (i = 1; i < hdr->e_shnum; i++) {
			const char *strtab = (char *)sechdrs[strindex].sh_addr;
			unsigned int info = sechdrs[i].sh_info;

			/* Not a valid relocation section? */
			if (info >= hdr->e_shnum)
				continue;

			/* Don't bother with non-allocated sections */
			if (!(sechdrs[info].sh_flags & SHF_ALLOC))
				continue;

			if (sechdrs[i].sh_type == SHT_REL)
				err = apply_relocations(sechdrs, strtab, symindex, i,
							&mod);
			else if (sechdrs[i].sh_type == SHT_RELA)
				err = apply_relocate_add(sechdrs, strtab, symindex, i,
							 &mod);
			if (err < 0)
				return err;

		}
	} else {
		/* ET_EXEC: copy PT_LOAD segments to their linked addresses */
		struct elf_phdr *phdr = (struct elf_phdr *) ((char *)hdr + hdr->e_phoff);

		for (i = 0; i < hdr->e_phnum; i++) {
			if (phdr->p_type == PT_LOAD) {
				memcpy((void *)phdr->p_paddr,
				       (char *)hdr + phdr->p_offset,
				       phdr->p_filesz);
				/* zero the BSS tail of the segment */
				memset((void *)phdr->p_paddr + phdr->p_filesz,
				       0, phdr->p_memsz - phdr->p_filesz);
			}
			phdr++;
		}

		for (i = 0; i < hdr->e_shnum; i++) {
			/* Internal symbols and strings. */
			if (sechdrs[i].sh_type == SHT_SYMTAB) {
				symindex = i;
				strindex = sechdrs[i].sh_link;
				strtab = (char *)hdr + sechdrs[strindex].sh_offset;

				/* mark the symtab's address for when we try to find the
				   magic symbols */
				sechdrs[i].sh_addr = (size_t) hdr + sechdrs[i].sh_offset;
			}
		}
	}

	/* make sure it's physically written out */
	flush_icache_range((unsigned long)v->load_addr,
			   (unsigned long)v->load_addr + v->len);

	if ((find_vpe_symbols(v, sechdrs, symindex, strtab, &mod)) < 0) {
		if (v->__start == 0) {
			/* no entry point: fatal */
			printk(KERN_WARNING "VPE loader: program does not contain "
			       "a __start symbol\n");
			return -ENOEXEC;
		}

		/* missing shared area is tolerated, just warn */
		if (v->shared_ptr == NULL)
			printk(KERN_WARNING "VPE loader: "
			       "program does not contain vpe_shared symbol.\n"
			       " Unable to use AMVP (AP/SP) facilities.\n");
	}

	printk(" elf loaded\n");
	return 0;
}
771
772 static int getcwd(char *buff, int size)
773 {
774 mm_segment_t old_fs;
775 int ret;
776
777 old_fs = get_fs();
778 set_fs(KERNEL_DS);
779
780 ret = sys_getcwd(buff, size);
781
782 set_fs(old_fs);
783
784 return ret;
785 }
786
/* checks VPE is unused and gets ready to load program */
static int vpe_open(struct inode *inode, struct file *filp)
{
	enum vpe_state state;
	struct vpe_notifications *not;
	struct vpe *v;
	int ret;

	if (VPE_MODULE_MINOR != iminor(inode)) {
		/* assume only 1 device at the moment. */
		pr_warning("VPE loader: only vpe1 is supported\n");

		return -ENODEV;
	}

	if ((v = get_vpe(aprp_cpu_index())) == NULL) {
		pr_warning("VPE loader: unable to get vpe\n");

		return -ENODEV;
	}

	/* atomically claim the VPE; if it was already in use, stop the
	   running program and reclaim its resources first */
	state = xchg(&v->state, VPE_STATE_INUSE);
	if (state != VPE_STATE_UNUSED) {
		printk(KERN_DEBUG "VPE loader: tc in use dumping regs\n");

		/* give registered users a chance to react to the forced stop */
		list_for_each_entry(not, &v->notify, list) {
			not->stop(aprp_cpu_index());
		}

		release_progmem(v->load_addr);
		cleanup_tc(get_tc(aprp_cpu_index()));
	}

	/* this of-course trashes what was there before... */
	/* NOTE(review): any previous pbuffer is overwritten without being
	   vfree()d here; it is normally released in vpe_release() -- verify
	   the open/release pairing if concurrent opens are possible. */
	v->pbuffer = vmalloc(P_SIZE);
	if (!v->pbuffer) {
		pr_warning("VPE loader: unable to allocate memory\n");
		return -ENOMEM;
	}
	v->plen = P_SIZE;
	v->load_addr = NULL;
	v->len = 0;

	/* remember the opener's credentials for vpe_getuid()/vpe_getgid() */
	v->uid = filp->f_cred->fsuid;
	v->gid = filp->f_cred->fsgid;

	/* record the opener's cwd so the SP program can be given it */
	v->cwd[0] = 0;
	ret = getcwd(v->cwd, VPE_PATH_MAX);
	if (ret < 0)
		printk(KERN_WARNING "VPE loader: open, getcwd returned %d\n", ret);

	v->shared_ptr = NULL;
	v->__start = 0;

	return 0;
}
843
844 static int vpe_release(struct inode *inode, struct file *filp)
845 {
846 struct vpe *v;
847 Elf_Ehdr *hdr;
848 int ret = 0;
849
850 v = get_vpe(aprp_cpu_index());
851 if (v == NULL)
852 return -ENODEV;
853
854 hdr = (Elf_Ehdr *) v->pbuffer;
855 if (memcmp(hdr->e_ident, ELFMAG, SELFMAG) == 0) {
856 if ((vpe_elfload(v) >= 0) && vpe_run) {
857 vpe_run(v);
858 } else {
859 printk(KERN_WARNING "VPE loader: ELF load failed.\n");
860 ret = -ENOEXEC;
861 }
862 } else {
863 printk(KERN_WARNING "VPE loader: only elf files are supported\n");
864 ret = -ENOEXEC;
865 }
866
867 /* It's good to be able to run the SP and if it chokes have a look at
868 the /dev/rt?. But if we reset the pointer to the shared struct we
869 lose what has happened. So perhaps if garbage is sent to the vpe
870 device, use it as a trigger for the reset. Hopefully a nice
871 executable will be along shortly. */
872 if (ret < 0)
873 v->shared_ptr = NULL;
874
875 vfree(v->pbuffer);
876 v->plen = 0;
877
878 return ret;
879 }
880
881 static ssize_t vpe_write(struct file *file, const char __user * buffer,
882 size_t count, loff_t * ppos)
883 {
884 size_t ret = count;
885 struct vpe *v;
886
887 if (iminor(file_inode(file)) != VPE_MODULE_MINOR)
888 return -ENODEV;
889
890 v = get_vpe(aprp_cpu_index());
891 if (v == NULL)
892 return -ENODEV;
893
894 if ((count + v->len) > v->plen) {
895 printk(KERN_WARNING
896 "VPE loader: elf size too big. Perhaps strip uneeded symbols\n");
897 return -ENOMEM;
898 }
899
900 count -= copy_from_user(v->pbuffer + v->len, buffer, count);
901 if (!count)
902 return -EFAULT;
903
904 v->len += count;
905 return ret;
906 }
907
/* Character-device operations for the VPE loader device (/dev/vpe1). */
const struct file_operations vpe_fops = {
	.owner = THIS_MODULE,
	.open = vpe_open,
	.release = vpe_release,
	.write = vpe_write,
	.llseek = noop_llseek,
};
915
916 void *vpe_get_shared(int index)
917 {
918 struct vpe *v;
919
920 if ((v = get_vpe(index)) == NULL)
921 return NULL;
922
923 return v->shared_ptr;
924 }
925
926 EXPORT_SYMBOL(vpe_get_shared);
927
928 int vpe_getuid(int index)
929 {
930 struct vpe *v;
931
932 if ((v = get_vpe(index)) == NULL)
933 return -1;
934
935 return v->uid;
936 }
937
938 EXPORT_SYMBOL(vpe_getuid);
939
940 int vpe_getgid(int index)
941 {
942 struct vpe *v;
943
944 if ((v = get_vpe(index)) == NULL)
945 return -1;
946
947 return v->gid;
948 }
949
950 EXPORT_SYMBOL(vpe_getgid);
951
952 int vpe_notify(int index, struct vpe_notifications *notify)
953 {
954 struct vpe *v;
955
956 if ((v = get_vpe(index)) == NULL)
957 return -1;
958
959 list_add(&notify->list, &v->notify);
960 return 0;
961 }
962
963 EXPORT_SYMBOL(vpe_notify);
964
965 char *vpe_getcwd(int index)
966 {
967 struct vpe *v;
968
969 if ((v = get_vpe(index)) == NULL)
970 return NULL;
971
972 return v->cwd;
973 }
974
975 EXPORT_SYMBOL(vpe_getcwd);
976
/* init/exit entry points -- presumably provided by the shared APRP core
   declared in asm/vpe.h (per the split noted in the commit title);
   confirm against that header. */
module_init(vpe_module_init);
module_exit(vpe_module_exit);
MODULE_DESCRIPTION("MIPS VPE Loader");
MODULE_AUTHOR("Elizabeth Oldham, MIPS Technologies, Inc.");
MODULE_LICENSE("GPL");