c13dbd8aac2d6fee811593137beec7611d523e59
[deliverable/binutils-gdb.git] / sim / mcore / interp.c
1 /* Simulator for Motorola's MCore processor
2 Copyright (C) 1999-2015 Free Software Foundation, Inc.
3 Contributed by Cygnus Solutions.
4
5 This file is part of GDB, the GNU debugger.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "config.h"
21 #include <signal.h>
22 #include <stdlib.h>
23 #include <string.h>
24 #include <sys/times.h>
25 #include <sys/param.h>
26 #include <unistd.h>
27 #include "bfd.h"
28 #include "gdb/callback.h"
29 #include "libiberty.h"
30 #include "gdb/remote-sim.h"
31
32 #include "sim-main.h"
33 #include "sim-base.h"
34 #include "sim-options.h"
35
36 #define target_big_endian (CURRENT_TARGET_BYTE_ORDER == BIG_ENDIAN)
37
38
39 static unsigned long
40 mcore_extract_unsigned_integer (unsigned char *addr, int len)
41 {
42 unsigned long retval;
43 unsigned char * p;
44 unsigned char * startaddr = (unsigned char *)addr;
45 unsigned char * endaddr = startaddr + len;
46
47 if (len > (int) sizeof (unsigned long))
48 printf ("That operation is not available on integers of more than %zu bytes.",
49 sizeof (unsigned long));
50
51 /* Start at the most significant end of the integer, and work towards
52 the least significant. */
53 retval = 0;
54
55 if (! target_big_endian)
56 {
57 for (p = endaddr; p > startaddr;)
58 retval = (retval << 8) | * -- p;
59 }
60 else
61 {
62 for (p = startaddr; p < endaddr;)
63 retval = (retval << 8) | * p ++;
64 }
65
66 return retval;
67 }
68
69 static void
70 mcore_store_unsigned_integer (unsigned char *addr, int len, unsigned long val)
71 {
72 unsigned char * p;
73 unsigned char * startaddr = (unsigned char *)addr;
74 unsigned char * endaddr = startaddr + len;
75
76 if (! target_big_endian)
77 {
78 for (p = startaddr; p < endaddr;)
79 {
80 * p ++ = val & 0xff;
81 val >>= 8;
82 }
83 }
84 else
85 {
86 for (p = endaddr; p > startaddr;)
87 {
88 * -- p = val & 0xff;
89 val >>= 8;
90 }
91 }
92 }
93
94 /* The machine state.
95 This state is maintained in host byte order. The
96 fetch/store register functions must translate between host
97 byte order and the target processor byte order.
98 Keeping this data in target byte order simplifies the register
99 read/write functions. Keeping this data in native order improves
100 the performance of the simulator. Simulation speed is deemed more
101 important. */
102 /* TODO: Should be moved to sim-main.h:sim_cpu. */
103
/* The ordering of the mcore_regset structure is matched in the
   gdb/config/mcore/tm-mcore.h file in the REGISTER_NAMES macro.  */
struct mcore_regset
{
  word	  gregs [16];		/* primary registers */
  word	  alt_gregs [16];	/* alt register file */
  word	  cregs [32];		/* control registers */
  int	  ticks;		/* tick counter (accounting) */
  int	  stalls;		/* pipeline stalls (reported by sim_info) */
  int	  cycles;		/* total cycles consumed so far */
  int	  insts;		/* total instructions executed so far */
  int	  exception;		/* pending signal; nonzero stops sim_resume */
  word *  active_gregs;		/* points at gregs or alt_gregs per SR_AF() */
};

/* The one simulated CPU.  The union overlays the structured register
   file with a flat word array so sim_store_register/sim_fetch_register
   can index any register directly by its GDB register number.  */
union
{
  struct mcore_regset asregs;
  word asints [1];		/* but accessed larger... */
} cpu;
124
125 #define LAST_VALID_CREG 32 /* only 0..12 implemented */
126 #define NUM_MCORE_REGS (16 + 16 + LAST_VALID_CREG + 1)
127
128 static int memcycles = 1;
129
130 #define gr asregs.active_gregs
131 #define cr asregs.cregs
132 #define sr asregs.cregs[0]
133 #define vbr asregs.cregs[1]
134 #define esr asregs.cregs[2]
135 #define fsr asregs.cregs[3]
136 #define epc asregs.cregs[4]
137 #define fpc asregs.cregs[5]
138 #define ss0 asregs.cregs[6]
139 #define ss1 asregs.cregs[7]
140 #define ss2 asregs.cregs[8]
141 #define ss3 asregs.cregs[9]
142 #define ss4 asregs.cregs[10]
143 #define gcr asregs.cregs[11]
144 #define gsr asregs.cregs[12]
145
/* Manipulate the carry bit.  */
147 #define C_ON() (cpu.sr & 1)
148 #define C_VALUE() (cpu.sr & 1)
149 #define C_OFF() ((cpu.sr & 1) == 0)
150 #define SET_C() {cpu.sr |= 1;}
151 #define CLR_C() {cpu.sr &= 0xfffffffe;}
152 #define NEW_C(v) {CLR_C(); cpu.sr |= ((v) & 1);}
153
154 #define SR_AF() ((cpu.sr >> 1) & 1)
155
156 #define TRAPCODE 1 /* r1 holds which function we want */
157 #define PARM1 2 /* first parameter */
158 #define PARM2 3
159 #define PARM3 4
160 #define PARM4 5
161 #define RET1 2 /* register for return values. */
162
163 /* Default to a 8 Mbyte (== 2^23) memory space. */
164 #define DEFAULT_MEMORY_SIZE 0x800000
165
166 static void
167 set_initial_gprs (SIM_CPU *scpu)
168 {
169 int i;
170 long space;
171
172 /* Set up machine just out of reset. */
173 CPU_PC_SET (scpu, 0);
174 cpu.sr = 0;
175
176 /* Clean out the GPRs and alternate GPRs. */
177 for (i = 0; i < 16; i++)
178 {
179 cpu.asregs.gregs[i] = 0;
180 cpu.asregs.alt_gregs[i] = 0;
181 }
182
183 /* Make our register set point to the right place. */
184 if (SR_AF())
185 cpu.asregs.active_gregs = &cpu.asregs.alt_gregs[0];
186 else
187 cpu.asregs.active_gregs = &cpu.asregs.gregs[0];
188
189 /* ABI specifies initial values for these registers. */
190 cpu.gr[0] = DEFAULT_MEMORY_SIZE - 4;
191
192 /* dac fix, the stack address must be 8-byte aligned! */
193 cpu.gr[0] = cpu.gr[0] - cpu.gr[0] % 8;
194 cpu.gr[PARM1] = 0;
195 cpu.gr[PARM2] = 0;
196 cpu.gr[PARM3] = 0;
197 cpu.gr[PARM4] = cpu.gr[0];
198 }
199
/* Read/write functions for system call interface.  */

/* cb_syscall callback: read BYTES bytes of target memory at TADDR into
   BUF, going through the sim core so the simulated memory map applies.
   SD and the cpu are recovered from the opaque p1/p2 slots stashed by
   handle_trap1.  Note the local `cpu` deliberately shadows the global
   register-file union of the same name.  */
static int
syscall_read_mem (host_callback *cb, struct cb_syscall *sc,
		  unsigned long taddr, char *buf, int bytes)
{
  SIM_DESC sd = (SIM_DESC) sc->p1;
  SIM_CPU *cpu = (SIM_CPU *) sc->p2;

  return sim_core_read_buffer (sd, cpu, read_map, buf, taddr, bytes);
}
211
/* cb_syscall callback: write BYTES bytes from BUF into target memory
   at TADDR via the sim core.  Mirror of syscall_read_mem; the local
   `cpu` shadows the global register-file union.  */
static int
syscall_write_mem (host_callback *cb, struct cb_syscall *sc,
		   unsigned long taddr, const char *buf, int bytes)
{
  SIM_DESC sd = (SIM_DESC) sc->p1;
  SIM_CPU *cpu = (SIM_CPU *) sc->p2;

  return sim_core_write_buffer (sd, cpu, write_map, buf, taddr, bytes);
}
221
/* Simulate a monitor trap (trap 1): marshal the syscall number from
   r1 (TRAPCODE) and up to four arguments from r2-r5 (PARM1-PARM4)
   into a CB_SYSCALL request, let the host callback execute it, and
   store the result back into r2 (RET1).  */
static void
handle_trap1 (SIM_DESC sd)
{
  host_callback *cb = STATE_CALLBACK (sd);
  CB_SYSCALL sc;

  CB_SYSCALL_INIT (&sc);

  sc.func = cpu.gr[TRAPCODE];
  sc.arg1 = cpu.gr[PARM1];
  sc.arg2 = cpu.gr[PARM2];
  sc.arg3 = cpu.gr[PARM3];
  sc.arg4 = cpu.gr[PARM4];

  /* Stash context so syscall_read_mem/syscall_write_mem can find the
     simulator state when cb_syscall touches target memory.  */
  sc.p1 = (PTR) sd;
  sc.p2 = (PTR) STATE_CPU (sd, 0);
  sc.read_mem = syscall_read_mem;
  sc.write_mem = syscall_write_mem;

  cb_syscall (cb, &sc);

  /* XXX: We don't pass back the actual errno value.  */
  cpu.gr[RET1] = sc.result;
}
248
/* Handle a libgloss stub call (reached via util() code 0xFF): for the
   recognised newlib syscall numbers, stash WHAT in r1 and reuse the
   trap-1 syscall path; anything else is reported (when verbose) and
   ignored.  */
static void
process_stub (SIM_DESC sd, int what)
{
  /* These values should match those in libgloss/mcore/syscalls.s.  */
  switch (what)
    {
    case 3:  /* _read */
    case 4:  /* _write */
    case 5:  /* _open */
    case 6:  /* _close */
    case 10: /* _unlink */
    case 19: /* _lseek */
    case 43: /* _times */
      cpu.gr [TRAPCODE] = what;
      handle_trap1 (sd);
      break;

    default:
      if (STATE_VERBOSE_P (sd))
	fprintf (stderr, "Unhandled stub opcode: %d\n", what);
      break;
    }
}
272
/* Dispatch the simulator "utility" opcode (0x50xx): WHAT selects the
   service.  0 posts SIGQUIT (program exit), 3 returns the instruction
   count in r2, 0xFF forwards the stub number in r1 to process_stub;
   printf/scanf are unimplemented and only warn when verbose.  */
static void
util (SIM_DESC sd, unsigned what)
{
  switch (what)
    {
    case 0:					/* exit */
      cpu.asregs.exception = SIGQUIT;
      break;

    case 1:					/* printf */
      if (STATE_VERBOSE_P (sd))
	fprintf (stderr, "WARNING: printf unimplemented\n");
      break;

    case 2:					/* scanf */
      if (STATE_VERBOSE_P (sd))
	fprintf (stderr, "WARNING: scanf unimplemented\n");
      break;

    case 3:					/* utime */
      cpu.gr[RET1] = cpu.asregs.insts;
      break;

    case 0xFF:
      process_stub (sd, cpu.gr[1]);
      break;

    default:
      if (STATE_VERBOSE_P (sd))
	fprintf (stderr, "Unhandled util code: %x\n", what);
      break;
    }
}
306
/* For figuring out whether we carried; addc/subc use this.
   Returns 1 iff the 32-bit addition A + B + CIN carries out of bit
   31.  The sum is formed in two 16-bit halves so no intermediate
   overflows the host's unsigned long.  */
static int
iu_carry (unsigned long a, unsigned long b, int cin)
{
  unsigned long low_sum = (a & 0xffff) + (b & 0xffff) + cin;
  unsigned long high_sum = (low_sum >> 16) + (a >> 16) + (b >> 16);

  return (high_sum >> 16) != 0;
}
319
320 /* TODO: Convert to common watchpoints. */
321 #undef WATCHFUNCTIONS
322 #ifdef WATCHFUNCTIONS
323
324 #define MAXWL 80
325 word WL[MAXWL];
326 char * WLstr[MAXWL];
327
328 int ENDWL=0;
329 int WLincyc;
330 int WLcyc[MAXWL];
331 int WLcnts[MAXWL];
332 int WLmax[MAXWL];
333 int WLmin[MAXWL];
334 word WLendpc;
335 int WLbcyc;
336 int WLW;
337 #endif
338
339 #define RD (inst & 0xF)
340 #define RS ((inst >> 4) & 0xF)
341 #define RX ((inst >> 8) & 0xF)
342 #define IMM5 ((inst >> 4) & 0x1F)
343 #define IMM4 ((inst) & 0xF)
344
345 #define rbat(X) sim_core_read_1 (scpu, 0, read_map, X)
346 #define rhat(X) sim_core_read_2 (scpu, 0, read_map, X)
347 #define rlat(X) sim_core_read_4 (scpu, 0, read_map, X)
348 #define wbat(X, D) sim_core_write_1 (scpu, 0, write_map, X, D)
349 #define what(X, D) sim_core_write_2 (scpu, 0, write_map, X, D)
350 #define wlat(X, D) sim_core_write_4 (scpu, 0, write_map, X, D)
351
352 static int tracing = 0;
353
/* Execute the simulated CPU until an exception is posted in
   cpu.asregs.exception (or exactly one instruction when STEP is
   nonzero, via a pre-posted SIGTRAP).  SIGGNAL is ignored.

   The loop keeps a 32-bit fetch buffer IBUF holding two 16-bit
   instructions; the decoder dispatches on the top byte of each
   opcode.  PC, instruction and cycle counts are cached in locals for
   speed and written back to the global cpu state on exit.  */
void
sim_resume (SIM_DESC sd, int step, int siggnal)
{
  SIM_CPU *scpu = STATE_CPU (sd, 0);
  int needfetch;		/* refill ibuf before the next decode */
  word ibuf;			/* 32-bit buffer of two 16-bit insns */
  word pc;
  unsigned short inst;
  int memops;			/* memory ops this run (cycle accounting) */
  int bonus_cycles;		/* extra cycles (branches, mult, div) */
  int insts;			/* instructions executed this run */
  int w;
  int cycs;
#ifdef WATCHFUNCTIONS
  word WLhash;
#endif

  cpu.asregs.exception = step ? SIGTRAP: 0;
  pc = CPU_PC_GET (scpu);

  /* Fetch the initial instructions that we'll decode.  */
  ibuf = rlat (pc & 0xFFFFFFFC);
  needfetch = 0;

  memops = 0;
  bonus_cycles = 0;
  insts = 0;

  /* make our register set point to the right place */
  if (SR_AF ())
    cpu.asregs.active_gregs = & cpu.asregs.alt_gregs[0];
  else
    cpu.asregs.active_gregs = & cpu.asregs.gregs[0];

#ifdef WATCHFUNCTIONS
  /* make a hash to speed exec loop, hope it's nonzero */
  WLhash = 0xFFFFFFFF;

  for (w = 1; w <= ENDWL; w++)
    WLhash = WLhash & WL[w];
#endif

  do
    {
      word oldpc;		/* NOTE(review): set below but never read */

      insts ++;

      /* Pick the correct half of ibuf for this pc; an odd halfword
	 address means the second insn of the pair, after which a
	 refetch is needed.  */
      if (pc & 02)
	{
	  if (! target_big_endian)
	    inst = ibuf >> 16;
	  else
	    inst = ibuf & 0xFFFF;
	  needfetch = 1;
	}
      else
	{
	  if (! target_big_endian)
	    inst = ibuf & 0xFFFF;
	  else
	    inst = ibuf >> 16;
	}

#ifdef WATCHFUNCTIONS
      /* now scan list of watch addresses, if match, count it and
	 note return address and count cycles until pc=return address */

      if ((WLincyc == 1) && (pc == WLendpc))
	{
	  cycs = (cpu.asregs.cycles + (insts + bonus_cycles +
				       (memops * memcycles)) - WLbcyc);

	  if (WLcnts[WLW] == 1)
	    {
	      WLmax[WLW] = cycs;
	      WLmin[WLW] = cycs;
	      WLcyc[WLW] = 0;
	    }

	  if (cycs > WLmax[WLW])
	    {
	      WLmax[WLW] = cycs;
	    }

	  if (cycs < WLmin[WLW])
	    {
	      WLmin[WLW] = cycs;
	    }

	  WLcyc[WLW] += cycs;
	  WLincyc = 0;
	  WLendpc = 0;
	}

      /* Optimize with a hash to speed loop.  */
      if (WLincyc == 0)
	{
	  if ((WLhash == 0) || ((WLhash & pc) != 0))
	    {
	      for (w=1; w <= ENDWL; w++)
		{
		  if (pc == WL[w])
		    {
		      WLcnts[w]++;
		      WLbcyc = cpu.asregs.cycles + insts
			+ bonus_cycles + (memops * memcycles);
		      WLendpc = cpu.gr[15];
		      WLincyc = 1;
		      WLW = w;
		      break;
		    }
		}
	    }
	}
#endif

      if (tracing)
	fprintf (stderr, "%.4lx: inst = %.4x ", pc, inst);

      oldpc = pc;

      pc += 2;

      /* Dispatch on the major opcode (top byte).  */
      switch (inst >> 8)
	{
	case 0x00:
	  switch RS
	    {
	    case 0x0:
	      switch RD
		{
		case 0x0:				/* bkpt */
		  cpu.asregs.exception = SIGTRAP;
		  pc -= 2;
		  break;

		case 0x1:				/* sync */
		  break;

		case 0x2:				/* rte */
		  pc = cpu.epc;
		  cpu.sr = cpu.esr;
		  needfetch = 1;

		  if (SR_AF ())
		    cpu.asregs.active_gregs = & cpu.asregs.alt_gregs[0];
		  else
		    cpu.asregs.active_gregs = & cpu.asregs.gregs[0];
		  break;

		case 0x3:				/* rfi */
		  pc = cpu.fpc;
		  cpu.sr = cpu.fsr;
		  needfetch = 1;

		  if (SR_AF ())
		    cpu.asregs.active_gregs = &cpu.asregs.alt_gregs[0];
		  else
		    cpu.asregs.active_gregs = &cpu.asregs.gregs[0];
		  break;

		case 0x4:				/* stop */
		  if (STATE_VERBOSE_P (sd))
		    fprintf (stderr, "WARNING: stop unimplemented\n");
		  break;

		case 0x5:				/* wait */
		  if (STATE_VERBOSE_P (sd))
		    fprintf (stderr, "WARNING: wait unimplemented\n");
		  break;

		case 0x6:				/* doze */
		  if (STATE_VERBOSE_P (sd))
		    fprintf (stderr, "WARNING: doze unimplemented\n");
		  break;

		case 0x7:
		  cpu.asregs.exception = SIGILL;	/* illegal */
		  break;

		case 0x8:				/* trap 0 */
		case 0xA:				/* trap 2 */
		case 0xB:				/* trap 3 */
		  cpu.asregs.exception = SIGTRAP;
		  break;

		case 0xC:				/* trap 4 */
		case 0xD:				/* trap 5 */
		case 0xE:				/* trap 6 */
		  cpu.asregs.exception = SIGILL;	/* illegal */
		  break;

		case 0xF:				/* trap 7 */
		  cpu.asregs.exception = SIGTRAP;	/* integer div-by-0 */
		  break;

		case 0x9:				/* trap 1 */
		  handle_trap1 (sd);
		  break;
		}
	      break;

	    case 0x1:
	      cpu.asregs.exception = SIGILL;		/* illegal */
	      break;

	    case 0x2:					/* mvc */
	      cpu.gr[RD] = C_VALUE();
	      break;
	    case 0x3:					/* mvcv */
	      cpu.gr[RD] = C_OFF();
	      break;
	    case 0x4:					/* ldq */
	      {
		word addr = cpu.gr[RD];
		int regno = 4;				/* always r4-r7 */

		bonus_cycles++;
		memops += 4;
		do
		  {
		    cpu.gr[regno] = rlat(addr);
		    addr += 4;
		    regno++;
		  }
		while ((regno&0x3) != 0);
	      }
	      break;
	    case 0x5:					/* stq */
	      {
		word addr = cpu.gr[RD];
		int regno = 4;				/* always r4-r7 */

		memops += 4;
		bonus_cycles++;
		do
		  {
		    wlat(addr, cpu.gr[regno]);
		    addr += 4;
		    regno++;
		  }
		while ((regno & 0x3) != 0);
	      }
	      break;
	    case 0x6:					/* ldm */
	      {
		word addr = cpu.gr[0];
		int regno = RD;

		/* bonus cycle is really only needed if
		   the next insn shifts the last reg loaded.

		   bonus_cycles++;
		*/
		memops += 16-regno;
		while (regno <= 0xF)
		  {
		    cpu.gr[regno] = rlat(addr);
		    addr += 4;
		    regno++;
		  }
	      }
	      break;
	    case 0x7:					/* stm */
	      {
		word addr = cpu.gr[0];
		int regno = RD;

		/* this should be removed! */
		/* bonus_cycles ++; */

		memops += 16 - regno;
		while (regno <= 0xF)
		  {
		    wlat(addr, cpu.gr[regno]);
		    addr += 4;
		    regno++;
		  }
	      }
	      break;

	    case 0x8:					/* dect */
	      cpu.gr[RD] -= C_VALUE();
	      break;
	    case 0x9:					/* decf */
	      cpu.gr[RD] -= C_OFF();
	      break;
	    case 0xA:					/* inct */
	      cpu.gr[RD] += C_VALUE();
	      break;
	    case 0xB:					/* incf */
	      cpu.gr[RD] += C_OFF();
	      break;
	    case 0xC:					/* jmp */
	      pc = cpu.gr[RD];
	      /* NOTE(review): "%lxx" below looks like a typo for
		 "%lx"; trace output only.  */
	      if (tracing && RD == 15)
		fprintf (stderr, "Func return, r2 = %lxx, r3 = %lx\n",
			 cpu.gr[2], cpu.gr[3]);
	      bonus_cycles++;
	      needfetch = 1;
	      break;
	    case 0xD:					/* jsr */
	      cpu.gr[15] = pc;
	      pc = cpu.gr[RD];
	      bonus_cycles++;
	      needfetch = 1;
	      break;
	    case 0xE:					/* ff1 */
	      {
		word tmp, i;
		tmp = cpu.gr[RD];
		for (i = 0; !(tmp & 0x80000000) && i < 32; i++)
		  tmp <<= 1;
		cpu.gr[RD] = i;
	      }
	      break;
	    case 0xF:					/* brev */
	      {
		word tmp;
		tmp = cpu.gr[RD];
		tmp = ((tmp & 0xaaaaaaaa) >> 1) | ((tmp & 0x55555555) << 1);
		tmp = ((tmp & 0xcccccccc) >> 2) | ((tmp & 0x33333333) << 2);
		tmp = ((tmp & 0xf0f0f0f0) >> 4) | ((tmp & 0x0f0f0f0f) << 4);
		tmp = ((tmp & 0xff00ff00) >> 8) | ((tmp & 0x00ff00ff) << 8);
		cpu.gr[RD] = ((tmp & 0xffff0000) >> 16) | ((tmp & 0x0000ffff) << 16);
	      }
	      break;
	    }
	  break;
	case 0x01:
	  switch RS
	    {
	    case 0x0:					/* xtrb3 */
	      cpu.gr[1] = (cpu.gr[RD]) & 0xFF;
	      NEW_C (cpu.gr[RD] != 0);
	      break;
	    case 0x1:					/* xtrb2 */
	      cpu.gr[1] = (cpu.gr[RD]>>8) & 0xFF;
	      NEW_C (cpu.gr[RD] != 0);
	      break;
	    case 0x2:					/* xtrb1 */
	      cpu.gr[1] = (cpu.gr[RD]>>16) & 0xFF;
	      NEW_C (cpu.gr[RD] != 0);
	      break;
	    case 0x3:					/* xtrb0 */
	      cpu.gr[1] = (cpu.gr[RD]>>24) & 0xFF;
	      NEW_C (cpu.gr[RD] != 0);
	      break;
	    case 0x4:					/* zextb */
	      cpu.gr[RD] &= 0x000000FF;
	      break;
	    case 0x5:					/* sextb */
	      {
		long tmp;
		tmp = cpu.gr[RD];
		tmp <<= 24;
		tmp >>= 24;
		cpu.gr[RD] = tmp;
	      }
	      break;
	    case 0x6:					/* zexth */
	      cpu.gr[RD] &= 0x0000FFFF;
	      break;
	    case 0x7:					/* sexth */
	      {
		long tmp;
		tmp = cpu.gr[RD];
		tmp <<= 16;
		tmp >>= 16;
		cpu.gr[RD] = tmp;
	      }
	      break;
	    case 0x8:					/* declt */
	      --cpu.gr[RD];
	      NEW_C ((long)cpu.gr[RD] < 0);
	      break;
	    case 0x9:					/* tstnbz */
	      {
		word tmp = cpu.gr[RD];
		NEW_C ((tmp & 0xFF000000) != 0 &&
		       (tmp & 0x00FF0000) != 0 && (tmp & 0x0000FF00) != 0 &&
		       (tmp & 0x000000FF) != 0);
	      }
	      break;
	    case 0xA:					/* decgt */
	      --cpu.gr[RD];
	      NEW_C ((long)cpu.gr[RD] > 0);
	      break;
	    case 0xB:					/* decne */
	      --cpu.gr[RD];
	      NEW_C ((long)cpu.gr[RD] != 0);
	      break;
	    case 0xC:					/* clrt */
	      if (C_ON())
		cpu.gr[RD] = 0;
	      break;
	    case 0xD:					/* clrf */
	      if (C_OFF())
		cpu.gr[RD] = 0;
	      break;
	    case 0xE:					/* abs */
	      if (cpu.gr[RD] & 0x80000000)
		cpu.gr[RD] = ~cpu.gr[RD] + 1;
	      break;
	    case 0xF:					/* not */
	      cpu.gr[RD] = ~cpu.gr[RD];
	      break;
	    }
	  break;
	case 0x02:					/* movt */
	  if (C_ON())
	    cpu.gr[RD] = cpu.gr[RS];
	  break;
	case 0x03:					/* mult */
	  /* consume 2 bits per cycle from rs, until rs is 0 */
	  {
	    unsigned int t = cpu.gr[RS];
	    int ticks;
	    for (ticks = 0; t != 0 ; t >>= 2)
	      ticks++;
	    bonus_cycles += ticks;
	  }
	  bonus_cycles += 2;  /* min. is 3, so add 2, plus ticks above */
	  if (tracing)
	    fprintf (stderr, "  mult %lx by %lx to give %lx",
		     cpu.gr[RD], cpu.gr[RS], cpu.gr[RD] * cpu.gr[RS]);
	  cpu.gr[RD] = cpu.gr[RD] * cpu.gr[RS];
	  break;
	case 0x04:					/* loopt */
	  if (C_ON())
	    {
	      pc += (IMM4 << 1) - 32;
	      bonus_cycles ++;
	      needfetch = 1;
	    }
	  --cpu.gr[RS];				/* not RD! */
	  NEW_C (((long)cpu.gr[RS]) > 0);
	  break;
	case 0x05:					/* subu */
	  cpu.gr[RD] -= cpu.gr[RS];
	  break;
	case 0x06:					/* addc */
	  {
	    unsigned long tmp, a, b;
	    a = cpu.gr[RD];
	    b = cpu.gr[RS];
	    cpu.gr[RD] = a + b + C_VALUE ();
	    tmp = iu_carry (a, b, C_VALUE ());
	    NEW_C (tmp);
	  }
	  break;
	case 0x07:					/* subc */
	  {
	    unsigned long tmp, a, b;
	    a = cpu.gr[RD];
	    b = cpu.gr[RS];
	    cpu.gr[RD] = a - b + C_VALUE () - 1;
	    tmp = iu_carry (a,~b, C_VALUE ());
	    NEW_C (tmp);
	  }
	  break;
	case 0x08:					/* illegal */
	case 0x09:					/* illegal*/
	  cpu.asregs.exception = SIGILL;
	  break;
	case 0x0A:					/* movf */
	  if (C_OFF())
	    cpu.gr[RD] = cpu.gr[RS];
	  break;
	case 0x0B:					/* lsr */
	  {
	    unsigned long dst, src;
	    dst = cpu.gr[RD];
	    src = cpu.gr[RS];
	    /* We must not rely solely upon the native shift operations, since they
	       may not match the M*Core's behaviour on boundary conditions.  */
	    dst = src > 31 ? 0 : dst >> src;
	    cpu.gr[RD] = dst;
	  }
	  break;
	case 0x0C:					/* cmphs */
	  NEW_C ((unsigned long )cpu.gr[RD] >=
		 (unsigned long)cpu.gr[RS]);
	  break;
	case 0x0D:					/* cmplt */
	  NEW_C ((long)cpu.gr[RD] < (long)cpu.gr[RS]);
	  break;
	case 0x0E:					/* tst */
	  NEW_C ((cpu.gr[RD] & cpu.gr[RS]) != 0);
	  break;
	case 0x0F:					/* cmpne */
	  NEW_C (cpu.gr[RD] != cpu.gr[RS]);
	  break;
	case 0x10: case 0x11:				/* mfcr */
	  {
	    unsigned r;
	    r = IMM5;
	    if (r <= LAST_VALID_CREG)
	      cpu.gr[RD] = cpu.cr[r];
	    else
	      cpu.asregs.exception = SIGILL;
	  }
	  break;

	case 0x12:					/* mov */
	  cpu.gr[RD] = cpu.gr[RS];
	  if (tracing)
	    fprintf (stderr, "MOV %lx into reg %d", cpu.gr[RD], RD);
	  break;

	case 0x13:					/* bgenr */
	  if (cpu.gr[RS] & 0x20)
	    cpu.gr[RD] = 0;
	  else
	    cpu.gr[RD] = 1 << (cpu.gr[RS] & 0x1F);
	  break;

	case 0x14:					/* rsub */
	  cpu.gr[RD] = cpu.gr[RS] - cpu.gr[RD];
	  break;

	case 0x15:					/* ixw */
	  cpu.gr[RD] += cpu.gr[RS]<<2;
	  break;

	case 0x16:					/* and */
	  cpu.gr[RD] &= cpu.gr[RS];
	  break;

	case 0x17:					/* xor */
	  cpu.gr[RD] ^= cpu.gr[RS];
	  break;

	case 0x18: case 0x19:				/* mtcr */
	  {
	    unsigned r;
	    r = IMM5;
	    if (r <= LAST_VALID_CREG)
	      cpu.cr[r] = cpu.gr[RD];
	    else
	      cpu.asregs.exception = SIGILL;

	    /* we might have changed register sets... */
	    if (SR_AF ())
	      cpu.asregs.active_gregs = & cpu.asregs.alt_gregs[0];
	    else
	      cpu.asregs.active_gregs = & cpu.asregs.gregs[0];
	  }
	  break;

	case 0x1A:					/* asr */
	  /* We must not rely solely upon the native shift operations, since they
	     may not match the M*Core's behaviour on boundary conditions.  */
	  if (cpu.gr[RS] > 30)
	    cpu.gr[RD] = ((long) cpu.gr[RD]) < 0 ? -1 : 0;
	  else
	    cpu.gr[RD] = (long) cpu.gr[RD] >> cpu.gr[RS];
	  break;

	case 0x1B:					/* lsl */
	  /* We must not rely solely upon the native shift operations, since they
	     may not match the M*Core's behaviour on boundary conditions.  */
	  cpu.gr[RD] = cpu.gr[RS] > 31 ? 0 : cpu.gr[RD] << cpu.gr[RS];
	  break;

	case 0x1C:					/* addu */
	  cpu.gr[RD] += cpu.gr[RS];
	  break;

	case 0x1D:					/* ixh */
	  cpu.gr[RD] += cpu.gr[RS] << 1;
	  break;

	case 0x1E:					/* or */
	  cpu.gr[RD] |= cpu.gr[RS];
	  break;

	case 0x1F:					/* andn */
	  cpu.gr[RD] &= ~cpu.gr[RS];
	  break;
	case 0x20: case 0x21:				/* addi */
	  cpu.gr[RD] =
	    cpu.gr[RD] + (IMM5 + 1);
	  break;
	case 0x22: case 0x23:				/* cmplti */
	  {
	    int tmp = (IMM5 + 1);
	    if (cpu.gr[RD] < tmp)
	      {
		SET_C();
	      }
	    else
	      {
		CLR_C();
	      }
	  }
	  break;
	case 0x24: case 0x25:				/* subi */
	  cpu.gr[RD] =
	    cpu.gr[RD] - (IMM5 + 1);
	  break;
	case 0x26: case 0x27:				/* illegal */
	  cpu.asregs.exception = SIGILL;
	  break;
	case 0x28: case 0x29:				/* rsubi */
	  cpu.gr[RD] =
	    IMM5 - cpu.gr[RD];
	  break;
	case 0x2A: case 0x2B:				/* cmpnei */
	  if (cpu.gr[RD] != IMM5)
	    {
	      SET_C();
	    }
	  else
	    {
	      CLR_C();
	    }
	  break;

	case 0x2C: case 0x2D:				/* bmaski, divu */
	  {
	    unsigned imm = IMM5;

	    if (imm == 1)
	      {
		int exe;
		int rxnlz, r1nlz;
		unsigned int rx, r1;

		rx = cpu.gr[RD];
		r1 = cpu.gr[1];
		exe = 0;

		/* unsigned divide */
		cpu.gr[RD] = (word) ((unsigned int) cpu.gr[RD] / (unsigned int)cpu.gr[1] );

		/* compute bonus_cycles for divu */
		for (r1nlz = 0; ((r1 & 0x80000000) == 0) && (r1nlz < 32); r1nlz ++)
		  r1 = r1 << 1;

		for (rxnlz = 0; ((rx & 0x80000000) == 0) && (rxnlz < 32); rxnlz ++)
		  rx = rx << 1;

		if (r1nlz < rxnlz)
		  exe += 4;
		else
		  exe += 5 + r1nlz - rxnlz;

		if (exe >= (2 * memcycles - 1))
		  {
		    bonus_cycles += exe - (2 * memcycles) + 1;
		  }
	      }
	    else if (imm == 0 || imm >= 8)
	      {
		/* bmaski */
		if (imm == 0)
		  cpu.gr[RD] = -1;
		else
		  cpu.gr[RD] = (1 << imm) - 1;
	      }
	    else
	      {
		/* illegal */
		cpu.asregs.exception = SIGILL;
	      }
	  }
	  break;
	case 0x2E: case 0x2F:				/* andi */
	  cpu.gr[RD] = cpu.gr[RD] & IMM5;
	  break;
	case 0x30: case 0x31:				/* bclri */
	  cpu.gr[RD] = cpu.gr[RD] & ~(1<<IMM5);
	  break;
	case 0x32: case 0x33:				/* bgeni, divs */
	  {
	    unsigned imm = IMM5;
	    if (imm == 1)
	      {
		int exe,sc;
		int rxnlz, r1nlz;
		signed int rx, r1;

		/* compute bonus_cycles for divs */
		rx = cpu.gr[RD];
		r1 = cpu.gr[1];
		exe = 0;

		if (((rx < 0) && (r1 > 0)) || ((rx >= 0) && (r1 < 0)))
		  sc = 1;
		else
		  sc = 0;

		rx = abs (rx);
		r1 = abs (r1);

		/* signed divide, general registers are of type int, so / op is OK */
		cpu.gr[RD] = cpu.gr[RD] / cpu.gr[1];

		for (r1nlz = 0; ((r1 & 0x80000000) == 0) && (r1nlz < 32) ; r1nlz ++ )
		  r1 = r1 << 1;

		for (rxnlz = 0; ((rx & 0x80000000) == 0) && (rxnlz < 32) ; rxnlz ++ )
		  rx = rx << 1;

		if (r1nlz < rxnlz)
		  exe += 5;
		else
		  exe += 6 + r1nlz - rxnlz + sc;

		if (exe >= (2 * memcycles - 1))
		  {
		    bonus_cycles += exe - (2 * memcycles) + 1;
		  }
	      }
	    else if (imm >= 7)
	      {
		/* bgeni */
		cpu.gr[RD] = (1 << IMM5);
	      }
	    else
	      {
		/* illegal */
		cpu.asregs.exception = SIGILL;
	      }
	    break;
	  }
	case 0x34: case 0x35:				/* bseti */
	  cpu.gr[RD] = cpu.gr[RD] | (1 << IMM5);
	  break;
	case 0x36: case 0x37:				/* btsti */
	  NEW_C (cpu.gr[RD] >> IMM5);
	  break;
	case 0x38: case 0x39:				/* xsr, rotli */
	  {
	    unsigned imm = IMM5;
	    unsigned long tmp = cpu.gr[RD];
	    if (imm == 0)
	      {
		word cbit;
		cbit = C_VALUE();
		NEW_C (tmp);
		cpu.gr[RD] = (cbit << 31) | (tmp >> 1);
	      }
	    else
	      cpu.gr[RD] = (tmp << imm) | (tmp >> (32 - imm));
	  }
	  break;
	case 0x3A: case 0x3B:				/* asrc, asri */
	  {
	    unsigned imm = IMM5;
	    long tmp = cpu.gr[RD];
	    if (imm == 0)
	      {
		NEW_C (tmp);
		cpu.gr[RD] = tmp >> 1;
	      }
	    else
	      cpu.gr[RD] = tmp >> imm;
	  }
	  break;
	case 0x3C: case 0x3D:				/* lslc, lsli */
	  {
	    unsigned imm = IMM5;
	    unsigned long tmp = cpu.gr[RD];
	    if (imm == 0)
	      {
		NEW_C (tmp >> 31);
		cpu.gr[RD] = tmp << 1;
	      }
	    else
	      cpu.gr[RD] = tmp << imm;
	  }
	  break;
	case 0x3E: case 0x3F:				/* lsrc, lsri */
	  {
	    unsigned imm = IMM5;
	    unsigned long tmp = cpu.gr[RD];
	    if (imm == 0)
	      {
		NEW_C (tmp);
		cpu.gr[RD] = tmp >> 1;
	      }
	    else
	      cpu.gr[RD] = tmp >> imm;
	  }
	  break;
	case 0x40: case 0x41: case 0x42: case 0x43:
	case 0x44: case 0x45: case 0x46: case 0x47:
	case 0x48: case 0x49: case 0x4A: case 0x4B:
	case 0x4C: case 0x4D: case 0x4E: case 0x4F:
	  cpu.asregs.exception = SIGILL;
	  break;
	case 0x50:
	  util (sd, inst & 0xFF);
	  break;
	case 0x51: case 0x52: case 0x53:
	case 0x54: case 0x55: case 0x56: case 0x57:
	case 0x58: case 0x59: case 0x5A: case 0x5B:
	case 0x5C: case 0x5D: case 0x5E: case 0x5F:
	  cpu.asregs.exception = SIGILL;
	  break;
	case 0x60: case 0x61: case 0x62: case 0x63:	/* movi */
	case 0x64: case 0x65: case 0x66: case 0x67:
	  cpu.gr[RD] = (inst >> 4) & 0x7F;
	  break;
	case 0x68: case 0x69: case 0x6A: case 0x6B:
	case 0x6C: case 0x6D: case 0x6E: case 0x6F:	/* illegal */
	  cpu.asregs.exception = SIGILL;
	  break;
	case 0x71: case 0x72: case 0x73:
	case 0x74: case 0x75: case 0x76: case 0x77:
	case 0x78: case 0x79: case 0x7A: case 0x7B:
	case 0x7C: case 0x7D: case 0x7E:		/* lrw */
	  cpu.gr[RX] = rlat ((pc + ((inst & 0xFF) << 2)) & 0xFFFFFFFC);
	  if (tracing)
	    fprintf (stderr, "LRW of 0x%x from 0x%lx to reg %d",
		     rlat ((pc + ((inst & 0xFF) << 2)) & 0xFFFFFFFC),
		     (pc + ((inst & 0xFF) << 2)) & 0xFFFFFFFC, RX);
	  memops++;
	  break;
	case 0x7F:					/* jsri */
	  cpu.gr[15] = pc;
	  if (tracing)
	    fprintf (stderr,
		     "func call: r2 = %lx r3 = %lx r4 = %lx r5 = %lx r6 = %lx r7 = %lx\n",
		     cpu.gr[2], cpu.gr[3], cpu.gr[4], cpu.gr[5], cpu.gr[6], cpu.gr[7]);
	  /* Fall through: jsri is jmpi plus the link set above.  */
	case 0x70:					/* jmpi */
	  pc = rlat ((pc + ((inst & 0xFF) << 2)) & 0xFFFFFFFC);
	  memops++;
	  bonus_cycles++;
	  needfetch = 1;
	  break;

	case 0x80: case 0x81: case 0x82: case 0x83:
	case 0x84: case 0x85: case 0x86: case 0x87:
	case 0x88: case 0x89: case 0x8A: case 0x8B:
	case 0x8C: case 0x8D: case 0x8E: case 0x8F:	/* ld */
	  cpu.gr[RX] = rlat (cpu.gr[RD] + ((inst >> 2) & 0x003C));
	  if (tracing)
	    fprintf (stderr, "load reg %d from 0x%lx with 0x%lx",
		     RX,
		     cpu.gr[RD] + ((inst >> 2) & 0x003C), cpu.gr[RX]);
	  memops++;
	  break;
	case 0x90: case 0x91: case 0x92: case 0x93:
	case 0x94: case 0x95: case 0x96: case 0x97:
	case 0x98: case 0x99: case 0x9A: case 0x9B:
	case 0x9C: case 0x9D: case 0x9E: case 0x9F:	/* st */
	  wlat (cpu.gr[RD] + ((inst >> 2) & 0x003C), cpu.gr[RX]);
	  if (tracing)
	    fprintf (stderr, "store reg %d (containing 0x%lx) to 0x%lx",
		     RX, cpu.gr[RX],
		     cpu.gr[RD] + ((inst >> 2) & 0x003C));
	  memops++;
	  break;
	case 0xA0: case 0xA1: case 0xA2: case 0xA3:
	case 0xA4: case 0xA5: case 0xA6: case 0xA7:
	case 0xA8: case 0xA9: case 0xAA: case 0xAB:
	case 0xAC: case 0xAD: case 0xAE: case 0xAF:	/* ld.b */
	  cpu.gr[RX] = rbat (cpu.gr[RD] + RS);
	  memops++;
	  break;
	case 0xB0: case 0xB1: case 0xB2: case 0xB3:
	case 0xB4: case 0xB5: case 0xB6: case 0xB7:
	case 0xB8: case 0xB9: case 0xBA: case 0xBB:
	case 0xBC: case 0xBD: case 0xBE: case 0xBF:	/* st.b */
	  wbat (cpu.gr[RD] + RS, cpu.gr[RX]);
	  memops++;
	  break;
	case 0xC0: case 0xC1: case 0xC2: case 0xC3:
	case 0xC4: case 0xC5: case 0xC6: case 0xC7:
	case 0xC8: case 0xC9: case 0xCA: case 0xCB:
	case 0xCC: case 0xCD: case 0xCE: case 0xCF:	/* ld.h */
	  cpu.gr[RX] = rhat (cpu.gr[RD] + ((inst >> 3) & 0x001E));
	  memops++;
	  break;
	case 0xD0: case 0xD1: case 0xD2: case 0xD3:
	case 0xD4: case 0xD5: case 0xD6: case 0xD7:
	case 0xD8: case 0xD9: case 0xDA: case 0xDB:
	case 0xDC: case 0xDD: case 0xDE: case 0xDF:	/* st.h */
	  what (cpu.gr[RD] + ((inst >> 3) & 0x001E), cpu.gr[RX]);
	  memops++;
	  break;
	case 0xE8: case 0xE9: case 0xEA: case 0xEB:
	case 0xEC: case 0xED: case 0xEE: case 0xEF:	/* bf */
	  if (C_OFF())
	    {
	      int disp;
	      disp = inst & 0x03FF;
	      if (inst & 0x0400)
		disp |= 0xFFFFFC00;
	      pc += disp<<1;
	      bonus_cycles++;
	      needfetch = 1;
	    }
	  break;
	case 0xE0: case 0xE1: case 0xE2: case 0xE3:
	case 0xE4: case 0xE5: case 0xE6: case 0xE7:	/* bt */
	  if (C_ON())
	    {
	      int disp;
	      disp = inst & 0x03FF;
	      if (inst & 0x0400)
		disp |= 0xFFFFFC00;
	      pc += disp<<1;
	      bonus_cycles++;
	      needfetch = 1;
	    }
	  break;

	case 0xF8: case 0xF9: case 0xFA: case 0xFB:
	case 0xFC: case 0xFD: case 0xFE: case 0xFF:	/* bsr */
	  cpu.gr[15] = pc;
	  /* Fall through: bsr is br plus the link set above.  */
	case 0xF0: case 0xF1: case 0xF2: case 0xF3:
	case 0xF4: case 0xF5: case 0xF6: case 0xF7:	/* br */
	  {
	    int disp;
	    disp = inst & 0x03FF;
	    if (inst & 0x0400)
	      disp |= 0xFFFFFC00;
	    pc += disp<<1;
	    bonus_cycles++;
	    needfetch = 1;
	  }
	  break;

	}

      if (tracing)
	fprintf (stderr, "\n");

      if (needfetch)
	{
	  ibuf = rlat (pc & 0xFFFFFFFC);
	  needfetch = 0;
	}
    }
  while (!cpu.asregs.exception);

  /* Hide away the things we've cached while executing.  */
  CPU_PC_SET (scpu, pc);
  cpu.asregs.insts += insts;		/* instructions done ... */
  cpu.asregs.cycles += insts;		/* and each takes a cycle */
  cpu.asregs.cycles += bonus_cycles;	/* and extra cycles for branches */
  cpu.asregs.cycles += memops * memcycles;	/* and memop cycle delays */
}
1302
1303 int
1304 sim_store_register (SIM_DESC sd, int rn, unsigned char *memory, int length)
1305 {
1306 if (rn < NUM_MCORE_REGS && rn >= 0)
1307 {
1308 if (length == 4)
1309 {
1310 long ival;
1311
1312 /* misalignment safe */
1313 ival = mcore_extract_unsigned_integer (memory, 4);
1314 cpu.asints[rn] = ival;
1315 }
1316
1317 return 4;
1318 }
1319 else
1320 return 0;
1321 }
1322
1323 int
1324 sim_fetch_register (SIM_DESC sd, int rn, unsigned char *memory, int length)
1325 {
1326 if (rn < NUM_MCORE_REGS && rn >= 0)
1327 {
1328 if (length == 4)
1329 {
1330 long ival = cpu.asints[rn];
1331
1332 /* misalignment-safe */
1333 mcore_store_unsigned_integer (memory, 4, ival);
1334 }
1335
1336 return 4;
1337 }
1338 else
1339 return 0;
1340 }
1341
1342 void
1343 sim_stop_reason (SIM_DESC sd, enum sim_stop *reason, int *sigrc)
1344 {
1345 if (cpu.asregs.exception == SIGQUIT)
1346 {
1347 * reason = sim_exited;
1348 * sigrc = cpu.gr[PARM1];
1349 }
1350 else
1351 {
1352 * reason = sim_stopped;
1353 * sigrc = cpu.asregs.exception;
1354 }
1355 }
1356
/* Print run statistics (instruction and cycle counts, stalls, and an
   estimated wall-clock time for the simulated run) through the host
   callback's filtered printf.  When built with WATCHFUNCTIONS, also
   dumps per-watched-function call/cycle accounting.  VERBOSE is
   accepted for interface compatibility but not used here.  */

void
sim_info (SIM_DESC sd, int verbose)
{
#ifdef WATCHFUNCTIONS
  int w, wcyc;
#endif
  /* Cycles divided by 36e6 — presumably a 36 MHz target clock; gives
     seconds of simulated time.  TODO(review): confirm clock rate.  */
  double virttime = cpu.asregs.cycles / 36.0e6;
  host_callback *callback = STATE_CALLBACK (sd);

  callback->printf_filtered (callback, "\n\n# instructions executed  %10d\n",
			     cpu.asregs.insts);
  callback->printf_filtered (callback, "# cycles                 %10d\n",
			     cpu.asregs.cycles);
  callback->printf_filtered (callback, "# pipeline stalls        %10d\n",
			     cpu.asregs.stalls);
  callback->printf_filtered (callback, "# virtual time taken     %10.4f\n",
			     virttime);

#ifdef WATCHFUNCTIONS
  callback->printf_filtered (callback, "\nNumber of watched functions: %d\n",
			     ENDWL);

  wcyc = 0;

  /* Watch list entries are 1-based; slot 0 is unused.  */
  for (w = 1; w <= ENDWL; w++)
    {
      callback->printf_filtered (callback, "WL = %s %8x\n",WLstr[w],WL[w]);
      callback->printf_filtered (callback, "  calls = %d, cycles = %d\n",
				 WLcnts[w],WLcyc[w]);

      /* Per-call averages only make sense if the function was called.  */
      if (WLcnts[w] != 0)
	callback->printf_filtered (callback,
				   "  maxcpc = %d, mincpc = %d, avecpc = %d\n",
				   WLmax[w],WLmin[w],WLcyc[w]/WLcnts[w]);
      wcyc += WLcyc[w];
    }

  callback->printf_filtered (callback,
			     "Total cycles for watched functions: %d\n",wcyc);
#endif
}
1398
1399 static sim_cia
1400 mcore_pc_get (sim_cpu *cpu)
1401 {
1402 return cpu->pc;
1403 }
1404
1405 static void
1406 mcore_pc_set (sim_cpu *cpu, sim_cia pc)
1407 {
1408 cpu->pc = pc;
1409 }
1410
1411 static void
1412 free_state (SIM_DESC sd)
1413 {
1414 if (STATE_MODULES (sd) != NULL)
1415 sim_module_uninstall (sd);
1416 sim_cpu_free_all (sd);
1417 sim_state_free (sd);
1418 }
1419
1420 SIM_DESC
1421 sim_open (SIM_OPEN_KIND kind, host_callback *cb, struct bfd *abfd, char **argv)
1422 {
1423 int i;
1424 SIM_DESC sd = sim_state_alloc (kind, cb);
1425 SIM_ASSERT (STATE_MAGIC (sd) == SIM_MAGIC_NUMBER);
1426
1427 /* The cpu data is kept in a separately allocated chunk of memory. */
1428 if (sim_cpu_alloc_all (sd, 1, /*cgen_cpu_max_extra_bytes ()*/0) != SIM_RC_OK)
1429 {
1430 free_state (sd);
1431 return 0;
1432 }
1433
1434 if (sim_pre_argv_init (sd, argv[0]) != SIM_RC_OK)
1435 {
1436 free_state (sd);
1437 return 0;
1438 }
1439
1440 /* getopt will print the error message so we just have to exit if this fails.
1441 FIXME: Hmmm... in the case of gdb we need getopt to call
1442 print_filtered. */
1443 if (sim_parse_args (sd, argv) != SIM_RC_OK)
1444 {
1445 free_state (sd);
1446 return 0;
1447 }
1448
1449 /* Check for/establish the a reference program image. */
1450 if (sim_analyze_program (sd,
1451 (STATE_PROG_ARGV (sd) != NULL
1452 ? *STATE_PROG_ARGV (sd)
1453 : NULL), abfd) != SIM_RC_OK)
1454 {
1455 free_state (sd);
1456 return 0;
1457 }
1458
1459 /* Configure/verify the target byte order and other runtime
1460 configuration options. */
1461 if (sim_config (sd) != SIM_RC_OK)
1462 {
1463 sim_module_uninstall (sd);
1464 return 0;
1465 }
1466
1467 if (sim_post_argv_init (sd) != SIM_RC_OK)
1468 {
1469 /* Uninstall the modules to avoid memory leaks,
1470 file descriptor leaks, etc. */
1471 sim_module_uninstall (sd);
1472 return 0;
1473 }
1474
1475 /* CPU specific initialization. */
1476 for (i = 0; i < MAX_NR_PROCESSORS; ++i)
1477 {
1478 SIM_CPU *cpu = STATE_CPU (sd, i);
1479
1480 CPU_PC_FETCH (cpu) = mcore_pc_get;
1481 CPU_PC_STORE (cpu) = mcore_pc_set;
1482
1483 set_initial_gprs (cpu); /* Reset the GPR registers. */
1484 }
1485
1486 /* Default to a 8 Mbyte (== 2^23) memory space. */
1487 sim_do_commandf (sd, "memory-size %#x", DEFAULT_MEMORY_SIZE);
1488
1489 return sd;
1490 }
1491
1492 void
1493 sim_close (SIM_DESC sd, int quitting)
1494 {
1495 /* nothing to do */
1496 }
1497
1498 SIM_RC
1499 sim_create_inferior (SIM_DESC sd, struct bfd *prog_bfd, char **argv, char **env)
1500 {
1501 SIM_CPU *scpu = STATE_CPU (sd, 0);
1502 char ** avp;
1503 int nargs = 0;
1504 int nenv = 0;
1505 int s_length;
1506 int l;
1507 unsigned long strings;
1508 unsigned long pointers;
1509 unsigned long hi_stack;
1510
1511
1512 /* Set the initial register set. */
1513 set_initial_gprs (scpu);
1514
1515 hi_stack = DEFAULT_MEMORY_SIZE - 4;
1516 CPU_PC_SET (scpu, bfd_get_start_address (prog_bfd));
1517
1518 /* Calculate the argument and environment strings. */
1519 s_length = 0;
1520 nargs = 0;
1521 avp = argv;
1522 while (avp && *avp)
1523 {
1524 l = strlen (*avp) + 1; /* include the null */
1525 s_length += (l + 3) & ~3; /* make it a 4 byte boundary */
1526 nargs++; avp++;
1527 }
1528
1529 nenv = 0;
1530 avp = env;
1531 while (avp && *avp)
1532 {
1533 l = strlen (*avp) + 1; /* include the null */
1534 s_length += (l + 3) & ~ 3;/* make it a 4 byte boundary */
1535 nenv++; avp++;
1536 }
1537
1538 /* Claim some memory for the pointers and strings. */
1539 pointers = hi_stack - sizeof(word) * (nenv+1+nargs+1);
1540 pointers &= ~3; /* must be 4-byte aligned */
1541 cpu.gr[0] = pointers;
1542
1543 strings = cpu.gr[0] - s_length;
1544 strings &= ~3; /* want to make it 4-byte aligned */
1545 cpu.gr[0] = strings;
1546 /* dac fix, the stack address must be 8-byte aligned! */
1547 cpu.gr[0] = cpu.gr[0] - cpu.gr[0] % 8;
1548
1549 /* Loop through the arguments and fill them in. */
1550 cpu.gr[PARM1] = nargs;
1551 if (nargs == 0)
1552 {
1553 /* No strings to fill in. */
1554 cpu.gr[PARM2] = 0;
1555 }
1556 else
1557 {
1558 cpu.gr[PARM2] = pointers;
1559 avp = argv;
1560 while (avp && *avp)
1561 {
1562 /* Save where we're putting it. */
1563 wlat (pointers, strings);
1564
1565 /* Copy the string. */
1566 l = strlen (* avp) + 1;
1567 sim_core_write_buffer (sd, scpu, write_map, *avp, strings, l);
1568
1569 /* Bump the pointers. */
1570 avp++;
1571 pointers += 4;
1572 strings += l+1;
1573 }
1574
1575 /* A null to finish the list. */
1576 wlat (pointers, 0);
1577 pointers += 4;
1578 }
1579
1580 /* Now do the environment pointers. */
1581 if (nenv == 0)
1582 {
1583 /* No strings to fill in. */
1584 cpu.gr[PARM3] = 0;
1585 }
1586 else
1587 {
1588 cpu.gr[PARM3] = pointers;
1589 avp = env;
1590
1591 while (avp && *avp)
1592 {
1593 /* Save where we're putting it. */
1594 wlat (pointers, strings);
1595
1596 /* Copy the string. */
1597 l = strlen (* avp) + 1;
1598 sim_core_write_buffer (sd, scpu, write_map, *avp, strings, l);
1599
1600 /* Bump the pointers. */
1601 avp++;
1602 pointers += 4;
1603 strings += l+1;
1604 }
1605
1606 /* A null to finish the list. */
1607 wlat (pointers, 0);
1608 pointers += 4;
1609 }
1610
1611 return SIM_RC_OK;
1612 }
This page took 0.060678 seconds and 3 git commands to generate.