e89385f845374e7e326b24af82ce3e34906b4426
[deliverable/binutils-gdb.git] / sim / mips / sky-pke.c
1 /* Copyright (C) 1998, Cygnus Solutions */
2
3
4 #include "config.h"
5
6 #include <stdlib.h>
7 #include "sim-main.h"
8 #include "sim-bits.h"
9 #include "sim-assert.h"
10 #include "sky-pke.h"
11 #include "sky-dma.h"
12 #include "sky-vu.h"
13 #include "sky-gpuif.h"
14 #include "sky-gdb.h"
15 #include "sky-device.h"
16
17
18 #ifdef HAVE_STRING_H
19 #include <string.h>
20 #else
21 #ifdef HAVE_STRINGS_H
22 #include <strings.h>
23 #endif
24 #endif
25
26
27 /* Internal function declarations */
28
29 static int pke_io_read_buffer(device*, void*, int, address_word,
30 unsigned, sim_cpu*, sim_cia);
31 static int pke_io_write_buffer(device*, const void*, int, address_word,
32 unsigned, sim_cpu*, sim_cia);
33 static void pke_reset(struct pke_device*);
34 static void pke_issue(SIM_DESC, struct pke_device*);
35 static void pke_pc_advance(struct pke_device*, int num_words);
36 static struct fifo_quadword* pke_pcrel_fifo(struct pke_device*, int operand_num,
37 unsigned_4** operand);
38 static unsigned_4* pke_pcrel_operand(struct pke_device*, int operand_num);
39 static unsigned_4 pke_pcrel_operand_bits(struct pke_device*, int bit_offset,
40 int bit_width, unsigned_4* sourceaddr);
41 static void pke_attach(SIM_DESC sd, struct pke_device* me);
42 enum pke_check_target { chk_vu, chk_path1, chk_path2, chk_path3 };
43 static int pke_check_stall(struct pke_device* me, enum pke_check_target what);
44 static void pke_flip_dbf(struct pke_device* me);
45 static void pke_begin_interrupt_stall(struct pke_device* me);
46 /* PKEcode handlers */
47 static void pke_code_nop(struct pke_device* me, unsigned_4 pkecode);
48 static void pke_code_stcycl(struct pke_device* me, unsigned_4 pkecode);
49 static void pke_code_offset(struct pke_device* me, unsigned_4 pkecode);
50 static void pke_code_base(struct pke_device* me, unsigned_4 pkecode);
51 static void pke_code_itop(struct pke_device* me, unsigned_4 pkecode);
52 static void pke_code_stmod(struct pke_device* me, unsigned_4 pkecode);
53 static void pke_code_mskpath3(struct pke_device* me, unsigned_4 pkecode);
54 static void pke_code_pkemark(struct pke_device* me, unsigned_4 pkecode);
55 static void pke_code_flushe(struct pke_device* me, unsigned_4 pkecode);
56 static void pke_code_flush(struct pke_device* me, unsigned_4 pkecode);
57 static void pke_code_flusha(struct pke_device* me, unsigned_4 pkecode);
58 static void pke_code_pkemscal(struct pke_device* me, unsigned_4 pkecode);
59 static void pke_code_pkemscnt(struct pke_device* me, unsigned_4 pkecode);
60 static void pke_code_pkemscalf(struct pke_device* me, unsigned_4 pkecode);
61 static void pke_code_stmask(struct pke_device* me, unsigned_4 pkecode);
62 static void pke_code_strow(struct pke_device* me, unsigned_4 pkecode);
63 static void pke_code_stcol(struct pke_device* me, unsigned_4 pkecode);
64 static void pke_code_mpg(struct pke_device* me, unsigned_4 pkecode);
65 static void pke_code_direct(struct pke_device* me, unsigned_4 pkecode);
66 static void pke_code_directhl(struct pke_device* me, unsigned_4 pkecode);
67 static void pke_code_unpack(struct pke_device* me, unsigned_4 pkecode);
68 static void pke_code_error(struct pke_device* me, unsigned_4 pkecode);
69 unsigned_4 pke_fifo_flush(struct pke_fifo*);
70 void pke_fifo_reset(struct pke_fifo*);
71 struct fifo_quadword* pke_fifo_fit(struct pke_fifo*);
72 struct fifo_quadword* pke_fifo_access(struct pke_fifo*, unsigned_4 qwnum);
73 void pke_fifo_old(struct pke_fifo*, unsigned_4 qwnum);
74
/* Default --log-file names, used when tracing is enabled but the user
   gave no explicit file name (see pke_issue()).  */
const char *pke0_default_trace = "vif0.s";  /* PKE0 (VIF0) disassembly trace */
const char *pke1_default_trace = "vif1.s";  /* PKE1 (VIF1) disassembly trace */
78
79
80 /* Static data */
81
/* Singleton state for the PKE0 (VIF0) device; fields start in their
   power-on state.  Run-time resets go through pke_reset() instead.  */
struct pke_device pke0_device =
{
  { "vif0", &pke_io_read_buffer, &pke_io_write_buffer }, /* device */
  0, 0, /* ID, flags */
  {}, /* regs */
  {}, 0, /* FIFO write buffer */
  { NULL, 0, 0, 0 }, /* FIFO */
  NULL, NULL, /* FIFO trace file descriptor and name */
  -1, -1, 0, 0, 0, /* invalid FIFO cache (see pke_pcrel_fifo) */
  0, 0, /* pc */
  NULL, NULL /* disassembly trace file descriptor and name */
};
94
95
/* Singleton state for the PKE1 (VIF1) device; fields start in their
   power-on state.  Run-time resets go through pke_reset() instead.  */
struct pke_device pke1_device =
{
  { "vif1", &pke_io_read_buffer, &pke_io_write_buffer }, /* device */
  1, 0, /* ID, flags */
  {}, /* regs */
  {}, 0, /* FIFO write buffer */
  { NULL, 0, 0, 0 }, /* FIFO */
  NULL, NULL, /* FIFO trace file descriptor and name */
  -1, -1, 0, 0, 0, /* invalid FIFO cache (see pke_pcrel_fifo) */
  0, 0, /* pc */
  NULL, NULL /* disassembly trace file descriptor and name */
};
108
109
110
111 /* External functions */
112
113
114 /* Attach PKE addresses to main memory */
115
116 void
117 pke0_attach(SIM_DESC sd)
118 {
119 pke_attach(sd, & pke0_device);
120 pke_reset(& pke0_device);
121 }
122
123 void
124 pke1_attach(SIM_DESC sd)
125 {
126 pke_attach(sd, & pke1_device);
127 pke_reset(& pke1_device);
128 }
129
130
131
132 /* Issue a PKE instruction if possible */
133
134 void
135 pke0_issue(SIM_DESC sd)
136 {
137 pke_issue(sd, & pke0_device);
138 }
139
140 void
141 pke1_issue(SIM_DESC sd)
142 {
143 pke_issue(sd, & pke1_device);
144 }
145
146
147
148 /* Internal functions */
149
150
151 /* Attach PKE memory regions to simulator */
152
153 void
154 pke_attach(SIM_DESC sd, struct pke_device* me)
155 {
156 /* register file */
157 sim_core_attach (sd, NULL, 0, access_read_write, 0,
158 (me->pke_number == 0) ? PKE0_REGISTER_WINDOW_START : PKE1_REGISTER_WINDOW_START,
159 PKE_REGISTER_WINDOW_SIZE /*nr_bytes*/,
160 0 /*modulo*/,
161 (device*) me,
162 NULL /*buffer*/);
163
164 /* FIFO port */
165 sim_core_attach (sd, NULL, 0, access_read_write, 0,
166 (me->pke_number == 0) ? PKE0_FIFO_ADDR : PKE1_FIFO_ADDR,
167 sizeof(quadword) /*nr_bytes*/,
168 0 /*modulo*/,
169 (device*) me,
170 NULL /*buffer*/);
171
172 /* VU MEM0 tracking table */
173 sim_core_attach (sd, NULL, 0, access_read_write, 0,
174 ((me->pke_number == 0) ? VU0_MEM0_SRCADDR_START : VU1_MEM0_SRCADDR_START),
175 ((me->pke_number == 0) ? VU0_MEM0_SIZE : VU1_MEM0_SIZE) / 2,
176 0 /*modulo*/,
177 NULL,
178 NULL /*buffer*/);
179
180 /* VU MEM1 tracking table */
181 sim_core_attach (sd, NULL, 0, access_read_write, 0,
182 ((me->pke_number == 0) ? VU0_MEM1_SRCADDR_START : VU1_MEM1_SRCADDR_START),
183 ((me->pke_number == 0) ? VU0_MEM1_SIZE : VU1_MEM1_SIZE) / 4,
184 0 /*modulo*/,
185 NULL,
186 NULL /*buffer*/);
187 }
188
189
190 /* Read PKE Pseudo-PC index into buf in target order */
191 int
192 read_pke_pcx (struct pke_device *me, void *buf)
193 {
194 *((int *) buf) = H2T_4( (me->fifo_pc << 2) | me->qw_pc );
195 return 4;
196 }
197
198
199 /* Read PKE Pseudo-PC source address into buf in target order */
200 int
201 read_pke_pc (struct pke_device *me, void *buf)
202 {
203 struct fifo_quadword* fqw = pke_fifo_access(& me->fifo, me->fifo_pc);
204 unsigned_4 addr;
205
206 if (fqw == NULL)
207 *((int *) buf) = 0;
208 else
209 {
210 addr = (fqw->source_address & ~15) | (me->qw_pc << 2);
211 *((unsigned_4 *) buf) = H2T_4( addr );
212 }
213
214 return 4;
215 }
216
217
/* Read PKE reg into buf in target order.
   Returns the number of bytes produced (always 4).  Registers that are
   not meaningful for this unit read as zero.  */
int
read_pke_reg (struct pke_device *me, int reg_num, void *buf)
{
  /* handle reads to individual registers; clear `readable' on error */
  switch (reg_num)
    {
      /* handle common case of register reading, side-effect free */
      /* PKE1-only registers*/
    case PKE_REG_BASE:
    case PKE_REG_OFST:
    case PKE_REG_TOPS:
    case PKE_REG_TOP:
    case PKE_REG_DBF:
      /* PKE0 lacks the double-buffer machinery: these read as zero there */
      if (me->pke_number == 0)
        {
          *((int *) buf) = 0;
          break;
        }
      /* fall through */

      /* PKE0 & PKE1 common registers*/
    case PKE_REG_STAT:
    case PKE_REG_ERR:
    case PKE_REG_MARK:
    case PKE_REG_CYCLE:
    case PKE_REG_MODE:
    case PKE_REG_NUM:
    case PKE_REG_MASK:
    case PKE_REG_CODE:
    case PKE_REG_ITOPS:
    case PKE_REG_ITOP:
    case PKE_REG_R0:
    case PKE_REG_R1:
    case PKE_REG_R2:
    case PKE_REG_R3:
    case PKE_REG_C0:
    case PKE_REG_C1:
    case PKE_REG_C2:
    case PKE_REG_C3:
      /* plain register fetch, converted host-to-target byte order */
      *((int *) buf) = H2T_4(me->regs[reg_num][0]);
      break;

      /* handle common case of write-only registers */
    case PKE_REG_FBRST:
      *((int *) buf) = 0;
      break;

    default:
      ASSERT(0); /* tests above should prevent this possibility */
    }

  return 4;
}
272
273
/* Handle a PKE read; return no. of bytes read.
   Dispatches a core-memory read landing in this PKE's register window
   or FIFO port.  Returns 0 for rejected accesses (e.g. an access that
   straddles a quadword boundary).  */

int
pke_io_read_buffer(device *me_,
                   void *dest,
                   int space,
                   address_word addr,
                   unsigned nr_bytes,
                   sim_cpu *cpu,
                   sim_cia cia)
{
  /* downcast to gather embedding pke_device struct */
  struct pke_device* me = (struct pke_device*) me_;

  /* find my address ranges */
  address_word my_reg_start =
    (me->pke_number == 0) ? PKE0_REGISTER_WINDOW_START : PKE1_REGISTER_WINDOW_START;
  address_word my_fifo_addr =
    (me->pke_number == 0) ? PKE0_FIFO_ADDR : PKE1_FIFO_ADDR;

  /* enforce that an access does not span more than one quadword */
  address_word low = ADDR_TRUNC_QW(addr);
  address_word high = ADDR_TRUNC_QW(addr + nr_bytes - 1);
  if(low != high)
    return 0;

  /* classify address & handle */
  if((addr >= my_reg_start) && (addr < my_reg_start + PKE_REGISTER_WINDOW_SIZE))
    {
      /* register bank -- each register occupies one 16-byte slot */
      int reg_num = ADDR_TRUNC_QW(addr - my_reg_start) >> 4;
      int reg_byte = ADDR_OFFSET_QW(addr);      /* find byte-offset inside register bank */
      quadword result;

      /* clear result */
      result[0] = result[1] = result[2] = result[3] = 0;

      /* fetch the register value (target byte order) into result[0] */
      read_pke_reg (me, reg_num, result);

      /* perform transfer & return */
      memcpy(dest, ((unsigned_1*) &result) + reg_byte, nr_bytes);

      return nr_bytes;
      /* NOTREACHED */
    }
  else if(addr >= my_fifo_addr &&
          addr < my_fifo_addr + sizeof(quadword))
    {
      /* FIFO */

      /* FIFO is not readable: return a word of zeroes */
      memset(dest, 0, nr_bytes);
      return nr_bytes;
    }

  /* NOTREACHED */
  return 0;
}
332
/* Write PKE reg from buf, which is in target order.
   Returns 4 on success, 0 if the register is read-only (or not present
   on this unit).  Writes to FBRST, ERR and MARK have side effects; all
   other registers reject the write.  */
int
write_pke_reg (struct pke_device *me, int reg_num, const void *buf)
{
  int writeable = 1;
  /* make words host-endian */
  unsigned_4 input = T2H_4( *((unsigned_4 *) buf) );

  /* handle writes to individual registers; clear `writeable' on error */
  switch (reg_num)
    {
    case PKE_REG_FBRST:
      /* Order these tests from least to most overriding, in case
         multiple bits are set. */
      if(BIT_MASK_GET(input, PKE_REG_FBRST_STC_B, PKE_REG_FBRST_STC_E))
        {
          /* clear a bunch of status bits */
          PKE_REG_MASK_SET(me, STAT, PSS, 0);
          PKE_REG_MASK_SET(me, STAT, PFS, 0);
          PKE_REG_MASK_SET(me, STAT, PIS, 0);
          PKE_REG_MASK_SET(me, STAT, INT, 0);
          PKE_REG_MASK_SET(me, STAT, ER0, 0);
          PKE_REG_MASK_SET(me, STAT, ER1, 0);
          me->flags &= ~PKE_FLAG_PENDING_PSS;
          /* will allow resumption of possible stalled instruction */
        }
      if(BIT_MASK_GET(input, PKE_REG_FBRST_STP_B, PKE_REG_FBRST_STP_E))
        {
          /* defer the stall until the next idle point; see pke_issue() */
          me->flags |= PKE_FLAG_PENDING_PSS;
        }
      if(BIT_MASK_GET(input, PKE_REG_FBRST_FBK_B, PKE_REG_FBRST_FBK_E))
        {
          /* force-break: stall immediately */
          PKE_REG_MASK_SET(me, STAT, PFS, 1);
        }
      if(BIT_MASK_GET(input, PKE_REG_FBRST_RST_B, PKE_REG_FBRST_RST_E))
        {
          /* full device reset overrides everything above */
          pke_reset(me);
        }
      break;

    case PKE_REG_ERR:
      /* copy bottom three bits */
      BIT_MASK_SET(me->regs[PKE_REG_ERR][0], 0, 2, BIT_MASK_GET(input, 0, 2));
      break;

    case PKE_REG_MARK:
      /* copy bottom sixteen bits */
      PKE_REG_MASK_SET(me, MARK, MARK, BIT_MASK_GET(input, 0, 15));
      /* reset MRK bit in STAT */
      PKE_REG_MASK_SET(me, STAT, MRK, 0);
      break;

      /* handle common case of read-only registers */
      /* PKE1-only registers - not really necessary to handle separately */
    case PKE_REG_BASE:
    case PKE_REG_OFST:
    case PKE_REG_TOPS:
    case PKE_REG_TOP:
    case PKE_REG_DBF:
      if(me->pke_number == 0)
        writeable = 0;
      /* fall through */
      /* PKE0 & PKE1 common registers*/
    case PKE_REG_STAT:
      /* ignore FDR bit for PKE1_STAT -- simulator does not implement PKE->RAM transfers */
    case PKE_REG_CYCLE:
    case PKE_REG_MODE:
    case PKE_REG_NUM:
    case PKE_REG_MASK:
    case PKE_REG_CODE:
    case PKE_REG_ITOPS:
    case PKE_REG_ITOP:
    case PKE_REG_R0:
    case PKE_REG_R1:
    case PKE_REG_R2:
    case PKE_REG_R3:
    case PKE_REG_C0:
    case PKE_REG_C1:
    case PKE_REG_C2:
    case PKE_REG_C3:
      writeable = 0;
      break;

    default:
      ASSERT(0); /* test above should prevent this possibility */
    }

  /* perform return */
  if(! writeable)
    {
      return 0; /* error */
    }

  return 4;
}
428
429
/* Handle a PKE write; return no. of bytes written.
   Register-window writes go through write_pke_reg().  FIFO-port writes
   are accumulated byte-by-byte in me->fifo_qw_in_progress until a full
   quadword has arrived, then the quadword is pushed onto the FIFO along
   with DMAC-supplied source-address/tag metadata.  */

int
pke_io_write_buffer(device *me_,
                    const void *src,
                    int space,
                    address_word addr,
                    unsigned nr_bytes,
                    sim_cpu *cpu,
                    sim_cia cia)
{
  /* downcast to gather embedding pke_device struct */
  struct pke_device* me = (struct pke_device*) me_;

  /* find my address ranges */
  address_word my_reg_start =
    (me->pke_number == 0) ? PKE0_REGISTER_WINDOW_START : PKE1_REGISTER_WINDOW_START;
  address_word my_fifo_addr =
    (me->pke_number == 0) ? PKE0_FIFO_ADDR : PKE1_FIFO_ADDR;

  /* enforce that an access does not span more than one quadword */
  address_word low = ADDR_TRUNC_QW(addr);
  address_word high = ADDR_TRUNC_QW(addr + nr_bytes - 1);
  if(low != high)
    return 0;

  /* classify address & handle */
  if((addr >= my_reg_start) && (addr < my_reg_start + PKE_REGISTER_WINDOW_SIZE))
    {
      /* register bank -- each register occupies one 16-byte slot */
      int reg_num = ADDR_TRUNC_QW(addr - my_reg_start) >> 4;
      int reg_byte = ADDR_OFFSET_QW(addr);      /* find byte-offset inside register bank */
      quadword input;

      /* clear input */
      input[0] = input[1] = input[2] = input[3] = 0;

      /* write user-given bytes into input */
      memcpy(((unsigned_1*) &input) + reg_byte, src, nr_bytes);

      write_pke_reg (me, reg_num, input);
      return nr_bytes;

      /* NOTREACHED */
    }
  else if(addr >= my_fifo_addr &&
          addr < my_fifo_addr + sizeof(quadword))
    {
      /* FIFO */
      struct fifo_quadword* fqw;
      int fifo_byte = ADDR_OFFSET_QW(addr);      /* find byte-offset inside fifo quadword */
      unsigned_4 dma_tag_present = 0;
      int i;

      /* collect potentially-partial quadword in write buffer; LE byte order */
      memcpy(((unsigned_1*)& me->fifo_qw_in_progress) + fifo_byte, src, nr_bytes);
      /* mark bytes written -- one bit per byte of the quadword */
      for(i = fifo_byte; i < fifo_byte + nr_bytes; i++)
        BIT_MASK_SET(me->fifo_qw_done, i, i, 1);

      /* return if quadword not quite written yet */
      if(BIT_MASK_GET(me->fifo_qw_done, 0, sizeof(quadword)-1) !=
         BIT_MASK_BTW(0, sizeof(quadword)-1))
        return nr_bytes;

      /* all done - process quadword after clearing flag */
      BIT_MASK_SET(me->fifo_qw_done, 0, sizeof(quadword)-1, 0);

      /* allocate required address in FIFO */
      fqw = pke_fifo_fit(& me->fifo);
      ASSERT(fqw != NULL);

      /* fill in unclassified FIFO quadword data in host byte order */
      fqw->word_class[0] = fqw->word_class[1] =
        fqw->word_class[2] = fqw->word_class[3] = wc_unknown;
      fqw->data[0] = T2H_4(me->fifo_qw_in_progress[0]);
      fqw->data[1] = T2H_4(me->fifo_qw_in_progress[1]);
      fqw->data[2] = T2H_4(me->fifo_qw_in_progress[2]);
      fqw->data[3] = T2H_4(me->fifo_qw_in_progress[3]);

      /* read DMAC-supplied indicators */
      ASSERT(sizeof(unsigned_4) == 4);
      PKE_MEM_READ(me, (me->pke_number == 0 ? DMA_D0_MADR : DMA_D1_MADR),
                   & fqw->source_address, /* converted to host-endian */
                   4);
      PKE_MEM_READ(me, (me->pke_number == 0 ? DMA_D0_PKTFLAG : DMA_D1_PKTFLAG),
                   & dma_tag_present,
                   4);

      if(dma_tag_present)
        {
          /* lower two words are DMA tags */
          fqw->word_class[0] = fqw->word_class[1] = wc_dma;
        }

      /* set FQC to "1" as FIFO is now not empty */
      PKE_REG_MASK_SET(me, STAT, FQC, 1);

      /* okay */
      return nr_bytes;
    }

  /* NOTREACHED */
  return 0;
}
535
536
537
538 /* Reset the simulated PKE hardware state. Preserve other internal
539 state. */
540 void
541 pke_reset(struct pke_device* me)
542 {
543 /* advance PC over last quadword in FIFO; keep previous FIFO history */
544 me->fifo_pc = pke_fifo_flush(& me->fifo);
545 me->qw_pc = 0;
546 /* clear registers, flag, other state */
547 memset(me->regs, 0, sizeof(me->regs));
548 me->fifo_qw_done = 0;
549 /* Command options will remain alive over the reset. */
550 me->flags &= PKE_FLAG_TRACE_ON;
551
552 /* NOTE: Since disassembly / trace logs remain open across ordinary
553 simulated hardware resets, there may be a problem of producing a
554 trace file that has only partial results from the prior
555 operation. For the current PKE model, however, this cannot
556 happen as stalls & interrupts only occur *between* simulated
557 PKEcode executions. This means that our trace files ought remain
558 syntactically valid, despite resets. */
559
560 if(me->trace_file != NULL)
561 {
562 fprintf(me->trace_file,
563 "\n;%s RESET\n",
564 me->dev.name);
565 }
566 }
567
568
569
/* Issue & swallow next PKE opcode if possible/available.
   Four phases: fetch the next PKEcode word from the FIFO, check
   stall/halt conditions, decode the command field, then dispatch to the
   matching pke_code_* handler.  Returns early (doing nothing further)
   when the FIFO is empty or the unit is stalled.  */

void
pke_issue(SIM_DESC sd, struct pke_device* me)
{
  struct fifo_quadword* fqw;
  unsigned_4 fw;
  unsigned_4 cmd, intr;

  /* 1 -- fetch PKE instruction */

  /* confirm availability of new quadword of PKE instructions */
  fqw = pke_fifo_access(& me->fifo, me->fifo_pc);
  if(fqw == NULL)
    return;

  /* skip over DMA tag, if present */
  pke_pc_advance(me, 0);
  /* note: this can only change qw_pc from 0 to 2 and will not
     invalidate fqw */

  /* "fetch" instruction quadword and word */
  fw = fqw->data[me->qw_pc];

  /* store word in PKECODE register */
  me->regs[PKE_REG_CODE][0] = fw;


  /* 2 -- test go / no-go for PKE execution */

  /* switch on STAT:PSS if PSS-pending and in idle state */
  if((PKE_REG_MASK_GET(me, STAT, PPS) == PKE_REG_STAT_PPS_IDLE) &&
     (me->flags & PKE_FLAG_PENDING_PSS) != 0)
    {
      me->flags &= ~PKE_FLAG_PENDING_PSS;
      PKE_REG_MASK_SET(me, STAT, PSS, 1);
    }

  /* check for stall/halt control bits */
  if(PKE_REG_MASK_GET(me, STAT, PFS) ||
     PKE_REG_MASK_GET(me, STAT, PSS) || /* note special treatment below */
     /* PEW bit not a reason to keep stalling - it's just an indication, re-computed below */
     /* PGW bit not a reason to keep stalling - it's just an indication, re-computed below */
     /* ER0/ER1 not a reason to keep stalling - it's just an indication */
     PKE_REG_MASK_GET(me, STAT, PIS))
    {
      /* (still) stalled */
      PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_STALL);
      /* try again next cycle */
      return;
    }


  /* 3 -- decode PKE instruction */

  /* decoding */
  if(PKE_REG_MASK_GET(me, STAT, PPS) == PKE_REG_STAT_PPS_IDLE)
    PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_DECODE);

  /* Extract relevant bits from PKEcode */
  intr = BIT_MASK_GET(fw, PKE_OPCODE_I_B, PKE_OPCODE_I_E);
  cmd = BIT_MASK_GET(fw, PKE_OPCODE_CMD_B, PKE_OPCODE_CMD_E);

  /* handle interrupts */
  if(intr)
    {
      /* are we resuming an interrupt-stalled instruction? */
      if(me->flags & PKE_FLAG_INT_NOLOOP)
        {
          /* clear loop-prevention flag */
          me->flags &= ~PKE_FLAG_INT_NOLOOP;

          /* fall through to decode & execute */
          /* The pke_code_* functions should not check the MSB in the
             pkecode. */
        }
      else /* new interrupt-flagged instruction */
        {
          /* set INT flag in STAT register */
          PKE_REG_MASK_SET(me, STAT, INT, 1);
          /* set loop-prevention flag */
          me->flags |= PKE_FLAG_INT_NOLOOP;

          /* set PIS if stall not masked */
          if(!PKE_REG_MASK_GET(me, ERR, MII))
            pke_begin_interrupt_stall(me);

          /* suspend this instruction unless it's PKEMARK */
          if(!IS_PKE_CMD(cmd, PKEMARK))
            {
              PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_STALL);
              return;
            }
          else
            {
              ; /* fall through to decode & execute */
            }
        }
    }

  /* open trace file if necessary */
  if((me->flags & PKE_FLAG_TRACE_ON) &&
     (me->trace_file == NULL))
    {
      /* use default names */
      if(me->trace_file_name == NULL)
        {
          if(me->pke_number == 0)
            me->trace_file_name = (char *) pke0_default_trace;
          else
            me->trace_file_name = (char *) pke1_default_trace;
        }

      sky_open_file(& (me->trace_file),
                    me->trace_file_name,
                    (char *) NULL, _IOFBF );

      /* print disassembly header */
      fprintf(me->trace_file,
              "\t.global %s_disassembly_tag\n"
              "%s_disassembly_tag:\n",
              me->dev.name, me->dev.name);
    }

  /* 4 -- decode & execute */
  /* order tests in decreasing order of frequency */
  if(IS_PKE_CMD(cmd, PKENOP))
    pke_code_nop(me, fw);
  else if(IS_PKE_CMD(cmd, PKEMSCAL))
    pke_code_pkemscal(me, fw);
  else if(IS_PKE_CMD(cmd, PKEMSCNT))
    pke_code_pkemscnt(me, fw);
  else if(me->pke_number == 1 && IS_PKE_CMD(cmd, PKEMSCALF))
    pke_code_pkemscalf(me, fw);
  else if(IS_PKE_CMD(cmd, UNPACK))
    pke_code_unpack(me, fw);
  else if(IS_PKE_CMD(cmd, STCYCL))
    pke_code_stcycl(me, fw);
  else if(IS_PKE_CMD(cmd, FLUSHE))
    pke_code_flushe(me, fw);
  else if(me->pke_number == 1 && IS_PKE_CMD(cmd, FLUSH))
    pke_code_flush(me, fw);
  else if(me->pke_number == 1 && IS_PKE_CMD(cmd, FLUSHA))
    pke_code_flusha(me, fw);
  else if(IS_PKE_CMD(cmd, DIRECT))
    pke_code_direct(me, fw);
  else if(IS_PKE_CMD(cmd, DIRECTHL))
    pke_code_directhl(me, fw);
  else if(me->pke_number == 1 && IS_PKE_CMD(cmd, OFFSET))
    pke_code_offset(me, fw);
  else if(me->pke_number == 1 && IS_PKE_CMD(cmd, BASE))
    pke_code_base(me, fw);
  else if(IS_PKE_CMD(cmd, ITOP))
    pke_code_itop(me, fw);
  else if(IS_PKE_CMD(cmd, STMOD))
    pke_code_stmod(me, fw);
  else if(IS_PKE_CMD(cmd, PKEMARK))
    pke_code_pkemark(me, fw);
  else if(IS_PKE_CMD(cmd, STMASK))
    pke_code_stmask(me, fw);
  else if(IS_PKE_CMD(cmd, STROW))
    pke_code_strow(me, fw);
  else if(IS_PKE_CMD(cmd, STCOL))
    pke_code_stcol(me, fw);
  else if(IS_PKE_CMD(cmd, MPG))
    pke_code_mpg(me, fw);
  else if(me->pke_number == 1 && IS_PKE_CMD(cmd, MSKPATH3))
    pke_code_mskpath3(me, fw);
  else if(cmd == TXVU_VIF_BRK_MASK)
    {
      /* debugger breakpoint word: halt the simulator at this PC */
      sim_cpu *cpu = STATE_CPU (sd, 0);
      unsigned_4 pc_addr = (fqw->source_address & ~15) | (me->qw_pc << 2);

      sim_engine_halt (sd, cpu, NULL, pc_addr, sim_stopped, SIM_SIGTRAP);
    }
  /* ... no other commands ... */
  else
    pke_code_error(me, fw);
}
749
750
751
752 /* Clear out contents of FIFO; act as if it was empty. Return PC
753 pointing to one-past-last word. */
754
755 unsigned_4
756 pke_fifo_flush(struct pke_fifo* fifo)
757 {
758 /* don't modify any state! */
759 return fifo->origin + fifo->next;
760 }
761
762
763
764 /* Clear out contents of FIFO; make it really empty. */
765
766 void
767 pke_fifo_reset(struct pke_fifo* fifo)
768 {
769 int i;
770
771 /* clear fifo quadwords */
772 for(i=0; i<fifo->next; i++)
773 {
774 zfree(fifo->quadwords[i]);
775 fifo->quadwords[i] = NULL;
776 }
777
778 /* reset pointers */
779 fifo->origin = 0;
780 fifo->next = 0;
781 }
782
783
784
/* Make space for the next quadword in the FIFO.  Allocate/enlarge
   FIFO pointer block if necessary.  Return a pointer to it.
   The pointer array grows in PKE_FIFO_GROW_SIZE increments; each
   quadword itself is heap-allocated separately.  */

struct fifo_quadword*
pke_fifo_fit(struct pke_fifo* fifo)
{
  struct fifo_quadword* fqw;

  /* out of space on quadword pointer array? */
  if(fifo->next == fifo->length) /* also triggered before fifo->quadwords allocated */
    {
      struct fifo_quadword** new_qw;
      unsigned_4 new_length = fifo->length + PKE_FIFO_GROW_SIZE;

      /* allocate new pointer block */
      new_qw = zalloc(new_length * sizeof(struct fifo_quadword*));
      ASSERT(new_qw != NULL);

      /* copy over old contents, if any */
      if(fifo->quadwords != NULL)
        {
          /* copy over old pointers to beginning of new block */
          memcpy(new_qw, fifo->quadwords,
                 fifo->length * sizeof(struct fifo_quadword*));

          /* free old block */
          zfree(fifo->quadwords);
        }

      /* replace pointers & counts */
      fifo->quadwords = new_qw;
      fifo->length = new_length;
    }

  /* sanity check */
  ASSERT(fifo->quadwords != NULL);

  /* allocate new quadword from heap */
  fqw = zalloc(sizeof(struct fifo_quadword));
  ASSERT(fqw != NULL);

  /* push quadword onto fifo */
  fifo->quadwords[fifo->next] = fqw;
  fifo->next++;
  return fqw;
}
831
832
833
834 /* Return a pointer to the FIFO quadword with given absolute index, or
835 NULL if it is out of range */
836
837 struct fifo_quadword*
838 pke_fifo_access(struct pke_fifo* fifo, unsigned_4 qwnum)
839 {
840 struct fifo_quadword* fqw;
841
842 if((qwnum < fifo->origin) || /* before history */
843 (qwnum >= fifo->origin + fifo->next)) /* after last available quadword */
844 fqw = NULL;
845 else
846 {
847 ASSERT(fifo->quadwords != NULL); /* must be allocated already */
848 fqw = fifo->quadwords[qwnum - fifo->origin]; /* pull out pointer from array */
849 ASSERT(fqw != NULL); /* must be allocated already */
850 }
851
852 return fqw;
853 }
854
855
/* Authorize release of any FIFO entries older than given absolute quadword.
   Retains PKE_FIFO_ARCHEOLOGY quadwords of history behind qwnum; anything
   older is freed and the surviving pointers are compacted to the front
   of the array, with origin/next adjusted to match.  */
void
pke_fifo_old(struct pke_fifo* fifo, unsigned_4 qwnum)
{
  /* do we have any too-old FIFO elements? */
  if(fifo->origin + PKE_FIFO_ARCHEOLOGY < qwnum)
    {
      /* count quadwords to forget */
      int horizon = qwnum - (fifo->origin + PKE_FIFO_ARCHEOLOGY);
      int i;

      /* free quadwords at indices below horizon */
      for(i=0; i < horizon; i++)
        zfree(fifo->quadwords[i]);

      /* move surviving quadword pointers down to beginning of array */
      for(i=horizon; i < fifo->next; i++)
        fifo->quadwords[i-horizon] = fifo->quadwords[i];

      /* clear duplicate pointers left behind by the compaction */
      for(i=fifo->next - horizon; i < fifo->next; i++)
        fifo->quadwords[i] = NULL;

      /* adjust FIFO pointers */
      fifo->origin = fifo->origin + horizon;
      fifo->next = fifo->next - horizon;
    }
}
884
885
886
887
/* advance the PC by given number of data words; update STAT/FQC
   field; assume FIFO is filled enough; classify passed-over words;
   write FIFO trace line.
   DMA-tag words encountered along the way are skipped for free (they
   do not count against num_words).  num_words may be 0, which skips
   only leading DMA tags.  */

void
pke_pc_advance(struct pke_device* me, int num_words)
{
  int num = num_words;
  struct fifo_quadword* fq = NULL;
  unsigned_4 old_fifo_pc = me->fifo_pc;

  ASSERT(num_words >= 0);

  /* printf("pke %d pc_advance num_words %d\n", me->pke_number, num_words); */

  while(1)
    {
      /* find next quadword, if any */
      fq = pke_fifo_access(& me->fifo, me->fifo_pc);

      /* skip over DMA tag words if present in word 0 or 1 */
      if(fq != NULL && fq->word_class[me->qw_pc] == wc_dma)
        {
          /* skip by going around loop an extra time */
          num ++;
        }

      /* nothing left to skip / no DMA tag here */
      if(num == 0)
        break;

      /* we are supposed to skip existing words */
      ASSERT(fq != NULL);

      /* one word skipped */
      num --;

      /* point to next word */
      me->qw_pc ++;
      if(me->qw_pc == 4)
        {
          /* crossed into the next quadword */
          me->qw_pc = 0;
          me->fifo_pc ++;

          /* trace the consumption of the FIFO quadword we just skipped over */
          /* fq still points to it */
          if ( indebug (me->dev.name))
            {
              if (( me->fifo_trace_file == NULL) &&
                  ( me->fifo_trace_file_name != NULL ))
                sky_open_file (&me->fifo_trace_file, me->fifo_trace_file_name,
                               (char *) NULL, _IOLBF );

              /* assert complete classification */
              ASSERT(fq->word_class[3] != wc_unknown);
              ASSERT(fq->word_class[2] != wc_unknown);
              ASSERT(fq->word_class[1] != wc_unknown);
              ASSERT(fq->word_class[0] != wc_unknown);

              /* print trace record */
              fprintf((me->fifo_trace_file != NULL) ? me->fifo_trace_file : stdout,
                      "%d 0x%08x_%08x_%08x_%08x 0x%08x %c%c%c%c\n",
                      (me->pke_number == 0 ? 0 : 1),
                      (unsigned) fq->data[3], (unsigned) fq->data[2],
                      (unsigned) fq->data[1], (unsigned) fq->data[0],
                      (unsigned) fq->source_address,
                      fq->word_class[3], fq->word_class[2],
                      fq->word_class[1], fq->word_class[0]);
            }
        } /* next quadword */
    }

  /* age old entries before PC */
  if(me->fifo_pc != old_fifo_pc)
    {
      /* we advanced the fifo-pc; authorize disposal of anything
         before previous PKEcode */
      pke_fifo_old(& me->fifo, old_fifo_pc);
    }

  /* clear FQC if FIFO is now empty */
  fq = pke_fifo_access(& me->fifo, me->fifo_pc);
  if(fq == NULL)
    {
      PKE_REG_MASK_SET(me, STAT, FQC, 0);
    }
  else /* annote the word where the PC lands as an PKEcode */
    {
      ASSERT(fq->word_class[me->qw_pc] == wc_pkecode || fq->word_class[me->qw_pc] == wc_unknown);
      fq->word_class[me->qw_pc] = wc_pkecode;
    }
}
980
981
982
983
984
/* Return pointer to FIFO quadword containing given operand# in FIFO.
   `operand_num' starts at 1.  Return pointer to operand word in last
   argument, if non-NULL.  If FIFO is not full enough, return 0.
   Signal an ER0 indication upon skipping a DMA tag.
   A one-entry cache (me->last_*) avoids re-walking the FIFO when
   callers request successive operands of the same instruction.  */

struct fifo_quadword*
pke_pcrel_fifo(struct pke_device* me, int operand_num, unsigned_4** operand)
{
  int num;
  int new_qw_pc, new_fifo_pc;
  struct fifo_quadword* fq = NULL;

  /* check for validity of last search results in cache */
  if(me->last_fifo_pc == me->fifo_pc &&
     me->last_qw_pc == me->qw_pc &&
     operand_num > me->last_num)
    {
      /* continue search from last stop */
      new_fifo_pc = me->last_new_fifo_pc;
      new_qw_pc = me->last_new_qw_pc;
      num = operand_num - me->last_num;
    }
  else
    {
      /* start search from scratch */
      new_fifo_pc = me->fifo_pc;
      new_qw_pc = me->qw_pc;
      num = operand_num;
    }

  ASSERT(num > 0);

  /* printf("pke %d pcrel_fifo operand_num %d\n", me->pke_number, operand_num); */

  do
    {
      /* one word skipped */
      num --;

      /* point to next word */
      new_qw_pc ++;
      if(new_qw_pc == 4)
        {
          new_qw_pc = 0;
          new_fifo_pc ++;
        }

      fq = pke_fifo_access(& me->fifo, new_fifo_pc);

      /* check for FIFO underflow */
      if(fq == NULL)
        break;

      /* skip over DMA tag words if present in word 0 or 1 */
      if(fq->word_class[new_qw_pc] == wc_dma)
        {
          /* set ER0 */
          PKE_REG_MASK_SET(me, STAT, ER0, 1);

          /* mismatch error! */
          if(! PKE_REG_MASK_GET(me, ERR, ME0))
            {
              pke_begin_interrupt_stall(me);
              /* don't stall just yet -- finish this instruction */
              /* the PPS_STALL state will be entered by pke_issue() next time */
            }
          /* skip by going around loop an extra time */
          num ++;
        }
    }
  while(num > 0);

  /* return pointer to operand word itself */
  if(fq != NULL)
    {
      *operand = & fq->data[new_qw_pc];

      /* annote the word where the pseudo-PC lands as an PKE operand */
      ASSERT(fq->word_class[new_qw_pc] == wc_pkedata || fq->word_class[new_qw_pc] == wc_unknown);
      fq->word_class[new_qw_pc] = wc_pkedata;

      /* store search results in cache */
      /* keys */
      me->last_fifo_pc = me->fifo_pc;
      me->last_qw_pc = me->qw_pc;
      /* values */
      me->last_num = operand_num;
      me->last_new_fifo_pc = new_fifo_pc;
      me->last_new_qw_pc = new_qw_pc;
    }

  return fq;
}
1078
1079
1080 /* Return pointer to given operand# in FIFO. `operand_num' starts at 1.
1081 If FIFO is not full enough, return 0. Skip over DMA tags, but mark
1082 them as an error (ER0). */
1083
1084 unsigned_4*
1085 pke_pcrel_operand(struct pke_device* me, int operand_num)
1086 {
1087 unsigned_4* operand = NULL;
1088 struct fifo_quadword* fifo_operand;
1089
1090 fifo_operand = pke_pcrel_fifo(me, operand_num, & operand);
1091
1092 if(fifo_operand == NULL)
1093 ASSERT(operand == NULL); /* pke_pcrel_fifo() ought leave it untouched */
1094
1095 return operand;
1096 }
1097
1098
1099 /* Return a bit-field extract of given operand# in FIFO, and its
1100 word-accurate source-addr. `bit_offset' starts at 0, referring to
1101 LSB after PKE instruction word. Width must be >0, <=32. Assume
1102 FIFO is full enough. Skip over DMA tags, but mark them as an error
1103 (ER0). */
1104
1105 unsigned_4
1106 pke_pcrel_operand_bits(struct pke_device* me, int bit_offset, int bit_width, unsigned_4* source_addr)
1107 {
1108 unsigned_4* word = NULL;
1109 unsigned_4 value;
1110 struct fifo_quadword* fifo_operand;
1111 int wordnumber, bitnumber;
1112 int i;
1113
1114 wordnumber = bit_offset/32;
1115 bitnumber = bit_offset%32;
1116
1117 /* find operand word with bitfield */
1118 fifo_operand = pke_pcrel_fifo(me, wordnumber + 1, &word);
1119 ASSERT(word != NULL);
1120
1121 /* extract bitfield from word */
1122 value = BIT_MASK_GET(*word, bitnumber, bitnumber + bit_width - 1);
1123
1124 /* extract source addr from fifo word */
1125 *source_addr = fifo_operand->source_address;
1126
1127 /* add word offset */
1128 for(i=0; i<3; i++)
1129 if(word == & fifo_operand->data[i])
1130 *source_addr += sizeof(unsigned_4) * i;
1131
1132 return value;
1133 }
1134
1135
1136
1137 /* check for stall conditions on indicated devices (path* only on
1138 PKE1), do not change status; return 0 iff no stall */
1139 int
1140 pke_check_stall(struct pke_device* me, enum pke_check_target what)
1141 {
1142 int any_stall = 0;
1143 unsigned_4 cop2_stat, gpuif_stat;
1144
1145 /* read status words */
1146 ASSERT(sizeof(unsigned_4) == 4);
1147 PKE_MEM_READ(me, (GIF_REG_STAT),
1148 & gpuif_stat,
1149 4);
1150 PKE_MEM_READ(me, (COP2_REG_STAT_ADDR),
1151 & cop2_stat,
1152 4);
1153
1154 /* perform checks */
1155 if(what == chk_vu)
1156 {
1157 if(me->pke_number == 0)
1158 any_stall = BIT_MASK_GET(cop2_stat, COP2_REG_STAT_VBS0_B, COP2_REG_STAT_VBS0_E);
1159 else /* if(me->pke_number == 1) */
1160 any_stall = BIT_MASK_GET(cop2_stat, COP2_REG_STAT_VBS1_B, COP2_REG_STAT_VBS1_E);
1161 }
1162 else if(what == chk_path1) /* VU -> GPUIF */
1163 {
1164 ASSERT(me->pke_number == 1);
1165 if(BIT_MASK_GET(gpuif_stat, GPUIF_REG_STAT_APATH_B, GPUIF_REG_STAT_APATH_E) == 1)
1166 any_stall = 1;
1167 }
1168 else if(what == chk_path2) /* PKE -> GPUIF */
1169 {
1170 ASSERT(me->pke_number == 1);
1171 if(BIT_MASK_GET(gpuif_stat, GPUIF_REG_STAT_APATH_B, GPUIF_REG_STAT_APATH_E) == 2)
1172 any_stall = 1;
1173 }
1174 else if(what == chk_path3) /* DMA -> GPUIF */
1175 {
1176 ASSERT(me->pke_number == 1);
1177 if(BIT_MASK_GET(gpuif_stat, GPUIF_REG_STAT_APATH_B, GPUIF_REG_STAT_APATH_E) == 3)
1178 any_stall = 1;
1179 }
1180 else
1181 {
1182 /* invalid what */
1183 ASSERT(0);
1184 }
1185
1186 /* any stall reasons? */
1187 return any_stall;
1188 }
1189
1190
1191 /* PKE1 only: flip the DBF bit; recompute TOPS, TOP */
1192 void
1193 pke_flip_dbf(struct pke_device* me)
1194 {
1195 int newdf;
1196 /* compute new TOP */
1197 PKE_REG_MASK_SET(me, TOP, TOP,
1198 PKE_REG_MASK_GET(me, TOPS, TOPS));
1199 /* flip DBF */
1200 newdf = PKE_REG_MASK_GET(me, DBF, DF) ? 0 : 1;
1201 PKE_REG_MASK_SET(me, DBF, DF, newdf);
1202 PKE_REG_MASK_SET(me, STAT, DBF, newdf);
1203 /* compute new TOPS */
1204 PKE_REG_MASK_SET(me, TOPS, TOPS,
1205 (PKE_REG_MASK_GET(me, BASE, BASE) +
1206 newdf * PKE_REG_MASK_GET(me, OFST, OFFSET)));
1207
1208 /* this is equivalent to last word from okadaa (98-02-25):
1209 1) TOP=TOPS;
1210 2) TOPS=BASE + !DBF*OFFSET
1211 3) DBF=!DBF */
1212 }
1213
1214
/* set the STAT:PIS bit and send an interrupt to the 5900 */
void
pke_begin_interrupt_stall(struct pke_device* me)
{
  /* set PIS; per the callers' comments, pke_issue() will move the unit
     into the PPS_STALL state on a later cycle once it sees this bit */
  PKE_REG_MASK_SET(me, STAT, PIS, 1);
  /* raise the interrupt line toward the 5900 core */
  sky_signal_interrupt();
}
1223
1224
1225
1226
1227 /* PKEcode handler functions -- responsible for checking and
1228 confirming old stall conditions, executing pkecode, updating PC and
1229 status registers -- may assume being run on correct PKE unit */
1230
1231 void
1232 pke_code_nop(struct pke_device* me, unsigned_4 pkecode)
1233 {
1234 /* done */
1235 if(me->trace_file != NULL)
1236 {
1237 fprintf(me->trace_file,
1238 "\n; %s PC %d/%d\n"
1239 "\tDmaCnt *\n"
1240 "\tVIFNOP%s\n"
1241 "\t.EndDmaData\n",
1242 me->dev.name, me->fifo_pc, me->qw_pc,
1243 (BIT_MASK_GET(pkecode, PKE_OPCODE_I_B, PKE_OPCODE_I_E) ? "[i]" : ""));
1244 }
1245 pke_pc_advance(me, 1);
1246 PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_IDLE);
1247 }
1248
1249
1250 void
1251 pke_code_stcycl(struct pke_device* me, unsigned_4 pkecode)
1252 {
1253 int imm = BIT_MASK_GET(pkecode, PKE_OPCODE_IMM_B, PKE_OPCODE_IMM_E);
1254
1255 /* copy immediate value into CYCLE reg */
1256 PKE_REG_MASK_SET(me, CYCLE, WL, BIT_MASK_GET(imm, 8, 15));
1257 PKE_REG_MASK_SET(me, CYCLE, CL, BIT_MASK_GET(imm, 0, 7));
1258 /* done */
1259 if(me->trace_file != NULL)
1260 {
1261 fprintf(me->trace_file,
1262 "\n; %s PC %d/%d\n"
1263 "\tDmaCnt *\n"
1264 "\tSTCYCL%s %d,%d\n"
1265 "\t.EndDmaData\n",
1266 me->dev.name, me->fifo_pc, me->qw_pc,
1267 (BIT_MASK_GET(pkecode, PKE_OPCODE_I_B, PKE_OPCODE_I_E) ? "[i]" : ""),
1268 BIT_MASK_GET(imm, 8, 15), BIT_MASK_GET(imm, 0, 7));
1269 }
1270 pke_pc_advance(me, 1);
1271 PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_IDLE);
1272 }
1273
1274
1275 void
1276 pke_code_offset(struct pke_device* me, unsigned_4 pkecode)
1277 {
1278 int imm = BIT_MASK_GET(pkecode, PKE_OPCODE_IMM_B, PKE_OPCODE_IMM_E);
1279
1280 /* copy 10 bits to OFFSET field */
1281 PKE_REG_MASK_SET(me, OFST, OFFSET, BIT_MASK_GET(imm, 0, 9));
1282 /* clear DBF bit */
1283 PKE_REG_MASK_SET(me, DBF, DF, 0);
1284 /* clear other DBF bit */
1285 PKE_REG_MASK_SET(me, STAT, DBF, 0);
1286 /* set TOPS = BASE */
1287 PKE_REG_MASK_SET(me, TOPS, TOPS, PKE_REG_MASK_GET(me, BASE, BASE));
1288 /* done */
1289 if(me->trace_file != NULL)
1290 {
1291 fprintf(me->trace_file,
1292 "\n; %s PC %d/%d\n"
1293 "\tDmaCnt *\n"
1294 "\tOFFSET%s 0x%x\n"
1295 "\t.EndDmaData\n",
1296 me->dev.name, me->fifo_pc, me->qw_pc,
1297 (BIT_MASK_GET(pkecode, PKE_OPCODE_I_B, PKE_OPCODE_I_E) ? "[i]" : ""),
1298 imm);
1299 }
1300 pke_pc_advance(me, 1);
1301 PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_IDLE);
1302 }
1303
1304
1305 void
1306 pke_code_base(struct pke_device* me, unsigned_4 pkecode)
1307 {
1308 int imm = BIT_MASK_GET(pkecode, PKE_OPCODE_IMM_B, PKE_OPCODE_IMM_E);
1309
1310 /* copy 10 bits to BASE field */
1311 PKE_REG_MASK_SET(me, BASE, BASE, BIT_MASK_GET(imm, 0, 9));
1312 /* done */
1313 if(me->trace_file != NULL)
1314 {
1315 fprintf(me->trace_file,
1316 "\n; %s PC %d/%d\n"
1317 "\tDmaCnt *\n"
1318 "\tBASE%s 0x%x\n"
1319 "\t.EndDmaData\n",
1320 me->dev.name, me->fifo_pc, me->qw_pc,
1321 (BIT_MASK_GET(pkecode, PKE_OPCODE_I_B, PKE_OPCODE_I_E) ? "[i]" : ""),
1322 imm);
1323 }
1324 pke_pc_advance(me, 1);
1325 PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_IDLE);
1326 }
1327
1328
1329 void
1330 pke_code_itop(struct pke_device* me, unsigned_4 pkecode)
1331 {
1332 int imm = BIT_MASK_GET(pkecode, PKE_OPCODE_IMM_B, PKE_OPCODE_IMM_E);
1333
1334 /* copy 10 bits to ITOPS field */
1335 PKE_REG_MASK_SET(me, ITOPS, ITOPS, BIT_MASK_GET(imm, 0, 9));
1336 /* done */
1337 if(me->trace_file != NULL)
1338 {
1339 fprintf(me->trace_file,
1340 "\n; %s PC %d/%d\n"
1341 "\tDmaCnt *\n"
1342 "\tITOP%s 0x%x\n"
1343 "\t.EndDmaData\n",
1344 me->dev.name, me->fifo_pc, me->qw_pc,
1345 (BIT_MASK_GET(pkecode, PKE_OPCODE_I_B, PKE_OPCODE_I_E) ? "[i]" : ""),
1346 imm);
1347 }
1348 pke_pc_advance(me, 1);
1349 PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_IDLE);
1350 }
1351
1352
1353 void
1354 pke_code_stmod(struct pke_device* me, unsigned_4 pkecode)
1355 {
1356 int imm = BIT_MASK_GET(pkecode, PKE_OPCODE_IMM_B, PKE_OPCODE_IMM_E);
1357
1358 /* copy 2 bits to MODE register */
1359 PKE_REG_MASK_SET(me, MODE, MDE, BIT_MASK_GET(imm, 0, 1));
1360 /* done */
1361 if(me->trace_file != NULL)
1362 {
1363 char* mode;
1364 if(BIT_MASK_GET(imm, 0, 1) == 0) mode = "direct";
1365 else if(BIT_MASK_GET(imm, 0, 1) == 1) mode = "add";
1366 else if(BIT_MASK_GET(imm, 0, 1) == 2) mode = "addrow";
1367 else mode = "3"; /* invalid mode */
1368
1369 fprintf(me->trace_file,
1370 "\n; %s PC %d/%d\n"
1371 "\tDmaCnt *\n"
1372 "\tSTMOD%s %s\n"
1373 "\t.EndDmaData\n",
1374 me->dev.name, me->fifo_pc, me->qw_pc,
1375 (BIT_MASK_GET(pkecode, PKE_OPCODE_I_B, PKE_OPCODE_I_E) ? "[i]" : ""),
1376 mode);
1377 }
1378 pke_pc_advance(me, 1);
1379 PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_IDLE);
1380 }
1381
1382
1383 void
1384 pke_code_mskpath3(struct pke_device* me, unsigned_4 pkecode)
1385 {
1386 int imm = BIT_MASK_GET(pkecode, PKE_OPCODE_IMM_B, PKE_OPCODE_IMM_E);
1387 unsigned_4 gif_mode;
1388
1389 /* set appropriate bit */
1390 if(BIT_MASK_GET(imm, PKE_REG_MSKPATH3_B, PKE_REG_MSKPATH3_E) != 0)
1391 gif_mode = GIF_REG_STAT_M3P;
1392 else
1393 gif_mode = 0;
1394
1395 /* write register to "read-only" register; gpuif code will look at M3P bit only */
1396 PKE_MEM_WRITE(me, GIF_REG_VIF_M3P, & gif_mode, 4);
1397
1398 /* done */
1399 if(me->trace_file != NULL)
1400 {
1401 fprintf(me->trace_file,
1402 "\n; %s PC %d/%d\n"
1403 "\tDmaCnt *\n"
1404 "\tMSKPATH3%s %s\n"
1405 "\t.EndDmaData\n",
1406 me->dev.name, me->fifo_pc, me->qw_pc,
1407 (BIT_MASK_GET(pkecode, PKE_OPCODE_I_B, PKE_OPCODE_I_E) ? "[i]" : ""),
1408 (gif_mode ? "disable" : "enable"));
1409 }
1410 pke_pc_advance(me, 1);
1411 PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_IDLE);
1412 }
1413
1414
1415 void
1416 pke_code_pkemark(struct pke_device* me, unsigned_4 pkecode)
1417 {
1418 int imm = BIT_MASK_GET(pkecode, PKE_OPCODE_IMM_B, PKE_OPCODE_IMM_E);
1419 /* copy 16 bits to MARK register */
1420 PKE_REG_MASK_SET(me, MARK, MARK, BIT_MASK_GET(imm, 0, 15));
1421 /* set MRK bit in STAT register - CPU2 v2.1 docs incorrect */
1422 PKE_REG_MASK_SET(me, STAT, MRK, 1);
1423 /* done */
1424 if(me->trace_file != NULL)
1425 {
1426 fprintf(me->trace_file,
1427 "\n; %s PC %d/%d\n"
1428 "\tDmaCnt *\n"
1429 "\tMARK%s 0x%x\n"
1430 "\t.EndDmaData\n",
1431 me->dev.name, me->fifo_pc, me->qw_pc,
1432 (BIT_MASK_GET(pkecode, PKE_OPCODE_I_B, PKE_OPCODE_I_E) ? "[i]" : ""),
1433 imm);
1434 }
1435 pke_pc_advance(me, 1);
1436 PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_IDLE);
1437 }
1438
1439
1440 void
1441 pke_code_flushe(struct pke_device* me, unsigned_4 pkecode)
1442 {
1443 /* compute next PEW bit */
1444 if(pke_check_stall(me, chk_vu))
1445 {
1446 /* VU busy */
1447 PKE_REG_MASK_SET(me, STAT, PEW, 1);
1448 PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_STALL);
1449 /* try again next cycle */
1450 }
1451 else
1452 {
1453 /* VU idle */
1454 PKE_REG_MASK_SET(me, STAT, PEW, 0);
1455 PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_IDLE);
1456 if(me->trace_file != NULL)
1457 {
1458 fprintf(me->trace_file,
1459 "\n; %s PC %d/%d\n"
1460 "\tDmaCnt *\n"
1461 "\tFLUSHE%s\n"
1462 "\t.EndDmaData\n",
1463 me->dev.name, me->fifo_pc, me->qw_pc,
1464 (BIT_MASK_GET(pkecode, PKE_OPCODE_I_B, PKE_OPCODE_I_E) ? "[i]" : ""));
1465 }
1466 pke_pc_advance(me, 1);
1467 }
1468 }
1469
1470
1471 void
1472 pke_code_flush(struct pke_device* me, unsigned_4 pkecode)
1473 {
1474 int something_busy = 0;
1475
1476 /* compute next PEW, PGW bits */
1477 if(pke_check_stall(me, chk_vu))
1478 {
1479 something_busy = 1;
1480 PKE_REG_MASK_SET(me, STAT, PEW, 1);
1481 }
1482 else
1483 PKE_REG_MASK_SET(me, STAT, PEW, 0);
1484
1485
1486 if(pke_check_stall(me, chk_path1) ||
1487 pke_check_stall(me, chk_path2))
1488 {
1489 something_busy = 1;
1490 PKE_REG_MASK_SET(me, STAT, PGW, 1);
1491 }
1492 else
1493 PKE_REG_MASK_SET(me, STAT, PGW, 0);
1494
1495 /* go or no go */
1496 if(something_busy)
1497 {
1498 PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_WAIT);
1499 /* try again next cycle */
1500 }
1501 else
1502 {
1503 /* all idle */
1504 PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_IDLE);
1505 if(me->trace_file != NULL)
1506 {
1507 fprintf(me->trace_file,
1508 "\n; %s PC %d/%d\n"
1509 "\tDmaCnt *\n"
1510 "\tFLUSH%s\n"
1511 "\t.EndDmaData\n",
1512 me->dev.name, me->fifo_pc, me->qw_pc,
1513 (BIT_MASK_GET(pkecode, PKE_OPCODE_I_B, PKE_OPCODE_I_E) ? "[i]" : ""));
1514 }
1515 pke_pc_advance(me, 1);
1516 }
1517 }
1518
1519
1520 void
1521 pke_code_flusha(struct pke_device* me, unsigned_4 pkecode)
1522 {
1523 int something_busy = 0;
1524
1525 /* compute next PEW, PGW bits */
1526 if(pke_check_stall(me, chk_vu))
1527 {
1528 something_busy = 1;
1529 PKE_REG_MASK_SET(me, STAT, PEW, 1);
1530 }
1531 else
1532 PKE_REG_MASK_SET(me, STAT, PEW, 0);
1533
1534
1535 if(pke_check_stall(me, chk_path1) ||
1536 pke_check_stall(me, chk_path2) ||
1537 pke_check_stall(me, chk_path3))
1538 {
1539 something_busy = 1;
1540 PKE_REG_MASK_SET(me, STAT, PGW, 1);
1541 }
1542 else
1543 PKE_REG_MASK_SET(me, STAT, PGW, 0);
1544
1545 if(something_busy)
1546 {
1547 PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_WAIT);
1548 /* try again next cycle */
1549 }
1550 else
1551 {
1552 /* all idle */
1553 PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_IDLE);
1554 if(me->trace_file != NULL)
1555 {
1556 fprintf(me->trace_file,
1557 "\n; %s PC %d/%d\n"
1558 "\tDmaCnt *\n"
1559 "\tFLUSHA%s\n"
1560 "\t.EndDmaData\n",
1561 me->dev.name, me->fifo_pc, me->qw_pc,
1562 (BIT_MASK_GET(pkecode, PKE_OPCODE_I_B, PKE_OPCODE_I_E) ? "[i]" : ""));
1563 }
1564 pke_pc_advance(me, 1);
1565 }
1566 }
1567
1568
1569 void
1570 pke_code_pkemscal(struct pke_device* me, unsigned_4 pkecode)
1571 {
1572 /* compute next PEW bit */
1573 if(pke_check_stall(me, chk_vu))
1574 {
1575 /* VU busy */
1576 PKE_REG_MASK_SET(me, STAT, PEW, 1);
1577 PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_STALL);
1578 /* try again next cycle */
1579 }
1580 else
1581 {
1582 unsigned_4 vu_pc;
1583 int imm = BIT_MASK_GET(pkecode, PKE_OPCODE_IMM_B, PKE_OPCODE_IMM_E);
1584
1585 /* VU idle */
1586 PKE_REG_MASK_SET(me, STAT, PEW, 0);
1587
1588 /* flip DBF on PKE1 */
1589 if(me->pke_number == 1)
1590 pke_flip_dbf(me);
1591
1592 /* compute new PC for VU (host byte-order) */
1593 vu_pc = BIT_MASK_GET(imm, 0, 15);
1594 vu_pc = T2H_4(vu_pc);
1595
1596 /* write new PC; callback function gets VU running */
1597 ASSERT(sizeof(unsigned_4) == 4);
1598 PKE_MEM_WRITE(me, (me->pke_number == 0 ? VU0_CIA : VU1_CIA),
1599 & vu_pc,
1600 4);
1601
1602 /* copy ITOPS field to ITOP */
1603 PKE_REG_MASK_SET(me, ITOP, ITOP, PKE_REG_MASK_GET(me, ITOPS, ITOPS));
1604
1605 /* done */
1606 PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_IDLE);
1607 if(me->trace_file != NULL)
1608 {
1609 fprintf(me->trace_file,
1610 "\n; %s PC %d/%d\n"
1611 "\tDmaCnt *\n"
1612 "\tMSCAL%s 0x%x\n"
1613 "\t.EndDmaData\n",
1614 me->dev.name, me->fifo_pc, me->qw_pc,
1615 (BIT_MASK_GET(pkecode, PKE_OPCODE_I_B, PKE_OPCODE_I_E) ? "[i]" : ""),
1616 imm);
1617 }
1618 pke_pc_advance(me, 1);
1619 }
1620 }
1621
1622
1623
1624 void
1625 pke_code_pkemscnt(struct pke_device* me, unsigned_4 pkecode)
1626 {
1627 /* compute next PEW bit */
1628 if(pke_check_stall(me, chk_vu))
1629 {
1630 /* VU busy */
1631 PKE_REG_MASK_SET(me, STAT, PEW, 1);
1632 PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_STALL);
1633 /* try again next cycle */
1634 }
1635 else
1636 {
1637 unsigned_4 vu_pc;
1638
1639 /* VU idle */
1640 PKE_REG_MASK_SET(me, STAT, PEW, 0);
1641
1642 /* flip DBF on PKE1 */
1643 if(me->pke_number == 1)
1644 pke_flip_dbf(me);
1645
1646 /* read old PC */
1647 ASSERT(sizeof(unsigned_4) == 4);
1648 PKE_MEM_READ(me, (me->pke_number == 0 ? VU0_CIA : VU1_CIA),
1649 & vu_pc,
1650 4);
1651
1652 /* rewrite new PC; callback function gets VU running */
1653 ASSERT(sizeof(unsigned_4) == 4);
1654 PKE_MEM_WRITE(me, (me->pke_number == 0 ? VU0_CIA : VU1_CIA),
1655 & vu_pc,
1656 4);
1657
1658 /* copy ITOPS field to ITOP */
1659 PKE_REG_MASK_SET(me, ITOP, ITOP, PKE_REG_MASK_GET(me, ITOPS, ITOPS));
1660
1661 /* done */
1662 PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_IDLE);
1663 if(me->trace_file != NULL)
1664 {
1665 fprintf(me->trace_file,
1666 "\n; %s PC %d/%d\n"
1667 "\tDmaCnt *\n"
1668 "\tMSCNT\n"
1669 "\t.EndDmaData\n",
1670 me->dev.name, me->fifo_pc, me->qw_pc);
1671 }
1672 pke_pc_advance(me, 1);
1673 }
1674 }
1675
1676
1677 void
1678 pke_code_pkemscalf(struct pke_device* me, unsigned_4 pkecode)
1679 {
1680 int something_busy = 0;
1681
1682 /* compute next PEW, PGW bits */
1683 if(pke_check_stall(me, chk_vu))
1684 {
1685 something_busy = 1;
1686 PKE_REG_MASK_SET(me, STAT, PEW, 1);
1687 }
1688 else
1689 PKE_REG_MASK_SET(me, STAT, PEW, 0);
1690
1691
1692 if(pke_check_stall(me, chk_path1) ||
1693 pke_check_stall(me, chk_path2) ||
1694 pke_check_stall(me, chk_path3))
1695 {
1696 something_busy = 1;
1697 PKE_REG_MASK_SET(me, STAT, PGW, 1);
1698 }
1699 else
1700 PKE_REG_MASK_SET(me, STAT, PGW, 0);
1701
1702 /* go or no go */
1703 if(something_busy)
1704 {
1705 PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_WAIT);
1706 /* try again next cycle */
1707 }
1708 else
1709 {
1710 unsigned_4 vu_pc;
1711 int imm = BIT_MASK_GET(pkecode, PKE_OPCODE_IMM_B, PKE_OPCODE_IMM_E);
1712
1713 /* flip DBF on PKE1 */
1714 if(me->pke_number == 1)
1715 pke_flip_dbf(me);
1716
1717 /* compute new PC for VU (host byte-order) */
1718 vu_pc = BIT_MASK_GET(imm, 0, 15);
1719 vu_pc = T2H_4(vu_pc);
1720
1721 /* rewrite new PC; callback function gets VU running */
1722 ASSERT(sizeof(unsigned_4) == 4);
1723 PKE_MEM_WRITE(me, (me->pke_number == 0 ? VU0_CIA : VU1_CIA),
1724 & vu_pc,
1725 4);
1726
1727 /* copy ITOPS field to ITOP */
1728 PKE_REG_MASK_SET(me, ITOP, ITOP, PKE_REG_MASK_GET(me, ITOPS, ITOPS));
1729
1730 /* done */
1731 PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_IDLE);
1732 if(me->trace_file != NULL)
1733 {
1734 fprintf(me->trace_file,
1735 "\n; %s PC %d/%d\n"
1736 "\tDmaCnt *\n"
1737 "\tMSCALF 0x%x\n"
1738 "\t.EndDmaData\n",
1739 me->dev.name, me->fifo_pc, me->qw_pc,
1740 imm);
1741 }
1742 pke_pc_advance(me, 1);
1743 }
1744 }
1745
1746
1747 void
1748 pke_code_stmask(struct pke_device* me, unsigned_4 pkecode)
1749 {
1750 unsigned_4* mask;
1751
1752 /* check that FIFO has one more word for STMASK operand */
1753 mask = pke_pcrel_operand(me, 1);
1754 if(mask != NULL)
1755 {
1756 /* "transferring" operand */
1757 PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_XFER);
1758
1759 /* set NUM */
1760 PKE_REG_MASK_SET(me, NUM, NUM, 1);
1761
1762 /* fill the register */
1763 PKE_REG_MASK_SET(me, MASK, MASK, *mask);
1764
1765 /* set NUM */
1766 PKE_REG_MASK_SET(me, NUM, NUM, 0);
1767
1768 /* done */
1769 PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_IDLE);
1770 if(me->trace_file != NULL)
1771 {
1772 fprintf(me->trace_file,
1773 "\n; %s PC %d/%d\n"
1774 "\tDmaCnt *\n"
1775 "\tSTMASK 0x%x\n"
1776 "\t.EndDmaData\n",
1777 me->dev.name, me->fifo_pc, me->qw_pc,
1778 *mask);
1779 }
1780 pke_pc_advance(me, 2);
1781 }
1782 else
1783 {
1784 /* need to wait for another word */
1785 PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_WAIT);
1786 /* try again next cycle */
1787 }
1788 }
1789
1790
1791 void
1792 pke_code_strow(struct pke_device* me, unsigned_4 pkecode)
1793 {
1794 /* check that FIFO has four more words for STROW operand */
1795 unsigned_4* last_op;
1796
1797 last_op = pke_pcrel_operand(me, 4);
1798 if(last_op != NULL)
1799 {
1800 /* "transferring" operand */
1801 PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_XFER);
1802
1803 /* set NUM */
1804 PKE_REG_MASK_SET(me, NUM, NUM, 1);
1805
1806 /* copy ROW registers: must all exist if 4th operand exists */
1807 me->regs[PKE_REG_R0][0] = * pke_pcrel_operand(me, 1);
1808 me->regs[PKE_REG_R1][0] = * pke_pcrel_operand(me, 2);
1809 me->regs[PKE_REG_R2][0] = * pke_pcrel_operand(me, 3);
1810 me->regs[PKE_REG_R3][0] = * pke_pcrel_operand(me, 4);
1811
1812 /* set NUM */
1813 PKE_REG_MASK_SET(me, NUM, NUM, 0);
1814
1815 /* done */
1816 PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_IDLE);
1817 if(me->trace_file != NULL)
1818 {
1819 fprintf(me->trace_file,
1820 "\n; %s PC %d/%d\n"
1821 "\tDmaCnt *\n"
1822 "\tSTROW 0x%x,0x%x,0x%x,0x%x\n"
1823 "\t.EndDmaData\n",
1824 me->dev.name, me->fifo_pc, me->qw_pc,
1825 * pke_pcrel_operand(me, 1),
1826 * pke_pcrel_operand(me, 2),
1827 * pke_pcrel_operand(me, 3),
1828 * pke_pcrel_operand(me, 4));
1829 }
1830 pke_pc_advance(me, 5);
1831 }
1832 else
1833 {
1834 /* need to wait for another word */
1835 PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_WAIT);
1836 /* try again next cycle */
1837 }
1838 }
1839
1840
1841 void
1842 pke_code_stcol(struct pke_device* me, unsigned_4 pkecode)
1843 {
1844 /* check that FIFO has four more words for STCOL operand */
1845 unsigned_4* last_op;
1846
1847 last_op = pke_pcrel_operand(me, 4);
1848 if(last_op != NULL)
1849 {
1850 /* "transferring" operand */
1851 PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_XFER);
1852
1853 /* set NUM */
1854 PKE_REG_MASK_SET(me, NUM, NUM, 1);
1855
1856 /* copy COL registers: must all exist if 4th operand exists */
1857 me->regs[PKE_REG_C0][0] = * pke_pcrel_operand(me, 1);
1858 me->regs[PKE_REG_C1][0] = * pke_pcrel_operand(me, 2);
1859 me->regs[PKE_REG_C2][0] = * pke_pcrel_operand(me, 3);
1860 me->regs[PKE_REG_C3][0] = * pke_pcrel_operand(me, 4);
1861
1862 /* set NUM */
1863 PKE_REG_MASK_SET(me, NUM, NUM, 0);
1864
1865 /* done */
1866 PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_IDLE);
1867 if(me->trace_file != NULL)
1868 {
1869 fprintf(me->trace_file,
1870 "\n; %s PC %d/%d\n"
1871 "\tDmaCnt *\n"
1872 "\tSTCOL 0x%x,0x%x,0x%x,0x%x\n"
1873 "\t.EndDmaData\n",
1874 me->dev.name, me->fifo_pc, me->qw_pc,
1875 * pke_pcrel_operand(me, 1),
1876 * pke_pcrel_operand(me, 2),
1877 * pke_pcrel_operand(me, 3),
1878 * pke_pcrel_operand(me, 4));
1879 }
1880 pke_pc_advance(me, 5);
1881 }
1882 else
1883 {
1884 /* need to wait for another word */
1885 PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_WAIT);
1886 /* try again next cycle */
1887 }
1888 }
1889
1890
/* MPG: transfer `num' 64-bit VU instruction word-pairs from the FIFO
   into VU instruction memory starting at 64-bit unit address `imm',
   after an implied FLUSHE (waits for the VU to go idle).  Also writes
   each pair's R5900 source address into the parallel tracking memory.
   Stalls (PPS_WAIT / PPS_STALL) and retries until FIFO and VU are
   ready; completes in a single call once they are. */
void
pke_code_mpg(struct pke_device* me, unsigned_4 pkecode)
{
  unsigned_4* last_mpg_word;
  int num = BIT_MASK_GET(pkecode, PKE_OPCODE_NUM_B, PKE_OPCODE_NUM_E);
  int imm = BIT_MASK_GET(pkecode, PKE_OPCODE_IMM_B, PKE_OPCODE_IMM_E);

  /* assert 64-bit alignment of MPG operand */
  if(me->qw_pc != 3 && me->qw_pc != 1)
    return pke_code_error(me, pkecode);

  /* map zero to max+1 */
  if(num==0) num=0x100;

  /* check that FIFO has a few more words for MPG operand */
  last_mpg_word = pke_pcrel_operand(me, num*2); /* num: number of 64-bit words */
  if(last_mpg_word != NULL)
    {
      /* perform implied FLUSHE */
      if(pke_check_stall(me, chk_vu))
	{
	  /* VU busy */
	  PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_STALL);
	  /* retry this instruction next clock */
	}
      else
	{
	  /* VU idle */
	  int i;

	  /* "transferring" operand */
	  PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_XFER);

	  /* set NUM */
	  PKE_REG_MASK_SET(me, NUM, NUM, num);

	  /* disassembly */
	  if(me->trace_file != NULL)
	    {
	      fprintf(me->trace_file,
		      "\n; %s PC %d/%d\n"
		      "\tDmaCnt *\n"
		      "\tMPG 0x%x,0x%x\n",
		      me->dev.name, me->fifo_pc, me->qw_pc,
		      imm, num);
	    }

	  /* transfer VU instructions, one word-pair per iteration */
	  for(i=0; i<num; i++)
	    {
	      address_word vu_addr_base, vu_addr;
	      address_word vutrack_addr_base, vutrack_addr;
	      address_word vu_addr_max_size;
	      unsigned_4 vu_lower_opcode, vu_upper_opcode;
	      unsigned_4* operand;
	      unsigned_4 source_addr;
	      struct fifo_quadword* fq;
	      int next_num;
	      int j;

	      /* decrement NUM (counts down to 0 over the loop) */
	      next_num = PKE_REG_MASK_GET(me, NUM, NUM) - 1;
	      PKE_REG_MASK_SET(me, NUM, NUM, next_num);

	      /* imm: in 64-bit units for MPG instruction */
	      /* VU*_MEM0 : instruction memory */
	      vu_addr_base = (me->pke_number == 0) ?
		VU0_MEM0_WINDOW_START : VU1_MEM0_WINDOW_START;
	      vu_addr_max_size = (me->pke_number == 0) ?
		VU0_MEM0_SIZE : VU1_MEM0_SIZE;
	      vutrack_addr_base = (me->pke_number == 0) ?
		VU0_MEM0_SRCADDR_START : VU1_MEM0_SRCADDR_START;

	      /* compute VU address for this word-pair (8 bytes each) */
	      vu_addr = vu_addr_base + (imm + i) * 8;
	      /* check for vu_addr overflow: wrap around instruction memory */
	      while(vu_addr >= vu_addr_base + vu_addr_max_size)
		vu_addr -= vu_addr_max_size;

	      /* compute VU tracking address (one tracking word per 8-byte pair) */
	      vutrack_addr = vutrack_addr_base + ((signed_8)vu_addr - (signed_8)vu_addr_base) / 2;

	      /* Fetch operand words; assume they are already little-endian for VU imem */
	      fq = pke_pcrel_fifo(me, i*2 + 1, & operand);
	      vu_lower_opcode = *operand;

	      source_addr = fq->source_address;
	      /* add word offset */
	      /* NOTE(review): only data[0..2] are scanned here; an operand
		 sitting in data[3] of the quadword gets no word offset added.
		 Suspected off-by-one -- should this bound be j<4?  Confirm. */
	      for(j=0; j<3; j++)
		if(operand == & fq->data[j])
		  source_addr += sizeof(unsigned_4) * j;

	      fq = pke_pcrel_fifo(me, i*2 + 2, & operand);
	      vu_upper_opcode = *operand;

	      /* write data into VU memory */
	      /* lower (scalar) opcode comes in first word ; macro performs H2T! */
	      PKE_MEM_WRITE(me, vu_addr,
			    & vu_lower_opcode,
			    4);
	      /* upper (vector) opcode comes in second word ; H2T */
	      ASSERT(sizeof(unsigned_4) == 4);
	      PKE_MEM_WRITE(me, vu_addr + 4,
			    & vu_upper_opcode,
			    4);

	      /* write tracking address in target byte-order */
	      ASSERT(sizeof(unsigned_4) == 4);
	      PKE_MEM_WRITE(me, vutrack_addr,
			    & source_addr,
			    4);

	      /* disassembly */
	      if(me->trace_file != NULL)
		{
		  unsigned long opcodes[2] = { vu_upper_opcode, vu_lower_opcode };
		  fprintf(me->trace_file, "\t");
		  opcode_analyze(me->trace_file, opcodes);
		  fprintf(me->trace_file, "\n");
		}
	    } /* VU xfer loop */

	  /* check NUM: the countdown above must have reached zero */
	  ASSERT(PKE_REG_MASK_GET(me, NUM, NUM) == 0);

	  /* done */
	  PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_IDLE);
	  if(me->trace_file != NULL)
	    {
	      fprintf(me->trace_file,
		      "\t.EndMpg\n"
		      "\t.EndDmaData\n");
	    }
	  /* skip PKEcode plus num 64-bit (2-word) operands */
	  pke_pc_advance(me, 1 + num*2);
	}
    } /* if FIFO full enough */
  else
    {
      /* need to wait for another word */
      PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_WAIT);
      /* retry this instruction next clock */
    }
}
2034
2035
/* DIRECT: ship `imm' quadwords from the PKE FIFO to the GPUIF path-2
   FIFO.  Stalls in PPS_WAIT until the whole operand is present in the
   FIFO; the transfer then completes within this one call.  Also used
   to implement DIRECTHL (see pke_code_directhl); the trace output
   distinguishes the two via IS_PKE_CMD. */
void
pke_code_direct(struct pke_device* me, unsigned_4 pkecode)
{
  /* check that FIFO has a few more words for DIRECT operand */
  unsigned_4* last_direct_word;
  int imm = BIT_MASK_GET(pkecode, PKE_OPCODE_IMM_B, PKE_OPCODE_IMM_E);

  /* assert 128-bit alignment of DIRECT operand */
  if(me->qw_pc != 3)
    return pke_code_error(me, pkecode);

  /* map zero to max+1 */
  if(imm==0) imm=0x10000;

  last_direct_word = pke_pcrel_operand(me, imm*4); /* imm: number of 128-bit words */
  if(last_direct_word != NULL)
    {
      /* VU idle */
      int i;
      unsigned_16 fifo_data; /* quadword being assembled from four FIFO words */

      /* "transferring" operand */
      PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_XFER);

      /* disassembly */
      if(me->trace_file != NULL)
	{
	  fprintf(me->trace_file,
		  "\n; %s PC %d/%d\n"
		  "\tDmaCnt *\n"
		  "\t%s 0x%x\n",
		  me->dev.name, me->fifo_pc, me->qw_pc,
		  (IS_PKE_CMD(pkecode, DIRECT) ? "DIRECT" : "DIRECTHL"),
		  imm);
	}

      /* transfer GPUIF quadwords, one word per iteration */
      for(i=0; i<imm*4; i++)
	{
	  unsigned_4* operand = pke_pcrel_operand(me, 1+i);

	  /* collect word into quadword; words are placed in reverse
	     index order (3 - i%4) */
	  *A4_16(&fifo_data, 3 - (i % 4)) = *operand;

	  /* write to GPUIF FIFO only with full quadword */
	  if(i % 4 == 3)
	    {
	      ASSERT(sizeof(fifo_data) == 16);
	      PKE_MEM_WRITE(me, GIF_PATH2_FIFO_ADDR,
			    & fifo_data,
			    16);

	      /* disassembly */
	      if(me->trace_file != NULL)
		{
		  char buffer[200]; /* one line of disassembly */
		  gif_disassemble_pke_data(buffer, (quadword*) &fifo_data);
		  fprintf(me->trace_file, "\t%s\n", buffer);
		}
	    } /* write collected quadword */
	} /* GPUIF xfer loop */

      /* done */
      PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_IDLE);
      if(me->trace_file != NULL)
	{
	  fprintf(me->trace_file,
		  "\t.EndDirect\n"
		  "\t.EndDmaData\n");
	}
      /* skip PKEcode plus imm quadword (4-word) operands */
      pke_pc_advance(me, 1 + imm*4);
    } /* if FIFO full enough */
  else
    {
      /* need to wait for another word */
      PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_WAIT);
      /* retry this instruction next clock */
    }
}
2115
2116
2117 void
2118 pke_code_directhl(struct pke_device* me, unsigned_4 pkecode)
2119 {
2120 /* treat the same as DIRECT */
2121 pke_code_direct(me, pkecode);
2122 /* dissassembly code handles DIRECT/DIRECTHL overloading */
2123 }
2124
2125
2126 void
2127 pke_code_unpack(struct pke_device* me, unsigned_4 pkecode)
2128 {
2129 int imm = BIT_MASK_GET(pkecode, PKE_OPCODE_IMM_B, PKE_OPCODE_IMM_E);
2130 int cmd = BIT_MASK_GET(pkecode, PKE_OPCODE_CMD_B, PKE_OPCODE_CMD_E);
2131 int num = BIT_MASK_GET(pkecode, PKE_OPCODE_NUM_B, PKE_OPCODE_NUM_E);
2132 int nummx = (num == 0) ? 0x0100 : num;
2133 short vn = BIT_MASK_GET(cmd, 2, 3); /* unpack shape controls */
2134 short vl = BIT_MASK_GET(cmd, 0, 1);
2135 int m = BIT_MASK_GET(cmd, 4, 4);
2136 short cl = PKE_REG_MASK_GET(me, CYCLE, CL); /* cycle controls */
2137 short wl = PKE_REG_MASK_GET(me, CYCLE, WL);
2138 short addrwl = (wl == 0) ? 0x0100 : wl;
2139 int r = BIT_MASK_GET(imm, 15, 15); /* indicator bits in imm value */
2140 int usn = BIT_MASK_GET(imm, 14, 14);
2141
2142 int n, num_operands;
2143 unsigned_4* last_operand_word = NULL;
2144
2145 /* catch all illegal UNPACK variants */
2146 if(vl == 3 && vn < 3)
2147 {
2148 pke_code_error(me, pkecode);
2149 return;
2150 }
2151
2152 /* compute PKEcode length, as given in CPU2 spec, v2.1 pg. 11 */
2153 if(cl >= addrwl)
2154 n = num;
2155 else
2156 n = cl * (nummx / addrwl) + PKE_LIMIT(nummx % addrwl, cl);
2157 num_operands = (31 + (32 >> vl) * (vn+1) * n)/32; /* round up to next word */
2158
2159 /* confirm that FIFO has enough words in it */
2160 if(num_operands > 0)
2161 last_operand_word = pke_pcrel_operand(me, num_operands);
2162 if(last_operand_word != NULL || num_operands == 0)
2163 {
2164 address_word vu_addr_base, vutrack_addr_base;
2165 address_word vu_addr_max_size;
2166 int vector_num_out, vector_num_in;
2167
2168 /* "transferring" operand */
2169 PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_XFER);
2170
2171 /* disassembly */
2172 if(me->trace_file != NULL)
2173 {
2174 char unpack_type[8];
2175 char flags[8];
2176 sprintf(flags,"[%s%s%s%s]",
2177 (m ? "m" : ""),
2178 (usn ? "u" : ""),
2179 (r ? "r" : ""),
2180 (BIT_MASK_GET(pkecode, PKE_OPCODE_I_B, PKE_OPCODE_I_E) ? "i" : ""));
2181 if(vn > 0)
2182 sprintf(unpack_type, "V%d_%d",
2183 (vn + 1),
2184 (vl == 3 ? 5 : (32 >> vl)));
2185 else
2186 sprintf(unpack_type, "S_%d",
2187 (vl == 3 ? 5 : (32 >> vl)));
2188
2189 fprintf(me->trace_file,
2190 "\n; %s PC %d/%d\n"
2191 "\tDmaCnt *\n"
2192 "\tUNPACK%s %s,0x%x,0x%x\n",
2193 me->dev.name, me->fifo_pc, me->qw_pc,
2194 flags, unpack_type, imm, num);
2195 }
2196
2197 /* don't check whether VU is idle */
2198
2199 /* compute VU address base */
2200 if(me->pke_number == 0)
2201 {
2202 vu_addr_base = VU0_MEM1_WINDOW_START;
2203 vu_addr_max_size = VU0_MEM1_SIZE;
2204 vutrack_addr_base = VU0_MEM1_SRCADDR_START;
2205 r = 0;
2206 }
2207 else
2208 {
2209 vu_addr_base = VU1_MEM1_WINDOW_START;
2210 vu_addr_max_size = VU1_MEM1_SIZE;
2211 vutrack_addr_base = VU1_MEM1_SRCADDR_START;
2212 }
2213
2214 /* set NUM */
2215 PKE_REG_MASK_SET(me, NUM, NUM, nummx);
2216
2217 /* transfer given number of vectors */
2218 vector_num_out = 0; /* output vector number being processed */
2219 vector_num_in = 0; /* argument vector number being processed */
2220 do
2221 {
2222 quadword vu_old_data;
2223 quadword vu_new_data;
2224 quadword unpacked_data;
2225 address_word vu_addr;
2226 address_word vutrack_addr;
2227 unsigned_4 source_addr = 0;
2228 int i;
2229 int next_num;
2230
2231 /* decrement NUM */
2232 next_num = PKE_REG_MASK_GET(me, NUM, NUM) - 1;
2233 PKE_REG_MASK_SET(me, NUM, NUM, next_num);
2234
2235 /* compute VU destination address, as bytes in R5900 memory */
2236 if(cl >= wl)
2237 {
2238 /* map zero to max+1 */
2239 vu_addr = vu_addr_base + 16 * (BIT_MASK_GET(imm, 0, 9) +
2240 (vector_num_out / addrwl) * cl +
2241 (vector_num_out % addrwl));
2242 }
2243 else
2244 vu_addr = vu_addr_base + 16 * (BIT_MASK_GET(imm, 0, 9) +
2245 vector_num_out);
2246
2247 /* handle "R" double-buffering bit */
2248 if(r)
2249 vu_addr += 16 * PKE_REG_MASK_GET(me, TOPS, TOPS);
2250
2251 /* check for vu_addr overflow */
2252 while(vu_addr >= vu_addr_base + vu_addr_max_size)
2253 vu_addr -= vu_addr_max_size;
2254
2255 /* compute address of tracking table entry */
2256 vutrack_addr = vutrack_addr_base + ((signed_8)vu_addr - (signed_8)vu_addr_base) / 4;
2257
2258 /* read old VU data word at address; reverse words if needed */
2259 {
2260 unsigned_16 vu_old_badwords;
2261 ASSERT(sizeof(vu_old_badwords) == 16);
2262 PKE_MEM_READ(me, vu_addr,
2263 &vu_old_badwords, 16);
2264 vu_old_data[0] = * A4_16(& vu_old_badwords, 3);
2265 vu_old_data[1] = * A4_16(& vu_old_badwords, 2);
2266 vu_old_data[2] = * A4_16(& vu_old_badwords, 1);
2267 vu_old_data[3] = * A4_16(& vu_old_badwords, 0);
2268 }
2269
2270 /* For cyclic unpack, next operand quadword may come from instruction stream
2271 or be zero. */
2272 if((cl < addrwl) &&
2273 (vector_num_out % addrwl) >= cl)
2274 {
2275 /* clear operand - used only in a "indeterminate" state */
2276 for(i = 0; i < 4; i++)
2277 unpacked_data[i] = 0;
2278 }
2279 else
2280 {
2281 /* compute packed vector dimensions */
2282 int vectorbits = 0, unitbits = 0;
2283
2284 /* disassembly */
2285 if(me->trace_file != NULL)
2286 {
2287 fprintf(me->trace_file, "\t; unpack input row %d\n", vector_num_in);
2288 }
2289
2290 if(vl < 3) /* PKE_UNPACK_*_{32,16,8} */
2291 {
2292 unitbits = (32 >> vl);
2293 vectorbits = unitbits * (vn+1);
2294 }
2295 else if(vl == 3 && vn == 3) /* PKE_UNPACK_V4_5 */
2296 {
2297 unitbits = 5;
2298 vectorbits = 16;
2299 }
2300 else /* illegal unpack variant */
2301 {
2302 /* should have been caught at top of function */
2303 ASSERT(0);
2304 }
2305
2306 /* loop over columns */
2307 for(i=0; i<=vn; i++)
2308 {
2309 unsigned_4 operand;
2310
2311 /* offset in bits in current operand word */
2312 int bitoffset =
2313 (vector_num_in * vectorbits) + (i * unitbits); /* # of bits from PKEcode */
2314
2315 /* last unit of V4_5 is only one bit wide */
2316 if(vl == 3 && vn == 3 && i == 3) /* PKE_UNPACK_V4_5 */
2317 unitbits = 1;
2318
2319 /* confirm we're not reading more than we said we needed */
2320 if(vector_num_in * vectorbits >= num_operands * 32)
2321 {
2322 /* this condition may be triggered by illegal
2323 PKEcode / CYCLE combinations. */
2324 pke_code_error(me, pkecode);
2325 /* XXX: this case needs to be better understood,
2326 and detected at a better time. */
2327 return;
2328 }
2329
2330 /* fetch bitfield operand */
2331 operand = pke_pcrel_operand_bits(me, bitoffset, unitbits, & source_addr);
2332
2333 /* disassemble */
2334 if(me->trace_file != NULL && vl < 3) /* not for V4_5 */
2335 {
2336 char* data_size;
2337 if(vl == 0) data_size=".word";
2338 else if(vl == 1) data_size=".short";
2339 else if(vl == 2) data_size=".byte";
2340 else data_size = "<invalid>";
2341
2342 fprintf(me->trace_file, "\t%s 0x%x\n", data_size, operand);
2343 }
2344
2345 /* selectively sign-extend; not for V4_5 1-bit value */
2346 if(usn || unitbits == 1)
2347 unpacked_data[i] = operand;
2348 else
2349 unpacked_data[i] = SEXT32(operand, unitbits-1);
2350 }
2351
2352 /* disassemble */
2353 if(me->trace_file != NULL && vl == 3) /* only for V4_5 */
2354 {
2355 unsigned short operand =
2356 ((unpacked_data[0] & 0x1f) << 0) |
2357 ((unpacked_data[1] & 0x1f) << 5) |
2358 ((unpacked_data[2] & 0x1f) << 10) |
2359 ((unpacked_data[3] & 0x1) << 15);
2360
2361 fprintf(me->trace_file, "\t.short 0x%x\n", operand);
2362 }
2363
2364 /* set remaining top words in vector */
2365 for(i=vn+1; i<4; i++)
2366 {
2367 if(vn == 0) /* S_{32,16,8}: copy lowest element */
2368 unpacked_data[i] = unpacked_data[0];
2369 else
2370 unpacked_data[i] = 0;
2371 }
2372
2373 /* consumed a vector from the PKE instruction stream */
2374 vector_num_in ++;
2375 } /* unpack word from instruction operand */
2376
2377 /* process STMOD register for accumulation operations */
2378 switch(PKE_REG_MASK_GET(me, MODE, MDE))
2379 {
2380 case PKE_MODE_ADDROW: /* add row registers to output data */
2381 case PKE_MODE_ACCROW: /* same .. later conditionally accumulate */
2382 for(i=0; i<4; i++)
2383 /* exploit R0..R3 contiguity */
2384 unpacked_data[i] += me->regs[PKE_REG_R0 + i][0];
2385 break;
2386
2387 case PKE_MODE_INPUT: /* pass data through */
2388 default: /* specified as undefined */
2389 ;
2390 }
2391
2392 /* compute replacement word */
2393 if(m) /* use mask register? */
2394 {
2395 /* compute index into mask register for this word */
2396 int mask_index = PKE_LIMIT(vector_num_out % addrwl, 3);
2397
2398 for(i=0; i<4; i++) /* loop over columns */
2399 {
2400 int mask_op = PKE_MASKREG_GET(me, mask_index, i);
2401 unsigned_4* masked_value = NULL;
2402
2403 switch(mask_op)
2404 {
2405 case PKE_MASKREG_INPUT:
2406 masked_value = & unpacked_data[i];
2407
2408 /* conditionally accumulate */
2409 if(PKE_REG_MASK_GET(me, MODE, MDE) == PKE_MODE_ACCROW)
2410 me->regs[PKE_REG_R0 + i][0] = unpacked_data[i];
2411
2412 break;
2413
2414 case PKE_MASKREG_ROW: /* exploit R0..R3 contiguity */
2415 masked_value = & me->regs[PKE_REG_R0 + i][0];
2416 break;
2417
2418 case PKE_MASKREG_COLUMN: /* exploit C0..C3 contiguity */
2419 masked_value = & me->regs[PKE_REG_C0 + mask_index][0];
2420 break;
2421
2422 case PKE_MASKREG_NOTHING:
2423 /* "write inhibit" by re-copying old data */
2424 masked_value = & vu_old_data[i];
2425 break;
2426
2427 default:
2428 ASSERT(0);
2429 /* no other cases possible */
2430 }
2431
2432 /* copy masked value for column */
2433 vu_new_data[i] = *masked_value;
2434 } /* loop over columns */
2435 } /* mask */
2436 else
2437 {
2438 /* no mask - just copy over entire unpacked quadword */
2439 memcpy(vu_new_data, unpacked_data, sizeof(unpacked_data));
2440
2441 /* conditionally store accumulated row results */
2442 if(PKE_REG_MASK_GET(me, MODE, MDE) == PKE_MODE_ACCROW)
2443 for(i=0; i<4; i++)
2444 me->regs[PKE_REG_R0 + i][0] = unpacked_data[i];
2445 }
2446
2447 /* write new VU data word at address; reverse words if needed */
2448 {
2449 unsigned_16 vu_new_badwords;
2450 * A4_16(& vu_new_badwords, 3) = vu_new_data[0];
2451 * A4_16(& vu_new_badwords, 2) = vu_new_data[1];
2452 * A4_16(& vu_new_badwords, 1) = vu_new_data[2];
2453 * A4_16(& vu_new_badwords, 0) = vu_new_data[3];
2454 ASSERT(sizeof(vu_new_badwords) == 16);
2455 PKE_MEM_WRITE(me, vu_addr,
2456 &vu_new_badwords, 16);
2457 }
2458
2459 /* write tracking address */
2460 ASSERT(sizeof(unsigned_4) == 4);
2461 PKE_MEM_WRITE(me, vutrack_addr,
2462 & source_addr,
2463 4);
2464
2465 /* next vector please */
2466 vector_num_out ++;
2467 } /* vector transfer loop */
2468 while(PKE_REG_MASK_GET(me, NUM, NUM) > 0);
2469
2470 /* confirm we've written as many vectors as told */
2471 ASSERT(nummx == vector_num_out);
2472
2473 /* done */
2474 PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_IDLE);
2475 if(me->trace_file != NULL)
2476 {
2477 fprintf(me->trace_file,
2478 "\t.EndUnpack\n"
2479 "\t.EndDmaData\n");
2480 }
2481 pke_pc_advance(me, 1 + num_operands);
2482 } /* PKE FIFO full enough */
2483 else
2484 {
2485 /* need to wait for another word */
2486 PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_WAIT);
2487 /* retry this instruction next clock */
2488 }
2489 }
2490
2491
2492 void
2493 pke_code_error(struct pke_device* me, unsigned_4 pkecode)
2494 {
2495 /* set ER1 flag in STAT register */
2496 PKE_REG_MASK_SET(me, STAT, ER1, 1);
2497
2498 if(! PKE_REG_MASK_GET(me, ERR, ME1))
2499 {
2500 pke_begin_interrupt_stall(me);
2501 PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_STALL);
2502 }
2503 else
2504 {
2505 PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_IDLE);
2506 }
2507
2508 if(me->trace_file != NULL)
2509 {
2510 fprintf(me->trace_file,
2511 "\n; %s PC %d/%d\n"
2512 "\tDmaCnt *\n"
2513 "\t.word 0x%x\n"
2514 "\t.EndDmaData\n",
2515 me->dev.name, me->fifo_pc, me->qw_pc,
2516 (long) pkecode);
2517 }
2518
2519 /* advance over faulty word */
2520 pke_pc_advance(me, 1);
2521 }
2522
2523 void
2524 pke_options(struct pke_device *me, unsigned_4 option, char *option_string)
2525 {
2526 switch (option)
2527 {
2528 case SKY_OPT_DEBUG_NAME:
2529 if ( me->fifo_trace_file != NULL )
2530 {
2531 fclose (me->fifo_trace_file);
2532 me->fifo_trace_file = NULL;
2533 }
2534 sky_store_file_name (&me->fifo_trace_file_name, option_string);
2535 break;
2536
2537 case SKY_OPT_TRACE_ON:
2538 me->flags |= PKE_FLAG_TRACE_ON;
2539 break;
2540
2541 case SKY_OPT_TRACE_OFF:
2542 case SKY_OPT_TRACE_NAME:
2543 if ( me->trace_file != NULL )
2544 {
2545 fprintf(me->trace_file,
2546 "\n\n\tDmaEnd 0\n"
2547 "\t.EndDmaData\n");
2548 fclose (me->trace_file);
2549 me->trace_file = NULL;
2550 }
2551
2552 if ( option == SKY_OPT_TRACE_OFF )
2553 me->flags &= ~PKE_FLAG_TRACE_ON;
2554 else
2555 sky_store_file_name (&me->trace_file_name, option_string);
2556
2557 break;
2558
2559 case SKY_OPT_CLOSE:
2560 if (me->trace_file != NULL)
2561 {
2562 fprintf(me->trace_file,
2563 "\n\n\tDmaEnd 0\n"
2564 "\t.EndDmaData\n");
2565 fclose(me->trace_file);
2566 me->trace_file = NULL;
2567 }
2568 if (me->fifo_trace_file != NULL )
2569 fclose (me->fifo_trace_file);
2570 me->fifo_trace_file = NULL;
2571 break;
2572
2573 default:
2574 ASSERT (0);
2575 break;
2576 }
2577
2578 return;
2579 }
This page took 0.083396 seconds and 3 git commands to generate.