/* sim/mips/sky-pke.c */
/* Copyright (C) 1998, Cygnus Solutions */


#include "config.h"

#include <stdlib.h>
#include "sky-pke.h"
#include "sky-dma.h"
#include "sim-bits.h"
#include "sim-assert.h"
#include "sky-vu0.h"
#include "sky-vu1.h"
#include "sky-gpuif.h"
#include "sky-device.h"

#ifdef HAVE_STRING_H
#include <string.h>
#else
#ifdef HAVE_STRINGS_H
#include <strings.h>
#endif
#endif


/* Internal function declarations */

static int pke_io_read_buffer(device*, void*, int, address_word,
                              unsigned, sim_cpu*, sim_cia);
static int pke_io_write_buffer(device*, const void*, int, address_word,
                               unsigned, sim_cpu*, sim_cia);
static void pke_reset(struct pke_device*);
static void pke_issue(SIM_DESC, struct pke_device*);
static void pke_pc_advance(struct pke_device*, int num_words);
static struct fifo_quadword* pke_pcrel_fifo(struct pke_device*, int operand_num,
                                            unsigned_4** operand);
static unsigned_4* pke_pcrel_operand(struct pke_device*, int operand_num);
static unsigned_4 pke_pcrel_operand_bits(struct pke_device*, int bit_offset,
                                         int bit_width, unsigned_4* sourceaddr);
static void pke_attach(SIM_DESC sd, struct pke_device* me);
enum pke_check_target { chk_vu, chk_path1, chk_path2, chk_path3 };
static int pke_check_stall(struct pke_device* me, enum pke_check_target what);
static void pke_flip_dbf(struct pke_device* me);
static void pke_begin_interrupt_stall(struct pke_device* me);
/* PKEcode handlers */
static void pke_code_nop(struct pke_device* me, unsigned_4 pkecode);
static void pke_code_stcycl(struct pke_device* me, unsigned_4 pkecode);
static void pke_code_offset(struct pke_device* me, unsigned_4 pkecode);
static void pke_code_base(struct pke_device* me, unsigned_4 pkecode);
static void pke_code_itop(struct pke_device* me, unsigned_4 pkecode);
static void pke_code_stmod(struct pke_device* me, unsigned_4 pkecode);
static void pke_code_mskpath3(struct pke_device* me, unsigned_4 pkecode);
static void pke_code_pkemark(struct pke_device* me, unsigned_4 pkecode);
static void pke_code_flushe(struct pke_device* me, unsigned_4 pkecode);
static void pke_code_flush(struct pke_device* me, unsigned_4 pkecode);
static void pke_code_flusha(struct pke_device* me, unsigned_4 pkecode);
static void pke_code_pkemscal(struct pke_device* me, unsigned_4 pkecode);
static void pke_code_pkemscnt(struct pke_device* me, unsigned_4 pkecode);
static void pke_code_pkemscalf(struct pke_device* me, unsigned_4 pkecode);
static void pke_code_stmask(struct pke_device* me, unsigned_4 pkecode);
static void pke_code_strow(struct pke_device* me, unsigned_4 pkecode);
static void pke_code_stcol(struct pke_device* me, unsigned_4 pkecode);
static void pke_code_mpg(struct pke_device* me, unsigned_4 pkecode);
static void pke_code_direct(struct pke_device* me, unsigned_4 pkecode);
static void pke_code_directhl(struct pke_device* me, unsigned_4 pkecode);
static void pke_code_unpack(struct pke_device* me, unsigned_4 pkecode);
static void pke_code_error(struct pke_device* me, unsigned_4 pkecode);



/* Static data */

struct pke_device pke0_device =
{
  { "pke0", &pke_io_read_buffer, &pke_io_write_buffer }, /* device */
  0, 0, /* ID, flags */
  {}, /* regs */
  {}, 0, /* FIFO write buffer */
  { NULL, 0, 0, 0 }, /* FIFO */
  NULL, /* FIFO trace file */
  -1, -1, 0, 0, 0, /* invalid FIFO cache */
  0, 0 /* pc */
};


struct pke_device pke1_device =
{
  { "pke1", &pke_io_read_buffer, &pke_io_write_buffer }, /* device */
  1, 0, /* ID, flags */
  {}, /* regs */
  {}, 0, /* FIFO write buffer */
  { NULL, 0, 0, 0 }, /* FIFO */
  NULL, /* FIFO trace file */
  -1, -1, 0, 0, 0, /* invalid FIFO cache */
  0, 0 /* pc */
};



/* External functions */


/* Attach PKE addresses to main memory */

void
pke0_attach(SIM_DESC sd)
{
  pke_attach(sd, & pke0_device);
  pke_reset(& pke0_device);
}

void
pke1_attach(SIM_DESC sd)
{
  pke_attach(sd, & pke1_device);
  pke_reset(& pke1_device);
}



/* Issue a PKE instruction if possible */

void
pke0_issue(SIM_DESC sd)
{
  pke_issue(sd, & pke0_device);
}

void
pke1_issue(SIM_DESC sd)
{
  pke_issue(sd, & pke1_device);
}



/* Internal functions */


/* Attach PKE memory regions to simulator */

void
pke_attach(SIM_DESC sd, struct pke_device* me)
{
  /* register file */
  sim_core_attach (sd, NULL, 0, access_read_write, 0,
                   (me->pke_number == 0) ? PKE0_REGISTER_WINDOW_START : PKE1_REGISTER_WINDOW_START,
                   PKE_REGISTER_WINDOW_SIZE /*nr_bytes*/,
                   0 /*modulo*/,
                   (device*) me,
                   NULL /*buffer*/);

  /* FIFO port */
  sim_core_attach (sd, NULL, 0, access_read_write, 0,
                   (me->pke_number == 0) ? PKE0_FIFO_ADDR : PKE1_FIFO_ADDR,
                   sizeof(quadword) /*nr_bytes*/,
                   0 /*modulo*/,
                   (device*) me,
                   NULL /*buffer*/);

  /* VU MEM0 tracking table */
  sim_core_attach (sd, NULL, 0, access_read_write, 0,
                   ((me->pke_number == 0) ? VU0_MEM0_SRCADDR_START : VU1_MEM0_SRCADDR_START),
                   ((me->pke_number == 0) ? VU0_MEM0_SIZE : VU1_MEM0_SIZE) / 2,
                   0 /*modulo*/,
                   NULL,
                   NULL /*buffer*/);

  /* VU MEM1 tracking table */
  sim_core_attach (sd, NULL, 0, access_read_write, 0,
                   ((me->pke_number == 0) ? VU0_MEM1_SRCADDR_START : VU1_MEM1_SRCADDR_START),
                   ((me->pke_number == 0) ? VU0_MEM1_SIZE : VU1_MEM1_SIZE) / 4,
                   0 /*modulo*/,
                   NULL,
                   NULL /*buffer*/);


  /* attach to trace file if appropriate */
  {
    char trace_envvar[80];
    char* trace_filename = NULL;
    sprintf(trace_envvar, "VIF%d_TRACE_FILE", me->pke_number);
    trace_filename = getenv(trace_envvar);
    if(trace_filename != NULL)
      {
        me->fifo_trace_file = fopen(trace_filename, "w");
        if(me->fifo_trace_file == NULL)
          perror("VIF FIFO trace error on fopen");
        else
          setvbuf(me->fifo_trace_file, NULL, _IOLBF, 0);
      }
  }
}
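/* Note: given the trace hook above, setting e.g. VIF1_TRACE_FILE in the
   environment before starting the simulator should make pke1 log one line
   per consumed FIFO quadword (data words, DMA source address, per-word
   classification); the exact record layout is the fprintf() in
   pke_pc_advance() below. */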



/* Handle a PKE read; return no. of bytes read */

int
pke_io_read_buffer(device *me_,
                   void *dest,
                   int space,
                   address_word addr,
                   unsigned nr_bytes,
                   sim_cpu *cpu,
                   sim_cia cia)
{
  /* downcast to recover the embedding pke_device struct */
  struct pke_device* me = (struct pke_device*) me_;

  /* find my address ranges */
  address_word my_reg_start =
    (me->pke_number == 0) ? PKE0_REGISTER_WINDOW_START : PKE1_REGISTER_WINDOW_START;
  address_word my_fifo_addr =
    (me->pke_number == 0) ? PKE0_FIFO_ADDR : PKE1_FIFO_ADDR;

  /* enforce that an access does not span more than one quadword */
  address_word low = ADDR_TRUNC_QW(addr);
  address_word high = ADDR_TRUNC_QW(addr + nr_bytes - 1);
  if(low != high)
    return 0;

  /* classify address & handle */
  if((addr >= my_reg_start) && (addr < my_reg_start + PKE_REGISTER_WINDOW_SIZE))
    {
      /* register bank */
      int reg_num = ADDR_TRUNC_QW(addr - my_reg_start) >> 4;
      int reg_byte = ADDR_OFFSET_QW(addr); /* find byte-offset inside register bank */
      int readable = 1;
      quadword result;

      /* clear result */
      result[0] = result[1] = result[2] = result[3] = 0;

      /* handle reads of individual registers; clear `readable' on error */
      switch(reg_num)
        {
          /* handle common case of register reading, side-effect free */
          /* PKE1-only registers */
        case PKE_REG_BASE:
        case PKE_REG_OFST:
        case PKE_REG_TOPS:
        case PKE_REG_TOP:
        case PKE_REG_DBF:
          if(me->pke_number == 0)
            readable = 0;
          /* fall through */
          /* PKE0 & PKE1 common registers */
        case PKE_REG_STAT:
        case PKE_REG_ERR:
        case PKE_REG_MARK:
        case PKE_REG_CYCLE:
        case PKE_REG_MODE:
        case PKE_REG_NUM:
        case PKE_REG_MASK:
        case PKE_REG_CODE:
        case PKE_REG_ITOPS:
        case PKE_REG_ITOP:
        case PKE_REG_R0:
        case PKE_REG_R1:
        case PKE_REG_R2:
        case PKE_REG_R3:
        case PKE_REG_C0:
        case PKE_REG_C1:
        case PKE_REG_C2:
        case PKE_REG_C3:
          result[0] = H2T_4(me->regs[reg_num][0]);
          break;

          /* handle common case of write-only registers */
        case PKE_REG_FBRST:
          readable = 0;
          break;

        default:
          ASSERT(0); /* test above should prevent this possibility */
        }

      /* perform transfer & return */
      if(readable)
        {
          /* copy the bits */
          memcpy(dest, ((unsigned_1*) &result) + reg_byte, nr_bytes);
          /* okay */
        }
      else
        {
          /* return zero bits */
          memset(dest, 0, nr_bytes);
        }

      return nr_bytes;
      /* NOTREACHED */
    }
  else if(addr >= my_fifo_addr &&
          addr < my_fifo_addr + sizeof(quadword))
    {
      /* FIFO */

      /* FIFO is not readable: return a word of zeroes */
      memset(dest, 0, nr_bytes);
      return nr_bytes;
    }

  /* NOTREACHED */
  return 0;
}
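/* Note: the >>4 above means each architectural register occupies its own
   16-byte quadword slot in the register window, with only the low 32-bit
   word populated; reads of the write-only FBRST register and of the FIFO
   port deliberately return zeroes rather than signalling an error. */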


/* Handle a PKE write; return no. of bytes written */

int
pke_io_write_buffer(device *me_,
                    const void *src,
                    int space,
                    address_word addr,
                    unsigned nr_bytes,
                    sim_cpu *cpu,
                    sim_cia cia)
{
  /* downcast to recover the embedding pke_device struct */
  struct pke_device* me = (struct pke_device*) me_;

  /* find my address ranges */
  address_word my_reg_start =
    (me->pke_number == 0) ? PKE0_REGISTER_WINDOW_START : PKE1_REGISTER_WINDOW_START;
  address_word my_fifo_addr =
    (me->pke_number == 0) ? PKE0_FIFO_ADDR : PKE1_FIFO_ADDR;

  /* enforce that an access does not span more than one quadword */
  address_word low = ADDR_TRUNC_QW(addr);
  address_word high = ADDR_TRUNC_QW(addr + nr_bytes - 1);
  if(low != high)
    return 0;

  /* classify address & handle */
  if((addr >= my_reg_start) && (addr < my_reg_start + PKE_REGISTER_WINDOW_SIZE))
    {
      /* register bank */
      int reg_num = ADDR_TRUNC_QW(addr - my_reg_start) >> 4;
      int reg_byte = ADDR_OFFSET_QW(addr); /* find byte-offset inside register bank */
      int writeable = 1;
      quadword input;

      /* clear input */
      input[0] = input[1] = input[2] = input[3] = 0;

      /* write user-given bytes into input */
      memcpy(((unsigned_1*) &input) + reg_byte, src, nr_bytes);

      /* make words host-endian */
      input[0] = T2H_4(input[0]);
      /* we may ignore other words */

      /* handle writes to individual registers; clear `writeable' on error */
      switch(reg_num)
        {
        case PKE_REG_FBRST:
          /* Order these tests from least to most overriding, in case
             multiple bits are set. */
          if(BIT_MASK_GET(input[0], PKE_REG_FBRST_STC_B, PKE_REG_FBRST_STC_E))
            {
              /* clear a bunch of status bits */
              PKE_REG_MASK_SET(me, STAT, PSS, 0);
              PKE_REG_MASK_SET(me, STAT, PFS, 0);
              PKE_REG_MASK_SET(me, STAT, PIS, 0);
              PKE_REG_MASK_SET(me, STAT, INT, 0);
              PKE_REG_MASK_SET(me, STAT, ER0, 0);
              PKE_REG_MASK_SET(me, STAT, ER1, 0);
              me->flags &= ~PKE_FLAG_PENDING_PSS;
              /* will allow resumption of possible stalled instruction */
            }
          if(BIT_MASK_GET(input[0], PKE_REG_FBRST_STP_B, PKE_REG_FBRST_STP_E))
            {
              me->flags |= PKE_FLAG_PENDING_PSS;
            }
          if(BIT_MASK_GET(input[0], PKE_REG_FBRST_FBK_B, PKE_REG_FBRST_FBK_E))
            {
              PKE_REG_MASK_SET(me, STAT, PFS, 1);
            }
          if(BIT_MASK_GET(input[0], PKE_REG_FBRST_RST_B, PKE_REG_FBRST_RST_E))
            {
              pke_reset(me);
            }
          break;

        case PKE_REG_ERR:
          /* copy bottom three bits */
          BIT_MASK_SET(me->regs[PKE_REG_ERR][0], 0, 2, BIT_MASK_GET(input[0], 0, 2));
          break;

        case PKE_REG_MARK:
          /* copy bottom sixteen bits */
          PKE_REG_MASK_SET(me, MARK, MARK, BIT_MASK_GET(input[0], 0, 15));
          /* reset MRK bit in STAT */
          PKE_REG_MASK_SET(me, STAT, MRK, 0);
          break;

          /* handle common case of read-only registers */
          /* PKE1-only registers - not really necessary to handle separately */
        case PKE_REG_BASE:
        case PKE_REG_OFST:
        case PKE_REG_TOPS:
        case PKE_REG_TOP:
        case PKE_REG_DBF:
          if(me->pke_number == 0)
            writeable = 0;
          /* fall through */
          /* PKE0 & PKE1 common registers */
        case PKE_REG_STAT:
          /* ignore FDR bit for PKE1_STAT -- simulator does not implement PKE->RAM transfers */
        case PKE_REG_CYCLE:
        case PKE_REG_MODE:
        case PKE_REG_NUM:
        case PKE_REG_MASK:
        case PKE_REG_CODE:
        case PKE_REG_ITOPS:
        case PKE_REG_ITOP:
        case PKE_REG_R0:
        case PKE_REG_R1:
        case PKE_REG_R2:
        case PKE_REG_R3:
        case PKE_REG_C0:
        case PKE_REG_C1:
        case PKE_REG_C2:
        case PKE_REG_C3:
          writeable = 0;
          break;

        default:
          ASSERT(0); /* test above should prevent this possibility */
        }

      /* perform return */
      if(! writeable)
        {
          ; /* error */
        }

      return nr_bytes;

      /* NOTREACHED */
    }
  else if(addr >= my_fifo_addr &&
          addr < my_fifo_addr + sizeof(quadword))
    {
      /* FIFO */
      struct fifo_quadword* fqw;
      int fifo_byte = ADDR_OFFSET_QW(addr); /* find byte-offset inside fifo quadword */
      unsigned_4 dma_tag_present = 0;
      int i;

      /* collect potentially-partial quadword in write buffer; LE byte order */
      memcpy(((unsigned_1*)& me->fifo_qw_in_progress) + fifo_byte, src, nr_bytes);
      /* mark bytes written */
      for(i = fifo_byte; i < fifo_byte + nr_bytes; i++)
        BIT_MASK_SET(me->fifo_qw_done, i, i, 1);

      /* return if quadword not quite written yet */
      if(BIT_MASK_GET(me->fifo_qw_done, 0, sizeof(quadword)-1) !=
         BIT_MASK_BTW(0, sizeof(quadword)-1))
        return nr_bytes;

      /* all done - process quadword after clearing flag */
      BIT_MASK_SET(me->fifo_qw_done, 0, sizeof(quadword)-1, 0);

      /* allocate required address in FIFO */
      fqw = pke_fifo_fit(& me->fifo);
      ASSERT(fqw != NULL);

      /* fill in unclassified FIFO quadword data in host byte order */
      fqw->word_class[0] = fqw->word_class[1] =
        fqw->word_class[2] = fqw->word_class[3] = wc_unknown;
      fqw->data[0] = T2H_4(me->fifo_qw_in_progress[0]);
      fqw->data[1] = T2H_4(me->fifo_qw_in_progress[1]);
      fqw->data[2] = T2H_4(me->fifo_qw_in_progress[2]);
      fqw->data[3] = T2H_4(me->fifo_qw_in_progress[3]);

      /* read DMAC-supplied indicators */
      ASSERT(sizeof(unsigned_4) == 4);
      PKE_MEM_READ(me, (me->pke_number == 0 ? DMA_D0_MADR : DMA_D1_MADR),
                   & fqw->source_address, /* converted to host-endian */
                   4);
      PKE_MEM_READ(me, (me->pke_number == 0 ? DMA_D0_PKTFLAG : DMA_D1_PKTFLAG),
                   & dma_tag_present,
                   4);

      if(dma_tag_present)
        {
          /* lower two words are DMA tags */
          fqw->word_class[0] = fqw->word_class[1] = wc_dma;
        }

      /* set FQC to "1" as FIFO is now not empty */
      PKE_REG_MASK_SET(me, STAT, FQC, 1);

      /* okay */
      return nr_bytes;
    }

  /* NOTREACHED */
  return 0;
}
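/* Note: because of the partial-write buffer above, a FIFO quadword may
   arrive in any byte-sized pieces (e.g. two 8-byte stores); nothing is
   pushed into the FIFO proper until all 16 done-bits are set. */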



/* Reset the PKE */
void
pke_reset(struct pke_device* me)
{
  /* advance PC over last quadword in FIFO; keep previous FIFO history */
  me->fifo_pc = pke_fifo_flush(& me->fifo);
  me->qw_pc = 0;
  /* clear registers, flag, other state */
  memset(me->regs, 0, sizeof(me->regs));
  me->fifo_qw_done = 0;
  me->flags = 0;
}



/* Issue & swallow next PKE opcode if possible/available */

void
pke_issue(SIM_DESC sd, struct pke_device* me)
{
  struct fifo_quadword* fqw;
  unsigned_4 fw;
  unsigned_4 cmd, intr;

  /* 1 -- fetch PKE instruction */

  /* confirm availability of new quadword of PKE instructions */
  fqw = pke_fifo_access(& me->fifo, me->fifo_pc);
  if(fqw == NULL)
    return;

  /* skip over DMA tag, if present */
  pke_pc_advance(me, 0);
  /* note: this can only change qw_pc from 0 to 2 and will not
     invalidate fqw */

  /* "fetch" instruction quadword and word */
  fw = fqw->data[me->qw_pc];

  /* store word in PKECODE register */
  me->regs[PKE_REG_CODE][0] = fw;


  /* 2 -- test go / no-go for PKE execution */

  /* switch on STAT:PSS if PSS-pending and in idle state */
  if((PKE_REG_MASK_GET(me, STAT, PPS) == PKE_REG_STAT_PPS_IDLE) &&
     (me->flags & PKE_FLAG_PENDING_PSS) != 0)
    {
      me->flags &= ~PKE_FLAG_PENDING_PSS;
      PKE_REG_MASK_SET(me, STAT, PSS, 1);
    }

  /* check for stall/halt control bits */
  if(PKE_REG_MASK_GET(me, STAT, PFS) ||
     PKE_REG_MASK_GET(me, STAT, PSS) || /* note special treatment below */
     /* PEW bit not a reason to keep stalling - it's just an indication, re-computed below */
     /* PGW bit not a reason to keep stalling - it's just an indication, re-computed below */
     /* ER0/ER1 not a reason to keep stalling - it's just an indication */
     PKE_REG_MASK_GET(me, STAT, PIS))
    {
      /* (still) stalled */
      PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_STALL);
      /* try again next cycle */
      return;
    }


  /* 3 -- decode PKE instruction */

  /* decoding */
  if(PKE_REG_MASK_GET(me, STAT, PPS) == PKE_REG_STAT_PPS_IDLE)
    PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_DECODE);

  /* Extract relevant bits from PKEcode */
  intr = BIT_MASK_GET(fw, PKE_OPCODE_I_B, PKE_OPCODE_I_E);
  cmd = BIT_MASK_GET(fw, PKE_OPCODE_CMD_B, PKE_OPCODE_CMD_E);

  /* handle interrupts */
  if(intr)
    {
      /* are we resuming an interrupt-stalled instruction? */
      if(me->flags & PKE_FLAG_INT_NOLOOP)
        {
          /* clear loop-prevention flag */
          me->flags &= ~PKE_FLAG_INT_NOLOOP;

          /* fall through to decode & execute */
          /* The pke_code_* functions should not check the MSB in the
             pkecode. */
        }
      else /* new interrupt-flagged instruction */
        {
          /* set INT flag in STAT register */
          PKE_REG_MASK_SET(me, STAT, INT, 1);
          /* set loop-prevention flag */
          me->flags |= PKE_FLAG_INT_NOLOOP;

          /* set PIS if stall not masked */
          if(!PKE_REG_MASK_GET(me, ERR, MII))
            pke_begin_interrupt_stall(me);

          /* suspend this instruction unless it's PKEMARK */
          if(!IS_PKE_CMD(cmd, PKEMARK))
            {
              PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_STALL);
              return;
            }
          else
            {
              ; /* fall through to decode & execute */
            }
        }
    }


  /* decode & execute */
  if(IS_PKE_CMD(cmd, PKENOP))
    pke_code_nop(me, fw);
  else if(IS_PKE_CMD(cmd, STCYCL))
    pke_code_stcycl(me, fw);
  else if(me->pke_number == 1 && IS_PKE_CMD(cmd, OFFSET))
    pke_code_offset(me, fw);
  else if(me->pke_number == 1 && IS_PKE_CMD(cmd, BASE))
    pke_code_base(me, fw);
  else if(IS_PKE_CMD(cmd, ITOP))
    pke_code_itop(me, fw);
  else if(IS_PKE_CMD(cmd, STMOD))
    pke_code_stmod(me, fw);
  else if(me->pke_number == 1 && IS_PKE_CMD(cmd, MSKPATH3))
    pke_code_mskpath3(me, fw);
  else if(IS_PKE_CMD(cmd, PKEMARK))
    pke_code_pkemark(me, fw);
  else if(IS_PKE_CMD(cmd, FLUSHE))
    pke_code_flushe(me, fw);
  else if(me->pke_number == 1 && IS_PKE_CMD(cmd, FLUSH))
    pke_code_flush(me, fw);
  else if(me->pke_number == 1 && IS_PKE_CMD(cmd, FLUSHA))
    pke_code_flusha(me, fw);
  else if(IS_PKE_CMD(cmd, PKEMSCAL))
    pke_code_pkemscal(me, fw);
  else if(IS_PKE_CMD(cmd, PKEMSCNT))
    pke_code_pkemscnt(me, fw);
  else if(me->pke_number == 1 && IS_PKE_CMD(cmd, PKEMSCALF))
    pke_code_pkemscalf(me, fw);
  else if(IS_PKE_CMD(cmd, STMASK))
    pke_code_stmask(me, fw);
  else if(IS_PKE_CMD(cmd, STROW))
    pke_code_strow(me, fw);
  else if(IS_PKE_CMD(cmd, STCOL))
    pke_code_stcol(me, fw);
  else if(IS_PKE_CMD(cmd, MPG))
    pke_code_mpg(me, fw);
  else if(IS_PKE_CMD(cmd, DIRECT))
    pke_code_direct(me, fw);
  else if(IS_PKE_CMD(cmd, DIRECTHL))
    pke_code_directhl(me, fw);
  else if(IS_PKE_CMD(cmd, UNPACK))
    pke_code_unpack(me, fw);
  /* ... no other commands ... */
  else
    pke_code_error(me, fw);
}



/* Clear out contents of FIFO; act as if it was empty.  Return PC
   pointing to one-past-last word. */

unsigned_4
pke_fifo_flush(struct pke_fifo* fifo)
{
  /* don't modify any state! */
  return fifo->origin + fifo->next;
}



/* Clear out contents of FIFO; make it really empty. */

void
pke_fifo_reset(struct pke_fifo* fifo)
{
  int i;

  /* clear fifo quadwords */
  for(i=0; i<fifo->next; i++)
    {
      zfree(fifo->quadwords[i]);
      fifo->quadwords[i] = NULL;
    }

  /* reset pointers */
  fifo->origin = 0;
  fifo->next = 0;
}



/* Make space for the next quadword in the FIFO.  Allocate/enlarge
   FIFO pointer block if necessary.  Return a pointer to it. */

struct fifo_quadword*
pke_fifo_fit(struct pke_fifo* fifo)
{
  struct fifo_quadword* fqw;

  /* out of space on quadword pointer array? */
  if(fifo->next == fifo->length) /* also triggered before fifo->quadwords allocated */
    {
      struct fifo_quadword** new_qw;
      unsigned_4 new_length = fifo->length + PKE_FIFO_GROW_SIZE;

      /* allocate new pointer block */
      new_qw = zalloc(new_length * sizeof(struct fifo_quadword*));
      ASSERT(new_qw != NULL);

      /* copy over old contents, if any */
      if(fifo->quadwords != NULL)
        {
          /* copy over old pointers to beginning of new block */
          memcpy(new_qw, fifo->quadwords,
                 fifo->length * sizeof(struct fifo_quadword*));

          /* free old block */
          zfree(fifo->quadwords);
        }

      /* replace pointers & counts */
      fifo->quadwords = new_qw;
      fifo->length = new_length;
    }

  /* sanity check */
  ASSERT(fifo->quadwords != NULL);

  /* allocate new quadword from heap */
  fqw = zalloc(sizeof(struct fifo_quadword));
  ASSERT(fqw != NULL);

  /* push quadword onto fifo */
  fifo->quadwords[fifo->next] = fqw;
  fifo->next++;
  return fqw;
}
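/* Note: the pointer array grows linearly by PKE_FIFO_GROW_SIZE entries at a
   time rather than doubling; quadwords themselves are heap-allocated one at
   a time, so a grown FIFO costs only pointer-array space until filled. */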



/* Return a pointer to the FIFO quadword with given absolute index, or
   NULL if it is out of range */

struct fifo_quadword*
pke_fifo_access(struct pke_fifo* fifo, unsigned_4 qwnum)
{
  struct fifo_quadword* fqw;

  if((qwnum < fifo->origin) || /* before history */
     (qwnum >= fifo->origin + fifo->next)) /* after last available quadword */
    fqw = NULL;
  else
    {
      ASSERT(fifo->quadwords != NULL); /* must be allocated already */
      fqw = fifo->quadwords[qwnum - fifo->origin]; /* pull out pointer from array */
      ASSERT(fqw != NULL); /* must be allocated already */
    }

  return fqw;
}


/* Authorize release of any FIFO entries older than given absolute quadword. */
void
pke_fifo_old(struct pke_fifo* fifo, unsigned_4 qwnum)
{
  /* do we have any too-old FIFO elements? */
  if(fifo->origin + PKE_FIFO_ARCHEOLOGY < qwnum)
    {
      /* count quadwords to forget */
      int horizon = qwnum - (fifo->origin + PKE_FIFO_ARCHEOLOGY);
      int i;

      /* free quadwords at indices below horizon */
      for(i=0; i < horizon; i++)
        zfree(fifo->quadwords[i]);

      /* move surviving quadword pointers down to beginning of array */
      for(i=horizon; i < fifo->next; i++)
        fifo->quadwords[i-horizon] = fifo->quadwords[i];

      /* clear duplicate pointers */
      for(i=fifo->next - horizon; i < fifo->next; i++)
        fifo->quadwords[i] = NULL;

      /* adjust FIFO pointers */
      fifo->origin = fifo->origin + horizon;
      fifo->next = fifo->next - horizon;
    }
}
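/* Worked example (assuming, say, PKE_FIFO_ARCHEOLOGY == 64; its real value
   is presumably defined in sky-pke.h): with origin == 0 and qwnum == 70,
   horizon == 6, so quadwords 0..5 are freed, survivors slide down, origin
   becomes 6, and absolute indexing via pke_fifo_access() is unaffected. */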




/* advance the PC by given number of data words; update STAT/FQC
   field; assume FIFO is filled enough; classify passed-over words;
   write FIFO trace line */

void
pke_pc_advance(struct pke_device* me, int num_words)
{
  int num = num_words;
  struct fifo_quadword* fq = NULL;
  unsigned_4 old_fifo_pc = me->fifo_pc;

  ASSERT(num_words >= 0);

  /* printf("pke %d pc_advance num_words %d\n", me->pke_number, num_words); */

  while(1)
    {
      /* find next quadword, if any */
      fq = pke_fifo_access(& me->fifo, me->fifo_pc);

      /* skip over DMA tag words if present in word 0 or 1 */
      if(fq != NULL && fq->word_class[me->qw_pc] == wc_dma)
        {
          /* skip by going around loop an extra time */
          num ++;
        }

      /* nothing left to skip / no DMA tag here */
      if(num == 0)
        break;

      /* we are supposed to skip existing words */
      ASSERT(fq != NULL);

      /* one word skipped */
      num --;

      /* point to next word */
      me->qw_pc ++;
      if(me->qw_pc == 4)
        {
          me->qw_pc = 0;
          me->fifo_pc ++;

          /* trace the consumption of the FIFO quadword we just skipped over */
          /* fq still points to it */
          if(me->fifo_trace_file != NULL)
            {
              /* assert complete classification */
              ASSERT(fq->word_class[3] != wc_unknown);
              ASSERT(fq->word_class[2] != wc_unknown);
              ASSERT(fq->word_class[1] != wc_unknown);
              ASSERT(fq->word_class[0] != wc_unknown);

              /* print trace record */
              fprintf(me->fifo_trace_file,
                      "%d 0x%08x_%08x_%08x_%08x 0x%08x %c%c%c%c\n",
                      (me->pke_number == 0 ? 0 : 1),
                      (unsigned) fq->data[3], (unsigned) fq->data[2],
                      (unsigned) fq->data[1], (unsigned) fq->data[0],
                      (unsigned) fq->source_address,
                      fq->word_class[3], fq->word_class[2],
                      fq->word_class[1], fq->word_class[0]);
            }
        } /* next quadword */
    }

  /* age old entries before PC */
  if(me->fifo_pc != old_fifo_pc)
    {
      /* we advanced the fifo-pc; authorize disposal of anything
         before previous PKEcode */
      pke_fifo_old(& me->fifo, old_fifo_pc);
    }

  /* clear FQC if FIFO is now empty */
  fq = pke_fifo_access(& me->fifo, me->fifo_pc);
  if(fq == NULL)
    {
      PKE_REG_MASK_SET(me, STAT, FQC, 0);
    }
  else /* annotate the word where the PC lands as a PKEcode */
    {
      ASSERT(fq->word_class[me->qw_pc] == wc_pkecode || fq->word_class[me->qw_pc] == wc_unknown);
      fq->word_class[me->qw_pc] = wc_pkecode;
    }
}





/* Return pointer to FIFO quadword containing given operand# in FIFO.
   `operand_num' starts at 1.  Return pointer to operand word in last
   argument, if non-NULL.  If FIFO is not full enough, return 0.
   Signal an ER0 indication upon skipping a DMA tag. */

struct fifo_quadword*
pke_pcrel_fifo(struct pke_device* me, int operand_num, unsigned_4** operand)
{
  int num;
  int new_qw_pc, new_fifo_pc;
  struct fifo_quadword* fq = NULL;

  /* check for validity of last search results in cache */
  if(me->last_fifo_pc == me->fifo_pc &&
     me->last_qw_pc == me->qw_pc &&
     operand_num > me->last_num)
    {
      /* continue search from last stop */
      new_fifo_pc = me->last_new_fifo_pc;
      new_qw_pc = me->last_new_qw_pc;
      num = operand_num - me->last_num;
    }
  else
    {
      /* start search from scratch */
      new_fifo_pc = me->fifo_pc;
      new_qw_pc = me->qw_pc;
      num = operand_num;
    }

  ASSERT(num > 0);

  /* printf("pke %d pcrel_fifo operand_num %d\n", me->pke_number, operand_num); */

  do
    {
      /* one word skipped */
      num --;

      /* point to next word */
      new_qw_pc ++;
      if(new_qw_pc == 4)
        {
          new_qw_pc = 0;
          new_fifo_pc ++;
        }

      fq = pke_fifo_access(& me->fifo, new_fifo_pc);

      /* check for FIFO underflow */
      if(fq == NULL)
        break;

      /* skip over DMA tag words if present in word 0 or 1 */
      if(fq->word_class[new_qw_pc] == wc_dma)
        {
          /* set ER0 */
          PKE_REG_MASK_SET(me, STAT, ER0, 1);

          /* mismatch error! */
          if(! PKE_REG_MASK_GET(me, ERR, ME0))
            {
              pke_begin_interrupt_stall(me);
              /* don't stall just yet -- finish this instruction */
              /* the PPS_STALL state will be entered by pke_issue() next time */
            }
          /* skip by going around loop an extra time */
          num ++;
        }
    }
  while(num > 0);

  /* return pointer to operand word itself */
  if(fq != NULL)
    {
      *operand = & fq->data[new_qw_pc];

      /* annotate the word where the pseudo-PC lands as a PKE operand */
      ASSERT(fq->word_class[new_qw_pc] == wc_pkedata || fq->word_class[new_qw_pc] == wc_unknown);
      fq->word_class[new_qw_pc] = wc_pkedata;

      /* store search results in cache */
      /* keys */
      me->last_fifo_pc = me->fifo_pc;
      me->last_qw_pc = me->qw_pc;
      /* values */
      me->last_num = operand_num;
      me->last_new_fifo_pc = new_fifo_pc;
      me->last_new_qw_pc = new_qw_pc;
    }

  return fq;
}
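/* Note: the last_* fields form a one-entry memo keyed on the current
   (fifo_pc, qw_pc) pair: when a handler asks for operands 1, 2, 3, ... in
   ascending order, each lookup resumes from the previous stopping point
   instead of rescanning the FIFO from the PC. */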


/* Return pointer to given operand# in FIFO.  `operand_num' starts at 1.
   If FIFO is not full enough, return 0.  Skip over DMA tags, but mark
   them as an error (ER0). */

unsigned_4*
pke_pcrel_operand(struct pke_device* me, int operand_num)
{
  unsigned_4* operand = NULL;
  struct fifo_quadword* fifo_operand;

  fifo_operand = pke_pcrel_fifo(me, operand_num, & operand);

  if(fifo_operand == NULL)
    ASSERT(operand == NULL); /* pke_pcrel_fifo() ought to leave it untouched */

  return operand;
}


/* Return a bit-field extract of given operand# in FIFO, and its
   source-addr.  `bit_offset' starts at 0, referring to LSB after PKE
   instruction word.  Width must be >0, <=32.  Assume FIFO is full
   enough.  Skip over DMA tags, but mark them as an error (ER0). */

unsigned_4
pke_pcrel_operand_bits(struct pke_device* me, int bit_offset, int bit_width, unsigned_4* source_addr)
{
  unsigned_4* word = NULL;
  unsigned_4 value;
  struct fifo_quadword* fifo_operand;
  int wordnumber, bitnumber;

  wordnumber = bit_offset/32;
  bitnumber = bit_offset%32;

  /* find operand word with bitfield */
  fifo_operand = pke_pcrel_fifo(me, wordnumber + 1, &word);
  ASSERT(word != NULL);

  /* extract bitfield from word */
  value = BIT_MASK_GET(*word, bitnumber, bitnumber + bit_width - 1);

  /* extract source addr from fifo word */
  *source_addr = fifo_operand->source_address;

  return value;
}
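/* Worked example: bit_offset == 37 with bit_width == 5 gives
   wordnumber == 1, bitnumber == 5, i.e. bits 5..9 of the second operand
   word after the PKEcode -- this is how UNPACK V4_5 below would address
   the second 5-bit field of its third packed vector. */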



/* check for stall conditions on indicated devices (path* only on
   PKE1), do not change status; return 0 iff no stall */
int
pke_check_stall(struct pke_device* me, enum pke_check_target what)
{
  int any_stall = 0;
  unsigned_4 cop2_stat, gpuif_stat;

  /* read status words */
  ASSERT(sizeof(unsigned_4) == 4);
  PKE_MEM_READ(me, (GIF_REG_STAT),
               & gpuif_stat,
               4);
  PKE_MEM_READ(me, (COP2_REG_STAT_ADDR),
               & cop2_stat,
               4);

  /* perform checks */
  if(what == chk_vu)
    {
      if(me->pke_number == 0)
        any_stall = BIT_MASK_GET(cop2_stat, COP2_REG_STAT_VBS0_B, COP2_REG_STAT_VBS0_E);
      else /* if(me->pke_number == 1) */
        any_stall = BIT_MASK_GET(cop2_stat, COP2_REG_STAT_VBS1_B, COP2_REG_STAT_VBS1_E);
    }
  else if(what == chk_path1) /* VU -> GPUIF */
    {
      if(BIT_MASK_GET(gpuif_stat, GPUIF_REG_STAT_APATH_B, GPUIF_REG_STAT_APATH_E) == 1)
        any_stall = 1;
    }
  else if(what == chk_path2) /* PKE -> GPUIF */
    {
      if(BIT_MASK_GET(gpuif_stat, GPUIF_REG_STAT_APATH_B, GPUIF_REG_STAT_APATH_E) == 2)
        any_stall = 1;
    }
  else if(what == chk_path3) /* DMA -> GPUIF */
    {
      if(BIT_MASK_GET(gpuif_stat, GPUIF_REG_STAT_APATH_B, GPUIF_REG_STAT_APATH_E) == 3)
        any_stall = 1;
    }
  else
    {
      /* invalid what */
      ASSERT(0);
    }

  /* any stall reasons? */
  return any_stall;
}


/* PKE1 only: flip the DBF bit; recompute TOPS, TOP */
void
pke_flip_dbf(struct pke_device* me)
{
  int newdf;
  /* compute new TOP */
  PKE_REG_MASK_SET(me, TOP, TOP,
                   PKE_REG_MASK_GET(me, TOPS, TOPS));
  /* flip DBF */
  newdf = PKE_REG_MASK_GET(me, DBF, DF) ? 0 : 1;
  PKE_REG_MASK_SET(me, DBF, DF, newdf);
  PKE_REG_MASK_SET(me, STAT, DBF, newdf);
  /* compute new TOPS */
  PKE_REG_MASK_SET(me, TOPS, TOPS,
                   (PKE_REG_MASK_GET(me, BASE, BASE) +
                    newdf * PKE_REG_MASK_GET(me, OFST, OFFSET)));

  /* this is equivalent to last word from okadaa (98-02-25):
     1) TOP=TOPS;
     2) TOPS=BASE + !DBF*OFFSET
     3) DBF=!DBF */
}


/* set the STAT:PIS bit and send an interrupt to the 5900 */
void
pke_begin_interrupt_stall(struct pke_device* me)
{
  /* set PIS */
  PKE_REG_MASK_SET(me, STAT, PIS, 1);

  /* XXX: send interrupt to 5900? */
}




/* PKEcode handler functions -- responsible for checking and
   confirming old stall conditions, executing pkecode, updating PC and
   status registers -- may assume being run on correct PKE unit */

void
pke_code_nop(struct pke_device* me, unsigned_4 pkecode)
{
  /* done */
  pke_pc_advance(me, 1);
  PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_IDLE);
}


void
pke_code_stcycl(struct pke_device* me, unsigned_4 pkecode)
{
  int imm = BIT_MASK_GET(pkecode, PKE_OPCODE_IMM_B, PKE_OPCODE_IMM_E);

  /* copy immediate value into CYCLE reg */
  PKE_REG_MASK_SET(me, CYCLE, WL, BIT_MASK_GET(imm, 8, 15));
  PKE_REG_MASK_SET(me, CYCLE, CL, BIT_MASK_GET(imm, 0, 7));
  /* done */
  pke_pc_advance(me, 1);
  PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_IDLE);
}
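/* Example: a STCYCL with imm == 0x0104 yields CL == 4 (bits 0..7) and
   WL == 1 (bits 8..15), the cycle settings later consulted by UNPACK's
   write/skip addressing below. */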


void
pke_code_offset(struct pke_device* me, unsigned_4 pkecode)
{
  int imm = BIT_MASK_GET(pkecode, PKE_OPCODE_IMM_B, PKE_OPCODE_IMM_E);

  /* copy 10 bits to OFFSET field */
  PKE_REG_MASK_SET(me, OFST, OFFSET, BIT_MASK_GET(imm, 0, 9));
  /* clear DBF bit */
  PKE_REG_MASK_SET(me, DBF, DF, 0);
  /* clear other DBF bit */
  PKE_REG_MASK_SET(me, STAT, DBF, 0);
  /* set TOPS = BASE */
  PKE_REG_MASK_SET(me, TOPS, TOPS, PKE_REG_MASK_GET(me, BASE, BASE));
  /* done */
  pke_pc_advance(me, 1);
  PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_IDLE);
}


void
pke_code_base(struct pke_device* me, unsigned_4 pkecode)
{
  int imm = BIT_MASK_GET(pkecode, PKE_OPCODE_IMM_B, PKE_OPCODE_IMM_E);

  /* copy 10 bits to BASE field */
  PKE_REG_MASK_SET(me, BASE, BASE, BIT_MASK_GET(imm, 0, 9));
  /* done */
  pke_pc_advance(me, 1);
  PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_IDLE);
}


void
pke_code_itop(struct pke_device* me, unsigned_4 pkecode)
{
  int imm = BIT_MASK_GET(pkecode, PKE_OPCODE_IMM_B, PKE_OPCODE_IMM_E);

  /* copy 10 bits to ITOPS field */
  PKE_REG_MASK_SET(me, ITOPS, ITOPS, BIT_MASK_GET(imm, 0, 9));
  /* done */
  pke_pc_advance(me, 1);
  PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_IDLE);
}


void
pke_code_stmod(struct pke_device* me, unsigned_4 pkecode)
{
  int imm = BIT_MASK_GET(pkecode, PKE_OPCODE_IMM_B, PKE_OPCODE_IMM_E);

  /* copy mode bits (bits 0..2 of immediate) to MODE register */
  PKE_REG_MASK_SET(me, MODE, MDE, BIT_MASK_GET(imm, 0, 2));
  /* done */
  pke_pc_advance(me, 1);
  PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_IDLE);
}


void
pke_code_mskpath3(struct pke_device* me, unsigned_4 pkecode)
{
  int imm = BIT_MASK_GET(pkecode, PKE_OPCODE_IMM_B, PKE_OPCODE_IMM_E);
  unsigned_4 gif_mode;

  /* set appropriate bit */
  if(BIT_MASK_GET(imm, PKE_REG_MSKPATH3_B, PKE_REG_MSKPATH3_E) != 0)
    gif_mode = GIF_REG_MODE_M3R_MASK;
  else
    gif_mode = 0;

  /* write register; patrickm code will look at M3R bit only */
  PKE_MEM_WRITE(me, GIF_REG_MODE, & gif_mode, 4);

  /* done */
  pke_pc_advance(me, 1);
  PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_IDLE);
}


void
pke_code_pkemark(struct pke_device* me, unsigned_4 pkecode)
{
  int imm = BIT_MASK_GET(pkecode, PKE_OPCODE_IMM_B, PKE_OPCODE_IMM_E);
  /* copy 16 bits to MARK register */
  PKE_REG_MASK_SET(me, MARK, MARK, BIT_MASK_GET(imm, 0, 15));
  /* set MRK bit in STAT register - CPU2 v2.1 docs incorrect */
  PKE_REG_MASK_SET(me, STAT, MRK, 1);
  /* done */
  pke_pc_advance(me, 1);
  PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_IDLE);
}


void
pke_code_flushe(struct pke_device* me, unsigned_4 pkecode)
{
  /* compute next PEW bit */
  if(pke_check_stall(me, chk_vu))
    {
      /* VU busy */
      PKE_REG_MASK_SET(me, STAT, PEW, 1);
      PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_STALL);
      /* try again next cycle */
    }
  else
    {
      /* VU idle */
      PKE_REG_MASK_SET(me, STAT, PEW, 0);
      PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_IDLE);
      pke_pc_advance(me, 1);
    }
}


void
pke_code_flush(struct pke_device* me, unsigned_4 pkecode)
{
  int something_busy = 0;

  /* compute next PEW, PGW bits */
  if(pke_check_stall(me, chk_vu))
    {
      something_busy = 1;
      PKE_REG_MASK_SET(me, STAT, PEW, 1);
    }
  else
    PKE_REG_MASK_SET(me, STAT, PEW, 0);


  if(pke_check_stall(me, chk_path1) ||
     pke_check_stall(me, chk_path2))
    {
      something_busy = 1;
      PKE_REG_MASK_SET(me, STAT, PGW, 1);
    }
  else
    PKE_REG_MASK_SET(me, STAT, PGW, 0);

  /* go or no go */
  if(something_busy)
    {
      PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_WAIT);
      /* try again next cycle */
    }
  else
    {
      /* all idle */
      PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_IDLE);
      pke_pc_advance(me, 1);
    }
}


void
pke_code_flusha(struct pke_device* me, unsigned_4 pkecode)
{
  int something_busy = 0;

  /* compute next PEW, PGW bits */
  if(pke_check_stall(me, chk_vu))
    {
      something_busy = 1;
      PKE_REG_MASK_SET(me, STAT, PEW, 1);
    }
  else
    PKE_REG_MASK_SET(me, STAT, PEW, 0);


  if(pke_check_stall(me, chk_path1) ||
     pke_check_stall(me, chk_path2) ||
     pke_check_stall(me, chk_path3))
    {
      something_busy = 1;
      PKE_REG_MASK_SET(me, STAT, PGW, 1);
    }
  else
    PKE_REG_MASK_SET(me, STAT, PGW, 0);

  if(something_busy)
    {
      PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_WAIT);
      /* try again next cycle */
    }
  else
    {
      /* all idle */
      PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_IDLE);
      pke_pc_advance(me, 1);
    }
}


void
pke_code_pkemscal(struct pke_device* me, unsigned_4 pkecode)
{
  /* compute next PEW bit */
  if(pke_check_stall(me, chk_vu))
    {
      /* VU busy */
      PKE_REG_MASK_SET(me, STAT, PEW, 1);
      PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_STALL);
      /* try again next cycle */
    }
  else
    {
      unsigned_4 vu_pc;
      int imm = BIT_MASK_GET(pkecode, PKE_OPCODE_IMM_B, PKE_OPCODE_IMM_E);

      /* VU idle */
      PKE_REG_MASK_SET(me, STAT, PEW, 0);

      /* flip DBF on PKE1 */
      if(me->pke_number == 1)
        pke_flip_dbf(me);

      /* compute new PC for VU (host byte-order) */
      vu_pc = BIT_MASK_GET(imm, 0, 15);
      vu_pc = T2H_4(vu_pc);

      /* write new PC; callback function gets VU running */
      ASSERT(sizeof(unsigned_4) == 4);
      PKE_MEM_WRITE(me, (me->pke_number == 0 ? VU0_CIA : VU1_CIA),
                    & vu_pc,
                    4);

      /* copy ITOPS field to ITOP */
      PKE_REG_MASK_SET(me, ITOP, ITOP, PKE_REG_MASK_GET(me, ITOPS, ITOPS));

      /* done */
      PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_IDLE);
      pke_pc_advance(me, 1);
    }
}



void
pke_code_pkemscnt(struct pke_device* me, unsigned_4 pkecode)
{
  /* compute next PEW bit */
  if(pke_check_stall(me, chk_vu))
    {
      /* VU busy */
      PKE_REG_MASK_SET(me, STAT, PEW, 1);
      PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_STALL);
      /* try again next cycle */
    }
  else
    {
      unsigned_4 vu_pc;

      /* VU idle */
      PKE_REG_MASK_SET(me, STAT, PEW, 0);

      /* flip DBF on PKE1 */
      if(me->pke_number == 1)
        pke_flip_dbf(me);

      /* read old PC */
      ASSERT(sizeof(unsigned_4) == 4);
      PKE_MEM_READ(me, (me->pke_number == 0 ? VU0_CIA : VU1_CIA),
                   & vu_pc,
                   4);

      /* rewrite new PC; callback function gets VU running */
      ASSERT(sizeof(unsigned_4) == 4);
      PKE_MEM_WRITE(me, (me->pke_number == 0 ? VU0_CIA : VU1_CIA),
                    & vu_pc,
                    4);

      /* copy ITOPS field to ITOP */
      PKE_REG_MASK_SET(me, ITOP, ITOP, PKE_REG_MASK_GET(me, ITOPS, ITOPS));

      /* done */
      PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_IDLE);
      pke_pc_advance(me, 1);
    }
}


void
pke_code_pkemscalf(struct pke_device* me, unsigned_4 pkecode)
{
  int something_busy = 0;

  /* compute next PEW, PGW bits */
  if(pke_check_stall(me, chk_vu))
    {
      something_busy = 1;
      PKE_REG_MASK_SET(me, STAT, PEW, 1);
    }
  else
    PKE_REG_MASK_SET(me, STAT, PEW, 0);


  if(pke_check_stall(me, chk_path1) ||
     pke_check_stall(me, chk_path2) ||
     pke_check_stall(me, chk_path3))
    {
      something_busy = 1;
      PKE_REG_MASK_SET(me, STAT, PGW, 1);
    }
  else
    PKE_REG_MASK_SET(me, STAT, PGW, 0);

  /* go or no go */
  if(something_busy)
    {
      PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_WAIT);
      /* try again next cycle */
    }
  else
    {
      unsigned_4 vu_pc;
      int imm = BIT_MASK_GET(pkecode, PKE_OPCODE_IMM_B, PKE_OPCODE_IMM_E);

      /* flip DBF on PKE1 */
      if(me->pke_number == 1)
        pke_flip_dbf(me);

      /* compute new PC for VU (host byte-order) */
      vu_pc = BIT_MASK_GET(imm, 0, 15);
      vu_pc = T2H_4(vu_pc);

      /* rewrite new PC; callback function gets VU running */
      ASSERT(sizeof(unsigned_4) == 4);
      PKE_MEM_WRITE(me, (me->pke_number == 0 ? VU0_CIA : VU1_CIA),
                    & vu_pc,
                    4);

      /* copy ITOPS field to ITOP */
      PKE_REG_MASK_SET(me, ITOP, ITOP, PKE_REG_MASK_GET(me, ITOPS, ITOPS));

      /* done */
      PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_IDLE);
      pke_pc_advance(me, 1);
    }
}


void
pke_code_stmask(struct pke_device* me, unsigned_4 pkecode)
{
  unsigned_4* mask;

  /* check that FIFO has one more word for STMASK operand */
  mask = pke_pcrel_operand(me, 1);
  if(mask != NULL)
    {
      /* "transferring" operand */
      PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_XFER);

      /* set NUM */
      PKE_REG_MASK_SET(me, NUM, NUM, 1);

      /* fill the register */
      PKE_REG_MASK_SET(me, MASK, MASK, *mask);

      /* set NUM */
      PKE_REG_MASK_SET(me, NUM, NUM, 0);

      /* done */
      PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_IDLE);
      pke_pc_advance(me, 2);
    }
  else
    {
      /* need to wait for another word */
      PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_WAIT);
      /* try again next cycle */
    }
}


void
pke_code_strow(struct pke_device* me, unsigned_4 pkecode)
{
  /* check that FIFO has four more words for STROW operand */
  unsigned_4* last_op;

  last_op = pke_pcrel_operand(me, 4);
  if(last_op != NULL)
    {
      /* "transferring" operand */
      PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_XFER);

      /* set NUM */
      PKE_REG_MASK_SET(me, NUM, NUM, 1);

      /* copy ROW registers: must all exist if 4th operand exists */
      me->regs[PKE_REG_R0][0] = * pke_pcrel_operand(me, 1);
      me->regs[PKE_REG_R1][0] = * pke_pcrel_operand(me, 2);
      me->regs[PKE_REG_R2][0] = * pke_pcrel_operand(me, 3);
      me->regs[PKE_REG_R3][0] = * pke_pcrel_operand(me, 4);

      /* set NUM */
      PKE_REG_MASK_SET(me, NUM, NUM, 0);

      /* done */
      PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_IDLE);
      pke_pc_advance(me, 5);
    }
  else
    {
      /* need to wait for another word */
      PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_WAIT);
      /* try again next cycle */
    }
}


void
pke_code_stcol(struct pke_device* me, unsigned_4 pkecode)
{
  /* check that FIFO has four more words for STCOL operand */
  unsigned_4* last_op;

  last_op = pke_pcrel_operand(me, 4);
  if(last_op != NULL)
    {
      /* "transferring" operand */
      PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_XFER);

      /* set NUM */
      PKE_REG_MASK_SET(me, NUM, NUM, 1);

      /* copy COL registers: must all exist if 4th operand exists */
      me->regs[PKE_REG_C0][0] = * pke_pcrel_operand(me, 1);
      me->regs[PKE_REG_C1][0] = * pke_pcrel_operand(me, 2);
      me->regs[PKE_REG_C2][0] = * pke_pcrel_operand(me, 3);
      me->regs[PKE_REG_C3][0] = * pke_pcrel_operand(me, 4);

      /* set NUM */
      PKE_REG_MASK_SET(me, NUM, NUM, 0);

      /* done */
      PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_IDLE);
      pke_pc_advance(me, 5);
    }
  else
    {
      /* need to wait for another word */
      PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_WAIT);
      /* try again next cycle */
    }
}


void
pke_code_mpg(struct pke_device* me, unsigned_4 pkecode)
{
  unsigned_4* last_mpg_word;
  int num = BIT_MASK_GET(pkecode, PKE_OPCODE_NUM_B, PKE_OPCODE_NUM_E);
  int imm = BIT_MASK_GET(pkecode, PKE_OPCODE_IMM_B, PKE_OPCODE_IMM_E);

  /* assert 64-bit alignment of MPG operand */
  if(me->qw_pc != 3 && me->qw_pc != 1)
    return pke_code_error(me, pkecode);

  /* map zero to max+1 */
  if(num==0) num=0x100;

  /* check that FIFO has a few more words for MPG operand */
  last_mpg_word = pke_pcrel_operand(me, num*2); /* num: number of 64-bit words */
  if(last_mpg_word != NULL)
    {
      /* perform implied FLUSHE */
      if(pke_check_stall(me, chk_vu))
        {
          /* VU busy */
          PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_STALL);
          /* retry this instruction next clock */
        }
      else
        {
          /* VU idle */
          int i;

          /* "transferring" operand */
          PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_XFER);

          /* set NUM */
          PKE_REG_MASK_SET(me, NUM, NUM, num);

          /* transfer VU instructions, one word-pair per iteration */
          for(i=0; i<num; i++)
            {
              address_word vu_addr_base, vu_addr;
              address_word vutrack_addr_base, vutrack_addr;
              address_word vu_addr_max_size;
              unsigned_4 vu_lower_opcode, vu_upper_opcode;
              unsigned_4* operand;
              struct fifo_quadword* fq;
              int next_num;

              /* decrement NUM */
              next_num = PKE_REG_MASK_GET(me, NUM, NUM) - 1;
              PKE_REG_MASK_SET(me, NUM, NUM, next_num);

              /* imm: in 64-bit units for MPG instruction */
              /* VU*_MEM0 : instruction memory */
              vu_addr_base = (me->pke_number == 0) ?
                VU0_MEM0_WINDOW_START : VU1_MEM0_WINDOW_START;
              vu_addr_max_size = (me->pke_number == 0) ?
                VU0_MEM0_SIZE : VU1_MEM0_SIZE;
              vutrack_addr_base = (me->pke_number == 0) ?
                VU0_MEM0_SRCADDR_START : VU1_MEM0_SRCADDR_START;

              /* compute VU address for this word-pair */
              vu_addr = vu_addr_base + (imm + i) * 8;
              /* check for vu_addr overflow */
              while(vu_addr >= vu_addr_base + vu_addr_max_size)
                vu_addr -= vu_addr_max_size;

              /* compute VU tracking address */
              vutrack_addr = vutrack_addr_base + ((signed_8)vu_addr - (signed_8)vu_addr_base) / 2;

              /* Fetch operand words; assume they are already little-endian for VU imem */
              fq = pke_pcrel_fifo(me, i*2 + 1, & operand);
              vu_lower_opcode = *operand;
              vu_upper_opcode = *pke_pcrel_operand(me, i*2 + 2);

              /* write data into VU memory */
              /* lower (scalar) opcode comes in first word ; macro performs H2T! */
              PKE_MEM_WRITE(me, vu_addr,
                            & vu_lower_opcode,
                            4);
              /* upper (vector) opcode comes in second word ; H2T */
              ASSERT(sizeof(unsigned_4) == 4);
              PKE_MEM_WRITE(me, vu_addr + 4,
                            & vu_upper_opcode,
                            4);

              /* write tracking address in target byte-order */
              ASSERT(sizeof(unsigned_4) == 4);
              PKE_MEM_WRITE(me, vutrack_addr,
                            & fq->source_address,
                            4);
            } /* VU xfer loop */

          /* check NUM */
          ASSERT(PKE_REG_MASK_GET(me, NUM, NUM) == 0);

          /* done */
          PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_IDLE);
          pke_pc_advance(me, 1 + num*2);
        }
    } /* if FIFO full enough */
  else
    {
      /* need to wait for another word */
      PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_WAIT);
      /* retry this instruction next clock */
    }
}
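/* Worked example: MPG with num == 3 consumes 6 operand words (three 64-bit
   VU instruction pairs), so on success the PC advances by 1 + 3*2 == 7
   words; num == 0 would instead transfer the full 0x100 pairs. */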


void
pke_code_direct(struct pke_device* me, unsigned_4 pkecode)
{
  /* check that FIFO has a few more words for DIRECT operand */
  unsigned_4* last_direct_word;
  int imm = BIT_MASK_GET(pkecode, PKE_OPCODE_IMM_B, PKE_OPCODE_IMM_E);

  /* assert 128-bit alignment of DIRECT operand */
  if(me->qw_pc != 3)
    return pke_code_error(me, pkecode);

  /* map zero to max+1 */
  if(imm==0) imm=0x10000;

  last_direct_word = pke_pcrel_operand(me, imm*4); /* imm: number of 128-bit words */
  if(last_direct_word != NULL)
    {
      /* VU idle */
      int i;
      unsigned_16 fifo_data;

      /* "transferring" operand */
      PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_XFER);

      /* transfer GPUIF quadwords, one word per iteration */
      for(i=0; i<imm*4; i++)
        {
          unsigned_4* operand = pke_pcrel_operand(me, 1+i);

          /* collect word into quadword */
          *A4_16(&fifo_data, 3 - (i % 4)) = *operand;

          /* write to GPUIF FIFO only with full quadword */
          if(i % 4 == 3)
            {
              ASSERT(sizeof(fifo_data) == 16);
              PKE_MEM_WRITE(me, GIF_PATH2_FIFO_ADDR,
                            & fifo_data,
                            16);
            } /* write collected quadword */
        } /* GPUIF xfer loop */

      /* done */
      PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_IDLE);
      pke_pc_advance(me, 1 + imm*4);
    } /* if FIFO full enough */
  else
    {
      /* need to wait for another word */
      PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_WAIT);
      /* retry this instruction next clock */
    }
}


void
pke_code_directhl(struct pke_device* me, unsigned_4 pkecode)
{
  /* treat the same as DIRECTH */
  pke_code_direct(me, pkecode);
}


void
pke_code_unpack(struct pke_device* me, unsigned_4 pkecode)
{
  int imm = BIT_MASK_GET(pkecode, PKE_OPCODE_IMM_B, PKE_OPCODE_IMM_E);
  int cmd = BIT_MASK_GET(pkecode, PKE_OPCODE_CMD_B, PKE_OPCODE_CMD_E);
  int num = BIT_MASK_GET(pkecode, PKE_OPCODE_NUM_B, PKE_OPCODE_NUM_E);
  int nummx = (num == 0) ? 0x0100 : num;
  short vn = BIT_MASK_GET(cmd, 2, 3); /* unpack shape controls */
  short vl = BIT_MASK_GET(cmd, 0, 1);
  int m = BIT_MASK_GET(cmd, 4, 4);
  short cl = PKE_REG_MASK_GET(me, CYCLE, CL); /* cycle controls */
  short wl = PKE_REG_MASK_GET(me, CYCLE, WL);
  short addrwl = (wl == 0) ? 0x0100 : wl;
  int r = BIT_MASK_GET(imm, 15, 15); /* indicator bits in imm value */
  int usn = BIT_MASK_GET(imm, 14, 14);

  int n, num_operands;
  unsigned_4* last_operand_word = NULL;

  /* catch all illegal UNPACK variants */
  if(vl == 3 && vn < 3)
    {
      pke_code_error(me, pkecode);
      return;
    }

  /* compute PKEcode length, as given in CPU2 spec, v2.1 pg. 11 */
  if(cl >= addrwl)
    n = num;
  else
    n = cl * (nummx / addrwl) + PKE_LIMIT(nummx % addrwl, cl);
  num_operands = (31 + (32 >> vl) * (vn+1) * n)/32; /* round up to next word */
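  /* Worked example: UNPACK V4_32 (vn == 3, vl == 0) with num == 2 and
     cl >= addrwl gives n == 2, so num_operands == (31 + 32*4*2)/32 == 8
     words, i.e. two full quadwords of packed data. */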

  /* confirm that FIFO has enough words in it */
  if(num_operands > 0)
    last_operand_word = pke_pcrel_operand(me, num_operands);
  if(last_operand_word != NULL || num_operands == 0)
    {
      address_word vu_addr_base, vutrack_addr_base;
      address_word vu_addr_max_size;
      int vector_num_out, vector_num_in;

      /* "transferring" operand */
      PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_XFER);

      /* don't check whether VU is idle */

      /* compute VU address base */
      if(me->pke_number == 0)
        {
          vu_addr_base = VU0_MEM1_WINDOW_START;
          vu_addr_max_size = VU0_MEM1_SIZE;
          vutrack_addr_base = VU0_MEM1_SRCADDR_START;
          r = 0;
        }
      else
        {
          vu_addr_base = VU1_MEM1_WINDOW_START;
          vu_addr_max_size = VU1_MEM1_SIZE;
          vutrack_addr_base = VU1_MEM1_SRCADDR_START;
        }

      /* set NUM */
      PKE_REG_MASK_SET(me, NUM, NUM, nummx);

      /* transfer given number of vectors */
      vector_num_out = 0; /* output vector number being processed */
      vector_num_in = 0; /* argument vector number being processed */
      do
        {
          quadword vu_old_data;
          quadword vu_new_data;
          quadword unpacked_data;
          address_word vu_addr;
          address_word vutrack_addr;
          unsigned_4 source_addr = 0;
          int i;
          int next_num;

          /* decrement NUM */
          next_num = PKE_REG_MASK_GET(me, NUM, NUM) - 1;
          PKE_REG_MASK_SET(me, NUM, NUM, next_num);

          /* compute VU destination address, as bytes in R5900 memory */
          if(cl >= wl)
            {
              vu_addr = vu_addr_base + 16 * (BIT_MASK_GET(imm, 0, 9) +
                                             (vector_num_out / addrwl) * cl +
                                             (vector_num_out % addrwl));
            }
          else
            vu_addr = vu_addr_base + 16 * (BIT_MASK_GET(imm, 0, 9) +
                                           vector_num_out);

          /* handle "R" double-buffering bit */
          if(r)
            vu_addr += 16 * PKE_REG_MASK_GET(me, TOPS, TOPS);

          /* check for vu_addr overflow */
          while(vu_addr >= vu_addr_base + vu_addr_max_size)
            vu_addr -= vu_addr_max_size;

          /* compute address of tracking table entry */
          vutrack_addr = vutrack_addr_base + ((signed_8)vu_addr - (signed_8)vu_addr_base) / 4;

          /* read old VU data word at address; reverse words if needed */
          {
            unsigned_16 vu_old_badwords;
            ASSERT(sizeof(vu_old_badwords) == 16);
            PKE_MEM_READ(me, vu_addr,
                         &vu_old_badwords, 16);
            vu_old_data[0] = * A4_16(& vu_old_badwords, 3);
            vu_old_data[1] = * A4_16(& vu_old_badwords, 2);
            vu_old_data[2] = * A4_16(& vu_old_badwords, 1);
            vu_old_data[3] = * A4_16(& vu_old_badwords, 0);
          }

          /* For cyclic unpack, next operand quadword may come from instruction stream
             or be zero. */
          if((cl < addrwl) &&
             (vector_num_out % addrwl) >= cl)
            {
              /* clear operand - used only in an "indeterminate" state */
              for(i = 0; i < 4; i++)
                unpacked_data[i] = 0;
            }
          else
            {
              /* compute packed vector dimensions */
              int vectorbits = 0, unitbits = 0;

              if(vl < 3) /* PKE_UNPACK_*_{32,16,8} */
                {
                  unitbits = (32 >> vl);
                  vectorbits = unitbits * (vn+1);
                }
              else if(vl == 3 && vn == 3) /* PKE_UNPACK_V4_5 */
                {
                  unitbits = 5;
                  vectorbits = 16;
                }
              else /* illegal unpack variant */
                {
                  /* should have been caught at top of function */
                  ASSERT(0);
                }

              /* loop over columns */
              for(i=0; i<=vn; i++)
                {
                  unsigned_4 operand;

                  /* offset in bits in current operand word */
                  int bitoffset =
                    (vector_num_in * vectorbits) + (i * unitbits); /* # of bits from PKEcode */

                  /* last unit of V4_5 is only one bit wide */
                  if(vl == 3 && vn == 3 && i == 3) /* PKE_UNPACK_V4_5 */
                    unitbits = 1;

                  /* confirm we're not reading more than we said we needed */
                  if(vector_num_in * vectorbits >= num_operands * 32)
                    {
                      /* this condition may be triggered by illegal
                         PKEcode / CYCLE combinations. */
                      pke_code_error(me, pkecode);
                      /* XXX: this case needs to be better understood,
                         and detected at a better time. */
                      return;
                    }

                  /* fetch bitfield operand */
                  operand = pke_pcrel_operand_bits(me, bitoffset, unitbits, & source_addr);

                  /* selectively sign-extend; not for V4_5 1-bit value */
                  if(usn || unitbits == 1)
                    unpacked_data[i] = operand;
                  else
                    unpacked_data[i] = SEXT32(operand, unitbits-1);
                }

              /* set remaining top words in vector */
              for(i=vn+1; i<4; i++)
                {
                  if(vn == 0) /* S_{32,16,8}: copy lowest element */
                    unpacked_data[i] = unpacked_data[0];
                  else
                    unpacked_data[i] = 0;
                }

              /* consumed a vector from the PKE instruction stream */
              vector_num_in ++;
            } /* unpack word from instruction operand */

          /* process STMOD register for accumulation operations */
          switch(PKE_REG_MASK_GET(me, MODE, MDE))
            {
            case PKE_MODE_ADDROW: /* add row registers to output data */
              for(i=0; i<4; i++)
                /* exploit R0..R3 contiguity */
                unpacked_data[i] += me->regs[PKE_REG_R0 + i][0];
              break;

            case PKE_MODE_ACCROW: /* add row registers to output data; accumulate */
              for(i=0; i<4; i++)
                {
                  /* exploit R0..R3 contiguity */
                  unpacked_data[i] += me->regs[PKE_REG_R0 + i][0];
                  me->regs[PKE_REG_R0 + i][0] = unpacked_data[i];
                }
              break;

            case PKE_MODE_INPUT: /* pass data through */
            default: /* specified as undefined */
              ;
            }

          /* compute replacement word */
          if(m) /* use mask register? */
            {
              /* compute index into mask register for this word */
              int mask_index = PKE_LIMIT(vector_num_out % addrwl, 3);

              for(i=0; i<4; i++) /* loop over columns */
                {
                  int mask_op = PKE_MASKREG_GET(me, mask_index, i);
                  unsigned_4* masked_value = NULL;

                  switch(mask_op)
                    {
                    case PKE_MASKREG_INPUT:
                      masked_value = & unpacked_data[i];
                      break;

                    case PKE_MASKREG_ROW: /* exploit R0..R3 contiguity */
                      masked_value = & me->regs[PKE_REG_R0 + i][0];
                      break;

                    case PKE_MASKREG_COLUMN: /* exploit C0..C3 contiguity */
                      masked_value = & me->regs[PKE_REG_C0 + mask_index][0];
                      break;

                    case PKE_MASKREG_NOTHING:
                      /* "write inhibit" by re-copying old data */
                      masked_value = & vu_old_data[i];
                      break;

                    default:
                      ASSERT(0);
                      /* no other cases possible */
                    }

                  /* copy masked value for column */
                  vu_new_data[i] = *masked_value;
                } /* loop over columns */
            } /* mask */
          else
            {
              /* no mask - just copy over entire unpacked quadword */
              memcpy(vu_new_data, unpacked_data, sizeof(unpacked_data));
            }

          /* write new VU data word at address; reverse words if needed */
          {
            unsigned_16 vu_new_badwords;
            * A4_16(& vu_new_badwords, 3) = vu_new_data[0];
            * A4_16(& vu_new_badwords, 2) = vu_new_data[1];
            * A4_16(& vu_new_badwords, 1) = vu_new_data[2];
            * A4_16(& vu_new_badwords, 0) = vu_new_data[3];
            ASSERT(sizeof(vu_new_badwords) == 16);
            PKE_MEM_WRITE(me, vu_addr,
                          &vu_new_badwords, 16);
          }

          /* write tracking address */
          ASSERT(sizeof(unsigned_4) == 4);
          PKE_MEM_WRITE(me, vutrack_addr,
                        & source_addr,
                        4);

          /* next vector please */
          vector_num_out ++;
        } /* vector transfer loop */
      while(PKE_REG_MASK_GET(me, NUM, NUM) > 0);

      /* confirm we've written as many vectors as told */
      ASSERT(nummx == vector_num_out);

      /* done */
      PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_IDLE);
      pke_pc_advance(me, 1 + num_operands);
    } /* PKE FIFO full enough */
  else
    {
      /* need to wait for another word */
      PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_WAIT);
      /* retry this instruction next clock */
    }
}


void
pke_code_error(struct pke_device* me, unsigned_4 pkecode)
{
  /* set ER1 flag in STAT register */
  PKE_REG_MASK_SET(me, STAT, ER1, 1);

  if(! PKE_REG_MASK_GET(me, ERR, ME1))
    {
      pke_begin_interrupt_stall(me);
      PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_STALL);
    }
  else
    {
      PKE_REG_MASK_SET(me, STAT, PPS, PKE_REG_STAT_PPS_IDLE);
    }

  /* advance over faulty word */
  pke_pc_advance(me, 1);
}