This commit was generated by cvs2svn to track changes on a CVS vendor
[deliverable/binutils-gdb.git] / sim / ppc / e500.igen
1 # e500 core instructions, for PSIM, the PowerPC simulator.
2
3 # Copyright 2003 Free Software Foundation, Inc.
4
5 # Contributed by Red Hat Inc; developed under contract from Motorola.
6 # Written by matthew green <mrg@redhat.com>.
7
8 # This file is part of GDB.
9
10 # This program is free software; you can redistribute it and/or modify
11 # it under the terms of the GNU General Public License as published by
12 # the Free Software Foundation; either version 2, or (at your option)
13 # any later version.
14
15 # This program is distributed in the hope that it will be useful,
16 # but WITHOUT ANY WARRANTY; without even the implied warranty of
17 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 # GNU General Public License for more details.
19
20 # You should have received a copy of the GNU General Public License
21 # along with This program; see the file COPYING. If not, write to
22 # the Free Software Foundation, 59 Temple Place - Suite 330,
23 # Boston, MA 02111-1307, USA.
24
25 #
26 # e500 Core Complex Instructions
27 #
28
# Cached operands: pointers into the e500 upper-register-half array
# (gprh) selected by the RA/RS/RB instruction fields.  *rAh/*rSh/*rBh
# are the high 32 bits of the 64-bit SPE registers; the ordinary
# *rA/*rS/*rB operands (defined elsewhere) supply the low halves.
29 :cache:e500::signed_word *:rAh:RA:(cpu_registers(processor)->e500.gprh + RA)
30 :cache:e500::signed_word *:rSh:RS:(cpu_registers(processor)->e500.gprh + RS)
31 :cache:e500::signed_word *:rBh:RB:(cpu_registers(processor)->e500.gprh + RB)
32
33 # Flags for model.h
# Timing-model hook: when instruction-issue modelling is enabled
# (CURRENT_MODEL_ISSUE > 0), record that this instruction reads the
# IN_MASK registers and writes the OUT_MASK registers plus SPR.
34 ::model-macro:::
35 #define PPC_INSN_INT_SPR(OUT_MASK, IN_MASK, SPR) \
36 do { \
37 if (CURRENT_MODEL_ISSUE > 0) \
38 ppc_insn_int_spr(MY_INDEX, cpu_model(processor), OUT_MASK, IN_MASK, SPR); \
39 } while (0)
40
41 # Schedule an instruction that takes 2 integer register and produces a special purpose output register plus an integer output register
42 void::model-function::ppc_insn_int_spr:itable_index index, model_data *model_ptr, const unsigned32 out_mask, const unsigned32 in_mask, const unsigned nSPR
43 const unsigned32 int_mask = out_mask | in_mask;
44 model_busy *busy_ptr;
45
/* Data hazard: stall, one modelled cycle at a time, until every
   integer register this instruction touches and the target SPR are
   free.  */
46 while ((model_ptr->int_busy & int_mask) != 0 || model_ptr->spr_busy[nSPR] != 0) {
47 if (WITH_TRACE && ppc_trace[trace_model])
48 model_trace_busy_p(model_ptr, int_mask, 0, 0, nSPR);
49
50 model_ptr->nr_stalls_data++;
51 model_new_cycle(model_ptr);
52 }
53
/* Reserve a function unit, then mark the output registers and the
   SPR busy for the duration of this instruction.  */
54 busy_ptr = model_wait_for_unit(index, model_ptr, &model_ptr->timing[index]);
55 busy_ptr->int_busy |= out_mask;
56 model_ptr->int_busy |= out_mask;
57 busy_ptr->spr_busy = nSPR;
58 model_ptr->spr_busy[nSPR] = 1;
/* NOTE(review): 3 writebacks when exactly one output bit is set,
   otherwise 2 -- looks inverted relative to the mask width; confirm
   against the model's writeback accounting.  */
59 busy_ptr->nr_writebacks = (PPC_ONE_BIT_SET_P(out_mask)) ? 3 : 2;
60 TRACE(trace_model,("Making register %s busy.\n", spr_name(nSPR)));
61 \f
62 #
63 # SPE Modulo Fractional Multiplication handling support
64 #
65 :function:e500::unsigned64:ev_multiply16_smf:signed16 a, signed16 b, int *sat
/* 16x16 -> 32 bit signed multiply used by the modulo-fractional
   forms.  *sat is set when the two top bits of the product are both
   set (the case the saturating forms clamp); the result is doubled
   (<< 1) to renormalise the fractional product.  */
66 signed32 a32 = a, b32 = b, rv32;
/* Multiply via the widened copies; this was "a * b", which left
   a32/b32 unused.  The value is identical either way because the
   signed16 operands promote to int before multiplying.  */
67 rv32 = a32 * b32;
68 *sat = (rv32 & (3<<30)) == (3<<30);
69 return (signed64)rv32 << 1;
70
71 :function:e500::unsigned64:ev_multiply32_smf:signed32 a, signed32 b, int *sat
/* 32x32 -> 64 bit signed multiply for the modulo-fractional forms.
   *sat is set when the top two bits of the 64-bit product are both
   set; the result is doubled to renormalise the fraction.  */
72 signed64 rv64, a64 = a, b64 = b;
73 rv64 = a64 * b64;
74 *sat = (rv64 & ((signed64)3<<62)) == ((signed64)3<<62);
75 /* Loses top sign bit. */
76 return rv64 << 1;
77 #
78 # SPE Saturation handling support
79 #
80 :function:e500::signed32:ev_multiply16_ssf:signed16 a, signed16 b, int *sat
/* 16x16 signed saturating-fractional multiply.  The only true
   overflow case is -1.0 * -1.0 (0x8000 * 0x8000), which saturates to
   the most positive fraction with *sat set.  The comparison against
   0xffff8000 relies on the usual conversions of the sign-extended
   signed16 value.  */
81 signed32 rv32;
82 if (a == 0xffff8000 && b == 0xffff8000)
83 {
84 rv32 = 0x7fffffffL;
85 * sat = 1;
86 return rv32;
87 }
88 else
89 {
90 signed32 a32 = a, b32 = b;
91
92 rv32 = a * b;
93 * sat = (rv32 & (3<<30)) == (3<<30);
/* NOTE(review): the declared return type is signed32, so this
   (signed64) << 1 is truncated back to 32 bits on return -- confirm
   only the low 32 bits are wanted here (cf. ev_multiply32_ssf, which
   returns signed64).  */
94 return (signed64)rv32 << 1;
95 }
97 :function:e500::signed64:ev_multiply32_ssf:signed32 a, signed32 b, int *sat
/* 32x32 signed saturating-fractional multiply: -1.0 * -1.0 saturates
   to the most positive 64-bit fraction with *sat set; everything else
   is the doubled 64-bit product (cf. ev_multiply32_smf).  */
98 signed64 rv64;
99 if (a == 0x80000000 && b == 0x80000000)
100 {
101 rv64 = 0x7fffffffffffffffLL;
102 * sat = 1;
103 return rv64;
104 }
105 else
106 {
107 signed64 a64 = a, b64 = b;
108 rv64 = a64 * b64;
109 *sat = (rv64 & ((signed64)3<<62)) == ((signed64)3<<62);
110 /* Loses top sign bit. */
111 return rv64 << 1;
112 }
113 \f
114 #
115 # SPE FP handling support
116 #
117
118 :function:e500::void:ev_check_guard:sim_fpu *a, int fg, int fx, cpu *processor
/* Fold rounding information into SPEFSCR: raise the FG bit(s) when
   the guard bit of A is set, and the FX (sticky/inexact) bit(s) when
   any bit below the guard bit is set.  */
119 unsigned64 guard;
120 guard = sim_fpu_guard(a, 0);
121 if (guard & 1)
122 EV_SET_SPEFSCR_BITS(fg);
123 if (guard & ~1)
124 EV_SET_SPEFSCR_BITS(fx);
125
126 :function:e500::void:booke_sim_fpu_32to:sim_fpu *dst, unsigned32 packed
/* Unpack a 32-bit float like sim_fpu_32to, then give Inf and NaN a
   concrete exponent and fraction so the booke-style arithmetic in
   this file can treat them as ordinary numbers.  */
127 sim_fpu_32to (dst, packed);
128
129 /* Set normally unused fields to allow booke arithmetic. */
130 if (dst->class == sim_fpu_class_infinity)
131 {
132 dst->normal_exp = 128;
133 dst->fraction = ((unsigned64)1 << 60);
134 }
135 else if (dst->class == sim_fpu_class_qnan
136 || dst->class == sim_fpu_class_snan)
137 {
138 dst->normal_exp = 128;
139 /* This is set, but without the implicit bit, so we have to or
140 in the implicit bit. */
141 dst->fraction |= ((unsigned64)1 << 60);
142 }
143
144 :function:e500::int:booke_sim_fpu_add:sim_fpu *d, sim_fpu *a, sim_fpu *b, int inv, int over, int under, cpu *processor
/* e500-style single-precision add.  NaN/Inf/denorm inputs are treated
   as plain numbers (signalling "invalid operand" once), the result is
   flushed to signed zero on underflow and clamped to the signed
   maximum on overflow.  Returns non-zero when any of inv/over/under
   was signalled.  May modify A and B (their class fields).  */
145 int invalid_operand, overflow_result, underflow_result;
146 int dest_exp;
147
148 invalid_operand = 0;
149 overflow_result = 0;
150 underflow_result = 0;
151
152 /* Treat NaN, Inf, and denorm like normal numbers, and signal invalid
153 operand if it hasn't already been done. */
154 if (EV_IS_INFDENORMNAN (a))
155 {
156 a->class = sim_fpu_class_number;
157
158 EV_SET_SPEFSCR_BITS (inv);
159 invalid_operand = 1;
160 }
161 if (EV_IS_INFDENORMNAN (b))
162 {
163 b->class = sim_fpu_class_number;
164
165 if (! invalid_operand)
166 {
167 EV_SET_SPEFSCR_BITS (inv);
168 invalid_operand = 1;
169 }
170 }
171
172 sim_fpu_add (d, a, b);
173
174 dest_exp = booke_sim_fpu_exp (d);
175 /* If this is a denorm, force to zero, and signal underflow if
176 we haven't already indicated invalid operand. */
177 if (dest_exp <= -127)
178 {
179 int sign = d->sign;
180
181 *d = sim_fpu_zero;
182 d->sign = sign;
183 if (! invalid_operand)
184 {
185 EV_SET_SPEFSCR_BITS (under);
186 underflow_result = 1;
187 }
188 }
189 /* If this is Inf/NaN, force to pmax/nmax, and signal overflow if
190 we haven't already indicated invalid operand. */
191 else if (dest_exp >= 127)
192 {
193 int sign = d->sign;
194
195 *d = sim_fpu_max32;
196 d->sign = sign;
197 if (! invalid_operand)
198 {
199 EV_SET_SPEFSCR_BITS (over);
200 overflow_result = 1;
201 }
202 }
203 /* Destination sign is sign of operand with larger magnitude, or
204 the sign of the first operand if operands have the same
205 magnitude. Thus if the result is zero, we force it to have
206 the sign of the first operand. */
207 else if (d->fraction == 0)
208 d->sign = a->sign;
209
210 return invalid_operand || overflow_result || underflow_result;
211
212 :function:e500::unsigned32:ev_fs_add:unsigned32 aa, unsigned32 bb, int inv, int over, int under, int fg, int fx, cpu *processor
/* Single-precision add of packed floats AA and BB with e500
   semantics; guard/sticky bits are only checked when the add itself
   signalled no exception.  */
213 sim_fpu a, b, d;
214 unsigned32 w;
215 int exception;
216
217 booke_sim_fpu_32to (&a, aa);
218 booke_sim_fpu_32to (&b, bb);
219
220 exception = booke_sim_fpu_add (&d, &a, &b, inv, over, under,
221 processor);
222
223 sim_fpu_to32 (&w, &d);
224 if (! exception)
225 ev_check_guard(&d, fg, fx, processor);
226 return w;
227
228 :function:e500::unsigned32:ev_fs_sub:unsigned32 aa, unsigned32 bb, int inv, int over, int under, int fg, int fx, cpu *processor
/* Single-precision subtract, implemented as AA + (-BB) through the
   same booke add path as ev_fs_add.  */
229 sim_fpu a, b, d;
230 unsigned32 w;
231 int exception;
232
233 booke_sim_fpu_32to (&a, aa);
234 booke_sim_fpu_32to (&b, bb);
235
236 /* Invert sign of second operand, and add. */
237 b.sign = ! b.sign;
238 exception = booke_sim_fpu_add (&d, &a, &b, inv, over, under,
239 processor);
240
241 sim_fpu_to32 (&w, &d);
242 if (! exception)
243 ev_check_guard(&d, fg, fx, processor);
244 return w;
245
246 # sim_fpu_exp leaves the normal_exp field undefined for Inf and NaN.
247 # The booke algorithms require exp values, so we fake them here.
248 # fixme: It also apparently does the same for zero, but should not.
249 :function:e500::unsigned32:booke_sim_fpu_exp:sim_fpu *x
/* NOTE(review): declared unsigned32, but callers store the result in
   plain int (ea/eb above); a negative sim_fpu_exp value round-trips
   through the conversions unchanged on 32-bit int targets -- confirm
   that is relied upon.  */
250 int y = sim_fpu_is (x);
251 if (y == SIM_FPU_IS_PZERO || y == SIM_FPU_IS_NZERO)
252 return 0;
253 else if (y == SIM_FPU_IS_SNAN || y == SIM_FPU_IS_QNAN
254 || y == SIM_FPU_IS_NINF || y == SIM_FPU_IS_PINF)
255 return 128;
256 else
257 return sim_fpu_exp (x);
258
259 :function:e500::unsigned32:ev_fs_mul:unsigned32 aa, unsigned32 bb, int inv, int over, int under, int fg, int fx, cpu *processor
/* Single-precision multiply with e500 semantics.  The biased result
   exponent is pre-estimated as ea + eb + 127: >= 254 saturates to
   EV_PMAX/EV_NMAX (sign from the operand signs) and raises the
   overflow bits; < 1 flushes to a signed zero; otherwise a normal
   sim_fpu multiply, raising the invalid bits for Inf/denorm/NaN
   inputs.  A zero operand always yields +0.  */
260 sim_fpu a, b, d;
261 unsigned32 w;
262 int sa, sb, ea, eb, ei;
263 sim_fpu_32to (&a, aa);
264 sim_fpu_32to (&b, bb);
265 sa = sim_fpu_sign(&a);
266 sb = sim_fpu_sign(&b);
267 ea = booke_sim_fpu_exp(&a);
268 eb = booke_sim_fpu_exp(&b);
269 ei = ea + eb + 127;
270 if (sim_fpu_is_zero (&a) || sim_fpu_is_zero (&b))
271 w = 0;
272 else if (sa == sb) {
273 if (ei >= 254) {
274 w = EV_PMAX;
275 EV_SET_SPEFSCR_BITS(over);
276 } else if (ei < 1) {
277 d = sim_fpu_zero;
278 sim_fpu_to32 (&w, &d);
279 w &= 0x7fffffff; /* Clear sign bit. */
280 } else {
281 goto normal_mul;
282 }
283 } else {
284 if (ei >= 254) {
285 w = EV_NMAX;
286 EV_SET_SPEFSCR_BITS(over);
287 } else if (ei < 1) {
288 d = sim_fpu_zero;
289 sim_fpu_to32 (&w, &d);
290 w |= 0x80000000; /* Set sign bit. */
291 } else {
292 normal_mul:
293 if (EV_IS_INFDENORMNAN(&a) || EV_IS_INFDENORMNAN(&b))
294 EV_SET_SPEFSCR_BITS(inv);
295 sim_fpu_mul (&d, &a, &b);
296 sim_fpu_to32 (&w, &d);
297 }
298 }
299 return w;
300
301 :function:e500::unsigned32:ev_fs_div:unsigned32 aa, unsigned32 bb, int inv, int over, int under, int dbz, int fg, int fx, cpu *processor
/* Single-precision divide with e500 semantics.  The biased result
   exponent is pre-estimated as ea - eb + 127: > 254 saturates to
   EV_PMAX/EV_NMAX with the overflow bits set, <= 1 flushes to a
   signed zero.  Inf/Inf yields +/-1.0; division by zero yields the
   signed maximum and sets dbz (0/0) or inv; the sNaN special cases
   replicate observed hardware behaviour.  */
302 sim_fpu a, b, d;
303 unsigned32 w;
304 int sa, sb, ea, eb, ei;
305
306 sim_fpu_32to (&a, aa);
307 sim_fpu_32to (&b, bb);
308 sa = sim_fpu_sign(&a);
309 sb = sim_fpu_sign(&b);
310 ea = booke_sim_fpu_exp(&a);
311 eb = booke_sim_fpu_exp(&b);
312 ei = ea - eb + 127;
313
314 /* Special cases to handle behaviour of e500 hardware.
315 cf case 107543. */
316 if (sim_fpu_is_nan (&a) || sim_fpu_is_nan (&b)
317 || sim_fpu_is_zero (&a) || sim_fpu_is_zero (&b))
318 {
319 if (sim_fpu_is_snan (&a) || sim_fpu_is_snan (&b))
320 {
321 if (bb == 0x3f800000)
322 w = EV_PMAX;
323 else if (aa == 0x7fc00001)
324 w = 0x3fbffffe;
325 else
326 goto normal_div;
327 }
328 else
329 goto normal_div;
330 }
331 else if (sim_fpu_is_infinity (&a) && sim_fpu_is_infinity (&b))
332 {
333 if (sa == sb)
334 sim_fpu_32to (&d, 0x3f800000);
335 else
336 sim_fpu_32to (&d, 0xbf800000);
337 sim_fpu_to32 (&w, &d);
338 }
339 else if (sa == sb) {
340 if (ei > 254) {
341 w = EV_PMAX;
342 EV_SET_SPEFSCR_BITS(over);
343 } else if (ei <= 1) {
344 d = sim_fpu_zero;
345 sim_fpu_to32 (&w, &d);
346 w &= 0x7fffffff; /* Clear sign bit. */
347 } else {
348 goto normal_div;
349 }
350 } else {
351 if (ei > 254) {
352 w = EV_NMAX;
353 EV_SET_SPEFSCR_BITS(over);
354 } else if (ei <= 1) {
355 d = sim_fpu_zero;
356 sim_fpu_to32 (&w, &d);
357 w |= 0x80000000; /* Set sign bit. */
358 } else {
359 normal_div:
360 if (EV_IS_INFDENORMNAN(&a) || EV_IS_INFDENORMNAN(&b))
361 EV_SET_SPEFSCR_BITS(inv);
362 if (sim_fpu_is_zero (&b))
363 {
364 if (sim_fpu_is_zero (&a))
365 EV_SET_SPEFSCR_BITS(dbz);
366 else
367 EV_SET_SPEFSCR_BITS(inv);
368 w = sa ? EV_NMAX : EV_PMAX;
369 }
370 else
371 {
372 sim_fpu_div (&d, &a, &b);
373 sim_fpu_to32 (&w, &d);
374 ev_check_guard(&d, fg, fx, processor);
375 }
376 }
377 }
378 return w;
379
380 \f
381 #
382 # A.2.7 Integer SPE Simple Instructions
383 #
384
# 32-bit-lane add/subtract: each instruction operates independently on
# the high (gprh, via *rAh/*rBh) and low GPR halves, with modulo-2^32
# wrap-around and no status flags.  evsubfw/evsubifw compute rB - rA
# and rB - IMM respectively ("subtract FROM").
385 0.4,6.RS,11.RA,16.RB,21.512:X:e500:evaddw %RS,%RA,%RB:Vector Add Word
386 unsigned32 w1, w2;
387 w1 = *rBh + *rAh;
388 w2 = *rB + *rA;
389 EV_SET_REG2(*rSh, *rS, w1, w2);
390 //printf("evaddw: *rSh = %08x; *rS = %08x; w1 = %08x w2 = %08x\n", *rSh, *rS, w1, w2);
391 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
392
393 0.4,6.RS,11.IMM,16.RB,21.514:X:e500:evaddiw %RS,%RB,%IMM:Vector Add Immediate Word
394 unsigned32 w1, w2;
395 w1 = *rBh + IMM;
396 w2 = *rB + IMM;
397 EV_SET_REG2(*rSh, *rS, w1, w2);
398 //printf("evaddiw: *rSh = %08x; *rS = %08x; w1 = %08x w2 = %08x\n", *rSh, *rS, w1, w2);
399 PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
400
401 0.4,6.RS,11.RA,16.RB,21.516:X:e500:evsubfw %RS,%RA,%RB:Vector Subtract from Word
402 unsigned32 w1, w2;
403 w1 = *rBh - *rAh;
404 w2 = *rB - *rA;
405 EV_SET_REG2(*rSh, *rS, w1, w2);
406 //printf("evsubfw: *rSh = %08x; *rS = %08x; w1 = %08x w2 = %08x\n", *rSh, *rS, w1, w2);
407 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
408
409 0.4,6.RS,11.IMM,16.RB,21.518:X:e500:evsubifw %RS,%RB,%IMM:Vector Subtract Immediate from Word
410 unsigned32 w1, w2;
411 w1 = *rBh - IMM;
412 w2 = *rB - IMM;
413 EV_SET_REG2(*rSh, *rS, w1, w2);
414 //printf("evsubifw: *rSh = %08x; *rS = %08x; IMM = %d\n", *rSh, *rS, IMM);
415 PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
416
# Lane-wise absolute value / negate.  Both instructions leave
# 0x80000000 (the most negative 32-bit value) unchanged rather than
# negate it, so the result stays representable.
417 0.4,6.RS,11.RA,16.0,21.520:X:e500:evabs %RS,%RA:Vector Absolute Value
418 signed32 w1, w2;
419 w1 = *rAh;
420 if (w1 < 0 && w1 != 0x80000000)
421 w1 = -w1;
422 w2 = *rA;
423 if (w2 < 0 && w2 != 0x80000000)
424 w2 = -w2;
425 EV_SET_REG2(*rSh, *rS, w1, w2);
426 PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
427
428 0.4,6.RS,11.RA,16.0,21.521:X:e500:evneg %RS,%RA:Vector Negate
429 signed32 w1, w2;
430 w1 = *rAh;
431 /* the negative most negative number is the most negative number */
432 if (w1 != 0x80000000)
433 w1 = -w1;
434 w2 = *rA;
435 if (w2 != 0x80000000)
436 w2 = -w2;
437 EV_SET_REG2(*rSh, *rS, w1, w2);
438 PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
439
# Sign-extend the low byte of each 32-bit half to 32 bits.
440 0.4,6.RS,11.RA,16.0,21.522:X:e500:evextsb %RS,%RA:Vector Extend Signed Byte
441 unsigned64 w1, w2;
442 w1 = *rAh & 0xff;
443 if (w1 & 0x80)
444 w1 |= 0xffffff00;
445 w2 = *rA & 0xff;
446 if (w2 & 0x80)
447 w2 |= 0xffffff00;
448 EV_SET_REG2(*rSh, *rS, w1, w2);
449 PPC_INSN_INT(RS_BITMASK, RA_BITMASK , 0);
450
451 0.4,6.RS,11.RA,16.0,21.523:X:e500:evextsb %RS,%RA:Vector Extend Signed Half Word
452 unsigned64 w1, w2;
453 w1 = *rAh & 0xffff;
454 if (w1 & 0x8000)
455 w1 |= 0xffff0000;
456 w2 = *rA & 0xffff;
457 if (w2 & 0x8000)
458 w2 |= 0xffff0000;
459 EV_SET_REG2(*rSh, *rS, w1, w2);
460 PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
461
# Bitwise logical operations, applied independently to the high and
# low 32-bit halves of the operand pair.
462 0.4,6.RS,11.RA,16.RB,21.529:X:e500:evand %RS,%RA,%RB:Vector AND
463 unsigned32 w1, w2;
464 w1 = *rBh & *rAh;
465 w2 = *rB & *rA;
466 EV_SET_REG2(*rSh, *rS, w1, w2);
467 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
468
469 0.4,6.RS,11.RA,16.RB,21.535:X:e500:evor %RS,%RA,%RB:Vector OR
470 unsigned32 w1, w2;
471 w1 = *rBh | *rAh;
472 w2 = *rB | *rA;
473 EV_SET_REG2(*rSh, *rS, w1, w2);
474 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
475
476 0.4,6.RS,11.RA,16.RB,21.534:X:e500:evxor %RS,%RA,%RB:Vector XOR
477 unsigned32 w1, w2;
478 w1 = *rBh ^ *rAh;
479 w2 = *rB ^ *rA;
480 EV_SET_REG2(*rSh, *rS, w1, w2);
481 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
482
483 0.4,6.RS,11.RA,16.RB,21.542:X:e500:evnand %RS,%RA,%RB:Vector NAND
484 unsigned32 w1, w2;
485 w1 = ~(*rBh & *rAh);
486 w2 = ~(*rB & *rA);
487 EV_SET_REG2(*rSh, *rS, w1, w2);
488 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
489
490 0.4,6.RS,11.RA,16.RB,21.536:X:e500:evnor %RS,%RA,%RB:Vector NOR
491 unsigned32 w1, w2;
492 w1 = ~(*rBh | *rAh);
493 w2 = ~(*rB | *rA);
494 EV_SET_REG2(*rSh, *rS, w1, w2);
495 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
496
# eveqv: equivalence, i.e. ~(rA ^ rB) written as (~rB) ^ rA.
497 0.4,6.RS,11.RA,16.RB,21.537:X:e500:eveqv %RS,%RA,%RB:Vector Equivalent
498 unsigned32 w1, w2;
499 w1 = (~*rBh) ^ *rAh;
500 w2 = (~*rB) ^ *rA;
501 EV_SET_REG2(*rSh, *rS, w1, w2);
502 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
503
504 0.4,6.RS,11.RA,16.RB,21.530:X:e500:evandc %RS,%RA,%RB:Vector AND with Compliment
505 unsigned32 w1, w2;
506 w1 = (~*rBh) & *rAh;
507 w2 = (~*rB) & *rA;
508 EV_SET_REG2(*rSh, *rS, w1, w2);
509 //printf("evandc: *rSh = %08x; *rS = %08x\n", *rSh, *rS);
510 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
511
512 0.4,6.RS,11.RA,16.RB,21.539:X:e500:evorc %RS,%RA,%RB:Vector OR with Compliment
513 unsigned32 w1, w2;
514 w1 = (~*rBh) | *rAh;
515 w2 = (~*rB) | *rA;
516 EV_SET_REG2(*rSh, *rS, w1, w2);
517 //printf("evorc: *rSh = %08x; *rS = %08x\n", *rSh, *rS);
518 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
519
520 0.4,6.RS,11.RA,16.RB,21.552:X:e500:evrlw %RS,%RA,%RB:Vector Rotate Left Word
521 unsigned32 nh, nl, w1, w2;
522 nh = *rBh & 0x1f;
523 nl = *rB & 0x1f;
524 w1 = ((unsigned32)*rAh) << nh | ((unsigned32)*rAh) >> (32 - nh);
525 w2 = ((unsigned32)*rA) << nl | ((unsigned32)*rA) >> (32 - nl);
526 EV_SET_REG2(*rSh, *rS, w1, w2);
527 //printf("evrlw: nh %d nl %d *rSh = %08x; *rS = %08x\n", nh, nl, *rSh, *rS);
528 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
529
530 0.4,6.RS,11.RA,16.UIMM,21.554:X:e500:evrlwi %RS,%RA,%UIMM:Vector Rotate Left Word Immediate
531 unsigned32 w1, w2, imm;
532 imm = (unsigned32)UIMM;
533 w1 = ((unsigned32)*rAh) << imm | ((unsigned32)*rAh) >> (32 - imm);
534 w2 = ((unsigned32)*rA) << imm | ((unsigned32)*rA) >> (32 - imm);
535 EV_SET_REG2(*rSh, *rS, w1, w2);
536 PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
537
# Logical shift left of each half; the register form masks the count
# to 5 bits, the immediate form uses the 5-bit UIMM field directly.
538 0.4,6.RS,11.RA,16.RB,21.548:X:e500:evslw %RS,%RA,%RB:Vector Shift Left Word
539 unsigned32 nh, nl, w1, w2;
540 nh = *rBh & 0x1f;
541 nl = *rB & 0x1f;
542 w1 = ((unsigned32)*rAh) << nh;
543 w2 = ((unsigned32)*rA) << nl;
544 EV_SET_REG2(*rSh, *rS, w1, w2);
545 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
546
547 0.4,6.RS,11.RA,16.UIMM,21.550:X:e500:evslwi %RS,%RA,%UIMM:Vector Shift Left Word Immediate
548 unsigned32 w1, w2, imm = UIMM;
549 w1 = ((unsigned32)*rAh) << imm;
550 w2 = ((unsigned32)*rA) << imm;
551 EV_SET_REG2(*rSh, *rS, w1, w2);
552 PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
553
# Shift right of each half: the "s" forms shift arithmetically
# (sign-propagating, via signed32), the "u" forms logically.
# NOTE(review): the arithmetic forms rely on ">>" of a negative
# signed32 being an arithmetic shift, which is implementation-defined
# in C (true for the compilers PSIM targets).
554 0.4,6.RS,11.RA,16.RB,21.545:X:e500:evsrws %RS,%RA,%RB:Vector Shift Right Word Signed
555 signed32 w1, w2;
556 unsigned32 nh, nl;
557 nh = *rBh & 0x1f;
558 nl = *rB & 0x1f;
559 w1 = ((signed32)*rAh) >> nh;
560 w2 = ((signed32)*rA) >> nl;
561 EV_SET_REG2(*rSh, *rS, w1, w2);
562 //printf("evsrws: nh %d nl %d *rSh = %08x; *rS = %08x\n", nh, nl, *rSh, *rS);
563 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
564
565 0.4,6.RS,11.RA,16.RB,21.544:X:e500:evsrwu %RS,%RA,%RB:Vector Shift Right Word Unsigned
566 unsigned32 w1, w2, nh, nl;
567 nh = *rBh & 0x1f;
568 nl = *rB & 0x1f;
569 w1 = ((unsigned32)*rAh) >> nh;
570 w2 = ((unsigned32)*rA) >> nl;
571 EV_SET_REG2(*rSh, *rS, w1, w2);
572 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
573
574 0.4,6.RS,11.RA,16.UIMM,21.547:X:e500:evsrwis %RS,%RA,%UIMM:Vector Shift Right Word Immediate Signed
575 signed32 w1, w2;
576 unsigned32 imm = UIMM;
577 w1 = ((signed32)*rAh) >> imm;
578 w2 = ((signed32)*rA) >> imm;
579 EV_SET_REG2(*rSh, *rS, w1, w2);
580 PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
581
582 0.4,6.RS,11.RA,16.UIMM,21.546:X:e500:evsrwiu %RS,%RA,%UIMM:Vector Shift Right Word Immediate Unsigned
583 unsigned32 w1, w2, imm = UIMM;
584 w1 = ((unsigned32)*rAh) >> imm;
585 w2 = ((unsigned32)*rA) >> imm;
586 EV_SET_REG2(*rSh, *rS, w1, w2);
587 PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
588
# Per-half leading-bit counts.  Each loop walks a single-bit mask down
# from bit 31; for an all-zero (or, for evcntlsw, all-sign-bit) word
# the mask reaches zero and the count is 32.
589 0.4,6.RS,11.RA,16.0,21.525:X:e500:evcntlzw %RS,%RA:Vector Count Leading Zeros Word
590 unsigned32 w1, w2, mask, c1, c2;
591 for (c1 = 0, mask = 0x80000000, w1 = *rAh;
592 !(w1 & mask) && mask != 0; mask >>= 1)
593 c1++;
594 for (c2 = 0, mask = 0x80000000, w2 = *rA;
595 !(w2 & mask) && mask != 0; mask >>= 1)
596 c2++;
597 EV_SET_REG2(*rSh, *rS, c1, c2);
598 PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
599
# Counts bits equal to the sign bit, starting at bit 31 (so the sign
# bit itself is included in the count).
600 0.4,6.RS,11.RA,16.0,21.526:X:e500:evcntlsw %RS,%RA:Vector Count Leading Sign Bits Word
601 unsigned32 w1, w2, mask, sign_bit, c1, c2;
602 for (c1 = 0, mask = 0x80000000, w1 = *rAh, sign_bit = w1 & mask;
603 ((w1 & mask) == sign_bit) && mask != 0;
604 mask >>= 1, sign_bit >>= 1)
605 c1++;
606 for (c2 = 0, mask = 0x80000000, w2 = *rA, sign_bit = w2 & mask;
607 ((w2 & mask) == sign_bit) && mask != 0;
608 mask >>= 1, sign_bit >>= 1)
609 c2++;
610 EV_SET_REG2(*rSh, *rS, c1, c2);
611 PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
612
# Round each half to the nearest multiple of 0x10000 (round half up):
# add 0x8000, then clear the low 16 bits.  Addition wraps modulo 2^32.
613 0.4,6.RS,11.RA,16.0,21.524:X:e500:evrndw %RS,%RA:Vector Round Word
614 unsigned32 w1, w2;
615 w1 = ((unsigned32)*rAh + 0x8000) & 0xffff0000;
616 w2 = ((unsigned32)*rA + 0x8000) & 0xffff0000;
617 EV_SET_REG2(*rSh, *rS, w1, w2);
618 //printf("evrndw: *rSh = %08x; *rS = %08x\n", *rSh, *rS);
619 PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
620
# Merge instructions: build the destination from one 32-bit half of
# each source; w1 goes to the high half of rS, w2 to the low half.
621 0.4,6.RS,11.RA,16.RB,21.556:X:e500:evmergehi %RS,%RA,%RB:Vector Merge Hi
622 unsigned32 w1, w2;
623 w1 = *rAh;
624 w2 = *rBh;
625 EV_SET_REG2(*rSh, *rS, w1, w2);
626 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
627
628 0.4,6.RS,11.RA,16.RB,21.557:X:e500:evmergelo %RS,%RA,%RB:Vector Merge Low
629 unsigned32 w1, w2;
630 w1 = *rA;
631 w2 = *rB;
632 EV_SET_REG2(*rSh, *rS, w1, w2);
633 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
634
635 0.4,6.RS,11.RA,16.RB,21.559:X:e500:evmergelohi %RS,%RA,%RB:Vector Merge Low Hi
636 unsigned32 w1, w2;
637 w1 = *rA;
638 w2 = *rBh;
639 EV_SET_REG2(*rSh, *rS, w1, w2);
640 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
641
642 0.4,6.RS,11.RA,16.RB,21.558:X:e500:evmergehilo %RS,%RA,%RB:Vector Merge Hi Low
643 unsigned32 w1, w2;
644 w1 = *rAh;
645 w2 = *rB;
646 EV_SET_REG2(*rSh, *rS, w1, w2);
647 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
648
# Splat instructions: replicate a 5-bit immediate into both halves.
# evsplati sign-extends SIMM (two's-complement, -16..15); evsplatfi
# places SIMM in the top five bits (a signed fraction), zeros below.
649 0.4,6.RS,11.SIMM,16.0,21.553:X:e500:evsplati %RS,%SIMM:Vector Splat Immediate
650 unsigned32 w;
651 w = SIMM & 0x1f;
652 if (w & 0x10)
653 w |= 0xffffffe0;
654 EV_SET_REG2(*rSh, *rS, w, w);
655 PPC_INSN_INT(RS_BITMASK, 0, 0);
656
657 0.4,6.RS,11.SIMM,16.0,21.555:X:e500:evsplatfi %RS,%SIMM:Vector Splat Fractional Immediate
658 unsigned32 w;
659 w = SIMM << 27;
660 EV_SET_REG2(*rSh, *rS, w, w);
661 PPC_INSN_INT(RS_BITMASK, 0, 0);
662
# Vector compares: ch/cl hold the per-half comparison results, and the
# 4-bit CR field BF is set to
#   ch<<3 | cl<<2 | (ch|cl)<<1 | (ch&cl)
# i.e. high result, low result, "either", "both".
663 0.4,6.BF,9.0,11.RA,16.RB,21.561:X:e500:evcmpgts %BF,%RA,%RB:Vector Compare Greater Than Signed
664 signed32 ah, al, bh, bl;
665 int w, ch, cl;
666 ah = *rAh;
667 al = *rA;
668 bh = *rBh;
669 bl = *rB;
670 if (ah > bh)
671 ch = 1;
672 else
673 ch = 0;
674 if (al > bl)
675 cl = 1;
676 else
677 cl = 0;
678 w = ch << 3 | cl << 2 | (ch | cl) << 1 | (ch & cl);
679 CR_SET(BF, w);
680 PPC_INSN_INT_CR(0, RA_BITMASK | RB_BITMASK, BF_BITMASK);
681
682 0.4,6.BF,9.0,11.RA,16.RB,21.560:X:e500:evcmpgtu %BF,%RA,%RB:Vector Compare Greater Than Unsigned
683 unsigned32 ah, al, bh, bl;
684 int w, ch, cl;
685 ah = *rAh;
686 al = *rA;
687 bh = *rBh;
688 bl = *rB;
689 if (ah > bh)
690 ch = 1;
691 else
692 ch = 0;
693 if (al > bl)
694 cl = 1;
695 else
696 cl = 0;
697 w = ch << 3 | cl << 2 | (ch | cl) << 1 | (ch & cl);
698 CR_SET(BF, w);
699 PPC_INSN_INT_CR(0, RA_BITMASK | RB_BITMASK, BF_BITMASK);
700
701 0.4,6.BF,9.0,11.RA,16.RB,21.563:X:e500:evcmplts %BF,%RA,%RB:Vector Compare Less Than Signed
702 signed32 ah, al, bh, bl;
703 int w, ch, cl;
704 ah = *rAh;
705 al = *rA;
706 bh = *rBh;
707 bl = *rB;
708 if (ah < bh)
709 ch = 1;
710 else
711 ch = 0;
712 if (al < bl)
713 cl = 1;
714 else
715 cl = 0;
716 w = ch << 3 | cl << 2 | (ch | cl) << 1 | (ch & cl);
717 CR_SET(BF, w);
718 PPC_INSN_INT_CR(0, RA_BITMASK | RB_BITMASK, BF_BITMASK);
719
720 0.4,6.BF,9.0,11.RA,16.RB,21.562:X:e500:evcmpltu %BF,%RA,%RB:Vector Compare Less Than Unsigned
721 unsigned32 ah, al, bh, bl;
722 int w, ch, cl;
723 ah = *rAh;
724 al = *rA;
725 bh = *rBh;
726 bl = *rB;
727 if (ah < bh)
728 ch = 1;
729 else
730 ch = 0;
731 if (al < bl)
732 cl = 1;
733 else
734 cl = 0;
735 w = ch << 3 | cl << 2 | (ch | cl) << 1 | (ch & cl);
736 CR_SET(BF, w);
737 PPC_INSN_INT_CR(0, RA_BITMASK | RB_BITMASK, BF_BITMASK);
738
739 0.4,6.BF,9.0,11.RA,16.RB,21.564:X:e500:evcmpeq %BF,%RA,%RB:Vector Compare Equal
740 unsigned32 ah, al, bh, bl;
741 int w, ch, cl;
742 ah = *rAh;
743 al = *rA;
744 bh = *rBh;
745 bl = *rB;
746 if (ah == bh)
747 ch = 1;
748 else
749 ch = 0;
750 if (al == bl)
751 cl = 1;
752 else
753 cl = 0;
754 w = ch << 3 | cl << 2 | (ch | cl) << 1 | (ch & cl);
755 CR_SET(BF, w);
756 //printf("evcmpeq: ch %d cl %d BF %d, CR is now %08x\n", ch, cl, BF, CR);
757 PPC_INSN_INT_CR(0, RA_BITMASK | RB_BITMASK, BF_BITMASK);
758
# Conditional select: the top two bits of CR field CRFS pick, per
# half, whether rS gets the rA half (bit set) or the rB half.
759 0.4,6.RS,11.RA,16.RB,21.79,29.CRFS:X:e500:evsel %RS,%RA,%RB,%CRFS:Vector Select
760 unsigned32 w1, w2;
761 int cr;
762 cr = CR_FIELD(CRFS);
763 if (cr & 8)
764 w1 = *rAh;
765 else
766 w1 = *rBh;
767 if (cr & 4)
768 w2 = *rA;
769 else
770 w2 = *rB;
771 EV_SET_REG2(*rSh, *rS, w1, w2);
772 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
773
# Bit-reversed increment of the low 16 bits of rA under the 16-bit
# mask in rB: reverse, add one, reverse back; upper rA bits of the
# low word pass through.  NOTE(review): only the low 32-bit half of
# rS is written here -- *rSh is left unchanged; confirm against the
# SPE definition of brinc.
774 0.4,6.RS,11.RA,16.RB,21.527:X:e500:brinc %RS,%RA,%RB:Bit Reversed Increment
775 unsigned32 w1, w2, a, d, mask;
776 mask = (*rB) & 0xffff;
777 a = (*rA) & 0xffff;
778 d = EV_BITREVERSE16(1 + EV_BITREVERSE16(a | ~mask));
779 *rS = ((*rA) & 0xffff0000) | (d & 0xffff);
780 //printf("brinc: *rS = %08x\n", *rS);
781 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
782
783 #
784 # A.2.8 Integer SPE Complex Instructions
785 #
786
# Multiply the odd (low) halfwords of each 32-bit half, signed
# saturating fractional; each product saturates to 0x7fffffff on the
# -1.0 * -1.0 case and the SPEFSCR overflow bits are updated.
787 0.4,6.RS,11.RA,16.RB,21.1031:EVX:e500:evmhossf %RS,%RA,%RB:Vector Multiply Half Words Odd Signed Saturate Fractional
788 signed16 al, ah, bl, bh;
789 signed32 tl, th;
790 int movl, movh;
791
792 al = (signed16) EV_LOHALF (*rA);
793 ah = (signed16) EV_LOHALF (*rAh);
794 bl = (signed16) EV_LOHALF (*rB);
795 bh = (signed16) EV_LOHALF (*rBh);
796 tl = ev_multiply16_ssf (al, bl, &movl);
797 th = ev_multiply16_ssf (ah, bh, &movh);
798 EV_SET_REG2 (*rSh, *rS, EV_SATURATE (movh, 0x7fffffff, th),
799 EV_SATURATE (movl, 0x7fffffff, tl));
800 EV_SET_SPEFSCR_OV (movl, movh);
801 PPC_INSN_INT_SPR (RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
802
803 0.4,6.RS,11.RA,16.RB,21.1063:EVX:e500:evmhossfa %RS,%RA,%RB:Vector Multiply Half Words Odd Signed Saturate Fractional Accumulate
804 signed16 al, ah, bl, bh;
805 signed32 tl, th;
806 int movl, movh;
807
808 al = (signed16) EV_LOHALF (*rA);
809 ah = (signed16) EV_LOHALF (*rAh);
810 bl = (signed16) EV_LOHALF (*rB);
811 bh = (signed16) EV_LOHALF (*rBh);
812 tl = ev_multiply16_ssf (al, bl, &movl);
813 th = ev_multiply16_ssf (ah, bh, &movh);
814 EV_SET_REG2 (*rSh, *rS, EV_SATURATE (movh, 0x7fffffff, th),
815 EV_SATURATE (movl, 0x7fffffff, tl));
816 EV_SET_SPEFSCR_OV (movl, movh);
817 PPC_INSN_INT_SPR (RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
818
# Odd-halfword signed modulo-fractional multiply: no saturation, the
# saturate flag from ev_multiply16_smf is discarded.
819 0.4,6.RS,11.RA,16.RB,21.1039:EVX:e500:evmhosmf %RS,%RA,%RB:Vector Multiply Half Words Odd Signed Modulo Fractional
820 signed16 al, ah, bl, bh;
821 signed32 tl, th;
822 int dummy;
823
824 al = (signed16) EV_LOHALF (*rA);
825 ah = (signed16) EV_LOHALF (*rAh);
826 bl = (signed16) EV_LOHALF (*rB);
827 bh = (signed16) EV_LOHALF (*rBh);
828 tl = ev_multiply16_smf (al, bl, & dummy);
829 th = ev_multiply16_smf (ah, bh, & dummy);
830 EV_SET_REG2 (*rSh, *rS, th, tl);
831 PPC_INSN_INT (RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
832
833 0.4,6.RS,11.RA,16.RB,21.1071:EVX:e500:evmhosmfa %RS,%RA,%RB:Vector Multiply Half Words Odd Signed Modulo Fractional Accumulate
834 signed32 al, ah, bl, bh;
835 signed32 tl, th;
836 int dummy;
837
838 al = (signed16) EV_LOHALF (*rA);
839 ah = (signed16) EV_LOHALF (*rAh);
840 bl = (signed16) EV_LOHALF (*rB);
841 bh = (signed16) EV_LOHALF (*rBh);
842 tl = ev_multiply16_smf (al, bl, & dummy);
843 th = ev_multiply16_smf (ah, bh, & dummy);
844 EV_SET_REG2_ACC (*rSh, *rS, th, tl);
845 PPC_INSN_INT (RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
846
# Odd-halfword modulo-integer multiplies: 16x16 -> 32 products of the
# low halfwords of each 32-bit half, signed or unsigned; the "a"
# variants also write the result to the accumulator.
847 0.4,6.RS,11.RA,16.RB,21.1037:EVX:e500:evmhosmi %RS,%RA,%RB:Vector Multiply Half Words Odd Signed Modulo Integer
848 signed32 al, ah, bl, bh, tl, th;
849 al = (signed32)(signed16)EV_LOHALF(*rA);
850 ah = (signed32)(signed16)EV_LOHALF(*rAh);
851 bl = (signed32)(signed16)EV_LOHALF(*rB);
852 bh = (signed32)(signed16)EV_LOHALF(*rBh);
853 tl = al * bl;
854 th = ah * bh;
855 EV_SET_REG2(*rSh, *rS, th, tl);
856 //printf("evmhosmi: *rSh = %08x; *rS = %08x\n", *rSh, *rS);
857 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
858
859 0.4,6.RS,11.RA,16.RB,21.1069:EVX:e500:evmhosmia %RS,%RA,%RB:Vector Multiply Half Words Odd Signed Modulo Integer Accumulate
860 signed32 al, ah, bl, bh, tl, th;
861 al = (signed32)(signed16)EV_LOHALF(*rA);
862 ah = (signed32)(signed16)EV_LOHALF(*rAh);
863 bl = (signed32)(signed16)EV_LOHALF(*rB);
864 bh = (signed32)(signed16)EV_LOHALF(*rBh);
865 tl = al * bl;
866 th = ah * bh;
867 EV_SET_REG2_ACC(*rSh, *rS, th, tl);
868 //printf("evmhosmia: ACC = %08x; *rSh = %08x; *rS = %08x\n", ACC, *rSh, *rS);
869 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
870
871 0.4,6.RS,11.RA,16.RB,21.1036:EVX:e500:evmhoumi %RS,%RA,%RB:Vector Multiply Half Words Odd Unsigned Modulo Integer
872 unsigned32 al, ah, bl, bh, tl, th;
873 al = (unsigned32)(unsigned16)EV_LOHALF(*rA);
874 ah = (unsigned32)(unsigned16)EV_LOHALF(*rAh);
875 bl = (unsigned32)(unsigned16)EV_LOHALF(*rB);
876 bh = (unsigned32)(unsigned16)EV_LOHALF(*rBh);
877 tl = al * bl;
878 th = ah * bh;
879 EV_SET_REG2(*rSh, *rS, th, tl);
880 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
881
882 0.4,6.RS,11.RA,16.RB,21.1068:EVX:e500:evmhoumia %RS,%RA,%RB:Vector Multiply Half Words Odd Unsigned Modulo Integer Accumulate
883 unsigned32 al, ah, bl, bh, tl, th;
884 al = (unsigned32)(unsigned16)EV_LOHALF(*rA);
885 ah = (unsigned32)(unsigned16)EV_LOHALF(*rAh);
886 bl = (unsigned32)(unsigned16)EV_LOHALF(*rB);
887 bh = (unsigned32)(unsigned16)EV_LOHALF(*rBh);
888 tl = al * bl;
889 th = ah * bh;
890 EV_SET_REG2_ACC(*rSh, *rS, th, tl);
891 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
892
# Even-halfword (high 16 bits of each 32-bit half, via EV_HIHALF)
# signed saturating fractional multiplies; the "a" variant also
# writes the accumulator.
893 0.4,6.RS,11.RA,16.RB,21.1027:EVX:e500:evmhessf %RS,%RA,%RB:Vector Multiply Half Words Even Signed Saturate Fractional
894 signed16 al, ah, bl, bh;
895 signed32 tl, th;
896 int movl, movh;
897
898 al = (signed16) EV_HIHALF (*rA);
899 ah = (signed16) EV_HIHALF (*rAh);
900 bl = (signed16) EV_HIHALF (*rB);
901 bh = (signed16) EV_HIHALF (*rBh);
902 tl = ev_multiply16_ssf (al, bl, &movl);
903 th = ev_multiply16_ssf (ah, bh, &movh);
904 EV_SET_REG2 (*rSh, *rS, EV_SATURATE (movh, 0x7fffffff, th),
905 EV_SATURATE (movl, 0x7fffffff, tl));
906 EV_SET_SPEFSCR_OV (movl, movh);
907 PPC_INSN_INT_SPR (RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
908
909 0.4,6.RS,11.RA,16.RB,21.1059:EVX:e500:evmhessfa %RS,%RA,%RB:Vector Multiply Half Words Even Signed Saturate Fractional Accumulate
910 signed16 al, ah, bl, bh;
911 signed32 tl, th;
912 int movl, movh;
913
914 al = (signed16) EV_HIHALF (*rA);
915 ah = (signed16) EV_HIHALF (*rAh);
916 bl = (signed16) EV_HIHALF (*rB);
917 bh = (signed16) EV_HIHALF (*rBh);
918 tl = ev_multiply16_ssf (al, bl, &movl);
919 th = ev_multiply16_ssf (ah, bh, &movh);
920 EV_SET_REG2_ACC (*rSh, *rS, EV_SATURATE (movh, 0x7fffffff, th),
921 EV_SATURATE (movl, 0x7fffffff, tl));
922 EV_SET_SPEFSCR_OV (movl, movh);
923 PPC_INSN_INT_SPR (RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
924
# Even-halfword signed modulo-fractional multiplies.
# NOTE(review): evmhesmf holds the products in signed64 and updates
# the SPEFSCR overflow bits, while evmhesmfa uses signed32 and
# discards the saturate flags -- the siblings (evmhosmf/evmhosmfa)
# both use signed32 and discard; confirm which is intended.
925 0.4,6.RS,11.RA,16.RB,21.1035:EVX:e500:evmhesmf %RS,%RA,%RB:Vector Multiply Half Words Even Signed Modulo Fractional
926 signed16 al, ah, bl, bh;
927 signed64 tl, th;
928 int movl, movh;
929
930 al = (signed16) EV_HIHALF (*rA);
931 ah = (signed16) EV_HIHALF (*rAh);
932 bl = (signed16) EV_HIHALF (*rB);
933 bh = (signed16) EV_HIHALF (*rBh);
934 tl = ev_multiply16_smf (al, bl, &movl);
935 th = ev_multiply16_smf (ah, bh, &movh);
936 EV_SET_REG2 (*rSh, *rS, th, tl);
937 EV_SET_SPEFSCR_OV (movl, movh);
938 PPC_INSN_INT_SPR (RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
939
940 0.4,6.RS,11.RA,16.RB,21.1067:EVX:e500:evmhesmfa %RS,%RA,%RB:Vector Multiply Half Words Even Signed Modulo Fractional Accumulate
941 signed16 al, ah, bl, bh;
942 signed32 tl, th;
943 int dummy;
944
945 al = (signed16) EV_HIHALF (*rA);
946 ah = (signed16) EV_HIHALF (*rAh);
947 bl = (signed16) EV_HIHALF (*rB);
948 bh = (signed16) EV_HIHALF (*rBh);
949 tl = ev_multiply16_smf (al, bl, & dummy);
950 th = ev_multiply16_smf (ah, bh, & dummy);
951 EV_SET_REG2_ACC (*rSh, *rS, th, tl);
952 PPC_INSN_INT (RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
953
# Even-halfword modulo-integer multiplies: 16x16 -> 32 products of
# the high halfwords of each 32-bit half, signed or unsigned; the "a"
# variants also write the accumulator.
954 0.4,6.RS,11.RA,16.RB,21.1033:EVX:e500:evmhesmi %RS,%RA,%RB:Vector Multiply Half Words Even Signed Modulo Integer
955 signed16 al, ah, bl, bh;
956 signed32 tl, th;
957
958 al = (signed16) EV_HIHALF (*rA);
959 ah = (signed16) EV_HIHALF (*rAh);
960 bl = (signed16) EV_HIHALF (*rB);
961 bh = (signed16) EV_HIHALF (*rBh);
962 tl = al * bl;
963 th = ah * bh;
964 EV_SET_REG2 (*rSh, *rS, th, tl);
965 PPC_INSN_INT (RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
966
967 0.4,6.RS,11.RA,16.RB,21.1065:EVX:e500:evmhesmia %RS,%RA,%RB:Vector Multiply Half Words Even Signed Modulo Integer Accumulate
968 signed32 al, ah, bl, bh, tl, th;
969 al = (signed32)(signed16)EV_HIHALF(*rA);
970 ah = (signed32)(signed16)EV_HIHALF(*rAh);
971 bl = (signed32)(signed16)EV_HIHALF(*rB);
972 bh = (signed32)(signed16)EV_HIHALF(*rBh);
973 tl = al * bl;
974 th = ah * bh;
975 EV_SET_REG2_ACC(*rSh, *rS, th, tl);
976 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
977
978 0.4,6.RS,11.RA,16.RB,21.1032:EVX:e500:evmheumi %RS,%RA,%RB:Vector Multiply Half Words Even Unsigned Modulo Integer
979 unsigned32 al, ah, bl, bh, tl, th;
980 al = (unsigned32)(unsigned16)EV_HIHALF(*rA);
981 ah = (unsigned32)(unsigned16)EV_HIHALF(*rAh);
982 bl = (unsigned32)(unsigned16)EV_HIHALF(*rB);
983 bh = (unsigned32)(unsigned16)EV_HIHALF(*rBh);
984 tl = al * bl;
985 th = ah * bh;
986 EV_SET_REG2(*rSh, *rS, th, tl);
987 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
988
989 0.4,6.RS,11.RA,16.RB,21.1064:EVX:e500:evmheumia %RS,%RA,%RB:Vector Multiply Half Words Even Unsigned Modulo Integer Accumulate
990 unsigned32 al, ah, bl, bh, tl, th;
991 al = (unsigned32)(unsigned16)EV_HIHALF(*rA);
992 ah = (unsigned32)(unsigned16)EV_HIHALF(*rAh);
993 bl = (unsigned32)(unsigned16)EV_HIHALF(*rB);
994 bh = (unsigned32)(unsigned16)EV_HIHALF(*rBh);
995 tl = al * bl;
996 th = ah * bh;
997 EV_SET_REG2_ACC(*rSh, *rS, th, tl);
998 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
999
# "…aaw" group, odd halves: multiply the odd (low, EV_LOHALF) 16-bit half
# of each word, then ADD the product to the corresponding 32-bit word of
# the accumulator (EV_ACCHIGH/EV_ACCLOW).  Saturating forms clamp to the
# signed/unsigned 32-bit range and report overflow in SPEFSCR.

# evmhossfaaw: signed saturate fractional multiply-accumulate; both the
# multiply overflow (mov*) and the accumulate overflow (ov*) feed SPEFSCR.
1000 0.4,6.RS,11.RA,16.RB,21.1287:EVX:e500:evmhossfaaw %RS,%RA,%RB:Vector Multiply Half Words Odd Signed Saturate Fractional and Accumulate into Words
1001 signed16 al, ah, bl, bh;
1002 signed32 t1, t2;
1003 signed64 tl, th;
1004 int movl, movh, ovl, ovh;
1005
1006 al = (signed16) EV_LOHALF (*rA);
1007 ah = (signed16) EV_LOHALF (*rAh);
1008 bl = (signed16) EV_LOHALF (*rB);
1009 bh = (signed16) EV_LOHALF (*rBh);
1010 t1 = ev_multiply16_ssf (ah, bh, &movh);
1011 t2 = ev_multiply16_ssf (al, bl, &movl);
1012 th = EV_ACCHIGH + EV_SATURATE (movh, 0x7fffffff, t1);
1013 tl = EV_ACCLOW + EV_SATURATE (movl, 0x7fffffff, t2);
1014 ovh = EV_SAT_P_S32 (th);
1015 ovl = EV_SAT_P_S32 (tl);
1016 EV_SET_REG2_ACC (*rSh, *rS, EV_SATURATE_ACC (ovh, th, 0x80000000, 0x7fffffff, th),
1017 EV_SATURATE_ACC (ovl, tl, 0x80000000, 0x7fffffff, tl));
1018 EV_SET_SPEFSCR_OV (movl | ovl, movh | ovh);
1019 PPC_INSN_INT_SPR (RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
1020
# evmhossiaaw: signed saturate integer multiply-accumulate; sums are formed
# in 64 bits, then clamped to [0x80000000, 0x7fffffff] when EV_SAT_P_S32
# detects the 32-bit signed range was exceeded.
1021 0.4,6.RS,11.RA,16.RB,21.1285:EVX:e500:evmhossiaaw %RS,%RA,%RB:Vector Multiply Half Words Odd Signed Saturate Integer and Accumulate into Words
1022 signed32 al, ah, bl, bh;
1023 signed64 t1, t2, tl, th;
1024 int ovl, ovh;
1025 al = (signed32)(signed16)EV_LOHALF(*rA);
1026 ah = (signed32)(signed16)EV_LOHALF(*rAh);
1027 bl = (signed32)(signed16)EV_LOHALF(*rB);
1028 bh = (signed32)(signed16)EV_LOHALF(*rBh);
1029 t1 = ah * bh;
1030 t2 = al * bl;
1031 th = EV_ACCHIGH + t1;
1032 tl = EV_ACCLOW + t2;
1033 ovh = EV_SAT_P_S32(th);
1034 ovl = EV_SAT_P_S32(tl);
1035 EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE_ACC(ovh, th, 0x80000000, 0x7fffffff, th),
1036 EV_SATURATE_ACC(ovl, tl, 0x80000000, 0x7fffffff, tl));
1037 //printf("evmhossiaaw: ovh %d ovl %d al %d ah %d bl %d bh %d t1 %qd t2 %qd tl %qd th %qd\n", ovh, ovl, al, ah, bl, bh, t1, t2, tl, th);
1038 //printf("evmhossiaaw: ACC = %08x.%08x; *rSh = %08x; *rS = %08x\n", (int)(ACC >> 32), (int)ACC, *rSh, *rS);
1039 EV_SET_SPEFSCR_OV(ovl, ovh);
1040 PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
1041
# evmhosmfaaw: signed modulo fractional multiply-accumulate; the product is
# doubled (<< 1, the fractional guard shift) and wrapped to 32 bits, no
# saturation and no SPEFSCR update.
1042 0.4,6.RS,11.RA,16.RB,21.1295:EVX:e500:evmhosmfaaw %RS,%RA,%RB:Vector Multiply Half Words Odd Signed Modulo Fractional and Accumulate into Words
1043 signed32 al, ah, bl, bh;
1044 signed64 t1, t2, tl, th;
1045 al = (signed32)(signed16)EV_LOHALF(*rA);
1046 ah = (signed32)(signed16)EV_LOHALF(*rAh);
1047 bl = (signed32)(signed16)EV_LOHALF(*rB);
1048 bh = (signed32)(signed16)EV_LOHALF(*rBh);
1049 t1 = ((signed64)ah * bh) << 1;
1050 t2 = ((signed64)al * bl) << 1;
1051 th = EV_ACCHIGH + (t1 & 0xffffffff);
1052 tl = EV_ACCLOW + (t2 & 0xffffffff);
1053 EV_SET_REG2_ACC(*rSh, *rS, th & 0xffffffff, tl & 0xffffffff);
1054 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1055
# evmhosmiaaw: signed modulo integer multiply-accumulate, result wrapped to
# 32 bits per word.
1056 0.4,6.RS,11.RA,16.RB,21.1293:EVX:e500:evmhosmiaaw %RS,%RA,%RB:Vector Multiply Half Words Odd Signed Modulo Integer and Accumulate into Words
1057 signed32 al, ah, bl, bh;
1058 signed64 t1, t2, tl, th;
1059 al = (signed32)(signed16)EV_LOHALF(*rA);
1060 ah = (signed32)(signed16)EV_LOHALF(*rAh);
1061 bl = (signed32)(signed16)EV_LOHALF(*rB);
1062 bh = (signed32)(signed16)EV_LOHALF(*rBh);
1063 t1 = ah * bh;
1064 t2 = al * bl;
1065 th = EV_ACCHIGH + t1;
1066 tl = EV_ACCLOW + t2;
1067 EV_SET_REG2_ACC(*rSh, *rS, th & 0xffffffff, tl & 0xffffffff);
1068 //printf("evmhosmiaaw: al %d ah %d bl %d bh %d t1 %qd t2 %qd tl %qd th %qd\n", al, ah, bl, bh, t1, t2, tl, th);
1069 //printf("evmhosmiaaw: ACC = %08x.%08x; *rSh = %08x; *rS = %08x\n", (int)(ACC >> 32), (int)ACC, *rSh, *rS);
1070 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1071
# evmhousiaaw: unsigned saturate integer multiply-accumulate; sums are done
# as signed64 and clamped to [0, 0xffffffff] when EV_SAT_P_U32 fires.
1072 0.4,6.RS,11.RA,16.RB,21.1284:EVX:e500:evmhousiaaw %RS,%RA,%RB:Vector Multiply Half Words Odd Unsigned Saturate Integer and Accumulate into Words
1073 unsigned32 al, ah, bl, bh;
1074 unsigned64 t1, t2;
1075 signed64 tl, th;
1076 int ovl, ovh;
1077 al = (unsigned32)(unsigned16)EV_LOHALF(*rA);
1078 ah = (unsigned32)(unsigned16)EV_LOHALF(*rAh);
1079 bl = (unsigned32)(unsigned16)EV_LOHALF(*rB);
1080 bh = (unsigned32)(unsigned16)EV_LOHALF(*rBh);
1081 t1 = ah * bh;
1082 t2 = al * bl;
1083 th = (signed64)EV_ACCHIGH + (signed64)t1;
1084 tl = (signed64)EV_ACCLOW + (signed64)t2;
1085 ovh = EV_SAT_P_U32(th);
1086 ovl = EV_SAT_P_U32(tl);
1087 EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE_ACC(ovh, th, 0, 0xffffffff, th),
1088 EV_SATURATE_ACC(ovl, tl, 0, 0xffffffff, tl));
1089 //printf("evmhousiaaw: al %u ah %u bl %u bh %u t1 %qu t2 %qu tl %qu th %qu\n", al, ah, bl, bh, t1, t2, tl, th);
1090 //printf("evmhousiaaw: ACC = %08x.%08x; *rSh = %08x; *rS = %08x\n", (int)(ACC >> 32), (int)ACC, *rSh, *rS);
1091 EV_SET_SPEFSCR_OV(ovl, ovh);
1092 PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
1093
# evmhoumiaaw: unsigned modulo integer multiply-accumulate, wrapped to 32
# bits per word, no SPEFSCR update.
1094 0.4,6.RS,11.RA,16.RB,21.1292:EVX:e500:evmhoumiaaw %RS,%RA,%RB:Vector Multiply Half Words Odd Unsigned Modulo Integer and Accumulate into Words
1095 unsigned32 al, ah, bl, bh;
1096 unsigned32 t1, t2;
1097 signed64 tl, th;
1098 al = (unsigned32)(unsigned16)EV_LOHALF(*rA);
1099 ah = (unsigned32)(unsigned16)EV_LOHALF(*rAh);
1100 bl = (unsigned32)(unsigned16)EV_LOHALF(*rB);
1101 bh = (unsigned32)(unsigned16)EV_LOHALF(*rBh);
1102 t1 = ah * bh;
1103 t2 = al * bl;
1104 th = EV_ACCHIGH + t1;
1105 tl = EV_ACCLOW + t2;
1106 EV_SET_REG2_ACC(*rSh, *rS, th & 0xffffffff, tl & 0xffffffff);
1107 //printf("evmhoumiaaw: al %u ah %u bl %u bh %u t1 %qu t2 %qu tl %qu th %qu\n", al, ah, bl, bh, t1, t2, tl, th);
1108 //printf("evmhoumiaaw: ACC = %08x.%08x; *rSh = %08x; *rS = %08x\n", (int)(ACC >> 32), (int)ACC, *rSh, *rS);
1109 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1110
# "…aaw" group, even halves: identical structure to the odd-half group
# above, but operands come from the even (high, EV_HIHALF) 16-bit half of
# each word; products are ADDED into the accumulator words.

# evmhessfaaw: signed saturate fractional multiply-accumulate (even halves).
1111 0.4,6.RS,11.RA,16.RB,21.1283:EVX:e500:evmhessfaaw %RS,%RA,%RB:Vector Multiply Half Words Even Signed Saturate Fractional and Accumulate into Words
1112 signed16 al, ah, bl, bh;
1113 signed32 t1, t2;
1114 signed64 tl, th;
1115 int movl, movh, ovl, ovh;
1116
1117 al = (signed16) EV_HIHALF (*rA);
1118 ah = (signed16) EV_HIHALF (*rAh);
1119 bl = (signed16) EV_HIHALF (*rB);
1120 bh = (signed16) EV_HIHALF (*rBh);
1121 t1 = ev_multiply16_ssf (ah, bh, &movh);
1122 t2 = ev_multiply16_ssf (al, bl, &movl);
1123 th = EV_ACCHIGH + EV_SATURATE (movh, 0x7fffffff, t1);
1124 tl = EV_ACCLOW + EV_SATURATE (movl, 0x7fffffff, t2);
1125 ovh = EV_SAT_P_S32 (th);
1126 ovl = EV_SAT_P_S32 (tl);
1127 EV_SET_REG2_ACC (*rSh, *rS, EV_SATURATE_ACC (ovh, th, 0x80000000, 0x7fffffff, th),
1128 EV_SATURATE_ACC (ovl, tl, 0x80000000, 0x7fffffff, tl));
1129 EV_SET_SPEFSCR_OV (movl | ovl, movh | ovh);
1130 PPC_INSN_INT_SPR (RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
1131
# evmhessiaaw: signed saturate integer multiply-accumulate (even halves).
1132 0.4,6.RS,11.RA,16.RB,21.1281:EVX:e500:evmhessiaaw %RS,%RA,%RB:Vector Multiply Half Words Even Signed Saturate Integer and Accumulate into Words
1133 signed32 al, ah, bl, bh;
1134 signed64 t1, t2, tl, th;
1135 int ovl, ovh;
1136 al = (signed32)(signed16)EV_HIHALF(*rA);
1137 ah = (signed32)(signed16)EV_HIHALF(*rAh);
1138 bl = (signed32)(signed16)EV_HIHALF(*rB);
1139 bh = (signed32)(signed16)EV_HIHALF(*rBh);
1140 t1 = ah * bh;
1141 t2 = al * bl;
1142 th = EV_ACCHIGH + t1;
1143 tl = EV_ACCLOW + t2;
1144 ovh = EV_SAT_P_S32(th);
1145 ovl = EV_SAT_P_S32(tl);
1146 EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE_ACC(ovh, th, 0x80000000, 0x7fffffff, th),
1147 EV_SATURATE_ACC(ovl, tl, 0x80000000, 0x7fffffff, tl));
1148 //printf("evmhessiaaw: ovh %d ovl %d al %d ah %d bl %d bh %d t1 %qd t2 %qd tl %qd th %qd\n", ovh, ovl, al, ah, bl, bh, t1, t2, tl, th);
1149 //printf("evmhessiaaw: ACC = %08x.%08x; *rSh = %08x; *rS = %08x\n", (int)(ACC >> 32), (int)ACC, *rSh, *rS);
1150 EV_SET_SPEFSCR_OV(ovl, ovh);
1151 PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
1152
# evmhesmfaaw: signed modulo fractional multiply-accumulate (even halves);
# fractional product via ev_multiply16_smf, overflow flags discarded.
1153 0.4,6.RS,11.RA,16.RB,21.1291:EVX:e500:evmhesmfaaw %RS,%RA,%RB:Vector Multiply Half Words Even Signed Modulo Fractional and Accumulate into Words
1154 signed16 al, ah, bl, bh;
1155 signed32 t1, t2, th, tl;
1156 int dummy;
1157
1158 al = (signed16)EV_HIHALF(*rA);
1159 ah = (signed16)EV_HIHALF(*rAh);
1160 bl = (signed16)EV_HIHALF(*rB);
1161 bh = (signed16)EV_HIHALF(*rBh);
1162 t1 = ev_multiply16_smf (ah, bh, &dummy);
1163 t2 = ev_multiply16_smf (al, bl, &dummy);
1164 th = EV_ACCHIGH + t1;
1165 tl = EV_ACCLOW + t2;
1166 EV_SET_REG2_ACC(*rSh, *rS, th, tl);
1167 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1168
# evmhesmiaaw: signed modulo integer multiply-accumulate (even halves),
# wrapped to 32 bits per word.
1169 0.4,6.RS,11.RA,16.RB,21.1289:EVX:e500:evmhesmiaaw %RS,%RA,%RB:Vector Multiply Half Words Even Signed Modulo Integer and Accumulate into Words
1170 signed32 al, ah, bl, bh;
1171 signed64 t1, t2, tl, th;
1172 al = (signed32)(signed16)EV_HIHALF(*rA);
1173 ah = (signed32)(signed16)EV_HIHALF(*rAh);
1174 bl = (signed32)(signed16)EV_HIHALF(*rB);
1175 bh = (signed32)(signed16)EV_HIHALF(*rBh);
1176 t1 = ah * bh;
1177 t2 = al * bl;
1178 th = EV_ACCHIGH + t1;
1179 tl = EV_ACCLOW + t2;
1180 EV_SET_REG2_ACC(*rSh, *rS, th & 0xffffffff, tl & 0xffffffff);
1181 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1182
# evmheusiaaw: unsigned saturate integer multiply-accumulate (even halves),
# clamped to [0, 0xffffffff] on unsigned overflow/underflow.
1183 0.4,6.RS,11.RA,16.RB,21.1280:EVX:e500:evmheusiaaw %RS,%RA,%RB:Vector Multiply Half Words Even Unsigned Saturate Integer and Accumulate into Words
1184 unsigned32 al, ah, bl, bh;
1185 unsigned64 t1, t2;
1186 signed64 tl, th;
1187 int ovl, ovh;
1188 al = (unsigned32)(unsigned16)EV_HIHALF(*rA);
1189 ah = (unsigned32)(unsigned16)EV_HIHALF(*rAh);
1190 bl = (unsigned32)(unsigned16)EV_HIHALF(*rB);
1191 bh = (unsigned32)(unsigned16)EV_HIHALF(*rBh);
1192 t1 = ah * bh;
1193 t2 = al * bl;
1194 th = (signed64)EV_ACCHIGH + (signed64)t1;
1195 tl = (signed64)EV_ACCLOW + (signed64)t2;
1196 ovh = EV_SAT_P_U32(th);
1197 ovl = EV_SAT_P_U32(tl);
1198 EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE_ACC(ovh, th, 0, 0xffffffff, th),
1199 EV_SATURATE_ACC(ovl, tl, 0, 0xffffffff, tl));
1200 EV_SET_SPEFSCR_OV(ovl, ovh);
1201 PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
1202
# evmheumiaaw: unsigned modulo integer multiply-accumulate (even halves),
# wrapped to 32 bits per word.
1203 0.4,6.RS,11.RA,16.RB,21.1288:EVX:e500:evmheumiaaw %RS,%RA,%RB:Vector Multiply Half Words Even Unsigned Modulo Integer and Accumulate into Words
1204 unsigned32 al, ah, bl, bh;
1205 unsigned32 t1, t2;
1206 unsigned64 tl, th;
1207 al = (unsigned32)(unsigned16)EV_HIHALF(*rA);
1208 ah = (unsigned32)(unsigned16)EV_HIHALF(*rAh);
1209 bl = (unsigned32)(unsigned16)EV_HIHALF(*rB);
1210 bh = (unsigned32)(unsigned16)EV_HIHALF(*rBh);
1211 t1 = ah * bh;
1212 t2 = al * bl;
1213 th = EV_ACCHIGH + t1;
1214 tl = EV_ACCLOW + t2;
1215 EV_SET_REG2_ACC(*rSh, *rS, th & 0xffffffff, tl & 0xffffffff);
1216 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1217
1218
# "…anw" group, odd halves: same structure as the "…aaw" group but the
# product is SUBTRACTED from the accumulator words (Accumulate Negative).

# evmhossfanw: signed saturate fractional multiply, negate-accumulate.
1219 0.4,6.RS,11.RA,16.RB,21.1415:EVX:e500:evmhossfanw %RS,%RA,%RB:Vector Multiply Half Words Odd Signed Saturate Fractional and Accumulate Negative into Words
1220 signed16 al, ah, bl, bh;
1221 signed32 t1, t2;
1222 signed64 tl, th;
1223 int movl, movh, ovl, ovh;
1224
1225 al = (signed16) EV_LOHALF (*rA);
1226 ah = (signed16) EV_LOHALF (*rAh);
1227 bl = (signed16) EV_LOHALF (*rB);
1228 bh = (signed16) EV_LOHALF (*rBh);
1229 t1 = ev_multiply16_ssf (ah, bh, &movh);
1230 t2 = ev_multiply16_ssf (al, bl, &movl);
1231 th = EV_ACCHIGH - EV_SATURATE (movh, 0x7fffffff, t1);
1232 tl = EV_ACCLOW - EV_SATURATE (movl, 0x7fffffff, t2);
1233 ovh = EV_SAT_P_S32 (th);
1234 ovl = EV_SAT_P_S32 (tl);
1235 EV_SET_REG2_ACC (*rSh, *rS, EV_SATURATE_ACC (ovh, th, 0x80000000, 0x7fffffff, th),
1236 EV_SATURATE_ACC (ovl, tl, 0x80000000, 0x7fffffff, tl));
1237 EV_SET_SPEFSCR_OV (movl | ovl, movh | ovh);
1238 PPC_INSN_INT_SPR (RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
1239
# evmhossianw: signed saturate integer multiply, negate-accumulate.
1240 0.4,6.RS,11.RA,16.RB,21.1413:EVX:e500:evmhossianw %RS,%RA,%RB:Vector Multiply Half Words Odd Signed Saturate Integer and Accumulate Negative into Words
1241 signed32 al, ah, bl, bh;
1242 signed64 t1, t2, tl, th;
1243 int ovl, ovh;
1244 al = (signed32)(signed16)EV_LOHALF(*rA);
1245 ah = (signed32)(signed16)EV_LOHALF(*rAh);
1246 bl = (signed32)(signed16)EV_LOHALF(*rB);
1247 bh = (signed32)(signed16)EV_LOHALF(*rBh);
1248 t1 = ah * bh;
1249 t2 = al * bl;
1250 th = EV_ACCHIGH - t1;
1251 tl = EV_ACCLOW - t2;
1252 ovh = EV_SAT_P_S32(th);
1253 ovl = EV_SAT_P_S32(tl);
1254 EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE_ACC(ovh, th, 0x80000000, 0x7fffffff, th),
1255 EV_SATURATE_ACC(ovl, tl, 0x80000000, 0x7fffffff, tl));
1256 EV_SET_SPEFSCR_OV(ovl, ovh);
1257 //printf("evmhossianw: ACC = %08x; *rSh = %08x; *rS = %08x\n", ACC, *rSh, *rS);
1258 PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
1259
# evmhosmfanw: signed modulo fractional multiply (product doubled by the
# fractional << 1), negate-accumulate, wrapped to 32 bits.
1260 0.4,6.RS,11.RA,16.RB,21.1423:EVX:e500:evmhosmfanw %RS,%RA,%RB:Vector Multiply Half Words Odd Signed Modulo Fractional and Accumulate Negative into Words
1261 signed32 al, ah, bl, bh;
1262 signed64 t1, t2, tl, th;
1263 al = (signed32)(signed16)EV_LOHALF(*rA);
1264 ah = (signed32)(signed16)EV_LOHALF(*rAh);
1265 bl = (signed32)(signed16)EV_LOHALF(*rB);
1266 bh = (signed32)(signed16)EV_LOHALF(*rBh);
1267 t1 = ((signed64)ah * bh) << 1;
1268 t2 = ((signed64)al * bl) << 1;
1269 th = EV_ACCHIGH - (t1 & 0xffffffff);
1270 tl = EV_ACCLOW - (t2 & 0xffffffff);
1271 EV_SET_REG2_ACC(*rSh, *rS, th & 0xffffffff, tl & 0xffffffff);
1272 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1273
# evmhosmianw: signed modulo integer multiply, negate-accumulate, wrapped.
1274 0.4,6.RS,11.RA,16.RB,21.1421:EVX:e500:evmhosmianw %RS,%RA,%RB:Vector Multiply Half Words Odd Signed Modulo Integer and Accumulate Negative into Words
1275 signed32 al, ah, bl, bh;
1276 signed64 t1, t2, tl, th;
1277 al = (signed32)(signed16)EV_LOHALF(*rA);
1278 ah = (signed32)(signed16)EV_LOHALF(*rAh);
1279 bl = (signed32)(signed16)EV_LOHALF(*rB);
1280 bh = (signed32)(signed16)EV_LOHALF(*rBh);
1281 t1 = ah * bh;
1282 t2 = al * bl;
1283 th = EV_ACCHIGH - t1;
1284 tl = EV_ACCLOW - t2;
1285 EV_SET_REG2_ACC(*rSh, *rS, th & 0xffffffff, tl & 0xffffffff);
1286 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1287
# evmhousianw: unsigned saturate integer multiply, negate-accumulate;
# clamped to [0, 0xffffffff] (subtraction can underflow below zero).
1288 0.4,6.RS,11.RA,16.RB,21.1412:EVX:e500:evmhousianw %RS,%RA,%RB:Vector Multiply Half Words Odd Unsigned Saturate Integer and Accumulate Negative into Words
1289 unsigned32 al, ah, bl, bh;
1290 unsigned64 t1, t2;
1291 signed64 tl, th;
1292 int ovl, ovh;
1293 al = (unsigned32)(unsigned16)EV_LOHALF(*rA);
1294 ah = (unsigned32)(unsigned16)EV_LOHALF(*rAh);
1295 bl = (unsigned32)(unsigned16)EV_LOHALF(*rB);
1296 bh = (unsigned32)(unsigned16)EV_LOHALF(*rBh);
1297 t1 = ah * bh;
1298 t2 = al * bl;
1299 th = (signed64)EV_ACCHIGH - (signed64)t1;
1300 tl = (signed64)EV_ACCLOW - (signed64)t2;
1301 ovl = EV_SAT_P_U32(tl);
1302 ovh = EV_SAT_P_U32(th);
1303 EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE_ACC(ovh, th, 0, 0xffffffff, th),
1304 EV_SATURATE_ACC(ovl, tl, 0, 0xffffffff, tl));
1305 //printf("evmhousianw: ovh %d ovl %d al %d ah %d bl %d bh %d t1 %qd t2 %qd tl %qd th %qd\n", ovh, ovl, al, ah, bl, bh, t1, t2, tl, th);
1306 //printf("evmoussianw: ACC = %08x.%08x; *rSh = %08x; *rS = %08x\n", (int)(ACC >> 32), (int)ACC, *rSh, *rS);
1307 EV_SET_SPEFSCR_OV(ovl, ovh);
1308 PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
1309
# evmhoumianw: unsigned modulo integer multiply, negate-accumulate, wrapped.
1310 0.4,6.RS,11.RA,16.RB,21.1420:EVX:e500:evmhoumianw %RS,%RA,%RB:Vector Multiply Half Words Odd Unsigned Modulo Integer and Accumulate Negative into Words
1311 unsigned32 al, ah, bl, bh;
1312 unsigned32 t1, t2;
1313 unsigned64 tl, th;
1314 al = (unsigned32)(unsigned16)EV_LOHALF(*rA);
1315 ah = (unsigned32)(unsigned16)EV_LOHALF(*rAh);
1316 bl = (unsigned32)(unsigned16)EV_LOHALF(*rB);
1317 bh = (unsigned32)(unsigned16)EV_LOHALF(*rBh);
1318 t1 = ah * bh;
1319 t2 = al * bl;
1320 th = EV_ACCHIGH - t1;
1321 tl = EV_ACCLOW - t2;
1322 EV_SET_REG2_ACC(*rSh, *rS, th & 0xffffffff, tl & 0xffffffff);
1323 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1324
# evmhessfanw: even-half counterpart of evmhossfanw — signed saturate
# fractional multiply, product subtracted from the accumulator words.
1325 0.4,6.RS,11.RA,16.RB,21.1411:EVX:e500:evmhessfanw %RS,%RA,%RB:Vector Multiply Half Words Even Signed Saturate Fractional and Accumulate Negative into Words
1326 signed16 al, ah, bl, bh;
1327 signed32 t1, t2;
1328 signed64 tl, th;
1329 int movl, movh, ovl, ovh;
1330
1331 al = (signed16) EV_HIHALF (*rA);
1332 ah = (signed16) EV_HIHALF (*rAh);
1333 bl = (signed16) EV_HIHALF (*rB);
1334 bh = (signed16) EV_HIHALF (*rBh);
1335 t1 = ev_multiply16_ssf (ah, bh, &movh);
1336 t2 = ev_multiply16_ssf (al, bl, &movl);
1337 th = EV_ACCHIGH - EV_SATURATE (movh, 0x7fffffff, t1);
1338 tl = EV_ACCLOW - EV_SATURATE (movl, 0x7fffffff, t2);
1339 ovh = EV_SAT_P_S32 (th);
1340 ovl = EV_SAT_P_S32 (tl);
1341 EV_SET_REG2_ACC (*rSh, *rS, EV_SATURATE_ACC (ovh, th, 0x80000000, 0x7fffffff, th),
1342 EV_SATURATE_ACC (ovl, tl, 0x80000000, 0x7fffffff, tl));
1343 EV_SET_SPEFSCR_OV (movl | ovl, movh | ovh);
1344 PPC_INSN_INT_SPR (RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
1345
# evmhessianw: even-half counterpart of evmhossianw — signed saturate
# integer multiply, negate-accumulate, clamped to the signed 32-bit range.
1346 0.4,6.RS,11.RA,16.RB,21.1409:EVX:e500:evmhessianw %RS,%RA,%RB:Vector Multiply Half Words Even Signed Saturate Integer and Accumulate Negative into Words
1347 signed32 al, ah, bl, bh;
1348 signed64 t1, t2, tl, th;
1349 int ovl, ovh;
1350 al = (signed32)(signed16)EV_HIHALF(*rA);
1351 ah = (signed32)(signed16)EV_HIHALF(*rAh);
1352 bl = (signed32)(signed16)EV_HIHALF(*rB);
1353 bh = (signed32)(signed16)EV_HIHALF(*rBh);
1354 t1 = ah * bh;
1355 t2 = al * bl;
1356 th = EV_ACCHIGH - t1;
1357 tl = EV_ACCLOW - t2;
1358 ovh = EV_SAT_P_S32(th);
1359 ovl = EV_SAT_P_S32(tl);
1360 EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE_ACC(ovh, th, 0x80000000, 0x7fffffff, th),
1361 EV_SATURATE_ACC(ovl, tl, 0x80000000, 0x7fffffff, tl));
1362 EV_SET_SPEFSCR_OV(ovl, ovh);
1363 PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
1364
1365 0.4,6.RS,11.RA,16.RB,21.1419:EVX:e500:evmhesmfanw %RS,%RA,%RB:Vector Multiply Half Words Even Signed Modulo Fractional and Accumulate Negative into Words
1366 signed32 al, ah, bl, bh;
1367 signed64 t1, t2, tl, th;
1368 al = (unsigned32)(unsigned16)EV_HIHALF(*rA);
1369 ah = (unsigned32)(unsigned16)EV_HIHALF(*rAh);
1370 bl = (unsigned32)(unsigned16)EV_HIHALF(*rB);
1371 bh = (unsigned32)(unsigned16)EV_HIHALF(*rBh);
1372 t1 = ((signed64)ah * bh) << 1;
1373 t2 = ((signed64)al * bl) << 1;
1374 th = EV_ACCHIGH - (t1 & 0xffffffff);
1375 tl = EV_ACCLOW - (t2 & 0xffffffff);
1376 EV_SET_REG2_ACC(*rSh, *rS, th & 0xffffffff, tl & 0xffffffff);
1377 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1378
# evmhesmianw: even-half signed modulo integer multiply, product subtracted
# from the accumulator words, wrapped to 32 bits.
1379 0.4,6.RS,11.RA,16.RB,21.1417:EVX:e500:evmhesmianw %RS,%RA,%RB:Vector Multiply Half Words Even Signed Modulo Integer and Accumulate Negative into Words
1380 signed32 al, ah, bl, bh;
1381 signed64 t1, t2, tl, th;
1382 al = (signed32)(signed16)EV_HIHALF(*rA);
1383 ah = (signed32)(signed16)EV_HIHALF(*rAh);
1384 bl = (signed32)(signed16)EV_HIHALF(*rB);
1385 bh = (signed32)(signed16)EV_HIHALF(*rBh);
1386 t1 = ah * bh;
1387 t2 = al * bl;
1388 th = EV_ACCHIGH - t1;
1389 tl = EV_ACCLOW - t2;
1390 EV_SET_REG2_ACC(*rSh, *rS, th & 0xffffffff, tl & 0xffffffff);
1391 //printf("evmhesmianw: al %d ah %d bl %d bh %d t1 %qd t2 %qd tl %qd th %qd\n", al, ah, bl, bh, t1, t2, tl, th);
1392 //printf("evmhesmianw: ACC = %08x.%08x; *rSh = %08x; *rS = %08x\n", (int)(ACC >> 32), (int)ACC, *rSh, *rS);
1393 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1394
# evmheusianw: even-half unsigned saturate integer multiply, negate-
# accumulate; clamped to [0, 0xffffffff] and overflow recorded in SPEFSCR.
1395 0.4,6.RS,11.RA,16.RB,21.1408:EVX:e500:evmheusianw %RS,%RA,%RB:Vector Multiply Half Words Even Unsigned Saturate Integer and Accumulate Negative into Words
1396 unsigned32 al, ah, bl, bh;
1397 unsigned64 t1, t2;
1398 signed64 tl, th;
1399 int ovl, ovh;
1400 al = (unsigned32)(unsigned16)EV_HIHALF(*rA);
1401 ah = (unsigned32)(unsigned16)EV_HIHALF(*rAh);
1402 bl = (unsigned32)(unsigned16)EV_HIHALF(*rB);
1403 bh = (unsigned32)(unsigned16)EV_HIHALF(*rBh);
1404 t1 = ah * bh;
1405 t2 = al * bl;
1406 th = (signed64)EV_ACCHIGH - (signed64)t1;
1407 tl = (signed64)EV_ACCLOW - (signed64)t2;
1408 ovl = EV_SAT_P_U32(tl);
1409 ovh = EV_SAT_P_U32(th);
1410 EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE_ACC(ovh, th, 0, 0xffffffff, th),
1411 EV_SATURATE_ACC(ovl, tl, 0, 0xffffffff, tl));
1412 //printf("evmheusianw: ovh %d ovl %d al %u ah %u bl %u bh %u t1 %qu t2 %qu tl %qd th %qd\n", ovh, ovl, al, ah, bl, bh, t1, t2, tl, th);
1413 //printf("evmheusianw: ACC = %08x.%08x; *rSh = %08x; *rS = %08x\n", (int)(ACC >> 32), (int)ACC, *rSh, *rS);
1414 EV_SET_SPEFSCR_OV(ovl, ovh);
1415 PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
1416
# evmheumianw: even-half unsigned modulo integer multiply, negate-
# accumulate, wrapped to 32 bits per word.
1417 0.4,6.RS,11.RA,16.RB,21.1416:EVX:e500:evmheumianw %RS,%RA,%RB:Vector Multiply Half Words Even Unsigned Modulo Integer and Accumulate Negative into Words
1418 unsigned32 al, ah, bl, bh;
1419 unsigned32 t1, t2;
1420 unsigned64 tl, th;
1421 al = (unsigned32)(unsigned16)EV_HIHALF(*rA);
1422 ah = (unsigned32)(unsigned16)EV_HIHALF(*rAh);
1423 bl = (unsigned32)(unsigned16)EV_HIHALF(*rB);
1424 bh = (unsigned32)(unsigned16)EV_HIHALF(*rBh);
1425 t1 = ah * bh;
1426 t2 = al * bl;
1427 th = EV_ACCHIGH - t1;
1428 tl = EV_ACCLOW - t2;
1429 EV_SET_REG2_ACC(*rSh, *rS, th & 0xffffffff, tl & 0xffffffff);
1430 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1431
# Guarded ("g") forms: a single halfword product is added to the FULL
# 64-bit accumulator (guard bits absorb overflow); the whole 64-bit sum is
# written back via EV_SET_REG1_ACC.

# evmhogsmfaa: odd-half guarded signed modulo fractional multiply-
# accumulate; the fractional product's bit 32 is manually sign-extended
# into the upper guard bits before the 64-bit add.
1432 0.4,6.RS,11.RA,16.RB,21.1327:EVX:e500:evmhogsmfaa %RS,%RA,%RB:Multiply Half Words Odd Guarded Signed Modulo Fractional and Accumulate
1433 signed32 a, b;
1434 signed64 t1, t2;
1435 a = (signed32)(signed16)EV_LOHALF(*rA);
1436 b = (signed32)(signed16)EV_LOHALF(*rB);
1437 t1 = EV_MUL16_SSF(a, b);
1438 if (t1 & ((unsigned64)1 << 32))
1439 t1 |= 0xfffffffe00000000;
1440 t2 = ACC + t1;
1441 EV_SET_REG1_ACC(*rSh, *rS, t2);
1442 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1443
# evmhogsmiaa: odd-half guarded signed modulo integer multiply-accumulate
# (operands widened to 64 bits before the multiply).
1444 0.4,6.RS,11.RA,16.RB,21.1325:EVX:e500:evmhogsmiaa %RS,%RA,%RB:Multiply Half Words Odd Guarded Signed Modulo Integer and Accumulate
1445 signed32 a, b;
1446 signed64 t1, t2;
1447 a = (signed32)(signed16)EV_LOHALF(*rA);
1448 b = (signed32)(signed16)EV_LOHALF(*rB);
1449 t1 = (signed64)a * (signed64)b;
1450 t2 = (signed64)ACC + t1;
1451 EV_SET_REG1_ACC(*rSh, *rS, t2);
1452 //printf("evmhogsmiaa: a %d b %d t1 %qd t2 %qd\n", a, b, t1, t2);
1453 //printf("evmhogsmiaa: ACC = %08x.%08x; *rSh = %08x; *rS = %08x\n", (int)(ACC >> 32), (int)ACC, *rSh, *rS);
1454 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1455
# evmhogumiaa: odd-half guarded unsigned modulo integer multiply-accumulate
# (16x16 unsigned product fits in 32 bits before widening into t1).
1456 0.4,6.RS,11.RA,16.RB,21.1324:EVX:e500:evmhogumiaa %RS,%RA,%RB:Multiply Half Words Odd Guarded Unsigned Modulo Integer and Accumulate
1457 unsigned32 a, b;
1458 unsigned64 t1, t2;
1459 a = (unsigned32)(unsigned16)EV_LOHALF(*rA);
1460 b = (unsigned32)(unsigned16)EV_LOHALF(*rB);
1461 t1 = a * b;
1462 t2 = ACC + t1;
1463 EV_SET_REG1_ACC(*rSh, *rS, t2);
1464 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1465
# evmhegsmfaa: even-half guarded signed modulo fractional multiply-
# accumulate; same bit-32 sign-extension trick as evmhogsmfaa.
1466 0.4,6.RS,11.RA,16.RB,21.1323:EVX:e500:evmhegsmfaa %RS,%RA,%RB:Multiply Half Words Even Guarded Signed Modulo Fractional and Accumulate
1467 signed32 a, b;
1468 signed64 t1, t2;
1469 a = (signed32)(signed16)EV_HIHALF(*rA);
1470 b = (signed32)(signed16)EV_HIHALF(*rB);
1471 t1 = EV_MUL16_SSF(a, b);
1472 if (t1 & ((unsigned64)1 << 32))
1473 t1 |= 0xfffffffe00000000;
1474 t2 = ACC + t1;
1475 EV_SET_REG1_ACC(*rSh, *rS, t2);
1476 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1477
1478 0.4,6.RS,11.RA,16.RB,21.1321:EVX:e500:evmhegsmiaa %RS,%RA,%RB:Multiply Half Words Even Guarded Signed Modulo Integer and Accumulate
1479 signed32 a, b;
1480 signed64 t1, t2;
1481 a = (signed32)(signed16)EV_HIHALF(*rA);
1482 b = (signed32)(signed16)EV_HIHALF(*rB);
1483 t1 = (signed64)(a * b);
1484 t2 = ACC + t1;
1485 EV_SET_REG1_ACC(*rSh, *rS, t2);
1486 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1487
# evmhegumiaa: even-half guarded unsigned modulo integer multiply-
# accumulate into the full 64-bit accumulator.
1488 0.4,6.RS,11.RA,16.RB,21.1320:EVX:e500:evmhegumiaa %RS,%RA,%RB:Multiply Half Words Even Guarded Unsigned Modulo Integer and Accumulate
1489 unsigned32 a, b;
1490 unsigned64 t1, t2;
1491 a = (unsigned32)(unsigned16)EV_HIHALF(*rA);
1492 b = (unsigned32)(unsigned16)EV_HIHALF(*rB);
1493 t1 = a * b;
1494 t2 = ACC + t1;
1495 EV_SET_REG1_ACC(*rSh, *rS, t2);
1496 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1497
1498
# Guarded "…an" forms: as the guarded accumulate forms above, but the
# product is SUBTRACTED from the 64-bit accumulator.

# evmhogsmfan: odd-half guarded signed modulo fractional, negate-accumulate.
1499 0.4,6.RS,11.RA,16.RB,21.1455:EVX:e500:evmhogsmfan %RS,%RA,%RB:Multiply Half Words Odd Guarded Signed Modulo Fractional and Accumulate Negative
1500 signed32 a, b;
1501 signed64 t1, t2;
1502 a = (signed32)(signed16)EV_LOHALF(*rA);
1503 b = (signed32)(signed16)EV_LOHALF(*rB);
1504 t1 = EV_MUL16_SSF(a, b);
1505 if (t1 & ((unsigned64)1 << 32))
1506 t1 |= 0xfffffffe00000000;
1507 t2 = ACC - t1;
1508 EV_SET_REG1_ACC(*rSh, *rS, t2);
1509 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1510
# evmhogsmian: odd-half guarded signed modulo integer, negate-accumulate.
1511 0.4,6.RS,11.RA,16.RB,21.1453:EVX:e500:evmhogsmian %RS,%RA,%RB:Multiply Half Words Odd Guarded Signed Modulo Integer and Accumulate Negative
1512 signed32 a, b;
1513 signed64 t1, t2;
1514 a = (signed32)(signed16)EV_LOHALF(*rA);
1515 b = (signed32)(signed16)EV_LOHALF(*rB);
1516 t1 = (signed64)a * (signed64)b;
1517 t2 = ACC - t1;
1518 EV_SET_REG1_ACC(*rSh, *rS, t2);
1519 //printf("evmhogsmian: a %d b %d t1 %qd t2 %qd\n", a, b, t1, t2);
1520 //printf("evmhogsmian: ACC = %08x.%08x; *rSh = %08x; *rS = %08x\n", (int)(ACC >> 32), (int)ACC, *rSh, *rS);
1521 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1522
# evmhogumian: odd-half guarded unsigned modulo integer, negate-accumulate.
1523 0.4,6.RS,11.RA,16.RB,21.1452:EVX:e500:evmhogumian %RS,%RA,%RB:Multiply Half Words Odd Guarded Unsigned Modulo Integer and Accumulate Negative
1524 unsigned32 a, b;
1525 unsigned64 t1, t2;
1526 a = (unsigned32)(unsigned16)EV_LOHALF(*rA);
1527 b = (unsigned32)(unsigned16)EV_LOHALF(*rB);
1528 t1 = (unsigned64)a * (unsigned64)b;
1529 t2 = ACC - t1;
1530 EV_SET_REG1_ACC(*rSh, *rS, t2);
1531 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1532
# evmhegsmfan: even-half guarded signed modulo fractional, negate-accumulate.
1533 0.4,6.RS,11.RA,16.RB,21.1451:EVX:e500:evmhegsmfan %RS,%RA,%RB:Multiply Half Words Even Guarded Signed Modulo Fractional and Accumulate Negative
1534 signed32 a, b;
1535 signed64 t1, t2;
1536 a = (signed32)(signed16)EV_HIHALF(*rA);
1537 b = (signed32)(signed16)EV_HIHALF(*rB);
1538 t1 = EV_MUL16_SSF(a, b);
1539 if (t1 & ((unsigned64)1 << 32))
1540 t1 |= 0xfffffffe00000000;
1541 t2 = ACC - t1;
1542 EV_SET_REG1_ACC(*rSh, *rS, t2);
1543 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1544
# evmhegsmian: even-half guarded signed modulo integer, negate-accumulate.
1545 0.4,6.RS,11.RA,16.RB,21.1449:EVX:e500:evmhegsmian %RS,%RA,%RB:Multiply Half Words Even Guarded Signed Modulo Integer and Accumulate Negative
1546 signed32 a, b;
1547 signed64 t1, t2;
1548 a = (signed32)(signed16)EV_HIHALF(*rA);
1549 b = (signed32)(signed16)EV_HIHALF(*rB);
1550 t1 = (signed64)a * (signed64)b;
1551 t2 = ACC - t1;
1552 EV_SET_REG1_ACC(*rSh, *rS, t2);
1553 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1554
# evmhegumian: even-half guarded unsigned modulo integer, negate-accumulate.
1555 0.4,6.RS,11.RA,16.RB,21.1448:EVX:e500:evmhegumian %RS,%RA,%RB:Multiply Half Words Even Guarded Unsigned Modulo Integer and Accumulate Negative
1556 unsigned32 a, b;
1557 unsigned64 t1, t2;
1558 a = (unsigned32)(unsigned16)EV_HIHALF(*rA);
1559 b = (unsigned32)(unsigned16)EV_HIHALF(*rB);
1560 t1 = (unsigned64)a * (unsigned64)b;
1561 t2 = ACC - t1;
1562 EV_SET_REG1_ACC(*rSh, *rS, t2);
1563 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1564
1565
# Word High ("evmwh…") group: full 32x32 multiplies of both words of
# rA/rB; only the HIGH 32 bits of each 64-bit product (>> 32) are written
# to the destination words.  "a" suffix also writes the accumulator.

# evmwhssf: signed saturate fractional; on the saturating 0x80000000 *
# 0x80000000 case ev_multiply32_ssf sets mov* and the result is clamped
# to 0x7fffffff, with overflow recorded in SPEFSCR.
1566 0.4,6.RS,11.RA,16.RB,21.1095:EVX:e500:evmwhssf %RS,%RA,%RB:Vector Multiply Word High Signed Saturate Fractional
1567 signed32 al, ah, bl, bh;
1568 signed64 t1, t2;
1569 int movl, movh;
1570 al = *rA;
1571 ah = *rAh;
1572 bl = *rB;
1573 bh = *rBh;
1574 t1 = ev_multiply32_ssf(al, bl, &movl);
1575 t2 = ev_multiply32_ssf(ah, bh, &movh);
1576 EV_SET_REG2(*rSh, *rS, EV_SATURATE(movh, 0x7fffffff, t2 >> 32),
1577 EV_SATURATE(movl, 0x7fffffff, t1 >> 32));
1578 EV_SET_SPEFSCR_OV(movl, movh);
1579 PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
1580
# evmwhssfa: as evmwhssf, result also latched into the accumulator.
1581 0.4,6.RS,11.RA,16.RB,21.1127:EVX:e500:evmwhssfa %RS,%RA,%RB:Vector Multiply Word High Signed Saturate Fractional and Accumulate
1582 signed32 al, ah, bl, bh;
1583 signed64 t1, t2;
1584 int movl, movh;
1585 al = *rA;
1586 ah = *rAh;
1587 bl = *rB;
1588 bh = *rBh;
1589 t1 = ev_multiply32_ssf(al, bl, &movl);
1590 t2 = ev_multiply32_ssf(ah, bh, &movh);
1591 EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE(movh, 0x7fffffff, t2 >> 32),
1592 EV_SATURATE(movl, 0x7fffffff, t1 >> 32));
1593 EV_SET_SPEFSCR_OV(movl, movh);
1594 PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
1595
# evmwhsmf: signed modulo fractional, high word of each product.
1596 0.4,6.RS,11.RA,16.RB,21.1103:EVX:e500:evmwhsmf %RS,%RA,%RB:Vector Multiply Word High Signed Modulo Fractional
1597 signed32 al, ah, bl, bh;
1598 signed64 t1, t2;
1599 al = *rA;
1600 ah = *rAh;
1601 bl = *rB;
1602 bh = *rBh;
1603 t1 = EV_MUL32_SSF(al, bl);
1604 t2 = EV_MUL32_SSF(ah, bh);
1605 EV_SET_REG2(*rSh, *rS, t2 >> 32, t1 >> 32);
1606 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1607
# evmwhsmfa: as evmwhsmf, result also latched into the accumulator.
1608 0.4,6.RS,11.RA,16.RB,21.1135:EVX:e500:evmwhsmfa %RS,%RA,%RB:Vector Multiply Word High Signed Modulo Fractional and Accumulate
1609 signed32 al, ah, bl, bh;
1610 signed64 t1, t2;
1611 al = *rA;
1612 ah = *rAh;
1613 bl = *rB;
1614 bh = *rBh;
1615 t1 = EV_MUL32_SSF(al, bl);
1616 t2 = EV_MUL32_SSF(ah, bh);
1617 EV_SET_REG2_ACC(*rSh, *rS, t2 >> 32, t1 >> 32);
1618 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1619
# evmwhsmi: signed modulo integer, high word of each 64-bit product.
1620 0.4,6.RS,11.RA,16.RB,21.1101:EVX:e500:evmwhsmi %RS,%RA,%RB:Vector Multiply Word High Signed Modulo Integer
1621 signed32 al, ah, bl, bh;
1622 signed64 t1, t2;
1623 al = *rA;
1624 ah = *rAh;
1625 bl = *rB;
1626 bh = *rBh;
1627 t1 = (signed64)al * (signed64)bl;
1628 t2 = (signed64)ah * (signed64)bh;
1629 EV_SET_REG2(*rSh, *rS, t2 >> 32, t1 >> 32);
1630 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1631
# evmwhsmia: as evmwhsmi, result also latched into the accumulator.
1632 0.4,6.RS,11.RA,16.RB,21.1133:EVX:e500:evmwhsmia %RS,%RA,%RB:Vector Multiply Word High Signed Modulo Integer and Accumulate
1633 signed32 al, ah, bl, bh;
1634 signed64 t1, t2;
1635 al = *rA;
1636 ah = *rAh;
1637 bl = *rB;
1638 bh = *rBh;
1639 t1 = (signed64)al * (signed64)bl;
1640 t2 = (signed64)ah * (signed64)bh;
1641 EV_SET_REG2_ACC(*rSh, *rS, t2 >> 32, t1 >> 32);
1642 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1643
# evmwhumi: unsigned modulo integer, high word of each 64-bit product.
1644 0.4,6.RS,11.RA,16.RB,21.1100:EVX:e500:evmwhumi %RS,%RA,%RB:Vector Multiply Word High Unsigned Modulo Integer
1645 unsigned32 al, ah, bl, bh;
1646 unsigned64 t1, t2;
1647 al = *rA;
1648 ah = *rAh;
1649 bl = *rB;
1650 bh = *rBh;
1651 t1 = (unsigned64)al * (unsigned64)bl;
1652 t2 = (unsigned64)ah * (unsigned64)bh;
1653 EV_SET_REG2(*rSh, *rS, t2 >> 32, t1 >> 32);
1654 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1655
# evmwhumia: as evmwhumi, result also latched into the accumulator.
1656 0.4,6.RS,11.RA,16.RB,21.1132:EVX:e500:evmwhumia %RS,%RA,%RB:Vector Multiply Word High Unsigned Modulo Integer and Accumulate
1657 unsigned32 al, ah, bl, bh;
1658 unsigned64 t1, t2;
1659 al = *rA;
1660 ah = *rAh;
1661 bl = *rB;
1662 bh = *rBh;
1663 t1 = (unsigned64)al * (unsigned64)bl;
1664 t2 = (unsigned64)ah * (unsigned64)bh;
1665 EV_SET_REG2_ACC(*rSh, *rS, t2 >> 32, t1 >> 32);
1666 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1667
1668
# Word Low ("evmwl…") group: full 32x32 multiplies of both words; only the
# LOW 32 bits of each 64-bit product are written to the destination words.

# evmwlssf: signed saturate fractional; NOTE(review) the saturation value
# here is 0xffffffff (all ones, the truncated low word) rather than the
# 0x7fffffff used by the high-word forms — presumably intentional for the
# low half of the saturated 64-bit fraction; confirm against the SPE PEM.
1669 0.4,6.RS,11.RA,16.RB,21.1091:EVX:e500:evmwlssf %RS,%RA,%RB:Vector Multiply Word Low Signed Saturate Fractional
1670 signed32 al, ah, bl, bh;
1671 signed64 t1, t2;
1672 int movl, movh;
1673 al = *rA;
1674 ah = *rAh;
1675 bl = *rB;
1676 bh = *rBh;
1677 t1 = ev_multiply32_ssf(al, bl, &movl);
1678 t2 = ev_multiply32_ssf(ah, bh, &movh);
1679 EV_SET_REG2(*rSh, *rS, EV_SATURATE(movh, 0xffffffff, t2),
1680 EV_SATURATE(movl, 0xffffffff, t1));
1681 EV_SET_SPEFSCR_OV(movl, movh);
1682 PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
1683
# evmwlssfa: as evmwlssf, result also latched into the accumulator.
1684 0.4,6.RS,11.RA,16.RB,21.1123:EVX:e500:evmwlssfa %RS,%RA,%RB:Vector Multiply Word Low Signed Saturate Fractional and Accumulate
1685 signed32 al, ah, bl, bh;
1686 signed64 t1, t2;
1687 int movl, movh;
1688 al = *rA;
1689 ah = *rAh;
1690 bl = *rB;
1691 bh = *rBh;
1692 t1 = ev_multiply32_ssf(al, bl, &movl);
1693 t2 = ev_multiply32_ssf(ah, bh, &movh);
1694 EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE(movh, 0xffffffff, t2),
1695 EV_SATURATE(movl, 0xffffffff, t1));
1696 EV_SET_SPEFSCR_OV(movl, movh);
1697 PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
1698
# evmwlsmf: signed modulo fractional, low word of each product.
1699 0.4,6.RS,11.RA,16.RB,21.1099:EVX:e500:evmwlsmf %RS,%RA,%RB:Vector Multiply Word Low Signed Modulo Fractional
1700 signed32 al, ah, bl, bh;
1701 signed64 t1, t2;
1702 al = *rA;
1703 ah = *rAh;
1704 bl = *rB;
1705 bh = *rBh;
1706 t1 = EV_MUL32_SSF(al, bl);
1707 t2 = EV_MUL32_SSF(ah, bh);
1708 EV_SET_REG2(*rSh, *rS, t2, t1);
1709 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1710
# evmwlsmfa: as evmwlsmf, result also latched into the accumulator.
1711 0.4,6.RS,11.RA,16.RB,21.1131:EVX:e500:evmwlsmfa %RS,%RA,%RB:Vector Multiply Word Low Signed Modulo Fractional and Accumulate
1712 signed32 al, ah, bl, bh;
1713 signed64 t1, t2;
1714 al = *rA;
1715 ah = *rAh;
1716 bl = *rB;
1717 bh = *rBh;
1718 t1 = EV_MUL32_SSF(al, bl);
1719 t2 = EV_MUL32_SSF(ah, bh);
1720 EV_SET_REG2_ACC(*rSh, *rS, t2, t1);
1721 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1722
# evmwlumi: unsigned modulo integer, low word of each 64-bit product.
1723 0.4,6.RS,11.RA,16.RB,21.1096:EVX:e500:evmwlumi %RS,%RA,%RB:Vector Multiply Word Low Unsigned Modulo Integer
1724 unsigned32 al, ah, bl, bh;
1725 unsigned64 t1, t2;
1726 al = *rA;
1727 ah = *rAh;
1728 bl = *rB;
1729 bh = *rBh;
1730 t1 = (unsigned64)al * (unsigned64)bl;
1731 t2 = (unsigned64)ah * (unsigned64)bh;
1732 EV_SET_REG2(*rSh, *rS, t2, t1);
1733 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1734
1735 0.4,6.RS,11.RA,16.RB,21.1128:EVX:e500:evmwlumia %RS,%RA,%RB:Vector Multiply Word Low Unsigned Modulo Integer and Accumulate
1736 unsigned32 al, ah, bl, bh;
1737 unsigned64 t1, t2;
1738 al = *rA;
1739 ah = *rAh;
1740 bl = *rB;
1741 bh = *rBh;
1742 t1 = (unsigned64)al * (unsigned64)bl;
1743 t2 = (unsigned64)ah * (unsigned64)bh;
1744 EV_SET_REG2_ACC(*rSh, *rS, t2, t1);
1745 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1746
1747
# evmwlssfaaw: saturating fractional multiply per half; each (saturated)
# product is ADDED to the matching accumulator word, the sum is clamped to
# the signed 32-bit range, and both multiply- and add-overflow feed SPEFSCR.
1748 0.4,6.RS,11.RA,16.RB,21.1347:EVX:e500:evmwlssfaaw %RS,%RA,%RB:Vector Multiply Word Low Signed Saturate Fractional and Accumulate in Words
1749 signed32 al, ah, bl, bh;
1750 signed64 t1, t2, tl, th;
1751 int movl, movh, ovl, ovh;
1752 al = *rA;
1753 ah = *rAh;
1754 bl = *rB;
1755 bh = *rBh;
1756 t1 = ev_multiply32_ssf(ah, bh, &movh);
1757 t2 = ev_multiply32_ssf(al, bl, &movl);
1758 th = EV_ACCHIGH + EV_SATURATE(movh, 0xffffffff, t1);
1759 tl = EV_ACCLOW + EV_SATURATE(movl, 0xffffffff, t2);
1760 ovh = EV_SAT_P_S32(th);
1761 ovl = EV_SAT_P_S32(tl);
1762 EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE_ACC(ovh, th, 0x80000000, 0x7fffffff, th),
1763 EV_SATURATE_ACC(ovl, tl, 0x80000000, 0x7fffffff, tl));
1764 EV_SET_SPEFSCR_OV(movl | ovl, movh | ovh);
1765 PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
1766 
# evmwlssiaaw: signed integer multiply per half; the low 32 bits of each
# product are added to the accumulator words with signed-32 saturation.
1767 0.4,6.RS,11.RA,16.RB,21.1345:EVX:e500:evmwlssiaaw %RS,%RA,%RB:Vector Multiply Word Low Signed Saturate Integer and Accumulate in Words
1768 signed32 al, ah, bl, bh;
1769 signed64 t1, t2, tl, th;
1770 int ovl, ovh;
1771 al = *rA;
1772 ah = *rAh;
1773 bl = *rB;
1774 bh = *rBh;
1775 t1 = (signed64)ah * (signed64)bh;
1776 t2 = (signed64)al * (signed64)bl;
1777 th = EV_ACCHIGH + (t1 & 0xffffffff);
1778 tl = EV_ACCLOW + (t2 & 0xffffffff);
1779 ovh = EV_SAT_P_S32(th);
1780 ovl = EV_SAT_P_S32(tl);
1781 EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE_ACC(ovh, th, 0x80000000, 0x7fffffff, th),
1782 EV_SATURATE_ACC(ovl, tl, 0x80000000, 0x7fffffff, tl));
1783 EV_SET_SPEFSCR_OV(ovl, ovh);
1784 PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
1785 
# evmwlsmfaaw: modulo fractional multiply-accumulate; the overflow flag from
# ev_multiply32_smf is deliberately discarded (modulo semantics, no SPEFSCR).
1786 0.4,6.RS,11.RA,16.RB,21.1355:EVX:e500:evmwlsmfaaw %RS,%RA,%RB:Vector Multiply Word Low Signed Modulo Fractional and Accumulate in Words
1787 signed32 al, ah, bl, bh;
1788 signed64 t1, t2;
1789 int mov;
1790 al = *rA;
1791 ah = *rAh;
1792 bl = *rB;
1793 bh = *rBh;
1794 t1 = ev_multiply32_smf(ah, bh, &mov);
1795 t2 = ev_multiply32_smf(al, bl, &mov);
1796 EV_SET_REG2_ACC(*rSh, *rS, EV_ACCHIGH + (t1 & 0xffffffff),
1797 EV_ACCLOW + (t2 & 0xffffffff));
1798 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1799 
# evmwlsmiaaw: signed modulo integer multiply-accumulate, no saturation.
1800 0.4,6.RS,11.RA,16.RB,21.1353:EVX:e500:evmwlsmiaaw %RS,%RA,%RB:Vector Multiply Word Low Signed Modulo Integer and Accumulate in Words
1801 signed32 al, ah, bl, bh;
1802 signed64 t1, t2;
1803 al = *rA;
1804 ah = *rAh;
1805 bl = *rB;
1806 bh = *rBh;
1807 t1 = (signed64)ah * (signed64)bh;
1808 t2 = (signed64)al * (signed64)bl;
1809 EV_SET_REG2_ACC(*rSh, *rS, EV_ACCHIGH + (t1 & 0xffffffff),
1810 EV_ACCLOW + (t2 & 0xffffffff));
1811 //printf("evmwlsmiaaw: al %d ah %d bl %d bh %d t1 %qd t2 %qd\n", al, ah, bl, bh, t1, t2);
1812 //printf("evmwlsmiaaw: *rSh = %08x; *rS = %08x\n", *rSh, *rS);
1813 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1814 
# evmwlusiaaw: unsigned multiply-accumulate; a carry out of bit 31 of either
# sum (detected via the >> 32 test) saturates that word to 0xffffffff.
1815 0.4,6.RS,11.RA,16.RB,21.1344:EVX:e500:evmwlusiaaw %RS,%RA,%RB:Vector Multiply Word Low Unsigned Saturate Integer and Accumulate in Words
1816 unsigned32 al, ah, bl, bh;
1817 unsigned64 t1, t2, tl, th;
1818 int ovl, ovh;
1819 al = *rA;
1820 ah = *rAh;
1821 bl = *rB;
1822 bh = *rBh;
1823 t1 = (unsigned64)ah * (unsigned64)bh;
1824 t2 = (unsigned64)al * (unsigned64)bl;
1825 th = EV_ACCHIGH + (t1 & 0xffffffff);
1826 tl = EV_ACCLOW + (t2 & 0xffffffff);
1827 ovh = (th >> 32);
1828 ovl = (tl >> 32);
1829 EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE(ovh, 0xffffffff, th),
1830 EV_SATURATE(ovl, 0xffffffff, tl));
1831 EV_SET_SPEFSCR_OV(ovl, ovh);
1832 PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
1833 
# evmwlumiaaw: unsigned modulo multiply-accumulate, no saturation.
1834 0.4,6.RS,11.RA,16.RB,21.1352:EVX:e500:evmwlumiaaw %RS,%RA,%RB:Vector Multiply Word Low Unsigned Modulo Integer and Accumulate in Words
1835 unsigned32 al, ah, bl, bh;
1836 unsigned64 t1, t2;
1837 al = *rA;
1838 ah = *rAh;
1839 bl = *rB;
1840 bh = *rBh;
1841 t1 = (unsigned64)ah * (unsigned64)bh;
1842 t2 = (unsigned64)al * (unsigned64)bl;
1843 EV_SET_REG2_ACC(*rSh, *rS, EV_ACCHIGH + (t1 & 0xffffffff),
1844 EV_ACCLOW + (t2 & 0xffffffff));
1845 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1846
1847
# The *anw forms mirror the *aaw forms above, but SUBTRACT each product from
# the matching accumulator word ("Accumulate Negative in Words").
#
# evmwlssfanw: saturating fractional multiply, accumulator minus product,
# result clamped to the signed 32-bit range; overflow feeds SPEFSCR.
1848 0.4,6.RS,11.RA,16.RB,21.1475:EVX:e500:evmwlssfanw %RS,%RA,%RB:Vector Multiply Word Low Signed Saturate Fractional and Accumulate Negative in Words
1849 signed32 al, ah, bl, bh;
1850 signed64 t1, t2, tl, th;
1851 int movl, movh, ovl, ovh;
1852 al = *rA;
1853 ah = *rAh;
1854 bl = *rB;
1855 bh = *rBh;
1856 t1 = ev_multiply32_ssf(ah, bh, &movh);
1857 t2 = ev_multiply32_ssf(al, bl, &movl);
1858 th = EV_ACCHIGH - EV_SATURATE(movh, 0xffffffff, t1);
1859 tl = EV_ACCLOW - EV_SATURATE(movl, 0xffffffff, t2);
1860 ovh = EV_SAT_P_S32(th);
1861 ovl = EV_SAT_P_S32(tl);
1862 EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE_ACC(ovh, th, 0x80000000, 0x7fffffff, th),
1863 EV_SATURATE_ACC(ovl, tl, 0x80000000, 0x7fffffff, tl));
1864 EV_SET_SPEFSCR_OV(movl | ovl, movh | ovh);
1865 PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
1866 
# evmwlssianw: signed integer multiply, accumulator minus low 32 bits of the
# product, with signed-32 saturation.
1867 0.4,6.RS,11.RA,16.RB,21.1473:EVX:e500:evmwlssianw %RS,%RA,%RB:Vector Multiply Word Low Signed Saturate Integer and Accumulate Negative in Words
1868 signed32 al, ah, bl, bh;
1869 signed64 t1, t2, tl, th;
1870 int ovl, ovh;
1871 al = *rA;
1872 ah = *rAh;
1873 bl = *rB;
1874 bh = *rBh;
1875 t1 = (signed64)ah * (signed64)bh;
1876 t2 = (signed64)al * (signed64)bl;
1877 th = EV_ACCHIGH - (t1 & 0xffffffff);
1878 tl = EV_ACCLOW - (t2 & 0xffffffff);
1879 ovh = EV_SAT_P_S32(th);
1880 ovl = EV_SAT_P_S32(tl);
1881 EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE_ACC(ovh, th, 0x80000000, 0x7fffffff, th),
1882 EV_SATURATE_ACC(ovl, tl, 0x80000000, 0x7fffffff, tl));
1883 EV_SET_SPEFSCR_OV(ovl, ovh);
1884 PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
1885 
# evmwlsmfanw: modulo fractional multiply, subtracted from the accumulator
# words; the multiply overflow flag is deliberately discarded.
1886 0.4,6.RS,11.RA,16.RB,21.1483:EVX:e500:evmwlsmfanw %RS,%RA,%RB:Vector Multiply Word Low Signed Modulo Fractional and Accumulate Negative in Words
1887 signed32 al, ah, bl, bh;
1888 signed64 t1, t2;
1889 int mov;
1890 al = *rA;
1891 ah = *rAh;
1892 bl = *rB;
1893 bh = *rBh;
1894 t1 = ev_multiply32_smf(ah, bh, &mov);
1895 t2 = ev_multiply32_smf(al, bl, &mov);
1896 EV_SET_REG2_ACC(*rSh, *rS, EV_ACCHIGH - (t1 & 0xffffffff),
1897 EV_ACCLOW - (t2 & 0xffffffff));
1898 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1899 
# evmwlsmianw: signed modulo multiply, subtracted, no saturation.
1900 0.4,6.RS,11.RA,16.RB,21.1481:EVX:e500:evmwlsmianw %RS,%RA,%RB:Vector Multiply Word Low Signed Modulo Integer and Accumulate Negative in Words
1901 signed32 al, ah, bl, bh;
1902 signed64 t1, t2;
1903 al = *rA;
1904 ah = *rAh;
1905 bl = *rB;
1906 bh = *rBh;
1907 t1 = (signed64)ah * (signed64)bh;
1908 t2 = (signed64)al * (signed64)bl;
1909 EV_SET_REG2_ACC(*rSh, *rS, EV_ACCHIGH - (t1 & 0xffffffff),
1910 EV_ACCLOW - (t2 & 0xffffffff));
1911 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1912 
# evmwlusianw: unsigned multiply, subtracted; a borrow out of bit 31 (the
# >> 32 test on the wrapped difference) saturates that word to 0xffffffff.
1913 0.4,6.RS,11.RA,16.RB,21.1472:EVX:e500:evmwlusianw %RS,%RA,%RB:Vector Multiply Word Low Unsigned Saturate Integer and Accumulate Negative in Words
1914 unsigned32 al, ah, bl, bh;
1915 unsigned64 t1, t2, tl, th;
1916 int ovl, ovh;
1917 al = *rA;
1918 ah = *rAh;
1919 bl = *rB;
1920 bh = *rBh;
1921 t1 = (unsigned64)ah * (unsigned64)bh;
1922 t2 = (unsigned64)al * (unsigned64)bl;
1923 th = EV_ACCHIGH - (t1 & 0xffffffff);
1924 tl = EV_ACCLOW - (t2 & 0xffffffff);
1925 ovh = (th >> 32);
1926 ovl = (tl >> 32);
1927 EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE(ovh, 0xffffffff, th),
1928 EV_SATURATE(ovl, 0xffffffff, tl));
1929 //printf("evmwlusianw: ovl %d ovh %d al %d ah %d bl %d bh %d t1 %qd t2 %qd th %qd tl %qd\n", ovl, ovh, al, ah, al, bh, t1, t2, th, tl);
1930 //printf("evmwlusianw: ACC = %08x.%08x; *rSh = %08x; *rS = %08x\n", (int)(ACC >> 32), (int)ACC, *rSh, *rS);
1931 EV_SET_SPEFSCR_OV(ovl, ovh);
1932 PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
1933 
# evmwlumianw: unsigned modulo multiply, subtracted, no saturation.
1934 0.4,6.RS,11.RA,16.RB,21.1480:EVX:e500:evmwlumianw %RS,%RA,%RB:Vector Multiply Word Low Unsigned Modulo Integer and Accumulate Negative in Words
1935 unsigned32 al, ah, bl, bh;
1936 unsigned64 t1, t2;
1937 al = *rA;
1938 ah = *rAh;
1939 bl = *rB;
1940 bh = *rBh;
1941 t1 = (unsigned64)ah * (unsigned64)bh;
1942 t2 = (unsigned64)al * (unsigned64)bl;
1943 EV_SET_REG2_ACC(*rSh, *rS, EV_ACCHIGH - (t1 & 0xffffffff),
1944 EV_ACCLOW - (t2 & 0xffffffff));
1945 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1946
1947
# The evmw* (no "h"/"l") forms operate on the LOW words only and produce a
# full 64-bit result in the rS pair.
#
# evmwssf: signed saturating fractional multiply of the low words; on
# overflow the 64-bit result saturates to 0x7fffffffffffffff; SPEFSCR set.
1948 0.4,6.RS,11.RA,16.RB,21.1107:EVX:e500:evmwssf %RS,%RA,%RB:Vector Multiply Word Signed Saturate Fractional
1949 signed32 a, b;
1950 signed64 t;
1951 int movl;
1952 a = *rA;
1953 b = *rB;
1954 t = ev_multiply32_ssf(a, b, &movl);
1955 EV_SET_REG1(*rSh, *rS, EV_SATURATE(movl, 0x7fffffffffffffff, t));
1956 EV_SET_SPEFSCR_OV(movl, 0);
1957 PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
1958 
# evmwssfa: as evmwssf, plus accumulator update.
1959 0.4,6.RS,11.RA,16.RB,21.1139:EVX:e500:evmwssfa %RS,%RA,%RB:Vector Multiply Word Signed Saturate Fractional and Accumulate
1960 signed32 a, b;
1961 signed64 t;
1962 int movl;
1963 a = *rA;
1964 b = *rB;
1965 t = ev_multiply32_ssf(a, b, &movl);
1966 EV_SET_REG1_ACC(*rSh, *rS, EV_SATURATE(movl, 0x7fffffffffffffff, t));
1967 EV_SET_SPEFSCR_OV(movl, 0);
1968 PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
1969 
# evmwsmf: modulo fractional multiply of the low words, 64-bit result.
# The overflow flag written by ev_multiply32_smf is discarded (modulo form).
1970 0.4,6.RS,11.RA,16.RB,21.1115:EVX:e500:evmwsmf %RS,%RA,%RB:Vector Multiply Word Signed Modulo Fractional
1971 signed32 a, b;
1972 signed64 t;
1973 int movl;
1974 a = *rA;
1975 b = *rB;
1976 t = ev_multiply32_smf(a, b, &movl);
1977 EV_SET_REG1(*rSh, *rS, t);
1978 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1979 
# evmwsmfa: as evmwsmf, plus accumulator update.
1980 0.4,6.RS,11.RA,16.RB,21.1147:EVX:e500:evmwsmfa %RS,%RA,%RB:Vector Multiply Word Signed Modulo Fractional and Accumulate
1981 signed32 a, b;
1982 signed64 t;
1983 int movl;
1984 a = *rA;
1985 b = *rB;
1986 t = ev_multiply32_smf(a, b, &movl);
1987 EV_SET_REG1_ACC(*rSh, *rS, t);
1988 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1989 
# evmwsmi: signed 32x32->64 multiply of the low words.  (movl is declared but
# never used here.)
1990 0.4,6.RS,11.RA,16.RB,21.1113:EVX:e500:evmwsmi %RS,%RA,%RB:Vector Multiply Word Signed Modulo Integer
1991 signed32 a, b;
1992 signed64 t;
1993 int movl;
1994 a = *rA;
1995 b = *rB;
1996 t = (signed64)a * (signed64)b;
1997 EV_SET_REG1(*rSh, *rS, t);
1998 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1999 
# evmwsmia: as evmwsmi, plus accumulator update.
2000 0.4,6.RS,11.RA,16.RB,21.1145:EVX:e500:evmwsmia %RS,%RA,%RB:Vector Multiply Word Signed Modulo Integer and Accumulate
2001 signed32 a, b;
2002 signed64 t;
2003 int movl;
2004 a = *rA;
2005 b = *rB;
2006 t = (signed64)a * (signed64)b;
2007 EV_SET_REG1_ACC(*rSh, *rS, t);
2008 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
2009
# evmwumi: unsigned 32x32->64 multiply of the low words of rA and rB; the
# full 64-bit product is written to the rS pair (no accumulator update).
# Fixes: "Unigned" typo in the description; the operands were cast through
# (signed64) — value-identical for unsigned32 sources, but (unsigned64) states
# the intent and matches the other unsigned multiplies; dropped unused movl.
2010 0.4,6.RS,11.RA,16.RB,21.1112:EVX:e500:evmwumi %RS,%RA,%RB:Vector Multiply Word Unsigned Modulo Integer
2011 unsigned32 a, b;
2012 unsigned64 t;
2014 a = *rA;
2015 b = *rB;
2016 t = (unsigned64)a * (unsigned64)b;
2017 EV_SET_REG1(*rSh, *rS, t);
2018 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
2019
# evmwumia: as evmwumi (unsigned low-word 32x32->64 multiply) but the 64-bit
# product also updates the accumulator via EV_SET_REG1_ACC.
# Fixes: "Unigned" typo in the description; (signed64) casts replaced with
# (unsigned64) for clarity (value-identical for unsigned32 sources); dropped
# the unused local movl.
2020 0.4,6.RS,11.RA,16.RB,21.1144:EVX:e500:evmwumia %RS,%RA,%RB:Vector Multiply Word Unsigned Modulo Integer and Accumulate
2021 unsigned32 a, b;
2022 unsigned64 t;
2024 a = *rA;
2025 b = *rB;
2026 t = (unsigned64)a * (unsigned64)b;
2027 EV_SET_REG1_ACC(*rSh, *rS, t);
2028 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
2029
2030
# evmwssfaa: saturating fractional multiply of the low words; the (saturated)
# 64-bit product is ADDED to the 64-bit accumulator; sum goes to rS and ACC.
# NOTE(review): the 64-bit add itself is not checked for overflow here —
# confirm against the SPE spec whether that is intended.
2031 0.4,6.RS,11.RA,16.RB,21.1363:EVX:e500:evmwssfaa %RS,%RA,%RB:Vector Multiply Word Signed Saturate Fractional Add and Accumulate
2032 signed64 t1, t2;
2033 signed32 a, b;
2034 int movl;
2035 a = *rA;
2036 b = *rB;
2037 t1 = ev_multiply32_ssf(a, b, &movl);
2038 t2 = ACC + EV_SATURATE(movl, 0x7fffffffffffffff, t1);
2039 EV_SET_REG1_ACC(*rSh, *rS, t2);
2040 EV_SET_SPEFSCR_OV(movl, 0);
2041 PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
2042 
# evmwsmfaa: modulo fractional multiply of the low words added to the 64-bit
# accumulator; no saturation, no SPEFSCR update.
2043 0.4,6.RS,11.RA,16.RB,21.1371:EVX:e500:evmwsmfaa %RS,%RA,%RB:Vector Multiply Word Signed Modulo Fractional Add and Accumulate
2044 signed64 t1, t2;
2045 signed32 a, b;
2046 int movl;
2047 a = *rA;
2048 b = *rB;
2049 t1 = ev_multiply32_smf(a, b, &movl);
2050 t2 = ACC + t1;
2051 EV_SET_REG1_ACC(*rSh, *rS, t2);
2052 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
2053
# evmwsmiaa: signed 32x32->64 multiply of the low words; the product is added
# to the 64-bit accumulator and the sum written to the rS pair and ACC.
# Fix: description read "Signed Modulo Integer And and Accumulate" — the
# mnemonic expands to "Add and Accumulate" (compare evmwumiaa below).
2054 0.4,6.RS,11.RA,16.RB,21.1369:EVX:e500:evmwsmiaa %RS,%RA,%RB:Vector Multiply Word Signed Modulo Integer Add and Accumulate
2055 signed64 t1, t2;
2056 signed32 a, b;
2057 a = *rA;
2058 b = *rB;
2059 t1 = (signed64)a * (signed64)b;
2060 t2 = ACC + t1;
2061 EV_SET_REG1_ACC(*rSh, *rS, t2);
2062 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
2063
# evmwumiaa: unsigned 32x32->64 multiply of the low words; the product is
# added to the 64-bit accumulator; sum goes to the rS pair and ACC.
2064 0.4,6.RS,11.RA,16.RB,21.1368:EVX:e500:evmwumiaa %RS,%RA,%RB:Vector Multiply Word Unsigned Modulo Integer Add and Accumulate
2065 unsigned64 t1, t2;
2066 unsigned32 a, b;
2067 a = *rA;
2068 b = *rB;
2069 t1 = (unsigned64)a * (unsigned64)b;
2070 t2 = ACC + t1;
2071 EV_SET_REG1_ACC(*rSh, *rS, t2);
2072 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
2073
2074
# The *an forms mirror the *aa forms above but SUBTRACT the 64-bit product
# from the accumulator ("Accumulate Negative").
#
# evmwssfan: saturating fractional multiply; ACC minus the saturated product.
2075 0.4,6.RS,11.RA,16.RB,21.1491:EVX:e500:evmwssfan %RS,%RA,%RB:Vector Multiply Word Signed Saturate Fractional and Accumulate Negative
2076 signed64 t1, t2;
2077 signed32 a, b;
2078 int movl;
2079 a = *rA;
2080 b = *rB;
2081 t1 = ev_multiply32_ssf(a, b, &movl);
2082 t2 = ACC - EV_SATURATE(movl, 0x7fffffffffffffff, t1);
2083 EV_SET_REG1_ACC(*rSh, *rS, t2);
2084 EV_SET_SPEFSCR_OV(movl, 0);
2085 PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
2086 
# evmwsmfan: modulo fractional multiply subtracted from ACC; no saturation.
2087 0.4,6.RS,11.RA,16.RB,21.1499:EVX:e500:evmwsmfan %RS,%RA,%RB:Vector Multiply Word Signed Modulo Fractional and Accumulate Negative
2088 signed64 t1, t2;
2089 signed32 a, b;
2090 int movl;
2091 a = *rA;
2092 b = *rB;
2093 t1 = ev_multiply32_smf(a, b, &movl);
2094 t2 = ACC - t1;
2095 EV_SET_REG1_ACC(*rSh, *rS, t2);
2096 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
2097 
# evmwsmian: signed 32x32->64 product subtracted from ACC.
2098 0.4,6.RS,11.RA,16.RB,21.1497:EVX:e500:evmwsmian %RS,%RA,%RB:Vector Multiply Word Signed Modulo Integer and Accumulate Negative
2099 signed64 t1, t2;
2100 signed32 a, b;
2101 a = *rA;
2102 b = *rB;
2103 t1 = (signed64)a * (signed64)b;
2104 t2 = ACC - t1;
2105 EV_SET_REG1_ACC(*rSh, *rS, t2);
2106 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
2107 
# evmwumian: unsigned 32x32->64 product subtracted from ACC.
2108 0.4,6.RS,11.RA,16.RB,21.1496:EVX:e500:evmwumian %RS,%RA,%RB:Vector Multiply Word Unsigned Modulo Integer and Accumulate Negative
2109 unsigned64 t1, t2;
2110 unsigned32 a, b;
2111 a = *rA;
2112 b = *rB;
2113 t1 = (unsigned64)a * (unsigned64)b;
2114 t2 = ACC - t1;
2115 EV_SET_REG1_ACC(*rSh, *rS, t2);
2116 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
2117
2118
# evaddssiaaw: add each signed word of rA to the matching accumulator word,
# saturating to the signed 32-bit range; overflow feeds SPEFSCR.
# NOTE(review): the second EV_SATURATE_ACC argument here is
# "t1 & ((unsigned64)1 << 32)" (a single bit of the wrapped sum), while the
# otherwise-symmetric evsubfssiaaw below passes the full t1 — confirm which
# form EV_SATURATE_ACC expects for its sign selector.
2119 0.4,6.RS,11.RA,16.0,21.1217:EVX:e500:evaddssiaaw %RS,%RA:Vector Add Signed Saturate Integer to Accumulator Word
2120 signed64 t1, t2;
2121 signed32 al, ah;
2122 int ovl, ovh;
2123 al = *rA;
2124 ah = *rAh;
2125 t1 = (signed64)EV_ACCHIGH + (signed64)ah;
2126 t2 = (signed64)EV_ACCLOW + (signed64)al;
2127 ovh = EV_SAT_P_S32(t1);
2128 ovl = EV_SAT_P_S32(t2);
2129 EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE_ACC(ovh, t1 & ((unsigned64)1 << 32), 0x80000000, 0x7fffffff, t1),
2130 EV_SATURATE_ACC(ovl, t2 & ((unsigned64)1 << 32), 0x80000000, 0x7fffffff, t2));
2131 EV_SET_SPEFSCR_OV(ovl, ovh);
2132 PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK, spr_spefscr);
2133 
# evaddsmiaaw: modulo (wrapping) per-word add of rA into the accumulator.
2134 0.4,6.RS,11.RA,16.0,21.1225:EVX:e500:evaddsmiaaw %RS,%RA:Vector Add Signed Modulo Integer to Accumulator Word
2135 signed64 t1, t2;
2136 signed32 al, ah;
2137 al = *rA;
2138 ah = *rAh;
2139 t1 = (signed64)EV_ACCHIGH + (signed64)ah;
2140 t2 = (signed64)EV_ACCLOW + (signed64)al;
2141 EV_SET_REG2_ACC(*rSh, *rS, t1, t2);
2142 //printf("evaddsmiaaw: al %d ah %d t1 %qd t2 %qd\n", al, ah, t1, t2);
2143 //printf("evaddsmiaaw: ACC = %08x.%08x; *rSh = %08x; *rS = %08x\n", (int)(ACC >> 32), (int)ACC, *rSh, *rS);
2144 PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
2145 
# evaddusiaaw: unsigned per-word add saturating to 0xffffffff (overflow
# detected via EV_SAT_P_U32 on the widened sum).
2146 0.4,6.RS,11.RA,16.0,21.1216:EVX:e500:evaddusiaaw %RS,%RA:Vector Add Unsigned Saturate Integer to Accumulator Word
2147 signed64 t1, t2;
2148 unsigned32 al, ah;
2149 int ovl, ovh;
2150 al = *rA;
2151 ah = *rAh;
2152 t1 = (signed64)EV_ACCHIGH + (signed64)ah;
2153 t2 = (signed64)EV_ACCLOW + (signed64)al;
2154 ovh = EV_SAT_P_U32(t1);
2155 ovl = EV_SAT_P_U32(t2);
2156 EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE(ovh, 0xffffffff, t1),
2157 EV_SATURATE(ovl, 0xffffffff, t2));
2158 //printf("evaddusiaaw: ovl %d ovh %d al %d ah %d t1 %qd t2 %qd\n", ovl, ovh, al, ah, t1, t2);
2159 //printf("evaddusiaaw: ACC = %08x.%08x; *rSh = %08x; *rS = %08x\n", (int)(ACC >> 32), (int)ACC, *rSh, *rS);
2160 EV_SET_SPEFSCR_OV(ovl, ovh);
2161 PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK, spr_spefscr);
2162 
# evaddumiaaw: unsigned modulo per-word add, no saturation.  (t2 relies on
# implicit widening, unlike t1's explicit casts — same result for addition.)
2163 0.4,6.RS,11.RA,16.0,21.1224:EVX:e500:evaddumiaaw %RS,%RA:Vector Add Unsigned Modulo Integer to Accumulator Word
2164 unsigned64 t1, t2;
2165 unsigned32 al, ah;
2166 al = *rA;
2167 ah = *rAh;
2168 t1 = (unsigned64)EV_ACCHIGH + (unsigned64)ah;
2169 t2 = EV_ACCLOW + al;
2170 EV_SET_REG2_ACC(*rSh, *rS, t1, t2);
2171 PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
2172
2173
# evsubfssiaaw: subtract each signed word of rA from the matching accumulator
# word, saturating to the signed 32-bit range; overflow feeds SPEFSCR.
2174 0.4,6.RS,11.RA,16.0,21.1219:EVX:e500:evsubfssiaaw %RS,%RA:Vector Subtract Signed Saturate Integer to Accumulator Word
2175 signed64 t1, t2;
2176 signed32 al, ah;
2177 int ovl, ovh;
2178 al = *rA;
2179 ah = *rAh;
2180 t1 = (signed64)EV_ACCHIGH - (signed64)ah;
2181 t2 = (signed64)EV_ACCLOW - (signed64)al;
2182 ovh = EV_SAT_P_S32(t1);
2183 ovl = EV_SAT_P_S32(t2);
2184 EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE_ACC(ovh, t1, 0x80000000, 0x7fffffff, t1),
2185 EV_SATURATE_ACC(ovl, t2, 0x80000000, 0x7fffffff, t2));
2186 EV_SET_SPEFSCR_OV(ovl, ovh);
2187 PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK, spr_spefscr);
2188 
# evsubfsmiaaw: modulo (wrapping) per-word subtract of rA from ACC.
2189 0.4,6.RS,11.RA,16.0,21.1227:EVX:e500:evsubfsmiaaw %RS,%RA:Vector Subtract Signed Modulo Integer to Accumulator Word
2190 signed64 t1, t2;
2191 signed32 al, ah;
2192 al = *rA;
2193 ah = *rAh;
2194 t1 = (signed64)EV_ACCHIGH - (signed64)ah;
2195 t2 = (signed64)EV_ACCLOW - (signed64)al;
2196 EV_SET_REG2_ACC(*rSh, *rS, t1, t2);
2197 PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
2198 
# evsubfusiaaw: unsigned per-word subtract; underflow (EV_SAT_P_U32) clamps
# the word to 0 and raises the SPEFSCR overflow bit.
2199 0.4,6.RS,11.RA,16.0,21.1218:EVX:e500:evsubfusiaaw %RS,%RA:Vector Subtract Unsigned Saturate Integer to Accumulator Word
2200 signed64 t1, t2;
2201 unsigned32 al, ah;
2202 int ovl, ovh;
2203 
2204 al = *rA;
2205 ah = *rAh;
2206 t1 = (signed64)EV_ACCHIGH - (signed64)ah;
2207 t2 = (signed64)EV_ACCLOW - (signed64)al;
2208 ovh = EV_SAT_P_U32(t1);
2209 ovl = EV_SAT_P_U32(t2);
2210 EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE(ovh, 0, t1),
2211 EV_SATURATE(ovl, 0, t2));
2212 EV_SET_SPEFSCR_OV(ovl, ovh);
2213 PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK, spr_spefscr);
2214 
# evsubfumiaaw: unsigned modulo per-word subtract, no saturation.
2215 0.4,6.RS,11.RA,16.0,21.1226:EVX:e500:evsubfumiaaw %RS,%RA:Vector Subtract Unsigned Modulo Integer to Accumulator Word
2216 unsigned64 t1, t2;
2217 unsigned32 al, ah;
2218 al = *rA;
2219 ah = *rAh;
2220 t1 = (unsigned64)EV_ACCHIGH - (unsigned64)ah;
2221 t2 = (unsigned64)EV_ACCLOW - (unsigned64)al;
2222 EV_SET_REG2_ACC(*rSh, *rS, t1, t2);
2223 PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
2224 
2225 
# evmra: copy the rA pair into the rS pair and load it into the accumulator.
2226 0.4,6.RS,11.RA,16.0,21.1220:EVX:e500:evmra %RS,%RA:Initialize Accumulator
2227 EV_SET_REG2_ACC(*rSh, *rS, *rAh, *rA);
2228 PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
2229
# evdivws: signed divide of each 32-bit half of rA by the matching half of
# rB.  A zero divisor saturates the quotient (0x80000000 for a negative
# dividend, 0x7fffffff otherwise) and 0x80000000 / -1 — the one signed
# overflow case — saturates to 0x7fffffff; both set the SPEFSCR overflow
# bits.  Fix: the zero-divisor guards tested "dividend > 0", so a ZERO
# dividend with a zero divisor fell through to the host division and
# faulted (SIGFPE); test ">= 0" so it saturates to 0x7fffffff instead.
# NOTE(review): the PPC_INSN_INT_SPR in-mask omits RB_BITMASK even though
# *rB/*rBh are read (evdivwu does the same) — confirm against the model
# scheduler's expectations.
2230 0.4,6.RS,11.RA,16.RB,21.1222:EVX:e500:evdivws %RS,%RA,%RB:Vector Divide Word Signed
2231 signed32 dividendh, dividendl, divisorh, divisorl;
2232 signed32 w1, w2;
2233 int ovh, ovl;
2234 dividendh = *rAh;
2235 dividendl = *rA;
2236 divisorh = *rBh;
2237 divisorl = *rB;
2238 if (dividendh < 0 && divisorh == 0) {
2239 w1 = 0x80000000;
2240 ovh = 1;
2241 } else if (dividendh >= 0 && divisorh == 0) {
2242 w1 = 0x7fffffff;
2243 ovh = 1;
2244 } else if (dividendh == 0x80000000 && divisorh == -1) {
2245 w1 = 0x7fffffff;
2246 ovh = 1;
2247 } else {
2248 w1 = dividendh / divisorh;
2249 ovh = 0;
2250 }
2251 if (dividendl < 0 && divisorl == 0) {
2252 w2 = 0x80000000;
2253 ovl = 1;
2254 } else if (dividendl >= 0 && divisorl == 0) {
2255 w2 = 0x7fffffff;
2256 ovl = 1;
2257 } else if (dividendl == 0x80000000 && divisorl == -1) {
2258 w2 = 0x7fffffff;
2259 ovl = 1;
2260 } else {
2261 w2 = dividendl / divisorl;
2262 ovl = 0;
2263 }
2264 EV_SET_REG2(*rSh, *rS, w1, w2);
2265 EV_SET_SPEFSCR_OV(ovl, ovh);
2266 PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK, spr_spefscr);
2267
2268
# evdivwu: unsigned divide of each 32-bit half of rA by the matching half of
# rB.  A zero divisor saturates the quotient to 0xffffffff and sets the
# SPEFSCR overflow bit for that half; no other overflow is possible.
# NOTE(review): the in-mask omits RB_BITMASK though *rB/*rBh are read —
# matches evdivws; confirm intended.
2269 0.4,6.RS,11.RA,16.RB,21.1223:EVX:e500:evdivwu %RS,%RA,%RB:Vector Divide Word Unsigned
2270 unsigned32 dividendh, dividendl, divisorh, divisorl;
2271 unsigned32 w1, w2;
2272 int ovh, ovl;
2273 dividendh = *rAh;
2274 dividendl = *rA;
2275 divisorh = *rBh;
2276 divisorl = *rB;
2277 if (divisorh == 0) {
2278 w1 = 0xffffffff;
2279 ovh = 1;
2280 } else {
2281 w1 = dividendh / divisorh;
2282 ovh = 0;
2283 }
2284 if (divisorl == 0) {
2285 w2 = 0xffffffff;
2286 ovl = 1;
2287 } else {
2288 w2 = dividendl / divisorl;
2289 ovl = 0;
2290 }
2291 EV_SET_REG2(*rSh, *rS, w1, w2);
2292 EV_SET_SPEFSCR_OV(ovl, ovh);
2293 PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK, spr_spefscr);
2294
2295
2296 #
2297 # A.2.9 Floating Point SPE Instructions
2298 #
2299
# evfsabs: clear the IEEE-754 sign bit of both 32-bit halves of rA.
2300 0.4,6.RS,11.RA,16.0,21.644:EVX:e500:evfsabs %RS,%RA:Vector Floating-Point Absolute Value
2301 unsigned32 w1, w2;
2302 w1 = *rAh & 0x7fffffff;
2303 w2 = *rA & 0x7fffffff;
2304 EV_SET_REG2(*rSh, *rS, w1, w2);
2305 PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
2306 
# evfsnabs: force the sign bit of both halves (negative absolute value).
2307 0.4,6.RS,11.RA,16.0,21.645:EVX:e500:evfsnabs %RS,%RA:Vector Floating-Point Negative Absolute Value
2308 unsigned32 w1, w2;
2309 w1 = *rAh | 0x80000000;
2310 w2 = *rA | 0x80000000;
2311 EV_SET_REG2(*rSh, *rS, w1, w2);
2312 PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
2313 
# evfsneg: invert the sign bit of both halves, leaving the rest untouched.
2314 0.4,6.RS,11.RA,16.0,21.646:EVX:e500:evfsneg %RS,%RA:Vector Floating-Point Negate
2315 unsigned32 w1, w2;
2316 w1 = *rAh;
2317 w2 = *rA;
2318 w1 = (w1 & 0x7fffffff) | ((~w1) & 0x80000000);
2319 w2 = (w2 & 0x7fffffff) | ((~w2) & 0x80000000);
2320 EV_SET_REG2(*rSh, *rS, w1, w2);
2321 PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
2322
# evfsadd: single-precision add on each 32-bit half of the register pair,
# with per-half SPEFSCR status reporting.
# Fix: the low-word computation passed the HIGH-half guard/sticky bits
# (spefscr_fgh/spefscr_fxh); it must use spefscr_fg/spefscr_fx, as evfsdiv
# already does for its low word.
2323 0.4,6.RS,11.RA,16.RB,21.640:EVX:e500:evfsadd %RS,%RA,%RB:Vector Floating-Point Add
2324 unsigned32 w1, w2;
2325 w1 = ev_fs_add (*rAh, *rBh, spefscr_finvh, spefscr_fovfh, spefscr_funfh, spefscr_fgh, spefscr_fxh, processor);
2326 w2 = ev_fs_add (*rA, *rB, spefscr_finv, spefscr_fovf, spefscr_funf, spefscr_fg, spefscr_fx, processor);
2327 EV_SET_REG2(*rSh, *rS, w1, w2);
2328 PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
2329
# evfssub: single-precision subtract on each 32-bit half of the register
# pair, with per-half SPEFSCR status reporting.
# Fix: the low-word computation passed the HIGH-half guard/sticky bits
# (spefscr_fgh/spefscr_fxh); it must use spefscr_fg/spefscr_fx, as evfsdiv
# already does for its low word.
2330 0.4,6.RS,11.RA,16.RB,21.641:EVX:e500:evfssub %RS,%RA,%RB:Vector Floating-Point Subtract
2331 unsigned32 w1, w2;
2332 w1 = ev_fs_sub (*rAh, *rBh, spefscr_finvh, spefscr_fovfh, spefscr_funfh, spefscr_fgh, spefscr_fxh, processor);
2333 w2 = ev_fs_sub (*rA, *rB, spefscr_finv, spefscr_fovf, spefscr_funf, spefscr_fg, spefscr_fx, processor);
2334 EV_SET_REG2(*rSh, *rS, w1, w2);
2335 PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
2336
# evfsmul: single-precision multiply on each 32-bit half of the register
# pair, with per-half SPEFSCR status reporting.
# Fix: the low-word computation passed the HIGH-half guard/sticky bits
# (spefscr_fgh/spefscr_fxh); it must use spefscr_fg/spefscr_fx, as evfsdiv
# already does for its low word.
2337 0.4,6.RS,11.RA,16.RB,21.648:EVX:e500:evfsmul %RS,%RA,%RB:Vector Floating-Point Multiply
2338 unsigned32 w1, w2;
2339 w1 = ev_fs_mul (*rAh, *rBh, spefscr_finvh, spefscr_fovfh, spefscr_funfh, spefscr_fgh, spefscr_fxh, processor);
2340 w2 = ev_fs_mul (*rA, *rB, spefscr_finv, spefscr_fovf, spefscr_funf, spefscr_fg, spefscr_fx, processor);
2341 EV_SET_REG2(*rSh, *rS, w1, w2);
2342 PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
2343
# evfsdiv: single-precision divide on each 32-bit half, including the
# divide-by-zero (fdbz) status bit; note it correctly uses the high-half
# flag set for w1 and the low-half set for w2.
2344 0.4,6.RS,11.RA,16.RB,21.649:EVX:e500:evfsdiv %RS,%RA,%RB:Vector Floating-Point Divide
2345 signed32 w1, w2;
2346 w1 = ev_fs_div (*rAh, *rBh, spefscr_finvh, spefscr_fovfh, spefscr_funfh, spefscr_fdbzh, spefscr_fgh, spefscr_fxh, processor);
2347 w2 = ev_fs_div (*rA, *rB, spefscr_finv, spefscr_fovf, spefscr_funf, spefscr_fdbz, spefscr_fg, spefscr_fx, processor);
2348 EV_SET_REG2(*rSh, *rS, w1, w2);
2349 PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
2350
# The evfscmp* trio compares each half of rA against rB, flags invalid
# operands (infinity/denormal/NaN) in SPEFSCR, and packs the per-half
# results into CR field BF as: high, low, high|low, high&low.
#
# evfscmpgt: per-half greater-than compare.
2351 0.4,6.BF,9./,11.RA,16.RB,21.652:EVX:e500:evfscmpgt %BF,%RA,%RB:Vector Floating-Point Compare Greater Than
2352 sim_fpu al, ah, bl, bh;
2353 int w, ch, cl;
2354 sim_fpu_32to (&al, *rA);
2355 sim_fpu_32to (&ah, *rAh);
2356 sim_fpu_32to (&bl, *rB);
2357 sim_fpu_32to (&bh, *rBh);
2358 if (EV_IS_INFDENORMNAN(&al) || EV_IS_INFDENORMNAN(&bl))
2359 EV_SET_SPEFSCR_BITS(spefscr_finv);
2360 if (EV_IS_INFDENORMNAN(&ah) || EV_IS_INFDENORMNAN(&bh))
2361 EV_SET_SPEFSCR_BITS(spefscr_finvh);
2362 if (sim_fpu_is_gt(&ah, &bh))
2363 ch = 1;
2364 else
2365 ch = 0;
2366 if (sim_fpu_is_gt(&al, &bl))
2367 cl = 1;
2368 else
2369 cl = 0;
2370 w = ch << 3 | cl << 2 | (ch | cl) << 1 | (ch & cl);
2371 CR_SET(BF, w);
2372 PPC_INSN_INT_SPR(0, RA_BITMASK | RB_BITMASK, spr_spefscr);
2373 
# evfscmplt: per-half less-than compare; otherwise identical to evfscmpgt.
2374 0.4,6.BF,9./,11.RA,16.RB,21.653:EVX:e500:evfscmplt %BF,%RA,%RB:Vector Floating-Point Compare Less Than
2375 sim_fpu al, ah, bl, bh;
2376 int w, ch, cl;
2377 sim_fpu_32to (&al, *rA);
2378 sim_fpu_32to (&ah, *rAh);
2379 sim_fpu_32to (&bl, *rB);
2380 sim_fpu_32to (&bh, *rBh);
2381 if (EV_IS_INFDENORMNAN(&al) || EV_IS_INFDENORMNAN(&bl))
2382 EV_SET_SPEFSCR_BITS(spefscr_finv);
2383 if (EV_IS_INFDENORMNAN(&ah) || EV_IS_INFDENORMNAN(&bh))
2384 EV_SET_SPEFSCR_BITS(spefscr_finvh);
2385 if (sim_fpu_is_lt(&ah, &bh))
2386 ch = 1;
2387 else
2388 ch = 0;
2389 if (sim_fpu_is_lt(&al, &bl))
2390 cl = 1;
2391 else
2392 cl = 0;
2393 w = ch << 3 | cl << 2 | (ch | cl) << 1 | (ch & cl);
2394 CR_SET(BF, w);
2395 PPC_INSN_INT_SPR(0, RA_BITMASK | RB_BITMASK, spr_spefscr);
2396 
# evfscmpeq: per-half equality compare; otherwise identical to evfscmpgt.
2397 0.4,6.BF,9./,11.RA,16.RB,21.654:EVX:e500:evfscmpeq %BF,%RA,%RB:Vector Floating-Point Compare Equal
2398 sim_fpu al, ah, bl, bh;
2399 int w, ch, cl;
2400 sim_fpu_32to (&al, *rA);
2401 sim_fpu_32to (&ah, *rAh);
2402 sim_fpu_32to (&bl, *rB);
2403 sim_fpu_32to (&bh, *rBh);
2404 if (EV_IS_INFDENORMNAN(&al) || EV_IS_INFDENORMNAN(&bl))
2405 EV_SET_SPEFSCR_BITS(spefscr_finv);
2406 if (EV_IS_INFDENORMNAN(&ah) || EV_IS_INFDENORMNAN(&bh))
2407 EV_SET_SPEFSCR_BITS(spefscr_finvh);
2408 if (sim_fpu_is_eq(&ah, &bh))
2409 ch = 1;
2410 else
2411 ch = 0;
2412 if (sim_fpu_is_eq(&al, &bl))
2413 cl = 1;
2414 else
2415 cl = 0;
2416 w = ch << 3 | cl << 2 | (ch | cl) << 1 | (ch & cl);
2417 CR_SET(BF, w);
2418 PPC_INSN_INT_SPR(0, RA_BITMASK | RB_BITMASK, spr_spefscr);
2419
# The evfstst* trio is the "test" variant of evfscmp*: same CR packing
# (high, low, high|low, high&low into field BF) but no invalid-operand
# checks and no SPEFSCR update.
#
# evfststgt: per-half greater-than test.
2420 0.4,6.BF,9./,11.RA,16.RB,21.668:EVX:e500:evfststgt %BF,%RA,%RB:Vector Floating-Point Test Greater Than
2421 sim_fpu al, ah, bl, bh;
2422 int w, ch, cl;
2423 sim_fpu_32to (&al, *rA);
2424 sim_fpu_32to (&ah, *rAh);
2425 sim_fpu_32to (&bl, *rB);
2426 sim_fpu_32to (&bh, *rBh);
2427 if (sim_fpu_is_gt(&ah, &bh))
2428 ch = 1;
2429 else
2430 ch = 0;
2431 if (sim_fpu_is_gt(&al, &bl))
2432 cl = 1;
2433 else
2434 cl = 0;
2435 w = ch << 3 | cl << 2 | (ch | cl) << 1 | (ch & cl);
2436 CR_SET(BF, w);
2437 PPC_INSN_INT_CR(0, RA_BITMASK | RB_BITMASK, BF_BITMASK);
2438 
# evfststlt: per-half less-than test.
2439 0.4,6.BF,9./,11.RA,16.RB,21.669:EVX:e500:evfststlt %BF,%RA,%RB:Vector Floating-Point Test Less Than
2440 sim_fpu al, ah, bl, bh;
2441 int w, ch, cl;
2442 sim_fpu_32to (&al, *rA);
2443 sim_fpu_32to (&ah, *rAh);
2444 sim_fpu_32to (&bl, *rB);
2445 sim_fpu_32to (&bh, *rBh);
2446 if (sim_fpu_is_lt(&ah, &bh))
2447 ch = 1;
2448 else
2449 ch = 0;
2450 if (sim_fpu_is_lt(&al, &bl))
2451 cl = 1;
2452 else
2453 cl = 0;
2454 w = ch << 3 | cl << 2 | (ch | cl) << 1 | (ch & cl);
2455 CR_SET(BF, w);
2456 PPC_INSN_INT_CR(0, RA_BITMASK | RB_BITMASK, BF_BITMASK);
2457 
# evfststeq: per-half equality test.
2458 0.4,6.BF,9./,11.RA,16.RB,21.670:EVX:e500:evfststeq %BF,%RA,%RB:Vector Floating-Point Test Equal
2459 sim_fpu al, ah, bl, bh;
2460 int w, ch, cl;
2461 sim_fpu_32to (&al, *rA);
2462 sim_fpu_32to (&ah, *rAh);
2463 sim_fpu_32to (&bl, *rB);
2464 sim_fpu_32to (&bh, *rBh);
2465 if (sim_fpu_is_eq(&ah, &bh))
2466 ch = 1;
2467 else
2468 ch = 0;
2469 if (sim_fpu_is_eq(&al, &bl))
2470 cl = 1;
2471 else
2472 cl = 0;
2473 w = ch << 3 | cl << 2 | (ch | cl) << 1 | (ch & cl);
2474 CR_SET(BF, w);
2475 PPC_INSN_INT_CR(0, RA_BITMASK | RB_BITMASK, BF_BITMASK);
2476
2477 0.4,6.RS,11.0,16.RB,21.656:EVX:e500:evfscfui %RS,%RB:Vector Convert Floating-Point from Unsigned Integer
2478 unsigned32 f, w1, w2;
2479 sim_fpu b;
2480
2481 sim_fpu_u32to (&b, *rBh, sim_fpu_round_default);
2482 sim_fpu_to32 (&w1, &b);
2483 sim_fpu_u32to (&b, *rB, sim_fpu_round_default);
2484 sim_fpu_to32 (&w2, &b);
2485
2486 EV_SET_REG2(*rSh, *rS, w1, w2);
2487 PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
2488
2489 0.4,6.RS,11.0,16.RB,21.664:EVX:e500:evfsctuiz %RS,%RB:Vector Convert Floating-Point to Unsigned Integer with Round toward Zero
2490 unsigned32 w1, w2;
2491 sim_fpu b;
2492
2493 sim_fpu_32to (&b, *rBh);
2494 sim_fpu_to32u (&w1, &b, sim_fpu_round_zero);
2495 sim_fpu_32to (&b, *rB);
2496 sim_fpu_to32u (&w2, &b, sim_fpu_round_zero);
2497
2498 EV_SET_REG2(*rSh, *rS, w1, w2);
2499 PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
2500
2501 0.4,6.RS,11.0,16.RB,21.657:EVX:e500:evfscfsi %RS,%RB:Vector Convert Floating-Point from Signed Integer
2502 signed32 w1, w2;
2503 sim_fpu b, x, y;
2504
2505 sim_fpu_i32to (&b, *rBh, sim_fpu_round_default);
2506 sim_fpu_to32 (&w1, &b);
2507 sim_fpu_i32to (&b, *rB, sim_fpu_round_default);
2508 sim_fpu_to32 (&w2, &b);
2509
2510 EV_SET_REG2(*rSh, *rS, w1, w2);
2511 PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
2512
2513 0.4,6.RS,11.0,16.RB,21.658:EVX:e500:evfscfuf %RS,%RB:Vector Convert Floating-Point from Unsigned Fraction
2514 unsigned32 w1, w2, bh, bl;
2515 sim_fpu b, x, y;
2516 bh = *rBh;
2517 if (bh == 0xffffffff)
2518 sim_fpu_to32 (&w1, &sim_fpu_one);
2519 else {
2520 sim_fpu_u64to (&x, 0x100000000, sim_fpu_round_default);
2521 sim_fpu_u32to (&y, bh, sim_fpu_round_default);
2522 sim_fpu_div (&b, &y, &x);
2523 sim_fpu_to32 (&w1, &b);
2524 }
2525 bl = *rB;
2526 if (bl == 0xffffffff)
2527 sim_fpu_to32 (&w2, &sim_fpu_one);
2528 else {
2529 sim_fpu_u64to (&x, 0x100000000, sim_fpu_round_default);
2530 sim_fpu_u32to (&y, bl, sim_fpu_round_default);
2531 sim_fpu_div (&b, &y, &x);
2532 sim_fpu_to32 (&w2, &b);
2533 }
2534 EV_SET_REG2(*rSh, *rS, w1, w2);
2535 PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
2536
# evfscfsf: each element of RB is a signed fraction (value = elem / 2^31);
# convert both to single-precision floats.
2537 0.4,6.RS,11.0,16.RB,21.659:EVX:e500:evfscfsf %RS,%RB:Vector Convert Floating-Point from Signed Fraction
2538 unsigned32 w1, w2;
2539 sim_fpu b, x, y;
2540
2541 sim_fpu_u32to (&x, 0x80000000, sim_fpu_round_default);
2542 sim_fpu_i32to (&y, *rBh, sim_fpu_round_default);
2543 sim_fpu_div (&b, &y, &x);
2544 sim_fpu_to32 (&w1, &b);
2545
2546 sim_fpu_u32to (&x, 0x80000000, sim_fpu_round_default);
2547 sim_fpu_i32to (&y, *rB, sim_fpu_round_default);
2548 sim_fpu_div (&b, &y, &x);
2549 sim_fpu_to32 (&w2, &b);
2550
2551 EV_SET_REG2(*rSh, *rS, w1, w2);
2552 PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
2553
# evfsctui: round each single-precision element of RB to an unsigned
# 32-bit integer using the default rounding mode.
2554 0.4,6.RS,11.0,16.RB,21.660:EVX:e500:evfsctui %RS,%RB:Vector Convert Floating-Point to Unsigned Integer
2555 unsigned32 w1, w2;
2556 sim_fpu b;
2557
2558 sim_fpu_32to (&b, *rBh);
2559 sim_fpu_to32u (&w1, &b, sim_fpu_round_default);
2560 sim_fpu_32to (&b, *rB);
2561 sim_fpu_to32u (&w2, &b, sim_fpu_round_default);
2562
2563 EV_SET_REG2(*rSh, *rS, w1, w2);
2564 PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
2565
# evfsctsi: as evfsctui but signed conversion.
2566 0.4,6.RS,11.0,16.RB,21.661:EVX:e500:evfsctsi %RS,%RB:Vector Convert Floating-Point to Signed Integer
2567 signed32 w1, w2;
2568 sim_fpu b;
2569
2570 sim_fpu_32to (&b, *rBh);
2571 sim_fpu_to32i (&w1, &b, sim_fpu_round_default);
2572 sim_fpu_32to (&b, *rB);
2573 sim_fpu_to32i (&w2, &b, sim_fpu_round_default);
2574
2575 EV_SET_REG2(*rSh, *rS, w1, w2);
2576 PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
2577
# evfsctsiz: as evfsctsi but always rounds toward zero (truncation).
2578 0.4,6.RS,11.0,16.RB,21.666:EVX:e500:evfsctsiz %RS,%RB:Vector Convert Floating-Point to Signed Integer with Round toward Zero
2579 signed32 w1, w2;
2580 sim_fpu b;
2581
2582 sim_fpu_32to (&b, *rBh);
2583 sim_fpu_to32i (&w1, &b, sim_fpu_round_zero);
2584 sim_fpu_32to (&b, *rB);
2585 sim_fpu_to32i (&w2, &b, sim_fpu_round_zero);
2586
2587 EV_SET_REG2(*rSh, *rS, w1, w2);
2588 PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
2589
0.4,6.RS,11.0,16.RB,21.662:EVX:e500:evfsctuf %RS,%RB:Vector Convert Floating-Point to Unsigned Fraction
  unsigned32 w1, w2;
  sim_fpu b, x, y;

  /* value * 2^32, rounded to an unsigned integer, is the fixed-point
     image of the unsigned fraction.  The ULL suffix keeps the 2^32
     constant from being truncated on hosts where long is 32 bits.  */
  sim_fpu_u64to (&x, 0x100000000ULL, sim_fpu_round_default);
  sim_fpu_32to (&y, *rBh);
  sim_fpu_mul (&b, &y, &x);
  sim_fpu_to32u (&w1, &b, sim_fpu_round_default);

  sim_fpu_u64to (&x, 0x100000000ULL, sim_fpu_round_default);
  sim_fpu_32to (&y, *rB);
  sim_fpu_mul (&b, &y, &x);
  sim_fpu_to32u (&w2, &b, sim_fpu_round_default);

  EV_SET_REG2(*rSh, *rS, w1, w2);
  PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
2606
# evfsctsf: value * 2^31, rounded to a signed integer, is the fixed-point
# image of the signed fraction.
# NOTE(review): this vector form rounds with sim_fpu_round_near while the
# scalar efsctsf uses sim_fpu_round_default — confirm which is intended.
2607 0.4,6.RS,11.0,16.RB,21.663:EVX:e500:evfsctsf %RS,%RB:Vector Convert Floating-Point to Signed Fraction
2608 signed32 w1, w2;
2609 sim_fpu b, x, y;
2610
2611 sim_fpu_32to (&y, *rBh);
2612 sim_fpu_u32to (&x, 0x80000000, sim_fpu_round_default);
2613 sim_fpu_mul (&b, &y, &x);
2614 sim_fpu_to32i (&w1, &b, sim_fpu_round_near);
2615
2616 sim_fpu_32to (&y, *rB);
2617 sim_fpu_u32to (&x, 0x80000000, sim_fpu_round_default);
2618 sim_fpu_mul (&b, &y, &x);
2619 sim_fpu_to32i (&w2, &b, sim_fpu_round_near);
2620
2621 EV_SET_REG2(*rSh, *rS, w1, w2);
2622 PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
2623
2624
# Scalar single-precision sign-bit operations: they work on the raw bit
# image of the low word of rA and preserve the high word of rS (w1).
2625 0.4,6.RS,11.RA,16.0,21.708:EVX:e500:efsabs %RS,%RA:Floating-Point Absolute Value
2626 unsigned32 w1, w2;
2627 w1 = *rSh;
2628 w2 = *rA & 0x7fffffff;
2629 EV_SET_REG2(*rSh, *rS, w1, w2);
2630 PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
2631
2632 0.4,6.RS,11.RA,16.0,21.709:EVX:e500:efsnabs %RS,%RA:Floating-Point Negative Absolute Value
2633 unsigned32 w1, w2;
2634 w1 = *rSh;
2635 w2 = *rA | 0x80000000;
2636 EV_SET_REG2(*rSh, *rS, w1, w2);
2637 PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
2638
2639 0.4,6.RS,11.RA,16.0,21.710:EVX:e500:efsneg %RS,%RA:Floating-Point Negate
2640 unsigned32 w1, w2;
2641 w1 = *rSh;
2642 w2 = (*rA & 0x7fffffff) | ((~*rA) & 0x80000000);
2643 EV_SET_REG2(*rSh, *rS, w1, w2);
2644 PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
2645
# Scalar single-precision arithmetic: delegated to the ev_fs_* helpers,
# which also raise the named SPEFSCR status bits.
2646 0.4,6.RS,11.RA,16.RB,21.704:EVX:e500:efsadd %RS,%RA,%RB:Floating-Point Add
2647 unsigned32 w;
2648 w = ev_fs_add (*rA, *rB, spefscr_finv, spefscr_fovf, spefscr_funf, spefscr_fgh, spefscr_fxh, processor);
2649 EV_SET_REG(*rS, w);
2650 PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
2651
2652 0.4,6.RS,11.RA,16.RB,21.705:EVX:e500:efssub %RS,%RA,%RB:Floating-Point Subtract
2653 unsigned32 w;
2654 w = ev_fs_sub (*rA, *rB, spefscr_finv, spefscr_fovf, spefscr_funf, spefscr_fgh, spefscr_fxh, processor);
2655 EV_SET_REG(*rS, w);
2656 PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
2657
2658 0.4,6.RS,11.RA,16.RB,21.712:EVX:e500:efsmul %RS,%RA,%RB:Floating-Point Multiply
2659 unsigned32 w;
2660 w = ev_fs_mul (*rA, *rB, spefscr_finv, spefscr_fovf, spefscr_funf, spefscr_fgh, spefscr_fxh, processor);
2661 EV_SET_REG(*rS, w);
2662 PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
2663
# efsdiv additionally reports divide-by-zero (spefscr_fdbz).
2664 0.4,6.RS,11.RA,16.RB,21.713:EVX:e500:efsdiv %RS,%RA,%RB:Floating-Point Divide
2665 unsigned32 w;
2666 w = ev_fs_div (*rA, *rB, spefscr_finv, spefscr_fovf, spefscr_funf, spefscr_fdbz, spefscr_fg, spefscr_fx, processor);
2667 EV_SET_REG(*rS, w);
2668 PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
2669
# Scalar compares: the efscmp* forms flag invalid operands (inf/denorm/NaN)
# in SPEFSCR; the boolean result cl is replicated into two bits of CR
# field BF (w = cl<<2 | cl<<1).
2670 0.4,6.BF,9./,11.RA,16.RB,21.716:EVX:e500:efscmpgt %BF,%RA,%RB:Floating-Point Compare Greater Than
2671 sim_fpu a, b;
2672 int w, cl;
2673 sim_fpu_32to (&a, *rA);
2674 sim_fpu_32to (&b, *rB);
2675 if (EV_IS_INFDENORMNAN(&a) || EV_IS_INFDENORMNAN(&b))
2676 EV_SET_SPEFSCR_BITS(spefscr_finv);
2677 if (sim_fpu_is_gt(&a, &b))
2678 cl = 1;
2679 else
2680 cl = 0;
2681 w = cl << 2 | cl << 1;
2682 CR_SET(BF, w);
2683 PPC_INSN_INT_SPR(0, RA_BITMASK | RB_BITMASK, spr_spefscr);
2684
2685 0.4,6.BF,9./,11.RA,16.RB,21.717:EVX:e500:efscmplt %BF,%RA,%RB:Floating-Point Compare Less Than
2686 sim_fpu al, bl;
2687 int w, cl;
2688 sim_fpu_32to (&al, *rA);
2689 sim_fpu_32to (&bl, *rB);
2690 if (EV_IS_INFDENORMNAN(&al) || EV_IS_INFDENORMNAN(&bl))
2691 EV_SET_SPEFSCR_BITS(spefscr_finv);
2692 if (sim_fpu_is_lt(&al, &bl))
2693 cl = 1;
2694 else
2695 cl = 0;
2696 w = cl << 2 | cl << 1;
2697 CR_SET(BF, w);
2698 PPC_INSN_INT_SPR(0, RA_BITMASK | RB_BITMASK, spr_spefscr);
2699
2700 0.4,6.BF,9./,11.RA,16.RB,21.718:EVX:e500:efscmpeq %BF,%RA,%RB:Floating-Point Compare Equal
2701 sim_fpu al, bl;
2702 int w, cl;
2703 sim_fpu_32to (&al, *rA);
2704 sim_fpu_32to (&bl, *rB);
2705 if (EV_IS_INFDENORMNAN(&al) || EV_IS_INFDENORMNAN(&bl))
2706 EV_SET_SPEFSCR_BITS(spefscr_finv);
2707 if (sim_fpu_is_eq(&al, &bl))
2708 cl = 1;
2709 else
2710 cl = 0;
2711 w = cl << 2 | cl << 1;
2712 CR_SET(BF, w);
2713 PPC_INSN_INT_SPR(0, RA_BITMASK | RB_BITMASK, spr_spefscr);
2714
# The efstst* forms are identical except that they never touch SPEFSCR
# (no invalid-operand detection), so they only write the CR field.
2715 0.4,6.BF,9./,11.RA,16.RB,21.732:EVX:e500:efststgt %BF,%RA,%RB:Floating-Point Test Greater Than
2716 sim_fpu al, bl;
2717 int w, cl;
2718 sim_fpu_32to (&al, *rA);
2719 sim_fpu_32to (&bl, *rB);
2720 if (sim_fpu_is_gt(&al, &bl))
2721 cl = 1;
2722 else
2723 cl = 0;
2724 w = cl << 2 | cl << 1;
2725 CR_SET(BF, w);
2726 PPC_INSN_INT_CR(0, RA_BITMASK | RB_BITMASK, BF_BITMASK);
2727
2728 0.4,6.BF,9./,11.RA,16.RB,21.733:EVX:e500:efststlt %BF,%RA,%RB:Floating-Point Test Less Than
2729 sim_fpu al, bl;
2730 int w, cl;
2731 sim_fpu_32to (&al, *rA);
2732 sim_fpu_32to (&bl, *rB);
2733 if (sim_fpu_is_lt(&al, &bl))
2734 cl = 1;
2735 else
2736 cl = 0;
2737 w = cl << 2 | cl << 1;
2738 CR_SET(BF, w);
2739 PPC_INSN_INT_CR(0, RA_BITMASK | RB_BITMASK, BF_BITMASK);
2740
2741 0.4,6.BF,9./,11.RA,16.RB,21.734:EVX:e500:efststeq %BF,%RA,%RB:Floating-Point Test Equal
2742 sim_fpu al, bl;
2743 int w, cl;
2744 sim_fpu_32to (&al, *rA);
2745 sim_fpu_32to (&bl, *rB);
2746 if (sim_fpu_is_eq(&al, &bl))
2747 cl = 1;
2748 else
2749 cl = 0;
2750 w = cl << 2 | cl << 1;
2751 CR_SET(BF, w);
2752 PPC_INSN_INT_CR(0, RA_BITMASK | RB_BITMASK, BF_BITMASK);
2753
0.4,6.RS,11.0,16.RB,21.721:EVX:e500:efscfsi %RS,%RB:Convert Floating-Point from Signed Integer
  /* Low word becomes *rB converted from a signed integer to a
     single-precision float; the high word of rS is preserved.
     The previously declared local `f' was never used.  */
  unsigned32 w1, w2;
  sim_fpu b;
  w1 = *rSh;
  sim_fpu_i32to (&b, *rB, sim_fpu_round_default);
  sim_fpu_to32 (&w2, &b);
  EV_SET_REG2(*rSh, *rS, w1, w2);
  PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
2762
# efscfui: low word becomes (float)(unsigned)*rB; high word preserved.
2763 0.4,6.RS,11.0,16.RB,21.720:EVX:e500:efscfui %RS,%RB:Convert Floating-Point from Unsigned Integer
2764 unsigned32 w1, w2;
2765 sim_fpu b;
2766 w1 = *rSh;
2767 sim_fpu_u32to (&b, *rB, sim_fpu_round_default);
2768 sim_fpu_to32 (&w2, &b);
2769 EV_SET_REG2(*rSh, *rS, w1, w2);
2770 PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
2771
# efscfsf: *rB is a signed fraction (value = *rB / 2^31); convert to a
# single-precision float.  High word preserved.
2772 0.4,6.RS,11.0,16.RB,21.723:EVX:e500:efscfsf %RS,%RB:Convert Floating-Point from Signed Fraction
2773 unsigned32 w1, w2;
2774 sim_fpu b, x, y;
2775 w1 = *rSh;
2776 sim_fpu_u32to (&x, 0x80000000, sim_fpu_round_default);
2777 sim_fpu_i32to (&y, *rB, sim_fpu_round_default);
2778 sim_fpu_div (&b, &y, &x);
2779 sim_fpu_to32 (&w2, &b);
2780 EV_SET_REG2(*rSh, *rS, w1, w2);
2781 PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
2782
0.4,6.RS,11.0,16.RB,21.722:EVX:e500:efscfuf %RS,%RB:Convert Floating-Point from Unsigned Fraction
  unsigned32 w1, w2, bl;
  sim_fpu b, x, y;
  /* *rB is an unsigned fraction (value = *rB / 2^32); all-ones saturates
     to exactly 1.0.  The high word of rS is preserved.  The ULL suffix
     keeps the 2^32 constant from truncating on 32-bit hosts.  */
  w1 = *rSh;
  bl = *rB;
  if (bl == 0xffffffff)
    sim_fpu_to32 (&w2, &sim_fpu_one);
  else {
    sim_fpu_u64to (&x, 0x100000000ULL, sim_fpu_round_default);
    sim_fpu_u32to (&y, bl, sim_fpu_round_default);
    sim_fpu_div (&b, &y, &x);
    sim_fpu_to32 (&w2, &b);
  }
  EV_SET_REG2(*rSh, *rS, w1, w2);
  PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
2798
0.4,6.RS,11.0,16.RB,21.725:EVX:e500:efsctsi %RS,%RB:Convert Floating-Point to Signed Integer
  /* Low word becomes *rB rounded to a signed integer (default mode);
     high word preserved.  The old `signed64 temp' was never used.  */
  signed32 w1, w2;
  sim_fpu b;
  w1 = *rSh;
  sim_fpu_32to (&b, *rB);
  sim_fpu_to32i (&w2, &b, sim_fpu_round_default);
  EV_SET_REG2(*rSh, *rS, w1, w2);
  PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
2808
0.4,6.RS,11.0,16.RB,21.730:EVX:e500:efsctsiz %RS,%RB:Convert Floating-Point to Signed Integer with Round toward Zero
  /* As efsctsi but always truncates toward zero.  The old
     `signed64 temp' was never used.  */
  signed32 w1, w2;
  sim_fpu b;
  w1 = *rSh;
  sim_fpu_32to (&b, *rB);
  sim_fpu_to32i (&w2, &b, sim_fpu_round_zero);
  EV_SET_REG2(*rSh, *rS, w1, w2);
  PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
2818
0.4,6.RS,11.0,16.RB,21.724:EVX:e500:efsctui %RS,%RB:Convert Floating-Point to Unsigned Integer
  /* Low word becomes *rB rounded to an unsigned integer (default mode);
     high word preserved.  Locals are unsigned32 to match
     sim_fpu_to32u's output parameter; the old `unsigned64 temp' was
     never used.  */
  unsigned32 w1, w2;
  sim_fpu b;
  w1 = *rSh;
  sim_fpu_32to (&b, *rB);
  sim_fpu_to32u (&w2, &b, sim_fpu_round_default);
  EV_SET_REG2(*rSh, *rS, w1, w2);
  PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
2828
0.4,6.RS,11.0,16.RB,21.728:EVX:e500:efsctuiz %RS,%RB:Convert Floating-Point to Unsigned Integer with Round toward Zero
  /* As efsctui but always truncates toward zero.  The old
     `unsigned64 temp' was never used.  */
  unsigned32 w1, w2;
  sim_fpu b;
  w1 = *rSh;
  sim_fpu_32to (&b, *rB);
  sim_fpu_to32u (&w2, &b, sim_fpu_round_zero);
  EV_SET_REG2(*rSh, *rS, w1, w2);
  PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
2838
0.4,6.RS,11.0,16.RB,21.727:EVX:e500:efsctsf %RS,%RB:Convert Floating-Point to Signed Fraction
  unsigned32 w1, w2;
  sim_fpu b, x, y;
  w1 = *rSh;
  /* value * 2^31, rounded to a signed integer, is the fixed-point image
     of the signed fraction (cf. evfsctsf).  A stray sim_fpu_to32 call
     here used to overwrite the converted value with the raw float
     image; it has been removed.  */
  sim_fpu_32to (&y, *rB);
  sim_fpu_u32to (&x, 0x80000000, sim_fpu_round_default);
  sim_fpu_mul (&b, &y, &x);
  sim_fpu_to32i (&w2, &b, sim_fpu_round_default);
  EV_SET_REG2(*rSh, *rS, w1, w2);
  PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
2850
0.4,6.RS,11.0,16.RB,21.726:EVX:e500:efsctuf %RS,%RB:Convert Floating-Point to Unsigned Fraction
  unsigned32 w1, w2;
  sim_fpu b, x, y;
  /* value * 2^32, rounded to an unsigned integer, is the fixed-point
     image of the unsigned fraction.  High word preserved.  ULL keeps
     the 2^32 constant from truncating on 32-bit hosts.  */
  w1 = *rSh;
  sim_fpu_u64to (&x, 0x100000000ULL, sim_fpu_round_default);
  sim_fpu_32to (&y, *rB);
  sim_fpu_mul (&b, &y, &x);
  sim_fpu_to32u (&w2, &b, sim_fpu_round_default);
  EV_SET_REG2(*rSh, *rS, w1, w2);
  PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
2861
2862
2863 #
2864 # A.2.10 Vector Load/Store Instructions
2865 #
2866
# Double-word loads.  RA == 0 means a literal base of 0 (hence the
# RA_BITMASK & ~1 in the scheduling mask); UIMM is scaled by 8 for the
# non-indexed forms.
2867 0.4,6.RS,11.RA,16.UIMM,21.769:EVX:e500:evldd %RS,%RA,%UIMM:Vector Load Double Word into Double Word
2868 unsigned64 m;
2869 unsigned_word b;
2870 unsigned_word EA;
2871 if (RA_is_0) b = 0;
2872 else b = *rA;
2873 EA = b + (UIMM << 3);
2874 m = MEM(unsigned, EA, 8);
2875 EV_SET_REG1(*rSh, *rS, m);
2876 //printf("evldd(%d<-%d + %u): m %08x.%08x, *rSh %x *rS %x\n", RS, RA, UIMM, (int)(m >> 32), (int)m, *rSh, *rS);
2877 PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
2878
2879 0.4,6.RS,11.RA,16.RB,21.768:EVX:e500:evlddx %RS,%RA,%RB:Vector Load Double Word into Double Word Indexed
2880 unsigned64 m;
2881 unsigned_word b;
2882 unsigned_word EA;
2883 if (RA_is_0) b = 0;
2884 else b = *rA;
2885 EA = b + *rB;
2886 m = MEM(unsigned, EA, 8);
2887 EV_SET_REG1(*rSh, *rS, m);
2888 PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
2889
# evldw/evldwx: same 8-byte transfer but performed as two 4-byte
# accesses, one per register half.
2890 0.4,6.RS,11.RA,16.UIMM,21.771:EVX:e500:evldw %RS,%RA,%UIMM:Vector Load Double into Two Words
2891 unsigned_word b;
2892 unsigned_word EA;
2893 unsigned32 w1, w2;
2894 if (RA_is_0) b = 0;
2895 else b = *rA;
2896 EA = b + (UIMM << 3);
2897 w1 = MEM(unsigned, EA, 4);
2898 w2 = MEM(unsigned, EA + 4, 4);
2899 EV_SET_REG2(*rSh, *rS, w1, w2);
2900 PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
2901
2902 0.4,6.RS,11.RA,16.RB,21.770:EVX:e500:evldwx %RS,%RA,%RB:Vector Load Double into Two Words Indexed
2903 unsigned_word b;
2904 unsigned_word EA;
2905 unsigned32 w1, w2;
2906 if (RA_is_0) b = 0;
2907 else b = *rA;
2908 EA = b + *rB;
2909 w1 = MEM(unsigned, EA, 4);
2910 w2 = MEM(unsigned, EA + 4, 4);
2911 EV_SET_REG2(*rSh, *rS, w1, w2);
2912 PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
2913
# evldh/evldhx: the 8-byte transfer performed as four 2-byte accesses.
2914 0.4,6.RS,11.RA,16.UIMM,21.773:EVX:e500:evldh %RS,%RA,%UIMM:Vector Load Double into 4 Half Words
2915 unsigned_word b;
2916 unsigned_word EA;
2917 unsigned16 h1, h2, h3, h4;
2918 if (RA_is_0) b = 0;
2919 else b = *rA;
2920 EA = b + (UIMM << 3);
2921 h1 = MEM(unsigned, EA, 2);
2922 h2 = MEM(unsigned, EA + 2, 2);
2923 h3 = MEM(unsigned, EA + 4, 2);
2924 h4 = MEM(unsigned, EA + 6, 2);
2925 EV_SET_REG4(*rSh, *rS, h1, h2, h3, h4);
2926 PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
2927
2928 0.4,6.RS,11.RA,16.RB,21.772:EVX:e500:evldhx %RS,%RA,%RB:Vector Load Double into 4 Half Words Indexed
2929 unsigned_word b;
2930 unsigned_word EA;
2931 unsigned16 h1, h2, h3, h4;
2932 if (RA_is_0) b = 0;
2933 else b = *rA;
2934 EA = b + *rB;
2935 h1 = MEM(unsigned, EA, 2);
2936 h2 = MEM(unsigned, EA + 2, 2);
2937 h3 = MEM(unsigned, EA + 4, 2);
2938 h4 = MEM(unsigned, EA + 6, 2);
2939 EV_SET_REG4(*rSh, *rS, h1, h2, h3, h4);
2940 PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
2941
# Word loads into half-word lanes.  UIMM is scaled by 4 for the
# non-indexed forms.  "Even" places the two memory half-words in the
# even (high) half of each word lane; "odd" places them in the odd (low)
# half, zero- or sign-extending the other half.
2942 0.4,6.RS,11.RA,16.UIMM,21.785:EVX:e500:evlwhe %RS,%RA,%UIMM:Vector Load Word into Two Half Words Even
2943 unsigned_word b;
2944 unsigned_word EA;
2945 unsigned16 h1, h2, h3, h4;
2946 if (RA_is_0) b = 0;
2947 else b = *rA;
2948 EA = b + (UIMM << 2);
2949 h1 = MEM(unsigned, EA, 2);
2950 h2 = 0;
2951 h3 = MEM(unsigned, EA + 2, 2);
2952 h4 = 0;
2953 EV_SET_REG4(*rSh, *rS, h1, h2, h3, h4);
2954 PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
2955
2956 0.4,6.RS,11.RA,16.RB,21.784:EVX:e500:evlwhex %RS,%RA,%RB:Vector Load Word into Two Half Words Even Indexed
2957 unsigned_word b;
2958 unsigned_word EA;
2959 unsigned16 h1, h2, h3, h4;
2960 if (RA_is_0) b = 0;
2961 else b = *rA;
2962 EA = b + *rB;
2963 h1 = MEM(unsigned, EA, 2);
2964 h2 = 0;
2965 h3 = MEM(unsigned, EA + 2, 2);
2966 h4 = 0;
2967 EV_SET_REG4(*rSh, *rS, h1, h2, h3, h4);
2968 PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
2969
2970 0.4,6.RS,11.RA,16.UIMM,21.789:EVX:e500:evlwhou %RS,%RA,%UIMM:Vector Load Word into Two Half Words Odd Unsigned zero-extended
2971 unsigned_word b;
2972 unsigned_word EA;
2973 unsigned16 h1, h2, h3, h4;
2974 if (RA_is_0) b = 0;
2975 else b = *rA;
2976 EA = b + (UIMM << 2);
2977 h1 = 0;
2978 h2 = MEM(unsigned, EA, 2);
2979 h3 = 0;
2980 h4 = MEM(unsigned, EA + 2, 2);
2981 EV_SET_REG4(*rSh, *rS, h1, h2, h3, h4);
2982 PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
2983
2984 0.4,6.RS,11.RA,16.RB,21.788:EVX:e500:evlwhoux %RS,%RA,%RB:Vector Load Word into Two Half Words Odd Unsigned Indexed zero-extended
2985 unsigned_word b;
2986 unsigned_word EA;
2987 unsigned16 h1, h2, h3, h4;
2988 if (RA_is_0) b = 0;
2989 else b = *rA;
2990 EA = b + *rB;
2991 h1 = 0;
2992 h2 = MEM(unsigned, EA, 2);
2993 h3 = 0;
2994 h4 = MEM(unsigned, EA + 2, 2);
2995 EV_SET_REG4(*rSh, *rS, h1, h2, h3, h4);
2996 PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
2997
# evlwhos/evlwhosx: the "signed" odd forms propagate each half-word's
# sign bit (0x8000) into the even half as 0xffff.
2998 0.4,6.RS,11.RA,16.UIMM,21.791:EVX:e500:evlwhos %RS,%RA,%UIMM:Vector Load Word into Half Words Odd Signed with sign extension
2999 unsigned_word b;
3000 unsigned_word EA;
3001 unsigned16 h1, h2, h3, h4;
3002 if (RA_is_0) b = 0;
3003 else b = *rA;
3004 EA = b + (UIMM << 2);
3005 h2 = MEM(unsigned, EA, 2);
3006 if (h2 & 0x8000)
3007 h1 = 0xffff;
3008 else
3009 h1 = 0;
3010 h4 = MEM(unsigned, EA + 2, 2);
3011 if (h4 & 0x8000)
3012 h3 = 0xffff;
3013 else
3014 h3 = 0;
3015 EV_SET_REG4(*rSh, *rS, h1, h2, h3, h4);
3016 PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
3017
3018 0.4,6.RS,11.RA,16.RB,21.790:EVX:e500:evlwhosx %RS,%RA,%RB:Vector Load Word into Half Words Odd Signed Indexed with sign extension
3019 unsigned_word b;
3020 unsigned_word EA;
3021 unsigned16 h1, h2, h3, h4;
3022 if (RA_is_0) b = 0;
3023 else b = *rA;
3024 EA = b + *rB;
3025 h2 = MEM(unsigned, EA, 2);
3026 if (h2 & 0x8000)
3027 h1 = 0xffff;
3028 else
3029 h1 = 0;
3030 h4 = MEM(unsigned, EA + 2, 2);
3031 if (h4 & 0x8000)
3032 h3 = 0xffff;
3033 else
3034 h3 = 0;
3035 EV_SET_REG4(*rSh, *rS, h1, h2, h3, h4);
3036 PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
3037
# Splat loads: load one word (UIMM scaled by 4) or one half-word (UIMM
# scaled by 2) and replicate it across the register lanes.
3038 0.4,6.RS,11.RA,16.UIMM,21.793:EVX:e500:evlwwsplat %RS,%RA,%UIMM:Vector Load Word into Word and Splat
3039 unsigned_word b;
3040 unsigned_word EA;
3041 unsigned32 w1;
3042 if (RA_is_0) b = 0;
3043 else b = *rA;
3044 EA = b + (UIMM << 2);
3045 w1 = MEM(unsigned, EA, 4);
3046 EV_SET_REG2(*rSh, *rS, w1, w1);
3047 PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
3048
3049 0.4,6.RS,11.RA,16.RB,21.792:EVX:e500:evlwwsplatx %RS,%RA,%RB:Vector Load Word into Word and Splat Indexed
3050 unsigned_word b;
3051 unsigned_word EA;
3052 unsigned32 w1;
3053 if (RA_is_0) b = 0;
3054 else b = *rA;
3055 EA = b + *rB;
3056 w1 = MEM(unsigned, EA, 4);
3057 EV_SET_REG2(*rSh, *rS, w1, w1);
3058 PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
3059
3060 0.4,6.RS,11.RA,16.UIMM,21.797:EVX:e500:evlwhsplat %RS,%RA,%UIMM:Vector Load Word into 2 Half Words and Splat
3061 unsigned_word b;
3062 unsigned_word EA;
3063 unsigned16 h1, h2;
3064 if (RA_is_0) b = 0;
3065 else b = *rA;
3066 EA = b + (UIMM << 2);
3067 h1 = MEM(unsigned, EA, 2);
3068 h2 = MEM(unsigned, EA + 2, 2);
3069 EV_SET_REG4(*rSh, *rS, h1, h1, h2, h2);
3070 PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
3071
3072 0.4,6.RS,11.RA,16.RB,21.796:EVX:e500:evlwhsplatx %RS,%RA,%RB:Vector Load Word into 2 Half Words and Splat Indexed
3073 unsigned_word b;
3074 unsigned_word EA;
3075 unsigned16 h1, h2;
3076 if (RA_is_0) b = 0;
3077 else b = *rA;
3078 EA = b + *rB;
3079 h1 = MEM(unsigned, EA, 2);
3080 h2 = MEM(unsigned, EA + 2, 2);
3081 EV_SET_REG4(*rSh, *rS, h1, h1, h2, h2);
3082 PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
3083
3084 0.4,6.RS,11.RA,16.UIMM,21.777:EVX:e500:evlhhesplat %RS,%RA,%UIMM:Vector Load Half Word into Half Words Even and Splat
3085 unsigned_word b;
3086 unsigned_word EA;
3087 unsigned16 h;
3088 if (RA_is_0) b = 0;
3089 else b = *rA;
3090 EA = b + (UIMM << 1);
3091 h = MEM(unsigned, EA, 2);
3092 EV_SET_REG4(*rSh, *rS, h, 0, h, 0);
3093 PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
3094
3095 0.4,6.RS,11.RA,16.RB,21.776:EVX:e500:evlhhesplatx %RS,%RA,%RB:Vector Load Half Word into Half Words Even and Splat Indexed
3096 unsigned_word b;
3097 unsigned_word EA;
3098 unsigned16 h;
3099 if (RA_is_0) b = 0;
3100 else b = *rA;
3101 EA = b + *rB;
3102 h = MEM(unsigned, EA, 2);
3103 EV_SET_REG4(*rSh, *rS, h, 0, h, 0);
3104 PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
3105
3106 0.4,6.RS,11.RA,16.UIMM,21.781:EVX:e500:evlhhousplat %RS,%RA,%UIMM:Vector Load Half Word into Half Word Odd Unsigned and Splat
3107 unsigned_word b;
3108 unsigned_word EA;
3109 unsigned16 h;
3110 if (RA_is_0) b = 0;
3111 else b = *rA;
3112 EA = b + (UIMM << 1);
3113 h = MEM(unsigned, EA, 2);
3114 EV_SET_REG4(*rSh, *rS, 0, h, 0, h);
3115 PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
3116
3117 0.4,6.RS,11.RA,16.RB,21.780:EVX:e500:evlhhousplatx %RS,%RA,%RB:Vector Load Half Word into Half Word Odd Unsigned and Splat Indexed
3118 unsigned_word b;
3119 unsigned_word EA;
3120 unsigned16 h;
3121 if (RA_is_0) b = 0;
3122 else b = *rA;
3123 EA = b + *rB;
3124 h = MEM(unsigned, EA, 2);
3125 EV_SET_REG4(*rSh, *rS, 0, h, 0, h);
3126 PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
3127
# evlhhossplat(x): signed odd splat — the loaded half-word's sign bit is
# propagated into the even halves as 0xffff.
3128 0.4,6.RS,11.RA,16.UIMM,21.783:EVX:e500:evlhhossplat %RS,%RA,%UIMM:Vector Load Half Word into Half Word Odd Signed and Splat
3129 unsigned_word b;
3130 unsigned_word EA;
3131 unsigned16 h1, h2;
3132 if (RA_is_0) b = 0;
3133 else b = *rA;
3134 EA = b + (UIMM << 1);
3135 h2 = MEM(unsigned, EA, 2);
3136 if (h2 & 0x8000)
3137 h1 = 0xffff;
3138 else
3139 h1 = 0;
3140 EV_SET_REG4(*rSh, *rS, h1, h2, h1, h2);
3141 PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
3142
3143 0.4,6.RS,11.RA,16.RB,21.782:EVX:e500:evlhhossplatx %RS,%RA,%RB:Vector Load Half Word into Half Word Odd Signed and Splat Indexed
3144 unsigned_word b;
3145 unsigned_word EA;
3146 unsigned16 h1, h2;
3147 if (RA_is_0) b = 0;
3148 else b = *rA;
3149 EA = b + *rB;
3150 h2 = MEM(unsigned, EA, 2);
3151 if (h2 & 0x8000)
3152 h1 = 0xffff;
3153 else
3154 h1 = 0;
3155 EV_SET_REG4(*rSh, *rS, h1, h2, h1, h2);
3156 PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
3157
3158
# Double-word stores (UIMM scaled by 8 for non-indexed forms).  The
# 8-byte transfer is performed as two 4-byte (or four 2-byte) stores,
# high register half first.
3159 0.4,6.RS,11.RA,16.UIMM,21.801:EVX:e500:evstdd %RS,%RA,%UIMM:Vector Store Double of Double
3160 unsigned_word b;
3161 unsigned_word EA;
3162 if (RA_is_0) b = 0;
3163 else b = *rA;
3164 EA = b + (UIMM << 3);
3165 STORE(EA, 4, (*rSh));
3166 STORE(EA + 4, 4, (*rS));
3167 PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
3168
3169 0.4,6.RS,11.RA,16.RB,21.800:EVX:e500:evstddx %RS,%RA,%RB:Vector Store Double of Double Indexed
3170 unsigned_word b;
3171 unsigned_word EA;
3172 if (RA_is_0) b = 0;
3173 else b = *rA;
3174 EA = b + *rB;
3175 STORE(EA, 4, (*rSh));
3176 STORE(EA + 4, 4, (*rS));
3177 PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
3178
3179 0.4,6.RS,11.RA,16.UIMM,21.803:EVX:e500:evstdw %RS,%RA,%UIMM:Vector Store Double of Two Words
3180 unsigned_word b;
3181 unsigned_word EA;
3182 unsigned32 w1, w2;
3183 if (RA_is_0) b = 0;
3184 else b = *rA;
3185 EA = b + (UIMM << 3);
3186 w1 = *rSh;
3187 w2 = *rS;
3188 STORE(EA + 0, 4, w1);
3189 STORE(EA + 4, 4, w2);
3190 PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
3191
3192 0.4,6.RS,11.RA,16.RB,21.802:EVX:e500:evstdwx %RS,%RA,%RB:Vector Store Double of Two Words Indexed
3193 unsigned_word b;
3194 unsigned_word EA;
3195 unsigned32 w1, w2;
3196 if (RA_is_0) b = 0;
3197 else b = *rA;
3198 EA = b + *rB;
3199 w1 = *rSh;
3200 w2 = *rS;
3201 STORE(EA + 0, 4, w1);
3202 STORE(EA + 4, 4, w2);
3203 PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
3204
3205 0.4,6.RS,11.RA,16.UIMM,21.805:EVX:e500:evstdh %RS,%RA,%UIMM:Vector Store Double of Four Half Words
3206 unsigned_word b;
3207 unsigned_word EA;
3208 unsigned16 h1, h2, h3, h4;
3209 if (RA_is_0) b = 0;
3210 else b = *rA;
3211 EA = b + (UIMM << 3);
3212 h1 = EV_HIHALF(*rSh);
3213 h2 = EV_LOHALF(*rSh);
3214 h3 = EV_HIHALF(*rS);
3215 h4 = EV_LOHALF(*rS);
3216 STORE(EA + 0, 2, h1);
3217 STORE(EA + 2, 2, h2);
3218 STORE(EA + 4, 2, h3);
3219 STORE(EA + 6, 2, h4);
3220 PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
3221
3222 0.4,6.RS,11.RA,16.RB,21.804:EVX:e500:evstdhx %RS,%RA,%RB:Vector Store Double of Four Half Words Indexed
3223 unsigned_word b;
3224 unsigned_word EA;
3225 unsigned16 h1, h2, h3, h4;
3226 if (RA_is_0) b = 0;
3227 else b = *rA;
3228 EA = b + *rB;
3229 h1 = EV_HIHALF(*rSh);
3230 h2 = EV_LOHALF(*rSh);
3231 h3 = EV_HIHALF(*rS);
3232 h4 = EV_LOHALF(*rS);
3233 STORE(EA + 0, 2, h1);
3234 STORE(EA + 2, 2, h2);
3235 STORE(EA + 4, 2, h3);
3236 STORE(EA + 6, 2, h4);
3237 PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
3238
0.4,6.RS,11.RA,16.UIMM,21.825:EVX:e500:evstwwe %RS,%RA,%UIMM:Vector Store Word of Word from Even
  unsigned_word b;
  unsigned_word EA;
  unsigned32 w;
  if (RA_is_0) b = 0;
  else b = *rA;
  /* Word store: UIMM scales by 4 (<< 2) per the SPE ISA, not by 8 as
     the double-word forms do.  */
  EA = b + (UIMM << 2);
  w = *rSh;
  STORE(EA, 4, w);
  PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
3249
# evstwwex: store the even (high) word of rS at *rA|0 + *rB.
3250 0.4,6.RS,11.RA,16.RB,21.824:EVX:e500:evstwwex %RS,%RA,%RB:Vector Store Word of Word from Even Indexed
3251 unsigned_word b;
3252 unsigned_word EA;
3253 unsigned32 w;
3254 if (RA_is_0) b = 0;
3255 else b = *rA;
3256 EA = b + *rB;
3257 w = *rSh;
3258 STORE(EA, 4, w);
3259 PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
3260
0.4,6.RS,11.RA,16.UIMM,21.829:EVX:e500:evstwwo %RS,%RA,%UIMM:Vector Store Word of Word from Odd
  unsigned_word b;
  unsigned_word EA;
  unsigned32 w;
  if (RA_is_0) b = 0;
  else b = *rA;
  /* Word store: UIMM scales by 4 (<< 2) per the SPE ISA, not by 8.  */
  EA = b + (UIMM << 2);
  w = *rS;
  STORE(EA, 4, w);
  PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
3271
# evstwwox: store the odd (low) word of rS at *rA|0 + *rB.
3272 0.4,6.RS,11.RA,16.RB,21.828:EVX:e500:evstwwox %RS,%RA,%RB:Vector Store Word of Word from Odd Indexed
3273 unsigned_word b;
3274 unsigned_word EA;
3275 unsigned32 w;
3276 if (RA_is_0) b = 0;
3277 else b = *rA;
3278 EA = b + *rB;
3279 w = *rS;
3280 STORE(EA, 4, w);
3281 PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
3282
0.4,6.RS,11.RA,16.UIMM,21.817:EVX:e500:evstwhe %RS,%RA,%UIMM:Vector Store Word of Two Half Words from Even
  unsigned_word b;
  unsigned_word EA;
  unsigned16 h1, h2;
  if (RA_is_0) b = 0;
  else b = *rA;
  /* Word store: UIMM scales by 4 (<< 2) per the SPE ISA, not by 8.  */
  EA = b + (UIMM << 2);
  h1 = EV_HIHALF(*rSh);
  h2 = EV_HIHALF(*rS);
  STORE(EA + 0, 2, h1);
  STORE(EA + 2, 2, h2);
  PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
3295
# evstwhex: store the even (high) half of each word of rS at *rA|0 + *rB.
3296 0.4,6.RS,11.RA,16.RB,21.816:EVX:e500:evstwhex %RS,%RA,%RB:Vector Store Word of Two Half Words from Even Indexed
3297 unsigned_word b;
3298 unsigned_word EA;
3299 unsigned16 h1, h2;
3300 if (RA_is_0) b = 0;
3301 else b = *rA;
3302 EA = b + *rB;
3303 h1 = EV_HIHALF(*rSh);
3304 h2 = EV_HIHALF(*rS);
3305 STORE(EA + 0, 2, h1);
3306 STORE(EA + 2, 2, h2);
3307 PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
3308
0.4,6.RS,11.RA,16.UIMM,21.821:EVX:e500:evstwho %RS,%RA,%UIMM:Vector Store Word of Two Half Words from Odd
  unsigned_word b;
  unsigned_word EA;
  unsigned16 h1, h2;
  if (RA_is_0) b = 0;
  else b = *rA;
  /* Word store: UIMM scales by 4 (<< 2) per the SPE ISA, not by 8.  */
  EA = b + (UIMM << 2);
  h1 = EV_LOHALF(*rSh);
  h2 = EV_LOHALF(*rS);
  STORE(EA + 0, 2, h1);
  STORE(EA + 2, 2, h2);
  PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
3321
# evstwhox: store the odd (low) half of each word of rS at *rA|0 + *rB.
3322 0.4,6.RS,11.RA,16.RB,21.820:EVX:e500:evstwhox %RS,%RA,%RB:Vector Store Word of Two Half Words from Odd Indexed
3323 unsigned_word b;
3324 unsigned_word EA;
3325 unsigned16 h1, h2;
3326 if (RA_is_0) b = 0;
3327 else b = *rA;
3328 EA = b + *rB;
3329 h1 = EV_LOHALF(*rSh);
3330 h2 = EV_LOHALF(*rS);
3331 STORE(EA + 0, 2, h1);
3332 STORE(EA + 2, 2, h2);
3333 PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
3334
3335
3336 #
3337 # 4.5.1 Integer Select Instruction
3338 #
3339
0.31,6.RS,11.RA,16.RB,21.CRB,26.30:X:e500:isel %RS,%RA,%RB,%CRB:Integer Select
  /* rS = (RA == 0 ? 0 : rA) when CR bit CRB is set, else rS = rB.
     The original nested if/else relied on dangling-else binding; the
     braces make the pairing explicit without changing behavior.  */
  if (CR & (1 << (31 - (unsigned)CRB)))
    {
      if (RA_is_0)
        EV_SET_REG1(*rSh, *rS, 0);
      else
        EV_SET_REG2(*rSh, *rS, *rAh, *rA);
    }
  else
    EV_SET_REG2(*rSh, *rS, *rBh, *rB);
  PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
This page took 0.093281 seconds and 5 git commands to generate.