/* simulator.c -- Interface for the AArch64 simulator.

   Copyright (C) 2015-2016 Free Software Foundation, Inc.

   Contributed by Red Hat.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "config.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <syscall.h>
#include <math.h>
#include <time.h>
#include <limits.h>

#include "dis-asm.h"

#include "simulator.h"
#include "cpustate.h"
#include "memory.h"

#define NO_SP 0
#define SP_OK 1

bfd_boolean disas = FALSE;

#define TST(_flag)     (aarch64_test_CPSR_bit (cpu, _flag))
#define IS_SET(_X)     ( TST (( _X )))
#define IS_CLEAR(_X)   (!TST (( _X )))

#define HALT_UNALLOC                                                    \
  do                                                                    \
    {                                                                   \
      if (TRACE_INSN_P (cpu))                                           \
        {                                                               \
          aarch64_print_insn (CPU_STATE (cpu), aarch64_get_PC (cpu));   \
          TRACE_INSN (cpu,                                              \
                      "Unallocated instruction detected at sim line %d,"\
                      " exe addr %" PRIx64,                             \
                      __LINE__, aarch64_get_PC (cpu));                  \
        }                                                               \
      sim_engine_halt (CPU_STATE (cpu), cpu, NULL, aarch64_get_PC (cpu),\
                       sim_stopped, SIM_SIGILL);                        \
    }                                                                   \
  while (0)

#define HALT_NYI                                                        \
  do                                                                    \
    {                                                                   \
      if (TRACE_INSN_P (cpu))                                           \
        {                                                               \
          aarch64_print_insn (CPU_STATE (cpu), aarch64_get_PC (cpu));   \
          TRACE_INSN (cpu,                                              \
                      "Unimplemented instruction detected at sim line %d,"\
                      " exe addr %" PRIx64,                             \
                      __LINE__, aarch64_get_PC (cpu));                  \
        }                                                               \
      sim_engine_halt (CPU_STATE (cpu), cpu, NULL, aarch64_get_PC (cpu),\
                       sim_stopped, SIM_SIGABRT);                       \
    }                                                                   \
  while (0)

#define NYI_assert(HI, LO, EXPECTED)                                    \
  do                                                                    \
    {                                                                   \
      if (uimm (aarch64_get_instr (cpu), (HI), (LO)) != (EXPECTED))     \
        HALT_NYI;                                                       \
    }                                                                   \
  while (0)

#define HALT_UNREACHABLE                                                \
  do                                                                    \
    {                                                                   \
      TRACE_EVENTS (cpu, "ISE: unreachable code point");                \
      sim_engine_abort (NULL, cpu, aarch64_get_PC (cpu), "Internal Error"); \
    }                                                                   \
  while (0)

/* Helper functions used by expandLogicalImmediate.  */

/* For i = 1, ..., N result<i-1> = 1; other bits are zero.  */
static inline uint64_t
ones (int N)
{
  /* Use a 64-bit 1 so that the shift is well defined on 32-bit hosts.  */
  return (N == 64 ? (uint64_t) -1 : (((uint64_t) 1 << N) - 1));
}

/* result<0> = val<N>.  */
static inline uint64_t
pickbit (uint64_t val, int N)
{
  return pickbits64 (val, N, N);
}

static uint64_t
expand_logical_immediate (uint32_t S, uint32_t R, uint32_t N)
{
  uint64_t mask;
  uint64_t imm;
  unsigned simd_size;

  /* The immediate value is S+1 bits to 1, left rotated by SIMDsize - R
     (in other words, right rotated by R), then replicated.  */
  if (N != 0)
    {
      simd_size = 64;
      mask = 0xffffffffffffffffull;
    }
  else
    {
      switch (S)
        {
        case 0x00 ... 0x1f: /* 0xxxxx */ simd_size = 32;           break;
        case 0x20 ... 0x2f: /* 10xxxx */ simd_size = 16; S &= 0xf; break;
        case 0x30 ... 0x37: /* 110xxx */ simd_size =  8; S &= 0x7; break;
        case 0x38 ... 0x3b: /* 1110xx */ simd_size =  4; S &= 0x3; break;
        case 0x3c ... 0x3d: /* 11110x */ simd_size =  2; S &= 0x1; break;
        default: return 0;
        }
      mask = (1ull << simd_size) - 1;
      /* Top bits are IGNORED.  */
      R &= simd_size - 1;
    }

  /* NOTE: if S = simd_size - 1 we get 0xf..f which is rejected.  */
  if (S == simd_size - 1)
    return 0;

  /* S+1 consecutive bits to 1.  */
  /* NOTE: S can't be 63 due to detection above.  */
  imm = (1ull << (S + 1)) - 1;

  /* Rotate to the left by simd_size - R.  */
  if (R != 0)
    imm = ((imm << (simd_size - R)) & mask) | (imm >> R);

  /* Replicate the value according to SIMD size.  */
  switch (simd_size)
    {
    case  2: imm = (imm <<  2) | imm; /* Fall through.  */
    case  4: imm = (imm <<  4) | imm; /* Fall through.  */
    case  8: imm = (imm <<  8) | imm; /* Fall through.  */
    case 16: imm = (imm << 16) | imm; /* Fall through.  */
    case 32: imm = (imm << 32) | imm; /* Fall through.  */
    case 64: break;
    default: return 0;
    }

  return imm;
}

/* Instr[22,10] encodes N, immr and imms.  We want a lookup table
   for each possible combination, i.e. 13 bits worth of int entries.  */
#define LI_TABLE_SIZE  (1 << 13)
static uint64_t LITable[LI_TABLE_SIZE];

void
aarch64_init_LIT_table (void)
{
  unsigned index;

  for (index = 0; index < LI_TABLE_SIZE; index++)
    {
      uint32_t N    = uimm (index, 12, 12);
      uint32_t immr = uimm (index, 11, 6);
      uint32_t imms = uimm (index, 5, 0);

      LITable [index] = expand_logical_immediate (imms, immr, N);
    }
}

static void
dexNotify (sim_cpu *cpu)
{
  /* instr[14,0] == type : 0 ==> method entry, 1 ==> method reentry
                           2 ==> exit Java, 3 ==> start next bytecode.  */
  uint32_t type = uimm (aarch64_get_instr (cpu), 14, 0);

  TRACE_EVENTS (cpu, "Notify Insn encountered, type = 0x%x", type);

  switch (type)
    {
    case 0:
      /* aarch64_notifyMethodEntry (aarch64_get_reg_u64 (cpu, R23, 0),
         aarch64_get_reg_u64 (cpu, R22, 0));  */
      break;
    case 1:
      /* aarch64_notifyMethodReentry (aarch64_get_reg_u64 (cpu, R23, 0),
         aarch64_get_reg_u64 (cpu, R22, 0));  */
      break;
    case 2:
      /* aarch64_notifyMethodExit ();  */
      break;
    case 3:
      /* aarch64_notifyBCStart (aarch64_get_reg_u64 (cpu, R23, 0),
         aarch64_get_reg_u64 (cpu, R22, 0));  */
      break;
    }
}

/* Secondary decode within top level groups.  */

static void
dexPseudo (sim_cpu *cpu)
{
  /* assert instr[28,27] = 00

     We provide 2 pseudo instructions:

     HALT stops execution of the simulator causing an immediate
     return to the x86 code which entered it.

     CALLOUT initiates recursive entry into x86 code.  A register
     argument holds the address of the x86 routine.  Immediate
     values in the instruction identify the number of general
     purpose and floating point register arguments to be passed
     and the type of any value to be returned.  */

  uint32_t PSEUDO_HALT     = 0xE0000000U;
  uint32_t PSEUDO_CALLOUT  = 0x00018000U;
  uint32_t PSEUDO_CALLOUTR = 0x00018001U;
  uint32_t PSEUDO_NOTIFY   = 0x00014000U;
  uint32_t dispatch;

  if (aarch64_get_instr (cpu) == PSEUDO_HALT)
    {
      TRACE_EVENTS (cpu, " Pseudo Halt Instruction");
      sim_engine_halt (CPU_STATE (cpu), cpu, NULL, aarch64_get_PC (cpu),
                       sim_stopped, SIM_SIGTRAP);
    }

  dispatch = uimm (aarch64_get_instr (cpu), 31, 15);

  /* We do not handle callouts at the moment.  */
  if (dispatch == PSEUDO_CALLOUT || dispatch == PSEUDO_CALLOUTR)
    {
      TRACE_EVENTS (cpu, " Callout");
      sim_engine_halt (CPU_STATE (cpu), cpu, NULL, aarch64_get_PC (cpu),
                       sim_stopped, SIM_SIGABRT);
    }

  else if (dispatch == PSEUDO_NOTIFY)
    dexNotify (cpu);

  else
    HALT_UNALLOC;
}

/* Load-store single register (unscaled offset)
   These instructions employ a base register plus an unscaled signed
   9 bit offset.

   N.B. the base register (source) can be Xn or SP.  All other
   registers may not be SP.  */

/* 32 bit load 32 bit unscaled signed 9 bit.  */
static void
ldur32 (sim_cpu *cpu, int32_t offset)
{
  unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
  unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);

  aarch64_set_reg_u64 (cpu, rt, NO_SP, aarch64_get_mem_u32
                       (cpu, aarch64_get_reg_u64 (cpu, rn, SP_OK)
                        + offset));
}

/* 64 bit load 64 bit unscaled signed 9 bit.  */
static void
ldur64 (sim_cpu *cpu, int32_t offset)
{
  unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
  unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);

  aarch64_set_reg_u64 (cpu, rt, NO_SP, aarch64_get_mem_u64
                       (cpu, aarch64_get_reg_u64 (cpu, rn, SP_OK)
                        + offset));
}

/* 32 bit load zero-extended byte unscaled signed 9 bit.  */
static void
ldurb32 (sim_cpu *cpu, int32_t offset)
{
  unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
  unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);

  aarch64_set_reg_u64 (cpu, rt, NO_SP, aarch64_get_mem_u8
                       (cpu, aarch64_get_reg_u64 (cpu, rn, SP_OK)
                        + offset));
}

/* 32 bit load sign-extended byte unscaled signed 9 bit.  */
static void
ldursb32 (sim_cpu *cpu, int32_t offset)
{
  unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
  unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);

  aarch64_set_reg_u64 (cpu, rt, NO_SP, (uint32_t) aarch64_get_mem_s8
                       (cpu, aarch64_get_reg_u64 (cpu, rn, SP_OK)
                        + offset));
}

/* 64 bit load sign-extended byte unscaled signed 9 bit.  */
static void
ldursb64 (sim_cpu *cpu, int32_t offset)
{
  unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
  unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);

  aarch64_set_reg_s64 (cpu, rt, NO_SP, aarch64_get_mem_s8
                       (cpu, aarch64_get_reg_u64 (cpu, rn, SP_OK)
                        + offset));
}

/* 32 bit load zero-extended short unscaled signed 9 bit.  */
static void
ldurh32 (sim_cpu *cpu, int32_t offset)
{
  unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
  unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);

  aarch64_set_reg_u64 (cpu, rd, NO_SP, aarch64_get_mem_u16
                       (cpu, aarch64_get_reg_u64 (cpu, rn, SP_OK)
                        + offset));
}

/* 32 bit load sign-extended short unscaled signed 9 bit.  */
static void
ldursh32 (sim_cpu *cpu, int32_t offset)
{
  unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
  unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);

  aarch64_set_reg_u64 (cpu, rd, NO_SP, (uint32_t) aarch64_get_mem_s16
                       (cpu, aarch64_get_reg_u64 (cpu, rn, SP_OK)
                        + offset));
}

/* 64 bit load sign-extended short unscaled signed 9 bit.  */
static void
ldursh64 (sim_cpu *cpu, int32_t offset)
{
  unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
  unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);

  aarch64_set_reg_s64 (cpu, rt, NO_SP, aarch64_get_mem_s16
                       (cpu, aarch64_get_reg_u64 (cpu, rn, SP_OK)
                        + offset));
}

/* 64 bit load sign-extended word unscaled signed 9 bit.  */
static void
ldursw (sim_cpu *cpu, int32_t offset)
{
  unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
  unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);

  aarch64_set_reg_u64 (cpu, rd, NO_SP, (uint32_t) aarch64_get_mem_s32
                       (cpu, aarch64_get_reg_u64 (cpu, rn, SP_OK)
                        + offset));
}

/* N.B. with stores the value in source is written to the address
   identified by source2 modified by offset.  */

/* 32 bit store 32 bit unscaled signed 9 bit.  */
static void
stur32 (sim_cpu *cpu, int32_t offset)
{
  unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
  unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);

  aarch64_set_mem_u32 (cpu,
                       aarch64_get_reg_u64 (cpu, rn, SP_OK) + offset,
                       aarch64_get_reg_u32 (cpu, rd, NO_SP));
}

/* 64 bit store 64 bit unscaled signed 9 bit.  */
static void
stur64 (sim_cpu *cpu, int32_t offset)
{
  unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
  unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);

  aarch64_set_mem_u64 (cpu,
                       aarch64_get_reg_u64 (cpu, rn, SP_OK) + offset,
                       aarch64_get_reg_u64 (cpu, rd, NO_SP));
}

/* 32 bit store byte unscaled signed 9 bit.  */
static void
sturb (sim_cpu *cpu, int32_t offset)
{
  unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
  unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);

  aarch64_set_mem_u8 (cpu,
                      aarch64_get_reg_u64 (cpu, rn, SP_OK) + offset,
                      aarch64_get_reg_u8 (cpu, rd, NO_SP));
}

/* 32 bit store short unscaled signed 9 bit.  */
static void
sturh (sim_cpu *cpu, int32_t offset)
{
  unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
  unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);

  aarch64_set_mem_u16 (cpu,
                       aarch64_get_reg_u64 (cpu, rn, SP_OK) + offset,
                       aarch64_get_reg_u16 (cpu, rd, NO_SP));
}

/* Load single register pc-relative label
   Offset is a signed 19 bit immediate count in words
   rt may not be SP.  */

/* 32 bit pc-relative load.  */
static void
ldr32_pcrel (sim_cpu *cpu, int32_t offset)
{
  unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);

  aarch64_set_reg_u64 (cpu, rd, NO_SP,
                       aarch64_get_mem_u32
                       (cpu, aarch64_get_PC (cpu) + offset * 4));
}

/* 64 bit pc-relative load.  */
static void
ldr_pcrel (sim_cpu *cpu, int32_t offset)
{
  unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);

  aarch64_set_reg_u64 (cpu, rd, NO_SP,
                       aarch64_get_mem_u64
                       (cpu, aarch64_get_PC (cpu) + offset * 4));
}

/* Sign extended 32 bit pc-relative load.  */
static void
ldrsw_pcrel (sim_cpu *cpu, int32_t offset)
{
  unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);

  aarch64_set_reg_u64 (cpu, rd, NO_SP,
                       aarch64_get_mem_s32
                       (cpu, aarch64_get_PC (cpu) + offset * 4));
}

/* Float pc-relative load.  */
static void
fldrs_pcrel (sim_cpu *cpu, int32_t offset)
{
  unsigned int rd = uimm (aarch64_get_instr (cpu), 4, 0);

  aarch64_set_FP_float (cpu, rd,
                        aarch64_get_mem_float
                        (cpu, aarch64_get_PC (cpu) + offset * 4));
}

/* Double pc-relative load.  */
static void
fldrd_pcrel (sim_cpu *cpu, int32_t offset)
{
  unsigned int st = uimm (aarch64_get_instr (cpu), 4, 0);

  aarch64_set_FP_double (cpu, st,
                         aarch64_get_mem_double
                         (cpu, aarch64_get_PC (cpu) + offset * 4));
}

/* Long double pc-relative load.  */
static void
fldrq_pcrel (sim_cpu *cpu, int32_t offset)
{
  unsigned int st = uimm (aarch64_get_instr (cpu), 4, 0);
  uint64_t addr = aarch64_get_PC (cpu) + offset * 4;
  FRegister a;

  aarch64_get_mem_long_double (cpu, addr, & a);
  aarch64_set_FP_long_double (cpu, st, a);
}

/* This can be used to scale an offset by applying
   the requisite shift.  The second argument is the element
   size in bits: 16, 32, 64 or 128.  */

#define SCALE(_offset, _elementSize) \
    ((_offset) << ScaleShift ## _elementSize)

/* This can be used to optionally scale a register derived offset
   by applying the requisite shift as indicated by the Scaling
   argument.  The second argument is the element size in bits:
   16, 32, 64 or 128.  The third argument is either Scaled or
   Unscaled.  N.B. when _Scaling is Scaled the offset is shifted
   left by the element's scale shift; when it is Unscaled the
   shift amount is zero.  */

#define OPT_SCALE(_offset, _elementType, _Scaling) \
  ((_offset) << (_Scaling ? ScaleShift ## _elementType : 0))

/* This can be used to zero or sign extend a 32 bit register derived
   value to a 64 bit value.  The first argument must be the value as
   a uint32_t and the second must be either UXTW or SXTW.  The result
   is returned as an int64_t.  */

static inline int64_t
extend (uint32_t value, Extension extension)
{
  union
  {
    uint32_t u;
    int32_t  n;
  } x;

  /* A branchless variant of this ought to be possible.  */
  if (extension == UXTW || extension == NoExtension)
    return value;

  x.u = value;
  return x.n;
}

/* Scalar Floating Point

   FP load/store single register (4 addressing modes)

   N.B. the base register (source) can be the stack pointer.
   The secondary source register (source2) can only be an Xn register.  */

/* Load 32 bit unscaled signed 9 bit with pre- or post-writeback.  */
static void
fldrs_wb (sim_cpu *cpu, int32_t offset, WriteBack wb)
{
  unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
  unsigned st = uimm (aarch64_get_instr (cpu), 4, 0);
  uint64_t address = aarch64_get_reg_u64 (cpu, rn, SP_OK);

  if (wb != Post)
    address += offset;

  aarch64_set_FP_float (cpu, st, aarch64_get_mem_float (cpu, address));
  if (wb == Post)
    address += offset;

  if (wb != NoWriteBack)
    aarch64_set_reg_u64 (cpu, rn, SP_OK, address);
}

/* Load 32 bit scaled unsigned 12 bit.  */
static void
fldrs_abs (sim_cpu *cpu, uint32_t offset)
{
  unsigned st = uimm (aarch64_get_instr (cpu), 4, 0);
  unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);

  aarch64_set_FP_float (cpu, st,
                        aarch64_get_mem_float
                        (cpu, aarch64_get_reg_u64 (cpu, rn, SP_OK)
                         + SCALE (offset, 32)));
}

/* Load 32 bit scaled or unscaled zero- or sign-extended
   32-bit register offset.  */
static void
fldrs_scale_ext (sim_cpu *cpu, Scaling scaling, Extension extension)
{
  unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
  unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
  unsigned st = uimm (aarch64_get_instr (cpu), 4, 0);
  uint64_t address = aarch64_get_reg_u64 (cpu, rn, SP_OK);
  int64_t extended = extend (aarch64_get_reg_u32 (cpu, rm, NO_SP), extension);
  uint64_t displacement = OPT_SCALE (extended, 32, scaling);

  aarch64_set_FP_float (cpu, st,
                        aarch64_get_mem_float
                        (cpu, address + displacement));
}

/* Load 64 bit unscaled signed 9 bit with pre- or post-writeback.  */
static void
fldrd_wb (sim_cpu *cpu, int32_t offset, WriteBack wb)
{
  unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
  unsigned st = uimm (aarch64_get_instr (cpu), 4, 0);
  uint64_t address = aarch64_get_reg_u64 (cpu, rn, SP_OK);

  if (wb != Post)
    address += offset;

  aarch64_set_FP_double (cpu, st, aarch64_get_mem_double (cpu, address));

  if (wb == Post)
    address += offset;

  if (wb != NoWriteBack)
    aarch64_set_reg_u64 (cpu, rn, SP_OK, address);
}

/* Load 64 bit scaled unsigned 12 bit.  */
static void
fldrd_abs (sim_cpu *cpu, uint32_t offset)
{
  unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
  unsigned st = uimm (aarch64_get_instr (cpu), 4, 0);
  uint64_t address = aarch64_get_reg_u64 (cpu, rn, SP_OK) + SCALE (offset, 64);

  aarch64_set_FP_double (cpu, st, aarch64_get_mem_double (cpu, address));
}

/* Load 64 bit scaled or unscaled zero- or sign-extended
   32-bit register offset.  */
static void
fldrd_scale_ext (sim_cpu *cpu, Scaling scaling, Extension extension)
{
  unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
  int64_t extended = extend (aarch64_get_reg_u32 (cpu, rm, NO_SP), extension);
  uint64_t displacement = OPT_SCALE (extended, 64, scaling);

  fldrd_wb (cpu, displacement, NoWriteBack);
}

/* Load 128 bit unscaled signed 9 bit with pre- or post-writeback.  */
static void
fldrq_wb (sim_cpu *cpu, int32_t offset, WriteBack wb)
{
  FRegister a;
  unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
  unsigned st = uimm (aarch64_get_instr (cpu), 4, 0);
  uint64_t address = aarch64_get_reg_u64 (cpu, rn, SP_OK);

  if (wb != Post)
    address += offset;

  aarch64_get_mem_long_double (cpu, address, & a);
  aarch64_set_FP_long_double (cpu, st, a);

  if (wb == Post)
    address += offset;

  if (wb != NoWriteBack)
    aarch64_set_reg_u64 (cpu, rn, SP_OK, address);
}

/* Load 128 bit scaled unsigned 12 bit.  */
static void
fldrq_abs (sim_cpu *cpu, uint32_t offset)
{
  FRegister a;
  unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
  unsigned st = uimm (aarch64_get_instr (cpu), 4, 0);
  uint64_t address = aarch64_get_reg_u64 (cpu, rn, SP_OK) + SCALE (offset, 128);

  aarch64_get_mem_long_double (cpu, address, & a);
  aarch64_set_FP_long_double (cpu, st, a);
}

/* Load 128 bit scaled or unscaled zero- or sign-extended
   32-bit register offset.  */
static void
fldrq_scale_ext (sim_cpu *cpu, Scaling scaling, Extension extension)
{
  unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
  int64_t extended = extend (aarch64_get_reg_u32 (cpu, rm, NO_SP), extension);
  uint64_t displacement = OPT_SCALE (extended, 128, scaling);

  fldrq_wb (cpu, displacement, NoWriteBack);
}

/* Memory Access

   load-store single register
   There are four addressing modes available here which all employ a
   64 bit source (base) register.

   N.B. the base register (source) can be the stack pointer.
   The secondary source register (source2) can only be an Xn register.

   Scaled, 12-bit, unsigned immediate offset, without pre- and
   post-index options.
   Unscaled, 9-bit, signed immediate offset with pre- or post-index
   writeback.
   Scaled or unscaled 64-bit register offset.
   Scaled or unscaled 32-bit extended register offset.

   All offsets are assumed to be raw from the decode, i.e. the
   simulator is expected to adjust scaled offsets based on the
   accessed data size.  With register or extended register offset
   versions the same applies, except that in the latter case the
   operation may also require a sign extend.

   A separate method is provided for each possible addressing mode.  */

/* 32 bit load 32 bit scaled unsigned 12 bit.  */
static void
ldr32_abs (sim_cpu *cpu, uint32_t offset)
{
  unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
  unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);

  /* The target register may not be SP but the source may be.  */
  aarch64_set_reg_u64 (cpu, rt, NO_SP, aarch64_get_mem_u32
                       (cpu, aarch64_get_reg_u64 (cpu, rn, SP_OK)
                        + SCALE (offset, 32)));
}

/* 32 bit load 32 bit unscaled signed 9 bit with pre- or post-writeback.  */
static void
ldr32_wb (sim_cpu *cpu, int32_t offset, WriteBack wb)
{
  unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
  unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);
  uint64_t address;

  if (rn == rt && wb != NoWriteBack)
    HALT_UNALLOC;

  address = aarch64_get_reg_u64 (cpu, rn, SP_OK);

  if (wb != Post)
    address += offset;

  aarch64_set_reg_u64 (cpu, rt, NO_SP, aarch64_get_mem_u32 (cpu, address));

  if (wb == Post)
    address += offset;

  if (wb != NoWriteBack)
    aarch64_set_reg_u64 (cpu, rn, SP_OK, address);
}

/* 32 bit load 32 bit scaled or unscaled
   zero- or sign-extended 32-bit register offset.  */
static void
ldr32_scale_ext (sim_cpu *cpu, Scaling scaling, Extension extension)
{
  unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
  unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
  unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);
  /* rn may reference SP, rm and rt must reference ZR.  */

  uint64_t address = aarch64_get_reg_u64 (cpu, rn, SP_OK);
  int64_t extended = extend (aarch64_get_reg_u32 (cpu, rm, NO_SP), extension);
  uint64_t displacement = OPT_SCALE (extended, 32, scaling);

  aarch64_set_reg_u64 (cpu, rt, NO_SP,
                       aarch64_get_mem_u32 (cpu, address + displacement));
}

/* 64 bit load 64 bit scaled unsigned 12 bit.  */
static void
ldr_abs (sim_cpu *cpu, uint32_t offset)
{
  unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
  unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);

  /* The target register may not be SP but the source may be.  */
  aarch64_set_reg_u64 (cpu, rt, NO_SP, aarch64_get_mem_u64
                       (cpu, aarch64_get_reg_u64 (cpu, rn, SP_OK)
                        + SCALE (offset, 64)));
}

/* 64 bit load 64 bit unscaled signed 9 bit with pre- or post-writeback.  */
static void
ldr_wb (sim_cpu *cpu, int32_t offset, WriteBack wb)
{
  unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
  unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);
  uint64_t address;

  if (rn == rt && wb != NoWriteBack)
    HALT_UNALLOC;

  address = aarch64_get_reg_u64 (cpu, rn, SP_OK);

  if (wb != Post)
    address += offset;

  aarch64_set_reg_u64 (cpu, rt, NO_SP, aarch64_get_mem_u64 (cpu, address));

  if (wb == Post)
    address += offset;

  if (wb != NoWriteBack)
    aarch64_set_reg_u64 (cpu, rn, SP_OK, address);
}

/* 64 bit load 64 bit scaled or unscaled zero-
   or sign-extended 32-bit register offset.  */
static void
ldr_scale_ext (sim_cpu *cpu, Scaling scaling, Extension extension)
{
  unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
  unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
  unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);
  /* rn may reference SP, rm and rt must reference ZR.  */

  uint64_t address = aarch64_get_reg_u64 (cpu, rn, SP_OK);
  int64_t extended = extend (aarch64_get_reg_u32 (cpu, rm, NO_SP), extension);
  uint64_t displacement = OPT_SCALE (extended, 64, scaling);

  aarch64_set_reg_u64 (cpu, rt, NO_SP,
                       aarch64_get_mem_u64 (cpu, address + displacement));
}

/* 32 bit load zero-extended byte scaled unsigned 12 bit.  */
static void
ldrb32_abs (sim_cpu *cpu, uint32_t offset)
{
  unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
  unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);

  /* The target register may not be SP but the source may be.
     There is no scaling required for a byte load.  */
  aarch64_set_reg_u64 (cpu, rt, NO_SP,
                       aarch64_get_mem_u8
                       (cpu, aarch64_get_reg_u64 (cpu, rn, SP_OK) + offset));
}

/* 32 bit load zero-extended byte unscaled signed 9 bit
   with pre- or post-writeback.  */
static void
ldrb32_wb (sim_cpu *cpu, int32_t offset, WriteBack wb)
{
  unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
  unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);
  uint64_t address;

  if (rn == rt && wb != NoWriteBack)
    HALT_UNALLOC;

  address = aarch64_get_reg_u64 (cpu, rn, SP_OK);

  if (wb != Post)
    address += offset;

  aarch64_set_reg_u64 (cpu, rt, NO_SP, aarch64_get_mem_u8 (cpu, address));

  if (wb == Post)
    address += offset;

  if (wb != NoWriteBack)
    aarch64_set_reg_u64 (cpu, rn, SP_OK, address);
}

/* 32 bit load zero-extended byte scaled or unscaled zero-
   or sign-extended 32-bit register offset.  */
static void
ldrb32_scale_ext (sim_cpu *cpu, Scaling scaling, Extension extension)
{
  unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
  unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
  unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);
  /* rn may reference SP, rm and rt must reference ZR.  */

  uint64_t address = aarch64_get_reg_u64 (cpu, rn, SP_OK);
  int64_t displacement = extend (aarch64_get_reg_u32 (cpu, rm, NO_SP),
                                 extension);

  /* There is no scaling required for a byte load.  */
  aarch64_set_reg_u64 (cpu, rt, NO_SP,
                       aarch64_get_mem_u8 (cpu, address + displacement));
}

/* 64 bit load sign-extended byte unscaled signed 9 bit
   with pre- or post-writeback.  */
static void
ldrsb_wb (sim_cpu *cpu, int32_t offset, WriteBack wb)
{
  unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
  unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);
  uint64_t address;

  if (rn == rt && wb != NoWriteBack)
    HALT_UNALLOC;

  address = aarch64_get_reg_u64 (cpu, rn, SP_OK);

  if (wb != Post)
    address += offset;

  aarch64_set_reg_u64 (cpu, rt, NO_SP, aarch64_get_mem_s8 (cpu, address));

  if (wb == Post)
    address += offset;

  if (wb != NoWriteBack)
    aarch64_set_reg_u64 (cpu, rn, SP_OK, address);
}

/* 64 bit load sign-extended byte scaled unsigned 12 bit.  */
static void
ldrsb_abs (sim_cpu *cpu, uint32_t offset)
{
  ldrsb_wb (cpu, offset, NoWriteBack);
}

/* 64 bit load sign-extended byte scaled or unscaled zero-
   or sign-extended 32-bit register offset.  */
static void
ldrsb_scale_ext (sim_cpu *cpu, Scaling scaling, Extension extension)
{
  unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
  unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
  unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);
  /* rn may reference SP, rm and rt must reference ZR.  */

  uint64_t address = aarch64_get_reg_u64 (cpu, rn, SP_OK);
  int64_t displacement = extend (aarch64_get_reg_u32 (cpu, rm, NO_SP),
                                 extension);
  /* There is no scaling required for a byte load.  */
  aarch64_set_reg_u64 (cpu, rt, NO_SP,
                       aarch64_get_mem_s8 (cpu, address + displacement));
}

/* 32 bit load zero-extended short scaled unsigned 12 bit.  */
static void
ldrh32_abs (sim_cpu *cpu, uint32_t offset)
{
  unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
  unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);

  /* The target register may not be SP but the source may be.  */
  aarch64_set_reg_u64 (cpu, rt, NO_SP, aarch64_get_mem_u16
                       (cpu, aarch64_get_reg_u64 (cpu, rn, SP_OK)
                        + SCALE (offset, 16)));
}

/* 32 bit load zero-extended short unscaled signed 9 bit
   with pre- or post-writeback.  */
static void
ldrh32_wb (sim_cpu *cpu, int32_t offset, WriteBack wb)
{
  unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
  unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);
  uint64_t address;

  if (rn == rt && wb != NoWriteBack)
    HALT_UNALLOC;

  address = aarch64_get_reg_u64 (cpu, rn, SP_OK);

  if (wb != Post)
    address += offset;

  aarch64_set_reg_u64 (cpu, rt, NO_SP, aarch64_get_mem_u16 (cpu, address));

  if (wb == Post)
    address += offset;

  if (wb != NoWriteBack)
    aarch64_set_reg_u64 (cpu, rn, SP_OK, address);
}

/* 32 bit load zero-extended short scaled or unscaled zero-
   or sign-extended 32-bit register offset.  */
static void
ldrh32_scale_ext (sim_cpu *cpu, Scaling scaling, Extension extension)
{
  unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
  unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
  unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);
  /* rn may reference SP, rm and rt must reference ZR.  */

  uint64_t address = aarch64_get_reg_u64 (cpu, rn, SP_OK);
  int64_t extended = extend (aarch64_get_reg_u32 (cpu, rm, NO_SP), extension);
  uint64_t displacement = OPT_SCALE (extended, 16, scaling);

  aarch64_set_reg_u64 (cpu, rt, NO_SP,
                       aarch64_get_mem_u16 (cpu, address + displacement));
}

/* 32 bit load sign-extended short scaled unsigned 12 bit.  */
static void
ldrsh32_abs (sim_cpu *cpu, uint32_t offset)
{
  unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
  unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);

  /* The target register may not be SP but the source may be.  */
  aarch64_set_reg_u64 (cpu, rt, NO_SP, (uint32_t) aarch64_get_mem_s16
                       (cpu, aarch64_get_reg_u64 (cpu, rn, SP_OK)
                        + SCALE (offset, 16)));
}

/* 32 bit load sign-extended short unscaled signed 9 bit
   with pre- or post-writeback.  */
static void
ldrsh32_wb (sim_cpu *cpu, int32_t offset, WriteBack wb)
{
  unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
  unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);
  uint64_t address;

  if (rn == rt && wb != NoWriteBack)
    HALT_UNALLOC;

  address = aarch64_get_reg_u64 (cpu, rn, SP_OK);

  if (wb != Post)
    address += offset;

  aarch64_set_reg_u64 (cpu, rt, NO_SP,
                       (uint32_t) aarch64_get_mem_s16 (cpu, address));

  if (wb == Post)
    address += offset;

  if (wb != NoWriteBack)
    aarch64_set_reg_u64 (cpu, rn, SP_OK, address);
}

/* 32 bit load sign-extended short scaled or unscaled zero-
   or sign-extended 32-bit register offset.  */
static void
ldrsh32_scale_ext (sim_cpu *cpu, Scaling scaling, Extension extension)
{
  unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
  unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
  unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);
  /* rn may reference SP, rm and rt must reference ZR.  */

  uint64_t address = aarch64_get_reg_u64 (cpu, rn, SP_OK);
  int64_t extended = extend (aarch64_get_reg_u32 (cpu, rm, NO_SP), extension);
  uint64_t displacement = OPT_SCALE (extended, 16, scaling);

  aarch64_set_reg_u64 (cpu, rt, NO_SP,
                       (uint32_t) aarch64_get_mem_s16
                       (cpu, address + displacement));
}

/* 64 bit load sign-extended short scaled unsigned 12 bit.  */
static void
ldrsh_abs (sim_cpu *cpu, uint32_t offset)
{
  unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
  unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);

  /* The target register may not be SP but the source may be.  */
  aarch64_set_reg_u64 (cpu, rt, NO_SP, aarch64_get_mem_s16
                       (cpu, aarch64_get_reg_u64 (cpu, rn, SP_OK)
                        + SCALE (offset, 16)));
}

/* 64 bit load sign-extended short unscaled signed 9 bit
   with pre- or post-writeback.  */
static void
ldrsh64_wb (sim_cpu *cpu, int32_t offset, WriteBack wb)
{
  unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
  unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);
  uint64_t address;

  if (rn == rt && wb != NoWriteBack)
    HALT_UNALLOC;

  address = aarch64_get_reg_u64 (cpu, rn, SP_OK);

  if (wb != Post)
    address += offset;

  aarch64_set_reg_u64 (cpu, rt, NO_SP, aarch64_get_mem_s16 (cpu, address));

  if (wb == Post)
    address += offset;

  if (wb != NoWriteBack)
    aarch64_set_reg_u64 (cpu, rn, SP_OK, address);
}

/* 64 bit load sign-extended short scaled or unscaled zero-
   or sign-extended 32-bit register offset.  */
static void
ldrsh_scale_ext (sim_cpu *cpu, Scaling scaling, Extension extension)
{
  unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
  unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
  unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);
  /* rn may reference SP, rm and rt must reference ZR.  */

  uint64_t address = aarch64_get_reg_u64 (cpu, rn, SP_OK);
  int64_t extended = extend (aarch64_get_reg_u32 (cpu, rm, NO_SP), extension);
  uint64_t displacement = OPT_SCALE (extended, 16, scaling);

  aarch64_set_reg_u64 (cpu, rt, NO_SP,
                       aarch64_get_mem_s16 (cpu, address + displacement));
}

/* 64 bit load sign-extended 32 bit scaled unsigned 12 bit.  */
static void
ldrsw_abs (sim_cpu *cpu, uint32_t offset)
{
  unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
  unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);

  /* The target register may not be SP but the source may be.  */
  aarch64_set_reg_s64 (cpu, rt, NO_SP, aarch64_get_mem_s32
                       (cpu, aarch64_get_reg_u64 (cpu, rn, SP_OK)
                        + SCALE (offset, 32)));
}

/* 64 bit load sign-extended 32 bit unscaled signed 9 bit
   with pre- or post-writeback.  */
static void
ldrsw_wb (sim_cpu *cpu, int32_t offset, WriteBack wb)
{
  unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
  unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);
  uint64_t address;

  if (rn == rt && wb != NoWriteBack)
    HALT_UNALLOC;

  address = aarch64_get_reg_u64 (cpu, rn, SP_OK);

  if (wb != Post)
    address += offset;

  aarch64_set_reg_s64 (cpu, rt, NO_SP, aarch64_get_mem_s32 (cpu, address));

  if (wb == Post)
    address += offset;

  if (wb != NoWriteBack)
    aarch64_set_reg_u64 (cpu, rn, SP_OK, address);
}

/* 64 bit load sign-extended 32 bit scaled or unscaled zero-
   or sign-extended 32-bit register offset.  */
static void
ldrsw_scale_ext (sim_cpu *cpu, Scaling scaling, Extension extension)
{
  unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
  unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
  unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);
  /* rn may reference SP, rm and rt must reference ZR.  */

  uint64_t address = aarch64_get_reg_u64 (cpu, rn, SP_OK);
  int64_t extended = extend (aarch64_get_reg_u32 (cpu, rm, NO_SP), extension);
  uint64_t displacement = OPT_SCALE (extended, 32, scaling);

  aarch64_set_reg_s64 (cpu, rt, NO_SP,
                       aarch64_get_mem_s32 (cpu, address + displacement));
}

/* N.B. with stores the value in source is written to the
   address identified by source2 modified by source3/offset.  */

/* 32 bit store scaled unsigned 12 bit.  */
static void
str32_abs (sim_cpu *cpu, uint32_t offset)
{
  unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
  unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);

  /* The target register may not be SP but the source may be.  */
  aarch64_set_mem_u32 (cpu, (aarch64_get_reg_u64 (cpu, rn, SP_OK)
                             + SCALE (offset, 32)),
                       aarch64_get_reg_u32 (cpu, rt, NO_SP));
}

/* 32 bit store unscaled signed 9 bit with pre- or post-writeback.  */
static void
str32_wb (sim_cpu *cpu, int32_t offset, WriteBack wb)
{
  unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
  unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);
  uint64_t address;

  if (rn == rt && wb != NoWriteBack)
    HALT_UNALLOC;

  address = aarch64_get_reg_u64 (cpu, rn, SP_OK);
  if (wb != Post)
    address += offset;

  aarch64_set_mem_u32 (cpu, address, aarch64_get_reg_u32 (cpu, rt, NO_SP));

  if (wb == Post)
    address += offset;

  if (wb != NoWriteBack)
    aarch64_set_reg_u64 (cpu, rn, SP_OK, address);
}

/* 32 bit store scaled or unscaled zero- or
   sign-extended 32-bit register offset.  */
static void
str32_scale_ext (sim_cpu *cpu, Scaling scaling, Extension extension)
{
  unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
  unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
  unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);

  uint64_t address = aarch64_get_reg_u64 (cpu, rn, SP_OK);
  int64_t extended = extend (aarch64_get_reg_u32 (cpu, rm, NO_SP), extension);
  uint64_t displacement = OPT_SCALE (extended, 32, scaling);

  /* Read the 32-bit view of the source register for a 32-bit store.  */
  aarch64_set_mem_u32 (cpu, address + displacement,
                       aarch64_get_reg_u32 (cpu, rt, NO_SP));
}

/* 64 bit store scaled unsigned 12 bit.  */
static void
str_abs (sim_cpu *cpu, uint32_t offset)
{
  unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
  unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);

  aarch64_set_mem_u64 (cpu,
                       aarch64_get_reg_u64 (cpu, rn, SP_OK)
                       + SCALE (offset, 64),
                       aarch64_get_reg_u64 (cpu, rt, NO_SP));
}

/* 64 bit store unscaled signed 9 bit with pre- or post-writeback.  */
static void
str_wb (sim_cpu *cpu, int32_t offset, WriteBack wb)
{
  unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
  unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);
  uint64_t address;

  if (rn == rt && wb != NoWriteBack)
    HALT_UNALLOC;

  address = aarch64_get_reg_u64 (cpu, rn, SP_OK);

  if (wb != Post)
    address += offset;

  aarch64_set_mem_u64 (cpu, address, aarch64_get_reg_u64 (cpu, rt, NO_SP));

  if (wb == Post)
    address += offset;

  if (wb != NoWriteBack)
    aarch64_set_reg_u64 (cpu, rn, SP_OK, address);
}

/* 64 bit store scaled or unscaled zero-
   or sign-extended 32-bit register offset.  */
static void
str_scale_ext (sim_cpu *cpu, Scaling scaling, Extension extension)
{
  unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
  unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
  unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);
  /* rn may reference SP, rm and rt must reference ZR.  */

  uint64_t address = aarch64_get_reg_u64 (cpu, rn, SP_OK);
  int64_t extended = extend (aarch64_get_reg_u32 (cpu, rm, NO_SP),
                             extension);
  uint64_t displacement = OPT_SCALE (extended, 64, scaling);

  aarch64_set_mem_u64 (cpu, address + displacement,
                       aarch64_get_reg_u64 (cpu, rt, NO_SP));
}

/* 32 bit store byte scaled unsigned 12 bit.  */
static void
strb_abs (sim_cpu *cpu, uint32_t offset)
{
  unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
  unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);

  /* The target register may not be SP but the source may be.
     There is no scaling required for a byte store.  */
  aarch64_set_mem_u8 (cpu,
                      aarch64_get_reg_u64 (cpu, rn, SP_OK) + offset,
                      aarch64_get_reg_u8 (cpu, rt, NO_SP));
}

/* 32 bit store byte unscaled signed 9 bit with pre- or post-writeback.  */
static void
strb_wb (sim_cpu *cpu, int32_t offset, WriteBack wb)
{
  unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
  unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);
  uint64_t address;

  if (rn == rt && wb != NoWriteBack)
    HALT_UNALLOC;

  address = aarch64_get_reg_u64 (cpu, rn, SP_OK);

  if (wb != Post)
    address += offset;

  aarch64_set_mem_u8 (cpu, address, aarch64_get_reg_u8 (cpu, rt, NO_SP));

  if (wb == Post)
    address += offset;

  if (wb != NoWriteBack)
    aarch64_set_reg_u64 (cpu, rn, SP_OK, address);
}

/* 32 bit store byte scaled or unscaled zero-
   or sign-extended 32-bit register offset.  */
static void
strb_scale_ext (sim_cpu *cpu, Scaling scaling, Extension extension)
{
  unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
  unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
  unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);
  /* rn may reference SP, rm and rt must reference ZR.  */

  uint64_t address = aarch64_get_reg_u64 (cpu, rn, SP_OK);
  int64_t displacement = extend (aarch64_get_reg_u32 (cpu, rm, NO_SP),
                                 extension);

  /* There is no scaling required for a byte store.  */
  aarch64_set_mem_u8 (cpu, address + displacement,
                      aarch64_get_reg_u8 (cpu, rt, NO_SP));
}

/* 32 bit store short scaled unsigned 12 bit.  */
static void
strh_abs (sim_cpu *cpu, uint32_t offset)
{
  unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
  unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);

  /* The target register may not be SP but the source may be.  */
  aarch64_set_mem_u16 (cpu, aarch64_get_reg_u64 (cpu, rn, SP_OK)
                       + SCALE (offset, 16),
                       aarch64_get_reg_u16 (cpu, rt, NO_SP));
}

/* 32 bit store short unscaled signed 9 bit with pre- or post-writeback.  */
static void
strh_wb (sim_cpu *cpu, int32_t offset, WriteBack wb)
{
  unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
  unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);
  uint64_t address;

  if (rn == rt && wb != NoWriteBack)
    HALT_UNALLOC;

  address = aarch64_get_reg_u64 (cpu, rn, SP_OK);

  if (wb != Post)
    address += offset;

  aarch64_set_mem_u16 (cpu, address, aarch64_get_reg_u16 (cpu, rt, NO_SP));

  if (wb == Post)
    address += offset;

  if (wb != NoWriteBack)
    aarch64_set_reg_u64 (cpu, rn, SP_OK, address);
}

/* 32 bit store short scaled or unscaled zero-
   or sign-extended 32-bit register offset.  */
static void
strh_scale_ext (sim_cpu *cpu, Scaling scaling, Extension extension)
{
  unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
  unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
  unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);
  /* rn may reference SP, rm and rt must reference ZR.  */

  uint64_t address = aarch64_get_reg_u64 (cpu, rn, SP_OK);
  int64_t extended = extend (aarch64_get_reg_u32 (cpu, rm, NO_SP), extension);
  uint64_t displacement = OPT_SCALE (extended, 16, scaling);

  aarch64_set_mem_u16 (cpu, address + displacement,
                       aarch64_get_reg_u16 (cpu, rt, NO_SP));
}

/* Prefetch unsigned 12 bit.  */
static void
prfm_abs (sim_cpu *cpu, uint32_t offset)
{
  /* instr[4,0] = prfop : 00000 ==> PLDL1KEEP, 00001 ==> PLDL1STRM,
                          00010 ==> PLDL2KEEP, 00011 ==> PLDL2STRM,
                          00100 ==> PLDL3KEEP, 00101 ==> PLDL3STRM,
                          10000 ==> PSTL1KEEP, 10001 ==> PSTL1STRM,
                          10010 ==> PSTL2KEEP, 10011 ==> PSTL2STRM,
                          10100 ==> PSTL3KEEP, 10101 ==> PSTL3STRM,
                          ow ==> UNALLOC
     PrfOp prfop = prfop (aarch64_get_instr (cpu), 4, 0);
     uint64_t address = aarch64_get_reg_u64 (cpu, rn, SP_OK)
                        + SCALE (offset, 64).  */

  /* TODO : implement prefetch of address.  */
}

/* Prefetch scaled or unscaled zero- or sign-extended
   32-bit register offset.  */
static void
prfm_scale_ext (sim_cpu *cpu, Scaling scaling, Extension extension)
{
  /* instr[4,0] = prfop : 00000 ==> PLDL1KEEP, 00001 ==> PLDL1STRM,
                          00010 ==> PLDL2KEEP, 00011 ==> PLDL2STRM,
                          00100 ==> PLDL3KEEP, 00101 ==> PLDL3STRM,
                          10000 ==> PSTL1KEEP, 10001 ==> PSTL1STRM,
                          10010 ==> PSTL2KEEP, 10011 ==> PSTL2STRM,
                          10100 ==> PSTL3KEEP, 10101 ==> PSTL3STRM,
                          ow ==> UNALLOC
     rn may reference SP, rm may only reference ZR
     PrfOp prfop = prfop (aarch64_get_instr (cpu), 4, 0);
     uint64_t base = aarch64_get_reg_u64 (cpu, rn, SP_OK);
     int64_t extended = extend (aarch64_get_reg_u32 (cpu, rm, NO_SP),
                                extension);
     uint64_t displacement = OPT_SCALE (extended, 64, scaling);
     uint64_t address = base + displacement.  */

  /* TODO : implement prefetch of address.  */
}

/* 64 bit pc-relative prefetch.  */
static void
prfm_pcrel (sim_cpu *cpu, int32_t offset)
{
  /* instr[4,0] = prfop : 00000 ==> PLDL1KEEP, 00001 ==> PLDL1STRM,
                          00010 ==> PLDL2KEEP, 00011 ==> PLDL2STRM,
                          00100 ==> PLDL3KEEP, 00101 ==> PLDL3STRM,
                          10000 ==> PSTL1KEEP, 10001 ==> PSTL1STRM,
                          10010 ==> PSTL2KEEP, 10011 ==> PSTL2STRM,
                          10100 ==> PSTL3KEEP, 10101 ==> PSTL3STRM,
                          ow ==> UNALLOC
     PrfOp prfop = prfop (aarch64_get_instr (cpu), 4, 0);
     uint64_t address = aarch64_get_PC (cpu) + offset.  */

  /* TODO : implement this.  */
}

/* Load-store exclusive.  */

static void
ldxr (sim_cpu *cpu)
{
  unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
  unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);
  uint64_t address = aarch64_get_reg_u64 (cpu, rn, SP_OK);
  int size = uimm (aarch64_get_instr (cpu), 31, 30);
  /* int ordered = uimm (aarch64_get_instr (cpu), 15, 15);  */
  /* int exclusive = ! uimm (aarch64_get_instr (cpu), 23, 23);  */

  switch (size)
    {
    case 0:
      aarch64_set_reg_u64 (cpu, rt, NO_SP, aarch64_get_mem_u8 (cpu, address));
      break;
    case 1:
      aarch64_set_reg_u64 (cpu, rt, NO_SP, aarch64_get_mem_u16 (cpu, address));
      break;
    case 2:
      aarch64_set_reg_u64 (cpu, rt, NO_SP, aarch64_get_mem_u32 (cpu, address));
      break;
    case 3:
      aarch64_set_reg_u64 (cpu, rt, NO_SP, aarch64_get_mem_u64 (cpu, address));
      break;
    default:
      HALT_UNALLOC;
    }
}

1477 | static void | |
1478 | stxr (sim_cpu *cpu) | |
1479 | { | |
1480 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
1481 | unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0); | |
1482 | unsigned rs = uimm (aarch64_get_instr (cpu), 20, 16); | |
1483 | uint64_t address = aarch64_get_reg_u64 (cpu, rn, SP_OK); | |
1484 | int size = uimm (aarch64_get_instr (cpu), 31, 30); | |
1485 | uint64_t data = aarch64_get_reg_u64 (cpu, rt, NO_SP); | |
1486 | ||
1487 | switch (size) | |
1488 | { | |
1489 | case 0: aarch64_set_mem_u8 (cpu, address, data); break; | |
1490 | case 1: aarch64_set_mem_u16 (cpu, address, data); break; | |
1491 | case 2: aarch64_set_mem_u32 (cpu, address, data); break; | |
1492 | case 3: aarch64_set_mem_u64 (cpu, address, data); break; | |
1493 | default: HALT_UNALLOC; | |
1494 | } | |
1495 | ||
1496 | /* The sim is single threaded, so the exclusive store always | |
1497 | succeeds: write 0 (success) to the status register Rs. */ | |
1498 | aarch64_set_reg_u64 (cpu, rs, NO_SP, 0); | |
1497 | } | |
1498 | ||
1499 | static void | |
1500 | dexLoadLiteral (sim_cpu *cpu) | |
1501 | { | |
1502 | /* instr[29,27] == 011 | |
1503 | instr[25,24] == 00 | |
1504 | instr[31,30]:instr[26] = opc : 000 ==> LDRW, 001 ==> FLDRS | |
1505 | 010 ==> LDRX, 011 ==> FLDRD | |
1506 | 100 ==> LDRSW, 101 ==> FLDRQ | |
1507 | 110 ==> PRFM, 111 ==> UNALLOC | |
1508 | instr[26] = V : 0 ==> GReg, 1 ==> FReg | |
1509 | instr[23,5] = simm19 */ | |
1510 | ||
1511 | /* unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0); */ | |
1512 | uint32_t dispatch = ( (uimm (aarch64_get_instr (cpu), 31, 30) << 1) | |
1513 | | uimm (aarch64_get_instr (cpu), 26, 26)); | |
1514 | int32_t imm = simm32 (aarch64_get_instr (cpu), 23, 5); | |
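| /* E.g. LDR X0, <label> has opc == 01 and V == 0, giving | |
| dispatch == 2 and selecting the ldr_pcrel case below. */ | |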
1515 | ||
1516 | switch (dispatch) | |
1517 | { | |
1518 | case 0: ldr32_pcrel (cpu, imm); break; | |
1519 | case 1: fldrs_pcrel (cpu, imm); break; | |
1520 | case 2: ldr_pcrel (cpu, imm); break; | |
1521 | case 3: fldrd_pcrel (cpu, imm); break; | |
1522 | case 4: ldrsw_pcrel (cpu, imm); break; | |
1523 | case 5: fldrq_pcrel (cpu, imm); break; | |
1524 | case 6: prfm_pcrel (cpu, imm); break; | |
1525 | case 7: | |
1526 | default: | |
1527 | HALT_UNALLOC; | |
1528 | } | |
1529 | } | |
1530 | ||
1531 | /* Immediate arithmetic | |
1532 | The aimm argument is a 12 bit unsigned value or a 12 bit unsigned | |
1533 | value left shifted by 12 bits (done at decode). | |
1534 | ||
1535 | N.B. the register args (dest, source) can normally be Xn or SP. | |
1536 | The exception occurs for flag setting instructions which may | |
1537 | only use Xn for the output (dest). */ | |
1538 | ||
1539 | /* 32 bit add immediate. */ | |
1540 | static void | |
1541 | add32 (sim_cpu *cpu, uint32_t aimm) | |
1542 | { | |
1543 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
1544 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
1545 | ||
1546 | aarch64_set_reg_u64 (cpu, rd, SP_OK, | |
1547 | aarch64_get_reg_u32 (cpu, rn, SP_OK) + aimm); | |
1548 | } | |
1549 | ||
1550 | /* 64 bit add immediate. */ | |
1551 | static void | |
1552 | add64 (sim_cpu *cpu, uint32_t aimm) | |
1553 | { | |
1554 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
1555 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
1556 | ||
1557 | aarch64_set_reg_u64 (cpu, rd, SP_OK, | |
1558 | aarch64_get_reg_u64 (cpu, rn, SP_OK) + aimm); | |
1559 | } | |
1560 | ||
1561 | static void | |
1562 | set_flags_for_add32 (sim_cpu *cpu, int32_t value1, int32_t value2) | |
1563 | { | |
1564 | int32_t result = value1 + value2; | |
1565 | int64_t sresult = (int64_t) value1 + (int64_t) value2; | |
1566 | uint64_t uresult = (uint64_t)(uint32_t) value1 | |
1567 | + (uint64_t)(uint32_t) value2; | |
1568 | uint32_t flags = 0; | |
1569 | ||
1570 | if (result == 0) | |
1571 | flags |= Z; | |
1572 | ||
1573 | if (result & (1U << 31)) | |
1574 | flags |= N; | |
1575 | ||
1576 | /* Compare against the zero-extended 32 bit result; comparing | |
1577 | against the int32_t directly sign-extends a negative result | |
1578 | and sets C spuriously. */ | |
1579 | if (uresult != (uint64_t) (uint32_t) result) | |
1580 | flags |= C; | |
1578 | ||
1579 | if (sresult != result) | |
1580 | flags |= V; | |
1581 | ||
1582 | aarch64_set_CPSR (cpu, flags); | |
1583 | } | |
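| ||
| /* Example for set_flags_for_add32: 0x7FFFFFFF + 1 gives 0x80000000, | |
| setting N and V (signed overflow) but not C, while 0xFFFFFFFF + 1 | |
| gives 0, setting Z and C but not V. */ | |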
1584 | ||
1585 | static void | |
1586 | set_flags_for_add64 (sim_cpu *cpu, uint64_t value1, uint64_t value2) | |
1587 | { | |
1588 | uint64_t result = value1 + value2; | |
1589 | uint64_t signbit = 1ULL << 63; | |
1590 | uint32_t flags = 0; | |
1591 | ||
1592 | if (result == 0) | |
1593 | flags |= Z; | |
1594 | ||
1595 | if (result & signbit) | |
1596 | flags |= N; | |
1597 | ||
1598 | /* Carry: a carry out of bit 63 wraps the unsigned result back | |
1599 | below the first operand. */ | |
1600 | if (result < value1) | |
1601 | flags |= C; | |
1602 | ||
1603 | /* Overflow: the operands have the same sign but the result's | |
1604 | sign differs from it. */ | |
1605 | if ((~(value1 ^ value2) & (value1 ^ result)) & signbit) | |
1606 | flags |= V; | |
1607 | ||
1608 | aarch64_set_CPSR (cpu, flags); | |
1609 | } | |
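| ||
| /* Example for set_flags_for_add64: 0x8000000000000000 added to | |
| itself gives 0, setting Z, C (unsigned wrap) and V (two negative | |
| operands with a non-negative result); N stays clear. */ | |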
1630 | ||
1631 | #define NEG(a) (((a) & signbit) == signbit) | |
1632 | #define POS(a) (((a) & signbit) == 0) | |
1633 | ||
1634 | static void | |
1635 | set_flags_for_sub32 (sim_cpu *cpu, uint32_t value1, uint32_t value2) | |
1636 | { | |
1637 | uint32_t result = value1 - value2; | |
1638 | uint32_t flags = 0; | |
1639 | uint32_t signbit = 1ULL << 31; | |
1640 | ||
1641 | if (result == 0) | |
1642 | flags |= Z; | |
1643 | ||
1644 | if (NEG (result)) | |
1645 | flags |= N; | |
1646 | ||
1647 | if ( (NEG (value1) && POS (value2)) | |
1648 | || (NEG (value1) && POS (result)) | |
1649 | || (POS (value2) && POS (result))) | |
1650 | flags |= C; | |
1651 | ||
1652 | if ( (NEG (value1) && POS (value2) && POS (result)) | |
1653 | || (POS (value1) && NEG (value2) && NEG (result))) | |
1654 | flags |= V; | |
1655 | ||
1656 | aarch64_set_CPSR (cpu, flags); | |
1657 | } | |
1658 | ||
1659 | static void | |
1660 | set_flags_for_sub64 (sim_cpu *cpu, uint64_t value1, uint64_t value2) | |
1661 | { | |
1662 | uint64_t result = value1 - value2; | |
1663 | uint32_t flags = 0; | |
1664 | uint64_t signbit = 1ULL << 63; | |
1665 | ||
1666 | if (result == 0) | |
1667 | flags |= Z; | |
1668 | ||
1669 | if (NEG (result)) | |
1670 | flags |= N; | |
1671 | ||
1672 | if ( (NEG (value1) && POS (value2)) | |
1673 | || (NEG (value1) && POS (result)) | |
1674 | || (POS (value2) && POS (result))) | |
1675 | flags |= C; | |
1676 | ||
1677 | if ( (NEG (value1) && POS (value2) && POS (result)) | |
1678 | || (POS (value1) && NEG (value2) && NEG (result))) | |
1679 | flags |= V; | |
1680 | ||
1681 | aarch64_set_CPSR (cpu, flags); | |
1682 | } | |
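| ||
| /* Example for the subtract helpers (as used by CMP): 2 - 3 | |
| borrows, so C is clear and N is set; 3 - 3 gives 0, setting | |
| both Z and C (AArch64 carry means "no borrow"). */ | |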
1683 | ||
1684 | static void | |
1685 | set_flags_for_binop32 (sim_cpu *cpu, uint32_t result) | |
1686 | { | |
1687 | uint32_t flags = 0; | |
1688 | ||
1689 | if (result == 0) | |
1690 | flags |= Z; | |
1691 | ||
1692 | if (result & (1U << 31)) | |
1693 | flags |= N; | |
1698 | ||
1699 | aarch64_set_CPSR (cpu, flags); | |
1700 | } | |
1701 | ||
1702 | static void | |
1703 | set_flags_for_binop64 (sim_cpu *cpu, uint64_t result) | |
1704 | { | |
1705 | uint32_t flags = 0; | |
1706 | ||
1707 | if (result == 0) | |
1708 | flags |= Z; | |
1709 | ||
1710 | if (result & (1ULL << 63)) | |
1711 | flags |= N; | |
1716 | ||
1717 | aarch64_set_CPSR (cpu, flags); | |
1718 | } | |
1719 | ||
1720 | /* 32 bit add immediate set flags. */ | |
1721 | static void | |
1722 | adds32 (sim_cpu *cpu, uint32_t aimm) | |
1723 | { | |
1724 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
1725 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
1726 | /* Signs need no special handling here: the flag helper sees the | |
1727 | same 32 bit patterns whichever way the register is read. */ | |
1727 | int32_t value1 = aarch64_get_reg_s32 (cpu, rn, SP_OK); | |
1728 | ||
1729 | aarch64_set_reg_u64 (cpu, rd, NO_SP, value1 + aimm); | |
1730 | set_flags_for_add32 (cpu, value1, aimm); | |
1731 | } | |
1732 | ||
1733 | /* 64 bit add immediate set flags. */ | |
1734 | static void | |
1735 | adds64 (sim_cpu *cpu, uint32_t aimm) | |
1736 | { | |
1737 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
1738 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
1739 | uint64_t value1 = aarch64_get_reg_u64 (cpu, rn, SP_OK); | |
1740 | uint64_t value2 = aimm; | |
1741 | ||
1742 | aarch64_set_reg_u64 (cpu, rd, NO_SP, value1 + value2); | |
1743 | set_flags_for_add64 (cpu, value1, value2); | |
1744 | } | |
1745 | ||
1746 | /* 32 bit sub immediate. */ | |
1747 | static void | |
1748 | sub32 (sim_cpu *cpu, uint32_t aimm) | |
1749 | { | |
1750 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
1751 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
1752 | ||
1753 | aarch64_set_reg_u64 (cpu, rd, SP_OK, | |
1754 | aarch64_get_reg_u32 (cpu, rn, SP_OK) - aimm); | |
1755 | } | |
1756 | ||
1757 | /* 64 bit sub immediate. */ | |
1758 | static void | |
1759 | sub64 (sim_cpu *cpu, uint32_t aimm) | |
1760 | { | |
1761 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
1762 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
1763 | ||
1764 | aarch64_set_reg_u64 (cpu, rd, SP_OK, | |
1765 | aarch64_get_reg_u64 (cpu, rn, SP_OK) - aimm); | |
1766 | } | |
1767 | ||
1768 | /* 32 bit sub immediate set flags. */ | |
1769 | static void | |
1770 | subs32 (sim_cpu *cpu, uint32_t aimm) | |
1771 | { | |
1772 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
1773 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
1774 | uint32_t value1 = aarch64_get_reg_u32 (cpu, rn, SP_OK); | |
1775 | uint32_t value2 = aimm; | |
1776 | ||
1777 | aarch64_set_reg_u64 (cpu, rd, NO_SP, value1 - value2); | |
1778 | set_flags_for_sub32 (cpu, value1, value2); | |
1779 | } | |
1780 | ||
1781 | /* 64 bit sub immediate set flags. */ | |
1782 | static void | |
1783 | subs64 (sim_cpu *cpu, uint32_t aimm) | |
1784 | { | |
1785 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
1786 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
1787 | uint64_t value1 = aarch64_get_reg_u64 (cpu, rn, SP_OK); | |
1788 | uint64_t value2 = aimm; | |
1789 | ||
1790 | aarch64_set_reg_u64 (cpu, rd, NO_SP, value1 - value2); | |
1791 | set_flags_for_sub64 (cpu, value1, value2); | |
1792 | } | |
1793 | ||
1794 | /* Data Processing Register. */ | |
1795 | ||
1796 | /* First two helpers to perform the shift operations. */ | |
1797 | ||
1798 | static inline uint32_t | |
1799 | shifted32 (uint32_t value, Shift shift, uint32_t count) | |
1800 | { | |
1801 | switch (shift) | |
1802 | { | |
1803 | default: | |
1804 | case LSL: | |
1805 | return (value << count); | |
1806 | case LSR: | |
1807 | return (value >> count); | |
1808 | case ASR: | |
1809 | { | |
1810 | int32_t svalue = value; | |
1811 | return (svalue >> count); | |
1812 | } | |
1813 | case ROR: | |
1814 | if (count == 0) | |
1815 | return value; /* Avoid the undefined shift by 32. */ | |
1816 | return (value << (32 - count)) | (value >> count); | |
1819 | } | |
1820 | } | |
1821 | ||
1822 | static inline uint64_t | |
1823 | shifted64 (uint64_t value, Shift shift, uint32_t count) | |
1824 | { | |
1825 | switch (shift) | |
1826 | { | |
1827 | default: | |
1828 | case LSL: | |
1829 | return (value << count); | |
1830 | case LSR: | |
1831 | return (value >> count); | |
1832 | case ASR: | |
1833 | { | |
1834 | int64_t svalue = value; | |
1835 | return (svalue >> count); | |
1836 | } | |
1837 | case ROR: | |
1838 | if (count == 0) | |
1839 | return value; /* Avoid the undefined shift by 64. */ | |
1840 | return (value << (64 - count)) | (value >> count); | |
1843 | } | |
1844 | } | |
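| ||
| /* Example: shifted32 (0x12345678, ROR, 8) rotates the low byte | |
| to the top, giving 0x78123456. */ | |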
1845 | ||
1846 | /* Arithmetic shifted register. | |
1847 | These allow an optional LSL, ASR or LSR to the second source | |
1848 | register with a count up to the register bit count. | |
1849 | ||
1850 | N.B register args may not be SP. */ | |
1851 | ||
1852 | /* 32 bit ADD shifted register. */ | |
1853 | static void | |
1854 | add32_shift (sim_cpu *cpu, Shift shift, uint32_t count) | |
1855 | { | |
1856 | unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16); | |
1857 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
1858 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
1859 | ||
1860 | aarch64_set_reg_u64 (cpu, rd, NO_SP, | |
1861 | aarch64_get_reg_u32 (cpu, rn, NO_SP) | |
1862 | + shifted32 (aarch64_get_reg_u32 (cpu, rm, NO_SP), | |
1863 | shift, count)); | |
1864 | } | |
1865 | ||
1866 | /* 64 bit ADD shifted register. */ | |
1867 | static void | |
1868 | add64_shift (sim_cpu *cpu, Shift shift, uint32_t count) | |
1869 | { | |
1870 | unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16); | |
1871 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
1872 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
1873 | ||
1874 | aarch64_set_reg_u64 (cpu, rd, NO_SP, | |
1875 | aarch64_get_reg_u64 (cpu, rn, NO_SP) | |
1876 | + shifted64 (aarch64_get_reg_u64 (cpu, rm, NO_SP), | |
1877 | shift, count)); | |
1878 | } | |
1879 | ||
1880 | /* 32 bit ADD shifted register setting flags. */ | |
1881 | static void | |
1882 | adds32_shift (sim_cpu *cpu, Shift shift, uint32_t count) | |
1883 | { | |
1884 | unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16); | |
1885 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
1886 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
1887 | ||
1888 | uint32_t value1 = aarch64_get_reg_u32 (cpu, rn, NO_SP); | |
1889 | uint32_t value2 = shifted32 (aarch64_get_reg_u32 (cpu, rm, NO_SP), | |
1890 | shift, count); | |
1891 | ||
1892 | aarch64_set_reg_u64 (cpu, rd, NO_SP, value1 + value2); | |
1893 | set_flags_for_add32 (cpu, value1, value2); | |
1894 | } | |
1895 | ||
1896 | /* 64 bit ADD shifted register setting flags. */ | |
1897 | static void | |
1898 | adds64_shift (sim_cpu *cpu, Shift shift, uint32_t count) | |
1899 | { | |
1900 | unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16); | |
1901 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
1902 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
1903 | ||
1904 | uint64_t value1 = aarch64_get_reg_u64 (cpu, rn, NO_SP); | |
1905 | uint64_t value2 = shifted64 (aarch64_get_reg_u64 (cpu, rm, NO_SP), | |
1906 | shift, count); | |
1907 | ||
1908 | aarch64_set_reg_u64 (cpu, rd, NO_SP, value1 + value2); | |
1909 | set_flags_for_add64 (cpu, value1, value2); | |
1910 | } | |
1911 | ||
1912 | /* 32 bit SUB shifted register. */ | |
1913 | static void | |
1914 | sub32_shift (sim_cpu *cpu, Shift shift, uint32_t count) | |
1915 | { | |
1916 | unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16); | |
1917 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
1918 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
1919 | ||
1920 | aarch64_set_reg_u64 (cpu, rd, NO_SP, | |
1921 | aarch64_get_reg_u32 (cpu, rn, NO_SP) | |
1922 | - shifted32 (aarch64_get_reg_u32 (cpu, rm, NO_SP), | |
1923 | shift, count)); | |
1924 | } | |
1925 | ||
1926 | /* 64 bit SUB shifted register. */ | |
1927 | static void | |
1928 | sub64_shift (sim_cpu *cpu, Shift shift, uint32_t count) | |
1929 | { | |
1930 | unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16); | |
1931 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
1932 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
1933 | ||
1934 | aarch64_set_reg_u64 (cpu, rd, NO_SP, | |
1935 | aarch64_get_reg_u64 (cpu, rn, NO_SP) | |
1936 | - shifted64 (aarch64_get_reg_u64 (cpu, rm, NO_SP), | |
1937 | shift, count)); | |
1938 | } | |
1939 | ||
1940 | /* 32 bit SUB shifted register setting flags. */ | |
1941 | static void | |
1942 | subs32_shift (sim_cpu *cpu, Shift shift, uint32_t count) | |
1943 | { | |
1944 | unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16); | |
1945 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
1946 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
1947 | ||
1948 | uint32_t value1 = aarch64_get_reg_u32 (cpu, rn, NO_SP); | |
1949 | uint32_t value2 = shifted32 (aarch64_get_reg_u32 (cpu, rm, NO_SP), | |
1950 | shift, count); | |
1951 | ||
1952 | aarch64_set_reg_u64 (cpu, rd, NO_SP, value1 - value2); | |
1953 | set_flags_for_sub32 (cpu, value1, value2); | |
1954 | } | |
1955 | ||
1956 | /* 64 bit SUB shifted register setting flags. */ | |
1957 | static void | |
1958 | subs64_shift (sim_cpu *cpu, Shift shift, uint32_t count) | |
1959 | { | |
1960 | unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16); | |
1961 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
1962 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
1963 | ||
1964 | uint64_t value1 = aarch64_get_reg_u64 (cpu, rn, NO_SP); | |
1965 | uint64_t value2 = shifted64 (aarch64_get_reg_u64 (cpu, rm, NO_SP), | |
1966 | shift, count); | |
1967 | ||
1968 | aarch64_set_reg_u64 (cpu, rd, NO_SP, value1 - value2); | |
1969 | set_flags_for_sub64 (cpu, value1, value2); | |
1970 | } | |
1971 | ||
1972 | /* First a couple more helpers to fetch the | |
1973 | relevant source register element either | |
1974 | sign or zero extended as required by the | |
1975 | extension value. */ | |
1976 | ||
1977 | static uint32_t | |
1978 | extreg32 (sim_cpu *cpu, unsigned int lo, Extension extension) | |
1979 | { | |
1980 | switch (extension) | |
1981 | { | |
1982 | case UXTB: return aarch64_get_reg_u8 (cpu, lo, NO_SP); | |
1983 | case UXTH: return aarch64_get_reg_u16 (cpu, lo, NO_SP); | |
1984 | case UXTW: /* Fall through. */ | |
1985 | case UXTX: return aarch64_get_reg_u32 (cpu, lo, NO_SP); | |
1986 | case SXTB: return aarch64_get_reg_s8 (cpu, lo, NO_SP); | |
1987 | case SXTH: return aarch64_get_reg_s16 (cpu, lo, NO_SP); | |
1988 | case SXTW: /* Fall through. */ | |
1989 | case SXTX: /* Fall through. */ | |
1990 | default: return aarch64_get_reg_s32 (cpu, lo, NO_SP); | |
1991 | } | |
1992 | } | |
1993 | ||
1994 | static uint64_t | |
1995 | extreg64 (sim_cpu *cpu, unsigned int lo, Extension extension) | |
1996 | { | |
1997 | switch (extension) | |
1998 | { | |
1999 | case UXTB: return aarch64_get_reg_u8 (cpu, lo, NO_SP); | |
2000 | case UXTH: return aarch64_get_reg_u16 (cpu, lo, NO_SP); | |
2001 | case UXTW: return aarch64_get_reg_u32 (cpu, lo, NO_SP); | |
2002 | case UXTX: return aarch64_get_reg_u64 (cpu, lo, NO_SP); | |
2003 | case SXTB: return aarch64_get_reg_s8 (cpu, lo, NO_SP); | |
2004 | case SXTH: return aarch64_get_reg_s16 (cpu, lo, NO_SP); | |
2005 | case SXTW: return aarch64_get_reg_s32 (cpu, lo, NO_SP); | |
2006 | case SXTX: | |
2007 | default: return aarch64_get_reg_s64 (cpu, lo, NO_SP); | |
2008 | } | |
2009 | } | |
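| ||
| /* Example: for a register holding 0x80, extreg64 with SXTB | |
| returns 0xFFFFFFFFFFFFFF80 while UXTB returns 0x80. */ | |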
2010 | ||
2011 | /* Arithmetic extending register | |
2012 | These allow an optional sign or zero extension of some portion | |
2013 | of the second source register followed by an optional left | |
2014 | shift of between 0 and 4 bits. | |
2015 | ||
2016 | N.B output (dest) and first input arg (source) may normally be Xn | |
2017 | or SP. However, for flag setting operations dest can only be | |
2018 | Xn. Second input registers are always Xn. */ | |
2019 | ||
2020 | /* 32 bit ADD extending register. */ | |
2021 | static void | |
2022 | add32_ext (sim_cpu *cpu, Extension extension, uint32_t shift) | |
2023 | { | |
2024 | unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16); | |
2025 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
2026 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
2027 | ||
2028 | aarch64_set_reg_u64 (cpu, rd, SP_OK, | |
2029 | aarch64_get_reg_u32 (cpu, rn, SP_OK) | |
2030 | + (extreg32 (cpu, rm, extension) << shift)); | |
2031 | } | |
2032 | ||
2033 | /* 64 bit ADD extending register. | |
2034 | N.B. This subsumes the case with 64 bit source2 and UXTX #n or LSL #0. */ | |
2035 | static void | |
2036 | add64_ext (sim_cpu *cpu, Extension extension, uint32_t shift) | |
2037 | { | |
2038 | unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16); | |
2039 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
2040 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
2041 | ||
2042 | aarch64_set_reg_u64 (cpu, rd, SP_OK, | |
2043 | aarch64_get_reg_u64 (cpu, rn, SP_OK) | |
2044 | + (extreg64 (cpu, rm, extension) << shift)); | |
2045 | } | |
2046 | ||
2047 | /* 32 bit ADD extending register setting flags. */ | |
2048 | static void | |
2049 | adds32_ext (sim_cpu *cpu, Extension extension, uint32_t shift) | |
2050 | { | |
2051 | unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16); | |
2052 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
2053 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
2054 | ||
2055 | uint32_t value1 = aarch64_get_reg_u32 (cpu, rn, SP_OK); | |
2056 | uint32_t value2 = extreg32 (cpu, rm, extension) << shift; | |
2057 | ||
2058 | aarch64_set_reg_u64 (cpu, rd, NO_SP, value1 + value2); | |
2059 | set_flags_for_add32 (cpu, value1, value2); | |
2060 | } | |
2061 | ||
2062 | /* 64 bit ADD extending register setting flags */ | |
2063 | /* N.B. this subsumes the case with 64 bit source2 and UXTX #n or LSL #0 */ | |
2064 | static void | |
2065 | adds64_ext (sim_cpu *cpu, Extension extension, uint32_t shift) | |
2066 | { | |
2067 | unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16); | |
2068 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
2069 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
2070 | ||
2071 | uint64_t value1 = aarch64_get_reg_u64 (cpu, rn, SP_OK); | |
2072 | uint64_t value2 = extreg64 (cpu, rm, extension) << shift; | |
2073 | ||
2074 | aarch64_set_reg_u64 (cpu, rd, NO_SP, value1 + value2); | |
2075 | set_flags_for_add64 (cpu, value1, value2); | |
2076 | } | |
2077 | ||
2078 | /* 32 bit SUB extending register. */ | |
2079 | static void | |
2080 | sub32_ext (sim_cpu *cpu, Extension extension, uint32_t shift) | |
2081 | { | |
2082 | unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16); | |
2083 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
2084 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
2085 | ||
2086 | aarch64_set_reg_u64 (cpu, rd, SP_OK, | |
2087 | aarch64_get_reg_u32 (cpu, rn, SP_OK) | |
2088 | - (extreg32 (cpu, rm, extension) << shift)); | |
2089 | } | |
2090 | ||
2091 | /* 64 bit SUB extending register. */ | |
2092 | /* N.B. this subsumes the case with 64 bit source2 and UXTX #n or LSL #0. */ | |
2093 | static void | |
2094 | sub64_ext (sim_cpu *cpu, Extension extension, uint32_t shift) | |
2095 | { | |
2096 | unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16); | |
2097 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
2098 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
2099 | ||
2100 | aarch64_set_reg_u64 (cpu, rd, SP_OK, | |
2101 | aarch64_get_reg_u64 (cpu, rn, SP_OK) | |
2102 | - (extreg64 (cpu, rm, extension) << shift)); | |
2103 | } | |
2104 | ||
2105 | /* 32 bit SUB extending register setting flags. */ | |
2106 | static void | |
2107 | subs32_ext (sim_cpu *cpu, Extension extension, uint32_t shift) | |
2108 | { | |
2109 | unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16); | |
2110 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
2111 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
2112 | ||
2113 | uint32_t value1 = aarch64_get_reg_u32 (cpu, rn, SP_OK); | |
2114 | uint32_t value2 = extreg32 (cpu, rm, extension) << shift; | |
2115 | ||
2116 | aarch64_set_reg_u64 (cpu, rd, NO_SP, value1 - value2); | |
2117 | set_flags_for_sub32 (cpu, value1, value2); | |
2118 | } | |
2119 | ||
2120 | /* 64 bit SUB extending register setting flags */ | |
2121 | /* N.B. this subsumes the case with 64 bit source2 and UXTX #n or LSL #0 */ | |
2122 | static void | |
2123 | subs64_ext (sim_cpu *cpu, Extension extension, uint32_t shift) | |
2124 | { | |
2125 | unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16); | |
2126 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
2127 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
2128 | ||
2129 | uint64_t value1 = aarch64_get_reg_u64 (cpu, rn, SP_OK); | |
2130 | uint64_t value2 = extreg64 (cpu, rm, extension) << shift; | |
2131 | ||
2132 | aarch64_set_reg_u64 (cpu, rd, NO_SP, value1 - value2); | |
2133 | set_flags_for_sub64 (cpu, value1, value2); | |
2134 | } | |
2135 | ||
2136 | static void | |
2137 | dexAddSubtractImmediate (sim_cpu *cpu) | |
2138 | { | |
2139 | /* instr[31] = size : 0 ==> 32 bit, 1 ==> 64 bit | |
2140 | instr[30] = op : 0 ==> ADD, 1 ==> SUB | |
2141 | instr[29] = set : 0 ==> no flags, 1 ==> set flags | |
2142 | instr[28,24] = 10001 | |
2143 | instr[23,22] = shift : 00 ==> LSL#0, 01 ==> LSL#12, 1x ==> UNALLOC | |
2144 | instr[21,10] = uimm12 | |
2145 | instr[9,5] = Rn | |
2146 | instr[4,0] = Rd */ | |
2147 | ||
2148 | /* N.B. the shift is applied at decode before calling the add/sub routine. */ | |
2149 | uint32_t shift = uimm (aarch64_get_instr (cpu), 23, 22); | |
2150 | uint32_t imm = uimm (aarch64_get_instr (cpu), 21, 10); | |
2151 | uint32_t dispatch = uimm (aarch64_get_instr (cpu), 31, 29); | |
2152 | ||
2153 | NYI_assert (28, 24, 0x11); | |
2154 | ||
2155 | if (shift > 1) | |
2156 | HALT_UNALLOC; | |
2157 | ||
2158 | if (shift) | |
2159 | imm <<= 12; | |
2160 | ||
2161 | switch (dispatch) | |
2162 | { | |
2163 | case 0: add32 (cpu, imm); break; | |
2164 | case 1: adds32 (cpu, imm); break; | |
2165 | case 2: sub32 (cpu, imm); break; | |
2166 | case 3: subs32 (cpu, imm); break; | |
2167 | case 4: add64 (cpu, imm); break; | |
2168 | case 5: adds64 (cpu, imm); break; | |
2169 | case 6: sub64 (cpu, imm); break; | |
2170 | case 7: subs64 (cpu, imm); break; | |
2171 | default: | |
2172 | HALT_UNALLOC; | |
2173 | } | |
2174 | } | |
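| ||
| /* Example: ADD W1, W2, #4 has size:op:set == 000, so dispatch | |
| is 0 and add32 is called with aimm == 4; ADD W1, W2, #4, LSL #12 | |
| would reach add32 with aimm == 0x4000 instead. */ | |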
2175 | ||
2176 | static void | |
2177 | dexAddSubtractShiftedRegister (sim_cpu *cpu) | |
2178 | { | |
2179 | /* instr[31] = size : 0 ==> 32 bit, 1 ==> 64 bit | |
2180 | instr[30,29] = op : 00 ==> ADD, 01 ==> ADDS, 10 ==> SUB, 11 ==> SUBS | |
2181 | instr[28,24] = 01011 | |
2182 | instr[23,22] = shift : 0 ==> LSL, 1 ==> LSR, 2 ==> ASR, 3 ==> UNALLOC | |
2183 | instr[21] = 0 | |
2184 | instr[20,16] = Rm | |
2185 | instr[15,10] = count : must be 0xxxxx for 32 bit | |
2186 | instr[9,5] = Rn | |
2187 | instr[4,0] = Rd */ | |
2188 | ||
2189 | uint32_t size = uimm (aarch64_get_instr (cpu), 31, 31); | |
2190 | /* 32 bit operations must have count[5] = 0 | |
2191 | or else we have an UNALLOC. */ | |
2192 | uint32_t count = uimm (aarch64_get_instr (cpu), 15, 10); | |
2193 | /* Shift encoded as ROR is unallocated. */ | |
2194 | Shift shiftType = shift (aarch64_get_instr (cpu), 22); | |
2195 | /* Dispatch on size:op i.e aarch64_get_instr (cpu)[31,29]. */ | |
2196 | uint32_t dispatch = uimm (aarch64_get_instr (cpu), 31, 29); | |
2197 | ||
2198 | NYI_assert (28, 24, 0x0B); | |
2199 | NYI_assert (21, 21, 0); | |
2200 | ||
2201 | if (shiftType == ROR) | |
2202 | HALT_UNALLOC; | |
2203 | ||
2204 | if (!size && uimm (count, 5, 5)) | |
2205 | HALT_UNALLOC; | |
2206 | ||
2207 | switch (dispatch) | |
2208 | { | |
2209 | case 0: add32_shift (cpu, shiftType, count); break; | |
2210 | case 1: adds32_shift (cpu, shiftType, count); break; | |
2211 | case 2: sub32_shift (cpu, shiftType, count); break; | |
2212 | case 3: subs32_shift (cpu, shiftType, count); break; | |
2213 | case 4: add64_shift (cpu, shiftType, count); break; | |
2214 | case 5: adds64_shift (cpu, shiftType, count); break; | |
2215 | case 6: sub64_shift (cpu, shiftType, count); break; | |
2216 | case 7: subs64_shift (cpu, shiftType, count); break; | |
2217 | default: | |
2218 | HALT_UNALLOC; | |
2219 | } | |
2220 | } | |
2221 | ||
2222 | static void | |
2223 | dexAddSubtractExtendedRegister (sim_cpu *cpu) | |
2224 | { | |
2225 | /* instr[31] = size : 0 ==> 32 bit, 1 ==> 64 bit | |
2226 | instr[30] = op : 0 ==> ADD, 1 ==> SUB | |
2227 | instr[29] = set? : 0 ==> no flags, 1 ==> set flags | |
2228 | instr[28,24] = 01011 | |
2229 | instr[23,22] = opt : 0 ==> ok, 1,2,3 ==> UNALLOC | |
2230 | instr[21] = 1 | |
2231 | instr[20,16] = Rm | |
2232 | instr[15,13] = option : 000 ==> UXTB, 001 ==> UXTH, | |
2233 | 010 ==> UXTW/LSL, 011 ==> UXTX, | |
2234 | 100 ==> SXTB, 101 ==> SXTH, | |
2235 | 110 ==> SXTW, 111 ==> SXTX | |
2236 | instr[12,10] = shift : 0,1,2,3,4 ==> ok, 5,6,7 ==> UNALLOC | |
2237 | instr[9,5] = Rn | |
2238 | instr[4,0] = Rd */ | |
2239 | ||
2240 | Extension extensionType = extension (aarch64_get_instr (cpu), 13); | |
2241 | uint32_t shift = uimm (aarch64_get_instr (cpu), 12, 10); | |
2242 | /* dispatch on size:op:set? i.e aarch64_get_instr (cpu)[31,29] */ | |
2243 | uint32_t dispatch = uimm (aarch64_get_instr (cpu), 31, 29); | |
2244 | ||
2245 | NYI_assert (28, 24, 0x0B); | |
2246 | NYI_assert (21, 21, 1); | |
2247 | ||
2248 | /* Shift may not exceed 4. */ | |
2249 | if (shift > 4) | |
2250 | HALT_UNALLOC; | |
2251 | ||
2252 | switch (dispatch) | |
2253 | { | |
2254 | case 0: add32_ext (cpu, extensionType, shift); break; | |
2255 | case 1: adds32_ext (cpu, extensionType, shift); break; | |
2256 | case 2: sub32_ext (cpu, extensionType, shift); break; | |
2257 | case 3: subs32_ext (cpu, extensionType, shift); break; | |
2258 | case 4: add64_ext (cpu, extensionType, shift); break; | |
2259 | case 5: adds64_ext (cpu, extensionType, shift); break; | |
2260 | case 6: sub64_ext (cpu, extensionType, shift); break; | |
2261 | case 7: subs64_ext (cpu, extensionType, shift); break; | |
2262 | default: HALT_UNALLOC; | |
2263 | } | |
2264 | } | |
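| ||
| /* Example: ADD X0, SP, W1, UXTW #2 dispatches to add64_ext, | |
| which adds SP to the zero-extended W1 shifted left by 2; the | |
| SP_OK accesses are what allow SP as base and destination. */ | |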
2265 | ||
2266 | /* Conditional data processing | |
2267 | Condition register is implicit 3rd source. */ | |
2268 | ||
2269 | /* 32 bit add with carry. */ | |
2270 | /* N.B register args may not be SP. */ | |
2271 | ||
2272 | static void | |
2273 | adc32 (sim_cpu *cpu) | |
2274 | { | |
2275 | unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16); | |
2276 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
2277 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
2278 | ||
2279 | aarch64_set_reg_u64 (cpu, rd, NO_SP, | |
2280 | aarch64_get_reg_u32 (cpu, rn, NO_SP) | |
2281 | + aarch64_get_reg_u32 (cpu, rm, NO_SP) | |
2282 | + IS_SET (C)); | |
2283 | } | |
2284 | ||
2285 | /* 64 bit add with carry */ | |
2286 | static void | |
2287 | adc64 (sim_cpu *cpu) | |
2288 | { | |
2289 | unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16); | |
2290 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
2291 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
2292 | ||
2293 | aarch64_set_reg_u64 (cpu, rd, NO_SP, | |
2294 | aarch64_get_reg_u64 (cpu, rn, NO_SP) | |
2295 | + aarch64_get_reg_u64 (cpu, rm, NO_SP) | |
2296 | + IS_SET (C)); | |
2297 | } | |
2298 | ||
2299 | /* 32 bit add with carry setting flags. */ | |
2300 | static void | |
2301 | adcs32 (sim_cpu *cpu) | |
2302 | { | |
2303 | unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16); | |
2304 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
2305 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
2306 | ||
2307 | uint32_t value1 = aarch64_get_reg_u32 (cpu, rn, NO_SP); | |
2308 | uint32_t value2 = aarch64_get_reg_u32 (cpu, rm, NO_SP); | |
2309 | uint32_t carry = IS_SET (C); | |
2310 | ||
2311 | aarch64_set_reg_u64 (cpu, rd, NO_SP, value1 + value2 + carry); | |
2312 | set_flags_for_add32 (cpu, value1, value2 + carry); | |
2313 | } | |
2314 | ||
2315 | /* 64 bit add with carry setting flags. */ | |
2316 | static void | |
2317 | adcs64 (sim_cpu *cpu) | |
2318 | { | |
2319 | unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16); | |
2320 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
2321 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
2322 | ||
2323 | uint64_t value1 = aarch64_get_reg_u64 (cpu, rn, NO_SP); | |
2324 | uint64_t value2 = aarch64_get_reg_u64 (cpu, rm, NO_SP); | |
2325 | uint64_t carry = IS_SET (C); | |
2326 | ||
2327 | aarch64_set_reg_u64 (cpu, rd, NO_SP, value1 + value2 + carry); | |
2328 | set_flags_for_add64 (cpu, value1, value2 + carry); | |
2329 | } | |
2330 | ||
2331 | /* 32 bit sub with carry. */ | |
2332 | static void | |
2333 | sbc32 (sim_cpu *cpu) | |
2334 | { | |
2335 | unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16); | |
2336 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
2337 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
2338 | ||
2339 | aarch64_set_reg_u64 (cpu, rd, NO_SP, | |
2340 | aarch64_get_reg_u32 (cpu, rn, NO_SP) | |
2341 | - aarch64_get_reg_u32 (cpu, rm, NO_SP) | |
2342 | - 1 + IS_SET (C)); | |
2343 | } | |
2344 | ||
2345 | /* 64 bit sub with carry */ | |
2346 | static void | |
2347 | sbc64 (sim_cpu *cpu) | |
2348 | { | |
2349 | unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16); | |
2350 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
2351 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
2352 | ||
2353 | aarch64_set_reg_u64 (cpu, rd, NO_SP, | |
2354 | aarch64_get_reg_u64 (cpu, rn, NO_SP) | |
2355 | - aarch64_get_reg_u64 (cpu, rm, NO_SP) | |
2356 | - 1 + IS_SET (C)); | |
2357 | } | |
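| ||
| /* N.B. SBC computes Rn - Rm - 1 + C, so with the carry | |
| (no-borrow) flag set it is an ordinary subtraction. */ | |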
2358 | ||
2359 | /* 32 bit sub with carry setting flags */ | |
2360 | static void | |
2361 | sbcs32 (sim_cpu *cpu) | |
2362 | { | |
2363 | unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16); | |
2364 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
2365 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
2366 | ||
2367 | uint32_t value1 = aarch64_get_reg_u32 (cpu, rn, NO_SP); | |
2368 | uint32_t value2 = aarch64_get_reg_u32 (cpu, rm, NO_SP); | |
2369 | uint32_t carry = IS_SET (C); | |
2370 | uint32_t result = value1 - value2 - 1 + carry; | |
2371 | ||
2372 | aarch64_set_reg_u64 (cpu, rd, NO_SP, result); | |
2373 | set_flags_for_sub32 (cpu, value1, value2 + 1 - carry); | |
2374 | } | |
2375 | ||
2376 | /* 64 bit sub with carry setting flags */ | |
2377 | static void | |
2378 | sbcs64 (sim_cpu *cpu) | |
2379 | { | |
2380 | unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16); | |
2381 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
2382 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
2383 | ||
2384 | uint64_t value1 = aarch64_get_reg_u64 (cpu, rn, NO_SP); | |
2385 | uint64_t value2 = aarch64_get_reg_u64 (cpu, rm, NO_SP); | |
2386 | uint64_t carry = IS_SET (C); | |
2387 | uint64_t result = value1 - value2 - 1 + carry; | |
2388 | ||
2389 | aarch64_set_reg_u64 (cpu, rd, NO_SP, result); | |
2390 | set_flags_for_sub64 (cpu, value1, value2 + 1 - carry); | |
2391 | } | |
2392 | ||
2393 | static void | |
2394 | dexAddSubtractWithCarry (sim_cpu *cpu) | |
2395 | { | |
2396 | /* instr[31] = size : 0 ==> 32 bit, 1 ==> 64 bit | |
2397 | instr[30] = op : 0 ==> ADC, 1 ==> SBC | |
2398 | instr[29] = set? : 0 ==> no flags, 1 ==> set flags | |
2399 | instr[28,21] = 1 1010 000 | |
2400 | instr[20,16] = Rm | |
2401 | instr[15,10] = op2 : 000000 ==> ok, ow ==> UNALLOC | |
2402 | instr[9,5] = Rn | |
2403 | instr[4,0] = Rd */ | |
2404 | ||
2405 | uint32_t op2 = uimm (aarch64_get_instr (cpu), 15, 10); | |
2406 | /* Dispatch on size:op:set? i.e aarch64_get_instr (cpu)[31,29] */ | |
2407 | uint32_t dispatch = uimm (aarch64_get_instr (cpu), 31, 29); | |
2408 | ||
2409 | NYI_assert (28, 21, 0xD0); | |
2410 | ||
2411 | if (op2 != 0) | |
2412 | HALT_UNALLOC; | |
2413 | ||
2414 | switch (dispatch) | |
2415 | { | |
2416 | case 0: adc32 (cpu); break; | |
2417 | case 1: adcs32 (cpu); break; | |
2418 | case 2: sbc32 (cpu); break; | |
2419 | case 3: sbcs32 (cpu); break; | |
2420 | case 4: adc64 (cpu); break; | |
2421 | case 5: adcs64 (cpu); break; | |
2422 | case 6: sbc64 (cpu); break; | |
2423 | case 7: sbcs64 (cpu); break; | |
2424 | default: HALT_UNALLOC; | |
2425 | } | |
2426 | } | |
2427 | ||
2428 | static uint32_t | |
2429 | testConditionCode (sim_cpu *cpu, CondCode cc) | |
2430 | { | |
2431 | /* This should be reducible to branchless logic | |
2432 | by some careful testing of bits in CC followed | |
2433 | by the requisite masking and combining of bits | |
2434 | from the flag register. | |
2435 | ||
2436 | For now we do it with a switch. */ | |
2437 | int res; | |
2438 | ||
2439 | switch (cc) | |
2440 | { | |
2441 | case EQ: res = IS_SET (Z); break; | |
2442 | case NE: res = IS_CLEAR (Z); break; | |
2443 | case CS: res = IS_SET (C); break; | |
2444 | case CC: res = IS_CLEAR (C); break; | |
2445 | case MI: res = IS_SET (N); break; | |
2446 | case PL: res = IS_CLEAR (N); break; | |
2447 | case VS: res = IS_SET (V); break; | |
2448 | case VC: res = IS_CLEAR (V); break; | |
2449 | case HI: res = IS_SET (C) && IS_CLEAR (Z); break; | |
2450 | case LS: res = IS_CLEAR (C) || IS_SET (Z); break; | |
2451 | case GE: res = IS_SET (N) == IS_SET (V); break; | |
2452 | case LT: res = IS_SET (N) != IS_SET (V); break; | |
2453 | case GT: res = IS_CLEAR (Z) && (IS_SET (N) == IS_SET (V)); break; | |
2454 | case LE: res = IS_SET (Z) || (IS_SET (N) != IS_SET (V)); break; | |
2455 | case AL: | |
2456 | case NV: | |
2457 | default: | |
2458 | res = 1; | |
2459 | break; | |
2460 | } | |
2461 | return res; | |
2462 | } | |
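| ||
| /* Example: after comparing two equal values Z is set (and C, N | |
| and V behave as for SUBS), so EQ, GE and LE hold while NE, LT | |
| and GT do not. */ | |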
2463 | ||
2464 | static void | |
2465 | CondCompare (sim_cpu *cpu) /* aka: ccmp and ccmn */ | |
2466 | { | |
2467 | /* instr[31] = size : 0 ==> 32 bit, 1 ==> 64 bit | |
2468 | instr[30] = compare with positive (0) or negative value (1) | |
2469 | instr[29,21] = 1 1101 0010 | |
2470 | instr[20,16] = Rm or const | |
2471 | instr[15,12] = cond | |
2472 | instr[11] = compare reg (0) or const (1) | |
2473 | instr[10] = 0 | |
2474 | instr[9,5] = Rn | |
2475 | instr[4] = 0 | |
2476 | instr[3,0] = value for CPSR bits if the comparison does not take place. */ | |
2477 | signed int negate; | |
2478 | unsigned rm; | |
2479 | unsigned rn; | |
2480 | ||
2481 | NYI_assert (29, 21, 0x1d2); | |
2482 | NYI_assert (10, 10, 0); | |
2483 | NYI_assert (4, 4, 0); | |
2484 | ||
2485 | if (! testConditionCode (cpu, uimm (aarch64_get_instr (cpu), 15, 12))) | |
2486 | { | |
2487 | aarch64_set_CPSR (cpu, uimm (aarch64_get_instr (cpu), 3, 0)); | |
2488 | return; | |
2489 | } | |
2490 | ||
2491 | negate = uimm (aarch64_get_instr (cpu), 30, 30) ? -1 : 1; | |
2492 | rm = uimm (aarch64_get_instr (cpu), 20, 16); | |
2493 | rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
2494 | ||
2495 | if (uimm (aarch64_get_instr (cpu), 31, 31)) | |
2496 | { | |
2497 | if (uimm (aarch64_get_instr (cpu), 11, 11)) | |
2498 | set_flags_for_sub64 (cpu, aarch64_get_reg_u64 (cpu, rn, SP_OK), | |
2499 | negate * (uint64_t) rm); | |
2500 | else | |
2501 | set_flags_for_sub64 (cpu, aarch64_get_reg_u64 (cpu, rn, SP_OK), | |
2502 | negate * aarch64_get_reg_u64 (cpu, rm, SP_OK)); | |
2503 | } | |
2504 | else | |
2505 | { | |
2506 | if (uimm (aarch64_get_instr (cpu), 11, 11)) | |
2507 | set_flags_for_sub32 (cpu, aarch64_get_reg_u32 (cpu, rn, SP_OK), | |
2508 | negate * rm); | |
2509 | else | |
2510 | set_flags_for_sub32 (cpu, aarch64_get_reg_u32 (cpu, rn, SP_OK), | |
2511 | negate * aarch64_get_reg_u32 (cpu, rm, SP_OK)); | |
2512 | } | |
2513 | } | |
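| ||
| /* Example: CCMP W1, #5, #0, EQ performs the compare of W1 | |
| against 5 only when Z is currently set; otherwise it simply | |
| loads NZCV with the literal 0 taken from instr[3,0]. */ | |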
2514 | ||
2515 | static void | |
2516 | do_vec_MOV_whole_vector (sim_cpu *cpu) | |
2517 | { | |
2518 | /* MOV Vd.T, Vs.T (alias for ORR Vd.T, Vn.T, Vm.T where Vn == Vm) | |
2519 | ||
2520 | instr[31] = 0 | |
2521 | instr[30] = half(0)/full(1) | |
2522 | instr[29,21] = 001110101 | |
2523 | instr[20,16] = Vs | |
2524 | instr[15,10] = 000111 | |
2525 | instr[9,5] = Vs | |
2526 | instr[4,0] = Vd */ | |
2527 | ||
2528 | unsigned vs = uimm (aarch64_get_instr (cpu), 9, 5); | |
2529 | unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0); | |
2530 | ||
2531 | NYI_assert (29, 21, 0x075); | |
2532 | NYI_assert (15, 10, 0x07); | |
2533 | ||
2534 | if (uimm (aarch64_get_instr (cpu), 20, 16) != vs) | |
2535 | HALT_NYI; | |
2536 | ||
2537 | if (uimm (aarch64_get_instr (cpu), 30, 30)) | |
2538 | aarch64_set_vec_u64 (cpu, vd, 1, aarch64_get_vec_u64 (cpu, vs, 1)); | |
2539 | ||
2540 | aarch64_set_vec_u64 (cpu, vd, 0, aarch64_get_vec_u64 (cpu, vs, 0)); | |
2541 | } | |
2542 | ||
2543 | static void | |
2544 | do_vec_MOV_into_scalar (sim_cpu *cpu) | |
2545 | { | |
2546 | /* instr[31] = 0 | |
2547 | instr[30] = word(0)/long(1) | |
2548 | instr[29,21] = 00 1110 000 | |
2549 | instr[20,18] = element size and index | |
2550 | instr[17,10] = 00 0011 11 | |
2551 | instr[9,5] = V source | |
2552 | instr[4,0] = R dest */ | |
2553 | ||
2554 | unsigned vs = uimm (aarch64_get_instr (cpu), 9, 5); | |
2555 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
2556 | ||
2557 | NYI_assert (29, 21, 0x070); | |
2558 | NYI_assert (17, 10, 0x0F); | |
2559 | ||
2560 | switch (uimm (aarch64_get_instr (cpu), 20, 18)) | |
2561 | { | |
2562 | case 0x2: | |
2563 | aarch64_set_reg_u64 (cpu, rd, NO_SP, aarch64_get_vec_u64 (cpu, vs, 0)); | |
2564 | break; | |
2565 | ||
2566 | case 0x6: | |
2567 | aarch64_set_reg_u64 (cpu, rd, NO_SP, aarch64_get_vec_u64 (cpu, vs, 1)); | |
2568 | break; | |
2569 | ||
2570 | case 0x1: | |
2571 | case 0x3: | |
2572 | case 0x5: | |
2573 | case 0x7: | |
2574 | aarch64_set_reg_u64 (cpu, rd, NO_SP, aarch64_get_vec_u32 | |
2575 | (cpu, vs, uimm (aarch64_get_instr (cpu), 20, 19))); | |
2576 | break; | |
2577 | ||
2578 | default: | |
2579 | HALT_NYI; | |
2580 | } | |
2581 | } | |
2582 | ||
2583 | static void | |
2584 | do_vec_INS (sim_cpu *cpu) | |
2585 | { | |
2586 | /* instr[31,21] = 01001110000 | |
2587 | instr[20,16] = element size and index | |
2588 | instr[15,10] = 000111 | |
2589 | instr[9,5] = W source | |
2590 | instr[4,0] = V dest */ | |
2591 | ||
2592 | int index; | |
2593 | unsigned rs = uimm (aarch64_get_instr (cpu), 9, 5); | |
2594 | unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0); | |
2595 | ||
2596 | NYI_assert (31, 21, 0x270); | |
2597 | NYI_assert (15, 10, 0x07); | |
2598 | ||
2599 | if (uimm (aarch64_get_instr (cpu), 16, 16)) | |
2600 | { | |
2601 | index = uimm (aarch64_get_instr (cpu), 20, 17); | |
2602 | aarch64_set_vec_u8 (cpu, vd, index, | |
2603 | aarch64_get_reg_u8 (cpu, rs, NO_SP)); | |
2604 | } | |
2605 | else if (uimm (aarch64_get_instr (cpu), 17, 17)) | |
2606 | { | |
2607 | index = uimm (aarch64_get_instr (cpu), 20, 18); | |
2608 | aarch64_set_vec_u16 (cpu, vd, index, | |
2609 | aarch64_get_reg_u16 (cpu, rs, NO_SP)); | |
2610 | } | |
2611 | else if (uimm (aarch64_get_instr (cpu), 18, 18)) | |
2612 | { | |
2613 | index = uimm (aarch64_get_instr (cpu), 20, 19); | |
2614 | aarch64_set_vec_u32 (cpu, vd, index, | |
2615 | aarch64_get_reg_u32 (cpu, rs, NO_SP)); | |
2616 | } | |
2617 | else if (uimm (aarch64_get_instr (cpu), 19, 19)) | |
2618 | { | |
2619 | index = uimm (aarch64_get_instr (cpu), 20, 20); | |
2620 | aarch64_set_vec_u64 (cpu, vd, index, | |
2621 | aarch64_get_reg_u64 (cpu, rs, NO_SP)); | |
2622 | } | |
2623 | else | |
2624 | HALT_NYI; | |
2625 | } | |
2626 | ||
2627 | static void | |
2628 | do_vec_DUP_vector_into_vector (sim_cpu *cpu) | |
2629 | { | |
2630 | /* instr[31] = 0 | |
2631 | instr[30] = half(0)/full(1) | |
2632 | instr[29,21] = 00 1110 000 | |
2633 | instr[20,16] = element size and index | |
2634 | instr[15,10] = 0000 01 | |
2635 | instr[9,5] = V source | |
2636 | instr[4,0] = V dest. */ | |
2637 | ||
2638 | unsigned full = uimm (aarch64_get_instr (cpu), 30, 30); | |
2639 | unsigned vs = uimm (aarch64_get_instr (cpu), 9, 5); | |
2640 | unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0); | |
2641 | int i, index; | |
2642 | ||
2643 | NYI_assert (29, 21, 0x070); | |
2644 | NYI_assert (15, 10, 0x01); | |
2645 | ||
2646 | if (uimm (aarch64_get_instr (cpu), 16, 16)) | |
2647 | { | |
2648 | index = uimm (aarch64_get_instr (cpu), 20, 17); | |
2649 | ||
2650 | for (i = 0; i < (full ? 16 : 8); i++) | |
2651 | aarch64_set_vec_u8 (cpu, vd, i, aarch64_get_vec_u8 (cpu, vs, index)); | |
2652 | } | |
2653 | else if (uimm (aarch64_get_instr (cpu), 17, 17)) | |
2654 | { | |
2655 | index = uimm (aarch64_get_instr (cpu), 20, 18); | |
2656 | ||
2657 | for (i = 0; i < (full ? 8 : 4); i++) | |
2658 | aarch64_set_vec_u16 (cpu, vd, i, aarch64_get_vec_u16 (cpu, vs, index)); | |
2659 | } | |
2660 | else if (uimm (aarch64_get_instr (cpu), 18, 18)) | |
2661 | { | |
2662 | index = uimm (aarch64_get_instr (cpu), 20, 19); | |
2663 | ||
2664 | for (i = 0; i < (full ? 4 : 2); i++) | |
2665 | aarch64_set_vec_u32 (cpu, vd, i, aarch64_get_vec_u32 (cpu, vs, index)); | |
2666 | } | |
2667 | else | |
2668 | { | |
2669 | if (uimm (aarch64_get_instr (cpu), 19, 19) == 0) | |
2670 | HALT_UNALLOC; | |
2671 | ||
2672 | if (! full) | |
2673 | HALT_UNALLOC; | |
2674 | ||
2675 | index = uimm (aarch64_get_instr (cpu), 20, 20); | |
2676 | ||
2677 | for (i = 0; i < 2; i++) | |
2678 | aarch64_set_vec_u64 (cpu, vd, i, aarch64_get_vec_u64 (cpu, vs, index)); | |
2679 | } | |
2680 | } | |
2681 | ||
2682 | static void | |
2683 | do_vec_TBL (sim_cpu *cpu) | |
2684 | { | |
2685 | /* instr[31] = 0 | |
2686 | instr[30] = half(0)/full(1) | |
2687 | instr[29,21] = 00 1110 000 | |
2688 | instr[20,16] = Vm | |
2689 | instr[15] = 0 | |
2690 | instr[14,13] = vec length | |
2691 | instr[12,10] = 000 | |
2692 | instr[9,5] = V start | |
2693 | instr[4,0] = V dest */ | |
2694 | ||
2695 | int full = uimm (aarch64_get_instr (cpu), 30, 30); | |
2696 | int len = uimm (aarch64_get_instr (cpu), 14, 13) + 1; | |
2697 | unsigned vm = uimm (aarch64_get_instr (cpu), 20, 16); | |
2698 | unsigned vn = uimm (aarch64_get_instr (cpu), 9, 5); | |
2699 | unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0); | |
2700 | unsigned i; | |
2701 | ||
2702 | NYI_assert (29, 21, 0x070); | |
2703 | NYI_assert (12, 10, 0); | |
2704 | ||
2705 | for (i = 0; i < (full ? 16 : 8); i++) | |
2706 | { | |
2707 | unsigned int selector = aarch64_get_vec_u8 (cpu, vm, i); | |
2708 | uint8_t val; | |
2709 | ||
2710 | if (selector < 16) | |
2711 | val = aarch64_get_vec_u8 (cpu, vn, selector); | |
2712 | else if (selector < 32) | |
2713 | val = len < 2 ? 0 : aarch64_get_vec_u8 (cpu, vn + 1, selector - 16); | |
2714 | else if (selector < 48) | |
2715 | val = len < 3 ? 0 : aarch64_get_vec_u8 (cpu, vn + 2, selector - 32); | |
2716 | else if (selector < 64) | |
2717 | val = len < 4 ? 0 : aarch64_get_vec_u8 (cpu, vn + 3, selector - 48); | |
2718 | else | |
2719 | val = 0; | |
2720 | ||
2721 | aarch64_set_vec_u8 (cpu, vd, i, val); | |
2722 | } | |
2723 | } | |
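| ||
| /* Example: with len == 1 (one 16 byte table register) a selector | |
| byte of 20 is out of range and yields 0, while a selector of 5 | |
| fetches byte 5 of Vn. */ | |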
2724 | ||
2725 | static void | |
2726 | do_vec_TRN (sim_cpu *cpu) | |
2727 | { | |
2728 | /* instr[31] = 0 | |
2729 | instr[30] = half(0)/full(1) | |
2730 | instr[29,24] = 00 1110 | |
2731 | instr[23,22] = size | |
2732 | instr[21] = 0 | |
2733 | instr[20,16] = Vm | |
2734 | instr[15] = 0 | |
2735 | instr[14] = TRN1 (0) / TRN2 (1) | |
2736 | instr[13,10] = 1010 | |
2737 | instr[9,5] = V source | |
2738 | instr[4,0] = V dest. */ | |
2739 | ||
2740 | int full = uimm (aarch64_get_instr (cpu), 30, 30); | |
2741 | int second = uimm (aarch64_get_instr (cpu), 14, 14); | |
2742 | unsigned vm = uimm (aarch64_get_instr (cpu), 20, 16); | |
2743 | unsigned vn = uimm (aarch64_get_instr (cpu), 9, 5); | |
2744 | unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0); | |
2745 | unsigned i; | |
2746 | ||
2747 | NYI_assert (29, 24, 0x0E); | |
2748 | NYI_assert (13, 10, 0xA); | |
2749 | ||
2750 | switch (uimm (aarch64_get_instr (cpu), 23, 22)) | |
2751 | { | |
2752 | case 0: | |
2753 | for (i = 0; i < (full ? 8 : 4); i++) | |
2754 | { | |
2755 | aarch64_set_vec_u8 | |
2756 | (cpu, vd, i * 2, | |
2757 | aarch64_get_vec_u8 (cpu, second ? vm : vn, i * 2)); | |
2758 | aarch64_set_vec_u8 | |
2759 | (cpu, vd, i * 2 + 1, | |
2760 | aarch64_get_vec_u8 (cpu, second ? vn : vm, i * 2 + 1)); | |
2761 | } | |
2762 | break; | |
2763 | ||
2764 | case 1: | |
2765 | for (i = 0; i < (full ? 4 : 2); i++) | |
2766 | { | |
2767 | aarch64_set_vec_u16 | |
2768 | (cpu, vd, i * 2, | |
2769 | aarch64_get_vec_u16 (cpu, second ? vm : vn, i * 2)); | |
2770 | aarch64_set_vec_u16 | |
2771 | (cpu, vd, i * 2 + 1, | |
2772 | aarch64_get_vec_u16 (cpu, second ? vn : vm, i * 2 + 1)); | |
2773 | } | |
2774 | break; | |
2775 | ||
2776 | case 2: | |
2777 | aarch64_set_vec_u32 | |
2778 | (cpu, vd, 0, aarch64_get_vec_u32 (cpu, second ? vm : vn, 0)); | |
2779 | aarch64_set_vec_u32 | |
2780 | (cpu, vd, 1, aarch64_get_vec_u32 (cpu, second ? vn : vm, 1)); | |
2781 | aarch64_set_vec_u32 | |
2782 | (cpu, vd, 2, aarch64_get_vec_u32 (cpu, second ? vm : vn, 2)); | |
2783 | aarch64_set_vec_u32 | |
2784 | (cpu, vd, 3, aarch64_get_vec_u32 (cpu, second ? vn : vm, 3)); | |
2785 | break; | |
2786 | ||
2787 | case 3: | |
2788 | if (! full) | |
2789 | HALT_UNALLOC; | |
2790 | ||
2791 | aarch64_set_vec_u64 (cpu, vd, 0, | |
2792 | aarch64_get_vec_u64 (cpu, second ? vm : vn, 0)); | |
2793 | aarch64_set_vec_u64 (cpu, vd, 1, | |
2794 | aarch64_get_vec_u64 (cpu, second ? vn : vm, 1)); | |
2795 | break; | |
2796 | ||
2797 | default: | |
2798 | HALT_UNALLOC; | |
2799 | } | |
2800 | } | |
2801 | ||
2802 | static void | |
2803 | do_vec_DUP_scalar_into_vector (sim_cpu *cpu) | |
2804 | { | |
2805 | /* instr[31] = 0 | |
2806 | instr[30] = 0=> zero top 64-bits, 1=> duplicate into top 64-bits | |
2807 | [must be 1 for 64-bit xfer] | |
2808 | instr[29,20] = 00 1110 0000 | |
2809 | instr[19,16] = element size: 0001=> 8-bits, 0010=> 16-bits, | |
2810 | 0100=> 32-bits. 1000=>64-bits | |
2811 | instr[15,10] = 0000 11 | |
2812 | instr[9,5] = W source | |
2813 | instr[4,0] = V dest. */ | |
2814 | ||
2815 | unsigned i; | |
2816 | unsigned Vd = uimm (aarch64_get_instr (cpu), 4, 0); | |
2817 | unsigned Rs = uimm (aarch64_get_instr (cpu), 9, 5); | |
2818 | int both = uimm (aarch64_get_instr (cpu), 30, 30); | |
2819 | ||
2820 | NYI_assert (29, 20, 0x0E0); | |
2821 | NYI_assert (15, 10, 0x03); | |
2822 | ||
2823 | switch (uimm (aarch64_get_instr (cpu), 19, 16)) | |
2824 | { | |
2825 | case 1: | |
2826 | for (i = 0; i < (both ? 16 : 8); i++) | |
2827 | aarch64_set_vec_u8 (cpu, Vd, i, aarch64_get_reg_u8 (cpu, Rs, NO_SP)); | |
2828 | break; | |
2829 | ||
2830 | case 2: | |
2831 | for (i = 0; i < (both ? 8 : 4); i++) | |
2832 | aarch64_set_vec_u16 (cpu, Vd, i, aarch64_get_reg_u16 (cpu, Rs, NO_SP)); | |
2833 | break; | |
2834 | ||
2835 | case 4: | |
2836 | for (i = 0; i < (both ? 4 : 2); i++) | |
2837 | aarch64_set_vec_u32 (cpu, Vd, i, aarch64_get_reg_u32 (cpu, Rs, NO_SP)); | |
2838 | break; | |
2839 | ||
2840 | case 8: | |
2841 | if (!both) | |
2842 | HALT_NYI; | |
2843 | aarch64_set_vec_u64 (cpu, Vd, 0, aarch64_get_reg_u64 (cpu, Rs, NO_SP)); | |
2844 | aarch64_set_vec_u64 (cpu, Vd, 1, aarch64_get_reg_u64 (cpu, Rs, NO_SP)); | |
2845 | break; | |
2846 | ||
2847 | default: | |
2848 | HALT_NYI; | |
2849 | } | |
2850 | } | |
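| ||
| /* Example: DUP Vd.4S, Wn (element size 0100 with bit 30 set) | |
| replicates Wn into all four 32 bit lanes of Vd. */ | |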
2851 | ||
2852 | static void | |
2853 | do_vec_UZP (sim_cpu *cpu) | |
2854 | { | |
2855 | /* instr[31] = 0 | |
2856 | instr[30] = half(0)/full(1) | |
2857 | instr[29,24] = 00 1110 | |
2858 | instr[23,22] = size: byte(00), half(01), word (10), long (11) | |
2859 | instr[21] = 0 | |
2860 | instr[20,16] = Vm | |
2861 | instr[15] = 0 | |
2862 | instr[14] = lower (0) / upper (1) | |
2863 | instr[13,10] = 0110 | |
2864 | instr[9,5] = Vn | |
2865 | instr[4,0] = Vd. */ | |
2866 | ||
2867 | int full = uimm (aarch64_get_instr (cpu), 30, 30); | |
2868 | int upper = uimm (aarch64_get_instr (cpu), 14, 14); | |
2869 | ||
2870 | unsigned vm = uimm (aarch64_get_instr (cpu), 20, 16); | |
2871 | unsigned vn = uimm (aarch64_get_instr (cpu), 9, 5); | |
2872 | unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0); | |
2873 | ||
2874 | uint64_t val_m1 = aarch64_get_vec_u64 (cpu, vm, 0); | |
2875 | uint64_t val_m2 = aarch64_get_vec_u64 (cpu, vm, 1); | |
2876 | uint64_t val_n1 = aarch64_get_vec_u64 (cpu, vn, 0); | |
2877 | uint64_t val_n2 = aarch64_get_vec_u64 (cpu, vn, 1); | |
2878 | ||
2879 | uint64_t val1 = 0; | |
2880 | uint64_t val2 = 0; | |
2881 | ||
2882 | uint64_t input1 = upper ? val_n1 : val_m1; | |
2883 | uint64_t input2 = upper ? val_n2 : val_m2; | |
2884 | unsigned i; | |
2885 | ||
2886 | NYI_assert (29, 24, 0x0E); | |
2887 | NYI_assert (21, 21, 0); | |
2888 | NYI_assert (15, 15, 0); | |
2889 | NYI_assert (13, 10, 6); | |
2890 | ||
2891 | switch (uimm (aarch64_get_instr (cpu), 23, 23)) | |
2892 | { | |
2893 | case 0: | |
2894 | for (i = 0; i < 8; i++) | |
2895 | { | |
2896 | val1 |= (input1 >> (i * 8)) & (0xFFULL << (i * 8)); | |
2897 | val2 |= (input2 >> (i * 8)) & (0xFFULL << (i * 8)); | |
2898 | } | |
2899 | break; | |
2900 | ||
2901 | case 1: | |
2902 | for (i = 0; i < 4; i++) | |
2903 | { | |
2904 | val1 |= (input1 >> (i * 16)) & (0xFFFFULL << (i * 16)); | |
2905 | val2 |= (input2 >> (i * 16)) & (0xFFFFULL << (i * 16)); | |
2906 | } | |
2907 | break; | |
2908 | ||
2909 | case 2: | |
2910 | val1 = ((input1 & 0xFFFFFFFF) | ((input1 >> 32) & 0xFFFFFFFF00000000ULL)); | |
2911 | val2 = ((input2 & 0xFFFFFFFF) | ((input2 >> 32) & 0xFFFFFFFF00000000ULL)); | |
2912 | break; | |
| ||
2913 | case 3: | |
2914 | val1 = input1; | |
2915 | val2 = input2; | |
2916 | break; | |
2917 | } | |
2918 | ||
2919 | aarch64_set_vec_u64 (cpu, vd, 0, val1); | |
2920 | if (full) | |
2921 | aarch64_set_vec_u64 (cpu, vd, 1, val2); | |
2922 | } | |
2923 | ||
2924 | static void | |
2925 | do_vec_ZIP (sim_cpu *cpu) | |
2926 | { | |
2927 | /* instr[31] = 0 | |
2928 | instr[30] = half(0)/full(1) | |
2929 | instr[29,24] = 00 1110 | |
2930 | instr[23,22] = size: byte(00), half(01), word (10), long (11) | |
2931 | instr[21] = 0 | |
2932 | instr[20,16] = Vm | |
2933 | instr[15] = 0 | |
2934 | instr[14] = lower (0) / upper (1) | |
2935 | instr[13,10] = 1110 | |
2936 | instr[9,5] = Vn | |
2937 | instr[4,0] = Vd. */ | |
2938 | ||
2939 | int full = uimm (aarch64_get_instr (cpu), 30, 30); | |
2940 | int upper = uimm (aarch64_get_instr (cpu), 14, 14); | |
2941 | ||
2942 | unsigned vm = uimm (aarch64_get_instr (cpu), 20, 16); | |
2943 | unsigned vn = uimm (aarch64_get_instr (cpu), 9, 5); | |
2944 | unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0); | |
2945 | ||
2946 | uint64_t val_m1 = aarch64_get_vec_u64 (cpu, vm, 0); | |
2947 | uint64_t val_m2 = aarch64_get_vec_u64 (cpu, vm, 1); | |
2948 | uint64_t val_n1 = aarch64_get_vec_u64 (cpu, vn, 0); | |
2949 | uint64_t val_n2 = aarch64_get_vec_u64 (cpu, vn, 1); | |
2950 | ||
2951 | uint64_t val1 = 0; | |
2952 | uint64_t val2 = 0; | |
2953 | ||
2954 | uint64_t input1 = upper ? val_n1 : val_m1; | |
2955 | uint64_t input2 = upper ? val_n2 : val_m2; | |
2956 | ||
2957 | NYI_assert (29, 24, 0x0E); | |
2958 | NYI_assert (21, 21, 0); | |
2959 | NYI_assert (15, 15, 0); | |
2960 | NYI_assert (13, 10, 0xE); | |
2961 | ||
2962 | switch (uimm (aarch64_get_instr (cpu), 23, 23)) | |
2963 | { | |
2964 | case 0: | |
2965 | /* The masks must be ULL: 0xFF << 24 would overflow a signed int | |
2966 | and then sign-extend over the high bits. */ | |
2967 | val1 = | |
2968 | ((input1 << 0) & (0xFFULL << 0)) | |
2969 | | ((input2 << 8) & (0xFFULL << 8)) | |
2970 | | ((input1 << 8) & (0xFFULL << 16)) | |
2971 | | ((input2 << 16) & (0xFFULL << 24)) | |
2972 | | ((input1 << 16) & (0xFFULL << 32)) | |
2973 | | ((input2 << 24) & (0xFFULL << 40)) | |
2974 | | ((input1 << 24) & (0xFFULL << 48)) | |
2975 | | ((input2 << 32) & (0xFFULL << 56)); | |
2976 | ||
2977 | val2 = | |
2978 | ((input1 >> 32) & (0xFFULL << 0)) | |
2979 | | ((input2 >> 24) & (0xFFULL << 8)) | |
2980 | | ((input1 >> 24) & (0xFFULL << 16)) | |
2981 | | ((input2 >> 16) & (0xFFULL << 24)) | |
2982 | | ((input1 >> 16) & (0xFFULL << 32)) | |
2983 | | ((input2 >> 8) & (0xFFULL << 40)) | |
2984 | | ((input1 >> 8) & (0xFFULL << 48)) | |
2985 | | ((input2 >> 0) & (0xFFULL << 56)); | |
2984 | break; | |
2985 | ||
2986 | case 1: | |
2987 | val1 = | |
2988 | ((input1 << 0) & (0xFFFFULL << 0)) | |
2989 | | ((input2 << 16) & (0xFFFFULL << 16)) | |
2990 | | ((input1 << 16) & (0xFFFFULL << 32)) | |
2991 | | ((input2 << 32) & (0xFFFFULL << 48)); | |
2992 | ||
2993 | val2 = | |
2994 | ((input1 >> 32) & (0xFFFFULL << 0)) | |
2995 | | ((input2 >> 16) & (0xFFFFULL << 16)) | |
2996 | | ((input1 >> 16) & (0xFFFFULL << 32)) | |
2997 | | ((input2 >> 0) & (0xFFFFULL << 48)); | |
2998 | break; | |
2999 | ||
3000 | case 2: | |
3001 | val1 = (input1 & 0xFFFFFFFFULL) | (input2 << 32); | |
3002 | val2 = (input2 & 0xFFFFFFFFULL) | (input1 << 32); | |
3003 | break; | |
3004 | ||
3005 | case 3: | |
3006 | val1 = input1; | |
3007 | val2 = input2; | |
3008 | break; | |
3009 | } | |
3010 | ||
3011 | aarch64_set_vec_u64 (cpu, vd, 0, val1); | |
3012 | if (full) | |
3013 | aarch64_set_vec_u64 (cpu, vd, 1, val2); | |
3014 | } | |
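| ||
| /* Example: for byte lanes the low half of the result interleaves | |
| the low bytes of the two 64 bit inputs, i.e. a0 b0 a1 b1 a2 b2 | |
| a3 b3. */ | |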
3015 | ||
3016 | /* Floating point immediates are encoded in 8 bits. | |
3017 | fpimm[7] = sign bit. | |
3018 | fpimm[6:4] = signed exponent. | |
3019 | fpimm[3:0] = fraction (assuming leading 1). | |
3020 | i.e. F = s * 1.f * 2^(e - b). */ | |
3021 | ||
3022 | static float | |
3023 | fp_immediate_for_encoding_32 (uint32_t imm8) | |
3024 | { | |
3025 | float u; | |
3026 | uint32_t s, e, f, i; | |
3027 | ||
3028 | s = (imm8 >> 7) & 0x1; | |
3029 | e = (imm8 >> 4) & 0x7; | |
3030 | f = imm8 & 0xf; | |
3031 | ||
3032 | /* The fp value is +/- n/16 * 2^r where n is 16+f and r is the | |
3033 | signed exponent decoded from e. */ | |
3033 | u = (16.0 + f) / 16.0; | |
3034 | ||
3035 | /* N.B. exponent is signed. */ | |
3036 | if (e < 4) | |
3037 | { | |
3038 | int epos = e; | |
3039 | ||
3040 | for (i = 0; i <= epos; i++) | |
3041 | u *= 2.0; | |
3042 | } | |
3043 | else | |
3044 | { | |
3045 | int eneg = 7 - e; | |
3046 | ||
3047 | for (i = 0; i < eneg; i++) | |
3048 | u /= 2.0; | |
3049 | } | |
3050 | ||
3051 | if (s) | |
3052 | u = - u; | |
3053 | ||
3054 | return u; | |
3055 | } | |
3056 | ||
3057 | static double | |
3058 | fp_immediate_for_encoding_64 (uint32_t imm8) | |
3059 | { | |
3060 | double u; | |
3061 | uint32_t s, e, f, i; | |
3062 | ||
3063 | s = (imm8 >> 7) & 0x1; | |
3064 | e = (imm8 >> 4) & 0x7; | |
3065 | f = imm8 & 0xf; | |
3066 | ||
3067 | /* The fp value is s * n/16 * 2^r where n is 16+f. */ | |
3068 | u = (16.0 + f) / 16.0; | |
3069 | ||
3070 | /* N.B. exponent is signed. */ | |
3071 | if (e < 4) | |
3072 | { | |
3073 | int epos = e; | |
3074 | ||
3075 | for (i = 0; i <= epos; i++) | |
3076 | u *= 2.0; | |
3077 | } | |
3078 | else | |
3079 | { | |
3080 | int eneg = 7 - e; | |
3081 | ||
3082 | for (i = 0; i < eneg; i++) | |
3083 | u /= 2.0; | |
3084 | } | |
3085 | ||
3086 | if (s) | |
3087 | u = - u; | |
3088 | ||
3089 | return u; | |
3090 | } | |
3091 | ||
3092 | static void | |
3093 | do_vec_MOV_immediate (sim_cpu *cpu) | |
3094 | { | |
3095 | /* instr[31] = 0 | |
3096 | instr[30] = full/half selector | |
3097 | instr[29,19] = 00111100000 | |
3098 | instr[18,16] = high 3 bits of uimm8 | |
3099 | instr[15,12] = size & shift: | |
3100 | 0000 => 32-bit | |
3101 | 0010 => 32-bit + LSL#8 | |
3102 | 0100 => 32-bit + LSL#16 | |
3103 | 0110 => 32-bit + LSL#24 | |
3104 | 1010 => 16-bit + LSL#8 | |
3105 | 1000 => 16-bit | |
3106 | 1101 => 32-bit + MSL#16 | |
3107 | 1100 => 32-bit + MSL#8 | |
3108 | 1110 => 8-bit | |
3109 | 1111 => double | |
3110 | instr[11,10] = 01 | |
3111 | instr[9,5] = low 5-bits of uimm8 | |
3112 | instr[4,0] = Vd. */ | |
3113 | ||
3114 | int full = uimm (aarch64_get_instr (cpu), 30, 30); | |
3115 | unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0); | |
3116 | unsigned val = uimm (aarch64_get_instr (cpu), 18, 16) << 5 | |
3117 | | uimm (aarch64_get_instr (cpu), 9, 5); | |
3118 | unsigned i; | |
3119 | ||
3120 | NYI_assert (29, 19, 0x1E0); | |
3121 | NYI_assert (11, 10, 1); | |
3122 | ||
3123 | switch (uimm (aarch64_get_instr (cpu), 15, 12)) | |
3124 | { | |
3125 | case 0x0: /* 32-bit, no shift. */ | |
3126 | case 0x2: /* 32-bit, shift by 8. */ | |
3127 | case 0x4: /* 32-bit, shift by 16. */ | |
3128 | case 0x6: /* 32-bit, shift by 24. */ | |
3129 | val <<= (8 * uimm (aarch64_get_instr (cpu), 14, 13)); | |
3130 | for (i = 0; i < (full ? 4 : 2); i++) | |
3131 | aarch64_set_vec_u32 (cpu, vd, i, val); | |
3132 | break; | |
3133 | ||
3134 | case 0xa: /* 16-bit, shift by 8. */ | |
3135 | val <<= 8; | |
3136 | /* Fall through. */ | |
3137 | case 0x8: /* 16-bit, no shift. */ | |
3138 | for (i = 0; i < (full ? 8 : 4); i++) | |
3139 | aarch64_set_vec_u16 (cpu, vd, i, val); | |
3140 | break; | |
| ||
3141 | case 0xd: /* 32-bit, mask shift by 16. */ | |
3142 | val <<= 8; | |
3143 | val |= 0xFF; | |
3144 | /* Fall through. */ | |
3145 | case 0xc: /* 32-bit, mask shift by 8. */ | |
3146 | val <<= 8; | |
3147 | val |= 0xFF; | |
3148 | for (i = 0; i < (full ? 4 : 2); i++) | |
3149 | aarch64_set_vec_u32 (cpu, vd, i, val); | |
3150 | break; | |
3151 | ||
3152 | case 0xe: /* 8-bit, no shift. */ | |
3153 | for (i = 0; i < (full ? 16 : 8); i++) | |
3154 | aarch64_set_vec_u8 (cpu, vd, i, val); | |
3155 | break; | |
3156 | ||
3157 | case 0xf: /* FMOV Vs.{2|4}S, #fpimm. */ | |
3158 | { | |
3159 | float u = fp_immediate_for_encoding_32 (val); | |
3160 | for (i = 0; i < (full ? 4 : 2); i++) | |
3161 | aarch64_set_vec_float (cpu, vd, i, u); | |
3162 | break; | |
3163 | } | |
3164 | ||
3165 | default: | |
3166 | HALT_NYI; | |
3167 | } | |
3168 | } | |
3169 | ||
3170 | static void | |
3171 | do_vec_MVNI (sim_cpu *cpu) | |
3172 | { | |
3173 | /* instr[31] = 0 | |
3174 | instr[30] = full/half selector | |
3175 | instr[29,19] = 10111100000 | |
3176 | instr[18,16] = high 3 bits of uimm8 | |
3177 | instr[15,12] = selector | |
3178 | instr[11,10] = 01 | |
3179 | instr[9,5] = low 5-bits of uimm8 | |
3180 | instr[4,0] = Vd. */ | |
3181 | ||
3182 | int full = uimm (aarch64_get_instr (cpu), 30, 30); | |
3183 | unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0); | |
3184 | unsigned val = uimm (aarch64_get_instr (cpu), 18, 16) << 5 | |
3185 | | uimm (aarch64_get_instr (cpu), 9, 5); | |
3186 | unsigned i; | |
3187 | ||
3188 | NYI_assert (29, 19, 0x5E0); | |
3189 | NYI_assert (11, 10, 1); | |
3190 | ||
3191 | switch (uimm (aarch64_get_instr (cpu), 15, 12)) | |
3192 | { | |
3193 | case 0x0: /* 32-bit, no shift. */ | |
3194 | case 0x2: /* 32-bit, shift by 8. */ | |
3195 | case 0x4: /* 32-bit, shift by 16. */ | |
3196 | case 0x6: /* 32-bit, shift by 24. */ | |
3197 | val <<= (8 * uimm (aarch64_get_instr (cpu), 14, 13)); | |
3198 | val = ~ val; | |
3199 | for (i = 0; i < (full ? 4 : 2); i++) | |
3200 | aarch64_set_vec_u32 (cpu, vd, i, val); | |
3201 | return; | |
3202 | ||
3203 | case 0xa: /* 16-bit, 8 bit shift. */ | |
3204 | val <<= 8; | |
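| /* Fall through. */ | |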
3205 | case 0x8: /* 16-bit, no shift. */ | |
3206 | val = ~ val; | |
3207 | for (i = 0; i < (full ? 8 : 4); i++) | |
3208 | aarch64_set_vec_u16 (cpu, vd, i, val); | |
3209 | return; | |
3210 | ||
3211 | case 0xd: /* 32-bit, mask shift by 16. */ | |
3212 | val <<= 8; | |
3213 | val |= 0xFF; | |
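| /* Fall through. */ | |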
3214 | case 0xc: /* 32-bit, mask shift by 8. */ | |
3215 | val <<= 8; | |
3216 | val |= 0xFF; | |
3217 | val = ~ val; | |
3218 | for (i = 0; i < (full ? 4 : 2); i++) | |
3219 | aarch64_set_vec_u32 (cpu, vd, i, val); | |
3220 | return; | |
3221 | ||
3222 | case 0xE: /* MOVI Dn, #mask64 */ | |
3223 | { | |
3224 | uint64_t mask = 0; | |
3225 | ||
3226 | /* Each bit of the immediate selects a whole byte of the result. */ | |
3227 | for (i = 0; i < 8; i++) | |
3228 | if (val & (1 << i)) | |
| mask |= (0xFFULL << (i * 8)); | |
3229 | aarch64_set_vec_u64 (cpu, vd, 0, mask); | |
3230 | aarch64_set_vec_u64 (cpu, vd, 1, 0); | |
3231 | return; | |
3232 | } | |
3233 | ||
3234 | case 0xf: /* FMOV Vd.2D, #fpimm. */ | |
3235 | { | |
3236 | double u = fp_immediate_for_encoding_64 (val); | |
3237 | ||
3238 | if (! full) | |
3239 | HALT_UNALLOC; | |
3240 | ||
3241 | aarch64_set_vec_double (cpu, vd, 0, u); | |
3242 | aarch64_set_vec_double (cpu, vd, 1, u); | |
3243 | return; | |
3244 | } | |
3245 | ||
3246 | default: | |
3247 | HALT_NYI; | |
3248 | } | |
3249 | } | |
3250 | ||
3251 | #define ABS(A) ((A) < 0 ? - (A) : (A)) | |
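| /* N.B. negating the most negative value wraps, so ABS of the minimum | |
| signed element yields that element again, matching the hardware. */ | |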
3252 | ||
3253 | static void | |
3254 | do_vec_ABS (sim_cpu *cpu) | |
3255 | { | |
3256 | /* instr[31] = 0 | |
3257 | instr[30] = half(0)/full(1) | |
3258 | instr[29,24] = 00 1110 | |
3259 | instr[23,22] = size: 00=> 8-bit, 01=> 16-bit, 10=> 32-bit, 11=> 64-bit | |
3260 | instr[21,10] = 10 0000 1011 10 | |
3261 | instr[9,5] = Vn | |
3262 | instr[4,0] = Vd. */ | |
3263 | ||
3264 | unsigned vn = uimm (aarch64_get_instr (cpu), 9, 5); | |
3265 | unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0); | |
3266 | unsigned full = uimm (aarch64_get_instr (cpu), 30, 30); | |
3267 | unsigned i; | |
3268 | ||
3269 | NYI_assert (29, 24, 0x0E); | |
3270 | NYI_assert (21, 10, 0x82E); | |
3271 | ||
3272 | switch (uimm (aarch64_get_instr (cpu), 23, 22)) | |
3273 | { | |
3274 | case 0: | |
3275 | for (i = 0; i < (full ? 16 : 8); i++) | |
3276 | aarch64_set_vec_s8 (cpu, vd, i, | |
3277 | ABS (aarch64_get_vec_s8 (cpu, vn, i))); | |
3278 | break; | |
3279 | ||
3280 | case 1: | |
3281 | for (i = 0; i < (full ? 8 : 4); i++) | |
3282 | aarch64_set_vec_s16 (cpu, vd, i, | |
3283 | ABS (aarch64_get_vec_s16 (cpu, vn, i))); | |
3284 | break; | |
3285 | ||
3286 | case 2: | |
3287 | for (i = 0; i < (full ? 4 : 2); i++) | |
3288 | aarch64_set_vec_s32 (cpu, vd, i, | |
3289 | ABS (aarch64_get_vec_s32 (cpu, vn, i))); | |
3290 | break; | |
3291 | ||
3292 | case 3: | |
3293 | if (! full) | |
3294 | HALT_UNALLOC; | |
3295 | for (i = 0; i < 2; i++) | |
3296 | aarch64_set_vec_s64 (cpu, vd, i, | |
3297 | ABS (aarch64_get_vec_s64 (cpu, vn, i))); | |
3298 | break; | |
3299 | } | |
3300 | } | |
3301 | ||
3302 | static void | |
3303 | do_vec_ADDV (sim_cpu *cpu) | |
3304 | { | |
3305 | /* instr[31] = 0 | |
3306 | instr[30] = full/half selector | |
3307 | instr[29,24] = 00 1110 | |
3308 | instr[23,22] = size: 00=> 8-bit, 01=> 16-bit, 10=> 32-bit, 11=> 64-bit | |
3309 | instr[21,10] = 11 0001 1011 10 | |
3310 | instr[9,5] = Vm | |
3311 | instr[4,0] = Vd. */ | |
3312 | ||
3313 | unsigned vm = uimm (aarch64_get_instr (cpu), 9, 5); | |
3314 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
3315 | unsigned i; | |
3316 | uint64_t val = 0; | |
3317 | int full = uimm (aarch64_get_instr (cpu), 30, 30); | |
3318 | ||
3319 | NYI_assert (29, 24, 0x0E); | |
3320 | NYI_assert (21, 10, 0xC6E); | |
3321 | ||
3322 | switch (uimm (aarch64_get_instr (cpu), 23, 22)) | |
3323 | { | |
3324 | case 0: | |
3325 | for (i = 0; i < (full ? 16 : 8); i++) | |
3326 | val += aarch64_get_vec_u8 (cpu, vm, i); | |
3327 | aarch64_set_vec_u64 (cpu, rd, 0, val); | |
3328 | return; | |
3329 | ||
3330 | case 1: | |
3331 | for (i = 0; i < (full ? 8 : 4); i++) | |
3332 | val += aarch64_get_vec_u16 (cpu, vm, i); | |
3333 | aarch64_set_vec_u64 (cpu, rd, 0, val); | |
3334 | return; | |
3335 | ||
3336 | case 2: | |
3337 | for (i = 0; i < (full ? 4 : 2); i++) | |
3338 | val += aarch64_get_vec_u32 (cpu, vm, i); | |
3339 | aarch64_set_vec_u64 (cpu, rd, 0, val); | |
3340 | return; | |
3341 | ||
3342 | case 3: | |
3343 | if (! full) | |
3344 | HALT_UNALLOC; | |
3345 | val = aarch64_get_vec_u64 (cpu, vm, 0); | |
3346 | val += aarch64_get_vec_u64 (cpu, vm, 1); | |
3347 | aarch64_set_vec_u64 (cpu, rd, 0, val); | |
3348 | return; | |
3349 | ||
3350 | default: | |
3351 | HALT_UNREACHABLE; | |
3352 | } | |
3353 | } | |
3354 | ||
3355 | static void | |
3356 | do_vec_ins_2 (sim_cpu *cpu) | |
3357 | { | |
3358 | /* instr[31,21] = 01001110000 | |
3359 | instr[20,18] = size & element selector | |
3360 | instr[17,14] = 0000 | |
3361 | instr[13] = direction: to vec(0), from vec (1) | |
3362 | instr[12,10] = 111 | |
3363 | instr[9,5] = Vm | |
3364 | instr[4,0] = Vd. */ | |
3365 | ||
3366 | unsigned elem; | |
3367 | unsigned vm = uimm (aarch64_get_instr (cpu), 9, 5); | |
3368 | unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0); | |
3369 | ||
3370 | NYI_assert (31, 21, 0x270); | |
3371 | NYI_assert (17, 14, 0); | |
3372 | NYI_assert (12, 10, 7); | |
3373 | ||
3374 | if (uimm (aarch64_get_instr (cpu), 13, 13) == 1) | |
3375 | { | |
3376 | if (uimm (aarch64_get_instr (cpu), 18, 18) == 1) | |
3377 | { | |
3378 | /* 32-bit moves. */ | |
3379 | elem = uimm (aarch64_get_instr (cpu), 20, 19); | |
3380 | aarch64_set_reg_u64 (cpu, vd, NO_SP, | |
3381 | aarch64_get_vec_u32 (cpu, vm, elem)); | |
3382 | } | |
3383 | else | |
3384 | { | |
3385 | /* 64-bit moves. */ | |
3386 | if (uimm (aarch64_get_instr (cpu), 19, 19) != 1) | |
3387 | HALT_NYI; | |
3388 | ||
3389 | elem = uimm (aarch64_get_instr (cpu), 20, 20); | |
3390 | aarch64_set_reg_u64 (cpu, vd, NO_SP, | |
3391 | aarch64_get_vec_u64 (cpu, vm, elem)); | |
3392 | } | |
3393 | } | |
3394 | else | |
3395 | { | |
3396 | if (uimm (aarch64_get_instr (cpu), 18, 18) == 1) | |
3397 | { | |
3398 | /* 32-bit moves. */ | |
3399 | elem = uimm (aarch64_get_instr (cpu), 20, 19); | |
3400 | aarch64_set_vec_u32 (cpu, vd, elem, | |
3401 | aarch64_get_reg_u32 (cpu, vm, NO_SP)); | |
3402 | } | |
3403 | else | |
3404 | { | |
3405 | /* 64-bit moves. */ | |
3406 | if (uimm (aarch64_get_instr (cpu), 19, 19) != 1) | |
3407 | HALT_NYI; | |
3408 | ||
3409 | elem = uimm (aarch64_get_instr (cpu), 20, 20); | |
3410 | aarch64_set_vec_u64 (cpu, vd, elem, | |
3411 | aarch64_get_reg_u64 (cpu, vm, NO_SP)); | |
3412 | } | |
3413 | } | |
3414 | } | |
3415 | ||
3416 | static void | |
3417 | do_vec_mull (sim_cpu *cpu) | |
3418 | { | |
3419 | /* instr[31] = 0 | |
3420 | instr[30] = lower(0)/upper(1) selector | |
3421 | instr[29] = signed(0)/unsigned(1) | |
3422 | instr[28,24] = 0 1110 | |
3423 | instr[23,22] = size: 8-bit (00), 16-bit (01), 32-bit (10) | |
3424 | instr[21] = 1 | |
3425 | instr[20,16] = Vm | |
3426 | instr[15,10] = 11 0000 | |
3427 | instr[9,5] = Vn | |
3428 | instr[4,0] = Vd. */ | |
3429 | ||
3430 | int unsign = uimm (aarch64_get_instr (cpu), 29, 29); | |
3431 | int bias = uimm (aarch64_get_instr (cpu), 30, 30); | |
3432 | unsigned vm = uimm (aarch64_get_instr (cpu), 20, 16); | |
3433 | unsigned vn = uimm (aarch64_get_instr (cpu), 9, 5); | |
3434 | unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0); | |
3435 | unsigned i; | |
3436 | ||
3437 | NYI_assert (28, 24, 0x0E); | |
3438 | NYI_assert (15, 10, 0x30); | |
3439 | ||
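| /* A non-zero bias selects the upper halves of the source vectors, | |
| giving the second-part (xMULL2) forms of the instruction. */ | |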
3440 | switch (uimm (aarch64_get_instr (cpu), 23, 22)) | |
3441 | { | |
3442 | case 0: | |
3443 | if (bias) | |
3444 | bias = 8; | |
3445 | if (unsign) | |
3446 | for (i = 0; i < 8; i++) | |
3447 | aarch64_set_vec_u16 (cpu, vd, i, | |
3448 | aarch64_get_vec_u8 (cpu, vn, i + bias) | |
3449 | * aarch64_get_vec_u8 (cpu, vm, i + bias)); | |
3450 | else | |
3451 | for (i = 0; i < 8; i++) | |
3452 | aarch64_set_vec_s16 (cpu, vd, i, | |
3453 | aarch64_get_vec_s8 (cpu, vn, i + bias) | |
3454 | * aarch64_get_vec_s8 (cpu, vm, i + bias)); | |
3455 | return; | |
3456 | ||
3457 | case 1: | |
3458 | if (bias) | |
3459 | bias = 4; | |
3460 | if (unsign) | |
3461 | for (i = 0; i < 4; i++) | |
3462 | aarch64_set_vec_u32 (cpu, vd, i, | |
3463 | aarch64_get_vec_u16 (cpu, vn, i + bias) | |
3464 | * aarch64_get_vec_u16 (cpu, vm, i + bias)); | |
3465 | else | |
3466 | for (i = 0; i < 4; i++) | |
3467 | aarch64_set_vec_s32 (cpu, vd, i, | |
3468 | aarch64_get_vec_s16 (cpu, vn, i + bias) | |
3469 | * aarch64_get_vec_s16 (cpu, vm, i + bias)); | |
3470 | return; | |
3471 | ||
3472 | case 2: | |
3473 | if (bias) | |
3474 | bias = 2; | |
3475 | if (unsign) | |
3476 | for (i = 0; i < 2; i++) | |
3477 | aarch64_set_vec_u64 (cpu, vd, i, | |
3478 | (uint64_t) aarch64_get_vec_u32 (cpu, vn, | |
3479 | i + bias) | |
3480 | * (uint64_t) aarch64_get_vec_u32 (cpu, vm, | |
3481 | i + bias)); | |
3482 | else | |
3483 | for (i = 0; i < 2; i++) | |
3484 | aarch64_set_vec_s64 (cpu, vd, i, | |
3485 | (int64_t) aarch64_get_vec_s32 (cpu, vn, i + bias) | |
3486 | * (int64_t) aarch64_get_vec_s32 (cpu, vm, i + bias)); | |
3487 | return; | |
3488 | ||
3489 | case 3: | |
3490 | default: | |
3491 | HALT_NYI; | |
3492 | } | |
3493 | } | |
3494 | ||
3495 | static void | |
3496 | do_vec_fadd (sim_cpu *cpu) | |
3497 | { | |
3498 | /* instr[31] = 0 | |
3499 | instr[30] = half(0)/full(1) | |
3500 | instr[29,24] = 001110 | |
3501 | instr[23] = FADD(0)/FSUB(1) | |
3502 | instr[22] = float (0)/double(1) | |
3503 | instr[21] = 1 | |
3504 | instr[20,16] = Vm | |
3505 | instr[15,10] = 110101 | |
3506 | instr[9,5] = Vn | |
3507 | instr[4,0] = Vd. */ | |
3508 | ||
3509 | unsigned vm = uimm (aarch64_get_instr (cpu), 20, 16); | |
3510 | unsigned vn = uimm (aarch64_get_instr (cpu), 9, 5); | |
3511 | unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0); | |
3512 | unsigned i; | |
3513 | int full = uimm (aarch64_get_instr (cpu), 30, 30); | |
3514 | ||
3515 | NYI_assert (29, 24, 0x0E); | |
3516 | NYI_assert (21, 21, 1); | |
3517 | NYI_assert (15, 10, 0x35); | |
3518 | ||
3519 | if (uimm (aarch64_get_instr (cpu), 23, 23)) | |
3520 | { | |
3521 | if (uimm (aarch64_get_instr (cpu), 22, 22)) | |
3522 | { | |
3523 | if (! full) | |
3524 | HALT_NYI; | |
3525 | ||
3526 | for (i = 0; i < 2; i++) | |
3527 | aarch64_set_vec_double (cpu, vd, i, | |
3528 | aarch64_get_vec_double (cpu, vn, i) | |
3529 | - aarch64_get_vec_double (cpu, vm, i)); | |
3530 | } | |
3531 | else | |
3532 | { | |
3533 | for (i = 0; i < (full ? 4 : 2); i++) | |
3534 | aarch64_set_vec_float (cpu, vd, i, | |
3535 | aarch64_get_vec_float (cpu, vn, i) | |
3536 | - aarch64_get_vec_float (cpu, vm, i)); | |
3537 | } | |
3538 | } | |
3539 | else | |
3540 | { | |
3541 | if (uimm (aarch64_get_instr (cpu), 22, 22)) | |
3542 | { | |
3543 | if (! full) | |
3544 | HALT_NYI; | |
3545 | ||
3546 | for (i = 0; i < 2; i++) | |
3547 | aarch64_set_vec_double (cpu, vd, i, | |
3548 | aarch64_get_vec_double (cpu, vm, i) | |
3549 | + aarch64_get_vec_double (cpu, vn, i)); | |
3550 | } | |
3551 | else | |
3552 | { | |
3553 | for (i = 0; i < (full ? 4 : 2); i++) | |
3554 | aarch64_set_vec_float (cpu, vd, i, | |
3555 | aarch64_get_vec_float (cpu, vm, i) | |
3556 | + aarch64_get_vec_float (cpu, vn, i)); | |
3557 | } | |
3558 | } | |
3559 | } | |
3560 | ||
3561 | static void | |
3562 | do_vec_add (sim_cpu *cpu) | |
3563 | { | |
3564 | /* instr[31] = 0 | |
3565 | instr[30] = full/half selector | |
3566 | instr[29,24] = 001110 | |
3567 | instr[23,22] = size: 00=> 8-bit, 01=> 16-bit, 10=> 32-bit, 11=> 64-bit | |
3568 | instr[21] = 1 | |
3569 | instr[20,16] = Vm | |
3570 | instr[15,10] = 100001 | |
3571 | instr[9,5] = Vn | |
3572 | instr[4,0] = Vd. */ | |
3573 | ||
3574 | unsigned vm = uimm (aarch64_get_instr (cpu), 20, 16); | |
3575 | unsigned vn = uimm (aarch64_get_instr (cpu), 9, 5); | |
3576 | unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0); | |
3577 | unsigned i; | |
3578 | int full = uimm (aarch64_get_instr (cpu), 30, 30); | |
3579 | ||
3580 | NYI_assert (29, 24, 0x0E); | |
3581 | NYI_assert (21, 21, 1); | |
3582 | NYI_assert (15, 10, 0x21); | |
3583 | ||
3584 | switch (uimm (aarch64_get_instr (cpu), 23, 22)) | |
3585 | { | |
3586 | case 0: | |
3587 | for (i = 0; i < (full ? 16 : 8); i++) | |
3588 | aarch64_set_vec_u8 (cpu, vd, i, aarch64_get_vec_u8 (cpu, vn, i) | |
3589 | + aarch64_get_vec_u8 (cpu, vm, i)); | |
3590 | return; | |
3591 | ||
3592 | case 1: | |
3593 | for (i = 0; i < (full ? 8 : 4); i++) | |
3594 | aarch64_set_vec_u16 (cpu, vd, i, aarch64_get_vec_u16 (cpu, vn, i) | |
3595 | + aarch64_get_vec_u16 (cpu, vm, i)); | |
3596 | return; | |
3597 | ||
3598 | case 2: | |
3599 | for (i = 0; i < (full ? 4 : 2); i++) | |
3600 | aarch64_set_vec_u32 (cpu, vd, i, aarch64_get_vec_u32 (cpu, vn, i) | |
3601 | + aarch64_get_vec_u32 (cpu, vm, i)); | |
3602 | return; | |
3603 | ||
3604 | case 3: | |
3605 | if (! full) | |
3606 | HALT_UNALLOC; | |
3607 | aarch64_set_vec_u64 (cpu, vd, 0, aarch64_get_vec_u64 (cpu, vn, 0) | |
3608 | + aarch64_get_vec_u64 (cpu, vm, 0)); | |
3609 | aarch64_set_vec_u64 (cpu, vd, 1, | |
3610 | aarch64_get_vec_u64 (cpu, vn, 1) | |
3611 | + aarch64_get_vec_u64 (cpu, vm, 1)); | |
3612 | return; | |
3613 | ||
3614 | default: | |
3615 | HALT_UNREACHABLE; | |
3616 | } | |
3617 | } | |
3618 | ||
3619 | static void | |
3620 | do_vec_mul (sim_cpu *cpu) | |
3621 | { | |
3622 | /* instr[31] = 0 | |
3623 | instr[30] = full/half selector | |
3624 | instr[29,24] = 00 1110 | |
3625 | instr[23,22] = size: 00=> 8-bit, 01=> 16-bit, 10=> 32-bit | |
3626 | instr[21] = 1 | |
3627 | instr[20,16] = Vm | |
3628 | instr[15,10] = 10 0111 | |
3629 | instr[9,5] = Vn | |
3630 | instr[4,0] = Vd. */ | |
3631 | ||
3632 | unsigned vm = uimm (aarch64_get_instr (cpu), 20, 16); | |
3633 | unsigned vn = uimm (aarch64_get_instr (cpu), 9, 5); | |
3634 | unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0); | |
3635 | unsigned i; | |
3636 | int full = uimm (aarch64_get_instr (cpu), 30, 30); | |
3637 | ||
3638 | NYI_assert (29, 24, 0x0E); | |
3639 | NYI_assert (21, 21, 1); | |
3640 | NYI_assert (15, 10, 0x27); | |
3641 | ||
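| /* The product is computed double-width, but MUL writes back only | |
| the low half of each product, at the source element size. */ | |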
3642 | switch (uimm (aarch64_get_instr (cpu), 23, 22)) | |
3643 | { | |
3644 | case 0: | |
3645 | for (i = 0; i < (full ? 16 : 8); i++) | |
3646 | { | |
3647 | uint16_t val = aarch64_get_vec_u8 (cpu, vn, i); | |
3648 | val *= aarch64_get_vec_u8 (cpu, vm, i); | |
3649 | ||
3650 | aarch64_set_vec_u8 (cpu, vd, i, val); | |
3651 | } | |
3652 | return; | |
3653 | ||
3654 | case 1: | |
3655 | for (i = 0; i < (full ? 8 : 4); i++) | |
3656 | { | |
3657 | uint32_t val = aarch64_get_vec_u16 (cpu, vn, i); | |
3658 | val *= aarch64_get_vec_u16 (cpu, vm, i); | |
3659 | ||
3660 | aarch64_set_vec_u16 (cpu, vd, i, val); | |
3661 | } | |
3662 | return; | |
3663 | ||
3664 | case 2: | |
3665 | for (i = 0; i < (full ? 4 : 2); i++) | |
3666 | { | |
3667 | uint64_t val = aarch64_get_vec_u32 (cpu, vn, i); | |
3668 | val *= aarch64_get_vec_u32 (cpu, vm, i); | |
3669 | ||
3670 | aarch64_set_vec_u32 (cpu, vd, i, val); | |
3671 | } | |
3672 | return; | |
3673 | ||
3674 | default: | |
3675 | case 3: | |
3676 | HALT_UNALLOC; | |
3677 | } | |
3678 | } | |
3679 | ||
3680 | static void | |
3681 | do_vec_MLA (sim_cpu *cpu) | |
3682 | { | |
3683 | /* instr[31] = 0 | |
3684 | instr[30] = full/half selector | |
3685 | instr[29,24] = 00 1110 | |
3686 | instr[23,22] = size: 00=> 8-bit, 01=> 16-bit, 10=> 32-bit | |
3687 | instr[21] = 1 | |
3688 | instr[20,16] = Vm | |
3689 | instr[15,10] = 1001 01 | |
3690 | instr[9,5] = Vn | |
3691 | instr[4,0] = Vd. */ | |
3692 | ||
3693 | unsigned vm = uimm (aarch64_get_instr (cpu), 20, 16); | |
3694 | unsigned vn = uimm (aarch64_get_instr (cpu), 9, 5); | |
3695 | unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0); | |
3696 | unsigned i; | |
3697 | int full = uimm (aarch64_get_instr (cpu), 30, 30); | |
3698 | ||
3699 | NYI_assert (29, 24, 0x0E); | |
3700 | NYI_assert (21, 21, 1); | |
3701 | NYI_assert (15, 10, 0x25); | |
3702 | ||
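| /* As with MUL, only the low half of each multiply-accumulate result | |
| is written back, at the source element size. */ | |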
3703 | switch (uimm (aarch64_get_instr (cpu), 23, 22)) | |
3704 | { | |
3705 | case 0: | |
3706 | for (i = 0; i < (full ? 16 : 8); i++) | |
3707 | { | |
3708 | uint16_t val = aarch64_get_vec_u8 (cpu, vn, i); | |
3709 | val *= aarch64_get_vec_u8 (cpu, vm, i); | |
3710 | val += aarch64_get_vec_u8 (cpu, vd, i); | |
3711 | ||
3712 | aarch64_set_vec_u8 (cpu, vd, i, val); | |
3713 | } | |
3714 | return; | |
3715 | ||
3716 | case 1: | |
3717 | for (i = 0; i < (full ? 8 : 4); i++) | |
3718 | { | |
3719 | uint32_t val = aarch64_get_vec_u16 (cpu, vn, i); | |
3720 | val *= aarch64_get_vec_u16 (cpu, vm, i); | |
3721 | val += aarch64_get_vec_u16 (cpu, vd, i); | |
3722 | ||
3723 | aarch64_set_vec_u16 (cpu, vd, i, val); | |
3724 | } | |
3725 | return; | |
3726 | ||
3727 | case 2: | |
3728 | for (i = 0; i < (full ? 4 : 2); i++) | |
3729 | { | |
3730 | uint64_t val = aarch64_get_vec_u32 (cpu, vn, i); | |
3731 | val *= aarch64_get_vec_u32 (cpu, vm, i); | |
3732 | val += aarch64_get_vec_u32 (cpu, vd, i); | |
3733 | ||
3734 | aarch64_set_vec_u32 (cpu, vd, i, val); | |
3735 | } | |
3736 | return; | |
3737 | ||
3738 | default: | |
3739 | case 3: | |
3740 | HALT_UNALLOC; | |
3741 | } | |
3742 | } | |
3743 | ||
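| /* Helpers implementing the FMAXNM/FMINNM semantics: when exactly one | |
| operand is a NaN, the other (numeric) operand is returned. */ | |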
3744 | static float | |
3745 | fmaxnm (float a, float b) | |
3746 | { | |
3747 | if (! isnan (a)) | |
3748 | { | |
3749 | if (! isnan (b)) | |
3750 | return a > b ? a : b; | |
3751 | return a; | |
3752 | } | |
3753 | else if (! isnan (b)) | |
3754 | return b; | |
3755 | return a; | |
3756 | } | |
3757 | ||
3758 | static float | |
3759 | fminnm (float a, float b) | |
3760 | { | |
3761 | if (! isnan (a)) | |
3762 | { | |
3763 | if (! isnan (b)) | |
3764 | return a < b ? a : b; | |
3765 | return a; | |
3766 | } | |
3767 | else if (! isnan (b)) | |
3768 | return b; | |
3769 | return a; | |
3770 | } | |
3771 | ||
3772 | static double | |
3773 | dmaxnm (double a, double b) | |
3774 | { | |
3775 | if (! isnan (a)) | |
3776 | { | |
3777 | if (! isnan (b)) | |
3778 | return a > b ? a : b; | |
3779 | return a; | |
3780 | } | |
3781 | else if (! isnan (b)) | |
3782 | return b; | |
3783 | return a; | |
3784 | } | |
3785 | ||
3786 | static double | |
3787 | dminnm (double a, double b) | |
3788 | { | |
3789 | if (! isnan (a)) | |
3790 | { | |
3791 | if (! isnan (b)) | |
3792 | return a < b ? a : b; | |
3793 | return a; | |
3794 | } | |
3795 | else if (! isnan (b)) | |
3796 | return b; | |
3797 | return a; | |
3798 | } | |
3799 | ||
3800 | static void | |
3801 | do_vec_FminmaxNMP (sim_cpu *cpu) | |
3802 | { | |
3803 | /* aarch64_get_instr (cpu)[31] = 0 | |
3804 | aarch64_get_instr (cpu)[30] = half (0)/full (1) | |
3805 | aarch64_get_instr (cpu)[29,24] = 10 1110 | |
3806 | aarch64_get_instr (cpu)[23] = max(0)/min(1) | |
3807 | aarch64_get_instr (cpu)[22] = float (0)/double (1) | |
3808 | aarch64_get_instr (cpu)[21] = 1 | |
3809 | aarch64_get_instr (cpu)[20,16] = Vm | |
3810 | aarch64_get_instr (cpu)[15,10] = 1100 01 | |
3811 | aarch64_get_instr (cpu)[9,5] = Vn | |
3812 | aarch64_get_instr (cpu)[4,0] = Vd. */ | |
3813 | ||
3814 | unsigned vm = uimm (aarch64_get_instr (cpu), 20, 16); | |
3815 | unsigned vn = uimm (aarch64_get_instr (cpu), 9, 5); | |
3816 | unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0); | |
3817 | int full = uimm (aarch64_get_instr (cpu), 30, 30); | |
3818 | ||
3819 | NYI_assert (29, 24, 0x2E); | |
3820 | NYI_assert (21, 21, 1); | |
3821 | NYI_assert (15, 10, 0x31); | |
3822 | ||
3823 | if (uimm (aarch64_get_instr (cpu), 22, 22)) | |
3824 | { | |
3825 | double (* fn)(double, double) = uimm (aarch64_get_instr (cpu), 23, 23) | |
3826 | ? dminnm : dmaxnm; | |
3827 | ||
3828 | if (! full) | |
3829 | HALT_NYI; | |
3830 | aarch64_set_vec_double (cpu, vd, 0, | |
3831 | fn (aarch64_get_vec_double (cpu, vn, 0), | |
3832 | aarch64_get_vec_double (cpu, vn, 1))); | |
3833 | aarch64_set_vec_double (cpu, vd, 1, | |
3834 | fn (aarch64_get_vec_double (cpu, vm, 0), | |
3835 | aarch64_get_vec_double (cpu, vm, 1))); | |
3836 | } | |
3837 | else | |
3838 | { | |
3839 | float (* fn)(float, float) = uimm (aarch64_get_instr (cpu), 23, 23) | |
3840 | ? fminnm : fmaxnm; | |
3841 | ||
3842 | aarch64_set_vec_float (cpu, vd, 0, | |
3843 | fn (aarch64_get_vec_float (cpu, vn, 0), | |
3844 | aarch64_get_vec_float (cpu, vn, 1))); | |
3845 | if (full) | |
3846 | aarch64_set_vec_float (cpu, vd, 1, | |
3847 | fn (aarch64_get_vec_float (cpu, vn, 2), | |
3848 | aarch64_get_vec_float (cpu, vn, 3))); | |
3849 | ||
3850 | aarch64_set_vec_float (cpu, vd, (full ? 2 : 1), | |
3851 | fn (aarch64_get_vec_float (cpu, vm, 0), | |
3852 | aarch64_get_vec_float (cpu, vm, 1))); | |
3853 | if (full) | |
3854 | aarch64_set_vec_float (cpu, vd, 3, | |
3855 | fn (aarch64_get_vec_float (cpu, vm, 2), | |
3856 | aarch64_get_vec_float (cpu, vm, 3))); | |
3857 | } | |
3858 | } | |
3859 | ||
3860 | static void | |
3861 | do_vec_AND (sim_cpu *cpu) | |
3862 | { | |
3863 | /* instr[31] = 0 | |
3864 | instr[30] = half (0)/full (1) | |
3865 | instr[29,21] = 001110001 | |
3866 | instr[20,16] = Vm | |
3867 | instr[15,10] = 000111 | |
3868 | instr[9,5] = Vn | |
3869 | instr[4,0] = Vd. */ | |
3870 | ||
3871 | unsigned vm = uimm (aarch64_get_instr (cpu), 20, 16); | |
3872 | unsigned vn = uimm (aarch64_get_instr (cpu), 9, 5); | |
3873 | unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0); | |
3874 | unsigned i; | |
3875 | int full = uimm (aarch64_get_instr (cpu), 30, 30); | |
3876 | ||
3877 | NYI_assert (29, 21, 0x071); | |
3878 | NYI_assert (15, 10, 0x07); | |
3879 | ||
3880 | for (i = 0; i < (full ? 4 : 2); i++) | |
3881 | aarch64_set_vec_u32 (cpu, vd, i, | |
3882 | aarch64_get_vec_u32 (cpu, vn, i) | |
3883 | & aarch64_get_vec_u32 (cpu, vm, i)); | |
3884 | } | |
3885 | ||
3886 | static void | |
3887 | do_vec_BSL (sim_cpu *cpu) | |
3888 | { | |
3889 | /* instr[31] = 0 | |
3890 | instr[30] = half (0)/full (1) | |
3891 | instr[29,21] = 101110011 | |
3892 | instr[20,16] = Vm | |
3893 | instr[15,10] = 000111 | |
3894 | instr[9,5] = Vn | |
3895 | instr[4,0] = Vd. */ | |
3896 | ||
3897 | unsigned vm = uimm (aarch64_get_instr (cpu), 20, 16); | |
3898 | unsigned vn = uimm (aarch64_get_instr (cpu), 9, 5); | |
3899 | unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0); | |
3900 | unsigned i; | |
3901 | int full = uimm (aarch64_get_instr (cpu), 30, 30); | |
3902 | ||
3903 | NYI_assert (29, 21, 0x173); | |
3904 | NYI_assert (15, 10, 0x07); | |
3905 | ||
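| /* Bitwise select: each bit of Vd chooses between the corresponding | |
| bits of Vn (where Vd holds a 1) and Vm (where Vd holds a 0). */ | |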
3906 | for (i = 0; i < (full ? 16 : 8); i++) | |
3907 | aarch64_set_vec_u8 (cpu, vd, i, | |
3908 | ( aarch64_get_vec_u8 (cpu, vd, i) | |
3909 | & aarch64_get_vec_u8 (cpu, vn, i)) | |
3910 | | ((~ aarch64_get_vec_u8 (cpu, vd, i)) | |
3911 | & aarch64_get_vec_u8 (cpu, vm, i))); | |
3912 | } | |
3913 | ||
3914 | static void | |
3915 | do_vec_EOR (sim_cpu *cpu) | |
3916 | { | |
3917 | /* instr[31] = 0 | |
3918 | instr[30] = half (0)/full (1) | |
3919 | instr[29,21] = 10 1110 001 | |
3920 | instr[20,16] = Vm | |
3921 | instr[15,10] = 000111 | |
3922 | instr[9,5] = Vn | |
3923 | instr[4,0] = Vd. */ | |
3924 | ||
3925 | unsigned vm = uimm (aarch64_get_instr (cpu), 20, 16); | |
3926 | unsigned vn = uimm (aarch64_get_instr (cpu), 9, 5); | |
3927 | unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0); | |
3928 | unsigned i; | |
3929 | int full = uimm (aarch64_get_instr (cpu), 30, 30); | |
3930 | ||
3931 | NYI_assert (29, 21, 0x171); | |
3932 | NYI_assert (15, 10, 0x07); | |
3933 | ||
3934 | for (i = 0; i < (full ? 4 : 2); i++) | |
3935 | aarch64_set_vec_u32 (cpu, vd, i, | |
3936 | aarch64_get_vec_u32 (cpu, vn, i) | |
3937 | ^ aarch64_get_vec_u32 (cpu, vm, i)); | |
3938 | } | |
3939 | ||
3940 | static void | |
3941 | do_vec_bit (sim_cpu *cpu) | |
3942 | { | |
3943 | /* instr[31] = 0 | |
3944 | instr[30] = half (0)/full (1) | |
3945 | instr[29,23] = 10 1110 1 | |
3946 | instr[22] = BIT (0) / BIF (1) | |
3947 | instr[21] = 1 | |
3948 | instr[20,16] = Vm | |
3949 | instr[15,10] = 0001 11 | |
3950 | instr[9,5] = Vn | |
3951 | instr[4,0] = Vd. */ | |
3952 | ||
3953 | unsigned vm = uimm (aarch64_get_instr (cpu), 20, 16); | |
3954 | unsigned vn = uimm (aarch64_get_instr (cpu), 9, 5); | |
3955 | unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0); | |
3956 | unsigned full = uimm (aarch64_get_instr (cpu), 30, 30); | |
3957 | unsigned test_false = uimm (aarch64_get_instr (cpu), 22, 22); | |
3958 | unsigned i; | |
3959 | ||
3960 | NYI_assert (29, 23, 0x5D); | |
3961 | NYI_assert (21, 21, 1); | |
3962 | NYI_assert (15, 10, 0x07); | |
3963 | ||
3964 | if (test_false) | |
3965 | { | |
3966 | /* BIF: copy bits from Vn where the mask in Vm is clear. */ | |
3967 | for (i = 0; i < (full ? 4 : 2); i++) | |
3968 | aarch64_set_vec_u32 (cpu, vd, i, | |
| (aarch64_get_vec_u32 (cpu, vd, i) | |
| & aarch64_get_vec_u32 (cpu, vm, i)) | |
| | (aarch64_get_vec_u32 (cpu, vn, i) | |
| & ~ aarch64_get_vec_u32 (cpu, vm, i))); | |
3969 | } | |
3970 | else | |
3971 | { | |
3972 | /* BIT: copy bits from Vn where the mask in Vm is set. */ | |
3973 | for (i = 0; i < (full ? 4 : 2); i++) | |
3974 | aarch64_set_vec_u32 (cpu, vd, i, | |
| (aarch64_get_vec_u32 (cpu, vd, i) | |
| & ~ aarch64_get_vec_u32 (cpu, vm, i)) | |
| | (aarch64_get_vec_u32 (cpu, vn, i) | |
| & aarch64_get_vec_u32 (cpu, vm, i))); | |
3975 | } | |
3976 | } | |
3977 | ||
3978 | static void | |
3979 | do_vec_ORN (sim_cpu *cpu) | |
3980 | { | |
3981 | /* instr[31] = 0 | |
3982 | instr[30] = half (0)/full (1) | |
3983 | instr[29,21] = 00 1110 111 | |
3984 | instr[20,16] = Vm | |
3985 | instr[15,10] = 00 0111 | |
3986 | instr[9,5] = Vn | |
3987 | instr[4,0] = Vd. */ | |
3988 | ||
3989 | unsigned vm = uimm (aarch64_get_instr (cpu), 20, 16); | |
3990 | unsigned vn = uimm (aarch64_get_instr (cpu), 9, 5); | |
3991 | unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0); | |
3992 | unsigned i; | |
3993 | int full = uimm (aarch64_get_instr (cpu), 30, 30); | |
3994 | ||
3995 | NYI_assert (29, 21, 0x077); | |
3996 | NYI_assert (15, 10, 0x07); | |
3997 | ||
3998 | for (i = 0; i < (full ? 16 : 8); i++) | |
3999 | aarch64_set_vec_u8 (cpu, vd, i, | |
4000 | aarch64_get_vec_u8 (cpu, vn, i) | |
4001 | | ~ aarch64_get_vec_u8 (cpu, vm, i)); | |
4002 | } | |
4003 | ||
4004 | static void | |
4005 | do_vec_ORR (sim_cpu *cpu) | |
4006 | { | |
4007 | /* instr[31] = 0 | |
4008 | instr[30] = half (0)/full (1) | |
4009 | instr[29,21] = 00 1110 101 | |
4010 | instr[20,16] = Vm | |
4011 | instr[15,10] = 0001 11 | |
4012 | instr[9,5] = Vn | |
4013 | instr[4,0] = Vd. */ | |
4014 | ||
4015 | unsigned vm = uimm (aarch64_get_instr (cpu), 20, 16); | |
4016 | unsigned vn = uimm (aarch64_get_instr (cpu), 9, 5); | |
4017 | unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0); | |
4018 | unsigned i; | |
4019 | int full = uimm (aarch64_get_instr (cpu), 30, 30); | |
4020 | ||
4021 | NYI_assert (29, 21, 0x075); | |
4022 | NYI_assert (15, 10, 0x07); | |
4023 | ||
4024 | for (i = 0; i < (full ? 16 : 8); i++) | |
4025 | aarch64_set_vec_u8 (cpu, vd, i, | |
4026 | aarch64_get_vec_u8 (cpu, vn, i) | |
4027 | | aarch64_get_vec_u8 (cpu, vm, i)); | |
4028 | } | |
4029 | ||
4030 | static void | |
4031 | do_vec_BIC (sim_cpu *cpu) | |
4032 | { | |
4033 | /* instr[31] = 0 | |
4034 | instr[30] = half (0)/full (1) | |
4035 | instr[29,21] = 00 1110 011 | |
4036 | instr[20,16] = Vm | |
4037 | instr[15,10] = 00 0111 | |
4038 | instr[9,5] = Vn | |
4039 | instr[4,0] = Vd. */ | |
4040 | ||
4041 | unsigned vm = uimm (aarch64_get_instr (cpu), 20, 16); | |
4042 | unsigned vn = uimm (aarch64_get_instr (cpu), 9, 5); | |
4043 | unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0); | |
4044 | unsigned i; | |
4045 | int full = uimm (aarch64_get_instr (cpu), 30, 30); | |
4046 | ||
4047 | NYI_assert (29, 21, 0x073); | |
4048 | NYI_assert (15, 10, 0x07); | |
4049 | ||
4050 | for (i = 0; i < (full ? 16 : 8); i++) | |
4051 | aarch64_set_vec_u8 (cpu, vd, i, | |
4052 | aarch64_get_vec_u8 (cpu, vn, i) | |
4053 | & ~ aarch64_get_vec_u8 (cpu, vm, i)); | |
4054 | } | |
4055 | ||
4056 | static void | |
4057 | do_vec_XTN (sim_cpu *cpu) | |
4058 | { | |
4059 | /* instr[31] = 0 | |
4060 | instr[30] = first part (0)/ second part (1) | |
4061 | instr[29,24] = 00 1110 | |
4062 | instr[23,22] = size: byte(00), half(01), word (10) | |
4063 | instr[21,10] = 1000 0100 1010 | |
4064 | instr[9,5] = Vs | |
4065 | instr[4,0] = Vd. */ | |
4066 | ||
4067 | unsigned vs = uimm (aarch64_get_instr (cpu), 9, 5); | |
4068 | unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0); | |
4069 | unsigned bias = uimm (aarch64_get_instr (cpu), 30, 30); | |
4070 | unsigned i; | |
4071 | ||
4072 | NYI_assert (29, 24, 0x0E); | |
4073 | NYI_assert (21, 10, 0x84A); | |
4074 | ||
4075 | switch (uimm (aarch64_get_instr (cpu), 23, 22)) | |
4076 | { | |
4077 | case 0: | |
4078 | if (bias) | |
4079 | for (i = 0; i < 8; i++) | |
4080 | aarch64_set_vec_u8 (cpu, vd, i + 8, | |
4081 | aarch64_get_vec_u16 (cpu, vs, i)); | |
4082 | else | |
4083 | for (i = 0; i < 8; i++) | |
4084 | aarch64_set_vec_u8 (cpu, vd, i, aarch64_get_vec_u16 (cpu, vs, i)); | |
4085 | return; | |
4086 | ||
4087 | case 1: | |
4088 | if (bias) | |
4089 | for (i = 0; i < 4; i++) | |
4090 | aarch64_set_vec_u16 (cpu, vd, i + 4, | |
4091 | aarch64_get_vec_u32 (cpu, vs, i)); | |
4092 | else | |
4093 | for (i = 0; i < 4; i++) | |
4094 | aarch64_set_vec_u16 (cpu, vd, i, aarch64_get_vec_u32 (cpu, vs, i)); | |
4095 | return; | |
4096 | ||
4097 | case 2: | |
4098 | if (bias) | |
4099 | for (i = 0; i < 2; i++) | |
4100 | aarch64_set_vec_u32 (cpu, vd, i + 2, | |
4101 | aarch64_get_vec_u64 (cpu, vs, i)); | |
4102 | else | |
4103 | for (i = 0; i < 2; i++) | |
4104 | aarch64_set_vec_u32 (cpu, vd, i, aarch64_get_vec_u64 (cpu, vs, i)); | |
4105 | return; | |
4106 | ||
4107 | default: | |
4108 | HALT_UNALLOC; | |
4109 | } | |
4110 | } | |
4111 | ||
4112 | #define MAX(A,B) ((A) > (B) ? (A) : (B)) | |
4113 | #define MIN(A,B) ((A) < (B) ? (A) : (B)) | |
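| /* N.B. these macros evaluate their arguments more than once, so the | |
| arguments must not have side effects. */ | |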
4114 | ||
4115 | static void | |
4116 | do_vec_maxv (sim_cpu *cpu) | |
4117 | { | |
4118 | /* instr[31] = 0 | |
4119 | instr[30] = half(0)/full(1) | |
4120 | instr[29] = signed (0)/unsigned(1) | |
4121 | instr[28,24] = 0 1110 | |
4122 | instr[23,22] = size: byte(00), half(01), word (10) | |
4123 | instr[21] = 1 | |
4124 | instr[20,17] = 1 000 | |
4125 | instr[16] = max(0)/min(1) | |
4126 | instr[15,10] = 1010 10 | |
4127 | instr[9,5] = V source | |
4128 | instr[4,0] = V dest. */ | |
4129 | ||
4130 | unsigned vs = uimm (aarch64_get_instr (cpu), 9, 5); | |
4131 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
4132 | unsigned full = uimm (aarch64_get_instr (cpu), 30, 30); | |
4133 | unsigned i; | |
4134 | ||
4135 | NYI_assert (28, 24, 0x0E); | |
4136 | NYI_assert (21, 21, 1); | |
4137 | NYI_assert (20, 17, 8); | |
4138 | NYI_assert (15, 10, 0x2A); | |
4139 | ||
4140 | switch ((uimm (aarch64_get_instr (cpu), 29, 29) << 1) | |
4141 | | uimm (aarch64_get_instr (cpu), 16, 16)) | |
4142 | { | |
4143 | case 0: /* SMAXV. */ | |
4144 | { | |
4145 | int64_t smax; | |
4146 | switch (uimm (aarch64_get_instr (cpu), 23, 22)) | |
4147 | { | |
4148 | case 0: | |
4149 | smax = aarch64_get_vec_s8 (cpu, vs, 0); | |
4150 | for (i = 1; i < (full ? 16 : 8); i++) | |
4151 | smax = MAX (smax, aarch64_get_vec_s8 (cpu, vs, i)); | |
4152 | break; | |
4153 | case 1: | |
4154 | smax = aarch64_get_vec_s16 (cpu, vs, 0); | |
4155 | for (i = 1; i < (full ? 8 : 4); i++) | |
4156 | smax = MAX (smax, aarch64_get_vec_s16 (cpu, vs, i)); | |
4157 | break; | |
4158 | case 2: | |
4159 | smax = aarch64_get_vec_s32 (cpu, vs, 0); | |
4160 | for (i = 1; i < (full ? 4 : 2); i++) | |
4161 | smax = MAX (smax, aarch64_get_vec_s32 (cpu, vs, i)); | |
4162 | break; | |
4163 | default: | |
4164 | case 3: | |
4165 | HALT_UNALLOC; | |
4166 | } | |
4167 | aarch64_set_vec_s64 (cpu, rd, 0, smax); | |
4168 | return; | |
4169 | } | |
4170 | ||
4171 | case 1: /* SMINV. */ | |
4172 | { | |
4173 | int64_t smin; | |
4174 | switch (uimm (aarch64_get_instr (cpu), 23, 22)) | |
4175 | { | |
4176 | case 0: | |
4177 | smin = aarch64_get_vec_s8 (cpu, vs, 0); | |
4178 | for (i = 1; i < (full ? 16 : 8); i++) | |
4179 | smin = MIN (smin, aarch64_get_vec_s8 (cpu, vs, i)); | |
4180 | break; | |
4181 | case 1: | |
4182 | smin = aarch64_get_vec_s16 (cpu, vs, 0); | |
4183 | for (i = 1; i < (full ? 8 : 4); i++) | |
4184 | smin = MIN (smin, aarch64_get_vec_s16 (cpu, vs, i)); | |
4185 | break; | |
4186 | case 2: | |
4187 | smin = aarch64_get_vec_s32 (cpu, vs, 0); | |
4188 | for (i = 1; i < (full ? 4 : 2); i++) | |
4189 | smin = MIN (smin, aarch64_get_vec_s32 (cpu, vs, i)); | |
4190 | break; | |
4191 | default: | |
4192 | case 3: | |
4193 | HALT_UNALLOC; | |
4194 | } | |
4195 | aarch64_set_vec_s64 (cpu, rd, 0, smin); | |
4196 | return; | |
4197 | } | |
4198 | ||
4199 | case 2: /* UMAXV. */ | |
4200 | { | |
4201 | uint64_t umax; | |
4202 | switch (uimm (aarch64_get_instr (cpu), 23, 22)) | |
4203 | { | |
4204 | case 0: | |
4205 | umax = aarch64_get_vec_u8 (cpu, vs, 0); | |
4206 | for (i = 1; i < (full ? 16 : 8); i++) | |
4207 | umax = MAX (umax, aarch64_get_vec_u8 (cpu, vs, i)); | |
4208 | break; | |
4209 | case 1: | |
4210 | umax = aarch64_get_vec_u16 (cpu, vs, 0); | |
4211 | for (i = 1; i < (full ? 8 : 4); i++) | |
4212 | umax = MAX (umax, aarch64_get_vec_u16 (cpu, vs, i)); | |
4213 | break; | |
4214 | case 2: | |
4215 | umax = aarch64_get_vec_u32 (cpu, vs, 0); | |
4216 | for (i = 1; i < (full ? 4 : 2); i++) | |
4217 | umax = MAX (umax, aarch64_get_vec_u32 (cpu, vs, i)); | |
4218 | break; | |
4219 | default: | |
4220 | case 3: | |
4221 | HALT_UNALLOC; | |
4222 | } | |
4223 | aarch64_set_vec_u64 (cpu, rd, 0, umax); | |
4224 | return; | |
4225 | } | |
4226 | ||
4227 | case 3: /* UMINV. */ | |
4228 | { | |
4229 | uint64_t umin; | |
4230 | switch (uimm (aarch64_get_instr (cpu), 23, 22)) | |
4231 | { | |
4232 | case 0: | |
4233 | umin = aarch64_get_vec_u8 (cpu, vs, 0); | |
4234 | for (i = 1; i < (full ? 16 : 8); i++) | |
4235 | umin = MIN (umin, aarch64_get_vec_u8 (cpu, vs, i)); | |
4236 | break; | |
4237 | case 1: | |
4238 | umin = aarch64_get_vec_u16 (cpu, vs, 0); | |
4239 | for (i = 1; i < (full ? 8 : 4); i++) | |
4240 | umin = MIN (umin, aarch64_get_vec_u16 (cpu, vs, i)); | |
4241 | break; | |
4242 | case 2: | |
4243 | umin = aarch64_get_vec_u32 (cpu, vs, 0); | |
4244 | for (i = 1; i < (full ? 4 : 2); i++) | |
4245 | umin = MIN (umin, aarch64_get_vec_u32 (cpu, vs, i)); | |
4246 | break; | |
4247 | default: | |
4248 | case 3: | |
4249 | HALT_UNALLOC; | |
4250 | } | |
4251 | aarch64_set_vec_u64 (cpu, rd, 0, umin); | |
4252 | return; | |
4253 | } | |
4254 | ||
4255 | default: | |
4256 | HALT_UNALLOC; | |
4257 | } | |
4258 | } | |
4259 | ||
4260 | static void | |
4261 | do_vec_fminmaxV (sim_cpu *cpu) | |
4262 | { | |
4263 | /* instr[31,24] = 0110 1110 | |
4264 | instr[23] = max(0)/min(1) | |
4265 | instr[22,14] = 011 0000 11 | |
4266 | instr[13,12] = nm(00)/normal(11) | |
4267 | instr[11,10] = 10 | |
4268 | instr[9,5] = V source | |
4269 | instr[4,0] = V dest. */ | |
4270 | ||
4271 | unsigned vs = uimm (aarch64_get_instr (cpu), 9, 5); | |
4272 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
4273 | unsigned i; | |
4274 | float res = aarch64_get_vec_float (cpu, vs, 0); | |
4275 | ||
4276 | NYI_assert (31, 24, 0x6E); | |
4277 | NYI_assert (22, 14, 0x0C3); | |
4278 | NYI_assert (11, 10, 2); | |
4279 | ||
4280 | if (uimm (aarch64_get_instr (cpu), 23, 23)) | |
4281 | { | |
4282 | switch (uimm (aarch64_get_instr (cpu), 13, 12)) | |
4283 | { | |
4284 | case 0: /* FMINNMV. */ | |
4285 | for (i = 1; i < 4; i++) | |
4286 | res = fminnm (res, aarch64_get_vec_float (cpu, vs, i)); | |
4287 | break; | |
4288 | ||
4289 | case 3: /* FMINV. */ | |
4290 | for (i = 1; i < 4; i++) | |
4291 | res = MIN (res, aarch64_get_vec_float (cpu, vs, i)); | |
4292 | break; | |
4293 | ||
4294 | default: | |
4295 | HALT_NYI; | |
4296 | } | |
4297 | } | |
4298 | else | |
4299 | { | |
4300 | switch (uimm (aarch64_get_instr (cpu), 13, 12)) | |
4301 | { | |
4302 | case 0: /* FMAXNMV. */ | |
4303 | for (i = 1; i < 4; i++) | |
4304 | res = fmaxnm (res, aarch64_get_vec_float (cpu, vs, i)); | |
4305 | break; | |
4306 | ||
4307 | case 3: /* FMAXV. */ | |
4308 | for (i = 1; i < 4; i++) | |
4309 | res = MAX (res, aarch64_get_vec_float (cpu, vs, i)); | |
4310 | break; | |
4311 | ||
4312 | default: | |
4313 | HALT_NYI; | |
4314 | } | |
4315 | } | |
4316 | ||
4317 | aarch64_set_FP_float (cpu, rd, res); | |
4318 | } | |
4319 | ||
4320 | static void | |
4321 | do_vec_Fminmax (sim_cpu *cpu) | |
4322 | { | |
4323 | /* instr[31] = 0 | |
4324 | instr[30] = half(0)/full(1) | |
4325 | instr[29,24] = 00 1110 | |
4326 | instr[23] = max(0)/min(1) | |
4327 | instr[22] = float(0)/double(1) | |
4328 | instr[21] = 1 | |
4329 | instr[20,16] = Vm | |
4330 | instr[15,14] = 11 | |
4331 | instr[13,12] = nm(00)/normal(11) | |
4332 | instr[11,10] = 01 | |
4333 | instr[9,5] = Vn | |
4334 | instr[4,0] = Vd. */ | |
4335 | ||
4336 | unsigned vm = uimm (aarch64_get_instr (cpu), 20, 16); | |
4337 | unsigned vn = uimm (aarch64_get_instr (cpu), 9, 5); | |
4338 | unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0); | |
4339 | unsigned full = uimm (aarch64_get_instr (cpu), 30, 30); | |
4340 | unsigned min = uimm (aarch64_get_instr (cpu), 23, 23); | |
4341 | unsigned i; | |
4342 | ||
4343 | NYI_assert (29, 24, 0x0E); | |
4344 | NYI_assert (21, 21, 1); | |
4345 | NYI_assert (15, 14, 3); | |
4346 | NYI_assert (11, 10, 1); | |
4347 | ||
4348 | if (uimm (aarch64_get_instr (cpu), 22, 22)) | |
4349 | { | |
4350 | double (* func)(double, double); | |
4351 | ||
4352 | if (! full) | |
4353 | HALT_NYI; | |
4354 | ||
4355 | if (uimm (aarch64_get_instr (cpu), 13, 12) == 0) | |
4356 | func = min ? dminnm : dmaxnm; | |
4357 | else if (uimm (aarch64_get_instr (cpu), 13, 12) == 3) | |
4358 | func = min ? fmin : fmax; | |
4359 | else | |
4360 | HALT_NYI; | |
4361 | ||
4362 | for (i = 0; i < 2; i++) | |
4363 | aarch64_set_vec_double (cpu, vd, i, | |
4364 | func (aarch64_get_vec_double (cpu, vn, i), | |
4365 | aarch64_get_vec_double (cpu, vm, i))); | |
4366 | } | |
4367 | else | |
4368 | { | |
4369 | float (* func)(float, float); | |
4370 | ||
4371 | if (uimm (aarch64_get_instr (cpu), 13, 12) == 0) | |
4372 | func = min ? fminnm : fmaxnm; | |
4373 | else if (uimm (aarch64_get_instr (cpu), 13, 12) == 3) | |
4374 | func = min ? fminf : fmaxf; | |
4375 | else | |
4376 | HALT_NYI; | |
4377 | ||
4378 | for (i = 0; i < (full ? 4 : 2); i++) | |
4379 | aarch64_set_vec_float (cpu, vd, i, | |
4380 | func (aarch64_get_vec_float (cpu, vn, i), | |
4381 | aarch64_get_vec_float (cpu, vm, i))); | |
4382 | } | |
4383 | } | |
4384 | ||
4385 | static void | |
4386 | do_vec_SCVTF (sim_cpu *cpu) | |
4387 | { | |
4388 | /* instr[31] = 0 | |
4389 | instr[30] = Q | |
4390 | instr[29,23] = 00 1110 0 | |
4391 | instr[22] = float(0)/double(1) | |
4392 | instr[21,10] = 10 0001 1101 10 | |
4393 | instr[9,5] = Vn | |
4394 | instr[4,0] = Vd. */ | |
4395 | ||
4396 | unsigned vn = uimm (aarch64_get_instr (cpu), 9, 5); | |
4397 | unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0); | |
4398 | unsigned full = uimm (aarch64_get_instr (cpu), 30, 30); | |
4399 | unsigned size = uimm (aarch64_get_instr (cpu), 22, 22); | |
4400 | unsigned i; | |
4401 | ||
4402 | NYI_assert (29, 23, 0x1C); | |
4403 | NYI_assert (21, 10, 0x876); | |
4404 | ||
4405 | if (size) | |
4406 | { | |
4407 | if (! full) | |
4408 | HALT_UNALLOC; | |
4409 | ||
4410 | for (i = 0; i < 2; i++) | |
4411 | { | |
4412 | double val = (double) aarch64_get_vec_s64 (cpu, vn, i); | |
4413 | aarch64_set_vec_double (cpu, vd, i, val); | |
4414 | } | |
4415 | } | |
4416 | else | |
4417 | { | |
4418 | for (i = 0; i < (full ? 4 : 2); i++) | |
4419 | { | |
4420 | float val = (float) aarch64_get_vec_s32 (cpu, vn, i); | |
4421 | aarch64_set_vec_float (cpu, vd, i, val); | |
4422 | } | |
4423 | } | |
4424 | } | |
4425 | ||
4426 | #define VEC_CMP(SOURCE, CMP) \ | |
4427 | do \ | |
4428 | { \ | |
4429 | switch (size) \ | |
4430 | { \ | |
4431 | case 0: \ | |
4432 | for (i = 0; i < (full ? 16 : 8); i++) \ | |
4433 | aarch64_set_vec_u8 (cpu, vd, i, \ | |
4434 | aarch64_get_vec_##SOURCE##8 (cpu, vn, i) \ | |
4435 | CMP \ | |
4436 | aarch64_get_vec_##SOURCE##8 (cpu, vm, i) \ | |
4437 | ? -1 : 0); \ | |
4438 | return; \ | |
4439 | case 1: \ | |
4440 | for (i = 0; i < (full ? 8 : 4); i++) \ | |
4441 | aarch64_set_vec_u16 (cpu, vd, i, \ | |
4442 | aarch64_get_vec_##SOURCE##16 (cpu, vn, i) \ | |
4443 | CMP \ | |
4444 | aarch64_get_vec_##SOURCE##16 (cpu, vm, i) \ | |
4445 | ? -1 : 0); \ | |
4446 | return; \ | |
4447 | case 2: \ | |
4448 | for (i = 0; i < (full ? 4 : 2); i++) \ | |
4449 | aarch64_set_vec_u32 (cpu, vd, i, \ | |
4450 | aarch64_get_vec_##SOURCE##32 (cpu, vn, i) \ | |
4451 | CMP \ | |
4452 | aarch64_get_vec_##SOURCE##32 (cpu, vm, i) \ | |
4453 | ? -1 : 0); \ | |
4454 | return; \ | |
4455 | case 3: \ | |
4456 | if (! full) \ | |
4457 | HALT_UNALLOC; \ | |
4458 | for (i = 0; i < 2; i++) \ | |
4459 | aarch64_set_vec_u64 (cpu, vd, i, \ | |
4460 | aarch64_get_vec_##SOURCE##64 (cpu, vn, i) \ | |
4461 | CMP \ | |
4462 | aarch64_get_vec_##SOURCE##64 (cpu, vm, i) \ | |
4463 | ? -1ULL : 0); \ | |
4464 | return; \ | |
4465 | default: \ | |
4466 | HALT_UNALLOC; \ | |
4467 | } \ | |
4468 | } \ | |
4469 | while (0) | |
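| /* Integer vector compares set each destination element to all ones | |
| when the relation holds and to zero otherwise. */ | |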
4470 | ||
4471 | #define VEC_CMP0(SOURCE, CMP) \ | |
4472 | do \ | |
4473 | { \ | |
4474 | switch (size) \ | |
4475 | { \ | |
4476 | case 0: \ | |
4477 | for (i = 0; i < (full ? 16 : 8); i++) \ | |
4478 | aarch64_set_vec_u8 (cpu, vd, i, \ | |
4479 | aarch64_get_vec_##SOURCE##8 (cpu, vn, i) \ | |
4480 | CMP 0 ? -1 : 0); \ | |
4481 | return; \ | |
4482 | case 1: \ | |
4483 | for (i = 0; i < (full ? 8 : 4); i++) \ | |
4484 | aarch64_set_vec_u16 (cpu, vd, i, \ | |
4485 | aarch64_get_vec_##SOURCE##16 (cpu, vn, i) \ | |
4486 | CMP 0 ? -1 : 0); \ | |
4487 | return; \ | |
4488 | case 2: \ | |
4489 | for (i = 0; i < (full ? 4 : 2); i++) \ | |
4490 | aarch64_set_vec_u32 (cpu, vd, i, \ | |
4491 | aarch64_get_vec_##SOURCE##32 (cpu, vn, i) \ | |
4492 | CMP 0 ? -1 : 0); \ | |
4493 | return; \ | |
4494 | case 3: \ | |
4495 | if (! full) \ | |
4496 | HALT_UNALLOC; \ | |
4497 | for (i = 0; i < 2; i++) \ | |
4498 | aarch64_set_vec_u64 (cpu, vd, i, \ | |
4499 | aarch64_get_vec_##SOURCE##64 (cpu, vn, i) \ | |
4500 | CMP 0 ? -1ULL : 0); \ | |
4501 | return; \ | |
4502 | default: \ | |
4503 | HALT_UNALLOC; \ | |
4504 | } \ | |
4505 | } \ | |
4506 | while (0) | |
4507 | ||
4508 | #define VEC_FCMP0(CMP) \ | |
4509 | do \ | |
4510 | { \ | |
4511 | if (vm != 0) \ | |
4512 | HALT_NYI; \ | |
4513 | if (uimm (aarch64_get_instr (cpu), 22, 22)) \ | |
4514 | { \ | |
4515 | if (! full) \ | |
4516 | HALT_NYI; \ | |
4517 | for (i = 0; i < 2; i++) \ | |
4518 | aarch64_set_vec_u64 (cpu, vd, i, \ | |
4519 | aarch64_get_vec_double (cpu, vn, i) \ | |
4520 | CMP 0.0 ? -1 : 0); \ | |
4521 | } \ | |
4522 | else \ | |
4523 | { \ | |
4524 | for (i = 0; i < (full ? 4 : 2); i++) \ | |
4525 | aarch64_set_vec_u32 (cpu, vd, i, \ | |
4526 | aarch64_get_vec_float (cpu, vn, i) \ | |
4527 | CMP 0.0 ? -1 : 0); \ | |
4528 | } \ | |
4529 | return; \ | |
4530 | } \ | |
4531 | while (0) | |
4532 | ||
4533 | #define VEC_FCMP(CMP) \ | |
4534 | do \ | |
4535 | { \ | |
4536 | if (uimm (aarch64_get_instr (cpu), 22, 22)) \ | |
4537 | { \ | |
4538 | if (! full) \ | |
4539 | HALT_NYI; \ | |
4540 | for (i = 0; i < 2; i++) \ | |
4541 | aarch64_set_vec_u64 (cpu, vd, i, \ | |
4542 | aarch64_get_vec_double (cpu, vn, i) \ | |
4543 | CMP \ | |
4544 | aarch64_get_vec_double (cpu, vm, i) \ | |
4545 | ? -1 : 0); \ | |
4546 | } \ | |
4547 | else \ | |
4548 | { \ | |
4549 | for (i = 0; i < (full ? 4 : 2); i++) \ | |
4550 | aarch64_set_vec_u32 (cpu, vd, i, \ | |
4551 | aarch64_get_vec_float (cpu, vn, i) \ | |
4552 | CMP \ | |
4553 | aarch64_get_vec_float (cpu, vm, i) \ | |
4554 | ? -1 : 0); \ | |
4555 | } \ | |
4556 | return; \ | |
4557 | } \ | |
4558 | while (0) | |
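| /* A comparison involving a NaN is unordered, so the C relational | |
| operators yield false and the element is set to zero, matching | |
| the FCM* definitions. */ | |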
4559 | ||
4560 | static void | |
4561 | do_vec_compare (sim_cpu *cpu) | |
4562 | { | |
4563 | /* instr[31] = 0 | |
4564 | instr[30] = half(0)/full(1) | |
4565 | instr[29] = part-of-comparison-type | |
4566 | instr[28,24] = 0 1110 | |
4567 | instr[23,22] = size of integer compares: byte(00), half(01), word (10), long (11) | |
4568 | for float compares instr[22] selects single (0) / double (1) | |
4569 | instr[21] = 1 | |
4570 | instr[20,16] = Vm or 00000 (compare vs 0) | |
4571 | instr[15,10] = part-of-comparison-type | |
4572 | instr[9,5] = Vn | |
4573 | instr[4,0] = Vd. */ | |
4574 | ||
4575 | int full = uimm (aarch64_get_instr (cpu), 30, 30); | |
4576 | int size = uimm (aarch64_get_instr (cpu), 23, 22); | |
4577 | unsigned vm = uimm (aarch64_get_instr (cpu), 20, 16); | |
4578 | unsigned vn = uimm (aarch64_get_instr (cpu), 9, 5); | |
4579 | unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0); | |
4580 | unsigned i; | |
4581 | ||
4582 | NYI_assert (28, 24, 0x0E); | |
4583 | NYI_assert (21, 21, 1); | |
4584 | ||
4585 | if ((uimm (aarch64_get_instr (cpu), 11, 11) | |
4586 | && uimm (aarch64_get_instr (cpu), 14, 14)) | |
4587 | || ((uimm (aarch64_get_instr (cpu), 11, 11) == 0 | |
4588 | && uimm (aarch64_get_instr (cpu), 10, 10) == 0))) | |
4589 | { | |
4590 | /* A compare vs 0. */ | |
4591 | if (vm != 0) | |
4592 | { | |
4593 | if (uimm (aarch64_get_instr (cpu), 15, 10) == 0x2A) | |
4594 | do_vec_maxv (cpu); | |
4595 | else if (uimm (aarch64_get_instr (cpu), 15, 10) == 0x32 | |
4596 | || uimm (aarch64_get_instr (cpu), 15, 10) == 0x3E) | |
4597 | do_vec_fminmaxV (cpu); | |
4598 | else if (uimm (aarch64_get_instr (cpu), 29, 23) == 0x1C | |
4599 | && uimm (aarch64_get_instr (cpu), 21, 10) == 0x876) | |
4600 | do_vec_SCVTF (cpu); | |
4601 | else | |
4602 | HALT_NYI; | |
4603 | return; | |
4604 | } | |
4605 | } | |
4606 | ||
4607 | if (uimm (aarch64_get_instr (cpu), 14, 14)) | |
4608 | { | |
4609 | /* A floating point compare. */ | |
4610 | unsigned decode = (uimm (aarch64_get_instr (cpu), 29, 29) << 5) | |
4611 | | (uimm (aarch64_get_instr (cpu), 23, 23) << 4) | |
4612 | | uimm (aarch64_get_instr (cpu), 13, 10); | |
4613 | ||
4614 | NYI_assert (15, 15, 1); | |
4615 | ||
4616 | switch (decode) | |
4617 | { | |
4618 | case /* 0b010010: GT#0 */ 0x12: VEC_FCMP0 (>); | |
4619 | case /* 0b110010: GE#0 */ 0x32: VEC_FCMP0 (>=); | |
4620 | case /* 0b010110: EQ#0 */ 0x16: VEC_FCMP0 (==); | |
4621 | case /* 0b110110: LE#0 */ 0x36: VEC_FCMP0 (<=); | |
4622 | case /* 0b011010: LT#0 */ 0x1A: VEC_FCMP0 (<); | |
4623 | case /* 0b111001: GT */ 0x39: VEC_FCMP (>); | |
4624 | case /* 0b101001: GE */ 0x29: VEC_FCMP (>=); | |
4625 | case /* 0b001001: EQ */ 0x09: VEC_FCMP (==); | |
4626 | ||
4627 | default: | |
4628 | HALT_NYI; | |
4629 | } | |
4630 | } | |
4631 | else | |
4632 | { | |
4633 | unsigned decode = (uimm (aarch64_get_instr (cpu), 29, 29) << 6) | |
4634 | | uimm (aarch64_get_instr (cpu), 15, 10); | |
4635 | ||
4636 | switch (decode) | |
4637 | { | |
4638 | case 0x0D: /* 0001101 GT */ VEC_CMP (s, > ); | |
4639 | case 0x0F: /* 0001111 GE */ VEC_CMP (s, >= ); | |
4640 | case 0x22: /* 0100010 GT #0 */ VEC_CMP0 (s, > ); | |
4641 | case 0x26: /* 0100110 EQ #0 */ VEC_CMP0 (s, == ); | |
4642 | case 0x2A: /* 0101010 LT #0 */ VEC_CMP0 (s, < ); | |
4643 | case 0x4D: /* 1001101 HI */ VEC_CMP (u, > ); | |
4644 | case 0x4F: /* 1001111 HS */ VEC_CMP (u, >= ); | |
4645 | case 0x62: /* 1100010 GE #0 */ VEC_CMP0 (s, >= ); | |
4646 | case 0x63: /* 1100011 EQ */ VEC_CMP (u, == ); | |
4647 | case 0x66: /* 1100110 LE #0 */ VEC_CMP0 (s, <= ); | |
4648 | default: | |
4649 | if (vm == 0) | |
4650 | HALT_NYI; | |
4651 | do_vec_maxv (cpu); | |
4652 | } | |
4653 | } | |
4654 | } | |
4655 | ||
4656 | static void | |
4657 | do_vec_SSHL (sim_cpu *cpu) | |
4658 | { | |
4659 | /* instr[31] = 0 | |
4660 | instr[30] = first part (0)/ second part (1) | |
4661 | instr[29,24] = 00 1110 | |
4662 | instr[23,22] = size: byte(00), half(01), word (10), long (11) | |
4663 | instr[21] = 1 | |
4664 | instr[20,16] = Vm | |
4665 | instr[15,10] = 0100 01 | |
4666 | instr[9,5] = Vn | |
4667 | instr[4,0] = Vd. */ | |
4668 | ||
4669 | unsigned full = uimm (aarch64_get_instr (cpu), 30, 30); | |
4670 | unsigned vm = uimm (aarch64_get_instr (cpu), 20, 16); | |
4671 | unsigned vn = uimm (aarch64_get_instr (cpu), 9, 5); | |
4672 | unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0); | |
4673 | unsigned i; | |
4674 | ||
4675 | NYI_assert (29, 24, 0x0E); | |
4676 | NYI_assert (21, 21, 1); | |
4677 | NYI_assert (15, 10, 0x11); | |
4678 | ||
4679 | /* The shift amount is the signed value held in the least | |
| significant byte of each corresponding element of Vm; a | |
| negative amount shifts the element right instead of left. */ | |
| ||
4681 | switch (uimm (aarch64_get_instr (cpu), 23, 22)) | |
| { | |
| case 0: | |
| for (i = 0; i < (full ? 16 : 8); i++) | |
| { | |
| int8_t shift = aarch64_get_vec_s8 (cpu, vm, i); | |
| if (shift >= 0) | |
| aarch64_set_vec_s8 (cpu, vd, i, | |
| aarch64_get_vec_s8 (cpu, vn, i) << shift); | |
| else | |
| aarch64_set_vec_s8 (cpu, vd, i, | |
| aarch64_get_vec_s8 (cpu, vn, i) >> - shift); | |
| } | |
| return; | |
| ||
| case 1: | |
| for (i = 0; i < (full ? 8 : 4); i++) | |
| { | |
| int8_t shift = aarch64_get_vec_s8 (cpu, vm, i * 2); | |
| if (shift >= 0) | |
| aarch64_set_vec_s16 (cpu, vd, i, | |
| aarch64_get_vec_s16 (cpu, vn, i) << shift); | |
| else | |
| aarch64_set_vec_s16 (cpu, vd, i, | |
| aarch64_get_vec_s16 (cpu, vn, i) >> - shift); | |
| } | |
| return; | |
| ||
| case 2: | |
| for (i = 0; i < (full ? 4 : 2); i++) | |
| { | |
| int8_t shift = aarch64_get_vec_s8 (cpu, vm, i * 4); | |
| if (shift >= 0) | |
| aarch64_set_vec_s32 (cpu, vd, i, | |
| aarch64_get_vec_s32 (cpu, vn, i) << shift); | |
| else | |
| aarch64_set_vec_s32 (cpu, vd, i, | |
| aarch64_get_vec_s32 (cpu, vn, i) >> - shift); | |
| } | |
| return; | |
| ||
| case 3: | |
| if (! full) | |
| HALT_UNALLOC; | |
| for (i = 0; i < 2; i++) | |
| { | |
| int8_t shift = aarch64_get_vec_s8 (cpu, vm, i * 8); | |
| if (shift >= 0) | |
| aarch64_set_vec_s64 (cpu, vd, i, | |
| aarch64_get_vec_s64 (cpu, vn, i) << shift); | |
| else | |
| aarch64_set_vec_s64 (cpu, vd, i, | |
| aarch64_get_vec_s64 (cpu, vn, i) >> - shift); | |
| } | |
| return; | |
| ||
| default: | |
| HALT_NYI; | |
| } | |
4712 | } | |
4713 | ||
4714 | static void | |
4715 | do_vec_USHL (sim_cpu *cpu) | |
4716 | { | |
4717 | /* instr[31] = 0 | |
4718 | instr[30] = first part (0)/ second part (1) | |
4719 | instr[29,24] = 10 1110 | |
4720 | instr[23,22] = size: byte(00), half(01), word (10), long (11) | |
4721 | instr[21] = 1 | |
4722 | instr[20,16] = Vm | |
4723 | instr[15,10] = 0100 01 | |
4724 | instr[9,5] = Vn | |
4725 | instr[4,0] = Vd */ | |
4726 | ||
4727 | unsigned full = uimm (aarch64_get_instr (cpu), 30, 30); | |
4728 | unsigned vm = uimm (aarch64_get_instr (cpu), 20, 16); | |
4729 | unsigned vn = uimm (aarch64_get_instr (cpu), 9, 5); | |
4730 | unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0); | |
4731 | unsigned i; | |
4732 | ||
4733 | NYI_assert (29, 24, 0x2E); | |
4734 | NYI_assert (15, 10, 0x11); | |
4735 | ||
4736 | /* As with SSHL, the signed low byte of each Vm element gives the | |
| shift amount; a negative amount shifts right (logically). */ | |
| switch (uimm (aarch64_get_instr (cpu), 23, 22)) | |
| { | |
| case 0: | |
| for (i = 0; i < (full ? 16 : 8); i++) | |
| { | |
| int8_t shift = aarch64_get_vec_s8 (cpu, vm, i); | |
| if (shift >= 0) | |
| aarch64_set_vec_u8 (cpu, vd, i, | |
| aarch64_get_vec_u8 (cpu, vn, i) << shift); | |
| else | |
| aarch64_set_vec_u8 (cpu, vd, i, | |
| aarch64_get_vec_u8 (cpu, vn, i) >> - shift); | |
| } | |
| return; | |
| ||
| case 1: | |
| for (i = 0; i < (full ? 8 : 4); i++) | |
| { | |
| int8_t shift = aarch64_get_vec_s8 (cpu, vm, i * 2); | |
| if (shift >= 0) | |
| aarch64_set_vec_u16 (cpu, vd, i, | |
| aarch64_get_vec_u16 (cpu, vn, i) << shift); | |
| else | |
| aarch64_set_vec_u16 (cpu, vd, i, | |
| aarch64_get_vec_u16 (cpu, vn, i) >> - shift); | |
| } | |
| return; | |
| ||
| case 2: | |
| for (i = 0; i < (full ? 4 : 2); i++) | |
| { | |
| int8_t shift = aarch64_get_vec_s8 (cpu, vm, i * 4); | |
| if (shift >= 0) | |
| aarch64_set_vec_u32 (cpu, vd, i, | |
| aarch64_get_vec_u32 (cpu, vn, i) << shift); | |
| else | |
| aarch64_set_vec_u32 (cpu, vd, i, | |
| aarch64_get_vec_u32 (cpu, vn, i) >> - shift); | |
| } | |
| return; | |
| ||
| case 3: | |
| if (! full) | |
| HALT_UNALLOC; | |
| for (i = 0; i < 2; i++) | |
| { | |
| int8_t shift = aarch64_get_vec_s8 (cpu, vm, i * 8); | |
| if (shift >= 0) | |
| aarch64_set_vec_u64 (cpu, vd, i, | |
| aarch64_get_vec_u64 (cpu, vn, i) << shift); | |
| else | |
| aarch64_set_vec_u64 (cpu, vd, i, | |
| aarch64_get_vec_u64 (cpu, vn, i) >> - shift); | |
| } | |
| return; | |
| ||
| default: | |
| HALT_NYI; | |
| } | |
4767 | } | |
4768 | ||
4769 | static void | |
4770 | do_vec_FMLA (sim_cpu *cpu) | |
4771 | { | |
4772 | /* instr[31] = 0 | |
4773 | instr[30] = full/half selector | |
4774 | instr[29,23] = 0011100 | |
4775 | instr[22] = size: 0=>float, 1=>double | |
4776 | instr[21] = 1 | |
4777 | instr[20,16] = Vm | |
4778 | instr[15,10] = 1100 11 | |
4779 | instr[9,5] = Vn | |
4780 | instr[4,0] = Vd. */ | |
4781 | ||
4782 | unsigned vm = uimm (aarch64_get_instr (cpu), 20, 16); | |
4783 | unsigned vn = uimm (aarch64_get_instr (cpu), 9, 5); | |
4784 | unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0); | |
4785 | unsigned i; | |
4786 | int full = uimm (aarch64_get_instr (cpu), 30, 30); | |
4787 | ||
4788 | NYI_assert (29, 23, 0x1C); | |
4789 | NYI_assert (21, 21, 1); | |
4790 | NYI_assert (15, 10, 0x33); | |
4791 | ||
4792 | if (uimm (aarch64_get_instr (cpu), 22, 22)) | |
4793 | { | |
4794 | if (! full) | |
4795 | HALT_UNALLOC; | |
4796 | for (i = 0; i < 2; i++) | |
4797 | aarch64_set_vec_double (cpu, vd, i, | |
4798 | aarch64_get_vec_double (cpu, vn, i) * | |
4799 | aarch64_get_vec_double (cpu, vm, i) + | |
4800 | aarch64_get_vec_double (cpu, vd, i)); | |
4801 | } | |
4802 | else | |
4803 | { | |
4804 | for (i = 0; i < (full ? 4 : 2); i++) | |
4805 | aarch64_set_vec_float (cpu, vd, i, | |
4806 | aarch64_get_vec_float (cpu, vn, i) * | |
4807 | aarch64_get_vec_float (cpu, vm, i) + | |
4808 | aarch64_get_vec_float (cpu, vd, i)); | |
4809 | } | |
4810 | } | |
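| ||
| /* Per lane this is a multiply-accumulate; for the 4S form the | |
|    arithmetic is simply: | |
| ||
|      vd[i] = vn[i] * vm[i] + vd[i]; | |
| ||
|    Real FMLA is fused, with a single rounding, whereas the separate C | |
|    multiply and add above round twice, so results may differ slightly | |
|    from hardware.  */ | |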
4811 | ||
4812 | static void | |
4813 | do_vec_max (sim_cpu *cpu) | |
4814 | { | |
4815 | /* instr[31] = 0 | |
4816 | instr[30] = full/half selector | |
4817 | instr[29] = SMAX (0) / UMAX (1) | |
4818 | instr[28,24] = 0 1110 | |
4819 | instr[23,22] = size: 00=> 8-bit, 01=> 16-bit, 10=> 32-bit | |
4820 | instr[21] = 1 | |
4821 | instr[20,16] = Vn | |
4822 | instr[15,10] = 0110 01 | |
4823 | instr[9,5] = Vm | |
4824 | instr[4,0] = Vd. */ | |
4825 | ||
4826 | unsigned vm = uimm (aarch64_get_instr (cpu), 20, 16); | |
4827 | unsigned vn = uimm (aarch64_get_instr (cpu), 9, 5); | |
4828 | unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0); | |
4829 | unsigned i; | |
4830 | int full = uimm (aarch64_get_instr (cpu), 30, 30); | |
4831 | ||
4832 | NYI_assert (28, 24, 0x0E); | |
4833 | NYI_assert (21, 21, 1); | |
4834 | NYI_assert (15, 10, 0x19); | |
4835 | ||
4836 | if (uimm (aarch64_get_instr (cpu), 29, 29)) | |
4837 | { | |
4838 | switch (uimm (aarch64_get_instr (cpu), 23, 22)) | |
4839 | { | |
4840 | case 0: | |
4841 | for (i = 0; i < (full ? 16 : 8); i++) | |
4842 | aarch64_set_vec_u8 (cpu, vd, i, | |
4843 | aarch64_get_vec_u8 (cpu, vn, i) | |
4844 | > aarch64_get_vec_u8 (cpu, vm, i) | |
4845 | ? aarch64_get_vec_u8 (cpu, vn, i) | |
4846 | : aarch64_get_vec_u8 (cpu, vm, i)); | |
4847 | return; | |
4848 | ||
4849 | case 1: | |
4850 | for (i = 0; i < (full ? 8 : 4); i++) | |
4851 | aarch64_set_vec_u16 (cpu, vd, i, | |
4852 | aarch64_get_vec_u16 (cpu, vn, i) | |
4853 | > aarch64_get_vec_u16 (cpu, vm, i) | |
4854 | ? aarch64_get_vec_u16 (cpu, vn, i) | |
4855 | : aarch64_get_vec_u16 (cpu, vm, i)); | |
4856 | return; | |
4857 | ||
4858 | case 2: | |
4859 | for (i = 0; i < (full ? 4 : 2); i++) | |
4860 | aarch64_set_vec_u32 (cpu, vd, i, | |
4861 | aarch64_get_vec_u32 (cpu, vn, i) | |
4862 | > aarch64_get_vec_u32 (cpu, vm, i) | |
4863 | ? aarch64_get_vec_u32 (cpu, vn, i) | |
4864 | : aarch64_get_vec_u32 (cpu, vm, i)); | |
4865 | return; | |
4866 | ||
4867 | default: | |
4868 | case 3: | |
4869 | HALT_UNALLOC; | |
4870 | } | |
4871 | } | |
4872 | else | |
4873 | { | |
4874 | switch (uimm (aarch64_get_instr (cpu), 23, 22)) | |
4875 | { | |
4876 | case 0: | |
4877 | for (i = 0; i < (full ? 16 : 8); i++) | |
4878 | aarch64_set_vec_s8 (cpu, vd, i, | |
4879 | aarch64_get_vec_s8 (cpu, vn, i) | |
4880 | > aarch64_get_vec_s8 (cpu, vm, i) | |
4881 | ? aarch64_get_vec_s8 (cpu, vn, i) | |
4882 | : aarch64_get_vec_s8 (cpu, vm, i)); | |
4883 | return; | |
4884 | ||
4885 | case 1: | |
4886 | for (i = 0; i < (full ? 8 : 4); i++) | |
4887 | aarch64_set_vec_s16 (cpu, vd, i, | |
4888 | aarch64_get_vec_s16 (cpu, vn, i) | |
4889 | > aarch64_get_vec_s16 (cpu, vm, i) | |
4890 | ? aarch64_get_vec_s16 (cpu, vn, i) | |
4891 | : aarch64_get_vec_s16 (cpu, vm, i)); | |
4892 | return; | |
4893 | ||
4894 | case 2: | |
4895 | for (i = 0; i < (full ? 4 : 2); i++) | |
4896 | aarch64_set_vec_s32 (cpu, vd, i, | |
4897 | aarch64_get_vec_s32 (cpu, vn, i) | |
4898 | > aarch64_get_vec_s32 (cpu, vm, i) | |
4899 | ? aarch64_get_vec_s32 (cpu, vn, i) | |
4900 | : aarch64_get_vec_s32 (cpu, vm, i)); | |
4901 | return; | |
4902 | ||
4903 | default: | |
4904 | case 3: | |
4905 | HALT_UNALLOC; | |
4906 | } | |
4907 | } | |
4908 | } | |
4909 | ||
4910 | static void | |
4911 | do_vec_min (sim_cpu *cpu) | |
4912 | { | |
4913 | /* instr[31] = 0 | |
4914 | instr[30] = full/half selector | |
4915 | instr[29] = SMIN (0) / UMIN (1) | |
4916 | instr[28,24] = 0 1110 | |
4917 | instr[23,22] = size: 00=> 8-bit, 01=> 16-bit, 10=> 32-bit | |
4918 | instr[21] = 1 | |
4919 | instr[20,16] = Vn | |
4920 | instr[15,10] = 0110 11 | |
4921 | instr[9,5] = Vm | |
4922 | instr[4,0] = Vd. */ | |
4923 | ||
4924 | unsigned vm = uimm (aarch64_get_instr (cpu), 20, 16); | |
4925 | unsigned vn = uimm (aarch64_get_instr (cpu), 9, 5); | |
4926 | unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0); | |
4927 | unsigned i; | |
4928 | int full = uimm (aarch64_get_instr (cpu), 30, 30); | |
4929 | ||
4930 | NYI_assert (28, 24, 0x0E); | |
4931 | NYI_assert (21, 21, 1); | |
4932 | NYI_assert (15, 10, 0x1B); | |
4933 | ||
4934 | if (uimm (aarch64_get_instr (cpu), 29, 29)) | |
4935 | { | |
4936 | switch (uimm (aarch64_get_instr (cpu), 23, 22)) | |
4937 | { | |
4938 | case 0: | |
4939 | for (i = 0; i < (full ? 16 : 8); i++) | |
4940 | aarch64_set_vec_u8 (cpu, vd, i, | |
4941 | aarch64_get_vec_u8 (cpu, vn, i) | |
4942 | < aarch64_get_vec_u8 (cpu, vm, i) | |
4943 | ? aarch64_get_vec_u8 (cpu, vn, i) | |
4944 | : aarch64_get_vec_u8 (cpu, vm, i)); | |
4945 | return; | |
4946 | ||
4947 | case 1: | |
4948 | for (i = 0; i < (full ? 8 : 4); i++) | |
4949 | aarch64_set_vec_u16 (cpu, vd, i, | |
4950 | aarch64_get_vec_u16 (cpu, vn, i) | |
4951 | < aarch64_get_vec_u16 (cpu, vm, i) | |
4952 | ? aarch64_get_vec_u16 (cpu, vn, i) | |
4953 | : aarch64_get_vec_u16 (cpu, vm, i)); | |
4954 | return; | |
4955 | ||
4956 | case 2: | |
4957 | for (i = 0; i < (full ? 4 : 2); i++) | |
4958 | aarch64_set_vec_u32 (cpu, vd, i, | |
4959 | aarch64_get_vec_u32 (cpu, vn, i) | |
4960 | < aarch64_get_vec_u32 (cpu, vm, i) | |
4961 | ? aarch64_get_vec_u32 (cpu, vn, i) | |
4962 | : aarch64_get_vec_u32 (cpu, vm, i)); | |
4963 | return; | |
4964 | ||
4965 | default: | |
4966 | case 3: | |
4967 | HALT_UNALLOC; | |
4968 | } | |
4969 | } | |
4970 | else | |
4971 | { | |
4972 | switch (uimm (aarch64_get_instr (cpu), 23, 22)) | |
4973 | { | |
4974 | case 0: | |
4975 | for (i = 0; i < (full ? 16 : 8); i++) | |
4976 | aarch64_set_vec_s8 (cpu, vd, i, | |
4977 | aarch64_get_vec_s8 (cpu, vn, i) | |
4978 | < aarch64_get_vec_s8 (cpu, vm, i) | |
4979 | ? aarch64_get_vec_s8 (cpu, vn, i) | |
4980 | : aarch64_get_vec_s8 (cpu, vm, i)); | |
4981 | return; | |
4982 | ||
4983 | case 1: | |
4984 | for (i = 0; i < (full ? 8 : 4); i++) | |
4985 | aarch64_set_vec_s16 (cpu, vd, i, | |
4986 | aarch64_get_vec_s16 (cpu, vn, i) | |
4987 | < aarch64_get_vec_s16 (cpu, vm, i) | |
4988 | ? aarch64_get_vec_s16 (cpu, vn, i) | |
4989 | : aarch64_get_vec_s16 (cpu, vm, i)); | |
4990 | return; | |
4991 | ||
4992 | case 2: | |
4993 | for (i = 0; i < (full ? 4 : 2); i++) | |
4994 | aarch64_set_vec_s32 (cpu, vd, i, | |
4995 | aarch64_get_vec_s32 (cpu, vn, i) | |
4996 | < aarch64_get_vec_s32 (cpu, vm, i) | |
4997 | ? aarch64_get_vec_s32 (cpu, vn, i) | |
4998 | : aarch64_get_vec_s32 (cpu, vm, i)); | |
4999 | return; | |
5000 | ||
5001 | default: | |
5002 | case 3: | |
5003 | HALT_UNALLOC; | |
5004 | } | |
5005 | } | |
5006 | } | |
5007 | ||
5008 | static void | |
5009 | do_vec_sub_long (sim_cpu *cpu) | |
5010 | { | |
5011 | /* instr[31] = 0 | |
5012 | instr[30] = lower (0) / upper (1) | |
5013 | instr[29] = signed (0) / unsigned (1) | |
5014 | instr[28,24] = 0 1110 | |
5015 | instr[23,22] = size: bytes (00), half (01), word (10) | |
5016 | instr[21] = 1 | |
5017 | instr[20,16] = Vm | |
5018 | instr[15,10] = 0010 00 | |
5019 | instr[9,5] = Vn | |
5020 | instr[4,0] = V dest. */ | |
5021 | ||
5022 | unsigned size = uimm (aarch64_get_instr (cpu), 23, 22); | |
5023 | unsigned vm = uimm (aarch64_get_instr (cpu), 20, 16); | |
5024 | unsigned vn = uimm (aarch64_get_instr (cpu), 9, 5); | |
5025 | unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0); | |
5026 | unsigned bias = 0; | |
5027 | unsigned i; | |
5028 | ||
5029 | NYI_assert (28, 24, 0x0E); | |
5030 | NYI_assert (21, 21, 1); | |
5031 | NYI_assert (15, 10, 0x08); | |
5032 | ||
5033 | if (size == 3) | |
5034 | HALT_UNALLOC; | |
5035 | ||
5036 | switch (uimm (aarch64_get_instr (cpu), 30, 29)) | |
5037 | { | |
5038 | case 2: /* SSUBL2. */ | |
5039 | bias = 2; | |
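| /* Fall through.  */ | |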
5040 | case 0: /* SSUBL. */ | |
5041 | switch (size) | |
5042 | { | |
5043 | case 0: | |
5044 | bias *= 4; /* The part-two (SSUBL2) byte lanes start at element 8.  */ | |
5045 | for (i = 0; i < 8; i++) | |
5046 | aarch64_set_vec_s16 (cpu, vd, i, | |
5047 | aarch64_get_vec_s8 (cpu, vn, i + bias) | |
5048 | - aarch64_get_vec_s8 (cpu, vm, i + bias)); | |
5049 | break; | |
5050 | ||
5051 | case 1: | |
5052 | bias *= 2; | |
5053 | for (i = 0; i < 4; i++) | |
5054 | aarch64_set_vec_s32 (cpu, vd, i, | |
5055 | aarch64_get_vec_s16 (cpu, vn, i + bias) | |
5056 | - aarch64_get_vec_s16 (cpu, vm, i + bias)); | |
5057 | break; | |
5058 | ||
5059 | case 2: | |
5060 | for (i = 0; i < 2; i++) | |
5061 | aarch64_set_vec_s64 (cpu, vd, i, | |
5062 | aarch64_get_vec_s32 (cpu, vn, i + bias) | |
5063 | - aarch64_get_vec_s32 (cpu, vm, i + bias)); | |
5064 | break; | |
5065 | ||
5066 | default: | |
5067 | HALT_UNALLOC; | |
5068 | } | |
5069 | break; | |
5070 | ||
5071 | case 3: /* USUBL2. */ | |
5072 | bias = 2; | |
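| /* Fall through.  */ | |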
5073 | case 1: /* USUBL. */ | |
5074 | switch (size) | |
5075 | { | |
5076 | case 0: | |
5077 | bias *= 4; /* The part-two (USUBL2) byte lanes start at element 8.  */ | |
5078 | for (i = 0; i < 8; i++) | |
5079 | aarch64_set_vec_u16 (cpu, vd, i, | |
5080 | aarch64_get_vec_u8 (cpu, vn, i + bias) | |
5081 | - aarch64_get_vec_u8 (cpu, vm, i + bias)); | |
5082 | break; | |
5083 | ||
5084 | case 1: | |
5085 | bias *= 2; | |
5086 | for (i = 0; i < 4; i++) | |
5087 | aarch64_set_vec_u32 (cpu, vd, i, | |
5088 | aarch64_get_vec_u16 (cpu, vn, i + bias) | |
5089 | - aarch64_get_vec_u16 (cpu, vm, i + bias)); | |
5090 | break; | |
5091 | ||
5092 | case 2: | |
5093 | for (i = 0; i < 2; i++) | |
5094 | aarch64_set_vec_u64 (cpu, vd, i, | |
5095 | aarch64_get_vec_u32 (cpu, vn, i + bias) | |
5096 | - aarch64_get_vec_u32 (cpu, vm, i + bias)); | |
5097 | break; | |
5098 | ||
5099 | default: | |
5100 | HALT_UNALLOC; | |
5101 | } | |
5102 | break; | |
5103 | } | |
5104 | } | |
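| ||
| /* For example SSUBL2 Vd.8H, Vn.16B, Vm.16B reads source lanes 8..15, | |
|    which is why the byte case above scales the part-two bias of 2 up | |
|    to 2 * 4 = 8.  */ | |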
5105 | ||
5106 | #define DO_ADDP(FN) \ | |
5107 | do \ | |
5108 | { \ | |
5109 | for (i = 0; i < range; i++) \ | |
5110 | { \ | |
5111 | aarch64_set_vec_##FN (cpu, vd, i, \ | |
5112 | aarch64_get_vec_##FN (cpu, vn, i * 2) \ | |
5113 | + aarch64_get_vec_##FN (cpu, vn, i * 2 + 1)); \ | |
5114 | aarch64_set_vec_##FN (cpu, vd, i + range, \ | |
5115 | aarch64_get_vec_##FN (cpu, vm, i * 2) \ | |
5116 | + aarch64_get_vec_##FN (cpu, vm, i * 2 + 1)); \ | |
5117 | } \ | |
5118 | } \ | |
5119 | while (0) | |
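| ||
| /* For example, with FN = u16 and range = 4 (ADDP Vd.8H, Vn.8H, Vm.8H) | |
|    the expansion computes the pairwise sums of the concatenation Vn:Vm: | |
| ||
|      vd[0] = vn[0] + vn[1] ... vd[3] = vn[6] + vn[7] | |
|      vd[4] = vm[0] + vm[1] ... vd[7] = vm[6] + vm[7] | |
| ||
|    Note that Vd is written while Vn and Vm are still being read, so | |
|    the Vd == Vn and Vd == Vm aliasing cases are not handled | |
|    faithfully.  */ | |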
5120 | ||
5121 | static void | |
5122 | do_vec_ADDP (sim_cpu *cpu) | |
5123 | { | |
5124 | /* instr[31] = 0 | |
5125 | instr[30] = half(0)/full(1) | |
5126 | instr[29,24] = 00 1110 | |
5127 | instr[23,22] = size: bytes (00), half (01), word (10), long (11) | |
5128 | instr[21] = 1 | |
5129 | instr[20,16] = Vm | |
5130 | instr[15,10] = 1011 11 | |
5131 | instr[9,5] = Vn | |
5132 | instr[4,0] = V dest. */ | |
5133 | ||
5134 | unsigned full = uimm (aarch64_get_instr (cpu), 30, 30); | |
5135 | unsigned size = uimm (aarch64_get_instr (cpu), 23, 22); | |
5136 | unsigned vm = uimm (aarch64_get_instr (cpu), 20, 16); | |
5137 | unsigned vn = uimm (aarch64_get_instr (cpu), 9, 5); | |
5138 | unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0); | |
5139 | unsigned i, range; | |
5140 | ||
5141 | NYI_assert (29, 24, 0x0E); | |
5142 | NYI_assert (21, 21, 1); | |
5143 | NYI_assert (15, 10, 0x2F); | |
5144 | ||
5145 | switch (size) | |
5146 | { | |
5147 | case 0: | |
5148 | range = full ? 8 : 4; | |
5149 | DO_ADDP (u8); | |
5150 | return; | |
5151 | ||
5152 | case 1: | |
5153 | range = full ? 4 : 2; | |
5154 | DO_ADDP (u16); | |
5155 | return; | |
5156 | ||
5157 | case 2: | |
5158 | range = full ? 2 : 1; | |
5159 | DO_ADDP (u32); | |
5160 | return; | |
5161 | ||
5162 | case 3: | |
5163 | if (! full) | |
5164 | HALT_UNALLOC; | |
5165 | range = 1; | |
5166 | DO_ADDP (u64); | |
5167 | return; | |
5168 | ||
5169 | default: | |
5170 | HALT_NYI; | |
5171 | } | |
5172 | } | |
5173 | ||
5174 | static void | |
5175 | do_vec_UMOV (sim_cpu *cpu) | |
5176 | { | |
5177 | /* instr[31] = 0 | |
5178 | instr[30] = 32-bit(0)/64-bit(1) | |
5179 | instr[29,21] = 00 1110 000 | |
5180 | instr[20,16] = size & index | |
5181 | instr[15,10] = 0011 11 | |
5182 | instr[9,5] = V source | |
5183 | instr[4,0] = R dest. */ | |
5184 | ||
5185 | unsigned vs = uimm (aarch64_get_instr (cpu), 9, 5); | |
5186 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
5187 | unsigned index; | |
5188 | ||
5189 | NYI_assert (29, 21, 0x070); | |
5190 | NYI_assert (15, 10, 0x0F); | |
5191 | ||
5192 | if (uimm (aarch64_get_instr (cpu), 16, 16)) | |
5193 | { | |
5194 | /* Byte transfer. */ | |
5195 | index = uimm (aarch64_get_instr (cpu), 20, 17); | |
5196 | aarch64_set_reg_u64 (cpu, rd, NO_SP, | |
5197 | aarch64_get_vec_u8 (cpu, vs, index)); | |
5198 | } | |
5199 | else if (uimm (aarch64_get_instr (cpu), 17, 17)) | |
5200 | { | |
5201 | index = uimm (aarch64_get_instr (cpu), 20, 18); | |
5202 | aarch64_set_reg_u64 (cpu, rd, NO_SP, | |
5203 | aarch64_get_vec_u16 (cpu, vs, index)); | |
5204 | } | |
5205 | else if (uimm (aarch64_get_instr (cpu), 18, 18)) | |
5206 | { | |
5207 | index = uimm (aarch64_get_instr (cpu), 20, 19); | |
5208 | aarch64_set_reg_u64 (cpu, rd, NO_SP, | |
5209 | aarch64_get_vec_u32 (cpu, vs, index)); | |
5210 | } | |
5211 | else | |
5212 | { | |
5213 | if (uimm (aarch64_get_instr (cpu), 30, 30) != 1) | |
5214 | HALT_UNALLOC; | |
5215 | ||
5216 | index = uimm (aarch64_get_instr (cpu), 20, 20); | |
5217 | aarch64_set_reg_u64 (cpu, rd, NO_SP, | |
5218 | aarch64_get_vec_u64 (cpu, vs, index)); | |
5219 | } | |
5220 | } | |
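| ||
| /* The position of the lowest set bit of instr[20,16] selects the | |
|    element size and the bits above it form the index.  For example | |
|    instr[20,16] = 00110 has bit 16 clear and bit 17 set, so it is a | |
|    16-bit transfer with index = instr[20,18] = 1, i.e. | |
|    UMOV Wd, Vn.H[1].  */ | |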
5221 | ||
5222 | static void | |
5223 | do_vec_FABS (sim_cpu *cpu) | |
5224 | { | |
5225 | /* instr[31] = 0 | |
5226 | instr[30] = half(0)/full(1) | |
5227 | instr[29,23] = 00 1110 1 | |
5228 | instr[22] = float(0)/double(1) | |
5229 | instr[21,16] = 10 0000 | |
5230 | instr[15,10] = 1111 10 | |
5231 | instr[9,5] = Vn | |
5232 | instr[4,0] = Vd. */ | |
5233 | ||
5234 | unsigned vn = uimm (aarch64_get_instr (cpu), 9, 5); | |
5235 | unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0); | |
5236 | unsigned full = uimm (aarch64_get_instr (cpu), 30, 30); | |
5237 | unsigned i; | |
5238 | ||
5239 | NYI_assert (29, 23, 0x1D); | |
5240 | NYI_assert (21, 10, 0x83E); | |
5241 | ||
5242 | if (uimm (aarch64_get_instr (cpu), 22, 22)) | |
5243 | { | |
5244 | if (! full) | |
5245 | HALT_NYI; | |
5246 | ||
5247 | for (i = 0; i < 2; i++) | |
5248 | aarch64_set_vec_double (cpu, vd, i, | |
5249 | fabs (aarch64_get_vec_double (cpu, vn, i))); | |
5250 | } | |
5251 | else | |
5252 | { | |
5253 | for (i = 0; i < (full ? 4 : 2); i++) | |
5254 | aarch64_set_vec_float (cpu, vd, i, | |
5255 | fabsf (aarch64_get_vec_float (cpu, vn, i))); | |
5256 | } | |
5257 | } | |
5258 | ||
5259 | static void | |
5260 | do_vec_FCVTZS (sim_cpu *cpu) | |
5261 | { | |
5262 | /* instr[31] = 0 | |
5263 | instr[30] = half (0) / all (1) | |
5264 | instr[29,23] = 00 1110 1 | |
5265 | instr[22] = single (0) / double (1) | |
5266 | instr[21,10] = 10 0001 1011 10 | |
5267 | instr[9,5] = Rn | |
5268 | instr[4,0] = Rd. */ | |
5269 | ||
5270 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
5271 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
5272 | unsigned full = uimm (aarch64_get_instr (cpu), 30, 30); | |
5273 | unsigned i; | |
5274 | ||
5275 | NYI_assert (31, 31, 0); | |
5276 | NYI_assert (29, 23, 0x1D); | |
5277 | NYI_assert (21, 10, 0x86E); | |
5278 | ||
5279 | if (uimm (aarch64_get_instr (cpu), 22, 22)) | |
5280 | { | |
5281 | if (! full) | |
5282 | HALT_UNALLOC; | |
5283 | ||
5284 | for (i = 0; i < 2; i++) | |
5285 | aarch64_set_vec_s64 (cpu, rd, i, | |
5286 | (int64_t) aarch64_get_vec_double (cpu, rn, i)); | |
5287 | } | |
5288 | else | |
5289 | for (i = 0; i < (full ? 4 : 2); i++) | |
5290 | aarch64_set_vec_s32 (cpu, rd, i, | |
5291 | (int32_t) aarch64_get_vec_float (cpu, rn, i)); | |
5292 | } | |
5293 | ||
5294 | static void | |
5295 | do_vec_op1 (sim_cpu *cpu) | |
5296 | { | |
5297 | /* instr[31] = 0 | |
5298 | instr[30] = half/full | |
5299 | instr[29,24] = 00 1110 | |
5300 | instr[23,21] = ??? | |
5301 | instr[20,16] = Vm | |
5302 | instr[15,10] = sub-opcode | |
5303 | instr[9,5] = Vn | |
5304 | instr[4,0] = Vd */ | |
5305 | NYI_assert (29, 24, 0x0E); | |
5306 | ||
5307 | if (uimm (aarch64_get_instr (cpu), 21, 21) == 0) | |
5308 | { | |
5309 | if (uimm (aarch64_get_instr (cpu), 23, 22) == 0) | |
5310 | { | |
5311 | if (uimm (aarch64_get_instr (cpu), 30, 30) == 1 | |
5312 | && uimm (aarch64_get_instr (cpu), 17, 14) == 0 | |
5313 | && uimm (aarch64_get_instr (cpu), 12, 10) == 7) | |
5314 | return do_vec_ins_2 (cpu); | |
5315 | ||
5316 | switch (uimm (aarch64_get_instr (cpu), 15, 10)) | |
5317 | { | |
5318 | case 0x01: do_vec_DUP_vector_into_vector (cpu); return; | |
5319 | case 0x03: do_vec_DUP_scalar_into_vector (cpu); return; | |
5320 | case 0x07: do_vec_INS (cpu); return; | |
5321 | case 0x0A: do_vec_TRN (cpu); return; | |
5322 | ||
5323 | case 0x0F: | |
5324 | if (uimm (aarch64_get_instr (cpu), 17, 16) == 0) | |
5325 | { | |
5326 | do_vec_MOV_into_scalar (cpu); | |
5327 | return; | |
5328 | } | |
5329 | break; | |
5330 | ||
5331 | case 0x00: | |
5332 | case 0x08: | |
5333 | case 0x10: | |
5334 | case 0x18: | |
5335 | do_vec_TBL (cpu); return; | |
5336 | ||
5337 | case 0x06: | |
5338 | case 0x16: | |
5339 | do_vec_UZP (cpu); return; | |
5340 | ||
5341 | case 0x0E: | |
5342 | case 0x1E: | |
5343 | do_vec_ZIP (cpu); return; | |
5344 | ||
5345 | default: | |
5346 | HALT_NYI; | |
5347 | } | |
5348 | } | |
5349 | ||
5350 | switch (uimm (aarch64_get_instr (cpu), 13, 10)) | |
5351 | { | |
5352 | case 0x6: do_vec_UZP (cpu); return; | |
5353 | case 0xE: do_vec_ZIP (cpu); return; | |
5354 | case 0xA: do_vec_TRN (cpu); return; | |
5355 | case 0xF: do_vec_UMOV (cpu); return; | |
5356 | default: HALT_NYI; | |
5357 | } | |
5358 | } | |
5359 | ||
5360 | switch (uimm (aarch64_get_instr (cpu), 15, 10)) | |
5361 | { | |
5362 | case 0x07: | |
5363 | switch (uimm (aarch64_get_instr (cpu), 23, 21)) | |
5364 | { | |
5365 | case 1: do_vec_AND (cpu); return; | |
5366 | case 3: do_vec_BIC (cpu); return; | |
5367 | case 5: do_vec_ORR (cpu); return; | |
5368 | case 7: do_vec_ORN (cpu); return; | |
5369 | default: HALT_NYI; | |
5370 | } | |
5371 | ||
5372 | case 0x08: do_vec_sub_long (cpu); return; | |
5373 | case 0x0a: do_vec_XTN (cpu); return; | |
5374 | case 0x11: do_vec_SSHL (cpu); return; | |
5375 | case 0x19: do_vec_max (cpu); return; | |
5376 | case 0x1B: do_vec_min (cpu); return; | |
5377 | case 0x21: do_vec_add (cpu); return; | |
5378 | case 0x25: do_vec_MLA (cpu); return; | |
5379 | case 0x27: do_vec_mul (cpu); return; | |
5380 | case 0x2F: do_vec_ADDP (cpu); return; | |
5381 | case 0x30: do_vec_mull (cpu); return; | |
5382 | case 0x33: do_vec_FMLA (cpu); return; | |
5383 | case 0x35: do_vec_fadd (cpu); return; | |
5384 | ||
5385 | case 0x2E: | |
5386 | switch (uimm (aarch64_get_instr (cpu), 20, 16)) | |
5387 | { | |
5388 | case 0x00: do_vec_ABS (cpu); return; | |
5389 | case 0x01: do_vec_FCVTZS (cpu); return; | |
5390 | case 0x11: do_vec_ADDV (cpu); return; | |
5391 | default: HALT_NYI; | |
5392 | } | |
5393 | ||
5394 | case 0x31: | |
5395 | case 0x3B: | |
5396 | do_vec_Fminmax (cpu); return; | |
5397 | ||
5398 | case 0x0D: | |
5399 | case 0x0F: | |
5400 | case 0x22: | |
5401 | case 0x23: | |
5402 | case 0x26: | |
5403 | case 0x2A: | |
5404 | case 0x32: | |
5405 | case 0x36: | |
5406 | case 0x39: | |
5407 | case 0x3A: | |
5408 | do_vec_compare (cpu); return; | |
5409 | ||
5410 | case 0x3E: | |
5411 | do_vec_FABS (cpu); return; | |
5412 | ||
5413 | default: | |
5414 | HALT_NYI; | |
5415 | } | |
5416 | } | |
5417 | ||
5418 | static void | |
5419 | do_vec_xtl (sim_cpu *cpu) | |
5420 | { | |
5421 | /* instr[31] = 0 | |
5422 | instr[30,29] = SXTL (00), UXTL (01), SXTL2 (10), UXTL2 (11) | |
5423 | instr[28,22] = 0 1111 00 | |
5424 | instr[21,16] = size & shift (USHLL, SSHLL, USHLL2, SSHLL2) | |
5425 | instr[15,10] = 1010 01 | |
5426 | instr[9,5] = V source | |
5427 | instr[4,0] = V dest. */ | |
5428 | ||
5429 | unsigned vs = uimm (aarch64_get_instr (cpu), 9, 5); | |
5430 | unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0); | |
5431 | unsigned i, shift, bias = 0; | |
5432 | ||
5433 | NYI_assert (28, 22, 0x3C); | |
5434 | NYI_assert (15, 10, 0x29); | |
5435 | ||
5436 | switch (uimm (aarch64_get_instr (cpu), 30, 29)) | |
5437 | { | |
5438 | case 2: /* SXTL2, SSHLL2. */ | |
5439 | bias = 2; | |
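| /* Fall through.  */ | |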
5440 | case 0: /* SXTL, SSHLL. */ | |
5441 | if (uimm (aarch64_get_instr (cpu), 21, 21)) | |
5442 | { | |
5443 | shift = uimm (aarch64_get_instr (cpu), 20, 16); | |
5444 | aarch64_set_vec_s64 | |
5445 | (cpu, vd, 0, aarch64_get_vec_s32 (cpu, vs, bias) << shift); | |
5446 | aarch64_set_vec_s64 | |
5447 | (cpu, vd, 1, aarch64_get_vec_s32 (cpu, vs, bias + 1) << shift); | |
5448 | } | |
5449 | else if (uimm (aarch64_get_instr (cpu), 20, 20)) | |
5450 | { | |
5451 | shift = uimm (aarch64_get_instr (cpu), 19, 16); | |
5452 | bias *= 2; | |
5453 | for (i = 0; i < 4; i++) | |
5454 | aarch64_set_vec_s32 | |
5455 | (cpu, vd, i, aarch64_get_vec_s16 (cpu, vs, i + bias) << shift); | |
5456 | } | |
5457 | else | |
5458 | { | |
5459 | NYI_assert (19, 19, 1); | |
5460 | ||
5461 | shift = uimm (aarch64_get_instr (cpu), 18, 16); | |
5462 | bias *= 4; /* The part-two source bytes start at element 8.  */ | |
5463 | for (i = 0; i < 8; i++) | |
5464 | aarch64_set_vec_s16 | |
5465 | (cpu, vd, i, aarch64_get_vec_s8 (cpu, vs, i + bias) << shift); | |
5466 | } | |
5467 | return; | |
5468 | ||
5469 | case 3: /* UXTL2, USHLL2. */ | |
5470 | bias = 2; | |
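| /* Fall through.  */ | |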
5471 | case 1: /* UXTL, USHLL. */ | |
5472 | if (uimm (aarch64_get_instr (cpu), 21, 21)) | |
5473 | { | |
5474 | shift = uimm (aarch64_get_instr (cpu), 20, 16); | |
5475 | aarch64_set_vec_u64 | |
5476 | (cpu, vd, 0, aarch64_get_vec_u32 (cpu, vs, bias) << shift); | |
5477 | aarch64_set_vec_u64 | |
5478 | (cpu, vd, 1, aarch64_get_vec_u32 (cpu, vs, bias + 1) << shift); | |
5479 | } | |
5480 | else if (uimm (aarch64_get_instr (cpu), 20, 20)) | |
5481 | { | |
5482 | shift = uimm (aarch64_get_instr (cpu), 19, 16); | |
5483 | bias *= 2; | |
5484 | for (i = 0; i < 4; i++) | |
5485 | aarch64_set_vec_u32 | |
5486 | (cpu, vd, i, aarch64_get_vec_u16 (cpu, vs, i + bias) << shift); | |
5487 | } | |
5488 | else | |
5489 | { | |
5490 | NYI_assert (19, 19, 1); | |
5491 | ||
5492 | shift = uimm (aarch64_get_instr (cpu), 18, 16); | |
5493 | bias *= 4; /* The part-two source bytes start at element 8.  */ | |
5494 | for (i = 0; i < 8; i++) | |
5495 | aarch64_set_vec_u16 | |
5496 | (cpu, vd, i, aarch64_get_vec_u8 (cpu, vs, i + bias) << shift); | |
5497 | } | |
5498 | return; | |
5499 | ||
5500 | default: | |
5501 | HALT_NYI; | |
5502 | } | |
5503 | } | |
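| ||
| /* For example SXTL Vd.8H, Vn.8B is SSHLL with a zero shift: only | |
|    bit 19 of the size/shift field is set, the count in instr[18,16] | |
|    is 0, and each of the low eight signed bytes of Vn is sign | |
|    extended into a 16-bit lane of Vd.  */ | |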
5504 | ||
5505 | static void | |
5506 | do_vec_SHL (sim_cpu *cpu) | |
5507 | { | |
5508 | /* instr [31] = 0 | |
5509 | instr [30] = half(0)/full(1) | |
5510 | instr [29,23] = 001 1110 | |
5511 | instr [22,16] = size and shift amount | |
5512 | instr [15,10] = 01 0101 | |
5513 | instr [9, 5] = Vs | |
5514 | instr [4, 0] = Vd. */ | |
5515 | ||
5516 | int shift; | |
5517 | int full = uimm (aarch64_get_instr (cpu), 30, 30); | |
5518 | unsigned vs = uimm (aarch64_get_instr (cpu), 9, 5); | |
5519 | unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0); | |
5520 | unsigned i; | |
5521 | ||
5522 | NYI_assert (29, 23, 0x1E); | |
5523 | NYI_assert (15, 10, 0x15); | |
5524 | ||
5525 | if (uimm (aarch64_get_instr (cpu), 22, 22)) | |
5526 | { | |
5527 | shift = uimm (aarch64_get_instr (cpu), 21, 16); /* = immh:immb - 64.  */ | |
5528 | ||
5529 | if (full == 0) | |
5530 | HALT_UNALLOC; | |
5531 | ||
5532 | for (i = 0; i < 2; i++) | |
5533 | { | |
5534 | uint64_t val = aarch64_get_vec_u64 (cpu, vs, i); | |
5535 | aarch64_set_vec_u64 (cpu, vd, i, val << shift); | |
5536 | } | |
5537 | ||
5538 | return; | |
5539 | } | |
5540 | ||
5541 | if (uimm (aarch64_get_instr (cpu), 21, 21)) | |
5542 | { | |
5543 | shift = uimm (aarch64_get_instr (cpu), 20, 16); /* = immh:immb - 32.  */ | |
5544 | ||
5545 | for (i = 0; i < (full ? 4 : 2); i++) | |
5546 | { | |
5547 | uint32_t val = aarch64_get_vec_u32 (cpu, vs, i); | |
5548 | aarch64_set_vec_u32 (cpu, vd, i, val << shift); | |
5549 | } | |
5550 | ||
5551 | return; | |
5552 | } | |
5553 | ||
5554 | if (uimm (aarch64_get_instr (cpu), 20, 20)) | |
5555 | { | |
5556 | shift = uimm (aarch64_get_instr (cpu), 19, 16); /* = immh:immb - 16.  */ | |
5557 | ||
5558 | for (i = 0; i < (full ? 8 : 4); i++) | |
5559 | { | |
5560 | uint16_t val = aarch64_get_vec_u16 (cpu, vs, i); | |
5561 | aarch64_set_vec_u16 (cpu, vd, i, val << shift); | |
5562 | } | |
5563 | ||
5564 | return; | |
5565 | } | |
5566 | ||
5567 | if (uimm (aarch64_get_instr (cpu), 19, 19) == 0) | |
5568 | HALT_UNALLOC; | |
5569 | ||
5570 | shift = uimm (aarch64_get_instr (cpu), 18, 16); /* = immh:immb - 8.  */ | |
5571 | ||
5572 | for (i = 0; i < (full ? 16 : 8); i++) | |
5573 | { | |
5574 | uint8_t val = aarch64_get_vec_u8 (cpu, vs, i); | |
5575 | aarch64_set_vec_u8 (cpu, vd, i, val << shift); | |
5576 | } | |
5577 | } | |
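| ||
| /* For example SHL Vd.16B, Vn.16B, #3 encodes immh:immb = 0001:011: | |
|    bit 19 is the lowest (and only) size bit that is set, and the | |
|    shift count is immb = instr[18,16] = 3.  */ | |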
5578 | ||
5579 | static void | |
5580 | do_vec_SSHR_USHR (sim_cpu *cpu) | |
5581 | { | |
5582 | /* instr [31] = 0 | |
5583 | instr [30] = half(0)/full(1) | |
5584 | instr [29] = signed(0)/unsigned(1) | |
5585 | instr [28,23] = 01 1110 | |
5586 | instr [22,16] = size and shift amount | |
5587 | instr [15,10] = 0000 01 | |
5588 | instr [9, 5] = Vs | |
5589 | instr [4, 0] = Vd. */ | |
5590 | ||
5591 | int shift; | |
5592 | int full = uimm (aarch64_get_instr (cpu), 30, 30); | |
5593 | int sign = uimm (aarch64_get_instr (cpu), 29, 29); | |
5594 | unsigned vs = uimm (aarch64_get_instr (cpu), 9, 5); | |
5595 | unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0); | |
5596 | unsigned i; | |
5597 | ||
5598 | NYI_assert (28, 23, 0x1E); | |
5599 | NYI_assert (15, 10, 0x01); | |
5600 | ||
5601 | if (uimm (aarch64_get_instr (cpu), 22, 22)) | |
5602 | { | |
5603 | shift = 64 - uimm (aarch64_get_instr (cpu), 21, 16); /* = 128 - immh:immb.  */ | |
5604 | ||
5605 | if (full == 0) | |
5606 | HALT_UNALLOC; | |
5607 | ||
5608 | if (sign) | |
5609 | for (i = 0; i < 2; i++) | |
5610 | { | |
5611 | int64_t val = aarch64_get_vec_s64 (cpu, vs, i); | |
5612 | aarch64_set_vec_s64 (cpu, vd, i, val >> shift); | |
5613 | } | |
5614 | else | |
5615 | for (i = 0; i < 2; i++) | |
5616 | { | |
5617 | uint64_t val = aarch64_get_vec_u64 (cpu, vs, i); | |
5618 | aarch64_set_vec_u64 (cpu, vd, i, val >> shift); | |
5619 | } | |
5620 | ||
5621 | return; | |
5622 | } | |
5623 | ||
5624 | if (uimm (aarch64_get_instr (cpu), 21, 21)) | |
5625 | { | |
5626 | shift = 32 - uimm (aarch64_get_instr (cpu), 20, 16); /* = 64 - immh:immb.  */ | |
5627 | ||
5628 | if (sign) | |
5629 | for (i = 0; i < (full ? 4 : 2); i++) | |
5630 | { | |
5631 | int32_t val = aarch64_get_vec_s32 (cpu, vs, i); | |
5632 | aarch64_set_vec_s32 (cpu, vd, i, val >> shift); | |
5633 | } | |
5634 | else | |
5635 | for (i = 0; i < (full ? 4 : 2); i++) | |
5636 | { | |
5637 | uint32_t val = aarch64_get_vec_u32 (cpu, vs, i); | |
5638 | aarch64_set_vec_u32 (cpu, vd, i, val >> shift); | |
5639 | } | |
5640 | ||
5641 | return; | |
5642 | } | |
5643 | ||
5644 | if (uimm (aarch64_get_instr (cpu), 20, 20)) | |
5645 | { | |
5646 | shift = 16 - uimm (aarch64_get_instr (cpu), 19, 16); /* = 32 - immh:immb.  */ | |
5647 | ||
5648 | if (sign) | |
5649 | for (i = 0; i < (full ? 8 : 4); i++) | |
5650 | { | |
5651 | int16_t val = aarch64_get_vec_s16 (cpu, vs, i); | |
5652 | aarch64_set_vec_s16 (cpu, vd, i, val >> shift); | |
5653 | } | |
5654 | else | |
5655 | for (i = 0; i < (full ? 8 : 4); i++) | |
5656 | { | |
5657 | uint16_t val = aarch64_get_vec_u16 (cpu, vs, i); | |
5658 | aarch64_set_vec_u16 (cpu, vd, i, val >> shift); | |
5659 | } | |
5660 | ||
5661 | return; | |
5662 | } | |
5663 | ||
5664 | if (uimm (aarch64_get_instr (cpu), 19, 19) == 0) | |
5665 | HALT_UNALLOC; | |
5666 | ||
5667 | shift = 8 - uimm (aarch64_get_instr (cpu), 18, 16); /* = 16 - immh:immb.  */ | |
5668 | ||
5669 | if (sign) | |
5670 | for (i = 0; i < (full ? 16 : 8); i++) | |
5671 | { | |
5672 | int8_t val = aarch64_get_vec_s8 (cpu, vs, i); | |
5673 | aarch64_set_vec_s8 (cpu, vd, i, val >> shift); | |
5674 | } | |
5675 | else | |
5676 | for (i = 0; i < (full ? 16 : 8); i++) | |
5677 | { | |
5678 | uint8_t val = aarch64_get_vec_u8 (cpu, vs, i); | |
5679 | aarch64_set_vec_u8 (cpu, vd, i, val >> shift); | |
5680 | } | |
5681 | } | |
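| ||
| /* Right shifts encode the count downwards: for example | |
|    USHR Vd.16B, Vn.16B, #3 has immh:immb = 16 - 3 = 0001:101, so the | |
|    byte case above recovers the count as 8 - instr[18,16] = 3.  The | |
|    maximum architected count (equal to the lane width) would make | |
|    the C shift undefined and is not special-cased here.  */ | |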
5682 | ||
5683 | static void | |
5684 | do_vec_op2 (sim_cpu *cpu) | |
5685 | { | |
5686 | /* instr[31] = 0 | |
5687 | instr[30] = half/full | |
5688 | instr[29,24] = 00 1111 | |
5689 | instr[23] = ? | |
5690 | instr[22,16] = element size & index | |
5691 | instr[15,10] = sub-opcode | |
5692 | instr[9,5] = Vn | |
5693 | instr[4,0] = Vd */ | |
5694 | ||
5695 | NYI_assert (29, 24, 0x0F); | |
5696 | ||
5697 | if (uimm (aarch64_get_instr (cpu), 23, 23) != 0) | |
5698 | HALT_NYI; | |
5699 | ||
5700 | switch (uimm (aarch64_get_instr (cpu), 15, 10)) | |
5701 | { | |
5702 | case 0x01: do_vec_SSHR_USHR (cpu); return; | |
5703 | case 0x15: do_vec_SHL (cpu); return; | |
5704 | case 0x29: do_vec_xtl (cpu); return; | |
5705 | default: HALT_NYI; | |
5706 | } | |
5707 | } | |
5708 | ||
5709 | static void | |
5710 | do_vec_neg (sim_cpu *cpu) | |
5711 | { | |
5712 | /* instr[31] = 0 | |
5713 | instr[30] = full(1)/half(0) | |
5714 | instr[29,24] = 10 1110 | |
5715 | instr[23,22] = size: byte(00), half (01), word (10), long (11) | |
5716 | instr[21,10] = 1000 0010 1110 | |
5717 | instr[9,5] = Vs | |
5718 | instr[4,0] = Vd */ | |
5719 | ||
5720 | int full = uimm (aarch64_get_instr (cpu), 30, 30); | |
5721 | unsigned vs = uimm (aarch64_get_instr (cpu), 9, 5); | |
5722 | unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0); | |
5723 | unsigned i; | |
5724 | ||
5725 | NYI_assert (29, 24, 0x2E); | |
5726 | NYI_assert (21, 10, 0x82E); | |
5727 | ||
5728 | switch (uimm (aarch64_get_instr (cpu), 23, 22)) | |
5729 | { | |
5730 | case 0: | |
5731 | for (i = 0; i < (full ? 16 : 8); i++) | |
5732 | aarch64_set_vec_s8 (cpu, vd, i, - aarch64_get_vec_s8 (cpu, vs, i)); | |
5733 | return; | |
5734 | ||
5735 | case 1: | |
5736 | for (i = 0; i < (full ? 8 : 4); i++) | |
5737 | aarch64_set_vec_s16 (cpu, vd, i, - aarch64_get_vec_s16 (cpu, vs, i)); | |
5738 | return; | |
5739 | ||
5740 | case 2: | |
5741 | for (i = 0; i < (full ? 4 : 2); i++) | |
5742 | aarch64_set_vec_s32 (cpu, vd, i, - aarch64_get_vec_s32 (cpu, vs, i)); | |
5743 | return; | |
5744 | ||
5745 | case 3: | |
5746 | if (! full) | |
5747 | HALT_NYI; | |
5748 | for (i = 0; i < 2; i++) | |
5749 | aarch64_set_vec_s64 (cpu, vd, i, - aarch64_get_vec_s64 (cpu, vs, i)); | |
5750 | return; | |
5751 | ||
5752 | default: | |
5753 | HALT_UNREACHABLE; | |
5754 | } | |
5755 | } | |
5756 | ||
5757 | static void | |
5758 | do_vec_sqrt (sim_cpu *cpu) | |
5759 | { | |
5760 | /* instr[31] = 0 | |
5761 | instr[30] = full(1)/half(0) | |
5762 | instr[29,23] = 101 1101 | |
5763 | instr[22] = single(0)/double(1) | |
5764 | instr[21,10] = 1000 0111 1110 | |
5765 | instr[9,5] = Vs | |
5766 | instr[4,0] = Vd. */ | |
5767 | ||
5768 | int full = uimm (aarch64_get_instr (cpu), 30, 30); | |
5769 | unsigned vs = uimm (aarch64_get_instr (cpu), 9, 5); | |
5770 | unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0); | |
5771 | unsigned i; | |
5772 | ||
5773 | NYI_assert (29, 23, 0x5D); | |
5774 | NYI_assert (21, 10, 0x87E); | |
5775 | ||
5776 | if (uimm (aarch64_get_instr (cpu), 22, 22) == 0) | |
5777 | for (i = 0; i < (full ? 4 : 2); i++) | |
5778 | aarch64_set_vec_float (cpu, vd, i, | |
5779 | sqrtf (aarch64_get_vec_float (cpu, vs, i))); | |
5780 | else | |
5781 | for (i = 0; i < 2; i++) | |
5782 | aarch64_set_vec_double (cpu, vd, i, | |
5783 | sqrt (aarch64_get_vec_double (cpu, vs, i))); | |
5784 | } | |
5785 | ||
5786 | static void | |
5787 | do_vec_mls_indexed (sim_cpu *cpu) | |
5788 | { | |
5789 | /* instr[31] = 0 | |
5790 | instr[30] = half(0)/full(1) | |
5791 | instr[29,24] = 10 1111 | |
5792 | instr[23,22] = 16-bit(01)/32-bit(10) | |
5793 | instr[21,20],instr[11] = index (if 16-bit) | |
5794 | instr[21],instr[11] = index (if 32-bit) | |
5795 | instr[20,16] = Vm | |
5796 | instr[15,12] = 0100 | |
5797 | instr[11] = part of index | |
5798 | instr[10] = 0 | |
5799 | instr[9,5] = Vs | |
5800 | instr[4,0] = Vd. */ | |
5801 | ||
5802 | int full = uimm (aarch64_get_instr (cpu), 30, 30); | |
5803 | unsigned vs = uimm (aarch64_get_instr (cpu), 9, 5); | |
5804 | unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0); | |
5805 | unsigned vm = uimm (aarch64_get_instr (cpu), 20, 16); | |
5806 | unsigned i; | |
5807 | ||
5808 | NYI_assert (15, 12, 4); | |
5809 | NYI_assert (10, 10, 0); | |
5810 | ||
5811 | switch (uimm (aarch64_get_instr (cpu), 23, 22)) | |
5812 | { | |
5813 | case 1: | |
5814 | { | |
5815 | unsigned elem; | |
5816 | uint32_t val; | |
5817 | ||
5818 | if (vm > 15) | |
5819 | HALT_NYI; | |
5820 | ||
| /* The element index is H:L:M = instr[11],instr[21],instr[20]; M is | |
|    zero here because of the vm > 15 check above.  */ | |
5821 | elem = (uimm (aarch64_get_instr (cpu), 11, 11) << 2) | |
5822 | | (uimm (aarch64_get_instr (cpu), 21, 21) << 1); | |
5823 | val = aarch64_get_vec_u16 (cpu, vm, elem); | |
5824 | ||
5825 | for (i = 0; i < (full ? 8 : 4); i++) | |
5826 | aarch64_set_vec_u16 (cpu, vd, i, | |
5827 | aarch64_get_vec_u16 (cpu, vd, i) - | |
5828 | (aarch64_get_vec_u16 (cpu, vs, i) * val)); | |
5829 | return; | |
5830 | } | |
5831 | ||
5832 | case 2: | |
5833 | { | |
| /* The element index is H:L = instr[11],instr[21].  */ | |
5834 | unsigned elem = (uimm (aarch64_get_instr (cpu), 11, 11) << 1) | |
5835 | | uimm (aarch64_get_instr (cpu), 21, 21); | |
5836 | uint32_t val = aarch64_get_vec_u32 (cpu, vm, elem); | |
5837 | ||
5838 | for (i = 0; i < (full ? 4 : 2); i++) | |
5839 | aarch64_set_vec_u32 (cpu, vd, i, | |
5840 | aarch64_get_vec_u32 (cpu, vd, i) - | |
5841 | (aarch64_get_vec_u32 (cpu, vs, i) * val)); | |
5842 | return; | |
5843 | } | |
5844 | ||
5845 | case 0: | |
5846 | case 3: | |
5847 | default: | |
5848 | HALT_NYI; | |
5849 | } | |
5850 | } | |
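| ||
| /* Per lane this is the non-widening MLS (by element); for the 8H | |
|    form each step computes vd[i] -= vs[i] * vm[elem] with 16-bit | |
|    operands throughout.  */ | |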
5851 | ||
5852 | static void | |
5853 | do_vec_SUB (sim_cpu *cpu) | |
5854 | { | |
5855 | /* instr [31] = 0 | |
5856 | instr [30] = half(0)/full(1) | |
5857 | instr [29,24] = 10 1110 | |
5858 | instr [23,22] = size: byte(00), half(01), word (10), long (11) | |
5859 | instr [21] = 1 | |
5860 | instr [20,16] = Vm | |
5861 | instr [15,10] = 10 0001 | |
5862 | instr [9, 5] = Vn | |
5863 | instr [4, 0] = Vd. */ | |
5864 | ||
5865 | unsigned full = uimm (aarch64_get_instr (cpu), 30, 30); | |
5866 | unsigned vm = uimm (aarch64_get_instr (cpu), 20, 16); | |
5867 | unsigned vn = uimm (aarch64_get_instr (cpu), 9, 5); | |
5868 | unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0); | |
5869 | unsigned i; | |
5870 | ||
5871 | NYI_assert (29, 24, 0x2E); | |
5872 | NYI_assert (21, 21, 1); | |
5873 | NYI_assert (15, 10, 0x21); | |
5874 | ||
5875 | switch (uimm (aarch64_get_instr (cpu), 23, 22)) | |
5876 | { | |
5877 | case 0: | |
5878 | for (i = 0; i < (full ? 16 : 8); i++) | |
5879 | aarch64_set_vec_s8 (cpu, vd, i, | |
5880 | aarch64_get_vec_s8 (cpu, vn, i) | |
5881 | - aarch64_get_vec_s8 (cpu, vm, i)); | |
5882 | return; | |
5883 | ||
5884 | case 1: | |
5885 | for (i = 0; i < (full ? 8 : 4); i++) | |
5886 | aarch64_set_vec_s16 (cpu, vd, i, | |
5887 | aarch64_get_vec_s16 (cpu, vn, i) | |
5888 | - aarch64_get_vec_s16 (cpu, vm, i)); | |
5889 | return; | |
5890 | ||
5891 | case 2: | |
5892 | for (i = 0; i < (full ? 4 : 2); i++) | |
5893 | aarch64_set_vec_s32 (cpu, vd, i, | |
5894 | aarch64_get_vec_s32 (cpu, vn, i) | |
5895 | - aarch64_get_vec_s32 (cpu, vm, i)); | |
5896 | return; | |
5897 | ||
5898 | case 3: | |
5899 | if (full == 0) | |
5900 | HALT_UNALLOC; | |
5901 | ||
5902 | for (i = 0; i < 2; i++) | |
5903 | aarch64_set_vec_s64 (cpu, vd, i, | |
5904 | aarch64_get_vec_s64 (cpu, vn, i) | |
5905 | - aarch64_get_vec_s64 (cpu, vm, i)); | |
5906 | return; | |
5907 | ||
5908 | default: | |
5909 | HALT_UNREACHABLE; | |
5910 | } | |
5911 | } | |
5912 | ||
5913 | static void | |
5914 | do_vec_MLS (sim_cpu *cpu) | |
5915 | { | |
5916 | /* instr [31] = 0 | |
5917 | instr [30] = half(0)/full(1) | |
5918 | instr [29,24] = 10 1110 | |
5919 | instr [23,22] = size: byte(00), half(01), word (10) | |
5920 | instr [21] = 1 | |
5921 | instr [20,16] = Vm | |
5922 | instr [15,10] = 10 0101 | |
5923 | instr [9, 5] = Vn | |
5924 | instr [4, 0] = Vd. */ | |
5925 | ||
5926 | unsigned full = uimm (aarch64_get_instr (cpu), 30, 30); | |
5927 | unsigned vm = uimm (aarch64_get_instr (cpu), 20, 16); | |
5928 | unsigned vn = uimm (aarch64_get_instr (cpu), 9, 5); | |
5929 | unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0); | |
5930 | unsigned i; | |
5931 | ||
5932 | NYI_assert (29, 24, 0x2E); | |
5933 | NYI_assert (21, 21, 1); | |
5934 | NYI_assert (15, 10, 0x25); | |
5935 | ||
5936 | switch (uimm (aarch64_get_instr (cpu), 23, 22)) | |
5937 | { | |
5938 | case 0: | |
5939 | for (i = 0; i < (full ? 16 : 8); i++) | |
5940 | aarch64_set_vec_u8 (cpu, vd, i, | |
5941 | aarch64_get_vec_u8 (cpu, vd, i) | |
5942 | - (aarch64_get_vec_u8 (cpu, vn, i) | |
5943 | * aarch64_get_vec_u8 (cpu, vm, i))); | |
5944 | return; | |
5945 | ||
5946 | case 1: | |
5947 | for (i = 0; i < (full ? 8 : 4); i++) | |
5948 | aarch64_set_vec_u16 (cpu, vd, i, | |
5949 | aarch64_get_vec_u16 (cpu, vd, i) | |
5950 | - (aarch64_get_vec_u16 (cpu, vn, i) | |
5951 | * aarch64_get_vec_u16 (cpu, vm, i))); | |
5952 | return; | |
5953 | ||
5954 | case 2: | |
5955 | for (i = 0; i < (full ? 4 : 2); i++) | |
5956 | aarch64_set_vec_u32 (cpu, vd, i, | |
5957 | aarch64_get_vec_u32 (cpu, vd, i) | |
5958 | - (aarch64_get_vec_u32 (cpu, vn, i) | |
5959 | * aarch64_get_vec_u32 (cpu, vm, i))); | |
5960 | return; | |
5961 | ||
5962 | default: | |
5963 | HALT_UNALLOC; | |
5964 | } | |
5965 | } | |
5966 | ||
5967 | static void | |
5968 | do_vec_FDIV (sim_cpu *cpu) | |
5969 | { | |
5970 | /* instr [31] = 0 | |
5971 | instr [30] = half(0)/full(1) | |
5972 | instr [29,23] = 10 1110 0 | |
5973 | instr [22] = float(0)/double(1) | |
5974 | instr [21] = 1 | |
5975 | instr [20,16] = Vm | |
5976 | instr [15,10] = 1111 11 | |
5977 | instr [9, 5] = Vn | |
5978 | instr [4, 0] = Vd. */ | |
5979 | ||
5980 | unsigned full = uimm (aarch64_get_instr (cpu), 30, 30); | |
5981 | unsigned vm = uimm (aarch64_get_instr (cpu), 20, 16); | |
5982 | unsigned vn = uimm (aarch64_get_instr (cpu), 9, 5); | |
5983 | unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0); | |
5984 | unsigned i; | |
5985 | ||
5986 | NYI_assert (29, 23, 0x5C); | |
5987 | NYI_assert (21, 21, 1); | |
5988 | NYI_assert (15, 10, 0x3F); | |
5989 | ||
5990 | if (uimm (aarch64_get_instr (cpu), 22, 22)) | |
5991 | { | |
5992 | if (! full) | |
5993 | HALT_UNALLOC; | |
5994 | ||
5995 | for (i = 0; i < 2; i++) | |
5996 | aarch64_set_vec_double (cpu, vd, i, | |
5997 | aarch64_get_vec_double (cpu, vn, i) | |
5998 | / aarch64_get_vec_double (cpu, vm, i)); | |
5999 | } | |
6000 | else | |
6001 | for (i = 0; i < (full ? 4 : 2); i++) | |
6002 | aarch64_set_vec_float (cpu, vd, i, | |
6003 | aarch64_get_vec_float (cpu, vn, i) | |
6004 | / aarch64_get_vec_float (cpu, vm, i)); | |
6005 | } | |
6006 | ||
6007 | static void | |
6008 | do_vec_FMUL (sim_cpu *cpu) | |
6009 | { | |
6010 | /* instr [31] = 0 | |
6011 | instr [30] = half(0)/full(1) | |
6012 | instr [29,23] = 10 1110 0 | |
6013 | instr [22] = float(0)/double(1) | |
6014 | instr [21] = 1 | |
6015 | instr [20,16] = Vm | |
6016 | instr [15,10] = 1101 11 | |
6017 | instr [9, 5] = Vn | |
6018 | instr [4, 0] = Vd. */ | |
6019 | ||
6020 | unsigned full = uimm (aarch64_get_instr (cpu), 30, 30); | |
6021 | unsigned vm = uimm (aarch64_get_instr (cpu), 20, 16); | |
6022 | unsigned vn = uimm (aarch64_get_instr (cpu), 9, 5); | |
6023 | unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0); | |
6024 | unsigned i; | |
6025 | ||
6026 | NYI_assert (29, 23, 0x5C); | |
6027 | NYI_assert (21, 21, 1); | |
6028 | NYI_assert (15, 10, 0x37); | |
6029 | ||
6030 | if (uimm (aarch64_get_instr (cpu), 22, 22)) | |
6031 | { | |
6032 | if (! full) | |
6033 | HALT_UNALLOC; | |
6034 | ||
6035 | for (i = 0; i < 2; i++) | |
6036 | aarch64_set_vec_double (cpu, vd, i, | |
6037 | aarch64_get_vec_double (cpu, vn, i) | |
6038 | * aarch64_get_vec_double (cpu, vm, i)); | |
6039 | } | |
6040 | else | |
6041 | for (i = 0; i < (full ? 4 : 2); i++) | |
6042 | aarch64_set_vec_float (cpu, vd, i, | |
6043 | aarch64_get_vec_float (cpu, vn, i) | |
6044 | * aarch64_get_vec_float (cpu, vm, i)); | |
6045 | } | |
6046 | ||
6047 | static void | |
6048 | do_vec_FADDP (sim_cpu *cpu) | |
6049 | { | |
6050 | /* instr [31] = 0 | |
6051 | instr [30] = half(0)/full(1) | |
6052 | instr [29,23] = 10 1110 0 | |
6053 | instr [22] = float(0)/double(1) | |
6054 | instr [21] = 1 | |
6055 | instr [20,16] = Vm | |
6056 | instr [15,10] = 1101 01 | |
6057 | instr [9, 5] = Vn | |
6058 | instr [4, 0] = Vd. */ | |
6059 | ||
6060 | unsigned full = uimm (aarch64_get_instr (cpu), 30, 30); | |
6061 | unsigned vm = uimm (aarch64_get_instr (cpu), 20, 16); | |
6062 | unsigned vn = uimm (aarch64_get_instr (cpu), 9, 5); | |
6063 | unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0); | |
6064 | ||
6065 | NYI_assert (29, 23, 0x5C); | |
6066 | NYI_assert (21, 21, 1); | |
6067 | NYI_assert (15, 10, 0x35); | |
6068 | ||
6069 | if (uimm (aarch64_get_instr (cpu), 22, 22)) | |
6070 | { | |
6071 | if (! full) | |
6072 | HALT_UNALLOC; | |
6073 | ||
6074 | aarch64_set_vec_double (cpu, vd, 0, aarch64_get_vec_double (cpu, vn, 0) | |
6075 | + aarch64_get_vec_double (cpu, vn, 1)); | |
6076 | aarch64_set_vec_double (cpu, vd, 1, aarch64_get_vec_double (cpu, vm, 0) | |
6077 | + aarch64_get_vec_double (cpu, vm, 1)); | |
6078 | } | |
6079 | else | |
6080 | { | |
6081 | aarch64_set_vec_float (cpu, vd, 0, aarch64_get_vec_float (cpu, vn, 0) | |
6082 | + aarch64_get_vec_float (cpu, vn, 1)); | |
6083 | if (full) | |
6084 | aarch64_set_vec_float (cpu, vd, 1, aarch64_get_vec_float (cpu, vn, 2) | |
6085 | + aarch64_get_vec_float (cpu, vn, 3)); | |
6086 | aarch64_set_vec_float (cpu, vd, full ? 2 : 1, | |
6087 | aarch64_get_vec_float (cpu, vm, 0) | |
6088 | + aarch64_get_vec_float (cpu, vm, 1)); | |
6089 | if (full) | |
6090 | aarch64_set_vec_float (cpu, vd, 3, | |
6091 | aarch64_get_vec_float (cpu, vm, 2) | |
6092 | + aarch64_get_vec_float (cpu, vm, 3)); | |
6093 | } | |
6094 | } | |
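| ||
| /* For example FADDP Vd.4S, Vn.4S, Vm.4S computes | |
| ||
|      vd = { vn[0] + vn[1], vn[2] + vn[3], vm[0] + vm[1], vm[2] + vm[3] } | |
| ||
|    As with DO_ADDP above, Vd is written before Vm has been fully | |
|    read, so the Vd == Vm aliasing case is not handled faithfully.  */ | |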
6095 | ||
6096 | static void | |
6097 | do_vec_FSQRT (sim_cpu *cpu) | |
6098 | { | |
6099 | /* instr[31] = 0 | |
6100 | instr[30] = half(0)/full(1) | |
6101 | instr[29,23] = 10 1110 1 | |
6102 | instr[22] = single(0)/double(1) | |
6103 | instr[21,10] = 10 0001 1111 10 | |
6104 | instr[9,5] = Vsrc | |
6105 | instr[4,0] = Vdest. */ | |
6106 | ||
6107 | unsigned vn = uimm (aarch64_get_instr (cpu), 9, 5); | |
6108 | unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0); | |
6109 | unsigned full = uimm (aarch64_get_instr (cpu), 30, 30); | |
6110 | int i; | |
6111 | ||
6112 | NYI_assert (29, 23, 0x5D); | |
6113 | NYI_assert (21, 10, 0x87E); | |
6114 | ||
6115 | if (uimm (aarch64_get_instr (cpu), 22, 22)) | |
6116 | { | |
6117 | if (! full) | |
6118 | HALT_UNALLOC; | |
6119 | ||
6120 | for (i = 0; i < 2; i++) | |
6121 | aarch64_set_vec_double (cpu, vd, i, | |
6122 | sqrt (aarch64_get_vec_double (cpu, vn, i))); | |
6123 | } | |
6124 | else | |
6125 | { | |
6126 | for (i = 0; i < (full ? 4 : 2); i++) | |
6127 | aarch64_set_vec_float (cpu, vd, i, | |
6128 | sqrtf (aarch64_get_vec_float (cpu, vn, i))); | |
6129 | } | |
6130 | } | |
6131 | ||
6132 | static void | |
6133 | do_vec_FNEG (sim_cpu *cpu) | |
6134 | { | |
6135 | /* instr[31] = 0 | |
6136 | instr[30] = half (0)/full (1) | |
6137 | instr[29,23] = 10 1110 1 | |
6138 | instr[22] = single (0)/double (1) | |
6139 | instr[21,10] = 10 0000 1111 10 | |
6140 | instr[9,5] = Vsrc | |
6141 | instr[4,0] = Vdest. */ | |
6142 | ||
6143 | unsigned vn = uimm (aarch64_get_instr (cpu), 9, 5); | |
6144 | unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0); | |
6145 | unsigned full = uimm (aarch64_get_instr (cpu), 30, 30); | |
6146 | int i; | |
6147 | ||
6148 | NYI_assert (29, 23, 0x5D); | |
6149 | NYI_assert (21, 10, 0x83E); | |
6150 | ||
6151 | if (uimm (aarch64_get_instr (cpu), 22, 22)) | |
6152 | { | |
6153 | if (! full) | |
6154 | HALT_UNALLOC; | |
6155 | ||
6156 | for (i = 0; i < 2; i++) | |
6157 | aarch64_set_vec_double (cpu, vd, i, | |
6158 | - aarch64_get_vec_double (cpu, vn, i)); | |
6159 | } | |
6160 | else | |
6161 | { | |
6162 | for (i = 0; i < (full ? 4 : 2); i++) | |
6163 | aarch64_set_vec_float (cpu, vd, i, | |
6164 | - aarch64_get_vec_float (cpu, vn, i)); | |
6165 | } | |
6166 | } | |
6167 | ||
6168 | static void | |
6169 | do_vec_NOT (sim_cpu *cpu) | |
6170 | { | |
6171 | /* instr[31] = 0 | |
6172 | instr[30] = half (0)/full (1) | |
6173 | instr[29,21] = 10 1110 001 | |
6174 | instr[20,16] = 0 0000 | |
6175 | instr[15,10] = 0101 10 | |
6176 | instr[9,5] = Vn | |
6177 | instr[4,0] = Vd. */ | |
6178 | ||
6179 | unsigned vn = uimm (aarch64_get_instr (cpu), 9, 5); | |
6180 | unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0); | |
6181 | unsigned i; | |
6182 | int full = uimm (aarch64_get_instr (cpu), 30, 30); | |
6183 | ||
6184 | NYI_assert (29, 10, 0xB8816); | |
6185 | ||
6186 | for (i = 0; i < (full ? 16 : 8); i++) | |
6187 | aarch64_set_vec_u8 (cpu, vd, i, ~ aarch64_get_vec_u8 (cpu, vn, i)); | |
6188 | } | |
6189 | ||
6190 | static void | |
6191 | do_vec_MOV_element (sim_cpu *cpu) | |
6192 | { | |
6193 | /* instr[31,21] = 0110 1110 000 | |
6194 | instr[20,16] = size & dest index | |
6195 | instr[15] = 0 | |
6196 | instr[14,11] = source index | |
6197 | instr[10] = 1 | |
6198 | instr[9,5] = Vs | |
6199 | instr[4,0] = Vd. */ | |
6200 | ||
6201 | unsigned vs = uimm (aarch64_get_instr (cpu), 9, 5); | |
6202 | unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0); | |
6203 | unsigned src_index; | |
6204 | unsigned dst_index; | |
6205 | ||
6206 | NYI_assert (31, 21, 0x370); | |
6207 | NYI_assert (15, 15, 0); | |
6208 | NYI_assert (10, 10, 1); | |
6209 | ||
6210 | if (uimm (aarch64_get_instr (cpu), 16, 16)) | |
6211 | { | |
6212 | /* Move a byte. */ | |
6213 | src_index = uimm (aarch64_get_instr (cpu), 14, 11); | |
6214 | dst_index = uimm (aarch64_get_instr (cpu), 20, 17); | |
6215 | aarch64_set_vec_u8 (cpu, vd, dst_index, | |
6216 | aarch64_get_vec_u8 (cpu, vs, src_index)); | |
6217 | } | |
6218 | else if (uimm (aarch64_get_instr (cpu), 17, 17)) | |
6219 | { | |
6220 | /* Move 16-bits. */ | |
6221 | NYI_assert (11, 11, 0); | |
6222 | src_index = uimm (aarch64_get_instr (cpu), 14, 12); | |
6223 | dst_index = uimm (aarch64_get_instr (cpu), 20, 18); | |
6224 | aarch64_set_vec_u16 (cpu, vd, dst_index, | |
6225 | aarch64_get_vec_u16 (cpu, vs, src_index)); | |
6226 | } | |
6227 | else if (uimm (aarch64_get_instr (cpu), 18, 18)) | |
6228 | { | |
6229 | /* Move 32-bits. */ | |
6230 | NYI_assert (12, 11, 0); | |
6231 | src_index = uimm (aarch64_get_instr (cpu), 14, 13); | |
6232 | dst_index = uimm (aarch64_get_instr (cpu), 20, 19); | |
6233 | aarch64_set_vec_u32 (cpu, vd, dst_index, | |
6234 | aarch64_get_vec_u32 (cpu, vs, src_index)); | |
6235 | } | |
6236 | else | |
6237 | { | |
6238 | NYI_assert (19, 19, 1); | |
6239 | NYI_assert (13, 11, 0); | |
6240 | src_index = uimm (aarch64_get_instr (cpu), 14, 14); | |
6241 | dst_index = uimm (aarch64_get_instr (cpu), 20, 20); | |
6242 | aarch64_set_vec_u64 (cpu, vd, dst_index, | |
6243 | aarch64_get_vec_u64 (cpu, vs, src_index)); | |
6244 | } | |
6245 | } | |
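| ||
| /* For example INS Vd.S[3], Vn.S[1] has instr[20,16] = 11100: bit 18 | |
|    is the lowest set bit, selecting a 32-bit move, with dst_index = | |
|    instr[20,19] = 3 and src_index = instr[14,13] = 1.  */ | |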
6246 | ||
6247 | static void | |
6248 | dexAdvSIMD0 (sim_cpu *cpu) | |
6249 | { | |
6250 | /* instr [28,25] = 0 111. */ | |
6251 | if ( uimm (aarch64_get_instr (cpu), 15, 10) == 0x07 | |
6252 | && (uimm (aarch64_get_instr (cpu), 9, 5) == | |
6253 | uimm (aarch64_get_instr (cpu), 20, 16))) | |
6254 | { | |
6255 | if (uimm (aarch64_get_instr (cpu), 31, 21) == 0x075 | |
6256 | || uimm (aarch64_get_instr (cpu), 31, 21) == 0x275) | |
6257 | { | |
6258 | do_vec_MOV_whole_vector (cpu); | |
6259 | return; | |
6260 | } | |
6261 | } | |
6262 | ||
6263 | if (uimm (aarch64_get_instr (cpu), 29, 19) == 0x1E0) | |
6264 | { | |
6265 | do_vec_MOV_immediate (cpu); | |
6266 | return; | |
6267 | } | |
6268 | ||
6269 | if (uimm (aarch64_get_instr (cpu), 29, 19) == 0x5E0) | |
6270 | { | |
6271 | do_vec_MVNI (cpu); | |
6272 | return; | |
6273 | } | |
6274 | ||
6275 | if (uimm (aarch64_get_instr (cpu), 29, 19) == 0x1C0 | |
6276 | || uimm (aarch64_get_instr (cpu), 29, 19) == 0x1C1) | |
6277 | { | |
6278 | if (uimm (aarch64_get_instr (cpu), 15, 10) == 0x03) | |
6279 | { | |
6280 | do_vec_DUP_scalar_into_vector (cpu); | |
6281 | return; | |
6282 | } | |
6283 | } | |
6284 | ||
6285 | switch (uimm (aarch64_get_instr (cpu), 29, 24)) | |
6286 | { | |
6287 | case 0x0E: do_vec_op1 (cpu); return; | |
6288 | case 0x0F: do_vec_op2 (cpu); return; | |
6289 | ||
6290 | case 0x2f: | |
6291 | switch (uimm (aarch64_get_instr (cpu), 15, 10)) | |
6292 | { | |
6293 | case 0x01: do_vec_SSHR_USHR (cpu); return; | |
6294 | case 0x10: | |
6295 | case 0x12: do_vec_mls_indexed (cpu); return; | |
6296 | case 0x29: do_vec_xtl (cpu); return; | |
6297 | default: | |
6298 | HALT_NYI; | |
6299 | } | |
6300 | ||
6301 | case 0x2E: | |
6302 | if (uimm (aarch64_get_instr (cpu), 21, 21) == 1) | |
6303 | { | |
6304 | switch (uimm (aarch64_get_instr (cpu), 15, 10)) | |
6305 | { | |
6306 | case 0x07: | |
6307 | switch (uimm (aarch64_get_instr (cpu), 23, 22)) | |
6308 | { | |
6309 | case 0: do_vec_EOR (cpu); return; | |
6310 | case 1: do_vec_BSL (cpu); return; | |
6311 | case 2: | |
6312 | case 3: do_vec_bit (cpu); return; | |
6313 | } | |
6314 | break; | |
6315 | ||
6316 | case 0x08: do_vec_sub_long (cpu); return; | |
6317 | case 0x11: do_vec_USHL (cpu); return; | |
6318 | case 0x16: do_vec_NOT (cpu); return; | |
6319 | case 0x19: do_vec_max (cpu); return; | |
6320 | case 0x1B: do_vec_min (cpu); return; | |
6321 | case 0x21: do_vec_SUB (cpu); return; | |
6322 | case 0x25: do_vec_MLS (cpu); return; | |
6323 | case 0x31: do_vec_FminmaxNMP (cpu); return; | |
6324 | case 0x35: do_vec_FADDP (cpu); return; | |
6325 | case 0x37: do_vec_FMUL (cpu); return; | |
6326 | case 0x3F: do_vec_FDIV (cpu); return; | |
6327 | ||
6328 | case 0x3E: | |
6329 | switch (uimm (aarch64_get_instr (cpu), 20, 16)) | |
6330 | { | |
6331 | case 0x00: do_vec_FNEG (cpu); return; | |
6332 | case 0x01: do_vec_FSQRT (cpu); return; | |
6333 | default: HALT_NYI; | |
6334 | } | |
6335 | ||
6336 | case 0x0D: | |
6337 | case 0x0F: | |
6338 | case 0x22: | |
6339 | case 0x23: | |
6340 | case 0x26: | |
6341 | case 0x2A: | |
6342 | case 0x32: | |
6343 | case 0x36: | |
6344 | case 0x39: | |
6345 | case 0x3A: | |
6346 | do_vec_compare (cpu); return; | |
6347 | ||
6348 | default: break; | |
6349 | } | |
6350 | } | |
6351 | ||
6352 | if (uimm (aarch64_get_instr (cpu), 31, 21) == 0x370) | |
6353 | { | |
6354 | do_vec_MOV_element (cpu); | |
6355 | return; | |
6356 | } | |
6357 | ||
6358 | switch (uimm (aarch64_get_instr (cpu), 21, 10)) | |
6359 | { | |
6360 | case 0x82E: do_vec_neg (cpu); return; | |
6361 | case 0x87E: do_vec_sqrt (cpu); return; | |
6362 | default: | |
6363 | if (uimm (aarch64_get_instr (cpu), 15, 10) == 0x30) | |
6364 | { | |
6365 | do_vec_mull (cpu); | |
6366 | return; | |
6367 | } | |
6368 | break; | |
6369 | } | |
6370 | break; | |
6371 | ||
6372 | default: | |
6373 | break; | |
6374 | } | |
6375 | ||
6376 | HALT_NYI; | |
6377 | } | |
6378 | ||
6379 | /* 3 sources. */ | |
6380 | ||
6381 | /* Float multiply add. */ | |
6382 | static void | |
6383 | fmadds (sim_cpu *cpu) | |
6384 | { | |
6385 | unsigned sa = uimm (aarch64_get_instr (cpu), 14, 10); | |
6386 | unsigned sm = uimm (aarch64_get_instr (cpu), 20, 16); | |
6387 | unsigned sn = uimm (aarch64_get_instr (cpu), 9, 5); | |
6388 | unsigned sd = uimm (aarch64_get_instr (cpu), 4, 0); | |
6389 | ||
6390 | aarch64_set_FP_float (cpu, sd, aarch64_get_FP_float (cpu, sa) | |
6391 | + aarch64_get_FP_float (cpu, sn) | |
6392 | * aarch64_get_FP_float (cpu, sm)); | |
6393 | } | |
6394 | ||
6395 | /* Double multiply add. */ | |
6396 | static void | |
6397 | fmaddd (sim_cpu *cpu) | |
6398 | { | |
6399 | unsigned sa = uimm (aarch64_get_instr (cpu), 14, 10); | |
6400 | unsigned sm = uimm (aarch64_get_instr (cpu), 20, 16); | |
6401 | unsigned sn = uimm (aarch64_get_instr (cpu), 9, 5); | |
6402 | unsigned sd = uimm (aarch64_get_instr (cpu), 4, 0); | |
6403 | ||
6404 | aarch64_set_FP_double (cpu, sd, aarch64_get_FP_double (cpu, sa) | |
6405 | + aarch64_get_FP_double (cpu, sn) | |
6406 | * aarch64_get_FP_double (cpu, sm)); | |
6407 | } | |
6408 | ||
6409 | /* Float multiply subtract. */ | |
6410 | static void | |
6411 | fmsubs (sim_cpu *cpu) | |
6412 | { | |
6413 | unsigned sa = uimm (aarch64_get_instr (cpu), 14, 10); | |
6414 | unsigned sm = uimm (aarch64_get_instr (cpu), 20, 16); | |
6415 | unsigned sn = uimm (aarch64_get_instr (cpu), 9, 5); | |
6416 | unsigned sd = uimm (aarch64_get_instr (cpu), 4, 0); | |
6417 | ||
6418 | aarch64_set_FP_float (cpu, sd, aarch64_get_FP_float (cpu, sa) | |
6419 | - aarch64_get_FP_float (cpu, sn) | |
6420 | * aarch64_get_FP_float (cpu, sm)); | |
6421 | } | |
6422 | ||
6423 | /* Double multiply subtract. */ | |
6424 | static void | |
6425 | fmsubd (sim_cpu *cpu) | |
6426 | { | |
6427 | unsigned sa = uimm (aarch64_get_instr (cpu), 14, 10); | |
6428 | unsigned sm = uimm (aarch64_get_instr (cpu), 20, 16); | |
6429 | unsigned sn = uimm (aarch64_get_instr (cpu), 9, 5); | |
6430 | unsigned sd = uimm (aarch64_get_instr (cpu), 4, 0); | |
6431 | ||
6432 | aarch64_set_FP_double (cpu, sd, aarch64_get_FP_double (cpu, sa) | |
6433 | - aarch64_get_FP_double (cpu, sn) | |
6434 | * aarch64_get_FP_double (cpu, sm)); | |
6435 | } | |
6436 | ||
6437 | /* Float negative multiply add. */ | |
6438 | static void | |
6439 | fnmadds (sim_cpu *cpu) | |
6440 | { | |
6441 | unsigned sa = uimm (aarch64_get_instr (cpu), 14, 10); | |
6442 | unsigned sm = uimm (aarch64_get_instr (cpu), 20, 16); | |
6443 | unsigned sn = uimm (aarch64_get_instr (cpu), 9, 5); | |
6444 | unsigned sd = uimm (aarch64_get_instr (cpu), 4, 0); | |
6445 | ||
6446 | aarch64_set_FP_float (cpu, sd, - aarch64_get_FP_float (cpu, sa) | |
6447 | + (- aarch64_get_FP_float (cpu, sn)) | |
6448 | * aarch64_get_FP_float (cpu, sm)); | |
6449 | } | |
6450 | ||
6451 | /* Double negative multiply add. */ | |
6452 | static void | |
6453 | fnmaddd (sim_cpu *cpu) | |
6454 | { | |
6455 | unsigned sa = uimm (aarch64_get_instr (cpu), 14, 10); | |
6456 | unsigned sm = uimm (aarch64_get_instr (cpu), 20, 16); | |
6457 | unsigned sn = uimm (aarch64_get_instr (cpu), 9, 5); | |
6458 | unsigned sd = uimm (aarch64_get_instr (cpu), 4, 0); | |
6459 | ||
6460 | aarch64_set_FP_double (cpu, sd, - aarch64_get_FP_double (cpu, sa) | |
6461 | + (- aarch64_get_FP_double (cpu, sn)) | |
6462 | * aarch64_get_FP_double (cpu, sm)); | |
6463 | } | |
6464 | ||
6465 | /* Float negative multiply subtract. */ | |
6466 | static void | |
6467 | fnmsubs (sim_cpu *cpu) | |
6468 | { | |
6469 | unsigned sa = uimm (aarch64_get_instr (cpu), 14, 10); | |
6470 | unsigned sm = uimm (aarch64_get_instr (cpu), 20, 16); | |
6471 | unsigned sn = uimm (aarch64_get_instr (cpu), 9, 5); | |
6472 | unsigned sd = uimm (aarch64_get_instr (cpu), 4, 0); | |
6473 | ||
6474 | aarch64_set_FP_float (cpu, sd, - aarch64_get_FP_float (cpu, sa) | |
6475 | + aarch64_get_FP_float (cpu, sn) | |
6476 | * aarch64_get_FP_float (cpu, sm)); | |
6477 | } | |
6478 | ||
6479 | /* Double negative multiply subtract. */ | |
6480 | static void | |
6481 | fnmsubd (sim_cpu *cpu) | |
6482 | { | |
6483 | unsigned sa = uimm (aarch64_get_instr (cpu), 14, 10); | |
6484 | unsigned sm = uimm (aarch64_get_instr (cpu), 20, 16); | |
6485 | unsigned sn = uimm (aarch64_get_instr (cpu), 9, 5); | |
6486 | unsigned sd = uimm (aarch64_get_instr (cpu), 4, 0); | |
6487 | ||
6488 | aarch64_set_FP_double (cpu, sd, - aarch64_get_FP_double (cpu, sa) | |
6489 | + aarch64_get_FP_double (cpu, sn) | |
6490 | * aarch64_get_FP_double (cpu, sm)); | |
6491 | } | |
6492 | ||
6493 | static void | |
6494 | dexSimpleFPDataProc3Source (sim_cpu *cpu) | |
6495 | { | |
6496 | /* instr[31] ==> M : 0 ==> OK, 1 ==> UNALLOC | |
6497 | instr[30] = 0 | |
6498 | instr[29] ==> S : 0 ==> OK, 1 ==> UNALLOC | |
6499 | instr[28,25] = 1111 | |
6500 | instr[24] = 1 | |
6501 | instr[23,22] ==> type : 00 ==> single, 01 ==> double, 1x ==> UNALLOC | |
6502 | instr[21] ==> o1 : 0 ==> unnegated, 1 ==> negated | |
6503 | instr[15] ==> o2 : 0 ==> ADD, 1 ==> SUB */ | |
6504 | ||
6505 | uint32_t M_S = (uimm (aarch64_get_instr (cpu), 31, 31) << 1) | |
6506 | | uimm (aarch64_get_instr (cpu), 29, 29); | |
6507 | /* dispatch on combined type:o1:o2. */ | |
6508 | uint32_t dispatch = (uimm (aarch64_get_instr (cpu), 23, 21) << 1) | |
6509 | | uimm (aarch64_get_instr (cpu), 15, 15); | |
6510 | ||
6511 | if (M_S != 0) | |
6512 | HALT_UNALLOC; | |
6513 | ||
6514 | switch (dispatch) | |
6515 | { | |
6516 | case 0: fmadds (cpu); return; | |
6517 | case 1: fmsubs (cpu); return; | |
6518 | case 2: fnmadds (cpu); return; | |
6519 | case 3: fnmsubs (cpu); return; | |
6520 | case 4: fmaddd (cpu); return; | |
6521 | case 5: fmsubd (cpu); return; | |
6522 | case 6: fnmaddd (cpu); return; | |
6523 | case 7: fnmsubd (cpu); return; | |
6524 | default: | |
6525 | /* type > 1 is currently unallocated. */ | |
6526 | HALT_UNALLOC; | |
6527 | } | |
6528 | } | |
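| ||
| /* For example FNMSUB Dd, Dn, Dm, Da has type = 01, o1 = 1 and | |
|    o2 = 1, so dispatch = 0111 = 7 and fnmsubd is selected.  */ | |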
6529 | ||
6530 | static void | |
6531 | dexSimpleFPFixedConvert (sim_cpu *cpu) | |
6532 | { | |
6533 | HALT_NYI; | |
6534 | } | |
6535 | ||
6536 | static void | |
6537 | dexSimpleFPCondCompare (sim_cpu *cpu) | |
6538 | { | |
6539 | HALT_NYI; | |
6540 | } | |
6541 | ||
6542 | /* 2 sources. */ | |
6543 | ||
6544 | /* Float add. */ | |
6545 | static void | |
6546 | fadds (sim_cpu *cpu) | |
6547 | { | |
6548 | unsigned sm = uimm (aarch64_get_instr (cpu), 20, 16); | |
6549 | unsigned sn = uimm (aarch64_get_instr (cpu), 9, 5); | |
6550 | unsigned sd = uimm (aarch64_get_instr (cpu), 4, 0); | |
6551 | ||
6552 | aarch64_set_FP_float (cpu, sd, aarch64_get_FP_float (cpu, sn) | |
6553 | + aarch64_get_FP_float (cpu, sm)); | |
6554 | } | |
6555 | ||
6556 | /* Double add. */ | |
6557 | static void | |
6558 | faddd (sim_cpu *cpu) | |
6559 | { | |
6560 | unsigned sm = uimm (aarch64_get_instr (cpu), 20, 16); | |
6561 | unsigned sn = uimm (aarch64_get_instr (cpu), 9, 5); | |
6562 | unsigned sd = uimm (aarch64_get_instr (cpu), 4, 0); | |
6563 | ||
6564 | aarch64_set_FP_double (cpu, sd, aarch64_get_FP_double (cpu, sn) | |
6565 | + aarch64_get_FP_double (cpu, sm)); | |
6566 | } | |
6567 | ||
6568 | /* Float divide. */ | |
6569 | static void | |
6570 | fdivs (sim_cpu *cpu) | |
6571 | { | |
6572 | unsigned sm = uimm (aarch64_get_instr (cpu), 20, 16); | |
6573 | unsigned sn = uimm (aarch64_get_instr (cpu), 9, 5); | |
6574 | unsigned sd = uimm (aarch64_get_instr (cpu), 4, 0); | |
6575 | ||
6576 | aarch64_set_FP_float (cpu, sd, aarch64_get_FP_float (cpu, sn) | |
6577 | / aarch64_get_FP_float (cpu, sm)); | |
6578 | } | |
6579 | ||
6580 | /* Double divide. */ | |
6581 | static void | |
6582 | fdivd (sim_cpu *cpu) | |
6583 | { | |
6584 | unsigned sm = uimm (aarch64_get_instr (cpu), 20, 16); | |
6585 | unsigned sn = uimm (aarch64_get_instr (cpu), 9, 5); | |
6586 | unsigned sd = uimm (aarch64_get_instr (cpu), 4, 0); | |
6587 | ||
6588 | aarch64_set_FP_double (cpu, sd, aarch64_get_FP_double (cpu, sn) | |
6589 | / aarch64_get_FP_double (cpu, sm)); | |
6590 | } | |
6591 | ||
6592 | /* Float multiply. */ | |
6593 | static void | |
6594 | fmuls (sim_cpu *cpu) | |
6595 | { | |
6596 | unsigned sm = uimm (aarch64_get_instr (cpu), 20, 16); | |
6597 | unsigned sn = uimm (aarch64_get_instr (cpu), 9, 5); | |
6598 | unsigned sd = uimm (aarch64_get_instr (cpu), 4, 0); | |
6599 | ||
6600 | aarch64_set_FP_float (cpu, sd, aarch64_get_FP_float (cpu, sn) | |
6601 | * aarch64_get_FP_float (cpu, sm)); | |
6602 | } | |
6603 | ||
6604 | /* Double multiply. */ | |
6605 | static void | |
6606 | fmuld (sim_cpu *cpu) | |
6607 | { | |
6608 | unsigned sm = uimm (aarch64_get_instr (cpu), 20, 16); | |
6609 | unsigned sn = uimm (aarch64_get_instr (cpu), 9, 5); | |
6610 | unsigned sd = uimm (aarch64_get_instr (cpu), 4, 0); | |
6611 | ||
6612 | aarch64_set_FP_double (cpu, sd, aarch64_get_FP_double (cpu, sn) | |
6613 | * aarch64_get_FP_double (cpu, sm)); | |
6614 | } | |
6615 | ||
6616 | /* Float negate and multiply. */ | |
6617 | static void | |
6618 | fnmuls (sim_cpu *cpu) | |
6619 | { | |
6620 | unsigned sm = uimm (aarch64_get_instr (cpu), 20, 16); | |
6621 | unsigned sn = uimm (aarch64_get_instr (cpu), 9, 5); | |
6622 | unsigned sd = uimm (aarch64_get_instr (cpu), 4, 0); | |
6623 | ||
6624 | aarch64_set_FP_float (cpu, sd, - (aarch64_get_FP_float (cpu, sn) | |
6625 | * aarch64_get_FP_float (cpu, sm))); | |
6626 | } | |
6627 | ||
6628 | /* Double negate and multiply. */ | |
6629 | static void | |
6630 | fnmuld (sim_cpu *cpu) | |
6631 | { | |
6632 | unsigned sm = uimm (aarch64_get_instr (cpu), 20, 16); | |
6633 | unsigned sn = uimm (aarch64_get_instr (cpu), 9, 5); | |
6634 | unsigned sd = uimm (aarch64_get_instr (cpu), 4, 0); | |
6635 | ||
6636 | aarch64_set_FP_double (cpu, sd, - (aarch64_get_FP_double (cpu, sn) | |
6637 | * aarch64_get_FP_double (cpu, sm))); | |
6638 | } | |
6639 | ||
6640 | /* Float subtract. */ | |
6641 | static void | |
6642 | fsubs (sim_cpu *cpu) | |
6643 | { | |
6644 | unsigned sm = uimm (aarch64_get_instr (cpu), 20, 16); | |
6645 | unsigned sn = uimm (aarch64_get_instr (cpu), 9, 5); | |
6646 | unsigned sd = uimm (aarch64_get_instr (cpu), 4, 0); | |
6647 | ||
6648 | aarch64_set_FP_float (cpu, sd, aarch64_get_FP_float (cpu, sn) | |
6649 | - aarch64_get_FP_float (cpu, sm)); | |
6650 | } | |
6651 | ||
6652 | /* Double subtract. */ | |
6653 | static void | |
6654 | fsubd (sim_cpu *cpu) | |
6655 | { | |
6656 | unsigned sm = uimm (aarch64_get_instr (cpu), 20, 16); | |
6657 | unsigned sn = uimm (aarch64_get_instr (cpu), 9, 5); | |
6658 | unsigned sd = uimm (aarch64_get_instr (cpu), 4, 0); | |
6659 | ||
6660 | aarch64_set_FP_double (cpu, sd, aarch64_get_FP_double (cpu, sn) | |
6661 | - aarch64_get_FP_double (cpu, sm)); | |
6662 | } | |
6663 | ||
6664 | static void | |
6665 | do_FMINNM (sim_cpu *cpu) | |
6666 | { | |
6667 | /* instr[31,23] = 0 0011 1100 | |
6668 | instr[22] = float(0)/double(1) | |
6669 | instr[21] = 1 | |
6670 | instr[20,16] = Sm | |
6671 | instr[15,10] = 01 1110 | |
6672 | instr[9,5] = Sn | |
6673 | instr[4,0] = Sd */ | |
6674 | ||
6675 | unsigned sm = uimm (aarch64_get_instr (cpu), 20, 16); | |
6676 | unsigned sn = uimm (aarch64_get_instr (cpu), 9, 5); | |
6677 | unsigned sd = uimm (aarch64_get_instr (cpu), 4, 0); | |
6678 | ||
6679 | NYI_assert (31, 23, 0x03C); | |
6680 | NYI_assert (15, 10, 0x1E); | |
6681 | ||
6682 | if (uimm (aarch64_get_instr (cpu), 22, 22)) | |
6683 | aarch64_set_FP_double (cpu, sd, | |
6684 | dminnm (aarch64_get_FP_double (cpu, sn), | |
6685 | aarch64_get_FP_double (cpu, sm))); | |
6686 | else | |
6687 | aarch64_set_FP_float (cpu, sd, | |
6688 | fminnm (aarch64_get_FP_float (cpu, sn), | |
6689 | aarch64_get_FP_float (cpu, sm))); | |
6690 | } | |
6691 | ||
6692 | static void | |
6693 | do_FMAXNM (sim_cpu *cpu) | |
6694 | { | |
6695 | /* instr[31,23] = 0 0011 1100 | |
6696 | instr[22] = float(0)/double(1) | |
6697 | instr[21] = 1 | |
6698 | instr[20,16] = Sm | |
6699 | instr[15,10] = 01 1010 | |
6700 | instr[9,5] = Sn | |
6701 | instr[4,0] = Sd */ | |
6702 | ||
6703 | unsigned sm = uimm (aarch64_get_instr (cpu), 20, 16); | |
6704 | unsigned sn = uimm (aarch64_get_instr (cpu), 9, 5); | |
6705 | unsigned sd = uimm (aarch64_get_instr (cpu), 4, 0); | |
6706 | ||
6707 | NYI_assert (31, 23, 0x03C); | |
6708 | NYI_assert (15, 10, 0x1A); | |
6709 | ||
6710 | if (uimm (aarch64_get_instr (cpu), 22, 22)) | |
6711 | aarch64_set_FP_double (cpu, sd, | |
6712 | dmaxnm (aarch64_get_FP_double (cpu, sn), | |
6713 | aarch64_get_FP_double (cpu, sm))); | |
6714 | else | |
6715 | aarch64_set_FP_float (cpu, sd, | |
6716 | fmaxnm (aarch64_get_FP_float (cpu, sn), | |
6717 | aarch64_get_FP_float (cpu, sm))); | |
6718 | } | |
6719 | ||
6720 | static void | |
6721 | dexSimpleFPDataProc2Source (sim_cpu *cpu) | |
6722 | { | |
6723 | /* instr[31] ==> M : 0 ==> OK, 1 ==> UNALLOC | |
6724 | instr[30] = 0 | |
6725 | instr[29] ==> S : 0 ==> OK, 1 ==> UNALLOC | |
6726 | instr[28,25] = 1111 | |
6727 | instr[24] = 0 | |
6728 | instr[23,22] ==> type : 00 ==> single, 01 ==> double, 1x ==> UNALLOC | |
6729 | instr[21] = 1 | |
6730 | instr[20,16] = Vm | |
6731 | instr[15,12] ==> opcode : 0000 ==> FMUL, 0001 ==> FDIV | |
6732 | 0010 ==> FADD, 0011 ==> FSUB, | |
6733 | 0100 ==> FMAX, 0101 ==> FMIN | |
6734 | 0110 ==> FMAXNM, 0111 ==> FMINNM | |
6735 | 1000 ==> FNMUL, ow ==> UNALLOC | |
6736 | instr[11,10] = 10 | |
6737 | instr[9,5] = Vn | |
6738 | instr[4,0] = Vd */ | |
6739 | ||
6740 | uint32_t M_S = (uimm (aarch64_get_instr (cpu), 31, 31) << 1) | |
6741 | | uimm (aarch64_get_instr (cpu), 29, 29); | |
6742 | uint32_t type = uimm (aarch64_get_instr (cpu), 23, 22); | |
6743 | /* Dispatch on opcode. */ | |
6744 | uint32_t dispatch = uimm (aarch64_get_instr (cpu), 15, 12); | |
6745 | ||
6746 | if (type > 1) | |
6747 | HALT_UNALLOC; | |
6748 | ||
6749 | if (M_S != 0) | |
6750 | HALT_UNALLOC; | |
6751 | ||
6752 | if (type) | |
6753 | switch (dispatch) | |
6754 | { | |
6755 | case 0: fmuld (cpu); return; | |
6756 | case 1: fdivd (cpu); return; | |
6757 | case 2: faddd (cpu); return; | |
6758 | case 3: fsubd (cpu); return; | |
6759 | case 6: do_FMAXNM (cpu); return; | |
6760 | case 7: do_FMINNM (cpu); return; | |
6761 | case 8: fnmuld (cpu); return; | |
6762 | ||
6763 | /* Have not yet implemented fmax and fmin. */ | |
6764 | case 4: | |
6765 | case 5: | |
6766 | HALT_NYI; | |
6767 | ||
6768 | default: | |
6769 | HALT_UNALLOC; | |
6770 | } | |
6771 | else /* type == 0 => floats. */ | |
6772 | switch (dispatch) | |
6773 | { | |
6774 | case 0: fmuls (cpu); return; | |
6775 | case 1: fdivs (cpu); return; | |
6776 | case 2: fadds (cpu); return; | |
6777 | case 3: fsubs (cpu); return; | |
6778 | case 6: do_FMAXNM (cpu); return; | |
6779 | case 7: do_FMINNM (cpu); return; | |
6780 | case 8: fnmuls (cpu); return; | |
6781 | ||
6782 | case 4: | |
6783 | case 5: | |
6784 | HALT_NYI; | |
6785 | ||
6786 | default: | |
6787 | HALT_UNALLOC; | |
6788 | } | |
6789 | } | |
6790 | ||
6791 | static void | |
6792 | dexSimpleFPCondSelect (sim_cpu *cpu) | |
6793 | { | |
6794 | /* FCSEL | |
6795 | instr[31,23] = 0 0011 1100 | |
6796 | instr[22] = 0=>single 1=>double | |
6797 | instr[21] = 1 | |
6798 | instr[20,16] = Sm | |
6799 | instr[15,12] = cond | |
6800 | instr[11,10] = 11 | |
6801 | instr[9,5] = Sn | |
6802 | instr[4,0] = Sd */ | |
6803 | unsigned sm = uimm (aarch64_get_instr (cpu), 20, 16); | |
6804 | unsigned sn = uimm (aarch64_get_instr (cpu), 9, 5); | |
6805 | unsigned sd = uimm (aarch64_get_instr (cpu), 4, 0); | |
6806 | uint32_t set = testConditionCode (cpu, uimm (aarch64_get_instr (cpu), 15, 12)); | |
6807 | ||
6808 | NYI_assert (31, 23, 0x03C); | |
6809 | NYI_assert (11, 10, 0x3); | |
6810 | ||
6811 | if (uimm (aarch64_get_instr (cpu), 22, 22)) | |
6812 | aarch64_set_FP_double (cpu, sd, aarch64_get_FP_double (cpu, set ? sn : sm)); | |
6813 | else | |
6814 | aarch64_set_FP_float (cpu, sd, aarch64_get_FP_float (cpu, set ? sn : sm)); | |
6815 | } | |
6816 | ||
6817 | /* Store 32 bit unscaled signed 9 bit. */ | |
6818 | static void | |
6819 | fsturs (sim_cpu *cpu, int32_t offset) | |
6820 | { | |
6821 | unsigned int rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
6822 | unsigned int st = uimm (aarch64_get_instr (cpu), 4, 0); | |
6823 | ||
6824 | aarch64_set_mem_float (cpu, aarch64_get_reg_u64 (cpu, rn, SP_OK) + offset, | |
6825 | aarch64_get_FP_float (cpu, st)); | |
6826 | } | |
6827 | ||
6828 | /* Store 64 bit unscaled signed 9 bit. */ | |
6829 | static void | |
6830 | fsturd (sim_cpu *cpu, int32_t offset) | |
6831 | { | |
6832 | unsigned int rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
6833 | unsigned int st = uimm (aarch64_get_instr (cpu), 4, 0); | |
6834 | ||
6835 | aarch64_set_mem_double (cpu, aarch64_get_reg_u64 (cpu, rn, SP_OK) + offset, | |
6836 | aarch64_get_FP_double (cpu, st)); | |
6837 | } | |
6838 | ||
6839 | /* Store 128 bit unscaled signed 9 bit. */ | |
6840 | static void | |
6841 | fsturq (sim_cpu *cpu, int32_t offset) | |
6842 | { | |
6843 | unsigned int rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
6844 | unsigned int st = uimm (aarch64_get_instr (cpu), 4, 0); | |
6845 | FRegister a; | |
6846 | ||
6847 | aarch64_get_FP_long_double (cpu, st, & a); | |
6848 | aarch64_set_mem_long_double (cpu, | |
6849 | aarch64_get_reg_u64 (cpu, rn, SP_OK) | |
6850 | + offset, a); | |
6851 | } | |
6852 | ||
6853 | /* TODO FP move register. */ | |
6854 | ||
6855 | /* 32 bit fp to fp move register. */ | |
6856 | static void | |
6857 | ffmovs (sim_cpu *cpu) | |
6858 | { | |
6859 | unsigned int rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
6860 | unsigned int st = uimm (aarch64_get_instr (cpu), 4, 0); | |
6861 | ||
6862 | aarch64_set_FP_float (cpu, st, aarch64_get_FP_float (cpu, rn)); | |
6863 | } | |
6864 | ||
6865 | /* 64 bit fp to fp move register. */ | |
6866 | static void | |
6867 | ffmovd (sim_cpu *cpu) | |
6868 | { | |
6869 | unsigned int rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
6870 | unsigned int st = uimm (aarch64_get_instr (cpu), 4, 0); | |
6871 | ||
6872 | aarch64_set_FP_double (cpu, st, aarch64_get_FP_double (cpu, rn)); | |
6873 | } | |
6874 | ||
6875 | /* 32 bit GReg to Vec move register. */ | |
6876 | static void | |
6877 | fgmovs (sim_cpu *cpu) | |
6878 | { | |
6879 | unsigned int rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
6880 | unsigned int st = uimm (aarch64_get_instr (cpu), 4, 0); | |
6881 | ||
6882 | aarch64_set_vec_u32 (cpu, st, 0, aarch64_get_reg_u32 (cpu, rn, NO_SP)); | |
6883 | } | |
6884 | ||
6885 | /* 64 bit g to fp move register. */ | |
6886 | static void | |
6887 | fgmovd (sim_cpu *cpu) | |
6888 | { | |
6889 | unsigned int rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
6890 | unsigned int st = uimm (aarch64_get_instr (cpu), 4, 0); | |
6891 | ||
6892 | aarch64_set_vec_u64 (cpu, st, 0, aarch64_get_reg_u64 (cpu, rn, NO_SP)); | |
6893 | } | |
6894 | ||
6895 | /* 32 bit fp to g move register. */ | |
6896 | static void | |
6897 | gfmovs (sim_cpu *cpu) | |
6898 | { | |
6899 | unsigned int rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
6900 | unsigned int st = uimm (aarch64_get_instr (cpu), 4, 0); | |
6901 | ||
6902 | aarch64_set_reg_u64 (cpu, st, NO_SP, aarch64_get_vec_u32 (cpu, rn, 0)); | |
6903 | } | |
6904 | ||
6905 | /* 64 bit fp to g move register. */ | |
6906 | static void | |
6907 | gfmovd (sim_cpu *cpu) | |
6908 | { | |
6909 | unsigned int rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
6910 | unsigned int st = uimm (aarch64_get_instr (cpu), 4, 0); | |
6911 | ||
6912 | aarch64_set_reg_u64 (cpu, st, NO_SP, aarch64_get_vec_u64 (cpu, rn, 0)); | |
6913 | } | |
6914 | ||
6915 | /* FP move immediate | |
6916 | ||
6917 | These install an immediate 8 bit value in the target register | |
6918 | where the 8 bits comprise 1 sign bit, 4 bits of fraction and a 3 | |
6919 | bit exponent. */ | |
6920 | ||
6921 | static void | |
6922 | fmovs (sim_cpu *cpu) | |
6923 | { | |
6924 | unsigned int sd = uimm (aarch64_get_instr (cpu), 4, 0); | |
6925 | uint32_t imm = uimm (aarch64_get_instr (cpu), 20, 13); | |
6926 | float f = fp_immediate_for_encoding_32 (imm); | |
6927 | ||
6928 | aarch64_set_FP_float (cpu, sd, f); | |
6929 | } | |
6930 | ||
6931 | static void | |
6932 | fmovd (sim_cpu *cpu) | |
6933 | { | |
6934 | unsigned int sd = uimm (aarch64_get_instr (cpu), 4, 0); | |
6935 | uint32_t imm = uimm (aarch64_get_instr (cpu), 20, 13); | |
6936 | double d = fp_immediate_for_encoding_64 (imm); | |
6937 | ||
6938 | aarch64_set_FP_double (cpu, sd, d); | |
6939 | } | |
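
/* Illustrative sketch only: one plausible expansion of the imm8
   encoding described above (sign a, exponent NOT(b):cd biased by 3,
   fraction efgh), i.e. (-1)^a * 2^(UInt(NOT(b):cd) - 3) * (1 + efgh/16).
   The simulator's real helper, fp_immediate_for_encoding_32, is
   defined elsewhere and may differ in detail.  */
static float
fp8_expand_sketch (uint32_t imm8)
{
  uint32_t sign = (imm8 >> 7) & 1;
  uint32_t b    = (imm8 >> 6) & 1;
  uint32_t cd   = (imm8 >> 4) & 3;
  uint32_t frac = imm8 & 0xF;
  int      exp  = (int) (((b ^ 1) << 2) | cd) - 3;

  /* E.g. imm8 == 0x70 expands to 1.0f and imm8 == 0x00 to 2.0f.  */
  return (sign ? -1.0f : 1.0f) * ldexpf (1.0f + frac / 16.0f, exp);
}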
6940 | ||
6941 | static void | |
6942 | dexSimpleFPImmediate (sim_cpu *cpu) | |
6943 | { | |
6944 | /* instr[31,23] == 0 0011 1100 | |
6945 | instr[22] == type : single(0)/double(1) | |
6946 | instr[21] == 1 | |
6947 | instr[20,13] == imm8 | |
6948 | instr[12,10] == 100 | |
6949 | instr[9,5] == imm5 : 00000 ==> OK, ow ==> UNALLOC | |
6950 | instr[4,0] == Rd */ | |
6951 | uint32_t imm5 = uimm (aarch64_get_instr (cpu), 9, 5); | |
6952 | ||
6953 | NYI_assert (31, 23, 0x3C); | |
6954 | ||
6955 | if (imm5 != 0) | |
6956 | HALT_UNALLOC; | |
6957 | ||
6958 | if (uimm (aarch64_get_instr (cpu), 22, 22)) | |
6959 | fmovd (cpu); | |
6960 | else | |
6961 | fmovs (cpu); | |
6962 | } | |
6963 | ||
6964 | /* TODO specific decode and execute for group Load Store. */ | |
6965 | ||
6966 | /* TODO FP load/store single register (unscaled offset). */ | |
6967 | ||
6968 | /* TODO load 8 bit unscaled signed 9 bit. */ | |
6969 | /* TODO load 16 bit unscaled signed 9 bit. */ | |
6970 | ||
6971 | /* Load 32 bit unscaled signed 9 bit. */ | |
6972 | static void | |
6973 | fldurs (sim_cpu *cpu, int32_t offset) | |
6974 | { | |
6975 | unsigned int rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
6976 | unsigned int st = uimm (aarch64_get_instr (cpu), 4, 0); | |
6977 | ||
6978 | aarch64_set_FP_float (cpu, st, aarch64_get_mem_float | |
6979 | (cpu, aarch64_get_reg_u64 (cpu, rn, SP_OK) + offset)); | |
6980 | } | |
6981 | ||
6982 | /* Load 64 bit unscaled signed 9 bit. */ | |
6983 | static void | |
6984 | fldurd (sim_cpu *cpu, int32_t offset) | |
6985 | { | |
6986 | unsigned int rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
6987 | unsigned int st = uimm (aarch64_get_instr (cpu), 4, 0); | |
6988 | ||
6989 | aarch64_set_FP_double (cpu, st, aarch64_get_mem_double | |
6990 | (cpu, aarch64_get_reg_u64 (cpu, rn, SP_OK) + offset)); | |
6991 | } | |
6992 | ||
6993 | /* Load 128 bit unscaled signed 9 bit. */ | |
6994 | static void | |
6995 | fldurq (sim_cpu *cpu, int32_t offset) | |
6996 | { | |
6997 | unsigned int rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
6998 | unsigned int st = uimm (aarch64_get_instr (cpu), 4, 0); | |
6999 | FRegister a; | |
7000 | uint64_t addr = aarch64_get_reg_u64 (cpu, rn, SP_OK) + offset; | |
7001 | ||
7002 | aarch64_get_mem_long_double (cpu, addr, & a); | |
7003 | aarch64_set_FP_long_double (cpu, st, a); | |
7004 | } | |
7005 | ||
7006 | /* TODO store 8 bit unscaled signed 9 bit. */ | |
7007 | /* TODO store 16 bit unscaled signed 9 bit. */ | |
7008 | ||
7009 | ||
7010 | /* 1 source. */ | |
7011 | ||
7012 | /* Float absolute value. */ | |
7013 | static void | |
7014 | fabss (sim_cpu *cpu) | |
7015 | { | |
7016 | unsigned sn = uimm (aarch64_get_instr (cpu), 9, 5); | |
7017 | unsigned sd = uimm (aarch64_get_instr (cpu), 4, 0); | |
7018 | float value = aarch64_get_FP_float (cpu, sn); | |
7019 | ||
7020 | aarch64_set_FP_float (cpu, sd, fabsf (value)); | |
7021 | } | |
7022 | ||
7023 | /* Double absolute value. */ | |
7024 | static void | |
7025 | fabsd (sim_cpu *cpu) | |
7026 | { | |
7027 | unsigned sn = uimm (aarch64_get_instr (cpu), 9, 5); | |
7028 | unsigned sd = uimm (aarch64_get_instr (cpu), 4, 0); | |
7029 | double value = aarch64_get_FP_double (cpu, sn); | |
7030 | ||
7031 | aarch64_set_FP_double (cpu, sd, fabs (value)); | |
7032 | } | |
7033 | ||
7034 | /* Float negative value. */ | |
7035 | static void | |
7036 | fnegs (sim_cpu *cpu) | |
7037 | { | |
7038 | unsigned sn = uimm (aarch64_get_instr (cpu), 9, 5); | |
7039 | unsigned sd = uimm (aarch64_get_instr (cpu), 4, 0); | |
7040 | ||
7041 | aarch64_set_FP_float (cpu, sd, - aarch64_get_FP_float (cpu, sn)); | |
7042 | } | |
7043 | ||
7044 | /* Double negative value. */ | |
7045 | static void | |
7046 | fnegd (sim_cpu *cpu) | |
7047 | { | |
7048 | unsigned sn = uimm (aarch64_get_instr (cpu), 9, 5); | |
7049 | unsigned sd = uimm (aarch64_get_instr (cpu), 4, 0); | |
7050 | ||
7051 | aarch64_set_FP_double (cpu, sd, - aarch64_get_FP_double (cpu, sn)); | |
7052 | } | |
7053 | ||
7054 | /* Float square root. */ | |
7055 | static void | |
7056 | fsqrts (sim_cpu *cpu) | |
7057 | { | |
7058 | unsigned sn = uimm (aarch64_get_instr (cpu), 9, 5); | |
7059 | unsigned sd = uimm (aarch64_get_instr (cpu), 4, 0); | |
7060 | ||
7061 | aarch64_set_FP_float (cpu, sd, sqrtf (aarch64_get_FP_float (cpu, sn))); | |
7062 | } | |
7063 | ||
7064 | /* Double square root. */ | |
7065 | static void | |
7066 | fsqrtd (sim_cpu *cpu) | |
7067 | { | |
7068 | unsigned sn = uimm (aarch64_get_instr (cpu), 9, 5); | |
7069 | unsigned sd = uimm (aarch64_get_instr (cpu), 4, 0); | |
7070 | ||
7071 | aarch64_set_FP_double (cpu, sd, | |
7072 | sqrt (aarch64_get_FP_double (cpu, sn))); | |
7073 | } | |
7074 | ||
7075 | /* Convert double to float. */ | |
7076 | static void | |
7077 | fcvtds (sim_cpu *cpu) | |
7078 | { | |
7079 | unsigned sn = uimm (aarch64_get_instr (cpu), 9, 5); | |
7080 | unsigned sd = uimm (aarch64_get_instr (cpu), 4, 0); | |
7081 | ||
7082 | aarch64_set_FP_float (cpu, sd, (float) aarch64_get_FP_double (cpu, sn)); | |
7083 | } | |
7084 | ||
7085 | /* Convert float to double. */ | |
7086 | static void | |
7087 | fcvtsd (sim_cpu *cpu) | |
7088 | { | |
7089 | unsigned sn = uimm (aarch64_get_instr (cpu), 9, 5); | |
7090 | unsigned sd = uimm (aarch64_get_instr (cpu), 4, 0); | |
7091 | ||
7092 | aarch64_set_FP_double (cpu, sd, (double) aarch64_get_FP_float (cpu, sn)); | |
7093 | } | |
7094 | ||
7095 | static void | |
7096 | do_FRINT (sim_cpu *cpu) | |
7097 | { | |
7098 | /* instr[31,23] = 0001 1110 0 | |
7099 | instr[22] = single(0)/double(1) | |
7100 | instr[21,18] = 1001 | |
7101 | instr[17,15] = rounding mode | |
7102 | instr[14,10] = 10000 | |
7103 | instr[9,5] = source | |
7104 | instr[4,0] = dest */ | |
7105 | ||
7106 | float val; | |
7107 | unsigned rs = uimm (aarch64_get_instr (cpu), 9, 5); | |
7108 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
7109 | unsigned int rmode = uimm (aarch64_get_instr (cpu), 17, 15); | |
7110 | ||
7111 | NYI_assert (31, 23, 0x03C); | |
7112 | NYI_assert (21, 18, 0x9); | |
7113 | NYI_assert (14, 10, 0x10); | |
7114 | ||
7115 | if (rmode == 6 || rmode == 7) | |
7116 | /* FIXME: Add support for rmode == 6 exactness check. */ | |
7117 | rmode = uimm (aarch64_get_FPSR (cpu), 23, 22); | |
7118 | ||
7119 | if (uimm (aarch64_get_instr (cpu), 22, 22)) | |
7120 | { | |
7121 | double val = aarch64_get_FP_double (cpu, rs); | |
7122 | ||
7123 | switch (rmode) | |
7124 | { | |
7125 | case 0: /* mode N: nearest or even. */ | |
7126 | { | |
7127 | double rval = round (val); | |
7128 | ||
7129 | /* round () rounds ties away from zero; FRINTN wants ties-to-even. */ | |
7130 | if (fabs (val - rval) == 0.5) | |
7131 | { | |
7132 | if (((rval / 2.0) * 2.0) != rval) | |
7133 | rval += (val < 0.0 ? 1.0 : -1.0); | |
7134 | } | |
7135 | aarch64_set_FP_double (cpu, rd, rval); | |
7136 | return; | |
7137 | } | |
7138 | ||
7139 | case 1: /* mode P: towards +inf. */ | |
7140 | aarch64_set_FP_double (cpu, rd, ceil (val)); | |
7141 | return; | |
7142 | ||
7143 | case 2: /* mode M: towards -inf. */ | |
7144 | aarch64_set_FP_double (cpu, rd, floor (val)); | |
7145 | return; | |
7152 | ||
7153 | case 3: /* mode Z: towards 0. */ | |
7154 | aarch64_set_FP_double (cpu, rd, trunc (val)); | |
7155 | return; | |
7156 | ||
7157 | case 4: /* mode A: away from 0. */ | |
7158 | aarch64_set_FP_double (cpu, rd, round (val)); | |
7159 | return; | |
7160 | ||
7161 | case 6: /* mode X: use FPCR with exactness check. */ | |
7162 | case 7: /* mode I: use FPCR mode. */ | |
7163 | HALT_NYI; | |
7164 | ||
7165 | default: | |
7166 | HALT_UNALLOC; | |
7167 | } | |
7168 | } | |
7169 | ||
7170 | val = aarch64_get_FP_float (cpu, rs); | |
7171 | ||
7172 | switch (rmode) | |
7173 | { | |
7174 | case 0: /* mode N: nearest or even. */ | |
7175 | { | |
7176 | float rval = roundf (val); | |
7177 | ||
7178 | /* roundf () rounds ties away from zero; FRINTN wants ties-to-even. */ | |
7179 | if (fabsf (val - rval) == 0.5) | |
7180 | { | |
7181 | if (((rval / 2.0) * 2.0) != rval) | |
7182 | rval += (val < 0.0 ? 1.0 : -1.0); | |
7183 | } | |
7184 | aarch64_set_FP_float (cpu, rd, rval); | |
7185 | return; | |
7186 | } | |
7187 | ||
7188 | case 1: /* mode P: towards +inf. */ | |
7189 | aarch64_set_FP_float (cpu, rd, ceilf (val)); | |
7190 | return; | |
7191 | ||
7192 | case 2: /* mode M: towards -inf. */ | |
7193 | aarch64_set_FP_float (cpu, rd, floorf (val)); | |
7194 | return; | |
7201 | ||
7202 | case 3: /* mode Z: towards 0. */ | |
7203 | aarch64_set_FP_float (cpu, rd, truncf (val)); | |
7204 | return; | |
7205 | ||
7206 | case 4: /* mode A: away from 0. */ | |
7207 | aarch64_set_FP_float (cpu, rd, roundf (val)); | |
7208 | return; | |
7209 | ||
7210 | case 6: /* mode X: use FPCR with exactness check. */ | |
7211 | case 7: /* mode I: use FPCR mode. */ | |
7212 | HALT_NYI; | |
7213 | ||
7214 | default: | |
7215 | HALT_UNALLOC; | |
7216 | } | |
7217 | } | |
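
/* Worked examples of the rounding modes above:
   FRINTN (ties-to-even): 2.5 -> 2.0, 3.5 -> 4.0, -2.5 -> -2.0
   FRINTP (towards +inf): 1.2 -> 2.0, -1.2 -> -1.0
   FRINTM (towards -inf): 1.2 -> 1.0, -1.2 -> -2.0
   FRINTZ (towards zero): 1.8 -> 1.0, -1.8 -> -1.0.  */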
7218 | ||
7219 | static void | |
7220 | dexSimpleFPDataProc1Source (sim_cpu *cpu) | |
7221 | { | |
7222 | /* instr[31] ==> M : 0 ==> OK, 1 ==> UNALLOC | |
7223 | instr[30] = 0 | |
7224 | instr[29] ==> S : 0 ==> OK, 1 ==> UNALLOC | |
7225 | instr[28,25] = 1111 | |
7226 | instr[24] = 0 | |
7227 | instr[23,22] ==> type : 00 ==> source is single, | |
7228 | 01 ==> source is double | |
7229 | 10 ==> UNALLOC | |
7230 | 11 ==> UNALLOC or source is half | |
7231 | instr[21] = 1 | |
7232 | instr[20,15] ==> opcode : with type 00 or 01 | |
7233 | 000000 ==> FMOV, 000001 ==> FABS, | |
7234 | 000010 ==> FNEG, 000011 ==> FSQRT, | |
7235 | 000100 ==> FCVT (to single), 000101 ==> FCVT (to double), | |
7236 | 000110 ==> UNALLOC, 000111 ==> FCVT (to half) | |
7237 | 001000 ==> FRINTN, 001001 ==> FRINTP, | |
7238 | 001010 ==> FRINTM, 001011 ==> FRINTZ, | |
7239 | 001100 ==> FRINTA, 001101 ==> UNALLOC | |
7240 | 001110 ==> FRINTX, 001111 ==> FRINTI | |
7241 | with type 11 | |
7242 | 000100 ==> FCVT (half-to-single) | |
7243 | 000101 ==> FCVT (half-to-double) | |
7244 | instr[14,10] = 10000. */ | |
7245 | ||
7246 | uint32_t M_S = (uimm (aarch64_get_instr (cpu), 31, 31) << 1) | |
7247 | | uimm (aarch64_get_instr (cpu), 29, 29); | |
7248 | uint32_t type = uimm (aarch64_get_instr (cpu), 23, 22); | |
7249 | uint32_t opcode = uimm (aarch64_get_instr (cpu), 20, 15); | |
7250 | ||
7251 | if (M_S != 0) | |
7252 | HALT_UNALLOC; | |
7253 | ||
7254 | if (type == 3) | |
7255 | { | |
7256 | if (opcode == 4 || opcode == 5) | |
7257 | HALT_NYI; | |
7258 | else | |
7259 | HALT_UNALLOC; | |
7260 | } | |
7261 | ||
7262 | if (type == 2) | |
7263 | HALT_UNALLOC; | |
7264 | ||
7265 | switch (opcode) | |
7266 | { | |
7267 | case 0: | |
7268 | if (type) | |
7269 | ffmovd (cpu); | |
7270 | else | |
7271 | ffmovs (cpu); | |
7272 | return; | |
7273 | ||
7274 | case 1: | |
7275 | if (type) | |
7276 | fabsd (cpu); | |
7277 | else | |
7278 | fabss (cpu); | |
7279 | return; | |
7280 | ||
7281 | case 2: | |
7282 | if (type) | |
7283 | fnegd (cpu); | |
7284 | else | |
7285 | fnegs (cpu); | |
7286 | return; | |
7287 | ||
7288 | case 3: | |
7289 | if (type) | |
7290 | fsqrtd (cpu); | |
7291 | else | |
7292 | fsqrts (cpu); | |
7293 | return; | |
7294 | ||
7295 | case 4: | |
7296 | if (type) | |
7297 | fcvtds (cpu); | |
7298 | else | |
7299 | HALT_UNALLOC; | |
7300 | return; | |
7301 | ||
7302 | case 5: | |
7303 | if (type) | |
7304 | HALT_UNALLOC; | |
7305 | fcvtsd (cpu); | |
7306 | return; | |
7307 | ||
7308 | case 8: /* FRINTN etc. */ | |
7309 | case 9: | |
7310 | case 10: | |
7311 | case 11: | |
7312 | case 12: | |
7313 | case 14: | |
7314 | case 15: | |
7315 | do_FRINT (cpu); | |
7316 | return; | |
7317 | ||
7318 | case 7: /* FCVT double/single to half precision. */ | |
7319 | case 13: | |
7320 | HALT_NYI; | |
7321 | ||
7322 | default: | |
7323 | HALT_UNALLOC; | |
7324 | } | |
7325 | } | |
7326 | ||
7327 | /* 32 bit signed int to float. */ | |
7328 | static void | |
7329 | scvtf32 (sim_cpu *cpu) | |
7330 | { | |
7331 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
7332 | unsigned sd = uimm (aarch64_get_instr (cpu), 4, 0); | |
7333 | ||
7334 | aarch64_set_FP_float | |
7335 | (cpu, sd, (float) aarch64_get_reg_s32 (cpu, rn, NO_SP)); | |
7336 | } | |
7337 | ||
7338 | /* 64 bit signed int to float. */ | |
7339 | static void | |
7340 | scvtf (sim_cpu *cpu) | |
7341 | { | |
7342 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
7343 | unsigned sd = uimm (aarch64_get_instr (cpu), 4, 0); | |
7344 | ||
7345 | aarch64_set_FP_float | |
7346 | (cpu, sd, (float) aarch64_get_reg_s64 (cpu, rn, NO_SP)); | |
7347 | } | |
7348 | ||
7349 | /* 32 bit signed int to double. */ | |
7350 | static void | |
7351 | scvtd32 (sim_cpu *cpu) | |
7352 | { | |
7353 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
7354 | unsigned sd = uimm (aarch64_get_instr (cpu), 4, 0); | |
7355 | ||
7356 | aarch64_set_FP_double | |
7357 | (cpu, sd, (double) aarch64_get_reg_s32 (cpu, rn, NO_SP)); | |
7358 | } | |
7359 | ||
7360 | /* 64 bit signed int to double. */ | |
7361 | static void | |
7362 | scvtd (sim_cpu *cpu) | |
7363 | { | |
7364 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
7365 | unsigned sd = uimm (aarch64_get_instr (cpu), 4, 0); | |
7366 | ||
7367 | aarch64_set_FP_double | |
7368 | (cpu, sd, (double) aarch64_get_reg_s64 (cpu, rn, NO_SP)); | |
7369 | } | |
7370 | ||
7371 | static const float FLOAT_INT_MAX = (float) INT_MAX; | |
7372 | static const float FLOAT_INT_MIN = (float) INT_MIN; | |
7373 | static const double DOUBLE_INT_MAX = (double) INT_MAX; | |
7374 | static const double DOUBLE_INT_MIN = (double) INT_MIN; | |
7375 | static const float FLOAT_LONG_MAX = (float) LONG_MAX; | |
7376 | static const float FLOAT_LONG_MIN = (float) LONG_MIN; | |
7377 | static const double DOUBLE_LONG_MAX = (double) LONG_MAX; | |
7378 | static const double DOUBLE_LONG_MIN = (double) LONG_MIN; | |
7379 | ||
7380 | /* Check for FP exception conditions: | |
7381 | NaN raises IO | |
7382 | Infinity raises IO | |
7383 | Out of Range raises IO and IX and saturates value | |
7384 | Denormal raises ID and IX and sets to zero. */ | |
7385 | #define RAISE_EXCEPTIONS(F, VALUE, FTYPE, ITYPE) \ | |
7386 | do \ | |
7387 | { \ | |
7388 | switch (fpclassify (F)) \ | |
7389 | { \ | |
7390 | case FP_INFINITE: \ | |
7391 | case FP_NAN: \ | |
7392 | aarch64_set_FPSR (cpu, IO); \ | |
7393 | if (signbit (F)) \ | |
7394 | VALUE = ITYPE##_MIN; \ | |
7395 | else \ | |
7396 | VALUE = ITYPE##_MAX; \ | |
7397 | break; \ | |
7398 | \ | |
7399 | case FP_NORMAL: \ | |
7400 | if (F >= FTYPE##_##ITYPE##_MAX) \ | |
7401 | { \ | |
7402 | aarch64_set_FPSR_bits (cpu, IO | IX, IO | IX); \ | |
7403 | VALUE = ITYPE##_MAX; \ | |
7404 | } \ | |
7405 | else if (F <= FTYPE##_##ITYPE##_MIN) \ | |
7406 | { \ | |
7407 | aarch64_set_FPSR_bits (cpu, IO | IX, IO | IX); \ | |
7408 | VALUE = ITYPE##_MIN; \ | |
7409 | } \ | |
7410 | break; \ | |
7411 | \ | |
7412 | case FP_SUBNORMAL: \ | |
7413 | aarch64_set_FPSR_bits (cpu, IO | IX | ID, IX | ID); \ | |
7414 | VALUE = 0; \ | |
7415 | break; \ | |
7416 | \ | |
7417 | default: \ | |
7418 | case FP_ZERO: \ | |
7419 | VALUE = 0; \ | |
7420 | break; \ | |
7421 | } \ | |
7422 | } \ | |
7423 | while (0) | |
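
/* Standalone sketch (illustration only, with no FPSR side effects)
   of why FLOAT_INT_MAX et al. are pre-computed: the range check must
   be made in the floating-point domain, because casting an
   out-of-range float to an integer type is undefined behaviour in C.
   It mirrors the saturation choices of RAISE_EXCEPTIONS above.  */
static int32_t
sat_float_to_int32_sketch (float f)
{
  if (isnan (f) || isinf (f))
    return signbit (f) ? INT_MIN : INT_MAX;
  if (f >= FLOAT_INT_MAX)
    return INT_MAX;           /* Saturate high.  */
  if (f <= FLOAT_INT_MIN)
    return INT_MIN;           /* Saturate low.  */
  return (int32_t) f;         /* In range: truncates towards zero.  */
}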
7424 | ||
7425 | /* 32 bit convert float to signed int truncate towards zero. */ | |
7426 | static void | |
7427 | fcvtszs32 (sim_cpu *cpu) | |
7428 | { | |
7429 | unsigned sn = uimm (aarch64_get_instr (cpu), 9, 5); | |
7430 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
7431 | /* The C cast truncates towards zero, as required. */ | |
7432 | float f = aarch64_get_FP_float (cpu, sn); | |
7433 | int32_t value = (int32_t) f; | |
7434 | ||
7435 | RAISE_EXCEPTIONS (f, value, FLOAT, INT); | |
7436 | ||
7437 | /* Avoid sign extension to 64 bit. */ | |
7438 | aarch64_set_reg_u64 (cpu, rd, NO_SP, (uint32_t) value); | |
7439 | } | |
7440 | ||
7441 | /* 64 bit convert float to signed int truncate towards zero. */ | |
7442 | static void | |
7443 | fcvtszs (sim_cpu *cpu) | |
7444 | { | |
7445 | unsigned sn = uimm (aarch64_get_instr (cpu), 9, 5); | |
7446 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
7447 | float f = aarch64_get_FP_float (cpu, sn); | |
7448 | int64_t value = (int64_t) f; | |
7449 | ||
7450 | RAISE_EXCEPTIONS (f, value, FLOAT, LONG); | |
7451 | ||
7452 | aarch64_set_reg_s64 (cpu, rd, NO_SP, value); | |
7453 | } | |
7454 | ||
7455 | /* 32 bit convert double to signed int truncate towards zero. */ | |
7456 | static void | |
7457 | fcvtszd32 (sim_cpu *cpu) | |
7458 | { | |
7459 | unsigned sn = uimm (aarch64_get_instr (cpu), 9, 5); | |
7460 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
7461 | /* The C cast truncates towards zero, as required. */ | |
7462 | double d = aarch64_get_FP_double (cpu, sn); | |
7463 | int32_t value = (int32_t) d; | |
7464 | ||
7465 | RAISE_EXCEPTIONS (d, value, DOUBLE, INT); | |
7466 | ||
7467 | /* Avoid sign extension to 64 bit. */ | |
7468 | aarch64_set_reg_u64 (cpu, rd, NO_SP, (uint32_t) value); | |
7469 | } | |
7470 | ||
7471 | /* 64 bit convert double to signed int truncate towards zero. */ | |
7472 | static void | |
7473 | fcvtszd (sim_cpu *cpu) | |
7474 | { | |
7475 | unsigned sn = uimm (aarch64_get_instr (cpu), 9, 5); | |
7476 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
7477 | /* The C cast truncates towards zero, as required. */ | |
7478 | double d = aarch64_get_FP_double (cpu, sn); | |
7479 | int64_t value; | |
7480 | ||
7481 | value = (int64_t) d; | |
7482 | ||
7483 | RAISE_EXCEPTIONS (d, value, DOUBLE, LONG); | |
7484 | ||
7485 | aarch64_set_reg_s64 (cpu, rd, NO_SP, value); | |
7486 | } | |
7487 | ||
7488 | static void | |
7489 | do_fcvtzu (sim_cpu *cpu) | |
7490 | { | |
7491 | /* instr[31] = size: 32-bit (0), 64-bit (1) | |
7492 | instr[30,23] = 00111100 | |
7493 | instr[22] = type: single (0)/ double (1) | |
7494 | instr[21] = enable (0)/disable(1) precision | |
7495 | instr[20,16] = 11001 | |
7496 | instr[15,10] = precision | |
7497 | instr[9,5] = Rs | |
7498 | instr[4,0] = Rd. */ | |
7499 | ||
7500 | unsigned rs = uimm (aarch64_get_instr (cpu), 9, 5); | |
7501 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
7502 | ||
7503 | NYI_assert (30, 23, 0x3C); | |
7504 | NYI_assert (20, 16, 0x19); | |
7505 | ||
7506 | if (uimm (aarch64_get_instr (cpu), 21, 21) != 1) | |
7507 | /* Convert to fixed point. */ | |
7508 | HALT_NYI; | |
7509 | ||
7510 | if (uimm (aarch64_get_instr (cpu), 31, 31)) | |
7511 | { | |
7512 | /* Convert to unsigned 64-bit integer. */ | |
7513 | if (uimm (aarch64_get_instr (cpu), 22, 22)) | |
7514 | { | |
7515 | double d = aarch64_get_FP_double (cpu, rs); | |
7516 | uint64_t value = (uint64_t) d; | |
7517 | ||
7518 | /* Do not raise exceptions if the conversion has saturated at 1UL << 63. */ | |
7519 | if (value != (1UL << 63)) | |
7520 | RAISE_EXCEPTIONS (d, value, DOUBLE, LONG); | |
7521 | ||
7522 | aarch64_set_reg_u64 (cpu, rd, NO_SP, value); | |
7523 | } | |
7524 | else | |
7525 | { | |
7526 | float f = aarch64_get_FP_float (cpu, rs); | |
7527 | uint64_t value = (uint64_t) f; | |
7528 | ||
7529 | /* Do not raise exceptions if the conversion has saturated at 1UL << 63. */ | |
7530 | if (value != (1UL << 63)) | |
7531 | RAISE_EXCEPTIONS (f, value, FLOAT, LONG); | |
7532 | ||
7533 | aarch64_set_reg_u64 (cpu, rd, NO_SP, value); | |
7534 | } | |
7535 | } | |
7536 | else | |
7537 | { | |
7538 | uint32_t value; | |
7539 | ||
7540 | /* Convert to unsigned 32-bit integer. */ | |
7541 | if (uimm (aarch64_get_instr (cpu), 22, 22)) | |
7542 | { | |
7543 | double d = aarch64_get_FP_double (cpu, rs); | |
7544 | ||
7545 | value = (uint32_t) d; | |
7546 | /* Do not raise exceptions if the conversion has saturated at 1UL << 31. */ | |
7547 | if (value != (1UL << 31)) | |
7548 | RAISE_EXCEPTIONS (d, value, DOUBLE, INT); | |
7549 | } | |
7550 | else | |
7551 | { | |
7552 | float f = aarch64_get_FP_float (cpu, rs); | |
7553 | ||
7554 | value = (uint32_t) f; | |
7555 | /* Do not raise exceptions if the conversion has saturated at 1UL << 31. */ | |
7556 | if (value != (1UL << 31)) | |
7557 | RAISE_EXCEPTIONS (f, value, FLOAT, INT); | |
7558 | } | |
7559 | ||
7560 | aarch64_set_reg_u64 (cpu, rd, NO_SP, value); | |
7561 | } | |
7562 | } | |
7563 | ||
7564 | static void | |
7565 | do_UCVTF (sim_cpu *cpu) | |
7566 | { | |
7567 | /* instr[31] = size: 32-bit (0), 64-bit (1) | |
7568 | instr[30,23] = 001 1110 0 | |
7569 | instr[22] = type: single (0)/ double (1) | |
7570 | instr[21] = enable (0)/disable(1) precision | |
7571 | instr[20,16] = 0 0011 | |
7572 | instr[15,10] = precision | |
7573 | instr[9,5] = Rs | |
7574 | instr[4,0] = Rd. */ | |
7575 | ||
7576 | unsigned rs = uimm (aarch64_get_instr (cpu), 9, 5); | |
7577 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
7578 | ||
7579 | NYI_assert (30, 23, 0x3C); | |
7580 | NYI_assert (20, 16, 0x03); | |
7581 | ||
7582 | if (uimm (aarch64_get_instr (cpu), 21, 21) != 1) | |
7583 | HALT_NYI; | |
7584 | ||
7585 | /* FIXME: Add exception raising. */ | |
7586 | if (uimm (aarch64_get_instr (cpu), 31, 31)) | |
7587 | { | |
7588 | uint64_t value = aarch64_get_reg_u64 (cpu, rs, NO_SP); | |
7589 | ||
7590 | if (uimm (aarch64_get_instr (cpu), 22, 22)) | |
7591 | aarch64_set_FP_double (cpu, rd, (double) value); | |
7592 | else | |
7593 | aarch64_set_FP_float (cpu, rd, (float) value); | |
7594 | } | |
7595 | else | |
7596 | { | |
7597 | uint32_t value = aarch64_get_reg_u32 (cpu, rs, NO_SP); | |
7598 | ||
7599 | if (uimm (aarch64_get_instr (cpu), 22, 22)) | |
7600 | aarch64_set_FP_double (cpu, rd, (double) value); | |
7601 | else | |
7602 | aarch64_set_FP_float (cpu, rd, (float) value); | |
7603 | } | |
7604 | } | |
7605 | ||
7606 | static void | |
7607 | float_vector_move (sim_cpu *cpu) | |
7608 | { | |
7609 | /* instr[31,17] == 100 1111 0101 0111 | |
7610 | instr[16] ==> direction 0=> to GR, 1=> from GR | |
7611 | instr[15,10] => ??? | |
7612 | instr[9,5] ==> source | |
7613 | instr[4,0] ==> dest. */ | |
7614 | ||
7615 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
7616 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
7617 | ||
7618 | NYI_assert (31, 17, 0x4F57); | |
7619 | ||
7620 | if (uimm (aarch64_get_instr (cpu), 15, 10) != 0) | |
7621 | HALT_UNALLOC; | |
7622 | ||
7623 | if (uimm (aarch64_get_instr (cpu), 16, 16)) | |
7624 | aarch64_set_vec_u64 (cpu, rd, 1, aarch64_get_reg_u64 (cpu, rn, NO_SP)); | |
7625 | else | |
7626 | aarch64_set_reg_u64 (cpu, rd, NO_SP, aarch64_get_vec_u64 (cpu, rn, 1)); | |
7627 | } | |
7628 | ||
7629 | static void | |
7630 | dexSimpleFPIntegerConvert (sim_cpu *cpu) | |
7631 | { | |
7632 | /* instr[31] = size : 0 ==> 32 bit, 1 ==> 64 bit | |
7633 | instr[30] = 0 | |
7634 | instr[29] = S : 0 ==> OK, 1 ==> UNALLOC | |
7635 | instr[28,25] = 1111 | |
7636 | instr[24] = 0 | |
7637 | instr[23,22] = type : 00 ==> single, 01 ==> double, 1x ==> UNALLOC | |
7638 | instr[21] = 1 | |
7639 | instr[20,19] = rmode | |
7640 | instr[18,16] = opcode | |
7641 | instr[15,10] = 10 0000 */ | |
7642 | ||
7643 | uint32_t rmode_opcode; | |
7644 | uint32_t size_type; | |
7645 | uint32_t type; | |
7646 | uint32_t size; | |
7647 | uint32_t S; | |
7648 | ||
7649 | if (uimm (aarch64_get_instr (cpu), 31, 17) == 0x4F57) | |
7650 | { | |
7651 | float_vector_move (cpu); | |
7652 | return; | |
7653 | } | |
7654 | ||
7655 | size = uimm (aarch64_get_instr (cpu), 31, 31); | |
7656 | S = uimm (aarch64_get_instr (cpu), 29, 29); | |
7657 | if (S != 0) | |
7658 | HALT_UNALLOC; | |
7659 | ||
7660 | type = uimm (aarch64_get_instr (cpu), 23, 22); | |
7661 | if (type > 1) | |
7662 | HALT_UNALLOC; | |
7663 | ||
7664 | rmode_opcode = uimm (aarch64_get_instr (cpu), 20, 16); | |
7665 | size_type = (size << 1) | type; /* 0==32f, 1==32d, 2==64f, 3==64d. */ | |
7666 | ||
7667 | switch (rmode_opcode) | |
7668 | { | |
7669 | case 2: /* SCVTF. */ | |
7670 | switch (size_type) | |
7671 | { | |
7672 | case 0: scvtf32 (cpu); return; | |
7673 | case 1: scvtd32 (cpu); return; | |
7674 | case 2: scvtf (cpu); return; | |
7675 | case 3: scvtd (cpu); return; | |
7676 | default: | |
7677 | HALT_UNREACHABLE; | |
7678 | } | |
7679 | ||
7680 | case 6: /* FMOV GR, Vec. */ | |
7681 | switch (size_type) | |
7682 | { | |
7683 | case 0: gfmovs (cpu); return; | |
7684 | case 3: gfmovd (cpu); return; | |
7685 | default: HALT_UNALLOC; | |
7686 | } | |
7687 | ||
7688 | case 7: /* FMOV vec, GR. */ | |
7689 | switch (size_type) | |
7690 | { | |
7691 | case 0: fgmovs (cpu); return; | |
7692 | case 3: fgmovd (cpu); return; | |
7693 | default: HALT_UNALLOC; | |
7694 | } | |
7695 | ||
7696 | case 24: /* FCVTZS. */ | |
7697 | switch (size_type) | |
7698 | { | |
7699 | case 0: fcvtszs32 (cpu); return; | |
7700 | case 1: fcvtszd32 (cpu); return; | |
7701 | case 2: fcvtszs (cpu); return; | |
7702 | case 3: fcvtszd (cpu); return; | |
7703 | default: HALT_UNREACHABLE; | |
7704 | } | |
7705 | ||
7706 | case 25: do_fcvtzu (cpu); return; | |
7707 | case 3: do_UCVTF (cpu); return; | |
7708 | ||
7709 | case 0: /* FCVTNS. */ | |
7710 | case 1: /* FCVTNU. */ | |
7711 | case 4: /* FCVTAS. */ | |
7712 | case 5: /* FCVTAU. */ | |
7713 | case 8: /* FCVTPS. */ | |
7714 | case 9: /* FCVTPU. */ | |
7715 | case 16: /* FCVTMS. */ | |
7716 | case 17: /* FCVTMU. */ | |
7717 | default: | |
7718 | HALT_NYI; | |
7719 | } | |
7720 | } | |
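
/* Example decode (illustrative): SCVTF Dd, Xn has size = 1 and
   type = 01, so size_type == 3, while rmode:opcode = 00:010 gives
   rmode_opcode == 2, dispatching to scvtd above.  */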
7721 | ||
7722 | static void | |
7723 | set_flags_for_float_compare (sim_cpu *cpu, float fvalue1, float fvalue2) | |
7724 | { | |
7725 | uint32_t flags; | |
7726 | ||
7727 | if (isnan (fvalue1) || isnan (fvalue2)) | |
7728 | flags = C|V; | |
7729 | else | |
7730 | { | |
7731 | float result = fvalue1 - fvalue2; | |
7732 | ||
7733 | if (result == 0.0) | |
7734 | flags = Z|C; | |
7735 | else if (result < 0) | |
7736 | flags = N; | |
7737 | else /* (result > 0). */ | |
7738 | flags = C; | |
7739 | } | |
7740 | ||
7741 | aarch64_set_CPSR (cpu, flags); | |
7742 | } | |
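
/* The mapping above follows the architected FCMP result encoding:
   equal -> Z C, less than -> N, greater than -> C, unordered -> C V.  */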
7743 | ||
7744 | static void | |
7745 | fcmps (sim_cpu *cpu) | |
7746 | { | |
7747 | unsigned sm = uimm (aarch64_get_instr (cpu), 20, 16); | |
7748 | unsigned sn = uimm (aarch64_get_instr (cpu), 9, 5); | |
7749 | ||
7750 | float fvalue1 = aarch64_get_FP_float (cpu, sn); | |
7751 | float fvalue2 = aarch64_get_FP_float (cpu, sm); | |
7752 | ||
7753 | set_flags_for_float_compare (cpu, fvalue1, fvalue2); | |
7754 | } | |
7755 | ||
7756 | /* Float compare to zero -- Invalid Operation exception | |
7757 | only on signaling NaNs. */ | |
7758 | static void | |
7759 | fcmpzs (sim_cpu *cpu) | |
7760 | { | |
7761 | unsigned sn = uimm (aarch64_get_instr (cpu), 9, 5); | |
7762 | float fvalue1 = aarch64_get_FP_float (cpu, sn); | |
7763 | ||
7764 | set_flags_for_float_compare (cpu, fvalue1, 0.0f); | |
7765 | } | |
7766 | ||
7767 | /* Float compare -- Invalid Operation exception on all NaNs. */ | |
7768 | static void | |
7769 | fcmpes (sim_cpu *cpu) | |
7770 | { | |
7771 | unsigned sm = uimm (aarch64_get_instr (cpu), 20, 16); | |
7772 | unsigned sn = uimm (aarch64_get_instr (cpu), 9, 5); | |
7773 | ||
7774 | float fvalue1 = aarch64_get_FP_float (cpu, sn); | |
7775 | float fvalue2 = aarch64_get_FP_float (cpu, sm); | |
7776 | ||
7777 | set_flags_for_float_compare (cpu, fvalue1, fvalue2); | |
7778 | } | |
7779 | ||
7780 | /* Float compare to zero -- Invalid Operation exception on all NaNs. */ | |
7781 | static void | |
7782 | fcmpzes (sim_cpu *cpu) | |
7783 | { | |
7784 | unsigned sn = uimm (aarch64_get_instr (cpu), 9, 5); | |
7785 | float fvalue1 = aarch64_get_FP_float (cpu, sn); | |
7786 | ||
7787 | set_flags_for_float_compare (cpu, fvalue1, 0.0f); | |
7788 | } | |
7789 | ||
7790 | static void | |
7791 | set_flags_for_double_compare (sim_cpu *cpu, double dval1, double dval2) | |
7792 | { | |
7793 | uint32_t flags; | |
7794 | ||
7795 | if (isnan (dval1) || isnan (dval2)) | |
7796 | flags = C|V; | |
7797 | else | |
7798 | { | |
7799 | double result = dval1 - dval2; | |
7800 | ||
7801 | if (result == 0.0) | |
7802 | flags = Z|C; | |
7803 | else if (result < 0) | |
7804 | flags = N; | |
7805 | else /* (result > 0). */ | |
7806 | flags = C; | |
7807 | } | |
7808 | ||
7809 | aarch64_set_CPSR (cpu, flags); | |
7810 | } | |
7811 | ||
7812 | /* Double compare -- Invalid Operation exception only on signaling NaNs. */ | |
7813 | static void | |
7814 | fcmpd (sim_cpu *cpu) | |
7815 | { | |
7816 | unsigned sm = uimm (aarch64_get_instr (cpu), 20, 16); | |
7817 | unsigned sn = uimm (aarch64_get_instr (cpu), 9, 5); | |
7818 | ||
7819 | double dvalue1 = aarch64_get_FP_double (cpu, sn); | |
7820 | double dvalue2 = aarch64_get_FP_double (cpu, sm); | |
7821 | ||
7822 | set_flags_for_double_compare (cpu, dvalue1, dvalue2); | |
7823 | } | |
7824 | ||
7825 | /* Double compare to zero -- Invalid Operation exception | |
7826 | only on signaling NaNs. */ | |
7827 | static void | |
7828 | fcmpzd (sim_cpu *cpu) | |
7829 | { | |
7830 | unsigned sn = uimm (aarch64_get_instr (cpu), 9, 5); | |
7831 | double dvalue1 = aarch64_get_FP_double (cpu, sn); | |
7832 | ||
7833 | set_flags_for_double_compare (cpu, dvalue1, 0.0); | |
7834 | } | |
7835 | ||
7836 | /* Double compare -- Invalid Operation exception on all NaNs. */ | |
7837 | static void | |
7838 | fcmped (sim_cpu *cpu) | |
7839 | { | |
7840 | unsigned sm = uimm (aarch64_get_instr (cpu), 20, 16); | |
7841 | unsigned sn = uimm (aarch64_get_instr (cpu), 9, 5); | |
7842 | ||
7843 | double dvalue1 = aarch64_get_FP_double (cpu, sn); | |
7844 | double dvalue2 = aarch64_get_FP_double (cpu, sm); | |
7845 | ||
7846 | set_flags_for_double_compare (cpu, dvalue1, dvalue2); | |
7847 | } | |
7848 | ||
7849 | /* Double compare to zero -- Invalid Operation exception on all NaNs. */ | |
7850 | static void | |
7851 | fcmpzed (sim_cpu *cpu) | |
7852 | { | |
7853 | unsigned sn = uimm (aarch64_get_instr (cpu), 9, 5); | |
7854 | double dvalue1 = aarch64_get_FP_double (cpu, sn); | |
7855 | ||
7856 | set_flags_for_double_compare (cpu, dvalue1, 0.0); | |
7857 | } | |
7858 | ||
7859 | static void | |
7860 | dexSimpleFPCompare (sim_cpu *cpu) | |
7861 | { | |
7862 | /* assert instr[28,25] == 1111 | |
7863 | instr[30:24:21:13,10] = 0011000 | |
7864 | instr[31] = M : 0 ==> OK, 1 ==> UNALLOC | |
7865 | instr[29] ==> S : 0 ==> OK, 1 ==> UNALLOC | |
7866 | instr[23,22] ==> type : 00 ==> single, 01 ==> double, 1x ==> UNALLOC | |
7867 | instr[15,14] ==> op : 00 ==> OK, ow ==> UNALLOC | |
7868 | instr[4,0] ==> opcode2 : 00000 ==> FCMP, 10000 ==> FCMPE, | |
7869 | 01000 ==> FCMPZ, 11000 ==> FCMPEZ, | |
7870 | ow ==> UNALLOC */ | |
7871 | uint32_t dispatch; | |
7872 | uint32_t M_S = (uimm (aarch64_get_instr (cpu), 31, 31) << 1) | |
7873 | | uimm (aarch64_get_instr (cpu), 29, 29); | |
7874 | uint32_t type = uimm (aarch64_get_instr (cpu), 23, 22); | |
7875 | uint32_t op = uimm (aarch64_get_instr (cpu), 15, 14); | |
7876 | uint32_t op2_2_0 = uimm (aarch64_get_instr (cpu), 2, 0); | |
7877 | ||
7878 | if (op2_2_0 != 0) | |
7879 | HALT_UNALLOC; | |
7880 | ||
7881 | if (M_S != 0) | |
7882 | HALT_UNALLOC; | |
7883 | ||
7884 | if (type > 1) | |
7885 | HALT_UNALLOC; | |
7886 | ||
7887 | if (op != 0) | |
7888 | HALT_UNALLOC; | |
7889 | ||
7890 | /* dispatch on type and top 2 bits of opcode. */ | |
7891 | dispatch = (type << 2) | uimm (aarch64_get_instr (cpu), 4, 3); | |
7892 | ||
7893 | switch (dispatch) | |
7894 | { | |
7895 | case 0: fcmps (cpu); return; | |
7896 | case 1: fcmpzs (cpu); return; | |
7897 | case 2: fcmpes (cpu); return; | |
7898 | case 3: fcmpzes (cpu); return; | |
7899 | case 4: fcmpd (cpu); return; | |
7900 | case 5: fcmpzd (cpu); return; | |
7901 | case 6: fcmped (cpu); return; | |
7902 | case 7: fcmpzed (cpu); return; | |
7903 | default: HALT_UNREACHABLE; | |
7904 | } | |
7905 | } | |
7906 | ||
7907 | static void | |
7908 | do_scalar_FADDP (sim_cpu *cpu) | |
7909 | { | |
7910 | /* instr [31,23] = 011111100 | |
7911 | instr [22] = single(0)/double(1) | |
7912 | instr [21,10] = 1100 0011 0110 | |
7913 | instr [9,5] = Fn | |
7914 | instr [4,0] = Fd. */ | |
7915 | ||
7916 | unsigned Fn = uimm (aarch64_get_instr (cpu), 9, 5); | |
7917 | unsigned Fd = uimm (aarch64_get_instr (cpu), 4, 0); | |
7918 | ||
7919 | NYI_assert (31, 23, 0x0FC); | |
7920 | NYI_assert (21, 10, 0xC36); | |
7921 | ||
7922 | if (uimm (aarch64_get_instr (cpu), 22, 22)) | |
7923 | { | |
7924 | double val1 = aarch64_get_vec_double (cpu, Fn, 0); | |
7925 | double val2 = aarch64_get_vec_double (cpu, Fn, 1); | |
7926 | ||
7927 | aarch64_set_FP_double (cpu, Fd, val1 + val2); | |
7928 | } | |
7929 | else | |
7930 | { | |
7931 | float val1 = aarch64_get_vec_float (cpu, Fn, 0); | |
7932 | float val2 = aarch64_get_vec_float (cpu, Fn, 1); | |
7933 | ||
7934 | aarch64_set_FP_float (cpu, Fd, val1 + val2); | |
7935 | } | |
7936 | } | |
7937 | ||
7938 | /* Floating point absolute difference. */ | |
7939 | ||
7940 | static void | |
7941 | do_scalar_FABD (sim_cpu *cpu) | |
7942 | { | |
7943 | /* instr [31,23] = 0111 1110 1 | |
7944 | instr [22] = float(0)/double(1) | |
7945 | instr [21] = 1 | |
7946 | instr [20,16] = Rm | |
7947 | instr [15,10] = 1101 01 | |
7948 | instr [9, 5] = Rn | |
7949 | instr [4, 0] = Rd. */ | |
7950 | ||
7951 | unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16); | |
7952 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
7953 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
7954 | ||
7955 | NYI_assert (31, 23, 0x0FD); | |
7956 | NYI_assert (21, 21, 1); | |
7957 | NYI_assert (15, 10, 0x35); | |
7958 | ||
7959 | if (uimm (aarch64_get_instr (cpu), 22, 22)) | |
7960 | aarch64_set_FP_double (cpu, rd, | |
7961 | fabs (aarch64_get_FP_double (cpu, rn) | |
7962 | - aarch64_get_FP_double (cpu, rm))); | |
7963 | else | |
7964 | aarch64_set_FP_float (cpu, rd, | |
7965 | fabsf (aarch64_get_FP_float (cpu, rn) | |
7966 | - aarch64_get_FP_float (cpu, rm))); | |
7967 | } | |
7968 | ||
7969 | static void | |
7970 | do_scalar_CMGT (sim_cpu *cpu) | |
7971 | { | |
7972 | /* instr [31,21] = 0101 1110 111 | |
7973 | instr [20,16] = Rm | |
7974 | instr [15,10] = 00 1101 | |
7975 | instr [9, 5] = Rn | |
7976 | instr [4, 0] = Rd. */ | |
7977 | ||
7978 | unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16); | |
7979 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
7980 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
7981 | ||
7982 | NYI_assert (31, 21, 0x2F7); | |
7983 | NYI_assert (15, 10, 0x0D); | |
7984 | ||
7985 | /* CMGT is a signed comparison. */ | |
7986 | aarch64_set_vec_u64 (cpu, rd, 0, | |
7987 | (int64_t) aarch64_get_vec_u64 (cpu, rn, 0) > | |
7988 | (int64_t) aarch64_get_vec_u64 (cpu, rm, 0) ? -1L : 0L); | |
7988 | } | |
7989 | ||
7990 | static void | |
7991 | do_scalar_USHR (sim_cpu *cpu) | |
7992 | { | |
7993 | /* instr [31,23] = 0111 1111 0 | |
7994 | instr [22,16] = shift amount | |
7995 | instr [15,10] = 0000 01 | |
7996 | instr [9, 5] = Rn | |
7997 | instr [4, 0] = Rd. */ | |
7998 | ||
7999 | unsigned amount = 128 - uimm (aarch64_get_instr (cpu), 22, 16); | |
8000 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
8001 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
8002 | ||
8003 | NYI_assert (31, 23, 0x0FE); | |
8004 | NYI_assert (15, 10, 0x01); | |
8005 | ||
8006 | /* Only the 64-bit form (immh<3> == 1) is allocated here. */ | |
8007 | if (uimm (aarch64_get_instr (cpu), 22, 22) == 0) | |
8008 | HALT_UNALLOC; | |
8009 | ||
8010 | aarch64_set_vec_u64 (cpu, rd, 0, | |
8011 | aarch64_get_vec_u64 (cpu, rn, 0) >> amount); | |
8008 | } | |
8009 | ||
8010 | static void | |
8011 | do_scalar_SHL (sim_cpu *cpu) | |
8012 | { | |
8013 | /* instr [31,23] = 0101 1111 0 | |
8014 | instr [22,16] = shift amount | |
8015 | instr [15,10] = 0101 01 | |
8016 | instr [9, 5] = Rn | |
8017 | instr [4, 0] = Rd. */ | |
8018 | ||
8019 | unsigned amount = uimm (aarch64_get_instr (cpu), 22, 16) - 64; | |
8020 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
8021 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
8022 | ||
8023 | NYI_assert (31, 23, 0x0BE); | |
8024 | NYI_assert (15, 10, 0x15); | |
8025 | ||
8026 | if (uimm (aarch64_get_instr (cpu), 22, 22) == 0) | |
8027 | HALT_UNALLOC; | |
8028 | ||
8029 | aarch64_set_vec_u64 (cpu, rd, 0, | |
8030 | aarch64_get_vec_u64 (cpu, rn, 0) << amount); | |
8031 | } | |
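
/* Illustrative note: for the 64-bit scalar shifts above, the amount
   falls out of the immh:immb field (instr[22,16], immh<3> == 1) as
     USHR : amount = 128 - immh:immb  (1..64)
     SHL  : amount = immh:immb - 64   (0..63)
   e.g. immh:immb == 0x44 encodes USHR #60 and SHL #4.  */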
8032 | ||
8033 | /* FCMEQ FCMGT FCMGE. */ | |
8034 | static void | |
8035 | do_scalar_FCM (sim_cpu *cpu) | |
8036 | { | |
8037 | /* instr [31,30] = 01 | |
8038 | instr [29] = U | |
8039 | instr [28,24] = 1 1110 | |
8040 | instr [23] = E | |
8041 | instr [22] = size | |
8042 | instr [21] = 1 | |
8043 | instr [20,16] = Rm | |
8044 | instr [15,12] = 1110 | |
8045 | instr [11] = AC | |
8046 | instr [10] = 1 | |
8047 | instr [9, 5] = Rn | |
8048 | instr [4, 0] = Rd. */ | |
8049 | ||
8050 | unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16); | |
8051 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
8052 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
8053 | unsigned EUac = (uimm (aarch64_get_instr (cpu), 23, 23) << 2) | |
8054 | | (uimm (aarch64_get_instr (cpu), 29, 29) << 1) | |
8055 | | uimm (aarch64_get_instr (cpu), 11, 11); | |
8056 | unsigned result; | |
8057 | float val1; | |
8058 | float val2; | |
8059 | ||
8060 | NYI_assert (31, 30, 1); | |
8061 | NYI_assert (28, 24, 0x1E); | |
8062 | NYI_assert (21, 21, 1); | |
8063 | NYI_assert (15, 12, 0xE); | |
8064 | NYI_assert (10, 10, 1); | |
8065 | ||
8066 | if (uimm (aarch64_get_instr (cpu), 22, 22)) | |
8067 | { | |
8068 | double val1 = aarch64_get_FP_double (cpu, rn); | |
8069 | double val2 = aarch64_get_FP_double (cpu, rm); | |
8070 | ||
8071 | switch (EUac) | |
8072 | { | |
8073 | case 0: /* 000 */ | |
8074 | result = val1 == val2; | |
8075 | break; | |
8076 | ||
8077 | case 3: /* 011 */ | |
8078 | val1 = fabs (val1); | |
8079 | val2 = fabs (val2); | |
8080 | /* Fall through. */ | |
8081 | case 2: /* 010 */ | |
8082 | result = val1 >= val2; | |
8083 | break; | |
8084 | ||
8085 | case 7: /* 111 */ | |
8086 | val1 = fabs (val1); | |
8087 | val2 = fabs (val2); | |
8088 | /* Fall through. */ | |
8089 | case 6: /* 110 */ | |
8090 | result = val1 > val2; | |
8091 | break; | |
8092 | ||
8093 | default: | |
8094 | HALT_UNALLOC; | |
8095 | } | |
8096 | ||
8097 | aarch64_set_vec_u64 (cpu, rd, 0, result ? -1L : 0L); | |
8098 | return; | |
8099 | } | |
8100 | ||
8101 | val1 = aarch64_get_FP_float (cpu, rn); | |
8102 | val2 = aarch64_get_FP_float (cpu, rm); | |
8103 | ||
8104 | switch (EUac) | |
8105 | { | |
8106 | case 0: /* 000 */ | |
8107 | result = val1 == val2; | |
8108 | break; | |
8109 | ||
8110 | case 3: /* 011 */ | |
8111 | val1 = fabsf (val1); | |
8112 | val2 = fabsf (val2); | |
8113 | /* Fall through. */ | |
8114 | case 2: /* 010 */ | |
8115 | result = val1 >= val2; | |
8116 | break; | |
8117 | ||
8118 | case 7: /* 111 */ | |
8119 | val1 = fabsf (val1); | |
8120 | val2 = fabsf (val2); | |
8121 | /* Fall through. */ | |
8122 | case 6: /* 110 */ | |
8123 | result = val1 > val2; | |
8124 | break; | |
8125 | ||
8126 | default: | |
8127 | HALT_UNALLOC; | |
8128 | } | |
8129 | ||
8130 | aarch64_set_vec_u32 (cpu, rd, 0, result ? -1 : 0); | |
8131 | } | |
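
/* Example decode (illustrative): FCMGE S0, S1, S2 has U = 1, E = 0
   and AC = 0, so EUac == 2 and the destination lane is all ones
   exactly when val1 >= val2.  */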
8132 | ||
8133 | /* An alias of DUP. */ | |
8134 | static void | |
8135 | do_scalar_MOV (sim_cpu *cpu) | |
8136 | { | |
8137 | /* instr [31,21] = 0101 1110 000 | |
8138 | instr [20,16] = imm5 | |
8139 | instr [15,10] = 0000 01 | |
8140 | instr [9, 5] = Rn | |
8141 | instr [4, 0] = Rd. */ | |
8142 | ||
8143 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
8144 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
8145 | unsigned index; | |
8146 | ||
8147 | NYI_assert (31, 21, 0x2F0); | |
8148 | NYI_assert (15, 10, 0x01); | |
8149 | ||
8150 | if (uimm (aarch64_get_instr (cpu), 16, 16)) | |
8151 | { | |
8152 | /* 8-bit. */ | |
8153 | index = uimm (aarch64_get_instr (cpu), 20, 17); | |
8154 | aarch64_set_vec_u8 | |
8155 | (cpu, rd, 0, aarch64_get_vec_u8 (cpu, rn, index)); | |
8156 | } | |
8157 | else if (uimm (aarch64_get_instr (cpu), 17, 17)) | |
8158 | { | |
8159 | /* 16-bit. */ | |
8160 | index = uimm (aarch64_get_instr (cpu), 20, 18); | |
8161 | aarch64_set_vec_u16 | |
8162 | (cpu, rd, 0, aarch64_get_vec_u16 (cpu, rn, index)); | |
8163 | } | |
8164 | else if (uimm (aarch64_get_instr (cpu), 18, 18)) | |
8165 | { | |
8166 | /* 32-bit. */ | |
8167 | index = uimm (aarch64_get_instr (cpu), 20, 19); | |
8168 | aarch64_set_vec_u32 | |
8169 | (cpu, rd, 0, aarch64_get_vec_u32 (cpu, rn, index)); | |
8170 | } | |
8171 | else if (uimm (aarch64_get_instr (cpu), 19, 19)) | |
8172 | { | |
8173 | /* 64-bit. */ | |
8174 | index = uimm (aarch64_get_instr (cpu), 20, 20); | |
8175 | aarch64_set_vec_u64 | |
8176 | (cpu, rd, 0, aarch64_get_vec_u64 (cpu, rn, index)); | |
8177 | } | |
8178 | else | |
8179 | HALT_UNALLOC; | |
8180 | } | |
8181 | ||
8182 | static void | |
8183 | do_double_add (sim_cpu *cpu) | |
8184 | { | |
8185 | /* instr [28,25] = 1111. Dispatches assorted scalar SIMD encodings, falling back to the scalar double-precision FADD it is named for. */ | |
8186 | unsigned Fd; | |
8187 | unsigned Fm; | |
8188 | unsigned Fn; | |
8189 | double val1; | |
8190 | double val2; | |
8191 | ||
8192 | switch (uimm (aarch64_get_instr (cpu), 31, 23)) | |
8193 | { | |
8194 | case 0xBC: | |
8195 | switch (uimm (aarch64_get_instr (cpu), 15, 10)) | |
8196 | { | |
8197 | case 0x01: do_scalar_MOV (cpu); return; | |
8198 | case 0x39: do_scalar_FCM (cpu); return; | |
8199 | case 0x3B: do_scalar_FCM (cpu); return; | |
8200 | } | |
8201 | break; | |
8202 | ||
8203 | case 0xBE: do_scalar_SHL (cpu); return; | |
8204 | ||
8205 | case 0xFC: | |
8206 | switch (uimm (aarch64_get_instr (cpu), 15, 10)) | |
8207 | { | |
8208 | case 0x36: do_scalar_FADDP (cpu); return; | |
8209 | case 0x39: do_scalar_FCM (cpu); return; | |
8210 | case 0x3B: do_scalar_FCM (cpu); return; | |
8211 | } | |
8212 | break; | |
8213 | ||
8214 | case 0xFD: | |
8215 | switch (uimm (aarch64_get_instr (cpu), 15, 10)) | |
8216 | { | |
8217 | case 0x0D: do_scalar_CMGT (cpu); return; | |
8218 | case 0x35: do_scalar_FABD (cpu); return; | |
8219 | case 0x39: do_scalar_FCM (cpu); return; | |
8220 | case 0x3B: do_scalar_FCM (cpu); return; | |
8221 | default: | |
8222 | HALT_NYI; | |
8223 | } | |
8224 | ||
8225 | case 0xFE: do_scalar_USHR (cpu); return; | |
8226 | default: | |
8227 | break; | |
8228 | } | |
8229 | ||
8230 | /* instr [31,21] = 0101 1110 111 | |
8231 | instr [20,16] = Fn | |
8232 | instr [15,10] = 1000 01 | |
8233 | instr [9,5] = Fm | |
8234 | instr [4,0] = Fd. */ | |
8235 | if (uimm (aarch64_get_instr (cpu), 31, 21) != 0x2F7 | |
8236 | || uimm (aarch64_get_instr (cpu), 15, 10) != 0x21) | |
8237 | HALT_NYI; | |
8238 | ||
8239 | Fd = uimm (aarch64_get_instr (cpu), 4, 0); | |
8240 | Fm = uimm (aarch64_get_instr (cpu), 9, 5); | |
8241 | Fn = uimm (aarch64_get_instr (cpu), 20, 16); | |
8242 | ||
8243 | val1 = aarch64_get_FP_double (cpu, Fm); | |
8244 | val2 = aarch64_get_FP_double (cpu, Fn); | |
8245 | ||
8246 | aarch64_set_FP_double (cpu, Fd, val1 + val2); | |
8247 | } | |
8248 | ||
8249 | static void | |
8250 | dexAdvSIMD1 (sim_cpu *cpu) | |
8251 | { | |
8252 | /* instr [28,25] = 1 111. */ | |
8253 | ||
8254 | /* We are currently only interested in the basic | |
8255 | scalar fp routines, which all have bit 30 = 0. */ | |
8256 | if (uimm (aarch64_get_instr (cpu), 30, 30)) | |
8257 | do_double_add (cpu); | |
8258 | ||
8259 | /* instr[24] is set for FP data processing 3-source and clear for | |
8260 | all other basic scalar fp instruction groups. */ | |
8261 | else if (uimm (aarch64_get_instr (cpu), 24, 24)) | |
8262 | dexSimpleFPDataProc3Source (cpu); | |
8263 | ||
8264 | /* instr[21] is clear for floating <-> fixed conversions and set for | |
8265 | all other basic scalar fp instruction groups. */ | |
8266 | else if (!uimm (aarch64_get_instr (cpu), 21, 21)) | |
8267 | dexSimpleFPFixedConvert (cpu); | |
8268 | ||
8269 | /* instr[11,10] : 01 ==> cond compare, 10 ==> Data Proc 2 Source | |
8270 | 11 ==> cond select, 00 ==> other. */ | |
8271 | else | |
8272 | switch (uimm (aarch64_get_instr (cpu), 11, 10)) | |
8273 | { | |
8274 | case 1: dexSimpleFPCondCompare (cpu); return; | |
8275 | case 2: dexSimpleFPDataProc2Source (cpu); return; | |
8276 | case 3: dexSimpleFPCondSelect (cpu); return; | |
8277 | ||
8278 | default: | |
8279 | /* Now an ordered cascade of tests. | |
8280 | FP immediate has aarch64_get_instr (cpu)[12] == 1. | |
8281 | FP compare has aarch64_get_instr (cpu)[13] == 1. | |
8282 | FP Data Proc 1 Source has aarch64_get_instr (cpu)[14] == 1. | |
8283 | FP floating <--> integer conversions has aarch64_get_instr (cpu)[15] == 0. */ | |
8284 | if (uimm (aarch64_get_instr (cpu), 12, 12)) | |
8285 | dexSimpleFPImmediate (cpu); | |
8286 | ||
8287 | else if (uimm (aarch64_get_instr (cpu), 13, 13)) | |
8288 | dexSimpleFPCompare (cpu); | |
8289 | ||
8290 | else if (uimm (aarch64_get_instr (cpu), 14, 14)) | |
8291 | dexSimpleFPDataProc1Source (cpu); | |
8292 | ||
8293 | else if (!uimm (aarch64_get_instr (cpu), 15, 15)) | |
8294 | dexSimpleFPIntegerConvert (cpu); | |
8295 | ||
8296 | else | |
8297 | /* If we get here then instr[15] == 1 which means UNALLOC. */ | |
8298 | HALT_UNALLOC; | |
8299 | } | |
8300 | } | |
8301 | ||
8302 | /* PC relative addressing. */ | |
8303 | ||
8304 | static void | |
8305 | pcadr (sim_cpu *cpu) | |
8306 | { | |
8307 | /* instr[31] = op : 0 ==> ADR, 1 ==> ADRP | |
8308 | instr[30,29] = immlo | |
8309 | instr[23,5] = immhi, instr[4,0] = rd. */ | |
8310 | uint64_t address; | |
8311 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
8312 | uint32_t isPage = uimm (aarch64_get_instr (cpu), 31, 31); | |
8313 | union { int64_t s64; uint64_t u64; } imm; | |
8314 | uint64_t offset; | |
8315 | ||
8316 | imm.s64 = simm64 (aarch64_get_instr (cpu), 23, 5); | |
8317 | offset = imm.u64; | |
8318 | offset = (offset << 2) | uimm (aarch64_get_instr (cpu), 30, 29); | |
8319 | ||
8320 | address = aarch64_get_PC (cpu); | |
8321 | ||
8322 | if (isPage) | |
8323 | { | |
8324 | offset <<= 12; | |
8325 | address &= ~0xfff; | |
8326 | } | |
8327 | ||
8328 | aarch64_set_reg_u64 (cpu, rd, NO_SP, address + offset); | |
8329 | } | |
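/* A worked example of the arithmetic above (values illustrative, not
   drawn from the source): with PC = 0x400a30 and a combined immediate
   of 3, ADR computes 0x400a30 + 3 = 0x400a33, while ADRP shifts the
   offset up to 0x3000, masks the PC down to its page base 0x400000,
   and yields 0x403000 -- the base of the page three pages above the
   instruction.  */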
8330 | ||
8331 | /* Specific decode and execute for group Data Processing Immediate. */ | |
8332 | ||
8333 | static void | |
8334 | dexPCRelAddressing (sim_cpu *cpu) | |
8335 | { | |
8336 | /* assert instr[28,24] = 10000. */ | |
8337 | pcadr (cpu); | |
8338 | } | |
8339 | ||
8340 | /* Immediate logical. | |
8341 | The bimm32/64 argument is constructed by replicating a 2, 4, 8, | |
8342 | 16, 32 or 64 bit sequence pulled out at decode and possibly | |
8343 | inverting it. | |
8344 | ||
8345 | N.B. the output register (dest) can normally be Xn or SP; | |
8346 | the exception occurs for flag setting instructions which may | |
8347 | only use Xn for the output (dest). The input register can | |
8348 | never be SP. */ | |
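/* As a concrete illustration (example values, not taken from the decode
   tables in this file): the 32 bit mask 0x3c3c3c3c is a valid bitmask
   immediate because it replicates the 8 bit element 0x3c (four
   consecutive ones, rotated) across the word, so an instruction such as
   AND W0, W1, #0x3c3c3c3c reaches and32 below with bimm = 0x3c3c3c3c.  */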
8349 | ||
8350 | /* 32 bit and immediate. */ | |
8351 | static void | |
8352 | and32 (sim_cpu *cpu, uint32_t bimm) | |
8353 | { | |
8354 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
8355 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
8356 | ||
8357 | aarch64_set_reg_u64 (cpu, rd, SP_OK, | |
8358 | aarch64_get_reg_u32 (cpu, rn, NO_SP) & bimm); | |
8359 | } | |
8360 | ||
8361 | /* 64 bit and immediate. */ | |
8362 | static void | |
8363 | and64 (sim_cpu *cpu, uint64_t bimm) | |
8364 | { | |
8365 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
8366 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
8367 | ||
8368 | aarch64_set_reg_u64 (cpu, rd, SP_OK, | |
8369 | aarch64_get_reg_u64 (cpu, rn, NO_SP) & bimm); | |
8370 | } | |
8371 | ||
8372 | /* 32 bit and immediate set flags. */ | |
8373 | static void | |
8374 | ands32 (sim_cpu *cpu, uint32_t bimm) | |
8375 | { | |
8376 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
8377 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
8378 | ||
8379 | uint32_t value1 = aarch64_get_reg_u32 (cpu, rn, NO_SP); | |
8380 | uint32_t value2 = bimm; | |
8381 | ||
8382 | aarch64_set_reg_u64 (cpu, rd, NO_SP, value1 & value2); | |
8383 | set_flags_for_binop32 (cpu, value1 & value2); | |
8384 | } | |
8385 | ||
8386 | /* 64 bit and immediate set flags. */ | |
8387 | static void | |
8388 | ands64 (sim_cpu *cpu, uint64_t bimm) | |
8389 | { | |
8390 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
8391 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
8392 | ||
8393 | uint64_t value1 = aarch64_get_reg_u64 (cpu, rn, NO_SP); | |
8394 | uint64_t value2 = bimm; | |
8395 | ||
8396 | aarch64_set_reg_u64 (cpu, rd, NO_SP, value1 & value2); | |
8397 | set_flags_for_binop64 (cpu, value1 & value2); | |
8398 | } | |
8399 | ||
8400 | /* 32 bit exclusive or immediate. */ | |
8401 | static void | |
8402 | eor32 (sim_cpu *cpu, uint32_t bimm) | |
8403 | { | |
8404 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
8405 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
8406 | ||
8407 | aarch64_set_reg_u64 (cpu, rd, SP_OK, | |
8408 | aarch64_get_reg_u32 (cpu, rn, NO_SP) ^ bimm); | |
8409 | } | |
8410 | ||
8411 | /* 64 bit exclusive or immediate. */ | |
8412 | static void | |
8413 | eor64 (sim_cpu *cpu, uint64_t bimm) | |
8414 | { | |
8415 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
8416 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
8417 | ||
8418 | aarch64_set_reg_u64 (cpu, rd, SP_OK, | |
8419 | aarch64_get_reg_u64 (cpu, rn, NO_SP) ^ bimm); | |
8420 | } | |
8421 | ||
8422 | /* 32 bit or immediate. */ | |
8423 | static void | |
8424 | orr32 (sim_cpu *cpu, uint32_t bimm) | |
8425 | { | |
8426 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
8427 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
8428 | ||
8429 | aarch64_set_reg_u64 (cpu, rd, SP_OK, | |
8430 | aarch64_get_reg_u32 (cpu, rn, NO_SP) | bimm); | |
8431 | } | |
8432 | ||
8433 | /* 64 bit or immediate. */ | |
8434 | static void | |
8435 | orr64 (sim_cpu *cpu, uint64_t bimm) | |
8436 | { | |
8437 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
8438 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
8439 | ||
8440 | aarch64_set_reg_u64 (cpu, rd, SP_OK, | |
8441 | aarch64_get_reg_u64 (cpu, rn, NO_SP) | bimm); | |
8442 | } | |
8443 | ||
8444 | /* Logical shifted register. | |
8445 | These allow an optional LSL, ASR, LSR or ROR to the second source | |
8446 | register with a count up to the register bit count. | |
8447 | N.B. register args may not be SP. */ | |
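/* For example (illustrative): AND W0, W1, W2, LSR #8 reaches
   and32_shift below with shift = LSR and count = 8 and computes
   W1 & (W2 >> 8); the BIC/ORN/EON variants are identical except that
   they complement the shifted operand before combining it.  */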
8448 | ||
8449 | /* 32 bit AND shifted register. */ | |
8450 | static void | |
8451 | and32_shift (sim_cpu *cpu, Shift shift, uint32_t count) | |
8452 | { | |
8453 | unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16); | |
8454 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
8455 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
8456 | ||
8457 | aarch64_set_reg_u64 | |
8458 | (cpu, rd, NO_SP, aarch64_get_reg_u32 (cpu, rn, NO_SP) | |
8459 | & shifted32 (aarch64_get_reg_u32 (cpu, rm, NO_SP), shift, count)); | |
8460 | } | |
8461 | ||
8462 | /* 64 bit AND shifted register. */ | |
8463 | static void | |
8464 | and64_shift (sim_cpu *cpu, Shift shift, uint32_t count) | |
8465 | { | |
8466 | unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16); | |
8467 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
8468 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
8469 | ||
8470 | aarch64_set_reg_u64 | |
8471 | (cpu, rd, NO_SP, aarch64_get_reg_u64 (cpu, rn, NO_SP) | |
8472 | & shifted64 (aarch64_get_reg_u64 (cpu, rm, NO_SP), shift, count)); | |
8473 | } | |
8474 | ||
8475 | /* 32 bit AND shifted register setting flags. */ | |
8476 | static void | |
8477 | ands32_shift (sim_cpu *cpu, Shift shift, uint32_t count) | |
8478 | { | |
8479 | unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16); | |
8480 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
8481 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
8482 | ||
8483 | uint32_t value1 = aarch64_get_reg_u32 (cpu, rn, NO_SP); | |
8484 | uint32_t value2 = shifted32 (aarch64_get_reg_u32 (cpu, rm, NO_SP), | |
8485 | shift, count); | |
8486 | ||
8487 | aarch64_set_reg_u64 (cpu, rd, NO_SP, value1 & value2); | |
8488 | set_flags_for_binop32 (cpu, value1 & value2); | |
8489 | } | |
8490 | ||
8491 | /* 64 bit AND shifted register setting flags. */ | |
8492 | static void | |
8493 | ands64_shift (sim_cpu *cpu, Shift shift, uint32_t count) | |
8494 | { | |
8495 | unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16); | |
8496 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
8497 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
8498 | ||
8499 | uint64_t value1 = aarch64_get_reg_u64 (cpu, rn, NO_SP); | |
8500 | uint64_t value2 = shifted64 (aarch64_get_reg_u64 (cpu, rm, NO_SP), | |
8501 | shift, count); | |
8502 | ||
8503 | aarch64_set_reg_u64 (cpu, rd, NO_SP, value1 & value2); | |
8504 | set_flags_for_binop64 (cpu, value1 & value2); | |
8505 | } | |
8506 | ||
8507 | /* 32 bit BIC shifted register. */ | |
8508 | static void | |
8509 | bic32_shift (sim_cpu *cpu, Shift shift, uint32_t count) | |
8510 | { | |
8511 | unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16); | |
8512 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
8513 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
8514 | ||
8515 | aarch64_set_reg_u64 | |
8516 | (cpu, rd, NO_SP, aarch64_get_reg_u32 (cpu, rn, NO_SP) | |
8517 | & ~ shifted32 (aarch64_get_reg_u32 (cpu, rm, NO_SP), shift, count)); | |
8518 | } | |
8519 | ||
8520 | /* 64 bit BIC shifted register. */ | |
8521 | static void | |
8522 | bic64_shift (sim_cpu *cpu, Shift shift, uint32_t count) | |
8523 | { | |
8524 | unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16); | |
8525 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
8526 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
8527 | ||
8528 | aarch64_set_reg_u64 | |
8529 | (cpu, rd, NO_SP, aarch64_get_reg_u64 (cpu, rn, NO_SP) | |
8530 | & ~ shifted64 (aarch64_get_reg_u64 (cpu, rm, NO_SP), shift, count)); | |
8531 | } | |
8532 | ||
8533 | /* 32 bit BIC shifted register setting flags. */ | |
8534 | static void | |
8535 | bics32_shift (sim_cpu *cpu, Shift shift, uint32_t count) | |
8536 | { | |
8537 | unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16); | |
8538 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
8539 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
8540 | ||
8541 | uint32_t value1 = aarch64_get_reg_u32 (cpu, rn, NO_SP); | |
8542 | uint32_t value2 = ~ shifted32 (aarch64_get_reg_u32 (cpu, rm, NO_SP), | |
8543 | shift, count); | |
8544 | ||
8545 | aarch64_set_reg_u64 (cpu, rd, NO_SP, value1 & value2); | |
8546 | set_flags_for_binop32 (cpu, value1 & value2); | |
8547 | } | |
8548 | ||
8549 | /* 64 bit BIC shifted register setting flags. */ | |
8550 | static void | |
8551 | bics64_shift (sim_cpu *cpu, Shift shift, uint32_t count) | |
8552 | { | |
8553 | unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16); | |
8554 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
8555 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
8556 | ||
8557 | uint64_t value1 = aarch64_get_reg_u64 (cpu, rn, NO_SP); | |
8558 | uint64_t value2 = ~ shifted64 (aarch64_get_reg_u64 (cpu, rm, NO_SP), | |
8559 | shift, count); | |
8560 | ||
8561 | aarch64_set_reg_u64 (cpu, rd, NO_SP, value1 & value2); | |
8562 | set_flags_for_binop64 (cpu, value1 & value2); | |
8563 | } | |
8564 | ||
8565 | /* 32 bit EON shifted register. */ | |
8566 | static void | |
8567 | eon32_shift (sim_cpu *cpu, Shift shift, uint32_t count) | |
8568 | { | |
8569 | unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16); | |
8570 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
8571 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
8572 | ||
8573 | aarch64_set_reg_u64 | |
8574 | (cpu, rd, NO_SP, aarch64_get_reg_u32 (cpu, rn, NO_SP) | |
8575 | ^ ~ shifted32 (aarch64_get_reg_u32 (cpu, rm, NO_SP), shift, count)); | |
8576 | } | |
8577 | ||
8578 | /* 64 bit EON shifted register. */ | |
8579 | static void | |
8580 | eon64_shift (sim_cpu *cpu, Shift shift, uint32_t count) | |
8581 | { | |
8582 | unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16); | |
8583 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
8584 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
8585 | ||
8586 | aarch64_set_reg_u64 | |
8587 | (cpu, rd, NO_SP, aarch64_get_reg_u64 (cpu, rn, NO_SP) | |
8588 | ^ ~ shifted64 (aarch64_get_reg_u64 (cpu, rm, NO_SP), shift, count)); | |
8589 | } | |
8590 | ||
8591 | /* 32 bit EOR shifted register. */ | |
8592 | static void | |
8593 | eor32_shift (sim_cpu *cpu, Shift shift, uint32_t count) | |
8594 | { | |
8595 | unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16); | |
8596 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
8597 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
8598 | ||
8599 | aarch64_set_reg_u64 | |
8600 | (cpu, rd, NO_SP, aarch64_get_reg_u32 (cpu, rn, NO_SP) | |
8601 | ^ shifted32 (aarch64_get_reg_u32 (cpu, rm, NO_SP), shift, count)); | |
8602 | } | |
8603 | ||
8604 | /* 64 bit EOR shifted register. */ | |
8605 | static void | |
8606 | eor64_shift (sim_cpu *cpu, Shift shift, uint32_t count) | |
8607 | { | |
8608 | unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16); | |
8609 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
8610 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
8611 | ||
8612 | aarch64_set_reg_u64 | |
8613 | (cpu, rd, NO_SP, aarch64_get_reg_u64 (cpu, rn, NO_SP) | |
8614 | ^ shifted64 (aarch64_get_reg_u64 (cpu, rm, NO_SP), shift, count)); | |
8615 | } | |
8616 | ||
8617 | /* 32 bit ORR shifted register. */ | |
8618 | static void | |
8619 | orr32_shift (sim_cpu *cpu, Shift shift, uint32_t count) | |
8620 | { | |
8621 | unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16); | |
8622 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
8623 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
8624 | ||
8625 | aarch64_set_reg_u64 | |
8626 | (cpu, rd, NO_SP, aarch64_get_reg_u32 (cpu, rn, NO_SP) | |
8627 | | shifted32 (aarch64_get_reg_u32 (cpu, rm, NO_SP), shift, count)); | |
8628 | } | |
8629 | ||
8630 | /* 64 bit ORR shifted register. */ | |
8631 | static void | |
8632 | orr64_shift (sim_cpu *cpu, Shift shift, uint32_t count) | |
8633 | { | |
8634 | unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16); | |
8635 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
8636 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
8637 | ||
8638 | aarch64_set_reg_u64 | |
8639 | (cpu, rd, NO_SP, aarch64_get_reg_u64 (cpu, rn, NO_SP) | |
8640 | | shifted64 (aarch64_get_reg_u64 (cpu, rm, NO_SP), shift, count)); | |
8641 | } | |
8642 | ||
8643 | /* 32 bit ORN shifted register. */ | |
8644 | static void | |
8645 | orn32_shift (sim_cpu *cpu, Shift shift, uint32_t count) | |
8646 | { | |
8647 | unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16); | |
8648 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
8649 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
8650 | ||
8651 | aarch64_set_reg_u64 | |
8652 | (cpu, rd, NO_SP, aarch64_get_reg_u32 (cpu, rn, NO_SP) | |
8653 | | ~ shifted32 (aarch64_get_reg_u32 (cpu, rm, NO_SP), shift, count)); | |
8654 | } | |
8655 | ||
8656 | /* 64 bit ORN shifted register. */ | |
8657 | static void | |
8658 | orn64_shift (sim_cpu *cpu, Shift shift, uint32_t count) | |
8659 | { | |
8660 | unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16); | |
8661 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
8662 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
8663 | ||
8664 | aarch64_set_reg_u64 | |
8665 | (cpu, rd, NO_SP, aarch64_get_reg_u64 (cpu, rn, NO_SP) | |
8666 | | ~ shifted64 (aarch64_get_reg_u64 (cpu, rm, NO_SP), shift, count)); | |
8667 | } | |
8668 | ||
8669 | static void | |
8670 | dexLogicalImmediate (sim_cpu *cpu) | |
8671 | { | |
8672 | /* assert instr[28,23] = 1001000 | |
8673 | instr[31] = size : 0 ==> 32 bit, 1 ==> 64 bit | |
8674 | instr[30,29] = op : 0 ==> AND, 1 ==> ORR, 2 ==> EOR, 3 ==> ANDS | |
8675 | instr[22] = N : used to construct immediate mask | |
8676 | instr[21,16] = immr | |
8677 | instr[15,10] = imms | |
8678 | instr[9,5] = Rn | |
8679 | instr[4,0] = Rd */ | |
8680 | ||
8681 | /* 32 bit operations must have N = 0 or else we have an UNALLOC. */ | |
8682 | uint32_t size = uimm (aarch64_get_instr (cpu), 31, 31); | |
8683 | uint32_t N = uimm (aarch64_get_instr (cpu), 22, 22); | |
8684 | /* uint32_t immr = uimm (aarch64_get_instr (cpu), 21, 16);. */ | |
8685 | /* uint32_t imms = uimm (aarch64_get_instr (cpu), 15, 10);. */ | |
8686 | uint32_t index = uimm (aarch64_get_instr (cpu), 22, 10); | |
8687 | uint64_t bimm64 = LITable [index]; | |
8688 | uint32_t dispatch = uimm (aarch64_get_instr (cpu), 30, 29); | |
8689 | ||
8690 | if (~size & N) | |
8691 | HALT_UNALLOC; | |
8692 | ||
8693 | if (!bimm64) | |
8694 | HALT_UNALLOC; | |
8695 | ||
8696 | if (size == 0) | |
8697 | { | |
8698 | uint32_t bimm = (uint32_t) bimm64; | |
8699 | ||
8700 | switch (dispatch) | |
8701 | { | |
8702 | case 0: and32 (cpu, bimm); return; | |
8703 | case 1: orr32 (cpu, bimm); return; | |
8704 | case 2: eor32 (cpu, bimm); return; | |
8705 | case 3: ands32 (cpu, bimm); return; | |
8706 | } | |
8707 | } | |
8708 | else | |
8709 | { | |
8710 | switch (dispatch) | |
8711 | { | |
8712 | case 0: and64 (cpu, bimm64); return; | |
8713 | case 1: orr64 (cpu, bimm64); return; | |
8714 | case 2: eor64 (cpu, bimm64); return; | |
8715 | case 3: ands64 (cpu, bimm64); return; | |
8716 | } | |
8717 | } | |
8718 | HALT_UNALLOC; | |
8719 | } | |
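/* A worked example of the dispatch above (values illustrative):
   TST X1, #0xff -- an alias of ANDS XZR, X1, #0xff -- has size = 1 and
   op = 3, with N:immr:imms = 1:000000:000111.  Those thirteen bits form
   index 0x1007, whose LITable entry is the precomputed mask 0xff (eight
   consecutive ones, no rotation), so the 64 bit path calls
   ands64 (cpu, 0xff).  */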
8720 | ||
8721 | /* Immediate move. | |
8722 | The uimm argument is a 16 bit value to be inserted into the | |
8723 | target register; the pos argument locates the 16 bit word in the | |
8724 | dest register i.e. it is in {0, 1} for 32 bit and {0, 1, 2, | |
8725 | 3} for 64 bit. | |
8726 | N.B. the register arg may not be SP so it should be | |
8727 | accessed using the setGZRegisterXXX accessors. */ | |
8728 | ||
8729 | /* 32 bit move 16 bit immediate zero remaining shorts. */ | |
8730 | static void | |
8731 | movz32 (sim_cpu *cpu, uint32_t val, uint32_t pos) | |
8732 | { | |
8733 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
8734 | ||
8735 | aarch64_set_reg_u64 (cpu, rd, NO_SP, val << (pos * 16)); | |
8736 | } | |
8737 | ||
8738 | /* 64 bit move 16 bit immediate zero remaining shorts. */ | |
8739 | static void | |
8740 | movz64 (sim_cpu *cpu, uint32_t val, uint32_t pos) | |
8741 | { | |
8742 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
8743 | ||
8744 | aarch64_set_reg_u64 (cpu, rd, NO_SP, ((uint64_t) val) << (pos * 16)); | |
8745 | } | |
8746 | ||
8747 | /* 32 bit move 16 bit immediate negated. */ | |
8748 | static void | |
8749 | movn32 (sim_cpu *cpu, uint32_t val, uint32_t pos) | |
8750 | { | |
8751 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
8752 | ||
8753 | aarch64_set_reg_u64 (cpu, rd, NO_SP, ((val << (pos * 16)) ^ 0xffffffffU)); | |
8754 | } | |
8755 | ||
8756 | /* 64 bit move 16 bit immediate negated. */ | |
8757 | static void | |
8758 | movn64 (sim_cpu *cpu, uint32_t val, uint32_t pos) | |
8759 | { | |
8760 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
8761 | ||
8762 | aarch64_set_reg_u64 | |
8763 | (cpu, rd, NO_SP, ((((uint64_t) val) << (pos * 16)) | |
8764 | ^ 0xffffffffffffffffULL)); | |
8765 | } | |
8766 | ||
8767 | /* 32 bit move 16 bit immediate keep remaining shorts. */ | |
8768 | static void | |
8769 | movk32 (sim_cpu *cpu, uint32_t val, uint32_t pos) | |
8770 | { | |
8771 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
8772 | uint32_t current = aarch64_get_reg_u32 (cpu, rd, NO_SP); | |
8773 | uint32_t value = val << (pos * 16); | |
8774 | uint32_t mask = ~(0xffffU << (pos * 16)); | |
8775 | ||
8776 | aarch64_set_reg_u64 (cpu, rd, NO_SP, (value | (current & mask))); | |
8777 | } | |
8778 | ||
8779 | /* 64 bit move 16 bit immediate keep remaining shorts. */ | |
8780 | static void | |
8781 | movk64 (sim_cpu *cpu, uint32_t val, uint32_t pos) | |
8782 | { | |
8783 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
8784 | uint64_t current = aarch64_get_reg_u64 (cpu, rd, NO_SP); | |
8785 | uint64_t value = (uint64_t) val << (pos * 16); | |
8786 | uint64_t mask = ~(0xffffULL << (pos * 16)); | |
8787 | ||
8788 | aarch64_set_reg_u64 (cpu, rd, NO_SP, (value | (current & mask))); | |
8789 | } | |
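/* A typical use of the pair above (values illustrative): materialising
   the 64 bit constant 0x12345678 is assembled as
     MOVZ X0, #0x5678            ; movz64 (cpu, 0x5678, 0) -> 0x5678
     MOVK X0, #0x1234, LSL #16   ; movk64 (cpu, 0x1234, 1) -> 0x12345678
   each movk64 call clears only the 16 bit field selected by pos and ORs
   in the new half-word, leaving the other shorts intact.  */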
8790 | ||
8791 | static void | |
8792 | dexMoveWideImmediate (sim_cpu *cpu) | |
8793 | { | |
8794 | /* assert instr[28:23] = 100101 | |
8795 | instr[31] = size : 0 ==> 32 bit, 1 ==> 64 bit | |
8796 | instr[30,29] = op : 0 ==> MOVN, 1 ==> UNALLOC, 2 ==> MOVZ, 3 ==> MOVK | |
8797 | instr[22,21] = shift : 00 == LSL#0, 01 = LSL#16, 10 = LSL#32, 11 = LSL#48 | |
8798 | instr[20,5] = uimm16 | |
8799 | instr[4,0] = Rd */ | |
8800 | ||
8801 | /* N.B. the (multiple of 16) shift is applied by the called routine; | |
8802 | we just pass the multiplier. */ | |
8803 | ||
8804 | uint32_t imm; | |
8805 | uint32_t size = uimm (aarch64_get_instr (cpu), 31, 31); | |
8806 | uint32_t op = uimm (aarch64_get_instr (cpu), 30, 29); | |
8807 | uint32_t shift = uimm (aarch64_get_instr (cpu), 22, 21); | |
8808 | ||
8809 | /* A 32 bit operand can only be shifted by 0 or 1 lot of 16; | |
8810 | anything else is an unallocated instruction. */ | |
8811 | if (size == 0 && (shift > 1)) | |
8812 | HALT_UNALLOC; | |
8813 | ||
8814 | if (op == 1) | |
8815 | HALT_UNALLOC; | |
8816 | ||
8817 | imm = uimm (aarch64_get_instr (cpu), 20, 5); | |
8818 | ||
8819 | if (size == 0) | |
8820 | { | |
8821 | if (op == 0) | |
8822 | movn32 (cpu, imm, shift); | |
8823 | else if (op == 2) | |
8824 | movz32 (cpu, imm, shift); | |
8825 | else | |
8826 | movk32 (cpu, imm, shift); | |
8827 | } | |
8828 | else | |
8829 | { | |
8830 | if (op == 0) | |
8831 | movn64 (cpu, imm, shift); | |
8832 | else if (op == 2) | |
8833 | movz64 (cpu, imm, shift); | |
8834 | else | |
8835 | movk64 (cpu, imm, shift); | |
8836 | } | |
8837 | } | |
8838 | ||
8839 | /* Bitfield operations. | |
8840 | These take a pair of bit positions r and s which are in {0..31} | |
8841 | or {0..63} depending on the instruction word size. | |
8842 | N.B. register args may not be SP. */ | |
8843 | ||
8844 | /* OK, we start with ubfm which just needs to pick | |
8845 | some bits out of the source, zero the rest and write | |
8846 | the result to dest. Just need two logical shifts. */ | |
8847 | ||
8848 | /* 32 bit bitfield move, left and right of affected zeroed | |
8849 | if r <= s Wd<s-r:0> = Wn<s:r> else Wd<32+s-r,32-r> = Wn<s:0>. */ | |
8850 | static void | |
8851 | ubfm32 (sim_cpu *cpu, uint32_t r, uint32_t s) | |
8852 | { | |
8853 | unsigned rd; | |
8854 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
8855 | uint32_t value = aarch64_get_reg_u32 (cpu, rn, NO_SP); | |
8856 | ||
8857 | /* Pick either s+1-r or s+1 consecutive bits out of the original word. */ | |
8858 | if (r <= s) | |
8859 | { | |
8860 | /* 31:...:s:xxx:r:...:0 ==> 31:...:s-r:xxx:0. | |
8861 | We want only bits s:xxx:r at the bottom of the word | |
8862 | so we LSL bit s up to bit 31 i.e. by 31 - s | |
8863 | and then we LSR to bring bit 31 down to bit s - r | |
8864 | i.e. by 31 + r - s. */ | |
8865 | value <<= 31 - s; | |
8866 | value >>= 31 + r - s; | |
8867 | } | |
8868 | else | |
8869 | { | |
8870 | /* 31:...:s:xxx:0 ==> 31:...:31-(r-1)+s:xxx:31-(r-1):...:0 | |
8871 | We want only bits s:xxx:0 starting at bit 31-(r-1) | |
8872 | so we LSL bit s up to bit 31 i.e. by 31 - s | |
8873 | and then we LSR to bring bit 31 down to 31-(r-1)+s | |
8874 | i.e. by r - (s + 1). */ | |
8875 | value <<= 31 - s; | |
8876 | value >>= r - (s + 1); | |
8877 | } | |
8878 | ||
8879 | rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
8880 | aarch64_set_reg_u64 (cpu, rd, NO_SP, value); | |
8881 | } | |
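/* Worked example for the routine above (values illustrative):
   UBFX W0, W1, #4, #8 is encoded as UBFM with r = 4 and s = 11, taking
   the r <= s branch: value <<= 31 - 11 = 20 moves bit 11 up to bit 31,
   then value >>= 31 + 4 - 11 = 24 brings it back down to bit 7, leaving
   W1<11:4> in W0<7:0> with all other bits zero.  */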
8882 | ||
8883 | /* 64 bit bitfield move, left and right of affected zeroed | |
8884 | if r <= s Wd<s-r:0> = Wn<s:r> else Wd<64+s-r,64-r> = Wn<s:0>. */ | |
8885 | static void | |
8886 | ubfm (sim_cpu *cpu, uint32_t r, uint32_t s) | |
8887 | { | |
8888 | unsigned rd; | |
8889 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
8890 | uint64_t value = aarch64_get_reg_u64 (cpu, rn, NO_SP); | |
8891 | ||
8892 | if (r <= s) | |
8893 | { | |
8894 | /* 63:...:s:xxx:r:...:0 ==> 63:...:s-r:xxx:0. | |
8895 | We want only bits s:xxx:r at the bottom of the word. | |
8896 | So we LSL bit s up to bit 63 i.e. by 63 - s | |
8897 | and then we LSR to bring bit 63 down to bit s - r | |
8898 | i.e. by 63 + r - s. */ | |
8899 | value <<= 63 - s; | |
8900 | value >>= 63 + r - s; | |
8901 | } | |
8902 | else | |
8903 | { | |
8904 | /* 63:...:s:xxx:0 ==> 63:...:63-(r-1)+s:xxx:63-(r-1):...:0. | |
8905 | We want only bits s:xxx:0 starting at bit 63-(r-1). | |
8906 | So we LSL bit s up to bit 63 i.e. by 63 - s | |
8907 | and then we LSR to bring bit 63 down to 63-(r-1)+s | |
8908 | i.e. by r - (s + 1). */ | |
8909 | value <<= 63 - s; | |
8910 | value >>= r - (s + 1); | |
8911 | } | |
8912 | ||
8913 | rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
8914 | aarch64_set_reg_u64 (cpu, rd, NO_SP, value); | |
8915 | } | |
8916 | ||
8917 | /* The signed versions need to insert sign bits | |
8918 | on the left of the inserted bit field, so we do | |
8919 | much the same as the unsigned version except we | |
8920 | use an arithmetic shift right -- this just means | |
8921 | we need to operate on signed values. */ | |
8922 | ||
8923 | /* 32 bit bitfield move, left of affected sign-extended, right zeroed. */ | |
8924 | /* If r <= s Wd<s-r:0> = Wn<s:r> else Wd<32+s-r,32-r> = Wn<s:0>. */ | |
8925 | static void | |
8926 | sbfm32 (sim_cpu *cpu, uint32_t r, uint32_t s) | |
8927 | { | |
8928 | unsigned rd; | |
8929 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
8930 | /* As per ubfm32 but use an ASR instead of an LSR. */ | |
8931 | int32_t value = aarch64_get_reg_s32 (cpu, rn, NO_SP); | |
8932 | ||
8933 | if (r <= s) | |
8934 | { | |
8935 | value <<= 31 - s; | |
8936 | value >>= 31 + r - s; | |
8937 | } | |
8938 | else | |
8939 | { | |
8940 | value <<= 31 - s; | |
8941 | value >>= r - (s + 1); | |
8942 | } | |
8943 | ||
8944 | rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
8945 | aarch64_set_reg_u64 (cpu, rd, NO_SP, (uint32_t) value); | |
8946 | } | |
8947 | ||
8948 | /* 64 bit bitfield move, left of affected sign-extended, right zeroed. */ | |
8949 | /* If r <= s Wd<s-r:0> = Wn<s:r> else Wd<64+s-r,64-r> = Wn<s:0>. */ | |
8950 | static void | |
8951 | sbfm (sim_cpu *cpu, uint32_t r, uint32_t s) | |
8952 | { | |
8953 | unsigned rd; | |
8954 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
8955 | /* As per ubfm but use an ASR instead of an LSR. */ | |
8956 | int64_t value = aarch64_get_reg_s64 (cpu, rn, NO_SP); | |
8957 | ||
8958 | if (r <= s) | |
8959 | { | |
8960 | value <<= 63 - s; | |
8961 | value >>= 63 + r - s; | |
8962 | } | |
8963 | else | |
8964 | { | |
8965 | value <<= 63 - s; | |
8966 | value >>= r - (s + 1); | |
8967 | } | |
8968 | ||
8969 | rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
8970 | aarch64_set_reg_s64 (cpu, rd, NO_SP, value); | |
8971 | } | |
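/* For instance (illustrative), SXTB W0, W1 is SBFM with r = 0 and
   s = 7: the value is shifted left by 31 - 7 = 24 so the byte's sign
   bit lands in bit 31, then arithmetically shifted right by
   31 + 0 - 7 = 24, replicating that sign bit through W0<31:8>.  */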
8972 | ||
8973 | /* Finally, these versions leave non-affected bits | |
8974 | as is, so we need to generate the bits as per | |
8975 | ubfm and also generate a mask to pick the | |
8976 | bits from the original and computed values. */ | |
8977 | ||
8978 | /* 32 bit bitfield move, non-affected bits left as is. | |
8979 | If r <= s Wd<s-r:0> = Wn<s:r> else Wd<32+s-r,32-r> = Wn<s:0>. */ | |
8980 | static void | |
8981 | bfm32 (sim_cpu *cpu, uint32_t r, uint32_t s) | |
8982 | { | |
8983 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
8984 | uint32_t value = aarch64_get_reg_u32 (cpu, rn, NO_SP); | |
8985 | uint32_t mask = -1; | |
8986 | unsigned rd; | |
8987 | uint32_t value2; | |
8988 | ||
8989 | /* Pick either s+1-r or s+1 consecutive bits out of the original word. */ | |
8990 | if (r <= s) | |
8991 | { | |
8992 | /* 31:...:s:xxx:r:...:0 ==> 31:...:s-r:xxx:0. | |
8993 | We want only bits s:xxx:r at the bottom of the word | |
8994 | so we LSL bit s up to bit 31 i.e. by 31 - s | |
8995 | and then we LSR to bring bit 31 down to bit s - r | |
8996 | i.e. by 31 + r - s. */ | |
8997 | value <<= 31 - s; | |
8998 | value >>= 31 + r - s; | |
8999 | /* The mask must include the same bits. */ | |
9000 | mask <<= 31 - s; | |
9001 | mask >>= 31 + r - s; | |
9002 | } | |
9003 | else | |
9004 | { | |
9005 | /* 31:...:s:xxx:0 ==> 31:...:31-(r-1)+s:xxx:31-(r-1):...:0. | |
9006 | We want only bits s:xxx:0 starting at bit 31-(r-1) | |
9007 | so we LSL bit s up to bit 31 i.e. by 31 - s | |
9008 | and then we LSR to bring bit 31 down to 31-(r-1)+s | |
9009 | i.e. by r - (s + 1). */ | |
9010 | value <<= 31 - s; | |
9011 | value >>= r - (s + 1); | |
9012 | /* The mask must include the same bits. */ | |
9013 | mask <<= 31 - s; | |
9014 | mask >>= r - (s + 1); | |
9015 | } | |
9016 | ||
9017 | rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
9018 | value2 = aarch64_get_reg_u32 (cpu, rd, NO_SP); | |
9019 | ||
9020 | value2 &= ~mask; | |
9021 | value2 |= value; | |
9022 | ||
9023 | aarch64_set_reg_u64 (cpu, rd, NO_SP, value2); | |
9025 | } | |
9026 | ||
9027 | /* 64 bit bitfield move, non-affected bits left as is. | |
9028 | If r <= s Wd<s-r:0> = Wn<s:r> else Wd<64+s-r,64-r> = Wn<s:0>. */ | |
9029 | static void | |
9030 | bfm (sim_cpu *cpu, uint32_t r, uint32_t s) | |
9031 | { | |
9032 | unsigned rd; | |
9033 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
9034 | uint64_t value = aarch64_get_reg_u64 (cpu, rn, NO_SP); | |
9035 | uint64_t mask = 0xffffffffffffffffULL; | |
9036 | ||
9037 | if (r <= s) | |
9038 | { | |
9039 | /* 63:...:s:xxx:r:...:0 ==> 63:...:s-r:xxx:0. | |
9040 | We want only bits s:xxx:r at the bottom of the word | |
9041 | so we LSL bit s up to bit 63 i.e. by 63 - s | |
9042 | and then we LSR to bring bit 63 down to bit s - r | |
9043 | i.e. by 63 + r - s. */ | |
9044 | value <<= 63 - s; | |
9045 | value >>= 63 + r - s; | |
9046 | /* The mask must include the same bits. */ | |
9047 | mask <<= 63 - s; | |
9048 | mask >>= 63 + r - s; | |
9049 | } | |
9050 | else | |
9051 | { | |
9052 | /* 63:...:s:xxx:0 ==> 63:...:63-(r-1)+s:xxx:63-(r-1):...:0 | |
9053 | We want only bits s:xxx:0 starting at bit 63-(r-1) | |
9054 | so we LSL bit s up to bit 63 i.e. by 63 - s | |
9055 | and then we LSR to bring bit 63 down to 63-(r-1)+s | |
9056 | i.e. by r - (s + 1). */ | |
9057 | value <<= 63 - s; | |
9058 | value >>= r - (s + 1); | |
9059 | /* The mask must include the same bits. */ | |
9060 | mask <<= 63 - s; | |
9061 | mask >>= r - (s + 1); | |
9062 | } | |
9063 | ||
9064 | rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
9065 | aarch64_set_reg_u64 | |
9066 | (cpu, rd, NO_SP, (aarch64_get_reg_u64 (cpu, rd, NO_SP) & ~mask) | value); | |
9067 | } | |
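/* Worked example (values illustrative): BFI W0, W1, #8, #4 is BFM with
   r = 24 and s = 3, taking the r > s branch: value <<= 31 - 3 = 28 then
   value >>= 24 - 4 = 20 moves W1<3:0> up to bits <11:8>, and the mask
   undergoes the same shifts to become 0xf00, so only W0<11:8> is
   replaced and every other bit of W0 is preserved.  */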
9068 | ||
9069 | static void | |
9070 | dexBitfieldImmediate (sim_cpu *cpu) | |
9071 | { | |
9072 | /* assert instr[28:23] = 100110 | |
9073 | instr[31] = size : 0 ==> 32 bit, 1 ==> 64 bit | |
9074 | instr[30,29] = op : 0 ==> SBFM, 1 ==> BFM, 2 ==> UBFM, 3 ==> UNALLOC | |
9075 | instr[22] = N : must be 0 for 32 bit, 1 for 64 bit ow UNALLOC | |
9076 | instr[21,16] = immr : 0xxxxx for 32 bit, xxxxxx for 64 bit | |
9077 | instr[15,10] = imms : 0xxxxx for 32 bit, xxxxxx for 64 bit | |
9078 | instr[9,5] = Rn | |
9079 | instr[4,0] = Rd */ | |
9080 | ||
9081 | /* 32 bit operations must have N = 0 or else we have an UNALLOC. */ | |
9082 | uint32_t dispatch; | |
9083 | uint32_t imms; | |
9084 | uint32_t size = uimm (aarch64_get_instr (cpu), 31, 31); | |
9085 | uint32_t N = uimm (aarch64_get_instr (cpu), 22, 22); | |
9086 | /* 32 bit operations must have immr[5] = 0 and imms[5] = 0 | |
9087 | or else we have an UNALLOC. */ | |
9088 | uint32_t immr = uimm (aarch64_get_instr (cpu), 21, 16); | |
9089 | ||
9090 | if (~size & N) | |
9091 | HALT_UNALLOC; | |
9092 | ||
9093 | if (!size && uimm (immr, 5, 5)) | |
9094 | HALT_UNALLOC; | |
9095 | ||
9096 | imms = uimm (aarch64_get_instr (cpu), 15, 10); | |
9097 | if (!size && uimm (imms, 5, 5)) | |
9098 | HALT_UNALLOC; | |
9099 | ||
9100 | /* Switch on combined size and op. */ | |
9101 | dispatch = uimm (aarch64_get_instr (cpu), 31, 29); | |
9102 | switch (dispatch) | |
9103 | { | |
9104 | case 0: sbfm32 (cpu, immr, imms); return; | |
9105 | case 1: bfm32 (cpu, immr, imms); return; | |
9106 | case 2: ubfm32 (cpu, immr, imms); return; | |
9107 | case 4: sbfm (cpu, immr, imms); return; | |
9108 | case 5: bfm (cpu, immr, imms); return; | |
9109 | case 6: ubfm (cpu, immr, imms); return; | |
9110 | default: HALT_UNALLOC; | |
9111 | } | |
9112 | } | |
9113 | ||
9114 | static void | |
9115 | do_EXTR_32 (sim_cpu *cpu) | |
9116 | { | |
9117 | /* instr[31:21] = 00010011100 | |
9118 | instr[20,16] = Rm | |
9119 | instr[15,10] = imms : 0xxxxx for 32 bit | |
9120 | instr[9,5] = Rn | |
9121 | instr[4,0] = Rd */ | |
9122 | unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16); | |
9123 | unsigned imms = uimm (aarch64_get_instr (cpu), 15, 10) & 31; | |
9124 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
9125 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
9126 | uint64_t val1; | |
9127 | uint64_t val2; | |
9128 | ||
9129 | val1 = aarch64_get_reg_u32 (cpu, rm, NO_SP); | |
9130 | val1 >>= imms; | |
9131 | val2 = aarch64_get_reg_u32 (cpu, rn, NO_SP); | |
9132 | val2 <<= (32 - imms); | |
9133 | ||
9133 | /* Mask to 32 bits: when imms == 0 the result must be just Rm, so | |
9133 | the Rn term (shifted up past bit 31) must be discarded. */ | |
9134 | aarch64_set_reg_u64 (cpu, rd, NO_SP, (val1 | val2) & 0xffffffffULL); | |
9135 | } | |
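/* EXTR with Rn == Rm is how ROR (immediate) is assembled; e.g.
   (illustrative) with Wn = Wm = 0x80000001 and imms = 1 the code above
   computes (0x80000001 >> 1) | (0x80000001 << 31), masked to 32 bits,
   giving 0xc0000000: the low bit has rotated into bit 31.  */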
9136 | ||
9137 | static void | |
9138 | do_EXTR_64 (sim_cpu *cpu) | |
9139 | { | |
9140 | /* instr[31:21] = 10010011100 | |
9141 | instr[20,16] = Rm | |
9142 | instr[15,10] = imms | |
9143 | instr[9,5] = Rn | |
9144 | instr[4,0] = Rd */ | |
9145 | unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16); | |
9146 | unsigned imms = uimm (aarch64_get_instr (cpu), 15, 10) & 63; | |
9147 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
9148 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
9149 | uint64_t val; | |
9150 | ||
9151 | val = aarch64_get_reg_u64 (cpu, rm, NO_SP); | |
9152 | val >>= imms; | |
9153 | /* Guard against imms == 0: a shift by 64 is undefined in C, and | |
9153 | the result in that case is just Rm. */ | |
9153 | if (imms) | |
9153 | val |= (aarch64_get_reg_u64 (cpu, rn, NO_SP) << (64 - imms)); | |
9154 | ||
9155 | aarch64_set_reg_u64 (cpu, rd, NO_SP, val); | |
9156 | } | |
9157 | ||
9158 | static void | |
9159 | dexExtractImmediate (sim_cpu *cpu) | |
9160 | { | |
9161 | /* assert instr[28:23] = 100111 | |
9162 | instr[31] = size : 0 ==> 32 bit, 1 ==> 64 bit | |
9163 | instr[30,29] = op21 : 0 ==> EXTR, 1,2,3 ==> UNALLOC | |
9164 | instr[22] = N : must be 0 for 32 bit, 1 for 64 bit or UNALLOC | |
9165 | instr[21] = op0 : must be 0 or UNALLOC | |
9166 | instr[20,16] = Rm | |
9167 | instr[15,10] = imms : 0xxxxx for 32 bit, xxxxxx for 64 bit | |
9168 | instr[9,5] = Rn | |
9169 | instr[4,0] = Rd */ | |
9170 | ||
9171 | /* 32 bit operations must have N = 0 or else we have an UNALLOC. */ | |
9172 | /* 64 bit operations must have N = 1 or else we have an UNALLOC. */ | |
9173 | uint32_t dispatch; | |
9174 | uint32_t size = uimm (aarch64_get_instr (cpu), 31, 31); | |
9175 | uint32_t N = uimm (aarch64_get_instr (cpu), 22, 22); | |
9176 | /* 32 bit operations must have imms[5] = 0 | |
9177 | or else we have an UNALLOC. */ | |
9178 | uint32_t imms = uimm (aarch64_get_instr (cpu), 15, 10); | |
9179 | ||
9180 | if (size ^ N) | |
9181 | HALT_UNALLOC; | |
9182 | ||
9183 | if (!size && uimm (imms, 5, 5)) | |
9184 | HALT_UNALLOC; | |
9185 | ||
9186 | /* Switch on combined size and op. */ | |
9187 | dispatch = uimm (aarch64_get_instr (cpu), 31, 29); | |
9188 | ||
9189 | if (dispatch == 0) | |
9190 | do_EXTR_32 (cpu); | |
9191 | ||
9192 | else if (dispatch == 4) | |
9193 | do_EXTR_64 (cpu); | |
9194 | ||
9195 | else if (dispatch == 1) | |
9196 | HALT_NYI; | |
9197 | else | |
9198 | HALT_UNALLOC; | |
9199 | } | |
9200 | ||
9201 | static void | |
9202 | dexDPImm (sim_cpu *cpu) | |
9203 | { | |
9204 | /* uint32_t group = dispatchGroup (aarch64_get_instr (cpu)); | |
9205 | assert group == GROUP_DPIMM_1000 || group == GROUP_DPIMM_1001 | |
9206 | bits [25,23] of a DPImm are the secondary dispatch vector. */ | |
9207 | uint32_t group2 = dispatchDPImm (aarch64_get_instr (cpu)); | |
9208 | ||
9209 | switch (group2) | |
9210 | { | |
9211 | case DPIMM_PCADR_000: | |
9212 | case DPIMM_PCADR_001: | |
9213 | dexPCRelAddressing (cpu); | |
9214 | return; | |
9215 | ||
9216 | case DPIMM_ADDSUB_010: | |
9217 | case DPIMM_ADDSUB_011: | |
9218 | dexAddSubtractImmediate (cpu); | |
9219 | return; | |
9220 | ||
9221 | case DPIMM_LOG_100: | |
9222 | dexLogicalImmediate (cpu); | |
9223 | return; | |
9224 | ||
9225 | case DPIMM_MOV_101: | |
9226 | dexMoveWideImmediate (cpu); | |
9227 | return; | |
9228 | ||
9229 | case DPIMM_BITF_110: | |
9230 | dexBitfieldImmediate (cpu); | |
9231 | return; | |
9232 | ||
9233 | case DPIMM_EXTR_111: | |
9234 | dexExtractImmediate (cpu); | |
9235 | return; | |
9236 | ||
9237 | default: | |
9238 | /* Should never reach here. */ | |
9239 | HALT_NYI; | |
9240 | } | |
9241 | } | |
9242 | ||
9243 | static void | |
9244 | dexLoadUnscaledImmediate (sim_cpu *cpu) | |
9245 | { | |
9246 | /* instr[29,24] == 111_00 | |
9247 | instr[21] == 0 | |
9248 | instr[11,10] == 00 | |
9249 | instr[31,30] = size | |
9250 | instr[26] = V | |
9251 | instr[23,22] = opc | |
9252 | instr[20,12] = simm9 | |
9253 | instr[9,5] = rn may be SP. */ | |
9254 | /* unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0); */ | |
9255 | uint32_t V = uimm (aarch64_get_instr (cpu), 26, 26); | |
9256 | uint32_t dispatch = ( (uimm (aarch64_get_instr (cpu), 31, 30) << 2) | |
9257 | | uimm (aarch64_get_instr (cpu), 23, 22)); | |
9258 | int32_t imm = simm32 (aarch64_get_instr (cpu), 20, 12); | |
9259 | ||
9260 | if (!V) | |
9261 | { | |
9262 | /* GReg operations. */ | |
9263 | switch (dispatch) | |
9264 | { | |
9265 | case 0: sturb (cpu, imm); return; | |
9266 | case 1: ldurb32 (cpu, imm); return; | |
9267 | case 2: ldursb64 (cpu, imm); return; | |
9268 | case 3: ldursb32 (cpu, imm); return; | |
9269 | case 4: sturh (cpu, imm); return; | |
9270 | case 5: ldurh32 (cpu, imm); return; | |
9271 | case 6: ldursh64 (cpu, imm); return; | |
9272 | case 7: ldursh32 (cpu, imm); return; | |
9273 | case 8: stur32 (cpu, imm); return; | |
9274 | case 9: ldur32 (cpu, imm); return; | |
9275 | case 10: ldursw (cpu, imm); return; | |
9276 | case 12: stur64 (cpu, imm); return; | |
9277 | case 13: ldur64 (cpu, imm); return; | |
9278 | ||
9279 | case 14: | |
9280 | /* PRFUM NYI. */ | |
9281 | HALT_NYI; | |
9282 | ||
9283 | default: | |
9284 | case 11: | |
9285 | case 15: | |
9286 | HALT_UNALLOC; | |
9287 | } | |
9288 | } | |
9289 | ||
9290 | /* FReg operations. */ | |
9291 | switch (dispatch) | |
9292 | { | |
9293 | case 2: fsturq (cpu, imm); return; | |
9294 | case 3: fldurq (cpu, imm); return; | |
9295 | case 8: fsturs (cpu, imm); return; | |
9296 | case 9: fldurs (cpu, imm); return; | |
9297 | case 12: fsturd (cpu, imm); return; | |
9298 | case 13: fldurd (cpu, imm); return; | |
9299 | ||
9300 | case 0: /* STUR 8 bit FP. */ | |
9301 | case 1: /* LDUR 8 bit FP. */ | |
9302 | case 4: /* STUR 16 bit FP. */ | |
9303 | case 5: /* LDUR 16 bit FP. */ | |
9304 | HALT_NYI; | |
9305 | ||
9306 | default: | |
9307 | case 6: | |
9308 | case 7: | |
9309 | case 10: | |
9310 | case 11: | |
9311 | case 14: | |
9312 | case 15: | |
9313 | HALT_UNALLOC; | |
9314 | } | |
9315 | } | |
9316 | ||
9317 | /* N.B. A preliminary note regarding all the ldrs<x>32 | |
9318 | instructions | |
9319 | ||
9320 | The signed value loaded by these instructions is cast to unsigned | |
9321 | before being assigned via aarch64_set_reg_u64 (cpu, N) i.e. to the | |
9322 | 64 bit element of the GReg union.  This performs a 32 bit sign extension | |
9323 | (as required) but avoids 64 bit sign extension, thus ensuring that the | |
9324 | top half of the register word is zero.  This is what the spec demands | |
9325 | when a 32 bit load occurs. */ | |
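/* Concretely (illustrative values): loading the byte 0x80 with ldrsb32
   yields the int8_t value -128; the cast to uint32_t makes that
   0xffffff80, which then zero-extends into the 64 bit register as
   0x00000000ffffff80 -- a 32 bit sign extension with a clear top half.  */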
9326 | ||
9327 | /* 32 bit load sign-extended byte scaled unsigned 12 bit. */ | |
9328 | static void | |
9329 | ldrsb32_abs (sim_cpu *cpu, uint32_t offset) | |
9330 | { | |
9331 | unsigned int rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
9332 | unsigned int rt = uimm (aarch64_get_instr (cpu), 4, 0); | |
9333 | ||
9334 | /* The target register may not be SP but the source may be. | |
9335 | There is no scaling required for a byte load. | |
9335 | Cast to uint32_t, per the note above, so the top half stays zero. */ | |
9336 | uint64_t address = aarch64_get_reg_u64 (cpu, rn, SP_OK) + offset; | |
9337 | aarch64_set_reg_u64 (cpu, rt, NO_SP, | |
9338 | (uint32_t) aarch64_get_mem_s8 (cpu, address)); | |
9339 | } | |
9340 | ||
9341 | /* 32 bit load sign-extended byte scaled or unscaled zero- | |
9342 | or sign-extended 32-bit register offset. */ | |
9343 | static void | |
9344 | ldrsb32_scale_ext (sim_cpu *cpu, Scaling scaling, Extension extension) | |
9345 | { | |
9346 | unsigned int rm = uimm (aarch64_get_instr (cpu), 20, 16); | |
9347 | unsigned int rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
9348 | unsigned int rt = uimm (aarch64_get_instr (cpu), 4, 0); | |
9349 | ||
9350 | /* rn may reference SP, rm and rt must reference ZR. */ | |
9351 | ||
9352 | uint64_t address = aarch64_get_reg_u64 (cpu, rn, SP_OK); | |
9353 | int64_t displacement = extend (aarch64_get_reg_u32 (cpu, rm, NO_SP), | |
9354 | extension); | |
9355 | ||
9356 | /* There is no scaling required for a byte load. */ | |
9357 | aarch64_set_reg_u64 | |
9358 | (cpu, rt, NO_SP, (uint32_t) aarch64_get_mem_s8 (cpu, address | |
9359 | + displacement)); | |
9360 | } | |
9361 | ||
9362 | /* 32 bit load sign-extended byte unscaled signed 9 bit with | |
9363 | pre- or post-writeback. */ | |
9364 | static void | |
9365 | ldrsb32_wb (sim_cpu *cpu, int32_t offset, WriteBack wb) | |
9366 | { | |
9367 | uint64_t address; | |
9368 | unsigned int rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
9369 | unsigned int rt = uimm (aarch64_get_instr (cpu), 4, 0); | |
9370 | ||
9371 | if (rn == rt && wb != NoWriteBack) | |
9372 | HALT_UNALLOC; | |
9373 | ||
9374 | address = aarch64_get_reg_u64 (cpu, rn, SP_OK); | |
9375 | ||
9376 | if (wb == Pre) | |
9377 | address += offset; | |
9378 | ||
9379 | aarch64_set_reg_u64 (cpu, rt, NO_SP, | |
9380 | (uint32_t) aarch64_get_mem_s8 (cpu, address)); | |
9381 | ||
9382 | if (wb == Post) | |
9383 | address += offset; | |
9384 | ||
9385 | if (wb != NoWriteBack) | |
9386 | aarch64_set_reg_u64 (cpu, rn, SP_OK, address); /* rn may be SP. */ | |
9387 | } | |
9388 | ||
9389 | /* 8 bit store scaled. */ | |
9390 | static void | |
9391 | fstrb_abs (sim_cpu *cpu, uint32_t offset) | |
9392 | { | |
9393 | unsigned st = uimm (aarch64_get_instr (cpu), 4, 0); | |
9394 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
9395 | ||
9396 | aarch64_set_mem_u8 (cpu, | |
9397 | aarch64_get_reg_u64 (cpu, rn, SP_OK) + offset, | |
9398 | aarch64_get_vec_u8 (cpu, st, 0)); | |
9399 | } | |
9400 | ||
9401 | /* 8 bit store scaled or unscaled zero- or | |
9402 | sign-extended 8-bit register offset. */ | |
9403 | static void | |
9404 | fstrb_scale_ext (sim_cpu *cpu, Scaling scaling, Extension extension) | |
9405 | { | |
9406 | unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16); | |
9407 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
9408 | unsigned st = uimm (aarch64_get_instr (cpu), 4, 0); | |
9409 | ||
9410 | uint64_t address = aarch64_get_reg_u64 (cpu, rn, SP_OK); | |
9411 | int64_t extended = extend (aarch64_get_reg_u32 (cpu, rm, NO_SP), | |
9412 | extension); | |
9413 | /* Scale by the 8 bit transfer size (a no-op), as the other fstr* routines scale by theirs. */ | |
9413 | uint64_t displacement = OPT_SCALE (extended, 8, scaling); | |
9414 | ||
9415 | aarch64_set_mem_u8 | |
9416 | (cpu, address + displacement, aarch64_get_vec_u8 (cpu, st, 0)); | |
9417 | } | |
9418 | ||
9419 | /* 16 bit store scaled. */ | |
9420 | static void | |
9421 | fstrh_abs (sim_cpu *cpu, uint32_t offset) | |
9422 | { | |
9423 | unsigned st = uimm (aarch64_get_instr (cpu), 4, 0); | |
9424 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
9425 | ||
9426 | aarch64_set_mem_u16 | |
9427 | (cpu, | |
9428 | aarch64_get_reg_u64 (cpu, rn, SP_OK) + SCALE (offset, 16), | |
9429 | aarch64_get_vec_u16 (cpu, st, 0)); | |
9430 | } | |
9431 | ||
9432 | /* 16 bit store scaled or unscaled zero- | |
9433 | or sign-extended 16-bit register offset. */ | |
9434 | static void | |
9435 | fstrh_scale_ext (sim_cpu *cpu, Scaling scaling, Extension extension) | |
9436 | { | |
9437 | unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16); | |
9438 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
9439 | unsigned st = uimm (aarch64_get_instr (cpu), 4, 0); | |
9440 | ||
9441 | uint64_t address = aarch64_get_reg_u64 (cpu, rn, SP_OK); | |
9442 | int64_t extended = extend (aarch64_get_reg_u32 (cpu, rm, NO_SP), | |
9443 | extension); | |
9444 | uint64_t displacement = OPT_SCALE (extended, 16, scaling); | |
9445 | ||
9446 | aarch64_set_mem_u16 | |
9447 | (cpu, address + displacement, aarch64_get_vec_u16 (cpu, st, 0)); | |
9448 | } | |
9449 | ||
9450 | /* 32 bit store scaled unsigned 12 bit. */ | |
9451 | static void | |
9452 | fstrs_abs (sim_cpu *cpu, uint32_t offset) | |
9453 | { | |
9454 | unsigned st = uimm (aarch64_get_instr (cpu), 4, 0); | |
9455 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
9456 | ||
9457 | aarch64_set_mem_float | |
9458 | (cpu, | |
9459 | aarch64_get_reg_u64 (cpu, rn, SP_OK) + SCALE (offset, 32), | |
9460 | aarch64_get_FP_float (cpu, st)); | |
9461 | } | |
9462 | ||
9463 | /* 32 bit store unscaled signed 9 bit with pre- or post-writeback. */ | |
9464 | static void | |
9465 | fstrs_wb (sim_cpu *cpu, int32_t offset, WriteBack wb) | |
9466 | { | |
9467 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
9468 | unsigned st = uimm (aarch64_get_instr (cpu), 4, 0); | |
9469 | ||
9470 | uint64_t address = aarch64_get_reg_u64 (cpu, rn, SP_OK); | |
9471 | ||
9472 | if (wb != Post) | |
9473 | address += offset; | |
9474 | ||
9475 | aarch64_set_mem_float (cpu, address, aarch64_get_FP_float (cpu, st)); | |
9476 | ||
9477 | if (wb == Post) | |
9478 | address += offset; | |
9479 | ||
9480 | if (wb != NoWriteBack) | |
9481 | aarch64_set_reg_u64 (cpu, rn, SP_OK, address); | |
9482 | } | |
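/* E.g. (illustrative) STR S0, [X1, #8]! takes the wb != Post branch,
   storing at X1 + 8 before writing X1 + 8 back, while STR S0, [X1], #8
   stores at the original X1 and only then advances it; either way X1
   ends up incremented by 8.  */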
9483 | ||
9484 | /* 32 bit store scaled or unscaled zero- | |
9485 | or sign-extended 32-bit register offset. */ | |
9486 | static void | |
9487 | fstrs_scale_ext (sim_cpu *cpu, Scaling scaling, Extension extension) | |
9488 | { | |
9489 | unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16); | |
9490 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
9491 | unsigned st = uimm (aarch64_get_instr (cpu), 4, 0); | |
9492 | ||
9493 | uint64_t address = aarch64_get_reg_u64 (cpu, rn, SP_OK); | |
9494 | int64_t extended = extend (aarch64_get_reg_u32 (cpu, rm, NO_SP), | |
9495 | extension); | |
9496 | uint64_t displacement = OPT_SCALE (extended, 32, scaling); | |
9497 | ||
9498 | aarch64_set_mem_float | |
9499 | (cpu, address + displacement, aarch64_get_FP_float (cpu, st)); | |
9500 | } | |
9501 | ||
9502 | /* 64 bit store scaled unsigned 12 bit. */ | |
9503 | static void | |
9504 | fstrd_abs (sim_cpu *cpu, uint32_t offset) | |
9505 | { | |
9506 | unsigned st = uimm (aarch64_get_instr (cpu), 4, 0); | |
9507 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
9508 | ||
9509 | aarch64_set_mem_double | |
9510 | (cpu, | |
9511 | aarch64_get_reg_u64 (cpu, rn, SP_OK) + SCALE (offset, 64), | |
9512 | aarch64_get_FP_double (cpu, st)); | |
9513 | } | |
9514 | ||
9515 | /* 64 bit store unscaled signed 9 bit with pre- or post-writeback. */ | |
9516 | static void | |
9517 | fstrd_wb (sim_cpu *cpu, int32_t offset, WriteBack wb) | |
9518 | { | |
9519 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
9520 | unsigned st = uimm (aarch64_get_instr (cpu), 4, 0); | |
9521 | ||
9522 | uint64_t address = aarch64_get_reg_u64 (cpu, rn, SP_OK); | |
9523 | ||
9524 | if (wb != Post) | |
9525 | address += offset; | |
9526 | ||
9527 | aarch64_set_mem_double (cpu, address, aarch64_get_FP_double (cpu, st)); | |
9528 | ||
9529 | if (wb == Post) | |
9530 | address += offset; | |
9531 | ||
9532 | if (wb != NoWriteBack) | |
9533 | aarch64_set_reg_u64 (cpu, rn, SP_OK, address); | |
9534 | } | |
9535 | ||
9536 | /* 64 bit store scaled or unscaled zero- | |
9537 | or sign-extended 32-bit register offset. */ | |
9538 | static void | |
9539 | fstrd_scale_ext (sim_cpu *cpu, Scaling scaling, Extension extension) | |
9540 | { | |
9541 | unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16); | |
9542 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
9543 | unsigned st = uimm (aarch64_get_instr (cpu), 4, 0); | |
9544 | ||
9545 | uint64_t address = aarch64_get_reg_u64 (cpu, rn, SP_OK); | |
9546 | int64_t extended = extend (aarch64_get_reg_u32 (cpu, rm, NO_SP), | |
9547 | extension); | |
9548 | uint64_t displacement = OPT_SCALE (extended, 64, scaling); | |
9549 | ||
9550 | aarch64_set_mem_double | |
9551 | (cpu, address + displacement, aarch64_get_FP_double (cpu, st)); | |
9552 | } | |
9553 | ||
9554 | /* 128 bit store scaled unsigned 12 bit. */ | |
9555 | static void | |
9556 | fstrq_abs (sim_cpu *cpu, uint32_t offset) | |
9557 | { | |
9558 | FRegister a; | |
9559 | unsigned st = uimm (aarch64_get_instr (cpu), 4, 0); | |
9560 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
9561 | uint64_t addr; | |
9562 | ||
9563 | aarch64_get_FP_long_double (cpu, st, & a); | |
9564 | ||
9565 | addr = aarch64_get_reg_u64 (cpu, rn, SP_OK) + SCALE (offset, 128); | |
9566 | aarch64_set_mem_long_double (cpu, addr, a); | |
9567 | } | |
9568 | ||
9569 | /* 128 bit store unscaled signed 9 bit with pre- or post-writeback. */ | |
9570 | static void | |
9571 | fstrq_wb (sim_cpu *cpu, int32_t offset, WriteBack wb) | |
9572 | { | |
9573 | FRegister a; | |
9574 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
9575 | unsigned st = uimm (aarch64_get_instr (cpu), 4, 0); | |
9576 | uint64_t address = aarch64_get_reg_u64 (cpu, rn, SP_OK); | |
9577 | ||
9578 | if (wb != Post) | |
9579 | address += offset; | |
9580 | ||
9581 | aarch64_get_FP_long_double (cpu, st, & a); | |
9582 | aarch64_set_mem_long_double (cpu, address, a); | |
9583 | ||
9584 | if (wb == Post) | |
9585 | address += offset; | |
9586 | ||
9587 | if (wb != NoWriteBack) | |
9588 | aarch64_set_reg_u64 (cpu, rn, SP_OK, address); | |
9589 | } | |
9590 | ||
9591 | /* 128 bit store scaled or unscaled zero- | |
9592 | or sign-extended 32-bit register offset. */ | |
9593 | static void | |
9594 | fstrq_scale_ext (sim_cpu *cpu, Scaling scaling, Extension extension) | |
9595 | { | |
9596 | unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16); | |
9597 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
9598 | unsigned st = uimm (aarch64_get_instr (cpu), 4, 0); | |
9599 | ||
9600 | uint64_t address = aarch64_get_reg_u64 (cpu, rn, SP_OK); | |
9601 | int64_t extended = extend (aarch64_get_reg_u32 (cpu, rm, NO_SP), | |
9602 | extension); | |
9603 | uint64_t displacement = OPT_SCALE (extended, 128, scaling); | |
9604 | ||
9605 | FRegister a; | |
9606 | ||
9607 | aarch64_get_FP_long_double (cpu, st, & a); | |
9608 | aarch64_set_mem_long_double (cpu, address + displacement, a); | |
9609 | } | |
9610 | ||
9611 | static void | |
9612 | dexLoadImmediatePrePost (sim_cpu *cpu) | |
9613 | { | |
9614 | /* instr[29,24] == 111_00 | |
9615 | instr[21] == 0 | |
9616 | instr[10] == 1 | |
9617 | instr[31,30] = size | |
9618 | instr[26] = V | |
9619 | instr[23,22] = opc | |
9620 | instr[20,12] = simm9 | |
9621 | instr[11] = wb : 0 ==> Post, 1 ==> Pre | |
9622 | instr[9,5] = rn may be SP. */ | |
9623 | /* unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0); */ | |
9624 | uint32_t V = uimm (aarch64_get_instr (cpu), 26, 26); | |
9625 | uint32_t dispatch = ( (uimm (aarch64_get_instr (cpu), 31, 30) << 2) | |
9626 | | uimm (aarch64_get_instr (cpu), 23, 22)); | |
9627 | int32_t imm = simm32 (aarch64_get_instr (cpu), 20, 12); | |
9628 | WriteBack wb = writeback (aarch64_get_instr (cpu), 11); | |
9629 | ||
9630 | if (!V) | |
9631 | { | |
9632 | /* GReg operations. */ | |
9633 | switch (dispatch) | |
9634 | { | |
9635 | case 0: strb_wb (cpu, imm, wb); return; | |
9636 | case 1: ldrb32_wb (cpu, imm, wb); return; | |
9637 | case 2: ldrsb_wb (cpu, imm, wb); return; | |
9638 | case 3: ldrsb32_wb (cpu, imm, wb); return; | |
9639 | case 4: strh_wb (cpu, imm, wb); return; | |
9640 | case 5: ldrh32_wb (cpu, imm, wb); return; | |
9641 | case 6: ldrsh64_wb (cpu, imm, wb); return; | |
9642 | case 7: ldrsh32_wb (cpu, imm, wb); return; | |
9643 | case 8: str32_wb (cpu, imm, wb); return; | |
9644 | case 9: ldr32_wb (cpu, imm, wb); return; | |
9645 | case 10: ldrsw_wb (cpu, imm, wb); return; | |
9646 | case 12: str_wb (cpu, imm, wb); return; | |
9647 | case 13: ldr_wb (cpu, imm, wb); return; | |
9648 | ||
9649 | default: | |
9650 | case 11: | |
9651 | case 14: | |
9652 | case 15: | |
9653 | HALT_UNALLOC; | |
9654 | } | |
9655 | } | |
9656 | ||
9657 | /* FReg operations. */ | |
9658 | switch (dispatch) | |
9659 | { | |
9660 | case 2: fstrq_wb (cpu, imm, wb); return; | |
9661 | case 3: fldrq_wb (cpu, imm, wb); return; | |
9662 | case 8: fstrs_wb (cpu, imm, wb); return; | |
9663 | case 9: fldrs_wb (cpu, imm, wb); return; | |
9664 | case 12: fstrd_wb (cpu, imm, wb); return; | |
9665 | case 13: fldrd_wb (cpu, imm, wb); return; | |
9666 | ||
9667 | case 0: /* STR 8 bit FP. */ | |
9668 | case 1: /* LDR 8 bit FP. */ | |
9669 | case 4: /* STR 16 bit FP. */ | |
9670 | case 5: /* LDR 16 bit FP. */ | |
9671 | HALT_NYI; | |
9672 | ||
9673 | default: | |
9674 | case 6: | |
9675 | case 7: | |
9676 | case 10: | |
9677 | case 11: | |
9678 | case 14: | |
9679 | case 15: | |
9680 | HALT_UNALLOC; | |
9681 | } | |
9682 | } | |
9683 | ||
9684 | static void | |
9685 | dexLoadRegisterOffset (sim_cpu *cpu) | |
9686 | { | |
9687 | /* instr[31,30] = size | |
9688 | instr[29,27] = 111 | |
9689 | instr[26] = V | |
9690 | instr[25,24] = 00 | |
9691 | instr[23,22] = opc | |
9692 | instr[21] = 1 | |
9693 | instr[20,16] = rm | |
9694 | instr[15,13] = option : 010 ==> UXTW, 011 ==> UXTX/LSL, | |
9695 | 110 ==> SXTW, 111 ==> SXTX, | |
9696 | ow ==> RESERVED | |
9697 | instr[12] = scaled | |
9698 | instr[11,10] = 10 | |
9699 | instr[9,5] = rn | |
9700 | instr[4,0] = rt. */ | |
9701 | ||
9702 | uint32_t V = uimm (aarch64_get_instr (cpu), 26,26); | |
9703 | uint32_t dispatch = ( (uimm (aarch64_get_instr (cpu), 31, 30) << 2) | |
9704 | | uimm (aarch64_get_instr (cpu), 23, 22)); | |
9705 | Scaling scale = scaling (aarch64_get_instr (cpu), 12); | |
9706 | Extension extensionType = extension (aarch64_get_instr (cpu), 13); | |
9707 | ||
9708 | /* Check for illegal extension types. */ | |
9709 | if (uimm (extensionType, 1, 1) == 0) | |
9710 | HALT_UNALLOC; | |
9711 | ||
9712 | if (extensionType == UXTX || extensionType == SXTX) | |
9713 | extensionType = NoExtension; | |
9714 | ||
9715 | if (!V) | |
9716 | { | |
9717 | /* GReg operations. */ | |
9718 | switch (dispatch) | |
9719 | { | |
9720 | case 0: strb_scale_ext (cpu, scale, extensionType); return; | |
9721 | case 1: ldrb32_scale_ext (cpu, scale, extensionType); return; | |
9722 | case 2: ldrsb_scale_ext (cpu, scale, extensionType); return; | |
9723 | case 3: ldrsb32_scale_ext (cpu, scale, extensionType); return; | |
9724 | case 4: strh_scale_ext (cpu, scale, extensionType); return; | |
9725 | case 5: ldrh32_scale_ext (cpu, scale, extensionType); return; | |
9726 | case 6: ldrsh_scale_ext (cpu, scale, extensionType); return; | |
9727 | case 7: ldrsh32_scale_ext (cpu, scale, extensionType); return; | |
9728 | case 8: str32_scale_ext (cpu, scale, extensionType); return; | |
9729 | case 9: ldr32_scale_ext (cpu, scale, extensionType); return; | |
9730 | case 10: ldrsw_scale_ext (cpu, scale, extensionType); return; | |
9731 | case 12: str_scale_ext (cpu, scale, extensionType); return; | |
9732 | case 13: ldr_scale_ext (cpu, scale, extensionType); return; | |
9733 | case 14: prfm_scale_ext (cpu, scale, extensionType); return; | |
9734 | ||
9735 | default: | |
9736 | case 11: | |
9737 | case 15: | |
9738 | HALT_UNALLOC; | |
9739 | } | |
9740 | } | |
9741 | ||
9742 | /* FReg operations. */ | |
9743 | switch (dispatch) | |
9744 | { | |
9745 | case 1: /* LDR 8 bit FP. */ | |
9746 | HALT_NYI; | |
9747 | case 3: fldrq_scale_ext (cpu, scale, extensionType); return; | |
9748 | case 5: /* LDR 16 bit FP. */ | |
9749 | HALT_NYI; | |
9750 | case 9: fldrs_scale_ext (cpu, scale, extensionType); return; | |
9751 | case 13: fldrd_scale_ext (cpu, scale, extensionType); return; | |
9752 | ||
9753 | case 0: fstrb_scale_ext (cpu, scale, extensionType); return; | |
9754 | case 2: fstrq_scale_ext (cpu, scale, extensionType); return; | |
9755 | case 4: fstrh_scale_ext (cpu, scale, extensionType); return; | |
9756 | case 8: fstrs_scale_ext (cpu, scale, extensionType); return; | |
9757 | case 12: fstrd_scale_ext (cpu, scale, extensionType); return; | |
9758 | ||
9759 | default: | |
9760 | case 6: | |
9761 | case 7: | |
9762 | case 10: | |
9763 | case 11: | |
9764 | case 14: | |
9765 | case 15: | |
9766 | HALT_UNALLOC; | |
9767 | } | |
9768 | } | |
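| ||
| /* Illustrative sketch, ours: the address arithmetic behind the | |
| *_scale_ext helpers above.  A 32-bit index register is zero- or | |
| sign-extended (UXTW/SXTW) and, when the S (scaled) bit is set, | |
| shifted left by log2 of the access size before being added to Rn.  */ | |
| static uint64_t ATTRIBUTE_UNUSED | |
| example_extended_offset (uint32_t wm, int sign_extend, int scaled, | |
| unsigned size_log2) | |
| { | |
| int64_t offset = sign_extend ? (int64_t) (int32_t) wm : (int64_t) wm; | |
| return (uint64_t) (scaled ? offset << size_log2 : offset); | |
| } | |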
9769 | ||
9770 | static void | |
9771 | dexLoadUnsignedImmediate (sim_cpu *cpu) | |
9772 | { | |
9773 | /* assert instr[29,24] == 111_01 | |
9774 | instr[31,30] = size | |
9775 | instr[26] = V | |
9776 | instr[23,22] = opc | |
9777 | instr[21,10] = uimm12 : unsigned immediate offset | |
9778 | instr[9,5] = rn may be SP. */ | |
9779 | /* unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0); */ | |
9780 | uint32_t V = uimm (aarch64_get_instr (cpu), 26,26); | |
9781 | uint32_t dispatch = ( (uimm (aarch64_get_instr (cpu), 31, 30) << 2) | |
9782 | | uimm (aarch64_get_instr (cpu), 23, 22)); | |
9783 | uint32_t imm = uimm (aarch64_get_instr (cpu), 21, 10); | |
9784 | ||
9785 | if (!V) | |
9786 | { | |
9787 | /* GReg operations. */ | |
9788 | switch (dispatch) | |
9789 | { | |
9790 | case 0: strb_abs (cpu, imm); return; | |
9791 | case 1: ldrb32_abs (cpu, imm); return; | |
9792 | case 2: ldrsb_abs (cpu, imm); return; | |
9793 | case 3: ldrsb32_abs (cpu, imm); return; | |
9794 | case 4: strh_abs (cpu, imm); return; | |
9795 | case 5: ldrh32_abs (cpu, imm); return; | |
9796 | case 6: ldrsh_abs (cpu, imm); return; | |
9797 | case 7: ldrsh32_abs (cpu, imm); return; | |
9798 | case 8: str32_abs (cpu, imm); return; | |
9799 | case 9: ldr32_abs (cpu, imm); return; | |
9800 | case 10: ldrsw_abs (cpu, imm); return; | |
9801 | case 12: str_abs (cpu, imm); return; | |
9802 | case 13: ldr_abs (cpu, imm); return; | |
9803 | case 14: prfm_abs (cpu, imm); return; | |
9804 | ||
9805 | default: | |
9806 | case 11: | |
9807 | case 15: | |
9808 | HALT_UNALLOC; | |
9809 | } | |
9810 | } | |
9811 | ||
9812 | /* FReg operations. */ | |
9813 | switch (dispatch) | |
9814 | { | |
9815 | case 3: fldrq_abs (cpu, imm); return; | |
9816 | case 9: fldrs_abs (cpu, imm); return; | |
9817 | case 13: fldrd_abs (cpu, imm); return; | |
9818 | ||
9819 | case 0: fstrb_abs (cpu, imm); return; | |
9820 | case 2: fstrq_abs (cpu, imm); return; | |
9821 | case 4: fstrh_abs (cpu, imm); return; | |
9822 | case 8: fstrs_abs (cpu, imm); return; | |
9823 | case 12: fstrd_abs (cpu, imm); return; | |
9824 | ||
9825 | case 1: /* LDR 8 bit FP. */ | |
9826 | case 5: /* LDR 16 bit FP. */ | |
9827 | HALT_NYI; | |
9828 | ||
9829 | default: | |
9830 | case 6: | |
9831 | case 7: | |
9832 | case 10: | |
9833 | case 11: | |
9834 | case 14: | |
9835 | case 15: | |
9836 | HALT_UNALLOC; | |
9837 | } | |
9838 | } | |
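| ||
| /* Illustrative sketch, ours: the unsigned-immediate forms encode | |
| the offset as a 12-bit unsigned multiple of the access size, so | |
| the byte offset of a 64-bit LDR is imm12 << 3 (0 to 32760).  */ | |
| static uint64_t ATTRIBUTE_UNUSED | |
| example_uimm12_byte_offset (uint32_t imm12, unsigned size_log2) | |
| { | |
| return (uint64_t) imm12 << size_log2; | |
| } | |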
9839 | ||
9840 | static void | |
9841 | dexLoadExclusive (sim_cpu *cpu) | |
9842 | { | |
9843 | /* assert instr[29:24] = 001000; | |
9844 | instr[31,30] = size | |
9845 | instr[23] = 0 if exclusive | |
9846 | instr[22] = L : 1 if load, 0 if store | |
9847 | instr[21] = 1 if pair | |
9848 | instr[20,16] = Rs | |
9849 | instr[15] = o0 : 1 if ordered | |
9850 | instr[14,10] = Rt2 | |
9851 | instr[9,5] = Rn | |
9852 | instr[4,0] = Rt. */ | |
9853 | ||
9854 | switch (uimm (aarch64_get_instr (cpu), 22, 21)) | |
9855 | { | |
9856 | case 2: ldxr (cpu); return; | |
9857 | case 0: stxr (cpu); return; | |
9858 | default: HALT_NYI; | |
9859 | } | |
9860 | } | |
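| ||
| /* Illustrative sketch, ours: of the instr[22,21] = L:pair decode | |
| above only the non-pair forms are simulated, LDXR (10) and | |
| STXR (00); the pair forms (x1) halt as not-yet-implemented.  */ | |
| static int ATTRIBUTE_UNUSED | |
| example_is_simple_exclusive_load (uint32_t instr) | |
| { | |
| return uimm (instr, 22, 22) == 1 && uimm (instr, 21, 21) == 0; | |
| } | |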
9861 | ||
9862 | static void | |
9863 | dexLoadOther (sim_cpu *cpu) | |
9864 | { | |
9865 | uint32_t dispatch; | |
9866 | ||
9867 | /* instr[29,25] = 111_0 | |
9868 | instr[24] == 0 ==> dispatch, 1 ==> ldst reg unsigned immediate | |
9869 | instr[21:11,10] is the secondary dispatch. */ | |
9870 | if (uimm (aarch64_get_instr (cpu), 24, 24)) | |
9871 | { | |
9872 | dexLoadUnsignedImmediate (cpu); | |
9873 | return; | |
9874 | } | |
9875 | ||
9876 | dispatch = ( (uimm (aarch64_get_instr (cpu), 21, 21) << 2) | |
9877 | | uimm (aarch64_get_instr (cpu), 11, 10)); | |
9878 | switch (dispatch) | |
9879 | { | |
9880 | case 0: dexLoadUnscaledImmediate (cpu); return; | |
9881 | case 1: dexLoadImmediatePrePost (cpu); return; | |
9882 | case 3: dexLoadImmediatePrePost (cpu); return; | |
9883 | case 6: dexLoadRegisterOffset (cpu); return; | |
9884 | ||
9885 | default: | |
9886 | case 2: | |
9887 | case 4: | |
9888 | case 5: | |
9889 | case 7: | |
9890 | HALT_NYI; | |
9891 | } | |
9892 | } | |
9893 | ||
9894 | static void | |
9895 | store_pair_u32 (sim_cpu *cpu, int32_t offset, WriteBack wb) | |
9896 | { | |
9897 | unsigned rn = uimm (aarch64_get_instr (cpu), 14, 10); | |
9898 | unsigned rd = uimm (aarch64_get_instr (cpu), 9, 5); | |
9899 | unsigned rm = uimm (aarch64_get_instr (cpu), 4, 0); | |
9900 | uint64_t address = aarch64_get_reg_u64 (cpu, rd, SP_OK); | |
9901 | ||
9902 | if ((rn == rd || rm == rd) && wb != NoWriteBack) | |
9903 | HALT_UNALLOC; /* ??? */ | |
9904 | ||
9905 | offset <<= 2; | |
9906 | ||
9907 | if (wb != Post) | |
9908 | address += offset; | |
9909 | ||
9910 | aarch64_set_mem_u32 (cpu, address, | |
9911 | aarch64_get_reg_u32 (cpu, rm, NO_SP)); | |
9912 | aarch64_set_mem_u32 (cpu, address + 4, | |
9913 | aarch64_get_reg_u32 (cpu, rn, NO_SP)); | |
9914 | ||
9915 | if (wb == Post) | |
9916 | address += offset; | |
9917 | ||
9918 | if (wb != NoWriteBack) | |
9919 | aarch64_set_reg_u64 (cpu, rd, SP_OK, address); | |
9920 | } | |
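| ||
| /* Illustrative sketch, ours: the pair instructions' signed 7-bit | |
| immediate is scaled by the register size, as offset <<= 2 above | |
| (and <<= 3 / <<= 4 in the wider variants) shows; imm7 = -4 on a | |
| 32-bit STP is therefore a byte offset of -16.  */ | |
| static int64_t ATTRIBUTE_UNUSED | |
| example_pair_byte_offset (int32_t imm7, unsigned size_log2) | |
| { | |
| return (int64_t) imm7 << size_log2; | |
| } | |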
9921 | ||
9922 | static void | |
9923 | store_pair_u64 (sim_cpu *cpu, int32_t offset, WriteBack wb) | |
9924 | { | |
9925 | unsigned rn = uimm (aarch64_get_instr (cpu), 14, 10); | |
9926 | unsigned rd = uimm (aarch64_get_instr (cpu), 9, 5); | |
9927 | unsigned rm = uimm (aarch64_get_instr (cpu), 4, 0); | |
9928 | uint64_t address = aarch64_get_reg_u64 (cpu, rd, SP_OK); | |
9929 | ||
9930 | if ((rn == rd || rm == rd) && wb != NoWriteBack) | |
9931 | HALT_UNALLOC; /* ??? */ | |
9932 | ||
9933 | offset <<= 3; | |
9934 | ||
9935 | if (wb != Post) | |
9936 | address += offset; | |
9937 | ||
9938 | aarch64_set_mem_u64 (cpu, address, | |
9939 | aarch64_get_reg_u64 (cpu, rm, SP_OK)); | |
9940 | aarch64_set_mem_u64 (cpu, address + 8, | |
9941 | aarch64_get_reg_u64 (cpu, rn, SP_OK)); | |
9942 | ||
9943 | if (wb == Post) | |
9944 | address += offset; | |
9945 | ||
9946 | if (wb != NoWriteBack) | |
9947 | aarch64_set_reg_u64 (cpu, rd, SP_OK, address); | |
9948 | } | |
9949 | ||
9950 | static void | |
9951 | load_pair_u32 (sim_cpu *cpu, int32_t offset, WriteBack wb) | |
9952 | { | |
9953 | unsigned rn = uimm (aarch64_get_instr (cpu), 14, 10); | |
9954 | unsigned rd = uimm (aarch64_get_instr (cpu), 9, 5); | |
9955 | unsigned rm = uimm (aarch64_get_instr (cpu), 4, 0); | |
9956 | uint64_t address = aarch64_get_reg_u64 (cpu, rd, SP_OK); | |
9957 | ||
9958 | /* Treat this as unalloc to make sure we don't do it. */ | |
9959 | if (rn == rm) | |
9960 | HALT_UNALLOC; | |
9961 | ||
9962 | offset <<= 2; | |
9963 | ||
9964 | if (wb != Post) | |
9965 | address += offset; | |
9966 | ||
9967 | aarch64_set_reg_u64 (cpu, rm, SP_OK, aarch64_get_mem_u32 (cpu, address)); | |
9968 | aarch64_set_reg_u64 (cpu, rn, SP_OK, aarch64_get_mem_u32 (cpu, address + 4)); | |
9969 | ||
9970 | if (wb == Post) | |
9971 | address += offset; | |
9972 | ||
9973 | if (wb != NoWriteBack) | |
9974 | aarch64_set_reg_u64 (cpu, rd, SP_OK, address); | |
9975 | } | |
9976 | ||
9977 | static void | |
9978 | load_pair_s32 (sim_cpu *cpu, int32_t offset, WriteBack wb) | |
9979 | { | |
9980 | unsigned rn = uimm (aarch64_get_instr (cpu), 14, 10); | |
9981 | unsigned rd = uimm (aarch64_get_instr (cpu), 9, 5); | |
9982 | unsigned rm = uimm (aarch64_get_instr (cpu), 4, 0); | |
9983 | uint64_t address = aarch64_get_reg_u64 (cpu, rd, SP_OK); | |
9984 | ||
9985 | /* Treat this as unalloc to make sure we don't do it. */ | |
9986 | if (rn == rm) | |
9987 | HALT_UNALLOC; | |
9988 | ||
9989 | offset <<= 2; | |
9990 | ||
9991 | if (wb != Post) | |
9992 | address += offset; | |
9993 | ||
9994 | aarch64_set_reg_s64 (cpu, rm, SP_OK, aarch64_get_mem_s32 (cpu, address)); | |
9995 | aarch64_set_reg_s64 (cpu, rn, SP_OK, aarch64_get_mem_s32 (cpu, address + 4)); | |
9996 | ||
9997 | if (wb == Post) | |
9998 | address += offset; | |
9999 | ||
10000 | if (wb != NoWriteBack) | |
10001 | aarch64_set_reg_u64 (cpu, rd, SP_OK, address); | |
10002 | } | |
10003 | ||
10004 | static void | |
10005 | load_pair_u64 (sim_cpu *cpu, int32_t offset, WriteBack wb) | |
10006 | { | |
10007 | unsigned rn = uimm (aarch64_get_instr (cpu), 14, 10); | |
10008 | unsigned rd = uimm (aarch64_get_instr (cpu), 9, 5); | |
10009 | unsigned rm = uimm (aarch64_get_instr (cpu), 4, 0); | |
10010 | uint64_t address = aarch64_get_reg_u64 (cpu, rd, SP_OK); | |
10011 | ||
10012 | /* Treat this as unalloc to make sure we don't do it. */ | |
10013 | if (rn == rm) | |
10014 | HALT_UNALLOC; | |
10015 | ||
10016 | offset <<= 3; | |
10017 | ||
10018 | if (wb != Post) | |
10019 | address += offset; | |
10020 | ||
10021 | aarch64_set_reg_u64 (cpu, rm, SP_OK, aarch64_get_mem_u64 (cpu, address)); | |
10022 | aarch64_set_reg_u64 (cpu, rn, SP_OK, aarch64_get_mem_u64 (cpu, address + 8)); | |
10023 | ||
10024 | if (wb == Post) | |
10025 | address += offset; | |
10026 | ||
10027 | if (wb != NoWriteBack) | |
10028 | aarch64_set_reg_u64 (cpu, rd, SP_OK, address); | |
10029 | } | |
10030 | ||
10031 | static void | |
10032 | dex_load_store_pair_gr (sim_cpu *cpu) | |
10033 | { | |
10034 | /* instr[31,30] = size (10=> 64-bit, 01=> signed 32-bit, 00=> 32-bit) | |
10035 | instr[29,25] = instruction encoding: 101_0 | |
10036 | instr[26] = V : 1 if fp 0 if gp | |
10037 | instr[24,23] = addressing mode (10=> offset, 01=> post, 11=> pre) | |
10038 | instr[22] = load/store (1=> load) | |
10039 | instr[21,15] = signed, scaled, offset | |
10040 | instr[14,10] = Rn | |
10041 | instr[ 9, 5] = Rd | |
10042 | instr[ 4, 0] = Rm. */ | |
10043 | ||
10044 | uint32_t dispatch = ((uimm (aarch64_get_instr (cpu), 31, 30) << 3) | |
10045 | | uimm (aarch64_get_instr (cpu), 24, 22)); | |
10046 | int32_t offset = simm32 (aarch64_get_instr (cpu), 21, 15); | |
10047 | ||
10048 | switch (dispatch) | |
10049 | { | |
10050 | case 2: store_pair_u32 (cpu, offset, Post); return; | |
10051 | case 3: load_pair_u32 (cpu, offset, Post); return; | |
10052 | case 4: store_pair_u32 (cpu, offset, NoWriteBack); return; | |
10053 | case 5: load_pair_u32 (cpu, offset, NoWriteBack); return; | |
10054 | case 6: store_pair_u32 (cpu, offset, Pre); return; | |
10055 | case 7: load_pair_u32 (cpu, offset, Pre); return; | |
10056 | ||
10057 | case 11: load_pair_s32 (cpu, offset, Post); return; | |
10058 | case 13: load_pair_s32 (cpu, offset, NoWriteBack); return; | |
10059 | case 15: load_pair_s32 (cpu, offset, Pre); return; | |
10060 | ||
10061 | case 18: store_pair_u64 (cpu, offset, Post); return; | |
10062 | case 19: load_pair_u64 (cpu, offset, Post); return; | |
10063 | case 20: store_pair_u64 (cpu, offset, NoWriteBack); return; | |
10064 | case 21: load_pair_u64 (cpu, offset, NoWriteBack); return; | |
10065 | case 22: store_pair_u64 (cpu, offset, Pre); return; | |
10066 | case 23: load_pair_u64 (cpu, offset, Pre); return; | |
10067 | ||
10068 | default: | |
10069 | HALT_UNALLOC; | |
10070 | } | |
10071 | } | |
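| ||
| /* Illustrative worked example, ours: "STP x0, x1, [sp, #-16]!" | |
| has size = 10, addressing mode 11 (pre-index) and L = 0, so the | |
| dispatch value above is (2 << 3) | (3 << 1) | 0 = 22, selecting | |
| store_pair_u64 with Pre writeback.  */ | |
| static uint32_t ATTRIBUTE_UNUSED | |
| example_pair_dispatch (uint32_t instr) | |
| { | |
| return (uimm (instr, 31, 30) << 3) | uimm (instr, 24, 22); | |
| } | |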
10072 | ||
10073 | static void | |
10074 | store_pair_float (sim_cpu *cpu, int32_t offset, WriteBack wb) | |
10075 | { | |
10076 | unsigned rn = uimm (aarch64_get_instr (cpu), 14, 10); | |
10077 | unsigned rd = uimm (aarch64_get_instr (cpu), 9, 5); | |
10078 | unsigned rm = uimm (aarch64_get_instr (cpu), 4, 0); | |
10079 | uint64_t address = aarch64_get_reg_u64 (cpu, rd, SP_OK); | |
10080 | ||
10081 | offset <<= 2; | |
10082 | ||
10083 | if (wb != Post) | |
10084 | address += offset; | |
10085 | ||
10086 | aarch64_set_mem_float (cpu, address, aarch64_get_FP_float (cpu, rm)); | |
10087 | aarch64_set_mem_float (cpu, address + 4, aarch64_get_FP_float (cpu, rn)); | |
10088 | ||
10089 | if (wb == Post) | |
10090 | address += offset; | |
10091 | ||
10092 | if (wb != NoWriteBack) | |
10093 | aarch64_set_reg_u64 (cpu, rd, SP_OK, address); | |
10094 | } | |
10095 | ||
10096 | static void | |
10097 | store_pair_double (sim_cpu *cpu, int32_t offset, WriteBack wb) | |
10098 | { | |
10099 | unsigned rn = uimm (aarch64_get_instr (cpu), 14, 10); | |
10100 | unsigned rd = uimm (aarch64_get_instr (cpu), 9, 5); | |
10101 | unsigned rm = uimm (aarch64_get_instr (cpu), 4, 0); | |
10102 | uint64_t address = aarch64_get_reg_u64 (cpu, rd, SP_OK); | |
10103 | ||
10104 | offset <<= 3; | |
10105 | ||
10106 | if (wb != Post) | |
10107 | address += offset; | |
10108 | ||
10109 | aarch64_set_mem_double (cpu, address, aarch64_get_FP_double (cpu, rm)); | |
10110 | aarch64_set_mem_double (cpu, address + 8, aarch64_get_FP_double (cpu, rn)); | |
10111 | ||
10112 | if (wb == Post) | |
10113 | address += offset; | |
10114 | ||
10115 | if (wb != NoWriteBack) | |
10116 | aarch64_set_reg_u64 (cpu, rd, SP_OK, address); | |
10117 | } | |
10118 | ||
10119 | static void | |
10120 | store_pair_long_double (sim_cpu *cpu, int32_t offset, WriteBack wb) | |
10121 | { | |
10122 | FRegister a; | |
10123 | unsigned rn = uimm (aarch64_get_instr (cpu), 14, 10); | |
10124 | unsigned rd = uimm (aarch64_get_instr (cpu), 9, 5); | |
10125 | unsigned rm = uimm (aarch64_get_instr (cpu), 4, 0); | |
10126 | uint64_t address = aarch64_get_reg_u64 (cpu, rd, SP_OK); | |
10127 | ||
10128 | offset <<= 4; | |
10129 | ||
10130 | if (wb != Post) | |
10131 | address += offset; | |
10132 | ||
10133 | aarch64_get_FP_long_double (cpu, rm, & a); | |
10134 | aarch64_set_mem_long_double (cpu, address, a); | |
10135 | aarch64_get_FP_long_double (cpu, rn, & a); | |
10136 | aarch64_set_mem_long_double (cpu, address + 16, a); | |
10137 | ||
10138 | if (wb == Post) | |
10139 | address += offset; | |
10140 | ||
10141 | if (wb != NoWriteBack) | |
10142 | aarch64_set_reg_u64 (cpu, rd, SP_OK, address); | |
10143 | } | |
10144 | ||
10145 | static void | |
10146 | load_pair_float (sim_cpu *cpu, int32_t offset, WriteBack wb) | |
10147 | { | |
10148 | unsigned rn = uimm (aarch64_get_instr (cpu), 14, 10); | |
10149 | unsigned rd = uimm (aarch64_get_instr (cpu), 9, 5); | |
10150 | unsigned rm = uimm (aarch64_get_instr (cpu), 4, 0); | |
10151 | uint64_t address = aarch64_get_reg_u64 (cpu, rd, SP_OK); | |
10152 | ||
10153 | if (rm == rn) | |
10154 | HALT_UNALLOC; | |
10155 | ||
10156 | offset <<= 2; | |
10157 | ||
10158 | if (wb != Post) | |
10159 | address += offset; | |
10160 | ||
10161 | aarch64_set_FP_float (cpu, rm, aarch64_get_mem_float (cpu, address)); | |
10162 | aarch64_set_FP_float (cpu, rn, aarch64_get_mem_float (cpu, address + 4)); | |
10163 | ||
10164 | if (wb == Post) | |
10165 | address += offset; | |
10166 | ||
10167 | if (wb != NoWriteBack) | |
10168 | aarch64_set_reg_u64 (cpu, rd, SP_OK, address); | |
10169 | } | |
10170 | ||
10171 | static void | |
10172 | load_pair_double (sim_cpu *cpu, int32_t offset, WriteBack wb) | |
10173 | { | |
10174 | unsigned rn = uimm (aarch64_get_instr (cpu), 14, 10); | |
10175 | unsigned rd = uimm (aarch64_get_instr (cpu), 9, 5); | |
10176 | unsigned rm = uimm (aarch64_get_instr (cpu), 4, 0); | |
10177 | uint64_t address = aarch64_get_reg_u64 (cpu, rd, SP_OK); | |
10178 | ||
10179 | if (rm == rn) | |
10180 | HALT_UNALLOC; | |
10181 | ||
10182 | offset <<= 3; | |
10183 | ||
10184 | if (wb != Post) | |
10185 | address += offset; | |
10186 | ||
10187 | aarch64_set_FP_double (cpu, rm, aarch64_get_mem_double (cpu, address)); | |
10188 | aarch64_set_FP_double (cpu, rn, aarch64_get_mem_double (cpu, address + 8)); | |
10189 | ||
10190 | if (wb == Post) | |
10191 | address += offset; | |
10192 | ||
10193 | if (wb != NoWriteBack) | |
10194 | aarch64_set_reg_u64 (cpu, rd, SP_OK, address); | |
10195 | } | |
10196 | ||
10197 | static void | |
10198 | load_pair_long_double (sim_cpu *cpu, int32_t offset, WriteBack wb) | |
10199 | { | |
10200 | FRegister a; | |
10201 | unsigned rn = uimm (aarch64_get_instr (cpu), 14, 10); | |
10202 | unsigned rd = uimm (aarch64_get_instr (cpu), 9, 5); | |
10203 | unsigned rm = uimm (aarch64_get_instr (cpu), 4, 0); | |
10204 | uint64_t address = aarch64_get_reg_u64 (cpu, rd, SP_OK); | |
10205 | ||
10206 | if (rm == rn) | |
10207 | HALT_UNALLOC; | |
10208 | ||
10209 | offset <<= 4; | |
10210 | ||
10211 | if (wb != Post) | |
10212 | address += offset; | |
10213 | ||
10214 | aarch64_get_mem_long_double (cpu, address, & a); | |
10215 | aarch64_set_FP_long_double (cpu, rm, a); | |
10216 | aarch64_get_mem_long_double (cpu, address + 16, & a); | |
10217 | aarch64_set_FP_long_double (cpu, rn, a); | |
10218 | ||
10219 | if (wb == Post) | |
10220 | address += offset; | |
10221 | ||
10222 | if (wb != NoWriteBack) | |
10223 | aarch64_set_reg_u64 (cpu, rd, SP_OK, address); | |
10224 | } | |
10225 | ||
10226 | static void | |
10227 | dex_load_store_pair_fp (sim_cpu *cpu) | |
10228 | { | |
10229 | /* instr[31,30] = size (10=> 128-bit, 01=> 64-bit, 00=> 32-bit) | |
10230 | instr[29,25] = instruction encoding | |
10231 | instr[24,23] = addressing mode (10=> offset, 01=> post, 11=> pre) | |
10232 | instr[22] = load/store (1=> load) | |
10233 | instr[21,15] = signed, scaled, offset | |
10234 | instr[14,10] = Rn | |
10235 | instr[ 9, 5] = Rd | |
10236 | instr[ 4, 0] = Rm */ | |
10237 | ||
10238 | uint32_t dispatch = ((uimm (aarch64_get_instr (cpu), 31, 30) << 3) | |
10239 | | uimm (aarch64_get_instr (cpu), 24, 22)); | |
10240 | int32_t offset = simm32 (aarch64_get_instr (cpu), 21, 15); | |
10241 | ||
10242 | switch (dispatch) | |
10243 | { | |
10244 | case 2: store_pair_float (cpu, offset, Post); return; | |
10245 | case 3: load_pair_float (cpu, offset, Post); return; | |
10246 | case 4: store_pair_float (cpu, offset, NoWriteBack); return; | |
10247 | case 5: load_pair_float (cpu, offset, NoWriteBack); return; | |
10248 | case 6: store_pair_float (cpu, offset, Pre); return; | |
10249 | case 7: load_pair_float (cpu, offset, Pre); return; | |
10250 | ||
10251 | case 10: store_pair_double (cpu, offset, Post); return; | |
10252 | case 11: load_pair_double (cpu, offset, Post); return; | |
10253 | case 12: store_pair_double (cpu, offset, NoWriteBack); return; | |
10254 | case 13: load_pair_double (cpu, offset, NoWriteBack); return; | |
10255 | case 14: store_pair_double (cpu, offset, Pre); return; | |
10256 | case 15: load_pair_double (cpu, offset, Pre); return; | |
10257 | ||
10258 | case 18: store_pair_long_double (cpu, offset, Post); return; | |
10259 | case 19: load_pair_long_double (cpu, offset, Post); return; | |
10260 | case 20: store_pair_long_double (cpu, offset, NoWriteBack); return; | |
10261 | case 21: load_pair_long_double (cpu, offset, NoWriteBack); return; | |
10262 | case 22: store_pair_long_double (cpu, offset, Pre); return; | |
10263 | case 23: load_pair_long_double (cpu, offset, Pre); return; | |
10264 | ||
10265 | default: | |
10266 | HALT_UNALLOC; | |
10267 | } | |
10268 | } | |
10269 | ||
10270 | static inline unsigned | |
10271 | vec_reg (unsigned v, unsigned o) | |
10272 | { | |
10273 | return (v + o) & 0x1F; /* Vector register numbers wrap modulo 32. */ | |
10274 | } | |
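| ||
| /* Illustrative check, ours: multi-register transfers wrap round | |
| the 32-entry vector file, so e.g. an LD3 starting at V31 also | |
| touches V0 and V1.  */ | |
| static int ATTRIBUTE_UNUSED | |
| example_vec_reg_wraps (void) | |
| { | |
| return vec_reg (31, 1) == 0 && vec_reg (31, 2) == 1; | |
| } | |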
10275 | ||
10276 | /* Load multiple N-element structures to N consecutive registers. */ | |
10277 | static void | |
10278 | vec_load (sim_cpu *cpu, uint64_t address, unsigned N) | |
10279 | { | |
10280 | int all = uimm (aarch64_get_instr (cpu), 30, 30); | |
10281 | unsigned size = uimm (aarch64_get_instr (cpu), 11, 10); | |
10282 | unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0); | |
10283 | unsigned i; | |
10284 | ||
10285 | switch (size) | |
10286 | { | |
10287 | case 0: /* 8-bit operations. */ | |
10288 | if (all) | |
10289 | for (i = 0; i < (16 * N); i++) | |
10290 | aarch64_set_vec_u8 (cpu, vec_reg (vd, i >> 4), i & 15, | |
10291 | aarch64_get_mem_u8 (cpu, address + i)); | |
10292 | else | |
10293 | for (i = 0; i < (8 * N); i++) | |
10294 | aarch64_set_vec_u8 (cpu, vec_reg (vd, i >> 3), i & 7, | |
10295 | aarch64_get_mem_u8 (cpu, address + i)); | |
10296 | return; | |
10297 | ||
10298 | case 1: /* 16-bit operations. */ | |
10299 | if (all) | |
10300 | for (i = 0; i < (8 * N); i++) | |
10301 | aarch64_set_vec_u16 (cpu, vec_reg (vd, i >> 3), i & 7, | |
10302 | aarch64_get_mem_u16 (cpu, address + i * 2)); | |
10303 | else | |
10304 | for (i = 0; i < (4 * N); i++) | |
10305 | aarch64_set_vec_u16 (cpu, vec_reg (vd, i >> 2), i & 3, | |
10306 | aarch64_get_mem_u16 (cpu, address + i * 2)); | |
10307 | return; | |
10308 | ||
10309 | case 2: /* 32-bit operations. */ | |
10310 | if (all) | |
10311 | for (i = 0; i < (4 * N); i++) | |
10312 | aarch64_set_vec_u32 (cpu, vec_reg (vd, i >> 2), i & 3, | |
10313 | aarch64_get_mem_u32 (cpu, address + i * 4)); | |
10314 | else | |
10315 | for (i = 0; i < (2 * N); i++) | |
10316 | aarch64_set_vec_u32 (cpu, vec_reg (vd, i >> 1), i & 1, | |
10317 | aarch64_get_mem_u32 (cpu, address + i * 4)); | |
10318 | return; | |
10319 | ||
10320 | case 3: /* 64-bit operations. */ | |
10321 | if (all) | |
10322 | for (i = 0; i < (2 * N); i++) | |
10323 | aarch64_set_vec_u64 (cpu, vec_reg (vd, i >> 1), i & 1, | |
10324 | aarch64_get_mem_u64 (cpu, address + i * 8)); | |
10325 | else | |
10326 | for (i = 0; i < N; i++) | |
10327 | aarch64_set_vec_u64 (cpu, vec_reg (vd, i), 0, | |
10328 | aarch64_get_mem_u64 (cpu, address + i * 8)); | |
10329 | return; | |
10330 | ||
10331 | default: | |
10332 | HALT_UNREACHABLE; | |
10333 | } | |
10334 | } | |
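| ||
| /* Sketch, ours, not wired into the decoder: vec_load above fills | |
| consecutive lanes in register order, which is LD1 semantics. | |
| Architecturally LD2 de-interleaves, sending memory element 2*i | |
| to lane i of Vd and element 2*i + 1 to lane i of Vd+1; ST2 | |
| mirrors this with the moves reversed.  For byte elements: */ | |
| static void ATTRIBUTE_UNUSED | |
| example_ld2_deinterleave_u8 (sim_cpu *cpu, uint64_t address, | |
| unsigned vd, unsigned lanes) | |
| { | |
| unsigned i; | |
| for (i = 0; i < lanes; i++) | |
| { | |
| aarch64_set_vec_u8 (cpu, vd, i, | |
| aarch64_get_mem_u8 (cpu, address + i * 2)); | |
| aarch64_set_vec_u8 (cpu, vec_reg (vd, 1), i, | |
| aarch64_get_mem_u8 (cpu, address + i * 2 + 1)); | |
| } | |
| } | |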
10335 | ||
10336 | /* LD4: load multiple 4-element to four consecutive registers. */ | |
10337 | static void | |
10338 | LD4 (sim_cpu *cpu, uint64_t address) | |
10339 | { | |
10340 | vec_load (cpu, address, 4); | |
10341 | } | |
10342 | ||
10343 | /* LD3: load multiple 3-element structures to three consecutive registers. */ | |
10344 | static void | |
10345 | LD3 (sim_cpu *cpu, uint64_t address) | |
10346 | { | |
10347 | vec_load (cpu, address, 3); | |
10348 | } | |
10349 | ||
10350 | /* LD2: load multiple 2-element structures to two consecutive registers. */ | |
10351 | static void | |
10352 | LD2 (sim_cpu *cpu, uint64_t address) | |
10353 | { | |
10354 | vec_load (cpu, address, 2); | |
10355 | } | |
10356 | ||
10357 | /* Load multiple 1-element structures into one register. */ | |
10358 | static void | |
10359 | LD1_1 (sim_cpu *cpu, uint64_t address) | |
10360 | { | |
10361 | int all = uimm (aarch64_get_instr (cpu), 30, 30); | |
10362 | unsigned size = uimm (aarch64_get_instr (cpu), 11, 10); | |
10363 | unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0); | |
10364 | unsigned i; | |
10365 | ||
10366 | switch (size) | |
10367 | { | |
10368 | case 0: | |
10369 | /* LD1 {Vd.16b}, addr, #16 */ | |
10370 | /* LD1 {Vd.8b}, addr, #8 */ | |
10371 | for (i = 0; i < (all ? 16 : 8); i++) | |
10372 | aarch64_set_vec_u8 (cpu, vd, i, | |
10373 | aarch64_get_mem_u8 (cpu, address + i)); | |
10374 | return; | |
10375 | ||
10376 | case 1: | |
10377 | /* LD1 {Vd.8h}, addr, #16 */ | |
10378 | /* LD1 {Vd.4h}, addr, #8 */ | |
10379 | for (i = 0; i < (all ? 8 : 4); i++) | |
10380 | aarch64_set_vec_u16 (cpu, vd, i, | |
10381 | aarch64_get_mem_u16 (cpu, address + i * 2)); | |
10382 | return; | |
10383 | ||
10384 | case 2: | |
10385 | /* LD1 {Vd.4s}, addr, #16 */ | |
10386 | /* LD1 {Vd.2s}, addr, #8 */ | |
10387 | for (i = 0; i < (all ? 4 : 2); i++) | |
10388 | aarch64_set_vec_u32 (cpu, vd, i, | |
10389 | aarch64_get_mem_u32 (cpu, address + i * 4)); | |
10390 | return; | |
10391 | ||
10392 | case 3: | |
10393 | /* LD1 {Vd.2d}, addr, #16 */ | |
10394 | /* LD1 {Vd.1d}, addr, #8 */ | |
10395 | for (i = 0; i < (all ? 2 : 1); i++) | |
10396 | aarch64_set_vec_u64 (cpu, vd, i, | |
10397 | aarch64_get_mem_u64 (cpu, address + i * 8)); | |
10398 | return; | |
10399 | ||
10400 | default: | |
10401 | HALT_UNREACHABLE; | |
10402 | } | |
10403 | } | |
10404 | ||
10405 | /* Load multiple 1-element structures into two registers. */ | |
10406 | static void | |
10407 | LD1_2 (sim_cpu *cpu, uint64_t address) | |
10408 | { | |
10409 | /* FIXME: Reusing the LD2 code here is only correct because | |
10410 | vec_load fills consecutive lanes in register order, i.e. LD1 | |
10411 | (no de-interleave) semantics.  It is the LD2 version that should | |
10412 | differ: LD2 de-interleaves pairs of elements into the two | |
10413 | registers (see the sketch after vec_load above).  */ | |
10412 | vec_load (cpu, address, 2); | |
10413 | } | |
10414 | ||
10415 | /* Load multiple 1-element structures into three registers. */ | |
10416 | static void | |
10417 | LD1_3 (sim_cpu *cpu, uint64_t address) | |
10418 | { | |
10419 | /* FIXME: As with LD1_2, this is only correct because vec_load | |
10420 | implements LD1 semantics; a true LD3 would de-interleave groups | |
10421 | of three elements.  */ | |
10422 | vec_load (cpu, address, 3); | |
10423 | } | |
10424 | ||
10425 | /* Load multiple 1-element structures into four registers. */ | |
10426 | static void | |
10427 | LD1_4 (sim_cpu *cpu, uint64_t address) | |
10428 | { | |
10429 | /* FIXME: As with LD1_2, this is only correct because vec_load | |
10430 | implements LD1 semantics; a true LD4 would de-interleave groups | |
10431 | of four elements.  */ | |
10432 | vec_load (cpu, address, 4); | |
10433 | } | |
10434 | ||
10435 | /* Store multiple N-element structures to N consecutive registers. */ | |
10436 | static void | |
10437 | vec_store (sim_cpu *cpu, uint64_t address, unsigned N) | |
10438 | { | |
10439 | int all = uimm (aarch64_get_instr (cpu), 30, 30); | |
10440 | unsigned size = uimm (aarch64_get_instr (cpu), 11, 10); | |
10441 | unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0); | |
10442 | unsigned i; | |
10443 | ||
10444 | switch (size) | |
10445 | { | |
10446 | case 0: /* 8-bit operations. */ | |
10447 | if (all) | |
10448 | for (i = 0; i < (16 * N); i++) | |
10449 | aarch64_set_mem_u8 | |
10450 | (cpu, address + i, | |
10451 | aarch64_get_vec_u8 (cpu, vec_reg (vd, i >> 4), i & 15)); | |
10452 | else | |
10453 | for (i = 0; i < (8 * N); i++) | |
10454 | aarch64_set_mem_u8 | |
10455 | (cpu, address + i, | |
10456 | aarch64_get_vec_u8 (cpu, vec_reg (vd, i >> 3), i & 7)); | |
10457 | return; | |
10458 | ||
10459 | case 1: /* 16-bit operations. */ | |
10460 | if (all) | |
10461 | for (i = 0; i < (8 * N); i++) | |
10462 | aarch64_set_mem_u16 | |
10463 | (cpu, address + i * 2, | |
10464 | aarch64_get_vec_u16 (cpu, vec_reg (vd, i >> 3), i & 7)); | |
10465 | else | |
10466 | for (i = 0; i < (4 * N); i++) | |
10467 | aarch64_set_mem_u16 | |
10468 | (cpu, address + i * 2, | |
10469 | aarch64_get_vec_u16 (cpu, vec_reg (vd, i >> 2), i & 3)); | |
10470 | return; | |
10471 | ||
10472 | case 2: /* 32-bit operations. */ | |
10473 | if (all) | |
10474 | for (i = 0; i < (4 * N); i++) | |
10475 | aarch64_set_mem_u32 | |
10476 | (cpu, address + i * 4, | |
10477 | aarch64_get_vec_u32 (cpu, vec_reg (vd, i >> 2), i & 3)); | |
10478 | else | |
10479 | for (i = 0; i < (2 * N); i++) | |
10480 | aarch64_set_mem_u32 | |
10481 | (cpu, address + i * 4, | |
10482 | aarch64_get_vec_u32 (cpu, vec_reg (vd, i >> 1), i & 1)); | |
10483 | return; | |
10484 | ||
10485 | case 3: /* 64-bit operations. */ | |
10486 | if (all) | |
10487 | for (i = 0; i < (2 * N); i++) | |
10488 | aarch64_set_mem_u64 | |
10489 | (cpu, address + i * 8, | |
10490 | aarch64_get_vec_u64 (cpu, vec_reg (vd, i >> 1), i & 1)); | |
10491 | else | |
10492 | for (i = 0; i < N; i++) | |
10493 | aarch64_set_mem_u64 | |
10494 | (cpu, address + i * 8, | |
10495 | aarch64_get_vec_u64 (cpu, vec_reg (vd, i), 0)); | |
10496 | return; | |
10497 | ||
10498 | default: | |
10499 | HALT_UNREACHABLE; | |
10500 | } | |
10501 | } | |
10502 | ||
10503 | /* Store multiple 4-element structure to four consecutive registers. */ | |
10504 | static void | |
10505 | ST4 (sim_cpu *cpu, uint64_t address) | |
10506 | { | |
10507 | vec_store (cpu, address, 4); | |
10508 | } | |
10509 | ||
10510 | /* Store multiple 3-element structures to three consecutive registers. */ | |
10511 | static void | |
10512 | ST3 (sim_cpu *cpu, uint64_t address) | |
10513 | { | |
10514 | vec_store (cpu, address, 3); | |
10515 | } | |
10516 | ||
10517 | /* Store multiple 2-element structures to two consecutive registers. */ | |
10518 | static void | |
10519 | ST2 (sim_cpu *cpu, uint64_t address) | |
10520 | { | |
10521 | vec_store (cpu, address, 2); | |
10522 | } | |
10523 | ||
10524 | /* Store multiple 1-element structures into one register. */ | |
10525 | static void | |
10526 | ST1_1 (sim_cpu *cpu, uint64_t address) | |
10527 | { | |
10528 | int all = uimm (aarch64_get_instr (cpu), 30, 30); | |
10529 | unsigned size = uimm (aarch64_get_instr (cpu), 11, 10); | |
10530 | unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0); | |
10531 | unsigned i; | |
10532 | ||
10533 | switch (size) | |
10534 | { | |
10535 | case 0: | |
10536 | for (i = 0; i < (all ? 16 : 8); i++) | |
10537 | aarch64_set_mem_u8 (cpu, address + i, | |
10538 | aarch64_get_vec_u8 (cpu, vd, i)); | |
10539 | return; | |
10540 | ||
10541 | case 1: | |
10542 | for (i = 0; i < (all ? 8 : 4); i++) | |
10543 | aarch64_set_mem_u16 (cpu, address + i * 2, | |
10544 | aarch64_get_vec_u16 (cpu, vd, i)); | |
10545 | return; | |
10546 | ||
10547 | case 2: | |
10548 | for (i = 0; i < (all ? 4 : 2); i++) | |
10549 | aarch64_set_mem_u32 (cpu, address + i * 4, | |
10550 | aarch64_get_vec_u32 (cpu, vd, i)); | |
10551 | return; | |
10552 | ||
10553 | case 3: | |
10554 | for (i = 0; i < (all ? 2 : 1); i++) | |
10555 | aarch64_set_mem_u64 (cpu, address + i * 8, | |
10556 | aarch64_get_vec_u64 (cpu, vd, i)); | |
10557 | return; | |
10558 | ||
10559 | default: | |
10560 | HALT_UNREACHABLE; | |
10561 | } | |
10562 | } | |
10563 | ||
10564 | /* Store multiple 1-element structures into two registers. */ | |
10565 | static void | |
10566 | ST1_2 (sim_cpu *cpu, uint64_t address) | |
10567 | { | |
10568 | /* FIXME: Reusing the ST2 code is only correct because vec_store | |
10569 | writes consecutive lanes in register order, i.e. ST1 semantics; | |
10570 | a true ST2 would interleave pairs of elements on the way out | |
10571 | (cf. the LD2 sketch after vec_load above).  */ | |
10571 | vec_store (cpu, address, 2); | |
10572 | } | |
10573 | ||
10574 | /* Store multiple 1-element structures into three registers. */ | |
10575 | static void | |
10576 | ST1_3 (sim_cpu *cpu, uint64_t address) | |
10577 | { | |
10578 | /* FIXME: As with ST1_2, this is only correct because vec_store | |
10579 | implements ST1 semantics; a true ST3 would interleave groups of | |
10580 | three elements.  */ | |
10581 | vec_store (cpu, address, 3); | |
10582 | } | |
10583 | ||
10584 | /* Store multiple 1-element structures into four registers. */ | |
10585 | static void | |
10586 | ST1_4 (sim_cpu *cpu, uint64_t address) | |
10587 | { | |
10588 | /* FIXME: As with ST1_2, this is only correct because vec_store | |
10589 | implements ST1 semantics; a true ST4 would interleave groups of | |
10590 | four elements.  */ | |
10591 | vec_store (cpu, address, 4); | |
10592 | } | |
10593 | ||
10594 | static void | |
10595 | do_vec_LDnR (sim_cpu *cpu, uint64_t address) | |
10596 | { | |
10597 | /* instr[31] = 0 | |
10598 | instr[30] = element selector 0=>half, 1=>all elements | |
10599 | instr[29,24] = 00 1101 | |
10600 | instr[23] = 0=>simple, 1=>post | |
10601 | instr[22] = 1 | |
10602 | instr[21] = width: LD1R-or-LD3R (0) / LD2R-or-LD4R (1) | |
10603 | instr[20,16] = 0 0000 (simple), Vinc (reg-post-inc, no SP), | |
10604 | 11111 (immediate post inc) | |
10605 | instr[15,14] = 11 | |
10606 | instr[13] = width: LD1R-or-LD2R (0) / LD3R-or-LD4R (1) | |
10607 | instr[12] = 0 | |
10608 | instr[11,10] = element size 00=> byte(b), 01=> half(h), | |
10609 | 10=> word(s), 11=> double(d) | |
10610 | instr[9,5] = address | |
10611 | instr[4,0] = Vd */ | |
10612 | ||
10613 | unsigned full = uimm (aarch64_get_instr (cpu), 30, 30); | |
10614 | unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0); | |
10615 | unsigned size = uimm (aarch64_get_instr (cpu), 11, 10); | |
10616 | int i; | |
10617 | ||
10618 | NYI_assert (29, 24, 0x0D); | |
10619 | NYI_assert (22, 22, 1); | |
10620 | NYI_assert (15, 14, 3); | |
10621 | NYI_assert (12, 12, 0); | |
10622 | ||
10623 | switch ((uimm (aarch64_get_instr (cpu), 13, 13) << 1) | |
10624 | | uimm (aarch64_get_instr (cpu), 21, 21)) | |
10625 | { | |
10626 | case 0: /* LD1R. */ | |
10627 | switch (size) | |
10628 | { | |
10629 | case 0: | |
10630 | { | |
10631 | uint8_t val = aarch64_get_mem_u8 (cpu, address); | |
10632 | for (i = 0; i < (full ? 16 : 8); i++) | |
10633 | aarch64_set_vec_u8 (cpu, vd, i, val); | |
10634 | break; | |
10635 | } | |
10636 | ||
10637 | case 1: | |
10638 | { | |
10639 | uint16_t val = aarch64_get_mem_u16 (cpu, address); | |
10640 | for (i = 0; i < (full ? 8 : 4); i++) | |
10641 | aarch64_set_vec_u16 (cpu, vd, i, val); | |
10642 | break; | |
10643 | } | |
10644 | ||
10645 | case 2: | |
10646 | { | |
10647 | uint32_t val = aarch64_get_mem_u32 (cpu, address); | |
10648 | for (i = 0; i < (full ? 4 : 2); i++) | |
10649 | aarch64_set_vec_u32 (cpu, vd, i, val); | |
10650 | break; | |
10651 | } | |
10652 | ||
10653 | case 3: | |
10654 | { | |
10655 | uint64_t val = aarch64_get_mem_u64 (cpu, address); | |
10656 | for (i = 0; i < (full ? 2 : 1); i++) | |
10657 | aarch64_set_vec_u64 (cpu, vd, i, val); | |
10658 | break; | |
10659 | } | |
10660 | ||
10661 | default: | |
10662 | HALT_UNALLOC; | |
10663 | } | |
10664 | break; | |
10665 | ||
10666 | case 1: /* LD2R. */ | |
10667 | switch (size) | |
10668 | { | |
10669 | case 0: | |
10670 | { | |
10671 | uint8_t val1 = aarch64_get_mem_u8 (cpu, address); | |
10672 | uint8_t val2 = aarch64_get_mem_u8 (cpu, address + 1); | |
10673 | ||
10674 | for (i = 0; i < (full ? 16 : 8); i++) | |
10675 | { | |
10676 | aarch64_set_vec_u8 (cpu, vd, i, val1); | |
10677 | aarch64_set_vec_u8 (cpu, vec_reg (vd, 1), i, val2); | |
10678 | } | |
10679 | break; | |
10680 | } | |
10681 | ||
10682 | case 1: | |
10683 | { | |
10684 | uint16_t val1 = aarch64_get_mem_u16 (cpu, address); | |
10685 | uint16_t val2 = aarch64_get_mem_u16 (cpu, address + 2); | |
10686 | ||
10687 | for (i = 0; i < (full ? 8 : 4); i++) | |
10688 | { | |
10689 | aarch64_set_vec_u16 (cpu, vd, i, val1); | |
10690 | aarch64_set_vec_u16 (cpu, vec_reg (vd, 1), i, val2); | |
10691 | } | |
10692 | break; | |
10693 | } | |
10694 | ||
10695 | case 2: | |
10696 | { | |
10697 | uint32_t val1 = aarch64_get_mem_u32 (cpu, address); | |
10698 | uint32_t val2 = aarch64_get_mem_u32 (cpu, address + 4); | |
10699 | ||
10700 | for (i = 0; i < (full ? 4 : 2); i++) | |
10701 | { | |
10702 | aarch64_set_vec_u32 (cpu, vd, i, val1); | |
10703 | aarch64_set_vec_u32 (cpu, vec_reg (vd, 1), i, val2); | |
10704 | } | |
10705 | break; | |
10706 | } | |
10707 | ||
10708 | case 3: | |
10709 | { | |
10710 | uint64_t val1 = aarch64_get_mem_u64 (cpu, address); | |
10711 | uint64_t val2 = aarch64_get_mem_u64 (cpu, address + 8); | |
10712 | ||
10713 | for (i = 0; i < (full ? 2 : 1); i++) | |
10714 | { | |
10715 | aarch64_set_vec_u64 (cpu, vd, i, val1); | |
10716 | aarch64_set_vec_u64 (cpu, vec_reg (vd, 1), i, val2); | |
10717 | } | |
10718 | break; | |
10719 | } | |
10720 | ||
10721 | default: | |
10722 | HALT_UNALLOC; | |
10723 | } | |
10724 | break; | |
10725 | ||
10726 | case 2: /* LD3R. */ | |
10727 | switch (size) | |
10728 | { | |
10729 | case 0: | |
10730 | { | |
10731 | uint8_t val1 = aarch64_get_mem_u8 (cpu, address); | |
10732 | uint8_t val2 = aarch64_get_mem_u8 (cpu, address + 1); | |
10733 | uint8_t val3 = aarch64_get_mem_u8 (cpu, address + 2); | |
10734 | ||
10735 | for (i = 0; i < (full ? 16 : 8); i++) | |
10736 | { | |
10737 | aarch64_set_vec_u8 (cpu, vd, i, val1); | |
10738 | aarch64_set_vec_u8 (cpu, vec_reg (vd, 1), i, val2); | |
10739 | aarch64_set_vec_u8 (cpu, vec_reg (vd, 2), i, val3); | |
10740 | } | |
10741 | } | |
10742 | break; | |
10743 | ||
10744 | case 1: | |
10745 | { | |
10746 | uint16_t val1 = aarch64_get_mem_u16 (cpu, address); | |
10747 | uint16_t val2 = aarch64_get_mem_u16 (cpu, address + 2); | |
10748 | uint16_t val3 = aarch64_get_mem_u16 (cpu, address + 4); | |
10749 | ||
10750 | for (i = 0; i < (full ? 8 : 4); i++) | |
10751 | { | |
10752 | aarch64_set_vec_u16 (cpu, vd, i, val1); | |
10753 | aarch64_set_vec_u16 (cpu, vec_reg (vd, 1), i, val2); | |
10754 | aarch64_set_vec_u16 (cpu, vec_reg (vd, 2), i, val3); | |
10755 | } | |
10756 | } | |
10757 | break; | |
10758 | ||
10759 | case 2: | |
10760 | { | |
10761 | uint32_t val1 = aarch64_get_mem_u32 (cpu, address); | |
10762 | uint32_t val2 = aarch64_get_mem_u32 (cpu, address + 4); | |
10763 | uint32_t val3 = aarch64_get_mem_u32 (cpu, address + 8); | |
10764 | ||
10765 | for (i = 0; i < (full ? 4 : 2); i++) | |
10766 | { | |
10767 | aarch64_set_vec_u32 (cpu, vd, i, val1); | |
10768 | aarch64_set_vec_u32 (cpu, vec_reg (vd, 1), i, val2); | |
10769 | aarch64_set_vec_u32 (cpu, vec_reg (vd, 2), i, val3); | |
10770 | } | |
10771 | } | |
10772 | break; | |
10773 | ||
10774 | case 3: | |
10775 | { | |
10776 | uint64_t val1 = aarch64_get_mem_u64 (cpu, address); | |
10777 | uint64_t val2 = aarch64_get_mem_u64 (cpu, address + 8); | |
10778 | uint64_t val3 = aarch64_get_mem_u64 (cpu, address + 16); | |
10779 | ||
10780 | for (i = 0; i < (full ? 2 : 1); i++) | |
10781 | { | |
10782 | aarch64_set_vec_u64 (cpu, vd, i, val1); | |
10783 | aarch64_set_vec_u64 (cpu, vec_reg (vd, 1), i, val2); | |
10784 | aarch64_set_vec_u64 (cpu, vec_reg (vd, 2), i, val3); | |
10785 | } | |
10786 | } | |
10787 | break; | |
10788 | ||
10789 | default: | |
10790 | HALT_UNALLOC; | |
10791 | } | |
10792 | break; | |
10793 | ||
10794 | case 3: /* LD4R. */ | |
10795 | switch (size) | |
10796 | { | |
10797 | case 0: | |
10798 | { | |
10799 | uint8_t val1 = aarch64_get_mem_u8 (cpu, address); | |
10800 | uint8_t val2 = aarch64_get_mem_u8 (cpu, address + 1); | |
10801 | uint8_t val3 = aarch64_get_mem_u8 (cpu, address + 2); | |
10802 | uint8_t val4 = aarch64_get_mem_u8 (cpu, address + 3); | |
10803 | ||
10804 | for (i = 0; i < (full ? 16 : 8); i++) | |
10805 | { | |
10806 | aarch64_set_vec_u8 (cpu, vd, i, val1); | |
10807 | aarch64_set_vec_u8 (cpu, vec_reg (vd, 1), i, val2); | |
10808 | aarch64_set_vec_u8 (cpu, vec_reg (vd, 2), i, val3); | |
10809 | aarch64_set_vec_u8 (cpu, vec_reg (vd, 3), i, val4); | |
10810 | } | |
10811 | } | |
10812 | break; | |
10813 | ||
10814 | case 1: | |
10815 | { | |
10816 | uint16_t val1 = aarch64_get_mem_u16 (cpu, address); | |
10817 | uint16_t val2 = aarch64_get_mem_u16 (cpu, address + 2); | |
10818 | uint16_t val3 = aarch64_get_mem_u16 (cpu, address + 4); | |
10819 | uint16_t val4 = aarch64_get_mem_u16 (cpu, address + 6); | |
10820 | ||
10821 | for (i = 0; i < (full ? 8 : 4); i++) | |
10822 | { | |
10823 | aarch64_set_vec_u16 (cpu, vd, i, val1); | |
10824 | aarch64_set_vec_u16 (cpu, vec_reg (vd, 1), i, val2); | |
10825 | aarch64_set_vec_u16 (cpu, vec_reg (vd, 2), i, val3); | |
10826 | aarch64_set_vec_u16 (cpu, vec_reg (vd, 3), i, val4); | |
10827 | } | |
10828 | } | |
10829 | break; | |
10830 | ||
10831 | case 2: | |
10832 | { | |
10833 | uint32_t val1 = aarch64_get_mem_u32 (cpu, address); | |
10834 | uint32_t val2 = aarch64_get_mem_u32 (cpu, address + 4); | |
10835 | uint32_t val3 = aarch64_get_mem_u32 (cpu, address + 8); | |
10836 | uint32_t val4 = aarch64_get_mem_u32 (cpu, address + 12); | |
10837 | ||
10838 | for (i = 0; i < (full ? 4 : 2); i++) | |
10839 | { | |
10840 | aarch64_set_vec_u32 (cpu, vd, i, val1); | |
10841 | aarch64_set_vec_u32 (cpu, vec_reg (vd, 1), i, val2); | |
10842 | aarch64_set_vec_u32 (cpu, vec_reg (vd, 2), i, val3); | |
10843 | aarch64_set_vec_u32 (cpu, vec_reg (vd, 3), i, val4); | |
10844 | } | |
10845 | } | |
10846 | break; | |
10847 | ||
10848 | case 3: | |
10849 | { | |
10850 | uint64_t val1 = aarch64_get_mem_u64 (cpu, address); | |
10851 | uint64_t val2 = aarch64_get_mem_u64 (cpu, address + 8); | |
10852 | uint64_t val3 = aarch64_get_mem_u64 (cpu, address + 16); | |
10853 | uint64_t val4 = aarch64_get_mem_u64 (cpu, address + 24); | |
10854 | ||
10855 | for (i = 0; i < (full ? 2 : 1); i++) | |
10856 | { | |
10857 | aarch64_set_vec_u64 (cpu, vd, i, val1); | |
10858 | aarch64_set_vec_u64 (cpu, vec_reg (vd, 1), i, val2); | |
10859 | aarch64_set_vec_u64 (cpu, vec_reg (vd, 2), i, val3); | |
10860 | aarch64_set_vec_u64 (cpu, vec_reg (vd, 3), i, val4); | |
10861 | } | |
10862 | } | |
10863 | break; | |
10864 | ||
10865 | default: | |
10866 | HALT_UNALLOC; | |
10867 | } | |
10868 | break; | |
10869 | ||
10870 | default: | |
10871 | HALT_UNALLOC; | |
10872 | } | |
10873 | } | |
10874 | ||
10875 | static void | |
10876 | do_vec_load_store (sim_cpu *cpu) | |
10877 | { | |
10878 | /* {LD|ST}<N> {Vd..Vd+N}, vaddr | |
10879 | ||
10880 | instr[31] = 0 | |
10881 | instr[30] = element selector 0=>half, 1=>all elements | |
10882 | instr[29,25] = 00110 | |
10883 | instr[24] = ? | |
10884 | instr[23] = 0=>simple, 1=>post | |
10885 | instr[22] = 0=>store, 1=>load | |
10886 | instr[21] = 0 (LDn) / small(0)-large(1) selector (LDnR) | |
10887 | instr[20,16] = 00000 (simple), Vinc (reg-post-inc, no SP), | |
10888 | 11111 (immediate post inc) | |
10889 | instr[15,12] = elements and destinations. eg for load: | |
10890 | 0000=>LD4 => load multiple 4-element to | |
10891 | four consecutive registers | |
10892 | 0100=>LD3 => load multiple 3-element to | |
10893 | three consecutive registers | |
10894 | 1000=>LD2 => load multiple 2-element to | |
10895 | two consecutive registers | |
10896 | 0010=>LD1 => load multiple 1-element to | |
10897 | four consecutive registers | |
10898 | 0110=>LD1 => load multiple 1-element to | |
10899 | three consecutive registers | |
10900 | 1010=>LD1 => load multiple 1-element to | |
10901 | two consecutive registers | |
10902 | 0111=>LD1 => load multiple 1-element to | |
10903 | one register | |
10904 | 1100=>LD1R,LD2R | |
10905 | 1110=>LD3R,LD4R | |
10906 | instr[11,10] = element size 00=> byte(b), 01=> half(h), | |
10907 | 10=> word(s), 11=> double(d) | |
10908 | instr[9,5] = Vn, can be SP | |
10909 | instr[4,0] = Vd */ | |
10910 | ||
10911 | int post; | |
10912 | int load; | |
10913 | unsigned vn; | |
10914 | uint64_t address; | |
10915 | int type; | |
10916 | ||
10917 | if (uimm (aarch64_get_instr (cpu), 31, 31) != 0 | |
10918 | || uimm (aarch64_get_instr (cpu), 29, 25) != 0x06) | |
10919 | HALT_NYI; | |
10920 | ||
10921 | type = uimm (aarch64_get_instr (cpu), 15, 12); | |
10922 | if (type != 0xE && type != 0xC && uimm (aarch64_get_instr (cpu), 21, 21) != 0) | |
10923 | HALT_NYI; | |
10924 | ||
10925 | post = uimm (aarch64_get_instr (cpu), 23, 23); | |
10926 | load = uimm (aarch64_get_instr (cpu), 22, 22); | |
10927 | vn = uimm (aarch64_get_instr (cpu), 9, 5); | |
10928 | address = aarch64_get_reg_u64 (cpu, vn, SP_OK); | |
10929 | ||
10930 | if (post) | |
10931 | { | |
10932 | unsigned vm = uimm (aarch64_get_instr (cpu), 20, 16); | |
10933 | ||
10934 | if (vm == R31) | |
10935 | { | |
10936 | unsigned sizeof_operation; | |
10937 | ||
10938 | switch (type) | |
10939 | { | |
10940 | case 0: sizeof_operation = 32; break; | |
10941 | case 4: sizeof_operation = 24; break; | |
10942 | case 8: sizeof_operation = 16; break; | |
10943 | ||
10944 | case 0xC: | |
10945 | sizeof_operation = uimm (aarch64_get_instr (cpu), 21, 21) ? 2 : 1; | |
10946 | sizeof_operation <<= uimm (aarch64_get_instr (cpu), 11, 10); | |
10947 | break; | |
10948 | ||
10949 | case 0xE: | |
10950 | sizeof_operation = uimm (aarch64_get_instr (cpu), 21, 21) ? 4 : 3; | |
10951 | sizeof_operation <<= uimm (aarch64_get_instr (cpu), 11, 10); | |
10952 | break; | |
10953 | ||
10954 | /* LD1/ST1 multi-register forms increment by the total bytes | |
10955 | moved: register count * 8, doubled below for 128-bit regs.  */ | |
10956 | case 2: sizeof_operation = 4 * 8; break; | |
10957 | case 6: sizeof_operation = 3 * 8; break; | |
10958 | case 10: sizeof_operation = 2 * 8; break; | |
10959 | case 7: sizeof_operation = 1 * 8; break; | |
10960 | ||
10961 | default: | |
10962 | HALT_UNALLOC; | |
10963 | } | |
10964 | ||
10965 | /* The LDnR forms advance by n * esize regardless of Q (bit 30). */ | |
10966 | if (uimm (aarch64_get_instr (cpu), 30, 30) && type != 0xC && type != 0xE) | |
| sizeof_operation *= 2; | |
10967 | ||
10968 | aarch64_set_reg_u64 (cpu, vn, SP_OK, address + sizeof_operation); | |
10969 | } | |
10970 | else | |
10971 | aarch64_set_reg_u64 (cpu, vn, SP_OK, | |
10972 | address + aarch64_get_reg_u64 (cpu, vm, NO_SP)); | |
10973 | } | |
10974 | else | |
10975 | { | |
10976 | NYI_assert (20, 16, 0); | |
10977 | } | |
10978 | ||
10979 | if (load) | |
10980 | { | |
10981 | switch (type) | |
10982 | { | |
10983 | case 0: LD4 (cpu, address); return; | |
10984 | case 4: LD3 (cpu, address); return; | |
10985 | case 8: LD2 (cpu, address); return; | |
10986 | case 2: LD1_4 (cpu, address); return; | |
10987 | case 6: LD1_3 (cpu, address); return; | |
10988 | case 10: LD1_2 (cpu, address); return; | |
10989 | case 7: LD1_1 (cpu, address); return; | |
10990 | ||
10991 | case 0xE: | |
10992 | case 0xC: do_vec_LDnR (cpu, address); return; | |
10993 | ||
10994 | default: | |
10995 | HALT_NYI; | |
10996 | } | |
10997 | } | |
10998 | ||
10999 | /* Stores. */ | |
11000 | switch (type) | |
11001 | { | |
11002 | case 0: ST4 (cpu, address); return; | |
11003 | case 4: ST3 (cpu, address); return; | |
11004 | case 8: ST2 (cpu, address); return; | |
11005 | case 2: ST1_4 (cpu, address); return; | |
11006 | case 6: ST1_3 (cpu, address); return; | |
11007 | case 10: ST1_2 (cpu, address); return; | |
11008 | case 7: ST1_1 (cpu, address); return; | |
11009 | default: | |
11010 | HALT_NYI; | |
11011 | } | |
11012 | } | |
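| ||
| /* Illustrative sketch, ours: for the immediate post-index forms | |
| handled above the increment is the total bytes transferred, | |
| i.e. register count * 8, doubled for 128-bit registers; the | |
| LDnR forms instead advance by n << element-size.  */ | |
| static unsigned ATTRIBUTE_UNUSED | |
| example_ldn_post_increment (unsigned nregs, int full) | |
| { | |
| return nregs * (full ? 16 : 8); | |
| } | |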
11013 | ||
11014 | static void | |
11015 | dexLdSt (sim_cpu *cpu) | |
11016 | { | |
11017 | /* uint32_t group = dispatchGroup (aarch64_get_instr (cpu)); | |
11018 | assert group == GROUP_LDST_0100 || group == GROUP_LDST_0110 || | |
11019 | group == GROUP_LDST_1100 || group == GROUP_LDST_1110 | |
11020 | bits [29,28:26] of a LS are the secondary dispatch vector. */ | |
11021 | uint32_t group2 = dispatchLS (aarch64_get_instr (cpu)); | |
11022 | ||
11023 | switch (group2) | |
11024 | { | |
11025 | case LS_EXCL_000: | |
11026 | dexLoadExclusive (cpu); return; | |
11027 | ||
11028 | case LS_LIT_010: | |
11029 | case LS_LIT_011: | |
11030 | dexLoadLiteral (cpu); return; | |
11031 | ||
11032 | case LS_OTHER_110: | |
11033 | case LS_OTHER_111: | |
11034 | dexLoadOther (cpu); return; | |
11035 | ||
11036 | case LS_ADVSIMD_001: | |
11037 | do_vec_load_store (cpu); return; | |
11038 | ||
11039 | case LS_PAIR_100: | |
11040 | dex_load_store_pair_gr (cpu); return; | |
11041 | ||
11042 | case LS_PAIR_101: | |
11043 | dex_load_store_pair_fp (cpu); return; | |
11044 | ||
11045 | default: | |
11046 | /* Should never reach here. */ | |
11047 | HALT_NYI; | |
11048 | } | |
11049 | } | |
11050 | ||
11051 | /* Specific decode and execute for group Data Processing Register. */ | |
11052 | ||
11053 | static void | |
11054 | dexLogicalShiftedRegister (sim_cpu *cpu) | |
11055 | { | |
11056 | /* assert instr[28:24] = 01010 | |
11057 | instr[31] = size : 0 ==> 32 bit, 1 ==> 64 bit | |
11058 | instr[30,29:21] = op,N : 000 ==> AND, 001 ==> BIC, | |
11059 | 010 ==> ORR, 011 ==> ORN | |
11060 | 100 ==> EOR, 101 ==> EON, | |
11061 | 110 ==> ANDS, 111 ==> BICS | |
11062 | instr[23,22] = shift : 0 ==> LSL, 1 ==> LSR, 2 ==> ASR, 3 ==> ROR | |
11063 | instr[15,10] = count : must be 0xxxxx for 32 bit | |
11064 | instr[9,5] = Rn | |
11065 | instr[4,0] = Rd */ | |
11066 | ||
11067 | /* unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16); */ | |
11068 | uint32_t dispatch; | |
11069 | Shift shiftType; | |
11070 | uint32_t size = uimm (aarch64_get_instr (cpu), 31, 31); | |
11071 | ||
11072 | /* 32 bit operations must have count[5] = 0, | |
11073 | or else we have an UNALLOC.  */ | |
11074 | uint32_t count = uimm (aarch64_get_instr (cpu), 15, 10); | |
11075 | ||
11076 | if (!size && uimm (count, 5, 5)) | |
11077 | HALT_UNALLOC; | |
11078 | ||
11079 | shiftType = shift (aarch64_get_instr (cpu), 22); | |
11080 | ||
11081 | /* Dispatch on size:op:N, i.e. aarch64_get_instr (cpu)[31,29:21]. */ | |
11082 | dispatch = ( (uimm (aarch64_get_instr (cpu), 31, 29) << 1) | |
11083 | | uimm (aarch64_get_instr (cpu), 21, 21)); | |
11084 | ||
11085 | switch (dispatch) | |
11086 | { | |
11087 | case 0: and32_shift (cpu, shiftType, count); return; | |
11088 | case 1: bic32_shift (cpu, shiftType, count); return; | |
11089 | case 2: orr32_shift (cpu, shiftType, count); return; | |
11090 | case 3: orn32_shift (cpu, shiftType, count); return; | |
11091 | case 4: eor32_shift (cpu, shiftType, count); return; | |
11092 | case 5: eon32_shift (cpu, shiftType, count); return; | |
11093 | case 6: ands32_shift (cpu, shiftType, count); return; | |
11094 | case 7: bics32_shift (cpu, shiftType, count); return; | |
11095 | case 8: and64_shift (cpu, shiftType, count); return; | |
11096 | case 9: bic64_shift (cpu, shiftType, count); return; | |
11097 | case 10: orr64_shift (cpu, shiftType, count); return; | |
11098 | case 11: orn64_shift (cpu, shiftType, count); return; | |
11099 | case 12: eor64_shift (cpu, shiftType, count); return; | |
11100 | case 13: eon64_shift (cpu, shiftType, count); return; | |
11101 | case 14: ands64_shift (cpu, shiftType, count); return; | |
11102 | case 15: bics64_shift (cpu, shiftType, count); return; | |
11103 | default: HALT_UNALLOC; | |
11104 | } | |
11105 | } | |
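| ||
| /* Illustrative worked example, ours: the canonical "MOV x0, x1" | |
| is "ORR x0, xzr, x1", with size:op = 101 and N = 0, so the | |
| dispatch value above is (5 << 1) | 0 = 10, selecting | |
| orr64_shift.  */ | |
| static uint32_t ATTRIBUTE_UNUSED | |
| example_logical_dispatch (uint32_t instr) | |
| { | |
| return (uimm (instr, 31, 29) << 1) | uimm (instr, 21, 21); | |
| } | |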
11106 | ||
11107 | /* 32 bit conditional select. */ | |
11108 | static void | |
11109 | csel32 (sim_cpu *cpu, CondCode cc) | |
11110 | { | |
11111 | unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16); | |
11112 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
11113 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
11114 | ||
11115 | aarch64_set_reg_u64 (cpu, rd, NO_SP, | |
11116 | testConditionCode (cpu, cc) | |
11117 | ? aarch64_get_reg_u32 (cpu, rn, NO_SP) | |
11118 | : aarch64_get_reg_u32 (cpu, rm, NO_SP)); | |
11119 | } | |
11120 | ||
11121 | /* 64 bit conditional select. */ | |
11122 | static void | |
11123 | csel64 (sim_cpu *cpu, CondCode cc) | |
11124 | { | |
11125 | unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16); | |
11126 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
11127 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
11128 | ||
11129 | aarch64_set_reg_u64 (cpu, rd, NO_SP, | |
11130 | testConditionCode (cpu, cc) | |
11131 | ? aarch64_get_reg_u64 (cpu, rn, NO_SP) | |
11132 | : aarch64_get_reg_u64 (cpu, rm, NO_SP)); | |
11133 | } | |
11134 | ||
11135 | /* 32 bit conditional increment. */ | |
11136 | static void | |
11137 | csinc32 (sim_cpu *cpu, CondCode cc) | |
11138 | { | |
11139 | unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16); | |
11140 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
11141 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
11142 | ||
11143 | aarch64_set_reg_u64 (cpu, rd, NO_SP, | |
11144 | testConditionCode (cpu, cc) | |
11145 | ? aarch64_get_reg_u32 (cpu, rn, NO_SP) | |
11146 | : aarch64_get_reg_u32 (cpu, rm, NO_SP) + 1); | |
11147 | } | |
11148 | ||
11149 | /* 64 bit conditional increment. */ | |
11150 | static void | |
11151 | csinc64 (sim_cpu *cpu, CondCode cc) | |
11152 | { | |
11153 | unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16); | |
11154 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
11155 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
11156 | ||
11157 | aarch64_set_reg_u64 (cpu, rd, NO_SP, | |
11158 | testConditionCode (cpu, cc) | |
11159 | ? aarch64_get_reg_u64 (cpu, rn, NO_SP) | |
11160 | : aarch64_get_reg_u64 (cpu, rm, NO_SP) + 1); | |
11161 | } | |
11162 | ||
11163 | /* 32 bit conditional invert. */ | |
11164 | static void | |
11165 | csinv32 (sim_cpu *cpu, CondCode cc) | |
11166 | { | |
11167 | unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16); | |
11168 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
11169 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
11170 | ||
11171 | aarch64_set_reg_u64 (cpu, rd, NO_SP, | |
11172 | testConditionCode (cpu, cc) | |
11173 | ? aarch64_get_reg_u32 (cpu, rn, NO_SP) | |
11174 | : ~ aarch64_get_reg_u32 (cpu, rm, NO_SP)); | |
11175 | } | |
11176 | ||
11177 | /* 64 bit conditional invert. */ | |
11178 | static void | |
11179 | csinv64 (sim_cpu *cpu, CondCode cc) | |
11180 | { | |
11181 | unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16); | |
11182 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
11183 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
11184 | ||
11185 | aarch64_set_reg_u64 (cpu, rd, NO_SP, | |
11186 | testConditionCode (cpu, cc) | |
11187 | ? aarch64_get_reg_u64 (cpu, rn, NO_SP) | |
11188 | : ~ aarch64_get_reg_u64 (cpu, rm, NO_SP)); | |
11189 | } | |
11190 | ||
11191 | /* 32 bit conditional negate. */ | |
11192 | static void | |
11193 | csneg32 (sim_cpu *cpu, CondCode cc) | |
11194 | { | |
11195 | unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16); | |
11196 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
11197 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
11198 | ||
11199 | aarch64_set_reg_u64 (cpu, rd, NO_SP, | |
11200 | testConditionCode (cpu, cc) | |
11201 | ? aarch64_get_reg_u32 (cpu, rn, NO_SP) | |
11202 | : - aarch64_get_reg_u32 (cpu, rm, NO_SP)); | |
11203 | } | |
11204 | ||
11205 | /* 64 bit conditional negate. */ | |
11206 | static void | |
11207 | csneg64 (sim_cpu *cpu, CondCode cc) | |
11208 | { | |
11209 | unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16); | |
11210 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
11211 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
11212 | ||
11213 | aarch64_set_reg_u64 (cpu, rd, NO_SP, | |
11214 | testConditionCode (cpu, cc) | |
11215 | ? aarch64_get_reg_u64 (cpu, rn, NO_SP) | |
11216 | : - aarch64_get_reg_u64 (cpu, rm, NO_SP)); | |
11217 | } | |
11218 | ||
11219 | static void | |
11220 | dexCondSelect (sim_cpu *cpu) | |
11221 | { | |
11222 | /* assert instr[28,21] = 11011011 | |
11223 | instr[31] = size : 0 ==> 32 bit, 1 ==> 64 bit | |
11224 | instr[30:11,10] = op : 000 ==> CSEL, 001 ==> CSINC, | |
11225 | 100 ==> CSINV, 101 ==> CSNEG, | |
11226 | _1_ ==> UNALLOC | |
11227 | instr[29] = S : 0 ==> ok, 1 ==> UNALLOC | |
11228 | instr[20,16] = Rm | |
11229 | instr[15,12] = cond | |
11230 | instr[9,5] = Rn | |
11231 | instr[4,0] = Rd */ | |
11230 | ||
11231 | CondCode cc; | |
11232 | uint32_t dispatch; | |
11233 | uint32_t S = uimm (aarch64_get_instr (cpu), 29, 29); | |
11234 | uint32_t op2 = uimm (aarch64_get_instr (cpu), 11, 10); | |
11235 | ||
11236 | if (S == 1) | |
11237 | HALT_UNALLOC; | |
11238 | ||
11239 | if (op2 & 0x2) | |
11240 | HALT_UNALLOC; | |
11241 | ||
11242 | cc = condcode (aarch64_get_instr (cpu), 12); | |
11243 | dispatch = ((uimm (aarch64_get_instr (cpu), 31, 30) << 1) | op2); | |
11244 | ||
11245 | switch (dispatch) | |
11246 | { | |
11247 | case 0: csel32 (cpu, cc); return; | |
11248 | case 1: csinc32 (cpu, cc); return; | |
11249 | case 2: csinv32 (cpu, cc); return; | |
11250 | case 3: csneg32 (cpu, cc); return; | |
11251 | case 4: csel64 (cpu, cc); return; | |
11252 | case 5: csinc64 (cpu, cc); return; | |
11253 | case 6: csinv64 (cpu, cc); return; | |
11254 | case 7: csneg64 (cpu, cc); return; | |
11255 | default: HALT_UNALLOC; | |
11256 | } | |
11257 | } | |
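| ||
| /* Illustrative note, ours: the usual aliases are built from these | |
| primitives with the zero register and an inverted condition, | |
| e.g. "CSET Xd, cond" is "CSINC Xd, XZR, XZR, invert (cond)"; | |
| XZR reads as zero, so Xd becomes 0 + 1 exactly when cond holds.  */ | |
| static uint64_t ATTRIBUTE_UNUSED | |
| example_cset_result (int cond_holds) | |
| { | |
| return cond_holds ? 0 + 1 : 0; | |
| } | |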
11258 | ||
11259 | /* Some helpers for counting leading 1 or 0 bits. */ | |
11260 | ||
11261 | /* Counts the number of leading bits which are the same | |
11262 | in a 32 bit value; the result is in the range 1 to 32. */ | |
11263 | static uint32_t | |
11264 | leading32 (uint32_t value) | |
11265 | { | |
11266 | int32_t mask = 0xffff0000; | |
11267 | uint32_t count = 16; /* Counts number of bits set in mask. */ | |
11268 | uint32_t lo = 1; /* Lower bound for number of sign bits. */ | |
11269 | uint32_t hi = 32; /* Upper bound for number of sign bits. */ | |
11270 | ||
11271 | while (lo + 1 < hi) | |
11272 | { | |
11273 | int32_t test = (value & mask); | |
11274 | ||
11275 | if (test == 0 || test == mask) | |
11276 | { | |
11277 | lo = count; | |
11278 | count = (lo + hi) / 2; | |
11279 | mask >>= (count - lo); | |
11280 | } | |
11281 | else | |
11282 | { | |
11283 | hi = count; | |
11284 | count = (lo + hi) / 2; | |
11285 | mask <<= hi - count; | |
11286 | } | |
11287 | } | |
11288 | ||
11289 | if (lo != hi) | |
11290 | { | |
11291 | int32_t test; | |
11292 | ||
11293 | mask >>= 1; | |
11294 | test = (value & mask); | |
11295 | ||
11296 | if (test == 0 || test == mask) | |
11297 | count = hi; | |
11298 | else | |
11299 | count = lo; | |
11300 | } | |
11301 | ||
11302 | return count; | |
11303 | } | |
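| ||
| /* Illustrative spot checks, ours, for the binary search above: | |
| leading32 counts how many leading bits equal bit 31.  */ | |
| static int ATTRIBUTE_UNUSED | |
| example_leading32_checks (void) | |
| { | |
| return leading32 (0x00000001) == 31 /* 31 leading zeros. */ | |
| && leading32 (0xFFFF0000) == 16 /* 16 leading ones. */ | |
| && leading32 (0) == 32; /* All 32 bits the same. */ | |
| } | |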
11304 | ||
11305 | /* Counts the number of leading bits which are the same | |
11306 | in a 64 bit value; the result is in the range 1 to 64. */ | |
11307 | static uint64_t | |
11308 | leading64 (uint64_t value) | |
11309 | { | |
11310 | int64_t mask = 0xffffffff00000000LL; | |
11311 | uint64_t count = 32; /* Counts number of bits set in mask. */ | |
11312 | uint64_t lo = 1; /* Lower bound for number of sign bits. */ | |
11313 | uint64_t hi = 64; /* Upper bound for number of sign bits. */ | |
11314 | ||
11315 | while (lo + 1 < hi) | |
11316 | { | |
11317 | int64_t test = (value & mask); | |
11318 | ||
11319 | if (test == 0 || test == mask) | |
11320 | { | |
11321 | lo = count; | |
11322 | count = (lo + hi) / 2; | |
11323 | mask >>= (count - lo); | |
11324 | } | |
11325 | else | |
11326 | { | |
11327 | hi = count; | |
11328 | count = (lo + hi) / 2; | |
11329 | mask <<= hi - count; | |
11330 | } | |
11331 | } | |
11332 | ||
11333 | if (lo != hi) | |
11334 | { | |
11335 | int64_t test; | |
11336 | ||
11337 | mask >>= 1; | |
11338 | test = (value & mask); | |
11339 | ||
11340 | if (test == 0 || test == mask) | |
11341 | count = hi; | |
11342 | else | |
11343 | count = lo; | |
11344 | } | |
11345 | ||
11346 | return count; | |
11347 | } | |
11348 | ||
11349 | /* Bit operations. */ | |
/* N.B. register args may not be SP.  */
11351 | ||
11352 | /* 32 bit count leading sign bits. */ | |
11353 | static void | |
11354 | cls32 (sim_cpu *cpu) | |
11355 | { | |
11356 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
11357 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
11358 | ||
11359 | /* N.B. the result needs to exclude the leading bit. */ | |
11360 | aarch64_set_reg_u64 | |
11361 | (cpu, rd, NO_SP, leading32 (aarch64_get_reg_u32 (cpu, rn, NO_SP)) - 1); | |
11362 | } | |
11363 | ||
11364 | /* 64 bit count leading sign bits. */ | |
11365 | static void | |
11366 | cls64 (sim_cpu *cpu) | |
11367 | { | |
11368 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
11369 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
11370 | ||
11371 | /* N.B. the result needs to exclude the leading bit. */ | |
11372 | aarch64_set_reg_u64 | |
11373 | (cpu, rd, NO_SP, leading64 (aarch64_get_reg_u64 (cpu, rn, NO_SP)) - 1); | |
11374 | } | |
11375 | ||
11376 | /* 32 bit count leading zero bits. */ | |
11377 | static void | |
11378 | clz32 (sim_cpu *cpu) | |
11379 | { | |
11380 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
11381 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
11382 | uint32_t value = aarch64_get_reg_u32 (cpu, rn, NO_SP); | |
11383 | ||
11384 | /* if the sign (top) bit is set then the count is 0. */ | |
11385 | if (pick32 (value, 31, 31)) | |
11386 | aarch64_set_reg_u64 (cpu, rd, NO_SP, 0L); | |
11387 | else | |
11388 | aarch64_set_reg_u64 (cpu, rd, NO_SP, leading32 (value)); | |
11389 | } | |
11390 | ||
11391 | /* 64 bit count leading zero bits. */ | |
11392 | static void | |
11393 | clz64 (sim_cpu *cpu) | |
11394 | { | |
11395 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
11396 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
11397 | uint64_t value = aarch64_get_reg_u64 (cpu, rn, NO_SP); | |
11398 | ||
11399 | /* if the sign (top) bit is set then the count is 0. */ | |
11400 | if (pick64 (value, 63, 63)) | |
11401 | aarch64_set_reg_u64 (cpu, rd, NO_SP, 0L); | |
11402 | else | |
11403 | aarch64_set_reg_u64 (cpu, rd, NO_SP, leading64 (value)); | |
11404 | } | |
11405 | ||
11406 | /* 32 bit reverse bits. */ | |
11407 | static void | |
11408 | rbit32 (sim_cpu *cpu) | |
11409 | { | |
11410 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
11411 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
11412 | uint32_t value = aarch64_get_reg_u32 (cpu, rn, NO_SP); | |
11413 | uint32_t result = 0; | |
11414 | int i; | |
11415 | ||
11416 | for (i = 0; i < 32; i++) | |
11417 | { | |
11418 | result <<= 1; | |
11419 | result |= (value & 1); | |
11420 | value >>= 1; | |
11421 | } | |
11422 | aarch64_set_reg_u64 (cpu, rd, NO_SP, result); | |
11423 | } | |
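/* Illustrative values (worked by hand): rbit32 (0x00000001) ==
   0x80000000 and rbit32 (0x12345678) == 0x1e6a2c48 -- bit i of the
   source becomes bit (31 - i) of the result.  */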
11424 | ||
11425 | /* 64 bit reverse bits. */ | |
11426 | static void | |
11427 | rbit64 (sim_cpu *cpu) | |
11428 | { | |
11429 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
11430 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
11431 | uint64_t value = aarch64_get_reg_u64 (cpu, rn, NO_SP); | |
11432 | uint64_t result = 0; | |
11433 | int i; | |
11434 | ||
11435 | for (i = 0; i < 64; i++) | |
11436 | { | |
11437 | result <<= 1; | |
11438 | result |= (value & 1L); | |
11439 | value >>= 1; | |
11440 | } | |
11441 | aarch64_set_reg_u64 (cpu, rd, NO_SP, result); | |
11442 | } | |
11443 | ||
11444 | /* 32 bit reverse bytes. */ | |
11445 | static void | |
11446 | rev32 (sim_cpu *cpu) | |
11447 | { | |
11448 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
11449 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
11450 | uint32_t value = aarch64_get_reg_u32 (cpu, rn, NO_SP); | |
11451 | uint32_t result = 0; | |
11452 | int i; | |
11453 | ||
11454 | for (i = 0; i < 4; i++) | |
11455 | { | |
11456 | result <<= 8; | |
11457 | result |= (value & 0xff); | |
11458 | value >>= 8; | |
11459 | } | |
11460 | aarch64_set_reg_u64 (cpu, rd, NO_SP, result); | |
11461 | } | |
11462 | ||
11463 | /* 64 bit reverse bytes. */ | |
11464 | static void | |
11465 | rev64 (sim_cpu *cpu) | |
11466 | { | |
11467 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
11468 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
11469 | uint64_t value = aarch64_get_reg_u64 (cpu, rn, NO_SP); | |
11470 | uint64_t result = 0; | |
11471 | int i; | |
11472 | ||
11473 | for (i = 0; i < 8; i++) | |
11474 | { | |
11475 | result <<= 8; | |
11476 | result |= (value & 0xffULL); | |
11477 | value >>= 8; | |
11478 | } | |
11479 | aarch64_set_reg_u64 (cpu, rd, NO_SP, result); | |
11480 | } | |
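/* Illustrative values: rev32 (0x12345678) == 0x78563412 and
   rev64 (0x0123456789abcdef) == 0xefcdab8967452301 -- a full
   byte-order (endianness) swap of the register.  */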
11481 | ||
11482 | /* 32 bit reverse shorts. */ | |
/* N.B. this reverses the order of the bytes in each half word.  */
11484 | static void | |
11485 | revh32 (sim_cpu *cpu) | |
11486 | { | |
11487 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
11488 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
11489 | uint32_t value = aarch64_get_reg_u32 (cpu, rn, NO_SP); | |
11490 | uint32_t result = 0; | |
11491 | int i; | |
11492 | ||
11493 | for (i = 0; i < 2; i++) | |
11494 | { | |
11495 | result <<= 8; | |
11496 | result |= (value & 0x00ff00ff); | |
11497 | value >>= 8; | |
11498 | } | |
11499 | aarch64_set_reg_u64 (cpu, rd, NO_SP, result); | |
11500 | } | |
11501 | ||
11502 | /* 64 bit reverse shorts. */ | |
/* N.B. this reverses the order of the bytes in each half word.  */
11504 | static void | |
11505 | revh64 (sim_cpu *cpu) | |
11506 | { | |
11507 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
11508 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
11509 | uint64_t value = aarch64_get_reg_u64 (cpu, rn, NO_SP); | |
11510 | uint64_t result = 0; | |
11511 | int i; | |
11512 | ||
11513 | for (i = 0; i < 2; i++) | |
11514 | { | |
11515 | result <<= 8; | |
11516 | result |= (value & 0x00ff00ff00ff00ffULL); | |
11517 | value >>= 8; | |
11518 | } | |
11519 | aarch64_set_reg_u64 (cpu, rd, NO_SP, result); | |
11520 | } | |
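/* Worked example for the halfword byte swap: revh32 (0x11223344)
   yields 0x22114433.  Pass 1 keeps bytes 0 and 2 (0x00220044);
   pass 2 shifts that up and merges bytes 1 and 3 of the shifted
   value, giving 0x22114433.  */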
11521 | ||
11522 | static void | |
11523 | dexDataProc1Source (sim_cpu *cpu) | |
11524 | { | |
  /* assert instr[30] == 1
     instr[28,21] == 11010110
     instr[31] = size : 0 ==> 32 bit, 1 ==> 64 bit
     instr[29] = S : 0 ==> ok, 1 ==> UNALLOC
     instr[20,16] = opcode2 : 00000 ==> ok, ow ==> UNALLOC
     instr[15,10] = opcode : 000000 ==> RBIT, 000001 ==> REV16,
                             000010 ==> REV (REV32 when size == 1),
                             000011 ==> UNALLOC (REV when size == 1),
                             000100 ==> CLZ, 000101 ==> CLS
                             ow ==> UNALLOC
     instr[9,5] = rn : may not be SP
     instr[4,0] = rd : may not be SP.  */
11536 | ||
11537 | uint32_t S = uimm (aarch64_get_instr (cpu), 29, 29); | |
11538 | uint32_t opcode2 = uimm (aarch64_get_instr (cpu), 20, 16); | |
11539 | uint32_t opcode = uimm (aarch64_get_instr (cpu), 15, 10); | |
11540 | uint32_t dispatch = ((uimm (aarch64_get_instr (cpu), 31, 31) << 3) | opcode); | |
11541 | ||
11542 | if (S == 1) | |
11543 | HALT_UNALLOC; | |
11544 | ||
11545 | if (opcode2 != 0) | |
11546 | HALT_UNALLOC; | |
11547 | ||
11548 | if (opcode & 0x38) | |
11549 | HALT_UNALLOC; | |
11550 | ||
  switch (dispatch)
    {
    case 0:  rbit32 (cpu); return;
    case 1:  revh32 (cpu); return;
    case 2:  rev32 (cpu); return;
    case 4:  clz32 (cpu); return;
    case 5:  cls32 (cpu); return;
    case 8:  rbit64 (cpu); return;
    case 9:  revh64 (cpu); return;
    /* N.B. case 10 is REV32 applied to a 64 bit source, but rev32
       only reverses (and zero-extends) the low word of rn, so the
       high word of the source is not modelled here.  */
    case 10: rev32 (cpu); return;
    case 11: rev64 (cpu); return;
    case 12: clz64 (cpu); return;
    case 13: cls64 (cpu); return;
    default: HALT_UNALLOC;
    }
11566 | } | |
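/* Worked decode sketch (hypothetical instructions): CLZ X5, X4 has
   size = 1 and opcode = 000100, so dispatch = (1 << 3) | 4 = 12 and
   clz64 is called; RBIT W0, W1 has size = 0 and opcode = 000000, so
   dispatch = 0 and rbit32 is called.  */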
11567 | ||
/* Variable shift.
   Shifts by count supplied in register.
   N.B. register args may not be SP.
   These all use the shifted auxiliary function for
   simplicity and clarity.  Writing the actual shift
   inline would avoid a branch and so be faster but
   would also necessitate getting signs right.  */
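/* For instance (values worked by hand): with Wn = 0x80000001 and
   Wm = 33, only the low five bits of Wm are used, so each variable
   shift is by one place: RORV gives 0xc0000000, LSRV gives
   0x40000000 and ASRV gives 0xc0000000.  */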
11575 | ||
11576 | /* 32 bit arithmetic shift right. */ | |
11577 | static void | |
11578 | asrv32 (sim_cpu *cpu) | |
11579 | { | |
11580 | unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16); | |
11581 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
11582 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
11583 | ||
11584 | aarch64_set_reg_u64 | |
11585 | (cpu, rd, NO_SP, | |
11586 | shifted32 (aarch64_get_reg_u32 (cpu, rn, NO_SP), ASR, | |
11587 | (aarch64_get_reg_u32 (cpu, rm, NO_SP) & 0x1f))); | |
11588 | } | |
11589 | ||
11590 | /* 64 bit arithmetic shift right. */ | |
11591 | static void | |
11592 | asrv64 (sim_cpu *cpu) | |
11593 | { | |
11594 | unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16); | |
11595 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
11596 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
11597 | ||
11598 | aarch64_set_reg_u64 | |
11599 | (cpu, rd, NO_SP, | |
11600 | shifted64 (aarch64_get_reg_u64 (cpu, rn, NO_SP), ASR, | |
11601 | (aarch64_get_reg_u64 (cpu, rm, NO_SP) & 0x3f))); | |
11602 | } | |
11603 | ||
11604 | /* 32 bit logical shift left. */ | |
11605 | static void | |
11606 | lslv32 (sim_cpu *cpu) | |
11607 | { | |
11608 | unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16); | |
11609 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
11610 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
11611 | ||
11612 | aarch64_set_reg_u64 | |
11613 | (cpu, rd, NO_SP, | |
11614 | shifted32 (aarch64_get_reg_u32 (cpu, rn, NO_SP), LSL, | |
11615 | (aarch64_get_reg_u32 (cpu, rm, NO_SP) & 0x1f))); | |
11616 | } | |
11617 | ||
/* 64 bit logical shift left.  */
11619 | static void | |
11620 | lslv64 (sim_cpu *cpu) | |
11621 | { | |
11622 | unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16); | |
11623 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
11624 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
11625 | ||
11626 | aarch64_set_reg_u64 | |
11627 | (cpu, rd, NO_SP, | |
11628 | shifted64 (aarch64_get_reg_u64 (cpu, rn, NO_SP), LSL, | |
11629 | (aarch64_get_reg_u64 (cpu, rm, NO_SP) & 0x3f))); | |
11630 | } | |
11631 | ||
11632 | /* 32 bit logical shift right. */ | |
11633 | static void | |
11634 | lsrv32 (sim_cpu *cpu) | |
11635 | { | |
11636 | unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16); | |
11637 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
11638 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
11639 | ||
11640 | aarch64_set_reg_u64 | |
11641 | (cpu, rd, NO_SP, | |
11642 | shifted32 (aarch64_get_reg_u32 (cpu, rn, NO_SP), LSR, | |
11643 | (aarch64_get_reg_u32 (cpu, rm, NO_SP) & 0x1f))); | |
11644 | } | |
11645 | ||
11646 | /* 64 bit logical shift right. */ | |
11647 | static void | |
11648 | lsrv64 (sim_cpu *cpu) | |
11649 | { | |
11650 | unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16); | |
11651 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
11652 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
11653 | ||
11654 | aarch64_set_reg_u64 | |
11655 | (cpu, rd, NO_SP, | |
11656 | shifted64 (aarch64_get_reg_u64 (cpu, rn, NO_SP), LSR, | |
11657 | (aarch64_get_reg_u64 (cpu, rm, NO_SP) & 0x3f))); | |
11658 | } | |
11659 | ||
11660 | /* 32 bit rotate right. */ | |
11661 | static void | |
11662 | rorv32 (sim_cpu *cpu) | |
11663 | { | |
11664 | unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16); | |
11665 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
11666 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
11667 | ||
11668 | aarch64_set_reg_u64 | |
11669 | (cpu, rd, NO_SP, | |
11670 | shifted32 (aarch64_get_reg_u32 (cpu, rn, NO_SP), ROR, | |
11671 | (aarch64_get_reg_u32 (cpu, rm, NO_SP) & 0x1f))); | |
11672 | } | |
11673 | ||
11674 | /* 64 bit rotate right. */ | |
11675 | static void | |
11676 | rorv64 (sim_cpu *cpu) | |
11677 | { | |
11678 | unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16); | |
11679 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
11680 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
11681 | ||
11682 | aarch64_set_reg_u64 | |
11683 | (cpu, rd, NO_SP, | |
11684 | shifted64 (aarch64_get_reg_u64 (cpu, rn, NO_SP), ROR, | |
11685 | (aarch64_get_reg_u64 (cpu, rm, NO_SP) & 0x3f))); | |
11686 | } | |
11687 | ||
11688 | ||
11689 | /* divide. */ | |
11690 | ||
11691 | /* 32 bit signed divide. */ | |
11692 | static void | |
sdiv32 (sim_cpu *cpu)
11694 | { | |
11695 | unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16); | |
11696 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
11697 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
11698 | /* N.B. the pseudo-code does the divide using 64 bit data. */ | |
11699 | /* TODO : check that this rounds towards zero as required. */ | |
11700 | int64_t dividend = aarch64_get_reg_s32 (cpu, rn, NO_SP); | |
11701 | int64_t divisor = aarch64_get_reg_s32 (cpu, rm, NO_SP); | |
11702 | ||
11703 | aarch64_set_reg_s64 (cpu, rd, NO_SP, | |
11704 | divisor ? ((int32_t) (dividend / divisor)) : 0); | |
11705 | } | |
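/* Worked overflow case: INT32_MIN / -1 does not fit in 32 bits.
   Because the division above is done in 64 bit arithmetic the
   quotient is 0x80000000, and the cast back to int32_t yields
   INT32_MIN on the usual two's-complement hosts, matching the
   architected SDIV overflow result.  */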
11706 | ||
/* 64 bit signed divide.  */
static void
sdiv64 (sim_cpu *cpu)
{
  unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
  unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
  unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);

  /* TODO : check that this rounds towards zero as required.  */
  int64_t dividend = aarch64_get_reg_s64 (cpu, rn, NO_SP);
  int64_t divisor = aarch64_get_reg_s64 (cpu, rm, NO_SP);

  /* N.B. evaluating INT64_MIN / -1 in C is undefined behaviour (it
     can trap on the host); the architecture defines the overflowing
     divide to produce INT64_MIN, so special-case it.  */
  if (divisor == 0)
    aarch64_set_reg_s64 (cpu, rd, NO_SP, 0);
  else if (dividend == INT64_MIN && divisor == -1)
    aarch64_set_reg_s64 (cpu, rd, NO_SP, INT64_MIN);
  else
    aarch64_set_reg_s64 (cpu, rd, NO_SP, dividend / divisor);
}
11722 | ||
11723 | /* 32 bit unsigned divide. */ | |
11724 | static void | |
11725 | udiv32 (sim_cpu *cpu) | |
11726 | { | |
11727 | unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16); | |
11728 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
11729 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
11730 | ||
11731 | /* N.B. the pseudo-code does the divide using 64 bit data. */ | |
11732 | uint64_t dividend = aarch64_get_reg_u32 (cpu, rn, NO_SP); | |
11733 | uint64_t divisor = aarch64_get_reg_u32 (cpu, rm, NO_SP); | |
11734 | ||
11735 | aarch64_set_reg_u64 (cpu, rd, NO_SP, | |
11736 | divisor ? (uint32_t) (dividend / divisor) : 0); | |
11737 | } | |
11738 | ||
11739 | /* 64 bit unsigned divide. */ | |
11740 | static void | |
11741 | udiv64 (sim_cpu *cpu) | |
11742 | { | |
11743 | unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16); | |
11744 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
11745 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
11746 | ||
11747 | /* TODO : check that this rounds towards zero as required. */ | |
11748 | uint64_t divisor = aarch64_get_reg_u64 (cpu, rm, NO_SP); | |
11749 | ||
11750 | aarch64_set_reg_u64 | |
11751 | (cpu, rd, NO_SP, | |
11752 | divisor ? (aarch64_get_reg_u64 (cpu, rn, NO_SP) / divisor) : 0); | |
11753 | } | |
11754 | ||
11755 | static void | |
11756 | dexDataProc2Source (sim_cpu *cpu) | |
11757 | { | |
11758 | /* assert instr[30] == 0 | |
11759 | instr[28,21] == 11010110 | |
11760 | instr[31] = size : 0 ==> 32 bit, 1 ==> 64 bit | |
11761 | instr[29] = S : 0 ==> ok, 1 ==> UNALLOC | |
     instr[15,10] = opcode : 000010 ==> UDIV, 000011 ==> SDIV,
11763 | 001000 ==> LSLV, 001001 ==> LSRV | |
11764 | 001010 ==> ASRV, 001011 ==> RORV | |
11765 | ow ==> UNALLOC. */ | |
11766 | ||
11767 | uint32_t dispatch; | |
11768 | uint32_t S = uimm (aarch64_get_instr (cpu), 29, 29); | |
11769 | uint32_t opcode = uimm (aarch64_get_instr (cpu), 15, 10); | |
11770 | ||
11771 | if (S == 1) | |
11772 | HALT_UNALLOC; | |
11773 | ||
11774 | if (opcode & 0x34) | |
11775 | HALT_UNALLOC; | |
11776 | ||
11777 | dispatch = ( (uimm (aarch64_get_instr (cpu), 31, 31) << 3) | |
11778 | | (uimm (opcode, 3, 3) << 2) | |
11779 | | uimm (opcode, 1, 0)); | |
11780 | switch (dispatch) | |
11781 | { | |
11782 | case 2: udiv32 (cpu); return; | |
    case 3: sdiv32 (cpu); return;
11784 | case 4: lslv32 (cpu); return; | |
11785 | case 5: lsrv32 (cpu); return; | |
11786 | case 6: asrv32 (cpu); return; | |
11787 | case 7: rorv32 (cpu); return; | |
11788 | case 10: udiv64 (cpu); return; | |
    case 11: sdiv64 (cpu); return;
11790 | case 12: lslv64 (cpu); return; | |
11791 | case 13: lsrv64 (cpu); return; | |
11792 | case 14: asrv64 (cpu); return; | |
11793 | case 15: rorv64 (cpu); return; | |
11794 | default: HALT_UNALLOC; | |
11795 | } | |
11796 | } | |
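/* Worked decode sketch (hypothetical instructions): UDIV X0, X1, X2
   has size = 1 and opcode = 000010, so dispatch = (1 << 3) |
   (0 << 2) | 2 = 10 and udiv64 is called; LSLV W0, W1, W2 has
   size = 0 and opcode = 001000, so dispatch = (0 << 3) | (1 << 2)
   | 0 = 4 and lslv32 is called.  */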
11797 | ||
11798 | ||
11799 | /* Multiply. */ | |
11800 | ||
11801 | /* 32 bit multiply and add. */ | |
11802 | static void | |
11803 | madd32 (sim_cpu *cpu) | |
11804 | { | |
11805 | unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16); | |
11806 | unsigned ra = uimm (aarch64_get_instr (cpu), 14, 10); | |
11807 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
11808 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
11809 | ||
11810 | aarch64_set_reg_u64 (cpu, rd, NO_SP, | |
11811 | aarch64_get_reg_u32 (cpu, ra, NO_SP) | |
11812 | + aarch64_get_reg_u32 (cpu, rn, NO_SP) | |
11813 | * aarch64_get_reg_u32 (cpu, rm, NO_SP)); | |
11814 | } | |
11815 | ||
11816 | /* 64 bit multiply and add. */ | |
11817 | static void | |
11818 | madd64 (sim_cpu *cpu) | |
11819 | { | |
11820 | unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16); | |
11821 | unsigned ra = uimm (aarch64_get_instr (cpu), 14, 10); | |
11822 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
11823 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
11824 | ||
11825 | aarch64_set_reg_u64 (cpu, rd, NO_SP, | |
11826 | aarch64_get_reg_u64 (cpu, ra, NO_SP) | |
11827 | + aarch64_get_reg_u64 (cpu, rn, NO_SP) | |
11828 | * aarch64_get_reg_u64 (cpu, rm, NO_SP)); | |
11829 | } | |
11830 | ||
11831 | /* 32 bit multiply and sub. */ | |
11832 | static void | |
11833 | msub32 (sim_cpu *cpu) | |
11834 | { | |
11835 | unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16); | |
11836 | unsigned ra = uimm (aarch64_get_instr (cpu), 14, 10); | |
11837 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
11838 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
11839 | ||
11840 | aarch64_set_reg_u64 (cpu, rd, NO_SP, | |
11841 | aarch64_get_reg_u32 (cpu, ra, NO_SP) | |
11842 | - aarch64_get_reg_u32 (cpu, rn, NO_SP) | |
11843 | * aarch64_get_reg_u32 (cpu, rm, NO_SP)); | |
11844 | } | |
11845 | ||
11846 | /* 64 bit multiply and sub. */ | |
11847 | static void | |
11848 | msub64 (sim_cpu *cpu) | |
11849 | { | |
11850 | unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16); | |
11851 | unsigned ra = uimm (aarch64_get_instr (cpu), 14, 10); | |
11852 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
11853 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
11854 | ||
11855 | aarch64_set_reg_u64 (cpu, rd, NO_SP, | |
11856 | aarch64_get_reg_u64 (cpu, ra, NO_SP) | |
11857 | - aarch64_get_reg_u64 (cpu, rn, NO_SP) | |
11858 | * aarch64_get_reg_u64 (cpu, rm, NO_SP)); | |
11859 | } | |
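/* N.B. the plain MUL and MNEG mnemonics are aliases for MADD and
   MSUB with ra == 31; assuming (as elsewhere in this file) that
   register 31 reads as zero under NO_SP, the routines above cover
   them with no special casing.  */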
11860 | ||
11861 | /* Signed multiply add long -- source, source2 : 32 bit, source3 : 64 bit. */ | |
11862 | static void | |
11863 | smaddl (sim_cpu *cpu) | |
11864 | { | |
11865 | unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16); | |
11866 | unsigned ra = uimm (aarch64_get_instr (cpu), 14, 10); | |
11867 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
11868 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
11869 | ||
11870 | /* N.B. we need to multiply the signed 32 bit values in rn, rm to | |
11871 | obtain a 64 bit product. */ | |
11872 | aarch64_set_reg_s64 | |
11873 | (cpu, rd, NO_SP, | |
11874 | aarch64_get_reg_s64 (cpu, ra, NO_SP) | |
11875 | + ((int64_t) aarch64_get_reg_s32 (cpu, rn, NO_SP)) | |
11876 | * ((int64_t) aarch64_get_reg_s32 (cpu, rm, NO_SP))); | |
11877 | } | |
11878 | ||
11879 | /* Signed multiply sub long -- source, source2 : 32 bit, source3 : 64 bit. */ | |
11880 | static void | |
11881 | smsubl (sim_cpu *cpu) | |
11882 | { | |
11883 | unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16); | |
11884 | unsigned ra = uimm (aarch64_get_instr (cpu), 14, 10); | |
11885 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
11886 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
11887 | ||
11888 | /* N.B. we need to multiply the signed 32 bit values in rn, rm to | |
11889 | obtain a 64 bit product. */ | |
11890 | aarch64_set_reg_s64 | |
11891 | (cpu, rd, NO_SP, | |
11892 | aarch64_get_reg_s64 (cpu, ra, NO_SP) | |
11893 | - ((int64_t) aarch64_get_reg_s32 (cpu, rn, NO_SP)) | |
11894 | * ((int64_t) aarch64_get_reg_s32 (cpu, rm, NO_SP))); | |
11895 | } | |
11896 | ||
11897 | /* Integer Multiply/Divide. */ | |
11898 | ||
11899 | /* First some macros and a helper function. */ | |
11900 | /* Macros to test or access elements of 64 bit words. */ | |
11901 | ||
11902 | /* Mask used to access lo 32 bits of 64 bit unsigned int. */ | |
11903 | #define LOW_WORD_MASK ((1ULL << 32) - 1) | |
11904 | /* Return the lo 32 bit word of a 64 bit unsigned int as a 64 bit unsigned int. */ | |
11905 | #define lowWordToU64(_value_u64) ((_value_u64) & LOW_WORD_MASK) | |
11906 | /* Return the hi 32 bit word of a 64 bit unsigned int as a 64 bit unsigned int. */ | |
11907 | #define highWordToU64(_value_u64) ((_value_u64) >> 32) | |
11908 | ||
/* Offset of sign bit in 64 bit signed integer.  */
11910 | #define SIGN_SHIFT_U64 63 | |
11911 | /* The sign bit itself -- also identifies the minimum negative int value. */ | |
#define SIGN_BIT_U64 (1ULL << SIGN_SHIFT_U64)
11913 | /* Return true if a 64 bit signed int presented as an unsigned int is the | |
11914 | most negative value. */ | |
11915 | #define isMinimumU64(_value_u64) ((_value_u64) == SIGN_BIT_U64) | |
/* Return true (non-zero) if a 64 bit signed int presented as an unsigned
   int has its sign bit set.  */
11918 | #define isSignSetU64(_value_u64) ((_value_u64) & SIGN_BIT_U64) | |
/* Return 1L or -1L according to whether a 64 bit signed int presented as
   an unsigned int has its sign bit clear or set.  */
#define signOfU64(_value_u64) (1L + (((_value_u64) >> SIGN_SHIFT_U64) * -2L))
11922 | /* Clear the sign bit of a 64 bit signed int presented as an unsigned int. */ | |
11923 | #define clearSignU64(_value_u64) ((_value_u64) &= ~SIGN_BIT_U64) | |
11924 | ||
/* Multiply two 64 bit ints and return
   the hi 64 bits of the 128 bit product.  */
11927 | ||
11928 | static uint64_t | |
11929 | mul64hi (uint64_t value1, uint64_t value2) | |
11930 | { | |
11931 | uint64_t resultmid1; | |
11932 | uint64_t result; | |
11933 | uint64_t value1_lo = lowWordToU64 (value1); | |
  uint64_t value1_hi = highWordToU64 (value1);
11935 | uint64_t value2_lo = lowWordToU64 (value2); | |
11936 | uint64_t value2_hi = highWordToU64 (value2); | |
11937 | ||
11938 | /* Cross-multiply and collect results. */ | |
11939 | ||
11940 | uint64_t xproductlo = value1_lo * value2_lo; | |
11941 | uint64_t xproductmid1 = value1_lo * value2_hi; | |
11942 | uint64_t xproductmid2 = value1_hi * value2_lo; | |
11943 | uint64_t xproducthi = value1_hi * value2_hi; | |
11944 | uint64_t carry = 0; | |
11945 | /* Start accumulating 64 bit results. */ | |
11946 | /* Drop bottom half of lowest cross-product. */ | |
11947 | uint64_t resultmid = xproductlo >> 32; | |
11948 | /* Add in middle products. */ | |
11949 | resultmid = resultmid + xproductmid1; | |
11950 | ||
11951 | /* Check for overflow. */ | |
11952 | if (resultmid < xproductmid1) | |
11953 | /* Carry over 1 into top cross-product. */ | |
11954 | carry++; | |
11955 | ||
11956 | resultmid1 = resultmid + xproductmid2; | |
11957 | ||
11958 | /* Check for overflow. */ | |
11959 | if (resultmid1 < xproductmid2) | |
11960 | /* Carry over 1 into top cross-product. */ | |
11961 | carry++; | |
11962 | ||
  /* Drop lowest 32 bits of middle cross-product.  */
  result = resultmid1 >> 32;

  /* Move the carry bits to just above the middle cross-product's
     highest bit; each overflow of the 64 bit mid sum is worth 2^32
     in the high half of the full 128 bit product.  */
  carry = carry << 32;

  /* Add top cross-product plus any carry.  */
  result += xproducthi + carry;
11968 | ||
11969 | return result; | |
11970 | } | |
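/* Hand-checked example: with value1 == value2 == 0xffffffffffffffff
   the true square is 2^128 - 2^65 + 1, whose high half is
   0xfffffffffffffffe; with the carry shifted into place the routine
   above returns exactly that.  */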
11971 | ||
11972 | /* Signed multiply high, source, source2 : | |
11973 | 64 bit, dest <-- high 64-bit of result. */ | |
11974 | static void | |
11975 | smulh (sim_cpu *cpu) | |
11976 | { | |
11977 | uint64_t uresult; | |
11978 | int64_t result; | |
11979 | unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16); | |
11980 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
11981 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
11982 | GReg ra = greg (aarch64_get_instr (cpu), 10); | |
11983 | int64_t value1 = aarch64_get_reg_u64 (cpu, rn, NO_SP); | |
11984 | int64_t value2 = aarch64_get_reg_u64 (cpu, rm, NO_SP); | |
11985 | uint64_t uvalue1; | |
11986 | uint64_t uvalue2; | |
11987 | int64_t signum = 1; | |
11988 | ||
11989 | if (ra != R31) | |
11990 | HALT_UNALLOC; | |
11991 | ||
  /* Convert to unsigned and use the unsigned mul64hi routine
     then fix the sign up afterwards.  */
11994 | if (value1 < 0) | |
11995 | { | |
11996 | signum *= -1L; | |
      /* Negate via unsigned arithmetic: -INT64_MIN would overflow.  */
      uvalue1 = - (uint64_t) value1;
11998 | } | |
11999 | else | |
12000 | { | |
12001 | uvalue1 = value1; | |
12002 | } | |
12003 | ||
12004 | if (value2 < 0) | |
12005 | { | |
12006 | signum *= -1L; | |
      uvalue2 = - (uint64_t) value2;
12008 | } | |
12009 | else | |
12010 | { | |
12011 | uvalue2 = value2; | |
12012 | } | |
12013 | ||
  uresult = mul64hi (uvalue1, uvalue2);
  result = uresult;

  if (signum < 0)
    {
      /* Negating the 128 bit product inverts the high half and
	 negates the low half, with a carry into the high half
	 only when the low half is zero.  */
      result = ~result;
      if ((uvalue1 * uvalue2) == 0)
	result += 1;
    }
12017 | ||
12018 | aarch64_set_reg_s64 (cpu, rd, NO_SP, result); | |
12019 | } | |
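/* Hand-checked example: SMULH of -1 and 1 should give the high half
   of the 128 bit value -1, i.e. 0xffffffffffffffff.  Here uvalue1 ==
   uvalue2 == 1, mul64hi returns 0, the low product is 1 (non-zero),
   and the negation path yields ~0 == -1 as required.  */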
12020 | ||
12021 | /* Unsigned multiply add long -- source, source2 : | |
12022 | 32 bit, source3 : 64 bit. */ | |
12023 | static void | |
12024 | umaddl (sim_cpu *cpu) | |
12025 | { | |
12026 | unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16); | |
12027 | unsigned ra = uimm (aarch64_get_instr (cpu), 14, 10); | |
12028 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
12029 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
12030 | ||
  /* N.B. we need to multiply the unsigned 32 bit values in rn, rm to
     obtain a 64 bit product.  */
12033 | aarch64_set_reg_u64 | |
12034 | (cpu, rd, NO_SP, | |
12035 | aarch64_get_reg_u64 (cpu, ra, NO_SP) | |
12036 | + ((uint64_t) aarch64_get_reg_u32 (cpu, rn, NO_SP)) | |
12037 | * ((uint64_t) aarch64_get_reg_u32 (cpu, rm, NO_SP))); | |
12038 | } | |
12039 | ||
12040 | /* Unsigned multiply sub long -- source, source2 : 32 bit, source3 : 64 bit. */ | |
12041 | static void | |
12042 | umsubl (sim_cpu *cpu) | |
12043 | { | |
12044 | unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16); | |
12045 | unsigned ra = uimm (aarch64_get_instr (cpu), 14, 10); | |
12046 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
12047 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
12048 | ||
  /* N.B. we need to multiply the unsigned 32 bit values in rn, rm to
     obtain a 64 bit product.  */
12051 | aarch64_set_reg_u64 | |
12052 | (cpu, rd, NO_SP, | |
12053 | aarch64_get_reg_u64 (cpu, ra, NO_SP) | |
12054 | - ((uint64_t) aarch64_get_reg_u32 (cpu, rn, NO_SP)) | |
12055 | * ((uint64_t) aarch64_get_reg_u32 (cpu, rm, NO_SP))); | |
12056 | } | |
12057 | ||
12058 | /* Unsigned multiply high, source, source2 : | |
12059 | 64 bit, dest <-- high 64-bit of result. */ | |
12060 | static void | |
12061 | umulh (sim_cpu *cpu) | |
12062 | { | |
12063 | unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16); | |
12064 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
12065 | unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0); | |
12066 | GReg ra = greg (aarch64_get_instr (cpu), 10); | |
12067 | ||
12068 | if (ra != R31) | |
12069 | HALT_UNALLOC; | |
12070 | ||
12071 | aarch64_set_reg_u64 (cpu, rd, NO_SP, | |
12072 | mul64hi (aarch64_get_reg_u64 (cpu, rn, NO_SP), | |
12073 | aarch64_get_reg_u64 (cpu, rm, NO_SP))); | |
12074 | } | |
12075 | ||
12076 | static void | |
12077 | dexDataProc3Source (sim_cpu *cpu) | |
12078 | { | |
12079 | /* assert instr[28,24] == 11011. */ | |
12080 | /* instr[31] = size : 0 ==> 32 bit, 1 ==> 64 bit (for rd at least) | |
12081 | instr[30,29] = op54 : 00 ==> ok, ow ==> UNALLOC | |
     instr[23,21] = op31 : 111 ==> UNALLOC, ow ==> ok
     instr[15] = o0 : 0/1 ==> ok
     instr[23,21:15] ==> op : 0000 ==> MADD, 0001 ==> MSUB,   (32/64 bit)
                              0010 ==> SMADDL, 0011 ==> SMSUBL, (64 bit only)
                              0100 ==> SMULH,                   (64 bit only)
                              1010 ==> UMADDL, 1011 ==> UMSUBL, (64 bit only)
12088 | 1100 ==> UMULH (64 bit only) | |
12089 | ow ==> UNALLOC. */ | |
12090 | ||
12091 | uint32_t dispatch; | |
12092 | uint32_t size = uimm (aarch64_get_instr (cpu), 31, 31); | |
12093 | uint32_t op54 = uimm (aarch64_get_instr (cpu), 30, 29); | |
12094 | uint32_t op31 = uimm (aarch64_get_instr (cpu), 23, 21); | |
12095 | uint32_t o0 = uimm (aarch64_get_instr (cpu), 15, 15); | |
12096 | ||
12097 | if (op54 != 0) | |
12098 | HALT_UNALLOC; | |
12099 | ||
12100 | if (size == 0) | |
12101 | { | |
12102 | if (op31 != 0) | |
12103 | HALT_UNALLOC; | |
12104 | ||
12105 | if (o0 == 0) | |
12106 | madd32 (cpu); | |
12107 | else | |
12108 | msub32 (cpu); | |
12109 | return; | |
12110 | } | |
12111 | ||
12112 | dispatch = (op31 << 1) | o0; | |
12113 | ||
12114 | switch (dispatch) | |
12115 | { | |
12116 | case 0: madd64 (cpu); return; | |
12117 | case 1: msub64 (cpu); return; | |
12118 | case 2: smaddl (cpu); return; | |
12119 | case 3: smsubl (cpu); return; | |
12120 | case 4: smulh (cpu); return; | |
12121 | case 10: umaddl (cpu); return; | |
12122 | case 11: umsubl (cpu); return; | |
12123 | case 12: umulh (cpu); return; | |
12124 | default: HALT_UNALLOC; | |
12125 | } | |
12126 | } | |
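/* Worked decode sketch (hypothetical instructions): UMULH X0, X1, X2
   has op31 = 110 and o0 = 0, so dispatch = (6 << 1) | 0 = 12 and
   umulh is called; SMADDL X0, W1, W2, X3 has op31 = 001 and o0 = 0,
   so dispatch = 2 and smaddl is called.  */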
12127 | ||
12128 | static void | |
12129 | dexDPReg (sim_cpu *cpu) | |
12130 | { | |
12131 | /* uint32_t group = dispatchGroup (aarch64_get_instr (cpu)); | |
12132 | assert group == GROUP_DPREG_0101 || group == GROUP_DPREG_1101 | |
12133 | bits [28:24:21] of a DPReg are the secondary dispatch vector. */ | |
12134 | uint32_t group2 = dispatchDPReg (aarch64_get_instr (cpu)); | |
12135 | ||
12136 | switch (group2) | |
12137 | { | |
12138 | case DPREG_LOG_000: | |
12139 | case DPREG_LOG_001: | |
12140 | dexLogicalShiftedRegister (cpu); return; | |
12141 | ||
12142 | case DPREG_ADDSHF_010: | |
12143 | dexAddSubtractShiftedRegister (cpu); return; | |
12144 | ||
12145 | case DPREG_ADDEXT_011: | |
12146 | dexAddSubtractExtendedRegister (cpu); return; | |
12147 | ||
12148 | case DPREG_ADDCOND_100: | |
12149 | { | |
12150 | /* This set bundles a variety of different operations. */ | |
	/* Check for one of the following.  */
12152 | /* 1) add/sub w carry. */ | |
12153 | uint32_t mask1 = 0x1FE00000U; | |
12154 | uint32_t val1 = 0x1A000000U; | |
12155 | /* 2) cond compare register/immediate. */ | |
12156 | uint32_t mask2 = 0x1FE00000U; | |
12157 | uint32_t val2 = 0x1A400000U; | |
12158 | /* 3) cond select. */ | |
12159 | uint32_t mask3 = 0x1FE00000U; | |
12160 | uint32_t val3 = 0x1A800000U; | |
12161 | /* 4) data proc 1/2 source. */ | |
12162 | uint32_t mask4 = 0x1FE00000U; | |
12163 | uint32_t val4 = 0x1AC00000U; | |
12164 | ||
12165 | if ((aarch64_get_instr (cpu) & mask1) == val1) | |
12166 | dexAddSubtractWithCarry (cpu); | |
12167 | ||
12168 | else if ((aarch64_get_instr (cpu) & mask2) == val2) | |
12169 | CondCompare (cpu); | |
12170 | ||
12171 | else if ((aarch64_get_instr (cpu) & mask3) == val3) | |
12172 | dexCondSelect (cpu); | |
12173 | ||
12174 | else if ((aarch64_get_instr (cpu) & mask4) == val4) | |
12175 | { | |
12176 | /* Bit 30 is clear for data proc 2 source | |
12177 | and set for data proc 1 source. */ | |
12178 | if (aarch64_get_instr (cpu) & (1U << 30)) | |
12179 | dexDataProc1Source (cpu); | |
12180 | else | |
12181 | dexDataProc2Source (cpu); | |
12182 | } | |
12183 | ||
12184 | else | |
12185 | /* Should not reach here. */ | |
12186 | HALT_NYI; | |
12187 | ||
12188 | return; | |
12189 | } | |
12190 | ||
12191 | case DPREG_3SRC_110: | |
12192 | dexDataProc3Source (cpu); return; | |
12193 | ||
12194 | case DPREG_UNALLOC_101: | |
12195 | HALT_UNALLOC; | |
12196 | ||
12197 | case DPREG_3SRC_111: | |
12198 | dexDataProc3Source (cpu); return; | |
12199 | ||
12200 | default: | |
12201 | /* Should never reach here. */ | |
12202 | HALT_NYI; | |
12203 | } | |
12204 | } | |
12205 | ||
/* Unconditional Branch immediate.
   Offset is a PC-relative byte offset in the range +/- 128MiB.
   The decode routine is expected to have already scaled the raw
   word offset from the instruction up to this byte offset.  */
12210 | ||
12211 | /* Unconditional branch. */ | |
12212 | static void | |
12213 | buc (sim_cpu *cpu, int32_t offset) | |
12214 | { | |
12215 | aarch64_set_next_PC_by_offset (cpu, offset); | |
12216 | } | |
12217 | ||
12218 | static unsigned stack_depth = 0; | |
12219 | ||
12220 | /* Unconditional branch and link -- writes return PC to LR. */ | |
12221 | static void | |
12222 | bl (sim_cpu *cpu, int32_t offset) | |
12223 | { | |
12224 | aarch64_save_LR (cpu); | |
12225 | aarch64_set_next_PC_by_offset (cpu, offset); | |
12226 | ||
12227 | if (TRACE_BRANCH_P (cpu)) | |
12228 | { | |
12229 | ++ stack_depth; | |
12230 | TRACE_BRANCH (cpu, | |
12231 | " %*scall %" PRIx64 " [%s]" | |
12232 | " [args: %" PRIx64 " %" PRIx64 " %" PRIx64 "]", | |
12233 | stack_depth, " ", aarch64_get_next_PC (cpu), | |
12234 | aarch64_get_func (aarch64_get_next_PC (cpu)), | |
12235 | aarch64_get_reg_u64 (cpu, 0, NO_SP), | |
12236 | aarch64_get_reg_u64 (cpu, 1, NO_SP), | |
12237 | aarch64_get_reg_u64 (cpu, 2, NO_SP) | |
12238 | ); | |
12239 | } | |
12240 | } | |
12241 | ||
12242 | /* Unconditional Branch register. | |
12243 | Branch/return address is in source register. */ | |
12244 | ||
12245 | /* Unconditional branch. */ | |
12246 | static void | |
12247 | br (sim_cpu *cpu) | |
12248 | { | |
12249 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
12250 | aarch64_set_next_PC (cpu, aarch64_get_reg_u64 (cpu, rn, NO_SP)); | |
12251 | } | |
12252 | ||
12253 | /* Unconditional branch and link -- writes return PC to LR. */ | |
12254 | static void | |
12255 | blr (sim_cpu *cpu) | |
12256 | { | |
12257 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
12258 | ||
  /* The pseudo code in the spec says we update LR before fetching
     the value from rn.  */
12261 | aarch64_save_LR (cpu); | |
12262 | aarch64_set_next_PC (cpu, aarch64_get_reg_u64 (cpu, rn, NO_SP)); | |
12263 | ||
12264 | if (TRACE_BRANCH_P (cpu)) | |
12265 | { | |
12266 | ++ stack_depth; | |
12267 | TRACE_BRANCH (cpu, | |
12268 | " %*scall %" PRIx64 " [%s]" | |
12269 | " [args: %" PRIx64 " %" PRIx64 " %" PRIx64 "]", | |
12270 | stack_depth, " ", aarch64_get_next_PC (cpu), | |
12271 | aarch64_get_func (aarch64_get_next_PC (cpu)), | |
12272 | aarch64_get_reg_u64 (cpu, 0, NO_SP), | |
12273 | aarch64_get_reg_u64 (cpu, 1, NO_SP), | |
12274 | aarch64_get_reg_u64 (cpu, 2, NO_SP) | |
12275 | ); | |
12276 | } | |
12277 | } | |
12278 | ||
/* Return -- assembler will default source to LR.  This is
   functionally equivalent to br but, presumably, unlike br it side
   effects the branch predictor.  */
12282 | static void | |
12283 | ret (sim_cpu *cpu) | |
12284 | { | |
12285 | unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); | |
12286 | aarch64_set_next_PC (cpu, aarch64_get_reg_u64 (cpu, rn, NO_SP)); | |
12287 | ||
12288 | if (TRACE_BRANCH_P (cpu)) | |
12289 | { | |
12290 | TRACE_BRANCH (cpu, | |
12291 | " %*sreturn [result: %" PRIx64 "]", | |
12292 | stack_depth, " ", aarch64_get_reg_u64 (cpu, 0, NO_SP)); | |
12293 | -- stack_depth; | |
12294 | } | |
12295 | } | |
12296 | ||
12297 | /* NOP -- we implement this and call it from the decode in case we | |
12298 | want to intercept it later. */ | |
12299 | ||
12300 | static void | |
12301 | nop (sim_cpu *cpu) | |
12302 | { | |
12303 | } | |
12304 | ||
12305 | /* Data synchronization barrier. */ | |
12306 | ||
12307 | static void | |
12308 | dsb (sim_cpu *cpu) | |
12309 | { | |
12310 | } | |
12311 | ||
12312 | /* Data memory barrier. */ | |
12313 | ||
12314 | static void | |
12315 | dmb (sim_cpu *cpu) | |
12316 | { | |
12317 | } | |
12318 | ||
12319 | /* Instruction synchronization barrier. */ | |
12320 | ||
12321 | static void | |
12322 | isb (sim_cpu *cpu) | |
12323 | { | |
12324 | } | |
12325 | ||
12326 | static void | |
12327 | dexBranchImmediate (sim_cpu *cpu) | |
12328 | { | |
12329 | /* assert instr[30,26] == 00101 | |
12330 | instr[31] ==> 0 == B, 1 == BL | |
12331 | instr[25,0] == imm26 branch offset counted in words. */ | |
12332 | ||
12333 | uint32_t top = uimm (aarch64_get_instr (cpu), 31, 31); | |
  /* We have a 26 bit signed word offset which we need to pass to the
     execute routine as a signed byte offset.  */
12336 | int32_t offset = simm32 (aarch64_get_instr (cpu), 25, 0) << 2; | |
12337 | ||
12338 | if (top) | |
12339 | bl (cpu, offset); | |
12340 | else | |
12341 | buc (cpu, offset); | |
12342 | } | |
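/* Worked offset example: B .+8 encodes the word offset imm26 == 2,
   which the shift above scales to the byte offset 8; a branch back
   to the previous instruction encodes imm26 == -1, giving -4.  */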
12343 | ||
12344 | /* Control Flow. */ | |
12345 | ||
/* Conditional branch.

   Offset is a PC-relative byte offset in the range +/- 1MiB.  For the
   bit-test branches further below, pos is a bit position in the
   range 0 .. 63.

   cc is a CondCode enum value as pulled out of the decode.

   N.B. any offset register (source) can only be Xn or Wn.  */
12354 | ||
12355 | static void | |
12356 | bcc (sim_cpu *cpu, int32_t offset, CondCode cc) | |
12357 | { | |
12358 | /* the test returns TRUE if CC is met. */ | |
12359 | if (testConditionCode (cpu, cc)) | |
12360 | aarch64_set_next_PC_by_offset (cpu, offset); | |
12361 | } | |
12362 | ||
12363 | /* 32 bit branch on register non-zero. */ | |
12364 | static void | |
12365 | cbnz32 (sim_cpu *cpu, int32_t offset) | |
12366 | { | |
12367 | unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0); | |
12368 | ||
12369 | if (aarch64_get_reg_u32 (cpu, rt, NO_SP) != 0) | |
12370 | aarch64_set_next_PC_by_offset (cpu, offset); | |
12371 | } | |
12372 | ||
/* 64 bit branch on register non-zero.  */
12374 | static void | |
12375 | cbnz (sim_cpu *cpu, int32_t offset) | |
12376 | { | |
12377 | unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0); | |
12378 | ||
12379 | if (aarch64_get_reg_u64 (cpu, rt, NO_SP) != 0) | |
12380 | aarch64_set_next_PC_by_offset (cpu, offset); | |
12381 | } | |
12382 | ||
/* 32 bit branch on register zero.  */
12384 | static void | |
12385 | cbz32 (sim_cpu *cpu, int32_t offset) | |
12386 | { | |
12387 | unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0); | |
12388 | ||
12389 | if (aarch64_get_reg_u32 (cpu, rt, NO_SP) == 0) | |
12390 | aarch64_set_next_PC_by_offset (cpu, offset); | |
12391 | } | |
12392 | ||
12393 | /* 64 bit branch on register zero. */ | |
12394 | static void | |
12395 | cbz (sim_cpu *cpu, int32_t offset) | |
12396 | { | |
12397 | unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0); | |
12398 | ||
12399 | if (aarch64_get_reg_u64 (cpu, rt, NO_SP) == 0) | |
12400 | aarch64_set_next_PC_by_offset (cpu, offset); | |
12401 | } | |
12402 | ||
12403 | /* Branch on register bit test non-zero -- one size fits all. */ | |
12404 | static void | |
12405 | tbnz (sim_cpu *cpu, uint32_t pos, int32_t offset) | |
12406 | { | |
12407 | unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0); | |
12408 | ||
  if (aarch64_get_reg_u64 (cpu, rt, NO_SP) & (((uint64_t) 1) << pos))
12410 | aarch64_set_next_PC_by_offset (cpu, offset); | |
12411 | } | |
12412 | ||
12413 | /* branch on register bit test zero -- one size fits all. */ | |
12414 | static void | |
12415 | tbz (sim_cpu *cpu, uint32_t pos, int32_t offset) | |
12416 | { | |
12417 | unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0); | |
12418 | ||
  if (!(aarch64_get_reg_u64 (cpu, rt, NO_SP) & (((uint64_t) 1) << pos)))
12420 | aarch64_set_next_PC_by_offset (cpu, offset); | |
12421 | } | |
12422 | ||
12423 | static void | |
12424 | dexCompareBranchImmediate (sim_cpu *cpu) | |
12425 | { | |
12426 | /* instr[30,25] = 01 1010 | |
12427 | instr[31] = size : 0 ==> 32, 1 ==> 64 | |
12428 | instr[24] = op : 0 ==> CBZ, 1 ==> CBNZ | |
12429 | instr[23,5] = simm19 branch offset counted in words | |
12430 | instr[4,0] = rt */ | |
12431 | ||
12432 | uint32_t size = uimm (aarch64_get_instr (cpu), 31, 31); | |
12433 | uint32_t op = uimm (aarch64_get_instr (cpu), 24, 24); | |
12434 | int32_t offset = simm32 (aarch64_get_instr (cpu), 23, 5) << 2; | |
12435 | ||
12436 | if (size == 0) | |
12437 | { | |
12438 | if (op == 0) | |
12439 | cbz32 (cpu, offset); | |
12440 | else | |
12441 | cbnz32 (cpu, offset); | |
12442 | } | |
12443 | else | |
12444 | { | |
12445 | if (op == 0) | |
12446 | cbz (cpu, offset); | |
12447 | else | |
12448 | cbnz (cpu, offset); | |
12449 | } | |
12450 | } | |
12451 | ||
12452 | static void | |
12453 | dexTestBranchImmediate (sim_cpu *cpu) | |
12454 | { | |
12455 | /* instr[31] = b5 : bit 5 of test bit idx | |
12456 | instr[30,25] = 01 1011 | |
     instr[24] = op : 0 ==> TBZ, 1 ==> TBNZ
12458 | instr[23,19] = b40 : bits 4 to 0 of test bit idx | |
12459 | instr[18,5] = simm14 : signed offset counted in words | |
12460 | instr[4,0] = uimm5 */ | |
12461 | ||
  /* N.B. b40 is five bits wide, so b5 must be shifted up by five
     places to form the full six bit test-bit index.  */
  uint32_t pos = ((uimm (aarch64_get_instr (cpu), 31, 31) << 5)
		  | uimm (aarch64_get_instr (cpu), 23, 19));
12464 | int32_t offset = simm32 (aarch64_get_instr (cpu), 18, 5) << 2; | |
12465 | ||
12466 | NYI_assert (30, 25, 0x1b); | |
12467 | ||
12468 | if (uimm (aarch64_get_instr (cpu), 24, 24) == 0) | |
12469 | tbz (cpu, pos, offset); | |
12470 | else | |
12471 | tbnz (cpu, pos, offset); | |
12472 | } | |
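/* Worked decode sketch (hypothetical instruction): TBNZ X3, #40, .+16
   encodes b5 = 1 and b40 = 01000, so pos = (1 << 5) | 8 = 40, and
   simm14 == 4 scales to the byte offset 16.  */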
12473 | ||
12474 | static void | |
12475 | dexCondBranchImmediate (sim_cpu *cpu) | |
12476 | { | |
  /* instr[31,25] = 010 1010
     instr[24] = op1
     instr[23,5] = simm19 : signed offset counted in words
     instr[4] = op0
     instr[3,0] = cond
     op1:op0 == 00 ==> B.cond, ow ==> UNALLOC.  */
12482 | ||
12483 | int32_t offset; | |
12484 | CondCode cc; | |
12485 | uint32_t op = ((uimm (aarch64_get_instr (cpu), 24, 24) << 1) | |
12486 | | uimm (aarch64_get_instr (cpu), 4, 4)); | |
12487 | ||
12488 | NYI_assert (31, 25, 0x2a); | |
12489 | ||
12490 | if (op != 0) | |
12491 | HALT_UNALLOC; | |
12492 | ||
12493 | offset = simm32 (aarch64_get_instr (cpu), 23, 5) << 2; | |
12494 | cc = condcode (aarch64_get_instr (cpu), 0); | |
12495 | ||
12496 | bcc (cpu, offset, cc); | |
12497 | } | |
12498 | ||
12499 | static void | |
12500 | dexBranchRegister (sim_cpu *cpu) | |
12501 | { | |
12502 | /* instr[31,25] = 110 1011 | |
     instr[24,21] = op : 0 ==> BR, 1 => BLR, 2 => RET, 4 => ERET, 5 => DRPS
12504 | instr[20,16] = op2 : must be 11111 | |
12505 | instr[15,10] = op3 : must be 000000 | |
     instr[4,0] = op4 : must be 00000.  */
12507 | ||
12508 | uint32_t op = uimm (aarch64_get_instr (cpu), 24, 21); | |
12509 | uint32_t op2 = uimm (aarch64_get_instr (cpu), 20, 16); | |
12510 | uint32_t op3 = uimm (aarch64_get_instr (cpu), 15, 10); | |
12511 | uint32_t op4 = uimm (aarch64_get_instr (cpu), 4, 0); | |
12512 | ||
12513 | NYI_assert (31, 25, 0x6b); | |
12514 | ||
12515 | if (op2 != 0x1F || op3 != 0 || op4 != 0) | |
12516 | HALT_UNALLOC; | |
12517 | ||
12518 | if (op == 0) | |
12519 | br (cpu); | |
12520 | ||
12521 | else if (op == 1) | |
12522 | blr (cpu); | |
12523 | ||
12524 | else if (op == 2) | |
12525 | ret (cpu); | |
12526 | ||
12527 | else | |
12528 | { | |
      /* ERET and DRPS accept 0b11111 for rn = instr[4,0].  */
12530 | /* anything else is unallocated. */ | |
12531 | uint32_t rn = greg (aarch64_get_instr (cpu), 0); | |
12532 | ||
12533 | if (rn != 0x1f) | |
12534 | HALT_UNALLOC; | |
12535 | ||
12536 | if (op == 4 || op == 5) | |
12537 | HALT_NYI; | |
12538 | ||
12539 | HALT_UNALLOC; | |
12540 | } | |
12541 | } | |
12542 | ||
12543 | /* FIXME: We should get the Angel SWI values from ../../libgloss/aarch64/svc.h | |
12544 | but this may not be available. So instead we define the values we need | |
12545 | here. */ | |
12546 | #define AngelSVC_Reason_Open 0x01 | |
12547 | #define AngelSVC_Reason_Close 0x02 | |
12548 | #define AngelSVC_Reason_Write 0x05 | |
12549 | #define AngelSVC_Reason_Read 0x06 | |
12550 | #define AngelSVC_Reason_IsTTY 0x09 | |
12551 | #define AngelSVC_Reason_Seek 0x0A | |
12552 | #define AngelSVC_Reason_FLen 0x0C | |
12553 | #define AngelSVC_Reason_Remove 0x0E | |
12554 | #define AngelSVC_Reason_Rename 0x0F | |
12555 | #define AngelSVC_Reason_Clock 0x10 | |
12556 | #define AngelSVC_Reason_Time 0x11 | |
12557 | #define AngelSVC_Reason_System 0x12 | |
12558 | #define AngelSVC_Reason_Errno 0x13 | |
12559 | #define AngelSVC_Reason_GetCmdLine 0x15 | |
12560 | #define AngelSVC_Reason_HeapInfo 0x16 | |
12561 | #define AngelSVC_Reason_ReportException 0x18 | |
12562 | #define AngelSVC_Reason_Elapsed 0x30 | |
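
/* A sketch of the Angel call convention as modelled below: the HLT
   0xf000 handler receives the reason code in W0 and, for most calls,
   a pointer to a parameter block in X1.  For Write, for example, the
   block holds { file descriptor, buffer address, length }, each
   field 64 bits wide.  */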
12563 | ||
12564 | ||
12565 | static void | |
12566 | handle_halt (sim_cpu *cpu, uint32_t val) | |
12567 | { | |
12568 | uint64_t result = 0; | |
12569 | ||
12570 | if (val != 0xf000) | |
12571 | { | |
12572 | TRACE_SYSCALL (cpu, " HLT [0x%x]", val); | |
12573 | sim_engine_halt (CPU_STATE (cpu), cpu, NULL, aarch64_get_PC (cpu), | |
12574 | sim_stopped, SIM_SIGTRAP); | |
12575 | } | |
12576 | ||
12577 | /* We have encountered an Angel SVC call. See if we can process it. */ | |
12578 | switch (aarch64_get_reg_u32 (cpu, 0, NO_SP)) | |
12579 | { | |
12580 | case AngelSVC_Reason_HeapInfo: | |
12581 | { | |
12582 | /* Get the values. */ | |
12583 | uint64_t stack_top = aarch64_get_stack_start (cpu); | |
12584 | uint64_t heap_base = aarch64_get_heap_start (cpu); | |
12585 | ||
12586 | /* Get the pointer */ | |
12587 | uint64_t ptr = aarch64_get_reg_u64 (cpu, 1, SP_OK); | |
12588 | ptr = aarch64_get_mem_u64 (cpu, ptr); | |
12589 | ||
12590 | /* Fill in the memory block. */ | |
12591 | /* Start addr of heap. */ | |
12592 | aarch64_set_mem_u64 (cpu, ptr + 0, heap_base); | |
12593 | /* End addr of heap. */ | |
12594 | aarch64_set_mem_u64 (cpu, ptr + 8, stack_top); | |
12595 | /* Lowest stack addr. */ | |
12596 | aarch64_set_mem_u64 (cpu, ptr + 16, heap_base); | |
12597 | /* Initial stack addr. */ | |
12598 | aarch64_set_mem_u64 (cpu, ptr + 24, stack_top); | |
12599 | ||
12600 | TRACE_SYSCALL (cpu, " AngelSVC: Get Heap Info"); | |
12601 | } | |
12602 | break; | |
12603 | ||
12604 | case AngelSVC_Reason_Open: | |
12605 | { | |
12606 | /* Get the pointer */ | |
12607 | /* uint64_t ptr = aarch64_get_reg_u64 (cpu, 1, SP_OK);. */ | |
12608 | /* FIXME: For now we just assume that we will only be asked | |
12609 | to open the standard file descriptors. */ | |
12610 | static int fd = 0; | |
12611 | result = fd ++; | |
12612 | ||
12613 | TRACE_SYSCALL (cpu, " AngelSVC: Open file %d", fd - 1); | |
12614 | } | |
12615 | break; | |
12616 | ||
12617 | case AngelSVC_Reason_Close: | |
12618 | { | |
12619 | uint64_t fh = aarch64_get_reg_u64 (cpu, 1, SP_OK); | |
12620 | TRACE_SYSCALL (cpu, " AngelSVC: Close file %d", (int) fh); | |
12621 | result = 0; | |
12622 | } | |
12623 | break; | |
12624 | ||
12625 | case AngelSVC_Reason_Errno: | |
12626 | result = 0; | |
12627 | TRACE_SYSCALL (cpu, " AngelSVC: Get Errno"); | |
12628 | break; | |
12629 | ||
12630 | case AngelSVC_Reason_Clock: | |
12631 | result = | |
12632 | #ifdef CLOCKS_PER_SEC | |
12633 | (CLOCKS_PER_SEC >= 100) | |
12634 | ? (clock () / (CLOCKS_PER_SEC / 100)) | |
12635 | : ((clock () * 100) / CLOCKS_PER_SEC) | |
12636 | #else | |
12637 | /* Presume unix... clock() returns microseconds. */ | |
12638 | (clock () / 10000) | |
12639 | #endif | |
12640 | ; | |
12641 | TRACE_SYSCALL (cpu, " AngelSVC: Get Clock"); | |
12642 | break; | |
12643 | ||
12644 | case AngelSVC_Reason_GetCmdLine: | |
12645 | { | |
12646 | /* Get the pointer */ | |
12647 | uint64_t ptr = aarch64_get_reg_u64 (cpu, 1, SP_OK); | |
12648 | ptr = aarch64_get_mem_u64 (cpu, ptr); | |
12649 | ||
12650 | /* FIXME: No command line for now. */ | |
12651 | aarch64_set_mem_u64 (cpu, ptr, 0); | |
12652 | TRACE_SYSCALL (cpu, " AngelSVC: Get Command Line"); | |
12653 | } | |
12654 | break; | |
12655 | ||
12656 | case AngelSVC_Reason_IsTTY: | |
12657 | result = 1; | |
12658 | TRACE_SYSCALL (cpu, " AngelSVC: IsTTY ?"); | |
12659 | break; | |
12660 | ||
12661 | case AngelSVC_Reason_Write: | |
12662 | { | |
12663 | /* Get the pointer */ | |
12664 | uint64_t ptr = aarch64_get_reg_u64 (cpu, 1, SP_OK); | |
12665 | /* Get the write control block. */ | |
12666 | uint64_t fd = aarch64_get_mem_u64 (cpu, ptr); | |
12667 | uint64_t buf = aarch64_get_mem_u64 (cpu, ptr + 8); | |
12668 | uint64_t len = aarch64_get_mem_u64 (cpu, ptr + 16); | |
12669 | ||
12670 | TRACE_SYSCALL (cpu, "write of %" PRIx64 " bytes from %" | |
12671 | PRIx64 " on descriptor %" PRIx64, | |
12672 | len, buf, fd); | |
12673 | ||
12674 | if (len > 1280) | |
12675 | { | |
12676 | TRACE_SYSCALL (cpu, | |
12677 | " AngelSVC: Write: Suspiciously long write: %ld", | |
12678 | (long) len); | |
12679 | sim_engine_halt (CPU_STATE (cpu), cpu, NULL, aarch64_get_PC (cpu), | |
12680 | sim_stopped, SIM_SIGBUS); | |
12681 | } | |
12682 | else if (fd == 1) | |
12683 | { | |
12684 | printf ("%.*s", (int) len, aarch64_get_mem_ptr (cpu, buf)); | |
12685 | if (disas) | |
12686 | /* So that the output stays in sync with trace output. */ | |
12687 | fflush (stdout); | |
12688 | } | |
12689 | else if (fd == 2) | |
12690 | { | |
12691 | TRACE (cpu, 0, "\n"); | |
12692 | sim_io_eprintf (CPU_STATE (cpu), "%.*s", | |
12693 | (int) len, aarch64_get_mem_ptr (cpu, buf)); | |
12694 | TRACE (cpu, 0, "\n"); | |
12695 | } | |
12696 | else | |
12697 | { | |
12698 | TRACE_SYSCALL (cpu, | |
12699 | " AngelSVC: Write: Unexpected file handle: %d", | |
12700 | (int) fd); | |
12701 | sim_engine_halt (CPU_STATE (cpu), cpu, NULL, aarch64_get_PC (cpu), | |
12702 | sim_stopped, SIM_SIGABRT); | |
12703 | } | |
12704 | } | |
12705 | break; | |
12706 | ||
12707 | case AngelSVC_Reason_ReportException: | |
12708 | { | |
12709 | /* Get the pointer */ | |
12710 | uint64_t ptr = aarch64_get_reg_u64 (cpu, 1, SP_OK); | |
12711 | /*ptr = aarch64_get_mem_u64 (cpu, ptr);. */ | |
12712 | uint64_t type = aarch64_get_mem_u64 (cpu, ptr); | |
12713 | uint64_t state = aarch64_get_mem_u64 (cpu, ptr + 8); | |
12714 | ||
12715 | TRACE_SYSCALL (cpu, | |
12716 | "Angel Exception: type 0x%" PRIx64 " state %" PRIx64, | |
12717 | type, state); | |
12718 | ||
12719 | if (type == 0x20026) | |
12720 | sim_engine_halt (CPU_STATE (cpu), cpu, NULL, aarch64_get_PC (cpu), | |
12721 | sim_exited, state); | |
12722 | else | |
12723 | sim_engine_halt (CPU_STATE (cpu), cpu, NULL, aarch64_get_PC (cpu), | |
12724 | sim_stopped, SIM_SIGINT); | |
12725 | } | |
12726 | break; | |
12727 | ||
12728 | case AngelSVC_Reason_Read: | |
12729 | case AngelSVC_Reason_FLen: | |
12730 | case AngelSVC_Reason_Seek: | |
12731 | case AngelSVC_Reason_Remove: | |
12732 | case AngelSVC_Reason_Time: | |
12733 | case AngelSVC_Reason_System: | |
12734 | case AngelSVC_Reason_Rename: | |
12735 | case AngelSVC_Reason_Elapsed: | |
12736 | default: | |
12737 | TRACE_SYSCALL (cpu, " HLT [Unknown angel %x]", | |
12738 | aarch64_get_reg_u32 (cpu, 0, NO_SP)); | |
12739 | sim_engine_halt (CPU_STATE (cpu), cpu, NULL, aarch64_get_PC (cpu), | |
12740 | sim_stopped, SIM_SIGTRAP); | |
12741 | } | |
12742 | ||
12743 | aarch64_set_reg_u64 (cpu, 0, NO_SP, result); | |
12744 | } | |
12745 | ||
12746 | static void | |
12747 | dexExcpnGen (sim_cpu *cpu) | |
12748 | { | |
12749 | /* instr[31:24] = 11010100 | |
12750 | instr[23,21] = opc : 000 ==> GEN EXCPN, 001 ==> BRK | |
12751 | 010 ==> HLT, 101 ==> DBG GEN EXCPN | |
12752 | instr[20,5] = imm16 | |
12753 | instr[4,2] = opc2 000 ==> OK, ow ==> UNALLOC | |
12754 | instr[1,0] = LL : discriminates opc */ | |
12755 | ||
12756 | uint32_t opc = uimm (aarch64_get_instr (cpu), 23, 21); | |
12757 | uint32_t imm16 = uimm (aarch64_get_instr (cpu), 20, 5); | |
12758 | uint32_t opc2 = uimm (aarch64_get_instr (cpu), 4, 2); | |
12759 | uint32_t LL; | |
12760 | ||
12761 | NYI_assert (31, 24, 0xd4); | |
12762 | ||
12763 | if (opc2 != 0) | |
12764 | HALT_UNALLOC; | |
12765 | ||
12766 | LL = uimm (aarch64_get_instr (cpu), 1, 0); | |
12767 | ||
12768 | /* We only implement HLT and BRK for now. */ | |
12769 | if (opc == 1 && LL == 0) | |
12770 | { | |
12771 | TRACE_EVENTS (cpu, " BRK [0x%x]", imm16); | |
12772 | sim_engine_halt (CPU_STATE (cpu), cpu, NULL, aarch64_get_PC (cpu), | |
12773 | sim_exited, aarch64_get_reg_s32 (cpu, R0, SP_OK)); | |
12774 | } | |
12775 | ||
12776 | if (opc == 2 && LL == 0) | |
12777 | handle_halt (cpu, imm16); | |
12778 | ||
12779 | else if (opc == 0 || opc == 5) | |
12780 | HALT_NYI; | |
12781 | ||
12782 | else | |
12783 | HALT_UNALLOC; | |
12784 | } | |
12785 | ||
caa8d700 NC |
12786 | /* Stub for accessing system registers. |
12787 | We implement support for the DCZID register since this is used | |
12788 | by the C library's memset function. */ | |
12789 | ||
12790 | static uint64_t | |
12791 | system_get (sim_cpu *cpu, unsigned op0, unsigned op1, unsigned crn, | |
12792 | unsigned crm, unsigned op2) | |
12793 | { | |
12794 | if (crn == 0 && op1 == 3 && crm == 0 && op2 == 7) | |
12795 | /* DCZID_EL0 - the Data Cache Zero ID register. | |
12796 | We do not support DC ZVA at the moment, so | |
12797 | we return a value with the disable bit set. */ | |
12798 | return ((uint64_t) 1) << 4; | |
12799 | ||
12800 | HALT_NYI; | |
12801 | } | |
12802 | ||
12803 | static void | |
12804 | do_mrs (sim_cpu *cpu) | |
12805 | { | |
12806 | /* instr[31:20] = 1101 01010 0011 | |
12807 | instr[19] = op0 | |
12808 | instr[18,16] = op1 | |
12809 | instr[15,12] = CRn | |
12810 | instr[11,8] = CRm | |
12811 | instr[7,5] = op2 | |
12812 | instr[4,0] = Rt */ | |
12813 | unsigned sys_op0 = uimm (aarch64_get_instr (cpu), 19, 19) + 2; | |
12814 | unsigned sys_op1 = uimm (aarch64_get_instr (cpu), 18, 16); | |
12815 | unsigned sys_crn = uimm (aarch64_get_instr (cpu), 15, 12); | |
12816 | unsigned sys_crm = uimm (aarch64_get_instr (cpu), 11, 8); | |
12817 | unsigned sys_op2 = uimm (aarch64_get_instr (cpu), 7, 5); | |
12818 | unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0); | |
12819 | ||
12820 | aarch64_set_reg_u64 (cpu, rt, NO_SP, | |
12821 | system_get (cpu, sys_op0, sys_op1, sys_crn, sys_crm, sys_op2)); | |
12822 | } | |
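/* Worked decode sketch: MRS X0, DCZID_EL0 encodes op0 = 3 (bit 19
   set, so 1 + 2), op1 = 3, CRn = 0, CRm = 0, op2 = 7; system_get
   above then returns 1 << 4 -- the DZP (prohibit) bit -- telling
   programs such as the C library's memset not to use DC ZVA.  */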
12823 | ||
2e8cf49e NC |
12824 | static void |
12825 | dexSystem (sim_cpu *cpu) | |
12826 | { | |
12827 | /* instr[31:22] = 1101 01010 0 | |
12828 | instr[21] = L | |
12829 | instr[20,19] = op0 | |
12830 | instr[18,16] = op1 | |
12831 | instr[15,12] = CRn | |
12832 | instr[11,8] = CRm | |
12833 | instr[7,5] = op2 | |
12834 | instr[4,0] = uimm5 */ | |
12835 | ||
12836 | /* We are interested in HINT, DSB, DMB and ISB | |
12837 | ||
12838 | Hint #0 encodes NOOP (this is the only hint we care about) | |
12839 | L == 0, op0 == 0, op1 = 011, CRn = 0010, Rt = 11111, | |
     CRm != 0000 OR op2 == 000 OR op2 > 101
12841 | ||
     DSB, DMB, ISB are data synchronization barrier, data memory
     barrier and instruction synchronization barrier, respectively, where
12844 | ||
12845 | L == 0, op0 == 0, op1 = 011, CRn = 0011, Rt = 11111, | |
12846 | op2 : DSB ==> 100, DMB ==> 101, ISB ==> 110 | |
12847 | CRm<3:2> ==> domain, CRm<1:0> ==> types, | |
12848 | domain : 00 ==> OuterShareable, 01 ==> Nonshareable, | |
              10 ==> InnerShareable, 11 ==> FullSystem
12850 | types : 01 ==> Reads, 10 ==> Writes, | |
12851 | 11 ==> All, 00 ==> All (domain == FullSystem). */ | |
12852 | ||
12853 | unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0); | |
12854 | uint32_t l_op0_op1_crn = uimm (aarch64_get_instr (cpu), 21, 12); | |
12855 | ||
12856 | NYI_assert (31, 22, 0x354); | |
12857 | ||
12858 | switch (l_op0_op1_crn) | |
12859 | { | |
12860 | case 0x032: | |
12861 | if (rt == 0x1F) | |
12862 | { | |
12863 | /* Treat as NOP: CRm != 0000, OR | |
12864 | (CRm == 0000 AND (op2 == 000 OR op2 > 101)). */ | |
12865 | uint32_t crm = uimm (aarch64_get_instr (cpu), 11, 8); | |
12866 | uint32_t op2 = uimm (aarch64_get_instr (cpu), 7, 5); | |
12867 | ||
12868 | if (crm != 0 || (op2 == 0 || op2 > 5)) | |
12869 | { | |
12870 | /* Actually call the nop method so we can reimplement it later. */ | |
12871 | nop (cpu); | |
12872 | return; | |
12873 | } | |
12874 | } | |
12875 | HALT_NYI; | |
12876 | ||
12877 | case 0x033: | |
12878 | { | |
12879 | uint32_t op2 = uimm (aarch64_get_instr (cpu), 7, 5); | |
12880 | ||
12881 | switch (op2) | |
12882 | { | |
12883 | case 2: HALT_NYI; | |
12884 | case 4: dsb (cpu); return; |
12885 | case 5: dmb (cpu); return; | |
12886 | case 6: isb (cpu); return; | |
12887 | case 7: | |
12888 | default: HALT_UNALLOC; | |
12889 | } | |
12890 | } | |
12891 | ||
12892 | case 0x3B0: | |
12893 | /* MRS Xt, sys-reg. */ | |
12894 | do_mrs (cpu); | |
12895 | return; |
12896 | ||
12897 | case 0x3B4: | |
12898 | case 0x3BD: | |
12899 | /* MRS Xt, sys-reg. */ | |
12900 | do_mrs (cpu); | |
12901 | return; |
12902 | ||
12903 | case 0x0B7: | |
12904 | /* DC <type>, x<n>. */ | |
12905 | HALT_NYI; | |
12906 | return; |
12907 | ||
12908 | default: | |
12909 | /* if (uimm (aarch64_get_instr (cpu), 21, 20) == 0x1) | |
12910 | MRS Xt, sys-reg. */ | |
12911 | HALT_NYI; | |
12912 | return; | |
12913 | } |
12914 | } | |
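/* Worked example (illustrative sketch only; the DSB encoding below is
   assumed from the ARMv8 reference manual, it does not appear in this
   file): how dexSystem above routes "DSB SY".  */
#if 0
static void
example_route_dsb_sy (void)
{
  uint32_t instr = 0xd5033f9f;                     /* DSB SY.  */
  uint32_t l_op0_op1_crn = (instr >> 12) & 0x3FF;  /* == 0x033: barrier case.  */
  uint32_t op2 = (instr >> 5) & 0x7;               /* == 4: dsb (cpu).  */
  uint32_t crm = (instr >> 8) & 0xF;               /* == 0xF: FullSystem, All.  */
}
#endif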
12915 | ||
12916 | static void | |
12917 | dexBr (sim_cpu *cpu) | |
12918 | { | |
12919 | /* uint32_t group = dispatchGroup (aarch64_get_instr (cpu)); | |
12920 | assert group == GROUP_BREXSYS_1010 || group == GROUP_BREXSYS_1011 | |
12921 | bits [31,29] of a BrExSys are the secondary dispatch vector. */ | |
12922 | uint32_t group2 = dispatchBrExSys (aarch64_get_instr (cpu)); | |
12923 | ||
12924 | switch (group2) | |
12925 | { | |
12926 | case BR_IMM_000: | |
12927 | return dexBranchImmediate (cpu); | |
12928 | ||
12929 | case BR_IMMCMP_001: | |
12930 | /* Compare has bit 25 clear while test has it set. */ | |
12931 | if (!uimm (aarch64_get_instr (cpu), 25, 25)) | |
12932 | dexCompareBranchImmediate (cpu); | |
12933 | else | |
12934 | dexTestBranchImmediate (cpu); | |
12935 | return; | |
12936 | ||
12937 | case BR_IMMCOND_010: | |
12938 | /* This is a conditional branch if bit 25 is clear, otherwise | |
12939 | unallocated. */ | |
12940 | if (!uimm (aarch64_get_instr (cpu), 25, 25)) | |
12941 | dexCondBranchImmediate (cpu); | |
12942 | else | |
12943 | HALT_UNALLOC; | |
12944 | return; | |
12945 | ||
12946 | case BR_UNALLOC_011: | |
12947 | HALT_UNALLOC; | |
12948 | ||
12949 | case BR_IMM_100: | |
12950 | dexBranchImmediate (cpu); | |
12951 | return; | |
12952 | ||
12953 | case BR_IMMCMP_101: | |
12954 | /* Compare has bit 25 clear while test has it set. */ | |
12955 | if (!uimm (aarch64_get_instr (cpu), 25, 25)) | |
12956 | dexCompareBranchImmediate (cpu); | |
12957 | else | |
12958 | dexTestBranchImmediate (cpu); | |
12959 | return; | |
12960 | ||
12961 | case BR_REG_110: | |
12962 | /* Unconditional branch reg has bit 25 set. */ | |
12963 | if (uimm (aarch64_get_instr (cpu), 25, 25)) | |
12964 | dexBranchRegister (cpu); | |
12965 | ||
12966 | /* This includes Excpn Gen, System and unalloc operations. | |
12967 | We need to decode the Excpn Gen operation BRK so we can plant | |
12968 | debugger entry points. | |
12969 | Excpn Gen operations have aarch64_get_instr (cpu)[24] = 0. | |
12970 | We also need to decode at least one System operation, NOP, | |
12971 | which is an alias for HINT #0. | |
12972 | System operations have aarch64_get_instr (cpu)[24,22] = 100. */ | |
12973 | else if (uimm (aarch64_get_instr (cpu), 24, 24) == 0) | |
12974 | dexExcpnGen (cpu); | |
12975 | ||
12976 | else if (uimm (aarch64_get_instr (cpu), 24, 22) == 4) | |
12977 | dexSystem (cpu); | |
12978 | ||
12979 | else | |
12980 | HALT_UNALLOC; | |
12981 | ||
12982 | return; | |
12983 | ||
12984 | case BR_UNALLOC_111: | |
12985 | HALT_UNALLOC; | |
12986 | ||
12987 | default: | |
12988 | /* Should never reach here. */ | |
12989 | HALT_NYI; | |
12990 | } | |
12991 | } | |
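/* Worked examples (illustrative sketch only; the encodings below are
   assumed from the ARMv8 reference manual, they do not appear in this
   file): the secondary dispatch vector, instr[31,29], as seen by dexBr.  */
#if 0
static void
example_brexsys_groups (void)
{
  unsigned b_group   = (0x14000000 >> 29) & 0x7;  /* B .  -> 0, BR_IMM_000.  */
  unsigned ret_group = (0xd65f03c0 >> 29) & 0x7;  /* RET  -> 6, BR_REG_110.  */
}
#endif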
12992 | ||
12993 | static void | |
12994 | aarch64_decode_and_execute (sim_cpu *cpu, uint64_t pc) | |
12995 | { | |
12996 | /* We need to check whether gdb wants to break in here. */ | |
12997 | /* checkBreak (cpu); */ | |
12998 | ||
12999 | uint64_t group = dispatchGroup (aarch64_get_instr (cpu)); | |
13000 | ||
13001 | switch (group) | |
13002 | { | |
13003 | case GROUP_PSEUDO_0000: dexPseudo (cpu); break; | |
13004 | case GROUP_LDST_0100: dexLdSt (cpu); break; | |
13005 | case GROUP_DPREG_0101: dexDPReg (cpu); break; | |
13006 | case GROUP_LDST_0110: dexLdSt (cpu); break; | |
13007 | case GROUP_ADVSIMD_0111: dexAdvSIMD0 (cpu); break; | |
13008 | case GROUP_DPIMM_1000: dexDPImm (cpu); break; | |
13009 | case GROUP_DPIMM_1001: dexDPImm (cpu); break; | |
13010 | case GROUP_BREXSYS_1010: dexBr (cpu); break; | |
13011 | case GROUP_BREXSYS_1011: dexBr (cpu); break; | |
13012 | case GROUP_LDST_1100: dexLdSt (cpu); break; | |
13013 | case GROUP_DPREG_1101: dexDPReg (cpu); break; | |
13014 | case GROUP_LDST_1110: dexLdSt (cpu); break; | |
13015 | case GROUP_ADVSIMD_1111: dexAdvSIMD1 (cpu); break; | |
13016 | ||
13017 | case GROUP_UNALLOC_0001: | |
13018 | case GROUP_UNALLOC_0010: | |
13019 | case GROUP_UNALLOC_0011: | |
13020 | HALT_UNALLOC; | |
13021 | ||
13022 | default: | |
13023 | /* Should never reach here. */ | |
13024 | HALT_NYI; | |
13025 | } | |
13026 | } | |
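/* Worked examples (illustrative sketch only; the encodings below are
   assumed from the ARMv8 reference manual, they do not appear in this
   file): dispatchGroup keys off instr[28,25].  */
#if 0
static void
example_top_level_groups (void)
{
  unsigned add_group = (0x91000000 >> 25) & 0xF;  /* ADD X0, X0, #0 -> 1000: dexDPImm.  */
  unsigned ldr_group = (0xf9400020 >> 25) & 0xF;  /* LDR X0, [X1]   -> 1100: dexLdSt.  */
}
#endif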
13027 | ||
13028 | static bfd_boolean | |
13029 | aarch64_step (sim_cpu *cpu) | |
13030 | { | |
13031 | uint64_t pc = aarch64_get_PC (cpu); | |
13032 | ||
13033 | if (pc == TOP_LEVEL_RETURN_PC) | |
13034 | return FALSE; | |
13035 | ||
13036 | aarch64_set_next_PC (cpu, pc + 4); | |
13037 | aarch64_get_instr (cpu) = aarch64_get_mem_u32 (cpu, pc); | |
13038 | ||
13039 | if (TRACE_INSN_P (cpu)) | |
13040 | { | |
13041 | if (disas) | |
13042 | TRACE_INSN (cpu, " pc = %" PRIx64 " ", pc); | |
13043 | else | |
13044 | TRACE_INSN (cpu, " pc = %" PRIx64 " instr = %x", pc, | |
13045 | aarch64_get_instr (cpu)); | |
13046 | } | |
13047 | else if (disas) | |
13048 | sim_io_eprintf (CPU_STATE (cpu), " %" PRIx64 " ", pc); | |
13049 | ||
13050 | if (disas) | |
13051 | aarch64_print_insn (CPU_STATE (cpu), pc); | |
13052 | ||
13053 | aarch64_decode_and_execute (cpu, pc); | |
13054 | ||
13055 | return TRUE; | |
13056 | } | |
13057 | ||
13058 | void | |
13059 | aarch64_run (SIM_DESC sd) | |
13060 | { | |
13061 | sim_cpu *cpu = STATE_CPU (sd, 0); | |
13062 | ||
13063 | while (aarch64_step (cpu)) | |
13064 | aarch64_update_PC (cpu); | |
13065 | ||
13066 | sim_engine_halt (sd, NULL, NULL, aarch64_get_PC (cpu), | |
13067 | sim_exited, aarch64_get_reg_s32 (cpu, R0, SP_OK)); | |
13068 | } | |
13069 | ||
13070 | void | |
13071 | aarch64_init (sim_cpu *cpu, uint64_t pc) | |
13072 | { | |
13073 | uint64_t sp = aarch64_get_stack_start (cpu); | |
13074 | ||
13075 | /* Install SP, FP and PC, and set LR to the sentinel value | |
13076 | TOP_LEVEL_RETURN_PC so we can detect a top-level return. */ | |
13077 | aarch64_set_reg_u64 (cpu, SP, SP_OK, sp); | |
13078 | aarch64_set_reg_u64 (cpu, FP, SP_OK, sp); | |
13079 | aarch64_set_reg_u64 (cpu, LR, SP_OK, TOP_LEVEL_RETURN_PC); | |
13080 | aarch64_set_next_PC (cpu, pc); | |
13081 | aarch64_update_PC (cpu); | |
13082 | aarch64_init_LIT_table (); | |
13083 | } |
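/* Usage sketch (hypothetical driver code, not part of this file): a
   minimal embedding resets the CPU to the program's entry point and
   then runs until the LR sentinel planted by aarch64_init is hit or
   the program exits.  */
#if 0
static void
example_boot_and_run (SIM_DESC sd, uint64_t entry_point)
{
  sim_cpu *cpu = STATE_CPU (sd, 0);

  aarch64_init (cpu, entry_point);
  aarch64_run (sd);  /* Halts via sim_engine_halt, with R0 as the exit status.  */
}
#endif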