#objdump: -dw
#name: x86-64 lockable insns

.*: +file format .*


Disassembly of section .text:

0+ <foo>:
[ ]*[a-f0-9]+: f0 01 03 lock add %eax,\(%rbx\)
[ ]*[a-f0-9]+: f0 83 03 64 lock addl \$0x64,\(%rbx\)
[ ]*[a-f0-9]+: f0 11 03 lock adc %eax,\(%rbx\)
[ ]*[a-f0-9]+: f0 83 13 64 lock adcl \$0x64,\(%rbx\)
[ ]*[a-f0-9]+: f0 21 03 lock and %eax,\(%rbx\)
[ ]*[a-f0-9]+: f0 83 23 64 lock andl \$0x64,\(%rbx\)
[ ]*[a-f0-9]+: f0 0f bb 03 lock btc %eax,\(%rbx\)
[ ]*[a-f0-9]+: f0 0f ba 3b 64 lock btcl \$0x64,\(%rbx\)
[ ]*[a-f0-9]+: f0 0f b3 03 lock btr %eax,\(%rbx\)
[ ]*[a-f0-9]+: f0 0f ba 33 64 lock btrl \$0x64,\(%rbx\)
[ ]*[a-f0-9]+: f0 0f ab 03 lock bts %eax,\(%rbx\)
[ ]*[a-f0-9]+: f0 0f ba 2b 64 lock btsl \$0x64,\(%rbx\)
[ ]*[a-f0-9]+: f0 0f b1 03 lock cmpxchg %eax,\(%rbx\)
[ ]*[a-f0-9]+: f0 0f c7 0b lock cmpxchg8b \(%rbx\)
[ ]*[a-f0-9]+: f0 48 0f c7 0b lock cmpxchg16b \(%rbx\)
[ ]*[a-f0-9]+: f0 ff 0b lock decl \(%rbx\)
[ ]*[a-f0-9]+: f0 ff 03 lock incl \(%rbx\)
[ ]*[a-f0-9]+: f0 f7 1b lock negl \(%rbx\)
[ ]*[a-f0-9]+: f0 f7 13 lock notl \(%rbx\)
[ ]*[a-f0-9]+: f0 09 03 lock or %eax,\(%rbx\)
[ ]*[a-f0-9]+: f0 83 0b 64 lock orl \$0x64,\(%rbx\)
[ ]*[a-f0-9]+: f0 19 03 lock sbb %eax,\(%rbx\)
[ ]*[a-f0-9]+: f0 83 1b 64 lock sbbl \$0x64,\(%rbx\)
[ ]*[a-f0-9]+: f0 29 03 lock sub %eax,\(%rbx\)
[ ]*[a-f0-9]+: f0 83 2b 64 lock subl \$0x64,\(%rbx\)
[ ]*[a-f0-9]+: f0 0f c1 03 lock xadd %eax,\(%rbx\)
[ ]*[a-f0-9]+: f0 87 03 lock xchg %eax,\(%rbx\)
[ ]*[a-f0-9]+: f0 87 03 lock xchg %eax,\(%rbx\)
[ ]*[a-f0-9]+: f0 31 03 lock xor %eax,\(%rbx\)
[ ]*[a-f0-9]+: f0 83 33 64 lock xorl \$0x64,\(%rbx\)
[ ]*[a-f0-9]+: f0 01 03 lock add %eax,\(%rbx\)
[ ]*[a-f0-9]+: f0 83 03 64 lock addl \$0x64,\(%rbx\)
[ ]*[a-f0-9]+: f0 11 03 lock adc %eax,\(%rbx\)
[ ]*[a-f0-9]+: f0 83 13 64 lock adcl \$0x64,\(%rbx\)
[ ]*[a-f0-9]+: f0 21 03 lock and %eax,\(%rbx\)
[ ]*[a-f0-9]+: f0 83 23 64 lock andl \$0x64,\(%rbx\)
[ ]*[a-f0-9]+: f0 0f bb 03 lock btc %eax,\(%rbx\)
[ ]*[a-f0-9]+: f0 0f ba 3b 64 lock btcl \$0x64,\(%rbx\)
[ ]*[a-f0-9]+: f0 0f b3 03 lock btr %eax,\(%rbx\)
[ ]*[a-f0-9]+: f0 0f ba 33 64 lock btrl \$0x64,\(%rbx\)
[ ]*[a-f0-9]+: f0 0f ab 03 lock bts %eax,\(%rbx\)
[ ]*[a-f0-9]+: f0 0f ba 2b 64 lock btsl \$0x64,\(%rbx\)
[ ]*[a-f0-9]+: f0 0f b1 03 lock cmpxchg %eax,\(%rbx\)
[ ]*[a-f0-9]+: f0 0f c7 0b lock cmpxchg8b \(%rbx\)
[ ]*[a-f0-9]+: f0 48 0f c7 0b lock cmpxchg16b \(%rbx\)
[ ]*[a-f0-9]+: f0 ff 0b lock decl \(%rbx\)
[ ]*[a-f0-9]+: f0 ff 03 lock incl \(%rbx\)
[ ]*[a-f0-9]+: f0 f7 1b lock negl \(%rbx\)
[ ]*[a-f0-9]+: f0 f7 13 lock notl \(%rbx\)
[ ]*[a-f0-9]+: f0 09 03 lock or %eax,\(%rbx\)
[ ]*[a-f0-9]+: f0 83 0b 64 lock orl \$0x64,\(%rbx\)
[ ]*[a-f0-9]+: f0 19 03 lock sbb %eax,\(%rbx\)
[ ]*[a-f0-9]+: f0 83 1b 64 lock sbbl \$0x64,\(%rbx\)
[ ]*[a-f0-9]+: f0 29 03 lock sub %eax,\(%rbx\)
[ ]*[a-f0-9]+: f0 83 2b 64 lock subl \$0x64,\(%rbx\)
[ ]*[a-f0-9]+: f0 0f c1 03 lock xadd %eax,\(%rbx\)
[ ]*[a-f0-9]+: f0 87 03 lock xchg %eax,\(%rbx\)
[ ]*[a-f0-9]+: f0 87 03 lock xchg %eax,\(%rbx\)
[ ]*[a-f0-9]+: f0 31 03 lock xor %eax,\(%rbx\)
[ ]*[a-f0-9]+: f0 83 33 64 lock xorl \$0x64,\(%rbx\)
#pass