/* GNU/Linux/x86-64 specific low level interface, for the in-process
   agent library for GDB.

   Copyright (C) 2010-2020 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include <sys/mman.h>
#include "tracepoint.h"
#include "linux-x86-tdesc.h"
#include "gdbsupport/x86-xstate.h"

/* Defined in auto-generated file amd64-linux.c.  */
void init_registers_amd64_linux (void);
extern const struct target_desc *tdesc_amd64_linux;

/* fast tracepoints collect registers.  */

#define FT_CR_RIP 0
#define FT_CR_EFLAGS 1
#define FT_CR_R8 2
#define FT_CR_R9 3
#define FT_CR_R10 4
#define FT_CR_R11 5
#define FT_CR_R12 6
#define FT_CR_R13 7
#define FT_CR_R14 8
#define FT_CR_R15 9
#define FT_CR_RAX 10
#define FT_CR_RBX 11
#define FT_CR_RCX 12
#define FT_CR_RDX 13
#define FT_CR_RSI 14
#define FT_CR_RDI 15
#define FT_CR_RBP 16
#define FT_CR_RSP 17

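/* Byte offset of each collected register within the buffer built by
   the fast tracepoint jump pad, indexed by GDB register number.  */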
static const int x86_64_ft_collect_regmap[] = {
  FT_CR_RAX * 8, FT_CR_RBX * 8, FT_CR_RCX * 8, FT_CR_RDX * 8,
  FT_CR_RSI * 8, FT_CR_RDI * 8, FT_CR_RBP * 8, FT_CR_RSP * 8,
  FT_CR_R8 * 8, FT_CR_R9 * 8, FT_CR_R10 * 8, FT_CR_R11 * 8,
  FT_CR_R12 * 8, FT_CR_R13 * 8, FT_CR_R14 * 8, FT_CR_R15 * 8,
  FT_CR_RIP * 8, FT_CR_EFLAGS * 8
};

#define X86_64_NUM_FT_COLLECT_GREGS \
  (sizeof (x86_64_ft_collect_regmap) / sizeof (x86_64_ft_collect_regmap[0]))

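/* Copy the general purpose registers collected by a fast tracepoint
   from BUF into REGCACHE.  */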
void
supply_fast_tracepoint_registers (struct regcache *regcache,
                                  const unsigned char *buf)
{
  int i;

  for (i = 0; i < X86_64_NUM_FT_COLLECT_GREGS; i++)
    supply_register (regcache, i,
                     ((char *) buf) + x86_64_ft_collect_regmap[i]);
}

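/* Return the value of register REGNUM in the fast tracepoint collect
   buffer RAW_REGS, or zero if REGNUM is not one of the collected
   general purpose registers.  */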
ULONGEST
get_raw_reg (const unsigned char *raw_regs, int regnum)
{
  if (regnum >= X86_64_NUM_FT_COLLECT_GREGS)
    return 0;

  return *(ULONGEST *) (raw_regs + x86_64_ft_collect_regmap[regnum]);
}

#ifdef HAVE_UST

#include <ust/processor.h>

/* "struct registers" is the UST object type holding the registers at
   the time of the static tracepoint marker call.  This doesn't
   contain RIP, but we know what it must have been (the marker
   address).  */

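/* ST_REGENTRY(REG) expands to the offset and size of member REG
   within UST's "struct registers".  */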
#define ST_REGENTRY(REG) \
  { \
    offsetof (struct registers, REG), \
    sizeof (((struct registers *) NULL)->REG) \
  }

static struct
{
  int offset;
  int size;
} x86_64_st_collect_regmap[] =
  {
    ST_REGENTRY(rax),
    ST_REGENTRY(rbx),
    ST_REGENTRY(rcx),
    ST_REGENTRY(rdx),
    ST_REGENTRY(rsi),
    ST_REGENTRY(rdi),
    ST_REGENTRY(rbp),
    ST_REGENTRY(rsp),
    ST_REGENTRY(r8),
    ST_REGENTRY(r9),
    ST_REGENTRY(r10),
    ST_REGENTRY(r11),
    ST_REGENTRY(r12),
    ST_REGENTRY(r13),
    ST_REGENTRY(r14),
    ST_REGENTRY(r15),
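    /* Slot 16 (AMD64_RIP_REGNUM) is a placeholder: UST does not
       record RIP, so it is supplied from the marker address in
       supply_static_tracepoint_registers instead.  */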
    { -1, 0 },
    ST_REGENTRY(rflags),
    ST_REGENTRY(cs),
    ST_REGENTRY(ss),
  };

#define X86_64_NUM_ST_COLLECT_GREGS \
  (sizeof (x86_64_st_collect_regmap) / sizeof (x86_64_st_collect_regmap[0]))

/* GDB's RIP register number.  */
#define AMD64_RIP_REGNUM 16

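/* Copy the registers recorded at a UST static tracepoint marker from
   BUF into REGCACHE, using PC (the marker address) as the value of
   RIP.  */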
void
supply_static_tracepoint_registers (struct regcache *regcache,
                                    const unsigned char *buf,
                                    CORE_ADDR pc)
{
  int i;
  unsigned long newpc = pc;

  supply_register (regcache, AMD64_RIP_REGNUM, &newpc);

  for (i = 0; i < X86_64_NUM_ST_COLLECT_GREGS; i++)
    if (x86_64_st_collect_regmap[i].offset != -1)
      {
        switch (x86_64_st_collect_regmap[i].size)
          {
          case 8:
            supply_register (regcache, i,
                             ((char *) buf)
                             + x86_64_st_collect_regmap[i].offset);
            break;
          case 2:
            {
              unsigned long reg
                = * (short *) (((char *) buf)
                               + x86_64_st_collect_regmap[i].offset);
              reg &= 0xffff;
              supply_register (regcache, i, &reg);
            }
            break;
          default:
            internal_error (__FILE__, __LINE__,
                            "unhandled register size: %d",
                            x86_64_st_collect_regmap[i].size);
            break;
          }
      }
}

#endif /* HAVE_UST */

#if !defined __ILP32__
/* Map the tdesc index to xcr0 mask.  */
static uint64_t idx2mask[X86_TDESC_LAST] = {
  X86_XSTATE_X87_MASK,
  X86_XSTATE_SSE_MASK,
  X86_XSTATE_AVX_MASK,
  X86_XSTATE_MPX_MASK,
  X86_XSTATE_AVX_MPX_MASK,
  X86_XSTATE_AVX_AVX512_MASK,
  X86_XSTATE_AVX_MPX_AVX512_PKU_MASK,
};
#endif

/* Return target_desc to use for IPA, given the tdesc index passed by
   gdbserver.  */

const struct target_desc *
get_ipa_tdesc (int idx)
{
  if (idx >= X86_TDESC_LAST)
    {
      internal_error (__FILE__, __LINE__,
                      "unknown ipa tdesc index: %d", idx);
    }

#if defined __ILP32__
  switch (idx)
    {
    case X86_TDESC_SSE:
      return amd64_linux_read_description (X86_XSTATE_SSE_MASK, true);
    case X86_TDESC_AVX:
      return amd64_linux_read_description (X86_XSTATE_AVX_MASK, true);
    case X86_TDESC_AVX_AVX512:
      return amd64_linux_read_description (X86_XSTATE_AVX_AVX512_MASK, true);
    default:
      break;
    }
#else
  return amd64_linux_read_description (idx2mask[idx], false);
#endif

  internal_error (__FILE__, __LINE__,
                  "unknown ipa tdesc index: %d", idx);
}

/* Allocate buffer for the jump pads.  The branch instruction has a
   reach of +/- 31-bit, and the executable is loaded at low addresses.

   64-bit: Use MAP_32BIT to allocate in the first 2GB.  Shared
   libraries, being allocated at the top, are unfortunately out of
   luck.

   x32: Since MAP_32BIT is 64-bit only, do the placement manually.
   Try allocating at '0x80000000 - SIZE' initially, decreasing until
   we hit a free area.  This ensures the executable is fully covered,
   and is as close as possible to the shared libraries, which are
   usually mapped at the top of the first 4GB of the address space.
*/

void *
alloc_jump_pad_buffer (size_t size)
{
#if __ILP32__
  uintptr_t addr;
  int pagesize;

  pagesize = sysconf (_SC_PAGE_SIZE);
  if (pagesize == -1)
    perror_with_name ("sysconf");

  addr = 0x80000000 - size;

  /* size should already be page-aligned, but this can't hurt.  */
  addr &= ~(pagesize - 1);

  /* Search for a free area.  If we hit 0, we're out of luck.  */
  for (; addr; addr -= pagesize)
    {
      void *res;

      /* No MAP_FIXED - we don't want to zap someone's mapping.  */
      res = mmap ((void *) addr, size,
                  PROT_READ | PROT_WRITE | PROT_EXEC,
                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

      /* If we got what we wanted, return.  */
      if ((uintptr_t) res == addr)
        return res;

      /* If we got a mapping, but at a wrong address, undo it.  */
      if (res != MAP_FAILED)
        munmap (res, size);
    }

  return NULL;
#else
  void *res = mmap (NULL, size, PROT_READ | PROT_WRITE | PROT_EXEC,
                    MAP_PRIVATE | MAP_ANONYMOUS | MAP_32BIT, -1, 0);

  if (res == MAP_FAILED)
    return NULL;

  return res;
#endif
}

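/* Pre-create all the target descriptions that get_ipa_tdesc may later
   return.  */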
void
initialize_low_tracepoint (void)
{
#if defined __ILP32__
  amd64_linux_read_description (X86_XSTATE_SSE_MASK, true);
  amd64_linux_read_description (X86_XSTATE_AVX_MASK, true);
  amd64_linux_read_description (X86_XSTATE_AVX_AVX512_MASK, true);
#else
  for (auto i = 0; i < X86_TDESC_LAST; i++)
    amd64_linux_read_description (idx2mask[i], false);
#endif
}