/* Copyright (C) 2009-2022 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
#include "gdbsupport/common-defs.h"
#include "gdbsupport/break-common.h"
#include "gdbsupport/common-regcache.h"
#include "nat/linux-nat.h"
#include "aarch64-linux-hw-point.h"

#include <sys/uio.h>

/* The order in which <sys/ptrace.h> and <asm/ptrace.h> are included
   can be important.  <sys/ptrace.h> often declares various PTRACE_*
   enums.  <asm/ptrace.h> often defines preprocessor constants for
   these very same symbols.  When that's the case, build errors will
   result when <asm/ptrace.h> is included before <sys/ptrace.h>.  */
#include <sys/ptrace.h>
#include <asm/ptrace.h>

#include <elf.h>

/* Number of hardware breakpoints/watchpoints the target supports.
   They are initialized with values obtained via the ptrace calls
   with NT_ARM_HW_BREAK and NT_ARM_HW_WATCH respectively (see
   aarch64_linux_get_debug_reg_capacity below); zero until then.  */

int aarch64_num_bp_regs;
int aarch64_num_wp_regs;

/* True if this kernel does not have the bug described by PR
   external/20207 (Linux >= 4.10).  A fixed kernel supports any
   contiguous range of bits in 8-bit byte DR_CONTROL_MASK.  A buggy
   kernel supports only 0x01, 0x03, 0x0f and 0xff.  We start by
   assuming the bug is fixed, and then detect the bug at
   PTRACE_SETREGSET time (see aarch64_linux_set_debug_regs).  */
static bool kernel_supports_any_contiguous_range = true;

52/* Return starting byte 0..7 incl. of a watchpoint encoded by CTRL. */
53
54unsigned int
55aarch64_watchpoint_offset (unsigned int ctrl)
56{
57 uint8_t mask = DR_CONTROL_MASK (ctrl);
58 unsigned retval;
59
60 /* Shift out bottom zeros. */
61 for (retval = 0; mask && (mask & 1) == 0; ++retval)
62 mask >>= 1;
63
64 return retval;
65}
66
554717a3
YQ
67/* Utility function that returns the length in bytes of a watchpoint
68 according to the content of a hardware debug control register CTRL.
a3b60e45
JK
69 Any contiguous range of bytes in CTRL is supported. The returned
70 value can be between 0..8 (inclusive). */
554717a3
YQ
71
72unsigned int
73aarch64_watchpoint_length (unsigned int ctrl)
74{
a3b60e45
JK
75 uint8_t mask = DR_CONTROL_MASK (ctrl);
76 unsigned retval;
77
78 /* Shift out bottom zeros. */
79 mask >>= aarch64_watchpoint_offset (ctrl);
80
81 /* Count bottom ones. */
82 for (retval = 0; (mask & 1) != 0; ++retval)
83 mask >>= 1;
84
85 if (mask != 0)
86 error (_("Unexpected hardware watchpoint length register value 0x%x"),
87 DR_CONTROL_MASK (ctrl));
88
89 return retval;
554717a3
YQ
90}
91
92/* Given the hardware breakpoint or watchpoint type TYPE and its
93 length LEN, return the expected encoding for a hardware
94 breakpoint/watchpoint control register. */
95
96static unsigned int
a3b60e45 97aarch64_point_encode_ctrl_reg (enum target_hw_bp_type type, int offset, int len)
554717a3
YQ
98{
99 unsigned int ctrl, ttype;
100
a3b60e45
JK
101 gdb_assert (offset == 0 || kernel_supports_any_contiguous_range);
102 gdb_assert (offset + len <= AARCH64_HWP_MAX_LEN_PER_REG);
103
554717a3
YQ
104 /* type */
105 switch (type)
106 {
107 case hw_write:
108 ttype = 2;
109 break;
110 case hw_read:
111 ttype = 1;
112 break;
113 case hw_access:
114 ttype = 3;
115 break;
116 case hw_execute:
117 ttype = 0;
118 break;
119 default:
120 perror_with_name (_("Unrecognized breakpoint/watchpoint type"));
121 }
122
123 ctrl = ttype << 3;
124
a3b60e45
JK
125 /* offset and length bitmask */
126 ctrl |= ((1 << len) - 1) << (5 + offset);
554717a3
YQ
127 /* enabled at el0 */
128 ctrl |= (2 << 1) | 1;
129
130 return ctrl;
131}
132
133/* Addresses to be written to the hardware breakpoint and watchpoint
134 value registers need to be aligned; the alignment is 4-byte and
135 8-type respectively. Linux kernel rejects any non-aligned address
136 it receives from the related ptrace call. Furthermore, the kernel
137 currently only supports the following Byte Address Select (BAS)
138 values: 0x1, 0x3, 0xf and 0xff, which means that for a hardware
139 watchpoint to be accepted by the kernel (via ptrace call), its
140 valid length can only be 1 byte, 2 bytes, 4 bytes or 8 bytes.
141 Despite these limitations, the unaligned watchpoint is supported in
142 this port.
143
144 Return 0 for any non-compliant ADDR and/or LEN; return 1 otherwise. */
145
146static int
147aarch64_point_is_aligned (int is_watchpoint, CORE_ADDR addr, int len)
148{
8d689ee5
YQ
149 unsigned int alignment = 0;
150
151 if (is_watchpoint)
152 alignment = AARCH64_HWP_ALIGNMENT;
153 else
154 {
155 struct regcache *regcache
156 = get_thread_regcache_for_ptid (current_lwp_ptid ());
157
158 /* Set alignment to 2 only if the current process is 32-bit,
159 since thumb instruction can be 2-byte aligned. Otherwise, set
160 alignment to AARCH64_HBP_ALIGNMENT. */
161 if (regcache_register_size (regcache, 0) == 8)
162 alignment = AARCH64_HBP_ALIGNMENT;
163 else
164 alignment = 2;
165 }
554717a3
YQ
166
167 if (addr & (alignment - 1))
168 return 0;
169
a3b60e45
JK
170 if ((!kernel_supports_any_contiguous_range
171 && len != 8 && len != 4 && len != 2 && len != 1)
172 || (kernel_supports_any_contiguous_range
173 && (len < 1 || len > 8)))
554717a3
YQ
174 return 0;
175
176 return 1;
177}
178
/* Given the (potentially unaligned) watchpoint address in ADDR and
   length in LEN, return the aligned address, offset from that base
   address, and aligned length in *ALIGNED_ADDR_P, *ALIGNED_OFFSET_P
   and *ALIGNED_LEN_P, respectively.  The returned values will be
   valid values to write to the hardware watchpoint value and control
   registers.

   The given watchpoint may get truncated if more than one hardware
   register is needed to cover the watched region.  *NEXT_ADDR_P
   and *NEXT_LEN_P, if non-NULL, will return the address and length
   of the remaining part of the watchpoint (which can be processed
   by calling this routine again to generate another aligned address,
   offset and length tuple.

   Essentially, unaligned watchpoint is achieved by minimally
   enlarging the watched area to meet the alignment requirement, and
   if necessary, splitting the watchpoint over several hardware
   watchpoint registers.

   On kernels that predate the support for Byte Address Select (BAS)
   in the hardware watchpoint control register, the offset from the
   base address is always zero, and so in that case the trade-off is
   that there will be false-positive hits for the read-type or the
   access-type hardware watchpoints; for the write type, which is more
   commonly used, there will be no such issues, as the higher-level
   breakpoint management in gdb always examines the exact watched
   region for any content change, and transparently resumes a thread
   from a watchpoint trap if there is no change to the watched region.

   Another limitation is that because the watched region is enlarged,
   the watchpoint fault address discovered by
   aarch64_stopped_data_address may be outside of the original watched
   region, especially when the triggering instruction is accessing a
   larger region.  When the fault address is not within any known
   range, watchpoints_triggered in gdb will get confused, as the
   higher-level watchpoint management is only aware of original
   watched regions, and will think that some unknown watchpoint has
   been triggered.  To prevent such a case,
   aarch64_stopped_data_address implementations in gdb and gdbserver
   try to match the trapped address with a watched region, and return
   an address within the latter.  */

static void
aarch64_align_watchpoint (CORE_ADDR addr, int len, CORE_ADDR *aligned_addr_p,
			  int *aligned_offset_p, int *aligned_len_p,
			  CORE_ADDR *next_addr_p, int *next_len_p,
			  CORE_ADDR *next_addr_orig_p)
{
  int aligned_len;
  unsigned int offset, aligned_offset;
  CORE_ADDR aligned_addr;
  const unsigned int alignment = AARCH64_HWP_ALIGNMENT;
  const unsigned int max_wp_len = AARCH64_HWP_MAX_LEN_PER_REG;

  /* As assumed by the algorithm.  */
  gdb_assert (alignment == max_wp_len);

  if (len <= 0)
    return;

  /* The address put into the hardware watchpoint value register must
     be aligned.  */
  offset = addr & (alignment - 1);
  aligned_addr = addr - offset;
  /* Only kernels without the PR external/20207 bug accept a non-zero
     byte offset in the control register.  */
  aligned_offset
    = kernel_supports_any_contiguous_range ? addr & (alignment - 1) : 0;

  gdb_assert (offset >= 0 && offset < alignment);
  gdb_assert (aligned_addr >= 0 && aligned_addr <= addr);
  gdb_assert (offset + len > 0);

  if (offset + len >= max_wp_len)
    {
      /* Need more than one watchpoint register; truncate at the
	 alignment boundary.  */
      aligned_len
	= max_wp_len - (kernel_supports_any_contiguous_range ? offset : 0);
      /* Advance ADDR/LEN past the chunk consumed by this register.  */
      len -= (max_wp_len - offset);
      addr += (max_wp_len - offset);
      gdb_assert ((addr & (alignment - 1)) == 0);
    }
  else
    {
      /* Find the smallest valid length that is large enough to
	 accommodate this watchpoint.  */
      static const unsigned char
	aligned_len_array[AARCH64_HWP_MAX_LEN_PER_REG] =
	{ 1, 2, 4, 4, 8, 8, 8, 8 };

      aligned_len = (kernel_supports_any_contiguous_range
		     ? len : aligned_len_array[offset + len - 1]);
      /* The whole remaining region fits in this register.  */
      addr += len;
      len = 0;
    }

  /* Each out-parameter is optional; NULL means the caller is not
     interested in that value.  */
  if (aligned_addr_p)
    *aligned_addr_p = aligned_addr;
  if (aligned_offset_p)
    *aligned_offset_p = aligned_offset;
  if (aligned_len_p)
    *aligned_len_p = aligned_len;
  if (next_addr_p)
    *next_addr_p = addr;
  if (next_len_p)
    *next_len_p = len;
  if (next_addr_orig_p)
    *next_addr_orig_p = align_down (*next_addr_orig_p + alignment, alignment);
}

/* Helper for aarch64_notify_debug_reg_change.  Records the
   information about the change of one hardware breakpoint/watchpoint
   setting for the thread LWP.
   N.B.  The actual updating of hardware debug registers is not
   carried out until the moment the thread is resumed.  */

static int
debug_reg_change_callback (struct lwp_info *lwp, int is_watchpoint,
			   unsigned int idx)
{
  int tid = ptid_of_lwp (lwp).lwp ();
  struct arch_lwp_info *info = lwp_arch_private_info (lwp);
  dr_changed_t *dr_changed_ptr;
  dr_changed_t dr_changed;

  /* Lazily allocate the per-LWP arch private data on first use.  */
  if (info == NULL)
    {
      info = XCNEW (struct arch_lwp_info);
      lwp_set_arch_private_info (lwp, info);
    }

  if (show_debug_regs)
    {
      debug_printf ("debug_reg_change_callback: \n\tOn entry:\n");
      debug_printf ("\ttid%d, dr_changed_bp=0x%s, "
		    "dr_changed_wp=0x%s\n", tid,
		    phex (info->dr_changed_bp, 8),
		    phex (info->dr_changed_wp, 8));
    }

  /* Select the changed-bits bitmap for the register bank being
     touched.  */
  dr_changed_ptr = is_watchpoint ? &info->dr_changed_wp
    : &info->dr_changed_bp;
  dr_changed = *dr_changed_ptr;

  gdb_assert (idx >= 0
	      && (idx <= (is_watchpoint ? aarch64_num_wp_regs
			  : aarch64_num_bp_regs)));

  /* The actual update is done later just before resuming the lwp,
     we just mark that one register pair needs updating.  */
  DR_MARK_N_CHANGED (dr_changed, idx);
  *dr_changed_ptr = dr_changed;

  /* If the lwp isn't stopped, force it to momentarily pause, so
     we can update its debug registers.  */
  if (!lwp_is_stopped (lwp))
    linux_stop_lwp (lwp);

  if (show_debug_regs)
    {
      debug_printf ("\tOn exit:\n\ttid%d, dr_changed_bp=0x%s, "
		    "dr_changed_wp=0x%s\n", tid,
		    phex (info->dr_changed_bp, 8),
		    phex (info->dr_changed_wp, 8));
    }

  /* Zero keeps iterate_over_lwps walking the remaining LWPs.  */
  return 0;
}

347/* Notify each thread that their IDXth breakpoint/watchpoint register
348 pair needs to be updated. The message will be recorded in each
349 thread's arch-specific data area, the actual updating will be done
350 when the thread is resumed. */
351
352static void
353aarch64_notify_debug_reg_change (const struct aarch64_debug_reg_state *state,
354 int is_watchpoint, unsigned int idx)
355{
e99b03dc 356 ptid_t pid_ptid = ptid_t (current_lwp_ptid ().pid ());
5e35436e 357
d3a70e03
TT
358 iterate_over_lwps (pid_ptid, [=] (struct lwp_info *info)
359 {
360 return debug_reg_change_callback (info,
361 is_watchpoint,
362 idx);
363 });
5e35436e
YQ
364}
365
/* Reconfigure STATE to be compatible with Linux kernels with the PR
   external/20207 bug.  This is called when
   KERNEL_SUPPORTS_ANY_CONTIGUOUS_RANGE transitions to false.  Note we
   don't try to support combining watchpoints with matching (and thus
   shared) masks, as it's too late when we get here.  On buggy
   kernels, GDB will try to first setup the perfect matching ranges,
   which will run out of registers before this function can merge
   them.  It doesn't look like worth the effort to improve that, given
   eventually buggy kernels will be phased out.  */

static void
aarch64_downgrade_regs (struct aarch64_debug_reg_state *state)
{
  /* Only enabled watchpoint entries (bit 0 set) need downgrading.  */
  for (int i = 0; i < aarch64_num_wp_regs; ++i)
    if ((state->dr_ctrl_wp[i] & 1) != 0)
      {
	gdb_assert (state->dr_ref_count_wp[i] != 0);
	/* Extract the 8-bit byte-address-select mask (bits 5..12).  */
	uint8_t mask_orig = (state->dr_ctrl_wp[i] >> 5) & 0xff;
	gdb_assert (mask_orig != 0);
	/* The only BAS values a buggy kernel accepts.  */
	static const uint8_t old_valid[] = { 0x01, 0x03, 0x0f, 0xff };
	uint8_t mask = 0;
	/* Widen the mask to the smallest accepted value covering it.  */
	for (const uint8_t old_mask : old_valid)
	  if (mask_orig <= old_mask)
	    {
	      mask = old_mask;
	      break;
	    }
	gdb_assert (mask != 0);

	/* No update needed for this watchpoint?  */
	if (mask == mask_orig)
	  continue;
	state->dr_ctrl_wp[i] |= mask << 5;
	state->dr_addr_wp[i]
	  = align_down (state->dr_addr_wp[i], AARCH64_HWP_ALIGNMENT);

	/* Try to match duplicate entries.  Widening two nearby
	   watchpoints can make them identical; fold the reference
	   counts together and free the later register.  */
	for (int j = 0; j < i; ++j)
	  if ((state->dr_ctrl_wp[j] & 1) != 0
	      && state->dr_addr_wp[j] == state->dr_addr_wp[i]
	      && state->dr_addr_orig_wp[j] == state->dr_addr_orig_wp[i]
	      && state->dr_ctrl_wp[j] == state->dr_ctrl_wp[i])
	    {
	      state->dr_ref_count_wp[j] += state->dr_ref_count_wp[i];
	      state->dr_ref_count_wp[i] = 0;
	      state->dr_addr_wp[i] = 0;
	      state->dr_addr_orig_wp[i] = 0;
	      state->dr_ctrl_wp[i] &= ~1;
	      break;
	    }

	aarch64_notify_debug_reg_change (state, 1 /* is_watchpoint */, i);
      }
}

554717a3
YQ
421/* Record the insertion of one breakpoint/watchpoint, as represented
422 by ADDR and CTRL, in the process' arch-specific data area *STATE. */
423
424static int
425aarch64_dr_state_insert_one_point (struct aarch64_debug_reg_state *state,
426 enum target_hw_bp_type type,
a3b60e45
JK
427 CORE_ADDR addr, int offset, int len,
428 CORE_ADDR addr_orig)
554717a3
YQ
429{
430 int i, idx, num_regs, is_watchpoint;
431 unsigned int ctrl, *dr_ctrl_p, *dr_ref_count;
a3b60e45 432 CORE_ADDR *dr_addr_p, *dr_addr_orig_p;
554717a3
YQ
433
434 /* Set up state pointers. */
435 is_watchpoint = (type != hw_execute);
436 gdb_assert (aarch64_point_is_aligned (is_watchpoint, addr, len));
437 if (is_watchpoint)
438 {
439 num_regs = aarch64_num_wp_regs;
440 dr_addr_p = state->dr_addr_wp;
a3b60e45 441 dr_addr_orig_p = state->dr_addr_orig_wp;
554717a3
YQ
442 dr_ctrl_p = state->dr_ctrl_wp;
443 dr_ref_count = state->dr_ref_count_wp;
444 }
445 else
446 {
447 num_regs = aarch64_num_bp_regs;
448 dr_addr_p = state->dr_addr_bp;
a3b60e45 449 dr_addr_orig_p = nullptr;
554717a3
YQ
450 dr_ctrl_p = state->dr_ctrl_bp;
451 dr_ref_count = state->dr_ref_count_bp;
452 }
453
a3b60e45 454 ctrl = aarch64_point_encode_ctrl_reg (type, offset, len);
554717a3
YQ
455
456 /* Find an existing or free register in our cache. */
457 idx = -1;
458 for (i = 0; i < num_regs; ++i)
459 {
460 if ((dr_ctrl_p[i] & 1) == 0)
461 {
462 gdb_assert (dr_ref_count[i] == 0);
463 idx = i;
464 /* no break; continue hunting for an exising one. */
465 }
a3b60e45
JK
466 else if (dr_addr_p[i] == addr
467 && (dr_addr_orig_p == nullptr || dr_addr_orig_p[i] == addr_orig)
468 && dr_ctrl_p[i] == ctrl)
554717a3
YQ
469 {
470 gdb_assert (dr_ref_count[i] != 0);
471 idx = i;
472 break;
473 }
474 }
475
476 /* No space. */
477 if (idx == -1)
478 return -1;
479
480 /* Update our cache. */
481 if ((dr_ctrl_p[idx] & 1) == 0)
482 {
483 /* new entry */
484 dr_addr_p[idx] = addr;
a3b60e45
JK
485 if (dr_addr_orig_p != nullptr)
486 dr_addr_orig_p[idx] = addr_orig;
554717a3
YQ
487 dr_ctrl_p[idx] = ctrl;
488 dr_ref_count[idx] = 1;
489 /* Notify the change. */
490 aarch64_notify_debug_reg_change (state, is_watchpoint, idx);
491 }
492 else
493 {
494 /* existing entry */
495 dr_ref_count[idx]++;
496 }
497
498 return 0;
499}
500
501/* Record the removal of one breakpoint/watchpoint, as represented by
502 ADDR and CTRL, in the process' arch-specific data area *STATE. */
503
504static int
505aarch64_dr_state_remove_one_point (struct aarch64_debug_reg_state *state,
506 enum target_hw_bp_type type,
a3b60e45
JK
507 CORE_ADDR addr, int offset, int len,
508 CORE_ADDR addr_orig)
554717a3
YQ
509{
510 int i, num_regs, is_watchpoint;
511 unsigned int ctrl, *dr_ctrl_p, *dr_ref_count;
a3b60e45 512 CORE_ADDR *dr_addr_p, *dr_addr_orig_p;
554717a3
YQ
513
514 /* Set up state pointers. */
515 is_watchpoint = (type != hw_execute);
554717a3
YQ
516 if (is_watchpoint)
517 {
518 num_regs = aarch64_num_wp_regs;
519 dr_addr_p = state->dr_addr_wp;
a3b60e45 520 dr_addr_orig_p = state->dr_addr_orig_wp;
554717a3
YQ
521 dr_ctrl_p = state->dr_ctrl_wp;
522 dr_ref_count = state->dr_ref_count_wp;
523 }
524 else
525 {
526 num_regs = aarch64_num_bp_regs;
527 dr_addr_p = state->dr_addr_bp;
a3b60e45 528 dr_addr_orig_p = nullptr;
554717a3
YQ
529 dr_ctrl_p = state->dr_ctrl_bp;
530 dr_ref_count = state->dr_ref_count_bp;
531 }
532
a3b60e45 533 ctrl = aarch64_point_encode_ctrl_reg (type, offset, len);
554717a3
YQ
534
535 /* Find the entry that matches the ADDR and CTRL. */
536 for (i = 0; i < num_regs; ++i)
a3b60e45
JK
537 if (dr_addr_p[i] == addr
538 && (dr_addr_orig_p == nullptr || dr_addr_orig_p[i] == addr_orig)
539 && dr_ctrl_p[i] == ctrl)
554717a3
YQ
540 {
541 gdb_assert (dr_ref_count[i] != 0);
542 break;
543 }
544
545 /* Not found. */
546 if (i == num_regs)
547 return -1;
548
549 /* Clear our cache. */
550 if (--dr_ref_count[i] == 0)
551 {
552 /* Clear the enable bit. */
553 ctrl &= ~1;
554 dr_addr_p[i] = 0;
a3b60e45
JK
555 if (dr_addr_orig_p != nullptr)
556 dr_addr_orig_p[i] = 0;
554717a3
YQ
557 dr_ctrl_p[i] = ctrl;
558 /* Notify the change. */
559 aarch64_notify_debug_reg_change (state, is_watchpoint, i);
560 }
561
562 return 0;
563}
564
565int
566aarch64_handle_breakpoint (enum target_hw_bp_type type, CORE_ADDR addr,
567 int len, int is_insert,
568 struct aarch64_debug_reg_state *state)
569{
554717a3 570 if (is_insert)
805035d7
YQ
571 {
572 /* The hardware breakpoint on AArch64 should always be 4-byte
573 aligned, but on AArch32, it can be 2-byte aligned. Note that
574 we only check the alignment on inserting breakpoint because
575 aarch64_point_is_aligned needs the inferior_ptid inferior's
576 regcache to decide whether the inferior is 32-bit or 64-bit.
577 However when GDB follows the parent process and detach breakpoints
578 from child process, inferior_ptid is the child ptid, but the
579 child inferior doesn't exist in GDB's view yet. */
580 if (!aarch64_point_is_aligned (0 /* is_watchpoint */ , addr, len))
581 return -1;
582
a3b60e45 583 return aarch64_dr_state_insert_one_point (state, type, addr, 0, len, -1);
805035d7 584 }
554717a3 585 else
a3b60e45 586 return aarch64_dr_state_remove_one_point (state, type, addr, 0, len, -1);
554717a3
YQ
587}
588
589/* This is essentially the same as aarch64_handle_breakpoint, apart
590 from that it is an aligned watchpoint to be handled. */
591
592static int
593aarch64_handle_aligned_watchpoint (enum target_hw_bp_type type,
594 CORE_ADDR addr, int len, int is_insert,
595 struct aarch64_debug_reg_state *state)
596{
597 if (is_insert)
a3b60e45 598 return aarch64_dr_state_insert_one_point (state, type, addr, 0, len, addr);
554717a3 599 else
a3b60e45 600 return aarch64_dr_state_remove_one_point (state, type, addr, 0, len, addr);
554717a3
YQ
601}
602
/* Insert/remove unaligned watchpoint by calling
   aarch64_align_watchpoint repeatedly until the whole watched region,
   as represented by ADDR and LEN, has been properly aligned and ready
   to be written to one or more hardware watchpoint registers.
   IS_INSERT indicates whether this is an insertion or a deletion.
   Return 0 if succeed.  */

static int
aarch64_handle_unaligned_watchpoint (enum target_hw_bp_type type,
				     CORE_ADDR addr, int len, int is_insert,
				     struct aarch64_debug_reg_state *state)
{
  /* The original, user-requested start address; tracked per chunk so
     each hardware register records which request it belongs to.  */
  CORE_ADDR addr_orig = addr;

  while (len > 0)
    {
      CORE_ADDR aligned_addr;
      int aligned_offset, aligned_len, ret;
      CORE_ADDR addr_orig_next = addr_orig;

      /* ADDR and LEN are advanced in place past the chunk consumed
	 by this iteration; ADDR_ORIG_NEXT is advanced to the next
	 aligned boundary.  */
      aarch64_align_watchpoint (addr, len, &aligned_addr, &aligned_offset,
				&aligned_len, &addr, &len, &addr_orig_next);

      if (is_insert)
	ret = aarch64_dr_state_insert_one_point (state, type, aligned_addr,
						 aligned_offset,
						 aligned_len, addr_orig);
      else
	ret = aarch64_dr_state_remove_one_point (state, type, aligned_addr,
						 aligned_offset,
						 aligned_len, addr_orig);

      if (show_debug_regs)
	debug_printf ("handle_unaligned_watchpoint: is_insert: %d\n"
		      "                             "
		      "aligned_addr: %s, aligned_len: %d\n"
		      "                             "
		      "addr_orig: %s\n"
		      "                             "
		      "next_addr: %s, next_len: %d\n"
		      "                             "
		      "addr_orig_next: %s\n",
		      is_insert, core_addr_to_string_nz (aligned_addr),
		      aligned_len, core_addr_to_string_nz (addr_orig),
		      core_addr_to_string_nz (addr), len,
		      core_addr_to_string_nz (addr_orig_next));

      addr_orig = addr_orig_next;

      /* Stop at the first failing chunk.  */
      if (ret != 0)
	return ret;
    }

  return 0;
}

659int
660aarch64_handle_watchpoint (enum target_hw_bp_type type, CORE_ADDR addr,
661 int len, int is_insert,
662 struct aarch64_debug_reg_state *state)
663{
664 if (aarch64_point_is_aligned (1 /* is_watchpoint */ , addr, len))
665 return aarch64_handle_aligned_watchpoint (type, addr, len, is_insert,
666 state);
667 else
668 return aarch64_handle_unaligned_watchpoint (type, addr, len, is_insert,
669 state);
670}
671
/* Call ptrace to set the thread TID's hardware breakpoint/watchpoint
   registers with data from *STATE.  WATCHPOINT selects the watchpoint
   (non-zero) or breakpoint (zero) register set.  */

void
aarch64_linux_set_debug_regs (struct aarch64_debug_reg_state *state,
			      int tid, int watchpoint)
{
  int i, count;
  struct iovec iov;
  struct user_hwdebug_state regs;
  const CORE_ADDR *addr;
  const unsigned int *ctrl;

  memset (&regs, 0, sizeof (regs));
  iov.iov_base = &regs;
  count = watchpoint ? aarch64_num_wp_regs : aarch64_num_bp_regs;
  addr = watchpoint ? state->dr_addr_wp : state->dr_addr_bp;
  ctrl = watchpoint ? state->dr_ctrl_wp : state->dr_ctrl_bp;
  if (count == 0)
    return;
  /* Transfer only the header plus the COUNT register pairs in use.  */
  iov.iov_len = (offsetof (struct user_hwdebug_state, dbg_regs)
		 + count * sizeof (regs.dbg_regs[0]));

  for (i = 0; i < count; i++)
    {
      regs.dbg_regs[i].addr = addr[i];
      regs.dbg_regs[i].ctrl = ctrl[i];
    }

  if (ptrace (PTRACE_SETREGSET, tid,
	      watchpoint ? NT_ARM_HW_WATCH : NT_ARM_HW_BREAK,
	      (void *) &iov))
    {
      /* Handle Linux kernels with the PR external/20207 bug: on
	 EINVAL, downgrade the watchpoint state to the restricted BAS
	 values the buggy kernel accepts and retry once.  */
      if (watchpoint && errno == EINVAL
	  && kernel_supports_any_contiguous_range)
	{
	  kernel_supports_any_contiguous_range = false;
	  aarch64_downgrade_regs (state);
	  aarch64_linux_set_debug_regs (state, tid, watchpoint);
	  return;
	}
      error (_("Unexpected error setting hardware debug registers"));
    }
}

754e3168
AH
718/* See nat/aarch64-linux-hw-point.h. */
719
720bool
721aarch64_linux_any_set_debug_regs_state (aarch64_debug_reg_state *state,
722 bool watchpoint)
723{
724 int count = watchpoint ? aarch64_num_wp_regs : aarch64_num_bp_regs;
725 if (count == 0)
726 return false;
727
728 const CORE_ADDR *addr = watchpoint ? state->dr_addr_wp : state->dr_addr_bp;
729 const unsigned int *ctrl = watchpoint ? state->dr_ctrl_wp : state->dr_ctrl_bp;
730
731 for (int i = 0; i < count; i++)
732 if (addr[i] != 0 || ctrl[i] != 0)
733 return true;
734
735 return false;
736}
737
554717a3
YQ
738/* Print the values of the cached breakpoint/watchpoint registers. */
739
740void
741aarch64_show_debug_reg_state (struct aarch64_debug_reg_state *state,
742 const char *func, CORE_ADDR addr,
743 int len, enum target_hw_bp_type type)
744{
745 int i;
746
747 debug_printf ("%s", func);
748 if (addr || len)
749 debug_printf (" (addr=0x%08lx, len=%d, type=%s)",
750 (unsigned long) addr, len,
751 type == hw_write ? "hw-write-watchpoint"
752 : (type == hw_read ? "hw-read-watchpoint"
753 : (type == hw_access ? "hw-access-watchpoint"
754 : (type == hw_execute ? "hw-breakpoint"
755 : "??unknown??"))));
756 debug_printf (":\n");
757
758 debug_printf ("\tBREAKPOINTs:\n");
759 for (i = 0; i < aarch64_num_bp_regs; i++)
760 debug_printf ("\tBP%d: addr=%s, ctrl=0x%08x, ref.count=%d\n",
761 i, core_addr_to_string_nz (state->dr_addr_bp[i]),
762 state->dr_ctrl_bp[i], state->dr_ref_count_bp[i]);
763
764 debug_printf ("\tWATCHPOINTs:\n");
765 for (i = 0; i < aarch64_num_wp_regs; i++)
a3b60e45 766 debug_printf ("\tWP%d: addr=%s (orig=%s), ctrl=0x%08x, ref.count=%d\n",
554717a3 767 i, core_addr_to_string_nz (state->dr_addr_wp[i]),
a3b60e45 768 core_addr_to_string_nz (state->dr_addr_orig_wp[i]),
554717a3
YQ
769 state->dr_ctrl_wp[i], state->dr_ref_count_wp[i]);
770}
af1b22f3
YQ
771
772/* Get the hardware debug register capacity information from the
773 process represented by TID. */
774
775void
776aarch64_linux_get_debug_reg_capacity (int tid)
777{
778 struct iovec iov;
779 struct user_hwdebug_state dreg_state;
780
781 iov.iov_base = &dreg_state;
782 iov.iov_len = sizeof (dreg_state);
783
784 /* Get hardware watchpoint register info. */
785 if (ptrace (PTRACE_GETREGSET, tid, NT_ARM_HW_WATCH, &iov) == 0
49ecef2a
AP
786 && (AARCH64_DEBUG_ARCH (dreg_state.dbg_info) == AARCH64_DEBUG_ARCH_V8
787 || AARCH64_DEBUG_ARCH (dreg_state.dbg_info) == AARCH64_DEBUG_ARCH_V8_1
788 || AARCH64_DEBUG_ARCH (dreg_state.dbg_info) == AARCH64_DEBUG_ARCH_V8_2))
af1b22f3
YQ
789 {
790 aarch64_num_wp_regs = AARCH64_DEBUG_NUM_SLOTS (dreg_state.dbg_info);
791 if (aarch64_num_wp_regs > AARCH64_HWP_MAX_NUM)
792 {
793 warning (_("Unexpected number of hardware watchpoint registers"
794 " reported by ptrace, got %d, expected %d."),
795 aarch64_num_wp_regs, AARCH64_HWP_MAX_NUM);
796 aarch64_num_wp_regs = AARCH64_HWP_MAX_NUM;
797 }
798 }
799 else
800 {
801 warning (_("Unable to determine the number of hardware watchpoints"
802 " available."));
803 aarch64_num_wp_regs = 0;
804 }
805
806 /* Get hardware breakpoint register info. */
807 if (ptrace (PTRACE_GETREGSET, tid, NT_ARM_HW_BREAK, &iov) == 0
49ecef2a
AP
808 && (AARCH64_DEBUG_ARCH (dreg_state.dbg_info) == AARCH64_DEBUG_ARCH_V8
809 || AARCH64_DEBUG_ARCH (dreg_state.dbg_info) == AARCH64_DEBUG_ARCH_V8_1
810 || AARCH64_DEBUG_ARCH (dreg_state.dbg_info) == AARCH64_DEBUG_ARCH_V8_2))
af1b22f3
YQ
811 {
812 aarch64_num_bp_regs = AARCH64_DEBUG_NUM_SLOTS (dreg_state.dbg_info);
813 if (aarch64_num_bp_regs > AARCH64_HBP_MAX_NUM)
814 {
815 warning (_("Unexpected number of hardware breakpoint registers"
816 " reported by ptrace, got %d, expected %d."),
817 aarch64_num_bp_regs, AARCH64_HBP_MAX_NUM);
818 aarch64_num_bp_regs = AARCH64_HBP_MAX_NUM;
819 }
820 }
821 else
822 {
823 warning (_("Unable to determine the number of hardware breakpoints"
824 " available."));
825 aarch64_num_bp_regs = 0;
826 }
827}
39edd165
YQ
828
829/* Return true if we can watch a memory region that starts address
830 ADDR and whose length is LEN in bytes. */
831
832int
833aarch64_linux_region_ok_for_watchpoint (CORE_ADDR addr, int len)
834{
835 CORE_ADDR aligned_addr;
836
837 /* Can not set watchpoints for zero or negative lengths. */
838 if (len <= 0)
839 return 0;
840
841 /* Must have hardware watchpoint debug register(s). */
842 if (aarch64_num_wp_regs == 0)
843 return 0;
844
845 /* We support unaligned watchpoint address and arbitrary length,
846 as long as the size of the whole watched area after alignment
847 doesn't exceed size of the total area that all watchpoint debug
848 registers can watch cooperatively.
849
850 This is a very relaxed rule, but unfortunately there are
851 limitations, e.g. false-positive hits, due to limited support of
852 hardware debug registers in the kernel. See comment above
853 aarch64_align_watchpoint for more information. */
854
855 aligned_addr = addr & ~(AARCH64_HWP_MAX_LEN_PER_REG - 1);
856 if (aligned_addr + aarch64_num_wp_regs * AARCH64_HWP_MAX_LEN_PER_REG
857 < addr + len)
858 return 0;
859
860 /* All tests passed so we are likely to be able to set the watchpoint.
861 The reason that it is 'likely' rather than 'must' is because
862 we don't check the current usage of the watchpoint registers, and
863 there may not be enough registers available for this watchpoint.
864 Ideally we should check the cached debug register state, however
865 the checking is costly. */
866 return 1;
867}