Commit | Line | Data |
---|---|---|
abeeff98 LM |
1 | /* Target-dependent code for ROCm. |
2 | ||
ca9af5a1 LM |
3 | Copyright (C) 2019-2020 Free Software Foundation, Inc. |
4 | Copyright (C) 2019-2020 Advanced Micro Devices, Inc. All rights reserved. | |
abeeff98 LM |
5 | |
6 | This file is part of GDB. | |
7 | ||
8 | This program is free software; you can redistribute it and/or modify | |
9 | it under the terms of the GNU General Public License as published by | |
10 | the Free Software Foundation; either version 3 of the License, or | |
11 | (at your option) any later version. | |
12 | ||
13 | This program is distributed in the hope that it will be useful, | |
14 | but WITHOUT ANY WARRANTY; without even the implied warranty of | |
15 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
16 | GNU General Public License for more details. | |
17 | ||
18 | You should have received a copy of the GNU General Public License | |
19 | along with this program. If not, see <http://www.gnu.org/licenses/>. */ | |
20 | ||
21 | #include "defs.h" | |
22 | ||
23 | #include "amdgcn-rocm-tdep.h" | |
24 | #include "arch-utils.h" | |
25 | #include "cli/cli-style.h" | |
26 | #include "environ.h" | |
27 | #include "event-loop.h" | |
28 | #include "filenames.h" | |
29 | #include "gdbcmd.h" | |
30 | #include "gdbcore.h" | |
31 | #include "gdbsupport/filestuff.h" | |
32 | #include "gdbsupport/scoped_fd.h" | |
33 | #include "gdbthread.h" | |
34 | #include "hashtab.h" | |
35 | #include "inf-loop.h" | |
36 | #include "inferior.h" | |
37 | #include "location.h" | |
38 | #include "objfiles.h" | |
39 | #include "observable.h" | |
40 | #include "progspace-and-thread.h" | |
41 | #include "regcache.h" | |
42 | #include "rocm-tdep.h" | |
43 | #include "solib.h" | |
44 | #include "solist.h" | |
45 | #include "symfile.h" | |
46 | ||
47 | #include <dlfcn.h> | |
48 | #include <list> | |
49 | #include <set> | |
50 | #include <signal.h> | |
51 | #include <stdarg.h> | |
52 | #include <unordered_map> | |
53 | ||
54 | #include <amd-dbgapi.h> | |
55 | ||
/* Big enough to hold the size of the largest register in bytes.  */
#define AMDGCN_MAX_REGISTER_SIZE 256

/* Helper to define an observable: expands to a definition of the
   variable NAME (declared elsewhere), using its own identifier as the
   observable's debug name.  */
#define DEFINE_OBSERVABLE(name) decltype (name) name (#name)

/* Observables notified when the dbgapi runtime support is activated or
   deactivated, and when the inferior's code object list changes (see
   rocm_process_one_event below).  */
DEFINE_OBSERVABLE (amd_dbgapi_activated);
DEFINE_OBSERVABLE (amd_dbgapi_deactivated);
DEFINE_OBSERVABLE (amd_dbgapi_code_object_list_updated);

#undef DEFINE_OBSERVABLE
66 | ||
/* Per-library record kept in rocm_inferior_info::notify_solib_map for
   shared libraries registered to receive load/unload notifications.  */
struct rocm_notify_shared_library_info
{
  std::string compare; /* Compare loaded library names with this string.  */
  struct so_list *solib;   /* Matching solib entry, presumably filled in
			      when the library is seen — confirm at the
			      map's update sites.  */
  bool is_loaded;	   /* Whether the library is currently loaded.  */
};
73 | ||
74 | /* ROCm-specific inferior data. */ | |
75 | ||
76 | struct rocm_inferior_info | |
77 | { | |
78 | /* The amd_dbgapi_process_id for this inferior. */ | |
b8ff49b7 | 79 | amd_dbgapi_process_id_t process_id{ AMD_DBGAPI_PROCESS_NONE }; |
abeeff98 LM |
80 | |
81 | /* The amd_dbgapi_notifier_t for this inferior. */ | |
b8ff49b7 | 82 | amd_dbgapi_notifier_t notifier{ -1 }; |
abeeff98 LM |
83 | |
84 | /* True if commit_resume should all-start the GPU queues. */ | |
85 | bool commit_resume_all_start; | |
86 | ||
243f18ba LM |
87 | /* True is the inferior has exited. */ |
88 | bool has_exited{ false }; | |
89 | ||
abeeff98 LM |
90 | std::unordered_map<decltype (amd_dbgapi_breakpoint_id_t::handle), |
91 | struct breakpoint *> | |
92 | breakpoint_map; | |
93 | ||
94 | /* List of pending events the rocm target retrieved from the dbgapi. */ | |
95 | std::list<std::pair<amd_dbgapi_wave_id_t, amd_dbgapi_wave_stop_reason_t>> | |
96 | wave_stop_events; | |
97 | ||
98 | /* Map of rocm_notify_shared_library_info's for libraries that have been | |
99 | registered to receive notifications when loading/unloading. */ | |
100 | std::unordered_map<decltype (amd_dbgapi_shared_library_id_t::handle), | |
101 | struct rocm_notify_shared_library_info> | |
102 | notify_solib_map; | |
103 | }; | |
104 | ||
/* Process pending dbgapi events until one of kind UNTIL_EVENT_KIND is
   seen (drain the queue completely for AMD_DBGAPI_EVENT_KIND_NONE).
   Defined below.  */
static amd_dbgapi_event_id_t
rocm_process_event_queue (amd_dbgapi_event_kind_t until_event_kind
			  = AMD_DBGAPI_EVENT_KIND_NONE);

/* Return the inferior's rocm_inferior_info struct.  */
static struct rocm_inferior_info *
get_rocm_inferior_info (struct inferior *inferior = nullptr);

/* Short name and descriptions advertised by the "rocm" target.  */
static const target_info rocm_ops_info
  = { "rocm", N_ ("ROCm GPU debugging support"),
      N_ ("ROCm GPU debugging support") };

/* Return the dbgapi log level; presumably driven by a "set debug"
   knob — definition is not in this view.  */
static amd_dbgapi_log_level_t get_debug_amd_dbgapi_log_level ();
118 | ||
/* The ROCm target vector.  Handles GPU waves itself and delegates host
   threads to the target beneath it on the target stack.  */
struct rocm_target_ops final : public target_ops
{
  const target_info &
  info () const override
  {
    return rocm_ops_info;
  }
  /* Sits at arch_stratum, above the host process target.  */
  strata
  stratum () const override
  {
    return arch_stratum;
  }

  void mourn_inferior () override;

  void async (int enable) override;

  ptid_t wait (ptid_t, struct target_waitstatus *, int) override;
  void resume (ptid_t, int, enum gdb_signal) override;
  void commit_resume () override;
  void stop (ptid_t ptid) override;

  void fetch_registers (struct regcache *, int) override;
  void store_registers (struct regcache *, int) override;

  void update_thread_list () override;

  struct gdbarch *thread_architecture (ptid_t) override;

  std::string pid_to_str (ptid_t ptid) override;

  const char *thread_name (thread_info *tp) override;

  const char *extra_thread_info (thread_info *tp) override;

  bool thread_alive (ptid_t ptid) override;

  enum target_xfer_status xfer_partial (enum target_object object,
					const char *annex, gdb_byte *readbuf,
					const gdb_byte *writebuf,
					ULONGEST offset, ULONGEST len,
					ULONGEST *xfered_len) override;

  /* Watchpoint hits are only reported for host threads; for a GPU wave
     the answer is always false.  */
  bool
  stopped_by_watchpoint () override
  {
    return !ptid_is_gpu (inferior_ptid)
	   && beneath ()->stopped_by_watchpoint ();
  }

  bool
  stopped_data_address (CORE_ADDR *addr_p) override
  {
    return !ptid_is_gpu (inferior_ptid)
	   && beneath ()->stopped_data_address (addr_p);
  }

  bool
  supports_stopped_by_sw_breakpoint () override
  {
    return true;
  }

  bool stopped_by_sw_breakpoint () override;

  /* Likewise, hardware breakpoint hits only come from host threads.  */
  bool
  stopped_by_hw_breakpoint () override
  {
    return !ptid_is_gpu (inferior_ptid)
	   && beneath ()->stopped_by_hw_breakpoint ();
  }
};
191 | ||
/* ROCm's target vector.  */
static struct rocm_target_ops rocm_ops;

/* ROCm breakpoint ops; callbacks are presumably wired up during module
   initialization — not visible in this view.  */
static struct breakpoint_ops rocm_breakpoint_ops;

/* Per-inferior data key.  */
static const struct inferior_key<rocm_inferior_info> rocm_inferior_data;

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  Both are -1 while async mode is disabled (see
   rocm_target_ops::async).  */
static int rocm_event_pipe[2] = { -1, -1 };
204 | ||
205 | /* Flush the event pipe. */ | |
206 | ||
5cf3a303 | 207 | static void |
abeeff98 LM |
208 | async_file_flush (void) |
209 | { | |
210 | int ret; | |
211 | char buf; | |
212 | ||
213 | do | |
214 | { | |
215 | ret = read (rocm_event_pipe[0], &buf, 1); | |
216 | } | |
217 | while (ret >= 0 || (ret == -1 && errno == EINTR)); | |
218 | } | |
219 | ||
220 | /* Put something (anything, doesn't matter what, or how much) in event | |
221 | pipe, so that the select/poll in the event-loop realizes we have | |
222 | something to process. */ | |
223 | ||
224 | static void | |
225 | async_file_mark (void) | |
226 | { | |
227 | int ret; | |
228 | ||
229 | /* It doesn't really matter what the pipe contains, as long we end | |
230 | up with something in it. Might as well flush the previous | |
231 | left-overs. */ | |
232 | async_file_flush (); | |
233 | ||
234 | do | |
235 | { | |
236 | ret = write (rocm_event_pipe[1], "+", 1); | |
237 | } | |
238 | while (ret == -1 && errno == EINTR); | |
239 | ||
240 | /* Ignore EAGAIN. If the pipe is full, the event loop will already | |
241 | be awakened anyway. */ | |
242 | } | |
243 | ||
244 | /* Fetch the rocm_inferior_info data for the given inferior. */ | |
245 | ||
246 | static struct rocm_inferior_info * | |
247 | get_rocm_inferior_info (struct inferior *inferior) | |
248 | { | |
249 | if (!inferior) | |
250 | inferior = current_inferior (); | |
251 | ||
252 | struct rocm_inferior_info *info = rocm_inferior_data.get (inferior); | |
253 | ||
254 | if (!info) | |
255 | info = rocm_inferior_data.emplace (inferior); | |
256 | ||
257 | return info; | |
258 | } | |
259 | ||
260 | /* Fetch the amd_dbgapi_process_id for the given inferior. */ | |
261 | ||
262 | amd_dbgapi_process_id_t | |
263 | get_amd_dbgapi_process_id (struct inferior *inferior) | |
264 | { | |
265 | return get_rocm_inferior_info (inferior)->process_id; | |
266 | } | |
267 | ||
9e275233 LM |
/* breakpoint_ops re_set callback for dbgapi-requested breakpoints.
   Deliberately empty: these breakpoints are presumably placed at
   dbgapi-provided addresses rather than symbols, so there is nothing
   to re-compute — confirm against where rocm_breakpoint_ops is
   installed.  */
static void
rocm_breakpoint_re_set (struct breakpoint *b)
{
}
272 | ||
abeeff98 LM |
/* breakpoint_ops check_status callback for dbgapi-requested
   breakpoints: report the hit to the dbgapi and carry out the action
   it requests.  GDB itself never stops or prints for these.  */
static void
rocm_breakpoint_check_status (struct bpstats *bs)
{
  struct rocm_inferior_info *info = get_rocm_inferior_info ();
  amd_dbgapi_process_id_t process_id = info->process_id;
  amd_dbgapi_status_t status;

  /* This hit is internal to the dbgapi; suppress stop and printing.  */
  bs->stop = 0;
  bs->print_it = print_it_noop;

  /* Find the address the breakpoint is set at.  */
  auto it = std::find_if (
    info->breakpoint_map.begin (), info->breakpoint_map.end (),
    [=] (const decltype (info->breakpoint_map)::value_type &value) {
      return value.second == bs->breakpoint_at;
    });

  if (it == info->breakpoint_map.end ())
    error (_ ("Could not find breakpoint_id for breakpoint at %#lx"),
	   bs->bp_location_at->address);

  amd_dbgapi_breakpoint_id_t breakpoint_id{ it->first };
  amd_dbgapi_breakpoint_action_t action;

  /* Tell the dbgapi its breakpoint was hit; it answers with the action
     to take (resume immediately, or halt until it says otherwise).  */
  status = amd_dbgapi_report_breakpoint_hit (process_id, breakpoint_id,
					     inferior_thread (), &action);

  if (status != AMD_DBGAPI_STATUS_SUCCESS)
    error (_ ("amd_dbgapi_report_breakpoint_hit failed: breakpoint_%ld "
	      "at %#lx (rc=%d)"),
	   breakpoint_id.handle, bs->bp_location_at->address, status);

  if (action == AMD_DBGAPI_BREAKPOINT_ACTION_RESUME)
    return;

  /* If the action is AMD_DBGAPI_BREAKPOINT_ACTION_HALT, we need to wait until
     a breakpoint resume event for this breakpoint_id is seen.  */

  amd_dbgapi_event_id_t resume_event_id
    = rocm_process_event_queue (AMD_DBGAPI_EVENT_KIND_BREAKPOINT_RESUME);

  /* We should always get a breakpoint_resume event after processing all
     events generated by reporting the breakpoint was hit.  */
  gdb_assert (resume_event_id.handle != AMD_DBGAPI_EVENT_NONE.handle);

  amd_dbgapi_breakpoint_id_t resume_breakpoint_id;
  status = amd_dbgapi_event_get_info (
    process_id, resume_event_id, AMD_DBGAPI_EVENT_INFO_BREAKPOINT,
    sizeof (resume_breakpoint_id), &resume_breakpoint_id);

  if (status != AMD_DBGAPI_STATUS_SUCCESS)
    error (_ ("amd_dbgapi_event_get_info failed (rc=%d)"), status);

  /* The debugger API guarantees that [breakpoint_hit...resume_breakpoint]
     sequences cannot interleave, so this breakpoint resume event must be
     for our breakpoint_id.  */
  if (resume_breakpoint_id.handle != breakpoint_id.handle)
    error (_ ("breakpoint resume event is not for this breakpoint. "
	      "Expected breakpoint_%ld, got breakpoint_%ld"),
	   breakpoint_id.handle, resume_breakpoint_id.handle);

  /* Done with the resume event.  */
  amd_dbgapi_event_processed (process_id, resume_event_id);
}
336 | ||
/* Observer attached to amd_dbgapi_activated: the inferior's runtime
   has loaded, so put the ROCm target on the target stack.  */
static void
rocm_target_dbgapi_activated ()
{
  /* FIXME: only push on the first activation.  */
  /* Engage the ROCm target_ops and so_ops.  */
  push_target (&rocm_ops);
}
344 | ||
/* Observer attached to amd_dbgapi_deactivated: the inferior's runtime
   has unloaded, so take the ROCm target off the target stack.  */
static void
rocm_target_dbgapi_deactivated ()
{
  /* FIXME: only unpush on the last activation.  */
  /* Disengage the ROCm target_ops.  */
  unpush_target (&rocm_ops);
}
352 | ||
353 | bool | |
354 | rocm_target_ops::thread_alive (ptid_t ptid) | |
355 | { | |
356 | if (!ptid_is_gpu (ptid)) | |
357 | return beneath ()->thread_alive (ptid); | |
358 | ||
359 | /* Check that the wave_id is valid. */ | |
360 | ||
361 | inferior *inf = find_inferior_ptid (ptid); | |
362 | if (!inf) | |
363 | return false; | |
364 | ||
365 | amd_dbgapi_wave_state_t state; | |
366 | return amd_dbgapi_wave_get_info ( | |
367 | get_amd_dbgapi_process_id (inf), get_amd_dbgapi_wave_id (ptid), | |
368 | AMD_DBGAPI_WAVE_INFO_STATE, sizeof (state), &state) | |
369 | == AMD_DBGAPI_STATUS_SUCCESS; | |
370 | } | |
371 | ||
/* Return a display name for TP.  For a GPU wave this is the name of
   the kernel its dispatch is executing (without argument list); host
   threads are delegated to the target beneath.  Returns NULL when no
   name can be determined.  */
const char *
rocm_target_ops::thread_name (thread_info *tp)
{
  if (!ptid_is_gpu (tp->ptid))
    return beneath ()->thread_name (tp);

  /* Map the wave to its dispatch, and the dispatch to the kernel's
     entry address.  */
  amd_dbgapi_process_id_t process_id = get_amd_dbgapi_process_id (tp->inf);
  amd_dbgapi_wave_id_t wave_id = get_amd_dbgapi_wave_id (tp->ptid);
  amd_dbgapi_dispatch_id_t dispatch_id;
  amd_dbgapi_global_address_t kernel_addr;

  if (amd_dbgapi_wave_get_info (process_id, wave_id,
				AMD_DBGAPI_WAVE_INFO_DISPATCH,
				sizeof (dispatch_id), &dispatch_id)
	!= AMD_DBGAPI_STATUS_SUCCESS
      || amd_dbgapi_dispatch_get_info (
	   process_id, dispatch_id,
	   AMD_DBGAPI_DISPATCH_INFO_KERNEL_ENTRY_ADDRESS,
	   sizeof (kernel_addr), &kernel_addr)
	   != AMD_DBGAPI_STATUS_SUCCESS)
    return NULL;

  /* Resolve the entry address to a symbol name.  */
  struct bound_minimal_symbol msymbol
    = lookup_minimal_symbol_by_pc_section (kernel_addr, nullptr);

  if (msymbol.minsym != NULL)
    {
      /* NOTE(review): single static buffer — the returned pointer is
	 clobbered by the next call, so this is not reentrant.  */
      static char buf[256];
      char *endp;

      xsnprintf (buf, sizeof (buf), "%s", msymbol.minsym->print_name ());

      /* Strip the arguments from the demangled function name.  */
      if ((endp = strchr (buf, '(')))
	*endp = '\0';

      return buf;
    }

  return NULL;
}
413 | ||
414 | std::string | |
415 | rocm_target_ops::pid_to_str (ptid_t ptid) | |
416 | { | |
417 | if (!ptid_is_gpu (ptid)) | |
418 | { | |
419 | return beneath ()->pid_to_str (ptid); | |
420 | } | |
421 | ||
422 | amd_dbgapi_process_id_t process_id = get_amd_dbgapi_process_id (); | |
423 | amd_dbgapi_wave_id_t wave_id = get_amd_dbgapi_wave_id (ptid); | |
424 | amd_dbgapi_dispatch_id_t dispatch_id; | |
425 | uint32_t group_ids[3], wave_in_group; | |
426 | ||
427 | if (amd_dbgapi_wave_get_info (process_id, wave_id, | |
428 | AMD_DBGAPI_WAVE_INFO_DISPATCH, | |
429 | sizeof (dispatch_id), &dispatch_id) | |
430 | != AMD_DBGAPI_STATUS_SUCCESS | |
431 | || amd_dbgapi_wave_get_info (process_id, wave_id, | |
432 | AMD_DBGAPI_WAVE_INFO_WORK_GROUP_COORD, | |
433 | sizeof (group_ids), &group_ids) | |
434 | != AMD_DBGAPI_STATUS_SUCCESS | |
435 | || amd_dbgapi_wave_get_info ( | |
436 | process_id, wave_id, | |
437 | AMD_DBGAPI_WAVE_INFO_WAVE_NUMBER_IN_WORK_GROUP, | |
438 | sizeof (wave_in_group), &wave_in_group) | |
439 | != AMD_DBGAPI_STATUS_SUCCESS) | |
8f156997 | 440 | return std::string ("AMDGPU Thread"); |
abeeff98 | 441 | else |
8f156997 | 442 | return string_printf ("AMDGPU Thread %ld.%ld (%d,%d,%d)/%d", |
abeeff98 LM |
443 | dispatch_id.handle, wave_id.handle, group_ids[2], |
444 | group_ids[1], group_ids[0], wave_in_group); | |
445 | } | |
446 | ||
447 | const char * | |
448 | rocm_target_ops::extra_thread_info (thread_info *tp) | |
449 | { | |
450 | if (!ptid_is_gpu (tp->ptid)) | |
451 | beneath ()->extra_thread_info (tp); | |
452 | ||
453 | return NULL; | |
454 | } | |
455 | ||
/* Implement the xfer_partial target op.  For a GPU wave, memory is
   transferred through the dbgapi in the architecture's default global
   address space; anything else is delegated to the target beneath.  */
enum target_xfer_status
rocm_target_ops::xfer_partial (enum target_object object, const char *annex,
			       gdb_byte *readbuf, const gdb_byte *writebuf,
			       ULONGEST offset, ULONGEST requested_len,
			       ULONGEST *xfered_len)
{
  gdb::optional<scoped_restore_current_thread> maybe_restore_thread;

  if (ptid_is_gpu (inferior_ptid))
    {
      gdb_assert (requested_len && xfered_len && "checking invariants");

      /* Only memory transfers are supported for GPU waves.  */
      if (object != TARGET_OBJECT_MEMORY)
	return TARGET_XFER_E_IO;

      amd_dbgapi_process_id_t process_id = get_amd_dbgapi_process_id ();
      amd_dbgapi_wave_id_t wave_id = get_amd_dbgapi_wave_id (inferior_ptid);

      amd_dbgapi_architecture_id_t architecture_id;
      amd_dbgapi_address_space_id_t address_space_id;

      /* Resolve the wave's architecture, then that architecture's
	 default global address space, for the transfer below.  */
      if (amd_dbgapi_wave_get_info (process_id, wave_id,
				    AMD_DBGAPI_WAVE_INFO_ARCHITECTURE,
				    sizeof (architecture_id),
				    &architecture_id)
	    != AMD_DBGAPI_STATUS_SUCCESS
	  || amd_dbgapi_architecture_get_info (
	       architecture_id,
	       AMD_DBGAPI_ARCHITECTURE_INFO_DEFAULT_GLOBAL_ADDRESS_SPACE,
	       sizeof (address_space_id), &address_space_id)
	       != AMD_DBGAPI_STATUS_SUCCESS)
	return TARGET_XFER_EOF;

      /* LEN is in-out: on return it holds the amount actually
	 transferred.  */
      size_t len = requested_len;
      amd_dbgapi_status_t status;

      if (readbuf)
	status = amd_dbgapi_read_memory (
	  process_id, wave_id, 0, address_space_id, offset, &len, readbuf);
      else
	status = amd_dbgapi_write_memory (
	  process_id, wave_id, 0, address_space_id, offset, &len, writebuf);

      if (status != AMD_DBGAPI_STATUS_SUCCESS)
	return TARGET_XFER_EOF;

      *xfered_len = len;
      return TARGET_XFER_OK;
    }
  else
    return beneath ()->xfer_partial (object, annex, readbuf, writebuf, offset,
				     requested_len, xfered_len);
}
508 | ||
/* Implement the resume target op.  Host resumptions go to the target
   beneath; a single GPU wave is resumed (or single-stepped) through
   the dbgapi.  Resuming all GPU waves at once is not implemented.  */
void
rocm_target_ops::resume (ptid_t ptid, int step, enum gdb_signal signo)
{
  struct rocm_inferior_info *info = get_rocm_inferior_info ();

  if (debug_infrun)
    fprintf_unfiltered (
      gdb_stdlog,
      "\e[1;34minfrun: rocm_target_ops::resume ([%d,%ld,%ld])\e[0m\n",
      ptid.pid (), ptid.lwp (), ptid.tid ());

  /* Check if the thread focus is on the GPU device.  */
  if (ptid == minus_one_ptid || !ptid_is_gpu (ptid))
    {
      beneath ()->resume (ptid, step, signo);
      /* resume-all (minus_one_ptid) must also reach the GPU side, so
	 only a specific host ptid returns here.  */
      if (ptid != minus_one_ptid)
	return;
    }

  /* A specific PTID means `step only this process id'.  */
  bool resume_one = ptid != minus_one_ptid && !ptid.is_pid ();
  gdb_assert (resume_one || !step);

  if (!resume_one)
    error (_ ("internal error - unimplemented "));

  /* Hold off forward progress until commit_resume; the flag set at the
     end asks commit_resume to re-enable it.  */
  amd_dbgapi_process_set_progress (info->process_id,
				   AMD_DBGAPI_PROGRESS_NO_FORWARD);

  amd_dbgapi_status_t status = amd_dbgapi_wave_resume (
    info->process_id, get_amd_dbgapi_wave_id (ptid),
    step ? AMD_DBGAPI_RESUME_MODE_SINGLE_STEP
	 : AMD_DBGAPI_RESUME_MODE_NORMAL);
  if (status != AMD_DBGAPI_STATUS_SUCCESS)
    warning (_ ("Could not resume %s (rc=%d)"),
	     target_pid_to_str (ptid).c_str (), status);

  info->commit_resume_all_start = true;
}
548 | ||
549 | void | |
550 | rocm_target_ops::commit_resume () | |
551 | { | |
552 | struct rocm_inferior_info *info = get_rocm_inferior_info (); | |
553 | ||
554 | if (debug_infrun) | |
555 | fprintf_unfiltered ( | |
556 | gdb_stdlog, | |
557 | "\e[1;34minfrun: rocm_target_ops::commit_resume ()\e[0m\n"); | |
558 | ||
559 | beneath ()->commit_resume (); | |
560 | ||
561 | if (info->commit_resume_all_start) | |
562 | { | |
563 | amd_dbgapi_process_set_progress (info->process_id, | |
564 | AMD_DBGAPI_PROGRESS_NORMAL); | |
565 | info->commit_resume_all_start = false; | |
566 | } | |
567 | ||
568 | if (target_can_async_p ()) | |
569 | target_async (1); | |
570 | } | |
571 | ||
572 | static void | |
573 | rocm_target_stop_one_wave (ptid_t ptid) | |
574 | { | |
575 | amd_dbgapi_status_t status; | |
576 | ||
577 | status = amd_dbgapi_wave_stop (get_amd_dbgapi_process_id (), | |
578 | get_amd_dbgapi_wave_id (ptid)); | |
579 | ||
580 | if (status == AMD_DBGAPI_STATUS_ERROR_INVALID_WAVE_ID) | |
581 | { | |
582 | /* the wave must have exited, set the thread status to reflect that. */ | |
583 | auto *tp = find_thread_ptid (ptid); | |
584 | gdb_assert (tp); | |
585 | ||
586 | tp->state = THREAD_EXITED; | |
587 | } | |
588 | else if (status != AMD_DBGAPI_STATUS_SUCCESS) | |
589 | warning (_ ("Could not stop %s (rc=%d)"), | |
590 | target_pid_to_str (ptid).c_str (), status); | |
591 | } | |
592 | ||
593 | void | |
594 | rocm_target_ops::stop (ptid_t ptid) | |
595 | { | |
596 | if (debug_infrun) | |
597 | fprintf_unfiltered ( | |
598 | gdb_stdlog, | |
599 | "\e[1;34minfrun: rocm_target_ops::stop ([%d,%ld,%ld])\e[0m\n", | |
600 | ptid.pid (), ptid.lwp (), ptid.tid ()); | |
601 | ||
602 | if (ptid == minus_one_ptid || !ptid_is_gpu (ptid)) | |
603 | { | |
604 | beneath ()->stop (ptid); | |
605 | if (ptid != minus_one_ptid) | |
606 | return; | |
607 | } | |
608 | ||
609 | if (ptid == minus_one_ptid) | |
610 | error (_ ("internal error - unimplemented ")); | |
611 | ||
612 | rocm_target_stop_one_wave (ptid); | |
613 | } | |
614 | ||
/* Event-loop callback for the event pipe: drain pending dbgapi events
   and, if any wave stop was collected, hand control to the inferior
   event handler (which ends up calling rocm_target_ops::wait).  */
static void
handle_target_event (int error, gdb_client_data client_data)
{
  struct rocm_inferior_info *info = get_rocm_inferior_info ();
  amd_dbgapi_process_id_t process_id = info->process_id;

  /* Suspend forward progress while pulling events.  */
  amd_dbgapi_process_set_progress (process_id, AMD_DBGAPI_PROGRESS_NO_FORWARD);

  /* Flush the async file first.  */
  if (target_is_async_p ())
    async_file_flush ();

  rocm_process_event_queue ();

  /* In all-stop mode, unless the event queue is empty (spurious wake-up),
     we can keep the process in progress_no_forward mode.  The infrun loop
     will enable forward progress when a thread is resumed.  */
  if (non_stop || info->wave_stop_events.empty ())
    amd_dbgapi_process_set_progress (process_id, AMD_DBGAPI_PROGRESS_NORMAL);

  if (!info->wave_stop_events.empty ())
    inferior_event_handler (INF_REG_EVENT, nullptr);
}
638 | ||
639 | void | |
640 | rocm_target_ops::async (int enable) | |
641 | { | |
642 | beneath ()->async (enable); | |
643 | ||
644 | if (enable) | |
645 | { | |
646 | if (rocm_event_pipe[0] != -1) | |
647 | return; | |
648 | ||
649 | if (gdb_pipe_cloexec (rocm_event_pipe) == -1) | |
650 | internal_error (__FILE__, __LINE__, "creating event pipe failed."); | |
651 | ||
652 | ::fcntl (rocm_event_pipe[0], F_SETFL, O_NONBLOCK); | |
653 | ::fcntl (rocm_event_pipe[1], F_SETFL, O_NONBLOCK); | |
654 | ||
655 | add_file_handler (rocm_event_pipe[0], handle_target_event, nullptr); | |
656 | ||
657 | /* There may be pending events to handle. Tell the event loop | |
658 | to poll them. */ | |
659 | async_file_mark (); | |
660 | } | |
661 | else | |
662 | { | |
663 | delete_file_handler (rocm_event_pipe[0]); | |
664 | ||
665 | if (rocm_event_pipe[0] == -1) | |
666 | return; | |
667 | ||
668 | ::close (rocm_event_pipe[0]); | |
669 | ::close (rocm_event_pipe[1]); | |
670 | rocm_event_pipe[0] = -1; | |
671 | rocm_event_pipe[1] = -1; | |
672 | } | |
673 | } | |
674 | ||
/* Process a single dbgapi event of kind EVENT_KIND, then mark it
   processed.  Wave stops are queued on the inferior's wave_stop_events
   list for rocm_target_ops::wait to report later.  */
static void
rocm_process_one_event (amd_dbgapi_event_id_t event_id,
			amd_dbgapi_event_kind_t event_kind)
{
  struct rocm_inferior_info *info = get_rocm_inferior_info ();
  amd_dbgapi_process_id_t process_id = info->process_id;
  amd_dbgapi_status_t status;

  switch (event_kind)
    {
    case AMD_DBGAPI_EVENT_KIND_WAVE_STOP:
      {
	/* Find which wave stopped and why.  */
	amd_dbgapi_wave_id_t wave_id;
	if ((status = amd_dbgapi_event_get_info (process_id, event_id,
						 AMD_DBGAPI_EVENT_INFO_WAVE,
						 sizeof (wave_id), &wave_id))
	    != AMD_DBGAPI_STATUS_SUCCESS)
	  error (_ ("event_get_info for event_%ld failed (rc=%d)"),
		 event_id.handle, status);

	amd_dbgapi_wave_stop_reason_t stop_reason;
	status = amd_dbgapi_wave_get_info (process_id, wave_id,
					   AMD_DBGAPI_WAVE_INFO_STOP_REASON,
					   sizeof (stop_reason), &stop_reason);

	if (status != AMD_DBGAPI_STATUS_SUCCESS
	    && status != AMD_DBGAPI_STATUS_ERROR_INVALID_WAVE_ID)
	  error (_ ("wave_get_info for wave_%ld failed (rc=%d)"),
		 wave_id.handle, status);

	/* The wave may have exited, or the queue went into an error
	   state.  In such cases, we will see another wave command
	   terminated event, and handle the wave termination then.  */

	if (status == AMD_DBGAPI_STATUS_SUCCESS)
	  info->wave_stop_events.emplace_back (
	    std::make_pair (wave_id, stop_reason));
      }
      break;

    case AMD_DBGAPI_EVENT_KIND_CODE_OBJECT_LIST_UPDATED:
      /* Let observers (e.g. solib handling) refresh the code object
	 list.  */
      amd_dbgapi_code_object_list_updated.notify ();
      break;

    case AMD_DBGAPI_EVENT_KIND_BREAKPOINT_RESUME:
      /* Breakpoint resume events should be handled by the breakpoint
	 action, and this code should not reach this.  */
      gdb_assert_not_reached (_ ("unhandled event kind"));
      break;

    case AMD_DBGAPI_EVENT_KIND_RUNTIME:
      {
	amd_dbgapi_runtime_state_t runtime_state;

	if ((status = amd_dbgapi_event_get_info (
	       process_id, event_id, AMD_DBGAPI_EVENT_INFO_RUNTIME_STATE,
	       sizeof (runtime_state), &runtime_state))
	    != AMD_DBGAPI_STATUS_SUCCESS)
	  error (_ ("event_get_info for event_%ld failed (rc=%d)"),
		 event_id.handle, status);

	/* (De)activate the ROCm target as the inferior's runtime comes
	   and goes, via the observables defined above.  */
	switch (runtime_state)
	  {
	  case AMD_DBGAPI_RUNTIME_STATE_LOADED_SUPPORTED:
	    amd_dbgapi_activated.notify ();
	    break;
	  case AMD_DBGAPI_RUNTIME_STATE_LOADED_UNSUPPORTED:
	    warning (_ ("ROCm-GDB: low-level runtime version not supported"));
	    break;

	  case AMD_DBGAPI_RUNTIME_STATE_UNLOADED:
	    amd_dbgapi_deactivated.notify ();
	    break;
	  }
      }
      break;

    default:
      error (_ ("event kind (%d) not supported"), event_kind);
    }

  /* Tell the dbgapi we are done with this event.  */
  amd_dbgapi_event_processed (process_id, event_id);
}
758 | ||
759 | /* Drain the amd_dbgapi event queue until an event of the given type is seen. | |
760 | If no particular event kind is specified (AMD_DBGAPI_EVENT_KIND_NONE), the | |
761 | event queue is completely drained. Wave stop events that are not returned | |
762 | are re-queued into the current's process pending wave events. */ | |
763 | static amd_dbgapi_event_id_t | |
764 | rocm_process_event_queue (amd_dbgapi_event_kind_t until_event_kind) | |
765 | { | |
766 | struct rocm_inferior_info *info = get_rocm_inferior_info (); | |
767 | ||
768 | while (true) | |
769 | { | |
770 | amd_dbgapi_event_id_t event_id; | |
771 | amd_dbgapi_event_kind_t event_kind; | |
772 | ||
773 | amd_dbgapi_status_t status = amd_dbgapi_next_pending_event ( | |
774 | info->process_id, &event_id, &event_kind); | |
775 | ||
776 | if (status != AMD_DBGAPI_STATUS_SUCCESS) | |
777 | error (_ ("next_pending_event failed (rc=%d)"), status); | |
778 | ||
779 | if (event_id.handle == AMD_DBGAPI_EVENT_NONE.handle | |
780 | || event_kind == until_event_kind) | |
781 | return event_id; | |
782 | ||
783 | rocm_process_one_event (event_id, event_kind); | |
784 | } | |
785 | } | |
786 | ||
/* Implement the wait target op.  Give the target beneath a chance to
   report a host event first; otherwise drain the dbgapi event queue
   and report the oldest pending wave stop as a stopped thread with an
   appropriate signal.  */
ptid_t
rocm_target_ops::wait (ptid_t ptid, struct target_waitstatus *ws,
		       int target_options)
{
  if (debug_infrun)
    fprintf_unfiltered (gdb_stdlog,
			"\e[1;34minfrun: rocm_target_ops::wait\e[0m\n");

  if (!ptid_is_gpu (ptid))
    {
      ptid_t event_ptid = beneath ()->wait (ptid, ws, target_options);
      if (event_ptid != minus_one_ptid)
	return event_ptid;
    }

  struct rocm_inferior_info *info = get_rocm_inferior_info ();
  amd_dbgapi_process_id_t process_id = info->process_id;

  /* Drain all the events from the amd_dbgapi, and preserve the ordering.  */
  if (info->wave_stop_events.empty ())
    {
      amd_dbgapi_process_set_progress (process_id,
				       AMD_DBGAPI_PROGRESS_NO_FORWARD);

      /* Flush the async file first.  */
      if (target_is_async_p ())
	async_file_flush ();

      rocm_process_event_queue ();

      /* In all-stop mode, unless the event queue is empty (spurious wake-up),
	 we can keep the process in progress_no_forward mode.  The infrun loop
	 will enable forward progress when a thread is resumed.  */
      if (non_stop || info->wave_stop_events.empty ())
	amd_dbgapi_process_set_progress (process_id,
					 AMD_DBGAPI_PROGRESS_NORMAL);
    }

  /* Nothing to report.  */
  if (info->wave_stop_events.empty ())
    return minus_one_ptid;

  /* Pop the oldest wave stop event.  */
  amd_dbgapi_wave_id_t event_wave_id;
  amd_dbgapi_wave_stop_reason_t stop_reason;

  std::tie (event_wave_id, stop_reason) = info->wave_stop_events.front ();
  info->wave_stop_events.pop_front ();

  /* GPU ptids carry the wave handle in the tid field, with lwp 1.  */
  ptid_t event_ptid (current_inferior ()->pid, 1, event_wave_id.handle);

  /* First time this wave is seen: create its thread on the fly.  */
  if (!find_thread_ptid (event_ptid))
    {
      add_thread_silent (event_ptid);
      set_running (event_ptid, 1);
      set_executing (event_ptid, 1);
    }

  /* Since we are manipulating the register cache for the event thread,
     make sure it is the current thread.  */
  switch_to_thread (event_ptid);

  /* By caching the PC now, we avoid having to suspend/resume the queue
     later when we need to access it.  */
  amd_dbgapi_global_address_t stop_pc;
  if (amd_dbgapi_wave_get_info (process_id, event_wave_id,
				AMD_DBGAPI_WAVE_INFO_PC, sizeof (stop_pc),
				&stop_pc)
      == AMD_DBGAPI_STATUS_SUCCESS)
    {
      struct regcache *regcache = get_thread_regcache (event_ptid);
      regcache->raw_supply (gdbarch_pc_regnum (regcache->arch ()), &stop_pc);
    }
  ws->kind = TARGET_WAITKIND_STOPPED;

  /* Map the dbgapi stop reason to a GDB signal: breakpoints and single
     steps are SIGTRAP, memory violations SIGSEGV, arithmetic
     exceptions SIGFPE, everything else no signal.  */
  if (stop_reason
      & (AMD_DBGAPI_WAVE_STOP_REASON_BREAKPOINT
	 | AMD_DBGAPI_WAVE_STOP_REASON_SINGLE_STEP))
    ws->value.sig = GDB_SIGNAL_TRAP;
  else if (stop_reason & AMD_DBGAPI_WAVE_STOP_REASON_MEMORY_VIOLATION)
    ws->value.sig = GDB_SIGNAL_SEGV;
  else if (stop_reason
	   & (AMD_DBGAPI_WAVE_STOP_REASON_FP_INPUT_DENORMAL
	      | AMD_DBGAPI_WAVE_STOP_REASON_FP_DIVIDE_BY_0
	      | AMD_DBGAPI_WAVE_STOP_REASON_FP_OVERFLOW
	      | AMD_DBGAPI_WAVE_STOP_REASON_FP_UNDERFLOW
	      | AMD_DBGAPI_WAVE_STOP_REASON_FP_INEXACT
	      | AMD_DBGAPI_WAVE_STOP_REASON_FP_INVALID_OPERATION
	      | AMD_DBGAPI_WAVE_STOP_REASON_INT_DIVIDE_BY_0))
    ws->value.sig = GDB_SIGNAL_FPE;
  else
    ws->value.sig = GDB_SIGNAL_0;

  /* If there are more events in the list, mark the async file so that
     rocm_target_ops::wait gets called again.  */
  if (target_is_async_p () && !info->wave_stop_events.empty ())
    async_file_mark ();

  return event_ptid;
}
885 | ||
886 | bool | |
887 | rocm_target_ops::stopped_by_sw_breakpoint () | |
888 | { | |
889 | if (!ptid_is_gpu (inferior_ptid)) | |
890 | return beneath ()->supports_stopped_by_sw_breakpoint () | |
891 | && beneath ()->stopped_by_sw_breakpoint (); | |
892 | ||
893 | /* FIXME: we should check that the wave is not single-stepping. */ | |
894 | ||
895 | struct regcache *regcache = get_thread_regcache (inferior_ptid); | |
896 | ||
897 | CORE_ADDR bkpt_pc = regcache_read_pc (regcache) | |
898 | - gdbarch_decr_pc_after_break (regcache->arch ()); | |
899 | ||
900 | return software_breakpoint_inserted_here_p (regcache->aspace (), bkpt_pc); | |
901 | } | |
902 | ||
903 | void | |
904 | rocm_target_ops::mourn_inferior () | |
905 | { | |
906 | /* FIXME: only unpush on the last activation. */ | |
907 | /* Disengage the ROCm target_ops. */ | |
908 | unpush_target (&rocm_ops); | |
909 | ||
910 | beneath ()->mourn_inferior (); | |
911 | } | |
912 | ||
/* Fetch the value of register REGNO into REGCACHE.

   Host-architecture register caches are delegated to the target
   beneath; AMDGCN wave registers are read through the dbgapi.  */

void
rocm_target_ops::fetch_registers (struct regcache *regcache, int regno)
{
  struct gdbarch *gdbarch = regcache->arch ();
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  /* Delegate to the host routines when not on the device.  */

  if (!rocm_is_amdgcn_gdbarch (gdbarch))
    {
      beneath ()->fetch_registers (regcache, regno);
      return;
    }

  inferior *inf = find_inferior_ptid (regcache->ptid ());
  amd_dbgapi_process_id_t process_id = get_amd_dbgapi_process_id (inf);
  amd_dbgapi_wave_id_t wave_id = get_amd_dbgapi_wave_id (regcache->ptid ());

  gdb_byte raw[AMDGCN_MAX_REGISTER_SIZE];

  /* NOTE(review): REGNO indexes tdep->register_ids directly; a regno
     of -1 ("all registers") would be out of bounds here — confirm
     callers always pass a specific register number for this target.  */
  amd_dbgapi_status_t status = amd_dbgapi_read_register (
      process_id, wave_id, tdep->register_ids[regno], 0,
      TYPE_LENGTH (register_type (gdbarch, regno)), raw);

  if (status == AMD_DBGAPI_STATUS_SUCCESS)
    {
      regcache->raw_supply (regno, raw);
    }
  else if (status != AMD_DBGAPI_STATUS_ERROR_INVALID_REGISTER_ID)
    {
      /* Invalid-register-id failures are silently ignored; any other
	 failure is worth a warning.  */
      warning (_ ("Couldn't read register %s (#%d)."),
	       gdbarch_register_name (gdbarch, regno), regno);
    }
}
947 | ||
948 | void | |
949 | rocm_target_ops::store_registers (struct regcache *regcache, int regno) | |
950 | { | |
951 | struct gdbarch *gdbarch = regcache->arch (); | |
952 | struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch); | |
953 | gdb_byte raw[AMDGCN_MAX_REGISTER_SIZE]; | |
954 | ||
955 | if (!rocm_is_amdgcn_gdbarch (gdbarch)) | |
956 | { | |
957 | beneath ()->store_registers (regcache, regno); | |
958 | return; | |
959 | } | |
960 | ||
961 | inferior *inf = find_inferior_ptid (regcache->ptid ()); | |
962 | amd_dbgapi_process_id_t process_id = get_amd_dbgapi_process_id (inf); | |
963 | amd_dbgapi_wave_id_t wave_id = get_amd_dbgapi_wave_id (regcache->ptid ()); | |
964 | ||
965 | regcache->raw_collect (regno, &raw); | |
966 | ||
967 | amd_dbgapi_status_t status = amd_dbgapi_write_register ( | |
968 | process_id, wave_id, tdep->register_ids[regno], 0, | |
969 | TYPE_LENGTH (register_type (gdbarch, regno)), raw); | |
970 | ||
971 | if (status != AMD_DBGAPI_STATUS_SUCCESS) | |
972 | { | |
973 | warning (_ ("Couldn't write register %s (#%d)."), | |
974 | gdbarch_register_name (gdbarch, regno), regno); | |
975 | } | |
976 | } | |
977 | ||
978 | /* Fix breakpoints created with an address location while the | |
979 | architecture was set to the host (could be fixed in core GDB). */ | |
980 | ||
981 | static void | |
982 | rocm_target_breakpoint_fixup (struct breakpoint *b) | |
983 | { | |
984 | if (b->location.get () | |
985 | && event_location_type (b->location.get ()) == ADDRESS_LOCATION | |
986 | && gdbarch_bfd_arch_info (b->loc->gdbarch)->arch == bfd_arch_amdgcn | |
987 | && gdbarch_bfd_arch_info (b->gdbarch)->arch != bfd_arch_amdgcn) | |
988 | { | |
989 | b->gdbarch = b->loc->gdbarch; | |
990 | } | |
991 | } | |
992 | ||
993 | struct gdbarch * | |
994 | rocm_target_ops::thread_architecture (ptid_t ptid) | |
995 | { | |
996 | static std::result_of<decltype (&ptid_t::tid) (ptid_t)>::type last_tid = 0; | |
997 | static struct gdbarch *cached_arch = nullptr; | |
998 | ||
999 | if (!ptid_is_gpu (ptid)) | |
1000 | return beneath ()->thread_architecture (ptid); | |
1001 | ||
1002 | /* We can cache the gdbarch for a given wave_id (ptid::tid) because | |
1003 | wave IDs are unique, and aren't reused. */ | |
1004 | if (ptid.tid () == last_tid) | |
1005 | return cached_arch; | |
1006 | ||
1007 | amd_dbgapi_process_id_t process_id = get_amd_dbgapi_process_id (); | |
1008 | amd_dbgapi_wave_id_t wave_id = get_amd_dbgapi_wave_id (ptid); | |
1009 | amd_dbgapi_architecture_id_t architecture_id; | |
1010 | ||
1011 | if (amd_dbgapi_wave_get_info (process_id, wave_id, | |
1012 | AMD_DBGAPI_WAVE_INFO_ARCHITECTURE, | |
1013 | sizeof (architecture_id), &architecture_id) | |
1014 | != AMD_DBGAPI_STATUS_SUCCESS) | |
1015 | error (_ ("Couldn't get architecture for wave_%ld"), ptid.tid ()); | |
1016 | ||
1017 | uint32_t elf_amdgpu_machine; | |
1018 | if (amd_dbgapi_architecture_get_info ( | |
1019 | architecture_id, AMD_DBGAPI_ARCHITECTURE_INFO_ELF_AMDGPU_MACHINE, | |
1020 | sizeof (elf_amdgpu_machine), &elf_amdgpu_machine) | |
1021 | != AMD_DBGAPI_STATUS_SUCCESS) | |
1022 | error (_ ("Couldn't get elf_amdgpu_machine for architecture_%ld"), | |
1023 | architecture_id.handle); | |
1024 | ||
1025 | struct gdbarch_info info; | |
1026 | gdbarch_info_init (&info); | |
1027 | ||
1028 | info.bfd_arch_info = bfd_lookup_arch (bfd_arch_amdgcn, elf_amdgpu_machine); | |
1029 | info.byte_order = BFD_ENDIAN_LITTLE; | |
1030 | info.osabi = GDB_OSABI_AMDGPU_HSA; | |
1031 | ||
1032 | last_tid = ptid.tid (); | |
1033 | if (!(cached_arch = gdbarch_find_by_info (info))) | |
1034 | error (_ ("Couldn't get elf_amdgpu_machine (%#x)"), elf_amdgpu_machine); | |
1035 | ||
1036 | return cached_arch; | |
1037 | } | |
1038 | ||
/* Synchronize GDB's thread list with the dbgapi's wave list for every
   attached inferior, then give the target beneath a chance to update
   its own (host) threads.  */

void
rocm_target_ops::update_thread_list ()
{
  for (inferior *inf : all_inferiors ())
    {
      amd_dbgapi_process_id_t process_id;
      amd_dbgapi_wave_id_t *wave_list;
      size_t count;

      process_id = get_amd_dbgapi_process_id (inf);
      if (process_id.handle == AMD_DBGAPI_PROCESS_NONE.handle)
	{
	  /* The inferior may not be attached yet.  */
	  continue;
	}

      amd_dbgapi_changed_t changed;
      amd_dbgapi_status_t status;
      if ((status
	   = amd_dbgapi_wave_list (process_id, &count, &wave_list, &changed))
	  != AMD_DBGAPI_STATUS_SUCCESS)
	error (_ ("amd_dbgapi_wave_list failed (rc=%d)"), status);

      /* NOTE(review): when nothing changed we skip without freeing
	 WAVE_LIST — presumably the library returns no list in that
	 case; confirm against the dbgapi documentation.  */
      if (changed == AMD_DBGAPI_CHANGED_NO)
	continue;

      /* Create a set and free the wave list.  */
      std::set<std::result_of<decltype (&ptid_t::tid) (ptid_t)>::type> threads;
      for (size_t i = 0; i < count; ++i)
	threads.emplace (wave_list[i].handle);
      xfree (wave_list);

      /* Then prune the wave_ids that already have a thread_info.  */
      for (thread_info *tp : inf->non_exited_threads ())
	if (ptid_is_gpu (tp->ptid))
	  threads.erase (tp->ptid.tid ());

      /* The wave_ids that are left require a new thread_info.  */
      for (auto &&tid : threads)
	{
	  ptid_t wave_ptid (inf->pid, 1, tid);
	  /* FIXME: is this really needed?
	  amd_dbgapi_wave_state_t state;

	  if (amd_dbgapi_wave_get_info (
		  process_id, get_amd_dbgapi_wave_id (wave_ptid),
		  AMD_DBGAPI_WAVE_INFO_STATE, sizeof (state), &state)
	      != AMD_DBGAPI_STATUS_SUCCESS)
	    continue;*/

	  add_thread_silent (wave_ptid);
	  set_running (wave_ptid, 1);
	  set_executing (wave_ptid, 1);
	}
    }

  /* Give the beneath target a chance to do extra processing.  */
  this->beneath ()->update_thread_list ();
}
1098 | ||
1099 | static void | |
1100 | rocm_target_solib_loaded (struct so_list *solib) | |
1101 | { | |
1102 | /* Notify the amd_dbgapi that a shared library has been loaded. */ | |
1103 | for (auto &&value : get_rocm_inferior_info ()->notify_solib_map) | |
1104 | /* TODO: If we want to support file name wildcards, change this code. */ | |
1105 | if (::strstr (solib->so_original_name, value.second.compare.c_str ()) | |
1106 | && !value.second.is_loaded) | |
1107 | { | |
1108 | value.second.solib = solib; | |
1109 | value.second.is_loaded = true; | |
1110 | ||
1111 | amd_dbgapi_report_shared_library ( | |
1112 | get_amd_dbgapi_process_id (), | |
1113 | amd_dbgapi_shared_library_id_t{ value.first }, | |
1114 | AMD_DBGAPI_SHARED_LIBRARY_STATE_LOADED); | |
1115 | } | |
1116 | } | |
1117 | ||
/* solib_unloaded observer: tell the amd_dbgapi when a shared library
   it registered interest in is about to unload, and clean up any of
   our breakpoints still planted in it.  */

static void
rocm_target_solib_unloaded (struct so_list *solib)
{
  /* Notify the amd_dbgapi that a shared library will unload.  */
  for (auto &&value : get_rocm_inferior_info ()->notify_solib_map)
    /* TODO: If we want to support file name wildcards, change this code.  */
    if (::strstr (solib->so_original_name, value.second.compare.c_str ())
	&& value.second.is_loaded)
      {
	struct rocm_inferior_info *info = get_rocm_inferior_info ();

	amd_dbgapi_report_shared_library (
	    info->process_id, amd_dbgapi_shared_library_id_t{ value.first },
	    AMD_DBGAPI_SHARED_LIBRARY_STATE_UNLOADED);

	/* Delete breakpoints that were left inserted in this shared
	   library.  Removal uses the iterator returned by erase so it
	   is safe while iterating over the map.  */
	for (auto it = info->breakpoint_map.begin ();
	     it != info->breakpoint_map.end ();)
	  if (solib_contains_address_p (solib, it->second->loc->address))
	    {
	      warning (_ ("breakpoint_%ld is still inserted after "
			  "shared_library_%ld was unloaded"),
		       it->first, value.first);
	      delete_breakpoint (it->second);
	      it = info->breakpoint_map.erase (it);
	    }
	  else
	    ++it;

	value.second.solib = nullptr;
	value.second.is_loaded = false;
      }
}
1152 | ||
/* inferior_created observer: attach the amd_dbgapi library to the new
   inferior and hook its event notifier into GDB's event loop.  */

static void
rocm_target_inferior_created (struct target_ops *target, int from_tty)
{
  struct inferior *inf = current_inferior ();
  auto *info = get_rocm_inferior_info (inf);
  amd_dbgapi_status_t status;

  /* GPU events are delivered asynchronously; without target-async we
     could never service them.  */
  if (!target_can_async_p ())
    {
      warning (
	  _ ("ROCm-GDB requires target-async, GPU debugging is disabled"));
      return;
    }

  gdb_assert (info->wave_stop_events.empty ());

  status = amd_dbgapi_process_attach (inf, &info->process_id);

  if (status == AMD_DBGAPI_STATUS_ERROR_VERSION_MISMATCH)
    warning (_ ("The version of the kernel driver does not match the version "
		"required by the ROCm debugger library"));

  if (status != AMD_DBGAPI_STATUS_SUCCESS)
    {
      warning (_ ("Could not attach to process %d"), inf->pid);
      return;
    }

  /* Retrieve the notifier file descriptor the library will write to
     when it has events pending.  On failure, undo the attach.  */
  if (amd_dbgapi_process_get_info (info->process_id,
				   AMD_DBGAPI_PROCESS_INFO_NOTIFIER,
				   sizeof (info->notifier), &info->notifier)
      != AMD_DBGAPI_STATUS_SUCCESS)
    {
      warning (_ ("Could not retrieve process %d's notifier"), inf->pid);
      amd_dbgapi_process_detach (info->process_id);
      return;
    }

  /* We add a file handler for events returned by the debugger api.  We'll
     use this handler to signal our async handler that events are
     available.  */
  add_file_handler (
      info->notifier,
      [] (int error, gdb_client_data client_data) {
	auto info_ = static_cast<struct rocm_inferior_info *> (client_data);
	int ret;

	/* Drain the notifier pipe.  NOTE(review): this loop only exits
	   on a read error — presumably the notifier fd is non-blocking
	   so the drain ends with EAGAIN; confirm, else this would
	   block.  */
	do
	  {
	    char buf;
	    ret = read (info_->notifier, &buf, 1);
	  }
	while (ret >= 0 || (ret == -1 && errno == EINTR));

	/* Signal our async handler.  */
	async_file_mark ();
      },
      info);

  /* Attaching to the inferior may have generated runtime events, process
     them now.  */
  rocm_process_event_queue ();
}
1216 | ||
/* inferior_exit observer: tear down all dbgapi state attached to INF.  */

static void
rocm_target_inferior_exit (struct inferior *inf)
{
  auto *info = get_rocm_inferior_info (inf);

  /* Mark the process as exited first: the get_os_pid callback checks
     this flag and reports PROCESS_EXITED to the library during the
     detach below.  */
  info->has_exited = true;

  amd_dbgapi_deactivated.notify ();

  /* The notifier may never have been set up (attach failure).  */
  if (info->notifier != -1)
    delete_file_handler (info->notifier);

  amd_dbgapi_process_detach (info->process_id);

  /* Delete the breakpoints that are still active.  */
  for (auto &&value : info->breakpoint_map)
    delete_breakpoint (value.second);

  rocm_inferior_data.clear (inf);
}
1236 | ||
/* Output styles used by the log_message callback below to colorize
   amd-dbgapi messages by severity.  */
static cli_style_option warning_style ("rocm_warning", ui_file_style::RED);
static cli_style_option info_style ("rocm_info", ui_file_style::GREEN);
static cli_style_option verbose_style ("rocm_verbose", ui_file_style::BLUE);
1240 | ||
/* Callback table handed to the amd-dbgapi library.  The library uses
   these hooks to allocate memory, map its client-process handle back
   to a GDB inferior, track shared libraries, plant breakpoints, and
   route its log messages through GDB's output streams.  */

static amd_dbgapi_callbacks_t dbgapi_callbacks = {
  /* allocate_memory.  */
  .allocate_memory = malloc,

  /* deallocate_memory.  */
  .deallocate_memory = free,

  /* get_os_pid.  Report the OS pid of the client process, or
     PROCESS_EXITED once the inferior is gone.  */
  .get_os_pid = [] (amd_dbgapi_client_process_id_t client_process_id,
		    pid_t *pid) -> amd_dbgapi_status_t {
    inferior *inf = static_cast<inferior *> (client_process_id);
    struct rocm_inferior_info *info = get_rocm_inferior_info (inf);

    if (info->has_exited)
      return AMD_DBGAPI_STATUS_ERROR_PROCESS_EXITED;

    *pid = inf->pid;
    return AMD_DBGAPI_STATUS_SUCCESS;
  },

  /* enable_notify_shared_library callback.  Register LIBRARY_NAME so
     the solib observers report its load/unload to the library, and
     return its current load state.  */
  .enable_notify_shared_library
  = [] (amd_dbgapi_client_process_id_t client_process_id,
	const char *library_name, amd_dbgapi_shared_library_id_t library_id,
	amd_dbgapi_shared_library_state_t *library_state)
  -> amd_dbgapi_status_t {
    inferior *inf = static_cast<inferior *> (client_process_id);
    struct rocm_inferior_info *info = get_rocm_inferior_info (inf);

    if (!library_name || !library_state)
      return AMD_DBGAPI_STATUS_ERROR_INVALID_ARGUMENT;

    if (info->notify_solib_map.find (library_id.handle)
	!= info->notify_solib_map.end ())
      {
	/* This library id is already registered.  */
	return AMD_DBGAPI_STATUS_ERROR;
      }

    /* Check whether the library is already loaded.  If the loop finds
       no match, SOLIB ends up as nullptr, which is what is stored in
       the map entry below.  */
    bool is_loaded = false;
    struct so_list *solib;
    for (solib = inf->pspace->so_list; solib; solib = solib->next)
      if (::strstr (solib->so_original_name, library_name))
	{
	  is_loaded = true;
	  break;
	}

    /* Check that the library_name is valid.  It must not be empty, and
       should not have wildcard characters.  */
    if (*library_name == '\0'
	|| std::string (library_name).find_first_of ("*?[]")
	     != std::string::npos)
      return AMD_DBGAPI_STATUS_ERROR_INVALID_ARGUMENT;

    /* Add a new entry in the notify_solib_map.  */
    if (!info->notify_solib_map
	   .emplace (std::piecewise_construct,
		     std::forward_as_tuple (library_id.handle),
		     std::forward_as_tuple (rocm_notify_shared_library_info{
			 library_name, solib, is_loaded }))
	   .second)
      return AMD_DBGAPI_STATUS_ERROR;

    *library_state = is_loaded ? AMD_DBGAPI_SHARED_LIBRARY_STATE_LOADED
			       : AMD_DBGAPI_SHARED_LIBRARY_STATE_UNLOADED;

    return AMD_DBGAPI_STATUS_SUCCESS;
  },

  /* disable_notify_shared_library callback.  Forget a previously
     registered library id.  */
  .disable_notify_shared_library
  = [] (amd_dbgapi_client_process_id_t client_process_id,
	amd_dbgapi_shared_library_id_t library_id) -> amd_dbgapi_status_t {
    inferior *inf = static_cast<inferior *> (client_process_id);
    struct rocm_inferior_info *info = get_rocm_inferior_info (inf);

    auto it = info->notify_solib_map.find (library_id.handle);
    if (it == info->notify_solib_map.end ())
      return AMD_DBGAPI_STATUS_ERROR_INVALID_SHARED_LIBRARY_ID;

    info->notify_solib_map.erase (it);
    return AMD_DBGAPI_STATUS_SUCCESS;
  },

  /* get_symbol_address callback.  Look up SYMBOL_NAME in a registered
     (and loaded) library's minimal symbols.  */
  .get_symbol_address =
    [] (amd_dbgapi_client_process_id_t client_process_id,
	amd_dbgapi_shared_library_id_t library_id, const char *symbol_name,
	amd_dbgapi_global_address_t *address) {
      inferior *inf = static_cast<inferior *> (client_process_id);
      struct rocm_inferior_info *info = get_rocm_inferior_info (inf);

      auto it = info->notify_solib_map.find (library_id.handle);
      if (it == info->notify_solib_map.end ())
	return AMD_DBGAPI_STATUS_ERROR_INVALID_SHARED_LIBRARY_ID;

      struct so_list *solib = it->second.solib;
      if (!solib)
	return AMD_DBGAPI_STATUS_ERROR_LIBRARY_NOT_LOADED;

      /* Make sure the library's symbols have been read before the
	 minimal-symbol lookup.  */
      solib_read_symbols (solib, 0);
      gdb_assert (solib->objfile);

      struct bound_minimal_symbol msymbol
	  = lookup_minimal_symbol (symbol_name, NULL, solib->objfile);

      if (!msymbol.minsym || BMSYMBOL_VALUE_ADDRESS (msymbol) == 0)
	return AMD_DBGAPI_STATUS_ERROR_SYMBOL_NOT_FOUND;

      *address = BMSYMBOL_VALUE_ADDRESS (msymbol);
      return AMD_DBGAPI_STATUS_SUCCESS;
    },

  /* add_breakpoint callback.  Plant an internal breakpoint at ADDRESS
     on the library's behalf and remember it by BREAKPOINT_ID.  */
  .add_breakpoint =
    [] (amd_dbgapi_client_process_id_t client_process_id,
	amd_dbgapi_shared_library_id_t shared_library_id,
	amd_dbgapi_global_address_t address,
	amd_dbgapi_breakpoint_id_t breakpoint_id) {
      inferior *inf = static_cast<inferior *> (client_process_id);
      struct rocm_inferior_info *info = get_rocm_inferior_info (inf);

      /* Initialize the breakpoint ops lazily since we depend on
	 bkpt_breakpoint_ops and we can't control the order in which
	 initializers are called.  */
      if (rocm_breakpoint_ops.check_status == NULL)
	{
	  rocm_breakpoint_ops = bkpt_breakpoint_ops;
	  rocm_breakpoint_ops.check_status = rocm_breakpoint_check_status;
	  rocm_breakpoint_ops.re_set = rocm_breakpoint_re_set;
	}

      auto it = info->breakpoint_map.find (breakpoint_id.handle);
      if (it != info->breakpoint_map.end ())
	return AMD_DBGAPI_STATUS_ERROR_INVALID_BREAKPOINT_ID;

      /* Create a new breakpoint.  */
      struct obj_section *section = find_pc_section (address);
      if (!section || !section->objfile)
	return AMD_DBGAPI_STATUS_ERROR;

      event_location_up location
	  = new_address_location (address, nullptr, 0);
      if (!create_breakpoint (
	      get_objfile_arch (section->objfile), location.get (),
	      /*cond_string*/ NULL, /*thread*/ -1, /*extra_string*/ NULL,
	      /*parse_extra*/ 0, /*tempflag*/ 0, /*bptype*/ bp_breakpoint,
	      /*ignore_count*/ 0, /*pending_break*/ AUTO_BOOLEAN_FALSE,
	      /*ops*/ &rocm_breakpoint_ops, /*from_tty*/ 0,
	      /*enabled*/ 1, /*internal*/ 1, /*flags*/ 0))
	return AMD_DBGAPI_STATUS_ERROR;

      /* Find our breakpoint in the breakpoint list.  create_breakpoint
	 does not return the breakpoint it made, so locate it by ops,
	 address space and address.  */
      auto bp_loc = std::make_pair (inf->aspace, address);
      auto bp = breakpoint_find_if (
	  [] (struct breakpoint *b, void *data) {
	    auto *arg = static_cast<decltype (&bp_loc)> (data);
	    if (b->ops == &rocm_breakpoint_ops && b->loc
		&& b->loc->pspace->aspace == arg->first
		&& b->loc->address == arg->second)
	      return 1;
	    return 0;
	  },
	  reinterpret_cast<void *> (&bp_loc));

      if (!bp)
	error (_ ("Could not find breakpoint"));

      info->breakpoint_map.emplace (breakpoint_id.handle, bp);
      return AMD_DBGAPI_STATUS_SUCCESS;
    },

  /* remove_breakpoint callback.  Delete a breakpoint previously
     created through add_breakpoint.  */
  .remove_breakpoint =
    [] (amd_dbgapi_client_process_id_t client_process_id,
	amd_dbgapi_breakpoint_id_t breakpoint_id) {
      inferior *inf = static_cast<inferior *> (client_process_id);
      struct rocm_inferior_info *info = get_rocm_inferior_info (inf);

      auto it = info->breakpoint_map.find (breakpoint_id.handle);
      if (it == info->breakpoint_map.end ())
	return AMD_DBGAPI_STATUS_ERROR_INVALID_BREAKPOINT_ID;

      delete_breakpoint (it->second);
      info->breakpoint_map.erase (it);

      return AMD_DBGAPI_STATUS_SUCCESS;
    },

  /* set_breakpoint_state callback.  Enable or disable one of our
     library-owned breakpoints.  */
  .set_breakpoint_state =
    [] (amd_dbgapi_client_process_id_t client_process_id,
	amd_dbgapi_breakpoint_id_t breakpoint_id,
	amd_dbgapi_breakpoint_state_t breakpoint_state) {
      inferior *inf = static_cast<inferior *> (client_process_id);
      struct rocm_inferior_info *info = get_rocm_inferior_info (inf);

      auto it = info->breakpoint_map.find (breakpoint_id.handle);
      if (it == info->breakpoint_map.end ())
	return AMD_DBGAPI_STATUS_ERROR_INVALID_BREAKPOINT_ID;

      if (breakpoint_state == AMD_DBGAPI_BREAKPOINT_STATE_ENABLE)
	it->second->enable_state = bp_enabled;
      else if (breakpoint_state == AMD_DBGAPI_BREAKPOINT_STATE_DISABLE)
	it->second->enable_state = bp_disabled;
      else
	return AMD_DBGAPI_STATUS_ERROR_INVALID_ARGUMENT;

      return AMD_DBGAPI_STATUS_SUCCESS;
    },

  /* log_message callback.  Emit a library log message on GDB's output
     streams, styled by severity, honoring the "set debug amd-dbgapi
     log-level" filter.  */
  .log_message
  = [] (amd_dbgapi_log_level_t level, const char *message) -> void {
    gdb::optional<target_terminal::scoped_restore_terminal_state> tstate;

    if (level > get_debug_amd_dbgapi_log_level ())
      return;

    /* Messages can arrive while the inferior owns the terminal; borrow
       it for output and restore it when TSTATE goes out of scope.  */
    if (target_supports_terminal_ours ())
      {
	tstate.emplace ();
	target_terminal::ours_for_output ();
      }

    if (filtered_printing_initialized ())
      wrap_here ("");

    struct ui_file *out_file
	= (level >= AMD_DBGAPI_LOG_LEVEL_INFO) ? gdb_stdlog : gdb_stderr;

    switch (level)
      {
      case AMD_DBGAPI_LOG_LEVEL_FATAL_ERROR:
	fputs_unfiltered ("[amd-dbgapi]: ", out_file);
	break;
      case AMD_DBGAPI_LOG_LEVEL_WARNING:
	fputs_styled ("[amd-dbgapi]: ", warning_style.style (), out_file);
	break;
      case AMD_DBGAPI_LOG_LEVEL_INFO:
	fputs_styled ("[amd-dbgapi]: ", info_style.style (), out_file);
	break;
      case AMD_DBGAPI_LOG_LEVEL_VERBOSE:
	fputs_styled ("[amd-dbgapi]: ", verbose_style.style (), out_file);
	break;
      }

    fputs_unfiltered (message, out_file);
    fputs_unfiltered ("\n", out_file);
  }
};
1493 | ||
/* Implementation of the `_wave_id' convenience variable.

   For a GPU wave this produces a string built from the wave's
   work-group coordinates and its wave number within the work-group,
   formatted "(%d,%d,%d)/%d" (coordinates printed from group_ids[2]
   down to group_ids[0]).  For host threads, or if either dbgapi query
   fails, it produces a void value.  */

static struct value *
rocm_wave_id_make_value (struct gdbarch *gdbarch, struct internalvar *var,
			 void *ignore)
{
  if (ptid_is_gpu (inferior_ptid))
    {
      amd_dbgapi_process_id_t process_id = get_amd_dbgapi_process_id ();
      amd_dbgapi_wave_id_t wave_id = get_amd_dbgapi_wave_id (inferior_ptid);
      uint32_t group_ids[3], wave_in_group;

      if (amd_dbgapi_wave_get_info (process_id, wave_id,
				    AMD_DBGAPI_WAVE_INFO_WORK_GROUP_COORD,
				    sizeof (group_ids), &group_ids)
	  == AMD_DBGAPI_STATUS_SUCCESS
	  && amd_dbgapi_wave_get_info (
	       process_id, wave_id,
	       AMD_DBGAPI_WAVE_INFO_WAVE_NUMBER_IN_WORK_GROUP,
	       sizeof (wave_in_group), &wave_in_group)
	       == AMD_DBGAPI_STATUS_SUCCESS)
	{
	  std::string wave_id_str
	      = string_printf ("(%d,%d,%d)/%d", group_ids[2], group_ids[1],
			       group_ids[0], wave_in_group);

	  return value_cstring (wave_id_str.data (), wave_id_str.length () + 1,
				builtin_type (gdbarch)->builtin_char);
	}
    }

  return allocate_value (builtin_type (gdbarch)->builtin_void);
}
1527 | ||
/* Internalvar function table for `_wave_id': only the value factory is
   provided, the remaining two hooks are unused.  */
static const struct internalvar_funcs rocm_wave_id_funcs
    = { rocm_wave_id_make_value, NULL, NULL };
1530 | ||
/* Prefix command lists for the "set debug amd-dbgapi" and
   "show debug amd-dbgapi" subcommands.  */
struct cmd_list_element *set_debug_amd_dbgapi_list;
struct cmd_list_element *show_debug_amd_dbgapi_list;
1534 | ||
1535 | static void | |
1536 | set_debug_amd_dbgapi (const char *arg, int from_tty) | |
1537 | { | |
1538 | help_list (set_debug_amd_dbgapi_list, "set debug amd-dbgapi ", | |
1539 | (enum command_class) - 1, gdb_stdout); | |
1540 | } | |
1541 | ||
1542 | static void | |
1543 | show_debug_amd_dbgapi (const char *args, int from_tty) | |
1544 | { | |
1545 | cmd_show_list (show_debug_amd_dbgapi_list, from_tty, ""); | |
1546 | } | |
1547 | ||
/* Valid values for "set debug amd-dbgapi log-level".  */
constexpr char amd_dbgapi_log_level_off[] = "off";
constexpr char amd_dbgapi_log_level_error[] = "error";
constexpr char amd_dbgapi_log_level_warning[] = "warning";
constexpr char amd_dbgapi_log_level_info[] = "info";
constexpr char amd_dbgapi_log_level_verbose[] = "verbose";

/* Enum-command list, indexed by amd_dbgapi_log_level_t values (uses
   the GNU designated-array-initializer extension so the index of each
   string matches the library's log-level constant).  */
constexpr const char *debug_amd_dbgapi_log_level_enums[]
    = { [AMD_DBGAPI_LOG_LEVEL_NONE] = amd_dbgapi_log_level_off,
	[AMD_DBGAPI_LOG_LEVEL_FATAL_ERROR] = amd_dbgapi_log_level_error,
	[AMD_DBGAPI_LOG_LEVEL_WARNING] = amd_dbgapi_log_level_warning,
	[AMD_DBGAPI_LOG_LEVEL_INFO] = amd_dbgapi_log_level_info,
	[AMD_DBGAPI_LOG_LEVEL_VERBOSE] = amd_dbgapi_log_level_verbose,
	nullptr };

/* Current log-level setting; always aliases one of the entries in
   debug_amd_dbgapi_log_level_enums.  */
static const char *debug_amd_dbgapi_log_level = amd_dbgapi_log_level_error;
1563 | ||
/* Map the current "set debug amd-dbgapi log-level" setting back to an
   amd_dbgapi_log_level_t.  The setting always aliases one of the
   entries of debug_amd_dbgapi_log_level_enums (GDB's enum-command
   machinery stores the chosen entry's pointer), so pointer identity —
   not strcmp — is the correct comparison here.  */

static amd_dbgapi_log_level_t
get_debug_amd_dbgapi_log_level ()
{
  size_t pos;
  for (pos = 0; debug_amd_dbgapi_log_level_enums[pos]; ++pos)
    if (debug_amd_dbgapi_log_level == debug_amd_dbgapi_log_level_enums[pos])
      break;

  /* The setting must match some entry; reaching the NULL terminator
     means internal corruption.  */
  gdb_assert (debug_amd_dbgapi_log_level_enums[pos]);
  return static_cast<amd_dbgapi_log_level_t> (pos);
}
1575 | ||
1576 | static void | |
1577 | set_debug_amd_dbgapi_log_level (const char *args, int from_tty, | |
1578 | struct cmd_list_element *c) | |
1579 | { | |
1580 | amd_dbgapi_set_log_level (get_debug_amd_dbgapi_log_level ()); | |
1581 | } | |
1582 | ||
/* Show-hook for "show debug amd-dbgapi log-level".  */

static void
show_debug_amd_dbgapi_log_level (struct ui_file *file, int from_tty,
				 struct cmd_list_element *c,
				 const char *value)
{
  fprintf_filtered (file, _ ("The amd-dbgapi log level is %s.\n"), value);
}
1590 | ||
1591 | static void | |
1592 | info_agents_command (const char *args, int from_tty) | |
1593 | { | |
1594 | amd_dbgapi_process_id_t process_id = get_amd_dbgapi_process_id (); | |
1595 | struct ui_out *uiout = current_uiout; | |
1596 | amd_dbgapi_status_t status; | |
1597 | ||
1598 | amd_dbgapi_agent_id_t *agent_list; | |
1599 | size_t count = 0; | |
1600 | ||
1601 | if (process_id.handle != AMD_DBGAPI_PROCESS_NONE.handle | |
1602 | && (status | |
1603 | = amd_dbgapi_agent_list (process_id, &count, &agent_list, nullptr)) | |
1604 | != AMD_DBGAPI_STATUS_SUCCESS) | |
1605 | error (_ ("amd_dbgapi_agent_list failed (rc=%d)"), status); | |
1606 | ||
1607 | if (!count && !uiout->is_mi_like_p ()) | |
1608 | { | |
1609 | uiout->field_string (NULL, | |
1610 | _ ("No agents are currently active.\n")); | |
1611 | return; | |
1612 | } | |
1613 | ||
1614 | /* Calculate the maximum size needed to print the agents names. */ | |
1615 | std::vector<std::string> agent_names (count); | |
1616 | ||
1617 | size_t max_name_len = 0; | |
1618 | for (size_t i = 0; i < count; ++i) | |
1619 | { | |
1620 | char *agent_name; | |
1621 | ||
1622 | if ((status = amd_dbgapi_agent_get_info ( | |
1623 | process_id, agent_list[i], AMD_DBGAPI_AGENT_INFO_NAME, | |
1624 | sizeof (agent_name), &agent_name)) | |
1625 | != AMD_DBGAPI_STATUS_SUCCESS) | |
1626 | { | |
1627 | if (status == AMD_DBGAPI_STATUS_ERROR_INVALID_AGENT_ID) | |
1628 | agent_names[i] = "N/A"; | |
1629 | else | |
1630 | error (_ ("amd_dbgapi_agent_get_info failed (rc=%d"), status); | |
1631 | } | |
1632 | else | |
1633 | { | |
1634 | agent_names[i] = agent_name; | |
1635 | xfree (agent_name); | |
1636 | } | |
1637 | ||
1638 | max_name_len = std::max (max_name_len, agent_names[i].size ()); | |
1639 | } | |
1640 | ||
1641 | /* Header: */ | |
1642 | ui_out_emit_table table_emmitter (uiout, 7, count, "InfoRocmDevicesTable"); | |
1643 | ||
1644 | uiout->table_header (2, ui_left, "agent_id", "Id"); | |
1645 | uiout->table_header (8, ui_left, "location_id", "PCI Slot"); | |
1646 | uiout->table_header (std::max (11ul, max_name_len), ui_left, "name", | |
1647 | "Device Name"); | |
1648 | uiout->table_header (14, ui_left, "num_se", "Shader Engines"); | |
1649 | uiout->table_header (13, ui_left, "num_cu", "Compute Units"); | |
1650 | uiout->table_header (7, ui_left, "simd", "SIMD/CU"); | |
1651 | uiout->table_header (15, ui_left, "waves", "Wavefronts/SIMD"); | |
1652 | uiout->table_body (); | |
1653 | ||
1654 | /* Rows: */ | |
1655 | for (size_t i = 0; i < count; ++i) | |
1656 | { | |
1657 | ui_out_emit_tuple tuple_emitter (uiout, "InfoRocmDevicesRow"); | |
1658 | ||
1659 | /* agent */ | |
1660 | uiout->field_signed ("agent_id", agent_list[i].handle); | |
1661 | ||
1662 | /* location */ | |
1663 | uint32_t location_id; | |
1664 | if ((status = amd_dbgapi_agent_get_info ( | |
1665 | process_id, agent_list[i], AMD_DBGAPI_AGENT_INFO_PCIE_SLOT, | |
1666 | sizeof (location_id), &location_id)) | |
1667 | != AMD_DBGAPI_STATUS_SUCCESS) | |
1668 | { | |
1669 | if (status == AMD_DBGAPI_STATUS_ERROR_INVALID_AGENT_ID) | |
1670 | uiout->field_string ("location_id", "N/A"); | |
1671 | else | |
1672 | error (_ ("amd_dbgapi_agent_get_info failed (rc=%d"), status); | |
1673 | } | |
1674 | else | |
1675 | uiout->field_string ( | |
1676 | "location_id", | |
1677 | string_printf ("%02x:%02x.%d", (location_id >> 8) & 0xFF, | |
1678 | (location_id >> 3) & 0x1F, location_id & 0x7)); | |
1679 | ||
1680 | /* name */ | |
1681 | uiout->field_string ("name", agent_names[i]); | |
1682 | ||
1683 | /* num_se, num_cu, simd, waves */ | |
1684 | ||
1685 | #define UIOUT_FIELD_INT(name, query) \ | |
1686 | uint32_t name; \ | |
1687 | if ((status = amd_dbgapi_agent_get_info (process_id, agent_list[i], query, \ | |
1688 | sizeof (name), &name)) \ | |
1689 | != AMD_DBGAPI_STATUS_SUCCESS) \ | |
1690 | { \ | |
1691 | if (status == AMD_DBGAPI_STATUS_ERROR_INVALID_AGENT_ID) \ | |
1692 | uiout->field_string (#name, "N/A"); \ | |
1693 | else \ | |
1694 | error (_ ("amd_dbgapi_agent_get_info failed (rc=%d"), status); \ | |
1695 | } \ | |
1696 | else \ | |
1697 | uiout->field_signed (#name, name); | |
1698 | ||
1699 | UIOUT_FIELD_INT (num_se, AMD_DBGAPI_AGENT_INFO_SHADER_ENGINE_COUNT); | |
1700 | UIOUT_FIELD_INT (num_cu, AMD_DBGAPI_AGENT_INFO_COMPUTE_UNIT_COUNT); | |
1701 | UIOUT_FIELD_INT (simd, AMD_DBGAPI_AGENT_INFO_NUM_SIMD_PER_COMPUTE_UNIT); | |
1702 | UIOUT_FIELD_INT (waves, AMD_DBGAPI_AGENT_INFO_MAX_WAVES_PER_SIMD); | |
1703 | ||
1704 | #undef UIOUT_FIELD_INT | |
1705 | ||
1706 | uiout->text ("\n"); | |
1707 | } | |
1708 | ||
1709 | xfree (agent_list); | |
1710 | gdb_flush (gdb_stdout); | |
1711 | } | |
1712 | ||
1713 | /* -Wmissing-prototypes */ | |
1714 | extern initialize_file_ftype _initialize_rocm_tdep; | |
1715 | ||
1716 | void | |
1717 | _initialize_rocm_tdep (void) | |
1718 | { | |
48e5c3f7 LM |
1719 | /* Make sure the loaded debugger library version is greater than or equal to |
1720 | the one used to build ROCgdb. */ | |
1721 | uint32_t major, minor, patch; | |
1722 | amd_dbgapi_get_version (&major, &minor, &patch); | |
1723 | if (major != AMD_DBGAPI_VERSION_MAJOR || minor < AMD_DBGAPI_VERSION_MINOR) | |
1724 | error ( | |
1725 | _ ("amd-dbgapi library version mismatch, got %d.%d.%d, need %d.%d+"), | |
1726 | major, minor, patch, AMD_DBGAPI_VERSION_MAJOR, | |
1727 | AMD_DBGAPI_VERSION_MINOR); | |
1728 | ||
abeeff98 | 1729 | /* Initialize the ROCm Debug API. */ |
48e5c3f7 LM |
1730 | amd_dbgapi_status_t status = amd_dbgapi_initialize (&dbgapi_callbacks); |
1731 | if (status != AMD_DBGAPI_STATUS_SUCCESS) | |
1732 | error (_ ("amd-dbgapi failed to initialize (rc=%d)"), status); | |
abeeff98 LM |
1733 | |
1734 | /* Set the initial log level. */ | |
1735 | amd_dbgapi_set_log_level (get_debug_amd_dbgapi_log_level ()); | |
1736 | ||
1737 | /* Install observers. */ | |
1738 | gdb::observers::breakpoint_created.attach (rocm_target_breakpoint_fixup); | |
1739 | gdb::observers::solib_loaded.attach (rocm_target_solib_loaded); | |
1740 | gdb::observers::solib_unloaded.attach (rocm_target_solib_unloaded); | |
1741 | gdb::observers::inferior_created.attach (rocm_target_inferior_created); | |
1742 | gdb::observers::inferior_exit.attach (rocm_target_inferior_exit); | |
1743 | ||
1744 | amd_dbgapi_activated.attach (rocm_target_dbgapi_activated); | |
1745 | amd_dbgapi_deactivated.attach (rocm_target_dbgapi_deactivated); | |
1746 | ||
1747 | create_internalvar_type_lazy ("_wave_id", &rocm_wave_id_funcs, NULL); | |
1748 | ||
1749 | add_prefix_cmd ( | |
1750 | "amd-dbgapi", no_class, set_debug_amd_dbgapi, | |
1751 | _ ("Generic command for setting amd-dbgapi debugging flags"), | |
1752 | &set_debug_amd_dbgapi_list, "set debug amd-dbgapi ", 0, &setdebuglist); | |
1753 | ||
1754 | add_prefix_cmd ( | |
1755 | "amd-dbgapi", no_class, show_debug_amd_dbgapi, | |
1756 | _ ("Generic command for showing amd-dbgapi debugging flags"), | |
1757 | &show_debug_amd_dbgapi_list, "show debug amd-dbgapi ", 0, | |
1758 | &showdebuglist); | |
1759 | ||
1760 | add_setshow_enum_cmd ( | |
1761 | "log-level", class_maintenance, debug_amd_dbgapi_log_level_enums, | |
1762 | &debug_amd_dbgapi_log_level, _ ("Set the amd-dbgapi log level."), | |
1763 | _ ("Show the amd-dbgapi log level."), | |
1764 | _ ("off == no logging is enabled\n" | |
1765 | "error == fatal errors are reported\n" | |
1766 | "warning == fatal errors and warnings are reported\n" | |
1767 | "info == fatal errors, warnings, and info messages are reported\n" | |
1768 | "verbose == all messages are reported"), | |
1769 | set_debug_amd_dbgapi_log_level, show_debug_amd_dbgapi_log_level, | |
1770 | &set_debug_amd_dbgapi_list, &show_debug_amd_dbgapi_list); | |
1771 | ||
1772 | add_cmd ("agents", class_info, info_agents_command, | |
1773 | _ ("Info about currently active agents."), &infolist); | |
1774 | } |