/* Ada Ravenscar thread support.

   Copyright (C) 2004-2020 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "gdbcore.h"
#include "gdbthread.h"
#include "ada-lang.h"
#include "target.h"
#include "inferior.h"
#include "command.h"
#include "ravenscar-thread.h"
#include "observable.h"
#include "gdbcmd.h"
#include "top.h"
#include "regcache.h"
#include "objfiles.h"

/* This module provides support for "Ravenscar" tasks (Ada) when
   debugging on bare-metal targets.

   The typical situation is when debugging a bare-metal target over
   the remote protocol.  In that situation, the system does not know
   about high-level concepts such as threads, only about some code
   running on one or more CPUs.  And since the remote protocol does not
   provide any handling for CPUs, the de facto standard for handling
   them is to have one thread per CPU, where the thread's ptid has
   its lwp field set to the CPU number (e.g.: 1 for the first CPU,
   2 for the second one, etc.).  This module makes that assumption.

   This module then creates and maintains the list of threads based
   on the list of Ada tasks, with one thread per Ada task.  The convention
   is that threads corresponding to the CPUs (see assumption above)
   have a ptid_t of the form (PID, LWP, 0), while threads corresponding
   to our Ada tasks have a ptid_t of the form (PID, 0, TID), where TID
   is the Ada task's ID as extracted from Ada runtime information.

   Switching to a given Ada task (or its underlying thread) is performed
   by fetching the registers of that task from the memory area where
   the registers were saved.  All other operations are performed by
   first finding the CPU on which the task is running, switching to its
   corresponding ptid, and then performing the operation on that ptid
   using the target beneath us.  */

/* If true, ravenscar task support is enabled.  */
static bool ravenscar_task_support = true;

static const char running_thread_name[] = "__gnat_running_thread_table";

static const char known_tasks_name[] = "system__tasking__debug__known_tasks";
static const char first_task_name[] = "system__tasking__debug__first_task";

static const char ravenscar_runtime_initializer[]
  = "system__bb__threads__initialize";

static const target_info ravenscar_target_info = {
  "ravenscar",
  N_("Ravenscar tasks."),
  N_("Ravenscar tasks support.")
};

struct ravenscar_thread_target final : public target_ops
{
  ravenscar_thread_target ()
  {
    update_inferior_ptid ();
  }

  const target_info &info () const override
  { return ravenscar_target_info; }

  strata stratum () const override { return thread_stratum; }

  ptid_t wait (ptid_t, struct target_waitstatus *, int) override;
  void resume (ptid_t, int, enum gdb_signal) override;

  void fetch_registers (struct regcache *, int) override;
  void store_registers (struct regcache *, int) override;

  void prepare_to_store (struct regcache *) override;

  bool stopped_by_sw_breakpoint () override;

  bool stopped_by_hw_breakpoint () override;

  bool stopped_by_watchpoint () override;

  bool stopped_data_address (CORE_ADDR *) override;

  bool thread_alive (ptid_t ptid) override;

  int core_of_thread (ptid_t ptid) override;

  void update_thread_list () override;

  const char *extra_thread_info (struct thread_info *) override;

  std::string pid_to_str (ptid_t) override;

  ptid_t get_ada_task_ptid (long lwp, long thread) override;

  void mourn_inferior () override;

  void close () override
  {
    delete this;
  }

private:

  /* PTID of the last thread that received an event.
     This can be useful to determine the associated task that received
     the event, to make it the current task.  */
  ptid_t m_base_ptid = null_ptid;

  void update_inferior_ptid ();
  ptid_t active_task (int cpu);
  bool task_is_currently_active (ptid_t ptid);
  bool runtime_initialized ();
};

/* Return true iff PTID corresponds to a ravenscar task.  */

static bool
is_ravenscar_task (ptid_t ptid)
{
  /* By construction, ravenscar tasks have their LWP set to zero.
     Also make sure that the TID is nonzero, as some remotes, when
     asked for the list of threads, will return the first thread
     as having its TID set to zero.  For instance, TSIM version
     2.0.48 for LEON3 sends 'm0' as a reply to the 'qfThreadInfo'
     query, which the remote protocol layer then treats as a thread
     whose TID is 0.  This is obviously not a ravenscar task.  */
  return ptid.lwp () == 0 && ptid.tid () != 0;
}

/* Given PTID, which can be either a ravenscar task or a CPU thread,
   return the CPU on which that ptid is running.

   This assumes that PTID is a valid ptid_t.  Otherwise, a gdb_assert
   will be triggered.  */

static int
ravenscar_get_thread_base_cpu (ptid_t ptid)
{
  int base_cpu;

  if (is_ravenscar_task (ptid))
    {
      struct ada_task_info *task_info = ada_get_task_info_from_ptid (ptid);

      gdb_assert (task_info != NULL);
      base_cpu = task_info->base_cpu;
    }
  else
    {
      /* We assume that the LWP of the PTID is equal to the CPU number.  */
      base_cpu = ptid.lwp ();
    }

  return base_cpu;
}

/* Given a ravenscar task (identified by its ptid_t PTID), return true
   if this task is the currently active task on the CPU that task is
   running on.

   In other words, this function determines which CPU this task is
   currently running on, and then returns true if the CPU in question
   is executing the code for that task.  If that's the case, then
   that task's registers are in the CPU bank.  Otherwise, the task
   is currently suspended, and its registers have been saved in memory.  */

bool
ravenscar_thread_target::task_is_currently_active (ptid_t ptid)
{
  ptid_t active_task_ptid
    = active_task (ravenscar_get_thread_base_cpu (ptid));

  return ptid == active_task_ptid;
}

/* Return the CPU thread (as a ptid_t) on which the given ravenscar
   task is running.

   If PTID does not correspond to a ravenscar task, return it
   unchanged.  */

static ptid_t
get_base_thread_from_ravenscar_task (ptid_t ptid)
{
  int base_cpu;

  if (!is_ravenscar_task (ptid))
    return ptid;

  base_cpu = ravenscar_get_thread_base_cpu (ptid);
  return ptid_t (ptid.pid (), base_cpu, 0);
}

/* Fetch the ravenscar running thread from target memory and
   update inferior_ptid accordingly.  */

void
ravenscar_thread_target::update_inferior_ptid ()
{
  process_stratum_target *proc_target
    = as_process_stratum_target (this->beneath ());

  int base_cpu;

  m_base_ptid = inferior_ptid;

  gdb_assert (!is_ravenscar_task (inferior_ptid));
  base_cpu = ravenscar_get_thread_base_cpu (m_base_ptid);

  /* If the runtime has not been initialized yet, the inferior_ptid is
     the only ptid that there is.  */
  if (!runtime_initialized ())
    return;

  /* Make sure we set m_base_ptid before calling active_task
     as the latter relies on it.  */
  inferior_ptid = active_task (base_cpu);
  gdb_assert (inferior_ptid != null_ptid);

  /* The running thread may not have been added to
     system.tasking.debug's list yet; so ravenscar_update_thread_list
     may not always add it to the thread list.  Add it here.  */
  if (!find_thread_ptid (proc_target, inferior_ptid))
    add_thread (proc_target, inferior_ptid);
}

/* The Ravenscar Runtime exports a symbol which contains the ID of
   the thread that is currently running.  Try to locate that symbol
   and return its associated minimal symbol.
   Return NULL if not found.  */

static struct bound_minimal_symbol
get_running_thread_msymbol ()
{
  struct bound_minimal_symbol msym;

  msym = lookup_minimal_symbol (running_thread_name, NULL, NULL);
  if (!msym.minsym)
    /* Older versions of the GNAT runtime were using a different
       (less ideal) name for the symbol where the active thread ID
       is stored.  If we couldn't find the symbol using the latest
       name, then try the old one.  */
    msym = lookup_minimal_symbol ("running_thread", NULL, NULL);

  return msym;
}

/* Return True if the Ada Ravenscar run-time can be found in the
   application.  */

static bool
has_ravenscar_runtime ()
{
  struct bound_minimal_symbol msym_ravenscar_runtime_initializer
    = lookup_minimal_symbol (ravenscar_runtime_initializer, NULL, NULL);
  struct bound_minimal_symbol msym_known_tasks
    = lookup_minimal_symbol (known_tasks_name, NULL, NULL);
  struct bound_minimal_symbol msym_first_task
    = lookup_minimal_symbol (first_task_name, NULL, NULL);
  struct bound_minimal_symbol msym_running_thread
    = get_running_thread_msymbol ();

  return (msym_ravenscar_runtime_initializer.minsym
          && (msym_known_tasks.minsym || msym_first_task.minsym)
          && msym_running_thread.minsym);
}

/* Return True if the Ada Ravenscar run-time can be found in the
   application, and if it has been initialized on target.  */

bool
ravenscar_thread_target::runtime_initialized ()
{
  return active_task (1) != null_ptid;
}

/* Return the ID of the thread that is currently running.
   Return 0 if the ID could not be determined.  */

static CORE_ADDR
get_running_thread_id (int cpu)
{
  struct bound_minimal_symbol object_msym = get_running_thread_msymbol ();
  int object_size;
  int buf_size;
  gdb_byte *buf;
  CORE_ADDR object_addr;
  struct type *builtin_type_void_data_ptr
    = builtin_type (target_gdbarch ())->builtin_data_ptr;

  if (!object_msym.minsym)
    return 0;

  object_size = TYPE_LENGTH (builtin_type_void_data_ptr);
  object_addr = (BMSYMBOL_VALUE_ADDRESS (object_msym)
                 + (cpu - 1) * object_size);
  buf_size = object_size;
  buf = (gdb_byte *) alloca (buf_size);
  read_memory (object_addr, buf, buf_size);
  return extract_typed_address (buf, builtin_type_void_data_ptr);
}

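/* Implement the to_resume target_ops "method".  */
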
void
ravenscar_thread_target::resume (ptid_t ptid, int step,
                                 enum gdb_signal siggnal)
{
  /* If we see a wildcard resume, we simply pass that on.  Otherwise,
     arrange to resume the base ptid.  */
  inferior_ptid = m_base_ptid;
  if (ptid != minus_one_ptid)
    ptid = m_base_ptid;
  beneath ()->resume (ptid, step, siggnal);
}

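/* Implement the to_wait target_ops "method".  */
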
ptid_t
ravenscar_thread_target::wait (ptid_t ptid,
                               struct target_waitstatus *status,
                               int options)
{
  process_stratum_target *beneath
    = as_process_stratum_target (this->beneath ());
  ptid_t event_ptid;

  inferior_ptid = m_base_ptid;
  if (ptid != minus_one_ptid)
    ptid = m_base_ptid;
  event_ptid = beneath->wait (ptid, status, 0);
  /* Find any new threads that might have been created, and update
     inferior_ptid to the active thread.

     Only do it if the program is still alive, though.  Otherwise,
     this causes problems when debugging through the remote protocol,
     because we might try switching threads (and thus sending packets)
     after the remote has disconnected.  */
  if (status->kind != TARGET_WAITKIND_EXITED
      && status->kind != TARGET_WAITKIND_SIGNALLED)
    {
      inferior_ptid = event_ptid;
      this->update_thread_list ();
      this->update_inferior_ptid ();
    }
  else
    inferior_ptid = m_base_ptid;
  return inferior_ptid;
}

/* Add the thread associated to the given TASK to the thread list
   (if the thread has already been added, this is a no-op).  */

static void
ravenscar_add_thread (struct ada_task_info *task)
{
  if (find_thread_ptid (current_inferior (), task->ptid) == NULL)
    add_thread (current_inferior ()->process_target (), task->ptid);
}

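/* Implement the to_update_thread_list target_ops "method".  */
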
void
ravenscar_thread_target::update_thread_list ()
{
  /* Do not clear the thread list before adding the Ada task, to keep
     the thread that the process stratum has included into it
     (m_base_ptid) and the running thread, that may not have been included
     to system.tasking.debug's list yet.  */

  iterate_over_live_ada_tasks (ravenscar_add_thread);
}

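/* Return the ptid_t of the task currently running on CPU, or
   null_ptid if the running thread ID could not be determined.  */
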
ptid_t
ravenscar_thread_target::active_task (int cpu)
{
  CORE_ADDR tid = get_running_thread_id (cpu);

  if (tid == 0)
    return null_ptid;
  else
    return ptid_t (m_base_ptid.pid (), 0, tid);
}

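/* Implement the to_extra_thread_info target_ops "method".  */
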
const char *
ravenscar_thread_target::extra_thread_info (thread_info *tp)
{
  return "Ravenscar task";
}

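/* Implement the to_thread_alive target_ops "method".  */
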
bool
ravenscar_thread_target::thread_alive (ptid_t ptid)
{
  /* Ravenscar tasks are non-terminating.  */
  return true;
}

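/* Implement the to_pid_to_str target_ops "method".  */
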
std::string
ravenscar_thread_target::pid_to_str (ptid_t ptid)
{
  return string_printf ("Thread %#x", (int) ptid.tid ());
}

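/* Implement the to_fetch_registers target_ops "method".  If the task
   is not the one currently running on its CPU, its registers are
   fetched from the memory area where the runtime saved them;
   otherwise the request is passed to the target beneath.  */
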
void
ravenscar_thread_target::fetch_registers (struct regcache *regcache, int regnum)
{
  ptid_t ptid = regcache->ptid ();

  if (runtime_initialized ()
      && is_ravenscar_task (ptid)
      && !task_is_currently_active (ptid))
    {
      struct gdbarch *gdbarch = regcache->arch ();
      struct ravenscar_arch_ops *arch_ops
        = gdbarch_ravenscar_ops (gdbarch);

      arch_ops->fetch_registers (regcache, regnum);
    }
  else
    beneath ()->fetch_registers (regcache, regnum);
}

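/* Implement the to_store_registers target_ops "method".  The
   counterpart of fetch_registers: registers of a task that is not
   currently running on its CPU are written back to the memory area
   where they were saved, rather than to the CPU itself.  */
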
void
ravenscar_thread_target::store_registers (struct regcache *regcache,
                                          int regnum)
{
  ptid_t ptid = regcache->ptid ();

  if (runtime_initialized ()
      && is_ravenscar_task (ptid)
      && !task_is_currently_active (ptid))
    {
      struct gdbarch *gdbarch = regcache->arch ();
      struct ravenscar_arch_ops *arch_ops
        = gdbarch_ravenscar_ops (gdbarch);

      arch_ops->store_registers (regcache, regnum);
    }
  else
    beneath ()->store_registers (regcache, regnum);
}

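/* Implement the to_prepare_to_store target_ops "method".  */
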
void
ravenscar_thread_target::prepare_to_store (struct regcache *regcache)
{
  ptid_t ptid = regcache->ptid ();

  if (runtime_initialized ()
      && is_ravenscar_task (ptid)
      && !task_is_currently_active (ptid))
    {
      /* Nothing.  */
    }
  else
    beneath ()->prepare_to_store (regcache);
}

/* Implement the to_stopped_by_sw_breakpoint target_ops "method".  */

bool
ravenscar_thread_target::stopped_by_sw_breakpoint ()
{
  scoped_restore save_ptid = make_scoped_restore (&inferior_ptid);
  inferior_ptid = get_base_thread_from_ravenscar_task (inferior_ptid);
  return beneath ()->stopped_by_sw_breakpoint ();
}

/* Implement the to_stopped_by_hw_breakpoint target_ops "method".  */

bool
ravenscar_thread_target::stopped_by_hw_breakpoint ()
{
  scoped_restore save_ptid = make_scoped_restore (&inferior_ptid);
  inferior_ptid = get_base_thread_from_ravenscar_task (inferior_ptid);
  return beneath ()->stopped_by_hw_breakpoint ();
}

/* Implement the to_stopped_by_watchpoint target_ops "method".  */

bool
ravenscar_thread_target::stopped_by_watchpoint ()
{
  scoped_restore save_ptid = make_scoped_restore (&inferior_ptid);
  inferior_ptid = get_base_thread_from_ravenscar_task (inferior_ptid);
  return beneath ()->stopped_by_watchpoint ();
}

/* Implement the to_stopped_data_address target_ops "method".  */

bool
ravenscar_thread_target::stopped_data_address (CORE_ADDR *addr_p)
{
  scoped_restore save_ptid = make_scoped_restore (&inferior_ptid);
  inferior_ptid = get_base_thread_from_ravenscar_task (inferior_ptid);
  return beneath ()->stopped_data_address (addr_p);
}

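/* Implement the to_mourn_inferior target_ops "method".  */
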
void
ravenscar_thread_target::mourn_inferior ()
{
  m_base_ptid = null_ptid;
  target_ops *beneath = this->beneath ();
  unpush_target (this);
  beneath->mourn_inferior ();
}

/* Implement the to_core_of_thread target_ops "method".  */

int
ravenscar_thread_target::core_of_thread (ptid_t ptid)
{
  scoped_restore save_ptid = make_scoped_restore (&inferior_ptid);
  inferior_ptid = get_base_thread_from_ravenscar_task (inferior_ptid);
  return beneath ()->core_of_thread (inferior_ptid);
}

/* Observer on inferior_created: push ravenscar thread stratum if needed.  */

static void
ravenscar_inferior_created (struct target_ops *target, int from_tty)
{
  const char *err_msg;

  if (!ravenscar_task_support
      || gdbarch_ravenscar_ops (target_gdbarch ()) == NULL
      || !has_ravenscar_runtime ())
    return;

  err_msg = ada_get_tcb_types_info ();
  if (err_msg != NULL)
    {
      warning (_("%s. Task/thread support disabled."), err_msg);
      return;
    }

  target_ops_up target_holder (new ravenscar_thread_target ());
  push_target (std::move (target_holder));
}

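/* Implement the to_get_ada_task_ptid target_ops "method".  */
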
ptid_t
ravenscar_thread_target::get_ada_task_ptid (long lwp, long thread)
{
  return ptid_t (m_base_ptid.pid (), 0, thread);
}

/* Command-list for the "set/show ravenscar" prefix command.  */
static struct cmd_list_element *set_ravenscar_list;
static struct cmd_list_element *show_ravenscar_list;

/* Implement the "set ravenscar" prefix command.  */

static void
set_ravenscar_command (const char *arg, int from_tty)
{
  printf_unfiltered (_(\
"\"set ravenscar\" must be followed by the name of a setting.\n"));
  help_list (set_ravenscar_list, "set ravenscar ", all_commands, gdb_stdout);
}

/* Implement the "show ravenscar" prefix command.  */

static void
show_ravenscar_command (const char *args, int from_tty)
{
  cmd_show_list (show_ravenscar_list, from_tty, "");
}

/* Implement the "show ravenscar task-switching" command.  */

static void
show_ravenscar_task_switching_command (struct ui_file *file, int from_tty,
                                       struct cmd_list_element *c,
                                       const char *value)
{
  if (ravenscar_task_support)
    fprintf_filtered (file, _("\
Support for Ravenscar task/thread switching is enabled\n"));
  else
    fprintf_filtered (file, _("\
Support for Ravenscar task/thread switching is disabled\n"));
}

/* Module startup initialization function, automagically called by
   init.c.  */

void _initialize_ravenscar ();
void
_initialize_ravenscar ()
{
  /* Notice when the inferior is created in order to push the
     ravenscar ops if needed.  */
  gdb::observers::inferior_created.attach (ravenscar_inferior_created);

  add_prefix_cmd ("ravenscar", no_class, set_ravenscar_command,
                  _("Prefix command for changing Ravenscar-specific settings."),
                  &set_ravenscar_list, "set ravenscar ", 0, &setlist);

  add_prefix_cmd ("ravenscar", no_class, show_ravenscar_command,
                  _("Prefix command for showing Ravenscar-specific settings."),
                  &show_ravenscar_list, "show ravenscar ", 0, &showlist);

  add_setshow_boolean_cmd ("task-switching", class_obscure,
                           &ravenscar_task_support, _("\
Enable or disable support for GNAT Ravenscar tasks."), _("\
Show whether support for GNAT Ravenscar tasks is enabled."),
                           _("\
Enable or disable support for task/thread switching with the GNAT\n\
Ravenscar run-time library for bareboard configuration."),
                           NULL, show_ravenscar_task_switching_command,
                           &set_ravenscar_list, &show_ravenscar_list);
}