Automatic date update in version.in
[deliverable/binutils-gdb.git] / gdb / ravenscar-thread.c
1 /* Ada Ravenscar thread support.
2
3 Copyright (C) 2004-2021 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "defs.h"
21 #include "gdbcore.h"
22 #include "gdbthread.h"
23 #include "ada-lang.h"
24 #include "target.h"
25 #include "inferior.h"
26 #include "command.h"
27 #include "ravenscar-thread.h"
28 #include "observable.h"
29 #include "gdbcmd.h"
30 #include "top.h"
31 #include "regcache.h"
32 #include "objfiles.h"
33 #include <unordered_map>
34
35 /* This module provides support for "Ravenscar" tasks (Ada) when
36 debugging on bare-metal targets.
37
38 The typical situation is when debugging a bare-metal target over
39 the remote protocol. In that situation, the system does not know
40 about high-level concepts such as threads, only about some code
41 running on one or more CPUs. And since the remote protocol does not
42 provide any handling for CPUs, the de facto standard for handling
43 them is to have one thread per CPU, where the thread's ptid has
44 its lwp field set to the CPU number (eg: 1 for the first CPU,
45 2 for the second one, etc). This module will make that assumption.
46
47 This module then creates and maintains the list of threads based
48 on the list of Ada tasks, with one thread per Ada task. The convention
49 is that threads corresponding to the CPUs (see assumption above)
50 have a ptid_t of the form (PID, LWP, 0), while threads corresponding
51 to our Ada tasks have a ptid_t of the form (PID, 0, TID) where TID
52 is the Ada task's ID as extracted from Ada runtime information.
53
54 Switching to a given Ada task (or its underlying thread) is performed
55 by fetching the registers of that task from the memory area where
56 the registers were saved. For any of the other operations, the
57 operation is performed by first finding the CPU on which the task
58 is running, switching to its corresponding ptid, and then performing
59 the operation on that ptid using the target beneath us. */
60
/* If true, ravenscar task support is enabled.  */
static bool ravenscar_task_support = true;

/* Name of the runtime variable holding the ID of the thread running
   on each CPU (indexed per CPU; see get_running_thread_id).  */
static const char running_thread_name[] = "__gnat_running_thread_table";

/* Task-list symbols used by ada-tasks.c; at least one must be
   present for the Ravenscar runtime to be usable.  */
static const char known_tasks_name[] = "system__tasking__debug__known_tasks";
static const char first_task_name[] = "system__tasking__debug__first_task";

/* The runtime's initialization routine; its presence in the program
   identifies a Ravenscar-capable runtime.  */
static const char ravenscar_runtime_initializer[]
  = "system__bb__threads__initialize";
71
/* Description of the "ravenscar" target, used when registering the
   target and by "info target".  */
static const target_info ravenscar_target_info = {
  "ravenscar",
  N_("Ravenscar tasks."),
  N_("Ravenscar tasks support.")
};
77
/* The Ravenscar thread-stratum target.  It sits on top of the
   process-stratum target and translates between Ada tasks and the
   CPU threads reported by the target beneath (see the module
   introduction above).  */

struct ravenscar_thread_target final : public target_ops
{
  ravenscar_thread_target ()
    : m_base_ptid (inferior_ptid)
  {
  }

  const target_info &info () const override
  { return ravenscar_target_info; }

  strata stratum () const override { return thread_stratum; }

  ptid_t wait (ptid_t, struct target_waitstatus *, target_wait_flags) override;
  void resume (ptid_t, int, enum gdb_signal) override;

  /* Register access is redirected either to the CPU's register bank
     (for the task currently running on a CPU) or to the runtime's
     saved-register area (for suspended tasks).  */
  void fetch_registers (struct regcache *, int) override;
  void store_registers (struct regcache *, int) override;

  void prepare_to_store (struct regcache *) override;

  bool stopped_by_sw_breakpoint () override;

  bool stopped_by_hw_breakpoint () override;

  bool stopped_by_watchpoint () override;

  bool stopped_data_address (CORE_ADDR *) override;

  enum target_xfer_status xfer_partial (enum target_object object,
					const char *annex,
					gdb_byte *readbuf,
					const gdb_byte *writebuf,
					ULONGEST offset, ULONGEST len,
					ULONGEST *xfered_len) override;

  bool thread_alive (ptid_t ptid) override;

  int core_of_thread (ptid_t ptid) override;

  void update_thread_list () override;

  std::string pid_to_str (ptid_t) override;

  ptid_t get_ada_task_ptid (long lwp, long thread) override;

  /* Branch tracing operates on the underlying CPU thread, not on the
     Ravenscar task itself.  */
  struct btrace_target_info *enable_btrace (ptid_t ptid,
					    const struct btrace_config *conf)
    override
  {
    ptid = get_base_thread_from_ravenscar_task (ptid);
    return beneath ()->enable_btrace (ptid, conf);
  }

  void mourn_inferior () override;

  /* This target is heap-allocated (see ravenscar_inferior_created)
     and deletes itself when closed.  */
  void close () override
  {
    delete this;
  }

  /* Fetch the runtime's running thread, make sure it is in the
     thread list, and return it; nullptr when the runtime is not
     initialized yet.  */
  thread_info *add_active_thread ();

private:

  /* PTID of the last thread that received an event.
     This can be useful to determine the associated task that received
     the event, to make it the current task.  */
  ptid_t m_base_ptid;

  /* Return the ptid of the task running on CPU, or null_ptid.  */
  ptid_t active_task (int cpu);
  /* True iff PTID is the task currently executing on its CPU.  */
  bool task_is_currently_active (ptid_t ptid);
  /* True once the Ravenscar runtime has initialized on the target.  */
  bool runtime_initialized ();
  /* Return the number of the CPU that PTID runs on.  */
  int get_thread_base_cpu (ptid_t ptid);
  /* Map a Ravenscar task ptid to the CPU thread's ptid.  */
  ptid_t get_base_thread_from_ravenscar_task (ptid_t ptid);
  /* Add TASK's thread to the thread list if not already present.  */
  void add_thread (struct ada_task_info *task);

  /* Like switch_to_thread, but uses the base ptid for the thread.  */
  void set_base_thread_from_ravenscar_task (ptid_t ptid)
  {
    process_stratum_target *proc_target
      = as_process_stratum_target (this->beneath ());
    ptid_t underlying = get_base_thread_from_ravenscar_task (ptid);
    switch_to_thread (find_thread_ptid (proc_target, underlying));
  }

  /* This maps a TID to the CPU on which it was running.  This is
     needed because sometimes the runtime will report an active task
     that hasn't yet been put on the list of tasks that is read by
     ada-tasks.c.  */
  std::unordered_map<long, int> m_cpu_map;
};
169
170 /* Return true iff PTID corresponds to a ravenscar task. */
171
172 static bool
173 is_ravenscar_task (ptid_t ptid)
174 {
175 /* By construction, ravenscar tasks have their LWP set to zero.
176 Also make sure that the TID is nonzero, as some remotes, when
177 asked for the list of threads, will return the first thread
178 as having its TID set to zero. For instance, TSIM version
179 2.0.48 for LEON3 sends 'm0' as a reply to the 'qfThreadInfo'
180 query, which the remote protocol layer then treats as a thread
181 whose TID is 0. This is obviously not a ravenscar task. */
182 return ptid.lwp () == 0 && ptid.tid () != 0;
183 }
184
185 /* Given PTID, which can be either a ravenscar task or a CPU thread,
186 return which CPU that ptid is running on.
187
188 This assume that PTID is a valid ptid_t. Otherwise, a gdb_assert
189 will be triggered. */
190
191 int
192 ravenscar_thread_target::get_thread_base_cpu (ptid_t ptid)
193 {
194 int base_cpu;
195
196 if (is_ravenscar_task (ptid))
197 {
198 /* Prefer to not read inferior memory if possible, to avoid
199 reentrancy problems with xfer_partial. */
200 auto iter = m_cpu_map.find (ptid.tid ());
201
202 if (iter != m_cpu_map.end ())
203 base_cpu = iter->second;
204 else
205 {
206 struct ada_task_info *task_info = ada_get_task_info_from_ptid (ptid);
207
208 gdb_assert (task_info != NULL);
209 base_cpu = task_info->base_cpu;
210 }
211 }
212 else
213 {
214 /* We assume that the LWP of the PTID is equal to the CPU number. */
215 base_cpu = ptid.lwp ();
216 }
217
218 return base_cpu;
219 }
220
221 /* Given a ravenscar task (identified by its ptid_t PTID), return true
222 if this task is the currently active task on the cpu that task is
223 running on.
224
225 In other words, this function determine which CPU this task is
226 currently running on, and then return nonzero if the CPU in question
227 is executing the code for that task. If that's the case, then
228 that task's registers are in the CPU bank. Otherwise, the task
229 is currently suspended, and its registers have been saved in memory. */
230
231 bool
232 ravenscar_thread_target::task_is_currently_active (ptid_t ptid)
233 {
234 ptid_t active_task_ptid = active_task (get_thread_base_cpu (ptid));
235
236 return ptid == active_task_ptid;
237 }
238
239 /* Return the CPU thread (as a ptid_t) on which the given ravenscar
240 task is running.
241
242 This is the thread that corresponds to the CPU on which the task
243 is running. */
244
245 ptid_t
246 ravenscar_thread_target::get_base_thread_from_ravenscar_task (ptid_t ptid)
247 {
248 int base_cpu;
249
250 if (!is_ravenscar_task (ptid))
251 return ptid;
252
253 base_cpu = get_thread_base_cpu (ptid);
254 return ptid_t (ptid.pid (), base_cpu, 0);
255 }
256
/* Fetch the ravenscar running thread from target memory, make sure
   there's a corresponding thread in the thread list, and return it.
   If the runtime is not initialized, return NULL.  */

thread_info *
ravenscar_thread_target::add_active_thread ()
{
  process_stratum_target *proc_target
    = as_process_stratum_target (this->beneath ());

  int base_cpu;

  /* m_base_ptid must be a CPU thread; the caller (wait, or
     ravenscar_inferior_created via the constructor) is responsible
     for having set it before calling this method, since active_task
     relies on it for the PID.  */
  gdb_assert (!is_ravenscar_task (m_base_ptid));
  base_cpu = get_thread_base_cpu (m_base_ptid);

  if (!runtime_initialized ())
    return nullptr;

  /* The runtime is initialized, so CPU 1 (at least) has a running
     thread; the current CPU must as well.  */
  ptid_t active_ptid = active_task (base_cpu);
  gdb_assert (active_ptid != null_ptid);

  /* The running thread may not have been added to
     system.tasking.debug's list yet; so ravenscar_update_thread_list
     may not always add it to the thread list.  Add it here.  */
  thread_info *active_thr = find_thread_ptid (proc_target, active_ptid);
  if (active_thr == nullptr)
    {
      active_thr = ::add_thread (proc_target, active_ptid);
      /* Record the task's CPU so later lookups avoid reading
	 inferior memory (see get_thread_base_cpu).  */
      m_cpu_map[active_ptid.tid ()] = base_cpu;
    }
  return active_thr;
}
291
292 /* The Ravenscar Runtime exports a symbol which contains the ID of
293 the thread that is currently running. Try to locate that symbol
294 and return its associated minimal symbol.
295 Return NULL if not found. */
296
297 static struct bound_minimal_symbol
298 get_running_thread_msymbol ()
299 {
300 struct bound_minimal_symbol msym;
301
302 msym = lookup_minimal_symbol (running_thread_name, NULL, NULL);
303 if (!msym.minsym)
304 /* Older versions of the GNAT runtime were using a different
305 (less ideal) name for the symbol where the active thread ID
306 is stored. If we couldn't find the symbol using the latest
307 name, then try the old one. */
308 msym = lookup_minimal_symbol ("running_thread", NULL, NULL);
309
310 return msym;
311 }
312
313 /* Return True if the Ada Ravenscar run-time can be found in the
314 application. */
315
316 static bool
317 has_ravenscar_runtime ()
318 {
319 struct bound_minimal_symbol msym_ravenscar_runtime_initializer
320 = lookup_minimal_symbol (ravenscar_runtime_initializer, NULL, NULL);
321 struct bound_minimal_symbol msym_known_tasks
322 = lookup_minimal_symbol (known_tasks_name, NULL, NULL);
323 struct bound_minimal_symbol msym_first_task
324 = lookup_minimal_symbol (first_task_name, NULL, NULL);
325 struct bound_minimal_symbol msym_running_thread
326 = get_running_thread_msymbol ();
327
328 return (msym_ravenscar_runtime_initializer.minsym
329 && (msym_known_tasks.minsym || msym_first_task.minsym)
330 && msym_running_thread.minsym);
331 }
332
/* Return True if the Ada Ravenscar run-time can be found in the
   application, and if it has been initialized on target.  */

bool
ravenscar_thread_target::runtime_initialized ()
{
  /* The runtime is considered initialized once CPU 1 reports a
     running thread, i.e. its running-thread slot is nonzero.  */
  return active_task (1) != null_ptid;
}
341
342 /* Return the ID of the thread that is currently running.
343 Return 0 if the ID could not be determined. */
344
345 static CORE_ADDR
346 get_running_thread_id (int cpu)
347 {
348 struct bound_minimal_symbol object_msym = get_running_thread_msymbol ();
349 int object_size;
350 int buf_size;
351 gdb_byte *buf;
352 CORE_ADDR object_addr;
353 struct type *builtin_type_void_data_ptr
354 = builtin_type (target_gdbarch ())->builtin_data_ptr;
355
356 if (!object_msym.minsym)
357 return 0;
358
359 object_size = TYPE_LENGTH (builtin_type_void_data_ptr);
360 object_addr = (BMSYMBOL_VALUE_ADDRESS (object_msym)
361 + (cpu - 1) * object_size);
362 buf_size = object_size;
363 buf = (gdb_byte *) alloca (buf_size);
364 read_memory (object_addr, buf, buf_size);
365 return extract_typed_address (buf, builtin_type_void_data_ptr);
366 }
367
/* Implement the "resume" target_ops method.

   A Ravenscar task cannot be resumed directly; what is resumed is
   the underlying CPU thread (m_base_ptid), except for wildcard
   resumes which are passed through unchanged.  */

void
ravenscar_thread_target::resume (ptid_t ptid, int step,
				 enum gdb_signal siggnal)
{
  /* If we see a wildcard resume, we simply pass that on.  Otherwise,
     arrange to resume the base ptid.  */
  inferior_ptid = m_base_ptid;
  if (ptid.is_pid ())
    {
      /* We only have one process, so resume all threads of it.  */
      ptid = minus_one_ptid;
    }
  else if (ptid != minus_one_ptid)
    ptid = m_base_ptid;
  beneath ()->resume (ptid, step, siggnal);
}
384
/* Implement the "wait" target_ops method.

   Waits on the base (CPU) thread, then maps the event back to the
   active Ada task when the runtime is up.  */

ptid_t
ravenscar_thread_target::wait (ptid_t ptid,
			       struct target_waitstatus *status,
			       target_wait_flags options)
{
  process_stratum_target *beneath
    = as_process_stratum_target (this->beneath ());
  ptid_t event_ptid;

  if (ptid != minus_one_ptid)
    ptid = m_base_ptid;
  /* NOTE(review): OPTIONS is not forwarded to the target beneath; 0
     is passed instead -- confirm this is deliberate.  */
  event_ptid = beneath->wait (ptid, status, 0);
  /* Find any new threads that might have been created, and return the
     active thread.

     Only do it if the program is still alive, though.  Otherwise,
     this causes problems when debugging through the remote protocol,
     because we might try switching threads (and thus sending packets)
     after the remote has disconnected.  */
  if (status->kind != TARGET_WAITKIND_EXITED
      && status->kind != TARGET_WAITKIND_SIGNALLED
      && runtime_initialized ())
    {
      /* Record which CPU thread got the event before refreshing the
	 task list -- add_active_thread relies on m_base_ptid.  */
      m_base_ptid = event_ptid;
      this->update_thread_list ();
      return this->add_active_thread ()->ptid;
    }
  return event_ptid;
}
414
415 /* Add the thread associated to the given TASK to the thread list
416 (if the thread has already been added, this is a no-op). */
417
418 void
419 ravenscar_thread_target::add_thread (struct ada_task_info *task)
420 {
421 if (find_thread_ptid (current_inferior (), task->ptid) == NULL)
422 {
423 ::add_thread (current_inferior ()->process_target (), task->ptid);
424 m_cpu_map[task->ptid.tid ()] = task->base_cpu;
425 }
426 }
427
428 void
429 ravenscar_thread_target::update_thread_list ()
430 {
431 /* iterate_over_live_ada_tasks requires that inferior_ptid be set,
432 but this isn't always the case in target methods. So, we ensure
433 it here. */
434 scoped_restore save_ptid = make_scoped_restore (&inferior_ptid,
435 m_base_ptid);
436
437 /* Do not clear the thread list before adding the Ada task, to keep
438 the thread that the process stratum has included into it
439 (m_base_ptid) and the running thread, that may not have been included
440 to system.tasking.debug's list yet. */
441
442 iterate_over_live_ada_tasks ([=] (struct ada_task_info *task)
443 {
444 this->add_thread (task);
445 });
446 }
447
448 ptid_t
449 ravenscar_thread_target::active_task (int cpu)
450 {
451 CORE_ADDR tid = get_running_thread_id (cpu);
452
453 if (tid == 0)
454 return null_ptid;
455 else
456 return ptid_t (m_base_ptid.pid (), 0, tid);
457 }
458
/* Implement the "thread_alive" target_ops method.  */

bool
ravenscar_thread_target::thread_alive (ptid_t ptid)
{
  /* Ravenscar tasks are non-terminating.  */
  return true;
}
465
466 std::string
467 ravenscar_thread_target::pid_to_str (ptid_t ptid)
468 {
469 if (!is_ravenscar_task (ptid))
470 return beneath ()->pid_to_str (ptid);
471
472 return string_printf ("Ravenscar Thread %#x", (int) ptid.tid ());
473 }
474
/* Temporarily set the ptid of a regcache to some other value.  When
   this object is destroyed, the regcache's original ptid is
   restored.  */

class temporarily_change_regcache_ptid
{
public:

  /* Save REGCACHE's current ptid and replace it with NEW_PTID.  */
  temporarily_change_regcache_ptid (struct regcache *regcache, ptid_t new_ptid)
    : m_regcache (regcache),
      m_save_ptid (regcache->ptid ())
  {
    m_regcache->set_ptid (new_ptid);
  }

  /* Restore the ptid that was saved at construction time.  */
  ~temporarily_change_regcache_ptid ()
  {
    m_regcache->set_ptid (m_save_ptid);
  }

private:

  /* The regcache.  */
  struct regcache *m_regcache;
  /* The saved ptid.  */
  ptid_t m_save_ptid;
};
502
/* Implement the "fetch_registers" target_ops method.  */

void
ravenscar_thread_target::fetch_registers (struct regcache *regcache, int regnum)
{
  ptid_t ptid = regcache->ptid ();

  if (runtime_initialized () && is_ravenscar_task (ptid))
    {
      if (task_is_currently_active (ptid))
	{
	  /* The task is running: its registers are live in the CPU's
	     register bank, so fetch them via the base (CPU) thread.  */
	  ptid_t base = get_base_thread_from_ravenscar_task (ptid);
	  temporarily_change_regcache_ptid changer (regcache, base);
	  beneath ()->fetch_registers (regcache, regnum);
	}
      else
	{
	  /* The task is suspended: read its registers from the
	     memory area where the runtime saved them, using the
	     architecture-specific layout.  */
	  struct gdbarch *gdbarch = regcache->arch ();
	  struct ravenscar_arch_ops *arch_ops
	    = gdbarch_ravenscar_ops (gdbarch);

	  arch_ops->fetch_registers (regcache, regnum);
	}
    }
  else
    beneath ()->fetch_registers (regcache, regnum);
}
528
/* Implement the "store_registers" target_ops method.  */

void
ravenscar_thread_target::store_registers (struct regcache *regcache,
					  int regnum)
{
  ptid_t ptid = regcache->ptid ();

  if (runtime_initialized () && is_ravenscar_task (ptid))
    {
      if (task_is_currently_active (ptid))
	{
	  /* The task is running: write straight into the CPU's
	     register bank via the base (CPU) thread.  */
	  ptid_t base = get_base_thread_from_ravenscar_task (ptid);
	  temporarily_change_regcache_ptid changer (regcache, base);
	  beneath ()->store_registers (regcache, regnum);
	}
      else
	{
	  /* The task is suspended: write its registers back to the
	     memory area where the runtime saved them.  */
	  struct gdbarch *gdbarch = regcache->arch ();
	  struct ravenscar_arch_ops *arch_ops
	    = gdbarch_ravenscar_ops (gdbarch);

	  arch_ops->store_registers (regcache, regnum);
	}
    }
  else
    beneath ()->store_registers (regcache, regnum);
}
555
556 void
557 ravenscar_thread_target::prepare_to_store (struct regcache *regcache)
558 {
559 ptid_t ptid = regcache->ptid ();
560
561 if (runtime_initialized () && is_ravenscar_task (ptid))
562 {
563 if (task_is_currently_active (ptid))
564 {
565 ptid_t base = get_base_thread_from_ravenscar_task (ptid);
566 temporarily_change_regcache_ptid changer (regcache, base);
567 beneath ()->prepare_to_store (regcache);
568 }
569 else
570 {
571 /* Nothing. */
572 }
573 }
574 else
575 beneath ()->prepare_to_store (regcache);
576 }
577
/* Implement the to_stopped_by_sw_breakpoint target_ops "method".  */

bool
ravenscar_thread_target::stopped_by_sw_breakpoint ()
{
  /* Delegate to the target beneath, with the current thread
     temporarily switched to the underlying CPU thread.  */
  scoped_restore_current_thread saver;
  set_base_thread_from_ravenscar_task (inferior_ptid);
  return beneath ()->stopped_by_sw_breakpoint ();
}
587
/* Implement the to_stopped_by_hw_breakpoint target_ops "method".  */

bool
ravenscar_thread_target::stopped_by_hw_breakpoint ()
{
  /* Delegate to the target beneath, with the current thread
     temporarily switched to the underlying CPU thread.  */
  scoped_restore_current_thread saver;
  set_base_thread_from_ravenscar_task (inferior_ptid);
  return beneath ()->stopped_by_hw_breakpoint ();
}
597
/* Implement the to_stopped_by_watchpoint target_ops "method".  */

bool
ravenscar_thread_target::stopped_by_watchpoint ()
{
  /* Delegate to the target beneath, with the current thread
     temporarily switched to the underlying CPU thread.  */
  scoped_restore_current_thread saver;
  set_base_thread_from_ravenscar_task (inferior_ptid);
  return beneath ()->stopped_by_watchpoint ();
}
607
/* Implement the to_stopped_data_address target_ops "method".  */

bool
ravenscar_thread_target::stopped_data_address (CORE_ADDR *addr_p)
{
  /* Delegate to the target beneath, with the current thread
     temporarily switched to the underlying CPU thread.  */
  scoped_restore_current_thread saver;
  set_base_thread_from_ravenscar_task (inferior_ptid);
  return beneath ()->stopped_data_address (addr_p);
}
617
/* Implement the "mourn_inferior" target_ops method.  */

void
ravenscar_thread_target::mourn_inferior ()
{
  m_base_ptid = null_ptid;
  /* Grab the pointer to the target beneath before unpushing:
     unpush_target can end up closing this target, and close ()
     deletes it.  */
  target_ops *beneath = this->beneath ();
  current_inferior ()->unpush_target (this);
  beneath->mourn_inferior ();
}
626
/* Implement the to_core_of_thread target_ops "method".  */

int
ravenscar_thread_target::core_of_thread (ptid_t ptid)
{
  scoped_restore_current_thread saver;
  /* NOTE(review): the PTID parameter is ignored; the query is made
     for inferior_ptid instead.  Confirm that callers only ever ask
     about the current thread.  */
  set_base_thread_from_ravenscar_task (inferior_ptid);
  return beneath ()->core_of_thread (inferior_ptid);
}
636
/* Implement the target xfer_partial method.

   The transfer is performed on behalf of the underlying CPU thread;
   inferior_ptid is restored on exit.  */

enum target_xfer_status
ravenscar_thread_target::xfer_partial (enum target_object object,
				       const char *annex,
				       gdb_byte *readbuf,
				       const gdb_byte *writebuf,
				       ULONGEST offset, ULONGEST len,
				       ULONGEST *xfered_len)
{
  scoped_restore save_ptid = make_scoped_restore (&inferior_ptid);
  /* Calling get_base_thread_from_ravenscar_task can read memory from
     the inferior.  However, that function is written to prefer our
     internal map, so it should not result in recursive calls in
     practice.  */
  inferior_ptid = get_base_thread_from_ravenscar_task (inferior_ptid);
  return beneath ()->xfer_partial (object, annex, readbuf, writebuf,
				   offset, len, xfered_len);
}
656
/* Observer on inferior_created: push ravenscar thread stratum if needed.  */

static void
ravenscar_inferior_created (inferior *inf)
{
  const char *err_msg;

  /* Bail out unless the feature is enabled, the architecture
     provides Ravenscar register save/restore ops, and the runtime's
     symbols are present in the program.  */
  if (!ravenscar_task_support
      || gdbarch_ravenscar_ops (target_gdbarch ()) == NULL
      || !has_ravenscar_runtime ())
    return;

  err_msg = ada_get_tcb_types_info ();
  if (err_msg != NULL)
    {
      warning (_("%s. Task/thread support disabled."), err_msg);
      return;
    }

  /* Ownership of the new target passes to the inferior's target
     stack; the target deletes itself in close ().  */
  ravenscar_thread_target *rtarget = new ravenscar_thread_target ();
  inf->push_target (target_ops_up (rtarget));
  /* Switch to the active task when it can already be determined,
     i.e. when the runtime has been initialized.  */
  thread_info *thr = rtarget->add_active_thread ();
  if (thr != nullptr)
    switch_to_thread (thr);
}
682
/* Implement the "get_ada_task_ptid" target_ops method.  */

ptid_t
ravenscar_thread_target::get_ada_task_ptid (long lwp, long thread)
{
  /* By convention Ravenscar tasks carry their ID in the TID field
     and have LWP zero (see is_ravenscar_task); LWP is unused.  */
  return ptid_t (m_base_ptid.pid (), 0, thread);
}
688
/* Command lists for the "set ravenscar" and "show ravenscar" prefix
   commands, registered in _initialize_ravenscar.  */
static struct cmd_list_element *set_ravenscar_list;
static struct cmd_list_element *show_ravenscar_list;
692
693 /* Implement the "show ravenscar task-switching" command. */
694
695 static void
696 show_ravenscar_task_switching_command (struct ui_file *file, int from_tty,
697 struct cmd_list_element *c,
698 const char *value)
699 {
700 if (ravenscar_task_support)
701 fprintf_filtered (file, _("\
702 Support for Ravenscar task/thread switching is enabled\n"));
703 else
704 fprintf_filtered (file, _("\
705 Support for Ravenscar task/thread switching is disabled\n"));
706 }
707
/* Module startup initialization function, automagically called by
   init.c.  */

void _initialize_ravenscar ();
void
_initialize_ravenscar ()
{
  /* Notice when the inferior is created in order to push the
     ravenscar ops if needed.  */
  gdb::observers::inferior_created.attach (ravenscar_inferior_created,
					   "ravenscar-thread");

  /* Register the "set ravenscar" and "show ravenscar" prefix
     commands.  */
  add_basic_prefix_cmd ("ravenscar", no_class,
			_("Prefix command for changing Ravenscar-specific settings."),
			&set_ravenscar_list, 0, &setlist);

  add_show_prefix_cmd ("ravenscar", no_class,
		       _("Prefix command for showing Ravenscar-specific settings."),
		       &show_ravenscar_list, 0, &showlist);

  /* Register the "set/show ravenscar task-switching" boolean
     setting, backed by ravenscar_task_support.  */
  add_setshow_boolean_cmd ("task-switching", class_obscure,
			   &ravenscar_task_support, _("\
Enable or disable support for GNAT Ravenscar tasks."), _("\
Show whether support for GNAT Ravenscar tasks is enabled."),
			   _("\
Enable or disable support for task/thread switching with the GNAT\n\
Ravenscar run-time library for bareboard configuration."),
			   NULL, show_ravenscar_task_switching_command,
			   &set_ravenscar_list, &show_ravenscar_list);
}
This page took 0.063037 seconds and 4 git commands to generate.