gdb/ia64-hpux-nat.c
/* Copyright (C) 2010, 2012 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "ia64-tdep.h"
#include "inferior.h"
#include "inf-ttrace.h"
#include "regcache.h"
#include "solib-ia64-hpux.h"

#include <ia64/sys/uregs.h>
#include <sys/ttrace.h>

/* The offsets used with ttrace to read the value of the raw registers.  */
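
/* An entry of -1 means that the corresponding register cannot be
   transferred through the TT_LWP_RUREGS/TT_LWP_WUREGS requests: it is
   either synthesized by GDB (r0, f0, f1), fetched through a dedicated
   request (RNAT), or simply not accessible here.  The table is indexed
   by GDB's register number for ia64.  */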

static int u_offsets[] =
{ /* Static General Registers.  */
  -1, __r1, __r2, __r3, __r4, __r5, __r6, __r7,
  __r8, __r9, __r10, __r11, __r12, __r13, __r14, __r15,
  __r16, __r17, __r18, __r19, __r20, __r21, __r22, __r23,
  __r24, __r25, __r26, __r27, __r28, __r29, __r30, __r31,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,

  /* Static Floating-Point Registers.  */
  -1, -1, __f2, __f3, __f4, __f5, __f6, __f7,
  __f8, __f9, __f10, __f11, __f12, __f13, __f14, __f15,
  __f16, __f17, __f18, __f19, __f20, __f21, __f22, __f23,
  __f24, __f25, __f26, __f27, __f28, __f29, __f30, __f31,
  __f32, __f33, __f34, __f35, __f36, __f37, __f38, __f39,
  __f40, __f41, __f42, __f43, __f44, __f45, __f46, __f47,
  __f48, __f49, __f50, __f51, __f52, __f53, __f54, __f55,
  __f56, __f57, __f58, __f59, __f60, __f61, __f62, __f63,
  __f64, __f65, __f66, __f67, __f68, __f69, __f70, __f71,
  __f72, __f73, __f74, __f75, __f76, __f77, __f78, __f79,
  __f80, __f81, __f82, __f83, __f84, __f85, __f86, __f87,
  __f88, __f89, __f90, __f91, __f92, __f93, __f94, __f95,
  __f96, __f97, __f98, __f99, __f100, __f101, __f102, __f103,
  __f104, __f105, __f106, __f107, __f108, __f109, __f110, __f111,
  __f112, __f113, __f114, __f115, __f116, __f117, __f118, __f119,
  __f120, __f121, __f122, __f123, __f124, __f125, __f126, __f127,

  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,

  /* Branch Registers.  */
  __b0, __b1, __b2, __b3, __b4, __b5, __b6, __b7,

  /* Virtual frame pointer and virtual return address pointer.  */
  -1, -1,

  /* Other registers.  */
  __pr, __ip, __cr_ipsr, __cfm,

  /* Kernel registers.  */
  -1, -1, -1, -1,
  -1, -1, -1, -1,

  -1, -1, -1, -1, -1, -1, -1, -1,

  /* Some application registers.  */
  __ar_rsc, __ar_bsp, __ar_bspstore, __ar_rnat,

  -1,
  -1, /* Not available: FCR, IA32 floating control register.  */
  -1, -1,

  -1, /* Not available: EFLAG.  */
  -1, /* Not available: CSD.  */
  -1, /* Not available: SSD.  */
  -1, /* Not available: CFLG.  */
  -1, /* Not available: FSR.  */
  -1, /* Not available: FIR.  */
  -1, /* Not available: FDR.  */
  -1,
  __ar_ccv, -1, -1, -1, __ar_unat, -1, -1, -1,
  __ar_fpsr, -1, -1, -1,
  -1, /* Not available: ITC.  */
  -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1, -1,
  __ar_pfs, __ar_lc, __ar_ec,
  -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
  -1
  /* All following registers, starting with nat0, are handled as
     pseudo registers, and hence are handled separately.  */
};

/* Some registers have a fixed value and cannot be modified.
   Store their value in static constant buffers that can be used
   later to fill the register cache.  */
static const char r0_value[8] = {0x00, 0x00, 0x00, 0x00,
                                 0x00, 0x00, 0x00, 0x00};
static const char f0_value[16] = {0x00, 0x00, 0x00, 0x00,
                                  0x00, 0x00, 0x00, 0x00,
                                  0x00, 0x00, 0x00, 0x00,
                                  0x00, 0x00, 0x00, 0x00};
static const char f1_value[16] = {0x00, 0x00, 0x00, 0x00,
                                  0x00, 0x00, 0xff, 0xff,
                                  0x80, 0x00, 0x00, 0x00,
                                  0x00, 0x00, 0x00, 0x00};
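
/* Note on F1_VALUE: on this big-endian target the 16-byte buffer is
   apparently laid out as sign and biased exponent in the first eight
   bytes (0x000000000000ffff) followed by the 64-bit significand
   (0x8000000000000000).  With the ia64 exponent bias of 0xffff and
   the explicit integer bit set, that encodes exactly +1.0.  */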

/* The "to_wait" routine from the "inf-ttrace" layer.  */

static ptid_t (*super_to_wait) (struct target_ops *, ptid_t,
                                struct target_waitstatus *, int);

/* The "to_wait" target_ops routine for ia64-hpux.  */

static ptid_t
ia64_hpux_wait (struct target_ops *ops, ptid_t ptid,
                struct target_waitstatus *ourstatus, int options)
{
  ptid_t new_ptid;

  new_ptid = super_to_wait (ops, ptid, ourstatus, options);

  /* If this is a DLD event (hard-coded breakpoint instruction
     that was activated by the solib-ia64-hpux module), we need to
     process it, and then resume the execution as if the event did
     not happen.  */
  if (ourstatus->kind == TARGET_WAITKIND_STOPPED
      && ourstatus->value.sig == GDB_SIGNAL_TRAP
      && ia64_hpux_at_dld_breakpoint_p (new_ptid))
    {
      ia64_hpux_handle_dld_breakpoint (new_ptid);

      target_resume (new_ptid, 0, GDB_SIGNAL_0);
      ourstatus->kind = TARGET_WAITKIND_IGNORE;
    }

  return new_ptid;
}

/* Fetch the RNAT register and supply it to the REGCACHE.  */

static void
ia64_hpux_fetch_rnat_register (struct regcache *regcache)
{
  CORE_ADDR addr;
  gdb_byte buf[8];
  int status;

  /* The value of RNAT is stored at bsp|0x1f8, and must be read using
     TT_LWP_RDRSEBS.  */
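
  /* The RSE saves a NaT collection in the backing store each time
     bits 8:3 of the backing-store pointer are all ones, i.e. in the
     8-byte slot at offset 0x1f8 of every 0x200-byte block, which is
     why that offset is ORed into BSP here.  */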

  regcache_raw_read_unsigned (regcache, IA64_BSP_REGNUM, &addr);
  addr |= 0x1f8;

  status = ttrace (TT_LWP_RDRSEBS, ptid_get_pid (inferior_ptid),
                   ptid_get_lwp (inferior_ptid), addr, sizeof (buf),
                   (uintptr_t) buf);
  if (status < 0)
    error (_("failed to read RNAT register at %s"),
           paddress (get_regcache_arch (regcache), addr));

  regcache_raw_supply (regcache, IA64_RNAT_REGNUM, buf);
}

/* Read the value of the register saved at OFFSET in the save_state_t
   structure, and store its value in BUF.  LEN is the size of the register
   to be read.  */
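
/* OFFSET is expected to be one of the __xxx offsets declared in
   <ia64/sys/uregs.h> and recorded in the u_offsets table above.  */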

static int
ia64_hpux_read_register_from_save_state_t (int offset, gdb_byte *buf, int len)
{
  int status;

  status = ttrace (TT_LWP_RUREGS, ptid_get_pid (inferior_ptid),
                   ptid_get_lwp (inferior_ptid), offset, len, (uintptr_t) buf);

  return status;
}

/* Fetch register REGNUM from the inferior.  */

static void
ia64_hpux_fetch_register (struct regcache *regcache, int regnum)
{
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
  int offset, len, status;
  gdb_byte *buf;

  if (regnum == IA64_GR0_REGNUM)
    {
      /* r0 is always 0.  */
      regcache_raw_supply (regcache, regnum, r0_value);
      return;
    }

  if (regnum == IA64_FR0_REGNUM)
    {
      /* f0 is always 0.0.  */
      regcache_raw_supply (regcache, regnum, f0_value);
      return;
    }

  if (regnum == IA64_FR1_REGNUM)
    {
      /* f1 is always 1.0.  */
      regcache_raw_supply (regcache, regnum, f1_value);
      return;
    }

  if (regnum == IA64_RNAT_REGNUM)
    {
      ia64_hpux_fetch_rnat_register (regcache);
      return;
    }

  /* Get the register location.  If the register can not be fetched,
     then return now.  */
  offset = u_offsets[regnum];
  if (offset == -1)
    return;

  len = register_size (gdbarch, regnum);
  buf = alloca (len * sizeof (gdb_byte));
  status = ia64_hpux_read_register_from_save_state_t (offset, buf, len);
  if (status < 0)
    warning (_("Failed to read register value for %s."),
             gdbarch_register_name (gdbarch, regnum));

  regcache_raw_supply (regcache, regnum, buf);
}

/* The "to_fetch_registers" target_ops routine for ia64-hpux.  */

static void
ia64_hpux_fetch_registers (struct target_ops *ops,
                           struct regcache *regcache, int regnum)
{
  if (regnum == -1)
    for (regnum = 0;
         regnum < gdbarch_num_regs (get_regcache_arch (regcache));
         regnum++)
      ia64_hpux_fetch_register (regcache, regnum);
  else
    ia64_hpux_fetch_register (regcache, regnum);
}

/* Save the register value stored in BUF at OFFSET in the save_state_t
   structure.  LEN is the size of the register in bytes.

   Return the value from the corresponding ttrace call (a negative value
   means that the operation failed).  */

static int
ia64_hpux_write_register_to_saved_state_t (int offset, gdb_byte *buf, int len)
{
  return ttrace (TT_LWP_WUREGS, ptid_get_pid (inferior_ptid),
                 ptid_get_lwp (inferior_ptid), offset, len, (uintptr_t) buf);
}

/* Store register REGNUM into the inferior.  */

static void
ia64_hpux_store_register (const struct regcache *regcache, int regnum)
{
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
  int offset = u_offsets[regnum];
  gdb_byte *buf;
  int len, status;

  /* If the register can not be stored, then return now.  */
  if (offset == -1)
    return;

  /* I don't know how to store that register for now.  So just ignore any
     request to store it, to avoid an internal error.  */
  if (regnum == IA64_PSR_REGNUM)
    return;

  len = register_size (gdbarch, regnum);
  buf = alloca (len * sizeof (gdb_byte));
  regcache_raw_collect (regcache, regnum, buf);

  status = ia64_hpux_write_register_to_saved_state_t (offset, buf, len);

  if (status < 0)
    error (_("failed to write register value for %s."),
           gdbarch_register_name (gdbarch, regnum));
}

/* The "to_store_registers" target_ops routine for ia64-hpux.  */

static void
ia64_hpux_store_registers (struct target_ops *ops,
                           struct regcache *regcache, int regnum)
{
  if (regnum == -1)
    for (regnum = 0;
         regnum < gdbarch_num_regs (get_regcache_arch (regcache));
         regnum++)
      ia64_hpux_store_register (regcache, regnum);
  else
    ia64_hpux_store_register (regcache, regnum);
}

/* The "xfer_partial" routine from the "inf-ttrace" target layer.
   Ideally, we would like to use this routine for all transfer
   requests, but this platform has a lot of special cases that
   need to be handled manually.  So we override this routine and
   delegate back if we detect that we are not in a special case.  */

static LONGEST (*super_xfer_partial) (struct target_ops *, enum target_object,
                                      const char *, gdb_byte *,
                                      const gdb_byte *, ULONGEST, LONGEST);

/* The "xfer_partial" routine for a memory region that is completely
   outside of the backing-store region.  */

static LONGEST
ia64_hpux_xfer_memory_no_bs (struct target_ops *ops, const char *annex,
                             gdb_byte *readbuf, const gdb_byte *writebuf,
                             CORE_ADDR addr, LONGEST len)
{
  /* Memory writes need to be aligned on 16-byte boundaries, at least
     when writing in the text section.  On the other hand, the size
     of the buffer does not need to be a multiple of 16 bytes.

     No such restriction when performing memory reads.  */
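
  /* For example, a 5-byte write at ADDR == 0x1007 is turned into a
     7-byte read-back from ALIGNED_ADDR == 0x1000 followed by a single
     12-byte write at 0x1000 (ALIGNED_LEN == 5 + (0x1007 - 0x1000)),
     so that the store starts on a 16-byte boundary.  */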

  if (writebuf && addr & 0x0f)
    {
      const CORE_ADDR aligned_addr = addr & ~0x0f;
      const int aligned_len = len + (addr - aligned_addr);
      gdb_byte *aligned_buf = alloca (aligned_len * sizeof (gdb_byte));
      LONGEST status;

      /* Read the portion of memory between ALIGNED_ADDR and ADDR, so
         that we can write it back during our aligned memory write.  */
      status = super_xfer_partial (ops, TARGET_OBJECT_MEMORY, annex,
                                   aligned_buf /* read */,
                                   NULL /* write */,
                                   aligned_addr, addr - aligned_addr);
      if (status <= 0)
        return 0;
      memcpy (aligned_buf + (addr - aligned_addr), writebuf, len);

      return super_xfer_partial (ops, TARGET_OBJECT_MEMORY, annex,
                                 NULL /* read */, aligned_buf /* write */,
                                 aligned_addr, aligned_len);
    }
  else
    /* Memory read or properly aligned memory write.  */
    return super_xfer_partial (ops, TARGET_OBJECT_MEMORY, annex, readbuf,
                               writebuf, addr, len);
}

/* Read LEN bytes at ADDR from memory, and store it in BUF.  This memory
   region is assumed to be inside the backing store.

   Return zero if the operation failed.  */
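
/* The transfer is done eight bytes at a time with TT_LWP_RDRSEBS,
   starting from the 8-byte-aligned address below ADDR; SKIP_LO and
   SKIP_HI trim the parts of the first and last chunk that fall
   outside of [ADDR, ADDR + LEN) before they are copied into BUF.  */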

static int
ia64_hpux_read_memory_bs (gdb_byte *buf, CORE_ADDR addr, int len)
{
  gdb_byte tmp_buf[8];
  CORE_ADDR tmp_addr = addr & ~0x7;

  while (tmp_addr < addr + len)
    {
      int status;
      int skip_lo = 0;
      int skip_hi = 0;

      status = ttrace (TT_LWP_RDRSEBS, ptid_get_pid (inferior_ptid),
                       ptid_get_lwp (inferior_ptid), tmp_addr,
                       sizeof (tmp_buf), (uintptr_t) tmp_buf);
      if (status < 0)
        return 0;

      if (tmp_addr < addr)
        skip_lo = addr - tmp_addr;

      if (tmp_addr + sizeof (tmp_buf) > addr + len)
        skip_hi = (tmp_addr + sizeof (tmp_buf)) - (addr + len);

      memcpy (buf + (tmp_addr + skip_lo - addr),
              tmp_buf + skip_lo,
              sizeof (tmp_buf) - skip_lo - skip_hi);

      tmp_addr += sizeof (tmp_buf);
    }

  return 1;
}

/* Write LEN bytes from BUF in memory at ADDR.  This memory region is assumed
   to be inside the backing store.

   Return zero if the operation failed.  */
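
/* Writes are done as a read-modify-write of 8-byte chunks: when the
   first or last chunk is only partially covered by [ADDR, ADDR + LEN),
   its current contents are read back first so that the bytes outside
   the requested range are preserved.  LO and HI are the indices of the
   first and last byte of the chunk that actually come from BUF.  */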

static int
ia64_hpux_write_memory_bs (const gdb_byte *buf, CORE_ADDR addr, int len)
{
  gdb_byte tmp_buf[8];
  CORE_ADDR tmp_addr = addr & ~0x7;

  while (tmp_addr < addr + len)
    {
      int status;
      int lo = 0;
      int hi = 7;

      if (tmp_addr < addr || tmp_addr + sizeof (tmp_buf) > addr + len)
        /* Part of the 8-byte region pointed to by TMP_ADDR needs to be
           preserved.  So read it in before we copy the data that needs
           to be changed.  */
        if (!ia64_hpux_read_memory_bs (tmp_buf, tmp_addr, sizeof (tmp_buf)))
          return 0;

      if (tmp_addr < addr)
        lo = addr - tmp_addr;

      if (tmp_addr + sizeof (tmp_buf) > addr + len)
        hi = addr - tmp_addr + len - 1;

      memcpy (tmp_buf + lo, buf + tmp_addr - addr + lo, hi - lo + 1);

      status = ttrace (TT_LWP_WRRSEBS, ptid_get_pid (inferior_ptid),
                       ptid_get_lwp (inferior_ptid), tmp_addr,
                       sizeof (tmp_buf), (uintptr_t) tmp_buf);
      if (status < 0)
        return 0;

      tmp_addr += sizeof (tmp_buf);
    }

  return 1;
}

/* The "xfer_partial" routine for a memory region that is completely
   inside of the backing-store region.  */

static LONGEST
ia64_hpux_xfer_memory_bs (struct target_ops *ops, const char *annex,
                          gdb_byte *readbuf, const gdb_byte *writebuf,
                          CORE_ADDR addr, LONGEST len)
{
  int success;

  if (readbuf)
    success = ia64_hpux_read_memory_bs (readbuf, addr, len);
  else
    success = ia64_hpux_write_memory_bs (writebuf, addr, len);

  if (success)
    return len;
  else
    return 0;
}

/* Get a register value as an unsigned value directly from the system,
   instead of going through the regcache.

   This function is meant to be used when inferior_ptid is not
   a thread/process known to GDB.  */
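
/* Within this file, it is only used by ia64_hpux_xfer_memory below,
   when inferior_ptid designates a fork child that has not been added
   to the inferior list yet.  */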

static ULONGEST
ia64_hpux_get_register_from_save_state_t (int regnum, int reg_size)
{
  gdb_byte *buf = alloca (reg_size);
  int offset = u_offsets[regnum];
  int status;

  /* The register is assumed to be available for fetching.  */
  gdb_assert (offset != -1);

  status = ia64_hpux_read_register_from_save_state_t (offset, buf, reg_size);
  if (status < 0)
    {
      /* This really should not happen.  If it does, emit a warning
         and pretend the register value is zero.  Not exactly the best
         error recovery mechanism, but better than nothing.  We will
         try to do better if we can demonstrate that this can happen
         under normal circumstances.  */
      warning (_("Failed to read value of register number %d."), regnum);
      return 0;
    }

  return extract_unsigned_integer (buf, reg_size, BFD_ENDIAN_BIG);
}

/* The "xfer_partial" target_ops routine for ia64-hpux, in the case
   where the requested object is TARGET_OBJECT_MEMORY.  */

static LONGEST
ia64_hpux_xfer_memory (struct target_ops *ops, const char *annex,
                       gdb_byte *readbuf, const gdb_byte *writebuf,
                       CORE_ADDR addr, LONGEST len)
{
  CORE_ADDR bsp, bspstore;
  CORE_ADDR start_addr, short_len;
  int status = 0;

  /* The backing-store region cannot be read/written by the standard memory
     read/write operations.  So we handle the memory region piecemeal:
       (1) and (2) The regions before and after the backing-store region,
           which can be treated as normal memory;
       (3) The region inside the backing-store, which needs to be
           read/written specially.  */
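
  /* The region between BSPSTORE and BSP is special presumably because
     the dirty stacked registers covering it have not been flushed out
     to memory yet; the TT_LWP_RDRSEBS/TT_LWP_WRRSEBS requests used by
     the *_bs routines above fetch and store them where they actually
     live.  */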

  if (in_inferior_list (ptid_get_pid (inferior_ptid)))
    {
      struct regcache *regcache = get_current_regcache ();

      regcache_raw_read_unsigned (regcache, IA64_BSP_REGNUM, &bsp);
      regcache_raw_read_unsigned (regcache, IA64_BSPSTORE_REGNUM, &bspstore);
    }
  else
    {
      /* This is probably a child of our inferior created by a fork.
         Because this process has not been added to our inferior list
         (we are probably in the process of handling that child
         process), we do not have a regcache to read the registers
         from.  So get those values directly from the kernel.  */
      bsp = ia64_hpux_get_register_from_save_state_t (IA64_BSP_REGNUM, 8);
      bspstore =
        ia64_hpux_get_register_from_save_state_t (IA64_BSPSTORE_REGNUM, 8);
    }

  /* 1. Memory region before BSPSTORE.  */

  if (addr < bspstore)
    {
      short_len = len;
      if (addr + len > bspstore)
        short_len = bspstore - addr;

      status = ia64_hpux_xfer_memory_no_bs (ops, annex, readbuf, writebuf,
                                            addr, short_len);
      if (status <= 0)
        return 0;
    }

  /* 2. Memory region after BSP.  */

  if (addr + len > bsp)
    {
      start_addr = addr;
      if (start_addr < bsp)
        start_addr = bsp;
      short_len = len + addr - start_addr;

      status = ia64_hpux_xfer_memory_no_bs
        (ops, annex,
         readbuf ? readbuf + (start_addr - addr) : NULL,
         writebuf ? writebuf + (start_addr - addr) : NULL,
         start_addr, short_len);
      if (status <= 0)
        return 0;
    }

  /* 3. Memory region between BSPSTORE and BSP.  */

  if (bspstore != bsp
      && ((addr < bspstore && addr + len > bspstore)
          || (addr >= bspstore && addr < bsp)))
    {
      start_addr = addr;
      if (addr < bspstore)
        start_addr = bspstore;
      short_len = len + addr - start_addr;

      if (start_addr + short_len > bsp)
        short_len = bsp - start_addr;

      gdb_assert (short_len > 0);

      status = ia64_hpux_xfer_memory_bs
        (ops, annex,
         readbuf ? readbuf + (start_addr - addr) : NULL,
         writebuf ? writebuf + (start_addr - addr) : NULL,
         start_addr, short_len);
      if (status < 0)
        return 0;
    }

  return len;
}

/* Handle the transfer of TARGET_OBJECT_HPUX_UREGS objects on ia64-hpux.
   ANNEX is currently ignored.

   The current implementation does not support write transfers (because
   we do not currently need these transfers), and will raise
   a failed assertion if WRITEBUF is not NULL.  */

static LONGEST
ia64_hpux_xfer_uregs (struct target_ops *ops, const char *annex,
                      gdb_byte *readbuf, const gdb_byte *writebuf,
                      ULONGEST offset, LONGEST len)
{
  int status;

  gdb_assert (writebuf == NULL);

  status = ia64_hpux_read_register_from_save_state_t (offset, readbuf, len);
  if (status < 0)
    return -1;
  return len;
}

/* Handle the transfer of TARGET_OBJECT_HPUX_SOLIB_GOT objects on ia64-hpux.

   The current implementation does not support write transfers (because
   we do not currently need these transfers), and will raise
   a failed assertion if WRITEBUF is not NULL.  */

static LONGEST
ia64_hpux_xfer_solib_got (struct target_ops *ops, const char *annex,
                          gdb_byte *readbuf, const gdb_byte *writebuf,
                          ULONGEST offset, LONGEST len)
{
  CORE_ADDR fun_addr;
  /* The linkage pointer.  We use a uint64_t to make sure that the size
     of the object we are returning is always 64 bits long, as explained
     in the description of the TARGET_OBJECT_HPUX_SOLIB_GOT object.
     This is probably paranoia, but we do not use a CORE_ADDR because
     it could conceivably be larger than uint64_t.  */
  uint64_t got;

  gdb_assert (writebuf == NULL);

  if (offset > sizeof (got))
    return 0;

  fun_addr = string_to_core_addr (annex);
  got = ia64_hpux_get_solib_linkage_addr (fun_addr);

  if (len > sizeof (got) - offset)
    len = sizeof (got) - offset;
  memcpy (readbuf, (gdb_byte *) &got + offset, len);

  return len;
}

/* The "to_xfer_partial" target_ops routine for ia64-hpux.  */

static LONGEST
ia64_hpux_xfer_partial (struct target_ops *ops, enum target_object object,
                        const char *annex, gdb_byte *readbuf,
                        const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
{
  LONGEST val;

  if (object == TARGET_OBJECT_MEMORY)
    val = ia64_hpux_xfer_memory (ops, annex, readbuf, writebuf, offset, len);
  else if (object == TARGET_OBJECT_HPUX_UREGS)
    val = ia64_hpux_xfer_uregs (ops, annex, readbuf, writebuf, offset, len);
  else if (object == TARGET_OBJECT_HPUX_SOLIB_GOT)
    val = ia64_hpux_xfer_solib_got (ops, annex, readbuf, writebuf, offset,
                                    len);
  else
    val = super_xfer_partial (ops, object, annex, readbuf, writebuf, offset,
                              len);

  return val;
}

/* The "to_can_use_hw_breakpoint" target_ops routine for ia64-hpux.  */

static int
ia64_hpux_can_use_hw_breakpoint (int type, int cnt, int othertype)
{
  /* No hardware watchpoint/breakpoint support yet.  */
  return 0;
}

/* The "to_mourn_inferior" routine from the "inf-ttrace" target_ops layer.  */

static void (*super_mourn_inferior) (struct target_ops *);

/* The "to_mourn_inferior" target_ops routine for ia64-hpux.  */

static void
ia64_hpux_mourn_inferior (struct target_ops *ops)
{
  const int pid = ptid_get_pid (inferior_ptid);
  int status;

  super_mourn_inferior (ops);

  /* On this platform, the process still exists even after we received
     an exit event.  Detaching from the process isn't sufficient either,
     as it only turns the process into a zombie.  So the only solution
     we found is to kill it.  */
  ttrace (TT_PROC_EXIT, pid, 0, 0, 0, 0);
  wait (&status);
}

/* Prevent warning from -Wmissing-prototypes.  */
void _initialize_ia64_hpux_nat (void);

void
_initialize_ia64_hpux_nat (void)
{
  struct target_ops *t;

  t = inf_ttrace_target ();
  super_to_wait = t->to_wait;
  super_xfer_partial = t->to_xfer_partial;
  super_mourn_inferior = t->to_mourn_inferior;

  t->to_wait = ia64_hpux_wait;
  t->to_fetch_registers = ia64_hpux_fetch_registers;
  t->to_store_registers = ia64_hpux_store_registers;
  t->to_xfer_partial = ia64_hpux_xfer_partial;
  t->to_can_use_hw_breakpoint = ia64_hpux_can_use_hw_breakpoint;
  t->to_mourn_inferior = ia64_hpux_mourn_inferior;
  t->to_attach_no_wait = 1;

  add_target (t);
}