1 /* Copyright (C) 2010 Free Software Foundation, Inc.
3 This file is part of GDB.
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by
7 the Free Software Foundation; either version 3 of the License, or
8 (at your option) any later version.
10 This program is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
15 You should have received a copy of the GNU General Public License
16 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19 #include "ia64-tdep.h"
21 #include "inf-ttrace.h"
23 #include "solib-ia64-hpux.h"
25 #include <ia64/sys/uregs.h>
26 #include <sys/ttrace.h>
28 /* The offsets used with ttrace to read the value of the raw registers. */
30 static int u_offsets
[] =
31 { /* Static General Registers. */
32 -1, __r1
, __r2
, __r3
, __r4
, __r5
, __r6
, __r7
,
33 __r8
, __r9
, __r10
, __r11
, __r12
, __r13
, __r14
, __r15
,
34 __r16
, __r17
, __r18
, __r19
, __r20
, __r21
, __r22
, __r23
,
35 __r24
, __r25
, __r26
, __r27
, __r28
, __r29
, __r30
, __r31
,
36 -1, -1, -1, -1, -1, -1, -1, -1,
37 -1, -1, -1, -1, -1, -1, -1, -1,
38 -1, -1, -1, -1, -1, -1, -1, -1,
39 -1, -1, -1, -1, -1, -1, -1, -1,
40 -1, -1, -1, -1, -1, -1, -1, -1,
41 -1, -1, -1, -1, -1, -1, -1, -1,
42 -1, -1, -1, -1, -1, -1, -1, -1,
43 -1, -1, -1, -1, -1, -1, -1, -1,
44 -1, -1, -1, -1, -1, -1, -1, -1,
45 -1, -1, -1, -1, -1, -1, -1, -1,
46 -1, -1, -1, -1, -1, -1, -1, -1,
47 -1, -1, -1, -1, -1, -1, -1, -1,
49 /* Static Floating-Point Registers. */
50 -1, -1, __f2
, __f3
, __f4
, __f5
, __f6
, __f7
,
51 __f8
, __f9
, __f10
, __f11
, __f12
, __f13
, __f14
, __f15
,
52 __f16
, __f17
, __f18
, __f19
, __f20
, __f21
, __f22
, __f23
,
53 __f24
, __f25
, __f26
, __f27
, __f28
, __f29
, __f30
, __f31
,
54 __f32
, __f33
, __f34
, __f35
, __f36
, __f37
, __f38
, __f39
,
55 __f40
, __f41
, __f42
, __f43
, __f44
, __f45
, __f46
, __f47
,
56 __f48
, __f49
, __f50
, __f51
, __f52
, __f53
, __f54
, __f55
,
57 __f56
, __f57
, __f58
, __f59
, __f60
, __f61
, __f62
, __f63
,
58 __f64
, __f65
, __f66
, __f67
, __f68
, __f69
, __f70
, __f71
,
59 __f72
, __f73
, __f74
, __f75
, __f76
, __f77
, __f78
, __f79
,
60 __f80
, __f81
, __f82
, __f83
, __f84
, __f85
, __f86
, __f87
,
61 __f88
, __f89
, __f90
, __f91
, __f92
, __f93
, __f94
, __f95
,
62 __f96
, __f97
, __f98
, __f99
, __f100
, __f101
, __f102
, __f103
,
63 __f104
, __f105
, __f106
, __f107
, __f108
, __f109
, __f110
, __f111
,
64 __f112
, __f113
, __f114
, __f115
, __f116
, __f117
, __f118
, __f119
,
65 __f120
, __f121
, __f122
, __f123
, __f124
, __f125
, __f126
, __f127
,
67 -1, -1, -1, -1, -1, -1, -1, -1,
68 -1, -1, -1, -1, -1, -1, -1, -1,
69 -1, -1, -1, -1, -1, -1, -1, -1,
70 -1, -1, -1, -1, -1, -1, -1, -1,
71 -1, -1, -1, -1, -1, -1, -1, -1,
72 -1, -1, -1, -1, -1, -1, -1, -1,
73 -1, -1, -1, -1, -1, -1, -1, -1,
74 -1, -1, -1, -1, -1, -1, -1, -1,
76 /* Branch Registers. */
77 __b0
, __b1
, __b2
, __b3
, __b4
, __b5
, __b6
, __b7
,
79 /* Virtual frame pointer and virtual return address pointer. */
82 /* Other registers. */
83 __pr
, __ip
, __cr_ipsr
, __cfm
,
85 /* Kernel registers. */
89 -1, -1, -1, -1, -1, -1, -1, -1,
91 /* Some application registers. */
92 __ar_rsc
, __ar_bsp
, __ar_bspstore
, __ar_rnat
,
95 -1, /* Not available: FCR, IA32 floating control register. */
98 -1, /* Not available: EFLAG. */
99 -1, /* Not available: CSD. */
100 -1, /* Not available: SSD. */
101 -1, /* Not available: CFLG. */
102 -1, /* Not available: FSR. */
103 -1, /* Not available: FIR. */
104 -1, /* Not available: FDR. */
106 __ar_ccv
, -1, -1, -1, __ar_unat
, -1, -1, -1,
107 __ar_fpsr
, -1, -1, -1,
108 -1, /* Not available: ITC. */
109 -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
110 -1, -1, -1, -1, -1, -1, -1, -1, -1,
111 __ar_pfs
, __ar_lc
, __ar_ec
,
112 -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
113 -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
114 -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
115 -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
116 -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
117 -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
119 /* All following registers, starting with nat0, are handled as
120 pseudo registers, and hence are handled separately. */
/* Some registers have a fixed value and can not be modified.
   Store their value in static constant buffers that can be used
   later to fill the register cache.  */

/* r0 always reads as zero (8-byte general register).  */
static const char r0_value[8] = {0x00, 0x00, 0x00, 0x00,
                                 0x00, 0x00, 0x00, 0x00};

/* f0 always reads as 0.0 (16-byte floating-point register).  */
static const char f0_value[16] = {0x00, 0x00, 0x00, 0x00,
                                  0x00, 0x00, 0x00, 0x00,
                                  0x00, 0x00, 0x00, 0x00,
                                  0x00, 0x00, 0x00, 0x00};

/* f1 always reads as 1.0; the non-zero bytes below are its raw
   encoding in the ia64 floating-point register file format.  */
static const char f1_value[16] = {0x00, 0x00, 0x00, 0x00,
                                  0x00, 0x00, 0xff, 0xff,
                                  0x80, 0x00, 0x00, 0x00,
                                  0x00, 0x00, 0x00, 0x00};
137 /* The "to_wait" routine from the "inf-ttrace" layer. */
139 static ptid_t (*super_to_wait
) (struct target_ops
*, ptid_t
,
140 struct target_waitstatus
*, int);
142 /* The "to_wait" target_ops routine routine for ia64-hpux. */
145 ia64_hpux_wait (struct target_ops
*ops
, ptid_t ptid
,
146 struct target_waitstatus
*ourstatus
, int options
)
150 new_ptid
= super_to_wait (ops
, ptid
, ourstatus
, options
);
152 /* If this is a DLD event (hard-coded breakpoint instruction
153 that was activated by the solib-ia64-hpux module), we need to
154 process it, and then resume the execution as if the event did
156 if (ourstatus
->kind
== TARGET_WAITKIND_STOPPED
157 && ourstatus
->value
.sig
== TARGET_SIGNAL_TRAP
158 && ia64_hpux_at_dld_breakpoint_p (new_ptid
))
160 ia64_hpux_handle_dld_breakpoint (new_ptid
);
162 target_resume (new_ptid
, 0, TARGET_SIGNAL_0
);
163 ourstatus
->kind
= TARGET_WAITKIND_IGNORE
;
169 /* Fetch the RNAT register and supply it to the REGCACHE. */
172 ia64_hpux_fetch_rnat_register (struct regcache
*regcache
)
178 /* The value of RNAT is stored at bsp|0x1f8, and must be read using
181 regcache_raw_read_unsigned (regcache
, IA64_BSP_REGNUM
, &addr
);
184 status
= ttrace (TT_LWP_RDRSEBS
, ptid_get_pid (inferior_ptid
),
185 ptid_get_lwp (inferior_ptid
), addr
, sizeof (buf
),
188 error (_("failed to read RNAT register at %s"),
189 paddress (get_regcache_arch(regcache
), addr
));
191 regcache_raw_supply (regcache
, IA64_RNAT_REGNUM
, buf
);
194 /* Read the value of the register saved at OFFSET in the save_state_t
195 structure, and store its value in BUF. LEN is the size of the register
199 ia64_hpux_read_register_from_save_state_t (int offset
, gdb_byte
*buf
, int len
)
203 status
= ttrace (TT_LWP_RUREGS
, ptid_get_pid (inferior_ptid
),
204 ptid_get_lwp (inferior_ptid
), offset
, len
, (uintptr_t) buf
);
209 /* Fetch register REGNUM from the inferior. */
212 ia64_hpux_fetch_register (struct regcache
*regcache
, int regnum
)
214 struct gdbarch
*gdbarch
= get_regcache_arch (regcache
);
215 int offset
, len
, status
;
218 if (regnum
== IA64_GR0_REGNUM
)
220 /* r0 is always 0. */
221 regcache_raw_supply (regcache
, regnum
, r0_value
);
225 if (regnum
== IA64_FR0_REGNUM
)
227 /* f0 is always 0.0. */
228 regcache_raw_supply (regcache
, regnum
, f0_value
);
232 if (regnum
== IA64_FR1_REGNUM
)
234 /* f1 is always 1.0. */
235 regcache_raw_supply (regcache
, regnum
, f1_value
);
239 if (regnum
== IA64_RNAT_REGNUM
)
241 ia64_hpux_fetch_rnat_register (regcache
);
245 /* Get the register location. If the register can not be fetched,
247 offset
= u_offsets
[regnum
];
251 len
= register_size (gdbarch
, regnum
);
252 buf
= alloca (len
* sizeof (gdb_byte
));
253 status
= ia64_hpux_read_register_from_save_state_t (offset
, buf
, len
);
255 warning (_("Failed to read register value for %s."),
256 gdbarch_register_name (gdbarch
, regnum
));
258 regcache_raw_supply (regcache
, regnum
, buf
);
/* The "to_fetch_registers" target_ops routine for ia64-hpux.

   Fetch register REGNUM, or all registers when REGNUM is -1.  */

static void
ia64_hpux_fetch_registers (struct target_ops *ops,
                           struct regcache *regcache, int regnum)
{
  if (regnum == -1)
    for (regnum = 0;
         regnum < gdbarch_num_regs (get_regcache_arch (regcache));
         regnum++)
      ia64_hpux_fetch_register (regcache, regnum);
  else
    ia64_hpux_fetch_register (regcache, regnum);
}
276 /* Save register REGNUM (stored in BUF) in the save_state_t structure.
277 LEN is the size of the register in bytes.
279 Return the value from the corresponding ttrace call (a negative value
280 means that the operation failed). */
283 ia64_hpux_write_register_to_saved_state_t (int offset
, gdb_byte
*buf
, int len
)
285 return ttrace (TT_LWP_WUREGS
, ptid_get_pid (inferior_ptid
),
286 ptid_get_lwp (inferior_ptid
), offset
, len
, (uintptr_t) buf
);
289 /* Store register REGNUM into the inferior. */
292 ia64_hpux_store_register (const struct regcache
*regcache
, int regnum
)
294 struct gdbarch
*gdbarch
= get_regcache_arch (regcache
);
295 int offset
= u_offsets
[regnum
];
299 /* If the register can not be stored, then return now. */
303 /* I don't know how to store that register for now. So just ignore any
304 request to store it, to avoid an internal error. */
305 if (regnum
== IA64_PSR_REGNUM
)
308 len
= register_size (gdbarch
, regnum
);
309 buf
= alloca (len
* sizeof (gdb_byte
));
310 regcache_raw_collect (regcache
, regnum
, buf
);
312 status
= ia64_hpux_write_register_to_saved_state_t (offset
, buf
, len
);
315 error (_("failed to write register value for %s."),
316 gdbarch_register_name (gdbarch
, regnum
));
/* The "to_store_registers" target_ops routine for ia64-hpux.

   Store register REGNUM, or all registers when REGNUM is -1.  */

static void
ia64_hpux_store_registers (struct target_ops *ops,
                           struct regcache *regcache, int regnum)
{
  if (regnum == -1)
    for (regnum = 0;
         regnum < gdbarch_num_regs (get_regcache_arch (regcache));
         regnum++)
      ia64_hpux_store_register (regcache, regnum);
  else
    ia64_hpux_store_register (regcache, regnum);
}
334 /* The "xfer_partial" routine from the "inf-ttrace" target layer.
335 Ideally, we would like to use this routine for all transfer
336 requests, but this platforms has a lot of special cases that
337 need to be handled manually. So we override this routine and
338 delegate back if we detect that we are not in a special case. */
340 static LONGEST (*super_xfer_partial
) (struct target_ops
*, enum target_object
,
341 const char *, gdb_byte
*,
342 const gdb_byte
*, ULONGEST
, LONGEST
);
344 /* The "xfer_partial" routine for a memory region that is completely
345 outside of the backing-store region. */
348 ia64_hpux_xfer_memory_no_bs (struct target_ops
*ops
, const char *annex
,
349 gdb_byte
*readbuf
, const gdb_byte
*writebuf
,
350 CORE_ADDR addr
, LONGEST len
)
352 /* Memory writes need to be aligned on 16byte boundaries, at least
353 when writing in the text section. On the other hand, the size
354 of the buffer does not need to be a multiple of 16bytes.
356 No such restriction when performing memory reads. */
358 if (writebuf
&& addr
& 0x0f)
360 const CORE_ADDR aligned_addr
= addr
& ~0x0f;
361 const int aligned_len
= len
+ (addr
- aligned_addr
);
362 gdb_byte
*aligned_buf
= alloca (aligned_len
* sizeof (gdb_byte
));
365 /* Read the portion of memory between ALIGNED_ADDR and ADDR, so
366 that we can write it back during our aligned memory write. */
367 status
= super_xfer_partial (ops
, TARGET_OBJECT_MEMORY
, annex
,
368 aligned_buf
/* read */,
370 aligned_addr
, addr
- aligned_addr
);
373 memcpy (aligned_buf
+ (addr
- aligned_addr
), writebuf
, len
);
375 return super_xfer_partial (ops
, TARGET_OBJECT_MEMORY
, annex
,
376 NULL
/* read */, aligned_buf
/* write */,
377 aligned_addr
, aligned_len
);
380 /* Memory read or properly aligned memory write. */
381 return super_xfer_partial (ops
, TARGET_OBJECT_MEMORY
, annex
, readbuf
,
382 writebuf
, addr
, len
);
385 /* Read LEN bytes at ADDR from memory, and store it in BUF. This memory
386 region is assumed to be inside the backing store.
388 Return zero if the operation failed. */
391 ia64_hpux_read_memory_bs (gdb_byte
*buf
, CORE_ADDR addr
, int len
)
394 CORE_ADDR tmp_addr
= addr
& ~0x7;
396 while (tmp_addr
< addr
+ len
)
402 status
= ttrace (TT_LWP_RDRSEBS
, ptid_get_pid (inferior_ptid
),
403 ptid_get_lwp (inferior_ptid
), tmp_addr
,
404 sizeof (tmp_buf
), (uintptr_t) tmp_buf
);
409 skip_lo
= addr
- tmp_addr
;
411 if (tmp_addr
+ sizeof (tmp_buf
) > addr
+ len
)
412 skip_hi
= (tmp_addr
+ sizeof (tmp_buf
)) - (addr
+ len
);
414 memcpy (buf
+ (tmp_addr
+ skip_lo
- addr
),
416 sizeof (tmp_buf
) - skip_lo
- skip_hi
);
418 tmp_addr
+= sizeof (tmp_buf
);
424 /* Write LEN bytes from BUF in memory at ADDR. This memory region is assumed
425 to be inside the backing store.
427 Return zero if the operation failed. */
430 ia64_hpux_write_memory_bs (const gdb_byte
*buf
, CORE_ADDR addr
, int len
)
433 CORE_ADDR tmp_addr
= addr
& ~0x7;
435 while (tmp_addr
< addr
+ len
)
441 if (tmp_addr
< addr
|| tmp_addr
+ sizeof (tmp_buf
) > addr
+ len
)
442 /* Part of the 8byte region pointed by tmp_addr needs to be preserved.
443 So read it in before we copy the data that needs to be changed. */
444 if (!ia64_hpux_read_memory_bs (tmp_buf
, tmp_addr
, sizeof (tmp_buf
)))
448 lo
= addr
- tmp_addr
;
450 if (tmp_addr
+ sizeof (tmp_buf
) > addr
+ len
)
451 hi
= addr
- tmp_addr
+ len
- 1;
453 memcpy (tmp_buf
+ lo
, buf
+ tmp_addr
- addr
+ lo
, hi
- lo
+ 1);
455 status
= ttrace (TT_LWP_WRRSEBS
, ptid_get_pid (inferior_ptid
),
456 ptid_get_lwp (inferior_ptid
), tmp_addr
,
457 sizeof (tmp_buf
), (uintptr_t) tmp_buf
);
461 tmp_addr
+= sizeof (tmp_buf
);
467 /* The "xfer_partial" routine for a memory region that is completely
468 inside of the backing-store region. */
471 ia64_hpux_xfer_memory_bs (struct target_ops
*ops
, const char *annex
,
472 gdb_byte
*readbuf
, const gdb_byte
*writebuf
,
473 CORE_ADDR addr
, LONGEST len
)
478 success
= ia64_hpux_read_memory_bs (readbuf
, addr
, len
);
480 success
= ia64_hpux_write_memory_bs (writebuf
, addr
, len
);
488 /* The "xfer_partial" target_ops routine for ia64-hpux, in the case
489 where the requested object is TARGET_OBJECT_MEMORY. */
492 ia64_hpux_xfer_memory (struct target_ops
*ops
, const char *annex
,
493 gdb_byte
*readbuf
, const gdb_byte
*writebuf
,
494 CORE_ADDR addr
, LONGEST len
)
496 CORE_ADDR bsp
, bspstore
;
497 CORE_ADDR start_addr
, short_len
;
500 /* The back-store region cannot be read/written by the standard memory
501 read/write operations. So we handle the memory region piecemeal:
502 (1) and (2) The regions before and after the backing-store region,
503 which can be treated as normal memory;
504 (3) The region inside the backing-store, which needs to be
505 read/written specially. */
507 regcache_raw_read_unsigned (get_current_regcache (), IA64_BSP_REGNUM
, &bsp
);
508 regcache_raw_read_unsigned (get_current_regcache (), IA64_BSPSTORE_REGNUM
,
511 /* 1. Memory region before BSPSTORE. */
516 if (addr
+ len
> bspstore
)
517 short_len
= bspstore
- addr
;
519 status
= ia64_hpux_xfer_memory_no_bs (ops
, annex
, readbuf
, writebuf
,
525 /* 2. Memory region after BSP. */
527 if (addr
+ len
> bsp
)
530 if (start_addr
< bsp
)
532 short_len
= len
+ addr
- start_addr
;
534 status
= ia64_hpux_xfer_memory_no_bs
536 readbuf
? readbuf
+ (start_addr
- addr
) : NULL
,
537 writebuf
? writebuf
+ (start_addr
- addr
) : NULL
,
538 start_addr
, short_len
);
543 /* 3. Memory region between BSPSTORE and BSP. */
546 && ((addr
< bspstore
&& addr
+ len
> bspstore
)
547 || (addr
+ len
<= bsp
&& addr
+ len
> bsp
)))
551 start_addr
= bspstore
;
552 short_len
= len
+ addr
- start_addr
;
554 if (start_addr
+ short_len
> bsp
)
555 short_len
= bsp
- start_addr
;
557 gdb_assert (short_len
> 0);
559 status
= ia64_hpux_xfer_memory_bs
561 readbuf
? readbuf
+ (start_addr
- addr
) : NULL
,
562 writebuf
? writebuf
+ (start_addr
- addr
) : NULL
,
563 start_addr
, short_len
);
571 /* Handle the transfer of TARGET_OBJECT_HPUX_UREGS objects on ia64-hpux.
572 ANNEX is currently ignored.
574 The current implementation does not support write transfers (because
575 we do not currently do not need these transfers), and will raise
576 a failed assertion if WRITEBUF is not NULL. */
579 ia64_hpux_xfer_uregs (struct target_ops
*ops
, const char *annex
,
580 gdb_byte
*readbuf
, const gdb_byte
*writebuf
,
581 ULONGEST offset
, LONGEST len
)
585 gdb_assert (writebuf
== NULL
);
587 status
= ia64_hpux_read_register_from_save_state_t (offset
, readbuf
, len
);
593 /* Handle the transfer of TARGET_OBJECT_HPUX_SOLIB_GOT objects on ia64-hpux.
595 The current implementation does not support write transfers (because
596 we do not currently do not need these transfers), and will raise
597 a failed assertion if WRITEBUF is not NULL. */
600 ia64_hpux_xfer_solib_got (struct target_ops
*ops
, const char *annex
,
601 gdb_byte
*readbuf
, const gdb_byte
*writebuf
,
602 ULONGEST offset
, LONGEST len
)
605 /* The linkage pointer. We use a uint64_t to make sure that the size
606 of the object we are returning is always 64 bits long, as explained
607 in the description of the TARGET_OBJECT_HPUX_SOLIB_GOT object.
608 This is probably paranoia, but we do not use a CORE_ADDR because
609 it could conceivably be larger than uint64_t. */
612 gdb_assert (writebuf
== NULL
);
614 if (offset
> sizeof (got
))
617 fun_addr
= string_to_core_addr (annex
);
618 got
= ia64_hpux_get_solib_linkage_addr (fun_addr
);
620 if (len
> sizeof (got
) - offset
)
621 len
= sizeof (got
) - offset
;
622 memcpy (readbuf
, &got
+ offset
, len
);
627 /* The "to_xfer_partial" target_ops routine for ia64-hpux. */
630 ia64_hpux_xfer_partial (struct target_ops
*ops
, enum target_object object
,
631 const char *annex
, gdb_byte
*readbuf
,
632 const gdb_byte
*writebuf
, ULONGEST offset
, LONGEST len
)
636 if (object
== TARGET_OBJECT_MEMORY
)
637 val
= ia64_hpux_xfer_memory (ops
, annex
, readbuf
, writebuf
, offset
, len
);
638 else if (object
== TARGET_OBJECT_HPUX_UREGS
)
639 val
= ia64_hpux_xfer_uregs (ops
, annex
, readbuf
, writebuf
, offset
, len
);
640 else if (object
== TARGET_OBJECT_HPUX_SOLIB_GOT
)
641 val
= ia64_hpux_xfer_solib_got (ops
, annex
, readbuf
, writebuf
, offset
,
644 val
= super_xfer_partial (ops
, object
, annex
, readbuf
, writebuf
, offset
,
/* The "to_can_use_hw_breakpoint" target_ops routine for ia64-hpux.

   Always answers zero: no hardware watchpoint/breakpoint support yet.  */

static int
ia64_hpux_can_use_hw_breakpoint (int type, int cnt, int othertype)
{
  /* No hardware watchpoint/breakpoint support yet.  */
  return 0;
}
/* The "to_mourn_inferior" routine from the "inf-ttrace" target_ops
   layer, saved here so that our own implementation can delegate to it.  */

static void (*super_mourn_inferior) (struct target_ops *);
663 /* The "to_mourn_inferior" target_ops routine for ia64-hpux. */
666 ia64_hpux_mourn_inferior (struct target_ops
*ops
)
668 const int pid
= ptid_get_pid (inferior_ptid
);
671 super_mourn_inferior (ops
);
673 /* On this platform, the process still exists even after we received
674 an exit event. Detaching from the process isn't sufficient either,
675 as it only turns the process into a zombie. So the only solution
676 we found is to kill it. */
677 ttrace (TT_PROC_EXIT
, pid
, 0, 0, 0, 0);
/* Prevent warning from -Wmissing-prototypes.  */
/* NOTE(review): the name says "hppa" but this file is the ia64-hpux
   native layer — this looks like a copy-paste leftover from
   hppa-hpux-nat.c, and gdb's init.c generator derives the expected
   _initialize_* name from the file name.  Renaming would have to be
   done in both this prototype and the definition below, so it is only
   flagged here rather than changed.  */
void _initialize_hppa_hpux_nat (void);
685 _initialize_hppa_hpux_nat (void)
687 struct target_ops
*t
;
689 t
= inf_ttrace_target ();
690 super_to_wait
= t
->to_wait
;
691 super_xfer_partial
= t
->to_xfer_partial
;
692 super_mourn_inferior
= t
->to_mourn_inferior
;
694 t
->to_wait
= ia64_hpux_wait
;
695 t
->to_fetch_registers
= ia64_hpux_fetch_registers
;
696 t
->to_store_registers
= ia64_hpux_store_registers
;
697 t
->to_xfer_partial
= ia64_hpux_xfer_partial
;
698 t
->to_can_use_hw_breakpoint
= ia64_hpux_can_use_hw_breakpoint
;
699 t
->to_mourn_inferior
= ia64_hpux_mourn_inferior
;
700 t
->to_attach_no_wait
= 1;