+/* Return the first fast tracepoint whose jump pad contains PC. */
+
+static struct tracepoint *
+fast_tracepoint_from_jump_pad_address (CORE_ADDR pc)
+{
+  struct tracepoint *tp;
+
+  /* Walk the global tracepoint list; only fast tracepoints own a
+     jump pad, so every other type is skipped.  */
+  for (tp = tracepoints; tp != NULL; tp = tp->next)
+    {
+      if (tp->type != fast_tracepoint)
+	continue;
+      if (tp->jump_pad <= pc && pc < tp->jump_pad_end)
+	return tp;
+    }
+
+  return NULL;
+}
+
+/* Return GDBserver's tracepoint that matches the IP Agent's
+   tracepoint object that lives at IPA_TPOINT_OBJ in the IP Agent's
+   address space. */
+
+static struct tracepoint *
+fast_tracepoint_from_ipa_tpoint_address (CORE_ADDR ipa_tpoint_obj)
+{
+  struct tracepoint *tp;
+
+  /* The IPA-side object address was recorded in `obj_addr_on_target'
+     when the tracepoint was downloaded, so a linear scan with a
+     direct address compare is all that's needed.  */
+  for (tp = tracepoints; tp != NULL; tp = tp->next)
+    {
+      if (tp->type == fast_tracepoint
+	  && tp->obj_addr_on_target == ipa_tpoint_obj)
+	return tp;
+    }
+
+  return NULL;
+}
+
+#endif
+
+/* The type of the object that is used to synchronize fast tracepoint
+ collection. One of these is built on the collecting thread's stack
+ by the jump pad and published through the IPA's global `collecting'
+ pointer while a collection is in progress (see the comment on
+ `collecting' below). */
+
+typedef struct collecting_t
+{
+ /* The address, in the inferior, of the fast tracepoint object
+ currently collecting; GDBserver matches it against a tracepoint's
+ `obj_addr_on_target' (see fast_tracepoint_from_ipa_tpoint_address). */
+ uintptr_t tpoint;
+
+ /* A number that GDBserver can use to identify the thread that is
+ presently holding the collect lock. This need not be (and usually
+ is not) the thread id, as getting the current thread ID usually
+ requires a system call, which we want to avoid like the plague.
+ Usually this is the thread's TCB, found in the TLS (pseudo-)
+ register, which is readable with a single insn on several
+ architectures. */
+ uintptr_t thread_area;
+} collecting_t;
+
+#ifndef IN_PROCESS_AGENT
+
+/* Forcibly release the fast tracepoint collection lock by clearing
+   the IPA's `collecting' pointer in the inferior. */
+
+void
+force_unlock_trace_buffer (void)
+{
+ write_inferior_data_pointer (ipa_sym_addrs.addr_collecting, 0);
+}
+
+/* Check if the thread identified by THREAD_AREA which is stopped at
+ STOP_PC, is presently locking the fast tracepoint collection, and
+ if so, gather some status of said collection. Returns 0 if the
+ thread isn't collecting or in the jump pad at all. 1, if in the
+ jump pad (or within gdb_collect) and hasn't executed the adjusted
+ original insn yet (can set a breakpoint there and run to it). 2,
+ if presently executing the adjusted original insn --- in which
+ case, if we want to move the thread out of the jump pad, we need to
+ single-step it until this function returns 0. */
+
+int
+fast_tracepoint_collecting (CORE_ADDR thread_area,
+ CORE_ADDR stop_pc,
+ struct fast_tpoint_collect_status *status)
+{
+ CORE_ADDR ipa_collecting;
+ CORE_ADDR ipa_gdb_jump_pad_buffer, ipa_gdb_jump_pad_buffer_end;
+ struct tracepoint *tpoint;
+ int needs_breakpoint;
+
+ /* The thread THREAD_AREA is either:
+
+ 0. not collecting at all, not within the jump pad, or within
+ gdb_collect or one of its callees.
+
+ 1. in the jump pad and haven't reached gdb_collect
+
+ 2. within gdb_collect (out of the jump pad) (collect is set)
+
+ 3. we're in the jump pad, after gdb_collect having returned,
+ possibly executing the adjusted insns.
+
+ For cases 1 and 3, `collecting' may or may not be set. The jump
+ pad doesn't have any complicated jump logic, so we can tell if the
+ thread is executing the adjusted original insn or not by just
+ matching STOP_PC with known jump pad addresses. If it isn't
+ yet executing the original insn, set a breakpoint there, and let
+ the thread run to it, so to quickly step over a possible (many
+ insns) gdb_collect call. Otherwise, or when the breakpoint is
+ hit, only a few (small number of) insns are left to be executed
+ in the jump pad. Single-step the thread until it leaves the
+ jump pad. */
+
+ /* Retried from here when the racy read of the `collecting' object
+ fails further below. */
+ again:
+ tpoint = NULL;
+ needs_breakpoint = 0;
+ trace_debug ("fast_tracepoint_collecting");
+
+ if (read_inferior_data_pointer (ipa_sym_addrs.addr_gdb_jump_pad_buffer,
+ &ipa_gdb_jump_pad_buffer))
+ fatal ("error extracting `gdb_jump_pad_buffer'");
+ if (read_inferior_data_pointer (ipa_sym_addrs.addr_gdb_jump_pad_buffer_end,
+ &ipa_gdb_jump_pad_buffer_end))
+ fatal ("error extracting `gdb_jump_pad_buffer_end'");
+
+ if (ipa_gdb_jump_pad_buffer <= stop_pc && stop_pc < ipa_gdb_jump_pad_buffer_end)
+ {
+ /* We can tell which tracepoint(s) the thread is collecting by
+ matching the jump pad address back to the tracepoint. */
+ tpoint = fast_tracepoint_from_jump_pad_address (stop_pc);
+ if (tpoint == NULL)
+ {
+ warning ("in jump pad, but no matching tpoint?");
+ return 0;
+ }
+ else
+ {
+ trace_debug ("in jump pad of tpoint (%d, %s); jump_pad(%s, %s); "
+ "adj_insn(%s, %s)",
+ tpoint->number, paddress (tpoint->address),
+ paddress (tpoint->jump_pad),
+ paddress (tpoint->jump_pad_end),
+ paddress (tpoint->adjusted_insn_addr),
+ paddress (tpoint->adjusted_insn_addr_end));
+ }
+
+ /* Definitely in the jump pad. May or may not need
+ fast-exit-jump-pad breakpoint. */
+ if (tpoint->jump_pad <= stop_pc
+ && stop_pc < tpoint->adjusted_insn_addr)
+ needs_breakpoint = 1;
+ }
+ else
+ {
+ collecting_t ipa_collecting_obj;
+
+ /* If `collecting' is set/locked, then the THREAD_AREA thread
+ may or may not be the one holding the lock. We have to read
+ the lock to find out. */
+
+ if (read_inferior_data_pointer (ipa_sym_addrs.addr_collecting,
+ &ipa_collecting))
+ {
+ trace_debug ("fast_tracepoint_collecting:"
+ " failed reading 'collecting' in the inferior");
+ return 0;
+ }
+
+ if (!ipa_collecting)
+ {
+ trace_debug ("fast_tracepoint_collecting: not collecting"
+ " (and nobody is).");
+ return 0;
+ }
+
+ /* Some thread is collecting. Check which. If this read fails,
+ restart the whole check from the top -- NOTE(review): the
+ object presumably lives on the collector's stack (see
+ collecting_t) and may have gone away mid-read; confirm this
+ retry cannot spin indefinitely on a persistent read error. */
+ if (read_inferior_memory (ipa_collecting,
+ (unsigned char *) &ipa_collecting_obj,
+ sizeof (ipa_collecting_obj)) != 0)
+ goto again;
+
+ if (ipa_collecting_obj.thread_area != thread_area)
+ {
+ trace_debug ("fast_tracepoint_collecting: not collecting "
+ "(another thread is)");
+ return 0;
+ }
+
+ tpoint
+ = fast_tracepoint_from_ipa_tpoint_address (ipa_collecting_obj.tpoint);
+ if (tpoint == NULL)
+ {
+ warning ("fast_tracepoint_collecting: collecting, "
+ "but tpoint %s not found?",
+ paddress ((CORE_ADDR) ipa_collecting_obj.tpoint));
+ return 0;
+ }
+
+ /* The thread is within `gdb_collect', skip over the rest of
+ fast tracepoint collection quickly using a breakpoint. */
+ needs_breakpoint = 1;
+ }
+
+ /* The caller wants a bit of status detail. */
+ if (status != NULL)
+ {
+ status->tpoint_num = tpoint->number;
+ status->tpoint_addr = tpoint->address;
+ status->adjusted_insn_addr = tpoint->adjusted_insn_addr;
+ status->adjusted_insn_addr_end = tpoint->adjusted_insn_addr_end;
+ }
+
+ if (needs_breakpoint)
+ {
+ /* Hasn't executed the original instruction yet. Set breakpoint
+ there, and wait till it's hit, then single-step until exiting
+ the jump pad. */
+
+ trace_debug ("\
+fast_tracepoint_collecting, returning continue-until-break at %s",
+ paddress (tpoint->adjusted_insn_addr));
+
+ return 1; /* continue */
+ }
+ else
+ {
+ /* Just single-step until exiting the jump pad. */
+
+ trace_debug ("fast_tracepoint_collecting, returning "
+ "need-single-step (%s-%s)",
+ paddress (tpoint->adjusted_insn_addr),
+ paddress (tpoint->adjusted_insn_addr_end));
+
+ return 2; /* single-step */
+ }
+}
+
+#endif
+
+#ifdef IN_PROCESS_AGENT
+
+/* The global fast tracepoint collect lock. Points to a collecting_t
+ object built on the stack by the jump pad, if presently locked;
+ NULL if it isn't locked. Note that this lock *must* be set while
+ executing any *function* other than the jump pad. See
+ fast_tracepoint_collecting. */
+static collecting_t * ATTR_USED collecting;
+
+/* This routine, called from the jump pad (in asm) is designed to be
+ called from the jump pads of fast tracepoints, thus it is on the
+ critical path. TPOINT is the (IPA-side) tracepoint that was hit,
+ REGS the raw register block saved by the jump pad. */
+
+IP_AGENT_EXPORT void ATTR_USED
+gdb_collect (struct tracepoint *tpoint, unsigned char *regs)
+{
+ struct fast_tracepoint_ctx ctx;
+
+ /* Don't do anything until the trace run is completely set up. */
+ if (!tracing)
+ return;
+
+ /* Describe this hit so the collection/condition code knows where
+ the registers came from. The regcache is built lazily. */
+ ctx.base.type = fast_tracepoint;
+ ctx.regs = regs;
+ ctx.regcache_initted = 0;
+ ctx.tpoint = tpoint;
+
+ /* Wrap the regblock in a register cache (in the stack, we don't
+ want to malloc here). */
+ ctx.regspace = alloca (register_cache_size ());
+ if (ctx.regspace == NULL)
+ {
+ /* NOTE(review): alloca has no failure return on common
+ implementations, so this branch is likely never taken --
+ confirm before relying on it. */
+ trace_debug ("Trace buffer block allocation failed, skipping");
+ return;
+ }
+
+ /* Test the condition if present, and collect if true. */
+ if (tpoint->cond == NULL
+ || condition_true_at_tracepoint ((struct tracepoint_hit_ctx *) &ctx,
+ tpoint))
+ {
+ collect_data_at_tracepoint ((struct tracepoint_hit_ctx *) &ctx,
+ tpoint->address, tpoint);
+
+ /* Note that this will cause original insns to be written back
+ to where we jumped from, but that's OK because we're jumping
+ back to the next whole instruction. This will go badly if
+ instruction restoration is not atomic though. */
+ if (stopping_tracepoint
+ || trace_buffer_is_full
+ || expr_eval_result != expr_eval_no_error)
+ stop_tracing ();
+ }
+ else
+ {
+ /* If there was a condition and it evaluated to false, the only
+ way we would stop tracing is if there was an error during
+ condition expression evaluation. */
+ if (expr_eval_result != expr_eval_no_error)
+ stop_tracing ();
+ }
+}
+
+#endif
+
+#ifndef IN_PROCESS_AGENT
+
+/* We'll need to adjust these when we consider bi-arch setups, and big
+ endian machines. */
+
+/* Write pointer value PTR at address WHERE in the inferior. Note
+ this writes sizeof (void *) bytes of PTR in GDBserver's own byte
+ order -- it assumes the inferior's pointers match the host's (see
+ the note just above). Returns the write_inferior_memory result
+ (nonzero on failure). */
+static int
+write_inferior_data_ptr (CORE_ADDR where, CORE_ADDR ptr)
+{
+ return write_inferior_memory (where,
+ (unsigned char *) &ptr, sizeof (void *));
+}
+
+/* The base pointer of the IPA's heap. This is the only memory the
+ IPA is allowed to use. The IPA should _not_ call the inferior's
+ `malloc' during operation. That'd be slow, and, most importantly,
+ it may not be safe. We may be collecting a tracepoint in a signal
+ handler, for example. Lazily initialized from the inferior's
+ `gdb_tp_heap_buffer' symbol and bumped by target_malloc; it only
+ ever grows. */
+static CORE_ADDR target_tp_heap;
+
+/* Allocate at least SIZE bytes of memory from the IPA heap, aligned
+   to 8 bytes. This is a simple bump allocator over the inferior's
+   `gdb_tp_heap_buffer'; no deallocation is provided here. Calls
+   fatal() if the heap head pointer can't be read from the
+   inferior. */
+
+static CORE_ADDR
+target_malloc (ULONGEST size)
+{
+  CORE_ADDR ptr;
+
+  if (target_tp_heap == 0)
+    {
+      /* We have the pointer *address*, need what it points to. */
+      if (read_inferior_data_pointer (ipa_sym_addrs.addr_gdb_tp_heap_buffer,
+				      &target_tp_heap))
+	/* Fixed: the message used to read "could get ...", stating
+	   the opposite of the failure being reported. */
+	fatal ("could not get target heap head pointer");
+    }
+
+  ptr = target_tp_heap;
+  target_tp_heap += size;
+
+  /* Pad to 8-byte alignment, so the *next* allocation also starts
+     8-byte aligned. */
+  target_tp_heap = ((target_tp_heap + 7) & ~0x7);
+
+  return ptr;
+}
+
+/* Copy the agent expression EXPR into the inferior's IPA heap and
+   return the address of the downloaded copy. */
+
+static CORE_ADDR
+download_agent_expr (struct agent_expr *expr)
+{
+  CORE_ADDR expr_addr = target_malloc (sizeof (*expr));
+  CORE_ADDR bytes_addr = target_malloc (expr->length);
+
+  /* Write the expression object, then its bytecode buffer, and
+     finally point the downloaded object's `bytes' field at the
+     downloaded buffer.  */
+  write_inferior_memory (expr_addr, (unsigned char *) expr, sizeof (*expr));
+  write_inferior_memory (bytes_addr, expr->bytes, expr->length);
+  write_inferior_data_ptr (expr_addr + offsetof (struct agent_expr, bytes),
+			   bytes_addr);
+
+  return expr_addr;
+}
+
+/* Round V up to the next multiple of N (N must be a power of two). */
+#define UALIGN(V, N) (((V) + ((N) - 1)) & ~((N) - 1))
+
+/* Download (copy) every fast tracepoint, together with its condition
+   and actions, into the IPA heap in the inferior, linking the copies
+   into the IPA's `tracepoints' list. Records each copy's inferior
+   address in `obj_addr_on_target' so it can be matched back later
+   (see fast_tracepoint_from_ipa_tpoint_address). */
+
+static void
+download_tracepoints (void)
+{
+  CORE_ADDR tpptr = 0, prev_tpptr = 0;
+  struct tracepoint *tpoint;
+
+  /* Start out empty. */
+  write_inferior_data_ptr (ipa_sym_addrs.addr_tracepoints, 0);
+
+  for (tpoint = tracepoints; tpoint; tpoint = tpoint->next)
+    {
+      struct tracepoint target_tracepoint;
+
+      /* Only fast tracepoints are collected in-process. */
+      if (tpoint->type != fast_tracepoint)
+	continue;
+
+      target_tracepoint = *tpoint;
+
+      prev_tpptr = tpptr;
+      tpptr = target_malloc (sizeof (*tpoint));
+      tpoint->obj_addr_on_target = tpptr;
+
+      if (tpoint == tracepoints)
+	{
+	  /* First object in list, set the head pointer in the
+	     inferior. */
+	  write_inferior_data_ptr (ipa_sym_addrs.addr_tracepoints, tpptr);
+	}
+      else
+	{
+	  write_inferior_data_ptr (prev_tpptr + offsetof (struct tracepoint,
+							  next),
+				   tpptr);
+	}
+
+      /* Write the whole object. We'll fix up its pointers in a bit.
+	 Assume no next for now. This is fixed up above on the next
+	 iteration, if there's any. */
+      target_tracepoint.next = NULL;
+      /* Need to clear this here too, since we're downloading the
+	 tracepoints before clearing our own copy. */
+      target_tracepoint.hit_count = 0;
+
+      write_inferior_memory (tpptr, (unsigned char *) &target_tracepoint,
+			     sizeof (target_tracepoint));
+
+      if (tpoint->cond)
+	write_inferior_data_ptr (tpptr + offsetof (struct tracepoint,
+						   cond),
+				 download_agent_expr (tpoint->cond));
+
+      if (tpoint->numactions)
+	{
+	  int i;
+	  CORE_ADDR actions_array;
+
+	  /* The pointers array. */
+	  actions_array
+	    = target_malloc (sizeof (*tpoint->actions) * tpoint->numactions);
+	  write_inferior_data_ptr (tpptr + offsetof (struct tracepoint,
+						     actions),
+				   actions_array);
+
+	  /* Now for each pointer, download the action. */
+	  for (i = 0; i < tpoint->numactions; i++)
+	    {
+	      CORE_ADDR ipa_action = 0;
+	      struct tracepoint_action *action = tpoint->actions[i];
+
+	      switch (action->type)
+		{
+		case 'M':
+		  ipa_action
+		    = target_malloc (sizeof (struct collect_memory_action));
+		  write_inferior_memory (ipa_action,
+					 (unsigned char *) action,
+					 sizeof (struct collect_memory_action));
+		  break;
+		case 'R':
+		  ipa_action
+		    = target_malloc (sizeof (struct collect_registers_action));
+		  write_inferior_memory (ipa_action,
+					 (unsigned char *) action,
+					 sizeof (struct collect_registers_action));
+		  break;
+		case 'X':
+		  {
+		    CORE_ADDR expr;
+		    struct eval_expr_action *eaction
+		      = (struct eval_expr_action *) action;
+
+		    ipa_action = target_malloc (sizeof (*eaction));
+		    write_inferior_memory (ipa_action,
+					   (unsigned char *) eaction,
+					   sizeof (*eaction));
+
+		    /* The downloaded action needs its own downloaded
+		       copy of the agent expression. */
+		    expr = download_agent_expr (eaction->expr);
+		    write_inferior_data_ptr
+		      (ipa_action + offsetof (struct eval_expr_action, expr),
+		       expr);
+		    break;
+		  }
+		default:
+		  trace_debug ("unknown trace action '%c', ignoring",
+			       action->type);
+		  break;
+		}
+
+	      if (ipa_action != 0)
+		/* Fixed: the array stride is the size of one action
+		   pointer; the previous `sizeof (sizeof (...))'
+		   evaluated to sizeof (size_t), which is only
+		   accidentally correct where pointers and size_t have
+		   the same width. */
+		write_inferior_data_ptr
+		  (actions_array + i * sizeof (*tpoint->actions),
+		   ipa_action);
+	    }
+	}
+    }
+}
+
+/* Download (copy) all getter-less trace state variables into the
+   IPA heap in the inferior, linking the copies into the IPA's
+   `trace_state_variables' list. TSVs with a getter are skipped
+   (see below). */
+
+static void
+download_trace_state_variables (void)
+{
+ CORE_ADDR ptr = 0, prev_ptr = 0;
+ struct trace_state_variable *tsv;
+
+ /* Start out empty. */
+ write_inferior_data_ptr (ipa_sym_addrs.addr_trace_state_variables, 0);
+
+ for (tsv = trace_state_variables; tsv != NULL; tsv = tsv->next)
+ {
+ struct trace_state_variable target_tsv;
+
+ /* TSV's with a getter have been initialized equally in both the
+ inferior and GDBserver. Skip them. */
+ if (tsv->getter != NULL)
+ continue;
+
+ target_tsv = *tsv;
+
+ prev_ptr = ptr;
+ ptr = target_malloc (sizeof (*tsv));
+
+ if (tsv == trace_state_variables)
+ {
+ /* First object in list, set the head pointer in the
+ inferior. */
+
+ write_inferior_data_ptr (ipa_sym_addrs.addr_trace_state_variables,
+ ptr);
+ }
+ else
+ {
+ write_inferior_data_ptr (prev_ptr
+ + offsetof (struct trace_state_variable,
+ next),
+ ptr);
+ }
+
+ /* Write the whole object. We'll fix up its pointers in a bit.
+ Assume no next, fixup when needed. */
+ target_tsv.next = NULL;
+
+ write_inferior_memory (ptr, (unsigned char *) &target_tsv,
+ sizeof (target_tsv));
+
+ if (tsv->name != NULL)
+ {
+ size_t size = strlen (tsv->name) + 1;
+ CORE_ADDR name_addr = target_malloc (size);
+ write_inferior_memory (name_addr,
+ (unsigned char *) tsv->name, size);
+ write_inferior_data_ptr (ptr
+ + offsetof (struct trace_state_variable,
+ name),
+ name_addr);
+ }
+
+ /* NOTE(review): unreachable -- TSVs with a getter were skipped by
+ the `continue' at the top of the loop; presumably a defensive
+ leftover. */
+ if (tsv->getter != NULL)
+ {
+ fatal ("what to do with these?");
+ }
+ }
+
+ if (prev_ptr != 0)
+ {
+ /* Fixup the next pointer in the last item in the list.
+ NOTE(review): looks redundant -- `target_tsv.next' was already
+ written as NULL for every item above; confirm before
+ removing. */
+ write_inferior_data_ptr (prev_ptr + offsetof (struct trace_state_variable,
+ next), 0);
+ }
+}
+
+/* Upload complete trace frames out of the IP Agent's trace buffer
+ into GDBserver's trace buffer. This always uploads either all or
+ no trace frames. This is the counterpart of
+ `trace_alloc_trace_buffer'. See its description of the atomic
+ synching mechanism. */
+
+/* Upload complete trace frames out of the IP Agent's circular trace
+   buffer into GDBserver's trace buffer, then hand the reclaimed
+   space back to the IPA. Returns silently if the IPA's state can't
+   be read (e.g. the current thread is running). */
+
+static void
+upload_fast_traceframes (void)
+{
+  unsigned int ipa_traceframe_read_count, ipa_traceframe_write_count;
+  unsigned int ipa_traceframe_read_count_racy, ipa_traceframe_write_count_racy;
+  CORE_ADDR tf;
+  struct ipa_trace_buffer_control ipa_trace_buffer_ctrl;
+  unsigned int curr_tbctrl_idx;
+  unsigned int ipa_trace_buffer_ctrl_curr;
+  unsigned int ipa_trace_buffer_ctrl_curr_old;
+  CORE_ADDR ipa_trace_buffer_ctrl_addr;
+  struct breakpoint *about_to_request_buffer_space_bkpt;
+  CORE_ADDR ipa_trace_buffer_lo;
+  CORE_ADDR ipa_trace_buffer_hi;
+
+  if (read_inferior_uinteger (ipa_sym_addrs.addr_traceframe_read_count,
+			      &ipa_traceframe_read_count_racy))
+    {
+      /* This will happen in most targets if the current thread is
+	 running. */
+      return;
+    }
+
+  if (read_inferior_uinteger (ipa_sym_addrs.addr_traceframe_write_count,
+			      &ipa_traceframe_write_count_racy))
+    return;
+
+  trace_debug ("ipa_traceframe_count (racy area): %d (w=%d, r=%d)",
+	       ipa_traceframe_write_count_racy - ipa_traceframe_read_count_racy,
+	       ipa_traceframe_write_count_racy, ipa_traceframe_read_count_racy);
+
+  if (ipa_traceframe_write_count_racy == ipa_traceframe_read_count_racy)
+    return;
+
+  about_to_request_buffer_space_bkpt
+    = set_breakpoint_at (ipa_sym_addrs.addr_about_to_request_buffer_space,
+			 NULL);
+
+  /* NOTE(review): the early `return's below leave the breakpoint just
+     installed in place -- confirm whether that matters on these
+     inferior-memory-access-failure paths. */
+
+  if (read_inferior_uinteger (ipa_sym_addrs.addr_trace_buffer_ctrl_curr,
+			      &ipa_trace_buffer_ctrl_curr))
+    return;
+
+  ipa_trace_buffer_ctrl_curr_old = ipa_trace_buffer_ctrl_curr;
+
+  curr_tbctrl_idx = ipa_trace_buffer_ctrl_curr & ~GDBSERVER_FLUSH_COUNT_MASK;
+
+  {
+    unsigned int prev, counter;
+
+    /* Update the token, with new counters, and the GDBserver stamp
+       bit. Always reuse the current TBC index. */
+    prev = ipa_trace_buffer_ctrl_curr & 0x0007ff00;
+    counter = (prev + 0x100) & 0x0007ff00;
+
+    ipa_trace_buffer_ctrl_curr = (0x80000000
+				  | (prev << 12)
+				  | counter
+				  | curr_tbctrl_idx);
+  }
+
+  if (write_inferior_uinteger (ipa_sym_addrs.addr_trace_buffer_ctrl_curr,
+			       ipa_trace_buffer_ctrl_curr))
+    return;
+
+  trace_debug ("Lib: Committed %08x -> %08x",
+	       ipa_trace_buffer_ctrl_curr_old,
+	       ipa_trace_buffer_ctrl_curr);
+
+  /* Re-read these, now that we've installed the
+     `about_to_request_buffer_space' breakpoint/lock. A thread could
+     have finished a traceframe between the last read of these
+     counters and setting the breakpoint above. If we start
+     uploading, we never want to leave this function with
+     traceframe_read_count != 0, otherwise, GDBserver could end up
+     incrementing the counter tokens more than once (due to event loop
+     nesting), which would break the IP agent's "effective" detection
+     (see trace_alloc_trace_buffer). */
+  if (read_inferior_uinteger (ipa_sym_addrs.addr_traceframe_read_count,
+			      &ipa_traceframe_read_count))
+    return;
+  if (read_inferior_uinteger (ipa_sym_addrs.addr_traceframe_write_count,
+			      &ipa_traceframe_write_count))
+    return;
+
+  if (debug_threads)
+    {
+      trace_debug ("ipa_traceframe_count (blocked area): %d (w=%d, r=%d)",
+		   ipa_traceframe_write_count - ipa_traceframe_read_count,
+		   ipa_traceframe_write_count, ipa_traceframe_read_count);
+
+      if (ipa_traceframe_write_count != ipa_traceframe_write_count_racy
+	  || ipa_traceframe_read_count != ipa_traceframe_read_count_racy)
+	trace_debug ("note that ipa_traceframe_count's parts changed");
+    }
+
+  /* Get the address of the current TBC object (the IP agent has an
+     array of 3 such objects). The index is stored in the TBC
+     token. */
+  ipa_trace_buffer_ctrl_addr = ipa_sym_addrs.addr_trace_buffer_ctrl;
+  ipa_trace_buffer_ctrl_addr
+    += sizeof (struct ipa_trace_buffer_control) * curr_tbctrl_idx;
+
+  if (read_inferior_memory (ipa_trace_buffer_ctrl_addr,
+			    (unsigned char *) &ipa_trace_buffer_ctrl,
+			    sizeof (struct ipa_trace_buffer_control)))
+    return;
+
+  if (read_inferior_data_pointer (ipa_sym_addrs.addr_trace_buffer_lo,
+				  &ipa_trace_buffer_lo))
+    return;
+  if (read_inferior_data_pointer (ipa_sym_addrs.addr_trace_buffer_hi,
+				  &ipa_trace_buffer_hi))
+    return;
+
+  /* Offsets are easier to grok for debugging than raw addresses,
+     especially for the small trace buffer sizes that are useful for
+     testing. */
+  trace_debug ("Lib: Trace buffer [%d] start=%d free=%d "
+	       "endfree=%d wrap=%d hi=%d",
+	       curr_tbctrl_idx,
+	       (int) (ipa_trace_buffer_ctrl.start - ipa_trace_buffer_lo),
+	       (int) (ipa_trace_buffer_ctrl.free - ipa_trace_buffer_lo),
+	       (int) (ipa_trace_buffer_ctrl.end_free - ipa_trace_buffer_lo),
+	       (int) (ipa_trace_buffer_ctrl.wrap - ipa_trace_buffer_lo),
+	       (int) (ipa_trace_buffer_hi - ipa_trace_buffer_lo));
+
+  /* Note that the IPA's buffer is always circular. */
+
+#define IPA_FIRST_TRACEFRAME() (ipa_trace_buffer_ctrl.start)
+
+#define IPA_NEXT_TRACEFRAME_1(TF, TFOBJ)		\
+  ((TF) + sizeof (struct traceframe) + (TFOBJ)->data_size)
+
+#define IPA_NEXT_TRACEFRAME(TF, TFOBJ)					\
+  (IPA_NEXT_TRACEFRAME_1 (TF, TFOBJ)					\
+   - ((IPA_NEXT_TRACEFRAME_1 (TF, TFOBJ) >= ipa_trace_buffer_ctrl.wrap) \
+      ? (ipa_trace_buffer_ctrl.wrap - ipa_trace_buffer_lo)		\
+      : 0))
+
+  tf = IPA_FIRST_TRACEFRAME ();
+
+  while (ipa_traceframe_write_count - ipa_traceframe_read_count)
+    {
+      struct tracepoint *tpoint;
+      struct traceframe *tframe;
+      unsigned char *block;
+      struct traceframe ipa_tframe;
+
+      if (read_inferior_memory (tf, (unsigned char *) &ipa_tframe,
+				offsetof (struct traceframe, data)))
+	error ("Uploading: couldn't read traceframe at %s\n", paddress (tf));
+
+      if (ipa_tframe.tpnum == 0)
+	fatal ("Uploading: No (more) fast traceframes, but "
+	       "ipa_traceframe_count == %u??\n",
+	       ipa_traceframe_write_count - ipa_traceframe_read_count);
+
+      /* Note that this will be incorrect for multi-location
+	 tracepoints... */
+      tpoint = find_next_tracepoint_by_number (NULL, ipa_tframe.tpnum);
+
+      tframe = add_traceframe (tpoint);
+      if (tframe == NULL)
+	{
+	  trace_buffer_is_full = 1;
+	  trace_debug ("Uploading: trace buffer is full");
+	}
+      else
+	{
+	  /* Copy the whole set of blocks in one go for now. FIXME:
+	     split this in smaller blocks. */
+	  block = add_traceframe_block (tframe, ipa_tframe.data_size);
+	  if (block != NULL)
+	    {
+	      if (read_inferior_memory (tf + offsetof (struct traceframe, data),
+					block, ipa_tframe.data_size))
+		error ("Uploading: Couldn't read traceframe data at %s\n",
+		       paddress (tf + offsetof (struct traceframe, data)));
+	    }
+	  else
+	    /* Fixed: this message used to be emitted unconditionally,
+	       even when the block did fit. */
+	    trace_debug ("Uploading: traceframe didn't fit");
+
+	  finish_traceframe (tframe);
+	}
+
+      tf = IPA_NEXT_TRACEFRAME (tf, &ipa_tframe);
+
+      /* If we freed the traceframe that wrapped around, go back
+	 to the non-wrap case. */
+      if (tf < ipa_trace_buffer_ctrl.start)
+	{
+	  trace_debug ("Lib: Discarding past the wraparound");
+	  ipa_trace_buffer_ctrl.wrap = ipa_trace_buffer_hi;
+	}
+      ipa_trace_buffer_ctrl.start = tf;
+      ipa_trace_buffer_ctrl.end_free = ipa_trace_buffer_ctrl.start;
+      ++ipa_traceframe_read_count;
+
+      if (ipa_trace_buffer_ctrl.start == ipa_trace_buffer_ctrl.free
+	  && ipa_trace_buffer_ctrl.start == ipa_trace_buffer_ctrl.end_free)
+	{
+	  trace_debug ("Lib: buffer is fully empty. "
+		       "Trace buffer [%d] start=%d free=%d endfree=%d",
+		       curr_tbctrl_idx,
+		       (int) (ipa_trace_buffer_ctrl.start
+			      - ipa_trace_buffer_lo),
+		       (int) (ipa_trace_buffer_ctrl.free
+			      - ipa_trace_buffer_lo),
+		       (int) (ipa_trace_buffer_ctrl.end_free
+			      - ipa_trace_buffer_lo));
+
+	  ipa_trace_buffer_ctrl.start = ipa_trace_buffer_lo;
+	  ipa_trace_buffer_ctrl.free = ipa_trace_buffer_lo;
+	  ipa_trace_buffer_ctrl.end_free = ipa_trace_buffer_hi;
+	  ipa_trace_buffer_ctrl.wrap = ipa_trace_buffer_hi;
+	}
+
+      trace_debug ("Uploaded a traceframe\n"
+		   "Lib: Trace buffer [%d] start=%d free=%d "
+		   "endfree=%d wrap=%d hi=%d",
+		   curr_tbctrl_idx,
+		   (int) (ipa_trace_buffer_ctrl.start - ipa_trace_buffer_lo),
+		   (int) (ipa_trace_buffer_ctrl.free - ipa_trace_buffer_lo),
+		   (int) (ipa_trace_buffer_ctrl.end_free - ipa_trace_buffer_lo),
+		   (int) (ipa_trace_buffer_ctrl.wrap - ipa_trace_buffer_lo),
+		   (int) (ipa_trace_buffer_hi - ipa_trace_buffer_lo));
+    }
+
+  if (write_inferior_memory (ipa_trace_buffer_ctrl_addr,
+			     (unsigned char *) &ipa_trace_buffer_ctrl,
+			     sizeof (struct ipa_trace_buffer_control)))
+    return;
+
+  write_inferior_integer (ipa_sym_addrs.addr_traceframe_read_count,
+			  ipa_traceframe_read_count);
+
+  trace_debug ("Done uploading traceframes [%d]\n", curr_tbctrl_idx);
+
+  pause_all (1);
+  cancel_breakpoints ();
+
+  delete_breakpoint (about_to_request_buffer_space_bkpt);
+  about_to_request_buffer_space_bkpt = NULL;
+
+  unpause_all (1);
+
+  if (trace_buffer_is_full)
+    stop_tracing ();
+}
+#endif
+
+#ifdef IN_PROCESS_AGENT
+
+#include <sys/mman.h>
+#include <fcntl.h>
+
+/* Buffers the IPA exports to GDBserver: the tracepoint-object heap
+ (see target_malloc on the GDBserver side) and the bounds of the
+ jump pad area (see fast_tracepoint_collecting). GDBserver locates
+ them through `ipa_sym_addrs'. */
+IP_AGENT_EXPORT char *gdb_tp_heap_buffer;
+IP_AGENT_EXPORT char *gdb_jump_pad_buffer;
+IP_AGENT_EXPORT char *gdb_jump_pad_buffer_end;
+
+/* Run the IPA's tracepoint module initialization when this library
+ is loaded into the inferior. */
+static void __attribute__ ((constructor))
+initialize_tracepoint_ftlib (void)
+{
+ initialize_tracepoint ();
+}
+
+#endif
+