+ if (seg == NULL)
+ continue;
+ fprintf(stderr, "SECTION %s\n", seg->name);
+ struct trampoline_frag *tf = ts->trampoline_list.next;
+ for ( ; tf; tf = tf->next)
+ {
+ if (tf->fragP == NULL)
+ continue;
+ fprintf(stderr, " 0x%08x: fix=%d, jump_around=%s\n",
+ (int)tf->fragP->fr_address, (int)tf->fragP->fr_fix,
+ tf->needs_jump_around ? "T" : "F");
+ }
+ }
+}
+
+static void dump_litpools (void) __attribute__ ((unused));
+
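+ /* Debug helper: print each literal pool candidate in every segment,
+ along with its priorities and the number of 4-byte literals that
+ follow it before the pool-end frag.  */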
+static void
+dump_litpools (void)
+{
+ struct litpool_seg *lps = litpool_seg_list.next;
+ struct litpool_frag *lpf;
+
+ for ( ; lps ; lps = lps->next )
+ {
+ printf("litpool seg %s\n", lps->seg->name);
+ for ( lpf = lps->frag_list.next; lpf->fragP; lpf = lpf->next )
+ {
+ fragS *litfrag = lpf->fragP->fr_next;
+ int count = 0;
+ while (litfrag && litfrag->fr_subtype != RELAX_LITERAL_POOL_END)
+ {
+ if (litfrag->fr_fix == 4)
+ count++;
+ litfrag = litfrag->fr_next;
+ }
+ printf(" %ld <%d:%d> (%d) [%d]: ",
+ lpf->addr, lpf->priority, lpf->original_priority,
+ lpf->fragP->fr_line, count);
+ //dump_frag(lpf->fragP);
+ }
+ }
+}
+
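+ /* Track frags in the current segment as potential literal pool sites.
+ With CREATE, emit the frags that delimit a pool here: unconditionally
+ when ONLY_IF_NEEDED is false, otherwise only once the frag count since
+ the last pool exceeds the --auto-litpool-limit threshold.  */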
+static void
+xtensa_maybe_create_literal_pool_frag (bfd_boolean create,
+ bfd_boolean only_if_needed)
+{
+ struct litpool_seg *lps = litpool_seg_list.next;
+ fragS *fragP;
+ struct litpool_frag *lpf;
+ bfd_boolean needed = FALSE;
+
+ if (use_literal_section || !auto_litpools)
+ return;
+
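+ /* Find the tracking entry for the current segment, if there is one.  */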
+ for ( ; lps ; lps = lps->next )
+ {
+ if (lps->seg == now_seg)
+ break;
+ }
+
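+ /* First candidate in this segment: allocate a tracking entry with an
+ empty circular frag list and link it into the global list.  */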
+ if (lps == NULL)
+ {
+ lps = (struct litpool_seg *) xcalloc (1, sizeof (struct litpool_seg));
+ lps->next = litpool_seg_list.next;
+ litpool_seg_list.next = lps;
+ lps->seg = now_seg;
+ lps->frag_list.next = &lps->frag_list;
+ lps->frag_list.prev = &lps->frag_list;
+ }
+
+ lps->frag_count++;
+
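+ /* Decide whether a pool really has to be emitted at this point.  */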
+ if (create)
+ {
+ if (only_if_needed)
+ {
+ if (past_xtensa_end
+ || !use_transform ()
+ || frag_now->tc_frag_data.is_no_transform)
+ {
+ return;
+ }
+ if (auto_litpool_limit <= 0)
+ {
+ /* Don't create a litpool based only on frag count. */
+ return;
+ }
+ else if (lps->frag_count > auto_litpool_limit)
+ {
+ needed = TRUE;
+ }
+ else
+ {
+ return;
+ }
+ }
+ else
+ {
+ needed = TRUE;
+ }
+ }
+
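+ /* Emit the pool site now: close the current frag and bracket the spot
+ with POOL_BEGIN/POOL_END machine-dependent frags that later passes
+ can fill with literals.  */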
+ if (needed)
+ {
+ int size = (only_if_needed) ? 3 : 0; /* Space for a "j" insn that
+ jumps around the pool.  */
+ /* Create a potential site for a literal pool. */
+ frag_wane (frag_now);
+ frag_new (0);
+ xtensa_set_frag_assembly_state (frag_now);
+ fragP = frag_now;
+ fragP->tc_frag_data.lit_frchain = frchain_now;
+ fragP->tc_frag_data.literal_frag = fragP;
+ frag_var (rs_machine_dependent, size, size,
+ (only_if_needed) ?
+ RELAX_LITERAL_POOL_CANDIDATE_BEGIN :
+ RELAX_LITERAL_POOL_BEGIN,
+ NULL, 0, NULL);
+ frag_now->tc_frag_data.lit_seg = now_seg;
+ frag_variant (rs_machine_dependent, 0, 0,
+ RELAX_LITERAL_POOL_END, NULL, 0, NULL);
+ xtensa_set_frag_assembly_state (frag_now);
+ }
+ else
+ {
+ /* The caller is emitting the RELAX_LITERAL_POOL_BEGIN frag itself;
+ just record it here.  */
+ fragP = frag_now;
+ }
+
+ lpf = (struct litpool_frag *) xmalloc (sizeof (struct litpool_frag));
+ /* Insert at tail of circular list. */
+ lpf->addr = 0;
+ lps->frag_list.prev->next = lpf;
+ lpf->next = &lps->frag_list;
+ lpf->prev = lps->frag_list.prev;
+ lps->frag_list.prev = lpf;
+ lpf->fragP = fragP;
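+ /* The priority records how the site arose: 3 = forced by reaching
+ --auto-litpool-limit, 2 = created unconditionally, 1 = a pool frag
+ emitted by the caller.  */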
+ lpf->priority = (needed) ? ((only_if_needed) ? 3 : 2) : 1;
+ lpf->original_priority = lpf->priority;
+
+ lps->frag_count = 0;
+}
+
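+ /* Walk every frag: drop "desire align if target" frags that directly
+ follow another alignment frag, keep narrow branch targets from being
+ widened so they stay easy to align, and flag unreachable frags for
+ the later relaxation passes.  */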
+static void
+xtensa_cleanup_align_frags (void)
+{
+ frchainS *frchP;
+ asection *s;
+
+ for (s = stdoutput->sections; s; s = s->next)
+ for (frchP = seg_info (s)->frchainP; frchP; frchP = frchP->frch_next)
+ {
+ fragS *fragP;
+ /* Walk over all of the fragments in a subsection. */
+ for (fragP = frchP->frch_root; fragP; fragP = fragP->fr_next)
+ {
+ if ((fragP->fr_type == rs_align
+ || fragP->fr_type == rs_align_code
+ || (fragP->fr_type == rs_machine_dependent
+ && (fragP->fr_subtype == RELAX_DESIRE_ALIGN
+ || fragP->fr_subtype == RELAX_DESIRE_ALIGN_IF_TARGET)))
+ && fragP->fr_fix == 0)
+ {
+ fragS *next = fragP->fr_next;
+
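+ /* This frag already requests alignment, so any zero-size
+ DESIRE_ALIGN_IF_TARGET frags that immediately follow it are
+ redundant; turn them into plain fill frags.  */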
+ while (next
+ && next->fr_fix == 0
+ && next->fr_type == rs_machine_dependent
+ && next->fr_subtype == RELAX_DESIRE_ALIGN_IF_TARGET)
+ {
+ frag_wane (next);
+ next = next->fr_next;
+ }
+ }
+ /* If we don't widen branch targets, then they
+ will be easier to align. */
+ if (fragP->tc_frag_data.is_branch_target
+ && fragP->fr_opcode == fragP->fr_literal
+ && fragP->fr_type == rs_machine_dependent
+ && fragP->fr_subtype == RELAX_SLOTS
+ && fragP->tc_frag_data.slot_subtypes[0] == RELAX_NARROW)
+ frag_wane (fragP);
+ if (fragP->fr_type == rs_machine_dependent
+ && fragP->fr_subtype == RELAX_UNREACHABLE)
+ fragP->tc_frag_data.is_unreachable = TRUE;
+ }
+ }
+}
+
+
+/* Re-process all of the fragments looking to convert all of the
+ RELAX_DESIRE_ALIGN_IF_TARGET fragments. If there is a branch
+ target in the next fragment, convert this to RELAX_DESIRE_ALIGN.
+ Otherwise, convert to a .fill 0. */
+
+static void
+xtensa_fix_target_frags (void)
+{
+ frchainS *frchP;
+ asection *s;
+
+ /* When this routine is called, all of the subsections are still intact
+ so we walk over subsections instead of sections. */
+ for (s = stdoutput->sections; s; s = s->next)
+ for (frchP = seg_info (s)->frchainP; frchP; frchP = frchP->frch_next)
+ {
+ fragS *fragP;
+
+ /* Walk over all of the fragments in a subsection. */
+ for (fragP = frchP->frch_root; fragP; fragP = fragP->fr_next)
+ {
+ if (fragP->fr_type == rs_machine_dependent
+ && fragP->fr_subtype == RELAX_DESIRE_ALIGN_IF_TARGET)
+ {
+ if (next_frag_is_branch_target (fragP))
+ fragP->fr_subtype = RELAX_DESIRE_ALIGN;
+ else
+ frag_wane (fragP);
+ }
+ }
+ }
+}
+
+
+static bfd_boolean is_narrow_branch_guaranteed_in_range (fragS *, TInsn *);
+
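+ /* Find 2-byte branches whose targets are provably within reach and
+ mark them RELAX_NARROW, so they can be used for alignment; see the
+ comment below for why only these branches are safe to use.  */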
+static void
+xtensa_mark_narrow_branches (void)
+{
+ frchainS *frchP;
+ asection *s;
+
+ for (s = stdoutput->sections; s; s = s->next)
+ for (frchP = seg_info (s)->frchainP; frchP; frchP = frchP->frch_next)
+ {
+ fragS *fragP;
+ /* Walk over all of the fragments in a subsection. */
+ for (fragP = frchP->frch_root; fragP; fragP = fragP->fr_next)
+ {
+ if (fragP->fr_type == rs_machine_dependent
+ && fragP->fr_subtype == RELAX_SLOTS
+ && fragP->tc_frag_data.slot_subtypes[0] == RELAX_IMMED)
+ {
+ vliw_insn vinsn;
+
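+ /* Decode the instruction bytes in this frag and recover the slot 0
+ immediate from the frag's saved symbol information.  */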
+ vinsn_from_chars (&vinsn, fragP->fr_opcode);
+ tinsn_immed_from_frag (&vinsn.slots[0], fragP, 0);
+
+ if (vinsn.num_slots == 1
+ && xtensa_opcode_is_branch (xtensa_default_isa,
+ vinsn.slots[0].opcode) == 1
+ && xg_get_single_size (vinsn.slots[0].opcode) == 2
+ && is_narrow_branch_guaranteed_in_range (fragP,
+ &vinsn.slots[0]))
+ {
+ fragP->fr_subtype = RELAX_SLOTS;
+ fragP->tc_frag_data.slot_subtypes[0] = RELAX_NARROW;
+ fragP->tc_frag_data.is_aligning_branch = 1;
+ }
+ }
+ }
+ }
+}
+
+
+/* A branch is typically widened only when its target is out of
+ range. However, we would like to widen them to align a subsequent
+ branch target when possible.
+
+ Because the branch relaxation code is so convoluted, the optimal solution
+ (combining the two cases) is difficult to get right in all circumstances.
+ We therefore go with an "almost as good" solution, where we use
+ narrow branches for alignment only when they definitely will not
+ expand into a jump plus a branch.  These functions find and mark
+ those cases.  */
+
+/* The range in bytes of BNEZ.N and BEQZ.N. The target operand is encoded
+ as PC + 4 + imm6, where imm6 is a 6-bit immediate ranging from 0 to 63.
+ We start counting beginning with the frag after the 2-byte branch, so the
+ maximum offset is (4 - 2) + 63 = 65. */
+#define MAX_IMMED6 65
+
+static offsetT unrelaxed_frag_max_size (fragS *);
+
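+ /* Return TRUE if the narrow (2-byte) branch in FRAGP described by
+ TINSN is guaranteed to reach its target even if every frag between
+ the branch and the target grows to its maximum size.  */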
+static bfd_boolean
+is_narrow_branch_guaranteed_in_range (fragS *fragP, TInsn *tinsn)
+{
+ const expressionS *exp = &tinsn->tok[1];
+ symbolS *symbolP = exp->X_add_symbol;
+ offsetT max_distance = exp->X_add_number;
+ fragS *target_frag;
+
+ if (exp->X_op != O_symbol)
+ return FALSE;
+
+ target_frag = symbol_get_frag (symbolP);