📄 tc-mips.c
          /* The previous instruction reads the LO register; if the
             current instruction writes to the LO register, we must
             insert two NOPs.  Some newer processors have interlocks.
             Also the tx39's multiply instructions can be executed
             immediately after a read from HI/LO (without the delay),
             though the tx39's divide insns still do require the
             delay.  */
          if (! (hilo_interlocks
                 || (mips_cpu == CPU_R3900 && (pinfo & INSN_MULT)))
              && (mips_optimize == 0
                  || (pinfo & INSN_WRITE_LO)))
            nops += 2;
          /* Most mips16 branch insns don't have a delay slot.
             If a read from LO is immediately followed by a branch
             to a write to LO we have a read followed by a write
             less than 2 insns away.  We assume the target of a
             branch might be a write to LO, and insert a nop
             between a read and an immediately following branch.  */
          else if (mips_opts.mips16
                   && (mips_optimize == 0
                       || (pinfo & MIPS16_INSN_BRANCH)))
            nops += 1;
        }
      else if (prev_insn.insn_mo->pinfo & INSN_READ_HI)
        {
          /* The previous instruction reads the HI register; if the
             current instruction writes to the HI register, we must
             insert two NOPs.  Some newer processors have interlocks.
             Also note the tx39 multiply comment above.  */
          if (! (hilo_interlocks
                 || (mips_cpu == CPU_R3900 && (pinfo & INSN_MULT)))
              && (mips_optimize == 0
                  || (pinfo & INSN_WRITE_HI)))
            nops += 2;
          /* Most mips16 branch insns don't have a delay slot.
             If a read from HI is immediately followed by a branch
             to a write to HI we have a read followed by a write
             less than 2 insns away.  We assume the target of a
             branch might be a write to HI, and insert a nop
             between a read and an immediately following branch.  */
          else if (mips_opts.mips16
                   && (mips_optimize == 0
                       || (pinfo & MIPS16_INSN_BRANCH)))
            nops += 1;
        }

      /* If the previous instruction was in a noreorder section, then
         we don't want to insert the nop after all.  */
      /* Itbl support may require additional care here.  */
      if (prev_insn_unreordered)
        nops = 0;

      /* There are two cases which require two intervening
         instructions: 1) setting the condition codes using a move to
         coprocessor instruction which requires a general coprocessor
         delay and then reading the condition codes 2) reading the HI
         or LO register and then writing to it (except on processors
         which have interlocks).  If we are not already emitting a NOP
         instruction, we must check for these cases compared to the
         instruction previous to the previous instruction.  */
      if ((! mips_opts.mips16
           && ISA_HAS_COPROC_DELAYS (mips_opts.isa)
           && (prev_prev_insn.insn_mo->pinfo & INSN_COPROC_MOVE_DELAY)
           && (prev_prev_insn.insn_mo->pinfo & INSN_WRITE_COND_CODE)
           && (pinfo & INSN_READ_COND_CODE)
           && ! cop_interlocks)
          || ((prev_prev_insn.insn_mo->pinfo & INSN_READ_LO)
              && (pinfo & INSN_WRITE_LO)
              && ! (hilo_interlocks
                    || (mips_cpu == CPU_R3900 && (pinfo & INSN_MULT))))
          || ((prev_prev_insn.insn_mo->pinfo & INSN_READ_HI)
              && (pinfo & INSN_WRITE_HI)
              && ! (hilo_interlocks
                    || (mips_cpu == CPU_R3900 && (pinfo & INSN_MULT)))))
        prev_prev_nop = 1;
      else
        prev_prev_nop = 0;

      if (prev_prev_insn_unreordered)
        prev_prev_nop = 0;

      if (prev_prev_nop && nops == 0)
        ++nops;

      /* If we are being given a nop instruction, don't bother with
         one of the nops we would otherwise output.  This will only
         happen when a nop instruction is used with mips_optimize set
         to 0.  */
      if (nops > 0
          && ! mips_opts.noreorder
          && ip->insn_opcode == (unsigned) (mips_opts.mips16 ? 0x6500 : 0))
        --nops;
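      /* Illustrative note, not part of the original source: on a CPU
         without HI/LO interlocks a sequence such as

             mflo  $2         # reads LO
             mult  $3,$4      # writes HI/LO

         needs two instructions between the read and the write, which
         is why the checks above count two NOPs; the R3900 multiply
         and the mips16 branch cases relax or reshape that rule as
         described in the comments.  */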
      /* Now emit the right number of NOP instructions.  */
      if (nops > 0 && ! mips_opts.noreorder)
        {
          fragS *old_frag;
          unsigned long old_frag_offset;
          int i;
          struct insn_label_list *l;

          old_frag = frag_now;
          old_frag_offset = frag_now_fix ();

          for (i = 0; i < nops; i++)
            emit_nop ();

          if (listing)
            {
              listing_prev_line ();
              /* We may be at the start of a variant frag.  In case we
                 are, make sure there is enough space for the frag
                 after the frags created by listing_prev_line.  The
                 argument to frag_grow here must be at least as large
                 as the argument to all other calls to frag_grow in
                 this file.  We don't have to worry about being in the
                 middle of a variant frag, because the variants insert
                 all needed nop instructions themselves.  */
              frag_grow (40);
            }

          for (l = insn_labels; l != NULL; l = l->next)
            {
              valueT val;

              assert (S_GET_SEGMENT (l->label) == now_seg);
              symbol_set_frag (l->label, frag_now);
              val = (valueT) frag_now_fix ();
              /* mips16 text labels are stored as odd.  */
              if (mips_opts.mips16)
                val += 1;
              S_SET_VALUE (l->label, val);
            }

#ifndef NO_ECOFF_DEBUGGING
          if (ECOFF_DEBUGGING)
            ecoff_fix_loc (old_frag, old_frag_offset);
#endif
        }
      else if (prev_nop_frag != NULL)
        {
          /* We have a frag holding nops we may be able to remove.  If
             we don't need any nops, we can decrease the size of
             prev_nop_frag by the size of one instruction.  If we do
             need some nops, we count them in prev_nop_frag_required.  */
          if (prev_nop_frag_since == 0)
            {
              if (nops == 0)
                {
                  prev_nop_frag->fr_fix -= mips_opts.mips16 ? 2 : 4;
                  --prev_nop_frag_holds;
                }
              else
                prev_nop_frag_required += nops;
            }
          else
            {
              if (prev_prev_nop == 0)
                {
                  prev_nop_frag->fr_fix -= mips_opts.mips16 ? 2 : 4;
                  --prev_nop_frag_holds;
                }
              else
                ++prev_nop_frag_required;
            }

          if (prev_nop_frag_holds <= prev_nop_frag_required)
            prev_nop_frag = NULL;

          ++prev_nop_frag_since;

          /* Sanity check: by the time we reach the second instruction
             after prev_nop_frag, we should have used up all the nops
             one way or another.  */
          assert (prev_nop_frag_since <= 1 || prev_nop_frag == NULL);
        }
    }

  if (reloc_type > BFD_RELOC_UNUSED)
    {
      /* We need to set up a variant frag.  */
      assert (mips_opts.mips16 && address_expr != NULL);
      f = frag_var (rs_machine_dependent, 4, 0,
                    RELAX_MIPS16_ENCODE (reloc_type - BFD_RELOC_UNUSED,
                                         mips16_small, mips16_ext,
                                         (prev_pinfo
                                          & INSN_UNCOND_BRANCH_DELAY),
                                         (prev_insn_reloc_type
                                          == BFD_RELOC_MIPS16_JMP)),
                    make_expr_symbol (address_expr), (offsetT) 0,
                    (char *) NULL);
    }
  else if (place != NULL)
    f = place;
  else if (mips_opts.mips16
           && ! ip->use_extend
           && reloc_type != BFD_RELOC_MIPS16_JMP)
    {
      /* Make sure there is enough room to swap this instruction with
         a following jump instruction.  */
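      /* Illustrative note, not part of the original source: the six
         bytes requested below presumably cover the 2-byte mips16
         instruction emitted here plus a 4-byte jump that the
         branch-swapping logic later in this function may move in
         front of it.  */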
      frag_grow (6);
      f = frag_more (2);
    }
  else
    {
      if (mips_opts.mips16
          && mips_opts.noreorder
          && (prev_pinfo & INSN_UNCOND_BRANCH_DELAY) != 0)
        as_warn (_("extended instruction in delay slot"));

      f = frag_more (4);
    }

  fixp = NULL;
  if (address_expr != NULL && reloc_type < BFD_RELOC_UNUSED)
    {
      if (address_expr->X_op == O_constant)
        {
          switch (reloc_type)
            {
            case BFD_RELOC_32:
              ip->insn_opcode |= address_expr->X_add_number;
              break;

            case BFD_RELOC_LO16:
              ip->insn_opcode |= address_expr->X_add_number & 0xffff;
              break;

            case BFD_RELOC_MIPS_JMP:
              if ((address_expr->X_add_number & 3) != 0)
                as_bad (_("jump to misaligned address (0x%lx)"),
                        (unsigned long) address_expr->X_add_number);
              ip->insn_opcode |= (address_expr->X_add_number >> 2) & 0x3ffffff;
              break;

            case BFD_RELOC_MIPS16_JMP:
              if ((address_expr->X_add_number & 3) != 0)
                as_bad (_("jump to misaligned address (0x%lx)"),
                        (unsigned long) address_expr->X_add_number);
              ip->insn_opcode |=
                (((address_expr->X_add_number & 0x7c0000) << 3)
                 | ((address_expr->X_add_number & 0xf800000) >> 7)
                 | ((address_expr->X_add_number & 0x3fffc) >> 2));
              break;

            case BFD_RELOC_16_PCREL_S2:
              goto need_reloc;

            default:
              internalError ();
            }
        }
      else
        {
        need_reloc:
          /* Don't generate a reloc if we are writing into a variant
             frag.  */
          if (place == NULL)
            {
              fixp = fix_new_exp (frag_now, f - frag_now->fr_literal, 4,
                                  address_expr,
                                  reloc_type == BFD_RELOC_16_PCREL_S2,
                                  reloc_type);
              if (unmatched_hi)
                {
                  struct mips_hi_fixup *hi_fixup;

                  assert (reloc_type == BFD_RELOC_HI16_S);
                  hi_fixup = ((struct mips_hi_fixup *)
                              xmalloc (sizeof (struct mips_hi_fixup)));
                  hi_fixup->fixp = fixp;
                  hi_fixup->seg = now_seg;
                  hi_fixup->next = mips_hi_fixup_list;
                  mips_hi_fixup_list = hi_fixup;
                }
            }
        }
    }

  if (! mips_opts.mips16)
    md_number_to_chars (f, ip->insn_opcode, 4);
  else if (reloc_type == BFD_RELOC_MIPS16_JMP)
    {
      md_number_to_chars (f, ip->insn_opcode >> 16, 2);
      md_number_to_chars (f + 2, ip->insn_opcode & 0xffff, 2);
    }
  else
    {
      if (ip->use_extend)
        {
          md_number_to_chars (f, 0xf000 | ip->extend, 2);
          f += 2;
        }
      md_number_to_chars (f, ip->insn_opcode, 2);
    }

  /* Update the register mask information.  */
  if (! mips_opts.mips16)
    {
      if (pinfo & INSN_WRITE_GPR_D)
        mips_gprmask |= 1 << ((ip->insn_opcode >> OP_SH_RD) & OP_MASK_RD);
      if ((pinfo & (INSN_WRITE_GPR_T | INSN_READ_GPR_T)) != 0)
        mips_gprmask |= 1 << ((ip->insn_opcode >> OP_SH_RT) & OP_MASK_RT);
      if (pinfo & INSN_READ_GPR_S)
        mips_gprmask |= 1 << ((ip->insn_opcode >> OP_SH_RS) & OP_MASK_RS);
      if (pinfo & INSN_WRITE_GPR_31)
        mips_gprmask |= 1 << 31;
      if (pinfo & INSN_WRITE_FPR_D)
        mips_cprmask[1] |= 1 << ((ip->insn_opcode >> OP_SH_FD) & OP_MASK_FD);
      if ((pinfo & (INSN_WRITE_FPR_S | INSN_READ_FPR_S)) != 0)
        mips_cprmask[1] |= 1 << ((ip->insn_opcode >> OP_SH_FS) & OP_MASK_FS);
      if ((pinfo & (INSN_WRITE_FPR_T | INSN_READ_FPR_T)) != 0)
        mips_cprmask[1] |= 1 << ((ip->insn_opcode >> OP_SH_FT) & OP_MASK_FT);
      if ((pinfo & INSN_READ_FPR_R) != 0)
        mips_cprmask[1] |= 1 << ((ip->insn_opcode >> OP_SH_FR) & OP_MASK_FR);
      if (pinfo & INSN_COP)
        {
          /* We don't keep enough information to sort these cases out.
             The itbl support does keep this information however,
             although we currently don't support itbl formats as part
             of the cop instruction.  May want to add this support in
             the future.  */
        }
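      /* Illustrative note, not part of the original source:
         mips_gprmask and mips_cprmask[] accumulate which general and
         coprocessor registers the assembled code touches; the masks
         are later recorded in the output file (for example in the
         register-info section) so other tools can see the module's
         register usage.  */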
      /* Never set the bit for $0, which is always zero.  */
      mips_gprmask &= ~1 << 0;
    }
  else
    {
      if (pinfo & (MIPS16_INSN_WRITE_X | MIPS16_INSN_READ_X))
        mips_gprmask |= 1 << ((ip->insn_opcode >> MIPS16OP_SH_RX)
                              & MIPS16OP_MASK_RX);
      if (pinfo & (MIPS16_INSN_WRITE_Y | MIPS16_INSN_READ_Y))
        mips_gprmask |= 1 << ((ip->insn_opcode >> MIPS16OP_SH_RY)
                              & MIPS16OP_MASK_RY);
      if (pinfo & MIPS16_INSN_WRITE_Z)
        mips_gprmask |= 1 << ((ip->insn_opcode >> MIPS16OP_SH_RZ)
                              & MIPS16OP_MASK_RZ);
      if (pinfo & (MIPS16_INSN_WRITE_T | MIPS16_INSN_READ_T))
        mips_gprmask |= 1 << TREG;
      if (pinfo & (MIPS16_INSN_WRITE_SP | MIPS16_INSN_READ_SP))
        mips_gprmask |= 1 << SP;
      if (pinfo & (MIPS16_INSN_WRITE_31 | MIPS16_INSN_READ_31))
        mips_gprmask |= 1 << RA;
      if (pinfo & MIPS16_INSN_WRITE_GPR_Y)
        mips_gprmask |= 1 << MIPS16OP_EXTRACT_REG32R (ip->insn_opcode);
      if (pinfo & MIPS16_INSN_READ_Z)
        mips_gprmask |= 1 << ((ip->insn_opcode >> MIPS16OP_SH_MOVE32Z)
                              & MIPS16OP_MASK_MOVE32Z);
      if (pinfo & MIPS16_INSN_READ_GPR_X)
        mips_gprmask |= 1 << ((ip->insn_opcode >> MIPS16OP_SH_REGR32)
                              & MIPS16OP_MASK_REGR32);
    }

  if (place == NULL && ! mips_opts.noreorder)
    {
      /* Filling the branch delay slot is more complex.  We try to
         switch the branch with the previous instruction, which we can
         do if the previous instruction does not set up a condition
         that the branch tests and if the branch is not itself the
         target of any branch.  */
      if ((pinfo & INSN_UNCOND_BRANCH_DELAY)
          || (pinfo & INSN_COND_BRANCH_DELAY))
        {
          if (mips_optimize < 2
              /* If we have seen .set volatile or .set nomove, don't
                 optimize.  */
              || mips_opts.nomove != 0
              /* If we had to emit any NOP instructions, then we
                 already know we can not swap.  */
              || nops != 0
              /* If we don't even know the previous insn, we can not
                 swap.  */
              || ! prev_insn_valid
              /* If the previous insn is already in a branch delay
                 slot, then we can not swap.  */
              || prev_insn_is_delay_slot
              /* If the previous previous insn was in a .set noreorder,
                 we can't swap.  Actually, the MIPS assembler will swap
                 in this situation.  However, gcc configured -with-gnu-as
                 will generate code like
                     .set  noreorder
                     lw    $4,XXX
                     .set  reorder
                     INSN
                     bne   $4,$0,foo
                 in which we can not swap the bne and INSN.  If gcc is
                 not configured -with-gnu-as, it does not output the
                 .set pseudo-ops.  We don't have to check
                 prev_insn_unreordered, because prev_insn_valid will be
                 0 in that case.  We don't want to use
                 prev_prev_insn_valid, because we do want to be able to
                 swap at the start of a function.  */
              || prev_prev_insn_unreordered
              /* If the branch is itself the target of a branch, we can
                 not swap.  We cheat on this; all we check for is
                 whether there