/* mcore.c -- output routines for the Motorola M*CORE back end of GCC.
   (Fragment captured from a web code viewer; the original file header,
   includes, and the opening of mcore_expand_prolog are not included.)  */
/* NOTE(review): this is the tail of mcore_expand_prolog -- the opening
   of the function is not in this capture.  `fi', `growth' and the helper
   output_stack_adjust () are presumably set up/declared earlier; verify
   against the full file.  */

/* If we have a parameter passed partially in regs and partially
   in memory, the registers will have been stored to memory already
   in function.c.  So we only need to do something here for varargs
   functions.  */
  if (fi.arg_size != 0 && current_function_pretend_args_size == 0)
    {
      int offset;
      int rn = FIRST_PARM_REG + NPARM_REGS - 1;
      int remaining = fi.arg_size;

      /* Spill the incoming argument registers, highest register to the
         highest stack offset, walking down 4 bytes at a time.  */
      for (offset = fi.arg_offset; remaining >= 4; offset -= 4, rn--, remaining -= 4)
        {
          emit_insn (gen_movsi
                     (gen_rtx_MEM (SImode,
                                   plus_constant (stack_pointer_rtx, offset)),
                      gen_rtx_REG (SImode, rn)));
        }
    }

  /* Do we need another stack adjustment before we do the register saves?  */
  if (growth < fi.reg_growth)
    output_stack_adjust (-1, fi.growth[growth++]);		/* Grows it.  */

  if (fi.reg_size != 0)
    {
      int i;
      int offs = fi.reg_offset;

      for (i = 15; i >= 0; i--)
        {
          /* If the save block ends at r15 and starts at the stack pointer
             (offs == 0) with at least r14/r15 saved, a single store-multiple
             covers the contiguous run of saved high registers.  */
          if (offs == 0 && i == 15 && ((fi.reg_mask & 0xc000) == 0xc000))
            {
              int first_reg = 15;

              /* Scan down for the lowest register in the contiguous run.  */
              while (fi.reg_mask & (1 << first_reg))
                first_reg--;
              first_reg++;

              emit_insn (gen_store_multiple (gen_rtx_MEM (SImode, stack_pointer_rtx),
                                             gen_rtx_REG (SImode, first_reg),
                                             GEN_INT (16 - first_reg)));

              /* Skip past the registers the multiple store handled.  */
              i -= (15 - first_reg);
              offs += (16 - first_reg) * 4;
            }
          else if (fi.reg_mask & (1 << i))
            {
              /* Save this register individually.  */
              emit_insn (gen_movsi
                         (gen_rtx_MEM (SImode,
                                       plus_constant (stack_pointer_rtx, offs)),
                          gen_rtx_REG (SImode, i)));
              offs += 4;
            }
        }
    }

  /* Figure the locals + outbounds.  */
  if (frame_pointer_needed)
    {
      /* If we haven't already purchased to 'fp'.  */
      if (growth < fi.local_growth)
        output_stack_adjust (-1, fi.growth[growth++]);		/* Grows it.  */

      emit_insn (gen_movsi (frame_pointer_rtx, stack_pointer_rtx));

      /* ... and then go any remaining distance for outbounds, etc.  */
      if (fi.growth[growth])
        output_stack_adjust (-1, fi.growth[growth++]);
    }
  else
    {
      if (growth < fi.local_growth)
        output_stack_adjust (-1, fi.growth[growth++]);		/* Grows it.  */
      if (fi.growth[growth])
        output_stack_adjust (-1, fi.growth[growth++]);
    }
}

/* Expand the epilogue for the current function: undo the stack
   adjustments recorded in fi.growth[] (walking them in reverse),
   reload the callee-saved registers described by fi.reg_mask, and
   give back the rest of the frame.  Mirrors mcore_expand_prolog.  */

void
mcore_expand_epilog (void)
{
  struct mcore_frame fi;
  int i;
  int offs;
  int growth = MAX_STACK_GROWS - 1 ;

  /* Find out what we're doing.  */
  layout_mcore_frame(&fi);

  /* Naked functions manage their own prologue/epilogue.  */
  if (mcore_naked_function_p ())
    return;

  /* If we had a frame pointer, restore the sp from that.  */
  if (frame_pointer_needed)
    {
      emit_insn (gen_movsi (stack_pointer_rtx, frame_pointer_rtx));
      growth = fi.local_growth - 1;
    }
  else
    {
      /* XXX: this loop should accumulate the total and emit a single
	 stack adjustment.  */
      while (growth >= fi.local_growth)
        {
          if (fi.growth[growth] != 0)
            output_stack_adjust (1, fi.growth[growth]);
	  growth--;
        }
    }

  /* Make sure we've shrunk stack back to the point where the registers
     were laid down.  This is typically 0/1 iterations.  Then pull the
     register save information back off the stack.  */
  while (growth >= fi.reg_growth)
    output_stack_adjust ( 1, fi.growth[growth--]);

  offs = fi.reg_offset;

  for (i = 15; i >= 0; i--)
    {
      /* Same contiguous-run detection as the prologue: a load-multiple
         restores the block of high registers in one insn.  */
      if (offs == 0 && i == 15 && ((fi.reg_mask & 0xc000) == 0xc000))
        {
          int first_reg;

          /* Find the starting register.  */
          first_reg = 15;

          while (fi.reg_mask & (1 << first_reg))
            first_reg--;

          first_reg++;

          emit_insn (gen_load_multiple (gen_rtx_REG (SImode, first_reg),
                                        gen_rtx_MEM (SImode, stack_pointer_rtx),
                                        GEN_INT (16 - first_reg)));

          i -= (15 - first_reg);
          offs += (16 - first_reg) * 4;
        }
      else if (fi.reg_mask & (1 << i))
        {
          /* Restore this register individually.  */
          emit_insn (gen_movsi (gen_rtx_REG (SImode, i),
                                gen_rtx_MEM (SImode,
                                             plus_constant (stack_pointer_rtx, offs))));
          offs += 4;
        }
    }

  /* Give back anything else.  */
  /* XXX: Should accumulate total and then give it back.  */
  while (growth >= 0)
    output_stack_adjust ( 1, fi.growth[growth--]);
}

/* This code is borrowed from the SH port.  */

/* The MCORE cannot load a large constant into a register, constants have to
   come from a pc relative load.  The reference of a pc relative load
   instruction must be less than 1k in front of the instruction.
   This means that we often have to dump a constant inside a function, and
   generate code to branch around it.

   It is important to minimize this, since the branches will slow things
   down and make things bigger.

   Worst case code looks like:

   lrw   L1,r0
   br    L2
   align
   L1:   .long value
   L2:
   ..

   lrw   L3,r0
   br    L4
   align
   L3:   .long value
   L4:
   ..

   We fix this by performing a scan before scheduling, which notices which
   instructions need to have their operands fetched from the constant table
   and builds the table.

   The algorithm is:

   scan, find an instruction which needs a pcrel move.  Look forward, find the
   last barrier which is within MAX_COUNT bytes of the requirement.
   If there isn't one, make one.  Process all the instructions between
   the find and the barrier.

   In the above example, we can tell that L3 is within 1k of L1, so
   the first move can be shrunk from the 2 insn+constant sequence into
   just 1 insn, and the constant moved to L3 to make:

   lrw          L1,r0
   ..
   lrw          L3,r0
   bra          L4
   align
   L3:.long value
   L4:.long value

   Then the second move becomes the target for the shortening process.  */

/* One entry in the pc-relative constant pool.  */
typedef struct
{
  rtx value;			/* Value in table.  */
  rtx label;			/* Label of value.  */
} pool_node;

/* The maximum number of constants that can fit into one pool, since
   the pc relative range is 0...1020 bytes and constants are at least 4
   bytes long.  We subtract 4 from the range to allow for the case where
   we need to add a branch/align before the constant pool.  */
#define MAX_COUNT 1016
#define MAX_POOL_SIZE (MAX_COUNT/4)

/* The pending pool entries and their count, filled in during the
   pre-scheduling scan and flushed when the pool is dumped.  */
static pool_node pool_vector[MAX_POOL_SIZE];
static int pool_size;

/* Dump out any constants accumulated in the final pass.  These will
   only be labels.
*/

const char *
mcore_output_jump_label_table (void)
{
  int i;

  if (pool_size)
    {
      /* Align the pool, then emit one .long per pending entry under its
	 own internal label, and reset the pool for the next batch.  */
      fprintf (asm_out_file, "\t.align 2\n");

      for (i = 0; i < pool_size; i++)
	{
	  pool_node * p = pool_vector + i;

	  (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (p->label));

	  output_asm_insn (".long %0", &p->value);
	}

      pool_size = 0;
    }

  /* Nothing further for the caller to print.  */
  return "";
}

/* Check whether insn is a candidate for a conditional.  */

static cond_type
is_cond_candidate (rtx insn)
{
  /* The only things we conditionalize are those that can be directly
     changed into a conditional.  Only bother with SImode items.  If
     we wanted to be a little more aggressive, we could also do other
     modes such as DImode with reg-reg move or load 0.  */
  if (GET_CODE (insn) == INSN)
    {
      rtx pat = PATTERN (insn);
      rtx src, dst;

      if (GET_CODE (pat) != SET)
	return COND_NO;

      /* Destination must be an SImode register (or subreg of one).  */
      dst = XEXP (pat, 0);

      if ((GET_CODE (dst) != REG &&
	   GET_CODE (dst) != SUBREG) ||
	  GET_MODE (dst) != SImode)
	return COND_NO;

      src = XEXP (pat, 1);

      /* reg = reg  -->  conditional move.  */
      if ((GET_CODE (src) == REG ||
	   (GET_CODE (src) == SUBREG &&
	    GET_CODE (SUBREG_REG (src)) == REG)) &&
	  GET_MODE (src) == SImode)
	return COND_MOV_INSN;
      /* reg = 0  -->  conditional clear.  */
      else if (GET_CODE (src) == CONST_INT &&
	       INTVAL (src) == 0)
	return COND_CLR_INSN;
      /* reg = reg + 1  -->  conditional increment.  */
      else if (GET_CODE (src) == PLUS &&
	       (GET_CODE (XEXP (src, 0)) == REG ||
		(GET_CODE (XEXP (src, 0)) == SUBREG &&
		 GET_CODE (SUBREG_REG (XEXP (src, 0))) == REG)) &&
	       GET_MODE (XEXP (src, 0)) == SImode &&
	       GET_CODE (XEXP (src, 1)) == CONST_INT &&
	       INTVAL (XEXP (src, 1)) == 1)
	return COND_INC_INSN;
      /* reg = reg - 1 (or reg + -1)  -->  conditional decrement.  */
      else if (((GET_CODE (src) == MINUS &&
		 GET_CODE (XEXP (src, 1)) == CONST_INT &&
		 INTVAL( XEXP (src, 1)) == 1) ||
		(GET_CODE (src) == PLUS &&
		 GET_CODE (XEXP (src, 1)) == CONST_INT &&
		 INTVAL (XEXP (src, 1)) == -1)) &&
	       (GET_CODE (XEXP (src, 0)) == REG ||
		(GET_CODE (XEXP (src, 0)) == SUBREG &&
		 GET_CODE (SUBREG_REG (XEXP (src, 0))) == REG)) &&
	       GET_MODE (XEXP (src, 0)) == SImode)
	return COND_DEC_INSN;

      /* Some insns that we don't bother with:
	 (set (rx:DI) (ry:DI))
	 (set (rx:DI) (const_int 0))
      */

    }
  else if (GET_CODE (insn) == JUMP_INSN &&
GET_CODE (PATTERN (insn)) == SET &&
	   GET_CODE (XEXP (PATTERN (insn), 1)) == LABEL_REF)
    /* An unconditional jump to a label can be made conditional.  */
    return COND_BRANCH_INSN;

  return COND_NO;
}

/* Emit a conditional version of insn and replace the old insn with the
   new one.  Return the new insn if emitted.  */

static rtx
emit_new_cond_insn (rtx insn, int cond)
{
  rtx c_insn = 0;
  rtx pat, dst, src;
  cond_type num;

  /* Bail out unless is_cond_candidate classified the insn.  */
  if ((num = is_cond_candidate (insn)) == COND_NO)
    return NULL;

  pat = PATTERN (insn);

  if (GET_CODE (insn) == INSN)
    {
      dst = SET_DEST (pat);
      src = SET_SRC (pat);
    }
  else
    {
      /* A jump: `dst' is reused to hold the target label.  */
      dst = JUMP_LABEL (insn);
      src = NULL_RTX;
    }

  /* Build the conditional replacement; `cond' selects the true-form or
     the false-form pattern.  */
  switch (num)
    {
    case COND_MOV_INSN:
    case COND_CLR_INSN:
      if (cond)
	c_insn = gen_movt0 (dst, src, dst);
      else
	c_insn = gen_movt0 (dst, dst, src);
      break;

    case COND_INC_INSN:
      if (cond)
	c_insn = gen_incscc (dst, dst);
      else
	c_insn = gen_incscc_false (dst, dst);
      break;

    case COND_DEC_INSN:
      if (cond)
	c_insn = gen_decscc (dst, dst);
      else
	c_insn = gen_decscc_false (dst, dst);
      break;

    case COND_BRANCH_INSN:
      if (cond)
	c_insn = gen_branch_true (dst);
      else
	c_insn = gen_branch_false (dst);
      break;

    default:
      return NULL;
    }

  /* Only copy the notes if they exist.  */
  if (rtx_length [GET_CODE (c_insn)] >= 7 && rtx_length [GET_CODE (insn)] >= 7)
    {
      /* We really don't need to bother with the notes and links at this
	 point, but go ahead and save the notes.  This will help is_dead()
	 when applying peepholes (links don't matter since they are not
	 used any more beyond this point for the mcore).  */
      REG_NOTES (c_insn) = REG_NOTES (insn);
    }

  if (num == COND_BRANCH_INSN)
    {
      /* For jumps, we need to be a little bit careful and emit the new jump
	 before the old one and to update the use count for the target label.
	 This way, the barrier following the old (uncond) jump will get
	 deleted, but the label won't.  */
      c_insn = emit_jump_insn_before (c_insn, insn);

      ++ LABEL_NUSES (dst);

      JUMP_LABEL (c_insn) = dst;
    }
  else
    c_insn = emit_insn_after (c_insn, insn);

  delete_insn (insn);

  return c_insn;
}

/* Attempt to change a basic block into a series of conditional insns.
   This works by taking the branch at the end of the 1st block and
   scanning for the end of the 2nd block.  If all instructions in the 2nd
   block have cond. versions and the label at the start of block 3 is the
   same as the target from the branch at block 1, then conditionalize all
   insn in block 2 using the inverse condition of the branch at block 1.
   (Note I'm bending the definition of basic block here.)

   e.g., change:

		bt	L2             <-- end of block 1 (delete)
		mov	r7,r8
		addu	r7,1
		br	L3             <-- end of block 2

	L2:	...                    <-- start of block 3 (NUSES==1)
	L3:	...

   to:

		movf	r7,r8
		incf	r7
		bf	L3

	L3:	...

   we can delete the L2 label if NUSES==1 and re-apply the optimization
   starting at the last instruction of block 2.  This may allow an entire
   if-then-else statement to be conditionalized.  BRC  */

/* NOTE(review): this capture is truncated inside the function below;
   the body past the block-2 scan is missing.  */

static rtx
conditionalize_block (rtx first)
{
  rtx insn;
  rtx br_pat;
  rtx end_blk_1_br = 0;
  rtx end_blk_2_insn = 0;
  rtx start_blk_3_lab = 0;
  int cond;
  int br_lab_num;
  int blk_size = 0;

  /* Check that the first insn is a candidate conditional jump.  This is
     the one that we'll eliminate.  If not, advance to the next insn to
     try.  */
  if (GET_CODE (first) != JUMP_INSN ||
      GET_CODE (PATTERN (first)) != SET ||
      GET_CODE (XEXP (PATTERN (first), 1)) != IF_THEN_ELSE)
    return NEXT_INSN (first);

  /* Extract some information we need.  */
  end_blk_1_br = first;
  br_pat = PATTERN (end_blk_1_br);

  /* Complement the condition since we use the reverse cond. for the insns.  */
  cond = (GET_CODE (XEXP (XEXP (br_pat, 1), 0)) == EQ);

  /* Determine what kind of branch we have.  */
  if (GET_CODE (XEXP (XEXP (br_pat, 1), 1)) == LABEL_REF)
    {
      /* A normal branch, so extract label out of first arm.  */
      br_lab_num = CODE_LABEL_NUMBER (XEXP (XEXP (XEXP (br_pat, 1), 1), 0));
    }
  else
    {
      /* An inverse branch, so extract the label out of the 2nd arm
	 and complement the condition.  */
      cond = (cond == 0);
      br_lab_num = CODE_LABEL_NUMBER (XEXP (XEXP (XEXP (br_pat, 1), 2), 0));
    }

  /* Scan forward for the start of block 2: it must start with a
     label and that label must be the same as the branch target
     label from block 1.  We don't care about whether block 2 actually
     ends with a branch or a label (an uncond. branch is
     conditionalizable).  */
  for (insn = NEXT_INSN (first); insn; insn = NEXT_INSN (ins
/* NOTE: the source capture ends abruptly above, mid-statement inside
   conditionalize_block; the remainder of mcore.c is not present in this
   fragment.  (Non-code web-viewer UI text removed.)  */