mips.c
    { LT,   -32769,  32766,  1,  1,  0,  1, 0 },   /* LE  */
    { LTU,  -32769,  32766,  1,  1,  1,  0, 1 },   /* GTU */
    { LTU,  -32768,  32767,  0,  0,  1,  1, 1 },   /* GEU */
    { LTU,  -32768,  32767,  0,  0,  0,  0, 1 },   /* LTU */
    { LTU,  -32769,  32766,  1,  1,  0,  1, 1 },   /* LEU */
  };

  enum internal_test test;
  struct cmp_info *p_info;
  int branch_p;
  int eqne_p;
  int invert;
  rtx reg;
  rtx reg2;

  test = map_test_to_internal_test (test_code);
  if (test == ITEST_MAX)
    abort ();

  p_info = &info[ (int)test ];
  eqne_p = (p_info->test_code == XOR);

  /* Eliminate simple branches */
  branch_p = (result == (rtx)0);
  if (branch_p)
    {
      if (GET_CODE (cmp0) == REG || GET_CODE (cmp0) == SUBREG)
        {
          /* Comparisons against zero are simple branches */
          if (GET_CODE (cmp1) == CONST_INT && INTVAL (cmp1) == 0)
            return (rtx)0;

          /* Test for beq/bne.  */
          if (eqne_p)
            return (rtx)0;
        }

      /* allocate a pseudo to calculate the value in.  */
      result = gen_reg_rtx (SImode);
    }

  /* Make sure we can handle any constants given to us.  */
  if (GET_CODE (cmp0) == CONST_INT)
    cmp0 = force_reg (SImode, cmp0);

  if (GET_CODE (cmp1) == CONST_INT)
    {
      HOST_WIDE_INT value = INTVAL (cmp1);
      if (value < p_info->const_low || value > p_info->const_high)
        cmp1 = force_reg (SImode, cmp1);
    }

  /* See if we need to invert the result.  */
  invert = (GET_CODE (cmp1) == CONST_INT)
                ? p_info->invert_const
                : p_info->invert_reg;

  if (p_invert != (int *)0)
    {
      *p_invert = invert;
      invert = FALSE;
    }

  /* Comparison to constants, may involve adding 1 to change a LT into LE.
     Comparison between two registers, may involve switching operands.  */
  if (GET_CODE (cmp1) == CONST_INT)
    {
      if (p_info->const_add != 0)
        {
          HOST_WIDE_INT new = INTVAL (cmp1) + p_info->const_add;

          /* If modification of cmp1 caused overflow, we would get the
             wrong answer if we follow the usual path; thus, x > 0xffffffffu
             would turn into x > 0u.  */
          if ((p_info->unsignedp
               ? (unsigned HOST_WIDE_INT) new > INTVAL (cmp1)
               : new > INTVAL (cmp1))
              != (p_info->const_add > 0))
            /* 1 is the right value in the LE and LEU case.
               In the GT and GTU case, *p_invert is already set,
               so this is effectively 0.  */
            return force_reg (SImode, const1_rtx);
          else
            cmp1 = GEN_INT (new);
        }
    }
  else if (p_info->reverse_regs)
    {
      rtx temp = cmp0;
      cmp0 = cmp1;
      cmp1 = temp;
    }

  if (test == ITEST_NE && GET_CODE (cmp1) == CONST_INT && INTVAL (cmp1) == 0)
    reg = cmp0;
  else
    {
      reg = (invert || eqne_p) ? gen_reg_rtx (SImode) : result;
      emit_move_insn (reg, gen_rtx (p_info->test_code, SImode, cmp0, cmp1));
    }

  if (test == ITEST_NE)
    {
      emit_move_insn (result, gen_rtx (GTU, SImode, reg, const0_rtx));
      invert = FALSE;
    }
  else if (test == ITEST_EQ)
    {
      reg2 = (invert) ? gen_reg_rtx (SImode) : result;
      emit_move_insn (reg2, gen_rtx (LTU, SImode, reg, const1_rtx));
      reg = reg2;
    }

  if (invert)
    emit_move_insn (result, gen_rtx (XOR, SImode, reg, const1_rtx));

  return result;
}
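/* Illustrative example (informal): for a signed test such as "x <= 5",
   map_test_to_internal_test selects the LE row of the table above
   (test_code LT, const_add 1, invert_const 0).  Since 5 + 1 = 6 does not
   overflow, cmp1 becomes 6 and the move emitted above is
   (set (reg) (lt:SI (reg x) (const_int 6))), roughly an slt/slti
   instruction on MIPS; x < 6 is exactly x <= 5.  A test such as "x > 5"
   uses the GT row instead: the same compare against 6 is emitted, and
   invert_const asks for the result to be flipped, either through
   *p_invert (for branches) or through the final XOR with 1.  */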
/* Emit the common code for doing conditional branches.
   operand[0] is the label to jump to.
   The comparison operands are saved away by cmp{si,sf,df}.  */

void
gen_conditional_branch (operands, test_code)
     rtx operands[];
     enum rtx_code test_code;
{
  static enum machine_mode mode_map[(int)CMP_MAX][(int)ITEST_MAX] =
    {
      {                         /* CMP_SI */
        SImode,                 /* eq  */
        SImode,                 /* ne  */
        SImode,                 /* gt  */
        SImode,                 /* ge  */
        SImode,                 /* lt  */
        SImode,                 /* le  */
        SImode,                 /* gtu */
        SImode,                 /* geu */
        SImode,                 /* ltu */
        SImode,                 /* leu */
      },
      {                         /* CMP_SF */
        CC_FPmode,              /* eq  */
        CC_REV_FPmode,          /* ne  */
        CC_FPmode,              /* gt  */
        CC_FPmode,              /* ge  */
        CC_FPmode,              /* lt  */
        CC_FPmode,              /* le  */
        VOIDmode,               /* gtu */
        VOIDmode,               /* geu */
        VOIDmode,               /* ltu */
        VOIDmode,               /* leu */
      },
      {                         /* CMP_DF */
        CC_FPmode,              /* eq  */
        CC_REV_FPmode,          /* ne  */
        CC_FPmode,              /* gt  */
        CC_FPmode,              /* ge  */
        CC_FPmode,              /* lt  */
        CC_FPmode,              /* le  */
        VOIDmode,               /* gtu */
        VOIDmode,               /* geu */
        VOIDmode,               /* ltu */
        VOIDmode,               /* leu */
      },
    };

  enum machine_mode mode;
  enum cmp_type type = branch_type;
  rtx cmp0 = branch_cmp[0];
  rtx cmp1 = branch_cmp[1];
  rtx label1 = gen_rtx (LABEL_REF, VOIDmode, operands[0]);
  rtx label2 = pc_rtx;
  rtx reg = (rtx)0;
  int invert = 0;
  enum internal_test test = map_test_to_internal_test (test_code);

  if (test == ITEST_MAX)
    {
      mode = SImode;
      goto fail;
    }

  /* Get the machine mode to use (CCmode, CC_EQmode, CC_FPmode, or
     CC_REV_FPmode).  */
  mode = mode_map[(int)type][(int)test];
  if (mode == VOIDmode)
    goto fail;

  switch (branch_type)
    {
    default:
      goto fail;

    case CMP_SI:
      reg = gen_int_relational (test_code, (rtx)0, cmp0, cmp1, &invert);
      if (reg != (rtx)0)
        {
          cmp0 = reg;
          cmp1 = const0_rtx;
          test_code = NE;
        }

      /* Make sure it is not a non-zero constant if ==/!=.  */
      else if (GET_CODE (cmp1) == CONST_INT && INTVAL (cmp1) != 0)
        cmp1 = force_reg (SImode, cmp1);
      break;

    case CMP_DF:
    case CMP_SF:
      {
        rtx reg = gen_rtx (REG, mode, FPSW_REGNUM);
        emit_insn (gen_rtx (SET, VOIDmode, reg,
                            gen_rtx (test_code, mode, cmp0, cmp1)));

        cmp0 = reg;
        cmp1 = const0_rtx;
        test_code = NE;
      }
      break;
    }

  /* Generate the jump.  */
  if (invert)
    {
      label2 = label1;
      label1 = pc_rtx;
    }

  emit_jump_insn (gen_rtx (SET, VOIDmode,
                           pc_rtx,
                           gen_rtx (IF_THEN_ELSE, VOIDmode,
                                    gen_rtx (test_code, mode, cmp0, cmp1),
                                    label1,
                                    label2)));
  return;

fail:
  abort_with_insn (gen_rtx (test_code, mode, cmp0, cmp1), "bad test");
}
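/* Illustrative example (informal): for an integer branch such as
   "if (x < y) goto L", the CMP_SI case lets gen_int_relational compute
   the relation into a pseudo register (an slt-style insn) unless the test
   is a simple one, and the final jump is then a bne/beq of that register
   against zero; when *p_invert was set, the two branch arms are swapped
   instead of emitting an extra xor.  For a floating-point branch the
   comparison is first emitted as a SET of the FP status register
   (FPSW_REGNUM) in CC_FPmode (CC_REV_FPmode for NE), and the jump tests
   that register, which on MIPS roughly corresponds to a c.cond.fmt
   instruction followed by bc1t (bc1f for the reversed form).  */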
#define UNITS_PER_SHORT (SHORT_TYPE_SIZE / BITS_PER_UNIT)

/* Internal code to generate the load and store of one word/short/byte.
   The load is emitted directly, and the store insn is returned.  */

#if 0
static rtx
block_move_load_store (dest_reg, src_reg, p_bytes, p_offset, align, orig_src)
     rtx src_reg;               /* register holding source memory address */
     rtx dest_reg;              /* register holding dest. memory address */
     int *p_bytes;              /* pointer to # bytes remaining */
     int *p_offset;             /* pointer to current offset */
     int align;                 /* alignment */
     rtx orig_src;              /* original source for making a reg note */
{
  int bytes;                    /* # bytes remaining */
  int offset;                   /* offset to use */
  int size;                     /* size in bytes of load/store */
  enum machine_mode mode;       /* mode to use for load/store */
  rtx reg;                      /* temporary register */
  rtx src_addr;                 /* source address */
  rtx dest_addr;                /* destination address */
  rtx insn;                     /* insn of the load */
  rtx orig_src_addr;            /* original source address */
  rtx (*load_func)();           /* function to generate load insn */
  rtx (*store_func)();          /* function to generate destination insn */

  bytes = *p_bytes;
  if (bytes <= 0 || align <= 0)
    abort ();

  if (bytes >= UNITS_PER_WORD && align >= UNITS_PER_WORD)
    {
      mode = SImode;
      size = UNITS_PER_WORD;
      load_func = gen_movsi;
      store_func = gen_movsi;
    }

#if 0
  /* Don't generate unaligned moves here; rather, defer those to the
     general movestrsi_internal pattern.  */
  else if (bytes >= UNITS_PER_WORD)
    {
      mode = SImode;
      size = UNITS_PER_WORD;
      load_func = gen_movsi_ulw;
      store_func = gen_movsi_usw;
    }
#endif

  else if (bytes >= UNITS_PER_SHORT && align >= UNITS_PER_SHORT)
    {
      mode = HImode;
      size = UNITS_PER_SHORT;
      load_func = gen_movhi;
      store_func = gen_movhi;
    }

  else
    {
      mode = QImode;
      size = 1;
      load_func = gen_movqi;
      store_func = gen_movqi;
    }

  offset = *p_offset;
  *p_offset = offset + size;
  *p_bytes = bytes - size;

  if (offset == 0)
    {
      src_addr = src_reg;
      dest_addr = dest_reg;
    }
  else
    {
      src_addr = gen_rtx (PLUS, Pmode, src_reg, GEN_INT (offset));
      dest_addr = gen_rtx (PLUS, Pmode, dest_reg, GEN_INT (offset));
    }

  reg = gen_reg_rtx (mode);
  insn = emit_insn ((*load_func) (reg, gen_rtx (MEM, mode, src_addr)));
  orig_src_addr = XEXP (orig_src, 0);
  if (CONSTANT_P (orig_src_addr))
    REG_NOTES (insn) = gen_rtx (EXPR_LIST, REG_EQUIV,
                                plus_constant (orig_src_addr, offset),
                                REG_NOTES (insn));

  return (*store_func) (gen_rtx (MEM, mode, dest_addr), reg);
}
#endif

/* Write a series of loads/stores to move some bytes.  Generate load/stores
   as follows:

        load  1
        load  2
        load  3
        store 1
        load  4
        store 2
        load  5
        store 3
        ...

   This way, no NOP's are needed, except at the end, and only two temp
   registers are needed.  Two delay slots are used in deference to the
   R4000.  */

#if 0
static void
block_move_sequence (dest_reg, src_reg, bytes, align, orig_src)
     rtx dest_reg;              /* register holding destination address */
     rtx src_reg;               /* register holding source address */
     int bytes;                 /* # bytes to move */
     int align;                 /* max alignment to assume */
     rtx orig_src;              /* original source for making a reg note */
{
  int offset       = 0;
  rtx prev2_store  = (rtx)0;
  rtx prev_store   = (rtx)0;
  rtx cur_store    = (rtx)0;

  while (bytes > 0)
    {
      /* Is there a store to do? */
      if (prev2_store)
        emit_insn (prev2_store);

      prev2_store = prev_store;
      prev_store = cur_store;
      cur_store = block_move_load_store (dest_reg, src_reg,
                                         &bytes, &offset,
                                         align, orig_src);
    }

  /* Finish up last three stores.  */
  if (prev2_store)
    emit_insn (prev2_store);

  if (prev_store)
    emit_insn (prev_store);

  if (cur_store)
    emit_insn (cur_store);
}
#endif
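/* Illustrative example (informal; register and operand names are only
   placeholders): for a word-aligned 12-byte copy, block_move_sequence
   above (currently disabled by #if 0) would emit

        lw      $t0, 0($src)
        lw      $t1, 4($src)
        lw      $t2, 8($src)
        sw      $t0, 0($dst)
        sw      $t1, 4($dst)
        sw      $t2, 8($dst)

   so every store is issued at least two instructions after the load it
   depends on, filling both load delay slots without NOPs, as the comment
   above describes for the R4000.  */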
/* Write a loop to move a constant number of bytes.  Generate load/stores
   as follows:

   do {
     temp1 = src[0];
     temp2 = src[1];
     ...
     temp<last> = src[MAX_MOVE_REGS-1];
     dest[0] = temp1;
     dest[1] = temp2;
     ...
     dest[MAX_MOVE_REGS-1] = temp<last>;
     src += MAX_MOVE_REGS;
     dest += MAX_MOVE_REGS;
   } while (src != final);

   This way, no NOP's are needed, and only MAX_MOVE_REGS+3 temp registers
   are needed.  Aligned moves move MAX_MOVE_REGS*4 bytes every
   (2*MAX_MOVE_REGS)+3 cycles, unaligned moves move MAX_MOVE_REGS*4 bytes
   every (4*MAX_MOVE_REGS)+3 cycles, assuming no cache misses.  */

#define MAX_MOVE_REGS 4
#define MAX_MOVE_BYTES (MAX_MOVE_REGS * UNITS_PER_WORD)

static void
block_move_loop (dest_reg, src_reg, bytes, align, orig_src)
     rtx dest_reg;              /* register holding destination address */
     rtx src_reg;               /* register holding source address */
     int bytes;                 /* # bytes to move */
     int align;                 /* alignment */
     rtx orig_src;              /* original source for making a reg note */
{
  rtx dest_mem   = gen_rtx (MEM, BLKmode, dest_reg);
  rtx src_mem    = gen_rtx (MEM, BLKmode, src_reg);
  rtx align_rtx  = GEN_INT (align);
  rtx label;
  rtx final_src;
  rtx bytes_rtx;
  int leftover;

  if (bytes < 2*MAX_MOVE_BYTES)
    abort ();

  leftover = bytes % MAX_MOVE_BYTES;
  bytes -= leftover;

  label = gen_label_rtx ();
  final_src = gen_reg_rtx (Pmode);
  bytes_rtx = GEN_INT (bytes);

  if (bytes > 0x7fff)
    {
      emit_insn (gen_movsi (final_src, bytes_rtx));
      emit_insn (gen_addsi3 (final_src, final_src, src_reg));
    }
  else
    emit_insn (gen_addsi3 (final_src, src_reg, bytes_rtx));

  emit_label (label);

  bytes_rtx = GEN_INT (MAX_MOVE_BYTES);
  emit_insn (gen_movstrsi_internal (dest_mem, src_mem, bytes_rtx, align_rtx));
  emit_insn (gen_addsi3 (src_reg, src_reg, bytes_rtx));
  emit_insn (gen_addsi3 (dest_reg, dest_reg, bytes_rtx));
  emit_insn (gen_cmpsi (src_reg, final_src));
  emit_jump_insn (gen_bne (label));

  if (leftover)
    emit_insn (gen_movstrsi_internal (dest_mem, src_mem,
                                      GEN_INT (leftover),
                                      align_rtx));
}

/* Use a library function to move some bytes.  */

static void
block_move_call (dest_reg, src_reg, bytes_rtx)
     rtx dest_reg;
     rtx src_reg;
     rtx bytes_rtx;
{
#ifdef TARGET_MEM_FUNCTIONS
  emit_library_call (gen_rtx (SYMBOL_REF, Pmode, "memcpy"), 0,
                     VOIDmode, 3,
                     dest_reg, Pmode,
                     src_reg, Pmode,
                     bytes_rtx, SImode);
#else
  emit_library_call (gen_rtx (SYMBOL_REF, Pmode, "bcopy"), 0,
                     VOIDmode, 3,
                     src_reg, Pmode,
                     dest_reg, Pmode,
                     bytes_rtx, SImode);
#endif
}

/* Expand string/block move operations.

   operands[0] is the pointer to the destination.
   operands[1] is the pointer to the source.
   operands[2] is the number of bytes to move.
   operands[3] is the alignment.  */

void
expand_block_move (operands)
     rtx operands[];
{
  rtx bytes_rtx = operands[2];
  rtx align_rtx = operands[3];
  int constp    = (GET_CODE (bytes_rtx) == CONST_INT);
  int bytes     = (constp ? INTVAL (bytes_rtx) : 0);
  int align     = INTVAL (align_rtx);
  rtx orig_src  = operands[1];
  rtx src_reg;
  rtx dest_reg;

  if (constp && bytes <= 0)
    return;

  if (align > UNITS_PER_WORD)
    align = UNITS_PER_WORD;

  /* Move the address into scratch registers.  */
  dest_reg = copy_addr_to_reg (XEXP (operands[0], 0));
  src_reg = copy_addr_to_reg (XEXP (orig_src, 0));