mcore.c
} return "";}/* Output a series of bclri's corresponding to mask. */const char *mcore_output_bclri (rtx dst, int mask){ rtx out_operands[2]; int bit; out_operands[0] = dst; for (bit = 0; bit < 32; bit++) { if ((mask & 0x1) == 0x0) { out_operands[1] = GEN_INT (bit); output_asm_insn ("bclri\t%0,%1", out_operands); } mask >>= 1; } return "";}/* Output a conditional move of two constants that are +/- 1 within each other. See the "movtK" patterns in mcore.md. I'm not sure this is really worth the effort. */const char *mcore_output_cmov (rtx operands[], int cmp_t, const char * test){ int load_value; int adjust_value; rtx out_operands[4]; out_operands[0] = operands[0]; /* Check to see which constant is loadable. */ if (const_ok_for_mcore (INTVAL (operands[1]))) { out_operands[1] = operands[1]; out_operands[2] = operands[2]; } else if (const_ok_for_mcore (INTVAL (operands[2]))) { out_operands[1] = operands[2]; out_operands[2] = operands[1]; /* Complement test since constants are swapped. */ cmp_t = (cmp_t == 0); } load_value = INTVAL (out_operands[1]); adjust_value = INTVAL (out_operands[2]); /* First output the test if folded into the pattern. */ if (test) output_asm_insn (test, operands); /* Load the constant - for now, only support constants that can be generated with a single instruction. maybe add general inlinable constants later (this will increase the # of patterns since the instruction sequence has a different length attribute). */ if (load_value >= 0 && load_value <= 127) output_asm_insn ("movi\t%0,%1", out_operands); else if ((load_value & (load_value - 1)) == 0) output_asm_insn ("bgeni\t%0,%P1", out_operands); else if ((load_value & (load_value + 1)) == 0) output_asm_insn ("bmaski\t%0,%N1", out_operands); /* Output the constant adjustment. */ if (load_value > adjust_value) { if (cmp_t) output_asm_insn ("decf\t%0", out_operands); else output_asm_insn ("dect\t%0", out_operands); } else { if (cmp_t) output_asm_insn ("incf\t%0", out_operands); else output_asm_insn ("inct\t%0", out_operands); } return "";}/* Outputs the peephole for moving a constant that gets not'ed followed by an and (i.e. combine the not and the and into andn). BRC */const char *mcore_output_andn (rtx insn ATTRIBUTE_UNUSED, rtx operands[]){ int x, y; rtx out_operands[3]; const char * load_op; char buf[256]; int trick_no; trick_no = try_constant_tricks (INTVAL (operands[1]), &x, &y); gcc_assert (trick_no == 2); out_operands[0] = operands[0]; out_operands[1] = GEN_INT(x); out_operands[2] = operands[2]; if (x >= 0 && x <= 127) load_op = "movi\t%0,%1"; /* Try exact power of two. */ else if ((x & (x - 1)) == 0) load_op = "bgeni\t%0,%P1"; /* Try exact power of two - 1. */ else if ((x & (x + 1)) == 0) load_op = "bmaski\t%0,%N1"; else load_op = "BADMOVI\t%0,%1"; sprintf (buf, "%s\n\tandn\t%%2,%%0", load_op); output_asm_insn (buf, out_operands); return "";}/* Output an inline constant. */static const char *output_inline_const (enum machine_mode mode, rtx operands[]){ int x = 0, y = 0; int trick_no; rtx out_operands[3]; char buf[256]; char load_op[256]; const char *dst_fmt; int value; value = INTVAL (operands[1]); trick_no = try_constant_tricks (value, &x, &y); /* lrw's are handled separately: Large inlinable constants never get turned into lrw's. Our caller uses try_constant_tricks to back off to an lrw rather than calling this routine. */ gcc_assert (trick_no != 0); if (trick_no == 1) x = value; /* operands: 0 = dst, 1 = load immed., 2 = immed. adjustment. 
/* Output an inline constant.  */

static const char *
output_inline_const (enum machine_mode mode, rtx operands[])
{
  int x = 0, y = 0;
  int trick_no;
  rtx out_operands[3];
  char buf[256];
  char load_op[256];
  const char *dst_fmt;
  int value;

  value = INTVAL (operands[1]);

  trick_no = try_constant_tricks (value, &x, &y);

  /* lrw's are handled separately: Large inlinable constants never get
     turned into lrw's.  Our caller uses try_constant_tricks to back
     off to an lrw rather than calling this routine.  */
  gcc_assert (trick_no != 0);

  if (trick_no == 1)
    x = value;

  /* operands: 0 = dst, 1 = load immed., 2 = immed. adjustment.  */
  out_operands[0] = operands[0];
  out_operands[1] = GEN_INT (x);

  if (trick_no > 2)
    out_operands[2] = GEN_INT (y);

  /* Select dst format based on mode.  */
  if (mode == DImode && (! TARGET_LITTLE_END))
    dst_fmt = "%R0";
  else
    dst_fmt = "%0";

  if (x >= 0 && x <= 127)
    sprintf (load_op, "movi\t%s,%%1", dst_fmt);

  /* Try exact power of two.  */
  else if ((x & (x - 1)) == 0)
    sprintf (load_op, "bgeni\t%s,%%P1", dst_fmt);

  /* Try exact power of two - 1.  */
  else if ((x & (x + 1)) == 0)
    sprintf (load_op, "bmaski\t%s,%%N1", dst_fmt);

  else
    sprintf (load_op, "BADMOVI\t%s,%%1", dst_fmt);

  switch (trick_no)
    {
    case 1:
      strcpy (buf, load_op);
      break;
    case 2:   /* not */
      sprintf (buf, "%s\n\tnot\t%s\t// %d 0x%x", load_op, dst_fmt, value, value);
      break;
    case 3:   /* add */
      sprintf (buf, "%s\n\taddi\t%s,%%2\t// %d 0x%x", load_op, dst_fmt, value, value);
      break;
    case 4:   /* sub */
      sprintf (buf, "%s\n\tsubi\t%s,%%2\t// %d 0x%x", load_op, dst_fmt, value, value);
      break;
    case 5:   /* rsub */
      /* Never happens unless -mrsubi, see try_constant_tricks ().  */
      sprintf (buf, "%s\n\trsubi\t%s,%%2\t// %d 0x%x", load_op, dst_fmt, value, value);
      break;
    case 6:   /* bset */
      sprintf (buf, "%s\n\tbseti\t%s,%%P2\t// %d 0x%x", load_op, dst_fmt, value, value);
      break;
    case 7:   /* bclr */
      sprintf (buf, "%s\n\tbclri\t%s,%%Q2\t// %d 0x%x", load_op, dst_fmt, value, value);
      break;
    case 8:   /* rotl */
      sprintf (buf, "%s\n\trotli\t%s,%%2\t// %d 0x%x", load_op, dst_fmt, value, value);
      break;
    case 9:   /* lsl */
      sprintf (buf, "%s\n\tlsli\t%s,%%2\t// %d 0x%x", load_op, dst_fmt, value, value);
      break;
    case 10:  /* ixh */
      sprintf (buf, "%s\n\tixh\t%s,%s\t// %d 0x%x", load_op, dst_fmt, dst_fmt, value, value);
      break;
    case 11:  /* ixw */
      sprintf (buf, "%s\n\tixw\t%s,%s\t// %d 0x%x", load_op, dst_fmt, dst_fmt, value, value);
      break;
    default:
      return "";
    }

  output_asm_insn (buf, out_operands);

  return "";
}

/* Output a move of a word or less value.  */

const char *
mcore_output_move (rtx insn ATTRIBUTE_UNUSED, rtx operands[],
                   enum machine_mode mode ATTRIBUTE_UNUSED)
{
  rtx dst = operands[0];
  rtx src = operands[1];

  if (GET_CODE (dst) == REG)
    {
      if (GET_CODE (src) == REG)
        {
          if (REGNO (src) == CC_REG)            /* r-c */
            return "mvc\t%0";
          else
            return "mov\t%0,%1";                /* r-r */
        }
      else if (GET_CODE (src) == MEM)
        {
          if (GET_CODE (XEXP (src, 0)) == LABEL_REF)
            return "lrw\t%0,[%1]";              /* a-R */
          else
            switch (GET_MODE (src))             /* r-m */
              {
              case SImode:
                return "ldw\t%0,%1";
              case HImode:
                return "ld.h\t%0,%1";
              case QImode:
                return "ld.b\t%0,%1";
              default:
                gcc_unreachable ();
              }
        }
      else if (GET_CODE (src) == CONST_INT)
        {
          int x, y;

          if (CONST_OK_FOR_I (INTVAL (src)))                    /* r-I */
            return "movi\t%0,%1";
          else if (CONST_OK_FOR_M (INTVAL (src)))               /* r-M */
            return "bgeni\t%0,%P1\t// %1 %x1";
          else if (CONST_OK_FOR_N (INTVAL (src)))               /* r-N */
            return "bmaski\t%0,%N1\t// %1 %x1";
          else if (try_constant_tricks (INTVAL (src), &x, &y))  /* R-P */
            return output_inline_const (SImode, operands);      /* 1-2 insns */
          else
            return "lrw\t%0,%x1\t// %1";    /* Get it from literal pool.  */
        }
      else
        return "lrw\t%0, %1";               /* Into the literal pool.  */
    }
  else if (GET_CODE (dst) == MEM)               /* m-r */
    switch (GET_MODE (dst))
      {
      case SImode:
        return "stw\t%1,%0";
      case HImode:
        return "st.h\t%1,%0";
      case QImode:
        return "st.b\t%1,%0";
      default:
        gcc_unreachable ();
      }

  gcc_unreachable ();
}
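/* Illustrative sketch, not part of the original file: what the trick
   sequences emitted by output_inline_const compute.  X is the loadable
   base constant and Y the adjustment picked by try_constant_tricks.
   Assumptions (not confirmed by this excerpt): Y is taken here as a
   bit position for the bseti/bclri cases, and the ixh/ixw cases scale
   by 3 and 5 (dst = dst + dst * 2, dst = dst + dst * 4).  Names are
   hypothetical.  */

static unsigned int
sketch_trick_value (int trick_no, unsigned int x, unsigned int y)
{
  switch (trick_no)
    {
    case 1:  return x;                   /* single load, no adjustment  */
    case 2:  return ~x;                  /* not                         */
    case 3:  return x + y;               /* addi                        */
    case 4:  return x - y;               /* subi                        */
    case 5:  return y - x;               /* rsubi (-mrsubi only)        */
    case 6:  return x | (1u << y);       /* bseti: set bit y            */
    case 7:  return x & ~(1u << y);      /* bclri: clear bit y          */
    case 8:  return y ? (x << y) | (x >> (32 - y)) : x;  /* rotli      */
    case 9:  return x << y;              /* lsli                        */
    case 10: return x + x * 2;           /* ixh, dst indexed by itself  */
    case 11: return x + x * 4;           /* ixw, dst indexed by itself  */
    default: return x;
    }
}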
/* Return a sequence of instructions to perform DI or DF move.

   Since the MCORE cannot move a DI or DF in one instruction, we have
   to take care when we see overlapping source and dest registers.  */

const char *
mcore_output_movedouble (rtx operands[], enum machine_mode mode ATTRIBUTE_UNUSED)
{
  rtx dst = operands[0];
  rtx src = operands[1];

  if (GET_CODE (dst) == REG)
    {
      if (GET_CODE (src) == REG)
        {
          int dstreg = REGNO (dst);
          int srcreg = REGNO (src);

          /* Ensure the second source register is not overwritten.  */
          if (srcreg + 1 == dstreg)
            return "mov %R0,%R1\n\tmov %0,%1";
          else
            return "mov %0,%1\n\tmov %R0,%R1";
        }
      else if (GET_CODE (src) == MEM)
        {
          rtx memexp = XEXP (src, 0);
          int dstreg = REGNO (dst);
          int basereg = -1;

          if (GET_CODE (memexp) == LABEL_REF)
            return "lrw\t%0,[%1]\n\tlrw\t%R0,[%R1]";
          else if (GET_CODE (memexp) == REG)
            basereg = REGNO (memexp);
          else if (GET_CODE (memexp) == PLUS)
            {
              if (GET_CODE (XEXP (memexp, 0)) == REG)
                basereg = REGNO (XEXP (memexp, 0));
              else if (GET_CODE (XEXP (memexp, 1)) == REG)
                basereg = REGNO (XEXP (memexp, 1));
              else
                gcc_unreachable ();
            }
          else
            gcc_unreachable ();

          /* ??? length attribute is wrong here.  */
          if (dstreg == basereg)
            {
              /* Just load them in reverse order.  */
              return "ldw\t%R0,%R1\n\tldw\t%0,%1";

              /* XXX: alternative: move basereg to basereg+1
                 and then fall through.  */
            }
          else
            return "ldw\t%0,%1\n\tldw\t%R0,%R1";
        }
      else if (GET_CODE (src) == CONST_INT)
        {
          if (TARGET_LITTLE_END)
            {
              if (CONST_OK_FOR_I (INTVAL (src)))
                output_asm_insn ("movi %0,%1", operands);
              else if (CONST_OK_FOR_M (INTVAL (src)))
                output_asm_insn ("bgeni %0,%P1", operands);
              else if (INTVAL (src) == -1)
                output_asm_insn ("bmaski %0,32", operands);
              else if (CONST_OK_FOR_N (INTVAL (src)))
                output_asm_insn ("bmaski %0,%N1", operands);
              else
                gcc_unreachable ();

              if (INTVAL (src) < 0)
                return "bmaski %R0,32";
              else
                return "movi %R0,0";
            }
          else
            {
              if (CONST_OK_FOR_I (INTVAL (src)))
                output_asm_insn ("movi %R0,%1", operands);
              else if (CONST_OK_FOR_M (INTVAL (src)))
                output_asm_insn ("bgeni %R0,%P1", operands);
              else if (INTVAL (src) == -1)
                output_asm_insn ("bmaski %R0,32", operands);
              else if (CONST_OK_FOR_N (INTVAL (src)))
                output_asm_insn ("bmaski %R0,%N1", operands);
              else
                gcc_unreachable ();

              if (INTVAL (src) < 0)
                return "bmaski %0,32";
              else
                return "movi %0,0";
            }
        }
      else
        gcc_unreachable ();
    }
  else if (GET_CODE (dst) == MEM && GET_CODE (src) == REG)
    return "stw\t%1,%0\n\tstw\t%R1,%R0";
  else
    gcc_unreachable ();
}

/* Predicates used by the templates.  */

int
mcore_arith_S_operand (rtx op)
{
  if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_M (~INTVAL (op)))
    return 1;

  return 0;
}
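/* Illustrative sketch, not part of the original file: the ordering
   hazard mcore_output_movedouble guards against.  A register pair is
   copied one word at a time; if the destination pair starts at
   srcreg + 1, copying the first word first would clobber the second
   source word, so that case copies the second word first.  Names are
   hypothetical; REGS models the register file.  */

static void
sketch_copy_pair (unsigned int regs[], int dstreg, int srcreg)
{
  if (srcreg + 1 == dstreg)
    {
      /* dstreg aliases srcreg + 1: move the second word first.  */
      regs[dstreg + 1] = regs[srcreg + 1];
      regs[dstreg]     = regs[srcreg];
    }
  else
    {
      regs[dstreg]     = regs[srcreg];
      regs[dstreg + 1] = regs[srcreg + 1];
    }
}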
/* Expand insert bit field.  BRC  */

int
mcore_expand_insv (rtx operands[])
{
  int width = INTVAL (operands[1]);
  int posn = INTVAL (operands[2]);
  int mask;
  rtx mreg, sreg, ereg;

  /* To get width 1 insv, the test in store_bit_field () (expmed.c, line
     191) for width==1 must be removed.  Look around line 368.  This is
     something we really want the md part to do.  */
  if (width == 1 && GET_CODE (operands[3]) == CONST_INT)
    {
      /* Do directly with bseti or bclri.  */
      /* RBE: 2/97 consider only low bit of constant.  */
      if ((INTVAL (operands[3]) & 1) == 0)
        {
          mask = ~(1 << posn);
          emit_insn (gen_rtx_SET (SImode, operands[0],
                                  gen_rtx_AND (SImode, operands[0],
                                               GEN_INT (mask))));
        }
      else
        {
          mask = 1 << posn;
          emit_insn (gen_rtx_SET (SImode, operands[0],
                                  gen_rtx_IOR (SImode, operands[0],
                                               GEN_INT (mask))));
        }

      return 1;
    }

  /* Look at some bit-field placements that we aren't interested
     in handling ourselves, unless specifically directed to do so.  */
  if (! TARGET_W_FIELD)
    return 0;              /* Generally, give up about now.  */

  if (width == 8 && posn % 8 == 0)
    /* Byte sized and aligned; let caller break it up.  */
    return 0;

  if (width == 16 && posn % 16 == 0)
    /* Short sized and aligned; let caller break it up.  */
    return 0;

  /* The general case - we can do this a little bit better than what the
     machine independent part tries.  This will get rid of all the subregs
     that mess up constant folding in combine when working with relaxed
     immediates.  */

  /* If setting the entire field, do it directly.  */
  if (GET_CODE (operands[3]) == CONST_INT
      && INTVAL (operands[3]) == ((1 << width) - 1))
    {
      mreg = force_reg (SImode, GEN_INT (INTVAL (operands[3]) << posn));
      emit_insn (gen_rtx_SET (SImode, operands[0],
                              gen_rtx_IOR (SImode, operands[0], mreg)));
      return 1;
    }

  /* Generate the clear mask.  */
  mreg = force_reg (SImode, GEN_INT (~(((1 << width) - 1) << posn)));

  /* Clear the field, to overlay it later with the source.  */
  emit_insn (gen_rtx_SET (SImode, operands[0],
                          gen_rtx_AND (SImode, operands[0], mreg)));

  /* If the source is constant 0, we've nothing to add back.  */
  if (GET_CODE (operands[3]) == CONST_INT && INTVAL (operands[3]) == 0)
    return 1;

  /* XXX: Should we worry about more games with constant values?
     We've covered the high profile: set/clear single-bit and many-bit
     fields.  How often do we see "arbitrary bit pattern" constants?  */
  sreg = copy_to_mode_reg (SImode, operands[3]);

  /* Extract src as same width as dst (needed for signed values).  We
     always have to do this since we widen everything to SImode.
     We don't have to mask if we're shifting this up against the
     MSB of the register (e.g., the shift will push out any hidden bits.  */
  if (width + posn != (int) GET_MODE_SIZE (SImode) * 8)
    {
      ereg = force_reg (SImode, GEN_INT ((1 << width) - 1));
      emit_insn (gen_rtx_SET (SImode, sreg,
                              gen_rtx_AND (SImode, sreg, ereg)));
    }

  /* Insert source value in dest.  */
  if (posn != 0)
    emit_insn (gen_rtx_SET (SImode, sreg,
                            gen_rtx_ASHIFT (SImode, sreg, GEN_INT (posn))));

  emit_insn (gen_rtx_SET (SImode, operands[0],
                          gen_rtx_IOR (SImode, operands[0], sreg)));

  return 1;
}
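/* Illustrative sketch, not part of the original file: the general
   clear/shift/or sequence above, as plain C on a 32-bit word.  Clears
   the WIDTH-bit field at bit POSN in DST, then overlays the low WIDTH
   bits of SRC.  Assumes 0 < width < 32 and posn + width <= 32; the
   function name is hypothetical.  */

static unsigned int
sketch_insert_field (unsigned int dst, int width, int posn, unsigned int src)
{
  unsigned int field = ((1u << width) - 1u) << posn;   /* field mask */

  dst &= ~field;                  /* clear the field (the "and" step) */
  dst |= (src << posn) & field;   /* shifted, masked source ("or" step) */

  return dst;
}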