mcore.c
      if (GET_CODE (op1) == CONST_INT)
        op1 = force_reg (SImode, op1);
      break;

    case GE:                    /* Use inverted condition, cmplt.  */
      code = LT;
      /* drop through */

    case LT:                    /* Use normal condition, cmplt.  */
      if (GET_CODE (op1) == CONST_INT
          /* covered by btsti x,31 */
          && INTVAL (op1) != 0
          && ! CONST_OK_FOR_J (INTVAL (op1)))
        op1 = force_reg (SImode, op1);
      break;

    case GTU:                   /* Use inverted condition, cmple.  */
      if (GET_CODE (op1) == CONST_INT && INTVAL (op1) == 0)
        {
          /* Unsigned > 0 is the same as != 0, but we need to invert
             the condition, so we want to set code = EQ.  This cannot
             be done however, as the mcore does not support such a
             test.  Instead we cope with this case in the "bgtu"
             pattern itself so we should never reach this point.  */
          /* code = EQ; */
          abort ();
          break;
        }
      code = LEU;
      /* drop through */

    case LEU:                   /* Use normal condition, reversed cmphs.  */
      if (GET_CODE (op1) == CONST_INT && INTVAL (op1) != 0)
        op1 = force_reg (SImode, op1);
      break;

    case LTU:                   /* Use inverted condition, cmphs.  */
      code = GEU;
      /* drop through */

    case GEU:                   /* Use normal condition, cmphs.  */
      if (GET_CODE (op1) == CONST_INT && INTVAL (op1) != 0)
        op1 = force_reg (SImode, op1);
      break;

    default:
      break;
    }

  emit_insn (gen_rtx (SET, VOIDmode, cc_reg,
                      gen_rtx (code, CCmode, op0, op1)));

  return cc_reg;
}

int
mcore_symbolic_address_p (x)
     rtx x;
{
  switch (GET_CODE (x))
    {
    case SYMBOL_REF:
    case LABEL_REF:
      return 1;
    case CONST:
      x = XEXP (x, 0);
      return (   (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
               || GET_CODE (XEXP (x, 0)) == LABEL_REF)
              && GET_CODE (XEXP (x, 1)) == CONST_INT);
    default:
      return 0;
    }
}

int
mcore_call_address_operand (x, mode)
     rtx x;
     enum machine_mode mode;
{
  return register_operand (x, mode) || CONSTANT_P (x);
}

/* Functions to output assembly code for a function call.  */

char *
mcore_output_call (operands, index)
     rtx operands[];
     int index;
{
  static char buffer[20];
  rtx addr = operands [index];

  if (REG_P (addr))
    {
      if (TARGET_CG_DATA)
        {
          if (mcore_current_function_name == 0)
            abort ();

          ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name,
                              "unknown", 1);
        }

      sprintf (buffer, "jsr\t%%%d", index);
    }
  else
    {
      if (TARGET_CG_DATA)
        {
          if (mcore_current_function_name == 0)
            abort ();

          if (GET_CODE (addr) != SYMBOL_REF)
            abort ();

          ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name,
                              XSTR (addr, 0), 0);
        }

      sprintf (buffer, "jbsr\t%%%d", index);
    }

  return buffer;
}

/* Can we load a constant with a single instruction ?  */

static int
const_ok_for_mcore (value)
     int value;
{
  if (value >= 0 && value <= 127)
    return 1;

  /* Try exact power of two.  */
  if ((value & (value - 1)) == 0)
    return 1;

  /* Try exact power of two - 1.  */
  if ((value & (value + 1)) == 0)
    return 1;

  return 0;
}

/* Can we load a constant inline with up to 2 instructions ?  */

int
mcore_const_ok_for_inline (value)
     long value;
{
  int x, y;

  return try_constant_tricks (value, & x, & y) > 0;
}

/* Are we loading the constant using a not ?  */

int
mcore_const_trick_uses_not (value)
     long value;
{
  int x, y;

  return try_constant_tricks (value, & x, & y) == 2;
}
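/* Illustrative aside (not part of the original backend): the three classes
   accepted by const_ok_for_mcore above correspond to the MCore
   single-instruction loads -- movi for 0..127, bgeni for an exact power of
   two, and a mask load such as bmaski for a power of two minus one.  The
   hypothetical helper below, guarded out of the build, restates the same
   test on its own, assuming 32-bit ints.  */
#if 0
static int
loadable_in_one_insn (int v)
{
  if (v >= 0 && v <= 127)
    return 1;                   /* movi with a 7-bit immediate.  */

  if ((v & (v - 1)) == 0)
    return 1;                   /* Exact power of two: single bit set.  */

  if ((v & (v + 1)) == 0)
    return 1;                   /* Power of two minus one: low-order run of ones.  */

  return 0;
}
#endif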
/* Try tricks to load a constant inline and return the trick number if
   success (0 is non-inlinable).

    0: not inlinable
    1: single instruction (do the usual thing)
    2: single insn followed by a 'not'
    3: single insn followed by a subi
    4: single insn followed by an addi
    5: single insn followed by rsubi
    6: single insn followed by bseti
    7: single insn followed by bclri
    8: single insn followed by rotli
    9: single insn followed by lsli
   10: single insn followed by ixh
   11: single insn followed by ixw.  */

static int
try_constant_tricks (value, x, y)
     long value;
     int * x;
     int * y;
{
  int i;
  unsigned bit, shf, rot;

  if (const_ok_for_mcore (value))
    return 1;                   /* Do the usual thing.  */

  if (TARGET_HARDLIT)
    {
      if (const_ok_for_mcore (~value))
        {
          *x = ~value;
          return 2;
        }

      for (i = 1; i <= 32; i++)
        {
          if (const_ok_for_mcore (value - i))
            {
              *x = value - i;
              *y = i;

              return 3;
            }

          if (const_ok_for_mcore (value + i))
            {
              *x = value + i;
              *y = i;

              return 4;
            }
        }

      bit = 0x80000000L;

      for (i = 0; i <= 31; i++)
        {
          if (const_ok_for_mcore (i - value))
            {
              *x = i - value;
              *y = i;

              return 5;
            }

          if (const_ok_for_mcore (value & ~bit))
            {
              *y = bit;
              *x = value & ~bit;

              return 6;
            }

          if (const_ok_for_mcore (value | bit))
            {
              *y = ~bit;
              *x = value | bit;

              return 7;
            }

          bit >>= 1;
        }

      shf = value;
      rot = value;

      for (i = 1; i < 31; i++)
        {
          int c;

          /* MCore has rotate left.  */
          c = rot << 31;
          rot >>= 1;
          rot &= 0x7FFFFFFF;
          rot |= c;             /* Simulate rotate.  */

          if (const_ok_for_mcore (rot))
            {
              *y = i;
              *x = rot;

              return 8;
            }

          if (shf & 1)
            shf = 0;            /* Can't use logical shift, low order bit is one.  */

          shf >>= 1;

          if (shf != 0 && const_ok_for_mcore (shf))
            {
              *y = i;
              *x = shf;

              return 9;
            }
        }

      if ((value % 3) == 0 && const_ok_for_mcore (value / 3))
        {
          *x = value / 3;

          return 10;
        }

      if ((value % 5) == 0 && const_ok_for_mcore (value / 5))
        {
          *x = value / 5;

          return 11;
        }
    }

  return 0;
}

/* Check whether reg is dead at first.  This is done by searching ahead
   for either the next use (i.e., reg is live), a death note, or a set of
   reg.  Don't just use dead_or_set_p() since reload does not always mark
   deaths (especially if PRESERVE_DEATH_NOTES_REGNO_P is not defined).  We
   can ignore subregs by extracting the actual register.  BRC  */

int
mcore_is_dead (first, reg)
     rtx first;
     rtx reg;
{
  rtx insn;

  /* For mcore, subregs can't live independently of their parent regs.  */
  if (GET_CODE (reg) == SUBREG)
    reg = SUBREG_REG (reg);

  /* Dies immediately.  */
  if (dead_or_set_p (first, reg))
    return 1;

  /* Look for conclusive evidence of live/death, otherwise we have
     to assume that it is live.  */
  for (insn = NEXT_INSN (first); insn; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == JUMP_INSN)
        return 0;               /* We lose track, assume it is alive.  */

      else if (GET_CODE (insn) == CALL_INSN)
        {
          /* Calls might use it for target or register parms.  */
          if (reg_referenced_p (reg, PATTERN (insn))
              || find_reg_fusage (insn, USE, reg))
            return 0;
          else if (dead_or_set_p (insn, reg))
            return 1;
        }
      else if (GET_CODE (insn) == INSN)
        {
          if (reg_referenced_p (reg, PATTERN (insn)))
            return 0;
          else if (dead_or_set_p (insn, reg))
            return 1;
        }
    }

  /* No conclusive evidence either way, we can not take the chance
     that control flow hid the use from us -- "I'm not dead yet".  */
  return 0;
}

/* Count the number of ones in mask.  */

int
mcore_num_ones (mask)
     int mask;
{
  /* A trick to count set bits recently posted on comp.compilers.  */
  mask =  (mask >>  1  & 0x55555555) + (mask & 0x55555555);
  mask = ((mask >>  2) & 0x33333333) + (mask & 0x33333333);
  mask = ((mask >>  4) + mask) & 0x0f0f0f0f;
  mask = ((mask >>  8) + mask);

  return (mask + (mask >> 16)) & 0xff;
}

/* Count the number of zeros in mask.  */

int
mcore_num_zeros (mask)
     int mask;
{
  return 32 - mcore_num_ones (mask);
}

/* Determine byte being masked.  */

int
mcore_byte_offset (mask)
     unsigned int mask;
{
  if (mask == 0x00ffffffL)
    return 0;
  else if (mask == 0xff00ffffL)
    return 1;
  else if (mask == 0xffff00ffL)
    return 2;
  else if (mask == 0xffffff00L)
    return 3;

  return -1;
}
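/* Illustrative aside (not part of the original backend): the shift-and-add
   sequence in mcore_num_ones above folds the mask so that each bit pair,
   then each nibble, then each byte holds its own population count before
   the final sum.  A hypothetical reference loop for comparison, guarded
   out of the build and assuming 32-bit ints:  */
#if 0
static int
naive_num_ones (unsigned int mask)
{
  int count = 0;

  while (mask != 0)
    {
      count += mask & 1;        /* Add the low-order bit.  */
      mask >>= 1;
    }

  return count;
}

/* For any 32-bit value the two agree, e.g. both return 24 for the byte
   mask 0xff00ffff recognised by mcore_byte_offset.  */
#endif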
/* Determine halfword being masked.  */

int
mcore_halfword_offset (mask)
     unsigned int mask;
{
  if (mask == 0x0000ffffL)
    return 0;
  else if (mask == 0xffff0000L)
    return 1;

  return -1;
}

/* Output a series of bseti's corresponding to mask.  */

const char *
mcore_output_bseti (dst, mask)
     rtx dst;
     int mask;
{
  rtx out_operands[2];
  int bit;

  out_operands[0] = dst;

  for (bit = 0; bit < 32; bit++)
    {
      if ((mask & 0x1) == 0x1)
        {
          out_operands[1] = GEN_INT (bit);

          output_asm_insn ("bseti\t%0,%1", out_operands);
        }

      mask >>= 1;
    }

  return "";
}

/* Output a series of bclri's corresponding to mask.  */

const char *
mcore_output_bclri (dst, mask)
     rtx dst;
     int mask;
{
  rtx out_operands[2];
  int bit;

  out_operands[0] = dst;

  for (bit = 0; bit < 32; bit++)
    {
      if ((mask & 0x1) == 0x0)
        {
          out_operands[1] = GEN_INT (bit);

          output_asm_insn ("bclri\t%0,%1", out_operands);
        }

      mask >>= 1;
    }

  return "";
}

/* Output a conditional move of two constants that are +/- 1 within each
   other.  See the "movtK" patterns in mcore.md.  I'm not sure this is
   really worth the effort.  */

const char *
mcore_output_cmov (operands, cmp_t, test)
     rtx operands[];
     int cmp_t;
     const char * test;
{
  int load_value;
  int adjust_value;
  rtx out_operands[4];

  out_operands[0] = operands[0];

  /* Check to see which constant is loadable.  */
  if (const_ok_for_mcore (INTVAL (operands[1])))
    {
      out_operands[1] = operands[1];
      out_operands[2] = operands[2];
    }
  else if (const_ok_for_mcore (INTVAL (operands[2])))
    {
      out_operands[1] = operands[2];
      out_operands[2] = operands[1];

      /* Complement test since constants are swapped.  */
      cmp_t = (cmp_t == 0);
    }

  load_value   = INTVAL (out_operands[1]);
  adjust_value = INTVAL (out_operands[2]);

  /* First output the test if folded into the pattern.  */
  if (test)
    output_asm_insn (test, operands);

  /* Load the constant - for now, only support constants that can be
     generated with a single instruction.  Maybe add general inlinable
     constants later (this will increase the # of patterns since the
     instruction sequence has a different length attribute).  */
  if (load_value >= 0 && load_value <= 127)
    output_asm_insn ("movi\t%0,%1", out_operands);
  else if ((load_value & (load_value - 1)) == 0)
    output_asm_insn ("bgeni\t%0,%P1", out_operands);