/* bfin.c -- machine-dependent code generation support for the
   Analog Devices Blackfin (bfin) port of GCC.  (Excerpt.)  */
/* Expand a move operation in mode MODE. The operands are in OPERANDS. */voidexpand_move (rtx *operands, enum machine_mode mode){ if (flag_pic && SYMBOLIC_CONST (operands[1])) emit_pic_move (operands, mode); /* Don't generate memory->memory or constant->memory moves, go through a register */ else if ((reload_in_progress | reload_completed) == 0 && GET_CODE (operands[0]) == MEM && GET_CODE (operands[1]) != REG) operands[1] = force_reg (mode, operands[1]);}/* Split one or more DImode RTL references into pairs of SImode references. The RTL can be REG, offsettable MEM, integer constant, or CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to split and "num" is its length. lo_half and hi_half are output arrays that parallel "operands". */voidsplit_di (rtx operands[], int num, rtx lo_half[], rtx hi_half[]){ while (num--) { rtx op = operands[num]; /* simplify_subreg refuse to split volatile memory addresses, but we still have to handle it. */ if (GET_CODE (op) == MEM) { lo_half[num] = adjust_address (op, SImode, 0); hi_half[num] = adjust_address (op, SImode, 4); } else { lo_half[num] = simplify_gen_subreg (SImode, op, GET_MODE (op) == VOIDmode ? DImode : GET_MODE (op), 0); hi_half[num] = simplify_gen_subreg (SImode, op, GET_MODE (op) == VOIDmode ? DImode : GET_MODE (op), 4); } }}/* Expand a call instruction. FNADDR is the call target, RETVAL the return value. SIBCALL is nonzero if this is a sibling call. */voidbfin_expand_call (rtx retval, rtx fnaddr, rtx callarg1, int sibcall){ rtx use = NULL, call; /* Static functions and indirect calls don't need the pic register. */ if (flag_pic && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF && ! SYMBOL_REF_LOCAL_P (XEXP (fnaddr, 0))) use_reg (&use, pic_offset_table_rtx); if (! 
call_insn_operand (XEXP (fnaddr, 0), Pmode)) { fnaddr = copy_to_mode_reg (Pmode, XEXP (fnaddr, 0)); fnaddr = gen_rtx_MEM (Pmode, fnaddr); } call = gen_rtx_CALL (VOIDmode, fnaddr, callarg1); if (retval) call = gen_rtx_SET (VOIDmode, retval, call); if (sibcall) { rtx pat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (2)); XVECEXP (pat, 0, 0) = call; XVECEXP (pat, 0, 1) = gen_rtx_RETURN (VOIDmode); call = pat; } call = emit_call_insn (call); if (use) CALL_INSN_FUNCTION_USAGE (call) = use;}/* Return 1 if hard register REGNO can hold a value of machine-mode MODE. */inthard_regno_mode_ok (int regno, enum machine_mode mode){ /* Allow only dregs to store value of mode HI or QI */ enum reg_class class = REGNO_REG_CLASS (regno); if (mode == CCmode) return 0; if (mode == V2HImode) return D_REGNO_P (regno); if (class == CCREGS) return mode == BImode; if (mode == PDImode) return regno == REG_A0 || regno == REG_A1; if (mode == SImode && TEST_HARD_REG_BIT (reg_class_contents[PROLOGUE_REGS], regno)) return 1; return TEST_HARD_REG_BIT (reg_class_contents[MOST_REGS], regno);}/* Implements target hook vector_mode_supported_p. */static boolbfin_vector_mode_supported_p (enum machine_mode mode){ return mode == V2HImode;}/* Return the cost of moving data from a register in class CLASS1 to one in class CLASS2. A cost of 2 is the default. */intbfin_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED, enum reg_class class1, enum reg_class class2){ /* If optimizing for size, always prefer reg-reg over reg-memory moves. */ if (optimize_size) return 2; /* There are some stalls involved when moving from a DREG to a different class reg, and using the value in one of the following instructions. Attempt to model this by slightly discouraging such moves. */ if (class1 == DREGS && class2 != DREGS) return 2 * 2; return 2;}/* Return the cost of moving data of mode M between a register and memory. A value of 2 is the default; this cost is relative to those in `REGISTER_MOVE_COST'. ??? 
In theory L1 memory has single-cycle latency. We should add a switch that tells the compiler whether we expect to use only L1 memory for the program; it'll make the costs more accurate. */intbfin_memory_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED, enum reg_class class, int in ATTRIBUTE_UNUSED){ /* Make memory accesses slightly more expensive than any register-register move. Also, penalize non-DP registers, since they need secondary reloads to load and store. */ if (! reg_class_subset_p (class, DPREGS)) return 10; return 8;}/* Inform reload about cases where moving X with a mode MODE to a register in CLASS requires an extra scratch register. Return the class needed for the scratch register. */enum reg_classsecondary_input_reload_class (enum reg_class class, enum machine_mode mode, rtx x){ /* If we have HImode or QImode, we can only use DREGS as secondary registers; in most other cases we can also use PREGS. */ enum reg_class default_class = GET_MODE_SIZE (mode) >= 4 ? DPREGS : DREGS; enum reg_class x_class = NO_REGS; enum rtx_code code = GET_CODE (x); if (code == SUBREG) x = SUBREG_REG (x), code = GET_CODE (x); if (REG_P (x)) { int regno = REGNO (x); if (regno >= FIRST_PSEUDO_REGISTER) regno = reg_renumber[regno]; if (regno == -1) code = MEM; else x_class = REGNO_REG_CLASS (regno); } /* We can be asked to reload (plus (FP) (large_constant)) into a DREG. This happens as a side effect of register elimination, and we need a scratch register to do it. */ if (fp_plus_const_operand (x, mode)) { rtx op2 = XEXP (x, 1); int large_constant_p = ! CONST_7BIT_IMM_P (INTVAL (op2)); if (class == PREGS || class == PREGS_CLOBBERED) return NO_REGS; /* If destination is a DREG, we can do this without a scratch register if the constant is valid for an add instruction. */ if (class == DREGS || class == DPREGS) return large_constant_p ? PREGS : NO_REGS; /* Reloading to anything other than a DREG? Use a PREG scratch register. 
*/ return PREGS; } /* Data can usually be moved freely between registers of most classes. AREGS are an exception; they can only move to or from another register in AREGS or one in DREGS. They can also be assigned the constant 0. */ if (x_class == AREGS) return class == DREGS || class == AREGS ? NO_REGS : DREGS; if (class == AREGS) { if (x != const0_rtx && x_class != DREGS) return DREGS; else return NO_REGS; } /* CCREGS can only be moved from/to DREGS. */ if (class == CCREGS && x_class != DREGS) return DREGS; if (x_class == CCREGS && class != DREGS) return DREGS; /* All registers other than AREGS can load arbitrary constants. The only case that remains is MEM. */ if (code == MEM) if (! reg_class_subset_p (class, default_class)) return default_class; return NO_REGS;}/* Like secondary_input_reload_class; and all we do is call that function. */enum reg_classsecondary_output_reload_class (enum reg_class class, enum machine_mode mode, rtx x){ return secondary_input_reload_class (class, mode, x);}/* Implement the macro OVERRIDE_OPTIONS. */voidoverride_options (void){ if (TARGET_OMIT_LEAF_FRAME_POINTER) flag_omit_frame_pointer = 1; /* Library identification */ if (bfin_library_id_string) { int id; if (! TARGET_ID_SHARED_LIBRARY) error ("-mshared-library-id= specified without -mid-shared-library"); id = atoi (bfin_library_id_string); if (id < 0 || id > MAX_LIBRARY_ID) error ("-mshared-library-id=%d is not between 0 and %d", id, MAX_LIBRARY_ID); /* From now on, bfin_library_id_string will contain the library offset. */ asprintf ((char **)&bfin_library_id_string, "%d", (id * -4) - 4); } if (TARGET_ID_SHARED_LIBRARY) /* ??? Provide a way to use a bigger GOT. */ flag_pic = 1; flag_schedule_insns = 0;}/* Return the destination address of BRANCH. We need to use this instead of get_attr_length, because the cbranch_with_nops pattern conservatively sets its length to 6, and we still prefer to use shorter sequences. 
*/

static int
branch_dest (rtx branch)
{
  rtx dest;
  int dest_uid;
  rtx pat = PATTERN (branch);

  /* For a PARALLEL (e.g. cbranch_with_nops), the branch is element 0.  */
  if (GET_CODE (pat) == PARALLEL)
    pat = XVECEXP (pat, 0, 0);
  dest = SET_SRC (pat);
  /* For a conditional branch, take the label from the IF_THEN_ELSE.  */
  if (GET_CODE (dest) == IF_THEN_ELSE)
    dest = XEXP (dest, 1);
  dest = XEXP (dest, 0);
  dest_uid = INSN_UID (dest);
  return INSN_ADDRESSES (dest_uid);
}

/* Return nonzero if INSN is annotated with a REG_BR_PROB note that
   indicates it's a branch that's predicted taken.  */

static int
cbranch_predicted_taken_p (rtx insn)
{
  rtx x = find_reg_note (insn, REG_BR_PROB, 0);

  if (x)
    {
      int pred_val = INTVAL (XEXP (x, 0));

      /* Probability at or above half the scale means predicted taken.  */
      return pred_val >= REG_BR_PROB_BASE / 2;
    }
  return 0;
}

/* Templates for use by asm_conditional_branch.
   Rows are indexed by (predicted-taken << 1) | (BRF or BRT);
   columns by branch length (short, jump.s, jump.l).  */

static const char *ccbranch_templates[][3] = {
  { "if !cc jump %3;", "if cc jump 4 (bp); jump.s %3;", "if cc jump 6 (bp); jump.l %3;" },
  { "if cc jump %3;", "if !cc jump 4 (bp); jump.s %3;", "if !cc jump 6 (bp); jump.l %3;" },
  { "if !cc jump %3 (bp);", "if cc jump 4; jump.s %3;", "if cc jump 6; jump.l %3;" },
  { "if cc jump %3 (bp);", "if !cc jump 4; jump.s %3;", "if !cc jump 6; jump.l %3;" },
};

/* Output INSN, which is a conditional branch instruction with operands
   OPERANDS.

   We deal with the various forms of conditional branches that can be
   generated by bfin_reorg to prevent the hardware from doing
   speculative loads, by
    - emitting a sufficient number of nops, if N_NOPS is nonzero, or
    - always emitting the branch as predicted taken, if PREDICT_TAKEN is
      true.
   Either of these is only necessary if the branch is short, otherwise
   the template we use ends in an unconditional jump which flushes the
   pipeline anyway.  */

void
asm_conditional_branch (rtx insn, rtx *operands, int n_nops, int predict_taken)
{
  int offset = branch_dest (insn) - INSN_ADDRESSES (INSN_UID (insn));
  /* Note : offset for instructions like if cc jmp; jump.[sl] offset
     is to be taken from start of if cc rather than jump.
     Range for jump.s is (-4094, 4096) instead of (-4096, 4094).  */
  int len = (offset >= -1024 && offset <= 1022 ? 0
	     : offset >= -4094 && offset <= 4096 ? 1 : 2);
  /* PREDICT_TAKEN only matters for the short form; otherwise use the
     REG_BR_PROB-based prediction.  */
  int bp = predict_taken && len == 0 ? 1 : cbranch_predicted_taken_p (insn);
  int idx = (bp << 1) | (GET_CODE (operands[0]) == EQ ? BRF : BRT);

  output_asm_insn (ccbranch_templates[idx][len], operands);

  /* Nops are only requested for branches not emitted as predicted
     taken; both at once would be a bfin_reorg bug.  */
  if (n_nops > 0 && bp)
    abort ();
  if (len == 0)
    while (n_nops-- > 0)
      output_asm_insn ("nop;", NULL);
}

/* Emit rtl for a comparison operation CMP in mode MODE.  Operands have
   been stored in bfin_compare_op0 and bfin_compare_op1 already.
   Returns a BImode comparison of the CC register against zero.  */

rtx
bfin_gen_compare (rtx cmp, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  enum rtx_code code1, code2;
  rtx op0 = bfin_compare_op0, op1 = bfin_compare_op1;
  rtx tem = bfin_cc_rtx;
  enum rtx_code code = GET_CODE (cmp);

  /* If we have a BImode input, then we already have a compare result,
     and do not need to emit another comparison.  */
  if (GET_MODE (op0) == BImode)
    {
      if ((code == NE || code == EQ) && op1 == const0_rtx)
	tem = op0, code2 = code;
      else
	abort ();
    }
  else
    {
      switch (code)
	{
	/* bfin has these conditions natively.  */
	case EQ:
	case LT:
	case LE:
	case LEU:
	case LTU:
	  code1 = code;
	  code2 = NE;
	  break;
	/* Everything else is emitted as the reversed condition,
	   tested for equality with zero.  */
	default:
	  code1 = reverse_condition (code);
	  code2 = EQ;
	  break;
	}
      emit_insn (gen_rtx_SET (BImode, tem,
			      gen_rtx_fmt_ee (code1, BImode, op0, op1)));
    }

  return gen_rtx_fmt_ee (code2, BImode, tem, CONST0_RTX (BImode));
}

/* Return nonzero iff C has exactly one bit set if it is interpreted as
   a 32 bit constant.  */

int
log2constp (unsigned HOST_WIDE_INT c)
{
  c &= 0xFFFFFFFF;
  return c != 0 && (c & (c-1)) == 0;
}

/* Returns the number of consecutive least significant zeros in the
   binary representation of *V.  We modify *V to contain the original
   value arithmetically shifted right by the number of zeroes.
*/

static int
shiftr_zero (HOST_WIDE_INT *v)
{
  unsigned HOST_WIDE_INT tmp = *v;
  unsigned HOST_WIDE_INT sgn;
  int n = 0;

  if (tmp == 0)
    return 0;

  /* Re-insert the sign bit on every step so the net effect is an
     arithmetic (sign-preserving) right shift.  */
  sgn = tmp & ((unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1));
  while ((tmp & 0x1) == 0 && n <= 32)
    {
      tmp = (tmp >> 1) | sgn;
      n++;
    }
  *v = tmp;
  return n;
}

/* After reload, split the load of an immediate constant.  OPERANDS are
   the operands of the movsi_insn pattern which we are splitting.  We
   return nonzero if we emitted a sequence to load the constant, zero if
   we emitted nothing because we want to use the splitter's default
   sequence.  */

int
split_load_immediate (rtx operands[])
{
  HOST_WIDE_INT val = INTVAL (operands[1]);
  HOST_WIDE_INT tmp;
  HOST_WIDE_INT shifted = val;
  HOST_WIDE_INT shifted_compl = ~val;
  int num_zero = shiftr_zero (&shifted);
  /* NOTE(review): num_compl_zero and class1 appear to be used by later
     cases of this function that are not part of this excerpt.  */
  int num_compl_zero = shiftr_zero (&shifted_compl);
  unsigned int regno = REGNO (operands[0]);
  enum reg_class class1 = REGNO_REG_CLASS (regno);

  /* This case takes care of single-bit set/clear constants, which we
     could also implement with BITSET/BITCLR.  */
  if (num_zero
      && shifted >= -32768 && shifted < 65536
      && (D_REGNO_P (regno)
	  || (regno >= REG_P0 && regno <= REG_P7 && num_zero <= 2)))
    {
      emit_insn (gen_movsi (operands[0], GEN_INT (shifted)));
      emit_insn (gen_ashlsi3 (operands[0], operands[0], GEN_INT (num_zero)));
      return 1;
    }

  tmp = val & 0xFFFF;
  tmp |= -(tmp & 0x8000);	/* Sign-extend the low 16 bits.  */

  /* If high word has one bit set or clear, try to use a bit
     operation.  */
  if (D_REGNO_P (regno))
    {
      if (log2constp (val & 0xFFFF0000))
	{
	  emit_insn (gen_movsi (operands[0], GEN_INT (val & 0xFFFF)));
	  emit_insn (gen_iorsi3 (operands[0], operands[0],
				 GEN_INT (val & 0xFFFF0000)));
	  return 1;
	}
      else if (log2constp (val | 0xFFFF) && (val & 0x8000) != 0)
	{
	  emit_insn (gen_movsi (operands[0], GEN_INT (tmp)));
	  emit_insn (gen_andsi3 (operands[0], operands[0],
				 GEN_INT (val | 0xFFFF)));
	  /* Fix: this return was missing, so we would fall through and
	     emit a second, redundant load sequence below.  */
	  return 1;
	}
    }

  if (D_REGNO_P (regno))
    {
      if (CONST_7BIT_IMM_P (tmp))
	{
	  /* Load the sign-extended low half, then overwrite the high
	     half without disturbing the low one.  */
	  emit_insn (gen_movsi (operands[0], GEN_INT (tmp)));
	  emit_insn (gen_movstricthi_high (operands[0],
					   GEN_INT (val & -65536)));
	  return 1;
	}

      if ((val & 0xFFFF0000) == 0)
	{
	  emit_insn (gen_movsi (operands[0], const0_rtx));
	  emit_insn (gen_movsi_low (operands[0], operands[0], operands[1]));
	  return 1;
/* NOTE: this excerpt ends mid-way through split_load_immediate; the
   remainder of the function and of the file are not included here.  */