bfin.c
/* Return true when register may be used to pass function parameters.  */

bool
function_arg_regno_p (int n)
{
  int i;
  for (i = 0; arg_regs[i] != -1; i++)
    if (n == arg_regs[i])
      return true;
  return false;
}

/* Returns 1 if OP contains a symbol reference */

int
symbolic_reference_mentioned_p (rtx op)
{
  register const char *fmt;
  register int i;

  if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
    return 1;

  fmt = GET_RTX_FORMAT (GET_CODE (op));
  for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
        {
          register int j;

          for (j = XVECLEN (op, i) - 1; j >= 0; j--)
            if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
              return 1;
        }
      else if (fmt[i] == 'e'
               && symbolic_reference_mentioned_p (XEXP (op, i)))
        return 1;
    }

  return 0;
}

/* Decide whether we can make a sibling call to a function.  DECL is the
   declaration of the function being targeted by the call and EXP is the
   CALL_EXPR representing the call.  */

static bool
bfin_function_ok_for_sibcall (tree decl ATTRIBUTE_UNUSED,
                              tree exp ATTRIBUTE_UNUSED)
{
  e_funkind fkind = funkind (TREE_TYPE (current_function_decl));
  return fkind == SUBROUTINE;
}

/* Emit RTL insns to initialize the variable parts of a trampoline at
   TRAMP.  FNADDR is an RTX for the address of the function's pure
   code.  CXT is an RTX for the static chain value for the function.  */

void
initialize_trampoline (tramp, fnaddr, cxt)
     rtx tramp, fnaddr, cxt;
{
  rtx t1 = copy_to_reg (fnaddr);
  rtx t2 = copy_to_reg (cxt);
  rtx addr;

  addr = memory_address (Pmode, plus_constant (tramp, 2));
  emit_move_insn (gen_rtx_MEM (HImode, addr), gen_lowpart (HImode, t1));
  emit_insn (gen_ashrsi3 (t1, t1, GEN_INT (16)));
  addr = memory_address (Pmode, plus_constant (tramp, 6));
  emit_move_insn (gen_rtx_MEM (HImode, addr), gen_lowpart (HImode, t1));

  addr = memory_address (Pmode, plus_constant (tramp, 10));
  emit_move_insn (gen_rtx_MEM (HImode, addr), gen_lowpart (HImode, t2));
  emit_insn (gen_ashrsi3 (t2, t2, GEN_INT (16)));
  addr = memory_address (Pmode, plus_constant (tramp, 14));
  emit_move_insn (gen_rtx_MEM (HImode, addr), gen_lowpart (HImode, t2));
}
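/* Editorial sketch, not part of the original source: reading off the
   offsets and modes used in initialize_trampoline above, the variable
   part of the trampoline appears to be patched as follows:

     tramp +  2 : low  16 bits of FNADDR
     tramp +  6 : high 16 bits of FNADDR (t1 after the arithmetic shift)
     tramp + 10 : low  16 bits of CXT, the static chain value
     tramp + 14 : high 16 bits of CXT

   The fixed opcode halfwords around these immediates are assumed to come
   from the port's trampoline template, defined elsewhere.  */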
/* Emit insns to move operands[1] into operands[0].  */

void
emit_pic_move (rtx *operands, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  rtx temp = reload_in_progress ? operands[0] : gen_reg_rtx (Pmode);

  if (GET_CODE (operands[0]) == MEM && SYMBOLIC_CONST (operands[1]))
    operands[1] = force_reg (SImode, operands[1]);
  else
    operands[1] = legitimize_pic_address (operands[1], temp,
                                          pic_offset_table_rtx);
}

/* Expand a move operation in mode MODE.  The operands are in OPERANDS.  */

void
expand_move (rtx *operands, enum machine_mode mode)
{
  if (flag_pic && SYMBOLIC_CONST (operands[1]))
    emit_pic_move (operands, mode);
  /* Don't generate memory->memory or constant->memory moves; go through
     a register.  */
  else if ((reload_in_progress | reload_completed) == 0
           && GET_CODE (operands[0]) == MEM
           && GET_CODE (operands[1]) != REG)
    operands[1] = force_reg (mode, operands[1]);
}

/* Split one or more DImode RTL references into pairs of SImode
   references.  The RTL can be REG, offsettable MEM, integer constant, or
   CONST_DOUBLE.  "operands" is a pointer to an array of DImode RTL to
   split and "num" is its length.  lo_half and hi_half are output arrays
   that parallel "operands".  */

void
split_di (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
{
  while (num--)
    {
      rtx op = operands[num];

      /* simplify_subreg refuses to split volatile memory addresses, but we
         still have to handle it.  */
      if (GET_CODE (op) == MEM)
        {
          lo_half[num] = adjust_address (op, SImode, 0);
          hi_half[num] = adjust_address (op, SImode, 4);
        }
      else
        {
          lo_half[num] = simplify_gen_subreg (SImode, op,
                                              GET_MODE (op) == VOIDmode
                                              ? DImode : GET_MODE (op), 0);
          hi_half[num] = simplify_gen_subreg (SImode, op,
                                              GET_MODE (op) == VOIDmode
                                              ? DImode : GET_MODE (op), 4);
        }
    }
}

bool
bfin_longcall_p (rtx op, int call_cookie)
{
  gcc_assert (GET_CODE (op) == SYMBOL_REF);
  if (call_cookie & CALL_SHORT)
    return 0;
  if (call_cookie & CALL_LONG)
    return 1;
  if (TARGET_LONG_CALLS)
    return 1;
  return 0;
}

/* Expand a call instruction.  FNADDR is the call target, RETVAL the
   return value.  COOKIE is a CONST_INT holding the call_cookie prepared
   by init_cumulative_args.  SIBCALL is nonzero if this is a sibling
   call.  */

void
bfin_expand_call (rtx retval, rtx fnaddr, rtx callarg1, rtx cookie,
                  int sibcall)
{
  rtx use = NULL, call;
  rtx callee = XEXP (fnaddr, 0);
  rtx pat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (sibcall ? 3 : 2));

  /* In an untyped call, we can get NULL for operand 2.  */
  if (cookie == NULL_RTX)
    cookie = const0_rtx;

  /* Static functions and indirect calls don't need the pic register.  */
  if (flag_pic
      && GET_CODE (callee) == SYMBOL_REF
      && !SYMBOL_REF_LOCAL_P (callee))
    use_reg (&use, pic_offset_table_rtx);

  if ((!register_no_elim_operand (callee, Pmode)
       && GET_CODE (callee) != SYMBOL_REF)
      || (GET_CODE (callee) == SYMBOL_REF
          && (flag_pic || bfin_longcall_p (callee, INTVAL (cookie)))))
    {
      callee = copy_to_mode_reg (Pmode, callee);
      fnaddr = gen_rtx_MEM (Pmode, callee);
    }
  call = gen_rtx_CALL (VOIDmode, fnaddr, callarg1);

  if (retval)
    call = gen_rtx_SET (VOIDmode, retval, call);

  XVECEXP (pat, 0, 0) = call;
  XVECEXP (pat, 0, 1) = gen_rtx_USE (VOIDmode, cookie);
  if (sibcall)
    XVECEXP (pat, 0, 2) = gen_rtx_RETURN (VOIDmode);
  call = emit_call_insn (pat);
  if (use)
    CALL_INSN_FUNCTION_USAGE (call) = use;
}
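/* Editorial sketch, not part of the original source: bfin_expand_call
   above builds a call pattern of roughly this shape (shown for a
   value-returning sibling call; the (set ...) wrapper is dropped when
   RETVAL is null, and the (return) element when SIBCALL is zero):

     (parallel [(set RETVAL (call FNADDR CALLARG1))
                (use COOKIE)
                (return)])

   A symbolic callee is first forced into a register whenever -fpic is in
   effect or bfin_longcall_p says the target may be out of reach of a
   direct call.  */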
/* Return 1 if hard register REGNO can hold a value of machine-mode
   MODE.  */

int
hard_regno_mode_ok (int regno, enum machine_mode mode)
{
  /* Allow only dregs to store value of mode HI or QI */
  enum reg_class class = REGNO_REG_CLASS (regno);

  if (mode == CCmode)
    return 0;

  if (mode == V2HImode)
    return D_REGNO_P (regno);
  if (class == CCREGS)
    return mode == BImode;
  if (mode == PDImode)
    return regno == REG_A0 || regno == REG_A1;
  if (mode == SImode
      && TEST_HARD_REG_BIT (reg_class_contents[PROLOGUE_REGS], regno))
    return 1;

  return TEST_HARD_REG_BIT (reg_class_contents[MOST_REGS], regno);
}

/* Implements target hook vector_mode_supported_p.  */

static bool
bfin_vector_mode_supported_p (enum machine_mode mode)
{
  return mode == V2HImode;
}

/* Return the cost of moving data from a register in class CLASS1 to
   one in class CLASS2.  A cost of 2 is the default.  */

int
bfin_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
                         enum reg_class class1, enum reg_class class2)
{
  /* If optimizing for size, always prefer reg-reg over reg-memory moves.  */
  if (optimize_size)
    return 2;

  /* There are some stalls involved when moving from a DREG to a different
     class reg, and using the value in one of the following instructions.
     Attempt to model this by slightly discouraging such moves.  */
  if (class1 == DREGS && class2 != DREGS)
    return 2 * 2;

  return 2;
}

/* Return the cost of moving data of mode M between a register and
   memory.  A value of 2 is the default; this cost is relative to those
   in `REGISTER_MOVE_COST'.

   ??? In theory L1 memory has single-cycle latency.  We should add a
   switch that tells the compiler whether we expect to use only L1 memory
   for the program; it'll make the costs more accurate.  */

int
bfin_memory_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
                       enum reg_class class,
                       int in ATTRIBUTE_UNUSED)
{
  /* Make memory accesses slightly more expensive than any
     register-register move.  Also, penalize non-DP registers, since they
     need secondary reloads to load and store.  */
  if (! reg_class_subset_p (class, DPREGS))
    return 10;

  return 8;
}

/* Inform reload about cases where moving X with a mode MODE to a register
   in CLASS requires an extra scratch register.  Return the class needed
   for the scratch register.  */

enum reg_class
secondary_input_reload_class (enum reg_class class, enum machine_mode mode,
                              rtx x)
{
  /* If we have HImode or QImode, we can only use DREGS as secondary
     registers; in most other cases we can also use PREGS.  */
  enum reg_class default_class = GET_MODE_SIZE (mode) >= 4 ? DPREGS : DREGS;
  enum reg_class x_class = NO_REGS;
  enum rtx_code code = GET_CODE (x);

  if (code == SUBREG)
    x = SUBREG_REG (x), code = GET_CODE (x);
  if (REG_P (x))
    {
      int regno = REGNO (x);
      if (regno >= FIRST_PSEUDO_REGISTER)
        regno = reg_renumber[regno];

      if (regno == -1)
        code = MEM;
      else
        x_class = REGNO_REG_CLASS (regno);
    }

  /* We can be asked to reload (plus (FP) (large_constant)) into a DREG.
     This happens as a side effect of register elimination, and we need
     a scratch register to do it.  */
  if (fp_plus_const_operand (x, mode))
    {
      rtx op2 = XEXP (x, 1);
      int large_constant_p = ! CONST_7BIT_IMM_P (INTVAL (op2));

      if (class == PREGS || class == PREGS_CLOBBERED)
        return NO_REGS;
      /* If destination is a DREG, we can do this without a scratch register
         if the constant is valid for an add instruction.  */
      if (class == DREGS || class == DPREGS)
        return large_constant_p ? PREGS : NO_REGS;
      /* Reloading to anything other than a DREG?  Use a PREG scratch
         register.  */
      return PREGS;
    }

  /* Data can usually be moved freely between registers of most classes.
     AREGS are an exception; they can only move to or from another register
     in AREGS or one in DREGS.  They can also be assigned the constant 0.  */
  if (x_class == AREGS)
    return class == DREGS || class == AREGS ? NO_REGS : DREGS;

  if (class == AREGS)
    {
      if (x != const0_rtx && x_class != DREGS)
        return DREGS;
      else
        return NO_REGS;
    }

  /* CCREGS can only be moved from/to DREGS.  */
  if (class == CCREGS && x_class != DREGS)
    return DREGS;
  if (x_class == CCREGS && class != DREGS)
    return DREGS;

  /* All registers other than AREGS can load arbitrary constants.  The only
     case that remains is MEM.  */
  if (code == MEM)
    if (! reg_class_subset_p (class, default_class))
      return default_class;
  return NO_REGS;
}

/* Like secondary_input_reload_class; and all we do is call that
   function.  */

enum reg_class
secondary_output_reload_class (enum reg_class class, enum machine_mode mode,
                               rtx x)
{
  return secondary_input_reload_class (class, mode, x);
}
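/* Editorial summary, not part of the original source: the secondary
   reload rules above reduce to roughly the following:

     - reloading (plus FP constant) generally needs a PREG scratch; no
       scratch is needed when the destination class is itself PREGS, or
       when it is DREGS/DPREGS and the constant fits in 7 signed bits;
     - AREGS exchange data only with DREGS, other AREGS, or the constant
       0; anything else goes through a DREG scratch;
     - CCREGS exchange data only with DREGS;
     - a MEM reloaded into a class not contained in the default class
       (DREGS for HImode/QImode, DPREGS for wider modes) needs a scratch
       in that default class.  */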
/* Implement TARGET_HANDLE_OPTION.  */

static bool
bfin_handle_option (size_t code, const char *arg, int value)
{
  switch (code)
    {
    case OPT_mshared_library_id_:
      if (value > MAX_LIBRARY_ID)
        error ("-mshared-library-id=%s is not between 0 and %d",
               arg, MAX_LIBRARY_ID);
      bfin_lib_id_given = 1;
      return true;

    default:
      return true;
    }
}

/* Implement the macro OVERRIDE_OPTIONS.  */

void
override_options (void)
{
  if (TARGET_OMIT_LEAF_FRAME_POINTER)
    flag_omit_frame_pointer = 1;

  /* Library identification */
  if (bfin_lib_id_given && ! TARGET_ID_SHARED_LIBRARY)
    error ("-mshared-library-id= specified without -mid-shared-library");

  if (TARGET_ID_SHARED_LIBRARY)
    /* ??? Provide a way to use a bigger GOT.  */
    flag_pic = 1;

  flag_schedule_insns = 0;
}

/* Return the destination address of BRANCH.
   We need to use this instead of get_attr_length, because the
   cbranch_with_nops pattern conservatively sets its length to 6, and
   we still prefer to use shorter sequences.  */

static int
branch_dest (rtx branch)
{
  rtx dest;
  int dest_uid;
  rtx pat = PATTERN (branch);
  if (GET_CODE (pat) == PARALLEL)
    pat = XVECEXP (pat, 0, 0);
  dest = SET_SRC (pat);
  if (GET_CODE (dest) == IF_THEN_ELSE)
    dest = XEXP (dest, 1);
  dest = XEXP (dest, 0);
  dest_uid = INSN_UID (dest);
  return INSN_ADDRESSES (dest_uid);
}

/* Return nonzero if INSN is annotated with a REG_BR_PROB note that
   indicates it's a branch that's predicted taken.  */

static int
cbranch_predicted_taken_p (rtx insn)
{
  rtx x = find_reg_note (insn, REG_BR_PROB, 0);

  if (x)
    {
      int pred_val = INTVAL (XEXP (x, 0));

      return pred_val >= REG_BR_PROB_BASE / 2;
    }

  return 0;
}

/* Templates for use by asm_conditional_branch.  */

static const char *ccbranch_templates[][3] = {
  { "if !cc jump %3;",      "if cc jump 4 (bp); jump.s %3;",  "if cc jump 6 (bp); jump.l %3;" },
  { "if cc jump %3;",       "if !cc jump 4 (bp); jump.s %3;", "if !cc jump 6 (bp); jump.l %3;" },
  { "if !cc jump %3 (bp);", "if cc jump 4; jump.s %3;",       "if cc jump 6; jump.l %3;" },
  { "if cc jump %3 (bp);",  "if !cc jump 4; jump.s %3;",      "if !cc jump 6; jump.l %3;" },
};

/* Output INSN, which is a conditional branch instruction with operands
   OPERANDS.

   We deal with the various forms of conditional branches that can be
   generated by bfin_reorg to prevent the hardware from doing speculative
   loads, by
    - emitting a sufficient number of nops, if N_NOPS is nonzero, or
    - always emitting the branch as predicted taken, if PREDICT_TAKEN is
      true.

   Either of these is only necessary if the branch is short, otherwise
   the template we use ends in an unconditional jump which flushes the
   pipeline anyway.  */

void
asm_conditional_branch (rtx insn, rtx *operands, int n_nops, int predict_taken)
{
  int offset = branch_dest (insn) - INSN_ADDRESSES (INSN_UID (insn));
  /* Note : offset for instructions like if cc jmp; jump.[sl] offset
            is to be taken from start of if cc rather than jump.
            Range for jump.s is (-4094, 4096) instead of (-4096, 4094).  */
  int len = (offset >= -1024 && offset <= 1022 ? 0
             : offset >= -4094 && offset <= 4096 ? 1
             : 2);
  int bp = predict_taken && len == 0 ? 1 : cbranch_predicted_taken_p (insn);
  int idx = (bp << 1) | (GET_CODE (operands[0]) == EQ ? BRF : BRT);
  output_asm_insn (ccbranch_templates[idx][len], operands);
  gcc_assert (n_nops == 0 || !bp);
  if (len == 0)
    while (n_nops-- > 0)
      output_asm_insn ("nop;", NULL);
}

/* Emit rtl for a comparison operation CMP in mode MODE.  Operands have
   been stored in bfin_compare_op0 and bfin_compare_op1 already.  */

rtx
bfin_gen_compare (rtx cmp, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  enum rtx_code code1, code2;
  rtx op0 = bfin_compare_op0, op1 = bfin_compare_op1;
  rtx tem = bfin_cc_rtx;
  enum rtx_code code = GET_CODE (cmp);