mcore.c
     rtx op;
     enum machine_mode mode;
{
  if (register_operand (op, mode))
    return 1;

  if (GET_CODE (op) == CONST_INT)
    return 1;

  return 0;
}

/* Nonzero if OP is a valid source operand for a cmov with two consts +/- 1.  */

int
mcore_arith_O_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (register_operand (op, mode))
    return 1;

  if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_O (INTVAL (op)))
    return 1;

  return 0;
}

/* Nonzero if OP is a valid source operand for a btsti.  */

int
mcore_literal_K_operand (op, mode)
     rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_K (INTVAL (op)))
    return 1;

  return 0;
}

/* Nonzero if OP is a valid source operand for an add/sub insn.  */

int
mcore_addsub_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (register_operand (op, mode))
    return 1;

  if (GET_CODE (op) == CONST_INT)
    {
      return 1;

      /* The following is removed because it precludes large constants
         from being returned as valid source operands for an add/sub
         insn.  While large constants may not directly be used in an
         add/sub, they may be if first loaded into a register.  Thus,
         this predicate should indicate that they are valid, and the
         constraint in mcore.md should control whether an additional
         load to register is needed.  (see mcore.md, addsi).
         -- DAC 4/2/1998  */
      /*
         if (CONST_OK_FOR_J (INTVAL (op)) || CONST_OK_FOR_L (INTVAL (op)))
           return 1;
      */
    }

  return 0;
}

/* Nonzero if OP is a valid source operand for a compare operation.  */

int
mcore_compare_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (register_operand (op, mode))
    return 1;

  if (GET_CODE (op) == CONST_INT && INTVAL (op) == 0)
    return 1;

  return 0;
}

/* Expand insert bit field.  BRC  */

int
mcore_expand_insv (operands)
     rtx operands[];
{
  int width = INTVAL (operands[1]);
  int posn = INTVAL (operands[2]);
  int mask;
  rtx mreg, sreg, ereg;

  /* To get width 1 insv, the test in store_bit_field() (expmed.c, line
     191) for width == 1 must be removed.  Look around line 368.  This
     is something we really want the md part to do.  */
  if (width == 1 && GET_CODE (operands[3]) == CONST_INT)
    {
      /* Do directly with bseti or bclri.  */
      /* RBE: 2/97 consider only low bit of constant.  */
      if ((INTVAL (operands[3]) & 1) == 0)
        {
          mask = ~(1 << posn);
          emit_insn (gen_rtx (SET, SImode, operands[0],
                              gen_rtx (AND, SImode, operands[0],
                                       GEN_INT (mask))));
        }
      else
        {
          mask = 1 << posn;
          emit_insn (gen_rtx (SET, SImode, operands[0],
                              gen_rtx (IOR, SImode, operands[0],
                                       GEN_INT (mask))));
        }

      return 1;
    }

  /* Look at some bit-field placements that we aren't interested
     in handling ourselves, unless specifically directed to do so.  */
  if (! TARGET_W_FIELD)
    return 0;           /* Generally, give up about now.  */

  if (width == 8 && posn % 8 == 0)
    /* Byte sized and aligned; let caller break it up.  */
    return 0;

  if (width == 16 && posn % 16 == 0)
    /* Short sized and aligned; let caller break it up.  */
    return 0;

  /* The general case - we can do this a little bit better than what the
     machine independent part tries.  This will get rid of all the subregs
     that mess up constant folding in combine when working with relaxed
     immediates.  */

  /* If setting the entire field, do it directly.  */
  if (GET_CODE (operands[3]) == CONST_INT
      && INTVAL (operands[3]) == ((1 << width) - 1))
    {
      mreg = force_reg (SImode, GEN_INT (INTVAL (operands[3]) << posn));
      emit_insn (gen_rtx (SET, SImode, operands[0],
                          gen_rtx (IOR, SImode, operands[0], mreg)));
      return 1;
    }

  /* Generate the clear mask.  */
  mreg = force_reg (SImode, GEN_INT (~(((1 << width) - 1) << posn)));
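
  /* Worked example (added note, not from the original source): for
     width == 4 and posn == 8, the field mask is (1 << 4) - 1 == 0xf,
     so the clear mask is ~(0xf << 8) == 0xfffff0ff.  ANDing with it
     zeroes exactly bits 8..11 before the shifted source is ORed in.  */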
  /* Clear the field, to overlay it later with the source.  */
  emit_insn (gen_rtx (SET, SImode, operands[0],
                      gen_rtx (AND, SImode, operands[0], mreg)));

  /* If the source is constant 0, we've nothing to add back.  */
  if (GET_CODE (operands[3]) == CONST_INT && INTVAL (operands[3]) == 0)
    return 1;

  /* XXX: Should we worry about more games with constant values?
     We've covered the high profile: set/clear single-bit and
     many-bit fields.  How often do we see "arbitrary bit pattern"
     constants?  */
  sreg = copy_to_mode_reg (SImode, operands[3]);

  /* Extract src as same width as dst (needed for signed values).  We
     always have to do this since we widen everything to SImode.
     We don't have to mask if we're shifting this up against the
     MSB of the register (e.g., the shift will push out any hi-order
     bits).  WIDTH and POSN are bit counts, so compare against the
     mode's size in bits, not bytes.  */
  if (width + posn != (int) GET_MODE_BITSIZE (SImode))
    {
      ereg = force_reg (SImode, GEN_INT ((1 << width) - 1));
      emit_insn (gen_rtx (SET, SImode, sreg,
                          gen_rtx (AND, SImode, sreg, ereg)));
    }

  /* Insert source value in dest.  */
  if (posn != 0)
    emit_insn (gen_rtx (SET, SImode, sreg,
                        gen_rtx (ASHIFT, SImode, sreg, GEN_INT (posn))));

  emit_insn (gen_rtx (SET, SImode, operands[0],
                      gen_rtx (IOR, SImode, operands[0], sreg)));

  return 1;
}

/* Return 1 if OP is a load multiple operation.  It is known to be a
   PARALLEL and the first section will be tested.  */

int
mcore_load_multiple_operation (op, mode)
     rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  int count = XVECLEN (op, 0);
  int dest_regno;
  rtx src_addr;
  int i;

  /* Perform a quick check so we don't blow up below.  */
  if (count <= 1
      || GET_CODE (XVECEXP (op, 0, 0)) != SET
      || GET_CODE (SET_DEST (XVECEXP (op, 0, 0))) != REG
      || GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != MEM)
    return 0;

  dest_regno = REGNO (SET_DEST (XVECEXP (op, 0, 0)));
  src_addr = XEXP (SET_SRC (XVECEXP (op, 0, 0)), 0);

  for (i = 1; i < count; i++)
    {
      rtx elt = XVECEXP (op, 0, i);

      if (GET_CODE (elt) != SET
          || GET_CODE (SET_DEST (elt)) != REG
          || GET_MODE (SET_DEST (elt)) != SImode
          || REGNO (SET_DEST (elt)) != (unsigned) (dest_regno + i)
          || GET_CODE (SET_SRC (elt)) != MEM
          || GET_MODE (SET_SRC (elt)) != SImode
          || GET_CODE (XEXP (SET_SRC (elt), 0)) != PLUS
          || ! rtx_equal_p (XEXP (XEXP (SET_SRC (elt), 0), 0), src_addr)
          || GET_CODE (XEXP (XEXP (SET_SRC (elt), 0), 1)) != CONST_INT
          || INTVAL (XEXP (XEXP (SET_SRC (elt), 0), 1)) != i * 4)
        return 0;
    }

  return 1;
}

/* Similar, but tests for store multiple.  */

int
mcore_store_multiple_operation (op, mode)
     rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  int count = XVECLEN (op, 0);
  int src_regno;
  rtx dest_addr;
  int i;

  /* Perform a quick check so we don't blow up below.  */
  if (count <= 1
      || GET_CODE (XVECEXP (op, 0, 0)) != SET
      || GET_CODE (SET_DEST (XVECEXP (op, 0, 0))) != MEM
      || GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != REG)
    return 0;

  src_regno = REGNO (SET_SRC (XVECEXP (op, 0, 0)));
  dest_addr = XEXP (SET_DEST (XVECEXP (op, 0, 0)), 0);

  for (i = 1; i < count; i++)
    {
      rtx elt = XVECEXP (op, 0, i);

      if (GET_CODE (elt) != SET
          || GET_CODE (SET_SRC (elt)) != REG
          || GET_MODE (SET_SRC (elt)) != SImode
          || REGNO (SET_SRC (elt)) != (unsigned) (src_regno + i)
          || GET_CODE (SET_DEST (elt)) != MEM
          || GET_MODE (SET_DEST (elt)) != SImode
          || GET_CODE (XEXP (SET_DEST (elt), 0)) != PLUS
          || ! rtx_equal_p (XEXP (XEXP (SET_DEST (elt), 0), 0), dest_addr)
          || GET_CODE (XEXP (XEXP (SET_DEST (elt), 0), 1)) != CONST_INT
          || INTVAL (XEXP (XEXP (SET_DEST (elt), 0), 1)) != i * 4)
        return 0;
    }

  return 1;
}
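
/* Illustrative sketch (added note, not from the original source): the
   PARALLEL shape accepted by the two predicates above, shown here for
   a hypothetical 3-word load from register r into regs 4..6, is

       (parallel [(set (reg:SI 4) (mem:SI (reg:SI r)))
                  (set (reg:SI 5) (mem:SI (plus:SI (reg:SI r) (const_int 4))))
                  (set (reg:SI 6) (mem:SI (plus:SI (reg:SI r) (const_int 8))))])

   i.e. consecutive registers moved to/from consecutive words of memory.  */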
/* ??? Block move stuff stolen from m88k.  This code has not been
   verified for correctness.  */

/* Emit code to perform a block move.  Choose the best method.

   OPERANDS[0] is the destination.
   OPERANDS[1] is the source.
   OPERANDS[2] is the size.
   OPERANDS[3] is the alignment safe to use.  */

/* Emit code to perform a block move with an offset sequence of ldw/st
   instructions (..., ldw 0, stw 1, ldw 1, stw 0, ...).  SIZE and ALIGN are
   known constants.  DEST and SRC are registers.  OFFSET is the known
   starting point for the output pattern.  */

static const enum machine_mode mode_from_align[] =
{
  VOIDmode, QImode, HImode, VOIDmode, SImode,
  VOIDmode, VOIDmode, VOIDmode, DImode
};

static void
block_move_sequence (dest, dst_mem, src, src_mem, size, align, offset)
     rtx dest, dst_mem;
     rtx src, src_mem;
     int size;
     int align;
     int offset;
{
  rtx temp[2];
  enum machine_mode mode[2];
  int amount[2];
  int active[2];
  int phase = 0;
  int next;
  int offset_ld = offset;
  int offset_st = offset;

  active[0] = active[1] = FALSE;

  /* Establish parameters for the first load and for the second load if
     it is known to be the same mode as the first.  */
  amount[0] = amount[1] = align;
  mode[0] = mode_from_align[align];
  temp[0] = gen_reg_rtx (mode[0]);

  if (size >= 2 * align)
    {
      mode[1] = mode[0];
      temp[1] = gen_reg_rtx (mode[1]);
    }

  do
    {
      rtx srcp, dstp;

      next = phase;
      phase = !phase;

      if (size > 0)
        {
          /* Change modes as the sequence tails off.  */
          if (size < amount[next])
            {
              amount[next] = (size >= 4 ? 4 : (size >= 2 ? 2 : 1));
              mode[next] = mode_from_align[amount[next]];
              temp[next] = gen_reg_rtx (mode[next]);
            }

          size -= amount[next];
          srcp = gen_rtx (MEM,
#if 0
                          MEM_IN_STRUCT_P (src_mem) ? mode[next] : BLKmode,
#else
                          mode[next],
#endif
                          gen_rtx (PLUS, Pmode, src,
                                   gen_rtx (CONST_INT, SImode, offset_ld)));

          RTX_UNCHANGING_P (srcp) = RTX_UNCHANGING_P (src_mem);
          MEM_VOLATILE_P (srcp) = MEM_VOLATILE_P (src_mem);
          MEM_IN_STRUCT_P (srcp) = 1;
          emit_insn (gen_rtx (SET, VOIDmode, temp[next], srcp));
          offset_ld += amount[next];
          active[next] = TRUE;
        }

      if (active[phase])
        {
          active[phase] = FALSE;

          dstp = gen_rtx (MEM,
#if 0
                          MEM_IN_STRUCT_P (dst_mem) ? mode[phase] : BLKmode,
#else
                          mode[phase],
#endif
                          gen_rtx (PLUS, Pmode, dest,
                                   gen_rtx (CONST_INT, SImode, offset_st)));

          RTX_UNCHANGING_P (dstp) = RTX_UNCHANGING_P (dst_mem);
          MEM_VOLATILE_P (dstp) = MEM_VOLATILE_P (dst_mem);
          MEM_IN_STRUCT_P (dstp) = 1;
          emit_insn (gen_rtx (SET, VOIDmode, dstp, temp[phase]));
          offset_st += amount[phase];
        }
    }
  while (active[next]);
}

void
mcore_expand_block_move (dst_mem, src_mem, operands)
     rtx dst_mem;
     rtx src_mem;
     rtx * operands;
{
  int align = INTVAL (operands[3]);
  int bytes;

  if (GET_CODE (operands[2]) == CONST_INT)
    {
      bytes = INTVAL (operands[2]);

      if (bytes <= 0)
        return;
      if (align > 4)
        align = 4;

      /* RBE: bumped 1 and 2 byte align from 1 and 2 to 4 and 8 bytes before
         we give up and go to memcpy.  */
      if ((align == 4 && (bytes <= 4*4
                          || ((bytes & 01) == 0 && bytes <= 8*4)
                          || ((bytes & 03) == 0 && bytes <= 16*4)))
          || (align == 2 && bytes <= 4*2)
          || (align == 1 && bytes <= 4*1))
        {
          block_move_sequence (operands[0], dst_mem, operands[1], src_mem,
                               bytes, align, 0);
          return;
        }
    }

  /* If we get here, just use the library routine.  */
  emit_library_call (gen_rtx (SYMBOL_REF, Pmode, "memcpy"), 0, VOIDmode, 3,
                     operands[0], Pmode, operands[1], Pmode, operands[2],
                     SImode);
}

/* Code to generate prologue and epilogue sequences.  */

static int number_of_regs_before_varargs;

/* Set by SETUP_INCOMING_VARARGS to indicate to prolog that this is for
   a varargs function.  */
static int current_function_anonymous_args;

#define STACK_BYTES (STACK_BOUNDARY / BITS_PER_UNIT)
#define STORE_REACH (64)        /* Maximum displace of word store + 4.  */
#define ADDI_REACH  (32)        /* Maximum addi operand.  */
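
/* Overview (added note, not from the original source): the frame laid
   out below has four regions - incoming argument spill (including
   anonymous varargs registers), saved non-volatile registers, locals,
   and outbound argument overflow - each padded out to STACK_BYTES.
   The growth[] steps staged here let the prologue grow the stack in
   pieces small enough to stay within ADDI_REACH.  */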
static void
layout_mcore_frame (infp)
     struct mcore_frame * infp;
{
  int n;
  unsigned int i;
  int nbytes;
  int regarg;
  int localregarg;
  int localreg;
  int outbounds;
  unsigned int growths;
  int step;

  /* Might have to spill bytes to re-assemble a big argument that
     was passed partially in registers and partially on the stack.  */
  nbytes = current_function_pretend_args_size;

  /* Determine how much space for spilled anonymous args (e.g., stdarg).  */
  if (current_function_anonymous_args)
    nbytes += (NPARM_REGS - number_of_regs_before_varargs) * UNITS_PER_WORD;

  infp->arg_size = nbytes;

  /* How much space to save non-volatile registers we stomp.  */
  infp->reg_mask = calc_live_regs (& n);
  infp->reg_size = n * 4;

  /* And the rest of it... locals and space for overflowed outbounds.  */
  infp->local_size = get_frame_size ();
  infp->outbound_size = current_function_outgoing_args_size;

  /* Make sure we have a whole number of words for the locals.  */
  if (infp->local_size % STACK_BYTES)
    infp->local_size = (infp->local_size + STACK_BYTES - 1) & ~(STACK_BYTES - 1);

  /* Only thing we know we have to pad is the outbound space, since we've
     aligned our locals assuming that base of locals is aligned.  */
  infp->pad_local = 0;
  infp->pad_reg = 0;
  infp->pad_outbound = 0;
  if (infp->outbound_size % STACK_BYTES)
    infp->pad_outbound = STACK_BYTES - (infp->outbound_size % STACK_BYTES);

  /* Now we see how we want to stage the prologue so that it does
     the most appropriate stack growth and register saves to either:
     (1) run fast,
     (2) reduce instruction space, or
     (3) reduce stack space.  */
  for (i = 0; i < ARRAY_SIZE (infp->growth); i++)
    infp->growth[i] = 0;

  regarg      = infp->reg_size + infp->arg_size;
  localregarg = infp->local_size + regarg;
  localreg    = infp->local_size + infp->reg_size;
  outbounds   = infp->outbound_size + infp->pad_outbound;
  growths     = 0;

  /* XXX: Consider one where we consider localregarg + outbound too!  */

  /* Frame of <= 32 bytes and using stm would get <= 2 registers.
     Use stw's with offsets and buy the frame in one shot.  */
  if (localregarg <= ADDI_REACH
      && (infp->reg_size <= 8 || (infp->reg_mask & 0xc000) != 0xc000))
    {
      /* Make sure we'll be aligned.  */
      if (localregarg % STACK_BYTES)
        infp->pad_reg = STACK_BYTES - (localregarg % STACK_BYTES);

      step = localregarg + infp->pad_reg;
      infp->reg_offset = infp->local_size;

      if (outbounds + step <= ADDI_REACH && !frame_pointer_needed)
        {
          step += outbounds;
          infp->reg_offset += outbounds;
          outbounds = 0;
        }

      infp->arg_offset = step - 4;
      infp->growth[growths++] = step;