;; alliant.md

return \"mov%.w %1,%0\";}")(define_insn "movstricthi" [(set (strict_low_part (match_operand:HI 0 "general_operand" "+dm")) (match_operand:HI 1 "general_operand" "rmn"))] "" "*{ if (GET_CODE (operands[1]) == CONST_INT) { if (operands[1] == const0_rtx && (DATA_REG_P (operands[0]) || GET_CODE (operands[0]) == MEM)) return \"clr%.w %0\"; } return \"mov%.w %1,%0\";}")(define_insn "movqi" [(set (match_operand:QI 0 "general_operand" "=d,*a,m,m,?*a") (match_operand:QI 1 "general_operand" "dmi*a,d*a,dmi,?*a,m"))] "" "*{ rtx xoperands[4]; if (ADDRESS_REG_P (operands[0]) && GET_CODE (operands[1]) == MEM) { xoperands[1] = operands[1]; xoperands[2] = gen_rtx (MEM, QImode, gen_rtx (PLUS, VOIDmode, stack_pointer_rtx, const1_rtx)); xoperands[3] = stack_pointer_rtx; /* Just pushing a byte puts it in the high byte of the halfword. */ /* We must put it in the low half, the second byte. */ output_asm_insn (\"subq%.w %#2,%3\;mov%.b %1,%2\", xoperands); return \"mov%.w %+,%0\"; } if (ADDRESS_REG_P (operands[1]) && GET_CODE (operands[0]) == MEM) { xoperands[0] = operands[0]; xoperands[1] = operands[1]; xoperands[2] = gen_rtx (MEM, QImode, gen_rtx (PLUS, VOIDmode, stack_pointer_rtx, const1_rtx)); xoperands[3] = stack_pointer_rtx; output_asm_insn (\"mov%.w %1,%-\;mov%.b %2,%0\;addq%.w %#2,%3\", xoperands); return \"\"; } if (operands[1] == const0_rtx) return \"clr%.b %0\"; if (GET_CODE (operands[1]) == CONST_INT && INTVAL (operands[1]) == -1) return \"st %0\"; if (GET_CODE (operands[1]) != CONST_INT && CONSTANT_P (operands[1])) return \"mov%.l %1,%0\"; if (ADDRESS_REG_P (operands[0]) || ADDRESS_REG_P (operands[1])) return \"mov%.w %1,%0\"; return \"mov%.b %1,%0\";}")(define_insn "movstrictqi" [(set (strict_low_part (match_operand:QI 0 "general_operand" "+dm")) (match_operand:QI 1 "general_operand" "dmn"))] "" "*{ if (operands[1] == const0_rtx) return \"clr%.b %0\"; return \"mov%.b %1,%0\";}");; Floating-point moves on a CE are faster using an FP register than;; with movl instructions. (Especially for double floats, but also;; for single floats, even though it takes an extra instruction.) But;; on an IP, the FP registers are simulated and so should be avoided.;; We do this by using define_expand for movsf and movdf, and using;; different constraints for each target type. 
(define_insn "movstrictqi"
  [(set (strict_low_part (match_operand:QI 0 "general_operand" "+dm"))
        (match_operand:QI 1 "general_operand" "dmn"))]
  ""
  "*
{
  if (operands[1] == const0_rtx)
    return \"clr%.b %0\";
  return \"mov%.b %1,%0\";
}")

;; Floating-point moves on a CE are faster using an FP register than
;; with movl instructions.  (Especially for double floats, but also
;; for single floats, even though it takes an extra instruction.)  But
;; on an IP, the FP registers are simulated and so should be avoided.
;; We do this by using define_expand for movsf and movdf, and using
;; different constraints for each target type.  The constraints for
;; TARGET_CE allow general registers because they sometimes need to
;; hold floats, but they are not preferable.

(define_expand "movsf"
  [(set (match_operand:SF 0 "general_operand" "")
        (match_operand:SF 1 "nonimmediate_operand" ""))]
  ""
  "")

(define_insn ""
  [(set (match_operand:SF 0 "general_operand" "=f,m,!*r,!f*m")
        (match_operand:SF 1 "nonimmediate_operand" "fm,f,f*r*m,*r"))]
  "TARGET_CE"
  "*
{
  if (FP_REG_P (operands[0]))
    {
      if (FP_REG_P (operands[1]))
        return \"fmove%.s %1,%0\";
      if (REG_P (operands[1]))
        return \"mov%.l %1,%-\;fmove%.s %+,%0\";
      return \"fmove%.s %1,%0\";
    }
  if (FP_REG_P (operands[1]))
    {
      if (REG_P (operands[0]))
        return \"fmove%.s %1,%-\;mov%.l %+,%0\";
      return \"fmove%.s %1,%0\";
    }
  return \"mov%.l %1,%0\";
}")

(define_insn ""
  [(set (match_operand:SF 0 "general_operand" "=frm")
        (match_operand:SF 1 "nonimmediate_operand" "frm"))]
  "!TARGET_CE"
  "*
{
  if (FP_REG_P (operands[0]))
    {
      if (FP_REG_P (operands[1]))
        return \"fmove%.s %1,%0\";
      if (REG_P (operands[1]))
        return \"mov%.l %1,%-\;fmove%.s %+,%0\";
      return \"fmove%.s %1,%0\";
    }
  if (FP_REG_P (operands[1]))
    {
      if (REG_P (operands[0]))
        return \"fmove%.s %1,%-\;mov%.l %+,%0\";
      return \"fmove%.s %1,%0\";
    }
  return \"mov%.l %1,%0\";
}")

(define_expand "movdf"
  [(set (match_operand:DF 0 "general_operand" "")
        (match_operand:DF 1 "nonimmediate_operand" ""))]
  ""
  "")

(define_insn ""
  [(set (match_operand:DF 0 "general_operand" "=f,m,!*r,!f*m")
        (match_operand:DF 1 "nonimmediate_operand" "fm,f,f*r*m,*r"))]
  "TARGET_CE"
  "*
{
  if (FP_REG_P (operands[0]))
    {
      if (FP_REG_P (operands[1]))
        return \"fmove%.d %1,%0\";
      if (REG_P (operands[1]))
        {
          rtx xoperands[2];
          xoperands[1] = gen_rtx (REG, SImode, REGNO (operands[1]) + 1);
          output_asm_insn (\"mov%.l %1,%-\", xoperands);
          output_asm_insn (\"mov%.l %1,%-\", operands);
          return \"fmove%.d %+,%0\";
        }
      return \"fmove%.d %1,%0\";
    }
  else if (FP_REG_P (operands[1]))
    {
      if (REG_P (operands[0]))
        {
          output_asm_insn (\"fmove%.d %1,%-\;mov%.l %+,%0\", operands);
          operands[0] = gen_rtx (REG, SImode, REGNO (operands[0]) + 1);
          return \"mov%.l %+,%0\";
        }
      return \"fmove%.d %1,%0\";
    }
  return output_move_double (operands);
}")

(define_insn ""
  [(set (match_operand:DF 0 "general_operand" "=frm")
        (match_operand:DF 1 "nonimmediate_operand" "frm"))]
  "!TARGET_CE"
  "*
{
  if (FP_REG_P (operands[0]))
    {
      if (FP_REG_P (operands[1]))
        return \"fmove%.d %1,%0\";
      if (REG_P (operands[1]))
        {
          rtx xoperands[2];
          xoperands[1] = gen_rtx (REG, SImode, REGNO (operands[1]) + 1);
          output_asm_insn (\"mov%.l %1,%-\", xoperands);
          output_asm_insn (\"mov%.l %1,%-\", operands);
          return \"fmove%.d %+,%0\";
        }
      return \"fmove%.d %1,%0\";
    }
  else if (FP_REG_P (operands[1]))
    {
      if (REG_P (operands[0]))
        {
          output_asm_insn (\"fmove%.d %1,%-\;mov%.l %+,%0\", operands);
          operands[0] = gen_rtx (REG, SImode, REGNO (operands[0]) + 1);
          return \"mov%.l %+,%0\";
        }
      return \"fmove%.d %1,%0\";
    }
  return output_move_double (operands);
}")
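
;; Illustrative note (not in the original source): the TARGET_CE patterns
;; above move values between general registers and FP registers by bouncing
;; them through the stack.  Assuming MIT-style operand syntax and
;; hypothetical registers, a DFmode move from the pair d0/d1 into fp0 would
;; emit roughly:
;;
;;	movl d1,sp@-		| push the second word of the pair
;;	movl d0,sp@-		| push the first word of the pair
;;	fmoved sp@+,fp0		| pop the double into the FP register
;;
;; The SFmode case is the same idea with a single movl and an fmoves.
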
(define_insn "movdi"
  [(set (match_operand:DI 0 "general_operand" "=rm,&r,&ro<>")
        (match_operand:DI 1 "general_operand" "r,m,roi<>"))]
  ""
  "*
{
  return output_move_double (operands);
}")

;; This goes after the move instructions
;; because the move instructions are better (require no spilling)
;; when they can apply.  It goes before the add/sub insns
;; so we will prefer it to them.

(define_insn "pushasi"
  [(set (match_operand:SI 0 "push_operand" "=m")
        (match_operand:SI 1 "address_operand" "p"))]
  ""
  "pea %a1")

;; truncation instructions

(define_insn "truncsiqi2"
  [(set (match_operand:QI 0 "general_operand" "=dm,d")
        (truncate:QI (match_operand:SI 1 "general_operand" "doJ,i")))]
  ""
  "*
{
  if (GET_CODE (operands[0]) == REG)
    return \"mov%.l %1,%0\";
  if (GET_CODE (operands[1]) == MEM)
    operands[1] = adj_offsettable_operand (operands[1], 3);
  return \"mov%.b %1,%0\";
}")

(define_insn "trunchiqi2"
  [(set (match_operand:QI 0 "general_operand" "=dm,d")
        (truncate:QI (match_operand:HI 1 "general_operand" "doJ,i")))]
  ""
  "*
{
  if (GET_CODE (operands[0]) == REG
      && (GET_CODE (operands[1]) == MEM
          || GET_CODE (operands[1]) == CONST_INT))
    return \"mov%.w %1,%0\";
  if (GET_CODE (operands[0]) == REG)
    return \"mov%.l %1,%0\";
  if (GET_CODE (operands[1]) == MEM)
    operands[1] = adj_offsettable_operand (operands[1], 1);
  return \"mov%.b %1,%0\";
}")

(define_insn "truncsihi2"
  [(set (match_operand:HI 0 "general_operand" "=dm,d")
        (truncate:HI (match_operand:SI 1 "general_operand" "roJ,i")))]
  ""
  "*
{
  if (GET_CODE (operands[0]) == REG)
    return \"mov%.l %1,%0\";
  if (GET_CODE (operands[1]) == MEM)
    operands[1] = adj_offsettable_operand (operands[1], 2);
  return \"mov%.w %1,%0\";
}")

;; zero extension instructions

(define_expand "zero_extendhisi2"
  [(set (match_operand:SI 0 "register_operand" "")
        (const_int 0))
   (set (strict_low_part (subreg:HI (match_dup 0) 0))
        (match_operand:HI 1 "general_operand" ""))]
  ""
  "operands[1] = make_safe_from (operands[1], operands[0]);")

(define_expand "zero_extendqihi2"
  [(set (match_operand:HI 0 "register_operand" "")
        (const_int 0))
   (set (strict_low_part (subreg:QI (match_dup 0) 0))
        (match_operand:QI 1 "general_operand" ""))]
  ""
  "operands[1] = make_safe_from (operands[1], operands[0]);")

(define_expand "zero_extendqisi2"
  [(set (match_operand:SI 0 "register_operand" "")
        (const_int 0))
   (set (strict_low_part (subreg:QI (match_dup 0) 0))
        (match_operand:QI 1 "general_operand" ""))]
  ""
  "operands[1] = make_safe_from (operands[1], operands[0]);")
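
;; Illustrative note (not in the original source): the zero-extension
;; expanders above split the operation into a clear of the whole destination
;; followed by a strict_low_part move into its low part.  Assuming MIT-style
;; operand syntax and hypothetical data registers, zero-extending d1 (HImode)
;; into d0 (SImode) therefore comes out roughly as:
;;
;;	clrl d0			| clear all 32 bits of the destination
;;	movw d1,d0		| insert the 16-bit value in the low half
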
;; Patterns to recognize zero-extend insns produced by the combiner.
;; Note that the one starting from HImode comes before those for QImode
;; so that a constant operand will match HImode, not QImode.

(define_insn ""
  [(set (match_operand:SI 0 "general_operand" "=do<>")
        (zero_extend:SI (match_operand:HI 1 "general_operand" "rmn")))]
  ""
  "*
{
  if (DATA_REG_P (operands[0]))
    {
      if (GET_CODE (operands[1]) == REG
          && REGNO (operands[0]) == REGNO (operands[1]))
        return \"and%.l %#0xFFFF,%0\";
      if (reg_mentioned_p (operands[0], operands[1]))
        return \"mov%.w %1,%0\;and%.l %#0xFFFF,%0\";
      return \"clr%.l %0\;mov%.w %1,%0\";
    }
  else if (GET_CODE (operands[0]) == MEM
           && GET_CODE (XEXP (operands[0], 0)) == PRE_DEC)
    return \"mov%.w %1,%0\;clr%.w %0\";
  else if (GET_CODE (operands[0]) == MEM
           && GET_CODE (XEXP (operands[0], 0)) == POST_INC)
    return \"clr%.w %0\;mov%.w %1,%0\";
  else
    {
      output_asm_insn (\"clr%.w %0\", operands);
      operands[0] = adj_offsettable_operand (operands[0], 2);
      return \"mov%.w %1,%0\";
    }
}")

(define_insn ""
  [(set (match_operand:HI 0 "general_operand" "=do<>")
        (zero_extend:HI (match_operand:QI 1 "general_operand" "dmn")))]
  ""
  "*
{
  if (DATA_REG_P (operands[0]))
    {
      if (GET_CODE (operands[1]) == REG
          && REGNO (operands[0]) == REGNO (operands[1]))
        return \"and%.w %#0xFF,%0\";
      if (reg_mentioned_p (operands[0], operands[1]))
        return \"mov%.b %1,%0\;and%.w %#0xFF,%0\";
      return \"clr%.w %0\;mov%.b %1,%0\";
    }
  else if (GET_CODE (operands[0]) == MEM
           && GET_CODE (XEXP (operands[0], 0)) == PRE_DEC)
    {
      if (REGNO (XEXP (XEXP (operands[0], 0), 0)) == STACK_POINTER_REGNUM)
        return \"clr%.w %-\;mov%.b %1,%0\";
      else
        return \"mov%.b %1,%0\;clr%.b %0\";
    }
  else if (GET_CODE (operands[0]) == MEM
           && GET_CODE (XEXP (operands[0], 0)) == POST_INC)
    return \"clr%.b %0\;mov%.b %1,%0\";
  else
    {
      output_asm_insn (\"clr%.b %0\", operands);
      operands[0] = adj_offsettable_operand (operands[0], 1);
      return \"mov%.b %1,%0\";
    }
}")

(define_insn ""
  [(set (match_operand:SI 0 "general_operand" "=do<>")
        (zero_extend:SI (match_operand:QI 1 "general_operand" "dmn")))]
  ""
  "*
{
  if (DATA_REG_P (operands[0]))
    {
      if (GET_CODE (operands[1]) == REG
          && REGNO (operands[0]) == REGNO (operands[1]))
        return \"and%.l %#0xFF,%0\";
      if (reg_mentioned_p (operands[0], operands[1]))
        return \"mov%.b %1,%0\;and%.l %#0xFF,%0\";
      return \"clr%.l %0\;mov%.b %1,%0\";
    }
  else if (GET_CODE (operands[0]) == MEM
           && GET_CODE (XEXP (operands[0], 0)) == PRE_DEC)
    {
      operands[0] = XEXP (XEXP (operands[0], 0), 0);
      return \"clr%.l %0@-\;mov%.b %1,%0@(3)\";
    }
  else if (GET_CODE (operands[0]) == MEM
           && GET_CODE (XEXP (operands[0], 0)) == POST_INC)
    {
      operands[0] = XEXP (XEXP (operands[0], 0), 0);
      return \"clr%.l %0@+\;mov%.b %1,%0@(-1)\";
    }
  else
    {
      output_asm_insn (\"clr%.l %0\", operands);
      operands[0] = adj_offsettable_operand (operands[0], 3);
      return \"mov%.b %1,%0\";
    }
}")

;; sign extension instructions
;; Note that the one starting from HImode comes before those for QImode
;; so that a constant operand will match HImode, not QImode.

(define_insn "extendhisi2"
  [(set (match_operand:SI 0 "general_operand" "=*d,a")
        (sign_extend:SI (match_operand:HI 1 "general_operand" "0,rmn")))]
  ""
  "*
{
  if (ADDRESS_REG_P (operands[0]))
    return \"mov%.w %1,%0\";
  return \"ext%.l %0\";
}")

(define_insn "extendqihi2"
  [(set (match_operand:HI 0 "general_operand" "=d")
        (sign_extend:HI (match_operand:QI 1 "general_operand" "0")))]
  ""
  "ext%.w %0")

(define_insn "extendqisi2"
  [(set (match_operand:SI 0 "general_operand" "=d")
        (sign_extend:SI (match_operand:QI 1 "general_operand" "0")))]
  "TARGET_68020"
  "extb%.l %0")

;; Conversions between float and double.

(define_insn "extendsfdf2"
  [(set (match_operand:DF 0 "general_operand" "=f,m")
        (float_extend:DF (match_operand:SF 1 "nonimmediate_operand" "fm,f")))]