m68k.md
return \"move%.l %+,%0\"; } else return \"fmove%.d %f1,%0\"; } return output_move_double (operands);}")(define_insn "" [(set (match_operand:DF 0 "nonimmediate_operand" "=r,g") (match_operand:DF 1 "general_operand" "g,r"))] "TARGET_5200" "* return output_move_double (operands);")(define_expand "movxf" [(set (match_operand:XF 0 "nonimmediate_operand" "") (match_operand:XF 1 "general_operand" ""))] "" "{ if (CONSTANT_P (operands[1])) { operands[1] = force_const_mem (XFmode, operands[1]); if (! memory_address_p (XFmode, XEXP (operands[1], 0)) && ! reload_in_progress) operands[1] = adjust_address (operands[1], XFmode, 0); } if (flag_pic && TARGET_PCREL && ! reload_in_progress) { /* Don't allow writes to memory except via a register; the m68k doesn't consider PC-relative addresses to be writable. */ if (GET_CODE (operands[0]) == MEM && symbolic_operand (XEXP (operands[0], 0), SImode)) operands[0] = gen_rtx (MEM, XFmode, force_reg (SImode, XEXP (operands[0], 0))); }}")(define_insn "" [(set (match_operand:XF 0 "nonimmediate_operand" "=f,m,f,!r,!f,!r") (match_operand:XF 1 "nonimmediate_operand" "m,f,f,f,r,!r"))] "TARGET_68881" "*{ if (FP_REG_P (operands[0])) { if (FP_REG_P (operands[1])) return \"fmove%.x %1,%0\"; if (REG_P (operands[1])) { rtx xoperands[2]; xoperands[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 2); output_asm_insn (\"move%.l %1,%-\", xoperands); xoperands[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1); output_asm_insn (\"move%.l %1,%-\", xoperands); output_asm_insn (\"move%.l %1,%-\", operands); return \"fmove%.x %+,%0\"; } if (GET_CODE (operands[1]) == CONST_DOUBLE) return \"fmove%.x %1,%0\"; return \"fmove%.x %f1,%0\"; } if (FP_REG_P (operands[1])) { if (REG_P (operands[0])) { output_asm_insn (\"fmove%.x %f1,%-\;move%.l %+,%0\", operands); operands[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1); output_asm_insn (\"move%.l %+,%0\", operands); operands[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1); return \"move%.l %+,%0\"; } /* Must be memory destination. */ return \"fmove%.x %f1,%0\"; } return output_move_double (operands);}")(define_insn "" [(set (match_operand:XF 0 "nonimmediate_operand" "=rm,rf,&rof<>") (match_operand:XF 1 "nonimmediate_operand" "rf,m,rof<>"))] "! TARGET_68881 && ! TARGET_5200" "*{ if (FP_REG_P (operands[0])) { if (FP_REG_P (operands[1])) return \"fmove%.x %1,%0\"; if (REG_P (operands[1])) { rtx xoperands[2]; xoperands[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 2); output_asm_insn (\"move%.l %1,%-\", xoperands); xoperands[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1); output_asm_insn (\"move%.l %1,%-\", xoperands); output_asm_insn (\"move%.l %1,%-\", operands); return \"fmove%.x %+,%0\"; } if (GET_CODE (operands[1]) == CONST_DOUBLE) return \"fmove%.x %1,%0\"; return \"fmove%.x %f1,%0\"; } if (FP_REG_P (operands[1])) { if (REG_P (operands[0])) { output_asm_insn (\"fmove%.x %f1,%-\;move%.l %+,%0\", operands); operands[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1); output_asm_insn (\"move%.l %+,%0\", operands); operands[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1); return \"move%.l %+,%0\"; } else return \"fmove%.x %f1,%0\"; } return output_move_double (operands);}")(define_insn "" [(set (match_operand:XF 0 "nonimmediate_operand" "=r,g") (match_operand:XF 1 "nonimmediate_operand" "g,r"))] "! TARGET_68881 && TARGET_5200" "* return output_move_double (operands);")(define_expand "movdi" ;; Let's see if it really still needs to handle fp regs, and, if so, why. 
  [(set (match_operand:DI 0 "nonimmediate_operand" "")
        (match_operand:DI 1 "general_operand" ""))]
  ""
  "")

;; movdi can apply to fp regs in some cases
(define_insn ""
  ;; Let's see if it really still needs to handle fp regs, and, if so, why.
  [(set (match_operand:DI 0 "nonimmediate_operand" "=rm,r,&ro<>,y,rm,!*x,!rm")
        (match_operand:DI 1 "general_operand" "rF,m,roi<>F,rmiF,y,rmF,*x"))]
;  [(set (match_operand:DI 0 "nonimmediate_operand" "=rm,&r,&ro<>,!&rm,!&f,y,rm,x,!x,!rm")
;        (match_operand:DI 1 "general_operand" "r,m,roi<>,fF,rfmF,rmi,y,rm,x"))]
;  [(set (match_operand:DI 0 "nonimmediate_operand" "=rm,&rf,&ro<>,!&rm,!&f")
;        (match_operand:DI 1 "general_operand" "r,m,roi<>,fF,rfF"))]
  "!TARGET_5200"
  "*
{
  if (which_alternative == 8)
    return \"fpmove%.d %x1,fpa0\;fpmove%.d fpa0,%x0\";
  if (FPA_REG_P (operands[0]) || FPA_REG_P (operands[1]))
    return \"fpmove%.d %x1,%x0\";
  if (FP_REG_P (operands[0]))
    {
      if (FP_REG_P (operands[1]))
        return \"fmove%.x %1,%0\";
      if (REG_P (operands[1]))
        {
          rtx xoperands[2];
          xoperands[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
          output_asm_insn (\"move%.l %1,%-\", xoperands);
          output_asm_insn (\"move%.l %1,%-\", operands);
          return \"fmove%.d %+,%0\";
        }
      if (GET_CODE (operands[1]) == CONST_DOUBLE)
        return output_move_const_double (operands);
      return \"fmove%.d %f1,%0\";
    }
  else if (FP_REG_P (operands[1]))
    {
      if (REG_P (operands[0]))
        {
          output_asm_insn (\"fmove%.d %f1,%-\;move%.l %+,%0\", operands);
          operands[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
          return \"move%.l %+,%0\";
        }
      else
        return \"fmove%.d %f1,%0\";
    }
  return output_move_double (operands);
}")

(define_insn ""
  [(set (match_operand:DI 0 "nonimmediate_operand" "=r,g")
        (match_operand:DI 1 "general_operand" "g,r"))]
  "TARGET_5200"
  "* return output_move_double (operands);")

;; This goes after the move instructions
;; because the move instructions are better (require no spilling)
;; when they can apply.  It goes before the add/sub insns
;; so we will prefer it to them.

(define_insn "pushasi"
  [(set (match_operand:SI 0 "push_operand" "=m")
        (match_operand:SI 1 "address_operand" "p"))]
  ""
  "pea %a1")

;; truncation instructions
(define_insn "truncsiqi2"
  [(set (match_operand:QI 0 "nonimmediate_operand" "=dm,d")
        (truncate:QI
         (match_operand:SI 1 "general_src_operand" "doJS,i")))]
  ""
  "*
{
  if (GET_CODE (operands[0]) == REG)
    {
      /* Must clear condition codes, since the move.l bases them on
         the entire 32 bits, not just the desired 8 bits.  */
      CC_STATUS_INIT;
      return \"move%.l %1,%0\";
    }
  if (GET_CODE (operands[1]) == MEM)
    operands[1] = adjust_address (operands[1], QImode, 3);
  return \"move%.b %1,%0\";
}")

(define_insn "trunchiqi2"
  [(set (match_operand:QI 0 "nonimmediate_operand" "=dm,d")
        (truncate:QI
         (match_operand:HI 1 "general_src_operand" "doJS,i")))]
  ""
  "*
{
  if (GET_CODE (operands[0]) == REG
      && (GET_CODE (operands[1]) == MEM
          || GET_CODE (operands[1]) == CONST_INT))
    {
      /* Must clear condition codes, since the move.w bases them on
         the entire 16 bits, not just the desired 8 bits.  */
      CC_STATUS_INIT;
      return \"move%.w %1,%0\";
    }
  if (GET_CODE (operands[0]) == REG)
    {
      /* Must clear condition codes, since the move.l bases them on
         the entire 32 bits, not just the desired 8 bits.  */
      CC_STATUS_INIT;
      return \"move%.l %1,%0\";
    }
  if (GET_CODE (operands[1]) == MEM)
    operands[1] = adjust_address (operands[1], QImode, 1);
  return \"move%.b %1,%0\";
}")

(define_insn "truncsihi2"
  [(set (match_operand:HI 0 "nonimmediate_operand" "=dm,d")
        (truncate:HI
         (match_operand:SI 1 "general_src_operand" "roJS,i")))]
  ""
  "*
{
  if (GET_CODE (operands[0]) == REG)
    {
      /* Must clear condition codes, since the move.l bases them on
         the entire 32 bits, not just the desired 8 bits.  */
      CC_STATUS_INIT;
      return \"move%.l %1,%0\";
    }
  if (GET_CODE (operands[1]) == MEM)
    operands[1] = adjust_address (operands[1], QImode, 2);
  return \"move%.w %1,%0\";
}")

;; zero extension instructions

(define_insn "zero_extendqidi2"
  [(set (match_operand:DI 0 "nonimmediate_operand" "=&d")
        (zero_extend:DI (match_operand:QI 1 "general_operand" "dm")))]
  ""
  "*
{
  CC_STATUS_INIT;
  operands[2] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
  return \"moveq %#0,%0\;moveq %#0,%2\;move%.b %1,%2\";
}")

(define_insn "zero_extendhidi2"
  [(set (match_operand:DI 0 "nonimmediate_operand" "=&d")
        (zero_extend:DI (match_operand:HI 1 "general_operand" "rm")))]
  ""
  "*
{
  CC_STATUS_INIT;
  operands[2] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
  return \"moveq %#0,%0\;moveq %#0,%2\;move%.w %1,%2\";
}")

;; this is the canonical form for (lshiftrt:DI x 32)
(define_expand "zero_extendsidi2"
  [(set (match_operand:DI 0 "nonimmediate_operand" "")
        (zero_extend:DI (match_operand:SI 1 "general_operand" "")))]
  ""
  "")

(define_insn "*zero_extendsidi2_cf"
  [(set (match_operand:DI 0 "nonimmediate_operand" "=r,m")
        (zero_extend:DI (match_operand:SI 1 "general_operand" "rm,r")))]
  "TARGET_5200"
  "*
{
  CC_STATUS_INIT;
  if (GET_CODE (operands[0]) == REG)
    operands[2] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
  else if (GET_CODE (XEXP (operands[0], 0)) == PRE_DEC)
    return \"move%.l %1,%0\;clr%.l %0\";
  else if (GET_CODE (XEXP (operands[0], 0)) == POST_INC)
    return \"clr%.l %0\;move%.l %1,%0\";
  else
    operands[2] = adjust_address (operands[0], SImode, 4);
  if (GET_CODE (operands[1]) != REG || GET_CODE (operands[2]) != REG
      || REGNO (operands[1]) != REGNO (operands[2]))
    output_asm_insn (\"move%.l %1,%2\", operands);
  if (ADDRESS_REG_P (operands[0]))
    return \"sub%.l %0,%0\";
  else
    return \"clr%.l %0\";
}")

(define_insn "*zero_extendsidi2"
  [(set (match_operand:DI 0 "nonimmediate_operand" "=rm")
        (zero_extend:DI (match_operand:SI 1 "general_operand" "rm")))]
  "!TARGET_5200"
  "*
{
  CC_STATUS_INIT;
  if (GET_CODE (operands[0]) == REG)
    operands[2] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
  else if (GET_CODE (XEXP (operands[0], 0)) == PRE_DEC)
    return \"move%.l %1,%0\;clr%.l %0\";
  else if (GET_CODE (XEXP (operands[0], 0)) == POST_INC)
    return \"clr%.l %0\;move%.l %1,%0\";
  else
    operands[2] = adjust_address (operands[0], SImode, 4);
  if (GET_CODE (operands[1]) != REG || GET_CODE (operands[2]) != REG
      || REGNO (operands[1]) != REGNO (operands[2]))
    output_asm_insn (\"move%.l %1,%2\", operands);
  if (ADDRESS_REG_P (operands[0]))
    return \"sub%.l %0,%0\";
  else
    return \"clr%.l %0\";
}")

(define_expand "zero_extendhisi2"
  [(set (match_operand:SI 0 "register_operand" "")
        (const_int 0))
   (set (strict_low_part (match_dup 2))
        (match_operand:HI 1 "general_operand" ""))]
  ""
  "
{
  operands[1] = make_safe_from (operands[1], operands[0]);
  operands[2] = gen_lowpart_SUBREG (HImode, operands[0]);
}")

(define_expand "zero_extendqihi2"
  [(set (match_operand:HI 0 "register_operand" "")
        (const_int 0))
   (set (strict_low_part (match_dup 2))
        (match_operand:QI 1 "general_operand" ""))]
  ""
  "
{
  operands[1] = make_safe_from (operands[1], operands[0]);
  operands[2] = gen_lowpart_SUBREG (QImode, operands[0]);
}")

(define_expand "zero_extendqisi2"
  [(set (match_operand:SI 0 "register_operand" "")
        (const_int 0))
   (set (strict_low_part (match_dup 2))
        (match_operand:QI 1 "general_operand" ""))]
  ""
  "
{
  operands[1] = make_safe_from (operands[1], operands[0]);
  operands[2] = gen_lowpart_SUBREG (QImode, operands[0]);
}")

;; Patterns to recognize zero-extend insns produced by the combiner.
;; We don't allow both operands in memory, because of aliasing problems.
;; Explicitly disallow two memory operands via the condition since reloading
;; of this case will result in worse code than the uncombined patterns.

(define_insn ""
  [(set (match_operand:SI 0 "nonimmediate_operand" "=do<>,d<")
        (zero_extend:SI (match_operand:HI 1 "nonimmediate_src_operand" "r,mS")))]
  "GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM"
  "*
{
  if (DATA_REG_P (operands[0]))
    {
      if (GET_CODE (operands[1]) == REG
          && REGNO (operands[0]) == REGNO (operands[1]))
        return \"and%.l %#0xFFFF,%0\";
      if (reg_mentioned_p (operands[0], operands[1]))
        return \"move%.w %1,%0\;and%.l %#0xFFFF,%0\";
      return \"clr%.l %0\;move%.w %1,%0\";
    }
  else if (GET_CODE (operands[0]) == MEM
           && GET_CODE (XEXP (operands[0], 0)) == PRE_DEC)
    return \"move%.w %1,%0\;clr%.w %0\";
  else if (GET_CODE (operands[0]) == MEM
           && GET_CODE (XEXP (operands[0], 0)) == POST_INC)
    return \"clr%.w %0\;move%.w %1,%0\";
  else
    {
      output_asm_insn (\"clr%.w %0\", operands);
      operands[0] = adjust_address (operands[0], HImode, 2);
      return \"move%.w %1,%0\";
    }
}")

(define_insn ""