sh.h
#define CPP_SPEC " %(subtarget_cpp_spec) "

#ifndef SUBTARGET_CPP_SPEC
#define SUBTARGET_CPP_SPEC ""
#endif

#ifndef SUBTARGET_EXTRA_SPECS
#define SUBTARGET_EXTRA_SPECS
#endif

#define EXTRA_SPECS \
  { "subtarget_cpp_spec", SUBTARGET_CPP_SPEC }, \
  { "link_emul_prefix", LINK_EMUL_PREFIX }, \
  { "link_default_cpu_emul", LINK_DEFAULT_CPU_EMUL }, \
  { "subtarget_link_emul_suffix", SUBTARGET_LINK_EMUL_SUFFIX }, \
  { "subtarget_link_spec", SUBTARGET_LINK_SPEC }, \
  { "subtarget_asm_endian_spec", SUBTARGET_ASM_ENDIAN_SPEC }, \
  SUBTARGET_EXTRA_SPECS

#define ASM_SPEC "%(subtarget_asm_endian_spec) %{mrelax:-relax}"

#ifndef SUBTARGET_ASM_ENDIAN_SPEC
#if TARGET_ENDIAN_DEFAULT == LITTLE_ENDIAN_BIT
#define SUBTARGET_ASM_ENDIAN_SPEC "%{mb:-big} %{!mb:-little}"
#else
#define SUBTARGET_ASM_ENDIAN_SPEC "%{ml:-little} %{!ml:-big}"
#endif
#endif

#define LINK_EMUL_PREFIX "sh%{ml:l}"

#if TARGET_CPU_DEFAULT & SH5_BIT
#if TARGET_CPU_DEFAULT & SH3E_BIT
#define LINK_DEFAULT_CPU_EMUL "32"
#else
#define LINK_DEFAULT_CPU_EMUL "64"
#endif /* SH3E_BIT */
#else
#define LINK_DEFAULT_CPU_EMUL ""
#endif /* SH5_BIT */

#define SUBTARGET_LINK_EMUL_SUFFIX ""
#define SUBTARGET_LINK_SPEC ""

/* svr4.h redefines LINK_SPEC inappropriately, so go via SH_LINK_SPEC,
   so that we can undo the damage without code replication.  */
#define LINK_SPEC SH_LINK_SPEC

#define SH_LINK_SPEC "\
-m %(link_emul_prefix)\
%{m5-compact*|m5-32media*:32}\
%{m5-64media*:64}\
%{!m1:%{!m2:%{!m3*:%{!m4*:%{!m5*:%(link_default_cpu_emul)}}}}}\
%(subtarget_link_emul_suffix) \
%{mrelax:-relax} %(subtarget_link_spec)"

#define OPTIMIZATION_OPTIONS(LEVEL,SIZE) \
do { \
  if (LEVEL) \
    flag_omit_frame_pointer = -1; \
  if (SIZE) \
    target_flags |= SPACE_BIT; \
} while (0)

#define ASSEMBLER_DIALECT assembler_dialect

extern int assembler_dialect;

#define OVERRIDE_OPTIONS \
do { \
  int regno; \
\
  sh_cpu = CPU_SH1; \
  assembler_dialect = 0; \
  if (TARGET_SH2) \
    sh_cpu = CPU_SH2; \
  if (TARGET_SH3) \
    sh_cpu = CPU_SH3; \
  if (TARGET_SH3E) \
    sh_cpu = CPU_SH3E; \
  if (TARGET_SH4) \
    { \
      assembler_dialect = 1; \
      sh_cpu = CPU_SH4; \
    } \
  if (TARGET_SH5) \
    { \
      sh_cpu = CPU_SH5; \
      target_flags |= DALIGN_BIT; \
      if (TARGET_FPU_ANY \
          && ! (TARGET_SHCOMPACT && TARGET_LITTLE_ENDIAN)) \
        target_flags |= FMOVD_BIT; \
      if (TARGET_SHMEDIA) \
        { \
          /* There are no delay slots on SHmedia.  */ \
          flag_delayed_branch = 0; \
          /* Relaxation isn't yet supported for SHmedia.  */ \
          target_flags &= ~RELAX_BIT; \
        } \
      if (profile_flag || profile_arc_flag) \
        { \
          warning ("Profiling is not supported on this target."); \
          profile_flag = profile_arc_flag = 0; \
        } \
    } \
  else \
    { \
      /* Only the sh64-elf assembler fully supports .quad properly.  */ \
      targetm.asm_out.aligned_op.di = NULL; \
      targetm.asm_out.unaligned_op.di = NULL; \
    } \
  if (TARGET_FMOVD) \
    reg_class_from_letter['e'] = NO_REGS; \
\
  for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++) \
    if (! VALID_REGISTER_P (regno)) \
      sh_register_names[regno][0] = '\0'; \
\
  for (regno = 0; regno < ADDREGNAMES_SIZE; regno++) \
    if (! VALID_REGISTER_P (ADDREGNAMES_REGNO (regno))) \
      sh_additional_register_names[regno][0] = '\0'; \
\
  if (flag_omit_frame_pointer < 0) \
    { \
      /* The debugging information is sufficient, \
         but gdb doesn't implement this yet.  */ \
      if (0) \
        flag_omit_frame_pointer \
          = (PREFERRED_DEBUGGING_TYPE == DWARF_DEBUG \
             || PREFERRED_DEBUGGING_TYPE == DWARF2_DEBUG); \
      else \
        flag_omit_frame_pointer = 0; \
    } \
\
  if (flag_pic && ! TARGET_PREFERGOT) \
    flag_no_function_cse = 1; \
\
  /* Never run scheduling before reload, since that can \
     break global alloc, and generates slower code anyway due \
     to the pressure on R0.  */ \
  flag_schedule_insns = 0; \
\
  if (align_loops == 0) \
    align_loops = 1 << (TARGET_SH5 ? 3 : 2); \
  if (align_jumps == 0) \
    align_jumps = 1 << CACHE_LOG; \
  else if (align_jumps < (TARGET_SHMEDIA ? 4 : 2)) \
    align_jumps = TARGET_SHMEDIA ? 4 : 2; \
\
  /* Allocation boundary (in *bytes*) for the code of a function. \
     SH1: 32 bit alignment is faster, because instructions are always \
     fetched as a pair from a longword boundary. \
     SH2 .. SH5: align to cache line start.  */ \
  if (align_functions == 0) \
    align_functions \
      = TARGET_SMALLCODE ? FUNCTION_BOUNDARY/8 : (1 << CACHE_LOG); \
  /* The linker relaxation code breaks when a function contains \
     alignments that are larger than that at the start of a \
     compilation unit.  */ \
  if (TARGET_RELAX) \
    { \
      int min_align \
        = align_loops > align_jumps ? align_loops : align_jumps; \
\
      /* Also take possible .long constants / mova tables into account.  */ \
      if (min_align < 4) \
        min_align = 4; \
      if (align_functions < min_align) \
        align_functions = min_align; \
    } \
} while (0)
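/* A worked example (an added sketch, not part of the original header) of
   how the spec strings above combine.  Given a hypothetical invocation of
   an SH5 toolchain:

       sh64-elf-gcc -ml -m5-compact -mrelax foo.c

   LINK_EMUL_PREFIX expands to "shl" because %{ml:l} matches, the
   %{m5-compact*|m5-32media*:32} alternative in SH_LINK_SPEC appends "32",
   and %{mrelax:-relax} passes -relax, so the linker is invoked with:

       -m shl32 -relax

   When no -m1 .. -m5* option is given, the nested %{!m1:...} chain instead
   falls through to %(link_default_cpu_emul), i.e. the compiled-in
   LINK_DEFAULT_CPU_EMUL default.  */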
/* Target machine storage layout.  */

/* Define this if most significant bit is lowest numbered
   in instructions that operate on numbered bit-fields.  */
#define BITS_BIG_ENDIAN 0

/* Define this if most significant byte of a word is the lowest numbered.  */
#define BYTES_BIG_ENDIAN (TARGET_LITTLE_ENDIAN == 0)

/* Define this if most significant word of a multiword number is the
   lowest numbered.  */
#define WORDS_BIG_ENDIAN (TARGET_LITTLE_ENDIAN == 0)

/* Define this to set the endianness to use in libgcc2.c, which can
   not depend on target_flags.  */
#if defined(__LITTLE_ENDIAN__)
#define LIBGCC2_WORDS_BIG_ENDIAN 0
#else
#define LIBGCC2_WORDS_BIG_ENDIAN 1
#endif

#define MAX_BITS_PER_WORD 64

#define MAX_LONG_TYPE_SIZE MAX_BITS_PER_WORD

/* Width in bits of an `int'.  We want just 32-bits, even if words
   are longer.  */
#define INT_TYPE_SIZE 32

/* Width in bits of a `long'.  */
#define LONG_TYPE_SIZE (TARGET_SHMEDIA64 ? 64 : 32)

/* Width in bits of a `long long'.  */
#define LONG_LONG_TYPE_SIZE 64

/* Width in bits of a `long double'.  */
#define LONG_DOUBLE_TYPE_SIZE 64

/* Width of a word, in units (bytes).  */
#define UNITS_PER_WORD (TARGET_SHMEDIA ? 8 : 4)
#define MIN_UNITS_PER_WORD 4

/* Width in bits of a pointer.
   See also the macro `Pmode' defined below.  */
#define POINTER_SIZE (TARGET_SHMEDIA64 ? 64 : 32)

/* Allocation boundary (in *bits*) for storing arguments in argument list.  */
#define PARM_BOUNDARY (TARGET_SH5 ? 64 : 32)

/* Boundary (in *bits*) on which stack pointer should be aligned.  */
#define STACK_BOUNDARY BIGGEST_ALIGNMENT

/* The log (base 2) of the cache line size, in bytes.  Processors prior to
   SH2 have no actual cache, but they fetch code in chunks of 4 bytes.
   The SH2/3 have 16 byte cache lines, and the SH4 has a 32 byte cache
   line.  */
#define CACHE_LOG (TARGET_CACHE32 ? 5 : TARGET_SH2 ? 4 : 2)

/* ABI given & required minimum allocation boundary (in *bits*) for the
   code of a function.  */
#define FUNCTION_BOUNDARY (16 << TARGET_SHMEDIA)

/* On SH5, the lowest bit is used to indicate SHmedia functions, so
   the vbit must go into the delta field of
   pointers-to-member-functions.  */
#define TARGET_PTRMEMFUNC_VBIT_LOCATION \
  (TARGET_SH5 ? ptrmemfunc_vbit_in_delta : ptrmemfunc_vbit_in_pfn)

/* Alignment of field after `int : 0' in a structure.  */
#define EMPTY_FIELD_BOUNDARY 32

/* No data type wants to be aligned rounder than this.  */
#define BIGGEST_ALIGNMENT (TARGET_ALIGN_DOUBLE ? 64 : 32)

/* The best alignment to use in cases where we have a choice.  */
#define FASTEST_ALIGNMENT (TARGET_SH5 ? 64 : 32)
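/* A small worked example (an added sketch, not in the original source) of
   the values the macros above yield.  Assuming a plain SH4 configuration,
   where TARGET_CACHE32 is set and TARGET_SHMEDIA / TARGET_SHMEDIA64 are 0:

       CACHE_LOG          -> 5, i.e. 1 << 5 = 32 byte cache lines
       FUNCTION_BOUNDARY  -> 16 << 0 = 16 bits (2 bytes)
       UNITS_PER_WORD     -> 4 bytes
       POINTER_SIZE       -> 32 bits
       FASTEST_ALIGNMENT  -> 32 bits

   On SHmedia the word size doubles (UNITS_PER_WORD == 8) and
   FUNCTION_BOUNDARY becomes 16 << 1 = 32 bits, which is why
   OVERRIDE_OPTIONS above uses different alignment defaults for SH5.  */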
/* Make strings word-aligned so strcpy from constants will be faster.  */
#define CONSTANT_ALIGNMENT(EXP, ALIGN) \
  ((TREE_CODE (EXP) == STRING_CST \
    && (ALIGN) < FASTEST_ALIGNMENT) \
   ? FASTEST_ALIGNMENT : (ALIGN))

/* Make arrays of chars word-aligned for the same reasons.  */
#define DATA_ALIGNMENT(TYPE, ALIGN) \
  (TREE_CODE (TYPE) == ARRAY_TYPE \
   && TYPE_MODE (TREE_TYPE (TYPE)) == QImode \
   && (ALIGN) < FASTEST_ALIGNMENT ? FASTEST_ALIGNMENT : (ALIGN))

/* Number of bits which any structure or union's size must be a
   multiple of.  Each structure or union's size is rounded up to a
   multiple of this.  */
#define STRUCTURE_SIZE_BOUNDARY (TARGET_PADSTRUCT ? 32 : 8)

/* Set this nonzero if move instructions will actually fail to work
   when given unaligned data.  */
#define STRICT_ALIGNMENT 1

/* If LABEL_AFTER_BARRIER demands an alignment, return its base 2
   logarithm.  */
#define LABEL_ALIGN_AFTER_BARRIER(LABEL_AFTER_BARRIER) \
  barrier_align (LABEL_AFTER_BARRIER)

#define LOOP_ALIGN(A_LABEL) \
  ((! optimize || TARGET_HARVARD || TARGET_SMALLCODE) \
   ? 0 : sh_loop_align (A_LABEL))

#define LABEL_ALIGN(A_LABEL) \
( \
  (PREV_INSN (A_LABEL) \
   && GET_CODE (PREV_INSN (A_LABEL)) == INSN \
   && GET_CODE (PATTERN (PREV_INSN (A_LABEL))) == UNSPEC_VOLATILE \
   && XINT (PATTERN (PREV_INSN (A_LABEL)), 1) == UNSPECV_ALIGN) \
  /* explicit alignment insn in constant tables.  */ \
  ? INTVAL (XVECEXP (PATTERN (PREV_INSN (A_LABEL)), 0, 0)) \
  : 0)

/* Jump tables must be 32 bit aligned, no matter the size of the
   elements.  */
#define ADDR_VEC_ALIGN(ADDR_VEC) 2

/* The base two logarithm of the known minimum alignment of an insn
   length.  */
#define INSN_LENGTH_ALIGNMENT(A_INSN) \
  (GET_CODE (A_INSN) == INSN \
   ? 1 << TARGET_SHMEDIA \
   : GET_CODE (A_INSN) == JUMP_INSN || GET_CODE (A_INSN) == CALL_INSN \
   ? 1 << TARGET_SHMEDIA \
   : CACHE_LOG)

/* Standard register usage.  */

/* Register allocation for the Hitachi calling convention:

     r0          arg return
     r1..r3      scratch
     r4..r7      args in
     r8..r13     call saved
     r14         frame pointer/call saved
     r15         stack pointer
     ap          arg pointer (doesn't really exist, always eliminated)
     pr          subroutine return address
     t           t bit
     mach        multiply/accumulate result, high part
     macl        multiply/accumulate result, low part
     fpul        fp/int communication register
     rap         return address pointer register
     fr0         fp arg return
     fr1..fr3    scratch floating point registers
     fr4..fr11   fp args in
     fr12..fr15  call saved floating point registers  */

#define MAX_REGISTER_NAME_LENGTH 5
extern char sh_register_names[][MAX_REGISTER_NAME_LENGTH + 1];

#define SH_REGISTER_NAMES_INITIALIZER \
{ \
  "r0",  "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7", \
  "r8",  "r9",  "r10", "r11", "r12", "r13", "r14", "r15", \
  "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23", \
  "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31", \
  "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39", \
  "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47", \
  "r48", "r49", "r50", "r51", "r52", "r53", "r54", "r55", \
  "r56", "r57", "r58", "r59", "r60", "r61", "r62", "r63", \
  "fr0", "fr1", "fr2", "fr3", "fr4", "fr5", "fr6", "fr7", \
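/* An illustrative sketch (added, not part of the original header) of
   CONSTANT_ALIGNMENT and DATA_ALIGNMENT at work.  For a hypothetical
   definition such as

       static char greeting[] = "hello, world";

   the type is an ARRAY_TYPE with QImode elements, so an incoming 8 bit
   alignment is raised to FASTEST_ALIGNMENT (32 bits on non-SH5 targets);
   string constants get the same treatment via CONSTANT_ALIGNMENT, so a
   word-at-a-time strcpy from them starts on an aligned boundary.  Neither
   macro ever lowers an alignment that is already >= FASTEST_ALIGNMENT.  */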