arm.c
	    cpu = arm6;
	}

      sel = &all_cores[cpu];
      insn_flags = sel->flags;

      /* Now check to see if the user has specified some command line
         switch that requires certain abilities from the cpu.  */
      sought = 0;

      if (TARGET_INTERWORK || TARGET_THUMB)
        {
          sought |= (FL_THUMB | FL_MODE32);

          /* There are no ARM processors that support both APCS-26 and
             interworking.  Therefore we force FL_MODE26 to be removed
             from insn_flags here (if it was set), so that the search
             below will always be able to find a compatible processor.  */
          insn_flags &= ~FL_MODE26;
        }

      if (sought != 0 && ((sought & insn_flags) != sought))
        {
          /* Try to locate a CPU type that supports all of the abilities
             of the default CPU, plus the extra abilities requested by
             the user.  */
          for (sel = all_cores; sel->name != NULL; sel++)
            if ((sel->flags & sought) == (sought | insn_flags))
              break;

          if (sel->name == NULL)
            {
              unsigned current_bit_count = 0;
              const struct processors * best_fit = NULL;

              /* Ideally we would like to issue an error message here
                 saying that it was not possible to find a CPU compatible
                 with the default CPU, but which also supports the command
                 line options specified by the programmer, and so they
                 ought to use the -mcpu=<name> command line option to
                 override the default CPU type.

                 If we cannot find a cpu that has both the characteristics
                 of the default cpu and the given command line options we
                 scan the array again looking for a best match.  */
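              /* bit_count () (a helper defined elsewhere in this file)
                 counts the bits set in its argument, so the scan below
                 keeps the candidate that shares the largest number of
                 capability flags with the default CPU.  */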
              for (sel = all_cores; sel->name != NULL; sel++)
                if ((sel->flags & sought) == sought)
                  {
                    unsigned count;

                    count = bit_count (sel->flags & insn_flags);

                    if (count >= current_bit_count)
                      {
                        best_fit = sel;
                        current_bit_count = count;
                      }
                  }

              if (best_fit == NULL)
                abort ();
              else
                sel = best_fit;
            }

          insn_flags = sel->flags;
        }

      sprintf (arm_arch_name, "__ARM_ARCH_%s__", sel->arch);
      if (arm_tune == arm_none)
        arm_tune = (enum processor_type) (sel - all_cores);
    }

  /* The processor for which we should tune should now have been
     chosen.  */
  if (arm_tune == arm_none)
    abort ();

  tune_flags = all_cores[(int)arm_tune].flags;
  if (optimize_size)
    targetm.rtx_costs = arm_size_rtx_costs;
  else
    targetm.rtx_costs = all_cores[(int)arm_tune].rtx_costs;

  /* Make sure that the processor choice does not conflict with any of
     the other command line choices.  */
  if (TARGET_INTERWORK && !(insn_flags & FL_THUMB))
    {
      warning ("target CPU does not support interworking");
      target_flags &= ~ARM_FLAG_INTERWORK;
    }

  if (TARGET_THUMB && !(insn_flags & FL_THUMB))
    {
      warning ("target CPU does not support THUMB instructions");
      target_flags &= ~ARM_FLAG_THUMB;
    }

  if (TARGET_APCS_FRAME && TARGET_THUMB)
    {
      /* warning ("ignoring -mapcs-frame because -mthumb was used"); */
      target_flags &= ~ARM_FLAG_APCS_FRAME;
    }

  /* TARGET_BACKTRACE calls leaf_function_p, which causes a crash if done
     from here where no function is being compiled currently.  */
  if ((target_flags & (THUMB_FLAG_LEAF_BACKTRACE | THUMB_FLAG_BACKTRACE))
      && TARGET_ARM)
    warning ("enabling backtrace support is only meaningful when compiling for the Thumb");

  if (TARGET_ARM && TARGET_CALLEE_INTERWORKING)
    warning ("enabling callee interworking support is only meaningful when compiling for the Thumb");

  if (TARGET_ARM && TARGET_CALLER_INTERWORKING)
    warning ("enabling caller interworking support is only meaningful when compiling for the Thumb");

  if (TARGET_APCS_STACK && !TARGET_APCS_FRAME)
    {
      warning ("-mapcs-stack-check incompatible with -mno-apcs-frame");
      target_flags |= ARM_FLAG_APCS_FRAME;
    }

  if (TARGET_POKE_FUNCTION_NAME)
    target_flags |= ARM_FLAG_APCS_FRAME;

  if (TARGET_APCS_REENT && flag_pic)
    error ("-fpic and -mapcs-reent are incompatible");

  if (TARGET_APCS_REENT)
    warning ("APCS reentrant code not supported.  Ignored");

  /* If this target is normally configured to use APCS frames, warn if they
     are turned off and debugging is turned on.  */
  if (TARGET_ARM
      && write_symbols != NO_DEBUG
      && !TARGET_APCS_FRAME
      && (TARGET_DEFAULT & ARM_FLAG_APCS_FRAME))
    warning ("-g with -mno-apcs-frame may not give sensible debugging");

  /* If stack checking is disabled, we can use r10 as the PIC register,
     which keeps r9 available.  */
  if (flag_pic)
    arm_pic_register = TARGET_APCS_STACK ? 9 : 10;

  if (TARGET_APCS_FLOAT)
    warning ("passing floating point arguments in fp regs not yet supported");

  /* Initialize boolean versions of the flags, for use in the arm.md file.  */
  arm_arch3m = (insn_flags & FL_ARCH3M) != 0;
  arm_arch4 = (insn_flags & FL_ARCH4) != 0;
  arm_arch4t = arm_arch4 & ((insn_flags & FL_THUMB) != 0);
  arm_arch5 = (insn_flags & FL_ARCH5) != 0;
  arm_arch5e = (insn_flags & FL_ARCH5E) != 0;
  arm_arch6 = (insn_flags & FL_ARCH6) != 0;
  arm_arch_xscale = (insn_flags & FL_XSCALE) != 0;
  arm_arch_cirrus = (insn_flags & FL_CIRRUS) != 0;

  arm_ld_sched = (tune_flags & FL_LDSCHED) != 0;
  arm_is_strong = (tune_flags & FL_STRONG) != 0;
  thumb_code = (TARGET_ARM == 0);
  arm_is_6_or_7 = (((tune_flags & (FL_MODE26 | FL_MODE32))
                    && !(tune_flags & FL_ARCH4))) != 0;
  arm_tune_xscale = (tune_flags & FL_XSCALE) != 0;
  arm_arch_iwmmxt = (insn_flags & FL_IWMMXT) != 0;

  /* V5 code we generate is completely interworking capable, so we turn off
     TARGET_INTERWORK here to avoid many tests later on.  */

  /* XXX However, we must pass the right pre-processor defines to CPP
     or GLD can get confused.  This is a hack.  */
  if (TARGET_INTERWORK)
    arm_cpp_interwork = 1;

  if (arm_arch5)
    target_flags &= ~ARM_FLAG_INTERWORK;

  if (target_abi_name)
    {
      for (i = 0; i < ARRAY_SIZE (arm_all_abis); i++)
        {
          if (streq (arm_all_abis[i].name, target_abi_name))
            {
              arm_abi = arm_all_abis[i].abi_type;
              break;
            }
        }
      if (i == ARRAY_SIZE (arm_all_abis))
        error ("invalid ABI option: -mabi=%s", target_abi_name);
    }
  else
    arm_abi = ARM_DEFAULT_ABI;

  if (TARGET_IWMMXT && !ARM_DOUBLEWORD_ALIGN)
    error ("iwmmxt requires an AAPCS compatible ABI for proper operation");

  if (TARGET_IWMMXT_ABI && !TARGET_IWMMXT)
    error ("iwmmxt abi requires an iwmmxt capable cpu");
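  /* Three related settings are chosen below: arm_fp_model (the overall
     floating point model), arm_fpu_arch (the FP hardware to generate
     code for) and arm_fpu_tune (the FP hardware to schedule for).  */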
  arm_fp_model = ARM_FP_MODEL_UNKNOWN;
  if (target_fpu_name == NULL && target_fpe_name != NULL)
    {
      if (streq (target_fpe_name, "2"))
        target_fpu_name = "fpe2";
      else if (streq (target_fpe_name, "3"))
        target_fpu_name = "fpe3";
      else
        error ("invalid floating point emulation option: -mfpe=%s",
               target_fpe_name);
    }

  if (target_fpu_name != NULL)
    {
      /* The user specified a FPU.  */
      for (i = 0; i < ARRAY_SIZE (all_fpus); i++)
        {
          if (streq (all_fpus[i].name, target_fpu_name))
            {
              arm_fpu_arch = all_fpus[i].fpu;
              arm_fpu_tune = arm_fpu_arch;
              arm_fp_model = fp_model_for_fpu[arm_fpu_arch];
              break;
            }
        }
      if (arm_fp_model == ARM_FP_MODEL_UNKNOWN)
        error ("invalid floating point option: -mfpu=%s", target_fpu_name);
    }
  else
    {
#ifdef FPUTYPE_DEFAULT
      /* Use the default if it is specified for this platform.  */
      arm_fpu_arch = FPUTYPE_DEFAULT;
      arm_fpu_tune = FPUTYPE_DEFAULT;
#else
      /* Pick one based on CPU type.  */
      /* ??? Some targets assume FPA is the default.
      if ((insn_flags & FL_VFP) != 0)
        arm_fpu_arch = FPUTYPE_VFP;
      else
      */
      if (arm_arch_cirrus)
        arm_fpu_arch = FPUTYPE_MAVERICK;
      else
        arm_fpu_arch = FPUTYPE_FPA_EMU2;
#endif
      if (tune_flags & FL_CO_PROC && arm_fpu_arch == FPUTYPE_FPA_EMU2)
        arm_fpu_tune = FPUTYPE_FPA;
      else
        arm_fpu_tune = arm_fpu_arch;
      arm_fp_model = fp_model_for_fpu[arm_fpu_arch];
      if (arm_fp_model == ARM_FP_MODEL_UNKNOWN)
        abort ();
    }

  if (target_float_abi_name != NULL)
    {
      /* The user specified a FP ABI.  */
      for (i = 0; i < ARRAY_SIZE (all_float_abis); i++)
        {
          if (streq (all_float_abis[i].name, target_float_abi_name))
            {
              arm_float_abi = all_float_abis[i].abi_type;
              break;
            }
        }
      if (i == ARRAY_SIZE (all_float_abis))
        error ("invalid floating point abi: -mfloat-abi=%s",
               target_float_abi_name);
    }
  else if (target_float_switch)
    {
      /* This is a bit of a hack to avoid needing target flags for these.  */
      if (target_float_switch[0] == 'h')
        arm_float_abi = ARM_FLOAT_ABI_HARD;
      else
        arm_float_abi = ARM_FLOAT_ABI_SOFT;
    }
  else
    arm_float_abi = TARGET_DEFAULT_FLOAT_ABI;

  if (arm_float_abi == ARM_FLOAT_ABI_HARD && TARGET_VFP)
    sorry ("-mfloat-abi=hard and VFP");

  /* If soft-float is specified then don't use FPU.  */
  if (TARGET_SOFT_FLOAT)
    arm_fpu_arch = FPUTYPE_NONE;

  /* For arm2/3 there is no need to do any scheduling if there is only
     a floating point emulator, or we are doing software floating-point.  */
  if ((TARGET_SOFT_FLOAT
       || arm_fpu_tune == FPUTYPE_FPA_EMU2
       || arm_fpu_tune == FPUTYPE_FPA_EMU3)
      && (tune_flags & FL_MODE32) == 0)
    flag_schedule_insns = flag_schedule_insns_after_reload = 0;

  /* Override the default structure alignment for AAPCS ABI.  */
  if (arm_abi == ARM_ABI_AAPCS)
    arm_structure_size_boundary = 8;

  if (structure_size_string != NULL)
    {
      int size = strtol (structure_size_string, NULL, 0);

      if (size == 8 || size == 32
          || (ARM_DOUBLEWORD_ALIGN && size == 64))
        arm_structure_size_boundary = size;
      else
        warning ("structure size boundary can only be set to %s",
                 ARM_DOUBLEWORD_ALIGN ? "8, 32 or 64" : "8 or 32");
    }

  if (arm_pic_register_string != NULL)
    {
      int pic_register = decode_reg_name (arm_pic_register_string);

      if (!flag_pic)
        warning ("-mpic-register= is useless without -fpic");

      /* Prevent the user from choosing an obviously stupid PIC register.  */
      else if (pic_register < 0 || call_used_regs[pic_register]
               || pic_register == HARD_FRAME_POINTER_REGNUM
               || pic_register == STACK_POINTER_REGNUM
               || pic_register >= PC_REGNUM)
        error ("unable to use '%s' for PIC register", arm_pic_register_string);
      else
        arm_pic_register = pic_register;
    }

  if (TARGET_THUMB && flag_schedule_insns)
    {
      /* Don't warn since it's on by default in -O2.  */
      flag_schedule_insns = 0;
    }
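  /* Roughly: arm_constant_limit bounds how many instructions may be spent
     synthesizing a constant inline before it is loaded from a literal
     (mini) pool instead, and max_insns_skipped bounds how many instructions
     may be conditionally executed in place of a short branch.  */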
  if (optimize_size)
    {
      /* There's some dispute as to whether this should be 1 or 2.  However,
         experiments seem to show that in pathological cases a setting of
         1 degrades less severely than a setting of 2.  This could change if
         other parts of the compiler change their behavior.  */
      arm_constant_limit = 1;

      /* If optimizing for size, bump the number of instructions that we
         are prepared to conditionally execute (even on a StrongARM).  */
      max_insns_skipped = 6;
    }
  else
    {
      /* For processors with load scheduling, it never costs more than
         2 cycles to load a constant, and the load scheduler may well
         reduce that to 1.  */
      if (arm_ld_sched)
        arm_constant_limit = 1;

      /* On XScale the longer latency of a load makes it more difficult
         to achieve a good schedule, so it's faster to synthesize
         constants that can be done in two insns.  */
      if (arm_tune_xscale)
        arm_constant_limit = 2;

      /* StrongARM has early execution of branches, so a sequence that is
         worth skipping is shorter.  */
      if (arm_is_strong)
        max_insns_skipped = 3;
    }

  /* Register global variables with the garbage collector.  */
  arm_add_gc_roots ();
}

static void
arm_add_gc_roots (void)
{
  gcc_obstack_init (&minipool_obstack);
  minipool_startobj = (char *) obstack_alloc (&minipool_obstack, 0);
}

/* A table of known ARM exception types.
   For use with the interrupt function attribute.  */

typedef struct
{
  const char *const arg;
  const unsigned long return_value;
}
isr_attribute_arg;

static const isr_attribute_arg isr_attribute_args [] =
{
  { "IRQ",   ARM_FT_ISR },
  { "irq",   ARM_FT_ISR },
  { "FIQ",   ARM_FT_FIQ },
  { "fiq",   ARM_FT_FIQ },
  { "ABORT", ARM_FT_ISR },
  { "abort", ARM_FT_ISR },
  { "ABORT", ARM_FT_ISR },
  { "abort", ARM_FT_ISR },
  { "UNDEF", ARM_FT_EXCEPTION },
  { "undef", ARM_FT_EXCEPTION },
  { "SWI",   ARM_FT_EXCEPTION },
  { "swi",   ARM_FT_EXCEPTION },
  { NULL,    ARM_FT_NORMAL }
};

/* Returns the (interrupt) function type of the current function, or
   ARM_FT_UNKNOWN if the type cannot be determined.  */

static unsigned long
arm_isr_value (tree argument)
{
  const isr_attribute_arg * ptr;
  const char * arg;

  /* No argument - default to IRQ.  */
  if (argument == NULL_TREE)
    return ARM_FT_ISR;

  /* Get the value of the argument.  */
  if (TREE_VALUE (argument) == NULL_TREE
      || TREE_CODE (TREE_VALUE (argument)) != STRING_CST)
    return ARM_FT_UNKNOWN;

  arg = TREE_STRING_POINTER (TREE_VALUE (argument));
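  /* The string is then matched against the isr_attribute_args table above.
     For illustration, a handler declared as

         void ext_irq_handler (void) __attribute__ ((interrupt ("IRQ")));

     (hypothetical name) is classified as ARM_FT_ISR; using the attribute
     with no argument also defaults to ARM_FT_ISR.  */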