From 67a501b1c914392e1e26cc4a7ae9dc1b74e3dd7f Mon Sep 17 00:00:00 2001
From: Felix Fietkau
Date: Mon, 19 Oct 2009 02:09:52 +0000
Subject: [PATCH] add a gcc variant based on gcc 4.3.3 with codesourcery enhancements

SVN-Revision: 18059
---
 toolchain/gcc/Config.in                       |     4 +
 toolchain/gcc/Config.version                  |     2 +
 toolchain/gcc/Makefile                        |    13 +-
 .../000-codesourcery_2009q1_203.patch         | 57486 ++++++++++++++++
 .../gcc/patches/4.3.3+cs/105-libtool.patch    |    84 +
 .../4.3.3+cs/106-fix_linker_error.patch       |    11 +
 .../4.3.3+cs/301-missing-execinfo_h.patch     |    11 +
 .../patches/4.3.3+cs/302-c99-snprintf.patch   |    13 +
 .../305-libmudflap-susv3-legacy.patch         |    49 +
 .../patches/4.3.3+cs/410-fix_pr37436.patch    |    71 +
 .../patches/4.3.3+cs/420-fix_pr26515.patch    |    13 +
 .../4.3.3+cs/810-arm-softfloat-libgcc.patch   |    25 +
 .../gcc/patches/4.3.3+cs/910-mbsd_multi.patch |   270 +
 .../993-arm_insn-opinit-RTX_CODE-fixup.patch  |    32 +
 .../998-gcc-4.3.0-fix-header.00.patch         |    13 +
 .../gcc/patches/4.3.3+cs/999-coldfire.patch   |    10 +
 16 files changed, 58101 insertions(+), 6 deletions(-)
 create mode 100644 toolchain/gcc/patches/4.3.3+cs/000-codesourcery_2009q1_203.patch
 create mode 100644 toolchain/gcc/patches/4.3.3+cs/105-libtool.patch
 create mode 100644 toolchain/gcc/patches/4.3.3+cs/106-fix_linker_error.patch
 create mode 100644 toolchain/gcc/patches/4.3.3+cs/301-missing-execinfo_h.patch
 create mode 100644 toolchain/gcc/patches/4.3.3+cs/302-c99-snprintf.patch
 create mode 100644 toolchain/gcc/patches/4.3.3+cs/305-libmudflap-susv3-legacy.patch
 create mode 100644 toolchain/gcc/patches/4.3.3+cs/410-fix_pr37436.patch
 create mode 100644 toolchain/gcc/patches/4.3.3+cs/420-fix_pr26515.patch
 create mode 100644 toolchain/gcc/patches/4.3.3+cs/810-arm-softfloat-libgcc.patch
 create mode 100644 toolchain/gcc/patches/4.3.3+cs/910-mbsd_multi.patch
 create mode 100644 toolchain/gcc/patches/4.3.3+cs/993-arm_insn-opinit-RTX_CODE-fixup.patch
 create mode 100644 toolchain/gcc/patches/4.3.3+cs/998-gcc-4.3.0-fix-header.00.patch
 create mode 100644 toolchain/gcc/patches/4.3.3+cs/999-coldfire.patch

diff --git a/toolchain/gcc/Config.in b/toolchain/gcc/Config.in
index a543b2825e..2c0f93e79a 100644
--- a/toolchain/gcc/Config.in
+++ b/toolchain/gcc/Config.in
@@ -39,6 +39,10 @@ if !LINUX_2_4
 
 	config GCC_VERSION_LLVM
 		bool "llvm-gcc 4.2"
+
+	config GCC_VERSION_4_3_3_CS
+		bool "gcc 4.3.3 with CodeSourcery enhancements"
+
 endif
 
 endchoice
diff --git a/toolchain/gcc/Config.version b/toolchain/gcc/Config.version
index 291d345af9..12987f4020 100644
--- a/toolchain/gcc/Config.version
+++ b/toolchain/gcc/Config.version
@@ -4,6 +4,7 @@ config GCC_VERSION
 	default "4.1.2" if GCC_VERSION_4_1_2
 	default "4.2.4" if GCC_VERSION_4_2_4
 	default "4.3.3" if GCC_VERSION_4_3_3
+	default "4.3.3+cs" if GCC_VERSION_4_3_3_CS
 	default "4.3.4" if GCC_VERSION_4_3_4
 	default "4.4.0" if GCC_VERSION_4_4_0
 	default "4.4.1" if GCC_VERSION_4_4_1
@@ -37,6 +38,7 @@ config GCC_VERSION_4_2
 config GCC_VERSION_4_3
 	bool
 	default y if GCC_VERSION_4_3_3
+	default y if GCC_VERSION_4_3_3_CS
 	default y if GCC_VERSION_4_3_4
 
 config GCC_VERSION_4_4
diff --git a/toolchain/gcc/Makefile b/toolchain/gcc/Makefile
index 40804d5e0f..fdcee3f217 100644
--- a/toolchain/gcc/Makefile
+++ b/toolchain/gcc/Makefile
@@ -21,15 +21,15 @@ include $(TOPDIR)/rules.mk
 
 PKG_NAME:=gcc
-PKG_VERSION:=$(call qstrip,$(CONFIG_GCC_VERSION))
-GCC_VERSION:=$(PKG_VERSION)
+GCC_VERSION:=$(call qstrip,$(CONFIG_GCC_VERSION))
+PKG_VERSION:=$(firstword $(subst +, ,$(GCC_VERSION)))
 GCC_DIR:=$(PKG_NAME)-$(PKG_VERSION)
 
 ifdef CONFIG_GCC_VERSION_LLVM
   PKG_SOURCE_VERSION:=c98c494b72ff875884c0c7286be67f16f9f6d7ab
   PKG_REV:=83504
   GCC_DIR:=llvm-gcc-4.2-r$(PKG_REV)
-  GCC_VERSION:=4.2.1
+  PKG_VERSION:=4.2.1
   PKG_SOURCE:=$(GCC_DIR).tar.gz
   PKG_SOURCE_PROTO:=git
   PKG_SOURCE_URL:=git://repo.or.cz/llvm-gcc-4.2.git
@@ -56,7 +56,7 @@ else
   endif
 endif
 
-PATCH_DIR=./patches/$(PKG_VERSION)
+PATCH_DIR=./patches/$(GCC_VERSION)
 
 include $(INCLUDE_DIR)/toolchain-build.mk
 
@@ -85,7 +85,8 @@ GCC_CONFIGURE:= \
 		$(SOFT_FLOAT_CONFIG_OPTION) \
 		$(call qstrip,$(CONFIG_EXTRA_GCC_CONFIG_OPTIONS)) \
 		$(if $(CONFIG_mips64)$(CONFIG_mips64el),--with-arch=mips64 --with-abi=64) \
-		$(if $(CONFIG_GCC_VERSION_LLVM),--enable-llvm=$(BUILD_DIR_BASE)/host/llvm)
+		$(if $(CONFIG_GCC_VERSION_LLVM),--enable-llvm=$(BUILD_DIR_BASE)/host/llvm) \
+		$(if $(CONFIG_GCC_VERSION_4_3_3_CS),--enable-poison-system-directories)
 
 ifneq ($(CONFIG_GCC_VERSION_4_3)$(CONFIG_GCC_VERSION_4_4),)
   GCC_BUILD_TARGET_LIBGCC:=y
@@ -191,7 +192,7 @@ define Stage1/Install
 		$(if $(GCC_BUILD_TARGET_LIBGCC),install-target-libgcc)
 
 	# XXX: glibc insists on linking against libgcc_eh
-	( cd $(TOOLCHAIN_DIR)/usr/lib/gcc/$(REAL_GNU_TARGET_NAME)/$(GCC_VERSION) ; \
+	( cd $(TOOLCHAIN_DIR)/usr/lib/gcc/$(REAL_GNU_TARGET_NAME)/$(PKG_VERSION) ; \
 		[ -e libgcc_eh.a ] || ln -sf libgcc.a libgcc_eh.a ; \
 		cp libgcc.a libgcc_initial.a; \
 	)
diff --git a/toolchain/gcc/patches/4.3.3+cs/000-codesourcery_2009q1_203.patch b/toolchain/gcc/patches/4.3.3+cs/000-codesourcery_2009q1_203.patch
new file mode 100644
index 0000000000..4e93370341
--- /dev/null
+++ b/toolchain/gcc/patches/4.3.3+cs/000-codesourcery_2009q1_203.patch
@@ -0,0 +1,57486 @@
+--- /dev/null
++++ b/ChangeLog.csl
+@@ -0,0 +1,7077 @@
++2009-05-21  Paul Brook
++
++	Issue #5545
++	gcc/
++	* config/arm/arm.md (ifcompare_neg_move): Disable when
++	TARGET_NO_SINGLE_COND_EXEC.
++
++2009-05-20  Joseph Myers
++
++	Issue #5399
++
++	gcc/
++	* config/mips/mips.md (sqrt2): Condition on
++	.
++
++2009-05-20  Maciej W. Rozycki
++
++	Issue #5448
++	gcc/
++	* config/mips/predicates.md (const_call_insn_operand): Correct the
++	condition used for -call_nonpic support.
++
++	* release-notes-csl.xml (Compiler performance bug fix): New.
++
++2009-05-12  Maxim Kuvyrkov
++
++	* ChangeLog.csl: Add changelog for the previous commit.
++	gcc/
++	* configure: Regenerate with proper autoconf version.
++
++2009-05-12  Maxim Kuvyrkov
++
++	gcc/
++	* common.opt (feglibc): New dummy option.
++	* opts.c (common_handle_option): Handle it.
++	* config.gcc: Handle 'eglibc' vendor.
++	* config/t-eglibc: Define multilibs for testing EGLIBC features.
++	* configure.ac (--with-eglibc-configs, EGLIBC_CONFIGS): New option and
++	variable.
++	* configure: Regenerate.
++	* Makefile.in (EGLIBC_CONFIGS): Handle
++
++2009-05-08  Nathan Sidwell
++
++	Issue 5335
++	gcc/
++	* class.c (resolve_address_of_overloaded_function): Use
++	OVL_CURRENT for error.
++	(instantiate_type): Allow FUNCTION_DECL when ms_extensions are
++	active. Don't copy the rhs node. Delete COMPOUND_EXPR code.
++	* typeck.c (build_compound_expr): Check RHS has known type.
++
++	gcc/testsuite/
++	* g++.dg/ext/ms-1.C: New.
++	* g++.old-deja/g++.other/overload11.C: Adjust.
++
++	* release-notes-csl.xml: Add two notes.
++
++2009-04-22  Maxim Kuvyrkov
++
++	gcc/testsuite/
++	* gcc.dg/tls/alias-1.c: Fix check for TLS.
++
++2009-04-22  Maxim Kuvyrkov
++
++	Issue #5106
++	Issue #4768
++
++	gcc/testsuite/
++	* gcc.dg/falign-labels-1.c (dg-options): Don't set for m68k and fido.
++
++2009-04-21  Andrew Jenner
++
++	gcc/testsuite/
++	* gcc.dg/pr34856.c: Handle powerpc*-*-elf.
++ ++2009-04-21 Andrew Jenner ++ ++ gcc/testsuite/ ++ * lib/target-supports.exp: Handle powerpc-*-elf. ++ ++2009-04-21 Maxim Kuvyrkov ++ ++ gcc/testsuite/ ++ * gcc.target/m68k/tls-ld.c, gcc.target/m68k/tls-le.c, ++ * gcc.target/m68k/tls-ld-xgot-xtls.c, gcc.target/m68k/tls-gd-xgot.c, ++ * gcc.target/m68k/tls-ie-xgot.c, gcc.target/m68k/tls-ld-xgot.c, ++ * gcc.target/m68k/tls-ld-xtls.c, gcc.target/m68k/tls-le-xtls.c, ++ * gcc.target/m68k/tls-gd.c, gcc.target/m68k/tls-ie.c: Remove -mcpu=5475 ++ setting, run only for *-linux-* target. ++ ++2009-04-15 Daniel Jacobowitz ++ ++ Revert (moved to scripts): ++ ++ 2009-04-10 Maxim Kuvyrkov ++ ++ Issue #693 ++ ++ gcc/ ++ * config/arm/linux-eabi.h (TARGET_UNWIND_TABLES_DEFAULT): Define ++ to true. ++ ++2009-04-13 Kazu Hirata ++ ++ gcc/testsuite/ ++ * gcc.dg/promote-short-3.c: XFAIL on fido. ++ ++2009-04-10 Daniel Jacobowitz ++ ++ gcc/testsuite/ ++ * gcc.dg/promote-short-3.c: Correct XFAIL syntax. ++ ++2009-04-10 Maxim Kuvyrkov ++ ++ Issue #693 ++ ++ gcc/ ++ * config/arm/linux-eabi.h (TARGET_UNWIND_TABLES_DEFAULT): Define ++ to true. ++ ++2009-04-09 Sandra Loosemore ++ ++ Issue #5174 ++ Backport from mainline: ++ ++ gcc/ ++ * doc/invoke.texi (Optimize Options): Add cross-reference to ++ -Q --help=optimizers examples. ++ ++2009-04-09 Nathan Froyd ++ ++ gcc/testsuite/ ++ * gcc.dg/promote-short-3.c: XFAIL test for x86, m68k, sh, and mips. ++ ++2009-04-09 Nathan Froyd ++ ++ Issue #5186 ++ ++ gcc/ ++ * tree-ssa-loop-promote.c (rebuild_with_promotion_1): Load a memory ++ reference prior to promoting it. ++ ++ gcc/testsuite/ ++ * gcc.dg/promote-short-9.c: New test. ++ ++2009-04-08 Nathan Froyd ++ ++ Issue #5171 ++ ++ gcc/ ++ * tree-ssa-loop-promote.c (collection_promotion_candidates): ++ Delay allocation and initialization of new promote_info until we ++ know we have a candidate loop index. ++ ++2009-04-06 Daniel Jacobowitz ++ ++ Backport from upstream: ++ ++ gcc/ ++ 2008-04-24 Uros Bizjak ++ ++ PR rtl-optimization/36006 ++ * expmed.c (store_fixed_bit_field): Copy op0 rtx before moving ++ temp to op0 in order to avoid invalid rtx sharing. ++ ++ gcc/testsuite/ ++ 2008-04-24 Francois-Xavier Coudert ++ ++ PR rtl-optimization/36006 ++ * gfortran.dg/pr36006-1.f90: New test. ++ * gfortran.dg/pr36006-2.f90: Ditto. ++ ++2009-04-06 Paul Brook ++ ++ Issue #5117 ++ Partial backport from FSF. ++ ++ gcc/ ++ * tree-ssa-pre.c (create_expression_by_pieces): Convert to sizetype ++ for POINTER_PLUS_EXPR. ++ ++2009-04-04 Daniel Jacobowitz ++ ++ gcc/ ++ * gcc.c (do_self_spec): Handle switches with arguments. ++ ++2009-04-04 Daniel Jacobowitz ++ ++ gcc/ ++ * testsuite/gcc.dg/pr34263.c: Add -fno-unroll-loops. ++ ++2009-04-04 Daniel Jacobowitz ++ ++ gcc/ ++ * config/arm/arm.md (insv): Do not share operands[0]. ++ ++2009-04-04 Sandra Loosemore ++ ++ Issue #5104 ++ PR tree-optimization/39604 ++ ++ * release-notes-csl.xml (Corruption of block-scope variables): ++ New note. ++ ++ gcc/testsuite ++ * g++.dg/tree-ssa/sink-1.C: New. ++ ++ gcc/ ++ * tree_ssa-sink.c (sink_code_in_bb): Do not sink statements out ++ of a lexical block containing variable definitions. ++ ++2009-03-31 Andrew Jenner ++ ++ gcc/testsuite/ ++ * gcc.dg/arm-g2.c: Add dg-skip-if for MontaVista. ++ * gcc.dg/arm-scd42-2.c: Ditto. ++ ++2009-03-31 Daniel Jacobowitz ++ ++ gcc/ ++ * common.opt (fpromote-loop-indices): Add Optimization keyword. ++ ++2009-03-31 Kazu Hirata ++ ++ Issue #5105 ++ gcc/testsuite/ ++ * gcc.target/m68k/pr36134.c: Use dg-skip-if to skip the testcase ++ if there is a conflict with -mcpu=. Use -mcpu=5208. 
++ ++2009-03-30 Andrew Jenner ++ ++ gcc/ ++ * config.gcc: Accept montavista*-, not just montavista-. ++ * config/mips/t-montavista-linux: Add Octeon multilibs. ++ ++2009-03-25 Andrew Stubbs ++ ++ gcc/testsuite/ ++ * gcc.dg/pragma-isr-trapa2.c: Skip test for FPU-less architectures. ++ ++2009-03-24 Andrew Stubbs ++ ++ Backport from upstream: ++ gcc/testsuite/ ++ 2008-02-25 Kaz Kojima ++ * gcc.dg/tree-ssa/ssa-pre-10.c: Use -fno-finite-math-only on ++ sh* targets. ++ ++2009-03-22 Mark Mitchell ++ ++ Backport: ++ ++ libstdc++-v3/ ++ * testsuite/25_algorithms/search_n/iterator.cc: Condition ++ iterations for simulators. ++ * testsuite/25_algorithms/heap/moveable.cc: Likewise. ++ * testsuite/21_strings/basic_string/inserters_extractors/char/28277.cc ++ Condition stream width for simulators. ++ * testsuite/27_io/basic_ostream/inserters_character/char/28277-3.cc: ++ Likewise. ++ * testsuite/27_io/basic_ostream/inserters_character/char/28277-4.cc: ++ Likewise. ++ * testsuite/ext/vstring/inserters_extractors/char/28277.cc: Likewise. ++ ++2009-03-20 Mark Mitchell ++ ++ Issue #4403 ++ ++ * release-notes-csl.xsml: Document compile-time performance ++ improvement. ++ ++2009-03-19 Joseph Myers ++ ++ Issue #2062 ++ Issue #4730 ++ ++ gcc/ ++ * config/arm/t-cs-linux: Add MULTILIB_MATCHES for ARMv4T -mcpu ++ options and for -mfpu=neon-fp16. Add armv7-a-hard multilib. ++ ++2009-03-19 Daniel Gutson ++ ++ Issue #4459 ++ ++ gcc/ ++ * config/arm/t-cs-linux: Replaced armv7 by armv7-a in MULTILIB_OPTIONS ++ and added mfpu=neon, plus the required MULTILIB_ALIASES. ++ ++ * release-notes.xml: Document. ++ ++2009-03-19 Andrew Stubbs ++ ++ gcc/ ++ * config.gcc (sh-*-*): Add support for --enable-extra-sgxx-multilibs. ++ ++2009-03-18 Daniel Gutson ++ ++ Issue #4753 ++ ++ gcc/ ++ * doc/invoke.texi: Added entries for cpus ARM Cortex-M0 and Cortex-M1. ++ ++2009-03-18 Sandra Loosemore ++ ++ Issue #4882 ++ ++ * release-notes-csl.xml (Better code for accessing global variables): ++ Copy-edit. Reference updated GCC manual discussion. ++ ++ Applied simultaneously to mainline: ++ gcc/ ++ * doc/invoke.texi (Code Gen Options): Expand discussion of ++ -fno-common. ++ ++2009-03-18 Kazu Hirata ++ ++ gcc/ ++ * config/sparc/sparc.c (sparc_emit_float_lib_cmp): Pass a libcall ++ SYMBOL_REF to hard_libcall_value. ++ ++2009-03-17 Sandra Loosemore ++ ++ Issue #4755 ++ ++ gcc/ ++ * config/arm/arm.c (arm_emit_fp16_const): New function. ++ * config/arm/arm-protos.h (arm_emit_fp16_const): Declare it. ++ * config/arm/arm.md (consttable_2): Replace logic for HFmode values ++ with assertion that they can't appear here. ++ (consttable_4): Add HFmode case and use the new function for it. ++ ++2009-03-17 Sandra Loosemore ++ ++ Issue #4755 ++ ++ Revert: ++ ++ 2009-01-23 Sandra Loosemore ++ ++ gcc/ ++ * config/arm/arm.c (dump_minipool): Use size of mode, not padded size, ++ in switch that controls whether to emit padding. ++ ++ 2009-02-05 Sandra Loosemore ++ ++ gcc/ ++ * config/arm/arm.c (struct minipool_fixup): Split mode field into ++ value_mode and ref_mode. ++ (add_minipool_forward_ref): Use value_mode of fixup. ++ (add_minipool_backward_ref): Likewise. ++ (push_minipool_fix): Pass both value_mode and ref_mode as parameters, ++ and store them in the fixup. ++ (note_invalid_constants): Adjust arguments to push_minipool_fix. ++ (arm_reorg): Use ref_mode of fixup. ++ ++2009-03-17 Daniel Gutson ++ ++ Issue #4753 ++ ++ gcc/ ++ * config/arm/t-cs-eabi (MULTILIB_MATCHES): Added cortex-m0 as a synonym of march=armv6-m. 
++ * config/arm/arm-cores.def: Added core cortex-m0. ++ * config/arm/arm-tune.md ("tune"): Aded cortexm0. ++ * config/arm/t-arm-elf (MULTILIB_MATCHES): Added cortex-m0 as a synonym of march=armv6-m. ++ * config/arm/t-uclinux-eabi (MULTILIB_MATCHES): Added cortex-m0 as a synonym of march=armv6-m. ++ ++ * release-notes.csl: Document. ++ ++2009-03-16 Daniel Jacobowitz ++ ++ gcc/ ++ * config/arm/neon-testgen.ml: Use dg-add-options arm_neon. ++ ++ gcc/testsuite/ ++ * gcc/target/arm/neon/: Regenerated test cases. ++ ++ * gcc.target/arm/neon-dse-1.c, gcc.target/arm/neon/polytypes.c, ++ gcc.target/arm/neon-vmla-1.c, gcc.target/arm/neon-vmls-1.c, ++ gcc.target/arm/neon-cond-1.c, gcc.dg/torture/arm-fp16-ops-8.c, ++ gcc.dg/torture/arm-fp16-ops-7.c, g++.dg/ext/arm-fp16/arm-fp16-ops-7.C, ++ g++.dg/ext/arm-fp16/arm-fp16-ops-8.C, g++.dg/abi/mangle-neon.C: Use ++ dg-add-options arm_neon. ++ ++ * gcc.target/arm/fp16-compile-vcvt.c, gcc.dg/torture/arm-fp16-ops-5.c, ++ gcc.dg/torture/arm-fp16-ops-6.c, g++.dg/ext/arm-fp16/arm-fp16-ops-5.C, ++ g++.dg/ext/arm-fp16/arm-fp16-ops-6.C: Use dg-add-options arm_neon_fp16 ++ and arm_neon_fp16_ok. ++ ++ * gcc.dg/vect/vect.exp, g++.dg/vect/vect.exp, ++ gfortran.dg/vect/vect.exp: Use add_options_for_arm_neon. ++ ++ * lib/target-supports.exp (add_options_for_arm_neon): New. ++ (check_effective_target_arm_neon_ok_nocache): New, from ++ check_effective_target_arm_neon_ok. Check multiple possibilities. ++ (check_effective_target_arm_neon_ok): Use ++ check_effective_target_arm_neon_ok_nocache. ++ (add_options_for_arm_neon_fp16) ++ (check_effective_target_arm_neon_fp16_ok) ++ check_effective_target_arm_neon_fp16_ok_nocache): New. ++ (check_effective_target_arm_neon_hw): Use add_options_for_arm_neon. ++ ++2009-03-16 Daniel Jacobowitz ++ ++ gcc/testsuite/ ++ * lib/target-supports.exp (check_effective_target_arm_neon_ok): ++ Correct arm_neon.h typo. ++ ++2009-03-16 Sandra Loosemore ++ ++ Issue #4878 ++ ++ * release-notes-csl.xml (VFP ABI support): New note. ++ ++2008-03-15 Catherine Moore ++ ++ Merge from Sourcery G++ 4.2: ++ ++ gcc/ ++ 2008-02-11 David Ung ++ ++ * config/mips/mips.c (mips_output_division): When ++ GENERATE_DIVIDE_TRAPS, generate the trap instrutions ++ against zero before the actual divide. This is friendlier ++ to out-of-order cpus like the 74k. ++ ++2009-03-13 Joseph Myers ++ ++ Issue #2062 ++ ++ gcc/ ++ * config/arm/t-linux-eabi: Add MULTILIB_MATCHES for ARMv4T -mcpu ++ options. ++ ++2009-03-13 Mark Mitchell ++ ++ Issue #3999 ++ ++ * release-notes-csl.xml: Document. ++ gcc/ ++ * config/arm/neon.md (*mul3add_neon): New pattern. ++ (*mul3negadd_neon): Likewise. ++ gcc/testsuite ++ * gcc.dg/target/arm/neon-vmla-1.c: New. ++ * gcc.dg/target/arm/neon-vmls-1.c: Likewise. ++ ++2009-03-13 Catherine Moore ++ ++ gcc/ ++ * config/i386/x-mingw32 (host-mingw32.o): Replace ++ diagnostic.h with $(DIAGNOSTIC_H). ++ ++2009-03-12 Joseph Myers ++ ++ Issue #4730 ++ ++ gcc/ ++ * config/arm/t-cs-eabi: Add MULTILIB_MATCHES for -mhard-float. ++ ++2009-03-12 Joseph Myers ++ ++ Issue #4730 ++ Issue #4850 ++ ++ gcc/ ++ * config/arm/t-cs-eabi: Add VFP ABI multilib. Add ++ MULTILIB_MATCHES for -march=armv5t and -mfpu=neon-fp16. ++ ++2009-03-12 Daniel Gutson ++ ++ Issue #4459 ++ ++ gcc/ ++ * config/arm/t-cs-eabi: Replaced Thumb2 VFP multilibs by ARM VFP3 NEON. ++ * release-notes-csl.xml: Document. 
++ ++2009-03-11 Nathan Froyd ++ ++ Backport from mainline: ++ ++ gcc/ ++ 2009-03-10 Richard Guenther ++ Nathan Froyd ++ ++ PR middle-end/37850 ++ * libgcc2.c (__mulMODE3): Use explicit assignments to form the ++ result. ++ (__divMODE3): Likewise. ++ ++2009-03-11 Nathan Froyd ++ ++ Backport from mainline: ++ ++ gcc/testsuite/ ++ 2009-03-11 Nathan Froyd ++ ++ * gcc.dg/vect/vect-82.c: Combine dg-do and ++ dg-require-effective-target into dg-skip-if. ++ * gcc.dg/vect/vect-83.c: Likewise. ++ ++2009-03-10 Nathan Froyd ++ ++ Issue #4569 ++ ++ * release-notes-csl.xml (Loop optimization improvements): New note. ++ ++2009-03-09 Nathan Froyd ++ ++ Issue #4569 ++ ++ gcc/ ++ * tree-ssa-loop-promote.c: New file. ++ * common.opt (fpromote-loop-indices): New option. ++ * timevar.def (TV_TREE_LOOP_PROMOTE): New timevar. ++ * Makefile.in (tree-ssa-loop-promote.o): New rule. ++ (OBJS-comon): Include it. ++ * tree-pass.h (pass_promote_short_indices): Declare. ++ * passes.c (init_optimization_passes): Add it. ++ * pointer-set.h (pointer_set_n_elements, pointer_set_clear, ++ pointer_map_n_elements, pointer_map_clear): Declare. ++ * pointer-set.c (pointer_set_n_elements, pointer_set_clear, ++ pointer_map_n_elements, pointer_map_clear): Define. ++ ++ gcc/doc/ ++ * invoke.texi (-fpromote-loop-indices): New entry. ++ ++ gcc/testsuite/ ++ * gcc.dg/promote-short-1.c: New file. ++ * gcc.dg/promote-short-2.c: New file. ++ * gcc.dg/promote-short-3.c: New file. ++ * gcc.dg/promote-short-4.c: New file. ++ * gcc.dg/promote-short-5.c: New file. ++ * gcc.dg/promote-short-6.c: New file. ++ * gcc.dg/promote-short-7.c: New file. ++ * gcc.dg/promote-short-8.c: New file. ++ ++2009-03-07 Mark Mitchell ++ ++ * release-notes-csl.xml: Mention use of -fno-common by default on ++ bare--metal targets. ++ ++2009-03-07 Joseph Myers ++ ++ Issue #4730 ++ ++ Merge from ARM/hard_vfp_4_4_branch: ++ ++ gcc/testsuite/ ++ 2009-03-06 Richard Earnshaw ++ * lib/target-supports.exp (check_effective_target_hard_vfp_ok): Make ++ this a linkage test. ++ * gcc.target/arm/aapcs/aapcs.exp: New framework for testing AAPCS ++ argument marshalling. ++ * abitest.h: New file. ++ * vfp1.c, vfp2.c, vfp3.c, vfp4.c, vfp5.c, vfp6.c, vfp7.c: New tests. ++ * vfp8.c, vfp9.c, vfp10.c, vfp11.c, vfp12.c, vfp13.c, vfp14.c: New. ++ ++2009-03-06 Joseph Myers ++ ++ Issue #4730 ++ ++ gcc/ ++ * doc/invoke.texi (-mfloat-abi=@var{name}): Remove statement about ++ -mfloat-abi=hard not being supported for VFP. ++ ++2009-03-06 Mark Mitchell ++ ++ gcc/ ++ * configure.ac (--with-specs): New option. ++ * configure: Regenerated. ++ * gcc.c (driver_self_specs): Include CONFIGURE_SPECS. ++ * Makefile.in (DRIVER_DEFINES): Add -DCONFIGURE_SPECS. ++ ++2009-03-05 Mark Mitchell ++ ++ Backport: ++ ++ gcc/testsuite/ ++ 2009-01-07 Janis Johnson ++ * g++.dg/torture/pr38586.C: Ignore a possible warning. ++ ++2009-03-05 Joseph Myers ++ ++ Issue #4730 ++ ++ gcc/ ++ * config/arm/arm.c (arm_handle_pcs_attribute): New. ++ (arm_get_pcs_model): Pass attribute arguments to ++ arm_pcs_from_attribute. ++ (arm_init_cumulative_args): Use base AAPCS for conversions from ++ floating-point types to DImode. ++ (arm_attribute_table): Add pcs attribute. ++ (arm_handle_pcs_attribute): New. ++ * config/arm/bpabi.h (DECLARE_LIBRARY_RENAMES): When renaming ++ conversions from floating-point types to DImode, also declare them ++ to use base AAPCS and declare functions they call to use base ++ AAPCS and their RTABI names. ++ ++ gcc/testsuite/ ++ * gcc.target/arm/eabi1.c: Do not skip for non-base ABI variants. 
++ (PCS): Define macro to use base AAPCS. ++ (decl_float, __aeabi_d2f, __aeabi_f2d): Use PCS macro. ++ ++2009-03-05 Mark Mitchell ++ ++ Backport: ++ ++ gcc/testsuite/ ++ 2008-11-24 DJ Delorie ++ * gcc.c-torture/execute/pr36321.c: Don't rely on argv[0] being set. ++ ++2009-03-05 Joseph Myers ++ ++ Issue #4730 ++ ++ gcc/ ++ * config/arm/arm.c (aapcs_vfp_sub_candidate): Use V2SImode and ++ V4SImode as representatives of all 64-bit and 128-bit vector ++ types. Allow vector types without vector modes. ++ (aapcs_vfp_is_call_or_return_candidate): Handle vector types ++ without vector modes like BLKmode. ++ (aapcs_vfp_allocate): Handle TImode for non-TARGET_NEON like ++ BLKmode. Avoid unsupported vector modes or TImode moves for ++ non-TARGET_NEON. ++ (aapcs_vfp_allocate_return_reg): Likewise. ++ (arm_vector_mode_supported_p): Only support V2SImode, V4HImode and ++ V8QImode if TARGET_NEON || TARGET_IWMMXT. ++ ++2009-03-04 Daniel Gutson ++ ++ Issue #4462 ++ ++ gcc/ ++ * config/arm/t-cs-linux: Removed marvell-f multilibs. ++ ++ * release-notes-csl.xml: Document. ++ ++2009-03-04 Joseph Myers ++ ++ Issue #4730 ++ ++ gcc/ ++ * config/arm/arm.c (arm_return_in_memory): Handle returning ++ vectors of suitable size in registers also for AAPCS case. ++ ++2009-03-04 Joseph Myers ++ ++ Issue #3681 ++ ++ Backport: ++ ++ gcc/ ++ 2009-03-03 Joseph Myers ++ * emit-rtl.c (adjust_address_1): Reduce offset to a signed value ++ that fits within Pmode. ++ ++ gcc/testsuite/ ++ 2009-03-03 Joseph Myers ++ * gcc.c-torture/compile/20090303-1.c, ++ gcc.c-torture/compile/20090303-2.c: New tests. ++ ++2009-03-03 Andrew Stubbs ++ ++ gcc/ ++ * config/sh/t-sgxxlite-linux (MULTILIB_EXCEPTIONS): Allow big endian ++ SH4A multilib. ++ ++2009-03-01 Mark Mitchell ++ ++ Issue #4768 ++ ++ * release-notes-csl.xml: Document. ++ gcc/ ++ * final.c (shorten_branches): Do not align labels for jump tables. ++ (final_scan_insn): Use JUMP_TABLE_DATA_P. ++ ++2009-03-02 Daniel Gutson ++ ++ Issue #4462 ++ ++ gcc/ ++ * config/arm/t-cs-eabi: Replaced marvell-f with armv5t multilibs. ++ ++ * release-notes-csl.xml: Document. ++ ++2009-03-02 Nathan Froyd ++ ++ Issue #4344 ++ ++ gcc/ ++ * tree.h (struct tree_type): Enlarge precision field. Rearrange ++ fields to position things within bytes. Move packed_flag to... ++ (struct tree_base): ...here. Decrease spare field accordingly. ++ (TYPE_PACKED): Adjust to reflect new packed_flag location. ++ * config/arm/arm-modes.def (XI): Define it as a real INT_MODE. ++ ++ gcc/testsuite/ ++ * gcc.target/arm/neon-dse-2.c: New test. ++ ++2009-02-27 Daniel Gutson ++ ++ Issue #4459 ++ ++ gcc/ ++ * config/arm/linux-eabi.h (LINK_SPEC): BE8_LINK_SPEC added. ++ * config/arm/arm-cores.def: Comment added. ++ * config/arm/bpapi.h (BE8_LINK_SPEC): New define. ++ (LINK_SPEC): BE_LINK_SPEC added. ++ ++ * release-notes-csl.xml: Add note. ++ ++2009-02-27 Joseph Myers ++ ++ Issue #4730 ++ ++ gcc/ ++ * config/arm/arm.c (aapcs_layout_arg): Handle coprocessor argument ++ candidates after a previous argument failed to be allocated to ++ coprocessor registers the same way as the first failing argument. ++ ++2009-02-26 Joseph Myers ++ ++ Issue #4730 ++ ++ gcc/ ++ * config/arm/arm.c (arm_libcall_value, arm_init_cumulative_args): ++ Use base ABI for conversion libfuncs between HFmode and SFmode. ++ ++2009-02-26 Nathan Froyd ++ ++ Issue #4344 ++ ++ gcc/testsuite/ ++ * gcc.target/arm/neon-dse-1.c: New test. ++ ++2009-02-25 Nathan Froyd ++ ++ Issue #4344 ++ ++ * release-notes-csl.xml (Internal compiler error with large ++ NEON types): New note. 
++ ++ gcc/ ++ * dse.c (struct store_info): Change positions_needed to an ++ unsigned HOST_WIDEST_INT. ++ ++ Backport portions of: ++ 2008-12-23 Jakub Jelinek ++ ++ gcc/ ++ * dse.c (struct store_info): Change begin and end fields to ++ HOST_WIDE_INT. ++ (set_position_unneeded, set_all_positions_unneeded, ++ any_positions_needed_p, all_positions_needed_p): New static inline ++ functions. ++ (set_usage_bits): Don't look at stores where ++ offset + width >= MAX_OFFSET. ++ (check_mem_read_rtx): Use all_positions_needed_p function. ++ ++ Backport from mainline: ++ gcc/ ++ 2008-04-11 H.J. Lu ++ ++ * dse.c (lowpart_bitmask): New. ++ ++2009-02-26 Joseph Myers ++ ++ Issue #4730 ++ ++ Merge from ARM/hard_vfp_4_4_branch: ++ ++ gcc/ ++ 2009-01-13 Richard Earnshaw ++ ++ * doc/tm.texi (TARGET_LIBCALL_VALUE): Add missing end statement. ++ ++ 2008-12-09 Richard Earnshaw ++ ++ ARM Hard-VFP calling convention ++ * target-def.h (TARGET_LIBCALL_VALUE): New hook. ++ * target.h (gcc_target): Add libcall_value to table of call hooks. ++ * targhooks.h (default_libcall_value): Default implementation. ++ * targhooks.c (default_libcall_value): Likewise. ++ * doc/tm.texi (TARGET_LIBCALL_VALUE): Document it. ++ * optabs.c (expand_unop): Use it. ++ * expr.h (hard_libcall_value): Pass the function RTX through. ++ * calls.c (emit_library_call_value_1): Update call to ++ hard_libcall_value. ++ * explow.c (hard_libcall_value): Use new target hook. ++ * testsuite/lib/target-supports.exp ++ (check_effective_target_arm_hard_vfp_ok): New hook. ++ (check_effective_target_arm_neon_ok): Improve test for neon ++ availability. ++ * testsuite/gcc.target/arm/eabi1.c: Only run test in base variant. ++ * config/arm/arm.c: Include cgraph.h ++ (TARGET_FUNCTION_VALUE): Override default hook. ++ (arm_pcs_default): New variable. ++ (arm_override_options): Don't fault hard calling convention with VFP. ++ Add support for AAPCS variants. ++ (arm_function_value): Make static. Handle AAPCS variants. ++ (arm_libcall_value): New function. ++ (arm_apply_result_size): Handle VFP registers in results. ++ (arm_return_in_memory): Rework all AAPCS variants; handle hard-vfp ++ conventions. ++ (pcs_attribute_args): New variable. ++ (arm_pcs_from_attribute): New function. ++ (arm_get_pcs_model): New function. ++ (aapcs_vfp_cum_init): New function. ++ (aapcs_vfp_sub_candidate): New function. ++ (aapcs_vfp_is_return_candidate): New function. ++ (aapcs_vfp_is_call_candidate): New function. ++ (aapcs_vfp_allocate): New function. ++ (aapcs_vfp_allocate_return_reg): New function. ++ (aapcs_vfp_advance): New function. ++ (aapcs_cp_arg_layout): New variable. ++ (aapcs_select_call_coproc): New function. ++ (aapcs_select_return_coproc): New function. ++ (aapcs_allocate_return_reg): New function. ++ (aapcs_libcall_value): New function. ++ (aapcs_layout_arg): New function. ++ (arm_init_cumulative_args): Initialize AAPCS args data. ++ (arm_function_arg): Handle AAPCS variants using new interface. ++ (arm_arg_parital_bytes): Likewise. ++ (arm_function_arg_advance): New function. ++ (arm_function_ok_for_sibcall): Ensure that sibling calls agree on ++ calling conventions. ++ (arm_setup_incoming_varargs): Handle new AAPCS args data. ++ * arm.h (NUM_VFP_ARG_REGS): Define. ++ (LIBCALL_VALUE): Update. ++ (FUNCTION_VALUE): Delete. ++ (FUNCTION_VALUE_REGNO_P): Add VFP regs. ++ (arm_pcs): New enum. ++ (CUMULATIVE_ARGS): New data to support AAPCS argument marshalling. ++ (FUNCTION_ARG_ADVANCE): Call arm_function_arg_advance. ++ (FUNCTION_ARG_REGNO_P): Add VFP regs. 
++ * arm-protos.h (arm_function_arg_advance): Add. ++ (aapcs_libcall_value): Add. ++ (arm_function_value): Delete. ++ ++2009-02-25 Joseph Myers ++ ++ Backport from FSF: ++ ++ gcc/ ++ 2008-05-08 Kai Tietz ++ * config/arm/arm.c (arm_return_in_memory): Add fntype argumen. ++ * config/arm/arm.h (RETURN_IN_MEMORY): Replace RETURN_IN_MEMORY ++ by TARGET_RETURN_IN_MEMORY. ++ * config/arm/arm-protos.h (arm_return_in_memory): Add fntype argument. ++ ++ 2008-05-15 Diego Novillo ++ * config/arm/arm.c (arm_return_in_memory): Fix return type. ++ * config/arm/arm-protos.h (arm_return_in_memory): Likewise. ++ ++ 2008-06-19 Chung-Lin Tang ++ * arm-protos.h (arm_return_in_memory): Remove public ++ arm_return_in_memory() prototype. ++ * arm.c (arm_return_in_memory): Add static prototype, add target ++ hook macro, change definition and comments. ++ * arm.h (TARGET_RETURN_IN_MEMORY): Remove. ++ ++2009-02-24 Sandra Loosemore ++ ++ Issue #2369 ++ Committed upstream at the same time. ++ ++ gcc/ ++ * doc/invoke.texi (Link Options): Document an easier way to pass ++ options that take arguments to the GNU linker using -Xlinker and ++ -Wl. ++ ++2009-02-24 Andrew Stubbs ++ ++ gcc/ ++ * config/sh/lib1funcs.asm (ic_invalidate): icbi is not valid in a ++ delay slot. ++ ++2009-02-24 Andrew Stubbs ++ ++ gcc/testsuite/ ++ * gcc.target/sh/sh4a-memmovua.c: Include string.h instead of stdlib.h. ++ ++2009-02-24 Andrew Stubbs ++ ++ gcc/testsuite/ ++ * gcc.target/sh/sh4a-bitmovua.c (y0): Rename to y_0 to avoid a clash ++ with the built-in y0, and the subsequent warning. ++ (y1): Likewise, rename to y_1. ++ ++2009-02-21 Mark Mitchell ++ ++ Issue #4694 ++ Backport: ++ libiberty/ ++ 2008-07-31 Jakub Jelinek ++ * mkstemps.c (mkstemps): Keep looping even for EISDIR. ++ 2008-07-31 Denys Vlasenko ++ * mkstemps.c (mkstemps): If open failed with errno other than ++ EEXIST, return immediately. ++ * make-temp-file.c: Include errno.h. ++ (make_temp_file): If mkstemps failed, print an error message ++ before aborting. ++ ++2009-02-19 Kazu Hirata ++ ++ Issue #4152 ++ * release-notes-csl.xml: Mention the bug fix below. ++ ++ Backport: ++ 2008-07-30 Andrew Jenner ++ ++ * config/arm/arm.c (arm_compute_static_chain_stack_bytes): New ++ function. ++ (arm_compute_initial_elimination_offset): Use it. ++ (arm_compute_save_reg_mask): Include static chain save slot when ++ calculating alignment. ++ (arm_get_frame_offsets): Ditto. ++ (thumb1_compute_save_reg_mask): Ensure we have a low register saved ++ that we can use to decrement the stack when the stack decrement ++ could be too big for an immediate value in a single insn. ++ (thumb1_expand_prologue): Avoid using r12 for stack decrement. ++ ++2009-02-19 Catherine Moore ++ ++ Issue #2953 ++ ++ gcc/ ++ * debug.h (set_name): Declare. ++ * dwarf2out.c (dwarf2out_set_name): Declare. ++ (dwarf2_debug_hooks): Add set_name. ++ (find_AT_string): New. ++ (add_AT_string): Call find_AT_string. ++ (dwarf2out_set_name): New. ++ * cp/decl.c (grokdeclarator): Call set_name. ++ * vmsdbgout.c (vmsdbg_debug_hooks): Add set_name_debug_nothing. ++ * debug.c (do_nothing_debug_hooks): Likewise. ++ * dbxout.c (dbx_debug_hooks): Likewise. ++ * sdbout.c (sdb_debug_hooks): Likewise. ++ ++ * release-notes-csl.xml: Add note. ++ ++2009-02-19 Kazu Hirata ++ ++ Issue #4613 ++ gcc/ ++ * config/arm/arm.c (arm_rtx_costs_1): Teach that the cost of MLS ++ is the same as its underlying multiplication. ++ * config/arm/arm.md (two splitters): New. ++ * config/arm/predicates.md (binary_operator): New. 
++ ++ * release-notes-csl.xml: Add a release note fragment for this ++ optimization. ++ ++2009-02-17 Andrew Jenner ++ Maciej Rozycki ++ ++ gcc/ ++ * unwind.inc (_Unwind_RaiseException): Use return value of ++ uw_init_context. ++ * unwind-dw2.c (uw_init_context): Make macro an expression instead of ++ a statement. ++ (uw_init_context_1): Add return value. ++ * unwind-sjlj.c (uw_init_context): Add return value. ++ ++2009-02-17 Kazu Hirata ++ ++ gcc/ ++ * config/arm/arm.c (arm_rtx_costs_1): Treat a minus with a shift ++ the same as a minus without a shift. ++ ++2009-02-16 Joseph Myers ++ ++ Issue #4622 ++ ++ gcc/ ++ * tree-predcom.c (ref_at_iteration): Return NULL_TREE if loop ++ header is an empty block. ++ ++ gcc/testsuite/ ++ * g++.dg/torture/predcom-1.C: New test. ++ ++2009-02-16 Julian Brown ++ ++ Issue #3747 ++ gcc/ ++ * config/arm/t-linux-eabi (LIB2FUNCS_STATIC_EXTRA): Add ++ config/arm/linux-atomic.c. ++ * config/arm/linux-atomic.c: New. ++ ++2009-02-12 Nathan Sidwell ++ ++ Issue #4620 ++ gcc/ ++ * config/rs6000/rs6000.c (rs6000_init_builtins): Set TYPE_NAME of ++ our distinct integral and vector types. ++ gcc/testsuite/ ++ * g++.dg/ext/altivec-17.C: New. ++ ++ * release-notes-csl.xml: Add note. ++ ++2009-02-10 Mark Mitchell ++ ++ libjava/classpath/ ++ * m4/acinclude.m4 (CLASSPATH_TOOLEXECLIBDIR): Match libjava. ++ * configure.ac (--enable-version-specific-runtime-libs): Support. ++ * Makefile.in, */Makefile.in: Regenerated. ++ ++ libjava/ ++ * Makefile.am (pkgconfigdir): Use toolexeclibdir, not $(libdir). ++ * configure.ac (dbexecdir): Likewise. ++ * configure: Regenerated. ++ ++ libjava/ ++ * Makefile.am (jardir): Set to a target-specific location. ++ gcc/java/ ++ * Make-lang.in: Adjust to match. ++ ++2009-02-09 Mark Mitchell ++ ++ Backport: ++ libffi/ ++ 2008-05-09 Julian Brown ++ * Makefile.am (LTLDFLAGS): New. ++ (libffi_la_LDFLAGS): Use above. ++ * Makefile.in: Regenerate. ++ boehm-gc/ ++ 2009-02-09 Mark Mitchell ++ * Makefile.am (LTLDFLAGS): New variable. ++ (LINK): Use it. ++ * Makefile.in: Regenerated. ++ libjava/ ++ 2009-02-09 Mark Mitchell ++ * Makefile.am (LTLDFLAGS): Define. ++ (GCJLINK): Use it. ++ (LIBLINK): Likewise. ++ * Makefile.in: Regenerated. ++ 2009-02-09 Mark Mitchell ++ * configure.ac: Define enable_sjlj_exceptions ++ appropriately under the ARM EH ABI. ++ * configure: Regenerated. ++ PR other/5303 ++ * addr2name.awk: Remove. ++ * Makefile.am (bin_SCRIPTS): Remove addr2name.awk. ++ * Makefile.in: Regenerated. ++ ++2009-02-05 Sandra Loosemore ++ ++ gcc/ ++ * config/arm/arm.c (struct minipool_fixup): Split mode field into ++ value_mode and ref_mode. ++ (add_minipool_forward_ref): Use value_mode of fixup. ++ (add_minipool_backward_ref): Likewise. ++ (push_minipool_fix): Pass both value_mode and ref_mode as parameters, ++ and store them in the fixup. ++ (note_invalid_constants): Adjust arguments to push_minipool_fix. ++ (arm_reorg): Use ref_mode of fixup. ++ ++2009-02-04 Andrew Jenner ++ ++ gcc/ ++ * config.gcc: Handle arm-montavista-linux-gnueabi, ++ mips-montavista-linux-gnu, mips64octeon*-montavista-elf* and ++ powerpc-montavista-linux-gnu. ++ * config/rs6000/t-montavista-linux: New file. ++ * config/rs6000/montavista-linux.h: New file. ++ * config/arm/t-montavista-linux: New file. ++ * config/arm/montavista-linux.h: New file. ++ * config/mips/t-montavista-linux: New file. ++ * config/mips/t-montavista-elf: New file. ++ * config/mips/montavista-linux.h: New file. ++ ++ libgcc/ ++ * config.host: Handle mips64octeon-montavista-elf*. 
++ ++2009-02-04 Catherine Moore ++ ++ Backport: ++ ++ 2009-02-02 Catherine Moore ++ ++ * sde.h (SUBTARGET_ARM_SPEC): Don;t assemble -fpic code as ++ -mabicalls. ++ ++2009-02-03 Kazu Hirata ++ ++ * release-notes-csl.xml: Add a release note for improved ++ multiplication.c ++ ++2009-02-03 Kazu Hirata ++ ++ gcc/ ++ * expmed.c (synth_mult): When trying out a shift, pass the result ++ of a signed shift. ++ ++2009-02-03 Andrew Stubbs ++ ++ gcc/ ++ * config.gcc (sh-*): Add --enable-extra-sgxxlite-multilibs option to ++ enable uclibc multilibs. ++ * config/sh/cs-sgxxlite-linux.h: New file. ++ * config/sh/t-sgxxlite-linux: New file. ++ ++2009-02-03 Andrew Stubbs ++ ++ gcc/ ++ * config/sh/linux-unwind.h: Disable when inhibit_libc is defined. ++ ++2009-02-03 Andrew Stubbs ++ ++ gcc/ ++ * config.gcc (sh-*-*): Add sysroot-suffix.h to tm_file. ++ Add t-sysroot-suffix to tmake_file. ++ ++2009-02-03 Kazu Hirata ++ ++ config/ ++ * mh-mingw: Add a comment. ++ ++ libiberty/ ++ * cygpath.c (msvcrt_dll): Change the return type to HMODULE. ++ (msvcrt_fopen): Use HMODULE for the return value from msvcrt_dll. ++ ++2009-02-03 Andrew Stubbs ++ ++ gcc/ ++ * configure.ac: Add new AC_SUBST for TM_ENDIAN_CONFIG, ++ TM_MULTILIB_CONFIG and TM_MULTILIB_EXCEPTIONS_CONFIG. ++ * configure: Regenerate. ++ * Makefile.in: Add variables TM_ENDIAN_CONFIG, TM_MULTILIB_CONFIG ++ and TM_MULTILIB_EXCEPTIONS_CONFIG. ++ * config.gcc (sh-*-*): Switch to using TM_ENDIAN_CONFIG, ++ TM_MULTILIB_CONFIG, and TM_MULTILIB_EXCEPTIONS_CONFIG. ++ Don't add default cpu to multilib list unnecessarily, but do enable ++ the relevant compiler option.. ++ Add support for --with-multilib-list=none, and ++ --with-multilib-list=! to supress unwanted multilibs. ++ Remove use_fixproto=yes. ++ * config/sh/t-sh (DEFAULT_ENDIAN, OTHER_ENDIAN): New variables. ++ (MULTILIB_ENDIAN, MULTILIB_CPUS): Delete variables. ++ (MULTILIB_OPTIONS): Redefine using OTHER_ENDIAN and ++ TM_MULTILIB_CONFIG. ++ (MULTILIB_EXCEPTIONS): Add TM_MULTILIB_EXCEPTIONS_CONFIG. ++ (MULTILIB_OSDIRNAMES): New variable. ++ * config/sh/t-1e: Delete file. ++ * config/sh/t-mlib-sh1: Delete file. ++ * config/sh/t-mlib-sh2: Delete file. ++ * config/sh/t-mlib-sh2a: Delete file. ++ * config/sh/t-mlib-sh2a-nofpu: Delete file. ++ * config/sh/t-mlib-sh2a-single: Delete file. ++ * config/sh/t-mlib-sh2a-single-only: Delete file. ++ * config/sh/t-mlib-sh2e: Delete file. ++ * config/sh/t-mlib-sh3e: Delete file. ++ * config/sh/t-mlib-sh4: Delete file. ++ * config/sh/t-mlib-sh4-nofpu: Delete file. ++ * config/sh/t-mlib-sh4-single: Delete file. ++ * config/sh/t-mlib-sh4-single-only: Delete file. ++ * config/sh/t-mlib-sh4a: Delete file. ++ * config/sh/t-mlib-sh4a-nofpu: Delete file. ++ * config/sh/t-mlib-sh4a-single: Delete file. ++ * config/sh/t-mlib-sh4a-single-only: Delete file. ++ * config/sh/t-mlib-sh4al: Delete file. ++ * config/sh/t-mlib-sh5-32media: Delete file. ++ * config/sh/t-mlib-sh5-32media-nofpu: Delete file. ++ * config/sh/t-mlib-sh5-64media: Delete file. ++ * config/sh/t-mlib-sh5-64media-nofpu: Delete file. ++ * config/sh/t-mlib-sh5-compact: Delete file. ++ * config/sh/t-mlib-sh5-compact-nofpu: Delete file. ++ * config/sh/t-linux: Don't override MULTILIB_EXCEPTIONS. ++ ++2009-02-03 Andrew Stubbs ++ ++ gcc/ ++ * config/print-sysroot-suffix.sh: Add support for MULTILIB_ALIASES. ++ * config/t-sysroot-suffix: Pass MULTILIB_ALIASES. ++ ++2009-02-03 Andrew Stubbs ++ ++ gcc/ ++ * config/arm/print-sysroot-suffix.sh: Move to ... ++ * config/print-sysroot-suffix.sh: ... here. 
++ Remove all MULTILIB_ALIASES to make it suitable for upstream ++ submission. ++ * config/arm/t-sysroot-suffix: Move to ... ++ * config/t-sysroot-suffix: ... here. ++ Modify path to print-sysroot-suffix.sh. ++ Remove all MULTILIB_ALIASES. ++ * config.gcc: Modify paths to print-sysroot-suffix.sh. ++ ++2009-01-30 Andrew Stubbs ++ ++ gcc/libstdc++-v3/ ++ * config/cpu/sh/atomicity.h: Put the SH4A specific functions in the ++ __gnu_cxx namespace. Remove "static inline". ++ ++2009-01-29 Kazu Hirata ++ ++ * expmed.c (shiftsub_cost): Rename to shiftsub0_cost. ++ (shiftsub1_cost): New. ++ (init_expmed): Compute shiftsub1_cost. ++ (synth_mult): Optimize multiplications by constants of the form ++ -(2^^m-1) for some constant positive integer m. ++ ++2009-01-27 Nathan Sidwell ++ ++ Issue #4428 ++ gcc/ ++ * config/mips/mips.md (jump): Deal with $gp restoration in delay ++ slot for o32 and o64 ABIs. ++ ++ gcc/testsuite/ ++ * gcc.target/mips/branch-2.c: New. ++ ++ * release-notes-csl.xml: Add note. ++ ++2009-01-26 Kazu Hirata ++ ++ * release-notes-csl.xml: Mention performance improvements for ARM. ++ ++ Backport from mainline: ++ 2009-01-13 Richard Earnshaw ++ ++ * arm.c (struct processors): Pass for speed down into cost helper ++ functions. ++ (const_ok_for_op): Handle COMPARE and inequality nodes. ++ (arm_rtx_costs_1): Rewrite. ++ (arm_size_rtx_costs): Update prototype. ++ (arm_rtx_costs): Pass speed down to helper functions. ++ (arm_slowmul_rtx_costs): Rework cost calculations. ++ (arm_fastmul_rtx_costs, arm_xscale_rtx_costs): Likewise. ++ (arm_9e_rtx_costs): Likewise. ++ ++2009-01-26 Julian Brown ++ ++ Issue #4515 ++ ++ gcc/ ++ * config/arm/ieee754-df.S (cmpdf2): Avoid writing below SP. ++ * config/arm/ieee754-sf.S (cmpsf2): Likewise. ++ ++2009-01-23 Sandra Loosemore ++ ++ Issue #3989 ++ ++ * release-notes-csl.xml (Thumb half-precision floating point bug fix): ++ New note. ++ ++ gcc/ ++ * config/arm/arm.c (dump_minipool): Use size of mode, not padded size, ++ in switch that controls whether to emit padding. ++ ++2009-01-20 Sandra Loosemore ++ ++ Issue #4289 ++ ++ fixincludes/ ++ * server.c (run_shell): Quote directory name passed to cd. ++ ++2009-01-14 Nathan Froyd ++ ++ * release-notes-csl.xml: Add note. Correct TARGET line for ++ previous note. ++ ++ gcc/ ++ * tree-ssa-remove-local-statics.c (maybe_discover_new_declaration): ++ Avoid variables with aggregate and vector types. ++ (maybe_create_new_variable): Create the var_ann prior to marking ++ the symbol for renaming. ++ ++ gcc/testsuite/ ++ * gcc.dg/remove-local-statics-15.c: New test. ++ * gcc.dg/remove-local-statics-16.c: New test. ++ ++2009-01-14 Joseph Myers ++ ++ gcc/ ++ * config/sparc/sol2-bi.h (LINK_ARCH64_SPEC_BASE): Use %R with ++ absolute library paths. ++ ++2009-01-12 Joseph Myers ++ ++ gcc/ ++ * config/sol2.h (LINK_ARCH32_SPEC_BASE): Use %R with absolute ++ library paths. ++ ++2009-01-06 Andrew Stubbs ++ Nathan Sidwell ++ ++ Issue #4436 ++ ++ gcc/ ++ * config/rs6000/rs6000.c (rs6000_override_options): Don't override ++ an explicit -mno-isel. ++ ++2009-01-02 Nathan Sidwell ++ ++ Issue 4361 ++ gcc/ ++ * config/m68k/m68k-devices.def: Add 51jm. ++ ++ * release-notes-csl.xml: Document 51jm addition. ++ ++2008-12-21 Mark Mitchell ++ ++ * release-notes-csl.xml: Adjust wording of last note. ++ ++2008-12-18 Mark Mitchell ++ ++ Issue #4399 ++ * release-notes-csl.xml: Document. ++ gcc/ ++ * tree-ssa-pre.c (compute_antic): Correct loop bounds. 
++ ++2008-12-19 Joseph Myers ++ ++ gcc/testsuite/ ++ * gcc.target/powerpc/20081204-1.c: Require powerpc_spe_ok. ++ ++2008-12-19 Joseph Myers ++ ++ Backport from FSF: ++ ++ gcc/testsuite/ ++ 2008-03-13 Uros Bizjak ++ * gcc.dg/vect/vect-align-2.c: Remove dg-do run directive. ++ (main): Call check_vect. ++ ++2008-12-18 Joseph Myers ++ ++ Backport from FSF: ++ ++ gcc/ ++ 2008-12-18 Joseph Myers ++ * config/rs6000/rs6000.c (rs6000_generate_compare): Condition ++ choice of e500 comparison instructions on flag_finite_math_only && ++ !flag_trapping_math, not flag_unsafe_math_optimizations. ++ * config/rs6000/rs6000.md (abstf2): Condition choice of e500 ++ instructions on flag_finite_math_only && !flag_trapping_math, not ++ flag_unsafe_math_optimizations. ++ (bltgt, sltgt): Disable for TARGET_HARD_FLOAT && !TARGET_FPRS. ++ * config/rs6000/spe.md (cmpsfeq_gpr, tstsfeq_gpr, cmpsfgt_gpr, ++ tstsfgt_gpr, cmpsflt_gpr, tstsflt_gpr, cmpdfeq_gpr, tstdfeq_gpr, ++ cmpdfgt_gpr, tstdfgt_gpr, cmpdflt_gpr, tstdflt_gpr, cmptfeq_gpr, ++ tsttfeq_gpr, cmptfgt_gpr, tsttfgt_gpr, cmptflt_gpr, tsttflt_gpr): ++ Condition choice of comparison instructions on ++ flag_finite_math_only && !flag_trapping_math, not ++ flag_unsafe_math_optimizations. ++ ++2008-12-18 Catherine Moore ++ ++ Issue #4439 ++ ++ * release-notes-csl.xml: Document -march= bug fix. ++ ++2008-12-18 Catherine Moore ++ ++ Issue #4334 ++ ++ Backport: ++ ++ gcc/ ++ 2008-11-23 Richard Sandiford ++ ++ * config/mips/mips.c (mips_legitimize_address): Handle ++ illegitimate CONST_INT addresses. ++ ++ ++ 2008-06-01 Richard Sandiford ++ ++ * config/mips/mips.c (mips_valid_offset_p): New function. ++ ++2008-12-18 Catherine Moore ++ ++ Issue #4439 ++ ++ gcc/ ++ * config/mips/mips.h (MIPS_ISA_LEVEL_SPEC): Remove extraneous ++ colon. ++ ++2008-12-16 Joseph Myers ++ ++ gcc/ ++ * config/i386/cs-linux.opt (mrh73, mrhel3): New options. ++ * config/i386/cs-linux.h (SYSROOT_SUFFIX_SPEC): Handle new ++ options. ++ * config/i386/t-cs-linux (MULTILIB_OPTIONS, MULTILIB_DIRNAMES, ++ MULTILIB_OSDIRNAMES): Update for new options. ++ (MULTILIB_EXCEPTIONS): Define. ++ ++2008-12-05 Catherine Moore ++ ++ gcc/testsuite/ ++ * gcc-target/mips/mips-nonpic/mips-nonpic.h: New. ++ * gcc-target/mips/mips-nonpic/nonpic-[0-9]*.c: Rename to ++ main-[0-9]*.c. ++ * gcc-target/mips/mips-nonpic/mips-nonpic.exp: Run ++ main-*.c tests. ++ * gcc-target/mips/mips-nonpic/pic-*.c: Include mips-nonpic.h. ++ * gcc-target/mips/mips-nonpic/nonpic-*.c: Likewise. ++ ++2008-12-05 Catherine Moore ++ ++ * gcc/config/mips/MIPS-TOOLCHAIN.pdf: Remove. ++ ++2008-12-04 Joseph Myers ++ ++ gcc/ ++ * config/rs6000/rs6000.md (move_from_CR_gt_bit): Enable for ++ TARGET_HARD_FLOAT && !TARGET_FPRS, not TARGET_E500. ++ * config/rs6000/spe.md (e500_cr_ior_compare): Likewise. ++ ++ gcc/testsuite/ ++ * gcc.target/powerpc/20081204-1.c: New test. ++ ++2008-12-03 Daniel Jacobowitz ++ ++ gcc/testsuite/ ++ * gcc.dg/vect/vect-shift-2.c, gcc.dg/vect/vect-shift-3.c: New. ++ * lib/target-supports.exp (check_effective_target_vect_shift): New ++ function. ++ ++2008-12-02 Daniel Jacobowitz ++ ++ Issue #4343 ++ * release-notes-csl.xml: Document right shift fix. ++ ++ Backport from trunk: ++ ++ gcc/ ++ 2008-09-25 Dorit Nuzman ++ ++ * tree-vectorizer.c (vect_is_simple_use): Fix indentation. ++ * tree-vect-transform.c (vect_get_constant_vectors): Use vectype ++ instead of vector_type for constants. Take computation out of loop. ++ (vect_get_vec_def_for_operand): Use only vectype for constant case, ++ and use only vector_type for invariant case. 
++ (get_initial_def_for_reduction): Use vectype instead of vector_type. ++ ++ gcc/testsuite/ ++ 2008-09-25 Dorit Nuzman ++ ++ * gcc.dg/vect/ggc-pr37574.c: New test. ++ * gcc.dg/vect/vect.exp: Compile some tests with ggc flags. ++ ++2008-12-02 Maxim Kuvyrkov ++ ++ gcc/testsuite/ ++ * gcc.target/m68k/tls-1.c: Rename to tls-ie.c; fix. ++ * gcc.target/m68k/tls-2.c: Rename to tls-le.c; fix. ++ * gcc.target/m68k/tls-1-pic.c: Rename to tls-gd.c; fix. ++ * gcc.target/m68k/tls-2-pic.c: Rename to tls-ld.c; fix. ++ * gcc.target/m68k/xtls-1.c: Rename to tls-ie-xgot.c; fix. ++ * gcc.target/m68k/xtls-2.c: Rename to tls-le-xtls.c; fix. ++ * gcc.target/m68k/xtls-1-pic.c: Rename to tls-gd-xgot.c; fix. ++ * gcc.target/m68k/xtls-2-pic.c: Split into tls-ld-xgot.c, ++ tls-ld-xtls.c and tls-ld-xgot-xtls.c; fix. ++ ++ gcc/ ++ * config/m68k/m68k.md (UNSPEC_XGOT, UNSPEC_TLS, UNSPEC_XTLS): Replace ++ with ... ++ (UNSPEC_RELOC, UNSPEC_RELOC32): New. ++ * config/m68k/m68k.opt: Fix documentation. ++ * config/m68k/m68k.c (m68k_unwrap_symbol): Update. ++ (m68k_decompose_address): Update comment. ++ (enum m68k_tls_reloc): Rename to m68k_reloc; add RELOC_GOT value. ++ (TLS_RELOC_P): New macro. ++ (m68k_get_tls_unspec): Rewrite, rename to m68k_wrap_symbol. ++ (m68k_move_to_reg, m68k_wrap_symbol_into_got_ref): New static ++ functions. ++ (legitimize_pic_address): Use them, update comment. ++ (m68k_call_tls_get_addr, m68k_call_read_tp): Rewrite. ++ (m68k_legitimize_tls_address): Rewrite, fix code generation for ++ initial exec model. ++ (m68k_tls_referenced_p_1, m68k_tls_mentioned_p): Update. ++ (m68k_legitimize_address): Remove excessive assert. ++ (m68k_get_tls_decoration): Rename to m68k_get_reloc_decoration, update. ++ (m68k_output_addr_const_extra): Update. ++ (sched_attr_op_type): Update comment. ++ ++2008-12-01 Catherine Moore ++ ++ * gcc/config/mips/MIPS-TOOLCHAIN.pdf: New. ++ ++2008-11-30 Maxim Kuvyrkov ++ ++ Fix bugs in TLS code generation, add -mxtls option, add tests. ++ ++ gcc/ ++ * config/m68k/predicates.md (symbolc_operand): Fix. ++ * config/m68k/m68k.md (UNSPEC_GOTOFF): Rename to UNSPEC_XGOT, update ++ all uses. ++ (UNSPEX_XTLS): New constant. ++ (addsi3_5200): Handle XTLS symbols, indent. ++ * config/m68k/m68k-protos.h (m68k_unwrap_symbol): Declare. ++ * config/m68k/m68k.opt (mxtls): New option. ++ * config/m68k/m68k.c (m68k_unwrap_symbol): New function. ++ (m68k_decompose_address): Handle TLS references. ++ (m68k_get_gp): Move to a better place. ++ (legitimize_pic_address): Update, cleanup, add REG_EQUAL note when ++ appropriate. ++ (m68k_get_tls_unspec): New function to unify generation of TLS ++ references. ++ (m68k_libcall_value_in_a0_p): New static variable. ++ (m68k_call_tls_get_addr, m68k_call_m68k_read_tp): Rewrite. ++ (m68k_legitimize_tls_address): Cleanup, use m68k_get_tls_unspec. ++ (m68k_tls_referenced_p_1, m68k_tls_mentioned_p): Handle UNSPEC_XTLS. ++ (m68k_output_addr_const_extra): Handle UNSPEC_XTLS. ++ (print_operand_address): Update. ++ (m68k_libcall_value): Support calls to TLS helpers. ++ (m68k_sched_attr_op_type): Update. ++ * config/m68k/constraints.md (Cu): New constraint. ++ ++ gcc/testsuite/ ++ * gcc.target/m68k/tls-1.c: New test. ++ * gcc.target/m68k/tls-1-pic.c: New test. ++ * gcc.target/m68k/tls-2.c: New test. ++ * gcc.target/m68k/tls-2-pic.c: New test. ++ * gcc.target/m68k/xtls-1.c: New test. ++ * gcc.target/m68k/xtls-1-pic.c: New test. ++ * gcc.target/m68k/xtls-2.c: New test. ++ * gcc.target/m68k/xtls-2-pic.c: New test. 
++ ++2008-11-29 Joseph Myers ++ ++ Backport from FSF: ++ ++ gcc/testsuite/ ++ 2008-11-29 Joseph Myers ++ * g++.dg/cpp/stringop-1.C: New test. ++ ++ libcpp/ ++ 2008-11-29 Joseph Myers ++ * lex.c (cpp_token_len): Use 6 as default length. ++ ++2008-11-26 Catherine Moore ++ ++ gcc/testsuite/ ++ * gcc.target/mips/vr-mult-1.c: Require hard-float. ++ * gcc.target/mips/branch-cost-1.c: Likewise. ++ * gcc.target/mips/movcc-2.c: Likewise. ++ * gcc.target/mips/rsqrt-3.c: Likewise. ++ * gcc.target/mips/vr-mult-2.c: Likewise. ++ * gcc.target/mips/branch-cost-2.c: Likewise. ++ * gcc.target/mips/movcc-3.c: Likewise. ++ * gcc.target/mips/nmadd-1.c: Likewise. ++ * gcc.target/mips/nmadd-2.c: Likewise. ++ * gcc.target/mips/movcc-1.c: Likewise. ++ * gcc.target/mips/nmadd-3.c: Likewise. ++ ++2008-11-24 Catherine Moore ++ ++ gcc/testsuite/ ++ * gcc.target/mips/mips-nonpic/mips-nonpic.exp: Don't run for mips16. ++ ++2008-11-24 Nathan Froyd ++ ++ gcc/ ++ * config/rs6000/rs6000.c (rs6000_savres_strategy): Always use ++ inline saves and restores when compiling position-independent code. ++ ++2008-11-24 Nathan Froyd ++ ++ gcc/ ++ * config.gcc (powerpc-*-elf*): Only include e500mc-specific files ++ if --enable-powerpc-e500mc-elf was specified. ++ ++2008-11-20 Maxim Kuvyrkov ++ ++ PR35018 ++ ++ gcc/ ++ * config/m68k/m68k.md (addsi_lshrsi_31): Rename to ++ addsi_lshrsi_31_m68k, don't use it for ColdFire. ++ Add (define_expand "addsi_lshrsi_31"). ++ (addsi_lshrsi_31_cf): New, almost identical copy of extendsidi2_m68k. ++ ++ gcc/testsuite/ ++ * gcc.target/m68k/pr35018.c: New. ++ ++2008-11-20 Joseph Myers ++ ++ gcc/ ++ * config/arm/thumb2.md (thumb2_casesi_internal, ++ thumb2_casesi_internal_pic): Use earlyclobber for scratch operand ++ 4. ++ ++2008-11-19 Andrew Stubbs ++ ++ Issue #3283 ++ ++ gcc/ ++ PR target/36133 ++ * config/m68k/m68k.h (CC_OVERFLOW_UNUSABLE, CC_NO_CARRY): New defines. ++ * config/m68k/m68k.c (notice_update_cc): Set cc_status properly for ++ shift instructions. ++ * config/m68k/m68k.md: Adjust all conditional branches that use the ++ carry and overflow flags so they understand CC_OVERFLOW_UNUSABLE. ++ ++ gcc/testsuite/ ++ PR target/36133 ++ * gcc.target/m68k/pr36133.c: New test. ++ ++2008-11-17 Nathan Froyd ++ ++ gcc/ ++ * config/rs6000/rs6000.c (rs6000_emit_epilogue): Adjust ++ computation of restore_lr. Duplicate restoration of LR and ++ execute the appropriate one depending on whether GPRs are being ++ restored inline. ++ ++2008-11-17 Catherine Moore ++ ++ * config/mt-sde: Revert last patch. ++ ++2008-11-17 Paul Brook ++ ++ gcc/ ++ * config/arm/t-symbian (MULTILIB_EXCEPTIONS, MULTILIB_MATCHES, ++ MULTILIB_ALIASES): Define. ++ ++2008-11-17 Nathan Froyd ++ ++ gcc/ ++ * config/rs6000/rs6000.c (rs6000_savres_routine_sym): Fix ++ computation for cache selector. Mark the generated symbol as a ++ function. ++ (rs6000_emit_prologue): Correct condition. ++ * config/rs6000/rs6000.md (*save_gpregs_): Use explicit ++ match for register 11. ++ (*save_fpregs_): Likewise. ++ (*restore_gpregs_): Likewise. ++ (*return_and_restore_gpregs_): Likewise. ++ (*return_and_restore_fpregs_): Likewise. ++ * config/rs6000/spe.md (*save_gpregs_spe): Use explicit match for ++ register 11. ++ (*restore_gpregs_spe): Likewise. ++ (*return_and_restore_gpregs_spe): Likewise. ++ ++2008-11-14 Catherine Moore ++ ++ * config/mt-sde (CFLAGS_FOR_TARGET): Add -mexplicit-relocs. ++ (CXXFLAGS_FOR_TARGET): Likewise. 
++ ++2008-11-14 Maxim Kuvyrkov ++ Andrew Stubbs ++ Gunnar Von Boehn ++ ++ Issue #3284 ++ ++ gcc/ ++ PR target/36134 ++ * config/m68k/m68k.md (addsi3_5200): Add a new alternative preferring ++ the shorter LEA insn over ADD.L where possible. ++ ++ gcc/testsuite/ ++ PR target/36134 ++ * gcc.target/m68k/pr36134.c: New test. ++ ++2008-11-13 Joseph Myers ++ ++ gcc/ ++ * config/mips/sicortex.h, config/mips/t-sicortex: New. ++ * config.gcc (mips64el-sicortex-linux-gnu): Use these config ++ files. ++ ++2008-11-13 Nathan Froyd ++ ++ gcc/ ++ * config.gcc (powerpc*-elf*): Configure for e500mc. ++ * config/rs6000/t-ppc-e500mc: New. ++ * config/rs6000/e500mc.h: New. ++ ++2008-11-12 Nathan Sidwell ++ ++ Issue 4221/1 ++ * release-notes-csl.xml: Document removal of default -mfix-ice9a. ++ ++2008-11-11 Joseph Myers ++ ++ gcc/ ++ * function.c (alignment_for_aligned_arrays): Use floor_log2 ++ instead of CLZ_HWI. ++ ++2008-11-10 Nathan Froyd ++ ++ Issue #4082 ++ ++ gcc/ ++ * config/rs6000/rs6000.c (rs6000_legitimize_address): Check for ++ non-word-aligned REG+CONST addressing. ++ ++ gcc/testsuite/ ++ * gcc.target/powerpc/20081104-1.c: New test. ++ ++2008-11-07 Julian Brown ++ ++ Issue #4085 ++ ++ gcc/ ++ * combine.c (find_split_point): Disable patch from PR27971. ++ ++2008-11-06 Kazu Hirata ++ ++ Issue 4029 ++ gcc/ ++ Backport: ++ 2008-11-06 Kazu Hirata ++ PR target/35574 ++ * config/sparc/predicates.md (const_double_or_vector_operand): ++ New. ++ * config/sparc/sparc.c (sparc_extra_constraint_check): Handle the ++ 'D' constraint. ++ * config/sparc/sparc.h: Document the 'D' constraint. ++ * config/sparc/sparc.md (*movdf_insn_sp32_v9, *movdf_insn_sp64): ++ Use the 'D' constraint in addition to 'F' in some alternatives. ++ (DF splitter): Generalize for V64mode. ++ * doc/md.texi (SPARC): Document the 'D' constraint. ++ ++ * release-notes-csl.xml: Add a release note for the fix above. ++ ++2008-11-06 Andrew Stubbs ++ ++ Issue #3120 ++ ++ gcc/ ++ * release-notes-csl.xml: -pg support for ARM EABI. ++ ++2008-10-29 Andrew Stubbs ++ ++ Issue 3120 ++ ++ gcc/ ++ * config/arm/linux-eabi.h (ARM_FUNCTION_PROFILER): Delete. ++ (SUBTARGET_FRAME_POINTER_REQUIRED): Delete. ++ * config/arm/bpabi.h (PROFILE_HOOK): New undef. ++ ++ Back-port from mainline: ++ 2008-10-08 Paul Brook ++ gcc/ ++ * config/arm/bpabi.h (ARM_FUNCTION_PROFILER): Define new EABI ++ compatible profiler (__gnu_mcount_nc). ++ (SUBTARGET_FRAME_POINTER_REQUIRED): Define. ++ ++2008-10-27 Catherine Moore ++ ++ Issue #4105 ++ ++ Backport: ++ ++ gcc/ ++ 2008-10-22 Chao-ying Fu ++ ++ * config/mips/mips.opt (msmartmips): Accept -mno-smartmips. ++ ++2008-10-24 Maxim Kuvyrkov ++ ++ gcc/ ++ * config/m68k/m68k.c (m68k_output_dwarf_dtprel): Use .long instead of ++ .word for TLS debug information. ++ ++2008-10-24 Nathan Froyd ++ ++ gcc/ ++ * config/rs6000/rs6000.c (no_global_regs_above): Fix precedence ++ problem. ++ ++2008-10-23 Kazu Hirata ++ ++ Issue 3852 ++ gcc/ ++ * config/arm/t-asa (MULTILIB_EXTRA_OPTS): New. ++ ++2008-10-22 Paul Brook ++ ++ gcc/ ++ * config/arm/t-uclinux-eabi (MULTILIB_EXCEPTIONS): Exclude bogus ARM ++ multilib. ++ ++2008-10-21 Paul Brook ++ ++ gcc/ ++ * doc/invoke.texi: Document -mfix-cortex-m3-ldrd. ++ * config/arm/arm.c (arm_override_options): Set fix_cm3_ldrd ++ if Cortex-M3 cpu is selected. ++ (output_move_double): Avoid overlapping base register and first ++ destination register when fix_cm3_ldrd. ++ * config/arm/arm.opt: Add -mfix-cortex-m3-ldrd. ++ * config/arm/t-cs-eabi: Add -mfix-cortex-m3-ldrd to Thumb-2 multilib. 
++ * config/arm/t-arm-elf: Ditto. ++ * config/arm/t-uclinux-eabi: Ditto. ++ ++2008-10-21 Paul Brook ++ ++ gcc/ ++ * config/arm/arm.md (consttable_4): Handle (high ...). ++ ++2008-10-16 Nathan Froyd ++ ++ gcc/ ++ * config.gcc (powerpc-*-eabi*): Add rs6000/t-cs-eabi when ++ --enable-extra-sgxx-multilibs is passed to configure. ++ * config/rs6000/t-ppcgas (MULTILIB_OPTIONS): Remove te500mc. ++ (MULTILIB_DIRNAMES): Likewise. ++ (MULTILIB_EXCEPTIONS): Likewise. ++ * config/rs6000/t-cs-eabi: New file. ++ ++2008-10-16 Julian Brown ++ ++ Issue #4039 ++ ++ gcc/ ++ * config/arm/neon.md (movmisalign): Use expander/unnamed insn ++ for both D & Q variants. Don't permit both operands to be mems. ++ * release-notes-csl.xml (Misaligned NEON memory accesses): Add note. ++ ++2008-10-15 Catherine Moore ++ ++ gcc/testsuite/ ++ * gcc-target/mips/octeon-1.c (dg-mips-options): Use -mno-abicalls. ++ * gcc-target/mips/octeon-5.c (dg-mips-options): Likewise. ++ * gcc-target/mips/octeon-6.c (dg-mips-options): Likewise. ++ * gcc-target/mips/octeon-18.c (dg-mips-options): Likewise. ++ * gcc-target/mips/octeon-19.c (dg-mips-options): Likewise. ++ * gcc-target/mips/octeon-23.c (dg-mips-options): Likewise. ++ * gcc-target/mips/octeon-28.c (dg-mips-options): Likewise. ++ * gcc-target/mips/octeon-34.c (dg-mips-options): Likewise. ++ * gcc-target/mips/octeon-37.c (dg-mips-options): Likewise. ++ * gcc-target/mips/octeon-43.c (dg-mips-options): Likewise. ++ * gcc-target/mips/octeon-44.c (dg-mips-options): Likewise. ++ * gcc-target/mips/octeon-49.c (dg-mips-options): Likewise. ++ * gcc-target/mips/octeon-54.c (dg-mips-options): Likewise. ++ ++2008-10-14 Sandra Loosemore ++ ++ Issue #4017 ++ ++ * release-notes-csl.xml (Linker script option syntax): New note. ++ ++ gcc/ ++ * config.gcc (powerpc-*): Make t-ppcgas imply usegas.h. ++ * config/svr4.h (SVR4_ASM_SPEC): New. ++ (ASM_SPEC): Inherit from SVR4_ASM_SPEC. ++ * config/rs6000/sysv4.h (ASM_SPEC): Inherit from SVR4_ASM_SPEC. ++ ++ gcc/doc/ ++ * invoke.texi (Option Summary): Add -T to linker options. ++ (Link Options): Document -T. ++ ++2008-10-13 Nathan Froyd ++ ++ gcc/ ++ * config/rs6000/rs6000.c (rs6000_file_start): Output gnu ++ attribute for struct return convention. ++ ++2008-10-13 Paul Brook ++ ++ gcc/ ++ * config/arm/arm.h (fputype): Remove stray comma. ++ ++2008-10-13 Andrew Stubbs ++ ++ Issue #3884 ++ ++ gcc/ ++ * doc/invoke.texi (PowerPC Options): -meabi option no longer places ++ __eabi function in main. ++ ++2008-10-12 Mark Mitchell ++ ++ Issue #3224 ++ * release-notes-csl.xml: Mention OpenMP add-on. ++ ++2008-10-12 Catherine Moore ++ ++ Issue # 3903 ++ ++ Backport: ++ ++ 2008-07-28 Ilie Garbacea ++ Chao-ying Fu ++ ++ * configure.tgt: Enable futex for MIPS. ++ * config/linux/mips/futex.h: New file. ++ ++2008-10-12 Catherine Moore ++ ++ gcc/ ++ * config/mips/mips.opt (muclibc): New option entry. ++ * config/mips/mips.c (mips_override_options): Disable ++ __thread support when the -muclibc option is used. ++ ++2008-10-11 Maxim Kuvyrkov ++ ++ M68K NPTL support. ++ gcc/ ++ * configure.ac (m68k-*-*): Check if binutils support TLS. ++ * configure: Regenerate. ++ * config/m68k/predicates.md (symbolic_operand): Handle UNSPECs. ++ * config/m68k/m68k.md (UNSPEC_TLS): New constant. ++ (movsi): Handle TLS symbols. ++ * config/m68k/m68k-protos.h (m68k_legitimize_tls_address): Declare. ++ (m68k_tls_referenced_p, m68k_tls_mentioned_p): Declare. ++ (m68k_legitimize_address): Declare. ++ * config/m68k/m68k.c (ggc.h): Include. ++ (m68k_output_dwarf_dtprel): Implement hook. 
++ (TARGET_HAVE_TLS, TARGET_ASM_OUTPUT_DWARF_DTPREL): Define. ++ (m68k_expand_prologue): Load GOT pointer when function needs it. ++ (m68k_illegitimate_symbolic_constant_p): Handle TLS symbols. ++ (m68k_legitimate_constant_address_p): Same. ++ (legitimize_pic_address): Same. ++ (enum m68k_tls_reloc): New. ++ (m68k_tls_get_addr, m68k_get_tls_get_addr, m68k_get_gp) ++ (m68k_call_tls_get_addr, m68k_read_tp, m68k_get_m68k_read_tp) ++ (m68k_call_m68k_read_tp): Helper variables and functions for ... ++ (m68k_legitimize_tls_address): Handle TLS references. ++ (m68k_tls_symbol_p, m68k_tls_referenced_p_1, m68k_tls_referenced_p) ++ (m68k_tls_mentioned_p): New functions. ++ (m68k_legitimize_address): Rewrite LEGITIMIZE_ADDRESS macro, handle ++ TLS symbols. ++ (m68k_get_tls_decoration): New static function. ++ (m68k_output_addr_const_extra): Handle UNSPEC_TLS. ++ (m68k_output_dwarf_dtprel): Implement hook. ++ (gt-m68k.h): Include. ++ * config/m68k/m68k.h (LEGITIMATE_PIC_OPERAND_P): Support TLS. ++ (LEGITIMATE_ADDRESS): Move logic to m68k.c:m68k_legitimize_address. ++ ++2008-10-11 Maxim Kuvyrkov ++ ++ gcc/ ++ * config/m68k/lb1sf68.asm (PICCALL, PICJUMP): Use GOT instead of ++ PC-relative addressing when compiling for uclinux PIC. ++ ++2008-10-09 Catherine Moore ++ ++ Issue #3312 ++ ++ gcc/ ++ * config/mips/mips.h ( DSP_CTRL_REG_FIRST): Define. ++ (DSP_CTRL_REG_LAST): Define. ++ * config/mips/mips.c (mips_conditional_register_usage): Handle ++ DSP registers. ++ ++2008-10-08 Maxim Kuvyrkov ++ ++ * release-notes-csl.xml: Fix typo. ++ ++2008-10-08 Nathan Sidwell ++ Maxim Kuvyrkov ++ ++ * release-notes-csl.xml (Shared Libraries bug fix): New. ++ ++ gcc/ ++ * config/m68k/lb1sf68.asm (__cmpdf_internal, __cmpsf_internal): Hide. ++ (__cmpdf, __cmpsf): Use PIC call sequence. ++ ++2008-10-07 Nathan Froyd ++ ++ Issue #3988 ++ ++ * release-notes-csl.xml (Dynamic libraries and -Os bug fix): New. ++ ++ gcc/ ++ * config/rs6000/ppc-asm.h (HIDDEN_FUNC): New macro. ++ * config/rs6000/crtresfpr.asm, config/rs6000/crtresgpr.asm, ++ config/rs6000/crtresxfpr.asm, config/rs6000/crtresxgpr.asm, ++ config/rs6000/crtsavfpr.asm, config/rs6000/crtsavgpr.asm, ++ config/rs6000/e500crtres32gpr.asm, ++ config/rs6000/e500crtres64gpr.asm, ++ config/rs6000/e500crtres64gprctr.asm, ++ config/rs6000/e500crtrest32gpr.asm, ++ config/rs6000/e500crtrest64gpr.asm, ++ config/rs6000/e500crtresx32gpr.asm, ++ config/rs6000/e500crtresx64gpr.asm, ++ config/rs6000/e500crtsav32gpr.asm, ++ config/rs6000/e500crtsav64gpr.asm, ++ config/rs6000/e500crtsav64gprctr.asm, ++ config/rs6000/e500crtsavg32gpr.asm, ++ config/rs6000/e500crtsavg64gpr.asm, ++ config/rs6000/e500crtsavg64gprctr.asm: Use it. ++ ++2008-10-07 Nathan Sidwell ++ ++ * release-notes-csl.xml: Document it. ++ ++ gcc/ ++ * doc/invoke.texi (MIPS Options): Add ice9 arch. ++ * config/mips/mips.c (mips_cpu_info_table): Add ice9 arch. ++ ++2008-10-03 Catherine Moore ++ ++ gcc/testsuite/ ++ * gcc.target/mips/fix-ice9a-1.c: Disable for soft-float ++ multilibs. ++ * gcc.target/mips/fix-ice9a-1.c: Likewise. ++ ++2008-10-03 Kazu Hirata ++ ++ Backport: ++ gcc/testsuite/ ++ 2008-09-23 Eric Botcazou ++ ++ * gcc.dg/pragma-init-fini.c: Use dg-warning in lieu of dg-error. ++ * gcc.dg/pragma-align-2.c: Likewise. ++ * gcc.dg/format/cmn-err-1.c: Likewise. ++ ++2008-10-02 Catherine Moore ++ ++ gcc/testsuite/ ++ * gcc.target/mips/lazy-binding-1.c: Compile with -fpic. ++ ++2008-10-02 Maciej W. 
Rozycki ++ ++ Issue #3673 ++ gcc/testsuite/ ++ * lib/target-supports.exp ++ (check_effective_target_arm_iwmmxt_ok): New procedure. ++ * gcc.dg/arm-mmx-1.c: Only run if arm_iwmmxt_ok. ++ ++2008-09-29 Joseph Myers ++ ++ Backport: ++ ++ gcc/ ++ 2008-09-29 Joseph Myers ++ * ifcvt.c (noce_emit_store_flag): If using condition from original ++ jump, reverse it if if_info->cond was reversed. ++ ++2008-09-29 Maxim Kuvyrkov ++ ++ Issue #3922 ++ * release-notes-csl.xml (Code generation bug fix): New. ++ gcc/ ++ * config/m68k/m68k.md (extendsidi2): Rename to extendsidi2_m68k, ++ don't use it for ColdFire. Add (define_expand "extendsidi2"). ++ (extendsidi2_cf): New, almost identical copy of extendsidi2_m68k. ++ gcc/testsuite/ ++ * gcc.c-torture/compile/20080929-1.c: New. ++ ++2008-09-29 Maxim Kuvyrkov ++ ++ * release-notes-csl.xml (ColdFire M54455 support): Fix target. ++ ++2008-09-25 Sandra Loosemore ++ ++ Issue #3208 ++ ++ * release-notes-csl.xml (Half-precision floating point): New note. ++ ++2008-09-25 Paul Brook ++ ++ gcc/ ++ * config/arm/fp16.c (__gnu_f2h_ieee, __gnu_h2f_ieee): Enable on ++ ARMv6-M. ++ * config/arm/t-bpabi (LIB2FUNCS_EXTRA): Remove fp16.c. ++ (LIB2FUNCS_STATIC_EXTRA): Add fp16.c. ++ * config/arm/t-symbian (LIB2FUNCS_EXTRA): Rename... ++ (LIB2FUNCS_STATIC_EXTRA): ... to this. ++ * config/arm/t-arm-softfp: Remove HFmode conversions. ++ * config/soft-fp/extendhfsf2.c: Revert HFmode suport. ++ * config/soft-fp/truncsfhf2.c: Ditto. ++ * config/soft-fp/README: Ditto. ++ * config/soft-fp/half.h: Ditto. ++ ++2008-09-25 Sandra Loosemore ++ ++ gcc/testsuite/ ++ * gcc.dg/torture/arm-fp16-ops.h: Fix bogus tests. ++ * g++.dg/ext/arm-fp16/arm-fp16-ops.h: Ditto. ++ ++2008-09-25 Julian Brown ++ ++ gcc/ ++ * config/arm/arm.c (arm_hard_regno_mode_ok): Allow 4-word quantities ++ in core registers. Update comment. ++ ++2008-09-25 Nathan Sidwell ++ ++ * release-notes-csl.xml: Document ice9a option. ++ ++2008-09-25 Julian Brown ++ ++ Issue #3800 ++ ++ gcc/testsuite/ ++ * gcc.target/arm/eabi1.c (__eabi_uread4, __eabi_uwrite4) ++ (__eabi_uread8, __eabi_uwrite8): Change spellings of declarations ++ to... ++ (__aeabi_uread4, __aeabi_uwrite4, __aeabi_uread8, __aeabi_uwrite8): ++ These. ++ ++2008-09-24 Paul Brook ++ ++ gcc/ ++ * config/arm/t-arm-softfp (softfp_extensions): Add hfsf. ++ (softfp_truncations): Add sfhf. ++ * config/arm/sfp-machine.h (_FP_NANFRAC_H, _FP_NANSIGN_H): Define. ++ * config/arm/fp16.c: New file. ++ * config/arm/t-bpabi (LIB2FUNCS_EXTRA): Add fp16.c. ++ * config/arm/t-symbian (LIB2FUNCS_EXTRA): Add fp16.c. ++ * config/soft-fp/extendhfsf2.c: New file. ++ * config/soft-fp/truncsfhf2.c: New file. ++ * config/soft-fp/half.h: New file. ++ * config/soft-fp/README: HFmode routines do not come from gcc. ++ ++2008-09-22 Daniel Gutson ++ Nathan Sidwell ++ Maciej W. Rozycki ++ ++ Issue #3634 ++ gcc/ ++ * config.gcc (all_defaults): Add fix-ice9a. ++ * config/mips/mips.c (mips_conditional_register_usage): Add $f30 ++ and $f31 as appropriate as fixed registers. ++ * config/mips/mips.h (OPTION_DEFAULT_SPECS): Add -mfix-ice9a ++ handling. ++ (ASM_SPEC): Likewise. ++ * config/mips/mips.md (ice9a_stallnops): New mode attribute. ++ (ice9a_round): Likewise. ++ (ice9a_length_stall): Likewise. ++ (ice9a_length_round): Likewise. ++ (ice9a_length_both): Likewise. ++ (*mul3): Change condition. ++ (*mul3_fix_ice9a): New. ++ (*madd): Change condition. ++ (*madd_ice9a): New. ++ (*msub): Change condition. ++ (*msub_ice9a): New. ++ (*nmadd): Change condition. ++ (*nmadd_fastmath): Likewise. ++ (*nmadd_ice9a): New. 
++ (*nmadd_fastmath_ice9a): New. ++ (*nmsub): Change condition. ++ (*nmsub_fastmath): Likewise. ++ (*nmsub_ice9a): New. ++ (*nmsub_fastmath_ice9a): Likewise. ++ (*recip3): Change condition and definition. Move the SB1 ++ fix to... ++ (*recip3_fix_sb1): ... this new pattern. ++ (*recip3_fix_ice9a): New. ++ (sqrt2): Change from define_insn to define_expand. Move ++ the SB1 fix to... ++ (*sqrt2): New. ++ (*sqrt2_fix_sb1): ... this new pattern. ++ (*sqrt2_fix_ice9a): New. ++ (*rsqrta): Change condition and definition. Move the SB1 ++ fix to... ++ (*rsqrta_fix_sb1): ... this new pattern. ++ (*rsqrta_fix_ice9a): New. ++ (*rsqrtb): Likewise *rsqrta. ++ (*rsqrtb_fix_sb1): Likewise *rsqrta_fix_sb1. ++ (*rsqrtb_fix_ice9a): New. ++ * config/mips/mips.opt (mfix-ice9a): New option. ++ * doc/invoke.texi (-mno-fix-ice9a): New option. ++ (-mfix-ice9a): Likewise. ++ ++ gcc/testsuite/ ++ * gcc.target/mips/fix-ice9a.h: New file. ++ * gcc.target/mips/fix-ice9a-1.c: Likewise. ++ * gcc.target/mips/fix-ice9a-2.c: Likewise. ++ ++2008-09-23 Sandra Loosemore ++ ++ Issue #3208 ++ ++ gcc/ ++ * config/arm/arm.c (arm_init_libfuncs): Add NULL entries for ++ HFmode arithmetic functions. ++ (arm_override_options): Call sorry for fp16 and no ldrh. ++ (arm_legitimate_index_p): Treat HFmode like HImode. ++ (coproc_secondary_reload_class): Special-case HFmode. ++ * config/arm/arm.md (floatsihf2): Use emit_move_insn. ++ (floatdihf2): Likewise. ++ (truncdfhf2): Likewise. ++ (*thumb1_movhf): Fix backwards operands to strh. ++ ++2008-09-23 Sandra Loosemore ++ ++ Issue #3208 ++ ++ gcc/testsuite/ ++ * gcc.target/arm/fp16-compile-alt-10.c: Add -std=gnu99 to options. ++ * gcc.target/arm/fp16-compile-alt-11.c: Likewise. ++ * gcc.target/arm/fp16-compile-ieee-10.c: Likewise. ++ * gcc.target/arm/fp16-compile-ieee-11.c: Likewise. ++ * gcc.target/arm/fp16-compile-exprtype.c: New. ++ * gcc.target/arm/fp16-builtins-1.c: New. ++ * gcc.target/arm/fp16-unprototyped-1.c: New. ++ * gcc.target/arm/fp16-unprototyped-2.c: New. ++ * gcc.target/arm/fp16-variadic-1.c: New. ++ * gcc.target/arm/fp16-rounding-alt-1.c: New. ++ * gcc.target/arm/fp16-rounding-ieee-1.c: New. ++ * gcc.dg/torture/arm-fp16-int-convert-alt.c: New. ++ * gcc.dg/torture/arm-fp16-int-convert-ieee.c: New. ++ * gcc.dg/torture/arm-fp16-ops.h: New. ++ * gcc.dg/torture/arm-fp16-ops-1.c: New. ++ * gcc.dg/torture/arm-fp16-ops-2.c: New. ++ * gcc.dg/torture/arm-fp16-ops-3.c: New. ++ * gcc.dg/torture/arm-fp16-ops-4.c: New. ++ * gcc.dg/torture/arm-fp16-ops-5.c: New. ++ * gcc.dg/torture/arm-fp16-ops-6.c: New. ++ * gcc.dg/torture/arm-fp16-ops-7.c: New. ++ * gcc.dg/torture/arm-fp16-ops-8.c: New. ++ * g++.dg/ext/arm-fp16/arm-fp16-ops.h: New. ++ * g++.dg/ext/arm-fp16/arm-fp16-ops-1.C: New. ++ * g++.dg/ext/arm-fp16/arm-fp16-ops-2.C: New. ++ * g++.dg/ext/arm-fp16/arm-fp16-ops-3.C: New. ++ * g++.dg/ext/arm-fp16/arm-fp16-ops-4.C: New. ++ * g++.dg/ext/arm-fp16/arm-fp16-ops-5.C: New. ++ * g++.dg/ext/arm-fp16/arm-fp16-ops-6.C: New. ++ * g++.dg/ext/arm-fp16/arm-fp16-ops-7.C: New. ++ * g++.dg/ext/arm-fp16/arm-fp16-ops-8.C: New. ++ ++2008-09-23 Sandra Loosemore ++ ++ gcc/ ++ * optabs.c (prepare_float_lib_cmp): Test that the comparison, ++ swapped, and reversed optabs exist before trying to use them. ++ ++2008-09-23 Julian Brown ++ ++ gcc/ ++ * config/arm/arm.c (arm_override_options): Override alignments if ++ tuning for Cortex-A8. ++ (create_fix_barrier, arm_reorg): If aligning to jumps or loops, ++ make labels have a size. ++ * config/arm/arm.md (VUNSPEC_ALIGN16, VUNSPEC_ALIGN32): New constants. 
++ (align_16, align_32): New patterns. ++ ++2008-09-23 Julian Brown ++ ++ gcc/ ++ * config/arm/vfp.md (*arm_movsi_vfp, *thumb2_movsi_vfp) ++ (*arm_movdi_vfp, *thumb2_movdi_vfp, *movsf_vfp, *thumb2_movsf_vfp) ++ (*movdf_vfp, *thumb2_movdf_vfp, *movsfcc_vfp, *thumb2_movsfcc_vfp) ++ (*movdfcc_vfp, *thumb2_movdfcc_vfp): Add neon_type. ++ * config/arm/arm.md (neon_type): Update comment. ++ ++2008-09-23 Julian Brown ++ ++ gcc/ ++ * config/arm/arm.md (movsi): Don't split symbol refs here. ++ (define_split): New. ++ ++2008-09-22 Maxim Kuvyrkov ++ Paul Brook ++ ++ gcc/ ++ * config/m68k/lb1sf68.asm: Add GNU-stack annotation to avoid ++ executable stack. ++ ++2008-09-18 Joseph Myers ++ ++ Backport: ++ ++ gcc/ ++ 2008-09-17 Joseph Myers ++ * expr.c (emit_group_store): Do not shift before moving via a ++ stack slot. ++ ++ 2008-08-13 H.J. Lu ++ PR middle-end/36701 ++ * expr.c (emit_group_store): Allocate stack temp with the ++ largest alignment when copying from register to stack. ++ ++ 2008-09-02 H.J. Lu ++ * expr.c (emit_group_store): Don't assert stack temp mode size. ++ ++2008-09-15 Joseph Myers ++ ++ gcc/ ++ * config/mips-octeon-elf.h (TARGET_OS_CPP_BUILTINS): Remove. ++ ++2008-09-11 Mark Mitchell ++ ++ Issue #3606 ++ * release-notes-csl.xml: Document dllexport fix. ++ ++ gcc/ ++ * tree.c (handle_dll_attribute): Mark dllexport'd inlines as ++ non-external. ++ gcc/cp ++ * decl2.c (decl_needed_p): Consider dllexport'd functions needed. ++ * semantics.c (expand_or_defer_fn): Similarly. ++ gcc/testsuite/ ++ * gcc.dg/dll-6.c: New test. ++ * gcc.dg/dll-6a.c: Likewise. ++ * gcc.dg/dll-7.c: Likewise. ++ * gcc.dg/dll-7a.c: Likewise. ++ * g++.dg/ext/dllexport2.C: Likewise. ++ * g++.dg/ext/dllexport2a.cc: Likewise. ++ ++2008-09-12 Nathan Froyd ++ ++ Backport from mainline: ++ ++ gcc/testsuite/ ++ 2008-08-25 Janis Johnson ++ * gcc.dg/Wstrict-aliasing-bogus-ref-all-2.c: Ignore a warning. ++ ++2008-09-11 Joseph Myers ++ ++ Backport: ++ ++ gcc/testsuite/ ++ 2008-09-11 Joseph Myers ++ * gcc.dg/builtins-8.c: Condition cbrt test on HAVE_C99_RUNTIME. ++ ++ 2008-09-11 Joseph Myers ++ * gcc.target/i386/sse5-haddX.c, gcc.target/i386/sse5-hsubX.c: ++ Avoid intN_t types. ++ ++ 2008-09-11 Joseph Myers ++ * lib/compat.exp, gcc.dg/compat/struct-layout-1.exp, ++ g++.dg/compat/struct-layout-1.exp: Use .exe extension for compat ++ test executables. ++ * gcc.dg/compat/struct-layout-1_generate.c, ++ g++.dg/compat/struct-layout-1_generate.c: Convert backslash to ++ slash in srcdir for dg-options string. ++ ++2008-09-11 Nathan Sidwell ++ ++ gcc/ ++ * config.gcc (mips*-sde-elf*): Always apply sdemtk parts. Apply ++ t-sdelib only when not building newlib. ++ * config/mips/t-sdemtk: Move sdelib specific pieces to ... ++ * config/mips/t-sdelib: ... here. New file. ++ ++2008-09-10 Daniel Jacobowitz ++ ++ Issue #3406 ++ * release-notes-csl.xml: Document -fpie fix. ++ ++ gcc/ ++ * config/mips/linux.h (SUBTARGET_ASM_SPEC): Add -fpie and -fPIE. ++ * config/mips/linux64.h (SUBTARGET_ASM_SPEC): Likewise. ++ ++2008-09-09 Sandra Loosemore ++ ++ Issue #3208 ++ ++ gcc/testsuite/ ++ * gcc.target/arm/fp16-compile-alt-1.c: New. ++ * gcc.target/arm/fp16-compile-alt-2.c: New. ++ * gcc.target/arm/fp16-compile-alt-3.c: New. ++ * gcc.target/arm/fp16-compile-alt-4.c: New. ++ * gcc.target/arm/fp16-compile-alt-5.c: New. ++ * gcc.target/arm/fp16-compile-alt-6.c: New. ++ * gcc.target/arm/fp16-compile-alt-7.c: New. ++ * gcc.target/arm/fp16-compile-alt-8.c: New. ++ * gcc.target/arm/fp16-compile-alt-9.c: New. ++ * gcc.target/arm/fp16-compile-alt-10.c: New. 
++ * gcc.target/arm/fp16-compile-alt-11.c: New. ++ * gcc.target/arm/fp16-compile-ieee-1.c: New. ++ * gcc.target/arm/fp16-compile-ieee-2.c: New. ++ * gcc.target/arm/fp16-compile-ieee-3.c: New. ++ * gcc.target/arm/fp16-compile-ieee-4.c: New. ++ * gcc.target/arm/fp16-compile-ieee-5.c: New. ++ * gcc.target/arm/fp16-compile-ieee-6.c: New. ++ * gcc.target/arm/fp16-compile-ieee-7.c: New. ++ * gcc.target/arm/fp16-compile-ieee-8.c: New. ++ * gcc.target/arm/fp16-compile-ieee-9.c: New. ++ * gcc.target/arm/fp16-compile-ieee-10.c: New. ++ * gcc.target/arm/fp16-compile-ieee-11.c: New. ++ * gcc.target/arm/fp16-compile-none-1.c: New. ++ * gcc.target/arm/fp16-param-1.c: New. ++ * gcc.target/arm/fp16-return-1.c: New. ++ * gcc.target/arm/fp16-compile-vcvt.c: New. ++ * gcc.dg/torture/arm-fp16-compile-assign.c: New. ++ * gcc.dg/torture/arm-fp16-compile-convert.c: New. ++ * g++.dg/ext/arm-fp16/fp16-overload-1.C: New. ++ * g++.dg/ext/arm-fp16/fp16-return-1.C: New. ++ * g++.dg/ext/arm-fp16/fp16-param-1.C: New. ++ * g++.dg/ext/arm-fp16/fp16-mangle-1.C: New. ++ ++2008-09-09 Sandra Loosemore ++ ++ Issue #3208 ++ ++ gcc/ ++ * doc/tm.texi (Misc): Document TARGET_INVALID_PARAMETER_TYPE, ++ TARGET_INVALID_RETURN_TYPE, TARGET_PROMOTED_TYPE, and ++ TARGET_CONVERT_TO_TYPE. ++ * doc/invoke.texi (Option Summary): List -mfp16-format. ++ (ARM Options): List neon-fp16 as -mfpu value. Document -mfp16-format. ++ * hooks.c (hook_tree_const_tree_null): Define. ++ * hooks.h (hook_tree_const_tree_null): Declare. ++ * target.h (struct gcc_target): Add invalid_parameter_type, ++ invalid_return_type, promoted_type, and convert_to_type fields. ++ * target-def.h: (TARGET_INVALID_PARAMETER_TYPE): Define. ++ (TARGET_INVALID_RETURN_TYPE): Define. ++ (TARGET_PROMOTED_TYPE): Define. ++ (TARGET_CONVERT_TO_TYPE): Define. ++ (TARGET_INITIALIZER): Update for new fields. ++ * fold-const.c (fold_convert_const_real_from_real): Check for ++ overflow. ++ * real.c (encode_ieee_half): Define. ++ (decode_ieee_half): Define. ++ (ieee_half_format): Define. ++ (arm_half_format): Define. ++ * real.h (ieee_half_format): Declare. ++ (arm_half_format): Declare. ++ * c-decl.c (grokdeclarator): Check targetm.invalid_return_type. ++ (grokparms): Check targetm.invalid_parameter_type. ++ * c-typeck.c (default_conversion): Check targetm.promoted_type. ++ * c-convert.c (convert): Check targetm.convert_to_type. ++ * cp/typeck.c (default_conversion): Check targetm.promoted_type. ++ * cp/decl.c (grokdeclarator): Check targetm.invalid_return_type. ++ (grokparms): Check targetm.invalid_parameter_type. ++ * cp/cvt.c (ocp_convert): Check targetm.convert_to_type. ++ (build_expr_type_conversion): Check targetm.promoted_type. ++ * config/arm/arm.c: Include intl.h. ++ (TARGET_INVALID_PARAMETER_TYPE): Redefine. ++ (TARGET_INVALID_RETURN_TYPE): Redefine. ++ (TARGET_PROMOTED_TYPE): Redefine. ++ (TARGET_CONVERT_TO_TYPE): Redefine. ++ (arm_fp16_format): Define. ++ (all_fpus): Add entry for neon-fp16. ++ (fp_model_for_fpu): Likewise. ++ (struct fp16_format): Declare. ++ (all_fp16_formats): Define. ++ (arm_init_libfuncs): Add entries for HFmode conversions. ++ (arm_override_options): Set arm_fp16_format. ++ (thumb1_legitimate_address_p): Make it recognize HFmode constants. ++ (arm_print_operand): Add 'z' specifier for vld1.16/vst1.16. ++ (arm_hard_regno_mode_ok): Allow HFmode values in VFP registers. ++ (arm_init_fp16_builtins): New. ++ (arm_init_builtins): Call it. ++ (arm_invalid_parameter_type): New. ++ (arm_invalid_return_type): New. ++ (arm_promoted_type): New. 
++ (arm_convert_to_type). ++ (arm_file_start): Deal with neon-fp16 as fpu_name. Emit tag for fp16 ++ format. ++ (arm_mangle_type): Mangle __fp16 as "Dh". ++ * config/arm/arm.h (TARGET_VFPD32): Make it know about ++ FPUTYPE_NEON_FP16. ++ (TARGET_NEON_FP16): New. ++ (TARGET_NEON): Make it know about FPUTYPE_NEON_FP16. ++ (enum fputype): Add FPUTYPE_NEON_FP16. ++ (enum arm_fp16_format_type): Declare. ++ (arm_fp16_format): Declare. ++ (LARGEST_EXPONENT_IS_NORMAL): Define. ++ * config/arm/arm-modes.def (HFmode): Define. ++ * config/arm/vfp.md: (*movhf_vfp): New. ++ (extendhfsf2): New. ++ (truncsfhf2): New. ++ * config/arm/arm.opt (mfp16-format=): New. ++ * config/arm/arm.md: (fpu): Add neon_fp16. ++ (floatsihf2, floatdihf2): New. ++ (fix_trunchfsi2, fix_trunchfdi2): New. ++ (truncdfhf2): New. ++ (extendhfdf2): New. ++ (movhf): New. ++ (*arm32_movhf): New. ++ (*thumb1_movhf): New. ++ (consttable_2): Handle HFmode constants. ++ ++ libiberty/ ++ * cp-demangle.c (cplus_demangle_builtin_Dh_type): Declare. ++ (cplus_demangle_type): Make it handle "Dh". ++ ++2008-09-09 Sandra Loosemore ++ ++ Issue #3732 ++ ++ gcc/ ++ * doc/invoke.texi (ARM Options): Correct errors in discussion ++ of -mfloat-abi, -mhard-float, and -msoft-float. ++ ++2008-09-09 Kazu Hirata ++ ++ gcc/ ++ * config.gcc (mips-sgi-irix[56]*, mips*-*-netbsd*, ++ mips*-*-openbsd*, mips*-sde-elf*, mips64octeon*-wrs-elf*, ++ mipsisa64-*-elf*, mipsisa64el-*-elf*, mipsisa64sr71k-*-elf*, ++ mipsisa64sb1-*-elf*, mipsisa64sb1el-*-elf*, mips-*-elf*, ++ mipsel-*-elf*, mips64-*-elf*, mips64el-*-elf*, mips64vr-*-elf*, ++ mips64vrel-*-elf*, mips64orion-*-elf*, mips64orionel-*-elf*, ++ mips*-*-rtems*, mips-wrs-vxworks, mips-wrs-windiss, ++ mipstx39-*-elf*, mipstx39el-*-elf*): Don't add t-crtfm to ++ tmake_file. ++ ++ libgcc/ ++ * config.host (mips-sgi-irix[56]*, mips*-*-netbsd*, ++ mips*-*-openbsd*, mipsisa32-*-elf*, mipsisa32el-*-elf*, ++ mipsisa32r2-*-elf*, mipsisa32r2el-*-elf*, mipsisa64-*-elf*, ++ mipsisa64el-*-elf*, mipsisa64sr71k-*-elf*, mipsisa64sb1-*-elf*, ++ mipsisa64sb1el-*-elf*, mips-*-elf*, mipsel-*-elf*, mips64-*-elf*, ++ mips64el-*-elf*, mips64vr-*-elf*, mips64vrel-*-elf*, ++ mips64orion-*-elf*, mips64orionel-*-elf*, mips64octeon-wrs-elf*, ++ mips64octeonel-wrs-elf*, mips*-*-rtems*, mips-wrs-vxworks, ++ mips-wrs-windiss, mipstx39-*-elf*, mipstx39el-*-elf*): Remove ++ extra_parts and tmake_file. ++ ++2008-09-08 Daniel Jacobowitz ++ ++ * release-notes-csl.xml: Document exception handler fix. ++ ++ gcc/ ++ * config/arm/unwind-arm.c (__gnu_unwind_pr_common): Correct test ++ for barrier handlers. ++ ++2008-09-08 Daniel Jacobowitz ++ Mark Mitchell ++ ++ gcc/testsuite/ ++ * g++.dg/compat/eh/filter2_x.C: Declare abort. ++ * g++.dg/compat/eh/new1_x.C, g++.dg/compat/eh/new1_y.C: Include ++ cstddef and use std::size_t. ++ ++ * gcc.dg/compat/compat-common.h: Define SKIP_COMPLEX_INT if ++ SKIP_COMPLEX. Honor SKIP_COMPLEX. 
++ * gcc.dg/compat/scalar-by-value-3_x.c, ++ gcc.dg/compat/scalar-by-value-3_y.c, ++ gcc.dg/compat/scalar-by-value-4_x.c, ++ gcc.dg/compat/scalar-by-value-4_y.c, ++ gcc.dg/compat/scalar-by-value-5.c, ++ gcc.dg/compat/scalar-by-value-5_main.c, ++ gcc.dg/compat/scalar-by-value-6.c, ++ gcc.dg/compat/scalar-by-value-6_main.c, ++ gcc.dg/compat/scalar-by-value-6_x.c, ++ gcc.dg/compat/scalar-by-value-6_y.c, ++ gcc.dg/compat/struct-by-value-16_x.c, ++ gcc.dg/compat/struct-by-value-16_y.c, ++ gcc.dg/compat/struct-by-value-17_x.c, ++ gcc.dg/compat/struct-by-value-17_y.c, ++ gcc.dg/compat/struct-by-value-18_x.c, ++ gcc.dg/compat/struct-by-value-18_y.c, ++ gcc.dg/compat/struct-layout-1.h, ++ gcc.dg/compat/scalar-return-3_x.c, ++ gcc.dg/compat/scalar-return-3_y.c, ++ gcc.dg/compat/scalar-return-4_x.c, ++ gcc.dg/compat/scalar-return-4_y.c: Honor SKIP_COMPLEX. ++ ++ * gcc.dg/compat/scalar-by-value-y.h: Use stdarg.h for non-GCC ++ compilers. ++ ++ * gcc.dg/compat/struct-by-value-22_y.c, ++ gcc.dg/compat/struct-by-value-22_main.c, ++ gcc.dg/compat/struct-by-value-22_x.c: Honor SKIP_VLA_IN_STRUCT. ++ ++ * lib/c-compat.exp (compat_setup_dfp): Check the compiler under test ++ first. ++ * lib/compat.exp: Document COMPLEX and VLA_IN_STRUCT skips. ++ ++2008-09-08 Paul Brook ++ ++ gcc/ ++ * config/arm/arm.md (arm_addsi3): Add r/r/k alternative. ++ ++2008-09-08 Kazu Hirata ++ ++ gcc/ ++ * config.gcc (mips-sgi-irix[56]*, mips*-*-netbsd*, mips*-*-linux*, ++ mips*-sde-elf*, mips64octeon*-wrs-elf*, mipsisa32r2*, ++ mipsisa64sr71k-*-elf*, mipsisa64sb1*, mips64vr*, mips64orion*, ++ mips*-*-rtems*, mips-wrs-vxworks, mips-wrs-windiss, mipstx39): Add ++ mips/t-crtfm to tmake_file. ++ ++ libgcc/ ++ * config.host (mips*): Add mips/t-crtfm to tmake_file. Add ++ crtfastmath.o to extra_parts. ++ * config/mips/t-crtfm: New. ++ ++2008-09-07 Maxim Kuvyrkov ++ ++ gcc/testsuite/ ++ * gcc.gd/struct/wo_prof_global_var.c: Use uninitialized integer ++ values instead of uninitialized FP values to avoid NaNs. ++ * gcc.dg/struct/wo_prof_local_var.c: Same. ++ ++2008-09-07 Maxim Kuvyrkov ++ ++ gcc/ ++ * config/m68k/m68k.c (sched_attr_op_type): Handle all CONSTs. ++ ++ gcc/testsuite/ ++ * gcc.target/m68k/xgot-1.c (dg-options): Add -O2. ++ ++2008-09-06 Joseph Myers ++ ++ gcc/ ++ * combine.c (simplify_set): Avoid calling LOAD_EXTEND_OP on ++ non-integer modes. ++ ++2008-09-05 Joseph Myers ++ ++ Backport: ++ ++ gcc/ ++ 2008-09-05 Joseph Myers ++ * config/mips/mips.h (enum reg_class): Add FRAME_REGS. ++ (REG_CLASS_NAMES): Update. ++ (REG_CLASS_CONTENTS): Update. ++ * config/mips/mips.c (mips_regno_to_class): Use FRAME_REGS instead ++ of ALL_REGS for regs 77 and 78. ++ * function.c (instantiate_virtual_regs_in_insn): Assert that ++ return value of simplify_gen_subreg is not NULL. ++ ++ gcc/testsuite/ ++ 2008-09-05 Joseph Myers ++ * gcc.c-torture/compile/20080903-1.c: New test. ++ ++2008-09-04 Nathan Sidwell ++ ++ Issue 3304 ++ gcc/ ++ * config/arm/arm.c (arm_print_operand): Deal with HIGH. ++ * config/arm/constraints.md (j): New constraint for movw operands. ++ (N): Remove thumb2 meaning. ++ * config/arm/arm.md (*arm_movw): Delete. ++ (*arm_movsi_insn): Use j constraint for movw instead of N constraint. ++ * config/arm/vfp.md (*arm_movsi_vfp, *thumb2_movsi_vfp): Likewise. ++ * config/arm/thumb2.md (*thumb2_movsi_insn): Likewise. ++ ++2008-09-04 Julian Brown ++ ++ gcc/ ++ * Makefile.in (CSL_LICENSELIB): Remove space after -L to appease ++ Darwin ld. 
++ ++2008-09-04 Nathan Sidwell ++ ++ gcc/ ++ * config/arm/bpabi.h (LINK_SPEC): Add --fix-janus-2cc if needed. ++ ++ * release-notes-csl.xml: Adjust janus-2cc note. ++ ++2008-09-03 Nathan Froyd ++ ++ libgomp/ ++ * libgomp.texi (Library Index): Renamed from "Index" to prevent ++ conflict with index.html on case-insensitive file systems. ++ ++2008-09-03 Julian Brown ++ ++ * release-notes-csl.xml (NEON improvements): Add release note. ++ ++2008-09-02 Joseph Myers ++ ++ gcc/testsuite/ ++ * g++.dg/abi/arm_va_list.C: Correct order of dg-do and ++ dg-require-effective-target directives. ++ ++2008-09-02 Mark Mitchell ++ ++ gcc/testsuite/ ++ * gcc.target/arm/long-calls-1.c: Tolerate the lack of sibling ++ calls and/or PLT markers. ++ * gcc.target/arm/long-calls-2.c: Tolerate the lack of sibling ++ calls and/or PLT markers. ++ * gcc.target/arm/long-calls-3.c: Tolerate the lack of sibling ++ calls and/or PLT markers. ++ * gcc.target/arm/long-calls-4.c: Tolerate the lack of sibling ++ calls and/or PLT markers. ++ ++2008-09-01 Mark Mitchell ++ ++ Backport: ++ 2008-09-01 Mark Mitchell ++ * include/std/type_traits (__make_unsigned_selector<>): Consider ++ enums of size smaller than short. ++ (__make_signed_selector<>): Likewise. ++ * testsuite/20_util/make_signed/requirements/typedefs_neg.cc: ++ Adjust line numbers. ++ * testsuite/20_util/make_usigned/requirements/typedefs_neg.cc: ++ Adjust line numbers. ++ * testsuite/20_util/make_signed/requirements/typedefs-2.cc: ++ Ensure test_enum is the same size as short. ++ * testsuite/20_util/make_unsigned/requirements/typedefs-2.cc: ++ Ensure test_enum is the same size as short. ++ ++2008-09-01 Joseph Myers ++ ++ * release-notes-csl.xml: Avoid line containing only whitespace. ++ ++2008-08-27 Daniel Gutson ++ ++ Janus 2CC ARM shift fix: ++ gcc/ ++ * config/arm/arm.md (*addsi3_carryin_shift): Added "length" clause ++ to handle the extra NOP. ++ (andsi_not_shiftsi_si): Likewise. ++ (*thumb1_ashlsi3): Likewise. ++ (*thumb1_ashrsi3): Likewise. ++ (*thumb1_lshrsi3): Likewise. ++ (*thumb1_rotrsi3): Likewise. ++ (*arm_shiftsi3): Likewise. ++ (*shiftsi3_compare0): Likewise. ++ (*shiftsi3_compare0_scratch): Likewise. ++ (*arm_notsi_shiftsi): Likewise. ++ (*arm_notsi_shiftsi_compare0): Likewise. ++ (*arm_not_shiftsi_compare0_scratch): Likewise. ++ (*arm_cmpsi_shiftsi): Likewise. ++ (*arm_cmpsi_shiftsi_swp): Likewise. ++ (*arm_cmpsi_negshiftsi_si): Likewise. ++ (*arith_shiftsi): Likewise. ++ (*arith_shiftsi_compare0): Likewise. ++ (*arith_shiftsi_compare0_scratch): Likewise. ++ (*sub_shiftsi): Likewise. ++ (*sub_shiftsi_compare0): Likewise. ++ (*sub_shiftsi_compare0_scratch): Likewise. ++ (*if_shift_move): Likewise. ++ (*if_move_shift): Likewise. ++ (*if_shift_shift): Likewise. ++ (*thumb1_ashlsi3_janus2): New. Duplicated pattern to handle the ++ extra NOP. ++ (*thumb1_ashrsi3_janus2): Likewise. ++ (*thumb1_lshrsi3_janus2): Likewise. ++ (*thumb1_rotrsi3_janus2): Likewise. ++ * config/arm/arm.c (arm_print_operand): Added the nop after the %S ++ pattern. ++ (arm_override_options): Added handling of the -mfix-janus-2cc flag. ++ * config/arm/arm.h (janus2_code): Declare. ++ * config/arm/arm.opt (-mfix-janus-2cc): New. ++ ++ gcc/testsuite/ ++ * lib/target-supports.exp (check_effective_target_arm_no_thumb): ++ New function. ++ * gcc.target/arm/janus-2cc-shift-1.c: New. ++ * gcc.target/arm/janus-2cc-shift-2.c: New. ++ ++ * release-notes-csl.xml: Document. ++ ++2008-08-31 Mark Mitchell ++ ++ gcc/ ++ * gcc.target/arm/va_list.c: Return zero on success. 
++ ++ * release-notes-csl.xml: Update note for va_list change. ++ ++2008-08-30 Mark Mitchell ++ ++ libstdc++-v3/ ++ * testsuite/25_algorithms/nth_element/2.cc: Constrain iterations ++ when testing on a simultor. ++ ++2008-08-29 Mark Mitchell ++ ++ * release-notes-csl.xml: Update note for NEON mangling. ++ ++ Issue #3579 ++ gcc/ ++ * config/arm/arm.c (arm_build_builtin_va_list): New function. ++ (arm_extract_valist_ptr): Likewise. ++ (arm_expand_builtin_va_start): Likewise. ++ (arm_gimplify_va_arg_expr): Likewise. ++ (TARGET_BUILD_BUILTIN_VA_LIST): Define. ++ (TARGET_EXPAND_BUILTIN_VA_START): Likewise. ++ (TARGET_GIMPLIFY_VA_VARG_EXPR): Likewise. ++ (arm_mangle_type): Handle __va_list specially. ++ gcc/testsuite/ ++ * lib/target-supports.exp (check_effective_target_arm_eabi): New ++ function. ++ * gcc.target/arm/va_list.c: New test. ++ * g++.dg/abi/arm_va_list.C: Likewise. ++ ++2008-08-29 Mark Mitchell ++ ++ gcc/cp/ ++ * mangle.c (write_type): Add target-specific manglings for ++ non-fundamental types to the substitution table. ++ gcc/testsuite/ ++ * g++.dg/abi/mangle-neon.C: Add substitution test. ++ ++ * release-notes-csl.xml: Document change. ++ ++2008-08-29 Joseph Myers ++ ++ Backport: ++ ++ gcc/testsuite/ ++ 2008-04-09 Andy Hutchinson ++ PR testsuite/34894 ++ PR testsuite/33782 ++ * lib/target-supports.dg: Add check_effective_target_trampolines. ++ Disable profiling for avr-*-*. ++ * gcc.c-torture/compile/pr27889.c: dg-requires trampolines. ++ * gcc.c-torture/compile/nested-1.c: Ditto. ++ * gcc.c-torture/compile/20050122-2.c: Ditto. ++ * gcc.c-torture/compile/20010226-1.c: Ditto. ++ * gcc.c-torture/compile/20010327-1.c: Skip for avr-*-*. ++ * gcc.c-torture/compile/980506-1.c: Ditto. ++ * gcc.c-torture/compile/20020604-1.c: Ditto. ++ * gcc.c-torture/compile/limits-stringlit.c: Ditto ++ * gcc.c-torture/compile/20001226-1.c: Ditto ++ ++ 2008-05-12 Andy Hutchinson ++ * gcc.dg/pr34457-1.c: Skip for target without trampolines. ++ * gcc.dg/20050607-1.c: Ditto. ++ * gcc.dg/trampoline-1.c: Ditto. ++ * gcc.dg/debug/debug-3.c: Ditto. ++ * gcc.dg/debug/debug-5.c: Ditto. ++ ++2008-08-29 Mark Mitchell ++ ++ gcc/testsuite/ ++ * gcc.dg/vect/vect-105.c: Prevent compiler from hoisting abort out ++ of loop. ++ ++2008-08-28 Mark Mitchell ++ ++ gcc/testsuite/ ++ * gcc.dg/struct/wo_prof_single_str_global.c: Mask return value. ++ * gcc.dg/struct/wo_prof_single_str_local.c: Mask return value. ++ * gcc.dg/struct/wo_prof_single_str_pointer.c: Mask return value. ++ ++ Backport: ++ ++ gcc/testsuite/ ++ 2008-04-22 Steve Ellcey ++ * gcc.dg/struct/wo_prof_global_var.c: Initialize array. ++ * gcc.dg/struct/wo_prof_malloc_size_var.c: Ditto. ++ * gcc.dg/struct/w_prof_local_var.c: Ditto. ++ * gcc.dg/struct/w_prof_global_var.c: Ditto. ++ * gcc.dg/struct/wo_prof_local_var.c: Ditto. ++ ++2008-08-28 Mark Mitchell ++ ++ gcc/cp ++ * decl.c (maybe_deduce_size_from_array_init): Use relayout_decl. ++ gcc/testsuite/ ++ * g++.dg/cpp/_Pragma1.C: Skip on arm*-*-eabi*. ++ * g++.dg/ext/visibility/arm1.C: Require DLL targets. ++ * g++.dg/init/ref15.C: Require unwrapped targets. ++ ++2008-08-28 Paul Brook ++ ++ Merge from Sourcery G++ 4.2: ++ gcc/ ++ * config/arm/neon.md (neon_type): Move to arm.md. ++ (neon_mov): Add neon_type attribute. ++ (movmisalign): Ditto. ++ * config/arm/arm.md (neon_type): Move to here. ++ (conds): Add "unconditioal" and use as default for NEON insns. ++ ++ gcc/testsuite/ ++ * gcc.target/arm/neon-cond-1.c: New test. 
++ ++2008-08-27 Nathan Froyd ++ ++ libgomp/ ++ * Makefile.am: Use install-data-local for install-html and ++ install-pdf. ++ * Makefile.in: Regenerate. ++ ++2008-08-26 Maxim Kuvyrkov ++ ++ Port not-reviewed patch from gcc-patches@. ++ ++ gcc/ ++ 200x-xx-xx Roman Zippel ++ PR middle-end/29474 ++ * gcc/recog.c (validate_replace_rtx_1): Prevent swap of ++ commutative operands during reload ++ ++ ++2008-08-26 Maxim Kuvyrkov ++ ++ gcc/ ++ * config/m68k/m68k.md (cmpdi): Use (scratch) instead of pseudo. ++ ++2008-08-25 Nathan Froyd ++ ++ Issue #3604 ++ ++ * release-notes-csl.xml (-msim build fix): New. ++ ++ gcc/ ++ * config/rs6000/sysv4.h (LIB_SIM_SPEC): Use LIB_DEFAULT_SPEC. ++ (STARTFILE_SIM_SPEC): Remove sim-crt0.o%s. ++ (ENDFILE_SIM_SPEC): Add -Tsim-hosted.ld. ++ (LINK_OS_SIM_SPEC): Define to empty. ++ ++2008-08-23 Nathan Froyd ++ ++ * release-notes-csl.xml (OpenMP support): New. ++ ++2008-08-21 Nathan Sidwell ++ ++ gcc/ ++ * config/m68k/m68k-devices.def (52274, 52277): New devices. ++ ++ * release-notes-csl.xml: Document addition of DragonFire0. ++ ++2008-08-21 Joseph Myers ++ ++ Backport: ++ ++ gcc/testsuite/ ++ 2008-08-21 Joseph Myers ++ * g++.dg/opt/anchor1.C (foo): Return the return value of ++ ycf->ascent. ++ ++2008-08-21 Nathan Froyd ++ ++ Backport from mainline: ++ ++ libgomp/ ++ 2008-08-21 Nathan Froyd ++ * testsuite/libgomp.exp (libgomp_init): Only set things that ++ depend on blddir if blddir exists. ++ (libgomp_target_compile): Likewise. ++ * testsuite/libgomp.c++/c++.exp: Likewise. ++ * testsuite/libgomp.fortran/fortran.exp: Likewise. ++ ++2008-08-20 Joseph Myers ++ ++ Backport: ++ ++ gcc/ ++ 2008-08-20 Joseph Myers ++ PR target/31070 ++ * config/sparc/sparc.c (function_arg_slotno): Handle structure ++ with MODE_VECTOR_INT mode. ++ ++2008-08-19 Joseph Myers ++ ++ * release-notes-csl.xml (Target architecture defaults to i686): ++ Add new release note. ++ ++2008-08-19 Joseph Myers ++ ++ * release-notes-csl.xml: Update release note for upgrade to refer ++ to version 4.3.2. ++ ++2008-08-19 Kazu Hirata ++ ++ Issue 3422 ++ gcc/ ++ * config.gcc (mips64*-*-linux*, mips-*-elf*, mipsel-*-elf*, ++ mips64-*-elf*, mips64el-*-elf*): Add mips/t-crtfm. ++ * config/mips/crtfastmath.c: New. ++ * config/mips/linux.h (ENDFILE_SPEC): New. ++ * config/mips/linux64.h (ENDFILE_SPEC): New. ++ * config/mips/t-crtfm: New. ++ ++ * release-notes-csl.xml: Add a release note for the new FPU ++ defaults on mips64el-sicortex-linux-gnu ++ ++2008-08-18 Nathan Froyd ++ ++ gcc/testuite/ ++ * gcc.dg/pr34856.c: Fix thinko ++ ++2008-08-18 Nathan Froyd ++ ++ libgomp/ ++ * Makefile.am (datarootdir, docdir, htmldir, pdfdir): Define. ++ (HTMLS_INSTALL, HTMLS_BUILD): Define. ++ ($(HTMLS_BUILD)): New rule. ++ (html__strip_dir): Define. ++ (install-data-am): Add install-html and install-pdf prerequsites. ++ (install-html): Add actions. ++ (TEXI2HTML): Define. ++ * Makefile.in: Regenerate. ++ * configure.ac (datarootdir, docdir, htmldir, pdfdir): Add ++ appropriate --with options and AC_SUBSTs. ++ * configure: Regenerate. ++ ++2008-08-18 Nathan Froyd ++ ++ libgomp/ ++ * Makefile.am (LTLDFLAGS): Define. ++ (LINK): Define. ++ * Makefile.in: Regenerate. ++ ++2008-08-18 Nathan Froyd ++ ++ gcc/testuite/ ++ * gcc.dg/pr34856.c: Add powerpc*-eabi* exception. ++ ++2008-08-15 Joseph Myers ++ ++ Backport: ++ ++ gcc/ ++ 2008-06-28 Andrew Jenner ++ * regrename.c (build_def_use): Don't copy RTX. 
++ ++2008-08-13 Joseph Myers ++ ++ Backport: ++ ++ gcc/ ++ 2008-08-13 Joseph Myers ++ * config/sparc/sparc.c (emit_soft_tfmode_cvt): Explicitly sign or ++ zero extend SImode values being converted to TFmode before passing ++ to libcalls. ++ ++2008-08-12 Nathan Froyd ++ ++ Backport from mainline: ++ ++ gcc/ ++ 2008-08-12 Nathan Froyd ++ ++ PR libgomp/26165 ++ * gcc.c (include_spec_function): Tweak call to find_a_file. ++ ++2008-08-10 Catherine Moore ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2008-02-28 Julian Brown ++ ++ Merge from MIPS: ++ ++ gcc/ ++ * Makefile.in (stmp-int-hdrs): Don't depend on ++ fixinc_list. Only ++ process fixincludes if fixinc_list is ++ present. ++ (install-mkheaders): Likewise. ++ ++ 2008-02-11 Julian Brown ++ ++ Merge from MIPS: ++ ++ 2004-06-29 Nigel Stephens ++ ++ * Makefile.in (libgcc.mk): Make this depend on ++ $(tmake_file), in ++ case new multilib options have been defined. ++ (s-mlib): Similarly. ++ ++2008-08-07 Joseph Myers ++ ++ Backport: ++ ++ gcc/ ++ * config/arm/neon.md neon_vget_lane): Adjust element indices ++ for big-endian. ++ ++2008-08-07 Joseph Myers ++ ++ Backport: ++ ++ gcc/ ++ 2008-08-07 Joseph Myers ++ * config/arm/iwmmxt.md (movv8qi_internal, movv4hi_internal, ++ movv2si_internal): Combine into mov_internal. ++ (movv2si_internal_2): Remove. ++ ++2008-08-06 Catherine Moore ++ ++ gcc/ ++ * config/mips/mips.h (MIPS_ARCH_DSP_SPEC): Add missing *. ++ ++ * release-notes-csl.xml: Fix target. ++ ++2008-08-06 Joseph Myers ++ ++ Backport: ++ ++ gcc/ ++ 2008-08-06 Joseph Myers ++ * jump.c (rtx_renumbered_equal_p): Do not call subreg_regno_offset ++ for unrepresentable subregs or treat them as equal to other regs ++ or subregs with the same register number. ++ ++2008-08-05 Catherine Moore ++ ++ Issue #3088 ++ gcc/ ++ * config/mips/sde.h (SUBTARGET_SELF_SPECS): Add ++ MIPS_ARCH_DSP_SPEC. ++ * config/mips/mips.h (MIPS_ARCH_DSP_SPEC): New. ++ ++ * release-notes-csl.xml: Document. ++ ++2008-08-04 Joseph Myers ++ ++ gcc/testsuite/ ++ * gcc.target/mips/mips-nonpic/nonpic-9.c (main): Call exit. ++ ++2008-07-30 Nathan Froyd ++ ++ Issue #2576 ++ ++ Backport: ++ ++ gcc/ ++ 2008-07-30 Nathan Froyd ++ ++ * config/arm/arm.c (arm_expand_prologue): Use 0-length rtvec ++ instead of NULL_RTVEC. ++ ++2008-07-28 Mark Mitchell ++ ++ Issue #466 ++ gcc/ ++ * config/arm/thumb2.md: Add 16-bit multiply instructions. ++ gcc/testsuite/ ++ * lib/target-supports.exp (check_effective_target_arm_thumb2_ok): ++ New function. ++ * gcc.target/arm/thumb2-mul-space.c: New file. ++ * gcc.target/arm/thumb2-mul-space-2.c: New file. ++ * gcc.target/arm/thumb2-mul-space-3.c: New file. ++ * gcc.target/arm/thumb2-mul-speed.c: New file. ++ ++ * release-notes-csl.xml: Document. ++ ++2008-07-29 Catherine Moore ++ Daniel Jacobowitz ++ ++ gcc/ ++ * config/mips/mips.h (ISA_HAS_BBIT): Enable for TARGET_OCTEON. ++ * config/mips/mips.md (branch_with_likely): New attribute. ++ (branch_without_likely): New attribute. ++ (define_delay): Check for new branch_likely attributes. ++ (branch_bit): Set branch_without_likely to "yes". ++ (branch_bit_truncdi): Likewise. ++ (branch_bit_inverted): Likewise. ++ (branch_bit_truncdi_inverted): Likewise. ++ ++2008-07-25 Mark Mitchell ++ ++ Issue #3433 ++ gcc/ ++ * gcc.c (SWITCHES_NEED_SPACES): Define to "o". ++ ++ * release-notes-csl.xml: Document. ++ ++2008-07-25 Joseph Myers ++ ++ gcc/ ++ * config/arm/iwmmxt.md (movv8qi_internal, movv4hi_internal, ++ movv2si_internal): Use "*" for pool_range and neg_pool_range for ++ mem = reg alternative. 
++ ++2008-07-25 Maxim Kuvyrkov ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2008-06-03 Maxim Kuvyrkov ++ * release-notes-csl.xml: Add note. ++ gcc/ ++ * config/mips/mips.c (mips_expand_prologue): Fix thinko. ++ ++ 2008-05-27 Maxim Kuvyrkov ++ -mwarn-framesize= option for MIPS. ++ * release-notes-csl.xml: Add note. ++ gcc/ ++ * doc/invoke.texi (mwarn-framesize): Document option. ++ * config/mips/mips.opt (mwarn-framesize): Add option. ++ * config/mips/mips.c (mips_warn_framesize): New static variable. ++ (mips_handle_option): Handle mwarn-framesize. ++ (mips_expand_prologue): Emit warning if frame size exceeds specified ++ value. ++ ++2008-07-24 Joseph Myers ++ ++ * config.sub: Allow mips64octeon* targets. ++ ++ NOT ASSIGNED TO FSF ++ COPYRIGHT CAVIUM ++ gcc/ ++ * config/mips/octeon-elf-unwind.h, config/mips/octeon-elf.h, ++ config/mips/octeon.h, config/mips/t-octeon-elf: New. ++ * config.gcc: Handle mips64octeon*-wrs-elf*. ++ (mips-wrs-linux-gnu): Use mips/octeon.h. ++ * config/mips/mips-protos.h (octeon_output_shared_variable): New. ++ * config/mips/mips.c (octeon_handle_cvmx_shared_attribute, ++ octeon_select_section, octeon_unique_section, ++ octeon_output_shared_variable): New. ++ (mips_attribute_table): Add cvmx_shared. ++ (mips_in_small_data_p): Check for cvmx_shared attribute. ++ * config/mips/mips.opt (mocteon-useun): Use Mask. ++ ++ libgcc/ ++ * config.host: Handle mips64octeon*-wrs-elf*. ++ ++2008-07-24 Joseph Myers ++ ++ gcc/ ++ * config/mips/mips.c (mips_expand_ins_as_unaligned_store): Restore ++ Octeon unaligned store support. ++ ++2008-07-21 Mark Mitchell ++ ++ Issue #3245 ++ ++ Backport: ++ ++ libstdc++-v3/ ++ 2008-07-21 Mark Mitchell ++ * config/os/gnu-linux/arm-eabi-extra.ver: New file. ++ * configure.host: Use it for arm*-*-linux-*eabi. ++ ++ * release-notes-csl.xml: Document. ++ ++2008-07-21 Joseph Myers ++ ++ gcc/ ++ * config/mips/mips.md (extzv): Avoid using dext instructions for ++ certain DImode subreg extractions. From Cavium toolchain. ++ ++2008-07-21 Nathan Froyd ++ ++ gcc/ ++ * tree-ssa-remove-local-statics.c ++ (find_static_nonvolatile_declarations): Don't check for potential ++ definitions if we're looking at a statement with a CALL_EXPR. ++ (compute_definedness_for_block): Reorganize logic. ++ ++ gcc/testsuite/ ++ * gcc.dg/remove-local-statics-13.c: New test. ++ * gcc.dg/remove-local-statics-14.c: New test. ++ ++2008-07-18 Joseph Myers ++ ++ Backport: ++ ++ gcc/testsuite/ ++ 2008-07-18 Joseph Myers ++ * gcc.dg/fshort-wchar.c: Use -Wl,--no-wchar-size-warning on ++ arm*-*-*eabi. ++ ++2008-07-17 Catherine Moore ++ ++ gcc/ ++ * config/mips/sde.h (TARET_MIPS_SDE): Define to 1. ++ (SUBTARGET_SELF_SPECS): Undefine before defining. ++ ++2008-07-10 Joseph Myers ++ ++ Backport: ++ ++ gcc/testsuite/ ++ 2008-07-10 Joseph Myers ++ PR middle-end/29056 ++ * gcc.target/powerpc/ppc-negeq0-1.c: Use long instead of int. ++ Adjust shift and scan-assembler-not pattern to allow for 64-bit ++ case. ++ ++2008-07-10 Joseph Myers ++ ++ config/ ++ * mh-mingw (LDFLAGS): Append to rather than replacing previous ++ value. ++ ++2008-07-09 Joseph Myers ++ ++ gcc/ ++ * config/mips/linux64.h (SUBTARGET_ASM_SPEC): Update for non-PIC. ++ ++2008-07-09 Joseph Myers ++ ++ gcc/ ++ * config/mips/wrs-linux.h (SUBTARGET_SELF_SPECS): Add missing ++ comma. ++ ++2008-07-09 Joseph Myers ++ ++ Backport: ++ ++ libstdc++-v3/ ++ 2008-07-09 Joseph Myers ++ * libsupc++/unwind-cxx.h (__is_gxx_forced_unwind_class, ++ __GXX_INIT_FORCED_UNWIND_CLASS): Define for ARM EABI unwinder. 
++ * libsupc++/eh_personality.cc (PERSONALITY_FUNCTION): Call ++ __GXX_INIT_FORCED_UNWIND_CLASS for forced unwind with ARM EABI ++ unwinder. ++ * libsupc++/eh_arm.cc (__cxa_type_match): Use ++ __is_gxx_forced_unwind_class to check for forced unwind. ++ ++2008-07-09 Joseph Myers ++ ++ gcc/ ++ * config/mips/wrs-linux.h (SUBTARGET_SELF_SPECS): Add ++ NO_SHARED_SPECS. ++ ++2008-07-09 Joseph Myers ++ ++ Backport: ++ ++ libstdc++-v3/ ++ 2008-07-09 Joseph Myers ++ * testsuite/20_util/make_signed/requirements/typedefs-2.cc, ++ testsuite/20_util/make_unsigned/requirements/typedefs-2.cc: Use ++ -Wl,--no-enum-size-warning for arm*-*-linux*eabi. ++ ++2008-07-09 Joseph Myers ++ ++ gcc/ ++ * config/mips/mips.h (ISA_HAS_BBIT): Temporarily disable. ++ ++2008-07-09 Joseph Myers ++ ++ gcc/ ++ * config/mips/linux64.h (SUBTARGET_SELF_SPECS): Undefine before ++ redefining. ++ ++2008-07-08 Catherine Moore ++ ++ gcc/config/mips ++ xlr.md (ir_xlr_alu): Add logical, signext attributes. ++ ++2008-07-08 Nathan Froyd ++ ++ gcc/ ++ * passes.c (init_optimization_passes): Move pass_remove_local_statics ++ later in the pass order. ++ * tree-ssa-remove-local-statics.c (rls_done): Conditionally free the ++ bitmaps and NULL out bb->aux. ++ (unstaticize_variable): Deal with GIMPLE_MODIFY_STMTs instead of ++ MODIFY_EXPRs. ++ (compute_definedness_for_block): Check for defines only if we haven't ++ found a CALL_EXPR. ++ ++2008-07-07 Joseph Myers ++ ++ Backport: ++ ++ gcc/ ++ 2008-07-07 Joseph Myers ++ * config/arm/aout.h (DOLLARS_IN_IDENTIFIERS): Remove. ++ ++2008-07-07 Vladimir Prus ++ ++ gcc/ ++ * gcc.c (print_sysroot): New. ++ (option_map, display_help, process_command): Handle the ++ -print-sysroot option. ++ (main): Print the sysroot if requested. ++ ++ gcc/doc/ ++ * invoke.texi (Debugging Options): Document -print-sysroot. ++ ++2008-07-03 Joseph Myers ++ ++ gcc/ ++ * config/arm/arm.c (arm_init_neon_builtins): Register built-in ++ types immediately after creating them. ++ ++2008-07-03 Joseph Myers ++ ++ gcc/ ++ * config/arm/arm.c (add_minipool_backward_ref): Check for ++ 8-byte-aligned entries in second case of forcing insertion after a ++ particular entry. Change third case to avoid inserting ++ non-8-byte-aligned entries before 8-byte-aligned ones. ++ ++2008-07-03 Joseph Myers ++ ++ gcc/ ++ * config/arm/iwmmxt.md (movv8qi_internal, movv4hi_internal, ++ movv2si_internal): Add mem = reg alternative. ++ ++2008-07-03 Nathan Froyd ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2008-07-02 Nathan Froyd ++ ++ gcc/ ++ * config/rs6000/t-ppcgas (MULTILIB_OPTIONS): Add te500mc. ++ (MULTILIB_DIRNAMES): Likewise. ++ (MULTILIB_EXCEPTIONS): Add exception for te500mc. ++ * config/rs6000/eabi.h (NAME__MAIN, INVOKE__main): Remove. ++ (CC1_EXTRA_SPEC): Add te500mc clause. ++ (ASM_DEFAULT_SPEC): Likewise. ++ * config/rs6000/t-ppccomm (LIB2FUNS_STATIC_EXTRA): Remove eabi.S. ++ (eabi.S): Remove rule. ++ ++2008-07-03 Nathan Sidwell ++ ++ gcc/ ++ * config/m68k/t-uclinux (M68K_MLIB_CPU): Check for FL_UCLINUX. ++ * config/m68k/m68k-devices.def: Add FL_UCLINUX to 68020 and 54455 ++ multilibs. ++ * config/m68k/m68k.h (FL_UCLINUX): Define. ++ ++ * release-notes-csl.xml: Document. ++ ++2008-07-02 Joseph Myers ++ ++ gcc/ ++ * c-incpath.c: Include toplev.h. ++ (merge_include_chains): Use warning instead of cpp_error for ++ system directory poisoning diagnostic. ++ * Makefile.in (c-incpath.o): Depend on toplev.h. ++ * gcc.c (LINK_COMMAND_SPEC): Pass ++ --error-poison-system-directories if ++ -Werror=poison-system-directories. 
++ ++2008-07-02 Julian Brown ++ ++ Backport from mainline: ++ ++ 2008-06-27 Mark Mitchell ++ ++ libstdc++-v3/ ++ * libsupc++/vec.cc (__aeabi_vec_dtor_cookie): Handle NULL array ++ address. ++ (__aeabi_vec_delete): Likewise. ++ (__aeabi_vec_delete3): Likewise. ++ (__aeabi_vec_delete3_nodtor): Likewise. ++ ++ gcc/testsuite/ ++ * g++.dg/abi/arm_cxa_vec2.C: New test. ++ ++2008-07-01 Joseph Myers ++ ++ gcc/testsuite/ ++ * lib/target-supports.exp (check_effective_target_arm_neon): New. ++ (check_effective_target_vect_cmdline_needed): Use it. ++ ++2008-07-01 Joseph Myers ++ ++ gcc/ ++ * config/arm/neon.md (neon_vget_lane_sext_internal, ++ neon_vget_lane_zext_internal): Adjust element indices for ++ big-endian. ++ ++2008-07-01 Nathan Sidwell ++ ++ gcc/ ++ * config/mips/linux.h (SUBTARGET_SELF_SPECS): Override this, ++ rather than ... ++ (DRIVER_SELF_SPECS): ... this. ++ * config/mips/mips.md (extzv, extzv, insv, insv, ++ *insvdi): Use mips_use_ins_ext_p rather than mips_use_ext_p ++ and mips_use_ins_p. ++ * config/mips/mips-protos.h (mips_lower_sign_bit_p, ++ mips_use_ext_p): Delete. ++ (mips_expand_vector_init): Declare. ++ * config/mips/mips.c (mips_gnu_local_gp): Declare. ++ (mips_got_base): Use can_create_pseudo_p. ++ (mips16_build_function_stub): Remove unused variable. ++ (mips_lower_sign_bit_p, mips_use_ins_p, mips_use_ext_p): Delete. ++ ++ gcc/ ++ * config/mips/mips.md (type): Correct typo for accext. ++ ++2008-06-30 Joseph Myers ++ ++ config/ ++ * mh-mingw (BOOT_CFLAGS): Do not use -D__USE_MINGW_ACCESS. ++ ++2008-06-28 Sandra Loosemore ++ ++ Backport 2 patches from mainline: ++ ++ 2008-06-28 Sandra Loosemore ++ ++ gcc/ ++ * doc/extend.texi (Variable Attributes): Use @ref instead of @xref. ++ (Type Attributes): Fix nesting of @table and @subsection. Adjust ++ punctuation. Use @ref instead of @xref. ++ (Function Names): Remove stray @display/@end display. ++ (C++ Attributes): Use @ref instead of @xref. ++ (Deprecated Features): Fix punctuation around @xref. ++ (Backwards Compatibility): Likewise. ++ * doc/rtl.texi (Incdec): Remove stray @table/@end table. ++ ++ 2008-06-15 Ralf Wildenhues ++ ++ gcc/ ++ * doc/sourcebuild.texi (Config Fragments): Remove obsolete ++ FIXME note about gcc/config.guess. ++ * doc/options.texi (Option file format): Remove non-ASCII bytes. ++ * doc/cpp.texi: Expand TABs, drop indentation outside examples. ++ * doc/cppopts.texi: Likewise. ++ * doc/extend.texi: Likewise. ++ * doc/gcc.texi: Likewise. ++ * doc/gccint.texi: Likewise. ++ * doc/gcov.texi: Likewise. ++ * doc/gty.texi: Likewise. ++ * doc/hostconfig.texi: Likewise. ++ * doc/install.texi: Likewise. ++ * doc/invoke.texi: Likewise. ++ * doc/loop.texi: Likewise. ++ * doc/makefile.texi: Likewise. ++ * doc/md.texi: Likewise. ++ * doc/passes.texi: Likewise. ++ * doc/tm.texi: Likewise. ++ * doc/tree-ssa.texi: Likewise. ++ * doc/trouble.texi: Likewise. ++ ++2008-06-27 Julian Brown ++ ++ gcc/cp/ ++ * decl2.c (determine_visibility): Allow target to override ++ visibility of class data. ++ ++ gcc/ ++ * config/arm/arm.c (arm_cxx_determine_class_data_visibility): Make ++ no-op for targets which don't use DLLs. ++ ++ gcc/testsuite/ ++ * g++.dg/ext/visibility/arm3.C: Add explanatory text. Skip on ++ non-DLL targets. ++ ++2008-06-26 Nathan Froyd ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2008-02-13 Nathan Froyd ++ ++ gcc/ ++ * optabs.c (expand_binop): Force operands to registers before ++ generating libcalls. 
++ ++2008-06-26 Daniel Jacobowitz ++ ++ gcc/ ++ * config/mips/mips.c (mips_call_tls_get_addr) ++ (mips_emit_loadgp): Correct merge. ++ ++2008-06-26 Joseph Myers ++ ++ * release-notes-csl.xml: Resync release note text with Sourcery ++ G++ 4.2. ++ ++2008-06-25 Catherine Moore ++ ++ Merge from SourceryG++ 4.2: ++ ++ 2008-04-01 Joseph Myers ++ ++ gcc/ ++ * config/mips/mips.h (TARGET_CPU_CPP_BUILTINS): Define ++ __mips_isa_rev=2 for Octeon. ++ ++2008-06-25 Julian Brown ++ ++ gcc/ ++ * config.gcc (arm*-*-uclinux*): Remove duplicate uclinux-elf.h. ++ ++2008-06-25 Catherine Moore ++ ++ Merge from SourceryG++ 4.2: ++ ++ 2008-02-15 Julian Brown ++ ++ Merge from MIPS: ++ ++ 2007-11-06 David Ung ++ ++ gcc/ ++ * config/mips/mips.h (AC1HI_REGNUM, AC1LO_REGNUM, AC2HI_REGNUM) ++ (AC2LO_REGNUM, AC3HI_REGNUM, AC3LO_REGNUM): Define constants. ++ ++2008-06-25 Catherine Moore ++ ++ Merge from SourceryG++ 4.2: ++ ++ 2007-07-02 Richard Sandiford ++ ++ gcc/ ++ * config/mips/mips.h (MIPS_ISA_LEVEL_SPEC): Handle -march=octeon. ++ ++2008-06-25 Catherine Moore ++ ++ Revert: ++ ++ 2008-06-25 Catherine Moore ++ ++ Merge from SourceryG++ 4.2: ++ ++ 2007-10-18 Joseph Myers ++ ++ NOT ASSIGNED TO FSF ++ COPYRIGHT RAZA ++ * config.sub (mipsisa64xlr, ipsisa64xlrel): Add new machine names. ++ ++ gcc/ ++ * config.gcc (mipsisa64xlr-*-elf*, mipsisa64xlrel-*-elf*): New ++ targets. ++ ++2008-06-25 Catherine Moore ++ ++ Merge from SourceryG++ 4.2: ++ ++ 2007-10-18 Joseph Myers ++ ++ NOT ASSIGNED TO FSF ++ COPYRIGHT RAZA ++ * config.sub (mipsisa64xlr, ipsisa64xlrel): Add new machine names. ++ ++ gcc/ ++ * config.gcc (mipsisa64xlr-*-elf*, mipsisa64xlrel-*-elf*): New ++ targets. ++ * config/mips/mips.h (PROCESSOR_XLR, TARGET_XLR): Define. ++ (MIPS_ISA_LEVEL_SPEC): Handle -march=xlr. ++ ++2008-06-24 Catherine Moore ++ ++ Merge from SourceryG++ 4.2: ++ ++ 2008-02-12 Julian Brown ++ ++ Merge from MIPS: ++ ++ 2008-01-16 David Ung ++ ++ * config/mips/sdemtk.h: Define macro TARGET_MIPS_SDEMTK. ++ * config/mips/mips.c (mips_file_start): Check against ++ TARGET_MIPS_SDEMTK which supports the TARGET_NO_FLOAT option. ++ ++ 2007-11-02 Thiemo Seufer ++ ++ * config/mips/mips.c (mips_file_start): Add support for flagging ++ 32-bit code with -mfp64 floating point. ++ ++2008-06-24 Catherine Moore ++ ++ Merge from SourceryG++ 4.2: ++ ++ 2008-03-12 Julian Brown ++ ++ Merge from MIPS: ++ ++ 2007-11-29 Thiemo Seufer ++ ++ gcc/ ++ * config/mips/mips.c (override_options): Let -fpic imply ++ -mabicalls, forward port from SDE6. ++ ++2008-06-23 Julian Brown ++ ++ gcc/ ++ * config/arm/arm.h (ASM_OUTPUT_REG_PUSH): Handle STATIC_CHAIN_REGNUM ++ specially for Thumb-1. ++ (ASM_OUTPUT_REG_POP): Likewise. ++ ++2008-06-23 Julian Brown ++ ++ gcc/ ++ * config/arm/thumb2.md (*thumb2_negscc): Remove bad negated-GT ++ code sequence. ++ ++2008-06-20 Catherine Moore ++ ++ Merge from SourceryG++ 4.2: ++ ++ 2007-10-21 Sandra Loosemore ++ ++ gcc/ ++ * config/mips/mips.c (mips_cpu_info_table): Fix damaged merge ++ of XLR entry from r185319. ++ (mips_rtx_cost_data): Likewise. ++ (mips_sched_reorder): Add ATTRIBUTE_UNUSED to cycle parameter. ++ ++2008-06-18 Catherine Moore ++ ++ Merge from SourceryG++ 4.2: ++ ++ 2007-09-06 Sandra Loosemore ++ ++ gcc/ ++ * config/mips/mips.opt (mips16e): Add as deprecated alias ++ for -mips16. ++ * doc/invoke.texi (Option Summary, MIPS Options): Document it. 
++ ++2008-06-18 Catherine Moore ++ ++ Merge from SourceryG++ 4.2: ++ ++ 2008-02-12 Julian Brown ++ ++ Merge from MIPS: ++ ++ 2007-12-21 David Ung ++ ++ gcc/ ++ * config/mips/mips.h (TARGET_MIPS_SDE): Define macro as 0. ++ * config/mips/sde.h (TARGET_MIPS_SDE): Override macro definition to 1. ++ * config/mips/mips.md (abs2): Enable abs.[sd] patterns if ++ TARGET_MIPS_SDE && TARGET_HARD_FLOAT. ++ ++2008-06-18 Catherine Moore ++ ++ Merge from SourceryG++ 4.2: ++ ++ 2007-10-14 Sandra Loosemore ++ ++ * config/mt-sde: Update to make it agree with the mainline ++ version committed with the below patch. ++ ++ Backport from mainline: ++ gcc/ ++ ++ 2007-08-17 Richard Sandiford ++ Nigel Stephens ++ ++ * config/mips/sde.h (DRIVER_SELF_SPECS): Add commas. ++ Treat -mno-data-in-code and -mcode-xonly as aliases for ++ -mcode-readable=no and -mcode-readable=pcrel respectively. ++ * config/mips/t-sde (TARGET_LIBGCC2_CFLAGS): Add -mcode-xonly. ++ (MULTILIB_OPTIONS): Add -mcode-readable=no multilibs. ++ (MULTILIB_DIRNAMES): Update accordingly. ++ ++2008-06-18 Catherine Moore ++ ++ Merge from SourceryG++ 4.2: ++ ++ 2007-09-08 Sandra Loosemore ++ ++ gcc/ ++ * config/mips/t-sde (MULTILIB_MATCHES): Add mips16e as alias ++ for mips16. ++ ++2008-06-18 Joseph Myers ++ ++ gcc/ ++ * config/arm/arm.c (arm_assemble_integer): Do not handle ++ big-endian NEON vectors specially. ++ * config/arm/neon.md (vec_set_internal, vec_extract): ++ Adjust element indices for big-endian. ++ ++2008-06-18 Catherine Moore ++ ++ Merge from SourceryG++ 4.2: ++ ++ 2008-02-11 Julian Brown ++ ++ Merge from MIPS: ++ ++ gcc/ ++ * config/mips/t-sde (MULTILIB_OPTIONS): Substitute mno-data-in-code for ++ mcode-readable=no option. ++ ++2008-06-17 Catherine Moore ++ ++ Merge from SourceryG++ 4.2: ++ ++ 2008-03-28 Nathan Sidwell ++ ++ * config/mips/t-sdemtk (MULTILIB_OPTIONS, MULTILIB_DIRNAMES, ++ MULTILIB_EXCLUSIONS): Likewise. ++ ++2008-06-17 Catherine Moore ++ ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2008-03-17 Julian Brown ++ ++ gcc/ ++ * config.gcc (mips*-sde-elf*): Add SourceryG++ multilib support. ++ * config/mips/t-sgxx-sde: New. ++ * config/mips/sdemtk.h (MIPS_ARCH_FLOAT_SPEC): Override, adding ++ -mno-float option. ++ ++2008-06-17 Joseph Myers ++ ++ Backport: ++ ++ gcc/ ++ 2008-03-09 Ira Rosen ++ * config/rs6000/rs6000.c (builtin_description): Rename vector ++ left shift operations. ++ * config/rs6000/altivec.md (UNSPEC_VSL): Remove. ++ (altivec_vsl): Rename to ... ++ (ashl3): ... new name. ++ (mulv4sf3, mulv4si3, negv4sf2): Replace gen_altivec_vslw with ++ gen_ashlv4si3. ++ (absv4sf2): Convert to use ashift:V4SI instead of UNSPEC_VSL. ++ ++2008-06-16 Julian Brown ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2007-03-30 Mark Mitchell ++ ++ gcc/testsuite/ ++ * gcc.dg/sibcall-3.c: XFAIL for Thumb. ++ * gcc.dg/sibcall-4.c: Likewise. ++ ++2008-06-16 Paul Brook ++ ++ Merge from Sourcery G++ 4.2 ++ 2007-03-30 Paul Brook ++ gcc/ ++ * calls.c (store_one_arg): Check alignment of mode used for save. ++ ++2008-06-13 Nathan Froyd ++ ++ gcc/ ++ * config.gcc (powerpc-*-linux*): Add rs6000/e500.h to tm_file ++ and rs6000/t-linux to tmake_file. ++ ++2008-06-13 Paul Brook ++ ++ Merge from Sourcery G++ 4.2 ++ Issue #1510 ++ 2007-04-27 Paul Brook ++ gcc/ ++ * cse.c (cse_process_notes): Make sure PLUS are canonical. ++ ++2008-06-13 Julian Brown ++ ++ Merge from Sourcery G++ 4.2: ++ ++ libgcc/ ++ * config.host (arm*-*-linux*, arm*-*-uclinux*, arm*-*-eabi*) ++ (arm*-*-symbianelf): Add arm/t-divmod-ef to tmake_file. 
++ * Makefile.in (LIB2_DIVMOD_EXCEPTION_FLAGS): Set to previous ++ default if not set by a target-specific Makefile fragment. ++ (lib2-divmod-o, lib2-divmod-s-o): Use above. ++ * config/arm/t-divmod-ef: New. ++ ++2008-06-13 Daniel Jacobowitz ++ ++ libgcc/ ++ * shared-object.mk (c_flags-$(base)$(objext)): New. ++ ($(base)$(objext)): Use above. ++ ($(base)_s$(objext)): Likewise. ++ * static-object.mk (c_flags-$(base)$(objext)): New. ++ ($(base)$(objext)): Use above. ++ ++2008-06-13 Julian Brown ++ ++ Merge from Sourcery G++ 4.2: ++ ++ gcc/testsuite/ ++ * lib/target-supports.exp (check_effective_target_vect_int) ++ (check_effective_target_vect_shift) ++ (check_effective_target_vect_long) ++ (check_effective_target_vect_float) ++ (check_effective_target_vect_int_mult): Check for ARM. ++ ++2008-06-12 Catherine Moore ++ ++ Merge from Sourcery G++ 4.2: ++ ++ gcc/ ++ * config/mips/linux64.h: USE_SUBTARGET_SELF_SPECS. ++ * config/mips/sde.h: Likewise. ++ * config/mips/iris6.h: Likewise. ++ ++2008-06-12 Catherine Moore ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2008-02-15 Julian Brown ++ ++ gcc/ ++ * config/mips/t-sgxx-linux: New target fragment. ++ * config/mips/t-sgxxlite-linux: New target fragment. ++ * config/mips/cs-sgxx-linux.h: New header file. ++ * config/mips/cs-sgxxlite-linux.h: New header file. ++ * config/mips/t-none-linux: Remove. ++ * config/mips/cs-linux.h: Remove. ++ * config.gcc (mips*-*-linux*): Handle --enable-extra-sgxx-multilibs ++ and --enable-extra-sgxxlite-multilibs configure options. Use ++ sgxx-specific header files and target fragments. Remove use of ++ t-none-linux and cs-linux.h. ++ ++2008-06-12 Catherine Moore ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2008-03-25 Maxim Kuvyrkov ++ Julian Brown ++ ++ * 74k.md: (r74k_dsp_alu, r74k_dsp_alu_sat, r74k_dsp_mac, r74k_dsp_mac_sat) ++ (r74k_dsp_acc_ext, r74k_dsp_acc_mod): New insn reservations. ++ (r74k_dsp_mac, r74k_dsp_mac_sat, r74k_int_mult, r74k_int_mul3) ++ (r74k_dsp_mac, r74k_dsp_mac_sat): New bypasses. ++ ++ ++2008-06-12 Catherine Moore ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2008-03-25 Maxim Kuvyrkov ++ Julian Brown ++ ++ Merge from MIPS: ++ ++ gcc/ ++ * config/mips/mips-protos.h (dspalu_bypass_p): Add prototype. ++ * config/mips/mips.c (dspalu_bypass_table): New. ++ (dspalu_bypass_p): New. ++ * 24k.md (r24k_dsp_alu, r24k_dsp_mac, r24k_dsp_mac_sat) ++ (r24k_dsp_acc_ext, r24k_dsp_acc_mod): New insn reservations. ++ (r24k_int_mult, r24k_int_mthilo, r24k_dsp_mac, r24k_dsp_mac_sat) ++ (r24k_dsp_acc_ext, r24k_dsp_acc_mod, r24k_dsp_alu): New bypasses. ++ * config/mips/mips.md (dspmac, dspmacsat, accext, accmod, dspalu) ++ (dspalusat): Add insn types. ++ * config/mips/mips-dsp.md (add3) ++ (mips_add_s_) ++ (sub3, mips_sub_s_, mips_addsc) ++ (mips_addwc, mips_modsub, mips_raddu_w_qb, mips_absq_s_) ++ (mips_precrq_qb_ph, mips_precrq_ph_w, mips_precrq_rs_ph_w) ++ (mips_precrqu_s_qb_ph, mips_preceq_w_phl, mips_preceq_w_phr) ++ (mips_precequ_ph_qbl, mips_precequ_ph_qbr, mips_precequ_ph_qbla) ++ (mips_precequ_ph_qbra, mips_preceu_ph_qbl, mips_preceu_ph_qbr) ++ (mips_preceu_ph_qbla, mips_preceu_ph_qbra, mips_shll_) ++ (mips_shll_s_, mips_shll_s_, mips_shrl_qb) ++ (mips_shra_ph, mips_shra_r_, mips_bitrev, mips_insv) ++ (mips_repl_qb, mips_repl_ph, mips_cmp_eq_) ++ (mips_cmp_lt_) ++ (mips_cmp_le_, mips_cmpgu_eq_qb) ++ (mips_cmpgu_lt_qb, mips_cmpgu_le_qb, mips_pick_) ++ (mips_packrl_ph, mips_wrdsp, mips_rddsp): Change type to dspalu. 
++ (mips_dpau_h_qbl, mips_dpau_h_qbr, mips_dpsu_h_qbl, mips_dpsu_h_qbr) ++ (mips_dpaq_s_w_ph, mips_dpsq_s_w_ph, mips_mulsaq_s_w_ph) ++ (mips_maq_s_w_phl, mips_maq_s_w_phr, mips_maq_sa_w_phr: Set type to ++ dspmac. ++ (mips_dpaq_sa_l_w, mips_dpsq_sa_l_w, mips_maq_sa_w_phl): Set type to ++ dspmacsat. ++ (mips_extr_w, mips_extr_r_w, mips_extr_rs_w, mips_extp, mips_extpdp): ++ Set type to accext. ++ (mips_shilo, mips_mthlip): Set type to accmod. ++ * config/mips/mips-dspr2.md (mips_absq_s_qb, mips_addu_s_ph) ++ (mips_adduh_r_qb): Set type to dspalusat. ++ (mips_addu_ph, mips_adduh_qb, mips_append, mips_balign) ++ (mips_cmpgdu_eq_qb, mips_cmpgdu_lt_qb, mips_cmpgdu_le_qb) ++ (mips_precr_qb_ph, mips_precr_sra_ph_w, mips_precr_sra_r_ph_w) ++ (mips_prepend, mips_shra_qb, mips_shra_r_qb, mips_shrl_ph) ++ (mips_subu_ph, mips_subuh_qb, mips_subuh_r_qb, mips_addqh_ph) ++ (mips_addqh_r_ph, mips_addqh_w, mips_addqh_r_w, mips_subqh_ph) ++ (mips_subqh_r_ph, mips_subqh_w, mips_subqh_r_w): Set type to dspalu. ++ (mips_dpa_w_ph, mips_dps_w_ph, mips_mulsa_w_ph, mips_dpax_w_ph) ++ (mips_dpsx_w_ph, mips_dpaqx_s_w_ph, mips_dpsqx_s_w_ph): Set type to ++ dspmac. ++ (mips_subu_s_ph): Set type to dspalusat. ++ (mips_dpaqx_sa_w_ph, mips_dpsqx_sa_w_ph): Set type to dspmacsat. ++ ++2008-06-12 Joseph Myers ++ ++ gcc/testsuite/ ++ * lib/target-supports.exp ++ (check_effective_target_powerpc_hard_double): New. ++ * gcc.dg/tree-ssa/loop-19.c: Use powerpc_hard_double instead of ++ powerpc*-*-*. ++ ++2008-06-12 Joseph Myers ++ ++ gcc/testsuite/ ++ * gcc.dg/dfp/convert-bfp-6.c, gcc.dg/dfp/convert-bfp-9.c: XFAIL ++ for lax_strtofp. ++ ++2008-06-12 Joseph Myers ++ ++ Backport: ++ ++ gcc/ ++ 2008-05-21 Janis Johnson ++ * doc/sourcebuild.texi (Test Directives): Add dg-xfail-run-if. ++ ++ gcc/testsuite/ ++ 2008-05-21 Janis Johnson ++ * lib/target-supports-dg.exp (dg-xfail-run-if): New. ++ ++2008-06-12 Catherine Moore ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2008-03-25 Maxim Kuvyrkov ++ Julian Brown ++ ++ Merge from MIPS: ++ ++ gcc/ ++ * config/mips/mips.c (mips_mult_madd_chain_bypass_p): New. ++ * config/mips/mips-protos.h (mips_mult_madd_chain_bypass_p): Add ++ prototype. ++ * config/mips/74k.md: Add bypasses for r74k_int_mult, r74_int_madd, ++ r74k_int_mul3. ++ ++ ++2008-06-11 Catherine Moore ++ ++ Backport: ++ ++ gcc/ ++ 2008-06-06 Sandip Matte ++ ++ * doc/invoke.texi: Document -march=xlr. ++ * config/mips/xlr.md: New file. ++ * config/mips/mips.md: Include it. ++ (cpu): Add "xlr". ++ * config/mips/mips.h (PROCESSOR_XLR): New processor_type. ++ * config/mips/mips.c (mips_cpu_info_table): Add an XLR entry. ++ (mips_rtx_cost_data): Likewise. ++ ++2008-06-10 Catherine Moore ++ ++ Merge from Sourcery G++ 4.2: ++ ++ ++ 2008-03-27 Daniel Jacobowitz ++ ++ gcc/ ++ * config/mips/mips.md (loadgp_nonpic): New pattern. ++ (builtin_longjmp): Use for all TARGET_ABICALLS. ++ (exception_receiver): Revert local changes. ++ * config/mips/mips.c (mips_gnu_local_gp, mips_got_base): New functions. ++ (struct machine_function): Update the comment for ++ mips16_gp_pseudo_rtx. ++ (mips_call_tls_get_addr, mips_legitimize_tls_address): Use ++ mips_got_base. ++ (mips_restore_gp): Revert local changes. Assert PIC. ++ (mips_load_call_address, mips_expand_call): Revert local changes. ++ (mips_conditional_register_usage): Make $gp ordinary for ++ non-PIC. ++ (mips_tls_got_ref_1, mips_tls_got_ref_p): Delete. ++ (mips_function_has_gp_insn, mips_global_pointer): Revert local changes. ++ (mips_save_reg_p): Check for fixed $gp. 
++ (mips_gnu_local_gp_rtx): Renamed from mips_gnu_local_gp. ++ (mips_emit_loadgp): Use mips_gnu_local_gp. ++ (mips_dangerous_for_la25_p): Revert local change. ++ (mips16_gp_pseudo_reg): Use gen_loadgp_nonpic. ++ (mips_extra_live_on_entry): Revert local change. ++ * config/mips/mips.h (TARGET_USE_GOT): Require flag_pic. ++ (TARGET_CALL_CLOBBERED_GP): Likewise. ++ (TARGET_NONPIC_ABICALLS): Define. ++ 2008-03-19 Mark Shinwell ++ Catherine Moore ++ Daniel Jacobowitz ++ ++ gcc/ ++ * configure.ac: Add --enable-mips-nonpic. ++ * configure: Regenerated. ++ * config.gcc: Set TARGET_ABICALLS_DEFAULT instead of MASK_ABICALLS ++ for MIPS targets. Handle --enable-mips-nonpic. ++ * config/mips/linux.h (TARGET_DEFAULT): Delete. ++ (SUBTARGET_ASM_SPEC): Use -mnon-pic-abicalls. ++ * config/mips/elfoabi.h, config/mips/linux64.h, ++ config/mips/sde.h, config/mips/iris6.h, config/mips/wrs-linux.h, ++ config/mips/vr.h: Use SUBTARGET_SELF_SPECS. ++ * config/mips/mips.md (exception_receiver): Disable for ++ non-PIC. ++ * config/mips/mips.c (mips_classify_symbol): Do not use the GOT ++ for non-PIC. ++ (mips_tls_symbol_ref_1, mips_cannot_force_const_mem): Correct comments. ++ (mips_restore_gp): Skip for non-PIC. ++ (mips_load_call_address): Skip lazy binding for non-PIC. ++ (mips_expand_call): Skip GP usage for non-PIC. ++ (override_options): Remove flag_pic override. Use sorry for ++ other ABIs. ++ (mips_file_start): Emit pic0 for non-PIC. ++ (mips_tls_got_ref_1, mips_tls_got_ref_p): New. ++ (mips_function_has_gp_insn): Use mips_tls_got_ref_p. Skip jump ++ tables. ++ (mips_global_pointer, mips_current_loadgp_style): Adjust for non-PIC. ++ (mips_expand_prologue): Do not cprestore for non-PIC. ++ (mips_function_rodata_section): Skip for non-PIC. ++ (mips_dangerous_for_la25_p): Likewise. ++ (mips_extra_live_on_entry): Skip for non-PIC. ++ * config/mips/mips.h (TARGET_GPWORD): Require flag_pic. ++ (ABICALLS_SPEC, ABICALLS_SELF_SPECS, SUBTARGET_SELF_SPECS) ++ (DRIVER_SELF_SPECS): New. ++ (MIPS_CALL): Correct for non-PIC. ++ ++2008-06-09 Kazu Hirata ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2008-05-28 Kazu Hirata ++ ++ Issue 2895 ++ gcc/ ++ * config.gcc (arm*-*-linux*): Handle enable_extra_asa_multilibs. ++ enable_extra_asa_multilibs. ++ * config/arm/t-asa: New. ++ ++ 2008-05-28 Kazu Hirata ++ ++ * config/arm/t-asa (MULTILIB_EXCEPTIONS): Remove ++ march=armv4t/mfpu=neon* and march=armv4t/*mfloat-abi=softfp. Add ++ *march=armv4t*/*mfpu=neon* and *march=armv4t*/*mfloat-abi=softfp*. ++ (MULTILIB_ALIASES): Remove march?armv4t=mthumb/march?armv4t* and ++ march?armv6=mthumb/march?armv6*. Add ++ march?armv4t=mthumb/march?armv4t, march?armv6=mthumb/march?armv6, ++ and ++ march?armv6/mfloat-abi?softfp=mthumb/march?armv6/mfloat-abi?softfp. ++ ++2008-06-09 Joseph Myers ++ ++ gcc/testsuite/ ++ * gcc.target/powerpc/20030218-1.c: Separate dg-message and ++ dg-error for two diagnostics on the same line. ++ ++2008-06-09 Catherine Moore ++ ++ From 4.2 branch: ++ ++ gcc/testsuite/ ++ * gcc.target/mips/branch-1.c: Support OCTEON. ++ ++2008-06-09 Joseph Myers ++ ++ Backport: ++ ++ gcc/testsuite/ ++ 2008-05-20 Janis Johnson ++ * g++.dg/ext/vector14.C: Ignore a possible warning. ++ ++2008-06-09 Joseph Myers ++ ++ Backport: ++ ++ gcc/testsuite/ ++ 2008-06-09 Joseph Myers ++ * gcc.dg/pr34856.c: Condition use of -maltivec on ++ powerpc_altivec_ok. Use -w on other powerpc*-*-linux*. ++ ++2008-06-09 Catherine Moore ++ ++ From 4.2 branch: ++ ++ gcc/testsuite/ ++ * gcc.target/mips/mips-nonpic: New testsuite. 
++
++2008-06-09 Catherine Moore
++
++ gcc/testsuite/
++ * gcc.target/mips/mips32-dsp-run.c (mipsisa32-sde-elf): Add as
++ target.
++ * gcc.target/mips/mips32-dsp.c: Likewise.
++
++2008-06-07 Joseph Myers
++
++ Backport:
++
++ gcc/testsuite/
++ 2008-04-04 Janis Johnson
++ * g++.dg/other/anon5.C: Don't depend on line number for error message.
++ * gcc.dg/torture/builtin-modf-1.c: Use special options for
++ powerpc*-*-linux*.
++ * gcc.dg/var-expand3.c: Skip for powerpc-linux if not on AltiVec HW.
++ * gcc.dg/pr34856.c: Use -maltivec on powerpc linux.
++
++2008-06-07 Joseph Myers
++
++ gcc/testsuite/
++ * gcc.target/powerpc/altivec-24.c, gcc.target/powerpc/pr35907.c:
++ Correct target selector syntax.
++
++2008-06-06 Sandra Loosemore
++
++ From 4.2 branch:
++
++ * release-notes-csl.xml (GCC stack size limit increased):
++ Conditionalize release note for host.
++ (UNC pathname bug fix): Likewise.
++
++2008-06-06 Joseph Myers
++
++ gcc/
++ * config/arm/wrs-linux.h (SUBTARGET_EXTRA_LINK_SPEC): Don't pass
++ --be8 for -r links.
++
++2008-06-06 Joseph Myers
++
++ Backport:
++
++ libstdc++-v3/
++ 2008-06-06 Joseph Myers
++ * configure.ac: Do not check for gconv.h.
++ * crossconfig.m4 (GLIBCXX_CROSSCONFIG): Do not test for gconv.h or
++ gconf.h. For glibc and uClibc systems, define
++ _GLIBCXX_USE_RANDOM_TR1 and HAVE_MMAP and use AC_LC_MESSAGES and
++ AM_ICONV.
++ * configure, config.h.in: Regenerate.
++
++2008-06-06 Joseph Myers
++
++ Backport:
++
++ libstdc++-v3/
++ 2008-06-06 Joseph Myers
++ * testsuite/17_intro/headers/all.cc,
++ testsuite/17_intro/headers/all_c++200x_compatibility.cc,
++ testsuite/17_intro/headers/all_pedantic_errors.cc,
++ testsuite/ext/headers.cc: Only include
++ and if
++ _GLIBCXX_HAVE_ICONV.
++
++2008-06-05 Catherine Moore
++
++ Merge from Sourcery G++ 4.2:
++
++ 2006-12-15 Richard Sandiford
++
++ gcc/testsuite/
++ * gcc.target/mips/mips.exp (setup_mips_tests): Record whether
++ endianness is forced. Treat -mabicalls and -mno-abicalls as
++ ABI options.
++ (is_gp32_flag): Treat -mabi=32 as a 32-bit option.
++ (is_gp64_flag): New function.
++ (dg-mips-options): Generalize -mgp64 handling to is_gp64_flag.
++ Do not set the ABI if the arguments already specify one.
++ Skip tests if the arguments specify an incompatible ABI.
++ Use -mno-abicalls for -mabi=eabi.
++ * gcc.target/mips/octeon-1.c, gcc.target/mips/octeon-2.c: New tests.
++ * gcc.target/mips/octeon-3.c, gcc.target/mips/octeon-4.c: Likewise ++ * gcc.target/mips/octeon-5.c, gcc.target/mips/octeon-6.c: Likewise ++ * gcc.target/mips/octeon-7.c, gcc.target/mips/octeon-8.c: Likewise ++ * gcc.target/mips/octeon-9.c, gcc.target/mips/octeon-10.c: Likewise ++ * gcc.target/mips/octeon-11.c, gcc.target/mips/octeon-12.c: Likewise ++ * gcc.target/mips/octeon-13.c, gcc.target/mips/octeon-14.c: Likewise ++ * gcc.target/mips/octeon-15.c, gcc.target/mips/octeon-16.c: Likewise ++ * gcc.target/mips/octeon-17.c, gcc.target/mips/octeon-18.c: Likewise ++ * gcc.target/mips/octeon-19.c, gcc.target/mips/octeon-20.c: Likewise ++ * gcc.target/mips/octeon-21.c, gcc.target/mips/octeon-22.c: Likewise ++ * gcc.target/mips/octeon-23.c, gcc.target/mips/octeon-24.c: Likewise ++ * gcc.target/mips/octeon-25.c, gcc.target/mips/octeon-26.c: Likewise ++ * gcc.target/mips/octeon-27.c, gcc.target/mips/octeon-28.c: Likewise ++ * gcc.target/mips/octeon-29.c, gcc.target/mips/octeon-30.c: Likewise ++ * gcc.target/mips/octeon-31.c, gcc.target/mips/octeon-32.c: Likewise ++ * gcc.target/mips/octeon-33.c, gcc.target/mips/octeon-34.c: Likewise ++ * gcc.target/mips/octeon-35.c, gcc.target/mips/octeon-36.c: Likewise ++ * gcc.target/mips/octeon-37.c, gcc.target/mips/octeon-38.c: Likewise ++ * gcc.target/mips/octeon-39.c, gcc.target/mips/octeon-40.c: Likewise ++ * gcc.target/mips/octeon-41.c, gcc.target/mips/octeon-42.c: Likewise ++ * gcc.target/mips/octeon-43.c, gcc.target/mips/octeon-44.c: Likewise ++ * gcc.target/mips/octeon-45.c, gcc.target/mips/octeon-46.c: Likewise ++ * gcc.target/mips/octeon-47.c, gcc.target/mips/octeon-48.c: Likewise ++ * gcc.target/mips/octeon-49.c, gcc.target/mips/octeon-50.c: Likewise ++ * gcc.target/mips/octeon-51.c, gcc.target/mips/octeon-52.c: Likewise ++ * gcc.target/mips/octeon-53.c, gcc.target/mips/octeon-54.c: Likewise ++ * gcc.target/mips/octeon-55.c, gcc.target/mips/octeon-56.c: Likewise ++ * gcc.target/mips/scc-1.c, gcc.target/mips/scc-2.c: Likewise. ++ * gcc.target/mips/branch-1.c: Likewise. ++2008-06-05 Catherine Moore ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2006-12-15 Richard Sandiford ++ ++ Adapted from a patch by Cavium Networks. ++ ++ gcc/ ++ * config/mips/mips.opt (mocteon-useun): New option. ++ * config/mips/mips-protos.h (mask_low_and_shift_len): Declare. ++ (mips_lower_sign_bit_p, mips_use_ins_p, mips_use_ext_p): Declare. ++ (mips_adjust_register_ext_operands): Likewise. ++ * config/mips/mips.h (PROCESSOR_OCTEON): New processor_type. ++ (TARGET_OCTEON): New macro. ++ (ISA_HAS_DCLZ_DCLO): Delete. ++ (ISA_HAS_POPCOUNT): New macro. ++ (ISA_HAS_ROTR_SI, ISA_HAS_ROTR_DI): Include TARGET_OCTEON. ++ (ISA_HAS_SEB_SEH, ISA_HAS_INS_EXT): Likewise. ++ (ISA_HAS_EXTS, ISA_HAS_BBIT, ISA_HAS_SEQ_SNE, ISA_HAS_BADDU) ++ (ISA_HAS_UL_US, ISA_HAS_CINS): New macros. ++ (ASM_SPEC): Pass down -mocteon-useun and -mno-octeon-useun. ++ * config/mips/mips.c (mips_cpu_info_table): Add an octeon entry. ++ (mips_rtx_cost_data): Likewise. ++ (mask_low_and_shift_len, mips_get_seq_sne_operand): New functions. ++ (mips_emit_scc): Use mips_get_seq_sne_operand to choose between ++ seq/sne and xor/addu. ++ (mips_expand_unaligned_load): Use mov_ulw and mov_uld if ++ ISA_HAS_UL_US. ++ (mips_expand_unaligned_store): Likewise mov_usw and mov_usd. ++ (mips_lower_sign_bit_p, mips_use_ins_p, mips_use_ext_p): New functions. ++ (mips_adjust_register_ext_operands): Likewise. ++ (print_operand): Add %E, %G and %H formats. ++ (mips_issue_rate): Return 2 when scheduling for PROCESSOR_OCTEON. 
++ (mips_multipass_dfa_lookahead): Likewise. ++ * config/mips/octeon.md: New file. ++ * config/mips/mips.md: Include it. ++ (UNSPEC_UNALIGNED_LOAD, UNSPEC_UNALIGNED_STORE): New constants. ++ (type): Add pop. ++ (cpu): Add octeon. ++ (SUBDI): New mode macro. ++ (topbit): New mode attribute. ++ (any_extract, any_shiftrt, equailty_op): New code macros. ++ (*baddu_si, *baddu_disi, *baddu_didi, *baddu_didi2, popcount2) ++ (*_trunc_exts, *trunc_zero_ext_): ++ New patterns. ++ (zero_extendsidi2): Turn into a define_expand. Rename old ++ define_insn_and_split to... ++ (*zero_extendsidi2): ...this and require !ISA_HAS_EXT_INS. ++ (*clear_upper32): Require !ISA_HAS_EXT_INS. ++ (*zero_extendsidi2_dext, *clear_upper32_dext): New patterns. ++ (extv): Change operand 1 from a QImode memory_operand to any ++ nonimmediate_operand. Try using extvsi and extvdi for register ++ extractions if ISA_HAS_EXTS. ++ (extv, *extv_truncdi): New patterns. ++ (extzv): Use mips_use_ext_p instead of mips_use_ins_ext_p. ++ Call mips_adjust_register_ext_operands. ++ (extzv): Use mips_use_ext_p instead of mips_use_ins_ext_p. ++ (*extzv_truncdi, *extz_truncdi_exts): New patterns. ++ (insv): Use mips_use_ins_p instead of mips_use_ins_ext_p. ++ Fix formatting. ++ (insv): Use mips_use_ins_p instead of mips_use_ins_ext_p. ++ (*insvdi, *insv__di, *insvdi_clear_upper32) ++ (*cins): New patterns. ++ (mov_l, mov_r, mov_l, mov_r): Require ++ ISA_HAS_UL_US. ++ (mov_u, mov_u): New patterns. ++ (*truncsi_storeqi, *truncsi_storehi): Likewise. ++ (*branch_bit, *branch_bit_testdi): New patterns. ++ (*branch_bit_inverted): New pattern. ++ (*branch_bit_truncdi_inverted): Likewise. ++ (*seq_, *seq__mips16, *sne_): Require ++ !ISA_HAS_SEQ_SNE. ++ (*seq_si_to_di, *seq_si_to_di_mips16, *sne_si_to_di): New patterns. ++ (*s__s, *s_si_to_di_s): Likewise. ++ * config/mips/predicates.md (mask_low_and_shift_operator): New ++ predicate. ++ ++2008-06-05 Joseph Myers ++ ++ gcc/ ++ * config.gcc (powerpc-*-linux*spe*): Use t-dfprules. ++ * config/rs6000/dfp.md (negdd2, absdd2, negtd2, abstd2): Do not ++ enable for TARGET_E500_DOUBLE. ++ (*movdd_softfloat32): Also enable for !TARGET_FPRS. ++ * config/rs6000/rs6000.c (invalid_e500_subreg): Treat decimal ++ floating-point modes like integer modes for E500 double. ++ (rs6000_legitimate_offset_address_p): Likewise. ++ (rs6000_legitimize_address): Likewise. Do not allow REG+REG ++ addressing for DDmode for E500 double. ++ (rs6000_hard_regno_nregs): Do not treat decimal floating-point ++ modes as using 64-bits of registers for E500 double. ++ (spe_build_register_parallel): Do not handle DDmode or TDmode. ++ (rs6000_spe_function_arg): Do not handle DDmode or TDmode ++ specially for E500 double. ++ (function_arg): Do not call rs6000_spe_function_arg for DDmode or ++ TDmode for E500 double. ++ (rs6000_gimplify_va_arg): Only handle SDmode in registers ++ specially if TARGET_HARD_FLOAT && TARGET_FPRS. ++ (rs6000_split_multireg_move): Do not handle TDmode specially for ++ E500 double. ++ (spe_func_has_64bit_regs_p): Do not treat DDmode or TDmode as ++ using 64-bit registers for E500 double. ++ (emit_frame_save): Do not handle DDmode specially for E500 double. ++ (gen_frame_mem_offset): Likewise. ++ (rs6000_function_value): Do not call spe_build_register_parallel ++ for DDmode or TDmode. ++ (rs6000_libcall_value): Likewise. ++ * config/rs6000/rs6000.h (LOCAL_ALIGNMENT, MEMBER_TYPE_FORCES_BLK, ++ DATA_ALIGNMENT, CLASS_MAX_NREGS): Do not handle DDmode specially ++ for E500 double. 
++ ++2008-06-05 Joseph Myers ++ ++ gcc/ ++ * dfp.c (WORDS_BIGENDIAN): Define to 0 if not defined. ++ (encode_decimal64, decode_decimal64, encode_decimal128, ++ decode_decimal128): Reverse order of 32-bit parts of value if host ++ and target endianness differ. ++ ++ libdecnumber/ ++ * dconfig.h: New. ++ * decContext.c, decExcept.c, decExcept.h, decLibrary.c, ++ decNumber.c, decNumberLocal.h, decRound.c, dpd/decimal128.c, ++ dpd/decimal32.c, dpd/decimal64.c: Include dconfig.h not config.h. ++ * dpd/decimal128Local.h (decimal128SetSign, decimal128ClearSign, ++ decimal128FlipSign): Use WORDS_BIGENDIAN not ++ FLOAT_WORDS_BIG_ENDIAN. ++ * bid/host-ieee128.c: Include dconfig.h. ++ (__host_to_ieee_128, __ieee_to_host_128): Swap 64-bit halves of ++ value if WORDS_BIGENDIAN. ++ ++ libgcc/ ++ * Makefile.in (DECNUMINC): Remove ++ -I$(MULTIBUILDTOP)../../libdecnumber. ++ * gstdint.h: New. ++ ++2008-06-05 Joseph Myers ++ ++ gcc/ ++ * config/arm/arm.c (arm_init_neon_builtins): Move initialization ++ with function calls after declarations. Lay out ++ neon_float_type_node before further use. ++ ++2008-06-04 Catherine Moore ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2008-03-27 Robin Randhawa ++ ++ * libstdc++-v3/config/cpu/mips/atomicity.h : Added memory barriers ++ to enforce strict ordering on weakly ordered systems. ++ ++2008-06-04 Paul Brook ++ ++ Fix Issue #2917 ++ gcc/ ++ * config/arm/arm.c (neon_vector_mem_operand): Handle element/structure ++ loads. Allow PRE_DEC. ++ (output_move_neon): Handle PRE_DEC. ++ (arm_print_operand): Add 'A' for neon structure loads. ++ * config/arm/arm-protos.h (neon_vector_mem_operand): Update prototype. ++ * config/arm/neon.md (movmisalign): Use Um constraint and %A. ++ * config/arm/constraints.md (Un, Us): Update neon_vector_mem_operand ++ calls. ++ (Um): New constraint. ++ ++2008-06-04 Joseph Myers ++ ++ Backport: ++ ++ gcc/testsuite/ ++ 2008-06-04 Joseph Myers ++ * lib/target-supports.exp (check_effective_target_powerpc_spu): ++ Call check_effective_target_powerpc_altivec_ok. ++ * gcc.target/powerpc/dfp-dd.c, gcc.target/powerpc/dfp-td.c, ++ gcc.target/powerpc/ppc32-abi-dfp-1.c, ++ gcc.target/powerpc/ppu-intrinsics.c: Require powerpc_fprs. ++ ++2008-06-04 Kazu Hirata ++ ++ Issue 1073 ++ gcc/ ++ * config/m68k/m68k.c (m68k_tune_flags): New. ++ (override_options): Compute m68k_tune_flags. ++ (MULL_COST, MULW_COST): Update for various variants of CFV2. ++ * config/m68k/m68k.h (TUNE_MAC, TUNE_EMAC): New. ++ ++2008-06-03 Nathan Froyd ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2008-06-02 Nathan Froyd ++ ++ gcc/ ++ * config/rs6000/t-linux (MULTILIB_OPTIONS): Add te500mc. ++ (MULTILIB_DIRNAMES): Likewise. ++ (MULTILIB_EXCEPTIONS): Handle te500mc. ++ * config/rs6000/linux.h (CC1_EXTRA_SPEC): Handle te500mc. ++ (ASM_DEFAULT_SPEC): Likewise. ++ * config/rs6000/rs6000.h (OPTION_DEFAULT_SPECS): Handle te500mc. ++ ++2008-06-03 Nathan Froyd ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2008-06-02 Nathan Froyd ++ ++ NOT ASSIGNED TO FSF ++ COPYRIGHT FREESCALE ++ ++ gcc/doc: ++ * invoke.texi: Mention e500mc as a legitimate Power cpu. ++ ++ gcc/ ++ * config.gcc: Mention e500mc as a legitimate --with-cpu option. ++ * config/rs6000/rs6000.c (ppce500mc_cost): New. ++ (rs6000_override_options): Add e500mc to processor_target_table. ++ Enable isel for e500mc. Disable string instructions for e500mc. ++ Set rs6000_cost for e500mc. ++ (rs6000_issue_rate): Handle CPU_PPCE500MC. ++ * config/rs6000/rs6000.h (ASM_CPU_SPEC): Handle mcpu=e500mc. ++ (enum processor_type): Add PROCESSOR_PPCE500MC. 
++ (TARGET_ISEL): Use rs6000_isel. ++ * config/rs6000/e500mc.md: New file. ++ * config/rs6000/rs6000.md: Include it. ++ (define_attr "cpu"): Add e500mc. ++ (define_attr "type"): Add insert_dword. ++ * config/rs6000/e500.h (TARGET_ISEL): Remove. ++ (CHECK_E500_OPTIONS): Remove TARGET_ISEL condition. ++ ++ 2008-06-02 Nathan Froyd ++ ++ * release-notes-csl.xml (E500mc support): New. ++ ++ gcc/ ++ * config/rs6000/e500mc.md: Eliminate duplication. ++ ++2008-06-03 Joseph Myers ++ ++ Backport: ++ ++ gcc/ ++ 2008-02-22 Nathan Froyd ++ * config/rs6000/rs6000.c (rs6000_legitimize_address): Check to ++ ensure that we can address an entire entity > 8 bytes. Don't ++ generate reg+reg addressing for such data. ++ ++ 2008-03-07 Peter Bergner ++ PR target/35373 ++ * config/rs6000/rs6000.c (rs6000_legitimize_address): Don't generate ++ reg+const addressing for Altivec modes. Don't generate reg+reg ++ addressing for TFmode or TDmode quantities. ++ ++2008-06-03 Nathan Froyd ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2007-03-28 Daniel Jacobowitz ++ ++ gcc/testsuite/ ++ * lib/target-supports.exp (check_effective_target_powerpc_spe_ok): New. ++ ++2008-06-03 Nathan Froyd ++ ++ gcc/ ++ * config/rs6000/predicates.md (save_world_operation): Adjust checks. ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2008-05-22 Nathan Froyd ++ ++ Issue #3062 ++ ++ * release-notes-csl.xml (E500 size optimization compiler crash): New. ++ ++ gcc/ ++ * config/rs6000/rs6000.c (rs6000_emit_prologue): Mark the ++ adjustment to r11 as frame related when generating out-of-line ++ prologues. ++ ++ 2008-03-07 Nathan Froyd ++ ++ gcc/ ++ * config/rs6000/rs6000.c (rs6000_savres_strategy): Be slightly ++ smarter about restoring with an out-of-line function. ++ (rs6000_emit_prologue): Make sure we only set r11 once. Be ++ smarter about restoring LR. ++ ++ 2008-02-29 Nathan Froyd ++ ++ gcc/ ++ * config/rs6000/rs6000.c (emit_allocate_stack): Add copy_r11 ++ parameter. Copy stack_reg to r11 where appropriate. ++ (rs6000_stack_info): Only add padding for SPE save area if we ++ are saving SPE GPRs and CR. ++ (saveres_routine_syms): New variable. ++ (FIRST_SAVRES_REGISTER, LAST_SAVRES_REGISTER, N_SAVRES_REGISTERS): ++ Define. ++ (rs6000_savres_routine_sym): New function. ++ (rs6000_emit_stack_reset, rs6000_restore_saved_cr): New functions, ++ split out of... ++ (rs6000_emit_epilogue): ...here. Use rs6000_use_multiple_p and ++ rs6000_savres_strategy. Restore GPRs out-of-line if appropriate. ++ Tweak FPR out-of-line saving. ++ (rs6000_make_savres_rtx): New function. ++ (rs6000_use_multiple_p): New function. ++ (rs6000_savres_strategy): New function. ++ (rs6000_emit_prologue): Use rs6000_savres_strategy. Save GPRs ++ out-of-line if appropriate. ++ * config/rs6000/sysv4.h (FP_SAVE_INLINE): Save FPRs out-of-line ++ if we are optimizing for size. ++ (GP_SAVE_INLINE): Define. ++ (SAVE_FP_SUFFIX, RESTORE_FP_SUFFIX): Only use _l on 64-bit targets. ++ * config/rs6000/darwin.h (GP_SAVE_INLINE): Define. ++ * config/rs6000/aix.h (GP_SAVE_INLINE): Define. ++ * config/rs6000/rs6000.md (*save_gpregs_): New insn. ++ (*save_fpregs_): Add use of r11. ++ (*restore_gpregs_): New insn. ++ (*return_and_restore_gpregs_): New insn. ++ (*return_and_restore_fpregs_): Adjust to clobber LR and ++ use r11. ++ * config/rs6000/spe.md (*save_gpregs_spe): New insn. ++ (*restore_gpregs_spe): New insn. ++ (*return_and_restore_gpregs_spe): New insn. ++ ++2008-06-02 Nathan Froyd ++ ++ gcc/ ++ * config/rs6000/rs6000.md (absv2sf2, negv2sf2, addv2sf3, subv2sf3, ++ mulv2sf3, divv2sf3): New expanders. 
++ * config/rs6000/spe.md (spe_evabs, spe_evand, spe_evaddw, ++ spe_evdivws): Rename to use standard GCC names. ++ * config/rs6000/paired.md (negv2sf, absv2sf2, addv2sf3, subv2sf3, ++ mulv2sf3, divv2sf3): Rename to avoid conflict with the new expanders. ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2007-09-19 Nathan Froyd ++ ++ gcc/ ++ * config/rs6000/rs6000.c (bdesc_2arg, bdesc_1arg): Use new CODE_FOR_ ++ names for renamed patterns. ++ ++2008-05-30 Joseph Myers ++ ++ gcc/ ++ * config/arm/wrs-linux.h (CC1_SPEC): Allow -tcortex-a8-be8 ++ -mfloat-abi=softfp. ++ (SUBTARGET_EXTRA_ASM_SPEC): Use -meabi=5. ++ * config/arm/t-wrs-linux (MULTILIB_EXCEPTIONS): Remove ++ *cortex-a8-be8*/*mfloat-abi=softfp*. ++ (MULTILIB_ALIASES): Add ++ tcortex-a8-be8=tcortex-a8-be8/mfloat-abi?softfp. ++ ++2008-05-30 Maxim Kuvyrkov ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2006-12-12 Richard Sandiford ++ gcc/testsuite/ ++ * gcc.dg/torture/m68k-interrupt-1.c: New file. ++ 2006-06-23 Richard Sandiford ++ gcc/testsuite/ ++ * gcc.dg/tree-ssa/20040204-1.c: Don't XFAIL for m68k*-*-*. ++ ++2008-05-30 Nathan Froyd ++ ++ gcc/ ++ * config/rs6000/rs6000.c (ppc8540_cost): Fix typo. ++ (spe_synthesize_frame_save): Remove declaration. ++ ++2008-05-30 Nathan Froyd ++ ++ gcc/ ++ * tree-ssa-remove-local-statics.c ++ (find_static_nonvolatile_declarations): Use SSA_OP_VDEF. ++ (unstaticize_variable): Likewise. ++ (dump_final_bitmaps): Remove. ++ ++2008-05-30 Maxim Kuvyrkov ++ ++ Tree->const_tree fix. ++ ++ gcc/ ++ * config/m68k/m68k.c (m68k_return_in_memory): Fix arguments types. ++ ++2008-05-30 Maxim Kuvyrkov ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2007-02-16 Richard Sandiford ++ gcc/ ++ * config/m68k/m68k.h (INDEX_REG_CLASS): Delete in favor of... ++ (MODE_INDEX_REG_CLASS): ...this new macro. Return NO_REGS unless ++ MODE_OK_FOR_INDEX_P. ++ (MODE_OK_FOR_INDEX_P): New macro. ++ (REGNO_OK_FOR_INDEX_P): Delete in favor of... ++ (REGNO_MODE_OK_FOR_INDEX_P): ...this new macro. Return false ++ unless MODE_OK_FOR_INDEX_P. ++ (REG_OK_FOR_INDEX_P): Delete in favor of... ++ (REG_MODE_OK_FOR_INDEX_P): ...this new macro. Return false ++ unless MODE_OK_FOR_INDEX_P. ++ * m68k-protos.h (m68k_legitimate_index_reg_p): Add mode argument. ++ * m68k.c (m68k_legitimate_index_reg_p, m68k_decompose_index): ++ Add mode argument. Use it. ++ * config/m68k/m68k.md (tst_cf, cmp_cf, movsf_cf_hard) ++ (movdf_cf_hard, extendsfdf2_cf, truncdfsf2_cf, ftrunc2_cf) ++ (add3_cf, sub3_cf, fmul3_cf, div3_cf) ++ (neg2_cf, sqrt2_cf, abs2_cf): Replace "Q" ++ constraints for FP addresses with "m" constraints. ++ 2007-02-16 Nathan Sidwell ++ gcc/testsuite/ ++ * gcc.dg/m68k-fp-1.c: New. ++ ++2008-05-30 Julian Brown ++ ++ gcc/ ++ * config/arm/cortex-r4.md: Update GPLv3 notice. ++ * hwdiv.md: Likewise. ++ * marvell-f-vfp.md: Likewise. ++ * marvell-f.md: Likewise. ++ * nocrt0.h: Likewise. ++ * vfp11.md: Likewise. ++ ++2008-05-30 Maxim Kuvyrkov ++ ++ Merge from Sourcery G++ 4.2 (Add missing testcase from ++ earlier merge): ++ ++ 2008-02-01 Joseph Myers ++ gcc/testsuite/ ++ * gcc.target/m68k/xgot-1.c: New test. ++ ++2008-05-30 Maxim Kuvyrkov ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2008-01-11 Kazu Hirata ++ Issue 2396 ++ gcc/ ++ * configure.ac: Teach that fido supports .debug_line. ++ * configure: Regenerate. ++ ++2008-05-30 Maxim Kuvyrkov ++ ++ Merge from Sourcery G++ 4.2 (config.gcc part was merged earlier): ++ ++ 2007-03-26 Nathan Sidwell ++ gcc/ ++ * config.gcc (m68k-*-linux*): Add sysroot-suffix.h to tm_file. Add ++ m68k/t-floatlib, m68k/t-linux & m68k/t-mlibs to tmake_file. 
++ * config/m68k/t-linux: New. ++ * doc/install.texi: Document m68k-*-linux is now multilibbed by ++ default. ++ ++2008-05-30 Maxim Kuvyrkov ++ ++ Revert: ++ ++ 2008-05-29 Maxim Kuvyrkov ++ Merge from Sourcery G++ 4.2: ++ 2007-02-16 Paul Brook ++ Richard Sandiford ++ gcc/ ++ * config/m68k/m68k.md (UNSPEC_MOVEQ_MEM): New constant. ++ (*movsi_smallconst): New pattern. ++ ++2008-05-29 Maxim Kuvyrkov ++ ++ * config/m68k/m68k.md: Fix previous commit. ++ ++2008-05-29 Maxim Kuvyrkov ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2007-02-16 Richard Sandiford ++ gcc/ ++ * config/m68k/m68k.md (tst_cf, cmp_cf, movsf_cf_hard) ++ (movdf_cf_hard, extendsfdf2_cf, truncdfsf2_cf, floatsi2_cf) ++ (floathi2_cf, floatqi2_cf, ftrunc2_cf) ++ (fixqi2_cf, fixhi2_cf, fixsi2_cf) ++ (add3_cf, sub3_cf, fmul3_cf, div3_cf) ++ (divmodsi4_cf, udivmodsi4_cf) ++ (neg2_cf, sqrt2_cf, abs2_cf): Replace "Q" ++ constraints for FP addresses with "m" constraints. ++ ++2008-05-29 Maxim Kuvyrkov ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2007-02-16 Paul Brook ++ Richard Sandiford ++ gcc/ ++ * config/m68k/m68k.md (UNSPEC_MOVEQ_MEM): New constant. ++ (*movsi_smallconst): New pattern. ++ ++2008-05-29 Julian Brown ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2008-05-19 Kazu Hirata ++ ++ gcc/ ++ * config.gcc (arm-timesys-linux-gnueabi): Add ./sysroot-suffix.h ++ ./sysroot-suffix.h to tm_file while removing arm/timesys-linux.h ++ from tm_file. Add tmake_file. ++ * config/arm/t-timesys (MULTILIB_OSDIRNAMES): Populate. ++ * config/arm/timesys-liunx.h: Remove. ++ ++2008-05-29 Julian Brown ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2008-05-07 Paul Brook ++ ++ * config/arm/arm.c (arm_no_early_mul_dep): Correct the logic to ++ look into a MAC instruction. ++ ++2008-05-29 Julian Brown ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2008-04-04 Paul Brook ++ ++ gcc/ ++ * config/arm/linux-eabi.h (ARM_FUNCTION_PROFILER): Define. ++ (SUBTARGET_FRAME_POINTER_REQUIRED): Define. ++ ++2008-05-29 Julian Brown ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2008-03-08 Paul Brook ++ ++ gcc/ ++ * config/arm/t-linux-eabi (MULTILIB_OSDIRNAMES): Override old value. ++ ++2008-05-29 Julian Brown ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2008-02-29 Paul Brook ++ ++ gcc/ ++ * config/arm/lib1funcs.asm (THUMB_LDIV0): Fix bogus ARCH_7 ifdefs. ++ Use cbz. ++ ++2008-05-29 Julian Brown ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2008-02-25 Sandra Loosemore ++ ++ gcc/ ++ * testsuite/gcc.dg/arm-mmx-1.c: Skip if conflicting -mcpu or -mabi ++ argument specified. ++ ++2008-05-29 Julian Brown ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2008-02-18 Julian Brown ++ ++ gcc/ ++ * config/arm/bpabi.S (test_div_by_zero): New macro. ++ (aeabi_ldivmod): Use above macro to tailcall long long div-by-zero ++ handler. ++ (aeabi_uldivmod): Likewise. ++ * config/arm/bpabi-v6m.S (test_div_by_zero): New macro. ++ (aeabi_ldivmod, aeabi_uldivmod): Use above macro. ++ * config/arm/lib1funcs.asm (ARM_LDIV0): Tailcall int div-by-zero ++ handler. Add signed/unsigned argument, pass correct value to that ++ handler. ++ (THUMB_LDIV0): Same, for Thumb. ++ (DIV_FUNC_END): Add signed argument. ++ (WEAK): New macro. ++ (__udivsi3, __umodsi3): Add unsigned argument to DIV_FUNC_END. ++ (__divsi3, modsi3): Add signed argument to DIV_FUNC_END. ++ (__aeabi_uidivmod, __aeabi_idivmod): Check division by zero. ++ (__div0): Rename to __aeabi_idiv0, __aeabi_ldiv0 for EABI, and declare ++ those names weak. ++ * config/arm/t-bpabi (LIB1ASMFUNCS): Add _aeabi_idiv0, _aeabi_ldiv0. 
++ ++2008-05-29 Julian Brown ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2008-02-17 Paul Brook ++ ++ gcc/ ++ * doc/invoke.texi: Document -mword-relocations. ++ * config/arm/uclinux-elf.h: Define TARGET_DEFAULT_WORD_RELOCATIONS. ++ * config/arm/symbian.h: Define TARGET_DEFAULT_WORD_RELOCATIONS. ++ * config/arm/vxworks.h: Define TARGET_DEFAULT_WORD_RELOCATIONS. ++ * config/arm/arm.h: Define TARGET_DEFAULT_WORD_RELOCATIONS. ++ * config/arm/arm.md (movsi): Don't use movt if only word relocations ++ are permitted. ++ * config/arm/arm.opt: Add -mword-relocations. ++ ++2008-05-29 Julian Brown ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2008-02-16 Paul Brook ++ ++ gcc/ ++ * config.gcc (arm*): Handle --enable-extra-sgxx-multilibs. ++ Use arm/t-sysroot-suffix and ./sysroot-suffix.h. ++ * config/arm/uclinux-eabi.h (SYSROOT_SUFFIX_SPEC): Remove. ++ * config/arm/linux-eabi.h (SYSROOT_SUFFIX_SPEC): Remove. ++ * config/arm/t-linux-eabi: Remove marvell-f multilib. ++ Match Cortex-A9 and Cortex-R4F. ++ * config/arm/t-arm-elf: Remove marvell-f multilib. ++ Match Cortex-A9 and Cortex-R4F. ++ * config/arm/t-uclinux-eabi: Match Cortex-A9 and Cortex-R4F. ++ * config/arm/print-sysroot-suffix.sh: New file. ++ * config/arm/t-sysroot-suffix: New file. ++ * config/arm/t-cs-eabi: New file. ++ * config/arm/t-cs-linux: New file. ++ ++2008-05-29 Julian Brown ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2008-02-05 Paul Brook ++ ++ gcc/ ++ * genmultilib: Fix sed patterns. ++ Verify that aliases are valid. ++ * config/arm/t-timesys (MULTILIB_ALIASES): Set. ++ ++2008-05-29 Julian Brown ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2008-02-05 Paul Brook ++ ++ gcc/doc/ ++ * fragments.texi: Document MULTILIB_ALIASES. ++ ++ gcc/ ++ * genmultilib: Add aliases. ++ * Makefile.in (s-mlib): Pass MULTILIB_ALIASES. ++ * config/arm/t-linux-eabi: Use MULTILIB_ALIASES. ++ * config/arm/t-arm-elf: Ditto. ++ * config/arm/t-uclinux-eabi: Ditto. ++ ++2008-05-29 Julian Brown ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2008-01-28 Paul Brook ++ ++ gcc/ ++ * config/arm/arm.c (arm_override_options): Set arm_abi earlier. ++ Allow Interworking on ARMv4 EABI based targets. ++ * config/arm/bpabi.h (TARGET_FIX_V4BX_SPEC): Define. ++ (SUBTARGET_EXTRA_ASM_SPEC, LINK_SPEC): Add TARGET_FIX_V4BX_SPEC. ++ ++2008-05-29 Julian Brown ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2008-01-25 Paul Brook ++ ++ gcc/ ++ * config/arm/ieee754-df.S (muldf3): Use RET macros. ++ ++2008-05-29 Julian Brown ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2008-01-23 Paul Brook ++ ++ gcc/ ++ * config/arm/arm.c (arm_tune_cortex_a9): New variable. ++ (arm_override_options): Set arm_tune_cortex_a9. ++ (arm_split_constant): Use arm_emit_movpair. ++ (arm_rtx_costs_1): Increase cost of register shifts on cortex-A9. ++ Add costs for HIGH and LO_SUM. ++ (arm_size_rtx_costs): Add costs for HIGH and LO_SUM. ++ (arm_emit_movpair): New function. ++ (arm_print_operand): Handle symbols with %c. ++ (arm_final_prescan_insn): Use TARGET_NO_SINGLE_COND_EXEC. ++ (arm_issue_rate): Add cortexa9. ++ * config/arm/arm.h (TARGET_NO_SINGLE_COND_EXEC): Define. ++ (TARGET_USE_MOVT): Define. ++ (arm_tune_cortex_a9): Add prototype. ++ * config/arm/arm-cores.def: Add cortex-a9. ++ * config/arm/arm-tune.md: Regenerate. ++ * config/arm/arm-protos.h (arm_emit_movpair): Add prototype. ++ * config/arm/arm.md: Include cortex-a9.md. ++ Add TARGET_NO_SINGLE_COND_EXEC conditions. ++ (generic_sched, generic_vfp): Add cortex-a9. ++ (movsi): Use arm_emit_movpair. ++ (arm_movt, arm_movw): New patterns. ++ * config/arm/cortex-a9.md: New file. 
++ ++2008-05-29 Julian Brown ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2008-01-09 Julian Brown ++ ++ gcc/ ++ * config/arm/neon.md (UNSPEC_MISALIGNED_ACCESS): New constant. ++ (movmisalign): Define for D and Q width registers. ++ ++ gcc/testsuite/ ++ * lib/target-supports.exp ++ (check_effective_target_arm_vect_no_misalign): New function. ++ (check_effective_target_vect_no_align): Use above to determine ++ whether misaligned accesses are expected for ARM. ++ ++2008-05-29 Julian Brown ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2008-01-04 Paul Brook ++ ++ gcc/ ++ * config/arm/arm.c (arm_output_epilogue): Avoid clobbering tail call ++ arguments. ++ ++2008-05-29 Julian Brown ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2008-01-03 Paul Brook ++ ++ gcc/ ++ * config/arm/arm.c (arm_rtx_costs_1): Add costs for ARMv6 value ++ extension instructions. ++ ++2008-05-29 Julian Brown ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2007-12-20 Paul Brook ++ ++ gcc/ ++ * config/arm/cortex-r4f.md: New file. ++ * config/arm/arm.c (arm_no_early_mul_dep): Also match ++ multiply-subtract. ++ (arm_issue_rate): Return 2 for cortex-a8 and cortex-r4. ++ * config/arm/cortex-r4.md: Replace (eq_attr "tune" "cortexr4") ++ with (eq_attr "tune_cortexr4" "yes"). ++ * config/arm/vfp.md: Split ffarith and ffarith into fcpys, ffariths, ++ ffarithd, fadds, faddd, fconsts, fconstd, fcmps and fcmpd. ++ * config/arm/arm.md: Inlcude cortex-r4f.md. ++ (define_attr fpu): Add new VFP variants. ++ (define_attr type): Add new types. ++ (tune_cortexr4): New attr. ++ (generic_sched, generic_vfp): Use tune_cortexr4 and new FPU types. ++ * config/arm/cortex-a8-neon.md: Split farith and ffarith insn types. ++ * config/arm/marvell-f-vfp.md: Ditto. ++ * config/arm/arm1020e.md: Ditto. ++ * config/arm/vfp11.md: Ditto. ++ * config/arm/arm-tune.md: Regenerate. ++ ++2008-05-29 Julian Brown ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2007-12-14 Paul Brook ++ ++ gcc/ ++ * doc/invoke.texi: Document new ARM -mfpu= and -mcpu= options. ++ * config/arm/arm.c (all_fpus): Add vfpv3 and vfpv3-d16. ++ (fp_model_for_fpu): Add entry for FPUTYPE_VFP3D16. ++ (arm_file_start): Add FPUTYPE_VFP3D16. Rename vfp3 to vfpv3. ++ * config/arm/arm.h (TARGET_VFPD32): Define. ++ (TARGET_VFP3): Use TARGET_VFPD32. ++ (fputype): Add FPUTYPE_VFP3D16. ++ (LAST_VFP_REGNUM): Use TARGET_VFPD32. ++ * config/arm/constraints.md ("w"): Use TARGET_VFPD32. ++ * config/arm/arm-cores.def: Add cortex-r4f. ++ ++2008-05-29 Julian Brown ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2007-12-11 Paul Brook ++ ++ gcc/ ++ * config/arm/thumb2.md: Extend peephole to cover 3-arg subs. ++ (thumb2_alusi3_short): Exclude MINUS. ++ (thumb2_subsi_short): New pattern. ++ ++2008-05-29 Julian Brown ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2007-09-27 Paul Brook ++ ++ gcc/ ++ * config/arm/arm.c (arm_optimization_options): Revert flag_see ++ change. ++ ++2008-05-29 Julian Brown ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2007-09-19 Paul Brook ++ ++ gcc/ ++ * config/arm/arm.c (FL_COMPAT): Define. ++ (arm_override_options): Mask out FL_COMPAT when checking cpu vs. arch. ++ ++2008-05-29 Julian Brown ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2007-09-19 Vladimir Prus ++ ++ gcc/ ++ * config/arm/arm.c (arm_optimization_options): ++ Enable -fsee and disable -fmove-loop-invariants. ++ Use very restrictive inlining heuristics. ++ ++ gcc/testsuite/ ++ * gcc.c-torture/execute/bcp-1.x: New. Don't ++ run bcp-1.c test on arm, with -Os. ++ * gcc.c-torture/execute/990208-1.x: New. Likewise. 
++ ++2008-05-29 Julian Brown ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2007-08-20 Paul Brook ++ ++ gcc/ ++ * config/arm/arm.md (insv): Use gen_insv_t2 and gen_insv_zero. ++ (extzv): Use gen_extzv_t2. ++ (insv_t2, insv_zero, extv, extzv_t2): New patterns. ++ ++2008-05-29 Julian Brown ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2007-08-20 Paul Brook ++ ++ gcc/ ++ * config/arm/thumb2.md (thumb2_one_cmplsi2_short, ++ thumb2_negsi2_short): New patterns and peepholes. ++ ++2008-05-29 Julian Brown ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2007-08-20 Paul Brook ++ ++ gcc/ ++ * config/arm/arm.c (arm_size_rtx_costs): Use ARM costs for Thumb-2. ++ ++2008-05-29 Julian Brown ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2007-08-13 Paul Brook ++ ++ * config/arm/arm.c (arm_output_epilogue): Adjust stack pointer by ++ popping call-clobbered registers. ++ (arm_expand_prologue): Adjust stack pointer by pushing extra ++ registers. ++ ++2008-05-29 Julian Brown ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2007-08-12 Mark Shinwell ++ ++ gcc/ ++ * config/arm/arm.c (TARGET_ADJUST_REG_ALLOC_ORDER): Define. ++ (thumb_core_reg_alloc_order): New. ++ (arm_adjust_reg_alloc_order): New. ++ * config/arm/arm.h (REG_ALLOC_ORDER): Adjust comment. ++ * config/arm/arm-protos.h (arm_adjust_reg_alloc_order): New ++ prototype. ++ ++2008-05-29 Julian Brown ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2007-08-12 Mark Shinwell ++ ++ gcc/ ++ * config/arm/arm.h (CLASS_LIKELY_SPILLED_P): Update comment. ++ ++2008-05-29 Julian Brown ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2007-08-12 Mark Shinwell ++ ++ gcc/ ++ * config/arm/arm.h (CLASS_LIKELY_SPILLED_P): Check against ++ LO_REGS only for Thumb-1. ++ (MODE_BASE_REG_CLASS): Restrict base registers to low ++ registers for Thumb-2. ++ ++2008-05-29 Julian Brown ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2007-08-10 Paul Brook ++ ++ * config/arm/arm.md (arm_addsi3): Add r/k/n alternative. ++ ++2008-05-29 Julian Brown ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2007-08-07 Kazu Hirata ++ ++ gcc/testsuite/ ++ * gcc.dg/arm-g2.c, gcc.dg/arm-mmx-1.c, gcc.dg/arm-scd42-2.c: ++ Skip if the multilib testing specifies -march that does not ++ agree with the one specified in the testcase. ++ ++2008-05-29 Julian Brown ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2007-07-25 Nathan Sidwell ++ ++ gcc/ ++ * config.gcc (arm*-*-linux*): Add timesys specific files. ++ * config/arm/timesys-linux.h: New. ++ * config/arm/t-timesys: New. ++ ++2008-05-29 Julian Brown ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2007-07-16 Paul Brook ++ ++ gcc/ ++ * config/arm/arm.c (use_return_insn): Use offsets->saved_regs_mask ++ instead of {arm,thumb}_compute_save_reg_mask. ++ (output_return_instruction): Ditto. ++ (arm_output_epilogue): Ditto. ++ (arm_expand_prologue): Ditto. ++ (thumb_unexpanded_epilogue): Ditto. ++ (thumb1_expand_prologue): Ditto. ++ (thumb1_output_function_prologue): Ditto. ++ (arm_set_return_address): Ditto. ++ (thumb_set_return_address): Ditto. ++ (arm_get_frame_offsets): Set offsets->saved_regs_mask. Push extra ++ regs to achieve stack alignment. ++ (thumb1_compute_save_reg_mask): Fix compiler warning. ++ * gcc/config/arm.h (arm_stack_offsets): Add saved_regs_mask. ++ ++2008-05-29 Julian Brown ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2007-07-05 Mark Shinwell ++ ++ gcc/ ++ * config/arm/arm.h (BRANCH_COST): Set to 1 when optimizing ++ for size on Thumb-2. 
++ ++2008-05-29 Julian Brown ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2007-07-05 Richard Sandiford ++ ++ gcc/ ++ * config/arm/neon-gen.ml: Include vxWorks.h rather than stdint.h ++ for VxWorks kernels. ++ * config/arm/arm_neon.h: Regenerate. ++ ++2008-05-29 Julian Brown ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2007-07-05 Mark Shinwell ++ ++ gcc/ ++ * config/arm/thumb2.md (thumb2_movsi_insn): Split ldr and ++ str alternatives according to use of high and low regs. ++ * config/arm/vfp.md (thumb2_movsi_vfp): Likewise. ++ * config/arm/arm.h (CONDITIONAL_REGISTER_USAGE): Use high ++ regs when optimizing for size on Thumb-2. ++ ++2008-05-29 Julian Brown ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2007-07-02 Paul Brook ++ ++ gcc/ ++ * config/arm/thumb2.md (thumb2_alusi3_short): Exclude PLUS. ++ (thumb2_addsi_shortim): Rename ... ++ (thumb2_addsi_short): ... to this. Allow register operands. ++ ++2008-05-29 Julian Brown ++ ++ Backport from mainline: ++ ++ 2008-02-26 Paul Brook ++ ++ * config/arm/arm.c (thumb_set_frame_pointer): Ensure SP is first ++ operand for Thumb-2. ++ * config/arm/arm.h (reg_class): Add CORE_REGS. ++ (REG_CLASS_NAMES, REG_CLASS_CONTENTS): Ditto. ++ (BASE_REG_CLASS): Use CORE_REGS. ++ (PREFERRED_RELOAD_CLASS): Add STACK_REG. ++ (REGNO_MODE_OK_FOR_REG_BASE_P): Use REGNO_MODE_OK_FOR_BASE_P. ++ (REGNO_OK_FOR_INDEX_P): Exclude SP. ++ (ARM_REG_OK_FOR_INDEX_P): Always define. Use ++ ARM_REGNO_OK_FOR_INDEX_P. ++ (ARM_PRINT_OPERAND_ADDRESS): Swap operands for [reg, sp]. ++ * config/arm/arm.md (arm_addsi3, thumb1_addsi3, arm_subsi3_insn, ++ arm_movsi_insn, thumb1_movsi_insni, stack_tie): Add "k" alternatives. ++ (ldm/stm peepholes): Ditto. ++ * config/arm/thumb2.md (thumb2_movdi): Add "k" alternatives. ++ * config/arm/vfp.md (arm_movsi_vfp, thumb2_movsi_vfp): Ditto. ++ * config/arm/iwmmxt.md (iwmmxt_movsi_insn): Ditto. ++ * config/arm/constraints.md: Enable "k" constraint on ARM. ++ ++2008-05-29 Maxim Kuvyrkov ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2008-02-01 Joseph Myers ++ * release-notes-csl.xml: Add -mxgot release note. ++ gcc/ ++ * config/m68k/m68k.opt (mxgot): New option. ++ * config/m68k/m68k.c (legitimize_pic_address): Handle -mxgot. ++ (m68k_output_addr_const_extra): New. ++ * config/m68k/m68k.h (OUTPUT_ADDR_CONST_EXTRA): New. ++ * config/m68k/m68k-protos.h (m68k_output_addr_const_extra): Declare. ++ * config/m68k/m68k.md (UNSPEC_GOTOFF): Define. ++ * doc/invoke.texi (M680x0 Options): Document -mxgot. ++ gcc/testsuite/ ++ * gcc.target/m68k/xgot-1.c: New test. ++ ++2008-05-29 Maxim Kuvyrkov ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2008-03-12 Nathan Sidwell ++ gcc/ ++ * config/m68k/t-cf (MULTILIB_EXTRA_OPTS): Add no-mac. ++ * config/m68k/m68k-devices.def: Remove multilibs that only differ ++ by MAC/EMAC. ++ ++2008-05-29 Julian Brown ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2007-06-13 Joseph Myers ++ ++ gcc/ ++ * config/arm/crti.asm, config/arm/crtn.asm: Remove .file ++ directives. ++ ++2008-05-29 Julian Brown ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2007-06-06 Joseph Myers ++ ++ gcc/ ++ * config/arm/arm.h (VALID_IWMMXT_REG_MODE): Allow SImode. ++ (ARM_LEGITIMIZE_RELOAD_ADDRESS): Reduce range allowed for SImode ++ offsets with iWMMXt. ++ * config/arm/arm.c (arm_hard_regno_mode_ok): Update for change to ++ VALID_IWMMXT_REG_MODE. ++ ++2008-05-29 Julian Brown ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2007-05-17 Paul Brook ++ ++ gcc/ ++ * config/arm/arm.c (output_move_double): Prefer LDRD to LDM. 
++ ++2008-05-29 Julian Brown ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2007-05-15 Paul Brook ++ ++ gcc/ ++ * config/arm/nocrt0.h (LIB_SPEC): Remove default -T. ++ ++2008-05-29 Julian Brown ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2007-05-04 Mark Shinwell ++ ++ gcc/ ++ * config/arm/bpabi.h (SUBTARGET_EXTRA_ASM_SPEC): Bump EABI ++ version number to five. ++ ++2008-05-29 Julian Brown ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2007-05-02 Paul Brook ++ ++ gcc/ ++ * config/arm/arm.c (arm_unwind_emit): Suppress unused unwinding ++ annotations. ++ (arm_output_fn_unwind): Mark functions that can not be unwound. ++ ++2008-05-29 Julian Brown ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2007-04-26 Vladimir Prus ++ ++ gcc/ ++ * config/arm/arm.c (vfp_output_fldmd): When low_irq_latency ++ is non zero, pop each register separately. ++ (vfp_emit_fstmd): When low_irq_latency is non zero, ++ save each register separately. ++ (arm_get_vfp_saved_size): Adjust saved register ++ size calculation for the above changes. ++ ++2008-05-29 Julian Brown ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2007-04-25 Paul Brook ++ ++ gcc/ ++ * config/arm/bpabi-v6m.S (aeabi_lcmp): Use unsigned comparison for ++ low word. ++ ++2008-05-29 Julian Brown ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2007-04-22 Mark Shinwell ++ ++ gcc/ ++ * config/arm/lib1funcs.asm (div0): Use correct punctuation. ++ * config/arm/ieee754-sf.S (mulsf3): Likewise. ++ ++2008-05-29 Julian Brown ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2007-04-18 Vladimir Prus ++ ++ gcc/ ++ * config/arm/arm.h (TARGET_CPU_CPP_BUILTINS): Set ++ __low_irq_latency__. ++ * config/arm/lib1funcs.asm: Define do_pop and ++ do_push as variadic macros. When __low_irq_latency__ ++ is defined, push and pop registers individually. ++ * config/arm/ieee754-df.S: Adjust syntax of using ++ do_push. ++ * config/arm/ieee754-sf.S: Likewise. ++ * config/arm/bpapi.S: Likewise. ++ ++2008-05-29 Julian Brown ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2007-04-17 Paul Brook ++ ++ gcc/ ++ * config/arm/arm.c (TARGET_DWARF_REGISTER_SPAN): Define. ++ (arm_dwarf_register_span): New function. ++ (arm_dbx_register_number): Add VFPv3 dwarf numbering. ++ ++2008-05-29 Julian Brown ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2007-04-16 Paul Brook ++ ++ gcc/ ++ * config/arm/arm.c (print_pop_reg_by_ldr): Fix warning about ambiguous ++ else. ++ ++2008-05-29 Julian Brown ++ ++ Backport from mainline: ++ ++ 2008-05-06 Mark Shinwell ++ Daniel Jacobowitz ++ Andrew Jenner ++ ++ * g++.old-deja/g++.jason/enum6.C, g++.old-deja/g++.law/enum9.C, ++ g++.old-deja/g++.other/enum4.C, gfortran/enum_9.f90, ++ gfortran.dg/enum_10.f90: Broaden dg-options pattern. ++ ++2008-05-29 Julian Brown ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2007-04-01 Paul Brook ++ ++ gcc/ ++ * config/arm/uclinux-eabi.h (SUBTARGET_EXTRA_LINK_SPEC): Add ++ --target2=abs. ++ * config/arm/unwind-arm.h (_Unwind_decode_target2): Handle uClinux. ++ ++2008-05-29 Julian Brown ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2007-03-31 Paul Brook ++ ++ * config/arm/arm.c (output_move_double): Only apply limited range ++ check in ARM mode. ++ ++2008-05-29 Julian Brown ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2007-03-30 Sandra Loosemore ++ ++ gcc/ ++ * config/arm/arm.c (use_return_insn): Test for TARGET_APCS_FRAME ++ if we need to adjust the stack. ++ ++2008-05-29 Maxim Kuvyrkov ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2007-09-07 Mark Shinwell ++ gcc/ ++ * config/m68k/lb1sf68.asm: Add PIC macros for Linux targets. 
++ ++2008-05-29 Maxim Kuvyrkov ++ ++ Merge from Sourcery G++ 4.2: ++ ++ gcc/ ++ * config.gcc (m68k-*-linux*): Add with_arch, adjust tm_file, ++ add tmake_file. ++ ++2008-05-28 Paul Brook ++ ++ Avoid Issue #2945 ++ gcc/ ++ * config/arm/arm.md (abssi2): Add TARGET_NO_SINGLE_COND_EXEC expander. ++ (arm_abssi2, arm_neg_abssi2): Enable for Thumb-2. Always split. ++ (arm_nocond_abssi2, arm_nocond_neg_abssi2): New patterns. ++ Add splitters for abssi patterns. ++ * config/arm/thumb2.md (thumb2_abssi2, thumb2_neg_abssi2): Remove. ++ ++2008-05-26 Carlos O'Donell ++ ++ Backport from mainline: ++ ++ gcc/ ++ 2008-05-23 Paul Brook ++ Carlos O'Donell ++ ++ * doc/extend.texi: Clarify use of __attribute__((naked)). ++ * doc/tm.texi: Document TARGET_ALLOCATE_STACK_SLOTS_FOR_ARGS. ++ * target.h (gcc_target): Add allocate_stack_slots_for_args. ++ * function.c (use_register_for_decl): Use ++ targetm.calls.allocate_stack_slots_for_args. ++ * target-def.h (TARGET_CALLS): Add ++ TARGET_ALLOCATE_STACK_SLOTS_FOR_ARGS. ++ * config/arm/arm.c (arm_allocate_stack_slots_for_args): ++ New function. ++ (TARGET_ALLOCATE_STACK_SLOTS_FOR_ARGS): Define. ++ ++ gcc/testsuite/ ++ 2008-05-23 Paul Brook ++ Carlos O'Donell ++ ++ * gcc.target/arm/naked-1.c: New test. ++ * gcc.target/arm/naked-2.c: New test. ++ ++2008-05-26 Nathan Froyd ++ ++ Backport from mainline: ++ ++ gcc/ ++ 2008-04-30 Nathan Froyd ++ ++ * config/rs6000/crtresgpr.asm, config/rs6000/crtresxgpr.asm, ++ config/rs6000/crtsavgpr.asm, config/rs6000/crtresfpr.asm, ++ config/rs6000/crtresxfpr.asm, config/rs6000/crtsavfpr.asm: Break out ++ from... ++ * config/rs6000/crtsavres.asm: ...here. Remove unneeded file. ++ * config/rs6000/e500crtres32gpr.asm, config/rs6000/e500crtres64gpr.asm, ++ config/rs6000/e500crtres64gprctr.asm, ++ config/rs6000/e500crtrest32gpr.asm, config/rs6000/e500crtrest64gpr.asm, ++ config/rs6000/e500crtresx32gpr.asm, config/rs6000/e500crtresx64gpr.asm, ++ config/rs6000/e500crtsav32gpr.asm, config/rs6000/e500crtsav64gpr.asm, ++ config/rs6000/e500crtsav64gprctr.asm, ++ config/rs6000/e500crtsavg32gpr.asm, config/rs6000/e500crtsavg64gpr.asm, ++ config/rs6000/e500crtsavg64gprctr.asm: New files. ++ * config/rs6000/t-ppccomm: Add build rules for new files. ++ (LIB2FUNCS_STATIC_EXTRA): Add new files. ++ * config/rs6000/t-netbsd: Add build rules for new files. ++ (LIB2FUNCS_STATIC_EXTRA): New variable. ++ * config/rs6000/sysv4.h (ENDFILE_SPEC): Don't include crtsavres.o ++ (CRTSAVRES_DEFAULT_SPEC): Likewise. ++ * config/rs6000/netbsd.h (ENDFILE_SPEC): Likewise. ++ ++ libgcc/ ++ 2008-04-30 Nathan Froyd ++ ++ * config/rs6000/t-ppccomm: Add build rules for new files. ++ (LIB2ADD_ST): New variable. ++ ++2008-05-26 Nathan Froyd ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2008-02-25 Nathan Froyd ++ ++ gcc/ ++ * tree-ssa-remove-local-statics. (initialize_statement_dataflow): ++ Continue hash table traversal. ++ (compute_definedness_for_block): Delete useless return statement. ++ Adjust comment accordingly. ++ ++ 2007-03-05 Nathan Froyd ++ ++ gcc/ ++ * tree-pass.h (pass_remove_local_statics): Declare. ++ * passes.c (init_optimization_passes): Add ++ pass_remove_local_statics to the optimization passes. ++ * Makefile.in (OBJS-common): Add tree-ssa-remove-local-statics.c. ++ (tree-ssa-remove-local-statics.o): New rule. ++ * tree-ssa-remove-local-statics.c: New file. ++ * c.opt (fremove-local-statics): New option. ++ * timevar.def (TV_RLS): New timevar. ++ * toplev.h (flag_remove_local_statics): Declare. ++ * cgraph.h (struct cgraph_node): Add 'ever_was_nested'. 
++ * cgraph.c (cgraph_node): Set ever_was_nested in the node and ++ its parent when creating a new node. ++ gcc/doc/ ++ * invoke.texi: Document -fremove-local-statics. ++ gcc/testsuite/ ++ * gcc.dg/remove-local-statics-1.c: New file. ++ * gcc.dg/remove-local-statics-2.c: New file. ++ * gcc.dg/remove-local-statics-3.c: New file. ++ * gcc.dg/remove-local-statics-4.c: New file. ++ * gcc.dg/remove-local-statics-5.c: New file. ++ * gcc.dg/remove-local-statics-6.c: New file. ++ * gcc.dg/remove-local-statics-7.c: New file. ++ * gcc.dg/remove-local-statics-8.c: New file. ++ * gcc.dg/remove-local-statics-9.c: New file. ++ * gcc.dg/remove-local-statics-10.c: New file. ++ * gcc.dg/remove-local-statics-11.c: New file. ++ * gcc.dg/remove-local-statics-12.c: New file. ++ ++ ++2008-05-26 Nathan Froyd ++ ++ Backport from mainline: ++ ++ 2008-04-24 Nathan Froyd ++ Nathan Sidwell ++ ++ * config/rs6000/rs6000.opt (mspe): Remove Var property. ++ (misel): Likewise. ++ * config/rs6000/rs6000.h (rs6000_spe): Declare. ++ (rs6000_isel): Likewise. ++ * config/rs6000/rs6000.c (rs6000_spe): New variable. ++ (rs6000_isel): New variable. ++ (rs6000_handle_option): Handle OPT_mspe and OPT_misel. ++ ++2008-05-26 Nathan Froyd ++ ++ gcc/testsuite/ ++ * gcc.target/powerpc/altivec-24.c, gcc.target/powerpc/pr35907.c: ++ Run if vmx_hw, compile otherwise. Do not check for AltiVec at ++ runtime. ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2007-03-22 Daniel Jacobowitz ++ ++ gcc/testsuite/ ++ * gcc.target/powerpc/altivec-vec-merge.c, ++ gcc.target/powerpc/altivec-10.c, gcc.target/powerpc/altivec-12.c, ++ gcc.target/powerpc/altivec-1.c, gcc.target/powerpc/altivec-3.c, ++ g++.dg/ext/altivec-2.C, g++.dg/ext/altivec-3.C: Run if vmx_hw, compile ++ otherwise. Do not check for AltiVec at runtime. ++ * gcc.target/powerpc/altivec_check.h: Delete. ++ * g++.dg/eh/simd-2.C: Only use -maltivec if vmx_hw. ++ * g++.dg/ext/altivec_check.h: Delete. ++ * g++.dg/eh/check-vect.h (sig_ill_handler): Remove AltiVec runtime ++ check. ++ ++ * gcc.target/powerpc/20030505.c: Compile for all EABI targets. ++ Explicitly enable SPE. ++ * gcc.target/powerpc/ppc-spe.c: Likewise. ++ ++ * gcc.target/powerpc/darwin-longlong.c: Explicitly require 64-bit ++ instruction support. Do not check for it at runtime. ++ ++ * gcc.target/powerpc/20030218-1.c: Pass -mfloat-gprs=single. Expect ++ -flax-vector-conversions message. ++ * gcc.target/powerpc/spe1.c: Pass -mfloat-gprs=single. Make Foo ++ extern. ++ * g++.dg/other/opaque-2.C: Pass -mfloat-gprs=single. ++ * g++.dg/other/opaque-3.C, g++.dg/ext/spe1.C: Likewise. ++ ++ * gcc.dg/cpp/assert4.c: Recognize __PPC__. ++ ++ * g++.dg/other/opaque-1.C: Run on targets with SPE. ++ * g++.dg/other/profile1.C: Use dg-require-profiling. ++ ++ * g++.dg/conversion/simd1.C: Expect warning on all PowerPC ++ non-AltiVec targets. ++ * g++.dg/ext/attribute-test-1.C, g++.dg/ext/attribute-test-2.C, ++ g++.dg/ext/attribute-test-3.C, g++.dg/ext/attribute-test-4.C: Likewise. ++ ++ * lib/target-supports.exp (check_effective_target_ppc64): New. ++ ++2008-05-26 Maxim Kuvyrkov ++ ++ * release-notes-csl.xml: Add missing release note. ++ ++2008-05-23 Joseph Myers ++ ++ gcc/ ++ * config/arm/t-wrs-linux (MULTILIB_OPTIONS, MULTILIB_DIRNAMES, ++ MULTILIB_EXCEPTIONS): Add -tcortex-a8-be8 multilib. ++ * config/arm/wrs-linux.h (CC1_SPEC, SUBTARGET_EXTRA_ASM_SPEC, ++ SUBTARGET_EXTRA_LINK_SPEC, SYSROOT_SUFFIX_SPEC): Update for new ++ multilib. 
++
++2008-05-23 Nathan Froyd
++
++ Backport from mainline:
++
++ gcc/
++ 2008-02-23 David Edelsohn
++
++ * config/rs6000/rs6000.h (CONSTANT_ALIGNMENT): Use STRICT_ALIGNMENT
++ instead of TARGET_STRICT_ALIGN.
++
++ gcc/
++ 2008-02-22 Nathan Froyd
++
++ * config/rs6000/rs6000.h (CONSTANT_ALIGNMENT): Don't overalign
++ strings when optimizing for size, unless the target cares about
++ alignment.
++
++2008-05-23 Julian Brown
++
++ Merge from Sourcery G++ 4.2:
++
++ 2007-03-28 Paul Brook
++
++ Merge ARMv6-M support.
++ gcc/
++ * config/arm/t-linux-eabi: Remove explicit marm (default) multilib.
++ Add entries for all multilibs to MULTILIB_OSDIRNAMES.
++ * config/arm/t-arm-elf: Ditto. Add armv6-m multilib.
++ (LIB1ASMFUNCS): Prefix sf/df routines with arm_.
++ * config/arm/t-uclinux-eabi: New file.
++ * config/arm/t-linux-eabi: Add Thumb-2 multilib.
++ * config/arm/uclinux-eabi.h (SYSROOT_SUFFIX_SPEC): Define.
++ * config/arm/linux-eabi.h (SYSROOT_SUFFIX_SPEC): Add thumb-2 sysroot.
++ * config.gcc: Add t-softfp and t-arm-softfp to ARM ELF based targets.
++ Add armv6-m.
++ * config/arm/t-arm-softfp: New file.
++ * config/arm/elf.h: Prevent libgcc float conversion routines being
++ built when we have assembly implementations.
++ * config/arm/ieee754-sf.S: Rename L_* L_arm_*
++ * config/arm/ieee754-df.S: Ditto.
++ * config/arm/arm.c (FL_FOR_ARCH6M): Define.
++ (all_architectures): Add armv6-m.
++ (arm_output_mi_thunk): Add TARGET_THUMB1_ONLY code.
++ * config/arm/arm.h (TARGET_THUMB1_ONLY): Define.
++ (ARM_DECLARE_FUNCTION_NAME): Handle v6m thunks.
++ * config/arm/lib1funcs.asm: Add __ARM_ARCH_6M__. Omit ARM mode
++ code and macros when it is defined. Include bpabi-v6m.S.
++ (gnu_Unwind_Restore_VFP_D, gnu_Unwind_Save_VFP_D,
++ gnu_Unwind_Restore_VFP_D_16_to_31, gnu_Unwind_Save_VFP_D_16_to_31,
++ gnu_Unwind_Restore_WMMXD, gnu_Unwind_Save_WMMXD,
++ gnu_Unwind_Restore_WMMXC, gnu_Unwind_Save_WMMXC): Stubs for ARMv6-M.
++ * config/arm/sfp-machine.h: New file.
++ * config/arm/arm-cores.def: Add cortex-m1.
++ * config/arm/arm-tune.md: Regenerate.
++ * config/arm/libunwind.S: Add ARMv6-M implementation.
++ * config/arm/bpabi.h: Add renames for unsigned conversion routines.
++ * config/arm/bpabi-v6m.S: New file.
++
++2008-05-23 Julian Brown
++
++ Merge from Sourcery G++ 4.2:
++
++ 2007-03-26 Joseph Myers
++
++ Merge from Sourcery G++ 4.1 branch:
++
++ 2006-03-01 Paul Brook
++ gcc/testsuite/
++ * g++.dg/other/armv7m-1.C: New test.
++
++ 2006-10-27 Joseph Myers
++ Richard Sandiford
++ gcc/testsuite/
++ * gcc.dg/arm-vfp1.c, gcc.target/arm/vfp-ldmdbd.c,
++ gcc.target/arm/vfp-ldmdbs.c, gcc.target/arm/vfp-ldmiad.c,
++ gcc.target/arm/vfp-ldmias.c, gcc.target/arm/vfp-stmdbd.c,
++ gcc.target/arm/vfp-stmdbs.c, gcc.target/arm/vfp-stmiad.c,
++ gcc.target/arm/vfp-stmias.c: Use arm_vfp_ok.
++
++ 2006-08-19 Joseph Myers
++ gcc/testsuite/
++ * gcc.target/arm/vfp-ldmdbd.c, gcc.target/arm/vfp-ldmdbs.c,
++ gcc.target/arm/vfp-ldmiad.c, gcc.target/arm/vfp-ldmias.c,
++ gcc.target/arm/vfp-stmdbd.c, gcc.target/arm/vfp-stmdbs.c,
++ gcc.target/arm/vfp-stmiad.c, gcc.target/arm/vfp-stmias.c: Skip for
++ iWMMXt.
++
++ 2006-04-21 Kazu Hirata
++ gcc/testsuite/
++ * gcc.target/arm/vfp-ldmdbd.c, gcc.target/arm/vfp-ldmdbs.c,
++ gcc.target/arm/vfp-ldmiad.c, gcc.target/arm/vfp-ldmias.c,
++ gcc.target/arm/vfp-stmdbd.c, gcc.target/arm/vfp-stmdbs.c,
++ gcc.target/arm/vfp-stmiad.c, gcc.target/arm/vfp-stmias.c: New.
++ ++2008-05-23 Julian Brown ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2007-03-25 Vladimir Prus ++ ++ gcc/ ++ * config/arm/arm.c (load_multiple_sequence): Return ++ 0 if low irq latency is requested. ++ (store_multiple_sequence): Likewise. ++ (arm_gen_load_multiple): Load registers one-by-one ++ if low irq latency is requested. ++ (arm_gen_store_multiple): Likewise. ++ * config/arm/predicates.md (load_multiple_operation): ++ Return false is low irq latency is requested. ++ (store_multiple_operation): Likewise. ++ * config/arm/arm.h (low_irq_latency): Define. ++ * config/arm/arm.md (movmemqi): Don't use ++ it if low irq latency is requsted. ++ ++2008-05-23 Julian Brown ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2007-03-24 Vladimir Prus ++ ++ gcc/ ++ * config/arm/arm.c (arm_override_options): Warn if ++ mlow-irq-latency is specified in thumb mode. ++ (print_pop_reg_by_ldr): New. ++ (arm_output_epilogue): Use print_pop_reg_by_ldr ++ when low irq latency is requested. ++ (emit_multi_reg_push): Push registers separately ++ if low irq latency is requested. ++ * config/arm/arm.opt (mlow-irq-latency): New option. ++ ++2008-05-23 Julian Brown ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2007-03-23 Paul Brook ++ ++ gcc/ ++ * config/arm/uclinux-eabi.h (SUBTARGET_EXTRA_LINK_SPEC): Add -elf2flt ++ and --pic-veneer. ++ * config/arm/bpabi.h (SUBTARGET_EXTRA_LINK_SPEC): Provide empty ++ default definition. ++ (LINK_SPEC): Include SUBTARGET_EXTRA_LINK_SPEC. ++ ++2008-05-23 Julian Brown ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2007-03-22 Paul Brook ++ ++ gcc/ ++ * config.gcc: Loosen checks for arm uclinux eabi targets. ++ ++2008-05-23 Julian Brown ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2007-03-22 Sandra Loosemore ++ ++ gcc/ ++ * config/arm/lib1funcs.asm (ARM_DIV_BODY): Conditionalize for ++ __ARM_TUNE_MARVELL_F__. ++ * config/arm/arm.h (TARGET_CPU_CPP_BUILTINS): Add code to define ++ __ARM_TUNE_MARVELL_F__. ++ * config/arm/linux-eabi.h (SYSROOT_SUFFIX_SPEC): Add support for ++ marvell-f multilibs. ++ * config/arm/t-linux-eabi (MULTILIB_OPTIONS, MULTILIB_DIRNAMES, ++ MULTILIB_EXCEPTIONS, MULTILIB_MATCHES): Likewise. ++ * config/arm/t-arm-elf (MULTILIB_OPTIONS, MULTILIB_DIRNAMES, ++ MULTILIB_EXCEPTIONS, MULTILIB_MATCHES): Likewise. ++ ++2008-05-23 Julian Brown ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2007-03-22 Mark Shinwell ++ ++ gcc/ ++ * config/arm/cortex-r4.md: New. ++ * config/arm/hwdiv.md (divsi3, udivsi3): Annotate with ++ insn attributes. ++ * config/arm/arm.md: Include cortex-r4.md. ++ (insn): Add sdiv and udiv values. ++ (generic_sched): Don't use generic scheduling for Cortex-R4. ++ ++2008-05-23 Julian Brown ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2007-03-22 Vladimir Prus ++ ++ * config/arm/arm.c ++ (arm_compute_save_reg0_reg12_mask): Always ++ check if register 11 must be saved. Additionally ++ force save of it if frame_pointer_needeed. ++ (arm_compute_save_reg_mask): Save IP and PC ++ only with apcs frames. ++ (arm_output_epilogue): Adjust Thumb2 codepath to ++ be also invoked and work for ARM non-apcs frames. ++ (arm_expand_prologue): Don't bother saving IP ++ for non-apcs frame, since it's not clobbered by ++ prologue code. Implement non-apcs frame ++ layout. ++ ++2008-05-23 Julian Brown ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2007-03-20 Mark Shinwell ++ ++ gcc/ ++ * config/arm/arm.c (arm_arch_marvell_f): Delete. ++ (all_architectures): Delete marvell-f entry. ++ (arm_override_options): Improve diagnostic. 
Ignore ++ FL_MARVELL_F when checking CPU feature flags against ++ architecture feature flags. Don't set arm_arch_marvell_f. ++ Check insn_flags instead of TARGET_MARVELL_F. ++ * config/arm/arm.h (arm_arch_marvell_f): Delete. ++ (TARGET_MARVELL_F): Delete. ++ * doc/invoke.texi: Remove marvell-f entry from -march= ++ documentation. ++ ++2008-05-23 Julian Brown ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2007-03-20 Carlos O'Donell ++ ++ Issue #1314 ++ gcc/ ++ * target.h (calls): Add use_reg_for_func. ++ * function.c (use_register_for_decl): Return true if ++ target hook use_ref_for_func returns true. ++ * target-def.h (TARGET_USE_REG_FOR_FUNC): Define. ++ (TARGET_CALLS): Add TARGET_USE_REG_FOR_FUNC. ++ * config/arm/arm.c (arm_use_reg_for_func): New function. ++ (TARGET_USE_REG_FOR_FUNC): Define as arm_use_reg_for_func. ++ * doc/extend.texi (naked): Naked functions must only have ++ asms without operands. ++ * release-notes-csl.xml: Document #1314 fix. ++ ++2008-05-23 Nathan Froyd ++ ++ Backport from mainline: ++ ++ gcc/ ++ 2008-05-23 Steven Munroe ++ ++ * config/rs6000/darwin-ldouble.c (fmsub): Eliminate the full ++ PACK/UNPACK between FP_SUB_Q and FD_TRUNC so that the result ++ is only rounded once. ++ ++2008-05-23 Julian Brown ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2007-03-18 Mark Shinwell ++ ++ gcc/ ++ * doc/invoke.texi: Document -mmarvell-div. ++ * config/arm/arm.c (arm_override_options): Take setting of ++ -mmarvell-div and TARGET_THUMB2 into account when setting ++ arm_arch_hwdiv. Cause error if -mmarvell-div is used when ++ not targeting a Marvell core. ++ * config/arm/arm.opt: Add entry for -mmarvell-div option. ++ * config/arm/hwdiv.md: Use only arm_arch_hwdiv to check ++ applicability of instruction patterns. ++ ++2008-05-23 Julian Brown ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2007-03-18 Mark Shinwell ++ ++ gcc/ ++ * config/arm/vfp.md: When targeting a Marvell core, only ++ enable patterns involving multiply-accumulate type ++ instructions when optimizing for size. ++ ++2008-05-23 Julian Brown ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2007-03-12 Sandra Loosemore ++ ++ gcc/ ++ * config/arm/arm.c (arm_final_prescan_insn): Skip this processing ++ if TARGET_NO_COND_EXEC is true. ++ * config/arm/arm.h (TARGET_NO_COND_EXEC): Define. ++ * config/arm/arm.md (smaxsi3, *arm_smax_insn): Disable if ++ TARGET_NO_COND_EXEC is set. ++ (sminsi3, *arm_smin_insn): Likewise. ++ (umaxsi3, *arm_umaxsi3): Likewise. ++ (uminsi3, *arm_uminsi3): Likewise. ++ (*store_minmaxsi): Likewise. ++ (seq, sne, sgt, sle, sge, slt): Likewise. ++ (sgtu, sleu, sgeu, sltu): Likewise. ++ (sunordered, sordered): Likewise. ++ (sungt, sunge, sunlt, sunle): Likewise. ++ (movsicc, movsfcc, movdfcc): Likewise. ++ (*cond_return, *cond_return_inverted): Likewise. ++ (*compare_scc): Likewise. ++ (*cond_arith): Likewise. ++ (movcond): Likewise. ++ (anonymous define_split patterns): Likewise. ++ (define_cond_exec): Likewise. ++ ++2008-05-23 Julian Brown ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2007-02-02 Mark Shinwell ++ Richard Earnshaw ++ ++ gcc/ ++ * config/arm/arm.c (TARGET_MAX_ANCHOR_OFFSET): New. ++ (TARGET_MIN_ANCHOR_OFFSET): New. ++ (arm_override_options): Set correct anchor ranges for Thumb-1 ++ and Thumb-2 if required. ++ (legitimize_pic_address): Handle case involving a TLS symbol ++ reference with an addend. ++ (arm_optimization_options): Enable section anchors at -O1 and ++ above. ++ * config/arm/arm.h (OPTIMIZATION_OPTIONS): New. ++ * config/arm/arm-protos.h (arm_optimization_options): New. 
++ ++2008-05-23 Julian Brown ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2007-02-02 Mark Shinwell ++ ++ gcc/ ++ * config/arm/arm.md (UNSPEC_STACK_ALIGN): Use a number that ++ does not clash with other unspecs. ++ ++2008-05-23 Julian Brown ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2007-02-02 Mark Shinwell ++ ++ gcc/ ++ * config/arm/thumb2.md: Update copyright notice and FSF address. ++ Include hwdiv.md and move instruction patterns for sdiv and udiv ++ to that file. ++ * config/arm/arm.c (arm_arch_marvell_f): New. ++ (all_architectures): Add marvell-f entry. ++ (ARM_ARCH_NAME_SIZE): Define. ++ (arm_arch_name): Allocate ARM_ARCH_NAME_SIZE bytes of space. ++ (arm_override_options): Be more careful writing to arm_arch_name. ++ Set arm_arch_hwdiv if arm_tune_marvell_f is set. ++ * config/arm/arm.h (arm_arch_marvell_f): New. ++ * config/arm/arm_cores.def: Add FL_MARVELL_F for the marvell-f ++ entry. ++ * config/arm/hwdiv.md: New. ++ * config/arm/t-arm (MD_INCLUDES): Add hwdiv.md. ++ * config.gcc: Recognize marvell-f as a supported ARM architecture. ++ * doc/invoke.texi (ARM Options): Document -mcpu=marvell-f and ++ -march=marvell-f. ++ ++2008-05-23 Julian Brown ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2007-01-10 Mark Shinwell ++ ++ gcc/ ++ * config/arm/marvell-f.md: Fix FSF address and comment ++ capitalization. ++ * config/arm/marvell-f-vfp.md: New. ++ * config/arm/arm-cores.def: Add FL_VFPV2 for marvell-f. ++ * config/arm/arm.md: Include marvell-f-vfp.md. ++ (generic_vfp): Don't set attribute to "yes" for marvell_f tuning. ++ ++2008-05-23 Julian Brown ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2007-01-07 Mark Shinwell ++ ++ gcc/ ++ * config/arm/vfp.md: Document fmul{s,d} and fmac{s,d} types. ++ Remove documentation entry for fmul type. ++ Use fmuls to annotate single-precision multiplication patterns, ++ fmuld to annotate double-precision multiplication patterns, ++ fmacs to annotate single-precision multiply-accumulate patterns ++ and fmacd to annotate double-precision multiply-accumulate patterns. ++ * config/arm/vfp11.md: Update reservations accordingly. ++ * config/arm/arm.md: Note that certain values of the "type" ++ attribute are documented in vfp.md. Add fmul{s,d} and fmac{s,d} ++ values for that attribute. ++ ++2008-05-23 Julian Brown ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2007-01-04 Mark Shinwell ++ ++ gcc/ ++ * config/arm/vfp.md: Move pipeline description for VFP11 to... ++ * config/arm/vfp11.md: ...here. New. ++ * config/arm/arm.md: Include vfp11.md. ++ ++2008-05-23 Julian Brown ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2007-01-03 Mark Shinwell ++ ++ NOT ASSIGNED TO FSF ++ Port from Marvell compiler: ++ gcc/ ++ * config/arm/arm.c (arm_issue_rate): New. ++ (arm_multipass_dfa_lookahead): New. ++ (TARGET_SCHED_ISSUE_RATE): Define. ++ (TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD): Define. ++ (FL_MARVELL_F): New. ++ (arm_tune_marvell_f): New. ++ (arm_override_options): Set arm_tune_marvell_f as appropriate. ++ * config/arm/arm.h (arm_tune_marvell_f): Declare. ++ * config/arm/arm-cores.def: Add marvell-f entry. ++ * config/arm/arm-tune.md: Regenerate. ++ * config/arm/t-arm (MD_INCLUDES): Add marvell-f.md. ++ * config/arm/arm.md: Don't use generic scheduler for marvell-f. ++ Include marvell-f.md. Extend "insn" attribute with mov/mvn/ ++ and/orr/eor cases and annotate instruction patterns accordingly. ++ * config/arm/vfp.md: Annotate likewise. ++ * config/arm/marvell-f.md: New. 
++ ++2008-05-23 Julian Brown ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2006-11-09 Paul Brook ++ ++ Merge from branches/csl/sourcerygxx-4_1. ++ gcc/ ++ * config/arm/arm.c (all_architectures): Add iWMMXt2 entry. ++ * config/arm/arm-cores.def: New ARM_CORE entry for iWMMXt2. ++ * config/arm/arm-tune.md: Regenerate. ++ ++2008-05-23 Julian Brown ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2006-11-09 Paul Brook ++ ++ Merge from branches/csl/sourcerygxx-4_1. ++ 2006-09-10 Paul Brook ++ gcc/ ++ * config/arm/linux-eabi.h (SYSROOT_SUFFIX_SPEC): Define. ++ * config/arm/t-linux-eabi (MULTILIB_OPTIONS, MULTILIB_DIRNAMES): ++ Add armv4t multilib. ++ ++2008-05-23 Julian Brown ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2006-11-02 Paul Brook ++ ++ gcc/ ++ * config.gcc (arm*-*-eabi*): Add arm/nocrt0.h to tm_file. ++ * config/arm/nocrt0.h: New file. ++ ++2008-05-23 Nathan Froyd ++ ++ gcc/ ++ * config/rs6000/e300c2c3.md: Correctly use FSF upstream version, ++ not Sourcery G++ 4.2 version. ++ ++2008-05-23 Nathan Froyd ++ ++ Backport from mainline: ++ ++ gcc/ ++ 2008-02-26 Edmar Wienskoski ++ ++ * config/rs6000/rs6000.c (processor_costs): Update e300 cache ++ line sizes. ++ * doc/invoke.texi: Add e300c2 and e300c3 to list of cpus. ++ ++ gcc/ ++ 2008-02-24 Edmar Wienskoski ++ ++ * config.gcc (powerpc*-*-*): Add new cores e300c2 and e300c3. ++ * config/rs6000/e300c2c3.md: New file. ++ * config/rs6000/rs6000.c (processor_costs): Add new costs for ++ e300c2 and e300c3. ++ (rs6000_override_options): Add e300c2 and e300c3 cases to ++ processor_target_table. Do not allow usage of Altivec or Spe ++ with e300 cores. Initialize rs6000_cost for e300c2 and e300c3. ++ (rs6000_issue_rate): Set issue rate for e300c2 and e300c3. ++ * config/rs6000/rs6000.h (processor_type): Add ++ PROCESSOR_PPCE300C2 and PROCESSOR_PPCE300C3. ++ (ASM_CPU_SPEC): Add e300c2 and e300c3. ++ * config/rs6000/rs6000.md (define_attr "cpu"): Add ppce300c2 ++ and ppce300c3. Include e300c2c3.md. ++ ++2008-05-23 Nathan Froyd ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2007-12-31 Joseph Myers ++ ++ gcc/ ++ * config/rs6000/eabi-cn.asm, config/rs6000/sol-ci.asm, ++ config/rs6000/sol-cn.asm: Remove .file directives. ++ ++2008-05-23 Nathan Froyd ++ ++ Backport from mainline: ++ ++ 2008-03-06 Nathan Froyd ++ ++ * dwarf2out.c (dwarf2out_frame_debug_expr): Consult the ++ dwarf_register_span hook when emitting unwind information for ++ register-to-memory saves. ++ * config/rs6000/rs6000.c (spe_synthesize_frame): Delete. ++ (rs6000_frame_related): Remove call to spe_synthesize_frame. ++ ++2008-05-23 Nathan Froyd ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2008-03-04 Nathan Froyd ++ ++ gcc/ ++ * config/rs6000/eabi.asm (__eabi): Don't run __init. ++ (__eabi_convert, __eabi_uconvert): Define only if _RELOCATABLE. ++ ++2008-05-23 Nathan Froyd ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2007-03-22 Daniel Jacobowitz ++ ++ gcc/ ++ * config/rs6000/eabi.asm (.Lfini): New. ++ ++2008-05-23 Nathan Froyd ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2008-02-12 Nathan Sidwell ++ Daniel Jacobowitz ++ ++ gcc/ ++ * config/rs6000/eabi-ci.asm (__init): Add _init func start. ++ (__fini): Also declare _fini for newlib. ++ ++2008-05-22 Daniel Jacobowitz ++ ++ gcc/ ++ * function.c (assign_parm_remove_parallels): New. ++ (assign_parm_setup_block_p): Do not return true for non-BLKmode ++ PARALLELs. ++ (assign_parm_setup_block): Do not handle them. ++ (assign_parm_setup_reg, assign_parm_setup_stack): Call ++ assign_parm_remove_parallels. 
++ ++2008-05-22 Daniel Jacobowitz ++ ++ gcc/ ++ * c-typeck.c (convert_for_assignment): Use ++ vector_targets_convertible_p. ++ * c-common.c (vector_targets_convertible_p): New. ++ * c-common.h (vector_targets_convertible_p): New prototype. ++ * config/rs6000/rs6000.c (rs6000_is_opaque_type): Do not check ++ opaque_p_V2SI_type_node. ++ ++ gcc/cp/ ++ * typeck.c (comp_ptr_ttypes_real): Use vector_targets_convertible_p. ++ (comp_ptr_ttypes_const): Likewise. ++ ++ gcc/testsuite/ ++ * g++.dg/other/opaque-1.C, g++.dg/other/opaque-2.C, ++ g++.dg/other/opaque-3.C: Also run on powerpc*-*-linux*spe*. ++ ++2008-05-22 Sandra Loosemore ++ ++ Revert (as already fixed in a different way): ++ ++ 2008-05-21 Sandra Loosemore ++ ++ libgcc/ ++ * config/t-vxworks: New file. ++ * config.host (Common parts for widely ported systems): Use it. ++ ++2008-05-22 Nathan Sidwell ++ ++ gcc/testsuite/ ++ Backport 2008-05-22 Nathan Sidwell ++ * lib/dg-pch.exp (dg-pch): Fix if bracing. ++ ++2008-05-21 Sandra Loosemore ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2007-04-13 Joseph Myers ++ ++ Merge from Sourcery G++ 4.1 branch: ++ ++ 2007-03-27 Mark Mitchell ++ gcc/testsuite/ ++ * gcc.target/i386/sse-10.c: Pass -mno-omit-leaf-frame-pointer. ++ ++2008-05-21 Sandra Loosemore ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2007-08-26 Mark Mitchell ++ ++ gcc/testsuite/ ++ * lib/prune.exp (prune_warnings): Extend the default ++ implementation to prune linker warnings about libm on Solaris. ++ libstdc++-v3/ ++ * testsuite/lib/prune.exp (prune_g++_output): Prune linker ++ warnings about libm on Solaris. ++ ++2008-05-21 Sandra Loosemore ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2007-08-26 Mark Mitchell ++ ++ fixincludes/ ++ * inclhack.def (solaris_mutex_init_2): Remove precise machine ++ checks; look at to determine whether fix is ++ required. ++ (solaris_rwlock_init_1): Likewise. ++ (solaris_once_init_2): Likewise. ++ * tests/base/sys/types.h: Add output generated by ++ solaris_mutex_init_2. ++ * fixincl.x: Regenerated. ++ ++2008-05-21 Sandra Loosemore ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2007-03-05 Mark Mitchell ++ ++ * configure.in (*-*-vxworks*): Remove target-libstdc++-v3 from ++ noconfigdirs. ++ * configure: Regenerated. ++ ++2008-05-21 Sandra Loosemore ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2007-03-12 Richard Sandiford ++ ++ gcc/ ++ * config/vx-common.h (TARGET_FLEXLM): Define. ++ ++2008-05-21 Sandra Loosemore ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2007-03-22 Daniel Jacobowitz ++ ++ gcc/testsuite/ ++ * g++.dg/other/profile1.C: Use dg-require-profiling. ++ ++2008-05-21 Sandra Loosemore ++ ++ libgcc/ ++ * config/t-vxworks: New file. ++ * config.host (Common parts for widely ported systems): Use it. ++ ++2008-05-21 Sandra Loosemore ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2007-04-25 Paul Brook ++ ++ Merge from sourcerygxx-4_1 ++ 2005-03-10 Julian Brown ++ libstdc++-v3/ ++ * configure.ac (LIBSUPCXX_PRONLY): New AM_CONDITIONAL: yes ++ if we are compiling for SymbianOS on ARM. ++ * include/Makefile.am: Don't install C++ headers if ++ LIBSUPCXX_PRONLY is true. ++ * libsupc++/Makefile.am: Include only eh_personality.cc ++ in libsupc++ if LIBSUPCXX_PRONLY is true. ++ * Makefile.in: Regenerate. ++ * configure: Regenerate. ++ * include/Makefile.in: Regenerate. ++ * libmath/Makefile.in: Regenerate. ++ * libsupc++/Makefile.in: Regenerate. ++ * po/Makefile.in: Regenerate. ++ * src/Makefile.in: Regenerate. ++ * testsuite/Makefile.in: Regenerate. 
++ ++2008-05-21 Maxim Kuvyrkov ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2008-05-02 Maxim Kuvyrkov ++ gcc/ ++ Backport from mainline. ++ 2008-02-19 Christian Bruel ++ Zdenek Dvorak ++ * tree-ssa-loop-ivopts.c (may_be_unaligned_p): Check step alignment. ++ ++2008-05-21 Maxim Kuvyrkov ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2007-12-05 Maxim Kuvyrkov ++ Make scheduler better process end of the blocks. ++ ++ gcc/ ++ * haifa-sched.c (insn_finishes_cycle_p): New static function. ++ (max_issue): Use it. Fix handling of number of instruction to try. ++ * sched-int.h (struct sched_info: insn_finished_block_p): New ++ scheduler hook. ++ * sched-rgn.c (rgn_insn_finishes_block_p): Implement it. ++ (region_sched_info): Update. ++ * sched-ebb.c (ebb_sched_info): Update. ++ * modulo-sched.c (sms_sched_info): Update. ++ ++2008-05-20 Sandra Loosemore ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2007-03-22 Daniel Jacobowitz ++ ++ libstdc++-v3/ ++ * testsuite/27_io/basic_filebuf/sputbackc/char/9425.cc: Use ++ dg-require-fileio. ++ * testsuite/27_io/basic_filebuf/sputbackc/char/1-out.cc: Likewise. ++ * testsuite/27_io/basic_filebuf/sputbackc/char/2-out.cc: Likewise. ++ ++2008-05-20 Sandra Loosemore ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2008-02-12 Julian Brown ++ ++ Merge from MIPS: ++ ++ 2007-12-05 Thiemo Seufer ++ ++ libcpp/ ++ * Makefile.in ($(srcdir)/config.in): Fix dependency. ++ ++2008-05-20 Sandra Loosemore ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2006-03-29 Richard Sandiford ++ gcc/ ++ * config.gcc (tm_file): Update commentary. ++ ++2008-05-20 Nathan Sidwell ++ ++ Merge from Sourcery G++ 4.2: ++ ++ * gcc.c-torture/execute/builtins/memops-asm.c: Set inside_main. ++ ++ * lib/gcc-dg.exp (cleanup-saved-temps): Add optional list of ++ suffixes not to delete. ++ * gcc.dg/pch/save-temps-1.c: Don't delete ".s" temp. ++ * g++.dg/pch/pch.C: Likewise. ++ ++ * g++.old-deja/g++.pt/static11.C: Replace xfail by target requirement. ++ ++ * lib/dg-pch.exp (dg-pch): Don't expect .s files if there are ++ dg-errors expected. ++ ++2008-05-20 Nathan Sidwell ++ ++ Merge from Sourcery G++ 4.2: ++ ++ * c-incpath.c (INO_T_EQ): Do not define on non-inode systems. ++ (DIRS_EQ): New. ++ (remove_duplicates): Do not set inode on non-inode systems. Use ++ DIRS_EQ. ++ ++2008-05-19 Sandra Loosemore ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2007-08-12 Mark Shinwell ++ ++ gcc/ ++ * target.h (gcc_target): Add adjust_reg_alloc_order member. ++ * target-def.h (TARGET_ADJUST_REG_ALLOC_ORDER): New. ++ (TARGET_INITIALIZER): Add TARGET_ADJUST_REG_ALLOC_ORDER. ++ * regclass.c (init_reg_sets): Don't initialize ++ inv_reg_alloc_order. ++ (init_reg_sets_1): Call adjust_reg_alloc_order hook and ++ then initialize inv_reg_alloc_order. ++ * hooks.c (hook_intp_void): New. ++ * hooks.h (hook_intp_void): New. ++ * doc/tm.texi: Document TARGET_ADJUST_REG_ALLOC_ORDER. ++ ++2008-05-19 Sandra Loosemore ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2007-02-02 Mark Shinwell ++ Richard Earnshaw ++ ++ gcc/ ++ * varasm.c (use_object_blocks_p): Prevent use of object blocks ++ if -fno-toplevel-reorder is specified. ++ ++2008-05-19 Sandra Loosemore ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2007-02-16 Richard Sandiford ++ ++ gcc/ ++ * Makefile.in (postreload.o): Depend on addresses.h. ++ * addresses.h (index_reg_class, ok_for_index_p_1): New functions. ++ (regno_ok_for_index_p): New function. ++ * postreload.c: Include addresses.h. ++ (reload_combine): Use index_reg_class instead of INDEX_REG_CLASS. ++ * regclass.c (ok_for_index_p_nonstrict): Add a mode argument. 
++ Use ok_for_index_p_1 instead of REGNO_OK_FOR_INDEX_P. ++ (record_address_regs): Use index_reg_class instead of INDEX_REG_CLASS. ++ Update calls to ok_for_index_p_nonstrict. ++ * regrename.c (scan_rtx_address): Use regno_ok_for_index_p instead of ++ REGNO_OK_FOR_INDEX_P and index_reg_class instead of INDEX_REG_CLASS. ++ (replace_oldest_value_addr): Likewise. ++ * reload.c (find_reloads_address): Use index_reg_class instead ++ of INDEX_REG_CLASS. Do not push an index register reload if ++ index_reg_class returns NO_REGS. ++ (find_reloads_address_1): Use index_reg_class instead ++ of INDEX_REG_CLASS and regno_ok_for_index_p instead of ++ REGNO_OK_FOR_INDEX_P. ++ * doc/tm.texi (MODE_INDEX_REG_CLASS): Document new macro. ++ (REGNO_MODE_OK_FOR_INDEX_P): Likewise. ++ ++2008-05-19 Sandra Loosemore ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2007-06-05 Mark Shinwell ++ ++ * release-notes-csl.xml (Register allocation bug fix): New. ++ ++ gcc/ ++ * reload1.c (emit_reload_insns): Upon discovery of an input ++ reload whose reload register is not a spill register, ++ invalidate any existing reloads involving that register. ++ ++2008-05-19 Sandra Loosemore ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2006-10-24 Mark Shinwell ++ gcc/ ++ * final.c (asm_insn_count): Return zero for an empty asm body. ++ ++2008-05-19 Sandra Loosemore ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2006-12-15 Richard Sandiford ++ gcc/testsuite/ ++ * gcc.c-torture/compile/20061214-1.c: New test. ++ ++2008-05-19 Sandra Loosemore ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2007-05-02 Mark Shinwell ++ ++ * release-notes-csl.xml (Forced alignment of array variables): ++ New. ++ ++ gcc/ ++ * doc/tm.texi: Document that LOCAL_ALIGNMENT and ++ DATA_ALIGNMENT should not be used directly. ++ * doc/invoke.texi (-falign-arrays): Document. ++ * function.c (DATA_ALIGNMENT): Define to a default if ++ undefined. ++ (alignment_for_aligned_arrays): New. ++ (calculate_local_alignment): New. ++ (calculate_global_alignment): New. ++ * function.h (calculate_local_alignment): New. ++ (calculate_global_alignment): New. ++ * cfgexpand.c (LOCAL_ALIGNMENT): Don't define to a default. ++ (get_decl_align_unit): Use calculate_local_alignment. ++ * common.opt (-falign-arrays): New. ++ * varasm.c (assemble_variable): Use calculate_data_alignment, ++ and use it irrespective of whether DATA_ALIGNMENT is defined. ++ ++2008-05-16 Nathan Froyd ++ Kazu Hirata ++ Daniel Jacobowitz ++ Nathan Sidwell ++ ++ Merge from Sourcery G++ 4.2: ++ ++ gcc/ ++ * config/rs6000/linux.h (CC1_EXTRA_SPEC, ASM_DEFAULT_SPEC, ++ SYSROOT_SUFFIX_SPEC): Define. ++ * config/rs6000/eabi.h (CC1_EXTRA_SPEC, ASM_DEFAULT_SPEC): Define. ++ * config/rs6000/t-linux: New file. ++ * config/rs6000/t-ppcgas (MULTILIB_OPTIONS): Add te500v1/te500v2/te600. ++ (MULTILIB_DIRNAMES): Add te500v1 te500v2 te600. ++ (MULTILIB_EXCEPTIONS): New. ++ (MULTILIB_EXTRA_OPTS): Remove mrelocatable-lib. ++ ++2008-05-16 Nathan Froyd ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2007-09-07 Daniel Jacobowitz ++ gcc/ ++ * config/rs6000/rs6000.c (rs6000_dwarf_register_span): Fix ++ debug output for other floating point modes. ++ ++2008-05-16 Nathan Froyd ++ ++ Merge from Sourcery G++ 4.2: ++ ++ 2007-08-16 Daniel Jacobowitz ++ gcc/ ++ * config/rs6000/rs6000.c (rs6000_conditional_register_usage): Mark ++ call-saved AltiVec registers call-used if ! TARGET_ALTIVEC_ABI. ++ * config/rs6000/rs6000.h (CALL_USED_REGISTERS): Mark the first 20 ++ AltiVec registers call-used. ++ (CALL_REALLY_USED_REGISTERS): Likewise. 
++ ++ gcc/testsuite/ ++ * gcc.target/powerpc/altivec-consts.c: Remove -mabi=altivec. ++ * gcc.target/powerpc/altivec-varargs-1.c: Likewise. ++ * gcc.dg/vmx/vmx.exp: Likewise. ++ ++2008-05-16 Nathan Froyd ++ ++ Merge from Sourcery G++ 4.2: ++ ++ gcc/ ++ * config.gcc (powerpc-timesys-linux-gnu): Handle new target. ++ * config/rs6000/timesys-linux.h: New file. ++ * config/rs6000/t-timesys: New file. ++ ++2008-05-14 Joseph Myers ++ ++ Backport: ++ ++ fixincludes/ ++ 2008-05-14 Joseph Myers ++ * inclhack.def (AAB_fd_zero_asm_posix_types_h): Bypass on ++ posix_types_64. ++ * fixincl.x: Regenerate. ++ ++2008-05-09 Maxim Kuvyrkov ++ ++ Backport from mainline. ++ ++ gcc/ ++ 2008-05-09 Maxim Kuvyrkov ++ ++ * rtl-factoring.c (collect_pattern_seqs): Fix typo. ++ ++2008-05-09 Maxim Kuvyrkov ++ ++ Backport from mainline. ++ ++ gcc/ ++ 2008-05-07 Maxim Kuvyrkov ++ ++ Cleanup ColdFire scheduling support and add V4 pipeline model. ++ ++ * config/m68k/m68k.md (UNSPEC_TIE): New constant. ++ (define_attr cpu): Add cfv4 value. ++ (define_attr type, define_attr type1): Merge into a single 'type' ++ attribute. Update all uses. ++ (define_attr opx_type, define_attr opy_type, define_attr opx_access): ++ Rearrange and update. Rename value 'reg' to 'Rn', add value 'FPn'. ++ Update all uses. ++ (define_attr opx_mem, define_attr opy_mem): Remove. ++ (define_attr op_mem): Clean up, update comment. ++ (define_attr size): Use specific values instead of general int. ++ (define_attr guess, define_attr split): Remove. Update all uses. ++ (movdf_internal, tstsi_internal, tsthi_internal, tstqi_internal, ++ tst_68881, pushexthisi_const, movsi_const0_68000_10, ++ movsi_const0_68040_60, movsi_const0, movsi_cf, movstrictqi_cf, ++ zero_extendhisi2_cf, zero_extendqisi2_cfv4, cfv4_extendhisi2, ++ 68k_extendhisi2, extendqihi2, cfv4_extendqisi2, 68k_extendqisi2, ++ floatsi2_68881, ftrunc2_68881, ftrunc2_cf, ++ fixqi2_68881, fixhi2_68881, fixsi2_68881, ++ adddi_dishl32, addsi3_5200, add3_floatsi_68881, ++ add3_floathi_68881, add3_floatqi_68881, ++ add3_68881, add3_cf, subdi_dishl32, subsi3, ++ sub3_floatsi_68881, sub3_floathi_68881, ++ sub3_floatqi_68881, sub3_68881, sub3_cf, ++ mulhi3, mulhisi3, mulhisisi3_s, mulsi3_68020, mulsi3_cf, ++ umulhisi3, mulhisisi3_z, mul3_floatsi_68881, ++ mul3_floathi_68881, mul3_floatqi_68881, fmul3_cf, ++ div3_cf, sqrt2_cf, abs2_cf, clzsi2, ++ one_cmplsi2_5200, subreghi1ashrdi_const32, ashrsi3, lshrsi3, ++ bsetmemqi, bsetmemqi_ext, bclrmemqi, bclrmemqi_ext, ++ beq, bne, bgt, blt, bordered, bunordered, buneq, bunge, bungt, bunle, ++ bunlt, bltgt, tablejump_internal, call, non_symbolic_call_value, ++ symbolic_call_value_jsr, symbolic_call_value_bsr, link): ++ Update or set attributes. ++ (stack_tie): New fake instruction. ++ ++ * config/m68k/m68k.h (TUNE_CFV4): New macro. ++ (m68k_sched_attr_size): Update declaration. ++ (m68k_sched_attr_type2): Remove. ++ (m68k_sched_address_bypass_p, m68k_sched_indexed_address_bypass_p): ++ Declare new bypass predicates. ++ ++ * config/m68k/m68k.c (m68k_sched_issue_rate, ++ m68k_sched_first_cycle_multipass_dfa_lookahead): Declare hook ++ implementations. ++ (TARGET_SCHED_ISSUE_RATE, ++ TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD): Override hooks. ++ (override_options): Handle scheduling for ColdFire V4 core. ++ (m68k_expand_prologue): Emit stack_tie. ++ (enum attr_op_type): Split value 'OP_TYPE_REG' to 'OP_TYPE_RN' and ++ 'OP_TYPE_FPN'. Update all uses. ++ (sched_guess_p): Remove. ++ (sched_address_type): Handle symbolic addresses. 
++ (sched_get_operand): New static function. ++ (sched_operand_type): Merge into sched_attr_op_type. ++ (sched_attr_op_type): Handle FP registers, handle quick constants, ++ update. ++ (m68k_sched_attr_opx_type, m68k_sched_attr_opy_type): Update. ++ (m68k_sched_attr_size): Update. Move logic to ... ++ (sched_get_attr_size_int): New static function. ++ (sched_get_opxy_mem_type): New static function. ++ (m68k_sched_attr_op_mem): Update. ++ (m68k_sched_attr_type2): Remove. ++ (sched_cfv4_bypass_data): New static variable. ++ (m68k_sched_adjust_cost): Handle ColdFire V4 bypass. ++ (m68k_sched_issue_rate): Implement scheduler hook. ++ (struct _sched_ib: enabled_p): New field. ++ (m68k_sched_variable_issue): Update. Handle V4. ++ (SCHED_DUMP_TODO, SCHED_DUMP_DONE, SCHED_DUMP_NOTHING, ++ sched_dump_class_func_t, sched_dump_split_class, ++ sched_dump_dfa_guess_unit_code, sched_dump_dfa_state, ++ sched_dump_dfa_class, m68k_sched_dump): Remove. ++ (m68k_sched_first_cycle_multipass_dfa_lookahead): Implement scheduler ++ hook. ++ (m68k_sched_init_global): Remove statisctics dumping, introduce ++ sanity check that all instructions have pipeline reservations. Handle ++ ColdFire V4 core. ++ (m68k_sched_dfa_pre_advance_cycle, m68k_sched_dfa_post_advance_cycle): ++ Handle ColdFire V4 core. ++ (sched_mem_operand_p, sched_get_reg_operand, sched_get_mem_operand): ++ New static functions. ++ (m68k_sched_address_bypass_p): New bypass predicate. ++ (sched_get_indexed_address_scale): New static function. ++ (m68k_sched_indexed_address_bypass_p): New bypass predicate. ++ ++ * cf.md: Update comments. ++ (define_attr type2): Remove. Use 'type' attribute instead. ++ Update all uses. ++ (cf_ib): Rename to cfv123_ib. Update all uses. ++ (cf_oep): Rename to cfv123_oep. Update all uses. ++ (cf_chr): Rename to cfv123_chr. Update all uses. ++ (cf_mem): Rename to cfv123_mem. Update all uses. ++ (cf_mac): Move to more appropriate place. ++ (cfv123_guess): New automaton and cpu_unit. ++ (cfv123_*, cfv12_*, cfv1_*, cfv2_*, cfv3_*): Use type attribute. ++ Update uses of 'size' attribute. Handle before reload scheduling. ++ (cfv123_guess): New dummy reservation for unhandled instructions. ++ (cfv4_*): Pipeline description of ColdFire V4 core. ++ (ignore): New reservation to handle 'ignore' type. ++ ++2008-05-09 Maxim Kuvyrkov ++ ++ Backport from mainline. ++ ++ gcc/ ++ ++ 2008-04-22 Maxim Kuvyrkov ++ ++ Support scheduling for ColdFire V1 and V3 microarchitecture. ++ Improve scheduling of multiplication instructions. ++ ++ * config/m68k/m68k.md (cpu): Add cfv1 and cfv3. Rename cf_v2 to cfv1. ++ (mac): New instruction attribute. ++ * config/m68k/m68k.c (override_options): Handle cfv1, cfv3 and mac. ++ (m68k_sched_mac): New variable. ++ (m68k_sched_attr_type2, m68k_sched_md_init_global): Update. ++ Handle cfv1 and cfv3. ++ (max_insn_size): New static variable. ++ (struct _sched_ib): New type. ++ (sched_ib): New static variable. ++ (sched_ib_size, sched_ib_filled, sched_ib_insn): Convert variables ++ to fields of 'struct _sched_ib sched_ib'. Update all uses. ++ (m68k_sched_variable_issue): Add modeling of cfv3 instruction buffer. ++ Update. ++ (m68k_sched_md_init_global, m68k_sched_md_finish_global, ++ m68k_sched_md_init, m68k_sched_md_finish): Handle cfv1 and cfv3. Init ++ new variables. Update. ++ (m68k_sched_dfa_pre_advance_cycle, m68k_sched_dfa_post_advance_cycle): ++ Add modeling of cfv3 instruction buffer. Update. ++ * config/m68k/m68k-protos.h (m68k_sched_mac): Declare. ++ * config/m68k/m68k.h (TUNE_CFV3): New macro. 
++ * config/m68k/cf.md: Change substrings 'cf_v2' to 'cfv12' or 'cfv123'. ++ (cf_* reservations): Rename to cfv12 or cfv123 to indicate cores ++ a particular reservation applies to. ++ (type2): Reorganize attribute values. Rename alu to alu_reg, ++ alu_l to alu, move_l to omove. Join move to alu. Split mul ++ to mul_l and mul_w. ++ (cf_ib_*): Simplify description of instruction buffer. ++ (cf_ib_w0, cf_ib_w4, cf_ib_w5, cf_ib_w6): Remove. ++ (cf_mem): Split into cf_mem1 and cf_mem2. ++ (cf_v2_move_??): Rename to cfv12_alu_??. ++ (cf_v2_move_l_??): Rename to cfv12_omove_??. ++ (cf_v2_mul_??): Remove reservations. ++ (cfv12_mul_l_??, cfv12_mul_w_??, cfv12_mac_w_??, cfv12_mac_l_??, ++ cfv12_emac_??, cfv12_emac_w_i0): New reservations. ++ (cfv12_rts, cfv12_call, cfv12_bcc, cfv12_bra, cfv12_jmp): Move to ++ appropriate place. ++ (cfv3_alu_10, cfv3_omove_10, cfv3_alu_i0, cfv3_omove_i0, cfv3_alu_01, ++ cfv3_alu_0i, cfv3_alu_11, cfv3_omove_11, cfv3_alu_i1, cfv3_omove_i1, ++ cfv3_alu_1i, cfv3_omove_1i, cfv3_pea_11, cfv3_pea_i1, cfv3_mul_w_10, ++ cfv3_mul_l_10, cfv3_mul_w_i0, cfv3_mac_w_10, cfv3_mac_l_10, ++ cfv3_mac_w_i0, cfv3_emac_10, cfv3_emac_w_i0, cfv3_rts, cfv3_call, ++ cfv3_bcc, cfv3_bra, cfv3_jmp): New reservations. ++ (cfv3_*_1, cfv3_*_2, cfv3_*_3): New instruction reservations that are ++ expansions of the above reservations for instructions of sizes ++ 1, 2 and 3 words. ++ ++2008-05-09 Maxim Kuvyrkov ++ ++ Backport from mainline. ++ ++ gcc/ ++ 2008-04-22 Maxim Kuvyrkov ++ * rtl-factoring.c (collect_patterns_seqs): Handle CC0 targets. ++ ++2008-05-05 Mark Mitchell ++ Joseph Myers ++ Mark Shinwell ++ Vladimir Prus ++ Paul Brook ++ ++ Merge from Sourcery G++ 4.2: ++ ++ gcc/ ++ * config.gcc (arm-wrs-linux-gnueabi, i586-wrs-linux-gnu, ++ mips-wrs-linux-gnu, powerpc-wrs-linux-gnu, sparc-wrs-linux-gnu): ++ Handle new targets. ++ * config/arm/t-wrs-linux, config/arm/wrs-linux.h, ++ config/mips/t-wrs-linux, config/mips/wrs-linux.h, ++ config/rs6000/t-wrs-linux, config/rs6000/wrs-linux.h: New. ++ * config/sparc/linux64.h (TARGET_DEFAULT): Define differently for ++ BIARCH_32BIT_DEFAULT. ++ ++ libcpp/ ++ * configure.ac (sparc-wrs-linux-gnu): Add to need_64bit_hwint=yes ++ targets. ++ * configure: Regenerate. ++ ++2008-05-05 Joseph Myers ++ ++ Merge from Sourcery G++ 4.2: ++ ++ gcc/ ++ * config/sparc/linux64.h (LINK_ARCH32_SPEC, LINK_ARCH64_SPEC, ++ LINK_SPEC): Use %R in -Y P argument. ++ ++2008-05-05 Joseph Myers ++ Daniel Jacobowitz ++ ++ Merge from Sourcery G++ 4.2: ++ ++ gcc/ ++ * config/rs6000/rs6000.h (OPTION_DEFAULT_SPECS): Handle -te500v1, ++ -te500v2 and -te600. ++ ++2008-05-05 Joseph Myers ++ ++ Merge from Sourcery G++ 4.2: ++ ++ gcc/ ++ * config/rs6000/sysv4.h (CC1_EXTRA_SPEC): Define and use. ++ ++2008-05-05 Joseph Myers ++ ++ Merge from Sourcery G++ 4.2: ++ ++ gcc/testsuite/ ++ * g++.dg/compat/struct-layout-1.exp: Compile generator on build ++ system. ++ * gcc.dg/compat/struct-layout-1.exp: Likewise. ++ ++2008-05-05 Joseph Myers ++ ++ Merge from Sourcery G++ 4.2: ++ ++ gcc/testsuite/ ++ * lib/gcc-dg.exp (remove-build-file): Remove files on remote host ++ as well as on build. ++ ++2008-05-05 Joseph Myers ++ ++ Backport: ++ ++ gcc/ ++ 2008-03-04 Joseph Myers ++ * config/i386/i386.c (override_options): Force ++ -maccumulate-outgoing-args on if TARGET_STACK_PROBE. ++ ++ gcc/testsuite/ ++ 2008-03-04 Joseph Myers ++ * gcc.target/i386/sse-10.c: Don't use ++ -mno-accumulate-outgoing-args on *-*-mingw* *-*-cygwin*. 
++ ++2008-05-05 Joseph Myers ++ ++ Merge from Sourcery G++ 4.2: ++ ++ config/ ++ * config/mh-mingw (LDFLAGS): Define. ++ ++ gcc/ ++ * configure.ac: Use empty LDFLAGS when running configure for the ++ build system. ++ * configure: Regenerate. ++ * Makefile.in (BUILD_LDFLAGS): Do not define to $(LDFLAGS) unless ++ host == build. ++ ++2008-05-05 Joseph Myers ++ ++ Merge from Sourcery G++ 4.2: ++ ++ gcc/ ++ * libgcc2.c (__do_global_dtors): Do not call ++ __deregister_frame_info on MinGW. ++ (__do_global_ctors): Call atexit before calling constructors. Do ++ not call __register_frame_info on MinGW. ++ * config/i386/mingw32.h (LIBGCC_SPEC): Start with -lgcc. ++ ++2008-05-05 Joseph Myers ++ ++ Merge from Sourcery G++ 4.2: ++ ++ gcc/ ++ 2007-06-13 Joseph Myers ++ * common.opt (--Wno-poison-system-directories): New. ++ * doc/invoke.texi (-Wno-poison-system-directories): Document. ++ * c-incpath.c: Include flags.h. ++ (merge_include_chains): Check flag_poison_system_directories. ++ * gcc.c (LINK_COMMAND_SPEC): Pass --no-poison-system-directories ++ to linker if -Wno-poison-system-directories. ++ * Makefile.in (c-incpath.o): Depend on $(FLAGS_H). ++ ++ 2007-03-20 Daniel Jacobowitz ++ Joseph Myers ++ * configure.ac (--enable-poison-system-directories): New option. ++ * configure, config.in: Regenerate. ++ * c-incpath.c (merge_include_chains): If ++ ENABLE_POISON_SYSTEM_DIRECTORIES defined, warn for use of ++ /usr/include, /usr/local/include or /usr/X11R6/include. ++ ++2008-05-02 Joseph Myers ++ ++ Backport: ++ ++ gcc/ ++ 2008-02-23 Joseph Myers ++ * explow.c (memory_address): Assert that the generated address is ++ valid. ++ ++2008-05-02 Joseph Myers ++ ++ Backport: ++ ++ libstdc++-v3/ ++ 2008-03-04 Joseph Myers ++ * crossconfig.m4 (*-mingw32*): Define HAVE_STRTOF and ++ HAVE_STRTOLD. ++ * configure: Regenerate. ++ ++2008-05-02 Joseph Myers ++ ++ Merge from Sourcery G++ 4.2: ++ ++ gcc/ ++ * collect2.c (find_a_file): Use IS_ABSOLUTE_PATH. ++ ++2008-05-02 Joseph Myers ++ ++ gcc/ ++ * config.gcc (i[34567]86-*-* | x86_64-*-*): Support arch32 arch64. ++ * config/i386/i386.h (OPT_ARCH32, OPT_ARCH64): Define. ++ (OPTION_DEFAULT_SPECS): Add arch32 and arch64. ++ ++2008-05-02 Joseph Myers ++ ++ Merge from Sourcery G++ 4.2: ++ ++ gcc/ ++ * config.gcc (mips*-*-*): Support arch32 arch64 tune32 tune64. ++ (powerpc*-*-* | rs6000-*-*): Support cpu32 cpu64. ++ (all_defaults): Add arch32 arch64 cpu32 cpu64 tune32 tune64. ++ * config/mips/mips.h (OPTION_DEFAULT_SPECS): Add support for ++ arch32 arch64 tune32 tune64. ++ * gcc/config/rs6000/rs6000.h (OPTION_DEFAULT_SPECS): Add cpu32 and ++ cpu64. ++ ++2008-05-02 Joseph Myers ++ ++ Merge from Sourcery G++ 4.2: ++ ++ gcc/ ++ * config.gcc (i[34567]86-*-linux*): Use extra config files if ++ --enable-extra-sgxx-multilibs. ++ * config/i386/cs-linux.h, config/i386/cs-linux.opt, ++ config/i386/t-cs-linux: New. ++ ++2008-05-01 Mark Mitchell ++ Vladimir Prus ++ Joseph Myers ++ Carlos O'Donell ++ Daniel Jacobowitz ++ Kazu Hirata ++ ++ libiberty/ ++ * configure.ac: Add cygpath for mingw hosts. ++ * configure: Regenerate. ++ * Makefile.in: Add cygpath. ++ * cygpath.c: New. ++ * pex-win32.c (pex_win32_open_read, pex_win32_open_write): Use ++ open not _open. ++ ++ include/ ++ * libiberty.h (cygpath): Declare. ++ ++2008-05-01 Carlos O'Donell ++ ++ Merge from Sourcery G++ 4.2: ++ ++ * Makefile.tpl (install): Call install-html and install-pdf. ++ * Makefile.in: Regenerate. ++ ++ gcc/ ++ * Makefile.in (install): Depend on install-html and install-pdf. 
++ ++2008-05-01 Joseph Myers ++ ++ Merge from Sourcery G++ 4.2: ++ ++ gcc/ ++ 2007-10-16 Joseph Myers ++ * gcc.c (license_me_flag): Define to 1 if not TARGET_FLEXLM. ++ ++ 2007-08-10 Nathan Froyd ++ * gcc.c (main): Consult license_me_flag to see if failure to ++ acquire a license implies bailing out entirely. ++ ++ 2007-08-24 Nathan Froyd ++ Issue #1892 ++ * gcc.c (main): Check license_me_flag before declaring failure. ++ ++ 2007-08-30 Nathan Sidwell ++ Issue #1892 ++ * gcc.c (main): Don't complain if license fails without -flicense-me ++ ++ 2007-04-12 Richard Sandiford ++ * gcc.c (main): If find_a_file fails, pass the original subproc ++ to csl_subproc_license_new. ++ ++ 2006-12-27 Mark Mitchell ++ NOT ASSIGNED TO FSF ++ COPYRIGHT CODESOURCERY ++ * gcc.c (main): If the license check fails, remove the generated ++ file. ++ ++ 2006-12-22 Mark Mitchell ++ NOT ASSIGNED TO FSF ++ COPYRIGHT CODESOURCERY ++ * aclocal.m4: Move licensing options ... ++ * acinclude.m4: ... here. ++ ++ 2006-12-13 Mark Mitchell ++ NOT ASSIGNED TO FSF ++ COPYRIGHT CODESOURCERY ++ * gcc.c (csl/license.h): Include, if required. ++ (license_checked): New variable. ++ (no_license): Remove. ++ (process_command): Set license_checked, not no_license. ++ (main): Use CodeSourcery license library. Remove most ++ TARGET_FLEXLM code. ++ * aclocal.m4 (--with-license): New option. ++ (--with-csl-license-feature): Likewise. ++ (--with-csl-license-version): Likewise. ++ * Makefile.in (CSL_LICENSEINC): Define it. ++ (CSL_LICENSELIB): Likewise. ++ (CSL_LICENSE_PROG): Likewise. ++ (LIBS): Depend on CSL_LICENSELIB. ++ (GCC_PASSES): Depend on CSL_LICENSE_PROG. ++ (INCLUDES): Add CSL_LICENSEINC. ++ * configure.ac (CSL_AC_LICENSE_VERSION): Use it. ++ (CSL_AC_LICENSE): Likewise. ++ (CSL_AC_LICENSE_FEATURE): Likewise. ++ * config.in: Regenerated. ++ * configure: Regenerated. ++ ++ 2006-10-29 Richard Sandiford ++ Joseph Myers ++ * gcc.c (license_me_flag): New variable. ++ (feature_proxy_flag): New variable. ++ (no_license): New variable. ++ (process_command): Handle -flicense-me, -ffeature-proxy and ++ -fno-feature-proxy. Initialize no_license. ++ (main): Check licenses. ++ ++2008-05-01 Joseph Myers ++ ++ * release-notes-csl.xml: New. ++ ++ ++Local Variables: ++mode: change-log ++change-log-default-name: "ChangeLog.csl" ++End: +--- a/boehm-gc/Makefile.am ++++ b/boehm-gc/Makefile.am +@@ -66,7 +66,8 @@ TESTS = gctest + ## CFLAGS, not those passed in from the top level make. 
+ LTCOMPILE = $(LIBTOOL) --mode=compile $(CC) $(DEFS) $(AM_CPPFLAGS) $(CPPFLAGS) \ + $(AM_CFLAGS) $(MY_CFLAGS) $(GC_CFLAGS) +-LINK = $(LIBTOOL) --mode=link $(CC) $(AM_CFLAGS) $(MY_CFLAGS) $(LDFLAGS) -o $@ ++LTLDFLAGS = $(shell $(top_srcdir)/../libtool-ldflags $(LDFLAGS)) ++LINK = $(LIBTOOL) --mode=link $(CC) $(AM_CFLAGS) $(MY_CFLAGS) $(LTLDFLAGS) -o $@ + + # Work around what appears to be a GNU make bug handling MAKEFLAGS + # values defined in terms of make variables, as is the case for CC and +--- a/boehm-gc/Makefile.in ++++ b/boehm-gc/Makefile.in +@@ -303,7 +303,8 @@ TESTS = gctest + LTCOMPILE = $(LIBTOOL) --mode=compile $(CC) $(DEFS) $(AM_CPPFLAGS) $(CPPFLAGS) \ + $(AM_CFLAGS) $(MY_CFLAGS) $(GC_CFLAGS) + +-LINK = $(LIBTOOL) --mode=link $(CC) $(AM_CFLAGS) $(MY_CFLAGS) $(LDFLAGS) -o $@ ++LTLDFLAGS = $(shell $(top_srcdir)/../libtool-ldflags $(LDFLAGS)) ++LINK = $(LIBTOOL) --mode=link $(CC) $(AM_CFLAGS) $(MY_CFLAGS) $(LTLDFLAGS) -o $@ + + # Work around what appears to be a GNU make bug handling MAKEFLAGS + # values defined in terms of make variables, as is the case for CC and +--- a/config.sub ++++ b/config.sub +@@ -254,6 +254,7 @@ case $basic_machine in + | mips | mipsbe | mipseb | mipsel | mipsle \ + | mips16 \ + | mips64 | mips64el \ ++ | mips64octeon | mips64octeonel \ + | mips64vr | mips64vrel \ + | mips64orion | mips64orionel \ + | mips64vr4100 | mips64vr4100el \ +@@ -335,6 +336,7 @@ case $basic_machine in + | mips-* | mipsbe-* | mipseb-* | mipsel-* | mipsle-* \ + | mips16-* \ + | mips64-* | mips64el-* \ ++ | mips64octeon-* | mips64octeonel-* \ + | mips64vr-* | mips64vrel-* \ + | mips64orion-* | mips64orionel-* \ + | mips64vr4100-* | mips64vr4100el-* \ +--- a/config/mh-mingw ++++ b/config/mh-mingw +@@ -1,3 +1,7 @@ + # Add -D__USE_MINGW_ACCESS to enable the built compiler to work on Windows + # Vista (see PR33281 for details). +-BOOT_CFLAGS += -D__USE_MINGW_ACCESS ++# Because we wrap access in libiberty/cygpath.c, we do not want to use ++# the MinGW wrappers for access. ++# BOOT_CFLAGS += -D__USE_MINGW_ACCESS ++# Increase stack limit to same as Linux default. ++LDFLAGS += -Wl,--stack,8388608 +--- a/config/mt-sde ++++ b/config/mt-sde +@@ -6,5 +6,5 @@ + # has two purposes: it allows libraries to be used in situations where + # $gp != our _gp, and it allows them to be built with -G8 while + # retaining link compability with -G0 and -G4. +-CFLAGS_FOR_TARGET += -Os -minterlink-mips16 -mcode-xonly -mno-gpopt ++CFLAGS_FOR_TARGET += -Os -minterlink-mips16 -mcode-xonly -mno-gpopt + CXXFLAGS_FOR_TARGET += -Os -minterlink-mips16 -mcode-xonly -mno-gpopt +--- a/configure ++++ b/configure +@@ -2195,7 +2195,7 @@ case "${target}" in + noconfigdirs="$noconfigdirs target-newlib target-libgloss target-rda ${libgcj}" + ;; + *-*-vxworks*) +- noconfigdirs="$noconfigdirs target-newlib target-libgloss target-libiberty target-libstdc++-v3 ${libgcj}" ++ noconfigdirs="$noconfigdirs target-newlib target-libgloss target-libiberty ${libgcj}" + ;; + alpha*-dec-osf*) + # ld works, but does not support shared libraries. +--- a/configure.ac ++++ b/configure.ac +@@ -472,7 +472,7 @@ case "${target}" in + noconfigdirs="$noconfigdirs target-newlib target-libgloss target-rda ${libgcj}" + ;; + *-*-vxworks*) +- noconfigdirs="$noconfigdirs target-newlib target-libgloss target-libiberty target-libstdc++-v3 ${libgcj}" ++ noconfigdirs="$noconfigdirs target-newlib target-libgloss target-libiberty ${libgcj}" + ;; + alpha*-dec-osf*) + # ld works, but does not support shared libraries. 
+--- a/fixincludes/fixincl.x ++++ b/fixincludes/fixincl.x +@@ -2,11 +2,11 @@ + * + * DO NOT EDIT THIS FILE (fixincl.x) + * +- * It has been AutoGen-ed Monday January 5, 2009 at 04:00:24 PM PST ++ * It has been AutoGen-ed Tuesday February 17, 2009 at 01:49:33 PM PST + * From the definitions inclhack.def + * and the template file fixincl + */ +-/* DO NOT SVN-MERGE THIS FILE, EITHER Mon Jan 5 16:00:24 PST 2009 ++/* DO NOT SVN-MERGE THIS FILE, EITHER Tue Feb 17 13:49:33 PST 2009 + * + * You must regenerate it. Use the ./genfixes script. + * +@@ -214,11 +214,14 @@ tSCC zAab_Fd_Zero_Asm_Posix_Types_HBypas + "} while"; + tSCC zAab_Fd_Zero_Asm_Posix_Types_HBypass1[] = + "x86_64"; ++tSCC zAab_Fd_Zero_Asm_Posix_Types_HBypass2[] = ++ "posix_types_64"; + +-#define AAB_FD_ZERO_ASM_POSIX_TYPES_H_TEST_CT 2 ++#define AAB_FD_ZERO_ASM_POSIX_TYPES_H_TEST_CT 3 + static tTestDesc aAab_Fd_Zero_Asm_Posix_Types_HTests[] = { + { TT_NEGREP, zAab_Fd_Zero_Asm_Posix_Types_HBypass0, (regex_t*)NULL }, +- { TT_NEGREP, zAab_Fd_Zero_Asm_Posix_Types_HBypass1, (regex_t*)NULL }, }; ++ { TT_NEGREP, zAab_Fd_Zero_Asm_Posix_Types_HBypass1, (regex_t*)NULL }, ++ { TT_NEGREP, zAab_Fd_Zero_Asm_Posix_Types_HBypass2, (regex_t*)NULL }, }; + + /* + * Fix Command Arguments for Aab_Fd_Zero_Asm_Posix_Types_H +@@ -5974,8 +5977,7 @@ tSCC zSolaris_Mutex_Init_2List[] = + * Machine/OS name selection pattern + */ + tSCC* apzSolaris_Mutex_Init_2Machs[] = { +- "*-*-solaris2.[0-9]", +- "*-*-solaris2.[0-9][!0-9]*", ++ "*-*-solaris*", + (const char*)NULL }; + + /* +@@ -5984,8 +5986,15 @@ tSCC* apzSolaris_Mutex_Init_2Machs[] = { + tSCC zSolaris_Mutex_Init_2Select0[] = + "@\\(#\\)pthread.h[ \t]+1.[0-9]+[ \t]+[0-9/]+ SMI"; + +-#define SOLARIS_MUTEX_INIT_2_TEST_CT 1 ++/* ++ * perform the 'test' shell command - do fix on success ++ */ ++tSCC zSolaris_Mutex_Init_2Test0[] = ++ " -n \"`grep '#if __STDC__ - 0 == 0 && !defined(_NO_LONGLONG)' \\`dirname $file\\`/sys/types.h`\""; ++ ++#define SOLARIS_MUTEX_INIT_2_TEST_CT 2 + static tTestDesc aSolaris_Mutex_Init_2Tests[] = { ++ { TT_TEST, zSolaris_Mutex_Init_2Test0, 0 /* unused */ }, + { TT_EGREP, zSolaris_Mutex_Init_2Select0, (regex_t*)NULL }, }; + + /* +@@ -6027,8 +6036,15 @@ tSCC* apzSolaris_Rwlock_Init_1Machs[] = + tSCC zSolaris_Rwlock_Init_1Select0[] = + "@\\(#\\)pthread.h[ \t]+1.[0-9]+[ \t]+[0-9/]+ SMI"; + +-#define SOLARIS_RWLOCK_INIT_1_TEST_CT 1 ++/* ++ * perform the 'test' shell command - do fix on success ++ */ ++tSCC zSolaris_Rwlock_Init_1Test0[] = ++ " -n \"`grep '#if __STDC__ - 0 == 0 && !defined(_NO_LONGLONG)' \\`dirname $file\\`/sys/types.h`\""; ++ ++#define SOLARIS_RWLOCK_INIT_1_TEST_CT 2 + static tTestDesc aSolaris_Rwlock_Init_1Tests[] = { ++ { TT_TEST, zSolaris_Rwlock_Init_1Test0, 0 /* unused */ }, + { TT_EGREP, zSolaris_Rwlock_Init_1Select0, (regex_t*)NULL }, }; + + /* +@@ -6098,8 +6114,7 @@ tSCC zSolaris_Once_Init_2List[] = + * Machine/OS name selection pattern + */ + tSCC* apzSolaris_Once_Init_2Machs[] = { +- "*-*-solaris2.[0-9]", +- "*-*-solaris2.[0-9][!0-9]*", ++ "*-*-solaris*", + (const char*)NULL }; + + /* +@@ -6108,8 +6123,15 @@ tSCC* apzSolaris_Once_Init_2Machs[] = { + tSCC zSolaris_Once_Init_2Select0[] = + "@\\(#\\)pthread.h[ \t]+1.[0-9]+[ \t]+[0-9/]+ SMI"; + +-#define SOLARIS_ONCE_INIT_2_TEST_CT 1 ++/* ++ * perform the 'test' shell command - do fix on success ++ */ ++tSCC zSolaris_Once_Init_2Test0[] = ++ " -n \"`grep '#if __STDC__ - 0 == 0 && !defined(_NO_LONGLONG)' \\`dirname $file\\`/sys/types.h`\""; ++ ++#define SOLARIS_ONCE_INIT_2_TEST_CT 2 + static tTestDesc 
aSolaris_Once_Init_2Tests[] = { ++ { TT_TEST, zSolaris_Once_Init_2Test0, 0 /* unused */ }, + { TT_EGREP, zSolaris_Once_Init_2Select0, (regex_t*)NULL }, }; + + /* +@@ -8606,7 +8628,7 @@ static const char* apzX11_SprintfPatch[] + * + * List of all fixes + */ +-#define REGEX_COUNT 255 ++#define REGEX_COUNT 256 + #define MACH_LIST_SIZE_LIMIT 261 + #define FIX_COUNT 212 + +--- a/fixincludes/inclhack.def ++++ b/fixincludes/inclhack.def +@@ -141,6 +141,7 @@ fix = { + mach = 'i[34567]86-*-linux*'; + bypass = '} while'; + bypass = 'x86_64'; ++ bypass = 'posix_types_64'; + + /* + * Define _POSIX_TYPES_H_WRAPPER at the end of the wrapper, not +@@ -3274,24 +3275,32 @@ fix = { + + + /* +- * Sun Solaris defines PTHREAD_MUTEX_INITIALIZER with a trailing +- * "0" for the last field of the pthread_mutex_t structure, which is +- * of type upad64_t, which itself is typedef'd to int64_t, but with +- * __STDC__ defined (e.g. by -ansi) it is a union. So change the +- * initializer to "{0}" instead ++ * Sun Solaris defines the last field of the pthread_mutex_t structure ++ * to have type upad64_t. Whether upad64_t is an integer type or a ++ * union depends on whether or not the headers believe that a 64-bit ++ * integer type is available. But, PTHREAD_MUTEX_INITIALIZER is not ++ * appropriately conditionalized; it always uses "0", and never "{0}". ++ * In order to avoid warnings/errors from the compiler, we must make ++ * the initializer use braces where appropriate. ++ * ++ * Prior to Solaris 10, if __STDC__ is 1 (as when compiling with ++ * -ansi), the definition would be a union. Beginning with Solaris ++ * 10, the headers check for __GNUC__, and will never use a union with ++ * GCC. We check /usr/include/sys/types.h to see if it checks for ++ * __STDC__. ++ * ++ * A "mach" test for Solaris 10 is undesirable because we want to ++ * allow a compiler built for Solaris <10 to be used on Solaris >=10, ++ * but the installed version of fixincludes hard-wires the target ++ * machine to the configure-time $target, rather than automatically ++ * determining it at installation time. + */ + fix = { + hackname = solaris_mutex_init_2; + select = '@\(#\)pthread.h' "[ \t]+1.[0-9]+[ \t]+[0-9/]+ SMI"; + files = pthread.h; +- /* +- * On Solaris 10, this fix is unnecessary because upad64_t is +- * always defined correctly regardless of the definition of the +- * __STDC__ macro. The first "mach" pattern matches up to +- * solaris9. The second "mach" pattern will not match any two (or +- * more) digit solaris version, but it will match e.g. 2.5.1. +- */ +- mach = '*-*-solaris2.[0-9]', '*-*-solaris2.[0-9][!0-9]*'; ++ mach = '*-*-solaris*'; ++ test = " -n \"`grep '#if __STDC__ - 0 == 0 && !defined(_NO_LONGLONG)' \\`dirname $file\\`/sys/types.h`\""; + c_fix = format; + c_fix_arg = "#if __STDC__ - 0 == 0 && !defined(_NO_LONGLONG)\n" + "%0\n" +@@ -3302,6 +3311,7 @@ fix = { + "(|/\*.*\*/[ \t]*\\\\\n[ \t]*)\\{.*)" + ",[ \t]*0\\}" "(|[ \t].*)$"; + test_text = ++ "`mkdir -p sys; echo '#if __STDC__ - 0 == 0 && !defined(_NO_LONGLONG)' >> sys/types.h`" + '#ident "@(#)pthread.h 1.26 98/04/12 SMI"'"\n" + "#define PTHREAD_MUTEX_INITIALIZER\t{{{0},0}, {{{0}}}, 0}\n" + "#define PTHREAD_COND_INITIALIZER\t{{{0}, 0}, 0}\t/* DEFAULTCV */\n" +@@ -3313,17 +3323,14 @@ fix = { + + + /* +- * Sun Solaris defines PTHREAD_RWLOCK_INITIALIZER with a "0" for some +- * fields of the pthread_rwlock_t structure, which are of type +- * upad64_t, which itself is typedef'd to int64_t, but with __STDC__ +- * defined (e.g. by -ansi) it is a union. 
So change the initializer +- * to "{0}" instead. ++ * See comments for solaris_mutex_init_2 re. upad64_t. + */ + fix = { + hackname = solaris_rwlock_init_1; + select = '@\(#\)pthread.h' "[ \t]+1.[0-9]+[ \t]+[0-9/]+ SMI"; + files = pthread.h; + mach = '*-*-solaris*'; ++ test = " -n \"`grep '#if __STDC__ - 0 == 0 && !defined(_NO_LONGLONG)' \\`dirname $file\\`/sys/types.h`\""; + c_fix = format; + c_fix_arg = "#if __STDC__ - 0 == 0 && !defined(_NO_LONGLONG)\n" + "%0\n" +@@ -3359,24 +3366,14 @@ fix = { + + + /* +- * Sun Solaris defines PTHREAD_ONCE_INIT with a "0" for some +- * fields of the pthread_once_t structure, which are of type +- * upad64_t, which itself is typedef'd to int64_t, but with __STDC__ +- * defined (e.g. by -ansi) it is a union. So change the initializer +- * to "{0}" instead. This test relies on solaris_once_init_1. ++ * See comments for solaris_mutex_init_2 re. upad64_t. + */ + fix = { + hackname = solaris_once_init_2; + select = '@\(#\)pthread.h' "[ \t]+1.[0-9]+[ \t]+[0-9/]+ SMI"; + files = pthread.h; +- /* +- * On Solaris 10, this fix is unnecessary because upad64_t is +- * always defined correctly regardless of the definition of the +- * __STDC__ macro. The first "mach" pattern matches up to +- * solaris9. The second "mach" pattern will not match any two (or +- * more) digit solaris version, but it will match e.g. 2.5.1. +- */ +- mach = '*-*-solaris2.[0-9]', '*-*-solaris2.[0-9][!0-9]*'; ++ mach = '*-*-solaris*'; ++ test = " -n \"`grep '#if __STDC__ - 0 == 0 && !defined(_NO_LONGLONG)' \\`dirname $file\\`/sys/types.h`\""; + c_fix = format; + c_fix_arg = "#if __STDC__ - 0 == 0 && !defined(_NO_LONGLONG)\n" + "%0\n" +--- a/fixincludes/server.c ++++ b/fixincludes/server.c +@@ -266,7 +266,7 @@ run_shell (const char* pz_cmd) + /* Make sure the process will pay attention to us, send the + supplied command, and then have it output a special marker that + we can find. */ +- fprintf (server_pair.pf_write, "cd %s\n%s\n\necho\necho %s\n", ++ fprintf (server_pair.pf_write, "cd '%s'\n%s\n\necho\necho %s\n", + p_cur_dir, pz_cmd, z_done); + fflush (server_pair.pf_write); + +--- a/fixincludes/tests/base/sys/types.h ++++ b/fixincludes/tests/base/sys/types.h +@@ -28,3 +28,4 @@ typedef __WCHAR_TYPE__ wchar_t; + + #endif /* ushort_t */ + #endif /* GNU_TYPES_CHECK */ ++#if !defined(__STRICT_ANSI__) && !defined(_NO_LONGLONG) +--- a/gcc/Makefile.in ++++ b/gcc/Makefile.in +@@ -321,6 +321,8 @@ GCC_FOR_TARGET = $(STAGE_CC_WRAPPER) ./x + # It also specifies -isystem ./include to find, e.g., stddef.h. + GCC_CFLAGS=$(CFLAGS_FOR_TARGET) $(INTERNAL_CFLAGS) $(X_CFLAGS) $(T_CFLAGS) $(LOOSE_WARN) -Wold-style-definition $($@-warn) -isystem ./include $(TCFLAGS) + ++EGLIBC_CONFIGS = @EGLIBC_CONFIGS@ ++ + # --------------------------------------------------- + # Programs which produce files for the target machine + # --------------------------------------------------- +@@ -402,6 +404,9 @@ TARGET_SYSTEM_ROOT = @TARGET_SYSTEM_ROOT + + xmake_file=@xmake_file@ + tmake_file=@tmake_file@ ++TM_ENDIAN_CONFIG=@TM_ENDIAN_CONFIG@ ++TM_MULTILIB_CONFIG=@TM_MULTILIB_CONFIG@ ++TM_MULTILIB_EXCEPTIONS_CONFIG=@TM_MULTILIB_EXCEPTIONS_CONFIG@ + out_file=$(srcdir)/config/@out_file@ + out_object_file=@out_object_file@ + md_file=$(srcdir)/config/@md_file@ +@@ -688,7 +693,11 @@ CC_FOR_BUILD = @CC_FOR_BUILD@ + BUILD_CFLAGS= @BUILD_CFLAGS@ -DGENERATOR_FILE + + # Native linker and preprocessor flags. For x-fragment overrides. 
++ifeq ($(host),$(build)) + BUILD_LDFLAGS=$(LDFLAGS) ++else ++BUILD_LDFLAGS= ++endif + BUILD_CPPFLAGS=$(ALL_CPPFLAGS) + + # Actual name to use when installing a native compiler. +@@ -1205,6 +1214,7 @@ OBJS-common = \ + tree-ssa-loop-manip.o \ + tree-ssa-loop-niter.o \ + tree-ssa-loop-prefetch.o \ ++ tree-ssa-loop-promote.o \ + tree-ssa-loop-unswitch.o \ + tree-ssa-loop.o \ + tree-ssa-math-opts.o \ +@@ -1213,6 +1223,7 @@ OBJS-common = \ + tree-ssa-pre.o \ + tree-ssa-propagate.o \ + tree-ssa-reassoc.o \ ++ tree-ssa-remove-local-statics.o \ + tree-ssa-sccvn.o \ + tree-ssa-sink.o \ + tree-ssa-structalias.o \ +@@ -1605,7 +1616,7 @@ libgcc-support: libgcc.mvars stmp-int-hd + $(MACHMODE_H) $(FPBIT) $(DPBIT) $(TPBIT) $(LIB2ADD) \ + $(LIB2ADD_ST) $(LIB2ADDEH) $(srcdir)/emutls.c gcov-iov.h $(SFP_MACHINE) + +-libgcc.mvars: config.status Makefile $(LIB2ADD) $(LIB2ADD_ST) specs \ ++libgcc.mvars: config.status Makefile $(LIB2ADD) $(LIB2ADD_ST) specs $(tmake_file) \ + xgcc$(exeext) + : > tmp-libgcc.mvars + echo LIB1ASMFUNCS = '$(LIB1ASMFUNCS)' >> tmp-libgcc.mvars +@@ -1656,7 +1667,7 @@ libgcc.mvars: config.status Makefile $(L + # driver program needs to select the library directory based on the + # switches. + multilib.h: s-mlib; @true +-s-mlib: $(srcdir)/genmultilib Makefile ++s-mlib: $(srcdir)/genmultilib Makefile $(tmakefile) + if test @enable_multilib@ = yes \ + || test -n "$(MULTILIB_OSDIRNAMES)"; then \ + $(SHELL) $(srcdir)/genmultilib \ +@@ -1667,10 +1678,11 @@ s-mlib: $(srcdir)/genmultilib Makefile + "$(MULTILIB_EXTRA_OPTS)" \ + "$(MULTILIB_EXCLUSIONS)" \ + "$(MULTILIB_OSDIRNAMES)" \ ++ "$(MULTILIB_ALIASES)" \ + "@enable_multilib@" \ + > tmp-mlib.h; \ + else \ +- $(SHELL) $(srcdir)/genmultilib '' '' '' '' '' '' '' no \ ++ $(SHELL) $(srcdir)/genmultilib '' '' '' '' '' '' '' '' no \ + > tmp-mlib.h; \ + fi + $(SHELL) $(srcdir)/../move-if-change tmp-mlib.h multilib.h +@@ -1744,7 +1756,7 @@ gcc.srcextra: gengtype-lex.c + + c-incpath.o: c-incpath.c c-incpath.h $(CONFIG_H) $(SYSTEM_H) $(CPPLIB_H) \ + intl.h prefix.h coretypes.h $(TM_H) cppdefault.h $(TARGET_H) \ +- $(MACHMODE_H) ++ $(MACHMODE_H) $(FLAGS_H) toplev.h + + c-decl.o : c-decl.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) $(TREE_H) \ + $(RTL_H) $(C_TREE_H) $(GGC_H) $(TARGET_H) $(FLAGS_H) $(FUNCTION_H) output.h \ +@@ -1874,7 +1886,8 @@ DRIVER_DEFINES = \ + -DTOOLDIR_BASE_PREFIX=\"$(libsubdir_to_prefix)$(prefix_to_exec_prefix)\" \ + @TARGET_SYSTEM_ROOT_DEFINE@ \ + $(VALGRIND_DRIVER_DEFINES) \ +- `test "X$${SHLIB_LINK}" = "X" || test "@enable_shared@" != "yes" || echo "-DENABLE_SHARED_LIBGCC"` ++ `test "X$${SHLIB_LINK}" = "X" || test "@enable_shared@" != "yes" || echo "-DENABLE_SHARED_LIBGCC"` \ ++ -DCONFIGURE_SPECS="\"@CONFIGURE_SPECS@\"" + + gcc.o: gcc.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) intl.h multilib.h \ + Makefile $(lang_specs_files) specs.h prefix.h $(GCC_H) $(FLAGS_H) \ +@@ -2091,6 +2104,9 @@ tree-ssa-pre.o : tree-ssa-pre.c $(TREE_F + $(TM_H) coretypes.h $(TREE_DUMP_H) tree-pass.h $(FLAGS_H) $(CFGLOOP_H) \ + alloc-pool.h $(BASIC_BLOCK_H) bitmap.h $(HASHTAB_H) $(TREE_GIMPLE_H) \ + $(TREE_INLINE_H) tree-iterator.h tree-ssa-sccvn.h $(PARAMS_H) ++tree-ssa-remove-local-statics.o: tree-ssa-remove-local-statics.c \ ++ coretypes.h $(CONFIG_H) $(SYSTEM_H) $(BASIC_BLOCK_H) tree.h tree-pass.h \ ++ $(TM_H) + tree-ssa-sccvn.o : tree-ssa-sccvn.c $(TREE_FLOW_H) $(CONFIG_H) \ + $(SYSTEM_H) $(TREE_H) $(GGC_H) $(DIAGNOSTIC_H) $(TIMEVAR_H) \ + $(TM_H) coretypes.h $(TREE_DUMP_H) tree-pass.h $(FLAGS_H) $(CFGLOOP_H) \ +@@ -2190,6 +2206,9 @@ 
tree-ssa-loop-prefetch.o: tree-ssa-loop- + $(CFGLOOP_H) $(PARAMS_H) langhooks.h $(BASIC_BLOCK_H) hard-reg-set.h \ + tree-chrec.h toplev.h langhooks.h $(TREE_INLINE_H) $(TREE_DATA_REF_H) \ + $(OPTABS_H) ++tree-ssa-loop-promote.o: tree-ssa-loop-promote.c \ ++ coretypes.h $(CONFIG_H) $(SYSTEM_H) $(BASIC_BLOCK_H) $(CFGLOOP_H) $(TIMEVAR_H) \ ++ $(TREE_DUMP_H) tree.h tree-pass.h $(TM_H) + tree-predcom.o: tree-predcom.c $(CONFIG_H) $(SYSTEM_H) $(TREE_H) $(TM_P_H) \ + $(CFGLOOP_H) $(TREE_FLOW_H) $(GGC_H) $(TREE_DATA_REF_H) $(SCEV_H) \ + $(PARAMS_H) $(DIAGNOSTIC_H) tree-pass.h $(TM_H) coretypes.h tree-affine.h \ +@@ -2759,7 +2778,7 @@ postreload.o : postreload.c $(CONFIG_H) + $(RTL_H) $(REAL_H) $(FLAGS_H) $(EXPR_H) $(OPTABS_H) reload.h $(REGS_H) \ + hard-reg-set.h insn-config.h $(BASIC_BLOCK_H) $(RECOG_H) output.h \ + $(FUNCTION_H) toplev.h cselib.h $(TM_P_H) except.h $(TREE_H) $(MACHMODE_H) \ +- $(OBSTACK_H) $(TIMEVAR_H) tree-pass.h $(DF_H) ++ $(OBSTACK_H) $(TIMEVAR_H) tree-pass.h addresses.h $(DF_H) + postreload-gcse.o : postreload-gcse.c $(CONFIG_H) $(SYSTEM_H) coretypes.h \ + $(TM_H) $(RTL_H) $(REGS_H) hard-reg-set.h $(FLAGS_H) insn-config.h \ + $(RECOG_H) $(EXPR_H) $(BASIC_BLOCK_H) $(FUNCTION_H) output.h toplev.h \ +@@ -3406,7 +3425,7 @@ gcov-dump$(exeext): $(GCOV_DUMP_OBJS) $( + # be rebuilt. + + # Build the include directories. +-stmp-int-hdrs: $(STMP_FIXINC) $(USER_H) $(UNWIND_H) fixinc_list ++stmp-int-hdrs: $(STMP_FIXINC) $(USER_H) $(UNWIND_H) + # Copy in the headers provided with gcc. + # The sed command gets just the last file name component; + # this is necessary because VPATH could add a dirname. +@@ -3425,21 +3444,23 @@ stmp-int-hdrs: $(STMP_FIXINC) $(USER_H) + done + rm -f include/unwind.h + cp $(UNWIND_H) include/unwind.h +- set -e; for ml in `cat fixinc_list`; do \ +- sysroot_headers_suffix=`echo $${ml} | sed -e 's/;.*$$//'`; \ +- multi_dir=`echo $${ml} | sed -e 's/^[^;]*;//'`; \ +- fix_dir=include-fixed$${multi_dir}; \ +- if $(LIMITS_H_TEST) ; then \ +- cat $(srcdir)/limitx.h $(srcdir)/glimits.h $(srcdir)/limity.h > tmp-xlimits.h; \ +- else \ +- cat $(srcdir)/glimits.h > tmp-xlimits.h; \ +- fi; \ +- $(mkinstalldirs) $${fix_dir}; \ +- chmod a+rx $${fix_dir} || true; \ +- rm -f $${fix_dir}/limits.h; \ +- mv tmp-xlimits.h $${fix_dir}/limits.h; \ +- chmod a+r $${fix_dir}/limits.h; \ +- done ++ set -e; if [ -f fixinc_list ] ; then \ ++ for ml in `cat fixinc_list`; do \ ++ sysroot_headers_suffix=`echo $${ml} | sed -e 's/;.*$$//'`; \ ++ multi_dir=`echo $${ml} | sed -e 's/^[^;]*;//'`; \ ++ fix_dir=include-fixed$${multi_dir}; \ ++ if $(LIMITS_H_TEST) ; then \ ++ cat $(srcdir)/limitx.h $(srcdir)/glimits.h $(srcdir)/limity.h > tmp-xlimits.h; \ ++ else \ ++ cat $(srcdir)/glimits.h > tmp-xlimits.h; \ ++ fi; \ ++ $(mkinstalldirs) $${fix_dir}; \ ++ chmod a+rx $${fix_dir} || true; \ ++ rm -f $${fix_dir}/limits.h; \ ++ mv tmp-xlimits.h $${fix_dir}/limits.h; \ ++ chmod a+r $${fix_dir}/limits.h; \ ++ done; \ ++ fi + # Install the README + rm -f include-fixed/README + cp $(srcdir)/../fixincludes/README-fixinc include-fixed/README +@@ -4164,16 +4185,18 @@ real-install-headers-cp: + + # Install supporting files for fixincludes to be run later. 
+ install-mkheaders: stmp-int-hdrs $(STMP_FIXPROTO) install-itoolsdirs \ +- macro_list fixinc_list ++ macro_list + $(INSTALL_DATA) $(srcdir)/gsyslimits.h \ + $(DESTDIR)$(itoolsdatadir)/gsyslimits.h + $(INSTALL_DATA) macro_list $(DESTDIR)$(itoolsdatadir)/macro_list +- $(INSTALL_DATA) fixinc_list $(DESTDIR)$(itoolsdatadir)/fixinc_list +- set -e; for ml in `cat fixinc_list`; do \ +- multi_dir=`echo $${ml} | sed -e 's/^[^;]*;//'`; \ +- $(mkinstalldirs) $(DESTDIR)$(itoolsdatadir)/include$${multi_dir}; \ +- $(INSTALL_DATA) include-fixed$${multidir}/limits.h $(DESTDIR)$(itoolsdatadir)/include$${multi_dir}/limits.h; \ +- done ++ set -e; if [ -f fixinc_list ] ; then \ ++ $(INSTALL_DATA) fixinc_list $(DESTDIR)$(itoolsdatadir)/fixinc_list; \ ++ for ml in `cat fixinc_list`; do \ ++ multi_dir=`echo $${ml} | sed -e 's/^[^;]*;//'`; \ ++ $(mkinstalldirs) $(DESTDIR)$(itoolsdatadir)/include$${multi_dir}; \ ++ $(INSTALL_DATA) include-fixed$${multidir}/limits.h $(DESTDIR)$(itoolsdatadir)/include$${multi_dir}/limits.h; \ ++ done; \ ++ fi + $(INSTALL_SCRIPT) $(srcdir)/../mkinstalldirs \ + $(DESTDIR)$(itoolsdir)/mkinstalldirs ; \ + if [ x$(STMP_FIXPROTO) != x ] ; then \ +--- a/gcc/addresses.h ++++ b/gcc/addresses.h +@@ -78,3 +78,42 @@ regno_ok_for_base_p (unsigned regno, enu + + return ok_for_base_p_1 (regno, mode, outer_code, index_code); + } ++ ++/* Wrapper function to unify target macros MODE_INDEX_REG_CLASS and ++ INDEX_REG_CLASS. Arguments as for the MODE_INDEX_REG_CLASS macro. */ ++ ++static inline enum reg_class ++index_reg_class (enum machine_mode mode ATTRIBUTE_UNUSED) ++{ ++#ifdef MODE_INDEX_REG_CLASS ++ return MODE_INDEX_REG_CLASS (mode); ++#else ++ return INDEX_REG_CLASS; ++#endif ++} ++ ++/* Wrapper function to unify target macros REGNO_MODE_OK_FOR_INDEX_P ++ and REGNO_OK_FOR_INDEX_P. Arguments as for the ++ REGNO_MODE_OK_FOR_INDEX_P macro. */ ++ ++static inline bool ++ok_for_index_p_1 (unsigned regno, enum machine_mode mode ATTRIBUTE_UNUSED) ++{ ++#ifdef REGNO_MODE_OK_FOR_INDEX_P ++ return REGNO_MODE_OK_FOR_INDEX_P (regno, mode); ++#else ++ return REGNO_OK_FOR_INDEX_P (regno); ++#endif ++} ++ ++/* Wrapper around ok_for_index_p_1, for use after register allocation is ++ complete. Arguments as for the called function. */ ++ ++static inline bool ++regno_ok_for_index_p (unsigned regno, enum machine_mode mode) ++{ ++ if (regno >= FIRST_PSEUDO_REGISTER && reg_renumber[regno] >= 0) ++ regno = reg_renumber[regno]; ++ ++ return ok_for_index_p_1 (regno, mode); ++} +--- a/gcc/c-common.c ++++ b/gcc/c-common.c +@@ -1173,6 +1173,20 @@ check_main_parameter_types (tree decl) + pedwarn ("%q+D takes only zero or two arguments", decl); + } + ++/* True if pointers to distinct types T1 and T2 can be converted to ++ each other without an explicit cast. Only returns true for opaque ++ vector types. */ ++bool ++vector_targets_convertible_p (const_tree t1, const_tree t2) ++{ ++ if (TREE_CODE (t1) == VECTOR_TYPE && TREE_CODE (t2) == VECTOR_TYPE ++ && (targetm.vector_opaque_p (t1) || targetm.vector_opaque_p (t2)) ++ && tree_int_cst_equal (TYPE_SIZE (t1), TYPE_SIZE (t2))) ++ return true; ++ ++ return false; ++} ++ + /* True if vector types T1 and T2 can be converted to each other + without an explicit cast. 
If EMIT_LAX_NOTE is true, and T1 and T2 + can only be converted with -flax-vector-conversions yet that is not +--- a/gcc/c-common.h ++++ b/gcc/c-common.h +@@ -829,6 +829,7 @@ extern tree finish_label_address_expr (t + extern tree lookup_label (tree); + extern tree lookup_name (tree); + ++extern bool vector_targets_convertible_p (const_tree t1, const_tree t2); + extern bool vector_types_convertible_p (const_tree t1, const_tree t2, bool emit_lax_note); + + extern rtx c_expand_expr (tree, rtx, enum machine_mode, int, rtx *); +--- a/gcc/c-convert.c ++++ b/gcc/c-convert.c +@@ -70,6 +70,7 @@ convert (tree type, tree expr) + tree e = expr; + enum tree_code code = TREE_CODE (type); + const char *invalid_conv_diag; ++ tree e1; + + if (type == error_mark_node + || expr == error_mark_node +@@ -85,7 +86,8 @@ convert (tree type, tree expr) + + if (type == TREE_TYPE (expr)) + return expr; +- ++ if (e1 = targetm.convert_to_type (type, expr)) ++ return e1; + if (TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (TREE_TYPE (expr))) + return fold_convert (type, expr); + if (TREE_CODE (TREE_TYPE (expr)) == ERROR_MARK) +--- a/gcc/c-decl.c ++++ b/gcc/c-decl.c +@@ -3995,6 +3995,7 @@ grokdeclarator (const struct c_declarato + bool bitfield = width != NULL; + tree element_type; + struct c_arg_info *arg_info = 0; ++ const char *errmsg; + + if (decl_context == FUNCDEF) + funcdef_flag = true, decl_context = NORMAL; +@@ -4513,6 +4514,12 @@ grokdeclarator (const struct c_declarato + error ("%qs declared as function returning an array", name); + type = integer_type_node; + } ++ errmsg = targetm.invalid_return_type (type); ++ if (errmsg) ++ { ++ error (errmsg); ++ type = integer_type_node; ++ } + + /* Construct the function type and go to the next + inner layer of declarator. */ +@@ -5039,6 +5046,7 @@ grokparms (struct c_arg_info *arg_info, + { + tree parm, type, typelt; + unsigned int parmno; ++ const char *errmsg; + + /* If there is a parameter of incomplete type in a definition, + this is an error. In a declaration this is valid, and a +@@ -5082,6 +5090,14 @@ grokparms (struct c_arg_info *arg_info, + } + } + ++ errmsg = targetm.invalid_parameter_type (type); ++ if (errmsg) ++ { ++ error (errmsg); ++ TREE_VALUE (typelt) = error_mark_node; ++ TREE_TYPE (parm) = error_mark_node; ++ } ++ + if (DECL_NAME (parm) && TREE_USED (parm)) + warn_if_shadowing (parm); + } +--- a/gcc/c-incpath.c ++++ b/gcc/c-incpath.c +@@ -30,6 +30,8 @@ + #include "intl.h" + #include "c-incpath.h" + #include "cppdefault.h" ++#include "flags.h" ++#include "toplev.h" + + /* Windows does not natively support inodes, and neither does MSDOS. + Cygwin's emulation can generate non-unique inodes, so don't use it. 
+@@ -37,15 +39,18 @@ + #ifdef VMS + # define INO_T_EQ(A, B) (!memcmp (&(A), &(B), sizeof (A))) + # define INO_T_COPY(DEST, SRC) memcpy(&(DEST), &(SRC), sizeof (SRC)) +-#else +-# if (defined _WIN32 && !defined (_UWIN)) || defined __MSDOS__ +-# define INO_T_EQ(A, B) 0 +-# else +-# define INO_T_EQ(A, B) ((A) == (B)) +-# endif ++#elif !((defined _WIN32 && !defined (_UWIN)) || defined __MSDOS__) ++# define INO_T_EQ(A, B) ((A) == (B)) + # define INO_T_COPY(DEST, SRC) (DEST) = (SRC) + #endif + ++#if defined INO_T_EQ ++#define DIRS_EQ(A, B) ((A)->dev == (B)->dev \ ++ && INO_T_EQ((A)->ino, (B)->ino)) ++#else ++#define DIRS_EQ(A, B) (!strcasecmp ((A)->name, (B)->name)) ++#endif ++ + static const char dir_separator_str[] = { DIR_SEPARATOR, 0 }; + + static void add_env_var_paths (const char *, int); +@@ -241,14 +246,15 @@ remove_duplicates (cpp_reader *pfile, st + "%s: not a directory", cur->name); + else + { ++#if defined (INO_T_COPY) + INO_T_COPY (cur->ino, st.st_ino); + cur->dev = st.st_dev; ++#endif + + /* Remove this one if it is in the system chain. */ + reason = REASON_DUP_SYS; + for (tmp = system; tmp; tmp = tmp->next) +- if (INO_T_EQ (tmp->ino, cur->ino) && tmp->dev == cur->dev +- && cur->construct == tmp->construct) ++ if (DIRS_EQ (tmp, cur) && cur->construct == tmp->construct) + break; + + if (!tmp) +@@ -256,16 +262,14 @@ remove_duplicates (cpp_reader *pfile, st + /* Duplicate of something earlier in the same chain? */ + reason = REASON_DUP; + for (tmp = head; tmp != cur; tmp = tmp->next) +- if (INO_T_EQ (cur->ino, tmp->ino) && cur->dev == tmp->dev +- && cur->construct == tmp->construct) ++ if (DIRS_EQ (cur, tmp) && cur->construct == tmp->construct) + break; + + if (tmp == cur + /* Last in the chain and duplicate of JOIN? */ + && !(cur->next == NULL && join +- && INO_T_EQ (cur->ino, join->ino) +- && cur->dev == join->dev +- && cur->construct == join->construct)) ++ && DIRS_EQ (cur, join) ++ && cur->construct == join->construct)) + { + /* Unique, so keep this directory. */ + pcur = &cur->next; +@@ -297,8 +301,8 @@ add_sysroot_to_chain (const char *sysroo + } + + /* Merge the four include chains together in the order quote, bracket, +- system, after. Remove duplicate dirs (as determined by +- INO_T_EQ()). ++ system, after. Remove duplicate dirs (determined in ++ system-specific manner). + + We can't just merge the lists and then uniquify them because then + we may lose directories from the <> search path that should be +@@ -352,6 +356,24 @@ merge_include_chains (const char *sysroo + } + fprintf (stderr, _("End of search list.\n")); + } ++ ++#ifdef ENABLE_POISON_SYSTEM_DIRECTORIES ++ if (flag_poison_system_directories) ++ { ++ struct cpp_dir *p; ++ ++ for (p = heads[QUOTE]; p; p = p->next) ++ { ++ if ((!strncmp (p->name, "/usr/include", 12)) ++ || (!strncmp (p->name, "/usr/local/include", 18)) ++ || (!strncmp (p->name, "/usr/X11R6/include", 18))) ++ warning (OPT_Wpoison_system_directories, ++ "include location \"%s\" is unsafe for " ++ "cross-compilation", ++ p->name); ++ } ++ } ++#endif + } + + /* Use given -I paths for #include "..." but not #include <...>, and +--- a/gcc/c-typeck.c ++++ b/gcc/c-typeck.c +@@ -1754,6 +1754,7 @@ default_conversion (tree exp) + tree orig_exp; + tree type = TREE_TYPE (exp); + enum tree_code code = TREE_CODE (type); ++ tree promoted_type; + + /* Functions and arrays have been converted during parsing. 
*/ + gcc_assert (code != FUNCTION_TYPE); +@@ -1790,6 +1791,10 @@ default_conversion (tree exp) + if (exp == error_mark_node) + return error_mark_node; + ++ promoted_type = targetm.promoted_type (type); ++ if (promoted_type) ++ return convert (promoted_type, exp); ++ + if (INTEGRAL_TYPE_P (type)) + return perform_integral_promotions (exp); + +@@ -4196,10 +4201,7 @@ convert_for_assignment (tree type, tree + if (TREE_CODE (mvr) != ARRAY_TYPE) + mvr = TYPE_MAIN_VARIANT (mvr); + /* Opaque pointers are treated like void pointers. */ +- is_opaque_pointer = (targetm.vector_opaque_p (type) +- || targetm.vector_opaque_p (rhstype)) +- && TREE_CODE (ttl) == VECTOR_TYPE +- && TREE_CODE (ttr) == VECTOR_TYPE; ++ is_opaque_pointer = vector_targets_convertible_p (ttl, ttr); + + /* C++ does not allow the implicit conversion void* -> T*. However, + for the purpose of reducing the number of false positives, we +--- a/gcc/c.opt ++++ b/gcc/c.opt +@@ -697,6 +697,10 @@ fpreprocessed + C ObjC C++ ObjC++ + Treat the input file as already preprocessed + ++fremove-local-statics ++C Var(flag_remove_local_statics) ++Convert function-local static variables to automatic variables when it is safe to do so ++ + freplace-objc-classes + ObjC ObjC++ + Used in Fix-and-Continue mode to indicate that object files may be swapped in at runtime +--- a/gcc/calls.c ++++ b/gcc/calls.c +@@ -3834,7 +3834,7 @@ emit_library_call_value_1 (int retval, r + cse'ing of library calls could delete a call and leave the pop. */ + NO_DEFER_POP; + valreg = (mem_value == 0 && outmode != VOIDmode +- ? hard_libcall_value (outmode) : NULL_RTX); ++ ? hard_libcall_value (outmode, orgfun) : NULL_RTX); + + /* Stack must be properly aligned now. */ + gcc_assert (!(stack_pointer_delta +@@ -4133,8 +4133,17 @@ store_one_arg (struct arg_data *arg, rtx + /* We need to make a save area. */ + unsigned int size = arg->locate.size.constant * BITS_PER_UNIT; + enum machine_mode save_mode = mode_for_size (size, MODE_INT, 1); +- rtx adr = memory_address (save_mode, XEXP (arg->stack_slot, 0)); +- rtx stack_area = gen_rtx_MEM (save_mode, adr); ++ rtx adr; ++ rtx stack_area; ++ ++ /* We can only use save_mode if the arg is sufficiently ++ aligned. 
*/ ++ if (STRICT_ALIGNMENT ++ && GET_MODE_ALIGNMENT (save_mode) > arg->locate.boundary) ++ save_mode = BLKmode; ++ ++ adr = memory_address (save_mode, XEXP (arg->stack_slot, 0)); ++ stack_area = gen_rtx_MEM (save_mode, adr); + + if (save_mode == BLKmode) + { +--- a/gcc/cfgexpand.c ++++ b/gcc/cfgexpand.c +@@ -86,10 +86,6 @@ failed: + } + + +-#ifndef LOCAL_ALIGNMENT +-#define LOCAL_ALIGNMENT(TYPE, ALIGNMENT) ALIGNMENT +-#endif +- + #ifndef STACK_ALIGNMENT_NEEDED + #define STACK_ALIGNMENT_NEEDED 1 + #endif +@@ -160,7 +156,7 @@ get_decl_align_unit (tree decl) + unsigned int align; + + align = DECL_ALIGN (decl); +- align = LOCAL_ALIGNMENT (TREE_TYPE (decl), align); ++ align = calculate_local_alignment (TREE_TYPE (decl), align); + if (align > PREFERRED_STACK_BOUNDARY) + align = PREFERRED_STACK_BOUNDARY; + if (cfun->stack_alignment_needed < align) +--- a/gcc/cgraph.c ++++ b/gcc/cgraph.c +@@ -205,9 +205,11 @@ cgraph_node (tree decl) + if (DECL_CONTEXT (decl) && TREE_CODE (DECL_CONTEXT (decl)) == FUNCTION_DECL) + { + node->origin = cgraph_node (DECL_CONTEXT (decl)); ++ node->origin->ever_was_nested = 1; + node->next_nested = node->origin->nested; + node->origin->nested = node; + node->master_clone = node; ++ node->ever_was_nested = 1; + } + return node; + } +--- a/gcc/cgraph.h ++++ b/gcc/cgraph.h +@@ -178,6 +178,8 @@ struct cgraph_node GTY((chain_next ("%h. + unsigned output : 1; + /* Set for aliases once they got through assemble_alias. */ + unsigned alias : 1; ++ /* Set if the function is a nested function or has nested functions. */ ++ unsigned ever_was_nested : 1; + + /* In non-unit-at-a-time mode the function body of inline candidates is saved + into clone before compiling so the function in original form can be +--- a/gcc/collect2.c ++++ b/gcc/collect2.c +@@ -605,11 +605,7 @@ find_a_file (struct path_prefix *pprefix + + /* Determine the filename to execute (special case for absolute paths). */ + +- if (*name == '/' +-#ifdef HAVE_DOS_BASED_FILE_SYSTEM +- || (*name && name[1] == ':') +-#endif +- ) ++ if (IS_ABSOLUTE_PATH (name)) + { + if (access (name, X_OK) == 0) + { +--- a/gcc/combine.c ++++ b/gcc/combine.c +@@ -3989,14 +3989,18 @@ find_split_point (rtx *loc, rtx insn) + return &XEXP (XEXP (x, 0), 0); + } + ++#if 0 + /* If we have a PLUS whose first operand is complex, try computing it +- separately by making a split there. */ ++ separately by making a split there. ++ This causes non-canonical RTL to be created, at least on ARM. ++ See CSL issue #4085. */ + if (GET_CODE (XEXP (x, 0)) == PLUS + && ! memory_address_p (GET_MODE (x), XEXP (x, 0)) + && ! OBJECT_P (XEXP (XEXP (x, 0), 0)) + && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG + && OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0))))) + return &XEXP (XEXP (x, 0), 0); ++#endif + break; + + case SET: +@@ -5876,6 +5880,7 @@ simplify_set (rtx x) + zero_extend to avoid the reload that would otherwise be required. 
*/ + + if (GET_CODE (src) == SUBREG && subreg_lowpart_p (src) ++ && GET_MODE_CLASS (GET_MODE (SUBREG_REG (src))) == MODE_INT + && LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (src))) != UNKNOWN + && SUBREG_BYTE (src) == 0 + && (GET_MODE_SIZE (GET_MODE (src)) +--- a/gcc/common.opt ++++ b/gcc/common.opt +@@ -142,6 +142,10 @@ Wpadded + Common Var(warn_padded) Warning + Warn when padding is required to align structure members + ++Wpoison-system-directories ++Common Var(flag_poison_system_directories) Init(1) ++Warn for -I and -L options using system directories if cross compiling ++ + Wshadow + Common Var(warn_shadow) Warning + Warn when one local variable shadows another +@@ -259,6 +263,12 @@ Common Separate + fabi-version= + Common Joined UInteger Var(flag_abi_version) Init(2) + ++falign-arrays ++Target Report Var(flag_align_arrays) ++Set the minimum alignment for array variables to be the largest power ++of two less than or equal to their total storage size, or the biggest ++alignment used on the machine, whichever is smaller. ++ + falign-functions + Common Report Var(align_functions,0) + Align the start of functions +@@ -444,6 +454,10 @@ fearly-inlining + Common Report Var(flag_early_inlining) Init(1) Optimization + Perform early inlining + ++feglibc= ++Common Report Joined Undocumented ++EGLIBC configuration specifier, serves multilib purposes. ++ + feliminate-dwarf2-dups + Common Report Var(flag_eliminate_dwarf2_dups) + Perform DWARF2 duplicate elimination +@@ -805,6 +819,10 @@ fprofile-values + Common Report Var(flag_profile_values) + Insert code to profile values of expressions + ++fpromote-loop-indices ++Common Report Var(flag_promote_loop_indices) Optimization ++Promote loop indices to word-sized indices when safe ++ + frandom-seed + Common + +--- a/gcc/config.gcc ++++ b/gcc/config.gcc +@@ -70,6 +70,10 @@ + # This helps to keep OS specific stuff out of the CPU + # defining header ${cpu_type}/${cpu_type.h}. + # ++# It is possible to include automatically-generated ++# build-directory files by prefixing them with "./". ++# All other files should relative to $srcdir/config. ++# + # tm_p_file Location of file with declarations for functions + # in $out_file. + # +@@ -751,32 +755,62 @@ arm*-*-linux*) # ARM GNU/Linux with EL + need_64bit_hwint=yes + # The EABI requires the use of __cxa_atexit. 
+ default_use_cxa_atexit=yes ++ case ${target} in ++ arm-timesys-linux-gnueabi) ++ tmake_file="${tmake_file} arm/t-timesys" ++ tm_file="$tm_file ./sysroot-suffix.h" ++ tmake_file="$tmake_file t-sysroot-suffix" ++ ;; ++ arm-wrs-linux-gnueabi) ++ tm_file="$tm_file arm/wrs-linux.h" ++ tmake_file="$tmake_file arm/t-wrs-linux" ++ tm_defines="$tm_defines TARGET_FLEXLM" ++ ;; ++ arm-montavista*-linux-gnueabi) ++ tm_file="$tm_file arm/montavista-linux.h" ++ tmake_file="$tmake_file arm/t-montavista-linux" ++ ;; ++ *) ++ if test x$enable_extra_asa_multilibs = xyes; then ++ tmake_file="${tmake_file} arm/t-asa" ++ elif test x$enable_extra_sgxx_multilibs = xyes; then ++ tmake_file="${tmake_file} arm/t-cs-linux" ++ fi ++ tm_file="$tm_file ./sysroot-suffix.h" ++ tmake_file="$tmake_file t-sysroot-suffix" ++ ;; ++ esac + ;; + *) + tmake_file="$tmake_file arm/t-linux" + ;; + esac + tm_file="$tm_file arm/aout.h arm/arm.h" ++ tmake_file="${tmake_file} arm/t-arm-softfp soft-fp/t-softfp" + ;; + arm*-*-uclinux*) # ARM ucLinux +- tm_file="dbxelf.h elfos.h arm/unknown-elf.h arm/elf.h arm/linux-gas.h arm/uclinux-elf.h arm/uclinux-elf.h" ++ tm_file="dbxelf.h elfos.h arm/unknown-elf.h arm/elf.h arm/linux-gas.h arm/uclinux-elf.h" + tmake_file="arm/t-arm arm/t-arm-elf" + case ${target} in +- arm*-*-uclinux-*eabi) ++ arm*-*-uclinux*eabi) + tm_file="$tm_file arm/bpabi.h arm/uclinux-eabi.h" +- tmake_file="$tmake_file arm/t-bpabi" ++ tmake_file="$tmake_file arm/t-bpabi arm/t-uclinux-eabi" + # The BPABI long long divmod functions return a 128-bit value in + # registers r0-r3. Correctly modeling that requires the use of + # TImode. + need_64bit_hwint=yes + # The EABI requires the use of __cxa_atexit. + default_use_cxa_atexit=yes ++ tm_file="$tm_file ./sysroot-suffix.h" ++ tmake_file="$tmake_file t-sysroot-suffix" + esac ++ tmake_file="${tmake_file} arm/t-arm-softfp soft-fp/t-softfp" + tm_file="$tm_file arm/aout.h arm/arm.h" + ;; + arm*-*-ecos-elf) + tm_file="dbxelf.h elfos.h arm/unknown-elf.h arm/elf.h arm/aout.h arm/arm.h arm/ecos-elf.h" + tmake_file="arm/t-arm arm/t-arm-elf" ++ tmake_file="${tmake_file} arm/t-arm-softfp soft-fp/t-softfp" + ;; + arm*-*-eabi* | arm*-*-symbianelf* ) + # The BPABI long long divmod functions return a 128-bit value in +@@ -788,7 +822,11 @@ arm*-*-eabi* | arm*-*-symbianelf* ) + tmake_file="arm/t-arm arm/t-arm-elf" + case ${target} in + arm*-*-eabi*) ++ tm_file="${tm_file} arm/nocrt0.h" + tmake_file="${tmake_file} arm/t-bpabi" ++ if test x$enable_extra_sgxx_multilibs = xyes; then ++ tmake_file="${tmake_file} arm/t-cs-eabi" ++ fi + ;; + arm*-*-symbianelf*) + tm_file="${tm_file} arm/symbian.h" +@@ -798,14 +836,17 @@ arm*-*-eabi* | arm*-*-symbianelf* ) + ;; + esac + tm_file="${tm_file} arm/aout.h arm/arm.h" ++ tmake_file="${tmake_file} arm/t-arm-softfp soft-fp/t-softfp" + ;; + arm*-*-rtems*) + tm_file="dbxelf.h elfos.h arm/unknown-elf.h arm/elf.h arm/aout.h arm/arm.h arm/rtems-elf.h rtems.h" + tmake_file="arm/t-arm arm/t-arm-elf t-rtems arm/t-rtems" ++ tmake_file="${tmake_file} arm/t-arm-softfp soft-fp/t-softfp" + ;; + arm*-*-elf | ep9312-*-elf) + tm_file="dbxelf.h elfos.h arm/unknown-elf.h arm/elf.h arm/aout.h arm/arm.h" + tmake_file="arm/t-arm arm/t-arm-elf" ++ tmake_file="${tmake_file} arm/t-arm-softfp soft-fp/t-softfp" + ;; + arm*-wince-pe*) + tm_file="arm/semi.h arm/aout.h arm/arm.h arm/coff.h dbxcoff.h arm/pe.h arm/wince-pe.h" +@@ -822,6 +863,7 @@ arm-*-pe*) + arm*-*-kaos*) + tm_file="dbxelf.h elfos.h arm/unknown-elf.h arm/elf.h arm/aout.h arm/arm.h kaos.h arm/kaos-arm.h" + 
tmake_file="arm/t-arm arm/t-arm-elf" ++ tmake_file="${tmake_file} arm/t-arm-softfp soft-fp/t-softfp" + ;; + avr-*-rtems*) + tm_file="avr/avr.h dbxelf.h avr/rtems.h rtems.h" +@@ -1179,6 +1221,16 @@ i[34567]86-*-linux* | i[34567]86-*-kfree + else + tm_file="${tm_file} i386/linux.h" + fi ++ case ${target} in ++ *-wrs-linux*) ++ tm_defines="${tm_defines} TARGET_FLEXLM" ++ ;; ++ esac ++ if test x$enable_extra_sgxx_multilibs = xyes; then ++ tm_file="${tm_file} i386/cs-linux.h" ++ tmake_file="${tmake_file} i386/t-cs-linux" ++ extra_options="${extra_options} i386/cs-linux.opt" ++ fi + ;; + i[34567]86-*-knetbsd*-gnu) tm_file="${tm_file} i386/linux.h knetbsd-gnu.h i386/knetbsd-gnu.h" ;; + i[34567]86-*-kfreebsd*-gnu) tm_file="${tm_file} i386/linux.h kfreebsd-gnu.h i386/kfreebsd-gnu.h" ;; +@@ -1616,9 +1668,11 @@ m68k-*-linux*) # Motorola m68k's runnin + # aka the GNU/Linux C library 6. + default_m68k_cpu=68020 + default_cf_cpu=5475 +- tm_file="${tm_file} dbxelf.h elfos.h svr4.h linux.h m68k/linux.h" ++ with_arch=${with_arch:-m68k} ++ tm_file="${tm_file} dbxelf.h elfos.h svr4.h linux.h m68k/linux.h ./sysroot-suffix.h" + extra_options="${extra_options} m68k/ieee.opt" + tm_defines="${tm_defines} MOTOROLA=1" ++ tmake_file="${tmake_file} m68k/t-floatlib m68k/t-linux m68k/t-mlibs" + # if not configured with --enable-sjlj-exceptions, bump the + # libgcc version number + if test x$sjlj != x1; then +@@ -1646,7 +1700,7 @@ mcore-*-pe*) + mips-sgi-irix[56]*) + tm_file="elfos.h ${tm_file} mips/iris.h" + tmake_file="mips/t-iris mips/t-slibgcc-irix" +- target_cpu_default="MASK_ABICALLS" ++ tm_defines="${tm_defines} TARGET_ABICALLS_DEFAULT=1" + case ${target} in + *-*-irix5*) + tm_file="${tm_file} mips/iris5.h" +@@ -1672,31 +1726,77 @@ mips-sgi-irix[56]*) + use_fixproto=yes + ;; + mips*-*-netbsd*) # NetBSD/mips, either endian. +- target_cpu_default="MASK_ABICALLS" ++ tm_defines="${tm_defines} TARGET_ABICALLS_DEFAULT=1" + tm_file="elfos.h ${tm_file} mips/elf.h netbsd.h netbsd-elf.h mips/netbsd.h" + ;; + mips64*-*-linux*) + tm_file="dbxelf.h elfos.h svr4.h linux.h ${tm_file} mips/linux.h mips/linux64.h" ++ tm_defines="${tm_defines} TARGET_ABICALLS_DEFAULT=1" + tmake_file="${tmake_file} mips/t-linux64" +- tm_defines="${tm_defines} MIPS_ABI_DEFAULT=ABI_N32" ++ if test x${enable_mips_nonpic}; then ++ tm_defines="${tm_defines} TARGET_ABICALLS_NONPIC=1" ++ fi ++ case "$with_abi" in ++ "" | "n32" ) ++ tm_defines="${tm_defines} MIPS_ABI_DEFAULT=ABI_N32" ++ ;; ++ 64 ) ++ tm_defines="${tm_defines} MIPS_ABI_DEFAULT=ABI_64" ++ ;; ++ *) ++ echo "Unknown ABI used in --with-abi=$with_abi" ++ exit 1 ++ ;; ++ esac ++ case ${target} in ++ mips64el-sicortex-linux-gnu) ++ tm_file="${tm_file} mips/sicortex.h" ++ tmake_file="${tmake_file} mips/t-sicortex" ++ ;; ++ esac ++ tmake_file="$tmake_file mips/t-crtfm" + gnu_ld=yes + gas=yes + test x$with_llsc != x || with_llsc=yes + ;; + mips*-*-linux*) # Linux MIPS, either endian. 
+ tm_file="dbxelf.h elfos.h svr4.h linux.h ${tm_file} mips/linux.h" ++ tm_defines="${tm_defines} TARGET_ABICALLS_DEFAULT=1" ++ if test x${enable_mips_nonpic}; then ++ tm_defines="${tm_defines} TARGET_ABICALLS_NONPIC=1" ++ fi + case ${target} in + mipsisa32r2*) + tm_defines="${tm_defines} MIPS_ISA_DEFAULT=33" + ;; + mipsisa32*) + tm_defines="${tm_defines} MIPS_ISA_DEFAULT=32" ++ ;; ++ mips-wrs-linux-gnu) ++ tmake_file="$tmake_file mips/t-linux64 mips/t-wrs-linux" ++ tm_file="$tm_file mips/linux64.h mips/octeon.h mips/wrs-linux.h" ++ tm_defines="$tm_defines TARGET_FLEXLM" ++ ;; ++ mips-montavista*-linux-gnu) ++ tmake_file="$tmake_file mips/t-linux64 mips/t-montavista-linux" ++ tm_file="$tm_file mips/linux64.h mips/octeon.h mips/montavista-linux.h" ++ ;; ++ *) ++ if test x$enable_extra_sgxx_multilibs = xyes; then ++ tmake_file="$tmake_file mips/t-sgxx-linux" ++ tm_file="$tm_file mips/cs-sgxx-linux.h" ++ elif test x$enable_extra_sgxxlite_multilibs = xyes; then ++ tmake_file="$tmake_file mips/t-sgxxlite-linux" ++ tm_file="$tm_file mips/cs-sgxxlite-linux.h" ++ fi ++ ;; + esac + test x$with_llsc != x || with_llsc=yes ++ tmake_file="$tmake_file mips/t-crtfm" + ;; + mips*-*-openbsd*) + tm_defines="${tm_defines} OBSD_HAS_DECLARE_FUNCTION_NAME OBSD_HAS_DECLARE_OBJECT OBSD_HAS_CORRECT_SPECS" +- target_cpu_default="MASK_ABICALLS" ++ tm_defines="${tm_defines} TARGET_ABICALLS_DEFAULT=1" + tm_file="mips/mips.h openbsd.h mips/openbsd.h mips/sdb.h" + case ${target} in + mips*el-*-openbsd*) +@@ -1707,15 +1807,15 @@ mips*-*-openbsd*) + mips*-sde-elf*) + tm_file="elfos.h ${tm_file} mips/elf.h mips/sde.h" + tmake_file="mips/t-sde mips/t-libgcc-mips16" ++ tm_file="$tm_file mips/sdemtk.h" ++ extra_options="$extra_options mips/sdemtk.opt" + case "${with_newlib}" in + yes) +- # newlib / libgloss. ++ # newlib ++ # FIXME: threading? + ;; + *) +- # MIPS toolkit libraries. +- tm_file="$tm_file mips/sdemtk.h" +- tmake_file="$tmake_file mips/t-sdemtk" +- extra_options="$extra_options mips/sdemtk.opt" ++ tmake_file="$tmake_file mips/t-sdelib" + case ${enable_threads} in + "" | yes | mipssde) + thread_file='mipssde' +@@ -1734,6 +1834,23 @@ mips*-sde-elf*) + tm_defines="MIPS_ISA_DEFAULT=64 MIPS_ABI_DEFAULT=ABI_N32" + ;; + esac ++ if [ "$enable_sgxx_sde_multilibs" = "yes" ]; then ++ tmake_file="$tmake_file mips/t-sgxx-sde" ++ # SourceryG++ is configured --with-arch=mips32r2. 
++ tm_defines="MIPS_ISA_DEFAULT=33 MIPS_ABI_DEFAULT=ABI_32" ++ fi ++ ;; ++mips64octeon*-wrs-elf*) ++ tm_file="elfos.h ${tm_file} mips/elf.h mips/octeon.h mips/octeon-elf.h" ++ tmake_file=mips/t-octeon-elf ++ tm_defines="MIPS_ABI_DEFAULT=ABI_EABI MIPS_CPU_STRING_DEFAULT=\\\"octeon\\\" TARGET_FLEXLM" ++ default_use_cxa_atexit=no ++ ;; ++mips64octeon*-montavista-elf*) ++ tm_file="elfos.h ${tm_file} mips/elf.h mips/octeon.h mips/octeon-elf.h" ++ tmake_file="mips/t-octeon-elf mips/t-montavista-elf" ++ tm_defines="MIPS_ABI_DEFAULT=ABI_EABI MIPS_CPU_STRING_DEFAULT=\\\"octeon\\\"" ++ default_use_cxa_atexit=no + ;; + mipsisa32-*-elf* | mipsisa32el-*-elf* | \ + mipsisa32r2-*-elf* | mipsisa32r2el-*-elf* | \ +@@ -1767,10 +1884,11 @@ mipsisa64-*-elf* | mipsisa64el-*-elf*) + ;; + mipsisa64sr71k-*-elf*) + tm_file="elfos.h ${tm_file} mips/elf.h" +- tmake_file=mips/t-sr71k ++ tmake_file="mips/t-sr71k" + target_cpu_default="MASK_64BIT|MASK_FLOAT64" + tm_defines="${tm_defines} MIPS_ISA_DEFAULT=64 MIPS_CPU_STRING_DEFAULT=\\\"sr71000\\\" MIPS_ABI_DEFAULT=ABI_EABI" + use_fixproto=yes ++ tmake_file="$tmake_file" + ;; + mipsisa64sb1-*-elf* | mipsisa64sb1el-*-elf*) + tm_file="elfos.h ${tm_file} mips/elf.h" +@@ -1793,7 +1911,7 @@ mips64-*-elf* | mips64el-*-elf*) + ;; + mips64vr-*-elf* | mips64vrel-*-elf*) + tm_file="mips/vr.h elfos.h ${tm_file} mips/elf.h" +- tmake_file=mips/t-vr ++ tmake_file="mips/t-vr" + use_fixproto=yes + ;; + mips64orion-*-elf* | mips64orionel-*-elf*) +@@ -1926,15 +2044,18 @@ powerpc-*-eabisimaltivec*) + tmake_file="rs6000/t-fprules rs6000/t-fprules-fpbit rs6000/t-ppcendian rs6000/t-ppccomm" + ;; + powerpc-*-eabisim*) +- tm_file="${tm_file} dbxelf.h elfos.h svr4.h freebsd-spec.h rs6000/sysv4.h rs6000/eabi.h rs6000/e500.h rs6000/eabisim.h" ++ tm_file="${tm_file} dbxelf.h elfos.h usegas.h svr4.h freebsd-spec.h rs6000/sysv4.h rs6000/eabi.h rs6000/e500.h rs6000/eabisim.h" + extra_options="${extra_options} rs6000/sysv4.opt" + tmake_file="rs6000/t-fprules rs6000/t-fprules-fpbit rs6000/t-ppcgas rs6000/t-ppccomm" + ;; + powerpc-*-elf*) +- tm_file="${tm_file} dbxelf.h elfos.h svr4.h freebsd-spec.h rs6000/sysv4.h" ++ tm_file="${tm_file} dbxelf.h elfos.h usegas.h svr4.h freebsd-spec.h rs6000/sysv4.h" + extra_options="${extra_options} rs6000/sysv4.opt" + tmake_file="rs6000/t-fprules rs6000/t-fprules-fpbit rs6000/t-ppcgas rs6000/t-ppccomm" +- use_fixproto=yes ++ if test x$enable_powerpc_e500mc_elf = xyes; then ++ tm_file="${tm_file} rs6000/e500mc.h" ++ tmake_file="${tmake_file} rs6000/t-ppc-e500mc" ++ fi + ;; + powerpc-*-eabialtivec*) + tm_file="${tm_file} dbxelf.h elfos.h svr4.h freebsd-spec.h rs6000/sysv4.h rs6000/eabi.h rs6000/e500.h rs6000/eabialtivec.h" +@@ -1942,9 +2063,12 @@ powerpc-*-eabialtivec*) + tmake_file="rs6000/t-fprules rs6000/t-fprules-fpbit rs6000/t-ppcendian rs6000/t-ppccomm" + ;; + powerpc-*-eabi*) +- tm_file="${tm_file} dbxelf.h elfos.h svr4.h freebsd-spec.h rs6000/sysv4.h rs6000/eabi.h rs6000/e500.h" ++ tm_file="${tm_file} dbxelf.h elfos.h usegas.h svr4.h freebsd-spec.h rs6000/sysv4.h rs6000/eabi.h rs6000/e500.h" + extra_options="${extra_options} rs6000/sysv4.opt" + tmake_file="rs6000/t-fprules rs6000/t-fprules-fpbit rs6000/t-ppcgas rs6000/t-ppccomm" ++ if test x$enable_extra_sgxx_multilibs = xyes; then ++ tmake_file="${tmake_file} rs6000/t-cs-eabi" ++ fi + ;; + powerpc-*-rtems*) + tm_file="${tm_file} dbxelf.h elfos.h svr4.h freebsd-spec.h rs6000/sysv4.h rs6000/eabi.h rs6000/e500.h rs6000/rtems.h rtems.h" +@@ -1959,7 +2083,7 @@ powerpc-*-linux*altivec*) + powerpc-*-linux*spe*) + 
tm_file="${tm_file} dbxelf.h elfos.h svr4.h freebsd-spec.h rs6000/sysv4.h rs6000/linux.h rs6000/linuxspe.h rs6000/e500.h" + extra_options="${extra_options} rs6000/sysv4.opt" +- tmake_file="rs6000/t-fprules rs6000/t-fprules-softfp soft-fp/t-softfp rs6000/t-ppcos ${tmake_file} rs6000/t-ppccomm" ++ tmake_file="t-dfprules rs6000/t-fprules rs6000/t-fprules-softfp soft-fp/t-softfp rs6000/t-ppcos ${tmake_file} rs6000/t-ppccomm" + ;; + powerpc-*-linux*paired*) + tm_file="${tm_file} dbxelf.h elfos.h svr4.h freebsd-spec.h rs6000/sysv4.h rs6000/linux.h rs6000/750cl.h" +@@ -1980,12 +2104,28 @@ powerpc-*-linux*) + extra_options="${extra_options} rs6000/linux64.opt" + ;; + *) +- tm_file="${tm_file} rs6000/linux.h" ++ tm_file="${tm_file} rs6000/linux.h rs6000/e500.h" ++ tmake_file="$tmake_file rs6000/t-linux" + ;; + esac + if test x${enable_secureplt} = xyes; then + tm_file="rs6000/secureplt.h ${tm_file}" + fi ++ case ${target} in ++ powerpc-wrs-linux-gnu) ++ tm_file="$tm_file rs6000/wrs-linux.h rs6000/e500.h" ++ tmake_file="$tmake_file rs6000/t-wrs-linux" ++ tm_defines="$tm_defines TARGET_FLEXLM" ++ ;; ++ powerpc-montavista*-linux-gnu) ++ tm_file="$tm_file rs6000/montavista-linux.h" ++ tmake_file="$tmake_file rs6000/t-montavista-linux" ++ ;; ++ powerpc-timesys-linux-gnu*) ++ tmake_file="${tmake_file} rs6000/t-timesys" ++ tm_file="${tm_file} rs6000/timesys-linux.h" ++ ;; ++ esac + ;; + powerpc-*-gnu-gnualtivec*) + tm_file="${cpu_type}/${cpu_type}.h elfos.h svr4.h freebsd-spec.h gnu.h rs6000/sysv4.h rs6000/linux.h rs6000/linuxaltivec.h rs6000/gnu.h" +@@ -2019,7 +2159,7 @@ powerpc-wrs-vxworks|powerpc-wrs-vxworksa + esac + ;; + powerpc-wrs-windiss*) # Instruction-level simulator for VxWorks. +- tm_file="${tm_file} elfos.h svr4.h freebsd-spec.h rs6000/sysv4.h rs6000/windiss.h" ++ tm_file="${tm_file} elfos.h usegas.h svr4.h freebsd-spec.h rs6000/sysv4.h rs6000/windiss.h" + tmake_file="rs6000/t-fprules rs6000/t-fprules-fpbit rs6000/t-ppcgas rs6000/t-ppccomm" + extra_options="${extra_options} rs6000/sysv4.opt" + thread_file="" +@@ -2043,28 +2183,28 @@ powerpcle-*-sysv*) + use_fixproto=yes + ;; + powerpcle-*-elf*) +- tm_file="${tm_file} dbxelf.h elfos.h svr4.h freebsd-spec.h rs6000/sysv4.h rs6000/sysv4le.h" ++ tm_file="${tm_file} dbxelf.h elfos.h usegas.h svr4.h freebsd-spec.h rs6000/sysv4.h rs6000/sysv4le.h" + tmake_file="rs6000/t-fprules rs6000/t-fprules-fpbit rs6000/t-ppcgas rs6000/t-ppccomm" + extra_options="${extra_options} rs6000/sysv4.opt" + use_fixproto=yes + ;; + powerpcle-*-eabisim*) +- tm_file="${tm_file} dbxelf.h elfos.h svr4.h freebsd-spec.h rs6000/sysv4.h rs6000/sysv4le.h rs6000/eabi.h rs6000/e500.h rs6000/eabisim.h" ++ tm_file="${tm_file} dbxelf.h elfos.h usegas.h svr4.h freebsd-spec.h rs6000/sysv4.h rs6000/sysv4le.h rs6000/eabi.h rs6000/e500.h rs6000/eabisim.h" + tmake_file="rs6000/t-fprules rs6000/t-fprules-fpbit rs6000/t-ppcgas rs6000/t-ppccomm" + extra_options="${extra_options} rs6000/sysv4.opt" + ;; + powerpcle-*-eabi*) +- tm_file="${tm_file} dbxelf.h elfos.h svr4.h freebsd-spec.h rs6000/sysv4.h rs6000/sysv4le.h rs6000/eabi.h rs6000/e500.h" ++ tm_file="${tm_file} dbxelf.h elfos.h usegas.h svr4.h freebsd-spec.h rs6000/sysv4.h rs6000/sysv4le.h rs6000/eabi.h rs6000/e500.h" + tmake_file="rs6000/t-fprules rs6000/t-fprules-fpbit rs6000/t-ppcgas rs6000/t-ppccomm" + extra_options="${extra_options} rs6000/sysv4.opt" + ;; + powerpc-*-kaos*) +- tm_file="${tm_file} dbxelf.h elfos.h svr4.h freebsd-spec.h rs6000/sysv4.h kaos.h rs6000/kaos-ppc.h" ++ tm_file="${tm_file} dbxelf.h elfos.h usegas.h svr4.h 
freebsd-spec.h rs6000/sysv4.h kaos.h rs6000/kaos-ppc.h" + tmake_file="rs6000/t-fprules rs6000/t-fprules-fpbit rs6000/t-ppcgas rs6000/t-ppccomm" + extra_options="${extra_options} rs6000/sysv4.opt" + ;; + powerpcle-*-kaos*) +- tm_file="${tm_file} dbxelf.h elfos.h svr4.h freebsd-spec.h rs6000/sysv4.h rs6000/sysv4le.h kaos.h rs6000/kaos-ppc.h" ++ tm_file="${tm_file} dbxelf.h elfos.h usegas.h svr4.h freebsd-spec.h rs6000/sysv4.h rs6000/sysv4le.h kaos.h rs6000/kaos-ppc.h" + tmake_file="rs6000/t-fprules rs6000/t-fprules-fpbit rs6000/t-ppcgas rs6000/t-ppccomm" + extra_options="${extra_options} rs6000/sysv4.opt" + ;; +@@ -2162,8 +2302,10 @@ sh-*-symbianelf* | sh[12346l]*-*-symbian + esac + fi + case ${with_endian} in +- big|little) tmake_file="${tmake_file} sh/t-1e" ;; +- big,little|little,big) ;; ++ big) TM_ENDIAN_CONFIG=mb ;; ++ little) TM_ENDIAN_CONFIG=ml ;; ++ big,little) TM_ENDIAN_CONFIG="mb ml" ;; ++ little,big) TM_ENDIAN_CONFIG="ml mb" ;; + *) echo "with_endian=${with_endian} not supported."; exit 1 ;; + esac + case ${with_endian} in +@@ -2288,29 +2430,40 @@ sh-*-symbianelf* | sh[12346l]*-*-symbian + fi + target_cpu_default=SELECT_`echo ${sh_cpu_default}|tr abcdefghijklmnopqrstuvwxyz- ABCDEFGHIJKLMNOPQRSTUVWXYZ_` + tm_defines=${tm_defines}' SH_MULTILIB_CPU_DEFAULT=\"'`echo $sh_cpu_default|sed s/sh/m/`'\"' +- sh_multilibs=`echo $sh_multilibs,$sh_cpu_default | sed -e 's/[ ,/][ ,]*/ /g' -e 's/ $//' -e 's/^m/sh/' -e 's/ m/ sh/g' | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ_ abcdefghijklmnopqrstuvwxyz-` ++ tm_defines="$tm_defines SUPPORT_`echo $sh_cpu_default | sed 's/^m/sh/' | tr abcdefghijklmnopqrstuvwxyz- ABCDEFGHIJKLMNOPQRSTUVWXYZ_`=1" ++ sh_multilibs=`echo $sh_multilibs | sed -e 's/,/ /g' -e 's/^sh/m/i' -e 's/ sh/ m/gi' | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ_ abcdefghijklmnopqrstuvwxyz-` + for sh_multilib in ${sh_multilibs}; do + case ${sh_multilib} in +- sh1 | sh2 | sh2e | sh3 | sh3e | \ +- sh4 | sh4-single | sh4-single-only | sh4-nofpu | sh4-300 |\ +- sh4a | sh4a-single | sh4a-single-only | sh4a-nofpu | sh4al | \ +- sh2a | sh2a-single | sh2a-single-only | sh2a-nofpu | \ +- sh5-64media | sh5-64media-nofpu | \ +- sh5-32media | sh5-32media-nofpu | \ +- sh5-compact | sh5-compact-nofpu) +- tmake_file="${tmake_file} sh/t-mlib-${sh_multilib}" +- tm_defines="$tm_defines SUPPORT_`echo $sh_multilib|tr abcdefghijklmnopqrstuvwxyz- ABCDEFGHIJKLMNOPQRSTUVWXYZ_`=1" ++ m1 | m2 | m2e | m3 | m3e | \ ++ m4 | m4-single | m4-single-only | m4-nofpu | m4-300 |\ ++ m4a | m4a-single | m4a-single-only | m4a-nofpu | m4al | \ ++ m2a | m2a-single | m2a-single-only | m2a-nofpu | \ ++ m5-64media | m5-64media-nofpu | \ ++ m5-32media | m5-32media-nofpu | \ ++ m5-compact | m5-compact-nofpu) ++ TM_MULTILIB_CONFIG="${TM_MULTILIB_CONFIG}/${sh_multilib}" ++ tm_defines="$tm_defines SUPPORT_`echo $sh_multilib | sed 's/^m/sh/' | tr abcdefghijklmnopqrstuvwxyz- ABCDEFGHIJKLMNOPQRSTUVWXYZ_`=1" + ;; ++ !*) TM_MULTILIB_EXCEPTIONS_CONFIG="${TM_MULTILIB_EXCEPTIONS_CONFIG} ${sh_multilib#!}" ;; ++ none) ;; + *) + echo "with_multilib_list=${sh_multilib} not supported." 
+ exit 1 + ;; + esac + done ++ TM_MULTILIB_CONFIG=${TM_MULTILIB_CONFIG#/} + if test x${enable_incomplete_targets} = xyes ; then + tm_defines="$tm_defines SUPPORT_SH1=1 SUPPORT_SH2E=1 SUPPORT_SH4=1 SUPPORT_SH4_SINGLE=1 SUPPORT_SH2A=1 SUPPORT_SH2A_SINGLE=1 SUPPORT_SH5_32MEDIA=1 SUPPORT_SH5_32MEDIA_NOFPU=1 SUPPORT_SH5_64MEDIA=1 SUPPORT_SH5_64MEDIA_NOFPU=1" + fi +- use_fixproto=yes ++ if test x$enable_extra_sgxxlite_multilibs = xyes \ ++ || test x$enable_extra_sgxx_multilibs = xyes; then ++ # SG++ and Lite do not differ, as yet, so use the Lite files for both ++ tm_file="$tm_file sh/cs-sgxxlite-linux.h" ++ tmake_file="$tmake_file sh/t-sgxxlite-linux" ++ fi ++ tm_file="$tm_file ./sysroot-suffix.h" ++ tmake_file="$tmake_file t-sysroot-suffix" + ;; + sh-*-rtems*) + tmake_file="sh/t-sh sh/t-elf t-rtems sh/t-rtems" +@@ -2340,6 +2493,13 @@ sparc-*-elf*) + extra_parts="crti.o crtn.o crtbegin.o crtend.o" + use_fixproto=yes + ;; ++sparc-wrs-linux*) ++ tm_file="sparc/biarch64.h ${tm_file} dbxelf.h elfos.h svr4.h sparc/sysv4.h sparc/linux64.h" ++ extra_options="${extra_options} sparc/long-double-switch.opt" ++ tmake_file="${tmake_file} sparc/t-linux sparc/t-linux64 sparc/t-crtfm" ++ tm_defines="${tm_defines} BIARCH_32BIT_DEFAULT TARGET_FLEXLM" ++ need_64bit_hwint=yes ++ ;; + sparc-*-linux*) # SPARC's running GNU/Linux, libc6 + tm_file="${tm_file} dbxelf.h elfos.h svr4.h sparc/sysv4.h sparc/linux.h" + extra_options="${extra_options} sparc/long-double-switch.opt" +@@ -2882,7 +3042,8 @@ case "${target}" in + "" \ + | armv[23456] | armv2a | armv3m | armv4t | armv5t \ + | armv5te | armv6j |armv6k | armv6z | armv6zk \ +- | iwmmxt | ep9312) ++ | armv6-m | armv7-a | armv7-r | armv7-m \ ++ | iwmmxt | ep9312 | marvell-f ) + # OK + ;; + *) +@@ -3017,8 +3178,8 @@ case "${target}" in + ;; + + i[34567]86-*-* | x86_64-*-*) +- supported_defaults="arch cpu tune" +- for which in arch cpu tune; do ++ supported_defaults="arch arch32 arch64 cpu tune" ++ for which in arch arch32 arch64 cpu tune; do + eval "val=\$with_$which" + case ${val} in + i386 | i486 \ +@@ -3029,8 +3190,10 @@ case "${target}" in + | prescott | pentium-m | pentium4m | pentium3m) + case "${target}" in + x86_64-*-*) +- echo "CPU given in --with-$which=$val doesn't support 64bit mode." 1>&2 +- exit 1 ++ if [ "x$which" != "xarch32" ]; then ++ echo "CPU given in --with-$which=$val doesn't support 64bit mode." 
1>&2 ++ exit 1 ++ fi + ;; + esac + # OK +@@ -3047,7 +3210,7 @@ case "${target}" in + ;; + + mips*-*-*) +- supported_defaults="abi arch float tune divide llsc" ++ supported_defaults="abi arch arch32 arch64 float tune tune32 tune64 divide llsc" + + case ${with_float} in + "" | soft | hard) +@@ -3079,6 +3242,21 @@ case "${target}" in + ;; + esac + ++ for fix in ice9a; do ++ supported_defaults="$supported_defaults fix-$fix" ++ eval "val=\$with_fix_$fix" ++ case $val in ++ "" | off) ++ eval "\$with_fix_$fix=" ++ ;; ++ on) ++ ;; ++ *) ++ echo "Unknown argument to --with-fix-$fix: $val" ++ ;; ++ esac ++ done ++ + case ${with_llsc} in + yes) + with_llsc=llsc +@@ -3116,9 +3294,9 @@ case "${target}" in + ;; + + powerpc*-*-* | rs6000-*-*) +- supported_defaults="cpu float tune" ++ supported_defaults="cpu cpu32 cpu64 float tune" + +- for which in cpu tune; do ++ for which in cpu cpu32 cpu64 tune; do + eval "val=\$with_$which" + case ${val} in + default32 | default64) +@@ -3134,8 +3312,8 @@ case "${target}" in + | rios | rios1 | rios2 | rsc | rsc1 | rs64a \ + | 401 | 403 | 405 | 405fp | 440 | 440fp | 505 \ + | 601 | 602 | 603 | 603e | ec603e | 604 \ +- | 604e | 620 | 630 | 740 | 750 | 7400 | 7450 \ +- | 854[08] | 801 | 821 | 823 | 860 | 970 | G3 | G4 | G5 | cell) ++ | 604e | 620 | 630 | 740 | 750 | 7400 | 7450 |e300c[23] \ ++ | 854[08] | e500mc | 801 | 821 | 823 | 860 | 970 | G3 | G4 | G5 | cell) + # OK + ;; + *) +@@ -3356,11 +3534,28 @@ case ${target} in + ;; + esac + ++case ${target} in ++ *-eglibc-*-*) ++ tmake_file="${tmake_file} t-eglibc" ++ ++ case ${target} in ++ arm-*) ++ # ARM already includes below. ++ ;; ++ *) ++ tmake_file="${tmake_file} t-sysroot-suffix" ++ tm_file="${tm_file} ./sysroot-suffix.h" ++ ;; ++ esac ++ ;; ++esac ++ + t= +-all_defaults="abi cpu arch tune schedule float mode fpu divide llsc" ++all_defaults="abi cpu cpu32 cpu64 arch arch32 arch64 tune tune32 tune64 schedule float mode fpu divide fix-ice9a llsc" + for option in $all_defaults + do +- eval "val=\$with_$option" ++ underscoreoption=`echo $option | sed -e s/-/_/g` ++ eval "val=\$with_$underscoreoption" + if test -n "$val"; then + case " $supported_defaults " in + *" $option "*) +--- a/gcc/config.in ++++ b/gcc/config.in +@@ -100,6 +100,12 @@ + #endif + + ++/* Define to warn for use of native system header directories */ ++#ifndef USED_FOR_TARGET ++#undef ENABLE_POISON_SYSTEM_DIRECTORIES ++#endif ++ ++ + /* Define if you want all operations on RTL (the basic data structure of the + optimizer and back end) to be checked for dynamic type safety at runtime. + This is quite expensive. */ +@@ -1369,37 +1375,37 @@ + #endif + + +-/* The size of `int', as computed by sizeof. */ ++/* The size of a `int', as computed by sizeof. */ + #ifndef USED_FOR_TARGET + #undef SIZEOF_INT + #endif + + +-/* The size of `long', as computed by sizeof. */ ++/* The size of a `long', as computed by sizeof. */ + #ifndef USED_FOR_TARGET + #undef SIZEOF_LONG + #endif + + +-/* The size of `long long', as computed by sizeof. */ ++/* The size of a `long long', as computed by sizeof. */ + #ifndef USED_FOR_TARGET + #undef SIZEOF_LONG_LONG + #endif + + +-/* The size of `short', as computed by sizeof. */ ++/* The size of a `short', as computed by sizeof. */ + #ifndef USED_FOR_TARGET + #undef SIZEOF_SHORT + #endif + + +-/* The size of `void *', as computed by sizeof. */ ++/* The size of a `void *', as computed by sizeof. */ + #ifndef USED_FOR_TARGET + #undef SIZEOF_VOID_P + #endif + + +-/* The size of `__int64', as computed by sizeof. 
*/ ++/* The size of a `__int64', as computed by sizeof. */ + #ifndef USED_FOR_TARGET + #undef SIZEOF___INT64 + #endif +--- a/gcc/config/arm/aout.h ++++ b/gcc/config/arm/aout.h +@@ -191,9 +191,6 @@ + } + #endif + +-/* Arm Assembler barfs on dollars. */ +-#define DOLLARS_IN_IDENTIFIERS 0 +- + #ifndef NO_DOLLAR_IN_LABEL + #define NO_DOLLAR_IN_LABEL 1 + #endif +--- a/gcc/config/arm/arm-cores.def ++++ b/gcc/config/arm/arm-cores.def +@@ -102,6 +102,8 @@ ARM_CORE("arm1020e", arm1020e, 5TE, + ARM_CORE("arm1022e", arm1022e, 5TE, FL_LDSCHED, fastmul) + ARM_CORE("xscale", xscale, 5TE, FL_LDSCHED | FL_STRONG | FL_XSCALE, xscale) + ARM_CORE("iwmmxt", iwmmxt, 5TE, FL_LDSCHED | FL_STRONG | FL_XSCALE | FL_IWMMXT, xscale) ++ARM_CORE("iwmmxt2", iwmmxt2, 5TE, FL_LDSCHED | FL_STRONG | FL_XSCALE | FL_IWMMXT, xscale) ++ARM_CORE("marvell-f", marvell_f, 5TE, FL_LDSCHED | FL_MARVELL_F | FL_VFPV2, 9e) + + /* V5TEJ Architecture Processors */ + ARM_CORE("arm926ej-s", arm926ejs, 5TEJ, FL_LDSCHED, 9e) +@@ -115,6 +117,12 @@ ARM_CORE("arm1176jzf-s", arm1176jzfs, 6 + ARM_CORE("mpcorenovfp", mpcorenovfp, 6K, FL_LDSCHED, 9e) + ARM_CORE("mpcore", mpcore, 6K, FL_LDSCHED | FL_VFPV2, 9e) + ARM_CORE("arm1156t2-s", arm1156t2s, 6T2, FL_LDSCHED, 9e) ++ ++/* V7 Architecture Processors */ + ARM_CORE("cortex-a8", cortexa8, 7A, FL_LDSCHED, 9e) ++ARM_CORE("cortex-a9", cortexa9, 7A, FL_LDSCHED, 9e) + ARM_CORE("cortex-r4", cortexr4, 7R, FL_LDSCHED, 9e) ++ARM_CORE("cortex-r4f", cortexr4f, 7R, FL_LDSCHED, 9e) + ARM_CORE("cortex-m3", cortexm3, 7M, FL_LDSCHED, 9e) ++ARM_CORE("cortex-m1", cortexm1, 6M, FL_LDSCHED, 9e) ++ARM_CORE("cortex-m0", cortexm0, 6M, FL_LDSCHED, 9e) +--- a/gcc/config/arm/arm-modes.def ++++ b/gcc/config/arm/arm-modes.def +@@ -25,6 +25,11 @@ + FIXME What format is this? */ + FLOAT_MODE (XF, 12, 0); + ++/* Half-precision floating point */ ++FLOAT_MODE (HF, 2, 0); ++ADJUST_FLOAT_FORMAT (HF, ((arm_fp16_format == ARM_FP16_FORMAT_ALTERNATIVE) ++ ? &arm_half_format : &ieee_half_format)); ++ + /* CCFPEmode should be used with floating inequalities, + CCFPmode should be used with floating equalities. + CC_NOOVmode should be used with SImode integer equalities. +@@ -62,6 +67,4 @@ VECTOR_MODES (FLOAT, 16); /* V + INT_MODE (EI, 24); + INT_MODE (OI, 32); + INT_MODE (CI, 48); +-/* ??? This should actually have 512 bits but the precision only has 9 +- bits. 
*/ +-FRACTIONAL_INT_MODE (XI, 511, 64); ++INT_MODE (XI, 64); +--- a/gcc/config/arm/arm-protos.h ++++ b/gcc/config/arm/arm-protos.h +@@ -24,6 +24,7 @@ + #define GCC_ARM_PROTOS_H + + extern void arm_override_options (void); ++extern void arm_optimization_options (int, int); + extern int use_return_insn (int, rtx); + extern int arm_regno_class (int); + extern void arm_load_pic_register (unsigned long); +@@ -42,9 +43,6 @@ extern unsigned int arm_dbx_register_num + extern void arm_output_fn_unwind (FILE *, bool); + + +-#ifdef TREE_CODE +-extern int arm_return_in_memory (const_tree); +-#endif + #ifdef RTX_CODE + extern bool arm_vector_mode_supported_p (enum machine_mode); + extern int arm_hard_regno_mode_ok (unsigned int, enum machine_mode); +@@ -90,7 +88,7 @@ extern bool arm_cannot_force_const_mem ( + + extern int cirrus_memory_offset (rtx); + extern int arm_coproc_mem_operand (rtx, bool); +-extern int neon_vector_mem_operand (rtx, bool); ++extern int neon_vector_mem_operand (rtx, int); + extern int neon_struct_mem_operand (rtx); + extern int arm_no_early_store_addr_dep (rtx, rtx); + extern int arm_no_early_alu_shift_dep (rtx, rtx); +@@ -125,6 +123,7 @@ extern const char *fp_immediate_constant + extern void arm_emit_call_insn (rtx, rtx); + extern const char *output_call (rtx *); + extern const char *output_call_mem (rtx *); ++void arm_emit_movpair (rtx, rtx); + extern const char *output_mov_long_double_fpa_from_arm (rtx *); + extern const char *output_mov_long_double_arm_from_fpa (rtx *); + extern const char *output_mov_long_double_arm_from_arm (rtx *); +@@ -145,6 +144,7 @@ extern void arm_final_prescan_insn (rtx) + extern int arm_debugger_arg_offset (int, rtx); + extern bool arm_is_long_call_p (tree); + extern int arm_emit_vector_const (FILE *, rtx); ++extern void arm_emit_fp16_const (rtx c); + extern const char * arm_output_load_gr (rtx *); + extern const char *vfp_output_fstmd (rtx *); + extern void arm_set_return_address (rtx, rtx); +@@ -155,13 +155,15 @@ extern bool arm_output_addr_const_extra + + #if defined TREE_CODE + extern rtx arm_function_arg (CUMULATIVE_ARGS *, enum machine_mode, tree, int); ++extern void arm_function_arg_advance (CUMULATIVE_ARGS *, enum machine_mode, ++ tree, bool); + extern void arm_init_cumulative_args (CUMULATIVE_ARGS *, tree, rtx, tree); + extern bool arm_pad_arg_upward (enum machine_mode, const_tree); + extern bool arm_pad_reg_upward (enum machine_mode, tree, int); + extern bool arm_needs_doubleword_align (enum machine_mode, tree); +-extern rtx arm_function_value(const_tree, const_tree); + #endif + extern int arm_apply_result_size (void); ++extern rtx aapcs_libcall_value (enum machine_mode); + + #endif /* RTX_CODE */ + +@@ -208,6 +210,7 @@ extern void arm_pr_no_long_calls (struct + extern void arm_pr_long_calls_off (struct cpp_reader *); + + extern void arm_lang_object_attributes_init(void); ++extern void arm_adjust_reg_alloc_order (int *); + + extern const char *arm_mangle_type (const_tree); + +--- a/gcc/config/arm/arm-tune.md ++++ b/gcc/config/arm/arm-tune.md +@@ -1,5 +1,5 @@ + ;; -*- buffer-read-only: t -*- + ;; Generated automatically by gentune.sh from arm-cores.def + (define_attr "tune" +- 
"arm2,arm250,arm3,arm6,arm60,arm600,arm610,arm620,arm7,arm7d,arm7di,arm70,arm700,arm700i,arm710,arm720,arm710c,arm7100,arm7500,arm7500fe,arm7m,arm7dm,arm7dmi,arm8,arm810,strongarm,strongarm110,strongarm1100,strongarm1110,arm7tdmi,arm7tdmis,arm710t,arm720t,arm740t,arm9,arm9tdmi,arm920,arm920t,arm922t,arm940t,ep9312,arm10tdmi,arm1020t,arm9e,arm946es,arm966es,arm968es,arm10e,arm1020e,arm1022e,xscale,iwmmxt,arm926ejs,arm1026ejs,arm1136js,arm1136jfs,arm1176jzs,arm1176jzfs,mpcorenovfp,mpcore,arm1156t2s,cortexa8,cortexr4,cortexm3" ++ "arm2,arm250,arm3,arm6,arm60,arm600,arm610,arm620,arm7,arm7d,arm7di,arm70,arm700,arm700i,arm710,arm720,arm710c,arm7100,arm7500,arm7500fe,arm7m,arm7dm,arm7dmi,arm8,arm810,strongarm,strongarm110,strongarm1100,strongarm1110,arm7tdmi,arm7tdmis,arm710t,arm720t,arm740t,arm9,arm9tdmi,arm920,arm920t,arm922t,arm940t,ep9312,arm10tdmi,arm1020t,arm9e,arm946es,arm966es,arm968es,arm10e,arm1020e,arm1022e,xscale,iwmmxt,iwmmxt2,marvell_f,arm926ejs,arm1026ejs,arm1136js,arm1136jfs,arm1176jzs,arm1176jzfs,mpcorenovfp,mpcore,arm1156t2s,cortexa8,cortexa9,cortexr4,cortexr4f,cortexm3,cortexm1,cortexm0" + (const (symbol_ref "arm_tune"))) +--- a/gcc/config/arm/arm.c ++++ b/gcc/config/arm/arm.c +@@ -1,6 +1,6 @@ + /* Output routines for GCC for ARM. + Copyright (C) 1991, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, +- 2002, 2003, 2004, 2005, 2006, 2007 Free Software Foundation, Inc. ++ 2002, 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. + Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl) + and Martin Simmons (@harleqn.co.uk). + More major hacks by Richard Earnshaw (rearnsha@arm.com). +@@ -42,6 +42,7 @@ + #include "optabs.h" + #include "toplev.h" + #include "recog.h" ++#include "cgraph.h" + #include "ggc.h" + #include "except.h" + #include "c-pragma.h" +@@ -52,6 +53,7 @@ + #include "debug.h" + #include "langhooks.h" + #include "df.h" ++#include "intl.h" + + /* Forward definitions of types. */ + typedef struct minipool_node Mnode; +@@ -62,6 +64,7 @@ const struct attribute_spec arm_attribut + void (*arm_lang_output_object_attributes_hook)(void); + + /* Forward function declarations. 
*/ ++static int arm_compute_static_chain_stack_bytes (void); + static arm_stack_offsets *arm_get_frame_offsets (void); + static void arm_add_gc_roots (void); + static int arm_gen_constant (enum rtx_code, enum machine_mode, rtx, +@@ -74,7 +77,6 @@ static int thumb1_base_register_rtx_p (r + inline static int thumb1_index_register_rtx_p (rtx, int); + static int thumb_far_jump_used_p (void); + static bool thumb_force_lr_save (void); +-static unsigned long thumb1_compute_save_reg_mask (void); + static int const_ok_for_op (HOST_WIDE_INT, enum rtx_code); + static rtx emit_sfm (int, int); + static unsigned arm_size_return_regs (void); +@@ -109,6 +111,7 @@ static unsigned long arm_compute_save_re + static unsigned long arm_isr_value (tree); + static unsigned long arm_compute_func_type (void); + static tree arm_handle_fndecl_attribute (tree *, tree, tree, int, bool *); ++static tree arm_handle_pcs_attribute (tree *, tree, tree, int, bool *); + static tree arm_handle_isr_attribute (tree *, tree, tree, int, bool *); + #if TARGET_DLLIMPORT_DECL_ATTRIBUTES + static tree arm_handle_notshared_attribute (tree *, tree, tree, int, bool *); +@@ -122,15 +125,20 @@ static int arm_adjust_cost (rtx, rtx, rt + static int count_insns_for_constant (HOST_WIDE_INT, int); + static int arm_get_strip_length (int); + static bool arm_function_ok_for_sibcall (tree, tree); ++static bool arm_return_in_memory (const_tree, const_tree); ++static rtx arm_function_value (const_tree, const_tree, bool); ++static rtx arm_libcall_value (enum machine_mode, rtx); ++ + static void arm_internal_label (FILE *, const char *, unsigned long); + static void arm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT, HOST_WIDE_INT, + tree); +-static int arm_rtx_costs_1 (rtx, enum rtx_code, enum rtx_code); ++static bool arm_rtx_costs_1 (rtx, enum rtx_code, int*); + static bool arm_size_rtx_costs (rtx, int, int, int *); +-static bool arm_slowmul_rtx_costs (rtx, int, int, int *); +-static bool arm_fastmul_rtx_costs (rtx, int, int, int *); +-static bool arm_xscale_rtx_costs (rtx, int, int, int *); +-static bool arm_9e_rtx_costs (rtx, int, int, int *); ++static bool arm_slowmul_rtx_costs (rtx, enum rtx_code, enum rtx_code, int *); ++static bool arm_fastmul_rtx_costs (rtx, enum rtx_code, enum rtx_code, int *); ++static bool arm_xscale_rtx_costs (rtx, enum rtx_code, enum rtx_code, int *); ++static bool arm_9e_rtx_costs (rtx, enum rtx_code, enum rtx_code, int *); ++static bool arm_rtx_costs (rtx, int, int, int *); + static int arm_address_cost (rtx); + static bool arm_memory_load_p (rtx); + static bool arm_cirrus_insn_p (rtx); +@@ -146,6 +154,9 @@ static void emit_constant_insn (rtx cond + static rtx emit_set_insn (rtx, rtx); + static int arm_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode, + tree, bool); ++static rtx aapcs_allocate_return_reg (enum machine_mode, const_tree, ++ const_tree); ++static int aapcs_select_return_coproc (const_tree, const_tree); + + #ifdef OBJECT_FORMAT_ELF + static void arm_elf_asm_constructor (rtx, int) ATTRIBUTE_UNUSED; +@@ -167,11 +178,13 @@ static bool arm_default_short_enums (voi + static bool arm_align_anon_bitfield (void); + static bool arm_return_in_msb (const_tree); + static bool arm_must_pass_in_stack (enum machine_mode, const_tree); ++static bool arm_return_in_memory (const_tree, const_tree); + #ifdef TARGET_UNWIND_INFO + static void arm_unwind_emit (FILE *, rtx); + static bool arm_output_ttype (rtx); + #endif + static void arm_dwarf_handle_frame_unspec (const char *, rtx, int); ++static rtx 
arm_dwarf_register_span(rtx); + + static tree arm_cxx_guard_type (void); + static bool arm_cxx_guard_mask_bit (void); +@@ -183,12 +196,22 @@ static void arm_cxx_determine_class_data + static bool arm_cxx_class_data_always_comdat (void); + static bool arm_cxx_use_aeabi_atexit (void); + static void arm_init_libfuncs (void); ++static tree arm_build_builtin_va_list (void); ++static void arm_expand_builtin_va_start (tree, rtx); ++static tree arm_gimplify_va_arg_expr (tree, tree, tree *, tree *); + static bool arm_handle_option (size_t, const char *, int); + static void arm_target_help (void); + static unsigned HOST_WIDE_INT arm_shift_truncation_mask (enum machine_mode); + static bool arm_cannot_copy_insn_p (rtx); + static bool arm_tls_symbol_p (rtx x); + static void arm_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED; ++static bool arm_allocate_stack_slots_for_args (void); ++static int arm_issue_rate (void); ++static int arm_multipass_dfa_lookahead (void); ++static const char *arm_invalid_parameter_type (const_tree t); ++static const char *arm_invalid_return_type (const_tree t); ++static tree arm_promoted_type (const_tree t); ++static tree arm_convert_to_type (tree type, tree expr); + + + /* Initialize the GCC target structure. */ +@@ -248,14 +271,19 @@ static void arm_output_dwarf_dtprel (FIL + #undef TARGET_FUNCTION_OK_FOR_SIBCALL + #define TARGET_FUNCTION_OK_FOR_SIBCALL arm_function_ok_for_sibcall + ++#undef TARGET_FUNCTION_VALUE ++#define TARGET_FUNCTION_VALUE arm_function_value ++ ++#undef TARGET_LIBCALL_VALUE ++#define TARGET_LIBCALL_VALUE arm_libcall_value ++ + #undef TARGET_ASM_OUTPUT_MI_THUNK + #define TARGET_ASM_OUTPUT_MI_THUNK arm_output_mi_thunk + #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK + #define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall + +-/* This will be overridden in arm_override_options. 
*/ + #undef TARGET_RTX_COSTS +-#define TARGET_RTX_COSTS arm_slowmul_rtx_costs ++#define TARGET_RTX_COSTS arm_rtx_costs + #undef TARGET_ADDRESS_COST + #define TARGET_ADDRESS_COST arm_address_cost + +@@ -289,6 +317,9 @@ static void arm_output_dwarf_dtprel (FIL + #undef TARGET_SETUP_INCOMING_VARARGS + #define TARGET_SETUP_INCOMING_VARARGS arm_setup_incoming_varargs + ++#undef TARGET_ALLOCATE_STACK_SLOTS_FOR_ARGS ++#define TARGET_ALLOCATE_STACK_SLOTS_FOR_ARGS arm_allocate_stack_slots_for_args ++ + #undef TARGET_DEFAULT_SHORT_ENUMS + #define TARGET_DEFAULT_SHORT_ENUMS arm_default_short_enums + +@@ -329,6 +360,9 @@ static void arm_output_dwarf_dtprel (FIL + #undef TARGET_RETURN_IN_MSB + #define TARGET_RETURN_IN_MSB arm_return_in_msb + ++#undef TARGET_RETURN_IN_MEMORY ++#define TARGET_RETURN_IN_MEMORY arm_return_in_memory ++ + #undef TARGET_MUST_PASS_IN_STACK + #define TARGET_MUST_PASS_IN_STACK arm_must_pass_in_stack + +@@ -347,6 +381,9 @@ static void arm_output_dwarf_dtprel (FIL + #undef TARGET_DWARF_HANDLE_FRAME_UNSPEC + #define TARGET_DWARF_HANDLE_FRAME_UNSPEC arm_dwarf_handle_frame_unspec + ++#undef TARGET_DWARF_REGISTER_SPAN ++#define TARGET_DWARF_REGISTER_SPAN arm_dwarf_register_span ++ + #undef TARGET_CANNOT_COPY_INSN_P + #define TARGET_CANNOT_COPY_INSN_P arm_cannot_copy_insn_p + +@@ -355,17 +392,54 @@ static void arm_output_dwarf_dtprel (FIL + #define TARGET_HAVE_TLS true + #endif + ++#undef TARGET_ADJUST_REG_ALLOC_ORDER ++#define TARGET_ADJUST_REG_ALLOC_ORDER arm_adjust_reg_alloc_order ++ + #undef TARGET_CANNOT_FORCE_CONST_MEM + #define TARGET_CANNOT_FORCE_CONST_MEM arm_cannot_force_const_mem + + #undef TARGET_MANGLE_TYPE + #define TARGET_MANGLE_TYPE arm_mangle_type + ++#undef TARGET_BUILD_BUILTIN_VA_LIST ++#define TARGET_BUILD_BUILTIN_VA_LIST arm_build_builtin_va_list ++#undef TARGET_EXPAND_BUILTIN_VA_START ++#define TARGET_EXPAND_BUILTIN_VA_START arm_expand_builtin_va_start ++#undef TARGET_GIMPLIFY_VA_ARG_EXPR ++#define TARGET_GIMPLIFY_VA_ARG_EXPR arm_gimplify_va_arg_expr ++ + #ifdef HAVE_AS_TLS + #undef TARGET_ASM_OUTPUT_DWARF_DTPREL + #define TARGET_ASM_OUTPUT_DWARF_DTPREL arm_output_dwarf_dtprel + #endif + ++#undef TARGET_MAX_ANCHOR_OFFSET ++#define TARGET_MAX_ANCHOR_OFFSET 4095 ++ ++/* The minimum is set such that the total size of the block ++ for a particular anchor is -4088 + 1 + 4095 bytes, which is ++ divisible by eight, ensuring natural spacing of anchors. */ ++#undef TARGET_MIN_ANCHOR_OFFSET ++#define TARGET_MIN_ANCHOR_OFFSET -4088 ++ ++#undef TARGET_SCHED_ISSUE_RATE ++#define TARGET_SCHED_ISSUE_RATE arm_issue_rate ++ ++#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD ++#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD arm_multipass_dfa_lookahead ++ ++#undef TARGET_INVALID_PARAMETER_TYPE ++#define TARGET_INVALID_PARAMETER_TYPE arm_invalid_parameter_type ++ ++#undef TARGET_INVALID_RETURN_TYPE ++#define TARGET_INVALID_RETURN_TYPE arm_invalid_return_type ++ ++#undef TARGET_PROMOTED_TYPE ++#define TARGET_PROMOTED_TYPE arm_promoted_type ++ ++#undef TARGET_CONVERT_TO_TYPE ++#define TARGET_CONVERT_TO_TYPE arm_convert_to_type ++ + struct gcc_target targetm = TARGET_INITIALIZER; + + /* Obstack for minipool constant handling. */ +@@ -403,6 +477,9 @@ enum fputype arm_fpu_tune; + /* Whether to use floating point hardware. */ + enum float_abi_type arm_float_abi; + ++/* Which __fp16 format to use. */ ++enum arm_fp16_format_type arm_fp16_format; ++ + /* Which ABI to use. 
*/ + enum arm_abi_type arm_abi; + +@@ -441,9 +518,18 @@ static int thumb_call_reg_needed; + #define FL_DIV (1 << 18) /* Hardware divide. */ + #define FL_VFPV3 (1 << 19) /* Vector Floating Point V3. */ + #define FL_NEON (1 << 20) /* Neon instructions. */ ++#define FL_MARVELL_F (1 << 21) /* Marvell Feroceon. */ + + #define FL_IWMMXT (1 << 29) /* XScale v2 or "Intel Wireless MMX technology". */ + ++/* Some flags are ignored when comparing -mcpu and -march: ++ FL_MARVELL_F so that -mcpu=marvell-f -march=v5te works. ++ FL_LDSCHED and FL_WBUF only effect tuning, ++ FL_CO_PROC, FL_VFPV2, FL_VFPV3 and FL_NEON because FP ++ coprocessors are handled separately. */ ++#define FL_COMPAT (FL_MARVELL_F | FL_LDSCHED | FL_WBUF | FL_CO_PROC | \ ++ FL_VFPV2 | FL_VFPV3 | FL_NEON) ++ + #define FL_FOR_ARCH2 FL_NOTM + #define FL_FOR_ARCH3 (FL_FOR_ARCH2 | FL_MODE32) + #define FL_FOR_ARCH3M (FL_FOR_ARCH3 | FL_ARCH3M) +@@ -460,6 +546,7 @@ static int thumb_call_reg_needed; + #define FL_FOR_ARCH6Z FL_FOR_ARCH6 + #define FL_FOR_ARCH6ZK FL_FOR_ARCH6K + #define FL_FOR_ARCH6T2 (FL_FOR_ARCH6 | FL_THUMB2) ++#define FL_FOR_ARCH6M (FL_FOR_ARCH6 & ~FL_NOTM) + #define FL_FOR_ARCH7 (FL_FOR_ARCH6T2 &~ FL_NOTM) + #define FL_FOR_ARCH7A (FL_FOR_ARCH7 | FL_NOTM) + #define FL_FOR_ARCH7R (FL_FOR_ARCH7A | FL_DIV) +@@ -518,13 +605,22 @@ int arm_arch_xscale = 0; + /* Nonzero if tuning for XScale */ + int arm_tune_xscale = 0; + ++/* Nonzero if tuning for Marvell Feroceon. */ ++int arm_tune_marvell_f = 0; ++ + /* Nonzero if we want to tune for stores that access the write-buffer. + This typically means an ARM6 or ARM7 with MMU or MPU. */ + int arm_tune_wbuf = 0; + ++/* Nonzero if tuning for Cortex-A9. */ ++int arm_tune_cortex_a9 = 0; ++ + /* Nonzero if generating Thumb instructions. */ + int thumb_code = 0; + ++/* Nonzero if generating code for Janus2. */ ++int janus2_code = 0; ++ + /* Nonzero if we should define __THUMB_INTERWORK__ in the + preprocessor. + XXX This is a bit of a hack, it's intended to help work around +@@ -557,6 +653,8 @@ static int after_arm_reorg = 0; + /* The maximum number of insns to be used when loading a constant. */ + static int arm_constant_limit = 3; + ++static enum arm_pcs arm_pcs_default; ++ + /* For an explanation of these variables, see final_prescan_insn below. */ + int arm_ccfsm_state; + /* arm_current_cc is also used for Thumb-2 cond_exec blocks. 
*/ +@@ -593,7 +691,7 @@ struct processors + enum processor_type core; + const char *arch; + const unsigned long flags; +- bool (* rtx_costs) (rtx, int, int, int *); ++ bool (* rtx_costs) (rtx, enum rtx_code, enum rtx_code, int *); + }; + + /* Not all of these give usefully different compilation alternatives, +@@ -632,12 +730,14 @@ static const struct processors all_archi + {"armv6z", arm1176jzs, "6Z", FL_CO_PROC | FL_FOR_ARCH6Z, NULL}, + {"armv6zk", arm1176jzs, "6ZK", FL_CO_PROC | FL_FOR_ARCH6ZK, NULL}, + {"armv6t2", arm1156t2s, "6T2", FL_CO_PROC | FL_FOR_ARCH6T2, NULL}, ++ {"armv6-m", cortexm1, "6M", FL_FOR_ARCH6M, NULL}, + {"armv7", cortexa8, "7", FL_CO_PROC | FL_FOR_ARCH7, NULL}, + {"armv7-a", cortexa8, "7A", FL_CO_PROC | FL_FOR_ARCH7A, NULL}, + {"armv7-r", cortexr4, "7R", FL_CO_PROC | FL_FOR_ARCH7R, NULL}, + {"armv7-m", cortexm3, "7M", FL_CO_PROC | FL_FOR_ARCH7M, NULL}, + {"ep9312", ep9312, "4T", FL_LDSCHED | FL_CIRRUS | FL_FOR_ARCH4, NULL}, + {"iwmmxt", iwmmxt, "5TE", FL_LDSCHED | FL_STRONG | FL_FOR_ARCH5TE | FL_XSCALE | FL_IWMMXT , NULL}, ++ {"iwmmxt2", iwmmxt2, "5TE", FL_LDSCHED | FL_STRONG | FL_FOR_ARCH5TE | FL_XSCALE | FL_IWMMXT , NULL}, + {NULL, arm_none, NULL, 0 , NULL} + }; + +@@ -667,7 +767,8 @@ static struct arm_cpu_select arm_select[ + + /* The name of the preprocessor macro to define for this architecture. */ + +-char arm_arch_name[] = "__ARM_ARCH_0UNK__"; ++#define ARM_ARCH_NAME_SIZE 25 ++char arm_arch_name[ARM_ARCH_NAME_SIZE] = "__ARM_ARCH_0UNK__"; + + struct fpu_desc + { +@@ -680,13 +781,16 @@ struct fpu_desc + + static const struct fpu_desc all_fpus[] = + { +- {"fpa", FPUTYPE_FPA}, +- {"fpe2", FPUTYPE_FPA_EMU2}, +- {"fpe3", FPUTYPE_FPA_EMU2}, +- {"maverick", FPUTYPE_MAVERICK}, +- {"vfp", FPUTYPE_VFP}, +- {"vfp3", FPUTYPE_VFP3}, +- {"neon", FPUTYPE_NEON} ++ {"fpa", FPUTYPE_FPA}, ++ {"fpe2", FPUTYPE_FPA_EMU2}, ++ {"fpe3", FPUTYPE_FPA_EMU2}, ++ {"maverick", FPUTYPE_MAVERICK}, ++ {"vfp", FPUTYPE_VFP}, ++ {"vfp3", FPUTYPE_VFP3}, ++ {"vfpv3", FPUTYPE_VFP3}, ++ {"vfpv3-d16", FPUTYPE_VFP3D16}, ++ {"neon", FPUTYPE_NEON}, ++ {"neon-fp16", FPUTYPE_NEON_FP16} + }; + + +@@ -702,8 +806,10 @@ static const enum fputype fp_model_for_f + ARM_FP_MODEL_FPA, /* FPUTYPE_FPA_EMU3 */ + ARM_FP_MODEL_MAVERICK, /* FPUTYPE_MAVERICK */ + ARM_FP_MODEL_VFP, /* FPUTYPE_VFP */ ++ ARM_FP_MODEL_VFP, /* FPUTYPE_VFP3D16 */ + ARM_FP_MODEL_VFP, /* FPUTYPE_VFP3 */ +- ARM_FP_MODEL_VFP /* FPUTYPE_NEON */ ++ ARM_FP_MODEL_VFP, /* FPUTYPE_NEON */ ++ ARM_FP_MODEL_VFP /* FPUTYPE_NEON_FP16 */ + }; + + +@@ -724,6 +830,23 @@ static const struct float_abi all_float_ + }; + + ++struct fp16_format ++{ ++ const char *name; ++ enum arm_fp16_format_type fp16_format_type; ++}; ++ ++ ++/* Available values for -mfp16-format=. */ ++ ++static const struct fp16_format all_fp16_formats[] = ++{ ++ {"none", ARM_FP16_FORMAT_NONE}, ++ {"ieee", ARM_FP16_FORMAT_IEEE}, ++ {"alternative", ARM_FP16_FORMAT_ALTERNATIVE} ++}; ++ ++ + struct abi_name + { + const char *name; +@@ -881,6 +1004,131 @@ arm_init_libfuncs (void) + set_optab_libfunc (umod_optab, DImode, NULL); + set_optab_libfunc (smod_optab, SImode, NULL); + set_optab_libfunc (umod_optab, SImode, NULL); ++ ++ /* Half-precision float operations. The compiler handles all operations ++ with NULL libfuncs by converting the SFmode. */ ++ switch (arm_fp16_format) ++ { ++ case ARM_FP16_FORMAT_IEEE: ++ case ARM_FP16_FORMAT_ALTERNATIVE: ++ ++ /* Conversions. */ ++ set_conv_libfunc (trunc_optab, HFmode, SFmode, ++ (arm_fp16_format == ARM_FP16_FORMAT_IEEE ++ ? 
"__gnu_f2h_ieee" ++ : "__gnu_f2h_alternative")); ++ set_conv_libfunc (sext_optab, SFmode, HFmode, ++ (arm_fp16_format == ARM_FP16_FORMAT_IEEE ++ ? "__gnu_h2f_ieee" ++ : "__gnu_h2f_alternative")); ++ ++ /* Arithmetic. */ ++ set_optab_libfunc (add_optab, HFmode, NULL); ++ set_optab_libfunc (sdiv_optab, HFmode, NULL); ++ set_optab_libfunc (smul_optab, HFmode, NULL); ++ set_optab_libfunc (neg_optab, HFmode, NULL); ++ set_optab_libfunc (sub_optab, HFmode, NULL); ++ ++ /* Comparisons. */ ++ set_optab_libfunc (eq_optab, HFmode, NULL); ++ set_optab_libfunc (ne_optab, HFmode, NULL); ++ set_optab_libfunc (lt_optab, HFmode, NULL); ++ set_optab_libfunc (le_optab, HFmode, NULL); ++ set_optab_libfunc (ge_optab, HFmode, NULL); ++ set_optab_libfunc (gt_optab, HFmode, NULL); ++ set_optab_libfunc (unord_optab, HFmode, NULL); ++ break; ++ ++ default: ++ break; ++ } ++} ++ ++/* On AAPCS systems, this is the "struct __va_list". */ ++static GTY(()) tree va_list_type; ++ ++/* Return the type to use as __builtin_va_list. */ ++static tree ++arm_build_builtin_va_list (void) ++{ ++ tree va_list_name; ++ tree ap_field; ++ ++ if (!TARGET_AAPCS_BASED) ++ return std_build_builtin_va_list (); ++ ++ /* AAPCS \S 7.1.4 requires that va_list be a typedef for a type ++ defined as: ++ ++ struct __va_list ++ { ++ void *__ap; ++ }; ++ ++ The C Library ABI further reinforces this definition in \S ++ 4.1. ++ ++ We must follow this definition exactly. The structure tag ++ name is visible in C++ mangled names, and thus forms a part ++ of the ABI. The field name may be used by people who ++ #include . */ ++ /* Create the type. */ ++ va_list_type = lang_hooks.types.make_type (RECORD_TYPE); ++ /* Give it the required name. */ ++ va_list_name = build_decl (TYPE_DECL, ++ get_identifier ("__va_list"), ++ va_list_type); ++ DECL_ARTIFICIAL (va_list_name) = 1; ++ TYPE_NAME (va_list_type) = va_list_name; ++ /* Create the __ap field. */ ++ ap_field = build_decl (FIELD_DECL, ++ get_identifier ("__ap"), ++ ptr_type_node); ++ DECL_ARTIFICIAL (ap_field) = 1; ++ DECL_FIELD_CONTEXT (ap_field) = va_list_type; ++ TYPE_FIELDS (va_list_type) = ap_field; ++ /* Compute its layout. */ ++ layout_type (va_list_type); ++ ++ return va_list_type; ++} ++ ++/* Return an expression of type "void *" pointing to the next ++ available argument in a variable-argument list. VALIST is the ++ user-level va_list object, of type __builtin_va_list. */ ++static tree ++arm_extract_valist_ptr (tree valist) ++{ ++ if (TREE_TYPE (valist) == error_mark_node) ++ return error_mark_node; ++ ++ /* On an AAPCS target, the pointer is stored within "struct ++ va_list". */ ++ if (TARGET_AAPCS_BASED) ++ { ++ tree ap_field = TYPE_FIELDS (TREE_TYPE (valist)); ++ valist = build3 (COMPONENT_REF, TREE_TYPE (ap_field), ++ valist, ap_field, NULL_TREE); ++ } ++ ++ return valist; ++} ++ ++/* Implement TARGET_EXPAND_BUILTIN_VA_START. */ ++static void ++arm_expand_builtin_va_start (tree valist, rtx nextarg) ++{ ++ valist = arm_extract_valist_ptr (valist); ++ std_expand_builtin_va_start (valist, nextarg); ++} ++ ++/* Implement TARGET_GIMPLIFY_VA_ARG_EXPR. */ ++static tree ++arm_gimplify_va_arg_expr (tree valist, tree type, tree *pre_p, ++ tree *post_p) ++{ ++ valist = arm_extract_valist_ptr (valist); ++ return std_gimplify_va_arg_expr (valist, type, pre_p, post_p); + } + + /* Implement TARGET_HANDLE_OPTION. 
*/ +@@ -1007,7 +1255,9 @@ void + arm_override_options (void) + { + unsigned i; ++ int len; + enum processor_type target_arch_cpu = arm_none; ++ enum processor_type selected_cpu = arm_none; + + /* Set up the flags based on the cpu/architecture selected by the user. */ + for (i = ARRAY_SIZE (arm_select); i--;) +@@ -1023,7 +1273,11 @@ arm_override_options (void) + { + /* Set the architecture define. */ + if (i != ARM_OPT_SET_TUNE) +- sprintf (arm_arch_name, "__ARM_ARCH_%s__", sel->arch); ++ { ++ len = snprintf (arm_arch_name, ARM_ARCH_NAME_SIZE, ++ "__ARM_ARCH_%s__", sel->arch); ++ gcc_assert (len < ARM_ARCH_NAME_SIZE); ++ } + + /* Determine the processor core for which we should + tune code-generation. */ +@@ -1040,14 +1294,17 @@ arm_override_options (void) + if (i == ARM_OPT_SET_ARCH) + target_arch_cpu = sel->core; + ++ if (i == ARM_OPT_SET_CPU) ++ selected_cpu = (enum processor_type) (sel - ptr->processors); ++ + if (i != ARM_OPT_SET_TUNE) + { + /* If we have been given an architecture and a processor + make sure that they are compatible. We only generate + a warning though, and we prefer the CPU over the + architecture. */ +- if (insn_flags != 0 && (insn_flags ^ sel->flags)) +- warning (0, "switch -mcpu=%s conflicts with -march= switch", ++ if (insn_flags != 0 && ((insn_flags ^ sel->flags) & ~FL_COMPAT)) ++ warning (0, "switch -mcpu=%s conflicts with -march= switch, assuming CPU feature set", + ptr->string); + + insn_flags = sel->flags; +@@ -1070,21 +1327,20 @@ arm_override_options (void) + { + const struct processors * sel; + unsigned int sought; +- enum processor_type cpu; + +- cpu = TARGET_CPU_DEFAULT; +- if (cpu == arm_none) ++ selected_cpu = TARGET_CPU_DEFAULT; ++ if (selected_cpu == arm_none) + { + #ifdef SUBTARGET_CPU_DEFAULT + /* Use the subtarget default CPU if none was specified by + configure. */ +- cpu = SUBTARGET_CPU_DEFAULT; ++ selected_cpu = SUBTARGET_CPU_DEFAULT; + #endif + /* Default to ARM6. */ +- if (cpu == arm_none) +- cpu = arm6; ++ if (selected_cpu == arm_none) ++ selected_cpu = arm6; + } +- sel = &all_cores[cpu]; ++ sel = &all_cores[selected_cpu]; + + insn_flags = sel->flags; + +@@ -1148,7 +1404,11 @@ arm_override_options (void) + + insn_flags = sel->flags; + } +- sprintf (arm_arch_name, "__ARM_ARCH_%s__", sel->arch); ++ ++ len = snprintf (arm_arch_name, ARM_ARCH_NAME_SIZE, ++ "__ARM_ARCH_%s__", sel->arch); ++ gcc_assert (len < ARM_ARCH_NAME_SIZE); ++ + arm_default_cpu = (enum processor_type) (sel - all_cores); + if (arm_tune == arm_none) + arm_tune = arm_default_cpu; +@@ -1158,18 +1418,59 @@ arm_override_options (void) + chosen. */ + gcc_assert (arm_tune != arm_none); + ++ if (arm_tune == cortexa8 && optimize >= 3) ++ { ++ /* These alignments were experimentally determined to improve SPECint ++ performance on SPECCPU 2000. 
*/ ++ if (align_functions <= 0) ++ align_functions = 16; ++ if (align_jumps <= 0) ++ align_jumps = 16; ++ } ++ + tune_flags = all_cores[(int)arm_tune].flags; +- if (optimize_size) +- targetm.rtx_costs = arm_size_rtx_costs; ++ ++ if (target_fp16_format_name) ++ { ++ for (i = 0; i < ARRAY_SIZE (all_fp16_formats); i++) ++ { ++ if (streq (all_fp16_formats[i].name, target_fp16_format_name)) ++ { ++ arm_fp16_format = all_fp16_formats[i].fp16_format_type; ++ break; ++ } ++ } ++ if (i == ARRAY_SIZE (all_fp16_formats)) ++ error ("invalid __fp16 format option: -mfp16-format=%s", ++ target_fp16_format_name); ++ } ++ else ++ arm_fp16_format = ARM_FP16_FORMAT_NONE; ++ ++ if (target_abi_name) ++ { ++ for (i = 0; i < ARRAY_SIZE (arm_all_abis); i++) ++ { ++ if (streq (arm_all_abis[i].name, target_abi_name)) ++ { ++ arm_abi = arm_all_abis[i].abi_type; ++ break; ++ } ++ } ++ if (i == ARRAY_SIZE (arm_all_abis)) ++ error ("invalid ABI option: -mabi=%s", target_abi_name); ++ } + else +- targetm.rtx_costs = all_cores[(int)arm_tune].rtx_costs; ++ arm_abi = ARM_DEFAULT_ABI; + + /* Make sure that the processor choice does not conflict with any of the + other command line choices. */ + if (TARGET_ARM && !(insn_flags & FL_NOTM)) + error ("target CPU does not support ARM mode"); + +- if (TARGET_INTERWORK && !(insn_flags & FL_THUMB)) ++ /* BPABI targets use linker tricks to allow interworking on cores ++ without thumb support. */ ++ if (TARGET_INTERWORK && !((insn_flags & FL_THUMB) || TARGET_BPABI)) + { + warning (0, "target CPU does not support interworking" ); + target_flags &= ~MASK_INTERWORK; +@@ -1245,10 +1546,45 @@ arm_override_options (void) + arm_ld_sched = (tune_flags & FL_LDSCHED) != 0; + arm_tune_strongarm = (tune_flags & FL_STRONG) != 0; + thumb_code = (TARGET_ARM == 0); ++ janus2_code = (TARGET_FIX_JANUS != 0); ++ if (janus2_code && TARGET_THUMB2) ++ error ("janus2 fix is not applicable when targeting a thumb2 core"); + arm_tune_wbuf = (tune_flags & FL_WBUF) != 0; + arm_tune_xscale = (tune_flags & FL_XSCALE) != 0; ++ arm_tune_marvell_f = (tune_flags & FL_MARVELL_F) != 0; ++ arm_tune_cortex_a9 = (arm_tune == cortexa9) != 0; + arm_arch_iwmmxt = (insn_flags & FL_IWMMXT) != 0; +- arm_arch_hwdiv = (insn_flags & FL_DIV) != 0; ++ ++ /* Hardware integer division is supported by some variants of the ARM ++ architecture in Thumb-2 mode. In addition some (but not all) Marvell ++ CPUs support their own hardware integer division instructions. ++ The assembler will pick the correct encoding. */ ++ if (TARGET_MARVELL_DIV && (insn_flags & FL_MARVELL_F) == 0) ++ error ("-mmarvell-div is only supported when targeting a Marvell core"); ++ ++ arm_arch_hwdiv = (TARGET_ARM && TARGET_MARVELL_DIV) ++ || (TARGET_THUMB2 && (insn_flags & FL_DIV) != 0); ++ ++ /* If we are not using the default (ARM mode) section anchor offset ++ ranges, then set the correct ranges now. */ ++ if (TARGET_THUMB1) ++ { ++ /* Thumb-1 LDR instructions cannot have negative offsets. ++ Permissible positive offset ranges are 5-bit (for byte loads), ++ 6-bit (for halfword loads), or 7-bit (for word loads). ++ Empirical results suggest a 7-bit anchor range gives the best ++ overall code size. */ ++ targetm.min_anchor_offset = 0; ++ targetm.max_anchor_offset = 127; ++ } ++ else if (TARGET_THUMB2) ++ { ++ /* The minimum is set such that the total size of the block ++ for a particular anchor is 248 + 1 + 4095 bytes, which is ++ divisible by eight, ensuring natural spacing of anchors. 
*/ ++ targetm.min_anchor_offset = -248; ++ targetm.max_anchor_offset = 4095; ++ } + + /* V5 code we generate is completely interworking capable, so we turn off + TARGET_INTERWORK here to avoid many tests later on. */ +@@ -1261,22 +1597,6 @@ arm_override_options (void) + if (arm_arch5) + target_flags &= ~MASK_INTERWORK; + +- if (target_abi_name) +- { +- for (i = 0; i < ARRAY_SIZE (arm_all_abis); i++) +- { +- if (streq (arm_all_abis[i].name, target_abi_name)) +- { +- arm_abi = arm_all_abis[i].abi_type; +- break; +- } +- } +- if (i == ARRAY_SIZE (arm_all_abis)) +- error ("invalid ABI option: -mabi=%s", target_abi_name); +- } +- else +- arm_abi = ARM_DEFAULT_ABI; +- + if (TARGET_IWMMXT && !ARM_DOUBLEWORD_ALIGN) + error ("iwmmxt requires an AAPCS compatible ABI for proper operation"); + +@@ -1354,9 +1674,6 @@ arm_override_options (void) + else + arm_float_abi = TARGET_DEFAULT_FLOAT_ABI; + +- if (arm_float_abi == ARM_FLOAT_ABI_HARD && TARGET_VFP) +- sorry ("-mfloat-abi=hard and VFP"); +- + /* FPA and iWMMXt are incompatible because the insn encodings overlap. + VFP and iWMMXt can theoretically coexist, but it's unlikely such silicon + will ever exist. GCC makes no attempt to support this combination. */ +@@ -1367,10 +1684,36 @@ arm_override_options (void) + if (TARGET_THUMB2 && TARGET_IWMMXT) + sorry ("Thumb-2 iWMMXt"); + ++ /* __fp16 support currently assumes the core has ldrh. */ ++ if (!arm_arch4 && arm_fp16_format != ARM_FP16_FORMAT_NONE) ++ sorry ("__fp16 and no ldrh"); ++ + /* If soft-float is specified then don't use FPU. */ + if (TARGET_SOFT_FLOAT) + arm_fpu_arch = FPUTYPE_NONE; + ++ if (TARGET_AAPCS_BASED) ++ { ++ if (arm_abi == ARM_ABI_IWMMXT) ++ arm_pcs_default = ARM_PCS_AAPCS_IWMMXT; ++ else if (arm_float_abi == ARM_FLOAT_ABI_HARD ++ && TARGET_HARD_FLOAT ++ && TARGET_VFP) ++ arm_pcs_default = ARM_PCS_AAPCS_VFP; ++ else ++ arm_pcs_default = ARM_PCS_AAPCS; ++ } ++ else ++ { ++ if (arm_float_abi == ARM_FLOAT_ABI_HARD && TARGET_VFP) ++ sorry ("-mfloat-abi=hard and VFP"); ++ ++ if (arm_abi == ARM_ABI_APCS) ++ arm_pcs_default = ARM_PCS_APCS; ++ else ++ arm_pcs_default = ARM_PCS_ATPCS; ++ } ++ + /* For arm2/3 there is no need to do any scheduling if there is only + a floating point emulator, or we are doing software floating-point. */ + if ((TARGET_SOFT_FLOAT +@@ -1456,6 +1799,15 @@ arm_override_options (void) + arm_pic_register = pic_register; + } + ++ /* Enable -mfix-cortex-m3-ldrd by default for Cortex-M3 cores. */ ++ if (fix_cm3_ldrd == 2) ++ { ++ if (selected_cpu == cortexm3) ++ fix_cm3_ldrd = 1; ++ else ++ fix_cm3_ldrd = 0; ++ } ++ + /* ??? We might want scheduling for thumb2. */ + if (TARGET_THUMB && flag_schedule_insns) + { +@@ -1493,6 +1845,13 @@ arm_override_options (void) + + /* Register global variables with the garbage collector. */ + arm_add_gc_roots (); ++ ++ if (low_irq_latency && TARGET_THUMB) ++ { ++ warning (0, ++ "-low-irq-latency has no effect when compiling for the Thumb"); ++ low_irq_latency = 0; ++ } + } + + static void +@@ -1614,6 +1973,14 @@ arm_current_func_type (void) + + return cfun->machine->func_type; + } ++ ++bool ++arm_allocate_stack_slots_for_args (void) ++{ ++ /* Naked functions should not allocate stack slots for arguments. */ ++ return !IS_NAKED (arm_current_func_type ()); ++} ++ + + /* Return 1 if it is possible to return using a single instruction. 
+ If SIBLING is non-null, this is a test for a return before a sibling +@@ -1656,10 +2023,11 @@ use_return_insn (int iscond, rtx sibling + || current_function_calls_alloca + /* Or if there is a stack adjustment. However, if the stack pointer + is saved on the stack, we can use a pre-incrementing stack load. */ +- || !(stack_adjust == 0 || (frame_pointer_needed && stack_adjust == 4))) ++ || !(stack_adjust == 0 || (TARGET_APCS_FRAME && frame_pointer_needed ++ && stack_adjust == 4))) + return 0; + +- saved_int_regs = arm_compute_save_reg_mask (); ++ saved_int_regs = offsets->saved_regs_mask; + + /* Unfortunately, the insn + +@@ -1812,6 +2180,24 @@ const_ok_for_op (HOST_WIDE_INT i, enum r + switch (code) + { + case PLUS: ++ case COMPARE: ++ case EQ: ++ case NE: ++ case GT: ++ case LE: ++ case LT: ++ case GE: ++ case GEU: ++ case LTU: ++ case GTU: ++ case LEU: ++ case UNORDERED: ++ case ORDERED: ++ case UNEQ: ++ case UNGE: ++ case UNLT: ++ case UNGT: ++ case UNLE: + return const_ok_for_arm (ARM_SIGN_EXTEND (-i)); + + case MINUS: /* Should only occur with (MINUS I reg) => rsb */ +@@ -1872,14 +2258,22 @@ arm_split_constant (enum rtx_code code, + { + /* Currently SET is the only monadic value for CODE, all + the rest are diadic. */ +- emit_set_insn (target, GEN_INT (val)); ++ if (TARGET_USE_MOVT) ++ arm_emit_movpair (target, GEN_INT (val)); ++ else ++ emit_set_insn (target, GEN_INT (val)); ++ + return 1; + } + else + { + rtx temp = subtargets ? gen_reg_rtx (mode) : target; + +- emit_set_insn (temp, GEN_INT (val)); ++ if (TARGET_USE_MOVT) ++ arm_emit_movpair (temp, GEN_INT (val)); ++ else ++ emit_set_insn (temp, GEN_INT (val)); ++ + /* For MINUS, the value is subtracted from, since we never + have subtraction of a constant. */ + if (code == MINUS) +@@ -2678,14 +3072,19 @@ arm_canonicalize_comparison (enum rtx_co + + /* Define how to find the value returned by a function. */ + +-rtx +-arm_function_value(const_tree type, const_tree func ATTRIBUTE_UNUSED) ++static rtx ++arm_function_value(const_tree type, const_tree func, ++ bool outgoing ATTRIBUTE_UNUSED) + { + enum machine_mode mode; + int unsignedp ATTRIBUTE_UNUSED; + rtx r ATTRIBUTE_UNUSED; + + mode = TYPE_MODE (type); ++ ++ if (TARGET_AAPCS_BASED) ++ return aapcs_allocate_return_reg (mode, type, func); ++ + /* Promote integer types. */ + if (INTEGRAL_TYPE_P (type)) + PROMOTE_FUNCTION_MODE (mode, unsignedp, type); +@@ -2702,7 +3101,36 @@ arm_function_value(const_tree type, cons + } + } + +- return LIBCALL_VALUE(mode); ++ return LIBCALL_VALUE (mode); ++} ++ ++rtx ++arm_libcall_value (enum machine_mode mode, rtx libcall) ++{ ++ if (TARGET_AAPCS_BASED && arm_pcs_default != ARM_PCS_AAPCS ++ && GET_MODE_CLASS (mode) == MODE_FLOAT) ++ { ++ /* The following libcalls return their result in integer registers, ++ even though they return a floating point value. */ ++ if (rtx_equal_p (libcall, ++ convert_optab_libfunc (sfloat_optab, mode, SImode)) ++ || rtx_equal_p (libcall, ++ convert_optab_libfunc (ufloat_optab, mode, SImode)) ++ || rtx_equal_p (libcall, ++ convert_optab_libfunc (sfloat_optab, mode, DImode)) ++ || rtx_equal_p (libcall, ++ convert_optab_libfunc (ufloat_optab, mode, DImode)) ++ || rtx_equal_p (libcall, ++ convert_optab_libfunc (trunc_optab, HFmode, SFmode)) ++ || rtx_equal_p (libcall, ++ convert_optab_libfunc (sext_optab, SFmode, HFmode))) ++ return gen_rtx_REG (mode, ARG_REGISTER(1)); ++ ++ /* XXX There are other libcalls that return in integer registers, ++ but I think they are all handled by hard insns. 
*/ ++ } ++ ++ return LIBCALL_VALUE (mode); + } + + /* Determine the amount of memory needed to store the possible return +@@ -2712,10 +3140,12 @@ arm_apply_result_size (void) + { + int size = 16; + +- if (TARGET_ARM) ++ if (TARGET_32BIT) + { + if (TARGET_HARD_FLOAT_ABI) + { ++ if (TARGET_VFP) ++ size += 32; + if (TARGET_FPA) + size += 12; + if (TARGET_MAVERICK) +@@ -2728,27 +3158,56 @@ arm_apply_result_size (void) + return size; + } + +-/* Decide whether a type should be returned in memory (true) +- or in a register (false). This is called by the macro +- RETURN_IN_MEMORY. */ +-int +-arm_return_in_memory (const_tree type) ++/* Decide whether TYPE should be returned in memory (true) ++ or in a register (false). FNTYPE is the type of the function making ++ the call. */ ++static bool ++arm_return_in_memory (const_tree type, const_tree fntype) + { + HOST_WIDE_INT size; + +- size = int_size_in_bytes (type); ++ size = int_size_in_bytes (type); /* Negative if not fixed size. */ ++ ++ if (TARGET_AAPCS_BASED) ++ { ++ /* Simple, non-aggregate types (ie not including vectors and ++ complex) are always returned in a register (or registers). ++ We don't care about which register here, so we can short-cut ++ some of the detail. */ ++ if (!AGGREGATE_TYPE_P (type) ++ && TREE_CODE (type) != VECTOR_TYPE ++ && TREE_CODE (type) != COMPLEX_TYPE) ++ return false; ++ ++ /* Any return value that is no larger than one word can be ++ returned in r0. */ ++ if (((unsigned HOST_WIDE_INT) size) <= UNITS_PER_WORD) ++ return false; ++ ++ /* Check any available co-processors to see if they accept the ++ type as a register candidate (VFP, for example, can return ++ some aggregates in consecutive registers). These aren't ++ available if the call is variadic. */ ++ if (aapcs_select_return_coproc (type, fntype) >= 0) ++ return false; ++ ++ /* Vector values should be returned using ARM registers, not ++ memory (unless they're over 16 bytes, which will break since ++ we only have four call-clobbered registers to play with). */ ++ if (TREE_CODE (type) == VECTOR_TYPE) ++ return (size < 0 || size > (4 * UNITS_PER_WORD)); ++ ++ /* The rest go in memory. */ ++ return true; ++ } + +- /* Vector values should be returned using ARM registers, not memory (unless +- they're over 16 bytes, which will break since we only have four +- call-clobbered registers to play with). */ + if (TREE_CODE (type) == VECTOR_TYPE) + return (size < 0 || size > (4 * UNITS_PER_WORD)); + + if (!AGGREGATE_TYPE_P (type) && +- !(TARGET_AAPCS_BASED && TREE_CODE (type) == COMPLEX_TYPE)) +- /* All simple types are returned in registers. +- For AAPCS, complex types are treated the same as aggregates. */ +- return 0; ++ (TREE_CODE (type) != VECTOR_TYPE)) ++ /* All simple types are returned in registers. */ ++ return false; + + if (arm_abi != ARM_ABI_APCS) + { +@@ -2765,7 +3224,7 @@ arm_return_in_memory (const_tree type) + the aggregate is either huge or of variable size, and in either case + we will want to return it via memory and not in a register. */ + if (size < 0 || size > UNITS_PER_WORD) +- return 1; ++ return true; + + if (TREE_CODE (type) == RECORD_TYPE) + { +@@ -2785,18 +3244,18 @@ arm_return_in_memory (const_tree type) + continue; + + if (field == NULL) +- return 0; /* An empty structure. Allowed by an extension to ANSI C. */ ++ return false; /* An empty structure. Allowed by an extension to ANSI C. */ + + /* Check that the first field is valid for returning in a register. */ + + /* ... 
Floats are not allowed */ + if (FLOAT_TYPE_P (TREE_TYPE (field))) +- return 1; ++ return true; + + /* ... Aggregates that are not themselves valid for returning in + a register are not allowed. */ +- if (RETURN_IN_MEMORY (TREE_TYPE (field))) +- return 1; ++ if (arm_return_in_memory (TREE_TYPE (field), NULL_TREE)) ++ return true; + + /* Now check the remaining fields, if any. Only bitfields are allowed, + since they are not addressable. */ +@@ -2808,10 +3267,10 @@ arm_return_in_memory (const_tree type) + continue; + + if (!DECL_BIT_FIELD_TYPE (field)) +- return 1; ++ return true; + } + +- return 0; ++ return false; + } + + if (TREE_CODE (type) == UNION_TYPE) +@@ -2828,18 +3287,18 @@ arm_return_in_memory (const_tree type) + continue; + + if (FLOAT_TYPE_P (TREE_TYPE (field))) +- return 1; ++ return true; + +- if (RETURN_IN_MEMORY (TREE_TYPE (field))) +- return 1; ++ if (arm_return_in_memory (TREE_TYPE (field), NULL_TREE)) ++ return true; + } + +- return 0; ++ return false; + } + #endif /* not ARM_WINCE */ + + /* Return all other types in memory. */ +- return 1; ++ return true; + } + + /* Indicate whether or not words of a double are in big-endian order. */ +@@ -2864,60 +3323,811 @@ arm_float_words_big_endian (void) + return 1; + } + +-/* Initialize a variable CUM of type CUMULATIVE_ARGS +- for a call to a function whose data type is FNTYPE. +- For a library call, FNTYPE is NULL. */ +-void +-arm_init_cumulative_args (CUMULATIVE_ARGS *pcum, tree fntype, +- rtx libname ATTRIBUTE_UNUSED, +- tree fndecl ATTRIBUTE_UNUSED) ++const struct pcs_attribute_arg + { +- /* On the ARM, the offset starts at 0. */ +- pcum->nregs = 0; +- pcum->iwmmxt_nregs = 0; +- pcum->can_split = true; +- +- /* Varargs vectors are treated the same as long long. +- named_count avoids having to change the way arm handles 'named' */ +- pcum->named_count = 0; +- pcum->nargs = 0; ++ const char *arg; ++ enum arm_pcs value; ++} pcs_attribute_args[] = ++ { ++ {"aapcs", ARM_PCS_AAPCS}, ++ {"aapcs-vfp", ARM_PCS_AAPCS_VFP}, ++ {"aapcs-iwmmxt", ARM_PCS_AAPCS_IWMMXT}, ++ {"atpcs", ARM_PCS_ATPCS}, ++ {"apcs", ARM_PCS_APCS}, ++ {NULL, ARM_PCS_UNKNOWN} ++ }; + +- if (TARGET_REALLY_IWMMXT && fntype) +- { +- tree fn_arg; ++static enum arm_pcs ++arm_pcs_from_attribute (tree attr) ++{ ++ const struct pcs_attribute_arg *ptr; ++ const char *arg; + +- for (fn_arg = TYPE_ARG_TYPES (fntype); +- fn_arg; +- fn_arg = TREE_CHAIN (fn_arg)) +- pcum->named_count += 1; ++ /* Get the value of the argument. */ ++ if (TREE_VALUE (attr) == NULL_TREE ++ || TREE_CODE (TREE_VALUE (attr)) != STRING_CST) ++ return ARM_PCS_UNKNOWN; + +- if (! pcum->named_count) +- pcum->named_count = INT_MAX; +- } +-} ++ arg = TREE_STRING_POINTER (TREE_VALUE (attr)); + ++ /* Check it against the list of known arguments. */ ++ for (ptr = pcs_attribute_args; ptr->arg != NULL; ptr++) ++ if (streq (arg, ptr->arg)) ++ return ptr->value; + +-/* Return true if mode/type need doubleword alignment. */ +-bool +-arm_needs_doubleword_align (enum machine_mode mode, tree type) +-{ +- return (GET_MODE_ALIGNMENT (mode) > PARM_BOUNDARY +- || (type && TYPE_ALIGN (type) > PARM_BOUNDARY)); ++ /* An unrecognized interrupt type. */ ++ return ARM_PCS_UNKNOWN; + } + ++/* Get the PCS variant to use for this call. TYPE is the function's type ++ specification, DECL is the specific declartion. DECL may be null if ++ the call could be indirect or if this is a library call. 
*/ ++static enum arm_pcs ++arm_get_pcs_model (const_tree type, const_tree decl) ++{ ++ bool user_convention = false; ++ enum arm_pcs user_pcs = arm_pcs_default; ++ tree attr; + +-/* Determine where to put an argument to a function. +- Value is zero to push the argument on the stack, +- or a hard register in which to store the argument. ++ gcc_assert (type); + +- MODE is the argument's machine mode. +- TYPE is the data type of the argument (as a tree). +- This is null for libcalls where that information may +- not be available. +- CUM is a variable of type CUMULATIVE_ARGS which gives info about +- the preceding args and about the function being called. +- NAMED is nonzero if this argument is a named parameter +- (otherwise it is an extra parameter matching an ellipsis). */ ++ attr = lookup_attribute ("pcs", TYPE_ATTRIBUTES (type)); ++ if (attr) ++ { ++ user_pcs = arm_pcs_from_attribute (TREE_VALUE (attr)); ++ user_convention = true; ++ } ++ ++ if (TARGET_AAPCS_BASED) ++ { ++ /* Detect varargs functions. These always use the base rules ++ (no argument is ever a candidate for a co-processor ++ register). */ ++ bool base_rules = (TYPE_ARG_TYPES (type) != 0 ++ && (TREE_VALUE (tree_last (TYPE_ARG_TYPES (type))) ++ != void_type_node)); ++ ++ if (user_convention) ++ { ++ if (user_pcs > ARM_PCS_AAPCS_LOCAL) ++ sorry ("Non-AAPCS derived PCS variant"); ++ else if (base_rules && user_pcs != ARM_PCS_AAPCS) ++ error ("Variadic functions must use the base AAPCS variant"); ++ } ++ ++ if (base_rules) ++ return ARM_PCS_AAPCS; ++ else if (user_convention) ++ return user_pcs; ++ else if (decl && flag_unit_at_a_time) ++ { ++ /* Local functions never leak outside this compilation unit, ++ so we are free to use whatever conventions are ++ appropriate. */ ++ /* FIXME: remove CONST_CAST_TREE when cgraph is constified. */ ++ struct cgraph_local_info *i = cgraph_local_info (CONST_CAST_TREE(decl)); ++ if (i && i->local) ++ return ARM_PCS_AAPCS_LOCAL; ++ } ++ } ++ else if (user_convention && user_pcs != arm_pcs_default) ++ sorry ("PCS variant"); ++ ++ /* For everything else we use the target's default. */ ++ return arm_pcs_default; ++} ++ ++ ++static void ++aapcs_vfp_cum_init (CUMULATIVE_ARGS *pcum ATTRIBUTE_UNUSED, ++ const_tree fntype ATTRIBUTE_UNUSED, ++ rtx libcall ATTRIBUTE_UNUSED, ++ const_tree fndecl ATTRIBUTE_UNUSED) ++{ ++ /* Record the unallocated VFP registers. */ ++ pcum->aapcs_vfp_regs_free = (1 << NUM_VFP_ARG_REGS) - 1; ++ pcum->aapcs_vfp_reg_alloc = 0; ++} ++ ++/* Walk down the type tree of TYPE counting consecutive base elements. ++ If *MODEP is VOIDmode, then set it to the first valid floating point ++ type. If a non-floating point type is found, or if a floating point ++ type that doesn't match a non-VOIDmode *MODEP is found, then return -1, ++ otherwise return the count in the sub-tree. 
*/ ++static int ++aapcs_vfp_sub_candidate (const_tree type, enum machine_mode *modep) ++{ ++ enum machine_mode mode; ++ HOST_WIDE_INT size; ++ ++ switch (TREE_CODE (type)) ++ { ++ case REAL_TYPE: ++ mode = TYPE_MODE (type); ++ if (mode != DFmode && mode != SFmode) ++ return -1; ++ ++ if (*modep == VOIDmode) ++ *modep = mode; ++ ++ if (*modep == mode) ++ return 1; ++ ++ break; ++ ++ case COMPLEX_TYPE: ++ mode = TYPE_MODE (TREE_TYPE (type)); ++ if (mode != DFmode && mode != SFmode) ++ return -1; ++ ++ if (*modep == VOIDmode) ++ *modep = mode; ++ ++ if (*modep == mode) ++ return 2; ++ ++ break; ++ ++ case VECTOR_TYPE: ++ /* Use V2SImode and V4SImode as representatives of all 64-bit ++ and 128-bit vector types, whether or not those modes are ++ supported with the present options. */ ++ size = int_size_in_bytes (type); ++ switch (size) ++ { ++ case 8: ++ mode = V2SImode; ++ break; ++ case 16: ++ mode = V4SImode; ++ break; ++ default: ++ return -1; ++ } ++ ++ if (*modep == VOIDmode) ++ *modep = mode; ++ ++ /* Vector modes are considered to be opaque: two vectors are ++ equivalent for the purposes of being homogeneous aggregates ++ if they are the same size. */ ++ if (*modep == mode) ++ return 1; ++ ++ break; ++ ++ case ARRAY_TYPE: ++ { ++ int count; ++ tree index = TYPE_DOMAIN (type); ++ ++ /* Can't handle incomplete types. */ ++ if (!COMPLETE_TYPE_P(type)) ++ return -1; ++ ++ count = aapcs_vfp_sub_candidate (TREE_TYPE (type), modep); ++ if (count == -1 ++ || !index ++ || !TYPE_MAX_VALUE (index) ++ || !host_integerp (TYPE_MAX_VALUE (index), 1) ++ || !TYPE_MIN_VALUE (index) ++ || !host_integerp (TYPE_MIN_VALUE (index), 1) ++ || count < 0) ++ return -1; ++ ++ count *= (1 + tree_low_cst (TYPE_MAX_VALUE (index), 1) ++ - tree_low_cst (TYPE_MIN_VALUE (index), 1)); ++ ++ /* There must be no padding. */ ++ if (!host_integerp (TYPE_SIZE (type), 1) ++ || (tree_low_cst (TYPE_SIZE (type), 1) ++ != count * GET_MODE_BITSIZE (*modep))) ++ return -1; ++ ++ return count; ++ } ++ ++ case RECORD_TYPE: ++ { ++ int count = 0; ++ int sub_count; ++ tree field; ++ ++ /* Can't handle incomplete types. */ ++ if (!COMPLETE_TYPE_P(type)) ++ return -1; ++ ++ for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field)) ++ { ++ if (TREE_CODE (field) != FIELD_DECL) ++ continue; ++ ++ sub_count = aapcs_vfp_sub_candidate (TREE_TYPE (field), modep); ++ if (sub_count < 0) ++ return -1; ++ count += sub_count; ++ } ++ ++ /* There must be no padding. */ ++ if (!host_integerp (TYPE_SIZE (type), 1) ++ || (tree_low_cst (TYPE_SIZE (type), 1) ++ != count * GET_MODE_BITSIZE (*modep))) ++ return -1; ++ ++ return count; ++ } ++ ++ case UNION_TYPE: ++ case QUAL_UNION_TYPE: ++ { ++ /* These aren't very interesting except in a degenerate case. */ ++ int count = 0; ++ int sub_count; ++ tree field; ++ ++ /* Can't handle incomplete types. */ ++ if (!COMPLETE_TYPE_P(type)) ++ return -1; ++ ++ for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field)) ++ { ++ if (TREE_CODE (field) != FIELD_DECL) ++ continue; ++ ++ sub_count = aapcs_vfp_sub_candidate (TREE_TYPE (field), modep); ++ if (sub_count < 0) ++ return -1; ++ count = count > sub_count ? count : sub_count; ++ } ++ ++ /* There must be no padding. 
*/ ++ if (!host_integerp (TYPE_SIZE (type), 1) ++ || (tree_low_cst (TYPE_SIZE (type), 1) ++ != count * GET_MODE_BITSIZE (*modep))) ++ return -1; ++ ++ return count; ++ } ++ ++ default: ++ break; ++ } ++ ++ return -1; ++} ++ ++static bool ++aapcs_vfp_is_call_or_return_candidate (enum machine_mode mode, const_tree type, ++ int *base_mode, ++ int *count) ++{ ++ if (GET_MODE_CLASS (mode) == MODE_FLOAT ++ || GET_MODE_CLASS (mode) == MODE_VECTOR_INT ++ || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT) ++ { ++ *count = 1; ++ *base_mode = mode; ++ return true; ++ } ++ else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT) ++ { ++ *count = 2; ++ *base_mode = (mode == DCmode ? DFmode : SFmode); ++ return true; ++ } ++ else if (type && (mode == BLKmode || TREE_CODE (type) == VECTOR_TYPE)) ++ { ++ enum machine_mode aggregate_mode = VOIDmode; ++ int ag_count = aapcs_vfp_sub_candidate (type, &aggregate_mode); ++ ++ if (ag_count > 0 && ag_count <= 4) ++ { ++ *count = ag_count; ++ *base_mode = aggregate_mode; ++ return true; ++ } ++ } ++ return false; ++} ++ ++static bool ++aapcs_vfp_is_return_candidate (enum arm_pcs pcs_variant, ++ enum machine_mode mode, const_tree type) ++{ ++ int count ATTRIBUTE_UNUSED; ++ int ag_mode ATTRIBUTE_UNUSED; ++ ++ if (!(pcs_variant == ARM_PCS_AAPCS_VFP ++ || (pcs_variant == ARM_PCS_AAPCS_LOCAL ++ && TARGET_32BIT && TARGET_VFP && TARGET_HARD_FLOAT))) ++ return false; ++ return aapcs_vfp_is_call_or_return_candidate (mode, type, &ag_mode, &count); ++} ++ ++static bool ++aapcs_vfp_is_call_candidate (CUMULATIVE_ARGS *pcum, enum machine_mode mode, ++ const_tree type) ++{ ++ if (!(pcum->pcs_variant == ARM_PCS_AAPCS_VFP ++ || (pcum->pcs_variant == ARM_PCS_AAPCS_LOCAL ++ && TARGET_32BIT && TARGET_VFP && TARGET_HARD_FLOAT))) ++ return false; ++ return aapcs_vfp_is_call_or_return_candidate (mode, type, ++ &pcum->aapcs_vfp_rmode, ++ &pcum->aapcs_vfp_rcount); ++} ++ ++static bool ++aapcs_vfp_allocate (CUMULATIVE_ARGS *pcum, enum machine_mode mode, ++ const_tree type ATTRIBUTE_UNUSED) ++{ ++ int shift = GET_MODE_SIZE (pcum->aapcs_vfp_rmode) / GET_MODE_SIZE (SFmode); ++ unsigned mask = (1 << (shift * pcum->aapcs_vfp_rcount)) - 1; ++ int regno; ++ ++ for (regno = 0; regno < NUM_VFP_ARG_REGS; regno += shift) ++ if (((pcum->aapcs_vfp_regs_free >> regno) & mask) == mask) ++ { ++ pcum->aapcs_vfp_reg_alloc = mask << regno; ++ if (mode == BLKmode || (mode == TImode && !TARGET_NEON)) ++ { ++ int i; ++ int rcount = pcum->aapcs_vfp_rcount; ++ int rshift = shift; ++ enum machine_mode rmode = pcum->aapcs_vfp_rmode; ++ rtx par; ++ if (!TARGET_NEON) ++ { ++ /* Avoid using unsupported vector modes. 
*/ ++ if (rmode == V2SImode) ++ rmode = DImode; ++ else if (rmode == V4SImode) ++ { ++ rmode = DImode; ++ rcount *= 2; ++ rshift /= 2; ++ } ++ } ++ par = gen_rtx_PARALLEL (mode, rtvec_alloc (rcount)); ++ for (i = 0; i < rcount; i++) ++ { ++ rtx tmp = gen_rtx_REG (rmode, ++ FIRST_VFP_REGNUM + regno + i * rshift); ++ tmp = gen_rtx_EXPR_LIST ++ (VOIDmode, tmp, ++ GEN_INT (i * GET_MODE_SIZE (rmode))); ++ XVECEXP (par, 0, i) = tmp; ++ } ++ ++ pcum->aapcs_reg = par; ++ } ++ else ++ pcum->aapcs_reg = gen_rtx_REG (mode, FIRST_VFP_REGNUM + regno); ++ return true; ++ } ++ return false; ++} ++ ++static rtx ++aapcs_vfp_allocate_return_reg (enum arm_pcs pcs_variant ATTRIBUTE_UNUSED, ++ enum machine_mode mode, ++ const_tree type ATTRIBUTE_UNUSED) ++{ ++ if (!(pcs_variant == ARM_PCS_AAPCS_VFP ++ || (pcs_variant == ARM_PCS_AAPCS_LOCAL ++ && TARGET_32BIT && TARGET_VFP && TARGET_HARD_FLOAT))) ++ return false; ++ if (mode == BLKmode || (mode == TImode && !TARGET_NEON)) ++ { ++ int count; ++ int ag_mode; ++ int i; ++ rtx par; ++ int shift; ++ ++ aapcs_vfp_is_call_or_return_candidate (mode, type, &ag_mode, &count); ++ ++ if (!TARGET_NEON) ++ { ++ if (ag_mode == V2SImode) ++ ag_mode = DImode; ++ else if (ag_mode == V4SImode) ++ { ++ ag_mode = DImode; ++ count *= 2; ++ } ++ } ++ shift = GET_MODE_SIZE(ag_mode) / GET_MODE_SIZE(SFmode); ++ par = gen_rtx_PARALLEL (mode, rtvec_alloc (count)); ++ for (i = 0; i < count; i++) ++ { ++ rtx tmp = gen_rtx_REG (ag_mode, FIRST_VFP_REGNUM + i * shift); ++ tmp = gen_rtx_EXPR_LIST (VOIDmode, tmp, ++ GEN_INT (i * GET_MODE_SIZE (ag_mode))); ++ XVECEXP (par, 0, i) = tmp; ++ } ++ ++ return par; ++ } ++ ++ return gen_rtx_REG (mode, FIRST_VFP_REGNUM); ++} ++ ++static void ++aapcs_vfp_advance (CUMULATIVE_ARGS *pcum ATTRIBUTE_UNUSED, ++ enum machine_mode mode ATTRIBUTE_UNUSED, ++ const_tree type ATTRIBUTE_UNUSED) ++{ ++ pcum->aapcs_vfp_regs_free &= ~pcum->aapcs_vfp_reg_alloc; ++ pcum->aapcs_vfp_reg_alloc = 0; ++ return; ++} ++ ++#define AAPCS_CP(X) \ ++ { \ ++ aapcs_ ## X ## _cum_init, \ ++ aapcs_ ## X ## _is_call_candidate, \ ++ aapcs_ ## X ## _allocate, \ ++ aapcs_ ## X ## _is_return_candidate, \ ++ aapcs_ ## X ## _allocate_return_reg, \ ++ aapcs_ ## X ## _advance \ ++ } ++ ++/* Table of co-processors that can be used to pass arguments in ++ registers. Idealy no arugment should be a candidate for more than ++ one co-processor table entry, but the table is processed in order ++ and stops after the first match. If that entry then fails to put ++ the argument into a co-processor register, the argument will go on ++ the stack. */ ++static struct ++{ ++ /* Initialize co-processor related state in CUMULATIVE_ARGS structure. */ ++ void (*cum_init) (CUMULATIVE_ARGS *, const_tree, rtx, const_tree); ++ ++ /* Return true if an argument of mode MODE (or type TYPE if MODE is ++ BLKmode) is a candidate for this co-processor's registers; this ++ function should ignore any position-dependent state in ++ CUMULATIVE_ARGS and only use call-type dependent information. */ ++ bool (*is_call_candidate) (CUMULATIVE_ARGS *, enum machine_mode, const_tree); ++ ++ /* Return true if the argument does get a co-processor register; it ++ should set aapcs_reg to an RTX of the register allocated as is ++ required for a return from FUNCTION_ARG. */ ++ bool (*allocate) (CUMULATIVE_ARGS *, enum machine_mode, const_tree); ++ ++ /* Return true if a result of mode MODE (or type TYPE if MODE is ++ BLKmode) is can be returned in this co-processor's registers. 
*/ ++ bool (*is_return_candidate) (enum arm_pcs, enum machine_mode, const_tree); ++ ++ /* Allocate and return an RTX element to hold the return type of a ++ call, this routine must not fail and will only be called if ++ is_return_candidate returned true with the same parameters. */ ++ rtx (*allocate_return_reg) (enum arm_pcs, enum machine_mode, const_tree); ++ ++ /* Finish processing this argument and prepare to start processing ++ the next one. */ ++ void (*advance) (CUMULATIVE_ARGS *, enum machine_mode, const_tree); ++} aapcs_cp_arg_layout[ARM_NUM_COPROC_SLOTS] = ++ { ++ AAPCS_CP(vfp) ++ }; ++ ++#undef AAPCS_CP ++ ++static int ++aapcs_select_call_coproc (CUMULATIVE_ARGS *pcum, enum machine_mode mode, ++ tree type) ++{ ++ int i; ++ ++ for (i = 0; i < ARM_NUM_COPROC_SLOTS; i++) ++ if (aapcs_cp_arg_layout[i].is_call_candidate (pcum, mode, type)) ++ return i; ++ ++ return -1; ++} ++ ++static int ++aapcs_select_return_coproc (const_tree type, const_tree fntype) ++{ ++ /* We aren't passed a decl, so we can't check that a call is local. ++ However, it isn't clear that that would be a win anyway, since it ++ might limit some tail-calling opportunities. */ ++ enum arm_pcs pcs_variant; ++ ++ if (fntype) ++ { ++ const_tree fndecl = NULL_TREE; ++ ++ if (TREE_CODE (fntype) == FUNCTION_DECL) ++ { ++ fndecl = fntype; ++ fntype = TREE_TYPE (fntype); ++ } ++ ++ pcs_variant = arm_get_pcs_model (fntype, fndecl); ++ } ++ else ++ pcs_variant = arm_pcs_default; ++ ++ if (pcs_variant != ARM_PCS_AAPCS) ++ { ++ int i; ++ ++ for (i = 0; i < ARM_NUM_COPROC_SLOTS; i++) ++ if (aapcs_cp_arg_layout[i].is_return_candidate (pcs_variant, ++ TYPE_MODE (type), ++ type)) ++ return i; ++ } ++ return -1; ++} ++ ++static rtx ++aapcs_allocate_return_reg (enum machine_mode mode, const_tree type, ++ const_tree fntype) ++{ ++ /* We aren't passed a decl, so we can't check that a call is local. ++ However, it isn't clear that that would be a win anyway, since it ++ might limit some tail-calling opportunities. */ ++ enum arm_pcs pcs_variant; ++ ++ if (fntype) ++ { ++ const_tree fndecl = NULL_TREE; ++ ++ if (TREE_CODE (fntype) == FUNCTION_DECL) ++ { ++ fndecl = fntype; ++ fntype = TREE_TYPE (fntype); ++ } ++ ++ pcs_variant = arm_get_pcs_model (fntype, fndecl); ++ } ++ else ++ pcs_variant = arm_pcs_default; ++ ++ /* Promote integer types. */ ++ if (type && INTEGRAL_TYPE_P (type)) ++ PROMOTE_FUNCTION_MODE (mode, unsignedp, type); ++ ++ if (pcs_variant != ARM_PCS_AAPCS) ++ { ++ int i; ++ ++ for (i = 0; i < ARM_NUM_COPROC_SLOTS; i++) ++ if (aapcs_cp_arg_layout[i].is_return_candidate (pcs_variant, mode, ++ type)) ++ return aapcs_cp_arg_layout[i].allocate_return_reg (pcs_variant, ++ mode, type); ++ } ++ ++ /* Promotes small structs returned in a register to full-word size ++ for big-endian AAPCS. */ ++ if (type && arm_return_in_msb (type)) ++ { ++ HOST_WIDE_INT size = int_size_in_bytes (type); ++ if (size % UNITS_PER_WORD != 0) ++ { ++ size += UNITS_PER_WORD - size % UNITS_PER_WORD; ++ mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0); ++ } ++ } ++ ++ return gen_rtx_REG (mode, R0_REGNUM); ++} ++ ++rtx ++aapcs_libcall_value (enum machine_mode mode) ++{ ++ return aapcs_allocate_return_reg (mode, NULL_TREE, NULL_TREE); ++} ++ ++/* Lay out a function argument using the AAPCS rules. The rule ++ numbers referred to here are those in the AAPCS. */ ++static void ++aapcs_layout_arg (CUMULATIVE_ARGS *pcum, enum machine_mode mode, ++ tree type, int named) ++{ ++ int nregs, nregs2; ++ int ncrn; ++ ++ /* We only need to do this once per argument. 
*/ ++ if (pcum->aapcs_arg_processed) ++ return; ++ ++ pcum->aapcs_arg_processed = true; ++ ++ /* Special case: if named is false then we are handling an incoming ++ anonymous argument which is on the stack. */ ++ if (!named) ++ return; ++ ++ /* Is this a potential co-processor register candidate? */ ++ if (pcum->pcs_variant != ARM_PCS_AAPCS) ++ { ++ pcum->aapcs_cprc_slot = aapcs_select_call_coproc (pcum, mode, type); ++ ++ /* We don't have to apply any of the rules from part B of the ++ preparation phase, these are handled elsewhere in the ++ compiler. */ ++ ++ if (pcum->aapcs_cprc_slot >= 0) ++ { ++ if (!pcum->aapcs_cprc_failed[pcum->aapcs_cprc_slot]) ++ { ++ /* C1.cp - Try to allocate the argument to co-processor ++ registers. */ ++ if (aapcs_cp_arg_layout[pcum->aapcs_cprc_slot].allocate (pcum, ++ mode, ++ type)) ++ return; ++ /* C2.cp - Put the argument on the stack and note that we ++ can't assign any more candidates in this slot. We also ++ need to note that we have allocated stack space, so that ++ we won't later try to split a non-cprc candidate between ++ core registers and the stack. */ ++ pcum->aapcs_cprc_failed[pcum->aapcs_cprc_slot] = true; ++ pcum->can_split = false; ++ return; ++ } ++ else ++ { ++ /* Subsequent cprc candidates after one that was not ++ allocated to coprocessor registers cannot go in core ++ registers either. */ ++ pcum->can_split = false; ++ return; ++ } ++ } ++ } ++ ++ /* C3 - For double-word aligned arguments, round the NCRN up to the ++ next even number. */ ++ ncrn = pcum->aapcs_ncrn; ++ if ((ncrn & 1) && arm_needs_doubleword_align (mode, type)) ++ ncrn++; ++ ++ nregs = ARM_NUM_REGS2(mode, type); ++ ++ /* Sigh, this test should really assert that nregs > 0, but a GCC ++ extension allows empty structs and then gives them empty size; it ++ then allows such a structure to be passed by value. For some of ++ the code below we have to pretend that such an argument has ++ non-zero size so that we 'locate' it correctly either in ++ registers or on the stack. */ ++ gcc_assert (nregs >= 0); ++ ++ nregs2 = nregs ? nregs : 1; ++ ++ /* C4 - Argument fits entirely in core registers. */ ++ if (ncrn + nregs2 <= NUM_ARG_REGS) ++ { ++ pcum->aapcs_reg = gen_rtx_REG (mode, ncrn); ++ pcum->aapcs_next_ncrn = ncrn + nregs; ++ return; ++ } ++ ++ /* C5 - Some core registers left and there are no arguments already ++ on the stack: split this argument between the remaining core ++ registers and the stack. */ ++ if (ncrn < NUM_ARG_REGS && pcum->can_split) ++ { ++ pcum->aapcs_reg = gen_rtx_REG (mode, ncrn); ++ pcum->aapcs_next_ncrn = NUM_ARG_REGS; ++ pcum->aapcs_partial = (NUM_ARG_REGS - ncrn) * UNITS_PER_WORD; ++ return; ++ } ++ ++ /* C6 - NCRN is set to 4. */ ++ pcum->aapcs_next_ncrn = NUM_ARG_REGS; ++ ++ /* C7,C8 - arugment goes on the stack. We have nothing to do here. */ ++ return; ++} ++ ++/* Initialize a variable CUM of type CUMULATIVE_ARGS ++ for a call to a function whose data type is FNTYPE. ++ For a library call, FNTYPE is NULL. */ ++void ++arm_init_cumulative_args (CUMULATIVE_ARGS *pcum, tree fntype, ++ rtx libname, ++ tree fndecl ATTRIBUTE_UNUSED) ++{ ++ /* Long call handling. */ ++ if (fntype) ++ pcum->pcs_variant = arm_get_pcs_model (fntype, fndecl); ++ else ++ pcum->pcs_variant = arm_pcs_default; ++ ++ if (pcum->pcs_variant <= ARM_PCS_AAPCS_LOCAL) ++ { ++ /* XXX We should also detect some library calls here and handle ++ them using the base rules too; for example the floating point ++ support functions always work this way. 
*/ ++ ++ if (rtx_equal_p (libname, ++ convert_optab_libfunc (sfix_optab, DImode, DFmode)) ++ || rtx_equal_p (libname, ++ convert_optab_libfunc (ufix_optab, DImode, DFmode)) ++ || rtx_equal_p (libname, ++ convert_optab_libfunc (sfix_optab, DImode, SFmode)) ++ || rtx_equal_p (libname, ++ convert_optab_libfunc (ufix_optab, DImode, SFmode)) ++ || rtx_equal_p (libname, ++ convert_optab_libfunc (trunc_optab, HFmode, SFmode)) ++ || rtx_equal_p (libname, ++ convert_optab_libfunc (sext_optab, SFmode, HFmode))) ++ pcum->pcs_variant = ARM_PCS_AAPCS; ++ ++ pcum->aapcs_ncrn = pcum->aapcs_next_ncrn = 0; ++ pcum->aapcs_reg = NULL_RTX; ++ pcum->aapcs_partial = 0; ++ pcum->aapcs_arg_processed = false; ++ pcum->aapcs_cprc_slot = -1; ++ pcum->can_split = true; ++ ++ if (pcum->pcs_variant != ARM_PCS_AAPCS) ++ { ++ int i; ++ ++ for (i = 0; i < ARM_NUM_COPROC_SLOTS; i++) ++ { ++ pcum->aapcs_cprc_failed[i] = false; ++ aapcs_cp_arg_layout[i].cum_init (pcum, fntype, libname, fndecl); ++ } ++ } ++ return; ++ } ++ ++ /* Legacy ABIs */ ++ ++ /* On the ARM, the offset starts at 0. */ ++ pcum->nregs = 0; ++ pcum->iwmmxt_nregs = 0; ++ pcum->can_split = true; ++ ++ /* Varargs vectors are treated the same as long long. ++ named_count avoids having to change the way arm handles 'named' */ ++ pcum->named_count = 0; ++ pcum->nargs = 0; ++ ++ if (TARGET_REALLY_IWMMXT && fntype) ++ { ++ tree fn_arg; ++ ++ for (fn_arg = TYPE_ARG_TYPES (fntype); ++ fn_arg; ++ fn_arg = TREE_CHAIN (fn_arg)) ++ pcum->named_count += 1; ++ ++ if (! pcum->named_count) ++ pcum->named_count = INT_MAX; ++ } ++} ++ ++ ++/* Return true if mode/type need doubleword alignment. */ ++bool ++arm_needs_doubleword_align (enum machine_mode mode, tree type) ++{ ++ return (GET_MODE_ALIGNMENT (mode) > PARM_BOUNDARY ++ || (type && TYPE_ALIGN (type) > PARM_BOUNDARY)); ++} ++ ++ ++/* Determine where to put an argument to a function. ++ Value is zero to push the argument on the stack, ++ or a hard register in which to store the argument. ++ ++ MODE is the argument's machine mode. ++ TYPE is the data type of the argument (as a tree). ++ This is null for libcalls where that information may ++ not be available. ++ CUM is a variable of type CUMULATIVE_ARGS which gives info about ++ the preceding args and about the function being called. ++ NAMED is nonzero if this argument is a named parameter ++ (otherwise it is an extra parameter matching an ellipsis). */ + + rtx + arm_function_arg (CUMULATIVE_ARGS *pcum, enum machine_mode mode, +@@ -2925,6 +4135,17 @@ arm_function_arg (CUMULATIVE_ARGS *pcum, + { + int nregs; + ++ /* Handle the special case quickly. Pick an arbitrary value for op2 of ++ a call insn (op3 of a call_value insn). */ ++ if (mode == VOIDmode) ++ return const0_rtx; ++ ++ if (pcum->pcs_variant <= ARM_PCS_AAPCS_LOCAL) ++ { ++ aapcs_layout_arg (pcum, mode, type, named); ++ return pcum->aapcs_reg; ++ } ++ + /* Varargs vectors are treated the same as long long. 
+ named_count avoids having to change the way arm handles 'named' */ + if (TARGET_IWMMXT_ABI +@@ -2966,10 +4187,16 @@ arm_function_arg (CUMULATIVE_ARGS *pcum, + + static int + arm_arg_partial_bytes (CUMULATIVE_ARGS *pcum, enum machine_mode mode, +- tree type, bool named ATTRIBUTE_UNUSED) ++ tree type, bool named) + { + int nregs = pcum->nregs; + ++ if (pcum->pcs_variant <= ARM_PCS_AAPCS_LOCAL) ++ { ++ aapcs_layout_arg (pcum, mode, type, named); ++ return pcum->aapcs_partial; ++ } ++ + if (TARGET_IWMMXT_ABI && arm_vector_mode_supported_p (mode)) + return 0; + +@@ -2981,6 +4208,39 @@ arm_arg_partial_bytes (CUMULATIVE_ARGS * + return 0; + } + ++void ++arm_function_arg_advance (CUMULATIVE_ARGS *pcum, enum machine_mode mode, ++ tree type, bool named) ++{ ++ if (pcum->pcs_variant <= ARM_PCS_AAPCS_LOCAL) ++ { ++ aapcs_layout_arg (pcum, mode, type, named); ++ ++ if (pcum->aapcs_cprc_slot >= 0) ++ { ++ aapcs_cp_arg_layout[pcum->aapcs_cprc_slot].advance (pcum, mode, ++ type); ++ pcum->aapcs_cprc_slot = -1; ++ } ++ ++ /* Generic stuff. */ ++ pcum->aapcs_arg_processed = false; ++ pcum->aapcs_ncrn = pcum->aapcs_next_ncrn; ++ pcum->aapcs_reg = NULL_RTX; ++ pcum->aapcs_partial = 0; ++ } ++ else ++ { ++ pcum->nargs += 1; ++ if (arm_vector_mode_supported_p (mode) ++ && pcum->named_count > pcum->nargs ++ && TARGET_IWMMXT_ABI) ++ pcum->iwmmxt_nregs += 1; ++ else ++ pcum->nregs += ARM_NUM_REGS2 (mode, type); ++ } ++} ++ + /* Variable sized types are passed by reference. This is a GCC + extension to the ARM ABI. */ + +@@ -3031,6 +4291,8 @@ const struct attribute_spec arm_attribut + /* Whereas these functions are always known to reside within the 26 bit + addressing range. */ + { "short_call", 0, 0, false, true, true, NULL }, ++ /* Specify the procedure call conventions for a function. */ ++ { "pcs", 1, 1, false, true, true, arm_handle_pcs_attribute }, + /* Interrupt Service Routines have special prologue and epilogue requirements. */ + { "isr", 0, 1, false, false, false, arm_handle_isr_attribute }, + { "interrupt", 0, 1, false, false, false, arm_handle_isr_attribute }, +@@ -3133,6 +4395,21 @@ arm_handle_isr_attribute (tree *node, tr + return NULL_TREE; + } + ++/* Handle a "pcs" attribute; arguments as in struct ++ attribute_spec.handler. */ ++static tree ++arm_handle_pcs_attribute (tree *node ATTRIBUTE_UNUSED, tree name, tree args, ++ int flags ATTRIBUTE_UNUSED, bool *no_add_attrs) ++{ ++ if (arm_pcs_from_attribute (args) == ARM_PCS_UNKNOWN) ++ { ++ warning (OPT_Wattributes, "%qs attribute ignored", ++ IDENTIFIER_POINTER (name)); ++ *no_add_attrs = true; ++ } ++ return NULL_TREE; ++} ++ + #if TARGET_DLLIMPORT_DECL_ATTRIBUTES + /* Handle the "notshared" attribute. This attribute is another way of + requesting hidden visibility. ARM's compiler supports +@@ -3298,7 +4575,7 @@ arm_is_long_call_p (tree decl) + + /* Return nonzero if it is ok to make a tail-call to DECL. */ + static bool +-arm_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED) ++arm_function_ok_for_sibcall (tree decl, tree exp) + { + unsigned long func_type; + +@@ -3331,6 +4608,21 @@ arm_function_ok_for_sibcall (tree decl, + if (IS_INTERRUPT (func_type)) + return false; + ++ if (!VOID_TYPE_P (TREE_TYPE (DECL_RESULT (cfun->decl)))) ++ { ++ /* Check that the return value locations are the same. For ++ example that we aren't returning a value from the sibling in ++ a VFP register but then need to transfer it to a core ++ register. 
*/ ++ rtx a, b; ++ ++ a = arm_function_value (TREE_TYPE (exp), decl, false); ++ b = arm_function_value (TREE_TYPE (DECL_RESULT (cfun->decl)), ++ cfun->decl, false); ++ if (!rtx_equal_p (a, b)) ++ return false; ++ } ++ + /* Never tailcall if function may be called with a misaligned SP. */ + if (IS_STACKALIGN (func_type)) + return false; +@@ -3472,10 +4764,22 @@ legitimize_pic_address (rtx orig, enum m + && XEXP (XEXP (orig, 0), 0) == cfun->machine->pic_reg) + return orig; + ++ /* Handle the case where we have: const (UNSPEC_TLS). */ + if (GET_CODE (XEXP (orig, 0)) == UNSPEC + && XINT (XEXP (orig, 0), 1) == UNSPEC_TLS) + return orig; + ++ /* Handle the case where we have: ++ const (plus (UNSPEC_TLS) (ADDEND)). The ADDEND must be a ++ CONST_INT. */ ++ if (GET_CODE (XEXP (orig, 0)) == PLUS ++ && GET_CODE (XEXP (XEXP (orig, 0), 0)) == UNSPEC ++ && XINT (XEXP (XEXP (orig, 0), 0), 1) == UNSPEC_TLS) ++ { ++ gcc_assert (GET_CODE (XEXP (XEXP (orig, 0), 1)) == CONST_INT); ++ return orig; ++ } ++ + if (reg == 0) + { + gcc_assert (can_create_pseudo_p ()); +@@ -3924,6 +5228,7 @@ arm_legitimate_index_p (enum machine_mod + if (GET_MODE_SIZE (mode) <= 4 + && ! (arm_arch4 + && (mode == HImode ++ || mode == HFmode + || (mode == QImode && outer == SIGN_EXTEND)))) + { + if (code == MULT) +@@ -3952,13 +5257,15 @@ arm_legitimate_index_p (enum machine_mod + load. */ + if (arm_arch4) + { +- if (mode == HImode || (outer == SIGN_EXTEND && mode == QImode)) ++ if (mode == HImode ++ || mode == HFmode ++ || (outer == SIGN_EXTEND && mode == QImode)) + range = 256; + else + range = 4096; + } + else +- range = (mode == HImode) ? 4095 : 4096; ++ range = (mode == HImode || mode == HFmode) ? 4095 : 4096; + + return (code == CONST_INT + && INTVAL (index) < range +@@ -4129,7 +5436,8 @@ thumb1_legitimate_address_p (enum machin + return 1; + + /* This is PC relative data after arm_reorg runs. */ +- else if (GET_MODE_SIZE (mode) >= 4 && reload_completed ++ else if ((GET_MODE_SIZE (mode) >= 4 || mode == HFmode) ++ && reload_completed + && (GET_CODE (x) == LABEL_REF + || (GET_CODE (x) == CONST + && GET_CODE (XEXP (x, 0)) == PLUS +@@ -4799,121 +6107,255 @@ thumb1_rtx_costs (rtx x, enum rtx_code c + return 99; + } + +- default: +- return 99; +- } +-} ++ default: ++ return 99; ++ } ++} ++ ++static inline bool ++arm_rtx_costs_1 (rtx x, enum rtx_code outer, int* total) ++{ ++ enum machine_mode mode = GET_MODE (x); ++ enum rtx_code subcode; ++ rtx operand; ++ enum rtx_code code = GET_CODE (x); ++ int extra_cost; ++ *total = 0; ++ ++ switch (code) ++ { ++ case MEM: ++ /* Memory costs quite a lot for the first word, but subsequent words ++ load at the equivalent of a single insn each. 
*/ ++ *total = COSTS_N_INSNS (2 + ARM_NUM_REGS (mode)); ++ return true; ++ ++ case DIV: ++ case MOD: ++ case UDIV: ++ case UMOD: ++ if (TARGET_HARD_FLOAT && mode == SFmode) ++ *total = COSTS_N_INSNS (2); ++ else if (TARGET_HARD_FLOAT && mode == DFmode) ++ *total = COSTS_N_INSNS (4); ++ else ++ *total = COSTS_N_INSNS (20); ++ return false; ++ ++ case ROTATE: ++ if (GET_CODE (XEXP (x, 1)) == REG) ++ *total = COSTS_N_INSNS (1); /* Need to subtract from 32 */ ++ else if (GET_CODE (XEXP (x, 1)) != CONST_INT) ++ *total = rtx_cost (XEXP (x, 1), code); ++ ++ /* Fall through */ ++ case ROTATERT: ++ if (mode != SImode) ++ { ++ *total += COSTS_N_INSNS (4); ++ return true; ++ } ++ ++ /* Fall through */ ++ case ASHIFT: case LSHIFTRT: case ASHIFTRT: ++ *total += rtx_cost (XEXP (x, 0), code); ++ if (mode == DImode) ++ { ++ *total += COSTS_N_INSNS (3); ++ return true; ++ } ++ ++ *total += COSTS_N_INSNS (1); ++ /* Increase the cost of complex shifts because they aren't any faster, ++ and reduce dual issue opportunities. */ ++ if (arm_tune_cortex_a9 ++ && outer != SET && GET_CODE (XEXP (x, 1)) != CONST_INT) ++ ++*total; ++ ++ return true; ++ ++ case MINUS: ++ if (TARGET_THUMB2) ++ { ++ if (GET_MODE_CLASS (mode) == MODE_FLOAT) ++ { ++ if (TARGET_HARD_FLOAT && (mode == SFmode || mode == DFmode)) ++ *total = COSTS_N_INSNS (1); ++ else ++ *total = COSTS_N_INSNS (20); ++ } ++ else ++ *total = COSTS_N_INSNS (ARM_NUM_REGS (mode)); ++ /* Thumb2 does not have RSB, so all arguments must be ++ registers (subtracting a constant is canonicalized as ++ addition of the negated constant). */ ++ return false; ++ } ++ ++ if (mode == DImode) ++ { ++ *total = COSTS_N_INSNS (ARM_NUM_REGS (mode)); ++ if (GET_CODE (XEXP (x, 0)) == CONST_INT ++ && const_ok_for_arm (INTVAL (XEXP (x, 0)))) ++ { ++ *total += rtx_cost (XEXP (x, 1), code); ++ return true; ++ } ++ ++ if (GET_CODE (XEXP (x, 1)) == CONST_INT ++ && const_ok_for_arm (INTVAL (XEXP (x, 1)))) ++ { ++ *total += rtx_cost (XEXP (x, 0), code); ++ return true; ++ } ++ ++ return false; ++ } ++ ++ if (GET_MODE_CLASS (mode) == MODE_FLOAT) ++ { ++ if (TARGET_HARD_FLOAT && (mode == SFmode || mode == DFmode)) ++ { ++ *total = COSTS_N_INSNS (1); ++ if (GET_CODE (XEXP (x, 0)) == CONST_DOUBLE ++ && arm_const_double_rtx (XEXP (x, 0))) ++ { ++ *total += rtx_cost (XEXP (x, 1), code); ++ return true; ++ } ++ ++ if (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE ++ && arm_const_double_rtx (XEXP (x, 1))) ++ { ++ *total += rtx_cost (XEXP (x, 0), code); ++ return true; ++ } ++ ++ return false; ++ } ++ *total = COSTS_N_INSNS (20); ++ return false; ++ } ++ ++ *total = COSTS_N_INSNS (1); ++ if (GET_CODE (XEXP (x, 0)) == CONST_INT ++ && const_ok_for_arm (INTVAL (XEXP (x, 0)))) ++ { ++ *total += rtx_cost (XEXP (x, 1), code); ++ return true; ++ } + ++ subcode = GET_CODE (XEXP (x, 1)); ++ if (subcode == ASHIFT || subcode == ASHIFTRT ++ || subcode == LSHIFTRT ++ || subcode == ROTATE || subcode == ROTATERT) ++ { ++ *total += rtx_cost (XEXP (x, 0), code); ++ *total += rtx_cost (XEXP (XEXP (x, 1), 0), subcode); ++ return true; ++ } + +-/* Worker routine for arm_rtx_costs. */ +-/* ??? This needs updating for thumb2. */ +-static inline int +-arm_rtx_costs_1 (rtx x, enum rtx_code code, enum rtx_code outer) +-{ +- enum machine_mode mode = GET_MODE (x); +- enum rtx_code subcode; +- int extra_cost; ++ /* A shift as a part of RSB costs no more than RSB itself. 
*/ ++ if (GET_CODE (XEXP (x, 0)) == MULT ++ && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT ++ && ((INTVAL (XEXP (XEXP (x, 0), 1)) ++ & (INTVAL (XEXP (XEXP (x, 0), 1)) - 1)) == 0)) ++ { ++ *total += rtx_cost (XEXP (XEXP (x, 0), 0), code); ++ *total += rtx_cost (XEXP (x, 1), code); ++ return true; ++ } + +- switch (code) +- { +- case MEM: +- /* Memory costs quite a lot for the first word, but subsequent words +- load at the equivalent of a single insn each. */ +- return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD) +- + (GET_CODE (x) == SYMBOL_REF +- && CONSTANT_POOL_ADDRESS_P (x) ? 4 : 0)); ++ if (subcode == MULT ++ && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT ++ && ((INTVAL (XEXP (XEXP (x, 1), 1)) & ++ (INTVAL (XEXP (XEXP (x, 1), 1)) - 1)) == 0)) ++ { ++ *total += rtx_cost (XEXP (x, 0), code); ++ *total += rtx_cost (XEXP (XEXP (x, 1), 0), subcode); ++ return true; ++ } + +- case DIV: +- case MOD: +- case UDIV: +- case UMOD: +- return optimize_size ? COSTS_N_INSNS (2) : 100; ++ if (GET_RTX_CLASS (GET_CODE (XEXP (x, 1))) == RTX_COMPARE ++ || GET_RTX_CLASS (GET_CODE (XEXP (x, 1))) == RTX_COMM_COMPARE) ++ { ++ *total = COSTS_N_INSNS (1) + rtx_cost (XEXP (x, 0), code); ++ if (GET_CODE (XEXP (XEXP (x, 1), 0)) == REG ++ && REGNO (XEXP (XEXP (x, 1), 0)) != CC_REGNUM) ++ *total += COSTS_N_INSNS (1); + +- case ROTATE: +- if (mode == SImode && GET_CODE (XEXP (x, 1)) == REG) +- return 4; +- /* Fall through */ +- case ROTATERT: +- if (mode != SImode) +- return 8; +- /* Fall through */ +- case ASHIFT: case LSHIFTRT: case ASHIFTRT: +- if (mode == DImode) +- return (8 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : 8) +- + ((GET_CODE (XEXP (x, 0)) == REG +- || (GET_CODE (XEXP (x, 0)) == SUBREG +- && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG)) +- ? 0 : 8)); +- return (1 + ((GET_CODE (XEXP (x, 0)) == REG +- || (GET_CODE (XEXP (x, 0)) == SUBREG +- && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG)) +- ? 0 : 4) +- + ((GET_CODE (XEXP (x, 1)) == REG +- || (GET_CODE (XEXP (x, 1)) == SUBREG +- && GET_CODE (SUBREG_REG (XEXP (x, 1))) == REG) +- || (GET_CODE (XEXP (x, 1)) == CONST_INT)) +- ? 0 : 4)); ++ return true; ++ } + +- case MINUS: +- if (GET_CODE (XEXP (x, 1)) == MULT && mode == SImode && arm_arch_thumb2) ++ /* MLS is just as expensive as its underlying multiplication. ++ Exclude a shift by a constant, which is expressed as a ++ multiplication. */ ++ if (TARGET_32BIT && arm_arch_thumb2 ++ && GET_CODE (XEXP (x, 1)) == MULT ++ && ! (GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT ++ && ((INTVAL (XEXP (XEXP (x, 1), 1)) & ++ (INTVAL (XEXP (XEXP (x, 1), 1)) - 1)) == 0))) + { +- extra_cost = rtx_cost (XEXP (x, 1), code); +- if (!REG_OR_SUBREG_REG (XEXP (x, 0))) +- extra_cost += 4 * ARM_NUM_REGS (mode); +- return extra_cost; ++ /* The cost comes from the cost of the multiply. */ ++ return false; + } + +- if (mode == DImode) +- return (4 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 8) +- + ((REG_OR_SUBREG_REG (XEXP (x, 0)) +- || (GET_CODE (XEXP (x, 0)) == CONST_INT +- && const_ok_for_arm (INTVAL (XEXP (x, 0))))) +- ? 0 : 8)); +- +- if (GET_MODE_CLASS (mode) == MODE_FLOAT) +- return (2 + ((REG_OR_SUBREG_REG (XEXP (x, 1)) +- || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE +- && arm_const_double_rtx (XEXP (x, 1)))) +- ? 0 : 8) +- + ((REG_OR_SUBREG_REG (XEXP (x, 0)) +- || (GET_CODE (XEXP (x, 0)) == CONST_DOUBLE +- && arm_const_double_rtx (XEXP (x, 0)))) +- ? 
0 : 8)); +- +- if (((GET_CODE (XEXP (x, 0)) == CONST_INT +- && const_ok_for_arm (INTVAL (XEXP (x, 0))) +- && REG_OR_SUBREG_REG (XEXP (x, 1)))) +- || (((subcode = GET_CODE (XEXP (x, 1))) == ASHIFT +- || subcode == ASHIFTRT || subcode == LSHIFTRT +- || subcode == ROTATE || subcode == ROTATERT +- || (subcode == MULT +- && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT +- && ((INTVAL (XEXP (XEXP (x, 1), 1)) & +- (INTVAL (XEXP (XEXP (x, 1), 1)) - 1)) == 0))) +- && REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 0)) +- && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 1)) +- || GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT) +- && REG_OR_SUBREG_REG (XEXP (x, 0)))) +- return 1; + /* Fall through */ + + case PLUS: +- if (GET_CODE (XEXP (x, 0)) == MULT) ++ if (code == PLUS && arm_arch6 && mode == SImode ++ && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND ++ || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)) ++ { ++ *total = COSTS_N_INSNS (1); ++ *total += rtx_cost (XEXP (XEXP (x, 0), 0), GET_CODE (XEXP (x, 0))); ++ *total += rtx_cost (XEXP (x, 1), code); ++ return true; ++ } ++ ++ /* MLA: All arguments must be registers. We filter out ++ multiplication by a power of two, so that we fall down into ++ the code below. */ ++ if (GET_CODE (XEXP (x, 0)) == MULT ++ && ! (GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT ++ && ((INTVAL (XEXP (XEXP (x, 0), 1)) & ++ (INTVAL (XEXP (XEXP (x, 0), 1)) - 1)) == 0))) + { +- extra_cost = rtx_cost (XEXP (x, 0), code); +- if (!REG_OR_SUBREG_REG (XEXP (x, 1))) +- extra_cost += 4 * ARM_NUM_REGS (mode); +- return extra_cost; ++ /* The cost comes from the cost of the multiply. */ ++ return false; + } + + if (GET_MODE_CLASS (mode) == MODE_FLOAT) +- return (2 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8) +- + ((REG_OR_SUBREG_REG (XEXP (x, 1)) +- || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE +- && arm_const_double_rtx (XEXP (x, 1)))) +- ? 0 : 8)); ++ { ++ if (TARGET_HARD_FLOAT && (mode == SFmode || mode == DFmode)) ++ { ++ *total = COSTS_N_INSNS (1); ++ if (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE ++ && arm_const_double_rtx (XEXP (x, 1))) ++ { ++ *total += rtx_cost (XEXP (x, 0), code); ++ return true; ++ } ++ ++ return false; ++ } ++ ++ *total = COSTS_N_INSNS (20); ++ return false; ++ } ++ ++ if (GET_RTX_CLASS (GET_CODE (XEXP (x, 0))) == RTX_COMPARE ++ || GET_RTX_CLASS (GET_CODE (XEXP (x, 0))) == RTX_COMM_COMPARE) ++ { ++ *total = COSTS_N_INSNS (1) + rtx_cost (XEXP (x, 1), code); ++ if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG ++ && REGNO (XEXP (XEXP (x, 0), 0)) != CC_REGNUM) ++ *total += COSTS_N_INSNS (1); ++ return true; ++ } + + /* Fall through */ ++ + case AND: case XOR: case IOR: + extra_cost = 0; + +@@ -4927,37 +6369,56 @@ arm_rtx_costs_1 (rtx x, enum rtx_code co + && GET_CODE (XEXP (x, 1)) != CONST_INT) + || (REG_OR_SUBREG_REG (XEXP (x, 0)) + && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 0))))) +- extra_cost = 4; ++ *total = 4; + + if (mode == DImode) +- return (4 + extra_cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8) +- + ((REG_OR_SUBREG_REG (XEXP (x, 1)) +- || (GET_CODE (XEXP (x, 1)) == CONST_INT +- && const_ok_for_op (INTVAL (XEXP (x, 1)), code))) +- ? 0 : 8)); +- +- if (REG_OR_SUBREG_REG (XEXP (x, 0))) +- return (1 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : extra_cost) +- + ((REG_OR_SUBREG_REG (XEXP (x, 1)) +- || (GET_CODE (XEXP (x, 1)) == CONST_INT +- && const_ok_for_op (INTVAL (XEXP (x, 1)), code))) +- ? 
0 : 4)); +- +- else if (REG_OR_SUBREG_REG (XEXP (x, 1))) +- return (1 + extra_cost +- + ((((subcode = GET_CODE (XEXP (x, 0))) == ASHIFT +- || subcode == LSHIFTRT || subcode == ASHIFTRT +- || subcode == ROTATE || subcode == ROTATERT +- || (subcode == MULT +- && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT +- && ((INTVAL (XEXP (XEXP (x, 0), 1)) & +- (INTVAL (XEXP (XEXP (x, 0), 1)) - 1)) == 0))) +- && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 0))) +- && ((REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 1))) +- || GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)) +- ? 0 : 4)); ++ { ++ *total += COSTS_N_INSNS (2); ++ if (GET_CODE (XEXP (x, 1)) == CONST_INT ++ && const_ok_for_op (INTVAL (XEXP (x, 1)), code)) ++ { ++ *total += rtx_cost (XEXP (x, 0), code); ++ return true; ++ } + +- return 8; ++ return false; ++ } ++ ++ *total += COSTS_N_INSNS (1); ++ if (GET_CODE (XEXP (x, 1)) == CONST_INT ++ && const_ok_for_op (INTVAL (XEXP (x, 1)), code)) ++ { ++ *total += rtx_cost (XEXP (x, 0), code); ++ return true; ++ } ++ subcode = GET_CODE (XEXP (x, 0)); ++ if (subcode == ASHIFT || subcode == ASHIFTRT ++ || subcode == LSHIFTRT ++ || subcode == ROTATE || subcode == ROTATERT) ++ { ++ *total += rtx_cost (XEXP (x, 1), code); ++ *total += rtx_cost (XEXP (XEXP (x, 0), 0), subcode); ++ return true; ++ } ++ ++ if (subcode == MULT ++ && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT ++ && ((INTVAL (XEXP (XEXP (x, 0), 1)) & ++ (INTVAL (XEXP (XEXP (x, 0), 1)) - 1)) == 0)) ++ { ++ *total += rtx_cost (XEXP (x, 1), code); ++ *total += rtx_cost (XEXP (XEXP (x, 0), 0), subcode); ++ return true; ++ } ++ ++ if (subcode == UMIN || subcode == UMAX ++ || subcode == SMIN || subcode == SMAX) ++ { ++ *total = COSTS_N_INSNS (3); ++ return true; ++ } ++ ++ return false; + + case MULT: + /* This should have been handled by the CPU specific routines. */ +@@ -4971,90 +6432,281 @@ arm_rtx_costs_1 (rtx x, enum rtx_code co + == GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1))) + && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == ZERO_EXTEND + || GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == SIGN_EXTEND)) +- return 8; +- return 99; ++ { ++ *total = rtx_cost (XEXP (XEXP (x, 0), 0), LSHIFTRT); ++ return true; ++ } ++ *total = COSTS_N_INSNS (2); /* Plus the cost of the MULT */ ++ return false; + + case NEG: + if (GET_MODE_CLASS (mode) == MODE_FLOAT) +- return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 6); ++ { ++ if (TARGET_HARD_FLOAT && (mode == SFmode || mode == DFmode)) ++ { ++ *total = COSTS_N_INSNS (1); ++ return false; ++ } ++ *total = COSTS_N_INSNS (2); ++ return false; ++ } ++ + /* Fall through */ + case NOT: +- if (mode == DImode) +- return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4); ++ *total = COSTS_N_INSNS (ARM_NUM_REGS(mode)); ++ if (mode == SImode && code == NOT) ++ { ++ subcode = GET_CODE (XEXP (x, 0)); ++ if (subcode == ASHIFT || subcode == ASHIFTRT ++ || subcode == LSHIFTRT ++ || subcode == ROTATE || subcode == ROTATERT ++ || (subcode == MULT ++ && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT ++ && ((INTVAL (XEXP (XEXP (x, 0), 1)) & ++ (INTVAL (XEXP (XEXP (x, 0), 1)) - 1)) == 0))) ++ { ++ *total += rtx_cost (XEXP (XEXP (x, 0), 0), subcode); ++ /* Register shifts cost an extra cycle. */ ++ if (GET_CODE (XEXP (XEXP (x, 0), 1)) != CONST_INT) ++ *total += COSTS_N_INSNS (1) + rtx_cost (XEXP (XEXP (x, 0), 1), ++ subcode); ++ return true; ++ } ++ } + +- return 1 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 
0 : 4); ++ return false; + + case IF_THEN_ELSE: + if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC) +- return 14; +- return 2; ++ { ++ *total = COSTS_N_INSNS (4); ++ return true; ++ } ++ ++ operand = XEXP (x, 0); ++ ++ if (!((GET_RTX_CLASS (GET_CODE (operand)) == RTX_COMPARE ++ || GET_RTX_CLASS (GET_CODE (operand)) == RTX_COMM_COMPARE) ++ && GET_CODE (XEXP (operand, 0)) == REG ++ && REGNO (XEXP (operand, 0)) == CC_REGNUM)) ++ *total += COSTS_N_INSNS (1); ++ *total += (rtx_cost (XEXP (x, 1), code) ++ + rtx_cost (XEXP (x, 2), code)); ++ return true; ++ ++ case NE: ++ if (mode == SImode && XEXP (x, 1) == const0_rtx) ++ { ++ *total = COSTS_N_INSNS (2) + rtx_cost (XEXP (x, 0), code); ++ return true; ++ } ++ goto scc_insn; ++ ++ case GE: ++ if ((GET_CODE (XEXP (x, 0)) != REG || REGNO (XEXP (x, 0)) != CC_REGNUM) ++ && mode == SImode && XEXP (x, 1) == const0_rtx) ++ { ++ *total = COSTS_N_INSNS (2) + rtx_cost (XEXP (x, 0), code); ++ return true; ++ } ++ goto scc_insn; ++ ++ case LT: ++ if ((GET_CODE (XEXP (x, 0)) != REG || REGNO (XEXP (x, 0)) != CC_REGNUM) ++ && mode == SImode && XEXP (x, 1) == const0_rtx) ++ { ++ *total = COSTS_N_INSNS (1) + rtx_cost (XEXP (x, 0), code); ++ return true; ++ } ++ goto scc_insn; ++ ++ case EQ: ++ case GT: ++ case LE: ++ case GEU: ++ case LTU: ++ case GTU: ++ case LEU: ++ case UNORDERED: ++ case ORDERED: ++ case UNEQ: ++ case UNGE: ++ case UNLT: ++ case UNGT: ++ case UNLE: ++ scc_insn: ++ /* SCC insns. In the case where the comparison has already been ++ performed, then they cost 2 instructions. Otherwise they need ++ an additional comparison before them. */ ++ *total = COSTS_N_INSNS (2); ++ if (GET_CODE (XEXP (x, 0)) == REG && REGNO (XEXP (x, 0)) == CC_REGNUM) ++ { ++ return true; ++ } + ++ /* Fall through */ + case COMPARE: +- return 1; ++ if (GET_CODE (XEXP (x, 0)) == REG && REGNO (XEXP (x, 0)) == CC_REGNUM) ++ { ++ *total = 0; ++ return true; ++ } ++ ++ *total += COSTS_N_INSNS (1); ++ if (GET_CODE (XEXP (x, 1)) == CONST_INT ++ && const_ok_for_op (INTVAL (XEXP (x, 1)), code)) ++ { ++ *total += rtx_cost (XEXP (x, 0), code); ++ return true; ++ } ++ ++ subcode = GET_CODE (XEXP (x, 0)); ++ if (subcode == ASHIFT || subcode == ASHIFTRT ++ || subcode == LSHIFTRT ++ || subcode == ROTATE || subcode == ROTATERT) ++ { ++ *total += rtx_cost (XEXP (x, 1), code); ++ *total += rtx_cost (XEXP (XEXP (x, 0), 0), subcode); ++ return true; ++ } ++ ++ if (subcode == MULT ++ && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT ++ && ((INTVAL (XEXP (XEXP (x, 0), 1)) & ++ (INTVAL (XEXP (XEXP (x, 0), 1)) - 1)) == 0)) ++ { ++ *total += rtx_cost (XEXP (x, 1), code); ++ *total += rtx_cost (XEXP (XEXP (x, 0), 0), subcode); ++ return true; ++ } ++ ++ return false; ++ ++ case UMIN: ++ case UMAX: ++ case SMIN: ++ case SMAX: ++ *total = COSTS_N_INSNS (2) + rtx_cost (XEXP (x, 0), code); ++ if (GET_CODE (XEXP (x, 1)) != CONST_INT ++ || !const_ok_for_arm (INTVAL (XEXP (x, 1)))) ++ *total += rtx_cost (XEXP (x, 1), code); ++ return true; + + case ABS: +- return 4 + (mode == DImode ? 4 : 0); ++ if (GET_MODE_CLASS (mode == MODE_FLOAT)) ++ { ++ if (TARGET_HARD_FLOAT && (mode == SFmode || mode == DFmode)) ++ { ++ *total = COSTS_N_INSNS (1); ++ return false; ++ } ++ *total = COSTS_N_INSNS (20); ++ return false; ++ } ++ *total = COSTS_N_INSNS (1); ++ if (mode == DImode) ++ *total += COSTS_N_INSNS (3); ++ return false; + + case SIGN_EXTEND: +- /* ??? value extensions are cheaper on armv6. */ +- if (GET_MODE (XEXP (x, 0)) == QImode) +- return (4 + (mode == DImode ? 
4 : 0) +- + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0)); ++ if (GET_MODE_CLASS (mode) == MODE_INT) ++ { ++ *total = 0; ++ if (mode == DImode) ++ *total += COSTS_N_INSNS (1); ++ ++ if (GET_MODE (XEXP (x, 0)) != SImode) ++ { ++ if (arm_arch6) ++ { ++ if (GET_CODE (XEXP (x, 0)) != MEM) ++ *total += COSTS_N_INSNS (1); ++ } ++ else if (!arm_arch4 || GET_CODE (XEXP (x, 0)) != MEM) ++ *total += COSTS_N_INSNS (2); ++ } ++ ++ return false; ++ } ++ + /* Fall through */ + case ZERO_EXTEND: +- switch (GET_MODE (XEXP (x, 0))) ++ *total = 0; ++ if (GET_MODE_CLASS (mode) == MODE_INT) + { +- case QImode: +- return (1 + (mode == DImode ? 4 : 0) +- + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0)); ++ if (mode == DImode) ++ *total += COSTS_N_INSNS (1); + +- case HImode: +- return (4 + (mode == DImode ? 4 : 0) +- + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0)); ++ if (GET_MODE (XEXP (x, 0)) != SImode) ++ { ++ if (arm_arch6) ++ { ++ if (GET_CODE (XEXP (x, 0)) != MEM) ++ *total += COSTS_N_INSNS (1); ++ } ++ else if (!arm_arch4 || GET_CODE (XEXP (x, 0)) != MEM) ++ *total += COSTS_N_INSNS (GET_MODE (XEXP (x, 0)) == QImode ? ++ 1 : 2); ++ } + +- case SImode: +- return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0)); ++ return false; ++ } + ++ switch (GET_MODE (XEXP (x, 0))) ++ { + case V8QImode: + case V4HImode: + case V2SImode: + case V4QImode: + case V2HImode: +- return 1; ++ *total = COSTS_N_INSNS (1); ++ return false; + + default: + gcc_unreachable (); + } + gcc_unreachable (); + ++ case ZERO_EXTRACT: ++ case SIGN_EXTRACT: ++ *total = COSTS_N_INSNS (1) + rtx_cost (XEXP (x, 0), code); ++ return true; ++ + case CONST_INT: +- if (const_ok_for_arm (INTVAL (x))) +- return outer == SET ? 2 : -1; +- else if (outer == AND +- && const_ok_for_arm (~INTVAL (x))) +- return -1; +- else if ((outer == COMPARE +- || outer == PLUS || outer == MINUS) +- && const_ok_for_arm (-INTVAL (x))) +- return -1; ++ if (const_ok_for_arm (INTVAL (x)) ++ || const_ok_for_arm (~INTVAL (x))) ++ *total = COSTS_N_INSNS (1); + else +- return 5; ++ *total = COSTS_N_INSNS (arm_gen_constant (SET, mode, NULL_RTX, ++ INTVAL (x), NULL_RTX, ++ NULL_RTX, 0, 0)); ++ return true; + + case CONST: + case LABEL_REF: + case SYMBOL_REF: +- return 6; ++ *total = COSTS_N_INSNS (3); ++ return true; ++ ++ case HIGH: ++ *total = COSTS_N_INSNS (1); ++ return true; ++ ++ case LO_SUM: ++ *total = COSTS_N_INSNS (1); ++ *total += rtx_cost (XEXP (x, 0), code); ++ return true; + + case CONST_DOUBLE: +- if (arm_const_double_rtx (x) || vfp3_const_double_rtx (x)) +- return outer == SET ? 2 : -1; +- else if ((outer == COMPARE || outer == PLUS) +- && neg_const_double_rtx_ok_for_fpa (x)) +- return -1; +- return 7; ++ if (TARGET_HARD_FLOAT && vfp3_const_double_rtx (x)) ++ *total = COSTS_N_INSNS (1); ++ else ++ *total = COSTS_N_INSNS (4); ++ return true; + + default: +- return 99; ++ *total = COSTS_N_INSNS (4); ++ return false; + } + } + +@@ -5063,14 +6715,14 @@ static bool + arm_size_rtx_costs (rtx x, int code, int outer_code, int *total) + { + enum machine_mode mode = GET_MODE (x); +- +- if (TARGET_THUMB) ++ if (TARGET_THUMB1) + { + /* XXX TBD. For now, use the standard costs. */ + *total = thumb1_rtx_costs (x, code, outer_code); + return true; + } + ++ /* FIXME: This makes no attempt to prefer narrow Thumb-2 instructions. 
*/ + switch (code) + { + case MEM: +@@ -5181,7 +6833,10 @@ arm_size_rtx_costs (rtx x, int code, int + + case NEG: + if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT) +- *total = COSTS_N_INSNS (1); ++ { ++ *total = COSTS_N_INSNS (1); ++ return false; ++ } + /* Fall through */ + case NOT: + *total = COSTS_N_INSNS (ARM_NUM_REGS (mode)); +@@ -5270,6 +6925,13 @@ arm_size_rtx_costs (rtx x, int code, int + *total = COSTS_N_INSNS (4); + return true; + ++ case HIGH: ++ case LO_SUM: ++ /* We prefer constant pool entries to MOVW/MOVT pairs, so bump the ++ cost of these slightly. */ ++ *total = COSTS_N_INSNS (1) + 1; ++ return true; ++ + default: + if (mode != VOIDmode) + *total = COSTS_N_INSNS (ARM_NUM_REGS (mode)); +@@ -5279,11 +6941,22 @@ arm_size_rtx_costs (rtx x, int code, int + } + } + ++/* RTX costs when optimizing for size. */ ++static bool ++arm_rtx_costs (rtx x, int code, int outer_code, int *total) ++{ ++ if (optimize_size) ++ return arm_size_rtx_costs (x, code, outer_code, total); ++ else ++ return all_cores[(int)arm_tune].rtx_costs (x, code, outer_code, total); ++} ++ + /* RTX costs for cores with a slow MUL implementation. Thumb-2 is not + supported on any "slowmul" cores, so it can be ignored. */ + + static bool +-arm_slowmul_rtx_costs (rtx x, int code, int outer_code, int *total) ++arm_slowmul_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code, ++ int *total) + { + enum machine_mode mode = GET_MODE (x); + +@@ -5299,8 +6972,8 @@ arm_slowmul_rtx_costs (rtx x, int code, + if (GET_MODE_CLASS (mode) == MODE_FLOAT + || mode == DImode) + { +- *total = 30; +- return true; ++ *total = COSTS_N_INSNS (20); ++ return false; + } + + if (GET_CODE (XEXP (x, 1)) == CONST_INT) +@@ -5316,20 +6989,19 @@ arm_slowmul_rtx_costs (rtx x, int code, + for (j = 0; i && j < 32; j += booth_unit_size) + { + i >>= booth_unit_size; +- cost += 2; ++ cost++; + } + +- *total = cost; ++ *total = COSTS_N_INSNS (cost); ++ *total += rtx_cost (XEXP (x, 0), code); + return true; + } + +- *total = 30 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4) +- + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4); +- return true; ++ *total = COSTS_N_INSNS (20); ++ return false; + + default: +- *total = arm_rtx_costs_1 (x, code, outer_code); +- return true; ++ return arm_rtx_costs_1 (x, outer_code, total);; + } + } + +@@ -5337,7 +7009,8 @@ arm_slowmul_rtx_costs (rtx x, int code, + /* RTX cost for cores with a fast multiply unit (M variants). */ + + static bool +-arm_fastmul_rtx_costs (rtx x, int code, int outer_code, int *total) ++arm_fastmul_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code, ++ int *total) + { + enum machine_mode mode = GET_MODE (x); + +@@ -5358,16 +7031,15 @@ arm_fastmul_rtx_costs (rtx x, int code, + && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND + || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)) + { +- *total = 8; +- return true; ++ *total = COSTS_N_INSNS(2); ++ return false; + } + + +- if (GET_MODE_CLASS (mode) == MODE_FLOAT +- || mode == DImode) ++ if (mode == DImode) + { +- *total = 30; +- return true; ++ *total = COSTS_N_INSNS (5); ++ return false; + } + + if (GET_CODE (XEXP (x, 1)) == CONST_INT) +@@ -5383,20 +7055,34 @@ arm_fastmul_rtx_costs (rtx x, int code, + for (j = 0; i && j < 32; j += booth_unit_size) + { + i >>= booth_unit_size; +- cost += 2; ++ cost++; + } + +- *total = cost; +- return true; ++ *total = COSTS_N_INSNS(cost); ++ return false; + } + +- *total = 8 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4) +- + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 
0 : 4); +- return true; ++ if (mode == SImode) ++ { ++ *total = COSTS_N_INSNS (4); ++ return false; ++ } ++ ++ if (GET_MODE_CLASS (mode) == MODE_FLOAT) ++ { ++ if (TARGET_HARD_FLOAT && (mode == SFmode || mode == DFmode)) ++ { ++ *total = COSTS_N_INSNS (1); ++ return false; ++ } ++ } ++ ++ /* Requires a lib call */ ++ *total = COSTS_N_INSNS (20); ++ return false; + + default: +- *total = arm_rtx_costs_1 (x, code, outer_code); +- return true; ++ return arm_rtx_costs_1 (x, outer_code, total); + } + } + +@@ -5405,7 +7091,7 @@ arm_fastmul_rtx_costs (rtx x, int code, + so it can be ignored. */ + + static bool +-arm_xscale_rtx_costs (rtx x, int code, int outer_code, int *total) ++arm_xscale_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code, int *total) + { + enum machine_mode mode = GET_MODE (x); + +@@ -5417,6 +7103,15 @@ arm_xscale_rtx_costs (rtx x, int code, i + + switch (code) + { ++ case COMPARE: ++ if (GET_CODE (XEXP (x, 0)) != MULT) ++ return arm_rtx_costs_1 (x, outer_code, total); ++ ++ /* A COMPARE of a MULT is slow on XScale; the muls instruction ++ will stall until the multiplication is complete. */ ++ *total = COSTS_N_INSNS (3); ++ return false; ++ + case MULT: + /* There is no point basing this on the tuning, since it is always the + fast variant if it exists at all. */ +@@ -5425,60 +7120,58 @@ arm_xscale_rtx_costs (rtx x, int code, i + && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND + || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)) + { +- *total = 8; +- return true; ++ *total = COSTS_N_INSNS (2); ++ return false; + } + + +- if (GET_MODE_CLASS (mode) == MODE_FLOAT +- || mode == DImode) ++ if (mode == DImode) + { +- *total = 30; +- return true; ++ *total = COSTS_N_INSNS (5); ++ return false; + } + + if (GET_CODE (XEXP (x, 1)) == CONST_INT) + { +- unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1)) +- & (unsigned HOST_WIDE_INT) 0xffffffff); +- int cost, const_ok = const_ok_for_arm (i); ++ /* If operand 1 is a constant we can more accurately ++ calculate the cost of the multiply. The multiplier can ++ retire 15 bits on the first cycle and a further 12 on the ++ second. We do, of course, have to load the constant into ++ a register first. */ ++ unsigned HOST_WIDE_INT i = INTVAL (XEXP (x, 1)); ++ /* There's a general overhead of one cycle. */ ++ int cost = 1; + unsigned HOST_WIDE_INT masked_const; + +- /* The cost will be related to two insns. +- First a load of the constant (MOV or LDR), then a multiply. */ +- cost = 2; +- if (! const_ok) +- cost += 1; /* LDR is probably more expensive because +- of longer result latency. */ ++ if (i & 0x80000000) ++ i = ~i; ++ ++ i &= (unsigned HOST_WIDE_INT) 0xffffffff; ++ + masked_const = i & 0xffff8000; +- if (masked_const != 0 && masked_const != 0xffff8000) ++ if (masked_const != 0) + { ++ cost++; + masked_const = i & 0xf8000000; +- if (masked_const == 0 || masked_const == 0xf8000000) +- cost += 1; +- else +- cost += 2; ++ if (masked_const != 0) ++ cost++; + } +- *total = cost; +- return true; ++ *total = COSTS_N_INSNS (cost); ++ return false; + } + +- *total = 8 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4) +- + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4); +- return true; ++ if (mode == SImode) ++ { ++ *total = COSTS_N_INSNS (3); ++ return false; ++ } + +- case COMPARE: +- /* A COMPARE of a MULT is slow on XScale; the muls instruction +- will stall until the multiplication is complete. 
*/ +- if (GET_CODE (XEXP (x, 0)) == MULT) +- *total = 4 + rtx_cost (XEXP (x, 0), code); +- else +- *total = arm_rtx_costs_1 (x, code, outer_code); +- return true; ++ /* Requires a lib call */ ++ *total = COSTS_N_INSNS (20); ++ return false; + + default: +- *total = arm_rtx_costs_1 (x, code, outer_code); +- return true; ++ return arm_rtx_costs_1 (x, outer_code, total); + } + } + +@@ -5486,11 +7179,9 @@ arm_xscale_rtx_costs (rtx x, int code, i + /* RTX costs for 9e (and later) cores. */ + + static bool +-arm_9e_rtx_costs (rtx x, int code, int outer_code, int *total) ++arm_9e_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code, int *total) + { + enum machine_mode mode = GET_MODE (x); +- int nonreg_cost; +- int cost; + + if (TARGET_THUMB1) + { +@@ -5516,35 +7207,37 @@ arm_9e_rtx_costs (rtx x, int code, int o + && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND + || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)) + { +- *total = 3; +- return true; ++ *total = COSTS_N_INSNS (2); ++ return false; + } + + +- if (GET_MODE_CLASS (mode) == MODE_FLOAT) +- { +- *total = 30; +- return true; +- } + if (mode == DImode) + { +- cost = 7; +- nonreg_cost = 8; ++ *total = COSTS_N_INSNS (5); ++ return false; + } +- else ++ ++ if (mode == SImode) + { +- cost = 2; +- nonreg_cost = 4; ++ *total = COSTS_N_INSNS (2); ++ return false; + } + ++ if (GET_MODE_CLASS (mode) == MODE_FLOAT) ++ { ++ if (TARGET_HARD_FLOAT && (mode == SFmode || mode == DFmode)) ++ { ++ *total = COSTS_N_INSNS (1); ++ return false; ++ } ++ } + +- *total = cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : nonreg_cost) +- + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : nonreg_cost); +- return true; ++ *total = COSTS_N_INSNS (20); ++ return false; + + default: +- *total = arm_rtx_costs_1 (x, code, outer_code); +- return true; ++ return arm_rtx_costs_1 (x, outer_code, total); + } + } + /* All address computations that can be done are free, but rtx cost returns +@@ -6356,10 +8049,13 @@ arm_coproc_mem_operand (rtx op, bool wb) + } + + /* Return TRUE if OP is a memory operand which we can load or store a vector +- to/from. If CORE is true, we're moving from ARM registers not Neon +- registers. */ ++ to/from. TYPE is one of the following values: ++ 0 - Vector load/stor (vldr) ++ 1 - Core registers (ldm) ++ 2 - Element/structure loads (vld1) ++ */ + int +-neon_vector_mem_operand (rtx op, bool core) ++neon_vector_mem_operand (rtx op, int type) + { + rtx ind; + +@@ -6392,23 +8088,15 @@ neon_vector_mem_operand (rtx op, bool co + return arm_address_register_rtx_p (ind, 0); + + /* Allow post-increment with Neon registers. */ +- if (!core && GET_CODE (ind) == POST_INC) ++ if (type != 1 && (GET_CODE (ind) == POST_INC || GET_CODE (ind) == PRE_DEC)) + return arm_address_register_rtx_p (XEXP (ind, 0), 0); + +-#if 0 +- /* FIXME: We can support this too if we use VLD1/VST1. */ +- if (!core +- && GET_CODE (ind) == POST_MODIFY +- && arm_address_register_rtx_p (XEXP (ind, 0), 0) +- && GET_CODE (XEXP (ind, 1)) == PLUS +- && rtx_equal_p (XEXP (XEXP (ind, 1), 0), XEXP (ind, 0))) +- ind = XEXP (ind, 1); +-#endif ++ /* FIXME: vld1 allows register post-modify. */ + + /* Match: + (plus (reg) + (const)). 
*/ +- if (!core ++ if (type == 0 + && GET_CODE (ind) == PLUS + && GET_CODE (XEXP (ind, 0)) == REG + && REG_MODE_OK_FOR_BASE_P (XEXP (ind, 0), VOIDmode) +@@ -6475,10 +8163,17 @@ arm_eliminable_register (rtx x) + enum reg_class + coproc_secondary_reload_class (enum machine_mode mode, rtx x, bool wb) + { ++ if (mode == HFmode) ++ { ++ if (s_register_operand (x, mode) || neon_vector_mem_operand (x, 2)) ++ return NO_REGS; ++ return GENERAL_REGS; ++ } ++ + if (TARGET_NEON + && (GET_MODE_CLASS (mode) == MODE_VECTOR_INT + || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT) +- && neon_vector_mem_operand (x, FALSE)) ++ && neon_vector_mem_operand (x, 0)) + return NO_REGS; + + if (arm_coproc_mem_operand (x, wb) || s_register_operand (x, mode)) +@@ -6875,6 +8570,9 @@ load_multiple_sequence (rtx *operands, i + int base_reg = -1; + int i; + ++ if (low_irq_latency) ++ return 0; ++ + /* Can only handle 2, 3, or 4 insns at present, + though could be easily extended if required. */ + gcc_assert (nops >= 2 && nops <= 4); +@@ -7102,6 +8800,9 @@ store_multiple_sequence (rtx *operands, + int base_reg = -1; + int i; + ++ if (low_irq_latency) ++ return 0; ++ + /* Can only handle 2, 3, or 4 insns at present, though could be easily + extended if required. */ + gcc_assert (nops >= 2 && nops <= 4); +@@ -7307,7 +9008,7 @@ arm_gen_load_multiple (int base_regno, i + + As a compromise, we use ldr for counts of 1 or 2 regs, and ldm + for counts of 3 or 4 regs. */ +- if (arm_tune_xscale && count <= 2 && ! optimize_size) ++ if (low_irq_latency || (arm_tune_xscale && count <= 2 && ! optimize_size)) + { + rtx seq; + +@@ -7370,7 +9071,7 @@ arm_gen_store_multiple (int base_regno, + + /* See arm_gen_load_multiple for discussion of + the pros/cons of ldm/stm usage for XScale. */ +- if (arm_tune_xscale && count <= 2 && ! optimize_size) ++ if (low_irq_latency || (arm_tune_xscale && count <= 2 && ! optimize_size)) + { + rtx seq; + +@@ -8739,17 +10440,20 @@ add_minipool_backward_ref (Mfix *fix) + its maximum address (which can happen if we have + re-located a forwards fix); force the new fix to come + after it. */ +- min_mp = mp; +- min_address = mp->min_address + fix->fix_size; ++ if (ARM_DOUBLEWORD_ALIGN ++ && fix->fix_size >= 8 && mp->fix_size < 8) ++ return NULL; ++ else ++ { ++ min_mp = mp; ++ min_address = mp->min_address + fix->fix_size; ++ } + } +- /* If we are inserting an 8-bytes aligned quantity and +- we have not already found an insertion point, then +- make sure that all such 8-byte aligned quantities are +- placed at the start of the pool. */ ++ /* Do not insert a non-8-byte aligned quantity before 8-byte ++ aligned quantities. */ + else if (ARM_DOUBLEWORD_ALIGN +- && min_mp == NULL +- && fix->fix_size >= 8 +- && mp->fix_size < 8) ++ && fix->fix_size < 8 ++ && mp->fix_size >= 8) + { + min_mp = mp; + min_address = mp->min_address + fix->fix_size; +@@ -8985,7 +10689,10 @@ create_fix_barrier (Mfix *fix, HOST_WIDE + gcc_assert (GET_CODE (from) != BARRIER); + + /* Count the length of this insn. */ +- count += get_attr_length (from); ++ if (LABEL_P (from) && (align_jumps > 0 || align_loops > 0)) ++ count += MAX (align_jumps, align_loops); ++ else ++ count += get_attr_length (from); + + /* If there is a jump table, add its length. 
*/ + tmp = is_jump_table (from); +@@ -9297,6 +11004,8 @@ arm_reorg (void) + insn = table; + } + } ++ else if (LABEL_P (insn) && (align_jumps > 0 || align_loops > 0)) ++ address += MAX (align_jumps, align_loops); + } + + fix = minipool_fix_head; +@@ -9502,6 +11211,21 @@ static void + vfp_output_fldmd (FILE * stream, unsigned int base, int reg, int count) + { + int i; ++ int offset; ++ ++ if (low_irq_latency) ++ { ++ /* Output a sequence of FLDD instructions. */ ++ offset = 0; ++ for (i = reg; i < reg + count; ++i, offset += 8) ++ { ++ fputc ('\t', stream); ++ asm_fprintf (stream, "fldd\td%d, [%r,#%d]\n", i, base, offset); ++ } ++ asm_fprintf (stream, "\tadd\tsp, sp, #%d\n", count * 8); ++ return; ++ } ++ + + /* Workaround ARM10 VFPr1 bug. */ + if (count == 2 && !arm_arch6) +@@ -9572,6 +11296,53 @@ vfp_emit_fstmd (int base_reg, int count) + rtx tmp, reg; + int i; + ++ if (low_irq_latency) ++ { ++ if (!count) ++ return 0; ++ ++ int saved_size = count * GET_MODE_SIZE (DFmode); ++ ++ /* Since fstd does not have postdecrement addressing mode, ++ we first decrement stack pointer and then use base+offset ++ stores for VFP registers. The ARM EABI unwind information ++ can't easily describe base+offset loads, so we attach ++ a note for the effects of the whole block in the first insn, ++ and avoid marking the subsequent instructions ++ with RTX_FRAME_RELATED_P. */ ++ rtx sp_insn = gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, ++ GEN_INT (-saved_size)); ++ sp_insn = emit_insn (sp_insn); ++ RTX_FRAME_RELATED_P (sp_insn) = 1; ++ ++ dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (count + 1)); ++ XVECEXP (dwarf, 0, 0) = ++ gen_rtx_SET (VOIDmode, stack_pointer_rtx, ++ plus_constant (stack_pointer_rtx, -saved_size)); ++ ++ /* push double VFP registers to stack */ ++ for (i = 0; i < count; ++i ) ++ { ++ rtx reg; ++ rtx mem; ++ rtx addr; ++ rtx insn; ++ reg = gen_rtx_REG (DFmode, base_reg + 2*i); ++ addr = (i == 0) ? stack_pointer_rtx ++ : gen_rtx_PLUS (SImode, stack_pointer_rtx, ++ GEN_INT (i * GET_MODE_SIZE (DFmode))); ++ mem = gen_frame_mem (DFmode, addr); ++ insn = emit_move_insn (mem, reg); ++ XVECEXP (dwarf, 0, i+1) = ++ gen_rtx_SET (VOIDmode, mem, reg); ++ } ++ ++ REG_NOTES (sp_insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf, ++ REG_NOTES (sp_insn)); ++ ++ return saved_size; ++ } ++ + /* Workaround ARM10 VFPr1 bug. Data corruption can occur when exactly two + register pairs are stored by a store multiple insn. We avoid this + by pushing an extra pair. */ +@@ -9729,6 +11500,14 @@ output_call_mem (rtx *operands) + } + + ++/* Emit a MOVW/MOVT pair. */ ++void arm_emit_movpair (rtx dest, rtx src) ++{ ++ emit_set_insn (dest, gen_rtx_HIGH (SImode, src)); ++ emit_set_insn (dest, gen_rtx_LO_SUM (SImode, dest, src)); ++} ++ ++ + /* Output a move from arm registers to an fpa registers. + OPERANDS[0] is an fpa register. + OPERANDS[1] is the first registers of an arm register pair. 
*/ +@@ -9862,7 +11641,11 @@ output_move_double (rtx *operands) + switch (GET_CODE (XEXP (operands[1], 0))) + { + case REG: +- output_asm_insn ("ldm%(ia%)\t%m1, %M0", operands); ++ if (TARGET_LDRD ++ && !(fix_cm3_ldrd && reg0 == REGNO(XEXP (operands[1], 0)))) ++ output_asm_insn ("ldr%(d%)\t%0, [%m1]", operands); ++ else ++ output_asm_insn ("ldm%(ia%)\t%m1, %M0", operands); + break; + + case PRE_INC: +@@ -9878,7 +11661,10 @@ output_move_double (rtx *operands) + break; + + case POST_INC: +- output_asm_insn ("ldm%(ia%)\t%m1!, %M0", operands); ++ if (TARGET_LDRD) ++ output_asm_insn ("ldr%(d%)\t%0, [%m1], #8", operands); ++ else ++ output_asm_insn ("ldm%(ia%)\t%m1!, %M0", operands); + break; + + case POST_DEC: +@@ -9888,6 +11674,10 @@ output_move_double (rtx *operands) + + case PRE_MODIFY: + case POST_MODIFY: ++ /* Autoicrement addressing modes should never have overlapping ++ base and destination registers, and overlapping index registers ++ are already prohibited, so this doesn't need to worry about ++ fix_cm3_ldrd. */ + otherops[0] = operands[0]; + otherops[1] = XEXP (XEXP (XEXP (operands[1], 0), 1), 0); + otherops[2] = XEXP (XEXP (XEXP (operands[1], 0), 1), 1); +@@ -9902,9 +11692,9 @@ output_move_double (rtx *operands) + } + else + { +- /* IWMMXT allows offsets larger than ldrd can handle, ++ /* IWMMXT allows offsets larger than ARM ldrd can handle, + fix these up with a pair of ldr. */ +- if (GET_CODE (otherops[2]) == CONST_INT ++ if (TARGET_ARM && GET_CODE (otherops[2]) == CONST_INT + && (INTVAL(otherops[2]) <= -256 + || INTVAL(otherops[2]) >= 256)) + { +@@ -9918,9 +11708,9 @@ output_move_double (rtx *operands) + } + else + { +- /* IWMMXT allows offsets larger than ldrd can handle, ++ /* IWMMXT allows offsets larger than ARM ldrd can handle, + fix these up with a pair of ldr. */ +- if (GET_CODE (otherops[2]) == CONST_INT ++ if (TARGET_ARM && GET_CODE (otherops[2]) == CONST_INT + && (INTVAL(otherops[2]) <= -256 + || INTVAL(otherops[2]) >= 256)) + { +@@ -9937,8 +11727,15 @@ output_move_double (rtx *operands) + + case LABEL_REF: + case CONST: +- output_asm_insn ("adr%?\t%0, %1", operands); +- output_asm_insn ("ldm%(ia%)\t%0, %M0", operands); ++ /* Use the second register of the pair to avoid problematic ++ overlap. */ ++ otherops[1] = operands[1]; ++ output_asm_insn ("adr%?\t%0, %1", otherops); ++ operands[1] = otherops[0]; ++ if (TARGET_LDRD) ++ output_asm_insn ("ldr%(d%)\t%0, [%1]", operands); ++ else ++ output_asm_insn ("ldm%(ia%)\t%1, %M0", operands); + break; + + /* ??? This needs checking for thumb2. */ +@@ -9952,7 +11749,7 @@ output_move_double (rtx *operands) + + if (GET_CODE (XEXP (operands[1], 0)) == PLUS) + { +- if (GET_CODE (otherops[2]) == CONST_INT) ++ if (GET_CODE (otherops[2]) == CONST_INT && !TARGET_LDRD) + { + switch ((int) INTVAL (otherops[2])) + { +@@ -9971,30 +11768,37 @@ output_move_double (rtx *operands) + return ""; + } + } ++ otherops[0] = gen_rtx_REG(SImode, REGNO(operands[0]) + 1); ++ operands[1] = otherops[0]; + if (TARGET_LDRD + && (GET_CODE (otherops[2]) == REG + || (GET_CODE (otherops[2]) == CONST_INT + && INTVAL (otherops[2]) > -256 + && INTVAL (otherops[2]) < 256))) + { +- if (reg_overlap_mentioned_p (otherops[0], ++ if (reg_overlap_mentioned_p (operands[0], + otherops[2])) + { ++ rtx tmp; + /* Swap base and index registers over to + avoid a conflict. 
*/ +- otherops[1] = XEXP (XEXP (operands[1], 0), 1); +- otherops[2] = XEXP (XEXP (operands[1], 0), 0); ++ tmp = otherops[1]; ++ otherops[1] = otherops[2]; ++ otherops[2] = tmp; + } + /* If both registers conflict, it will usually + have been fixed by a splitter. */ +- if (reg_overlap_mentioned_p (otherops[0], otherops[2])) ++ if (reg_overlap_mentioned_p (operands[0], otherops[2]) ++ || (fix_cm3_ldrd && reg0 == REGNO (otherops[1]))) + { +- output_asm_insn ("add%?\t%1, %1, %2", otherops); +- output_asm_insn ("ldr%(d%)\t%0, [%1]", +- otherops); ++ output_asm_insn ("add%?\t%0, %1, %2", otherops); ++ output_asm_insn ("ldr%(d%)\t%0, [%1]", operands); + } + else +- output_asm_insn ("ldr%(d%)\t%0, [%1, %2]", otherops); ++ { ++ otherops[0] = operands[0]; ++ output_asm_insn ("ldr%(d%)\t%0, [%1, %2]", otherops); ++ } + return ""; + } + +@@ -10011,7 +11815,10 @@ output_move_double (rtx *operands) + else + output_asm_insn ("sub%?\t%0, %1, %2", otherops); + +- return "ldm%(ia%)\t%0, %M0"; ++ if (TARGET_LDRD) ++ return "ldr%(d%)\t%0, [%1]"; ++ ++ return "ldm%(ia%)\t%1, %M0"; + } + else + { +@@ -10039,7 +11846,10 @@ output_move_double (rtx *operands) + switch (GET_CODE (XEXP (operands[0], 0))) + { + case REG: +- output_asm_insn ("stm%(ia%)\t%m0, %M1", operands); ++ if (TARGET_LDRD) ++ output_asm_insn ("str%(d%)\t%1, [%m0]", operands); ++ else ++ output_asm_insn ("stm%(ia%)\t%m0, %M1", operands); + break; + + case PRE_INC: +@@ -10055,7 +11865,10 @@ output_move_double (rtx *operands) + break; + + case POST_INC: +- output_asm_insn ("stm%(ia%)\t%m0!, %M1", operands); ++ if (TARGET_LDRD) ++ output_asm_insn ("str%(d%)\t%1, [%m0], #8", operands); ++ else ++ output_asm_insn ("stm%(ia%)\t%m0!, %M1", operands); + break; + + case POST_DEC: +@@ -10069,9 +11882,9 @@ output_move_double (rtx *operands) + otherops[1] = XEXP (XEXP (XEXP (operands[0], 0), 1), 0); + otherops[2] = XEXP (XEXP (XEXP (operands[0], 0), 1), 1); + +- /* IWMMXT allows offsets larger than ldrd can handle, ++ /* IWMMXT allows offsets larger than ARM ldrd can handle, + fix these up with a pair of ldr. */ +- if (GET_CODE (otherops[2]) == CONST_INT ++ if (TARGET_ARM && GET_CODE (otherops[2]) == CONST_INT + && (INTVAL(otherops[2]) <= -256 + || INTVAL(otherops[2]) >= 256)) + { +@@ -10099,7 +11912,7 @@ output_move_double (rtx *operands) + + case PLUS: + otherops[2] = XEXP (XEXP (operands[0], 0), 1); +- if (GET_CODE (otherops[2]) == CONST_INT) ++ if (GET_CODE (otherops[2]) == CONST_INT && !TARGET_LDRD) + { + switch ((int) INTVAL (XEXP (XEXP (operands[0], 0), 1))) + { +@@ -10145,7 +11958,7 @@ output_move_double (rtx *operands) + } + + /* Output a move, load or store for quad-word vectors in ARM registers. Only +- handles MEMs accepted by neon_vector_mem_operand with CORE=true. */ ++ handles MEMs accepted by neon_vector_mem_operand with TYPE=1. */ + + const char * + output_move_quad (rtx *operands) +@@ -10343,6 +12156,13 @@ output_move_neon (rtx *operands) + ops[1] = reg; + break; + ++ case PRE_DEC: ++ /* FIXME: We should be using vld1/vst1 here in BE mode? */ ++ template = "v%smdb%%?\t%%0!, %%h1"; ++ ops[0] = XEXP (addr, 0); ++ ops[1] = reg; ++ break; ++ + case POST_MODIFY: + /* FIXME: Not currently enabled in neon_vector_mem_operand. */ + gcc_unreachable (); +@@ -10700,25 +12520,14 @@ arm_compute_save_reg0_reg12_mask (void) + } + else + { +- /* In arm mode we handle r11 (FP) as a special case. */ +- unsigned last_reg = TARGET_ARM ? 
10 : 11; +- + /* In the normal case we only need to save those registers + which are call saved and which are used by this function. */ +- for (reg = 0; reg <= last_reg; reg++) ++ for (reg = 0; reg <= 11; reg++) + if (df_regs_ever_live_p (reg) && ! call_used_regs[reg]) + save_reg_mask |= (1 << reg); + + /* Handle the frame pointer as a special case. */ +- if (! TARGET_APCS_FRAME +- && ! frame_pointer_needed +- && df_regs_ever_live_p (HARD_FRAME_POINTER_REGNUM) +- && ! call_used_regs[HARD_FRAME_POINTER_REGNUM]) +- save_reg_mask |= 1 << HARD_FRAME_POINTER_REGNUM; +- else if (! TARGET_APCS_FRAME +- && ! frame_pointer_needed +- && df_regs_ever_live_p (HARD_FRAME_POINTER_REGNUM) +- && ! call_used_regs[HARD_FRAME_POINTER_REGNUM]) ++ if (frame_pointer_needed) + save_reg_mask |= 1 << HARD_FRAME_POINTER_REGNUM; + + /* If we aren't loading the PIC register, +@@ -10753,8 +12562,27 @@ arm_compute_save_reg0_reg12_mask (void) + } + + ++/* Compute the number of bytes used to store the static chain register on the ++ stack, above the stack frame. We need to know this accurately to get the ++ alignment of the rest of the stack frame correct. */ ++ ++static int arm_compute_static_chain_stack_bytes (void) ++{ ++ unsigned long func_type = arm_current_func_type (); ++ int static_chain_stack_bytes = 0; ++ ++ if (TARGET_APCS_FRAME && frame_pointer_needed && TARGET_ARM && ++ IS_NESTED (func_type) && ++ df_regs_ever_live_p (3) && current_function_pretend_args_size == 0) ++ static_chain_stack_bytes = 4; ++ ++ return static_chain_stack_bytes; ++} ++ ++ + /* Compute a bit mask of which registers need to be +- saved on the stack for the current function. */ ++ saved on the stack for the current function. ++ This is used by arm_get_frame_offsets, which may add extra registers. */ + + static unsigned long + arm_compute_save_reg_mask (void) +@@ -10769,7 +12597,7 @@ arm_compute_save_reg_mask (void) + + /* If we are creating a stack frame, then we must save the frame pointer, + IP (which will hold the old stack pointer), LR and the PC. */ +- if (frame_pointer_needed && TARGET_ARM) ++ if (TARGET_APCS_FRAME && frame_pointer_needed && TARGET_ARM) + save_reg_mask |= + (1 << ARM_HARD_FRAME_POINTER_REGNUM) + | (1 << IP_REGNUM) +@@ -10804,7 +12632,9 @@ arm_compute_save_reg_mask (void) + + if (TARGET_REALLY_IWMMXT + && ((bit_count (save_reg_mask) +- + ARM_NUM_INTS (current_function_pretend_args_size)) % 2) != 0) ++ + ARM_NUM_INTS (current_function_pretend_args_size + ++ arm_compute_static_chain_stack_bytes()) ++ ) % 2) != 0) + { + /* The total number of registers that are going to be pushed + onto the stack is odd. We need to ensure that the stack +@@ -10882,13 +12712,33 @@ thumb1_compute_save_reg_mask (void) + reg = thumb_find_work_register (1 << LAST_LO_REGNUM); + /* Make sure the register returned by thumb_find_work_register is + not part of the return value. */ +- if (reg * UNITS_PER_WORD <= arm_size_return_regs ()) ++ if (reg * UNITS_PER_WORD <= (unsigned) arm_size_return_regs ()) + reg = LAST_LO_REGNUM; + + if (! call_used_regs[reg]) + mask |= 1 << reg; + } + ++ /* The 504 below is 8 bytes less than 512 because there are two possible ++ alignment words. We can't tell here if they will be present or not so we ++ have to play it safe and assume that they are. */ ++ if ((CALLER_INTERWORKING_SLOT_SIZE + ++ ROUND_UP_WORD (get_frame_size ()) + ++ current_function_outgoing_args_size) >= 504) ++ { ++ /* This is the same as the code in thumb1_expand_prologue() which ++ determines which register to use for stack decrement. 
*/ ++ for (reg = LAST_ARG_REGNUM + 1; reg <= LAST_LO_REGNUM; reg++) ++ if (mask & (1 << reg)) ++ break; ++ ++ if (reg > LAST_LO_REGNUM) ++ { ++ /* Make sure we have a register available for stack decrement. */ ++ mask |= 1 << LAST_LO_REGNUM; ++ } ++ } ++ + return mask; + } + +@@ -10916,7 +12766,7 @@ arm_get_vfp_saved_size (void) + if (count > 0) + { + /* Workaround ARM10 VFPr1 bug. */ +- if (count == 2 && !arm_arch6) ++ if (count == 2 && !arm_arch6 && !low_irq_latency) + count++; + saved += count * 8; + } +@@ -10979,7 +12829,8 @@ output_return_instruction (rtx operand, + + return_used_this_function = 1; + +- live_regs_mask = arm_compute_save_reg_mask (); ++ offsets = arm_get_frame_offsets (); ++ live_regs_mask = offsets->saved_regs_mask; + + if (live_regs_mask) + { +@@ -11041,7 +12892,6 @@ output_return_instruction (rtx operand, + { + unsigned HOST_WIDE_INT stack_adjust; + +- offsets = arm_get_frame_offsets (); + stack_adjust = offsets->outgoing_args - offsets->saved_regs; + gcc_assert (stack_adjust == 0 || stack_adjust == 4); + +@@ -11245,6 +13095,41 @@ arm_output_function_prologue (FILE *f, H + return_used_this_function = 0; + } + ++/* Generate to STREAM a code sequence that pops registers identified ++ in REGS_MASK from SP. SP is incremented as the result. ++*/ ++static void ++print_pop_reg_by_ldr (FILE *stream, int regs_mask, int rfe) ++{ ++ int reg; ++ ++ gcc_assert (! (regs_mask & (1 << SP_REGNUM))); ++ ++ for (reg = 0; reg < PC_REGNUM; ++reg) ++ if (regs_mask & (1 << reg)) ++ asm_fprintf (stream, "\tldr\t%r, [%r], #4\n", ++ reg, SP_REGNUM); ++ ++ if (regs_mask & (1 << PC_REGNUM)) ++ { ++ if (rfe) ++ /* When returning from exception, we need to ++ copy SPSR to CPSR. There are two ways to do ++ that: the ldm instruction with "^" suffix, ++ and movs instruction. The latter would ++ require that we load from stack to some ++ scratch register, and then move to PC. ++ Therefore, we'd need extra instruction and ++ have to make sure we actually have a spare ++ register. Using ldm with a single register ++ is simler. */ ++ asm_fprintf (stream, "\tldm\tsp!, {pc}^\n"); ++ else ++ asm_fprintf (stream, "\tldr\t%r, [%r], #4\n", ++ PC_REGNUM, SP_REGNUM); ++ } ++} ++ + const char * + arm_output_epilogue (rtx sibling) + { +@@ -11289,7 +13174,7 @@ arm_output_epilogue (rtx sibling) + gcc_assert (!current_function_calls_eh_return || really_return); + + offsets = arm_get_frame_offsets (); +- saved_regs_mask = arm_compute_save_reg_mask (); ++ saved_regs_mask = offsets->saved_regs_mask; + + if (TARGET_IWMMXT) + lrm_count = bit_count (saved_regs_mask); +@@ -11300,7 +13185,7 @@ arm_output_epilogue (rtx sibling) + if (saved_regs_mask & (1 << reg)) + floats_offset += 4; + +- if (frame_pointer_needed && TARGET_ARM) ++ if (TARGET_APCS_FRAME && frame_pointer_needed && TARGET_ARM) + { + /* This variable is for the Virtual Frame Pointer, not VFP regs. */ + int vfp_offset = offsets->frame; +@@ -11446,32 +13331,88 @@ arm_output_epilogue (rtx sibling) + } + else + { ++ /* This branch is executed for ARM mode (non-apcs frames) and ++ Thumb-2 mode. Frame layout is essentially the same for those ++ cases, except that in ARM mode frame pointer points to the ++ first saved register, while in Thumb-2 mode the frame pointer points ++ to the last saved register. ++ ++ It is possible to make frame pointer point to last saved ++ register in both cases, and remove some conditionals below. 
++ That means that fp setup in prologue would be just "mov fp, sp" ++ and sp restore in epilogue would be just "mov sp, fp", whereas ++ now we have to use add/sub in those cases. However, the value ++ of that would be marginal, as both mov and add/sub are 32-bit ++ in ARM mode, and it would require extra conditionals ++ in arm_expand_prologue to distingish ARM-apcs-frame case ++ (where frame pointer is required to point at first register) ++ and ARM-non-apcs-frame. Therefore, such change is postponed ++ until real need arise. */ + HOST_WIDE_INT amount; + int rfe; + /* Restore stack pointer if necessary. */ +- if (frame_pointer_needed) ++ if (TARGET_ARM && frame_pointer_needed) + { +- /* For Thumb-2 restore sp from the frame pointer. +- Operand restrictions mean we have to increment FP, then copy +- to SP. */ +- amount = offsets->locals_base - offsets->saved_regs; +- operands[0] = hard_frame_pointer_rtx; ++ operands[0] = stack_pointer_rtx; ++ operands[1] = hard_frame_pointer_rtx; ++ ++ operands[2] = GEN_INT (offsets->frame - offsets->saved_regs); ++ output_add_immediate (operands); + } + else + { +- operands[0] = stack_pointer_rtx; +- amount = offsets->outgoing_args - offsets->saved_regs; +- } ++ if (frame_pointer_needed) ++ { ++ /* For Thumb-2 restore sp from the frame pointer. ++ Operand restrictions mean we have to incrememnt FP, then copy ++ to SP. */ ++ amount = offsets->locals_base - offsets->saved_regs; ++ operands[0] = hard_frame_pointer_rtx; ++ } ++ else ++ { ++ unsigned long count; ++ operands[0] = stack_pointer_rtx; ++ amount = offsets->outgoing_args - offsets->saved_regs; ++ /* Pop call clobbered registers if it avoids a ++ separate stack adjustment. */ ++ count = offsets->saved_regs - offsets->saved_args; ++ if (optimize_size ++ && count != 0 ++ && !current_function_calls_eh_return ++ && bit_count (saved_regs_mask) * 4 == count ++ && !IS_INTERRUPT (func_type) ++ && !cfun->tail_call_emit) ++ { ++ unsigned long mask; ++ mask = (1 << (arm_size_return_regs () / 4)) - 1; ++ mask ^= 0xf; ++ mask &= ~saved_regs_mask; ++ reg = 0; ++ while (bit_count (mask) * 4 > amount) ++ { ++ while ((mask & (1 << reg)) == 0) ++ reg++; ++ mask &= ~(1 << reg); ++ } ++ if (bit_count (mask) * 4 == amount) ++ { ++ amount = 0; ++ saved_regs_mask |= mask; ++ } ++ } ++ } + +- if (amount) +- { +- operands[1] = operands[0]; +- operands[2] = GEN_INT (amount); +- output_add_immediate (operands); ++ if (amount) ++ { ++ operands[1] = operands[0]; ++ operands[2] = GEN_INT (amount); ++ output_add_immediate (operands); ++ } ++ if (frame_pointer_needed) ++ asm_fprintf (f, "\tmov\t%r, %r\n", ++ SP_REGNUM, HARD_FRAME_POINTER_REGNUM); + } +- if (frame_pointer_needed) +- asm_fprintf (f, "\tmov\t%r, %r\n", +- SP_REGNUM, HARD_FRAME_POINTER_REGNUM); + + if (arm_fpu_arch == FPUTYPE_FPA_EMU2) + { +@@ -11557,22 +13498,19 @@ arm_output_epilogue (rtx sibling) + to load use the LDR instruction - it is faster. For Thumb-2 + always use pop and the assembler will pick the best instruction.*/ + if (TARGET_ARM && saved_regs_mask == (1 << LR_REGNUM) +- && !IS_INTERRUPT(func_type)) ++ && !IS_INTERRUPT (func_type)) + { + asm_fprintf (f, "\tldr\t%r, [%r], #4\n", LR_REGNUM, SP_REGNUM); + } + else if (saved_regs_mask) + { +- if (saved_regs_mask & (1 << SP_REGNUM)) +- /* Note - write back to the stack register is not enabled +- (i.e. "ldmfd sp!..."). We know that the stack pointer is +- in the list of registers and if we add writeback the +- instruction becomes UNPREDICTABLE. 
*/ +- print_multi_reg (f, "ldmfd\t%r, ", SP_REGNUM, saved_regs_mask, +- rfe); +- else if (TARGET_ARM) +- print_multi_reg (f, "ldmfd\t%r!, ", SP_REGNUM, saved_regs_mask, +- rfe); ++ gcc_assert ( ! (saved_regs_mask & (1 << SP_REGNUM))); ++ if (TARGET_ARM) ++ if (low_irq_latency) ++ print_pop_reg_by_ldr (f, saved_regs_mask, rfe); ++ else ++ print_multi_reg (f, "ldmfd\t%r!, ", SP_REGNUM, saved_regs_mask, ++ rfe); + else + print_multi_reg (f, "pop\t", SP_REGNUM, saved_regs_mask, 0); + } +@@ -11693,6 +13631,32 @@ emit_multi_reg_push (unsigned long mask) + + gcc_assert (num_regs && num_regs <= 16); + ++ if (low_irq_latency) ++ { ++ rtx insn = 0; ++ ++ /* Emit a series of ldr instructions rather rather than a single ldm. */ ++ /* TODO: Use ldrd where possible. */ ++ gcc_assert (! (mask & (1 << SP_REGNUM))); ++ ++ for (i = LAST_ARM_REGNUM; i >= 0; --i) ++ { ++ if (mask & (1 << i)) ++ ++ { ++ rtx reg, where, mem; ++ ++ reg = gen_rtx_REG (SImode, i); ++ where = gen_rtx_PRE_DEC (SImode, stack_pointer_rtx); ++ mem = gen_rtx_MEM (SImode, where); ++ insn = emit_move_insn (mem, reg); ++ RTX_FRAME_RELATED_P (insn) = 1; ++ } ++ } ++ ++ return insn; ++ } ++ + /* We don't record the PC in the dwarf frame information. */ + num_dwarf_regs = num_regs; + if (mask & (1 << PC_REGNUM)) +@@ -11930,7 +13894,8 @@ thumb_force_lr_save (void) + + + /* Calculate stack offsets. These are used to calculate register elimination +- offsets and in prologue/epilogue code. */ ++ offsets and in prologue/epilogue code. Also calculates which registers ++ should be saved. */ + + static arm_stack_offsets * + arm_get_frame_offsets (void) +@@ -11939,7 +13904,9 @@ arm_get_frame_offsets (void) + unsigned long func_type; + int leaf; + int saved; ++ int core_saved; + HOST_WIDE_INT frame_size; ++ int i; + + offsets = &cfun->machine->stack_offsets; + +@@ -11966,13 +13933,16 @@ arm_get_frame_offsets (void) + offsets->saved_args = current_function_pretend_args_size; + + /* In Thumb mode this is incorrect, but never used. */ +- offsets->frame = offsets->saved_args + (frame_pointer_needed ? 4 : 0); ++ offsets->frame = offsets->saved_args + (frame_pointer_needed ? 4 : 0) + ++ arm_compute_static_chain_stack_bytes(); + + if (TARGET_32BIT) + { + unsigned int regno; + +- saved = bit_count (arm_compute_save_reg_mask ()) * 4; ++ offsets->saved_regs_mask = arm_compute_save_reg_mask (); ++ core_saved = bit_count (offsets->saved_regs_mask) * 4; ++ saved = core_saved; + + /* We know that SP will be doubleword aligned on entry, and we must + preserve that condition at any subroutine call. We also require the +@@ -12003,13 +13973,16 @@ arm_get_frame_offsets (void) + } + else /* TARGET_THUMB1 */ + { +- saved = bit_count (thumb1_compute_save_reg_mask ()) * 4; ++ offsets->saved_regs_mask = thumb1_compute_save_reg_mask (); ++ core_saved = bit_count (offsets->saved_regs_mask) * 4; ++ saved = core_saved; + if (TARGET_BACKTRACE) + saved += 16; + } + + /* Saved registers include the stack frame. */ +- offsets->saved_regs = offsets->saved_args + saved; ++ offsets->saved_regs = offsets->saved_args + saved + ++ arm_compute_static_chain_stack_bytes(); + offsets->soft_frame = offsets->saved_regs + CALLER_INTERWORKING_SLOT_SIZE; + /* A leaf function does not need any stack alignment if it has nothing + on the stack. */ +@@ -12023,7 +13996,39 @@ arm_get_frame_offsets (void) + /* Ensure SFP has the correct alignment. 
*/ + if (ARM_DOUBLEWORD_ALIGN + && (offsets->soft_frame & 7)) +- offsets->soft_frame += 4; ++ { ++ offsets->soft_frame += 4; ++ /* Try to align stack by pushing an extra reg. Don't bother doing this ++ when there is a stack frame as the alignment will be rolled into ++ the normal stack adjustment. */ ++ if (frame_size + current_function_outgoing_args_size == 0) ++ { ++ int reg = -1; ++ ++ for (i = 4; i <= (TARGET_THUMB1 ? LAST_LO_REGNUM : 11); i++) ++ { ++ if ((offsets->saved_regs_mask & (1 << i)) == 0) ++ { ++ reg = i; ++ break; ++ } ++ } ++ ++ if (reg == -1 && arm_size_return_regs () <= 12 ++ && !cfun->tail_call_emit) ++ { ++ /* Push/pop an argument register (r3) if all callee saved ++ registers are already being pushed. */ ++ reg = 3; ++ } ++ ++ if (reg != -1) ++ { ++ offsets->saved_regs += 4; ++ offsets->saved_regs_mask |= (1 << reg); ++ } ++ } ++ } + + offsets->locals_base = offsets->soft_frame + frame_size; + offsets->outgoing_args = (offsets->locals_base +@@ -12069,14 +14074,9 @@ arm_compute_initial_elimination_offset ( + return offsets->soft_frame - offsets->saved_args; + + case ARM_HARD_FRAME_POINTER_REGNUM: +- /* If there is no stack frame then the hard +- frame pointer and the arg pointer coincide. */ +- if (offsets->frame == offsets->saved_regs) +- return 0; +- /* FIXME: Not sure about this. Maybe we should always return 0 ? */ +- return (frame_pointer_needed +- && cfun->static_chain_decl != NULL +- && ! cfun->machine->uses_anonymous_args) ? 4 : 0; ++ /* This is only non-zero in the case where the static chain register ++ is stored above the frame. */ ++ return offsets->frame - offsets->saved_args - 4; + + case STACK_POINTER_REGNUM: + /* If nothing has been pushed on the stack at all +@@ -12229,9 +14229,20 @@ thumb_set_frame_pointer (arm_stack_offse + else + { + emit_insn (gen_movsi (hard_frame_pointer_rtx, GEN_INT (amount))); +- insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx, +- hard_frame_pointer_rtx, +- stack_pointer_rtx)); ++ /* Thumb-2 RTL patterns expect sp as the first input. Thumb-1 ++ expects the first two operands to be the same. */ ++ if (TARGET_THUMB2) ++ { ++ insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx, ++ stack_pointer_rtx, ++ hard_frame_pointer_rtx)); ++ } ++ else ++ { ++ insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx, ++ hard_frame_pointer_rtx, ++ stack_pointer_rtx)); ++ } + dwarf = gen_rtx_SET (VOIDmode, hard_frame_pointer_rtx, + plus_constant (stack_pointer_rtx, amount)); + RTX_FRAME_RELATED_P (dwarf) = 1; +@@ -12268,7 +14279,8 @@ arm_expand_prologue (void) + args_to_push = current_function_pretend_args_size; + + /* Compute which register we will have to save onto the stack. */ +- live_regs_mask = arm_compute_save_reg_mask (); ++ offsets = arm_get_frame_offsets (); ++ live_regs_mask = offsets->saved_regs_mask; + + ip_rtx = gen_rtx_REG (SImode, IP_REGNUM); + +@@ -12292,7 +14304,9 @@ arm_expand_prologue (void) + + r0 = gen_rtx_REG (SImode, 0); + r1 = gen_rtx_REG (SImode, 1); +- dwarf = gen_rtx_UNSPEC (SImode, NULL_RTVEC, UNSPEC_STACK_ALIGN); ++ /* Use a real rtvec rather than NULL_RTVEC so the rest of the ++ compiler won't choke. 
*/
++ dwarf = gen_rtx_UNSPEC (SImode, rtvec_alloc (0), UNSPEC_STACK_ALIGN);
+ dwarf = gen_rtx_SET (VOIDmode, r0, dwarf);
+ insn = gen_movsi (r0, stack_pointer_rtx);
+ RTX_FRAME_RELATED_P (insn) = 1;
+@@ -12303,7 +14317,10 @@ arm_expand_prologue (void)
+ emit_insn (gen_movsi (stack_pointer_rtx, r1));
+ }
+
+- if (frame_pointer_needed && TARGET_ARM)
++ /* For APCS frames, if IP register is clobbered
++ when creating frame, save that register in a special
++ way. */
++ if (TARGET_APCS_FRAME && frame_pointer_needed && TARGET_ARM)
+ {
+ if (IS_INTERRUPT (func_type))
+ {
+@@ -12347,6 +14364,9 @@ arm_expand_prologue (void)
+ insn = emit_set_insn (gen_rtx_REG (SImode, 3), ip_rtx);
+ else if (args_to_push == 0)
+ {
++ gcc_assert(arm_compute_static_chain_stack_bytes() == 4);
++ saved_regs += 4;
++
+ rtx dwarf;
+
+ insn = gen_rtx_PRE_DEC (SImode, stack_pointer_rtx);
+@@ -12402,13 +14422,13 @@ arm_expand_prologue (void)
+ }
+
+ /* If this is an interrupt service routine, and the link register
+- is going to be pushed, and we are not creating a stack frame,
+- (which would involve an extra push of IP and a pop in the epilogue)
++ is going to be pushed, and we're not generating an extra
++ push of IP (needed when a frame is needed and the frame layout is APCS),
+ subtracting four from LR now will mean that the function return
+ can be done with a single instruction. */
+ if ((func_type == ARM_FT_ISR || func_type == ARM_FT_FIQ)
+ && (live_regs_mask & (1 << LR_REGNUM)) != 0
+- && ! frame_pointer_needed
++ && !(frame_pointer_needed && TARGET_APCS_FRAME)
+ && TARGET_ARM)
+ {
+ rtx lr = gen_rtx_REG (SImode, LR_REGNUM);
+@@ -12418,8 +14438,28 @@ arm_expand_prologue (void)
+
+ if (live_regs_mask)
+ {
+- insn = emit_multi_reg_push (live_regs_mask);
+ saved_regs += bit_count (live_regs_mask) * 4;
++ if (optimize_size && !frame_pointer_needed
++ && saved_regs == offsets->saved_regs - offsets->saved_args)
++ {
++ /* If no coprocessor registers are being pushed and we don't have
++ to worry about a frame pointer then push extra registers to
++ create the stack frame. This is done in a way that does not
++ alter the frame layout, so is independent of the epilogue. */
++ int n;
++ int frame;
++ n = 0;
++ while (n < 8 && (live_regs_mask & (1 << n)) == 0)
++ n++;
++ frame = offsets->outgoing_args - (offsets->saved_args + saved_regs);
++ if (frame && n * 4 >= frame)
++ {
++ n = frame / 4;
++ live_regs_mask |= (1 << n) - 1;
++ saved_regs += frame;
++ }
++ }
++ insn = emit_multi_reg_push (live_regs_mask);
+ RTX_FRAME_RELATED_P (insn) = 1;
+ }
+
+@@ -12429,6 +14469,7 @@ arm_expand_prologue (void)
+ if (frame_pointer_needed && TARGET_ARM)
+ {
+ /* Create the new frame pointer. */
++ if (TARGET_APCS_FRAME)
+ {
+ insn = GEN_INT (-(4 + args_to_push + fp_offset));
+ insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx, ip_rtx, insn));
+@@ -12450,9 +14491,15 @@ arm_expand_prologue (void)
+ emit_insn (gen_prologue_use (ip_rtx));
+ }
+ }
++ else
++ {
++ insn = GEN_INT (saved_regs - 4);
++ insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx,
++ stack_pointer_rtx, insn));
++ RTX_FRAME_RELATED_P (insn) = 1;
++ }
+ }
+
+- offsets = arm_get_frame_offsets ();
+ if (offsets->outgoing_args != offsets->saved_args + saved_regs)
+ {
+ /* This add can produce multiple insns for a large constant, so we
+@@ -12633,10 +14680,21 @@ arm_print_operand (FILE *stream, rtx x,
+ }
+ return;
+
+- /* An integer without a preceding # sign. */
++ /* An integer or symbol address without a preceding # sign. 
*/ + case 'c': +- gcc_assert (GET_CODE (x) == CONST_INT); +- fprintf (stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (x)); ++ switch (GET_CODE (x)) ++ { ++ case CONST_INT: ++ fprintf (stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (x)); ++ break; ++ ++ case SYMBOL_REF: ++ output_addr_const (stream, x); ++ break; ++ ++ default: ++ gcc_unreachable (); ++ } + return; + + case 'B': +@@ -12693,7 +14751,11 @@ arm_print_operand (FILE *stream, rtx x, + { + fprintf (stream, ", %s ", shift); + if (val == -1) +- arm_print_operand (stream, XEXP (x, 1), 0); ++ { ++ arm_print_operand (stream, XEXP (x, 1), 0); ++ if (janus2_code) ++ fprintf(stream, "\n\tnop"); ++ } + else + fprintf (stream, "#" HOST_WIDE_INT_PRINT_DEC, val); + } +@@ -13031,6 +15093,49 @@ arm_print_operand (FILE *stream, rtx x, + } + return; + ++ /* Memory operand for vld1/vst1 instruction. */ ++ case 'A': ++ { ++ rtx addr; ++ bool postinc = FALSE; ++ gcc_assert (GET_CODE (x) == MEM); ++ addr = XEXP (x, 0); ++ if (GET_CODE (addr) == POST_INC) ++ { ++ postinc = 1; ++ addr = XEXP (addr, 0); ++ } ++ asm_fprintf (stream, "[%r]", REGNO (addr)); ++ if (postinc) ++ fputs("!", stream); ++ } ++ return; ++ ++ /* Register specifier for vld1.16/vst1.16. Translate the S register ++ number into a D register number and element index. */ ++ case 'z': ++ { ++ int mode = GET_MODE (x); ++ int regno; ++ ++ if (GET_MODE_SIZE (mode) != 2 || GET_CODE (x) != REG) ++ { ++ output_operand_lossage ("invalid operand for code '%c'", code); ++ return; ++ } ++ ++ regno = REGNO (x); ++ if (!VFP_REGNO_OK_FOR_SINGLE (regno)) ++ { ++ output_operand_lossage ("invalid operand for code '%c'", code); ++ return; ++ } ++ ++ regno = regno - FIRST_VFP_REGNUM; ++ fprintf (stream, "d%d[%d]", regno/2, ((regno % 2) ? 2 : 0)); ++ } ++ return; ++ + default: + if (x == 0) + { +@@ -13064,6 +15169,12 @@ arm_print_operand (FILE *stream, rtx x, + default: + gcc_assert (GET_CODE (x) != NEG); + fputc ('#', stream); ++ if (GET_CODE (x) == HIGH) ++ { ++ fputs (":lower16:", stream); ++ x = XEXP (x, 0); ++ } ++ + output_addr_const (stream, x); + break; + } +@@ -13104,28 +15215,16 @@ arm_assemble_integer (rtx x, unsigned in + if (arm_vector_mode_supported_p (mode)) + { + int i, units; +- unsigned int invmask = 0, parts_per_word; + + gcc_assert (GET_CODE (x) == CONST_VECTOR); + + units = CONST_VECTOR_NUNITS (x); + size = GET_MODE_SIZE (GET_MODE_INNER (mode)); + +- /* For big-endian Neon vectors, we must permute the vector to the form +- which, when loaded by a VLDR or VLDM instruction, will give a vector +- with the elements in the right order. */ +- if (TARGET_NEON && WORDS_BIG_ENDIAN) +- { +- parts_per_word = UNITS_PER_WORD / size; +- /* FIXME: This might be wrong for 64-bit vector elements, but we don't +- support those anywhere yet. */ +- invmask = (parts_per_word == 0) ? 0 : (1 << (parts_per_word - 1)) - 1; +- } +- + if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT) + for (i = 0; i < units; i++) + { +- rtx elt = CONST_VECTOR_ELT (x, i ^ invmask); ++ rtx elt = CONST_VECTOR_ELT (x, i); + assemble_integer + (elt, size, i == 0 ? BIGGEST_ALIGNMENT : size * BITS_PER_UNIT, 1); + } +@@ -13467,6 +15566,10 @@ arm_final_prescan_insn (rtx insn) + first insn after the following code_label if REVERSE is true. */ + rtx start_insn = insn; + ++ /* Don't do this if we're not considering conditional execution. */ ++ if (TARGET_NO_SINGLE_COND_EXEC) ++ return; ++ + /* If in state 4, check if the target branch is reached, in order to + change back to state 0. 
*/ + if (arm_ccfsm_state == 4) +@@ -13840,6 +15943,12 @@ arm_hard_regno_mode_ok (unsigned int reg + if (mode == DFmode) + return VFP_REGNO_OK_FOR_DOUBLE (regno); + ++ /* VFP registers can hold HFmode values, but there is no point in ++ putting them there unless we have the NEON extensions for ++ loading/storing them, too. */ ++ if (mode == HFmode) ++ return TARGET_NEON_FP16 && VFP_REGNO_OK_FOR_SINGLE (regno); ++ + if (TARGET_NEON) + return (VALID_NEON_DREG_MODE (mode) && VFP_REGNO_OK_FOR_DOUBLE (regno)) + || (VALID_NEON_QREG_MODE (mode) +@@ -13859,16 +15968,16 @@ arm_hard_regno_mode_ok (unsigned int reg + return mode == SImode; + + if (IS_IWMMXT_REGNUM (regno)) +- return VALID_IWMMXT_REG_MODE (mode); ++ return VALID_IWMMXT_REG_MODE (mode) && mode != SImode; + } + +- /* We allow any value to be stored in the general registers. ++ /* We allow almost any value to be stored in the general registers. + Restrict doubleword quantities to even register pairs so that we can +- use ldrd. Do not allow Neon structure opaque modes in general registers; +- they would use too many. */ ++ use ldrd. Do not allow very large Neon structure opaque modes in ++ general registers; they would use too many. */ + if (regno <= LAST_ARM_REGNUM) + return !(TARGET_LDRD && GET_MODE_SIZE (mode) > 4 && (regno & 1) != 0) +- && !VALID_NEON_STRUCT_MODE (mode); ++ && ARM_NUM_REGS (mode) <= 4; + + if (regno == FRAME_POINTER_REGNUM + || regno == ARG_POINTER_REGNUM) +@@ -14913,6 +17022,24 @@ arm_init_neon_builtins (void) + TYPE_PRECISION (neon_float_type_node) = FLOAT_TYPE_SIZE; + layout_type (neon_float_type_node); + ++ /* Define typedefs which exactly correspond to the modes we are basing vector ++ types on. If you change these names you'll need to change ++ the table used by arm_mangle_type too. */ ++ (*lang_hooks.types.register_builtin_type) (neon_intQI_type_node, ++ "__builtin_neon_qi"); ++ (*lang_hooks.types.register_builtin_type) (neon_intHI_type_node, ++ "__builtin_neon_hi"); ++ (*lang_hooks.types.register_builtin_type) (neon_intSI_type_node, ++ "__builtin_neon_si"); ++ (*lang_hooks.types.register_builtin_type) (neon_float_type_node, ++ "__builtin_neon_sf"); ++ (*lang_hooks.types.register_builtin_type) (neon_intDI_type_node, ++ "__builtin_neon_di"); ++ (*lang_hooks.types.register_builtin_type) (neon_polyQI_type_node, ++ "__builtin_neon_poly8"); ++ (*lang_hooks.types.register_builtin_type) (neon_polyHI_type_node, ++ "__builtin_neon_poly16"); ++ + intQI_pointer_node = build_pointer_type (neon_intQI_type_node); + intHI_pointer_node = build_pointer_type (neon_intHI_type_node); + intSI_pointer_node = build_pointer_type (neon_intSI_type_node); +@@ -14965,12 +17092,32 @@ arm_init_neon_builtins (void) + intUSI_type_node = make_unsigned_type (GET_MODE_PRECISION (SImode)); + intUDI_type_node = make_unsigned_type (GET_MODE_PRECISION (DImode)); + ++ (*lang_hooks.types.register_builtin_type) (intUQI_type_node, ++ "__builtin_neon_uqi"); ++ (*lang_hooks.types.register_builtin_type) (intUHI_type_node, ++ "__builtin_neon_uhi"); ++ (*lang_hooks.types.register_builtin_type) (intUSI_type_node, ++ "__builtin_neon_usi"); ++ (*lang_hooks.types.register_builtin_type) (intUDI_type_node, ++ "__builtin_neon_udi"); ++ + /* Opaque integer types for structures of vectors. 
*/ + intEI_type_node = make_signed_type (GET_MODE_PRECISION (EImode)); + intOI_type_node = make_signed_type (GET_MODE_PRECISION (OImode)); + intCI_type_node = make_signed_type (GET_MODE_PRECISION (CImode)); + intXI_type_node = make_signed_type (GET_MODE_PRECISION (XImode)); + ++ (*lang_hooks.types.register_builtin_type) (intTI_type_node, ++ "__builtin_neon_ti"); ++ (*lang_hooks.types.register_builtin_type) (intEI_type_node, ++ "__builtin_neon_ei"); ++ (*lang_hooks.types.register_builtin_type) (intOI_type_node, ++ "__builtin_neon_oi"); ++ (*lang_hooks.types.register_builtin_type) (intCI_type_node, ++ "__builtin_neon_ci"); ++ (*lang_hooks.types.register_builtin_type) (intXI_type_node, ++ "__builtin_neon_xi"); ++ + /* Pointers to vector types. */ + V8QI_pointer_node = build_pointer_type (V8QI_type_node); + V4HI_pointer_node = build_pointer_type (V4HI_type_node); +@@ -15014,44 +17161,6 @@ arm_init_neon_builtins (void) + build_function_type_list (void_type_node, V2DI_pointer_node, V2DI_type_node, + V2DI_type_node, NULL); + +- /* Define typedefs which exactly correspond to the modes we are basing vector +- types on. If you change these names you'll need to change +- the table used by arm_mangle_type too. */ +- (*lang_hooks.types.register_builtin_type) (neon_intQI_type_node, +- "__builtin_neon_qi"); +- (*lang_hooks.types.register_builtin_type) (neon_intHI_type_node, +- "__builtin_neon_hi"); +- (*lang_hooks.types.register_builtin_type) (neon_intSI_type_node, +- "__builtin_neon_si"); +- (*lang_hooks.types.register_builtin_type) (neon_float_type_node, +- "__builtin_neon_sf"); +- (*lang_hooks.types.register_builtin_type) (neon_intDI_type_node, +- "__builtin_neon_di"); +- +- (*lang_hooks.types.register_builtin_type) (neon_polyQI_type_node, +- "__builtin_neon_poly8"); +- (*lang_hooks.types.register_builtin_type) (neon_polyHI_type_node, +- "__builtin_neon_poly16"); +- (*lang_hooks.types.register_builtin_type) (intUQI_type_node, +- "__builtin_neon_uqi"); +- (*lang_hooks.types.register_builtin_type) (intUHI_type_node, +- "__builtin_neon_uhi"); +- (*lang_hooks.types.register_builtin_type) (intUSI_type_node, +- "__builtin_neon_usi"); +- (*lang_hooks.types.register_builtin_type) (intUDI_type_node, +- "__builtin_neon_udi"); +- +- (*lang_hooks.types.register_builtin_type) (intTI_type_node, +- "__builtin_neon_ti"); +- (*lang_hooks.types.register_builtin_type) (intEI_type_node, +- "__builtin_neon_ei"); +- (*lang_hooks.types.register_builtin_type) (intOI_type_node, +- "__builtin_neon_oi"); +- (*lang_hooks.types.register_builtin_type) (intCI_type_node, +- "__builtin_neon_ci"); +- (*lang_hooks.types.register_builtin_type) (intXI_type_node, +- "__builtin_neon_xi"); +- + dreg_types[0] = V8QI_type_node; + dreg_types[1] = V4HI_type_node; + dreg_types[2] = V2SI_type_node; +@@ -15325,6 +17434,15 @@ arm_init_neon_builtins (void) + } + + static void ++arm_init_fp16_builtins (void) ++{ ++ tree fp16_type = make_node (REAL_TYPE); ++ TYPE_PRECISION (fp16_type) = 16; ++ layout_type (fp16_type); ++ (*lang_hooks.types.register_builtin_type) (fp16_type, "__fp16"); ++} ++ ++static void + arm_init_builtins (void) + { + arm_init_tls_builtins (); +@@ -15334,6 +17452,52 @@ arm_init_builtins (void) + + if (TARGET_NEON) + arm_init_neon_builtins (); ++ ++ if (arm_fp16_format) ++ arm_init_fp16_builtins (); ++} ++ ++/* Implement TARGET_INVALID_PARAMETER_TYPE. 
*/
++
++static const char *
++arm_invalid_parameter_type (const_tree t)
++{
++ if (SCALAR_FLOAT_TYPE_P (t) && TYPE_PRECISION (t) == 16)
++ return N_("function parameters cannot have __fp16 type");
++ return NULL;
++}
++
++/* Implement TARGET_INVALID_RETURN_TYPE. */
++
++static const char *
++arm_invalid_return_type (const_tree t)
++{
++ if (SCALAR_FLOAT_TYPE_P (t) && TYPE_PRECISION (t) == 16)
++ return N_("functions cannot return __fp16 type");
++ return NULL;
++}
++
++/* Implement TARGET_PROMOTED_TYPE. */
++
++static tree
++arm_promoted_type (const_tree t)
++{
++ if (SCALAR_FLOAT_TYPE_P (t) && TYPE_PRECISION (t) == 16)
++ return float_type_node;
++ return NULL_TREE;
++}
++
++/* Implement TARGET_CONVERT_TO_TYPE. */
++static tree
++arm_convert_to_type (tree type, tree expr)
++{
++ tree fromtype = TREE_TYPE (expr);
++ if (!SCALAR_FLOAT_TYPE_P (fromtype) || !SCALAR_FLOAT_TYPE_P (type))
++ return NULL_TREE;
++ if ((TYPE_PRECISION (fromtype) == 16 && TYPE_PRECISION (type) > 32)
++ || (TYPE_PRECISION (type) == 16 && TYPE_PRECISION (fromtype) > 32))
++ return convert (type, convert (float_type_node, expr));
++ return NULL_TREE;
+ }
+
+ /* Errors in the source file can cause expand_expr to return const0_rtx
+@@ -16514,6 +18678,7 @@ is_called_in_ARM_mode (tree func)
+ const char *
+ thumb_unexpanded_epilogue (void)
+ {
++ arm_stack_offsets *offsets;
+ int regno;
+ unsigned long live_regs_mask = 0;
+ int high_regs_pushed = 0;
+@@ -16526,7 +18691,8 @@ thumb_unexpanded_epilogue (void)
+ if (IS_NAKED (arm_current_func_type ()))
+ return "";
+
+- live_regs_mask = thumb1_compute_save_reg_mask ();
++ offsets = arm_get_frame_offsets ();
++ live_regs_mask = offsets->saved_regs_mask;
+ high_regs_pushed = bit_count (live_regs_mask & 0x0f00);
+
+ /* If we can deduce the registers used from the function's return value.
+@@ -16788,7 +18954,8 @@ thumb1_expand_prologue (void)
+ return;
+ }
+
+- live_regs_mask = thumb1_compute_save_reg_mask ();
++ offsets = arm_get_frame_offsets ();
++ live_regs_mask = offsets->saved_regs_mask;
+ /* Load the pic register before setting the frame pointer,
+ so we can use r7 as a temporary work register. */
+ if (flag_pic && arm_pic_register != INVALID_REGNUM)
+@@ -16798,7 +18965,6 @@ thumb1_expand_prologue (void)
+ emit_move_insn (gen_rtx_REG (Pmode, ARM_HARD_FRAME_POINTER_REGNUM),
+ stack_pointer_rtx);
+
+- offsets = arm_get_frame_offsets ();
+ amount = offsets->outgoing_args - offsets->saved_regs;
+ if (amount)
+ {
+@@ -16827,62 +18993,25 @@ thumb1_expand_prologue (void)
+ been pushed at the start of the prologue and so we can corrupt
+ it now. */
+ for (regno = LAST_ARG_REGNUM + 1; regno <= LAST_LO_REGNUM; regno++)
+- if (live_regs_mask & (1 << regno)
+- && !(frame_pointer_needed
+- && (regno == THUMB_HARD_FRAME_POINTER_REGNUM)))
++ if (live_regs_mask & (1 << regno))
+ break;
+
+- if (regno > LAST_LO_REGNUM) /* Very unlikely. */
+- {
+- rtx spare = gen_rtx_REG (SImode, IP_REGNUM);
+-
+- /* Choose an arbitrary, non-argument low register. */
+- reg = gen_rtx_REG (SImode, LAST_LO_REGNUM);
+-
+- /* Save it by copying it into a high, scratch register. */
+- emit_insn (gen_movsi (spare, reg));
+- /* Add a USE to stop propagate_one_insn() from barfing. */
+- emit_insn (gen_prologue_use (spare));
++ gcc_assert(regno <= LAST_LO_REGNUM);
+
+- /* Decrement the stack. 
*/ +- emit_insn (gen_movsi (reg, GEN_INT (- amount))); +- insn = emit_insn (gen_addsi3 (stack_pointer_rtx, +- stack_pointer_rtx, reg)); +- RTX_FRAME_RELATED_P (insn) = 1; +- dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx, +- plus_constant (stack_pointer_rtx, +- -amount)); +- RTX_FRAME_RELATED_P (dwarf) = 1; +- REG_NOTES (insn) +- = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf, +- REG_NOTES (insn)); +- +- /* Restore the low register's original value. */ +- emit_insn (gen_movsi (reg, spare)); +- +- /* Emit a USE of the restored scratch register, so that flow +- analysis will not consider the restore redundant. The +- register won't be used again in this function and isn't +- restored by the epilogue. */ +- emit_insn (gen_prologue_use (reg)); +- } +- else +- { +- reg = gen_rtx_REG (SImode, regno); ++ reg = gen_rtx_REG (SImode, regno); + +- emit_insn (gen_movsi (reg, GEN_INT (- amount))); ++ emit_insn (gen_movsi (reg, GEN_INT (- amount))); + +- insn = emit_insn (gen_addsi3 (stack_pointer_rtx, +- stack_pointer_rtx, reg)); +- RTX_FRAME_RELATED_P (insn) = 1; +- dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx, +- plus_constant (stack_pointer_rtx, +- -amount)); +- RTX_FRAME_RELATED_P (dwarf) = 1; +- REG_NOTES (insn) +- = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf, +- REG_NOTES (insn)); +- } ++ insn = emit_insn (gen_addsi3 (stack_pointer_rtx, ++ stack_pointer_rtx, reg)); ++ RTX_FRAME_RELATED_P (insn) = 1; ++ dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx, ++ plus_constant (stack_pointer_rtx, ++ -amount)); ++ RTX_FRAME_RELATED_P (dwarf) = 1; ++ REG_NOTES (insn) ++ = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf, ++ REG_NOTES (insn)); + } + } + +@@ -16960,6 +19089,7 @@ thumb1_expand_epilogue (void) + static void + thumb1_output_function_prologue (FILE *f, HOST_WIDE_INT size ATTRIBUTE_UNUSED) + { ++ arm_stack_offsets *offsets; + unsigned long live_regs_mask = 0; + unsigned long l_mask; + unsigned high_regs_pushed = 0; +@@ -17044,7 +19174,8 @@ thumb1_output_function_prologue (FILE *f + } + + /* Get the registers we are going to push. */ +- live_regs_mask = thumb1_compute_save_reg_mask (); ++ offsets = arm_get_frame_offsets (); ++ live_regs_mask = offsets->saved_regs_mask; + /* Extract a mask of the ones we can give to the Thumb's push instruction. */ + l_mask = live_regs_mask & 0x40ff; + /* Then count how many other high registers will need to be pushed. */ +@@ -17551,14 +19682,22 @@ arm_file_start (void) + fpu_name = "vfp"; + set_float_abi_attributes = 1; + break; ++ case FPUTYPE_VFP3D16: ++ fpu_name = "vfpv3-d16"; ++ set_float_abi_attributes = 1; ++ break; + case FPUTYPE_VFP3: +- fpu_name = "vfp3"; ++ fpu_name = "vfpv3"; + set_float_abi_attributes = 1; + break; + case FPUTYPE_NEON: + fpu_name = "neon"; + set_float_abi_attributes = 1; + break; ++ case FPUTYPE_NEON_FP16: ++ fpu_name = "neon-fp16"; ++ set_float_abi_attributes = 1; ++ break; + default: + abort(); + } +@@ -17612,6 +19751,11 @@ arm_file_start (void) + val = 6; + asm_fprintf (asm_out_file, "\t.eabi_attribute 30, %d\n", val); + ++ /* Tag_ABI_FP_16bit_format. */ ++ if (arm_fp16_format) ++ asm_fprintf (asm_out_file, "\t.eabi_attribute 38, %d\n", ++ (int)arm_fp16_format); ++ + if (arm_lang_output_object_attributes_hook) + arm_lang_output_object_attributes_hook(); + } +@@ -17694,12 +19838,23 @@ arm_output_mi_thunk (FILE *file, tree th + ? 1 : 0); + if (mi_delta < 0) + mi_delta = - mi_delta; +- /* When generating 16-bit thumb code, thunks are entered in arm mode. 
*/ ++ + if (TARGET_THUMB1) + { + int labelno = thunk_label++; + ASM_GENERATE_INTERNAL_LABEL (label, "LTHUMBFUNC", labelno); +- fputs ("\tldr\tr12, ", file); ++ /* Thunks are entered in arm mode when avaiable. */ ++ if (TARGET_THUMB1_ONLY) ++ { ++ /* push r3 so we can use it as a temporary. */ ++ /* TODO: Omit this save if r3 is not used. */ ++ fputs ("\tpush {r3}\n", file); ++ fputs ("\tldr\tr3, ", file); ++ } ++ else ++ { ++ fputs ("\tldr\tr12, ", file); ++ } + assemble_name (file, label); + fputc ('\n', file); + if (flag_pic) +@@ -17713,29 +19868,63 @@ arm_output_mi_thunk (FILE *file, tree th + + Note that we have "+ 1" because some versions of GNU ld + don't set the low bit of the result for R_ARM_REL32 +- relocations against thumb function symbols. */ ++ relocations against thumb function symbols. ++ On ARMV6M this is +4, not +8. */ + ASM_GENERATE_INTERNAL_LABEL (labelpc, "LTHUNKPC", labelno); + assemble_name (file, labelpc); + fputs (":\n", file); +- fputs ("\tadd\tr12, pc, r12\n", file); ++ if (TARGET_THUMB1_ONLY) ++ { ++ /* This is 2 insns after the start of the thunk, so we know it ++ is 4-byte aligned. */ ++ fputs ("\tadd\tr3, pc, r3\n", file); ++ fputs ("\tmov r12, r3\n", file); ++ } ++ else ++ fputs ("\tadd\tr12, pc, r12\n", file); + } ++ else if (TARGET_THUMB1_ONLY) ++ fputs ("\tmov r12, r3\n", file); + } +- /* TODO: Use movw/movt for large constants when available. */ +- while (mi_delta != 0) ++ if (TARGET_THUMB1_ONLY) + { +- if ((mi_delta & (3 << shift)) == 0) +- shift += 2; +- else +- { +- asm_fprintf (file, "\t%s\t%r, %r, #%d\n", +- mi_op, this_regno, this_regno, +- mi_delta & (0xff << shift)); +- mi_delta &= ~(0xff << shift); +- shift += 8; +- } ++ if (mi_delta > 255) ++ { ++ fputs ("\tldr\tr3, ", file); ++ assemble_name (file, label); ++ fputs ("+4\n", file); ++ asm_fprintf (file, "\t%s\t%r, %r, r3\n", ++ mi_op, this_regno, this_regno); ++ } ++ else if (mi_delta != 0) ++ { ++ asm_fprintf (file, "\t%s\t%r, %r, #%d\n", ++ mi_op, this_regno, this_regno, ++ mi_delta); ++ } ++ } ++ else ++ { ++ /* TODO: Use movw/movt for large constants when available. */ ++ while (mi_delta != 0) ++ { ++ if ((mi_delta & (3 << shift)) == 0) ++ shift += 2; ++ else ++ { ++ asm_fprintf (file, "\t%s\t%r, %r, #%d\n", ++ mi_op, this_regno, this_regno, ++ mi_delta & (0xff << shift)); ++ mi_delta &= ~(0xff << shift); ++ shift += 8; ++ } ++ } + } + if (TARGET_THUMB1) + { ++ if (TARGET_THUMB1_ONLY) ++ fputs ("\tpop\t{r3}\n", file); ++ + fprintf (file, "\tbx\tr12\n"); + ASM_OUTPUT_ALIGN (file, 2); + assemble_name (file, label); +@@ -17754,6 +19943,9 @@ arm_output_mi_thunk (FILE *file, tree th + else + /* Output ".word .LTHUNKn". */ + assemble_integer (XEXP (DECL_RTL (function), 0), 4, BITS_PER_WORD, 1); ++ ++ if (TARGET_THUMB1_ONLY && mi_delta > 255) ++ assemble_integer (GEN_INT(mi_delta), 4, BITS_PER_WORD, 1); + } + else + { +@@ -17793,6 +19985,23 @@ arm_emit_vector_const (FILE *file, rtx x + return 1; + } + ++/* Emit a fp16 constant appropriately padded to occupy a 4-byte word. ++ HFmode constant pool entries are actually loaded with ldr. */ ++void ++arm_emit_fp16_const (rtx c) ++{ ++ REAL_VALUE_TYPE r; ++ long bits; ++ ++ REAL_VALUE_FROM_CONST_DOUBLE (r, c); ++ bits = real_to_target (NULL, &r, HFmode); ++ if (WORDS_BIG_ENDIAN) ++ assemble_zeros (2); ++ assemble_integer (GEN_INT (bits), 2, BITS_PER_WORD, 1); ++ if (!WORDS_BIG_ENDIAN) ++ assemble_zeros (2); ++} ++ + const char * + arm_output_load_gr (rtx *operands) + { +@@ -17830,19 +20039,24 @@ arm_output_load_gr (rtx *operands) + that way. 
*/ + + static void +-arm_setup_incoming_varargs (CUMULATIVE_ARGS *cum, ++arm_setup_incoming_varargs (CUMULATIVE_ARGS *pcum, + enum machine_mode mode, + tree type, + int *pretend_size, + int second_time ATTRIBUTE_UNUSED) + { +- int nregs = cum->nregs; +- if (nregs & 1 +- && ARM_DOUBLEWORD_ALIGN +- && arm_needs_doubleword_align (mode, type)) +- nregs++; +- ++ int nregs; ++ + cfun->machine->uses_anonymous_args = 1; ++ if (pcum->pcs_variant <= ARM_PCS_AAPCS_LOCAL) ++ { ++ nregs = pcum->aapcs_ncrn; ++ if ((nregs & 1) && arm_needs_doubleword_align (mode, type)) ++ nregs++; ++ } ++ else ++ nregs = pcum->nregs; ++ + if (nregs < NUM_ARG_REGS) + *pretend_size = (NUM_ARG_REGS - nregs) * UNITS_PER_WORD; + } +@@ -17956,8 +20170,14 @@ arm_no_early_mul_dep (rtx producer, rtx + op = XVECEXP (op, 0, 0); + op = XEXP (op, 1); + +- return (GET_CODE (op) == PLUS +- && !reg_overlap_mentioned_p (value, XEXP (op, 0))); ++ if (GET_CODE (op) == PLUS || GET_CODE (op) == MINUS) ++ { ++ if (GET_CODE (XEXP (op, 0)) == MULT) ++ return !reg_overlap_mentioned_p (value, XEXP (op, 0)); ++ else ++ return !reg_overlap_mentioned_p (value, XEXP (op, 1)); ++ } ++ return 0; + } + + /* We can't rely on the caller doing the proper promotion when +@@ -18084,7 +20304,8 @@ arm_cxx_key_method_may_be_inline (void) + static void + arm_cxx_determine_class_data_visibility (tree decl) + { +- if (!TARGET_AAPCS_BASED) ++ if (!TARGET_AAPCS_BASED ++ || !TARGET_DLLIMPORT_DECL_ATTRIBUTES) + return; + + /* In general, \S 3.2.5.5 of the ARM EABI requires that class data +@@ -18124,7 +20345,8 @@ arm_set_return_address (rtx source, rtx + rtx addr; + unsigned long saved_regs; + +- saved_regs = arm_compute_save_reg_mask (); ++ offsets = arm_get_frame_offsets (); ++ saved_regs = offsets->saved_regs_mask; + + if ((saved_regs & (1 << LR_REGNUM)) == 0) + emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source); +@@ -18135,7 +20357,6 @@ arm_set_return_address (rtx source, rtx + else + { + /* LR will be the first saved register. */ +- offsets = arm_get_frame_offsets (); + delta = offsets->outgoing_args - (offsets->frame + 4); + + +@@ -18168,11 +20389,10 @@ thumb_set_return_address (rtx source, rt + + emit_insn (gen_rtx_USE (VOIDmode, source)); + +- mask = thumb1_compute_save_reg_mask (); ++ offsets = arm_get_frame_offsets (); ++ mask = offsets->saved_regs_mask; + if (mask & (1 << LR_REGNUM)) + { +- offsets = arm_get_frame_offsets (); +- + limit = 1024; + /* Find the saved regs. */ + if (frame_pointer_needed) +@@ -18219,9 +20439,10 @@ arm_vector_mode_supported_p (enum machin + || mode == V16QImode || mode == V4SFmode || mode == V2DImode)) + return true; + +- if ((mode == V2SImode) +- || (mode == V4HImode) +- || (mode == V8QImode)) ++ if ((TARGET_NEON || TARGET_IWMMXT) ++ && ((mode == V2SImode) ++ || (mode == V4HImode) ++ || (mode == V8QImode))) + return true; + + return false; +@@ -18252,9 +20473,14 @@ arm_dbx_register_number (unsigned int re + if (IS_FPA_REGNUM (regno)) + return (TARGET_AAPCS_BASED ? 96 : 16) + regno - FIRST_FPA_REGNUM; + +- /* FIXME: VFPv3 register numbering. */ + if (IS_VFP_REGNUM (regno)) +- return 64 + regno - FIRST_VFP_REGNUM; ++ { ++ /* See comment in arm_dwarf_register_span. 
*/
++ if (VFP_REGNO_OK_FOR_SINGLE (regno))
++ return 64 + regno - FIRST_VFP_REGNUM;
++ else
++ return 256 + (regno - FIRST_VFP_REGNUM) / 2;
++ }
+
+ if (IS_IWMMXT_GR_REGNUM (regno))
+ return 104 + regno - FIRST_IWMMXT_GR_REGNUM;
+@@ -18265,6 +20491,39 @@ arm_dbx_register_number (unsigned int re
+ gcc_unreachable ();
+ }
+
++/* Dwarf models VFPv3 registers as 32 64-bit registers.
++ GCC models them as 64 32-bit registers, so we need to describe this to
++ the DWARF generation code. Other registers can use the default. */
++static rtx
++arm_dwarf_register_span(rtx rtl)
++{
++ unsigned regno;
++ int nregs;
++ int i;
++ rtx p;
++
++ regno = REGNO (rtl);
++ if (!IS_VFP_REGNUM (regno))
++ return NULL_RTX;
++
++ /* The EABI defines two VFP register ranges:
++ 64-95: Legacy VFPv2 numbering for S0-S31 (obsolescent)
++ 256-287: D0-D31
++ The recommended encoding for s0-s31 is a DW_OP_bit_piece of the
++ corresponding D register. However gdb6.6 does not support this, so
++ we use the legacy encodings. We also use these encodings for D0-D15
++ for compatibility with older debuggers. */
++ if (VFP_REGNO_OK_FOR_SINGLE (regno))
++ return NULL_RTX;
++
++ nregs = GET_MODE_SIZE (GET_MODE (rtl)) / 8;
++ p = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc(nregs));
++ regno = (regno - FIRST_VFP_REGNUM) / 2;
++ for (i = 0; i < nregs; i++)
++ XVECEXP (p, 0, i) = gen_rtx_REG (DImode, 256 + regno + i);
++
++ return p;
++}
+
+ #ifdef TARGET_UNWIND_INFO
+ /* Emit unwind directives for a store-multiple instruction or stack pointer
+@@ -18483,6 +20742,11 @@ arm_unwind_emit (FILE * asm_out_file, rt
+ if (!ARM_EABI_UNWIND_TABLES)
+ return;
+
++ if (!(flag_unwind_tables || cfun->uses_eh_lsda)
++ && (TREE_NOTHROW (current_function_decl)
++ || cfun->all_throwers_are_sibcalls))
++ return;
++
+ if (GET_CODE (insn) == NOTE || !RTX_FRAME_RELATED_P (insn))
+ return;
+
+@@ -18563,7 +20827,17 @@ arm_output_fn_unwind (FILE * f, bool pro
+ if (prologue)
+ fputs ("\t.fnstart\n", f);
+ else
+- fputs ("\t.fnend\n", f);
++ {
++ /* If this function will never be unwound, then mark it as such.
++ The same condition is used in arm_unwind_emit to suppress
++ the frame annotations. */
++ if (!(flag_unwind_tables || cfun->uses_eh_lsda)
++ && (TREE_NOTHROW (current_function_decl)
++ || cfun->all_throwers_are_sibcalls))
++ fputs("\t.cantunwind\n", f);
++
++ fputs ("\t.fnend\n", f);
++ }
+ }
+
+ static bool
+@@ -18757,6 +21031,17 @@ arm_mangle_type (const_tree type)
+ {
+ arm_mangle_map_entry *pos = arm_mangle_map;
+
++ /* Half-precision float. */
++ if (TREE_CODE (type) == REAL_TYPE && TYPE_PRECISION (type) == 16)
++ return "Dh";
++
++ /* Although the ARM ABI documents do not specifically say that
++ "__va_list" has to be mangled as if it is in the "std"
++ namespace, that is what RealView does. */
++ if (TARGET_AAPCS_BASED
++ && lang_hooks.types_compatible_p (type, va_list_type))
++ return "St9__va_list";
++
+ if (TREE_CODE (type) != VECTOR_TYPE)
+ return NULL;
+
+@@ -18779,5 +21064,91 @@ arm_mangle_type (const_tree type)
+ vector types. */
+ return NULL;
+ }
++
++/* Return how many instructions the machine can issue per cycle. */
++static int
++arm_issue_rate (void)
++{
++ switch (arm_tune)
++ {
++ case marvell_f:
++ case cortexr4:
++ case cortexr4f:
++ case cortexa8:
++ case cortexa9:
++ return 2;
++ default:
++ return 1;
++ }
++}
++
++/* Return how many instructions to look ahead for better insn
++ scheduling. */
++static int
++arm_multipass_dfa_lookahead (void)
++{
++ return (arm_tune == marvell_f) ? 
4 : 0; ++} ++ ++/* Set default optimization options. */ ++void ++arm_optimization_options (int level, int size ATTRIBUTE_UNUSED) ++{ ++ /* Enable section anchors by default at -O1 or higher. */ ++ flag_section_anchors = (level > 0 ? 1 : 0); ++ ++ if (size) ++ { ++ /* Select optimizations that are a win for code size. ++ ++ The inlining options set below have two important ++ consequences for functions not explicitly marked ++ inline: ++ - Static functions used once are inlined if ++ sufficiently small. Static functions used twice ++ are not inlined. ++ - Non-static functions are never inlined. ++ So in effect, inlining will never cause two copies ++ of function bodies to be created. */ ++ /* Empirical results show that these options benefit code ++ size on arm. */ ++ /* FIXME: -fsee seems to be broken for Thumb-2. */ ++ /* flag_see = 1; */ ++ flag_move_loop_invariants = 0; ++ /* In Thumb mode the function call code size overhead is typically very ++ small, and narrow branch instructions have very limited range. ++ Inlining even medium sized functions tends to bloat the caller and ++ require the use of long branch instructions. On average the long ++ branches cost more than eliminating the function call overhead saves, ++ so we use extremely restrictive automatic inlining heuristics. In ARM ++ mode the results are fairly neutral, probably due to better constant ++ pool placement. */ ++ set_param_value ("max-inline-insns-single", 1); ++ set_param_value ("max-inline-insns-auto", 1); ++ } ++} ++ ++/* Order of allocation of core registers for Thumb: this allocation is ++ written over the corresponding initial entries of the array ++ initialized with REG_ALLOC_ORDER. We allocate all low registers ++ first. Saving and restoring a low register is usually cheaper than ++ using a call-clobbered high register. */ ++ ++static const int thumb_core_reg_alloc_order[] = ++{ ++ 3, 2, 1, 0, 4, 5, 6, 7, ++ 14, 12, 8, 9, 10, 11, 13, 15 ++}; ++ ++/* Adjust register allocation order when compiling for Thumb. */ ++ ++void ++arm_adjust_reg_alloc_order (int *order) ++{ ++ if (TARGET_THUMB) ++ memcpy (order, thumb_core_reg_alloc_order, ++ sizeof (thumb_core_reg_alloc_order)); ++} + + #include "gt-arm.h" ++ +--- a/gcc/config/arm/arm.h ++++ b/gcc/config/arm/arm.h +@@ -84,6 +84,10 @@ extern char arm_arch_name[]; + builtin_define ("__IWMMXT__"); \ + if (TARGET_AAPCS_BASED) \ + builtin_define ("__ARM_EABI__"); \ ++ if (arm_tune_marvell_f) \ ++ builtin_define ("__ARM_TUNE_MARVELL_F__"); \ ++ if (low_irq_latency) \ ++ builtin_define ("__low_irq_latency__"); \ + } while (0) + + /* The various ARM cores. */ +@@ -198,6 +202,13 @@ extern void (*arm_lang_output_object_att + #define TARGET_AAPCS_BASED \ + (arm_abi != ARM_ABI_APCS && arm_abi != ARM_ABI_ATPCS) + ++/* True if we should avoid generating conditional execution instructions. */ ++#define TARGET_NO_COND_EXEC (arm_tune_marvell_f && !optimize_size) ++/* Avoid most conditional instructions, but allow pairs with opposite ++ conditions and the same destination. */ ++#define TARGET_NO_SINGLE_COND_EXEC \ ++ ((arm_tune_cortex_a9 || arm_tune_marvell_f) && !optimize_size) ++ + #define TARGET_HARD_TP (target_thread_pointer == TP_CP15) + #define TARGET_SOFT_TP (target_thread_pointer == TP_SOFT) + +@@ -207,24 +218,36 @@ extern void (*arm_lang_output_object_att + #define TARGET_32BIT (TARGET_ARM || arm_arch_thumb2) + /* 32-bit Thumb-2 code. */ + #define TARGET_THUMB2 (TARGET_THUMB && arm_arch_thumb2) ++/* Thumb-1 only. 
*/ ++#define TARGET_THUMB1_ONLY (TARGET_THUMB1 && !arm_arch_notm) + + /* The following two macros concern the ability to execute coprocessor +- instructions for VFPv3 or NEON. TARGET_VFP3 is currently only ever +- tested when we know we are generating for VFP hardware; we need to +- be more careful with TARGET_NEON as noted below. */ ++ instructions for VFPv3 or NEON. TARGET_VFP3/TARGET_VFPD32 are currently ++ only ever tested when we know we are generating for VFP hardware; we need ++ to be more careful with TARGET_NEON as noted below. */ ++ ++/* FPU is has the full VFPv3/NEON register file of 32 D registers. */ ++#define TARGET_VFPD32 (arm_fp_model == ARM_FP_MODEL_VFP \ ++ && (arm_fpu_arch == FPUTYPE_VFP3 \ ++ || arm_fpu_arch == FPUTYPE_NEON \ ++ || arm_fpu_arch == FPUTYPE_NEON_FP16)) + +-/* FPU is VFPv3 (with twice the number of D registers). Setting the FPU to +- Neon automatically enables VFPv3 too. */ ++/* FPU supports VFPv3 instructions. */ + #define TARGET_VFP3 (arm_fp_model == ARM_FP_MODEL_VFP \ +- && (arm_fpu_arch == FPUTYPE_VFP3 \ +- || arm_fpu_arch == FPUTYPE_NEON)) ++ && (arm_fpu_arch == FPUTYPE_VFP3D16 \ ++ || TARGET_VFPD32)) ++ ++/* FPU supports NEON/VFP half-precision floating-point. */ ++#define TARGET_NEON_FP16 (arm_fpu_arch == FPUTYPE_NEON_FP16) ++ + /* FPU supports Neon instructions. The setting of this macro gets + revealed via __ARM_NEON__ so we add extra guards upon TARGET_32BIT + and TARGET_HARD_FLOAT to ensure that NEON instructions are + available. */ + #define TARGET_NEON (TARGET_32BIT && TARGET_HARD_FLOAT \ + && arm_fp_model == ARM_FP_MODEL_VFP \ +- && arm_fpu_arch == FPUTYPE_NEON) ++ && (arm_fpu_arch == FPUTYPE_NEON \ ++ || arm_fpu_arch == FPUTYPE_NEON_FP16)) + + /* "DSP" multiply instructions, eg. SMULxy. */ + #define TARGET_DSP_MULTIPLY \ +@@ -233,6 +256,9 @@ extern void (*arm_lang_output_object_att + #define TARGET_INT_SIMD \ + (TARGET_32BIT && arm_arch6 && arm_arch_notm) + ++/* Should MOVW/MOVT be used in preference to a constant pool. */ ++#define TARGET_USE_MOVT (arm_arch_thumb2 && !optimize_size) ++ + /* We could use unified syntax for arm mode, but for now we just use it + for Thumb-2. */ + #define TARGET_UNIFIED_ASM TARGET_THUMB2 +@@ -296,10 +322,14 @@ enum fputype + FPUTYPE_MAVERICK, + /* VFP. */ + FPUTYPE_VFP, ++ /* VFPv3-D16. */ ++ FPUTYPE_VFP3D16, + /* VFPv3. */ + FPUTYPE_VFP3, + /* Neon. */ +- FPUTYPE_NEON ++ FPUTYPE_NEON, ++ /* Neon with half-precision float extensions. */ ++ FPUTYPE_NEON_FP16 + }; + + /* Recast the floating point class to be the floating point attribute. */ +@@ -324,6 +354,21 @@ extern enum float_abi_type arm_float_abi + #define TARGET_DEFAULT_FLOAT_ABI ARM_FLOAT_ABI_SOFT + #endif + ++/* Which __fp16 format to use. ++ The enumeration values correspond to the numbering for the ++ Tag_ABI_FP_16bit_format attribute. ++ */ ++enum arm_fp16_format_type ++{ ++ ARM_FP16_FORMAT_NONE = 0, ++ ARM_FP16_FORMAT_IEEE = 1, ++ ARM_FP16_FORMAT_ALTERNATIVE = 2 ++}; ++ ++extern enum arm_fp16_format_type arm_fp16_format; ++#define LARGEST_EXPONENT_IS_NORMAL(bits) \ ++ ((bits) == 16 && arm_fp16_format == ARM_FP16_FORMAT_ALTERNATIVE) ++ + /* Which ABI to use. */ + enum arm_abi_type + { +@@ -376,6 +421,9 @@ extern int arm_ld_sched; + /* Nonzero if generating thumb code. */ + extern int thumb_code; + ++/* Nonzero if generating Janus2 code. */ ++extern int janus2_code; ++ + /* Nonzero if this chip is a StrongARM. */ + extern int arm_tune_strongarm; + +@@ -391,9 +439,15 @@ extern int arm_arch_xscale; + /* Nonzero if tuning for XScale. 
*/ + extern int arm_tune_xscale; + ++/* Nonzero if tuning for Marvell Feroceon. */ ++extern int arm_tune_marvell_f; ++ + /* Nonzero if tuning for stores via the write buffer. */ + extern int arm_tune_wbuf; + ++/* Nonzero if tuning for Cortex-A9. */ ++extern int arm_tune_cortex_a9; ++ + /* Nonzero if we should define __THUMB_INTERWORK__ in the + preprocessor. + XXX This is a bit of a hack, it's intended to help work around +@@ -407,6 +461,10 @@ extern int arm_arch_thumb2; + /* Nonzero if chip supports integer division instruction. */ + extern int arm_arch_hwdiv; + ++/* Nonzero if we should minimize interrupt latency of the ++ generated code. */ ++extern int low_irq_latency; ++ + #ifndef TARGET_DEFAULT + #define TARGET_DEFAULT (MASK_APCS_FRAME) + #endif +@@ -417,6 +475,9 @@ extern int arm_arch_hwdiv; + + #define OVERRIDE_OPTIONS arm_override_options () + ++#define OPTIMIZATION_OPTIONS(LEVEL,SIZE) \ ++ arm_optimization_options ((LEVEL), (SIZE)) ++ + /* Nonzero if PIC code requires explicit qualifiers to generate + PLT and GOT relocs rather than the assembler doing so implicitly. + Subtargets can override these if required. */ +@@ -725,12 +786,11 @@ extern int arm_structure_size_boundary; + fixed_regs[regno] = call_used_regs[regno] = 1; \ + } \ + \ +- if (TARGET_THUMB && optimize_size) \ +- { \ +- /* When optimizing for size, it's better not to use \ +- the HI regs, because of the overhead of stacking \ +- them. */ \ +- /* ??? Is this still true for thumb2? */ \ ++ if (TARGET_THUMB1 && optimize_size) \ ++ { \ ++ /* When optimizing for size on Thumb-1, it's better not \ ++ to use the HI regs, because of the overhead of \ ++ stacking them. */ \ + for (regno = FIRST_HI_REGNUM; \ + regno <= LAST_HI_REGNUM; ++regno) \ + fixed_regs[regno] = call_used_regs[regno] = 1; \ +@@ -849,6 +909,9 @@ extern int arm_structure_size_boundary; + /* The number of (integer) argument register available. */ + #define NUM_ARG_REGS 4 + ++/* And similarly for the VFP. */ ++#define NUM_VFP_ARG_REGS 16 ++ + /* Return the register number of the N'th (integer) argument. */ + #define ARG_REGISTER(N) (N - 1) + +@@ -942,7 +1005,7 @@ extern int arm_structure_size_boundary; + #define FIRST_VFP_REGNUM 63 + #define D7_VFP_REGNUM 78 /* Registers 77 and 78 == VFP reg D7. */ + #define LAST_VFP_REGNUM \ +- (TARGET_VFP3 ? LAST_HI_VFP_REGNUM : LAST_LO_VFP_REGNUM) ++ (TARGET_VFPD32 ? LAST_HI_VFP_REGNUM : LAST_LO_VFP_REGNUM) + + #define IS_VFP_REGNUM(REGNUM) \ + (((REGNUM) >= FIRST_VFP_REGNUM) && ((REGNUM) <= LAST_VFP_REGNUM)) +@@ -1027,7 +1090,7 @@ extern int arm_structure_size_boundary; + (GET_MODE_CLASS (MODE1) == GET_MODE_CLASS (MODE2)) + + #define VALID_IWMMXT_REG_MODE(MODE) \ +- (arm_vector_mode_supported_p (MODE) || (MODE) == DImode) ++ (arm_vector_mode_supported_p (MODE) || (MODE) == DImode || (MODE) == SImode) + + /* Modes valid for Neon D registers. */ + #define VALID_NEON_DREG_MODE(MODE) \ +@@ -1053,7 +1116,10 @@ extern int arm_structure_size_boundary; + For VFP/VFPv3, allocate D16-D31 first, then caller-saved registers (D0-D7), + then D8-D15. The reason for doing this is to attempt to reduce register + pressure when both single- and double-precision registers are used in a +- function. */ ++ function. ++ ++ The allocation order for Thumb differs from that given here: ++ see arm.c:adjust_reg_alloc_order. 
*/ + + #define REG_ALLOC_ORDER \ + { \ +@@ -1106,6 +1172,7 @@ enum reg_class + CC_REG, + VFPCC_REG, + GENERAL_REGS, ++ CORE_REGS, + ALL_REGS, + LIM_REG_CLASSES + }; +@@ -1131,6 +1198,7 @@ enum reg_class + "CC_REG", \ + "VFPCC_REG", \ + "GENERAL_REGS", \ ++ "CORE_REGS", \ + "ALL_REGS", \ + } + +@@ -1151,10 +1219,11 @@ enum reg_class + { 0x000000FF, 0x00000000, 0x00000000, 0x00000000 }, /* LO_REGS */ \ + { 0x00002000, 0x00000000, 0x00000000, 0x00000000 }, /* STACK_REG */ \ + { 0x000020FF, 0x00000000, 0x00000000, 0x00000000 }, /* BASE_REGS */ \ +- { 0x0000FF00, 0x00000000, 0x00000000, 0x00000000 }, /* HI_REGS */ \ ++ { 0x0000DF00, 0x00000000, 0x00000000, 0x00000000 }, /* HI_REGS */ \ + { 0x01000000, 0x00000000, 0x00000000, 0x00000000 }, /* CC_REG */ \ + { 0x00000000, 0x00000000, 0x00000000, 0x80000000 }, /* VFPCC_REG */ \ +- { 0x0200FFFF, 0x00000000, 0x00000000, 0x00000000 }, /* GENERAL_REGS */ \ ++ { 0x0200DFFF, 0x00000000, 0x00000000, 0x00000000 }, /* GENERAL_REGS */ \ ++ { 0x0200FFFF, 0x00000000, 0x00000000, 0x00000000 }, /* CORE_REGS */ \ + { 0xFAFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x7FFFFFFF } /* ALL_REGS */ \ + } + +@@ -1178,22 +1247,25 @@ enum reg_class + || reg_classes_intersect_p (VFP_REGS, (CLASS)) \ + : 0) + +-/* We need to define this for LO_REGS on thumb. Otherwise we can end up +- using r0-r4 for function arguments, r7 for the stack frame and don't +- have enough left over to do doubleword arithmetic. */ ++/* We need to define this for LO_REGS on Thumb-1. Otherwise we can end up ++ using r0-r4 for function arguments, r7 for the stack frame and don't have ++ enough left over to do doubleword arithmetic. For Thumb-2 all the ++ potentially problematic instructions accept high registers so this is not ++ necessary. Care needs to be taken to avoid adding new Thumb-2 patterns ++ that require many low registers. */ + #define CLASS_LIKELY_SPILLED_P(CLASS) \ +- ((TARGET_THUMB && (CLASS) == LO_REGS) \ ++ ((TARGET_THUMB1 && (CLASS) == LO_REGS) \ + || (CLASS) == CC_REG) + + /* The class value for index registers, and the one for base regs. */ + #define INDEX_REG_CLASS (TARGET_THUMB1 ? LO_REGS : GENERAL_REGS) +-#define BASE_REG_CLASS (TARGET_THUMB1 ? LO_REGS : GENERAL_REGS) ++#define BASE_REG_CLASS (TARGET_THUMB1 ? LO_REGS : CORE_REGS) + + /* For the Thumb the high registers cannot be used as base registers + when addressing quantities in QI or HI mode; if we don't know the + mode, then we must be conservative. */ + #define MODE_BASE_REG_CLASS(MODE) \ +- (TARGET_32BIT ? GENERAL_REGS : \ ++ (TARGET_32BIT ? (TARGET_THUMB2 ? LO_REGS : CORE_REGS) : \ + (((MODE) == SImode) ? BASE_REGS : LO_REGS)) + + /* For Thumb we can not support SP+reg addressing, so we return LO_REGS +@@ -1213,7 +1285,8 @@ enum reg_class + #define PREFERRED_RELOAD_CLASS(X, CLASS) \ + (TARGET_ARM ? (CLASS) : \ + ((CLASS) == GENERAL_REGS || (CLASS) == HI_REGS \ +- || (CLASS) == NO_REGS ? LO_REGS : (CLASS))) ++ || (CLASS) == NO_REGS || (CLASS) == STACK_REG \ ++ ? LO_REGS : (CLASS))) + + /* Must leave BASE_REGS reloads alone */ + #define THUMB_SECONDARY_INPUT_RELOAD_CLASS(CLASS, MODE, X) \ +@@ -1293,6 +1366,9 @@ enum reg_class + else if (TARGET_MAVERICK && TARGET_HARD_FLOAT) \ + /* Need to be careful, -256 is not a valid offset. */ \ + low = val >= 0 ? (val & 0xff) : -((-val) & 0xff); \ ++ else if (TARGET_REALLY_IWMMXT && MODE == SImode) \ ++ /* Need to be careful, -1024 is not a valid offset. */ \ ++ low = val >= 0 ? 
(val & 0x3ff) : -((-val) & 0x3ff); \ + else if (MODE == SImode \ + || (MODE == SFmode && TARGET_SOFT_FLOAT) \ + || ((MODE == HImode || MODE == QImode) && ! arm_arch4)) \ +@@ -1438,9 +1514,10 @@ do { \ + + /* Define how to find the value returned by a library function + assuming the value has mode MODE. */ +-#define LIBCALL_VALUE(MODE) \ +- (TARGET_32BIT && TARGET_HARD_FLOAT_ABI && TARGET_FPA \ +- && GET_MODE_CLASS (MODE) == MODE_FLOAT \ ++#define LIBCALL_VALUE(MODE) \ ++ (TARGET_AAPCS_BASED ? aapcs_libcall_value (MODE) \ ++ : (TARGET_32BIT && TARGET_HARD_FLOAT_ABI && TARGET_FPA \ ++ && GET_MODE_CLASS (MODE) == MODE_FLOAT) \ + ? gen_rtx_REG (MODE, FIRST_FPA_REGNUM) \ + : TARGET_32BIT && TARGET_HARD_FLOAT_ABI && TARGET_MAVERICK \ + && GET_MODE_CLASS (MODE) == MODE_FLOAT \ +@@ -1449,33 +1526,22 @@ do { \ + ? gen_rtx_REG (MODE, FIRST_IWMMXT_REGNUM) \ + : gen_rtx_REG (MODE, ARG_REGISTER (1))) + +-/* Define how to find the value returned by a function. +- VALTYPE is the data type of the value (as a tree). +- If the precise function being called is known, FUNC is its FUNCTION_DECL; +- otherwise, FUNC is 0. */ +-#define FUNCTION_VALUE(VALTYPE, FUNC) \ +- arm_function_value (VALTYPE, FUNC); +- +-/* 1 if N is a possible register number for a function value. +- On the ARM, only r0 and f0 can return results. */ +-/* On a Cirrus chip, mvf0 can return results. */ +-#define FUNCTION_VALUE_REGNO_P(REGNO) \ +- ((REGNO) == ARG_REGISTER (1) \ +- || (TARGET_32BIT && ((REGNO) == FIRST_CIRRUS_FP_REGNUM) \ +- && TARGET_HARD_FLOAT_ABI && TARGET_MAVERICK) \ +- || ((REGNO) == FIRST_IWMMXT_REGNUM && TARGET_IWMMXT_ABI) \ +- || (TARGET_32BIT && ((REGNO) == FIRST_FPA_REGNUM) \ ++/* 1 if REGNO is a possible register number for a function value. */ ++#define FUNCTION_VALUE_REGNO_P(REGNO) \ ++ ((REGNO) == ARG_REGISTER (1) \ ++ || (TARGET_AAPCS_BASED && TARGET_32BIT \ ++ && TARGET_VFP && TARGET_HARD_FLOAT \ ++ && (REGNO) == FIRST_VFP_REGNUM) \ ++ || (TARGET_32BIT && ((REGNO) == FIRST_CIRRUS_FP_REGNUM) \ ++ && TARGET_HARD_FLOAT_ABI && TARGET_MAVERICK) \ ++ || ((REGNO) == FIRST_IWMMXT_REGNUM && TARGET_IWMMXT_ABI) \ ++ || (TARGET_32BIT && ((REGNO) == FIRST_FPA_REGNUM) \ + && TARGET_HARD_FLOAT_ABI && TARGET_FPA)) + + /* Amount of memory needed for an untyped call to save all possible return + registers. */ + #define APPLY_RESULT_SIZE arm_apply_result_size() + +-/* How large values are returned */ +-/* A C expression which can inhibit the returning of certain function values +- in registers, based on the type of value. */ +-#define RETURN_IN_MEMORY(TYPE) arm_return_in_memory (TYPE) +- + /* Define DEFAULT_PCC_STRUCT_RETURN to 1 if all structure and union return + values must be in memory. On the ARM, they need only do so if larger + than a word, or if they contain elements offset from zero in the struct. */ +@@ -1531,6 +1597,7 @@ typedef struct arm_stack_offsets GTY(()) + int soft_frame; /* FRAME_POINTER_REGNUM. */ + int locals_base; /* THUMB_HARD_FRAME_POINTER_REGNUM. */ + int outgoing_args; /* STACK_POINTER_REGNUM. */ ++ unsigned int saved_regs_mask; + } + arm_stack_offsets; + +@@ -1568,9 +1635,27 @@ machine_function; + that is in text_section. */ + extern GTY(()) rtx thumb_call_via_label[14]; + ++/* The number of potential ways of assigning to a co-processor. */ ++#define ARM_NUM_COPROC_SLOTS 1 ++ ++/* Enumeration of procedure calling standard variants. We don't really ++ support all of these yet. */ ++enum arm_pcs ++{ ++ ARM_PCS_AAPCS, /* Base standard AAPCS. 
*/ ++ ARM_PCS_AAPCS_VFP, /* Use VFP registers for floating point values. */ ++ ARM_PCS_AAPCS_IWMMXT, /* Use iWMMXT registers for vectors. */ ++ /* This must be the last AAPCS variant. */ ++ ARM_PCS_AAPCS_LOCAL, /* Private call within this compilation unit. */ ++ ARM_PCS_ATPCS, /* ATPCS. */ ++ ARM_PCS_APCS, /* APCS (legacy Linux etc). */ ++ ARM_PCS_UNKNOWN ++}; ++ ++/* We can't define this inside a generator file because it needs enum ++ machine_mode. */ + /* A C type for declaring a variable that is used as the first argument of +- `FUNCTION_ARG' and other related values. For some target machines, the +- type `int' suffices and can hold the number of bytes of argument so far. */ ++ `FUNCTION_ARG' and other related values. */ + typedef struct + { + /* This is the number of registers of arguments scanned so far. */ +@@ -1579,9 +1664,33 @@ typedef struct + int iwmmxt_nregs; + int named_count; + int nargs; +- int can_split; ++ /* Which procedure call variant to use for this call. */ ++ enum arm_pcs pcs_variant; ++ ++ /* AAPCS related state tracking. */ ++ int aapcs_arg_processed; /* No need to lay out this argument again. */ ++ int aapcs_cprc_slot; /* Index of co-processor rules to handle ++ this argument, or -1 if using core ++ registers. */ ++ int aapcs_ncrn; ++ int aapcs_next_ncrn; ++ rtx aapcs_reg; /* Register assigned to this argument. */ ++ int aapcs_partial; /* How many bytes are passed in regs (if ++ split between core regs and stack. ++ Zero otherwise. */ ++ int aapcs_cprc_failed[ARM_NUM_COPROC_SLOTS]; ++ int can_split; /* Argument can be split between core regs ++ and the stack. */ ++ /* Private data for tracking VFP register allocation */ ++ unsigned aapcs_vfp_regs_free; ++ unsigned aapcs_vfp_reg_alloc; ++ int aapcs_vfp_rcount; ++ /* Can't include insn-modes.h because this header is needed before we ++ generate it. */ ++ int /* enum machine_mode */ aapcs_vfp_rmode; + } CUMULATIVE_ARGS; + ++ + /* Define where to put the arguments to a function. + Value is zero to push the argument on the stack, + or a hard register in which to store the argument. +@@ -1625,13 +1734,7 @@ typedef struct + of mode MODE and data type TYPE. + (TYPE is null for libcalls where that information may not be available.) */ + #define FUNCTION_ARG_ADVANCE(CUM, MODE, TYPE, NAMED) \ +- (CUM).nargs += 1; \ +- if (arm_vector_mode_supported_p (MODE) \ +- && (CUM).named_count > (CUM).nargs \ +- && TARGET_IWMMXT_ABI) \ +- (CUM).iwmmxt_nregs += 1; \ +- else \ +- (CUM).nregs += ARM_NUM_REGS2 (MODE, TYPE) ++ arm_function_arg_advance (&(CUM), (MODE), (TYPE), (NAMED)) + + /* If defined, a C expression that gives the alignment boundary, in bits, of an + argument with the specified mode and type. If it is not defined, +@@ -1643,9 +1746,11 @@ typedef struct + + /* 1 if N is a possible register number for function argument passing. + On the ARM, r0-r3 are used to pass args. */ +-#define FUNCTION_ARG_REGNO_P(REGNO) \ +- (IN_RANGE ((REGNO), 0, 3) \ +- || (TARGET_IWMMXT_ABI \ ++#define FUNCTION_ARG_REGNO_P(REGNO) \ ++ (IN_RANGE ((REGNO), 0, 3) \ ++ || (TARGET_AAPCS_BASED && TARGET_VFP && TARGET_HARD_FLOAT \ ++ && IN_RANGE ((REGNO), FIRST_VFP_REGNUM, FIRST_VFP_REGNUM + 15)) \ ++ || (TARGET_IWMMXT_ABI \ + && IN_RANGE ((REGNO), FIRST_IWMMXT_REGNUM, FIRST_IWMMXT_REGNUM + 9))) + + +@@ -1908,12 +2013,13 @@ typedef struct + /* Nonzero if X can be the base register in a reg+reg addressing mode. + For Thumb, we can not use SP + reg, so reject SP. 
*/ + #define REGNO_MODE_OK_FOR_REG_BASE_P(X, MODE) \ +- REGNO_OK_FOR_INDEX_P (X) ++ REGNO_MODE_OK_FOR_BASE_P (X, QImode) + + /* For ARM code, we don't care about the mode, but for Thumb, the index + must be suitable for use in a QImode load. */ + #define REGNO_OK_FOR_INDEX_P(REGNO) \ +- REGNO_MODE_OK_FOR_BASE_P (REGNO, QImode) ++ (REGNO_MODE_OK_FOR_BASE_P (REGNO, QImode) \ ++ && !TEST_REGNO (REGNO, ==, STACK_POINTER_REGNUM)) + + /* Maximum number of registers that can appear in a valid memory address. + Shifts in addresses can't be by a register. */ +@@ -1931,6 +2037,11 @@ typedef struct + SYMBOL's section. */ + #define ARM_OFFSETS_MUST_BE_WITHIN_SECTIONS_P 0 + ++/* Nonzero if all target requires all absolute relocations be R_ARM_ABS32. */ ++#ifndef TARGET_DEFAULT_WORD_RELOCATIONS ++#define TARGET_DEFAULT_WORD_RELOCATIONS 0 ++#endif ++ + /* Nonzero if the constant value X is a legitimate general operand. + It is given that X satisfies CONSTANT_P or is a CONST_DOUBLE. + +@@ -2051,6 +2162,13 @@ typedef struct + || REGNO (X) == FRAME_POINTER_REGNUM \ + || REGNO (X) == ARG_POINTER_REGNUM) + ++#define ARM_REG_OK_FOR_INDEX_P(X) \ ++ ((REGNO (X) <= LAST_ARM_REGNUM \ ++ && REGNO (X) != STACK_POINTER_REGNUM) \ ++ || REGNO (X) >= FIRST_PSEUDO_REGISTER \ ++ || REGNO (X) == FRAME_POINTER_REGNUM \ ++ || REGNO (X) == ARG_POINTER_REGNUM) ++ + #define THUMB1_REG_MODE_OK_FOR_BASE_P(X, MODE) \ + (REGNO (X) <= LAST_LO_REGNUM \ + || REGNO (X) >= FIRST_PSEUDO_REGISTER \ +@@ -2066,6 +2184,9 @@ typedef struct + #define ARM_REG_OK_FOR_BASE_P(X) \ + ARM_REGNO_OK_FOR_BASE_P (REGNO (X)) + ++#define ARM_REG_OK_FOR_INDEX_P(X) \ ++ ARM_REGNO_OK_FOR_INDEX_P (REGNO (X)) ++ + #define THUMB1_REG_MODE_OK_FOR_BASE_P(X, MODE) \ + THUMB1_REGNO_MODE_OK_FOR_BASE_P (REGNO (X), MODE) + +@@ -2080,8 +2201,6 @@ typedef struct + ? THUMB1_REG_MODE_OK_FOR_BASE_P (X, MODE) \ + : ARM_REG_OK_FOR_BASE_P (X)) + +-#define ARM_REG_OK_FOR_INDEX_P(X) ARM_REG_OK_FOR_BASE_P (X) +- + /* For 16-bit Thumb, a valid index register is anything that can be used in + a byte load instruction. */ + #define THUMB1_REG_OK_FOR_INDEX_P(X) \ +@@ -2259,7 +2378,8 @@ do { \ + /* Try to generate sequences that don't involve branches, we can then use + conditional instructions */ + #define BRANCH_COST \ +- (TARGET_32BIT ? 4 : (optimize > 0 ? 2 : 0)) ++ (TARGET_32BIT ? (TARGET_THUMB2 && optimize_size ? 1 : 4) \ ++ : (optimize > 0 ? 2 : 0)) + + /* Position Independent Code. */ + /* We decide which register to use based on the compilation options and +@@ -2339,6 +2459,19 @@ extern int making_const_table; + if (TARGET_ARM) \ + asm_fprintf (STREAM,"\tstmfd\t%r!,{%r}\n", \ + STACK_POINTER_REGNUM, REGNO); \ ++ else if (TARGET_THUMB1 \ ++ && (REGNO) == STATIC_CHAIN_REGNUM) \ ++ { \ ++ /* We can't push STATIC_CHAIN_REGNUM (r12) directly with Thumb-1. ++ We know that ASM_OUTPUT_REG_PUSH will be matched with ++ ASM_OUTPUT_REG_POP, and that r7 isn't used by the function ++ profiler, so we can use it as a scratch reg. WARNING: This isn't ++ safe in the general case! It may be sensitive to future changes ++ in final.c:profile_function. 
*/ \ ++ asm_fprintf (STREAM, "\tpush\t{r7}\n"); \ ++ asm_fprintf (STREAM, "\tmov\tr7, %r\n", REGNO);\ ++ asm_fprintf (STREAM, "\tpush\t{r7}\n"); \ ++ } \ + else \ + asm_fprintf (STREAM, "\tpush {%r}\n", REGNO); \ + } while (0) +@@ -2350,6 +2483,14 @@ extern int making_const_table; + if (TARGET_ARM) \ + asm_fprintf (STREAM, "\tldmfd\t%r!,{%r}\n", \ + STACK_POINTER_REGNUM, REGNO); \ ++ else if (TARGET_THUMB1 \ ++ && (REGNO) == STATIC_CHAIN_REGNUM) \ ++ { \ ++ /* See comment in ASM_OUTPUT_REG_PUSH. */ \ ++ asm_fprintf (STREAM, "\tpop\t{r7}\n"); \ ++ asm_fprintf (STREAM, "\tmov\t%r, r7\n", REGNO);\ ++ asm_fprintf (STREAM, "\tpop\t{r7}\n"); \ ++ } \ + else \ + asm_fprintf (STREAM, "\tpop {%r}\n", REGNO); \ + } while (0) +@@ -2384,7 +2525,8 @@ extern int making_const_table; + if (TARGET_THUMB) \ + { \ + if (is_called_in_ARM_mode (DECL) \ +- || (TARGET_THUMB1 && current_function_is_thunk)) \ ++ || (TARGET_THUMB1 && !TARGET_THUMB1_ONLY \ ++ && current_function_is_thunk)) \ + fprintf (STREAM, "\t.code 32\n") ; \ + else if (TARGET_THUMB1) \ + fprintf (STREAM, "\t.code\t16\n\t.thumb_func\n") ; \ +@@ -2479,10 +2621,12 @@ extern int making_const_table; + rtx base = XEXP (X, 0); \ + rtx index = XEXP (X, 1); \ + HOST_WIDE_INT offset = 0; \ +- if (GET_CODE (base) != REG) \ ++ if (GET_CODE (base) != REG \ ++ || (GET_CODE (index) == REG && REGNO (index) == SP_REGNUM)) \ + { \ + /* Ensure that BASE is a register. */ \ + /* (one of them must be). */ \ ++ /* Also ensure the SP is not used as in index register. */ \ + rtx temp = base; \ + base = index; \ + index = temp; \ +--- a/gcc/config/arm/arm.md ++++ b/gcc/config/arm/arm.md +@@ -93,9 +93,9 @@ + (UNSPEC_TLS 20) ; A symbol that has been treated properly for TLS usage. + (UNSPEC_PIC_LABEL 21) ; A label used for PIC access that does not appear in the + ; instruction stream. +- (UNSPEC_STACK_ALIGN 20) ; Doubleword aligned stack pointer. Used to ++ (UNSPEC_STACK_ALIGN 22) ; Doubleword aligned stack pointer. Used to + ; generate correct unwind information. +- (UNSPEC_PIC_OFFSET 22) ; A symbolic 12-bit OFFSET that has been treated ++ (UNSPEC_PIC_OFFSET 23) ; A symbolic 12-bit OFFSET that has been treated + ; correctly for PIC usage. + ] + ) +@@ -129,6 +129,8 @@ + (VUNSPEC_WCMP_EQ 12) ; Used by the iWMMXt WCMPEQ instructions + (VUNSPEC_WCMP_GTU 13) ; Used by the iWMMXt WCMPGTU instructions + (VUNSPEC_WCMP_GT 14) ; Used by the iwMMXT WCMPGT instructions ++ (VUNSPEC_ALIGN16 15) ; Used to force 16-byte alignment. ++ (VUNSPEC_ALIGN32 16) ; Used to force 32-byte alignment. + (VUNSPEC_EH_RETURN 20); Use to override the return address for exception + ; handling. + ] +@@ -142,6 +144,10 @@ + ; patterns that share the same RTL in both ARM and Thumb code. + (define_attr "is_thumb" "no,yes" (const (symbol_ref "thumb_code"))) + ++; FIX_JANUS is set to 'yes' when compiling for Janus2, it causes to ++; add a nop after shifts, in order to work around a Janus2 bug ++(define_attr "fix_janus" "no,yes" (const (symbol_ref "janus2_code"))) ++ + ; IS_STRONGARM is set to 'yes' when compiling for StrongARM, it affects + ; scheduling decisions for the load unit and the multiplier. + (define_attr "is_strongarm" "no,yes" (const (symbol_ref "arm_tune_strongarm"))) +@@ -156,7 +162,7 @@ + ; Floating Point Unit. If we only have floating point emulation, then there + ; is no point in scheduling the floating point insns. (Well, for best + ; performance we should try and group them together). 
+-(define_attr "fpu" "none,fpa,fpe2,fpe3,maverick,vfp" ++(define_attr "fpu" "none,fpa,fpe2,fpe3,maverick,vfp,vfpv3d16,vfpv3,neon,neon_fp16" + (const (symbol_ref "arm_fpu_attr"))) + + ; LENGTH of an instruction (in bytes) +@@ -183,7 +189,7 @@ + ;; scheduling information. + + (define_attr "insn" +- "mov,mvn,smulxy,smlaxy,smlalxy,smulwy,smlawx,mul,muls,mla,mlas,umull,umulls,umlal,umlals,smull,smulls,smlal,smlals,smlawy,smuad,smuadx,smlad,smladx,smusd,smusdx,smlsd,smlsdx,smmul,smmulr,smmla,umaal,smlald,smlsld,clz,mrs,msr,xtab,other" ++ "mov,mvn,and,orr,eor,smulxy,smlaxy,smlalxy,smulwy,smlawx,mul,muls,mla,mlas,umull,umulls,umlal,umlals,smull,smulls,smlal,smlals,smlawy,smuad,smuadx,smlad,smladx,smusd,smusdx,smlsd,smlsdx,smmul,smmulr,smmla,smmls,umaal,smlald,smlsld,clz,mrs,msr,xtab,sdiv,udiv,other" + (const_string "other")) + + ; TYPE attribute is used to detect floating point instructions which, if +@@ -192,6 +198,8 @@ + ; scheduling of writes. + + ; Classification of each insn ++; Note: vfp.md has different meanings for some of these, and some further ++; types as well. See that file for details. + ; alu any alu instruction that doesn't hit memory or fp + ; regs or have a shifted source operand + ; alu_shift any data instruction that doesn't hit memory or fp +@@ -236,7 +244,7 @@ + ; + + (define_attr "type" +- "alu,alu_shift,alu_shift_reg,mult,block,float,fdivx,fdivd,fdivs,fmul,fmuls,fmuld,fmacs,fmacd,ffmul,farith,ffarith,f_flag,float_em,f_load,f_store,f_loads,f_loadd,f_stores,f_stored,f_mem_r,r_mem_f,f_2_r,r_2_f,f_cvt,branch,call,load_byte,load1,load2,load3,load4,store1,store2,store3,store4,mav_farith,mav_dmult" ++ "alu,alu_shift,alu_shift_reg,mult,block,float,fdivx,fdivd,fdivs,fmul,fmuls,fmuld,fmacs,fmacd,ffmul,farith,ffarith,f_flag,float_em,f_load,f_store,f_loads,f_loadd,f_stores,f_stored,f_mem_r,r_mem_f,f_2_r,r_2_f,f_cvt,branch,call,load_byte,load1,load2,load3,load4,store1,store2,store3,store4,mav_farith,mav_dmult,fconsts,fconstd,fadds,faddd,ffariths,ffarithd,fcmps,fcmpd,fcpys" + (if_then_else + (eq_attr "insn" "smulxy,smlaxy,smlalxy,smulwy,smlawx,mul,muls,mla,mlas,umull,umulls,umlal,umlals,smull,smulls,smlal,smlals") + (const_string "mult") +@@ -246,6 +254,73 @@ + ; initialized by arm_override_options() + (define_attr "ldsched" "no,yes" (const (symbol_ref "arm_ld_sched"))) + ++;; Classification of NEON instructions for scheduling purposes. 
++(define_attr "neon_type" ++ "neon_int_1,\ ++ neon_int_2,\ ++ neon_int_3,\ ++ neon_int_4,\ ++ neon_int_5,\ ++ neon_vqneg_vqabs,\ ++ neon_vmov,\ ++ neon_vaba,\ ++ neon_vsma,\ ++ neon_vaba_qqq,\ ++ neon_mul_ddd_8_16_qdd_16_8_long_32_16_long,\ ++ neon_mul_qqq_8_16_32_ddd_32,\ ++ neon_mul_qdd_64_32_long_qqd_16_ddd_32_scalar_64_32_long_scalar,\ ++ neon_mla_ddd_8_16_qdd_16_8_long_32_16_long,\ ++ neon_mla_qqq_8_16,\ ++ neon_mla_ddd_32_qqd_16_ddd_32_scalar_qdd_64_32_long_scalar_qdd_64_32_long,\ ++ neon_mla_qqq_32_qqd_32_scalar,\ ++ neon_mul_ddd_16_scalar_32_16_long_scalar,\ ++ neon_mul_qqd_32_scalar,\ ++ neon_mla_ddd_16_scalar_qdd_32_16_long_scalar,\ ++ neon_shift_1,\ ++ neon_shift_2,\ ++ neon_shift_3,\ ++ neon_vshl_ddd,\ ++ neon_vqshl_vrshl_vqrshl_qqq,\ ++ neon_vsra_vrsra,\ ++ neon_fp_vadd_ddd_vabs_dd,\ ++ neon_fp_vadd_qqq_vabs_qq,\ ++ neon_fp_vsum,\ ++ neon_fp_vmul_ddd,\ ++ neon_fp_vmul_qqd,\ ++ neon_fp_vmla_ddd,\ ++ neon_fp_vmla_qqq,\ ++ neon_fp_vmla_ddd_scalar,\ ++ neon_fp_vmla_qqq_scalar,\ ++ neon_fp_vrecps_vrsqrts_ddd,\ ++ neon_fp_vrecps_vrsqrts_qqq,\ ++ neon_bp_simple,\ ++ neon_bp_2cycle,\ ++ neon_bp_3cycle,\ ++ neon_ldr,\ ++ neon_str,\ ++ neon_vld1_1_2_regs,\ ++ neon_vld1_3_4_regs,\ ++ neon_vld2_2_regs_vld1_vld2_all_lanes,\ ++ neon_vld2_4_regs,\ ++ neon_vld3_vld4,\ ++ neon_vst1_1_2_regs_vst2_2_regs,\ ++ neon_vst1_3_4_regs,\ ++ neon_vst2_4_regs_vst3_vst4,\ ++ neon_vst3_vst4,\ ++ neon_vld1_vld2_lane,\ ++ neon_vld3_vld4_lane,\ ++ neon_vst1_vst2_lane,\ ++ neon_vst3_vst4_lane,\ ++ neon_vld3_vld4_all_lanes,\ ++ neon_mcr,\ ++ neon_mcr_2_mcrr,\ ++ neon_mrc,\ ++ neon_mrrc,\ ++ neon_ldm_2,\ ++ neon_stm_2,\ ++ none" ++ (const_string "none")) ++ + ; condition codes: this one is used by final_prescan_insn to speed up + ; conditionalizing instructions. It saves having to scan the rtl to see if + ; it uses or alters the condition codes. +@@ -263,13 +338,17 @@ + ; JUMP_CLOB is used when the condition cannot be represented by a single + ; instruction (UNEQ and LTGT). These cannot be predicated. + ; ++; UNCONDITIONAL means the instions can not be conditionally executed. ++; + ; NOCOND means that the condition codes are neither altered nor affect the + ; output of this insn + +-(define_attr "conds" "use,set,clob,jump_clob,nocond" ++(define_attr "conds" "use,set,clob,jump_clob,unconditional,nocond" + (if_then_else (eq_attr "type" "call") + (const_string "clob") +- (const_string "nocond"))) ++ (if_then_else (eq_attr "neon_type" "none") ++ (const_string "nocond") ++ (const_string "unconditional")))) + + ; Predicable means that the insn can be conditionally executed based on + ; an automatically added predicate (additional patterns are generated by +@@ -328,18 +407,26 @@ + ;; Processor type. This is created automatically from arm-cores.def. + (include "arm-tune.md") + ++(define_attr "tune_cortexr4" "yes,no" ++ (const (if_then_else ++ (eq_attr "tune" "cortexr4,cortexr4f") ++ (const_string "yes") ++ (const_string "no")))) ++ + ;; True if the generic scheduling description should be used. 
+ + (define_attr "generic_sched" "yes,no" + (const (if_then_else +- (eq_attr "tune" "arm926ejs,arm1020e,arm1026ejs,arm1136js,arm1136jfs,cortexa8") ++ (ior (eq_attr "tune" "arm926ejs,arm1020e,arm1026ejs,arm1136js,arm1136jfs,marvell_f,cortexa8,cortexa9") ++ (eq_attr "tune_cortexr4" "yes")) + (const_string "no") + (const_string "yes")))) + + (define_attr "generic_vfp" "yes,no" + (const (if_then_else + (and (eq_attr "fpu" "vfp") +- (eq_attr "tune" "!arm1020e,arm1022e,cortexa8")) ++ (eq_attr "tune" "!arm1020e,arm1022e,marvell_f,cortexa8,cortexa9") ++ (eq_attr "tune_cortexr4" "no")) + (const_string "yes") + (const_string "no")))) + +@@ -348,7 +435,13 @@ + (include "arm1020e.md") + (include "arm1026ejs.md") + (include "arm1136jfs.md") ++(include "marvell-f.md") ++(include "marvell-f-vfp.md") + (include "cortex-a8.md") ++(include "cortex-a9.md") ++(include "cortex-r4.md") ++(include "cortex-r4f.md") ++(include "vfp11.md") + + + ;;--------------------------------------------------------------------------- +@@ -516,13 +609,19 @@ + "" + ) + ++;; The r/r/k alternative is required when reloading the address ++;; (plus (reg rN) (reg sp)) into (reg rN). In this case reload will ++;; put the duplicated register first, and not try the commutative version. + (define_insn_and_split "*arm_addsi3" +- [(set (match_operand:SI 0 "s_register_operand" "=r,r,r") +- (plus:SI (match_operand:SI 1 "s_register_operand" "%r,r,r") +- (match_operand:SI 2 "reg_or_int_operand" "rI,L,?n")))] ++ [(set (match_operand:SI 0 "s_register_operand" "=r, !k, r,r, !k,r") ++ (plus:SI (match_operand:SI 1 "s_register_operand" "%rk,!k, r,rk,!k,rk") ++ (match_operand:SI 2 "reg_or_int_operand" "rI, rI,!k,L, L,?n")))] + "TARGET_32BIT" + "@ + add%?\\t%0, %1, %2 ++ add%?\\t%0, %1, %2 ++ add%?\\t%0, %2, %1 ++ sub%?\\t%0, %1, #%n2 + sub%?\\t%0, %1, #%n2 + #" + "TARGET_32BIT && +@@ -536,7 +635,7 @@ + operands[1], 0); + DONE; + " +- [(set_attr "length" "4,4,16") ++ [(set_attr "length" "4,4,4,4,4,16") + (set_attr "predicable" "yes")] + ) + +@@ -545,9 +644,9 @@ + ;; so never allow those alternatives to match if reloading is needed. + + (define_insn "*thumb1_addsi3" +- [(set (match_operand:SI 0 "register_operand" "=l,l,l,*r,*h,l,!k") ++ [(set (match_operand:SI 0 "register_operand" "=l,l,l,*rk,*hk,l,!k") + (plus:SI (match_operand:SI 1 "register_operand" "%0,0,l,*0,*0,!k,!k") +- (match_operand:SI 2 "nonmemory_operand" "I,J,lL,*h,*r,!M,!O")))] ++ (match_operand:SI 2 "nonmemory_operand" "I,J,lL,*hk,*rk,!M,!O")))] + "TARGET_THUMB1" + "* + static const char * const asms[] = +@@ -759,7 +858,11 @@ + [(set_attr "conds" "use") + (set (attr "type") (if_then_else (match_operand 4 "const_int_operand" "") + (const_string "alu_shift") +- (const_string "alu_shift_reg")))] ++ (const_string "alu_shift_reg"))) ++ (set (attr "length") (if_then_else (and (eq_attr "type" "alu_shift_reg") ++ (eq_attr "fix_janus" "yes")) ++ (const_int 8) ++ (const_int 4)))] + ) + + (define_insn "*addsi3_carryin_alt1" +@@ -991,12 +1094,13 @@ + + ; ??? 
Check Thumb-2 split length + (define_insn_and_split "*arm_subsi3_insn" +- [(set (match_operand:SI 0 "s_register_operand" "=r,r") +- (minus:SI (match_operand:SI 1 "reg_or_int_operand" "rI,?n") +- (match_operand:SI 2 "s_register_operand" "r,r")))] ++ [(set (match_operand:SI 0 "s_register_operand" "=r,rk,r") ++ (minus:SI (match_operand:SI 1 "reg_or_int_operand" "rI,!k,?n") ++ (match_operand:SI 2 "s_register_operand" "r, r, r")))] + "TARGET_32BIT" + "@ + rsb%?\\t%0, %2, %1 ++ sub%?\\t%0, %1, %2 + #" + "TARGET_32BIT + && GET_CODE (operands[1]) == CONST_INT +@@ -1007,7 +1111,7 @@ + INTVAL (operands[1]), operands[0], operands[2], 0); + DONE; + " +- [(set_attr "length" "4,16") ++ [(set_attr "length" "4,4,16") + (set_attr "predicable" "yes")] + ) + +@@ -1236,6 +1340,49 @@ + (set_attr "predicable" "yes")] + ) + ++; The combiner cannot combine the first and last insns in the ++; following sequence because of the intervening insn, so help the ++; combiner with this splitter. The combiner does attempt to split ++; this particular combination but does not know this exact split. ++; Note that the combiner puts the constant at the outermost operation ++; as a part of canonicalization. ++; ++; mul r3, r2, r1 ++; r3, r3, ++; add r3, r3, r4 ++ ++(define_split ++ [(set (match_operand:SI 0 "s_register_operand" "") ++ (match_operator:SI 1 "plusminus_operator" ++ [(plus:SI (mult:SI (match_operand:SI 2 "s_register_operand" "") ++ (match_operand:SI 3 "s_register_operand" "")) ++ (match_operand:SI 4 "s_register_operand" "")) ++ (match_operand:SI 5 "arm_immediate_operand" "")]))] ++ "TARGET_32BIT" ++ [(set (match_dup 0) ++ (plus:SI (mult:SI (match_dup 2) (match_dup 3)) ++ (match_dup 4))) ++ (set (match_dup 0) ++ (match_op_dup:SI 1 [(match_dup 0) (match_dup 5)]))] ++ "") ++ ++; Likewise for MLS. MLS is available only on select architectures. ++ ++(define_split ++ [(set (match_operand:SI 0 "s_register_operand" "") ++ (match_operator:SI 1 "plusminus_operator" ++ [(minus:SI (match_operand:SI 2 "s_register_operand" "") ++ (mult:SI (match_operand:SI 3 "s_register_operand" "") ++ (match_operand:SI 4 "s_register_operand" ""))) ++ (match_operand:SI 5 "arm_immediate_operand" "")]))] ++ "TARGET_32BIT && arm_arch_thumb2" ++ [(set (match_dup 0) ++ (minus:SI (match_dup 2) ++ (mult:SI (match_dup 3) (match_dup 4)))) ++ (set (match_dup 0) ++ (match_op_dup:SI 1 [(match_dup 0) (match_dup 5)]))] ++ "") ++ + (define_insn "*mulsi3addsi_compare0" + [(set (reg:CC_NOOV CC_REGNUM) + (compare:CC_NOOV +@@ -1864,6 +2011,7 @@ + DONE; + " + [(set_attr "length" "4,4,16") ++ (set_attr "insn" "and") + (set_attr "predicable" "yes")] + ) + +@@ -1873,7 +2021,8 @@ + (match_operand:SI 2 "register_operand" "l")))] + "TARGET_THUMB1" + "and\\t%0, %0, %2" +- [(set_attr "length" "2")] ++ [(set_attr "length" "2") ++ (set_attr "insn" "and")] + ) + + (define_insn "*andsi3_compare0" +@@ -1888,7 +2037,8 @@ + "@ + and%.\\t%0, %1, %2 + bic%.\\t%0, %1, #%B2" +- [(set_attr "conds" "set")] ++ [(set_attr "conds" "set") ++ (set_attr "insn" "and,*")] + ) + + (define_insn "*andsi3_compare0_scratch" +@@ -2140,13 +2290,12 @@ + ;;; the value before we insert. This loses some of the advantage of having + ;;; this insv pattern, so this pattern needs to be reevalutated. + +-; ??? 
Use Thumb-2 bitfield insert/extract instructions + (define_expand "insv" + [(set (zero_extract:SI (match_operand:SI 0 "s_register_operand" "") + (match_operand:SI 1 "general_operand" "") + (match_operand:SI 2 "general_operand" "")) + (match_operand:SI 3 "reg_or_int_operand" ""))] +- "TARGET_ARM" ++ "TARGET_ARM || arm_arch_thumb2" + " + { + int start_bit = INTVAL (operands[2]); +@@ -2154,7 +2303,38 @@ + HOST_WIDE_INT mask = (((HOST_WIDE_INT)1) << width) - 1; + rtx target, subtarget; + +- target = operands[0]; ++ if (arm_arch_thumb2) ++ { ++ bool use_bfi = TRUE; ++ ++ if (GET_CODE (operands[3]) == CONST_INT) ++ { ++ HOST_WIDE_INT val = INTVAL (operands[3]) & mask; ++ ++ if (val == 0) ++ { ++ emit_insn (gen_insv_zero (operands[0], operands[1], ++ operands[2])); ++ DONE; ++ } ++ ++ /* See if the set can be done with a single orr instruction. */ ++ if (val == mask && const_ok_for_arm (val << start_bit)) ++ use_bfi = FALSE; ++ } ++ ++ if (use_bfi) ++ { ++ if (GET_CODE (operands[3]) != REG) ++ operands[3] = force_reg (SImode, operands[3]); ++ ++ emit_insn (gen_insv_t2 (operands[0], operands[1], operands[2], ++ operands[3])); ++ DONE; ++ } ++ } ++ ++ target = copy_rtx (operands[0]); + /* Avoid using a subreg as a subtarget, and avoid writing a paradoxical + subreg as the final target. */ + if (GET_CODE (target) == SUBREG) +@@ -2277,6 +2457,28 @@ + }" + ) + ++(define_insn "insv_zero" ++ [(set (zero_extract:SI (match_operand:SI 0 "s_register_operand" "+r") ++ (match_operand:SI 1 "const_int_operand" "M") ++ (match_operand:SI 2 "const_int_operand" "M")) ++ (const_int 0))] ++ "arm_arch_thumb2" ++ "bfc%?\t%0, %2, %1" ++ [(set_attr "length" "4") ++ (set_attr "predicable" "yes")] ++) ++ ++(define_insn "insv_t2" ++ [(set (zero_extract:SI (match_operand:SI 0 "s_register_operand" "+r") ++ (match_operand:SI 1 "const_int_operand" "M") ++ (match_operand:SI 2 "const_int_operand" "M")) ++ (match_operand:SI 3 "s_register_operand" "r"))] ++ "arm_arch_thumb2" ++ "bfi%?\t%0, %3, %2, %1" ++ [(set_attr "length" "4") ++ (set_attr "predicable" "yes")] ++) ++ + ; constants for op 2 will never be given to these patterns. 
+ (define_insn_and_split "*anddi_notdi_di" + [(set (match_operand:DI 0 "s_register_operand" "=&r,&r") +@@ -2380,7 +2582,11 @@ + (set_attr "shift" "2") + (set (attr "type") (if_then_else (match_operand 3 "const_int_operand" "") + (const_string "alu_shift") +- (const_string "alu_shift_reg")))] ++ (const_string "alu_shift_reg"))) ++ (set (attr "length") (if_then_else (and (eq_attr "type" "alu_shift_reg") ++ (eq_attr "fix_janus" "yes")) ++ (const_int 8) ++ (const_int 4)))] + ) + + (define_insn "*andsi_notsi_si_compare0" +@@ -2428,6 +2634,7 @@ + orr%?\\t%Q0, %Q1, %2 + #" + [(set_attr "length" "4,8") ++ (set_attr "insn" "orr") + (set_attr "predicable" "yes")] + ) + +@@ -2490,7 +2697,8 @@ + (match_operand:SI 2 "register_operand" "l")))] + "TARGET_THUMB1" + "orr\\t%0, %0, %2" +- [(set_attr "length" "2")] ++ [(set_attr "length" "2") ++ (set_attr "insn" "orr")] + ) + + (define_peephole2 +@@ -2515,7 +2723,8 @@ + (ior:SI (match_dup 1) (match_dup 2)))] + "TARGET_32BIT" + "orr%.\\t%0, %1, %2" +- [(set_attr "conds" "set")] ++ [(set_attr "conds" "set") ++ (set_attr "insn" "orr")] + ) + + (define_insn "*iorsi3_compare0_scratch" +@@ -2526,7 +2735,8 @@ + (clobber (match_scratch:SI 0 "=r"))] + "TARGET_32BIT" + "orr%.\\t%0, %1, %2" +- [(set_attr "conds" "set")] ++ [(set_attr "conds" "set") ++ (set_attr "insn" "orr")] + ) + + (define_insn "xordi3" +@@ -2549,7 +2759,8 @@ + eor%?\\t%Q0, %Q1, %2 + #" + [(set_attr "length" "4,8") +- (set_attr "predicable" "yes")] ++ (set_attr "predicable" "yes") ++ (set_attr "insn" "eor")] + ) + + (define_insn "*xordi_sesidi_di" +@@ -2580,7 +2791,8 @@ + (match_operand:SI 2 "arm_rhs_operand" "rI")))] + "TARGET_32BIT" + "eor%?\\t%0, %1, %2" +- [(set_attr "predicable" "yes")] ++ [(set_attr "predicable" "yes") ++ (set_attr "insn" "eor")] + ) + + (define_insn "*thumb1_xorsi3" +@@ -2589,7 +2801,8 @@ + (match_operand:SI 2 "register_operand" "l")))] + "TARGET_THUMB1" + "eor\\t%0, %0, %2" +- [(set_attr "length" "2")] ++ [(set_attr "length" "2") ++ (set_attr "insn" "eor")] + ) + + (define_insn "*xorsi3_compare0" +@@ -2601,7 +2814,8 @@ + (xor:SI (match_dup 1) (match_dup 2)))] + "TARGET_32BIT" + "eor%.\\t%0, %1, %2" +- [(set_attr "conds" "set")] ++ [(set_attr "conds" "set") ++ (set_attr "insn" "eor")] + ) + + (define_insn "*xorsi3_compare0_scratch" +@@ -2758,7 +2972,7 @@ + (smax:SI (match_operand:SI 1 "s_register_operand" "") + (match_operand:SI 2 "arm_rhs_operand" ""))) + (clobber (reg:CC CC_REGNUM))])] +- "TARGET_32BIT" ++ "TARGET_32BIT && !TARGET_NO_COND_EXEC" + " + if (operands[2] == const0_rtx || operands[2] == constm1_rtx) + { +@@ -2785,7 +2999,8 @@ + (const_int -1)))] + "TARGET_32BIT" + "orr%?\\t%0, %1, %1, asr #31" +- [(set_attr "predicable" "yes")] ++ [(set_attr "predicable" "yes") ++ (set_attr "insn" "orr")] + ) + + (define_insn "*arm_smax_insn" +@@ -2793,7 +3008,7 @@ + (smax:SI (match_operand:SI 1 "s_register_operand" "%0,?r") + (match_operand:SI 2 "arm_rhs_operand" "rI,rI"))) + (clobber (reg:CC CC_REGNUM))] +- "TARGET_ARM" ++ "TARGET_ARM && !TARGET_NO_COND_EXEC" + "@ + cmp\\t%1, %2\;movlt\\t%0, %2 + cmp\\t%1, %2\;movge\\t%0, %1\;movlt\\t%0, %2" +@@ -2807,7 +3022,7 @@ + (smin:SI (match_operand:SI 1 "s_register_operand" "") + (match_operand:SI 2 "arm_rhs_operand" ""))) + (clobber (reg:CC CC_REGNUM))])] +- "TARGET_32BIT" ++ "TARGET_32BIT && !TARGET_NO_COND_EXEC" + " + if (operands[2] == const0_rtx) + { +@@ -2825,7 +3040,8 @@ + (const_int 0)))] + "TARGET_32BIT" + "and%?\\t%0, %1, %1, asr #31" +- [(set_attr "predicable" "yes")] ++ [(set_attr "predicable" "yes") ++ (set_attr "insn" 
"and")] + ) + + (define_insn "*arm_smin_insn" +@@ -2833,7 +3049,7 @@ + (smin:SI (match_operand:SI 1 "s_register_operand" "%0,?r") + (match_operand:SI 2 "arm_rhs_operand" "rI,rI"))) + (clobber (reg:CC CC_REGNUM))] +- "TARGET_ARM" ++ "TARGET_ARM && !TARGET_NO_COND_EXEC" + "@ + cmp\\t%1, %2\;movge\\t%0, %2 + cmp\\t%1, %2\;movlt\\t%0, %1\;movge\\t%0, %2" +@@ -2847,7 +3063,7 @@ + (umax:SI (match_operand:SI 1 "s_register_operand" "") + (match_operand:SI 2 "arm_rhs_operand" ""))) + (clobber (reg:CC CC_REGNUM))])] +- "TARGET_32BIT" ++ "TARGET_32BIT && !TARGET_NO_COND_EXEC" + "" + ) + +@@ -2856,7 +3072,7 @@ + (umax:SI (match_operand:SI 1 "s_register_operand" "0,r,?r") + (match_operand:SI 2 "arm_rhs_operand" "rI,0,rI"))) + (clobber (reg:CC CC_REGNUM))] +- "TARGET_ARM" ++ "TARGET_ARM && !TARGET_NO_COND_EXEC" + "@ + cmp\\t%1, %2\;movcc\\t%0, %2 + cmp\\t%1, %2\;movcs\\t%0, %1 +@@ -2871,7 +3087,7 @@ + (umin:SI (match_operand:SI 1 "s_register_operand" "") + (match_operand:SI 2 "arm_rhs_operand" ""))) + (clobber (reg:CC CC_REGNUM))])] +- "TARGET_32BIT" ++ "TARGET_32BIT && !TARGET_NO_COND_EXEC" + "" + ) + +@@ -2880,7 +3096,7 @@ + (umin:SI (match_operand:SI 1 "s_register_operand" "0,r,?r") + (match_operand:SI 2 "arm_rhs_operand" "rI,0,rI"))) + (clobber (reg:CC CC_REGNUM))] +- "TARGET_ARM" ++ "TARGET_ARM && !TARGET_NO_COND_EXEC" + "@ + cmp\\t%1, %2\;movcs\\t%0, %2 + cmp\\t%1, %2\;movcc\\t%0, %1 +@@ -2895,7 +3111,7 @@ + [(match_operand:SI 1 "s_register_operand" "r") + (match_operand:SI 2 "s_register_operand" "r")])) + (clobber (reg:CC CC_REGNUM))] +- "TARGET_32BIT" ++ "TARGET_32BIT && !TARGET_NO_COND_EXEC" + "* + operands[3] = gen_rtx_fmt_ee (minmax_code (operands[3]), SImode, + operands[1], operands[2]); +@@ -3015,11 +3231,23 @@ + [(set (match_operand:SI 0 "register_operand" "=l,l") + (ashift:SI (match_operand:SI 1 "register_operand" "l,0") + (match_operand:SI 2 "nonmemory_operand" "N,l")))] +- "TARGET_THUMB1" ++ "TARGET_THUMB1 && !janus2_code" + "lsl\\t%0, %1, %2" + [(set_attr "length" "2")] + ) + ++(define_insn "*thumb1_ashlsi3_janus2" ++ [(set (match_operand:SI 0 "register_operand" "=l,l") ++ (ashift:SI (match_operand:SI 1 "register_operand" "l,0") ++ (match_operand:SI 2 "nonmemory_operand" "N,l")))] ++ "TARGET_THUMB1 && janus2_code" ++ "@ ++ lsl\\t%0, %1, %2 ++ lsl\\t%0, %1, %2\;nop" ++ [(set_attr "length" "2,4")] ++) ++ ++ + (define_expand "ashrdi3" + [(set (match_operand:DI 0 "s_register_operand" "") + (ashiftrt:DI (match_operand:DI 1 "s_register_operand" "") +@@ -3052,6 +3280,7 @@ + "TARGET_32BIT" + "movs\\t%R0, %R1, asr #1\;mov\\t%Q0, %Q1, rrx" + [(set_attr "conds" "clob") ++ (set_attr "insn" "mov") + (set_attr "length" "8")] + ) + +@@ -3071,11 +3300,22 @@ + [(set (match_operand:SI 0 "register_operand" "=l,l") + (ashiftrt:SI (match_operand:SI 1 "register_operand" "l,0") + (match_operand:SI 2 "nonmemory_operand" "N,l")))] +- "TARGET_THUMB1" ++ "TARGET_THUMB1 && !janus2_code" + "asr\\t%0, %1, %2" + [(set_attr "length" "2")] + ) + ++(define_insn "*thumb1_ashrsi3_janus2" ++ [(set (match_operand:SI 0 "register_operand" "=l,l") ++ (ashiftrt:SI (match_operand:SI 1 "register_operand" "l,0") ++ (match_operand:SI 2 "nonmemory_operand" "N,l")))] ++ "TARGET_THUMB1 && janus2_code" ++ "@ ++ asr\\t%0, %1, %2 ++ asr\\t%0, %1, %2\;nop" ++ [(set_attr "length" "2,4")] ++) ++ + (define_expand "lshrdi3" + [(set (match_operand:DI 0 "s_register_operand" "") + (lshiftrt:DI (match_operand:DI 1 "s_register_operand" "") +@@ -3108,6 +3348,7 @@ + "TARGET_32BIT" + "movs\\t%R0, %R1, lsr #1\;mov\\t%Q0, %Q1, rrx" + [(set_attr 
"conds" "clob") ++ (set_attr "insn" "mov") + (set_attr "length" "8")] + ) + +@@ -3130,11 +3371,22 @@ + [(set (match_operand:SI 0 "register_operand" "=l,l") + (lshiftrt:SI (match_operand:SI 1 "register_operand" "l,0") + (match_operand:SI 2 "nonmemory_operand" "N,l")))] +- "TARGET_THUMB1" ++ "TARGET_THUMB1 && !janus2_code" + "lsr\\t%0, %1, %2" + [(set_attr "length" "2")] + ) + ++(define_insn "*thumb1_lshrsi3_janus2" ++ [(set (match_operand:SI 0 "register_operand" "=l,l") ++ (lshiftrt:SI (match_operand:SI 1 "register_operand" "l,0") ++ (match_operand:SI 2 "nonmemory_operand" "N,l")))] ++ "TARGET_THUMB1 && janus2_code" ++ "@ ++ lsr\\t%0, %1, %2 ++ lsr\\t%0, %1, %2; nop" ++ [(set_attr "length" "2,4")] ++) ++ + (define_expand "rotlsi3" + [(set (match_operand:SI 0 "s_register_operand" "") + (rotatert:SI (match_operand:SI 1 "s_register_operand" "") +@@ -3176,11 +3428,20 @@ + [(set (match_operand:SI 0 "register_operand" "=l") + (rotatert:SI (match_operand:SI 1 "register_operand" "0") + (match_operand:SI 2 "register_operand" "l")))] +- "TARGET_THUMB1" ++ "TARGET_THUMB1 && !janus2_code" + "ror\\t%0, %0, %2" + [(set_attr "length" "2")] + ) + ++(define_insn "*thumb1_rotrsi3_janus2" ++ [(set (match_operand:SI 0 "register_operand" "=l") ++ (rotatert:SI (match_operand:SI 1 "register_operand" "0") ++ (match_operand:SI 2 "register_operand" "l")))] ++ "TARGET_THUMB1 && janus2_code" ++ "ror\\t%0, %0, %2; nop" ++ [(set_attr "length" "4")] ++) ++ + (define_insn "*arm_shiftsi3" + [(set (match_operand:SI 0 "s_register_operand" "=r") + (match_operator:SI 3 "shift_operator" +@@ -3192,7 +3453,11 @@ + (set_attr "shift" "1") + (set (attr "type") (if_then_else (match_operand 2 "const_int_operand" "") + (const_string "alu_shift") +- (const_string "alu_shift_reg")))] ++ (const_string "alu_shift_reg"))) ++ (set (attr "length") (if_then_else (and (eq_attr "type" "alu_shift_reg") ++ (eq_attr "fix_janus" "yes")) ++ (const_int 8) ++ (const_int 4)))] + ) + + (define_insn "*shiftsi3_compare0" +@@ -3209,7 +3474,11 @@ + (set_attr "shift" "1") + (set (attr "type") (if_then_else (match_operand 2 "const_int_operand" "") + (const_string "alu_shift") +- (const_string "alu_shift_reg")))] ++ (const_string "alu_shift_reg"))) ++ (set (attr "length") (if_then_else (and (eq_attr "type" "alu_shift_reg") ++ (eq_attr "fix_janus" "yes")) ++ (const_int 8) ++ (const_int 4)))] + ) + + (define_insn "*shiftsi3_compare0_scratch" +@@ -3222,7 +3491,11 @@ + "TARGET_32BIT" + "* return arm_output_shift(operands, 1);" + [(set_attr "conds" "set") +- (set_attr "shift" "1")] ++ (set_attr "shift" "1") ++ (set (attr "length") (if_then_else (and (match_operand 2 "s_register_operand" "") ++ (eq_attr "fix_janus" "yes")) ++ (const_int 8) ++ (const_int 4)))] + ) + + (define_insn "*arm_notsi_shiftsi" +@@ -3234,9 +3507,14 @@ + "mvn%?\\t%0, %1%S3" + [(set_attr "predicable" "yes") + (set_attr "shift" "1") ++ (set_attr "insn" "mvn") + (set (attr "type") (if_then_else (match_operand 2 "const_int_operand" "") + (const_string "alu_shift") +- (const_string "alu_shift_reg")))] ++ (const_string "alu_shift_reg"))) ++ (set (attr "length") (if_then_else (and (eq_attr "type" "alu_shift_reg") ++ (eq_attr "fix_janus" "yes")) ++ (const_int 8) ++ (const_int 4)))] + ) + + (define_insn "*arm_notsi_shiftsi_compare0" +@@ -3251,9 +3529,14 @@ + "mvn%.\\t%0, %1%S3" + [(set_attr "conds" "set") + (set_attr "shift" "1") ++ (set_attr "insn" "mvn") + (set (attr "type") (if_then_else (match_operand 2 "const_int_operand" "") + (const_string "alu_shift") +- (const_string "alu_shift_reg")))] ++ 
(const_string "alu_shift_reg"))) ++ (set (attr "length") (if_then_else (and (eq_attr "type" "alu_shift_reg") ++ (eq_attr "fix_janus" "yes")) ++ (const_int 8) ++ (const_int 4)))] + ) + + (define_insn "*arm_not_shiftsi_compare0_scratch" +@@ -3267,9 +3550,14 @@ + "mvn%.\\t%0, %1%S3" + [(set_attr "conds" "set") + (set_attr "shift" "1") ++ (set_attr "insn" "mvn") + (set (attr "type") (if_then_else (match_operand 2 "const_int_operand" "") + (const_string "alu_shift") +- (const_string "alu_shift_reg")))] ++ (const_string "alu_shift_reg"))) ++ (set (attr "length") (if_then_else (and (eq_attr "type" "alu_shift_reg") ++ (eq_attr "fix_janus" "yes")) ++ (const_int 8) ++ (const_int 4)))] + ) + + ;; We don't really have extzv, but defining this using shifts helps +@@ -3282,12 +3570,19 @@ + (set (match_operand:SI 0 "register_operand" "") + (lshiftrt:SI (match_dup 4) + (match_operand:SI 3 "const_int_operand" "")))] +- "TARGET_THUMB1" ++ "TARGET_THUMB1 || arm_arch_thumb2" + " + { + HOST_WIDE_INT lshift = 32 - INTVAL (operands[2]) - INTVAL (operands[3]); + HOST_WIDE_INT rshift = 32 - INTVAL (operands[2]); + ++ if (arm_arch_thumb2) ++ { ++ emit_insn (gen_extzv_t2 (operands[0], operands[1], operands[2], ++ operands[3])); ++ DONE; ++ } ++ + operands[3] = GEN_INT (rshift); + + if (lshift == 0) +@@ -3301,6 +3596,28 @@ + }" + ) + ++(define_insn "extv" ++ [(set (match_operand:SI 0 "s_register_operand" "=r") ++ (sign_extract:SI (match_operand:SI 1 "s_register_operand" "r") ++ (match_operand:SI 2 "const_int_operand" "M") ++ (match_operand:SI 3 "const_int_operand" "M")))] ++ "arm_arch_thumb2" ++ "sbfx%?\t%0, %1, %3, %2" ++ [(set_attr "length" "4") ++ (set_attr "predicable" "yes")] ++) ++ ++(define_insn "extzv_t2" ++ [(set (match_operand:SI 0 "s_register_operand" "=r") ++ (zero_extract:SI (match_operand:SI 1 "s_register_operand" "r") ++ (match_operand:SI 2 "const_int_operand" "M") ++ (match_operand:SI 3 "const_int_operand" "M")))] ++ "arm_arch_thumb2" ++ "ubfx%?\t%0, %1, %3, %2" ++ [(set_attr "length" "4") ++ (set_attr "predicable" "yes")] ++) ++ + + ;; Unary arithmetic insns + +@@ -3378,7 +3695,7 @@ + + ;; abssi2 doesn't really clobber the condition codes if a different register + ;; is being set. To keep things simple, assume during rtl manipulations that +-;; it does, but tell the final scan operator the truth. Similarly for ++;; it does, and the splitter will eliminate it. 
Similarly for + ;; (neg (abs...)) + + (define_expand "abssi2" +@@ -3390,22 +3707,28 @@ + " + if (TARGET_THUMB1) + operands[2] = gen_rtx_SCRATCH (SImode); ++ else if (TARGET_NO_SINGLE_COND_EXEC) ++ { ++ emit_insn(gen_rtx_SET(VOIDmode, operands[0], ++ gen_rtx_ABS(SImode, operands[1]))); ++ DONE; ++ } + else + operands[2] = gen_rtx_REG (CCmode, CC_REGNUM); + ") + + (define_insn "*arm_abssi2" +- [(set (match_operand:SI 0 "s_register_operand" "=r,&r") +- (abs:SI (match_operand:SI 1 "s_register_operand" "0,r"))) ++ [(set (match_operand:SI 0 "s_register_operand" "=r") ++ (abs:SI (match_operand:SI 1 "s_register_operand" "r"))) + (clobber (reg:CC CC_REGNUM))] +- "TARGET_ARM" +- "@ +- cmp\\t%0, #0\;rsblt\\t%0, %0, #0 +- eor%?\\t%0, %1, %1, asr #31\;sub%?\\t%0, %0, %1, asr #31" +- [(set_attr "conds" "clob,*") +- (set_attr "shift" "1") ++ "TARGET_32BIT && !TARGET_NO_SINGLE_COND_EXEC" ++ "#" ++ [(set_attr "shift" "1") + ;; predicable can't be set based on the variant, so left as no +- (set_attr "length" "8")] ++ (set (attr "length") ++ (if_then_else (eq_attr "is_thumb" "yes") ++ (const_int 10) ++ (const_int 8)))] + ) + + (define_insn_and_split "*thumb1_abssi2" +@@ -3423,17 +3746,17 @@ + ) + + (define_insn "*arm_neg_abssi2" +- [(set (match_operand:SI 0 "s_register_operand" "=r,&r") +- (neg:SI (abs:SI (match_operand:SI 1 "s_register_operand" "0,r")))) ++ [(set (match_operand:SI 0 "s_register_operand" "=r") ++ (neg:SI (abs:SI (match_operand:SI 1 "s_register_operand" "r")))) + (clobber (reg:CC CC_REGNUM))] +- "TARGET_ARM" +- "@ +- cmp\\t%0, #0\;rsbgt\\t%0, %0, #0 +- eor%?\\t%0, %1, %1, asr #31\;rsb%?\\t%0, %0, %1, asr #31" +- [(set_attr "conds" "clob,*") +- (set_attr "shift" "1") ++ "TARGET_32BIT && !TARGET_NO_SINGLE_COND_EXEC" ++ "#" ++ [(set_attr "shift" "1") + ;; predicable can't be set based on the variant, so left as no +- (set_attr "length" "8")] ++ (set (attr "length") ++ (if_then_else (eq_attr "is_thumb" "yes") ++ (const_int 10) ++ (const_int 8)))] + ) + + (define_insn_and_split "*thumb1_neg_abssi2" +@@ -3450,6 +3773,93 @@ + [(set_attr "length" "6")] + ) + ++;; Simplified version for when avoiding conditional execution ++(define_insn "*arm_nocond_abssi2" ++ [(set (match_operand:SI 0 "s_register_operand" "=&r") ++ (abs:SI (match_operand:SI 1 "s_register_operand" "r")))] ++ "TARGET_32BIT && TARGET_NO_SINGLE_COND_EXEC" ++ "#" ++ [(set_attr "shift" "1") ++ (set_attr "length" "8") ++ (set_attr "predicable" "yes")] ++) ++ ++(define_insn "*arm_nocond_neg_abssi2" ++ [(set (match_operand:SI 0 "s_register_operand" "=&r") ++ (neg:SI (abs:SI (match_operand:SI 1 "s_register_operand" "r"))))] ++ "TARGET_32BIT && TARGET_NO_SINGLE_COND_EXEC" ++ "#" ++ [(set_attr "shift" "1") ++ (set_attr "length" "8") ++ (set_attr "predicable" "yes")] ++) ++ ++;; Splitters for ABS patterns. 
++ ++(define_split ++ [(set (match_operand:SI 0 "s_register_operand" "") ++ (abs:SI (match_operand:SI 1 "s_register_operand" ""))) ++ (clobber (reg:CC CC_REGNUM))] ++ "TARGET_32BIT && reload_completed && rtx_equal_p(operands[0], operands[1])" ++ [(set (reg:CC CC_REGNUM) (compare:CC (match_dup 1) (const_int 0))) ++ (cond_exec (lt (reg:CC CC_REGNUM) (const_int 0)) ++ (set (match_dup 0) (neg:SI (match_dup 1))))] ++) ++ ++(define_split ++ [(set (match_operand:SI 0 "s_register_operand" "") ++ (neg:SI (abs:SI (match_operand:SI 1 "s_register_operand" "")))) ++ (clobber (reg:CC CC_REGNUM))] ++ "TARGET_32BIT && reload_completed && rtx_equal_p(operands[0], operands[1])" ++ [(set (reg:CC CC_REGNUM) (compare:CC (match_dup 1) (const_int 0))) ++ (cond_exec (gt (reg:CC CC_REGNUM) (const_int 0)) ++ (set (match_dup 0) (neg:SI (match_dup 1))))] ++) ++ ++;; GCC does not add/remove clobbers when matching splitters, so we need ++;; variants with and without the CC clobber. ++(define_split ++ [(set (match_operand:SI 0 "s_register_operand" "") ++ (abs:SI (match_operand:SI 1 "s_register_operand" "")))] ++ "TARGET_32BIT && reload_completed && !rtx_equal_p(operands[0], operands[1])" ++ [(set (match_dup 0) (xor:SI (ashiftrt:SI (match_dup 1) (const_int 31)) ++ (match_dup 1))) ++ (set (match_dup 0) (minus:SI (match_dup 0) ++ (ashiftrt:SI (match_dup 1) (const_int 31))))] ++) ++ ++(define_split ++ [(set (match_operand:SI 0 "s_register_operand" "") ++ (abs:SI (match_operand:SI 1 "s_register_operand" ""))) ++ (clobber (reg:CC CC_REGNUM))] ++ "TARGET_32BIT && reload_completed && !rtx_equal_p(operands[0], operands[1])" ++ [(set (match_dup 0) (xor:SI (ashiftrt:SI (match_dup 1) (const_int 31)) ++ (match_dup 1))) ++ (set (match_dup 0) (minus:SI (match_dup 0) ++ (ashiftrt:SI (match_dup 1) (const_int 31))))] ++) ++ ++(define_split ++ [(set (match_operand:SI 0 "s_register_operand" "") ++ (neg:SI (abs:SI (match_operand:SI 1 "s_register_operand" ""))))] ++ "TARGET_32BIT && reload_completed && !rtx_equal_p(operands[0], operands[1])" ++ [(set (match_dup 0) (xor:SI (ashiftrt:SI (match_dup 1) (const_int 31)) ++ (match_dup 1))) ++ (set (match_dup 0) (minus:SI (ashiftrt:SI (match_dup 1) (const_int 31)) ++ (match_dup 0)))] ++) ++ ++(define_split ++ [(set (match_operand:SI 0 "s_register_operand" "") ++ (neg:SI (abs:SI (match_operand:SI 1 "s_register_operand" "")))) ++ (clobber (reg:CC CC_REGNUM))] ++ "TARGET_32BIT && reload_completed && !rtx_equal_p(operands[0], operands[1])" ++ [(set (match_dup 0) (xor:SI (ashiftrt:SI (match_dup 1) (const_int 31)) ++ (match_dup 1))) ++ (set (match_dup 0) (minus:SI (ashiftrt:SI (match_dup 1) (const_int 31)) ++ (match_dup 0)))] ++) ++ + (define_expand "abssf2" + [(set (match_operand:SF 0 "s_register_operand" "") + (abs:SF (match_operand:SF 1 "s_register_operand" "")))] +@@ -3505,7 +3915,8 @@ + (not:SI (match_operand:SI 1 "s_register_operand" "r")))] + "TARGET_32BIT" + "mvn%?\\t%0, %1" +- [(set_attr "predicable" "yes")] ++ [(set_attr "predicable" "yes") ++ (set_attr "insn" "mvn")] + ) + + (define_insn "*thumb1_one_cmplsi2" +@@ -3513,7 +3924,8 @@ + (not:SI (match_operand:SI 1 "register_operand" "l")))] + "TARGET_THUMB1" + "mvn\\t%0, %1" +- [(set_attr "length" "2")] ++ [(set_attr "length" "2") ++ (set_attr "insn" "mvn")] + ) + + (define_insn "*notsi_compare0" +@@ -3524,7 +3936,8 @@ + (not:SI (match_dup 1)))] + "TARGET_32BIT" + "mvn%.\\t%0, %1" +- [(set_attr "conds" "set")] ++ [(set_attr "conds" "set") ++ (set_attr "insn" "mvn")] + ) + + (define_insn "*notsi_compare0_scratch" +@@ -3534,11 +3947,40 @@ + (clobber 
(match_scratch:SI 0 "=r"))] + "TARGET_32BIT" + "mvn%.\\t%0, %1" +- [(set_attr "conds" "set")] ++ [(set_attr "conds" "set") ++ (set_attr "insn" "mvn")] + ) + + ;; Fixed <--> Floating conversion insns + ++(define_expand "floatsihf2" ++ [(set (match_operand:HF 0 "general_operand" "") ++ (float:HF (match_operand:SI 1 "general_operand" "")))] ++ "TARGET_EITHER" ++ " ++ { ++ rtx op1 = gen_reg_rtx (SFmode); ++ expand_float (op1, operands[1], 0); ++ op1 = convert_to_mode (HFmode, op1, 0); ++ emit_move_insn (operands[0], op1); ++ DONE; ++ }" ++) ++ ++(define_expand "floatdihf2" ++ [(set (match_operand:HF 0 "general_operand" "") ++ (float:HF (match_operand:DI 1 "general_operand" "")))] ++ "TARGET_EITHER" ++ " ++ { ++ rtx op1 = gen_reg_rtx (SFmode); ++ expand_float (op1, operands[1], 0); ++ op1 = convert_to_mode (HFmode, op1, 0); ++ emit_move_insn (operands[0], op1); ++ DONE; ++ }" ++) ++ + (define_expand "floatsisf2" + [(set (match_operand:SF 0 "s_register_operand" "") + (float:SF (match_operand:SI 1 "s_register_operand" "")))] +@@ -3563,6 +4005,30 @@ + } + ") + ++(define_expand "fix_trunchfsi2" ++ [(set (match_operand:SI 0 "general_operand" "") ++ (fix:SI (fix:HF (match_operand:HF 1 "general_operand" ""))))] ++ "TARGET_EITHER" ++ " ++ { ++ rtx op1 = convert_to_mode (SFmode, operands[1], 0); ++ expand_fix (operands[0], op1, 0); ++ DONE; ++ }" ++) ++ ++(define_expand "fix_trunchfdi2" ++ [(set (match_operand:DI 0 "general_operand" "") ++ (fix:DI (fix:HF (match_operand:HF 1 "general_operand" ""))))] ++ "TARGET_EITHER" ++ " ++ { ++ rtx op1 = convert_to_mode (SFmode, operands[1], 0); ++ expand_fix (operands[0], op1, 0); ++ DONE; ++ }" ++) ++ + (define_expand "fix_truncsfsi2" + [(set (match_operand:SI 0 "s_register_operand" "") + (fix:SI (fix:SF (match_operand:SF 1 "s_register_operand" ""))))] +@@ -3602,6 +4068,22 @@ + "TARGET_32BIT && TARGET_HARD_FLOAT" + "" + ) ++ ++/* DFmode -> HFmode conversions have to go through SFmode. */ ++(define_expand "truncdfhf2" ++ [(set (match_operand:HF 0 "general_operand" "") ++ (float_truncate:HF ++ (match_operand:DF 1 "general_operand" "")))] ++ "TARGET_EITHER" ++ " ++ { ++ rtx op1; ++ op1 = convert_to_mode (SFmode, operands[1], 0); ++ op1 = convert_to_mode (HFmode, op1, 0); ++ emit_move_insn (operands[0], op1); ++ DONE; ++ }" ++) + + ;; Zero and sign extension instructions. + +@@ -3623,6 +4105,7 @@ + return \"mov%?\\t%R0, #0\"; + " + [(set_attr "length" "8") ++ (set_attr "insn" "mov") + (set_attr "predicable" "yes")] + ) + +@@ -3666,6 +4149,7 @@ + " + [(set_attr "length" "8") + (set_attr "shift" "1") ++ (set_attr "insn" "mov") + (set_attr "predicable" "yes")] + ) + +@@ -4464,6 +4948,21 @@ + "TARGET_32BIT && TARGET_HARD_FLOAT" + "" + ) ++ ++/* HFmode -> DFmode conversions have to go through SFmode. */ ++(define_expand "extendhfdf2" ++ [(set (match_operand:DF 0 "general_operand" "") ++ (float_extend:DF (match_operand:HF 1 "general_operand" "")))] ++ "TARGET_EITHER" ++ " ++ { ++ rtx op1; ++ op1 = convert_to_mode (SFmode, operands[1], 0); ++ op1 = convert_to_mode (DFmode, op1, 0); ++ emit_insn (gen_movdf (operands[0], op1)); ++ DONE; ++ }" ++) + + ;; Move insns (including loads and stores) + +@@ -4699,6 +5198,7 @@ + }" + [(set_attr "length" "4,4,6,2,2,6,4,4") + (set_attr "type" "*,*,*,load2,store2,load2,store2,*") ++ (set_attr "insn" "*,mov,*,*,*,*,*,mov") + (set_attr "pool_range" "*,*,*,*,*,1020,*,*")] + ) + +@@ -4785,23 +5285,38 @@ + " + ) + ++;; The ARM LO_SUM and HIGH are backwards - HIGH sets the low bits, and ++;; LO_SUM adds in the high bits. 
Fortunately these are opaque opearsions ++;; so this does not matter. ++(define_insn "*arm_movt" ++ [(set (match_operand:SI 0 "nonimmediate_operand" "=r") ++ (lo_sum:SI (match_operand:SI 1 "nonimmediate_operand" "0") ++ (match_operand:SI 2 "general_operand" "i")))] ++ "TARGET_32BIT" ++ "movt%?\t%0, #:upper16:%c2" ++ [(set_attr "predicable" "yes") ++ (set_attr "length" "4")] ++) ++ + (define_insn "*arm_movsi_insn" +- [(set (match_operand:SI 0 "nonimmediate_operand" "=r,r,r,r, m") +- (match_operand:SI 1 "general_operand" "rI,K,N,mi,r"))] ++ [(set (match_operand:SI 0 "nonimmediate_operand" "=rk,r,r,r,rk,m") ++ (match_operand:SI 1 "general_operand" " rk,I,K,j,mi,rk"))] + "TARGET_ARM && ! TARGET_IWMMXT + && !(TARGET_HARD_FLOAT && TARGET_VFP) + && ( register_operand (operands[0], SImode) + || register_operand (operands[1], SImode))" + "@ + mov%?\\t%0, %1 ++ mov%?\\t%0, %1 + mvn%?\\t%0, #%B1 + movw%?\\t%0, %1 + ldr%?\\t%0, %1 + str%?\\t%1, %0" +- [(set_attr "type" "*,*,*,load1,store1") ++ [(set_attr "type" "*,*,*,*,load1,store1") ++ (set_attr "insn" "mov,mov,mvn,mov,*,*") + (set_attr "predicable" "yes") +- (set_attr "pool_range" "*,*,*,4096,*") +- (set_attr "neg_pool_range" "*,*,*,4084,*")] ++ (set_attr "pool_range" "*,*,*,*,4096,*") ++ (set_attr "neg_pool_range" "*,*,*,*,4084,*")] + ) + + (define_split +@@ -4818,9 +5333,22 @@ + " + ) + ++(define_split ++ [(set (match_operand:SI 0 "arm_general_register_operand" "") ++ (match_operand:SI 1 "general_operand" ""))] ++ "TARGET_32BIT ++ && TARGET_USE_MOVT && GET_CODE (operands[1]) == SYMBOL_REF ++ && !flag_pic && !target_word_relocations ++ && !arm_tls_referenced_p (operands[1])" ++ [(clobber (const_int 0))] ++{ ++ arm_emit_movpair (operands[0], operands[1]); ++ DONE; ++}) ++ + (define_insn "*thumb1_movsi_insn" +- [(set (match_operand:SI 0 "nonimmediate_operand" "=l,l,l,l,l,>,l, m,*lh") +- (match_operand:SI 1 "general_operand" "l, I,J,K,>,l,mi,l,*lh"))] ++ [(set (match_operand:SI 0 "nonimmediate_operand" "=l,l,l,l,l,>,l, m,*lhk") ++ (match_operand:SI 1 "general_operand" "l, I,J,K,>,l,mi,l,*lhk"))] + "TARGET_THUMB1 + && ( register_operand (operands[0], SImode) + || register_operand (operands[1], SImode))" +@@ -5418,6 +5946,7 @@ + ldr%(h%)\\t%0, %1\\t%@ movhi" + [(set_attr "type" "*,*,store1,load1") + (set_attr "predicable" "yes") ++ (set_attr "insn" "mov,mvn,*,*") + (set_attr "pool_range" "*,*,*,256") + (set_attr "neg_pool_range" "*,*,*,244")] + ) +@@ -5429,7 +5958,8 @@ + "@ + mov%?\\t%0, %1\\t%@ movhi + mvn%?\\t%0, #%B1\\t%@ movhi" +- [(set_attr "predicable" "yes")] ++ [(set_attr "predicable" "yes") ++ (set_attr "insn" "mov,mvn")] + ) + + (define_expand "thumb_movhi_clobber" +@@ -5560,6 +6090,7 @@ + ldr%(b%)\\t%0, %1 + str%(b%)\\t%1, %0" + [(set_attr "type" "*,*,load1,store1") ++ (set_attr "insn" "mov,mvn,*,*") + (set_attr "predicable" "yes")] + ) + +@@ -5578,9 +6109,111 @@ + mov\\t%0, %1" + [(set_attr "length" "2") + (set_attr "type" "*,load1,store1,*,*,*") ++ (set_attr "insn" "*,*,*,mov,mov,mov") + (set_attr "pool_range" "*,32,*,*,*,*")] + ) + ++;; HFmode moves ++(define_expand "movhf" ++ [(set (match_operand:HF 0 "general_operand" "") ++ (match_operand:HF 1 "general_operand" ""))] ++ "TARGET_EITHER" ++ " ++ if (TARGET_32BIT) ++ { ++ if (GET_CODE (operands[0]) == MEM) ++ operands[1] = force_reg (HFmode, operands[1]); ++ } ++ else /* TARGET_THUMB1 */ ++ { ++ if (can_create_pseudo_p ()) ++ { ++ if (GET_CODE (operands[0]) != REG) ++ operands[1] = force_reg (HFmode, operands[1]); ++ } ++ } ++ " ++) ++ ++(define_insn "*arm32_movhf" ++ [(set 
(match_operand:HF 0 "nonimmediate_operand" "=r,m,r,r") ++ (match_operand:HF 1 "general_operand" " m,r,r,F"))] ++ "TARGET_32BIT && !(TARGET_HARD_FLOAT && TARGET_NEON_FP16) ++ && ( s_register_operand (operands[0], HFmode) ++ || s_register_operand (operands[1], HFmode))" ++ "* ++ switch (which_alternative) ++ { ++ case 0: /* ARM register from memory */ ++ return \"ldr%(h%)\\t%0, %1\\t%@ __fp16\"; ++ case 1: /* memory from ARM register */ ++ return \"str%(h%)\\t%1, %0\\t%@ __fp16\"; ++ case 2: /* ARM register from ARM register */ ++ return \"mov%?\\t%0, %1\\t%@ __fp16\"; ++ case 3: /* ARM register from constant */ ++ { ++ REAL_VALUE_TYPE r; ++ long bits; ++ rtx ops[4]; ++ ++ REAL_VALUE_FROM_CONST_DOUBLE (r, operands[1]); ++ bits = real_to_target (NULL, &r, HFmode); ++ ops[0] = operands[0]; ++ ops[1] = GEN_INT (bits); ++ ops[2] = GEN_INT (bits & 0xff00); ++ ops[3] = GEN_INT (bits & 0x00ff); ++ ++ if (arm_arch_thumb2) ++ output_asm_insn (\"movw%?\\t%0, %1\", ops); ++ else ++ output_asm_insn (\"mov%?\\t%0, %2\;orr%?\\t%0, %0, %3\", ops); ++ return \"\"; ++ } ++ default: ++ gcc_unreachable (); ++ } ++ " ++ [(set_attr "conds" "unconditional") ++ (set_attr "type" "load1,store1,*,*") ++ (set_attr "length" "4,4,4,8") ++ (set_attr "predicable" "yes") ++ ] ++) ++ ++(define_insn "*thumb1_movhf" ++ [(set (match_operand:HF 0 "nonimmediate_operand" "=l,l,m,*r,*h") ++ (match_operand:HF 1 "general_operand" "l,mF,l,*h,*r"))] ++ "TARGET_THUMB1 ++ && ( s_register_operand (operands[0], HFmode) ++ || s_register_operand (operands[1], HFmode))" ++ "* ++ switch (which_alternative) ++ { ++ case 1: ++ { ++ rtx addr; ++ gcc_assert (GET_CODE(operands[1]) == MEM); ++ addr = XEXP (operands[1], 0); ++ if (GET_CODE (addr) == LABEL_REF ++ || (GET_CODE (addr) == CONST ++ && GET_CODE (XEXP (addr, 0)) == PLUS ++ && GET_CODE (XEXP (XEXP (addr, 0), 0)) == LABEL_REF ++ && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)) ++ { ++ /* Constant pool entry. 
*/ ++ return \"ldr\\t%0, %1\"; ++ } ++ return \"ldrh\\t%0, %1\"; ++ } ++ case 2: return \"strh\\t%1, %0\"; ++ default: return \"mov\\t%0, %1\"; ++ } ++ " ++ [(set_attr "length" "2") ++ (set_attr "type" "*,load1,store1,*,*") ++ (set_attr "pool_range" "*,1020,*,*,*")] ++) ++ + (define_expand "movsf" + [(set (match_operand:SF 0 "general_operand" "") + (match_operand:SF 1 "general_operand" ""))] +@@ -5633,6 +6266,7 @@ + [(set_attr "length" "4,4,4") + (set_attr "predicable" "yes") + (set_attr "type" "*,load1,store1") ++ (set_attr "insn" "mov,*,*") + (set_attr "pool_range" "*,4096,*") + (set_attr "neg_pool_range" "*,4084,*")] + ) +@@ -6088,7 +6722,7 @@ + (match_operand:BLK 1 "general_operand" "") + (match_operand:SI 2 "const_int_operand" "") + (match_operand:SI 3 "const_int_operand" "")] +- "TARGET_EITHER" ++ "TARGET_EITHER && !low_irq_latency" + " + if (TARGET_32BIT) + { +@@ -7298,7 +7932,11 @@ + (set_attr "shift" "1") + (set (attr "type") (if_then_else (match_operand 2 "const_int_operand" "") + (const_string "alu_shift") +- (const_string "alu_shift_reg")))] ++ (const_string "alu_shift_reg"))) ++ (set (attr "length") (if_then_else (and (eq_attr "type" "alu_shift_reg") ++ (eq_attr "fix_janus" "yes")) ++ (const_int 8) ++ (const_int 4)))] + ) + + (define_insn "*arm_cmpsi_shiftsi_swp" +@@ -7313,7 +7951,11 @@ + (set_attr "shift" "1") + (set (attr "type") (if_then_else (match_operand 2 "const_int_operand" "") + (const_string "alu_shift") +- (const_string "alu_shift_reg")))] ++ (const_string "alu_shift_reg"))) ++ (set (attr "length") (if_then_else (and (eq_attr "type" "alu_shift_reg") ++ (eq_attr "fix_janus" "yes")) ++ (const_int 8) ++ (const_int 4)))] + ) + + (define_insn "*arm_cmpsi_negshiftsi_si" +@@ -7328,7 +7970,11 @@ + [(set_attr "conds" "set") + (set (attr "type") (if_then_else (match_operand 3 "const_int_operand" "") + (const_string "alu_shift") +- (const_string "alu_shift_reg")))] ++ (const_string "alu_shift_reg"))) ++ (set (attr "length") (if_then_else (and (eq_attr "type" "alu_shift_reg") ++ (eq_attr "fix_janus" "yes")) ++ (const_int 8) ++ (const_int 4)))] + ) + + ;; Cirrus SF compare instruction +@@ -7670,77 +8316,77 @@ + (define_expand "seq" + [(set (match_operand:SI 0 "s_register_operand" "") + (eq:SI (match_dup 1) (const_int 0)))] +- "TARGET_32BIT" ++ "TARGET_32BIT && !TARGET_NO_COND_EXEC" + "operands[1] = arm_gen_compare_reg (EQ, arm_compare_op0, arm_compare_op1);" + ) + + (define_expand "sne" + [(set (match_operand:SI 0 "s_register_operand" "") + (ne:SI (match_dup 1) (const_int 0)))] +- "TARGET_32BIT" ++ "TARGET_32BIT && !TARGET_NO_COND_EXEC" + "operands[1] = arm_gen_compare_reg (NE, arm_compare_op0, arm_compare_op1);" + ) + + (define_expand "sgt" + [(set (match_operand:SI 0 "s_register_operand" "") + (gt:SI (match_dup 1) (const_int 0)))] +- "TARGET_32BIT" ++ "TARGET_32BIT && !TARGET_NO_COND_EXEC" + "operands[1] = arm_gen_compare_reg (GT, arm_compare_op0, arm_compare_op1);" + ) + + (define_expand "sle" + [(set (match_operand:SI 0 "s_register_operand" "") + (le:SI (match_dup 1) (const_int 0)))] +- "TARGET_32BIT" ++ "TARGET_32BIT && !TARGET_NO_COND_EXEC" + "operands[1] = arm_gen_compare_reg (LE, arm_compare_op0, arm_compare_op1);" + ) + + (define_expand "sge" + [(set (match_operand:SI 0 "s_register_operand" "") + (ge:SI (match_dup 1) (const_int 0)))] +- "TARGET_32BIT" ++ "TARGET_32BIT && !TARGET_NO_COND_EXEC" + "operands[1] = arm_gen_compare_reg (GE, arm_compare_op0, arm_compare_op1);" + ) + + (define_expand "slt" + [(set (match_operand:SI 0 "s_register_operand" "") + (lt:SI (match_dup 
1) (const_int 0)))] +- "TARGET_32BIT" ++ "TARGET_32BIT && !TARGET_NO_COND_EXEC" + "operands[1] = arm_gen_compare_reg (LT, arm_compare_op0, arm_compare_op1);" + ) + + (define_expand "sgtu" + [(set (match_operand:SI 0 "s_register_operand" "") + (gtu:SI (match_dup 1) (const_int 0)))] +- "TARGET_32BIT" ++ "TARGET_32BIT && !TARGET_NO_COND_EXEC" + "operands[1] = arm_gen_compare_reg (GTU, arm_compare_op0, arm_compare_op1);" + ) + + (define_expand "sleu" + [(set (match_operand:SI 0 "s_register_operand" "") + (leu:SI (match_dup 1) (const_int 0)))] +- "TARGET_32BIT" ++ "TARGET_32BIT && !TARGET_NO_COND_EXEC" + "operands[1] = arm_gen_compare_reg (LEU, arm_compare_op0, arm_compare_op1);" + ) + + (define_expand "sgeu" + [(set (match_operand:SI 0 "s_register_operand" "") + (geu:SI (match_dup 1) (const_int 0)))] +- "TARGET_32BIT" ++ "TARGET_32BIT && !TARGET_NO_COND_EXEC" + "operands[1] = arm_gen_compare_reg (GEU, arm_compare_op0, arm_compare_op1);" + ) + + (define_expand "sltu" + [(set (match_operand:SI 0 "s_register_operand" "") + (ltu:SI (match_dup 1) (const_int 0)))] +- "TARGET_32BIT" ++ "TARGET_32BIT && !TARGET_NO_COND_EXEC" + "operands[1] = arm_gen_compare_reg (LTU, arm_compare_op0, arm_compare_op1);" + ) + + (define_expand "sunordered" + [(set (match_operand:SI 0 "s_register_operand" "") + (unordered:SI (match_dup 1) (const_int 0)))] +- "TARGET_32BIT && TARGET_HARD_FLOAT && (TARGET_FPA || TARGET_VFP)" ++ "TARGET_32BIT && TARGET_HARD_FLOAT && (TARGET_FPA || TARGET_VFP) && !TARGET_NO_COND_EXEC" + "operands[1] = arm_gen_compare_reg (UNORDERED, arm_compare_op0, + arm_compare_op1);" + ) +@@ -7748,7 +8394,7 @@ + (define_expand "sordered" + [(set (match_operand:SI 0 "s_register_operand" "") + (ordered:SI (match_dup 1) (const_int 0)))] +- "TARGET_32BIT && TARGET_HARD_FLOAT && (TARGET_FPA || TARGET_VFP)" ++ "TARGET_32BIT && TARGET_HARD_FLOAT && (TARGET_FPA || TARGET_VFP) && !TARGET_NO_COND_EXEC" + "operands[1] = arm_gen_compare_reg (ORDERED, arm_compare_op0, + arm_compare_op1);" + ) +@@ -7756,7 +8402,7 @@ + (define_expand "sungt" + [(set (match_operand:SI 0 "s_register_operand" "") + (ungt:SI (match_dup 1) (const_int 0)))] +- "TARGET_32BIT && TARGET_HARD_FLOAT && (TARGET_FPA || TARGET_VFP)" ++ "TARGET_32BIT && TARGET_HARD_FLOAT && (TARGET_FPA || TARGET_VFP) && !TARGET_NO_COND_EXEC" + "operands[1] = arm_gen_compare_reg (UNGT, arm_compare_op0, + arm_compare_op1);" + ) +@@ -7764,7 +8410,7 @@ + (define_expand "sunge" + [(set (match_operand:SI 0 "s_register_operand" "") + (unge:SI (match_dup 1) (const_int 0)))] +- "TARGET_32BIT && TARGET_HARD_FLOAT && (TARGET_FPA || TARGET_VFP)" ++ "TARGET_32BIT && TARGET_HARD_FLOAT && (TARGET_FPA || TARGET_VFP) && !TARGET_NO_COND_EXEC" + "operands[1] = arm_gen_compare_reg (UNGE, arm_compare_op0, + arm_compare_op1);" + ) +@@ -7772,7 +8418,7 @@ + (define_expand "sunlt" + [(set (match_operand:SI 0 "s_register_operand" "") + (unlt:SI (match_dup 1) (const_int 0)))] +- "TARGET_32BIT && TARGET_HARD_FLOAT && (TARGET_FPA || TARGET_VFP)" ++ "TARGET_32BIT && TARGET_HARD_FLOAT && (TARGET_FPA || TARGET_VFP) && !TARGET_NO_COND_EXEC" + "operands[1] = arm_gen_compare_reg (UNLT, arm_compare_op0, + arm_compare_op1);" + ) +@@ -7780,7 +8426,7 @@ + (define_expand "sunle" + [(set (match_operand:SI 0 "s_register_operand" "") + (unle:SI (match_dup 1) (const_int 0)))] +- "TARGET_32BIT && TARGET_HARD_FLOAT && (TARGET_FPA || TARGET_VFP)" ++ "TARGET_32BIT && TARGET_HARD_FLOAT && (TARGET_FPA || TARGET_VFP) && !TARGET_NO_COND_EXEC" + "operands[1] = arm_gen_compare_reg (UNLE, arm_compare_op0, + 
arm_compare_op1);" + ) +@@ -7809,6 +8455,7 @@ + "TARGET_ARM" + "mov%D1\\t%0, #0\;mov%d1\\t%0, #1" + [(set_attr "conds" "use") ++ (set_attr "insn" "mov") + (set_attr "length" "8")] + ) + +@@ -7819,6 +8466,7 @@ + "TARGET_ARM" + "mov%D1\\t%0, #0\;mvn%d1\\t%0, #0" + [(set_attr "conds" "use") ++ (set_attr "insn" "mov") + (set_attr "length" "8")] + ) + +@@ -7829,6 +8477,7 @@ + "TARGET_ARM" + "mov%D1\\t%0, #0\;mvn%d1\\t%0, #1" + [(set_attr "conds" "use") ++ (set_attr "insn" "mov") + (set_attr "length" "8")] + ) + +@@ -8032,7 +8681,7 @@ + (if_then_else:SI (match_operand 1 "arm_comparison_operator" "") + (match_operand:SI 2 "arm_not_operand" "") + (match_operand:SI 3 "arm_not_operand" "")))] +- "TARGET_32BIT" ++ "TARGET_32BIT && !TARGET_NO_COND_EXEC" + " + { + enum rtx_code code = GET_CODE (operands[1]); +@@ -8051,7 +8700,7 @@ + (if_then_else:SF (match_operand 1 "arm_comparison_operator" "") + (match_operand:SF 2 "s_register_operand" "") + (match_operand:SF 3 "nonmemory_operand" "")))] +- "TARGET_32BIT" ++ "TARGET_32BIT && !TARGET_NO_COND_EXEC" + " + { + enum rtx_code code = GET_CODE (operands[1]); +@@ -8076,7 +8725,7 @@ + (if_then_else:DF (match_operand 1 "arm_comparison_operator" "") + (match_operand:DF 2 "s_register_operand" "") + (match_operand:DF 3 "arm_float_add_operand" "")))] +- "TARGET_32BIT && TARGET_HARD_FLOAT && (TARGET_FPA || TARGET_VFP)" ++ "TARGET_32BIT && TARGET_HARD_FLOAT && (TARGET_FPA || TARGET_VFP) && !TARGET_NO_COND_EXEC" + " + { + enum rtx_code code = GET_CODE (operands[1]); +@@ -8108,7 +8757,8 @@ + mvn%d3\\t%0, #%B1\;mov%D3\\t%0, %2 + mvn%d3\\t%0, #%B1\;mvn%D3\\t%0, #%B2" + [(set_attr "length" "4,4,4,4,8,8,8,8") +- (set_attr "conds" "use")] ++ (set_attr "conds" "use") ++ (set_attr "insn" "mov,mvn,mov,mvn,mov,mov,mvn,mvn")] + ) + + (define_insn "*movsfcc_soft_insn" +@@ -8121,7 +8771,8 @@ + "@ + mov%D3\\t%0, %2 + mov%d3\\t%0, %1" +- [(set_attr "conds" "use")] ++ [(set_attr "conds" "use") ++ (set_attr "insn" "mov")] + ) + + +@@ -8524,7 +9175,7 @@ + [(match_operand 1 "cc_register" "") (const_int 0)]) + (return) + (pc)))] +- "TARGET_ARM && USE_RETURN_INSN (TRUE)" ++ "TARGET_ARM && USE_RETURN_INSN (TRUE) && !TARGET_NO_COND_EXEC" + "* + { + if (arm_ccfsm_state == 2) +@@ -8545,7 +9196,7 @@ + [(match_operand 1 "cc_register" "") (const_int 0)]) + (pc) + (return)))] +- "TARGET_ARM && USE_RETURN_INSN (TRUE)" ++ "TARGET_ARM && USE_RETURN_INSN (TRUE) && !TARGET_NO_COND_EXEC" + "* + { + if (arm_ccfsm_state == 2) +@@ -8864,7 +9515,11 @@ + (set_attr "shift" "4") + (set (attr "type") (if_then_else (match_operand 5 "const_int_operand" "") + (const_string "alu_shift") +- (const_string "alu_shift_reg")))] ++ (const_string "alu_shift_reg"))) ++ (set (attr "length") (if_then_else (and (eq_attr "type" "alu_shift_reg") ++ (eq_attr "fix_janus" "yes")) ++ (const_int 8) ++ (const_int 4)))] + ) + + (define_split +@@ -8902,7 +9557,11 @@ + (set_attr "shift" "4") + (set (attr "type") (if_then_else (match_operand 5 "const_int_operand" "") + (const_string "alu_shift") +- (const_string "alu_shift_reg")))] ++ (const_string "alu_shift_reg"))) ++ (set (attr "length") (if_then_else (and (eq_attr "type" "alu_shift_reg") ++ (eq_attr "fix_janus" "yes")) ++ (const_int 8) ++ (const_int 4)))] + ) + + (define_insn "*arith_shiftsi_compare0_scratch" +@@ -8920,7 +9579,11 @@ + (set_attr "shift" "4") + (set (attr "type") (if_then_else (match_operand 5 "const_int_operand" "") + (const_string "alu_shift") +- (const_string "alu_shift_reg")))] ++ (const_string "alu_shift_reg"))) ++ (set (attr "length") (if_then_else (and (eq_attr 
"type" "alu_shift_reg") ++ (eq_attr "fix_janus" "yes")) ++ (const_int 8) ++ (const_int 4)))] + ) + + (define_insn "*sub_shiftsi" +@@ -8935,7 +9598,11 @@ + (set_attr "shift" "3") + (set (attr "type") (if_then_else (match_operand 4 "const_int_operand" "") + (const_string "alu_shift") +- (const_string "alu_shift_reg")))] ++ (const_string "alu_shift_reg"))) ++ (set (attr "length") (if_then_else (and (eq_attr "type" "alu_shift_reg") ++ (eq_attr "fix_janus" "yes")) ++ (const_int 8) ++ (const_int 4)))] + ) + + (define_insn "*sub_shiftsi_compare0" +@@ -8955,7 +9622,11 @@ + (set_attr "shift" "3") + (set (attr "type") (if_then_else (match_operand 4 "const_int_operand" "") + (const_string "alu_shift") +- (const_string "alu_shift_reg")))] ++ (const_string "alu_shift_reg"))) ++ (set (attr "length") (if_then_else (and (eq_attr "type" "alu_shift_reg") ++ (eq_attr "fix_janus" "yes")) ++ (const_int 8) ++ (const_int 4)))] + ) + + (define_insn "*sub_shiftsi_compare0_scratch" +@@ -8973,7 +9644,11 @@ + (set_attr "shift" "3") + (set (attr "type") (if_then_else (match_operand 4 "const_int_operand" "") + (const_string "alu_shift") +- (const_string "alu_shift_reg")))] ++ (const_string "alu_shift_reg"))) ++ (set (attr "length") (if_then_else (and (eq_attr "type" "alu_shift_reg") ++ (eq_attr "fix_janus" "yes")) ++ (const_int 8) ++ (const_int 4)))] + ) + + +@@ -8986,6 +9661,7 @@ + "TARGET_ARM" + "mov%D1\\t%0, #0\;and%d1\\t%0, %2, #1" + [(set_attr "conds" "use") ++ (set_attr "insn" "mov") + (set_attr "length" "8")] + ) + +@@ -8999,6 +9675,7 @@ + orr%d2\\t%0, %1, #1 + mov%D2\\t%0, %1\;orr%d2\\t%0, %1, #1" + [(set_attr "conds" "use") ++ (set_attr "insn" "orr") + (set_attr "length" "4,8")] + ) + +@@ -9008,7 +9685,7 @@ + [(match_operand:SI 2 "s_register_operand" "r,r") + (match_operand:SI 3 "arm_add_operand" "rI,L")])) + (clobber (reg:CC CC_REGNUM))] +- "TARGET_ARM" ++ "TARGET_ARM && !TARGET_NO_COND_EXEC" + "* + if (operands[3] == const0_rtx) + { +@@ -9063,6 +9740,7 @@ + return \"\"; + " + [(set_attr "conds" "use") ++ (set_attr "insn" "mov") + (set_attr "length" "4,4,8")] + ) + +@@ -9074,7 +9752,7 @@ + (match_operand:SI 3 "arm_rhs_operand" "rI,rI")]) + (match_operand:SI 1 "s_register_operand" "0,?r")])) + (clobber (reg:CC CC_REGNUM))] +- "TARGET_ARM" ++ "TARGET_ARM && !TARGET_NO_SINGLE_COND_EXEC" + "* + if (GET_CODE (operands[4]) == LT && operands[3] == const0_rtx) + return \"%i5\\t%0, %1, %2, lsr #31\"; +@@ -9470,7 +10148,7 @@ + (match_operand:SI 1 "arm_rhs_operand" "0,rI,?rI") + (match_operand:SI 2 "arm_rhs_operand" "rI,0,rI"))) + (clobber (reg:CC CC_REGNUM))] +- "TARGET_ARM" ++ "TARGET_ARM && !TARGET_NO_COND_EXEC" + "* + if (GET_CODE (operands[5]) == LT + && (operands[4] == const0_rtx)) +@@ -9536,7 +10214,7 @@ + (match_operand:SI 3 "arm_add_operand" "rIL,rIL")) + (match_operand:SI 1 "arm_rhs_operand" "0,?rI"))) + (clobber (reg:CC CC_REGNUM))] +- "TARGET_ARM" ++ "TARGET_ARM && !TARGET_NO_SINGLE_COND_EXEC" + "#" + [(set_attr "conds" "clob") + (set_attr "length" "8,12")] +@@ -9572,7 +10250,7 @@ + (match_operand:SI 2 "s_register_operand" "r,r") + (match_operand:SI 3 "arm_add_operand" "rIL,rIL")))) + (clobber (reg:CC CC_REGNUM))] +- "TARGET_ARM" ++ "TARGET_ARM && !TARGET_NO_SINGLE_COND_EXEC" + "#" + [(set_attr "conds" "clob") + (set_attr "length" "8,12")] +@@ -9610,7 +10288,7 @@ + [(match_operand:SI 3 "s_register_operand" "r") + (match_operand:SI 4 "arm_rhs_operand" "rI")]))) + (clobber (reg:CC CC_REGNUM))] +- "TARGET_ARM" ++ "TARGET_ARM && !TARGET_NO_SINGLE_COND_EXEC" + "#" + [(set_attr "conds" "clob") + (set_attr "length" 
"12")] +@@ -9760,7 +10438,7 @@ + (not:SI + (match_operand:SI 2 "s_register_operand" "r,r")))) + (clobber (reg:CC CC_REGNUM))] +- "TARGET_ARM" ++ "TARGET_ARM && !TARGET_NO_SINGLE_COND_EXEC" + "#" + [(set_attr "conds" "clob") + (set_attr "length" "8,12")] +@@ -9779,6 +10457,7 @@ + mov%d4\\t%0, %1\;mvn%D4\\t%0, %2 + mvn%d4\\t%0, #%B1\;mvn%D4\\t%0, %2" + [(set_attr "conds" "use") ++ (set_attr "insn" "mvn") + (set_attr "length" "4,8,8")] + ) + +@@ -9792,7 +10471,7 @@ + (match_operand:SI 2 "s_register_operand" "r,r")) + (match_operand:SI 1 "arm_not_operand" "0,?rIK"))) + (clobber (reg:CC CC_REGNUM))] +- "TARGET_ARM" ++ "TARGET_ARM && !TARGET_NO_SINGLE_COND_EXEC" + "#" + [(set_attr "conds" "clob") + (set_attr "length" "8,12")] +@@ -9811,6 +10490,7 @@ + mov%D4\\t%0, %1\;mvn%d4\\t%0, %2 + mvn%D4\\t%0, #%B1\;mvn%d4\\t%0, %2" + [(set_attr "conds" "use") ++ (set_attr "insn" "mvn") + (set_attr "length" "4,8,8")] + ) + +@@ -9825,7 +10505,7 @@ + (match_operand:SI 3 "arm_rhs_operand" "rM,rM")]) + (match_operand:SI 1 "arm_not_operand" "0,?rIK"))) + (clobber (reg:CC CC_REGNUM))] +- "TARGET_ARM" ++ "TARGET_ARM && !TARGET_NO_SINGLE_COND_EXEC" + "#" + [(set_attr "conds" "clob") + (set_attr "length" "8,12")] +@@ -9847,10 +10527,23 @@ + mvn%D5\\t%0, #%B1\;mov%d5\\t%0, %2%S4" + [(set_attr "conds" "use") + (set_attr "shift" "2") +- (set_attr "length" "4,8,8") ++ (set_attr "insn" "mov") + (set (attr "type") (if_then_else (match_operand 3 "const_int_operand" "") + (const_string "alu_shift") +- (const_string "alu_shift_reg")))] ++ (const_string "alu_shift_reg"))) ++ (set_attr_alternative "length" ++ [(if_then_else (and (eq_attr "type" "alu_shift_reg") ++ (eq_attr "fix_janus" "yes")) ++ (const_int 8) ++ (const_int 4)) ++ (if_then_else (and (eq_attr "type" "alu_shift_reg") ++ (eq_attr "fix_janus" "yes")) ++ (const_int 12) ++ (const_int 8)) ++ (if_then_else (and (eq_attr "type" "alu_shift_reg") ++ (eq_attr "fix_janus" "yes")) ++ (const_int 12) ++ (const_int 8))])] + ) + + (define_insn "*ifcompare_move_shift" +@@ -9864,7 +10557,7 @@ + [(match_operand:SI 2 "s_register_operand" "r,r") + (match_operand:SI 3 "arm_rhs_operand" "rM,rM")]))) + (clobber (reg:CC CC_REGNUM))] +- "TARGET_ARM" ++ "TARGET_ARM && !TARGET_NO_SINGLE_COND_EXEC" + "#" + [(set_attr "conds" "clob") + (set_attr "length" "8,12")] +@@ -9886,10 +10579,24 @@ + mvn%d5\\t%0, #%B1\;mov%D5\\t%0, %2%S4" + [(set_attr "conds" "use") + (set_attr "shift" "2") +- (set_attr "length" "4,8,8") ++ (set_attr "insn" "mov") + (set (attr "type") (if_then_else (match_operand 3 "const_int_operand" "") + (const_string "alu_shift") +- (const_string "alu_shift_reg")))] ++ (const_string "alu_shift_reg"))) ++ (set_attr_alternative "length" ++ [(if_then_else (and (eq_attr "type" "alu_shift_reg") ++ (eq_attr "fix_janus" "yes")) ++ (const_int 8) ++ (const_int 4)) ++ (if_then_else (and (eq_attr "type" "alu_shift_reg") ++ (eq_attr "fix_janus" "yes")) ++ (const_int 12) ++ (const_int 8)) ++ (if_then_else (and (eq_attr "type" "alu_shift_reg") ++ (eq_attr "fix_janus" "yes")) ++ (const_int 12) ++ (const_int 8))]) ++ (set_attr "insn" "mov")] + ) + + (define_insn "*ifcompare_shift_shift" +@@ -9905,7 +10612,7 @@ + [(match_operand:SI 3 "s_register_operand" "r") + (match_operand:SI 4 "arm_rhs_operand" "rM")]))) + (clobber (reg:CC CC_REGNUM))] +- "TARGET_ARM" ++ "TARGET_ARM && !TARGET_NO_SINGLE_COND_EXEC" + "#" + [(set_attr "conds" "clob") + (set_attr "length" "12")] +@@ -9926,12 +10633,16 @@ + "mov%d5\\t%0, %1%S6\;mov%D5\\t%0, %3%S7" + [(set_attr "conds" "use") + (set_attr "shift" "1") +- (set_attr 
"length" "8") ++ (set_attr "insn" "mov") + (set (attr "type") (if_then_else + (and (match_operand 2 "const_int_operand" "") + (match_operand 4 "const_int_operand" "")) + (const_string "alu_shift") +- (const_string "alu_shift_reg")))] ++ (const_string "alu_shift_reg"))) ++ (set (attr "length") (if_then_else (and (eq_attr "type" "alu_shift_reg") ++ (eq_attr "fix_janus" "yes")) ++ (const_int 16) ++ (const_int 8)))] + ) + + (define_insn "*ifcompare_not_arith" +@@ -9945,7 +10656,7 @@ + [(match_operand:SI 2 "s_register_operand" "r") + (match_operand:SI 3 "arm_rhs_operand" "rI")]))) + (clobber (reg:CC CC_REGNUM))] +- "TARGET_ARM" ++ "TARGET_ARM && !TARGET_NO_SINGLE_COND_EXEC" + "#" + [(set_attr "conds" "clob") + (set_attr "length" "12")] +@@ -9963,6 +10674,7 @@ + "TARGET_ARM" + "mvn%d5\\t%0, %1\;%I6%D5\\t%0, %2, %3" + [(set_attr "conds" "use") ++ (set_attr "insn" "mvn") + (set_attr "length" "8")] + ) + +@@ -9977,7 +10689,7 @@ + (match_operand:SI 3 "arm_rhs_operand" "rI")]) + (not:SI (match_operand:SI 1 "s_register_operand" "r")))) + (clobber (reg:CC CC_REGNUM))] +- "TARGET_ARM" ++ "TARGET_ARM && !TARGET_NO_SINGLE_COND_EXEC" + "#" + [(set_attr "conds" "clob") + (set_attr "length" "12")] +@@ -9995,6 +10707,7 @@ + "TARGET_ARM" + "mvn%D5\\t%0, %1\;%I6%d5\\t%0, %2, %3" + [(set_attr "conds" "use") ++ (set_attr "insn" "mvn") + (set_attr "length" "8")] + ) + +@@ -10007,7 +10720,7 @@ + (neg:SI (match_operand:SI 2 "s_register_operand" "r,r")) + (match_operand:SI 1 "arm_not_operand" "0,?rIK"))) + (clobber (reg:CC CC_REGNUM))] +- "TARGET_ARM" ++ "TARGET_ARM && !TARGET_NO_SINGLE_COND_EXEC" + "#" + [(set_attr "conds" "clob") + (set_attr "length" "8,12")] +@@ -10038,7 +10751,7 @@ + (match_operand:SI 1 "arm_not_operand" "0,?rIK") + (neg:SI (match_operand:SI 2 "s_register_operand" "r,r")))) + (clobber (reg:CC CC_REGNUM))] +- "TARGET_ARM" ++ "TARGET_ARM && !TARGET_NO_SINGLE_COND_EXEC" + "#" + [(set_attr "conds" "clob") + (set_attr "length" "8,12")] +@@ -10181,13 +10894,13 @@ + ; reversed, check that the memory references aren't volatile. 
+ + (define_peephole +- [(set (match_operand:SI 0 "s_register_operand" "=r") ++ [(set (match_operand:SI 0 "s_register_operand" "=rk") + (match_operand:SI 4 "memory_operand" "m")) +- (set (match_operand:SI 1 "s_register_operand" "=r") ++ (set (match_operand:SI 1 "s_register_operand" "=rk") + (match_operand:SI 5 "memory_operand" "m")) +- (set (match_operand:SI 2 "s_register_operand" "=r") ++ (set (match_operand:SI 2 "s_register_operand" "=rk") + (match_operand:SI 6 "memory_operand" "m")) +- (set (match_operand:SI 3 "s_register_operand" "=r") ++ (set (match_operand:SI 3 "s_register_operand" "=rk") + (match_operand:SI 7 "memory_operand" "m"))] + "TARGET_ARM && load_multiple_sequence (operands, 4, NULL, NULL, NULL)" + "* +@@ -10196,11 +10909,11 @@ + ) + + (define_peephole +- [(set (match_operand:SI 0 "s_register_operand" "=r") ++ [(set (match_operand:SI 0 "s_register_operand" "=rk") + (match_operand:SI 3 "memory_operand" "m")) +- (set (match_operand:SI 1 "s_register_operand" "=r") ++ (set (match_operand:SI 1 "s_register_operand" "=rk") + (match_operand:SI 4 "memory_operand" "m")) +- (set (match_operand:SI 2 "s_register_operand" "=r") ++ (set (match_operand:SI 2 "s_register_operand" "=rk") + (match_operand:SI 5 "memory_operand" "m"))] + "TARGET_ARM && load_multiple_sequence (operands, 3, NULL, NULL, NULL)" + "* +@@ -10209,9 +10922,9 @@ + ) + + (define_peephole +- [(set (match_operand:SI 0 "s_register_operand" "=r") ++ [(set (match_operand:SI 0 "s_register_operand" "=rk") + (match_operand:SI 2 "memory_operand" "m")) +- (set (match_operand:SI 1 "s_register_operand" "=r") ++ (set (match_operand:SI 1 "s_register_operand" "=rk") + (match_operand:SI 3 "memory_operand" "m"))] + "TARGET_ARM && load_multiple_sequence (operands, 2, NULL, NULL, NULL)" + "* +@@ -10221,13 +10934,13 @@ + + (define_peephole + [(set (match_operand:SI 4 "memory_operand" "=m") +- (match_operand:SI 0 "s_register_operand" "r")) ++ (match_operand:SI 0 "s_register_operand" "rk")) + (set (match_operand:SI 5 "memory_operand" "=m") +- (match_operand:SI 1 "s_register_operand" "r")) ++ (match_operand:SI 1 "s_register_operand" "rk")) + (set (match_operand:SI 6 "memory_operand" "=m") +- (match_operand:SI 2 "s_register_operand" "r")) ++ (match_operand:SI 2 "s_register_operand" "rk")) + (set (match_operand:SI 7 "memory_operand" "=m") +- (match_operand:SI 3 "s_register_operand" "r"))] ++ (match_operand:SI 3 "s_register_operand" "rk"))] + "TARGET_ARM && store_multiple_sequence (operands, 4, NULL, NULL, NULL)" + "* + return emit_stm_seq (operands, 4); +@@ -10236,11 +10949,11 @@ + + (define_peephole + [(set (match_operand:SI 3 "memory_operand" "=m") +- (match_operand:SI 0 "s_register_operand" "r")) ++ (match_operand:SI 0 "s_register_operand" "rk")) + (set (match_operand:SI 4 "memory_operand" "=m") +- (match_operand:SI 1 "s_register_operand" "r")) ++ (match_operand:SI 1 "s_register_operand" "rk")) + (set (match_operand:SI 5 "memory_operand" "=m") +- (match_operand:SI 2 "s_register_operand" "r"))] ++ (match_operand:SI 2 "s_register_operand" "rk"))] + "TARGET_ARM && store_multiple_sequence (operands, 3, NULL, NULL, NULL)" + "* + return emit_stm_seq (operands, 3); +@@ -10249,9 +10962,9 @@ + + (define_peephole + [(set (match_operand:SI 2 "memory_operand" "=m") +- (match_operand:SI 0 "s_register_operand" "r")) ++ (match_operand:SI 0 "s_register_operand" "rk")) + (set (match_operand:SI 3 "memory_operand" "=m") +- (match_operand:SI 1 "s_register_operand" "r"))] ++ (match_operand:SI 1 "s_register_operand" "rk"))] + "TARGET_ARM && store_multiple_sequence 
(operands, 2, NULL, NULL, NULL)" + "* + return emit_stm_seq (operands, 2); +@@ -10406,7 +11119,7 @@ + (match_dup 0) + (match_operand 4 "" ""))) + (clobber (reg:CC CC_REGNUM))] +- "TARGET_ARM && reload_completed" ++ "TARGET_ARM && reload_completed && !TARGET_NO_SINGLE_COND_EXEC" + [(set (match_dup 5) (match_dup 6)) + (cond_exec (match_dup 7) + (set (match_dup 0) (match_dup 4)))] +@@ -10434,7 +11147,7 @@ + (match_operand 4 "" "") + (match_dup 0))) + (clobber (reg:CC CC_REGNUM))] +- "TARGET_ARM && reload_completed" ++ "TARGET_ARM && reload_completed && !TARGET_NO_SINGLE_COND_EXEC" + [(set (match_dup 5) (match_dup 6)) + (cond_exec (match_op_dup 1 [(match_dup 5) (const_int 0)]) + (set (match_dup 0) (match_dup 4)))] +@@ -10455,7 +11168,7 @@ + (match_operand 4 "" "") + (match_operand 5 "" ""))) + (clobber (reg:CC CC_REGNUM))] +- "TARGET_ARM && reload_completed" ++ "TARGET_ARM && reload_completed && !TARGET_NO_SINGLE_COND_EXEC" + [(set (match_dup 6) (match_dup 7)) + (cond_exec (match_op_dup 1 [(match_dup 6) (const_int 0)]) + (set (match_dup 0) (match_dup 4))) +@@ -10487,7 +11200,7 @@ + (not:SI + (match_operand:SI 5 "s_register_operand" "")))) + (clobber (reg:CC CC_REGNUM))] +- "TARGET_ARM && reload_completed" ++ "TARGET_ARM && reload_completed && !TARGET_NO_SINGLE_COND_EXEC" + [(set (match_dup 6) (match_dup 7)) + (cond_exec (match_op_dup 1 [(match_dup 6) (const_int 0)]) + (set (match_dup 0) (match_dup 4))) +@@ -10522,6 +11235,7 @@ + mvn%D4\\t%0, %2 + mov%d4\\t%0, %1\;mvn%D4\\t%0, %2" + [(set_attr "conds" "use") ++ (set_attr "insn" "mvn") + (set_attr "length" "4,8")] + ) + +@@ -10610,8 +11324,8 @@ + + (define_insn "stack_tie" + [(set (mem:BLK (scratch)) +- (unspec:BLK [(match_operand:SI 0 "s_register_operand" "r") +- (match_operand:SI 1 "s_register_operand" "r")] ++ (unspec:BLK [(match_operand:SI 0 "s_register_operand" "rk") ++ (match_operand:SI 1 "s_register_operand" "rk")] + UNSPEC_PRLG_STK))] + "" + "" +@@ -10656,6 +11370,24 @@ + " + ) + ++(define_insn "align_16" ++ [(unspec_volatile [(const_int 0)] VUNSPEC_ALIGN16)] ++ "TARGET_EITHER" ++ "* ++ assemble_align (128); ++ return \"\"; ++ " ++) ++ ++(define_insn "align_32" ++ [(unspec_volatile [(const_int 0)] VUNSPEC_ALIGN32)] ++ "TARGET_EITHER" ++ "* ++ assemble_align (256); ++ return \"\"; ++ " ++) ++ + (define_insn "consttable_end" + [(unspec_volatile [(const_int 0)] VUNSPEC_POOL_END)] + "TARGET_EITHER" +@@ -10682,6 +11414,7 @@ + "TARGET_THUMB1" + "* + making_const_table = TRUE; ++ gcc_assert (GET_MODE_CLASS (GET_MODE (operands[0])) != MODE_FLOAT); + assemble_integer (operands[0], 2, BITS_PER_WORD, 1); + assemble_zeros (2); + return \"\"; +@@ -10694,18 +11427,29 @@ + "TARGET_EITHER" + "* + { ++ rtx x = operands[0]; + making_const_table = TRUE; +- switch (GET_MODE_CLASS (GET_MODE (operands[0]))) ++ switch (GET_MODE_CLASS (GET_MODE (x))) + { + case MODE_FLOAT: +- { +- REAL_VALUE_TYPE r; +- REAL_VALUE_FROM_CONST_DOUBLE (r, operands[0]); +- assemble_real (r, GET_MODE (operands[0]), BITS_PER_WORD); +- break; +- } ++ if (GET_MODE (x) == HFmode) ++ arm_emit_fp16_const (x); ++ else ++ { ++ REAL_VALUE_TYPE r; ++ REAL_VALUE_FROM_CONST_DOUBLE (r, x); ++ assemble_real (r, GET_MODE (x), BITS_PER_WORD); ++ } ++ break; + default: +- assemble_integer (operands[0], 4, BITS_PER_WORD, 1); ++ /* XXX: Sometimes gcc does something really dumb and ends up with ++ a HIGH in a constant pool entry, usually because it's trying to ++ load into a VFP register. 
We know this will always be used in ++ combination with a LO_SUM which ignores the high bits, so just ++ strip off the HIGH. */ ++ if (GET_CODE (x) == HIGH) ++ x = XEXP (x, 0); ++ assemble_integer (x, 4, BITS_PER_WORD, 1); + break; + } + return \"\"; +@@ -10808,13 +11552,15 @@ + "TARGET_32BIT && arm_arch5e" + "pld\\t%a0") + +-;; General predication pattern ++;; General predication pattern. ++;; Conditional branches are available as both arm_cond_branch and ++;; predicated arm_jump, so it doesn't matter if we disable the latter. + + (define_cond_exec + [(match_operator 0 "arm_comparison_operator" + [(match_operand 1 "cc_register" "") + (const_int 0)])] +- "TARGET_32BIT" ++ "TARGET_32BIT && !TARGET_NO_SINGLE_COND_EXEC" + "" + ) + +--- a/gcc/config/arm/arm.opt ++++ b/gcc/config/arm/arm.opt +@@ -78,6 +78,10 @@ Specify if floating point hardware shoul + mfp= + Target RejectNegative Joined Undocumented Var(target_fpe_name) + ++mfp16-format= ++Target RejectNegative Joined Var(target_fp16_format_name) ++Specify the __fp16 floating-point format ++ + ;; Now ignored. + mfpe + Target RejectNegative Mask(FPE) Undocumented +@@ -93,6 +97,10 @@ mhard-float + Target RejectNegative + Alias for -mfloat-abi=hard + ++mfix-janus-2cc ++Target Report Mask(FIX_JANUS) ++Work around hardware errata for Avalent Janus 2CC cores. ++ + mlittle-endian + Target Report RejectNegative InverseMask(BIG_END) + Assume target CPU is configured as little endian +@@ -101,6 +109,10 @@ mlong-calls + Target Report Mask(LONG_CALLS) + Generate call insns as indirect calls, if necessary + ++mmarvell-div ++Target Report Mask(MARVELL_DIV) ++Generate hardware integer division instructions supported by some Marvell cores. ++ + mpic-register= + Target RejectNegative Joined Var(arm_pic_register_string) + Specify the register to be used for PIC addressing +@@ -156,3 +168,16 @@ Assume big endian bytes, little endian w + mvectorize-with-neon-quad + Target Report Mask(NEON_VECTORIZE_QUAD) + Use Neon quad-word (rather than double-word) registers for vectorization ++ ++mlow-irq-latency ++Target Report Var(low_irq_latency) ++Try to reduce interrupt latency of the generated code ++ ++mword-relocations ++Target Report Var(target_word_relocations) Init(TARGET_DEFAULT_WORD_RELOCATIONS) ++Only generate absolute relocations on word sized values. ++ ++mfix-cortex-m3-ldrd ++Target Report Var(fix_cm3_ldrd) Init(2) ++Avoid overlapping destination and address registers on LDRD instructions ++that may trigger Cortex-M3 errata. +--- a/gcc/config/arm/arm1020e.md ++++ b/gcc/config/arm/arm1020e.md +@@ -281,12 +281,12 @@ + ;; first execute state. We model this by using 1020a_e in the first cycle. + (define_insn_reservation "v10_ffarith" 5 + (and (eq_attr "vfp10" "yes") +- (eq_attr "type" "ffarith")) ++ (eq_attr "type" "fcpys,ffariths,ffarithd,fcmps,fcmpd")) + "1020a_e+v10_fmac") + + (define_insn_reservation "v10_farith" 5 + (and (eq_attr "vfp10" "yes") +- (eq_attr "type" "farith")) ++ (eq_attr "type" "faddd,fadds")) + "1020a_e+v10_fmac") + + (define_insn_reservation "v10_cvt" 5 +--- a/gcc/config/arm/arm_neon.h ++++ b/gcc/config/arm/arm_neon.h +@@ -39,7 +39,11 @@ + extern "C" { + #endif + ++#if defined (__vxworks) && defined (_WRS_KERNEL) ++#include ++#else + #include ++#endif + + typedef __builtin_neon_qi int8x8_t __attribute__ ((__vector_size__ (8))); + typedef __builtin_neon_hi int16x4_t __attribute__ ((__vector_size__ (8))); +--- /dev/null ++++ b/gcc/config/arm/bpabi-v6m.S +@@ -0,0 +1,325 @@ ++/* Miscellaneous BPABI functions. 
ARMv6M implementation ++ ++ Copyright (C) 2006 Free Software Foundation, Inc. ++ Contributed by CodeSourcery, LLC. ++ ++ This file is free software; you can redistribute it and/or modify it ++ under the terms of the GNU General Public License as published by the ++ Free Software Foundation; either version 2, or (at your option) any ++ later version. ++ ++ In addition to the permissions in the GNU General Public License, the ++ Free Software Foundation gives you unlimited permission to link the ++ compiled version of this file into combinations with other programs, ++ and to distribute those combinations without any restriction coming ++ from the use of this file. (The General Public License restrictions ++ do apply in other respects; for example, they cover modification of ++ the file, and distribution when not linked into a combine ++ executable.) ++ ++ This file is distributed in the hope that it will be useful, but ++ WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ General Public License for more details. ++ ++ You should have received a copy of the GNU General Public License ++ along with this program; see the file COPYING. If not, write to ++ the Free Software Foundation, 51 Franklin Street, Fifth Floor, ++ Boston, MA 02110-1301, USA. */ ++ ++#ifdef __ARMEB__ ++#define xxh r0 ++#define xxl r1 ++#define yyh r2 ++#define yyl r3 ++#else ++#define xxh r1 ++#define xxl r0 ++#define yyh r3 ++#define yyl r2 ++#endif ++ ++#ifdef L_aeabi_lcmp ++ ++FUNC_START aeabi_lcmp ++ cmp xxh, yyh ++ beq 1f ++ bgt 2f ++ mov r0, #1 ++ neg r0, r0 ++ RET ++2: ++ mov r0, #1 ++ RET ++1: ++ sub r0, xxl, yyl ++ beq 1f ++ bhi 2f ++ mov r0, #1 ++ neg r0, r0 ++ RET ++2: ++ mov r0, #1 ++1: ++ RET ++ FUNC_END aeabi_lcmp ++ ++#endif /* L_aeabi_lcmp */ ++ ++#ifdef L_aeabi_ulcmp ++ ++FUNC_START aeabi_ulcmp ++ cmp xxh, yyh ++ bne 1f ++ sub r0, xxl, yyl ++ beq 2f ++1: ++ bcs 1f ++ mov r0, #1 ++ neg r0, r0 ++ RET ++1: ++ mov r0, #1 ++2: ++ RET ++ FUNC_END aeabi_ulcmp ++ ++#endif /* L_aeabi_ulcmp */ ++ ++.macro test_div_by_zero signed ++ cmp yyh, #0 ++ bne 7f ++ cmp yyl, #0 ++ bne 7f ++ cmp xxh, #0 ++ bne 2f ++ cmp xxl, #0 ++2: ++ .ifc \signed, unsigned ++ beq 3f ++ mov xxh, #0 ++ mvn xxh, xxh @ 0xffffffff ++ mov xxl, xxh ++3: ++ .else ++ beq 5f ++ blt 6f ++ mov xxl, #0 ++ mvn xxl, xxl @ 0xffffffff ++ lsr xxh, xxl, #1 @ 0x7fffffff ++ b 5f ++6: mov xxh, #0x80 ++ lsl xxh, xxh, #24 @ 0x80000000 ++ mov xxl, #0 ++5: ++ .endif ++ @ tailcalls are tricky on v6-m. ++ push {r0, r1, r2} ++ ldr r0, 1f ++ adr r1, 1f ++ add r0, r1 ++ str r0, [sp, #8] ++ @ We know we are not on armv4t, so pop pc is safe. 
++ pop {r0, r1, pc} ++ .align 2 ++1: ++ .word __aeabi_ldiv0 - 1b ++7: ++.endm ++ ++#ifdef L_aeabi_ldivmod ++ ++FUNC_START aeabi_ldivmod ++ test_div_by_zero signed ++ ++ push {r0, r1} ++ mov r0, sp ++ push {r0, lr} ++ ldr r0, [sp, #8] ++ bl SYM(__gnu_ldivmod_helper) ++ ldr r3, [sp, #4] ++ mov lr, r3 ++ add sp, sp, #8 ++ pop {r2, r3} ++ RET ++ FUNC_END aeabi_ldivmod ++ ++#endif /* L_aeabi_ldivmod */ ++ ++#ifdef L_aeabi_uldivmod ++ ++FUNC_START aeabi_uldivmod ++ test_div_by_zero unsigned ++ ++ push {r0, r1} ++ mov r0, sp ++ push {r0, lr} ++ ldr r0, [sp, #8] ++ bl SYM(__gnu_uldivmod_helper) ++ ldr r3, [sp, #4] ++ mov lr, r3 ++ add sp, sp, #8 ++ pop {r2, r3} ++ RET ++ FUNC_END aeabi_uldivmod ++ ++#endif /* L_aeabi_uldivmod */ ++ ++#ifdef L_arm_addsubsf3 ++ ++FUNC_START aeabi_frsub ++ ++ push {r4, lr} ++ mov r4, #1 ++ lsl r4, #31 ++ eor r0, r0, r4 ++ bl __aeabi_fadd ++ pop {r4, pc} ++ ++ FUNC_END aeabi_frsub ++ ++#endif /* L_arm_addsubsf3 */ ++ ++#ifdef L_arm_cmpsf2 ++ ++FUNC_START aeabi_cfrcmple ++ ++ mov ip, r0 ++ mov r0, r1 ++ mov r1, ip ++ b 6f ++ ++FUNC_START aeabi_cfcmpeq ++FUNC_ALIAS aeabi_cfcmple aeabi_cfcmpeq ++ ++ @ The status-returning routines are required to preserve all ++ @ registers except ip, lr, and cpsr. ++6: push {r0, r1, r2, r3, r4, lr} ++ bl __lesf2 ++ @ Set the Z flag correctly, and the C flag unconditionally. ++ cmp r0, #0 ++ @ Clear the C flag if the return value was -1, indicating ++ @ that the first operand was smaller than the second. ++ bmi 1f ++ mov r1, #0 ++ cmn r0, r1 ++1: ++ pop {r0, r1, r2, r3, r4, pc} ++ ++ FUNC_END aeabi_cfcmple ++ FUNC_END aeabi_cfcmpeq ++ FUNC_END aeabi_cfrcmple ++ ++FUNC_START aeabi_fcmpeq ++ ++ push {r4, lr} ++ bl __eqsf2 ++ neg r0, r0 ++ add r0, r0, #1 ++ pop {r4, pc} ++ ++ FUNC_END aeabi_fcmpeq ++ ++.macro COMPARISON cond, helper, mode=sf2 ++FUNC_START aeabi_fcmp\cond ++ ++ push {r4, lr} ++ bl __\helper\mode ++ cmp r0, #0 ++ b\cond 1f ++ mov r0, #0 ++ pop {r4, pc} ++1: ++ mov r0, #1 ++ pop {r4, pc} ++ ++ FUNC_END aeabi_fcmp\cond ++.endm ++ ++COMPARISON lt, le ++COMPARISON le, le ++COMPARISON gt, ge ++COMPARISON ge, ge ++ ++#endif /* L_arm_cmpsf2 */ ++ ++#ifdef L_arm_addsubdf3 ++ ++FUNC_START aeabi_drsub ++ ++ push {r4, lr} ++ mov r4, #1 ++ lsl r4, #31 ++ eor xxh, xxh, r4 ++ bl __aeabi_dadd ++ pop {r4, pc} ++ ++ FUNC_END aeabi_drsub ++ ++#endif /* L_arm_addsubdf3 */ ++ ++#ifdef L_arm_cmpdf2 ++ ++FUNC_START aeabi_cdrcmple ++ ++ mov ip, r0 ++ mov r0, r2 ++ mov r2, ip ++ mov ip, r1 ++ mov r1, r3 ++ mov r3, ip ++ b 6f ++ ++FUNC_START aeabi_cdcmpeq ++FUNC_ALIAS aeabi_cdcmple aeabi_cdcmpeq ++ ++ @ The status-returning routines are required to preserve all ++ @ registers except ip, lr, and cpsr. ++6: push {r0, r1, r2, r3, r4, lr} ++ bl __ledf2 ++ @ Set the Z flag correctly, and the C flag unconditionally. ++ cmp r0, #0 ++ @ Clear the C flag if the return value was -1, indicating ++ @ that the first operand was smaller than the second. 
++ bmi 1f ++ mov r1, #0 ++ cmn r0, r1 ++1: ++ pop {r0, r1, r2, r3, r4, pc} ++ ++ FUNC_END aeabi_cdcmple ++ FUNC_END aeabi_cdcmpeq ++ FUNC_END aeabi_cdrcmple ++ ++FUNC_START aeabi_dcmpeq ++ ++ push {r4, lr} ++ bl __eqdf2 ++ neg r0, r0 ++ add r0, r0, #1 ++ pop {r4, pc} ++ ++ FUNC_END aeabi_dcmpeq ++ ++.macro COMPARISON cond, helper, mode=df2 ++FUNC_START aeabi_dcmp\cond ++ ++ push {r4, lr} ++ bl __\helper\mode ++ cmp r0, #0 ++ b\cond 1f ++ mov r0, #0 ++ pop {r4, pc} ++1: ++ mov r0, #1 ++ pop {r4, pc} ++ ++ FUNC_END aeabi_dcmp\cond ++.endm ++ ++COMPARISON lt, le ++COMPARISON le, le ++COMPARISON gt, ge ++COMPARISON ge, ge ++ ++#endif /* L_arm_cmpdf2 */ +--- a/gcc/config/arm/bpabi.S ++++ b/gcc/config/arm/bpabi.S +@@ -81,20 +81,69 @@ ARM_FUNC_START aeabi_ulcmp + + #endif /* L_aeabi_ulcmp */ + ++.macro test_div_by_zero signed ++/* Tail-call to divide-by-zero handlers which may be overridden by the user, ++ so unwinding works properly. */ ++#if defined(__thumb2__) ++ cbnz yyh, 1f ++ cbnz yyl, 1f ++ cmp xxh, #0 ++ do_it eq ++ cmpeq xxl, #0 ++ .ifc \signed, unsigned ++ beq 2f ++ mov xxh, #0xffffffff ++ mov xxl, xxh ++2: ++ .else ++ do_it lt, t ++ movlt xxl, #0 ++ movlt xxh, #0x80000000 ++ do_it gt, t ++ movgt xxh, #0x7fffffff ++ movgt xxl, #0xffffffff ++ .endif ++ b SYM (__aeabi_ldiv0) __PLT__ ++1: ++#else ++ /* Note: Thumb-1 code calls via an ARM shim on processors which ++ support ARM mode. */ ++ cmp yyh, #0 ++ cmpeq yyl, #0 ++ bne 2f ++ cmp xxh, #0 ++ cmpeq xxl, #0 ++ .ifc \signed, unsigned ++ movne xxh, #0xffffffff ++ movne xxl, #0xffffffff ++ .else ++ movlt xxh, #0x80000000 ++ movlt xxl, #0 ++ movgt xxh, #0x7fffffff ++ movgt xxl, #0xffffffff ++ .endif ++ b SYM (__aeabi_ldiv0) __PLT__ ++2: ++#endif ++.endm ++ + #ifdef L_aeabi_ldivmod + + ARM_FUNC_START aeabi_ldivmod ++ test_div_by_zero signed ++ + sub sp, sp, #8 +-#if defined(__thumb2__) ++/* Low latency and Thumb-2 do_push implementations can't push sp directly. */ ++#if defined(__thumb2__) || defined(__irq_low_latency__) + mov ip, sp +- push {ip, lr} ++ do_push (ip, lr) + #else +- do_push {sp, lr} ++ stmfd sp!, {sp, lr} + #endif + bl SYM(__gnu_ldivmod_helper) __PLT__ + ldr lr, [sp, #4] + add sp, sp, #8 +- do_pop {r2, r3} ++ do_pop (r2, r3) + RET + + #endif /* L_aeabi_ldivmod */ +@@ -102,17 +151,20 @@ ARM_FUNC_START aeabi_ldivmod + #ifdef L_aeabi_uldivmod + + ARM_FUNC_START aeabi_uldivmod ++ test_div_by_zero unsigned ++ + sub sp, sp, #8 +-#if defined(__thumb2__) ++/* Low latency and Thumb-2 do_push implementations can't push sp directly. */ ++#if defined(__thumb2__) || defined(__irq_low_latency__) + mov ip, sp +- push {ip, lr} ++ do_push (ip, lr) + #else +- do_push {sp, lr} ++ stmfd sp!, {sp, lr} + #endif + bl SYM(__gnu_uldivmod_helper) __PLT__ + ldr lr, [sp, #4] + add sp, sp, #8 +- do_pop {r2, r3} ++ do_pop (r2, r3) + RET + + #endif /* L_aeabi_divmod */ +--- a/gcc/config/arm/bpabi.h ++++ b/gcc/config/arm/bpabi.h +@@ -51,15 +51,25 @@ + /* The BPABI integer comparison routines return { -1, 0, 1 }. */ + #define TARGET_LIB_INT_CMP_BIASED !TARGET_BPABI + ++#define TARGET_FIX_V4BX_SPEC " %{mcpu=arm8|mcpu=arm810|mcpu=strongarm*|march=armv4:--fix-v4bx}" ++ ++#define BE8_LINK_SPEC " %{mbig-endian:%{march=armv7-a|mcpu=cortex-a8|mcpu=cortex-a9:%{!r:--be8}}}" ++ + /* Tell the assembler to build BPABI binaries. 
*/ + #undef SUBTARGET_EXTRA_ASM_SPEC +-#define SUBTARGET_EXTRA_ASM_SPEC "%{mabi=apcs-gnu|mabi=atpcs:-meabi=gnu;:-meabi=4}" ++#define SUBTARGET_EXTRA_ASM_SPEC "%{mabi=apcs-gnu|mabi=atpcs:-meabi=gnu;:-meabi=5}" TARGET_FIX_V4BX_SPEC ++ ++#ifndef SUBTARGET_EXTRA_LINK_SPEC ++#define SUBTARGET_EXTRA_LINK_SPEC "" ++#endif + + /* The generic link spec in elf.h does not support shared libraries. */ + #undef LINK_SPEC + #define LINK_SPEC "%{mbig-endian:-EB} %{mlittle-endian:-EL} " \ + "%{static:-Bstatic} %{shared:-shared} %{symbolic:-Bsymbolic} " \ +- "-X" ++ "-X" SUBTARGET_EXTRA_LINK_SPEC TARGET_FIX_V4BX_SPEC \ ++ BE8_LINK_SPEC \ ++ " %{mfix-janus-2cc:--fix-janus-2cc}" + + #if defined (__thumb__) + #define RENAME_LIBRARY_SET ".thumb_set" +@@ -81,16 +91,22 @@ + #define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (muldi3, lmul) + #endif + #ifdef L_fixdfdi +-#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (fixdfdi, d2lz) ++#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (fixdfdi, d2lz) \ ++ extern DWtype __fixdfdi (DFtype) __attribute__((pcs("aapcs"))); \ ++ extern UDWtype __fixunsdfdi (DFtype) __asm__("__aeabi_d2ulz") __attribute__((pcs("aapcs"))); + #endif + #ifdef L_fixunsdfdi +-#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (fixunsdfdi, d2ulz) ++#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (fixunsdfdi, d2ulz) \ ++ extern UDWtype __fixunsdfdi (DFtype) __attribute__((pcs("aapcs"))); + #endif + #ifdef L_fixsfdi +-#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (fixsfdi, f2lz) ++#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (fixsfdi, f2lz) \ ++ extern DWtype __fixsfdi (SFtype) __attribute__((pcs("aapcs"))); \ ++ extern UDWtype __fixunssfdi (SFtype) __asm__("__aeabi_f2ulz") __attribute__((pcs("aapcs"))); + #endif + #ifdef L_fixunssfdi +-#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (fixunssfdi, f2ulz) ++#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (fixunssfdi, f2ulz) \ ++ extern UDWtype __fixunssfdi (SFtype) __attribute__((pcs("aapcs"))); + #endif + #ifdef L_floatdidf + #define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (floatdidf, l2d) +@@ -99,6 +115,21 @@ + #define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (floatdisf, l2f) + #endif + ++/* These renames are needed on ARMv6M. Other targets get them from ++ assembly routines. */ ++#ifdef L_fixunsdfsi ++#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (fixunsdfsi, d2uiz) ++#endif ++#ifdef L_fixunssfsi ++#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (fixunssfsi, f2uiz) ++#endif ++#ifdef L_floatundidf ++#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (floatundidf, ul2d) ++#endif ++#ifdef L_floatundisf ++#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (floatundisf, ul2f) ++#endif ++ + /* The BPABI requires that we always use an out-of-line implementation + of RTTI comparison, even if the target supports weak symbols, + because the same object file might be used on a target that does +@@ -123,3 +154,26 @@ + #undef FINI_SECTION_ASM_OP + #define INIT_ARRAY_SECTION_ASM_OP ARM_EABI_CTORS_SECTION_OP + #define FINI_ARRAY_SECTION_ASM_OP ARM_EABI_DTORS_SECTION_OP ++ ++/* The legacy _mcount implementation assumes r11 points to a ++ 4-word APCS frame. This is generally not true for EABI targets, ++ particularly not in Thumb mode. We assume the mcount ++ implementation does not require a counter variable (No Counter). ++ Note that __gnu_mcount_nc will be entered with a misaligned stack. ++ This is OK because it uses a special calling convention anyway. 
*/ ++ ++#undef NO_PROFILE_COUNTERS ++#define NO_PROFILE_COUNTERS 1 ++#undef ARM_FUNCTION_PROFILER ++#define ARM_FUNCTION_PROFILER(STREAM, LABELNO) \ ++{ \ ++ fprintf (STREAM, "\tpush\t{lr}\n"); \ ++ fprintf (STREAM, "\tbl\t__gnu_mcount_nc\n"); \ ++} ++ ++#undef SUBTARGET_FRAME_POINTER_REQUIRED ++#define SUBTARGET_FRAME_POINTER_REQUIRED 0 ++ ++/* __gnu_mcount_nc restores the original LR value before returning. Ensure ++ that there is no unnecessary hook set up. */ ++#undef PROFILE_HOOK +--- a/gcc/config/arm/constraints.md ++++ b/gcc/config/arm/constraints.md +@@ -20,19 +20,19 @@ + + ;; The following register constraints have been used: + ;; - in ARM/Thumb-2 state: f, t, v, w, x, y, z +-;; - in Thumb state: h, k, b +-;; - in both states: l, c ++;; - in Thumb state: h, b ++;; - in both states: l, c, k + ;; In ARM state, 'l' is an alias for 'r' + + ;; The following normal constraints have been used: +-;; in ARM/Thumb-2 state: G, H, I, J, K, L, M ++;; in ARM/Thumb-2 state: G, H, I, j, J, K, L, M + ;; in Thumb-1 state: I, J, K, L, M, N, O + + ;; The following multi-letter normal constraints have been used: + ;; in ARM/Thumb-2 state: Da, Db, Dc, Dn, Dl, DL, Dv + + ;; The following memory constraints have been used: +-;; in ARM/Thumb-2 state: Q, Ut, Uv, Uy, Un, Us ++;; in ARM/Thumb-2 state: Q, Ut, Uv, Uy, Un, Um, Us + ;; in ARM state: Uq + + +@@ -46,7 +46,7 @@ + "The Cirrus Maverick co-processor registers.") + + (define_register_constraint "w" +- "TARGET_32BIT ? (TARGET_VFP3 ? VFP_REGS : VFP_LO_REGS) : NO_REGS" ++ "TARGET_32BIT ? (TARGET_VFPD32 ? VFP_REGS : VFP_LO_REGS) : NO_REGS" + "The VFP registers @code{d0}-@code{d15}, or @code{d0}-@code{d31} for VFPv3.") + + (define_register_constraint "x" "TARGET_32BIT ? VFP_D0_D7_REGS : NO_REGS" +@@ -65,9 +65,15 @@ + (define_register_constraint "h" "TARGET_THUMB ? HI_REGS : NO_REGS" + "In Thumb state the core registers @code{r8}-@code{r15}.") + +-(define_register_constraint "k" "TARGET_THUMB ? STACK_REG : NO_REGS" +- "@internal +- Thumb only. The stack register.") ++(define_constraint "j" ++ "A constant suitable for a MOVW instruction. (ARM/Thumb-2)" ++ (and (match_test "TARGET_32BIT && arm_arch_thumb2") ++ (ior (match_code "high") ++ (and (match_code "const_int") ++ (match_test "(ival & 0xffff0000) == 0"))))) ++ ++(define_register_constraint "k" "STACK_REG" ++ "@internal The stack register.") + + (define_register_constraint "b" "TARGET_THUMB ? BASE_REGS : NO_REGS" + "@internal +@@ -117,11 +123,9 @@ + : ((ival >= 0 && ival <= 1020) && ((ival & 3) == 0))"))) + + (define_constraint "N" +- "In ARM/Thumb-2 state a constant suitable for a MOVW instruction. +- In Thumb-1 state a constant in the range 0-31." ++ "Thumb-1 state a constant in the range 0-31." + (and (match_code "const_int") +- (match_test "TARGET_32BIT ? arm_arch_thumb2 && ((ival & 0xffff0000) == 0) +- : (ival >= 0 && ival <= 31)"))) ++ (match_test "!TARGET_32BIT && (ival >= 0 && ival <= 31)"))) + + (define_constraint "O" + "In Thumb-1 state a constant that is a multiple of 4 in the range +@@ -215,17 +219,24 @@ + + (define_memory_constraint "Un" + "@internal ++ In ARM/Thumb-2 state a valid address for Neon doubleword vector ++ load/store instructions." ++ (and (match_code "mem") ++ (match_test "TARGET_32BIT && neon_vector_mem_operand (op, 0)"))) ++ ++(define_memory_constraint "Um" ++ "@internal + In ARM/Thumb-2 state a valid address for Neon element and structure + load/store instructions." 
+ (and (match_code "mem") +- (match_test "TARGET_32BIT && neon_vector_mem_operand (op, FALSE)"))) ++ (match_test "TARGET_32BIT && neon_vector_mem_operand (op, 2)"))) + + (define_memory_constraint "Us" + "@internal + In ARM/Thumb-2 state a valid address for non-offset loads/stores of + quad-word values in four ARM registers." + (and (match_code "mem") +- (match_test "TARGET_32BIT && neon_vector_mem_operand (op, TRUE)"))) ++ (match_test "TARGET_32BIT && neon_vector_mem_operand (op, 1)"))) + + (define_memory_constraint "Uq" + "@internal +--- a/gcc/config/arm/cortex-a8-neon.md ++++ b/gcc/config/arm/cortex-a8-neon.md +@@ -134,7 +134,7 @@ + + (define_insn_reservation "cortex_a8_vfp_add_sub" 10 + (and (eq_attr "tune" "cortexa8") +- (eq_attr "type" "farith")) ++ (eq_attr "type" "fconsts,fconstd,fadds,faddd")) + "cortex_a8_vfp,cortex_a8_vfplite*9") + + (define_insn_reservation "cortex_a8_vfp_muls" 12 +@@ -172,7 +172,7 @@ + ;; take four cycles, we pick that latency. + (define_insn_reservation "cortex_a8_vfp_farith" 4 + (and (eq_attr "tune" "cortexa8") +- (eq_attr "type" "ffarith")) ++ (eq_attr "type" "fcpys,ffariths,ffarithd,fconsts,fconstd,fcmps,fcmpd")) + "cortex_a8_vfp,cortex_a8_vfplite*3") + + (define_insn_reservation "cortex_a8_vfp_cvt" 7 +--- /dev/null ++++ b/gcc/config/arm/cortex-a9.md +@@ -0,0 +1,65 @@ ++;; ARM Cortex-A9 VFP pipeline description ++;; Copyright (C) 2008 Free Software Foundation, Inc. ++;; Written by CodeSourcery. ++;; ++;; This file is part of GCC. ++;; ++;; GCC is free software; you can redistribute it and/or modify it ++;; under the terms of the GNU General Public License as published by ++;; the Free Software Foundation; either version 3, or (at your option) ++;; any later version. ++;; ++;; GCC is distributed in the hope that it will be useful, but ++;; WITHOUT ANY WARRANTY; without even the implied warranty of ++;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++;; General Public License for more details. ++;; ++;; You should have received a copy of the GNU General Public License ++;; along with GCC; see the file COPYING3. If not see ++;; . ++ ++(define_automaton "cortex_a9") ++ ++;; FIXME: We model aingle pipeline for all instructions. ++;; Is dual-issue possible, and do we have other pipelines? 
++(define_cpu_unit "cortex_a9_vfp" "cortex_a9") ++ ++(define_insn_reservation "cortex_a9_ffarith" 1 ++ (and (eq_attr "tune" "cortexa9") ++ (eq_attr "type" "fcpys,ffariths,ffarithd,fcmps,fcmpd,fconsts,fconstd")) ++ "cortex_a9_vfp") ++ ++(define_insn_reservation "cortex_a9_fadd" 4 ++ (and (eq_attr "tune" "cortexa9") ++ (eq_attr "type" "fadds,faddd,f_cvt")) ++ "cortex_a9_vfp") ++ ++(define_insn_reservation "cortex_a9_fmuls" 5 ++ (and (eq_attr "tune" "cortexa9") ++ (eq_attr "type" "fmuls")) ++ "cortex_a9_vfp") ++ ++(define_insn_reservation "cortex_a9_fmuld" 6 ++ (and (eq_attr "tune" "cortexa9") ++ (eq_attr "type" "fmuld")) ++ "cortex_a9_vfp*2") ++ ++(define_insn_reservation "cortex_a9_fmacs" 8 ++ (and (eq_attr "tune" "cortexa9") ++ (eq_attr "type" "fmacs")) ++ "cortex_a9_vfp") ++ ++(define_insn_reservation "cortex_a9_fmacd" 8 ++ (and (eq_attr "tune" "cortexa9") ++ (eq_attr "type" "fmacd")) ++ "cortex_a9_vfp*2") ++ ++(define_insn_reservation "cortex_a9_fdivs" 15 ++ (and (eq_attr "tune" "cortexa9") ++ (eq_attr "type" "fdivs")) ++ "cortex_a9_vfp*10") ++ ++(define_insn_reservation "cortex_a9_fdivd" 25 ++ (and (eq_attr "tune" "cortexa9") ++ (eq_attr "type" "fdivd")) ++ "cortex_a9_vfp*20") +--- /dev/null ++++ b/gcc/config/arm/cortex-r4.md +@@ -0,0 +1,292 @@ ++;; ARM Cortex-R4 scheduling description. ++;; Copyright (C) 2007 Free Software Foundation, Inc. ++;; Contributed by CodeSourcery. ++ ++;; This file is part of GCC. ++ ++;; GCC is free software; you can redistribute it and/or modify it ++;; under the terms of the GNU General Public License as published ++;; by the Free Software Foundation; either version 3, or (at your ++;; option) any later version. ++ ++;; GCC is distributed in the hope that it will be useful, but WITHOUT ++;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY ++;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public ++;; License for more details. ++ ++;; You should have received a copy of the GNU General Public License ++;; along with GCC; see the file COPYING3. If not see ++;; . ++ ++(define_automaton "cortex_r4") ++ ++;; We approximate the dual-issue constraints of this core using four ++;; "issue units" and a reservation matrix as follows. The numbers indicate ++;; the instruction groups' preferences in order. Multiple entries for ++;; the same numbered preference indicate units that must be reserved ++;; together. 
++;; ++;; Issue unit: A B C ALU ++;; ++;; ALU w/o reg shift 1st 2nd 1st and 2nd ++;; ALU w/ reg shift 1st 2nd 2nd 1st and 2nd ++;; Moves 1st 2nd 2nd ++;; Multiplication 1st 1st ++;; Division 1st 1st ++;; Load/store single 1st 1st ++;; Other load/store 1st 1st ++;; Branches 1st ++ ++(define_cpu_unit "cortex_r4_issue_a" "cortex_r4") ++(define_cpu_unit "cortex_r4_issue_b" "cortex_r4") ++(define_cpu_unit "cortex_r4_issue_c" "cortex_r4") ++(define_cpu_unit "cortex_r4_issue_alu" "cortex_r4") ++ ++(define_reservation "cortex_r4_alu" ++ "(cortex_r4_issue_a+cortex_r4_issue_alu)|\ ++ (cortex_r4_issue_b+cortex_r4_issue_alu)") ++(define_reservation "cortex_r4_alu_shift_reg" ++ "(cortex_r4_issue_a+cortex_r4_issue_alu)|\ ++ (cortex_r4_issue_b+cortex_r4_issue_c+\ ++ cortex_r4_issue_alu)") ++(define_reservation "cortex_r4_mov" ++ "cortex_r4_issue_a|(cortex_r4_issue_b+\ ++ cortex_r4_issue_alu)") ++(define_reservation "cortex_r4_mul" "cortex_r4_issue_a+cortex_r4_issue_alu") ++(define_reservation "cortex_r4_mul_2" ++ "(cortex_r4_issue_a+cortex_r4_issue_alu)*2") ++;; Division instructions execute out-of-order with respect to the ++;; rest of the pipeline and only require reservations on their first and ++;; final cycles. ++(define_reservation "cortex_r4_div_9" ++ "cortex_r4_issue_a+cortex_r4_issue_alu,\ ++ nothing*7,\ ++ cortex_r4_issue_a+cortex_r4_issue_alu") ++(define_reservation "cortex_r4_div_10" ++ "cortex_r4_issue_a+cortex_r4_issue_alu,\ ++ nothing*8,\ ++ cortex_r4_issue_a+cortex_r4_issue_alu") ++(define_reservation "cortex_r4_load_store" ++ "cortex_r4_issue_a+cortex_r4_issue_c") ++(define_reservation "cortex_r4_load_store_2" ++ "(cortex_r4_issue_a+cortex_r4_issue_b)*2") ++(define_reservation "cortex_r4_branch" "cortex_r4_issue_b") ++ ++;; We assume that all instructions are unconditional. ++ ++;; Data processing instructions. Moves without shifts are kept separate ++;; for the purposes of the dual-issue constraints above. ++(define_insn_reservation "cortex_r4_alu" 2 ++ (and (eq_attr "tune_cortexr4" "yes") ++ (and (eq_attr "type" "alu") ++ (not (eq_attr "insn" "mov")))) ++ "cortex_r4_alu") ++ ++(define_insn_reservation "cortex_r4_mov" 2 ++ (and (eq_attr "tune_cortexr4" "yes") ++ (and (eq_attr "type" "alu") ++ (eq_attr "insn" "mov"))) ++ "cortex_r4_mov") ++ ++(define_insn_reservation "cortex_r4_alu_shift" 2 ++ (and (eq_attr "tune_cortexr4" "yes") ++ (eq_attr "type" "alu_shift")) ++ "cortex_r4_alu") ++ ++(define_insn_reservation "cortex_r4_alu_shift_reg" 2 ++ (and (eq_attr "tune_cortexr4" "yes") ++ (eq_attr "type" "alu_shift_reg")) ++ "cortex_r4_alu_shift_reg") ++ ++;; An ALU instruction followed by an ALU instruction with no early dep. ++(define_bypass 1 "cortex_r4_alu,cortex_r4_alu_shift,cortex_r4_alu_shift_reg,\ ++ cortex_r4_mov" ++ "cortex_r4_alu") ++(define_bypass 1 "cortex_r4_alu,cortex_r4_alu_shift,cortex_r4_alu_shift_reg,\ ++ cortex_r4_mov" ++ "cortex_r4_alu_shift" ++ "arm_no_early_alu_shift_dep") ++(define_bypass 1 "cortex_r4_alu,cortex_r4_alu_shift,cortex_r4_alu_shift_reg,\ ++ cortex_r4_mov" ++ "cortex_r4_alu_shift_reg" ++ "arm_no_early_alu_shift_value_dep") ++ ++;; In terms of availabilities, a consumer mov could theoretically be ++;; issued together with a producer ALU instruction, without stalls. ++;; In practice this cannot happen because mov;add (in that order) is not ++;; eligible for dual issue and furthermore dual issue is not permitted ++;; when a dependency is involved. We therefore note it as latency one. ++;; A mov followed by another of the same is also latency one. 
++(define_bypass 1 "cortex_r4_alu,cortex_r4_alu_shift,cortex_r4_alu_shift_reg,\ ++ cortex_r4_mov" ++ "cortex_r4_mov") ++ ++;; qadd, qdadd, qsub and qdsub are not currently emitted, and neither are ++;; media data processing instructions nor sad instructions. ++ ++;; Multiplication instructions. ++ ++(define_insn_reservation "cortex_r4_mul_4" 4 ++ (and (eq_attr "tune_cortexr4" "yes") ++ (eq_attr "insn" "mul,smmul")) ++ "cortex_r4_mul_2") ++ ++(define_insn_reservation "cortex_r4_mul_3" 3 ++ (and (eq_attr "tune_cortexr4" "yes") ++ (eq_attr "insn" "smulxy,smulwy,smuad,smusd")) ++ "cortex_r4_mul") ++ ++(define_insn_reservation "cortex_r4_mla_4" 4 ++ (and (eq_attr "tune_cortexr4" "yes") ++ (eq_attr "insn" "mla,smmla,smmls")) ++ "cortex_r4_mul_2") ++ ++(define_insn_reservation "cortex_r4_mla_3" 3 ++ (and (eq_attr "tune_cortexr4" "yes") ++ (eq_attr "insn" "smlaxy,smlawy,smlad,smlsd")) ++ "cortex_r4_mul") ++ ++(define_insn_reservation "cortex_r4_smlald" 3 ++ (and (eq_attr "tune_cortexr4" "yes") ++ (eq_attr "insn" "smlald,smlsld")) ++ "cortex_r4_mul") ++ ++(define_insn_reservation "cortex_r4_mull" 4 ++ (and (eq_attr "tune_cortexr4" "yes") ++ (eq_attr "insn" "smull,umull,umlal,umaal")) ++ "cortex_r4_mul_2") ++ ++;; A multiply or an MLA with a single-register result, followed by an ++;; MLA with an accumulator dependency, has its result forwarded. ++(define_bypass 2 "cortex_r4_mul_3,cortex_r4_mla_3" ++ "cortex_r4_mla_3,cortex_r4_mla_4" ++ "arm_mac_accumulator_is_mul_result") ++ ++(define_bypass 3 "cortex_r4_mul_4,cortex_r4_mla_4" ++ "cortex_r4_mla_3,cortex_r4_mla_4" ++ "arm_mac_accumulator_is_mul_result") ++ ++;; A multiply followed by an ALU instruction needing the multiply ++;; result only at ALU has lower latency than one needing it at Shift. ++(define_bypass 2 "cortex_r4_mul_3,cortex_r4_mla_3,cortex_r4_smlald" ++ "cortex_r4_alu") ++(define_bypass 2 "cortex_r4_mul_3,cortex_r4_mla_3,cortex_r4_smlald" ++ "cortex_r4_alu_shift" ++ "arm_no_early_alu_shift_dep") ++(define_bypass 2 "cortex_r4_mul_3,cortex_r4_mla_3,cortex_r4_smlald" ++ "cortex_r4_alu_shift_reg" ++ "arm_no_early_alu_shift_value_dep") ++(define_bypass 3 "cortex_r4_mul_4,cortex_r4_mla_4,cortex_r4_mull" ++ "cortex_r4_alu") ++(define_bypass 3 "cortex_r4_mul_4,cortex_r4_mla_4,cortex_r4_mull" ++ "cortex_r4_alu_shift" ++ "arm_no_early_alu_shift_dep") ++(define_bypass 3 "cortex_r4_mul_4,cortex_r4_mla_4,cortex_r4_mull" ++ "cortex_r4_alu_shift_reg" ++ "arm_no_early_alu_shift_value_dep") ++ ++;; A multiply followed by a mov has one cycle lower latency again. ++(define_bypass 1 "cortex_r4_mul_3,cortex_r4_mla_3,cortex_r4_smlald" ++ "cortex_r4_mov") ++(define_bypass 2 "cortex_r4_mul_4,cortex_r4_mla_4,cortex_r4_mull" ++ "cortex_r4_mov") ++ ++;; We guess that division of A/B using sdiv or udiv, on average, ++;; is performed with B having ten more leading zeros than A. ++;; This gives a latency of nine for udiv and ten for sdiv. ++(define_insn_reservation "cortex_r4_udiv" 9 ++ (and (eq_attr "tune_cortexr4" "yes") ++ (eq_attr "insn" "udiv")) ++ "cortex_r4_div_9") ++ ++(define_insn_reservation "cortex_r4_sdiv" 10 ++ (and (eq_attr "tune_cortexr4" "yes") ++ (eq_attr "insn" "sdiv")) ++ "cortex_r4_div_10") ++ ++;; Branches. We assume correct prediction. ++ ++(define_insn_reservation "cortex_r4_branch" 0 ++ (and (eq_attr "tune_cortexr4" "yes") ++ (eq_attr "type" "branch")) ++ "cortex_r4_branch") ++ ++;; Call latencies are not predictable. 
A semi-arbitrary very large ++;; number is used as "positive infinity" so that everything should be ++;; finished by the time of return. ++(define_insn_reservation "cortex_r4_call" 32 ++ (and (eq_attr "tune_cortexr4" "yes") ++ (eq_attr "type" "call")) ++ "nothing") ++ ++;; Status register access instructions are not currently emitted. ++ ++;; Load instructions. ++;; We do not model the "addr_md_3cycle" cases and assume that ++;; accesses following are correctly aligned. ++ ++(define_insn_reservation "cortex_r4_load_1_2" 3 ++ (and (eq_attr "tune_cortexr4" "yes") ++ (eq_attr "type" "load1,load2")) ++ "cortex_r4_load_store") ++ ++(define_insn_reservation "cortex_r4_load_3_4" 4 ++ (and (eq_attr "tune_cortexr4" "yes") ++ (eq_attr "type" "load3,load4")) ++ "cortex_r4_load_store_2") ++ ++;; If a producing load is followed by an instruction consuming only ++;; as a Normal Reg, there is one fewer cycle of latency. ++ ++(define_bypass 2 "cortex_r4_load_1_2" ++ "cortex_r4_alu") ++(define_bypass 2 "cortex_r4_load_1_2" ++ "cortex_r4_alu_shift" ++ "arm_no_early_alu_shift_dep") ++(define_bypass 2 "cortex_r4_load_1_2" ++ "cortex_r4_alu_shift_reg" ++ "arm_no_early_alu_shift_value_dep") ++ ++(define_bypass 3 "cortex_r4_load_3_4" ++ "cortex_r4_alu") ++(define_bypass 3 "cortex_r4_load_3_4" ++ "cortex_r4_alu_shift" ++ "arm_no_early_alu_shift_dep") ++(define_bypass 3 "cortex_r4_load_3_4" ++ "cortex_r4_alu_shift_reg" ++ "arm_no_early_alu_shift_value_dep") ++ ++;; If a producing load is followed by an instruction consuming only ++;; as a Late Reg, there are two fewer cycles of latency. Such consumer ++;; instructions are moves and stores. ++ ++(define_bypass 1 "cortex_r4_load_1_2" ++ "cortex_r4_mov,cortex_r4_store_1_2,cortex_r4_store_3_4") ++(define_bypass 2 "cortex_r4_load_3_4" ++ "cortex_r4_mov,cortex_r4_store_1_2,cortex_r4_store_3_4") ++ ++;; If a producer's result is required as the base or offset of a load, ++;; there is an extra cycle latency. ++ ++(define_bypass 3 "cortex_r4_alu,cortex_r4_mov,cortex_r4_alu_shift,\ ++ cortex_r4_alu_shift_reg" ++ "cortex_r4_load_1_2,cortex_r4_load_3_4") ++ ++(define_bypass 4 "cortex_r4_mul_3,cortex_r4_mla_3,cortex_r4_smlald" ++ "cortex_r4_load_1_2,cortex_r4_load_3_4") ++ ++(define_bypass 5 "cortex_r4_mul_4,cortex_r4_mla_4,cortex_r4_mull" ++ "cortex_r4_load_1_2,cortex_r4_load_3_4") ++ ++;; Store instructions. ++ ++(define_insn_reservation "cortex_r4_store_1_2" 0 ++ (and (eq_attr "tune_cortexr4" "yes") ++ (eq_attr "type" "store1,store2")) ++ "cortex_r4_load_store") ++ ++(define_insn_reservation "cortex_r4_store_3_4" 0 ++ (and (eq_attr "tune_cortexr4" "yes") ++ (eq_attr "type" "store3,store4")) ++ "cortex_r4_load_store_2") ++ +--- /dev/null ++++ b/gcc/config/arm/cortex-r4f.md +@@ -0,0 +1,161 @@ ++;; ARM Crotex-R4F VFP pipeline description ++;; Copyright (C) 2007 Free Software Foundation, Inc. ++;; Written by CodeSourcery. ++;; ++;; This file is part of GCC. ++;; ++;; GCC is free software; you can redistribute it and/or modify it ++;; under the terms of the GNU General Public License as published by ++;; the Free Software Foundation; either version 3, or (at your option) ++;; any later version. ++;; ++;; GCC is distributed in the hope that it will be useful, but ++;; WITHOUT ANY WARRANTY; without even the implied warranty of ++;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++;; General Public License for more details. ++;; ++;; You should have received a copy of the GNU General Public License ++;; along with GCC; see the file COPYING3. If not see ++;; . 
++ ++;; With the exception of simple VMOV , instructions and ++;; the accululate operand of a multiply-accumulate instruction, all ++;; registers are early registers. Thus base latencies are 1 more than ++;; those listed in the TRM. ++ ++;; We use the A, B abd C units from the integer core, plus two additional ++;; units to enforce VFP dual issue constraints. ++ ++;; A B C V1 VMLA ++;; fcpy 1 2 ++;; farith 1 2 1 ++;; fmrc 1 2 ++;; fconst 1 2 * * ++;; ffarith 1 2 * * ++;; fmac 1 2 1 2 ++;; fdiv 1 2 * ++;; f_loads * * * ++;; f_stores * * * ++ ++(define_cpu_unit "cortex_r4_v1" "cortex_r4") ++ ++(define_cpu_unit "cortex_r4_vmla" "cortex_r4") ++ ++(define_reservation "cortex_r4_issue_ab" ++ "(cortex_r4_issue_a|cortex_r4_issue_b)") ++(define_reservation "cortex_r4_single_issue" ++ "cortex_r4_issue_a+cortex_r4_issue_b") ++ ++(define_insn_reservation "cortex_r4_fcpys" 2 ++ (and (eq_attr "tune_cortexr4" "yes") ++ (eq_attr "type" "fcpys")) ++ "cortex_r4_issue_ab") ++ ++(define_insn_reservation "cortex_r4_ffariths" 2 ++ (and (eq_attr "tune_cortexr4" "yes") ++ (eq_attr "type" "ffariths,fconsts,fcmps")) ++ "cortex_r4_issue_ab+cortex_r4_issue_c+cortex_r4_v1") ++ ++(define_insn_reservation "cortex_r4_fariths" 3 ++ (and (eq_attr "tune_cortexr4" "yes") ++ (eq_attr "type" "fadds,fmuls")) ++ "(cortex_r4_issue_a+cortex_r4_v1)|cortex_r4_issue_b") ++ ++(define_insn_reservation "cortex_r4_fmacs" 6 ++ (and (eq_attr "tune_cortexr4" "yes") ++ (eq_attr "type" "fmacs")) ++ "(cortex_r4_issue_a+cortex_r4_v1)|(cortex_r4_issue_b+cortex_r4_vmla)") ++ ++(define_insn_reservation "cortex_r4_fdivs" 17 ++ (and (eq_attr "tune_cortexr4" "yes") ++ (eq_attr "type" "fdivs")) ++ "cortex_r4_issue_ab+cortex_r4_v1,cortex_r4_issue_a+cortex_r4_v1") ++ ++(define_insn_reservation "cortex_r4_floads" 2 ++ (and (eq_attr "tune_cortexr4" "yes") ++ (eq_attr "type" "f_loads")) ++ "cortex_r4_issue_a+cortex_r4_issue_c+cortex_r4_v1") ++ ++(define_insn_reservation "cortex_r4_fstores" 1 ++ (and (eq_attr "tune_cortexr4" "yes") ++ (eq_attr "type" "f_stores")) ++ "cortex_r4_issue_a+cortex_r4_issue_c+cortex_r4_vmla") ++ ++(define_insn_reservation "cortex_r4_mcr" 2 ++ (and (eq_attr "tune_cortexr4" "yes") ++ (eq_attr "type" "r_2_f")) ++ "cortex_r4_issue_ab") ++ ++(define_insn_reservation "cortex_r4_mrc" 3 ++ (and (eq_attr "tune_cortexr4" "yes") ++ (eq_attr "type" "f_2_r")) ++ "cortex_r4_issue_ab") ++ ++;; Bypasses for normal (not early) regs. ++(define_bypass 1 "cortex_r4_ffariths,cortex_r4_fcpys,cortex_r4_mcr" ++ "cortex_r4_fcpys") ++(define_bypass 2 "cortex_r4_fariths" ++ "cortex_r4_fcpys") ++(define_bypass 5 "cortex_r4_fmacs" ++ "cortex_r4_fcpys") ++(define_bypass 16 "cortex_r4_fdivs" ++ "cortex_r4_fcpys") ++ ++(define_bypass 1 "cortex_r4_ffariths,cortex_r4_fcpys,cortex_r4_mcr" ++ "cortex_r4_fmacs" ++ "arm_no_early_mul_dep") ++(define_bypass 2 "cortex_r4_fariths" ++ "cortex_r4_fmacs" ++ "arm_no_early_mul_dep") ++;; mac->mac has an extra forwarding path. ++(define_bypass 3 "cortex_r4_fmacs" ++ "cortex_r4_fmacs" ++ "arm_no_early_mul_dep") ++(define_bypass 16 "cortex_r4_fdivs" ++ "cortex_r4_fmacs" ++ "arm_no_early_mul_dep") ++ ++;; Double precision operations. These can not dual issue. 
++ ++(define_insn_reservation "cortex_r4_fmacd" 20 ++ (and (eq_attr "tune_cortexr4" "yes") ++ (eq_attr "type" "fmacd")) ++ "cortex_r4_single_issue*13") ++ ++(define_insn_reservation "cortex_r4_farith" 10 ++ (and (eq_attr "tune_cortexr4" "yes") ++ (eq_attr "type" "faddd,fmuld")) ++ "cortex_r4_single_issue*3") ++ ++;; FIXME: The short cycle count suggests these instructions complete ++;; out of order. Chances are this is not a pipelined operation. ++(define_insn_reservation "cortex_r4_fdivd" 97 ++ (and (eq_attr "tune_cortexr4" "yes") ++ (eq_attr "type" "fdivd")) ++ "cortex_r4_single_issue*3") ++ ++(define_insn_reservation "cortex_r4_ffarithd" 2 ++ (and (eq_attr "tune_cortexr4" "yes") ++ (eq_attr "type" "ffarithd,fconstd")) ++ "cortex_r4_single_issue") ++ ++(define_insn_reservation "cortex_r4_fcmpd" 2 ++ (and (eq_attr "tune_cortexr4" "yes") ++ (eq_attr "type" "fcmpd")) ++ "cortex_r4_single_issue*2") ++ ++(define_insn_reservation "cortex_r4_f_cvt" 8 ++ (and (eq_attr "tune_cortexr4" "yes") ++ (eq_attr "type" "f_cvt")) ++ "cortex_r4_single_issue*3") ++ ++(define_insn_reservation "cortex_r4_f_memd" 8 ++ (and (eq_attr "tune_cortexr4" "yes") ++ (eq_attr "type" "f_loadd,f_stored")) ++ "cortex_r4_single_issue") ++ ++(define_insn_reservation "cortex_r4_f_flag" 1 ++ (and (eq_attr "tune_cortexr4" "yes") ++ (eq_attr "type" "f_stores")) ++ "cortex_r4_single_issue") ++ +--- a/gcc/config/arm/crti.asm ++++ b/gcc/config/arm/crti.asm +@@ -64,8 +64,6 @@ + #endif + .endm + +- .file "crti.asm" +- + .section ".init" + .align 2 + .global _init +--- a/gcc/config/arm/crtn.asm ++++ b/gcc/config/arm/crtn.asm +@@ -72,8 +72,6 @@ + .endm + + +- .file "crtn.asm" +- + .section ".init" + ;; + FUNC_END +--- a/gcc/config/arm/elf.h ++++ b/gcc/config/arm/elf.h +@@ -145,3 +145,17 @@ + } \ + while (0) + ++/* Horrible hack: We want to prevent some libgcc routines being included ++ for some multilibs. */ ++#ifndef __ARM_ARCH_6M__ ++#undef L_fixdfsi ++#undef L_fixunsdfsi ++#undef L_truncdfsf2 ++#undef L_fixsfsi ++#undef L_fixunssfsi ++#undef L_floatdidf ++#undef L_floatdisf ++#undef L_floatundidf ++#undef L_floatundisf ++#endif ++ +--- /dev/null ++++ b/gcc/config/arm/fp16.c +@@ -0,0 +1,150 @@ ++/* Half-float conversion routines. ++ ++ Copyright (C) 2008 Free Software Foundation, Inc. ++ Contributed by CodeSourcery. ++ ++ This file is free software; you can redistribute it and/or modify it ++ under the terms of the GNU General Public License as published by the ++ Free Software Foundation; either version 2, or (at your option) any ++ later version. ++ ++ In addition to the permissions in the GNU General Public License, the ++ Free Software Foundation gives you unlimited permission to link the ++ compiled version of this file into combinations with other programs, ++ and to distribute those combinations without any restriction coming ++ from the use of this file. (The General Public License restrictions ++ do apply in other respects; for example, they cover modification of ++ the file, and distribution when not linked into a combine ++ executable.) ++ ++ This file is distributed in the hope that it will be useful, but ++ WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ General Public License for more details. ++ ++ You should have received a copy of the GNU General Public License ++ along with this program; see the file COPYING. If not, write to ++ the Free Software Foundation, 51 Franklin Street, Fifth Floor, ++ Boston, MA 02110-1301, USA. 
*/ ++ ++static inline unsigned short ++__gnu_f2h_internal(unsigned int a, int ieee) ++{ ++ unsigned short sign = (a >> 16) & 0x8000; ++ int aexp = (a >> 23) & 0xff; ++ unsigned int mantissa = a & 0x007fffff; ++ unsigned int mask; ++ unsigned int increment; ++ ++ if (aexp == 0xff) ++ { ++ if (!ieee) ++ return sign; ++ return sign | 0x7e00 | (mantissa >> 13); ++ } ++ ++ if (aexp == 0 && mantissa == 0) ++ return sign; ++ ++ aexp -= 127; ++ ++ /* Decimal point between bits 22 and 23. */ ++ mantissa |= 0x00800000; ++ if (aexp < -14) ++ { ++ mask = 0x007fffff; ++ if (aexp < -25) ++ aexp = -26; ++ else if (aexp != -25) ++ mask >>= 24 + aexp; ++ } ++ else ++ mask = 0x00001fff; ++ ++ /* Round. */ ++ if (mantissa & mask) ++ { ++ increment = (mask + 1) >> 1; ++ if ((mantissa & mask) == increment) ++ increment = mantissa & (increment << 1); ++ mantissa += increment; ++ if (mantissa >= 0x01000000) ++ { ++ mantissa >>= 1; ++ aexp++; ++ } ++ } ++ ++ if (ieee) ++ { ++ if (aexp > 15) ++ return sign | 0x7c00; ++ } ++ else ++ { ++ if (aexp > 16) ++ return sign | 0x7fff; ++ } ++ ++ if (aexp < -24) ++ return sign; ++ ++ if (aexp < -14) ++ { ++ mantissa >>= -14 - aexp; ++ aexp = -14; ++ } ++ ++ /* We leave the leading 1 in the mantissa, and subtract one ++ from the exponent bias to compensate. */ ++ return sign | (((aexp + 14) << 10) + (mantissa >> 13)); ++} ++ ++unsigned int ++__gnu_h2f_internal(unsigned short a, int ieee) ++{ ++ unsigned int sign = (unsigned int)(a & 0x8000) << 16; ++ int aexp = (a >> 10) & 0x1f; ++ unsigned int mantissa = a & 0x3ff; ++ ++ if (aexp == 0x1f && ieee) ++ return sign | 0x7f800000 | (mantissa << 13); ++ ++ if (aexp == 0) ++ { ++ int shift; ++ ++ if (mantissa == 0) ++ return sign; ++ ++ shift = __builtin_clz(mantissa) - 21; ++ mantissa <<= shift; ++ aexp = -shift; ++ } ++ ++ return sign | (((aexp + 0x70) << 23) + (mantissa << 13)); ++} ++ ++unsigned short ++__gnu_f2h_ieee(unsigned int a) ++{ ++ return __gnu_f2h_internal(a, 1); ++} ++ ++unsigned int ++__gnu_h2f_ieee(unsigned short a) ++{ ++ return __gnu_h2f_internal(a, 1); ++} ++ ++unsigned short ++__gnu_f2h_alternative(unsigned int x) ++{ ++ return __gnu_f2h_internal(x, 0); ++} ++ ++unsigned int ++__gnu_h2f_alternative(unsigned short a) ++{ ++ return __gnu_h2f_internal(a, 0); ++} +--- /dev/null ++++ b/gcc/config/arm/hwdiv.md +@@ -0,0 +1,40 @@ ++;; ARM instruction patterns for hardware division ++;; Copyright (C) 2005, 2006, 2007 Free Software Foundation, Inc. ++;; Written by CodeSourcery, LLC. ++;; ++;; This file is part of GCC. ++ ++;; GCC is free software; you can redistribute it and/or modify it ++;; under the terms of the GNU General Public License as published ++;; by the Free Software Foundation; either version 3, or (at your ++;; option) any later version. ++ ++;; GCC is distributed in the hope that it will be useful, but WITHOUT ++;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY ++;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public ++;; License for more details. ++ ++;; You should have received a copy of the GNU General Public License ++;; along with GCC; see the file COPYING3. If not see ++;; . 
++ ++(define_insn "divsi3" ++ [(set (match_operand:SI 0 "s_register_operand" "=r") ++ (div:SI (match_operand:SI 1 "s_register_operand" "r") ++ (match_operand:SI 2 "s_register_operand" "r")))] ++ "arm_arch_hwdiv" ++ "sdiv%?\t%0, %1, %2" ++ [(set_attr "predicable" "yes") ++ (set_attr "insn" "sdiv")] ++) ++ ++(define_insn "udivsi3" ++ [(set (match_operand:SI 0 "s_register_operand" "=r") ++ (udiv:SI (match_operand:SI 1 "s_register_operand" "r") ++ (match_operand:SI 2 "s_register_operand" "r")))] ++ "arm_arch_hwdiv" ++ "udiv%?\t%0, %1, %2" ++ [(set_attr "predicable" "yes") ++ (set_attr "insn" "udiv")] ++) ++ +--- a/gcc/config/arm/ieee754-df.S ++++ b/gcc/config/arm/ieee754-df.S +@@ -56,7 +56,7 @@ + #endif + + +-#ifdef L_negdf2 ++#ifdef L_arm_negdf2 + + ARM_FUNC_START negdf2 + ARM_FUNC_ALIAS aeabi_dneg negdf2 +@@ -70,7 +70,7 @@ ARM_FUNC_ALIAS aeabi_dneg negdf2 + + #endif + +-#ifdef L_addsubdf3 ++#ifdef L_arm_addsubdf3 + + ARM_FUNC_START aeabi_drsub + +@@ -88,7 +88,7 @@ ARM_FUNC_ALIAS aeabi_dsub subdf3 + ARM_FUNC_START adddf3 + ARM_FUNC_ALIAS aeabi_dadd adddf3 + +-1: do_push {r4, r5, lr} ++1: do_push (r4, r5, lr) + + @ Look for zeroes, equal values, INF, or NAN. + shift1 lsl, r4, xh, #1 +@@ -432,7 +432,7 @@ ARM_FUNC_ALIAS aeabi_ui2d floatunsidf + do_it eq, t + moveq r1, #0 + RETc(eq) +- do_push {r4, r5, lr} ++ do_push (r4, r5, lr) + mov r4, #0x400 @ initial exponent + add r4, r4, #(52-1 - 1) + mov r5, #0 @ sign bit is 0 +@@ -452,7 +452,7 @@ ARM_FUNC_ALIAS aeabi_i2d floatsidf + do_it eq, t + moveq r1, #0 + RETc(eq) +- do_push {r4, r5, lr} ++ do_push (r4, r5, lr) + mov r4, #0x400 @ initial exponent + add r4, r4, #(52-1 - 1) + ands r5, r0, #0x80000000 @ sign bit in r5 +@@ -486,7 +486,7 @@ ARM_FUNC_ALIAS aeabi_f2d extendsfdf2 + RETc(eq) @ we are done already. + + @ value was denormalized. We can normalize it now. +- do_push {r4, r5, lr} ++ do_push (r4, r5, lr) + mov r4, #0x380 @ setup corresponding exponent + and r5, xh, #0x80000000 @ move sign bit in r5 + bic xh, xh, #0x80000000 +@@ -513,9 +513,9 @@ ARM_FUNC_ALIAS aeabi_ul2d floatundidf + @ compatibility. + adr ip, LSYM(f0_ret) + @ Push pc as well so that RETLDM works correctly. +- do_push {r4, r5, ip, lr, pc} ++ do_push (r4, r5, ip, lr, pc) + #else +- do_push {r4, r5, lr} ++ do_push (r4, r5, lr) + #endif + + mov r5, #0 +@@ -539,9 +539,9 @@ ARM_FUNC_ALIAS aeabi_l2d floatdidf + @ compatibility. + adr ip, LSYM(f0_ret) + @ Push pc as well so that RETLDM works correctly. +- do_push {r4, r5, ip, lr, pc} ++ do_push (r4, r5, ip, lr, pc) + #else +- do_push {r4, r5, lr} ++ do_push (r4, r5, lr) + #endif + + ands r5, ah, #0x80000000 @ sign bit in r5 +@@ -590,7 +590,7 @@ ARM_FUNC_ALIAS aeabi_l2d floatdidf + @ Legacy code expects the result to be returned in f0. Copy it + @ there as well. + LSYM(f0_ret): +- do_push {r0, r1} ++ do_push (r0, r1) + ldfd f0, [sp], #8 + RETLDM + +@@ -603,11 +603,11 @@ LSYM(f0_ret): + + #endif /* L_addsubdf3 */ + +-#ifdef L_muldivdf3 ++#ifdef L_arm_muldivdf3 + + ARM_FUNC_START muldf3 + ARM_FUNC_ALIAS aeabi_dmul muldf3 +- do_push {r4, r5, r6, lr} ++ do_push (r4, r5, r6, lr) + + @ Mask out exponents, trap any zero/denormal/INF/NAN. 
+ mov ip, #0xff +@@ -840,7 +840,7 @@ LSYM(Lml_d): + orr xh, xh, r6 + teq r5, #0 + do_it ne +- movne pc, lr ++ RETc(ne) + 2: and r6, yh, #0x80000000 + 3: movs yl, yl, lsl #1 + adc yh, yh, yh +@@ -849,7 +849,7 @@ LSYM(Lml_d): + subeq r5, r5, #1 + beq 3b + orr yh, yh, r6 +- mov pc, lr ++ RET + + LSYM(Lml_s): + @ Isolate the INF and NAN cases away +@@ -915,7 +915,7 @@ LSYM(Lml_n): + ARM_FUNC_START divdf3 + ARM_FUNC_ALIAS aeabi_ddiv divdf3 + +- do_push {r4, r5, r6, lr} ++ do_push (r4, r5, r6, lr) + + @ Mask out exponents, trap any zero/denormal/INF/NAN. + mov ip, #0xff +@@ -1103,7 +1103,7 @@ LSYM(Ldv_s): + + #endif /* L_muldivdf3 */ + +-#ifdef L_cmpdf2 ++#ifdef L_arm_cmpdf2 + + @ Note: only r0 (return value) and ip are clobbered here. + +@@ -1122,7 +1122,7 @@ ARM_FUNC_ALIAS nedf2 cmpdf2 + ARM_FUNC_ALIAS eqdf2 cmpdf2 + mov ip, #1 @ how should we specify unordered here? + +-1: str ip, [sp, #-4] ++1: str ip, [sp, #-4]! + + @ Trap any INF/NAN first. + mov ip, xh, lsl #1 +@@ -1134,7 +1134,8 @@ ARM_FUNC_ALIAS eqdf2 cmpdf2 + + @ Test for equality. + @ Note that 0.0 is equal to -0.0. +-2: orrs ip, xl, xh, lsl #1 @ if x == 0.0 or -0.0 ++2: add sp, sp, #4 ++ orrs ip, xl, xh, lsl #1 @ if x == 0.0 or -0.0 + do_it eq, e + COND(orr,s,eq) ip, yl, yh, lsl #1 @ and y == 0.0 or -0.0 + teqne xh, yh @ or xh == yh +@@ -1173,7 +1174,7 @@ ARM_FUNC_ALIAS eqdf2 cmpdf2 + bne 2b + orrs ip, yl, yh, lsl #12 + beq 2b @ y is not NAN +-5: ldr r0, [sp, #-4] @ unordered return code ++5: ldr r0, [sp], #4 @ unordered return code + RET + + FUNC_END gedf2 +@@ -1199,7 +1200,7 @@ ARM_FUNC_ALIAS aeabi_cdcmple aeabi_cdcmp + + @ The status-returning routines are required to preserve all + @ registers except ip, lr, and cpsr. +-6: do_push {r0, lr} ++6: do_push (r0, lr) + ARM_CALL cmpdf2 + @ Set the Z flag correctly, and the C flag unconditionally. + cmp r0, #0 +@@ -1271,7 +1272,7 @@ ARM_FUNC_START aeabi_dcmpgt + + #endif /* L_cmpdf2 */ + +-#ifdef L_unorddf2 ++#ifdef L_arm_unorddf2 + + ARM_FUNC_START unorddf2 + ARM_FUNC_ALIAS aeabi_dcmpun unorddf2 +@@ -1297,7 +1298,7 @@ ARM_FUNC_ALIAS aeabi_dcmpun unorddf2 + + #endif /* L_unorddf2 */ + +-#ifdef L_fixdfsi ++#ifdef L_arm_fixdfsi + + ARM_FUNC_START fixdfsi + ARM_FUNC_ALIAS aeabi_d2iz fixdfsi +@@ -1339,7 +1340,7 @@ ARM_FUNC_ALIAS aeabi_d2iz fixdfsi + + #endif /* L_fixdfsi */ + +-#ifdef L_fixunsdfsi ++#ifdef L_arm_fixunsdfsi + + ARM_FUNC_START fixunsdfsi + ARM_FUNC_ALIAS aeabi_d2uiz fixunsdfsi +@@ -1377,7 +1378,7 @@ ARM_FUNC_ALIAS aeabi_d2uiz fixunsdfsi + + #endif /* L_fixunsdfsi */ + +-#ifdef L_truncdfsf2 ++#ifdef L_arm_truncdfsf2 + + ARM_FUNC_START truncdfsf2 + ARM_FUNC_ALIAS aeabi_d2f truncdfsf2 +--- a/gcc/config/arm/ieee754-sf.S ++++ b/gcc/config/arm/ieee754-sf.S +@@ -38,7 +38,7 @@ + * if necessary without impacting performances. + */ + +-#ifdef L_negsf2 ++#ifdef L_arm_negsf2 + + ARM_FUNC_START negsf2 + ARM_FUNC_ALIAS aeabi_fneg negsf2 +@@ -51,7 +51,7 @@ ARM_FUNC_ALIAS aeabi_fneg negsf2 + + #endif + +-#ifdef L_addsubsf3 ++#ifdef L_arm_addsubsf3 + + ARM_FUNC_START aeabi_frsub + +@@ -448,7 +448,7 @@ LSYM(f0_ret): + + #endif /* L_addsubsf3 */ + +-#ifdef L_muldivsf3 ++#ifdef L_arm_muldivsf3 + + ARM_FUNC_START mulsf3 + ARM_FUNC_ALIAS aeabi_fmul mulsf3 +@@ -486,7 +486,7 @@ LSYM(Lml_x): + and r3, ip, #0x80000000 + + @ Well, no way to make it shorter without the umull instruction. 
+- do_push {r3, r4, r5} ++ do_push (r3, r4, r5) + mov r4, r0, lsr #16 + mov r5, r1, lsr #16 + bic r0, r0, r4, lsl #16 +@@ -497,7 +497,7 @@ LSYM(Lml_x): + mla r0, r4, r1, r0 + adds r3, r3, r0, lsl #16 + adc r1, ip, r0, lsr #16 +- do_pop {r0, r4, r5} ++ do_pop (r0, r4, r5) + + #else + +@@ -795,7 +795,7 @@ LSYM(Ldv_s): + + #endif /* L_muldivsf3 */ + +-#ifdef L_cmpsf2 ++#ifdef L_arm_cmpsf2 + + @ The return value in r0 is + @ +@@ -827,7 +827,7 @@ ARM_FUNC_ALIAS nesf2 cmpsf2 + ARM_FUNC_ALIAS eqsf2 cmpsf2 + mov ip, #1 @ how should we specify unordered here? + +-1: str ip, [sp, #-4] ++1: str ip, [sp, #-4]! + + @ Trap any INF/NAN first. + mov r2, r0, lsl #1 +@@ -839,7 +839,8 @@ ARM_FUNC_ALIAS eqsf2 cmpsf2 + + @ Compare values. + @ Note that 0.0 is equal to -0.0. +-2: orrs ip, r2, r3, lsr #1 @ test if both are 0, clear C flag ++2: add sp, sp, #4 ++ orrs ip, r2, r3, lsr #1 @ test if both are 0, clear C flag + do_it ne + teqne r0, r1 @ if not 0 compare sign + do_it pl +@@ -863,7 +864,7 @@ ARM_FUNC_ALIAS eqsf2 cmpsf2 + bne 2b + movs ip, r1, lsl #9 + beq 2b @ r1 is not NAN +-5: ldr r0, [sp, #-4] @ return unordered code. ++5: ldr r0, [sp], #4 @ return unordered code. + RET + + FUNC_END gesf2 +@@ -886,7 +887,7 @@ ARM_FUNC_ALIAS aeabi_cfcmple aeabi_cfcmp + + @ The status-returning routines are required to preserve all + @ registers except ip, lr, and cpsr. +-6: do_push {r0, r1, r2, r3, lr} ++6: do_push (r0, r1, r2, r3, lr) + ARM_CALL cmpsf2 + @ Set the Z flag correctly, and the C flag unconditionally. + cmp r0, #0 +@@ -958,7 +959,7 @@ ARM_FUNC_START aeabi_fcmpgt + + #endif /* L_cmpsf2 */ + +-#ifdef L_unordsf2 ++#ifdef L_arm_unordsf2 + + ARM_FUNC_START unordsf2 + ARM_FUNC_ALIAS aeabi_fcmpun unordsf2 +@@ -983,7 +984,7 @@ ARM_FUNC_ALIAS aeabi_fcmpun unordsf2 + + #endif /* L_unordsf2 */ + +-#ifdef L_fixsfsi ++#ifdef L_arm_fixsfsi + + ARM_FUNC_START fixsfsi + ARM_FUNC_ALIAS aeabi_f2iz fixsfsi +@@ -1025,7 +1026,7 @@ ARM_FUNC_ALIAS aeabi_f2iz fixsfsi + + #endif /* L_fixsfsi */ + +-#ifdef L_fixunssfsi ++#ifdef L_arm_fixunssfsi + + ARM_FUNC_START fixunssfsi + ARM_FUNC_ALIAS aeabi_f2uiz fixunssfsi +--- a/gcc/config/arm/iwmmxt.md ++++ b/gcc/config/arm/iwmmxt.md +@@ -105,8 +105,8 @@ + ) + + (define_insn "*iwmmxt_movsi_insn" +- [(set (match_operand:SI 0 "nonimmediate_operand" "=r,r,r, m,z,r,?z,Uy,z") +- (match_operand:SI 1 "general_operand" "rI,K,mi,r,r,z,Uy,z,z"))] ++ [(set (match_operand:SI 0 "nonimmediate_operand" "=rk,r,r,rk, m,z,r,?z,Uy,z") ++ (match_operand:SI 1 "general_operand" "rk, I,K,mi,rk,r,z,Uy,z, z"))] + "TARGET_REALLY_IWMMXT + && ( register_operand (operands[0], SImode) + || register_operand (operands[1], SImode))" +@@ -114,19 +114,20 @@ + switch (which_alternative) + { + case 0: return \"mov\\t%0, %1\"; +- case 1: return \"mvn\\t%0, #%B1\"; +- case 2: return \"ldr\\t%0, %1\"; +- case 3: return \"str\\t%1, %0\"; +- case 4: return \"tmcr\\t%0, %1\"; +- case 5: return \"tmrc\\t%0, %1\"; +- case 6: return arm_output_load_gr (operands); +- case 7: return \"wstrw\\t%1, %0\"; ++ case 1: return \"mov\\t%0, %1\"; ++ case 2: return \"mvn\\t%0, #%B1\"; ++ case 3: return \"ldr\\t%0, %1\"; ++ case 4: return \"str\\t%1, %0\"; ++ case 5: return \"tmcr\\t%0, %1\"; ++ case 6: return \"tmrc\\t%0, %1\"; ++ case 7: return arm_output_load_gr (operands); ++ case 8: return \"wstrw\\t%1, %0\"; + default:return \"wstrw\\t%1, [sp, #-4]!\;wldrw\\t%0, [sp], #4\\t@move CG reg\"; + }" +- [(set_attr "type" "*,*,load1,store1,*,*,load1,store1,*") +- (set_attr "length" "*,*,*, *,*,*, 16, *,8") +- (set_attr "pool_range" "*,*,4096, 
*,*,*,1024, *,*") +- (set_attr "neg_pool_range" "*,*,4084, *,*,*, *, 1012,*") ++ [(set_attr "type" "*,*,*,load1,store1,*,*,load1,store1,*") ++ (set_attr "length" "*,*,*,*, *,*,*, 16, *,8") ++ (set_attr "pool_range" "*,*,*,4096, *,*,*,1024, *,*") ++ (set_attr "neg_pool_range" "*,*,*,4084, *,*,*, *, 1012,*") + ;; Note - the "predicable" attribute is not allowed to have alternatives. + ;; Since the wSTRw wCx instruction is not predicable, we cannot support + ;; predicating any of the alternatives in this template. Instead, +@@ -166,9 +167,9 @@ + (set_attr "neg_pool_range" "*,*,4084, *,*,*")] + ) + +-(define_insn "movv8qi_internal" +- [(set (match_operand:V8QI 0 "nonimmediate_operand" "=y,m,y,?r,?y,?r,?r,?m") +- (match_operand:V8QI 1 "general_operand" "y,y,mi,y,r,r,mi,r"))] ++(define_insn "mov_internal" ++ [(set (match_operand:VMMX 0 "nonimmediate_operand" "=y,m,y,?r,?y,?r,?r,?m") ++ (match_operand:VMMX 1 "general_operand" "y,y,mi,y,r,r,mi,r"))] + "TARGET_REALLY_IWMMXT" + "* + switch (which_alternative) +@@ -187,64 +188,6 @@ + (set_attr "pool_range" "*, *, 256,*,*,*, 256,*") + (set_attr "neg_pool_range" "*, *, 244,*,*,*, 244,*")]) + +-(define_insn "movv4hi_internal" +- [(set (match_operand:V4HI 0 "nonimmediate_operand" "=y,m,y,?r,?y,?r,?r,?m") +- (match_operand:V4HI 1 "general_operand" "y,y,mi,y,r,r,mi,r"))] +- "TARGET_REALLY_IWMMXT" +- "* +- switch (which_alternative) +- { +- case 0: return \"wmov%?\\t%0, %1\"; +- case 1: return \"wstrd%?\\t%1, %0\"; +- case 2: return \"wldrd%?\\t%0, %1\"; +- case 3: return \"tmrrc%?\\t%Q0, %R0, %1\"; +- case 4: return \"tmcrr%?\\t%0, %Q1, %R1\"; +- case 5: return \"#\"; +- default: return output_move_double (operands); +- }" +- [(set_attr "predicable" "yes") +- (set_attr "length" "4, 4, 4,4,4,8, 8,8") +- (set_attr "type" "*,store1,load1,*,*,*,load1,store1") +- (set_attr "pool_range" "*, *, 256,*,*,*, 256,*") +- (set_attr "neg_pool_range" "*, *, 244,*,*,*, 244,*")]) +- +-(define_insn "movv2si_internal" +- [(set (match_operand:V2SI 0 "nonimmediate_operand" "=y,m,y,?r,?y,?r,?r,?m") +- (match_operand:V2SI 1 "general_operand" "y,y,mi,y,r,r,mi,r"))] +- "TARGET_REALLY_IWMMXT" +- "* +- switch (which_alternative) +- { +- case 0: return \"wmov%?\\t%0, %1\"; +- case 1: return \"wstrd%?\\t%1, %0\"; +- case 2: return \"wldrd%?\\t%0, %1\"; +- case 3: return \"tmrrc%?\\t%Q0, %R0, %1\"; +- case 4: return \"tmcrr%?\\t%0, %Q1, %R1\"; +- case 5: return \"#\"; +- default: return output_move_double (operands); +- }" +- [(set_attr "predicable" "yes") +- (set_attr "length" "4, 4, 4,4,4,8, 24,8") +- (set_attr "type" "*,store1,load1,*,*,*,load1,store1") +- (set_attr "pool_range" "*, *, 256,*,*,*, 256,*") +- (set_attr "neg_pool_range" "*, *, 244,*,*,*, 244,*")]) +- +-;; This pattern should not be needed. It is to match a +-;; wierd case generated by GCC when no optimizations are +-;; enabled. (Try compiling gcc/testsuite/gcc.c-torture/ +-;; compile/simd-5.c at -O0). The mode for operands[1] is +-;; deliberately omitted. +-(define_insn "movv2si_internal_2" +- [(set (match_operand:V2SI 0 "nonimmediate_operand" "=?r") +- (match_operand 1 "immediate_operand" "mi"))] +- "TARGET_REALLY_IWMMXT" +- "* return output_move_double (operands);" +- [(set_attr "predicable" "yes") +- (set_attr "length" "8") +- (set_attr "type" "load1") +- (set_attr "pool_range" "256") +- (set_attr "neg_pool_range" "244")]) +- + ;; Vector add/subtract + + (define_insn "*add3_iwmmxt" +--- a/gcc/config/arm/lib1funcs.asm ++++ b/gcc/config/arm/lib1funcs.asm +@@ -94,7 +94,8 @@ Boston, MA 02110-1301, USA. 
*/ + + #if defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) \ + || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) \ +- || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) ++ || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) \ ++ || defined(__ARM_ARCH_6M__) + # define __ARM_ARCH__ 6 + #endif + +@@ -237,8 +238,8 @@ LSYM(Lend_fde): + .macro shift1 op, arg0, arg1, arg2 + \op \arg0, \arg1, \arg2 + .endm +-#define do_push push +-#define do_pop pop ++#define do_push(...) push {__VA_ARGS__} ++#define do_pop(...) pop {__VA_ARGS__} + #define COND(op1, op2, cond) op1 ## op2 ## cond + /* Perform an arithmetic operation with a variable shift operand. This + requires two instructions and a scratch register on Thumb-2. */ +@@ -252,24 +253,133 @@ LSYM(Lend_fde): + .macro shift1 op, arg0, arg1, arg2 + mov \arg0, \arg1, \op \arg2 + .endm +-#define do_push stmfd sp!, +-#define do_pop ldmfd sp!, ++#if defined(__low_irq_latency__) ++#define do_push(...) \ ++ _buildN1(do_push, _buildC1(__VA_ARGS__))( __VA_ARGS__) ++#define _buildN1(BASE, X) _buildN2(BASE, X) ++#define _buildN2(BASE, X) BASE##X ++#define _buildC1(...) _buildC2(__VA_ARGS__,9,8,7,6,5,4,3,2,1) ++#define _buildC2(a1,a2,a3,a4,a5,a6,a7,a8,a9,c,...) c ++ ++#define do_push1(r1) str r1, [sp, #-4]! ++#define do_push2(r1, r2) str r2, [sp, #-4]! ; str r1, [sp, #-4]! ++#define do_push3(r1, r2, r3) str r3, [sp, #-4]! ; str r2, [sp, #-4]!; str r1, [sp, #-4]! ++#define do_push4(r1, r2, r3, r4) \ ++ do_push3 (r2, r3, r4);\ ++ do_push1 (r1) ++#define do_push5(r1, r2, r3, r4, r5) \ ++ do_push4 (r2, r3, r4, r5);\ ++ do_push1 (r1) ++ ++#define do_pop(...) \ ++_buildN1(do_pop, _buildC1(__VA_ARGS__))( __VA_ARGS__) ++ ++#define do_pop1(r1) ldr r1, [sp], #4 ++#define do_pop2(r1, r2) ldr r1, [sp], #4 ; ldr r2, [sp], #4 ++#define do_pop3(r1, r2, r3) ldr r1, [sp], #4 ; str r2, [sp], #4; str r3, [sp], #4 ++#define do_pop4(r1, r2, r3, r4) \ ++ do_pop1 (r1);\ ++ do_pup3 (r2, r3, r4) ++#define do_pop5(r1, r2, r3, r4, r5) \ ++ do_pop1 (r1);\ ++ do_pop4 (r2, r3, r4, r5) ++#else ++#define do_push(...) stmfd sp!, { __VA_ARGS__} ++#define do_pop(...) ldmfd sp!, {__VA_ARGS__} ++#endif ++ ++ + #define COND(op1, op2, cond) op1 ## cond ## op2 + .macro shiftop name, dest, src1, src2, shiftop, shiftreg, tmp + \name \dest, \src1, \src2, \shiftop \shiftreg + .endm + #endif + +-.macro ARM_LDIV0 name ++#ifdef __ARM_EABI__ ++.macro ARM_LDIV0 name signed ++ cmp r0, #0 ++ .ifc \signed, unsigned ++ movne r0, #0xffffffff ++ .else ++ movgt r0, #0x7fffffff ++ movlt r0, #0x80000000 ++ .endif ++ b SYM (__aeabi_idiv0) __PLT__ ++.endm ++#else ++.macro ARM_LDIV0 name signed + str lr, [sp, #-8]! + 98: cfi_push 98b - __\name, 0xe, -0x8, 0x8 + bl SYM (__div0) __PLT__ + mov r0, #0 @ About as wrong as it could be. + RETLDM unwind=98b + .endm ++#endif + + +-.macro THUMB_LDIV0 name ++#ifdef __ARM_EABI__ ++.macro THUMB_LDIV0 name signed ++#if defined(__ARM_ARCH_6M__) ++ .ifc \signed, unsigned ++ cmp r0, #0 ++ beq 1f ++ mov r0, #0 ++ mvn r0, r0 @ 0xffffffff ++1: ++ .else ++ cmp r0, #0 ++ beq 2f ++ blt 3f ++ mov r0, #0 ++ mvn r0, r0 ++ lsr r0, r0, #1 @ 0x7fffffff ++ b 2f ++3: mov r0, #0x80 ++ lsl r0, r0, #24 @ 0x80000000 ++2: ++ .endif ++ push {r0, r1, r2} ++ ldr r0, 4f ++ adr r1, 4f ++ add r0, r1 ++ str r0, [sp, #8] ++ @ We know we are not on armv4t, so pop pc is safe. 
++ pop {r0, r1, pc} ++ .align 2 ++4: ++ .word __aeabi_idiv0 - 4b ++#elif defined(__thumb2__) ++ .syntax unified ++ .ifc \signed, unsigned ++ cbz r0, 1f ++ mov r0, #0xffffffff ++1: ++ .else ++ cmp r0, #0 ++ do_it gt ++ movgt r0, #0x7fffffff ++ do_it lt ++ movlt r0, #0x80000000 ++ .endif ++ b.w SYM(__aeabi_idiv0) __PLT__ ++#else ++ .align 2 ++ bx pc ++ nop ++ .arm ++ cmp r0, #0 ++ .ifc \signed, unsigned ++ movne r0, #0xffffffff ++ .else ++ movgt r0, #0x7fffffff ++ movlt r0, #0x80000000 ++ .endif ++ b SYM(__aeabi_idiv0) __PLT__ ++ .thumb ++#endif ++.endm ++#else ++.macro THUMB_LDIV0 name signed + push { r1, lr } + 98: cfi_push 98b - __\name, 0xe, -0x4, 0x8 + bl SYM (__div0) +@@ -281,18 +391,19 @@ LSYM(Lend_fde): + pop { r1, pc } + #endif + .endm ++#endif + + .macro FUNC_END name + SIZE (__\name) + .endm + +-.macro DIV_FUNC_END name ++.macro DIV_FUNC_END name signed + cfi_start __\name, LSYM(Lend_div0) + LSYM(Ldiv0): + #ifdef __thumb__ +- THUMB_LDIV0 \name ++ THUMB_LDIV0 \name \signed + #else +- ARM_LDIV0 \name ++ ARM_LDIV0 \name \signed + #endif + cfi_end LSYM(Lend_div0) + FUNC_END \name +@@ -367,6 +478,9 @@ _L__\name: + + #else /* !(__INTERWORKING_STUBS__ || __thumb2__) */ + ++#ifdef __ARM_ARCH_6M__ ++#define EQUIV .thumb_set ++#else + .macro ARM_FUNC_START name + .text + .globl SYM (__\name) +@@ -379,6 +493,7 @@ SYM (__\name): + .macro ARM_CALL name + bl __\name + .endm ++#endif + + #endif + +@@ -391,6 +506,7 @@ SYM (__\name): + #endif + .endm + ++#ifndef __ARM_ARCH_6M__ + .macro ARM_FUNC_ALIAS new old + .globl SYM (__\new) + EQUIV SYM (__\new), SYM (__\old) +@@ -398,6 +514,13 @@ SYM (__\name): + .set SYM (_L__\new), SYM (_L__\old) + #endif + .endm ++#endif ++ ++#ifdef __ARM_EABI__ ++.macro WEAK name ++ .weak SYM (__\name) ++.endm ++#endif + + #ifdef __thumb__ + /* Register aliases. */ +@@ -423,6 +546,23 @@ pc .req r15 + + #if __ARM_ARCH__ >= 5 && ! defined (__OPTIMIZE_SIZE__) + ++#if defined(__ARM_TUNE_MARVELL_F__) ++ clz \curbit, \dividend ++ clz \result, \divisor ++ sub \curbit, \result, \curbit ++ mov \divisor, \divisor, lsl \curbit ++ rsb \curbit, \curbit, #31 ++ mov \curbit, \curbit, lsl #2 ++ mov \result, #0 ++ add pc, pc, \curbit, lsl #2 ++ nop ++ .rept 32 ++ cmp \dividend, \divisor ++ subcs \dividend, \dividend, \divisor ++ mov \divisor, \divisor, lsr #1 ++ adc \result, \result, \result ++ .endr ++#else /* ! defined(__ARM_TUNE_MARVELL_F__) */ + clz \curbit, \dividend + clz \result, \divisor + sub \curbit, \result, \curbit +@@ -438,6 +578,7 @@ pc .req r15 + adc \result, \result, \result + subcs \dividend, \dividend, \divisor, lsl #shift + .endr ++#endif /* defined(__ARM_TUNE_MARVELL_F__) */ + + #else /* __ARM_ARCH__ < 5 || defined (__OPTIMIZE_SIZE__) */ + #if __ARM_ARCH__ >= 5 +@@ -792,6 +933,7 @@ LSYM(Lgot_result): + + cmp divisor, #0 + beq LSYM(Ldiv0) ++LSYM(udivsi3_nodiv0): + mov curbit, #1 + mov result, #0 + +@@ -807,6 +949,9 @@ LSYM(Lgot_result): + + #else /* ARM version. */ + ++ /* Note: if called via udivsi3_nodiv0, this will unnecessarily check ++ for division-by-zero a second time. 
*/ ++LSYM(udivsi3_nodiv0): + subs r2, r1, #1 + RETc(eq) + bcc LSYM(Ldiv0) +@@ -831,19 +976,23 @@ LSYM(Lgot_result): + + #endif /* ARM version */ + +- DIV_FUNC_END udivsi3 ++ DIV_FUNC_END udivsi3 unsigned + + FUNC_START aeabi_uidivmod + #ifdef __thumb__ ++ cmp r1, #0 ++ beq LSYM(Ldiv0) + push {r0, r1, lr} +- bl SYM(__udivsi3) ++ bl LSYM(udivsi3_nodiv0) + POP {r1, r2, r3} + mul r2, r0 + sub r1, r1, r2 + bx r3 + #else ++ cmp r1, #0 ++ beq LSYM(Ldiv0) + stmfd sp!, { r0, r1, lr } +- bl SYM(__udivsi3) ++ bl LSYM(udivsi3_nodiv0) + ldmfd sp!, { r1, r2, lr } + mul r3, r2, r0 + sub r1, r1, r3 +@@ -890,7 +1039,7 @@ LSYM(Lover10): + + #endif /* ARM version. */ + +- DIV_FUNC_END umodsi3 ++ DIV_FUNC_END umodsi3 unsigned + + #endif /* L_umodsi3 */ + /* ------------------------------------------------------------------------ */ +@@ -902,7 +1051,7 @@ LSYM(Lover10): + #ifdef __thumb__ + cmp divisor, #0 + beq LSYM(Ldiv0) +- ++LSYM(divsi3_nodiv0): + push { work } + mov work, dividend + eor work, divisor @ Save the sign of the result. +@@ -934,8 +1083,9 @@ LSYM(Lover12): + #else /* ARM version. */ + + cmp r1, #0 +- eor ip, r0, r1 @ save the sign of the result. + beq LSYM(Ldiv0) ++LSYM(divsi3_nodiv0): ++ eor ip, r0, r1 @ save the sign of the result. + rsbmi r1, r1, #0 @ loops below use unsigned. + subs r2, r1, #1 @ division by 1 or -1 ? + beq 10f +@@ -970,19 +1120,23 @@ LSYM(Lover12): + + #endif /* ARM version */ + +- DIV_FUNC_END divsi3 ++ DIV_FUNC_END divsi3 signed + + FUNC_START aeabi_idivmod + #ifdef __thumb__ ++ cmp r1, #0 ++ beq LSYM(Ldiv0) + push {r0, r1, lr} +- bl SYM(__divsi3) ++ bl LSYM(divsi3_nodiv0) + POP {r1, r2, r3} + mul r2, r0 + sub r1, r1, r2 + bx r3 + #else ++ cmp r1, #0 ++ beq LSYM(Ldiv0) + stmfd sp!, { r0, r1, lr } +- bl SYM(__divsi3) ++ bl LSYM(divsi3_nodiv0) + ldmfd sp!, { r1, r2, lr } + mul r3, r2, r0 + sub r1, r1, r3 +@@ -1048,21 +1202,25 @@ LSYM(Lover12): + + #endif /* ARM version */ + +- DIV_FUNC_END modsi3 ++ DIV_FUNC_END modsi3 signed + + #endif /* L_modsi3 */ + /* ------------------------------------------------------------------------ */ + #ifdef L_dvmd_tls + +- FUNC_START div0 +- FUNC_ALIAS aeabi_idiv0 div0 +- FUNC_ALIAS aeabi_ldiv0 div0 +- ++#ifdef __ARM_EABI__ ++ WEAK aeabi_idiv0 ++ WEAK aeabi_ldiv0 ++ FUNC_START aeabi_idiv0 ++ FUNC_START aeabi_ldiv0 + RET +- + FUNC_END aeabi_ldiv0 + FUNC_END aeabi_idiv0 ++#else ++ FUNC_START div0 ++ RET + FUNC_END div0 ++#endif + + #endif /* L_divmodsi_tools */ + /* ------------------------------------------------------------------------ */ +@@ -1072,14 +1230,26 @@ LSYM(Lover12): + /* Constant taken from . */ + #define SIGFPE 8 + ++#ifdef __ARM_EABI__ ++ WEAK aeabi_idiv0 ++ WEAK aeabi_ldiv0 ++ ARM_FUNC_START aeabi_idiv0 ++ ARM_FUNC_START aeabi_ldiv0 ++#else + ARM_FUNC_START div0 ++#endif + +- do_push {r1, lr} ++ do_push (r1, lr) + mov r0, #SIGFPE + bl SYM(raise) __PLT__ + RETLDM r1 + ++#ifdef __ARM_EABI__ ++ FUNC_END aeabi_ldiv0 ++ FUNC_END aeabi_idiv0 ++#else + FUNC_END div0 ++#endif + + #endif /* L_dvmd_lnx */ + /* ------------------------------------------------------------------------ */ +@@ -1256,8 +1426,8 @@ LSYM(Lover12): + #endif /* L_call_via_rX */ + + /* Don't bother with the old interworking routines for Thumb-2. */ +-/* ??? Maybe only omit these on v7m. */ +-#ifndef __thumb2__ ++/* ??? Maybe only omit these on "m" variants. */ ++#if !defined(__thumb2__) && !defined(__ARM_ARCH_6M__) + + #if defined L_interwork_call_via_rX + +@@ -1387,7 +1557,11 @@ LSYM(Lchange_\register): + #endif /* Arch supports thumb. 
*/ + + #ifndef __symbian__ ++#ifndef __ARM_ARCH_6M__ + #include "ieee754-df.S" + #include "ieee754-sf.S" + #include "bpabi.S" +-#endif /* __symbian__ */ ++#else /* __ARM_ARCH_6M__ */ ++#include "bpabi-v6m.S" ++#endif /* __ARM_ARCH_6M__ */ ++#endif /* !__symbian__ */ +--- a/gcc/config/arm/libunwind.S ++++ b/gcc/config/arm/libunwind.S +@@ -53,6 +53,119 @@ + #endif + #endif + ++#ifdef __ARM_ARCH_6M__ ++ ++/* r0 points to a 16-word block. Upload these values to the actual core ++ state. */ ++FUNC_START restore_core_regs ++ mov r1, r0 ++ add r1, r1, #52 ++ ldmia r1!, {r3, r4, r5} ++ sub r3, r3, #4 ++ mov ip, r3 ++ str r5, [r3] ++ mov lr, r4 ++ /* Restore r8-r11. */ ++ mov r1, r0 ++ add r1, r1, #32 ++ ldmia r1!, {r2, r3, r4, r5} ++ mov r8, r2 ++ mov r9, r3 ++ mov sl, r4 ++ mov fp, r5 ++ mov r1, r0 ++ add r1, r1, #8 ++ ldmia r1!, {r2, r3, r4, r5, r6, r7} ++ ldr r1, [r0, #4] ++ ldr r0, [r0] ++ mov sp, ip ++ pop {pc} ++ FUNC_END restore_core_regs ++ UNPREFIX restore_core_regs ++ ++/* ARMV6M does not have coprocessors, so these should never be used. */ ++FUNC_START gnu_Unwind_Restore_VFP ++ RET ++ ++/* Store VFR regsters d0-d15 to the address in r0. */ ++FUNC_START gnu_Unwind_Save_VFP ++ RET ++ ++/* Load VFP registers d0-d15 from the address in r0. ++ Use this to load from FSTMD format. */ ++FUNC_START gnu_Unwind_Restore_VFP_D ++ RET ++ ++/* Store VFP registers d0-d15 to the address in r0. ++ Use this to store in FLDMD format. */ ++FUNC_START gnu_Unwind_Save_VFP_D ++ RET ++ ++/* Load VFP registers d16-d31 from the address in r0. ++ Use this to load from FSTMD (=VSTM) format. Needs VFPv3. */ ++FUNC_START gnu_Unwind_Restore_VFP_D_16_to_31 ++ RET ++ ++/* Store VFP registers d16-d31 to the address in r0. ++ Use this to store in FLDMD (=VLDM) format. Needs VFPv3. */ ++FUNC_START gnu_Unwind_Save_VFP_D_16_to_31 ++ RET ++ ++FUNC_START gnu_Unwind_Restore_WMMXD ++ RET ++ ++FUNC_START gnu_Unwind_Save_WMMXD ++ RET ++ ++FUNC_START gnu_Unwind_Restore_WMMXC ++ RET ++ ++FUNC_START gnu_Unwind_Save_WMMXC ++ RET ++ ++.macro UNWIND_WRAPPER name nargs ++ FUNC_START \name ++ /* Create a phase2_vrs structure. */ ++ /* Save r0 in the PC slot so we can use it as a scratch register. */ ++ push {r0} ++ add r0, sp, #4 ++ push {r0, lr} /* Push original SP and LR. */ ++ /* Make space for r8-r12. */ ++ sub sp, sp, #20 ++ /* Save low registers. */ ++ push {r0, r1, r2, r3, r4, r5, r6, r7} ++ /* Save high registers. */ ++ add r0, sp, #32 ++ mov r1, r8 ++ mov r2, r9 ++ mov r3, sl ++ mov r4, fp ++ mov r5, ip ++ stmia r0!, {r1, r2, r3, r4, r5} ++ /* Restore original low register values. */ ++ add r0, sp, #4 ++ ldmia r0!, {r1, r2, r3, r4, r5} ++ /* Restore orginial r0. */ ++ ldr r0, [sp, #60] ++ str r0, [sp] ++ /* Demand-save flags, plus an extra word for alignment. */ ++ mov r3, #0 ++ push {r2, r3} ++ /* Point r1 at the block. Pass r[0..nargs) unchanged. */ ++ add r\nargs, sp, #4 ++ ++ bl SYM (__gnu\name) ++ ++ ldr r3, [sp, #64] ++ add sp, sp, #72 ++ bx r3 ++ ++ FUNC_END \name ++ UNPREFIX \name ++.endm ++ ++#else /* !__ARM_ARCH_6M__ */ ++ + /* r0 points to a 16-word block. Upload these values to the actual core + state. */ + ARM_FUNC_START restore_core_regs +@@ -233,6 +346,8 @@ ARM_FUNC_START gnu_Unwind_Save_WMMXC + UNPREFIX \name + .endm + ++#endif /* !__ARM_ARCH_6M__ */ ++ + UNWIND_WRAPPER _Unwind_RaiseException 1 + UNWIND_WRAPPER _Unwind_Resume 1 + UNWIND_WRAPPER _Unwind_Resume_or_Rethrow 1 +--- /dev/null ++++ b/gcc/config/arm/linux-atomic.c +@@ -0,0 +1,280 @@ ++/* Linux-specific atomic operations for ARM EABI. 
++ Copyright (C) 2008 Free Software Foundation, Inc. ++ Contributed by CodeSourcery. ++ ++This file is part of GCC. ++ ++GCC is free software; you can redistribute it and/or modify it under ++the terms of the GNU General Public License as published by the Free ++Software Foundation; either version 2, or (at your option) any later ++version. ++ ++In addition to the permissions in the GNU General Public License, the ++Free Software Foundation gives you unlimited permission to link the ++compiled version of this file into combinations with other programs, ++and to distribute those combinations without any restriction coming ++from the use of this file. (The General Public License restrictions ++do apply in other respects; for example, they cover modification of ++the file, and distribution when not linked into a combine ++executable.) ++ ++GCC is distributed in the hope that it will be useful, but WITHOUT ANY ++WARRANTY; without even the implied warranty of MERCHANTABILITY or ++FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++for more details. ++ ++You should have received a copy of the GNU General Public License ++along with GCC; see the file COPYING. If not, write to the Free ++Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA ++02110-1301, USA. */ ++ ++/* Kernel helper for compare-and-exchange. */ ++typedef int (__kernel_cmpxchg_t) (int oldval, int newval, int *ptr); ++#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *) 0xffff0fc0) ++ ++/* Kernel helper for memory barrier. */ ++typedef void (__kernel_dmb_t) (void); ++#define __kernel_dmb (*(__kernel_dmb_t *) 0xffff0fa0) ++ ++/* Note: we implement byte, short and int versions of atomic operations using ++ the above kernel helpers, but there is no support for "long long" (64-bit) ++ operations as yet. */ ++ ++#define HIDDEN __attribute__ ((visibility ("hidden"))) ++ ++#ifdef __ARMEL__ ++#define INVERT_MASK_1 0 ++#define INVERT_MASK_2 0 ++#else ++#define INVERT_MASK_1 24 ++#define INVERT_MASK_2 16 ++#endif ++ ++#define MASK_1 0xffu ++#define MASK_2 0xffffu ++ ++#define FETCH_AND_OP_WORD(OP, PFX_OP, INF_OP) \ ++ int HIDDEN \ ++ __sync_fetch_and_##OP##_4 (int *ptr, int val) \ ++ { \ ++ int failure, tmp; \ ++ \ ++ do { \ ++ tmp = *ptr; \ ++ failure = __kernel_cmpxchg (tmp, PFX_OP tmp INF_OP val, ptr); \ ++ } while (failure != 0); \ ++ \ ++ return tmp; \ ++ } ++ ++FETCH_AND_OP_WORD (add, , +) ++FETCH_AND_OP_WORD (sub, , -) ++FETCH_AND_OP_WORD (or, , |) ++FETCH_AND_OP_WORD (and, , &) ++FETCH_AND_OP_WORD (xor, , ^) ++FETCH_AND_OP_WORD (nand, ~, &) ++ ++#define NAME_oldval(OP, WIDTH) __sync_fetch_and_##OP##_##WIDTH ++#define NAME_newval(OP, WIDTH) __sync_##OP##_and_fetch_##WIDTH ++ ++/* Implement both __sync__and_fetch and __sync_fetch_and_ for ++ subword-sized quantities. 
*/ ++ ++#define SUBWORD_SYNC_OP(OP, PFX_OP, INF_OP, TYPE, WIDTH, RETURN) \ ++ TYPE HIDDEN \ ++ NAME##_##RETURN (OP, WIDTH) (TYPE *ptr, TYPE val) \ ++ { \ ++ int *wordptr = (int *) ((unsigned int) ptr & ~3); \ ++ unsigned int mask, shift, oldval, newval; \ ++ int failure; \ ++ \ ++ shift = (((unsigned int) ptr & 3) << 3) ^ INVERT_MASK_##WIDTH; \ ++ mask = MASK_##WIDTH << shift; \ ++ \ ++ do { \ ++ oldval = *wordptr; \ ++ newval = ((PFX_OP ((oldval & mask) >> shift) \ ++ INF_OP (unsigned int) val) << shift) & mask; \ ++ newval |= oldval & ~mask; \ ++ failure = __kernel_cmpxchg (oldval, newval, wordptr); \ ++ } while (failure != 0); \ ++ \ ++ return (RETURN & mask) >> shift; \ ++ } ++ ++SUBWORD_SYNC_OP (add, , +, short, 2, oldval) ++SUBWORD_SYNC_OP (sub, , -, short, 2, oldval) ++SUBWORD_SYNC_OP (or, , |, short, 2, oldval) ++SUBWORD_SYNC_OP (and, , &, short, 2, oldval) ++SUBWORD_SYNC_OP (xor, , ^, short, 2, oldval) ++SUBWORD_SYNC_OP (nand, ~, &, short, 2, oldval) ++ ++SUBWORD_SYNC_OP (add, , +, char, 1, oldval) ++SUBWORD_SYNC_OP (sub, , -, char, 1, oldval) ++SUBWORD_SYNC_OP (or, , |, char, 1, oldval) ++SUBWORD_SYNC_OP (and, , &, char, 1, oldval) ++SUBWORD_SYNC_OP (xor, , ^, char, 1, oldval) ++SUBWORD_SYNC_OP (nand, ~, &, char, 1, oldval) ++ ++#define OP_AND_FETCH_WORD(OP, PFX_OP, INF_OP) \ ++ int HIDDEN \ ++ __sync_##OP##_and_fetch_4 (int *ptr, int val) \ ++ { \ ++ int tmp, failure; \ ++ \ ++ do { \ ++ tmp = *ptr; \ ++ failure = __kernel_cmpxchg (tmp, PFX_OP tmp INF_OP val, ptr); \ ++ } while (failure != 0); \ ++ \ ++ return PFX_OP tmp INF_OP val; \ ++ } ++ ++OP_AND_FETCH_WORD (add, , +) ++OP_AND_FETCH_WORD (sub, , -) ++OP_AND_FETCH_WORD (or, , |) ++OP_AND_FETCH_WORD (and, , &) ++OP_AND_FETCH_WORD (xor, , ^) ++OP_AND_FETCH_WORD (nand, ~, &) ++ ++SUBWORD_SYNC_OP (add, , +, short, 2, newval) ++SUBWORD_SYNC_OP (sub, , -, short, 2, newval) ++SUBWORD_SYNC_OP (or, , |, short, 2, newval) ++SUBWORD_SYNC_OP (and, , &, short, 2, newval) ++SUBWORD_SYNC_OP (xor, , ^, short, 2, newval) ++SUBWORD_SYNC_OP (nand, ~, &, short, 2, newval) ++ ++SUBWORD_SYNC_OP (add, , +, char, 1, newval) ++SUBWORD_SYNC_OP (sub, , -, char, 1, newval) ++SUBWORD_SYNC_OP (or, , |, char, 1, newval) ++SUBWORD_SYNC_OP (and, , &, char, 1, newval) ++SUBWORD_SYNC_OP (xor, , ^, char, 1, newval) ++SUBWORD_SYNC_OP (nand, ~, &, char, 1, newval) ++ ++int HIDDEN ++__sync_val_compare_and_swap_4 (int *ptr, int oldval, int newval) ++{ ++ int actual_oldval, fail; ++ ++ while (1) ++ { ++ actual_oldval = *ptr; ++ ++ if (oldval != actual_oldval) ++ return actual_oldval; ++ ++ fail = __kernel_cmpxchg (actual_oldval, newval, ptr); ++ ++ if (!fail) ++ return oldval; ++ } ++} ++ ++#define SUBWORD_VAL_CAS(TYPE, WIDTH) \ ++ TYPE HIDDEN \ ++ __sync_val_compare_and_swap_##WIDTH (TYPE *ptr, TYPE oldval, \ ++ TYPE newval) \ ++ { \ ++ int *wordptr = (int *)((unsigned int) ptr & ~3), fail; \ ++ unsigned int mask, shift, actual_oldval, actual_newval; \ ++ \ ++ shift = (((unsigned int) ptr & 3) << 3) ^ INVERT_MASK_##WIDTH; \ ++ mask = MASK_##WIDTH << shift; \ ++ \ ++ while (1) \ ++ { \ ++ actual_oldval = *wordptr; \ ++ \ ++ if (((actual_oldval & mask) >> shift) != (unsigned int) oldval) \ ++ return (actual_oldval & mask) >> shift; \ ++ \ ++ actual_newval = (actual_oldval & ~mask) \ ++ | (((unsigned int) newval << shift) & mask); \ ++ \ ++ fail = __kernel_cmpxchg (actual_oldval, actual_newval, \ ++ wordptr); \ ++ \ ++ if (!fail) \ ++ return oldval; \ ++ } \ ++ } ++ ++SUBWORD_VAL_CAS (short, 2) ++SUBWORD_VAL_CAS (char, 1) ++ ++typedef unsigned char bool; ++ ++bool 
HIDDEN ++__sync_bool_compare_and_swap_4 (int *ptr, int oldval, int newval) ++{ ++ int failure = __kernel_cmpxchg (oldval, newval, ptr); ++ return (failure == 0); ++} ++ ++#define SUBWORD_BOOL_CAS(TYPE, WIDTH) \ ++ bool HIDDEN \ ++ __sync_bool_compare_and_swap_##WIDTH (TYPE *ptr, TYPE oldval, \ ++ TYPE newval) \ ++ { \ ++ TYPE actual_oldval \ ++ = __sync_val_compare_and_swap_##WIDTH (ptr, oldval, newval); \ ++ return (oldval == actual_oldval); \ ++ } ++ ++SUBWORD_BOOL_CAS (short, 2) ++SUBWORD_BOOL_CAS (char, 1) ++ ++void HIDDEN ++__sync_synchronize (void) ++{ ++ __kernel_dmb (); ++} ++ ++int HIDDEN ++__sync_lock_test_and_set_4 (int *ptr, int val) ++{ ++ int failure, oldval; ++ ++ do { ++ oldval = *ptr; ++ failure = __kernel_cmpxchg (oldval, val, ptr); ++ } while (failure != 0); ++ ++ return oldval; ++} ++ ++#define SUBWORD_TEST_AND_SET(TYPE, WIDTH) \ ++ TYPE HIDDEN \ ++ __sync_lock_test_and_set_##WIDTH (TYPE *ptr, TYPE val) \ ++ { \ ++ int failure; \ ++ unsigned int oldval, newval, shift, mask; \ ++ int *wordptr = (int *) ((unsigned int) ptr & ~3); \ ++ \ ++ shift = (((unsigned int) ptr & 3) << 3) ^ INVERT_MASK_##WIDTH; \ ++ mask = MASK_##WIDTH << shift; \ ++ \ ++ do { \ ++ oldval = *wordptr; \ ++ newval = (oldval & ~mask) \ ++ | (((unsigned int) val << shift) & mask); \ ++ failure = __kernel_cmpxchg (oldval, newval, wordptr); \ ++ } while (failure != 0); \ ++ \ ++ return (oldval & mask) >> shift; \ ++ } ++ ++SUBWORD_TEST_AND_SET (short, 2) ++SUBWORD_TEST_AND_SET (char, 1) ++ ++#define SYNC_LOCK_RELEASE(TYPE, WIDTH) \ ++ void HIDDEN \ ++ __sync_lock_release_##WIDTH (TYPE *ptr) \ ++ { \ ++ *ptr = 0; \ ++ __kernel_dmb (); \ ++ } ++ ++SYNC_LOCK_RELEASE (int, 4) ++SYNC_LOCK_RELEASE (short, 2) ++SYNC_LOCK_RELEASE (char, 1) +--- a/gcc/config/arm/linux-eabi.h ++++ b/gcc/config/arm/linux-eabi.h +@@ -66,7 +66,7 @@ + /* At this point, bpabi.h will have clobbered LINK_SPEC. We want to + use the GNU/Linux version, not the generic BPABI version. */ + #undef LINK_SPEC +-#define LINK_SPEC LINUX_TARGET_LINK_SPEC ++#define LINK_SPEC LINUX_TARGET_LINK_SPEC BE8_LINK_SPEC + + /* Use the default LIBGCC_SPEC, not the version in linux-elf.h, as we + do not use -lfloat. */ +--- /dev/null ++++ b/gcc/config/arm/marvell-f-vfp.md +@@ -0,0 +1,157 @@ ++;; Marvell 2850 VFP pipeline description ++;; Copyright (C) 2007 Free Software Foundation, Inc. ++;; Written by CodeSourcery, Inc. ++ ++;; This file is part of GCC. ++ ++;; GCC is free software; you can redistribute it and/or modify it ++;; under the terms of the GNU General Public License as published ++;; by the Free Software Foundation; either version 3, or (at your ++;; option) any later version. ++ ++;; GCC is distributed in the hope that it will be useful, but WITHOUT ++;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY ++;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public ++;; License for more details. ++ ++;; You should have received a copy of the GNU General Public License ++;; along with GCC; see the file COPYING3. If not see ++;; . ++ ++;; This automaton provides a pipeline description for the Marvell ++;; 2850 core. ++;; ++;; The model given here assumes that the condition for all conditional ++;; instructions is "true", i.e., that all of the instructions are ++;; actually executed. ++ ++(define_automaton "marvell_f_vfp") ++ ++;; This is a single-issue VFPv2 implementation with the following execution ++;; units: ++;; ++;; 1. Addition/subtraction unit; takes three cycles, pipelined. ++;; 2. 
Multiplication unit; takes four cycles, pipelined. ++;; 3. Add buffer, used for multiply-accumulate (see below). ++;; 4. Divide/square root unit, not pipelined. ++;; For single-precision: takes sixteen cycles, can accept another insn ++;; after fifteen cycles. ++;; For double-precision: takes thirty-one cycles, can accept another insn ++;; after thirty cycles. ++;; 5. Single-cycle unit, pipelined. ++;; This does absolute value/copy/negate/compare in one cycle and ++;; conversion in two cycles. ++;; ++;; When all three operands of a multiply-accumulate instruction are ready, ++;; one is issued to the add buffer (which can hold six operands in a FIFO) ++;; and the two to be multiplied are issued to the multiply unit. After ++;; four cycles in the multiply unit, one cycle is taken to issue the ++;; operand from the add buffer plus the multiplication result to the ++;; addition/subtraction unit. That issue takes priority over any add/sub ++;; instruction waiting at the normal issue stage, but may be performed in ++;; parallel with the issue of a non-add/sub instruction. The total time ++;; for a multiply-accumulate instruction to pass through the execution ++;; units is hence eight cycles. ++;; ++;; We do not need to explicitly model the add buffer because it can ++;; always issue the instruction at the head of its FIFO (due to the above ++;; priority rule) and there are more spaces in the add buffer (six) than ++;; there are stages (four) in the multiplication unit. ++;; ++;; Two instructions may be retired at once from the head of an 8-entry ++;; reorder buffer. Data from these first two instructions only may be ++;; forwarded to the inputs of the issue unit. We assume that the ++;; pressure on the reorder buffer will be sufficiently low that every ++;; instruction entering it will be eligible for data forwarding. Since ++;; data is forwarded to the issue unit and not the execution units (so ++;; for example single-cycle instructions cannot be issued back-to-back), ++;; the latencies given below are the cycle counts above plus one. ++ ++(define_cpu_unit "mf_vfp_issue" "marvell_f_vfp") ++(define_cpu_unit "mf_vfp_add" "marvell_f_vfp") ++(define_cpu_unit "mf_vfp_mul" "marvell_f_vfp") ++(define_cpu_unit "mf_vfp_div" "marvell_f_vfp") ++(define_cpu_unit "mf_vfp_single_cycle" "marvell_f_vfp") ++ ++;; An attribute to indicate whether our reservations are applicable. ++ ++(define_attr "marvell_f_vfp" "yes,no" ++ (const (if_then_else (and (eq_attr "tune" "marvell_f") ++ (eq_attr "fpu" "vfp")) ++ (const_string "yes") (const_string "no")))) ++ ++;; Reservations of functional units. The nothing*2 reservations at the ++;; start of many of the reservation strings correspond to the decode ++;; stages. We need to have these reservations so that we can correctly ++;; reserve parts of the core's A1 pipeline for loads and stores. For ++;; that case (since loads skip E1) the pipelines line up thus: ++;; A1 pipe: Issue E2 OF WR WB ... ++;; VFP pipe: Fetch Decode1 Decode2 Issue Execute1 ... ++;; For a load, we need to make a reservation of E2, and thus we must ++;; use Decode1 as the starting point for all VFP reservations here. ++;; ++;; For reservations of pipelined VFP execution units we only reserve ++;; the execution unit for the first execution cycle, omitting any trailing ++;; "nothing" reservations. 
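As a cross-check of the reservations that follow, each latency is the execution cycle count given above plus one, reflecting the forwarding of results to the issue unit rather than to the execution units:

  addition/subtraction      3 + 1 = 4   (marvell_f_vfp_add)
  multiplication            4 + 1 = 5   (marvell_f_vfp_mul)
  single-precision divide  16 + 1 = 17  (marvell_f_vfp_divs; the divide unit is
                                         reserved for 15 cycles, so a second
                                         divide may start 15 cycles later)
  double-precision divide  31 + 1 = 32  (marvell_f_vfp_divd; unit reserved for
                                         30 cycles)
  multiply-accumulate       4 (multiply) + 1 (hand-off via the add buffer)
                            + 3 (add) = 8, plus 1 = 9   (marvell_f_vfp_mac)
  single-cycle operations   1 + 1 = 2; conversions 2 + 1 = 3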
++ ++(define_insn_reservation "marvell_f_vfp_add" 4 ++ (and (eq_attr "marvell_f_vfp" "yes") ++ (eq_attr "type" "fadds,faddd")) ++ "nothing*2,mf_vfp_issue,mf_vfp_add") ++ ++(define_insn_reservation "marvell_f_vfp_mul" 5 ++ (and (eq_attr "marvell_f_vfp" "yes") ++ (eq_attr "type" "fmuls,fmuld")) ++ "nothing*2,mf_vfp_issue,mf_vfp_mul") ++ ++(define_insn_reservation "marvell_f_vfp_divs" 17 ++ (and (eq_attr "marvell_f_vfp" "yes") ++ (eq_attr "type" "fdivs")) ++ "nothing*2,mf_vfp_issue,mf_vfp_div*15") ++ ++(define_insn_reservation "marvell_f_vfp_divd" 32 ++ (and (eq_attr "marvell_f_vfp" "yes") ++ (eq_attr "type" "fdivd")) ++ "nothing*2,mf_vfp_issue,mf_vfp_div*30") ++ ++;; The DFA lookahead is small enough that the "add" reservation here ++;; will always take priority over any addition/subtraction instruction ++;; issued five cycles after the multiply-accumulate instruction, as ++;; required. ++(define_insn_reservation "marvell_f_vfp_mac" 9 ++ (and (eq_attr "marvell_f_vfp" "yes") ++ (eq_attr "type" "fmacs,fmacd")) ++ "nothing*2,mf_vfp_issue,mf_vfp_mul,nothing*4,mf_vfp_add") ++ ++(define_insn_reservation "marvell_f_vfp_single" 2 ++ (and (eq_attr "marvell_f_vfp" "yes") ++ (eq_attr "type" "fcpys,ffariths,ffarithd,fcmps,fcmpd")) ++ "nothing*2,mf_vfp_issue,mf_vfp_single_cycle") ++ ++(define_insn_reservation "marvell_f_vfp_convert" 3 ++ (and (eq_attr "marvell_f_vfp" "yes") ++ (eq_attr "type" "f_cvt")) ++ "nothing*2,mf_vfp_issue,mf_vfp_single_cycle") ++ ++(define_insn_reservation "marvell_f_vfp_load" 2 ++ (and (eq_attr "marvell_f_vfp" "yes") ++ (eq_attr "type" "f_loads,f_loadd")) ++ "a1_e2+sram,a1_of,a1_wr+mf_vfp_issue,a1_wb+mf_vfp_single_cycle") ++ ++(define_insn_reservation "marvell_f_vfp_from_core" 2 ++ (and (eq_attr "marvell_f_vfp" "yes") ++ (eq_attr "type" "r_2_f")) ++ "a1_e2,a1_of,a1_wr+mf_vfp_issue,a1_wb+mf_vfp_single_cycle") ++ ++;; The interaction between the core and VFP pipelines during VFP ++;; store operations and core <-> VFP moves is not clear, so we guess. ++(define_insn_reservation "marvell_f_vfp_store" 3 ++ (and (eq_attr "marvell_f_vfp" "yes") ++ (eq_attr "type" "f_stores,f_stored")) ++ "a1_e2,a1_of,mf_vfp_issue,a1_wr+sram+mf_vfp_single_cycle") ++ ++(define_insn_reservation "marvell_f_vfp_to_core" 4 ++ (and (eq_attr "marvell_f_vfp" "yes") ++ (eq_attr "type" "f_2_r")) ++ "a1_e2,a1_of,a1_wr+mf_vfp_issue,a1_wb+mf_vfp_single_cycle") ++ +--- /dev/null ++++ b/gcc/config/arm/marvell-f.md +@@ -0,0 +1,364 @@ ++;; Marvell 2850 pipeline description ++;; Copyright (C) 2005, 2006, 2007 Free Software Foundation, Inc. ++;; Written by Marvell and CodeSourcery, Inc. ++ ++;; This file is part of GCC. ++ ++;; GCC is free software; you can redistribute it and/or modify it ++;; under the terms of the GNU General Public License as published ++;; by the Free Software Foundation; either version 3, or (at your ++;; option) any later version. ++ ++;; GCC is distributed in the hope that it will be useful, but WITHOUT ++;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY ++;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public ++;; License for more details. ++ ++;; You should have received a copy of the GNU General Public License ++;; along with GCC; see the file COPYING3. If not see ++;; . ++ ++;; This automaton provides a pipeline description for the Marvell ++;; 2850 core. ++;; ++;; The model given here assumes that the condition for all conditional ++;; instructions is "true", i.e., that all of the instructions are ++;; actually executed. 
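Note: in the marvell-f-vfp.md load/store reservations above, the first reserved cycle corresponds to the core A1 pipeline's E2 stage, as described in that file's header comment. For marvell_f_vfp_load, "a1_e2+sram, a1_of, a1_wr+mf_vfp_issue, a1_wb+mf_vfp_single_cycle" therefore reads as: the memory access in E2 (hence the sram unit), then OF, then the VFP issue stage alongside WR, then the single-cycle unit alongside WB, with a dependent VFP instruction able to issue two cycles after the load. The store and core-transfer reservations follow the same layout, with the core/VFP interaction for stores explicitly noted there as a guess.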
++ ++(define_automaton "marvell_f") ++ ++;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ++;; Pipelines ++;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ++ ++;; This is a dual-issue processor with three pipelines: ++;; ++;; 1. Arithmetic and load/store pipeline A1. ++;; Issue | E1 | E2 | OF | WR | WB for load-store instructions ++;; Issue | E1 | E2 | WB for arithmetic instructions ++;; ++;; 2. Arithmetic pipeline A2. ++;; Issue | E1 | E2 | WB ++;; ++;; 3. Multiply and multiply-accumulate pipeline. ++;; Issue | MAC1 | MAC2 | MAC3 | WB ++;; ++;; There are various bypasses modelled to a greater or lesser extent. ++;; ++;; Latencies in this file correspond to the number of cycles after ++;; the issue stage that it takes for the result of the instruction to ++;; be computed, or for its side-effects to occur. ++ ++(define_cpu_unit "a1_e1,a1_e2,a1_of,a1_wr,a1_wb" "marvell_f") ; ALU 1 ++(define_cpu_unit "a2_e1,a2_e2,a2_wb" "marvell_f") ; ALU 2 ++(define_cpu_unit "m_1,m_2,m_3,m_wb" "marvell_f") ; MAC ++ ++;; We define an SRAM cpu unit to enable us to describe conflicts ++;; between loads at the E2 stage and stores at the WR stage. ++ ++(define_cpu_unit "sram" "marvell_f") ++ ++;; Handling of dual-issue constraints. ++;; ++;; Certain pairs of instructions can be issued in parallel, and certain ++;; pairs cannot. We divide a subset of the instructions into groups as ++;; follows. ++;; ++;; - data processing 1 (mov, mvn); ++;; - data processing 2 (adc, add, and, bic, cmn, cmp, eor, orr, rsb, ++;; rsc, sbc, sub, teq, tst); ++;; - load single (ldr, ldrb, ldrbt, ldrt, ldrh, ldrsb, ldrsh); ++;; - store single (str, strb, strbt, strt, strh); ++;; - swap (swp, swpb); ++;; - pld; ++;; - count leading zeros and DSP add/sub (clz, qadd, qdadd, qsub, qdsub); ++;; - multiply 2 (mul, muls, smull, umull, smulxy, smulls, umulls); ++;; - multiply 3 (mla, mlas, smlal, umlal, smlaxy, smlalxy, smlawx, ++;; smlawy, smlals, umlals); ++;; - branches (b, bl, blx, bx). ++;; ++;; Ignoring conditional execution, it is a good approximation to the core ++;; to model that two instructions may only be issued in parallel if the ++;; following conditions are met. ++;; I. The instructions both fall into one of the above groups and their ++;; corresponding groups have a entry in the matrix below that is not X. ++;; II. The second instruction does not read any register updated by the ++;; first instruction (already enforced by the GCC scheduler). ++;; III. The second instruction does not need the carry flag updated by the ++;; first instruction. Currently we do not model this. ++;; ++;; First Second instruction group ++;; insn ++;; DP1 DP2 L S SWP PLD CLZ M2 M3 B ++;; ++;; DP1 ok ok ok ok ok ok ok ok ok ok ++;; DP2(1) ok ok ok ok ok ok ok ok ok ok ++;; DP2(2) ok (2) ok (4) ok ok ok ok X ok ++;; L } ++;; SWP } ok ok X X X X ok ok ok ok ++;; PLD } ++;; S(3) ok ok X X X X ok ok ok ok ++;; S(4) ok (2) X X X X ok ok X ok ++;; CLZ ok ok ok ok ok ok ok ok ok ok ++;; M2 ok ok ok ok ok ok ok X X ok ++;; M3 ok (2) ok (4) ok ok ok X X ok ++;; B ok ok ok ok ok ok ok ok ok ok ++;; ++;; (1) without register shift ++;; (2) with register shift ++;; (3) with immediate offset ++;; (4) with register offset ++;; ++;; We define a fake cpu unit "reg_shift_lock" to enforce constraints ++;; between instructions in groups DP2(2) and M3. 
All other ++;; constraints are enforced automatically by virtue of the limited ++;; number of pipelines available for the various operations, with ++;; the exception of constraints involving S(4) that we do not model. ++ ++(define_cpu_unit "reg_shift_lock" "marvell_f") ++ ++;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ++;; ALU instructions ++;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ++ ++;; 1. Certain logic operations can be retired after the E1 stage if ++;; the pipeline is not already retiring another instruction. In this ++;; model we assume this behaviour always holds for mov, mvn, and, orr, eor ++;; instructions. If a register shift is involved and the instruction is ++;; not mov or mvn, then a dual-issue constraint must be enforced. ++ ++;; The first two cases are separate so they can be identified for ++;; bypasses below. ++ ++(define_insn_reservation "marvell_f_alu_early_retire" 1 ++ (and (eq_attr "tune" "marvell_f") ++ (and (eq_attr "type" "alu") ++ (eq_attr "insn" "mov,mvn,and,orr,eor"))) ++ "(a1_e1,a1_wb)|(a2_e1,a2_wb)") ++ ++(define_insn_reservation "marvell_f_alu_early_retire_shift" 1 ++ (and (eq_attr "tune" "marvell_f") ++ (and (eq_attr "type" "alu_shift_reg") ++ (eq_attr "insn" "mov,mvn,and,orr,eor"))) ++ "(a1_e1,a1_wb)|(a2_e1,a2_wb)") ++ ++(define_insn_reservation "marvell_f_alu_early_retire_reg_shift1" 1 ++ (and (eq_attr "tune" "marvell_f") ++ (and (eq_attr "type" "alu_shift_reg") ++ (eq_attr "insn" "mov,mvn"))) ++ "(a1_e1,a1_wb)|(a2_e1,a2_wb)") ++ ++(define_insn_reservation "marvell_f_alu_early_retire_reg_shift2" 1 ++ (and (eq_attr "tune" "marvell_f") ++ (and (eq_attr "type" "alu_shift_reg") ++ (eq_attr "insn" "and,orr,eor"))) ++ "(reg_shift_lock+a1_e1,a1_wb)|(reg_shift_lock+a2_e1,a2_wb)") ++ ++;; 2. ALU operations with no shifted operand. These bypass the E1 stage if ++;; the E2 stage of the corresponding pipeline is clear; here, we always ++;; model this scenario [*]. We give the operation a latency of 1 yet reserve ++;; both E1 and E2 for it (thus preventing the GCC scheduler, in the case ++;; where both E1 and E2 of one pipeline are clear, from issuing one ++;; instruction to each). ++;; ++;; [*] The non-bypass case is a latency of two, reserving E1 on the first ++;; cycle and E2 on the next. Due to the way the scheduler works we ++;; have to choose between taking this as the default and taking the ++;; above case (with latency one) as the default; we choose the latter. ++ ++(define_insn_reservation "marvell_f_alu_op_bypass_e1" 1 ++ (and (eq_attr "tune" "marvell_f") ++ (and (eq_attr "type" "alu") ++ (not (eq_attr "insn" "mov,mvn,and,orr,eor")))) ++ "(a1_e1+a1_e2,a1_wb)|(a2_e1+a2_e2,a2_wb)") ++ ++;; 3. ALU operations with a shift-by-constant operand. ++ ++(define_insn_reservation "marvell_f_alu_shift_op" 2 ++ (and (eq_attr "tune" "marvell_f") ++ (and (eq_attr "type" "alu_shift") ++ (not (eq_attr "insn" "mov,mvn,and,orr,eor")))) ++ "(a1_e1,a1_e2,a1_wb)|(a2_e1,a2_e2,a2_wb)") ++ ++;; 4. ALU operations with a shift-by-register operand. Since the ++;; instruction is never mov or mvn, a dual-issue constraint must ++;; be enforced. 
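As a concrete reading of the matrix above: an instruction such as "and r0, r1, r2, lsl r3" falls into group DP2(2) while "mla r4, r5, r6, r7" falls into group M3, so the pair may not be issued together; in the reservations this is enforced by having both claim the fake reg_shift_lock unit. A register-shifted mov or mvn belongs to group DP1 instead, takes no lock, and may therefore still be paired with a multiply-accumulate.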
++ ++(define_insn_reservation "marvell_f_alu_shift_reg_op" 2 ++ (and (eq_attr "tune" "marvell_f") ++ (and (eq_attr "type" "alu_shift_reg") ++ (not (eq_attr "insn" "mov,mvn,and,orr,eor")))) ++ "(reg_shift_lock+a1_e1,a1_e2,a1_wb)|(reg_shift_lock+a2_e1,a2_e2,a2_wb)") ++ ++;; Given an ALU operation with shift (I1) followed by another ALU ++;; operation (I2), with I2 depending on the destination register Rd of I1 ++;; and with I2 not using that value as the amount or the starting value for ++;; a shift, then I1 and I2 may be issued to the same pipeline on ++;; consecutive cycles. In terms of this model that corresponds to I1 ++;; having a latency of one cycle. There are three cases for various ++;; I1 and I2 as follows. ++ ++;; (a) I1 has a constant or register shift and I2 doesn't have a shift at all. ++(define_bypass 1 "marvell_f_alu_shift_op,\ ++ marvell_f_alu_shift_reg_op" ++ "marvell_f_alu_op_bypass_e1,marvell_f_alu_early_retire") ++ ++;; (b) I1 has a constant or register shift and I2 has a constant shift. ++;; Rd must not provide the starting value for the shift. ++(define_bypass 1 "marvell_f_alu_shift_op,\ ++ marvell_f_alu_shift_reg_op" ++ "marvell_f_alu_shift_op,marvell_f_alu_early_retire_shift" ++ "arm_no_early_alu_shift_value_dep") ++ ++;; (c) I1 has a constant or register shift and I2 has a register shift. ++;; Rd must not provide the amount by which to shift. ++(define_bypass 1 "marvell_f_alu_shift_op,\ ++ marvell_f_alu_shift_reg_op" ++ "marvell_f_alu_shift_reg_op,\ ++ marvell_f_alu_early_retire_reg_shift1,\ ++ marvell_f_alu_early_retire_reg_shift2" ++ "arm_no_early_alu_shift_dep") ++ ++;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ++;; Multiplication instructions ++;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ++ ++;; Multiplication instructions in group "Multiply 2". ++ ++(define_insn_reservation "marvell_f_multiply_2" 3 ++ (and (eq_attr "tune" "marvell_f") ++ (eq_attr "insn" "mul,muls,smull,umull,smulxy,smulls,umulls")) ++ "m_1,m_2,m_3,m_wb") ++ ++;; Multiplication instructions in group "Multiply 3". There is a ++;; dual-issue constraint with non-multiplication ALU instructions ++;; to be respected here. ++ ++(define_insn_reservation "marvell_f_multiply_3" 3 ++ (and (eq_attr "tune" "marvell_f") ++ (eq_attr "insn" "mla,mlas,smlal,umlal,smlaxy,smlalxy,smlawx,\ ++ smlawy,smlals,umlals")) ++ "reg_shift_lock+m_1,m_2,m_3,m_wb") ++ ++;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ++;; Branch instructions ++;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ++ ++;; Conditional backward b instructions can have a zero-cycle penalty, and ++;; other conditional b and bl instructions have a one-cycle penalty if ++;; predicted correctly. Currently we model the zero-cycle case for all ++;; branches. ++ ++(define_insn_reservation "marvell_f_branches" 0 ++ (and (eq_attr "tune" "marvell_f") ++ (eq_attr "type" "branch")) ++ "nothing") ++ ++;; Call latencies are not predictable; a semi-arbitrary very large ++;; number is used as "positive infinity" for such latencies. 
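To make the bypass cases above concrete: if I1 is "add r0, r1, r2, lsl #3" and I2 is "sub r4, r0, r5", then I2 has no shift at all (case (a)), so I1 behaves as if it had a latency of one and the two can go down the same pipeline on consecutive cycles. If I2 is instead "add r4, r5, r0, lsl #1", the destination r0 supplies the value being shifted, the arm_no_early_alu_shift_value_dep guard rejects the bypass, and the default latency of two applies; likewise, an I2 that uses r0 as a register shift amount is excluded by arm_no_early_alu_shift_dep.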
++ ++(define_insn_reservation "marvell_f_call" 32 ++ (and (eq_attr "tune" "marvell_f") ++ (eq_attr "type" "call")) ++ "nothing") ++ ++;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ++;; Load/store instructions ++;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ++ ++;; The models for load/store instructions do not accurately describe ++;; the difference between operations with a base register writeback. ++;; These models assume that all memory references hit in dcache. ++ ++;; 1. Load/store for single registers. ++ ++;; The worst case for a load is when the load result is needed in E1 ++;; (for example for a register shift), giving a latency of four. Loads ++;; skip E1 and access memory at the E2 stage. ++ ++(define_insn_reservation "marvell_f_load1" 4 ++ (and (eq_attr "tune" "marvell_f") ++ (eq_attr "type" "load1,load_byte")) ++ "a1_e2+sram,a1_of,a1_wr,a1_wb") ++ ++;; The result for a load may be bypassed (to be available at the same ++;; time as the load arrives in the WR stage, so effectively at the OF ++;; stage) to the Rn operand at E2 with a latency of two. The result may ++;; be bypassed to a non-Rn operand at E2 with a latency of three. For ++;; instructions without shifts, detection of an Rn bypass situation is ++;; difficult (because some of the instruction patterns switch their ++;; operands), and so we do not model that here. For instructions with ++;; shifts, the operand used at E2 will always be Rn, and so we can ++;; model the latency-two bypass for these. ++ ++(define_bypass 2 "marvell_f_load1" ++ "marvell_f_alu_shift_op" ++ "arm_no_early_alu_shift_value_dep") ++ ++(define_bypass 2 "marvell_f_load1" ++ "marvell_f_alu_shift_reg_op" ++ "arm_no_early_alu_shift_dep") ++ ++;; Stores write at the WR stage and loads read at the E2 stage, giving ++;; a store latency of three. ++ ++(define_insn_reservation "marvell_f_store1" 3 ++ (and (eq_attr "tune" "marvell_f") ++ (eq_attr "type" "store1")) ++ "a1_e2,a1_of,a1_wr+sram,a1_wb") ++ ++;; 2. Load/store for two consecutive registers. These may be dealt ++;; with in the same number of cycles as single loads and stores. ++ ++(define_insn_reservation "marvell_f_load2" 4 ++ (and (eq_attr "tune" "marvell_f") ++ (eq_attr "type" "load2")) ++ "a1_e2+sram,a1_of,a1_wr,a1_wb") ++ ++(define_insn_reservation "marvell_f_store2" 3 ++ (and (eq_attr "tune" "marvell_f") ++ (eq_attr "type" "store2")) ++ "a1_e2,a1_of,a1_wr+sram,a1_wb") ++ ++;; The first word of a doubleword load is eligible for the latency-two ++;; bypass described above for single loads, but this is not modelled here. ++;; We do however assume that either word may also be bypassed with ++;; latency three for ALU operations with shifts (where the shift value and ++;; amount do not depend on the loaded value) and latency four for ALU ++;; operations without shifts. The latency four case is of course the default. ++ ++(define_bypass 3 "marvell_f_load2" ++ "marvell_f_alu_shift_op" ++ "arm_no_early_alu_shift_value_dep") ++ ++(define_bypass 3 "marvell_f_load2" ++ "marvell_f_alu_shift_reg_op" ++ "arm_no_early_alu_shift_dep") ++ ++;; 3. Load/store for more than two registers. ++ ++;; These instructions stall for an extra cycle in the decode stage; ++;; individual load/store instructions for each register are then issued. ++;; The load/store multiple instruction itself is removed from the decode ++;; stage at the same time as the final load/store instruction is issued. 
++;; To complicate matters, pairs of loads/stores referencing two ++;; consecutive registers will be issued together as doubleword operations. ++;; We model a 3-word load as an LDR plus an LDRD, and a 4-word load ++;; as two LDRDs; thus, these are allocated the same latencies (the ++;; latency for two consecutive loads plus one for the setup stall). ++;; The extra stall is modelled by reserving E1. ++ ++(define_insn_reservation "marvell_f_load3_4" 6 ++ (and (eq_attr "tune" "marvell_f") ++ (eq_attr "type" "load3,load4")) ++ "a1_e1,a1_e1+a1_e2+sram,a1_e2+sram+a1_of,a1_of+a1_wr,a1_wr+a1_wb,a1_wb") ++ ++;; Bypasses are possible for ldm as for single loads, but we do not ++;; model them here since the order of the constituent loads is ++;; difficult to predict. ++ ++(define_insn_reservation "marvell_f_store3_4" 5 ++ (and (eq_attr "tune" "marvell_f") ++ (eq_attr "type" "store3,store4")) ++ "a1_e1,a1_e1+a1_e2,a1_e2+a1_of,a1_of+a1_wr+sram,a1_wr+sram+a1_wb,a1_wb") ++ +--- /dev/null ++++ b/gcc/config/arm/montavista-linux.h +@@ -0,0 +1,33 @@ ++/* MontaVista GNU/Linux Configuration. ++ Copyright (C) 2009 ++ Free Software Foundation, Inc. ++ ++This file is part of GCC. ++ ++GCC is free software; you can redistribute it and/or modify ++it under the terms of the GNU General Public License as published by ++the Free Software Foundation; either version 3, or (at your option) ++any later version. ++ ++GCC is distributed in the hope that it will be useful, ++but WITHOUT ANY WARRANTY; without even the implied warranty of ++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++GNU General Public License for more details. ++ ++You should have received a copy of the GNU General Public License ++along with GCC; see the file COPYING3. If not see ++. */ ++ ++/* Add -tarmv6 and -tthumb2 options for convenience in generating multilibs. ++*/ ++#undef CC1_SPEC ++#define CC1_SPEC " \ ++ %{tarmv6: -march=armv6 -mfloat-abi=softfp ; \ ++ tthumb2: -mthumb -march=armv7-a -mfloat-abi=softfp ; \ ++ : -march=armv5t}" ++ ++/* The various C libraries each have their own subdirectory. */ ++#undef SYSROOT_SUFFIX_SPEC ++#define SYSROOT_SUFFIX_SPEC \ ++ "%{tarmv6:/armv6 ; \ ++ tthumb2:/thumb2}" +--- a/gcc/config/arm/neon-gen.ml ++++ b/gcc/config/arm/neon-gen.ml +@@ -402,7 +402,11 @@ let _ = + "extern \"C\" {"; + "#endif"; + ""; ++"#if defined (__vxworks) && defined (_WRS_KERNEL)"; ++"#include "; ++"#else"; + "#include "; ++"#endif"; + ""]; + deftypes (); + arrtypes (); +--- a/gcc/config/arm/neon-testgen.ml ++++ b/gcc/config/arm/neon-testgen.ml +@@ -51,8 +51,8 @@ let emit_prologue chan test_name = + Printf.fprintf chan "/* This file was autogenerated by neon-testgen. */\n\n"; + Printf.fprintf chan "/* { dg-do assemble } */\n"; + Printf.fprintf chan "/* { dg-require-effective-target arm_neon_ok } */\n"; +- Printf.fprintf chan +- "/* { dg-options \"-save-temps -O0 -mfpu=neon -mfloat-abi=softfp\" } */\n"; ++ Printf.fprintf chan "/* { dg-options \"-save-temps -O0\" } */\n"; ++ Printf.fprintf chan "/* { dg-add-options arm_neon } */\n"; + Printf.fprintf chan "\n#include \"arm_neon.h\"\n\n"; + Printf.fprintf chan "void test_%s (void)\n{\n" test_name + +--- a/gcc/config/arm/neon.md ++++ b/gcc/config/arm/neon.md +@@ -159,7 +159,8 @@ + (UNSPEC_VUZP1 201) + (UNSPEC_VUZP2 202) + (UNSPEC_VZIP1 203) +- (UNSPEC_VZIP2 204)]) ++ (UNSPEC_VZIP2 204) ++ (UNSPEC_MISALIGNED_ACCESS 205)]) + + ;; Double-width vector modes. + (define_mode_iterator VD [V8QI V4HI V2SI V2SF]) +@@ -427,76 +428,7 @@ + ;; neon_type attribute definitions. 
+ (define_attr "vqh_mnem" "vadd,vmin,vmax" (const_string "vadd")) + +-;; Classification of NEON instructions for scheduling purposes. +-;; Do not set this attribute and the "type" attribute together in +-;; any one instruction pattern. +-(define_attr "neon_type" +- "neon_int_1,\ +- neon_int_2,\ +- neon_int_3,\ +- neon_int_4,\ +- neon_int_5,\ +- neon_vqneg_vqabs,\ +- neon_vmov,\ +- neon_vaba,\ +- neon_vsma,\ +- neon_vaba_qqq,\ +- neon_mul_ddd_8_16_qdd_16_8_long_32_16_long,\ +- neon_mul_qqq_8_16_32_ddd_32,\ +- neon_mul_qdd_64_32_long_qqd_16_ddd_32_scalar_64_32_long_scalar,\ +- neon_mla_ddd_8_16_qdd_16_8_long_32_16_long,\ +- neon_mla_qqq_8_16,\ +- neon_mla_ddd_32_qqd_16_ddd_32_scalar_qdd_64_32_long_scalar_qdd_64_32_long,\ +- neon_mla_qqq_32_qqd_32_scalar,\ +- neon_mul_ddd_16_scalar_32_16_long_scalar,\ +- neon_mul_qqd_32_scalar,\ +- neon_mla_ddd_16_scalar_qdd_32_16_long_scalar,\ +- neon_shift_1,\ +- neon_shift_2,\ +- neon_shift_3,\ +- neon_vshl_ddd,\ +- neon_vqshl_vrshl_vqrshl_qqq,\ +- neon_vsra_vrsra,\ +- neon_fp_vadd_ddd_vabs_dd,\ +- neon_fp_vadd_qqq_vabs_qq,\ +- neon_fp_vsum,\ +- neon_fp_vmul_ddd,\ +- neon_fp_vmul_qqd,\ +- neon_fp_vmla_ddd,\ +- neon_fp_vmla_qqq,\ +- neon_fp_vmla_ddd_scalar,\ +- neon_fp_vmla_qqq_scalar,\ +- neon_fp_vrecps_vrsqrts_ddd,\ +- neon_fp_vrecps_vrsqrts_qqq,\ +- neon_bp_simple,\ +- neon_bp_2cycle,\ +- neon_bp_3cycle,\ +- neon_ldr,\ +- neon_str,\ +- neon_vld1_1_2_regs,\ +- neon_vld1_3_4_regs,\ +- neon_vld2_2_regs_vld1_vld2_all_lanes,\ +- neon_vld2_4_regs,\ +- neon_vld3_vld4,\ +- neon_vst1_1_2_regs_vst2_2_regs,\ +- neon_vst1_3_4_regs,\ +- neon_vst2_4_regs_vst3_vst4,\ +- neon_vst3_vst4,\ +- neon_vld1_vld2_lane,\ +- neon_vld3_vld4_lane,\ +- neon_vst1_vst2_lane,\ +- neon_vst3_vst4_lane,\ +- neon_vld3_vld4_all_lanes,\ +- neon_mcr,\ +- neon_mcr_2_mcrr,\ +- neon_mrc,\ +- neon_mrrc,\ +- neon_ldm_2,\ +- neon_stm_2,\ +- none" +- (const_string "none")) +- +-;; Predicates used for setting the above attribute. ++;; Predicates used for setting neon_type + + (define_mode_attr Is_float_mode [(V8QI "false") (V16QI "false") + (V4HI "false") (V8HI "false") +@@ -550,7 +482,7 @@ + + /* FIXME: If the memory layout is changed in big-endian mode, output_move_vfp + below must be changed to output_move_neon (which will use the +- element/structure loads/stores), and the constraint changed to 'Un' instead ++ element/structure loads/stores), and the constraint changed to 'Um' instead + of 'Uv'. 
*/ + + switch (which_alternative) +@@ -639,7 +571,8 @@ + default: gcc_unreachable (); + } + } +- [(set_attr "length" ",,")]) ++ [(set_attr "neon_type" "neon_int_1,neon_stm_2,neon_ldm_2") ++ (set_attr "length" ",,")]) + + (define_split + [(set (match_operand:EI 0 "s_register_operand" "") +@@ -726,6 +659,41 @@ + neon_disambiguate_copy (operands, dest, src, 4); + }) + ++(define_expand "movmisalign" ++ [(set (match_operand:VDQX 0 "nonimmediate_operand" "") ++ (unspec:VDQX [(match_operand:VDQX 1 "general_operand" "")] ++ UNSPEC_MISALIGNED_ACCESS))] ++ "TARGET_NEON && !BYTES_BIG_ENDIAN" ++{ ++ if (!s_register_operand (operands[0], mode) ++ && !s_register_operand (operands[1], mode)) ++ FAIL; ++}) ++ ++(define_insn "*movmisalign_neon" ++ [(set (match_operand:VDX 0 "nonimmediate_operand" "=Um,w") ++ (unspec:VDX [(match_operand:VDX 1 "general_operand" " w, Um")] ++ UNSPEC_MISALIGNED_ACCESS))] ++ "TARGET_NEON && !BYTES_BIG_ENDIAN ++ && ( s_register_operand (operands[0], mode) ++ || s_register_operand (operands[1], mode))" ++ "@ ++ vst1.\t{%P1}, %A0 ++ vld1.\t{%P0}, %A1" ++ [(set_attr "neon_type" "neon_vst1_1_2_regs_vst2_2_regs,neon_vld1_1_2_regs")]) ++ ++(define_insn "*movmisalign_neon" ++ [(set (match_operand:VQX 0 "nonimmediate_operand" "=Um,w") ++ (unspec:VQX [(match_operand:VQX 1 "general_operand" " w, Um")] ++ UNSPEC_MISALIGNED_ACCESS))] ++ "TARGET_NEON && !BYTES_BIG_ENDIAN ++ && ( s_register_operand (operands[0], mode) ++ || s_register_operand (operands[1], mode))" ++ "@ ++ vst1.\t{%q1}, %A0 ++ vld1.\t{%q0}, %A1" ++ [(set_attr "neon_type" "neon_vst1_1_2_regs_vst2_2_regs,neon_vld1_1_2_regs")]) ++ + (define_insn "vec_set_internal" + [(set (match_operand:VD 0 "s_register_operand" "=w") + (vec_merge:VD +@@ -735,7 +703,10 @@ + (match_operand:SI 2 "immediate_operand" "i")))] + "TARGET_NEON" + { +- operands[2] = GEN_INT (ffs ((int) INTVAL (operands[2]) - 1)); ++ int elt = ffs ((int) INTVAL (operands[2]) - 1); ++ if (BYTES_BIG_ENDIAN) ++ elt = GET_MODE_NUNITS (mode) - 1 - elt; ++ operands[2] = GEN_INT (elt); + + return "vmov%?.\t%P0[%c2], %1"; + } +@@ -757,6 +728,9 @@ + int hi = (elem / half_elts) * 2; + int regno = REGNO (operands[0]); + ++ if (BYTES_BIG_ENDIAN) ++ elt = half_elts - 1 - elt; ++ + operands[0] = gen_rtx_REG (mode, regno + hi); + operands[2] = GEN_INT (elt); + +@@ -804,7 +778,15 @@ + (match_operand:VD 1 "s_register_operand" "w") + (parallel [(match_operand:SI 2 "immediate_operand" "i")])))] + "TARGET_NEON" +- "vmov%?.\t%0, %P1[%c2]" ++{ ++ if (BYTES_BIG_ENDIAN) ++ { ++ int elt = INTVAL (operands[2]); ++ elt = GET_MODE_NUNITS (mode) - 1 - elt; ++ operands[2] = GEN_INT (elt); ++ } ++ return "vmov%?.\t%0, %P1[%c2]"; ++} + [(set_attr "predicable" "yes") + (set_attr "neon_type" "neon_bp_simple")] + ) +@@ -821,6 +803,9 @@ + int hi = (INTVAL (operands[2]) / half_elts) * 2; + int regno = REGNO (operands[1]); + ++ if (BYTES_BIG_ENDIAN) ++ elt = half_elts - 1 - elt; ++ + operands[1] = gen_rtx_REG (mode, regno + hi); + operands[2] = GEN_INT (elt); + +@@ -913,6 +898,50 @@ + (const_string "neon_mul_qqq_8_16_32_ddd_32")))))] + ) + ++(define_insn "*mul3add_neon" ++ [(set (match_operand:VDQ 0 "s_register_operand" "=w") ++ (plus:VDQ (mult:VDQ (match_operand:VDQ 2 "s_register_operand" "w") ++ (match_operand:VDQ 3 "s_register_operand" "w")) ++ (match_operand:VDQ 1 "s_register_operand" "0")))] ++ "TARGET_NEON" ++ "vmla.\t%0, %2, %3" ++ [(set (attr "neon_type") ++ (if_then_else (ne (symbol_ref "") (const_int 0)) ++ (if_then_else (ne (symbol_ref "") (const_int 0)) ++ (const_string "neon_fp_vmla_ddd") ++ 
(const_string "neon_fp_vmla_qqq")) ++ (if_then_else (ne (symbol_ref "") (const_int 0)) ++ (if_then_else ++ (ne (symbol_ref "") (const_int 0)) ++ (const_string "neon_mla_ddd_8_16_qdd_16_8_long_32_16_long") ++ (const_string "neon_mla_ddd_32_qqd_16_ddd_32_scalar_qdd_64_32_long_scalar_qdd_64_32_long")) ++ (if_then_else (ne (symbol_ref "") (const_int 0)) ++ (const_string "neon_mla_qqq_8_16") ++ (const_string "neon_mla_qqq_32_qqd_32_scalar")))))] ++) ++ ++(define_insn "*mul3negadd_neon" ++ [(set (match_operand:VDQ 0 "s_register_operand" "=w") ++ (minus:VDQ (match_operand:VDQ 1 "s_register_operand" "0") ++ (mult:VDQ (match_operand:VDQ 2 "s_register_operand" "w") ++ (match_operand:VDQ 3 "s_register_operand" "w"))))] ++ "TARGET_NEON" ++ "vmls.\t%0, %2, %3" ++ [(set (attr "neon_type") ++ (if_then_else (ne (symbol_ref "") (const_int 0)) ++ (if_then_else (ne (symbol_ref "") (const_int 0)) ++ (const_string "neon_fp_vmla_ddd") ++ (const_string "neon_fp_vmla_qqq")) ++ (if_then_else (ne (symbol_ref "") (const_int 0)) ++ (if_then_else ++ (ne (symbol_ref "") (const_int 0)) ++ (const_string "neon_mla_ddd_8_16_qdd_16_8_long_32_16_long") ++ (const_string "neon_mla_ddd_32_qqd_16_ddd_32_scalar_qdd_64_32_long_scalar_qdd_64_32_long")) ++ (if_then_else (ne (symbol_ref "") (const_int 0)) ++ (const_string "neon_mla_qqq_8_16") ++ (const_string "neon_mla_qqq_32_qqd_32_scalar")))))] ++) ++ + (define_insn "ior3" + [(set (match_operand:VDQ 0 "s_register_operand" "=w,w") + (ior:VDQ (match_operand:VDQ 1 "s_register_operand" "w,0") +@@ -2413,7 +2442,15 @@ + (match_operand:VD 1 "s_register_operand" "w") + (parallel [(match_operand:SI 2 "immediate_operand" "i")]))))] + "TARGET_NEON" +- "vmov%?.s\t%0, %P1[%c2]" ++{ ++ if (BYTES_BIG_ENDIAN) ++ { ++ int elt = INTVAL (operands[2]); ++ elt = GET_MODE_NUNITS (mode) - 1 - elt; ++ operands[2] = GEN_INT (elt); ++ } ++ return "vmov%?.s\t%0, %P1[%c2]"; ++} + [(set_attr "predicable" "yes") + (set_attr "neon_type" "neon_bp_simple")] + ) +@@ -2425,7 +2462,15 @@ + (match_operand:VD 1 "s_register_operand" "w") + (parallel [(match_operand:SI 2 "immediate_operand" "i")]))))] + "TARGET_NEON" +- "vmov%?.u\t%0, %P1[%c2]" ++{ ++ if (BYTES_BIG_ENDIAN) ++ { ++ int elt = INTVAL (operands[2]); ++ elt = GET_MODE_NUNITS (mode) - 1 - elt; ++ operands[2] = GEN_INT (elt); ++ } ++ return "vmov%?.u\t%0, %P1[%c2]"; ++} + [(set_attr "predicable" "yes") + (set_attr "neon_type" "neon_bp_simple")] + ) +@@ -2442,10 +2487,14 @@ + int regno = REGNO (operands[1]); + unsigned int halfelts = GET_MODE_NUNITS (mode) / 2; + unsigned int elt = INTVAL (operands[2]); ++ unsigned int elt_adj = elt % halfelts; ++ ++ if (BYTES_BIG_ENDIAN) ++ elt_adj = halfelts - 1 - elt_adj; + + ops[0] = operands[0]; + ops[1] = gen_rtx_REG (mode, regno + 2 * (elt / halfelts)); +- ops[2] = GEN_INT (elt % halfelts); ++ ops[2] = GEN_INT (elt_adj); + output_asm_insn ("vmov%?.s\t%0, %P1[%c2]", ops); + + return ""; +@@ -2466,10 +2515,14 @@ + int regno = REGNO (operands[1]); + unsigned int halfelts = GET_MODE_NUNITS (mode) / 2; + unsigned int elt = INTVAL (operands[2]); ++ unsigned int elt_adj = elt % halfelts; ++ ++ if (BYTES_BIG_ENDIAN) ++ elt_adj = halfelts - 1 - elt_adj; + + ops[0] = operands[0]; + ops[1] = gen_rtx_REG (mode, regno + 2 * (elt / halfelts)); +- ops[2] = GEN_INT (elt % halfelts); ++ ops[2] = GEN_INT (elt_adj); + output_asm_insn ("vmov%?.u\t%0, %P1[%c2]", ops); + + return ""; +@@ -2490,6 +2543,20 @@ + + neon_lane_bounds (operands[2], 0, GET_MODE_NUNITS (mode)); + ++ if (BYTES_BIG_ENDIAN) ++ { ++ /* The intrinsics are defined in terms 
of a model where the ++ element ordering in memory is vldm order, whereas the generic ++ RTL is defined in terms of a model where the element ordering ++ in memory is array order. Convert the lane number to conform ++ to this model. */ ++ unsigned int elt = INTVAL (operands[2]); ++ unsigned int reg_nelts ++ = 64 / GET_MODE_BITSIZE (GET_MODE_INNER (mode)); ++ elt ^= reg_nelts - 1; ++ operands[2] = GEN_INT (elt); ++ } ++ + if ((magic & 3) == 3 || GET_MODE_BITSIZE (GET_MODE_INNER (mode)) == 32) + insn = gen_vec_extract (operands[0], operands[1], operands[2]); + else +--- a/gcc/config/arm/netbsd.h ++++ b/gcc/config/arm/netbsd.h +@@ -101,7 +101,7 @@ + /* Although not normally relevant (since by default, all aggregates + are returned in memory) compiling some parts of libc requires + non-APCS style struct returns. */ +-#undef RETURN_IN_MEMORY ++#undef TARGET_RETURN_IN_MEMORY + + /* VERY BIG NOTE : Change of structure alignment for RiscBSD. + There are consequences you should be aware of... +--- /dev/null ++++ b/gcc/config/arm/nocrt0.h +@@ -0,0 +1,24 @@ ++/* Definitions for generic libgloss based cofigs where crt0 is supplied by ++ the linker script. ++ Copyright (C) 2006 Free Software Foundation, Inc. ++ ++ This file is part of GCC. ++ ++ GCC is free software; you can redistribute it and/or modify it ++ under the terms of the GNU General Public License as published ++ by the Free Software Foundation; either version 3, or (at your ++ option) any later version. ++ ++ GCC is distributed in the hope that it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY ++ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public ++ License for more details. ++ ++ You should have received a copy of the GNU General Public License ++ along with GCC; see the file COPYING3. If not see ++ . */ ++ ++#undef STARTFILE_SPEC ++#define STARTFILE_SPEC " crti%O%s crtbegin%O%s" ++ ++#define LIB_SPEC "-lc" +--- a/gcc/config/arm/predicates.md ++++ b/gcc/config/arm/predicates.md +@@ -168,6 +168,11 @@ + (and (match_code "plus,minus,ior,xor,and") + (match_test "mode == GET_MODE (op)"))) + ++;; True for plus/minus operators ++(define_special_predicate "plusminus_operator" ++ (and (match_code "plus,minus") ++ (match_test "mode == GET_MODE (op)"))) ++ + ;; True for logical binary operators. 
+ (define_special_predicate "logical_binary_operator" + (and (match_code "ior,xor,and") +@@ -291,6 +296,9 @@ + HOST_WIDE_INT i = 1, base = 0; + rtx elt; + ++ if (low_irq_latency) ++ return false; ++ + if (count <= 1 + || GET_CODE (XVECEXP (op, 0, 0)) != SET) + return false; +@@ -348,6 +356,9 @@ + HOST_WIDE_INT i = 1, base = 0; + rtx elt; + ++ if (low_irq_latency) ++ return false; ++ + if (count <= 1 + || GET_CODE (XVECEXP (op, 0, 0)) != SET) + return false; +--- /dev/null ++++ b/gcc/config/arm/sfp-machine.h +@@ -0,0 +1,100 @@ ++#define _FP_W_TYPE_SIZE 32 ++#define _FP_W_TYPE unsigned long ++#define _FP_WS_TYPE signed long ++#define _FP_I_TYPE long ++ ++#define _FP_MUL_MEAT_S(R,X,Y) \ ++ _FP_MUL_MEAT_1_wide(_FP_WFRACBITS_S,R,X,Y,umul_ppmm) ++#define _FP_MUL_MEAT_D(R,X,Y) \ ++ _FP_MUL_MEAT_2_wide(_FP_WFRACBITS_D,R,X,Y,umul_ppmm) ++#define _FP_MUL_MEAT_Q(R,X,Y) \ ++ _FP_MUL_MEAT_4_wide(_FP_WFRACBITS_Q,R,X,Y,umul_ppmm) ++ ++#define _FP_DIV_MEAT_S(R,X,Y) _FP_DIV_MEAT_1_loop(S,R,X,Y) ++#define _FP_DIV_MEAT_D(R,X,Y) _FP_DIV_MEAT_2_udiv(D,R,X,Y) ++#define _FP_DIV_MEAT_Q(R,X,Y) _FP_DIV_MEAT_4_udiv(Q,R,X,Y) ++ ++#define _FP_NANFRAC_H ((_FP_QNANBIT_H << 1) - 1) ++#define _FP_NANFRAC_S ((_FP_QNANBIT_S << 1) - 1) ++#define _FP_NANFRAC_D ((_FP_QNANBIT_D << 1) - 1), -1 ++#define _FP_NANFRAC_Q ((_FP_QNANBIT_Q << 1) - 1), -1, -1, -1 ++#define _FP_NANSIGN_H 0 ++#define _FP_NANSIGN_S 0 ++#define _FP_NANSIGN_D 0 ++#define _FP_NANSIGN_Q 0 ++ ++#define _FP_KEEPNANFRACP 1 ++ ++/* Someone please check this. */ ++#define _FP_CHOOSENAN(fs, wc, R, X, Y, OP) \ ++ do { \ ++ if ((_FP_FRAC_HIGH_RAW_##fs(X) & _FP_QNANBIT_##fs) \ ++ && !(_FP_FRAC_HIGH_RAW_##fs(Y) & _FP_QNANBIT_##fs)) \ ++ { \ ++ R##_s = Y##_s; \ ++ _FP_FRAC_COPY_##wc(R,Y); \ ++ } \ ++ else \ ++ { \ ++ R##_s = X##_s; \ ++ _FP_FRAC_COPY_##wc(R,X); \ ++ } \ ++ R##_c = FP_CLS_NAN; \ ++ } while (0) ++ ++#define __LITTLE_ENDIAN 1234 ++#define __BIG_ENDIAN 4321 ++ ++#if defined __ARMEB__ ++# define __BYTE_ORDER __BIG_ENDIAN ++#else ++# define __BYTE_ORDER __LITTLE_ENDIAN ++#endif ++ ++ ++/* Define ALIASNAME as a strong alias for NAME. */ ++# define strong_alias(name, aliasname) _strong_alias(name, aliasname) ++# define _strong_alias(name, aliasname) \ ++ extern __typeof (name) aliasname __attribute__ ((alias (#name))); ++ ++#ifdef __ARM_EABI__ ++/* Rename functions to their EABI names. */ ++/* The comparison functions need wrappers for EABI semantics, so ++ leave them unmolested. 
*/ ++#define __negsf2 __aeabi_fneg ++#define __subsf3 __aeabi_fsub ++#define __addsf3 __aeabi_fadd ++#define __floatunsisf __aeabi_ui2f ++#define __floatsisf __aeabi_i2f ++#define __floatundisf __aeabi_ul2f ++#define __floatdisf __aeabi_l2f ++#define __mulsf3 __aeabi_fmul ++#define __divsf3 __aeabi_fdiv ++#define __unordsf2 __aeabi_fcmpun ++#define __fixsfsi __aeabi_f2iz ++#define __fixunssfsi __aeabi_f2uiz ++#define __fixsfdi __aeabi_f2lz ++#define __fixunssfdi __aeabi_f2ulz ++#define __floatdisf __aeabi_l2f ++ ++#define __negdf2 __aeabi_dneg ++#define __subdf3 __aeabi_dsub ++#define __adddf3 __aeabi_dadd ++#define __floatunsidf __aeabi_ui2d ++#define __floatsidf __aeabi_i2d ++#define __extendsfdf2 __aeabi_f2d ++#define __truncdfsf2 __aeabi_d2f ++#define __floatundidf __aeabi_ul2d ++#define __floatdidf __aeabi_l2d ++#define __muldf3 __aeabi_dmul ++#define __divdf3 __aeabi_ddiv ++#define __unorddf2 __aeabi_dcmpun ++#define __fixdfsi __aeabi_d2iz ++#define __fixunsdfsi __aeabi_d2uiz ++#define __fixdfdi __aeabi_d2lz ++#define __fixunsdfdi __aeabi_d2ulz ++#define __floatdidf __aeabi_l2d ++#define __extendhfsf2 __gnu_h2f_ieee ++#define __truncsfhf2 __gnu_f2h_ieee ++ ++#endif /* __ARM_EABI__ */ +--- a/gcc/config/arm/symbian.h ++++ b/gcc/config/arm/symbian.h +@@ -101,3 +101,5 @@ + + /* SymbianOS cannot merge entities with vague linkage at runtime. */ + #define TARGET_ARM_DYNAMIC_VAGUE_LINKAGE_P false ++ ++#define TARGET_DEFAULT_WORD_RELOCATIONS 1 +--- a/gcc/config/arm/t-arm ++++ b/gcc/config/arm/t-arm +@@ -13,7 +13,9 @@ MD_INCLUDES= $(srcdir)/config/arm/arm-t + $(srcdir)/config/arm/iwmmxt.md \ + $(srcdir)/config/arm/vfp.md \ + $(srcdir)/config/arm/neon.md \ +- $(srcdir)/config/arm/thumb2.md ++ $(srcdir)/config/arm/thumb2.md \ ++ $(srcdir)/config/arm/marvell-f.md \ ++ $(srcdir)/config/arm/hwdiv.md + + s-config s-conditions s-flags s-codes s-constants s-emit s-recog s-preds \ + s-opinit s-extract s-peep s-attr s-attrtab s-output: $(MD_INCLUDES) +--- a/gcc/config/arm/t-arm-elf ++++ b/gcc/config/arm/t-arm-elf +@@ -1,25 +1,68 @@ + LIB1ASMSRC = arm/lib1funcs.asm ++# For most CPUs we have an assembly soft-float implementations. ++# However this is not true for ARMv6M. Here we want to use the soft-fp C ++# implementation. The soft-fp code is only build for ARMv6M. This pulls ++# in the asm implementation for other CPUs. 
+ LIB1ASMFUNCS = _udivsi3 _divsi3 _umodsi3 _modsi3 _dvmd_tls _bb_init_func \ + _call_via_rX _interwork_call_via_rX \ + _lshrdi3 _ashrdi3 _ashldi3 \ +- _negdf2 _addsubdf3 _muldivdf3 _cmpdf2 _unorddf2 _fixdfsi _fixunsdfsi \ +- _truncdfsf2 _negsf2 _addsubsf3 _muldivsf3 _cmpsf2 _unordsf2 \ +- _fixsfsi _fixunssfsi _floatdidf _floatdisf _floatundidf _floatundisf ++ _arm_negdf2 _arm_addsubdf3 _arm_muldivdf3 _arm_cmpdf2 _arm_unorddf2 \ ++ _arm_fixdfsi _arm_fixunsdfsi \ ++ _arm_truncdfsf2 _arm_negsf2 _arm_addsubsf3 _arm_muldivsf3 \ ++ _arm_cmpsf2 _arm_unordsf2 _arm_fixsfsi _arm_fixunssfsi \ ++ _arm_floatdidf _arm_floatdisf _arm_floatundidf _arm_floatundisf ++ ++# We build 4 multilibs: ++# ./ (default) ++# thumb/ -mthumb ++# thumb2/ -mthumb -march=armv7 ++# armv6-m/ -mthumb -march=armv6-m + +-MULTILIB_OPTIONS = marm/mthumb +-MULTILIB_DIRNAMES = arm thumb ++MULTILIB_OPTIONS = mthumb ++MULTILIB_DIRNAMES = thumb + MULTILIB_EXCEPTIONS = + MULTILIB_MATCHES = + +-#MULTILIB_OPTIONS += march=armv7 +-#MULTILIB_DIRNAMES += thumb2 +-#MULTILIB_EXCEPTIONS += march=armv7* marm/*march=armv7* +-#MULTILIB_MATCHES += march?armv7=march?armv7-a +-#MULTILIB_MATCHES += march?armv7=march?armv7-r +-#MULTILIB_MATCHES += march?armv7=march?armv7-m +-#MULTILIB_MATCHES += march?armv7=mcpu?cortex-a8 +-#MULTILIB_MATCHES += march?armv7=mcpu?cortex-r4 +-#MULTILIB_MATCHES += march?armv7=mcpu?cortex-m3 ++MULTILIB_OPTIONS += march=armv7/march=armv6-m ++MULTILIB_DIRNAMES += v7 v6-m ++MULTILIB_EXCEPTIONS += march=armv7* ++MULTILIB_MATCHES += march?armv7=march?armv7-a ++MULTILIB_MATCHES += march?armv7=march?armv7-r ++MULTILIB_MATCHES += march?armv7=march?armv7-m ++MULTILIB_MATCHES += march?armv7=mcpu?cortex-a9 ++MULTILIB_MATCHES += march?armv7=mcpu?cortex-a8 ++MULTILIB_MATCHES += march?armv7=mcpu?cortex-r4 ++MULTILIB_MATCHES += march?armv7=mcpu?cortex-r4f ++MULTILIB_MATCHES += march?armv7=mcpu?cortex-m3 ++ ++MULTILIB_EXCEPTIONS += march=armv6-m ++MULTILIB_MATCHES += march?armv6-m=mcpu?cortex-m1 ++MULTILIB_MATCHES += march?armv6-m=mcpu?cortex-m0 ++ ++# FIXME: We need a sane way of doing this. ++# This isn't really a multilib, it's a hack to add an extra option ++# to the v7-m multilib. ++MULTILIB_OPTIONS += mfix-cortex-m3-ldrd ++MULTILIB_DIRNAMES += broken_ldrd ++ ++MULTILIB_EXCEPTIONS += mfix-cortex-m3-ldrd ++MULTILIB_EXCEPTIONS += mthumb/mfix-cortex-m3-ldrd ++MULTILIB_EXCEPTIONS += *march=armv6-m*mfix-cortex-m3-ldrd ++ ++MULTILIB_ALIASES += mthumb/march?armv7/mfix-cortex-m3-ldrd=mthumb/march?armv7 ++ ++# As of at least 4.2, gcc passes the wrong -L options if some multilibs are ++# omitted from MULTILIB_OSDIRNAMES ++MULTILIB_OSDIRNAMES = mthumb=!thumb ++MULTILIB_OSDIRNAMES += mthumb/march.armv7/mfix-cortex-m3-ldrd=!thumb2 ++MULTILIB_OSDIRNAMES += mthumb/march.armv6-m=!armv6-m ++ ++# Not quite true. We can support hard-vfp calling in Thumb2, but how do we ++# express that here? Also, we really need architecture v5e or later ++# (mcrr etc). ++# MULTILIB_OPTIONS += mfloat-abi=hard ++# MULTILIB_DIRNAMES += fpu ++# MULTILIB_EXCEPTIONS += *mthumb/*mfloat-abi=hard* + + # MULTILIB_OPTIONS += mcpu=ep9312 + # MULTILIB_DIRNAMES += ep9312 +--- /dev/null ++++ b/gcc/config/arm/t-arm-softfp +@@ -0,0 +1,11 @@ ++softfp_float_modes := sf df ++softfp_int_modes := si di ++softfp_extensions := sfdf ++softfp_truncations := dfsf ++softfp_machine_header := arm/sfp-machine.h ++softfp_exclude_libgcc2 := y ++softfp_wrap_start := '\#ifdef __ARM_ARCH_6M__' ++softfp_wrap_end := '\#endif' ++ ++# softfp seems to be missing a whole bunch of prototypes. 
++TARGET_LIBGCC2_CFLAGS += -Wno-missing-prototypes +--- /dev/null ++++ b/gcc/config/arm/t-asa +@@ -0,0 +1,45 @@ ++# Overrides for ASA ++ ++# Here is the expected output from xgcc -print-multi-lib. ++# ++# .;@fno-omit-frame-pointer@mapcs-frame ++# armv4t;@march=armv4t@fno-omit-frame-pointer@mapcs-frame ++# armv6;@march=armv6@fno-omit-frame-pointer@mapcs-frame ++# armv7a;@march=armv7-a@fno-omit-frame-pointer@mapcs-frame ++# armv6f;@march=armv6@mfloat-abi=softfp@fno-omit-frame-pointer@mapcs-frame ++# armv7af;@march=armv7-a@mfpu=neon@mfloat-abi=softfp@fno-omit-frame-pointer@mapcs-frame ++# thumb2;@mthumb@march=armv7-a@fno-omit-frame-pointer@mapcs-frame ++# thumb2f;@mthumb@march=armv7-a@mfpu=neon@mfloat-abi=softfp@fno-omit-frame-pointer@mapcs-frame ++ ++MULTILIB_OPTIONS = mthumb march=armv4t/march=armv6/march=armv7-a mfpu=neon mfloat-abi=softfp ++MULTILIB_DIRNAMES = thumb v4t v6 v7a neon softfp ++MULTILIB_MATCHES = ++ ++MULTILIB_EXTRA_OPTS = fno-omit-frame-pointer mapcs-frame ++ ++MULTILIB_EXCEPTIONS = mthumb ++MULTILIB_EXCEPTIONS += mfpu=neon* ++MULTILIB_EXCEPTIONS += mfloat-abi=softfp ++MULTILIB_EXCEPTIONS += *march=armv4t*/*mfpu=neon* ++MULTILIB_EXCEPTIONS += *march=armv4t*/*mfloat-abi=softfp* ++MULTILIB_EXCEPTIONS += march=armv6/*mfpu=neon* ++MULTILIB_EXCEPTIONS += mthumb/mfpu=neon ++MULTILIB_EXCEPTIONS += mthumb/mfloat-abi=softfp ++MULTILIB_EXCEPTIONS += mthumb/mfpu=neon* ++MULTILIB_EXCEPTIONS += mthumb/march=armv6/mfpu=neon* ++ ++MULTILIB_OSDIRNAMES = march.armv4t=!armv4t ++MULTILIB_OSDIRNAMES += march.armv6=!armv6 ++MULTILIB_OSDIRNAMES += march.armv6/mfloat-abi.softfp=!armv6f ++MULTILIB_OSDIRNAMES += march.armv7-a=!armv7a ++MULTILIB_OSDIRNAMES += march.armv7-a/mfpu.neon/mfloat-abi.softfp=!armv7af ++MULTILIB_OSDIRNAMES += mthumb/march.armv7-a=!thumb2 ++MULTILIB_OSDIRNAMES += mthumb/march.armv7-a/mfpu.neon/mfloat-abi.softfp=!thumb2f ++ ++MULTILIB_ALIASES = march?armv4t=mthumb/march?armv4t ++MULTILIB_ALIASES += march?armv6=mthumb/march?armv6 ++MULTILIB_ALIASES += march?armv6/mfloat-abi?softfp=mthumb/march?armv6/mfloat-abi?softfp ++MULTILIB_ALIASES += march?armv7-a/mfpu?neon/mfloat-abi?softfp=march?armv7-a/mfpu?neon ++MULTILIB_ALIASES += march?armv7-a/mfpu?neon/mfloat-abi?softfp=march?armv7-a/mfloat-abi?softfp ++MULTILIB_ALIASES += mthumb/march?armv7-a/mfpu?neon/mfloat-abi?softfp=mthumb/march?armv7-a/mfpu?neon ++MULTILIB_ALIASES += mthumb/march?armv7-a/mfpu?neon/mfloat-abi?softfp=mthumb/march?armv7-a/mfloat-abi?softfp +--- a/gcc/config/arm/t-bpabi ++++ b/gcc/config/arm/t-bpabi +@@ -1,10 +1,13 @@ + # Add the bpabi.S functions. +-LIB1ASMFUNCS += _aeabi_lcmp _aeabi_ulcmp _aeabi_ldivmod _aeabi_uldivmod ++LIB1ASMFUNCS += _aeabi_lcmp _aeabi_ulcmp _aeabi_ldivmod _aeabi_uldivmod \ ++ _aeabi_idiv0 _aeabi_ldiv0 + + # Add the BPABI C functions. 
+ LIB2FUNCS_EXTRA = $(srcdir)/config/arm/bpabi.c \ + $(srcdir)/config/arm/unaligned-funcs.c + ++LIB2FUNCS_STATIC_EXTRA = $(srcdir)/config/arm/fp16.c ++ + UNWIND_H = $(srcdir)/config/arm/unwind-arm.h + LIB2ADDEH = $(srcdir)/config/arm/unwind-arm.c \ + $(srcdir)/config/arm/libunwind.S \ +--- /dev/null ++++ b/gcc/config/arm/t-cs-eabi +@@ -0,0 +1,193 @@ ++# Multilibs for SourceryG++ arm-none-eabi ++ ++MULTILIB_OPTIONS = mthumb ++MULTILIB_DIRNAMES = t ++MULTILIB_EXCEPTIONS = ++MULTILIB_MATCHES = ++MULTILIB_ALIASES = ++ ++MULTILIB_OPTIONS += march=armv7/march=armv7-a/march=armv5t/march=armv6-m ++MULTILIB_DIRNAMES += v7 v7a v5t v6m ++MULTILIB_MATCHES += march?armv7-a=march?armv7a ++MULTILIB_MATCHES += march?armv7=march?armv7r ++MULTILIB_MATCHES += march?armv7=march?armv7m ++MULTILIB_MATCHES += march?armv7=march?armv7-r ++MULTILIB_MATCHES += march?armv7=march?armv7-m ++MULTILIB_MATCHES += march?armv7-a=mcpu?cortex-a9 ++MULTILIB_MATCHES += march?armv7-a=mcpu?cortex-a8 ++MULTILIB_MATCHES += march?armv7=mcpu?cortex-r4 ++MULTILIB_MATCHES += march?armv7=mcpu?cortex-r4f ++MULTILIB_MATCHES += march?armv7=mcpu?cortex-m3 ++MULTILIB_MATCHES += march?armv6-m=mcpu?cortex-m1 ++MULTILIB_MATCHES += march?armv6-m=mcpu?cortex-m0 ++MULTILIB_MATCHES += march?armv5t=march?armv5te ++MULTILIB_MATCHES += march?armv5t=march?armv6 ++MULTILIB_MATCHES += march?armv5t=march?armv6j ++MULTILIB_MATCHES += march?armv5t=march?armv6k ++MULTILIB_MATCHES += march?armv5t=march?armv6z ++MULTILIB_MATCHES += march?armv5t=march?armv6zk ++MULTILIB_MATCHES += march?armv5t=march?armv6t2 ++MULTILIB_MATCHES += march?armv5t=march?iwmmxt ++MULTILIB_MATCHES += march?armv5t=march?iwmmxt2 ++MULTILIB_MATCHES += march?armv5t=mcpu?arm10tdmi ++MULTILIB_MATCHES += march?armv5t=mcpu?arm1020t ++MULTILIB_MATCHES += march?armv5t=mcpu?arm9e ++MULTILIB_MATCHES += march?armv5t=mcpu?arm946e-s ++MULTILIB_MATCHES += march?armv5t=mcpu?arm966e-s ++MULTILIB_MATCHES += march?armv5t=mcpu?arm968e-s ++MULTILIB_MATCHES += march?armv5t=mcpu?arm10e ++MULTILIB_MATCHES += march?armv5t=mcpu?arm1020e ++MULTILIB_MATCHES += march?armv5t=mcpu?arm1022e ++MULTILIB_MATCHES += march?armv5t=mcpu?xscale ++MULTILIB_MATCHES += march?armv5t=mcpu?iwmmxt ++MULTILIB_MATCHES += march?armv5t=mcpu?iwmmxt2 ++MULTILIB_MATCHES += march?armv5t=mcpu?marvell-f ++MULTILIB_MATCHES += march?armv5t=mcpu?arm926ej-s ++MULTILIB_MATCHES += march?armv5t=mcpu?arm1026ej-s ++MULTILIB_MATCHES += march?armv5t=mcpu?arm1136j-s ++MULTILIB_MATCHES += march?armv5t=mcpu?arm1136jf-s ++MULTILIB_MATCHES += march?armv5t=mcpu?arm1176jz-s ++MULTILIB_MATCHES += march?armv5t=mcpu?arm1176jzf-s ++MULTILIB_MATCHES += march?armv5t=mcpu?mpcorenovfp ++MULTILIB_MATCHES += march?armv5t=mcpu?mpcore ++MULTILIB_MATCHES += march?armv5t=mcpu?arm1156t2-s ++ ++MULTILIB_OPTIONS += mfloat-abi=softfp/mfloat-abi=hard ++MULTILIB_DIRNAMES += softfp hard ++MULTILIB_MATCHES += mfloat-abi?hard=mhard-float ++ ++MULTILIB_OPTIONS += mfpu=neon ++MULTILIB_DIRNAMES += neon ++MULTILIB_EXCEPTIONS += mfpu=neon ++MULTILIB_MATCHES += mfpu?neon=mfpu?neon-fp16 ++ ++MULTILIB_ALIASES += mthumb=mthumb/mfpu?neon ++MULTILIB_ALIASES += mthumb=mthumb/march?armv5t/mfpu?neon ++MULTILIB_ALIASES += mbig-endian=mthumb/mfpu?neon/mbig-endian ++MULTILIB_ALIASES += mfloat-abi?softfp=mthumb/mfloat-abi?softfp/mfpu?neon ++MULTILIB_ALIASES += mfloat-abi?softfp=mfloat-abi?softfp/mfpu?neon ++MULTILIB_ALIASES += mfloat-abi?softfp/mbig-endian=mfloat-abi?softfp/mfpu?neon/mbig-endian ++MULTILIB_ALIASES += mfloat-abi?softfp/mbig-endian=mthumb/mfloat-abi?softfp/mfpu?neon/mbig-endian 
++MULTILIB_ALIASES += mthumb/march?armv7/mfix-cortex-m3-ldrd=mthumb/march?armv7-a/mfpu?neon ++MULTILIB_ALIASES += mthumb/march?armv7/mbig-endian=mthumb/march?armv7-a/mfpu?neon/mbig-endian ++MULTILIB_ALIASES += march?armv7-a/mfloat-abi?softfp/mfpu?neon=mthumb/march?armv7-a/mfloat-abi?softfp/mfpu?neon ++MULTILIB_ALIASES += march?armv7-a/mfloat-abi?hard/mfpu?neon=mthumb/march?armv7-a/mfloat-abi?hard/mfpu?neon ++ ++MULTILIB_OPTIONS += mbig-endian ++MULTILIB_DIRNAMES += be ++MULTILIB_ALIASES += mbig-endian=mfpu?neon/mbig-endian ++ ++# ARMv6-M does not have ARM mode. ++MULTILIB_EXCEPTIONS += march=armv6-m ++ ++# Some ARMv7 variants have ARM mode. Use the ARM libraries. ++MULTILIB_EXCEPTIONS += march=armv7 march=armv7/* ++MULTILIB_ALIASES += mbig-endian=march?armv7/mbig-endian ++MULTILIB_ALIASES += mfloat-abi?softfp=march?armv7/mfloat-abi?softfp ++MULTILIB_ALIASES += mfloat-abi?softfp=march?armv7/mfloat-abi?softfp/mfpu?neon ++MULTILIB_ALIASES += mfloat-abi?softfp/mbig-endian=march?armv7/mfloat-abi?softfp/mbig-endian ++MULTILIB_ALIASES += mfloat-abi?softfp/mbig-endian=march?armv7/mfloat-abi?softfp/mfpu?neon/mbig-endian ++MULTILIB_ALIASES += mbig-endian=march?armv7/mfpu?neon/mbig-endian ++MULTILIB_ALIASES += mthumb/march?armv7/mfix-cortex-m3-ldrd=mthumb/march?armv7/mfloat-abi?softfp/mfpu?neon ++MULTILIB_ALIASES += mthumb/march?armv7/mfix-cortex-m3-ldrd=mthumb/march?armv7/mfpu?neon ++MULTILIB_ALIASES += mthumb/march?armv7/mbig-endian=mthumb/march?armv7/mfpu?neon/mbig-endian ++MULTILIB_ALIASES += mthumb/march?armv7/mbig-endian=mthumb/march?armv7/mfloat-abi?softfp/mfpu?neon/mbig-endian ++ ++# ARMv7-A is specially useful used with VFPv3 (enabled by NEON). Rest of the cases behaves as ARMv7. ++MULTILIB_ALIASES += mthumb/march?armv7/mfix-cortex-m3-ldrd=mthumb/march?armv7-a ++MULTILIB_ALIASES += mbig-endian=march?armv7-a/mbig-endian ++MULTILIB_ALIASES += mfloat-abi?softfp/mbig-endian=march?armv7-a/mfloat-abi?softfp/mbig-endian ++MULTILIB_ALIASES += mfloat-abi?softfp/mbig-endian=march?armv7-a/mfloat-abi?softfp/mfpu?neon/mbig-endian ++MULTILIB_ALIASES += mthumb/march?armv7/mfix-cortex-m3-ldrd=mthumb/march?armv7-a/mfloat-abi?softfp ++MULTILIB_ALIASES += mthumb/march?armv7/mbig-endian=mthumb/march?armv7-a/mbig-endian ++MULTILIB_ALIASES += mthumb/march?armv7/mbig-endian=mthumb/march?armv7-a/mfloat-abi?softfp/mbig-endian ++MULTILIB_ALIASES += mthumb/march?armv7/mfix-cortex-m3-ldrd=mthumb/march?armv7/mfloat-abi?softfp ++MULTILIB_ALIASES += march?armv5t=march?armv7-a ++MULTILIB_ALIASES += march?armv5t=march?armv7-a/mfloat-abi?softfp ++MULTILIB_ALIASES += march?armv5t=march?armv7-a/mfpu?neon ++MULTILIB_ALIASES += mbig-endian=march?armv7-a/mfpu?neon/mbig-endian ++MULTILIB_ALIASES += mthumb/march?armv7/mbig-endian=mthumb/march?armv7-a/mfloat-abi?softfp/mfpu?neon/mbig-endian ++ ++# ARMv5T thumb uses the ARMv5T ARM libraries (with or without VFP). 
++MULTILIB_ALIASES += mthumb=mthumb/march?armv5t ++MULTILIB_ALIASES += march?armv5t/mfloat-abi?softfp=mthumb/march?armv5t/mfloat-abi?softfp ++MULTILIB_ALIASES += march?armv5t/mfloat-abi?softfp=march?armv5t/mfloat-abi?softfp/mfpu?neon ++MULTILIB_ALIASES += march?armv5t/mfloat-abi?softfp=mthumb/march?armv5t/mfloat-abi?softfp/mfpu?neon ++MULTILIB_ALIASES += march?armv5t=march?armv5t/mfpu?neon ++MULTILIB_ALIASES += mbig-endian=march?armv5t/mfpu?neon/mbig-endian ++MULTILIB_ALIASES += mbig-endian=march?armv5t/mfloat-abi?softfp/mfpu?neon/mbig-endian ++MULTILIB_ALIASES += mbig-endian=mthumb/march?armv5t/mfpu?neon/mbig-endian ++MULTILIB_ALIASES += mbig-endian=mthumb/march?armv5t/mfloat-abi?softfp/mfpu?neon/mbig-endian ++ ++# ARMv6-M and VFP are incompatible. ++# FIXME: The compiler should probably error. ++MULTILIB_EXCEPTIONS += *march=armv6-m/mfloat-abi=softfp ++MULTILIB_ALIASES += mthumb/march?armv6-m=mthumb/march?armv6-m/mfpu?neon ++MULTILIB_EXCEPTIONS += march=armv6-m*mfpu=neon ++MULTILIB_EXCEPTIONS += mthumb/march=armv6-m/mfloat-abi=softfp/mfpu=neon ++ ++# Thumb-1 VFP isn't really a meaningful combination. Use the ARM VFP. ++MULTILIB_ALIASES += mfloat-abi?softfp=mthumb/mfloat-abi?softfp ++MULTILIB_ALIASES += mfloat-abi?softfp/mbig-endian=mthumb/mfloat-abi?softfp/mbig-endian ++ ++# We don't have a big-endian ARMv6-M compatible multilibs. ++MULTILIB_EXCEPTIONS += *march=armv6-m*mbig-endian ++ ++# Use the generic libraries for big-endian ARMv5T ++MULTILIB_ALIASES += mbig-endian=march?armv5t/mbig-endian ++MULTILIB_ALIASES += mbig-endian=march?armv5t/mfloat-abi?softfp/mbig-endian ++MULTILIB_ALIASES += mbig-endian=mthumb/march?armv5t/mbig-endian ++MULTILIB_ALIASES += mbig-endian=mthumb/march?armv5t/mfloat-abi?softfp/mbig-endian ++ ++# Use ARM libraries for big-endian Thumb. ++MULTILIB_ALIASES += mbig-endian=mthumb/mbig-endian ++ ++# Don't bother with big-endian Thumb-2 VFP. Use the soft-float libraries ++# for now. ++MULTILIB_ALIASES += mthumb/march?armv7/mbig-endian=mthumb/march?armv7/mfloat-abi?softfp/mbig-endian ++ ++# The only -mfloat-abi=hard libraries provided are for little-endian ++# v7-A NEON. ++MULTILIB_EXCEPTIONS += mfloat-abi=hard* ++MULTILIB_EXCEPTIONS += *march=armv5t*mfloat-abi=hard* ++MULTILIB_EXCEPTIONS += *march=armv7/*mfloat-abi=hard* ++MULTILIB_EXCEPTIONS += *march=armv6-m*mfloat-abi=hard* ++MULTILIB_EXCEPTIONS += mthumb/mfloat-abi=hard* ++MULTILIB_EXCEPTIONS += *mfloat-abi=hard*mbig-endian ++MULTILIB_EXCEPTIONS += *mfloat-abi=hard ++ ++# FIXME: We need a sane way of doing this. ++# This isn't really a multilib, it's a hack to add an extra option ++# to the v7-m multilib. 
++MULTILIB_OPTIONS += mfix-cortex-m3-ldrd ++MULTILIB_DIRNAMES += broken_ldrd ++ ++MULTILIB_EXCEPTIONS += mfix-cortex-m3-ldrd ++MULTILIB_EXCEPTIONS += mthumb/mfix-cortex-m3-ldrd ++MULTILIB_EXCEPTIONS += *march=armv6-m*mfix-cortex-m3-ldrd ++MULTILIB_EXCEPTIONS += *march=armv7-a*mfix-cortex-m3-ldrd ++MULTILIB_EXCEPTIONS += *mcpu=*mfix-cortex-m3-ldrd ++MULTILIB_EXCEPTIONS += *mbig-endian*mfix-cortex-m3-ldrd ++MULTILIB_EXCEPTIONS += *mfloat-abi=softfp*mfix-cortex-m3-ldrd ++MULTILIB_EXCEPTIONS += mfloat-abi=softfp*mfpu=neon* ++MULTILIB_EXCEPTIONS += *march=armv5t*mfix-cortex-m3-ldrd ++MULTILIB_EXCEPTIONS += *mfpu=neon*mfix-cortex-m3-ldrd ++ ++MULTILIB_ALIASES += mthumb/march?armv7/mfix-cortex-m3-ldrd=mthumb/march?armv7 ++MULTILIB_ALIASES += mthumb/march?armv7/mfix-cortex-m3-ldrd=mthumb/march?armv7-a/mfix-cortex-m3-ldrd ++MULTILIB_ALIASES += mthumb/march?armv7/mfix-cortex-m3-ldrd=mthumb/march?armv7/mfpu?neon/mfix-cortex-m3-ldrd ++MULTILIB_ALIASES += mthumb/march?armv7/mfix-cortex-m3-ldrd=mthumb/march?armv7-a/mfpu?neon/mfix-cortex-m3-ldrd ++ ++# As of at least 4.2, gcc passes the wrong -L options if some multilibs are ++# omitted from MULTILIB_OSDIRNAMES ++MULTILIB_OSDIRNAMES = mthumb=!thumb ++MULTILIB_OSDIRNAMES += mbig-endian=!be ++MULTILIB_OSDIRNAMES += mfloat-abi.softfp=!vfp ++MULTILIB_OSDIRNAMES += mfloat-abi.softfp/mbig-endian=!vfp-be ++MULTILIB_OSDIRNAMES += march.armv5t=!armv5t ++MULTILIB_OSDIRNAMES += march.armv5t/mfloat-abi.softfp=!armv5t-vfp ++MULTILIB_OSDIRNAMES += mthumb/march.armv7/mfix-cortex-m3-ldrd=!thumb2 ++MULTILIB_OSDIRNAMES += march.armv7-a/mfloat-abi.softfp/mfpu.neon=!armv7-a-neon ++MULTILIB_OSDIRNAMES += march.armv7-a/mfloat-abi.hard/mfpu.neon=!armv7-a-hard ++MULTILIB_OSDIRNAMES += mthumb/march.armv7/mbig-endian=!thumb2-be ++MULTILIB_OSDIRNAMES += mthumb/march.armv6-m=!armv6-m +--- /dev/null ++++ b/gcc/config/arm/t-cs-linux +@@ -0,0 +1,106 @@ ++# Multilibs for SourceryG++ arm-none-linux-gnueabi ++ ++MULTILIB_OPTIONS = mthumb ++MULTILIB_DIRNAMES = t ++MULTILIB_EXCEPTIONS = ++MULTILIB_MATCHES = ++MULTILIB_ALIASES = ++ ++MULTILIB_OPTIONS += march=armv4t/march=armv7-a ++MULTILIB_DIRNAMES += v4t v7a ++ ++MULTILIB_MATCHES += march?armv7-a=march?armv7a ++MULTILIB_MATCHES += march?armv7-a=mcpu?cortex-a9 ++MULTILIB_MATCHES += march?armv7-a=mcpu?cortex-a8 ++MULTILIB_MATCHES += march?armv4t=march?ep9312 ++MULTILIB_MATCHES += march?armv4t=mcpu?arm7tdmi ++MULTILIB_MATCHES += march?armv4t=mcpu?arm7tdmi-s ++MULTILIB_MATCHES += march?armv4t=mcpu?arm710t ++MULTILIB_MATCHES += march?armv4t=mcpu?arm720t ++MULTILIB_MATCHES += march?armv4t=mcpu?arm740t ++MULTILIB_MATCHES += march?armv4t=mcpu?arm9 ++MULTILIB_MATCHES += march?armv4t=mcpu?arm9tdmi ++MULTILIB_MATCHES += march?armv4t=mcpu?arm920 ++MULTILIB_MATCHES += march?armv4t=mcpu?arm920t ++MULTILIB_MATCHES += march?armv4t=mcpu?arm922t ++MULTILIB_MATCHES += march?armv4t=mcpu?arm940t ++MULTILIB_MATCHES += march?armv4t=mcpu?ep9312 ++ ++MULTILIB_OPTIONS += mfloat-abi=softfp/mfloat-abi=hard ++MULTILIB_DIRNAMES += softfp hard ++MULTILIB_MATCHES += mfloat-abi?hard=mhard-float ++ ++MULTILIB_OPTIONS += mfpu=neon ++MULTILIB_DIRNAMES += neon ++MULTILIB_EXCEPTIONS += mfpu=neon ++MULTILIB_MATCHES += mfpu?neon=mfpu?neon-fp16 ++MULTILIB_ALIASES += mfloat-abi?softfp=mfloat-abi?softfp/mfpu?neon ++MULTILIB_ALIASES += mfloat-abi?softfp=mthumb/mfloat-abi?softfp/mfpu?neon ++MULTILIB_ALIASES += march?armv7-a/mfloat-abi?hard/mfpu?neon=mthumb/march?armv7-a/mfloat-abi?hard/mfpu?neon ++ ++MULTILIB_OPTIONS += mbig-endian ++MULTILIB_DIRNAMES += be ++MULTILIB_ALIASES += 
mbig-endian=mfpu?neon/mbig-endian ++MULTILIB_ALIASES += mfloat-abi?softfp/mbig-endian=mfloat-abi?softfp/mfpu?neon/mbig-endian ++MULTILIB_ALIASES += mbig-endian=mthumb/mfpu?neon/mbig-endian ++MULTILIB_ALIASES += mfloat-abi?softfp/mbig-endian=mthumb/mfloat-abi?softfp/mfpu?neon/mbig-endian ++ ++# Do not build Thumb libraries. ++MULTILIB_EXCEPTIONS += mthumb ++MULTILIB_EXCEPTIONS += mthumb/mfpu=neon ++ ++# Use ARM libraries for ARMv4t Thumb and VFP. ++MULTILIB_ALIASES += march?armv4t=mthumb/march?armv4t ++MULTILIB_ALIASES += march?armv4t=march?armv4t/mfloat-abi?softfp ++MULTILIB_ALIASES += march?armv4t=mthumb/march?armv4t/mfloat-abi?softfp ++MULTILIB_ALIASES += march?armv4t=march?armv4t/mfpu?neon ++MULTILIB_ALIASES += march?armv4t=march?armv4t/mfloat-abi?softfp/mfpu?neon ++MULTILIB_ALIASES += march?armv4t=mthumb/march?armv4t/mfpu?neon ++MULTILIB_ALIASES += march?armv4t=mthumb/march?armv4t/mfloat-abi?softfp/mfpu?neon ++ ++# We do not support ARMv4t big-endian. ++MULTILIB_EXCEPTIONS += *march=armv4t*mbig-endian ++ ++# Behave ARMv7-A as ARMv7 for some cases. ++MULTILIB_EXCEPTIONS += march=armv7-a ++MULTILIB_EXCEPTIONS += march=armv7-a/mfpu=neon ++MULTILIB_ALIASES += mfloat-abi?softfp=march?armv7-a/mfloat-abi?softfp ++MULTILIB_ALIASES += mbig-endian=march?armv7-a/mbig-endian ++MULTILIB_ALIASES += mbig-endian=march?armv7-a/mfpu?neon/mbig-endian ++MULTILIB_ALIASES += mfloat-abi?softfp/mbig-endian=march?armv7-a/mfloat-abi?softfp/mbig-endian ++MULTILIB_ALIASES += mfloat-abi?softfp/mbig-endian=march?armv7-a/mfloat-abi?softfp/mfpu?neon/mbig-endian ++MULTILIB_ALIASES += mthumb/march?armv7-a=mthumb/march?armv7-a/mfpu?neon ++MULTILIB_ALIASES += mthumb/march?armv7-a/mbig-endian=mthumb/march?armv7-a/mfpu?neon/mbig-endian ++MULTILIB_ALIASES += mthumb/march?armv7-a/mbig-endian=mthumb/march?armv7-a/mfloat-abi?softfp/mfpu?neon/mbig-endian ++MULTILIB_ALIASES += march?armv7-a/mfloat-abi?softfp/mfpu?neon=mthumb/march?armv7-a/mfloat-abi?softfp/mfpu?neon ++MULTILIB_ALIASES += mthumb/march?armv7-a=mthumb/march?armv7-a/mfloat-abi?softfp ++ ++# Thumb-1 VFP isn't really a meaningful combination. Use the ARM VFP. ++MULTILIB_ALIASES += mfloat-abi?softfp=mthumb/mfloat-abi?softfp ++MULTILIB_ALIASES += mfloat-abi?softfp/mbig-endian=mthumb/mfloat-abi?softfp/mbig-endian ++ ++# Use ARM libraries for big-endian Thumb. ++MULTILIB_ALIASES += mbig-endian=mthumb/mbig-endian ++ ++# Don't bother with big-endian Thumb-2 VFP. Use the soft-float libraries ++# for now. ++MULTILIB_ALIASES += mthumb/march?armv7-a/mbig-endian=mthumb/march?armv7-a/mfloat-abi?softfp/mbig-endian ++ ++# The only -mfloat-abi=hard libraries provided are for little-endian ++# v7-A NEON. 
++MULTILIB_EXCEPTIONS += mfloat-abi=hard* ++MULTILIB_EXCEPTIONS += *march=armv4t*mfloat-abi=hard* ++MULTILIB_EXCEPTIONS += mthumb/mfloat-abi=hard* ++MULTILIB_EXCEPTIONS += *mfloat-abi=hard*mbig-endian ++MULTILIB_EXCEPTIONS += *mfloat-abi=hard ++ ++# As of at least 4.2, gcc passes the wrong -L options if some multilibs are ++# omitted from MULTILIB_OSDIRNAMES ++MULTILIB_OSDIRNAMES = march.armv4t=!armv4t ++MULTILIB_OSDIRNAMES += mbig-endian=!be ++MULTILIB_OSDIRNAMES += mfloat-abi.softfp=!vfp ++MULTILIB_OSDIRNAMES += mfloat-abi.softfp/mbig-endian=!vfp-be ++MULTILIB_OSDIRNAMES += mthumb/march.armv7-a=!thumb2 ++MULTILIB_OSDIRNAMES += march.armv7-a/mfloat-abi.softfp/mfpu.neon=!armv7-a-neon ++MULTILIB_OSDIRNAMES += march.armv7-a/mfloat-abi.hard/mfpu.neon=!armv7-a-hard ++MULTILIB_OSDIRNAMES += mthumb/march.armv7-a/mbig-endian=!thumb2-be +--- a/gcc/config/arm/t-linux-eabi ++++ b/gcc/config/arm/t-linux-eabi +@@ -1,10 +1,48 @@ + # These functions are included in shared libraries. + TARGET_LIBGCC2_CFLAGS = -fPIC + +-# We do not build a Thumb multilib for Linux because the definition of +-# CLEAR_INSN_CACHE in linux-gas.h does not work in Thumb mode. +-MULTILIB_OPTIONS = +-MULTILIB_DIRNAMES = ++# We build 3 multilibs: ++# ./ (default) ++# armv4t/ -march=armv4t [-mthumb] ++# thumb2/ -mthumb -march=armv7 ++MULTILIB_OPTIONS = mthumb ++MULTILIB_DIRNAMES = thumb ++MULTILIB_OPTIONS += march=armv4t/march=armv7 ++MULTILIB_DIRNAMES += v4t v7 ++MULTILIB_EXCEPTIONS += march=armv7 ++MULTILIB_EXCEPTIONS += mthumb ++ ++MULTILIB_ALIASES = march?armv4t=mthumb/march?armv4t ++ ++# As of at least 4.2, gcc passes the wrong -L options if some multilibs are ++# omitted from MULTILIB_OSDIRNAMES ++MULTILIB_OSDIRNAMES = march.armv4t=!armv4t ++MULTILIB_OSDIRNAMES += mthumb/march.armv7=!thumb2 ++ ++MULTILIB_MATCHES += march?armv7=march?armv7a ++MULTILIB_MATCHES += march?armv7=march?armv7r ++MULTILIB_MATCHES += march?armv7=march?armv7m ++MULTILIB_MATCHES += march?armv7=march?armv7-a ++MULTILIB_MATCHES += march?armv7=march?armv7-r ++MULTILIB_MATCHES += march?armv7=march?armv7-m ++MULTILIB_MATCHES += march?armv7=mcpu?cortex-a9 ++MULTILIB_MATCHES += march?armv7=mcpu?cortex-a8 ++MULTILIB_MATCHES += march?armv7=mcpu?cortex-r4 ++MULTILIB_MATCHES += march?armv7=mcpu?cortex-r4f ++MULTILIB_MATCHES += march?armv7=mcpu?cortex-m3 ++MULTILIB_MATCHES += march?armv4t=march?ep9312 ++MULTILIB_MATCHES += march?armv4t=mcpu?arm7tdmi ++MULTILIB_MATCHES += march?armv4t=mcpu?arm7tdmi-s ++MULTILIB_MATCHES += march?armv4t=mcpu?arm710t ++MULTILIB_MATCHES += march?armv4t=mcpu?arm720t ++MULTILIB_MATCHES += march?armv4t=mcpu?arm740t ++MULTILIB_MATCHES += march?armv4t=mcpu?arm9 ++MULTILIB_MATCHES += march?armv4t=mcpu?arm9tdmi ++MULTILIB_MATCHES += march?armv4t=mcpu?arm920 ++MULTILIB_MATCHES += march?armv4t=mcpu?arm920t ++MULTILIB_MATCHES += march?armv4t=mcpu?arm922t ++MULTILIB_MATCHES += march?armv4t=mcpu?arm940t ++MULTILIB_MATCHES += march?armv4t=mcpu?ep9312 + + # Use a version of div0 which raises SIGFPE. + LIB1ASMFUNCS := $(filter-out _dvmd_tls,$(LIB1ASMFUNCS)) _dvmd_lnx +@@ -12,3 +50,5 @@ LIB1ASMFUNCS := $(filter-out _dvmd_tls,$ + # Multilib the standard Linux files. Don't include crti.o or crtn.o, + # which are provided by glibc. + EXTRA_MULTILIB_PARTS=crtbegin.o crtend.o crtbeginS.o crtendS.o crtbeginT.o ++ ++LIB2FUNCS_STATIC_EXTRA += $(srcdir)/config/arm/linux-atomic.c +--- /dev/null ++++ b/gcc/config/arm/t-montavista-linux +@@ -0,0 +1,33 @@ ++# MontaVista GNU/Linux Configuration. ++# Copyright (C) 2009 ++# Free Software Foundation, Inc. 
++# ++# This file is part of GCC. ++# ++# GCC is free software; you can redistribute it and/or modify ++# it under the terms of the GNU General Public License as published by ++# the Free Software Foundation; either version 3, or (at your option) ++# any later version. ++# ++# GCC is distributed in the hope that it will be useful, ++# but WITHOUT ANY WARRANTY; without even the implied warranty of ++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++# GNU General Public License for more details. ++# ++# You should have received a copy of the GNU General Public License ++# along with GCC; see the file COPYING3. If not see ++# . ++ ++MULTILIB_OPTIONS = tarmv6/tthumb2 ++MULTILIB_DIRNAMES = armv6 thumb2 ++ ++MULTILIB_EXCEPTIONS = ++ ++MULTILIB_OSDIRNAMES = ++ ++MULTILIB_ALIASES = ++ ++MULTILIB_MATCHES = ++ ++# These files must be built for each multilib. ++EXTRA_MULTILIB_PARTS = crtbegin.o crtend.o crtbeginS.o crtendS.o crtbeginT.o +--- a/gcc/config/arm/t-symbian ++++ b/gcc/config/arm/t-symbian +@@ -17,12 +17,18 @@ UNWIND_H = $(srcdir)/config/arm/unwind-a + LIB2ADDEH = $(srcdir)/unwind-c.c $(srcdir)/config/arm/pr-support.c + LIB2ADDEHDEP = $(UNWIND_H) + ++# Include half-float helpers. ++LIB2FUNCS_STATIC_EXTRA = $(srcdir)/config/arm/fp16.c ++ + # Create a multilib for processors with VFP floating-point, and a + # multilib for those without -- using the soft-float ABI in both + # cases. Symbian OS object should be compiled with interworking + # enabled, so there are no separate thumb-mode libraries. + MULTILIB_OPTIONS = mfloat-abi=softfp + MULTILIB_DIRNAMES = softfp ++MULTILIB_EXCEPTIONS = ++MULTILIB_MATCHES = ++MULTILIB_ALIASES = + + # There is no C library to link against on Symbian OS -- at least when + # building GCC. +--- /dev/null ++++ b/gcc/config/arm/t-timesys +@@ -0,0 +1,10 @@ ++# Overrides for timesys ++ ++MULTILIB_OPTIONS = march=armv5t/march=armv6/mcpu=xscale mbig-endian ++MULTILIB_DIRNAMES = armv5t armv6 xscale be ++MULTILIB_MATCHES = mbig-endian=mbe ++MULTILIB_EXCEPTIONS = mbig-endian march=*/mbig-endian mcpu=xscale ++MULTILIB_OSDIRNAMES = march.armv5t=!armv5t ++MULTILIB_OSDIRNAMES += march.armv6=!armv6 ++MULTILIB_OSDIRNAMES += mcpu.xscale/mbig-endian=!xscale/be ++MULTILIB_ALIASES = +--- /dev/null ++++ b/gcc/config/arm/t-uclinux-eabi +@@ -0,0 +1,53 @@ ++# EABI uClinux multilib selection. Other setting are inherited from t-arm-elf ++ ++# We build 3 multilibs: ++# . (default) ++# thumb2/ -mthumb -march=armv7 -mfix-cortex-m3-ldrd ++# armv6-m/ -mthumb -march=armv6-m ++ ++MULTILIB_OPTIONS = mthumb ++MULTILIB_DIRNAMES = thumb ++MULTILIB_EXCEPTIONS = ++MULTILIB_MATCHES = ++ ++MULTILIB_OPTIONS += march=armv7/march=armv6-m ++MULTILIB_DIRNAMES += armv7 armv6-m ++ ++MULTILIB_EXCEPTIONS += mthumb ++ ++MULTILIB_EXCEPTIONS += march=armv7 ++MULTILIB_MATCHES += march?armv7=march?armv7a ++MULTILIB_MATCHES += march?armv7=march?armv7r ++MULTILIB_MATCHES += march?armv7=march?armv7m ++MULTILIB_MATCHES += march?armv7=march?armv7-a ++MULTILIB_MATCHES += march?armv7=march?armv7-r ++MULTILIB_MATCHES += march?armv7=march?armv7-m ++MULTILIB_MATCHES += march?armv7=mcpu?cortex-a9 ++MULTILIB_MATCHES += march?armv7=mcpu?cortex-a8 ++MULTILIB_MATCHES += march?armv7=mcpu?cortex-r4 ++MULTILIB_MATCHES += march?armv7=mcpu?cortex-r4f ++MULTILIB_MATCHES += march?armv7=mcpu?cortex-m3 ++ ++MULTILIB_EXCEPTIONS += march=armv6-m ++MULTILIB_MATCHES += march?armv6-m=mcpu?cortex-m1 ++MULTILIB_MATCHES += march?armv6-m=mcpu?cortex-m0 ++ ++MULTILIB_ALIASES = ++ ++# FIXME: We need a sane way of doing this. 
++# This isn't really a multilib, it's a hack to add an extra option ++# to the v7-m multilib. ++MULTILIB_OPTIONS += mfix-cortex-m3-ldrd ++MULTILIB_DIRNAMES += broken_ldrd ++ ++MULTILIB_EXCEPTIONS += mfix-cortex-m3-ldrd ++MULTILIB_EXCEPTIONS += mthumb/mfix-cortex-m3-ldrd ++MULTILIB_EXCEPTIONS += march=armv7/mfix-cortex-m3-ldrd ++MULTILIB_EXCEPTIONS += *march=armv6-m*mfix-cortex-m3-ldrd ++ ++MULTILIB_ALIASES += mthumb/march?armv7/mfix-cortex-m3-ldrd=mthumb/march?armv7 ++ ++ ++MULTILIB_OSDIRNAMES = mthumb/march.armv7/mfix-cortex-m3-ldrd=!thumb2 ++MULTILIB_OSDIRNAMES += mthumb/march.armv6-m=!armv6-m ++ +--- /dev/null ++++ b/gcc/config/arm/t-wrs-linux +@@ -0,0 +1,43 @@ ++# Wind River GNU/Linux Configuration. ++# Copyright (C) 2006, 2007, 2008 ++# Free Software Foundation, Inc. ++# ++# This file is part of GCC. ++# ++# GCC is free software; you can redistribute it and/or modify ++# it under the terms of the GNU General Public License as published by ++# the Free Software Foundation; either version 3, or (at your option) ++# any later version. ++# ++# GCC is distributed in the hope that it will be useful, ++# but WITHOUT ANY WARRANTY; without even the implied warranty of ++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++# GNU General Public License for more details. ++# ++# You should have received a copy of the GNU General Public License ++# along with GCC; see the file COPYING3. If not see ++# . ++ ++MULTILIB_OPTIONS = muclibc ++MULTILIB_OPTIONS += tarm926ej-s/tiwmmxt/txscale/tarm920t/tthumb2/tcortex-a8-be8 ++MULTILIB_OPTIONS += mfloat-abi=softfp ++MULTILIB_DIRNAMES = uclibc ++MULTILIB_DIRNAMES += tarm926ej-s tiwmmxt txscale tarm920t thumb2 cortex-a8-be8 ++MULTILIB_DIRNAMES += softfp ++ ++MULTILIB_EXCEPTIONS = *muclibc*/*tarm920t* ++MULTILIB_EXCEPTIONS += *muclibc*/*cortex-a8-be8* ++ ++MULTILIB_EXCEPTIONS += *tiwmmxt*/*mfloat-abi=softfp* ++MULTILIB_EXCEPTIONS += *txscale*/*mfloat-abi=softfp* ++MULTILIB_EXCEPTIONS += *tarm920t*/*mfloat-abi=softfp* ++MULTILIB_EXCEPTIONS += *thumb2*/*mfloat-abi=softfp* ++ ++MULTILIB_MATCHES = tiwmmxt=tiwmmxt2 ++ ++MULTILIB_ALIASES = tcortex-a8-be8=tcortex-a8-be8/mfloat-abi?softfp ++MULTILIB_OSDIRNAMES = ++ ++# These files must be built for each multilib. ++EXTRA_MULTILIB_PARTS = crtbegin.o crtend.o crtbeginS.o crtendS.o crtbeginT.o ++ +--- a/gcc/config/arm/thumb2.md ++++ b/gcc/config/arm/thumb2.md +@@ -1,5 +1,5 @@ + ;; ARM Thumb-2 Machine Description +-;; Copyright (C) 2007 Free Software Foundation, Inc. ++;; Copyright (C) 2005, 2006, 2007, 2008 Free Software Foundation, Inc. + ;; Written by CodeSourcery, LLC. + ;; + ;; This file is part of GCC. +@@ -24,6 +24,8 @@ + ;; changes made in armv5t as "thumb2". These are considered part + ;; the 16-bit Thumb-1 instruction set. 
+ ++(include "hwdiv.md") ++ + (define_insn "*thumb2_incscc" + [(set (match_operand:SI 0 "s_register_operand" "=r,r") + (plus:SI (match_operator:SI 2 "arm_comparison_operator" +@@ -172,34 +174,6 @@ + (set_attr "length" "8")] + ) + +-(define_insn "*thumb2_abssi2" +- [(set (match_operand:SI 0 "s_register_operand" "=r,&r") +- (abs:SI (match_operand:SI 1 "s_register_operand" "0,r"))) +- (clobber (reg:CC CC_REGNUM))] +- "TARGET_THUMB2" +- "@ +- cmp\\t%0, #0\;it\tlt\;rsblt\\t%0, %0, #0 +- eor%?\\t%0, %1, %1, asr #31\;sub%?\\t%0, %0, %1, asr #31" +- [(set_attr "conds" "clob,*") +- (set_attr "shift" "1") +- ;; predicable can't be set based on the variant, so left as no +- (set_attr "length" "10,8")] +-) +- +-(define_insn "*thumb2_neg_abssi2" +- [(set (match_operand:SI 0 "s_register_operand" "=r,&r") +- (neg:SI (abs:SI (match_operand:SI 1 "s_register_operand" "0,r")))) +- (clobber (reg:CC CC_REGNUM))] +- "TARGET_THUMB2" +- "@ +- cmp\\t%0, #0\;it\\tgt\;rsbgt\\t%0, %0, #0 +- eor%?\\t%0, %1, %1, asr #31\;rsb%?\\t%0, %0, %1, asr #31" +- [(set_attr "conds" "clob,*") +- (set_attr "shift" "1") +- ;; predicable can't be set based on the variant, so left as no +- (set_attr "length" "10,8")] +-) +- + (define_insn "*thumb2_movdi" + [(set (match_operand:DI 0 "nonimmediate_di_operand" "=r, r, r, r, m") + (match_operand:DI 1 "di_operand" "rDa,Db,Dc,mi,r"))] +@@ -223,23 +197,31 @@ + (set_attr "neg_pool_range" "*,*,*,0,*")] + ) + ++;; We have two alternatives here for memory loads (and similarly for stores) ++;; to reflect the fact that the permissible constant pool ranges differ ++;; between ldr instructions taking low regs and ldr instructions taking high ++;; regs. The high register alternatives are not taken into account when ++;; choosing register preferences in order to reflect their expense. + (define_insn "*thumb2_movsi_insn" +- [(set (match_operand:SI 0 "nonimmediate_operand" "=r,r,r,r, m") +- (match_operand:SI 1 "general_operand" "rI,K,N,mi,r"))] ++ [(set (match_operand:SI 0 "nonimmediate_operand" "=rk,r,r,r,l,*hk,m,*m") ++ (match_operand:SI 1 "general_operand" "rk ,I,K,j,mi,*mi,l,*hk"))] + "TARGET_THUMB2 && ! TARGET_IWMMXT + && !(TARGET_HARD_FLOAT && TARGET_VFP) + && ( register_operand (operands[0], SImode) + || register_operand (operands[1], SImode))" + "@ + mov%?\\t%0, %1 ++ mov%?\\t%0, %1 + mvn%?\\t%0, #%B1 + movw%?\\t%0, %1 + ldr%?\\t%0, %1 ++ ldr%?\\t%0, %1 ++ str%?\\t%1, %0 + str%?\\t%1, %0" +- [(set_attr "type" "*,*,*,load1,store1") ++ [(set_attr "type" "*,*,*,*,load1,load1,store1,store1") + (set_attr "predicable" "yes") +- (set_attr "pool_range" "*,*,*,4096,*") +- (set_attr "neg_pool_range" "*,*,*,0,*")] ++ (set_attr "pool_range" "*,*,*,*,1020,4096,*,*") ++ (set_attr "neg_pool_range" "*,*,*,*,0,0,*,*")] + ) + + ;; ??? 
We can probably do better with thumb2 +@@ -754,15 +736,12 @@ + (clobber (reg:CC CC_REGNUM))] + "TARGET_THUMB2" + "* +- if (GET_CODE (operands[3]) == LT && operands[3] == const0_rtx) ++ if (GET_CODE (operands[3]) == LT && operands[2] == const0_rtx) + return \"asr\\t%0, %1, #31\"; + + if (GET_CODE (operands[3]) == NE) + return \"subs\\t%0, %1, %2\;it\\tne\;mvnne\\t%0, #0\"; + +- if (GET_CODE (operands[3]) == GT) +- return \"subs\\t%0, %1, %2\;it\\tne\;mvnne\\t%0, %0, asr #31\"; +- + output_asm_insn (\"cmp\\t%1, %2\", operands); + output_asm_insn (\"ite\\t%D3\", operands); + output_asm_insn (\"mov%D3\\t%0, #0\", operands); +@@ -951,7 +930,7 @@ + (label_ref (match_operand 2 "" "")))) + (label_ref (match_operand 3 "" "")))) + (clobber (reg:CC CC_REGNUM)) +- (clobber (match_scratch:SI 4 "=r")) ++ (clobber (match_scratch:SI 4 "=&r")) + (use (label_ref (match_dup 2)))])] + "TARGET_THUMB2 && !flag_pic" + "* return thumb2_output_casesi(operands);" +@@ -968,7 +947,7 @@ + (label_ref (match_operand 2 "" "")))) + (label_ref (match_operand 3 "" "")))) + (clobber (reg:CC CC_REGNUM)) +- (clobber (match_scratch:SI 4 "=r")) ++ (clobber (match_scratch:SI 4 "=&r")) + (clobber (match_scratch:SI 5 "=r")) + (use (label_ref (match_dup 2)))])] + "TARGET_THUMB2 && flag_pic" +@@ -1001,7 +980,10 @@ + (match_operator:SI 3 "thumb_16bit_operator" + [(match_operand:SI 1 "low_register_operand" "") + (match_operand:SI 2 "low_register_operand" "")]))] +- "TARGET_THUMB2 && rtx_equal_p(operands[0], operands[1]) ++ "TARGET_THUMB2 ++ && (rtx_equal_p(operands[0], operands[1]) ++ || GET_CODE(operands[3]) == PLUS ++ || GET_CODE(operands[3]) == MINUS) + && peep2_regno_dead_p(0, CC_REGNUM)" + [(parallel + [(set (match_dup 0) +@@ -1018,7 +1000,9 @@ + [(match_operand:SI 1 "s_register_operand" "0") + (match_operand:SI 2 "s_register_operand" "l")])) + (clobber (reg:CC CC_REGNUM))] +- "TARGET_THUMB2 && reload_completed" ++ "TARGET_THUMB2 && reload_completed ++ && GET_CODE(operands[3]) != PLUS ++ && GET_CODE(operands[3]) != MINUS" + "%I3%!\\t%0, %1, %2" + [(set_attr "predicable" "yes") + (set_attr "length" "2")] +@@ -1104,16 +1088,20 @@ + "" + ) + +-(define_insn "*thumb2_addsi_shortim" ++(define_insn "*thumb2_addsi_short" + [(set (match_operand:SI 0 "low_register_operand" "=l") + (plus:SI (match_operand:SI 1 "low_register_operand" "l") +- (match_operand:SI 2 "const_int_operand" "IL"))) ++ (match_operand:SI 2 "low_reg_or_int_operand" "lIL"))) + (clobber (reg:CC CC_REGNUM))] + "TARGET_THUMB2 && reload_completed" + "* + HOST_WIDE_INT val; + +- val = INTVAL(operands[2]); ++ if (GET_CODE (operands[2]) == CONST_INT) ++ val = INTVAL(operands[2]); ++ else ++ val = 0; ++ + /* We prefer eg. subs rn, rn, #1 over adds rn, rn, #0xffffffff. 
*/ + if (val < 0 && const_ok_for_arm(ARM_SIGN_EXTEND (-val))) + return \"sub%!\\t%0, %1, #%n2\"; +@@ -1124,24 +1112,82 @@ + (set_attr "length" "2")] + ) + +-(define_insn "divsi3" +- [(set (match_operand:SI 0 "s_register_operand" "=r") +- (div:SI (match_operand:SI 1 "s_register_operand" "r") +- (match_operand:SI 2 "s_register_operand" "r")))] +- "TARGET_THUMB2 && arm_arch_hwdiv" +- "sdiv%?\t%0, %1, %2" +- [(set_attr "predicable" "yes")] +-) +- +-(define_insn "udivsi3" +- [(set (match_operand:SI 0 "s_register_operand" "=r") +- (udiv:SI (match_operand:SI 1 "s_register_operand" "r") +- (match_operand:SI 2 "s_register_operand" "r")))] +- "TARGET_THUMB2 && arm_arch_hwdiv" +- "udiv%?\t%0, %1, %2" +- [(set_attr "predicable" "yes")] ++(define_insn "*thumb2_subsi_short" ++ [(set (match_operand:SI 0 "low_register_operand" "=l") ++ (minus:SI (match_operand:SI 1 "low_register_operand" "l") ++ (match_operand:SI 2 "low_register_operand" "l"))) ++ (clobber (reg:CC CC_REGNUM))] ++ "TARGET_THUMB2 && reload_completed" ++ "sub%!\\t%0, %1, %2" ++ [(set_attr "predicable" "yes") ++ (set_attr "length" "2")] ++) ++ ++;; 16-bit encodings of "muls" and "mul". We only use these when ++;; optimizing for size since "muls" is slow on all known ++;; implementations and since "mul" will be generated by ++;; "*arm_mulsi3_v6" anyhow. The assembler will use a 16-bit encoding ++;; for "mul" whenever possible anyhow. ++(define_peephole2 ++ [(set (match_operand:SI 0 "low_register_operand" "") ++ (mult:SI (match_operand:SI 1 "low_register_operand" "") ++ (match_dup 0)))] ++ "TARGET_THUMB2 && optimize_size && peep2_regno_dead_p (0, CC_REGNUM)" ++ [(parallel ++ [(set (match_dup 0) ++ (mult:SI (match_dup 0) (match_dup 1))) ++ (clobber (reg:CC CC_REGNUM))])] ++ "" + ) + ++(define_peephole2 ++ [(set (match_operand:SI 0 "low_register_operand" "") ++ (mult:SI (match_dup 0) ++ (match_operand:SI 1 "low_register_operand" "")))] ++ "TARGET_THUMB2 && optimize_size && peep2_regno_dead_p (0, CC_REGNUM)" ++ [(parallel ++ [(set (match_dup 0) ++ (mult:SI (match_dup 0) (match_dup 1))) ++ (clobber (reg:CC CC_REGNUM))])] ++ "" ++) ++ ++(define_insn "*thumb2_mulsi_short" ++ [(set (match_operand:SI 0 "low_register_operand" "=l") ++ (mult:SI (match_operand:SI 1 "low_register_operand" "%0") ++ (match_operand:SI 2 "low_register_operand" "l"))) ++ (clobber (reg:CC CC_REGNUM))] ++ "TARGET_THUMB2 && optimize_size && reload_completed" ++ "mul%!\\t%0, %2, %0" ++ [(set_attr "predicable" "yes") ++ (set_attr "length" "2") ++ (set_attr "insn" "muls")]) ++ ++(define_insn "*thumb2_mulsi_short_compare0" ++ [(set (reg:CC_NOOV CC_REGNUM) ++ (compare:CC_NOOV ++ (mult:SI (match_operand:SI 1 "register_operand" "%0") ++ (match_operand:SI 2 "register_operand" "l")) ++ (const_int 0))) ++ (set (match_operand:SI 0 "register_operand" "=l") ++ (mult:SI (match_dup 1) (match_dup 2)))] ++ "TARGET_THUMB2 && optimize_size" ++ "muls\\t%0, %2, %0" ++ [(set_attr "length" "2") ++ (set_attr "insn" "muls")]) ++ ++(define_insn "*thumb2_mulsi_short_compare0_scratch" ++ [(set (reg:CC_NOOV CC_REGNUM) ++ (compare:CC_NOOV ++ (mult:SI (match_operand:SI 1 "register_operand" "%0") ++ (match_operand:SI 2 "register_operand" "l")) ++ (const_int 0))) ++ (clobber (match_scratch:SI 0 "=r"))] ++ "TARGET_THUMB2 && optimize_size" ++ "muls\\t%0, %2, %0" ++ [(set_attr "length" "2") ++ (set_attr "insn" "muls")]) ++ + (define_insn "*thumb2_cbz" + [(set (pc) (if_then_else + (eq (match_operand:SI 0 "s_register_operand" "l,?r") +@@ -1185,3 +1231,50 @@ + (const_int 2) + (const_int 8)))] + ) ++ ++;; 16-bit 
complement ++(define_peephole2 ++ [(set (match_operand:SI 0 "low_register_operand" "") ++ (not:SI (match_operand:SI 1 "low_register_operand" "")))] ++ "TARGET_THUMB2 ++ && peep2_regno_dead_p(0, CC_REGNUM)" ++ [(parallel ++ [(set (match_dup 0) ++ (not:SI (match_dup 1))) ++ (clobber (reg:CC CC_REGNUM))])] ++ "" ++) ++ ++(define_insn "*thumb2_one_cmplsi2_short" ++ [(set (match_operand:SI 0 "low_register_operand" "=l") ++ (not:SI (match_operand:SI 1 "low_register_operand" "l"))) ++ (clobber (reg:CC CC_REGNUM))] ++ "TARGET_THUMB2 && reload_completed" ++ "mvn%!\t%0, %1" ++ [(set_attr "predicable" "yes") ++ (set_attr "length" "2")] ++) ++ ++;; 16-bit negate ++(define_peephole2 ++ [(set (match_operand:SI 0 "low_register_operand" "") ++ (neg:SI (match_operand:SI 1 "low_register_operand" "")))] ++ "TARGET_THUMB2 ++ && peep2_regno_dead_p(0, CC_REGNUM)" ++ [(parallel ++ [(set (match_dup 0) ++ (neg:SI (match_dup 1))) ++ (clobber (reg:CC CC_REGNUM))])] ++ "" ++) ++ ++(define_insn "*thumb2_negsi2_short" ++ [(set (match_operand:SI 0 "low_register_operand" "=l") ++ (neg:SI (match_operand:SI 1 "low_register_operand" "l"))) ++ (clobber (reg:CC CC_REGNUM))] ++ "TARGET_THUMB2 && reload_completed" ++ "neg%!\t%0, %1" ++ [(set_attr "predicable" "yes") ++ (set_attr "length" "2")] ++) ++ +--- a/gcc/config/arm/uclinux-eabi.h ++++ b/gcc/config/arm/uclinux-eabi.h +@@ -42,7 +42,8 @@ + while (false) + + #undef SUBTARGET_EXTRA_LINK_SPEC +-#define SUBTARGET_EXTRA_LINK_SPEC " -m armelf_linux_eabi" ++#define SUBTARGET_EXTRA_LINK_SPEC " -m armelf_linux_eabi -elf2flt" \ ++ " --pic-veneer --target2=abs" + + /* We default to the "aapcs-linux" ABI so that enums are int-sized by + default. */ +@@ -62,4 +63,3 @@ + : "=r" (_beg) \ + : "0" (_beg), "r" (_end), "r" (_flg), "r" (_scno)); \ + } +- +--- a/gcc/config/arm/uclinux-elf.h ++++ b/gcc/config/arm/uclinux-elf.h +@@ -83,3 +83,5 @@ + "%{pthread:-lpthread} \ + %{shared:-lc} \ + %{!shared:%{profile:-lc_p}%{!profile:-lc}}" ++ ++#define TARGET_DEFAULT_WORD_RELOCATIONS 1 +--- a/gcc/config/arm/unwind-arm.c ++++ b/gcc/config/arm/unwind-arm.c +@@ -1201,8 +1201,6 @@ __gnu_unwind_pr_common (_Unwind_State st + ucbp->barrier_cache.bitpattern[4] = (_uw) &data[1]; + + if (data[0] & uint32_highbit) +- phase2_call_unexpected_after_unwind = 1; +- else + { + data += rtti_count + 1; + /* Setup for entry to the handler. */ +@@ -1212,6 +1210,8 @@ __gnu_unwind_pr_common (_Unwind_State st + _Unwind_SetGR (context, 0, (_uw) ucbp); + return _URC_INSTALL_CONTEXT; + } ++ else ++ phase2_call_unexpected_after_unwind = 1; + } + if (data[0] & uint32_highbit) + data++; +--- a/gcc/config/arm/unwind-arm.h ++++ b/gcc/config/arm/unwind-arm.h +@@ -232,11 +232,11 @@ extern "C" { + if (!tmp) + return 0; + +-#if defined(linux) || defined(__NetBSD__) ++#if (defined(linux) && !defined(__uClinux__)) || defined(__NetBSD__) + /* Pc-relative indirect. */ + tmp += ptr; + tmp = *(_Unwind_Word *) tmp; +-#elif defined(__symbian__) ++#elif defined(__symbian__) || defined(__uClinux__) + /* Absolute pointer. Nothing more to do. */ + #else + /* Pc-relative pointer. */ +--- a/gcc/config/arm/vfp.md ++++ b/gcc/config/arm/vfp.md +@@ -1,6 +1,6 @@ +-;; ARM VFP coprocessor Machine Description +-;; Copyright (C) 2003, 2005, 2006, 2007 Free Software Foundation, Inc. +-;; Written by CodeSourcery, LLC. ++;; ARM VFP instruction patterns ++;; Copyright (C) 2003, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. ++;; Written by CodeSourcery. + ;; + ;; This file is part of GCC. 
+ ;; +@@ -23,45 +23,20 @@ + [(VFPCC_REGNUM 127)] + ) + +-;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +-;; Pipeline description +-;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +- +-(define_automaton "vfp11") +- +-;; There are 3 pipelines in the VFP11 unit. +-;; +-;; - A 8-stage FMAC pipeline (7 execute + writeback) with forward from +-;; fourth stage for simple operations. +-;; +-;; - A 5-stage DS pipeline (4 execute + writeback) for divide/sqrt insns. +-;; These insns also uses first execute stage of FMAC pipeline. +-;; +-;; - A 4-stage LS pipeline (execute + 2 memory + writeback) with forward from +-;; second memory stage for loads. +- +-;; We do not model Write-After-Read hazards. +-;; We do not do write scheduling with the arm core, so it is only necessary +-;; to model the first stage of each pipeline +-;; ??? Need to model LS pipeline properly for load/store multiple? +-;; We do not model fmstat properly. This could be done by modeling pipelines +-;; properly and defining an absence set between a dummy fmstat unit and all +-;; other vfp units. +- +-(define_cpu_unit "fmac" "vfp11") +- +-(define_cpu_unit "ds" "vfp11") +- +-(define_cpu_unit "vfp_ls" "vfp11") +- +-(define_cpu_unit "fmstat" "vfp11") +- +-(exclusion_set "fmac,ds" "fmstat") +- + ;; The VFP "type" attributes differ from those used in the FPA model. +-;; ffarith Fast floating point insns, e.g. abs, neg, cpy, cmp. +-;; farith Most arithmetic insns. +-;; fmul Double precision multiply. ++;; fcpys Single precision cpy. ++;; ffariths Single precision abs, neg. ++;; ffarithd Double precision abs, neg, cpy. ++;; fadds Single precision add/sub. ++;; faddd Double precision add/sub. ++;; fconsts Single precision load immediate. ++;; fconstd Double precision load immediate. ++;; fcmps Single precision comparison. ++;; fcmpd Double precision comparison. ++;; fmuls Single precision multiply. ++;; fmuld Double precision multiply. ++;; fmacs Single precision multiply-accumulate. ++;; fmacd Double precision multiply-accumulate. + ;; fdivs Single precision sqrt or division. + ;; fdivd Double precision sqrt or division. + ;; f_flag fmstat operation +@@ -71,126 +46,89 @@ + ;; r_2_f Transfer arm to vfp reg. + ;; f_cvt Convert floating<->integral + +-(define_insn_reservation "vfp_ffarith" 4 +- (and (eq_attr "generic_vfp" "yes") +- (eq_attr "type" "ffarith")) +- "fmac") +- +-(define_insn_reservation "vfp_farith" 8 +- (and (eq_attr "generic_vfp" "yes") +- (eq_attr "type" "farith,f_cvt")) +- "fmac") +- +-(define_insn_reservation "vfp_fmul" 9 +- (and (eq_attr "generic_vfp" "yes") +- (eq_attr "type" "fmul")) +- "fmac*2") +- +-(define_insn_reservation "vfp_fdivs" 19 +- (and (eq_attr "generic_vfp" "yes") +- (eq_attr "type" "fdivs")) +- "ds*15") +- +-(define_insn_reservation "vfp_fdivd" 33 +- (and (eq_attr "generic_vfp" "yes") +- (eq_attr "type" "fdivd")) +- "fmac+ds*29") +- +-;; Moves to/from arm regs also use the load/store pipeline. 
+-(define_insn_reservation "vfp_fload" 4 +- (and (eq_attr "generic_vfp" "yes") +- (eq_attr "type" "f_loads,f_loadd,r_2_f")) +- "vfp_ls") +- +-(define_insn_reservation "vfp_fstore" 4 +- (and (eq_attr "generic_vfp" "yes") +- (eq_attr "type" "f_stores,f_stored,f_2_r")) +- "vfp_ls") +- +-(define_insn_reservation "vfp_to_cpsr" 4 +- (and (eq_attr "generic_vfp" "yes") +- (eq_attr "type" "f_flag")) +- "fmstat,vfp_ls*3") +- +-;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +-;; Insn pattern +-;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +- + ;; SImode moves + ;; ??? For now do not allow loading constants into vfp regs. This causes + ;; problems because small constants get converted into adds. + (define_insn "*arm_movsi_vfp" +- [(set (match_operand:SI 0 "nonimmediate_operand" "=r,r,r,r ,m,*t,r,*t,*t, *Uv") +- (match_operand:SI 1 "general_operand" "rI,K,N,mi,r,r,*t,*t,*Uvi,*t"))] ++ [(set (match_operand:SI 0 "nonimmediate_operand" "=rk,r,r,r,rk,m ,*t,r,*t,*t, *Uv") ++ (match_operand:SI 1 "general_operand" "rk, I,K,j,mi,rk,r,*t,*t,*Uvi,*t"))] + "TARGET_ARM && TARGET_VFP && TARGET_HARD_FLOAT + && ( s_register_operand (operands[0], SImode) + || s_register_operand (operands[1], SImode))" + "* + switch (which_alternative) + { +- case 0: ++ case 0: case 1: + return \"mov%?\\t%0, %1\"; +- case 1: +- return \"mvn%?\\t%0, #%B1\"; + case 2: +- return \"movw%?\\t%0, %1\"; ++ return \"mvn%?\\t%0, #%B1\"; + case 3: +- return \"ldr%?\\t%0, %1\"; ++ return \"movw%?\\t%0, %1\"; + case 4: +- return \"str%?\\t%1, %0\"; ++ return \"ldr%?\\t%0, %1\"; + case 5: +- return \"fmsr%?\\t%0, %1\\t%@ int\"; ++ return \"str%?\\t%1, %0\"; + case 6: +- return \"fmrs%?\\t%0, %1\\t%@ int\"; ++ return \"fmsr%?\\t%0, %1\\t%@ int\"; + case 7: ++ return \"fmrs%?\\t%0, %1\\t%@ int\"; ++ case 8: + return \"fcpys%?\\t%0, %1\\t%@ int\"; +- case 8: case 9: ++ case 9: case 10: + return output_move_vfp (operands); + default: + gcc_unreachable (); + } + " + [(set_attr "predicable" "yes") +- (set_attr "type" "*,*,*,load1,store1,r_2_f,f_2_r,ffarith,f_loads,f_stores") +- (set_attr "pool_range" "*,*,*,4096,*,*,*,*,1020,*") +- (set_attr "neg_pool_range" "*,*,*,4084,*,*,*,*,1008,*")] ++ (set_attr "type" "*,*,*,*,load1,store1,r_2_f,f_2_r,fcpys,f_loads,f_stores") ++ (set_attr "neon_type" "*,*,*,*,*,*,neon_mcr,neon_mrc,neon_vmov,*,*") ++ (set_attr "insn" "mov,mov,mvn,mov,*,*,*,*,*,*,*") ++ (set_attr "pool_range" "*,*,*,*,4096,*,*,*,*,1020,*") ++ (set_attr "neg_pool_range" "*,*,*,*,4084,*,*,*,*,1008,*")] + ) + ++;; See thumb2.md:thumb2_movsi_insn for an explanation of the split ++;; high/low register alternatives for loads and stores here. 
+ (define_insn "*thumb2_movsi_vfp" +- [(set (match_operand:SI 0 "nonimmediate_operand" "=r,r,r,r,m,*t,r,*t,*t, *Uv") +- (match_operand:SI 1 "general_operand" "rI,K,N,mi,r,r,*t,*t,*Uvi,*t"))] ++ [(set (match_operand:SI 0 "nonimmediate_operand" "=rk,r,r,r,l,*hk,m,*m,*t,r, *t,*t, *Uv") ++ (match_operand:SI 1 "general_operand" "rk, I,K,j,mi,*mi,l,*hk,r,*t,*t,*Uvi,*t"))] + "TARGET_THUMB2 && TARGET_VFP && TARGET_HARD_FLOAT + && ( s_register_operand (operands[0], SImode) + || s_register_operand (operands[1], SImode))" + "* + switch (which_alternative) + { +- case 0: ++ case 0: case 1: + return \"mov%?\\t%0, %1\"; +- case 1: +- return \"mvn%?\\t%0, #%B1\"; + case 2: +- return \"movw%?\\t%0, %1\"; ++ return \"mvn%?\\t%0, #%B1\"; + case 3: +- return \"ldr%?\\t%0, %1\"; ++ return \"movw%?\\t%0, %1\"; + case 4: +- return \"str%?\\t%1, %0\"; + case 5: +- return \"fmsr%?\\t%0, %1\\t%@ int\"; ++ return \"ldr%?\\t%0, %1\"; + case 6: +- return \"fmrs%?\\t%0, %1\\t%@ int\"; + case 7: ++ return \"str%?\\t%1, %0\"; ++ case 8: ++ return \"fmsr%?\\t%0, %1\\t%@ int\"; ++ case 9: ++ return \"fmrs%?\\t%0, %1\\t%@ int\"; ++ case 10: + return \"fcpys%?\\t%0, %1\\t%@ int\"; +- case 8: case 9: ++ case 11: case 12: + return output_move_vfp (operands); + default: + gcc_unreachable (); + } + " + [(set_attr "predicable" "yes") +- (set_attr "type" "*,*,*,load1,store1,r_2_f,f_2_r,ffarith,f_load,f_store") +- (set_attr "pool_range" "*,*,*,4096,*,*,*,*,1020,*") +- (set_attr "neg_pool_range" "*,*,*, 0,*,*,*,*,1008,*")] ++ (set_attr "type" "*,*,*,*,load1,load1,store1,store1,r_2_f,f_2_r,fcpys,f_load,f_store") ++ (set_attr "neon_type" "*,*,*,*,*,*,*,*,neon_mcr,neon_mrc,neon_vmov,*,*") ++ (set_attr "insn" "mov,mov,mvn,mov,*,*,*,*,*,*,*,*,*") ++ (set_attr "pool_range" "*,*,*,*,1020,4096,*,*,*,*,*,1020,*") ++ (set_attr "neg_pool_range" "*,*,*,*, 0, 0,*,*,*,*,*,1008,*")] + ) + + +@@ -222,7 +160,8 @@ + gcc_unreachable (); + } + " +- [(set_attr "type" "*,load2,store2,r_2_f,f_2_r,ffarith,f_loadd,f_stored") ++ [(set_attr "type" "*,load2,store2,r_2_f,f_2_r,ffarithd,f_loadd,f_stored") ++ (set_attr "neon_type" "*,*,*,neon_mcr_2_mcrr,neon_mrrc,neon_vmov,*,*") + (set_attr "length" "8,8,8,4,4,4,4,4") + (set_attr "pool_range" "*,1020,*,*,*,*,1020,*") + (set_attr "neg_pool_range" "*,1008,*,*,*,*,1008,*")] +@@ -249,12 +188,68 @@ + abort (); + } + " +- [(set_attr "type" "*,load2,store2,r_2_f,f_2_r,ffarith,f_load,f_store") ++ [(set_attr "type" "*,load2,store2,r_2_f,f_2_r,ffarithd,f_load,f_store") ++ (set_attr "neon_type" "*,*,*,neon_mcr_2_mcrr,neon_mrrc,neon_vmov,*,*") + (set_attr "length" "8,8,8,4,4,4,4,4") + (set_attr "pool_range" "*,4096,*,*,*,*,1020,*") + (set_attr "neg_pool_range" "*, 0,*,*,*,*,1008,*")] + ) + ++;; HFmode moves ++(define_insn "*movhf_vfp" ++ [(set (match_operand:HF 0 "nonimmediate_operand" "= t,Um,r,m,t,r,t,r,r") ++ (match_operand:HF 1 "general_operand" " Um, t,m,r,t,r,r,t,F"))] ++ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_NEON_FP16 ++ && ( s_register_operand (operands[0], HFmode) ++ || s_register_operand (operands[1], HFmode))" ++ "* ++ switch (which_alternative) ++ { ++ case 0: /* S register from memory */ ++ return \"vld1.16\\t{%z0}, %A1\"; ++ case 1: /* memory from S register */ ++ return \"vst1.16\\t{%z1}, %A0\"; ++ case 2: /* ARM register from memory */ ++ return \"ldrh\\t%0, %1\\t%@ __fp16\"; ++ case 3: /* memory from ARM register */ ++ return \"strh\\t%1, %0\\t%@ __fp16\"; ++ case 4: /* S register from S register */ ++ return \"fcpys\\t%0, %1\"; ++ case 5: /* ARM register from ARM register */ ++ return \"mov\\t%0, 
%1\\t%@ __fp16\"; ++ case 6: /* S register from ARM register */ ++ return \"fmsr\\t%0, %1\"; ++ case 7: /* ARM register from S register */ ++ return \"fmrs\\t%0, %1\"; ++ case 8: /* ARM register from constant */ ++ { ++ REAL_VALUE_TYPE r; ++ long bits; ++ rtx ops[4]; ++ ++ REAL_VALUE_FROM_CONST_DOUBLE (r, operands[1]); ++ bits = real_to_target (NULL, &r, HFmode); ++ ops[0] = operands[0]; ++ ops[1] = GEN_INT (bits); ++ ops[2] = GEN_INT (bits & 0xff00); ++ ops[3] = GEN_INT (bits & 0x00ff); ++ ++ if (arm_arch_thumb2) ++ output_asm_insn (\"movw\\t%0, %1\", ops); ++ else ++ output_asm_insn (\"mov\\t%0, %2\;orr\\t%0, %0, %3\", ops); ++ return \"\"; ++ } ++ default: ++ gcc_unreachable (); ++ } ++ " ++ [(set_attr "conds" "unconditional") ++ (set_attr "type" "*,*,load1,store1,fcpys,*,r_2_f,f_2_r,*") ++ (set_attr "neon_type" "neon_vld1_1_2_regs,neon_vst1_1_2_regs_vst2_2_regs,*,*,*,*,*,*,*") ++ (set_attr "length" "4,4,4,4,4,4,4,4,8")] ++) ++ + + ;; SFmode moves + ;; Disparage the w<->r cases because reloading an invalid address is +@@ -291,7 +286,8 @@ + " + [(set_attr "predicable" "yes") + (set_attr "type" +- "r_2_f,f_2_r,farith,f_loads,f_stores,load1,store1,ffarith,*") ++ "r_2_f,f_2_r,fconsts,f_loads,f_stores,load1,store1,fcpys,*") ++ (set_attr "neon_type" "neon_mcr,neon_mrc,*,*,*,*,*,neon_vmov,*") + (set_attr "pool_range" "*,*,*,1020,*,4096,*,*,*") + (set_attr "neg_pool_range" "*,*,*,1008,*,4080,*,*,*")] + ) +@@ -327,7 +323,8 @@ + " + [(set_attr "predicable" "yes") + (set_attr "type" +- "r_2_f,f_2_r,farith,f_load,f_store,load1,store1,ffarith,*") ++ "r_2_f,f_2_r,fconsts,f_load,f_store,load1,store1,fcpys,*") ++ (set_attr "neon_type" "neon_mcr,neon_mrc,*,*,*,*,*,neon_vmov,*") + (set_attr "pool_range" "*,*,*,1020,*,4092,*,*,*") + (set_attr "neg_pool_range" "*,*,*,1008,*,0,*,*,*")] + ) +@@ -365,7 +362,8 @@ + } + " + [(set_attr "type" +- "r_2_f,f_2_r,farith,f_loadd,f_stored,load2,store2,ffarith,*") ++ "r_2_f,f_2_r,fconstd,f_loadd,f_stored,load2,store2,ffarithd,*") ++ (set_attr "neon_type" "neon_mcr_2_mcrr,neon_mrrc,*,*,*,*,*,neon_vmov,*") + (set_attr "length" "4,4,4,8,8,4,4,4,8") + (set_attr "pool_range" "*,*,*,1020,*,1020,*,*,*") + (set_attr "neg_pool_range" "*,*,*,1008,*,1008,*,*,*")] +@@ -397,7 +395,8 @@ + } + " + [(set_attr "type" +- "r_2_f,f_2_r,farith,load2,store2,f_load,f_store,ffarith,*") ++ "r_2_f,f_2_r,fconstd,load2,store2,f_load,f_store,ffarithd,*") ++ (set_attr "neon_type" "neon_mcr_2_mcrr,neon_mrrc,*,*,*,*,*,neon_vmov,*") + (set_attr "length" "4,4,4,8,8,4,4,4,8") + (set_attr "pool_range" "*,*,*,4096,*,1020,*,*,*") + (set_attr "neg_pool_range" "*,*,*,0,*,1008,*,*,*")] +@@ -426,7 +425,8 @@ + fmrs%D3\\t%0, %2\;fmrs%d3\\t%0, %1" + [(set_attr "conds" "use") + (set_attr "length" "4,4,8,4,4,8,4,4,8") +- (set_attr "type" "ffarith,ffarith,ffarith,r_2_f,r_2_f,r_2_f,f_2_r,f_2_r,f_2_r")] ++ (set_attr "type" "fcpys,fcpys,fcpys,r_2_f,r_2_f,r_2_f,f_2_r,f_2_r,f_2_r") ++ (set_attr "neon_type" "neon_vmov,neon_vmov,neon_vmov,neon_mcr,neon_mcr,neon_mcr,neon_mrc,neon_mrc,neon_mrc")] + ) + + (define_insn "*thumb2_movsfcc_vfp" +@@ -449,7 +449,8 @@ + ite\\t%D3\;fmrs%D3\\t%0, %2\;fmrs%d3\\t%0, %1" + [(set_attr "conds" "use") + (set_attr "length" "6,6,10,6,6,10,6,6,10") +- (set_attr "type" "ffarith,ffarith,ffarith,r_2_f,r_2_f,r_2_f,f_2_r,f_2_r,f_2_r")] ++ (set_attr "type" "fcpys,fcpys,fcpys,r_2_f,r_2_f,r_2_f,f_2_r,f_2_r,f_2_r") ++ (set_attr "neon_type" "neon_vmov,neon_vmov,neon_vmov,neon_mcr,neon_mcr,neon_mcr,neon_mrc,neon_mrc,neon_mrc")] + ) + + (define_insn "*movdfcc_vfp" +@@ -472,7 +473,8 @@ + fmrrd%D3\\t%Q0, %R0, 
%P2\;fmrrd%d3\\t%Q0, %R0, %P1" + [(set_attr "conds" "use") + (set_attr "length" "4,4,8,4,4,8,4,4,8") +- (set_attr "type" "ffarith,ffarith,ffarith,r_2_f,r_2_f,r_2_f,f_2_r,f_2_r,f_2_r")] ++ (set_attr "type" "ffarithd,ffarithd,ffarithd,r_2_f,r_2_f,r_2_f,f_2_r,f_2_r,f_2_r") ++ (set_attr "neon_type" "neon_vmov,neon_vmov,neon_vmov,neon_mcr_2_mcrr,neon_mcr_2_mcrr,neon_mcr_2_mcrr,neon_mrrc,neon_mrrc,neon_mrrc")] + ) + + (define_insn "*thumb2_movdfcc_vfp" +@@ -495,7 +497,8 @@ + ite\\t%D3\;fmrrd%D3\\t%Q0, %R0, %P2\;fmrrd%d3\\t%Q0, %R0, %P1" + [(set_attr "conds" "use") + (set_attr "length" "6,6,10,6,6,10,6,6,10") +- (set_attr "type" "ffarith,ffarith,ffarith,r_2_f,r_2_f,r_2_f,f_2_r,f_2_r,f_2_r")] ++ (set_attr "type" "ffarithd,ffarithd,ffarithd,r_2_f,r_2_f,r_2_f,f_2_r,f_2_r,f_2_r") ++ (set_attr "neon_type" "neon_vmov,neon_vmov,neon_vmov,neon_mcr_2_mcrr,neon_mcr_2_mcrr,neon_mcr_2_mcrr,neon_mrrc,neon_mrrc,neon_mrrc")] + ) + + +@@ -507,7 +510,7 @@ + "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP" + "fabss%?\\t%0, %1" + [(set_attr "predicable" "yes") +- (set_attr "type" "ffarith")] ++ (set_attr "type" "ffariths")] + ) + + (define_insn "*absdf2_vfp" +@@ -516,7 +519,7 @@ + "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP" + "fabsd%?\\t%P0, %P1" + [(set_attr "predicable" "yes") +- (set_attr "type" "ffarith")] ++ (set_attr "type" "ffarithd")] + ) + + (define_insn "*negsf2_vfp" +@@ -527,7 +530,7 @@ + fnegs%?\\t%0, %1 + eor%?\\t%0, %1, #-2147483648" + [(set_attr "predicable" "yes") +- (set_attr "type" "ffarith")] ++ (set_attr "type" "ffariths")] + ) + + (define_insn_and_split "*negdf2_vfp" +@@ -573,7 +576,7 @@ + " + [(set_attr "predicable" "yes") + (set_attr "length" "4,4,8") +- (set_attr "type" "ffarith")] ++ (set_attr "type" "ffarithd")] + ) + + +@@ -586,7 +589,7 @@ + "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP" + "fadds%?\\t%0, %1, %2" + [(set_attr "predicable" "yes") +- (set_attr "type" "farith")] ++ (set_attr "type" "fadds")] + ) + + (define_insn "*adddf3_vfp" +@@ -596,7 +599,7 @@ + "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP" + "faddd%?\\t%P0, %P1, %P2" + [(set_attr "predicable" "yes") +- (set_attr "type" "farith")] ++ (set_attr "type" "faddd")] + ) + + +@@ -607,7 +610,7 @@ + "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP" + "fsubs%?\\t%0, %1, %2" + [(set_attr "predicable" "yes") +- (set_attr "type" "farith")] ++ (set_attr "type" "fadds")] + ) + + (define_insn "*subdf3_vfp" +@@ -617,7 +620,7 @@ + "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP" + "fsubd%?\\t%P0, %P1, %P2" + [(set_attr "predicable" "yes") +- (set_attr "type" "farith")] ++ (set_attr "type" "faddd")] + ) + + +@@ -653,7 +656,7 @@ + "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP" + "fmuls%?\\t%0, %1, %2" + [(set_attr "predicable" "yes") +- (set_attr "type" "farith")] ++ (set_attr "type" "fmuls")] + ) + + (define_insn "*muldf3_vfp" +@@ -663,7 +666,7 @@ + "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP" + "fmuld%?\\t%P0, %P1, %P2" + [(set_attr "predicable" "yes") +- (set_attr "type" "fmul")] ++ (set_attr "type" "fmuld")] + ) + + +@@ -674,7 +677,7 @@ + "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP" + "fnmuls%?\\t%0, %1, %2" + [(set_attr "predicable" "yes") +- (set_attr "type" "farith")] ++ (set_attr "type" "fmuls")] + ) + + (define_insn "*muldf3negdf_vfp" +@@ -684,7 +687,7 @@ + "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP" + "fnmuld%?\\t%P0, %P1, %P2" + [(set_attr "predicable" "yes") +- (set_attr "type" "fmul")] ++ (set_attr "type" "fmuld")] + ) + + +@@ -696,10 +699,11 @@ + (plus:SF (mult:SF (match_operand:SF 2 
"s_register_operand" "t") + (match_operand:SF 3 "s_register_operand" "t")) + (match_operand:SF 1 "s_register_operand" "0")))] +- "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP" ++ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP ++ && (!arm_tune_marvell_f || optimize_size)" + "fmacs%?\\t%0, %2, %3" + [(set_attr "predicable" "yes") +- (set_attr "type" "farith")] ++ (set_attr "type" "fmacs")] + ) + + (define_insn "*muldf3adddf_vfp" +@@ -707,10 +711,11 @@ + (plus:DF (mult:DF (match_operand:DF 2 "s_register_operand" "w") + (match_operand:DF 3 "s_register_operand" "w")) + (match_operand:DF 1 "s_register_operand" "0")))] +- "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP" ++ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP ++ && (!arm_tune_marvell_f || optimize_size)" + "fmacd%?\\t%P0, %P2, %P3" + [(set_attr "predicable" "yes") +- (set_attr "type" "fmul")] ++ (set_attr "type" "fmacd")] + ) + + ;; 0 = 1 * 2 - 0 +@@ -719,10 +724,11 @@ + (minus:SF (mult:SF (match_operand:SF 2 "s_register_operand" "t") + (match_operand:SF 3 "s_register_operand" "t")) + (match_operand:SF 1 "s_register_operand" "0")))] +- "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP" ++ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP ++ && (!arm_tune_marvell_f || optimize_size)" + "fmscs%?\\t%0, %2, %3" + [(set_attr "predicable" "yes") +- (set_attr "type" "farith")] ++ (set_attr "type" "fmacs")] + ) + + (define_insn "*muldf3subdf_vfp" +@@ -730,10 +736,11 @@ + (minus:DF (mult:DF (match_operand:DF 2 "s_register_operand" "w") + (match_operand:DF 3 "s_register_operand" "w")) + (match_operand:DF 1 "s_register_operand" "0")))] +- "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP" ++ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP ++ && (!arm_tune_marvell_f || optimize_size)" + "fmscd%?\\t%P0, %P2, %P3" + [(set_attr "predicable" "yes") +- (set_attr "type" "fmul")] ++ (set_attr "type" "fmacd")] + ) + + ;; 0 = -(1 * 2) + 0 +@@ -742,10 +749,11 @@ + (minus:SF (match_operand:SF 1 "s_register_operand" "0") + (mult:SF (match_operand:SF 2 "s_register_operand" "t") + (match_operand:SF 3 "s_register_operand" "t"))))] +- "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP" ++ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP ++ && (!arm_tune_marvell_f || optimize_size)" + "fnmacs%?\\t%0, %2, %3" + [(set_attr "predicable" "yes") +- (set_attr "type" "farith")] ++ (set_attr "type" "fmacs")] + ) + + (define_insn "*fmuldf3negdfadddf_vfp" +@@ -753,10 +761,11 @@ + (minus:DF (match_operand:DF 1 "s_register_operand" "0") + (mult:DF (match_operand:DF 2 "s_register_operand" "w") + (match_operand:DF 3 "s_register_operand" "w"))))] +- "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP" ++ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP ++ && (!arm_tune_marvell_f || optimize_size)" + "fnmacd%?\\t%P0, %P2, %P3" + [(set_attr "predicable" "yes") +- (set_attr "type" "fmul")] ++ (set_attr "type" "fmacd")] + ) + + +@@ -767,10 +776,11 @@ + (neg:SF (match_operand:SF 2 "s_register_operand" "t")) + (match_operand:SF 3 "s_register_operand" "t")) + (match_operand:SF 1 "s_register_operand" "0")))] +- "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP" ++ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP ++ && (!arm_tune_marvell_f || optimize_size)" + "fnmscs%?\\t%0, %2, %3" + [(set_attr "predicable" "yes") +- (set_attr "type" "farith")] ++ (set_attr "type" "fmacs")] + ) + + (define_insn "*muldf3negdfsubdf_vfp" +@@ -779,10 +789,11 @@ + (neg:DF (match_operand:DF 2 "s_register_operand" "w")) + (match_operand:DF 3 "s_register_operand" "w")) + (match_operand:DF 1 "s_register_operand" 
"0")))] +- "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP" ++ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP ++ && (!arm_tune_marvell_f || optimize_size)" + "fnmscd%?\\t%P0, %P2, %P3" + [(set_attr "predicable" "yes") +- (set_attr "type" "fmul")] ++ (set_attr "type" "fmacd")] + ) + + +@@ -806,6 +817,24 @@ + (set_attr "type" "f_cvt")] + ) + ++(define_insn "extendhfsf2" ++ [(set (match_operand:SF 0 "s_register_operand" "=t") ++ (float_extend:SF (match_operand:HF 1 "s_register_operand" "t")))] ++ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_NEON_FP16" ++ "vcvtb%?.f32.f16\\t%0, %1" ++ [(set_attr "predicable" "yes") ++ (set_attr "type" "f_cvt")] ++) ++ ++(define_insn "truncsfhf2" ++ [(set (match_operand:HF 0 "s_register_operand" "=t") ++ (float_truncate:HF (match_operand:SF 1 "s_register_operand" "t")))] ++ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_NEON_FP16" ++ "vcvtb%?.f16.f32\\t%0, %1" ++ [(set_attr "predicable" "yes") ++ (set_attr "type" "f_cvt")] ++) ++ + (define_insn "*truncsisf2_vfp" + [(set (match_operand:SI 0 "s_register_operand" "=t") + (fix:SI (fix:SF (match_operand:SF 1 "s_register_operand" "t"))))] +@@ -986,7 +1015,7 @@ + fcmps%?\\t%0, %1 + fcmpzs%?\\t%0" + [(set_attr "predicable" "yes") +- (set_attr "type" "ffarith")] ++ (set_attr "type" "fcmps")] + ) + + (define_insn "*cmpsf_trap_vfp" +@@ -998,7 +1027,7 @@ + fcmpes%?\\t%0, %1 + fcmpezs%?\\t%0" + [(set_attr "predicable" "yes") +- (set_attr "type" "ffarith")] ++ (set_attr "type" "fcmpd")] + ) + + (define_insn "*cmpdf_vfp" +@@ -1010,7 +1039,7 @@ + fcmpd%?\\t%P0, %P1 + fcmpzd%?\\t%P0" + [(set_attr "predicable" "yes") +- (set_attr "type" "ffarith")] ++ (set_attr "type" "fcmpd")] + ) + + (define_insn "*cmpdf_trap_vfp" +@@ -1022,7 +1051,7 @@ + fcmped%?\\t%P0, %P1 + fcmpezd%?\\t%P0" + [(set_attr "predicable" "yes") +- (set_attr "type" "ffarith")] ++ (set_attr "type" "fcmpd")] + ) + + +--- /dev/null ++++ b/gcc/config/arm/vfp11.md +@@ -0,0 +1,92 @@ ++;; ARM VFP11 pipeline description ++;; Copyright (C) 2003, 2005, 2007, 2008 Free Software Foundation, Inc. ++;; Written by CodeSourcery. ++;; ++;; This file is part of GCC. ++ ++;; GCC is free software; you can redistribute it and/or modify it ++;; under the terms of the GNU General Public License as published ++;; by the Free Software Foundation; either version 3, or (at your ++;; option) any later version. ++ ++;; GCC is distributed in the hope that it will be useful, but WITHOUT ++;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY ++;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public ++;; License for more details. ++ ++;; You should have received a copy of the GNU General Public License ++;; along with GCC; see the file COPYING3. If not see ++;; . ++ ++(define_automaton "vfp11") ++ ++;; There are 3 pipelines in the VFP11 unit. ++;; ++;; - A 8-stage FMAC pipeline (7 execute + writeback) with forward from ++;; fourth stage for simple operations. ++;; ++;; - A 5-stage DS pipeline (4 execute + writeback) for divide/sqrt insns. ++;; These insns also uses first execute stage of FMAC pipeline. ++;; ++;; - A 4-stage LS pipeline (execute + 2 memory + writeback) with forward from ++;; second memory stage for loads. ++ ++;; We do not model Write-After-Read hazards. ++;; We do not do write scheduling with the arm core, so it is only necessary ++;; to model the first stage of each pipeline ++;; ??? Need to model LS pipeline properly for load/store multiple? ++;; We do not model fmstat properly. 
This could be done by modeling pipelines ++;; properly and defining an absence set between a dummy fmstat unit and all ++;; other vfp units. ++ ++(define_cpu_unit "fmac" "vfp11") ++ ++(define_cpu_unit "ds" "vfp11") ++ ++(define_cpu_unit "vfp_ls" "vfp11") ++ ++(define_cpu_unit "fmstat" "vfp11") ++ ++(exclusion_set "fmac,ds" "fmstat") ++ ++(define_insn_reservation "vfp_ffarith" 4 ++ (and (eq_attr "generic_vfp" "yes") ++ (eq_attr "type" "fcpys,ffariths,ffarithd,fcmps,fcmpd")) ++ "fmac") ++ ++(define_insn_reservation "vfp_farith" 8 ++ (and (eq_attr "generic_vfp" "yes") ++ (eq_attr "type" "fadds,faddd,fconsts,fconstd,f_cvt,fmuls,fmacs")) ++ "fmac") ++ ++(define_insn_reservation "vfp_fmul" 9 ++ (and (eq_attr "generic_vfp" "yes") ++ (eq_attr "type" "fmuld,fmacd")) ++ "fmac*2") ++ ++(define_insn_reservation "vfp_fdivs" 19 ++ (and (eq_attr "generic_vfp" "yes") ++ (eq_attr "type" "fdivs")) ++ "ds*15") ++ ++(define_insn_reservation "vfp_fdivd" 33 ++ (and (eq_attr "generic_vfp" "yes") ++ (eq_attr "type" "fdivd")) ++ "fmac+ds*29") ++ ++;; Moves to/from arm regs also use the load/store pipeline. ++(define_insn_reservation "vfp_fload" 4 ++ (and (eq_attr "generic_vfp" "yes") ++ (eq_attr "type" "f_loads,f_loadd,r_2_f")) ++ "vfp_ls") ++ ++(define_insn_reservation "vfp_fstore" 4 ++ (and (eq_attr "generic_vfp" "yes") ++ (eq_attr "type" "f_stores,f_stored,f_2_r")) ++ "vfp_ls") ++ ++(define_insn_reservation "vfp_to_cpsr" 4 ++ (and (eq_attr "generic_vfp" "yes") ++ (eq_attr "type" "f_flag")) ++ "fmstat,vfp_ls*3") ++ +--- a/gcc/config/arm/vxworks.h ++++ b/gcc/config/arm/vxworks.h +@@ -113,3 +113,6 @@ along with GCC; see the file COPYING3. + cannot allow arbitrary offsets for shared libraries either. */ + #undef ARM_OFFSETS_MUST_BE_WITHIN_SECTIONS_P + #define ARM_OFFSETS_MUST_BE_WITHIN_SECTIONS_P 1 ++ ++#undef TARGET_DEFAULT_WORD_RELOCATIONS ++#define TARGET_DEFAULT_WORD_RELOCATIONS 1 +--- /dev/null ++++ b/gcc/config/arm/wrs-linux.h +@@ -0,0 +1,76 @@ ++/* Wind River GNU/Linux Configuration. ++ Copyright (C) 2006, 2007, 2008 ++ Free Software Foundation, Inc. ++ ++This file is part of GCC. ++ ++GCC is free software; you can redistribute it and/or modify ++it under the terms of the GNU General Public License as published by ++the Free Software Foundation; either version 3, or (at your option) ++any later version. ++ ++GCC is distributed in the hope that it will be useful, ++but WITHOUT ANY WARRANTY; without even the implied warranty of ++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++GNU General Public License for more details. ++ ++You should have received a copy of the GNU General Public License ++along with GCC; see the file COPYING3. If not see ++. */ ++ ++/* Use the ARM926EJ-S by default. */ ++#undef SUBTARGET_CPU_DEFAULT ++#define SUBTARGET_CPU_DEFAULT TARGET_CPU_arm926ejs ++ ++/* Add a -tiwmmxt option for convenience in generating multilibs. ++ This option generates big-endian IWMMXT code. 
*/ ++#undef CC1_SPEC ++#define CC1_SPEC " \ ++ %{tarm926ej-s: -mcpu=arm926ej-s ; \ ++ tiwmmxt: -mcpu=iwmmxt ; \ ++ tiwmmxt2: -mcpu=iwmmxt ; \ ++ txscale: -mcpu=xscale -mbig-endian ; \ ++ tarm920t: -mcpu=arm920t ; \ ++ tthumb2: %{!mcpu=*:%{!march=*:-march=armv6t2}} -mthumb ; \ ++ tcortex-a8-be8: -mcpu=cortex-a8 -mbig-endian -mfloat-abi=softfp \ ++ -mfpu=neon } \ ++ %{txscale:%{mfloat-abi=softfp:%eXScale VFP multilib not provided}} \ ++ %{tarm920t:%{mfloat-abi=softfp:%eARM920T VFP multilib not provided}} \ ++ %{profile:-p}" ++ ++/* Since the ARM926EJ-S is the default processor, we do not need to ++ provide an explicit multilib for that processor. */ ++#undef MULTILIB_DEFAULTS ++#define MULTILIB_DEFAULTS \ ++ { "tarm926ej-s" } ++ ++/* The GLIBC headers are in /usr/include, relative to the sysroot; the ++ uClibc headers are in /uclibc/usr/include. */ ++#undef SYSROOT_HEADERS_SUFFIX_SPEC ++#define SYSROOT_HEADERS_SUFFIX_SPEC \ ++ "%{muclibc:/uclibc}" ++ ++/* Translate -tiwmmxt appropriately for the assembler. The -meabi=5 ++ option is the relevant part of SUBTARGET_EXTRA_ASM_SPEC in bpabi.h. */ ++#undef SUBTARGET_EXTRA_ASM_SPEC ++#define SUBTARGET_EXTRA_ASM_SPEC \ ++ "%{tiwmmxt2:-mcpu=iwmmxt2} %{tiwmmxt:-mcpu=iwmmxt} %{txscale:-mcpu=xscale -EB} %{tcortex-a8-be8:-mcpu=cortex-a8 -EB} -meabi=5" ++ ++/* Translate -tiwmmxt for the linker. */ ++#undef SUBTARGET_EXTRA_LINK_SPEC ++#define SUBTARGET_EXTRA_LINK_SPEC \ ++ " %{tiwmmxt:-m armelf_linux_eabi ; \ ++ txscale:-m armelfb_linux_eabi ; \ ++ tcortex-a8-be8:-m armelfb_linux_eabi %{!r:--be8} ; \ ++ : -m armelf_linux_eabi}" ++ ++/* The various C libraries each have their own subdirectory. */ ++#undef SYSROOT_SUFFIX_SPEC ++#define SYSROOT_SUFFIX_SPEC \ ++ "%{muclibc:/uclibc}%{tiwmmxt:/tiwmmxt ; \ ++ tiwmmxt2:/tiwmmxt ; \ ++ txscale:/txscale ; \ ++ tarm920t:/tarm920t ; \ ++ tthumb2:/thumb2 ; \ ++ tcortex-a8-be8:/cortex-a8-be8}%{!tthumb2:%{!tcortex-a8-be8:%{mfloat-abi=softfp:/softfp}}}" ++ +--- /dev/null ++++ b/gcc/config/i386/cs-linux.h +@@ -0,0 +1,41 @@ ++/* Sourcery G++ IA32 GNU/Linux Configuration. ++ Copyright (C) 2007 ++ Free Software Foundation, Inc. ++ ++This file is part of GCC. ++ ++GCC is free software; you can redistribute it and/or modify ++it under the terms of the GNU General Public License as published by ++the Free Software Foundation; either version 3, or (at your option) ++any later version. ++ ++GCC is distributed in the hope that it will be useful, ++but WITHOUT ANY WARRANTY; without even the implied warranty of ++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++GNU General Public License for more details. ++ ++You should have received a copy of the GNU General Public License ++along with GCC; see the file COPYING3. If not see ++. */ ++ ++/* This configuration may be used either with the system glibc (in ++ system32 and system64 subdirectories) or with the included glibc ++ (in the sgxx-glibc subdirectory). */ ++ ++#undef SYSROOT_SUFFIX_SPEC ++#define SYSROOT_SUFFIX_SPEC \ ++ "%{msgxx-glibc:/sgxx-glibc ; \ ++ m64:/system64 ; \ ++ mrhel3:/system64 ; \ ++ mrh73:/system32-old ; \ ++ :/system32}" ++ ++#undef SYSROOT_HEADERS_SUFFIX_SPEC ++#define SYSROOT_HEADERS_SUFFIX_SPEC SYSROOT_SUFFIX_SPEC ++ ++/* See mips/wrs-linux.h for details on this use of ++ STARTFILE_PREFIX_SPEC. 
*/ ++#undef STARTFILE_PREFIX_SPEC ++#define STARTFILE_PREFIX_SPEC \ ++ "%{m64: /usr/local/lib64/ /lib64/ /usr/lib64/} \ ++ %{!m64: /usr/local/lib/ /lib/ /usr/lib/}" +--- /dev/null ++++ b/gcc/config/i386/cs-linux.opt +@@ -0,0 +1,11 @@ ++; Additional options for Sourcery G++. ++ ++mrh73 ++Target Undocumented ++ ++mrhel3 ++Target Undocumented ++ ++msgxx-glibc ++Target ++Use included version of GLIBC +--- a/gcc/config/i386/i386.c ++++ b/gcc/config/i386/i386.c +@@ -2700,6 +2700,18 @@ override_options (void) + target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS; + } + ++ /* If stack probes are required, the space used for large function ++ arguments on the stack must also be probed, so enable ++ -maccumulate-outgoing-args so this happens in the prologue. */ ++ if (TARGET_STACK_PROBE ++ && !(target_flags & MASK_ACCUMULATE_OUTGOING_ARGS)) ++ { ++ if (target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS) ++ warning (0, "stack probing requires -maccumulate-outgoing-args " ++ "for correctness"); ++ target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS; ++ } ++ + /* For sane SSE instruction set generation we need fcomi instruction. + It is safe to enable all CMOVE instructions. */ + if (TARGET_SSE) +--- a/gcc/config/i386/i386.h ++++ b/gcc/config/i386/i386.h +@@ -476,13 +476,23 @@ extern const char *host_detect_local_cpu + #define HAVE_LOCAL_CPU_DETECT + #endif + ++#if TARGET_64BIT_DEFAULT ++#define OPT_ARCH64 "!m32" ++#define OPT_ARCH32 "m32" ++#else ++#define OPT_ARCH64 "m64" ++#define OPT_ARCH32 "!m64" ++#endif ++ + /* Support for configure-time defaults of some command line options. + The order here is important so that -march doesn't squash the + tune or cpu values. */ + #define OPTION_DEFAULT_SPECS \ + {"tune", "%{!mtune=*:%{!mcpu=*:%{!march=*:-mtune=%(VALUE)}}}" }, \ + {"cpu", "%{!mtune=*:%{!mcpu=*:%{!march=*:-mtune=%(VALUE)}}}" }, \ +- {"arch", "%{!march=*:-march=%(VALUE)}"} ++ {"arch", "%{!march=*:-march=%(VALUE)}"}, \ ++ {"arch32", "%{" OPT_ARCH32 ":%{!march=*:-march=%(VALUE)}}"}, \ ++ {"arch64", "%{" OPT_ARCH64 ":%{!march=*:-march=%(VALUE)}}"}, + + /* Specs for the compiler proper */ + +--- a/gcc/config/i386/mingw32.h ++++ b/gcc/config/i386/mingw32.h +@@ -79,7 +79,7 @@ along with GCC; see the file COPYING3. + /* Include in the mingw32 libraries with libgcc */ + #undef LIBGCC_SPEC + #define LIBGCC_SPEC \ +- "%{mthreads:-lmingwthrd} -lmingw32 -lgcc -lmoldname -lmingwex -lmsvcrt" ++ "-lgcc %{mthreads:-lmingwthrd} -lmingw32 -lgcc -lmoldname -lmingwex -lmsvcrt" + + #undef STARTFILE_SPEC + #define STARTFILE_SPEC "%{shared|mdll:dllcrt2%O%s} \ +--- /dev/null ++++ b/gcc/config/i386/t-cs-linux +@@ -0,0 +1,25 @@ ++# Sourcery G++ IA32 GNU/Linux Configuration. ++# Copyright (C) 2007 ++# Free Software Foundation, Inc. ++# ++# This file is part of GCC. ++# ++# GCC is free software; you can redistribute it and/or modify ++# it under the terms of the GNU General Public License as published by ++# the Free Software Foundation; either version 3, or (at your option) ++# any later version. ++# ++# GCC is distributed in the hope that it will be useful, ++# but WITHOUT ANY WARRANTY; without even the implied warranty of ++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++# GNU General Public License for more details. ++# ++# You should have received a copy of the GNU General Public License ++# along with GCC; see the file COPYING3. If not see ++# . 
++ ++MULTILIB_OPTIONS = m64/m32 msgxx-glibc/mrh73/mrhel3 ++MULTILIB_DIRNAMES = 64 32 sgxx-glibc rh73 rhel3 ++MULTILIB_OSDIRNAMES = ../lib64 ../lib sgxx-glibc rh73 rhel3 ++MULTILIB_EXCEPTIONS = m64/mrh73 m64/mrhel3 ++ +--- a/gcc/config/i386/x-mingw32 ++++ b/gcc/config/i386/x-mingw32 +@@ -8,6 +8,6 @@ local_includedir=$(libsubdir)/$(unlibsub + WERROR_FLAGS += -Wno-format + + host-mingw32.o : $(srcdir)/config/i386/host-mingw32.c $(CONFIG_H) $(SYSTEM_H) \ +- coretypes.h hosthooks.h hosthooks-def.h toplev.h diagnostic.h $(HOOKS_H) ++ coretypes.h hosthooks.h hosthooks-def.h toplev.h $(DIAGNOSTIC_H) $(HOOKS_H) + $(CC) -c $(ALL_CFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) \ + $(srcdir)/config/i386/host-mingw32.c +--- a/gcc/config/m68k/cf.md ++++ b/gcc/config/m68k/cf.md +@@ -1,6 +1,6 @@ +-;; ColdFire V2 DFA description. ++;; ColdFire V1, V2, V3 and V4/V4e DFA description. + ;; Copyright (C) 2007 Free Software Foundation, Inc. +-;; Contributed by CodeSourcery Inc. ++;; Contributed by CodeSourcery Inc., www.codesourcery.com + ;; + ;; This file is part of GCC. + ;; +@@ -19,661 +19,2236 @@ + ;; the Free Software Foundation, 51 Franklin Street, Fifth Floor, + ;; Boston, MA 02110-1301, USA. + +-;; ??? To let genattrtab live, implement this attribute in C. +-(define_attr "type2" +- "alu, alu_l, bcc, bra, call, jmp, lea, move, move_l, mul, pea, rts, unlk, +- unknown" +- (symbol_ref "m68k_sched_attr_type2 (insn)")) +- + ;; Instruction Buffer +-(define_automaton "cf_v2_ib") ++(define_automaton "cfv123_ib") + +-;; If one of these cpu units is occupied, that means that corresponding +-;; word in the buffer is empty. +-(define_cpu_unit "cf_v2_ib_w0, cf_v2_ib_w1, cf_v2_ib_w2, cf_v2_ib_w3, cf_v2_ib_w4, cf_v2_ib_w5" "cf_v2_ib") +- +-(final_presence_set "cf_v2_ib_w1, cf_v2_ib_w2, cf_v2_ib_w3, cf_v2_ib_w4, cf_v2_ib_w5" "cf_v2_ib_w0") +-(final_presence_set "cf_v2_ib_w2, cf_v2_ib_w3, cf_v2_ib_w4, cf_v2_ib_w5" "cf_v2_ib_w1") +-(final_presence_set "cf_v2_ib_w3, cf_v2_ib_w4, cf_v2_ib_w5" "cf_v2_ib_w2") +-(final_presence_set "cf_v2_ib_w4, cf_v2_ib_w5" "cf_v2_ib_w3") +-(final_presence_set "cf_v2_ib_w5" "cf_v2_ib_w4") +- +-;; Occupy 1 word. +-(define_reservation "cf_v2_ib1" "cf_v2_ib_w0|cf_v2_ib_w1|cf_v2_ib_w2|cf_v2_ib_w3|cf_v2_ib_w4|cf_v2_ib_w5") +- +-;; Occupy 2 words. +-(define_reservation "cf_v2_ib2" "(cf_v2_ib_w0+cf_v2_ib_w1)|(cf_v2_ib_w1+cf_v2_ib_w2)|(cf_v2_ib_w2+cf_v2_ib_w3)|(cf_v2_ib_w3+cf_v2_ib_w4)|(cf_v2_ib_w4+cf_v2_ib_w5)") +- +-;; Occupy 3 words. +-(define_reservation "cf_v2_ib3" "(cf_v2_ib_w0+cf_v2_ib_w1+cf_v2_ib_w2)|(cf_v2_ib_w1+cf_v2_ib_w2+cf_v2_ib_w3)|(cf_v2_ib_w2+cf_v2_ib_w3+cf_v2_ib_w4)|(cf_v2_ib_w3+cf_v2_ib_w4+cf_v2_ib_w5)") +- +-;; Reservation to subscribe 1 word in the instruction buffer. If a given +-;; word in the instruction buffer is subscribed, that means it is empty. +-;; This reservation is used at the start of each cycle to setup the number +-;; of prefetched instruction words in the instruction buffer. +-;; At each cycle, given that memory bus is available (i.e. there is no +-;; pending memory operation), IFP prefetches two instruction words into IB. +-(define_insn_reservation "cf_v2_ib" 0 +- (and (eq_attr "cpu" "cf_v2") ++;; These pseudo units are used to model instruction buffer of ColdFire cores. ++;; Instruction of size N can be issued only when cf_ib_wN is available. ++(define_cpu_unit "cf_ib_w1, cf_ib_w2, cf_ib_w3" "cfv123_ib") ++ ++;; Instruction occupies 1 word in the instruction buffer. ++(define_reservation "cf_ib1" "cf_ib_w1") ++;; Instruction occupies 2 words in the instruction buffer. 
++(define_reservation "cf_ib2" "cf_ib_w1+cf_ib_w2") ++;; Instruction occupies 3 words in the instruction buffer. ++(define_reservation "cf_ib3" "cf_ib_w1+cf_ib_w2+cf_ib_w3") ++ ++;; This reservation is used at the start of each cycle to setup the maximal ++;; length of instruction that can be issued on current cycle. ++;; E.g., when this reservation is applied for the first time, cf_ib_w3 ++;; resource is marked busy, thus filtering out all 3-word insns. ++;; ++;; This reservation requires deterministic automaton. ++;; ++;; At each cycle, given that memory bus is available (i.e., there is no ++;; pending memory operation), instruction fetch pipeline (IFP) prefetches ++;; two instruction words into instruction buffer (IB). ++(define_insn_reservation "cf_ib1" 0 ++ (and (eq_attr "cpu" "cfv1,cfv2,cfv3") + (eq_attr "type" "ib")) +- "cf_v2_ib1") ++ "cf_ib_w3|cf_ib_w2|cf_ib_w1") + + ;; Operand Execution Pipeline +-(define_automaton "cf_v2_oep") ++(define_automaton "cfv123_oep") + +-(define_cpu_unit "cf_v2_dsoc, cf_v2_agex" "cf_v2_oep") ++(define_cpu_unit "cf_dsoc,cf_agex" "cfv123_oep") + + ;; A memory unit that is reffered to as 'certain hardware resources' in + ;; ColdFire reference manuals. This unit remains occupied for two cycles + ;; after last dsoc cycle of a store - hence there is a 2 cycle delay between + ;; two consecutive stores. +-(define_automaton "cf_v2_chr") ++(define_automaton "cfv123_chr") + +-(define_cpu_unit "cf_v2_chr" "cf_v2_chr") ++(define_cpu_unit "cf_chr" "cfv123_chr") + + ;; Memory bus +-(define_automaton "cf_v2_mem") ++(define_automaton "cfv123_mem") + + ;; When memory bus is subscribed, that implies that instruction buffer won't +-;; get its portion this cycle. To model that we query if cf_v2_mem unit is ++;; get its portion this cycle. To model that we query if cf_mem unit is + ;; subscribed and adjust number of prefetched instruction words accordingly. + ;; +-(define_query_cpu_unit "cf_v2_mem" "cf_v2_mem") ++(define_query_cpu_unit "cf_mem1, cf_mem2" "cfv123_mem") ++ ++(define_reservation "cf_mem" "cf_mem1+cf_mem2") ++ ++(define_automaton "cf_mac") ++ ++(define_cpu_unit "cf_mac1,cf_mac2,cf_mac3,cf_mac4" ++ "cf_mac") ++ ++(define_automaton "cfv123_guess") ++ ++(define_query_cpu_unit "cfv123_guess" "cfv123_guess") + + ;; Register to register move. + ;; Takes 1 cycle. +-(define_reservation "cf_v2_move_00" +- "cf_v2_dsoc+cf_v2_agex") ++(define_reservation "cfv123_alu_00" ++ "cf_dsoc,cf_agex") + + ;; Load from a memory location. + ;; Takes 3 cycles. +-(define_reservation "cf_v2_move_10" +- "cf_v2_dsoc,cf_v2_agex,cf_v2_dsoc+cf_v2_mem,cf_v2_agex") +- +-;; Long load from a memory location. ++(define_reservation "cfv12_alu_10" ++ "cf_dsoc,cf_agex,cf_dsoc+cf_mem,cf_agex") + ;; Takes 2 cycles. +-(define_reservation "cf_v2_move_l_10" +- "cf_v2_dsoc+cf_v2_agex,cf_v2_dsoc+cf_v2_mem,cf_v2_agex") ++(define_reservation "cfv12_omove_10" ++ "cf_dsoc+cf_agex,cf_dsoc+cf_mem,cf_agex") ++;; Takes 4 cycles. ++(define_reservation "cfv3_alu_10" ++ "cf_dsoc,cf_agex,cf_dsoc+cf_mem1,cf_dsoc+cf_mem2,cf_agex") ++;; Takes 3 cycles. ++(define_reservation "cfv3_omove_10" ++ "cf_dsoc+cf_agex,cf_dsoc+cf_mem1,cf_dsoc+cf_mem2,cf_agex") + + ;; Load from an indexed location. + ;; Takes 4 cycles. +-(define_reservation "cf_v2_move_i0" +- "cf_v2_dsoc,cf_v2_agex,cf_v2_agex,cf_v2_dsoc+cf_v2_mem,cf_v2_agex") +- +-;; Long load from an indexed location. ++(define_reservation "cfv12_alu_i0" ++ "cf_dsoc,cf_agex,cf_agex,cf_dsoc+cf_mem,cf_agex") + ;; Takes 3 cycles. 
+-(define_reservation "cf_v2_move_l_i0" +- "cf_v2_dsoc+cf_v2_agex,cf_v2_agex,cf_v2_dsoc+cf_v2_mem,cf_v2_agex") ++(define_reservation "cfv12_omove_i0" ++ "cf_dsoc+cf_agex,cf_agex,cf_dsoc+cf_mem,cf_agex") ++;; Takes 5 cycles. ++(define_reservation "cfv3_alu_i0" ++ "cf_dsoc,cf_agex,cf_agex,cf_dsoc+cf_mem1,cf_dsoc+cf_mem2,cf_agex") ++;; Takes 4 cycles. ++(define_reservation "cfv3_omove_i0" ++ "cf_dsoc+cf_agex,cf_agex,cf_dsoc+cf_mem1,cf_dsoc+cf_mem2,cf_agex") + + ;; Store to a memory location. + ;; Takes 1 cycle. +-(define_reservation "cf_v2_move_01" +- "cf_v2_dsoc+cf_v2_agex+cf_v2_chr,cf_v2_mem+cf_v2_chr,cf_v2_chr") ++(define_reservation "cfv12_alu_01" ++ "cf_dsoc+cf_agex+cf_chr,cf_mem+cf_chr,cf_chr") ++;; Takes 1 cycle. ++(define_reservation "cfv3_alu_01" ++ "cf_dsoc+cf_agex+cf_chr,cf_mem1+cf_chr,cf_mem2+cf_chr") + + ;; Store to an indexed location. +-;; Takes 2 cycle. +-(define_reservation "cf_v2_move_0i" +- "cf_v2_dsoc+cf_v2_agex,cf_v2_agex+cf_v2_chr,cf_v2_mem+cf_v2_chr,cf_v2_chr") ++;; Takes 2 cycles. ++(define_reservation "cfv12_alu_0i" ++ "cf_dsoc+cf_agex,cf_agex+cf_chr,cf_mem+cf_chr,cf_chr") ++;; Takes 2 cycles. ++(define_reservation "cfv3_alu_0i" ++ "cf_dsoc+cf_agex,cf_agex+cf_chr,cf_mem1+cf_chr,cf_mem2+cf_chr") + + ;; Load from a memory location and store to a memory location. + ;; Takes 3 cycles +-(define_reservation "cf_v2_move_11" +- "cf_v2_dsoc,cf_v2_agex,cf_v2_dsoc+cf_v2_agex+cf_v2_mem+cf_v2_chr,cf_v2_mem+cf_v2_chr,cf_v2_chr") +- +-;; Long load from a memory location and store to a memory location. ++(define_reservation "cfv12_alu_11" ++ "cf_dsoc,cf_agex,cf_dsoc+cf_mem,cf_agex+cf_chr,cf_mem+cf_chr,cf_chr") + ;; Takes 2 cycles. +-(define_reservation "cf_v2_move_l_11" +- "cf_v2_dsoc+cf_v2_agex,cf_v2_dsoc+cf_v2_agex+cf_v2_mem+cf_v2_chr,cf_v2_mem+cf_v2_chr,cf_v2_chr") ++(define_reservation "cfv12_omove_11" ++ "cf_dsoc+cf_agex,cf_dsoc+cf_mem,cf_agex+cf_chr,cf_mem+cf_chr,cf_chr") ++;; Takes 4 cycles ++(define_reservation "cfv3_alu_11" ++ "cf_dsoc,cf_agex,cf_dsoc+cf_mem1,cf_dsoc+cf_mem2,cf_agex+cf_chr,cf_mem1+cf_chr,cf_mem2+cf_chr") ++;; Takes 3 cycles. ++(define_reservation "cfv3_omove_11" ++ "cf_dsoc+cf_agex,cf_dsoc+cf_mem1,cf_dsoc+cf_mem2,cf_agex+cf_chr,cf_mem1+cf_chr,cf_mem2+cf_chr") + + ;; Load from an indexed location and store to a memory location. + ;; Takes 4 cycles. +-(define_reservation "cf_v2_move_i1" +- "cf_v2_dsoc,cf_v2_agex,cf_v2_agex,cf_v2_dsoc+cf_v2_agex+cf_v2_mem+cf_v2_chr,cf_v2_mem+cf_v2_chr,cf_v2_chr") +- +-;; Long load from an indexed location and store to a memory location. ++(define_reservation "cfv12_alu_i1" ++ "cf_dsoc,cf_agex,cf_agex,cf_dsoc+cf_mem,cf_agex+cf_chr,cf_mem+cf_chr,cf_chr") + ;; Takes 3 cycles. +-(define_reservation "cf_v2_move_l_i1" +- "cf_v2_dsoc+cf_v2_agex,cf_v2_agex,cf_v2_dsoc+cf_v2_agex+cf_v2_mem+cf_v2_chr,cf_v2_mem+cf_v2_chr,cf_v2_chr") ++(define_reservation "cfv12_omove_i1" ++ "cf_dsoc+cf_agex,cf_agex,cf_dsoc+cf_mem,cf_agex+cf_chr,cf_mem+cf_chr,cf_chr") ++;; Takes 5 cycles. ++(define_reservation "cfv3_alu_i1" ++ "cf_dsoc,cf_agex,cf_agex,cf_dsoc+cf_mem1,cf_dsoc+cf_mem2,cf_agex+cf_chr,cf_mem1+cf_chr,cf_mem2+cf_chr") ++;; Takes 4 cycles. ++(define_reservation "cfv3_omove_i1" ++ "cf_dsoc+cf_agex,cf_agex,cf_dsoc+cf_mem1,cf_dsoc+cf_mem2,cf_agex+cf_chr,cf_mem1+cf_chr,cf_mem2+cf_chr") + + ;; Load from a memory location and store to an indexed location. + ;; Takes 4 cycles. 
+-(define_reservation "cf_v2_move_1i" +- "cf_v2_dsoc,cf_v2_agex,cf_v2_dsoc+cf_v2_agex+cf_v2_mem,cf_v2_agex,cf_v2_mem") +- +-;; Long load from a memory location and store to an indexed location. ++(define_reservation "cfv12_alu_1i" ++ "cf_dsoc,cf_agex,cf_dsoc+cf_mem,cf_agex,cf_agex+cf_chr,cf_mem+cf_chr,cf_chr") + ;; Takes 3 cycles. +-(define_reservation "cf_v2_move_l_1i" +- "cf_v2_dsoc+cf_v2_agex,cf_v2_dsoc+cf_v2_agex+cf_v2_mem,cf_v2_agex,cf_v2_mem") ++(define_reservation "cfv12_omove_1i" ++ "cf_dsoc+cf_agex,cf_dsoc+cf_mem,cf_agex,cf_agex+cf_chr,cf_mem+cf_chr,cf_chr") ++;; Takes 5 cycles. ++(define_reservation "cfv3_alu_1i" ++ "cf_dsoc,cf_agex,cf_dsoc+cf_mem1,cf_dsoc+cf_mem2,cf_agex,cf_agex+cf_chr,cf_mem1+cf_chr,cf_mem2+cf_chr") ++;; Takes 4 cycles. ++(define_reservation "cfv3_omove_1i" ++ "cf_dsoc+cf_agex,cf_dsoc+cf_mem1,cf_dsoc+cf_mem2,cf_agex,cf_agex+cf_chr,cf_mem1+cf_chr,cf_mem2+cf_chr") + + ;; Lea operation for a memory location. + ;; Takes 1 cycle. +-(define_reservation "cf_v2_lea_10" +- "cf_v2_dsoc+cf_v2_agex") ++(define_reservation "cfv123_lea_10" ++ "cf_dsoc,cf_agex") + + ;; Lea operation for an indexed location. + ;; Takes 2 cycles. +-(define_reservation "cf_v2_lea_i0" +- "cf_v2_dsoc+cf_v2_agex,cf_v2_agex") ++(define_reservation "cfv123_lea_i0" ++ "cf_dsoc,cf_agex,cf_agex") + + ;; Pea operation for a memory location. +-;; Takes 2 cycle. +-(define_reservation "cf_v2_pea_11" +- "cf_v2_dsoc+cf_v2_agex,cf_v2_agex+cf_v2_chr,cf_v2_mem+cf_v2_chr,cf_v2_chr") ++;; Takes 2 cycles. ++(define_reservation "cfv12_pea_11" ++ "cf_dsoc,cf_agex,cf_agex+cf_chr,cf_mem+cf_chr,cf_chr") ++;; Takes 2 cycles. ++(define_reservation "cfv3_pea_11" ++ "cf_dsoc,cf_agex,cf_agex+cf_chr,cf_mem1+cf_chr,cf_mem2+cf_chr") + + ;; Pea operation for an indexed location. + ;; Takes 3 cycles. +-(define_reservation "cf_v2_pea_i1" +- "cf_v2_dsoc+cf_v2_agex,cf_v2_agex,cf_v2_agex+cf_v2_chr,cf_v2_mem+cf_v2_chr,cf_v2_chr") +- +-(define_automaton "cf_v2_emac") ++(define_reservation "cfv12_pea_i1" ++ "cf_dsoc,cf_agex,cf_agex,cf_agex+cf_chr,cf_mem+cf_chr,cf_chr") ++;; Takes 3 cycles. ++(define_reservation "cfv3_pea_i1" ++ "cf_dsoc,cf_agex,cf_agex,cf_agex+cf_chr,cf_mem1+cf_chr,cf_mem2+cf_chr") + +-(define_cpu_unit "cf_v2_emac1,cf_v2_emac2,cf_v2_emac3,cf_v2_emac4" +- "cf_v2_emac") ++;; Long multiplication with no mac. ++;; Takes 9-18 cycles. ++(define_reservation "cfv123_mul_l_00" ++ "cf_dsoc,(cf_agex+cf_dsoc)*17,cf_agex") ++ ++;; Word multiplication with no mac. ++;; Takes 9 cycles. ++(define_reservation "cfv123_mul_w_00" ++ "cf_dsoc,(cf_agex+cf_dsoc)*8,cf_agex") ++ ++;; Long multiplication with no mac. ++;; Takes 11-20 cycles. ++(define_reservation "cfv12_mul_l_10" ++ "cf_dsoc,cf_agex,cf_dsoc+cf_mem,(cf_agex+cf_dsoc)*17,cf_agex") ++;; Takes 12-21 cycles. ++(define_reservation "cfv3_mul_l_10" ++ "cf_dsoc,cf_agex,cf_dsoc+cf_mem1,cf_dsoc+cf_mem2,(cf_agex+cf_dsoc)*17,cf_agex") ++ ++;; Word multiplication with no mac. ++;; Takes 11 cycles. ++(define_reservation "cfv12_mul_w_10" ++ "cf_dsoc,cf_agex,cf_dsoc+cf_mem,(cf_agex+cf_dsoc)*8,cf_agex") ++;; Takes 12 cycles. ++(define_reservation "cfv3_mul_w_10" ++ "cf_dsoc,cf_agex,cf_dsoc+cf_mem1,cf_dsoc+cf_mem2,(cf_agex+cf_dsoc)*8,cf_agex") ++ ++;; Word multiplication with no mac. ++;; Takes 12 cycles. ++(define_reservation "cfv12_mul_w_i0" ++ "cf_dsoc,cf_agex,cf_agex,cf_dsoc+cf_mem,(cf_agex+cf_dsoc)*8,cf_agex") ++;; Takes 13 cycles. 
++(define_reservation "cfv3_mul_w_i0" ++ "cf_dsoc,cf_agex,cf_agex,cf_dsoc+cf_mem1,cf_dsoc+cf_mem2,(cf_agex+cf_dsoc)*8,cf_agex") ++ ++;; Long multiplication with mac. ++;; Takes 5 cycles. ++(define_reservation "cfv123_mac_l_00" ++ "cf_dsoc,cf_agex,cf_mac1,cf_mac2,cf_mac3,cf_mac4") + +-;; Mul operation with register operands. +-;; Takes 4 cycles. +-(define_reservation "cf_v2_mul_00" +- "cf_v2_dsoc,cf_v2_agex+cf_v2_emac1,cf_v2_emac2,cf_v2_emac3,cf_v2_emac4") ++;; Word multiplication with mac. ++;; Takes 3 cycles. ++(define_reservation "cfv123_mac_w_00" ++ "cf_dsoc,cf_agex,cf_mac1,cf_mac2") + +-;; Mul operation with implicit load from a memory location. ++;; Long multiplication with mac. ++;; Takes 7 cycles. ++(define_reservation "cfv12_mac_l_10" ++ "cf_dsoc,cf_agex,cf_dsoc+cf_mem,cf_agex,cf_mac1,cf_mac2,cf_mac3,cf_mac4") ++;; Takes 8 cycles. ++(define_reservation "cfv3_mac_l_10" ++ "cf_dsoc,cf_agex,cf_dsoc+cf_mem1,cf_dsoc+cf_mem2,cf_agex,cf_mac1,cf_mac2,cf_mac3,cf_mac4") ++ ++;; Word multiplication with mac. ++;; Takes 5 cycles. ++(define_reservation "cfv12_mac_w_10" ++ "cf_dsoc,cf_agex,cf_dsoc+cf_mem,cf_agex,cf_mac1,cf_mac2") + ;; Takes 6 cycles. +-(define_reservation "cf_v2_mul_10" +- "cf_v2_dsoc,cf_v2_agex,cf_v2_dsoc+cf_v2_mem,cf_v2_agex+cf_v2_emac1,cf_v2_emac2,cf_v2_emac3,cf_v2_emac4") ++(define_reservation "cfv3_mac_w_10" ++ "cf_dsoc,cf_agex,cf_dsoc+cf_mem1,cf_dsoc+cf_mem2,cf_agex,cf_mac1,cf_mac2") + +-;; Mul operation with implicit load from an indexed location. ++;; Word multiplication with mac. ++;; Takes 6 cycles. ++(define_reservation "cfv12_mac_w_i0" ++ "cf_dsoc,cf_agex,cf_agex,cf_dsoc+cf_mem,cf_agex,cf_mac1,cf_mac2") + ;; Takes 7 cycles. +-(define_reservation "cf_v2_mul_i0" +- "cf_v2_dsoc,cf_v2_agex,cf_v2_agex,cf_v2_dsoc+cf_v2_mem,cf_v2_agex+cf_v2_emac1,cf_v2_emac2,cf_v2_emac3,cf_v2_emac4") +- +-;; Instruction reservations. +- +-;; Below reservations are simple derivation from the above reservations. +-;; Each reservation from the above expands into 3 reservations below - one +-;; for each instruction size. +-;; A number in the end of reservation's name is the size of the instruction. 
+- +-(define_insn_reservation "cf_v2_move_00_1" 1 +- (and (and (and (eq_attr "cpu" "cf_v2") +- (eq_attr "type2" "alu,alu_l,move,move_l")) +- (eq (symbol_ref "get_attr_size (insn)") (const_int 1))) +- (eq_attr "op_mem" "00")) +- "cf_v2_ib1+cf_v2_move_00") +- +-(define_insn_reservation "cf_v2_move_00_2" 1 +- (and (and (and (eq_attr "cpu" "cf_v2") +- (eq_attr "type2" "alu,alu_l,move,move_l")) +- (eq (symbol_ref "get_attr_size (insn)") (const_int 2))) +- (eq_attr "op_mem" "00")) +- "cf_v2_ib2+cf_v2_move_00") +- +-(define_insn_reservation "cf_v2_move_00_3" 1 +- (and (and (and (eq_attr "cpu" "cf_v2") +- (eq_attr "type2" "alu,alu_l,move,move_l")) +- (eq (symbol_ref "get_attr_size (insn)") (const_int 3))) +- (eq_attr "op_mem" "00")) +- "cf_v2_ib3+cf_v2_move_00") +- +-(define_insn_reservation "cf_v2_move_10_1" 4 +- (and (and (and (eq_attr "cpu" "cf_v2") +- (eq_attr "type2" "alu_l,move")) +- (eq (symbol_ref "get_attr_size (insn)") (const_int 1))) +- (eq_attr "op_mem" "10")) +- "cf_v2_ib1+cf_v2_move_10") +- +-(define_insn_reservation "cf_v2_move_10_2" 4 +- (and (and (and (eq_attr "cpu" "cf_v2") +- (eq_attr "type2" "alu_l,move")) +- (eq (symbol_ref "get_attr_size (insn)") (const_int 2))) +- (eq_attr "op_mem" "10")) +- "cf_v2_ib2+cf_v2_move_10") +- +-(define_insn_reservation "cf_v2_move_10_3" 4 +- (and (and (and (eq_attr "cpu" "cf_v2") +- (eq_attr "type2" "alu_l,move")) +- (eq (symbol_ref "get_attr_size (insn)") (const_int 3))) +- (eq_attr "op_mem" "10")) +- "cf_v2_ib3+cf_v2_move_10") +- +-(define_insn_reservation "cf_v2_move_l_10_1" 3 +- (and (and (and (eq_attr "cpu" "cf_v2") +- (eq_attr "type2" "move_l")) +- (eq (symbol_ref "get_attr_size (insn)") (const_int 1))) +- (eq_attr "op_mem" "10")) +- "cf_v2_ib1+cf_v2_move_l_10") +- +-(define_insn_reservation "cf_v2_move_l_10_2" 3 +- (and (and (and (eq_attr "cpu" "cf_v2") +- (eq_attr "type2" "move_l")) +- (eq (symbol_ref "get_attr_size (insn)") (const_int 2))) +- (eq_attr "op_mem" "10")) +- "cf_v2_ib2+cf_v2_move_l_10") +- +-(define_insn_reservation "cf_v2_move_l_10_3" 3 +- (and (and (and (eq_attr "cpu" "cf_v2") +- (eq_attr "type2" "move_l")) +- (eq (symbol_ref "get_attr_size (insn)") (const_int 3))) +- (eq_attr "op_mem" "10")) +- "cf_v2_ib3+cf_v2_move_l_10") +- +-(define_insn_reservation "cf_v2_move_i0_2" 5 +- (and (and (and (eq_attr "cpu" "cf_v2") +- (eq_attr "type2" "alu_l,move")) +- (eq (symbol_ref "get_attr_size (insn)") (const_int 2))) +- (eq_attr "op_mem" "i0")) +- "cf_v2_ib2+cf_v2_move_i0") +- +-(define_insn_reservation "cf_v2_move_i0_3" 5 +- (and (and (and (eq_attr "cpu" "cf_v2") +- (eq_attr "type2" "alu_l,move")) +- (eq (symbol_ref "get_attr_size (insn)") (const_int 3))) +- (eq_attr "op_mem" "i0")) +- "cf_v2_ib3+cf_v2_move_i0") +- +-(define_insn_reservation "cf_v2_move_l_i0_2" 4 +- (and (and (and (eq_attr "cpu" "cf_v2") +- (eq_attr "type2" "move_l")) +- (eq (symbol_ref "get_attr_size (insn)") (const_int 2))) +- (eq_attr "op_mem" "i0")) +- "cf_v2_ib2+cf_v2_move_l_i0") +- +-(define_insn_reservation "cf_v2_move_l_i0_3" 4 +- (and (and (and (eq_attr "cpu" "cf_v2") +- (eq_attr "type2" "move_l")) +- (eq (symbol_ref "get_attr_size (insn)") (const_int 3))) +- (eq_attr "op_mem" "i0")) +- "cf_v2_ib3+cf_v2_move_l_i0") +- +-(define_insn_reservation "cf_v2_move_01_1" 0 +- (and (and (and (eq_attr "cpu" "cf_v2") +- (eq_attr "type2" "alu_l,move,move_l")) +- (eq (symbol_ref "get_attr_size (insn)") (const_int 1))) +- (eq_attr "op_mem" "01")) +- "cf_v2_ib1+cf_v2_move_01") +- +-(define_insn_reservation "cf_v2_move_01_2" 0 +- (and (and (and (eq_attr "cpu" "cf_v2") +- 
(eq_attr "type2" "alu_l,move,move_l")) +- (eq (symbol_ref "get_attr_size (insn)") (const_int 2))) +- (eq_attr "op_mem" "01")) +- "cf_v2_ib2+cf_v2_move_01") +- +-(define_insn_reservation "cf_v2_move_01_3" 0 +- (and (and (and (eq_attr "cpu" "cf_v2") +- (eq_attr "type2" "alu_l,move,move_l")) +- (eq (symbol_ref "get_attr_size (insn)") (const_int 3))) +- (eq_attr "op_mem" "01")) +- "cf_v2_ib3+cf_v2_move_01") ++(define_reservation "cfv3_mac_w_i0" ++ "cf_dsoc,cf_agex,cf_agex,cf_dsoc+cf_mem1,cf_dsoc+cf_mem2,cf_agex,cf_mac1,cf_mac2") + +-(define_insn_reservation "cf_v2_move_0i_2" 0 +- (and (and (and (eq_attr "cpu" "cf_v2") +- (eq_attr "type2" "alu_l,move,move_l")) +- (eq (symbol_ref "get_attr_size (insn)") (const_int 2))) +- (eq_attr "op_mem" "0i")) +- "cf_v2_ib2+cf_v2_move_0i") +- +-(define_insn_reservation "cf_v2_move_0i_3" 0 +- (and (and (and (eq_attr "cpu" "cf_v2") +- (eq_attr "type2" "alu_l,move,move_l")) +- (eq (symbol_ref "get_attr_size (insn)") (const_int 3))) +- (eq_attr "op_mem" "0i")) +- "cf_v2_ib3+cf_v2_move_0i") +- +-(define_insn_reservation "cf_v2_move_11_1" 0 +- (and (and (and (eq_attr "cpu" "cf_v2") +- (eq_attr "type2" "alu_l,move")) +- (eq (symbol_ref "get_attr_size (insn)") (const_int 1))) +- (eq_attr "op_mem" "11")) +- "cf_v2_ib1+cf_v2_move_11") +- +-(define_insn_reservation "cf_v2_move_11_2" 0 +- (and (and (and (eq_attr "cpu" "cf_v2") +- (eq_attr "type2" "alu_l,move")) +- (eq (symbol_ref "get_attr_size (insn)") (const_int 2))) +- (eq_attr "op_mem" "11")) +- "cf_v2_ib2+cf_v2_move_11") +- +-(define_insn_reservation "cf_v2_move_11_3" 0 +- (and (and (and (eq_attr "cpu" "cf_v2") +- (eq_attr "type2" "alu_l,move")) +- (eq (symbol_ref "get_attr_size (insn)") (const_int 3))) +- (eq_attr "op_mem" "11")) +- "cf_v2_ib3+cf_v2_move_11") +- +-(define_insn_reservation "cf_v2_move_l_11_1" 0 +- (and (and (and (eq_attr "cpu" "cf_v2") +- (eq_attr "type2" "move_l")) +- (eq (symbol_ref "get_attr_size (insn)") (const_int 1))) +- (eq_attr "op_mem" "11")) +- "cf_v2_ib1+cf_v2_move_l_11") +- +-(define_insn_reservation "cf_v2_move_l_11_2" 0 +- (and (and (and (eq_attr "cpu" "cf_v2") +- (eq_attr "type2" "move_l")) +- (eq (symbol_ref "get_attr_size (insn)") (const_int 2))) +- (eq_attr "op_mem" "11")) +- "cf_v2_ib2+cf_v2_move_l_11") +- +-(define_insn_reservation "cf_v2_move_l_11_3" 0 +- (and (and (and (eq_attr "cpu" "cf_v2") +- (eq_attr "type2" "move_l")) +- (eq (symbol_ref "get_attr_size (insn)") (const_int 3))) +- (eq_attr "op_mem" "11")) +- "cf_v2_ib3+cf_v2_move_l_11") +- +-(define_insn_reservation "cf_v2_move_i1_2" 0 +- (and (and (and (eq_attr "cpu" "cf_v2") +- (eq_attr "type2" "alu_l,move")) +- (eq (symbol_ref "get_attr_size (insn)") (const_int 2))) +- (eq_attr "op_mem" "i1")) +- "cf_v2_ib2+cf_v2_move_i1") +- +-(define_insn_reservation "cf_v2_move_i1_3" 0 +- (and (and (and (eq_attr "cpu" "cf_v2") +- (eq_attr "type2" "alu_l,move")) +- (eq (symbol_ref "get_attr_size (insn)") (const_int 3))) +- (eq_attr "op_mem" "i1")) +- "cf_v2_ib3+cf_v2_move_i1") +- +-(define_insn_reservation "cf_v2_move_l_i1_2" 0 +- (and (and (and (eq_attr "cpu" "cf_v2") +- (eq_attr "type2" "move_l")) +- (eq (symbol_ref "get_attr_size (insn)") (const_int 2))) +- (eq_attr "op_mem" "i1")) +- "cf_v2_ib2+cf_v2_move_l_i1") +- +-(define_insn_reservation "cf_v2_move_l_i1_3" 0 +- (and (and (and (eq_attr "cpu" "cf_v2") +- (eq_attr "type2" "move_l")) +- (eq (symbol_ref "get_attr_size (insn)") (const_int 3))) +- (eq_attr "op_mem" "i1")) +- "cf_v2_ib3+cf_v2_move_l_i1") ++;; Multiplication with emac. ++;; Takes 4 cycles. 
++(define_reservation "cfv123_emac_00" ++ "cf_dsoc,cf_agex+cf_mac1,cf_mac2,cf_mac3,cf_mac4") + +-(define_insn_reservation "cf_v2_move_1i_2" 0 +- (and (and (and (eq_attr "cpu" "cf_v2") +- (eq_attr "type2" "alu_l,move")) +- (eq (symbol_ref "get_attr_size (insn)") (const_int 2))) +- (eq_attr "op_mem" "1i")) +- "cf_v2_ib2+cf_v2_move_1i") ++;; Multiplication with emac. ++;; Takes 6 cycles. ++(define_reservation "cfv12_emac_10" ++ "cf_dsoc,cf_agex,cf_dsoc+cf_mem,cf_agex+cf_mac1,cf_mac2,cf_mac3,cf_mac4") ++;; Takes 7 cycles. ++(define_reservation "cfv3_emac_10" ++ "cf_dsoc,cf_agex,cf_dsoc+cf_mem1,cf_dsoc+cf_mem2,cf_agex+cf_mac1,cf_mac2,cf_mac3,cf_mac4") + +-(define_insn_reservation "cf_v2_move_1i_3" 0 +- (and (and (and (eq_attr "cpu" "cf_v2") +- (eq_attr "type2" "alu_l,move")) +- (eq (symbol_ref "get_attr_size (insn)") (const_int 3))) +- (eq_attr "op_mem" "1i")) +- "cf_v2_ib3+cf_v2_move_1i") ++;; Word multiplication with emac. ++;; Takes 7 cycles. ++(define_reservation "cfv12_emac_w_i0" ++ "cf_dsoc,cf_agex,cf_agex,cf_dsoc+cf_mem,cf_agex+cf_mac1,cf_mac2,cf_mac3,cf_mac4") ++;; Takes 8 cycles. ++(define_reservation "cfv3_emac_w_i0" ++ "cf_dsoc,cf_agex,cf_agex,cf_dsoc+cf_mem1,cf_dsoc+cf_mem2,cf_agex+cf_mac1,cf_mac2,cf_mac3,cf_mac4") + +-(define_insn_reservation "cf_v2_move_l_1i_2" 0 +- (and (and (and (eq_attr "cpu" "cf_v2") +- (eq_attr "type2" "move_l")) +- (eq (symbol_ref "get_attr_size (insn)") (const_int 2))) +- (eq_attr "op_mem" "1i")) +- "cf_v2_ib2+cf_v2_move_l_1i") ++;; Return instruction. ++;; ??? As return reads target address from stack, use a mem-read reservation ++;; ??? for it. ++;; ??? It's not clear what the core does during these 5 cycles. ++;; ??? Luckily, we don't care that much about an insn that won't be moved. ++;; Takes 5 cycles. ++(define_reservation "cfv12_rts" "cfv12_alu_10") ++;; Takes 8 cycles. ++(define_reservation "cfv3_rts" "cfv3_alu_10") + +-(define_insn_reservation "cf_v2_move_l_1i_3" 0 +- (and (and (and (eq_attr "cpu" "cf_v2") +- (eq_attr "type2" "move_l")) +- (eq (symbol_ref "get_attr_size (insn)") (const_int 3))) +- (eq_attr "op_mem" "1i")) +- "cf_v2_ib3+cf_v2_move_l_1i") ++;; Call instruction. ++;; ??? It's not clear what reservation is best to use for calls. ++;; ??? For now we use mem-write + return reservations to reflect the fact of ++;; ??? pushing and poping return address to and from the stack. ++;; Takes 3 cycles. ++(define_reservation "cfv12_call" "cfv12_alu_01,cfv12_rts") ++;; Takes 1/5 cycles. ++(define_reservation "cfv3_call" "cfv3_alu_01,cfv3_rts") + +-(define_insn_reservation "cf_v2_lea_10_1" 1 +- (and (and (and (eq_attr "cpu" "cf_v2") +- (eq_attr "type2" "lea")) +- (eq (symbol_ref "get_attr_size (insn)") (const_int 1))) +- (eq_attr "op_mem" "10")) +- "cf_v2_ib1+cf_v2_lea_10") ++;; Conditional branch instruction. ++;; ??? Branch reservations are unclear to me so far. Luckily, we don't care ++;; ??? that much about branches. ++;; Takes 2 cycles. ++(define_reservation "cfv12_bcc" "cfv123_alu_00") ++;; Takes 1 cycles. ++(define_reservation "cfv3_bcc" "cfv123_alu_00") + +-(define_insn_reservation "cf_v2_lea_10_2" 1 +- (and (and (and (eq_attr "cpu" "cf_v2") +- (eq_attr "type2" "lea")) +- (eq (symbol_ref "get_attr_size (insn)") (const_int 2))) +- (eq_attr "op_mem" "10")) +- "cf_v2_ib2+cf_v2_lea_10") ++;; Unconditional branch instruciton. ++;; Takes 2 cycles. ++(define_reservation "cfv12_bra" "cfv12_alu_01") ++;; Takes 1 cycles. 
++(define_reservation "cfv3_bra" "cfv3_alu_01") + +-(define_insn_reservation "cf_v2_lea_10_3" 1 +- (and (and (and (eq_attr "cpu" "cf_v2") +- (eq_attr "type2" "lea")) +- (eq (symbol_ref "get_attr_size (insn)") (const_int 3))) +- (eq_attr "op_mem" "10")) +- "cf_v2_ib3+cf_v2_lea_10") ++;; Computed jump instruction. ++;; Takes 3 cycles. ++(define_reservation "cfv12_jmp" ++ "(cf_dsoc+cf_agex)*3") ++;; Takes 5 cycles. ++(define_reservation "cfv3_jmp" ++ "(cf_dsoc+cf_agex)*5") + +-(define_insn_reservation "cf_v2_lea_i0_2" 2 +- (and (and (and (eq_attr "cpu" "cf_v2") +- (eq_attr "type2" "lea")) +- (eq (symbol_ref "get_attr_size (insn)") (const_int 2))) +- (eq_attr "op_mem" "i0")) +- "cf_v2_ib2+cf_v2_lea_i0") ++;; Instruction reservations. + +-(define_insn_reservation "cf_v2_lea_i0_3" 2 +- (and (and (and (eq_attr "cpu" "cf_v2") +- (eq_attr "type2" "lea")) +- (eq (symbol_ref "get_attr_size (insn)") (const_int 3))) +- (eq_attr "op_mem" "i0")) +- "cf_v2_ib3+cf_v2_lea_i0") ++;; Below reservations are simple derivation from the above reservations. ++;; Each reservation from the above expands into 3 reservations below - one ++;; for each instruction size. ++;; A number in the end of reservation's name is the size of the instruction. + +-(define_insn_reservation "cf_v2_pea_11_1" 0 +- (and (and (and (eq_attr "cpu" "cf_v2") +- (eq_attr "type2" "pea")) +- (eq (symbol_ref "get_attr_size (insn)") (const_int 1))) ++(define_insn_reservation "cfv123_alu_00_1" 1 ++ (and (and (and (eq_attr "cpu" "cfv1,cfv2,cfv3") ++ (eq_attr "type" " ++alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift, ++clr,clr_l,mov3q_l,move,moveq_l,tst, ++move_l,tst_l")) ++ (eq_attr "op_mem" "00")) ++ (eq_attr "size" "1")) ++ "cf_ib1+cfv123_alu_00") ++ ++(define_insn_reservation "cfv123_alu_00_2" 1 ++ (and (and (and (eq_attr "cpu" "cfv1,cfv2,cfv3") ++ (eq_attr "type" " ++alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift, ++clr,clr_l,mov3q_l,move,moveq_l,tst, ++move_l,tst_l")) ++ (eq_attr "op_mem" "00")) ++ (eq_attr "size" "2")) ++ "cf_ib2+cfv123_alu_00") ++ ++(define_insn_reservation "cfv123_alu_00_3" 1 ++ (and (and (and (eq_attr "cpu" "cfv1,cfv2,cfv3") ++ (eq_attr "type" " ++alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift, ++clr,clr_l,mov3q_l,move,moveq_l,tst, ++move_l,tst_l")) ++ (eq_attr "op_mem" "00")) ++ (eq_attr "size" "3")) ++ "cf_ib3+cfv123_alu_00") ++ ++(define_insn_reservation "cfv1_alu_10_1" 3 ++ (and (and (and (eq_attr "cpu" "cfv1") ++ (eq_attr "type" " ++alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift")) ++ (eq_attr "op_mem" "10")) ++ (eq_attr "size" "1")) ++ "cf_ib1+cfv12_alu_10") ++ ++(define_insn_reservation "cfv1_alu_10_2" 3 ++ (and (and (and (eq_attr "cpu" "cfv1") ++ (eq_attr "type" " ++alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift")) ++ (eq_attr "op_mem" "10")) ++ (eq_attr "size" "2")) ++ "cf_ib2+cfv12_alu_10") ++ ++(define_insn_reservation "cfv1_alu_10_3" 3 ++ (and (and (and (eq_attr "cpu" "cfv1") ++ (eq_attr "type" " ++alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift")) ++ (eq_attr "op_mem" "10")) ++ (eq_attr "size" "3")) ++ "cf_ib3+cfv12_alu_10") ++ ++(define_insn_reservation "cfv1_omove_10_1" 2 ++ (and (and (and (eq_attr "cpu" "cfv1") ++ (eq_attr "type" " ++clr,clr_l,mov3q_l,move,moveq_l,tst, ++move_l,tst_l")) ++ (eq_attr "op_mem" "10")) ++ (eq_attr "size" "1")) ++ "cf_ib1+cfv12_omove_10") ++ ++(define_insn_reservation "cfv1_omove_10_2" 2 ++ (and (and (and (eq_attr "cpu" "cfv1") ++ (eq_attr "type" " ++clr,clr_l,mov3q_l,move,moveq_l,tst, 
++move_l,tst_l")) ++ (eq_attr "op_mem" "10")) ++ (eq_attr "size" "2")) ++ "cf_ib2+cfv12_omove_10") ++ ++(define_insn_reservation "cfv1_omove_10_3" 2 ++ (and (and (and (eq_attr "cpu" "cfv1") ++ (eq_attr "type" " ++clr,clr_l,mov3q_l,move,moveq_l,tst, ++move_l,tst_l")) ++ (eq_attr "op_mem" "10")) ++ (eq_attr "size" "3")) ++ "cf_ib3+cfv12_omove_10") ++ ++(define_insn_reservation "cfv2_alu_10_1" 3 ++ (and (and (and (eq_attr "cpu" "cfv2") ++ (eq_attr "type" " ++alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift, ++clr,clr_l,mov3q_l,move,moveq_l,tst")) ++ (eq_attr "op_mem" "10")) ++ (eq_attr "size" "1")) ++ "cf_ib1+cfv12_alu_10") ++ ++(define_insn_reservation "cfv2_alu_10_2" 3 ++ (and (and (and (eq_attr "cpu" "cfv2") ++ (eq_attr "type" " ++alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift, ++clr,clr_l,mov3q_l,move,moveq_l,tst")) ++ (eq_attr "op_mem" "10")) ++ (eq_attr "size" "2")) ++ "cf_ib2+cfv12_alu_10") ++ ++(define_insn_reservation "cfv2_alu_10_3" 3 ++ (and (and (and (eq_attr "cpu" "cfv2") ++ (eq_attr "type" " ++alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift, ++clr,clr_l,mov3q_l,move,moveq_l,tst")) ++ (eq_attr "op_mem" "10")) ++ (eq_attr "size" "3")) ++ "cf_ib3+cfv12_alu_10") ++ ++(define_insn_reservation "cfv2_omove_10_1" 2 ++ (and (and (and (eq_attr "cpu" "cfv2") ++ (eq_attr "type" " ++move_l,tst_l")) ++ (eq_attr "op_mem" "10")) ++ (eq_attr "size" "1")) ++ "cf_ib1+cfv12_omove_10") ++ ++(define_insn_reservation "cfv2_omove_10_2" 2 ++ (and (and (and (eq_attr "cpu" "cfv2") ++ (eq_attr "type" " ++move_l,tst_l")) ++ (eq_attr "op_mem" "10")) ++ (eq_attr "size" "2")) ++ "cf_ib2+cfv12_omove_10") ++ ++(define_insn_reservation "cfv2_omove_10_3" 2 ++ (and (and (and (eq_attr "cpu" "cfv2") ++ (eq_attr "type" " ++move_l,tst_l")) ++ (eq_attr "op_mem" "10")) ++ (eq_attr "size" "3")) ++ "cf_ib3+cfv12_omove_10") ++ ++(define_insn_reservation "cfv3_alu_10_1" 4 ++ (and (and (and (eq_attr "cpu" "cfv3") ++ (eq_attr "type" " ++alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift, ++clr,clr_l,mov3q_l,move,moveq_l,tst")) ++ (eq_attr "op_mem" "10")) ++ (eq_attr "size" "1")) ++ "cf_ib1+cfv3_alu_10") ++ ++(define_insn_reservation "cfv3_alu_10_2" 4 ++ (and (and (and (eq_attr "cpu" "cfv3") ++ (eq_attr "type" " ++alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift, ++clr,clr_l,mov3q_l,move,moveq_l,tst")) ++ (eq_attr "op_mem" "10")) ++ (eq_attr "size" "2")) ++ "cf_ib2+cfv3_alu_10") ++ ++(define_insn_reservation "cfv3_alu_10_3" 4 ++ (and (and (and (eq_attr "cpu" "cfv3") ++ (eq_attr "type" " ++alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift, ++clr,clr_l,mov3q_l,move,moveq_l,tst")) ++ (eq_attr "op_mem" "10")) ++ (eq_attr "size" "3")) ++ "cf_ib3+cfv3_alu_10") ++ ++(define_insn_reservation "cfv3_omove_10_1" 3 ++ (and (and (and (eq_attr "cpu" "cfv3") ++ (eq_attr "type" " ++move_l,tst_l")) ++ (eq_attr "op_mem" "10")) ++ (eq_attr "size" "1")) ++ "cf_ib1+cfv3_omove_10") ++ ++(define_insn_reservation "cfv3_omove_10_2" 3 ++ (and (and (and (eq_attr "cpu" "cfv3") ++ (eq_attr "type" " ++move_l,tst_l")) ++ (eq_attr "op_mem" "10")) ++ (eq_attr "size" "2")) ++ "cf_ib2+cfv3_omove_10") ++ ++(define_insn_reservation "cfv3_omove_10_3" 3 ++ (and (and (and (eq_attr "cpu" "cfv3") ++ (eq_attr "type" " ++move_l,tst_l")) ++ (eq_attr "op_mem" "10")) ++ (eq_attr "size" "3")) ++ "cf_ib3+cfv3_omove_10") ++ ++(define_insn_reservation "cfv1_alu_i0_2" 4 ++ (and (and (and (eq_attr "cpu" "cfv1") ++ (eq_attr "type" " ++alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift")) ++ (eq_attr 
"op_mem" "i0")) ++ (eq_attr "size" "1,2")) ++ "cf_ib2+cfv12_alu_i0") ++ ++(define_insn_reservation "cfv1_alu_i0_3" 4 ++ (and (and (and (eq_attr "cpu" "cfv1") ++ (eq_attr "type" " ++alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift")) ++ (eq_attr "op_mem" "i0")) ++ (eq_attr "size" "3")) ++ "cf_ib3+cfv12_alu_i0") ++ ++(define_insn_reservation "cfv1_omove_i0_2" 3 ++ (and (and (and (eq_attr "cpu" "cfv1") ++ (eq_attr "type" " ++clr,clr_l,mov3q_l,move,moveq_l,tst, ++move_l,tst_l")) ++ (eq_attr "op_mem" "i0")) ++ (eq_attr "size" "1,2")) ++ "cf_ib2+cfv12_omove_i0") ++ ++(define_insn_reservation "cfv1_omove_i0_3" 3 ++ (and (and (and (eq_attr "cpu" "cfv1") ++ (eq_attr "type" " ++clr,clr_l,mov3q_l,move,moveq_l,tst, ++move_l,tst_l")) ++ (eq_attr "op_mem" "i0")) ++ (eq_attr "size" "3")) ++ "cf_ib3+cfv12_omove_i0") ++ ++(define_insn_reservation "cfv2_alu_i0_2" 4 ++ (and (and (and (eq_attr "cpu" "cfv2") ++ (eq_attr "type" " ++alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift, ++clr,clr_l,mov3q_l,move,moveq_l,tst")) ++ (eq_attr "op_mem" "i0")) ++ (eq_attr "size" "1,2")) ++ "cf_ib2+cfv12_alu_i0") ++ ++(define_insn_reservation "cfv2_alu_i0_3" 4 ++ (and (and (and (eq_attr "cpu" "cfv2") ++ (eq_attr "type" " ++alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift, ++clr,clr_l,mov3q_l,move,moveq_l,tst")) ++ (eq_attr "op_mem" "i0")) ++ (eq_attr "size" "3")) ++ "cf_ib3+cfv12_alu_i0") ++ ++(define_insn_reservation "cfv2_omove_i0_2" 3 ++ (and (and (and (eq_attr "cpu" "cfv2") ++ (eq_attr "type" " ++move_l,tst_l")) ++ (eq_attr "op_mem" "i0")) ++ (eq_attr "size" "1,2")) ++ "cf_ib2+cfv12_omove_i0") ++ ++(define_insn_reservation "cfv2_omove_i0_3" 3 ++ (and (and (and (eq_attr "cpu" "cfv2") ++ (eq_attr "type" " ++move_l,tst_l")) ++ (eq_attr "op_mem" "i0")) ++ (eq_attr "size" "3")) ++ "cf_ib3+cfv12_omove_i0") ++ ++(define_insn_reservation "cfv3_alu_i0_2" 5 ++ (and (and (and (eq_attr "cpu" "cfv3") ++ (eq_attr "type" " ++alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift, ++clr,clr_l,mov3q_l,move,moveq_l,tst")) ++ (eq_attr "op_mem" "i0")) ++ (eq_attr "size" "1,2")) ++ "cf_ib2+cfv3_alu_i0") ++ ++(define_insn_reservation "cfv3_alu_i0_3" 5 ++ (and (and (and (eq_attr "cpu" "cfv3") ++ (eq_attr "type" " ++alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift, ++clr,clr_l,mov3q_l,move,moveq_l,tst")) ++ (eq_attr "op_mem" "i0")) ++ (eq_attr "size" "3")) ++ "cf_ib3+cfv3_alu_i0") ++ ++(define_insn_reservation "cfv3_omove_i0_2" 4 ++ (and (and (and (eq_attr "cpu" "cfv3") ++ (eq_attr "type" " ++move_l,tst_l")) ++ (eq_attr "op_mem" "i0")) ++ (eq_attr "size" "1,2")) ++ "cf_ib2+cfv3_omove_i0") ++ ++(define_insn_reservation "cfv3_omove_i0_3" 4 ++ (and (and (and (eq_attr "cpu" "cfv3") ++ (eq_attr "type" " ++move_l,tst_l")) ++ (eq_attr "op_mem" "i0")) ++ (eq_attr "size" "3")) ++ "cf_ib3+cfv3_omove_i0") ++ ++(define_insn_reservation "cfv12_alu_01_1" 1 ++ (and (and (and (eq_attr "cpu" "cfv1,cfv2") ++ (eq_attr "type" " ++alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift, ++clr,clr_l,mov3q_l,move,moveq_l,tst, ++move_l,tst_l")) ++ (eq_attr "op_mem" "01")) ++ (eq_attr "size" "1")) ++ "cf_ib1+cfv12_alu_01") ++ ++(define_insn_reservation "cfv12_alu_01_2" 1 ++ (and (and (and (eq_attr "cpu" "cfv1,cfv2") ++ (eq_attr "type" " ++alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift, ++clr,clr_l,mov3q_l,move,moveq_l,tst, ++move_l,tst_l")) ++ (eq_attr "op_mem" "01")) ++ (eq_attr "size" "2")) ++ "cf_ib2+cfv12_alu_01") ++ ++(define_insn_reservation "cfv12_alu_01_3" 1 ++ (and (and (and (eq_attr 
"cpu" "cfv1,cfv2") ++ (eq_attr "type" " ++alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift, ++clr,clr_l,mov3q_l,move,moveq_l,tst, ++move_l,tst_l")) ++ (eq_attr "op_mem" "01")) ++ (eq_attr "size" "3")) ++ "cf_ib3+cfv12_alu_01") ++ ++(define_insn_reservation "cfv3_alu_01_1" 1 ++ (and (and (and (eq_attr "cpu" "cfv3") ++ (eq_attr "type" " ++alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift, ++clr,clr_l,mov3q_l,move,moveq_l,tst, ++move_l,tst_l")) ++ (eq_attr "op_mem" "01")) ++ (eq_attr "size" "1")) ++ "cf_ib1+cfv3_alu_01") ++ ++(define_insn_reservation "cfv3_alu_01_2" 1 ++ (and (and (and (eq_attr "cpu" "cfv3") ++ (eq_attr "type" " ++alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift, ++clr,clr_l,mov3q_l,move,moveq_l,tst, ++move_l,tst_l")) ++ (eq_attr "op_mem" "01")) ++ (eq_attr "size" "2")) ++ "cf_ib2+cfv3_alu_01") ++ ++(define_insn_reservation "cfv3_alu_01_3" 1 ++ (and (and (and (eq_attr "cpu" "cfv3") ++ (eq_attr "type" " ++alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift, ++clr,clr_l,mov3q_l,move,moveq_l,tst, ++move_l,tst_l")) ++ (eq_attr "op_mem" "01")) ++ (eq_attr "size" "3")) ++ "cf_ib3+cfv3_alu_01") ++ ++(define_insn_reservation "cfv12_alu_0i_2" 2 ++ (and (and (and (eq_attr "cpu" "cfv1,cfv2") ++ (eq_attr "type" " ++alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift, ++clr,clr_l,mov3q_l,move,moveq_l,tst, ++move_l,tst_l")) ++ (eq_attr "op_mem" "0i")) ++ (eq_attr "size" "1,2")) ++ "cf_ib2+cfv12_alu_0i") ++ ++(define_insn_reservation "cfv12_alu_0i_3" 2 ++ (and (and (and (eq_attr "cpu" "cfv1,cfv2") ++ (eq_attr "type" " ++alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift, ++clr,clr_l,mov3q_l,move,moveq_l,tst, ++move_l,tst_l")) ++ (eq_attr "op_mem" "0i")) ++ (eq_attr "size" "3")) ++ "cf_ib3+cfv12_alu_0i") ++ ++(define_insn_reservation "cfv3_alu_0i_2" 2 ++ (and (and (and (eq_attr "cpu" "cfv3") ++ (eq_attr "type" " ++alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift, ++clr,clr_l,mov3q_l,move,moveq_l,tst, ++move_l,tst_l")) ++ (eq_attr "op_mem" "0i")) ++ (eq_attr "size" "1,2")) ++ "cf_ib2+cfv3_alu_0i") ++ ++(define_insn_reservation "cfv3_alu_0i_3" 2 ++ (and (and (and (eq_attr "cpu" "cfv3") ++ (eq_attr "type" " ++alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift, ++clr,clr_l,mov3q_l,move,moveq_l,tst, ++move_l,tst_l")) ++ (eq_attr "op_mem" "0i")) ++ (eq_attr "size" "3")) ++ "cf_ib3+cfv3_alu_0i") ++ ++(define_insn_reservation "cfv1_alu_11_1" 1 ++ (and (and (and (eq_attr "cpu" "cfv1") ++ (eq_attr "type" " ++alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift")) ++ (eq_attr "op_mem" "11")) ++ (eq_attr "size" "1")) ++ "cf_ib1+cfv12_alu_11") ++ ++(define_insn_reservation "cfv1_alu_11_2" 1 ++ (and (and (and (eq_attr "cpu" "cfv1") ++ (eq_attr "type" " ++alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift")) ++ (eq_attr "op_mem" "11")) ++ (eq_attr "size" "2")) ++ "cf_ib2+cfv12_alu_11") ++ ++(define_insn_reservation "cfv1_alu_11_3" 1 ++ (and (and (and (eq_attr "cpu" "cfv1") ++ (eq_attr "type" " ++alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift")) ++ (eq_attr "op_mem" "11")) ++ (eq_attr "size" "3")) ++ "cf_ib3+cfv12_alu_11") ++ ++(define_insn_reservation "cfv1_omove_11_1" 1 ++ (and (and (and (eq_attr "cpu" "cfv1") ++ (eq_attr "type" " ++clr,clr_l,mov3q_l,move,moveq_l,tst, ++move_l,tst_l")) ++ (eq_attr "op_mem" "11")) ++ (eq_attr "size" "1")) ++ "cf_ib1+cfv12_omove_11") ++ ++(define_insn_reservation "cfv1_omove_11_2" 1 ++ (and (and (and (eq_attr "cpu" "cfv1") ++ (eq_attr "type" " 
++clr,clr_l,mov3q_l,move,moveq_l,tst, ++move_l,tst_l")) ++ (eq_attr "op_mem" "11")) ++ (eq_attr "size" "2")) ++ "cf_ib2+cfv12_omove_11") ++ ++(define_insn_reservation "cfv1_omove_11_3" 1 ++ (and (and (and (eq_attr "cpu" "cfv1") ++ (eq_attr "type" " ++clr,clr_l,mov3q_l,move,moveq_l,tst, ++move_l,tst_l")) ++ (eq_attr "op_mem" "11")) ++ (eq_attr "size" "3")) ++ "cf_ib3+cfv12_omove_11") ++ ++(define_insn_reservation "cfv2_alu_11_1" 1 ++ (and (and (and (eq_attr "cpu" "cfv2") ++ (eq_attr "type" " ++alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift, ++clr,clr_l,mov3q_l,move,moveq_l,tst")) ++ (eq_attr "op_mem" "11")) ++ (eq_attr "size" "1")) ++ "cf_ib1+cfv12_alu_11") ++ ++(define_insn_reservation "cfv2_alu_11_2" 1 ++ (and (and (and (eq_attr "cpu" "cfv2") ++ (eq_attr "type" " ++alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift, ++clr,clr_l,mov3q_l,move,moveq_l,tst")) ++ (eq_attr "op_mem" "11")) ++ (eq_attr "size" "2")) ++ "cf_ib2+cfv12_alu_11") ++ ++(define_insn_reservation "cfv2_alu_11_3" 1 ++ (and (and (and (eq_attr "cpu" "cfv2") ++ (eq_attr "type" " ++alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift, ++clr,clr_l,mov3q_l,move,moveq_l,tst")) ++ (eq_attr "op_mem" "11")) ++ (eq_attr "size" "3")) ++ "cf_ib3+cfv12_alu_11") ++ ++(define_insn_reservation "cfv2_omove_11_1" 1 ++ (and (and (and (eq_attr "cpu" "cfv2") ++ (eq_attr "type" " ++move_l,tst_l")) ++ (eq_attr "op_mem" "11")) ++ (eq_attr "size" "1")) ++ "cf_ib1+cfv12_omove_11") ++ ++(define_insn_reservation "cfv2_omove_11_2" 1 ++ (and (and (and (eq_attr "cpu" "cfv2") ++ (eq_attr "type" " ++move_l,tst_l")) ++ (eq_attr "op_mem" "11")) ++ (eq_attr "size" "2")) ++ "cf_ib2+cfv12_omove_11") ++ ++(define_insn_reservation "cfv2_omove_11_3" 1 ++ (and (and (and (eq_attr "cpu" "cfv2") ++ (eq_attr "type" " ++move_l,tst_l")) ++ (eq_attr "op_mem" "11")) ++ (eq_attr "size" "3")) ++ "cf_ib3+cfv12_omove_11") ++ ++(define_insn_reservation "cfv3_alu_11_1" 1 ++ (and (and (and (eq_attr "cpu" "cfv3") ++ (eq_attr "type" " ++alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift, ++clr,clr_l,mov3q_l,move,moveq_l,tst")) ++ (eq_attr "op_mem" "11")) ++ (eq_attr "size" "1")) ++ "cf_ib1+cfv3_alu_11") ++ ++(define_insn_reservation "cfv3_alu_11_2" 1 ++ (and (and (and (eq_attr "cpu" "cfv3") ++ (eq_attr "type" " ++alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift, ++clr,clr_l,mov3q_l,move,moveq_l,tst")) ++ (eq_attr "size" "2")) + (eq_attr "op_mem" "11")) +- "cf_v2_ib1+cf_v2_pea_11") ++ "cf_ib2+cfv3_alu_11") + +-(define_insn_reservation "cf_v2_pea_11_2" 0 +- (and (and (and (eq_attr "cpu" "cf_v2") +- (eq_attr "type2" "pea")) +- (eq (symbol_ref "get_attr_size (insn)") (const_int 2))) ++(define_insn_reservation "cfv3_alu_11_3" 1 ++ (and (and (and (eq_attr "cpu" "cfv3") ++ (eq_attr "type" " ++alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift, ++clr,clr_l,mov3q_l,move,moveq_l,tst")) ++ (eq_attr "op_mem" "11")) ++ (eq_attr "size" "3")) ++ "cf_ib3+cfv3_alu_11") ++ ++(define_insn_reservation "cfv3_omove_11_1" 1 ++ (and (and (and (eq_attr "cpu" "cfv3") ++ (eq_attr "type" " ++move_l,tst_l")) ++ (eq_attr "op_mem" "11")) ++ (eq_attr "size" "1")) ++ "cf_ib1+cfv3_omove_11") ++ ++(define_insn_reservation "cfv3_omove_11_2" 1 ++ (and (and (and (eq_attr "cpu" "cfv3") ++ (eq_attr "type" " ++move_l,tst_l")) ++ (eq_attr "size" "2")) + (eq_attr "op_mem" "11")) +- "cf_v2_ib2+cf_v2_pea_11") ++ "cf_ib2+cfv3_omove_11") + +-(define_insn_reservation "cf_v2_pea_11_3" 0 +- (and (and (and (eq_attr "cpu" "cf_v2") +- (eq_attr "type2" "pea")) +- (eq 
(symbol_ref "get_attr_size (insn)") (const_int 3))) +- (eq_attr "op_mem" "11")) +- "cf_v2_ib3+cf_v2_pea_11") ++(define_insn_reservation "cfv3_omove_11_3" 1 ++ (and (and (and (eq_attr "cpu" "cfv3") ++ (eq_attr "type" " ++move_l,tst_l")) ++ (eq_attr "op_mem" "11")) ++ (eq_attr "size" "3")) ++ "cf_ib3+cfv3_omove_11") ++ ++(define_insn_reservation "cfv1_alu_i1_2" 2 ++ (and (and (and (eq_attr "cpu" "cfv1") ++ (eq_attr "type" " ++alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift")) ++ (eq_attr "op_mem" "i1")) ++ (eq_attr "size" "1,2")) ++ "cf_ib2+cfv12_alu_i1") ++ ++(define_insn_reservation "cfv1_alu_i1_3" 2 ++ (and (and (and (eq_attr "cpu" "cfv1") ++ (eq_attr "type" " ++alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift")) ++ (eq_attr "op_mem" "i1")) ++ (eq_attr "size" "3")) ++ "cf_ib3+cfv12_alu_i1") ++ ++(define_insn_reservation "cfv1_omove_i1_2" 2 ++ (and (and (and (eq_attr "cpu" "cfv1") ++ (eq_attr "type" " ++clr,clr_l,mov3q_l,move,moveq_l,tst, ++move_l,tst_l")) ++ (eq_attr "op_mem" "i1")) ++ (eq_attr "size" "1,2")) ++ "cf_ib2+cfv12_omove_i1") ++ ++(define_insn_reservation "cfv1_omove_i1_3" 2 ++ (and (and (and (eq_attr "cpu" "cfv1") ++ (eq_attr "type" " ++clr,clr_l,mov3q_l,move,moveq_l,tst, ++move_l,tst_l")) ++ (eq_attr "op_mem" "i1")) ++ (eq_attr "size" "3")) ++ "cf_ib3+cfv12_omove_i1") ++ ++(define_insn_reservation "cfv2_alu_i1_2" 2 ++ (and (and (and (eq_attr "cpu" "cfv2") ++ (eq_attr "type" " ++alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift, ++clr,clr_l,mov3q_l,move,moveq_l,tst")) ++ (eq_attr "op_mem" "i1")) ++ (eq_attr "size" "1,2")) ++ "cf_ib2+cfv12_alu_i1") ++ ++(define_insn_reservation "cfv2_alu_i1_3" 2 ++ (and (and (and (eq_attr "cpu" "cfv2") ++ (eq_attr "type" " ++alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift, ++clr,clr_l,mov3q_l,move,moveq_l,tst")) ++ (eq_attr "op_mem" "i1")) ++ (eq_attr "size" "3")) ++ "cf_ib3+cfv12_alu_i1") ++ ++(define_insn_reservation "cfv2_omove_i1_2" 2 ++ (and (and (and (eq_attr "cpu" "cfv2") ++ (eq_attr "type" " ++move_l,tst_l")) ++ (eq_attr "op_mem" "i1")) ++ (eq_attr "size" "1,2")) ++ "cf_ib2+cfv12_omove_i1") ++ ++(define_insn_reservation "cfv2_omove_i1_3" 2 ++ (and (and (and (eq_attr "cpu" "cfv2") ++ (eq_attr "type" " ++move_l,tst_l")) ++ (eq_attr "op_mem" "i1")) ++ (eq_attr "size" "3")) ++ "cf_ib3+cfv12_omove_i1") ++ ++(define_insn_reservation "cfv3_alu_i1_2" 2 ++ (and (and (and (eq_attr "cpu" "cfv3") ++ (eq_attr "type" " ++alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift, ++clr,clr_l,mov3q_l,move,moveq_l,tst")) ++ (eq_attr "op_mem" "i1")) ++ (eq_attr "size" "1,2")) ++ "cf_ib2+cfv3_alu_i1") ++ ++(define_insn_reservation "cfv3_alu_i1_3" 2 ++ (and (and (and (eq_attr "cpu" "cfv3") ++ (eq_attr "type" " ++alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift, ++clr,clr_l,mov3q_l,move,moveq_l,tst")) ++ (eq_attr "op_mem" "i1")) ++ (eq_attr "size" "3")) ++ "cf_ib3+cfv3_alu_i1") ++ ++(define_insn_reservation "cfv3_omove_i1_2" 2 ++ (and (and (and (eq_attr "cpu" "cfv3") ++ (eq_attr "type" " ++move_l,tst_l")) ++ (eq_attr "op_mem" "i1")) ++ (eq_attr "size" "1,2")) ++ "cf_ib2+cfv3_omove_i1") ++ ++(define_insn_reservation "cfv3_omove_i1_3" 2 ++ (and (and (and (eq_attr "cpu" "cfv3") ++ (eq_attr "type" " ++move_l,tst_l")) ++ (eq_attr "op_mem" "i1")) ++ (eq_attr "size" "3")) ++ "cf_ib3+cfv3_omove_i1") ++ ++(define_insn_reservation "cfv1_alu_1i_2" 2 ++ (and (and (and (eq_attr "cpu" "cfv1") ++ (eq_attr "type" " ++alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift")) ++ (eq_attr "op_mem" "1i")) ++ 
(eq_attr "size" "1,2")) ++ "cf_ib2+cfv12_alu_1i") ++ ++(define_insn_reservation "cfv1_alu_1i_3" 2 ++ (and (and (and (eq_attr "cpu" "cfv1") ++ (eq_attr "type" " ++alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift")) ++ (eq_attr "op_mem" "1i")) ++ (eq_attr "size" "3")) ++ "cf_ib3+cfv12_alu_1i") ++ ++(define_insn_reservation "cfv1_omove_1i_2" 2 ++ (and (and (and (eq_attr "cpu" "cfv1") ++ (eq_attr "type" " ++clr,clr_l,mov3q_l,move,moveq_l,tst, ++move_l,tst_l")) ++ (eq_attr "op_mem" "1i")) ++ (eq_attr "size" "1,2")) ++ "cf_ib2+cfv12_omove_1i") ++ ++(define_insn_reservation "cfv1_omove_1i_3" 2 ++ (and (and (and (eq_attr "cpu" "cfv1") ++ (eq_attr "type" " ++clr,clr_l,mov3q_l,move,moveq_l,tst, ++move_l,tst_l")) ++ (eq_attr "op_mem" "1i")) ++ (eq_attr "size" "3")) ++ "cf_ib3+cfv12_omove_1i") ++ ++(define_insn_reservation "cfv2_alu_1i_2" 2 ++ (and (and (and (eq_attr "cpu" "cfv2") ++ (eq_attr "type" " ++alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift, ++clr,clr_l,mov3q_l,move,moveq_l,tst")) ++ (eq_attr "op_mem" "1i")) ++ (eq_attr "size" "1,2")) ++ "cf_ib2+cfv12_alu_1i") ++ ++(define_insn_reservation "cfv2_alu_1i_3" 2 ++ (and (and (and (eq_attr "cpu" "cfv2") ++ (eq_attr "type" " ++alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift, ++clr,clr_l,mov3q_l,move,moveq_l,tst")) ++ (eq_attr "op_mem" "1i")) ++ (eq_attr "size" "3")) ++ "cf_ib3+cfv12_alu_1i") ++ ++(define_insn_reservation "cfv2_omove_1i_2" 2 ++ (and (and (and (eq_attr "cpu" "cfv2") ++ (eq_attr "type" " ++move_l,tst_l")) ++ (eq_attr "op_mem" "1i")) ++ (eq_attr "size" "1,2")) ++ "cf_ib2+cfv12_omove_1i") ++ ++(define_insn_reservation "cfv2_omove_1i_3" 2 ++ (and (and (and (eq_attr "cpu" "cfv2") ++ (eq_attr "type" " ++move_l,tst_l")) ++ (eq_attr "op_mem" "1i")) ++ (eq_attr "size" "3")) ++ "cf_ib3+cfv12_omove_1i") ++ ++(define_insn_reservation "cfv3_alu_1i_2" 2 ++ (and (and (and (eq_attr "cpu" "cfv3") ++ (eq_attr "type" " ++alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift, ++clr,clr_l,mov3q_l,move,moveq_l,tst")) ++ (eq_attr "op_mem" "1i")) ++ (eq_attr "size" "1,2")) ++ "cf_ib2+cfv3_alu_1i") ++ ++(define_insn_reservation "cfv3_alu_1i_3" 2 ++ (and (and (and (eq_attr "cpu" "cfv3") ++ (eq_attr "type" " ++alu_l,aluq_l,bitr,bitrw,cmp,cmp_l,alux_l,ext,neg_l,scc,shift, ++clr,clr_l,mov3q_l,move,moveq_l,tst")) ++ (eq_attr "op_mem" "1i")) ++ (eq_attr "size" "3")) ++ "cf_ib3+cfv3_alu_1i") ++ ++(define_insn_reservation "cfv3_omove_1i_2" 2 ++ (and (and (and (eq_attr "cpu" "cfv3") ++ (eq_attr "type" " ++move_l,tst_l")) ++ (eq_attr "op_mem" "1i")) ++ (eq_attr "size" "1,2")) ++ "cf_ib2+cfv3_omove_1i") ++ ++(define_insn_reservation "cfv3_omove_1i_3" 2 ++ (and (and (and (eq_attr "cpu" "cfv3") ++ (eq_attr "type" " ++move_l,tst_l")) ++ (eq_attr "op_mem" "1i")) ++ (eq_attr "size" "3")) ++ "cf_ib3+cfv3_omove_1i") ++ ++(define_insn_reservation "cfv123_lea_10_1" 1 ++ (and (and (and (eq_attr "cpu" "cfv1,cfv2,cfv3") ++ (eq_attr "type" "lea")) ++ (eq_attr "op_mem" "10,11,1i")) ++ (eq_attr "size" "1")) ++ "cf_ib1+cfv123_lea_10") ++ ++(define_insn_reservation "cfv123_lea_10_2" 1 ++ (and (and (and (eq_attr "cpu" "cfv1,cfv2,cfv3") ++ (eq_attr "type" "lea")) ++ (eq_attr "op_mem" "10,11,1i")) ++ (eq_attr "size" "2")) ++ "cf_ib2+cfv123_lea_10") ++ ++(define_insn_reservation "cfv123_lea_10_3" 1 ++ (and (and (and (eq_attr "cpu" "cfv1,cfv2,cfv3") ++ (eq_attr "type" "lea")) ++ (eq_attr "op_mem" "10,11,1i")) ++ (eq_attr "size" "3")) ++ "cf_ib3+cfv123_lea_10") ++ ++(define_insn_reservation "cfv123_lea_i0_2" 2 ++ (and (and (and (eq_attr "cpu" 
"cfv1,cfv2,cfv3") ++ (eq_attr "type" "lea")) ++ (eq_attr "op_mem" "i0,i1")) ++ (eq_attr "size" "1,2")) ++ "cf_ib2+cfv123_lea_i0") ++ ++(define_insn_reservation "cfv123_lea_i0_3" 2 ++ (and (and (and (eq_attr "cpu" "cfv1,cfv2,cfv3") ++ (eq_attr "type" "lea")) ++ (eq_attr "op_mem" "i0,i1")) ++ (eq_attr "size" "3")) ++ "cf_ib3+cfv123_lea_i0") ++ ++(define_insn_reservation "cfv12_pea_11_1" 1 ++ (and (and (and (eq_attr "cpu" "cfv1,cfv2") ++ (eq_attr "type" "pea")) ++ (eq_attr "op_mem" "11")) ++ (eq_attr "size" "1")) ++ "cf_ib1+cfv12_pea_11") ++ ++(define_insn_reservation "cfv12_pea_11_2" 1 ++ (and (and (and (eq_attr "cpu" "cfv1,cfv2") ++ (eq_attr "type" "pea")) ++ (eq_attr "op_mem" "11")) ++ (eq_attr "size" "2")) ++ "cf_ib2+cfv12_pea_11") ++ ++(define_insn_reservation "cfv12_pea_11_3" 1 ++ (and (and (and (eq_attr "cpu" "cfv1,cfv2") ++ (eq_attr "type" "pea")) ++ (eq_attr "op_mem" "11")) ++ (eq_attr "size" "3")) ++ "cf_ib3+cfv12_pea_11") ++ ++(define_insn_reservation "cfv3_pea_11_1" 1 ++ (and (and (and (eq_attr "cpu" "cfv3") ++ (eq_attr "type" "pea")) ++ (eq_attr "op_mem" "11")) ++ (eq_attr "size" "1")) ++ "cf_ib1+cfv3_pea_11") ++ ++(define_insn_reservation "cfv3_pea_11_2" 1 ++ (and (and (and (eq_attr "cpu" "cfv3") ++ (eq_attr "type" "pea")) ++ (eq_attr "op_mem" "11")) ++ (eq_attr "size" "2")) ++ "cf_ib2+cfv3_pea_11") ++ ++(define_insn_reservation "cfv3_pea_11_3" 1 ++ (and (and (and (eq_attr "cpu" "cfv3") ++ (eq_attr "type" "pea")) ++ (eq_attr "op_mem" "11")) ++ (eq_attr "size" "3")) ++ "cf_ib3+cfv3_pea_11") ++ ++(define_insn_reservation "cfv12_pea_i1_2" 2 ++ (and (and (and (eq_attr "cpu" "cfv1,cfv2") ++ (eq_attr "type" "pea")) ++ (eq_attr "op_mem" "i1")) ++ (eq_attr "size" "1,2")) ++ "cf_ib2+cfv12_pea_i1") ++ ++(define_insn_reservation "cfv12_pea_i1_3" 2 ++ (and (and (and (eq_attr "cpu" "cfv1,cfv2") ++ (eq_attr "type" "pea")) ++ (eq_attr "op_mem" "i1")) ++ (eq_attr "size" "3")) ++ "cf_ib3+cfv12_pea_i1") ++ ++(define_insn_reservation "cfv3_pea_i1_2" 2 ++ (and (and (and (eq_attr "cpu" "cfv3") ++ (eq_attr "type" "pea")) ++ (eq_attr "op_mem" "i1")) ++ (eq_attr "size" "1,2")) ++ "cf_ib2+cfv3_pea_i1") ++ ++(define_insn_reservation "cfv3_pea_i1_3" 2 ++ (and (and (and (eq_attr "cpu" "cfv3") ++ (eq_attr "type" "pea")) ++ (eq_attr "op_mem" "i1")) ++ (eq_attr "size" "3")) ++ "cf_ib3+cfv3_pea_i1") ++ ++(define_insn_reservation "cfv123_mul_l_00_1" 18 ++ (and (and (and (and (eq_attr "cpu" "cfv1,cfv2,cfv3") ++ (eq_attr "mac" "no")) ++ (eq_attr "type" "mul_l")) ++ (eq_attr "op_mem" "00,01,0i")) ++ (eq_attr "size" "1")) ++ "cf_ib1+cfv123_mul_l_00") ++ ++(define_insn_reservation "cfv123_mul_l_00_2" 18 ++ (and (and (and (and (eq_attr "cpu" "cfv1,cfv2,cfv3") ++ (eq_attr "mac" "no")) ++ (eq_attr "type" "mul_l")) ++ (eq_attr "op_mem" "00,01,0i")) ++ (eq_attr "size" "2")) ++ "cf_ib2+cfv123_mul_l_00") ++ ++(define_insn_reservation "cfv123_mul_l_00_3" 18 ++ (and (and (and (and (eq_attr "cpu" "cfv1,cfv2,cfv3") ++ (eq_attr "mac" "no")) ++ (eq_attr "type" "mul_l")) ++ (eq_attr "op_mem" "00,01,0i")) ++ (eq_attr "size" "3")) ++ "cf_ib3+cfv123_mul_l_00") ++ ++(define_insn_reservation "cfv123_mul_w_00_1" 9 ++ (and (and (and (and (eq_attr "cpu" "cfv1,cfv2,cfv3") ++ (eq_attr "mac" "no")) ++ (eq_attr "type" "mul_w")) ++ (eq_attr "op_mem" "00,01,0i")) ++ (eq_attr "size" "1")) ++ "cf_ib1+cfv123_mul_w_00") ++ ++(define_insn_reservation "cfv123_mul_w_00_2" 9 ++ (and (and (and (and (eq_attr "cpu" "cfv1,cfv2,cfv3") ++ (eq_attr "mac" "no")) ++ (eq_attr "type" "mul_w")) ++ (eq_attr "op_mem" "00,01,0i")) ++ (eq_attr "size" "2")) ++ 
"cf_ib2+cfv123_mul_w_00") ++ ++(define_insn_reservation "cfv123_mul_w_00_3" 9 ++ (and (and (and (and (eq_attr "cpu" "cfv1,cfv2,cfv3") ++ (eq_attr "mac" "no")) ++ (eq_attr "type" "mul_w")) ++ (eq_attr "op_mem" "00,01,0i")) ++ (eq_attr "size" "3")) ++ "cf_ib3+cfv123_mul_w_00") ++ ++(define_insn_reservation "cfv12_mul_l_10_1" 20 ++ (and (and (and (and (eq_attr "cpu" "cfv1,cfv2") ++ (eq_attr "mac" "no")) ++ (eq_attr "type" "mul_l")) ++ (eq_attr "op_mem" "10,i0,i1,11,1i")) ++ (eq_attr "size" "1")) ++ "cf_ib1+cfv12_mul_l_10") ++ ++(define_insn_reservation "cfv12_mul_l_10_2" 20 ++ (and (and (and (and (eq_attr "cpu" "cfv1,cfv2") ++ (eq_attr "mac" "no")) ++ (eq_attr "type" "mul_l")) ++ (eq_attr "op_mem" "10,i0,i1,11,1i")) ++ (eq_attr "size" "2")) ++ "cf_ib2+cfv12_mul_l_10") ++ ++(define_insn_reservation "cfv12_mul_l_10_3" 20 ++ (and (and (and (and (eq_attr "cpu" "cfv1,cfv2") ++ (eq_attr "mac" "no")) ++ (eq_attr "type" "mul_l")) ++ (eq_attr "op_mem" "10,i0,i1,11,1i")) ++ (eq_attr "size" "3")) ++ "cf_ib3+cfv12_mul_l_10") ++ ++(define_insn_reservation "cfv3_mul_l_10_1" 21 ++ (and (and (and (and (eq_attr "cpu" "cfv3") ++ (eq_attr "mac" "no")) ++ (eq_attr "type" "mul_l")) ++ (eq_attr "op_mem" "10,i0,i1,11,1i")) ++ (eq_attr "size" "1")) ++ "cf_ib1+cfv3_mul_l_10") ++ ++(define_insn_reservation "cfv3_mul_l_10_2" 21 ++ (and (and (and (and (eq_attr "cpu" "cfv3") ++ (eq_attr "mac" "no")) ++ (eq_attr "type" "mul_l")) ++ (eq_attr "op_mem" "10,i0,i1,11,1i")) ++ (eq_attr "size" "2")) ++ "cf_ib2+cfv3_mul_l_10") ++ ++(define_insn_reservation "cfv3_mul_l_10_3" 21 ++ (and (and (and (and (eq_attr "cpu" "cfv3") ++ (eq_attr "mac" "no")) ++ (eq_attr "type" "mul_l")) ++ (eq_attr "op_mem" "10,i0,i1,11,1i")) ++ (eq_attr "size" "3")) ++ "cf_ib3+cfv3_mul_l_10") ++ ++(define_insn_reservation "cfv12_mul_w_10_1" 11 ++ (and (and (and (and (eq_attr "cpu" "cfv1,cfv2") ++ (eq_attr "mac" "no")) ++ (eq_attr "type" "mul_w")) ++ (eq_attr "op_mem" "10,11,1i")) ++ (eq_attr "size" "1")) ++ "cf_ib1+cfv12_mul_w_10") ++ ++(define_insn_reservation "cfv12_mul_w_10_2" 11 ++ (and (and (and (and (eq_attr "cpu" "cfv1,cfv2") ++ (eq_attr "mac" "no")) ++ (eq_attr "type" "mul_w")) ++ (eq_attr "op_mem" "10,11,1i")) ++ (eq_attr "size" "2")) ++ "cf_ib2+cfv12_mul_w_10") ++ ++(define_insn_reservation "cfv12_mul_w_10_3" 11 ++ (and (and (and (and (eq_attr "cpu" "cfv1,cfv2") ++ (eq_attr "mac" "no")) ++ (eq_attr "type" "mul_w")) ++ (eq_attr "op_mem" "10,11,1i")) ++ (eq_attr "size" "3")) ++ "cf_ib3+cfv12_mul_w_10") ++ ++(define_insn_reservation "cfv3_mul_w_10_1" 12 ++ (and (and (and (and (eq_attr "cpu" "cfv3") ++ (eq_attr "mac" "no")) ++ (eq_attr "type" "mul_w")) ++ (eq_attr "op_mem" "10,11,1i")) ++ (eq_attr "size" "1")) ++ "cf_ib1+cfv3_mul_w_10") ++ ++(define_insn_reservation "cfv3_mul_w_10_2" 12 ++ (and (and (and (and (eq_attr "cpu" "cfv3") ++ (eq_attr "mac" "no")) ++ (eq_attr "type" "mul_w")) ++ (eq_attr "op_mem" "10,11,1i")) ++ (eq_attr "size" "2")) ++ "cf_ib2+cfv3_mul_w_10") ++ ++(define_insn_reservation "cfv3_mul_w_10_3" 12 ++ (and (and (and (and (eq_attr "cpu" "cfv3") ++ (eq_attr "mac" "no")) ++ (eq_attr "type" "mul_w")) ++ (eq_attr "op_mem" "10,11,1i")) ++ (eq_attr "size" "3")) ++ "cf_ib3+cfv3_mul_w_10") ++ ++(define_insn_reservation "cfv12_mul_w_i0_2" 12 ++ (and (and (and (and (eq_attr "cpu" "cfv1,cfv2") ++ (eq_attr "mac" "no")) ++ (eq_attr "type" "mul_w")) ++ (eq_attr "op_mem" "i0,i1")) ++ (eq_attr "size" "1,2")) ++ "cf_ib2+cfv12_mul_w_i0") ++ ++(define_insn_reservation "cfv12_mul_w_i0_3" 12 ++ (and (and (and (and (eq_attr "cpu" "cfv1,cfv2") ++ 
(eq_attr "mac" "no")) ++ (eq_attr "type" "mul_w")) ++ (eq_attr "op_mem" "i0,i1")) ++ (eq_attr "size" "3")) ++ "cf_ib3+cfv12_mul_w_i0") ++ ++(define_insn_reservation "cfv3_mul_w_i0_2" 13 ++ (and (and (and (and (eq_attr "cpu" "cfv3") ++ (eq_attr "mac" "no")) ++ (eq_attr "type" "mul_w")) ++ (eq_attr "op_mem" "i0,i1")) ++ (eq_attr "size" "1,2")) ++ "cf_ib2+cfv3_mul_w_i0") ++ ++(define_insn_reservation "cfv3_mul_w_i0_3" 13 ++ (and (and (and (and (eq_attr "cpu" "cfv3") ++ (eq_attr "mac" "no")) ++ (eq_attr "type" "mul_w")) ++ (eq_attr "op_mem" "i0,i1")) ++ (eq_attr "size" "3")) ++ "cf_ib3+cfv3_mul_w_i0") ++ ++(define_insn_reservation "cfv123_mac_l_00_1" 5 ++ (and (and (and (and (eq_attr "cpu" "cfv1,cfv2,cfv3") ++ (eq_attr "mac" "cf_mac")) ++ (eq_attr "type" "mul_l")) ++ (eq_attr "op_mem" "00,01,0i")) ++ (eq_attr "size" "1")) ++ "cf_ib1+cfv123_mac_l_00") ++ ++(define_insn_reservation "cfv123_mac_l_00_2" 5 ++ (and (and (and (and (eq_attr "cpu" "cfv1,cfv2,cfv3") ++ (eq_attr "mac" "cf_mac")) ++ (eq_attr "type" "mul_l")) ++ (eq_attr "op_mem" "00,01,0i")) ++ (eq_attr "size" "2")) ++ "cf_ib2+cfv123_mac_l_00") ++ ++(define_insn_reservation "cfv123_mac_l_00_3" 5 ++ (and (and (and (and (eq_attr "cpu" "cfv1,cfv2,cfv3") ++ (eq_attr "mac" "cf_mac")) ++ (eq_attr "type" "mul_l")) ++ (eq_attr "op_mem" "00,01,0i")) ++ (eq_attr "size" "3")) ++ "cf_ib3+cfv123_mac_l_00") ++ ++(define_insn_reservation "cfv123_mac_w_00_1" 3 ++ (and (and (and (and (eq_attr "cpu" "cfv1,cfv2,cfv3") ++ (eq_attr "mac" "cf_mac")) ++ (eq_attr "type" "mul_w")) ++ (eq_attr "op_mem" "00,01,0i")) ++ (eq_attr "size" "1")) ++ "cf_ib1+cfv123_mac_w_00") ++ ++(define_insn_reservation "cfv123_mac_w_00_2" 3 ++ (and (and (and (and (eq_attr "cpu" "cfv1,cfv2,cfv3") ++ (eq_attr "mac" "cf_mac")) ++ (eq_attr "type" "mul_w")) ++ (eq_attr "op_mem" "00,01,0i")) ++ (eq_attr "size" "2")) ++ "cf_ib2+cfv123_mac_w_00") ++ ++(define_insn_reservation "cfv123_mac_w_00_3" 3 ++ (and (and (and (and (eq_attr "cpu" "cfv1,cfv2,cfv3") ++ (eq_attr "mac" "cf_mac")) ++ (eq_attr "type" "mul_w")) ++ (eq_attr "op_mem" "00,01,0i")) ++ (eq_attr "size" "3")) ++ "cf_ib3+cfv123_mac_w_00") ++ ++(define_insn_reservation "cfv12_mac_l_10_1" 7 ++ (and (and (and (and (eq_attr "cpu" "cfv1,cfv2") ++ (eq_attr "mac" "cf_mac")) ++ (eq_attr "type" "mul_l")) ++ (eq_attr "op_mem" "10,i0,i1,11,1i")) ++ (eq_attr "size" "1")) ++ "cf_ib1+cfv12_mac_l_10") ++ ++(define_insn_reservation "cfv12_mac_l_10_2" 7 ++ (and (and (and (and (eq_attr "cpu" "cfv1,cfv2") ++ (eq_attr "mac" "cf_mac")) ++ (eq_attr "type" "mul_l")) ++ (eq_attr "op_mem" "10,i0,i1,11,1i")) ++ (eq_attr "size" "2")) ++ "cf_ib2+cfv12_mac_l_10") ++ ++(define_insn_reservation "cfv12_mac_l_10_3" 7 ++ (and (and (and (and (eq_attr "cpu" "cfv1,cfv2") ++ (eq_attr "mac" "cf_mac")) ++ (eq_attr "type" "mul_l")) ++ (eq_attr "op_mem" "10,i0,i1,11,1i")) ++ (eq_attr "size" "3")) ++ "cf_ib3+cfv12_mac_l_10") ++ ++(define_insn_reservation "cfv3_mac_l_10_1" 8 ++ (and (and (and (and (eq_attr "cpu" "cfv3") ++ (eq_attr "mac" "cf_mac")) ++ (eq_attr "type" "mul_l")) ++ (eq_attr "op_mem" "10,i0,i1,11,1i")) ++ (eq_attr "size" "1")) ++ "cf_ib1+cfv3_mac_l_10") ++ ++(define_insn_reservation "cfv3_mac_l_10_2" 8 ++ (and (and (and (and (eq_attr "cpu" "cfv3") ++ (eq_attr "mac" "cf_mac")) ++ (eq_attr "type" "mul_l")) ++ (eq_attr "op_mem" "10,i0,i1,11,1i")) ++ (eq_attr "size" "2")) ++ "cf_ib2+cfv3_mac_l_10") ++ ++(define_insn_reservation "cfv3_mac_l_10_3" 8 ++ (and (and (and (and (eq_attr "cpu" "cfv3") ++ (eq_attr "mac" "cf_mac")) ++ (eq_attr "type" "mul_l")) ++ (eq_attr "op_mem" 
"10,i0,i1,11,1i")) ++ (eq_attr "size" "3")) ++ "cf_ib3+cfv3_mac_l_10") ++ ++(define_insn_reservation "cfv12_mac_w_10_1" 5 ++ (and (and (and (and (eq_attr "cpu" "cfv1,cfv2") ++ (eq_attr "mac" "cf_mac")) ++ (eq_attr "type" "mul_w")) ++ (eq_attr "op_mem" "10,11,1i")) ++ (eq_attr "size" "1")) ++ "cf_ib1+cfv12_mac_w_10") ++ ++(define_insn_reservation "cfv12_mac_w_10_2" 5 ++ (and (and (and (and (eq_attr "cpu" "cfv1,cfv2") ++ (eq_attr "mac" "cf_mac")) ++ (eq_attr "type" "mul_w")) ++ (eq_attr "op_mem" "10,11,1i")) ++ (eq_attr "size" "2")) ++ "cf_ib2+cfv12_mac_w_10") ++ ++(define_insn_reservation "cfv12_mac_w_10_3" 5 ++ (and (and (and (and (eq_attr "cpu" "cfv1,cfv2") ++ (eq_attr "mac" "cf_mac")) ++ (eq_attr "type" "mul_w")) ++ (eq_attr "op_mem" "10,11,1i")) ++ (eq_attr "size" "3")) ++ "cf_ib3+cfv12_mac_w_10") ++ ++(define_insn_reservation "cfv3_mac_w_10_1" 6 ++ (and (and (and (and (eq_attr "cpu" "cfv3") ++ (eq_attr "mac" "cf_mac")) ++ (eq_attr "type" "mul_w")) ++ (eq_attr "op_mem" "10,11,1i")) ++ (eq_attr "size" "1")) ++ "cf_ib1+cfv3_mac_w_10") ++ ++(define_insn_reservation "cfv3_mac_w_10_2" 6 ++ (and (and (and (and (eq_attr "cpu" "cfv3") ++ (eq_attr "mac" "cf_mac")) ++ (eq_attr "type" "mul_w")) ++ (eq_attr "op_mem" "10,11,1i")) ++ (eq_attr "size" "2")) ++ "cf_ib2+cfv3_mac_w_10") ++ ++(define_insn_reservation "cfv3_mac_w_10_3" 6 ++ (and (and (and (and (eq_attr "cpu" "cfv3") ++ (eq_attr "mac" "cf_mac")) ++ (eq_attr "type" "mul_w")) ++ (eq_attr "op_mem" "10,11,1i")) ++ (eq_attr "size" "3")) ++ "cf_ib3+cfv3_mac_w_10") ++ ++(define_insn_reservation "cfv12_mac_w_i0_2" 6 ++ (and (and (and (and (eq_attr "cpu" "cfv1,cfv2") ++ (eq_attr "mac" "cf_mac")) ++ (eq_attr "type" "mul_w")) ++ (eq_attr "op_mem" "i0,i1")) ++ (eq_attr "size" "1,2")) ++ "cf_ib2+cfv12_mac_w_i0") ++ ++(define_insn_reservation "cfv12_mac_w_i0_3" 6 ++ (and (and (and (and (eq_attr "cpu" "cfv1,cfv2") ++ (eq_attr "mac" "cf_mac")) ++ (eq_attr "type" "mul_w")) ++ (eq_attr "op_mem" "i0,i1")) ++ (eq_attr "size" "3")) ++ "cf_ib3+cfv12_mac_w_i0") ++ ++(define_insn_reservation "cfv3_mac_w_i0_2" 7 ++ (and (and (and (and (eq_attr "cpu" "cfv3") ++ (eq_attr "mac" "cf_mac")) ++ (eq_attr "type" "mul_w")) ++ (eq_attr "op_mem" "i0,i1")) ++ (eq_attr "size" "1,2")) ++ "cf_ib2+cfv3_mac_w_i0") ++ ++(define_insn_reservation "cfv3_mac_w_i0_3" 7 ++ (and (and (and (and (eq_attr "cpu" "cfv3") ++ (eq_attr "mac" "cf_mac")) ++ (eq_attr "type" "mul_w")) ++ (eq_attr "op_mem" "i0,i1")) ++ (eq_attr "size" "3")) ++ "cf_ib3+cfv3_mac_w_i0") ++ ++(define_insn_reservation "cfv123_emac_00_1" 4 ++ (and (and (and (and (eq_attr "cpu" "cfv1,cfv2,cfv3") ++ (eq_attr "mac" "cf_emac")) ++ (eq_attr "type" "mul_l,mul_w")) ++ (eq_attr "op_mem" "00,01,0i")) ++ (eq_attr "size" "1")) ++ "cf_ib1+cfv123_emac_00") ++ ++(define_insn_reservation "cfv123_emac_00_2" 4 ++ (and (and (and (and (eq_attr "cpu" "cfv1,cfv2,cfv3") ++ (eq_attr "mac" "cf_emac")) ++ (eq_attr "type" "mul_l,mul_w")) ++ (eq_attr "op_mem" "00,01,0i")) ++ (eq_attr "size" "2")) ++ "cf_ib2+cfv123_emac_00") ++ ++(define_insn_reservation "cfv123_emac_00_3" 4 ++ (and (and (and (and (eq_attr "cpu" "cfv1,cfv2,cfv3") ++ (eq_attr "mac" "cf_emac")) ++ (eq_attr "type" "mul_l,mul_w")) ++ (eq_attr "op_mem" "00,01,0i")) ++ (eq_attr "size" "3")) ++ "cf_ib3+cfv123_emac_00") ++ ++(define_insn_reservation "cfv12_emac_l_10_1" 6 ++ (and (and (and (and (eq_attr "cpu" "cfv1,cfv2") ++ (eq_attr "mac" "cf_emac")) ++ (eq_attr "type" "mul_l")) ++ (eq_attr "op_mem" "10,i0,i1,11,1i")) ++ (eq_attr "size" "1")) ++ "cf_ib1+cfv12_emac_10") ++ 
++(define_insn_reservation "cfv12_emac_l_10_2" 6 ++ (and (and (and (and (eq_attr "cpu" "cfv1,cfv2") ++ (eq_attr "mac" "cf_emac")) ++ (eq_attr "type" "mul_l")) ++ (eq_attr "op_mem" "10,i0,i1,11,1i")) ++ (eq_attr "size" "2")) ++ "cf_ib2+cfv12_emac_10") ++ ++(define_insn_reservation "cfv12_emac_l_10_3" 6 ++ (and (and (and (and (eq_attr "cpu" "cfv1,cfv2") ++ (eq_attr "mac" "cf_emac")) ++ (eq_attr "type" "mul_l")) ++ (eq_attr "op_mem" "10,i0,i1,11,1i")) ++ (eq_attr "size" "3")) ++ "cf_ib3+cfv12_emac_10") ++ ++(define_insn_reservation "cfv3_emac_l_10_1" 7 ++ (and (and (and (and (eq_attr "cpu" "cfv3") ++ (eq_attr "mac" "cf_emac")) ++ (eq_attr "type" "mul_l")) ++ (eq_attr "op_mem" "10,i0,i1,11,1i")) ++ (eq_attr "size" "1")) ++ "cf_ib1+cfv3_emac_10") ++ ++(define_insn_reservation "cfv3_emac_l_10_2" 7 ++ (and (and (and (and (eq_attr "cpu" "cfv3") ++ (eq_attr "mac" "cf_emac")) ++ (eq_attr "type" "mul_l")) ++ (eq_attr "op_mem" "10,i0,i1,11,1i")) ++ (eq_attr "size" "2")) ++ "cf_ib2+cfv3_emac_10") ++ ++(define_insn_reservation "cfv3_emac_l_10_3" 7 ++ (and (and (and (and (eq_attr "cpu" "cfv3") ++ (eq_attr "mac" "cf_emac")) ++ (eq_attr "type" "mul_l")) ++ (eq_attr "op_mem" "10,i0,i1,11,1i")) ++ (eq_attr "size" "3")) ++ "cf_ib3+cfv3_emac_10") ++ ++(define_insn_reservation "cfv12_emac_w_10_1" 6 ++ (and (and (and (and (eq_attr "cpu" "cfv1,cfv2") ++ (eq_attr "mac" "cf_emac")) ++ (eq_attr "type" "mul_w")) ++ (eq_attr "op_mem" "10,11,1i")) ++ (eq_attr "size" "1")) ++ "cf_ib1+cfv12_emac_10") ++ ++(define_insn_reservation "cfv12_emac_w_10_2" 6 ++ (and (and (and (and (eq_attr "cpu" "cfv1,cfv2") ++ (eq_attr "mac" "cf_emac")) ++ (eq_attr "type" "mul_w")) ++ (eq_attr "op_mem" "10,11,1i")) ++ (eq_attr "size" "2")) ++ "cf_ib2+cfv12_emac_10") ++ ++(define_insn_reservation "cfv12_emac_w_10_3" 6 ++ (and (and (and (and (eq_attr "cpu" "cfv1,cfv2") ++ (eq_attr "mac" "cf_emac")) ++ (eq_attr "type" "mul_w")) ++ (eq_attr "op_mem" "10,11,1i")) ++ (eq_attr "size" "3")) ++ "cf_ib3+cfv12_emac_10") ++ ++(define_insn_reservation "cfv3_emac_w_10_1" 7 ++ (and (and (and (and (eq_attr "cpu" "cfv3") ++ (eq_attr "mac" "cf_emac")) ++ (eq_attr "type" "mul_w")) ++ (eq_attr "op_mem" "10,11,1i")) ++ (eq_attr "size" "1")) ++ "cf_ib1+cfv3_emac_10") ++ ++(define_insn_reservation "cfv3_emac_w_10_2" 7 ++ (and (and (and (and (eq_attr "cpu" "cfv3") ++ (eq_attr "mac" "cf_emac")) ++ (eq_attr "type" "mul_w")) ++ (eq_attr "op_mem" "10,11,1i")) ++ (eq_attr "size" "2")) ++ "cf_ib2+cfv3_emac_10") ++ ++(define_insn_reservation "cfv3_emac_w_10_3" 7 ++ (and (and (and (and (eq_attr "cpu" "cfv3") ++ (eq_attr "mac" "cf_emac")) ++ (eq_attr "type" "mul_w")) ++ (eq_attr "op_mem" "10,11,1i")) ++ (eq_attr "size" "3")) ++ "cf_ib3+cfv3_emac_10") ++ ++(define_insn_reservation "cfv12_emac_w_i0_2" 7 ++ (and (and (and (and (eq_attr "cpu" "cfv1,cfv2") ++ (eq_attr "mac" "cf_emac")) ++ (eq_attr "type" "mul_w")) ++ (eq_attr "op_mem" "i0,i1")) ++ (eq_attr "size" "1,2")) ++ "cf_ib2+cfv12_emac_w_i0") ++ ++(define_insn_reservation "cfv12_emac_w_i0_3" 7 ++ (and (and (and (and (eq_attr "cpu" "cfv1,cfv2") ++ (eq_attr "mac" "cf_emac")) ++ (eq_attr "type" "mul_w")) ++ (eq_attr "op_mem" "i0,i1")) ++ (eq_attr "size" "3")) ++ "cf_ib3+cfv12_emac_w_i0") ++ ++(define_insn_reservation "cfv3_emac_w_i0_2" 8 ++ (and (and (and (and (eq_attr "cpu" "cfv3") ++ (eq_attr "mac" "cf_emac")) ++ (eq_attr "type" "mul_w")) ++ (eq_attr "op_mem" "i0,i1")) ++ (eq_attr "size" "1,2")) ++ "cf_ib2+cfv3_emac_w_i0") ++ ++(define_insn_reservation "cfv3_emac_w_i0_3" 8 ++ (and (and (and (and (eq_attr "cpu" "cfv3") ++ 
(eq_attr "mac" "cf_emac")) ++ (eq_attr "type" "mul_w")) ++ (eq_attr "op_mem" "i0,i1")) ++ (eq_attr "size" "3")) ++ "cf_ib3+cfv3_emac_w_i0") ++ ++(define_insn_reservation "cfv12_rts" 5 ++ (and (eq_attr "cpu" "cfv1,cfv2") ++ (eq_attr "type" "rts")) ++ "cf_ib1+cfv12_rts") ++ ++(define_insn_reservation "cfv3_rts" 8 ++ (and (eq_attr "cpu" "cfv3") ++ (eq_attr "type" "rts")) ++ "cf_ib1+cfv3_rts") ++ ++(define_insn_reservation "cfv12_call_1" 3 ++ (and (and (eq_attr "cpu" "cfv1,cfv2") ++ (eq_attr "type" "bsr,jsr")) ++ (eq_attr "size" "1")) ++ "cf_ib1+cfv12_call") ++ ++(define_insn_reservation "cfv12_call_2" 3 ++ (and (and (eq_attr "cpu" "cfv1,cfv2") ++ (eq_attr "type" "bsr,jsr")) ++ (eq_attr "size" "2")) ++ "cf_ib2+cfv12_call") ++ ++(define_insn_reservation "cfv12_call_3" 3 ++ (and (and (eq_attr "cpu" "cfv1,cfv2") ++ (eq_attr "type" "bsr,jsr")) ++ (eq_attr "size" "3")) ++ "cf_ib3+cfv12_call") ++ ++(define_insn_reservation "cfv3_call_1" 1 ++ (and (and (eq_attr "cpu" "cfv3") ++ (eq_attr "type" "bsr,jsr")) ++ (eq_attr "size" "1")) ++ "cf_ib1+cfv3_call") ++ ++(define_insn_reservation "cfv3_call_2" 1 ++ (and (and (eq_attr "cpu" "cfv3") ++ (eq_attr "type" "bsr,jsr")) ++ (eq_attr "size" "2")) ++ "cf_ib2+cfv3_call") ++ ++(define_insn_reservation "cfv3_call_3" 1 ++ (and (and (eq_attr "cpu" "cfv3") ++ (eq_attr "type" "bsr,jsr")) ++ (eq_attr "size" "3")) ++ "cf_ib3+cfv3_call") ++ ++(define_insn_reservation "cfv12_bcc_1" 2 ++ (and (and (eq_attr "cpu" "cfv1,cfv2") ++ (eq_attr "type" "bcc")) ++ (eq_attr "size" "1")) ++ "cf_ib1+cfv12_bcc") ++ ++(define_insn_reservation "cfv12_bcc_2" 2 ++ (and (and (eq_attr "cpu" "cfv1,cfv2") ++ (eq_attr "type" "bcc")) ++ (eq_attr "size" "2")) ++ "cf_ib2+cfv12_bcc") ++ ++(define_insn_reservation "cfv12_bcc_3" 2 ++ (and (and (eq_attr "cpu" "cfv1,cfv2") ++ (eq_attr "type" "bcc")) ++ (eq_attr "size" "3")) ++ "cf_ib3+cfv12_bcc") ++ ++(define_insn_reservation "cfv3_bcc_1" 1 ++ (and (and (eq_attr "cpu" "cfv3") ++ (eq_attr "type" "bcc")) ++ (eq_attr "size" "1")) ++ "cf_ib1+cfv3_bcc") ++ ++(define_insn_reservation "cfv3_bcc_2" 1 ++ (and (and (eq_attr "cpu" "cfv3") ++ (eq_attr "type" "bcc")) ++ (eq_attr "size" "2")) ++ "cf_ib2+cfv3_bcc") ++ ++(define_insn_reservation "cfv3_bcc_3" 1 ++ (and (and (eq_attr "cpu" "cfv3") ++ (eq_attr "type" "bcc")) ++ (eq_attr "size" "3")) ++ "cf_ib3+cfv3_bcc") ++ ++(define_insn_reservation "cfv12_bra_1" 2 ++ (and (and (eq_attr "cpu" "cfv1,cfv2") ++ (eq_attr "type" "bra")) ++ (eq_attr "size" "1")) ++ "cf_ib1+cfv12_bra") ++ ++(define_insn_reservation "cfv12_bra_2" 2 ++ (and (and (eq_attr "cpu" "cfv1,cfv2") ++ (eq_attr "type" "bra")) ++ (eq_attr "size" "2")) ++ "cf_ib2+cfv12_bra") ++ ++(define_insn_reservation "cfv12_bra_3" 2 ++ (and (and (eq_attr "cpu" "cfv1,cfv2") ++ (eq_attr "type" "bra")) ++ (eq_attr "size" "3")) ++ "cf_ib3+cfv12_bra") ++ ++(define_insn_reservation "cfv3_bra_1" 1 ++ (and (and (eq_attr "cpu" "cfv3") ++ (eq_attr "type" "bra")) ++ (eq_attr "size" "1")) ++ "cf_ib1+cfv3_bra") ++ ++(define_insn_reservation "cfv3_bra_2" 1 ++ (and (and (eq_attr "cpu" "cfv3") ++ (eq_attr "type" "bra")) ++ (eq_attr "size" "2")) ++ "cf_ib2+cfv3_bra") ++ ++(define_insn_reservation "cfv3_bra_3" 1 ++ (and (and (eq_attr "cpu" "cfv3") ++ (eq_attr "type" "bra")) ++ (eq_attr "size" "3")) ++ "cf_ib3+cfv3_bra") ++ ++(define_insn_reservation "cfv12_jmp_1" 3 ++ (and (and (eq_attr "cpu" "cfv1,cfv2") ++ (eq_attr "type" "jmp")) ++ (eq_attr "size" "1")) ++ "cf_ib1+cfv12_jmp") ++ ++(define_insn_reservation "cfv12_jmp_2" 3 ++ (and (and (eq_attr "cpu" "cfv1,cfv2") ++ (eq_attr "type" 
"jmp")) ++ (eq_attr "size" "2")) ++ "cf_ib2+cfv12_jmp") ++ ++(define_insn_reservation "cfv12_jmp_3" 3 ++ (and (and (eq_attr "cpu" "cfv1,cfv2") ++ (eq_attr "type" "jmp")) ++ (eq_attr "size" "3")) ++ "cf_ib3+cfv12_jmp") ++ ++(define_insn_reservation "cfv3_jmp_1" 5 ++ (and (and (eq_attr "cpu" "cfv3") ++ (eq_attr "type" "jmp")) ++ (eq_attr "size" "1")) ++ "cf_ib1+cfv3_jmp") ++ ++(define_insn_reservation "cfv3_jmp_2" 5 ++ (and (and (eq_attr "cpu" "cfv3") ++ (eq_attr "type" "jmp")) ++ (eq_attr "size" "2")) ++ "cf_ib2+cfv3_jmp") ++ ++(define_insn_reservation "cfv3_jmp_3" 5 ++ (and (and (eq_attr "cpu" "cfv3") ++ (eq_attr "type" "jmp")) ++ (eq_attr "size" "3")) ++ "cf_ib3+cfv3_jmp") ++ ++(define_insn_reservation "cfv12_unlk" 2 ++ (and (eq_attr "cpu" "cfv1,cfv2") ++ (eq_attr "type" "unlk")) ++ "cf_ib1+cfv12_alu_10") ++ ++(define_insn_reservation "cfv3_unlk" 3 ++ (and (eq_attr "cpu" "cfv3") ++ (eq_attr "type" "unlk")) ++ "cf_ib1+cfv3_alu_10") ++ ++;; Dummy reservation for instructions that are not handled. ++(define_insn_reservation "cfv123_guess" 3 ++ (and (eq_attr "cpu" "cfv1,cfv2,cfv3") ++ (eq_attr "type" "falu,fbcc,fcmp,fdiv,fmove,fmul,fneg,fsqrt,ftst, ++ div_w,div_l,link,mvsz,nop,trap,unknown")) ++ "cf_ib3+cfv123_guess+cf_dsoc+cf_agex+cf_mem") ++ ++;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ++;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ++;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ++ ++;; Below is pipeline description of ColdFire V4 core. ++;; It is substantially different from the description of V1, V2 or V3 cores, ++;; primarily due to no need to model the instruction buffer. ++;; ++;; V4 pipeline model uses a completely separate set of cpu units. + +-(define_insn_reservation "cf_v2_pea_i1_2" 0 +- (and (and (and (eq_attr "cpu" "cf_v2") +- (eq_attr "type2" "pea")) +- (eq (symbol_ref "get_attr_size (insn)") (const_int 2))) +- (eq_attr "op_mem" "i1")) +- "cf_v2_ib2+cf_v2_pea_i1") ++;; Operand Execution Pipeline. ++(define_automaton "cfv4_oep") + +-(define_insn_reservation "cf_v2_pea_i1_3" 0 +- (and (and (and (eq_attr "cpu" "cf_v2") +- (eq_attr "type2" "pea")) +- (eq (symbol_ref "get_attr_size (insn)") (const_int 3))) +- (eq_attr "op_mem" "i1")) +- "cf_v2_ib3+cf_v2_pea_i1") ++(define_cpu_unit "cfv4_oag,cfv4_oc1,cfv4_oc2,cfv4_ex,cfv4_da" ++ "cfv4_oep") + +-(define_insn_reservation "cf_v2_mul_00_1" 4 +- (and (and (and (eq_attr "cpu" "cf_v2") +- (eq_attr "type2" "mul")) +- (eq (symbol_ref "get_attr_size (insn)") (const_int 1))) ++;; This automaton is used to support CFv4 dual-issue. ++(define_automaton "cfv4_ds") ++ ++;; V4 has 3 cases of dual-issue. ++;; After issuing a cfv4_pOEPx instruction, it'll be possible to issue ++;; a cfv4_sOEPx instruction on the same cycle (see final_presence_sets below). ++(define_cpu_unit "cfv4_pOEP1,cfv4_sOEP1, ++ cfv4_pOEP2,cfv4_sOEP2, ++ cfv4_pOEP3,cfv4_sOEP3" "cfv4_ds") ++ ++(final_presence_set "cfv4_sOEP1" "cfv4_pOEP1") ++(final_presence_set "cfv4_sOEP2" "cfv4_pOEP2") ++(final_presence_set "cfv4_sOEP3" "cfv4_pOEP3") ++ ++;; Reservation for instructions that don't allow dual-issue. ++(define_reservation "cfv4_ds" "cfv4_pOEP1+cfv4_sOEP1+ ++ cfv4_pOEP2+cfv4_sOEP2+ ++ cfv4_pOEP3+cfv4_sOEP3") ++ ++;; Memory access resource. ++(define_automaton "cfv4_mem") ++ ++(define_cpu_unit "cfv4_mem" "cfv4_mem") ++ ++;; EMAC. ++(define_automaton "cfv4_emac") ++ ++(define_cpu_unit "cfv4_emac" "cfv4_emac") ++ ++;; FPU. 
++(define_automaton "cfv4_fp") ++ ++(define_cpu_unit "cfv4_fp" "cfv4_fp") ++ ++;; Automaton for unknown instruction. ++(define_automaton "cfv4_guess") ++ ++(define_query_cpu_unit "cfv4_guess" "cfv4_guess") ++ ++;; This bypass allows 1st case of dual-issue. ++(define_bypass 0 "cfv4_00_oag_pOEP1,cfv4_10_pOEP1,cfv4_i0_pOEP1" ++ "cfv4_00_oag,cfv4_00_oag_pOEP3_sOEP12,cfv4_00_oag_pOEP1, ++ cfv4_00_oag_moveql,cfv4_00_ex_sOEP13") ++ ++;; The following bypasses decrease the latency of producers if it modifies ++;; a target register in the EX stage and the consumer also uses ++;; that register in the EX stage. ++(define_bypass 1 "cfv4_00_ex" "cfv4_00_ex,cfv4_00_ex_sOEP13") ++(define_bypass 1 "cfv4_00_ex" "cfv4_10,cfv4_10_pOEP1,cfv4_i0,cfv4_i0_pOEP1" ++ "!m68k_sched_address_bypass_p") ++ ++;; Indexed loads with scale factors 2 and 4 require an update of the index ++;; register in the register file. Considering that the index register is ++;; only needed at the second cycle of address generation, we get ++;; a latency of 4. ++;; Producers for indexed loads with scale factor 1 should have ++;; a latency of 3. Since we're only allowed one bypass, we handle it ++;; in the adjust_cost hook. ++(define_bypass 4 ++ "cfv4_00_oag,cfv4_00_oag_pOEP3_sOEP12,cfv4_00_oag_lea,cfv4_00_oag_pOEP1, ++ cfv4_00_oag_moveql" ++ "cfv4_i0,cfv4_i0_pOEP1" ++ "m68k_sched_indexed_address_bypass_p") ++ ++;; First part of cfv4_00. ++;; If issued in pairs with cfv4_movel_?0, the cost should be increased. ++;; ??? Is it possible that combined cfv4_movel_00 and cfv4_oag_00 instructions ++;; have longer latency than the two instructions emitted sequentially? ++;; Due to register renaming, the result of the sequence would be available ++;; after 3 cycles, instead of 4 for combined instruction? ++(define_insn_reservation "cfv4_00_oag" 1 ++ (and (and (eq_attr "cpu" "cfv4") ++ (eq_attr "type" "alu_l,aluq_l,clr_l,cmp_l,mov3q_l,neg_l")) + (eq_attr "op_mem" "00")) +- "cf_v2_ib1+cf_v2_mul_00") ++ "cfv4_sOEP1|cfv4_sOEP3|(cfv4_ds,cfv4_oag,cfv4_oc1,cfv4_oc2,cfv4_ex)") + +-(define_insn_reservation "cf_v2_mul_00_2" 4 +- (and (and (and (eq_attr "cpu" "cf_v2") +- (eq_attr "type2" "mul")) +- (eq (symbol_ref "get_attr_size (insn)") (const_int 2))) ++(define_insn_reservation "cfv4_00_oag_pOEP3_sOEP12" 1 ++ (and (and (eq_attr "cpu" "cfv4") ++ (eq_attr "type" "move_l,mov3q_l,clr_l")) ++ (and (eq_attr "op_mem" "00") ++ (and (eq_attr "opx_type" "Rn") ++ (eq_attr "opy_type" "none,imm_q,imm_w,imm_l")))) ++ "cfv4_sOEP1|cfv4_sOEP2|(cfv4_pOEP3,cfv4_oag,cfv4_oc1,cfv4_oc2,cfv4_ex)") ++ ++(define_insn_reservation "cfv4_00_oag_lea" 1 ++ (and (eq_attr "cpu" "cfv4") ++ (eq_attr "type" "lea")) ++ "cfv4_pOEP3,cfv4_oag,cfv4_oc1,cfv4_oc2,cfv4_ex") ++ ++(define_insn_reservation "cfv4_00_oag_pOEP1" 1 ++ (and (and (eq_attr "cpu" "cfv4") ++ (eq_attr "type" "move_l,mov3q_l,clr_l")) ++ (and (eq_attr "op_mem" "00") ++ (ior (eq_attr "opx_type" "!Rn") ++ (eq_attr "opy_type" "!none,imm_q,imm_w,imm_l")))) ++ "cfv4_sOEP1|(cfv4_pOEP1,cfv4_oag,cfv4_oc1,cfv4_oc2,cfv4_ex)") ++ ++(define_insn_reservation "cfv4_00_oag_moveql" 1 ++ (and (and (eq_attr "cpu" "cfv4") ++ (eq_attr "type" "moveq_l")) + (eq_attr "op_mem" "00")) +- "cf_v2_ib2+cf_v2_mul_00") ++ "cfv4_sOEP1|cfv4_sOEP2|cfv4_sOEP3|(cfv4_pOEP3,cfv4_oag,cfv4_oc1,cfv4_oc2,cfv4_ex)") + +-(define_insn_reservation "cf_v2_mul_00_3" 4 +- (and (and (and (eq_attr "cpu" "cf_v2") +- (eq_attr "type2" "mul")) +- (eq (symbol_ref "get_attr_size (insn)") (const_int 3))) ++;; Second part of cfv4_00. 
++;; Latency is either 1 or 4 depending on which stage the consumer ++;; will need the data. ++ ++(define_insn_reservation "cfv4_00_ex" 4 ++ (and (and (eq_attr "cpu" "cfv4") ++ (eq_attr "type" "bitr,bitrw,clr,cmp,move,mvsz,scc,tst")) + (eq_attr "op_mem" "00")) +- "cf_v2_ib3+cf_v2_mul_00") ++ "cfv4_ds,cfv4_oag,cfv4_oc1,cfv4_oc2,cfv4_ex") + +-(define_insn_reservation "cf_v2_mul_10_1" 6 +- (and (and (and (eq_attr "cpu" "cf_v2") +- (eq_attr "type2" "mul")) +- (eq (symbol_ref "get_attr_size (insn)") (const_int 1))) +- (eq_attr "op_mem" "10")) +- "cf_v2_ib1+cf_v2_mul_10") ++(define_insn_reservation "cfv4_00_ex_sOEP13" 4 ++ (and (and (eq_attr "cpu" "cfv4") ++ (eq_attr "type" "alux_l,ext,shift,tst_l")) ++ (eq_attr "op_mem" "00")) ++ "cfv4_sOEP1|cfv4_sOEP3|(cfv4_ds,cfv4_oag,cfv4_oc1,cfv4_oc2,cfv4_ex)") + +-(define_insn_reservation "cf_v2_mul_10_2" 6 +- (and (and (and (eq_attr "cpu" "cf_v2") +- (eq_attr "type2" "mul")) +- (eq (symbol_ref "get_attr_size (insn)") (const_int 2))) ++;; Several types mentioned in this reservation (e.g., ext and shift) don't ++;; support implicit load. But we handle them anyway due to first scheduling ++;; pass, which handles non-strict rtl. ++;; ++;; Latency is either 1 or 4 depending in which stage the consumer ++;; will need the data. ++(define_insn_reservation "cfv4_10" 4 ++ (and (and (eq_attr "cpu" "cfv4") ++ (eq_attr "type" "alu_l,aluq_l,alux_l,bitr,bitrw, ++ clr,clr_l,cmp,cmp_l,ext, ++ mov3q_l,move,moveq_l,mvsz,neg_l, ++ shift,tst,tst_l")) + (eq_attr "op_mem" "10")) +- "cf_v2_ib2+cf_v2_mul_10") ++ "cfv4_ds,cfv4_oag,cfv4_oc1+cfv4_mem,cfv4_oc2,cfv4_ex") + +-(define_insn_reservation "cf_v2_mul_10_3" 6 +- (and (and (and (eq_attr "cpu" "cf_v2") +- (eq_attr "type2" "mul")) +- (eq (symbol_ref "get_attr_size (insn)") (const_int 3))) ++;; Specialization of cfv4_10. ++;; move.l has OC2-to-DS forwarding path, that saves one cycle of latency. ++(define_insn_reservation "cfv4_10_pOEP1" 3 ++ (and (and (eq_attr "cpu" "cfv4") ++ (eq_attr "type" "move_l")) + (eq_attr "op_mem" "10")) +- "cf_v2_ib3+cf_v2_mul_10") ++ "cfv4_pOEP1,cfv4_oag,cfv4_oc1+cfv4_mem,cfv4_oc2,cfv4_ex") + +-(define_insn_reservation "cf_v2_mul_i0_2" 7 +- (and (and (and (eq_attr "cpu" "cf_v2") +- (eq_attr "type2" "mul")) +- (eq (symbol_ref "get_attr_size (insn)") (const_int 2))) ++;; Same here. But +1 to latency due to longer OAG. ++(define_insn_reservation "cfv4_i0" 5 ++ (and (and (eq_attr "cpu" "cfv4") ++ (eq_attr "type" "alu_l,aluq_l,alux_l,bitr,bitrw, ++ clr,clr_l,cmp,cmp_l,ext, ++ mov3q_l,move,moveq_l,mvsz,neg_l, ++ shift,tst,tst_l")) + (eq_attr "op_mem" "i0")) +- "cf_v2_ib2+cf_v2_mul_i0") ++ "cfv4_ds,cfv4_oag,cfv4_oag,cfv4_oc1+cfv4_mem,cfv4_oc2,cfv4_ex") + +-(define_insn_reservation "cf_v2_mul_i0_3" 7 +- (and (and (and (eq_attr "cpu" "cf_v2") +- (eq_attr "type2" "mul")) +- (eq (symbol_ref "get_attr_size (insn)") (const_int 3))) ++;; ??? Does indexed load trigger dual-issue? ++;; ??? Does OC2-to-DS forwarding path saves a cycle? ++(define_insn_reservation "cfv4_i0_pOEP1" 4 ++ (and (and (eq_attr "cpu" "cfv4") ++ (eq_attr "type" "move_l")) + (eq_attr "op_mem" "i0")) +- "cf_v2_ib3+cf_v2_mul_i0") ++ "cfv4_ds,cfv4_oag,cfv4_oag,cfv4_oc1+cfv4_mem,cfv4_oc2,cfv4_ex") + +-;; ??? As return reads target address from stack, use a mem-read reservation +-;; for it. +-(define_reservation "cf_v2_rts" "cf_v2_move_10") +- +-;; ??? It's not clear what the core does during these 5 cycles. +-;; Luckily, we don't care that much about an insn that won't be moved. 
+-(define_insn_reservation "cf_v2_rts_1" 5 +- (and (and (eq_attr "cpu" "cf_v2") +- (eq_attr "type2" "rts")) +- (eq (symbol_ref "get_attr_size (insn)") (const_int 1))) +- "cf_v2_ib1+cf_v2_rts") ++;; This reservation is for moves and clr. Arithmetic instructions ++;; don't write to memory unless they also read from it. ++;; But, before reload we can have all sorts of things. ++;; With cfv4_pOEP2 allow dual-issue for type 2 cases. ++(define_insn_reservation "cfv4_01" 1 ++ (and (and (eq_attr "cpu" "cfv4") ++ (eq_attr "type" "alu_l,aluq_l,alux_l,bitr,bitrw, ++ clr,clr_l,cmp,cmp_l,ext, ++ mov3q_l,move,move_l,moveq_l,mvsz,neg_l, ++ shift")) ++ (eq_attr "op_mem" "01")) ++ "cfv4_pOEP2,cfv4_oag,cfv4_oc1,cfv4_oc2,cfv4_ex,cfv4_da,cfv4_mem") + +-;; Call instructions reservations. ++;; ??? Does indexed store trigger dual-issue? ++(define_insn_reservation "cfv4_0i" 2 ++ (and (and (eq_attr "cpu" "cfv4") ++ (eq_attr "type" "alu_l,aluq_l,alux_l,bitr,bitrw, ++ clr,clr_l,cmp,cmp_l,ext, ++ mov3q_l,move,move_l,moveq_l,mvsz,neg_l, ++ shift")) ++ (eq_attr "op_mem" "0i")) ++ "cfv4_pOEP2,cfv4_oag,cfv4_oag,cfv4_oc1,cfv4_oc2,cfv4_ex,cfv4_da,cfv4_mem") + +-;; ??? It's not clear what reservation is best to use for calls. +-;; For now we use mem-write + return reservations to reflect the fact of +-;; pushing and poping return address to and from the stack. ++(define_insn_reservation "cfv4_11" 1 ++ (and (and (eq_attr "cpu" "cfv4") ++ (eq_attr "type" "alu_l,aluq_l,alux_l,bitr,bitrw, ++ clr,clr_l,cmp,cmp_l,ext, ++ mov3q_l,move,move_l,moveq_l,mvsz,neg_l, ++ shift")) ++ (eq_attr "op_mem" "11")) ++ "cfv4_ds,cfv4_oag,cfv4_oc1+cfv4_mem,cfv4_oc2,cfv4_ex,cfv4_da,cfv4_mem") + +-(define_insn_reservation "cf_v2_call_1" 3 +- (and (and (eq_attr "cpu" "cf_v2") +- (eq_attr "type2" "call")) +- (eq (symbol_ref "get_attr_size (insn)") (const_int 1))) +- "cf_v2_ib1+cf_v2_move_10,cf_v2_rts") +- +-(define_insn_reservation "cf_v2_call_2" 3 +- (and (and (eq_attr "cpu" "cf_v2") +- (eq_attr "type2" "call")) +- (eq (symbol_ref "get_attr_size (insn)") (const_int 2))) +- "cf_v2_ib2+cf_v2_move_10,cf_v2_rts") +- +-(define_insn_reservation "cf_v2_call_3" 3 +- (and (and (eq_attr "cpu" "cf_v2") +- (eq_attr "type2" "call")) +- (eq (symbol_ref "get_attr_size (insn)") (const_int 3))) +- "cf_v2_ib3+cf_v2_move_10,cf_v2_rts") ++;; Latency is 2 due to long OAG stage. ++(define_insn_reservation "cfv4_i1" 2 ++ (and (and (eq_attr "cpu" "cfv4") ++ (eq_attr "type" "alu_l,aluq_l,alux_l,bitr,bitrw, ++ clr,clr_l,cmp,cmp_l,ext, ++ mov3q_l,move,move_l,moveq_l,mvsz,neg_l, ++ shift")) ++ (eq_attr "op_mem" "i1")) ++ "cfv4_ds,cfv4_oag,cfv4_oag,cfv4_oc1+cfv4_mem,cfv4_oc2,cfv4_ex,cfv4_da,cfv4_mem") + +-;; Branch reservations. ++;; This one is the same as cfv4_i1. ++;; ??? Should it be different? ++(define_insn_reservation "cfv4_1i" 2 ++ (and (and (eq_attr "cpu" "cfv4") ++ (eq_attr "type" "alu_l,aluq_l,alux_l,bitr,bitrw, ++ clr,clr_l,cmp,cmp_l,ext, ++ mov3q_l,move,move_l,moveq_l,mvsz,neg_l, ++ shift")) ++ (eq_attr "op_mem" "1i")) ++ "cfv4_ds,cfv4_oag,cfv4_oag,cfv4_oc1+cfv4_mem,cfv4_oc2,cfv4_ex,cfv4_da,cfv4_mem") + +-;; ??? Branch reservations are unclear to me so far. Luckily, we don't care +-;; ??? that much about branches. +-(define_reservation "cf_v2_bcc" "cf_v2_move_00") ++;; ??? Does pea indeed support case 2 of dual-issue? ++(define_insn_reservation "cfv4_11_pea" 1 ++ (and (and (eq_attr "cpu" "cfv4") ++ (eq_attr "type" "pea")) ++ (eq_attr "op_mem" "11,00,01,0i,10")) ++ "cfv4_pOEP2,cfv4_oag,cfv4_oc1,cfv4_oc2,cfv4_ex,cfv4_da,cfv4_mem") ++ ++;; ??? 
Does pea indeed support case 2 of dual-issue? ++;; ??? Does indexed store trigger dual-issue? ++(define_insn_reservation "cfv4_i1_pea" 1 ++ (and (and (eq_attr "cpu" "cfv4") ++ (eq_attr "type" "pea")) ++ (eq_attr "op_mem" "i1,1i")) ++ "cfv4_pOEP2,cfv4_oag,cfv4_oag,cfv4_oc1,cfv4_oc2,cfv4_ex,cfv4_da,cfv4_mem") ++ ++(define_insn_reservation "cfv4_link" 2 ++ (and (eq_attr "cpu" "cfv4") ++ (eq_attr "type" "link")) ++ "cfv4_ds,cfv4_oag,cfv4_oc1,cfv4_oc2,cfv4_ex,cfv4_ex,cfv4_da,cfv4_mem") ++ ++(define_insn_reservation "cfv4_unlink" 2 ++ (and (eq_attr "cpu" "cfv4") ++ (eq_attr "type" "unlk")) ++ "cfv4_ds,cfv4_oag,cfv4_oc1+cfv4_mem,cfv4_oc2,cfv4_ex") ++ ++(define_insn_reservation "cfv4_divw_00" 20 ++ (and (and (eq_attr "cpu" "cfv4") ++ (eq_attr "type" "div_w")) ++ (eq_attr "op_mem" "00,01,0i")) ++ "cfv4_ds,cfv4_oag,cfv4_oc1,cfv4_oc2,cfv4_ex*15") ++ ++(define_insn_reservation "cfv4_divw_10" 20 ++ (and (and (eq_attr "cpu" "cfv4") ++ (eq_attr "type" "div_w")) ++ (eq_attr "op_mem" "10,11,1i")) ++ "cfv4_ds,cfv4_oag,cfv4_oc1+cfv4_mem,cfv4_oc2,cfv4_ex*15") ++ ++(define_insn_reservation "cfv4_divw_i0" 21 ++ (and (and (eq_attr "cpu" "cfv4") ++ (eq_attr "type" "div_w")) ++ (eq_attr "op_mem" "i0,i1")) ++ "cfv4_ds,cfv4_oag,cfv4_oag,cfv4_oc1+cfv4_mem,cfv4_oc2,cfv4_ex*15") ++ ++(define_insn_reservation "cfv4_divl_00" 35 ++ (and (and (eq_attr "cpu" "cfv4") ++ (eq_attr "type" "div_l")) ++ (eq_attr "op_mem" "00,01,0i")) ++ "cfv4_ds,cfv4_oag,cfv4_oc1,cfv4_oc2,cfv4_ex*30") ++ ++(define_insn_reservation "cfv4_divl_10" 35 ++ (and (and (eq_attr "cpu" "cfv4") ++ (eq_attr "type" "div_l")) ++ (eq_attr "op_mem" "10,11,1i,i0,i1")) ++ "cfv4_ds,cfv4_oag,cfv4_oc1+cfv4_mem,cfv4_oc2,cfv4_ex*30") ++ ++(define_insn_reservation "cfv4_emac_mul_00" 7 ++ (and (and (eq_attr "cpu" "cfv4") ++ (eq_attr "type" "mul_w,mul_l")) ++ (eq_attr "op_mem" "00,01,0i")) ++ "cfv4_ds,cfv4_oag,cfv4_oc1,cfv4_oc2,cfv4_ex,cfv4_emac") ++ ++(define_insn_reservation "cfv4_emac_mul_10" 7 ++ (and (and (eq_attr "cpu" "cfv4") ++ (eq_attr "type" "mul_w,mul_l")) ++ (eq_attr "op_mem" "10,11,1i")) ++ "cfv4_ds,cfv4_oag,cfv4_oc1+cfv4_mem,cfv4_oc2,cfv4_ex,cfv4_emac") ++ ++(define_insn_reservation "cfv4_emac_mul_i0" 8 ++ (and (and (eq_attr "cpu" "cfv4") ++ (eq_attr "type" "mul_w,mul_l")) ++ (eq_attr "op_mem" "i0,i1")) ++ "cfv4_ds,cfv4_oag,cfv4_oag,cfv4_oc1+cfv4_mem,cfv4_oc2,cfv4_ex,cfv4_emac") ++ ++(define_insn_reservation "cfv4_falu_00" 7 ++ (and (and (eq_attr "cpu" "cfv4") ++ (eq_attr "type" "falu,fcmp,fmul")) ++ (eq_attr "op_mem" "00,01,0i")) ++ "cfv4_ds,cfv4_oag,cfv4_oc1,cfv4_oc2,cfv4_ex,cfv4_fp") ++ ++(define_insn_reservation "cfv4_falu_10" 7 ++ (and (and (eq_attr "cpu" "cfv4") ++ (eq_attr "type" "falu,fcmp,fmul")) ++ (eq_attr "op_mem" "10,i0,11,1i,i1")) ++ "cfv4_ds,cfv4_oag,cfv4_oc1+cfv4_mem,cfv4_oc2,cfv4_ex,cfv4_fp") ++ ++(define_insn_reservation "cfv4_fneg_00" 4 ++ (and (and (eq_attr "cpu" "cfv4") ++ (eq_attr "type" "fmove,fneg,ftst")) ++ (eq_attr "op_mem" "00")) ++ "cfv4_ds,cfv4_oag,cfv4_oc1,cfv4_oc2,cfv4_ex,cfv4_fp") + +-(define_insn_reservation "cf_v2_bcc_1" 2 +- (and (and (eq_attr "cpu" "cf_v2") +- (eq_attr "type2" "bcc")) +- (eq (symbol_ref "get_attr_size (insn)") (const_int 1))) +- "cf_v2_ib1+cf_v2_bcc") +- +-(define_insn_reservation "cf_v2_bcc_2" 2 +- (and (and (eq_attr "cpu" "cf_v2") +- (eq_attr "type2" "bcc")) +- (eq (symbol_ref "get_attr_size (insn)") (const_int 2))) +- "cf_v2_ib2+cf_v2_bcc") +- +-(define_insn_reservation "cf_v2_bcc_3" 2 +- (and (and (eq_attr "cpu" "cf_v2") +- (eq_attr "type2" "bcc")) +- (eq (symbol_ref "get_attr_size (insn)") (const_int 
3))) +- "cf_v2_ib3+cf_v2_bcc") +- +-(define_reservation "cf_v2_bra" "cf_v2_move_01") +- +-(define_insn_reservation "cf_v2_bra_1" 2 +- (and (and (eq_attr "cpu" "cf_v2") +- (eq_attr "type2" "bra")) +- (eq (symbol_ref "get_attr_size (insn)") (const_int 1))) +- "cf_v2_ib1+cf_v2_bra") +- +-(define_insn_reservation "cf_v2_bra_2" 2 +- (and (and (eq_attr "cpu" "cf_v2") +- (eq_attr "type2" "bra")) +- (eq (symbol_ref "get_attr_size (insn)") (const_int 2))) +- "cf_v2_ib2+cf_v2_bra") +- +-(define_insn_reservation "cf_v2_bra_3" 2 +- (and (and (eq_attr "cpu" "cf_v2") +- (eq_attr "type2" "bra")) +- (eq (symbol_ref "get_attr_size (insn)") (const_int 3))) +- "cf_v2_ib3+cf_v2_bra") +- +-;; Computed jump. +-;; Takes 3 cycles. +-(define_reservation "cf_v2_jmp" +- "cf_v2_dsoc,cf_v2_agex,cf_v2_dsoc,cf_v2_agex") +- +-(define_insn_reservation "cf_v2_jmp_1" 3 +- (and (and (eq_attr "cpu" "cf_v2") +- (eq_attr "type2" "jmp")) +- (eq (symbol_ref "get_attr_size (insn)") (const_int 1))) +- "cf_v2_ib1+cf_v2_jmp") +- +-(define_insn_reservation "cf_v2_jmp_2" 3 +- (and (and (eq_attr "cpu" "cf_v2") +- (eq_attr "type2" "jmp")) +- (eq (symbol_ref "get_attr_size (insn)") (const_int 2))) +- "cf_v2_ib2+cf_v2_jmp") +- +-(define_insn_reservation "cf_v2_jmp_3" 3 +- (and (and (eq_attr "cpu" "cf_v2") +- (eq_attr "type2" "jmp")) +- (eq (symbol_ref "get_attr_size (insn)") (const_int 3))) +- "cf_v2_ib3+cf_v2_jmp") +- +-;; Misc reservations. +- +-(define_insn_reservation "cf_v2_unlk_1" 2 +- (and (and (eq_attr "cpu" "cf_v2") +- (eq_attr "type2" "unlk")) +- (eq (symbol_ref "get_attr_size (insn)") (const_int 1))) +- "cf_v2_ib1+cf_v2_move_l_10") +- +-;; This automaton is used to gather statistics on insns that need reservations. +-(define_automaton "cf_v2_guess") +- +-(define_query_cpu_unit "cf_v2_guess" "cf_v2_guess") +- +-;; Dummy reservation for instructions that are not handled yet. 
+- +-(define_insn_reservation "cf_v2_guess_1" 1 +- (and (and (eq_attr "cpu" "cf_v2") +- (eq_attr "guess" "yes")) +- (eq (symbol_ref "get_attr_size (insn)") (const_int 1))) +- "cf_v2_ib1+cf_v2_guess+cf_v2_dsoc+cf_v2_agex") +- +-(define_insn_reservation "cf_v2_guess_2" 1 +- (and (and (eq_attr "cpu" "cf_v2") +- (eq_attr "guess" "yes")) +- (eq (symbol_ref "get_attr_size (insn)") (const_int 2))) +- "cf_v2_ib2+cf_v2_guess+cf_v2_dsoc+cf_v2_agex") +- +-(define_insn_reservation "cf_v2_guess_3" 1 +- (and (and (eq_attr "cpu" "cf_v2") +- (eq_attr "guess" "yes")) +- (eq (symbol_ref "get_attr_size (insn)") (const_int 3))) +- "cf_v2_ib3+cf_v2_guess+cf_v2_dsoc+cf_v2_agex") ++(define_insn_reservation "cfv4_fmove_fneg_10" 4 ++ (and (and (eq_attr "cpu" "cfv4") ++ (eq_attr "type" "fmove,fneg,ftst")) ++ (eq_attr "op_mem" "10,i0,11,1i,i1")) ++ "cfv4_ds,cfv4_oag,cfv4_oc1+cfv4_mem,cfv4_oc2,cfv4_ex,cfv4_fp") ++ ++(define_insn_reservation "cfv4_fmove_01" 1 ++ (and (and (eq_attr "cpu" "cfv4") ++ (eq_attr "type" "fmove,fneg,ftst")) ++ (eq_attr "op_mem" "01,0i")) ++ "cfv4_ds,cfv4_oag,cfv4_oc1,cfv4_oc2,cfv4_ex,cfv4_fp,cfv4_da,cfv4_mem") ++ ++(define_insn_reservation "cfv4_fdiv_00" 23 ++ (and (and (eq_attr "cpu" "cfv4") ++ (eq_attr "type" "fdiv")) ++ (eq_attr "op_mem" "00,01,0i")) ++ "cfv4_ds,cfv4_oag,cfv4_oc1,cfv4_oc2,cfv4_ex,cfv4_fp*17") ++ ++(define_insn_reservation "cfv4_fdiv_10" 23 ++ (and (and (eq_attr "cpu" "cfv4") ++ (eq_attr "type" "fdiv")) ++ (eq_attr "op_mem" "10,i0,11,1i,i1")) ++ "cfv4_ds,cfv4_oag,cfv4_oc1+cfv4_mem,cfv4_oc2,cfv4_ex,cfv4_fp*17") ++ ++(define_insn_reservation "cfv4_fsqrt_00" 56 ++ (and (and (eq_attr "cpu" "cfv4") ++ (eq_attr "type" "fsqrt")) ++ (eq_attr "op_mem" "00,01,0i")) ++ "cfv4_ds,cfv4_oag,cfv4_oc1,cfv4_oc2,cfv4_ex,cfv4_fp*50") ++ ++(define_insn_reservation "cfv4_fsqrt_10" 56 ++ (and (and (eq_attr "cpu" "cfv4") ++ (eq_attr "type" "fsqrt")) ++ (eq_attr "op_mem" "10,i0,11,1i,i1")) ++ "cfv4_ds,cfv4_oag,cfv4_oc1+cfv4_mem,cfv4_oc2,cfv4_ex,cfv4_fp*50") ++ ++(define_insn_reservation "cfv4_bcc" 0 ++ (and (eq_attr "cpu" "cfv4") ++ (eq_attr "type" "bcc")) ++ "cfv4_ds,cfv4_oag,cfv4_oc1,cfv4_oc2,cfv4_ex") ++ ++(define_insn_reservation "cfv4_fbcc" 2 ++ (and (eq_attr "cpu" "cfv4") ++ (eq_attr "type" "fbcc")) ++ "cfv4_ds,cfv4_oag,cfv4_oc1,cfv4_oc2,cfv4_ex,cfv4_fp") ++ ++;; ??? Why is bra said to write to memory: 1(0/1) ? 
++(define_insn_reservation "cfv4_bra_bsr" 1 ++ (and (eq_attr "cpu" "cfv4") ++ (eq_attr "type" "bra,bsr")) ++ "cfv4_ds,cfv4_oag,cfv4_oc1,cfv4_oc2,cfv4_ex") ++ ++(define_insn_reservation "cfv4_jmp_jsr" 5 ++ (and (eq_attr "cpu" "cfv4") ++ (eq_attr "type" "jmp,jsr")) ++ "cfv4_ds,cfv4_oag,cfv4_oc1,cfv4_oc2,cfv4_ex") ++ ++(define_insn_reservation "cfv4_rts" 2 ++ (and (eq_attr "cpu" "cfv4") ++ (eq_attr "type" "rts")) ++ "cfv4_ds,cfv4_oag,cfv4_oc1+cfv4_mem,cfv4_oc2,cfv4_ex") ++ ++(define_insn_reservation "cfv4_nop" 1 ++ (and (eq_attr "cpu" "cfv4") ++ (eq_attr "type" "nop")) ++ "cfv4_ds+cfv4_oag+cfv4_oc1+cfv4_mem+cfv4_oc2+cfv4_ex") ++ ++(define_insn_reservation "cfv4_guess" 10 ++ (and (eq_attr "cpu" "cfv4") ++ (eq_attr "type" "trap,unknown")) ++ "cfv4_guess+cfv4_ds,cfv4_oag,cfv4_oc1+cfv4_mem,cfv4_oc2,cfv4_ex,cfv4_emac+cfv4_fp") ++ ++(define_insn_reservation "ignore" 0 ++ (eq_attr "type" "ignore") ++ "nothing") +--- a/gcc/config/m68k/constraints.md ++++ b/gcc/config/m68k/constraints.md +@@ -124,6 +124,11 @@ + (and (match_code "const_int") + (match_test "ival < -0x8000 || ival > 0x7FFF"))) + ++(define_constraint "Cu" ++ "16-bit offset for wrapped symbols" ++ (and (match_code "const") ++ (match_test "m68k_unwrap_symbol (op, false) != op"))) ++ + (define_constraint "CQ" + "Integers valid for mvq." + (and (match_code "const_int") +--- a/gcc/config/m68k/lb1sf68.asm ++++ b/gcc/config/m68k/lb1sf68.asm +@@ -129,10 +129,48 @@ Boston, MA 02110-1301, USA. */ + + #else /* __PIC__ */ + +- /* Common for -mid-shared-libary and -msep-data */ ++# if defined (__uClinux__) ++ ++ /* Versions for uClinux */ ++ ++# if defined(__ID_SHARED_LIBRARY__) ++ ++ /* -mid-shared-library versions */ ++ ++ .macro PICLEA sym, reg ++ movel a5@(_current_shared_library_a5_offset_), \reg ++ movel \sym@GOT(\reg), \reg ++ .endm ++ ++ .macro PICPEA sym, areg ++ movel a5@(_current_shared_library_a5_offset_), \areg ++ movel \sym@GOT(\areg), sp@- ++ .endm + + .macro PICCALL addr +-#if defined (__mcoldfire__) && !defined (__mcfisab__) ++ PICLEA \addr,a0 ++ jsr a0@ ++ .endm ++ ++ .macro PICJUMP addr ++ PICLEA \addr,a0 ++ jmp a0@ ++ .endm ++ ++# else /* !__ID_SHARED_LIBRARY__ */ ++ ++ /* Versions for -msep-data */ ++ ++ .macro PICLEA sym, reg ++ movel \sym@GOT(a5), \reg ++ .endm ++ ++ .macro PICPEA sym, areg ++ movel \sym@GOT(a5), sp@- ++ .endm ++ ++ .macro PICCALL addr ++#if defined (__mcoldfire__) && !defined (__mcfisab__) && !defined (__mcfisac__) + lea \addr-.-8,a0 + jsr pc@(a0) + #else +@@ -141,6 +179,9 @@ Boston, MA 02110-1301, USA. */ + .endm + + .macro PICJUMP addr ++ /* ISA C has no bra.l instruction, and since this assembly file ++ gets assembled into multiple object files, we avoid the ++ bra instruction entirely. */ + #if defined (__mcoldfire__) && !defined (__mcfisab__) + lea \addr-.-8,a0 + jmp pc@(a0) +@@ -149,33 +190,46 @@ Boston, MA 02110-1301, USA. 
*/ + #endif + .endm + +-# if defined(__ID_SHARED_LIBRARY__) ++# endif + +- /* -mid-shared-library versions */ ++# else /* !__uClinux__ */ ++ ++ /* Versions for Linux */ + + .macro PICLEA sym, reg +- movel a5@(_current_shared_library_a5_offset_), \reg ++ movel #_GLOBAL_OFFSET_TABLE_@GOTPC, \reg ++ lea (-6, pc, \reg), \reg + movel \sym@GOT(\reg), \reg + .endm + + .macro PICPEA sym, areg +- movel a5@(_current_shared_library_a5_offset_), \areg ++ movel #_GLOBAL_OFFSET_TABLE_@GOTPC, \areg ++ lea (-6, pc, \areg), \areg + movel \sym@GOT(\areg), sp@- + .endm + +-# else /* !__ID_SHARED_LIBRARY__ */ +- +- /* Versions for -msep-data */ +- +- .macro PICLEA sym, reg +- movel \sym@GOT(a5), \reg ++ .macro PICCALL addr ++#if defined (__mcoldfire__) && !defined (__mcfisab__) && !defined (__mcfisac__) ++ lea \addr-.-8,a0 ++ jsr pc@(a0) ++#else ++ bsr \addr ++#endif + .endm + +- .macro PICPEA sym, areg +- movel \sym@GOT(a5), sp@- ++ .macro PICJUMP addr ++ /* ISA C has no bra.l instruction, and since this assembly file ++ gets assembled into multiple object files, we avoid the ++ bra instruction entirely. */ ++#if defined (__mcoldfire__) && !defined (__mcfisab__) ++ lea \addr-.-8,a0 ++ jmp pc@(a0) ++#else ++ bra \addr ++#endif + .endm + +-# endif /* !__ID_SHARED_LIBRARY__ */ ++# endif + #endif /* __PIC__ */ + + +@@ -622,6 +676,7 @@ ROUND_TO_MINUS = 3 | round result tow + .globl SYM (__negdf2) + .globl SYM (__cmpdf2) + .globl SYM (__cmpdf2_internal) ++ .hidden SYM (__cmpdf2_internal) + + .text + .even +@@ -2384,7 +2439,7 @@ SYM (__cmpdf2): + movl a6@(16),sp@- + movl a6@(12),sp@- + movl a6@(8),sp@- +- bsr SYM (__cmpdf2_internal) ++ PICCALL SYM (__cmpdf2_internal) + unlk a6 + rts + +@@ -2536,6 +2591,7 @@ ROUND_TO_MINUS = 3 | round result tow + .globl SYM (__negsf2) + .globl SYM (__cmpsf2) + .globl SYM (__cmpsf2_internal) ++ .hidden SYM (__cmpsf2_internal) + + | These are common routines to return and signal exceptions. + +@@ -3790,7 +3846,7 @@ SYM (__cmpsf2): + pea 1 + movl a6@(12),sp@- + movl a6@(8),sp@- +- bsr (__cmpsf2_internal) ++ PICCALL SYM (__cmpsf2_internal) + unlk a6 + rts + +@@ -4063,3 +4119,8 @@ SYM (__lesf2): + unlk a6 + rts + #endif /* L_lesf2 */ ++ ++#if defined (__ELF__) && defined (__linux__) ++ /* Make stack non-executable for ELF linux targets. */ ++ .section .note.GNU-stack,"",@progbits ++#endif +--- a/gcc/config/m68k/m68k-devices.def ++++ b/gcc/config/m68k/m68k-devices.def +@@ -63,13 +63,17 @@ + + There is a bit of duplication between devices in the same family, + but this approach makes scripting easier. We keep each entry on +- a single line for the same reason. */ ++ a single line for the same reason. ++ ++ As the compiler does not (currently) generate MAC or EMAC commands, ++ we do not need separate multilibs for cores that only differ in ++ their MAC functionality. */ + + /* 680x0 series processors. 
*/ + M68K_DEVICE ("68000", m68000, "68000", "68000", 68000, isa_00, 0) + M68K_DEVICE ("68010", m68010, "68010", "68000", 68010, isa_10, 0) +-M68K_DEVICE ("68020", m68020, "68020", "68020", 68020, isa_20, FL_MMU) +-M68K_DEVICE ("68030", m68030, "68030", "68020", 68030, isa_20, FL_MMU) ++M68K_DEVICE ("68020", m68020, "68020", "68020", 68020, isa_20, FL_MMU | FL_UCLINUX) ++M68K_DEVICE ("68030", m68030, "68030", "68020", 68030, isa_20, FL_MMU | FL_UCLINUX) + M68K_DEVICE ("68040", m68040, "68040", "68040", 68040, isa_40, FL_MMU) + M68K_DEVICE ("68060", m68060, "68060", "68060", 68060, isa_40, FL_MMU) + M68K_DEVICE ("68302", m68302, "68302", "68000", 68000, isa_00, FL_MMU) +@@ -77,7 +81,13 @@ M68K_DEVICE ("68332", m68332, "68332", + M68K_DEVICE ("cpu32", cpu32, "cpu32", "cpu32", cpu32, isa_cpu32, FL_MMU) + + /* ColdFire CFV1 processor. */ +-M68K_DEVICE ("51qe", mcf51qe, "51qe", "51qe", cfv1, isa_c, FL_CF_USP) ++/* For historical reasons, the 51 multilib is named 51qe. */ ++M68K_DEVICE ("51", mcf51, "51", "51qe", cfv1, isa_c, FL_CF_USP) ++M68K_DEVICE ("51ac", mcf51ac, "51", "51qe", cfv1, isa_c, FL_CF_USP) ++M68K_DEVICE ("51cn", mcf51cn, "51", "51qe", cfv1, isa_c, FL_CF_USP) ++M68K_DEVICE ("51em", mcf51em, "51", "51qe", cfv1, isa_c, FL_CF_USP | FL_CF_MAC) ++M68K_DEVICE ("51jm", mcf51jm, "51", "51qe", cfv1, isa_c, FL_CF_USP) ++M68K_DEVICE ("51qe", mcf51qe, "51", "51qe", cfv1, isa_c, FL_CF_USP) + + /* ColdFire CFV2 processors. */ + M68K_DEVICE ("5202", mcf5202, "5206", "5206", cfv2, isa_a, 0) +@@ -86,31 +96,39 @@ M68K_DEVICE ("5206", mcf5206, "5206", + M68K_DEVICE ("5206e", mcf5206e, "5206e", "5206e", cfv2, isa_a, FL_CF_HWDIV | FL_CF_MAC) + M68K_DEVICE ("5207", mcf5207, "5208", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC) + M68K_DEVICE ("5208", mcf5208, "5208", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC) +-M68K_DEVICE ("5210a", mcf5210a, "5211a", "5213", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_MAC) +-M68K_DEVICE ("5211a", mcf5211a, "5211a", "5213", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_MAC) +-M68K_DEVICE ("5211", mcf5211, "5213", "5213", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_MAC) +-M68K_DEVICE ("5212", mcf5212, "5213", "5213", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_MAC) +-M68K_DEVICE ("5213", mcf5213, "5213", "5213", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_MAC) ++M68K_DEVICE ("5210a", mcf5210a, "5211a", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_MAC) ++M68K_DEVICE ("5211a", mcf5211a, "5211a", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_MAC) ++M68K_DEVICE ("5211", mcf5211, "5213", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_MAC) ++M68K_DEVICE ("5212", mcf5212, "5213", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_MAC) ++M68K_DEVICE ("5213", mcf5213, "5213", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_MAC) + M68K_DEVICE ("5214", mcf5214, "5216", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC) + M68K_DEVICE ("5216", mcf5216, "5216", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC) +-M68K_DEVICE ("52221", mcf52221, "52223", "5213", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_MAC) +-M68K_DEVICE ("52223", mcf52223, "52223", "5213", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_MAC) ++M68K_DEVICE ("52221", mcf52221, "52223", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_MAC) ++M68K_DEVICE ("52223", mcf52223, "52223", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_MAC) + M68K_DEVICE ("52230", mcf52230, "52235", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC) + M68K_DEVICE ("52231", mcf52231, "52235", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC) + M68K_DEVICE ("52232", mcf52232, "52235", "5208", cfv2, 
isa_aplus, FL_CF_HWDIV | FL_CF_EMAC) + M68K_DEVICE ("52233", mcf52233, "52235", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC) + M68K_DEVICE ("52234", mcf52234, "52235", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC) + M68K_DEVICE ("52235", mcf52235, "52235", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC) +-M68K_DEVICE ("5224", mcf5224, "5225", "5213", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_MAC) +-M68K_DEVICE ("5225", mcf5225, "5225", "5213", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_MAC) ++M68K_DEVICE ("5224", mcf5224, "5225", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_MAC) ++M68K_DEVICE ("5225", mcf5225, "5225", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_MAC) ++M68K_DEVICE ("52252", mcf52252, "52259", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC) ++M68K_DEVICE ("52254", mcf52254, "52259", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC) ++M68K_DEVICE ("52255", mcf52255, "52259", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC) ++M68K_DEVICE ("52256", mcf52256, "52259", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC) ++M68K_DEVICE ("52258", mcf52258, "52259", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC) ++M68K_DEVICE ("52259", mcf52259, "52259", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC) ++M68K_DEVICE ("52274", mcf52274, "52277", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC) ++M68K_DEVICE ("52277", mcf52277, "52277", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC) + M68K_DEVICE ("5232", mcf5232, "5235", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC) + M68K_DEVICE ("5233", mcf5233, "5235", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC) + M68K_DEVICE ("5234", mcf5234, "5235", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC) + M68K_DEVICE ("5235", mcf5235, "5235", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC) + M68K_DEVICE ("523x", mcf523x, "5235", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC) +-M68K_DEVICE ("5249", mcf5249, "5249", "5249", cfv2, isa_a, FL_CF_HWDIV | FL_CF_EMAC) +-M68K_DEVICE ("5250", mcf5250, "5250", "5249", cfv2, isa_a, FL_CF_HWDIV | FL_CF_EMAC) +-M68K_DEVICE ("5253", mcf5253, "5253", "5249", cfv2, isa_a, FL_CF_HWDIV | FL_CF_EMAC) ++M68K_DEVICE ("5249", mcf5249, "5249", "5206e", cfv2, isa_a, FL_CF_HWDIV | FL_CF_EMAC) ++M68K_DEVICE ("5250", mcf5250, "5250", "5206e", cfv2, isa_a, FL_CF_HWDIV | FL_CF_EMAC) ++M68K_DEVICE ("5253", mcf5253, "5253", "5206e", cfv2, isa_a, FL_CF_HWDIV | FL_CF_EMAC) + M68K_DEVICE ("5270", mcf5270, "5271", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC) + M68K_DEVICE ("5271", mcf5271, "5271", "5208", cfv2, isa_aplus, FL_CF_HWDIV) + M68K_DEVICE ("5272", mcf5272, "5272", "5206e", cfv2, isa_a, FL_CF_HWDIV | FL_CF_MAC) +@@ -122,6 +140,13 @@ M68K_DEVICE ("5282", mcf5282, "5282", + M68K_DEVICE ("528x", mcf528x, "5282", "5208", cfv2, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC) + + /* CFV3 processors. 
*/ ++M68K_DEVICE ("53011", mcf53011, "53017", "5329", cfv3, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC) ++M68K_DEVICE ("53012", mcf53012, "53017", "5329", cfv3, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC) ++M68K_DEVICE ("53013", mcf53013, "53017", "5329", cfv3, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC) ++M68K_DEVICE ("53014", mcf53014, "53017", "5329", cfv3, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC) ++M68K_DEVICE ("53015", mcf53015, "53017", "5329", cfv3, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC) ++M68K_DEVICE ("53016", mcf53016, "53017", "5329", cfv3, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC) ++M68K_DEVICE ("53017", mcf53017, "53017", "5329", cfv3, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC) + M68K_DEVICE ("5307", mcf5307, "5307", "5307", cfv3, isa_a, FL_CF_HWDIV | FL_CF_MAC) + M68K_DEVICE ("5327", mcf5327, "5329", "5329", cfv3, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC) + M68K_DEVICE ("5328", mcf5328, "5329", "5329", cfv3, isa_aplus, FL_CF_HWDIV | FL_CF_EMAC) +@@ -133,12 +158,12 @@ M68K_DEVICE ("537x", mcf537x, "5373", + + /* CFV4/CFV4e processors. */ + M68K_DEVICE ("5407", mcf5407, "5407", "5407", cfv4, isa_b, FL_CF_MAC) +-M68K_DEVICE ("54450", mcf54450, "54455", "54455", cfv4, isa_c, FL_CF_HWDIV | FL_CF_USP | FL_CF_EMAC | FL_MMU) +-M68K_DEVICE ("54451", mcf54451, "54455", "54455", cfv4, isa_c, FL_CF_HWDIV | FL_CF_USP | FL_CF_EMAC | FL_MMU) +-M68K_DEVICE ("54452", mcf54452, "54455", "54455", cfv4, isa_c, FL_CF_HWDIV | FL_CF_USP | FL_CF_EMAC | FL_MMU) +-M68K_DEVICE ("54453", mcf54453, "54455", "54455", cfv4, isa_c, FL_CF_HWDIV | FL_CF_USP | FL_CF_EMAC | FL_MMU) +-M68K_DEVICE ("54454", mcf54454, "54455", "54455", cfv4, isa_c, FL_CF_HWDIV | FL_CF_USP | FL_CF_EMAC | FL_MMU) +-M68K_DEVICE ("54455", mcf54455, "54455", "54455", cfv4, isa_c, FL_CF_HWDIV | FL_CF_USP | FL_CF_EMAC | FL_MMU) ++M68K_DEVICE ("54450", mcf54450, "54455", "54455", cfv4, isa_c, FL_CF_HWDIV | FL_CF_USP | FL_CF_EMAC | FL_MMU | FL_UCLINUX) ++M68K_DEVICE ("54451", mcf54451, "54455", "54455", cfv4, isa_c, FL_CF_HWDIV | FL_CF_USP | FL_CF_EMAC | FL_MMU | FL_UCLINUX) ++M68K_DEVICE ("54452", mcf54452, "54455", "54455", cfv4, isa_c, FL_CF_HWDIV | FL_CF_USP | FL_CF_EMAC | FL_MMU | FL_UCLINUX) ++M68K_DEVICE ("54453", mcf54453, "54455", "54455", cfv4, isa_c, FL_CF_HWDIV | FL_CF_USP | FL_CF_EMAC | FL_MMU | FL_UCLINUX) ++M68K_DEVICE ("54454", mcf54454, "54455", "54455", cfv4, isa_c, FL_CF_HWDIV | FL_CF_USP | FL_CF_EMAC | FL_MMU | FL_UCLINUX) ++M68K_DEVICE ("54455", mcf54455, "54455", "54455", cfv4, isa_c, FL_CF_HWDIV | FL_CF_USP | FL_CF_EMAC | FL_MMU | FL_UCLINUX) + M68K_DEVICE ("5470", mcf5470, "5475", "5475", cfv4e, isa_b, FL_CF_USP | FL_CF_EMAC | FL_CF_FPU | FL_MMU) + M68K_DEVICE ("5471", mcf5471, "5475", "5475", cfv4e, isa_b, FL_CF_USP | FL_CF_EMAC | FL_CF_FPU | FL_MMU) + M68K_DEVICE ("5472", mcf5472, "5475", "5475", cfv4e, isa_b, FL_CF_USP | FL_CF_EMAC | FL_CF_FPU | FL_MMU) +--- a/gcc/config/m68k/m68k-protos.h ++++ b/gcc/config/m68k/m68k-protos.h +@@ -50,14 +50,19 @@ extern bool strict_low_part_peephole_ok + extern int standard_68881_constant_p (rtx); + extern void print_operand_address (FILE *, rtx); + extern void print_operand (FILE *, rtx, int); ++extern bool m68k_output_addr_const_extra (FILE *, rtx); + extern void notice_update_cc (rtx, rtx); + extern bool m68k_legitimate_base_reg_p (rtx, bool); +-extern bool m68k_legitimate_index_reg_p (rtx, bool); ++extern bool m68k_legitimate_index_reg_p (enum machine_mode, rtx, bool); + extern bool m68k_illegitimate_symbolic_constant_p (rtx); + extern bool m68k_legitimate_address_p (enum machine_mode, rtx, bool); + extern bool 
m68k_matches_q_p (rtx); + extern bool m68k_matches_u_p (rtx); + extern rtx legitimize_pic_address (rtx, enum machine_mode, rtx); ++extern rtx m68k_legitimize_tls_address (rtx); ++extern bool m68k_tls_referenced_p (rtx); ++extern bool m68k_tls_mentioned_p (rtx); ++extern rtx m68k_legitimize_address (rtx, rtx, enum machine_mode); + extern int valid_dbcc_comparison_p_2 (rtx, enum machine_mode); + extern rtx m68k_libcall_value (enum machine_mode); + extern rtx m68k_function_value (const_tree, const_tree); +@@ -65,15 +70,19 @@ extern int emit_move_sequence (rtx *, en + extern bool m68k_movem_pattern_p (rtx, rtx, HOST_WIDE_INT, bool); + extern const char *m68k_output_movem (rtx *, rtx, HOST_WIDE_INT, bool); + ++/* Functions from m68k.c used in constraints.md. */ ++extern rtx m68k_unwrap_symbol (rtx, bool); ++ ++/* Functions from m68k.c used in genattrtab. */ + #ifdef HAVE_ATTR_cpu + extern enum attr_cpu m68k_sched_cpu; ++extern enum attr_mac m68k_sched_mac; + + extern enum attr_opx_type m68k_sched_attr_opx_type (rtx, int); + extern enum attr_opy_type m68k_sched_attr_opy_type (rtx, int); +-extern int m68k_sched_attr_size (rtx); ++extern enum attr_size m68k_sched_attr_size (rtx); + extern enum attr_op_mem m68k_sched_attr_op_mem (rtx); + extern enum attr_type m68k_sched_branch_type (rtx); +-extern enum attr_type2 m68k_sched_attr_type2 (rtx); + #endif /* HAVE_ATTR_cpu */ + + #endif /* RTX_CODE */ +--- a/gcc/config/m68k/m68k.c ++++ b/gcc/config/m68k/m68k.c +@@ -46,6 +46,7 @@ along with GCC; see the file COPYING3. + /* ??? Need to add a dependency between m68k.o and sched-int.h. */ + #include "sched-int.h" + #include "insn-codes.h" ++#include "ggc.h" + + enum reg_class regno_reg_class[] = + { +@@ -122,12 +123,14 @@ struct m68k_address { + }; + + static int m68k_sched_adjust_cost (rtx, rtx, rtx, int); ++static int m68k_sched_issue_rate (void); + static int m68k_sched_variable_issue (FILE *, int, rtx, int); + static void m68k_sched_md_init_global (FILE *, int, int); + static void m68k_sched_md_finish_global (FILE *, int); + static void m68k_sched_md_init (FILE *, int, int); + static void m68k_sched_dfa_pre_advance_cycle (void); + static void m68k_sched_dfa_post_advance_cycle (void); ++static int m68k_sched_first_cycle_multipass_dfa_lookahead (void); + + static bool m68k_handle_option (size_t, const char *, int); + static rtx find_addr_reg (rtx); +@@ -146,8 +149,9 @@ static bool m68k_save_reg (unsigned int + static bool m68k_ok_for_sibcall_p (tree, tree); + static bool m68k_rtx_costs (rtx, int, int, int *); + #if M68K_HONOR_TARGET_STRICT_ALIGNMENT +-static bool m68k_return_in_memory (tree, tree); ++static bool m68k_return_in_memory (const_tree, const_tree); + #endif ++static void m68k_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED; + + + /* Specify the identification number of the library being built */ +@@ -199,6 +203,9 @@ int m68k_last_compare_had_fp_operands; + #undef TARGET_SCHED_ADJUST_COST + #define TARGET_SCHED_ADJUST_COST m68k_sched_adjust_cost + ++#undef TARGET_SCHED_ISSUE_RATE ++#define TARGET_SCHED_ISSUE_RATE m68k_sched_issue_rate ++ + #undef TARGET_SCHED_VARIABLE_ISSUE + #define TARGET_SCHED_VARIABLE_ISSUE m68k_sched_variable_issue + +@@ -217,6 +224,10 @@ int m68k_last_compare_had_fp_operands; + #undef TARGET_SCHED_DFA_POST_ADVANCE_CYCLE + #define TARGET_SCHED_DFA_POST_ADVANCE_CYCLE m68k_sched_dfa_post_advance_cycle + ++#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD ++#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \ ++ m68k_sched_first_cycle_multipass_dfa_lookahead 
++ + #undef TARGET_HANDLE_OPTION + #define TARGET_HANDLE_OPTION m68k_handle_option + +@@ -243,6 +254,14 @@ int m68k_last_compare_had_fp_operands; + #define TARGET_RETURN_IN_MEMORY m68k_return_in_memory + #endif + ++#ifdef HAVE_AS_TLS ++#undef TARGET_HAVE_TLS ++#define TARGET_HAVE_TLS (true) ++ ++#undef TARGET_ASM_OUTPUT_DWARF_DTPREL ++#define TARGET_ASM_OUTPUT_DWARF_DTPREL m68k_output_dwarf_dtprel ++#endif ++ + static const struct attribute_spec m68k_attribute_table[] = + { + /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */ +@@ -382,6 +401,9 @@ enum fpu_type m68k_fpu; + /* The set of FL_* flags that apply to the target processor. */ + unsigned int m68k_cpu_flags; + ++/* The set of FL_* flags that apply to the processor to be tuned for. */ ++unsigned int m68k_tune_flags; ++ + /* Asm templates for calling or jumping to an arbitrary symbolic address, + or NULL if such calls or jumps are not supported. The address is held + in operand 0. */ +@@ -562,13 +584,23 @@ override_options (void) + /* Set the directly-usable versions of the -mcpu and -mtune settings. */ + m68k_cpu = entry->device; + if (m68k_tune_entry) +- m68k_tune = m68k_tune_entry->microarch; ++ { ++ m68k_tune = m68k_tune_entry->microarch; ++ m68k_tune_flags = m68k_tune_entry->flags; ++ } + #ifdef M68K_DEFAULT_TUNE + else if (!m68k_cpu_entry && !m68k_arch_entry) +- m68k_tune = M68K_DEFAULT_TUNE; ++ { ++ enum target_device dev; ++ dev = all_microarchs[M68K_DEFAULT_TUNE].device; ++ m68k_tune_flags = all_devices[dev]->flags; ++ } + #endif + else +- m68k_tune = entry->microarch; ++ { ++ m68k_tune = entry->microarch; ++ m68k_tune_flags = entry->flags; ++ } + + /* Set the type of FPU. */ + m68k_fpu = (!TARGET_HARD_FLOAT ? FPUTYPE_NONE +@@ -666,8 +698,14 @@ override_options (void) + SUBTARGET_OVERRIDE_OPTIONS; + + /* Setup scheduling options. */ +- if (TUNE_CFV2) +- m68k_sched_cpu = CPU_CF_V2; ++ if (TUNE_CFV1) ++ m68k_sched_cpu = CPU_CFV1; ++ else if (TUNE_CFV2) ++ m68k_sched_cpu = CPU_CFV2; ++ else if (TUNE_CFV3) ++ m68k_sched_cpu = CPU_CFV3; ++ else if (TUNE_CFV4) ++ m68k_sched_cpu = CPU_CFV4; + else + { + m68k_sched_cpu = CPU_UNKNOWN; +@@ -675,6 +713,16 @@ override_options (void) + flag_schedule_insns_after_reload = 0; + flag_modulo_sched = 0; + } ++ ++ if (m68k_sched_cpu != CPU_UNKNOWN) ++ { ++ if ((m68k_cpu_flags & (FL_CF_EMAC | FL_CF_EMAC_B)) != 0) ++ m68k_sched_mac = MAC_CF_EMAC; ++ else if ((m68k_cpu_flags & FL_CF_MAC) != 0) ++ m68k_sched_mac = MAC_CF_MAC; ++ else ++ m68k_sched_mac = MAC_NO; ++ } + } + + /* Generate a macro of the form __mPREFIX_cpu_NAME, where PREFIX is the +@@ -1023,6 +1071,11 @@ m68k_expand_prologue (void) + stack_pointer_rtx, + GEN_INT (-fsize_with_regs)))); + } ++ ++ /* If the frame pointer is needed, emit a special barrier that ++ will prevent the scheduler from moving stores to the frame ++ before the stack adjustment. */ ++ emit_insn (gen_stack_tie (stack_pointer_rtx, frame_pointer_rtx)); + } + else if (fsize_with_regs != 0) + m68k_set_frame_related +@@ -1103,8 +1156,7 @@ m68k_expand_prologue (void) + current_frame.reg_mask, true, true)); + } + +- if (flag_pic +- && !TARGET_SEP_DATA ++ if (!TARGET_SEP_DATA + && current_function_uses_pic_offset_table) + insn = emit_insn (gen_load_got (pic_offset_table_rtx)); + } +@@ -1666,15 +1718,16 @@ m68k_legitimate_base_reg_p (rtx x, bool + whether we need strict checking. 
*/ + + bool +-m68k_legitimate_index_reg_p (rtx x, bool strict_p) ++m68k_legitimate_index_reg_p (enum machine_mode mode, rtx x, bool strict_p) + { + if (!strict_p && GET_CODE (x) == SUBREG) + x = SUBREG_REG (x); + + return (REG_P (x) + && (strict_p +- ? REGNO_OK_FOR_INDEX_P (REGNO (x)) +- : REGNO_OK_FOR_INDEX_NONSTRICT_P (REGNO (x)))); ++ ? REGNO_MODE_OK_FOR_INDEX_P (REGNO (x), mode) ++ : (MODE_OK_FOR_INDEX_P (mode) ++ && REGNO_OK_FOR_INDEX_NONSTRICT_P (REGNO (x))))); + } + + /* Return true if X is a legitimate index expression for a (d8,An,Xn) or +@@ -1682,7 +1735,8 @@ m68k_legitimate_index_reg_p (rtx x, bool + ADDRESS if so. STRICT_P says whether we need strict checking. */ + + static bool +-m68k_decompose_index (rtx x, bool strict_p, struct m68k_address *address) ++m68k_decompose_index (enum machine_mode mode, rtx x, bool strict_p, ++ struct m68k_address *address) + { + int scale; + +@@ -1706,7 +1760,7 @@ m68k_decompose_index (rtx x, bool strict + && GET_MODE (XEXP (x, 0)) == HImode) + x = XEXP (x, 0); + +- if (m68k_legitimate_index_reg_p (x, strict_p)) ++ if (m68k_legitimate_index_reg_p (mode, x, strict_p)) + { + address->scale = scale; + address->index = x; +@@ -1730,7 +1784,7 @@ m68k_illegitimate_symbolic_constant_p (r + && !offset_within_block_p (base, INTVAL (offset))) + return true; + } +- return false; ++ return m68k_tls_referenced_p (x); + } + + /* Return true if X is a legitimate constant address that can reach +@@ -1758,7 +1812,7 @@ m68k_legitimate_constant_address_p (rtx + return false; + } + +- return true; ++ return !m68k_tls_referenced_p (x); + } + + /* Return true if X is a LABEL_REF for a jump table. Assume that unplaced +@@ -1778,6 +1832,40 @@ m68k_jump_table_ref_p (rtx x) + return x && JUMP_TABLE_DATA_P (x); + } + ++/* Unwrap symbol from UNSPEC_RELOC16 and, if unwrap_reloc32_p, ++ UNSPEC_RELOC32 wrappers. */ ++ ++rtx ++m68k_unwrap_symbol (rtx orig, bool unwrap_reloc32_p) ++{ ++ if (GET_CODE (orig) == CONST) ++ { ++ rtx x; ++ ++ x = XEXP (orig, 0); ++ ++ if (GET_CODE (x) == UNSPEC) ++ { ++ switch (XINT (x, 1)) ++ { ++ case UNSPEC_RELOC16: ++ orig = XVECEXP (x, 0, 0); ++ break; ++ ++ case UNSPEC_RELOC32: ++ if (unwrap_reloc32_p) ++ orig = XVECEXP (x, 0, 0); ++ break; ++ ++ default: ++ break; ++ } ++ } ++ } ++ ++ return orig; ++} ++ + /* Return true if X is a legitimate address for values of mode MODE. + STRICT_P says whether strict checking is needed. If the address + is valid, describe its components in *ADDRESS. */ +@@ -1825,15 +1913,23 @@ m68k_decompose_address (enum machine_mod + /* Check for GOT loads. These are (bd,An,Xn) addresses if + TARGET_68020 && flag_pic == 2, otherwise they are (d16,An) + addresses. */ +- if (flag_pic ++ if (pic_offset_table_rtx != NULL_RTX + && GET_CODE (x) == PLUS +- && XEXP (x, 0) == pic_offset_table_rtx +- && (GET_CODE (XEXP (x, 1)) == SYMBOL_REF +- || GET_CODE (XEXP (x, 1)) == LABEL_REF)) ++ && XEXP (x, 0) == pic_offset_table_rtx) + { +- address->base = XEXP (x, 0); +- address->offset = XEXP (x, 1); +- return true; ++ rtx sym; ++ ++ /* As we are processing a PLUS, do not unwrap RELOC32 ++ symbols here; they are invalid in this context. */ ++ sym = m68k_unwrap_symbol (XEXP (x, 1), false); ++ ++ if (GET_CODE (sym) == SYMBOL_REF ++ || GET_CODE (sym) == LABEL_REF) ++ { ++ address->base = XEXP (x, 0); ++ address->offset = XEXP (x, 1); ++ return true; ++ } + } + + /* The ColdFire FPU only accepts addressing modes 2-5. */ +@@ -1858,7 +1954,7 @@ m68k_decompose_address (enum machine_mod + accesses to unplaced labels in other cases. 
*/ + if (GET_CODE (x) == PLUS + && m68k_jump_table_ref_p (XEXP (x, 1)) +- && m68k_decompose_index (XEXP (x, 0), strict_p, address)) ++ && m68k_decompose_index (mode, XEXP (x, 0), strict_p, address)) + { + address->offset = XEXP (x, 1); + return true; +@@ -1890,7 +1986,7 @@ m68k_decompose_address (enum machine_mod + worse code. */ + if (address->offset + && symbolic_operand (address->offset, VOIDmode) +- && m68k_decompose_index (x, strict_p, address)) ++ && m68k_decompose_index (mode, x, strict_p, address)) + return true; + } + else +@@ -1909,14 +2005,14 @@ m68k_decompose_address (enum machine_mod + if (GET_CODE (x) == PLUS) + { + if (m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p) +- && m68k_decompose_index (XEXP (x, 1), strict_p, address)) ++ && m68k_decompose_index (mode, XEXP (x, 1), strict_p, address)) + { + address->base = XEXP (x, 0); + return true; + } + + if (m68k_legitimate_base_reg_p (XEXP (x, 1), strict_p) +- && m68k_decompose_index (XEXP (x, 0), strict_p, address)) ++ && m68k_decompose_index (mode, XEXP (x, 0), strict_p, address)) + { + address->base = XEXP (x, 1); + return true; +@@ -1978,6 +2074,115 @@ m68k_matches_u_p (rtx x) + && !address.index); + } + ++/* Return GOT pointer. */ ++ ++static rtx ++m68k_get_gp (void) ++{ ++ if (pic_offset_table_rtx == NULL_RTX) ++ pic_offset_table_rtx = gen_rtx_REG (Pmode, PIC_REG); ++ ++ current_function_uses_pic_offset_table = 1; ++ ++ return pic_offset_table_rtx; ++} ++ ++/* M68K relocations, used to distinguish GOT and TLS relocations in UNSPEC ++ wrappers. */ ++enum m68k_reloc { RELOC_GOT, RELOC_TLSGD, RELOC_TLSLDM, RELOC_TLSLDO, ++ RELOC_TLSIE, RELOC_TLSLE }; ++ ++#define TLS_RELOC_P(RELOC) ((RELOC) != RELOC_GOT) ++ ++/* Wrap symbol X into unspec representing relocation RELOC. ++ If USE_X_P, use 32-bit relocations, otherwise use 16-bit relocs. ++ BASE_REG - register that should be added to the result. ++ TEMP_REG - if non-null, temporary register. */ ++ ++static rtx ++m68k_wrap_symbol (rtx x, enum m68k_reloc reloc, rtx base_reg, rtx temp_reg) ++{ ++ bool use_x_p; ++ ++ use_x_p = (base_reg == pic_offset_table_rtx) ? TARGET_XGOT : TARGET_XTLS; ++ ++ if (TARGET_COLDFIRE && use_x_p) ++ /* When compiling with -mx{got, tls} switch the code will look like this: ++ ++ move.l @, ++ add.l , */ ++ { ++ /* Wrap X in UNSPEC_??? to tip m68k_output_addr_const_extra ++ to put @RELOC after reference. */ ++ x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, x, GEN_INT (reloc)), ++ UNSPEC_RELOC32); ++ x = gen_rtx_CONST (Pmode, x); ++ ++ if (temp_reg == NULL) ++ { ++ gcc_assert (can_create_pseudo_p ()); ++ temp_reg = gen_reg_rtx (Pmode); ++ } ++ ++ emit_move_insn (temp_reg, x); ++ emit_insn (gen_addsi3 (temp_reg, temp_reg, base_reg)); ++ x = temp_reg; ++ } ++ else ++ { ++ /* ??? It would be simplier to wrap 16-bit GOT relocs into UNSPEC too, ++ historically, we don't do this, but I'm not aware of any downside ++ of such a change. */ ++ if (reloc != RELOC_GOT) ++ /* Wrap X into (const (unspec (X))). */ ++ { ++ x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, x, GEN_INT (reloc)), ++ UNSPEC_RELOC16); ++ x = gen_rtx_CONST (Pmode, x); ++ } ++ ++ x = gen_rtx_PLUS (Pmode, base_reg, x); ++ } ++ ++ return x; ++} ++ ++/* Move X to a register and add REG_EQUAL note pointing to ORIG. ++ If REG is non-null, use it; generate new pseudo otherwise. 
*/ ++ ++static rtx ++m68k_move_to_reg (rtx x, rtx orig, rtx reg) ++{ ++ rtx insn; ++ ++ if (reg == NULL_RTX) ++ { ++ gcc_assert (can_create_pseudo_p ()); ++ reg = gen_reg_rtx (Pmode); ++ } ++ ++ insn = emit_move_insn (reg, x); ++ /* Put a REG_EQUAL note on this insn, so that it can be optimized ++ by loop. */ ++ set_unique_reg_note (insn, REG_EQUAL, orig); ++ ++ return reg; ++} ++ ++/* Does the same as m68k_wrap_symbol, but returns a memory reference to ++ GOT slot. */ ++ ++static rtx ++m68k_wrap_symbol_into_got_ref (rtx x, enum m68k_reloc reloc, rtx temp_reg) ++{ ++ x = m68k_wrap_symbol (x, reloc, m68k_get_gp (), temp_reg); ++ ++ x = gen_rtx_MEM (Pmode, x); ++ MEM_READONLY_P (x) = 1; ++ ++ return x; ++} ++ + /* Legitimize PIC addresses. If the address is already + position-independent, we return ORIG. Newly generated + position-independent addresses go to REG. If we need more +@@ -2029,13 +2234,8 @@ legitimize_pic_address (rtx orig, enum m + { + gcc_assert (reg); + +- pic_ref = gen_rtx_MEM (Pmode, +- gen_rtx_PLUS (Pmode, +- pic_offset_table_rtx, orig)); +- current_function_uses_pic_offset_table = 1; +- MEM_READONLY_P (pic_ref) = 1; +- emit_move_insn (reg, pic_ref); +- return reg; ++ pic_ref = m68k_wrap_symbol_into_got_ref (orig, RELOC_GOT, reg); ++ pic_ref = m68k_move_to_reg (pic_ref, orig, reg); + } + else if (GET_CODE (orig) == CONST) + { +@@ -2046,6 +2246,10 @@ legitimize_pic_address (rtx orig, enum m + && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx) + return orig; + ++ /* Handle the case where we have: const (UNSPEC_RELOC??). */ ++ if (m68k_unwrap_symbol (orig, true) != orig) ++ return orig; ++ + gcc_assert (reg); + + /* legitimize both operands of the PLUS */ +@@ -2056,13 +2260,372 @@ legitimize_pic_address (rtx orig, enum m + base == reg ? 0 : reg); + + if (GET_CODE (orig) == CONST_INT) +- return plus_constant (base, INTVAL (orig)); +- pic_ref = gen_rtx_PLUS (Pmode, base, orig); +- /* Likewise, should we set special REG_NOTEs here? */ ++ pic_ref = plus_constant (base, INTVAL (orig)); ++ else ++ pic_ref = gen_rtx_PLUS (Pmode, base, orig); + } ++ + return pic_ref; + } + ++/* The __tls_get_addr symbol. */ ++static GTY(()) rtx m68k_tls_get_addr; ++ ++/* Return SYMBOL_REF for __tls_get_addr. */ ++ ++static rtx ++m68k_get_tls_get_addr (void) ++{ ++ if (m68k_tls_get_addr == NULL_RTX) ++ m68k_tls_get_addr = init_one_libfunc ("__tls_get_addr"); ++ ++ return m68k_tls_get_addr; ++} ++ ++/* Return libcall result in A0 instead of usual D0. */ ++static bool m68k_libcall_value_in_a0_p = false; ++ ++/* Emit instruction sequence that calls __tls_get_addr. X is ++ the TLS symbol we are referencing and RELOC is the symbol type to use ++ (either TLSGD or TLSLDM). EQV is the REG_EQUAL note for the sequence ++ emitted. A pseudo register with result of __tls_get_addr call is ++ returned. */ ++ ++static rtx ++m68k_call_tls_get_addr (rtx x, rtx eqv, enum m68k_reloc reloc) ++{ ++ rtx a0; ++ rtx insns; ++ rtx dest; ++ ++ /* Emit the call sequence. */ ++ start_sequence (); ++ ++ /* FIXME: Unfortunately, emit_library_call_value does not ++ consider (plus (%a5) (const (unspec))) to be a good enough ++ operand for push, so it forces it into a register. The bad ++ thing about this is that combiner, due to copy propagation and other ++ optimizations, sometimes can not later fix this. As a consequence, ++ additional register may be allocated resulting in a spill. ++ For reference, see args processing loops in ++ calls.c:emit_library_call_value_1. 
++ For testcase, see gcc.target/m68k/tls-{gd, ld}.c */ ++ x = m68k_wrap_symbol (x, reloc, m68k_get_gp (), NULL_RTX); ++ ++ /* __tls_get_addr() is not a libcall, but emitting a libcall_value ++ is the simpliest way of generating a call. The difference between ++ __tls_get_addr() and libcall is that the result is returned in D0 ++ instead of A0. To workaround this, we use m68k_libcall_value_in_a0_p ++ which temporarily switches returning the result to A0. */ ++ ++ m68k_libcall_value_in_a0_p = true; ++ a0 = emit_library_call_value (m68k_get_tls_get_addr (), NULL_RTX, LCT_PURE, ++ Pmode, 1, x, Pmode); ++ m68k_libcall_value_in_a0_p = false; ++ ++ insns = get_insns (); ++ end_sequence (); ++ ++ gcc_assert (can_create_pseudo_p ()); ++ dest = gen_reg_rtx (Pmode); ++ emit_libcall_block (insns, dest, a0, eqv); ++ ++ return dest; ++} ++ ++/* The __tls_get_addr symbol. */ ++static GTY(()) rtx m68k_read_tp; ++ ++/* Return SYMBOL_REF for __m68k_read_tp. */ ++ ++static rtx ++m68k_get_m68k_read_tp (void) ++{ ++ if (m68k_read_tp == NULL_RTX) ++ m68k_read_tp = init_one_libfunc ("__m68k_read_tp"); ++ ++ return m68k_read_tp; ++} ++ ++/* Emit instruction sequence that calls __m68k_read_tp. ++ A pseudo register with result of __m68k_read_tp call is returned. */ ++ ++static rtx ++m68k_call_m68k_read_tp (void) ++{ ++ rtx a0; ++ rtx eqv; ++ rtx insns; ++ rtx dest; ++ ++ start_sequence (); ++ ++ /* __m68k_read_tp() is not a libcall, but emitting a libcall_value ++ is the simpliest way of generating a call. The difference between ++ __m68k_read_tp() and libcall is that the result is returned in D0 ++ instead of A0. To workaround this, we use m68k_libcall_value_in_a0_p ++ which temporarily switches returning the result to A0. */ ++ ++ /* Emit the call sequence. */ ++ m68k_libcall_value_in_a0_p = true; ++ a0 = emit_library_call_value (m68k_get_m68k_read_tp (), NULL_RTX, LCT_PURE, ++ Pmode, 0); ++ m68k_libcall_value_in_a0_p = false; ++ insns = get_insns (); ++ end_sequence (); ++ ++ /* Attach a unique REG_EQUIV, to allow the RTL optimizers to ++ share the m68k_read_tp result with other IE/LE model accesses. */ ++ eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const1_rtx), UNSPEC_RELOC32); ++ ++ gcc_assert (can_create_pseudo_p ()); ++ dest = gen_reg_rtx (Pmode); ++ emit_libcall_block (insns, dest, a0, eqv); ++ ++ return dest; ++} ++ ++/* Return a legitimized address for accessing TLS SYMBOL_REF X. ++ For explanations on instructions sequences see TLS/NPTL ABI for m68k and ++ ColdFire. */ ++ ++rtx ++m68k_legitimize_tls_address (rtx orig) ++{ ++ switch (SYMBOL_REF_TLS_MODEL (orig)) ++ { ++ case TLS_MODEL_GLOBAL_DYNAMIC: ++ orig = m68k_call_tls_get_addr (orig, orig, RELOC_TLSGD); ++ break; ++ ++ case TLS_MODEL_LOCAL_DYNAMIC: ++ { ++ rtx eqv; ++ rtx a0; ++ rtx x; ++ ++ /* Attach a unique REG_EQUIV, to allow the RTL optimizers to ++ share the LDM result with other LD model accesses. 
*/ ++ eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), ++ UNSPEC_RELOC32); ++ ++ a0 = m68k_call_tls_get_addr (orig, eqv, RELOC_TLSLDM); ++ ++ x = m68k_wrap_symbol (orig, RELOC_TLSLDO, a0, NULL_RTX); ++ ++ if (can_create_pseudo_p ()) ++ x = m68k_move_to_reg (x, orig, NULL_RTX); ++ ++ orig = x; ++ break; ++ } ++ ++ case TLS_MODEL_INITIAL_EXEC: ++ { ++ rtx a0; ++ rtx x; ++ ++ a0 = m68k_call_m68k_read_tp (); ++ ++ x = m68k_wrap_symbol_into_got_ref (orig, RELOC_TLSIE, NULL_RTX); ++ x = gen_rtx_PLUS (Pmode, x, a0); ++ ++ if (can_create_pseudo_p ()) ++ x = m68k_move_to_reg (x, orig, NULL_RTX); ++ ++ orig = x; ++ break; ++ } ++ ++ case TLS_MODEL_LOCAL_EXEC: ++ { ++ rtx a0; ++ rtx x; ++ ++ a0 = m68k_call_m68k_read_tp (); ++ ++ x = m68k_wrap_symbol (orig, RELOC_TLSLE, a0, NULL_RTX); ++ ++ if (can_create_pseudo_p ()) ++ x = m68k_move_to_reg (x, orig, NULL_RTX); ++ ++ orig = x; ++ break; ++ } ++ ++ default: ++ gcc_unreachable (); ++ } ++ ++ return orig; ++} ++ ++/* Return true if X is a TLS symbol. */ ++ ++static bool ++m68k_tls_symbol_p (rtx x) ++{ ++ if (!TARGET_HAVE_TLS) ++ return false; ++ ++ if (GET_CODE (x) != SYMBOL_REF) ++ return false; ++ ++ return SYMBOL_REF_TLS_MODEL (x) != 0; ++} ++ ++/* Helper for m68k_tls_referenced_p. */ ++ ++static int ++m68k_tls_referenced_p_1 (rtx *x, void *data ATTRIBUTE_UNUSED) ++{ ++ if (GET_CODE (*x) == SYMBOL_REF) ++ return SYMBOL_REF_TLS_MODEL (*x) != 0; ++ ++ /* Don't recurse into UNSPEC_TLS looking for TLS symbols; these are ++ TLS offsets, not real symbol references. */ ++ if (GET_CODE (*x) == UNSPEC ++ && (XINT (*x, 1) == UNSPEC_RELOC16 || XINT (*x, 1) == UNSPEC_RELOC32) ++ && TLS_RELOC_P (INTVAL (XVECEXP (*x, 0, 1)))) ++ return -1; ++ ++ return 0; ++} ++ ++/* Return true if X contains any TLS symbol references. */ ++ ++bool ++m68k_tls_referenced_p (rtx x) ++{ ++ if (!TARGET_HAVE_TLS) ++ return false; ++ ++ return for_each_rtx (&x, m68k_tls_referenced_p_1, NULL); ++} ++ ++/* Return true if X is legitimate TLS symbol reference. */ ++ ++bool ++m68k_tls_mentioned_p (rtx x) ++{ ++ switch (GET_CODE (x)) ++ { ++ case CONST: ++ return m68k_tls_mentioned_p (XEXP (x, 0)); ++ ++ case UNSPEC: ++ if ((XINT (x, 1) == UNSPEC_RELOC16 || XINT (x, 1) == UNSPEC_RELOC32) ++ && TLS_RELOC_P (INTVAL (XVECEXP (x, 0, 1)))) ++ return 1; ++ ++ default: ++ return 0; ++ } ++} ++ ++/* Legitimize X. */ ++ ++rtx ++m68k_legitimize_address (rtx x, rtx oldx, enum machine_mode mode) ++{ ++ if (m68k_tls_symbol_p (x)) ++ return m68k_legitimize_tls_address (x); ++ ++ if (GET_CODE (x) == PLUS) ++ { ++ bool ch; ++ bool copied; ++ ++ ch = (x != oldx); ++ copied = 0; ++ ++ /* For the 68000, we handle X+REG by loading X into a register R and ++ using R+REG. R will go in an address reg and indexing will be used. ++ However, if REG is a broken-out memory address or multiplication, ++ nothing needs to be done because REG can certainly go in an address ++ reg. 
*/ ++#define COPY_ONCE(Y) if (!copied) { Y = copy_rtx (Y); copied = ch = true; } ++ ++ if (GET_CODE (XEXP (x, 0)) == MULT) ++ { ++ COPY_ONCE (x); ++ XEXP (x, 0) = force_operand (XEXP (x, 0), 0); ++ } ++ ++ if (GET_CODE (XEXP (x, 1)) == MULT) ++ { ++ COPY_ONCE (x); ++ XEXP (x, 1) = force_operand (XEXP (x, 1), 0); ++ } ++ ++ if (ch ++ && GET_CODE (XEXP (x, 1)) == REG ++ && GET_CODE (XEXP (x, 0)) == REG) ++ { ++ if (TARGET_COLDFIRE_FPU ++ && GET_MODE_CLASS (mode) == MODE_FLOAT) ++ { ++ COPY_ONCE (x); ++ x = force_operand (x, 0); ++ } ++ ++ return x; ++ } ++ ++ if (ch && m68k_legitimate_address_p (mode, x, REG_STRICT_P)) ++ return x; ++ ++ if (GET_CODE (XEXP (x, 0)) == REG ++ || (GET_CODE (XEXP (x, 0)) == SIGN_EXTEND ++ && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG ++ && GET_MODE (XEXP (XEXP (x, 0), 0)) == HImode)) ++ { ++ rtx temp; ++ rtx val; ++ ++ temp = gen_reg_rtx (Pmode); ++ val = force_operand (XEXP (x, 1), 0); ++ ++ emit_move_insn (temp, val); ++ COPY_ONCE (x); ++ XEXP (x, 1) = temp; ++ ++ if (TARGET_COLDFIRE_FPU ++ && GET_MODE_CLASS (mode) == MODE_FLOAT ++ && GET_CODE (XEXP (x, 0)) == REG) ++ x = force_operand (x, 0); ++ ++ return x; ++ } ++ else if (GET_CODE (XEXP (x, 1)) == REG ++ || (GET_CODE (XEXP (x, 1)) == SIGN_EXTEND ++ && GET_CODE (XEXP (XEXP (x, 1), 0)) == REG ++ && GET_MODE (XEXP (XEXP (x, 1), 0)) == HImode)) ++ { ++ rtx temp; ++ rtx val; ++ ++ temp = gen_reg_rtx (Pmode); ++ val = force_operand (XEXP (x, 0), 0); ++ ++ emit_move_insn (temp, val); ++ COPY_ONCE (x); ++ XEXP (x, 0) = temp; ++ ++ if (TARGET_COLDFIRE_FPU ++ && GET_MODE_CLASS (mode) == MODE_FLOAT ++ && GET_CODE (XEXP (x, 1)) == REG) ++ x = force_operand (x, 0); ++ ++ return x; ++ } ++ ++#undef COPY_ONCE ++ } ++ ++ return NULL_RTX; ++} ++ + + + #define USE_MOVQ(i) ((unsigned) ((i) + 128) <= 255) +@@ -2175,13 +2738,18 @@ m68k_rtx_costs (rtx x, int code, int out + #define MULL_COST \ + (TUNE_68060 ? 2 \ + : TUNE_68040 ? 5 \ +- : TUNE_CFV2 ? 10 \ ++ : (TUNE_CFV2 && TUNE_EMAC) ? 3 \ ++ : (TUNE_CFV2 && TUNE_MAC) ? 4 \ ++ : TUNE_CFV2 ? 8 \ + : TARGET_COLDFIRE ? 3 : 13) + + #define MULW_COST \ + (TUNE_68060 ? 2 \ + : TUNE_68040 ? 3 \ +- : TUNE_68000_10 || TUNE_CFV2 ? 5 \ ++ : TUNE_68000_10 ? 5 \ ++ : (TUNE_CFV2 && TUNE_EMAC) ? 3 \ ++ : (TUNE_CFV2 && TUNE_MAC) ? 2 \ ++ : TUNE_CFV2 ? 8 \ + : TARGET_COLDFIRE ? 2 : 8) + + #define DIVW_COST \ +@@ -3531,9 +4099,7 @@ notice_update_cc (rtx exp, rtx insn) + case ROTATE: case ROTATERT: + /* These instructions always clear the overflow bit, and set + the carry to the bit shifted out. */ +- /* ??? We don't currently have a way to signal carry not valid, +- nor do we check for it in the branch insns. */ +- CC_STATUS_INIT; ++ cc_status.flags |= CC_OVERFLOW_UNUSABLE | CC_NO_CARRY; + break; + + case PLUS: case MINUS: case MULT: +@@ -3839,7 +4405,75 @@ print_operand (FILE *file, rtx op, int l + } + } + +- ++/* Return string for TLS relocation RELOC. */ ++ ++static const char * ++m68k_get_reloc_decoration (enum m68k_reloc reloc) ++{ ++ switch (reloc) ++ { ++ case RELOC_GOT: ++ return "@GOT"; ++ ++ case RELOC_TLSGD: ++ return "@TLSGD"; ++ ++ case RELOC_TLSLDM: ++ return "@TLSLDM"; ++ ++ case RELOC_TLSLDO: ++ return "@TLSLDO"; ++ ++ case RELOC_TLSIE: ++ return "@TLSIE"; ++ ++ case RELOC_TLSLE: ++ return "@TLSLE"; ++ ++ default: ++ gcc_unreachable (); ++ } ++} ++ ++/* m68k implementation of OUTPUT_ADDR_CONST_EXTRA. */ ++ ++bool ++m68k_output_addr_const_extra (FILE *file, rtx x) ++{ ++ if (GET_CODE (x) == UNSPEC) ++ { ++ switch (XINT (x, 1)) ++ { ++ /* ??? 
It would be cleaner to wrap normal GOT references into ++ UNSPEC_GOT too, then we won't have to handle them separately ++ in print_operand_address. I'm not aware of any downside of ++ such clean up. */ ++ case UNSPEC_RELOC16: ++ case UNSPEC_RELOC32: ++ output_addr_const (file, XVECEXP (x, 0, 0)); ++ fputs (m68k_get_reloc_decoration (INTVAL (XVECEXP (x, 0, 1))), file); ++ return true; ++ ++ default: ++ break; ++ } ++ } ++ ++ return false; ++} ++ ++/* M68K implementation of TARGET_ASM_OUTPUT_DWARF_DTPREL. */ ++ ++static void ++m68k_output_dwarf_dtprel (FILE *file, int size, rtx x) ++{ ++ gcc_assert (size == 4); ++ fputs ("\t.long\t", file); ++ output_addr_const (file, x); ++ fputs ("@TLSLDO+0x8000", file); ++} ++ ++ + /* A C compound statement to output to stdio stream STREAM the + assembler syntax for an instruction operand that is a memory + reference whose address is ADDR. ADDR is an RTL expression. +@@ -3928,7 +4562,9 @@ print_operand_address (FILE *file, rtx a + if (address.offset) + { + output_addr_const (file, address.offset); +- if (flag_pic && address.base == pic_offset_table_rtx) ++ if (flag_pic && address.base == pic_offset_table_rtx ++ && (m68k_unwrap_symbol (address.offset, false) ++ == address.offset)) + { + fprintf (file, "@GOT"); + if (flag_pic == 1 && TARGET_68020) +@@ -4486,7 +5122,8 @@ m68k_libcall_value (enum machine_mode mo + default: + break; + } +- return gen_rtx_REG (mode, D0_REG); ++ ++ return gen_rtx_REG (mode, m68k_libcall_value_in_a0_p ? A0_REG : D0_REG); + } + + rtx +@@ -4533,7 +5170,7 @@ m68k_function_value (const_tree valtype, + /* Worker function for TARGET_RETURN_IN_MEMORY. */ + #if M68K_HONOR_TARGET_STRICT_ALIGNMENT + static bool +-m68k_return_in_memory (tree type, tree fntype ATTRIBUTE_UNUSED) ++m68k_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED) + { + enum machine_mode mode = TYPE_MODE (type); + +@@ -4555,14 +5192,20 @@ m68k_return_in_memory (tree type, tree f + /* CPU to schedule the program for. */ + enum attr_cpu m68k_sched_cpu; + ++/* MAC to schedule the program for. */ ++enum attr_mac m68k_sched_mac; ++ + /* Operand type. */ + enum attr_op_type + { + /* No operand. */ + OP_TYPE_NONE, + +- /* Register. */ +- OP_TYPE_REG, ++ /* Integer register. */ ++ OP_TYPE_RN, ++ ++ /* FP register. */ ++ OP_TYPE_FPN, + + /* Implicit mem reference (e.g. stack). */ + OP_TYPE_MEM1, +@@ -4589,19 +5232,19 @@ enum attr_op_type + OP_TYPE_IMM_L + }; + +-/* True if current insn doesn't have complete pipeline description. */ +-static bool sched_guess_p; +- + /* Return type of memory ADDR_RTX refers to. */ + static enum attr_op_type + sched_address_type (enum machine_mode mode, rtx addr_rtx) + { + struct m68k_address address; + ++ if (symbolic_operand (addr_rtx, VOIDmode)) ++ return OP_TYPE_MEM7; ++ + if (!m68k_decompose_address (mode, addr_rtx, + reload_completed, &address)) + { +- gcc_assert (sched_guess_p); ++ gcc_assert (!reload_completed); + /* Reload will likely fix the address to be in the register. */ + return OP_TYPE_MEM234; + } +@@ -4622,12 +5265,42 @@ sched_address_type (enum machine_mode mo + return OP_TYPE_MEM7; + } + +-/* Return type of the operand OP. +- If ADDRESS_P is true, return type of memory location OP refers to. */ ++/* Return X or Y (depending on OPX_P) operand of INSN. 
*/ ++static rtx ++sched_get_operand (rtx insn, bool opx_p) ++{ ++ int i; ++ ++ if (recog_memoized (insn) < 0) ++ gcc_unreachable (); ++ ++ extract_constrain_insn_cached (insn); ++ ++ if (opx_p) ++ i = get_attr_opx (insn); ++ else ++ i = get_attr_opy (insn); ++ ++ if (i >= recog_data.n_operands) ++ return NULL; ++ ++ return recog_data.operand[i]; ++} ++ ++/* Return type of INSN's operand X (if OPX_P) or operand Y (if !OPX_P). ++ If ADDRESS_P is true, return type of memory location operand refers to. */ + static enum attr_op_type +-sched_operand_type (rtx op, bool address_p) ++sched_attr_op_type (rtx insn, bool opx_p, bool address_p) + { +- gcc_assert (op != NULL_RTX); ++ rtx op; ++ ++ op = sched_get_operand (insn, opx_p); ++ ++ if (op == NULL) ++ { ++ gcc_assert (!reload_completed); ++ return OP_TYPE_RN; ++ } + + if (address_p) + return sched_address_type (QImode, op); +@@ -4636,13 +5309,49 @@ sched_operand_type (rtx op, bool address + return sched_address_type (GET_MODE (op), XEXP (op, 0)); + + if (register_operand (op, VOIDmode)) +- return OP_TYPE_REG; ++ { ++ if ((!reload_completed && FLOAT_MODE_P (GET_MODE (op))) ++ || (reload_completed && FP_REG_P (op))) ++ return OP_TYPE_FPN; ++ ++ return OP_TYPE_RN; ++ } + + if (GET_CODE (op) == CONST_INT) + { +- /* ??? Below condition should probably check if the operation is +- signed or unsigned. */ +- if (IN_RANGE (INTVAL (op), -0x8000, 0x7fff)) ++ int ival; ++ ++ ival = INTVAL (op); ++ ++ /* Check for quick constants. */ ++ switch (get_attr_type (insn)) ++ { ++ case TYPE_ALUQ_L: ++ if (IN_RANGE (ival, 1, 8) || IN_RANGE (ival, -8, -1)) ++ return OP_TYPE_IMM_Q; ++ ++ gcc_assert (!reload_completed); ++ break; ++ ++ case TYPE_MOVEQ_L: ++ if (USE_MOVQ (ival)) ++ return OP_TYPE_IMM_Q; ++ ++ gcc_assert (!reload_completed); ++ break; ++ ++ case TYPE_MOV3Q_L: ++ if (valid_mov3q_const (ival)) ++ return OP_TYPE_IMM_Q; ++ ++ gcc_assert (!reload_completed); ++ break; ++ ++ default: ++ break; ++ } ++ ++ if (IN_RANGE (ival, -0x8000, 0x7fff)) + return OP_TYPE_IMM_W; + + return OP_TYPE_IMM_L; +@@ -4664,7 +5373,8 @@ sched_operand_type (rtx op, bool address + } + } + +- if (symbolic_operand (op, VOIDmode) ++ if (GET_CODE (op) == CONST ++ || symbolic_operand (op, VOIDmode) + || LABEL_P (op)) + { + switch (GET_MODE (op)) +@@ -4679,41 +5389,20 @@ sched_operand_type (rtx op, bool address + return OP_TYPE_IMM_L; + + default: +- if (GET_CODE (op) == SYMBOL_REF) +- /* ??? Just a guess. Probably we can guess better using length +- attribute of the instructions. */ ++ if (symbolic_operand (m68k_unwrap_symbol (op, false), VOIDmode)) ++ /* Just a guess. */ + return OP_TYPE_IMM_W; + + return OP_TYPE_IMM_L; + } + } + +- gcc_assert (sched_guess_p); +- +- return OP_TYPE_REG; +-} +- +-/* Return type of INSN's operand X (if OPX_P) or operand Y (if !OPX_P). +- If ADDRESS_P is true, return type of memory location operand refers to. */ +-static enum attr_op_type +-sched_attr_op_type (rtx insn, bool opx_p, bool address_p) +-{ +- int i; +- +- extract_constrain_insn_cached (insn); +- +- if (opx_p) +- i = get_attr_opx (insn); +- else +- i = get_attr_opy (insn); ++ gcc_assert (!reload_completed); + +- if (i >= recog_data.n_operands) +- { +- gcc_assert (sched_guess_p); +- return OP_TYPE_REG; +- } ++ if (FLOAT_MODE_P (GET_MODE (op))) ++ return OP_TYPE_FPN; + +- return sched_operand_type (recog_data.operand[i], address_p); ++ return OP_TYPE_RN; + } + + /* Implement opx_type attribute. 
+@@ -4722,12 +5411,13 @@ sched_attr_op_type (rtx insn, bool opx_p + enum attr_opx_type + m68k_sched_attr_opx_type (rtx insn, int address_p) + { +- sched_guess_p = (get_attr_guess (insn) == GUESS_YES); +- + switch (sched_attr_op_type (insn, true, address_p != 0)) + { +- case OP_TYPE_REG: +- return OPX_TYPE_REG; ++ case OP_TYPE_RN: ++ return OPX_TYPE_RN; ++ ++ case OP_TYPE_FPN: ++ return OPX_TYPE_FPN; + + case OP_TYPE_MEM1: + return OPX_TYPE_MEM1; +@@ -4765,12 +5455,13 @@ m68k_sched_attr_opx_type (rtx insn, int + enum attr_opy_type + m68k_sched_attr_opy_type (rtx insn, int address_p) + { +- sched_guess_p = (get_attr_guess (insn) == GUESS_YES); +- + switch (sched_attr_op_type (insn, false, address_p != 0)) + { +- case OP_TYPE_REG: +- return OPY_TYPE_REG; ++ case OP_TYPE_RN: ++ return OPY_TYPE_RN; ++ ++ case OP_TYPE_FPN: ++ return OPY_TYPE_FPN; + + case OP_TYPE_MEM1: + return OPY_TYPE_MEM1; +@@ -4802,17 +5493,21 @@ m68k_sched_attr_opy_type (rtx insn, int + } + } + +-/* Return the size of INSN. */ +-int +-m68k_sched_attr_size (rtx insn) ++/* Return size of INSN as int. */ ++static int ++sched_get_attr_size_int (rtx insn) + { + int size; + +- sched_guess_p = (get_attr_guess (insn) == GUESS_YES); +- +- switch (get_attr_type1 (insn)) ++ switch (get_attr_type (insn)) + { +- case TYPE1_MUL_L: ++ case TYPE_IGNORE: ++ /* There should be no references to m68k_sched_attr_size for 'ignore' ++ instructions. */ ++ gcc_unreachable (); ++ return 0; ++ ++ case TYPE_MUL_L: + size = 2; + break; + +@@ -4824,7 +5519,8 @@ m68k_sched_attr_size (rtx insn) + switch (get_attr_opx_type (insn)) + { + case OPX_TYPE_NONE: +- case OPX_TYPE_REG: ++ case OPX_TYPE_RN: ++ case OPX_TYPE_FPN: + case OPX_TYPE_MEM1: + case OPX_TYPE_MEM234: + case OPY_TYPE_IMM_Q: +@@ -4849,7 +5545,8 @@ m68k_sched_attr_size (rtx insn) + switch (get_attr_opy_type (insn)) + { + case OPY_TYPE_NONE: +- case OPY_TYPE_REG: ++ case OPY_TYPE_RN: ++ case OPY_TYPE_FPN: + case OPY_TYPE_MEM1: + case OPY_TYPE_MEM234: + case OPY_TYPE_IMM_Q: +@@ -4873,7 +5570,7 @@ m68k_sched_attr_size (rtx insn) + + if (size > 3) + { +- gcc_assert (sched_guess_p); ++ gcc_assert (!reload_completed); + + size = 3; + } +@@ -4881,22 +5578,100 @@ m68k_sched_attr_size (rtx insn) + return size; + } + ++/* Return size of INSN as attribute enum value. */ ++enum attr_size ++m68k_sched_attr_size (rtx insn) ++{ ++ switch (sched_get_attr_size_int (insn)) ++ { ++ case 1: ++ return SIZE_1; ++ ++ case 2: ++ return SIZE_2; ++ ++ case 3: ++ return SIZE_3; ++ ++ default: ++ gcc_unreachable (); ++ return 0; ++ } ++} ++ ++/* Return operand X or Y (depending on OPX_P) of INSN, ++ if it is a MEM, or NULL overwise. 
*/ ++static enum attr_op_type ++sched_get_opxy_mem_type (rtx insn, bool opx_p) ++{ ++ if (opx_p) ++ { ++ switch (get_attr_opx_type (insn)) ++ { ++ case OPX_TYPE_NONE: ++ case OPX_TYPE_RN: ++ case OPX_TYPE_FPN: ++ case OPX_TYPE_IMM_Q: ++ case OPX_TYPE_IMM_W: ++ case OPX_TYPE_IMM_L: ++ return OP_TYPE_RN; ++ ++ case OPX_TYPE_MEM1: ++ case OPX_TYPE_MEM234: ++ case OPX_TYPE_MEM5: ++ case OPX_TYPE_MEM7: ++ return OP_TYPE_MEM1; ++ ++ case OPX_TYPE_MEM6: ++ return OP_TYPE_MEM6; ++ ++ default: ++ gcc_unreachable (); ++ return 0; ++ } ++ } ++ else ++ { ++ switch (get_attr_opy_type (insn)) ++ { ++ case OPY_TYPE_NONE: ++ case OPY_TYPE_RN: ++ case OPY_TYPE_FPN: ++ case OPY_TYPE_IMM_Q: ++ case OPY_TYPE_IMM_W: ++ case OPY_TYPE_IMM_L: ++ return OP_TYPE_RN; ++ ++ case OPY_TYPE_MEM1: ++ case OPY_TYPE_MEM234: ++ case OPY_TYPE_MEM5: ++ case OPY_TYPE_MEM7: ++ return OP_TYPE_MEM1; ++ ++ case OPY_TYPE_MEM6: ++ return OP_TYPE_MEM6; ++ ++ default: ++ gcc_unreachable (); ++ return 0; ++ } ++ } ++} ++ + /* Implement op_mem attribute. */ + enum attr_op_mem + m68k_sched_attr_op_mem (rtx insn) + { +- enum attr_opy_mem opy; +- enum attr_opx_mem opx; ++ enum attr_op_type opx; ++ enum attr_op_type opy; + +- sched_guess_p = (get_attr_guess (insn) == GUESS_YES); ++ opx = sched_get_opxy_mem_type (insn, true); ++ opy = sched_get_opxy_mem_type (insn, false); + +- opy = get_attr_opy_mem (insn); +- opx = get_attr_opx_mem (insn); +- +- if (opy == OPY_MEM_R && opx == OPX_MEM_R) ++ if (opy == OP_TYPE_RN && opx == OP_TYPE_RN) + return OP_MEM_00; + +- if (opy == OPY_MEM_R && opx == OPX_MEM_M) ++ if (opy == OP_TYPE_RN && opx == OP_TYPE_MEM1) + { + switch (get_attr_opx_access (insn)) + { +@@ -4910,12 +5685,12 @@ m68k_sched_attr_op_mem (rtx insn) + return OP_MEM_11; + + default: +- gcc_assert (sched_guess_p); +- return OP_MEM_UNKNOWN; ++ gcc_unreachable (); ++ return 0; + } + } + +- if (opy == OPY_MEM_R && opx == OPX_MEM_I) ++ if (opy == OP_TYPE_RN && opx == OP_TYPE_MEM6) + { + switch (get_attr_opx_access (insn)) + { +@@ -4929,15 +5704,15 @@ m68k_sched_attr_op_mem (rtx insn) + return OP_MEM_I1; + + default: +- gcc_assert (sched_guess_p); +- return OP_MEM_UNKNOWN; ++ gcc_unreachable (); ++ return 0; + } + } + +- if (opy == OPY_MEM_M && opx == OPX_MEM_R) ++ if (opy == OP_TYPE_MEM1 && opx == OP_TYPE_RN) + return OP_MEM_10; + +- if (opy == OPY_MEM_M && opx == OPX_MEM_M) ++ if (opy == OP_TYPE_MEM1 && opx == OP_TYPE_MEM1) + { + switch (get_attr_opx_access (insn)) + { +@@ -4945,12 +5720,12 @@ m68k_sched_attr_op_mem (rtx insn) + return OP_MEM_11; + + default: +- gcc_assert (sched_guess_p); +- return OP_MEM_UNKNOWN; ++ gcc_assert (!reload_completed); ++ return OP_MEM_11; + } + } + +- if (opy == OPY_MEM_M && opx == OPX_MEM_I) ++ if (opy == OP_TYPE_MEM1 && opx == OP_TYPE_MEM6) + { + switch (get_attr_opx_access (insn)) + { +@@ -4958,16 +5733,15 @@ m68k_sched_attr_op_mem (rtx insn) + return OP_MEM_1I; + + default: +- gcc_assert (sched_guess_p); +- return OP_MEM_UNKNOWN; ++ gcc_assert (!reload_completed); ++ return OP_MEM_1I; + } + } + +- if (opy == OPY_MEM_I && opx == OPX_MEM_R) ++ if (opy == OP_TYPE_MEM6 && opx == OP_TYPE_RN) + return OP_MEM_I0; + +- +- if (opy == OPY_MEM_I && opx == OPX_MEM_M) ++ if (opy == OP_TYPE_MEM6 && opx == OP_TYPE_MEM1) + { + switch (get_attr_opx_access (insn)) + { +@@ -4975,13 +5749,14 @@ m68k_sched_attr_op_mem (rtx insn) + return OP_MEM_I1; + + default: +- gcc_assert (sched_guess_p); +- return OP_MEM_UNKNOWN; ++ gcc_assert (!reload_completed); ++ return OP_MEM_I1; + } + } + +- gcc_assert (sched_guess_p); +- return 
OP_MEM_UNKNOWN; ++ gcc_assert (opy == OP_TYPE_MEM6 && opx == OP_TYPE_MEM6); ++ gcc_assert (!reload_completed); ++ return OP_MEM_I1; + } + + /* Jump instructions types. Indexed by INSN_UID. +@@ -5004,66 +5779,21 @@ m68k_sched_branch_type (rtx insn) + return type; + } + +-/* Implement type2 attribute. */ +-enum attr_type2 +-m68k_sched_attr_type2 (rtx insn) ++/* Data for ColdFire V4 index bypass. ++ Producer modifies register that is used as index in consumer with ++ specified scale. */ ++static struct + { +- switch (get_attr_type1 (insn)) +- { +- case TYPE1_ALU_REG1: +- case TYPE1_ALU_REGX: +- return TYPE2_ALU; +- +- case TYPE1_ALU_L: +- case TYPE1_ALUQ_L: +- case TYPE1_CMP_L: +- return TYPE2_ALU_L; +- +- case TYPE1_BCC: +- return TYPE2_BCC; +- +- case TYPE1_BRA: +- return TYPE2_BRA; +- +- case TYPE1_BSR: +- case TYPE1_JSR: +- return TYPE2_CALL; +- +- case TYPE1_JMP: +- return TYPE2_JMP; +- +- case TYPE1_LEA: +- return TYPE2_LEA; +- +- case TYPE1_CLR: +- case TYPE1_MOV3Q_L: +- case TYPE1_MOVE: +- case TYPE1_MOVEQ_L: +- case TYPE1_TST: +- return TYPE2_MOVE; +- +- case TYPE1_MOVE_L: +- case TYPE1_TST_L: +- return TYPE2_MOVE_L; ++ /* Producer instruction. */ ++ rtx pro; + +- case TYPE1_MUL_W: +- case TYPE1_MUL_L: +- return TYPE2_MUL; ++ /* Consumer instruction. */ ++ rtx con; + +- case TYPE1_PEA: +- return TYPE2_PEA; +- +- case TYPE1_RTS: +- return TYPE2_RTS; +- +- case TYPE1_UNLK: +- return TYPE2_UNLK; +- +- default: +- gcc_assert (get_attr_guess (insn) == GUESS_YES); +- return TYPE2_UNKNOWN; +- } +-} ++ /* Scale of indexed memory access within consumer. ++ Or zero if bypass should not be effective at the moment. */ ++ int scale; ++} sched_cfv4_bypass_data; + + /* An empty state that is used in m68k_sched_adjust_cost. */ + static state_t sched_adjust_cost_state; +@@ -5080,13 +5810,33 @@ m68k_sched_adjust_cost (rtx insn, rtx li + || recog_memoized (insn) < 0) + return cost; + ++ if (sched_cfv4_bypass_data.scale == 1) ++ /* Handle ColdFire V4 bypass for indexed address with 1x scale. */ ++ { ++ /* haifa-sched.c: insn_cost () calls bypass_p () just before ++ targetm.sched.adjust_cost (). Hence, we can be relatively sure ++ that the data in sched_cfv4_bypass_data is up to date. */ ++ gcc_assert (sched_cfv4_bypass_data.pro == def_insn ++ && sched_cfv4_bypass_data.con == insn); ++ ++ if (cost < 3) ++ cost = 3; ++ ++ sched_cfv4_bypass_data.pro = NULL; ++ sched_cfv4_bypass_data.con = NULL; ++ sched_cfv4_bypass_data.scale = 0; ++ } ++ else ++ gcc_assert (sched_cfv4_bypass_data.pro == NULL ++ && sched_cfv4_bypass_data.con == NULL ++ && sched_cfv4_bypass_data.scale == 0); ++ + /* Don't try to issue INSN earlier than DFA permits. + This is especially useful for instructions that write to memory, + as their true dependence (default) latency is better to be set to 0 + to workaround alias analysis limitations. + This is, in fact, a machine independent tweak, so, probably, + it should be moved to haifa-sched.c: insn_cost (). */ +- + delay = min_insn_conflict_delay (sched_adjust_cost_state, def_insn, insn); + if (delay > cost) + cost = delay; +@@ -5094,237 +5844,147 @@ m68k_sched_adjust_cost (rtx insn, rtx li + return cost; + } + +-/* Size of the instruction buffer in words. */ +-static int sched_ib_size; +- +-/* Number of filled words in the instruction buffer. */ +-static int sched_ib_filled; +- +-/* An insn that reserves (marks empty) one word in the instruction buffer. */ +-static rtx sched_ib_insn; +- +-/* ID of memory unit. 
*/ +-static int sched_mem_unit_code; +- +-/* Implementation of the targetm.sched.variable_issue () hook. +- It is called after INSN was issued. It returns the number of insns +- that can possibly get scheduled on the current cycle. +- It is used here to determine the effect of INSN on the instruction +- buffer. */ ++/* Return maximal number of insns that can be scheduled on a single cycle. */ + static int +-m68k_sched_variable_issue (FILE *sched_dump ATTRIBUTE_UNUSED, +- int sched_verbose ATTRIBUTE_UNUSED, +- rtx insn, int can_issue_more) +-{ +- int insn_size; +- +- if (recog_memoized (insn) >= 0) +- { +- insn_size = get_attr_size (insn); +- +- gcc_assert (insn_size <= sched_ib_filled); +- +- --can_issue_more; +- } +- else if (GET_CODE (PATTERN (insn)) == ASM_INPUT +- || asm_noperands (PATTERN (insn)) >= 0) +- insn_size = sched_ib_filled; +- else +- insn_size = 0; +- +- sched_ib_filled -= insn_size; +- +- return can_issue_more; +-} +- +-/* Statistics gatherer. */ +- +-typedef enum +- { +- /* Something needs to be done for this insn. */ +- SCHED_DUMP_TODO, +- +- /* Support for this insn is complete. */ +- SCHED_DUMP_DONE, +- +- /* This insn didn't require much effort to support it. */ +- SCHED_DUMP_NOTHING +- } sched_dump_class_def; +- +-/* Pointer to functions that classifies insns into 3 above classes. */ +-typedef sched_dump_class_def (*sched_dump_class_func_t) (rtx); +- +-/* Return statistical type of INSN regarding splits. */ +-static sched_dump_class_def +-sched_dump_split_class (rtx insn) ++m68k_sched_issue_rate (void) + { +- int i; +- +- i = recog_memoized (insn); +- gcc_assert (i >= 0); +- +- switch (get_attr_split (insn)) ++ switch (m68k_sched_cpu) + { +- case SPLIT_TODO: +- return SCHED_DUMP_TODO; +- +- case SPLIT_DONE: +- return SCHED_DUMP_DONE; ++ case CPU_CFV1: ++ case CPU_CFV2: ++ case CPU_CFV3: ++ return 1; + +- case SPLIT_NOTHING: +- return SCHED_DUMP_NOTHING; ++ case CPU_CFV4: ++ return 2; + + default: + gcc_unreachable (); ++ return 0; + } + } + +-/* ID of the guess unit. */ +-static int sched_dump_dfa_guess_unit_code; ++/* Maximal length of instruction for current CPU. ++ E.g. it is 3 for any ColdFire core. */ ++static int max_insn_size; + +-/* DFA state for use in sched_dump_dfa_class (). */ +-static state_t sched_dump_dfa_state; +- +-/* Return statistical type of INSN regarding DFA reservations. */ +-static sched_dump_class_def +-sched_dump_dfa_class (rtx insn) ++/* Data to model instruction buffer of CPU. */ ++struct _sched_ib + { +- int i; ++ /* True if instruction buffer model is modeled for current CPU. */ ++ bool enabled_p; + +- i = recog_memoized (insn); +- gcc_assert (i >= 0 && insn_has_dfa_reservation_p (insn)); ++ /* Size of the instruction buffer in words. */ ++ int size; + +- if (sched_dump_split_class (insn) == SCHED_DUMP_TODO) +- /* Insn is not yet ready for reservations. */ +- return SCHED_DUMP_NOTHING; ++ /* Number of filled words in the instruction buffer. */ ++ int filled; + +- state_reset (sched_dump_dfa_state); ++ /* Additional information about instruction buffer for CPUs that have ++ a buffer of instruction records, rather then a plain buffer ++ of instruction words. */ ++ struct _sched_ib_records ++ { ++ /* Size of buffer in records. */ ++ int n_insns; + +- if (state_transition (sched_dump_dfa_state, insn) >= 0) +- gcc_unreachable (); ++ /* Array to hold data on adjustements made to the size of the buffer. 
*/ ++ int *adjust; + +- if (cpu_unit_reservation_p (sched_dump_dfa_state, +- sched_dump_dfa_guess_unit_code)) +- return SCHED_DUMP_TODO; ++ /* Index of the above array. */ ++ int adjust_index; ++ } records; + +- return SCHED_DUMP_DONE; +-} +- +-/* Dump statistics on current function into file DUMP_FILENAME and prefix +- each entry with PREFIX. +- Instructions are classified with DUMP_CLASS. */ +-static void +-m68k_sched_dump (sched_dump_class_func_t dump_class, +- const char *prefix, FILE *dump) +-{ +- sbitmap present; +- int *todos; +- int *dones; +- int *nothings; ++ /* An insn that reserves (marks empty) one word in the instruction buffer. */ + rtx insn; ++}; + +- gcc_assert (dump != NULL); ++static struct _sched_ib sched_ib; + +- present = sbitmap_alloc (CODE_FOR_nothing); +- sbitmap_zero (present); ++/* ID of memory unit. */ ++static int sched_mem_unit_code; + +- todos = xcalloc (CODE_FOR_nothing, sizeof (*todos)); +- dones = xcalloc (CODE_FOR_nothing, sizeof (*dones)); +- nothings = xcalloc (CODE_FOR_nothing, sizeof (*nothings)); ++/* Implementation of the targetm.sched.variable_issue () hook. ++ It is called after INSN was issued. It returns the number of insns ++ that can possibly get scheduled on the current cycle. ++ It is used here to determine the effect of INSN on the instruction ++ buffer. */ ++static int ++m68k_sched_variable_issue (FILE *sched_dump ATTRIBUTE_UNUSED, ++ int sched_verbose ATTRIBUTE_UNUSED, ++ rtx insn, int can_issue_more) ++{ ++ int insn_size; + +- /* Gather statistics. */ +- for (insn = get_insns (); insn != NULL_RTX; insn = NEXT_INSN (insn)) ++ if (recog_memoized (insn) >= 0 && get_attr_type (insn) != TYPE_IGNORE) + { +- if (INSN_P (insn) && recog_memoized (insn) >= 0) ++ switch (m68k_sched_cpu) + { +- enum insn_code code; ++ case CPU_CFV1: ++ case CPU_CFV2: ++ insn_size = sched_get_attr_size_int (insn); ++ break; ++ ++ case CPU_CFV3: ++ insn_size = sched_get_attr_size_int (insn); ++ ++ /* ColdFire V3 and V4 cores have instruction buffers that can ++ accumulate up to 8 instructions regardless of instructions' ++ sizes. So we should take care not to "prefetch" 24 one-word ++ or 12 two-words instructions. ++ To model this behavior we temporarily decrease size of the ++ buffer by (max_insn_size - insn_size) for next 7 instructions. */ ++ { ++ int adjust; + +- code = INSN_CODE (insn); +- gcc_assert (code < CODE_FOR_nothing); ++ adjust = max_insn_size - insn_size; ++ sched_ib.size -= adjust; + +- SET_BIT (present, code); ++ if (sched_ib.filled > sched_ib.size) ++ sched_ib.filled = sched_ib.size; + +- switch (dump_class (insn)) +- { +- case SCHED_DUMP_TODO: +- ++todos[code]; +- break; ++ sched_ib.records.adjust[sched_ib.records.adjust_index] = adjust; ++ } + +- case SCHED_DUMP_DONE: +- ++dones[code]; +- break; ++ ++sched_ib.records.adjust_index; ++ if (sched_ib.records.adjust_index == sched_ib.records.n_insns) ++ sched_ib.records.adjust_index = 0; ++ ++ /* Undo adjustement we did 7 instructions ago. */ ++ sched_ib.size ++ += sched_ib.records.adjust[sched_ib.records.adjust_index]; ++ ++ break; ++ ++ case CPU_CFV4: ++ gcc_assert (!sched_ib.enabled_p); ++ insn_size = 0; ++ break; + +- case SCHED_DUMP_NOTHING: +- ++nothings[code]; +- break; +- } ++ default: ++ gcc_unreachable (); + } +- } +- +- /* Print statisctics. 
*/ +- { +- unsigned int i; +- sbitmap_iterator si; +- int total_todo; +- int total_done; +- int total_nothing; +- +- total_todo = 0; +- total_done = 0; +- total_nothing = 0; +- +- EXECUTE_IF_SET_IN_SBITMAP (present, 0, i, si) +- { +- int todo; +- int done; +- int nothing; +- enum insn_code code; +- +- code = (enum insn_code) i; +- +- todo = todos[code]; +- done = dones[code]; +- nothing = nothings[code]; +- +- total_todo += todo; +- total_done += done; +- total_nothing += nothing; + +- if (todo != 0) +- { +- fprintf (dump, +- "%s: %3d: %d / %d / %d ;", +- prefix, code, todo, done, nothing); +- +- { +- const char *name; +- +- name = get_insn_name (code); +- +- if (name != NULL) +- fprintf (dump, " {%s}\n", name); +- else +- fprintf (dump, " {unknown}\n"); +- } +- } +- } +- +- gcc_assert (CODE_FOR_nothing < 999); ++ gcc_assert (insn_size <= sched_ib.filled); ++ --can_issue_more; ++ } ++ else if (GET_CODE (PATTERN (insn)) == ASM_INPUT ++ || asm_noperands (PATTERN (insn)) >= 0) ++ insn_size = sched_ib.filled; ++ else ++ insn_size = 0; + +- fprintf (dump, +- "%s: 999: %d / %d / %d ; {total}\n", +- prefix, total_todo, total_done, total_nothing); +- } ++ sched_ib.filled -= insn_size; + +- free (nothings); +- nothings = NULL; +- free (dones); +- dones = NULL; +- free (todos); +- todos = NULL; ++ return can_issue_more; ++} + +- sbitmap_free (present); +- present = NULL; ++/* Return how many instructions should scheduler lookahead to choose the ++ best one. */ ++static int ++m68k_sched_first_cycle_multipass_dfa_lookahead (void) ++{ ++ return m68k_sched_issue_rate () - 1; + } + + /* Implementation of targetm.sched.md_init_global () hook. +@@ -5350,40 +6010,69 @@ m68k_sched_md_init_global (FILE *sched_d + } + } + +- if (reload_completed && sched_verbose >= 8) +- /* Dump statistics. */ +- { +- m68k_sched_dump (sched_dump_split_class, "m68k_sched_split", +- sched_dump); ++#ifdef ENABLE_CHECKING ++ /* Check that all instructions have DFA reservations and ++ that all instructions can be issued from a clean state. */ ++ { ++ rtx insn; ++ state_t state; + +- sched_dump_dfa_guess_unit_code = get_cpu_unit_code ("cf_v2_guess"); +- sched_dump_dfa_state = alloca (state_size ()); ++ state = alloca (state_size ()); + +- m68k_sched_dump (sched_dump_dfa_class, "m68k_sched_dfa", +- sched_dump); ++ for (insn = get_insns (); insn != NULL_RTX; insn = NEXT_INSN (insn)) ++ { ++ if (INSN_P (insn) && recog_memoized (insn) >= 0) ++ { ++ gcc_assert (insn_has_dfa_reservation_p (insn)); + +- sched_dump_dfa_state = NULL; +- sched_dump_dfa_guess_unit_code = 0; +- } ++ state_reset (state); ++ if (state_transition (state, insn) >= 0) ++ gcc_unreachable (); ++ } ++ } ++ } ++#endif + + /* Setup target cpu. */ ++ ++ /* ColdFire V4 has a set of features to keep its instruction buffer full ++ (e.g., a separate memory bus for instructions) and, hence, we do not model ++ buffer for this CPU. 
*/ ++ sched_ib.enabled_p = (m68k_sched_cpu != CPU_CFV4); ++ + switch (m68k_sched_cpu) + { +- case CPU_CF_V2: +- sched_ib_size = 6; +- sched_mem_unit_code = get_cpu_unit_code ("cf_v2_mem"); ++ case CPU_CFV4: ++ sched_ib.filled = 0; ++ ++ /* FALLTHRU */ ++ ++ case CPU_CFV1: ++ case CPU_CFV2: ++ max_insn_size = 3; ++ sched_ib.records.n_insns = 0; ++ sched_ib.records.adjust = NULL; ++ break; ++ ++ case CPU_CFV3: ++ max_insn_size = 3; ++ sched_ib.records.n_insns = 8; ++ sched_ib.records.adjust = xmalloc (sched_ib.records.n_insns ++ * sizeof (*sched_ib.records.adjust)); + break; + + default: + gcc_unreachable (); + } + ++ sched_mem_unit_code = get_cpu_unit_code ("cf_mem1"); ++ + sched_adjust_cost_state = xmalloc (state_size ()); + state_reset (sched_adjust_cost_state); + + start_sequence (); + emit_insn (gen_ib ()); +- sched_ib_insn = get_insns (); ++ sched_ib.insn = get_insns (); + end_sequence (); + } + +@@ -5392,13 +6081,17 @@ static void + m68k_sched_md_finish_global (FILE *dump ATTRIBUTE_UNUSED, + int verbose ATTRIBUTE_UNUSED) + { +- sched_ib_insn = NULL; ++ sched_ib.insn = NULL; + + free (sched_adjust_cost_state); + sched_adjust_cost_state = NULL; + + sched_mem_unit_code = 0; +- sched_ib_size = 0; ++ ++ free (sched_ib.records.adjust); ++ sched_ib.records.adjust = NULL; ++ sched_ib.records.n_insns = 0; ++ max_insn_size = 0; + + free (sched_branch_type); + sched_branch_type = NULL; +@@ -5412,9 +6105,34 @@ m68k_sched_md_init (FILE *sched_dump ATT + int sched_verbose ATTRIBUTE_UNUSED, + int n_insns ATTRIBUTE_UNUSED) + { +- /* haifa-sched.c: schedule_block () calls advance_cycle () just before +- the first cycle. Workaround that. */ +- sched_ib_filled = -2; ++ switch (m68k_sched_cpu) ++ { ++ case CPU_CFV1: ++ case CPU_CFV2: ++ sched_ib.size = 6; ++ break; ++ ++ case CPU_CFV3: ++ sched_ib.size = sched_ib.records.n_insns * max_insn_size; ++ ++ memset (sched_ib.records.adjust, 0, ++ sched_ib.records.n_insns * sizeof (*sched_ib.records.adjust)); ++ sched_ib.records.adjust_index = 0; ++ break; ++ ++ case CPU_CFV4: ++ gcc_assert (!sched_ib.enabled_p); ++ sched_ib.size = 0; ++ break; ++ ++ default: ++ gcc_unreachable (); ++ } ++ ++ if (sched_ib.enabled_p) ++ /* haifa-sched.c: schedule_block () calls advance_cycle () just before ++ the first cycle. Workaround that. */ ++ sched_ib.filled = -2; + } + + /* Implementation of targetm.sched.dfa_pre_advance_cycle () hook. +@@ -5423,12 +6141,15 @@ m68k_sched_md_init (FILE *sched_dump ATT + static void + m68k_sched_dfa_pre_advance_cycle (void) + { ++ if (!sched_ib.enabled_p) ++ return; ++ + if (!cpu_unit_reservation_p (curr_state, sched_mem_unit_code)) + { +- sched_ib_filled += 2; ++ sched_ib.filled += 2; + +- if (sched_ib_filled > sched_ib_size) +- sched_ib_filled = sched_ib_size; ++ if (sched_ib.filled > sched_ib.size) ++ sched_ib.filled = sched_ib.size; + } + } + +@@ -5441,13 +6162,180 @@ static void + m68k_sched_dfa_post_advance_cycle (void) + { + int i; +- int n; ++ ++ if (!sched_ib.enabled_p) ++ return; + + /* Setup number of prefetched instruction words in the instruction + buffer. */ +- for (i = sched_ib_filled, n = sched_ib_size; i < n; ++i) ++ i = max_insn_size - sched_ib.filled; ++ ++ while (--i >= 0) + { +- if (state_transition (curr_state, sched_ib_insn) >= 0) ++ if (state_transition (curr_state, sched_ib.insn) >= 0) + gcc_unreachable (); + } + } ++ ++/* Return X or Y (depending on OPX_P) operand of INSN, ++ if it is an integer register, or NULL overwise. 
*/ ++static rtx ++sched_get_reg_operand (rtx insn, bool opx_p) ++{ ++ rtx op = NULL; ++ ++ if (opx_p) ++ { ++ if (get_attr_opx_type (insn) == OPX_TYPE_RN) ++ { ++ op = sched_get_operand (insn, true); ++ gcc_assert (op != NULL); ++ ++ if (!reload_completed && !REG_P (op)) ++ return NULL; ++ } ++ } ++ else ++ { ++ if (get_attr_opy_type (insn) == OPY_TYPE_RN) ++ { ++ op = sched_get_operand (insn, false); ++ gcc_assert (op != NULL); ++ ++ if (!reload_completed && !REG_P (op)) ++ return NULL; ++ } ++ } ++ ++ return op; ++} ++ ++/* Return true, if X or Y (depending on OPX_P) operand of INSN ++ is a MEM. */ ++static bool ++sched_mem_operand_p (rtx insn, bool opx_p) ++{ ++ switch (sched_get_opxy_mem_type (insn, opx_p)) ++ { ++ case OP_TYPE_MEM1: ++ case OP_TYPE_MEM6: ++ return true; ++ ++ default: ++ return false; ++ } ++} ++ ++/* Return X or Y (depending on OPX_P) operand of INSN, ++ if it is a MEM, or NULL overwise. */ ++static rtx ++sched_get_mem_operand (rtx insn, bool must_read_p, bool must_write_p) ++{ ++ bool opx_p; ++ bool opy_p; ++ ++ opx_p = false; ++ opy_p = false; ++ ++ if (must_read_p) ++ { ++ opx_p = true; ++ opy_p = true; ++ } ++ ++ if (must_write_p) ++ { ++ opx_p = true; ++ opy_p = false; ++ } ++ ++ if (opy_p && sched_mem_operand_p (insn, false)) ++ return sched_get_operand (insn, false); ++ ++ if (opx_p && sched_mem_operand_p (insn, true)) ++ return sched_get_operand (insn, true); ++ ++ gcc_unreachable (); ++ return NULL; ++} ++ ++/* Return non-zero if PRO modifies register used as part of ++ address in CON. */ ++int ++m68k_sched_address_bypass_p (rtx pro, rtx con) ++{ ++ rtx pro_x; ++ rtx con_mem_read; ++ ++ pro_x = sched_get_reg_operand (pro, true); ++ if (pro_x == NULL) ++ return 0; ++ ++ con_mem_read = sched_get_mem_operand (con, true, false); ++ gcc_assert (con_mem_read != NULL); ++ ++ if (reg_mentioned_p (pro_x, con_mem_read)) ++ return 1; ++ ++ return 0; ++} ++ ++/* Helper function for m68k_sched_indexed_address_bypass_p. ++ if PRO modifies register used as index in CON, ++ return scale of indexed memory access in CON. Return zero overwise. */ ++static int ++sched_get_indexed_address_scale (rtx pro, rtx con) ++{ ++ rtx reg; ++ rtx mem; ++ struct m68k_address address; ++ ++ reg = sched_get_reg_operand (pro, true); ++ if (reg == NULL) ++ return 0; ++ ++ mem = sched_get_mem_operand (con, true, false); ++ gcc_assert (mem != NULL && MEM_P (mem)); ++ ++ if (!m68k_decompose_address (GET_MODE (mem), XEXP (mem, 0), reload_completed, ++ &address)) ++ gcc_unreachable (); ++ ++ if (REGNO (reg) == REGNO (address.index)) ++ { ++ gcc_assert (address.scale != 0); ++ return address.scale; ++ } ++ ++ return 0; ++} ++ ++/* Return non-zero if PRO modifies register used ++ as index with scale 2 or 4 in CON. */ ++int ++m68k_sched_indexed_address_bypass_p (rtx pro, rtx con) ++{ ++ gcc_assert (sched_cfv4_bypass_data.pro == NULL ++ && sched_cfv4_bypass_data.con == NULL ++ && sched_cfv4_bypass_data.scale == 0); ++ ++ switch (sched_get_indexed_address_scale (pro, con)) ++ { ++ case 1: ++ /* We can't have a variable latency bypass, so ++ remember to adjust the insn cost in adjust_cost hook. */ ++ sched_cfv4_bypass_data.pro = pro; ++ sched_cfv4_bypass_data.con = con; ++ sched_cfv4_bypass_data.scale = 1; ++ return 0; ++ ++ case 2: ++ case 4: ++ return 1; ++ ++ default: ++ return 0; ++ } ++} ++ ++#include "gt-m68k.h" +--- a/gcc/config/m68k/m68k.h ++++ b/gcc/config/m68k/m68k.h +@@ -232,6 +232,7 @@ along with GCC; see the file COPYING3. 
+ #define FL_ISA_C (1 << 16) + #define FL_FIDOA (1 << 17) + #define FL_MMU 0 /* Used by multilib machinery. */ ++#define FL_UCLINUX 0 /* Used by multilib machinery. */ + + #define TARGET_68010 ((m68k_cpu_flags & FL_ISA_68010) != 0) + #define TARGET_68020 ((m68k_cpu_flags & FL_ISA_68020) != 0) +@@ -266,6 +267,11 @@ along with GCC; see the file COPYING3. + #define TUNE_CPU32 (m68k_tune == ucpu32) + #define TUNE_CFV1 (m68k_tune == ucfv1) + #define TUNE_CFV2 (m68k_tune == ucfv2) ++#define TUNE_CFV3 (m68k_tune == ucfv3) ++#define TUNE_CFV4 (m68k_tune == ucfv4 || m68k_tune == ucfv4e) ++ ++#define TUNE_MAC ((m68k_tune_flags & FL_CF_MAC) != 0) ++#define TUNE_EMAC ((m68k_tune_flags & FL_CF_EMAC) != 0) + + #define OVERRIDE_OPTIONS override_options() + +@@ -496,7 +502,8 @@ enum reg_class { + + extern enum reg_class regno_reg_class[]; + #define REGNO_REG_CLASS(REGNO) (regno_reg_class[(REGNO)]) +-#define INDEX_REG_CLASS GENERAL_REGS ++#define MODE_INDEX_REG_CLASS(MODE) \ ++ (MODE_OK_FOR_INDEX_P (MODE) ? GENERAL_REGS : NO_REGS) + #define BASE_REG_CLASS ADDR_REGS + + #define PREFERRED_RELOAD_CLASS(X,CLASS) \ +@@ -665,6 +672,10 @@ __transfer_from_trampoline () \ + #define HAVE_POST_INCREMENT 1 + #define HAVE_PRE_DECREMENT 1 + ++/* Return true if addresses of mode MODE can have an index register. */ ++#define MODE_OK_FOR_INDEX_P(MODE) \ ++ (!TARGET_COLDFIRE_FPU || GET_MODE_CLASS (MODE) != MODE_FLOAT) ++ + /* Macros to check register numbers against specific register classes. */ + + /* True for data registers, D0 through D7. */ +@@ -679,9 +690,10 @@ __transfer_from_trampoline () \ + /* True for floating point registers, FP0 through FP7. */ + #define FP_REGNO_P(REGNO) IN_RANGE (REGNO, 16, 23) + +-#define REGNO_OK_FOR_INDEX_P(REGNO) \ +- (INT_REGNO_P (REGNO) \ +- || INT_REGNO_P (reg_renumber[REGNO])) ++#define REGNO_MODE_OK_FOR_INDEX_P(REGNO, MODE) \ ++ (MODE_OK_FOR_INDEX_P (MODE) \ ++ && (INT_REGNO_P (REGNO) \ ++ || INT_REGNO_P (reg_renumber[REGNO]))) + + #define REGNO_OK_FOR_BASE_P(REGNO) \ + (ADDRESS_REGNO_P (REGNO) \ +@@ -741,13 +753,14 @@ __transfer_from_trampoline () \ + + #define LEGITIMATE_PIC_OPERAND_P(X) \ + (!symbolic_operand (X, VOIDmode) \ +- || (TARGET_PCREL && REG_STRICT_P)) ++ || (TARGET_PCREL && REG_STRICT_P) \ ++ || m68k_tls_mentioned_p (X)) + + #define REG_OK_FOR_BASE_P(X) \ + m68k_legitimate_base_reg_p (X, REG_STRICT_P) + +-#define REG_OK_FOR_INDEX_P(X) \ +- m68k_legitimate_index_reg_p (X, REG_STRICT_P) ++#define REG_MODE_OK_FOR_INDEX_P(X, MODE) \ ++ m68k_legitimate_index_reg_p (MODE, X, REG_STRICT_P) + + #define GO_IF_LEGITIMATE_ADDRESS(MODE, X, ADDR) \ + do \ +@@ -760,52 +773,19 @@ __transfer_from_trampoline () \ + /* This address is OK as it stands. */ + #define PIC_CASE_VECTOR_ADDRESS(index) index + +-/* For the 68000, we handle X+REG by loading X into a register R and +- using R+REG. R will go in an address reg and indexing will be used. +- However, if REG is a broken-out memory address or multiplication, +- nothing needs to be done because REG can certainly go in an address reg. 
*/ +-#define COPY_ONCE(Y) if (!copied) { Y = copy_rtx (Y); copied = ch = 1; } +-#define LEGITIMIZE_ADDRESS(X,OLDX,MODE,WIN) \ +-{ register int ch = (X) != (OLDX); \ +- if (GET_CODE (X) == PLUS) \ +- { int copied = 0; \ +- if (GET_CODE (XEXP (X, 0)) == MULT) \ +- { COPY_ONCE (X); XEXP (X, 0) = force_operand (XEXP (X, 0), 0);} \ +- if (GET_CODE (XEXP (X, 1)) == MULT) \ +- { COPY_ONCE (X); XEXP (X, 1) = force_operand (XEXP (X, 1), 0);} \ +- if (ch && GET_CODE (XEXP (X, 1)) == REG \ +- && GET_CODE (XEXP (X, 0)) == REG) \ +- { if (TARGET_COLDFIRE_FPU \ +- && GET_MODE_CLASS (MODE) == MODE_FLOAT) \ +- { COPY_ONCE (X); X = force_operand (X, 0);} \ +- goto WIN; } \ +- if (ch) { GO_IF_LEGITIMATE_ADDRESS (MODE, X, WIN); } \ +- if (GET_CODE (XEXP (X, 0)) == REG \ +- || (GET_CODE (XEXP (X, 0)) == SIGN_EXTEND \ +- && GET_CODE (XEXP (XEXP (X, 0), 0)) == REG \ +- && GET_MODE (XEXP (XEXP (X, 0), 0)) == HImode)) \ +- { register rtx temp = gen_reg_rtx (Pmode); \ +- register rtx val = force_operand (XEXP (X, 1), 0); \ +- emit_move_insn (temp, val); \ +- COPY_ONCE (X); \ +- XEXP (X, 1) = temp; \ +- if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (MODE) == MODE_FLOAT \ +- && GET_CODE (XEXP (X, 0)) == REG) \ +- X = force_operand (X, 0); \ +- goto WIN; } \ +- else if (GET_CODE (XEXP (X, 1)) == REG \ +- || (GET_CODE (XEXP (X, 1)) == SIGN_EXTEND \ +- && GET_CODE (XEXP (XEXP (X, 1), 0)) == REG \ +- && GET_MODE (XEXP (XEXP (X, 1), 0)) == HImode)) \ +- { register rtx temp = gen_reg_rtx (Pmode); \ +- register rtx val = force_operand (XEXP (X, 0), 0); \ +- emit_move_insn (temp, val); \ +- COPY_ONCE (X); \ +- XEXP (X, 0) = temp; \ +- if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (MODE) == MODE_FLOAT \ +- && GET_CODE (XEXP (X, 1)) == REG) \ +- X = force_operand (X, 0); \ +- goto WIN; }}} ++#define LEGITIMIZE_ADDRESS(X, OLDX, MODE, WIN) \ ++do { \ ++ rtx __x; \ ++ \ ++ __x = m68k_legitimize_address (X, OLDX, MODE); \ ++ if (__x != NULL_RTX) \ ++ { \ ++ X = __x; \ ++ \ ++ if (memory_address_p (MODE, X)) \ ++ goto WIN; \ ++ } \ ++} while (0) + + /* On the 68000, only predecrement and postincrement address depend thus + (the amount of decrement or increment being the length of the operand). +@@ -845,6 +825,14 @@ __transfer_from_trampoline () \ + some or all of the saved cc's so they won't be used. */ + #define NOTICE_UPDATE_CC(EXP,INSN) notice_update_cc (EXP, INSN) + ++/* The shift instructions always clear the overflow bit. */ ++#define CC_OVERFLOW_UNUSABLE 01000 ++ ++/* The shift instructions use the carry bit in a way not compatible with ++ conditional branches. conditions.h uses CC_NO_OVERFLOW for this purpose. ++ Rename it to something more understandable. */ ++#define CC_NO_CARRY CC_NO_OVERFLOW ++ + #define OUTPUT_JUMP(NORMAL, FLOAT, NO_OV) \ + do { if (cc_prev_status.flags & CC_IN_68881) \ + return FLOAT; \ +@@ -1077,6 +1065,12 @@ do { if (cc_prev_status.flags & CC_IN_68 + + #define PRINT_OPERAND_ADDRESS(FILE, ADDR) print_operand_address (FILE, ADDR) + ++#define OUTPUT_ADDR_CONST_EXTRA(FILE, X, FAIL) \ ++do { \ ++ if (! m68k_output_addr_const_extra (FILE, (X))) \ ++ goto FAIL; \ ++} while (0); ++ + /* Values used in the MICROARCH argument to M68K_DEVICE. 
*/ + enum uarch_type + { +@@ -1129,6 +1123,7 @@ extern enum target_device m68k_cpu; + extern enum uarch_type m68k_tune; + extern enum fpu_type m68k_fpu; + extern unsigned int m68k_cpu_flags; ++extern unsigned int m68k_tune_flags; + extern const char *m68k_symbolic_call; + extern const char *m68k_symbolic_jump; + +@@ -1148,4 +1143,7 @@ extern M68K_CONST_METHOD m68k_const_meth + + extern void m68k_emit_move_double (rtx [2]); + ++extern int m68k_sched_address_bypass_p (rtx, rtx); ++extern int m68k_sched_indexed_address_bypass_p (rtx, rtx); ++ + #define CPU_UNITS_QUERY 1 +--- a/gcc/config/m68k/m68k.md ++++ b/gcc/config/m68k/m68k.md +@@ -115,6 +115,9 @@ + (UNSPEC_COS 2) + (UNSPEC_GOT 3) + (UNSPEC_IB 4) ++ (UNSPEC_TIE 5) ++ (UNSPEC_RELOC16 6) ++ (UNSPEC_RELOC32 7) + ]) + + ;; UNSPEC_VOLATILE usage: +@@ -144,197 +147,104 @@ + ;; :::::::::::::::::::: + + ;; Processor type. +-(define_attr "cpu" "cf_v2, unknown" (const (symbol_ref "m68k_sched_cpu"))) ++(define_attr "cpu" "cfv1, cfv2, cfv3, cfv4, unknown" ++ (const (symbol_ref "m68k_sched_cpu"))) + +-;; Instruction type. +-;; Basically, an asm pattern. +-(define_attr "type" +- "add_l, addq_l, asr_l, bcc, bclr, bra, bset, bsr, +- clr_b, clr_w, clr_l, cmp_l, +- ext_w, extb_l, ext_l, +- fadd, fcmp, fdiv, ff1, fintrz, fmove, fmul, fsqrt, fsub, ftst, jmp, jsr, +- ib, +- lea, lsr_l, +- move_b, move_w, move_l, moveq_l, mov3q_l, mvs_b, mvs_w, mvz_b, mvz_w, +- muls_w, muls_l, mulu_w, mulu_l, +- neg_l, nop, not_l, +- pea, rts, +- scc, sub_l, subq_l, +- trap, tst_b, tst_l, tst_w, +- unlk, unknown" +- (const_string "unknown")) ++;; MAC type. ++(define_attr "mac" "no, cf_mac, cf_emac" ++ (const (symbol_ref "m68k_sched_mac"))) + + ;; Instruction type for use in scheduling description. + ;; _l and _w suffixes indicate size of the operands of instruction. + ;; alu - usual arithmetic or logic instruction. +-;; alu_reg1 - arithmetic or logic instruction with one operand that is +-;; a register. +-;; alu_regx - arithmetic or logic instruction which has a register for its +-;; X operand. + ;; aluq - arithmetic or logic instruction which has a quick immediate (the one + ;; that is encoded in the instruction word) for its Y operand. +-;; - corresponding asm instructions. +-(define_attr "type1" +- "alu_l, alu_reg1, alu_regx, aluq_l, bcc, bra, bsr, clr, cmp_l, jmp, jsr, lea, +- mov3q_l, move, move_l, moveq_l, mul_l, mul_w, pea, rts, tst, tst_l, unlk, ++;; alux - Arithmetic instruction that uses carry bit (e.g., addx and subx). ++;; bcc - conditional branch. ++;; bitr - bit operation that only updates flags. ++;; bitrw - bit operation that updates flags and output operand. ++;; bra, bsr, clr, cmp, div, ext - corresponding instruction. ++;; falu, fbcc, fcmp, fdiv, fmove, fmul, fneg, fsqrt, ftst - corresponding ++;; instruction. ++;; ib - fake instruction to subscribe slots in ColdFire V1,V2,V3 instruction ++;; buffer. ++;; ignore - fake instruction. ++;; jmp, jsr, lea, link, mov3q, move, moveq, mul - corresponding instruction. ++;; mvsz - mvs or mvz instruction. ++;; neg, nop, pea, rts, scc - corresponding instruction. ++;; shift - arithmetic or logical shift instruction. ++;; trap, tst, unlk - corresponding instruction. 
++(define_attr "type" ++ "alu_l,aluq_l,alux_l,bcc,bitr,bitrw,bra,bsr,clr,clr_l,cmp,cmp_l, ++ div_w,div_l,ext, ++ falu,fbcc,fcmp,fdiv,fmove,fmul,fneg,fsqrt,ftst, ++ ib,ignore, ++ jmp,jsr,lea,link,mov3q_l,move,move_l,moveq_l,mul_w,mul_l,mvsz,neg_l,nop, ++ pea,rts,scc,shift, ++ trap,tst,tst_l,unlk, + unknown" +- (cond [(eq_attr "type" "add_l,sub_l") (const_string "alu_l") +- (eq_attr "type" "ext_w,extb_l,ext_l,neg_l,not_l") +- (const_string "alu_reg1") +- (eq_attr "type" "asr_l,lsr_l") (const_string "alu_regx") +- (eq_attr "type" "addq_l,subq_l") (const_string "aluq_l") +- (eq_attr "type" "bcc") (const_string "bcc") +- (eq_attr "type" "bra") (const_string "bra") +- (eq_attr "type" "bsr") (const_string "bsr") +- (eq_attr "type" "clr_b,clr_l,clr_w") (const_string "clr") +- (eq_attr "type" "cmp_l") (const_string "cmp_l") +- (eq_attr "type" "jmp") (const_string "jmp") +- (eq_attr "type" "jsr") (const_string "jsr") +- (eq_attr "type" "lea") (const_string "lea") +- (eq_attr "type" "mov3q_l") (const_string "mov3q_l") +- (eq_attr "type" "move_b,move_w") (const_string "move") +- (eq_attr "type" "move_l") (const_string "move_l") +- (eq_attr "type" "moveq_l") (const_string "moveq_l") +- (eq_attr "type" "muls_l,mulu_l") (const_string "mul_l") +- (eq_attr "type" "muls_w,mulu_w") (const_string "mul_w") +- (eq_attr "type" "pea") (const_string "pea") +- (eq_attr "type" "rts") (const_string "rts") +- (eq_attr "type" "tst_b,tst_w") (const_string "tst") +- (eq_attr "type" "tst_l") (const_string "tst_l") +- (eq_attr "type" "unlk") (const_string "unlk")] +- (const_string "unknown"))) ++ (const_string "unknown")) + + ;; Index of the X or Y operand in recog_data.operand[]. + ;; Should be used only within opx_type and opy_type. + (define_attr "opx" "" (const_int 0)) + (define_attr "opy" "" (const_int 1)) + +-;; Type of the X operand. +-;; See m68k.c: enum attr_op_type. +-(define_attr "opx_type" +- "none, reg, mem1, mem234, mem5, mem6, mem7, imm_q, imm_w, imm_l" +- (cond [(eq_attr "type1" "rts,unlk") (const_string "none") +- (eq_attr "type1" "alu_reg1,alu_regx,lea,moveq_l,mul_l,mul_w") +- (const_string "reg") +- (eq_attr "type1" "pea") (const_string "mem1") +- (eq_attr "type1" "bcc") (const_string "imm_q") +- (eq_attr "type1" "bra,bsr") (const_string "imm_w") +- (eq_attr "type1" "jmp,jsr") +- (symbol_ref "m68k_sched_attr_opx_type (insn, 1)")] +- (symbol_ref "m68k_sched_attr_opx_type (insn, 0)"))) +- + ;; Type of the Y operand. + ;; See m68k.c: enum attr_op_type. + (define_attr "opy_type" +- "none, reg, mem1, mem234, mem5, mem6, mem7, imm_q, imm_w, imm_l" +- (cond [(eq_attr "type1" "alu_reg1,bcc,bra,bsr,clr,jmp,jsr,rts,tst,tst_l, +- unlk") (const_string "none") +- (eq_attr "type1" "mov3q_l,moveq_l,aluq_l") (const_string "imm_q") +- (eq_attr "type1" "lea,pea") ++ "none,Rn,FPn,mem1,mem234,mem5,mem6,mem7,imm_q,imm_w,imm_l" ++ (cond [(eq_attr "type" "ext,fbcc,ftst,neg_l,bcc,bra,bsr,clr,clr_l,ib,ignore, ++ jmp,jsr,nop,rts,scc,trap,tst,tst_l, ++ unlk,unknown") (const_string "none") ++ (eq_attr "type" "lea,pea") + (symbol_ref "m68k_sched_attr_opy_type (insn, 1)")] + (symbol_ref "m68k_sched_attr_opy_type (insn, 0)"))) + +-;; Instruction size in words. +-(define_attr "size" "" +- (cond [(eq_attr "type1" "alu_reg1,moveq_l,rts,unlk") (const_int 1)] +- (symbol_ref "m68k_sched_attr_size (insn)"))) ++;; Type of the X operand. ++;; See m68k.c: enum attr_op_type. 
++(define_attr "opx_type" ++ "none,Rn,FPn,mem1,mem234,mem5,mem6,mem7,imm_q,imm_w,imm_l" ++ (cond [(eq_attr "type" "ib,ignore,nop,rts,trap,unlk, ++ unknown") (const_string "none") ++ (eq_attr "type" "pea") (const_string "mem1") ++ (eq_attr "type" "jmp,jsr") ++ (symbol_ref "m68k_sched_attr_opx_type (insn, 1)")] ++ (symbol_ref "m68k_sched_attr_opx_type (insn, 0)"))) + + ;; Access to the X operand: none, read, write, read/write, unknown. + ;; Access to the Y operand is either none (if opy_type is none) + ;; or read otherwise. +-(define_attr "opx_access" "none, r, w, rw, unknown" +- (cond [(eq_attr "type1" "rts,unlk") (const_string "none") +- (eq_attr "type1" "bcc,bra,bsr,cmp_l,jmp,jsr,tst,tst_l") +- (const_string "r") +- (eq_attr "type1" "clr,lea,mov3q_l,move,move_l,moveq_l,pea") +- (const_string "w") +- (eq_attr "type1" "alu_l,alu_reg1,alu_regx,aluq_l") +- (const_string "rw")] +- (const_string "unknown"))) +- +-;; Memory relation of operands: +-;; r - register or immediate operand +-;; m - non-indexed memory location +-;; i - indexed memory location +- +-(define_attr "opx_mem" "r, m, i, unknown" +- (cond [(eq_attr "opx_type" "none,reg,imm_q,imm_w,imm_l") (const_string "r") +- (eq_attr "opx_type" "mem1,mem234,mem5,mem7") (const_string "m") +- (eq_attr "opx_type" "mem6") (const_string "i")] +- (const_string "unknown"))) +- +-(define_attr "opy_mem" "r, m, i, unknown" +- (cond [(eq_attr "opy_type" "none,reg,imm_q,imm_w,imm_l") (const_string "r") +- (eq_attr "opy_type" "mem1,mem234,mem5,mem7") (const_string "m") +- (eq_attr "opy_type" "mem6") (const_string "i")] +- (const_string "unknown"))) ++(define_attr "opx_access" "none, r, w, rw" ++ (cond [(eq_attr "type" "ib,ignore,nop,rts,trap,unlk, ++ unknown") (const_string "none") ++ (eq_attr "type" "bcc,bra,bsr,bitr,cmp,cmp_l,fbcc,fcmp,ftst, ++ jmp,jsr,tst,tst_l") (const_string "r") ++ (eq_attr "type" "clr,clr_l,fneg,fmove,lea, ++ mov3q_l,move,move_l,moveq_l,mvsz, ++ pea,scc") (const_string "w") ++ (eq_attr "type" "alu_l,aluq_l,alux_l,bitrw,div_w,div_l,ext, ++ falu,fdiv,fmul,fsqrt,link,mul_w,mul_l, ++ neg_l,shift") (const_string "rw")] ++ ;; Should never be used. ++ (symbol_ref "(gcc_unreachable (), OPX_ACCESS_NONE)"))) + + ;; Memory accesses of the insn. + ;; 00 - no memory references + ;; 10 - memory is read +-;; i10 - indexed memory is read ++;; i0 - indexed memory is read + ;; 01 - memory is written +-;; 0i1 - indexed memory is written ++;; 0i - indexed memory is written + ;; 11 - memory is read, memory is written +-;; i11 - indexed memory is read, memory is written +-;; 1i1 - memory is read, indexed memory is written +-;; +-;; unknown - should now occur on normal insn. +-;; ??? This attribute is implemented in C to spare genattrtab from +-;; ??? optimizing it. 
+-(define_attr "op_mem" "00, 10, i0, 01, 0i, 11, i1, 1i, unknown" +-; (cond [(and (eq_attr "opy_mem" "r") (eq_attr "opx_mem" "r")) +-; (const_string "00") +-; +-; (and (eq_attr "opy_mem" "r") (eq_attr "opx_mem" "m")) +-; (cond [(eq_attr "opx_access" "r") (const_string "10") +-; (eq_attr "opx_access" "w") (const_string "01") +-; (eq_attr "opx_access" "rw") (const_string "11")] +-; (const_string "unknown")) +-; +-; (and (eq_attr "opy_mem" "r") (eq_attr "opx_mem" "i")) +-; (cond [(eq_attr "opx_access" "r") (const_string "i0") +-; (eq_attr "opx_access" "w") (const_string "0i") +-; (eq_attr "opx_access" "rw") (const_string "i1")] +-; (const_string "unknown")) +-; +-; (and (eq_attr "opy_mem" "m") (eq_attr "opx_mem" "r")) +-; (const_string "10") +-; +-; (and (eq_attr "opy_mem" "m") (eq_attr "opx_mem" "m")) +-; (cond [(eq_attr "opx_access" "w") (const_string "11")] +-; (const_string "unknown")) +-; +-; (and (eq_attr "opy_mem" "m") (eq_attr "opx_mem" "i")) +-; (cond [(eq_attr "opx_access" "w") (const_string "1i")] +-; (const_string "unknown")) +-; +-; (and (eq_attr "opy_mem" "i") (eq_attr "opx_mem" "r")) +-; (const_string "i0") +-; +-; (and (eq_attr "opy_mem" "i") (eq_attr "opx_mem" "m")) +-; (cond [(eq_attr "opx_access" "w") (const_string "i1")] +-; (const_string "unknown"))] +-; (const_string "unknown")) ++;; i1 - indexed memory is read, memory is written ++;; 1i - memory is read, indexed memory is written ++(define_attr "op_mem" "00, 10, i0, 01, 0i, 11, i1, 1i" + (symbol_ref "m68k_sched_attr_op_mem (insn)")) + +-;; Attribute to support partial automata description. +-;; This attribute has value 'yes' for instructions that are not +-;; fully handled yet. +-(define_attr "guess" "yes, no" +- (cond [(ior (eq (symbol_ref "reload_completed") (const_int 0)) +- (eq_attr "type1" "unknown")) +- (const_string "yes")] +- (const_string "no"))) +- +-;; Attribute to support statistics gathering. +-;; Todo means that insn lacks something to get pipeline description. +-;; Done means that insn was transformed to suit pipeline description. +-;; Nothing means that insn was originally good enough for scheduling. +-(define_attr "split" "todo, done, nothing" +- (if_then_else (eq_attr "type" "unknown") +- (const_string "todo") +- (const_string "nothing"))) ++;; Instruction size in words. ++(define_attr "size" "1,2,3" ++ (symbol_ref "m68k_sched_attr_size (insn)")) ++ + + ;; Mode macros for floating point operations. + ;; Valid floating point modes +@@ -364,8 +274,7 @@ + m68k_emit_move_double (operands); + DONE; + } +- [(set_attr "type" "fmove,*") +- (set_attr "split" "done,*")]) ++ [(set_attr "type" "fmove,*")]) + + (define_insn_and_split "pushdi" + [(set (match_operand:DI 0 "push_operand" "=m") +@@ -445,7 +354,7 @@ + "@ + tst%.l %0 + cmp%.w #0,%0" +- [(set_attr "type" "tst_l,*")]) ++ [(set_attr "type" "tst_l,cmp")]) + + ;; This can't use an address register, because comparisons + ;; with address registers as second operand always test the whole word. +@@ -460,7 +369,7 @@ + (match_operand:HI 0 "nonimmediate_operand" "dm"))] + "" + "tst%.w %0" +- [(set_attr "type" "tst_w")]) ++ [(set_attr "type" "tst")]) + + (define_expand "tstqi" + [(set (cc0) +@@ -473,7 +382,7 @@ + (match_operand:QI 0 "nonimmediate_operand" "dm"))] + "" + "tst%.b %0" +- [(set_attr "type" "tst_b")]) ++ [(set_attr "type" "tst")]) + + (define_expand "tst" + [(set (cc0) +@@ -492,11 +401,12 @@ + if (FP_REG_P (operands[0])) + return "ftst%.x %0"; + return "ftst%. 
%0"; +-}) ++} ++ [(set_attr "type" "ftst")]) + + (define_insn "tst_cf" + [(set (cc0) +- (match_operand:FP 0 "general_operand" "fU"))] ++ (match_operand:FP 0 "general_operand" "fm"))] + "TARGET_COLDFIRE_FPU" + { + cc_status.flags = CC_IN_68881; +@@ -514,15 +424,15 @@ + [(set (cc0) + (compare (match_operand:DI 0 "nonimmediate_operand" "") + (match_operand:DI 1 "general_operand" ""))) +- (clobber (match_dup 2))])] ++ (clobber (match_scratch:DI 2 ""))])] + "" +- "m68k_last_compare_had_fp_operands = 0; operands[2] = gen_reg_rtx (DImode);") ++ "m68k_last_compare_had_fp_operands = 0;") + + (define_insn "" + [(set (cc0) + (compare (match_operand:DI 1 "nonimmediate_operand" "0,d") + (match_operand:DI 2 "general_operand" "d,0"))) +- (clobber (match_operand:DI 0 "register_operand" "=d,d"))] ++ (clobber (match_scratch:DI 0 "=d,d"))] + "" + { + if (rtx_equal_p (operands[0], operands[1])) +@@ -600,7 +510,7 @@ + if ((REG_P (operands[1]) && !ADDRESS_REG_P (operands[1])) + || (!REG_P (operands[0]) && GET_CODE (operands[0]) != MEM)) + { +- cc_status.flags |= CC_REVERSED; ++ cc_status.flags |= CC_REVERSED; /*|*/ + return "cmp%.w %d0,%d1"; + } + return "cmp%.w %d1,%d0"; +@@ -652,8 +562,8 @@ + + (define_insn "*cmp_cf" + [(set (cc0) +- (compare (match_operand:FP 0 "fp_src_operand" "f,f,U") +- (match_operand:FP 1 "fp_src_operand" "f,U,f")))] ++ (compare (match_operand:FP 0 "fp_src_operand" "f,f,m") ++ (match_operand:FP 1 "fp_src_operand" "f,m,f")))] + "TARGET_COLDFIRE_FPU + && (register_operand (operands[0], mode) + || register_operand (operands[1], mode))" +@@ -792,8 +702,7 @@ + clr%.l %0 + mov3q%.l %1,%- + pea %a1" +- [(set_attr "type" "clr_l,mov3q_l,pea") +- (set_attr "split" "done")]) ++ [(set_attr "type" "clr_l,mov3q_l,pea")]) + + ;This is never used. + ;(define_insn "swapsi" +@@ -813,9 +722,8 @@ + moveq #0,%0 + sub%.l %0,%0 + clr%.l %0" +- [(set_attr "type" "moveq_l,sub_l,clr_l") +- (set_attr "opy_type" "imm_q,reg,*") +- (set_attr "split" "done")]) ++ [(set_attr "type" "moveq_l,alu_l,clr_l") ++ (set_attr "opy" "*,0,*")]) + + ;; Special case of fullword move when source is zero for 68040_60. + ;; On the '040, 'subl an,an' takes 2 clocks while lea takes only 1 +@@ -834,9 +742,7 @@ + return ""; + } + } +- [(set_attr "type" "lea,clr_l") +- (set_attr "opy_type" "imm_w,*") +- (set_attr "split" "done")]) ++ [(set_attr "type" "lea,clr_l")]) + + ;; Special case of fullword move when source is zero. + (define_insn "*movsi_const0" +@@ -846,9 +752,8 @@ + "@ + sub%.l %0,%0 + clr%.l %0" +- [(set_attr "type" "sub_l,clr_l") +- (set_attr "opy_type" "reg,*") +- (set_attr "split" "done")]) ++ [(set_attr "type" "alu_l,clr_l") ++ (set_attr "opy" "0,*")]) + + ;; General case of fullword move. + ;; +@@ -866,7 +771,41 @@ + { + rtx tmp, base, offset; + +- if (flag_pic && !TARGET_PCREL && symbolic_operand (operands[1], SImode)) ++ /* Recognize the case where operand[1] is a reference to thread-local ++ data and load its address to a register. 
*/ ++ if (!TARGET_PCREL && m68k_tls_referenced_p (operands[1])) ++ { ++ rtx tmp = operands[1]; ++ rtx addend = NULL; ++ ++ if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS) ++ { ++ addend = XEXP (XEXP (tmp, 0), 1); ++ tmp = XEXP (XEXP (tmp, 0), 0); ++ } ++ ++ gcc_assert (GET_CODE (tmp) == SYMBOL_REF); ++ gcc_assert (SYMBOL_REF_TLS_MODEL (tmp) != 0); ++ ++ tmp = m68k_legitimize_tls_address (tmp); ++ ++ if (addend) ++ { ++ if (!REG_P (tmp)) ++ { ++ rtx reg; ++ ++ reg = gen_reg_rtx (Pmode); ++ emit_move_insn (reg, tmp); ++ tmp = reg; ++ } ++ ++ tmp = gen_rtx_PLUS (SImode, tmp, addend); ++ } ++ ++ operands[1] = tmp; ++ } ++ else if (flag_pic && !TARGET_PCREL && symbolic_operand (operands[1], SImode)) + { + /* The source is an address which requires PIC relocation. + Call legitimize_pic_address with the source, mode, and a relocation +@@ -973,11 +912,7 @@ + return ""; + } + } +- [(set_attr "type" "mov3q_l, moveq_l,*, mvz_w, mvs_w, move_l, move_w, pea, lea, move_l, move_l, move_l") +- (set (attr "split") +- (if_then_else (eq_attr "alternative" "2") +- (const_string "*") +- (const_string "done")))]) ++ [(set_attr "type" "mov3q_l,moveq_l,*,mvsz,mvsz,move_l,move,pea,lea,move_l,move_l,move_l")]) + + ;; Special case of fullword move, where we need to get a non-GOT PIC + ;; reference into an address register. +@@ -1066,8 +1001,7 @@ + clr%.b %0 + move%.b %1,%0 + move%.b %1,%0" +- [(set_attr "type" "clr_b,clr_b,move_b,move_b") +- (set_attr "split" "done")]) ++ [(set_attr "type" "clr,clr,move,move")]) + + (define_expand "pushqi1" + [(set (reg:SI SP_REG) (plus:SI (reg:SI SP_REG) (const_int -2))) +@@ -1162,10 +1096,8 @@ + ;; SFmode MEMs are restricted to modes 2-4 if TARGET_COLDFIRE_FPU. + ;; The move instructions can handle all combinations. + (define_insn "movsf_cf_hard" +- [(set (match_operand:SF 0 "nonimmediate_operand" "=rU, f, f,mr,f,r,f +-,m") +- (match_operand:SF 1 "general_operand" " f, rU,f,rm,F,F, m +-,f"))] ++ [(set (match_operand:SF 0 "nonimmediate_operand" "=rm,f, f,rm,f,r,f,m") ++ (match_operand:SF 1 "general_operand" " f, rm,f,rm,F,F, m,f"))] + "TARGET_COLDFIRE_FPU" + { + if (which_alternative == 4 || which_alternative == 5) { +@@ -1307,8 +1239,8 @@ + }) + + (define_insn "movdf_cf_hard" +- [(set (match_operand:DF 0 "nonimmediate_operand" "=f, U,r,f,r,r,m,f") +- (match_operand:DF 1 "general_operand" " fU,f, f,r,r,m,r,E"))] ++ [(set (match_operand:DF 0 "nonimmediate_operand" "=f, m,r,f,r,r,m,f") ++ (match_operand:DF 1 "general_operand" " fm,f,f,r,r,m,r,E"))] + "TARGET_COLDFIRE_FPU" + { + rtx xoperands[3]; +@@ -1688,7 +1620,7 @@ + (zero_extend:SI (match_operand:HI 1 "nonimmediate_src_operand" "rmS")))] + "ISA_HAS_MVS_MVZ" + "mvz%.w %1,%0" +- [(set_attr "type" "mvz_w")]) ++ [(set_attr "type" "mvsz")]) + + (define_insn "zero_extendhisi2" + [(set (match_operand:SI 0 "register_operand" "=d") +@@ -1713,7 +1645,7 @@ + (zero_extend:SI (match_operand:QI 1 "nonimmediate_src_operand" "dmS")))] + "ISA_HAS_MVS_MVZ" + "mvz%.b %1,%0" +- [(set_attr "type" "mvz_b")]) ++ [(set_attr "type" "mvsz")]) + + (define_insn "zero_extendqisi2" + [(set (match_operand:SI 0 "register_operand" "=d") +@@ -1794,28 +1726,73 @@ + return "move%.w %1,%2\;ext%.l %2\;smi %0\;ext%.w %0\;ext%.l %0"; + }) + +-(define_insn "extendsidi2" +- [(set (match_operand:DI 0 "register_operand" "=d") +- (sign_extend:DI (match_operand:SI 1 "nonimmediate_src_operand" "rm")))] +- "" ++(define_expand "extendsidi2" ++ [(parallel ++ [(set (match_operand:DI 0 "nonimmediate_operand") ++ (sign_extend:DI (match_operand:SI 1 
"nonimmediate_src_operand"))) ++ (clobber (match_scratch:SI 2 "")) ++ (clobber (match_scratch:SI 3 ""))])]) ++ ++(define_insn "*extendsidi2_m68k" ++ [(set (match_operand:DI 0 "nonimmediate_operand" "=d,<,o") ++ (sign_extend:DI ++ (match_operand:SI 1 "nonimmediate_src_operand" "rm,rm,rm"))) ++ (clobber (match_scratch:SI 2 "=X,d,d")) ++ (clobber (match_scratch:SI 3 "=X,X,X"))] ++ "!TARGET_COLDFIRE" + { + CC_STATUS_INIT; ++ ++ if (which_alternative == 0) ++ /* Handle alternative 0. */ ++ { ++ if (TARGET_68020 || TARGET_COLDFIRE) ++ return "move%.l %1,%R0\;smi %0\;extb%.l %0"; ++ else ++ return "move%.l %1,%R0\;smi %0\;ext%.w %0\;ext%.l %0"; ++ } ++ ++ /* Handle alternatives 1 and 2. We don't need to adjust address by 4 ++ in alternative 1 because autodecrement will do that for us. */ ++ operands[3] = adjust_address (operands[0], SImode, ++ which_alternative == 1 ? 0 : 4); ++ operands[0] = adjust_address (operands[0], SImode, 0); ++ + if (TARGET_68020 || TARGET_COLDFIRE) +- return "move%.l %1,%R0\;smi %0\;extb%.l %0"; ++ return "move%.l %1,%3\;smi %2\;extb%.l %2\;move%.l %2,%0"; + else +- return "move%.l %1,%R0\;smi %0\;ext%.w %0\;ext%.l %0"; ++ return "move%.l %1,%3\;smi %2\;ext%.w %2\;ext%.l %2\;move%.l %2,%0"; + }) + +-(define_insn "*extendsidi2_mem" +- [(set (match_operand:DI 0 "memory_operand" "=o,<") +- (sign_extend:DI (match_operand:SI 1 "nonimmediate_src_operand" "rm,rm"))) +- (clobber (match_scratch:SI 2 "=d,d"))] +- "" ++;; This is a copy of extendsidi2_m68k except for that we can't ++;; fully handle the last alternative on ColdFire. ++;; FIXME: when 'enabled' attribute is available (in GCC 4.4) merge the ++;; two define_insns. ++(define_insn "*extendsidi2_cf" ++ [(set (match_operand:DI 0 "nonimmediate_operand" "=d,<,o") ++ (sign_extend:DI ++ (match_operand:SI 1 "nonimmediate_src_operand" "rm,rm,r"))) ++ (clobber (match_scratch:SI 2 "=X,d,d")) ++ (clobber (match_scratch:SI 3 "=X,X,X"))] ++ "TARGET_COLDFIRE" + { + CC_STATUS_INIT; ++ ++ if (which_alternative == 0) ++ /* Handle alternative 0. */ ++ { ++ if (TARGET_68020 || TARGET_COLDFIRE) ++ return "move%.l %1,%R0\;smi %0\;extb%.l %0"; ++ else ++ return "move%.l %1,%R0\;smi %0\;ext%.w %0\;ext%.l %0"; ++ } ++ ++ /* Handle alternatives 1 and 2. We don't need to adjust address by 4 ++ in alternative 1 because autodecrement will do that for us. */ + operands[3] = adjust_address (operands[0], SImode, +- which_alternative == 0 ? 4 : 0); ++ which_alternative == 1 ? 
0 : 4); + operands[0] = adjust_address (operands[0], SImode, 0); ++ + if (TARGET_68020 || TARGET_COLDFIRE) + return "move%.l %1,%3\;smi %2\;extb%.l %2\;move%.l %2,%0"; + else +@@ -1866,7 +1843,7 @@ + (match_operand:HI 1 "nonimmediate_src_operand" "rmS")))] + "ISA_HAS_MVS_MVZ" + "mvs%.w %1,%0" +- [(set_attr "type" "mvs_w")]) ++ [(set_attr "type" "mvsz")]) + + (define_insn "*68k_extendhisi2" + [(set (match_operand:SI 0 "nonimmediate_operand" "=*d,a") +@@ -1876,14 +1853,14 @@ + "@ + ext%.l %0 + move%.w %1,%0" +- [(set_attr "type" "ext_l,move_w")]) ++ [(set_attr "type" "ext,move")]) + + (define_insn "extendqihi2" + [(set (match_operand:HI 0 "nonimmediate_operand" "=d") + (sign_extend:HI (match_operand:QI 1 "nonimmediate_operand" "0")))] + "" + "ext%.w %0" +- [(set_attr "type" "ext_w")]) ++ [(set_attr "type" "ext")]) + + (define_expand "extendqisi2" + [(set (match_operand:SI 0 "nonimmediate_operand" "") +@@ -1896,14 +1873,14 @@ + (sign_extend:SI (match_operand:QI 1 "nonimmediate_operand" "rms")))] + "ISA_HAS_MVS_MVZ" + "mvs%.b %1,%0" +- [(set_attr "type" "mvs_b")]) ++ [(set_attr "type" "mvsz")]) + + (define_insn "*68k_extendqisi2" + [(set (match_operand:SI 0 "nonimmediate_operand" "=d") + (sign_extend:SI (match_operand:QI 1 "nonimmediate_operand" "0")))] + "TARGET_68020 || (TARGET_COLDFIRE && !ISA_HAS_MVS_MVZ)" + "extb%.l %0" +- [(set_attr "type" "extb_l")]) ++ [(set_attr "type" "ext")]) + + ;; Conversions between float and double. + +@@ -1946,7 +1923,7 @@ + (define_insn "extendsfdf2_cf" + [(set (match_operand:DF 0 "nonimmediate_operand" "=f,f") + (float_extend:DF +- (match_operand:SF 1 "general_operand" "f,U")))] ++ (match_operand:SF 1 "general_operand" "f,m")))] + "TARGET_COLDFIRE_FPU" + { + if (FP_REG_P (operands[0]) && FP_REG_P (operands[1])) +@@ -1986,9 +1963,9 @@ + }) + + (define_insn "truncdfsf2_cf" +- [(set (match_operand:SF 0 "nonimmediate_operand" "=f,dU") ++ [(set (match_operand:SF 0 "nonimmediate_operand" "=f,dm") + (float_truncate:SF +- (match_operand:DF 1 "general_operand" "U,f")))] ++ (match_operand:DF 1 "general_operand" "m,f")))] + "TARGET_COLDFIRE_FPU" + "@ + fsmove%.d %1,%0 +@@ -2021,7 +1998,8 @@ + [(set (match_operand:FP 0 "nonimmediate_operand" "=f") + (float:FP (match_operand:SI 1 "general_operand" "dmi")))] + "TARGET_68881" +- "fmove%.l %1,%0") ++ "fmove%.l %1,%0" ++ [(set_attr "type" "fmove")]) + + (define_insn "floatsi2_cf" + [(set (match_operand:FP 0 "nonimmediate_operand" "=f") +@@ -2128,18 +2106,19 @@ + if (FP_REG_P (operands[1])) + return "fintrz%.x %f1,%0"; + return "fintrz%. %f1,%0"; +-}) ++} ++ [(set_attr "type" "falu")]) + + (define_insn "ftrunc2_cf" + [(set (match_operand:FP 0 "nonimmediate_operand" "=f") +- (fix:FP (match_operand:FP 1 "general_operand" "fU")))] ++ (fix:FP (match_operand:FP 1 "general_operand" "fm")))] + "TARGET_COLDFIRE_FPU" + { + if (FP_REG_P (operands[1])) + return "fintrz%.d %f1,%0"; + return "fintrz%. %f1,%0"; + } +- [(set_attr "type" "fintrz")]) ++ [(set_attr "type" "falu")]) + + ;; Convert a float whose value is an integer + ;; to an actual integer. Second stage of converting float to integer type. 
+@@ -2153,7 +2132,8 @@ + [(set (match_operand:QI 0 "nonimmediate_operand" "=dm") + (fix:QI (match_operand:FP 1 "general_operand" "f")))] + "TARGET_68881" +- "fmove%.b %1,%0") ++ "fmove%.b %1,%0" ++ [(set_attr "type" "fmove")]) + + (define_insn "fixqi2_cf" + [(set (match_operand:QI 0 "nonimmediate_operand" "=dU") +@@ -2172,7 +2152,8 @@ + [(set (match_operand:HI 0 "nonimmediate_operand" "=dm") + (fix:HI (match_operand:FP 1 "general_operand" "f")))] + "TARGET_68881" +- "fmove%.w %1,%0") ++ "fmove%.w %1,%0" ++ [(set_attr "type" "fmove")]) + + (define_insn "fixhi2_cf" + [(set (match_operand:HI 0 "nonimmediate_operand" "=dU") +@@ -2191,7 +2172,8 @@ + [(set (match_operand:SI 0 "nonimmediate_operand" "=dm") + (fix:SI (match_operand:FP 1 "general_operand" "f")))] + "TARGET_68881" +- "fmove%.l %1,%0") ++ "fmove%.l %1,%0" ++ [(set_attr "type" "fmove")]) + + (define_insn "fixsi2_cf" + [(set (match_operand:SI 0 "nonimmediate_operand" "=dU") +@@ -2297,7 +2279,7 @@ + operands[1] = adjust_address (operands[1], SImode, 4); + return "add%.l %1,%0"; + } +- [(set_attr "type" "add_l")]) ++ [(set_attr "type" "alu_l")]) + + (define_insn "adddi3" + [(set (match_operand:DI 0 "nonimmediate_operand" "=o<>,d,d,d") +@@ -2376,12 +2358,44 @@ + } + }) + +-(define_insn "addsi_lshrsi_31" ++(define_expand "addsi_lshrsi_31" ++ [(set (match_operand:SI 0 "nonimmediate_operand") ++ (plus:SI (lshiftrt:SI (match_operand:SI 1 "general_operand") ++ (const_int 31)) ++ (match_dup 1)))]) ++ ++(define_insn "*addsi_lshrsi_31_m68k" + [(set (match_operand:SI 0 "nonimmediate_operand" "=dm") +- (plus:SI (lshiftrt:SI (match_operand:SI 1 "general_operand" "rm") +- (const_int 31)) +- (match_dup 1)))] +- "" ++ (plus:SI (lshiftrt:SI (match_operand:SI 1 "general_operand" "rm") ++ (const_int 31)) ++ (match_dup 1)))] ++ "!TARGET_COLDFIRE" ++{ ++ operands[2] = operands[0]; ++ operands[3] = gen_label_rtx(); ++ if (GET_CODE (operands[0]) == MEM) ++ { ++ if (GET_CODE (XEXP (operands[0], 0)) == POST_INC) ++ operands[0] = gen_rtx_MEM (SImode, XEXP (XEXP (operands[0], 0), 0)); ++ else if (GET_CODE (XEXP (operands[0], 0)) == PRE_DEC) ++ operands[2] = gen_rtx_MEM (SImode, XEXP (XEXP (operands[0], 0), 0)); ++ } ++ output_asm_insn ("move%.l %1,%0", operands); ++ output_asm_insn ("jpl %l3", operands); ++ output_asm_insn ("addq%.l #1,%2", operands); ++ (*targetm.asm_out.internal_label) (asm_out_file, "L", ++ CODE_LABEL_NUMBER (operands[3])); ++ return ""; ++}) ++ ++;; FIXME: When 'enabled' attribute is available (in GCC 4.4) merge ++;; this with previous pattern. 
++(define_insn "*addsi_lshrsi_31_cf" ++ [(set (match_operand:SI 0 "nonimmediate_operand" "=dm,d") ++ (plus:SI (lshiftrt:SI (match_operand:SI 1 "general_operand" "r,rm") ++ (const_int 31)) ++ (match_dup 1)))] ++ "TARGET_COLDFIRE" + { + operands[2] = operands[0]; + operands[3] = gen_label_rtx(); +@@ -2421,9 +2435,9 @@ + "* return output_addsi3 (operands);") + + (define_insn_and_split "*addsi3_5200" +- [(set (match_operand:SI 0 "nonimmediate_operand" "=mr,mr,m,r, ?a,?a,?a,?a") +- (plus:SI (match_operand:SI 1 "general_operand" "%0, 0, 0,0, a, a, r, a") +- (match_operand:SI 2 "general_src_operand" " I, L, d,mrKi,Cj,r, a, J")))] ++ [(set (match_operand:SI 0 "nonimmediate_operand" "=mr,mr,a, m,r, ?a, ?a,?a,?a") ++ (plus:SI (match_operand:SI 1 "general_operand" "%0, 0, 0, 0,0, a, a, r, a") ++ (match_operand:SI 2 "general_src_operand" " I, L, JCu,d,mrKi,Cj, r, a, JCu")))] + "TARGET_COLDFIRE" + { + switch (which_alternative) +@@ -2435,21 +2449,22 @@ + operands[2] = GEN_INT (- INTVAL (operands[2])); + return "subq%.l %2,%0"; + +- case 2: + case 3: ++ case 4: + return "add%.l %2,%0"; + +- case 4: ++ case 5: + /* move%.l %2,%0\n\tadd%.l %1,%0 */ + return "#"; + +- case 5: ++ case 6: + return MOTOROLA ? "lea (%1,%2.l),%0" : "lea %1@(0,%2:l),%0"; + +- case 6: ++ case 7: + return MOTOROLA ? "lea (%2,%1.l),%0" : "lea %2@(0,%1:l),%0"; + +- case 7: ++ case 2: ++ case 8: + return MOTOROLA ? "lea (%c2,%1),%0" : "lea %1@(%c2),%0"; + + default: +@@ -2457,17 +2472,16 @@ + return ""; + } + } +- "&& reload_completed && (extract_constrain_insn_cached (insn), which_alternative == 4) && !operands_match_p (operands[0], operands[1])" ++ "&& reload_completed && (extract_constrain_insn_cached (insn), which_alternative == 5) && !operands_match_p (operands[0], operands[1])" + [(set (match_dup 0) + (match_dup 2)) + (set (match_dup 0) + (plus:SI (match_dup 0) + (match_dup 1)))] + "" +- [(set_attr "type" "addq_l,subq_l,add_l,add_l,*,lea,lea,lea") +- (set_attr "opy" "2,2,2,2,*,*,*,*") +- (set_attr "opy_type" "*,*,*,*,*,mem6,mem6,mem5") +- (set_attr "split" "done,done,done,done,*,done,done,done")]) ++ [(set_attr "type" "aluq_l,aluq_l,lea, alu_l,alu_l,*,lea, lea, lea") ++ (set_attr "opy" "2, 2, *, 2, 2, *,*, *, *") ++ (set_attr "opy_type" "*, *, mem5,*, *, *,mem6,mem6,mem5")]) + + (define_insn "" + [(set (match_operand:SI 0 "nonimmediate_operand" "=a") +@@ -2711,21 +2725,27 @@ + (plus:FP (float:FP (match_operand:SI 2 "general_operand" "dmi")) + (match_operand:FP 1 "general_operand" "0")))] + "TARGET_68881" +- "fadd%.l %2,%0") ++ "fadd%.l %2,%0" ++ [(set_attr "type" "falu") ++ (set_attr "opy" "2")]) + + (define_insn "add3_floathi_68881" + [(set (match_operand:FP 0 "nonimmediate_operand" "=f") + (plus:FP (float:FP (match_operand:HI 2 "general_operand" "dmn")) + (match_operand:FP 1 "general_operand" "0")))] + "TARGET_68881" +- "fadd%.w %2,%0") ++ "fadd%.w %2,%0" ++ [(set_attr "type" "falu") ++ (set_attr "opy" "2")]) + + (define_insn "add3_floatqi_68881" + [(set (match_operand:FP 0 "nonimmediate_operand" "=f") + (plus:FP (float:FP (match_operand:QI 2 "general_operand" "dmn")) + (match_operand:FP 1 "general_operand" "0")))] + "TARGET_68881" +- "fadd%.b %2,%0") ++ "fadd%.b %2,%0" ++ [(set_attr "type" "falu") ++ (set_attr "opy" "2")]) + + (define_insn "add3_68881" + [(set (match_operand:FP 0 "nonimmediate_operand" "=f") +@@ -2736,19 +2756,22 @@ + if (FP_REG_P (operands[2])) + return "fadd%.x %2,%0"; + return "fadd%. 
%f2,%0"; +-}) ++} ++ [(set_attr "type" "falu") ++ (set_attr "opy" "2")]) + + (define_insn "add3_cf" + [(set (match_operand:FP 0 "nonimmediate_operand" "=f") + (plus:FP (match_operand:FP 1 "general_operand" "%0") +- (match_operand:FP 2 "general_operand" "fU")))] ++ (match_operand:FP 2 "general_operand" "fm")))] + "TARGET_COLDFIRE_FPU" + { + if (FP_REG_P (operands[2])) + return "fadd%.d %2,%0"; + return "fadd%. %2,%0"; + } +- [(set_attr "type" "fadd")]) ++ [(set_attr "type" "falu") ++ (set_attr "opy" "2")]) + + ;; subtract instructions + +@@ -2783,7 +2806,7 @@ + operands[1] = adjust_address (operands[1], SImode, 4); + return "sub%.l %1,%0"; + } +- [(set_attr "type" "sub_l")]) ++ [(set_attr "type" "alu_l")]) + + (define_insn "subdi3" + [(set (match_operand:DI 0 "nonimmediate_operand" "=o<>,d,d,d") +@@ -2874,7 +2897,7 @@ + sub%.l %2,%0 + sub%.l %2,%0 + sub%.l %2,%0" +- [(set_attr "type" "subq_l,sub_l,sub_l,sub_l") ++ [(set_attr "type" "aluq_l,alu_l,alu_l,alu_l") + (set_attr "opy" "2")]) + + (define_insn "" +@@ -2925,21 +2948,27 @@ + (minus:FP (match_operand:FP 1 "general_operand" "0") + (float:FP (match_operand:SI 2 "general_operand" "dmi"))))] + "TARGET_68881" +- "fsub%.l %2,%0") ++ "fsub%.l %2,%0" ++ [(set_attr "type" "falu") ++ (set_attr "opy" "2")]) + + (define_insn "sub3_floathi_68881" + [(set (match_operand:FP 0 "nonimmediate_operand" "=f") + (minus:FP (match_operand:FP 1 "general_operand" "0") + (float:FP (match_operand:HI 2 "general_operand" "dmn"))))] + "TARGET_68881" +- "fsub%.w %2,%0") ++ "fsub%.w %2,%0" ++ [(set_attr "type" "falu") ++ (set_attr "opy" "2")]) + + (define_insn "sub3_floatqi_68881" + [(set (match_operand:FP 0 "nonimmediate_operand" "=f") + (minus:FP (match_operand:FP 1 "general_operand" "0") + (float:FP (match_operand:QI 2 "general_operand" "dmn"))))] + "TARGET_68881" +- "fsub%.b %2,%0") ++ "fsub%.b %2,%0" ++ [(set_attr "type" "falu") ++ (set_attr "opy" "2")]) + + (define_insn "sub3_68881" + [(set (match_operand:FP 0 "nonimmediate_operand" "=f") +@@ -2950,19 +2979,22 @@ + if (FP_REG_P (operands[2])) + return "fsub%.x %2,%0"; + return "fsub%. %f2,%0"; +-}) ++} ++ [(set_attr "type" "falu") ++ (set_attr "opy" "2")]) + + (define_insn "sub3_cf" + [(set (match_operand:FP 0 "nonimmediate_operand" "=f") + (minus:FP (match_operand:FP 1 "general_operand" "0") +- (match_operand:FP 2 "general_operand" "fU")))] ++ (match_operand:FP 2 "general_operand" "fm")))] + "TARGET_COLDFIRE_FPU" + { + if (FP_REG_P (operands[2])) + return "fsub%.d %2,%0"; + return "fsub%. %2,%0"; + } +- [(set_attr "type" "fsub")]) ++ [(set_attr "type" "falu") ++ (set_attr "opy" "2")]) + + ;; multiply instructions + +@@ -2974,7 +3006,7 @@ + { + return MOTOROLA ? "muls%.w %2,%0" : "muls %2,%0"; + } +- [(set_attr "type" "muls_w") ++ [(set_attr "type" "mul_w") + (set_attr "opy" "2")]) + + (define_insn "mulhisi3" +@@ -2987,7 +3019,7 @@ + { + return MOTOROLA ? "muls%.w %2,%0" : "muls %2,%0"; + } +- [(set_attr "type" "muls_w") ++ [(set_attr "type" "mul_w") + (set_attr "opy" "2")]) + + (define_insn "*mulhisisi3_s" +@@ -2999,7 +3031,7 @@ + { + return MOTOROLA ? 
"muls%.w %2,%0" : "muls %2,%0"; + } +- [(set_attr "type" "muls_w") ++ [(set_attr "type" "mul_w") + (set_attr "opy" "2")]) + + (define_expand "mulsi3" +@@ -3016,7 +3048,7 @@ + + "TARGET_68020" + "muls%.l %2,%0" +- [(set_attr "type" "muls_l") ++ [(set_attr "type" "mul_l") + (set_attr "opy" "2")]) + + (define_insn "*mulsi3_cf" +@@ -3025,7 +3057,7 @@ + (match_operand:SI 2 "general_operand" "d")))] + "TARGET_COLDFIRE" + "muls%.l %2,%0" +- [(set_attr "type" "muls_l") ++ [(set_attr "type" "mul_l") + (set_attr "opy" "2")]) + + (define_insn "umulhisi3" +@@ -3038,7 +3070,7 @@ + { + return MOTOROLA ? "mulu%.w %2,%0" : "mulu %2,%0"; + } +- [(set_attr "type" "mulu_w") ++ [(set_attr "type" "mul_w") + (set_attr "opy" "2")]) + + (define_insn "*mulhisisi3_z" +@@ -3050,7 +3082,7 @@ + { + return MOTOROLA ? "mulu%.w %2,%0" : "mulu %2,%0"; + } +- [(set_attr "type" "mulu_w") ++ [(set_attr "type" "mul_w") + (set_attr "opy" "2")]) + + ;; We need a separate DEFINE_EXPAND for u?mulsidi3 to be able to use the +@@ -3235,7 +3267,9 @@ + return TARGET_68040 + ? "fmul%.l %2,%0" + : "fmul%.l %2,%0"; +-}) ++} ++ [(set_attr "type" "fmul") ++ (set_attr "opy" "2")]) + + (define_insn "mul3_floathi_68881" + [(set (match_operand:FP 0 "nonimmediate_operand" "=f") +@@ -3246,7 +3280,9 @@ + return TARGET_68040 + ? "fmul%.w %2,%0" + : "fmul%.w %2,%0"; +-}) ++} ++ [(set_attr "type" "fmul") ++ (set_attr "opy" "2")]) + + (define_insn "mul3_floatqi_68881" + [(set (match_operand:FP 0 "nonimmediate_operand" "=f") +@@ -3257,7 +3293,9 @@ + return TARGET_68040 + ? "fmul%.b %2,%0" + : "fmul%.b %2,%0"; +-}) ++} ++ [(set_attr "type" "fmul") ++ (set_attr "opy" "2")]) + + (define_insn "muldf_68881" + [(set (match_operand:DF 0 "nonimmediate_operand" "=f") +@@ -3304,14 +3342,15 @@ + (define_insn "fmul3_cf" + [(set (match_operand:FP 0 "nonimmediate_operand" "=f") + (mult:FP (match_operand:FP 1 "general_operand" "%0") +- (match_operand:FP 2 "general_operand" "fU")))] ++ (match_operand:FP 2 "general_operand" "fm")))] + "TARGET_COLDFIRE_FPU" + { + if (FP_REG_P (operands[2])) + return "fmul%.d %2,%0"; + return "fmul%. %2,%0"; + } +- [(set_attr "type" "fmul")]) ++ [(set_attr "type" "fmul") ++ (set_attr "opy" "2")]) + + ;; divide instructions + +@@ -3373,14 +3412,15 @@ + (define_insn "div3_cf" + [(set (match_operand:FP 0 "nonimmediate_operand" "=f") + (div:FP (match_operand:FP 1 "general_operand" "0") +- (match_operand:FP 2 "general_operand" "fU")))] ++ (match_operand:FP 2 "general_operand" "fm")))] + "TARGET_COLDFIRE_FPU" + { + if (FP_REG_P (operands[2])) + return "fdiv%.d %2,%0"; + return "fdiv%. %2,%0"; + } +- [(set_attr "type" "fdiv")]) ++ [(set_attr "type" "fdiv") ++ (set_attr "opy" "2")]) + + ;; Remainder instructions. 
+ +@@ -3408,7 +3448,9 @@ + return "rems%.l %2,%3:%0"; + else + return "rems%.l %2,%3:%0\;divs%.l %2,%0"; +-}) ++} ++ [(set_attr "type" "div_l") ++ (set_attr "opy" "2")]) + + (define_insn "" + [(set (match_operand:SI 0 "nonimmediate_operand" "=d") +@@ -3448,7 +3490,9 @@ + return "remu%.l %2,%3:%0"; + else + return "remu%.l %2,%3:%0\;divu%.l %2,%0"; +-}) ++} ++ [(set_attr "type" "div_l") ++ (set_attr "opy" "2")]) + + (define_insn "" + [(set (match_operand:SI 0 "nonimmediate_operand" "=d") +@@ -4216,7 +4260,7 @@ + + (define_insn "neg2_cf" + [(set (match_operand:FP 0 "nonimmediate_operand" "=f,d") +- (neg:FP (match_operand:FP 1 "general_operand" "fU,0")))] ++ (neg:FP (match_operand:FP 1 "general_operand" "fm,0")))] + "TARGET_COLDFIRE_FPU" + { + if (DATA_REG_P (operands[0])) +@@ -4250,13 +4294,14 @@ + + (define_insn "sqrt2_cf" + [(set (match_operand:FP 0 "nonimmediate_operand" "=f") +- (sqrt:FP (match_operand:FP 1 "general_operand" "fU")))] ++ (sqrt:FP (match_operand:FP 1 "general_operand" "fm")))] + "TARGET_COLDFIRE_FPU" + { + if (FP_REG_P (operands[1])) + return "fsqrt%.d %1,%0"; + return "fsqrt%. %1,%0"; +-}) ++} ++ [(set_attr "type" "fsqrt")]) + ;; Absolute value instructions + ;; If using software floating point, just zero the sign bit. + +@@ -4368,7 +4413,7 @@ + + (define_insn "abs2_cf" + [(set (match_operand:FP 0 "nonimmediate_operand" "=f,d") +- (abs:FP (match_operand:FP 1 "general_operand" "fU,0")))] ++ (abs:FP (match_operand:FP 1 "general_operand" "fm,0")))] + "TARGET_COLDFIRE_FPU" + { + if (DATA_REG_P (operands[0])) +@@ -4379,7 +4424,8 @@ + if (FP_REG_P (operands[1])) + return "fabs%.d %1,%0"; + return "fabs%. %1,%0"; +-}) ++} ++ [(set_attr "type" "bitrw,fneg")]) + + ;; bit indexing instructions + +@@ -4389,7 +4435,7 @@ + (clz:SI (match_operand:SI 1 "register_operand" "0")))] + "ISA_HAS_FF1" + "ff1 %0" +- [(set_attr "type" "ff1")]) ++ [(set_attr "type" "ext")]) + + ;; one complement instructions + +@@ -4433,7 +4479,7 @@ + (not:SI (match_operand:SI 1 "general_operand" "0")))] + "TARGET_COLDFIRE" + "not%.l %0" +- [(set_attr "type" "not_l")]) ++ [(set_attr "type" "neg_l")]) + + (define_insn "one_cmplhi2" + [(set (match_operand:HI 0 "nonimmediate_operand" "=dm") +@@ -4785,7 +4831,7 @@ + operands[1] = adjust_address (operands[1], HImode, 2); + return "move%.w %1,%0"; + } +- [(set_attr "type" "move_w")]) ++ [(set_attr "type" "move")]) + + (define_insn "subregsi1ashrdi_const32" + [(set (match_operand:SI 0 "nonimmediate_operand" "=rm") +@@ -4965,7 +5011,7 @@ + (match_operand:SI 2 "general_operand" "dI")))] + "" + "asr%.l %2,%0" +- [(set_attr "type" "asr_l") ++ [(set_attr "type" "shift") + (set_attr "opy" "2")]) + + (define_insn "ashrhi3" +@@ -5261,7 +5307,7 @@ + (match_operand:SI 2 "general_operand" "dI")))] + "" + "lsr%.l %2,%0" +- [(set_attr "type" "lsr_l") ++ [(set_attr "type" "shift") + (set_attr "opy" "2")]) + + (define_insn "lshrhi3" +@@ -5420,7 +5466,7 @@ + CC_STATUS_INIT; + return "bset %1,%0"; + } +- [(set_attr "type" "bset")]) ++ [(set_attr "type" "bitrw")]) + + ;; set bit, bit number is (sign/zero)_extended from HImode/QImode + (define_insn "*bsetmemqi_ext" +@@ -5434,7 +5480,7 @@ + CC_STATUS_INIT; + return "bset %1,%0"; + } +- [(set_attr "type" "bset")]) ++ [(set_attr "type" "bitrw")]) + + ;; clear bit, bit number is int + (define_insn "bclrmemqi" +@@ -5448,7 +5494,7 @@ + CC_STATUS_INIT; + return "bclr %1,%0"; + } +- [(set_attr "type" "bclr")]) ++ [(set_attr "type" "bitrw")]) + + ;; clear bit, bit number is (sign/zero)_extended from HImode/QImode + (define_insn "*bclrmemqi_ext" 
+@@ -5463,7 +5509,7 @@ + CC_STATUS_INIT; + return "bclr %1,%0"; + } +- [(set_attr "type" "bclr")]) ++ [(set_attr "type" "bitrw")]) + + ;; Special cases of bit-field insns which we should + ;; recognize in preference to the general case. +@@ -6413,8 +6459,7 @@ + { + OUTPUT_JUMP ("jeq %l0", "fjeq %l0", "jeq %l0"); + } +- [(set (attr "type") (symbol_ref "m68k_sched_branch_type (insn)")) +- (set_attr "split" "done")]) ++ [(set (attr "type") (symbol_ref "m68k_sched_branch_type (insn)"))]) + + (define_insn "bne" + [(set (pc) +@@ -6426,8 +6471,7 @@ + { + OUTPUT_JUMP ("jne %l0", "fjne %l0", "jne %l0"); + } +- [(set (attr "type") (symbol_ref "m68k_sched_branch_type (insn)")) +- (set_attr "split" "done")]) ++ [(set (attr "type") (symbol_ref "m68k_sched_branch_type (insn)"))]) + + (define_insn "bgt" + [(set (pc) +@@ -6437,10 +6481,15 @@ + (pc)))] + "" + { ++ if ((cc_status.flags & CC_OVERFLOW_UNUSABLE) != 0) ++ { ++ cc_status.flags &= ~CC_OVERFLOW_UNUSABLE; ++ return 0; ++ } ++ + OUTPUT_JUMP ("jgt %l0", "fjgt %l0", 0); + } +- [(set (attr "type") (symbol_ref "m68k_sched_branch_type (insn)")) +- (set_attr "split" "done")]) ++ [(set (attr "type") (symbol_ref "m68k_sched_branch_type (insn)"))]) + + (define_insn "bgtu" + [(set (pc) +@@ -6449,7 +6498,15 @@ + (label_ref (match_operand 0 "" "")) + (pc)))] + "" +- "jhi %l0" ++{ ++ if ((cc_status.flags & CC_OVERFLOW_UNUSABLE) != 0) ++ { ++ cc_status.flags &= ~CC_OVERFLOW_UNUSABLE; ++ return 0; ++ } ++ ++ return "jhi %l0"; ++} + [(set_attr "type" "bcc")]) + + (define_insn "blt" +@@ -6460,10 +6517,15 @@ + (pc)))] + "" + { ++ if ((cc_status.flags & CC_OVERFLOW_UNUSABLE) != 0) ++ { ++ cc_status.flags &= ~CC_OVERFLOW_UNUSABLE; ++ return 0; ++ } ++ + OUTPUT_JUMP ("jlt %l0", "fjlt %l0", "jmi %l0"); + } +- [(set (attr "type") (symbol_ref "m68k_sched_branch_type (insn)")) +- (set_attr "split" "done")]) ++ [(set (attr "type") (symbol_ref "m68k_sched_branch_type (insn)"))]) + + (define_insn "bltu" + [(set (pc) +@@ -6472,7 +6534,15 @@ + (label_ref (match_operand 0 "" "")) + (pc)))] + "" +- "jcs %l0" ++{ ++ if ((cc_status.flags & CC_OVERFLOW_UNUSABLE) != 0) ++ { ++ cc_status.flags &= ~CC_OVERFLOW_UNUSABLE; ++ return 0; ++ } ++ ++ return "jcs %l0"; ++} + [(set_attr "type" "bcc")]) + + (define_insn "bge" +@@ -6483,6 +6553,12 @@ + (pc)))] + "" + { ++ if ((cc_status.flags & CC_OVERFLOW_UNUSABLE) != 0) ++ { ++ cc_status.flags &= ~CC_OVERFLOW_UNUSABLE; ++ return 0; ++ } ++ + OUTPUT_JUMP ("jge %l0", "fjge %l0", "jpl %l0"); + }) + +@@ -6493,7 +6569,15 @@ + (label_ref (match_operand 0 "" "")) + (pc)))] + "" +- "jcc %l0" ++{ ++ if ((cc_status.flags & CC_OVERFLOW_UNUSABLE) != 0) ++ { ++ cc_status.flags &= ~CC_OVERFLOW_UNUSABLE; ++ return 0; ++ } ++ ++ return "jcc %l0"; ++} + [(set_attr "type" "bcc")]) + + (define_insn "ble" +@@ -6504,6 +6588,12 @@ + (pc)))] + "" + { ++ if ((cc_status.flags & CC_OVERFLOW_UNUSABLE) != 0) ++ { ++ cc_status.flags &= ~CC_OVERFLOW_UNUSABLE; ++ return 0; ++ } ++ + OUTPUT_JUMP ("jle %l0", "fjle %l0", 0); + } + [(set_attr "type" "bcc")]) +@@ -6515,7 +6605,15 @@ + (label_ref (match_operand 0 "" "")) + (pc)))] + "" +- "jls %l0" ++{ ++ if ((cc_status.flags & CC_OVERFLOW_UNUSABLE) != 0) ++ { ++ cc_status.flags &= ~CC_OVERFLOW_UNUSABLE; ++ return 0; ++ } ++ ++ return "jls %l0"; ++} + [(set_attr "type" "bcc")]) + + (define_insn "bordered" +@@ -6527,7 +6625,8 @@ + { + gcc_assert (cc_prev_status.flags & CC_IN_68881); + return "fjor %l0"; +-}) ++} ++ [(set_attr "type" "fbcc")]) + + (define_insn "bunordered" + [(set (pc) +@@ -6538,7 +6637,8 @@ + { + gcc_assert 
(cc_prev_status.flags & CC_IN_68881); + return "fjun %l0"; +-}) ++} ++ [(set_attr "type" "fbcc")]) + + (define_insn "buneq" + [(set (pc) +@@ -6549,7 +6649,8 @@ + { + gcc_assert (cc_prev_status.flags & CC_IN_68881); + return "fjueq %l0"; +-}) ++} ++ [(set_attr "type" "fbcc")]) + + (define_insn "bunge" + [(set (pc) +@@ -6560,7 +6661,8 @@ + { + gcc_assert (cc_prev_status.flags & CC_IN_68881); + return "fjuge %l0"; +-}) ++} ++ [(set_attr "type" "fbcc")]) + + (define_insn "bungt" + [(set (pc) +@@ -6571,7 +6673,8 @@ + { + gcc_assert (cc_prev_status.flags & CC_IN_68881); + return "fjugt %l0"; +-}) ++} ++ [(set_attr "type" "fbcc")]) + + (define_insn "bunle" + [(set (pc) +@@ -6582,7 +6685,8 @@ + { + gcc_assert (cc_prev_status.flags & CC_IN_68881); + return "fjule %l0"; +-}) ++} ++ [(set_attr "type" "fbcc")]) + + (define_insn "bunlt" + [(set (pc) +@@ -6593,7 +6697,8 @@ + { + gcc_assert (cc_prev_status.flags & CC_IN_68881); + return "fjult %l0"; +-}) ++} ++ [(set_attr "type" "fbcc")]) + + (define_insn "bltgt" + [(set (pc) +@@ -6604,7 +6709,8 @@ + { + gcc_assert (cc_prev_status.flags & CC_IN_68881); + return "fjogl %l0"; +-}) ++} ++ [(set_attr "type" "fbcc")]) + + ;; Negated conditional jump instructions. + +@@ -6640,6 +6746,12 @@ + (label_ref (match_operand 0 "" ""))))] + "" + { ++ if ((cc_status.flags & CC_OVERFLOW_UNUSABLE) != 0) ++ { ++ cc_status.flags &= ~CC_OVERFLOW_UNUSABLE; ++ return 0; ++ } ++ + OUTPUT_JUMP ("jle %l0", "fjngt %l0", 0); + } + [(set_attr "type" "bcc")]) +@@ -6651,7 +6763,15 @@ + (pc) + (label_ref (match_operand 0 "" ""))))] + "" +- "jls %l0" ++{ ++ if ((cc_status.flags & CC_OVERFLOW_UNUSABLE) != 0) ++ { ++ cc_status.flags &= ~CC_OVERFLOW_UNUSABLE; ++ return 0; ++ } ++ ++ return "jls %l0"; ++} + [(set_attr "type" "bcc")]) + + (define_insn "*blt_rev" +@@ -6662,6 +6782,12 @@ + (label_ref (match_operand 0 "" ""))))] + "" + { ++ if ((cc_status.flags & CC_OVERFLOW_UNUSABLE) != 0) ++ { ++ cc_status.flags &= ~CC_OVERFLOW_UNUSABLE; ++ return 0; ++ } ++ + OUTPUT_JUMP ("jge %l0", "fjnlt %l0", "jpl %l0"); + } + [(set_attr "type" "bcc")]) +@@ -6673,7 +6799,15 @@ + (pc) + (label_ref (match_operand 0 "" ""))))] + "" +- "jcc %l0" ++{ ++ if ((cc_status.flags & CC_OVERFLOW_UNUSABLE) != 0) ++ { ++ cc_status.flags &= ~CC_OVERFLOW_UNUSABLE; ++ return 0; ++ } ++ ++ return "jcc %l0"; ++} + [(set_attr "type" "bcc")]) + + (define_insn "*bge_rev" +@@ -6684,6 +6818,12 @@ + (label_ref (match_operand 0 "" ""))))] + "" + { ++ if ((cc_status.flags & CC_OVERFLOW_UNUSABLE) != 0) ++ { ++ cc_status.flags &= ~CC_OVERFLOW_UNUSABLE; ++ return 0; ++ } ++ + OUTPUT_JUMP ("jlt %l0", "fjnge %l0", "jmi %l0"); + } + [(set_attr "type" "bcc")]) +@@ -6695,7 +6835,15 @@ + (pc) + (label_ref (match_operand 0 "" ""))))] + "" +- "jcs %l0" ++{ ++ if ((cc_status.flags & CC_OVERFLOW_UNUSABLE) != 0) ++ { ++ cc_status.flags &= ~CC_OVERFLOW_UNUSABLE; ++ return 0; ++ } ++ ++ return "jcs %l0"; ++} + [(set_attr "type" "bcc")]) + + (define_insn "*ble_rev" +@@ -6706,6 +6854,12 @@ + (label_ref (match_operand 0 "" ""))))] + "" + { ++ if ((cc_status.flags & CC_OVERFLOW_UNUSABLE) != 0) ++ { ++ cc_status.flags &= ~CC_OVERFLOW_UNUSABLE; ++ return 0; ++ } ++ + OUTPUT_JUMP ("jgt %l0", "fjnle %l0", 0); + } + [(set_attr "type" "bcc")]) +@@ -6717,7 +6871,15 @@ + (pc) + (label_ref (match_operand 0 "" ""))))] + "" +- "jhi %l0" ++{ ++ if ((cc_status.flags & CC_OVERFLOW_UNUSABLE) != 0) ++ { ++ cc_status.flags &= ~CC_OVERFLOW_UNUSABLE; ++ return 0; ++ } ++ ++ return "jhi %l0"; ++} + [(set_attr "type" "bcc")]) + + (define_insn "*bordered_rev" +@@ -6729,7 
+6891,8 @@ + { + gcc_assert (cc_prev_status.flags & CC_IN_68881); + return "fjun %l0"; +-}) ++} ++ [(set_attr "type" "fbcc")]) + + (define_insn "*bunordered_rev" + [(set (pc) +@@ -6740,7 +6903,8 @@ + { + gcc_assert (cc_prev_status.flags & CC_IN_68881); + return "fjor %l0"; +-}) ++} ++ [(set_attr "type" "fbcc")]) + + (define_insn "*buneq_rev" + [(set (pc) +@@ -6751,7 +6915,8 @@ + { + gcc_assert (cc_prev_status.flags & CC_IN_68881); + return "fjogl %l0"; +-}) ++} ++ [(set_attr "type" "fbcc")]) + + (define_insn "*bunge_rev" + [(set (pc) +@@ -6762,7 +6927,8 @@ + { + gcc_assert (cc_prev_status.flags & CC_IN_68881); + return "fjolt %l0"; +-}) ++} ++ [(set_attr "type" "fbcc")]) + + (define_insn "*bungt_rev" + [(set (pc) +@@ -6773,7 +6939,8 @@ + { + gcc_assert (cc_prev_status.flags & CC_IN_68881); + return "fjole %l0"; +-}) ++} ++ [(set_attr "type" "fbcc")]) + + (define_insn "*bunle_rev" + [(set (pc) +@@ -6784,7 +6951,8 @@ + { + gcc_assert (cc_prev_status.flags & CC_IN_68881); + return "fjogt %l0"; +-}) ++} ++ [(set_attr "type" "fbcc")]) + + (define_insn "*bunlt_rev" + [(set (pc) +@@ -6795,7 +6963,8 @@ + { + gcc_assert (cc_prev_status.flags & CC_IN_68881); + return "fjoge %l0"; +-}) ++} ++ [(set_attr "type" "fbcc")]) + + (define_insn "*bltgt_rev" + [(set (pc) +@@ -6806,7 +6975,8 @@ + { + gcc_assert (cc_prev_status.flags & CC_IN_68881); + return "fjueq %l0"; +-}) ++} ++ [(set_attr "type" "fbcc")]) + + ;; Unconditional and other jump instructions + (define_insn "jump" +@@ -6835,7 +7005,7 @@ + { + return MOTOROLA ? "jmp (%0)" : "jmp %0@"; + } +- [(set_attr "type" "bra")]) ++ [(set_attr "type" "jmp")]) + + ;; Jump to variable address from dispatch table of relative addresses. + (define_insn "" +@@ -7013,7 +7183,8 @@ + "!SIBLING_CALL_P (insn)" + { + return output_call (operands[0]); +-}) ++} ++ [(set_attr "type" "jsr")]) + + ;; Call subroutine, returning value in operand 0 + ;; (which must be a hard register). +@@ -7035,7 +7206,6 @@ + "!SIBLING_CALL_P (insn)" + "jsr %a1" + [(set_attr "type" "jsr") +- (set_attr "split" "done") + (set_attr "opx" "1")]) + + (define_insn "*symbolic_call_value_jsr" +@@ -7049,7 +7219,6 @@ + return m68k_symbolic_call; + } + [(set_attr "type" "jsr") +- (set_attr "split" "done") + (set_attr "opx" "1")]) + + (define_insn "*symbolic_call_value_bsr" +@@ -7065,7 +7234,6 @@ + return m68k_symbolic_call; + } + [(set_attr "type" "bsr") +- (set_attr "split" "done") + (set_attr "opx" "1")]) + + ;; Call subroutine returning any type. +@@ -7231,7 +7399,8 @@ + return "link.w %0,%1"; + else + return "link.l %0,%1"; +-}) ++} ++ [(set_attr "type" "link")]) + + (define_expand "unlink" + [(parallel +@@ -7721,6 +7890,17 @@ + } + }) + ++;; These are to prevent the scheduler from moving stores to the frame ++;; before the stack adjustment. ++(define_insn "stack_tie" ++ [(set (mem:BLK (scratch)) ++ (unspec:BLK [(match_operand:SI 0 "register_operand" "r") ++ (match_operand:SI 1 "register_operand" "r")] ++ UNSPEC_TIE))] ++ "" ++ "" ++ [(set_attr "type" "ignore")]) ++ + ;; Instruction that subscribes one word in ColdFire instruction buffer. + ;; This instruction is used within scheduler only and should not appear + ;; in the instruction stream. 
+--- a/gcc/config/m68k/m68k.opt ++++ b/gcc/config/m68k/m68k.opt +@@ -178,3 +178,11 @@ Do not use unaligned memory references + mtune= + Target RejectNegative Joined + Tune for the specified target CPU or architecture ++ ++mxgot ++Target Report Mask(XGOT) ++Support more than 8192 GOT entries on ColdFire ++ ++mxtls ++Target Report Mask(XTLS) ++Support TLS segment larger than 64K +--- a/gcc/config/m68k/predicates.md ++++ b/gcc/config/m68k/predicates.md +@@ -130,7 +130,9 @@ + (match_code "sign_extend,zero_extend")) + + ;; Returns true if OP is either a symbol reference or a sum of a +-;; symbol reference and a constant. ++;; symbol reference and a constant. This predicate is for "raw" ++;; symbol references not yet processed by legitimize*_address, ++;; hence we do not handle UNSPEC_{XGOT, TLS, XTLS} here. + + (define_predicate "symbolic_operand" + (match_code "symbol_ref,label_ref,const") +--- a/gcc/config/m68k/t-cf ++++ b/gcc/config/m68k/t-cf +@@ -2,3 +2,6 @@ + + M68K_MLIB_CPU += && (CPU ~ "^mcf") + M68K_ARCH := cf ++# Do not stamp the multilibs with a MAC type, as we never use those ++# instructions in compiler-generated code. ++MULTILIB_EXTRA_OPTS += Wa,-mno-mac +--- /dev/null ++++ b/gcc/config/m68k/t-linux +@@ -0,0 +1,11 @@ ++EXTRA_MULTILIB_PARTS=crtbegin.o crtend.o crtbeginS.o crtendS.o crtbeginT.o ++ ++# Only include multilibs for CPUs with an MMU. ++M68K_MLIB_CPU += && match(FLAGS, "FL_MMU") ++ ++# This rule uses MULTILIB_MATCHES to generate a definition of ++# SYSROOT_SUFFIX_SPEC. ++sysroot-suffix.h: $(srcdir)/config/m68k/print-sysroot-suffix.sh ++ $(SHELL) $(srcdir)/config/m68k/print-sysroot-suffix.sh \ ++ "$(SYSTEM_HEADER_DIR)/../.." "$(MULTILIB_MATCHES)" \ ++ "$(MULTILIB_OPTIONS)" > $@ +--- a/gcc/config/m68k/t-uclinux ++++ b/gcc/config/m68k/t-uclinux +@@ -1,8 +1,8 @@ + # crti and crtn are provided by uClibc. + EXTRA_MULTILIB_PARTS=crtbegin.o crtend.o + +-# Only include multilibs for the 68020 and for CPUs without an MMU. +-M68K_MLIB_CPU += && (MLIB == "68020" || !match(FLAGS, "FL_MMU")) ++# Include multilibs for CPUs without an MMU or with FL_UCLINUX ++M68K_MLIB_CPU += && (!match(FLAGS, "FL_MMU") || match(FLAGS, "FL_UCLINUX")) + + # Add multilibs for execute-in-place and shared-library code. 
+ M68K_MLIB_OPTIONS += msep-data/mid-shared-library +--- a/gcc/config/mips/74k.md ++++ b/gcc/config/mips/74k.md +@@ -118,8 +118,7 @@ + ;; stores + (define_insn_reservation "r74k_int_store" 1 + (and (eq_attr "cpu" "74kc,74kf2_1,74kf1_1,74kf3_2") +- (and (eq_attr "type" "store") +- (eq_attr "mode" "!unknown"))) ++ (eq_attr "type" "store")) + "r74k_agen") + + +@@ -145,33 +144,123 @@ + ;; load->load base: 4 cycles + ;; load->store base: 4 cycles + (define_bypass 4 "r74k_int_load" "r74k_int_load") +-(define_bypass 4 "r74k_int_load" "r74k_int_store" "!store_data_bypass_p") ++(define_bypass 4 "r74k_int_load" "r74k_int_store" "!mips_store_data_bypass_p") + + ;; logical/move/slt/signext->next use : 1 cycles (Default) + ;; logical/move/slt/signext->load base: 2 cycles + ;; logical/move/slt/signext->store base: 2 cycles + (define_bypass 2 "r74k_int_logical" "r74k_int_load") +-(define_bypass 2 "r74k_int_logical" "r74k_int_store" "!store_data_bypass_p") ++(define_bypass 2 "r74k_int_logical" "r74k_int_store" ++ "!mips_store_data_bypass_p") + + ;; arith->next use : 2 cycles (Default) + ;; arith->load base: 3 cycles + ;; arith->store base: 3 cycles + (define_bypass 3 "r74k_int_arith" "r74k_int_load") +-(define_bypass 3 "r74k_int_arith" "r74k_int_store" "!store_data_bypass_p") ++(define_bypass 3 "r74k_int_arith" "r74k_int_store" "!mips_store_data_bypass_p") + + ;; cmove->next use : 4 cycles (Default) + ;; cmove->load base: 5 cycles + ;; cmove->store base: 5 cycles + (define_bypass 5 "r74k_int_cmove" "r74k_int_load") +-(define_bypass 5 "r74k_int_cmove" "r74k_int_store" "!store_data_bypass_p") ++(define_bypass 5 "r74k_int_cmove" "r74k_int_store" ++ "!mips_store_data_bypass_p") + + ;; mult/madd/msub->int_mfhilo : 4 cycles (default) + ;; mult->madd/msub : 1 cycles + ;; madd/msub->madd/msub : 1 cycles +-(define_bypass 1 "r74k_int_mult,r74k_int_mul3" "r74k_int_madd" +- "mips_linked_madd_p") +-(define_bypass 1 "r74k_int_madd" "r74k_int_madd" +- "mips_linked_madd_p") ++(define_bypass 1 "r74k_int_mult" "r74k_int_madd") ++(define_bypass 1 "r74k_int_madd" "r74k_int_madd") ++ ++(define_bypass 1 "r74k_int_mul3" "r74k_int_madd" ++ "mips_mult_madd_chain_bypass_p") ++ ++ ++;; -------------------------------------------------------------- ++;; DSP instructins ++;; -------------------------------------------------------------- ++ ++;; Non-saturating insn have the same latency as normal ALU operations, ++(define_insn_reservation "r74k_dsp_alu" 2 ++ (and (eq_attr "cpu" "74kc,74kf2_1,74kf1_1,74kf3_2") ++ (eq_attr "type" "dspalu")) ++ "r74k_alu") ++ ++;; Saturating insn takes an extra cycle. 
++(define_insn_reservation "r74k_dsp_alu_sat" 3 ++ (and (eq_attr "cpu" "74kc,74kf2_1,74kf1_1,74kf3_2") ++ (eq_attr "type" "dspalusat")) ++ "r74k_alu") ++ ++;; dpaq_s, dpau, dpsq_s, dpsu, maq_s, mulsaq ++;; - delivers result to hi/lo in 6 cycle (bypass at M4) ++(define_insn_reservation "r74k_dsp_mac" 6 ++ (and (eq_attr "cpu" "74kc,74kf2_1,74kf1_1,74kf3_2") ++ (eq_attr "type" "dspmac")) ++ "r74k_alu+r74k_mul") ++ ++;; dpaq_sa, dpsq_sa, maq_sa ++;; - delivers result to hi/lo in 7 cycle (bypass at WB) ++(define_insn_reservation "r74k_dsp_mac_sat" 7 ++ (and (eq_attr "cpu" "74kc,74kf2_1,74kf1_1,74kf3_2") ++ (eq_attr "type" "dspmacsat")) ++ "r74k_alu+r74k_mul") ++ ++;; extp, extpdp, extpdpv, extpv, extr, extrv ++;; - same latency as "mul" ++(define_insn_reservation "r74k_dsp_acc_ext" 7 ++ (and (eq_attr "cpu" "74kc,74kf2_1,74kf1_1,74kf3_2") ++ (eq_attr "type" "accext")) ++ "r74k_alu+r74k_mul") ++ ++;; mthlip, shilo, shilov ++;; - same latency as "mul" ++(define_insn_reservation "r74k_dsp_acc_mod" 7 ++ (and (eq_attr "cpu" "74kc,74kf2_1,74kf1_1,74kf3_2") ++ (eq_attr "type" "accmod")) ++ "r74k_alu+r74k_mul") ++ ++;; dspalu ->load/store base ++;; dspalusat->load/store base ++;; - we should never see these in real life. ++ ++;; dsp_mac->dsp_mac : 1 cycles (repeat rate of 1) ++;; dsp_mac->dsp_mac_sat : 1 cycles (repeat rate of 1) ++(define_bypass 1 "r74k_dsp_mac" "r74k_dsp_mac") ++(define_bypass 1 "r74k_dsp_mac" "r74k_dsp_mac_sat") ++ ++;; dsp_mac_sat->dsp_mac_sat : 2 cycles (repeat rate of 2) ++;; dsp_mac_sat->dsp_mac : 2 cycles (repeat rate of 2) ++(define_bypass 2 "r74k_dsp_mac_sat" "r74k_dsp_mac_sat") ++(define_bypass 2 "r74k_dsp_mac_sat" "r74k_dsp_mac") ++ ++(define_bypass 1 "r74k_int_mult" "r74k_dsp_mac") ++(define_bypass 1 "r74k_int_mult" "r74k_dsp_mac_sat") ++ ++;; Before reload, all multiplier is registered as imul3 (which has a long ++;; latency). We temporary jig the latency such that the macc groups ++;; are scheduled closely together during the first scheduler pass. ++(define_bypass 1 "r74k_int_mul3" "r74k_dsp_mac" ++ "mips_mult_madd_chain_bypass_p") ++(define_bypass 1 "r74k_int_mul3" "r74k_dsp_mac_sat" ++ "mips_mult_madd_chain_bypass_p") ++ ++;; Assuming the following is true (bypass at M4) ++;; AP AF AM MB M1 M2 M3 M4 WB GR GC ++;; AP AF AM MB M1 M2 M3 M4 WB GR GC ++;; dsp_mac->dsp_acc_ext : 4 cycles ++;; dsp_mac->dsp_acc_mod : 4 cycles ++(define_bypass 4 "r74k_dsp_mac" "r74k_dsp_acc_ext") ++(define_bypass 4 "r74k_dsp_mac" "r74k_dsp_acc_mod") ++ ++;; Assuming the following is true (bypass at WB) ++;; AP AF AM MB M1 M2 M3 M4 WB GR GC ++;; AP AF AM MB M1 M2 M3 M4 WB GR GC ++;; dsp_mac_sat->dsp_acc_ext : 5 cycles ++;; dsp_mac_sat->dsp_acc_mod : 5 cycles ++(define_bypass 5 "r74k_dsp_mac_sat" "r74k_dsp_acc_ext") ++(define_bypass 5 "r74k_dsp_mac_sat" "r74k_dsp_acc_mod") ++ + + ;; -------------------------------------------------------------- + ;; Floating Point Instructions +--- /dev/null ++++ b/gcc/config/mips/crtfastmath.c +@@ -0,0 +1,62 @@ ++/* ++ * Copyright (C) 2008 Free Software Foundation, Inc. ++ * ++ * This file is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the ++ * Free Software Foundation; either version 3, or (at your option) any ++ * later version. 
++ * ++ * In addition to the permissions in the GNU General Public License, the ++ * Free Software Foundation gives you unlimited permission to link the ++ * compiled version of this file with other programs, and to distribute ++ * those programs without any restriction coming from the use of this ++ * file. (The General Public License restrictions do apply in other ++ * respects; for example, they cover modification of the file, and ++ * distribution when not linked into another program.) ++ * ++ * This file is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with GCC; see the file COPYING3. If not see ++ * . ++ * ++ * As a special exception, if you link this library with files ++ * compiled with GCC to produce an executable, this does not cause ++ * the resulting executable to be covered by the GNU General Public License. ++ * This exception does not however invalidate any other reasons why ++ * the executable file might be covered by the GNU General Public License. ++ */ ++ ++#ifdef __mips_hard_float ++ ++/* flush denormalized numbers to zero */ ++#define _FPU_FLUSH_TZ 0x1000000 ++ ++/* rounding control */ ++#define _FPU_RC_NEAREST 0x0 /* RECOMMENDED */ ++#define _FPU_RC_ZERO 0x1 ++#define _FPU_RC_UP 0x2 ++#define _FPU_RC_DOWN 0x3 ++ ++/* enable interrupts for IEEE exceptions */ ++#define _FPU_IEEE 0x00000F80 ++ ++/* Macros for accessing the hardware control word. */ ++#define _FPU_GETCW(cw) __asm__ ("cfc1 %0,$31" : "=r" (cw)) ++#define _FPU_SETCW(cw) __asm__ ("ctc1 %0,$31" : : "r" (cw)) ++ ++static void __attribute__((constructor)) ++set_fast_math (void) ++{ ++ unsigned int fcr; ++ ++ /* fastmath: flush to zero, round to nearest, ieee exceptions disabled */ ++ fcr = _FPU_FLUSH_TZ | _FPU_RC_NEAREST; ++ ++ _FPU_SETCW(fcr); ++} ++ ++#endif /* __mips_hard_float */ +--- /dev/null ++++ b/gcc/config/mips/cs-sgxx-linux.h +@@ -0,0 +1,40 @@ ++/* MIPS SourceryG++ GNU/Linux Configuration. ++ Copyright (C) 2008 ++ Free Software Foundation, Inc. ++ ++This file is part of GCC. ++ ++GCC is free software; you can redistribute it and/or modify ++it under the terms of the GNU General Public License as published by ++the Free Software Foundation; either version 3, or (at your option) ++any later version. ++ ++GCC is distributed in the hope that it will be useful, ++but WITHOUT ANY WARRANTY; without even the implied warranty of ++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++GNU General Public License for more details. ++ ++You should have received a copy of the GNU General Public License ++along with GCC; see the file COPYING3. If not see ++. */ ++ ++/* We do not need to provide an explicit big-endian multilib. */ ++#undef MULTILIB_DEFAULTS ++#define MULTILIB_DEFAULTS \ ++ { "EB" } ++ ++/* The various C libraries each have their own subdirectory. 
*/ ++#undef SYSROOT_SUFFIX_SPEC ++#define SYSROOT_SUFFIX_SPEC \ ++"%{muclibc:/uclibc}\ ++%{mips2|mips3|mips4|march=mips2|march=mips3|march=mips4|march=r6000|\ ++march=r4000|march=vr4100|march=vr4111|march=vr4120|march=vr4130|\ ++march=vr4300|march=r4400|march=r4600|march=orion|march=r4650|march=r8000|\ ++march=vr5000|march=vr5400|march=vr5500|march=rm7000|\ ++march=rm9000:/mips2;\ ++mips32|march=mips32|march=4kc|march=4km|march=4kp|march=4ks:/mips32}\ ++%{msoft-float:/soft-float}%{mel|EL:/el}" ++ ++#undef SYSROOT_HEADERS_SUFFIX_SPEC ++#define SYSROOT_HEADERS_SUFFIX_SPEC \ ++ "%{muclibc:/uclibc}" +--- /dev/null ++++ b/gcc/config/mips/cs-sgxxlite-linux.h +@@ -0,0 +1,33 @@ ++/* MIPS SourceryG++ GNU/Linux Configuration. ++ Copyright (C) 2008 ++ Free Software Foundation, Inc. ++ ++This file is part of GCC. ++ ++GCC is free software; you can redistribute it and/or modify ++it under the terms of the GNU General Public License as published by ++the Free Software Foundation; either version 3, or (at your option) ++any later version. ++ ++GCC is distributed in the hope that it will be useful, ++but WITHOUT ANY WARRANTY; without even the implied warranty of ++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++GNU General Public License for more details. ++ ++You should have received a copy of the GNU General Public License ++along with GCC; see the file COPYING3. If not see ++. */ ++ ++/* We do not need to provide an explicit big-endian multilib. */ ++#undef MULTILIB_DEFAULTS ++#define MULTILIB_DEFAULTS \ ++ { "EB" } ++ ++/* The various C libraries each have their own subdirectory. */ ++#undef SYSROOT_SUFFIX_SPEC ++#define SYSROOT_SUFFIX_SPEC \ ++"%{muclibc:/uclibc}%{msoft-float:/soft-float}%{mel|EL:/el}" ++ ++#undef SYSROOT_HEADERS_SUFFIX_SPEC ++#define SYSROOT_HEADERS_SUFFIX_SPEC \ ++ "%{muclibc:/uclibc}" +--- a/gcc/config/mips/elfoabi.h ++++ b/gcc/config/mips/elfoabi.h +@@ -19,7 +19,7 @@ You should have received a copy of the G + along with GCC; see the file COPYING3. If not see + . */ + +-#define DRIVER_SELF_SPECS \ ++#define SUBTARGET_SELF_SPECS \ + /* Make sure a -mips option is present. This helps us to pick \ + the right multilib, and also makes the later specs easier \ + to write. */ \ +--- a/gcc/config/mips/iris6.h ++++ b/gcc/config/mips/iris6.h +@@ -29,7 +29,7 @@ along with GCC; see the file COPYING3. + + /* Force the default ABI onto the command line in order to make the specs + easier to write. Default to the mips2 ISA for the O32 ABI. */ +-#define DRIVER_SELF_SPECS \ ++#define SUBTARGET_SELF_SPECS \ + "%{!mabi=*: -mabi=n32}", \ + "%{mabi=32: %{!mips*: %{!march*: -mips2}}}" + +--- a/gcc/config/mips/linux.h ++++ b/gcc/config/mips/linux.h +@@ -37,10 +37,6 @@ along with GCC; see the file COPYING3. + #undef MD_EXEC_PREFIX + #undef MD_STARTFILE_PREFIX + +-/* If we don't set MASK_ABICALLS, we can't default to PIC. */ +-#undef TARGET_DEFAULT +-#define TARGET_DEFAULT MASK_ABICALLS +- + #define TARGET_OS_CPP_BUILTINS() \ + do { \ + LINUX_TARGET_OS_CPP_BUILTINS(); \ +@@ -79,7 +75,8 @@ along with GCC; see the file COPYING3. + %{static:-static}}}" + + #undef SUBTARGET_ASM_SPEC +-#define SUBTARGET_ASM_SPEC "%{mabi=64: -64} %{!mno-abicalls:-KPIC}" ++#define SUBTARGET_ASM_SPEC \ ++ "%{mabi=64: -64} %{mabicalls:%{fpic|fPIC|fpie|fPIE:-KPIC;:-mnon-pic-abicalls}}" + + /* The MIPS assembler has different syntax for .set. We set it to + .dummy to trap any errors. */ +@@ -145,7 +142,15 @@ along with GCC; see the file COPYING3. + /* Default to -mno-shared for non-PIC. 
*/ + #define NO_SHARED_SPECS \ + "%{mshared|mno-shared|fpic|fPIC|fpie|fPIE:;:-mno-shared}" +-#define DRIVER_SELF_SPECS NO_SHARED_SPECS ++#undef SUBTARGET_SELF_SPECS ++#define SUBTARGET_SELF_SPECS NO_SHARED_SPECS + #else + #define NO_SHARED_SPECS + #endif ++ ++/* Similar to standard Linux, but adding -ffast-math support. */ ++#undef ENDFILE_SPEC ++#define ENDFILE_SPEC \ ++ "%{ffast-math|funsafe-math-optimizations:crtfastmath.o%s} \ ++ %{shared|pie:crtendS.o%s;:crtend.o%s} crtn.o%s" ++ +--- a/gcc/config/mips/linux64.h ++++ b/gcc/config/mips/linux64.h +@@ -20,15 +20,15 @@ along with GCC; see the file COPYING3. + + /* Force the default endianness and ABI flags onto the command line + in order to make the other specs easier to write. */ +-#undef DRIVER_SELF_SPECS +-#define DRIVER_SELF_SPECS \ ++#undef SUBTARGET_SELF_SPECS ++#define SUBTARGET_SELF_SPECS \ + NO_SHARED_SPECS \ + " %{!EB:%{!EL:%(endian_spec)}}" \ + " %{!mabi=*: -mabi=n32}" + + #undef SUBTARGET_ASM_SPEC + #define SUBTARGET_ASM_SPEC "\ +-%{!fno-PIC:%{!fno-pic:-KPIC}} \ ++%{mabicalls:%{fpic|fPIC|fpie|fPIE:-KPIC;:-mnon-pic-abicalls}} \ + %{fno-PIC:-non_shared} %{fno-pic:-non_shared}" + + #undef LIB_SPEC +@@ -72,3 +72,9 @@ NO_SHARED_SPECS \ + ieee_quad_format is the default, but let's put this here to make + sure nobody thinks we just forgot to set it to something else. */ + #define MIPS_TFMODE_FORMAT mips_quad_format ++ ++/* Similar to standard Linux, but adding -ffast-math support. */ ++#undef ENDFILE_SPEC ++#define ENDFILE_SPEC \ ++ "%{ffast-math|funsafe-math-optimizations:crtfastmath.o%s} \ ++ %{shared|pie:crtendS.o%s;:crtend.o%s} crtn.o%s" +--- a/gcc/config/mips/mips-dsp.md ++++ b/gcc/config/mips/mips-dsp.md +@@ -42,9 +42,9 @@ + (match_operand:DSPV 2 "register_operand" "d"))) + (set (reg:CCDSP CCDSP_OU_REGNUM) + (unspec:CCDSP [(match_dup 1) (match_dup 2)] UNSPEC_ADDQ))])] +- "" ++ "ISA_HAS_DSP" + "add.\t%0,%1,%2" +- [(set_attr "type" "arith") ++ [(set_attr "type" "dspalu") + (set_attr "mode" "SI")]) + + (define_insn "mips_add_s_" +@@ -55,9 +55,9 @@ + UNSPEC_ADDQ_S)) + (set (reg:CCDSP CCDSP_OU_REGNUM) + (unspec:CCDSP [(match_dup 1) (match_dup 2)] UNSPEC_ADDQ_S))])] +- "" ++ "ISA_HAS_DSP" + "add_s.\t%0,%1,%2" +- [(set_attr "type" "arith") ++ [(set_attr "type" "dspalusat") + (set_attr "mode" "SI")]) + + ;; SUBQ* +@@ -70,7 +70,7 @@ + (unspec:CCDSP [(match_dup 1) (match_dup 2)] UNSPEC_SUBQ))])] + "ISA_HAS_DSP" + "sub.\t%0,%1,%2" +- [(set_attr "type" "arith") ++ [(set_attr "type" "dspalu") + (set_attr "mode" "SI")]) + + (define_insn "mips_sub_s_" +@@ -83,7 +83,7 @@ + (unspec:CCDSP [(match_dup 1) (match_dup 2)] UNSPEC_SUBQ_S))])] + "ISA_HAS_DSP" + "sub_s.\t%0,%1,%2" +- [(set_attr "type" "arith") ++ [(set_attr "type" "dspalusat") + (set_attr "mode" "SI")]) + + ;; ADDSC +@@ -97,7 +97,7 @@ + (unspec:CCDSP [(match_dup 1) (match_dup 2)] UNSPEC_ADDSC))])] + "ISA_HAS_DSP" + "addsc\t%0,%1,%2" +- [(set_attr "type" "arith") ++ [(set_attr "type" "dspalu") + (set_attr "mode" "SI")]) + + ;; ADDWC +@@ -112,7 +112,7 @@ + (unspec:CCDSP [(match_dup 1) (match_dup 2)] UNSPEC_ADDWC))])] + "ISA_HAS_DSP" + "addwc\t%0,%1,%2" +- [(set_attr "type" "arith") ++ [(set_attr "type" "dspalu") + (set_attr "mode" "SI")]) + + ;; MODSUB +@@ -123,7 +123,7 @@ + UNSPEC_MODSUB))] + "ISA_HAS_DSP" + "modsub\t%0,%1,%2" +- [(set_attr "type" "arith") ++ [(set_attr "type" "dspalu") + (set_attr "mode" "SI")]) + + ;; RADDU* +@@ -133,7 +133,7 @@ + UNSPEC_RADDU_W_QB))] + "ISA_HAS_DSP" + "raddu.w.qb\t%0,%1" +- [(set_attr "type" "arith") ++ [(set_attr "type" "dspalu") + (set_attr "mode" 
"SI")]) + + ;; ABSQ* +@@ -146,7 +146,7 @@ + (unspec:CCDSP [(match_dup 1)] UNSPEC_ABSQ_S))])] + "ISA_HAS_DSP" + "absq_s.\t%0,%1" +- [(set_attr "type" "arith") ++ [(set_attr "type" "dspalusat") + (set_attr "mode" "SI")]) + + ;; PRECRQ* +@@ -157,7 +157,7 @@ + UNSPEC_PRECRQ_QB_PH))] + "ISA_HAS_DSP" + "precrq.qb.ph\t%0,%1,%2" +- [(set_attr "type" "arith") ++ [(set_attr "type" "dspalu") + (set_attr "mode" "SI")]) + + (define_insn "mips_precrq_ph_w" +@@ -167,7 +167,7 @@ + UNSPEC_PRECRQ_PH_W))] + "ISA_HAS_DSP" + "precrq.ph.w\t%0,%1,%2" +- [(set_attr "type" "arith") ++ [(set_attr "type" "dspalu") + (set_attr "mode" "SI")]) + + (define_insn "mips_precrq_rs_ph_w" +@@ -181,7 +181,7 @@ + UNSPEC_PRECRQ_RS_PH_W))])] + "ISA_HAS_DSP" + "precrq_rs.ph.w\t%0,%1,%2" +- [(set_attr "type" "arith") ++ [(set_attr "type" "dspalu") + (set_attr "mode" "SI")]) + + ;; PRECRQU* +@@ -196,7 +196,7 @@ + UNSPEC_PRECRQU_S_QB_PH))])] + "ISA_HAS_DSP" + "precrqu_s.qb.ph\t%0,%1,%2" +- [(set_attr "type" "arith") ++ [(set_attr "type" "dspalusat") + (set_attr "mode" "SI")]) + + ;; PRECEQ* +@@ -206,7 +206,7 @@ + UNSPEC_PRECEQ_W_PHL))] + "ISA_HAS_DSP" + "preceq.w.phl\t%0,%1" +- [(set_attr "type" "arith") ++ [(set_attr "type" "dspalu") + (set_attr "mode" "SI")]) + + (define_insn "mips_preceq_w_phr" +@@ -215,7 +215,7 @@ + UNSPEC_PRECEQ_W_PHR))] + "ISA_HAS_DSP" + "preceq.w.phr\t%0,%1" +- [(set_attr "type" "arith") ++ [(set_attr "type" "dspalu") + (set_attr "mode" "SI")]) + + ;; PRECEQU* +@@ -225,7 +225,7 @@ + UNSPEC_PRECEQU_PH_QBL))] + "ISA_HAS_DSP" + "precequ.ph.qbl\t%0,%1" +- [(set_attr "type" "arith") ++ [(set_attr "type" "dspalu") + (set_attr "mode" "SI")]) + + (define_insn "mips_precequ_ph_qbr" +@@ -234,7 +234,7 @@ + UNSPEC_PRECEQU_PH_QBR))] + "ISA_HAS_DSP" + "precequ.ph.qbr\t%0,%1" +- [(set_attr "type" "arith") ++ [(set_attr "type" "dspalu") + (set_attr "mode" "SI")]) + + (define_insn "mips_precequ_ph_qbla" +@@ -243,7 +243,7 @@ + UNSPEC_PRECEQU_PH_QBLA))] + "ISA_HAS_DSP" + "precequ.ph.qbla\t%0,%1" +- [(set_attr "type" "arith") ++ [(set_attr "type" "dspalu") + (set_attr "mode" "SI")]) + + (define_insn "mips_precequ_ph_qbra" +@@ -252,7 +252,7 @@ + UNSPEC_PRECEQU_PH_QBRA))] + "ISA_HAS_DSP" + "precequ.ph.qbra\t%0,%1" +- [(set_attr "type" "arith") ++ [(set_attr "type" "dspalu") + (set_attr "mode" "SI")]) + + ;; PRECEU* +@@ -262,7 +262,7 @@ + UNSPEC_PRECEU_PH_QBL))] + "ISA_HAS_DSP" + "preceu.ph.qbl\t%0,%1" +- [(set_attr "type" "arith") ++ [(set_attr "type" "dspalu") + (set_attr "mode" "SI")]) + + (define_insn "mips_preceu_ph_qbr" +@@ -271,7 +271,7 @@ + UNSPEC_PRECEU_PH_QBR))] + "ISA_HAS_DSP" + "preceu.ph.qbr\t%0,%1" +- [(set_attr "type" "arith") ++ [(set_attr "type" "dspalu") + (set_attr "mode" "SI")]) + + (define_insn "mips_preceu_ph_qbla" +@@ -280,7 +280,7 @@ + UNSPEC_PRECEU_PH_QBLA))] + "ISA_HAS_DSP" + "preceu.ph.qbla\t%0,%1" +- [(set_attr "type" "arith") ++ [(set_attr "type" "dspalu") + (set_attr "mode" "SI")]) + + (define_insn "mips_preceu_ph_qbra" +@@ -289,7 +289,7 @@ + UNSPEC_PRECEU_PH_QBRA))] + "ISA_HAS_DSP" + "preceu.ph.qbra\t%0,%1" +- [(set_attr "type" "arith") ++ [(set_attr "type" "dspalu") + (set_attr "mode" "SI")]) + + ;; Table 2-2. 
MIPS DSP ASE Instructions: Shift +@@ -313,7 +313,7 @@ + } + return "shllv.\t%0,%1,%2"; + } +- [(set_attr "type" "shift") ++ [(set_attr "type" "dspalu") + (set_attr "mode" "SI")]) + + (define_insn "mips_shll_s_" +@@ -335,7 +335,7 @@ + } + return "shllv_s.\t%0,%1,%2"; + } +- [(set_attr "type" "shift") ++ [(set_attr "type" "dspalusat") + (set_attr "mode" "SI")]) + + ;; SHRL* +@@ -354,7 +354,7 @@ + } + return "shrlv.qb\t%0,%1,%2"; + } +- [(set_attr "type" "shift") ++ [(set_attr "type" "dspalu") + (set_attr "mode" "SI")]) + + ;; SHRA* +@@ -373,7 +373,7 @@ + } + return "shrav.ph\t%0,%1,%2"; + } +- [(set_attr "type" "shift") ++ [(set_attr "type" "dspalu") + (set_attr "mode" "SI")]) + + (define_insn "mips_shra_r_" +@@ -392,7 +392,7 @@ + } + return "shrav_r.\t%0,%1,%2"; + } +- [(set_attr "type" "shift") ++ [(set_attr "type" "dspalu") + (set_attr "mode" "SI")]) + + ;; Table 2-3. MIPS DSP ASE Instructions: Multiply +@@ -478,7 +478,7 @@ + UNSPEC_DPAU_H_QBL))] + "ISA_HAS_DSP && !TARGET_64BIT" + "dpau.h.qbl\t%q0,%2,%3" +- [(set_attr "type" "imadd") ++ [(set_attr "type" "dspmac") + (set_attr "mode" "SI")]) + + (define_insn "mips_dpau_h_qbr" +@@ -489,7 +489,7 @@ + UNSPEC_DPAU_H_QBR))] + "ISA_HAS_DSP && !TARGET_64BIT" + "dpau.h.qbr\t%q0,%2,%3" +- [(set_attr "type" "imadd") ++ [(set_attr "type" "dspmac") + (set_attr "mode" "SI")]) + + ;; DPSU* +@@ -501,7 +501,7 @@ + UNSPEC_DPSU_H_QBL))] + "ISA_HAS_DSP && !TARGET_64BIT" + "dpsu.h.qbl\t%q0,%2,%3" +- [(set_attr "type" "imadd") ++ [(set_attr "type" "dspmac") + (set_attr "mode" "SI")]) + + (define_insn "mips_dpsu_h_qbr" +@@ -512,7 +512,7 @@ + UNSPEC_DPSU_H_QBR))] + "ISA_HAS_DSP && !TARGET_64BIT" + "dpsu.h.qbr\t%q0,%2,%3" +- [(set_attr "type" "imadd") ++ [(set_attr "type" "dspmac") + (set_attr "mode" "SI")]) + + ;; DPAQ* +@@ -528,7 +528,7 @@ + UNSPEC_DPAQ_S_W_PH))])] + "ISA_HAS_DSP && !TARGET_64BIT" + "dpaq_s.w.ph\t%q0,%2,%3" +- [(set_attr "type" "imadd") ++ [(set_attr "type" "dspmac") + (set_attr "mode" "SI")]) + + ;; DPSQ* +@@ -544,7 +544,7 @@ + UNSPEC_DPSQ_S_W_PH))])] + "ISA_HAS_DSP && !TARGET_64BIT" + "dpsq_s.w.ph\t%q0,%2,%3" +- [(set_attr "type" "imadd") ++ [(set_attr "type" "dspmac") + (set_attr "mode" "SI")]) + + ;; MULSAQ* +@@ -560,7 +560,7 @@ + UNSPEC_MULSAQ_S_W_PH))])] + "ISA_HAS_DSP && !TARGET_64BIT" + "mulsaq_s.w.ph\t%q0,%2,%3" +- [(set_attr "type" "imadd") ++ [(set_attr "type" "dspmac") + (set_attr "mode" "SI")]) + + ;; DPAQ* +@@ -576,7 +576,7 @@ + UNSPEC_DPAQ_SA_L_W))])] + "ISA_HAS_DSP && !TARGET_64BIT" + "dpaq_sa.l.w\t%q0,%2,%3" +- [(set_attr "type" "imadd") ++ [(set_attr "type" "dspmacsat") + (set_attr "mode" "SI")]) + + ;; DPSQ* +@@ -592,7 +592,7 @@ + UNSPEC_DPSQ_SA_L_W))])] + "ISA_HAS_DSP && !TARGET_64BIT" + "dpsq_sa.l.w\t%q0,%2,%3" +- [(set_attr "type" "imadd") ++ [(set_attr "type" "dspmacsat") + (set_attr "mode" "SI")]) + + ;; MAQ* +@@ -608,7 +608,7 @@ + UNSPEC_MAQ_S_W_PHL))])] + "ISA_HAS_DSP && !TARGET_64BIT" + "maq_s.w.phl\t%q0,%2,%3" +- [(set_attr "type" "imadd") ++ [(set_attr "type" "dspmac") + (set_attr "mode" "SI")]) + + (define_insn "mips_maq_s_w_phr" +@@ -623,7 +623,7 @@ + UNSPEC_MAQ_S_W_PHR))])] + "ISA_HAS_DSP && !TARGET_64BIT" + "maq_s.w.phr\t%q0,%2,%3" +- [(set_attr "type" "imadd") ++ [(set_attr "type" "dspmac") + (set_attr "mode" "SI")]) + + ;; MAQ_SA* +@@ -639,7 +639,7 @@ + UNSPEC_MAQ_SA_W_PHL))])] + "ISA_HAS_DSP && !TARGET_64BIT" + "maq_sa.w.phl\t%q0,%2,%3" +- [(set_attr "type" "imadd") ++ [(set_attr "type" "dspmacsat") + (set_attr "mode" "SI")]) + + (define_insn "mips_maq_sa_w_phr" +@@ -654,7 +654,7 @@ + UNSPEC_MAQ_SA_W_PHR))])] 
+ "ISA_HAS_DSP && !TARGET_64BIT" + "maq_sa.w.phr\t%q0,%2,%3" +- [(set_attr "type" "imadd") ++ [(set_attr "type" "dspmacsat") + (set_attr "mode" "SI")]) + + ;; Table 2-4. MIPS DSP ASE Instructions: General Bit/Manipulation +@@ -665,7 +665,7 @@ + UNSPEC_BITREV))] + "ISA_HAS_DSP" + "bitrev\t%0,%1" +- [(set_attr "type" "arith") ++ [(set_attr "type" "dspalu") + (set_attr "mode" "SI")]) + + ;; INSV +@@ -678,7 +678,7 @@ + UNSPEC_INSV))] + "ISA_HAS_DSP" + "insv\t%0,%2" +- [(set_attr "type" "arith") ++ [(set_attr "type" "dspalu") + (set_attr "mode" "SI")]) + + ;; REPL* +@@ -696,7 +696,7 @@ + } + return "replv.qb\t%0,%1"; + } +- [(set_attr "type" "arith") ++ [(set_attr "type" "dspalu") + (set_attr "mode" "SI")]) + + (define_insn "mips_repl_ph" +@@ -707,7 +707,7 @@ + "@ + repl.ph\t%0,%1 + replv.ph\t%0,%1" +- [(set_attr "type" "arith") ++ [(set_attr "type" "dspalu") + (set_attr "mode" "SI")]) + + ;; Table 2-5. MIPS DSP ASE Instructions: Compare-Pick +@@ -720,7 +720,7 @@ + UNSPEC_CMP_EQ))] + "ISA_HAS_DSP" + "cmp.eq.\t%0,%1" +- [(set_attr "type" "arith") ++ [(set_attr "type" "dspalu") + (set_attr "mode" "SI")]) + + (define_insn "mips_cmp_lt_" +@@ -731,7 +731,7 @@ + UNSPEC_CMP_LT))] + "ISA_HAS_DSP" + "cmp.lt.\t%0,%1" +- [(set_attr "type" "arith") ++ [(set_attr "type" "dspalu") + (set_attr "mode" "SI")]) + + (define_insn "mips_cmp_le_" +@@ -742,7 +742,7 @@ + UNSPEC_CMP_LE))] + "ISA_HAS_DSP" + "cmp.le.\t%0,%1" +- [(set_attr "type" "arith") ++ [(set_attr "type" "dspalu") + (set_attr "mode" "SI")]) + + (define_insn "mips_cmpgu_eq_qb" +@@ -752,7 +752,7 @@ + UNSPEC_CMPGU_EQ_QB))] + "ISA_HAS_DSP" + "cmpgu.eq.qb\t%0,%1,%2" +- [(set_attr "type" "arith") ++ [(set_attr "type" "dspalu") + (set_attr "mode" "SI")]) + + (define_insn "mips_cmpgu_lt_qb" +@@ -762,7 +762,7 @@ + UNSPEC_CMPGU_LT_QB))] + "ISA_HAS_DSP" + "cmpgu.lt.qb\t%0,%1,%2" +- [(set_attr "type" "arith") ++ [(set_attr "type" "dspalu") + (set_attr "mode" "SI")]) + + (define_insn "mips_cmpgu_le_qb" +@@ -772,7 +772,7 @@ + UNSPEC_CMPGU_LE_QB))] + "ISA_HAS_DSP" + "cmpgu.le.qb\t%0,%1,%2" +- [(set_attr "type" "arith") ++ [(set_attr "type" "dspalu") + (set_attr "mode" "SI")]) + + ;; PICK* +@@ -784,7 +784,7 @@ + UNSPEC_PICK))] + "ISA_HAS_DSP" + "pick.\t%0,%1,%2" +- [(set_attr "type" "arith") ++ [(set_attr "type" "dspalu") + (set_attr "mode" "SI")]) + + ;; PACKRL* +@@ -795,7 +795,7 @@ + UNSPEC_PACKRL_PH))] + "ISA_HAS_DSP" + "packrl.ph\t%0,%1,%2" +- [(set_attr "type" "arith") ++ [(set_attr "type" "dspalu") + (set_attr "mode" "SI")]) + + ;; Table 2-6. 
MIPS DSP ASE Instructions: Accumulator and DSPControl Access +@@ -818,7 +818,7 @@ + } + return "extrv.w\t%0,%q1,%2"; + } +- [(set_attr "type" "mfhilo") ++ [(set_attr "type" "accext") + (set_attr "mode" "SI")]) + + (define_insn "mips_extr_r_w" +@@ -839,7 +839,7 @@ + } + return "extrv_r.w\t%0,%q1,%2"; + } +- [(set_attr "type" "mfhilo") ++ [(set_attr "type" "accext") + (set_attr "mode" "SI")]) + + (define_insn "mips_extr_rs_w" +@@ -860,7 +860,7 @@ + } + return "extrv_rs.w\t%0,%q1,%2"; + } +- [(set_attr "type" "mfhilo") ++ [(set_attr "type" "accext") + (set_attr "mode" "SI")]) + + ;; EXTR*_S.H +@@ -882,7 +882,7 @@ + } + return "extrv_s.h\t%0,%q1,%2"; + } +- [(set_attr "type" "mfhilo") ++ [(set_attr "type" "accext") + (set_attr "mode" "SI")]) + + ;; EXTP* +@@ -905,7 +905,7 @@ + } + return "extpv\t%0,%q1,%2"; + } +- [(set_attr "type" "mfhilo") ++ [(set_attr "type" "accext") + (set_attr "mode" "SI")]) + + (define_insn "mips_extpdp" +@@ -930,7 +930,7 @@ + } + return "extpdpv\t%0,%q1,%2"; + } +- [(set_attr "type" "mfhilo") ++ [(set_attr "type" "accext") + (set_attr "mode" "SI")]) + + ;; SHILO* +@@ -949,7 +949,7 @@ + } + return "shilov\t%q0,%2"; + } +- [(set_attr "type" "mfhilo") ++ [(set_attr "type" "accmod") + (set_attr "mode" "SI")]) + + ;; MTHLIP* +@@ -965,7 +965,7 @@ + (reg:CCDSP CCDSP_PO_REGNUM)] UNSPEC_MTHLIP))])] + "ISA_HAS_DSP && !TARGET_64BIT" + "mthlip\t%2,%q0" +- [(set_attr "type" "mfhilo") ++ [(set_attr "type" "accmod") + (set_attr "mode" "SI")]) + + ;; WRDSP +@@ -987,7 +987,7 @@ + (unspec:CCDSP [(match_dup 0) (match_dup 1)] UNSPEC_WRDSP))])] + "ISA_HAS_DSP" + "wrdsp\t%0,%1" +- [(set_attr "type" "arith") ++ [(set_attr "type" "dspalu") + (set_attr "mode" "SI")]) + + ;; RDDSP +@@ -1003,7 +1003,7 @@ + UNSPEC_RDDSP))] + "ISA_HAS_DSP" + "rddsp\t%0,%1" +- [(set_attr "type" "arith") ++ [(set_attr "type" "dspalu") + (set_attr "mode" "SI")]) + + ;; Table 2-7. 
MIPS DSP ASE Instructions: Indexed-Load +--- a/gcc/config/mips/mips-dspr2.md ++++ b/gcc/config/mips/mips-dspr2.md +@@ -9,7 +9,7 @@ + (unspec:CCDSP [(match_dup 1)] UNSPEC_ABSQ_S_QB))])] + "ISA_HAS_DSPR2" + "absq_s.qb\t%0,%z1" +- [(set_attr "type" "arith") ++ [(set_attr "type" "dspalusat") + (set_attr "mode" "SI")]) + + (define_insn "mips_addu_ph" +@@ -21,7 +21,7 @@ + (unspec:CCDSP [(match_dup 1) (match_dup 2)] UNSPEC_ADDU_PH))])] + "ISA_HAS_DSPR2" + "addu.ph\t%0,%z1,%z2" +- [(set_attr "type" "arith") ++ [(set_attr "type" "dspalu") + (set_attr "mode" "SI")]) + + (define_insn "mips_addu_s_ph" +@@ -34,7 +34,7 @@ + (unspec:CCDSP [(match_dup 1) (match_dup 2)] UNSPEC_ADDU_S_PH))])] + "ISA_HAS_DSPR2" + "addu_s.ph\t%0,%z1,%z2" +- [(set_attr "type" "arith") ++ [(set_attr "type" "dspalusat") + (set_attr "mode" "SI")]) + + (define_insn "mips_adduh_qb" +@@ -44,7 +44,7 @@ + UNSPEC_ADDUH_QB))] + "ISA_HAS_DSPR2" + "adduh.qb\t%0,%z1,%z2" +- [(set_attr "type" "arith") ++ [(set_attr "type" "dspalu") + (set_attr "mode" "SI")]) + + (define_insn "mips_adduh_r_qb" +@@ -54,7 +54,7 @@ + UNSPEC_ADDUH_R_QB))] + "ISA_HAS_DSPR2" + "adduh_r.qb\t%0,%z1,%z2" +- [(set_attr "type" "arith") ++ [(set_attr "type" "dspalusat") + (set_attr "mode" "SI")]) + + (define_insn "mips_append" +@@ -69,7 +69,7 @@ + operands[2] = GEN_INT (INTVAL (operands[2]) & 31); + return "append\t%0,%z2,%3"; + } +- [(set_attr "type" "arith") ++ [(set_attr "type" "dspalu") + (set_attr "mode" "SI")]) + + (define_insn "mips_balign" +@@ -84,7 +84,7 @@ + operands[2] = GEN_INT (INTVAL (operands[2]) & 3); + return "balign\t%0,%z2,%3"; + } +- [(set_attr "type" "arith") ++ [(set_attr "type" "dspalu") + (set_attr "mode" "SI")]) + + (define_insn "mips_cmpgdu_eq_qb" +@@ -99,7 +99,7 @@ + UNSPEC_CMPGDU_EQ_QB))])] + "ISA_HAS_DSPR2" + "cmpgdu.eq.qb\t%0,%z1,%z2" +- [(set_attr "type" "arith") ++ [(set_attr "type" "dspalu") + (set_attr "mode" "SI")]) + + (define_insn "mips_cmpgdu_lt_qb" +@@ -114,7 +114,7 @@ + UNSPEC_CMPGDU_LT_QB))])] + "ISA_HAS_DSPR2" + "cmpgdu.lt.qb\t%0,%z1,%z2" +- [(set_attr "type" "arith") ++ [(set_attr "type" "dspalu") + (set_attr "mode" "SI")]) + + (define_insn "mips_cmpgdu_le_qb" +@@ -129,7 +129,7 @@ + UNSPEC_CMPGDU_LE_QB))])] + "ISA_HAS_DSPR2" + "cmpgdu.le.qb\t%0,%z1,%z2" +- [(set_attr "type" "arith") ++ [(set_attr "type" "dspalu") + (set_attr "mode" "SI")]) + + (define_insn "mips_dpa_w_ph" +@@ -140,7 +140,7 @@ + UNSPEC_DPA_W_PH))] + "ISA_HAS_DSPR2 && !TARGET_64BIT" + "dpa.w.ph\t%q0,%z2,%z3" +- [(set_attr "type" "imadd") ++ [(set_attr "type" "dspmac") + (set_attr "mode" "SI")]) + + (define_insn "mips_dps_w_ph" +@@ -151,7 +151,7 @@ + UNSPEC_DPS_W_PH))] + "ISA_HAS_DSPR2 && !TARGET_64BIT" + "dps.w.ph\t%q0,%z2,%z3" +- [(set_attr "type" "imadd") ++ [(set_attr "type" "dspmac") + (set_attr "mode" "SI")]) + + (define_expand "mips_madd" +@@ -247,7 +247,7 @@ + UNSPEC_MULSA_W_PH))] + "ISA_HAS_DSPR2 && !TARGET_64BIT" + "mulsa.w.ph\t%q0,%z2,%z3" +- [(set_attr "type" "imadd") ++ [(set_attr "type" "dspmac") + (set_attr "mode" "SI")]) + + (define_insn "mips_mult" +@@ -277,7 +277,7 @@ + UNSPEC_PRECR_QB_PH))] + "ISA_HAS_DSPR2" + "precr.qb.ph\t%0,%z1,%z2" +- [(set_attr "type" "arith") ++ [(set_attr "type" "dspalu") + (set_attr "mode" "SI")]) + + (define_insn "mips_precr_sra_ph_w" +@@ -292,7 +292,7 @@ + operands[2] = GEN_INT (INTVAL (operands[2]) & 31); + return "precr_sra.ph.w\t%0,%z2,%3"; + } +- [(set_attr "type" "arith") ++ [(set_attr "type" "dspalu") + (set_attr "mode" "SI")]) + + (define_insn "mips_precr_sra_r_ph_w" +@@ -307,7 +307,7 @@ + operands[2] = GEN_INT 
(INTVAL (operands[2]) & 31); + return "precr_sra_r.ph.w\t%0,%z2,%3"; + } +- [(set_attr "type" "arith") ++ [(set_attr "type" "dspalu") + (set_attr "mode" "SI")]) + + (define_insn "mips_prepend" +@@ -322,7 +322,7 @@ + operands[2] = GEN_INT (INTVAL (operands[2]) & 31); + return "prepend\t%0,%z2,%3"; + } +- [(set_attr "type" "arith") ++ [(set_attr "type" "dspalu") + (set_attr "mode" "SI")]) + + (define_insn "mips_shra_qb" +@@ -340,7 +340,7 @@ + } + return "shrav.qb\t%0,%z1,%2"; + } +- [(set_attr "type" "shift") ++ [(set_attr "type" "dspalu") + (set_attr "mode" "SI")]) + + +@@ -359,7 +359,7 @@ + } + return "shrav_r.qb\t%0,%z1,%2"; + } +- [(set_attr "type" "shift") ++ [(set_attr "type" "dspalu") + (set_attr "mode" "SI")]) + + (define_insn "mips_shrl_ph" +@@ -377,7 +377,7 @@ + } + return "shrlv.ph\t%0,%z1,%2"; + } +- [(set_attr "type" "shift") ++ [(set_attr "type" "dspalu") + (set_attr "mode" "SI")]) + + (define_insn "mips_subu_ph" +@@ -390,7 +390,7 @@ + (unspec:CCDSP [(match_dup 1) (match_dup 2)] UNSPEC_SUBU_PH))])] + "ISA_HAS_DSPR2" + "subu.ph\t%0,%z1,%z2" +- [(set_attr "type" "arith") ++ [(set_attr "type" "dspalu") + (set_attr "mode" "SI")]) + + (define_insn "mips_subu_s_ph" +@@ -403,7 +403,7 @@ + (unspec:CCDSP [(match_dup 1) (match_dup 2)] UNSPEC_SUBU_S_PH))])] + "ISA_HAS_DSPR2" + "subu_s.ph\t%0,%z1,%z2" +- [(set_attr "type" "arith") ++ [(set_attr "type" "dspalusat") + (set_attr "mode" "SI")]) + + (define_insn "mips_subuh_qb" +@@ -413,7 +413,7 @@ + UNSPEC_SUBUH_QB))] + "ISA_HAS_DSPR2" + "subuh.qb\t%0,%z1,%z2" +- [(set_attr "type" "arith") ++ [(set_attr "type" "dspalu") + (set_attr "mode" "SI")]) + + (define_insn "mips_subuh_r_qb" +@@ -423,7 +423,7 @@ + UNSPEC_SUBUH_R_QB))] + "ISA_HAS_DSPR2" + "subuh_r.qb\t%0,%z1,%z2" +- [(set_attr "type" "arith") ++ [(set_attr "type" "dspalu") + (set_attr "mode" "SI")]) + + (define_insn "mips_addqh_ph" +@@ -433,7 +433,7 @@ + UNSPEC_ADDQH_PH))] + "ISA_HAS_DSPR2" + "addqh.ph\t%0,%z1,%z2" +- [(set_attr "type" "arith") ++ [(set_attr "type" "dspalu") + (set_attr "mode" "SI")]) + + (define_insn "mips_addqh_r_ph" +@@ -443,7 +443,7 @@ + UNSPEC_ADDQH_R_PH))] + "ISA_HAS_DSPR2" + "addqh_r.ph\t%0,%z1,%z2" +- [(set_attr "type" "arith") ++ [(set_attr "type" "dspalu") + (set_attr "mode" "SI")]) + + (define_insn "mips_addqh_w" +@@ -453,7 +453,7 @@ + UNSPEC_ADDQH_W))] + "ISA_HAS_DSPR2" + "addqh.w\t%0,%z1,%z2" +- [(set_attr "type" "arith") ++ [(set_attr "type" "dspalu") + (set_attr "mode" "SI")]) + + (define_insn "mips_addqh_r_w" +@@ -463,7 +463,7 @@ + UNSPEC_ADDQH_R_W))] + "ISA_HAS_DSPR2" + "addqh_r.w\t%0,%z1,%z2" +- [(set_attr "type" "arith") ++ [(set_attr "type" "dspalu") + (set_attr "mode" "SI")]) + + (define_insn "mips_subqh_ph" +@@ -473,7 +473,7 @@ + UNSPEC_SUBQH_PH))] + "ISA_HAS_DSPR2" + "subqh.ph\t%0,%z1,%z2" +- [(set_attr "type" "arith") ++ [(set_attr "type" "dspalu") + (set_attr "mode" "SI")]) + + (define_insn "mips_subqh_r_ph" +@@ -483,7 +483,7 @@ + UNSPEC_SUBQH_R_PH))] + "ISA_HAS_DSPR2" + "subqh_r.ph\t%0,%z1,%z2" +- [(set_attr "type" "arith") ++ [(set_attr "type" "dspalu") + (set_attr "mode" "SI")]) + + (define_insn "mips_subqh_w" +@@ -493,7 +493,7 @@ + UNSPEC_SUBQH_W))] + "ISA_HAS_DSPR2" + "subqh.w\t%0,%z1,%z2" +- [(set_attr "type" "arith") ++ [(set_attr "type" "dspalu") + (set_attr "mode" "SI")]) + + (define_insn "mips_subqh_r_w" +@@ -503,7 +503,7 @@ + UNSPEC_SUBQH_R_W))] + "ISA_HAS_DSPR2" + "subqh_r.w\t%0,%z1,%z2" +- [(set_attr "type" "arith") ++ [(set_attr "type" "dspalu") + (set_attr "mode" "SI")]) + + (define_insn "mips_dpax_w_ph" +@@ -514,7 +514,7 @@ + 
UNSPEC_DPAX_W_PH))] + "ISA_HAS_DSPR2 && !TARGET_64BIT" + "dpax.w.ph\t%q0,%z2,%z3" +- [(set_attr "type" "imadd") ++ [(set_attr "type" "dspmac") + (set_attr "mode" "SI")]) + + (define_insn "mips_dpsx_w_ph" +@@ -525,7 +525,7 @@ + UNSPEC_DPSX_W_PH))] + "ISA_HAS_DSPR2 && !TARGET_64BIT" + "dpsx.w.ph\t%q0,%z2,%z3" +- [(set_attr "type" "imadd") ++ [(set_attr "type" "dspmac") + (set_attr "mode" "SI")]) + + (define_insn "mips_dpaqx_s_w_ph" +@@ -540,7 +540,7 @@ + UNSPEC_DPAQX_S_W_PH))])] + "ISA_HAS_DSPR2 && !TARGET_64BIT" + "dpaqx_s.w.ph\t%q0,%z2,%z3" +- [(set_attr "type" "imadd") ++ [(set_attr "type" "dspmac") + (set_attr "mode" "SI")]) + + (define_insn "mips_dpaqx_sa_w_ph" +@@ -555,7 +555,7 @@ + UNSPEC_DPAQX_SA_W_PH))])] + "ISA_HAS_DSPR2 && !TARGET_64BIT" + "dpaqx_sa.w.ph\t%q0,%z2,%z3" +- [(set_attr "type" "imadd") ++ [(set_attr "type" "dspmacsat") + (set_attr "mode" "SI")]) + + (define_insn "mips_dpsqx_s_w_ph" +@@ -570,7 +570,7 @@ + UNSPEC_DPSQX_S_W_PH))])] + "ISA_HAS_DSPR2 && !TARGET_64BIT" + "dpsqx_s.w.ph\t%q0,%z2,%z3" +- [(set_attr "type" "imadd") ++ [(set_attr "type" "dspmac") + (set_attr "mode" "SI")]) + + (define_insn "mips_dpsqx_sa_w_ph" +@@ -585,5 +585,43 @@ + UNSPEC_DPSQX_SA_W_PH))])] + "ISA_HAS_DSPR2 && !TARGET_64BIT" + "dpsqx_sa.w.ph\t%q0,%z2,%z3" +- [(set_attr "type" "imadd") ++ [(set_attr "type" "dspmacsat") ++ (set_attr "mode" "SI")]) ++ ++;; Convert mtlo $ac[1-3],$0 => mult $ac[1-3],$0,$0 ++;; mthi $ac[1-3],$0 ++(define_peephole2 ++ [(set (match_operand:SI 0 "register_operand" "") ++ (const_int 0)) ++ (set (match_operand:SI 1 "register_operand" "") ++ (const_int 0))] ++ "ISA_HAS_DSPR2 ++ && !TARGET_MIPS16 ++ && !TARGET_64BIT ++ && (((true_regnum (operands[0]) == AC1LO_REGNUM ++ && true_regnum (operands[1]) == AC1HI_REGNUM) ++ || (true_regnum (operands[0]) == AC1HI_REGNUM ++ && true_regnum (operands[1]) == AC1LO_REGNUM)) ++ || ((true_regnum (operands[0]) == AC2LO_REGNUM ++ && true_regnum (operands[1]) == AC2HI_REGNUM) ++ || (true_regnum (operands[0]) == AC2HI_REGNUM ++ && true_regnum (operands[1]) == AC2LO_REGNUM)) ++ || ((true_regnum (operands[0]) == AC3LO_REGNUM ++ && true_regnum (operands[1]) == AC3HI_REGNUM) ++ || (true_regnum (operands[0]) == AC3HI_REGNUM ++ && true_regnum (operands[1]) == AC3LO_REGNUM)))" ++ [(parallel [(set (match_dup 0) (const_int 0)) ++ (set (match_dup 1) (const_int 0))])] ++) ++ ++(define_insn "*mips_acc_init" ++ [(parallel [(set (match_operand:SI 0 "register_operand" "=a") ++ (const_int 0)) ++ (set (match_operand:SI 1 "register_operand" "=a") ++ (const_int 0))])] ++ "ISA_HAS_DSPR2 ++ && !TARGET_MIPS16 ++ && !TARGET_64BIT" ++ "mult\t%q0,$0,$0\t\t# Clear ACC HI/LO" ++ [(set_attr "type" "imul") + (set_attr "mode" "SI")]) +--- a/gcc/config/mips/mips-protos.h ++++ b/gcc/config/mips/mips-protos.h +@@ -177,6 +177,8 @@ extern rtx mips_emit_move (rtx, rtx); + extern bool mips_split_symbol (rtx, rtx, enum machine_mode, rtx *); + extern rtx mips_unspec_address (rtx, enum mips_symbol_type); + extern bool mips_legitimize_address (rtx *, enum machine_mode); ++extern int mask_low_and_shift_len (enum machine_mode, unsigned HOST_WIDE_INT, ++ unsigned HOST_WIDE_INT); + extern void mips_move_integer (rtx, rtx, unsigned HOST_WIDE_INT); + extern bool mips_legitimize_move (enum machine_mode, rtx, rtx); + +@@ -239,6 +241,8 @@ extern void mips_print_operand_address ( + extern void mips_output_external (FILE *, tree, const char *); + extern void mips_output_filename (FILE *, const char *); + extern void mips_output_ascii (FILE *, const char *, size_t); ++extern void 
octeon_output_shared_variable (FILE *, tree, const char *, ++ unsigned HOST_WIDE_INT, int); + extern void mips_output_aligned_decl_common (FILE *, tree, const char *, + unsigned HOST_WIDE_INT, + unsigned int); +@@ -283,14 +287,18 @@ extern unsigned int mips_hard_regno_nreg + extern bool mips_linked_madd_p (rtx, rtx); + extern bool mips_store_data_bypass_p (rtx, rtx); + extern rtx mips_prefetch_cookie (rtx, rtx); ++extern int mips_mult_madd_chain_bypass_p (rtx, rtx); ++extern int mips_dspalu_bypass_p (rtx, rtx); + + extern void irix_asm_output_align (FILE *, unsigned); + extern const char *current_section_name (void); + extern unsigned int current_section_flags (void); + extern bool mips_use_ins_ext_p (rtx, HOST_WIDE_INT, HOST_WIDE_INT); ++extern void mips_adjust_register_ext_operands (rtx *); + + extern const char *mips16e_output_save_restore (rtx, HOST_WIDE_INT); + extern bool mips16e_save_restore_pattern_p (rtx, HOST_WIDE_INT, + struct mips16e_save_restore_info *); ++extern void mips_expand_vector_init (rtx, rtx); + + #endif /* ! GCC_MIPS_PROTOS_H */ +--- a/gcc/config/mips/mips.c ++++ b/gcc/config/mips/mips.c +@@ -232,6 +232,8 @@ static const char *const mips_fp_conditi + MIPS_FP_CONDITIONS (STRINGIFY) + }; + ++static rtx mips_gnu_local_gp (void); ++ + /* Information about a function's frame layout. */ + struct mips_frame_info GTY(()) { + /* The size of the frame in bytes. */ +@@ -455,6 +457,10 @@ static int mips_base_align_functions; /* + /* The -mcode-readable setting. */ + enum mips_code_readable_setting mips_code_readable = CODE_READABLE_YES; + ++/* If size of stack frame exceeds this value, compiler will emit ++ warning message. */ ++static HOST_WIDE_INT mips_warn_framesize = -1; ++ + /* Index [M][R] is true if register R is allowed to hold a value of mode M. */ + bool mips_hard_regno_mode_ok[(int) MAX_MACHINE_MODE][FIRST_PSEUDO_REGISTER]; + +@@ -497,7 +503,7 @@ const enum reg_class mips_regno_to_class + MD0_REG, MD1_REG, NO_REGS, ST_REGS, + ST_REGS, ST_REGS, ST_REGS, ST_REGS, + ST_REGS, ST_REGS, ST_REGS, NO_REGS, +- NO_REGS, ALL_REGS, ALL_REGS, NO_REGS, ++ NO_REGS, FRAME_REGS, FRAME_REGS, NO_REGS, + COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS, + COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS, + COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS, +@@ -527,9 +533,16 @@ const enum reg_class mips_regno_to_class + ALL_REGS, ALL_REGS, ALL_REGS, ALL_REGS + }; + ++#ifdef CVMX_SHARED_BSS_FLAGS ++static tree octeon_handle_cvmx_shared_attribute (tree *, tree, tree, int, bool *); ++#endif ++ + /* The value of TARGET_ATTRIBUTE_TABLE. */ + const struct attribute_spec mips_attribute_table[] = { + /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */ ++#ifdef CVMX_SHARED_BSS_FLAGS ++ { "cvmx_shared", 0, 0, true, false, false, octeon_handle_cvmx_shared_attribute }, ++#endif + { "long_call", 0, 0, false, true, true, NULL }, + { "far", 0, 0, false, true, true, NULL }, + { "near", 0, 0, false, true, true, NULL }, +@@ -642,6 +655,9 @@ static const struct mips_cpu_info mips_c + { "sb1", PROCESSOR_SB1, 64, PTF_AVOID_BRANCHLIKELY }, + { "sb1a", PROCESSOR_SB1A, 64, PTF_AVOID_BRANCHLIKELY }, + { "sr71000", PROCESSOR_SR71000, 64, PTF_AVOID_BRANCHLIKELY }, ++ { "xlr", PROCESSOR_XLR, 64, 0 }, ++ { "octeon", PROCESSOR_OCTEON, 64, 0 }, ++ { "ice9", PROCESSOR_5KF, 64 }, /* May diverge from 5kf in future. */ + }; + + /* Default costs. 
If these are used for a processor we should look +@@ -1006,6 +1022,26 @@ static const struct mips_rtx_cost_data m + { /* SR71000 */ + DEFAULT_COSTS + }, ++ { /* OCTEON */ ++ SOFT_FP_COSTS, ++ /* Increase the latency values (5, 72) by 10% because MULT and ++ DIV are no fully pipelined. */ ++ COSTS_N_INSNS (6), /* int_mult_si */ ++ COSTS_N_INSNS (6), /* int_mult_di */ ++ COSTS_N_INSNS (80), /* int_div_si */ ++ COSTS_N_INSNS (80), /* int_div_di */ ++ 1, /* branch_cost */ ++ 4 /* memory_latency */ ++ }, ++ { /* XLR */ ++ SOFT_FP_COSTS, ++ COSTS_N_INSNS (8), /* int_mult_si */ ++ COSTS_N_INSNS (8), /* int_mult_di */ ++ COSTS_N_INSNS (72), /* int_div_si */ ++ COSTS_N_INSNS (72), /* int_div_di */ ++ 1, /* branch_cost */ ++ 4 /* memory_latency */ ++ } + }; + + /* This hash table keeps track of implicit "mips16" and "nomips16" attributes +@@ -1213,7 +1249,29 @@ mips_split_plus (rtx x, rtx *base_ptr, H + static unsigned int mips_build_integer (struct mips_integer_op *, + unsigned HOST_WIDE_INT); + +-/* A subroutine of mips_build_integer, with the same interface. ++/* See whether: ++ ++ (and:MODE (ashift:MODE X SHIFT) MASK) ++ ++ would have the effect of masking the low N bits of X and then shifting ++ the result left SHIFT bits. Return N if so, otherwise return -1. */ ++ ++int ++mask_low_and_shift_len (enum machine_mode mode, ++ unsigned HOST_WIDE_INT shift, ++ unsigned HOST_WIDE_INT mask) ++{ ++ if (shift >= GET_MODE_BITSIZE (mode)) ++ return -1; ++ ++ /* Undo the CONST_INT sign-extension canonicalisation. */ ++ mask &= GET_MODE_MASK (mode); ++ ++ /* We don't care about the low SHIFT bits of MASK. */ ++ return exact_log2 ((mask >> shift) + 1); ++} ++ ++/* Subroutine of mips_build_integer (with the same interface). + Assume that the final action in the sequence should be a left shift. */ + + static unsigned int +@@ -1390,7 +1448,7 @@ mips_classify_symbol (const_rtx x, enum + if (TARGET_MIPS16_SHORT_JUMP_TABLES) + return SYMBOL_PC_RELATIVE; + +- if (TARGET_ABICALLS && !TARGET_ABSOLUTE_ABICALLS) ++ if (TARGET_ABICALLS && !TARGET_ABSOLUTE_ABICALLS && flag_pic) + return SYMBOL_GOT_PAGE_OFST; + + return SYMBOL_ABSOLUTE; +@@ -1413,14 +1471,16 @@ mips_classify_symbol (const_rtx x, enum + return SYMBOL_GP_RELATIVE; + } + +- /* Do not use small-data accesses for weak symbols; they may end up +- being zero. */ +- if (TARGET_GPOPT && SYMBOL_REF_SMALL_P (x) && !SYMBOL_REF_WEAK (x)) ++ /* Use a small-data access if appropriate; but do not use small-data ++ accesses for weak symbols; they may end up being zero. */ ++ if (TARGET_GPOPT ++ && SYMBOL_REF_SMALL_P (x) ++ && !SYMBOL_REF_WEAK (x)) + return SYMBOL_GP_RELATIVE; + +- /* Don't use GOT accesses for locally-binding symbols when -mno-shared +- is in effect. */ +- if (TARGET_ABICALLS ++ /* Don't use GOT accesses when compiling for the non-PIC ABI, ++ or for locally-binding symbols when -mno-shared is in effect. */ ++ if (TARGET_ABICALLS && flag_pic + && !(TARGET_ABSOLUTE_ABICALLS && mips_symbol_binds_local_p (x))) + { + /* There are three cases to consider: +@@ -1800,6 +1860,24 @@ mips_valid_base_register_p (rtx x, enum + && mips_regno_mode_ok_for_base_p (REGNO (x), mode, strict_p)); + } + ++/* Return true if, for every base register BASE_REG, (plus BASE_REG X) ++ can address a value of mode MODE. */ ++ ++static bool ++mips_valid_offset_p (rtx x, enum machine_mode mode) ++{ ++ /* Check that X is a signed 16-bit number. 
*/ ++ if (!const_arith_operand (x, Pmode)) ++ return false; ++ ++ /* We may need to split multiword moves, so make sure that every word ++ is accessible. */ ++ if (GET_MODE_SIZE (mode) > UNITS_PER_WORD ++ && !SMALL_OPERAND (INTVAL (x) + GET_MODE_SIZE (mode) - UNITS_PER_WORD)) ++ return false; ++ ++ return true; ++} + /* Return true if X is a valid address for machine mode MODE. If it is, + fill in INFO appropriately. STRICT_P is true if REG_OK_STRICT is in + effect. */ +@@ -2175,7 +2253,9 @@ gen_load_const_gp (rtx reg) + + /* Return a pseudo register that contains the value of $gp throughout + the current function. Such registers are needed by MIPS16 functions, +- for which $gp itself is not a valid base register or addition operand. */ ++ for which $gp itself is not a valid base register or addition operand. ++ Also hold the GP in a non-PIC abicalls function which refers to TLS ++ data - such functions do not require $28 or even a hard register. */ + + static rtx + mips16_gp_pseudo_reg (void) +@@ -2191,7 +2271,11 @@ mips16_gp_pseudo_reg (void) + { + rtx insn, scan, after; + +- insn = gen_load_const_gp (cfun->machine->mips16_gp_pseudo_rtx); ++ if (TARGET_NONPIC_ABICALLS) ++ insn = gen_loadgp_nonpic (cfun->machine->mips16_gp_pseudo_rtx, ++ mips_gnu_local_gp ()); ++ else ++ insn = gen_load_const_gp (cfun->machine->mips16_gp_pseudo_rtx); + + push_topmost_sequence (); + /* We need to emit the initialization after the FUNCTION_BEG +@@ -2333,6 +2417,19 @@ mips_add_offset (rtx temp, rtx reg, HOST + return plus_constant (reg, offset); + } + ++/* Return the RTX to use for explicit GOT accesses. Uses a pseudo if ++ possible. */ ++ ++static rtx ++mips_got_base (void) ++{ ++ gcc_assert (can_create_pseudo_p ()); ++ if (TARGET_NONPIC_ABICALLS) ++ return mips16_gp_pseudo_reg (); ++ else ++ return pic_offset_table_rtx; ++} ++ + /* The __tls_get_attr symbol. */ + static GTY(()) rtx mips_tls_symbol; + +@@ -2356,7 +2453,7 @@ mips_call_tls_get_addr (rtx sym, enum mi + start_sequence (); + + emit_insn (gen_rtx_SET (Pmode, a0, +- gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, loc))); ++ gen_rtx_LO_SUM (Pmode, mips_got_base (), loc))); + insn = mips_expand_call (v0, mips_tls_symbol, const0_rtx, const0_rtx, false); + CONST_OR_PURE_CALL_P (insn) = 1; + use_reg (&CALL_INSN_FUNCTION_USAGE (insn), a0); +@@ -2435,9 +2532,9 @@ mips_legitimize_tls_address (rtx loc) + tmp1 = gen_reg_rtx (Pmode); + tmp2 = mips_unspec_address (loc, SYMBOL_GOTTPREL); + if (Pmode == DImode) +- emit_insn (gen_load_gotdi (tmp1, pic_offset_table_rtx, tmp2)); ++ emit_insn (gen_load_gotdi (tmp1, mips_got_base (), tmp2)); + else +- emit_insn (gen_load_gotsi (tmp1, pic_offset_table_rtx, tmp2)); ++ emit_insn (gen_load_gotsi (tmp1, mips_got_base (), tmp2)); + dest = gen_reg_rtx (Pmode); + emit_insn (gen_add3_insn (dest, tmp1, tp)); + break; +@@ -2464,7 +2561,7 @@ bool + mips_legitimize_address (rtx *xloc, enum machine_mode mode) + { + rtx base; +- HOST_WIDE_INT offset; ++ HOST_WIDE_INT intval, high, offset; + + if (mips_tls_symbol_p (*xloc)) + { +@@ -2485,6 +2582,32 @@ mips_legitimize_address (rtx *xloc, enum + *xloc = mips_add_offset (NULL, base, offset); + return true; + } ++ ++ /* Handle references to constant addresses by loading the high part ++ into a register and using an offset for the low part. 
*/ ++ if (GET_CODE (base) == CONST_INT) ++ { ++ intval = INTVAL (base); ++ high = trunc_int_for_mode (CONST_HIGH_PART (intval), Pmode); ++ offset = CONST_LOW_PART (intval); ++ /* Ignore cases in which a positive address would be accessed by a ++ negative offset from a negative address. The required wraparound ++ does not occur for 32-bit addresses on 64-bit targets, and it is ++ very unlikely that such an access would occur in real code anyway. ++ ++ If the low offset is not legitimate for MODE, prefer to load ++ the constant normally, instead of using mips_force_address on ++ the legitimized address. The latter option would cause us to ++ use (D)ADDIU unconditionally, but LUI/ORI is more efficient ++ than LUI/ADDIU on some targets. */ ++ if ((intval < 0 || high > 0) ++ && mips_valid_offset_p (GEN_INT (offset), mode)) ++ { ++ base = mips_force_temporary (NULL, GEN_INT (high)); ++ *xloc = plus_constant (base, offset); ++ return true; ++ } ++ } + return false; + } + +@@ -3360,6 +3483,27 @@ mips_rtx_costs (rtx x, int code, int out + return false; + + case ZERO_EXTEND: ++ /* Check for BADDU patterns; see mips.md. */ ++ if (ISA_HAS_BADDU) ++ { ++ rtx op0 = XEXP (x, 0); ++ if ((GET_CODE (op0) == TRUNCATE || GET_CODE (op0) == SUBREG) ++ && GET_MODE (op0) == QImode ++ && GET_CODE (XEXP (op0, 0)) == PLUS) ++ { ++ rtx op1 = XEXP (XEXP (op0, 0), 0); ++ rtx op2 = XEXP (XEXP (op0, 0), 1); ++ if (GET_CODE (op1) == SUBREG ++ && GET_CODE (XEXP (op1, 0)) == TRUNCATE) ++ op1 = XEXP (XEXP (op1, 0), 0); ++ if (GET_CODE (op2) == SUBREG ++ && GET_CODE (XEXP (op2, 0)) == TRUNCATE) ++ op2 = XEXP (XEXP (op2, 0), 0); ++ *total = ++ COSTS_N_INSNS (1) + rtx_cost (op1, 0) + rtx_cost (op2, 0); ++ return true; ++ } ++ } + *total = mips_zero_extend_cost (mode, XEXP (x, 0)); + return false; + +@@ -3869,6 +4013,30 @@ mips_emit_compare (enum rtx_code *code, + } + } + ++/* If it is possible and profitable to use SEQ or SNE to compare a ++ register with OP, return the instruction's second source operand, ++ otherwise return null. MODE is the mode of OP. */ ++ ++static rtx ++mips_get_seq_sne_operand (enum machine_mode mode, rtx op) ++{ ++ if (!ISA_HAS_SEQ_SNE) ++ return NULL; ++ ++ if (reg_imm10_operand (op, mode)) ++ return op; ++ ++ /* If OP is in the range of either ADDIU or XORI, we could either ++ use those instructions and boolify the result, or move OP into a ++ register and use SEQ or SNE. Prefer the former, because it is ++ better than the latter when a 0/1 result is not needed. */ ++ if (uns_arith_operand (op, mode) ++ || arith_operand (op, mode)) ++ return NULL; ++ ++ return force_reg (mode, op); ++} ++ + /* Try comparing cmp_operands[0] and cmp_operands[1] using rtl code CODE. + Store the result in TARGET and return true if successful. 
+ +@@ -3883,8 +4051,15 @@ mips_expand_scc (enum rtx_code code, rtx + target = gen_lowpart (GET_MODE (cmp_operands[0]), target); + if (code == EQ || code == NE) + { +- rtx zie = mips_zero_if_equal (cmp_operands[0], cmp_operands[1]); +- mips_emit_binary (code, target, zie, const0_rtx); ++ rtx scc_operand = mips_get_seq_sne_operand (GET_MODE (cmp_operands[0]), ++ cmp_operands[1]); ++ if (scc_operand) ++ mips_emit_binary (code, target, cmp_operands[0], scc_operand); ++ else ++ { ++ rtx zie = mips_zero_if_equal (cmp_operands[0], cmp_operands[1]); ++ mips_emit_binary (code, target, zie, const0_rtx); ++ } + } + else + mips_emit_int_order_test (code, 0, target, +@@ -5954,6 +6129,15 @@ mips_expand_ext_as_unaligned_load (rtx d + if (!mips_get_unaligned_mem (&src, width, bitpos, &left, &right)) + return false; + ++ if (ISA_HAS_UL_US) ++ { ++ if (GET_MODE (dest) == DImode) ++ emit_insn (gen_mov_uld (dest, src, left)); ++ else ++ emit_insn (gen_mov_ulw (dest, src, left)); ++ return true; ++ } ++ + temp = gen_reg_rtx (GET_MODE (dest)); + if (GET_MODE (dest) == DImode) + { +@@ -5988,6 +6172,16 @@ mips_expand_ins_as_unaligned_store (rtx + + mode = mode_for_size (width, MODE_INT, 0); + src = gen_lowpart (mode, src); ++ ++ if (ISA_HAS_UL_US) ++ { ++ if (GET_MODE (src) == DImode) ++ emit_insn (gen_mov_usd (dest, src, left)); ++ else ++ emit_insn (gen_mov_usw (dest, src, left)); ++ return true; ++ } ++ + if (mode == DImode) + { + emit_insn (gen_mov_sdl (dest, src, left)); +@@ -6398,6 +6592,27 @@ mips_print_float_branch_condition (FILE + } + } + ++/* Likewise bit branches. */ ++ ++static void ++mips_print_bit_branch_condition (FILE *file, enum rtx_code code, int letter) ++{ ++ switch (code) ++ { ++ case EQ: ++ fputs ("bit0", file); ++ break; ++ ++ case NE: ++ fputs ("bit1", file); ++ break; ++ ++ default: ++ output_operand_lossage ("'%%%c' is not a valid operand prefix", letter); ++ break; ++ } ++} ++ + /* Implement the PRINT_OPERAND macro. The MIPS-specific operand codes are: + + 'X' Print CONST_INT OP in hexadecimal format. +@@ -6419,8 +6634,11 @@ mips_print_float_branch_condition (FILE + 'D' Print the second part of a double-word register or memory operand. + 'L' Print the low-order register in a double-word register operand. + 'M' Print high-order register in a double-word register operand. +- 'z' Print $0 if OP is zero, otherwise print OP normally. */ +- ++ 'z' Print $0 if OP is zero, otherwise print OP normally. ++ 'E' substract 1 from the const_int value. ++ 'G' print part of opcode for a branch-bit condition. ++ 'H' print part of opcode for a branch-bit condition, inverted. 
*/ ++ + void + mips_print_operand (FILE *file, rtx op, int letter) + { +@@ -6518,6 +6736,23 @@ mips_print_operand (FILE *file, rtx op, + output_operand_lossage ("invalid use of '%%%c'", letter); + break; + ++ case 'E': ++ { ++ if (code != CONST_INT) ++ output_operand_lossage ("'%%E' misused"); ++ fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (op) - 1); ++ } ++ break; ++ ++ case 'G': ++ mips_print_bit_branch_condition (file, code, letter); ++ break; ++ ++ case 'H': ++ mips_print_bit_branch_condition (file, reverse_condition (code), ++ letter); ++ break; ++ + default: + switch (code) + { +@@ -6627,7 +6862,7 @@ mips_select_rtx_section (enum machine_mo + static section * + mips_function_rodata_section (tree decl) + { +- if (!TARGET_ABICALLS || TARGET_GPWORD) ++ if (!TARGET_ABICALLS || !flag_pic || TARGET_GPWORD) + return default_function_rodata_section (decl); + + if (decl && DECL_SECTION_NAME (decl)) +@@ -6667,6 +6902,12 @@ mips_in_small_data_p (const_tree decl) + if (TARGET_ABICALLS || TARGET_VXWORKS_RTP) + return false; + ++#ifdef CVMX_SHARED_BSS_FLAGS ++ if (TARGET_OCTEON && TREE_CODE (decl) == VAR_DECL ++ && lookup_attribute ("cvmx_shared", DECL_ATTRIBUTES (decl))) ++ return false; ++#endif ++ + if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl) != 0) + { + const char *name; +@@ -6845,6 +7086,26 @@ mips_output_filename (FILE *stream, cons + } + } + ++/* Initialize vector TARGET to VALS. */ ++ ++void ++mips_expand_vector_init (rtx target, rtx vals) ++{ ++ enum machine_mode mode = GET_MODE (target); ++ enum machine_mode inner = GET_MODE_INNER (mode); ++ unsigned int i, n_elts = GET_MODE_NUNITS (mode); ++ rtx mem; ++ ++ gcc_assert (VECTOR_MODE_P (mode)); ++ ++ mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), 0); ++ for (i = 0; i < n_elts; i++) ++ emit_move_insn (adjust_address_nv (mem, inner, i * GET_MODE_SIZE (inner)), ++ XVECEXP (vals, 0, i)); ++ ++ emit_move_insn (target, mem); ++} ++ + /* Implement TARGET_ASM_OUTPUT_DWARF_DTPREL. */ + + static void ATTRIBUTE_UNUSED +@@ -6893,6 +7154,37 @@ mips_dwarf_register_span (rtx reg) + return NULL_RTX; + } + ++/* DSP ALU can bypass data with no delays for the following pairs. */ ++enum insn_code dspalu_bypass_table[][2] = ++{ ++ {CODE_FOR_mips_addsc, CODE_FOR_mips_addwc}, ++ {CODE_FOR_mips_cmpu_eq_qb, CODE_FOR_mips_pick_qb}, ++ {CODE_FOR_mips_cmpu_lt_qb, CODE_FOR_mips_pick_qb}, ++ {CODE_FOR_mips_cmpu_le_qb, CODE_FOR_mips_pick_qb}, ++ {CODE_FOR_mips_cmp_eq_ph, CODE_FOR_mips_pick_ph}, ++ {CODE_FOR_mips_cmp_lt_ph, CODE_FOR_mips_pick_ph}, ++ {CODE_FOR_mips_cmp_le_ph, CODE_FOR_mips_pick_ph}, ++ {CODE_FOR_mips_wrdsp, CODE_FOR_mips_insv} ++}; ++ ++int ++mips_dspalu_bypass_p (rtx out_insn, rtx in_insn) ++{ ++ int i; ++ int num_bypass = (sizeof (dspalu_bypass_table) ++ / (2 * sizeof (enum insn_code))); ++ enum insn_code out_icode = INSN_CODE (out_insn); ++ enum insn_code in_icode = INSN_CODE (in_insn); ++ ++ for (i = 0; i < num_bypass; i++) ++ { ++ if (out_icode == dspalu_bypass_table[i][0] ++ && in_icode == dspalu_bypass_table[i][1]) ++ return true; ++ } ++ ++ return false; ++} + /* Implement ASM_OUTPUT_ASCII. */ + + void +@@ -7117,16 +7409,26 @@ mips_file_start (void) + "\t.previous\n", TARGET_LONG64 ? 64 : 32); + + #ifdef HAVE_AS_GNU_ATTRIBUTE ++#ifdef TARGET_MIPS_SDEMTK ++ fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n", ++ (!TARGET_NO_FLOAT ++ ? (TARGET_HARD_FLOAT ++ ? (TARGET_DOUBLE_FLOAT ++ ? ((!TARGET_64BIT && TARGET_FLOAT64) ? 
4 : 1) : 2) : 3) : 0)); ++#else + fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n", + (TARGET_HARD_FLOAT_ABI + ? (TARGET_DOUBLE_FLOAT + ? ((!TARGET_64BIT && TARGET_FLOAT64) ? 4 : 1) : 2) : 3)); + #endif ++#endif + } + + /* If TARGET_ABICALLS, tell GAS to generate -KPIC code. */ + if (TARGET_ABICALLS) + fprintf (asm_out_file, "\t.abicalls\n"); ++ if (TARGET_ABICALLS && !flag_pic) ++ fprintf (asm_out_file, "\t.option\tpic0\n"); + + if (flag_verbose_asm) + fprintf (asm_out_file, "\n%s -G value = %d, Arch = %s, ISA = %d\n", +@@ -7617,6 +7919,7 @@ mips16e_output_save_restore (rtx pattern + return buffer; + } + ++ + /* Return true if the current function has an insn that implicitly + refers to $gp. */ + +@@ -7631,6 +7934,7 @@ mips_function_has_gp_insn (void) + push_topmost_sequence (); + for (insn = get_insns (); insn; insn = NEXT_INSN (insn)) + if (USEFUL_INSN_P (insn) ++ + && (get_attr_got (insn) != GOT_UNSET + || mips_small_data_pattern_p (PATTERN (insn)))) + { +@@ -7720,7 +8024,7 @@ mips_save_reg_p (unsigned int regno) + { + /* We only need to save $gp if TARGET_CALL_SAVED_GP and only then + if we have not chosen a call-clobbered substitute. */ +- if (regno == GLOBAL_POINTER_REGNUM) ++ if (regno == GLOBAL_POINTER_REGNUM && fixed_regs[regno]) + return TARGET_CALL_SAVED_GP && cfun->machine->global_pointer == regno; + + /* Check call-saved registers. */ +@@ -7944,7 +8248,7 @@ mips_current_loadgp_style (void) + if (TARGET_RTP_PIC) + return LOADGP_RTP; + +- if (TARGET_ABSOLUTE_ABICALLS) ++ if (TARGET_ABSOLUTE_ABICALLS || !flag_pic) + return LOADGP_ABSOLUTE; + + return TARGET_NEWABI ? LOADGP_NEWABI : LOADGP_OLDABI; +@@ -8059,9 +8363,10 @@ mips_restore_gp (void) + { + rtx base, address; + +- gcc_assert (TARGET_ABICALLS && TARGET_OLDABI); ++ gcc_assert (TARGET_ABICALLS && TARGET_OLDABI && flag_pic); + + base = frame_pointer_needed ? hard_frame_pointer_rtx : stack_pointer_rtx; ++ + address = mips_add_offset (pic_offset_table_rtx, base, + current_function_outgoing_args_size); + mips_emit_move (pic_offset_table_rtx, gen_frame_mem (Pmode, address)); +@@ -8312,7 +8617,18 @@ mips_save_reg (rtx reg, rtx mem) + + /* The __gnu_local_gp symbol. */ + +-static GTY(()) rtx mips_gnu_local_gp; ++static GTY(()) rtx mips_gnu_local_gp_rtx; ++ ++static rtx ++mips_gnu_local_gp (void) ++{ ++ if (mips_gnu_local_gp_rtx == NULL) ++ { ++ mips_gnu_local_gp_rtx = gen_rtx_SYMBOL_REF (Pmode, "__gnu_local_gp"); ++ SYMBOL_REF_FLAGS (mips_gnu_local_gp_rtx) |= SYMBOL_FLAG_LOCAL; ++ } ++ return mips_gnu_local_gp_rtx; ++} + + /* If we're generating n32 or n64 abicalls, emit instructions + to set up the global pointer. */ +@@ -8326,14 +8642,9 @@ mips_emit_loadgp (void) + switch (mips_current_loadgp_style ()) + { + case LOADGP_ABSOLUTE: +- if (mips_gnu_local_gp == NULL) +- { +- mips_gnu_local_gp = gen_rtx_SYMBOL_REF (Pmode, "__gnu_local_gp"); +- SYMBOL_REF_FLAGS (mips_gnu_local_gp) |= SYMBOL_FLAG_LOCAL; +- } + emit_insn (Pmode == SImode +- ? gen_loadgp_absolute_si (pic_reg, mips_gnu_local_gp) +- : gen_loadgp_absolute_di (pic_reg, mips_gnu_local_gp)); ++ ? gen_loadgp_absolute_si (pic_reg, mips_gnu_local_gp ()) ++ : gen_loadgp_absolute_di (pic_reg, mips_gnu_local_gp ())); + break; + + case LOADGP_NEWABI: +@@ -8490,7 +8801,7 @@ mips_expand_prologue (void) + mips_emit_loadgp (); + + /* Initialize the $gp save slot. 
*/ +- if (frame->cprestore_size > 0) ++ if (frame->cprestore_size > 0 && flag_pic ) + emit_insn (gen_cprestore (GEN_INT (current_function_outgoing_args_size))); + + /* If we are profiling, make sure no instructions are scheduled before +@@ -8549,6 +8860,11 @@ mips_expand_epilogue (bool sibcall_p) + step1 = frame->total_size; + step2 = 0; + ++ if (mips_warn_framesize >= 0 ++ && step1 > mips_warn_framesize) ++ warning (0, "frame size of %qs is " HOST_WIDE_INT_PRINT_DEC " bytes", ++ current_function_name (), step1); ++ + /* Work out which register holds the frame address. */ + if (!frame_pointer_needed) + base = stack_pointer_rtx; +@@ -9456,10 +9772,15 @@ mips_output_division (const char *divisi + s = "bnez\t%2,1f\n\tbreak\t7\n1:"; + } + else if (GENERATE_DIVIDE_TRAPS) +- { +- output_asm_insn (s, operands); +- s = "teq\t%2,%.,7"; +- } ++ { ++ if (TUNE_74K) ++ output_asm_insn ("teq\t%2,%.,7", operands); ++ else ++ { ++ output_asm_insn (s, operands); ++ s = "teq\t%2,%.,7"; ++ } ++ } + else + { + output_asm_insn ("%(bne\t%2,%.,1f", operands); +@@ -9552,6 +9873,7 @@ mips_issue_rate (void) + case PROCESSOR_R5500: + case PROCESSOR_R7000: + case PROCESSOR_R9000: ++ case PROCESSOR_OCTEON: + return 2; + + case PROCESSOR_SB1: +@@ -9577,6 +9899,11 @@ mips_multipass_dfa_lookahead (void) + if (TUNE_SB1) + return 4; + ++ /* Because of the two pipelines we have at most two alternative ++ schedules on Octeon. */ ++ if (mips_tune == PROCESSOR_OCTEON) ++ return 2; ++ + return 0; + } + +@@ -9613,7 +9940,17 @@ mips_maybe_swap_ready (rtx *ready, int p + ready[pos2] = temp; + } + } +- ++ ++int ++mips_mult_madd_chain_bypass_p (rtx out_insn ATTRIBUTE_UNUSED, ++ rtx in_insn ATTRIBUTE_UNUSED) ++{ ++ if (reload_completed) ++ return false; ++ else ++ return true; ++} ++ + /* Used by TUNE_MACC_CHAINS to record the last scheduled instruction + that may clobber hi or lo. */ + static rtx mips_macc_chains_last_hilo; +@@ -11908,6 +12245,28 @@ mips_parse_cpu (const char *cpu_string) + return NULL; + } + ++/* Prepare the extv/extzv operands in OPERANDS for a register extraction. ++ The problem here is that the extv interface always provides word_mode ++ register operands, even if the values were originally SImode. ++ We nevertheless want to use SImode operations for naturally-SImode ++ operands because SUBREGs are harder to optimize. */ ++ ++void ++mips_adjust_register_ext_operands (rtx *operands) ++{ ++ if (GET_CODE (operands[0]) == SUBREG ++ && GET_MODE (operands[0]) == DImode ++ && GET_CODE (operands[1]) == SUBREG ++ && GET_MODE (operands[1]) == DImode ++ && GET_MODE (SUBREG_REG (operands[0])) == SImode ++ && GET_MODE (SUBREG_REG (operands[1])) == SImode ++ && INTVAL (operands[2]) + INTVAL (operands[3]) <= 32) ++ { ++ operands[0] = SUBREG_REG (operands[0]); ++ operands[1] = SUBREG_REG (operands[1]); ++ } ++} ++ + /* Set up globals to generate code for the ISA or processor + described by INFO. */ + +@@ -11979,6 +12338,9 @@ mips_handle_option (size_t code, const c + return false; + return true; + ++ case OPT_mwarn_framesize_: ++ return sscanf (arg, HOST_WIDE_INT_PRINT_DEC, &mips_warn_framesize) == 1; ++ + default: + return true; + } +@@ -11995,10 +12357,6 @@ mips_override_options (void) + SUBTARGET_OVERRIDE_OPTIONS; + #endif + +- /* Set the small data limit. */ +- mips_small_data_threshold = (g_switch_set +- ? g_switch_value +- : MIPS_DEFAULT_GVALUE); + + /* The following code determines the architecture and register size. + Similar code was added to GAS 2.14 (see tc-mips.c:md_after_parse_args()). 
+@@ -12088,6 +12446,10 @@ mips_override_options (void) + + /* End of code shared with GAS. */ + ++ /* The non-PIC ABI may only be used in conjunction with the o32 ABI. */ ++ if (TARGET_ABICALLS && !flag_pic && mips_abi != ABI_32) ++ sorry ("non-PIC abicalls may only be used with the o32 ABI"); ++ + /* If no -mlong* option was given, infer it from the other options. */ + if ((target_flags_explicit & MASK_LONG64) == 0) + { +@@ -12136,23 +12498,21 @@ mips_override_options (void) + target_flags &= ~MASK_ABICALLS; + } + +- /* MIPS16 cannot generate PIC yet. */ ++ /* MIPS16 cannot generate PIC or abicalls yet. */ + if (TARGET_MIPS16 && (flag_pic || TARGET_ABICALLS)) + { +- sorry ("MIPS16 PIC"); ++ sorry ("MIPS16 PIC or abicalls are not yet implemented"); + target_flags &= ~MASK_ABICALLS; + flag_pic = flag_pie = flag_shlib = 0; + } + +- if (TARGET_ABICALLS) +- /* We need to set flag_pic for executables as well as DSOs +- because we may reference symbols that are not defined in +- the final executable. (MIPS does not use things like +- copy relocs, for example.) +- +- Also, there is a body of code that uses __PIC__ to distinguish +- between -mabicalls and -mno-abicalls code. */ +- flag_pic = 1; ++ /* For SDE, switch on ABICALLS mode if -fpic or -fpie were used, and the ++ user hasn't explicitly disabled these modes. */ ++ if (TARGET_MIPS_SDE ++ && (flag_pic || flag_pie) && !TARGET_ABICALLS ++ && !((target_flags_explicit & MASK_ABICALLS)) ++ && mips_abi != ABI_EABI) ++ target_flags |= MASK_ABICALLS; + + /* -mvr4130-align is a "speed over size" optimization: it usually produces + faster code, but at the expense of more nops. Enable it at -O3 and +@@ -12167,6 +12527,11 @@ mips_override_options (void) + + /* If we have a nonzero small-data limit, check that the -mgpopt + setting is consistent with the other target flags. */ ++ ++ /* Set the small data limit. */ ++ mips_small_data_threshold = (g_switch_set ++ ? g_switch_value ++ : MIPS_DEFAULT_GVALUE); + if (mips_small_data_threshold > 0) + { + if (!TARGET_GPOPT) +@@ -12262,6 +12627,10 @@ mips_override_options (void) + /* Function to allocate machine-dependent function status. */ + init_machine_status = &mips_init_machine_status; + ++ /* __thread_support is not supported by uClibc. */ ++ if (building_for_uclibc) ++ targetm.have_tls = 0; ++ + /* Default to working around R4000 errata only if the processor + was selected explicitly. */ + if ((target_flags_explicit & MASK_FIX_R4000) == 0 +@@ -12314,18 +12683,24 @@ mips_swap_registers (unsigned int i) + #undef SWAP_STRING + #undef SWAP_INT + } +- +-/* Implement CONDITIONAL_REGISTER_USAGE. */ +- + void + mips_conditional_register_usage (void) + { ++ ++ /* These DSP control register fields are global. */ ++ if (ISA_HAS_DSP) ++ { ++ global_regs[CCDSP_PO_REGNUM] = 1; ++ global_regs[CCDSP_SC_REGNUM] = 1; ++ } + if (!ISA_HAS_DSP) + { + int regno; + + for (regno = DSP_ACC_REG_FIRST; regno <= DSP_ACC_REG_LAST; regno++) + fixed_regs[regno] = call_used_regs[regno] = 1; ++ for (regno = DSP_CTRL_REG_FIRST; regno <= DSP_CTRL_REG_LAST; regno++) ++ fixed_regs[regno] = call_used_regs[regno] = 1; + } + if (!TARGET_HARD_FLOAT) + { +@@ -12387,6 +12762,24 @@ mips_conditional_register_usage (void) + for (regno = DSP_ACC_REG_FIRST; regno <= DSP_ACC_REG_LAST; regno += 2) + mips_swap_registers (regno); + } ++ /* In non-PIC abicalls, $gp is completely ordinary; we can use a pseudo ++ for TLS GOT entries. 
*/ ++ if (TARGET_NONPIC_ABICALLS) ++ { ++ call_used_regs[GLOBAL_POINTER_REGNUM] = TARGET_OLDABI; ++ call_really_used_regs[GLOBAL_POINTER_REGNUM] = TARGET_OLDABI; ++ fixed_regs[GLOBAL_POINTER_REGNUM] = 0; ++ } ++ /* $f30 is reserved for errata workarounds in ICE9A. */ ++ if (TARGET_FIX_ICE9A) ++ { ++ const int f30 = FP_REG_FIRST + 30; ++ const int f31 = FP_REG_FIRST + 31; ++ ++ fixed_regs[f30] = call_really_used_regs[f30] = call_used_regs[f30] = 1; ++ if (MAX_FPRS_PER_FMT == 2) ++ fixed_regs[f31] = call_really_used_regs[f31] = call_used_regs[f31] = 1; ++ } + } + + /* When generating MIPS16 code, we want to allocate $24 (T_REG) before +@@ -12411,6 +12804,153 @@ mips_order_regs_for_local_alloc (void) + } + } + ++#ifdef CVMX_SHARED_BSS_FLAGS ++/* Handle a "cvmx_shared" attribute; arguments as in ++ struct attribute_spec.handler. */ ++ ++static tree ++octeon_handle_cvmx_shared_attribute (tree *node, tree name, ++ tree args ATTRIBUTE_UNUSED, ++ int flags ATTRIBUTE_UNUSED, ++ bool *no_add_attrs) ++{ ++ if (TREE_CODE (*node) != VAR_DECL) ++ { ++ warning (OPT_Wattributes, "%qs attribute only applies to variables", ++ IDENTIFIER_POINTER (name)); ++ *no_add_attrs = true; ++ } ++ ++ return NULL_TREE; ++} ++ ++/* Switch to the appropriate section for output of DECL. ++ DECL is either a `VAR_DECL' node or a constant of some sort. ++ RELOC indicates whether forming the initial value of DECL requires ++ link-time relocations. */ ++ ++static section * ++octeon_select_section (tree decl, int reloc, unsigned HOST_WIDE_INT align) ++{ ++ if (decl && TREE_CODE (decl) == VAR_DECL ++ && lookup_attribute ("cvmx_shared", DECL_ATTRIBUTES (decl))) ++ { ++ const char *sname = NULL; ++ unsigned int flags = SECTION_WRITE; ++ ++ switch (categorize_decl_for_section (decl, reloc)) ++ { ++ case SECCAT_DATA: ++ case SECCAT_SDATA: ++ case SECCAT_RODATA: ++ case SECCAT_SRODATA: ++ case SECCAT_RODATA_MERGE_STR: ++ case SECCAT_RODATA_MERGE_STR_INIT: ++ case SECCAT_RODATA_MERGE_CONST: ++ case SECCAT_DATA_REL: ++ case SECCAT_DATA_REL_LOCAL: ++ case SECCAT_DATA_REL_RO: ++ case SECCAT_DATA_REL_RO_LOCAL: ++ sname = ".cvmx_shared"; ++ break; ++ case SECCAT_BSS: ++ case SECCAT_SBSS: ++ sname = ".cvmx_shared_bss"; ++ flags |= SECTION_BSS; ++ break; ++ case SECCAT_TEXT: ++ case SECCAT_TDATA: ++ case SECCAT_TBSS: ++ break; ++ } ++ if (sname) ++ { ++ return get_section (sname, flags, decl); ++ } ++ } ++ return default_elf_select_section (decl, reloc, align); ++} ++ ++/* Build up a unique section name, expressed as a ++ STRING_CST node, and assign it to DECL_SECTION_NAME (decl). ++ RELOC indicates whether the initial value of EXP requires ++ link-time relocations. */ ++ ++static void ++octeon_unique_section (tree decl, int reloc) ++{ ++ if (decl && TREE_CODE (decl) == VAR_DECL ++ && lookup_attribute ("cvmx_shared", DECL_ATTRIBUTES (decl))) ++ { ++ const char *sname = NULL; ++ ++ if (! 
DECL_ONE_ONLY (decl)) ++ { ++ section *sect; ++ sect = octeon_select_section (decl, reloc, DECL_ALIGN (decl)); ++ DECL_SECTION_NAME (decl) = build_string (strlen (sect->named.name), ++ sect->named.name); ++ return; ++ } ++ ++ switch (categorize_decl_for_section (decl, reloc)) ++ { ++ case SECCAT_BSS: ++ case SECCAT_SBSS: ++ sname = ".cvmx_shared_bss.linkonce."; ++ break; ++ case SECCAT_SDATA: ++ case SECCAT_DATA: ++ case SECCAT_DATA_REL: ++ case SECCAT_DATA_REL_LOCAL: ++ case SECCAT_DATA_REL_RO: ++ case SECCAT_DATA_REL_RO_LOCAL: ++ case SECCAT_RODATA: ++ case SECCAT_SRODATA: ++ case SECCAT_RODATA_MERGE_STR: ++ case SECCAT_RODATA_MERGE_STR_INIT: ++ case SECCAT_RODATA_MERGE_CONST: ++ sname = ".cvmx_shared.linkonce."; ++ break; ++ case SECCAT_TEXT: ++ case SECCAT_TDATA: ++ case SECCAT_TBSS: ++ break; ++ } ++ if (sname) ++ { ++ const char *name; ++ size_t plen, nlen; ++ char *string; ++ plen = strlen (sname); ++ ++ name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)); ++ name = targetm.strip_name_encoding (name); ++ nlen = strlen (name); ++ ++ string = alloca (plen + nlen + 1); ++ memcpy (string, sname, plen); ++ memcpy (string + plen, name, nlen + 1); ++ DECL_SECTION_NAME (decl) = build_string (nlen + plen, string); ++ return; ++ } ++ } ++ default_unique_section (decl, reloc); ++} ++ ++/* Emit an uninitialized cvmx_shared variable. */ ++void ++octeon_output_shared_variable (FILE *stream, tree decl, const char *name, ++ unsigned HOST_WIDE_INT size, int align) ++{ ++ switch_to_section (get_section (".cvmx_shared_bss", CVMX_SHARED_BSS_FLAGS, ++ NULL_TREE)); ++ ASM_OUTPUT_ALIGN (stream, floor_log2 (align / BITS_PER_UNIT)); ++ ASM_DECLARE_OBJECT_NAME (stream, name, decl); ++ ASM_OUTPUT_SKIP (stream, size != 0 ? size : 1); ++} ++#endif ++ + /* Initialize the GCC target structure. */ + #undef TARGET_ASM_ALIGNED_HI_OP + #define TARGET_ASM_ALIGNED_HI_OP "\t.half\t" +@@ -12571,6 +13111,7 @@ mips_order_regs_for_local_alloc (void) + #undef TARGET_DWARF_REGISTER_SPAN + #define TARGET_DWARF_REGISTER_SPAN mips_dwarf_register_span + ++ + struct gcc_target targetm = TARGET_INITIALIZER; + + #include "gt-mips.h" +--- a/gcc/config/mips/mips.h ++++ b/gcc/config/mips/mips.h +@@ -67,6 +67,8 @@ enum processor_type { + PROCESSOR_SB1, + PROCESSOR_SB1A, + PROCESSOR_SR71000, ++ PROCESSOR_XLR, ++ PROCESSOR_OCTEON, + PROCESSOR_MAX + }; + +@@ -179,15 +181,19 @@ enum mips_code_readable_setting { + #define TARGET_SIBCALLS \ + (!TARGET_MIPS16 && (!TARGET_USE_GOT || TARGET_EXPLICIT_RELOCS)) + +-/* True if we need to use a global offset table to access some symbols. */ +-#define TARGET_USE_GOT (TARGET_ABICALLS || TARGET_RTP_PIC) ++/* True if we need to use a global offset table to access some symbols. ++ Small data and TLS are not counted. */ ++#define TARGET_USE_GOT ((TARGET_ABICALLS && flag_pic) || TARGET_RTP_PIC) + + /* True if TARGET_USE_GOT and if $gp is a call-clobbered register. */ +-#define TARGET_CALL_CLOBBERED_GP (TARGET_ABICALLS && TARGET_OLDABI) ++#define TARGET_CALL_CLOBBERED_GP (TARGET_ABICALLS && flag_pic && TARGET_OLDABI) + + /* True if TARGET_USE_GOT and if $gp is a call-saved register. */ + #define TARGET_CALL_SAVED_GP (TARGET_USE_GOT && !TARGET_CALL_CLOBBERED_GP) + ++/* True if using abicalls, but not ourselves PIC. */ ++#define TARGET_NONPIC_ABICALLS (TARGET_ABICALLS && !flag_pic) ++ + /* True if indirect calls must use register class PIC_FN_ADDR_REG. + This is true for both the PIC and non-PIC VxWorks RTP modes. 
*/ + #define TARGET_USE_PIC_FN_ADDR_REG (TARGET_ABICALLS || TARGET_VXWORKS_RTP) +@@ -197,7 +203,8 @@ enum mips_code_readable_setting { + Although GAS does understand .gpdword, the SGI linker mishandles + the relocations GAS generates (R_MIPS_GPREL32 followed by R_MIPS_64). + We therefore disable GP-relative switch tables for n64 on IRIX targets. */ +-#define TARGET_GPWORD (TARGET_ABICALLS && !(mips_abi == ABI_64 && TARGET_IRIX)) ++#define TARGET_GPWORD (TARGET_ABICALLS && flag_pic \ ++ && !(mips_abi == ABI_64 && TARGET_IRIX)) + + /* Generate mips16 code */ + #define TARGET_MIPS16 ((target_flags & MASK_MIPS16) != 0) +@@ -237,6 +244,8 @@ enum mips_code_readable_setting { + #define TARGET_SB1 (mips_arch == PROCESSOR_SB1 \ + || mips_arch == PROCESSOR_SB1A) + #define TARGET_SR71K (mips_arch == PROCESSOR_SR71000) ++#define TARGET_OCTEON (mips_arch == PROCESSOR_OCTEON) ++#define TARGET_XLR (mips_arch == PROCESSOR_XLR) + + /* Scheduling target defines. */ + #define TUNE_MIPS3000 (mips_tune == PROCESSOR_R3000) +@@ -311,6 +320,9 @@ enum mips_code_readable_setting { + #define TARGET_IRIX 0 + #define TARGET_IRIX6 0 + ++/* SDE specific stuff. */ ++#define TARGET_MIPS_SDE 0 ++ + /* Define preprocessor macros for the -march and -mtune options. + PREFIX is either _MIPS_ARCH or _MIPS_TUNE, INFO is the selected + processor. If INFO's canonical name is "foo", define PREFIX to +@@ -435,7 +447,10 @@ enum mips_code_readable_setting { + else if (ISA_MIPS64) \ + { \ + builtin_define ("__mips=64"); \ +- builtin_define ("__mips_isa_rev=1"); \ ++ if (TARGET_OCTEON) \ ++ builtin_define ("__mips_isa_rev=2"); \ ++ else \ ++ builtin_define ("__mips_isa_rev=1"); \ + builtin_define ("_MIPS_ISA=_MIPS_ISA_MIPS64"); \ + } \ + \ +@@ -520,6 +535,9 @@ enum mips_code_readable_setting { + \ + if (mips_abi == ABI_EABI) \ + builtin_define ("__mips_eabi"); \ ++ \ ++ if (TARGET_FIX_ICE9A) \ ++ builtin_define ("_MIPS_FIX_ICE9A"); \ + } \ + while (0) + +@@ -651,7 +669,8 @@ enum mips_code_readable_setting { + %{march=mips32|march=4kc|march=4km|march=4kp|march=4ksc:-mips32} \ + %{march=mips32r2|march=m4k|march=4ke*|march=4ksd|march=24k* \ + |march=34k*|march=74k*: -mips32r2} \ +- %{march=mips64|march=5k*|march=20k*|march=sb1*|march=sr71000: -mips64} \ ++ %{march=mips64|march=5k*|march=20k*|march=sb1*|march=sr71000 \ ++ |march=octeon|march=xlr: -mips64} \ + %{!march=*: -" MULTILIB_ISA_DEFAULT "}}" + + /* A spec that infers a -mhard-float or -msoft-float setting from an +@@ -664,6 +683,11 @@ enum mips_code_readable_setting { + |march=34kc|march=74kc|march=5kc: -msoft-float; \ + march=*: -mhard-float}" + ++/* A spec that infers the -mdsp setting from an -march argument. */ ++ ++#define MIPS_ARCH_DSP_SPEC \ ++ "%{!mno-dsp:%{march=24ke*|march=34k*|march=74k*: -mdsp}}" ++ + /* A spec condition that matches 32-bit options. It only works if + MIPS_ISA_LEVEL_SPEC has been applied. */ + +@@ -672,19 +696,27 @@ enum mips_code_readable_setting { + + /* Support for a compile-time default CPU, et cetera. The rules are: + --with-arch is ignored if -march is specified or a -mips is specified +- (other than -mips16). +- --with-tune is ignored if -mtune is specified. ++ (other than -mips16); likewise --with-arch32 and --with-arch64. ++ --with-tune is ignored if -mtune is specified; likewise ++ --with-tune32 and --with-tune64. + --with-abi is ignored if -mabi is specified. + --with-float is ignored if -mhard-float or -msoft-float are + specified. + --with-divide is ignored if -mdivide-traps or -mdivide-breaks are +- specified. */ ++ specified. 
++ --with-fix-ice9a is ignored if -mfix-ice9a or -mno-fix-ice9a are ++ specified. */ + #define OPTION_DEFAULT_SPECS \ + {"arch", "%{" MIPS_ARCH_OPTION_SPEC ":;: -march=%(VALUE)}" }, \ ++ {"arch32", "%{!mabi=*|mabi=32:%{" MIPS_ARCH_OPTION_SPEC ":;: -march=%(VALUE)}}" }, \ ++ {"arch64", "%{mabi=n32|mabi=64:%{" MIPS_ARCH_OPTION_SPEC ":;: -march=%(VALUE)}}" }, \ + {"tune", "%{!mtune=*:-mtune=%(VALUE)}" }, \ ++ {"tune32", "%{!mabi=*|mabi=32:%{!mtune=*:-mtune=%(VALUE)}}" }, \ ++ {"tune64", "%{mabi=n32|mabi=64:%{!mtune=*:-mtune=%(VALUE)}}" }, \ + {"abi", "%{!mabi=*:-mabi=%(VALUE)}" }, \ + {"float", "%{!msoft-float:%{!mhard-float:-m%(VALUE)-float}}" }, \ + {"divide", "%{!mdivide-traps:%{!mdivide-breaks:-mdivide-%(VALUE)}}" }, \ ++ {"fix-ice9a", "%{!mfix-ice9a:%{!mno-fix-ice9a:-mfix-ice9a}}" }, \ + {"llsc", "%{!mllsc:%{!mno-llsc:-m%(VALUE)}}" } + + +@@ -783,6 +815,9 @@ enum mips_code_readable_setting { + || ISA_MIPS64) \ + && !TARGET_MIPS16) + ++/* ISA has Octeon specific pop instruction */ ++#define ISA_HAS_POPCOUNT (TARGET_OCTEON && !TARGET_MIPS16) ++ + /* ISA has three operand multiply instructions that put + the high part in an accumulator: mulhi or mulhiu. */ + #define ISA_HAS_MULHI ((TARGET_MIPS5400 \ +@@ -823,6 +858,7 @@ enum mips_code_readable_setting { + || TARGET_MIPS5400 \ + || TARGET_MIPS5500 \ + || TARGET_SR71K \ ++ || TARGET_OCTEON \ + || TARGET_SMARTMIPS) \ + && !TARGET_MIPS16) + +@@ -848,13 +884,33 @@ enum mips_code_readable_setting { + #define ISA_HAS_TRUNC_W (!ISA_MIPS1) + + /* ISA includes the MIPS32r2 seb and seh instructions. */ +-#define ISA_HAS_SEB_SEH (ISA_MIPS32R2 \ ++#define ISA_HAS_SEB_SEH ((ISA_MIPS32R2 || TARGET_OCTEON) \ + && !TARGET_MIPS16) + + /* ISA includes the MIPS32/64 rev 2 ext and ins instructions. */ +-#define ISA_HAS_EXT_INS (ISA_MIPS32R2 \ ++#define ISA_HAS_EXT_INS ((ISA_MIPS32R2 || TARGET_OCTEON) \ + && !TARGET_MIPS16) + ++/* ISA includes the exts instructions. */ ++#define ISA_HAS_EXTS (TARGET_OCTEON && !TARGET_MIPS16) ++ ++/* ISA includes the bbit* instructions. */ ++#define ISA_HAS_BBIT (TARGET_OCTEON && !TARGET_MIPS16) ++ ++/* ISA includes the seq and sne instructions. */ ++#define ISA_HAS_SEQ_SNE (TARGET_OCTEON && !TARGET_MIPS16) ++ ++/* ISA includes the baddu instruction. */ ++#define ISA_HAS_BADDU (TARGET_OCTEON && !TARGET_MIPS16) ++ ++/* ISA has single-instruction unalinged load/store support. */ ++#define ISA_HAS_UL_US (TARGET_OCTEON \ ++ && TARGET_OCTEON_UNALIGNED \ ++ && !TARGET_MIPS16) ++ ++/* ISA includes the cins instruction. */ ++#define ISA_HAS_CINS (TARGET_OCTEON && !TARGET_MIPS16) ++ + /* ISA has instructions for accessing top part of 64-bit fp regs. */ + #define ISA_HAS_MXHC1 (TARGET_FLOAT64 && ISA_MIPS32R2) + +@@ -935,6 +991,49 @@ enum mips_code_readable_setting { + #endif + + ++/* Some targets (most of those with dynamic linking, e.g. Irix, ++ GNU/Linux, BSD) default to -mabicalls. They mostly default to PIC ++ also. Force the appropriate -mabicalls setting into the command ++ line for the benefit of the -fno-pic spec just below. */ ++#ifdef TARGET_ABICALLS_DEFAULT ++#define ABICALLS_SPEC "%{!mno-abicalls:%{!mabicalls:-mabicalls}}" ++#else ++#define ABICALLS_SPEC "%{!mno-abicalls:%{!mabicalls:-mno-abicalls}}" ++#endif ++ ++/* Make -mabicalls imply PIC unless the target supports non-PIC ++ abicalls. Targets which do not support non-PIC abicalls must set ++ flag_pic for executables as well as DSOs ++ because we may reference symbols that are not defined in ++ the final executable - these targets do not have copy relocs. 
++ ++ All 64-bit targets are assumed to not support PIC abicalls. ++ CSL NOTE: It would be nice to remove this restriction before ++ contributing upstream; 64-bit support should be a small project. ++ ++ Also, there is a body of code that uses __PIC__ to distinguish ++ between -mabicalls and -mno-abicalls code. For targets with ++ non-PIC abicalls support any such code will have to be corrected. ++ All you need to do if !__PIC__ is use $t9 for indirect calls ++ and be careful about assuming $gp is set up in inline asm. */ ++#ifdef TARGET_ABICALLS_NONPIC ++#define ABICALLS_SELF_SPECS ABICALLS_SPEC, \ ++ "%{mabicalls:%{!fno-pic:%{mabi=o64|mabi=64|mabi=n32:-fpic}}}" ++#else ++#define ABICALLS_SELF_SPECS ABICALLS_SPEC, \ ++ "%{mabicalls:%{!fno-pic:-fpic}}" ++#endif ++ ++/* Any additional self specs defined by the subtarget. */ ++#ifndef SUBTARGET_SELF_SPECS ++#define SUBTARGET_SELF_SPECS "" ++#endif ++ ++#define DRIVER_SELF_SPECS \ ++ SUBTARGET_SELF_SPECS, \ ++ ABICALLS_SELF_SPECS ++ ++ + #ifndef MIPS_ABI_DEFAULT + #define MIPS_ABI_DEFAULT ABI_32 + #endif +@@ -1003,7 +1102,7 @@ enum mips_code_readable_setting { + %{mdspr2} %{mno-dspr2} \ + %{msmartmips} %{mno-smartmips} \ + %{mmt} %{mno-mt} \ +-%{mfix-vr4120} %{mfix-vr4130} \ ++%{mfix-vr4120} %{mfix-vr4130} %{mfix-ice9a} %{mno-fix-ice9a} \ + %(subtarget_asm_optimizing_spec) \ + %(subtarget_asm_debugging_spec) \ + %{mabi=*} %{!mabi*: %(asm_abi_default_spec)} \ +@@ -1012,6 +1111,7 @@ enum mips_code_readable_setting { + %{mshared} %{mno-shared} \ + %{msym32} %{mno-sym32} \ + %{mtune=*} %{v} \ ++%{mocteon-useun} %{mno-octeon-useun} \ + %(subtarget_asm_spec)" + + /* Extra switches sometimes passed to the linker. */ +@@ -1515,9 +1615,24 @@ enum mips_code_readable_setting { + #define DSP_ACC_REG_LAST 181 + #define DSP_ACC_REG_NUM (DSP_ACC_REG_LAST - DSP_ACC_REG_FIRST + 1) + ++#define DSP_CTRL_REG_FIRST 182 ++#define DSP_CTRL_REG_LAST 187 ++ + #define AT_REGNUM (GP_REG_FIRST + 1) + #define HI_REGNUM (TARGET_BIG_ENDIAN ? MD_REG_FIRST : MD_REG_FIRST + 1) + #define LO_REGNUM (TARGET_BIG_ENDIAN ? MD_REG_FIRST + 1 : MD_REG_FIRST) ++#define AC1HI_REGNUM (TARGET_BIG_ENDIAN \ ++ ? DSP_ACC_REG_FIRST : DSP_ACC_REG_FIRST + 1) ++#define AC1LO_REGNUM (TARGET_BIG_ENDIAN \ ++ ? DSP_ACC_REG_FIRST + 1 : DSP_ACC_REG_FIRST) ++#define AC2HI_REGNUM (TARGET_BIG_ENDIAN \ ++ ? DSP_ACC_REG_FIRST + 2 : DSP_ACC_REG_FIRST + 3) ++#define AC2LO_REGNUM (TARGET_BIG_ENDIAN \ ++ ? DSP_ACC_REG_FIRST + 3 : DSP_ACC_REG_FIRST + 2) ++#define AC3HI_REGNUM (TARGET_BIG_ENDIAN \ ++ ? DSP_ACC_REG_FIRST + 4 : DSP_ACC_REG_FIRST + 5) ++#define AC3LO_REGNUM (TARGET_BIG_ENDIAN \ ++ ? DSP_ACC_REG_FIRST + 5 : DSP_ACC_REG_FIRST + 4) + + /* FPSW_REGNUM is the single condition code used if !ISA_HAS_8CC. 
+ If ISA_HAS_8CC, it should not be used, and an arbitrary ST_REG +@@ -1673,6 +1788,7 @@ enum reg_class + ST_REGS, /* status registers (fp status) */ + DSP_ACC_REGS, /* DSP accumulator registers */ + ACC_REGS, /* Hi/Lo and DSP accumulator registers */ ++ FRAME_REGS, /* $arg and $frame */ + ALL_REGS, /* all registers */ + LIM_REG_CLASSES /* max value + 1 */ + }; +@@ -1715,6 +1831,7 @@ enum reg_class + "ST_REGS", \ + "DSP_ACC_REGS", \ + "ACC_REGS", \ ++ "FRAME_REGS", \ + "ALL_REGS" \ + } + +@@ -1758,7 +1875,8 @@ enum reg_class + { 0x00000000, 0x00000000, 0x000007f8, 0x00000000, 0x00000000, 0x00000000 }, /* status registers */ \ + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x003f0000 }, /* dsp accumulator registers */ \ + { 0x00000000, 0x00000000, 0x00000003, 0x00000000, 0x00000000, 0x003f0000 }, /* hi/lo and dsp accumulator registers */ \ +- { 0xffffffff, 0xffffffff, 0xffff07ff, 0xffffffff, 0xffffffff, 0x0fffffff } /* all registers */ \ ++ { 0x00000000, 0x00000000, 0x00006000, 0x00000000, 0x00000000, 0x00000000 }, /* frame registers */ \ ++ { 0xffffffff, 0xffffffff, 0xffff67ff, 0xffffffff, 0xffffffff, 0x0fffffff } /* all registers */ \ + } + + +@@ -2446,7 +2564,7 @@ typedef struct mips_args { + ? "%*" INSN "\t%" #OPNO "%/" \ + : REG_P (OPERANDS[OPNO]) \ + ? "%*" INSN "r\t%" #OPNO "%/" \ +- : TARGET_ABICALLS \ ++ : TARGET_ABICALLS && flag_pic \ + ? (".option\tpic0\n\t" \ + "%*" INSN "\t%" #OPNO "%/\n\t" \ + ".option\tpic2") \ +--- a/gcc/config/mips/mips.md ++++ b/gcc/config/mips/mips.md +@@ -60,7 +60,10 @@ + (UNSPEC_MEMORY_BARRIER 41) + (UNSPEC_SET_GOT_VERSION 42) + (UNSPEC_UPDATE_GOT_VERSION 43) +- ++ ++ (UNSPEC_UNALIGNED_LOAD 50) ++ (UNSPEC_UNALIGNED_STORE 51) ++ + (UNSPEC_ADDRESS_FIRST 100) + + (TLS_GET_TP_REGNUM 3) +@@ -269,6 +272,7 @@ + ;; slt set less than instructions + ;; signext sign extend instructions + ;; clz the clz and clo instructions ++;; pop pop and dpop + ;; trap trap if instructions + ;; imul integer multiply 2 operands + ;; imul3 integer multiply 3 operands +@@ -291,11 +295,17 @@ + ;; frsqrt floating point reciprocal square root + ;; frsqrt1 floating point reciprocal square root step1 + ;; frsqrt2 floating point reciprocal square root step2 ++;; dspmac DSP MAC instructions not saturating the accumulator ++;; dspmacsat DSP MAC instructions that saturate the accumulator ++;; accext DSP accumulator extract instructions ++;; accmod DSP accumulator modify instructions ++;; dspalu DSP ALU instructions not saturating the result ++;; dspalusat DSP ALU instructions that saturate the result + ;; multi multiword sequence (or user asm statements) + ;; nop no operation + ;; ghost an instruction that produces no real code + (define_attr "type" +- "unknown,branch,jump,call,load,fpload,fpidxload,store,fpstore,fpidxstore,prefetch,prefetchx,condmove,mfc,mtc,mthilo,mfhilo,const,arith,logical,shift,slt,signext,clz,trap,imul,imul3,imadd,idiv,move,fmove,fadd,fmul,fmadd,fdiv,frdiv,frdiv1,frdiv2,fabs,fneg,fcmp,fcvt,fsqrt,frsqrt,frsqrt1,frsqrt2,multi,nop,ghost" ++ "unknown,branch,jump,call,load,fpload,fpidxload,store,fpstore,fpidxstore,prefetch,prefetchx,condmove,mfc,mtc,mthilo,mfhilo,const,arith,logical,shift,slt,signext,clz,pop,trap,imul,imul3,imadd,idiv,move,fmove,fadd,fmul,fmadd,fdiv,frdiv,frdiv1,frdiv2,fabs,fneg,fcmp,fcvt,fsqrt,frsqrt,frsqrt1,frsqrt2,dspmac,dspmacsat,accext,accmod,dspalu,dspalusat,multi,nop,ghost" + (cond [(eq_attr "jal" "!unset") (const_string "call") + (eq_attr "got" "load") (const_string "load")] + (const_string "unknown"))) +@@ -412,7 +422,7 @@ + ;; 
Attribute describing the processor. This attribute must match exactly + ;; with the processor_type enumeration in mips.h. + (define_attr "cpu" +- "r3000,4kc,4kp,5kc,5kf,20kc,24kc,24kf2_1,24kf1_1,74kc,74kf2_1,74kf1_1,74kf3_2,m4k,r3900,r6000,r4000,r4100,r4111,r4120,r4130,r4300,r4600,r4650,r5000,r5400,r5500,r7000,r8000,r9000,sb1,sb1a,sr71000" ++ "r3000,4kc,4kp,5kc,5kf,20kc,24kc,24kf2_1,24kf1_1,74kc,74kf2_1,74kf1_1,74kf3_2,m4k,r3900,r6000,r4000,r4100,r4111,r4120,r4130,r4300,r4600,r4650,r5000,r5400,r5500,r7000,r8000,r9000,sb1,sb1a,sr71000,xlr,octeon" + (const (symbol_ref "mips_tune"))) + + ;; The type of hardware hazard associated with this instruction. +@@ -461,6 +471,16 @@ + (const_string "yes") + (const_string "no")))) + ++;; Attributes defining whether a branch has a branch-likely variant. ++(define_attr "branch_without_likely" "no,yes" (const_string "no")) ++ ++(define_attr "branch_with_likely" "no,yes" ++ (if_then_else ++ (and (eq_attr "type" "branch") ++ (eq_attr "branch_without_likely" "no")) ++ (const_string "yes") ++ (const_string "no"))) ++ + ;; True if an instruction might assign to hi or lo when reloaded. + ;; This is used by the TUNE_MACC_CHAINS code. + (define_attr "may_clobber_hilo" "no,yes" +@@ -513,6 +533,30 @@ + (V2SF "!TARGET_64BIT && TARGET_PAIRED_SINGLE_FLOAT") + (TF "TARGET_64BIT && TARGET_FLOAT64")]) + ++;; The attributes for the ICE9A fix. ++(define_mode_attr ice9a_stallnops [(SF "") (V2SF "") ++ (DF "movn.d\t$f30, $f28, $0\;movn.d\t$f30, $f28, $0\;movn.d\t$f30, $f28, $0\;movn.d\t$f30, $f28, $0\;movn.d\t$f30, $f28, $0\;")]) ++(define_mode_attr ice9a_round [(SF "") (V2SF "") ++ (DF "\;movn.d\t$f30, %0, $0")]) ++ ++ ++ ++;; stall workaround = 5 insns, => length = 4 * (1+5) = 24 ++(define_mode_attr ice9a_length_stall [(SF "4") (V2SF "4") ++ (DF "24")]) ++ ++;; round workaround = 1 insn, => length = 4 * (1+1) = 8 ++(define_mode_attr ice9a_length_round [(SF "4") (V2SF "4") ++ (DF "8")]) ++ ++;; both workarounds = 5+1 insn, => length = 4 * (1+5+1) = 28 ++(define_mode_attr ice9a_length_both [(SF "4") (V2SF "4") ++ (DF "28")]) ++ ++ ++ ++ ++ + ;; In GPR templates, a string like "subu" will expand to "subu" in the + ;; 32-bit version and "dsubu" in the 64-bit version. + (define_mode_attr d [(SI "") (DI "d") +@@ -525,9 +569,12 @@ + ;; instruction. + (define_mode_attr size [(QI "b") (HI "h")]) + +-;; This attributes gives the mode mask of a SHORT. ++;; This attribute gives the mode mask of a SHORT. + (define_mode_attr mask [(QI "0x00ff") (HI "0xffff")]) + ++;; This attribute gives the number of the topmost bit of a SUBDI. ++(define_mode_attr topbit [(QI "7") (HI "15") (SI "31")]) ++ + ;; Mode attributes for GPR loads and stores. + (define_mode_attr load [(SI "lw") (DI "ld")]) + (define_mode_attr store [(SI "sw") (DI "sd")]) +@@ -599,14 +646,23 @@ + ;; to use the same template. + (define_code_iterator any_extend [sign_extend zero_extend]) + ++;; This code iterator allows both sorts of extraction to be treated alike. ++(define_code_iterator any_extract [sign_extract zero_extract]) ++ + ;; This code iterator allows the three shift instructions to be generated + ;; from the same template. + (define_code_iterator any_shift [ashift ashiftrt lshiftrt]) + ++;; This code iterator allows both of the right shift codes to be treated alike. ++(define_code_iterator any_shiftrt [ashiftrt lshiftrt]) ++ + ;; This code iterator allows all native floating-point comparisons to be + ;; generated from the same template. 
+ (define_code_iterator fcond [unordered uneq unlt unle eq lt le]) + ++;; This code iterator allows both equality operators to be treated alike. ++(define_code_iterator equality_op [eq ne]) ++ + ;; This code iterator is used for comparisons that can be implemented + ;; by swapping the operands. + (define_code_iterator swapped_fcond [ge gt unge ungt]) +@@ -663,13 +719,19 @@ + ;; + ;; ......................... + +-(define_delay (and (eq_attr "type" "branch") ++(define_delay (and (eq_attr "branch_with_likely" "yes") + (eq (symbol_ref "TARGET_MIPS16") (const_int 0))) + [(eq_attr "can_delay" "yes") + (nil) + (and (eq_attr "branch_likely" "yes") + (eq_attr "can_delay" "yes"))]) + ++(define_delay (and (eq_attr "branch_without_likely" "yes") ++ (eq (symbol_ref "TARGET_MIPS16") (const_int 0))) ++ [(eq_attr "can_delay" "yes") ++ (nil) ++ (nil)]) ++ + (define_delay (eq_attr "type" "jump") + [(eq_attr "can_delay" "yes") + (nil) +@@ -720,7 +782,9 @@ + (include "7000.md") + (include "9000.md") + (include "sb1.md") ++(include "octeon.md") + (include "sr71k.md") ++(include "xlr.md") + (include "generic.md") + + ;; +@@ -985,6 +1049,51 @@ + [(set_attr "type" "arith") + (set_attr "mode" "SI") + (set_attr "extended_mips16" "yes")]) ++ ++;; Combiner patterns for unsigned byte-add. ++ ++(define_insn "*baddu_si" ++ [(set (match_operand:SI 0 "register_operand" "=d") ++ (zero_extend:SI ++ (subreg:QI ++ (plus:SI (match_operand:SI 1 "register_operand" "d") ++ (match_operand:SI 2 "register_operand" "d")) 3)))] ++ "ISA_HAS_BADDU && TARGET_BIG_ENDIAN" ++ "baddu\\t%0,%1,%2" ++ [(set_attr "type" "arith")]) ++ ++(define_insn "*baddu_disi" ++ [(set (match_operand:SI 0 "register_operand" "=d") ++ (zero_extend:SI ++ (truncate:QI ++ (plus:DI (match_operand:DI 1 "register_operand" "d") ++ (match_operand:DI 2 "register_operand" "d")))))] ++ "TARGET_64BIT && ISA_HAS_BADDU" ++ "baddu\\t%0,%1,%2" ++ [(set_attr "type" "arith")]) ++ ++(define_insn "*baddu_didi" ++ [(set (match_operand:DI 0 "register_operand" "=d") ++ (zero_extend:DI ++ (truncate:QI ++ (plus:DI ++ (subreg:DI ++ (truncate:QI (match_operand:DI 1 "register_operand" "d")) 0) ++ (subreg:DI ++ (truncate:QI (match_operand:DI 2 "register_operand" "d")) 0)))))] ++ "TARGET_64BIT && ISA_HAS_BADDU" ++ "baddu\\t%0,%1,%2" ++ [(set_attr "type" "arith")]) ++ ++(define_insn "*baddu_didi2" ++ [(set (match_operand:DI 0 "register_operand" "=d") ++ (zero_extend:DI ++ (truncate:QI ++ (plus:DI (match_operand:DI 1 "register_operand" "d") ++ (match_operand:DI 2 "register_operand" "d")))))] ++ "TARGET_64BIT && ISA_HAS_BADDU" ++ "baddu\\t%0,%1,%2" ++ [(set_attr "type" "arith")]) + + ;; + ;; .................... 
+@@ -1041,7 +1150,7 @@ + [(set (match_operand:SCALARF 0 "register_operand" "=f") + (mult:SCALARF (match_operand:SCALARF 1 "register_operand" "f") + (match_operand:SCALARF 2 "register_operand" "f")))] +- "!TARGET_4300_MUL_FIX" ++ "!TARGET_4300_MUL_FIX && !TARGET_FIX_ICE9A" + "mul.\t%0,%1,%2" + [(set_attr "type" "fmul") + (set_attr "mode" "")]) +@@ -1054,12 +1163,22 @@ + [(set (match_operand:SCALARF 0 "register_operand" "=f") + (mult:SCALARF (match_operand:SCALARF 1 "register_operand" "f") + (match_operand:SCALARF 2 "register_operand" "f")))] +- "TARGET_4300_MUL_FIX" ++ "TARGET_4300_MUL_FIX && !TARGET_FIX_ICE9A" + "mul.\t%0,%1,%2\;nop" + [(set_attr "type" "fmul") + (set_attr "mode" "") + (set_attr "length" "8")]) + ++(define_insn "*mul3_fix_ice9a" ++ [(set (match_operand:SCALARF 0 "register_operand" "=f") ++ (mult:SCALARF (match_operand:SCALARF 1 "register_operand" "f") ++ (match_operand:SCALARF 2 "register_operand" "f")))] ++ "TARGET_FIX_ICE9A && !TARGET_4300_MUL_FIX" ++ "mul.\t%0,%1,%2" ++ [(set_attr "type" "fmul") ++ (set_attr "mode" "") ++ (set_attr "length" "")]) ++ + (define_insn "mulv2sf3" + [(set (match_operand:V2SF 0 "register_operand" "=f") + (mult:V2SF (match_operand:V2SF 1 "register_operand" "f") +@@ -1849,21 +1968,43 @@ + (plus:ANYF (mult:ANYF (match_operand:ANYF 1 "register_operand" "f") + (match_operand:ANYF 2 "register_operand" "f")) + (match_operand:ANYF 3 "register_operand" "f")))] +- "ISA_HAS_FP4 && TARGET_FUSED_MADD" ++ "ISA_HAS_FP4 && TARGET_FUSED_MADD && !TARGET_FIX_ICE9A" + "madd.\t%0,%3,%1,%2" + [(set_attr "type" "fmadd") + (set_attr "mode" "")]) + ++(define_insn "*madd_ice9a" ++ [(set (match_operand:ANYF 0 "register_operand" "=f") ++ (plus:ANYF (mult:ANYF (match_operand:ANYF 1 "register_operand" "f") ++ (match_operand:ANYF 2 "register_operand" "f")) ++ (match_operand:ANYF 3 "register_operand" "f")))] ++ "ISA_HAS_FP4 && TARGET_FUSED_MADD && TARGET_FIX_ICE9A" ++ "madd.\t%0,%3,%1,%2" ++ [(set_attr "type" "fmadd") ++ (set_attr "mode" "") ++ (set_attr "length" "")]) ++ + (define_insn "*msub" + [(set (match_operand:ANYF 0 "register_operand" "=f") + (minus:ANYF (mult:ANYF (match_operand:ANYF 1 "register_operand" "f") + (match_operand:ANYF 2 "register_operand" "f")) + (match_operand:ANYF 3 "register_operand" "f")))] +- "ISA_HAS_FP4 && TARGET_FUSED_MADD" ++ "ISA_HAS_FP4 && TARGET_FUSED_MADD && !TARGET_FIX_ICE9A" + "msub.\t%0,%3,%1,%2" + [(set_attr "type" "fmadd") + (set_attr "mode" "")]) + ++(define_insn "*msub_ice9a" ++ [(set (match_operand:ANYF 0 "register_operand" "=f") ++ (minus:ANYF (mult:ANYF (match_operand:ANYF 1 "register_operand" "f") ++ (match_operand:ANYF 2 "register_operand" "f")) ++ (match_operand:ANYF 3 "register_operand" "f")))] ++ "ISA_HAS_FP4 && TARGET_FUSED_MADD && TARGET_FIX_ICE9A" ++ "msub.\t%0,%3,%1,%2" ++ [(set_attr "type" "fmadd") ++ (set_attr "mode" "") ++ (set_attr "length" "")]) ++ + (define_insn "*nmadd" + [(set (match_operand:ANYF 0 "register_operand" "=f") + (neg:ANYF (plus:ANYF +@@ -1873,7 +2014,8 @@ + "ISA_HAS_NMADD_NMSUB (mode) + && TARGET_FUSED_MADD + && HONOR_SIGNED_ZEROS (mode) +- && !HONOR_NANS (mode)" ++ && !HONOR_NANS (mode) ++ && !TARGET_FIX_ICE9A" + "nmadd.\t%0,%3,%1,%2" + [(set_attr "type" "fmadd") + (set_attr "mode" "")]) +@@ -1887,11 +2029,44 @@ + "ISA_HAS_NMADD_NMSUB (mode) + && TARGET_FUSED_MADD + && !HONOR_SIGNED_ZEROS (mode) +- && !HONOR_NANS (mode)" ++ && !HONOR_NANS (mode) ++ && !TARGET_FIX_ICE9A" + "nmadd.\t%0,%3,%1,%2" + [(set_attr "type" "fmadd") + (set_attr "mode" "")]) + ++(define_insn "*nmadd_ice9a" ++ [(set 
(match_operand:ANYF 0 "register_operand" "=f") ++ (neg:ANYF (plus:ANYF ++ (mult:ANYF (match_operand:ANYF 1 "register_operand" "f") ++ (match_operand:ANYF 2 "register_operand" "f")) ++ (match_operand:ANYF 3 "register_operand" "f"))))] ++ "ISA_HAS_NMADD_NMSUB (mode) ++ && TARGET_FUSED_MADD ++ && HONOR_SIGNED_ZEROS (mode) ++ && !HONOR_NANS (mode) ++ && TARGET_FIX_ICE9A" ++ "nmadd.\t%0,%3,%1,%2" ++ [(set_attr "type" "fmadd") ++ (set_attr "length" "") ++ (set_attr "mode" "")]) ++ ++(define_insn "*nmadd_fastmath_ice9a" ++ [(set (match_operand:ANYF 0 "register_operand" "=f") ++ (minus:ANYF ++ (mult:ANYF (neg:ANYF (match_operand:ANYF 1 "register_operand" "f")) ++ (match_operand:ANYF 2 "register_operand" "f")) ++ (match_operand:ANYF 3 "register_operand" "f")))] ++ "ISA_HAS_NMADD_NMSUB (mode) ++ && TARGET_FUSED_MADD ++ && !HONOR_SIGNED_ZEROS (mode) ++ && !HONOR_NANS (mode) ++ && TARGET_FIX_ICE9A" ++ "nmadd.\t%0,%3,%1,%2" ++ [(set_attr "type" "fmadd") ++ (set_attr "length" "") ++ (set_attr "mode" "")]) ++ + (define_insn "*nmsub" + [(set (match_operand:ANYF 0 "register_operand" "=f") + (neg:ANYF (minus:ANYF +@@ -1901,7 +2076,8 @@ + "ISA_HAS_NMADD_NMSUB (mode) + && TARGET_FUSED_MADD + && HONOR_SIGNED_ZEROS (mode) +- && !HONOR_NANS (mode)" ++ && !HONOR_NANS (mode) ++ && !TARGET_FIX_ICE9A" + "nmsub.\t%0,%1,%2,%3" + [(set_attr "type" "fmadd") + (set_attr "mode" "")]) +@@ -1915,10 +2091,43 @@ + "ISA_HAS_NMADD_NMSUB (mode) + && TARGET_FUSED_MADD + && !HONOR_SIGNED_ZEROS (mode) +- && !HONOR_NANS (mode)" ++ && !HONOR_NANS (mode) ++ && !TARGET_FIX_ICE9A" + "nmsub.\t%0,%1,%2,%3" + [(set_attr "type" "fmadd") + (set_attr "mode" "")]) ++ ++(define_insn "*nmsub_ice9a" ++ [(set (match_operand:ANYF 0 "register_operand" "=f") ++ (neg:ANYF (minus:ANYF ++ (mult:ANYF (match_operand:ANYF 2 "register_operand" "f") ++ (match_operand:ANYF 3 "register_operand" "f")) ++ (match_operand:ANYF 1 "register_operand" "f"))))] ++ "ISA_HAS_NMADD_NMSUB (mode) ++ && TARGET_FUSED_MADD ++ && HONOR_SIGNED_ZEROS (mode) ++ && !HONOR_NANS (mode) ++ && TARGET_FIX_ICE9A" ++ "nmsub.\t%0,%1,%2,%3" ++ [(set_attr "type" "fmadd") ++ (set_attr "length" "") ++ (set_attr "mode" "")]) ++ ++(define_insn "*nmsub_fastmath_ice9a" ++ [(set (match_operand:ANYF 0 "register_operand" "=f") ++ (minus:ANYF ++ (match_operand:ANYF 1 "register_operand" "f") ++ (mult:ANYF (match_operand:ANYF 2 "register_operand" "f") ++ (match_operand:ANYF 3 "register_operand" "f"))))] ++ "ISA_HAS_NMADD_NMSUB (mode) ++ && TARGET_FUSED_MADD ++ && !HONOR_SIGNED_ZEROS (mode) ++ && !HONOR_NANS (mode) ++ && TARGET_FIX_ICE9A" ++ "nmsub.\t%0,%1,%2,%3" ++ [(set_attr "type" "fmadd") ++ (set_attr "length" "") ++ (set_attr "mode" "")]) + + ;; + ;; .................... 
+@@ -1973,19 +2182,40 @@ + [(set (match_operand:ANYF 0 "register_operand" "=f") + (div:ANYF (match_operand:ANYF 1 "const_1_operand" "") + (match_operand:ANYF 2 "register_operand" "f")))] +- " && flag_unsafe_math_optimizations" +-{ +- if (TARGET_FIX_SB1) +- return "recip.\t%0,%2\;mov.\t%0,%0"; +- else +- return "recip.\t%0,%2"; +-} ++ " && ++ flag_unsafe_math_optimizations && ++ !TARGET_FIX_SB1 && ++ !TARGET_FIX_ICE9A" ++ "recip.\t%0,%2" ++ [(set_attr "type" "frdiv") ++ (set_attr "mode" "")]) ++ ++(define_insn "*recip3_fix_sb1" ++ [(set (match_operand:ANYF 0 "register_operand" "=f") ++ (div:ANYF (match_operand:ANYF 1 "const_1_operand" "") ++ (match_operand:ANYF 2 "register_operand" "f")))] ++ " && ++ flag_unsafe_math_optimizations && ++ TARGET_FIX_SB1 && ++ !TARGET_FIX_ICE9A" ++ "recip.\t%0,%2\;mov.\t%0,%0" + [(set_attr "type" "frdiv") + (set_attr "mode" "") +- (set (attr "length") +- (if_then_else (ne (symbol_ref "TARGET_FIX_SB1") (const_int 0)) +- (const_int 8) +- (const_int 4)))]) ++ (set_attr "length" "8")]) ++ ++(define_insn "*recip3_fix_ice9a" ++ [(set (match_operand:ANYF 0 "register_operand" "=f") ++ (div:ANYF (match_operand:ANYF 1 "const_1_operand" "") ++ (match_operand:ANYF 2 "register_operand" "f")))] ++ " && ++ flag_unsafe_math_optimizations && ++ !TARGET_FIX_SB1 && ++ TARGET_FIX_ICE9A" ++ "recip.\t%0,%2" ++ [(set_attr "type" "frdiv") ++ (set_attr "mode" "") ++ (set_attr "length" "")]) ++ + + ;; VR4120 errata MD(A1): signed division instructions do not work correctly + ;; with negative operands. We use special libgcc functions instead. +@@ -2021,60 +2251,117 @@ + ;; .................... + + ;; These patterns work around the early SB-1 rev2 core "F1" erratum (see +-;; "*div[sd]f3" comment for details). ++;; "*div[sd]f3" comment for details), and ICE9A errata. 
+ +-(define_insn "sqrt2" ++(define_expand "sqrt2" + [(set (match_operand:ANYF 0 "register_operand" "=f") + (sqrt:ANYF (match_operand:ANYF 1 "register_operand" "f")))] + "" +-{ +- if (TARGET_FIX_SB1) +- return "sqrt.\t%0,%1\;mov.\t%0,%0"; +- else +- return "sqrt.\t%0,%1"; +-} ++ "") ++ ++(define_insn "*sqrt2" ++ [(set (match_operand:ANYF 0 "register_operand" "=f") ++ (sqrt:ANYF (match_operand:ANYF 1 "register_operand" "f")))] ++ " && ++ !TARGET_FIX_SB1 && ++ !TARGET_FIX_ICE9A" ++ "sqrt.\t%0,%1" ++ [(set_attr "type" "fsqrt") ++ (set_attr "mode" "")]) ++ ++(define_insn "*sqrt2_fix_sb1" ++ [(set (match_operand:ANYF 0 "register_operand" "=f") ++ (sqrt:ANYF (match_operand:ANYF 1 "register_operand" "f")))] ++ " && ++ TARGET_FIX_SB1 && ++ !TARGET_FIX_ICE9A" ++ "sqrt.\t%0,%1\;mov.\t%0,%0" + [(set_attr "type" "fsqrt") + (set_attr "mode" "") +- (set (attr "length") +- (if_then_else (ne (symbol_ref "TARGET_FIX_SB1") (const_int 0)) +- (const_int 8) +- (const_int 4)))]) ++ (set_attr "length" "8")]) ++ ++(define_insn "*sqrt2_fix_ice9a" ++ [(set (match_operand:ANYF 0 "register_operand" "=f") ++ (sqrt:ANYF (match_operand:ANYF 1 "register_operand" "f")))] ++ " && ++ !TARGET_FIX_SB1 && ++ TARGET_FIX_ICE9A" ++ "sqrt.\t%0,%1" ++ [(set_attr "type" "fsqrt") ++ (set_attr "mode" "") ++ (set_attr "length" "")]) + + (define_insn "*rsqrta" + [(set (match_operand:ANYF 0 "register_operand" "=f") + (div:ANYF (match_operand:ANYF 1 "const_1_operand" "") + (sqrt:ANYF (match_operand:ANYF 2 "register_operand" "f"))))] +- " && flag_unsafe_math_optimizations" +-{ +- if (TARGET_FIX_SB1) +- return "rsqrt.\t%0,%2\;mov.\t%0,%0"; +- else +- return "rsqrt.\t%0,%2"; +-} ++ " && ++ flag_unsafe_math_optimizations && ++ !TARGET_FIX_SB1 && ++ !TARGET_FIX_ICE9A" ++ "rsqrt.\t%0,%2" ++ [(set_attr "type" "frsqrt") ++ (set_attr "mode" "")]) ++ ++(define_insn "*rsqrta_fix_sb1" ++ [(set (match_operand:ANYF 0 "register_operand" "=f") ++ (div:ANYF (match_operand:ANYF 1 "const_1_operand" "") ++ (sqrt:ANYF (match_operand:ANYF 2 "register_operand" "f"))))] ++ " && ++ flag_unsafe_math_optimizations && ++ TARGET_FIX_SB1" ++ "rsqrt.\t%0,%2\;mov.\t%0,%0" + [(set_attr "type" "frsqrt") + (set_attr "mode" "") +- (set (attr "length") +- (if_then_else (ne (symbol_ref "TARGET_FIX_SB1") (const_int 0)) +- (const_int 8) +- (const_int 4)))]) ++ (set_attr "length" "8")]) ++ ++(define_insn "*rsqrta_fix_ice9a" ++ [(set (match_operand:ANYF 0 "register_operand" "=f") ++ (div:ANYF (match_operand:ANYF 1 "const_1_operand" "") ++ (sqrt:ANYF (match_operand:ANYF 2 "register_operand" "f"))))] ++ " && ++ flag_unsafe_math_optimizations && ++ TARGET_FIX_ICE9A" ++ "rsqrt.\t%0,%2" ++ [(set_attr "type" "frsqrt") ++ (set_attr "mode" "") ++ (set_attr "length" "")]) + + (define_insn "*rsqrtb" + [(set (match_operand:ANYF 0 "register_operand" "=f") + (sqrt:ANYF (div:ANYF (match_operand:ANYF 1 "const_1_operand" "") + (match_operand:ANYF 2 "register_operand" "f"))))] +- " && flag_unsafe_math_optimizations" +-{ +- if (TARGET_FIX_SB1) +- return "rsqrt.\t%0,%2\;mov.\t%0,%0"; +- else +- return "rsqrt.\t%0,%2"; +-} ++ " && ++ flag_unsafe_math_optimizations && ++ !TARGET_FIX_SB1 && ++ !TARGET_FIX_ICE9A" ++ "rsqrt.\t%0,%2" ++ [(set_attr "type" "frsqrt") ++ (set_attr "mode" "")]) ++ ++(define_insn "*rsqrtb_fix_sb1" ++ [(set (match_operand:ANYF 0 "register_operand" "=f") ++ (sqrt:ANYF (div:ANYF (match_operand:ANYF 1 "const_1_operand" "") ++ (match_operand:ANYF 2 "register_operand" "f"))))] ++ " && ++ flag_unsafe_math_optimizations && ++ TARGET_FIX_SB1" ++ "rsqrt.\t%0,%2\;mov.\t%0,%0" + [(set_attr 
"type" "frsqrt") + (set_attr "mode" "") +- (set (attr "length") +- (if_then_else (ne (symbol_ref "TARGET_FIX_SB1") (const_int 0)) +- (const_int 8) +- (const_int 4)))]) ++ (set_attr "length" "8")]) ++ ++(define_insn "*rsqrtb_fix_ice9a" ++ [(set (match_operand:ANYF 0 "register_operand" "=f") ++ (sqrt:ANYF (div:ANYF (match_operand:ANYF 1 "const_1_operand" "") ++ (match_operand:ANYF 2 "register_operand" "f"))))] ++ " && ++ flag_unsafe_math_optimizations && ++ TARGET_FIX_ICE9A" ++ "rsqrt.\t%0,%2\;mov.\t%0,%0" ++ [(set_attr "type" "frsqrt") ++ (set_attr "mode" "") ++ (set_attr "length" "")]) + + ;; + ;; .................... +@@ -2093,7 +2380,9 @@ + (define_insn "abs2" + [(set (match_operand:ANYF 0 "register_operand" "=f") + (abs:ANYF (match_operand:ANYF 1 "register_operand" "f")))] +- "!HONOR_NANS (mode)" ++ "!HONOR_NANS (mode) ++ || (TARGET_MIPS_SDE ++ && TARGET_HARD_FLOAT)" + "abs.\t%0,%1" + [(set_attr "type" "fabs") + (set_attr "mode" "")]) +@@ -2115,6 +2404,22 @@ + (set_attr "mode" "")]) + + ;; ++;; ................... ++;; ++;; Count number of 1-bits. ++;; ++;; ................... ++;; ++ ++(define_insn "popcount2" ++ [(set (match_operand:GPR 0 "register_operand" "=d") ++ (popcount:GPR (match_operand:GPR 1 "register_operand" "d")))] ++ "ISA_HAS_POPCOUNT" ++ "pop\t%0,%1" ++ [(set_attr "type" "pop") ++ (set_attr "mode" "")]) ++ ++;; + ;; .................... + ;; + ;; NEGATION and ONE'S COMPLEMENT +@@ -2347,6 +2652,16 @@ + (set_attr "mode" "SI") + (set_attr "extended_mips16" "yes,*")]) + ++(define_insn "*_trunc_exts" ++ [(set (match_operand:SUBDI 0 "register_operand" "=d") ++ (truncate:SUBDI ++ (any_shiftrt:DI (match_operand:DI 1 "register_operand" "d") ++ (match_operand:DI 2 "const_int_operand" ""))))] ++ "TARGET_64BIT && ISA_HAS_EXTS && INTVAL (operands[2]) < 32" ++ "exts\t%0,%1,%2,31" ++ [(set_attr "type" "shift") ++ (set_attr "mode" "SI")]) ++ + ;; Combiner patterns to optimize shift/truncate combinations. + + (define_insn "" +@@ -2447,10 +2762,15 @@ + + ;; Extension insns. 
+ +-(define_insn_and_split "zero_extendsidi2" ++(define_expand "zero_extendsidi2" ++ [(set (match_operand:DI 0 "register_operand") ++ (zero_extend:DI (match_operand:SI 1 "nonimmediate_operand")))] ++ "TARGET_64BIT") ++ ++(define_insn_and_split "*zero_extendsidi2" + [(set (match_operand:DI 0 "register_operand" "=d,d") + (zero_extend:DI (match_operand:SI 1 "nonimmediate_operand" "d,W")))] +- "TARGET_64BIT" ++ "TARGET_64BIT && !ISA_HAS_EXT_INS" + "@ + # + lwu\t%0,%1" +@@ -2471,7 +2791,7 @@ + [(set (match_operand:DI 0 "register_operand" "=d,d") + (and:DI (match_operand:DI 1 "nonimmediate_operand" "d,W") + (const_int 4294967295)))] +- "TARGET_64BIT" ++ "TARGET_64BIT && !ISA_HAS_EXT_INS" + { + if (which_alternative == 0) + return "#"; +@@ -2489,6 +2809,31 @@ + (set_attr "mode" "DI") + (set_attr "length" "8,*")]) + ++(define_insn "*zero_extendsidi2_dext" ++ [(set (match_operand:DI 0 "register_operand" "=d,d") ++ (zero_extend:DI (match_operand:SI 1 "nonimmediate_operand" "d,W")))] ++ "TARGET_64BIT && ISA_HAS_EXT_INS" ++ "@ ++ dext\t%0,%1,0,32 ++ lwu\t%0,%1" ++ [(set_attr "type" "shift,load") ++ (set_attr "mode" "DI")]) ++ ++(define_insn "*clear_upper32_dext" ++ [(set (match_operand:DI 0 "register_operand" "=d,d") ++ (and:DI (match_operand:DI 1 "nonimmediate_operand" "d,o") ++ (const_int 4294967295)))] ++ "TARGET_64BIT && ISA_HAS_EXT_INS" ++{ ++ if (which_alternative == 0) ++ return "dext\t%0,%1,0,32"; ++ ++ operands[1] = gen_lowpart (SImode, operands[1]); ++ return "lwu\t%0,%1"; ++} ++ [(set_attr "type" "shift,load") ++ (set_attr "mode" "DI")]) ++ + (define_expand "zero_extend2" + [(set (match_operand:GPR 0 "register_operand") + (zero_extend:GPR (match_operand:SHORT 1 "nonimmediate_operand")))] +@@ -3026,11 +3371,18 @@ + ;; + ;; .................... + +-;; Bit field extract patterns which use lwl/lwr or ldl/ldr. ++;; Bit field extract patterns which use lwl/lwr or ldl/ldr or ++;; exts/ext/dext. ++ ++;; ??? Using nonimmediate_operand for operand 1 will cause mode_for_extraction ++;; to return word_mode rather than QImode for memories. That's probably ++;; harmless given the current middle-end code; the RTL expander will only ++;; pass QImode references in any case, and any attempt to recog() a memory ++;; extraction will fail whatever mode the memory has. 
+ + (define_expand "extv" + [(set (match_operand 0 "register_operand") +- (sign_extract (match_operand:QI 1 "memory_operand") ++ (sign_extract (match_operand 1 "nonimmediate_operand") + (match_operand 2 "immediate_operand") + (match_operand 3 "immediate_operand")))] + "!TARGET_MIPS16" +@@ -3039,10 +3391,52 @@ + INTVAL (operands[2]), + INTVAL (operands[3]))) + DONE; ++ else if (ISA_HAS_EXTS ++ && register_operand (operands[1], VOIDmode) ++ && INTVAL (operands[2]) <= 32) ++ { ++ mips_adjust_register_ext_operands (operands); ++ if (GET_MODE (operands[0]) == SImode) ++ { ++ emit_insn (gen_extvsi (operands[0], operands[1], operands[2], ++ operands[3])); ++ DONE; ++ } ++ else if (TARGET_64BIT && GET_MODE (operands[0]) == DImode) ++ { ++ emit_insn (gen_extvdi (operands[0], operands[1], operands[2], ++ operands[3])); ++ DONE; ++ } ++ } + else + FAIL; + }) + ++(define_insn "extv" ++ [(set (match_operand:GPR 0 "register_operand" "=d") ++ (sign_extract:GPR (match_operand:GPR 1 "register_operand" "d") ++ (match_operand 2 "const_int_operand" "") ++ (match_operand 3 "const_int_operand" "")))] ++ "ISA_HAS_EXTS && INTVAL (operands[2]) <= 32" ++ "exts\t%0,%1,%3,%E2" ++ [(set_attr "type" "shift") ++ (set_attr "mode" "")]) ++ ++;; If we are extracting something no bigger than 32 bits, the destination ++;; register will be a properly sign-extended SImode value. Truncation ++;; is therefore a no-op in this case. ++(define_insn "*extv_truncdi" ++ [(set (match_operand:SUBDI 0 "register_operand" "=d") ++ (truncate:SUBDI ++ (sign_extract:DI (match_operand:DI 1 "register_operand" "d") ++ (match_operand 2 "const_int_operand" "") ++ (match_operand 3 "const_int_operand" ""))))] ++ "TARGET_64BIT && ISA_HAS_EXTS && INTVAL (operands[2]) <= 32" ++ "exts\t%0,%1,%3,%E2" ++ [(set_attr "type" "shift") ++ (set_attr "mode" "")]) ++ + (define_expand "extzv" + [(set (match_operand 0 "register_operand") + (zero_extract (match_operand 1 "nonimmediate_operand") +@@ -3055,8 +3449,17 @@ + INTVAL (operands[3]))) + DONE; + else if (mips_use_ins_ext_p (operands[1], INTVAL (operands[2]), +- INTVAL (operands[3]))) ++ INTVAL (operands[3])) ++ /* extract_bit_field can invoke us with (subreg:DI (reg:SI)) ++ as the output and size more than 31 bits. We would ++ create incorrect SI values. Instead, just FAIL. */ ++ && (GET_MODE (operands[0]) != DImode ++ || !(GET_CODE (operands[0]) == SUBREG ++ && (GET_MODE_SIZE (GET_MODE (SUBREG_REG (operands[0]))) ++ < (GET_MODE_SIZE (GET_MODE (operands[0])))) ++ && INTVAL (operands[2]) >= 32))) + { ++ mips_adjust_register_ext_operands (operands); + if (GET_MODE (operands[0]) == DImode) + emit_insn (gen_extzvdi (operands[0], operands[1], operands[2], + operands[3])); +@@ -3080,6 +3483,32 @@ + [(set_attr "type" "arith") + (set_attr "mode" "")]) + ++;; If we're extracting fewer than 32 bits, the upper 33 bits of the ++;; destination will be zero, and thus truncation will be a no-op. ++(define_insn "*extzv_truncdi" ++ [(set (match_operand:SUBDI 0 "register_operand" "=d") ++ (truncate:SUBDI ++ (zero_extract:DI (match_operand:DI 1 "register_operand" "d") ++ (match_operand 2 "const_int_operand" "") ++ (match_operand 3 "const_int_operand" ""))))] ++ "TARGET_64BIT && ISA_HAS_EXT_INS && INTVAL (operands[2]) < 32" ++ "dext\t%0,%1,%3,%2" ++ [(set_attr "type" "shift") ++ (set_attr "mode" "")]) ++ ++;; If we're truncating an extraction that is at least big as the truncation ++;; mode, we can simply extract the useful bits and sign-extend the rest. ++;; The result will be a properly sign-extended value. 
++(define_insn "*extz_truncdi_exts" ++ [(set (match_operand:SUBDI 0 "register_operand" "=d") ++ (truncate:SUBDI ++ (zero_extract:DI (match_operand:DI 1 "register_operand" "d") ++ (match_operand 2 "const_int_operand" "") ++ (match_operand 3 "const_int_operand" ""))))] ++ "TARGET_64BIT && ISA_HAS_EXTS && INTVAL (operands[2]) > " ++ "exts\t%0,%1,%3," ++ [(set_attr "type" "shift") ++ (set_attr "mode" "")]) + + (define_expand "insv" + [(set (zero_extract (match_operand 0 "nonimmediate_operand") +@@ -3102,9 +3531,9 @@ + emit_insn (gen_insvsi (operands[0], operands[1], operands[2], + operands[3])); + DONE; +- } +- else +- FAIL; ++ } ++ else ++ FAIL; + }) + + (define_insn "insv" +@@ -3118,6 +3547,62 @@ + [(set_attr "type" "arith") + (set_attr "mode" "")]) + ++(define_insn "*insvdi" ++ [(set (zero_extract:DI (match_operand:DI 0 "register_operand" "+d") ++ (match_operand 1 "const_int_operand" "") ++ (match_operand 2 "const_int_operand" "")) ++ (subreg:DI ++ (truncate:SUBDI (match_operand:DI 3 "register_operand" "d")) 0))] ++ "TARGET_64BIT && mips_use_ins_ext_p (operands[0], INTVAL (operands[1]), ++ INTVAL (operands[2]))" ++ "dins\t%0,%3,%2,%1" ++ [(set_attr "type" "shift") ++ (set_attr "mode" "DI")]) ++ ++;; Combine does not notice that zero- and sign-extensions have no ++;; effect here. ++;; ??? Should ideally be done in combine instead. ++ ++(define_insn "*insv__di" ++ [(set (zero_extract:DI (match_operand:DI 0 "register_operand" "+d") ++ (match_operand 1 "const_int_operand" "") ++ (match_operand 2 "const_int_operand" "")) ++ (any_extend:DI (match_operand:SUBDI 3 "register_operand" "d")))] ++ "TARGET_64BIT && ISA_HAS_EXT_INS && INTVAL (operands[1]) <= + 1" ++ "dins\t%0,%3,%2,%1" ++ [(set_attr "type" "shift") ++ (set_attr "mode" "DI")]) ++ ++(define_insn "*insvdi_clear_upper32" ++ [(set (zero_extract:DI (match_operand:DI 0 "register_operand" "+d") ++ (match_operand 1 "const_int_operand" "") ++ (match_operand 2 "const_int_operand" "")) ++ (subreg:DI ++ (truncate:SUBDI ++ (and:DI (match_operand:DI 3 "register_operand" "d") ++ (const_int 4294967295))) 0))] ++ "TARGET_64BIT && ISA_HAS_EXT_INS && INTVAL (operands[1]) <= 32" ++ "dins\t%0,%3,%2,%1" ++ [(set_attr "type" "shift") ++ (set_attr "mode" "DI")]) ++ ++;; Combiner pattern for cins. ++ ++(define_insn "*cins" ++ [(set (match_operand:DI 0 "register_operand" "=d") ++ (match_operator:DI 1 "mask_low_and_shift_operator" ++ [(ashift:DI ++ (match_operand:DI 2 "register_operand" "d") ++ (match_operand:DI 3 "const_int_operand" "")) ++ (match_operand:DI 4 "const_int_operand" "")]))] ++ "TARGET_64BIT && ISA_HAS_CINS" ++{ ++ operands[4] ++ = GEN_INT (mask_low_and_shift_len (DImode, INTVAL (operands[3]), ++ INTVAL (operands[4]))); ++ return "cins\t%0,%2,%3,%E4"; ++}) ++ + ;; Unaligned word moves generated by the bit field patterns. 
+ ;; + ;; As far as the rtl is concerned, both the left-part and right-part +@@ -3135,7 +3620,9 @@ + (unspec:GPR [(match_operand:BLK 1 "memory_operand" "m") + (match_operand:QI 2 "memory_operand" "m")] + UNSPEC_LOAD_LEFT))] +- "!TARGET_MIPS16 && mips_mem_fits_mode_p (mode, operands[1])" ++ "!TARGET_MIPS16 ++ && !ISA_HAS_UL_US ++ && mips_mem_fits_mode_p (mode, operands[1])" + "l\t%0,%2" + [(set_attr "type" "load") + (set_attr "mode" "")]) +@@ -3146,7 +3633,9 @@ + (match_operand:QI 2 "memory_operand" "m") + (match_operand:GPR 3 "register_operand" "0")] + UNSPEC_LOAD_RIGHT))] +- "!TARGET_MIPS16 && mips_mem_fits_mode_p (mode, operands[1])" ++ "!TARGET_MIPS16 ++ && !ISA_HAS_UL_US ++ && mips_mem_fits_mode_p (mode, operands[1])" + "r\t%0,%2" + [(set_attr "type" "load") + (set_attr "mode" "")]) +@@ -3156,7 +3645,9 @@ + (unspec:BLK [(match_operand:GPR 1 "reg_or_0_operand" "dJ") + (match_operand:QI 2 "memory_operand" "m")] + UNSPEC_STORE_LEFT))] +- "!TARGET_MIPS16 && mips_mem_fits_mode_p (mode, operands[0])" ++ "!TARGET_MIPS16 ++ && !ISA_HAS_UL_US ++ && mips_mem_fits_mode_p (mode, operands[0])" + "l\t%z1,%2" + [(set_attr "type" "store") + (set_attr "mode" "")]) +@@ -3172,6 +3663,28 @@ + [(set_attr "type" "store") + (set_attr "mode" "")]) + ++;; Unaligned load and store patterns. ++ ++(define_insn "mov_u" ++ [(set (match_operand:GPR 0 "register_operand" "=d") ++ (unspec:GPR [(match_operand:BLK 1 "memory_operand" "m") ++ (match_operand:QI 2 "memory_operand" "m")] ++ UNSPEC_UNALIGNED_LOAD))] ++ "ISA_HAS_UL_US && mips_mem_fits_mode_p (mode, operands[1])" ++ "u\t%0,%2" ++ [(set_attr "type" "load") ++ (set_attr "mode" "")]) ++ ++(define_insn "mov_u" ++ [(set (match_operand:BLK 0 "memory_operand" "=m") ++ (unspec:BLK [(match_operand:GPR 1 "reg_or_0_operand" "dJ") ++ (match_operand:QI 2 "memory_operand" "m")] ++ UNSPEC_UNALIGNED_STORE))] ++ "ISA_HAS_UL_US && mips_mem_fits_mode_p (mode, operands[0])" ++ "u\t%z1,%2" ++ [(set_attr "type" "store") ++ (set_attr "mode" "")]) ++ + ;; An instruction to calculate the high part of a 64-bit SYMBOL_ABSOLUTE. + ;; The required value is: + ;; +@@ -3497,6 +4010,26 @@ + (const_string "*") + (const_string "*")])]) + ++;; Truncate to QI in two steps. Combine should probably canonicalize ++;; this to just one truncate:QI. ++ ++(define_insn "*truncsi_storeqi" ++ [(set (match_operand:QI 0 "memory_operand" "=m") ++ (subreg:QI ++ (truncate:SI (match_operand:DI 1 "register_operand" "d")) 3))] ++ "TARGET_64BIT && !TARGET_MIPS16 && TARGET_BIG_ENDIAN" ++ "sb\t%z1,%0" ++ [(set_attr "type" "store") ++ (set_attr "mode" "QI")]) ++ ++(define_insn "*truncsi_storehi" ++ [(set (match_operand:HI 0 "memory_operand" "=m") ++ (subreg:HI ++ (truncate:SI (match_operand:DI 1 "register_operand" "d")) 2))] ++ "TARGET_64BIT && !TARGET_MIPS16 && TARGET_BIG_ENDIAN" ++ "sh\t%z1,%0" ++ [(set_attr "type" "store") ++ (set_attr "mode" "HI")]) + + ;; On the mips16, we can split ld $r,N($r) into an add and a load, + ;; when the original load is a 4 byte instruction but the add and the +@@ -4270,6 +4803,22 @@ + [(set (match_operand:P 0 "register_operand" "=d") + (const:P (unspec:P [(const_int 0)] UNSPEC_GP)))]) + ++;; Move the constant value of __gnu_local_gp (operand 1) into ++;; operand 0, for non-PIC abicalls code. All uses of the result ++;; are explicit, so there's no need for unspec_volatile here. 
++(define_insn_and_split "loadgp_nonpic" ++ [(set (match_operand 0 "register_operand" "=d") ++ (const (unspec [(match_operand 1 "" "")] UNSPEC_LOADGP)))] ++ "TARGET_ABICALLS && !flag_pic" ++ "#" ++ "" ++ [(const_int 0)] ++{ ++ mips_emit_move (operands[0], operands[1]); ++ DONE; ++} ++ [(set_attr "length" "8")]) ++ + ;; Insn to initialize $gp for n32/n64 abicalls. Operand 0 is the offset + ;; of _gp from the start of this function. Operand 1 is the incoming + ;; function address. +@@ -4820,7 +5369,7 @@ + (define_insn_and_split "" + [(set (match_operand:SI 0 "register_operand" "=d") + (lshiftrt:SI (match_operand:SI 1 "memory_operand" "m") +- (match_operand:SI 2 "immediate_operand" "I")))] ++ (match_operand:SI 2 "const_int_operand" "")))] + "TARGET_MIPS16" + "#" + "" +@@ -4997,6 +5546,96 @@ + [(set_attr "type" "branch") + (set_attr "mode" "none")]) + ++;; Conditional branch on whether a bit is set or clear. ++ ++(define_insn "*branch_bit" ++ [(set (pc) ++ (if_then_else ++ (match_operator 0 "equality_operator" ++ [(zero_extract:GPR ++ (match_operand:GPR 2 "register_operand" "d") ++ (const_int 1) ++ (match_operand 3 "const_int_operand" "")) ++ (const_int 0)]) ++ (label_ref (match_operand 1 "" "")) ++ (pc)))] ++ "ISA_HAS_BBIT" ++{ ++ return mips_output_conditional_branch (insn, operands, ++ MIPS_BRANCH ("b%G0", "%2,%3,%1"), ++ MIPS_BRANCH ("b%H0", "%2,%3,%1")); ++} ++ [(set_attr "type" "branch") ++ (set_attr "branch_without_likely" "yes") ++ (set_attr "mode" "none")]) ++ ++(define_insn "*branch_bit_truncdi" ++ [(set (pc) ++ (if_then_else ++ (match_operator 0 "equality_operator" ++ [(zero_extract:DI ++ (subreg:DI ++ (truncate:SUBDI ++ (match_operand:DI 2 "register_operand" "d")) 0) ++ (const_int 1) ++ (match_operand 3 "const_int_operand" "")) ++ (const_int 0)]) ++ (label_ref (match_operand 1 "" "")) ++ (pc)))] ++ "TARGET_64BIT && ISA_HAS_BBIT" ++{ ++ return mips_output_conditional_branch (insn, operands, ++ MIPS_BRANCH ("b%G0", "%2,%3,%1"), ++ MIPS_BRANCH ("b%H0", "%2,%3,%1")); ++} ++ [(set_attr "type" "branch") ++ (set_attr "branch_without_likely" "yes") ++ (set_attr "mode" "none")]) ++ ++(define_insn "*branch_bit_inverted" ++ [(set (pc) ++ (if_then_else ++ (match_operator 0 "equality_operator" ++ [(zero_extract:GPR ++ (match_operand:GPR 2 "register_operand" "d") ++ (const_int 1) ++ (match_operand 3 "const_int_operand" "")) ++ (const_int 0)]) ++ (pc) ++ (label_ref (match_operand 1 "" ""))))] ++ "ISA_HAS_BBIT" ++{ ++ return mips_output_conditional_branch (insn, operands, ++ MIPS_BRANCH ("b%H0", "%2,%3,%1"), ++ MIPS_BRANCH ("b%G0", "%2,%3,%1")); ++} ++ [(set_attr "type" "branch") ++ (set_attr "branch_without_likely" "yes") ++ (set_attr "mode" "none")]) ++ ++(define_insn "*branch_bit_truncdi_inverted" ++ [(set (pc) ++ (if_then_else ++ (match_operator 0 "equality_operator" ++ [(zero_extract:DI ++ (subreg:DI ++ (truncate:SUBDI ++ (match_operand:DI 2 "register_operand" "d")) 0) ++ (const_int 1) ++ (match_operand 3 "const_int_operand" "")) ++ (const_int 0)]) ++ (pc) ++ (label_ref (match_operand 1 "" ""))))] ++ "TARGET_64BIT && ISA_HAS_BBIT" ++{ ++ return mips_output_conditional_branch (insn, operands, ++ MIPS_BRANCH ("b%H0", "%2,%3,%1"), ++ MIPS_BRANCH ("b%G0", "%2,%3,%1")); ++} ++ [(set_attr "type" "branch") ++ (set_attr "branch_without_likely" "yes") ++ (set_attr "mode" "none")]) ++ + ;; MIPS16 branches + + (define_insn "*branch_equality_mips16" +@@ -5065,11 +5704,42 @@ + [(set (match_operand:GPR 0 "register_operand" "=d") + (eq:GPR (match_operand:GPR 1 "register_operand" "d") + (const_int 0)))] +- 
"!TARGET_MIPS16" ++ "!TARGET_MIPS16 && !ISA_HAS_SEQ_SNE" + "sltu\t%0,%1,1" + [(set_attr "type" "slt") + (set_attr "mode" "")]) + ++(define_insn "*seq_si_to_di" ++ [(set (match_operand:DI 0 "register_operand" "=d") ++ (eq:DI (match_operand:SI 1 "register_operand" "d") ++ (const_int 0)))] ++ "TARGET_64BIT && !TARGET_MIPS16 && !ISA_HAS_SEQ_SNE" ++ "sltu\t%0,%1,1" ++ [(set_attr "type" "slt") ++ (set_attr "mode" "DI")]) ++ ++(define_insn "*s__s" ++ [(set (match_operand:GPR 0 "register_operand" "=d,d") ++ (equality_op:GPR (match_operand:GPR 1 "register_operand" "%d,d") ++ (match_operand:GPR 2 "reg_imm10_operand" "d,YB")))] ++ "ISA_HAS_SEQ_SNE" ++ "@ ++ s\\t%0,%1,%2 ++ si\\t%0,%1,%2" ++ [(set_attr "type" "arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "*s_si_to_di_s" ++ [(set (match_operand:DI 0 "register_operand" "=d,d") ++ (equality_op:DI (match_operand:SI 1 "register_operand" "%d,d") ++ (match_operand:SI 2 "reg_imm10_operand" "d,YB")))] ++ "TARGET_64BIT && ISA_HAS_SEQ_SNE" ++ "@ ++ s\\t%0,%1,%2 ++ si\\t%0,%1,%2" ++ [(set_attr "type" "arith") ++ (set_attr "mode" "SI")]) ++ + (define_insn "*seq__mips16" + [(set (match_operand:GPR 0 "register_operand" "=t") + (eq:GPR (match_operand:GPR 1 "register_operand" "d") +@@ -5079,6 +5749,15 @@ + [(set_attr "type" "slt") + (set_attr "mode" "")]) + ++(define_insn "*seq_si_to_di_mips16" ++ [(set (match_operand:DI 0 "register_operand" "=d") ++ (eq:DI (match_operand:SI 1 "register_operand" "d") ++ (const_int 0)))] ++ "TARGET_64BIT && TARGET_MIPS16" ++ "sltu\t%1,1" ++ [(set_attr "type" "slt") ++ (set_attr "mode" "DI")]) ++ + ;; "sne" uses sltu instructions in which the first operand is $0. + ;; This isn't possible in mips16 code. + +@@ -5093,11 +5772,20 @@ + [(set (match_operand:GPR 0 "register_operand" "=d") + (ne:GPR (match_operand:GPR 1 "register_operand" "d") + (const_int 0)))] +- "!TARGET_MIPS16" ++ "!TARGET_MIPS16 && !ISA_HAS_SEQ_SNE" + "sltu\t%0,%.,%1" + [(set_attr "type" "slt") + (set_attr "mode" "")]) + ++(define_insn "*sne_si_to_di" ++ [(set (match_operand:DI 0 "register_operand" "=d") ++ (ne:DI (match_operand:SI 1 "register_operand" "d") ++ (const_int 0)))] ++ "TARGET_64BIT && !TARGET_MIPS16 && !ISA_HAS_SEQ_SNE" ++ "sltu\t%0,%.,%1" ++ [(set_attr "type" "slt") ++ (set_attr "mode" "DI")]) ++ + (define_expand "sgt" + [(set (match_operand:SI 0 "register_operand") + (gt:SI (match_dup 1) +@@ -5353,6 +6041,26 @@ + return "%*b\t%l0%/"; + else + { ++ if (final_sequence && (mips_abi == ABI_32 || mips_abi == ABI_O64)) ++ { ++ /* If the delay slot contains a $gp restore, we need to ++ do that first, because we need it for the load ++ label. Other ABIs do not have caller-save $gp. 
*/ ++ rtx next = NEXT_INSN (insn); ++ if (INSN_P (next) && !INSN_DELETED_P (next)) ++ { ++ rtx pat = PATTERN (next); ++ if (GET_CODE (pat) == SET ++ && REG_P (SET_DEST (pat)) ++ && REGNO (SET_DEST (pat)) == PIC_OFFSET_TABLE_REGNUM) ++ { ++ rtx ops[2]; ++ ops[0] = SET_DEST (pat); ++ ops[1] = SET_SRC (pat); ++ output_asm_insn (mips_output_move (ops[0], ops[1]), ops); ++ } ++ } ++ } + output_asm_insn (mips_output_load_label (), operands); + return "%*jr\t%@%/%]"; + } +@@ -5371,7 +6079,13 @@ + (lt (abs (minus (match_dup 0) + (plus (pc) (const_int 4)))) + (const_int 131072))) +- (const_int 4) (const_int 16)))]) ++ (const_int 4) ++ (if_then_else ++ ;; for these two ABIs we may need to move a restore of $gp ++ (ior (eq (symbol_ref "mips_abi") (symbol_ref "ABI_32")) ++ (eq (symbol_ref "mips_abi") (symbol_ref "ABI_O64"))) ++ (const_int 20) ++ (const_int 16))))]) + + ;; We need a different insn for the mips16, because a mips16 branch + ;; does not have a delay slot. +@@ -5462,11 +6176,12 @@ + + ;; Restore the gp that we saved above. Despite the earlier comment, it seems + ;; that older code did recalculate the gp from $25. Continue to jump through +-;; $25 for compatibility (we lose nothing by doing so). ++;; $25 for compatibility (we lose nothing by doing so). Similarly restore ++;; $gp if we might be jumping to code which expects that. + + (define_expand "builtin_longjmp" + [(use (match_operand 0 "register_operand"))] +- "TARGET_USE_GOT" ++ "TARGET_USE_GOT || TARGET_ABICALLS" + { + /* The elements of the buffer are, in order: */ + int W = GET_MODE_SIZE (Pmode); +--- a/gcc/config/mips/mips.opt ++++ b/gcc/config/mips/mips.opt +@@ -124,6 +124,10 @@ mfix-vr4130 + Target Report Var(TARGET_FIX_VR4130) + Work around VR4130 mflo/mfhi errata + ++mfix-ice9a ++Target Report Var(TARGET_FIX_ICE9A) ++Work around SiCortex ICE9A errata ++ + mfix4300 + Target Report Var(TARGET_4300_MUL_FIX) + Work around an early 4300 hardware bug +@@ -176,6 +180,10 @@ mips16 + Target Report RejectNegative Mask(MIPS16) + Generate MIPS16 code + ++mips16e ++Target Report RejectNegative Mask(MIPS16) MaskExists ++Deprecated; alias for -mips16 ++ + mips3d + Target Report RejectNegative Mask(MIPS3D) + Use MIPS-3D instructions +@@ -228,6 +236,10 @@ mno-mips3d + Target Report RejectNegative InverseMask(MIPS3D) + Do not use MIPS-3D instructions + ++mocteon-useun ++Target Report Mask(OCTEON_UNALIGNED) ++Use Octeon-specific unaligned loads/stores for 32/64-bit data ++ + mpaired-single + Target Report Mask(PAIRED_SINGLE_FLOAT) + Use paired-single floating-point instructions +@@ -260,6 +272,10 @@ mtune= + Target RejectNegative Joined Var(mips_tune_string) + -mtune=PROCESSOR Optimize the output for PROCESSOR + ++muclibc ++Target RejectNegative Var(building_for_uclibc) ++Building with -muclibc ++ + muninit-const-in-rodata + Target Report Var(TARGET_UNINIT_CONST_IN_RODATA) + Put uninitialized constants in ROM (needs -membedded-data) +@@ -268,6 +284,10 @@ mvr4130-align + Target Report Mask(VR4130_ALIGN) + Perform VR4130-specific alignment optimizations + ++mwarn-framesize= ++Target RejectNegative Joined ++Warn if a single function's framesize exceeds the given framesize ++ + mxgot + Target Report Var(TARGET_XGOT) + Lift restrictions on GOT size +--- /dev/null ++++ b/gcc/config/mips/montavista-linux.h +@@ -0,0 +1,54 @@ ++/* MontaVista GNU/Linux Configuration. ++ Copyright (C) 2009 ++ Free Software Foundation, Inc. ++ ++This file is part of GCC. 
++ ++GCC is free software; you can redistribute it and/or modify ++it under the terms of the GNU General Public License as published by ++the Free Software Foundation; either version 3, or (at your option) ++any later version. ++ ++GCC is distributed in the hope that it will be useful, ++but WITHOUT ANY WARRANTY; without even the implied warranty of ++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++GNU General Public License for more details. ++ ++You should have received a copy of the GNU General Public License ++along with GCC; see the file COPYING3. If not see ++. */ ++ ++/* Override linux64.h to default to O32. */ ++#undef SUBTARGET_SELF_SPECS ++#define SUBTARGET_SELF_SPECS \ ++NO_SHARED_SPECS, \ ++"%{!EB:%{!EL:%(endian_spec)}}", \ ++"%{!mabi=*: -mabi=32}" ++ ++/* We do not need to provide an explicit big-endian multilib. */ ++#undef MULTILIB_DEFAULTS ++#define MULTILIB_DEFAULTS \ ++ { "meb", "mabi=32" } ++ ++/* The various C libraries each have their own subdirectory. */ ++#undef SYSROOT_SUFFIX_SPEC ++#define SYSROOT_SUFFIX_SPEC \ ++ "%{mel:%{msoft-float:/mel/soft-float ; \ ++ :/mel} ; \ ++ msoft-float:/soft-float}" ++ ++/* MULTILIB_OSDIRNAMES provides directory names used in two ways: ++ relative to $target/lib/ in the GCC installation, and relative to ++ lib/ and usr/lib/ in a sysroot. For the latter, we want names such ++ as plain ../lib64, but these cannot be used outside the sysroot ++ because different multilibs would be mapped to the same directory. ++ Directories are searched both with and without the multilib suffix, ++ so it suffices if the directory without the suffix is correct ++ within the sysroot while the directory with the suffix doesn't ++ exist. We use STARTFILE_PREFIX_SPEC to achieve the desired ++ effect. */ ++#undef STARTFILE_PREFIX_SPEC ++#define STARTFILE_PREFIX_SPEC \ ++ "%{mabi=32: /usr/local/lib/ /lib/ /usr/lib/} \ ++ %{mabi=n32: /usr/local/lib32/ /lib32/ /usr/lib32/} \ ++ %{mabi=64: /usr/local/lib64/ /lib64/ /usr/lib64/}" +--- /dev/null ++++ b/gcc/config/mips/octeon-elf-unwind.h +@@ -0,0 +1,57 @@ ++/* Stack unwinding support through the first exception frame. ++ Copyright (C) 2007 Cavium Networks. ++ ++This file is part of GCC. ++ ++GCC is free software; you can redistribute it and/or modify ++it under the terms of the GNU General Public License as published by ++the Free Software Foundation; either version 2, or (at your option) ++any later version. ++ ++GCC is distributed in the hope that it will be useful, ++but WITHOUT ANY WARRANTY; without even the implied warranty of ++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++GNU General Public License for more details. ++ ++You should have received a copy of the GNU General Public License ++along with GCC; see the file COPYING. If not, write to ++the Free Software Foundation, 51 Franklin Street, Fifth Floor, ++Boston, MA 02110-1301, USA. */ ++ ++#define MD_FALLBACK_FRAME_STATE_FOR octeon_elf_fallback_frame_state ++ ++/* Check whether this is the cvmx_interrupt_stage2 frame. If the ++ function call was dispatched via k0 assume we are in ++ cvmx_interrupt_stage2. In this case the sp in point to the saved ++ register array. */ ++ ++static _Unwind_Reason_Code ++octeon_elf_fallback_frame_state (struct _Unwind_Context *context, ++ _Unwind_FrameState *fs) ++{ ++ unsigned i; ++ unsigned *pc = context->ra; ++ ++ /* Look for "jalr k0". 
*/ ++ if (pc[-2] != 0x0340f809) ++ return _URC_END_OF_STACK; ++ ++ for (i = 0; i < 32; i++) ++ { ++ fs->regs.reg[i].how = REG_SAVED_OFFSET; ++ fs->regs.reg[i].loc.offset = 8 * i; ++ } ++ ++ /* Keep the next frame's sp. This way we have a CFA that points ++ exactly to the register array. */ ++ fs->regs.cfa_how = CFA_REG_OFFSET; ++ fs->regs.cfa_reg = STACK_POINTER_REGNUM; ++ fs->regs.cfa_offset = 0; ++ ++ /* DEPC is saved as the 35. register. */ ++ fs->regs.reg[DWARF_ALT_FRAME_RETURN_COLUMN].how = REG_SAVED_OFFSET; ++ fs->regs.reg[DWARF_ALT_FRAME_RETURN_COLUMN].loc.offset = 8 * 35; ++ fs->retaddr_column = DWARF_ALT_FRAME_RETURN_COLUMN; ++ ++ return _URC_NO_REASON; ++} +--- /dev/null ++++ b/gcc/config/mips/octeon-elf.h +@@ -0,0 +1,98 @@ ++/* Macros for mips*-octeon-elf target. ++ Copyright (C) 2004, 2005, 2006 Cavium Networks. ++ ++This file is part of GCC. ++ ++GCC is free software; you can redistribute it and/or modify ++it under the terms of the GNU General Public License as published by ++the Free Software Foundation; either version 2, or (at your option) ++any later version. ++ ++GCC is distributed in the hope that it will be useful, ++but WITHOUT ANY WARRANTY; without even the implied warranty of ++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++GNU General Public License for more details. ++ ++You should have received a copy of the GNU General Public License ++along with GCC; see the file COPYING. If not, write to ++the Free Software Foundation, 51 Franklin Street, Fifth Floor, ++Boston, MA 02110-1301, USA. */ ++ ++/* Add MASK_SOFT_FLOAT and MASK_OCTEON_UNALIGNED. */ ++ ++#undef TARGET_DEFAULT ++#define TARGET_DEFAULT (MASK_SOFT_FLOAT_ABI | MASK_OCTEON_UNALIGNED) ++ ++/* Forward -m*octeon-useun. */ ++ ++#undef SUBTARGET_ASM_SPEC ++#define SUBTARGET_ASM_SPEC "%{mno-octeon-useun} %{!mno-octeon-useun:-mocteon-useun}" ++ ++/* Enable backtrace including on machine exceptions by default. */ ++ ++#undef SUBTARGET_CC1_SPEC ++#define SUBTARGET_CC1_SPEC "%{!fno-asynchronous-unwind-tables:-fasynchronous-unwind-tables}" ++ ++/* Without ASM_PREFERRED_EH_DATA_FORMAT, output_call_frame_info emits ++ pointer-sized addresses for FDE addresses. For 64-bit targets, it does ++ it without properly "switching over" to 64-bit as described in the DWARF3 ++ spec. GDB can fall back on .eh_frames and misinterpret FDE addresses. ++ Instead let's be explicit and use augmentation to describe the encoding if ++ pointer size is 64. */ ++ ++#undef ASM_PREFERRED_EH_DATA_FORMAT ++#define ASM_PREFERRED_EH_DATA_FORMAT(CODE, GLOBAL) \ ++ ((CODE) == 1 && POINTER_SIZE == 64 \ ++ ? (ABI_HAS_64BIT_SYMBOLS ? DW_EH_PE_udata8 : DW_EH_PE_udata4) \ ++ : DW_EH_PE_absptr) ++ ++/* Link to libc library. */ ++ ++#undef LIB_SPEC ++#define LIB_SPEC "-lc" ++ ++/* Link to startup file. */ ++ ++#undef STARTFILE_SPEC ++#define STARTFILE_SPEC "crti%O%s crtbegin%O%s crt0%O%s" ++ ++/* Default our test-only n64 configuration to -G0 since that is what ++ the kernel uses. */ ++ ++#undef SUBTARGET_SELF_SPECS ++#define SUBTARGET_SELF_SPECS \ ++"%{mabi=64:%{!G*: -G0}}" ++ ++/* Pass linker emulation mode for N32. */ ++ ++#undef LINK_SPEC ++#define LINK_SPEC "\ ++%(endian_spec) \ ++%{G*} %{mips1} %{mips2} %{mips3} %{mips4} %{mips32} %{mips32r2} %{mips64} \ ++%{mips64r2} %{bestGnum} %{shared} %{non_shared} \ ++%{mabi=n32:-melf32e%{!EL:b}%{EL:l}octeonn32} \ ++%{mabi=64:-melf64e%{!EL:b}%{EL:l}octeon}" ++ ++/* Override because of N32. */ ++ ++#undef LOCAL_LABEL_PREFIX ++#define LOCAL_LABEL_PREFIX ((mips_abi == ABI_N32) ? "." 
: "$") ++ ++/* Append the core number to the GCOV filename FN. */ ++ ++#define GCOV_TARGET_SUFFIX_LENGTH 2 ++#define ADD_GCOV_TARGET_SUFFIX(FN) \ ++do \ ++ { \ ++ char *fn = FN; \ ++ int core; \ ++ char s[3]; \ ++ \ ++ asm ("rdhwr %0, $0" : "=r"(core)); \ ++ sprintf (s, "%d", core); \ ++ strcat (fn, s); \ ++ } \ ++while (0) ++ ++/* Code to unwind through the exception frame. */ ++#define MD_UNWIND_SUPPORT "config/mips/octeon-elf-unwind.h" +--- /dev/null ++++ b/gcc/config/mips/octeon.h +@@ -0,0 +1,68 @@ ++/* Macros for mips*-octeon-* target. ++ Copyright (C) 2004, 2005, 2006 Cavium Networks. ++ ++This file is part of GCC. ++ ++GCC is free software; you can redistribute it and/or modify ++it under the terms of the GNU General Public License as published by ++the Free Software Foundation; either version 2, or (at your option) ++any later version. ++ ++GCC is distributed in the hope that it will be useful, ++but WITHOUT ANY WARRANTY; without even the implied warranty of ++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++GNU General Public License for more details. ++ ++You should have received a copy of the GNU General Public License ++along with GCC; see the file COPYING. If not, write to ++the Free Software Foundation, 51 Franklin Street, Fifth Floor, ++Boston, MA 02110-1301, USA. */ ++ ++#define CVMX_SHARED_BSS_FLAGS (SECTION_WRITE | SECTION_BSS) ++ ++#undef TARGET_ASM_SELECT_SECTION ++#define TARGET_ASM_SELECT_SECTION octeon_select_section ++ ++#undef TARGET_ASM_UNIQUE_SECTION ++#define TARGET_ASM_UNIQUE_SECTION octeon_unique_section ++ ++/* Implement ASM_OUTPUT_ALIGNED_DECL_LOCAL. This differs from the ++ generic version only in the use of cvmx_shared attribute. */ ++ ++#undef ASM_OUTPUT_ALIGNED_DECL_LOCAL ++#define ASM_OUTPUT_ALIGNED_DECL_LOCAL(STREAM, DECL, NAME, SIZE, ALIGN) \ ++ do \ ++ { \ ++ if ((DECL) && TREE_CODE ((DECL)) == VAR_DECL \ ++ && lookup_attribute ("cvmx_shared", DECL_ATTRIBUTES (DECL))) \ ++ { \ ++ fprintf ((STREAM), "%s", LOCAL_ASM_OP); \ ++ assemble_name ((STREAM), (NAME)); \ ++ fprintf ((STREAM), "\n"); \ ++ octeon_output_shared_variable ((STREAM), (DECL), (NAME), \ ++ (SIZE), (ALIGN)); \ ++ } \ ++ else \ ++ ASM_OUTPUT_ALIGNED_LOCAL (STREAM, NAME, SIZE, ALIGN); \ ++ } \ ++ while (0) ++ ++ ++/* Implement ASM_OUTPUT_ALIGNED_DECL_COMMON. This differs from the mips ++ version only in the use of cvmx_shared attribute. */ ++ ++#undef ASM_OUTPUT_ALIGNED_DECL_COMMON ++#define ASM_OUTPUT_ALIGNED_DECL_COMMON(STREAM, DECL, NAME, SIZE, ALIGN) \ ++ { \ ++ if (TREE_CODE ((DECL)) == VAR_DECL \ ++ && lookup_attribute ("cvmx_shared", DECL_ATTRIBUTES ((DECL)))) \ ++ { \ ++ if (TREE_PUBLIC ((DECL)) && DECL_NAME ((DECL))) \ ++ targetm.asm_out.globalize_label (asm_out_file, (NAME)); \ ++ octeon_output_shared_variable ((STREAM), (DECL), (NAME), \ ++ (SIZE), (ALIGN)); \ ++ } \ ++ else \ ++ mips_output_aligned_decl_common ((STREAM), (DECL), (NAME), (SIZE), \ ++ (ALIGN)); \ ++ } +--- /dev/null ++++ b/gcc/config/mips/octeon.md +@@ -0,0 +1,85 @@ ++;; Octeon pipeline description. ++;; Copyright (C) 2004, 2005, 2006 Cavium Networks. ++;; ++;; This file is part of GCC. ++ ++;; GCC is free software; you can redistribute it and/or modify it ++;; under the terms of the GNU General Public License as published ++;; by the Free Software Foundation; either version 2, or (at your ++;; option) any later version. 
++ ++;; GCC is distributed in the hope that it will be useful, but WITHOUT ++;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY ++;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public ++;; License for more details. ++ ++;; You should have received a copy of the GNU General Public License ++;; along with GCC; see the file COPYING. If not, write to the ++;; Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston, ++;; MA 02110-1301, USA. ++ ++;; The OCTEON is a dual-issue processor that can bundle instructions as: ++;; {arith|imul(3)|idiv|*hilo|condmove|load|store|branch|jump|xfer} ++;; {arith|imul(3)|idiv|*hilo|condmove} ++ ++(define_automaton "octeon") ++ ++(define_cpu_unit "octeon_pipe0" "octeon") ++(define_cpu_unit "octeon_pipe1" "octeon") ++(define_cpu_unit "octeon_mult" "octeon") ++ ++(define_insn_reservation "octeon_arith" 1 ++ (and (eq_attr "cpu" "octeon") ++ (eq_attr "type" "arith,const,shift,slt,nop,logical,signext,move")) ++ "octeon_pipe0 | octeon_pipe1") ++ ++(define_insn_reservation "octeon_condmove" 2 ++ (and (eq_attr "cpu" "octeon") ++ (eq_attr "type" "condmove")) ++ "octeon_pipe0 | octeon_pipe1") ++ ++;; ??? Unaligned accesses take longer. We will need to differentiate ++;; between the two. ++ ++(define_insn_reservation "octeon_pipe0" 2 ++ (and (eq_attr "cpu" "octeon") ++ (eq_attr "type" "load,store,prefetch,mfc,mtc")) ++ "octeon_pipe0") ++ ++(define_insn_reservation "octeon_brj" 1 ++ (and (eq_attr "cpu" "octeon") ++ (eq_attr "type" "branch,jump,call,trap")) ++ "octeon_pipe0") ++ ++(define_insn_reservation "octeon_imul3" 5 ++ (and (eq_attr "cpu" "octeon") ++ (eq_attr "type" "imul3,pop,clz")) ++ "(octeon_pipe0 | octeon_pipe1) + octeon_mult") ++ ++(define_insn_reservation "octeon_imul" 2 ++ (and (eq_attr "cpu" "octeon") ++ (eq_attr "type" "imul,mthilo")) ++ "(octeon_pipe0 | octeon_pipe1) + octeon_mult, octeon_mult") ++ ++(define_insn_reservation "octeon_mfhilo" 5 ++ (and (eq_attr "cpu" "octeon") ++ (eq_attr "type" "mfhilo")) ++ "(octeon_pipe0 | octeon_pipe1) + octeon_mult") ++ ++(define_insn_reservation "octeon_imadd" 4 ++ (and (eq_attr "cpu" "octeon") ++ (eq_attr "type" "imadd")) ++ "(octeon_pipe0 | octeon_pipe1) + octeon_mult, (octeon_mult * 3)") ++ ++(define_insn_reservation "octeon_idiv" 72 ++ (and (eq_attr "cpu" "octeon") ++ (eq_attr "type" "idiv")) ++ "(octeon_pipe0 | octeon_pipe1) + octeon_mult, (octeon_mult * 71)") ++ ++;; Assume both pipes are needed for unknown and multiple-instruction ++;; patterns. ++ ++(define_insn_reservation "octeon_unknown" 1 ++ (and (eq_attr "cpu" "octeon") ++ (eq_attr "type" "unknown,multi")) ++ "octeon_pipe0 + octeon_pipe1") +--- a/gcc/config/mips/predicates.md ++++ b/gcc/config/mips/predicates.md +@@ -105,11 +105,15 @@ + /* We can only use direct calls for TARGET_ABSOLUTE_ABICALLS if we + are sure that the target function does not need $25 to be live + on entry. This is true for any locally-defined function because +- any such function will use %hi/%lo accesses to set up $gp. */ ++ any such function will use %hi/%lo accesses to set up $gp. ++ Alternatively, if PLTs and copy relocations are available, the ++ static linker will make sure that $25 is valid on entry to the ++ target function. 
*/ + if (TARGET_ABSOLUTE_ABICALLS + && !(GET_CODE (op) == SYMBOL_REF + && SYMBOL_REF_DECL (op) +- && !DECL_EXTERNAL (SYMBOL_REF_DECL (op)))) ++ && !DECL_EXTERNAL (SYMBOL_REF_DECL (op))) ++ && flag_pic) + return false; + + /* If -mlong-calls or if this function has an explicit long_call +@@ -209,6 +213,20 @@ + } + }) + ++(define_predicate "mask_low_and_shift_operator" ++ (and (match_code "and") ++ (match_test "GET_CODE (XEXP (op, 0)) == ASHIFT ++ && GET_CODE (XEXP (op, 1)) == CONST_INT ++ && GET_CODE (XEXP (XEXP (op, 0), 1)) == CONST_INT")) ++{ ++ int len; ++ ++ len = mask_low_and_shift_len (GET_MODE (op), ++ INTVAL (XEXP (XEXP (op, 0), 1)), ++ INTVAL (XEXP (op, 1))); ++ return 0 < len && len <= 32; ++}) ++ + (define_predicate "consttable_operand" + (match_test "CONSTANT_P (op)")) + +--- a/gcc/config/mips/sde.h ++++ b/gcc/config/mips/sde.h +@@ -19,7 +19,11 @@ You should have received a copy of the G + along with GCC; see the file COPYING3. If not see + . */ + +-#define DRIVER_SELF_SPECS \ ++#undef TARGET_MIPS_SDE ++#define TARGET_MIPS_SDE 1 ++ ++#undef SUBTARGET_SELF_SPECS ++#define SUBTARGET_SELF_SPECS \ + /* Make sure a -mips option is present. This helps us to pick \ + the right multilib, and also makes the later specs easier \ + to write. */ \ +@@ -28,6 +32,9 @@ along with GCC; see the file COPYING3. + /* Infer the default float setting from -march. */ \ + MIPS_ARCH_FLOAT_SPEC, \ + \ ++ /* Infer the default dsp setting from -march. */ \ ++ MIPS_ARCH_DSP_SPEC, \ ++ \ + /* If no ABI option is specified, infer one from the ISA level \ + or -mgp setting. */ \ + "%{!mabi=*: %{" MIPS_32BIT_OPTION_SPEC ": -mabi=32;: -mabi=n32}}", \ +@@ -56,7 +63,6 @@ along with GCC; see the file COPYING3. + #undef SUBTARGET_ASM_SPEC + #define SUBTARGET_ASM_SPEC "\ + %{!mips1:--trap} \ +-%{fPIC|fpic|fPIE|fpie:%{!mips16*:-KPIC}} \ + %{mips16:-no-mips16}" + + #undef LINK_SPEC +--- a/gcc/config/mips/sdemtk.h ++++ b/gcc/config/mips/sdemtk.h +@@ -19,6 +19,8 @@ You should have received a copy of the G + along with GCC; see the file COPYING3. If not see + . */ + ++#define TARGET_MIPS_SDEMTK 1 ++ + #define TARGET_OS_CPP_BUILTINS() \ + do \ + { \ +@@ -105,3 +107,13 @@ extern void mips_sync_icache (void *beg, + /* ...nor does the call sequence preserve $31. */ + #undef MIPS_SAVE_REG_FOR_PROFILING_P + #define MIPS_SAVE_REG_FOR_PROFILING_P(REGNO) ((REGNO) == GP_REG_FIRST + 31) ++ ++/* From mips.h, with mno-float option added. */ ++ ++#undef MIPS_ARCH_FLOAT_SPEC ++#define MIPS_ARCH_FLOAT_SPEC \ ++ "%{mhard-float|msoft-float|mno-float|march=mips*:; \ ++ march=vr41*|march=m4k|march=4k*|march=24kc|march=24kec \ ++ |march=34kc|march=74kc|march=5kc: -msoft-float; \ ++ march=*: -mhard-float}" ++ +--- /dev/null ++++ b/gcc/config/mips/sicortex.h +@@ -0,0 +1,30 @@ ++/* SiCortex GNU/Linux Configuration. ++ Copyright (C) 2008 ++ Free Software Foundation, Inc. ++ ++This file is part of GCC. ++ ++GCC is free software; you can redistribute it and/or modify ++it under the terms of the GNU General Public License as published by ++the Free Software Foundation; either version 3, or (at your option) ++any later version. ++ ++GCC is distributed in the hope that it will be useful, ++but WITHOUT ANY WARRANTY; without even the implied warranty of ++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++GNU General Public License for more details. ++ ++You should have received a copy of the GNU General Public License ++along with GCC; see the file COPYING3. If not see ++. */ ++ ++/* Override linux.h default to add __SICORTEX__ define. 
*/ ++#undef TARGET_OS_CPP_BUILTINS ++#define TARGET_OS_CPP_BUILTINS() \ ++ do { \ ++ LINUX_TARGET_OS_CPP_BUILTINS(); \ ++ builtin_define ("__SICORTEX__"); \ ++ /* The GNU C++ standard library requires this. */ \ ++ if (c_dialect_cxx ()) \ ++ builtin_define ("_GNU_SOURCE"); \ ++ } while (0) +--- /dev/null ++++ b/gcc/config/mips/t-crtfm +@@ -0,0 +1,9 @@ ++ ++EXTRA_MULTILIB_PARTS += crtfastmath.o ++ ++EXTRA_PARTS += crtfastmath.o ++ ++$(T)crtfastmath.o: $(srcdir)/config/mips/crtfastmath.c $(GCC_PASSES) ++ $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(MULTILIB_CFLAGS) $(INCLUDES) \ ++ -c -o $(T)crtfastmath.o $(srcdir)/config/mips/crtfastmath.c ++ +--- /dev/null ++++ b/gcc/config/mips/t-montavista-elf +@@ -0,0 +1,22 @@ ++# MontaVista ELF Configuration. ++# Copyright (C) 2009 ++# Free Software Foundation, Inc. ++# ++# This file is part of GCC. ++# ++# GCC is free software; you can redistribute it and/or modify ++# it under the terms of the GNU General Public License as published by ++# the Free Software Foundation; either version 3, or (at your option) ++# any later version. ++# ++# GCC is distributed in the hope that it will be useful, ++# but WITHOUT ANY WARRANTY; without even the implied warranty of ++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++# GNU General Public License for more details. ++# ++# You should have received a copy of the GNU General Public License ++# along with GCC; see the file COPYING3. If not see ++# . ++ ++MULTILIB_OPTIONS = ++MULTILIB_DIRNAMES = +--- /dev/null ++++ b/gcc/config/mips/t-montavista-linux +@@ -0,0 +1,43 @@ ++# MontaVista GNU/Linux Configuration. ++# Copyright (C) 2009 ++# Free Software Foundation, Inc. ++# ++# This file is part of GCC. ++# ++# GCC is free software; you can redistribute it and/or modify ++# it under the terms of the GNU General Public License as published by ++# the Free Software Foundation; either version 3, or (at your option) ++# any later version. ++# ++# GCC is distributed in the hope that it will be useful, ++# but WITHOUT ANY WARRANTY; without even the implied warranty of ++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++# GNU General Public License for more details. ++# ++# You should have received a copy of the GNU General Public License ++# along with GCC; see the file COPYING3. If not see ++# . ++ ++# Build big-endian and little-endian support libraries. ++MULTILIB_OPTIONS = mel msoft-float march=octeon mabi=n32/mabi=64 ++MULTILIB_DIRNAMES = mel soft-float octeon n32 64 ++MULTILIB_EXCEPTIONS = *mel*/*mabi=n32* *mel*/*mabi=64* ++MULTILIB_EXCEPTIONS += *mel*/*march=octeon* march=octeon march=octeon/mabi=n32 ++MULTILIB_EXCEPTIONS += march=octeon/mabi=64 msoft-float/march=octeon ++ ++# These files must be built for each multilib. ++EXTRA_MULTILIB_PARTS = crtbegin.o crtend.o crtbeginS.o crtendS.o crtbeginT.o ++ ++# See comment in montavista-linux.h on STARTFILE_PREFIX_SPEC for how the real ++# directories used in the sysroots are determined. These directories ++# are specified so that (a) they are distinct and (b) removing the ++# components that form part of the sysroot suffix leaves the real ++# directory within the sysroot. 
++MULTILIB_OSDIRNAMES = msoft-float/mabi.n32=../lib32/soft-float ++MULTILIB_OSDIRNAMES += msoft-float/mabi.64=../lib64/soft-float ++MULTILIB_OSDIRNAMES += msoft-float/march.octeon/mabi.n32=../lib32/soft-float/octeon ++MULTILIB_OSDIRNAMES += msoft-float/march.octeon/mabi.64=../lib64/soft-float/octeon ++MULTILIB_OSDIRNAMES += mel/msoft-float=!mel/soft-float ++MULTILIB_OSDIRNAMES += msoft-float=!soft-float ++MULTILIB_OSDIRNAMES += mabi.64=../lib64 ++MULTILIB_OSDIRNAMES += mabi.n32=../lib32 +--- /dev/null ++++ b/gcc/config/mips/t-octeon-elf +@@ -0,0 +1,41 @@ ++# Don't let CTOR_LIST end up in sdata section. ++ ++CRTSTUFF_T_CFLAGS = -G 0 -fno-asynchronous-unwind-tables ++ ++# Assemble startup files. ++ ++$(T)crti.o: $(srcdir)/config/mips/crti.asm $(GCC_PASSES) ++ $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(MULTILIB_CFLAGS) $(INCLUDES) \ ++ -c -o $(T)crti.o -x assembler-with-cpp $(srcdir)/config/mips/crti.asm ++ ++$(T)crtn.o: $(srcdir)/config/mips/crtn.asm $(GCC_PASSES) ++ $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(MULTILIB_CFLAGS) $(INCLUDES) \ ++ -c -o $(T)crtn.o -x assembler-with-cpp $(srcdir)/config/mips/crtn.asm ++ ++# N32 uses TFmode for long double. ++ ++TPBIT = tp-bit.c ++ ++tp-bit.c: $(srcdir)/config/fp-bit.c ++ echo '#ifdef __MIPSEL__' > tp-bit.c ++ echo '# define FLOAT_BIT_ORDER_MISMATCH' >> tp-bit.c ++ echo '#endif' >> tp-bit.c ++ echo '#if __LDBL_MANT_DIG__ == 113' >> tp-bit.c ++ echo '#define QUIET_NAN_NEGATED' >> tp-bit.c ++ echo '# define TFLOAT' >> tp-bit.c ++ cat $(srcdir)/config/fp-bit.c >> tp-bit.c ++ echo '#endif' >> tp-bit.c ++ ++# We must build libgcc2.a with -G 0, in case the user wants to link ++# without the $gp register. ++ ++TARGET_LIBGCC2_CFLAGS = -G 0 ++ ++# Build both ABIs. ++ ++MULTILIB_OPTIONS = mabi=n32/mabi=eabi/mabi=64 ++MULTILIB_DIRNAMES = n32 eabi n64 ++EXTRA_MULTILIB_PARTS = crtbegin.o crtend.o crti.o crtn.o ++ ++LIBGCC = stmp-multilib ++INSTALL_LIBGCC = install-multilib +--- a/gcc/config/mips/t-sde ++++ b/gcc/config/mips/t-sde +@@ -10,9 +10,17 @@ $(T)crtn.o: $(srcdir)/config/mips/crtn.a + $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(MULTILIB_CFLAGS) $(INCLUDES) \ + -c -o $(T)crtn.o -x assembler-with-cpp $(srcdir)/config/mips/crtn.asm + +-MULTILIB_OPTIONS = EL/EB mips32/mips32r2/mips64 mips16 msoft-float/mfp64 mcode-readable=no ++# We must build libgcc2.a with -G 0, in case the user wants to link ++# without the $gp register. Use -fno-optimize-sibling-calls in case ++# we have a mixed mips16/non-mips16 environment where a plain "jump" ++# instuction won't work across the divide (no jx instruction). ++# Compile libraries with -mcode-xonly, so that they are link-compatible ++# with both -mcode-readable=pcrel and -mcode-readable=yes. ++TARGET_LIBGCC2_CFLAGS = -G 0 -fno-optimize-sibling-calls -mcode-xonly ++ ++MULTILIB_OPTIONS = EL/EB mips32/mips32r2/mips64 mips16 msoft-float/mfp64 mno-data-in-code + MULTILIB_DIRNAMES = el eb mips32 mips32r2 mips64 mips16 sof f64 spram +-MULTILIB_MATCHES = EL=mel EB=meb ++MULTILIB_MATCHES = EL=mel EB=meb mips16=mips16e + + # The -mfp64 option is only valid in conjunction with -mips32r2. + ifneq ($(filter MIPS_ISA_DEFAULT=33,$(tm_defines)),) +--- /dev/null ++++ b/gcc/config/mips/t-sdelib +@@ -0,0 +1,23 @@ ++# Override newlib settings in t-sde and set up for building ++# against SDE header files and libraries. ++ ++# Remove stdarg.h and stddef.h from USER_H. 
++USER_H = $(srcdir)/ginclude/float.h \ ++ $(srcdir)/ginclude/iso646.h \ ++ $(srcdir)/ginclude/stdbool.h \ ++ $(srcdir)/ginclude/varargs.h \ ++ $(EXTRA_HEADERS) ++ ++# Don't run fixinclude ++STMP_FIXINC = stmp-sdefixinc ++stmp-sdefixinc: gsyslimits.h ++ rm -rf include; mkdir include ++ chmod a+rx include ++ rm -f include/syslimits.h ++ cp $(srcdir)/gsyslimits.h include/syslimits.h ++ chmod a+r include/syslimits.h ++ $(STAMP) stmp-sdefixinc ++ ++# Don't build FPBIT and DPBIT; we'll be using the SDE soft-float library. ++FPBIT = ++DPBIT = +--- a/gcc/config/mips/t-sdemtk ++++ b/gcc/config/mips/t-sdemtk +@@ -1,26 +1,7 @@ +-# Override newlib settings in t-sde and set up for building +-# against SDE header files and libraries. + +-MULTILIB_OPTIONS = EL/EB mips32/mips32r2/mips64 mips16 msoft-float/mno-float/mfp64 +-MULTILIB_DIRNAMES = el eb mips32 mips32r2 mips64 mips16 sof nof f64 ++MULTILIB_OPTIONS = EL/EB mips32/mips32r2/mips64 mips16 fp64/msoft-float/mno-float ++MULTILIB_DIRNAMES = el eb mips32 mips32r2 mips64 mips16 f64 sof nof ++MULTILIB_MATCHES = EL=mel EB=meb ++MULTILIB_EXCLUSIONS = mfp64/!mips32r2 mips16/mips64 mcode-readable=no/!mips16 ++MULTILIB_EXCEPTIONS = + +-# Remove stdarg.h and stddef.h from USER_H. +-USER_H = $(srcdir)/ginclude/float.h \ +- $(srcdir)/ginclude/iso646.h \ +- $(srcdir)/ginclude/stdbool.h \ +- $(srcdir)/ginclude/varargs.h \ +- $(EXTRA_HEADERS) +- +-# Don't run fixinclude +-STMP_FIXINC = stmp-sdefixinc +-stmp-sdefixinc: gsyslimits.h +- rm -rf include; mkdir include +- chmod a+rx include +- rm -f include/syslimits.h +- cp $(srcdir)/gsyslimits.h include/syslimits.h +- chmod a+r include/syslimits.h +- $(STAMP) stmp-sdefixinc +- +-# Don't build FPBIT and DPBIT; we'll be using the SDE soft-float library. +-FPBIT = +-DPBIT = +--- /dev/null ++++ b/gcc/config/mips/t-sgxx-linux +@@ -0,0 +1,11 @@ ++MULTILIB_OPTIONS = muclibc march=mips2/march=mips32 msoft-float EL/EB ++MULTILIB_DIRNAMES = uclibc mips2 mips32 soft-float el eb ++MULTILIB_MATCHES := EL=mel EB=meb \ ++ march?mips2=mips2 march?mips2=mips3 march?mips2=mips4 \ ++ $(foreach cpu,mips3 mips4 r6000 r4000 vr4100 vr4111 vr4120 vr4130 vr4300 \ ++ r4400 r4600 orion r4650 r8000 vr5000 vr5400 vr5500 rm7000 \ ++ rm9000,march?mips2=march?$(cpu)) \ ++ march?mips32=mips32 \ ++ $(foreach cpu,4kc 4km 4kp 4ks,march?mips32=march?$(cpu)) ++MULTILIB_EXCEPTIONS = *muclibc*/*march?mips2* *muclibc*/*march?mips32* ++EXTRA_MULTILIB_PARTS = crtbegin.o crtend.o crtbeginS.o crtendS.o crtbeginT.o +--- /dev/null ++++ b/gcc/config/mips/t-sgxx-sde +@@ -0,0 +1,7 @@ ++# SourceryG++ overrides for SDE builds ++ ++MULTILIB_OPTIONS = EL/EB mips16 mfp64/msoft-float/mno-float mcode-readable=no ++MULTILIB_DIRNAMES = el eb mips16 fp64 sof nof spram ++MULTILIB_MATCHES = EL=mel EB=meb mips16=mips16e ++MULTILIB_EXCLUSIONS = mcode-readable=no/!mips16 ++MULTILIB_EXCEPTIONS = +--- /dev/null ++++ b/gcc/config/mips/t-sgxxlite-linux +@@ -0,0 +1,5 @@ ++MULTILIB_OPTIONS = muclibc msoft-float EL/EB ++MULTILIB_DIRNAMES = uclibc soft-float el eb ++MULTILIB_MATCHES := EL=mel EB=meb ++EXTRA_MULTILIB_PARTS = crtbegin.o crtend.o crtbeginS.o crtendS.o crtbeginT.o ++ +--- /dev/null ++++ b/gcc/config/mips/t-sicortex +@@ -0,0 +1,24 @@ ++# SiCortex GNU/Linux Configuration. ++# Copyright (C) 2008 ++# Free Software Foundation, Inc. ++# ++# This file is part of GCC. 
++# ++# GCC is free software; you can redistribute it and/or modify ++# it under the terms of the GNU General Public License as published by ++# the Free Software Foundation; either version 3, or (at your option) ++# any later version. ++# ++# GCC is distributed in the hope that it will be useful, ++# but WITHOUT ANY WARRANTY; without even the implied warranty of ++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++# GNU General Public License for more details. ++# ++# You should have received a copy of the GNU General Public License ++# along with GCC; see the file COPYING3. If not see ++# . ++ ++# No O32 libraries for SiCortex. ++MULTILIB_OPTIONS = mabi=n32/mabi=64 ++MULTILIB_DIRNAMES = n32 64 ++MULTILIB_OSDIRNAMES = ../lib32 ../lib64 +--- /dev/null ++++ b/gcc/config/mips/t-wrs-linux +@@ -0,0 +1,50 @@ ++# Wind River GNU/Linux Configuration. ++# Copyright (C) 2006, 2007 ++# Free Software Foundation, Inc. ++# ++# This file is part of GCC. ++# ++# GCC is free software; you can redistribute it and/or modify ++# it under the terms of the GNU General Public License as published by ++# the Free Software Foundation; either version 3, or (at your option) ++# any later version. ++# ++# GCC is distributed in the hope that it will be useful, ++# but WITHOUT ANY WARRANTY; without even the implied warranty of ++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++# GNU General Public License for more details. ++# ++# You should have received a copy of the GNU General Public License ++# along with GCC; see the file COPYING3. If not see ++# . ++ ++# Build big-endian and little-endian support libraries. ++MULTILIB_OPTIONS = muclibc mel mhard-float march=octeon mabi=n32/mabi=64 ++MULTILIB_DIRNAMES = uclibc mel hard-float octeon n32 64 ++MULTILIB_EXCEPTIONS = *muclibc*/*mhard-float* ++MULTILIB_EXCEPTIONS += *muclibc*/*mabi=n32* ++MULTILIB_EXCEPTIONS += *muclibc*/*mabi=64* ++MULTILIB_EXCEPTIONS += */march=octeon* ++MULTILIB_EXCEPTIONS += march=octeon march=octeon/mabi=32 ++MULTILIB_EXCEPTIONS += mel/mabi=n32 mel/mabi=64 ++MULTILIB_EXCEPTIONS += mabi=n32 ++# These files must be built for each multilib. ++EXTRA_MULTILIB_PARTS = crtbegin.o crtend.o crtbeginS.o crtendS.o crtbeginT.o ++ ++# See comment in wrs-linux.h on STARTFILE_PREFIX_SPEC for how the real ++# directories used in the sysroots are determined. These directories ++# are specified so that (a) they are distinct and (b) removing the ++# components that form part of the sysroot suffix leaves the real ++# directory within the sysroot. ++MULTILIB_OSDIRNAMES = mel/mhard-float/mabi.n32=../lib32/mel/hard-float ++MULTILIB_OSDIRNAMES += mel/mhard-float/mabi.64=../lib64/mel/hard-float ++MULTILIB_OSDIRNAMES += mhard-float/mabi.n32=../lib32/hard-float ++MULTILIB_OSDIRNAMES += mhard-float/mabi.64=../lib64/hard-float ++MULTILIB_OSDIRNAMES += mel/mhard-float=!mel/hard-float ++MULTILIB_OSDIRNAMES += mhard-float=!hard-float ++MULTILIB_OSDIRNAMES += mabi.64=../lib64 ++MULTILIB_OSDIRNAMES += march.octeon/mabi.n32=../lib32/octeon ++MULTILIB_OSDIRNAMES += march.octeon/mabi.64=../lib64/octeon ++MULTILIB_OSDIRNAMES += muclibc/mel=!uclibc/mel ++MULTILIB_OSDIRNAMES += muclibc=!uclibc ++ +--- a/gcc/config/mips/vr.h ++++ b/gcc/config/mips/vr.h +@@ -26,7 +26,7 @@ along with GCC; see the file COPYING3. + MULTILIB_ABI_DEFAULT, \ + DEFAULT_VR_ARCH } + +-#define DRIVER_SELF_SPECS \ ++#define SUBTARGET_SELF_SPECS \ + /* Enforce the default architecture. This is mostly for \ + the assembler's benefit. 
*/ \ + "%{!march=*:%{!mfix-vr4120:%{!mfix-vr4130:" \ +--- /dev/null ++++ b/gcc/config/mips/wrs-linux.h +@@ -0,0 +1,63 @@ ++/* Wind River GNU/Linux Configuration. ++ Copyright (C) 2006, 2007 ++ Free Software Foundation, Inc. ++ ++This file is part of GCC. ++ ++GCC is free software; you can redistribute it and/or modify ++it under the terms of the GNU General Public License as published by ++the Free Software Foundation; either version 3, or (at your option) ++any later version. ++ ++GCC is distributed in the hope that it will be useful, ++but WITHOUT ANY WARRANTY; without even the implied warranty of ++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++GNU General Public License for more details. ++ ++You should have received a copy of the GNU General Public License ++along with GCC; see the file COPYING3. If not see ++. */ ++ ++/* Override linux64.h to default to O32. */ ++#undef SUBTARGET_SELF_SPECS ++#define SUBTARGET_SELF_SPECS \ ++NO_SHARED_SPECS, \ ++"%{!EB:%{!EL:%(endian_spec)}}", \ ++"%{!mabi=*: -mabi=32}" ++ ++/* We do not need to provide an explicit big-endian multilib. */ ++#undef MULTILIB_DEFAULTS ++#define MULTILIB_DEFAULTS \ ++ { "meb", "mabi=32" } ++ ++/* The GLIBC headers are in /usr/include, relative to the sysroot; the ++ uClibc headers are in /uclibc/usr/include. */ ++#undef SYSROOT_HEADERS_SUFFIX_SPEC ++#define SYSROOT_HEADERS_SUFFIX_SPEC \ ++ "%{muclibc:/uclibc}" ++ ++/* The various C libraries each have their own subdirectory. */ ++#undef SYSROOT_SUFFIX_SPEC ++#define SYSROOT_SUFFIX_SPEC \ ++ "%{muclibc:%{mel:/uclibc/mel ; \ ++ :/uclibc} ; \ ++ mel:%{mhard-float:/mel/hard-float ; \ ++ :/mel} ; \ ++ march=octeon:/octeon ; \ ++ mhard-float:/hard-float}" ++ ++/* MULTILIB_OSDIRNAMES provides directory names used in two ways: ++ relative to $target/lib/ in the GCC installation, and relative to ++ lib/ and usr/lib/ in a sysroot. For the latter, we want names such ++ as plain ../lib64, but these cannot be used outside the sysroot ++ because different multilibs would be mapped to the same directory. ++ Directories are searched both with and without the multilib suffix, ++ so it suffices if the directory without the suffix is correct ++ within the sysroot while the directory with the suffix doesn't ++ exist. We use STARTFILE_PREFIX_SPEC to achieve the desired ++ effect. */ ++#undef STARTFILE_PREFIX_SPEC ++#define STARTFILE_PREFIX_SPEC \ ++ "%{mabi=32: /usr/local/lib/ /lib/ /usr/lib/} \ ++ %{mabi=n32: /usr/local/lib32/ /lib32/ /usr/lib32/} \ ++ %{mabi=64: /usr/local/lib64/ /lib64/ /usr/lib64/}" +--- /dev/null ++++ b/gcc/config/mips/xlr.md +@@ -0,0 +1,89 @@ ++;; DFA-based pipeline description for the XLR. ++;; Copyright (C) 2008 Free Software Foundation, Inc. ++;; ++;; xlr.md Machine Description for the RMI XLR Microprocessor ++;; This file is part of GCC. ++ ++;; GCC is free software; you can redistribute it and/or modify it ++;; under the terms of the GNU General Public License as published ++;; by the Free Software Foundation; either version 3, or (at your ++;; option) any later version. ++ ++;; GCC is distributed in the hope that it will be useful, but WITHOUT ++;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY ++;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public ++;; License for more details. ++ ++;; You should have received a copy of the GNU General Public License ++;; along with GCC; see the file COPYING3. If not see ++;; . ++ ++(define_automaton "xlr_main,xlr_muldiv") ++ ++;; Definitions for xlr_main automaton. 
++(define_cpu_unit "xlr_main_pipe" "xlr_main") ++ ++(define_insn_reservation "ir_xlr_alu_slt" 2 ++ (and (eq_attr "cpu" "xlr") ++ (eq_attr "type" "slt")) ++ "xlr_main_pipe") ++ ++;; Integer arithmetic instructions. ++(define_insn_reservation "ir_xlr_alu" 1 ++ (and (eq_attr "cpu" "xlr") ++ (eq_attr "type" "move,arith,shift,clz,logical,signext,const,unknown,multi,nop,trap")) ++ "xlr_main_pipe") ++ ++;; Integer arithmetic instructions. ++(define_insn_reservation "ir_xlr_condmove" 2 ++ (and (eq_attr "cpu" "xlr") ++ (eq_attr "type" "condmove")) ++ "xlr_main_pipe") ++ ++;; Load/store instructions. ++(define_insn_reservation "ir_xlr_load" 4 ++ (and (eq_attr "cpu" "xlr") ++ (eq_attr "type" "load")) ++ "xlr_main_pipe") ++ ++(define_insn_reservation "ir_xlr_store" 1 ++ (and (eq_attr "cpu" "xlr") ++ (eq_attr "type" "store")) ++ "xlr_main_pipe") ++ ++(define_insn_reservation "ir_xlr_prefetch_x" 1 ++ (and (eq_attr "cpu" "xlr") ++ (eq_attr "type" "prefetch,prefetchx")) ++ "xlr_main_pipe") ++ ++;; Branch instructions - use branch misprediction latency. ++(define_insn_reservation "ir_xlr_branch" 1 ++ (and (eq_attr "cpu" "xlr") ++ (eq_attr "type" "branch,jump,call")) ++ "xlr_main_pipe") ++ ++;; Coprocessor move instructions. ++(define_insn_reservation "ir_xlr_xfer" 2 ++ (and (eq_attr "cpu" "xlr") ++ (eq_attr "type" "mtc,mfc")) ++ "xlr_main_pipe") ++ ++(define_bypass 5 "ir_xlr_xfer" "ir_xlr_xfer") ++ ++;; Definitions for the xlr_muldiv automaton. ++(define_cpu_unit "xlr_imuldiv_nopipe" "xlr_muldiv") ++ ++(define_insn_reservation "ir_xlr_imul" 8 ++ (and (eq_attr "cpu" "xlr") ++ (eq_attr "type" "imul,imul3,imadd")) ++ "xlr_main_pipe,xlr_imuldiv_nopipe*6") ++ ++(define_insn_reservation "ir_xlr_div" 68 ++ (and (eq_attr "cpu" "xlr") ++ (eq_attr "type" "idiv")) ++ "xlr_main_pipe,xlr_imuldiv_nopipe*67") ++ ++(define_insn_reservation "xlr_hilo" 2 ++ (and (eq_attr "cpu" "xlr") ++ (eq_attr "type" "mfhilo,mthilo")) ++ "xlr_imuldiv_nopipe") +--- /dev/null ++++ b/gcc/config/print-sysroot-suffix.sh +@@ -0,0 +1,107 @@ ++#! /bin/sh ++# Script to generate SYSROOT_SUFFIX equivalent to MULTILIB_OSDIRNAMES ++# Arguments are MULTILIB_OSDIRNAMES, MULTILIB_OPTIONS, MULTILIB_MATCHES ++# and MULTILIB_ALIASES. ++ ++set -e ++ ++dirnames="$1" ++options="$2" ++matches="$3" ++aliases="$4" ++ ++cat > print-sysroot-suffix3.sh <<\EOF ++#! /bin/sh ++# Print all the multilib matches for this option ++result="$1" ++EOF ++for x in $matches; do ++ l=`echo $x | sed -e 's/=.*$//' -e 's/?/=/g'` ++ r=`echo $x | sed -e 's/^.*=//' -e 's/?/=/g'` ++ echo "[ \"\$1\" = \"$l\" ] && result=\"\$result|$r\"" >> print-sysroot-suffix3.sh ++done ++echo 'echo $result' >> print-sysroot-suffix3.sh ++chmod +x print-sysroot-suffix3.sh ++ ++cat > print-sysroot-suffix2.sh <<\EOF ++#! /bin/sh ++# Recursive script to enumerate all multilib combinations, match against ++# multilib directories and optut a spec string of the result. ++# Will fold identical trees. 
++ ++padding="$1" ++optstring="$2" ++shift 2 ++n="\" \\ ++$padding\"" ++if [ $# = 0 ]; then ++ case $optstring in ++EOF ++for x in $aliases; do ++ l=`echo $x | sed -e 's/=.*$//' -e 's/?/=/g'` ++ r=`echo $x | sed -e 's/^.*=//' -e 's/?/=/g'` ++ echo "/$r/) optstring=\"/$l/\" ;;" >> print-sysroot-suffix2.sh ++done ++echo " esac" >> print-sysroot-suffix2.sh ++ ++pat= ++for x in $dirnames; do ++ p=`echo $x | sed -e 's,=!,/$=/,'` ++ pat="$pat -e 's=^//$p='" ++done ++echo ' optstring=`echo "/$optstring" | sed '"$pat\`" >> print-sysroot-suffix2.sh ++cat >> print-sysroot-suffix2.sh <<\EOF ++ case $optstring in ++ //*) ++ ;; ++ *) ++ echo "$optstring" ++ ;; ++ esac ++else ++ thisopt="$1" ++ shift ++ bit= ++ lastcond= ++ result= ++ for x in `echo "$thisopt" | sed -e 's,/, ,g'`; do ++ case $x in ++EOF ++for x in `echo "$options" | sed -e 's,/, ,g'`; do ++ match=`./print-sysroot-suffix3.sh "$x"` ++ echo "$x) optmatch=\"$match\" ;;" >> print-sysroot-suffix2.sh ++done ++cat >> print-sysroot-suffix2.sh <<\EOF ++ esac ++ bit=`"$0" "$padding " "$optstring$x/" "$@"` ++ if [ -z "$lastopt" ]; then ++ lastopt="$optmatch" ++ else ++ if [ "$lastbit" = "$bit" ]; then ++ lastopt="$lastopt|$optmatch" ++ else ++ result="$result$lastopt:$lastbit;$n" ++ lastopt="$optmatch" ++ fi ++ fi ++ lastbit="$bit" ++ done ++ bit=`"$0" "$padding " "$optstring" "$@"` ++ if [ "$bit" = "$lastbit" ]; then ++ if [ -z "$result" ]; then ++ echo "$bit" ++ else ++ echo "$n%{$result:$bit}" ++ fi ++ else ++ echo "$n%{$result$lastopt:$lastbit;$n:$bit}" ++ fi ++fi ++EOF ++ ++chmod +x ./print-sysroot-suffix2.sh ++result=`./print-sysroot-suffix2.sh "" "/" $options` ++echo "#undef SYSROOT_SUFFIX_SPEC" ++echo "#define SYSROOT_SUFFIX_SPEC \"$result\"" ++rm print-sysroot-suffix2.sh ++rm print-sysroot-suffix3.sh +--- a/gcc/config/rs6000/aix.h ++++ b/gcc/config/rs6000/aix.h +@@ -202,6 +202,8 @@ + + /* Define cutoff for using external functions to save floating point. */ + #define FP_SAVE_INLINE(FIRST_REG) ((FIRST_REG) == 62 || (FIRST_REG) == 63) ++/* And similarly for general purpose registers. */ ++#define GP_SAVE_INLINE(FIRST_REG) ((FIRST_REG) < 32) + + /* __throw will restore its own return address to be the same as the + return address of the function that the throw is being made to. +--- a/gcc/config/rs6000/altivec.md ++++ b/gcc/config/rs6000/altivec.md +@@ -64,7 +64,6 @@ + (UNSPEC_VPKUWUS 102) + (UNSPEC_VPKSWUS 103) + (UNSPEC_VRL 104) +- (UNSPEC_VSL 107) + (UNSPEC_VSLV4SI 110) + (UNSPEC_VSLO 111) + (UNSPEC_VSR 118) +@@ -582,7 +581,7 @@ + /* Generate [-0.0, -0.0, -0.0, -0.0]. */ + neg0 = gen_reg_rtx (V4SImode); + emit_insn (gen_altivec_vspltisw (neg0, constm1_rtx)); +- emit_insn (gen_altivec_vslw (neg0, neg0, neg0)); ++ emit_insn (gen_ashlv4si3 (neg0, neg0, neg0)); + + /* Use the multiply-add. 
*/ + emit_insn (gen_altivec_vmaddfp (operands[0], operands[1], operands[2], +@@ -641,7 +640,7 @@ + high_product = gen_reg_rtx (V4SImode); + emit_insn (gen_altivec_vmsumuhm (high_product, one, small_swap, zero)); + +- emit_insn (gen_altivec_vslw (high_product, high_product, sixteen)); ++ emit_insn (gen_ashlv4si3 (high_product, high_product, sixteen)); + + emit_insn (gen_addv4si3 (operands[0], high_product, low_product)); + +@@ -1227,15 +1226,6 @@ + "vrl %0,%1,%2" + [(set_attr "type" "vecsimple")]) + +-(define_insn "altivec_vsl" +- [(set (match_operand:VI 0 "register_operand" "=v") +- (unspec:VI [(match_operand:VI 1 "register_operand" "v") +- (match_operand:VI 2 "register_operand" "v")] +- UNSPEC_VSL))] +- "TARGET_ALTIVEC" +- "vsl %0,%1,%2" +- [(set_attr "type" "vecsimple")]) +- + (define_insn "altivec_vsl" + [(set (match_operand:V4SI 0 "register_operand" "=v") + (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v") +@@ -1254,6 +1244,14 @@ + "vslo %0,%1,%2" + [(set_attr "type" "vecperm")]) + ++(define_insn "ashl3" ++ [(set (match_operand:VI 0 "register_operand" "=v") ++ (ashift:VI (match_operand:VI 1 "register_operand" "v") ++ (match_operand:VI 2 "register_operand" "v") ))] ++ "TARGET_ALTIVEC" ++ "vsl %0,%1,%2" ++ [(set_attr "type" "vecsimple")]) ++ + (define_insn "lshr3" + [(set (match_operand:VI 0 "register_operand" "=v") + (lshiftrt:VI (match_operand:VI 1 "register_operand" "v") +@@ -2045,7 +2043,7 @@ + [(set (match_dup 2) + (vec_duplicate:V4SI (const_int -1))) + (set (match_dup 3) +- (unspec:V4SI [(match_dup 2) (match_dup 2)] UNSPEC_VSL)) ++ (ashift:V4SI (match_dup 2) (match_dup 2))) + (set (match_operand:V4SF 0 "register_operand" "=v") + (and:V4SF (not:V4SF (subreg:V4SF (match_dup 3) 0)) + (match_operand:V4SF 1 "register_operand" "v")))] +@@ -2648,7 +2646,7 @@ + /* Generate [-0.0, -0.0, -0.0, -0.0]. */ + neg0 = gen_reg_rtx (V4SImode); + emit_insn (gen_altivec_vspltisw (neg0, constm1_rtx)); +- emit_insn (gen_altivec_vslw (neg0, neg0, neg0)); ++ emit_insn (gen_ashlv4si3 (neg0, neg0, neg0)); + + /* XOR */ + emit_insn (gen_xorv4sf3 (operands[0], +--- /dev/null ++++ b/gcc/config/rs6000/crtresfpr.asm +@@ -0,0 +1,90 @@ ++/* ++ * Special support for eabi and SVR4 ++ * ++ * Copyright (C) 1995, 1996, 1998, 2000, 2001, 2008 ++ * Free Software Foundation, Inc. ++ * Written By Michael Meissner ++ * 64-bit support written by David Edelsohn ++ * ++ * This file is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the ++ * Free Software Foundation; either version 2, or (at your option) any ++ * later version. ++ * ++ * In addition to the permissions in the GNU General Public License, the ++ * Free Software Foundation gives you unlimited permission to link the ++ * compiled version of this file with other programs, and to distribute ++ * those programs without any restriction coming from the use of this ++ * file. (The General Public License restrictions do apply in other ++ * respects; for example, they cover modification of the file, and ++ * distribution when not linked into another program.) ++ * ++ * This file is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; see the file COPYING. 
If not, write to ++ * the Free Software Foundation, 51 Franklin Street, Fifth Floor, ++ * Boston, MA 02110-1301, USA. ++ * ++ * As a special exception, if you link this library with files ++ * compiled with GCC to produce an executable, this does not cause ++ * the resulting executable to be covered by the GNU General Public License. ++ * This exception does not however invalidate any other reasons why ++ * the executable file might be covered by the GNU General Public License. ++ */ ++ ++/* Do any initializations needed for the eabi environment */ ++ ++ .file "crtresfpr.asm" ++ .section ".text" ++ #include "ppc-asm.h" ++ ++/* On PowerPC64 Linux, these functions are provided by the linker. */ ++#ifndef __powerpc64__ ++ ++/* Routines for restoring floating point registers, called by the compiler. */ ++/* Called with r11 pointing to the stack header word of the caller of the */ ++/* function, just beyond the end of the floating point save area. */ ++ ++HIDDEN_FUNC(_restfpr_14) lfd 14,-144(11) /* restore fp registers */ ++HIDDEN_FUNC(_restfpr_15) lfd 15,-136(11) ++HIDDEN_FUNC(_restfpr_16) lfd 16,-128(11) ++HIDDEN_FUNC(_restfpr_17) lfd 17,-120(11) ++HIDDEN_FUNC(_restfpr_18) lfd 18,-112(11) ++HIDDEN_FUNC(_restfpr_19) lfd 19,-104(11) ++HIDDEN_FUNC(_restfpr_20) lfd 20,-96(11) ++HIDDEN_FUNC(_restfpr_21) lfd 21,-88(11) ++HIDDEN_FUNC(_restfpr_22) lfd 22,-80(11) ++HIDDEN_FUNC(_restfpr_23) lfd 23,-72(11) ++HIDDEN_FUNC(_restfpr_24) lfd 24,-64(11) ++HIDDEN_FUNC(_restfpr_25) lfd 25,-56(11) ++HIDDEN_FUNC(_restfpr_26) lfd 26,-48(11) ++HIDDEN_FUNC(_restfpr_27) lfd 27,-40(11) ++HIDDEN_FUNC(_restfpr_28) lfd 28,-32(11) ++HIDDEN_FUNC(_restfpr_29) lfd 29,-24(11) ++HIDDEN_FUNC(_restfpr_30) lfd 30,-16(11) ++HIDDEN_FUNC(_restfpr_31) lfd 31,-8(11) ++ blr ++FUNC_END(_restfpr_31) ++FUNC_END(_restfpr_30) ++FUNC_END(_restfpr_29) ++FUNC_END(_restfpr_28) ++FUNC_END(_restfpr_27) ++FUNC_END(_restfpr_26) ++FUNC_END(_restfpr_25) ++FUNC_END(_restfpr_24) ++FUNC_END(_restfpr_23) ++FUNC_END(_restfpr_22) ++FUNC_END(_restfpr_21) ++FUNC_END(_restfpr_20) ++FUNC_END(_restfpr_19) ++FUNC_END(_restfpr_18) ++FUNC_END(_restfpr_17) ++FUNC_END(_restfpr_16) ++FUNC_END(_restfpr_15) ++FUNC_END(_restfpr_14) ++ ++#endif +--- /dev/null ++++ b/gcc/config/rs6000/crtresgpr.asm +@@ -0,0 +1,90 @@ ++/* ++ * Special support for eabi and SVR4 ++ * ++ * Copyright (C) 1995, 1996, 1998, 2000, 2001, 2008 ++ * Free Software Foundation, Inc. ++ * Written By Michael Meissner ++ * 64-bit support written by David Edelsohn ++ * ++ * This file is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the ++ * Free Software Foundation; either version 2, or (at your option) any ++ * later version. ++ * ++ * In addition to the permissions in the GNU General Public License, the ++ * Free Software Foundation gives you unlimited permission to link the ++ * compiled version of this file with other programs, and to distribute ++ * those programs without any restriction coming from the use of this ++ * file. (The General Public License restrictions do apply in other ++ * respects; for example, they cover modification of the file, and ++ * distribution when not linked into another program.) ++ * ++ * This file is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. 
++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; see the file COPYING. If not, write to ++ * the Free Software Foundation, 51 Franklin Street, Fifth Floor, ++ * Boston, MA 02110-1301, USA. ++ * ++ * As a special exception, if you link this library with files ++ * compiled with GCC to produce an executable, this does not cause ++ * the resulting executable to be covered by the GNU General Public License. ++ * This exception does not however invalidate any other reasons why ++ * the executable file might be covered by the GNU General Public License. ++ */ ++ ++/* Do any initializations needed for the eabi environment */ ++ ++ .file "crtresgpr.asm" ++ .section ".text" ++ #include "ppc-asm.h" ++ ++/* On PowerPC64 Linux, these functions are provided by the linker. */ ++#ifndef __powerpc64__ ++ ++/* Routines for restoring integer registers, called by the compiler. */ ++/* Called with r11 pointing to the stack header word of the caller of the */ ++/* function, just beyond the end of the integer restore area. */ ++ ++HIDDEN_FUNC(_restgpr_14) lwz 14,-72(11) /* restore gp registers */ ++HIDDEN_FUNC(_restgpr_15) lwz 15,-68(11) ++HIDDEN_FUNC(_restgpr_16) lwz 16,-64(11) ++HIDDEN_FUNC(_restgpr_17) lwz 17,-60(11) ++HIDDEN_FUNC(_restgpr_18) lwz 18,-56(11) ++HIDDEN_FUNC(_restgpr_19) lwz 19,-52(11) ++HIDDEN_FUNC(_restgpr_20) lwz 20,-48(11) ++HIDDEN_FUNC(_restgpr_21) lwz 21,-44(11) ++HIDDEN_FUNC(_restgpr_22) lwz 22,-40(11) ++HIDDEN_FUNC(_restgpr_23) lwz 23,-36(11) ++HIDDEN_FUNC(_restgpr_24) lwz 24,-32(11) ++HIDDEN_FUNC(_restgpr_25) lwz 25,-28(11) ++HIDDEN_FUNC(_restgpr_26) lwz 26,-24(11) ++HIDDEN_FUNC(_restgpr_27) lwz 27,-20(11) ++HIDDEN_FUNC(_restgpr_28) lwz 28,-16(11) ++HIDDEN_FUNC(_restgpr_29) lwz 29,-12(11) ++HIDDEN_FUNC(_restgpr_30) lwz 30,-8(11) ++HIDDEN_FUNC(_restgpr_31) lwz 31,-4(11) ++ blr ++FUNC_END(_restgpr_31) ++FUNC_END(_restgpr_30) ++FUNC_END(_restgpr_29) ++FUNC_END(_restgpr_28) ++FUNC_END(_restgpr_27) ++FUNC_END(_restgpr_26) ++FUNC_END(_restgpr_25) ++FUNC_END(_restgpr_24) ++FUNC_END(_restgpr_23) ++FUNC_END(_restgpr_22) ++FUNC_END(_restgpr_21) ++FUNC_END(_restgpr_20) ++FUNC_END(_restgpr_19) ++FUNC_END(_restgpr_18) ++FUNC_END(_restgpr_17) ++FUNC_END(_restgpr_16) ++FUNC_END(_restgpr_15) ++FUNC_END(_restgpr_14) ++ ++#endif +--- /dev/null ++++ b/gcc/config/rs6000/crtresxfpr.asm +@@ -0,0 +1,95 @@ ++/* ++ * Special support for eabi and SVR4 ++ * ++ * Copyright (C) 1995, 1996, 1998, 2000, 2001, 2008 ++ * Free Software Foundation, Inc. ++ * Written By Michael Meissner ++ * 64-bit support written by David Edelsohn ++ * ++ * This file is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the ++ * Free Software Foundation; either version 2, or (at your option) any ++ * later version. ++ * ++ * In addition to the permissions in the GNU General Public License, the ++ * Free Software Foundation gives you unlimited permission to link the ++ * compiled version of this file with other programs, and to distribute ++ * those programs without any restriction coming from the use of this ++ * file. (The General Public License restrictions do apply in other ++ * respects; for example, they cover modification of the file, and ++ * distribution when not linked into another program.) ++ * ++ * This file is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU ++ * General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; see the file COPYING. If not, write to ++ * the Free Software Foundation, 51 Franklin Street, Fifth Floor, ++ * Boston, MA 02110-1301, USA. ++ * ++ * As a special exception, if you link this library with files ++ * compiled with GCC to produce an executable, this does not cause ++ * the resulting executable to be covered by the GNU General Public License. ++ * This exception does not however invalidate any other reasons why ++ * the executable file might be covered by the GNU General Public License. ++ */ ++ ++/* Do any initializations needed for the eabi environment */ ++ ++ .file "crtresxfpr.asm" ++ .section ".text" ++ #include "ppc-asm.h" ++ ++/* On PowerPC64 Linux, these functions are provided by the linker. */ ++#ifndef __powerpc64__ ++ ++/* Routines for restoring floating point registers, called by the compiler. */ ++/* Called with r11 pointing to the stack header word of the caller of the */ ++/* function, just beyond the end of the floating point save area. */ ++/* In addition to restoring the fp registers, it will return to the caller's */ ++/* caller */ ++ ++HIDDEN_FUNC(_restfpr_14_x) lfd 14,-144(11) /* restore fp registers */ ++HIDDEN_FUNC(_restfpr_15_x) lfd 15,-136(11) ++HIDDEN_FUNC(_restfpr_16_x) lfd 16,-128(11) ++HIDDEN_FUNC(_restfpr_17_x) lfd 17,-120(11) ++HIDDEN_FUNC(_restfpr_18_x) lfd 18,-112(11) ++HIDDEN_FUNC(_restfpr_19_x) lfd 19,-104(11) ++HIDDEN_FUNC(_restfpr_20_x) lfd 20,-96(11) ++HIDDEN_FUNC(_restfpr_21_x) lfd 21,-88(11) ++HIDDEN_FUNC(_restfpr_22_x) lfd 22,-80(11) ++HIDDEN_FUNC(_restfpr_23_x) lfd 23,-72(11) ++HIDDEN_FUNC(_restfpr_24_x) lfd 24,-64(11) ++HIDDEN_FUNC(_restfpr_25_x) lfd 25,-56(11) ++HIDDEN_FUNC(_restfpr_26_x) lfd 26,-48(11) ++HIDDEN_FUNC(_restfpr_27_x) lfd 27,-40(11) ++HIDDEN_FUNC(_restfpr_28_x) lfd 28,-32(11) ++HIDDEN_FUNC(_restfpr_29_x) lfd 29,-24(11) ++HIDDEN_FUNC(_restfpr_30_x) lfd 30,-16(11) ++HIDDEN_FUNC(_restfpr_31_x) lwz 0,4(11) ++ lfd 31,-8(11) ++ mtlr 0 ++ mr 1,11 ++ blr ++FUNC_END(_restfpr_31_x) ++FUNC_END(_restfpr_30_x) ++FUNC_END(_restfpr_29_x) ++FUNC_END(_restfpr_28_x) ++FUNC_END(_restfpr_27_x) ++FUNC_END(_restfpr_26_x) ++FUNC_END(_restfpr_25_x) ++FUNC_END(_restfpr_24_x) ++FUNC_END(_restfpr_23_x) ++FUNC_END(_restfpr_22_x) ++FUNC_END(_restfpr_21_x) ++FUNC_END(_restfpr_20_x) ++FUNC_END(_restfpr_19_x) ++FUNC_END(_restfpr_18_x) ++FUNC_END(_restfpr_17_x) ++FUNC_END(_restfpr_16_x) ++FUNC_END(_restfpr_15_x) ++FUNC_END(_restfpr_14_x) ++ ++#endif +--- /dev/null ++++ b/gcc/config/rs6000/crtresxgpr.asm +@@ -0,0 +1,93 @@ ++/* ++ * Special support for eabi and SVR4 ++ * ++ * Copyright (C) 1995, 1996, 1998, 2000, 2001, 2008 ++ * Free Software Foundation, Inc. ++ * Written By Michael Meissner ++ * 64-bit support written by David Edelsohn ++ * ++ * This file is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the ++ * Free Software Foundation; either version 2, or (at your option) any ++ * later version. ++ * ++ * In addition to the permissions in the GNU General Public License, the ++ * Free Software Foundation gives you unlimited permission to link the ++ * compiled version of this file with other programs, and to distribute ++ * those programs without any restriction coming from the use of this ++ * file. 
(The General Public License restrictions do apply in other ++ * respects; for example, they cover modification of the file, and ++ * distribution when not linked into another program.) ++ * ++ * This file is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; see the file COPYING. If not, write to ++ * the Free Software Foundation, 51 Franklin Street, Fifth Floor, ++ * Boston, MA 02110-1301, USA. ++ * ++ * As a special exception, if you link this library with files ++ * compiled with GCC to produce an executable, this does not cause ++ * the resulting executable to be covered by the GNU General Public License. ++ * This exception does not however invalidate any other reasons why ++ * the executable file might be covered by the GNU General Public License. ++ */ ++ ++/* Do any initializations needed for the eabi environment */ ++ ++ .file "crtresxgpr.asm" ++ .section ".text" ++ #include "ppc-asm.h" ++ ++/* On PowerPC64 Linux, these functions are provided by the linker. */ ++#ifndef __powerpc64__ ++ ++/* Routines for restoring integer registers, called by the compiler. */ ++/* Called with r11 pointing to the stack header word of the caller of the */ ++/* function, just beyond the end of the integer restore area. */ ++ ++HIDDEN_FUNC(_restgpr_14_x) lwz 14,-72(11) /* restore gp registers */ ++HIDDEN_FUNC(_restgpr_15_x) lwz 15,-68(11) ++HIDDEN_FUNC(_restgpr_16_x) lwz 16,-64(11) ++HIDDEN_FUNC(_restgpr_17_x) lwz 17,-60(11) ++HIDDEN_FUNC(_restgpr_18_x) lwz 18,-56(11) ++HIDDEN_FUNC(_restgpr_19_x) lwz 19,-52(11) ++HIDDEN_FUNC(_restgpr_20_x) lwz 20,-48(11) ++HIDDEN_FUNC(_restgpr_21_x) lwz 21,-44(11) ++HIDDEN_FUNC(_restgpr_22_x) lwz 22,-40(11) ++HIDDEN_FUNC(_restgpr_23_x) lwz 23,-36(11) ++HIDDEN_FUNC(_restgpr_24_x) lwz 24,-32(11) ++HIDDEN_FUNC(_restgpr_25_x) lwz 25,-28(11) ++HIDDEN_FUNC(_restgpr_26_x) lwz 26,-24(11) ++HIDDEN_FUNC(_restgpr_27_x) lwz 27,-20(11) ++HIDDEN_FUNC(_restgpr_28_x) lwz 28,-16(11) ++HIDDEN_FUNC(_restgpr_29_x) lwz 29,-12(11) ++HIDDEN_FUNC(_restgpr_30_x) lwz 30,-8(11) ++HIDDEN_FUNC(_restgpr_31_x) lwz 0,4(11) ++ lwz 31,-4(11) ++ mtlr 0 ++ mr 1,11 ++ blr ++FUNC_END(_restgpr_31_x) ++FUNC_END(_restgpr_30_x) ++FUNC_END(_restgpr_29_x) ++FUNC_END(_restgpr_28_x) ++FUNC_END(_restgpr_27_x) ++FUNC_END(_restgpr_26_x) ++FUNC_END(_restgpr_25_x) ++FUNC_END(_restgpr_24_x) ++FUNC_END(_restgpr_23_x) ++FUNC_END(_restgpr_22_x) ++FUNC_END(_restgpr_21_x) ++FUNC_END(_restgpr_20_x) ++FUNC_END(_restgpr_19_x) ++FUNC_END(_restgpr_18_x) ++FUNC_END(_restgpr_17_x) ++FUNC_END(_restgpr_16_x) ++FUNC_END(_restgpr_15_x) ++FUNC_END(_restgpr_14_x) ++ ++#endif +--- /dev/null ++++ b/gcc/config/rs6000/crtsavfpr.asm +@@ -0,0 +1,90 @@ ++/* ++ * Special support for eabi and SVR4 ++ * ++ * Copyright (C) 1995, 1996, 1998, 2000, 2001, 2008 ++ * Free Software Foundation, Inc. ++ * Written By Michael Meissner ++ * 64-bit support written by David Edelsohn ++ * ++ * This file is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the ++ * Free Software Foundation; either version 2, or (at your option) any ++ * later version. 
++ * ++ * In addition to the permissions in the GNU General Public License, the ++ * Free Software Foundation gives you unlimited permission to link the ++ * compiled version of this file with other programs, and to distribute ++ * those programs without any restriction coming from the use of this ++ * file. (The General Public License restrictions do apply in other ++ * respects; for example, they cover modification of the file, and ++ * distribution when not linked into another program.) ++ * ++ * This file is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; see the file COPYING. If not, write to ++ * the Free Software Foundation, 51 Franklin Street, Fifth Floor, ++ * Boston, MA 02110-1301, USA. ++ * ++ * As a special exception, if you link this library with files ++ * compiled with GCC to produce an executable, this does not cause ++ * the resulting executable to be covered by the GNU General Public License. ++ * This exception does not however invalidate any other reasons why ++ * the executable file might be covered by the GNU General Public License. ++ */ ++ ++/* Do any initializations needed for the eabi environment */ ++ ++ .file "crtsavfpr.asm" ++ .section ".text" ++ #include "ppc-asm.h" ++ ++/* On PowerPC64 Linux, these functions are provided by the linker. */ ++#ifndef __powerpc64__ ++ ++/* Routines for saving floating point registers, called by the compiler. */ ++/* Called with r11 pointing to the stack header word of the caller of the */ ++/* function, just beyond the end of the floating point save area. */ ++ ++HIDDEN_FUNC(_savefpr_14) stfd 14,-144(11) /* save fp registers */ ++HIDDEN_FUNC(_savefpr_15) stfd 15,-136(11) ++HIDDEN_FUNC(_savefpr_16) stfd 16,-128(11) ++HIDDEN_FUNC(_savefpr_17) stfd 17,-120(11) ++HIDDEN_FUNC(_savefpr_18) stfd 18,-112(11) ++HIDDEN_FUNC(_savefpr_19) stfd 19,-104(11) ++HIDDEN_FUNC(_savefpr_20) stfd 20,-96(11) ++HIDDEN_FUNC(_savefpr_21) stfd 21,-88(11) ++HIDDEN_FUNC(_savefpr_22) stfd 22,-80(11) ++HIDDEN_FUNC(_savefpr_23) stfd 23,-72(11) ++HIDDEN_FUNC(_savefpr_24) stfd 24,-64(11) ++HIDDEN_FUNC(_savefpr_25) stfd 25,-56(11) ++HIDDEN_FUNC(_savefpr_26) stfd 26,-48(11) ++HIDDEN_FUNC(_savefpr_27) stfd 27,-40(11) ++HIDDEN_FUNC(_savefpr_28) stfd 28,-32(11) ++HIDDEN_FUNC(_savefpr_29) stfd 29,-24(11) ++HIDDEN_FUNC(_savefpr_30) stfd 30,-16(11) ++HIDDEN_FUNC(_savefpr_31) stfd 31,-8(11) ++ blr ++FUNC_END(_savefpr_31) ++FUNC_END(_savefpr_30) ++FUNC_END(_savefpr_29) ++FUNC_END(_savefpr_28) ++FUNC_END(_savefpr_27) ++FUNC_END(_savefpr_26) ++FUNC_END(_savefpr_25) ++FUNC_END(_savefpr_24) ++FUNC_END(_savefpr_23) ++FUNC_END(_savefpr_22) ++FUNC_END(_savefpr_21) ++FUNC_END(_savefpr_20) ++FUNC_END(_savefpr_19) ++FUNC_END(_savefpr_18) ++FUNC_END(_savefpr_17) ++FUNC_END(_savefpr_16) ++FUNC_END(_savefpr_15) ++FUNC_END(_savefpr_14) ++ ++#endif +--- /dev/null ++++ b/gcc/config/rs6000/crtsavgpr.asm +@@ -0,0 +1,90 @@ ++/* ++ * Special support for eabi and SVR4 ++ * ++ * Copyright (C) 1995, 1996, 1998, 2000, 2001, 2008 ++ * Free Software Foundation, Inc. 
++ * Written By Michael Meissner ++ * 64-bit support written by David Edelsohn ++ * ++ * This file is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the ++ * Free Software Foundation; either version 2, or (at your option) any ++ * later version. ++ * ++ * In addition to the permissions in the GNU General Public License, the ++ * Free Software Foundation gives you unlimited permission to link the ++ * compiled version of this file with other programs, and to distribute ++ * those programs without any restriction coming from the use of this ++ * file. (The General Public License restrictions do apply in other ++ * respects; for example, they cover modification of the file, and ++ * distribution when not linked into another program.) ++ * ++ * This file is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; see the file COPYING. If not, write to ++ * the Free Software Foundation, 51 Franklin Street, Fifth Floor, ++ * Boston, MA 02110-1301, USA. ++ * ++ * As a special exception, if you link this library with files ++ * compiled with GCC to produce an executable, this does not cause ++ * the resulting executable to be covered by the GNU General Public License. ++ * This exception does not however invalidate any other reasons why ++ * the executable file might be covered by the GNU General Public License. ++ */ ++ ++/* Do any initializations needed for the eabi environment */ ++ ++ .file "crtsavgpr.asm" ++ .section ".text" ++ #include "ppc-asm.h" ++ ++/* On PowerPC64 Linux, these functions are provided by the linker. */ ++#ifndef __powerpc64__ ++ ++/* Routines for saving integer registers, called by the compiler. */ ++/* Called with r11 pointing to the stack header word of the caller of the */ ++/* function, just beyond the end of the integer save area. 
*/ ++ ++HIDDEN_FUNC(_savegpr_14) stw 14,-72(11) /* save gp registers */ ++HIDDEN_FUNC(_savegpr_15) stw 15,-68(11) ++HIDDEN_FUNC(_savegpr_16) stw 16,-64(11) ++HIDDEN_FUNC(_savegpr_17) stw 17,-60(11) ++HIDDEN_FUNC(_savegpr_18) stw 18,-56(11) ++HIDDEN_FUNC(_savegpr_19) stw 19,-52(11) ++HIDDEN_FUNC(_savegpr_20) stw 20,-48(11) ++HIDDEN_FUNC(_savegpr_21) stw 21,-44(11) ++HIDDEN_FUNC(_savegpr_22) stw 22,-40(11) ++HIDDEN_FUNC(_savegpr_23) stw 23,-36(11) ++HIDDEN_FUNC(_savegpr_24) stw 24,-32(11) ++HIDDEN_FUNC(_savegpr_25) stw 25,-28(11) ++HIDDEN_FUNC(_savegpr_26) stw 26,-24(11) ++HIDDEN_FUNC(_savegpr_27) stw 27,-20(11) ++HIDDEN_FUNC(_savegpr_28) stw 28,-16(11) ++HIDDEN_FUNC(_savegpr_29) stw 29,-12(11) ++HIDDEN_FUNC(_savegpr_30) stw 30,-8(11) ++HIDDEN_FUNC(_savegpr_31) stw 31,-4(11) ++ blr ++FUNC_END(_savegpr_31) ++FUNC_END(_savegpr_30) ++FUNC_END(_savegpr_29) ++FUNC_END(_savegpr_28) ++FUNC_END(_savegpr_27) ++FUNC_END(_savegpr_26) ++FUNC_END(_savegpr_25) ++FUNC_END(_savegpr_24) ++FUNC_END(_savegpr_23) ++FUNC_END(_savegpr_22) ++FUNC_END(_savegpr_21) ++FUNC_END(_savegpr_20) ++FUNC_END(_savegpr_19) ++FUNC_END(_savegpr_18) ++FUNC_END(_savegpr_17) ++FUNC_END(_savegpr_16) ++FUNC_END(_savegpr_15) ++FUNC_END(_savegpr_14) ++ ++#endif +--- a/gcc/config/rs6000/crtsavres.asm ++++ /dev/null +@@ -1,307 +0,0 @@ +-/* +- * Special support for eabi and SVR4 +- * +- * Copyright (C) 1995, 1996, 1998, 2000, 2001 Free Software Foundation, Inc. +- * Written By Michael Meissner +- * 64-bit support written by David Edelsohn +- * +- * This file is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License as published by the +- * Free Software Foundation; either version 2, or (at your option) any +- * later version. +- * +- * In addition to the permissions in the GNU General Public License, the +- * Free Software Foundation gives you unlimited permission to link the +- * compiled version of this file with other programs, and to distribute +- * those programs without any restriction coming from the use of this +- * file. (The General Public License restrictions do apply in other +- * respects; for example, they cover modification of the file, and +- * distribution when not linked into another program.) +- * +- * This file is distributed in the hope that it will be useful, but +- * WITHOUT ANY WARRANTY; without even the implied warranty of +- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +- * General Public License for more details. +- * +- * You should have received a copy of the GNU General Public License +- * along with this program; see the file COPYING. If not, write to +- * the Free Software Foundation, 51 Franklin Street, Fifth Floor, +- * Boston, MA 02110-1301, USA. +- * +- * As a special exception, if you link this library with files +- * compiled with GCC to produce an executable, this does not cause +- * the resulting executable to be covered by the GNU General Public License. +- * This exception does not however invalidate any other reasons why +- * the executable file might be covered by the GNU General Public License. +- */ +- +-/* Do any initializations needed for the eabi environment */ +- +- .file "crtsavres.asm" +- .section ".text" +- #include "ppc-asm.h" +- +-/* On PowerPC64 Linux, these functions are provided by the linker. */ +-#ifndef __powerpc64__ +- +-/* Routines for saving floating point registers, called by the compiler. 
*/ +-/* Called with r11 pointing to the stack header word of the caller of the */ +-/* function, just beyond the end of the floating point save area. */ +- +-FUNC_START(_savefpr_14) stfd 14,-144(11) /* save fp registers */ +-FUNC_START(_savefpr_15) stfd 15,-136(11) +-FUNC_START(_savefpr_16) stfd 16,-128(11) +-FUNC_START(_savefpr_17) stfd 17,-120(11) +-FUNC_START(_savefpr_18) stfd 18,-112(11) +-FUNC_START(_savefpr_19) stfd 19,-104(11) +-FUNC_START(_savefpr_20) stfd 20,-96(11) +-FUNC_START(_savefpr_21) stfd 21,-88(11) +-FUNC_START(_savefpr_22) stfd 22,-80(11) +-FUNC_START(_savefpr_23) stfd 23,-72(11) +-FUNC_START(_savefpr_24) stfd 24,-64(11) +-FUNC_START(_savefpr_25) stfd 25,-56(11) +-FUNC_START(_savefpr_26) stfd 26,-48(11) +-FUNC_START(_savefpr_27) stfd 27,-40(11) +-FUNC_START(_savefpr_28) stfd 28,-32(11) +-FUNC_START(_savefpr_29) stfd 29,-24(11) +-FUNC_START(_savefpr_30) stfd 30,-16(11) +-FUNC_START(_savefpr_31) stfd 31,-8(11) +- blr +-FUNC_END(_savefpr_31) +-FUNC_END(_savefpr_30) +-FUNC_END(_savefpr_29) +-FUNC_END(_savefpr_28) +-FUNC_END(_savefpr_27) +-FUNC_END(_savefpr_26) +-FUNC_END(_savefpr_25) +-FUNC_END(_savefpr_24) +-FUNC_END(_savefpr_23) +-FUNC_END(_savefpr_22) +-FUNC_END(_savefpr_21) +-FUNC_END(_savefpr_20) +-FUNC_END(_savefpr_19) +-FUNC_END(_savefpr_18) +-FUNC_END(_savefpr_17) +-FUNC_END(_savefpr_16) +-FUNC_END(_savefpr_15) +-FUNC_END(_savefpr_14) +- +-/* Routines for saving integer registers, called by the compiler. */ +-/* Called with r11 pointing to the stack header word of the caller of the */ +-/* function, just beyond the end of the integer save area. */ +- +-FUNC_START(_savegpr_14) stw 14,-72(11) /* save gp registers */ +-FUNC_START(_savegpr_15) stw 15,-68(11) +-FUNC_START(_savegpr_16) stw 16,-64(11) +-FUNC_START(_savegpr_17) stw 17,-60(11) +-FUNC_START(_savegpr_18) stw 18,-56(11) +-FUNC_START(_savegpr_19) stw 19,-52(11) +-FUNC_START(_savegpr_20) stw 20,-48(11) +-FUNC_START(_savegpr_21) stw 21,-44(11) +-FUNC_START(_savegpr_22) stw 22,-40(11) +-FUNC_START(_savegpr_23) stw 23,-36(11) +-FUNC_START(_savegpr_24) stw 24,-32(11) +-FUNC_START(_savegpr_25) stw 25,-28(11) +-FUNC_START(_savegpr_26) stw 26,-24(11) +-FUNC_START(_savegpr_27) stw 27,-20(11) +-FUNC_START(_savegpr_28) stw 28,-16(11) +-FUNC_START(_savegpr_29) stw 29,-12(11) +-FUNC_START(_savegpr_30) stw 30,-8(11) +-FUNC_START(_savegpr_31) stw 31,-4(11) +- blr +-FUNC_END(_savegpr_31) +-FUNC_END(_savegpr_30) +-FUNC_END(_savegpr_29) +-FUNC_END(_savegpr_28) +-FUNC_END(_savegpr_27) +-FUNC_END(_savegpr_26) +-FUNC_END(_savegpr_25) +-FUNC_END(_savegpr_24) +-FUNC_END(_savegpr_23) +-FUNC_END(_savegpr_22) +-FUNC_END(_savegpr_21) +-FUNC_END(_savegpr_20) +-FUNC_END(_savegpr_19) +-FUNC_END(_savegpr_18) +-FUNC_END(_savegpr_17) +-FUNC_END(_savegpr_16) +-FUNC_END(_savegpr_15) +-FUNC_END(_savegpr_14) +- +-/* Routines for restoring floating point registers, called by the compiler. */ +-/* Called with r11 pointing to the stack header word of the caller of the */ +-/* function, just beyond the end of the floating point save area. 
*/ +- +-FUNC_START(_restfpr_14) lfd 14,-144(11) /* restore fp registers */ +-FUNC_START(_restfpr_15) lfd 15,-136(11) +-FUNC_START(_restfpr_16) lfd 16,-128(11) +-FUNC_START(_restfpr_17) lfd 17,-120(11) +-FUNC_START(_restfpr_18) lfd 18,-112(11) +-FUNC_START(_restfpr_19) lfd 19,-104(11) +-FUNC_START(_restfpr_20) lfd 20,-96(11) +-FUNC_START(_restfpr_21) lfd 21,-88(11) +-FUNC_START(_restfpr_22) lfd 22,-80(11) +-FUNC_START(_restfpr_23) lfd 23,-72(11) +-FUNC_START(_restfpr_24) lfd 24,-64(11) +-FUNC_START(_restfpr_25) lfd 25,-56(11) +-FUNC_START(_restfpr_26) lfd 26,-48(11) +-FUNC_START(_restfpr_27) lfd 27,-40(11) +-FUNC_START(_restfpr_28) lfd 28,-32(11) +-FUNC_START(_restfpr_29) lfd 29,-24(11) +-FUNC_START(_restfpr_30) lfd 30,-16(11) +-FUNC_START(_restfpr_31) lfd 31,-8(11) +- blr +-FUNC_END(_restfpr_31) +-FUNC_END(_restfpr_30) +-FUNC_END(_restfpr_29) +-FUNC_END(_restfpr_28) +-FUNC_END(_restfpr_27) +-FUNC_END(_restfpr_26) +-FUNC_END(_restfpr_25) +-FUNC_END(_restfpr_24) +-FUNC_END(_restfpr_23) +-FUNC_END(_restfpr_22) +-FUNC_END(_restfpr_21) +-FUNC_END(_restfpr_20) +-FUNC_END(_restfpr_19) +-FUNC_END(_restfpr_18) +-FUNC_END(_restfpr_17) +-FUNC_END(_restfpr_16) +-FUNC_END(_restfpr_15) +-FUNC_END(_restfpr_14) +- +-/* Routines for restoring integer registers, called by the compiler. */ +-/* Called with r11 pointing to the stack header word of the caller of the */ +-/* function, just beyond the end of the integer restore area. */ +- +-FUNC_START(_restgpr_14) lwz 14,-72(11) /* restore gp registers */ +-FUNC_START(_restgpr_15) lwz 15,-68(11) +-FUNC_START(_restgpr_16) lwz 16,-64(11) +-FUNC_START(_restgpr_17) lwz 17,-60(11) +-FUNC_START(_restgpr_18) lwz 18,-56(11) +-FUNC_START(_restgpr_19) lwz 19,-52(11) +-FUNC_START(_restgpr_20) lwz 20,-48(11) +-FUNC_START(_restgpr_21) lwz 21,-44(11) +-FUNC_START(_restgpr_22) lwz 22,-40(11) +-FUNC_START(_restgpr_23) lwz 23,-36(11) +-FUNC_START(_restgpr_24) lwz 24,-32(11) +-FUNC_START(_restgpr_25) lwz 25,-28(11) +-FUNC_START(_restgpr_26) lwz 26,-24(11) +-FUNC_START(_restgpr_27) lwz 27,-20(11) +-FUNC_START(_restgpr_28) lwz 28,-16(11) +-FUNC_START(_restgpr_29) lwz 29,-12(11) +-FUNC_START(_restgpr_30) lwz 30,-8(11) +-FUNC_START(_restgpr_31) lwz 31,-4(11) +- blr +-FUNC_END(_restgpr_31) +-FUNC_END(_restgpr_30) +-FUNC_END(_restgpr_29) +-FUNC_END(_restgpr_28) +-FUNC_END(_restgpr_27) +-FUNC_END(_restgpr_26) +-FUNC_END(_restgpr_25) +-FUNC_END(_restgpr_24) +-FUNC_END(_restgpr_23) +-FUNC_END(_restgpr_22) +-FUNC_END(_restgpr_21) +-FUNC_END(_restgpr_20) +-FUNC_END(_restgpr_19) +-FUNC_END(_restgpr_18) +-FUNC_END(_restgpr_17) +-FUNC_END(_restgpr_16) +-FUNC_END(_restgpr_15) +-FUNC_END(_restgpr_14) +- +-/* Routines for restoring floating point registers, called by the compiler. */ +-/* Called with r11 pointing to the stack header word of the caller of the */ +-/* function, just beyond the end of the floating point save area. 
*/ +-/* In addition to restoring the fp registers, it will return to the caller's */ +-/* caller */ +- +-FUNC_START(_restfpr_14_x) lfd 14,-144(11) /* restore fp registers */ +-FUNC_START(_restfpr_15_x) lfd 15,-136(11) +-FUNC_START(_restfpr_16_x) lfd 16,-128(11) +-FUNC_START(_restfpr_17_x) lfd 17,-120(11) +-FUNC_START(_restfpr_18_x) lfd 18,-112(11) +-FUNC_START(_restfpr_19_x) lfd 19,-104(11) +-FUNC_START(_restfpr_20_x) lfd 20,-96(11) +-FUNC_START(_restfpr_21_x) lfd 21,-88(11) +-FUNC_START(_restfpr_22_x) lfd 22,-80(11) +-FUNC_START(_restfpr_23_x) lfd 23,-72(11) +-FUNC_START(_restfpr_24_x) lfd 24,-64(11) +-FUNC_START(_restfpr_25_x) lfd 25,-56(11) +-FUNC_START(_restfpr_26_x) lfd 26,-48(11) +-FUNC_START(_restfpr_27_x) lfd 27,-40(11) +-FUNC_START(_restfpr_28_x) lfd 28,-32(11) +-FUNC_START(_restfpr_29_x) lfd 29,-24(11) +-FUNC_START(_restfpr_30_x) lfd 30,-16(11) +-FUNC_START(_restfpr_31_x) lwz 0,4(11) +- lfd 31,-8(11) +- mtlr 0 +- mr 1,11 +- blr +-FUNC_END(_restfpr_31_x) +-FUNC_END(_restfpr_30_x) +-FUNC_END(_restfpr_29_x) +-FUNC_END(_restfpr_28_x) +-FUNC_END(_restfpr_27_x) +-FUNC_END(_restfpr_26_x) +-FUNC_END(_restfpr_25_x) +-FUNC_END(_restfpr_24_x) +-FUNC_END(_restfpr_23_x) +-FUNC_END(_restfpr_22_x) +-FUNC_END(_restfpr_21_x) +-FUNC_END(_restfpr_20_x) +-FUNC_END(_restfpr_19_x) +-FUNC_END(_restfpr_18_x) +-FUNC_END(_restfpr_17_x) +-FUNC_END(_restfpr_16_x) +-FUNC_END(_restfpr_15_x) +-FUNC_END(_restfpr_14_x) +- +-/* Routines for restoring integer registers, called by the compiler. */ +-/* Called with r11 pointing to the stack header word of the caller of the */ +-/* function, just beyond the end of the integer restore area. */ +- +-FUNC_START(_restgpr_14_x) lwz 14,-72(11) /* restore gp registers */ +-FUNC_START(_restgpr_15_x) lwz 15,-68(11) +-FUNC_START(_restgpr_16_x) lwz 16,-64(11) +-FUNC_START(_restgpr_17_x) lwz 17,-60(11) +-FUNC_START(_restgpr_18_x) lwz 18,-56(11) +-FUNC_START(_restgpr_19_x) lwz 19,-52(11) +-FUNC_START(_restgpr_20_x) lwz 20,-48(11) +-FUNC_START(_restgpr_21_x) lwz 21,-44(11) +-FUNC_START(_restgpr_22_x) lwz 22,-40(11) +-FUNC_START(_restgpr_23_x) lwz 23,-36(11) +-FUNC_START(_restgpr_24_x) lwz 24,-32(11) +-FUNC_START(_restgpr_25_x) lwz 25,-28(11) +-FUNC_START(_restgpr_26_x) lwz 26,-24(11) +-FUNC_START(_restgpr_27_x) lwz 27,-20(11) +-FUNC_START(_restgpr_28_x) lwz 28,-16(11) +-FUNC_START(_restgpr_29_x) lwz 29,-12(11) +-FUNC_START(_restgpr_30_x) lwz 30,-8(11) +-FUNC_START(_restgpr_31_x) lwz 0,4(11) +- lwz 31,-4(11) +- mtlr 0 +- mr 1,11 +- blr +-FUNC_END(_restgpr_31_x) +-FUNC_END(_restgpr_30_x) +-FUNC_END(_restgpr_29_x) +-FUNC_END(_restgpr_28_x) +-FUNC_END(_restgpr_27_x) +-FUNC_END(_restgpr_26_x) +-FUNC_END(_restgpr_25_x) +-FUNC_END(_restgpr_24_x) +-FUNC_END(_restgpr_23_x) +-FUNC_END(_restgpr_22_x) +-FUNC_END(_restgpr_21_x) +-FUNC_END(_restgpr_20_x) +-FUNC_END(_restgpr_19_x) +-FUNC_END(_restgpr_18_x) +-FUNC_END(_restgpr_17_x) +-FUNC_END(_restgpr_16_x) +-FUNC_END(_restgpr_15_x) +-FUNC_END(_restgpr_14_x) +- +-#endif +--- a/gcc/config/rs6000/darwin-ldouble.c ++++ b/gcc/config/rs6000/darwin-ldouble.c +@@ -422,15 +422,13 @@ fmsub (double a, double b, double c) + FP_UNPACK_SEMIRAW_Q(U,u); + FP_UNPACK_SEMIRAW_Q(Z,z); + FP_SUB_Q(V,U,Z); +- FP_PACK_SEMIRAW_Q(v,V); +- FP_HANDLE_EXCEPTIONS; + + /* Truncate quad to double. 
*/ +- FP_INIT_ROUNDMODE; +- FP_UNPACK_SEMIRAW_Q(V,v); + #if (2 * _FP_W_TYPE_SIZE) < _FP_FRACBITS_Q ++ V_f[3] &= 0x0007ffff; + FP_TRUNC(D,Q,2,4,R,V); + #else ++ V_f1 &= 0x0007ffffffffffffL; + FP_TRUNC(D,Q,1,2,R,V); + #endif + FP_PACK_SEMIRAW_D(r,R); +--- a/gcc/config/rs6000/darwin.h ++++ b/gcc/config/rs6000/darwin.h +@@ -191,6 +191,8 @@ + + #undef FP_SAVE_INLINE + #define FP_SAVE_INLINE(FIRST_REG) ((FIRST_REG) < 64) ++#undef GP_SAVE_INLINE ++#define GP_SAVE_INLINE(FIRST_REG) ((FIRST_REG) < 32) + + /* Darwin uses a function call if everything needs to be saved/restored. */ + #undef WORLD_SAVE_P +--- a/gcc/config/rs6000/dfp.md ++++ b/gcc/config/rs6000/dfp.md +@@ -155,7 +155,7 @@ + (define_expand "negdd2" + [(set (match_operand:DD 0 "gpc_reg_operand" "") + (neg:DD (match_operand:DD 1 "gpc_reg_operand" "")))] +- "TARGET_HARD_FLOAT && (TARGET_FPRS || TARGET_E500_DOUBLE)" ++ "TARGET_HARD_FLOAT && TARGET_FPRS" + "") + + (define_insn "*negdd2_fpr" +@@ -168,7 +168,7 @@ + (define_expand "absdd2" + [(set (match_operand:DD 0 "gpc_reg_operand" "") + (abs:DD (match_operand:DD 1 "gpc_reg_operand" "")))] +- "TARGET_HARD_FLOAT && (TARGET_FPRS || TARGET_E500_DOUBLE)" ++ "TARGET_HARD_FLOAT && TARGET_FPRS" + "") + + (define_insn "*absdd2_fpr" +@@ -376,7 +376,7 @@ + (define_insn "*movdd_softfloat32" + [(set (match_operand:DD 0 "nonimmediate_operand" "=r,r,m,r,r,r") + (match_operand:DD 1 "input_operand" "r,m,r,G,H,F"))] +- "! TARGET_POWERPC64 && TARGET_SOFT_FLOAT ++ "! TARGET_POWERPC64 && (TARGET_SOFT_FLOAT || !TARGET_FPRS) + && (gpc_reg_operand (operands[0], DDmode) + || gpc_reg_operand (operands[1], DDmode))" + "* +@@ -486,7 +486,7 @@ + (define_expand "negtd2" + [(set (match_operand:TD 0 "gpc_reg_operand" "") + (neg:TD (match_operand:TD 1 "gpc_reg_operand" "")))] +- "TARGET_HARD_FLOAT && (TARGET_FPRS || TARGET_E500_DOUBLE)" ++ "TARGET_HARD_FLOAT && TARGET_FPRS" + "") + + (define_insn "*negtd2_fpr" +@@ -499,7 +499,7 @@ + (define_expand "abstd2" + [(set (match_operand:TD 0 "gpc_reg_operand" "") + (abs:TD (match_operand:TD 1 "gpc_reg_operand" "")))] +- "TARGET_HARD_FLOAT && (TARGET_FPRS || TARGET_E500_DOUBLE)" ++ "TARGET_HARD_FLOAT && TARGET_FPRS" + "") + + (define_insn "*abstd2_fpr" +--- /dev/null ++++ b/gcc/config/rs6000/e300c2c3.md +@@ -0,0 +1,189 @@ ++;; Pipeline description for Motorola PowerPC e300c3 core. ++;; Copyright (C) 2008 Free Software Foundation, Inc. ++;; Contributed by Edmar Wienskoski (edmar@freescale.com) ++;; ++;; This file is part of GCC. ++;; ++;; GCC is free software; you can redistribute it and/or modify it ++;; under the terms of the GNU General Public License as published ++;; by the Free Software Foundation; either version 3, or (at your ++;; option) any later version. ++;; ++;; GCC is distributed in the hope that it will be useful, but WITHOUT ++;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY ++;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public ++;; License for more details. ++;; ++;; You should have received a copy of the GNU General Public License ++;; along with GCC; see the file COPYING3. If not see ++;; . ++ ++(define_automaton "ppce300c3_most,ppce300c3_long,ppce300c3_retire") ++(define_cpu_unit "ppce300c3_decode_0,ppce300c3_decode_1" "ppce300c3_most") ++ ++;; We don't simulate general issue queue (GIC). If we have SU insn ++;; and then SU1 insn, they can not be issued on the same cycle ++;; (although SU1 insn and then SU insn can be issued) because the SU ++;; insn will go to SU1 from GIC0 entry. 
Fortunately, the first cycle ++;; multipass insn scheduling will find the situation and issue the SU1 ++;; insn and then the SU insn. ++(define_cpu_unit "ppce300c3_issue_0,ppce300c3_issue_1" "ppce300c3_most") ++ ++;; We could describe completion buffers slots in combination with the ++;; retirement units and the order of completion but the result ++;; automaton would behave in the same way because we can not describe ++;; real latency time with taking in order completion into account. ++;; Actually we could define the real latency time by querying reserved ++;; automaton units but the current scheduler uses latency time before ++;; issuing insns and making any reservations. ++;; ++;; So our description is aimed to achieve a insn schedule in which the ++;; insns would not wait in the completion buffer. ++(define_cpu_unit "ppce300c3_retire_0,ppce300c3_retire_1" "ppce300c3_retire") ++ ++;; Branch unit: ++(define_cpu_unit "ppce300c3_bu" "ppce300c3_most") ++ ++;; IU: ++(define_cpu_unit "ppce300c3_iu0_stage0,ppce300c3_iu1_stage0" "ppce300c3_most") ++ ++;; IU: This used to describe non-pipelined division. ++(define_cpu_unit "ppce300c3_mu_div" "ppce300c3_long") ++ ++;; SRU: ++(define_cpu_unit "ppce300c3_sru_stage0" "ppce300c3_most") ++ ++;; Here we simplified LSU unit description not describing the stages. ++(define_cpu_unit "ppce300c3_lsu" "ppce300c3_most") ++ ++;; FPU: ++(define_cpu_unit "ppce300c3_fpu" "ppce300c3_most") ++ ++;; The following units are used to make automata deterministic ++(define_cpu_unit "present_ppce300c3_decode_0" "ppce300c3_most") ++(define_cpu_unit "present_ppce300c3_issue_0" "ppce300c3_most") ++(define_cpu_unit "present_ppce300c3_retire_0" "ppce300c3_retire") ++(define_cpu_unit "present_ppce300c3_iu0_stage0" "ppce300c3_most") ++ ++;; The following sets to make automata deterministic when option ndfa is used. ++(presence_set "present_ppce300c3_decode_0" "ppce300c3_decode_0") ++(presence_set "present_ppce300c3_issue_0" "ppce300c3_issue_0") ++(presence_set "present_ppce300c3_retire_0" "ppce300c3_retire_0") ++(presence_set "present_ppce300c3_iu0_stage0" "ppce300c3_iu0_stage0") ++ ++;; Some useful abbreviations. ++(define_reservation "ppce300c3_decode" ++ "ppce300c3_decode_0|ppce300c3_decode_1+present_ppce300c3_decode_0") ++(define_reservation "ppce300c3_issue" ++ "ppce300c3_issue_0|ppce300c3_issue_1+present_ppce300c3_issue_0") ++(define_reservation "ppce300c3_retire" ++ "ppce300c3_retire_0|ppce300c3_retire_1+present_ppce300c3_retire_0") ++(define_reservation "ppce300c3_iu_stage0" ++ "ppce300c3_iu0_stage0|ppce300c3_iu1_stage0+present_ppce300c3_iu0_stage0") ++ ++;; Compares can be executed either one of the IU or SRU ++(define_insn_reservation "ppce300c3_cmp" 1 ++ (and (eq_attr "type" "cmp,compare,delayed_compare,fast_compare") ++ (ior (eq_attr "cpu" "ppce300c2") (eq_attr "cpu" "ppce300c3"))) ++ "ppce300c3_decode,ppce300c3_issue+(ppce300c3_iu_stage0|ppce300c3_sru_stage0) \ ++ +ppce300c3_retire") ++ ++;; Other one cycle IU insns ++(define_insn_reservation "ppce300c3_iu" 1 ++ (and (eq_attr "type" "integer,insert_word") ++ (ior (eq_attr "cpu" "ppce300c2") (eq_attr "cpu" "ppce300c3"))) ++ "ppce300c3_decode,ppce300c3_issue+ppce300c3_iu_stage0+ppce300c3_retire") ++ ++;; Branch. Actually this latency time is not used by the scheduler. 
++(define_insn_reservation "ppce300c3_branch" 1 ++ (and (eq_attr "type" "jmpreg,branch") ++ (ior (eq_attr "cpu" "ppce300c2") (eq_attr "cpu" "ppce300c3"))) ++ "ppce300c3_decode,ppce300c3_bu,ppce300c3_retire") ++ ++;; Multiply is non-pipelined but can be executed in any IU ++(define_insn_reservation "ppce300c3_multiply" 2 ++ (and (eq_attr "type" "imul,imul2,imul3,imul_compare") ++ (ior (eq_attr "cpu" "ppce300c2") (eq_attr "cpu" "ppce300c3"))) ++ "ppce300c3_decode,ppce300c3_issue+ppce300c3_iu_stage0, \ ++ ppce300c3_iu_stage0+ppce300c3_retire") ++ ++;; Divide. We use the average latency time here. We omit reserving a ++;; retire unit because of the result automata will be huge. ++(define_insn_reservation "ppce300c3_divide" 20 ++ (and (eq_attr "type" "idiv") ++ (ior (eq_attr "cpu" "ppce300c2") (eq_attr "cpu" "ppce300c3"))) ++ "ppce300c3_decode,ppce300c3_issue+ppce300c3_iu_stage0+ppce300c3_mu_div,\ ++ ppce300c3_mu_div*19") ++ ++;; CR logical ++(define_insn_reservation "ppce300c3_cr_logical" 1 ++ (and (eq_attr "type" "cr_logical,delayed_cr") ++ (ior (eq_attr "cpu" "ppce300c2") (eq_attr "cpu" "ppce300c3"))) ++ "ppce300c3_decode,ppce300c3_issue+ppce300c3_sru_stage0+ppce300c3_retire") ++ ++;; Mfcr ++(define_insn_reservation "ppce300c3_mfcr" 1 ++ (and (eq_attr "type" "mfcr") ++ (ior (eq_attr "cpu" "ppce300c2") (eq_attr "cpu" "ppce300c3"))) ++ "ppce300c3_decode,ppce300c3_issue+ppce300c3_sru_stage0+ppce300c3_retire") ++ ++;; Mtcrf ++(define_insn_reservation "ppce300c3_mtcrf" 1 ++ (and (eq_attr "type" "mtcr") ++ (ior (eq_attr "cpu" "ppce300c2") (eq_attr "cpu" "ppce300c3"))) ++ "ppce300c3_decode,ppce300c3_issue+ppce300c3_sru_stage0+ppce300c3_retire") ++ ++;; Mtjmpr ++(define_insn_reservation "ppce300c3_mtjmpr" 1 ++ (and (eq_attr "type" "mtjmpr,mfjmpr") ++ (ior (eq_attr "cpu" "ppce300c2") (eq_attr "cpu" "ppce300c3"))) ++ "ppce300c3_decode,ppce300c3_issue+ppce300c3_sru_stage0+ppce300c3_retire") ++ ++;; Float point instructions ++(define_insn_reservation "ppce300c3_fpcompare" 3 ++ (and (eq_attr "type" "fpcompare") ++ (eq_attr "cpu" "ppce300c3")) ++ "ppce300c3_decode,ppce300c3_issue+ppce300c3_fpu,nothing,ppce300c3_retire") ++ ++(define_insn_reservation "ppce300c3_fp" 3 ++ (and (eq_attr "type" "fp") ++ (eq_attr "cpu" "ppce300c3")) ++ "ppce300c3_decode,ppce300c3_issue+ppce300c3_fpu,nothing,ppce300c3_retire") ++ ++(define_insn_reservation "ppce300c3_dmul" 4 ++ (and (eq_attr "type" "dmul") ++ (eq_attr "cpu" "ppce300c3")) ++ "ppce300c3_decode,ppce300c3_issue+ppce300c3_fpu,ppce300c3_fpu,nothing,ppce300c3_retire") ++ ++; Divides are not pipelined ++(define_insn_reservation "ppce300c3_sdiv" 18 ++ (and (eq_attr "type" "sdiv") ++ (eq_attr "cpu" "ppce300c3")) ++ "ppce300c3_decode,ppce300c3_issue+ppce300c3_fpu,ppce300c3_fpu*17") ++ ++(define_insn_reservation "ppce300c3_ddiv" 33 ++ (and (eq_attr "type" "ddiv") ++ (eq_attr "cpu" "ppce300c3")) ++ "ppce300c3_decode,ppce300c3_issue+ppce300c3_fpu,ppce300c3_fpu*32") ++ ++;; Loads ++(define_insn_reservation "ppce300c3_load" 2 ++ (and (eq_attr "type" "load,load_ext,load_ext_u,load_ext_ux,load_ux,load_u") ++ (ior (eq_attr "cpu" "ppce300c2") (eq_attr "cpu" "ppce300c3"))) ++ "ppce300c3_decode,ppce300c3_issue+ppce300c3_lsu,ppce300c3_retire") ++ ++(define_insn_reservation "ppce300c3_fpload" 2 ++ (and (eq_attr "type" "fpload,fpload_ux,fpload_u") ++ (eq_attr "cpu" "ppce300c3")) ++ "ppce300c3_decode,ppce300c3_issue+ppce300c3_lsu,ppce300c3_retire") ++ ++;; Stores. 
++(define_insn_reservation "ppce300c3_store" 2 ++ (and (eq_attr "type" "store,store_ux,store_u") ++ (ior (eq_attr "cpu" "ppce300c2") (eq_attr "cpu" "ppce300c3"))) ++ "ppce300c3_decode,ppce300c3_issue+ppce300c3_lsu,ppce300c3_retire") ++ ++(define_insn_reservation "ppce300c3_fpstore" 2 ++ (and (eq_attr "type" "fpstore,fpstore_ux,fpstore_u") ++ (eq_attr "cpu" "ppce300c3")) ++ "ppce300c3_decode,ppce300c3_issue+ppce300c3_lsu,ppce300c3_retire") +--- a/gcc/config/rs6000/e500.h ++++ b/gcc/config/rs6000/e500.h +@@ -19,7 +19,6 @@ + #undef TARGET_SPE_ABI + #undef TARGET_SPE + #undef TARGET_E500 +-#undef TARGET_ISEL + #undef TARGET_FPRS + #undef TARGET_E500_SINGLE + #undef TARGET_E500_DOUBLE +@@ -28,13 +27,12 @@ + #define TARGET_SPE_ABI rs6000_spe_abi + #define TARGET_SPE rs6000_spe + #define TARGET_E500 (rs6000_cpu == PROCESSOR_PPC8540) +-#define TARGET_ISEL rs6000_isel + #define TARGET_FPRS (rs6000_float_gprs == 0) + #define TARGET_E500_SINGLE (TARGET_HARD_FLOAT && rs6000_float_gprs == 1) + #define TARGET_E500_DOUBLE (TARGET_HARD_FLOAT && rs6000_float_gprs == 2) + #define CHECK_E500_OPTIONS \ + do { \ +- if (TARGET_E500 || TARGET_SPE || TARGET_SPE_ABI || TARGET_ISEL \ ++ if (TARGET_E500 || TARGET_SPE || TARGET_SPE_ABI \ + || TARGET_E500_SINGLE || TARGET_E500_DOUBLE) \ + { \ + if (TARGET_ALTIVEC) \ +--- /dev/null ++++ b/gcc/config/rs6000/e500crtres32gpr.asm +@@ -0,0 +1,84 @@ ++/* ++ * Special support for e500 eabi and SVR4 ++ * ++ * Copyright (C) 2008 Free Software Foundation, Inc. ++ * Written by Nathan Froyd ++ * ++ * This file is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the ++ * Free Software Foundation; either version 2, or (at your option) any ++ * later version. ++ * ++ * In addition to the permissions in the GNU General Public License, the ++ * Free Software Foundation gives you unlimited permission to link the ++ * compiled version of this file with other programs, and to distribute ++ * those programs without any restriction coming from the use of this ++ * file. (The General Public License restrictions do apply in other ++ * respects; for example, they cover modification of the file, and ++ * distribution when not linked into another program.) ++ * ++ * This file is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; see the file COPYING. If not, write to ++ * the Free Software Foundation, 51 Franklin Street, Fifth Floor, ++ * Boston, MA 02110-1301, USA. ++ * ++ * As a special exception, if you link this library with files ++ * compiled with GCC to produce an executable, this does not cause ++ * the resulting executable to be covered by the GNU General Public License. ++ * This exception does not however invalidate any other reasons why ++ * the executable file might be covered by the GNU General Public License. ++ */ ++ ++ .file "e500crtres32gpr.asm" ++ .section ".text" ++ #include "ppc-asm.h" ++ ++#ifdef __SPE__ ++ ++/* Routines for restoring 32-bit integer registers, called by the compiler. */ ++/* "Bare" versions that simply return to their caller. 
*/ ++ ++HIDDEN_FUNC(_rest32gpr_14) lwz 14,-72(11) ++HIDDEN_FUNC(_rest32gpr_15) lwz 15,-68(11) ++HIDDEN_FUNC(_rest32gpr_16) lwz 16,-64(11) ++HIDDEN_FUNC(_rest32gpr_17) lwz 17,-60(11) ++HIDDEN_FUNC(_rest32gpr_18) lwz 18,-56(11) ++HIDDEN_FUNC(_rest32gpr_19) lwz 19,-52(11) ++HIDDEN_FUNC(_rest32gpr_20) lwz 20,-48(11) ++HIDDEN_FUNC(_rest32gpr_21) lwz 21,-44(11) ++HIDDEN_FUNC(_rest32gpr_22) lwz 22,-40(11) ++HIDDEN_FUNC(_rest32gpr_23) lwz 23,-36(11) ++HIDDEN_FUNC(_rest32gpr_24) lwz 24,-32(11) ++HIDDEN_FUNC(_rest32gpr_25) lwz 25,-28(11) ++HIDDEN_FUNC(_rest32gpr_26) lwz 26,-24(11) ++HIDDEN_FUNC(_rest32gpr_27) lwz 27,-20(11) ++HIDDEN_FUNC(_rest32gpr_28) lwz 28,-16(11) ++HIDDEN_FUNC(_rest32gpr_29) lwz 29,-12(11) ++HIDDEN_FUNC(_rest32gpr_30) lwz 30,-8(11) ++HIDDEN_FUNC(_rest32gpr_31) lwz 31,-4(11) ++ blr ++FUNC_END(_rest32gpr_31) ++FUNC_END(_rest32gpr_30) ++FUNC_END(_rest32gpr_29) ++FUNC_END(_rest32gpr_28) ++FUNC_END(_rest32gpr_27) ++FUNC_END(_rest32gpr_26) ++FUNC_END(_rest32gpr_25) ++FUNC_END(_rest32gpr_24) ++FUNC_END(_rest32gpr_23) ++FUNC_END(_rest32gpr_22) ++FUNC_END(_rest32gpr_21) ++FUNC_END(_rest32gpr_20) ++FUNC_END(_rest32gpr_19) ++FUNC_END(_rest32gpr_18) ++FUNC_END(_rest32gpr_17) ++FUNC_END(_rest32gpr_16) ++FUNC_END(_rest32gpr_15) ++FUNC_END(_rest32gpr_14) ++ ++#endif +--- /dev/null ++++ b/gcc/config/rs6000/e500crtres64gpr.asm +@@ -0,0 +1,84 @@ ++/* ++ * Special support for e500 eabi and SVR4 ++ * ++ * Copyright (C) 2008 Free Software Foundation, Inc. ++ * Written by Nathan Froyd ++ * ++ * This file is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the ++ * Free Software Foundation; either version 2, or (at your option) any ++ * later version. ++ * ++ * In addition to the permissions in the GNU General Public License, the ++ * Free Software Foundation gives you unlimited permission to link the ++ * compiled version of this file with other programs, and to distribute ++ * those programs without any restriction coming from the use of this ++ * file. (The General Public License restrictions do apply in other ++ * respects; for example, they cover modification of the file, and ++ * distribution when not linked into another program.) ++ * ++ * This file is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; see the file COPYING. If not, write to ++ * the Free Software Foundation, 51 Franklin Street, Fifth Floor, ++ * Boston, MA 02110-1301, USA. ++ * ++ * As a special exception, if you link this library with files ++ * compiled with GCC to produce an executable, this does not cause ++ * the resulting executable to be covered by the GNU General Public License. ++ * This exception does not however invalidate any other reasons why ++ * the executable file might be covered by the GNU General Public License. ++ */ ++ ++ .file "e500crtres64gpr.asm" ++ .section ".text" ++ #include "ppc-asm.h" ++ ++#ifdef __SPE__ ++ ++/* Routines for restoring 64-bit integer registers, called by the compiler. */ ++/* "Bare" versions that return to their caller. 
*/ ++ ++HIDDEN_FUNC(_rest64gpr_14) evldd 14,0(11) ++HIDDEN_FUNC(_rest64gpr_15) evldd 15,8(11) ++HIDDEN_FUNC(_rest64gpr_16) evldd 16,16(11) ++HIDDEN_FUNC(_rest64gpr_17) evldd 17,24(11) ++HIDDEN_FUNC(_rest64gpr_18) evldd 18,32(11) ++HIDDEN_FUNC(_rest64gpr_19) evldd 19,40(11) ++HIDDEN_FUNC(_rest64gpr_20) evldd 20,48(11) ++HIDDEN_FUNC(_rest64gpr_21) evldd 21,56(11) ++HIDDEN_FUNC(_rest64gpr_22) evldd 22,64(11) ++HIDDEN_FUNC(_rest64gpr_23) evldd 23,72(11) ++HIDDEN_FUNC(_rest64gpr_24) evldd 24,80(11) ++HIDDEN_FUNC(_rest64gpr_25) evldd 25,88(11) ++HIDDEN_FUNC(_rest64gpr_26) evldd 26,96(11) ++HIDDEN_FUNC(_rest64gpr_27) evldd 27,104(11) ++HIDDEN_FUNC(_rest64gpr_28) evldd 28,112(11) ++HIDDEN_FUNC(_rest64gpr_29) evldd 29,120(11) ++HIDDEN_FUNC(_rest64gpr_30) evldd 30,128(11) ++HIDDEN_FUNC(_rest64gpr_31) evldd 31,136(11) ++ blr ++FUNC_END(_rest64gpr_31) ++FUNC_END(_rest64gpr_30) ++FUNC_END(_rest64gpr_29) ++FUNC_END(_rest64gpr_28) ++FUNC_END(_rest64gpr_27) ++FUNC_END(_rest64gpr_26) ++FUNC_END(_rest64gpr_25) ++FUNC_END(_rest64gpr_24) ++FUNC_END(_rest64gpr_23) ++FUNC_END(_rest64gpr_22) ++FUNC_END(_rest64gpr_21) ++FUNC_END(_rest64gpr_20) ++FUNC_END(_rest64gpr_19) ++FUNC_END(_rest64gpr_18) ++FUNC_END(_rest64gpr_17) ++FUNC_END(_rest64gpr_16) ++FUNC_END(_rest64gpr_15) ++FUNC_END(_rest64gpr_14) ++ ++#endif +--- /dev/null ++++ b/gcc/config/rs6000/e500crtres64gprctr.asm +@@ -0,0 +1,83 @@ ++/* ++ * Special support for e500 eabi and SVR4 ++ * ++ * Copyright (C) 2008 Free Software Foundation, Inc. ++ * Written by Nathan Froyd ++ * ++ * This file is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the ++ * Free Software Foundation; either version 2, or (at your option) any ++ * later version. ++ * ++ * In addition to the permissions in the GNU General Public License, the ++ * Free Software Foundation gives you unlimited permission to link the ++ * compiled version of this file with other programs, and to distribute ++ * those programs without any restriction coming from the use of this ++ * file. (The General Public License restrictions do apply in other ++ * respects; for example, they cover modification of the file, and ++ * distribution when not linked into another program.) ++ * ++ * This file is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; see the file COPYING. If not, write to ++ * the Free Software Foundation, 51 Franklin Street, Fifth Floor, ++ * Boston, MA 02110-1301, USA. ++ * ++ * As a special exception, if you link this library with files ++ * compiled with GCC to produce an executable, this does not cause ++ * the resulting executable to be covered by the GNU General Public License. ++ * This exception does not however invalidate any other reasons why ++ * the executable file might be covered by the GNU General Public License. ++ */ ++ ++ .file "e500crtres64gprctr.asm" ++ .section ".text" ++ #include "ppc-asm.h" ++ ++#ifdef __SPE__ ++ ++/* Routines for restoring 64-bit integer registers where the number of ++ registers to be restored is passed in CTR, called by the compiler. 
*/ ++ ++HIDDEN_FUNC(_rest64gpr_ctr_14) evldd 14,0(11) ++ bdz _rest64_gpr_ctr_done ++HIDDEN_FUNC(_rest64gpr_ctr_15) evldd 15,8(11) ++ bdz _rest64_gpr_ctr_done ++HIDDEN_FUNC(_rest64gpr_ctr_16) evldd 16,16(11) ++ bdz _rest64_gpr_ctr_done ++HIDDEN_FUNC(_rest64gpr_ctr_17) evldd 17,24(11) ++ bdz _rest64_gpr_ctr_done ++HIDDEN_FUNC(_rest64gpr_ctr_18) evldd 18,32(11) ++ bdz _rest64_gpr_ctr_done ++HIDDEN_FUNC(_rest64gpr_ctr_19) evldd 19,40(11) ++ bdz _rest64_gpr_ctr_done ++HIDDEN_FUNC(_rest64gpr_ctr_20) evldd 20,48(11) ++ bdz _rest64_gpr_ctr_done ++HIDDEN_FUNC(_rest64gpr_ctr_21) evldd 21,56(11) ++ bdz _rest64_gpr_ctr_done ++HIDDEN_FUNC(_rest64gpr_ctr_22) evldd 22,64(11) ++ bdz _rest64_gpr_ctr_done ++HIDDEN_FUNC(_rest64gpr_ctr_23) evldd 23,72(11) ++ bdz _rest64_gpr_ctr_done ++HIDDEN_FUNC(_rest64gpr_ctr_24) evldd 24,80(11) ++ bdz _rest64_gpr_ctr_done ++HIDDEN_FUNC(_rest64gpr_ctr_25) evldd 25,88(11) ++ bdz _rest64_gpr_ctr_done ++HIDDEN_FUNC(_rest64gpr_ctr_26) evldd 26,96(11) ++ bdz _rest64_gpr_ctr_done ++HIDDEN_FUNC(_rest64gpr_ctr_27) evldd 27,104(11) ++ bdz _rest64_gpr_ctr_done ++HIDDEN_FUNC(_rest64gpr_ctr_28) evldd 28,112(11) ++ bdz _rest64_gpr_ctr_done ++HIDDEN_FUNC(_rest64gpr_ctr_29) evldd 29,120(11) ++ bdz _rest64_gpr_ctr_done ++HIDDEN_FUNC(_rest64gpr_ctr_30) evldd 30,128(11) ++ bdz _rest64_gpr_ctr_done ++HIDDEN_FUNC(_rest64gpr_ctr_31) evldd 31,136(11) ++_rest64gpr_ctr_done: blr ++ ++#endif +--- /dev/null ++++ b/gcc/config/rs6000/e500crtrest32gpr.asm +@@ -0,0 +1,86 @@ ++/* ++ * Special support for e500 eabi and SVR4 ++ * ++ * Copyright (C) 2008 Free Software Foundation, Inc. ++ * Written by Nathan Froyd ++ * ++ * This file is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the ++ * Free Software Foundation; either version 2, or (at your option) any ++ * later version. ++ * ++ * In addition to the permissions in the GNU General Public License, the ++ * Free Software Foundation gives you unlimited permission to link the ++ * compiled version of this file with other programs, and to distribute ++ * those programs without any restriction coming from the use of this ++ * file. (The General Public License restrictions do apply in other ++ * respects; for example, they cover modification of the file, and ++ * distribution when not linked into another program.) ++ * ++ * This file is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; see the file COPYING. If not, write to ++ * the Free Software Foundation, 51 Franklin Street, Fifth Floor, ++ * Boston, MA 02110-1301, USA. ++ * ++ * As a special exception, if you link this library with files ++ * compiled with GCC to produce an executable, this does not cause ++ * the resulting executable to be covered by the GNU General Public License. ++ * This exception does not however invalidate any other reasons why ++ * the executable file might be covered by the GNU General Public License. ++ */ ++ ++ .file "e500crtrest32gpr.asm" ++ .section ".text" ++ #include "ppc-asm.h" ++ ++#ifdef __SPE__ ++ ++/* Routines for restoring 32-bit integer registers, called by the compiler. */ ++/* "Tail" versions that perform a tail call. 
*/ ++ ++HIDDEN_FUNC(_rest32gpr_14_t) lwz 14,-72(11) ++HIDDEN_FUNC(_rest32gpr_15_t) lwz 15,-68(11) ++HIDDEN_FUNC(_rest32gpr_16_t) lwz 16,-64(11) ++HIDDEN_FUNC(_rest32gpr_17_t) lwz 17,-60(11) ++HIDDEN_FUNC(_rest32gpr_18_t) lwz 18,-56(11) ++HIDDEN_FUNC(_rest32gpr_19_t) lwz 19,-52(11) ++HIDDEN_FUNC(_rest32gpr_20_t) lwz 20,-48(11) ++HIDDEN_FUNC(_rest32gpr_21_t) lwz 21,-44(11) ++HIDDEN_FUNC(_rest32gpr_22_t) lwz 22,-40(11) ++HIDDEN_FUNC(_rest32gpr_23_t) lwz 23,-36(11) ++HIDDEN_FUNC(_rest32gpr_24_t) lwz 24,-32(11) ++HIDDEN_FUNC(_rest32gpr_25_t) lwz 25,-28(11) ++HIDDEN_FUNC(_rest32gpr_26_t) lwz 26,-24(11) ++HIDDEN_FUNC(_rest32gpr_27_t) lwz 27,-20(11) ++HIDDEN_FUNC(_rest32gpr_28_t) lwz 28,-16(11) ++HIDDEN_FUNC(_rest32gpr_29_t) lwz 29,-12(11) ++HIDDEN_FUNC(_rest32gpr_30_t) lwz 30,-8(11) ++HIDDEN_FUNC(_rest32gpr_31_t) lwz 31,-4(11) ++ lwz 0,4(11) ++ mr 1,11 ++ blr ++FUNC_END(_rest32gpr_31_t) ++FUNC_END(_rest32gpr_30_t) ++FUNC_END(_rest32gpr_29_t) ++FUNC_END(_rest32gpr_28_t) ++FUNC_END(_rest32gpr_27_t) ++FUNC_END(_rest32gpr_26_t) ++FUNC_END(_rest32gpr_25_t) ++FUNC_END(_rest32gpr_24_t) ++FUNC_END(_rest32gpr_23_t) ++FUNC_END(_rest32gpr_22_t) ++FUNC_END(_rest32gpr_21_t) ++FUNC_END(_rest32gpr_20_t) ++FUNC_END(_rest32gpr_19_t) ++FUNC_END(_rest32gpr_18_t) ++FUNC_END(_rest32gpr_17_t) ++FUNC_END(_rest32gpr_16_t) ++FUNC_END(_rest32gpr_15_t) ++FUNC_END(_rest32gpr_14_t) ++ ++#endif +--- /dev/null ++++ b/gcc/config/rs6000/e500crtrest64gpr.asm +@@ -0,0 +1,85 @@ ++/* ++ * Special support for e500 eabi and SVR4 ++ * ++ * Copyright (C) 2008 Free Software Foundation, Inc. ++ * Written by Nathan Froyd ++ * ++ * This file is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the ++ * Free Software Foundation; either version 2, or (at your option) any ++ * later version. ++ * ++ * In addition to the permissions in the GNU General Public License, the ++ * Free Software Foundation gives you unlimited permission to link the ++ * compiled version of this file with other programs, and to distribute ++ * those programs without any restriction coming from the use of this ++ * file. (The General Public License restrictions do apply in other ++ * respects; for example, they cover modification of the file, and ++ * distribution when not linked into another program.) ++ * ++ * This file is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; see the file COPYING. If not, write to ++ * the Free Software Foundation, 51 Franklin Street, Fifth Floor, ++ * Boston, MA 02110-1301, USA. ++ * ++ * As a special exception, if you link this library with files ++ * compiled with GCC to produce an executable, this does not cause ++ * the resulting executable to be covered by the GNU General Public License. ++ * This exception does not however invalidate any other reasons why ++ * the executable file might be covered by the GNU General Public License. ++ */ ++ ++ .file "e500crtrest64gpr.asm" ++ .section ".text" ++ #include "ppc-asm.h" ++ ++#ifdef __SPE__ ++ ++/* "Tail" versions that perform a tail call. 
*/ ++ ++HIDDEN_FUNC(_rest64gpr_14_t) evldd 14,0(11) ++HIDDEN_FUNC(_rest64gpr_15_t) evldd 15,8(11) ++HIDDEN_FUNC(_rest64gpr_16_t) evldd 16,16(11) ++HIDDEN_FUNC(_rest64gpr_17_t) evldd 17,24(11) ++HIDDEN_FUNC(_rest64gpr_18_t) evldd 18,32(11) ++HIDDEN_FUNC(_rest64gpr_19_t) evldd 19,40(11) ++HIDDEN_FUNC(_rest64gpr_20_t) evldd 20,48(11) ++HIDDEN_FUNC(_rest64gpr_21_t) evldd 21,56(11) ++HIDDEN_FUNC(_rest64gpr_22_t) evldd 22,64(11) ++HIDDEN_FUNC(_rest64gpr_23_t) evldd 23,72(11) ++HIDDEN_FUNC(_rest64gpr_24_t) evldd 24,80(11) ++HIDDEN_FUNC(_rest64gpr_25_t) evldd 25,88(11) ++HIDDEN_FUNC(_rest64gpr_26_t) evldd 26,96(11) ++HIDDEN_FUNC(_rest64gpr_27_t) evldd 27,104(11) ++HIDDEN_FUNC(_rest64gpr_28_t) evldd 28,112(11) ++HIDDEN_FUNC(_rest64gpr_29_t) evldd 29,120(11) ++HIDDEN_FUNC(_rest64gpr_30_t) evldd 30,128(11) ++HIDDEN_FUNC(_rest64gpr_31_t) lwz 0,148(11) ++ evldd 31,136(11) ++ addi 1,11,144 ++ blr ++FUNC_END(_rest64gpr_31_t) ++FUNC_END(_rest64gpr_30_t) ++FUNC_END(_rest64gpr_29_t) ++FUNC_END(_rest64gpr_28_t) ++FUNC_END(_rest64gpr_27_t) ++FUNC_END(_rest64gpr_26_t) ++FUNC_END(_rest64gpr_25_t) ++FUNC_END(_rest64gpr_24_t) ++FUNC_END(_rest64gpr_23_t) ++FUNC_END(_rest64gpr_22_t) ++FUNC_END(_rest64gpr_21_t) ++FUNC_END(_rest64gpr_20_t) ++FUNC_END(_rest64gpr_19_t) ++FUNC_END(_rest64gpr_18_t) ++FUNC_END(_rest64gpr_17_t) ++FUNC_END(_rest64gpr_16_t) ++FUNC_END(_rest64gpr_15_t) ++FUNC_END(_rest64gpr_14_t) ++ ++#endif +--- /dev/null ++++ b/gcc/config/rs6000/e500crtresx32gpr.asm +@@ -0,0 +1,87 @@ ++/* ++ * Special support for e500 eabi and SVR4 ++ * ++ * Copyright (C) 2008 Free Software Foundation, Inc. ++ * Written by Nathan Froyd ++ * ++ * This file is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the ++ * Free Software Foundation; either version 2, or (at your option) any ++ * later version. ++ * ++ * In addition to the permissions in the GNU General Public License, the ++ * Free Software Foundation gives you unlimited permission to link the ++ * compiled version of this file with other programs, and to distribute ++ * those programs without any restriction coming from the use of this ++ * file. (The General Public License restrictions do apply in other ++ * respects; for example, they cover modification of the file, and ++ * distribution when not linked into another program.) ++ * ++ * This file is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; see the file COPYING. If not, write to ++ * the Free Software Foundation, 51 Franklin Street, Fifth Floor, ++ * Boston, MA 02110-1301, USA. ++ * ++ * As a special exception, if you link this library with files ++ * compiled with GCC to produce an executable, this does not cause ++ * the resulting executable to be covered by the GNU General Public License. ++ * This exception does not however invalidate any other reasons why ++ * the executable file might be covered by the GNU General Public License. ++ */ ++ ++ .file "e500crtresx32gpr.asm" ++ .section ".text" ++ #include "ppc-asm.h" ++ ++#ifdef __SPE__ ++ ++/* Routines for restoring 32-bit integer registers, called by the compiler. */ ++/* "Exit" versions that return to the caller's caller. 
*/ ++ ++HIDDEN_FUNC(_rest32gpr_14_x) lwz 14,-72(11) ++HIDDEN_FUNC(_rest32gpr_15_x) lwz 15,-68(11) ++HIDDEN_FUNC(_rest32gpr_16_x) lwz 16,-64(11) ++HIDDEN_FUNC(_rest32gpr_17_x) lwz 17,-60(11) ++HIDDEN_FUNC(_rest32gpr_18_x) lwz 18,-56(11) ++HIDDEN_FUNC(_rest32gpr_19_x) lwz 19,-52(11) ++HIDDEN_FUNC(_rest32gpr_20_x) lwz 20,-48(11) ++HIDDEN_FUNC(_rest32gpr_21_x) lwz 21,-44(11) ++HIDDEN_FUNC(_rest32gpr_22_x) lwz 22,-40(11) ++HIDDEN_FUNC(_rest32gpr_23_x) lwz 23,-36(11) ++HIDDEN_FUNC(_rest32gpr_24_x) lwz 24,-32(11) ++HIDDEN_FUNC(_rest32gpr_25_x) lwz 25,-28(11) ++HIDDEN_FUNC(_rest32gpr_26_x) lwz 26,-24(11) ++HIDDEN_FUNC(_rest32gpr_27_x) lwz 27,-20(11) ++HIDDEN_FUNC(_rest32gpr_28_x) lwz 28,-16(11) ++HIDDEN_FUNC(_rest32gpr_29_x) lwz 29,-12(11) ++HIDDEN_FUNC(_rest32gpr_30_x) lwz 30,-8(11) ++HIDDEN_FUNC(_rest32gpr_31_x) lwz 0,4(11) ++ lwz 31,-4(11) ++ mr 1,11 ++ mtlr 0 ++ blr ++FUNC_END(_rest32gpr_31_x) ++FUNC_END(_rest32gpr_30_x) ++FUNC_END(_rest32gpr_29_x) ++FUNC_END(_rest32gpr_28_x) ++FUNC_END(_rest32gpr_27_x) ++FUNC_END(_rest32gpr_26_x) ++FUNC_END(_rest32gpr_25_x) ++FUNC_END(_rest32gpr_24_x) ++FUNC_END(_rest32gpr_23_x) ++FUNC_END(_rest32gpr_22_x) ++FUNC_END(_rest32gpr_21_x) ++FUNC_END(_rest32gpr_20_x) ++FUNC_END(_rest32gpr_19_x) ++FUNC_END(_rest32gpr_18_x) ++FUNC_END(_rest32gpr_17_x) ++FUNC_END(_rest32gpr_16_x) ++FUNC_END(_rest32gpr_15_x) ++FUNC_END(_rest32gpr_14_x) ++ ++#endif +--- /dev/null ++++ b/gcc/config/rs6000/e500crtresx64gpr.asm +@@ -0,0 +1,86 @@ ++/* ++ * Special support for e500 eabi and SVR4 ++ * ++ * Copyright (C) 2008 Free Software Foundation, Inc. ++ * Written by Nathan Froyd ++ * ++ * This file is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the ++ * Free Software Foundation; either version 2, or (at your option) any ++ * later version. ++ * ++ * In addition to the permissions in the GNU General Public License, the ++ * Free Software Foundation gives you unlimited permission to link the ++ * compiled version of this file with other programs, and to distribute ++ * those programs without any restriction coming from the use of this ++ * file. (The General Public License restrictions do apply in other ++ * respects; for example, they cover modification of the file, and ++ * distribution when not linked into another program.) ++ * ++ * This file is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; see the file COPYING. If not, write to ++ * the Free Software Foundation, 51 Franklin Street, Fifth Floor, ++ * Boston, MA 02110-1301, USA. ++ * ++ * As a special exception, if you link this library with files ++ * compiled with GCC to produce an executable, this does not cause ++ * the resulting executable to be covered by the GNU General Public License. ++ * This exception does not however invalidate any other reasons why ++ * the executable file might be covered by the GNU General Public License. ++ */ ++ ++ .file "e500crtresx64gpr.asm" ++ .section ".text" ++ #include "ppc-asm.h" ++ ++#ifdef __SPE__ ++ ++/* "Exit" versions that return to their caller's caller. 
*/ ++ ++HIDDEN_FUNC(_rest64gpr_14_x) evldd 14,0(11) ++HIDDEN_FUNC(_rest64gpr_15_x) evldd 15,8(11) ++HIDDEN_FUNC(_rest64gpr_16_x) evldd 16,16(11) ++HIDDEN_FUNC(_rest64gpr_17_x) evldd 17,24(11) ++HIDDEN_FUNC(_rest64gpr_18_x) evldd 18,32(11) ++HIDDEN_FUNC(_rest64gpr_19_x) evldd 19,40(11) ++HIDDEN_FUNC(_rest64gpr_20_x) evldd 20,48(11) ++HIDDEN_FUNC(_rest64gpr_21_x) evldd 21,56(11) ++HIDDEN_FUNC(_rest64gpr_22_x) evldd 22,64(11) ++HIDDEN_FUNC(_rest64gpr_23_x) evldd 23,72(11) ++HIDDEN_FUNC(_rest64gpr_24_x) evldd 24,80(11) ++HIDDEN_FUNC(_rest64gpr_25_x) evldd 25,88(11) ++HIDDEN_FUNC(_rest64gpr_26_x) evldd 26,96(11) ++HIDDEN_FUNC(_rest64gpr_27_x) evldd 27,104(11) ++HIDDEN_FUNC(_rest64gpr_28_x) evldd 28,112(11) ++HIDDEN_FUNC(_rest64gpr_29_x) evldd 29,120(11) ++HIDDEN_FUNC(_rest64gpr_30_x) evldd 30,128(11) ++HIDDEN_FUNC(_rest64gpr_31_x) lwz 0,148(11) ++ evldd 31,136(11) ++ addi 1,11,144 ++ mtlr 0 ++ blr ++FUNC_END(_rest64gpr_31_x) ++FUNC_END(_rest64gpr_30_x) ++FUNC_END(_rest64gpr_29_x) ++FUNC_END(_rest64gpr_28_x) ++FUNC_END(_rest64gpr_27_x) ++FUNC_END(_rest64gpr_26_x) ++FUNC_END(_rest64gpr_25_x) ++FUNC_END(_rest64gpr_24_x) ++FUNC_END(_rest64gpr_23_x) ++FUNC_END(_rest64gpr_22_x) ++FUNC_END(_rest64gpr_21_x) ++FUNC_END(_rest64gpr_20_x) ++FUNC_END(_rest64gpr_19_x) ++FUNC_END(_rest64gpr_18_x) ++FUNC_END(_rest64gpr_17_x) ++FUNC_END(_rest64gpr_16_x) ++FUNC_END(_rest64gpr_15_x) ++FUNC_END(_rest64gpr_14_x) ++ ++#endif +--- /dev/null ++++ b/gcc/config/rs6000/e500crtsav32gpr.asm +@@ -0,0 +1,84 @@ ++/* ++ * Special support for e500 eabi and SVR4 ++ * ++ * Copyright (C) 2008 Free Software Foundation, Inc. ++ * Written by Nathan Froyd ++ * ++ * This file is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the ++ * Free Software Foundation; either version 2, or (at your option) any ++ * later version. ++ * ++ * In addition to the permissions in the GNU General Public License, the ++ * Free Software Foundation gives you unlimited permission to link the ++ * compiled version of this file with other programs, and to distribute ++ * those programs without any restriction coming from the use of this ++ * file. (The General Public License restrictions do apply in other ++ * respects; for example, they cover modification of the file, and ++ * distribution when not linked into another program.) ++ * ++ * This file is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; see the file COPYING. If not, write to ++ * the Free Software Foundation, 51 Franklin Street, Fifth Floor, ++ * Boston, MA 02110-1301, USA. ++ * ++ * As a special exception, if you link this library with files ++ * compiled with GCC to produce an executable, this does not cause ++ * the resulting executable to be covered by the GNU General Public License. ++ * This exception does not however invalidate any other reasons why ++ * the executable file might be covered by the GNU General Public License. ++ */ ++ ++ .file "e500crtsav32gpr.asm" ++ .section ".text" ++ #include "ppc-asm.h" ++ ++#ifdef __SPE__ ++ ++/* Routines for saving 32-bit integer registers, called by the compiler. */ ++/* "Bare" versions that simply return to their caller. 
*/ ++ ++HIDDEN_FUNC(_save32gpr_14) stw 14,-72(11) ++HIDDEN_FUNC(_save32gpr_15) stw 15,-68(11) ++HIDDEN_FUNC(_save32gpr_16) stw 16,-64(11) ++HIDDEN_FUNC(_save32gpr_17) stw 17,-60(11) ++HIDDEN_FUNC(_save32gpr_18) stw 18,-56(11) ++HIDDEN_FUNC(_save32gpr_19) stw 19,-52(11) ++HIDDEN_FUNC(_save32gpr_20) stw 20,-48(11) ++HIDDEN_FUNC(_save32gpr_21) stw 21,-44(11) ++HIDDEN_FUNC(_save32gpr_22) stw 22,-40(11) ++HIDDEN_FUNC(_save32gpr_23) stw 23,-36(11) ++HIDDEN_FUNC(_save32gpr_24) stw 24,-32(11) ++HIDDEN_FUNC(_save32gpr_25) stw 25,-28(11) ++HIDDEN_FUNC(_save32gpr_26) stw 26,-24(11) ++HIDDEN_FUNC(_save32gpr_27) stw 27,-20(11) ++HIDDEN_FUNC(_save32gpr_28) stw 28,-16(11) ++HIDDEN_FUNC(_save32gpr_29) stw 29,-12(11) ++HIDDEN_FUNC(_save32gpr_30) stw 30,-8(11) ++HIDDEN_FUNC(_save32gpr_31) stw 31,-4(11) ++ blr ++FUNC_END(_save32gpr_31) ++FUNC_END(_save32gpr_30) ++FUNC_END(_save32gpr_29) ++FUNC_END(_save32gpr_28) ++FUNC_END(_save32gpr_27) ++FUNC_END(_save32gpr_26) ++FUNC_END(_save32gpr_25) ++FUNC_END(_save32gpr_24) ++FUNC_END(_save32gpr_23) ++FUNC_END(_save32gpr_22) ++FUNC_END(_save32gpr_21) ++FUNC_END(_save32gpr_20) ++FUNC_END(_save32gpr_19) ++FUNC_END(_save32gpr_18) ++FUNC_END(_save32gpr_17) ++FUNC_END(_save32gpr_16) ++FUNC_END(_save32gpr_15) ++FUNC_END(_save32gpr_14) ++ ++#endif +--- /dev/null ++++ b/gcc/config/rs6000/e500crtsav64gpr.asm +@@ -0,0 +1,83 @@ ++/* ++ * Special support for e500 eabi and SVR4 ++ * ++ * Copyright (C) 2008 Free Software Foundation, Inc. ++ * Written by Nathan Froyd ++ * ++ * This file is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the ++ * Free Software Foundation; either version 2, or (at your option) any ++ * later version. ++ * ++ * In addition to the permissions in the GNU General Public License, the ++ * Free Software Foundation gives you unlimited permission to link the ++ * compiled version of this file with other programs, and to distribute ++ * those programs without any restriction coming from the use of this ++ * file. (The General Public License restrictions do apply in other ++ * respects; for example, they cover modification of the file, and ++ * distribution when not linked into another program.) ++ * ++ * This file is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; see the file COPYING. If not, write to ++ * the Free Software Foundation, 51 Franklin Street, Fifth Floor, ++ * Boston, MA 02110-1301, USA. ++ * ++ * As a special exception, if you link this library with files ++ * compiled with GCC to produce an executable, this does not cause ++ * the resulting executable to be covered by the GNU General Public License. ++ * This exception does not however invalidate any other reasons why ++ * the executable file might be covered by the GNU General Public License. ++ */ ++ ++ .file "e500crtsav64gpr.asm" ++ .section ".text" ++ #include "ppc-asm.h" ++ ++#ifdef __SPE__ ++ ++/* Routines for saving 64-bit integer registers, called by the compiler. 
*/ ++ ++HIDDEN_FUNC(_save64gpr_14) evstdd 14,0(11) ++HIDDEN_FUNC(_save64gpr_15) evstdd 15,8(11) ++HIDDEN_FUNC(_save64gpr_16) evstdd 16,16(11) ++HIDDEN_FUNC(_save64gpr_17) evstdd 17,24(11) ++HIDDEN_FUNC(_save64gpr_18) evstdd 18,32(11) ++HIDDEN_FUNC(_save64gpr_19) evstdd 19,40(11) ++HIDDEN_FUNC(_save64gpr_20) evstdd 20,48(11) ++HIDDEN_FUNC(_save64gpr_21) evstdd 21,56(11) ++HIDDEN_FUNC(_save64gpr_22) evstdd 22,64(11) ++HIDDEN_FUNC(_save64gpr_23) evstdd 23,72(11) ++HIDDEN_FUNC(_save64gpr_24) evstdd 24,80(11) ++HIDDEN_FUNC(_save64gpr_25) evstdd 25,88(11) ++HIDDEN_FUNC(_save64gpr_26) evstdd 26,96(11) ++HIDDEN_FUNC(_save64gpr_27) evstdd 27,104(11) ++HIDDEN_FUNC(_save64gpr_28) evstdd 28,112(11) ++HIDDEN_FUNC(_save64gpr_29) evstdd 29,120(11) ++HIDDEN_FUNC(_save64gpr_30) evstdd 30,128(11) ++HIDDEN_FUNC(_save64gpr_31) evstdd 31,136(11) ++ blr ++FUNC_END(_save64gpr_31) ++FUNC_END(_save64gpr_30) ++FUNC_END(_save64gpr_29) ++FUNC_END(_save64gpr_28) ++FUNC_END(_save64gpr_27) ++FUNC_END(_save64gpr_26) ++FUNC_END(_save64gpr_25) ++FUNC_END(_save64gpr_24) ++FUNC_END(_save64gpr_23) ++FUNC_END(_save64gpr_22) ++FUNC_END(_save64gpr_21) ++FUNC_END(_save64gpr_20) ++FUNC_END(_save64gpr_19) ++FUNC_END(_save64gpr_18) ++FUNC_END(_save64gpr_17) ++FUNC_END(_save64gpr_16) ++FUNC_END(_save64gpr_15) ++FUNC_END(_save64gpr_14) ++ ++#endif +--- /dev/null ++++ b/gcc/config/rs6000/e500crtsav64gprctr.asm +@@ -0,0 +1,102 @@ ++/* ++ * Special support for e500 eabi and SVR4 ++ * ++ * Copyright (C) 2008 Free Software Foundation, Inc. ++ * Written by Nathan Froyd ++ * ++ * This file is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the ++ * Free Software Foundation; either version 2, or (at your option) any ++ * later version. ++ * ++ * In addition to the permissions in the GNU General Public License, the ++ * Free Software Foundation gives you unlimited permission to link the ++ * compiled version of this file with other programs, and to distribute ++ * those programs without any restriction coming from the use of this ++ * file. (The General Public License restrictions do apply in other ++ * respects; for example, they cover modification of the file, and ++ * distribution when not linked into another program.) ++ * ++ * This file is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; see the file COPYING. If not, write to ++ * the Free Software Foundation, 51 Franklin Street, Fifth Floor, ++ * Boston, MA 02110-1301, USA. ++ * ++ * As a special exception, if you link this library with files ++ * compiled with GCC to produce an executable, this does not cause ++ * the resulting executable to be covered by the GNU General Public License. ++ * This exception does not however invalidate any other reasons why ++ * the executable file might be covered by the GNU General Public License. ++ */ ++ ++ .file "e500crtsav64gprctr.asm" ++ .section ".text" ++ #include "ppc-asm.h" ++ ++#ifdef __SPE__ ++ ++/* Routines for saving 64-bit integer registers where the number of ++ registers to be saved is passed in CTR, called by the compiler. */ ++/* "Bare" versions that return to their caller. 
*/ ++ ++HIDDEN_FUNC(_save64gpr_ctr_14) evstdd 14,0(11) ++ bdz _save64_gpr_ctr_done ++HIDDEN_FUNC(_save64gpr_ctr_15) evstdd 15,8(11) ++ bdz _save64_gpr_ctr_done ++HIDDEN_FUNC(_save64gpr_ctr_16) evstdd 16,16(11) ++ bdz _save64_gpr_ctr_done ++HIDDEN_FUNC(_save64gpr_ctr_17) evstdd 17,24(11) ++ bdz _save64_gpr_ctr_done ++HIDDEN_FUNC(_save64gpr_ctr_18) evstdd 18,32(11) ++ bdz _save64_gpr_ctr_done ++HIDDEN_FUNC(_save64gpr_ctr_19) evstdd 19,40(11) ++ bdz _save64_gpr_ctr_done ++HIDDEN_FUNC(_save64gpr_ctr_20) evstdd 20,48(11) ++ bdz _save64_gpr_ctr_done ++HIDDEN_FUNC(_save64gpr_ctr_21) evstdd 21,56(11) ++ bdz _save64_gpr_ctr_done ++HIDDEN_FUNC(_save64gpr_ctr_22) evstdd 22,64(11) ++ bdz _save64_gpr_ctr_done ++HIDDEN_FUNC(_save64gpr_ctr_23) evstdd 23,72(11) ++ bdz _save64_gpr_ctr_done ++HIDDEN_FUNC(_save64gpr_ctr_24) evstdd 24,80(11) ++ bdz _save64_gpr_ctr_done ++HIDDEN_FUNC(_save64gpr_ctr_25) evstdd 25,88(11) ++ bdz _save64_gpr_ctr_done ++HIDDEN_FUNC(_save64gpr_ctr_26) evstdd 26,96(11) ++ bdz _save64_gpr_ctr_done ++HIDDEN_FUNC(_save64gpr_ctr_27) evstdd 27,104(11) ++ bdz _save64_gpr_ctr_done ++HIDDEN_FUNC(_save64gpr_ctr_28) evstdd 28,112(11) ++ bdz _save64_gpr_ctr_done ++HIDDEN_FUNC(_save64gpr_ctr_29) evstdd 29,120(11) ++ bdz _save64_gpr_ctr_done ++HIDDEN_FUNC(_save64gpr_ctr_30) evstdd 30,128(11) ++ bdz _save64_gpr_ctr_done ++HIDDEN_FUNC(_save64gpr_ctr_31) evstdd 31,136(11) ++_save64gpr_ctr_done: blr ++FUNC_END(_save64gpr_ctr_31) ++FUNC_END(_save64gpr_ctr_30) ++FUNC_END(_save64gpr_ctr_29) ++FUNC_END(_save64gpr_ctr_28) ++FUNC_END(_save64gpr_ctr_27) ++FUNC_END(_save64gpr_ctr_26) ++FUNC_END(_save64gpr_ctr_25) ++FUNC_END(_save64gpr_ctr_24) ++FUNC_END(_save64gpr_ctr_23) ++FUNC_END(_save64gpr_ctr_22) ++FUNC_END(_save64gpr_ctr_21) ++FUNC_END(_save64gpr_ctr_20) ++FUNC_END(_save64gpr_ctr_19) ++FUNC_END(_save64gpr_ctr_18) ++FUNC_END(_save64gpr_ctr_17) ++FUNC_END(_save64gpr_ctr_16) ++FUNC_END(_save64gpr_ctr_15) ++FUNC_END(_save64gpr_ctr_14) ++ ++#endif +--- /dev/null ++++ b/gcc/config/rs6000/e500crtsavg32gpr.asm +@@ -0,0 +1,84 @@ ++/* ++ * Special support for e500 eabi and SVR4 ++ * ++ * Copyright (C) 2008 Free Software Foundation, Inc. ++ * Written by Nathan Froyd ++ * ++ * This file is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the ++ * Free Software Foundation; either version 2, or (at your option) any ++ * later version. ++ * ++ * In addition to the permissions in the GNU General Public License, the ++ * Free Software Foundation gives you unlimited permission to link the ++ * compiled version of this file with other programs, and to distribute ++ * those programs without any restriction coming from the use of this ++ * file. (The General Public License restrictions do apply in other ++ * respects; for example, they cover modification of the file, and ++ * distribution when not linked into another program.) ++ * ++ * This file is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; see the file COPYING. If not, write to ++ * the Free Software Foundation, 51 Franklin Street, Fifth Floor, ++ * Boston, MA 02110-1301, USA. 
++ * ++ * As a special exception, if you link this library with files ++ * compiled with GCC to produce an executable, this does not cause ++ * the resulting executable to be covered by the GNU General Public License. ++ * This exception does not however invalidate any other reasons why ++ * the executable file might be covered by the GNU General Public License. ++ */ ++ ++ .file "e500crtsavg32gpr.asm" ++ .section ".text" ++ #include "ppc-asm.h" ++ ++#ifdef __SPE__ ++ ++/* Routines for saving 32-bit integer registers, called by the compiler. */ ++/* "GOT" versions that load the address of the GOT into lr before returning. */ ++ ++HIDDEN_FUNC(_save32gpr_14_g) stw 14,-72(11) ++HIDDEN_FUNC(_save32gpr_15_g) stw 15,-68(11) ++HIDDEN_FUNC(_save32gpr_16_g) stw 16,-64(11) ++HIDDEN_FUNC(_save32gpr_17_g) stw 17,-60(11) ++HIDDEN_FUNC(_save32gpr_18_g) stw 18,-56(11) ++HIDDEN_FUNC(_save32gpr_19_g) stw 19,-52(11) ++HIDDEN_FUNC(_save32gpr_20_g) stw 20,-48(11) ++HIDDEN_FUNC(_save32gpr_21_g) stw 21,-44(11) ++HIDDEN_FUNC(_save32gpr_22_g) stw 22,-40(11) ++HIDDEN_FUNC(_save32gpr_23_g) stw 23,-36(11) ++HIDDEN_FUNC(_save32gpr_24_g) stw 24,-32(11) ++HIDDEN_FUNC(_save32gpr_25_g) stw 25,-28(11) ++HIDDEN_FUNC(_save32gpr_26_g) stw 26,-24(11) ++HIDDEN_FUNC(_save32gpr_27_g) stw 27,-20(11) ++HIDDEN_FUNC(_save32gpr_28_g) stw 28,-16(11) ++HIDDEN_FUNC(_save32gpr_29_g) stw 29,-12(11) ++HIDDEN_FUNC(_save32gpr_30_g) stw 30,-8(11) ++HIDDEN_FUNC(_save32gpr_31_g) stw 31,-4(11) ++ b _GLOBAL_OFFSET_TABLE_-4 ++FUNC_END(_save32gpr_31_g) ++FUNC_END(_save32gpr_30_g) ++FUNC_END(_save32gpr_29_g) ++FUNC_END(_save32gpr_28_g) ++FUNC_END(_save32gpr_27_g) ++FUNC_END(_save32gpr_26_g) ++FUNC_END(_save32gpr_25_g) ++FUNC_END(_save32gpr_24_g) ++FUNC_END(_save32gpr_23_g) ++FUNC_END(_save32gpr_22_g) ++FUNC_END(_save32gpr_21_g) ++FUNC_END(_save32gpr_20_g) ++FUNC_END(_save32gpr_19_g) ++FUNC_END(_save32gpr_18_g) ++FUNC_END(_save32gpr_17_g) ++FUNC_END(_save32gpr_16_g) ++FUNC_END(_save32gpr_15_g) ++FUNC_END(_save32gpr_14_g) ++ ++#endif +--- /dev/null ++++ b/gcc/config/rs6000/e500crtsavg64gpr.asm +@@ -0,0 +1,84 @@ ++/* ++ * Special support for e500 eabi and SVR4 ++ * ++ * Copyright (C) 2008 Free Software Foundation, Inc. ++ * Written by Nathan Froyd ++ * ++ * This file is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the ++ * Free Software Foundation; either version 2, or (at your option) any ++ * later version. ++ * ++ * In addition to the permissions in the GNU General Public License, the ++ * Free Software Foundation gives you unlimited permission to link the ++ * compiled version of this file with other programs, and to distribute ++ * those programs without any restriction coming from the use of this ++ * file. (The General Public License restrictions do apply in other ++ * respects; for example, they cover modification of the file, and ++ * distribution when not linked into another program.) ++ * ++ * This file is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; see the file COPYING. If not, write to ++ * the Free Software Foundation, 51 Franklin Street, Fifth Floor, ++ * Boston, MA 02110-1301, USA. 
++ * ++ * As a special exception, if you link this library with files ++ * compiled with GCC to produce an executable, this does not cause ++ * the resulting executable to be covered by the GNU General Public License. ++ * This exception does not however invalidate any other reasons why ++ * the executable file might be covered by the GNU General Public License. ++ */ ++ ++ .file "e500crtsavg64gpr.asm" ++ .section ".text" ++ #include "ppc-asm.h" ++ ++#ifdef __SPE__ ++ ++/* Routines for saving 64-bit integer registers, called by the compiler. */ ++/* "GOT" versions that load the address of the GOT into lr before returning. */ ++ ++HIDDEN_FUNC(_save64gpr_14_g) evstdd 14,0(11) ++HIDDEN_FUNC(_save64gpr_15_g) evstdd 15,8(11) ++HIDDEN_FUNC(_save64gpr_16_g) evstdd 16,16(11) ++HIDDEN_FUNC(_save64gpr_17_g) evstdd 17,24(11) ++HIDDEN_FUNC(_save64gpr_18_g) evstdd 18,32(11) ++HIDDEN_FUNC(_save64gpr_19_g) evstdd 19,40(11) ++HIDDEN_FUNC(_save64gpr_20_g) evstdd 20,48(11) ++HIDDEN_FUNC(_save64gpr_21_g) evstdd 21,56(11) ++HIDDEN_FUNC(_save64gpr_22_g) evstdd 22,64(11) ++HIDDEN_FUNC(_save64gpr_23_g) evstdd 23,72(11) ++HIDDEN_FUNC(_save64gpr_24_g) evstdd 24,80(11) ++HIDDEN_FUNC(_save64gpr_25_g) evstdd 25,88(11) ++HIDDEN_FUNC(_save64gpr_26_g) evstdd 26,96(11) ++HIDDEN_FUNC(_save64gpr_27_g) evstdd 27,104(11) ++HIDDEN_FUNC(_save64gpr_28_g) evstdd 28,112(11) ++HIDDEN_FUNC(_save64gpr_29_g) evstdd 29,120(11) ++HIDDEN_FUNC(_save64gpr_30_g) evstdd 30,128(11) ++HIDDEN_FUNC(_save64gpr_31_g) evstdd 31,136(11) ++ b _GLOBAL_OFFSET_TABLE_-4 ++FUNC_END(_save64gpr_31_g) ++FUNC_END(_save64gpr_30_g) ++FUNC_END(_save64gpr_29_g) ++FUNC_END(_save64gpr_28_g) ++FUNC_END(_save64gpr_27_g) ++FUNC_END(_save64gpr_26_g) ++FUNC_END(_save64gpr_25_g) ++FUNC_END(_save64gpr_24_g) ++FUNC_END(_save64gpr_23_g) ++FUNC_END(_save64gpr_22_g) ++FUNC_END(_save64gpr_21_g) ++FUNC_END(_save64gpr_20_g) ++FUNC_END(_save64gpr_19_g) ++FUNC_END(_save64gpr_18_g) ++FUNC_END(_save64gpr_17_g) ++FUNC_END(_save64gpr_16_g) ++FUNC_END(_save64gpr_15_g) ++FUNC_END(_save64gpr_14_g) ++ ++#endif +--- /dev/null ++++ b/gcc/config/rs6000/e500crtsavg64gprctr.asm +@@ -0,0 +1,101 @@ ++/* ++ * Special support for e500 eabi and SVR4 ++ * ++ * Copyright (C) 2008 Free Software Foundation, Inc. ++ * Written by Nathan Froyd ++ * ++ * This file is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the ++ * Free Software Foundation; either version 2, or (at your option) any ++ * later version. ++ * ++ * In addition to the permissions in the GNU General Public License, the ++ * Free Software Foundation gives you unlimited permission to link the ++ * compiled version of this file with other programs, and to distribute ++ * those programs without any restriction coming from the use of this ++ * file. (The General Public License restrictions do apply in other ++ * respects; for example, they cover modification of the file, and ++ * distribution when not linked into another program.) ++ * ++ * This file is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; see the file COPYING. If not, write to ++ * the Free Software Foundation, 51 Franklin Street, Fifth Floor, ++ * Boston, MA 02110-1301, USA. 
++ * ++ * As a special exception, if you link this library with files ++ * compiled with GCC to produce an executable, this does not cause ++ * the resulting executable to be covered by the GNU General Public License. ++ * This exception does not however invalidate any other reasons why ++ * the executable file might be covered by the GNU General Public License. ++ */ ++ ++ .file "e500crtsavg64gprctr.asm" ++ .section ".text" ++ #include "ppc-asm.h" ++ ++#ifdef __SPE__ ++ ++/* Routines for saving 64-bit integer registers, called by the compiler. */ ++/* "GOT" versions that load the address of the GOT into lr before returning. */ ++ ++HIDDEN_FUNC(_save64gpr_ctr_14_g) evstdd 14,0(11) ++ bdz _save64_gpr_ctr_g_done ++HIDDEN_FUNC(_save64gpr_ctr_15_g) evstdd 15,8(11) ++ bdz _save64_gpr_ctr_g_done ++HIDDEN_FUNC(_save64gpr_ctr_16_g) evstdd 16,16(11) ++ bdz _save64_gpr_ctr_g_done ++HIDDEN_FUNC(_save64gpr_ctr_17_g) evstdd 17,24(11) ++ bdz _save64_gpr_ctr_g_done ++HIDDEN_FUNC(_save64gpr_ctr_18_g) evstdd 18,32(11) ++ bdz _save64_gpr_ctr_g_done ++HIDDEN_FUNC(_save64gpr_ctr_19_g) evstdd 19,40(11) ++ bdz _save64_gpr_ctr_g_done ++HIDDEN_FUNC(_save64gpr_ctr_20_g) evstdd 20,48(11) ++ bdz _save64_gpr_ctr_g_done ++HIDDEN_FUNC(_save64gpr_ctr_21_g) evstdd 21,56(11) ++ bdz _save64_gpr_ctr_g_done ++HIDDEN_FUNC(_save64gpr_ctr_22_g) evstdd 22,64(11) ++ bdz _save64_gpr_ctr_g_done ++HIDDEN_FUNC(_save64gpr_ctr_23_g) evstdd 23,72(11) ++ bdz _save64_gpr_ctr_g_done ++HIDDEN_FUNC(_save64gpr_ctr_24_g) evstdd 24,80(11) ++ bdz _save64_gpr_ctr_g_done ++HIDDEN_FUNC(_save64gpr_ctr_25_g) evstdd 25,88(11) ++ bdz _save64_gpr_ctr_g_done ++HIDDEN_FUNC(_save64gpr_ctr_26_g) evstdd 26,96(11) ++ bdz _save64_gpr_ctr_g_done ++HIDDEN_FUNC(_save64gpr_ctr_27_g) evstdd 27,104(11) ++ bdz _save64_gpr_ctr_g_done ++HIDDEN_FUNC(_save64gpr_ctr_28_g) evstdd 28,112(11) ++ bdz _save64_gpr_ctr_g_done ++HIDDEN_FUNC(_save64gpr_ctr_29_g) evstdd 29,120(11) ++ bdz _save64_gpr_ctr_g_done ++HIDDEN_FUNC(_save64gpr_ctr_30_g) evstdd 30,128(11) ++ bdz _save64_gpr_ctr_g_done ++HIDDEN_FUNC(_save64gpr_ctr_31_g) evstdd 31,136(11) ++_save64gpr_ctr_g_done: b _GLOBAL_OFFSET_TABLE_-4 ++FUNC_END(_save64gpr_ctr_31_g) ++FUNC_END(_save64gpr_ctr_30_g) ++FUNC_END(_save64gpr_ctr_29_g) ++FUNC_END(_save64gpr_ctr_28_g) ++FUNC_END(_save64gpr_ctr_27_g) ++FUNC_END(_save64gpr_ctr_26_g) ++FUNC_END(_save64gpr_ctr_25_g) ++FUNC_END(_save64gpr_ctr_24_g) ++FUNC_END(_save64gpr_ctr_23_g) ++FUNC_END(_save64gpr_ctr_22_g) ++FUNC_END(_save64gpr_ctr_21_g) ++FUNC_END(_save64gpr_ctr_20_g) ++FUNC_END(_save64gpr_ctr_19_g) ++FUNC_END(_save64gpr_ctr_18_g) ++FUNC_END(_save64gpr_ctr_17_g) ++FUNC_END(_save64gpr_ctr_16_g) ++FUNC_END(_save64gpr_ctr_15_g) ++FUNC_END(_save64gpr_ctr_14_g) ++ ++#endif +--- /dev/null ++++ b/gcc/config/rs6000/e500mc.h +@@ -0,0 +1,46 @@ ++/* Core target definitions for GNU compiler ++ for IBM RS/6000 PowerPC targeted to embedded ELF systems. ++ Copyright (C) 1995, 1996, 2000, 2003, 2004, 2007 Free Software Foundation, Inc. ++ Contributed by Cygnus Support. ++ ++ This file is part of GCC. ++ ++ GCC is free software; you can redistribute it and/or modify it ++ under the terms of the GNU General Public License as published ++ by the Free Software Foundation; either version 3, or (at your ++ option) any later version. ++ ++ GCC is distributed in the hope that it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY ++ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public ++ License for more details. 
++ ++ You should have received a copy of the GNU General Public License ++ along with GCC; see the file COPYING3. If not see ++ . */ ++ ++/* Add -meabi to target flags. */ ++#undef TARGET_DEFAULT ++#define TARGET_DEFAULT (MASK_POWERPC | MASK_NEW_MNEMONICS | MASK_EABI) ++ ++#undef TARGET_VERSION ++#define TARGET_VERSION fprintf (stderr, " (PowerPC Embedded)"); ++ ++#undef TARGET_OS_CPP_BUILTINS ++#define TARGET_OS_CPP_BUILTINS() \ ++ do \ ++ { \ ++ builtin_define_std ("PPC"); \ ++ builtin_define ("__embedded__"); \ ++ builtin_assert ("system=embedded"); \ ++ builtin_assert ("cpu=powerpc"); \ ++ builtin_assert ("machine=powerpc"); \ ++ TARGET_OS_SYSV_CPP_BUILTINS (); \ ++ } \ ++ while (0) ++ ++#undef CC1_EXTRA_SPEC ++#define CC1_EXTRA_SPEC "-maix-struct-return" ++ ++#undef ASM_DEFAULT_SPEC ++#define ASM_DEFAULT_SPEC "-mppc%{m64:64} -me500mc" +--- /dev/null ++++ b/gcc/config/rs6000/e500mc.md +@@ -0,0 +1,198 @@ ++;; Pipeline description for Motorola PowerPC e500mc core. ++;; Copyright (C) 2008 Free Software Foundation, Inc. ++;; Contributed by Edmar Wienskoski (edmar@freescale.com) ++;; ++;; This file is part of GCC. ++;; ++;; GCC is free software; you can redistribute it and/or modify it ++;; under the terms of the GNU General Public License as published ++;; by the Free Software Foundation; either version 3, or (at your ++;; option) any later version. ++;; ++;; GCC is distributed in the hope that it will be useful, but WITHOUT ++;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY ++;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public ++;; License for more details. ++;; ++;; You should have received a copy of the GNU General Public License ++;; along with GCC; see the file COPYING3. If not see ++;; . ++;; ++;; e500mc 32-bit SU(2), LSU, FPU, BPU ++;; Max issue 3 insns/clock cycle (includes 1 branch) ++;; FP is half clocked, timings of other instructions are as in the e500v2. ++ ++(define_automaton "e500mc_most,e500mc_long,e500mc_retire") ++(define_cpu_unit "e500mc_decode_0,e500mc_decode_1" "e500mc_most") ++(define_cpu_unit "e500mc_issue_0,e500mc_issue_1" "e500mc_most") ++(define_cpu_unit "e500mc_retire_0,e500mc_retire_1" "e500mc_retire") ++ ++;; SU. ++(define_cpu_unit "e500mc_su0_stage0,e500mc_su1_stage0" "e500mc_most") ++ ++;; MU. ++(define_cpu_unit "e500mc_mu_stage0,e500mc_mu_stage1" "e500mc_most") ++(define_cpu_unit "e500mc_mu_stage2,e500mc_mu_stage3" "e500mc_most") ++ ++;; Non-pipelined division. ++(define_cpu_unit "e500mc_mu_div" "e500mc_long") ++ ++;; LSU. ++(define_cpu_unit "e500mc_lsu" "e500mc_most") ++ ++;; FPU. ++(define_cpu_unit "e500mc_fpu" "e500mc_most") ++ ++;; Branch unit. ++(define_cpu_unit "e500mc_bu" "e500mc_most") ++ ++;; The following units are used to make the automata deterministic. ++(define_cpu_unit "present_e500mc_decode_0" "e500mc_most") ++(define_cpu_unit "present_e500mc_issue_0" "e500mc_most") ++(define_cpu_unit "present_e500mc_retire_0" "e500mc_retire") ++(define_cpu_unit "present_e500mc_su0_stage0" "e500mc_most") ++ ++;; The following sets to make automata deterministic when option ndfa is used. ++(presence_set "present_e500mc_decode_0" "e500mc_decode_0") ++(presence_set "present_e500mc_issue_0" "e500mc_issue_0") ++(presence_set "present_e500mc_retire_0" "e500mc_retire_0") ++(presence_set "present_e500mc_su0_stage0" "e500mc_su0_stage0") ++ ++;; Some useful abbreviations. 
++(define_reservation "e500mc_decode" ++ "e500mc_decode_0|e500mc_decode_1+present_e500mc_decode_0") ++(define_reservation "e500mc_issue" ++ "e500mc_issue_0|e500mc_issue_1+present_e500mc_issue_0") ++(define_reservation "e500mc_retire" ++ "e500mc_retire_0|e500mc_retire_1+present_e500mc_retire_0") ++(define_reservation "e500mc_su_stage0" ++ "e500mc_su0_stage0|e500mc_su1_stage0+present_e500mc_su0_stage0") ++ ++;; Simple SU insns. ++(define_insn_reservation "e500mc_su" 1 ++ (and (eq_attr "type" "integer,insert_word,insert_dword,cmp,compare,\ ++ delayed_compare,var_delayed_compare,fast_compare,\ ++ shift,trap,var_shift_rotate,cntlz,exts") ++ (eq_attr "cpu" "ppce500mc")) ++ "e500mc_decode,e500mc_issue+e500mc_su_stage0+e500mc_retire") ++ ++(define_insn_reservation "e500mc_two" 1 ++ (and (eq_attr "type" "two") ++ (eq_attr "cpu" "ppce500mc")) ++ "e500mc_decode,e500mc_issue+e500mc_su_stage0+e500mc_retire,\ ++ e500mc_issue+e500mc_su_stage0+e500mc_retire") ++ ++(define_insn_reservation "e500mc_three" 1 ++ (and (eq_attr "type" "three") ++ (eq_attr "cpu" "ppce500mc")) ++ "e500mc_decode,e500mc_issue+e500mc_su_stage0+e500mc_retire,\ ++ e500mc_issue+e500mc_su_stage0+e500mc_retire,\ ++ e500mc_issue+e500mc_su_stage0+e500mc_retire") ++ ++;; Multiply. ++(define_insn_reservation "e500mc_multiply" 4 ++ (and (eq_attr "type" "imul,imul2,imul3,imul_compare") ++ (eq_attr "cpu" "ppce500mc")) ++ "e500mc_decode,e500mc_issue+e500mc_mu_stage0,e500mc_mu_stage1,\ ++ e500mc_mu_stage2,e500mc_mu_stage3+e500mc_retire") ++ ++;; Divide. We use the average latency time here. ++(define_insn_reservation "e500mc_divide" 14 ++ (and (eq_attr "type" "idiv") ++ (eq_attr "cpu" "ppce500mc")) ++ "e500mc_decode,e500mc_issue+e500mc_mu_stage0+e500mc_mu_div,\ ++ e500mc_mu_div*13") ++ ++;; Branch. ++(define_insn_reservation "e500mc_branch" 1 ++ (and (eq_attr "type" "jmpreg,branch,isync") ++ (eq_attr "cpu" "ppce500mc")) ++ "e500mc_decode,e500mc_bu,e500mc_retire") ++ ++;; CR logical. ++(define_insn_reservation "e500mc_cr_logical" 1 ++ (and (eq_attr "type" "cr_logical,delayed_cr") ++ (eq_attr "cpu" "ppce500mc")) ++ "e500mc_decode,e500mc_bu,e500mc_retire") ++ ++;; Mfcr. ++(define_insn_reservation "e500mc_mfcr" 1 ++ (and (eq_attr "type" "mfcr") ++ (eq_attr "cpu" "ppce500mc")) ++ "e500mc_decode,e500mc_issue+e500mc_su1_stage0+e500mc_retire") ++ ++;; Mtcrf. ++(define_insn_reservation "e500mc_mtcrf" 1 ++ (and (eq_attr "type" "mtcr") ++ (eq_attr "cpu" "ppce500mc")) ++ "e500mc_decode,e500mc_issue+e500mc_su1_stage0+e500mc_retire") ++ ++;; Mtjmpr. ++(define_insn_reservation "e500mc_mtjmpr" 1 ++ (and (eq_attr "type" "mtjmpr,mfjmpr") ++ (eq_attr "cpu" "ppce500mc")) ++ "e500mc_decode,e500mc_issue+e500mc_su_stage0+e500mc_retire") ++ ++;; Brinc. ++(define_insn_reservation "e500mc_brinc" 1 ++ (and (eq_attr "type" "brinc") ++ (eq_attr "cpu" "ppce500mc")) ++ "e500mc_decode,e500mc_issue+e500mc_su_stage0+e500mc_retire") ++ ++;; Loads. ++(define_insn_reservation "e500mc_load" 3 ++ (and (eq_attr "type" "load,load_ext,load_ext_u,load_ext_ux,load_ux,load_u,\ ++ load_l,sync") ++ (eq_attr "cpu" "ppce500mc")) ++ "e500mc_decode,e500mc_issue+e500mc_lsu,nothing,e500mc_retire") ++ ++(define_insn_reservation "e500mc_fpload" 4 ++ (and (eq_attr "type" "fpload,fpload_ux,fpload_u") ++ (eq_attr "cpu" "ppce500mc")) ++ "e500mc_decode,e500mc_issue+e500mc_lsu,nothing*2,e500mc_retire") ++ ++;; Stores. 
++(define_insn_reservation "e500mc_store" 3 ++ (and (eq_attr "type" "store,store_ux,store_u,store_c") ++ (eq_attr "cpu" "ppce500mc")) ++ "e500mc_decode,e500mc_issue+e500mc_lsu,nothing,e500mc_retire") ++ ++(define_insn_reservation "e500mc_fpstore" 3 ++ (and (eq_attr "type" "fpstore,fpstore_ux,fpstore_u") ++ (eq_attr "cpu" "ppce500mc")) ++ "e500mc_decode,e500mc_issue+e500mc_lsu,nothing,e500mc_retire") ++ ++;; Simple FP. ++(define_insn_reservation "e500mc_simple_float" 8 ++ (and (eq_attr "type" "fpsimple") ++ (eq_attr "cpu" "ppce500mc")) ++ "e500mc_decode,e500mc_issue+e500mc_fpu,nothing*6,e500mc_retire") ++ ++;; FP. ++(define_insn_reservation "e500mc_float" 8 ++ (and (eq_attr "type" "fp") ++ (eq_attr "cpu" "ppce500mc")) ++ "e500mc_decode,e500mc_issue+e500mc_fpu,nothing*6,e500mc_retire") ++ ++(define_insn_reservation "e500mc_fpcompare" 8 ++ (and (eq_attr "type" "fpcompare") ++ (eq_attr "cpu" "ppce500mc")) ++ "e500mc_decode,e500mc_issue+e500mc_fpu,nothing*6,e500mc_retire") ++ ++;; The following ignores the retire unit to avoid a large automata. ++ ++(define_insn_reservation "e500mc_dmul" 10 ++ (and (eq_attr "type" "dmul") ++ (eq_attr "cpu" "ppce500mc")) ++ "e500mc_decode,e500mc_issue+e500mc_fpu") ++ ++;; FP divides are not pipelined. ++(define_insn_reservation "e500mc_sdiv" 36 ++ (and (eq_attr "type" "sdiv") ++ (eq_attr "cpu" "ppce500mc")) ++ "e500mc_decode,e500mc_issue+e500mc_fpu,e500mc_fpu*35") ++ ++(define_insn_reservation "e500mc_ddiv" 66 ++ (and (eq_attr "type" "ddiv") ++ (eq_attr "cpu" "ppce500mc")) ++ "e500mc_decode,e500mc_issue+e500mc_fpu,e500mc_fpu*65") +--- a/gcc/config/rs6000/eabi-ci.asm ++++ b/gcc/config/rs6000/eabi-ci.asm +@@ -111,6 +111,7 @@ __EH_FRAME_BEGIN__: + /* Head of __init function used for static constructors. */ + .section ".init","ax" + .align 2 ++FUNC_START(_init) + FUNC_START(__init) + stwu 1,-16(1) + mflr 0 +@@ -119,6 +120,7 @@ FUNC_START(__init) + /* Head of __fini function used for static destructors. */ + .section ".fini","ax" + .align 2 ++FUNC_START(_fini) + FUNC_START(__fini) + stwu 1,-16(1) + mflr 0 +--- a/gcc/config/rs6000/eabi-cn.asm ++++ b/gcc/config/rs6000/eabi-cn.asm +@@ -36,7 +36,6 @@ Boston, MA 02110-1301, USA. + /* This file just supplies labeled ending points for the .got* and other + special sections. It is linked in last after other modules. */ + +- .file "crtn.s" + .ident "GNU C crtn.s" + + #ifndef __powerpc64__ +--- a/gcc/config/rs6000/eabi.asm ++++ b/gcc/config/rs6000/eabi.asm +@@ -114,6 +114,9 @@ + .Linit = .-.LCTOC1 + .long .Linit_p /* address of variable to say we've been called */ + ++.Lfini = .-.LCTOC1 ++ .long __fini /* global destructors in .fini */ ++ + .text + .align 2 + .Lptr: +@@ -226,10 +229,12 @@ FUNC_START(__eabi) + + lwz 2,.Lsda2(11) /* load r2 with _SDA2_BASE_ address */ + +-/* Done adjusting pointers, return by way of doing the C++ global constructors. */ ++/* Done adjusting pointers. We used to return here by way of doing the ++ C++ global constructors, but we currently let newlib take care of ++ running them and registering finalizers. */ + + .Ldone: +- b FUNC_NAME(__init) /* do any C++ global constructors (which returns to caller) */ ++ blr + FUNC_END(__eabi) + + /* Special subroutine to convert a bunch of pointers directly. +@@ -240,7 +245,7 @@ FUNC_END(__eabi) + r11 has the address of .LCTOC1 in it. + r12 has the value to add to each pointer + r13 .. r31 are unchanged */ +- ++#ifdef _RELOCATABLE + FUNC_START(__eabi_convert) + cmplw 1,3,4 /* any pointers to convert? 
*/ + subf 5,3,4 /* calculate number of words to convert */ +@@ -295,5 +300,6 @@ FUNC_START(__eabi_uconvert) + blr + + FUNC_END(__eabi_uconvert) ++#endif /* _RELOCATABLE */ + + #endif +--- a/gcc/config/rs6000/eabi.h ++++ b/gcc/config/rs6000/eabi.h +@@ -23,10 +23,6 @@ + #undef TARGET_DEFAULT + #define TARGET_DEFAULT (MASK_POWERPC | MASK_NEW_MNEMONICS | MASK_EABI) + +-/* Invoke an initializer function to set up the GOT. */ +-#define NAME__MAIN "__eabi" +-#define INVOKE__main +- + #undef TARGET_VERSION + #define TARGET_VERSION fprintf (stderr, " (PowerPC Embedded)"); + +@@ -42,3 +38,20 @@ + TARGET_OS_SYSV_CPP_BUILTINS (); \ + } \ + while (0) ++ ++/* Add -te500v1 and -te500v2 options for convenience in generating ++ multilibs. */ ++#undef CC1_EXTRA_SPEC ++#define CC1_EXTRA_SPEC \ ++ "%{te500v1: -mcpu=8540 -mfloat-gprs=single -mspe=yes -mabi=spe} " \ ++ "%{te500v2: -mcpu=8548 -mfloat-gprs=double -mspe=yes -mabi=spe} " \ ++ "%{te600: -mcpu=7400 -maltivec -mabi=altivec}" \ ++ "%{te500mc: -mcpu=e500mc -maix-struct-return}" ++ ++#undef ASM_DEFAULT_SPEC ++#define ASM_DEFAULT_SPEC \ ++ "%{te500v1:-mppc -mspe -me500 ; \ ++ te500v2:-mppc -mspe -me500 ; \ ++ te600:-mppc -maltivec ; \ ++ te500mc:-mppc -me500mc ; \ ++ :-mppc%{m64:64}}" +--- a/gcc/config/rs6000/linux.h ++++ b/gcc/config/rs6000/linux.h +@@ -128,3 +128,29 @@ + #ifdef TARGET_DEFAULT_LONG_DOUBLE_128 + #define RS6000_DEFAULT_LONG_DOUBLE_SIZE 128 + #endif ++ ++/* Add -te500v1 and -te500v2 options for convenience in generating ++ multilibs. */ ++#undef CC1_EXTRA_SPEC ++#define CC1_EXTRA_SPEC \ ++ "%{te500v1: -mcpu=8540 -mfloat-gprs=single -mspe=yes -mabi=spe} " \ ++ "%{te500v2: -mcpu=8548 -mfloat-gprs=double -mspe=yes -mabi=spe} " \ ++ "%{te600: -mcpu=7400 -maltivec -mabi=altivec}" \ ++ "%{te500mc: -mcpu=e500mc}" ++ ++#undef ASM_DEFAULT_SPEC ++#define ASM_DEFAULT_SPEC \ ++ "%{te500v1:-mppc -mspe -me500 ; \ ++ te500v2:-mppc -mspe -me500 ; \ ++ te600:-mppc -maltivec ; \ ++ te500mc:-me500mc ; \ ++ :-mppc%{m64:64}}" ++ ++/* The various C libraries each have their own subdirectory. */ ++#undef SYSROOT_SUFFIX_SPEC ++#define SYSROOT_SUFFIX_SPEC \ ++ "%{msoft-float:/nof ; \ ++ te600:/te600 ; \ ++ te500v1:/te500v1 ; \ ++ te500v2:/te500v2 ; \ ++ te500mc:/te500mc}" +--- /dev/null ++++ b/gcc/config/rs6000/montavista-linux.h +@@ -0,0 +1,41 @@ ++/* MontaVista GNU/Linux Configuration. ++ Copyright (C) 2009 ++ Free Software Foundation, Inc. ++ ++This file is part of GCC. ++ ++GCC is free software; you can redistribute it and/or modify ++it under the terms of the GNU General Public License as published by ++the Free Software Foundation; either version 3, or (at your option) ++any later version. ++ ++GCC is distributed in the hope that it will be useful, ++but WITHOUT ANY WARRANTY; without even the implied warranty of ++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++GNU General Public License for more details. ++ ++You should have received a copy of the GNU General Public License ++along with GCC; see the file COPYING3. If not see ++. */ ++ ++/* Add -te500v2 option for convenience in generating multilibs. */ ++#undef CC1_EXTRA_SPEC ++#define CC1_EXTRA_SPEC \ ++ "%{te500v2: -mcpu=8548 -mfloat-gprs=double -mspe=yes -mabi=spe} " \ ++ "%{te600: -mcpu=7400 -maltivec -mabi=altivec}" \ ++ "%{te500mc: -mcpu=e500mc}" ++ ++#undef ASM_DEFAULT_SPEC ++#define ASM_DEFAULT_SPEC \ ++ "%{te500v2:-mppc -mspe -me500 ; \ ++ te600:-mppc -maltivec ; \ ++ te500mc:-me500mc ; \ ++ :-mppc}" ++ ++/* The various C libraries each have their own subdirectory. 
*/ ++#undef SYSROOT_SUFFIX_SPEC ++#define SYSROOT_SUFFIX_SPEC \ ++ "%{msoft-float:/soft-float ; \ ++ te600:/te600 ; \ ++ te500v2:/te500v2 ; \ ++ te500mc:/te500mc}" +--- a/gcc/config/rs6000/netbsd.h ++++ b/gcc/config/rs6000/netbsd.h +@@ -75,8 +75,7 @@ + #define STARTFILE_SPEC NETBSD_STARTFILE_SPEC + + #undef ENDFILE_SPEC +-#define ENDFILE_SPEC \ +- "crtsavres%O%s %(netbsd_endfile_spec)" ++#define ENDFILE_SPEC "%(netbsd_endfile_spec)" + + #undef LIB_SPEC + #define LIB_SPEC NETBSD_LIB_SPEC +--- a/gcc/config/rs6000/paired.md ++++ b/gcc/config/rs6000/paired.md +@@ -28,7 +28,7 @@ + (UNSPEC_EXTODD_V2SF 333) + ]) + +-(define_insn "negv2sf2" ++(define_insn "paired_negv2sf2" + [(set (match_operand:V2SF 0 "gpc_reg_operand" "=f") + (neg:V2SF (match_operand:V2SF 1 "gpc_reg_operand" "f")))] + "TARGET_PAIRED_FLOAT" +@@ -42,7 +42,7 @@ + "ps_rsqrte %0,%1" + [(set_attr "type" "fp")]) + +-(define_insn "absv2sf2" ++(define_insn "paired_absv2sf2" + [(set (match_operand:V2SF 0 "gpc_reg_operand" "=f") + (abs:V2SF (match_operand:V2SF 1 "gpc_reg_operand" "f")))] + "TARGET_PAIRED_FLOAT" +@@ -56,7 +56,7 @@ + "ps_nabs %0,%1" + [(set_attr "type" "fp")]) + +-(define_insn "addv2sf3" ++(define_insn "paired_addv2sf3" + [(set (match_operand:V2SF 0 "gpc_reg_operand" "=f") + (plus:V2SF (match_operand:V2SF 1 "gpc_reg_operand" "%f") + (match_operand:V2SF 2 "gpc_reg_operand" "f")))] +@@ -64,7 +64,7 @@ + "ps_add %0,%1,%2" + [(set_attr "type" "fp")]) + +-(define_insn "subv2sf3" ++(define_insn "paired_subv2sf3" + [(set (match_operand:V2SF 0 "gpc_reg_operand" "=f") + (minus:V2SF (match_operand:V2SF 1 "gpc_reg_operand" "f") + (match_operand:V2SF 2 "gpc_reg_operand" "f")))] +@@ -72,7 +72,7 @@ + "ps_sub %0,%1,%2" + [(set_attr "type" "fp")]) + +-(define_insn "mulv2sf3" ++(define_insn "paired_mulv2sf3" + [(set (match_operand:V2SF 0 "gpc_reg_operand" "=f") + (mult:V2SF (match_operand:V2SF 1 "gpc_reg_operand" "%f") + (match_operand:V2SF 2 "gpc_reg_operand" "f")))] +@@ -87,7 +87,7 @@ + "ps_res %0,%1" + [(set_attr "type" "fp")]) + +-(define_insn "divv2sf3" ++(define_insn "paired_divv2sf3" + [(set (match_operand:V2SF 0 "gpc_reg_operand" "=f") + (div:V2SF (match_operand:V2SF 1 "gpc_reg_operand" "f") + (match_operand:V2SF 2 "gpc_reg_operand" "f")))] +--- a/gcc/config/rs6000/ppc-asm.h ++++ b/gcc/config/rs6000/ppc-asm.h +@@ -110,6 +110,11 @@ name: \ + .globl GLUE(.,name); \ + GLUE(.,name): + ++#define HIDDEN_FUNC(name) \ ++ FUNC_START(name) \ ++ .hidden name; \ ++ .hidden GLUE(.,name); ++ + #define FUNC_END(name) \ + GLUE(.L,name): \ + .size GLUE(.,name),GLUE(.L,name)-GLUE(.,name) +@@ -136,6 +141,11 @@ name: \ + .globl GLUE(.,name); \ + GLUE(.,name): + ++#define HIDDEN_FUNC(name) \ ++ FUNC_START(name) \ ++ .hidden name; \ ++ .hidden GLUE(.,name); ++ + #define FUNC_END(name) \ + GLUE(.L,name): \ + .size GLUE(.,name),GLUE(.L,name)-GLUE(.,name) +@@ -153,6 +163,10 @@ GLUE(.L,name): \ + .globl FUNC_NAME(name); \ + FUNC_NAME(name): + ++#define HIDDEN_FUNC(name) \ ++ FUNC_START(name) \ ++ .hidden FUNC_NAME(name); ++ + #define FUNC_END(name) \ + GLUE(.L,name): \ + .size FUNC_NAME(name),GLUE(.L,name)-FUNC_NAME(name) +--- a/gcc/config/rs6000/predicates.md ++++ b/gcc/config/rs6000/predicates.md +@@ -915,7 +915,7 @@ + rtx elt; + int count = XVECLEN (op, 0); + +- if (count != 55) ++ if (count != 54) + return 0; + + index = 0; +@@ -964,9 +964,8 @@ + || GET_MODE (SET_SRC (elt)) != Pmode) + return 0; + +- if (GET_CODE (XVECEXP (op, 0, index++)) != USE +- || GET_CODE (XVECEXP (op, 0, index++)) != USE +- || GET_CODE (XVECEXP (op, 0, index++)) != CLOBBER) ++ if 
(GET_CODE (XVECEXP (op, 0, index++)) != SET ++ || GET_CODE (XVECEXP (op, 0, index++)) != SET) + return 0; + return 1; + }) +--- a/gcc/config/rs6000/rs6000.c ++++ b/gcc/config/rs6000/rs6000.c +@@ -174,9 +174,15 @@ int rs6000_ieeequad; + /* Nonzero to use AltiVec ABI. */ + int rs6000_altivec_abi; + ++/* Nonzero if we want SPE SIMD instructions. */ ++int rs6000_spe; ++ + /* Nonzero if we want SPE ABI extensions. */ + int rs6000_spe_abi; + ++/* Nonzero to use isel instructions. */ ++int rs6000_isel; ++ + /* Nonzero if floating point operations are done in the GPRs. */ + int rs6000_float_gprs = 0; + +@@ -669,6 +675,44 @@ struct processor_costs ppc8540_cost = { + 1, /* prefetch streams /*/ + }; + ++/* Instruction costs on E300C2 and E300C3 cores. */ ++static const ++struct processor_costs ppce300c2c3_cost = { ++ COSTS_N_INSNS (4), /* mulsi */ ++ COSTS_N_INSNS (4), /* mulsi_const */ ++ COSTS_N_INSNS (4), /* mulsi_const9 */ ++ COSTS_N_INSNS (4), /* muldi */ ++ COSTS_N_INSNS (19), /* divsi */ ++ COSTS_N_INSNS (19), /* divdi */ ++ COSTS_N_INSNS (3), /* fp */ ++ COSTS_N_INSNS (4), /* dmul */ ++ COSTS_N_INSNS (18), /* sdiv */ ++ COSTS_N_INSNS (33), /* ddiv */ ++ 32, ++ 16, /* l1 cache */ ++ 16, /* l2 cache */ ++ 1, /* prefetch streams /*/ ++}; ++ ++/* Instruction costs on PPCE500MC processors. */ ++static const ++struct processor_costs ppce500mc_cost = { ++ COSTS_N_INSNS (4), /* mulsi */ ++ COSTS_N_INSNS (4), /* mulsi_const */ ++ COSTS_N_INSNS (4), /* mulsi_const9 */ ++ COSTS_N_INSNS (4), /* muldi */ ++ COSTS_N_INSNS (14), /* divsi */ ++ COSTS_N_INSNS (14), /* divdi */ ++ COSTS_N_INSNS (8), /* fp */ ++ COSTS_N_INSNS (10), /* dmul */ ++ COSTS_N_INSNS (36), /* sdiv */ ++ COSTS_N_INSNS (66), /* ddiv */ ++ 64, /* cache line size */ ++ 32, /* l1 cache */ ++ 128, /* l2 cache */ ++ 1, /* prefetch streams /*/ ++}; ++ + /* Instruction costs on POWER4 and POWER5 processors. 
*/ + static const + struct processor_costs power4_cost = { +@@ -713,12 +757,11 @@ static const char *rs6000_invalid_within + static rtx rs6000_generate_compare (enum rtx_code); + static void rs6000_emit_stack_tie (void); + static void rs6000_frame_related (rtx, rtx, HOST_WIDE_INT, rtx, rtx); +-static rtx spe_synthesize_frame_save (rtx); + static bool spe_func_has_64bit_regs_p (void); + static void emit_frame_save (rtx, rtx, enum machine_mode, unsigned int, + int, HOST_WIDE_INT); + static rtx gen_frame_mem_offset (enum machine_mode, rtx, int); +-static void rs6000_emit_allocate_stack (HOST_WIDE_INT, int); ++static void rs6000_emit_allocate_stack (HOST_WIDE_INT, int, int); + static unsigned rs6000_hash_constant (rtx); + static unsigned toc_hash_function (const void *); + static int toc_hash_eq (const void *, const void *); +@@ -728,7 +771,7 @@ static bool legitimate_small_data_p (enu + static bool legitimate_lo_sum_address_p (enum machine_mode, rtx, int); + static struct machine_function * rs6000_init_machine_status (void); + static bool rs6000_assemble_integer (rtx, unsigned int, int); +-static bool no_global_regs_above (int); ++static bool no_global_regs_above (int, bool); + #ifdef HAVE_GAS_HIDDEN + static void rs6000_assemble_visibility (tree, int); + #endif +@@ -741,7 +784,13 @@ static void rs6000_eliminate_indexed_mem + static const char *rs6000_mangle_type (const_tree); + extern const struct attribute_spec rs6000_attribute_table[]; + static void rs6000_set_default_type_attributes (tree); ++static rtx rs6000_savres_routine_sym (rs6000_stack_t *, bool, bool, bool); ++static void rs6000_emit_stack_reset (rs6000_stack_t *, rtx, rtx, int, bool); ++static rtx rs6000_make_savres_rtx (rs6000_stack_t *, rtx, int, ++ enum machine_mode, bool, bool, bool); + static bool rs6000_reg_live_or_pic_offset_p (int); ++static int rs6000_savres_strategy (rs6000_stack_t *, bool, int, int); ++static void rs6000_restore_saved_cr (rtx, int); + static void rs6000_output_function_prologue (FILE *, HOST_WIDE_INT); + static void rs6000_output_function_epilogue (FILE *, HOST_WIDE_INT); + static void rs6000_output_mi_thunk (FILE *, tree, HOST_WIDE_INT, HOST_WIDE_INT, +@@ -1420,6 +1469,9 @@ rs6000_override_options (const char *def + {"8540", PROCESSOR_PPC8540, POWERPC_BASE_MASK | MASK_STRICT_ALIGN}, + /* 8548 has a dummy entry for now. */ + {"8548", PROCESSOR_PPC8540, POWERPC_BASE_MASK | MASK_STRICT_ALIGN}, ++ {"e300c2", PROCESSOR_PPCE300C2, POWERPC_BASE_MASK | MASK_SOFT_FLOAT}, ++ {"e300c3", PROCESSOR_PPCE300C3, POWERPC_BASE_MASK}, ++ {"e500mc", PROCESSOR_PPCE500MC, POWERPC_BASE_MASK | MASK_PPC_GFXOPT}, + {"860", PROCESSOR_MPCCORE, POWERPC_BASE_MASK | MASK_SOFT_FLOAT}, + {"970", PROCESSOR_POWER4, + POWERPC_7400_MASK | MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64}, +@@ -1523,9 +1575,20 @@ rs6000_override_options (const char *def + } + } + +- if (TARGET_E500) ++ if ((TARGET_E500 || rs6000_cpu == PROCESSOR_PPCE500MC) ++ && !rs6000_explicit_options.isel) + rs6000_isel = 1; + ++ if (rs6000_cpu == PROCESSOR_PPCE300C2 ++ || rs6000_cpu == PROCESSOR_PPCE300C3 ++ || rs6000_cpu == PROCESSOR_PPCE500MC) ++ { ++ if (TARGET_ALTIVEC) ++ error ("AltiVec not supported in this target"); ++ if (TARGET_SPE) ++ error ("Spe not supported in this target"); ++ } ++ + /* If we are optimizing big endian systems for space, use the load/store + multiple and string instructions. 
*/ + if (BYTES_BIG_ENDIAN && optimize_size) +@@ -1635,9 +1698,9 @@ rs6000_override_options (const char *def + SUB3TARGET_OVERRIDE_OPTIONS; + #endif + +- if (TARGET_E500) ++ if (TARGET_E500 || rs6000_cpu == PROCESSOR_PPCE500MC) + { +- /* The e500 does not have string instructions, and we set ++ /* The e500 and e500mc do not have string instructions, and we set + MASK_STRING above when optimizing for size. */ + if ((target_flags & MASK_STRING) != 0) + target_flags = target_flags & ~MASK_STRING; +@@ -1845,6 +1908,15 @@ rs6000_override_options (const char *def + rs6000_cost = &ppc8540_cost; + break; + ++ case PROCESSOR_PPCE300C2: ++ case PROCESSOR_PPCE300C3: ++ rs6000_cost = &ppce300c2c3_cost; ++ break; ++ ++ case PROCESSOR_PPCE500MC: ++ rs6000_cost = &ppce500mc_cost; ++ break; ++ + case PROCESSOR_POWER4: + case PROCESSOR_POWER5: + rs6000_cost = &power4_cost; +@@ -2144,11 +2216,21 @@ rs6000_handle_option (size_t code, const + rs6000_parse_yes_no_option ("vrsave", arg, &(TARGET_ALTIVEC_VRSAVE)); + break; + ++ case OPT_misel: ++ rs6000_explicit_options.isel = true; ++ rs6000_isel = value; ++ break; ++ + case OPT_misel_: + rs6000_explicit_options.isel = true; + rs6000_parse_yes_no_option ("isel", arg, &(rs6000_isel)); + break; + ++ case OPT_mspe: ++ rs6000_explicit_options.spe = true; ++ rs6000_spe = value; ++ break; ++ + case OPT_mspe_: + rs6000_explicit_options.spe = true; + rs6000_parse_yes_no_option ("spe", arg, &(rs6000_spe)); +@@ -2395,6 +2477,8 @@ rs6000_file_start (void) + (TARGET_ALTIVEC_ABI ? 2 + : TARGET_SPE_ABI ? 3 + : 1)); ++ fprintf (file, "\t.gnu_attribute 12, %d\n", ++ aix_struct_return ? 2 : 1); + } + #endif + +@@ -3145,24 +3229,26 @@ invalid_e500_subreg (rtx op, enum machin + if (TARGET_E500_DOUBLE) + { + /* Reject (subreg:SI (reg:DF)); likewise with subreg:DI or +- subreg:TI and reg:TF. */ ++ subreg:TI and reg:TF. Decimal float modes are like integer ++ modes (only low part of each register used) for this ++ purpose. */ + if (GET_CODE (op) == SUBREG +- && (mode == SImode || mode == DImode || mode == TImode) ++ && (mode == SImode || mode == DImode || mode == TImode ++ || mode == DDmode || mode == TDmode) + && REG_P (SUBREG_REG (op)) + && (GET_MODE (SUBREG_REG (op)) == DFmode +- || GET_MODE (SUBREG_REG (op)) == TFmode +- || GET_MODE (SUBREG_REG (op)) == DDmode +- || GET_MODE (SUBREG_REG (op)) == TDmode)) ++ || GET_MODE (SUBREG_REG (op)) == TFmode)) + return true; + + /* Reject (subreg:DF (reg:DI)); likewise with subreg:TF and + reg:TI. 
*/ + if (GET_CODE (op) == SUBREG +- && (mode == DFmode || mode == TFmode +- || mode == DDmode || mode == TDmode) ++ && (mode == DFmode || mode == TFmode) + && REG_P (SUBREG_REG (op)) + && (GET_MODE (SUBREG_REG (op)) == DImode +- || GET_MODE (SUBREG_REG (op)) == TImode)) ++ || GET_MODE (SUBREG_REG (op)) == TImode ++ || GET_MODE (SUBREG_REG (op)) == DDmode ++ || GET_MODE (SUBREG_REG (op)) == TDmode)) + return true; + } + +@@ -3413,10 +3499,10 @@ rs6000_legitimate_offset_address_p (enum + return SPE_CONST_OFFSET_OK (offset); + + case DFmode: +- case DDmode: + if (TARGET_E500_DOUBLE) + return SPE_CONST_OFFSET_OK (offset); + ++ case DDmode: + case DImode: + /* On e500v2, we may have: + +@@ -3433,11 +3519,11 @@ rs6000_legitimate_offset_address_p (enum + break; + + case TFmode: +- case TDmode: + if (TARGET_E500_DOUBLE) + return (SPE_CONST_OFFSET_OK (offset) + && SPE_CONST_OFFSET_OK (offset + 8)); + ++ case TDmode: + case TImode: + if (mode == TFmode || mode == TDmode || !TARGET_POWERPC64) + extra = 12; +@@ -3582,8 +3668,10 @@ rs6000_legitimize_address (rtx x, rtx ol + && GET_CODE (XEXP (x, 1)) == CONST_INT + && (unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 1)) + 0x8000) >= 0x10000 + && !(SPE_VECTOR_MODE (mode) ++ || ALTIVEC_VECTOR_MODE (mode) + || (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode +- || mode == DImode)))) ++ || mode == DImode || mode == DDmode ++ || mode == TDmode)))) + { + HOST_WIDE_INT high_int, low_int; + rtx sum; +@@ -3591,7 +3679,14 @@ rs6000_legitimize_address (rtx x, rtx ol + high_int = INTVAL (XEXP (x, 1)) - low_int; + sum = force_operand (gen_rtx_PLUS (Pmode, XEXP (x, 0), + GEN_INT (high_int)), 0); +- return gen_rtx_PLUS (Pmode, sum, GEN_INT (low_int)); ++ /* Using a REG+CONST 64-bit integer load on 64-bit platforms ++ requires that CONST be word-aligned. */ ++ if (TARGET_POWERPC64 ++ && (mode == DImode || mode == DDmode) ++ && (low_int & 0x3)) ++ return gen_rtx_PLUS (Pmode, sum, force_reg (Pmode, GEN_INT (low_int))); ++ else ++ return gen_rtx_PLUS (Pmode, sum, GEN_INT (low_int)); + } + else if (GET_CODE (x) == PLUS + && GET_CODE (XEXP (x, 0)) == REG +@@ -3599,11 +3694,12 @@ rs6000_legitimize_address (rtx x, rtx ol + && GET_MODE_NUNITS (mode) == 1 + && ((TARGET_HARD_FLOAT && TARGET_FPRS) + || TARGET_POWERPC64 +- || (((mode != DImode && mode != DFmode && mode != DDmode) +- || TARGET_E500_DOUBLE) +- && mode != TFmode && mode != TDmode)) ++ || ((mode != DImode && mode != DFmode && mode != DDmode) ++ || (TARGET_E500_DOUBLE && mode != DDmode))) + && (TARGET_POWERPC64 || mode != DImode) +- && mode != TImode) ++ && mode != TImode ++ && mode != TFmode ++ && mode != TDmode) + { + return gen_rtx_PLUS (Pmode, XEXP (x, 0), + force_reg (Pmode, force_operand (XEXP (x, 1), 0))); +@@ -3630,19 +3726,29 @@ rs6000_legitimize_address (rtx x, rtx ol + /* We accept [reg + reg] and [reg + OFFSET]. 
*/ + + if (GET_CODE (x) == PLUS) +- { +- rtx op1 = XEXP (x, 0); +- rtx op2 = XEXP (x, 1); +- +- op1 = force_reg (Pmode, op1); +- +- if (GET_CODE (op2) != REG +- && (GET_CODE (op2) != CONST_INT +- || !SPE_CONST_OFFSET_OK (INTVAL (op2)))) +- op2 = force_reg (Pmode, op2); +- +- return gen_rtx_PLUS (Pmode, op1, op2); +- } ++ { ++ rtx op1 = XEXP (x, 0); ++ rtx op2 = XEXP (x, 1); ++ rtx y; ++ ++ op1 = force_reg (Pmode, op1); ++ ++ if (GET_CODE (op2) != REG ++ && (GET_CODE (op2) != CONST_INT ++ || !SPE_CONST_OFFSET_OK (INTVAL (op2)) ++ || (GET_MODE_SIZE (mode) > 8 ++ && !SPE_CONST_OFFSET_OK (INTVAL (op2) + 8)))) ++ op2 = force_reg (Pmode, op2); ++ ++ /* We can't always do [reg + reg] for these, because [reg + ++ reg + offset] is not a legitimate addressing mode. */ ++ y = gen_rtx_PLUS (Pmode, op1, op2); ++ ++ if ((GET_MODE_SIZE (mode) > 8 || mode == DDmode) && REG_P (op2)) ++ return force_reg (Pmode, y); ++ else ++ return y; ++ } + + return force_reg (Pmode, x); + } +@@ -4190,7 +4296,8 @@ rs6000_legitimate_address (enum machine_ + && mode != TDmode + && ((TARGET_HARD_FLOAT && TARGET_FPRS) + || TARGET_POWERPC64 +- || ((mode != DFmode && mode != DDmode) || TARGET_E500_DOUBLE)) ++ || (mode != DFmode && mode != DDmode) ++ || (TARGET_E500_DOUBLE && mode != DDmode)) + && (TARGET_POWERPC64 || mode != DImode) + && legitimate_indexed_address_p (x, reg_ok_strict)) + return 1; +@@ -4314,7 +4421,8 @@ rs6000_hard_regno_nregs (int regno, enum + would require function_arg and rs6000_spe_function_arg to handle + SCmode so as to pass the value correctly in a pair of + registers. */ +- if (TARGET_E500_DOUBLE && FLOAT_MODE_P (mode) && mode != SCmode) ++ if (TARGET_E500_DOUBLE && FLOAT_MODE_P (mode) && mode != SCmode ++ && !DECIMAL_FLOAT_MODE_P (mode)) + return (GET_MODE_SIZE (mode) + UNITS_PER_FP_WORD - 1) / UNITS_PER_FP_WORD; + + return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD; +@@ -4394,16 +4502,19 @@ rs6000_conditional_register_usage (void) + if (TARGET_ALTIVEC) + global_regs[VSCR_REGNO] = 1; + +- if (TARGET_ALTIVEC_ABI) +- { +- for (i = FIRST_ALTIVEC_REGNO; i < FIRST_ALTIVEC_REGNO + 20; ++i) +- call_used_regs[i] = call_really_used_regs[i] = 1; ++ /* If we are not using the AltiVec ABI, pretend that the normally ++ call-saved registers are also call-used. We could use them ++ normally if we saved and restored them in the prologue; that ++ would require using the alignment padding around the register ++ save area, and some care with unwinding information. */ ++ if (! TARGET_ALTIVEC_ABI) ++ for (i = FIRST_ALTIVEC_REGNO + 20; i <= LAST_ALTIVEC_REGNO; ++i) ++ call_used_regs[i] = call_really_used_regs[i] = 1; + +- /* AIX reserves VR20:31 in non-extended ABI mode. */ +- if (TARGET_XCOFF) +- for (i = FIRST_ALTIVEC_REGNO + 20; i < FIRST_ALTIVEC_REGNO + 32; ++i) +- fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1; +- } ++ if (TARGET_ALTIVEC_ABI && TARGET_XCOFF) ++ /* AIX reserves VR20:31 in non-extended ABI mode. 
*/ ++ for (i = FIRST_ALTIVEC_REGNO + 20; i < FIRST_ALTIVEC_REGNO + 32; ++i) ++ fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1; + } + + /* Try to output insns to set TARGET equal to the constant C if it can +@@ -5588,14 +5699,12 @@ spe_build_register_parallel (enum machin + switch (mode) + { + case DFmode: +- case DDmode: + r1 = gen_rtx_REG (DImode, gregno); + r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx); + return gen_rtx_PARALLEL (mode, gen_rtvec (1, r1)); + + case DCmode: + case TFmode: +- case TDmode: + r1 = gen_rtx_REG (DImode, gregno); + r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx); + r3 = gen_rtx_REG (DImode, gregno + 2); +@@ -5628,13 +5737,12 @@ rs6000_spe_function_arg (CUMULATIVE_ARGS + /* On E500 v2, double arithmetic is done on the full 64-bit GPR, but + are passed and returned in a pair of GPRs for ABI compatibility. */ + if (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode +- || mode == DDmode || mode == TDmode + || mode == DCmode || mode == TCmode)) + { + int n_words = rs6000_arg_size (mode, type); + + /* Doubles go in an odd/even register pair (r5/r6, etc). */ +- if (mode == DFmode || mode == DDmode) ++ if (mode == DFmode) + gregno += (1 - gregno) & 1; + + /* Multi-reg args are not split between registers and stack. */ +@@ -6047,10 +6155,8 @@ function_arg (CUMULATIVE_ARGS *cum, enum + else if (TARGET_SPE_ABI && TARGET_SPE + && (SPE_VECTOR_MODE (mode) + || (TARGET_E500_DOUBLE && (mode == DFmode +- || mode == DDmode + || mode == DCmode + || mode == TFmode +- || mode == TDmode + || mode == TCmode)))) + return rs6000_spe_function_arg (cum, mode, type); + +@@ -7049,9 +7155,9 @@ static struct builtin_description bdesc_ + { MASK_ALTIVEC, CODE_FOR_altivec_vrlb, "__builtin_altivec_vrlb", ALTIVEC_BUILTIN_VRLB }, + { MASK_ALTIVEC, CODE_FOR_altivec_vrlh, "__builtin_altivec_vrlh", ALTIVEC_BUILTIN_VRLH }, + { MASK_ALTIVEC, CODE_FOR_altivec_vrlw, "__builtin_altivec_vrlw", ALTIVEC_BUILTIN_VRLW }, +- { MASK_ALTIVEC, CODE_FOR_altivec_vslb, "__builtin_altivec_vslb", ALTIVEC_BUILTIN_VSLB }, +- { MASK_ALTIVEC, CODE_FOR_altivec_vslh, "__builtin_altivec_vslh", ALTIVEC_BUILTIN_VSLH }, +- { MASK_ALTIVEC, CODE_FOR_altivec_vslw, "__builtin_altivec_vslw", ALTIVEC_BUILTIN_VSLW }, ++ { MASK_ALTIVEC, CODE_FOR_ashlv16qi3, "__builtin_altivec_vslb", ALTIVEC_BUILTIN_VSLB }, ++ { MASK_ALTIVEC, CODE_FOR_ashlv8hi3, "__builtin_altivec_vslh", ALTIVEC_BUILTIN_VSLH }, ++ { MASK_ALTIVEC, CODE_FOR_ashlv4si3, "__builtin_altivec_vslw", ALTIVEC_BUILTIN_VSLW }, + { MASK_ALTIVEC, CODE_FOR_altivec_vsl, "__builtin_altivec_vsl", ALTIVEC_BUILTIN_VSL }, + { MASK_ALTIVEC, CODE_FOR_altivec_vslo, "__builtin_altivec_vslo", ALTIVEC_BUILTIN_VSLO }, + { MASK_ALTIVEC, CODE_FOR_altivec_vspltb, "__builtin_altivec_vspltb", ALTIVEC_BUILTIN_VSPLTB }, +@@ -7211,10 +7317,10 @@ static struct builtin_description bdesc_ + { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_sums", ALTIVEC_BUILTIN_VEC_SUMS }, + { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_xor", ALTIVEC_BUILTIN_VEC_XOR }, + +- { 0, CODE_FOR_divv2sf3, "__builtin_paired_divv2sf3", PAIRED_BUILTIN_DIVV2SF3 }, +- { 0, CODE_FOR_addv2sf3, "__builtin_paired_addv2sf3", PAIRED_BUILTIN_ADDV2SF3 }, +- { 0, CODE_FOR_subv2sf3, "__builtin_paired_subv2sf3", PAIRED_BUILTIN_SUBV2SF3 }, +- { 0, CODE_FOR_mulv2sf3, "__builtin_paired_mulv2sf3", PAIRED_BUILTIN_MULV2SF3 }, ++ { 0, CODE_FOR_paired_divv2sf3, "__builtin_paired_divv2sf3", PAIRED_BUILTIN_DIVV2SF3 }, ++ { 0, CODE_FOR_paired_addv2sf3, "__builtin_paired_addv2sf3", PAIRED_BUILTIN_ADDV2SF3 }, ++ { 0, 
CODE_FOR_paired_subv2sf3, "__builtin_paired_subv2sf3", PAIRED_BUILTIN_SUBV2SF3 }, ++ { 0, CODE_FOR_paired_mulv2sf3, "__builtin_paired_mulv2sf3", PAIRED_BUILTIN_MULV2SF3 }, + { 0, CODE_FOR_paired_muls0, "__builtin_paired_muls0", PAIRED_BUILTIN_MULS0 }, + { 0, CODE_FOR_paired_muls1, "__builtin_paired_muls1", PAIRED_BUILTIN_MULS1 }, + { 0, CODE_FOR_paired_merge00, "__builtin_paired_merge00", PAIRED_BUILTIN_MERGE00 }, +@@ -7223,10 +7329,10 @@ static struct builtin_description bdesc_ + { 0, CODE_FOR_paired_merge11, "__builtin_paired_merge11", PAIRED_BUILTIN_MERGE11 }, + + /* Place holder, leave as first spe builtin. */ +- { 0, CODE_FOR_spe_evaddw, "__builtin_spe_evaddw", SPE_BUILTIN_EVADDW }, +- { 0, CODE_FOR_spe_evand, "__builtin_spe_evand", SPE_BUILTIN_EVAND }, ++ { 0, CODE_FOR_addv2si3, "__builtin_spe_evaddw", SPE_BUILTIN_EVADDW }, ++ { 0, CODE_FOR_andv2si3, "__builtin_spe_evand", SPE_BUILTIN_EVAND }, + { 0, CODE_FOR_spe_evandc, "__builtin_spe_evandc", SPE_BUILTIN_EVANDC }, +- { 0, CODE_FOR_spe_evdivws, "__builtin_spe_evdivws", SPE_BUILTIN_EVDIVWS }, ++ { 0, CODE_FOR_divv2si3, "__builtin_spe_evdivws", SPE_BUILTIN_EVDIVWS }, + { 0, CODE_FOR_spe_evdivwu, "__builtin_spe_evdivwu", SPE_BUILTIN_EVDIVWU }, + { 0, CODE_FOR_spe_eveqv, "__builtin_spe_eveqv", SPE_BUILTIN_EVEQV }, + { 0, CODE_FOR_spe_evfsadd, "__builtin_spe_evfsadd", SPE_BUILTIN_EVFSADD }, +@@ -7502,7 +7608,7 @@ static struct builtin_description bdesc_ + + /* The SPE unary builtins must start with SPE_BUILTIN_EVABS and + end with SPE_BUILTIN_EVSUBFUSIAAW. */ +- { 0, CODE_FOR_spe_evabs, "__builtin_spe_evabs", SPE_BUILTIN_EVABS }, ++ { 0, CODE_FOR_absv2si2, "__builtin_spe_evabs", SPE_BUILTIN_EVABS }, + { 0, CODE_FOR_spe_evaddsmiaaw, "__builtin_spe_evaddsmiaaw", SPE_BUILTIN_EVADDSMIAAW }, + { 0, CODE_FOR_spe_evaddssiaaw, "__builtin_spe_evaddssiaaw", SPE_BUILTIN_EVADDSSIAAW }, + { 0, CODE_FOR_spe_evaddumiaaw, "__builtin_spe_evaddumiaaw", SPE_BUILTIN_EVADDUMIAAW }, +@@ -7534,9 +7640,9 @@ static struct builtin_description bdesc_ + /* Place-holder. Leave as last unary SPE builtin. 
*/ + { 0, CODE_FOR_spe_evsubfusiaaw, "__builtin_spe_evsubfusiaaw", SPE_BUILTIN_EVSUBFUSIAAW }, + +- { 0, CODE_FOR_absv2sf2, "__builtin_paired_absv2sf2", PAIRED_BUILTIN_ABSV2SF2 }, ++ { 0, CODE_FOR_paired_absv2sf2, "__builtin_paired_absv2sf2", PAIRED_BUILTIN_ABSV2SF2 }, + { 0, CODE_FOR_nabsv2sf2, "__builtin_paired_nabsv2sf2", PAIRED_BUILTIN_NABSV2SF2 }, +- { 0, CODE_FOR_negv2sf2, "__builtin_paired_negv2sf2", PAIRED_BUILTIN_NEGV2SF2 }, ++ { 0, CODE_FOR_paired_negv2sf2, "__builtin_paired_negv2sf2", PAIRED_BUILTIN_NEGV2SF2 }, + { 0, CODE_FOR_sqrtv2sf2, "__builtin_paired_sqrtv2sf2", PAIRED_BUILTIN_SQRTV2SF2 }, + { 0, CODE_FOR_resv2sf2, "__builtin_paired_resv2sf2", PAIRED_BUILTIN_RESV2SF2 } + }; +@@ -9051,6 +9157,8 @@ build_opaque_vector_type (tree node, int + static void + rs6000_init_builtins (void) + { ++ tree tdecl; ++ + V2SI_type_node = build_vector_type (intSI_type_node, 2); + V2SF_type_node = build_vector_type (float_type_node, 2); + V4HI_type_node = build_vector_type (intHI_type_node, 4); +@@ -9088,60 +9196,75 @@ rs6000_init_builtins (void) + float_type_internal_node = float_type_node; + void_type_internal_node = void_type_node; + +- (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL, +- get_identifier ("__bool char"), +- bool_char_type_node)); +- (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL, +- get_identifier ("__bool short"), +- bool_short_type_node)); +- (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL, +- get_identifier ("__bool int"), +- bool_int_type_node)); +- (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL, +- get_identifier ("__pixel"), +- pixel_type_node)); ++ tdecl = build_decl (TYPE_DECL, get_identifier ("__bool char"), ++ bool_char_type_node); ++ TYPE_NAME (bool_char_type_node) = tdecl; ++ (*lang_hooks.decls.pushdecl) (tdecl); ++ tdecl = build_decl (TYPE_DECL, get_identifier ("__bool short"), ++ bool_short_type_node); ++ TYPE_NAME (bool_short_type_node) = tdecl; ++ (*lang_hooks.decls.pushdecl) (tdecl); ++ tdecl = build_decl (TYPE_DECL, get_identifier ("__bool int"), ++ bool_int_type_node); ++ TYPE_NAME (bool_int_type_node) = tdecl; ++ (*lang_hooks.decls.pushdecl) (tdecl); ++ tdecl = build_decl (TYPE_DECL, get_identifier ("__pixel"), ++ pixel_type_node); ++ TYPE_NAME (pixel_type_node) = tdecl; ++ (*lang_hooks.decls.pushdecl) (tdecl); + + bool_V16QI_type_node = build_vector_type (bool_char_type_node, 16); + bool_V8HI_type_node = build_vector_type (bool_short_type_node, 8); + bool_V4SI_type_node = build_vector_type (bool_int_type_node, 4); + pixel_V8HI_type_node = build_vector_type (pixel_type_node, 8); + +- (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL, +- get_identifier ("__vector unsigned char"), +- unsigned_V16QI_type_node)); +- (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL, +- get_identifier ("__vector signed char"), +- V16QI_type_node)); +- (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL, +- get_identifier ("__vector __bool char"), +- bool_V16QI_type_node)); +- +- (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL, +- get_identifier ("__vector unsigned short"), +- unsigned_V8HI_type_node)); +- (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL, +- get_identifier ("__vector signed short"), +- V8HI_type_node)); +- (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL, +- get_identifier ("__vector __bool short"), +- bool_V8HI_type_node)); +- +- (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL, +- get_identifier ("__vector unsigned int"), +- unsigned_V4SI_type_node)); +- (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL, +- get_identifier 
("__vector signed int"), +- V4SI_type_node)); +- (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL, +- get_identifier ("__vector __bool int"), +- bool_V4SI_type_node)); +- +- (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL, +- get_identifier ("__vector float"), +- V4SF_type_node)); +- (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL, +- get_identifier ("__vector __pixel"), +- pixel_V8HI_type_node)); ++ tdecl = build_decl (TYPE_DECL, get_identifier ("__vector unsigned char"), ++ unsigned_V16QI_type_node); ++ TYPE_NAME (unsigned_V16QI_type_node) = tdecl; ++ (*lang_hooks.decls.pushdecl) (tdecl); ++ tdecl = build_decl (TYPE_DECL, get_identifier ("__vector signed char"), ++ V16QI_type_node); ++ TYPE_NAME (V16QI_type_node) = tdecl; ++ (*lang_hooks.decls.pushdecl) (tdecl); ++ tdecl = build_decl (TYPE_DECL, get_identifier ("__vector __bool char"), ++ bool_V16QI_type_node); ++ TYPE_NAME ( bool_V16QI_type_node) = tdecl; ++ (*lang_hooks.decls.pushdecl) (tdecl); ++ ++ tdecl = build_decl (TYPE_DECL, get_identifier ("__vector unsigned short"), ++ unsigned_V8HI_type_node); ++ TYPE_NAME (unsigned_V8HI_type_node) = tdecl; ++ (*lang_hooks.decls.pushdecl) (tdecl); ++ tdecl = build_decl (TYPE_DECL, get_identifier ("__vector signed short"), ++ V8HI_type_node); ++ TYPE_NAME (V8HI_type_node) = tdecl; ++ (*lang_hooks.decls.pushdecl) (tdecl); ++ tdecl = build_decl (TYPE_DECL, get_identifier ("__vector __bool short"), ++ bool_V8HI_type_node); ++ TYPE_NAME (bool_V8HI_type_node) = tdecl; ++ (*lang_hooks.decls.pushdecl) (tdecl); ++ ++ tdecl = build_decl (TYPE_DECL, get_identifier ("__vector unsigned int"), ++ unsigned_V4SI_type_node); ++ TYPE_NAME (unsigned_V4SI_type_node) = tdecl; ++ (*lang_hooks.decls.pushdecl) (tdecl); ++ tdecl = build_decl (TYPE_DECL, get_identifier ("__vector signed int"), ++ V4SI_type_node); ++ TYPE_NAME (V4SI_type_node) = tdecl; ++ (*lang_hooks.decls.pushdecl) (tdecl); ++ tdecl = build_decl (TYPE_DECL, get_identifier ("__vector __bool int"), ++ bool_V4SI_type_node); ++ TYPE_NAME (bool_V4SI_type_node) = tdecl; ++ (*lang_hooks.decls.pushdecl) (tdecl); ++ ++ tdecl = build_decl (TYPE_DECL, get_identifier ("__vector float"), ++ V4SF_type_node); ++ TYPE_NAME (V4SF_type_node) = tdecl; ++ (*lang_hooks.decls.pushdecl) (tdecl); ++ tdecl = build_decl (TYPE_DECL, get_identifier ("__vector __pixel"), ++ pixel_V8HI_type_node); ++ TYPE_NAME (pixel_V8HI_type_node) = tdecl; ++ (*lang_hooks.decls.pushdecl) (tdecl); + + if (TARGET_PAIRED_FLOAT) + paired_init_builtins (); +@@ -12472,7 +12595,7 @@ rs6000_generate_compare (enum rtx_code c + switch (op_mode) + { + case SFmode: +- cmp = flag_unsafe_math_optimizations ++ cmp = (flag_finite_math_only && !flag_trapping_math) + ? gen_tstsfeq_gpr (compare_result, rs6000_compare_op0, + rs6000_compare_op1) + : gen_cmpsfeq_gpr (compare_result, rs6000_compare_op0, +@@ -12480,7 +12603,7 @@ rs6000_generate_compare (enum rtx_code c + break; + + case DFmode: +- cmp = flag_unsafe_math_optimizations ++ cmp = (flag_finite_math_only && !flag_trapping_math) + ? gen_tstdfeq_gpr (compare_result, rs6000_compare_op0, + rs6000_compare_op1) + : gen_cmpdfeq_gpr (compare_result, rs6000_compare_op0, +@@ -12488,7 +12611,7 @@ rs6000_generate_compare (enum rtx_code c + break; + + case TFmode: +- cmp = flag_unsafe_math_optimizations ++ cmp = (flag_finite_math_only && !flag_trapping_math) + ? 
gen_tsttfeq_gpr (compare_result, rs6000_compare_op0, + rs6000_compare_op1) + : gen_cmptfeq_gpr (compare_result, rs6000_compare_op0, +@@ -12504,7 +12627,7 @@ rs6000_generate_compare (enum rtx_code c + switch (op_mode) + { + case SFmode: +- cmp = flag_unsafe_math_optimizations ++ cmp = (flag_finite_math_only && !flag_trapping_math) + ? gen_tstsfgt_gpr (compare_result, rs6000_compare_op0, + rs6000_compare_op1) + : gen_cmpsfgt_gpr (compare_result, rs6000_compare_op0, +@@ -12512,7 +12635,7 @@ rs6000_generate_compare (enum rtx_code c + break; + + case DFmode: +- cmp = flag_unsafe_math_optimizations ++ cmp = (flag_finite_math_only && !flag_trapping_math) + ? gen_tstdfgt_gpr (compare_result, rs6000_compare_op0, + rs6000_compare_op1) + : gen_cmpdfgt_gpr (compare_result, rs6000_compare_op0, +@@ -12520,7 +12643,7 @@ rs6000_generate_compare (enum rtx_code c + break; + + case TFmode: +- cmp = flag_unsafe_math_optimizations ++ cmp = (flag_finite_math_only && !flag_trapping_math) + ? gen_tsttfgt_gpr (compare_result, rs6000_compare_op0, + rs6000_compare_op1) + : gen_cmptfgt_gpr (compare_result, rs6000_compare_op0, +@@ -12536,7 +12659,7 @@ rs6000_generate_compare (enum rtx_code c + switch (op_mode) + { + case SFmode: +- cmp = flag_unsafe_math_optimizations ++ cmp = (flag_finite_math_only && !flag_trapping_math) + ? gen_tstsflt_gpr (compare_result, rs6000_compare_op0, + rs6000_compare_op1) + : gen_cmpsflt_gpr (compare_result, rs6000_compare_op0, +@@ -12544,7 +12667,7 @@ rs6000_generate_compare (enum rtx_code c + break; + + case DFmode: +- cmp = flag_unsafe_math_optimizations ++ cmp = (flag_finite_math_only && !flag_trapping_math) + ? gen_tstdflt_gpr (compare_result, rs6000_compare_op0, + rs6000_compare_op1) + : gen_cmpdflt_gpr (compare_result, rs6000_compare_op0, +@@ -12552,7 +12675,7 @@ rs6000_generate_compare (enum rtx_code c + break; + + case TFmode: +- cmp = flag_unsafe_math_optimizations ++ cmp = (flag_finite_math_only && !flag_trapping_math) + ? gen_tsttflt_gpr (compare_result, rs6000_compare_op0, + rs6000_compare_op1) + : gen_cmptflt_gpr (compare_result, rs6000_compare_op0, +@@ -12587,7 +12710,7 @@ rs6000_generate_compare (enum rtx_code c + switch (op_mode) + { + case SFmode: +- cmp = flag_unsafe_math_optimizations ++ cmp = (flag_finite_math_only && !flag_trapping_math) + ? gen_tstsfeq_gpr (compare_result2, rs6000_compare_op0, + rs6000_compare_op1) + : gen_cmpsfeq_gpr (compare_result2, rs6000_compare_op0, +@@ -12595,7 +12718,7 @@ rs6000_generate_compare (enum rtx_code c + break; + + case DFmode: +- cmp = flag_unsafe_math_optimizations ++ cmp = (flag_finite_math_only && !flag_trapping_math) + ? gen_tstdfeq_gpr (compare_result2, rs6000_compare_op0, + rs6000_compare_op1) + : gen_cmpdfeq_gpr (compare_result2, rs6000_compare_op0, +@@ -12603,7 +12726,7 @@ rs6000_generate_compare (enum rtx_code c + break; + + case TFmode: +- cmp = flag_unsafe_math_optimizations ++ cmp = (flag_finite_math_only && !flag_trapping_math) + ? gen_tsttfeq_gpr (compare_result2, rs6000_compare_op0, + rs6000_compare_op1) + : gen_cmptfeq_gpr (compare_result2, rs6000_compare_op0, +@@ -13946,8 +14069,8 @@ rs6000_split_multireg_move (rtx dst, rtx + reg_mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode : DFmode; + else if (ALTIVEC_REGNO_P (reg)) + reg_mode = V16QImode; +- else if (TARGET_E500_DOUBLE && (mode == TFmode || mode == TDmode)) +- reg_mode = DECIMAL_FLOAT_MODE_P (mode) ? 
DDmode : DFmode; ++ else if (TARGET_E500_DOUBLE && mode == TFmode) ++ reg_mode = DFmode; + else + reg_mode = word_mode; + reg_mode_size = GET_MODE_SIZE (reg_mode); +@@ -14535,7 +14658,7 @@ rs6000_stack_info (void) + { + /* Align stack so SPE GPR save area is aligned on a + double-word boundary. */ +- if (info_ptr->spe_gp_size != 0) ++ if (info_ptr->spe_gp_size != 0 && info_ptr->cr_save_offset != 0) + info_ptr->spe_padding_size + = 8 - (-info_ptr->cr_save_offset % 8); + else +@@ -14686,8 +14809,7 @@ spe_func_has_64bit_regs_p (void) + + if (SPE_VECTOR_MODE (mode)) + return true; +- if (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode +- || mode == DDmode || mode == TDmode)) ++ if (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode)) + return true; + } + } +@@ -15191,10 +15313,12 @@ rs6000_emit_stack_tie (void) + + /* Emit the correct code for allocating stack space, as insns. + If COPY_R12, make sure a copy of the old frame is left in r12. ++ If COPY_R11, make sure a copy of the old frame is left in r11, ++ in preference to r12 if COPY_R12. + The generated code may use hard register 0 as a temporary. */ + + static void +-rs6000_emit_allocate_stack (HOST_WIDE_INT size, int copy_r12) ++rs6000_emit_allocate_stack (HOST_WIDE_INT size, int copy_r12, int copy_r11) + { + rtx insn; + rtx stack_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM); +@@ -15243,8 +15367,11 @@ rs6000_emit_allocate_stack (HOST_WIDE_IN + warning (0, "stack limit expression is not supported"); + } + +- if (copy_r12 || ! TARGET_UPDATE) +- emit_move_insn (gen_rtx_REG (Pmode, 12), stack_reg); ++ if (copy_r12 || copy_r11 || ! TARGET_UPDATE) ++ emit_move_insn (copy_r11 ++ ? gen_rtx_REG (Pmode, 11) ++ : gen_rtx_REG (Pmode, 12), ++ stack_reg); + + if (TARGET_UPDATE) + { +@@ -15270,7 +15397,9 @@ rs6000_emit_allocate_stack (HOST_WIDE_IN + ? gen_addsi3 (stack_reg, stack_reg, todec) + : gen_adddi3 (stack_reg, stack_reg, todec)); + emit_move_insn (gen_rtx_MEM (Pmode, stack_reg), +- gen_rtx_REG (Pmode, 12)); ++ copy_r11 ++ ? gen_rtx_REG (Pmode, 11) ++ : gen_rtx_REG (Pmode, 12)); + } + + RTX_FRAME_RELATED_P (insn) = 1; +@@ -15359,77 +15488,12 @@ rs6000_frame_related (rtx insn, rtx reg, + } + } + +- if (TARGET_SPE) +- real = spe_synthesize_frame_save (real); +- + RTX_FRAME_RELATED_P (insn) = 1; + REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, + real, + REG_NOTES (insn)); + } + +-/* Given an SPE frame note, return a PARALLEL of SETs with the +- original note, plus a synthetic register save. */ +- +-static rtx +-spe_synthesize_frame_save (rtx real) +-{ +- rtx synth, offset, reg, real2; +- +- if (GET_CODE (real) != SET +- || GET_MODE (SET_SRC (real)) != V2SImode) +- return real; +- +- /* For the SPE, registers saved in 64-bits, get a PARALLEL for their +- frame related note. The parallel contains a set of the register +- being saved, and another set to a synthetic register (n+1200). +- This is so we can differentiate between 64-bit and 32-bit saves. +- Words cannot describe this nastiness. 
*/ +- +- gcc_assert (GET_CODE (SET_DEST (real)) == MEM +- && GET_CODE (XEXP (SET_DEST (real), 0)) == PLUS +- && GET_CODE (SET_SRC (real)) == REG); +- +- /* Transform: +- (set (mem (plus (reg x) (const y))) +- (reg z)) +- into: +- (set (mem (plus (reg x) (const y+4))) +- (reg z+1200)) +- */ +- +- real2 = copy_rtx (real); +- PUT_MODE (SET_DEST (real2), SImode); +- reg = SET_SRC (real2); +- real2 = replace_rtx (real2, reg, gen_rtx_REG (SImode, REGNO (reg))); +- synth = copy_rtx (real2); +- +- if (BYTES_BIG_ENDIAN) +- { +- offset = XEXP (XEXP (SET_DEST (real2), 0), 1); +- real2 = replace_rtx (real2, offset, GEN_INT (INTVAL (offset) + 4)); +- } +- +- reg = SET_SRC (synth); +- +- synth = replace_rtx (synth, reg, +- gen_rtx_REG (SImode, REGNO (reg) + 1200)); +- +- offset = XEXP (XEXP (SET_DEST (synth), 0), 1); +- synth = replace_rtx (synth, offset, +- GEN_INT (INTVAL (offset) +- + (BYTES_BIG_ENDIAN ? 0 : 4))); +- +- RTX_FRAME_RELATED_P (synth) = 1; +- RTX_FRAME_RELATED_P (real2) = 1; +- if (BYTES_BIG_ENDIAN) +- real = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, synth, real2)); +- else +- real = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, real2, synth)); +- +- return real; +-} +- + /* Returns an insn that has a vrsave set operation with the + appropriate CLOBBERs. */ + +@@ -15503,7 +15567,7 @@ emit_frame_save (rtx frame_reg, rtx fram + + /* Some cases that need register indexed addressing. */ + if ((TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode)) +- || (TARGET_E500_DOUBLE && (mode == DFmode || mode == DDmode)) ++ || (TARGET_E500_DOUBLE && mode == DFmode) + || (TARGET_SPE_ABI + && SPE_VECTOR_MODE (mode) + && !SPE_CONST_OFFSET_OK (offset))) +@@ -15543,7 +15607,7 @@ gen_frame_mem_offset (enum machine_mode + int_rtx = GEN_INT (offset); + + if ((TARGET_SPE_ABI && SPE_VECTOR_MODE (mode)) +- || (TARGET_E500_DOUBLE && (mode == DFmode || mode == DDmode))) ++ || (TARGET_E500_DOUBLE && mode == DFmode)) + { + offset_rtx = gen_rtx_REG (Pmode, FIXED_SCRATCH); + emit_move_insn (offset_rtx, int_rtx); +@@ -15558,11 +15622,11 @@ gen_frame_mem_offset (enum machine_mode + and cannot use stmw/lmw if there are any in its range. */ + + static bool +-no_global_regs_above (int first_greg) ++no_global_regs_above (int first, bool gpr) + { + int i; +- for (i = 0; i < 32 - first_greg; i++) +- if (global_regs[first_greg + i]) ++ for (i = first; i < (gpr ? 32 : 64); i++) ++ if (global_regs[i]) + return false; + return true; + } +@@ -15571,6 +15635,164 @@ no_global_regs_above (int first_greg) + #define TARGET_FIX_AND_CONTINUE 0 + #endif + ++/* It's really GPR 13 and FPR 14, but we need the smaller of the two. */ ++#define FIRST_SAVRES_REGISTER FIRST_SAVED_GP_REGNO ++#define LAST_SAVRES_REGISTER 31 ++#define N_SAVRES_REGISTERS (LAST_SAVRES_REGISTER - FIRST_SAVRES_REGISTER + 1) ++ ++static GTY(()) rtx savres_routine_syms[N_SAVRES_REGISTERS][8]; ++ ++/* Return the symbol for an out-of-line register save/restore routine. ++ We are saving/restoring GPRs if GPR is true. */ ++ ++static rtx ++rs6000_savres_routine_sym (rs6000_stack_t *info, bool savep, bool gpr, bool exitp) ++{ ++ int regno = gpr ? info->first_gp_reg_save : (info->first_fp_reg_save - 32); ++ rtx sym; ++ int select = ((savep ? 1 : 0) << 2 ++ | (TARGET_SPE_ABI ++ /* On the SPE, we never have any FPRs, but we do have ++ 32/64-bit versions of the routines. */ ++ ? (info->spe_64bit_regs_used ? 1 : 0) ++ : (gpr ? 1 : 0)) << 1 ++ | (exitp ? 1: 0)); ++ ++ /* Don't generate bogus routine names. 
*/ ++ gcc_assert (FIRST_SAVRES_REGISTER <= regno && regno <= LAST_SAVRES_REGISTER); ++ ++ sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select]; ++ ++ if (sym == NULL) ++ { ++ char name[30]; ++ const char *action; ++ const char *regkind; ++ const char *exit_suffix; ++ ++ action = savep ? "save" : "rest"; ++ ++ /* SPE has slightly different names for its routines depending on ++ whether we are saving 32-bit or 64-bit registers. */ ++ if (TARGET_SPE_ABI) ++ { ++ /* No floating point saves on the SPE. */ ++ gcc_assert (gpr); ++ ++ regkind = info->spe_64bit_regs_used ? "64gpr" : "32gpr"; ++ } ++ else ++ regkind = gpr ? "gpr" : "fpr"; ++ ++ exit_suffix = exitp ? "_x" : ""; ++ ++ sprintf (name, "_%s%s_%d%s", action, regkind, regno, exit_suffix); ++ ++ sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select] ++ = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name)); ++ SYMBOL_REF_FLAGS (sym) |= SYMBOL_FLAG_FUNCTION; ++ } ++ ++ return sym; ++} ++ ++/* Emit a sequence of insns, including a stack tie if needed, for ++ resetting the stack pointer. If SAVRES is true, then don't reset the ++ stack pointer, but move the base of the frame into r11 for use by ++ out-of-line register restore routines. */ ++ ++static void ++rs6000_emit_stack_reset (rs6000_stack_t *info, ++ rtx sp_reg_rtx, rtx frame_reg_rtx, ++ int sp_offset, bool savres) ++{ ++ /* This blockage is needed so that sched doesn't decide to move ++ the sp change before the register restores. */ ++ if (frame_reg_rtx != sp_reg_rtx ++ || (TARGET_SPE_ABI ++ && info->spe_64bit_regs_used != 0 ++ && info->first_gp_reg_save != 32)) ++ rs6000_emit_stack_tie (); ++ ++ if (frame_reg_rtx != sp_reg_rtx) ++ { ++ rs6000_emit_stack_tie (); ++ if (sp_offset != 0) ++ emit_insn (gen_addsi3 (sp_reg_rtx, frame_reg_rtx, ++ GEN_INT (sp_offset))); ++ else if (!savres) ++ emit_move_insn (sp_reg_rtx, frame_reg_rtx); ++ } ++ else if (sp_offset != 0) ++ { ++ /* If we are restoring registers out-of-line, we will be using the ++ "exit" variants of the restore routines, which will reset the ++ stack for us. But we do need to point r11 into the right place ++ for those routines. */ ++ rtx dest_reg = (savres ++ ? gen_rtx_REG (Pmode, 11) ++ : sp_reg_rtx); ++ ++ emit_insn (TARGET_32BIT ++ ? gen_addsi3 (dest_reg, sp_reg_rtx, ++ GEN_INT (sp_offset)) ++ : gen_adddi3 (dest_reg, sp_reg_rtx, ++ GEN_INT (sp_offset))); ++ } ++} ++ ++/* Construct a parallel rtx describing the effect of a call to an ++ out-of-line register save/restore routine. */ ++ ++static rtx ++rs6000_make_savres_rtx (rs6000_stack_t *info, ++ rtx frame_reg_rtx, int save_area_offset, ++ enum machine_mode reg_mode, ++ bool savep, bool gpr, bool exitp) ++{ ++ int i; ++ int offset, start_reg, end_reg, n_regs; ++ int reg_size = GET_MODE_SIZE (reg_mode); ++ rtx sym; ++ rtvec p; ++ ++ offset = 0; ++ start_reg = (gpr ++ ? info->first_gp_reg_save ++ : info->first_fp_reg_save); ++ end_reg = gpr ? 32 : 64; ++ n_regs = end_reg - start_reg; ++ p = rtvec_alloc ((exitp ? 4 : 3) + n_regs); ++ ++ /* If we're saving registers, then we should never say we're exiting. 
*/ ++ gcc_assert ((savep && !exitp) || !savep); ++ ++ if (exitp) ++ RTVEC_ELT (p, offset++) = gen_rtx_RETURN (VOIDmode); ++ ++ RTVEC_ELT (p, offset++) ++ = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 65)); ++ ++ sym = rs6000_savres_routine_sym (info, savep, gpr, exitp); ++ RTVEC_ELT (p, offset++) = gen_rtx_USE (VOIDmode, sym); ++ RTVEC_ELT (p, offset++) = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 11)); ++ ++ for (i = 0; i < end_reg - start_reg; i++) ++ { ++ rtx addr, reg, mem; ++ reg = gen_rtx_REG (reg_mode, start_reg + i); ++ addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, ++ GEN_INT (save_area_offset + reg_size*i)); ++ mem = gen_frame_mem (reg_mode, addr); ++ ++ RTVEC_ELT (p, i + offset) = gen_rtx_SET (VOIDmode, ++ savep ? mem : reg, ++ savep ? reg : mem); ++ } ++ ++ return gen_rtx_PARALLEL (VOIDmode, p); ++} ++ + /* Determine whether the gp REG is really used. */ + + static bool +@@ -15585,6 +15807,93 @@ rs6000_reg_live_or_pic_offset_p (int reg + || (DEFAULT_ABI == ABI_DARWIN && flag_pic)))); + } + ++enum { ++ SAVRES_MULTIPLE = 0x1, ++ SAVRES_INLINE_FPRS = 0x2, ++ SAVRES_INLINE_GPRS = 0x4 ++}; ++ ++/* Determine the strategy for savings/restoring registers. */ ++ ++static int ++rs6000_savres_strategy (rs6000_stack_t *info, bool savep, ++ int using_static_chain_p, int sibcall) ++{ ++ bool using_multiple_p; ++ bool common; ++ bool savres_fprs_inline; ++ bool savres_gprs_inline; ++ bool noclobber_global_gprs ++ = no_global_regs_above (info->first_gp_reg_save, /*gpr=*/true); ++ ++ using_multiple_p = (TARGET_MULTIPLE && ! TARGET_POWERPC64 ++ && (!TARGET_SPE_ABI ++ || info->spe_64bit_regs_used == 0) ++ && info->first_gp_reg_save < 31 ++ && noclobber_global_gprs); ++ /* Don't bother to try to save things out-of-line if r11 is occupied ++ by the static chain. It would require too much fiddling and the ++ static chain is rarely used anyway. */ ++ common = (using_static_chain_p ++ || sibcall ++ || current_function_calls_eh_return ++ || !info->lr_save_p ++ || cfun->machine->ra_need_lr ++ || info->total_size > 32767); ++ savres_fprs_inline = (common ++ || info->first_fp_reg_save == 64 ++ || !no_global_regs_above (info->first_fp_reg_save, ++ /*gpr=*/false) ++ || FP_SAVE_INLINE (info->first_fp_reg_save)); ++ savres_gprs_inline = (common ++ /* Saving CR interferes with the exit routines ++ used on the SPE, so just punt here. */ ++ || (!savep ++ && TARGET_SPE_ABI ++ && info->spe_64bit_regs_used != 0 ++ && info->cr_save_p != 0) ++ || info->first_gp_reg_save == 32 ++ || !noclobber_global_gprs ++ || GP_SAVE_INLINE (info->first_gp_reg_save)); ++ ++ if (savep) ++ /* If we are going to use store multiple, then don't even bother ++ with the out-of-line routines, since the store-multiple instruction ++ will always be smaller. */ ++ savres_gprs_inline = savres_gprs_inline || using_multiple_p; ++ else ++ { ++ /* The situation is more complicated with load multiple. We'd ++ prefer to use the out-of-line routines for restores, since the ++ "exit" out-of-line routines can handle the restore of LR and ++ the frame teardown. But we can only use the out-of-line ++ routines if we know that we've used store multiple or ++ out-of-line routines in the prologue, i.e. if we've saved all ++ the registers from first_gp_reg_save. Otherwise, we risk ++ loading garbage from the stack. Furthermore, we can only use ++ the "exit" out-of-line gpr restore if we haven't saved any ++ fprs. 
*/ ++ bool saved_all = !savres_gprs_inline || using_multiple_p; ++ ++ if (saved_all && info->first_fp_reg_save != 64) ++ /* We can't use the exit routine; use load multiple if it's ++ available. */ ++ savres_gprs_inline = savres_gprs_inline || using_multiple_p; ++ } ++ ++ /* Code intended for use in shared libraries cannot be reliably linked ++ with out-of-line prologues and epilogues. */ ++ if (flag_pic) ++ { ++ savres_gprs_inline = 1; ++ savres_fprs_inline = 1; ++ } ++ ++ return (using_multiple_p ++ | (savres_fprs_inline << 1) ++ | (savres_gprs_inline << 2)); ++} ++ + /* Emit function prologue as insns. */ + + void +@@ -15598,8 +15907,13 @@ rs6000_emit_prologue (void) + rtx frame_reg_rtx = sp_reg_rtx; + rtx cr_save_rtx = NULL_RTX; + rtx insn; ++ int strategy; + int saving_FPRs_inline; ++ int saving_GPRs_inline; + int using_store_multiple; ++ int using_static_chain_p = (cfun->static_chain_decl != NULL_TREE ++ && df_regs_ever_live_p (STATIC_CHAIN_REGNUM) ++ && call_used_regs[STATIC_CHAIN_REGNUM]); + HOST_WIDE_INT sp_offset = 0; + + if (TARGET_FIX_AND_CONTINUE) +@@ -15622,15 +15936,12 @@ rs6000_emit_prologue (void) + reg_size = 8; + } + +- using_store_multiple = (TARGET_MULTIPLE && ! TARGET_POWERPC64 +- && (!TARGET_SPE_ABI +- || info->spe_64bit_regs_used == 0) +- && info->first_gp_reg_save < 31 +- && no_global_regs_above (info->first_gp_reg_save)); +- saving_FPRs_inline = (info->first_fp_reg_save == 64 +- || FP_SAVE_INLINE (info->first_fp_reg_save) +- || current_function_calls_eh_return +- || cfun->machine->ra_need_lr); ++ strategy = rs6000_savres_strategy (info, /*savep=*/true, ++ /*static_chain_p=*/using_static_chain_p, ++ /*sibcall=*/0); ++ using_store_multiple = strategy & SAVRES_MULTIPLE; ++ saving_FPRs_inline = strategy & SAVRES_INLINE_FPRS; ++ saving_GPRs_inline = strategy & SAVRES_INLINE_GPRS; + + /* For V.4, update stack before we do any saving and set back pointer. */ + if (! WORLD_SAVE_P (info) +@@ -15638,17 +15949,24 @@ rs6000_emit_prologue (void) + && (DEFAULT_ABI == ABI_V4 + || current_function_calls_eh_return)) + { ++ bool need_r11 = (TARGET_SPE ++ ? (!saving_GPRs_inline ++ && info->spe_64bit_regs_used == 0) ++ : (!saving_FPRs_inline || !saving_GPRs_inline)); + if (info->total_size < 32767) + sp_offset = info->total_size; + else +- frame_reg_rtx = frame_ptr_rtx; ++ frame_reg_rtx = (need_r11 ++ ? gen_rtx_REG (Pmode, 11) ++ : frame_ptr_rtx); + rs6000_emit_allocate_stack (info->total_size, + (frame_reg_rtx != sp_reg_rtx + && (info->cr_save_p + || info->lr_save_p + || info->first_fp_reg_save < 64 + || info->first_gp_reg_save < 32 +- ))); ++ )), ++ need_r11); + if (frame_reg_rtx != sp_reg_rtx) + rs6000_emit_stack_tie (); + } +@@ -15825,40 +16143,147 @@ rs6000_emit_prologue (void) + } + else if (!WORLD_SAVE_P (info) && info->first_fp_reg_save != 64) + { ++ rtx par; ++ ++ par = rs6000_make_savres_rtx (info, frame_reg_rtx, ++ info->fp_save_offset + sp_offset, ++ DFmode, ++ /*savep=*/true, /*gpr=*/false, ++ /*exitp=*/false); ++ insn = emit_insn (par); ++ rs6000_frame_related (insn, frame_ptr_rtx, info->total_size, ++ NULL_RTX, NULL_RTX); ++ } ++ ++ /* Save GPRs. This is done as a PARALLEL if we are using ++ the store-multiple instructions. 
*/ ++ if (!WORLD_SAVE_P (info) ++ && TARGET_SPE_ABI ++ && info->spe_64bit_regs_used != 0 ++ && info->first_gp_reg_save != 32) ++ { + int i; +- char rname[30]; +- const char *alloc_rname; +- rtvec p; +- p = rtvec_alloc (2 + 64 - info->first_fp_reg_save); ++ rtx spe_save_area_ptr; ++ ++ /* Determine whether we can address all of the registers that need ++ to be saved with an offset from the stack pointer that fits in ++ the small const field for SPE memory instructions. */ ++ int spe_regs_addressable_via_sp ++ = (SPE_CONST_OFFSET_OK(info->spe_gp_save_offset + sp_offset ++ + (32 - info->first_gp_reg_save - 1) * reg_size) ++ && saving_GPRs_inline); ++ int spe_offset; ++ ++ if (spe_regs_addressable_via_sp) ++ { ++ spe_save_area_ptr = frame_reg_rtx; ++ spe_offset = info->spe_gp_save_offset + sp_offset; ++ } ++ else ++ { ++ /* Make r11 point to the start of the SPE save area. We need ++ to be careful here if r11 is holding the static chain. If ++ it is, then temporarily save it in r0. We would use r0 as ++ our base register here, but using r0 as a base register in ++ loads and stores means something different from what we ++ would like. */ ++ int ool_adjust = (saving_GPRs_inline ++ ? 0 ++ : (info->first_gp_reg_save ++ - (FIRST_SAVRES_REGISTER+1))*8); ++ HOST_WIDE_INT offset = (info->spe_gp_save_offset ++ + sp_offset - ool_adjust); + +- RTVEC_ELT (p, 0) = gen_rtx_CLOBBER (VOIDmode, +- gen_rtx_REG (Pmode, +- LR_REGNO)); +- sprintf (rname, "%s%d%s", SAVE_FP_PREFIX, +- info->first_fp_reg_save - 32, SAVE_FP_SUFFIX); +- alloc_rname = ggc_strdup (rname); +- RTVEC_ELT (p, 1) = gen_rtx_USE (VOIDmode, +- gen_rtx_SYMBOL_REF (Pmode, +- alloc_rname)); +- for (i = 0; i < 64 - info->first_fp_reg_save; i++) ++ if (using_static_chain_p) ++ { ++ rtx r0 = gen_rtx_REG (Pmode, 0); ++ gcc_assert (info->first_gp_reg_save > 11); ++ ++ emit_move_insn (r0, gen_rtx_REG (Pmode, 11)); ++ } ++ ++ spe_save_area_ptr = gen_rtx_REG (Pmode, 11); ++ insn = emit_insn (gen_addsi3 (spe_save_area_ptr, ++ frame_reg_rtx, ++ GEN_INT (offset))); ++ /* We need to make sure the move to r11 gets noted for ++ properly outputting unwind information. */ ++ if (!saving_GPRs_inline) ++ rs6000_frame_related (insn, frame_reg_rtx, offset, ++ NULL_RTX, NULL_RTX); ++ spe_offset = 0; ++ } ++ ++ if (saving_GPRs_inline) + { +- rtx addr, reg, mem; +- reg = gen_rtx_REG (DFmode, info->first_fp_reg_save + i); +- addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, +- GEN_INT (info->fp_save_offset +- + sp_offset + 8*i)); +- mem = gen_frame_mem (DFmode, addr); ++ for (i = 0; i < 32 - info->first_gp_reg_save; i++) ++ if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i)) ++ { ++ rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i); ++ rtx offset, addr, mem; ++ ++ /* We're doing all this to ensure that the offset fits into ++ the immediate offset of 'evstdd'. 
*/ ++ gcc_assert (SPE_CONST_OFFSET_OK (reg_size * i + spe_offset)); ++ ++ offset = GEN_INT (reg_size * i + spe_offset); ++ addr = gen_rtx_PLUS (Pmode, spe_save_area_ptr, offset); ++ mem = gen_rtx_MEM (V2SImode, addr); ++ ++ insn = emit_move_insn (mem, reg); ++ ++ rs6000_frame_related (insn, spe_save_area_ptr, ++ info->spe_gp_save_offset ++ + sp_offset + reg_size * i, ++ offset, const0_rtx); ++ } ++ } ++ else ++ { ++ rtx par; + +- RTVEC_ELT (p, i + 2) = gen_rtx_SET (VOIDmode, mem, reg); ++ par = rs6000_make_savres_rtx (info, gen_rtx_REG (Pmode, 11), ++ 0, reg_mode, ++ /*savep=*/true, /*gpr=*/true, ++ /*exitp=*/false); ++ insn = emit_insn (par); ++ rs6000_frame_related (insn, frame_ptr_rtx, info->total_size, ++ NULL_RTX, NULL_RTX); + } +- insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p)); ++ ++ ++ /* Move the static chain pointer back. */ ++ if (using_static_chain_p && !spe_regs_addressable_via_sp) ++ emit_move_insn (gen_rtx_REG (Pmode, 11), gen_rtx_REG (Pmode, 0)); ++ } ++ else if (!WORLD_SAVE_P (info) && !saving_GPRs_inline) ++ { ++ rtx par; ++ ++ /* Need to adjust r11 if we saved any FPRs. */ ++ if (info->first_fp_reg_save != 64) ++ { ++ rtx r11 = gen_rtx_REG (reg_mode, 11); ++ rtx offset = GEN_INT (info->total_size ++ + (-8 * (64-info->first_fp_reg_save))); ++ rtx ptr_reg = (sp_reg_rtx == frame_reg_rtx ++ ? sp_reg_rtx : r11); ++ ++ emit_insn (TARGET_32BIT ++ ? gen_addsi3 (r11, ptr_reg, offset) ++ : gen_adddi3 (r11, ptr_reg, offset)); ++ } ++ ++ par = rs6000_make_savres_rtx (info, frame_reg_rtx, ++ info->gp_save_offset + sp_offset, ++ reg_mode, ++ /*savep=*/true, /*gpr=*/true, ++ /*exitp=*/false); ++ insn = emit_insn (par); + rs6000_frame_related (insn, frame_ptr_rtx, info->total_size, + NULL_RTX, NULL_RTX); + } +- +- /* Save GPRs. This is done as a PARALLEL if we are using +- the store-multiple instructions. */ +- if (!WORLD_SAVE_P (info) && using_store_multiple) ++ else if (!WORLD_SAVE_P (info) && using_store_multiple) + { + rtvec p; + int i; +@@ -15879,80 +16304,6 @@ rs6000_emit_prologue (void) + rs6000_frame_related (insn, frame_ptr_rtx, info->total_size, + NULL_RTX, NULL_RTX); + } +- else if (!WORLD_SAVE_P (info) +- && TARGET_SPE_ABI +- && info->spe_64bit_regs_used != 0 +- && info->first_gp_reg_save != 32) +- { +- int i; +- rtx spe_save_area_ptr; +- int using_static_chain_p = (cfun->static_chain_decl != NULL_TREE +- && df_regs_ever_live_p (STATIC_CHAIN_REGNUM) +- && !call_used_regs[STATIC_CHAIN_REGNUM]); +- +- /* Determine whether we can address all of the registers that need +- to be saved with an offset from the stack pointer that fits in +- the small const field for SPE memory instructions. */ +- int spe_regs_addressable_via_sp +- = SPE_CONST_OFFSET_OK(info->spe_gp_save_offset + sp_offset +- + (32 - info->first_gp_reg_save - 1) * reg_size); +- int spe_offset; +- +- if (spe_regs_addressable_via_sp) +- { +- spe_save_area_ptr = frame_reg_rtx; +- spe_offset = info->spe_gp_save_offset + sp_offset; +- } +- else +- { +- /* Make r11 point to the start of the SPE save area. We need +- to be careful here if r11 is holding the static chain. If +- it is, then temporarily save it in r0. We would use r0 as +- our base register here, but using r0 as a base register in +- loads and stores means something different from what we +- would like. 
*/ +- if (using_static_chain_p) +- { +- rtx r0 = gen_rtx_REG (Pmode, 0); +- +- gcc_assert (info->first_gp_reg_save > 11); +- +- emit_move_insn (r0, gen_rtx_REG (Pmode, 11)); +- } +- +- spe_save_area_ptr = gen_rtx_REG (Pmode, 11); +- emit_insn (gen_addsi3 (spe_save_area_ptr, frame_reg_rtx, +- GEN_INT (info->spe_gp_save_offset + sp_offset))); +- +- spe_offset = 0; +- } +- +- for (i = 0; i < 32 - info->first_gp_reg_save; i++) +- if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i)) +- { +- rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i); +- rtx offset, addr, mem; +- +- /* We're doing all this to ensure that the offset fits into +- the immediate offset of 'evstdd'. */ +- gcc_assert (SPE_CONST_OFFSET_OK (reg_size * i + spe_offset)); +- +- offset = GEN_INT (reg_size * i + spe_offset); +- addr = gen_rtx_PLUS (Pmode, spe_save_area_ptr, offset); +- mem = gen_rtx_MEM (V2SImode, addr); +- +- insn = emit_move_insn (mem, reg); +- +- rs6000_frame_related (insn, spe_save_area_ptr, +- info->spe_gp_save_offset +- + sp_offset + reg_size * i, +- offset, const0_rtx); +- } +- +- /* Move the static chain pointer back. */ +- if (using_static_chain_p && !spe_regs_addressable_via_sp) +- emit_move_insn (gen_rtx_REG (Pmode, 11), gen_rtx_REG (Pmode, 0)); +- } + else if (!WORLD_SAVE_P (info)) + { + int i; +@@ -16052,7 +16403,8 @@ rs6000_emit_prologue (void) + (frame_reg_rtx != sp_reg_rtx + && ((info->altivec_size != 0) + || (info->vrsave_mask != 0) +- ))); ++ )), ++ FALSE); + if (frame_reg_rtx != sp_reg_rtx) + rs6000_emit_stack_tie (); + } +@@ -16208,8 +16560,7 @@ rs6000_output_function_prologue (FILE *f + && !FP_SAVE_INLINE (info->first_fp_reg_save)) + fprintf (file, "\t.extern %s%d%s\n\t.extern %s%d%s\n", + SAVE_FP_PREFIX, info->first_fp_reg_save - 32, SAVE_FP_SUFFIX, +- RESTORE_FP_PREFIX, info->first_fp_reg_save - 32, +- RESTORE_FP_SUFFIX); ++ RESTORE_FP_PREFIX, info->first_fp_reg_save - 32, RESTORE_FP_SUFFIX); + + /* Write .extern for AIX common mode routines, if needed. */ + if (! TARGET_POWER && ! TARGET_POWERPC && ! common_mode_defined) +@@ -16253,6 +16604,54 @@ rs6000_output_function_prologue (FILE *f + rs6000_pic_labelno++; + } + ++/* Reload CR from REG. */ ++ ++static void ++rs6000_restore_saved_cr (rtx reg, int using_mfcr_multiple) ++{ ++ int count = 0; ++ int i; ++ ++ if (using_mfcr_multiple) ++ { ++ for (i = 0; i < 8; i++) ++ if (df_regs_ever_live_p (CR0_REGNO+i) && ! call_used_regs[CR0_REGNO+i]) ++ count++; ++ gcc_assert (count); ++ } ++ ++ if (using_mfcr_multiple && count > 1) ++ { ++ rtvec p; ++ int ndx; ++ ++ p = rtvec_alloc (count); ++ ++ ndx = 0; ++ for (i = 0; i < 8; i++) ++ if (df_regs_ever_live_p (CR0_REGNO+i) && ! call_used_regs[CR0_REGNO+i]) ++ { ++ rtvec r = rtvec_alloc (2); ++ RTVEC_ELT (r, 0) = reg; ++ RTVEC_ELT (r, 1) = GEN_INT (1 << (7-i)); ++ RTVEC_ELT (p, ndx) = ++ gen_rtx_SET (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO+i), ++ gen_rtx_UNSPEC (CCmode, r, UNSPEC_MOVESI_TO_CR)); ++ ndx++; ++ } ++ emit_insn (gen_rtx_PARALLEL (VOIDmode, p)); ++ gcc_assert (ndx == count); ++ } ++ else ++ for (i = 0; i < 8; i++) ++ if (df_regs_ever_live_p (CR0_REGNO+i) && ! call_used_regs[CR0_REGNO+i]) ++ { ++ emit_insn (gen_movsi_to_cr_one (gen_rtx_REG (CCmode, ++ CR0_REGNO+i), ++ reg)); ++ } ++} ++ + /* Emit function epilogue as insns. 
+ + At present, dwarf2out_frame_debug_expr doesn't understand +@@ -16264,10 +16663,13 @@ void + rs6000_emit_epilogue (int sibcall) + { + rs6000_stack_t *info; ++ int restoring_GPRs_inline; + int restoring_FPRs_inline; + int using_load_multiple; + int using_mtcr_multiple; + int use_backchain_to_restore_sp; ++ int restore_lr; ++ int strategy; + int sp_offset = 0; + rtx sp_reg_rtx = gen_rtx_REG (Pmode, 1); + rtx frame_reg_rtx = sp_reg_rtx; +@@ -16283,15 +16685,11 @@ rs6000_emit_epilogue (int sibcall) + reg_size = 8; + } + +- using_load_multiple = (TARGET_MULTIPLE && ! TARGET_POWERPC64 +- && (!TARGET_SPE_ABI +- || info->spe_64bit_regs_used == 0) +- && info->first_gp_reg_save < 31 +- && no_global_regs_above (info->first_gp_reg_save)); +- restoring_FPRs_inline = (sibcall +- || current_function_calls_eh_return +- || info->first_fp_reg_save == 64 +- || FP_SAVE_INLINE (info->first_fp_reg_save)); ++ strategy = rs6000_savres_strategy (info, /*savep=*/false, ++ /*static_chain_p=*/0, sibcall); ++ using_load_multiple = strategy & SAVRES_MULTIPLE; ++ restoring_FPRs_inline = strategy & SAVRES_INLINE_FPRS; ++ restoring_GPRs_inline = strategy & SAVRES_INLINE_GPRS; + use_backchain_to_restore_sp = (frame_pointer_needed + || current_function_calls_alloca + || info->total_size > 32767); +@@ -16299,6 +16697,10 @@ rs6000_emit_epilogue (int sibcall) + || rs6000_cpu == PROCESSOR_PPC603 + || rs6000_cpu == PROCESSOR_PPC750 + || optimize_size); ++ restore_lr = (info->lr_save_p ++ && (restoring_GPRs_inline ++ || (restoring_FPRs_inline ++ && info->first_fp_reg_save < 64))); + + if (WORLD_SAVE_P (info)) + { +@@ -16537,8 +16939,9 @@ rs6000_emit_epilogue (int sibcall) + emit_insn (generate_set_vrsave (reg, info, 1)); + } + +- /* Get the old lr if we saved it. */ +- if (info->lr_save_p) ++ /* Get the old lr if we saved it. If we are restoring registers ++ out-of-line, then the out-of-line routines can do this for us. */ ++ if (restore_lr && restoring_GPRs_inline) + { + rtx mem = gen_frame_mem_offset (Pmode, frame_reg_rtx, + info->lr_save_offset + sp_offset); +@@ -16557,7 +16960,7 @@ rs6000_emit_epilogue (int sibcall) + } + + /* Set LR here to try to overlap restores below. */ +- if (info->lr_save_p) ++ if (restore_lr && restoring_GPRs_inline) + emit_move_insn (gen_rtx_REG (Pmode, LR_REGNO), + gen_rtx_REG (Pmode, 0)); + +@@ -16593,35 +16996,17 @@ rs6000_emit_epilogue (int sibcall) + + /* Restore GPRs. This is done as a PARALLEL if we are using + the load-multiple instructions. */ +- if (using_load_multiple) +- { +- rtvec p; +- p = rtvec_alloc (32 - info->first_gp_reg_save); +- for (i = 0; i < 32 - info->first_gp_reg_save; i++) +- { +- rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, +- GEN_INT (info->gp_save_offset +- + sp_offset +- + reg_size * i)); +- rtx mem = gen_frame_mem (reg_mode, addr); +- +- RTVEC_ELT (p, i) = +- gen_rtx_SET (VOIDmode, +- gen_rtx_REG (reg_mode, info->first_gp_reg_save + i), +- mem); +- } +- emit_insn (gen_rtx_PARALLEL (VOIDmode, p)); +- } +- else if (TARGET_SPE_ABI +- && info->spe_64bit_regs_used != 0 +- && info->first_gp_reg_save != 32) ++ if (TARGET_SPE_ABI ++ && info->spe_64bit_regs_used != 0 ++ && info->first_gp_reg_save != 32) + { + /* Determine whether we can address all of the registers that need + to be saved with an offset from the stack pointer that fits in + the small const field for SPE memory instructions. 
*/ + int spe_regs_addressable_via_sp +- = SPE_CONST_OFFSET_OK(info->spe_gp_save_offset + sp_offset +- + (32 - info->first_gp_reg_save - 1) * reg_size); ++ = (SPE_CONST_OFFSET_OK(info->spe_gp_save_offset + sp_offset ++ + (32 - info->first_gp_reg_save - 1) * reg_size) ++ && restoring_GPRs_inline); + int spe_offset; + + if (spe_regs_addressable_via_sp) +@@ -16633,10 +17018,17 @@ rs6000_emit_epilogue (int sibcall) + not clobbering it when we were saving registers in the prologue. + There's no need to worry here because the static chain is passed + anew to every function. */ ++ int ool_adjust = (restoring_GPRs_inline ++ ? 0 ++ : (info->first_gp_reg_save ++ - (FIRST_SAVRES_REGISTER+1))*8); ++ + if (frame_reg_rtx == sp_reg_rtx) + frame_reg_rtx = gen_rtx_REG (Pmode, 11); + emit_insn (gen_addsi3 (frame_reg_rtx, old_frame_reg_rtx, +- GEN_INT (info->spe_gp_save_offset + sp_offset))); ++ GEN_INT (info->spe_gp_save_offset ++ + sp_offset ++ - ool_adjust))); + /* Keep the invariant that frame_reg_rtx + sp_offset points + at the top of the stack frame. */ + sp_offset = -info->spe_gp_save_offset; +@@ -16644,26 +17036,80 @@ rs6000_emit_epilogue (int sibcall) + spe_offset = 0; + } + +- for (i = 0; i < 32 - info->first_gp_reg_save; i++) +- if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i)) +- { +- rtx offset, addr, mem; ++ if (restoring_GPRs_inline) ++ { ++ for (i = 0; i < 32 - info->first_gp_reg_save; i++) ++ if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i)) ++ { ++ rtx offset, addr, mem; + +- /* We're doing all this to ensure that the immediate offset +- fits into the immediate field of 'evldd'. */ +- gcc_assert (SPE_CONST_OFFSET_OK (spe_offset + reg_size * i)); +- +- offset = GEN_INT (spe_offset + reg_size * i); +- addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, offset); +- mem = gen_rtx_MEM (V2SImode, addr); ++ /* We're doing all this to ensure that the immediate offset ++ fits into the immediate field of 'evldd'. */ ++ gcc_assert (SPE_CONST_OFFSET_OK (spe_offset + reg_size * i)); ++ ++ offset = GEN_INT (spe_offset + reg_size * i); ++ addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, offset); ++ mem = gen_rtx_MEM (V2SImode, addr); + +- emit_move_insn (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i), +- mem); +- } ++ emit_move_insn (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i), ++ mem); ++ } ++ } ++ else ++ { ++ rtx par; ++ ++ par = rs6000_make_savres_rtx (info, gen_rtx_REG (Pmode, 11), ++ 0, reg_mode, ++ /*savep=*/false, /*gpr=*/true, ++ /*exitp=*/true); ++ emit_jump_insn (par); ++ ++ /* We don't want anybody else emitting things after we jumped ++ back. */ ++ return; ++ } + } +- else +- for (i = 0; i < 32 - info->first_gp_reg_save; i++) +- if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i)) ++ else if (!restoring_GPRs_inline) ++ { ++ /* We are jumping to an out-of-line function. */ ++ bool can_use_exit = info->first_fp_reg_save == 64; ++ rtx par; ++ ++ /* Emit stack reset code if we need it. 
*/ ++ if (can_use_exit) ++ rs6000_emit_stack_reset (info, sp_reg_rtx, frame_reg_rtx, ++ sp_offset, can_use_exit); ++ else ++ emit_insn (gen_addsi3 (gen_rtx_REG (Pmode, 11), ++ sp_reg_rtx, ++ GEN_INT (sp_offset - info->fp_size))); ++ ++ par = rs6000_make_savres_rtx (info, frame_reg_rtx, ++ info->gp_save_offset, reg_mode, ++ /*savep=*/false, /*gpr=*/true, ++ /*exitp=*/can_use_exit); ++ ++ if (can_use_exit) ++ { ++ if (info->cr_save_p) ++ rs6000_restore_saved_cr (gen_rtx_REG (SImode, 12), ++ using_mtcr_multiple); ++ ++ emit_jump_insn (par); ++ ++ /* We don't want anybody else emitting things after we jumped ++ back. */ ++ return; ++ } ++ else ++ emit_insn (par); ++ } ++ else if (using_load_multiple) ++ { ++ rtvec p; ++ p = rtvec_alloc (32 - info->first_gp_reg_save); ++ for (i = 0; i < 32 - info->first_gp_reg_save; i++) + { + rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, + GEN_INT (info->gp_save_offset +@@ -16671,9 +17117,40 @@ rs6000_emit_epilogue (int sibcall) + + reg_size * i)); + rtx mem = gen_frame_mem (reg_mode, addr); + +- emit_move_insn (gen_rtx_REG (reg_mode, +- info->first_gp_reg_save + i), mem); ++ RTVEC_ELT (p, i) = ++ gen_rtx_SET (VOIDmode, ++ gen_rtx_REG (reg_mode, info->first_gp_reg_save + i), ++ mem); + } ++ emit_insn (gen_rtx_PARALLEL (VOIDmode, p)); ++ } ++ else ++ { ++ for (i = 0; i < 32 - info->first_gp_reg_save; i++) ++ if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i)) ++ { ++ rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, ++ GEN_INT (info->gp_save_offset ++ + sp_offset ++ + reg_size * i)); ++ rtx mem = gen_frame_mem (reg_mode, addr); ++ ++ emit_move_insn (gen_rtx_REG (reg_mode, ++ info->first_gp_reg_save + i), mem); ++ } ++ } ++ ++ if (restore_lr && !restoring_GPRs_inline) ++ { ++ rtx mem = gen_frame_mem_offset (Pmode, frame_reg_rtx, ++ info->lr_save_offset + sp_offset); ++ ++ emit_move_insn (gen_rtx_REG (Pmode, 0), mem); ++ } ++ ++ if (restore_lr && !restoring_GPRs_inline) ++ emit_move_insn (gen_rtx_REG (Pmode, LR_REGNO), ++ gen_rtx_REG (Pmode, 0)); + + /* Restore fpr's if we need to do it without calling a function. */ + if (restoring_FPRs_inline) +@@ -16695,69 +17172,12 @@ rs6000_emit_epilogue (int sibcall) + + /* If we saved cr, restore it here. Just those that were used. */ + if (info->cr_save_p) +- { +- rtx r12_rtx = gen_rtx_REG (SImode, 12); +- int count = 0; +- +- if (using_mtcr_multiple) +- { +- for (i = 0; i < 8; i++) +- if (df_regs_ever_live_p (CR0_REGNO+i) && ! call_used_regs[CR0_REGNO+i]) +- count++; +- gcc_assert (count); +- } +- +- if (using_mtcr_multiple && count > 1) +- { +- rtvec p; +- int ndx; +- +- p = rtvec_alloc (count); +- +- ndx = 0; +- for (i = 0; i < 8; i++) +- if (df_regs_ever_live_p (CR0_REGNO+i) && ! call_used_regs[CR0_REGNO+i]) +- { +- rtvec r = rtvec_alloc (2); +- RTVEC_ELT (r, 0) = r12_rtx; +- RTVEC_ELT (r, 1) = GEN_INT (1 << (7-i)); +- RTVEC_ELT (p, ndx) = +- gen_rtx_SET (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO+i), +- gen_rtx_UNSPEC (CCmode, r, UNSPEC_MOVESI_TO_CR)); +- ndx++; +- } +- emit_insn (gen_rtx_PARALLEL (VOIDmode, p)); +- gcc_assert (ndx == count); +- } +- else +- for (i = 0; i < 8; i++) +- if (df_regs_ever_live_p (CR0_REGNO+i) && ! call_used_regs[CR0_REGNO+i]) +- { +- emit_insn (gen_movsi_to_cr_one (gen_rtx_REG (CCmode, +- CR0_REGNO+i), +- r12_rtx)); +- } +- } ++ rs6000_restore_saved_cr (gen_rtx_REG (SImode, 12), using_mtcr_multiple); + + /* If this is V.4, unwind the stack pointer after all of the loads + have been done. 
*/ +- if (frame_reg_rtx != sp_reg_rtx) +- { +- /* This blockage is needed so that sched doesn't decide to move +- the sp change before the register restores. */ +- rs6000_emit_stack_tie (); +- if (sp_offset != 0) +- emit_insn (gen_addsi3 (sp_reg_rtx, frame_reg_rtx, +- GEN_INT (sp_offset))); +- else +- emit_move_insn (sp_reg_rtx, frame_reg_rtx); +- } +- else if (sp_offset != 0) +- emit_insn (TARGET_32BIT +- ? gen_addsi3 (sp_reg_rtx, sp_reg_rtx, +- GEN_INT (sp_offset)) +- : gen_adddi3 (sp_reg_rtx, sp_reg_rtx, +- GEN_INT (sp_offset))); ++ rs6000_emit_stack_reset (info, sp_reg_rtx, frame_reg_rtx, ++ sp_offset, !restoring_FPRs_inline); + + if (current_function_calls_eh_return) + { +@@ -16771,30 +17191,30 @@ rs6000_emit_epilogue (int sibcall) + { + rtvec p; + if (! restoring_FPRs_inline) +- p = rtvec_alloc (3 + 64 - info->first_fp_reg_save); ++ p = rtvec_alloc (4 + 64 - info->first_fp_reg_save); + else + p = rtvec_alloc (2); + + RTVEC_ELT (p, 0) = gen_rtx_RETURN (VOIDmode); +- RTVEC_ELT (p, 1) = gen_rtx_USE (VOIDmode, +- gen_rtx_REG (Pmode, +- LR_REGNO)); ++ RTVEC_ELT (p, 1) = (restoring_FPRs_inline ++ ? gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 65)) ++ : gen_rtx_CLOBBER (VOIDmode, ++ gen_rtx_REG (Pmode, 65))); + + /* If we have to restore more than two FP registers, branch to the + restore function. It will return to our caller. */ + if (! restoring_FPRs_inline) + { + int i; +- char rname[30]; +- const char *alloc_rname; +- +- sprintf (rname, "%s%d%s", RESTORE_FP_PREFIX, +- info->first_fp_reg_save - 32, RESTORE_FP_SUFFIX); +- alloc_rname = ggc_strdup (rname); +- RTVEC_ELT (p, 2) = gen_rtx_USE (VOIDmode, +- gen_rtx_SYMBOL_REF (Pmode, +- alloc_rname)); ++ rtx sym; + ++ sym = rs6000_savres_routine_sym (info, ++ /*savep=*/false, ++ /*gpr=*/false, ++ /*exitp=*/true); ++ RTVEC_ELT (p, 2) = gen_rtx_USE (VOIDmode, sym); ++ RTVEC_ELT (p, 3) = gen_rtx_USE (VOIDmode, ++ gen_rtx_REG (Pmode, 11)); + for (i = 0; i < 64 - info->first_fp_reg_save; i++) + { + rtx addr, mem; +@@ -16802,7 +17222,7 @@ rs6000_emit_epilogue (int sibcall) + GEN_INT (info->fp_save_offset + 8*i)); + mem = gen_frame_mem (DFmode, addr); + +- RTVEC_ELT (p, i+3) = ++ RTVEC_ELT (p, i+4) = + gen_rtx_SET (VOIDmode, + gen_rtx_REG (DFmode, info->first_fp_reg_save + i), + mem); +@@ -18611,6 +19031,9 @@ rs6000_issue_rate (void) + case CPU_PPC7400: + case CPU_PPC8540: + case CPU_CELL: ++ case CPU_PPCE300C2: ++ case CPU_PPCE300C3: ++ case CPU_PPCE500MC: + return 2; + case CPU_RIOS2: + case CPU_PPC604: +@@ -21814,8 +22237,8 @@ rs6000_function_value (const_tree valtyp + && ALTIVEC_VECTOR_MODE (mode)) + regno = ALTIVEC_ARG_RETURN; + else if (TARGET_E500_DOUBLE && TARGET_HARD_FLOAT +- && (mode == DFmode || mode == DDmode || mode == DCmode +- || mode == TFmode || mode == TDmode || mode == TCmode)) ++ && (mode == DFmode || mode == DCmode ++ || mode == TFmode || mode == TCmode)) + return spe_build_register_parallel (mode, GP_ARG_RETURN); + else + regno = GP_ARG_RETURN; +@@ -21856,8 +22279,8 @@ rs6000_libcall_value (enum machine_mode + else if (COMPLEX_MODE_P (mode) && targetm.calls.split_complex_arg) + return rs6000_complex_function_value (mode); + else if (TARGET_E500_DOUBLE && TARGET_HARD_FLOAT +- && (mode == DFmode || mode == DDmode || mode == DCmode +- || mode == TFmode || mode == TDmode || mode == TCmode)) ++ && (mode == DFmode || mode == DCmode ++ || mode == TFmode || mode == TCmode)) + return spe_build_register_parallel (mode, GP_ARG_RETURN); + else + regno = GP_ARG_RETURN; +@@ -21904,19 +22327,22 @@ rs6000_is_opaque_type (const_tree type) + { + 
return (type == opaque_V2SI_type_node + || type == opaque_V2SF_type_node +- || type == opaque_p_V2SI_type_node + || type == opaque_V4SI_type_node); + } + + static rtx + rs6000_dwarf_register_span (rtx reg) + { +- unsigned regno; ++ rtx parts[8]; ++ int i, words; ++ unsigned regno = REGNO (reg); ++ enum machine_mode mode = GET_MODE (reg); + + if (TARGET_SPE ++ && regno < 32 + && (SPE_VECTOR_MODE (GET_MODE (reg)) +- || (TARGET_E500_DOUBLE +- && (GET_MODE (reg) == DFmode || GET_MODE (reg) == DDmode)))) ++ || (TARGET_E500_DOUBLE && FLOAT_MODE_P (mode) ++ && mode != SFmode && mode != SDmode && mode != SCmode))) + ; + else + return NULL_RTX; +@@ -21926,15 +22352,23 @@ rs6000_dwarf_register_span (rtx reg) + /* The duality of the SPE register size wreaks all kinds of havoc. + This is a way of distinguishing r0 in 32-bits from r0 in + 64-bits. */ +- return +- gen_rtx_PARALLEL (VOIDmode, +- BYTES_BIG_ENDIAN +- ? gen_rtvec (2, +- gen_rtx_REG (SImode, regno + 1200), +- gen_rtx_REG (SImode, regno)) +- : gen_rtvec (2, +- gen_rtx_REG (SImode, regno), +- gen_rtx_REG (SImode, regno + 1200))); ++ words = (GET_MODE_SIZE (mode) + UNITS_PER_FP_WORD - 1) / UNITS_PER_FP_WORD; ++ gcc_assert (words <= 4); ++ for (i = 0; i < words; i++, regno++) ++ { ++ if (BYTES_BIG_ENDIAN) ++ { ++ parts[2 * i] = gen_rtx_REG (SImode, regno + 1200); ++ parts[2 * i + 1] = gen_rtx_REG (SImode, regno); ++ } ++ else ++ { ++ parts[2 * i] = gen_rtx_REG (SImode, regno); ++ parts[2 * i + 1] = gen_rtx_REG (SImode, regno + 1200); ++ } ++ } ++ ++ return gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (words * 2, parts)); + } + + /* Fill in sizes for SPE register high parts in table used by unwinder. */ +--- a/gcc/config/rs6000/rs6000.h ++++ b/gcc/config/rs6000/rs6000.h +@@ -117,6 +117,9 @@ + %{mcpu=G5: -mpower4 -maltivec} \ + %{mcpu=8540: -me500} \ + %{mcpu=8548: -me500} \ ++%{mcpu=e300c2: -mppc} \ ++%{mcpu=e300c3: -mppc -mpmr} \ ++%{mcpu=e500mc: -me500mc} \ + %{maltivec: -maltivec} \ + -many" + +@@ -262,6 +265,9 @@ enum processor_type + PROCESSOR_PPC7400, + PROCESSOR_PPC7450, + PROCESSOR_PPC8540, ++ PROCESSOR_PPCE300C2, ++ PROCESSOR_PPCE300C3, ++ PROCESSOR_PPCE500MC, + PROCESSOR_POWER4, + PROCESSOR_POWER5, + PROCESSOR_POWER6, +@@ -313,12 +319,15 @@ enum group_termination + }; + + /* Support for a compile-time default CPU, et cetera. The rules are: +- --with-cpu is ignored if -mcpu is specified. ++ --with-cpu is ignored if -mcpu is specified; likewise --with-cpu32 ++ and --with-cpu64. + --with-tune is ignored if -mtune is specified. + --with-float is ignored if -mhard-float or -msoft-float are + specified. 
*/ + #define OPTION_DEFAULT_SPECS \ +- {"cpu", "%{!mcpu=*:-mcpu=%(VALUE)}" }, \ ++ {"cpu", "%{mcpu=*|te500mc|te500v1|te500v2|te600:;:-mcpu=%(VALUE)}" }, \ ++ {"cpu32", "%{m64|mcpu=*|te500mc|te500v1|te500v2|te600:;:-mcpu=%(VALUE)}" }, \ ++ {"cpu64", "%{m32|mcpu=*|te500mc|te500v1|te500v2|te600:;:-mcpu=%(VALUE)}" }, \ + {"tune", "%{!mtune=*:-mtune=%(VALUE)}" }, \ + {"float", "%{!msoft-float:%{!mhard-float:-m%(VALUE)-float}}" } + +@@ -349,6 +358,8 @@ extern int rs6000_long_double_type_size; + extern int rs6000_ieeequad; + extern int rs6000_altivec_abi; + extern int rs6000_spe_abi; ++extern int rs6000_spe; ++extern int rs6000_isel; + extern int rs6000_float_gprs; + extern int rs6000_alignment_flags; + extern const char *rs6000_sched_insert_nops_str; +@@ -378,7 +389,7 @@ extern enum rs6000_nop_insertion rs6000_ + #define TARGET_SPE_ABI 0 + #define TARGET_SPE 0 + #define TARGET_E500 0 +-#define TARGET_ISEL 0 ++#define TARGET_ISEL rs6000_isel + #define TARGET_FPRS 1 + #define TARGET_E500_SINGLE 0 + #define TARGET_E500_DOUBLE 0 +@@ -561,7 +572,7 @@ extern enum rs6000_nop_insertion rs6000_ + #define LOCAL_ALIGNMENT(TYPE, ALIGN) \ + ((TARGET_ALTIVEC && TREE_CODE (TYPE) == VECTOR_TYPE) ? 128 : \ + (TARGET_E500_DOUBLE \ +- && (TYPE_MODE (TYPE) == DFmode || TYPE_MODE (TYPE) == DDmode)) ? 64 : \ ++ && TYPE_MODE (TYPE) == DFmode) ? 64 : \ + ((TARGET_SPE && TREE_CODE (TYPE) == VECTOR_TYPE \ + && SPE_VECTOR_MODE (TYPE_MODE (TYPE))) || (TARGET_PAIRED_FLOAT \ + && TREE_CODE (TYPE) == VECTOR_TYPE \ +@@ -587,7 +598,7 @@ extern enum rs6000_nop_insertion rs6000_ + fit into 1, whereas DI still needs two. */ + #define MEMBER_TYPE_FORCES_BLK(FIELD, MODE) \ + ((TARGET_SPE && TREE_CODE (TREE_TYPE (FIELD)) == VECTOR_TYPE) \ +- || (TARGET_E500_DOUBLE && ((MODE) == DFmode || (MODE) == DDmode))) ++ || (TARGET_E500_DOUBLE && (MODE) == DFmode)) + + /* A bit-field declared as `int' forces `int' alignment for the struct. */ + #define PCC_BITFIELD_TYPE_MATTERS 1 +@@ -596,6 +607,7 @@ extern enum rs6000_nop_insertion rs6000_ + Make vector constants quadword aligned. */ + #define CONSTANT_ALIGNMENT(EXP, ALIGN) \ + (TREE_CODE (EXP) == STRING_CST \ ++ && (STRICT_ALIGNMENT || !optimize_size) \ + && (ALIGN) < BITS_PER_WORD \ + ? BITS_PER_WORD \ + : (ALIGN)) +@@ -607,7 +619,7 @@ extern enum rs6000_nop_insertion rs6000_ + (TREE_CODE (TYPE) == VECTOR_TYPE ? ((TARGET_SPE_ABI \ + || TARGET_PAIRED_FLOAT) ? 64 : 128) \ + : (TARGET_E500_DOUBLE \ +- && (TYPE_MODE (TYPE) == DFmode || TYPE_MODE (TYPE) == DDmode)) ? 64 \ ++ && TYPE_MODE (TYPE) == DFmode) ? 64 \ + : TREE_CODE (TYPE) == ARRAY_TYPE \ + && TYPE_MODE (TREE_TYPE (TYPE)) == QImode \ + && (ALIGN) < BITS_PER_WORD ? BITS_PER_WORD : (ALIGN)) +@@ -731,8 +743,8 @@ extern enum rs6000_nop_insertion rs6000_ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \ + 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, \ + /* AltiVec registers. */ \ +- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \ +- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \ ++ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, \ ++ 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \ + 1, 1 \ + , 1, 1, 1 \ + } +@@ -750,8 +762,8 @@ extern enum rs6000_nop_insertion rs6000_ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \ + 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, \ + /* AltiVec registers. 
*/ \ +- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \ +- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \ ++ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, \ ++ 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \ + 0, 0 \ + , 0, 0, 0 \ + } +@@ -1189,7 +1201,7 @@ enum reg_class + (((CLASS) == FLOAT_REGS) \ + ? ((GET_MODE_SIZE (MODE) + UNITS_PER_FP_WORD - 1) / UNITS_PER_FP_WORD) \ + : (TARGET_E500_DOUBLE && (CLASS) == GENERAL_REGS \ +- && ((MODE) == DFmode || (MODE) == DDmode)) \ ++ && (MODE) == DFmode) \ + ? 1 \ + : ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)) + +--- a/gcc/config/rs6000/rs6000.md ++++ b/gcc/config/rs6000/rs6000.md +@@ -133,7 +133,7 @@ + ;; Processor type -- this attribute must exactly match the processor_type + ;; enumeration in rs6000.h. + +-(define_attr "cpu" "rios1,rios2,rs64a,mpccore,ppc403,ppc405,ppc440,ppc601,ppc603,ppc604,ppc604e,ppc620,ppc630,ppc750,ppc7400,ppc7450,ppc8540,power4,power5,power6,cell" ++(define_attr "cpu" "rios1,rios2,rs64a,mpccore,ppc403,ppc405,ppc440,ppc601,ppc603,ppc604,ppc604e,ppc620,ppc630,ppc750,ppc7400,ppc7450,ppc8540,ppce300c2,ppce300c3,ppce500mc,power4,power5,power6,cell" + (const (symbol_ref "rs6000_cpu_attr"))) + + +@@ -166,6 +166,8 @@ + (include "7xx.md") + (include "7450.md") + (include "8540.md") ++(include "e300c2c3.md") ++(include "e500mc.md") + (include "power4.md") + (include "power5.md") + (include "power6.md") +@@ -8887,7 +8889,7 @@ + rtx label = gen_label_rtx (); + if (TARGET_E500_DOUBLE) + { +- if (flag_unsafe_math_optimizations) ++ if (flag_finite_math_only && !flag_trapping_math) + emit_insn (gen_spe_abstf2_tst (operands[0], operands[1], label)); + else + emit_insn (gen_spe_abstf2_cmp (operands[0], operands[1], label)); +@@ -11642,7 +11644,7 @@ + + (define_expand "bltgt" + [(use (match_operand 0 "" ""))] +- "" ++ "! (TARGET_HARD_FLOAT && !TARGET_FPRS)" + "{ rs6000_emit_cbranch (LTGT, operands[0]); DONE; }") + + ;; For SNE, we would prefer that the xor/abs sequence be used for integers. +@@ -11776,7 +11778,7 @@ + + (define_expand "sltgt" + [(clobber (match_operand:SI 0 "gpc_reg_operand" ""))] +- "" ++ "! (TARGET_HARD_FLOAT && !TARGET_FPRS)" + "{ rs6000_emit_sCOND (LTGT, operands[0]); DONE; }") + + (define_expand "stack_protect_set" +@@ -12084,7 +12086,7 @@ + (define_insn "move_from_CR_gt_bit" + [(set (match_operand:SI 0 "gpc_reg_operand" "=r") + (unspec:SI [(match_operand 1 "cc_reg_operand" "y")] UNSPEC_MV_CR_GT))] +- "TARGET_E500" ++ "TARGET_HARD_FLOAT && !TARGET_FPRS" + "mfcr %0\;{rlinm|rlwinm} %0,%0,%D1,31,31" + [(set_attr "type" "mfcr") + (set_attr "length" "8")]) +@@ -14469,10 +14471,23 @@ + "{stm|stmw} %2,%1" + [(set_attr "type" "store_ux")]) + ++(define_insn "*save_gpregs_" ++ [(match_parallel 0 "any_parallel_operand" ++ [(clobber (reg:P 65)) ++ (use (match_operand:P 1 "symbol_ref_operand" "s")) ++ (use (reg:P 11)) ++ (set (match_operand:P 2 "memory_operand" "=m") ++ (match_operand:P 3 "gpc_reg_operand" "r"))])] ++ "" ++ "bl %z1" ++ [(set_attr "type" "branch") ++ (set_attr "length" "4")]) ++ + (define_insn "*save_fpregs_" + [(match_parallel 0 "any_parallel_operand" + [(clobber (reg:P 65)) +- (use (match_operand:P 1 "call_operand" "s")) ++ (use (match_operand:P 1 "symbol_ref_operand" "s")) ++ (use (reg:P 11)) + (set (match_operand:DF 2 "memory_operand" "=m") + (match_operand:DF 3 "gpc_reg_operand" "f"))])] + "" +@@ -14562,15 +14577,43 @@ + ; FIXME: This would probably be somewhat simpler if the Cygnus sibcall + ; stuff was in GCC. Oh, and "any_parallel_operand" is a bit flexible... 
+ ++(define_insn "*restore_gpregs_" ++ [(match_parallel 0 "any_parallel_operand" ++ [(clobber (match_operand:P 1 "register_operand" "=l")) ++ (use (match_operand:P 2 "symbol_ref_operand" "s")) ++ (use (reg:P 11)) ++ (set (match_operand:P 3 "gpc_reg_operand" "=r") ++ (match_operand:P 4 "memory_operand" "m"))])] ++ "" ++ "bl %z2" ++ [(set_attr "type" "branch") ++ (set_attr "length" "4")]) ++ ++(define_insn "*return_and_restore_gpregs_" ++ [(match_parallel 0 "any_parallel_operand" ++ [(return) ++ (clobber (match_operand:P 1 "register_operand" "=l")) ++ (use (match_operand:P 2 "symbol_ref_operand" "s")) ++ (use (reg:P 11)) ++ (set (match_operand:P 3 "gpc_reg_operand" "=r") ++ (match_operand:P 4 "memory_operand" "m"))])] ++ "" ++ "b %z2" ++ [(set_attr "type" "branch") ++ (set_attr "length" "4")]) ++ + (define_insn "*return_and_restore_fpregs_" + [(match_parallel 0 "any_parallel_operand" + [(return) +- (use (reg:P 65)) +- (use (match_operand:P 1 "call_operand" "s")) +- (set (match_operand:DF 2 "gpc_reg_operand" "=f") +- (match_operand:DF 3 "memory_operand" "m"))])] ++ (clobber (match_operand:P 1 "register_operand" "=l")) ++ (use (match_operand:P 2 "symbol_ref_operand" "s")) ++ (use (reg:P 11)) ++ (set (match_operand:DF 3 "gpc_reg_operand" "=f") ++ (match_operand:DF 4 "memory_operand" "m"))])] + "" +- "b %z1") ++ "b %z2" ++ [(set_attr "type" "branch") ++ (set_attr "length" "4")]) + + ; This is used in compiling the unwind routines. + (define_expand "eh_return" +@@ -14617,6 +14660,120 @@ + }" + [(set_attr "type" "load")]) + ++;;; Expanders for vector insn patterns shared between the SPE and TARGET_PAIRED systems. ++ ++(define_expand "absv2sf2" ++ [(set (match_operand:V2SF 0 "gpc_reg_operand" "") ++ (abs:V2SF (match_operand:V2SF 1 "gpc_reg_operand" "")))] ++ "TARGET_PAIRED_FLOAT || TARGET_SPE" ++ " ++{ ++ if (TARGET_SPE) ++ { ++ /* We need to make a note that we clobber SPEFSCR. */ ++ emit_insn (gen_rtx_SET (VOIDmode, operands[0], ++ gen_rtx_ABS (V2SFmode, operands[1]))); ++ emit_insn (gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, SPEFSCR_REGNO))); ++ DONE; ++ } ++}") ++ ++(define_expand "negv2sf2" ++ [(set (match_operand:V2SF 0 "gpc_reg_operand" "") ++ (neg:V2SF (match_operand:V2SF 1 "gpc_reg_operand" "")))] ++ "TARGET_PAIRED_FLOAT || TARGET_SPE" ++ " ++{ ++ if (TARGET_SPE) ++ { ++ /* We need to make a note that we clobber SPEFSCR. */ ++ emit_insn (gen_rtx_SET (VOIDmode, operands[0], ++ gen_rtx_NEG (V2SFmode, operands[1]))); ++ emit_insn (gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, SPEFSCR_REGNO))); ++ DONE; ++ } ++}") ++ ++(define_expand "addv2sf3" ++ [(set (match_operand:V2SF 0 "gpc_reg_operand" "") ++ (plus:V2SF (match_operand:V2SF 1 "gpc_reg_operand" "") ++ (match_operand:V2SF 2 "gpc_reg_operand" "")))] ++ "TARGET_PAIRED_FLOAT || TARGET_SPE" ++ " ++{ ++ if (TARGET_SPE) ++ { ++ /* We need to make a note that we clobber SPEFSCR. */ ++ rtx par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (2)); ++ ++ XVECEXP (par, 0, 0) = gen_rtx_SET (VOIDmode, operands[0], ++ gen_rtx_PLUS (V2SFmode, operands[1], operands[2])); ++ XVECEXP (par, 0, 1) = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, SPEFSCR_REGNO)); ++ emit_insn (par); ++ DONE; ++ } ++}") ++ ++(define_expand "subv2sf3" ++ [(set (match_operand:V2SF 0 "gpc_reg_operand" "") ++ (minus:V2SF (match_operand:V2SF 1 "gpc_reg_operand" "") ++ (match_operand:V2SF 2 "gpc_reg_operand" "")))] ++ "TARGET_PAIRED_FLOAT || TARGET_SPE" ++ " ++{ ++ if (TARGET_SPE) ++ { ++ /* We need to make a note that we clobber SPEFSCR. 
*/ ++ rtx par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (2)); ++ ++ XVECEXP (par, 0, 0) = gen_rtx_SET (VOIDmode, operands[0], ++ gen_rtx_MINUS (V2SFmode, operands[1], operands[2])); ++ XVECEXP (par, 0, 1) = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, SPEFSCR_REGNO)); ++ emit_insn (par); ++ DONE; ++ } ++}") ++ ++(define_expand "mulv2sf3" ++ [(set (match_operand:V2SF 0 "gpc_reg_operand" "") ++ (mult:V2SF (match_operand:V2SF 1 "gpc_reg_operand" "") ++ (match_operand:V2SF 2 "gpc_reg_operand" "")))] ++ "TARGET_PAIRED_FLOAT || TARGET_SPE" ++ " ++{ ++ if (TARGET_SPE) ++ { ++ /* We need to make a note that we clobber SPEFSCR. */ ++ rtx par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (2)); ++ ++ XVECEXP (par, 0, 0) = gen_rtx_SET (VOIDmode, operands[0], ++ gen_rtx_MULT (V2SFmode, operands[1], operands[2])); ++ XVECEXP (par, 0, 1) = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, SPEFSCR_REGNO)); ++ emit_insn (par); ++ DONE; ++ } ++}") ++ ++(define_expand "divv2sf3" ++ [(set (match_operand:V2SF 0 "gpc_reg_operand" "") ++ (div:V2SF (match_operand:V2SF 1 "gpc_reg_operand" "") ++ (match_operand:V2SF 2 "gpc_reg_operand" "")))] ++ "TARGET_PAIRED_FLOAT || TARGET_SPE" ++ " ++{ ++ if (TARGET_SPE) ++ { ++ /* We need to make a note that we clobber SPEFSCR. */ ++ rtx par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (2)); ++ ++ XVECEXP (par, 0, 0) = gen_rtx_SET (VOIDmode, operands[0], ++ gen_rtx_DIV (V2SFmode, operands[1], operands[2])); ++ XVECEXP (par, 0, 1) = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, SPEFSCR_REGNO)); ++ emit_insn (par); ++ DONE; ++ } ++}") ++ + + (include "sync.md") + (include "altivec.md") +--- a/gcc/config/rs6000/rs6000.opt ++++ b/gcc/config/rs6000/rs6000.opt +@@ -190,7 +190,7 @@ Target RejectNegative Joined + -mvrsave=yes/no Deprecated option. Use -mvrsave/-mno-vrsave instead + + misel +-Target Var(rs6000_isel) ++Target + Generate isel instructions + + misel= +@@ -198,7 +198,7 @@ Target RejectNegative Joined + -misel=yes/no Deprecated option. Use -misel/-mno-isel instead + + mspe +-Target Var(rs6000_spe) ++Target + Generate SPE SIMD instructions on E500 + + mpaired +--- a/gcc/config/rs6000/sol-ci.asm ++++ b/gcc/config/rs6000/sol-ci.asm +@@ -36,7 +36,6 @@ + # This file just supplies labeled starting points for the .got* and other + # special sections. It is linked in first before other modules. + +- .file "scrti.s" + .ident "GNU C scrti.s" + + #ifndef __powerpc64__ +--- a/gcc/config/rs6000/sol-cn.asm ++++ b/gcc/config/rs6000/sol-cn.asm +@@ -36,7 +36,6 @@ + # This file just supplies labeled ending points for the .got* and other + # special sections. It is linked in last after other modules. 
+ +- .file "scrtn.s" + .ident "GNU C scrtn.s" + + #ifndef __powerpc64__ +--- a/gcc/config/rs6000/spe.md ++++ b/gcc/config/rs6000/spe.md +@@ -164,7 +164,7 @@ + + ;; SPE SIMD instructions + +-(define_insn "spe_evabs" ++(define_insn "absv2si2" + [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r") + (abs:V2SI (match_operand:V2SI 1 "gpc_reg_operand" "r")))] + "TARGET_SPE" +@@ -181,7 +181,7 @@ + [(set_attr "type" "vecsimple") + (set_attr "length" "4")]) + +-(define_insn "spe_evand" ++(define_insn "andv2si3" + [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r") + (and:V2SI (match_operand:V2SI 1 "gpc_reg_operand" "r") + (match_operand:V2SI 2 "gpc_reg_operand" "r")))] +@@ -1898,7 +1898,7 @@ + [(set_attr "type" "veccomplex") + (set_attr "length" "4")]) + +-(define_insn "spe_evaddw" ++(define_insn "addv2si3" + [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r") + (plus:V2SI (match_operand:V2SI 1 "gpc_reg_operand" "r") + (match_operand:V2SI 2 "gpc_reg_operand" "r")))] +@@ -2028,7 +2028,7 @@ + [(set_attr "type" "veccomplex") + (set_attr "length" "4")]) + +-(define_insn "spe_evdivws" ++(define_insn "divv2si3" + [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r") + (div:V2SI (match_operand:V2SI 1 "gpc_reg_operand" "r") + (match_operand:V2SI 2 "gpc_reg_operand" "r"))) +@@ -2933,7 +2933,8 @@ + [(compare:CCFP (match_operand:SF 1 "gpc_reg_operand" "r") + (match_operand:SF 2 "gpc_reg_operand" "r"))] + 1000))] +- "TARGET_HARD_FLOAT && !TARGET_FPRS && !flag_unsafe_math_optimizations" ++ "TARGET_HARD_FLOAT && !TARGET_FPRS ++ && !(flag_finite_math_only && !flag_trapping_math)" + "efscmpeq %0,%1,%2" + [(set_attr "type" "veccmp")]) + +@@ -2943,7 +2944,8 @@ + [(compare:CCFP (match_operand:SF 1 "gpc_reg_operand" "r") + (match_operand:SF 2 "gpc_reg_operand" "r"))] + 1001))] +- "TARGET_HARD_FLOAT && !TARGET_FPRS && flag_unsafe_math_optimizations" ++ "TARGET_HARD_FLOAT && !TARGET_FPRS ++ && flag_finite_math_only && !flag_trapping_math" + "efststeq %0,%1,%2" + [(set_attr "type" "veccmpsimple")]) + +@@ -2953,7 +2955,8 @@ + [(compare:CCFP (match_operand:SF 1 "gpc_reg_operand" "r") + (match_operand:SF 2 "gpc_reg_operand" "r"))] + 1002))] +- "TARGET_HARD_FLOAT && !TARGET_FPRS && !flag_unsafe_math_optimizations" ++ "TARGET_HARD_FLOAT && !TARGET_FPRS ++ && !(flag_finite_math_only && !flag_trapping_math)" + "efscmpgt %0,%1,%2" + [(set_attr "type" "veccmp")]) + +@@ -2963,7 +2966,8 @@ + [(compare:CCFP (match_operand:SF 1 "gpc_reg_operand" "r") + (match_operand:SF 2 "gpc_reg_operand" "r"))] + 1003))] +- "TARGET_HARD_FLOAT && !TARGET_FPRS && flag_unsafe_math_optimizations" ++ "TARGET_HARD_FLOAT && !TARGET_FPRS ++ && flag_finite_math_only && !flag_trapping_math" + "efststgt %0,%1,%2" + [(set_attr "type" "veccmpsimple")]) + +@@ -2973,7 +2977,8 @@ + [(compare:CCFP (match_operand:SF 1 "gpc_reg_operand" "r") + (match_operand:SF 2 "gpc_reg_operand" "r"))] + 1004))] +- "TARGET_HARD_FLOAT && !TARGET_FPRS && !flag_unsafe_math_optimizations" ++ "TARGET_HARD_FLOAT && !TARGET_FPRS ++ && !(flag_finite_math_only && !flag_trapping_math)" + "efscmplt %0,%1,%2" + [(set_attr "type" "veccmp")]) + +@@ -2983,7 +2988,8 @@ + [(compare:CCFP (match_operand:SF 1 "gpc_reg_operand" "r") + (match_operand:SF 2 "gpc_reg_operand" "r"))] + 1005))] +- "TARGET_HARD_FLOAT && !TARGET_FPRS && flag_unsafe_math_optimizations" ++ "TARGET_HARD_FLOAT && !TARGET_FPRS ++ && flag_finite_math_only && !flag_trapping_math" + "efststlt %0,%1,%2" + [(set_attr "type" "veccmpsimple")]) + +@@ -2995,7 +3001,8 @@ + [(compare:CCFP (match_operand:DF 1 "gpc_reg_operand" "r") + 
(match_operand:DF 2 "gpc_reg_operand" "r"))] + CMPDFEQ_GPR))] +- "TARGET_HARD_FLOAT && TARGET_E500_DOUBLE && !flag_unsafe_math_optimizations" ++ "TARGET_HARD_FLOAT && TARGET_E500_DOUBLE ++ && !(flag_finite_math_only && !flag_trapping_math)" + "efdcmpeq %0,%1,%2" + [(set_attr "type" "veccmp")]) + +@@ -3005,7 +3012,8 @@ + [(compare:CCFP (match_operand:DF 1 "gpc_reg_operand" "r") + (match_operand:DF 2 "gpc_reg_operand" "r"))] + TSTDFEQ_GPR))] +- "TARGET_HARD_FLOAT && TARGET_E500_DOUBLE && flag_unsafe_math_optimizations" ++ "TARGET_HARD_FLOAT && TARGET_E500_DOUBLE ++ && flag_finite_math_only && !flag_trapping_math" + "efdtsteq %0,%1,%2" + [(set_attr "type" "veccmpsimple")]) + +@@ -3015,7 +3023,8 @@ + [(compare:CCFP (match_operand:DF 1 "gpc_reg_operand" "r") + (match_operand:DF 2 "gpc_reg_operand" "r"))] + CMPDFGT_GPR))] +- "TARGET_HARD_FLOAT && TARGET_E500_DOUBLE && !flag_unsafe_math_optimizations" ++ "TARGET_HARD_FLOAT && TARGET_E500_DOUBLE ++ && !(flag_finite_math_only && !flag_trapping_math)" + "efdcmpgt %0,%1,%2" + [(set_attr "type" "veccmp")]) + +@@ -3025,7 +3034,8 @@ + [(compare:CCFP (match_operand:DF 1 "gpc_reg_operand" "r") + (match_operand:DF 2 "gpc_reg_operand" "r"))] + TSTDFGT_GPR))] +- "TARGET_HARD_FLOAT && TARGET_E500_DOUBLE && flag_unsafe_math_optimizations" ++ "TARGET_HARD_FLOAT && TARGET_E500_DOUBLE ++ && flag_finite_math_only && !flag_trapping_math" + "efdtstgt %0,%1,%2" + [(set_attr "type" "veccmpsimple")]) + +@@ -3035,7 +3045,8 @@ + [(compare:CCFP (match_operand:DF 1 "gpc_reg_operand" "r") + (match_operand:DF 2 "gpc_reg_operand" "r"))] + CMPDFLT_GPR))] +- "TARGET_HARD_FLOAT && TARGET_E500_DOUBLE && !flag_unsafe_math_optimizations" ++ "TARGET_HARD_FLOAT && TARGET_E500_DOUBLE ++ && !(flag_finite_math_only && !flag_trapping_math)" + "efdcmplt %0,%1,%2" + [(set_attr "type" "veccmp")]) + +@@ -3045,7 +3056,8 @@ + [(compare:CCFP (match_operand:DF 1 "gpc_reg_operand" "r") + (match_operand:DF 2 "gpc_reg_operand" "r"))] + TSTDFLT_GPR))] +- "TARGET_HARD_FLOAT && TARGET_E500_DOUBLE && flag_unsafe_math_optimizations" ++ "TARGET_HARD_FLOAT && TARGET_E500_DOUBLE ++ && flag_finite_math_only && !flag_trapping_math" + "efdtstlt %0,%1,%2" + [(set_attr "type" "veccmpsimple")]) + +@@ -3059,7 +3071,7 @@ + CMPTFEQ_GPR))] + "!TARGET_IEEEQUAD + && TARGET_HARD_FLOAT && TARGET_E500_DOUBLE && TARGET_LONG_DOUBLE_128 +- && !flag_unsafe_math_optimizations" ++ && !(flag_finite_math_only && !flag_trapping_math)" + "efdcmpeq %0,%1,%2\;bng %0,$+8\;efdcmpeq %0,%L1,%L2" + [(set_attr "type" "veccmp") + (set_attr "length" "12")]) +@@ -3072,7 +3084,7 @@ + TSTTFEQ_GPR))] + "!TARGET_IEEEQUAD + && TARGET_HARD_FLOAT && TARGET_E500_DOUBLE && TARGET_LONG_DOUBLE_128 +- && flag_unsafe_math_optimizations" ++ && flag_finite_math_only && !flag_trapping_math" + "efdtsteq %0,%1,%2\;bng %0,$+8\;efdtsteq %0,%L1,%L2" + [(set_attr "type" "veccmpsimple") + (set_attr "length" "12")]) +@@ -3085,7 +3097,7 @@ + CMPTFGT_GPR))] + "!TARGET_IEEEQUAD + && TARGET_HARD_FLOAT && TARGET_E500_DOUBLE && TARGET_LONG_DOUBLE_128 +- && !flag_unsafe_math_optimizations" ++ && !(flag_finite_math_only && !flag_trapping_math)" + "efdcmpgt %0,%1,%2\;bgt %0,$+16\;efdcmpeq %0,%1,%2\;bng %0,$+8\;efdcmpgt %0,%L1,%L2" + [(set_attr "type" "veccmp") + (set_attr "length" "20")]) +@@ -3098,7 +3110,7 @@ + TSTTFGT_GPR))] + "!TARGET_IEEEQUAD + && TARGET_HARD_FLOAT && TARGET_E500_DOUBLE && TARGET_LONG_DOUBLE_128 +- && flag_unsafe_math_optimizations" ++ && flag_finite_math_only && !flag_trapping_math" + "efdtstgt %0,%1,%2\;bgt %0,$+16\;efdtsteq %0,%1,%2\;bng 
%0,$+8\;efdtstgt %0,%L1,%L2" + [(set_attr "type" "veccmpsimple") + (set_attr "length" "20")]) +@@ -3111,7 +3123,7 @@ + CMPTFLT_GPR))] + "!TARGET_IEEEQUAD + && TARGET_HARD_FLOAT && TARGET_E500_DOUBLE && TARGET_LONG_DOUBLE_128 +- && !flag_unsafe_math_optimizations" ++ && !(flag_finite_math_only && !flag_trapping_math)" + "efdcmplt %0,%1,%2\;bgt %0,$+16\;efdcmpeq %0,%1,%2\;bng %0,$+8\;efdcmplt %0,%L1,%L2" + [(set_attr "type" "veccmp") + (set_attr "length" "20")]) +@@ -3124,7 +3136,7 @@ + TSTTFLT_GPR))] + "!TARGET_IEEEQUAD + && TARGET_HARD_FLOAT && TARGET_E500_DOUBLE && TARGET_LONG_DOUBLE_128 +- && flag_unsafe_math_optimizations" ++ && flag_finite_math_only && !flag_trapping_math" + "efdtstlt %0,%1,%2\;bgt %0,$+16\;efdtsteq %0,%1,%2\;bng %0,$+8\;efdtstlt %0,%L1,%L2" + [(set_attr "type" "veccmpsimple") + (set_attr "length" "20")]) +@@ -3135,6 +3147,44 @@ + (unspec:CCFP [(match_operand 1 "cc_reg_operand" "y") + (match_operand 2 "cc_reg_operand" "y")] + E500_CR_IOR_COMPARE))] +- "TARGET_E500" ++ "TARGET_HARD_FLOAT && !TARGET_FPRS" + "cror 4*%0+gt,4*%1+gt,4*%2+gt" + [(set_attr "type" "cr_logical")]) ++ ++;; Out-of-line prologues and epilogues. ++(define_insn "*save_gpregs_spe" ++ [(match_parallel 0 "any_parallel_operand" ++ [(clobber (reg:P 65)) ++ (use (match_operand:P 1 "symbol_ref_operand" "s")) ++ (use (reg:P 11)) ++ (set (match_operand:V2SI 2 "memory_operand" "=m") ++ (match_operand:V2SI 3 "gpc_reg_operand" "r"))])] ++ "TARGET_SPE_ABI" ++ "bl %z1" ++ [(set_attr "type" "branch") ++ (set_attr "length" "4")]) ++ ++(define_insn "*restore_gpregs_spe" ++ [(match_parallel 0 "any_parallel_operand" ++ [(clobber (reg:P 65)) ++ (use (match_operand:P 1 "symbol_ref_operand" "s")) ++ (use (reg:P 11)) ++ (set (match_operand:V2SI 2 "gpc_reg_operand" "=r") ++ (match_operand:V2SI 3 "memory_operand" "m"))])] ++ "TARGET_SPE_ABI" ++ "bl %z1" ++ [(set_attr "type" "branch") ++ (set_attr "length" "4")]) ++ ++(define_insn "*return_and_restore_gpregs_spe" ++ [(match_parallel 0 "any_parallel_operand" ++ [(return) ++ (clobber (reg:P 65)) ++ (use (match_operand:P 1 "symbol_ref_operand" "s")) ++ (use (reg:P 11)) ++ (set (match_operand:V2SI 2 "gpc_reg_operand" "=r") ++ (match_operand:V2SI 3 "memory_operand" "m"))])] ++ "TARGET_SPE_ABI" ++ "b %z1" ++ [(set_attr "type" "branch") ++ (set_attr "length" "4")]) +--- a/gcc/config/rs6000/sysv4.h ++++ b/gcc/config/rs6000/sysv4.h +@@ -266,19 +266,27 @@ do { \ + #endif + + /* Define cutoff for using external functions to save floating point. +- Currently on V.4, always use inline stores. */ +-#define FP_SAVE_INLINE(FIRST_REG) ((FIRST_REG) < 64) ++ Currently on 64-bit V.4, always use inline stores. When optimizing ++ for size on 32-bit targets, use external functions when ++ profitable. */ ++#define FP_SAVE_INLINE(FIRST_REG) (optimize_size && !TARGET_64BIT \ ++ ? ((FIRST_REG) == 62 \ ++ || (FIRST_REG) == 63) \ ++ : (FIRST_REG) < 64) ++/* And similarly for general purpose registers. */ ++#define GP_SAVE_INLINE(FIRST_REG) ((FIRST_REG) < 32 \ ++ && (TARGET_64BIT || !optimize_size)) + + /* Put jump tables in read-only memory, rather than in .text. */ + #define JUMP_TABLES_IN_TEXT_SECTION 0 + + /* Prefix and suffix to use to saving floating point. */ + #define SAVE_FP_PREFIX "_savefpr_" +-#define SAVE_FP_SUFFIX "_l" ++#define SAVE_FP_SUFFIX (TARGET_64BIT ? "_l" : "") + + /* Prefix and suffix to use to restoring floating point. */ + #define RESTORE_FP_PREFIX "_restfpr_" +-#define RESTORE_FP_SUFFIX "_l" ++#define RESTORE_FP_SUFFIX (TARGET_64BIT ? 
"_l" : "") + + /* Type used for ptrdiff_t, as a string used in a declaration. */ + #define PTRDIFF_TYPE "int" +@@ -577,9 +585,9 @@ extern int fixuplabelno; + /* Override svr4.h definition. */ + #undef ASM_SPEC + #define ASM_SPEC "%(asm_cpu) \ +-%{,assembler|,assembler-with-cpp: %{mregnames} %{mno-regnames}} \ +-%{v:-V} %{Qy:} %{!Qn:-Qy} %{n} %{T} %{Ym,*} %{Yd,*} %{Wa,*:%*} \ +-%{mrelocatable} %{mrelocatable-lib} %{fpic|fpie|fPIC|fPIE:-K PIC} \ ++%{,assembler|,assembler-with-cpp: %{mregnames} %{mno-regnames}}" \ ++SVR4_ASM_SPEC \ ++"%{mrelocatable} %{mrelocatable-lib} %{fpic|fpie|fPIC|fPIE:-K PIC} \ + %{memb|msdata|msdata=eabi: -memb} \ + %{mlittle|mlittle-endian:-mlittle; \ + mbig|mbig-endian :-mbig; \ +@@ -606,6 +614,9 @@ extern int fixuplabelno; + #define CC1_SECURE_PLT_DEFAULT_SPEC "" + #endif + ++#undef CC1_EXTRA_SPEC ++#define CC1_EXTRA_SPEC "" ++ + /* Pass -G xxx to the compiler and set correct endian mode. */ + #define CC1_SPEC "%{G*} %(cc1_cpu) \ + %{mlittle|mlittle-endian: %(cc1_endian_little); \ +@@ -630,7 +641,7 @@ extern int fixuplabelno; + %{msdata: -msdata=default} \ + %{mno-sdata: -msdata=none} \ + %{!mbss-plt: %{!msecure-plt: %(cc1_secure_plt_default)}} \ +-%{profile: -p}" ++%{profile: -p}" CC1_EXTRA_SPEC + + /* Don't put -Y P, for cross compilers. */ + #ifndef CROSS_DIRECTORY_STRUCTURE +@@ -777,19 +788,19 @@ extern int fixuplabelno; + /* Override svr4.h definition. */ + #undef ENDFILE_SPEC + #define ENDFILE_SPEC "\ +-%{mads : crtsavres.o%s %(endfile_ads) ; \ +- myellowknife : crtsavres.o%s %(endfile_yellowknife) ; \ +- mmvme : crtsavres.o%s %(endfile_mvme) ; \ +- msim : crtsavres.o%s %(endfile_sim) ; \ ++%{mads : %(endfile_ads) ; \ ++ myellowknife : %(endfile_yellowknife) ; \ ++ mmvme : %(endfile_mvme) ; \ ++ msim : %(endfile_sim) ; \ + mwindiss : %(endfile_windiss) ; \ +- mcall-freebsd: crtsavres.o%s %(endfile_freebsd) ; \ +- mcall-linux : crtsavres.o%s %(endfile_linux) ; \ +- mcall-gnu : crtsavres.o%s %(endfile_gnu) ; \ +- mcall-netbsd : crtsavres.o%s %(endfile_netbsd) ; \ +- mcall-openbsd: crtsavres.o%s %(endfile_openbsd) ; \ ++ mcall-freebsd: %(endfile_freebsd) ; \ ++ mcall-linux : %(endfile_linux) ; \ ++ mcall-gnu : %(endfile_gnu) ; \ ++ mcall-netbsd : %(endfile_netbsd) ; \ ++ mcall-openbsd: %(endfile_openbsd) ; \ + : %(crtsavres_default) %(endfile_default) }" + +-#define CRTSAVRES_DEFAULT_SPEC "crtsavres.o%s" ++#define CRTSAVRES_DEFAULT_SPEC "" + + #define ENDFILE_DEFAULT_SPEC "crtend.o%s ecrtn.o%s" + +@@ -833,15 +844,15 @@ extern int fixuplabelno; + #define CPP_OS_MVME_SPEC "" + + /* PowerPC simulator based on netbsd system calls support. */ +-#define LIB_SIM_SPEC "--start-group -lsim -lc --end-group" ++#define LIB_SIM_SPEC LIB_DEFAULT_SPEC + +-#define STARTFILE_SIM_SPEC "ecrti.o%s sim-crt0.o%s crtbegin.o%s" ++#define STARTFILE_SIM_SPEC "ecrti.o%s crtbegin.o%s" + +-#define ENDFILE_SIM_SPEC "crtend.o%s ecrtn.o%s" ++#define ENDFILE_SIM_SPEC "crtend.o%s ecrtn.o%s -Tsim-hosted.ld" + + #define LINK_START_SIM_SPEC "" + +-#define LINK_OS_SIM_SPEC "-m elf32ppcsim" ++#define LINK_OS_SIM_SPEC "" + + #define CPP_OS_SIM_SPEC "" + +--- /dev/null ++++ b/gcc/config/rs6000/t-cs-eabi +@@ -0,0 +1,17 @@ ++# Multilibs for powerpc embedded ELF targets. 
++ ++MULTILIB_OPTIONS = te500v1/te500v2/te600/te500mc \ ++ msoft-float ++ ++MULTILIB_DIRNAMES = te500v1 te500v2 te600 te500mc \ ++ nof ++ ++MULTILIB_EXCEPTIONS = *te600*/*msoft-float* \ ++ *te500v1*/*msoft-float* \ ++ *te500v2*/*msoft-float* \ ++ *te500mc*/*msoft-float* ++ ++MULTILIB_EXTRA_OPTS = mno-eabi mstrict-align ++ ++MULTILIB_MATCHES = ${MULTILIB_MATCHES_FLOAT} \ ++ ${MULTILIB_MATCHES_ENDIAN} +--- /dev/null ++++ b/gcc/config/rs6000/t-linux +@@ -0,0 +1,12 @@ ++# Multilibs for powerpc-linux-gnu targets. ++ ++MULTILIB_OPTIONS = te500v1/te500v2/te600/te500mc \ ++ msoft-float ++ ++MULTILIB_DIRNAMES = te500v1 te500v2 te600 te500mc \ ++ nof ++ ++MULTILIB_EXCEPTIONS = *te600*/*msoft-float* \ ++ *te500v1*/*msoft-float* \ ++ *te500v2*/*msoft-float* \ ++ *te500mc*/*msoft-float* +--- /dev/null ++++ b/gcc/config/rs6000/t-montavista-linux +@@ -0,0 +1,26 @@ ++# MontaVista GNU/Linux Configuration. ++# Copyright (C) 2009 ++# Free Software Foundation, Inc. ++# ++# This file is part of GCC. ++# ++# GCC is free software; you can redistribute it and/or modify ++# it under the terms of the GNU General Public License as published by ++# the Free Software Foundation; either version 3, or (at your option) ++# any later version. ++# ++# GCC is distributed in the hope that it will be useful, ++# but WITHOUT ANY WARRANTY; without even the implied warranty of ++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++# GNU General Public License for more details. ++# ++# You should have received a copy of the GNU General Public License ++# along with GCC; see the file COPYING3. If not see ++# . ++ ++# Build hard-float, soft-float, E500mc, E500v2 and E600 ++# libraries. ++MULTILIB_OPTIONS = msoft-float/te500mc/te500v2/te600 ++MULTILIB_DIRNAMES = soft-float te500mc te500v2 te600 ++MULTILIB_EXCEPTIONS = ++MULTILIB_OSDIRNAMES = msoft-float=!soft-float +--- a/gcc/config/rs6000/t-netbsd ++++ b/gcc/config/rs6000/t-netbsd +@@ -2,13 +2,32 @@ + + LIB2FUNCS_EXTRA = tramp.S + ++LIB2FUNCS_STATIC_EXTRA = crtsavfpr.S crtresfpr.S \ ++ crtsavgpr.S crtresgpr.S \ ++ crtresxfpr.S crtresxgpr.S ++ + tramp.S: $(srcdir)/config/rs6000/tramp.asm + cat $(srcdir)/config/rs6000/tramp.asm > tramp.S + +-crtsavres.S: $(srcdir)/config/rs6000/crtsavres.asm +- cat $(srcdir)/config/rs6000/crtsavres.asm >crtsavres.S ++crtsavfpr.S: $(srcdir)/config/rs6000/crtsavfpr.asm ++ cat $(srcdir)/config/rs6000/crtsavfpr.asm >crtsavfpr.S ++ ++crtresfpr.S: $(srcdir)/config/rs6000/crtresfpr.asm ++ cat $(srcdir)/config/rs6000/crtresfpr.asm >crtresfpr.S ++ ++crtsavgpr.S: $(srcdir)/config/rs6000/crtsavgpr.asm ++ cat $(srcdir)/config/rs6000/crtsavgpr.asm >crtsavgpr.S ++ ++crtresgpr.S: $(srcdir)/config/rs6000/crtresgpr.asm ++ cat $(srcdir)/config/rs6000/crtresgpr.asm >crtresgpr.S ++ ++crtresxfpr.S: $(srcdir)/config/rs6000/crtresxfpr.asm ++ cat $(srcdir)/config/rs6000/crtresxfpr.asm >crtresxfpr.S ++ ++crtresxgpr.S: $(srcdir)/config/rs6000/crtresxgpr.asm ++ cat $(srcdir)/config/rs6000/crtresxgpr.asm >crtresxgpr.S + +-EXTRA_PARTS += crtsavres$(objext) ++EXTRA_PARTS += libcrtsavres.a + + # It is important that crtbegin.o, etc., aren't surprised by stuff in .sdata. 
+ CRTSTUFF_T_CFLAGS += -msdata=none +@@ -37,6 +56,20 @@ EXTRA_MULTILIB_PARTS = crtbegin$(objext) + crtbeginS$(objext) crtendS$(objext) crtbeginT$(objext) \ + crtsavres$(objext) + +-$(T)crtsavres$(objext): crtsavres.S +- $(GCC_FOR_TARGET) $(CRTSTUFF_CFLAGS) $(CRTSTUFF_T_CFLAGS) \ +- -c crtsavres.S -o $(T)crtsavres$(objext) ++$(T)crtsavfpr$(objext): crtsavfpr.S ++ $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(INCLUDES) $(MULTILIB_CFLAGS) -c crtsavfpr.S -o $(T)crtsavfpr$(objext) ++ ++$(T)crtresfpr$(objext): crtresfpr.S ++ $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(INCLUDES) $(MULTILIB_CFLAGS) -c crtresfpr.S -o $(T)crtresfpr$(objext) ++ ++$(T)crtsavgpr$(objext): crtsavgpr.S ++ $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(INCLUDES) $(MULTILIB_CFLAGS) -c crtsavgpr.S -o $(T)crtsavgpr$(objext) ++ ++$(T)crtresgpr$(objext): crtresgpr.S ++ $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(INCLUDES) $(MULTILIB_CFLAGS) -c crtresgpr.S -o $(T)crtresgpr$(objext) ++ ++$(T)crtresxfpr$(objext): crtresxfpr.S ++ $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(INCLUDES) $(MULTILIB_CFLAGS) -c crtresxfpr.S -o $(T)crtresxfpr$(objext) ++ ++$(T)crtresxgpr$(objext): crtresxgpr.S ++ $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(INCLUDES) $(MULTILIB_CFLAGS) -c crtresxgpr.S -o $(T)crtresxgpr$(objext) +--- /dev/null ++++ b/gcc/config/rs6000/t-ppc-e500mc +@@ -0,0 +1,12 @@ ++# Multilibs for powerpc embedded ELF targets. ++ ++MULTILIB_OPTIONS = ++ ++MULTILIB_DIRNAMES = ++ ++MULTILIB_EXCEPTIONS = ++ ++MULTILIB_EXTRA_OPTS = mno-eabi mstrict-align ++ ++MULTILIB_MATCHES = ${MULTILIB_MATCHES_FLOAT} \ ++ ${MULTILIB_MATCHES_ENDIAN} +--- a/gcc/config/rs6000/t-ppccomm ++++ b/gcc/config/rs6000/t-ppccomm +@@ -2,11 +2,24 @@ + + LIB2FUNCS_EXTRA += tramp.S $(srcdir)/config/rs6000/darwin-ldouble.c + +-# This one can't end up in shared libgcc +-LIB2FUNCS_STATIC_EXTRA = eabi.S +- +-eabi.S: $(srcdir)/config/rs6000/eabi.asm +- cat $(srcdir)/config/rs6000/eabi.asm > eabi.S ++# These can't end up in shared libgcc ++LIB2FUNCS_STATIC_EXTRA = \ ++ crtsavfpr.S crtresfpr.S \ ++ crtsavgpr.S crtresgpr.S \ ++ crtresxfpr.S crtresxgpr.S \ ++ e500crtres32gpr.S \ ++ e500crtres64gpr.S \ ++ e500crtres64gprctr.S \ ++ e500crtrest32gpr.S \ ++ e500crtrest64gpr.S \ ++ e500crtresx32gpr.S \ ++ e500crtresx64gpr.S \ ++ e500crtsav32gpr.S \ ++ e500crtsav64gpr.S \ ++ e500crtsav64gprctr.S \ ++ e500crtsavg32gpr.S \ ++ e500crtsavg64gpr.S \ ++ e500crtsavg64gprctr.S + + tramp.S: $(srcdir)/config/rs6000/tramp.asm + cat $(srcdir)/config/rs6000/tramp.asm > tramp.S +@@ -18,8 +31,7 @@ MULTILIB_MATCHES_SYSV = mcall-sysv=mcall + EXTRA_MULTILIB_PARTS = crtbegin$(objext) crtend$(objext) \ + crtbeginS$(objext) crtendS$(objext) crtbeginT$(objext) \ + ecrti$(objext) ecrtn$(objext) \ +- ncrti$(objext) ncrtn$(objext) \ +- crtsavres$(objext) ++ ncrti$(objext) ncrtn$(objext) + + # We build {e,n}crti.o and {e,n}crtn.o, which serve to add begin and + # end labels to all of the special sections used when we link using gcc. +@@ -37,8 +49,62 @@ ncrti.S: $(srcdir)/config/rs6000/sol-ci. 
+ ncrtn.S: $(srcdir)/config/rs6000/sol-cn.asm + cat $(srcdir)/config/rs6000/sol-cn.asm >ncrtn.S + +-crtsavres.S: $(srcdir)/config/rs6000/crtsavres.asm +- cat $(srcdir)/config/rs6000/crtsavres.asm >crtsavres.S ++crtsavfpr.S: $(srcdir)/config/rs6000/crtsavfpr.asm ++ cat $(srcdir)/config/rs6000/crtsavfpr.asm >crtsavfpr.S ++ ++crtresfpr.S: $(srcdir)/config/rs6000/crtresfpr.asm ++ cat $(srcdir)/config/rs6000/crtresfpr.asm >crtresfpr.S ++ ++crtsavgpr.S: $(srcdir)/config/rs6000/crtsavgpr.asm ++ cat $(srcdir)/config/rs6000/crtsavgpr.asm >crtsavgpr.S ++ ++crtresgpr.S: $(srcdir)/config/rs6000/crtresgpr.asm ++ cat $(srcdir)/config/rs6000/crtresgpr.asm >crtresgpr.S ++ ++crtresxfpr.S: $(srcdir)/config/rs6000/crtresxfpr.asm ++ cat $(srcdir)/config/rs6000/crtresxfpr.asm >crtresxfpr.S ++ ++crtresxgpr.S: $(srcdir)/config/rs6000/crtresxgpr.asm ++ cat $(srcdir)/config/rs6000/crtresxgpr.asm >crtresxgpr.S ++ ++e500crtres32gpr.S: $(srcdir)/config/rs6000/e500crtres32gpr.asm ++ cat $(srcdir)/config/rs6000/e500crtres32gpr.asm >e500crtres32gpr.S ++ ++e500crtres64gpr.S: $(srcdir)/config/rs6000/e500crtres64gpr.asm ++ cat $(srcdir)/config/rs6000/e500crtres64gpr.asm >e500crtres64gpr.S ++ ++e500crtres64gprctr.S: $(srcdir)/config/rs6000/e500crtres64gprctr.asm ++ cat $(srcdir)/config/rs6000/e500crtres64gprctr.asm >e500crtres64gprctr.S ++ ++e500crtrest32gpr.S: $(srcdir)/config/rs6000/e500crtrest32gpr.asm ++ cat $(srcdir)/config/rs6000/e500crtrest32gpr.asm >e500crtrest32gpr.S ++ ++e500crtrest64gpr.S: $(srcdir)/config/rs6000/e500crtrest64gpr.asm ++ cat $(srcdir)/config/rs6000/e500crtrest64gpr.asm >e500crtrest64gpr.S ++ ++e500crtresx32gpr.S: $(srcdir)/config/rs6000/e500crtresx32gpr.asm ++ cat $(srcdir)/config/rs6000/e500crtresx32gpr.asm >e500crtresx32gpr.S ++ ++e500crtresx64gpr.S: $(srcdir)/config/rs6000/e500crtresx64gpr.asm ++ cat $(srcdir)/config/rs6000/e500crtresx64gpr.asm >e500crtresx64gpr.S ++ ++e500crtsav32gpr.S: $(srcdir)/config/rs6000/e500crtsav32gpr.asm ++ cat $(srcdir)/config/rs6000/e500crtsav32gpr.asm >e500crtsav32gpr.S ++ ++e500crtsav64gpr.S: $(srcdir)/config/rs6000/e500crtsav64gpr.asm ++ cat $(srcdir)/config/rs6000/e500crtsav64gpr.asm >e500crtsav64gpr.S ++ ++e500crtsav64gprctr.S: $(srcdir)/config/rs6000/e500crtsav64gprctr.asm ++ cat $(srcdir)/config/rs6000/e500crtsav64gprctr.asm >e500crtsav64gprctr.S ++ ++e500crtsavg32gpr.S: $(srcdir)/config/rs6000/e500crtsavg32gpr.asm ++ cat $(srcdir)/config/rs6000/e500crtsavg32gpr.asm >e500crtsavg32gpr.S ++ ++e500crtsavg64gpr.S: $(srcdir)/config/rs6000/e500crtsavg64gpr.asm ++ cat $(srcdir)/config/rs6000/e500crtsavg64gpr.asm >e500crtsavg64gpr.S ++ ++e500crtsavg64gprctr.S: $(srcdir)/config/rs6000/e500crtsavg64gprctr.asm ++ cat $(srcdir)/config/rs6000/e500crtsavg64gprctr.asm >e500crtsavg64gprctr.S + + # Build multiple copies of ?crt{i,n}.o, one for each target switch. 
+ $(T)ecrti$(objext): ecrti.S +@@ -53,8 +119,62 @@ $(T)ncrti$(objext): ncrti.S + $(T)ncrtn$(objext): ncrtn.S + $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(INCLUDES) $(MULTILIB_CFLAGS) -c ncrtn.S -o $(T)ncrtn$(objext) + +-$(T)crtsavres$(objext): crtsavres.S +- $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(INCLUDES) $(MULTILIB_CFLAGS) -c crtsavres.S -o $(T)crtsavres$(objext) ++$(T)crtsavfpr$(objext): crtsavfpr.S ++ $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(INCLUDES) $(MULTILIB_CFLAGS) -c crtsavfpr.S -o $(T)crtsavfpr$(objext) ++ ++$(T)crtresfpr$(objext): crtresfpr.S ++ $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(INCLUDES) $(MULTILIB_CFLAGS) -c crtresfpr.S -o $(T)crtresfpr$(objext) ++ ++$(T)crtsavgpr$(objext): crtsavgpr.S ++ $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(INCLUDES) $(MULTILIB_CFLAGS) -c crtsavgpr.S -o $(T)crtsavgpr$(objext) ++ ++$(T)crtresgpr$(objext): crtresgpr.S ++ $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(INCLUDES) $(MULTILIB_CFLAGS) -c crtresgpr.S -o $(T)crtresgpr$(objext) ++ ++$(T)crtresxfpr$(objext): crtresxfpr.S ++ $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(INCLUDES) $(MULTILIB_CFLAGS) -c crtresxfpr.S -o $(T)crtresxfpr$(objext) ++ ++$(T)crtresxgpr$(objext): crtresxgpr.S ++ $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(INCLUDES) $(MULTILIB_CFLAGS) -c crtresxgpr.S -o $(T)crtresxgpr$(objext) ++ ++$(T)e500crtres32gpr$(objext): e500crtres32gpr.S ++ $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(INCLUDES) $(MULTILIB_CFLAGS) -c e500crtres32gpr.S -o $(T)e500crtres32gpr$(objext) ++ ++$(T)e500crtres64gpr$(objext): e500crtres64gpr.S ++ $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(INCLUDES) $(MULTILIB_CFLAGS) -c e500crtres64gpr.S -o $(T)e500crtres64gpr$(objext) ++ ++$(T)e500crtres64gprctr$(objext): e500crtres64gprctr.S ++ $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(INCLUDES) $(MULTILIB_CFLAGS) -c e500crtres64gprctr.S -o $(T)e500crtres64gprctr$(objext) ++ ++$(T)e500crtrest32gpr$(objext): e500crtrest32gpr.S ++ $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(INCLUDES) $(MULTILIB_CFLAGS) -c e500crtrest32gpr.S -o $(T)e500crtrest32gpr$(objext) ++ ++$(T)e500crtrest64gpr$(objext): e500crtrest64gpr.S ++ $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(INCLUDES) $(MULTILIB_CFLAGS) -c e500crtrest64gpr.S -o $(T)e500crtrest64gpr$(objext) ++ ++$(T)e500crtresx32gpr$(objext): e500crtresx32gpr.S ++ $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(INCLUDES) $(MULTILIB_CFLAGS) -c e500crtresx32gpr.S -o $(T)e500crtresx32gpr$(objext) ++ ++$(T)e500crtresx64gpr$(objext): e500crtresx64gpr.S ++ $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(INCLUDES) $(MULTILIB_CFLAGS) -c e500crtresx64gpr.S -o $(T)e500crtresx64gpr$(objext) ++ ++$(T)e500crtsav32gpr$(objext): e500crtsav32gpr.S ++ $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(INCLUDES) $(MULTILIB_CFLAGS) -c e500crtsav32gpr.S -o $(T)e500crtsav32gpr$(objext) ++ ++$(T)e500crtsav64gpr$(objext): e500crtsav64gpr.S ++ $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(INCLUDES) $(MULTILIB_CFLAGS) -c e500crtsav64gpr.S -o $(T)e500crtsav64gpr$(objext) ++ ++$(T)e500crtsav64gprctr$(objext): e500crtsav64gprctr.S ++ $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(INCLUDES) $(MULTILIB_CFLAGS) -c e500crtsav64gprctr.S -o $(T)e500crtsav64gprctr$(objext) ++ ++$(T)e500crtsavg32gpr$(objext): e500crtsavg32gpr.S ++ $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(INCLUDES) $(MULTILIB_CFLAGS) -c e500crtsavg32gpr.S -o $(T)e500crtsavg32gpr$(objext) ++ ++$(T)e500crtsavg64gpr$(objext): e500crtsavg64gpr.S ++ $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(INCLUDES) $(MULTILIB_CFLAGS) -c e500crtsavg64gpr.S -o $(T)e500crtsavg64gpr$(objext) ++ ++$(T)e500crtsavg64gprctr$(objext): e500crtsavg64gprctr.S ++ $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(INCLUDES) $(MULTILIB_CFLAGS) -c e500crtsavg64gprctr.S -o $(T)e500crtsavg64gprctr$(objext) + + # 
It is important that crtbegin.o, etc., aren't surprised by stuff in .sdata. + CRTSTUFF_T_CFLAGS = -msdata=none +--- a/gcc/config/rs6000/t-ppcgas ++++ b/gcc/config/rs6000/t-ppcgas +@@ -1,14 +1,16 @@ + # Multilibs for powerpc embedded ELF targets. + +-MULTILIB_OPTIONS = msoft-float \ +- mlittle/mbig \ +- fleading-underscore ++MULTILIB_OPTIONS = te500v1/te500v2/te600 \ ++ msoft-float + +-MULTILIB_DIRNAMES = nof \ +- le be \ +- und ++MULTILIB_DIRNAMES = te500v1 te500v2 te600 \ ++ nof + +-MULTILIB_EXTRA_OPTS = mrelocatable-lib mno-eabi mstrict-align ++MULTILIB_EXCEPTIONS = *te600*/*msoft-float* \ ++ *te500v1*/*msoft-float* \ ++ *te500v2*/*msoft-float* ++ ++MULTILIB_EXTRA_OPTS = mno-eabi mstrict-align + + MULTILIB_MATCHES = ${MULTILIB_MATCHES_FLOAT} \ + ${MULTILIB_MATCHES_ENDIAN} +--- /dev/null ++++ b/gcc/config/rs6000/t-timesys +@@ -0,0 +1,17 @@ ++# Overrides for timesys ++ ++# We want to build six multilibs: ++# . (default, -mcpu=740) ++# 4xx (-mcpu=405) ++# 44x (-mcpu=440) ++# 8xx (-mcpu=801) ++# 85xx (-te500v1) ++# 74xx (-te600) ++ ++MULTILIB_OPTIONS = mcpu=405/mcpu=440/mcpu=801/te500v1/te600 ++ ++MULTILIB_DIRNAMES = 4xx 44x 8xx 85xx 74xx ++ ++MULTILIB_MATCHES = ++ ++MULTILIB_EXCEPTIONS = +--- /dev/null ++++ b/gcc/config/rs6000/t-wrs-linux +@@ -0,0 +1,30 @@ ++# Wind River GNU/Linux Configuration. ++# Copyright (C) 2006, 2007 ++# Free Software Foundation, Inc. ++# ++# This file is part of GCC. ++# ++# GCC is free software; you can redistribute it and/or modify ++# it under the terms of the GNU General Public License as published by ++# the Free Software Foundation; either version 3, or (at your option) ++# any later version. ++# ++# GCC is distributed in the hope that it will be useful, ++# but WITHOUT ANY WARRANTY; without even the implied warranty of ++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++# GNU General Public License for more details. ++# ++# You should have received a copy of the GNU General Public License ++# along with GCC; see the file COPYING3. If not see ++# . ++ ++# Build hard-float (32-bit and 64-bit), soft-float, E500v1 and E500v2 ++# libraries. ++MULTILIB_OPTIONS = muclibc m64 msoft-float te500v1 te500v2 ++MULTILIB_DIRNAMES = uclibc 64 soft-float te500v1 te500v2 ++MULTILIB_EXCEPTIONS = *muclibc*/*m64* *muclibc*/*msoft-float* ++MULTILIB_EXCEPTIONS += *muclibc*/*te500v1* *muclibc*/*te500v2* ++MULTILIB_EXCEPTIONS += *m64*/*msoft-float* *m64*/*te500v1* *m64*/*te500v2* ++MULTILIB_EXCEPTIONS += *msoft-float*/*te500v1* *msoft-float*/*te500v2* ++MULTILIB_EXCEPTIONS += *te500v1*/*te500v2* ++MULTILIB_OSDIRNAMES = muclibc=!uclibc m64=../lib64 msoft-float=!soft-float +--- /dev/null ++++ b/gcc/config/rs6000/timesys-linux.h +@@ -0,0 +1,41 @@ ++/* Configuration file for timesys ARM GNU/Linux EABI targets. ++ Copyright (C) 2007 ++ Free Software Foundation, Inc. ++ Contributed by CodeSourcery, LLC ++ ++ This file is part of GCC. ++ ++ GCC is free software; you can redistribute it and/or modify it ++ under the terms of the GNU General Public License as published ++ by the Free Software Foundation; either version 3, or (at your ++ option) any later version. ++ ++ GCC is distributed in the hope that it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY ++ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public ++ License for more details. ++ ++ You should have received a copy of the GNU General Public License ++ along with GCC; see the file COPYING3. If not see ++ . 
*/ ++ ++/* Add -t flags for convenience in generating multilibs. */ ++#undef CC1_EXTRA_SPEC ++#define CC1_EXTRA_SPEC \ ++ "%{te500v1: -mcpu=8540 -mfloat-gprs=single -mspe=yes -mabi=spe} " \ ++ "%{te600: -mcpu=7400 -maltivec -mabi=altivec} " ++ ++#undef ASM_DEFAULT_SPEC ++#define ASM_DEFAULT_SPEC \ ++ "%{te500v1:-mppc -mspe -me500 ; \ ++ te600:-mppc -maltivec ; \ ++ mcpu=405:-m405 ; \ ++ mcpu=440:-m440 ; \ ++ :-mppc%{m64:64}}" ++ ++ ++/* FIXME:We should be dynamically creating this from the makefile. ++ See m68k for an example. */ ++#undef SYSROOT_SUFFIX_SPEC ++#define SYSROOT_SUFFIX_SPEC \ ++ "%{mcpu=405:/4xx ; mcpu=440:/44x ; mcpu=801:/8xx ; te500v1:/85xx ; te600:/74xx}" +--- /dev/null ++++ b/gcc/config/rs6000/wrs-linux.h +@@ -0,0 +1,44 @@ ++/* Wind River GNU/Linux Configuration. ++ Copyright (C) 2006, 2007 ++ Free Software Foundation, Inc. ++ ++This file is part of GCC. ++ ++GCC is free software; you can redistribute it and/or modify ++it under the terms of the GNU General Public License as published by ++the Free Software Foundation; either version 3, or (at your option) ++any later version. ++ ++GCC is distributed in the hope that it will be useful, ++but WITHOUT ANY WARRANTY; without even the implied warranty of ++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++GNU General Public License for more details. ++ ++You should have received a copy of the GNU General Public License ++along with GCC; see the file COPYING3. If not see ++. */ ++ ++/* Add -te500v1 and -te500v2 options for convenience in generating ++ multilibs. */ ++#undef CC1_EXTRA_SPEC ++#define CC1_EXTRA_SPEC "%{te500v1: -mcpu=8540 -mfloat-gprs=single -mspe=yes -mabi=spe} %{te500v2: -mcpu=8548 -mfloat-gprs=double -mspe=yes -mabi=spe}" ++ ++#undef ASM_DEFAULT_SPEC ++#define ASM_DEFAULT_SPEC \ ++ "%{te500v1:-mppc -mspe -me500 ; \ ++ te500v2:-mppc -mspe -me500 ; \ ++ :-mppc%{m64:64}}" ++ ++/* The GLIBC headers are in /usr/include, relative to the sysroot; the ++ uClibc headers are in /uclibc/usr/include. */ ++#undef SYSROOT_HEADERS_SUFFIX_SPEC ++#define SYSROOT_HEADERS_SUFFIX_SPEC \ ++ "%{muclibc:/uclibc}" ++ ++/* The various C libraries each have their own subdirectory. */ ++#undef SYSROOT_SUFFIX_SPEC ++#define SYSROOT_SUFFIX_SPEC \ ++ "%{muclibc:/uclibc ; \ ++ msoft-float:/soft-float ; \ ++ te500v1:/te500v1 ; \ ++ te500v2:/te500v2}" +--- /dev/null ++++ b/gcc/config/sh/cs-sgxxlite-linux.h +@@ -0,0 +1,23 @@ ++/* SH SourceryG++ GNU/Linux Configuration. ++ Copyright (C) 2008 ++ Free Software Foundation, Inc. ++ ++This file is part of GCC. ++ ++GCC is free software; you can redistribute it and/or modify ++it under the terms of the GNU General Public License as published by ++the Free Software Foundation; either version 3, or (at your option) ++any later version. ++ ++GCC is distributed in the hope that it will be useful, ++but WITHOUT ANY WARRANTY; without even the implied warranty of ++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++GNU General Public License for more details. ++ ++You should have received a copy of the GNU General Public License ++along with GCC; see the file COPYING3. If not see ++. 
*/ ++ ++#undef SYSROOT_HEADERS_SUFFIX_SPEC ++#define SYSROOT_HEADERS_SUFFIX_SPEC \ ++ "%{muclibc:/uclibc}" +--- a/gcc/config/sh/lib1funcs.asm ++++ b/gcc/config/sh/lib1funcs.asm +@@ -2084,8 +2084,9 @@ GLOBAL(ic_invalidate): + GLOBAL(ic_invalidate): + ocbwb @r4 + synco +- rts + icbi @r4 ++ rts ++ nop + ENDFUNC(GLOBAL(ic_invalidate)) + #elif defined(__SH4_SINGLE__) || defined(__SH4__) || defined(__SH4_SINGLE_ONLY__) || (defined(__SH4_NOFPU__) && !defined(__SH5__)) + /* For system code, we use ic_invalidate_line_i, but user code +--- a/gcc/config/sh/linux-unwind.h ++++ b/gcc/config/sh/linux-unwind.h +@@ -27,7 +27,10 @@ the Free Software Foundation, 51 Frankli + Boston, MA 02110-1301, USA. */ + + /* Do code reading to identify a signal frame, and set the frame +- state data appropriately. See unwind-dw2.c for the structs. */ ++ state data appropriately. See unwind-dw2.c for the structs. ++ Don't use this at all if inhibit_libc is used. */ ++ ++#ifndef inhibit_libc + + #include + #include +@@ -251,3 +254,5 @@ sh_fallback_frame_state (struct _Unwind_ + return _URC_NO_REASON; + } + #endif /* defined (__SH5__) */ ++ ++#endif /* inhibit_libc */ +--- a/gcc/config/sh/t-1e ++++ /dev/null +@@ -1 +0,0 @@ +-MULTILIB_ENDIAN = +--- a/gcc/config/sh/t-linux ++++ b/gcc/config/sh/t-linux +@@ -4,6 +4,5 @@ LIB2FUNCS_EXTRA= $(srcdir)/config/sh/lin + + MULTILIB_DIRNAMES= + MULTILIB_MATCHES = +-MULTILIB_EXCEPTIONS= + + EXTRA_MULTILIB_PARTS= crtbegin.o crtend.o crtbeginS.o crtendS.o crtbeginT.o +--- a/gcc/config/sh/t-mlib-sh1 ++++ /dev/null +@@ -1 +0,0 @@ +-ML_sh1=m1/ +--- a/gcc/config/sh/t-mlib-sh2 ++++ /dev/null +@@ -1 +0,0 @@ +-ML_sh2=m2/ +--- a/gcc/config/sh/t-mlib-sh2a ++++ /dev/null +@@ -1 +0,0 @@ +-ML_sh2a=m2a/ +--- a/gcc/config/sh/t-mlib-sh2a-nofpu ++++ /dev/null +@@ -1 +0,0 @@ +-ML_sh2a_nofpu=m2a-nofpu/ +--- a/gcc/config/sh/t-mlib-sh2a-single ++++ /dev/null +@@ -1 +0,0 @@ +-ML_sh2a_single=m2a-single/ +--- a/gcc/config/sh/t-mlib-sh2a-single-only ++++ /dev/null +@@ -1 +0,0 @@ +-ML_sh2a_single_only=m2a-single-only/ +--- a/gcc/config/sh/t-mlib-sh2e ++++ /dev/null +@@ -1 +0,0 @@ +-ML_sh2e=m2e/ +--- a/gcc/config/sh/t-mlib-sh3 ++++ /dev/null +@@ -1 +0,0 @@ +-ML_sh3=m3/ +--- a/gcc/config/sh/t-mlib-sh3e ++++ /dev/null +@@ -1 +0,0 @@ +-ML_sh3e=m3e/ +--- a/gcc/config/sh/t-mlib-sh4 ++++ /dev/null +@@ -1 +0,0 @@ +-ML_sh4=m4/ +--- a/gcc/config/sh/t-mlib-sh4-nofpu ++++ /dev/null +@@ -1 +0,0 @@ +-ML_sh4_nofpu=m4-nofpu/ +--- a/gcc/config/sh/t-mlib-sh4-single ++++ /dev/null +@@ -1 +0,0 @@ +-ML_sh4_single=m4-single/ +--- a/gcc/config/sh/t-mlib-sh4-single-only ++++ /dev/null +@@ -1 +0,0 @@ +-ML_sh4_single_only=m4-single-only/ +--- a/gcc/config/sh/t-mlib-sh4a ++++ /dev/null +@@ -1 +0,0 @@ +-ML_sh4a=m4a/ +--- a/gcc/config/sh/t-mlib-sh4a-nofpu ++++ /dev/null +@@ -1 +0,0 @@ +-ML_sh4a_nofpu=m4a-nofpu/ +--- a/gcc/config/sh/t-mlib-sh4a-single ++++ /dev/null +@@ -1 +0,0 @@ +-ML_sh4a_single=m4a-single/ +--- a/gcc/config/sh/t-mlib-sh4a-single-only ++++ /dev/null +@@ -1 +0,0 @@ +-ML_sh4a_single_only=m4a-single-only/ +--- a/gcc/config/sh/t-mlib-sh4al ++++ /dev/null +@@ -1 +0,0 @@ +-ML_sh4al=m4al/ +--- a/gcc/config/sh/t-mlib-sh5-32media ++++ /dev/null +@@ -1 +0,0 @@ +-ML_sh5_32media=m5-32media/ +--- a/gcc/config/sh/t-mlib-sh5-32media-nofpu ++++ /dev/null +@@ -1 +0,0 @@ +-ML_sh5_32media_nofpu=m5-32media-nofpu/ +--- a/gcc/config/sh/t-mlib-sh5-64media ++++ /dev/null +@@ -1 +0,0 @@ +-ML_sh5_64media=m5-64media/ +--- a/gcc/config/sh/t-mlib-sh5-64media-nofpu ++++ /dev/null +@@ -1 +0,0 @@ +-ML_sh5_64media_nofpu=m5-64media-nofpu/ +--- 
a/gcc/config/sh/t-mlib-sh5-compact ++++ /dev/null +@@ -1 +0,0 @@ +-ML_sh5_compact=m5-compact/ +--- a/gcc/config/sh/t-mlib-sh5-compact-nofpu ++++ /dev/null +@@ -1 +0,0 @@ +-ML_sh5_compact_nofpu=m5-compact-nofpu/ +--- /dev/null ++++ b/gcc/config/sh/t-sgxxlite-linux +@@ -0,0 +1,3 @@ ++MULTILIB_OPTIONS += muclibc ++MULTILIB_OSDIRNAMES += muclibc=!uclibc m4al/muclibc=!m4al/uclibc mb/muclibc=!mb/uclibc ++MULTILIB_EXCEPTIONS += mb/m4al/muclibc +--- a/gcc/config/sh/t-sh ++++ b/gcc/config/sh/t-sh +@@ -27,10 +27,10 @@ fp-bit.c: $(srcdir)/config/fp-bit.c + echo '#endif' >> fp-bit.c + cat $(srcdir)/config/fp-bit.c >> fp-bit.c + +-MULTILIB_ENDIAN = ml/mb +-MULTILIB_CPUS= $(ML_sh1)$(ML_sh2a)$(ML_sh2a_nofpu)$(ML_sh2a_single_only)$(ML_sh2a_single)$(ML_sh2e)$(ML_sh2)$(ML_sh3e)$(ML_sh3)$(ML_sh4_nofpu)$(ML_sh4_single_only)$(ML_sh4_single)$(ML_sh4)$(ML_sh4a_nofpu)$(ML_sh4a_single_only)$(ML_sh4a_single)$(ML_sh4a)$(ML_sh5_32media)$(ML_sh5_32media_nofpu)$(ML_sh5_compact)$(ML_sh5_compact_nofpu)$(ML_sh5_64media)$(ML_sh5_64media_nofpu) ++DEFAULT_ENDIAN = $(word 1,$(TM_ENDIAN_CONFIG)) ++OTHER_ENDIAN = $(word 2,$(TM_ENDIAN_CONFIG)) + +-MULTILIB_OPTIONS= $(MULTILIB_ENDIAN) $(MULTILIB_CPUS:/=) ++MULTILIB_OPTIONS= $(OTHER_ENDIAN) $(TM_MULTILIB_CONFIG) + MULTILIB_DIRNAMES= + + # The separate entries for m2a-nofpu and m2a-single-only with +@@ -58,7 +58,34 @@ MULTILIB_MATCHES = $(shell \ + done) + + # SH1 only supports big endian. +-MULTILIB_EXCEPTIONS = ml/m1 ml/m2a* ++MULTILIB_EXCEPTIONS = ml/m1 ml/m2a* $(TM_MULTILIB_EXCEPTIONS_CONFIG) ++ ++MULTILIB_OSDIRNAMES = \ ++ $(OTHER_ENDIAN)=!$(OTHER_ENDIAN) \ ++ m1=!m1 $(OTHER_ENDIAN)/m1=!$(OTHER_ENDIAN)/m1 \ ++ m2a=!m2a $(OTHER_ENDIAN)/m2a=!$(OTHER_ENDIAN)/m2a \ ++ m2a-nofpu=!m2a-nofpu $(OTHER_ENDIAN)/m2a-nofpu=!$(OTHER_ENDIAN)/m2a-nofpu \ ++ m2a-single-only=!m2a-single-only $(OTHER_ENDIAN)/m2a-single-only=!$(OTHER_ENDIAN)/m2a-single-only \ ++ m2a-single=!m2a-single $(OTHER_ENDIAN)/m2a-single=!$(OTHER_ENDIAN)/m2a-single \ ++ m2e=!m2e $(OTHER_ENDIAN)/m2e=!$(OTHER_ENDIAN)/m2e \ ++ m2=!m2 $(OTHER_ENDIAN)/m2=!$(OTHER_ENDIAN)/m2 \ ++ m3e=!m3e $(OTHER_ENDIAN)/m3e=!$(OTHER_ENDIAN)/m3e \ ++ m3=!m3 $(OTHER_ENDIAN)/m3=!$(OTHER_ENDIAN)/m3 \ ++ m4-nofpu=!m4-nofpu $(OTHER_ENDIAN)/m4-nofpu=!$(OTHER_ENDIAN)/m4-nofpu \ ++ m4-single-only=!m4-single-only $(OTHER_ENDIAN)/m4-single-only=!$(OTHER_ENDIAN)/m4-single-only \ ++ m4-single=!m4-single $(OTHER_ENDIAN)/m4-single=!$(OTHER_ENDIAN)/m4-single \ ++ m4=!m4 $(OTHER_ENDIAN)/m4=!$(OTHER_ENDIAN)/m4 \ ++ m4a-nofpu=!m4a-nofpu $(OTHER_ENDIAN)/m4a-nofpu=!$(OTHER_ENDIAN)/m4a-nofpu \ ++ m4a-single-only=!m4a-single-only $(OTHER_ENDIAN)/m4a-single-only=!$(OTHER_ENDIAN)/m4a-single-only \ ++ m4a-single=!m4a-single $(OTHER_ENDIAN)/m4a-single=!$(OTHER_ENDIAN)/m4a-single \ ++ m4a=!m4a $(OTHER_ENDIAN)/m4a=!$(OTHER_ENDIAN)/m4a \ ++ m4al=!m4al $(OTHER_ENDIAN)/m4al=!$(OTHER_ENDIAN)/m4al \ ++ m5-32media=!m5-32media $(OTHER_ENDIAN)/m5-32media=!$(OTHER_ENDIAN)/m5-32media \ ++ m5-32media-nofpu=!m5-32media-nofpu $(OTHER_ENDIAN)/m5-32media-nofpu=!$(OTHER_ENDIAN)/m5-32media-nofpu \ ++ m5-compact=!m5-compact $(OTHER_ENDIAN)/m5-compact=!$(OTHER_ENDIAN)/m5-compact \ ++ m5-compact-nofpu=!m5-compact-nofpu $(OTHER_ENDIAN)/m5-compact-nofpu=!$(OTHER_ENDIAN)/m5-compact-nofpu \ ++ m5-64media=!m5-64media $(OTHER_ENDIAN)/m5-64media=!$(OTHER_ENDIAN)/m5-64media \ ++ m5-64media-nofpu=!m5-64media-nofpu $(OTHER_ENDIAN)/m5-64media-nofpu=!$(OTHER_ENDIAN)/m5-64media-nofpu + + LIBGCC = stmp-multilib + INSTALL_LIBGCC = install-multilib +--- a/gcc/config/sol2.h ++++ b/gcc/config/sol2.h +@@ 
-123,12 +123,12 @@ along with GCC; see the file COPYING3. + %{YP,*} \ + %{R*} \ + %{compat-bsd: \ +- %{!YP,*:%{p|pg:-Y P,/usr/ucblib:/usr/ccs/lib/libp:/usr/lib/libp:/usr/ccs/lib:/usr/lib} \ +- %{!p:%{!pg:-Y P,/usr/ucblib:/usr/ccs/lib:/usr/lib}}} \ +- -R /usr/ucblib} \ ++ %{!YP,*:%{p|pg:-Y P,%R/usr/ucblib:%R/usr/ccs/lib/libp:%R/usr/lib/libp:%R/usr/ccs/lib:%R/usr/lib} \ ++ %{!p:%{!pg:-Y P,%R/usr/ucblib:%R/usr/ccs/lib:%R/usr/lib}}} \ ++ -R %R/usr/ucblib} \ + %{!compat-bsd: \ +- %{!YP,*:%{p|pg:-Y P,/usr/ccs/lib/libp:/usr/lib/libp:/usr/ccs/lib:/usr/lib} \ +- %{!p:%{!pg:-Y P,/usr/ccs/lib:/usr/lib}}}}" ++ %{!YP,*:%{p|pg:-Y P,%R/usr/ccs/lib/libp:%R/usr/lib/libp:%R/usr/ccs/lib:%R/usr/lib} \ ++ %{!p:%{!pg:-Y P,%R/usr/ccs/lib:%R/usr/lib}}}}" + + #undef LINK_ARCH32_SPEC + #define LINK_ARCH32_SPEC LINK_ARCH32_SPEC_BASE +--- a/gcc/config/sparc/linux64.h ++++ b/gcc/config/sparc/linux64.h +@@ -49,10 +49,15 @@ along with GCC; see the file COPYING3. + in a Medium/Low code model environment. */ + + #undef TARGET_DEFAULT ++#ifdef BIARCH_32BIT_DEFAULT ++#define TARGET_DEFAULT \ ++ (MASK_APP_REGS + MASK_FPU) ++#else + #define TARGET_DEFAULT \ + (MASK_V9 + MASK_PTR64 + MASK_64BIT /* + MASK_HARD_QUAD */ \ + + MASK_STACK_BIAS + MASK_APP_REGS + MASK_FPU + MASK_LONG_DOUBLE_128) + #endif ++#endif + + #undef ASM_CPU_DEFAULT_SPEC + #define ASM_CPU_DEFAULT_SPEC "-Av9a" +@@ -167,7 +172,7 @@ along with GCC; see the file COPYING3. + { "link_arch_default", LINK_ARCH_DEFAULT_SPEC }, \ + { "link_arch", LINK_ARCH_SPEC }, + +-#define LINK_ARCH32_SPEC "-m elf32_sparc -Y P,/usr/lib %{shared:-shared} \ ++#define LINK_ARCH32_SPEC "-m elf32_sparc -Y P,%R/usr/lib %{shared:-shared} \ + %{!shared: \ + %{!ibcs: \ + %{!static: \ +@@ -176,7 +181,7 @@ along with GCC; see the file COPYING3. + %{static:-static}}} \ + " + +-#define LINK_ARCH64_SPEC "-m elf64_sparc -Y P,/usr/lib64 %{shared:-shared} \ ++#define LINK_ARCH64_SPEC "-m elf64_sparc -Y P,%R/usr/lib64 %{shared:-shared} \ + %{!shared: \ + %{!ibcs: \ + %{!static: \ +@@ -257,7 +262,7 @@ along with GCC; see the file COPYING3. 
+ #else /* !SPARC_BI_ARCH */ + + #undef LINK_SPEC +-#define LINK_SPEC "-m elf64_sparc -Y P,/usr/lib64 %{shared:-shared} \ ++#define LINK_SPEC "-m elf64_sparc -Y P,%R/usr/lib64 %{shared:-shared} \ + %{!shared: \ + %{!ibcs: \ + %{!static: \ +--- a/gcc/config/sparc/sol2-bi.h ++++ b/gcc/config/sparc/sol2-bi.h +@@ -172,12 +172,12 @@ + %{YP,*} \ + %{R*} \ + %{compat-bsd: \ +- %{!YP,*:%{p|pg:-Y P,/usr/ucblib/sparcv9:/usr/lib/libp/sparcv9:/usr/lib/sparcv9} \ +- %{!p:%{!pg:-Y P,/usr/ucblib/sparcv9:/usr/lib/sparcv9}}} \ +- -R /usr/ucblib/sparcv9} \ ++ %{!YP,*:%{p|pg:-Y P,%R/usr/ucblib/sparcv9:%R/usr/lib/libp/sparcv9:%R/usr/lib/sparcv9} \ ++ %{!p:%{!pg:-Y P,%R/usr/ucblib/sparcv9:%R/usr/lib/sparcv9}}} \ ++ -R %R/usr/ucblib/sparcv9} \ + %{!compat-bsd: \ +- %{!YP,*:%{p|pg:-Y P,/usr/lib/libp/sparcv9:/usr/lib/sparcv9} \ +- %{!p:%{!pg:-Y P,/usr/lib/sparcv9}}}}" ++ %{!YP,*:%{p|pg:-Y P,%R/usr/lib/libp/sparcv9:%R/usr/lib/sparcv9} \ ++ %{!p:%{!pg:-Y P,%R/usr/lib/sparcv9}}}}" + + #define LINK_ARCH64_SPEC LINK_ARCH64_SPEC_BASE + +--- a/gcc/config/sparc/sparc.c ++++ b/gcc/config/sparc/sparc.c +@@ -2371,6 +2371,8 @@ emit_soft_tfmode_cvt (enum rtx_code code + { + case SImode: + func = "_Qp_itoq"; ++ if (TARGET_ARCH64) ++ operands[1] = gen_rtx_SIGN_EXTEND (DImode, operands[1]); + break; + case DImode: + func = "_Qp_xtoq"; +@@ -2385,6 +2387,8 @@ emit_soft_tfmode_cvt (enum rtx_code code + { + case SImode: + func = "_Qp_uitoq"; ++ if (TARGET_ARCH64) ++ operands[1] = gen_rtx_ZERO_EXTEND (DImode, operands[1]); + break; + case DImode: + func = "_Qp_uxtoq"; +@@ -4623,6 +4627,7 @@ function_arg_slotno (const struct sparc_ + { + case MODE_FLOAT: + case MODE_COMPLEX_FLOAT: ++ case MODE_VECTOR_INT: + if (TARGET_ARCH64 && TARGET_FPU && named) + { + if (slotno >= SPARC_FP_ARG_MAX) +@@ -6097,7 +6102,7 @@ void + sparc_emit_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison) + { + const char *qpfunc; +- rtx slot0, slot1, result, tem, tem2; ++ rtx slot0, slot1, result, tem, tem2, libfunc; + enum machine_mode mode; + + switch (comparison) +@@ -6159,7 +6164,8 @@ sparc_emit_float_lib_cmp (rtx x, rtx y, + else + slot1 = y; + +- emit_library_call (gen_rtx_SYMBOL_REF (Pmode, qpfunc), LCT_NORMAL, ++ libfunc = gen_rtx_SYMBOL_REF (Pmode, qpfunc); ++ emit_library_call (libfunc, LCT_NORMAL, + DImode, 2, + XEXP (slot0, 0), Pmode, + XEXP (slot1, 0), Pmode); +@@ -6168,7 +6174,8 @@ sparc_emit_float_lib_cmp (rtx x, rtx y, + } + else + { +- emit_library_call (gen_rtx_SYMBOL_REF (Pmode, qpfunc), LCT_NORMAL, ++ libfunc = gen_rtx_SYMBOL_REF (Pmode, qpfunc); ++ emit_library_call (libfunc, LCT_NORMAL, + SImode, 2, + x, TFmode, y, TFmode); + +@@ -6180,7 +6187,7 @@ sparc_emit_float_lib_cmp (rtx x, rtx y, + register so reload doesn't clobber the value if it needs + the return register for a spill reg. */ + result = gen_reg_rtx (mode); +- emit_move_insn (result, hard_libcall_value (mode)); ++ emit_move_insn (result, hard_libcall_value (mode, libfunc)); + + switch (comparison) + { +--- a/gcc/config/svr4.h ++++ b/gcc/config/svr4.h +@@ -55,7 +55,8 @@ along with GCC; see the file COPYING3. + && strcmp (STR, "Tdata") && strcmp (STR, "Ttext") \ + && strcmp (STR, "Tbss")) + +-/* Provide an ASM_SPEC appropriate for svr4. Here we try to support as ++/* Provide an ASM_SPEC appropriate for svr4. 
++ If we're not using GAS, we try to support as + many of the specialized svr4 assembler options as seems reasonable, + given that there are certain options which we can't (or shouldn't) + support directly due to the fact that they conflict with other options +@@ -74,9 +75,16 @@ along with GCC; see the file COPYING3. + read its stdin. + */ + +-#undef ASM_SPEC +-#define ASM_SPEC \ ++#ifdef USE_GAS ++#define SVR4_ASM_SPEC \ ++ "%{v:-V} %{Wa,*:%*}" ++#else ++#define SVR4_ASM_SPEC \ + "%{v:-V} %{Qy:} %{!Qn:-Qy} %{n} %{T} %{Ym,*} %{Yd,*} %{Wa,*:%*}" ++#endif ++ ++#undef ASM_SPEC ++#define ASM_SPEC SVR4_ASM_SPEC + + #define AS_NEEDS_DASH_FOR_PIPED_INPUT + +--- /dev/null ++++ b/gcc/config/t-eglibc +@@ -0,0 +1,25 @@ ++# multilibs -*- mode:Makefile -*- ++ ++MULTILIB_EXCEPTIONS := ++MULTILIB_MATCHES := ++MULTILIB_ALIASES := ++ ++# For all items in EGLIBC_CONFIGS except for the last one ++# do $1. For the last one do $2. The items are separated with ",". ++EGLIBC_AWK = $(shell echo $(EGLIBC_CONFIGS) | $(AWK) \ ++ '{ \ ++ N=split ($$0, configs, ","); \ ++ for (i = 1; i < N; ++i) $1; \ ++ $2; \ ++ }') ++ ++MULTILIB_OPTIONS := $(call EGLIBC_AWK, \ ++ printf ("feglibc=%s/", configs[i]), \ ++ printf ("feglibc=%s\n", configs[i])) ++MULTILIB_DIRNAMES := $(call EGLIBC_AWK, \ ++ printf ("%s ", configs[i]), \ ++ printf ("%s\n", configs[i])) ++MULTILIB_OSDIRNAMES := $(call EGLIBC_AWK, \ ++ printf ("feglibc.%s=!%s ", configs[i], configs[i]), \ ++ printf ("feglibc.%s=!%s\n", configs[i], configs[i])) ++ +--- /dev/null ++++ b/gcc/config/t-sysroot-suffix +@@ -0,0 +1,6 @@ ++# Generate SYSROOT_SUFFIX_SPEC from MULTILIB_OSDIRNAMES ++ ++sysroot-suffix.h: $(srcdir)/config/print-sysroot-suffix.sh ++ $(SHELL) $(srcdir)/config/print-sysroot-suffix.sh \ ++ "$(MULTILIB_OSDIRNAMES)" "$(MULTILIB_OPTIONS)" \ ++ "$(MULTILIB_MATCHES)" "$(MULTILIB_ALIASES)" > $@ +--- a/gcc/configure ++++ b/gcc/configure +@@ -458,7 +458,7 @@ ac_includes_default="\ + # include + #endif" + +-ac_subst_vars='SHELL PATH_SEPARATOR PACKAGE_NAME PACKAGE_TARNAME PACKAGE_VERSION PACKAGE_STRING PACKAGE_BUGREPORT exec_prefix prefix program_transform_name bindir sbindir libexecdir datadir sysconfdir sharedstatedir localstatedir libdir includedir oldincludedir infodir mandir build_alias host_alias target_alias DEFS ECHO_C ECHO_N ECHO_T LIBS build build_cpu build_vendor build_os host host_cpu host_vendor host_os target target_cpu target_vendor target_os target_noncanonical build_libsubdir build_subdir host_subdir target_subdir GENINSRC CC CFLAGS LDFLAGS CPPFLAGS ac_ct_CC EXEEXT OBJEXT NO_MINUS_C_MINUS_O OUTPUT_OPTION CPP EGREP loose_warn cxx_compat_warn strict_warn warn_cflags nocommon_flag TREEBROWSER valgrind_path valgrind_path_defines valgrind_command coverage_flags enable_multilib enable_decimal_float enable_fixed_point enable_shared TARGET_SYSTEM_ROOT TARGET_SYSTEM_ROOT_DEFINE CROSS_SYSTEM_HEADER_DIR onestep PKGVERSION REPORT_BUGS_TO REPORT_BUGS_TEXI datarootdir docdir htmldir SET_MAKE AWK LN_S LN RANLIB ac_ct_RANLIB ranlib_flags INSTALL INSTALL_PROGRAM INSTALL_DATA make_compare_target have_mktemp_command MAKEINFO BUILD_INFO GENERATED_MANPAGES FLEX BISON NM AR COLLECT2_LIBS GNAT_LIBEXC LDEXP_LIB TARGET_GETGROUPS_T LIBICONV LTLIBICONV LIBICONV_DEP manext objext gthread_flags extra_modes_file extra_opt_files USE_NLS LIBINTL LIBINTL_DEP INCINTL XGETTEXT GMSGFMT POSUB CATALOGS DATADIRNAME INSTOBJEXT GENCAT CATOBJEXT host_cc_for_libada CROSS ALL SYSTEM_HEADER_DIR inhibit_libc CC_FOR_BUILD BUILD_CFLAGS STMP_FIXINC STMP_FIXPROTO collect2 LIBTOOL SED FGREP GREP LD 
DUMPBIN ac_ct_DUMPBIN ac_ct_AR STRIP ac_ct_STRIP lt_ECHO objdir enable_fast_install gcc_cv_as ORIGINAL_AS_FOR_TARGET gcc_cv_ld ORIGINAL_LD_FOR_TARGET gcc_cv_nm ORIGINAL_NM_FOR_TARGET gcc_cv_objdump libgcc_visibility GGC zlibdir zlibinc MAINT gcc_tooldir dollar slibdir subdirs srcdir all_compilers all_gtfiles all_lang_makefrags all_lang_makefiles all_languages all_selected_languages build_exeext build_install_headers_dir build_xm_file_list build_xm_include_list build_xm_defines build_file_translate check_languages cpp_install_dir xmake_file tmake_file extra_gcc_objs extra_headers_list extra_objs extra_parts extra_passes extra_programs float_h_file gcc_config_arguments gcc_gxx_include_dir host_exeext host_xm_file_list host_xm_include_list host_xm_defines out_host_hook_obj install lang_opt_files lang_specs_files lang_tree_files local_prefix md_file objc_boehm_gc out_file out_object_file thread_file tm_file_list tm_include_list tm_defines tm_p_file_list tm_p_include_list xm_file_list xm_include_list xm_defines c_target_objs cxx_target_objs target_cpu_default GMPLIBS GMPINC LIBOBJS LTLIBOBJS' ++ac_subst_vars='SHELL PATH_SEPARATOR PACKAGE_NAME PACKAGE_TARNAME PACKAGE_VERSION PACKAGE_STRING PACKAGE_BUGREPORT exec_prefix prefix program_transform_name bindir sbindir libexecdir datadir sysconfdir sharedstatedir localstatedir libdir includedir oldincludedir infodir mandir build_alias host_alias target_alias DEFS ECHO_C ECHO_N ECHO_T LIBS build build_cpu build_vendor build_os host host_cpu host_vendor host_os target target_cpu target_vendor target_os target_noncanonical licensedir build_libsubdir build_subdir host_subdir target_subdir GENINSRC CC CFLAGS LDFLAGS CPPFLAGS ac_ct_CC EXEEXT OBJEXT NO_MINUS_C_MINUS_O OUTPUT_OPTION CPP EGREP loose_warn cxx_compat_warn strict_warn warn_cflags nocommon_flag TREEBROWSER valgrind_path valgrind_path_defines valgrind_command coverage_flags enable_multilib enable_decimal_float enable_fixed_point enable_shared TARGET_SYSTEM_ROOT TARGET_SYSTEM_ROOT_DEFINE CROSS_SYSTEM_HEADER_DIR CONFIGURE_SPECS EGLIBC_CONFIGS onestep PKGVERSION REPORT_BUGS_TO REPORT_BUGS_TEXI datarootdir docdir htmldir SET_MAKE AWK LN_S LN RANLIB ac_ct_RANLIB ranlib_flags INSTALL INSTALL_PROGRAM INSTALL_DATA make_compare_target have_mktemp_command MAKEINFO BUILD_INFO GENERATED_MANPAGES FLEX BISON NM AR COLLECT2_LIBS GNAT_LIBEXC LDEXP_LIB TARGET_GETGROUPS_T LIBICONV LTLIBICONV LIBICONV_DEP manext objext gthread_flags extra_modes_file extra_opt_files USE_NLS LIBINTL LIBINTL_DEP INCINTL XGETTEXT GMSGFMT POSUB CATALOGS DATADIRNAME INSTOBJEXT GENCAT CATOBJEXT host_cc_for_libada CROSS ALL SYSTEM_HEADER_DIR inhibit_libc CC_FOR_BUILD BUILD_CFLAGS STMP_FIXINC STMP_FIXPROTO collect2 LIBTOOL SED FGREP GREP LD DUMPBIN ac_ct_DUMPBIN ac_ct_AR STRIP ac_ct_STRIP lt_ECHO objdir enable_fast_install gcc_cv_as ORIGINAL_AS_FOR_TARGET gcc_cv_ld ORIGINAL_LD_FOR_TARGET gcc_cv_nm ORIGINAL_NM_FOR_TARGET gcc_cv_objdump libgcc_visibility GGC zlibdir zlibinc MAINT gcc_tooldir dollar slibdir subdirs srcdir all_compilers all_gtfiles all_lang_makefrags all_lang_makefiles all_languages all_selected_languages build_exeext build_install_headers_dir build_xm_file_list build_xm_include_list build_xm_defines build_file_translate check_languages cpp_install_dir xmake_file tmake_file TM_ENDIAN_CONFIG TM_CPU_CONFIG TM_MULTILIB_CONFIG TM_MULTILIB_EXCEPTIONS_CONFIG extra_gcc_objs extra_headers_list extra_objs extra_parts extra_passes extra_programs float_h_file gcc_config_arguments gcc_gxx_include_dir host_exeext host_xm_file_list 
host_xm_include_list host_xm_defines out_host_hook_obj install lang_opt_files lang_specs_files lang_tree_files local_prefix md_file objc_boehm_gc out_file out_object_file thread_file tm_file_list tm_include_list tm_defines tm_p_file_list tm_p_include_list xm_file_list xm_include_list xm_defines c_target_objs cxx_target_objs target_cpu_default GMPLIBS GMPINC LIBOBJS LTLIBOBJS' + ac_subst_files='language_hooks' + + # Initialize some variables set by options. +@@ -1048,6 +1048,7 @@ Optional Features: + arrange to use setjmp/longjmp exception handling + --enable-secureplt enable -msecure-plt by default for PowerPC + --enable-cld enable -mcld by default for 32bit x86 ++ --enable-mips-nonpic enable non-PIC ABI by default for MIPS GNU/Linux o32 + --disable-win32-registry + disable lookup of installation paths in the + Registry on Windows hosts +@@ -1068,6 +1069,8 @@ Optional Features: + --enable-version-specific-runtime-libs + specify that runtime libraries should be + installed in a compiler-specific directory ++ --enable-poison-system-directories ++ warn for use of native system header directories + + Optional Packages: + --with-PACKAGE[=ARG] use PACKAGE [ARG=yes] +@@ -1089,6 +1092,9 @@ Optional Packages: + --with-build-sysroot=sysroot + use sysroot as the system root during the build + --with-sysroot=DIR Search for usr/lib, usr/include, et al, within DIR. ++ --with-specs=SPECS add SPECS to driver command-line processing ++ --with-eglibc-configs=CONFIGS ++ build multilibs for these EGLIBC configurations + --with-pkgversion=PKG Use PKG in the version string in place of "GCC" + --with-bugurl=URL Direct users to URL to report a bug + --with-gnu-ld assume the C compiler uses GNU ld default=no +@@ -7367,6 +7373,28 @@ fi; + + + ++ ++# Check whether --with-specs or --without-specs was given. ++if test "${with_specs+set}" = set; then ++ withval="$with_specs" ++ CONFIGURE_SPECS=$withval ++else ++ CONFIGURE_SPECS= ++ ++fi; ++ ++ ++ ++# Check whether --with-eglibc-configs or --without-eglibc-configs was given. ++if test "${with_eglibc_configs+set}" = set; then ++ withval="$with_eglibc_configs" ++ EGLIBC_CONFIGS=$withval ++else ++ EGLIBC_CONFIGS= ++ ++fi; ++ ++ + # Build with intermodule optimisations + # Check whether --enable-intermodule or --disable-intermodule was given. + if test "${enable_intermodule+set}" = set; then +@@ -12971,7 +12999,7 @@ else + *) realsrcdir=../${srcdir};; + esac + saved_CFLAGS="${CFLAGS}" +- CC="${CC_FOR_BUILD}" CFLAGS="${CFLAGS_FOR_BUILD}" \ ++ CC="${CC_FOR_BUILD}" CFLAGS="${CFLAGS_FOR_BUILD}" LDFLAGS="" \ + ${realsrcdir}/configure \ + --enable-languages=${enable_languages-all} \ + --target=$target_alias --host=$build_alias --build=$build_alias +@@ -13119,6 +13147,12 @@ else + enable_cld=no + fi; + ++# Check whether --enable-mips-nonpic or --disable-mips-nonpic was given. ++if test "${enable_mips_nonpic+set}" = set; then ++ enableval="$enable_mips_nonpic" ++ ++fi; ++ + # Windows32 Registry support for specifying GCC installation paths. + # Check whether --enable-win32-registry or --disable-win32-registry was given. 
+ if test "${enable_win32_registry+set}" = set; then +@@ -14064,13 +14098,13 @@ if test "${lt_cv_nm_interface+set}" = se + else + lt_cv_nm_interface="BSD nm" + echo "int some_variable = 0;" > conftest.$ac_ext +- (eval echo "\"\$as_me:14067: $ac_compile\"" >&5) ++ (eval echo "\"\$as_me:14173: $ac_compile\"" >&5) + (eval "$ac_compile" 2>conftest.err) + cat conftest.err >&5 +- (eval echo "\"\$as_me:14070: $NM \\\"conftest.$ac_objext\\\"\"" >&5) ++ (eval echo "\"\$as_me:14176: $NM \\\"conftest.$ac_objext\\\"\"" >&5) + (eval "$NM \"conftest.$ac_objext\"" 2>conftest.err > conftest.out) + cat conftest.err >&5 +- (eval echo "\"\$as_me:14073: output\"" >&5) ++ (eval echo "\"\$as_me:14179: output\"" >&5) + cat conftest.out >&5 + if $GREP 'External.*some_variable' conftest.out > /dev/null; then + lt_cv_nm_interface="MS dumpbin" +@@ -15125,7 +15159,7 @@ ia64-*-hpux*) + ;; + *-*-irix6*) + # Find out which ABI we are using. +- echo '#line 15128 "configure"' > conftest.$ac_ext ++ echo '#line 15234 "configure"' > conftest.$ac_ext + if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>&5 + ac_status=$? +@@ -15745,11 +15779,11 @@ else + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` +- (eval echo "\"\$as_me:15748: $lt_compile\"" >&5) ++ (eval echo "\"\$as_me:15854: $lt_compile\"" >&5) + (eval "$lt_compile" 2>conftest.err) + ac_status=$? + cat conftest.err >&5 +- echo "$as_me:15752: \$? = $ac_status" >&5 ++ echo "$as_me:15858: \$? = $ac_status" >&5 + if (exit $ac_status) && test -s "$ac_outfile"; then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings other than the usual output. +@@ -16067,11 +16101,11 @@ else + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` +- (eval echo "\"\$as_me:16070: $lt_compile\"" >&5) ++ (eval echo "\"\$as_me:16176: $lt_compile\"" >&5) + (eval "$lt_compile" 2>conftest.err) + ac_status=$? + cat conftest.err >&5 +- echo "$as_me:16074: \$? = $ac_status" >&5 ++ echo "$as_me:16180: \$? = $ac_status" >&5 + if (exit $ac_status) && test -s "$ac_outfile"; then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings other than the usual output. +@@ -16172,11 +16206,11 @@ else + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` +- (eval echo "\"\$as_me:16175: $lt_compile\"" >&5) ++ (eval echo "\"\$as_me:16281: $lt_compile\"" >&5) + (eval "$lt_compile" 2>out/conftest.err) + ac_status=$? + cat out/conftest.err >&5 +- echo "$as_me:16179: \$? = $ac_status" >&5 ++ echo "$as_me:16285: \$? = $ac_status" >&5 + if (exit $ac_status) && test -s out/conftest2.$ac_objext + then + # The compiler can only warn and ignore the option if not recognized +@@ -16227,11 +16261,11 @@ else + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` +- (eval echo "\"\$as_me:16230: $lt_compile\"" >&5) ++ (eval echo "\"\$as_me:16336: $lt_compile\"" >&5) + (eval "$lt_compile" 2>out/conftest.err) + ac_status=$? + cat out/conftest.err >&5 +- echo "$as_me:16234: \$? = $ac_status" >&5 ++ echo "$as_me:16340: \$? 
= $ac_status" >&5 + if (exit $ac_status) && test -s out/conftest2.$ac_objext + then + # The compiler can only warn and ignore the option if not recognized +@@ -19024,7 +19058,7 @@ else + lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 + lt_status=$lt_dlunknown + cat > conftest.$ac_ext <<_LT_EOF +-#line 19027 "configure" ++#line 19133 "configure" + #include "confdefs.h" + + #if HAVE_DLFCN_H +@@ -19124,7 +19158,7 @@ else + lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 + lt_status=$lt_dlunknown + cat > conftest.$ac_ext <<_LT_EOF +-#line 19127 "configure" ++#line 19233 "configure" + #include "confdefs.h" + + #if HAVE_DLFCN_H +@@ -20592,6 +20626,22 @@ x: + tls_first_minor=16 + tls_as_opt='-32 --fatal-warnings' + ;; ++ m68k-*-*) ++ conftest_s=' ++ .section .tdata,"awT",@progbits ++x: ++ .word 2 ++ .text ++foo: ++ move.l x@TLSGD(%a5),%a0 ++ move.l x@TLSLDM(%a5),%a0 ++ move.l x@TLSLDO(%a5),%a0 ++ move.l x@TLSIE(%a5),%a0 ++ move.l x@TLSLE(%a5),%a0' ++ tls_first_major=2 ++ tls_first_minor=19 ++ tls_as_opt='--fatal-warnings' ++ ;; + powerpc-*-*) + conftest_s=' + .section ".tdata","awT",@progbits +@@ -22098,7 +22148,8 @@ esac + case "$target" in + i?86*-*-* | mips*-*-* | alpha*-*-* | powerpc*-*-* | sparc*-*-* | m68*-*-* \ + | x86_64*-*-* | hppa*-*-* | arm*-*-* | strongarm*-*-* | xscale*-*-* \ +- | xstormy16*-*-* | cris-*-* | xtensa-*-* | bfin-*-* | score*-*-* | spu-*-*) ++ | xstormy16*-*-* | cris-*-* | xtensa-*-* | bfin-*-* | score*-*-* \ ++ | spu-*-* | fido*-*-*) + insn="nop" + ;; + ia64*-*-* | s390*-*-*) +@@ -23088,6 +23139,21 @@ fi + fi; + + ++# Check whether --enable-poison-system-directories or --disable-poison-system-directories was given. ++if test "${enable_poison_system_directories+set}" = set; then ++ enableval="$enable_poison_system_directories" ++ ++else ++ enable_poison_system_directories=no ++fi; ++if test "x${enable_poison_system_directories}" = "xyes"; then ++ ++cat >>confdefs.h <<\_ACEOF ++#define ENABLE_POISON_SYSTEM_DIRECTORIES 1 ++_ACEOF ++ ++fi ++ + + # Check whether --with-datarootdir or --without-datarootdir was given. + if test "${with_datarootdir+set}" = set; then +@@ -23175,6 +23241,10 @@ fi; + + + ++ ++ ++ ++ + # Echo link setup. 
+ if test x${build} = x${host} ; then + if test x${host} = x${target} ; then +@@ -23844,6 +23914,7 @@ s,@target_cpu@,$target_cpu,;t t + s,@target_vendor@,$target_vendor,;t t + s,@target_os@,$target_os,;t t + s,@target_noncanonical@,$target_noncanonical,;t t ++s,@licensedir@,$licensedir,;t t + s,@build_libsubdir@,$build_libsubdir,;t t + s,@build_subdir@,$build_subdir,;t t + s,@host_subdir@,$host_subdir,;t t +@@ -23877,6 +23948,8 @@ s,@enable_shared@,$enable_shared,;t t + s,@TARGET_SYSTEM_ROOT@,$TARGET_SYSTEM_ROOT,;t t + s,@TARGET_SYSTEM_ROOT_DEFINE@,$TARGET_SYSTEM_ROOT_DEFINE,;t t + s,@CROSS_SYSTEM_HEADER_DIR@,$CROSS_SYSTEM_HEADER_DIR,;t t ++s,@CONFIGURE_SPECS@,$CONFIGURE_SPECS,;t t ++s,@EGLIBC_CONFIGS@,$EGLIBC_CONFIGS,;t t + s,@onestep@,$onestep,;t t + s,@PKGVERSION@,$PKGVERSION,;t t + s,@REPORT_BUGS_TO@,$REPORT_BUGS_TO,;t t +@@ -23983,6 +24056,10 @@ s,@check_languages@,$check_languages,;t + s,@cpp_install_dir@,$cpp_install_dir,;t t + s,@xmake_file@,$xmake_file,;t t + s,@tmake_file@,$tmake_file,;t t ++s,@TM_ENDIAN_CONFIG@,$TM_ENDIAN_CONFIG,;t t ++s,@TM_CPU_CONFIG@,$TM_CPU_CONFIG,;t t ++s,@TM_MULTILIB_CONFIG@,$TM_MULTILIB_CONFIG,;t t ++s,@TM_MULTILIB_EXCEPTIONS_CONFIG@,$TM_MULTILIB_EXCEPTIONS_CONFIG,;t t + s,@extra_gcc_objs@,$extra_gcc_objs,;t t + s,@extra_headers_list@,$extra_headers_list,;t t + s,@extra_objs@,$extra_objs,;t t +--- a/gcc/configure.ac ++++ b/gcc/configure.ac +@@ -767,6 +767,22 @@ AC_SUBST(TARGET_SYSTEM_ROOT) + AC_SUBST(TARGET_SYSTEM_ROOT_DEFINE) + AC_SUBST(CROSS_SYSTEM_HEADER_DIR) + ++AC_ARG_WITH(specs, ++ [AS_HELP_STRING([--with-specs=SPECS], ++ [add SPECS to driver command-line processing])], ++ [CONFIGURE_SPECS=$withval], ++ [CONFIGURE_SPECS=] ++) ++AC_SUBST(CONFIGURE_SPECS) ++ ++AC_ARG_WITH(eglibc-configs, ++ [AS_HELP_STRING([--with-eglibc-configs=CONFIGS], ++ [build multilibs for these EGLIBC configurations])], ++ [EGLIBC_CONFIGS=$withval], ++ [EGLIBC_CONFIGS=] ++) ++AC_SUBST(EGLIBC_CONFIGS) ++ + # Build with intermodule optimisations + AC_ARG_ENABLE(intermodule, + [ --enable-intermodule build the compiler in one step], +@@ -1479,7 +1495,7 @@ else + *) realsrcdir=../${srcdir};; + esac + saved_CFLAGS="${CFLAGS}" +- CC="${CC_FOR_BUILD}" CFLAGS="${CFLAGS_FOR_BUILD}" \ ++ CC="${CC_FOR_BUILD}" CFLAGS="${CFLAGS_FOR_BUILD}" LDFLAGS="" \ + ${realsrcdir}/configure \ + --enable-languages=${enable_languages-all} \ + --target=$target_alias --host=$build_alias --build=$build_alias +@@ -1552,6 +1568,10 @@ AC_ARG_ENABLE(cld, + [ --enable-cld enable -mcld by default for 32bit x86], [], + [enable_cld=no]) + ++AC_ARG_ENABLE(mips-nonpic, ++[ --enable-mips-nonpic enable non-PIC ABI by default for MIPS GNU/Linux o32], ++[], []) ++ + # Windows32 Registry support for specifying GCC installation paths. 
+ AC_ARG_ENABLE(win32-registry, + [ --disable-win32-registry +@@ -2455,6 +2475,22 @@ x: + tls_first_minor=16 + tls_as_opt='-32 --fatal-warnings' + ;; ++ m68k-*-*) ++ conftest_s=' ++ .section .tdata,"awT",@progbits ++x: ++ .word 2 ++ .text ++foo: ++ move.l x@TLSGD(%a5),%a0 ++ move.l x@TLSLDM(%a5),%a0 ++ move.l x@TLSLDO(%a5),%a0 ++ move.l x@TLSIE(%a5),%a0 ++ move.l x@TLSLE(%a5),%a0' ++ tls_first_major=2 ++ tls_first_minor=19 ++ tls_as_opt='--fatal-warnings' ++ ;; + powerpc-*-*) + conftest_s=' + .section ".tdata","awT",@progbits +@@ -3077,7 +3113,8 @@ esac + case "$target" in + i?86*-*-* | mips*-*-* | alpha*-*-* | powerpc*-*-* | sparc*-*-* | m68*-*-* \ + | x86_64*-*-* | hppa*-*-* | arm*-*-* | strongarm*-*-* | xscale*-*-* \ +- | xstormy16*-*-* | cris-*-* | xtensa-*-* | bfin-*-* | score*-*-* | spu-*-*) ++ | xstormy16*-*-* | cris-*-* | xtensa-*-* | bfin-*-* | score*-*-* \ ++ | spu-*-* | fido*-*-*) + insn="nop" + ;; + ia64*-*-* | s390*-*-*) +@@ -3731,6 +3768,16 @@ else + fi) + AC_SUBST(slibdir) + ++AC_ARG_ENABLE([poison-system-directories], ++ AS_HELP_STRING([--enable-poison-system-directories], ++ [warn for use of native system header directories]),, ++ [enable_poison_system_directories=no]) ++if test "x${enable_poison_system_directories}" = "xyes"; then ++ AC_DEFINE([ENABLE_POISON_SYSTEM_DIRECTORIES], ++ [1], ++ [Define to warn for use of native system header directories]) ++fi ++ + AC_ARG_WITH(datarootdir, + [ --with-datarootdir=DIR Use DIR as the data root [[PREFIX/share]]], + datarootdir="\${prefix}/$with_datarootdir", +@@ -3768,6 +3815,10 @@ AC_SUBST(check_languages) + AC_SUBST(cpp_install_dir) + AC_SUBST(xmake_file) + AC_SUBST(tmake_file) ++AC_SUBST(TM_ENDIAN_CONFIG) ++AC_SUBST(TM_CPU_CONFIG) ++AC_SUBST(TM_MULTILIB_CONFIG) ++AC_SUBST(TM_MULTILIB_EXCEPTIONS_CONFIG) + AC_SUBST(extra_gcc_objs) + AC_SUBST(extra_headers_list) + AC_SUBST(extra_objs) +--- a/gcc/cp/class.c ++++ b/gcc/cp/class.c +@@ -6030,7 +6030,7 @@ resolve_address_of_overloaded_function ( + if (flags & tf_error) + { + error ("no matches converting function %qD to type %q#T", +- DECL_NAME (OVL_FUNCTION (overload)), ++ DECL_NAME (OVL_CURRENT (overload)), + target_type); + + /* print_candidates expects a chain with the functions in +@@ -6179,13 +6179,8 @@ instantiate_type (tree lhstype, tree rhs + dependent on overload resolution. */ + gcc_assert (TREE_CODE (rhs) == ADDR_EXPR + || TREE_CODE (rhs) == COMPONENT_REF +- || TREE_CODE (rhs) == COMPOUND_EXPR +- || really_overloaded_fn (rhs)); +- +- /* We don't overwrite rhs if it is an overloaded function. +- Copying it would destroy the tree link. */ +- if (TREE_CODE (rhs) != OVERLOAD) +- rhs = copy_node (rhs); ++ || really_overloaded_fn (rhs) ++ || (flag_ms_extensions && TREE_CODE (rhs) == FUNCTION_DECL)); + + /* This should really only be used when attempting to distinguish + what sort of a pointer to function we have. 
For now, any +@@ -6237,19 +6232,6 @@ instantiate_type (tree lhstype, tree rhs + /*explicit_targs=*/NULL_TREE, + access_path); + +- case COMPOUND_EXPR: +- TREE_OPERAND (rhs, 0) +- = instantiate_type (lhstype, TREE_OPERAND (rhs, 0), flags); +- if (TREE_OPERAND (rhs, 0) == error_mark_node) +- return error_mark_node; +- TREE_OPERAND (rhs, 1) +- = instantiate_type (lhstype, TREE_OPERAND (rhs, 1), flags); +- if (TREE_OPERAND (rhs, 1) == error_mark_node) +- return error_mark_node; +- +- TREE_TYPE (rhs) = lhstype; +- return rhs; +- + case ADDR_EXPR: + { + if (PTRMEM_OK_P (rhs)) +--- a/gcc/cp/cvt.c ++++ b/gcc/cp/cvt.c +@@ -580,6 +580,7 @@ ocp_convert (tree type, tree expr, int c + tree e = expr; + enum tree_code code = TREE_CODE (type); + const char *invalid_conv_diag; ++ tree e1; + + if (error_operand_p (e) || type == error_mark_node) + return error_mark_node; +@@ -628,6 +629,9 @@ ocp_convert (tree type, tree expr, int c + } + } + ++ if (e1 = targetm.convert_to_type (type, e)) ++ return e1; ++ + if (code == VOID_TYPE && (convtype & CONV_STATIC)) + { + e = convert_to_void (e, /*implicit=*/NULL); +@@ -1190,11 +1194,18 @@ build_expr_type_conversion (int desires, + tree + type_promotes_to (tree type) + { ++ tree promoted_type; ++ + if (type == error_mark_node) + return error_mark_node; + + type = TYPE_MAIN_VARIANT (type); + ++ /* Check for promotions of target-defined types first. */ ++ promoted_type = targetm.promoted_type (type); ++ if (promoted_type) ++ return promoted_type; ++ + /* bool always promotes to int (not unsigned), even if it's the same + size. */ + if (type == boolean_type_node) +--- a/gcc/cp/decl.c ++++ b/gcc/cp/decl.c +@@ -4379,7 +4379,7 @@ maybe_deduce_size_from_array_init (tree + + cp_apply_type_quals_to_decl (cp_type_quals (TREE_TYPE (decl)), decl); + +- layout_decl (decl, 0); ++ relayout_decl (decl); + } + } + +@@ -7413,6 +7413,7 @@ grokdeclarator (const cp_declarator *dec + bool type_was_error_mark_node = false; + bool parameter_pack_p = declarator? declarator->parameter_pack_p : false; + bool set_no_warning = false; ++ const char *errmsg; + + signed_p = declspecs->specs[(int)ds_signed]; + unsigned_p = declspecs->specs[(int)ds_unsigned]; +@@ -8092,6 +8093,12 @@ grokdeclarator (const cp_declarator *dec + error ("%qs declared as function returning an array", name); + type = integer_type_node; + } ++ errmsg = targetm.invalid_return_type (type); ++ if (errmsg) ++ { ++ error (errmsg); ++ type = integer_type_node; ++ } + + /* Pick up type qualifiers which should be applied to `this'. */ + memfn_quals = declarator->u.function.qualifiers; +@@ -8585,8 +8592,14 @@ grokdeclarator (const cp_declarator *dec + + /* Replace the anonymous name with the real name everywhere. 
*/ + for (t = TYPE_MAIN_VARIANT (type); t; t = TYPE_NEXT_VARIANT (t)) +- if (TYPE_NAME (t) == oldname) +- TYPE_NAME (t) = decl; ++ { ++ if (TYPE_NAME (t) == oldname) ++ { ++ debug_hooks->set_name (t, decl); ++ TYPE_NAME (t) = decl; ++ } ++ } ++ + + if (TYPE_LANG_SPECIFIC (type)) + TYPE_WAS_ANONYMOUS (type) = 1; +@@ -9378,6 +9391,7 @@ grokparms (cp_parameter_declarator *firs + tree init = parm->default_argument; + tree attrs; + tree decl; ++ const char *errmsg; + + if (parm == no_parameters) + break; +@@ -9418,6 +9432,14 @@ grokparms (cp_parameter_declarator *firs + init = NULL_TREE; + } + ++ if (type != error_mark_node ++ && (errmsg = targetm.invalid_parameter_type (type))) ++ { ++ error (errmsg); ++ type = error_mark_node; ++ TREE_TYPE (decl) = error_mark_node; ++ } ++ + if (type != error_mark_node) + { + /* Top-level qualifiers on the parameters are +--- a/gcc/cp/decl2.c ++++ b/gcc/cp/decl2.c +@@ -1682,6 +1682,10 @@ decl_needed_p (tree decl) + || (DECL_ASSEMBLER_NAME_SET_P (decl) + && TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl)))) + return true; ++ /* Functions marked "dllexport" must be emitted so that they are ++ visible to other DLLs. */ ++ if (lookup_attribute ("dllexport", DECL_ATTRIBUTES (decl))) ++ return true; + /* Otherwise, DECL does not need to be emitted -- yet. A subsequent + reference to DECL might cause it to be emitted later. */ + return false; +@@ -1963,6 +1967,14 @@ determine_visibility (tree decl) + /* tinfo visibility is based on the type it's for. */ + constrain_visibility + (decl, type_visibility (TREE_TYPE (DECL_NAME (decl)))); ++ ++ /* Give the target a chance to override the visibility associated ++ with DECL. */ ++ if (TREE_PUBLIC (decl) ++ && !DECL_REALLY_EXTERN (decl) ++ && CLASS_TYPE_P (TREE_TYPE (DECL_NAME (decl))) ++ && !CLASSTYPE_VISIBILITY_SPECIFIED (TREE_TYPE (DECL_NAME (decl)))) ++ targetm.cxx.determine_class_data_visibility (decl); + } + else if (use_template) + /* Template instantiations and specializations get visibility based +--- a/gcc/cp/mangle.c ++++ b/gcc/cp/mangle.c +@@ -1593,6 +1593,13 @@ write_type (tree type) + if (target_mangling) + { + write_string (target_mangling); ++ /* Add substitutions for types other than fundamental ++ types. */ ++ if (TREE_CODE (type) != VOID_TYPE ++ && TREE_CODE (type) != INTEGER_TYPE ++ && TREE_CODE (type) != REAL_TYPE ++ && TREE_CODE (type) != BOOLEAN_TYPE) ++ add_substitution (type); + return; + } + +--- a/gcc/cp/semantics.c ++++ b/gcc/cp/semantics.c +@@ -3218,8 +3218,10 @@ expand_or_defer_fn (tree fn) + + /* If the user wants us to keep all inline functions, then mark + this function as needed so that finish_file will make sure to +- output it later. */ +- if (flag_keep_inline_functions && DECL_DECLARED_INLINE_P (fn)) ++ output it later. Similarly, all dllexport'd functions must ++ be emitted; there may be callers in other DLLs. */ ++ if ((flag_keep_inline_functions && DECL_DECLARED_INLINE_P (fn)) ++ || lookup_attribute ("dllexport", DECL_ATTRIBUTES (fn))) + mark_needed (fn); + } + +--- a/gcc/cp/typeck.c ++++ b/gcc/cp/typeck.c +@@ -1627,10 +1627,14 @@ decay_conversion (tree exp) + tree + default_conversion (tree exp) + { ++ /* Check for target-specific promotions. */ ++ tree promoted_type = targetm.promoted_type (TREE_TYPE (exp)); ++ if (promoted_type) ++ exp = cp_convert (promoted_type, exp); + /* Perform the integral promotions first so that bitfield + expressions (which may promote to "int", even if the bitfield is + declared "unsigned") are promoted correctly. 
*/ +- if (INTEGRAL_OR_ENUMERATION_TYPE_P (TREE_TYPE (exp))) ++ else if (INTEGRAL_OR_ENUMERATION_TYPE_P (TREE_TYPE (exp))) + exp = perform_integral_promotions (exp); + /* Perform the other conversions. */ + exp = decay_conversion (exp); +@@ -4837,6 +4841,12 @@ build_compound_expr (tree lhs, tree rhs) + return rhs; + } + ++ if (type_unknown_p (rhs)) ++ { ++ error ("no context to resolve type of %qE", rhs); ++ return error_mark_node; ++ } ++ + return build2 (COMPOUND_EXPR, TREE_TYPE (rhs), lhs, rhs); + } + +@@ -6814,6 +6824,7 @@ static int + comp_ptr_ttypes_real (tree to, tree from, int constp) + { + bool to_more_cv_qualified = false; ++ bool is_opaque_pointer = false; + + for (; ; to = TREE_TYPE (to), from = TREE_TYPE (from)) + { +@@ -6848,9 +6859,13 @@ comp_ptr_ttypes_real (tree to, tree from + constp &= TYPE_READONLY (to); + } + ++ if (TREE_CODE (to) == VECTOR_TYPE) ++ is_opaque_pointer = vector_targets_convertible_p (to, from); ++ + if (TREE_CODE (to) != POINTER_TYPE && !TYPE_PTRMEM_P (to)) + return ((constp >= 0 || to_more_cv_qualified) +- && same_type_ignoring_top_level_qualifiers_p (to, from)); ++ && (is_opaque_pointer ++ || same_type_ignoring_top_level_qualifiers_p (to, from))); + } + } + +@@ -6911,6 +6926,8 @@ ptr_reasonably_similar (const_tree to, c + bool + comp_ptr_ttypes_const (tree to, tree from) + { ++ bool is_opaque_pointer = false; ++ + for (; ; to = TREE_TYPE (to), from = TREE_TYPE (from)) + { + if (TREE_CODE (to) != TREE_CODE (from)) +@@ -6921,8 +6938,12 @@ comp_ptr_ttypes_const (tree to, tree fro + TYPE_OFFSET_BASETYPE (to))) + continue; + ++ if (TREE_CODE (to) == VECTOR_TYPE) ++ is_opaque_pointer = vector_targets_convertible_p (to, from); ++ + if (TREE_CODE (to) != POINTER_TYPE) +- return same_type_ignoring_top_level_qualifiers_p (to, from); ++ return (is_opaque_pointer ++ || same_type_ignoring_top_level_qualifiers_p (to, from)); + } + } + +--- a/gcc/cse.c ++++ b/gcc/cse.c +@@ -5776,6 +5776,11 @@ cse_process_notes_1 (rtx x, rtx object, + validate_change (object, &XEXP (x, i), + cse_process_notes (XEXP (x, i), object, changed), 0); + ++ /* Rebuild a PLUS expression in canonical form if the first operand ++ ends up as a constant. */ ++ if (code == PLUS && GET_CODE (XEXP (x, 0)) == CONST_INT) ++ return plus_constant (XEXP(x, 1), INTVAL (XEXP (x, 0))); ++ + return x; + } + +--- a/gcc/dbxout.c ++++ b/gcc/dbxout.c +@@ -373,6 +373,7 @@ const struct gcc_debug_hooks dbx_debug_h + dbxout_handle_pch, /* handle_pch */ + debug_nothing_rtx, /* var_location */ + debug_nothing_void, /* switch_text_section */ ++ debug_nothing_tree_tree, /* set_name */ + 0 /* start_end_main_source_file */ + }; + #endif /* DBX_DEBUGGING_INFO */ +--- a/gcc/debug.c ++++ b/gcc/debug.c +@@ -49,6 +49,7 @@ const struct gcc_debug_hooks do_nothing_ + debug_nothing_int, /* handle_pch */ + debug_nothing_rtx, /* var_location */ + debug_nothing_void, /* switch_text_section */ ++ debug_nothing_tree_tree, /* set_name */ + 0 /* start_end_main_source_file */ + }; + +--- a/gcc/debug.h ++++ b/gcc/debug.h +@@ -124,6 +124,8 @@ struct gcc_debug_hooks + text sections. */ + void (* switch_text_section) (void); + ++ void (* set_name) (tree, tree); ++ + /* This is 1 if the debug writer wants to see start and end commands for the + main source files, and 0 otherwise. */ + int start_end_main_source_file; +--- a/gcc/doc/extend.texi ++++ b/gcc/doc/extend.texi +@@ -28,10 +28,10 @@ extensions, accepted by GCC in C89 mode + * Local Labels:: Labels local to a block. + * Labels as Values:: Getting pointers to labels, and computed gotos. 
+ * Nested Functions:: As in Algol and Pascal, lexical scoping of functions. +-* Constructing Calls:: Dispatching a call to another function. ++* Constructing Calls:: Dispatching a call to another function. + * Typeof:: @code{typeof}: referring to the type of an expression. + * Conditionals:: Omitting the middle operand of a @samp{?:} expression. +-* Long Long:: Double-word integers---@code{long long int}. ++* Long Long:: Double-word integers---@code{long long int}. + * Complex:: Data types for complex numbers. + * Floating Types:: Additional Floating Types. + * Decimal Float:: Decimal Floating Types. +@@ -40,41 +40,41 @@ extensions, accepted by GCC in C89 mode + * Zero Length:: Zero-length arrays. + * Variable Length:: Arrays whose length is computed at run time. + * Empty Structures:: Structures with no members. +-* Variadic Macros:: Macros with a variable number of arguments. ++* Variadic Macros:: Macros with a variable number of arguments. + * Escaped Newlines:: Slightly looser rules for escaped newlines. + * Subscripting:: Any array can be subscripted, even if not an lvalue. + * Pointer Arith:: Arithmetic on @code{void}-pointers and function pointers. + * Initializers:: Non-constant initializers. + * Compound Literals:: Compound literals give structures, unions +- or arrays as values. +-* Designated Inits:: Labeling elements of initializers. ++ or arrays as values. ++* Designated Inits:: Labeling elements of initializers. + * Cast to Union:: Casting to union type from any member of the union. +-* Case Ranges:: `case 1 ... 9' and such. +-* Mixed Declarations:: Mixing declarations and code. ++* Case Ranges:: `case 1 ... 9' and such. ++* Mixed Declarations:: Mixing declarations and code. + * Function Attributes:: Declaring that functions have no side effects, +- or that they can never return. ++ or that they can never return. + * Attribute Syntax:: Formal syntax for attributes. + * Function Prototypes:: Prototype declarations and old-style definitions. + * C++ Comments:: C++ comments are recognized. + * Dollar Signs:: Dollar sign is allowed in identifiers. + * Character Escapes:: @samp{\e} stands for the character @key{ESC}. +-* Variable Attributes:: Specifying attributes of variables. +-* Type Attributes:: Specifying attributes of types. ++* Variable Attributes:: Specifying attributes of variables. ++* Type Attributes:: Specifying attributes of types. + * Alignment:: Inquiring about the alignment of a type or variable. + * Inline:: Defining inline functions (as fast as macros). + * Extended Asm:: Assembler instructions with C expressions as operands. +- (With them you can define ``built-in'' functions.) ++ (With them you can define ``built-in'' functions.) + * Constraints:: Constraints for asm operands + * Asm Labels:: Specifying the assembler name to use for a C symbol. + * Explicit Reg Vars:: Defining variables residing in specified registers. + * Alternate Keywords:: @code{__const__}, @code{__asm__}, etc., for header files. + * Incomplete Enums:: @code{enum foo;}, with details to follow. +-* Function Names:: Printable strings which are the name of the current +- function. ++* Function Names:: Printable strings which are the name of the current ++ function. + * Return Address:: Getting the return or frame address of a function. + * Vector Extensions:: Using vector instructions through built-in functions. + * Offsetof:: Special syntax for implementing @code{offsetof}. +-* Atomic Builtins:: Built-in functions for atomic memory access. 
++* Atomic Builtins:: Built-in functions for atomic memory access. + * Object Size Checking:: Built-in functions for limited buffer overflow + checking. + * Other Builtins:: Other built-in functions. +@@ -2486,7 +2486,13 @@ defined by shared libraries. + @cindex function without a prologue/epilogue code + Use this attribute on the ARM, AVR, IP2K and SPU ports to indicate that + the specified function does not need prologue/epilogue sequences generated by +-the compiler. It is up to the programmer to provide these sequences. ++the compiler. It is up to the programmer to provide these sequences. The ++only statements that can be safely included in naked functions are ++@code{asm} statements that do not have operands. All other statements, ++including declarations of local variables, @code{if} statements, and so ++forth, should be avoided. Naked functions should be used to implement the ++body of an assembly function, while allowing the compiler to construct ++the requisite function declaration for the assembler. + + @item near + @cindex functions which do not handle memory bank switching on 68HC11/68HC12 +@@ -2539,7 +2545,7 @@ be non-null pointers. For instance, the + @smallexample + extern void * + my_memcpy (void *dest, const void *src, size_t len) +- __attribute__((nonnull (1, 2))); ++ __attribute__((nonnull (1, 2))); + @end smallexample + + @noindent +@@ -2557,7 +2563,7 @@ following declaration is equivalent to t + @smallexample + extern void * + my_memcpy (void *dest, const void *src, size_t len) +- __attribute__((nonnull)); ++ __attribute__((nonnull)); + @end smallexample + + @item noreturn +@@ -3710,13 +3716,13 @@ targets. You can use @code{__declspec ( + compilers. + + @item weak +-The @code{weak} attribute is described in @xref{Function Attributes}. ++The @code{weak} attribute is described in @ref{Function Attributes}. + + @item dllimport +-The @code{dllimport} attribute is described in @xref{Function Attributes}. ++The @code{dllimport} attribute is described in @ref{Function Attributes}. + + @item dllexport +-The @code{dllexport} attribute is described in @xref{Function Attributes}. ++The @code{dllexport} attribute is described in @ref{Function Attributes}. + + @end table + +@@ -3897,21 +3903,21 @@ Three attributes currently are defined f + @code{altivec}, @code{ms_struct} and @code{gcc_struct}. + + For full documentation of the struct attributes please see the +-documentation in the @xref{i386 Variable Attributes}, section. ++documentation in @ref{i386 Variable Attributes}. + + For documentation of @code{altivec} attribute please see the +-documentation in the @xref{PowerPC Type Attributes}, section. ++documentation in @ref{PowerPC Type Attributes}. + + @subsection SPU Variable Attributes + + The SPU supports the @code{spu_vector} attribute for variables. For +-documentation of this attribute please see the documentation in the +-@xref{SPU Type Attributes}, section. ++documentation of this attribute please see the documentation in ++@ref{SPU Type Attributes}. + + @subsection Xstormy16 Variable Attributes + + One attribute is currently defined for xstormy16 configurations: +-@code{below100} ++@code{below100}. + + @table @code + @item below100 +@@ -4231,6 +4237,8 @@ and caught in another, the class must ha + Otherwise the two shared objects will be unable to use the same + typeinfo node and exception handling will break. 
+ ++@end table ++ + @subsection ARM Type Attributes + + On those ARM targets that support @code{dllimport} (such as Symbian +@@ -4258,7 +4266,9 @@ most Symbian OS code uses @code{__declsp + @subsection i386 Type Attributes + + Two attributes are currently defined for i386 configurations: +-@code{ms_struct} and @code{gcc_struct} ++@code{ms_struct} and @code{gcc_struct}. ++ ++@table @code + + @item ms_struct + @itemx gcc_struct +@@ -4286,8 +4296,8 @@ packed))}. + Three attributes currently are defined for PowerPC configurations: + @code{altivec}, @code{ms_struct} and @code{gcc_struct}. + +-For full documentation of the struct attributes please see the +-documentation in the @xref{i386 Type Attributes}, section. ++For full documentation of the @code{ms_struct} and @code{gcc_struct} ++attributes please see the documentation in @ref{i386 Type Attributes}. + + The @code{altivec} attribute allows one to declare AltiVec vector data + types supported by the AltiVec Programming Interface Manual. The +@@ -5231,7 +5241,6 @@ GCC provides three magic variables which + function, as a string. The first of these is @code{__func__}, which + is part of the C99 standard: + +-@display + The identifier @code{__func__} is implicitly declared by the translator + as if, immediately following the opening brace of each function + definition, the declaration +@@ -5240,9 +5249,9 @@ definition, the declaration + static const char __func__[] = "function-name"; + @end smallexample + ++@noindent + appeared, where function-name is the name of the lexically-enclosing + function. This name is the unadorned name of the function. +-@end display + + @code{__FUNCTION__} is another name for @code{__func__}. Older + versions of GCC recognize only this name. However, it is not +@@ -5451,12 +5460,12 @@ the @code{offsetof} macro. + + @smallexample + primary: +- "__builtin_offsetof" "(" @code{typename} "," offsetof_member_designator ")" ++ "__builtin_offsetof" "(" @code{typename} "," offsetof_member_designator ")" + + offsetof_member_designator: +- @code{identifier} +- | offsetof_member_designator "." @code{identifier} +- | offsetof_member_designator "[" @code{expr} "]" ++ @code{identifier} ++ | offsetof_member_designator "." @code{identifier} ++ | offsetof_member_designator "[" @code{expr} "]" + @end smallexample + + This extension is sufficient such that +@@ -5649,7 +5658,7 @@ assert (__builtin_object_size (p, 0) == + assert (__builtin_object_size (p, 1) == sizeof (var.buf1) - 1); + /* The object q points to is var. */ + assert (__builtin_object_size (q, 0) +- == (char *) (&var + 1) - (char *) &var.b); ++ == (char *) (&var + 1) - (char *) &var.b); + /* The subobject q points to is var.b. */ + assert (__builtin_object_size (q, 1) == sizeof (var.b)); + @end smallexample +@@ -5701,11 +5710,11 @@ There are also checking built-in functio + @smallexample + int __builtin___sprintf_chk (char *s, int flag, size_t os, const char *fmt, ...); + int __builtin___snprintf_chk (char *s, size_t maxlen, int flag, size_t os, +- const char *fmt, ...); ++ const char *fmt, ...); + int __builtin___vsprintf_chk (char *s, int flag, size_t os, const char *fmt, +- va_list ap); ++ va_list ap); + int __builtin___vsnprintf_chk (char *s, size_t maxlen, int flag, size_t os, +- const char *fmt, va_list ap); ++ const char *fmt, va_list ap); + @end smallexample + + The added @var{flag} argument is passed unchanged to @code{__sprintf_chk} +@@ -11518,7 +11527,7 @@ test specifically for GNU C++ (@pxref{Co + Predefined Macros,cpp,The GNU C Preprocessor}). 
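[Editorial example, not part of the patch] The object-size checking builtins documented in the extend.texi hunk above are easiest to see in a small, self-contained C fragment. The sketch below is illustrative only (the buffer name and sizes are arbitrary); it relies on the documented behaviour that __builtin_object_size returns (size_t) -1 for type 0 when the size cannot be determined.

    #include <string.h>

    static char dest[16];

    /* Clamp a copy to the space the compiler can prove is available,
       falling back to the caller-supplied length when the object size
       is unknown ((size_t) -1 for type 0).  */
    void
    copy_checked (const char *src, size_t len)
    {
      size_t avail = __builtin_object_size (dest, 0);

      if (avail != (size_t) -1 && len > avail)
        len = avail;
      memcpy (dest, src, len);
    }

This is the same pattern the checking variants such as __builtin___memcpy_chk build on: the compiler-computed size is compared against the requested length before the real library call is made.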
+ + @menu +-* Volatiles:: What constitutes an access to a volatile object. ++* Volatiles:: What constitutes an access to a volatile object. + * Restricted Pointers:: C99 restricted pointers and references. + * Vague Linkage:: Where G++ puts inlines, vtables and such. + * C++ Interface:: You can use a single C++ header file for both +@@ -12039,7 +12048,7 @@ interface table mechanism, instead of re + + @end table + +-See also @xref{Namespace Association}. ++See also @ref{Namespace Association}. + + @node Namespace Association + @section Namespace Association +@@ -12266,7 +12275,7 @@ should work just fine for standard-confo + Previously it was possible to use an empty prototype parameter list to + indicate an unspecified number of parameters (like C), rather than no + parameters, as C++ demands. This feature has been removed, except where +-it is required for backwards compatibility @xref{Backwards Compatibility}. ++it is required for backwards compatibility. @xref{Backwards Compatibility}. + @end table + + G++ allows a virtual function returning @samp{void *} to be overridden +@@ -12317,7 +12326,7 @@ used to be acceptable in previous drafts + compilation of C++ written to such drafts, G++ contains some backwards + compatibilities. @emph{All such backwards compatibility features are + liable to disappear in future versions of G++.} They should be considered +-deprecated @xref{Deprecated Features}. ++deprecated. @xref{Deprecated Features}. + + @table @code + @item For scope +--- a/gcc/doc/fragments.texi ++++ b/gcc/doc/fragments.texi +@@ -143,6 +143,22 @@ options enabled. Therefore @code{MULTIL + *mthumb/*mhard-float* + @end smallexample + ++@findex MULTILIB_ALIASES ++@item MULTILIB_ALIASES ++Sometimes it is desirable to support a large set of multilib options, but ++only build libraries for a subset of those multilibs. The remaining ++combinations use a sutiable alternative multilb. In that case, set ++@code{MULTILIB_ALIASES} to a list of the form @samp{realname=aliasname}. ++ ++For example, consider a little-endian ARM toolchain with big-endian and ++Thumb multilibs. If a big-endian Thumb multilib is not wanted, then ++setting @code{MULTILIB_ALIASES} to @samp{mbig-endian=mbig-endian/mthumb} ++makes this combination use the big-endian ARM libraries instead. ++ ++If the multilib is instead excluded by setting @code{MULTILIB_EXCEPTIONS} ++then big-endian Thumb code uses the default multilib as none of the ++remaining multilibs match. ++ + @findex MULTILIB_EXTRA_OPTS + @item MULTILIB_EXTRA_OPTS + Sometimes it is desirable that when building multiple versions of +--- a/gcc/doc/gcc.texi ++++ b/gcc/doc/gcc.texi +@@ -147,12 +147,12 @@ Introduction, gccint, GNU Compiler Colle + * GNU Project:: The GNU Project and GNU/Linux. + + * Copying:: GNU General Public License says +- how you can copy and share GCC. ++ how you can copy and share GCC. + * GNU Free Documentation License:: How you can copy and share this manual. + * Contributors:: People who have contributed to GCC. + + * Option Index:: Index to command line options. +-* Keyword Index:: Index of concepts and symbol names. ++* Keyword Index:: Index of concepts and symbol names. + @end menu + + @include frontends.texi +--- a/gcc/doc/install.texi ++++ b/gcc/doc/install.texi +@@ -671,7 +671,7 @@ internal data files of GCC@. The defaul + + @item --libexecdir=@var{dirname} + Specify the installation directory for internal executables of GCC@. +- The default is @file{@var{exec-prefix}/libexec}. ++The default is @file{@var{exec-prefix}/libexec}. 
+ + @item --with-slibdir=@var{dirname} + Specify the installation directory for the shared libgcc library. The +@@ -3513,15 +3513,17 @@ applications. There are no standard Uni + @end html + @heading @anchor{m68k-x-x}m68k-*-* + By default, @samp{m68k-*-aout}, @samp{m68k-*-coff*}, +-@samp{m68k-*-elf*}, @samp{m68k-*-rtems} and @samp{m68k-*-uclinux} ++@samp{m68k-*-elf*}, @samp{m68k-*-rtems}, @samp{m68k-*-uclinux} and ++@samp{m68k-*-linux} + build libraries for both M680x0 and ColdFire processors. If you only + need the M680x0 libraries, you can omit the ColdFire ones by passing + @option{--with-arch=m68k} to @command{configure}. Alternatively, you + can omit the M680x0 libraries by passing @option{--with-arch=cf} to +-@command{configure}. These targets default to 5206 code when ++@command{configure}. These targets default to 5206 or 5475 code as ++appropriate for the target system when + configured with @option{--with-arch=cf} and 68020 code otherwise. + +-The @samp{m68k-*-linux-gnu}, @samp{m68k-*-netbsd} and ++The @samp{m68k-*-netbsd} and + @samp{m68k-*-openbsd} targets also support the @option{--with-arch} + option. They will generate ColdFire CFV4e code when configured with + @option{--with-arch=cf} and 68020 code otherwise. +--- a/gcc/doc/invoke.texi ++++ b/gcc/doc/invoke.texi +@@ -120,11 +120,11 @@ only one of these two forms, whichever o + @xref{Option Index}, for an index to GCC's options. + + @menu +-* Option Summary:: Brief list of all options, without explanations. ++* Option Summary:: Brief list of all options, without explanations. + * Overall Options:: Controlling the kind of output: + an executable, object files, assembler files, + or preprocessed source. +-* Invoking G++:: Compiling C++ programs. ++* Invoking G++:: Compiling C++ programs. + * C Dialect Options:: Controlling the variant of C language compiled. + * C++ Dialect Options:: Variations on C++. + * Objective-C and Objective-C++ Dialect Options:: Variations on Objective-C +@@ -248,6 +248,7 @@ Objective-C and Objective-C++ Dialects}. + -Wno-multichar -Wnonnull -Wno-overflow @gol + -Woverlength-strings -Wpacked -Wpadded @gol + -Wparentheses -Wpointer-arith -Wno-pointer-to-int-cast @gol ++-Wno-poison-system-directories @gol + -Wredundant-decls @gol + -Wreturn-type -Wsequence-point -Wshadow @gol + -Wsign-compare -Wsign-conversion -Wstack-protector @gol +@@ -309,13 +310,13 @@ Objective-C and Objective-C++ Dialects}. + -p -pg -print-file-name=@var{library} -print-libgcc-file-name @gol + -print-multi-directory -print-multi-lib @gol + -print-prog-name=@var{program} -print-search-dirs -Q @gol +--print-sysroot-headers-suffix @gol ++-print-sysroot -print-sysroot-headers-suffix @gol + -save-temps -time} + + @item Optimization Options + @xref{Optimize Options,,Options that Control Optimization}. + @gccoptlist{ +--falign-functions[=@var{n}] -falign-jumps[=@var{n}] @gol ++-falign-arrays -falign-functions[=@var{n}] -falign-jumps[=@var{n}] @gol + -falign-labels[=@var{n}] -falign-loops[=@var{n}] -fassociative-math @gol + -fauto-inc-dec -fbranch-probabilities -fbranch-target-load-optimize @gol + -fbranch-target-load-optimize2 -fbtr-bb-exclusive -fcaller-saves @gol +@@ -388,7 +389,7 @@ Objective-C and Objective-C++ Dialects}. 
+ @gccoptlist{@var{object-file-name} -l@var{library} @gol + -nostartfiles -nodefaultlibs -nostdlib -pie -rdynamic @gol + -s -static -static-libgcc -shared -shared-libgcc -symbolic @gol +--Wl,@var{option} -Xlinker @var{option} @gol ++-T @var{script} -Wl,@var{option} -Xlinker @var{option} @gol + -u @var{symbol}} + + @item Directory Options +@@ -421,8 +422,11 @@ Objective-C and Objective-C++ Dialects}. + -msched-prolog -mno-sched-prolog @gol + -mlittle-endian -mbig-endian -mwords-little-endian @gol + -mfloat-abi=@var{name} -msoft-float -mhard-float -mfpe @gol ++-mfp16-format=@var{name} + -mthumb-interwork -mno-thumb-interwork @gol ++-mfix-janus-2cc @gol + -mcpu=@var{name} -march=@var{name} -mfpu=@var{name} @gol ++-mmarvell-div @gol + -mstructure-size-boundary=@var{n} @gol + -mabort-on-noreturn @gol + -mlong-calls -mno-long-calls @gol +@@ -434,7 +438,9 @@ Objective-C and Objective-C++ Dialects}. + -mthumb -marm @gol + -mtpcs-frame -mtpcs-leaf-frame @gol + -mcaller-super-interworking -mcallee-super-interworking @gol +--mtp=@var{name}} ++-mtp=@var{name} @gol ++-mlow-irq-latency -mword-relocations @gol ++-mfix-cortex-m3-ldrd} + + @emph{AVR Options} + @gccoptlist{-mmcu=@var{mcu} -msize -minit-stack=@var{n} -mno-interrupts @gol +@@ -602,7 +608,8 @@ Objective-C and Objective-C++ Dialects}. + -mnobitfield -mrtd -mno-rtd -mdiv -mno-div -mshort @gol + -mno-short -mhard-float -m68881 -msoft-float -mpcrel @gol + -malign-int -mstrict-align -msep-data -mno-sep-data @gol +--mshared-library-id=n -mid-shared-library -mno-id-shared-library} ++-mshared-library-id=n -mid-shared-library -mno-id-shared-library @gol ++-mxgot -mno-xgot} + + @emph{M68hc1x Options} + @gccoptlist{-m6811 -m6812 -m68hc11 -m68hc12 -m68hcs12 @gol +@@ -619,7 +626,7 @@ Objective-C and Objective-C++ Dialects}. + @emph{MIPS Options} + @gccoptlist{-EL -EB -march=@var{arch} -mtune=@var{arch} @gol + -mips1 -mips2 -mips3 -mips4 -mips32 -mips32r2 -mips64 @gol +--mips16 -mno-mips16 -mflip-mips16 @gol ++-mips16 -mips16e -mno-mips16 -mflip-mips16 @gol + -minterlink-mips16 -mno-interlink-mips16 @gol + -mabi=@var{abi} -mabicalls -mno-abicalls @gol + -mshared -mno-shared -mxgot -mno-xgot -mgp32 -mgp64 @gol +@@ -642,11 +649,12 @@ Objective-C and Objective-C++ Dialects}. + -mmad -mno-mad -mfused-madd -mno-fused-madd -nocpp @gol + -mfix-r4000 -mno-fix-r4000 -mfix-r4400 -mno-fix-r4400 @gol + -mfix-vr4120 -mno-fix-vr4120 -mfix-vr4130 -mno-fix-vr4130 @gol +--mfix-sb1 -mno-fix-sb1 @gol ++-mfix-ice9a -mno-fix-ice9a -mfix-sb1 -mno-fix-sb1 @gol + -mflush-func=@var{func} -mno-flush-func @gol + -mbranch-cost=@var{num} -mbranch-likely -mno-branch-likely @gol + -mfp-exceptions -mno-fp-exceptions @gol +--mvr4130-align -mno-vr4130-align} ++-mvr4130-align -mno-vr4130-align @gol ++-mwarn-framesize=@var{framesize}} + + @emph{MMIX Options} + @gccoptlist{-mlibfuncs -mno-libfuncs -mepsilon -mno-epsilon -mabi=gnu @gol +@@ -746,7 +754,7 @@ See RS/6000 and PowerPC Options. + -mprefergot -musermode -multcost=@var{number} -mdiv=@var{strategy} @gol + -mdivsi3_libfunc=@var{name} @gol + -madjust-unroll -mindexed-addressing -mgettrcost=@var{number} -mpt-fixed @gol +- -minvalid-symbols} ++-minvalid-symbols} + + @emph{SPARC Options} + @gccoptlist{-mcpu=@var{cpu-type} @gol +@@ -3377,6 +3385,14 @@ code. However, note that using @option{ + option will @emph{not} warn about unknown pragmas in system + headers---for that, @option{-Wunknown-pragmas} must also be used. 
+ ++@item -Wno-poison-system-directories ++@opindex Wno-poison-system-directories ++Do not warn for @option{-I} or @option{-L} options using system ++directories such as @file{/usr/include} when cross compiling. This ++option is intended for use in chroot environments when such ++directories contain the correct headers and libraries for the target ++system rather than the host. ++ + @item -Wfloat-equal + @opindex Wfloat-equal + @opindex Wno-float-equal +@@ -5065,6 +5081,14 @@ variable @env{GCC_EXEC_PREFIX} to the di + Don't forget the trailing @samp{/}. + @xref{Environment Variables}. + ++@item -print-sysroot ++@opindex print-sysroot ++Print the target sysroot directory that will be used during ++compilation. This is the target sysroot specified either at configure ++time or or using the @option{--sysroot} option, possibly with an extra ++suffix that depends on compilation options. If no target sysroot is ++specified, the options prints nothing. ++ + @item -print-sysroot-headers-suffix + @opindex print-sysroot-headers-suffix + Print the suffix added to the target sysroot when searching for +@@ -5128,7 +5152,13 @@ the compiler to use information gained f + compiling each of them. + + Not all optimizations are controlled directly by a flag. Only +-optimizations that have a flag are listed. ++optimizations that have a flag are listed in this section. ++ ++Depending on the target and how GCC was configured, a slightly different ++set of optimizations may be enabled at each @option{-O} level than ++those listed here. You can invoke GCC with @samp{-Q --help=optimizers} ++to find out the exact set of optimizations that are enabled at each level. ++@xref{Overall Options}, for examples. + + @table @gcctabopt + @item -O +@@ -5355,9 +5385,9 @@ as follows: + + @table @gcctabopt + @item max-inline-insns-single +- is set to @var{n}/2. ++is set to @var{n}/2. + @item max-inline-insns-auto +- is set to @var{n}/2. ++is set to @var{n}/2. + @end table + + See below for a documentation of the individual +@@ -6207,6 +6237,14 @@ arithmetic on constants, the overflowed + The @option{-fstrict-overflow} option is enabled at levels + @option{-O2}, @option{-O3}, @option{-Os}. + ++@item -falign-arrays ++@opindex falign-arrays ++Set the minimum alignment for array variables to be the largest power ++of two less than or equal to their total storage size, or the biggest ++alignment used on the machine, whichever is smaller. This option may be ++helpful when compiling legacy code that uses type punning on arrays that ++does not strictly conform to the C standard. ++ + @item -falign-functions + @itemx -falign-functions=@var{n} + @opindex falign-functions +@@ -6757,6 +6795,21 @@ int foo (void) + + Not all targets support this option. + ++@item -fremove-local-statics ++@opindex fremove-local-statics ++Converts function-local static variables to automatic variables when it ++is safe to do so. This transformation can reduce the number of ++instructions executed due to automatic variables being cheaper to ++read/write than static variables. ++ ++@item -fpromote-loop-indices ++@opindex fpromote-loop-indices ++Converts loop indices that have a type shorter than the word size to ++word-sized quantities. This transformation can reduce the overhead ++associated with sign/zero-extension and truncation of such variables. ++Using @option{-funsafe-loop-optimizations} with this option may result ++in more effective optimization. 
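[Editorial example, not part of the patch] The -fpromote-loop-indices documentation added above is abstract; the following C fragment shows the kind of loop it is aimed at. This is only a sketch: whether the narrow induction variable is actually widened depends on the target and the optimization level.

    /* With -O2 -fpromote-loop-indices the compiler may widen 'i' to a
       word-sized type, avoiding the repeated truncation and extension
       that the 'short' type would otherwise imply on most targets.  */
    unsigned int
    sum_first_n (const unsigned int *a, short n)
    {
      unsigned int s = 0;
      short i;

      for (i = 0; i < n; i++)
        s += a[i];
      return s;
    }

As the documentation notes, combining the option with -funsafe-loop-optimizations may let the transformation apply in more cases.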
++ + @item --param @var{name}=@var{value} + @opindex param + In some places, GCC uses various constants to control the amount of +@@ -7582,23 +7635,42 @@ about any unresolved references (unless + option @samp{-Xlinker -z -Xlinker defs}). Only a few systems support + this option. + ++@item -T @var{script} ++@opindex T ++@cindex linker script ++Use @var{script} as the linker script. This option is supported by most ++systems using the GNU linker. On some targets, such as bare-board ++targets without an operating system, the @option{-T} option may be required ++when linking to avoid references to undefined symbols. ++ + @item -Xlinker @var{option} + @opindex Xlinker + Pass @var{option} as an option to the linker. You can use this to + supply system-specific linker options which GCC does not know how to + recognize. + +-If you want to pass an option that takes an argument, you must use ++If you want to pass an option that takes a separate argument, you must use + @option{-Xlinker} twice, once for the option and once for the argument. + For example, to pass @option{-assert definitions}, you must write + @samp{-Xlinker -assert -Xlinker definitions}. It does not work to write + @option{-Xlinker "-assert definitions"}, because this passes the entire + string as a single argument, which is not what the linker expects. + ++When using the GNU linker, it is usually more convenient to pass ++arguments to linker options using the @option{@var{option}=@var{value}} ++syntax than as separate arguments. For example, you can specify ++@samp{-Xlinker -Map=output.map} rather than ++@samp{-Xlinker -Map -Xlinker output.map}. Other linkers may not support ++this syntax for command-line options. ++ + @item -Wl,@var{option} + @opindex Wl + Pass @var{option} as an option to the linker. If @var{option} contains +-commas, it is split into multiple options at the commas. ++commas, it is split into multiple options at the commas. You can use this ++syntax to pass an argument to the option. ++For example, @samp{-Wl,-Map,output.map} passes @samp{-Map output.map} to the ++linker. When using the GNU linker, you can also get the same effect with ++@samp{-Wl,-Map=output.map}. + + @item -u @var{symbol} + @opindex u +@@ -8500,35 +8572,30 @@ different function prologues), and this + locate the start if functions inside an executable piece of code. The + default is @option{-msched-prolog}. + ++@item -mfloat-abi=@var{name} ++@opindex mfloat-abi ++Specifies which floating-point ABI to use. Permissible values ++are: @samp{soft}, @samp{softfp} and @samp{hard}. ++ ++Specifying @samp{soft} causes GCC to generate output containing ++library calls for floating-point operations. ++@samp{softfp} allows the generation of code using hardware floating-point ++instructions, but still uses the soft-float calling conventions. ++@samp{hard} allows generation of floating-point instructions ++and uses FPU-specific calling conventions. ++ ++The default depends on the specific target configuration. Note that ++the hard-float and soft-float ABIs are not link-compatible; you must ++compile your entire program with the same ABI, and link with a ++compatible set of libraries. ++ + @item -mhard-float + @opindex mhard-float +-Generate output containing floating point instructions. This is the +-default. ++Equivalent to @option{-mfloat-abi=hard}. + + @item -msoft-float + @opindex msoft-float +-Generate output containing library calls for floating point. +-@strong{Warning:} the requisite libraries are not available for all ARM +-targets. 
Normally the facilities of the machine's usual C compiler are +-used, but this cannot be done directly in cross-compilation. You must make +-your own arrangements to provide suitable library functions for +-cross-compilation. +- +-@option{-msoft-float} changes the calling convention in the output file; +-therefore, it is only useful if you compile @emph{all} of a program with +-this option. In particular, you need to compile @file{libgcc.a}, the +-library that comes with GCC, with @option{-msoft-float} in order for +-this to work. +- +-@item -mfloat-abi=@var{name} +-@opindex mfloat-abi +-Specifies which ABI to use for floating point values. Permissible values +-are: @samp{soft}, @samp{softfp} and @samp{hard}. +- +-@samp{soft} and @samp{hard} are equivalent to @option{-msoft-float} +-and @option{-mhard-float} respectively. @samp{softfp} allows the generation +-of floating point instructions, but still uses the soft-float calling +-conventions. ++Equivalent to @option{-mfloat-abi=soft}. + + @item -mlittle-endian + @opindex mlittle-endian +@@ -8567,8 +8634,9 @@ assembly code. Permissible names are: @ + @samp{arm10e}, @samp{arm1020e}, @samp{arm1022e}, + @samp{arm1136j-s}, @samp{arm1136jf-s}, @samp{mpcore}, @samp{mpcorenovfp}, + @samp{arm1156t2-s}, @samp{arm1176jz-s}, @samp{arm1176jzf-s}, +-@samp{cortex-a8}, @samp{cortex-r4}, @samp{cortex-m3}, +-@samp{xscale}, @samp{iwmmxt}, @samp{ep9312}. ++@samp{cortex-a8}, @samp{cortex-r4}, @samp{cortex-r4f}, @samp{cortex-m3}, ++@samp{cortex-m1}, @samp{cortex-m0}, ++@samp{xscale}, @samp{iwmmxt}, @samp{ep9312} @samp{marvell-f}. + + @itemx -mtune=@var{name} + @opindex mtune +@@ -8600,13 +8668,26 @@ of the @option{-mcpu=} option. Permissi + @opindex mfp + This specifies what floating point hardware (or hardware emulation) is + available on the target. Permissible names are: @samp{fpa}, @samp{fpe2}, +-@samp{fpe3}, @samp{maverick}, @samp{vfp}. @option{-mfp} and @option{-mfpe} ++@samp{fpe3}, @samp{maverick}, @samp{vfp}, @samp{vfpv3}, @samp{vfpv3-d16}, ++@samp{neon}, and @samp{neon-fp16}. @option{-mfp} and @option{-mfpe} + are synonyms for @option{-mfpu}=@samp{fpe}@var{number}, for compatibility + with older versions of GCC@. + + If @option{-msoft-float} is specified this specifies the format of + floating point values. + ++@item -mfp16-format=@var{name} ++@opindex mfp16-format ++Specify the format of the @code{__fp16} half-precision floating-point type. ++Permissible names are @samp{none}, @samp{ieee}, and @samp{alternative}; ++the default is @samp{none}, in which case the @code{__fp16} type is not ++defined. Refer to the ARM Half-precision Extensions documentation for ++details of the formats. ++ ++@item -mmarvell-div ++@opindex mmarvell-div ++Generate hardware integer division instructions supported by some Marvell cores. ++ + @item -mstructure-size-boundary=@var{n} + @opindex mstructure-size-boundary + The size of all structures and unions will be rounded up to a multiple +@@ -8714,6 +8795,10 @@ This option automatically enables either + mixed 16/32-bit Thumb-2 instructions based on the @option{-mcpu=@var{name}} + and @option{-march=@var{name}} options. + ++@item -mfix-janus-2cc ++@opindex mfix-janus-2cc ++Work around hardware errata for Avalent Janus 2CC cores. ++ + @item -mtpcs-frame + @opindex mtpcs-frame + Generate a stack frame that is compliant with the Thumb Procedure Call +@@ -8749,6 +8834,25 @@ models are @option{soft}, which generate + best available method for the selected processor. The default setting is + @option{auto}. 
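[Editorial example, not part of the patch] The -mfp16-format option documented above only makes the __fp16 type available; a short example makes the intended use clearer. This sketch assumes compilation for an ARM target with -mfp16-format=ieee (or alternative); with the default setting of none the type does not exist. Half-precision values are treated here purely as a storage format and are converted to float before any arithmetic.

    /* Half-precision storage, single-precision arithmetic.  */
    __fp16 coeffs[4];

    float
    weighted_sum (const float *x)
    {
      return (float) coeffs[0] * x[0]
             + (float) coeffs[1] * x[1]
             + (float) coeffs[2] * x[2]
             + (float) coeffs[3] * x[3];
    }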
+ ++@item -mlow-irq-latency ++@opindex mlow-irq-latency ++Avoid instructions with high interrupt latency when generating ++code. This can increase code size and reduce performance. ++The option is off by default. ++ ++@item -mword-relocations ++@opindex mword-relocations ++Only generate absolute relocations on word sized values (i.e. R_ARM_ABS32). ++This is enabled by default on targets (uClinux, SymbianOS) where the runtime ++loader imposes this restriction. ++ ++@item -mfix-cortex-m3-ldrd ++@opindex mfix-cortex-m3-ldrd ++Some Cortex-M3 cores can cause data corruption when @code{ldrd} instructions ++with overlapping destination and base registers are used. This option avoids ++generating these instructions. This option is enabled by default when ++@option{-mcpu=cortex-m3} is specified. ++ + @end table + + @node AVR Options +@@ -11402,7 +11506,7 @@ below, which also classifies the CPUs in + + @multitable @columnfractions 0.20 0.80 + @item @strong{Family} @tab @strong{@samp{-mcpu} arguments} +-@item @samp{51qe} @tab @samp{51qe} ++@item @samp{51} @tab @samp{51} @samp{51ac} @samp{51cn} @samp{51em} @samp{51qe} + @item @samp{5206} @tab @samp{5202} @samp{5204} @samp{5206} + @item @samp{5206e} @tab @samp{5206e} + @item @samp{5208} @tab @samp{5207} @samp{5208} +@@ -11411,6 +11515,7 @@ below, which also classifies the CPUs in + @item @samp{5216} @tab @samp{5214} @samp{5216} + @item @samp{52235} @tab @samp{52230} @samp{52231} @samp{52232} @samp{52233} @samp{52234} @samp{52235} + @item @samp{5225} @tab @samp{5224} @samp{5225} ++@item @samp{52259} @tab @samp{52252} @samp{52254} @samp{52255} @samp{52256} @samp{52258} @samp{52259} + @item @samp{5235} @tab @samp{5232} @samp{5233} @samp{5234} @samp{5235} @samp{523x} + @item @samp{5249} @tab @samp{5249} + @item @samp{5250} @tab @samp{5250} +@@ -11418,6 +11523,7 @@ below, which also classifies the CPUs in + @item @samp{5272} @tab @samp{5272} + @item @samp{5275} @tab @samp{5274} @samp{5275} + @item @samp{5282} @tab @samp{5280} @samp{5281} @samp{5282} @samp{528x} ++@item @samp{53017} @tab @samp{53011} @samp{53012} @samp{53013} @samp{53014} @samp{53015} @samp{53016} @samp{53017} + @item @samp{5307} @tab @samp{5307} + @item @samp{5329} @tab @samp{5327} @samp{5328} @samp{5329} @samp{532x} + @item @samp{5373} @tab @samp{5372} @samp{5373} @samp{537x} +@@ -11711,6 +11817,38 @@ compiled. Specifying a value of 0 will + other values will force the allocation of that number to the current + library but is no more space or time efficient than omitting this option. + ++@item -mxgot ++@itemx -mno-xgot ++@opindex mxgot ++@opindex mno-xgot ++When generating position-independent code for ColdFire, generate code ++that works if the GOT has more than 8192 entries. This code is ++larger and slower than code generated without this option. On M680x0 ++processors, this option is not needed; @option{-fPIC} suffices. ++ ++GCC normally uses a single instruction to load values from the GOT@. ++While this is relatively efficient, it only works if the GOT ++is smaller than about 64k. Anything larger causes the linker ++to report an error such as: ++ ++@cindex relocation truncated to fit (ColdFire) ++@smallexample ++relocation truncated to fit: R_68K_GOT16O foobar ++@end smallexample ++ ++If this happens, you should recompile your code with @option{-mxgot}. ++It should then work with very large GOTs. However, code generated with ++@option{-mxgot} is less efficient, since it takes 4 instructions to fetch ++the value of a global symbol. 
++ ++Note that some linkers, including newer versions of the GNU linker, ++can create multiple GOTs and sort GOT entries. If you have such a linker, ++you should only need to use @option{-mxgot} when compiling a single ++object file that accesses more than 8192 GOT entries. Very few do. ++ ++These options have no effect unless GCC is generating ++position-independent code. ++ + @end table + + @node M68hc1x Options +@@ -11871,6 +12009,7 @@ The processor names are: + @samp{24kec}, @samp{24kef2_1}, @samp{24kef1_1}, + @samp{34kc}, @samp{34kf2_1}, @samp{34kf1_1}, + @samp{74kc}, @samp{74kf2_1}, @samp{74kf1_1}, @samp{74kf3_2}, ++@samp{ice9}, + @samp{m4k}, + @samp{orion}, + @samp{r2000}, @samp{r3000}, @samp{r3900}, @samp{r4000}, @samp{r4400}, +@@ -11879,7 +12018,8 @@ The processor names are: + @samp{sb1}, + @samp{sr71000}, + @samp{vr4100}, @samp{vr4111}, @samp{vr4120}, @samp{vr4130}, @samp{vr4300}, +-@samp{vr5000}, @samp{vr5400} and @samp{vr5500}. ++@samp{vr5000}, @samp{vr5400}, @samp{vr5500} ++and @samp{xlr}. + The special value @samp{from-abi} selects the + most compatible architecture for the selected ABI (that is, + @samp{mips1} for 32-bit ABIs and @samp{mips3} for 64-bit ABIs)@. +@@ -11957,11 +12097,14 @@ Equivalent to @samp{-march=mips32r2}. + Equivalent to @samp{-march=mips64}. + + @item -mips16 ++@itemx -mips16e + @itemx -mno-mips16 + @opindex mips16 ++@opindex mips16e + @opindex mno-mips16 + Generate (do not generate) MIPS16 code. If GCC is targetting a + MIPS32 or MIPS64 architecture, it will make use of the MIPS16e ASE@. ++@option{-mips16e} is a deprecated alias for @option{-mips16}. + + MIPS16 code generation can also be controlled on a per-function basis + by means of @code{mips16} and @code{nomips16} attributes. +@@ -12453,6 +12596,12 @@ although GCC will avoid using @code{mflo + VR4130 @code{macc}, @code{macchi}, @code{dmacc} and @code{dmacchi} + instructions are available instead. + ++@item -mfix-ice9a ++@itemx -mno-fix-ice9a ++@opindex mfix-ice9a ++Work around ICE9A double floating-point multiplication ++errata. When enabled, the preprocessor defines @code{_MIPS_FIX_ICE9A}. ++ + @item -mfix-sb1 + @itemx -mno-fix-sb1 + @opindex mfix-sb1 +@@ -12512,6 +12661,13 @@ thinks should execute in parallel. + This option only has an effect when optimizing for the VR4130. + It normally makes code faster, but at the expense of making it bigger. + It is enabled by default at optimization level @option{-O3}. ++ ++@item -mwarn-framesize=@var{framesize} ++@opindex mwarn-framesize ++Emit a compile-time warning if the current function exceeds the given ++frame size. This is intended to help identify functions which ++may cause a stack overflow in run-time environments with limited or ++absent stack, e.g., BIOS. + @end table + + @node MMIX Options +@@ -12943,11 +13099,12 @@ Supported values for @var{cpu_type} are + @samp{601}, @samp{602}, @samp{603}, @samp{603e}, @samp{604}, + @samp{604e}, @samp{620}, @samp{630}, @samp{740}, @samp{7400}, + @samp{7450}, @samp{750}, @samp{801}, @samp{821}, @samp{823}, +-@samp{860}, @samp{970}, @samp{8540}, @samp{ec603e}, @samp{G3}, +-@samp{G4}, @samp{G5}, @samp{power}, @samp{power2}, @samp{power3}, +-@samp{power4}, @samp{power5}, @samp{power5+}, @samp{power6}, +-@samp{power6x}, @samp{common}, @samp{powerpc}, @samp{powerpc64}, +-@samp{rios}, @samp{rios1}, @samp{rios2}, @samp{rsc}, and @samp{rs64}. 
++@samp{860}, @samp{970}, @samp{8540}, @samp{e300c2}, @samp{e300c3}, ++@samp{e500mc}, @samp{ec603e}, @samp{G3}, @samp{G4}, @samp{G5}, ++@samp{power}, @samp{power2}, @samp{power3}, @samp{power4}, ++@samp{power5}, @samp{power5+}, @samp{power6}, @samp{power6x}, ++@samp{common}, @samp{powerpc}, @samp{powerpc64}, @samp{rios}, ++@samp{rios1}, @samp{rios2}, @samp{rsc}, and @samp{rs64}. + + @option{-mcpu=common} selects a completely generic processor. Code + generated under this option will run on any POWER or PowerPC processor. +@@ -13482,12 +13639,11 @@ header to indicate that @samp{eabi} exte + On System V.4 and embedded PowerPC systems do (do not) adhere to the + Embedded Applications Binary Interface (eabi) which is a set of + modifications to the System V.4 specifications. Selecting @option{-meabi} +-means that the stack is aligned to an 8 byte boundary, a function +-@code{__eabi} is called to from @code{main} to set up the eabi +-environment, and the @option{-msdata} option can use both @code{r2} and ++means that the stack is aligned to an 8 byte boundary, ++and the @option{-msdata} option can use both @code{r2} and + @code{r13} to point to two separate small data areas. Selecting + @option{-mno-eabi} means that the stack is aligned to a 16 byte boundary, +-do not call an initialization function from @code{main}, and the ++and the + @option{-msdata} option will only use @code{r13} to point to a single + small data area. The @option{-meabi} option is on by default if you + configured GCC using one of the @samp{powerpc*-*-eabi*} options. +@@ -14916,12 +15072,25 @@ Use it to conform to a non-default appli + + @item -fno-common + @opindex fno-common +-In C, allocate even uninitialized global variables in the data section of the +-object file, rather than generating them as common blocks. This has the +-effect that if the same variable is declared (without @code{extern}) in +-two different compilations, you will get an error when you link them. +-The only reason this might be useful is if you wish to verify that the +-program will work on other systems which always work this way. ++In C code, controls the placement of uninitialized global variables. ++Unix C compilers have traditionally permitted multiple definitions of ++such variables in different compilation units by placing the variables ++in a common block. ++This is the behavior specified by @option{-fcommon}, and is the default ++for GCC on most targets. ++On the other hand, this behavior is not required by ISO C, and on some ++targets may carry a speed or code size penalty on variable references. ++The @option{-fno-common} option specifies that the compiler should place ++uninitialized global variables in the data section of the object file, ++rather than generating them as common blocks. ++This has the effect that if the same variable is declared ++(without @code{extern}) in two different compilations, ++you will get a multiple-definition error when you link them. ++In this case, you must compile with @option{-fcommon} instead. ++Compiling with @option{-fno-common} is useful on targets for which ++it provides better performance, or if you wish to verify that the ++program will work on other systems which always treat uninitialized ++variable declarations this way. + + @item -fno-ident + @opindex fno-ident +--- a/gcc/doc/md.texi ++++ b/gcc/doc/md.texi +@@ -25,11 +25,11 @@ See the next chapter for information on + * Example:: An explained example of a @code{define_insn} pattern. 
+ * RTL Template:: The RTL template defines what insns match a pattern. + * Output Template:: The output template says how to make assembler code +- from such an insn. ++ from such an insn. + * Output Statement:: For more generality, write C code to output +- the assembler code. ++ the assembler code. + * Predicates:: Controlling what kinds of operands can be used +- for an insn. ++ for an insn. + * Constraints:: Fine-tuning operand selection. + * Standard Names:: Names mark patterns to use for code generation. + * Pattern Ordering:: When the order of patterns makes a difference. +@@ -38,13 +38,13 @@ See the next chapter for information on + * Looping Patterns:: How to define patterns for special looping insns. + * Insn Canonicalizations::Canonicalization of Instructions + * Expander Definitions::Generating a sequence of several RTL insns +- for a standard operation. ++ for a standard operation. + * Insn Splitting:: Splitting Instructions into Multiple Instructions. +-* Including Patterns:: Including Patterns in Machine Descriptions. ++* Including Patterns:: Including Patterns in Machine Descriptions. + * Peephole Definitions::Defining machine-specific peephole optimizations. + * Insn Attributes:: Specifying the value of attributes for generated insns. + * Conditional Execution::Generating @code{define_insn} patterns for +- predication. ++ predication. + * Constant Definitions::Defining symbolic constants that can be used in the + md file. + * Iterators:: Using iterators to generate patterns from a template. +@@ -1626,7 +1626,7 @@ it includes both constraints that are us + constraints that aren't. The compiler source file mentioned in the + table heading for each architecture is the definitive reference for + the meanings of that architecture's constraints. +- ++ + @table @emph + @item ARM family---@file{config/arm/arm.h} + @table @code +@@ -2889,10 +2889,10 @@ Signed 16-bit constant (@minus{}32768--3 + @item L + Value appropriate as displacement. + @table @code +- @item (0..4095) +- for short displacement +- @item (-524288..524287) +- for long displacement ++@item (0..4095) ++for short displacement ++@item (-524288..524287) ++for long displacement + @end table + + @item M +@@ -2901,14 +2901,14 @@ Constant integer with a value of 0x7ffff + @item N + Multiple letter constraint followed by 4 parameter letters. + @table @code +- @item 0..9: +- number of the part counting from most to least significant +- @item H,Q: +- mode of the part +- @item D,S,H: +- mode of the containing operand +- @item 0,F: +- value of the other parts (F---all bits set) ++@item 0..9: ++number of the part counting from most to least significant ++@item H,Q: ++mode of the part ++@item D,S,H: ++mode of the containing operand ++@item 0,F: ++value of the other parts (F---all bits set) + @end table + The constraint matches if the specified part of a constant + has a value different from its other parts. 
+@@ -3345,8 +3345,8 @@ definition from the i386 machine descrip + (define_peephole2 + [(match_scratch:SI 3 "r") + (set (match_operand:SI 0 "register_operand" "") +- (mult:SI (match_operand:SI 1 "memory_operand" "") +- (match_operand:SI 2 "immediate_operand" "")))] ++ (mult:SI (match_operand:SI 1 "memory_operand" "") ++ (match_operand:SI 2 "immediate_operand" "")))] + + "!satisfies_constraint_K (operands[2])" + +@@ -5378,15 +5378,15 @@ following for its @code{dbra} instructio + @group + (define_insn "decrement_and_branch_until_zero" + [(set (pc) +- (if_then_else +- (ge (plus:SI (match_operand:SI 0 "general_operand" "+d*am") +- (const_int -1)) +- (const_int 0)) +- (label_ref (match_operand 1 "" "")) +- (pc))) ++ (if_then_else ++ (ge (plus:SI (match_operand:SI 0 "general_operand" "+d*am") ++ (const_int -1)) ++ (const_int 0)) ++ (label_ref (match_operand 1 "" "")) ++ (pc))) + (set (match_dup 0) +- (plus:SI (match_dup 0) +- (const_int -1)))] ++ (plus:SI (match_dup 0) ++ (const_int -1)))] + "find_reg_note (insn, REG_NONNEG, 0)" + "@dots{}") + @end group +@@ -5404,14 +5404,14 @@ pattern will not be matched by the combi + @group + (define_insn "decrement_and_branch_until_zero" + [(set (pc) +- (if_then_else +- (ge (match_operand:SI 0 "general_operand" "+d*am") +- (const_int 1)) +- (label_ref (match_operand 1 "" "")) +- (pc))) ++ (if_then_else ++ (ge (match_operand:SI 0 "general_operand" "+d*am") ++ (const_int 1)) ++ (label_ref (match_operand 1 "" "")) ++ (pc))) + (set (match_dup 0) +- (plus:SI (match_dup 0) +- (const_int -1)))] ++ (plus:SI (match_dup 0) ++ (const_int -1)))] + "find_reg_note (insn, REG_NONNEG, 0)" + "@dots{}") + @end group +@@ -6033,7 +6033,7 @@ from i386.md: + "&& reload_completed" + [(parallel [(set (match_dup 0) + (and:SI (match_dup 0) (const_int 65535))) +- (clobber (reg:CC 17))])] ++ (clobber (reg:CC 17))])] + "" + [(set_attr "type" "alu1")]) + +@@ -7158,10 +7158,10 @@ the instruction issue is possible if the + automaton state to another one. This algorithm is very fast, and + furthermore, its speed is not dependent on processor + complexity@footnote{However, the size of the automaton depends on +- processor complexity. To limit this effect, machine descriptions +- can split orthogonal parts of the machine description among several +- automata: but then, since each of these must be stepped independently, +- this does cause a small decrease in the algorithm's performance.}. ++processor complexity. To limit this effect, machine descriptions ++can split orthogonal parts of the machine description among several ++automata: but then, since each of these must be stepped independently, ++this does cause a small decrease in the algorithm's performance.}. + + @cindex automaton based pipeline description + The rest of this section describes the directives that constitute +@@ -7761,8 +7761,8 @@ rtx-based construct, such as a @code{def + + @menu + * Defining Mode Iterators:: Defining a new mode iterator. +-* Substitutions:: Combining mode iterators with substitutions +-* Examples:: Examples ++* Substitutions:: Combining mode iterators with substitutions ++* Examples:: Examples + @end menu + + @node Defining Mode Iterators +--- a/gcc/doc/options.texi ++++ b/gcc/doc/options.texi +@@ -29,13 +29,13 @@ The files can contain the following type + + @itemize @bullet + @item +-A language definition record.  These records have two fields: the +-string @samp{Language} and the name of the language.  Once a language ++A language definition record. 
These records have two fields: the ++string @samp{Language} and the name of the language. Once a language + has been declared in this way, it can be used as an option property. + @xref{Option properties}. + + @item +-An option definition record.  These records have the following fields: ++An option definition record. These records have the following fields: + + @enumerate + @item +@@ -62,11 +62,11 @@ tab forms the help text. This allows yo + of argument the option takes. + + @item +-A target mask record.  These records have one field of the form +-@samp{Mask(@var{x})}.  The options-processing script will automatically ++A target mask record. These records have one field of the form ++@samp{Mask(@var{x})}. The options-processing script will automatically + allocate a bit in @code{target_flags} (@pxref{Run-time Target}) for + each mask name @var{x} and set the macro @code{MASK_@var{x}} to the +-appropriate bitmask.  It will also declare a @code{TARGET_@var{x}} ++appropriate bitmask. It will also declare a @code{TARGET_@var{x}} + macro that has the value 1 when bit @code{MASK_@var{x}} is set and + 0 otherwise. + +--- a/gcc/doc/passes.texi ++++ b/gcc/doc/passes.texi +@@ -20,7 +20,7 @@ where near complete. + @menu + * Parsing pass:: The language front end turns text into bits. + * Gimplification pass:: The bits are turned into something we can optimize. +-* Pass manager:: Sequencing the optimization passes. ++* Pass manager:: Sequencing the optimization passes. + * Tree-SSA passes:: Optimizations on a high-level representation. + * RTL passes:: Optimizations on a low-level representation. + @end menu +--- a/gcc/doc/rtl.texi ++++ b/gcc/doc/rtl.texi +@@ -3019,11 +3019,9 @@ represents @var{x} before @var{x} is mod + @var{m} must be the machine mode for pointers on the machine in use. + + The expression @var{y} must be one of three forms: +-@table @code + @code{(plus:@var{m} @var{x} @var{z})}, + @code{(minus:@var{m} @var{x} @var{z})}, or + @code{(plus:@var{m} @var{x} @var{i})}, +-@end table + where @var{z} is an index register and @var{i} is a constant. + + Here is an example of its use: +--- a/gcc/doc/sourcebuild.texi ++++ b/gcc/doc/sourcebuild.texi +@@ -198,9 +198,7 @@ timestamp. + + @itemize @bullet + @item The standard GNU @file{config.sub} and @file{config.guess} +-files, kept in the top level directory, are used. FIXME: when is the +-@file{config.guess} file in the @file{gcc} directory (that just calls +-the top level one) used? ++files, kept in the top level directory, are used. + + @item The file @file{config.gcc} is used to handle configuration + specific to the particular target machine. The file +@@ -1021,7 +1019,11 @@ an empty @var{exclude-opts} list. + + @item @{ dg-xfail-if @var{comment} @{ @var{selector} @} @{ @var{include-opts} @} @{ @var{exclude-opts} @} @} + Expect the test to fail if the conditions (which are the same as for +-@code{dg-skip-if}) are met. ++@code{dg-skip-if}) are met. This does not affect the execute step. ++ ++@item @{ dg-xfail-run-if @var{comment} @{ @var{selector} @} @{ @var{include-opts} @} @{ @var{exclude-opts} @} @} ++Expect the execute step of a test to fail if the conditions (which are ++the same as for @code{dg-skip-if}) and @code{dg-xfail-if}) are met. + + @item @{ dg-require-@var{support} args @} + Skip the test if the target does not provide the required support; +--- a/gcc/doc/tm.texi ++++ b/gcc/doc/tm.texi +@@ -35,7 +35,7 @@ through the macros defined in the @file{ + * Register Classes:: Defining the classes of hardware registers. 
+ * Old Constraints:: The old way to define machine-specific constraints. + * Stack and Calling:: Defining which way the stack grows and by how much. +-* Varargs:: Defining the varargs macros. ++* Varargs:: Defining the varargs macros. + * Trampolines:: Code set up at run time to enter a nested function. + * Library Calls:: Controlling how library routines are implicitly called. + * Addressing Modes:: Defining addressing modes valid for memory operands. +@@ -44,7 +44,7 @@ through the macros defined in the @file{ + * Costs:: Defining relative costs of different operations. + * Scheduling:: Adjusting the behavior of the instruction scheduler. + * Sections:: Dividing storage into text, data, and other sections. +-* PIC:: Macros for position independent code. ++* PIC:: Macros for position independent code. + * Assembler Format:: Defining how to write insns and pseudo-ops to output. + * Debugging Info:: Defining the format of debugging output. + * Floating Point:: Handling floating point for cross-compilers. +@@ -1138,6 +1138,9 @@ macro is used instead of that alignment + + If this macro is not defined, then @var{basic-align} is used. + ++This macro should never be used directly; use ++@code{calculate_global_alignment} instead. ++ + @findex strcpy + One use of this macro is to increase alignment of medium-size data to + make it all fit in fewer cache lines. Another is to cause character +@@ -1169,6 +1172,9 @@ If this macro is not defined, then @var{ + + One use of this macro is to increase alignment of medium-size data to + make it all fit in fewer cache lines. ++ ++This macro should never be used directly; use ++@code{calculate_local_alignment} instead. + @end defmac + + @defmac EMPTY_FIELD_BOUNDARY +@@ -1895,11 +1901,11 @@ For passing values in registers, see @re + For returning values in registers, see @ref{Scalar Return}. + + @menu +-* Register Basics:: Number and kinds of registers. +-* Allocation Order:: Order in which registers are allocated. +-* Values in Registers:: What kinds of values each reg can hold. +-* Leaf Functions:: Renumbering registers for leaf functions. +-* Stack Registers:: Handling a register stack such as 80387. ++* Register Basics:: Number and kinds of registers. ++* Allocation Order:: Order in which registers are allocated. ++* Values in Registers:: What kinds of values each reg can hold. ++* Leaf Functions:: Renumbering registers for leaf functions. ++* Stack Registers:: Handling a register stack such as 80387. + @end menu + + @node Register Basics +@@ -2064,6 +2070,15 @@ machines, define @code{REG_ALLOC_ORDER} + the highest numbered allocable register first. + @end defmac + ++@deftypefn {Target Hook} void TARGET_ADJUST_REG_ALLOC_ORDER (int *@var{order}) ++If @code{REG_ALLOC_ORDER} has been defined, this hook is called after ++all command-line options have been processed. It enables adjustment of ++the allocation order based on target-specific flags. Any such adjustment ++should be performed by the hook directly on the elements of the ++array @code{order}. On entry to the hook this array is an ++unmodified copy of @code{REG_ALLOC_ORDER}. ++@end deftypefn ++ + @defmac ORDER_REGS_FOR_LOCAL_ALLOC + A C statement (sans semicolon) to choose the order in which to allocate + hard registers for pseudo-registers local to a basic block. +@@ -2476,6 +2491,15 @@ address where its value is either multip + added to another register (as well as added to a displacement). 
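[Editorial example, not part of the patch] The TARGET_ADJUST_REG_ALLOC_ORDER hook added to tm.texi above is easiest to picture with a small back-end sketch. Only the hook name and its int * signature come from the documentation; the flag variable, register indices and function name below are invented for illustration. A real port would edit the order array according to its own command-line options.

    /* In a target's <cpu>.c file, after the usual target includes.  */

    static int example_prefer_low_regs;   /* hypothetical -m flag */

    static void
    example_adjust_reg_alloc_order (int *order)
    {
      /* On entry 'order' is a copy of REG_ALLOC_ORDER; swap the first
         two entries when the (hypothetical) flag asks for it.  */
      if (example_prefer_low_regs)
        {
          int tmp = order[0];
          order[0] = order[1];
          order[1] = tmp;
        }
    }

    #undef TARGET_ADJUST_REG_ALLOC_ORDER
    #define TARGET_ADJUST_REG_ALLOC_ORDER example_adjust_reg_alloc_order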
+ @end defmac + ++@defmac MODE_INDEX_REG_CLASS (@var{mode}) ++This is a variation of the @code{INDEX_REG_CLASS} macro which allows ++the selection of an index register in a mode dependent manner. It can ++return @code{NO_REGS} for modes that do not support any form of index ++register. If @var{mode} is @code{VOIDmode} then the macro should ++return a class of registers that is suitable for all addresses in ++which an index register of some form is allowed. ++@end defmac ++ + @defmac REGNO_OK_FOR_BASE_P (@var{num}) + A C expression which is nonzero if register number @var{num} is + suitable for use as a base register in operand addresses. It may be +@@ -2535,6 +2559,14 @@ looking for one that is valid, and will + only if neither labeling works. + @end defmac + ++@defmac REGNO_MODE_OK_FOR_INDEX_P (@var{num}, @var{mode}) ++A C expression that is just like @code{REGNO_OK_FOR_INDEX_P}, except ++that the expression may examine the mode of the memory reference ++in @var{mode}. If @var{mode} is @code{VOIDmode}, the macro should ++return true if @var{x} is suitable for all modes in which some ++form of index register is allowed. ++@end defmac ++ + @defmac PREFERRED_RELOAD_CLASS (@var{x}, @var{class}) + A C expression that places additional restrictions on the register class + to use when it is necessary to copy value @var{x} into a register in class +@@ -2969,7 +3001,7 @@ be treated like memory constraints by th + + It should return 1 if the operand type represented by the constraint + at the start of @var{str}, the first letter of which is the letter @var{c}, +- comprises a subset of all memory references including ++comprises a subset of all memory references including + all those whose address is simply a base register. This allows the reload + pass to reload an operand, if it does not directly correspond to the operand + type of @var{c}, by copying its address into a base register. +@@ -4272,6 +4304,18 @@ The definition of @code{LIBRARY_VALUE} n + data types, because none of the library functions returns such types. + @end defmac + ++@deftypefn {Target Hook} rtx TARGET_LIBCALL_VALUE (enum machine_mode ++@var{mode}, rtx @var{fun}) ++Define this hook if the back-end needs to know the name of the libcall ++function in order to determine where the result should be returned. ++ ++The mode of the result is given by @var{mode} and the name of the called ++library function is given by @var{fun}. The hook should return an RTX ++representing the place where the library function result will be returned. ++ ++If this hook is not defined, then LIBCALL_VALUE will be used. ++@end deftypefn ++ + @defmac FUNCTION_VALUE_REGNO_P (@var{regno}) + A C expression that is nonzero if @var{regno} is the number of a hard + register in which the values of called function may come back. +@@ -6741,10 +6785,10 @@ instructions do. + * Uninitialized Data:: Output of uninitialized variables. + * Label Output:: Output and generation of labels. + * Initialization:: General principles of initialization +- and termination routines. ++ and termination routines. + * Macros for Initialization:: +- Specific macros that control the handling of +- initialization and termination routines. ++ Specific macros that control the handling of ++ initialization and termination routines. + * Instruction Output:: Output of actual instructions. + * Dispatch Tables:: Output of jump tables. + * Exception Region Output:: Output of exception region code. +@@ -6873,7 +6917,7 @@ This is true on most ELF targets. 
+ Choose a set of section attributes for use by @code{TARGET_ASM_NAMED_SECTION} + based on a variable or function decl, a section name, and whether or not the + declaration's initializer may contain runtime relocations. @var{decl} may be +- null, in which case read-write data should be assumed. ++null, in which case read-write data should be assumed. + + The default version of this function handles choosing code vs data, + read-only vs read-write data, and @code{flag_pic}. You should only +@@ -7077,7 +7121,7 @@ assembler for grouping arithmetic expres + default to normal parentheses, which is correct for most assemblers. + @end deftypevr + +- These macros are provided by @file{real.h} for writing the definitions ++These macros are provided by @file{real.h} for writing the definitions + of @code{ASM_OUTPUT_DOUBLE} and the like: + + @defmac REAL_VALUE_TO_TARGET_SINGLE (@var{x}, @var{l}) +@@ -10355,6 +10399,36 @@ and @var{type2}, or @code{NULL} if valid + the front end. + @end deftypefn + ++@deftypefn {Target Hook} {const char *} TARGET_INVALID_PARAMETER_TYPE (tree @var{type}) ++If defined, this macro returns the diagnostic message when it is ++invalid for functions to include parameters of type @var{type}, ++or @code{NULL} if validity should be determined by ++the front end. ++@end deftypefn ++ ++@deftypefn {Target Hook} {const char *} TARGET_INVALID_RETURN_TYPE (tree @var{type}) ++If defined, this macro returns the diagnostic message when it is ++invalid for functions to have return type @var{type}, ++or @code{NULL} if validity should be determined by ++the front end. ++@end deftypefn ++ ++@deftypefn {Target Hook} {tree} TARGET_PROMOTED_TYPE (tree @var{type}) ++If defined, this target hook returns the type to which values of ++@var{type} should be promoted when they appear in expressions, ++analogous to the integer promotions, or @code{NULL_TREE} to use the ++front end's normal promotion rules. This hook is useful when there are ++target-specific types with special promotion rules. ++@end deftypefn ++ ++@deftypefn {Target Hook} {tree} TARGET_CONVERT_TO_TYPE (tree @var{type}, tree @var{expr}) ++If defined, this hook returns the result of converting @var{expr} to ++@var{type}. It should return the converted expression, ++or @code{NULL_TREE} to apply the front end's normal conversion rules. ++This hook is useful when there are target-specific types with special ++conversion rules. ++@end deftypefn ++ + @defmac TARGET_USE_JCR_SECTION + This macro determines whether to use the JCR section to register Java + classes. By default, TARGET_USE_JCR_SECTION is defined to 1 if both +@@ -10372,3 +10446,14 @@ to the functions in @file{libgcc} that p + call stack unwinding. It is used in declarations in @file{unwind-generic.h} + and the associated definitions of those functions. + @end defmac ++ ++@deftypefn {Target Hook} {bool} TARGET_ALLOCATE_STACK_SLOTS_FOR_ARGS (void) ++When optimization is disabled, this hook indicates whether or not ++arguments should be allocated to stack slots. Normally, GCC allocates ++stacks slots for arguments when not optimizing in order to make ++debugging easier. However, when a function is declared with ++@code{__attribute__((naked))}, there is no stack frame, and the compiler ++cannot safely move arguments from the registers in which they are passed ++to the stack. Therefore, this hook should return true in general, but ++false for naked functions. The default implementation always returns true. 
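[Editorial example, not part of the patch] The TARGET_ALLOCATE_STACK_SLOTS_FOR_ARGS hook described above exists precisely for naked functions, so a plausible implementation is short. The sketch below follows that description rather than any particular port, and assumes the usual GCC-internal helpers (lookup_attribute, DECL_ATTRIBUTES, current_function_decl) available in a back-end source file.

    /* Allocate argument stack slots as usual, except for functions
       declared with __attribute__((naked)), which have no frame.  */
    static bool
    example_allocate_stack_slots_for_args (void)
    {
      return lookup_attribute ("naked",
                               DECL_ATTRIBUTES (current_function_decl))
             == NULL_TREE;
    }

    #undef TARGET_ALLOCATE_STACK_SLOTS_FOR_ARGS
    #define TARGET_ALLOCATE_STACK_SLOTS_FOR_ARGS \
      example_allocate_stack_slots_for_args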
++@end deftypefn +--- a/gcc/doc/tree-ssa.texi ++++ b/gcc/doc/tree-ssa.texi +@@ -37,12 +37,12 @@ functions and programming constructs nee + passes for GIMPLE@. + + @menu +-* GENERIC:: A high-level language-independent representation. ++* GENERIC:: A high-level language-independent representation. + * GIMPLE:: A lower-level factored tree representation. +-* Annotations:: Attributes for statements and variables. +-* Statement Operands:: Variables referenced by GIMPLE statements. +-* SSA:: Static Single Assignment representation. +-* Alias analysis:: Representing aliased loads and stores. ++* Annotations:: Attributes for statements and variables. ++* Statement Operands:: Variables referenced by GIMPLE statements. ++* SSA:: Static Single Assignment representation. ++* Alias analysis:: Representing aliased loads and stores. + @end menu + + @node GENERIC +@@ -735,10 +735,10 @@ void f() + | RELOP + op0 -> val + op1 -> val +- | COND_EXPR +- op0 -> condition +- op1 -> val +- op2 -> val ++ | COND_EXPR ++ op0 -> condition ++ op1 -> val ++ op2 -> val + @end smallexample + + @node Annotations +@@ -943,7 +943,7 @@ How to choose the appropriate iterator: + + @enumerate + @item Determine whether you are need to see the operand pointers, or just the +- trees, and choose the appropriate macro: ++trees, and choose the appropriate macro: + + @smallexample + Need Macro: +@@ -954,12 +954,12 @@ tree FOR_EACH_SSA_TREE_OPERAN + @end smallexample + + @item You need to declare a variable of the type you are interested +- in, and an ssa_op_iter structure which serves as the loop +- controlling variable. ++in, and an ssa_op_iter structure which serves as the loop controlling ++variable. + + @item Determine which operands you wish to use, and specify the flags of +- those you are interested in. They are documented in +- @file{tree-ssa-operands.h}: ++those you are interested in. They are documented in ++@file{tree-ssa-operands.h}: + + @smallexample + #define SSA_OP_USE 0x01 /* @r{Real USE operands.} */ +@@ -1228,27 +1228,27 @@ which''. + + The following macros can be used to examine PHI nodes + +-@defmac PHI_RESULT (@var{phi}) ++@defmac PHI_RESULT (@var{phi}) + Returns the @code{SSA_NAME} created by PHI node @var{phi} (i.e., + @var{phi}'s LHS)@. + @end defmac + +-@defmac PHI_NUM_ARGS (@var{phi}) ++@defmac PHI_NUM_ARGS (@var{phi}) + Returns the number of arguments in @var{phi}. This number is exactly + the number of incoming edges to the basic block holding @var{phi}@. + @end defmac + +-@defmac PHI_ARG_ELT (@var{phi}, @var{i}) ++@defmac PHI_ARG_ELT (@var{phi}, @var{i}) + Returns a tuple representing the @var{i}th argument of @var{phi}@. + Each element of this tuple contains an @code{SSA_NAME} @var{var} and + the incoming edge through which @var{var} flows. + @end defmac + +-@defmac PHI_ARG_EDGE (@var{phi}, @var{i}) ++@defmac PHI_ARG_EDGE (@var{phi}, @var{i}) + Returns the incoming edge for the @var{i}th argument of @var{phi}. + @end defmac + +-@defmac PHI_ARG_DEF (@var{phi}, @var{i}) ++@defmac PHI_ARG_DEF (@var{phi}, @var{i}) + Returns the @code{SSA_NAME} for the @var{i}th argument of @var{phi}. + @end defmac + +@@ -1274,36 +1274,36 @@ the program@. 
+ For instance, given the following code: + + @smallexample +- 1 L0: +- 2 x_1 = PHI (0, x_5) +- 3 if (x_1 < 10) +- 4 if (x_1 > 7) +- 5 y_2 = 0 +- 6 else +- 7 y_3 = x_1 + x_7 +- 8 endif +- 9 x_5 = x_1 + 1 ++ 1 L0: ++ 2 x_1 = PHI (0, x_5) ++ 3 if (x_1 < 10) ++ 4 if (x_1 > 7) ++ 5 y_2 = 0 ++ 6 else ++ 7 y_3 = x_1 + x_7 ++ 8 endif ++ 9 x_5 = x_1 + 1 + 10 goto L0; +- 11 endif ++ 11 endif + @end smallexample + + Suppose that we insert new names @code{x_10} and @code{x_11} (lines + @code{4} and @code{8})@. + + @smallexample +- 1 L0: +- 2 x_1 = PHI (0, x_5) +- 3 if (x_1 < 10) +- 4 x_10 = @dots{} +- 5 if (x_1 > 7) +- 6 y_2 = 0 +- 7 else +- 8 x_11 = @dots{} +- 9 y_3 = x_1 + x_7 +- 10 endif +- 11 x_5 = x_1 + 1 +- 12 goto L0; +- 13 endif ++ 1 L0: ++ 2 x_1 = PHI (0, x_5) ++ 3 if (x_1 < 10) ++ 4 x_10 = @dots{} ++ 5 if (x_1 > 7) ++ 6 y_2 = 0 ++ 7 else ++ 8 x_11 = @dots{} ++ 9 y_3 = x_1 + x_7 ++ 10 endif ++ 11 x_5 = x_1 + 1 ++ 12 goto L0; ++ 13 endif + @end smallexample + + We want to replace all the uses of @code{x_1} with the new definitions +@@ -1341,40 +1341,40 @@ There are several @code{TODO} flags that + + @itemize @bullet + @item @code{TODO_update_ssa}. Update the SSA form inserting PHI nodes +- for newly exposed symbols and virtual names marked for updating. +- When updating real names, only insert PHI nodes for a real name +- @code{O_j} in blocks reached by all the new and old definitions for +- @code{O_j}. If the iterated dominance frontier for @code{O_j} +- is not pruned, we may end up inserting PHI nodes in blocks that +- have one or more edges with no incoming definition for +- @code{O_j}. This would lead to uninitialized warnings for +- @code{O_j}'s symbol@. ++for newly exposed symbols and virtual names marked for updating. ++When updating real names, only insert PHI nodes for a real name ++@code{O_j} in blocks reached by all the new and old definitions for ++@code{O_j}. If the iterated dominance frontier for @code{O_j} ++is not pruned, we may end up inserting PHI nodes in blocks that ++have one or more edges with no incoming definition for ++@code{O_j}. This would lead to uninitialized warnings for ++@code{O_j}'s symbol@. + + @item @code{TODO_update_ssa_no_phi}. Update the SSA form without +- inserting any new PHI nodes at all. This is used by passes that +- have either inserted all the PHI nodes themselves or passes that +- need only to patch use-def and def-def chains for virtuals +- (e.g., DCE)@. ++inserting any new PHI nodes at all. This is used by passes that ++have either inserted all the PHI nodes themselves or passes that ++need only to patch use-def and def-def chains for virtuals ++(e.g., DCE)@. + + + @item @code{TODO_update_ssa_full_phi}. Insert PHI nodes everywhere +- they are needed. No pruning of the IDF is done. This is used +- by passes that need the PHI nodes for @code{O_j} even if it +- means that some arguments will come from the default definition +- of @code{O_j}'s symbol (e.g., @code{pass_linear_transform})@. +- +- WARNING: If you need to use this flag, chances are that your +- pass may be doing something wrong. Inserting PHI nodes for an +- old name where not all edges carry a new replacement may lead to +- silent codegen errors or spurious uninitialized warnings@. ++they are needed. No pruning of the IDF is done. This is used ++by passes that need the PHI nodes for @code{O_j} even if it ++means that some arguments will come from the default definition ++of @code{O_j}'s symbol (e.g., @code{pass_linear_transform})@. 
++ ++WARNING: If you need to use this flag, chances are that your ++pass may be doing something wrong. Inserting PHI nodes for an ++old name where not all edges carry a new replacement may lead to ++silent codegen errors or spurious uninitialized warnings@. + + @item @code{TODO_update_ssa_only_virtuals}. Passes that update the +- SSA form on their own may want to delegate the updating of +- virtual names to the generic updater. Since FUD chains are +- easier to maintain, this simplifies the work they need to do. +- NOTE: If this flag is used, any OLD->NEW mappings for real names +- are explicitly destroyed and only the symbols marked for +- renaming are processed@. ++SSA form on their own may want to delegate the updating of ++virtual names to the generic updater. Since FUD chains are ++easier to maintain, this simplifies the work they need to do. ++NOTE: If this flag is used, any OLD->NEW mappings for real names ++are explicitly destroyed and only the symbols marked for ++renaming are processed@. + @end itemize + + @subsection Preserving the virtual SSA form +@@ -1445,8 +1445,8 @@ slightly different. For each argument @ + function will: + + @enumerate +-@item Walk the use-def chains for @var{arg}. +-@item Call @code{FN (@var{arg}, @var{phi}, @var{data})}. ++@item Walk the use-def chains for @var{arg}. ++@item Call @code{FN (@var{arg}, @var{phi}, @var{data})}. + @end enumerate + + Note how the first argument to @var{fn} is no longer the original +@@ -1466,26 +1466,26 @@ hooks to execute custom code at various + + @enumerate + @item Once to initialize any local data needed while processing +- @var{bb} and its children. This local data is pushed into an +- internal stack which is automatically pushed and popped as the +- walker traverses the dominator tree. ++@var{bb} and its children. This local data is pushed into an ++internal stack which is automatically pushed and popped as the ++walker traverses the dominator tree. + + @item Once before traversing all the statements in the @var{bb}. + + @item Once for every statement inside @var{bb}. + + @item Once after traversing all the statements and before recursing +- into @var{bb}'s dominator children. ++into @var{bb}'s dominator children. + + @item It then recurses into all the dominator children of @var{bb}. + + @item After recursing into all the dominator children of @var{bb} it +- can, optionally, traverse every statement in @var{bb} again +- (i.e., repeating steps 2 and 3). ++can, optionally, traverse every statement in @var{bb} again ++(i.e., repeating steps 2 and 3). + + @item Once after walking the statements in @var{bb} and @var{bb}'s +- dominator children. At this stage, the block local data stack +- is popped. ++dominator children. At this stage, the block local data stack ++is popped. + @end enumerate + @end deftypefn + +@@ -1535,16 +1535,16 @@ int bar (void) + If you copy the symbol tag for a variable for some reason, you probably + also want to copy the subvariables for that variable. + +-@item Points-to and escape analysis. ++@item Points-to and escape analysis. + + This phase walks the use-def chains in the SSA web looking for + three things: + +- @itemize @bullet +- @item Assignments of the form @code{P_i = &VAR} +- @item Assignments of the form P_i = malloc() +- @item Pointers and ADDR_EXPR that escape the current function. +- @end itemize ++@itemize @bullet ++@item Assignments of the form @code{P_i = &VAR} ++@item Assignments of the form P_i = malloc() ++@item Pointers and ADDR_EXPR that escape the current function. 
++@end itemize + + The concept of `escaping' is the same one used in the Java world. + When a pointer or an ADDR_EXPR escapes, it means that it has been +@@ -1562,7 +1562,7 @@ call-clobbered. Simply put, if an ADDR_ + variable is call-clobbered. If a pointer P_i escapes, then all + the variables pointed-to by P_i (and its memory tag) also escape. + +-@item Compute flow-sensitive aliases ++@item Compute flow-sensitive aliases + + We have two classes of memory tags. Memory tags associated with + the pointed-to data type of the pointers in the program. These +@@ -1579,7 +1579,7 @@ associated with each pointer P_i. If P_ + call-clobbered the variables it points to and its tag. + + +-@item Compute flow-insensitive aliases ++@item Compute flow-insensitive aliases + + This pass will compare the alias set of every symbol memory tag and + every addressable variable found in the program. Given a symbol +--- a/gcc/doc/trouble.texi ++++ b/gcc/doc/trouble.texi +@@ -19,21 +19,21 @@ missing features that are too much work + where people's opinions differ as to what is best. + + @menu +-* Actual Bugs:: Bugs we will fix later. +-* Cross-Compiler Problems:: Common problems of cross compiling with GCC. ++* Actual Bugs:: Bugs we will fix later. ++* Cross-Compiler Problems:: Common problems of cross compiling with GCC. + * Interoperation:: Problems using GCC with other compilers, +- and with certain linkers, assemblers and debuggers. ++ and with certain linkers, assemblers and debuggers. + * Incompatibilities:: GCC is incompatible with traditional C. + * Fixed Headers:: GCC uses corrected versions of system header files. +- This is necessary, but doesn't always work smoothly. ++ This is necessary, but doesn't always work smoothly. + * Standard Libraries:: GCC uses the system C library, which might not be +- compliant with the ISO C standard. ++ compliant with the ISO C standard. + * Disappointments:: Regrettable things we can't change, but not quite bugs. +-* C++ Misunderstandings:: Common misunderstandings with GNU C++. ++* C++ Misunderstandings:: Common misunderstandings with GNU C++. + * Protoize Caveats:: Things to watch out for when using @code{protoize}. +-* Non-bugs:: Things we think are right, but some others disagree. ++* Non-bugs:: Things we think are right, but some others disagree. + * Warnings and Errors:: Which problems in your code get warnings, +- and which get errors. ++ and which get errors. + @end menu + + @node Actual Bugs +--- a/gcc/dse.c ++++ b/gcc/dse.c +@@ -228,7 +228,7 @@ struct store_info + /* An bitmask as wide as the number of bytes in the word that + contains a 1 if the byte may be needed. The store is unused if + all of the bits are 0. */ +- long positions_needed; ++ unsigned HOST_WIDEST_INT positions_needed; + + /* The next store info for this insn. */ + struct store_info *next; +@@ -239,6 +239,15 @@ struct store_info + rtx rhs; + }; + ++/* Return a bitmask with the first N low bits set. */ ++ ++static unsigned HOST_WIDEST_INT ++lowpart_bitmask (int n) ++{ ++ unsigned HOST_WIDEST_INT mask = ~(unsigned HOST_WIDEST_INT) 0; ++ return mask >> (HOST_BITS_PER_WIDEST_INT - n); ++} ++ + typedef struct store_info *store_info_t; + static alloc_pool cse_store_info_pool; + static alloc_pool rtx_store_info_pool; +@@ -1153,6 +1162,39 @@ clear_rhs_from_active_local_stores (void + } + } + ++/* Mark byte POS bytes from the beginning of store S_INFO as unneeded. 
*/ ++ ++static inline void ++set_position_unneeded (store_info_t s_info, int pos) ++{ ++ s_info->positions_needed &= ~(((unsigned HOST_WIDEST_INT) 1) << pos); ++} ++ ++/* Mark the whole store S_INFO as unneeded. */ ++ ++static inline void ++set_all_positions_unneeded (store_info_t s_info) ++{ ++ s_info->positions_needed = (unsigned HOST_WIDEST_INT) 0; ++} ++ ++/* Return TRUE if any bytes from S_INFO store are needed. */ ++ ++static inline bool ++any_positions_needed_p (store_info_t s_info) ++{ ++ return (s_info->positions_needed != (unsigned HOST_WIDEST_INT) 0); ++} ++ ++/* Return TRUE if all bytes START through START+WIDTH-1 from S_INFO ++ store are needed. */ ++ ++static inline bool ++all_positions_needed_p (store_info_t s_info, int start, int width) ++{ ++ unsigned HOST_WIDEST_INT mask = lowpart_bitmask (width) << start; ++ return (s_info->positions_needed & mask) == mask; ++} + + /* BODY is an instruction pattern that belongs to INSN. Return 1 if + there is a candidate store, after adding it to the appropriate +@@ -1223,6 +1265,7 @@ record_store (rtx body, bb_info_t bb_inf + } + + width = GET_MODE_SIZE (GET_MODE (mem)); ++ gcc_assert ((unsigned) width <= HOST_BITS_PER_WIDEST_INT); + + if (spill_alias_set) + { +@@ -1308,7 +1351,7 @@ record_store (rtx body, bb_info_t bb_inf + && (GET_MODE (mem) == entry->mode)) + { + delete = true; +- s_info->positions_needed = 0; ++ set_all_positions_unneeded (s_info); + } + if (dump_file) + fprintf (dump_file, " trying spill store in insn=%d alias_set=%d\n", +@@ -1322,9 +1365,10 @@ record_store (rtx body, bb_info_t bb_inf + fprintf (dump_file, " trying store in insn=%d gid=%d[%d..%d)\n", + INSN_UID (ptr->insn), s_info->group_id, + (int)s_info->begin, (int)s_info->end); +- for (i = offset; i < offset+width; i++) +- if (i >= s_info->begin && i < s_info->end) +- s_info->positions_needed &= ~(1L << (i - s_info->begin)); ++ for (i = MAX (offset, s_info->begin); ++ i < offset + width && i < s_info->end; ++ i++) ++ set_position_unneeded (s_info, i - s_info->begin); + } + else if (s_info->rhs) + /* Need to see if it is possible for this store to overwrite +@@ -1340,9 +1384,9 @@ record_store (rtx body, bb_info_t bb_inf + + /* An insn can be deleted if every position of every one of + its s_infos is zero. */ +- if (s_info->positions_needed != 0) ++ if (any_positions_needed_p (s_info)) + delete = false; +- ++ + if (delete) + { + insn_info_t insn_to_delete = ptr; +@@ -1360,8 +1404,6 @@ record_store (rtx body, bb_info_t bb_inf + ptr = next; + } + +- gcc_assert ((unsigned) width < sizeof (store_info->positions_needed) * CHAR_BIT); +- + /* Finish filling in the store_info. 
*/ + store_info->next = insn_info->store_rec; + insn_info->store_rec = store_info; +@@ -1369,7 +1411,7 @@ record_store (rtx body, bb_info_t bb_inf + store_info->alias_set = spill_alias_set; + store_info->mem_addr = get_addr (XEXP (mem, 0)); + store_info->cse_base = base; +- store_info->positions_needed = (1L << width) - 1; ++ store_info->positions_needed = lowpart_bitmask (width); + store_info->group_id = group_id; + store_info->begin = offset; + store_info->end = offset + width; +@@ -1820,16 +1862,14 @@ check_mem_read_rtx (rtx *loc, void *data + else + { + if (store_info->rhs +- && (offset >= store_info->begin) +- && (offset + width <= store_info->end)) +- { +- int mask = ((1L << width) - 1) << (offset - store_info->begin); +- +- if ((store_info->positions_needed & mask) == mask +- && replace_read (store_info, i_ptr, +- read_info, insn_info, loc)) +- return 0; +- } ++ && offset >= store_info->begin ++ && offset + width <= store_info->end ++ && all_positions_needed_p (store_info, ++ offset - store_info->begin, ++ width) ++ && replace_read (store_info, i_ptr, read_info, ++ insn_info, loc)) ++ return 0; + /* The bases are the same, just see if the offsets + overlap. */ + if ((offset < store_info->end) +@@ -1887,16 +1927,12 @@ check_mem_read_rtx (rtx *loc, void *data + if (store_info->rhs + && store_info->group_id == -1 + && store_info->cse_base == base +- && (offset >= store_info->begin) +- && (offset + width <= store_info->end)) +- { +- int mask = ((1L << width) - 1) << (offset - store_info->begin); +- +- if ((store_info->positions_needed & mask) == mask +- && replace_read (store_info, i_ptr, +- read_info, insn_info, loc)) +- return 0; +- } ++ && offset >= store_info->begin ++ && offset + width <= store_info->end ++ && all_positions_needed_p (store_info, ++ offset - store_info->begin, width) ++ && replace_read (store_info, i_ptr, read_info, insn_info, loc)) ++ return 0; + + if (!store_info->alias_set) + remove = canon_true_dependence (store_info->mem, +--- a/gcc/dwarf2out.c ++++ b/gcc/dwarf2out.c +@@ -1705,7 +1705,7 @@ static dw_cfa_location cfa_temp; + static void + dwarf2out_frame_debug_expr (rtx expr, const char *label) + { +- rtx src, dest; ++ rtx src, dest, span; + HOST_WIDE_INT offset; + + /* If RTX_FRAME_RELATED_P is set on a PARALLEL, process each member of +@@ -2081,7 +2081,32 @@ dwarf2out_frame_debug_expr (rtx expr, co + } + + def_cfa_1 (label, &cfa); +- queue_reg_save (label, src, NULL_RTX, offset); ++ { ++ span = targetm.dwarf_register_span (src); ++ ++ if (!span) ++ queue_reg_save (label, src, NULL_RTX, offset); ++ else ++ { ++ /* We have a PARALLEL describing where the contents of SRC ++ live. Queue register saves for each piece of the ++ PARALLEL. */ ++ int par_index; ++ int limit; ++ HOST_WIDE_INT span_offset = offset; ++ ++ gcc_assert (GET_CODE (span) == PARALLEL); ++ ++ limit = XVECLEN (span, 0); ++ for (par_index = 0; par_index < limit; par_index++) ++ { ++ rtx elem = XVECEXP (span, 0, par_index); ++ ++ queue_reg_save (label, elem, NULL_RTX, span_offset); ++ span_offset += GET_MODE_SIZE (GET_MODE (elem)); ++ } ++ } ++ } + break; + + default: +@@ -3914,6 +3939,7 @@ static void dwarf2out_imported_module_or + static void dwarf2out_abstract_function (tree); + static void dwarf2out_var_location (rtx); + static void dwarf2out_begin_function (tree); ++static void dwarf2out_set_name (tree, tree); + + /* The debug hooks structure. 
*/ + +@@ -3947,6 +3973,7 @@ const struct gcc_debug_hooks dwarf2_debu + debug_nothing_int, /* handle_pch */ + dwarf2out_var_location, + dwarf2out_switch_text_section, ++ dwarf2out_set_name, + 1 /* start_end_main_source_file */ + }; + #endif +@@ -5334,12 +5361,9 @@ debug_str_eq (const void *x1, const void + (const char *)x2) == 0; + } + +-/* Add a string attribute value to a DIE. */ +- +-static inline void +-add_AT_string (dw_die_ref die, enum dwarf_attribute attr_kind, const char *str) ++static struct indirect_string_node * ++find_AT_string (const char *str) + { +- dw_attr_node attr; + struct indirect_string_node *node; + void **slot; + +@@ -5360,6 +5384,18 @@ add_AT_string (dw_die_ref die, enum dwar + node = (struct indirect_string_node *) *slot; + + node->refcount++; ++ return node; ++} ++ ++/* Add a string attribute value to a DIE. */ ++ ++static inline void ++add_AT_string (dw_die_ref die, enum dwarf_attribute attr_kind, const char *str) ++{ ++ dw_attr_node attr; ++ struct indirect_string_node *node; ++ ++ node = find_AT_string (str); + + attr.dw_attr = attr_kind; + attr.dw_attr_val.val_class = dw_val_class_str; +@@ -14538,6 +14574,31 @@ maybe_emit_file (struct dwarf_file_data + return fd->emitted_number; + } + ++/* Replace DW_AT_name for the decl with name. */ ++ ++static void ++dwarf2out_set_name (tree decl, tree name) ++{ ++ dw_die_ref die; ++ dw_attr_ref attr; ++ ++ die = TYPE_SYMTAB_DIE (decl); ++ if (!die) ++ return; ++ ++ attr = get_AT (die, DW_AT_name); ++ if (attr) ++ { ++ struct indirect_string_node *node; ++ ++ node = find_AT_string (dwarf2_name (name, 0)); ++ /* replace the string. */ ++ attr->dw_attr_val.v.val_str = node; ++ } ++ ++ else ++ add_name_attribute (die, dwarf2_name (name, 0)); ++} + /* Called by the final INSN scan whenever we see a var location. We + use it to drop labels in the right places, and throw the location in + our lookup table. */ +--- a/gcc/emit-rtl.c ++++ b/gcc/emit-rtl.c +@@ -1909,6 +1909,7 @@ adjust_address_1 (rtx memref, enum machi + rtx memoffset = MEM_OFFSET (memref); + rtx size = 0; + unsigned int memalign = MEM_ALIGN (memref); ++ int pbits; + + /* If there are no changes, just return the original memory reference. */ + if (mode == GET_MODE (memref) && !offset +@@ -1920,6 +1921,16 @@ adjust_address_1 (rtx memref, enum machi + (plus (plus reg reg) const_int) -- so do this always. */ + addr = copy_rtx (addr); + ++ /* Convert a possibly large offset to a signed value within the ++ range of the target address space. */ ++ pbits = GET_MODE_BITSIZE (Pmode); ++ if (HOST_BITS_PER_WIDE_INT > pbits) ++ { ++ int shift = HOST_BITS_PER_WIDE_INT - pbits; ++ offset = (((HOST_WIDE_INT) ((unsigned HOST_WIDE_INT) offset << shift)) ++ >> shift); ++ } ++ + if (adjust) + { + /* If MEMREF is a LO_SUM and the offset is within the alignment of the +--- a/gcc/explow.c ++++ b/gcc/explow.c +@@ -489,6 +489,7 @@ memory_address (enum machine_mode mode, + + done: + ++ gcc_assert (memory_address_p (mode, x)); + /* If we didn't change the address, we are done. Otherwise, mark + a reg as a pointer if we have REG or REG + CONST_INT. */ + if (oldx == x) +@@ -1489,9 +1490,9 @@ hard_function_value (const_tree valtype, + in which a scalar value of mode MODE was returned by a library call. 
*/ + + rtx +-hard_libcall_value (enum machine_mode mode) ++hard_libcall_value (enum machine_mode mode, rtx fun) + { +- return LIBCALL_VALUE (mode); ++ return targetm.calls.libcall_value (mode, fun); + } + + /* Look up the tree code for a given rtx code +--- a/gcc/expmed.c ++++ b/gcc/expmed.c +@@ -103,7 +103,8 @@ static int add_cost[NUM_MACHINE_MODES]; + static int neg_cost[NUM_MACHINE_MODES]; + static int shift_cost[NUM_MACHINE_MODES][MAX_BITS_PER_WORD]; + static int shiftadd_cost[NUM_MACHINE_MODES][MAX_BITS_PER_WORD]; +-static int shiftsub_cost[NUM_MACHINE_MODES][MAX_BITS_PER_WORD]; ++static int shiftsub0_cost[NUM_MACHINE_MODES][MAX_BITS_PER_WORD]; ++static int shiftsub1_cost[NUM_MACHINE_MODES][MAX_BITS_PER_WORD]; + static int mul_cost[NUM_MACHINE_MODES]; + static int sdiv_cost[NUM_MACHINE_MODES]; + static int udiv_cost[NUM_MACHINE_MODES]; +@@ -130,7 +131,8 @@ init_expmed (void) + struct rtx_def shift; rtunion shift_fld1; + struct rtx_def shift_mult; rtunion shift_mult_fld1; + struct rtx_def shift_add; rtunion shift_add_fld1; +- struct rtx_def shift_sub; rtunion shift_sub_fld1; ++ struct rtx_def shift_sub0; rtunion shift_sub0_fld1; ++ struct rtx_def shift_sub1; rtunion shift_sub1_fld1; + } all; + + rtx pow2[MAX_BITS_PER_WORD]; +@@ -202,9 +204,13 @@ init_expmed (void) + XEXP (&all.shift_add, 0) = &all.shift_mult; + XEXP (&all.shift_add, 1) = &all.reg; + +- PUT_CODE (&all.shift_sub, MINUS); +- XEXP (&all.shift_sub, 0) = &all.shift_mult; +- XEXP (&all.shift_sub, 1) = &all.reg; ++ PUT_CODE (&all.shift_sub0, MINUS); ++ XEXP (&all.shift_sub0, 0) = &all.shift_mult; ++ XEXP (&all.shift_sub0, 1) = &all.reg; ++ ++ PUT_CODE (&all.shift_sub1, MINUS); ++ XEXP (&all.shift_sub1, 0) = &all.reg; ++ XEXP (&all.shift_sub1, 1) = &all.shift_mult; + + for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); + mode != VOIDmode; +@@ -222,7 +228,8 @@ init_expmed (void) + PUT_MODE (&all.shift, mode); + PUT_MODE (&all.shift_mult, mode); + PUT_MODE (&all.shift_add, mode); +- PUT_MODE (&all.shift_sub, mode); ++ PUT_MODE (&all.shift_sub0, mode); ++ PUT_MODE (&all.shift_sub1, mode); + + add_cost[mode] = rtx_cost (&all.plus, SET); + neg_cost[mode] = rtx_cost (&all.neg, SET); +@@ -248,7 +255,7 @@ init_expmed (void) + } + + shift_cost[mode][0] = 0; +- shiftadd_cost[mode][0] = shiftsub_cost[mode][0] = add_cost[mode]; ++ shiftadd_cost[mode][0] = shiftsub0_cost[mode][0] = shiftsub1_cost[mode][0] = add_cost[mode]; + + n = MIN (MAX_BITS_PER_WORD, GET_MODE_BITSIZE (mode)); + for (m = 1; m < n; m++) +@@ -258,7 +265,8 @@ init_expmed (void) + + shift_cost[mode][m] = rtx_cost (&all.shift, SET); + shiftadd_cost[mode][m] = rtx_cost (&all.shift_add, SET); +- shiftsub_cost[mode][m] = rtx_cost (&all.shift_sub, SET); ++ shiftsub0_cost[mode][m] = rtx_cost (&all.shift_sub0, SET); ++ shiftsub1_cost[mode][m] = rtx_cost (&all.shift_sub1, SET); + } + } + } +@@ -976,7 +984,10 @@ store_fixed_bit_field (rtx op0, unsigned + } + + if (op0 != temp) +- emit_move_insn (op0, temp); ++ { ++ op0 = copy_rtx (op0); ++ emit_move_insn (op0, temp); ++ } + } + + /* Store a bit field that is split across multiple accessible memory objects. 
+@@ -2426,6 +2437,7 @@ synth_mult (struct algorithm *alg_out, u + struct mult_cost best_cost; + struct mult_cost new_limit; + int op_cost, op_latency; ++ unsigned HOST_WIDE_INT orig_t = t; + unsigned HOST_WIDE_INT q; + int maxm = MIN (BITS_PER_WORD, GET_MODE_BITSIZE (mode)); + int hash_index; +@@ -2568,6 +2580,38 @@ synth_mult (struct algorithm *alg_out, u + best_alg->log[best_alg->ops] = m; + best_alg->op[best_alg->ops] = alg_shift; + } ++ ++ /* See if treating ORIG_T as a signed number yields a better ++ sequence. Try this sequence only for a negative ORIG_T ++ as it would be useless for a non-negative ORIG_T. */ ++ if ((HOST_WIDE_INT) orig_t < 0) ++ { ++ /* Shift ORIG_T as follows because a right shift of a ++ negative-valued signed type is implementation ++ defined. */ ++ q = ~(~orig_t >> m); ++ /* The function expand_shift will choose between a shift ++ and a sequence of additions, so the observed cost is ++ given as MIN (m * add_cost[mode], ++ shift_cost[mode][m]). */ ++ op_cost = m * add_cost[mode]; ++ if (shift_cost[mode][m] < op_cost) ++ op_cost = shift_cost[mode][m]; ++ new_limit.cost = best_cost.cost - op_cost; ++ new_limit.latency = best_cost.latency - op_cost; ++ synth_mult (alg_in, q, &new_limit, mode); ++ ++ alg_in->cost.cost += op_cost; ++ alg_in->cost.latency += op_cost; ++ if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost)) ++ { ++ struct algorithm *x; ++ best_cost = alg_in->cost; ++ x = alg_in, alg_in = best_alg, best_alg = x; ++ best_alg->log[best_alg->ops] = m; ++ best_alg->op[best_alg->ops] = alg_shift; ++ } ++ } + } + if (cache_hit) + goto done; +@@ -2630,6 +2674,29 @@ synth_mult (struct algorithm *alg_out, u + best_alg->op[best_alg->ops] = alg_add_t_m2; + } + } ++ ++ /* We may be able to calculate a * -7, a * -15, a * -31, etc ++ quickly with a - a * n for some appropriate constant n. */ ++ m = exact_log2 (-orig_t + 1); ++ if (m >= 0 && m < maxm) ++ { ++ op_cost = shiftsub1_cost[mode][m]; ++ new_limit.cost = best_cost.cost - op_cost; ++ new_limit.latency = best_cost.latency - op_cost; ++ synth_mult (alg_in, (unsigned HOST_WIDE_INT) (-orig_t + 1) >> m, &new_limit, mode); ++ ++ alg_in->cost.cost += op_cost; ++ alg_in->cost.latency += op_cost; ++ if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost)) ++ { ++ struct algorithm *x; ++ best_cost = alg_in->cost; ++ x = alg_in, alg_in = best_alg, best_alg = x; ++ best_alg->log[best_alg->ops] = m; ++ best_alg->op[best_alg->ops] = alg_sub_t_m2; ++ } ++ } ++ + if (cache_hit) + goto done; + } +@@ -2699,9 +2766,9 @@ synth_mult (struct algorithm *alg_out, u + hardware the shift may be executed concurrently with the + earlier steps in the algorithm. 
*/ + op_cost = add_cost[mode] + shift_cost[mode][m]; +- if (shiftsub_cost[mode][m] < op_cost) ++ if (shiftsub0_cost[mode][m] < op_cost) + { +- op_cost = shiftsub_cost[mode][m]; ++ op_cost = shiftsub0_cost[mode][m]; + op_latency = op_cost; + } + else +@@ -2764,7 +2831,7 @@ synth_mult (struct algorithm *alg_out, u + m = exact_log2 (q); + if (m >= 0 && m < maxm) + { +- op_cost = shiftsub_cost[mode][m]; ++ op_cost = shiftsub0_cost[mode][m]; + new_limit.cost = best_cost.cost - op_cost; + new_limit.latency = best_cost.latency - op_cost; + synth_mult (alg_in, (t + 1) >> m, &new_limit, mode); +--- a/gcc/expr.c ++++ b/gcc/expr.c +@@ -2038,10 +2038,55 @@ emit_group_store (rtx orig_dst, rtx src, + HOST_WIDE_INT bytepos = INTVAL (XEXP (XVECEXP (src, 0, i), 1)); + enum machine_mode mode = GET_MODE (tmps[i]); + unsigned int bytelen = GET_MODE_SIZE (mode); ++ unsigned int adj_bytelen = bytelen; + rtx dest = dst; + + /* Handle trailing fragments that run over the size of the struct. */ + if (ssize >= 0 && bytepos + (HOST_WIDE_INT) bytelen > ssize) ++ adj_bytelen = ssize - bytepos; ++ ++ if (GET_CODE (dst) == CONCAT) ++ { ++ if (bytepos + adj_bytelen ++ <= GET_MODE_SIZE (GET_MODE (XEXP (dst, 0)))) ++ dest = XEXP (dst, 0); ++ else if (bytepos >= GET_MODE_SIZE (GET_MODE (XEXP (dst, 0)))) ++ { ++ bytepos -= GET_MODE_SIZE (GET_MODE (XEXP (dst, 0))); ++ dest = XEXP (dst, 1); ++ } ++ else ++ { ++ enum machine_mode dest_mode = GET_MODE (dest); ++ enum machine_mode tmp_mode = GET_MODE (tmps[i]); ++ ++ gcc_assert (bytepos == 0 && XVECLEN (src, 0)); ++ ++ if (GET_MODE_ALIGNMENT (dest_mode) ++ >= GET_MODE_ALIGNMENT (tmp_mode)) ++ { ++ dest = assign_stack_temp (dest_mode, ++ GET_MODE_SIZE (dest_mode), ++ 0); ++ emit_move_insn (adjust_address (dest, ++ tmp_mode, ++ bytepos), ++ tmps[i]); ++ dst = dest; ++ } ++ else ++ { ++ dest = assign_stack_temp (tmp_mode, ++ GET_MODE_SIZE (tmp_mode), ++ 0); ++ emit_move_insn (dest, tmps[i]); ++ dst = adjust_address (dest, dest_mode, bytepos); ++ } ++ break; ++ } ++ } ++ ++ if (ssize >= 0 && bytepos + (HOST_WIDE_INT) bytelen > ssize) + { + /* store_bit_field always takes its value from the lsb. + Move the fragment to the lsb if it's not already there. */ +@@ -2059,28 +2104,7 @@ emit_group_store (rtx orig_dst, rtx src, + build_int_cst (NULL_TREE, shift), + tmps[i], 0); + } +- bytelen = ssize - bytepos; +- } +- +- if (GET_CODE (dst) == CONCAT) +- { +- if (bytepos + bytelen <= GET_MODE_SIZE (GET_MODE (XEXP (dst, 0)))) +- dest = XEXP (dst, 0); +- else if (bytepos >= GET_MODE_SIZE (GET_MODE (XEXP (dst, 0)))) +- { +- bytepos -= GET_MODE_SIZE (GET_MODE (XEXP (dst, 0))); +- dest = XEXP (dst, 1); +- } +- else +- { +- gcc_assert (bytepos == 0 && XVECLEN (src, 0)); +- dest = assign_stack_temp (GET_MODE (dest), +- GET_MODE_SIZE (GET_MODE (dest)), 0); +- emit_move_insn (adjust_address (dest, GET_MODE (tmps[i]), bytepos), +- tmps[i]); +- dst = dest; +- break; +- } ++ bytelen = adj_bytelen; + } + + /* Optimize the access just a bit. */ +--- a/gcc/expr.h ++++ b/gcc/expr.h +@@ -729,7 +729,7 @@ extern void probe_stack_range (HOST_WIDE + + /* Return an rtx that refers to the value returned by a library call + in its original home. This becomes invalid if any more code is emitted. 
*/ +-extern rtx hard_libcall_value (enum machine_mode); ++extern rtx hard_libcall_value (enum machine_mode, rtx); + + /* Return the mode desired by operand N of a particular bitfield + insert/extract insn, or MAX_MACHINE_MODE if no such insn is +--- a/gcc/final.c ++++ b/gcc/final.c +@@ -893,6 +893,7 @@ shorten_branches (rtx first ATTRIBUTE_UN + if (LABEL_P (insn)) + { + rtx next; ++ bool next_is_jumptable; + + /* Merge in alignments computed by compute_alignments. */ + log = LABEL_TO_ALIGNMENT (insn); +@@ -902,31 +903,30 @@ shorten_branches (rtx first ATTRIBUTE_UN + max_skip = LABEL_TO_MAX_SKIP (insn); + } + +- log = LABEL_ALIGN (insn); +- if (max_log < log) ++ next = next_nonnote_insn (insn); ++ next_is_jumptable = next && JUMP_TABLE_DATA_P (next); ++ if (!next_is_jumptable) + { +- max_log = log; +- max_skip = LABEL_ALIGN_MAX_SKIP; ++ log = LABEL_ALIGN (insn); ++ if (max_log < log) ++ { ++ max_log = log; ++ max_skip = LABEL_ALIGN_MAX_SKIP; ++ } + } +- next = next_nonnote_insn (insn); + /* ADDR_VECs only take room if read-only data goes into the text + section. */ +- if (JUMP_TABLES_IN_TEXT_SECTION +- || readonly_data_section == text_section) +- if (next && JUMP_P (next)) +- { +- rtx nextbody = PATTERN (next); +- if (GET_CODE (nextbody) == ADDR_VEC +- || GET_CODE (nextbody) == ADDR_DIFF_VEC) +- { +- log = ADDR_VEC_ALIGN (next); +- if (max_log < log) +- { +- max_log = log; +- max_skip = LABEL_ALIGN_MAX_SKIP; +- } +- } +- } ++ if ((JUMP_TABLES_IN_TEXT_SECTION ++ || readonly_data_section == text_section) ++ && next_is_jumptable) ++ { ++ log = ADDR_VEC_ALIGN (next); ++ if (max_log < log) ++ { ++ max_log = log; ++ max_skip = LABEL_ALIGN_MAX_SKIP; ++ } ++ } + LABEL_TO_ALIGNMENT (insn) = max_log; + LABEL_TO_MAX_SKIP (insn) = max_skip; + max_log = 0; +@@ -1390,6 +1390,9 @@ asm_insn_count (rtx body) + else + template = decode_asm_operands (body, NULL, NULL, NULL, NULL, NULL); + ++ if (*template == '\0') ++ return 0; ++ + for (; *template; template++) + if (IS_ASM_LOGICAL_LINE_SEPARATOR (*template, template) + || *template == '\n') +@@ -2003,48 +2006,41 @@ final_scan_insn (rtx insn, FILE *file, i + } + + next = next_nonnote_insn (insn); +- if (next != 0 && JUMP_P (next)) ++ /* If this label is followed by a jump-table, make sure we put ++ the label in the read-only section. Also possibly write the ++ label and jump table together. */ ++ if (next != 0 && JUMP_TABLE_DATA_P (next)) + { +- rtx nextbody = PATTERN (next); +- +- /* If this label is followed by a jump-table, +- make sure we put the label in the read-only section. Also +- possibly write the label and jump table together. */ +- +- if (GET_CODE (nextbody) == ADDR_VEC +- || GET_CODE (nextbody) == ADDR_DIFF_VEC) +- { + #if defined(ASM_OUTPUT_ADDR_VEC) || defined(ASM_OUTPUT_ADDR_DIFF_VEC) +- /* In this case, the case vector is being moved by the +- target, so don't output the label at all. Leave that +- to the back end macros. */ ++ /* In this case, the case vector is being moved by the ++ target, so don't output the label at all. Leave that ++ to the back end macros. */ + #else +- if (! JUMP_TABLES_IN_TEXT_SECTION) +- { +- int log_align; ++ if (! 
JUMP_TABLES_IN_TEXT_SECTION) ++ { ++ int log_align; + +- switch_to_section (targetm.asm_out.function_rodata_section +- (current_function_decl)); ++ switch_to_section (targetm.asm_out.function_rodata_section ++ (current_function_decl)); + + #ifdef ADDR_VEC_ALIGN +- log_align = ADDR_VEC_ALIGN (next); ++ log_align = ADDR_VEC_ALIGN (next); + #else +- log_align = exact_log2 (BIGGEST_ALIGNMENT / BITS_PER_UNIT); ++ log_align = exact_log2 (BIGGEST_ALIGNMENT / BITS_PER_UNIT); + #endif +- ASM_OUTPUT_ALIGN (file, log_align); +- } +- else +- switch_to_section (current_function_section ()); ++ ASM_OUTPUT_ALIGN (file, log_align); ++ } ++ else ++ switch_to_section (current_function_section ()); + + #ifdef ASM_OUTPUT_CASE_LABEL +- ASM_OUTPUT_CASE_LABEL (file, "L", CODE_LABEL_NUMBER (insn), +- next); ++ ASM_OUTPUT_CASE_LABEL (file, "L", CODE_LABEL_NUMBER (insn), ++ next); + #else +- targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (insn)); ++ targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (insn)); + #endif + #endif +- break; +- } ++ break; + } + if (LABEL_ALT_ENTRY_P (insn)) + output_alternate_entry_point (file, insn); +--- a/gcc/fold-const.c ++++ b/gcc/fold-const.c +@@ -2290,7 +2290,24 @@ fold_convert_const_real_from_real (tree + real_convert (&value, TYPE_MODE (type), &TREE_REAL_CST (arg1)); + t = build_real (type, value); + +- TREE_OVERFLOW (t) = TREE_OVERFLOW (arg1); ++ /* If converting an infinity or NAN to a representation that doesn't ++ have one, set the overflow bit so that we can produce some kind of ++ error message at the appropriate point if necessary. It's not the ++ most user-friendly message, but it's better than nothing. */ ++ if (REAL_VALUE_ISINF (TREE_REAL_CST (arg1)) ++ && !MODE_HAS_INFINITIES (TYPE_MODE (type))) ++ TREE_OVERFLOW (t) = 1; ++ else if (REAL_VALUE_ISNAN (TREE_REAL_CST (arg1)) ++ && !MODE_HAS_NANS (TYPE_MODE (type))) ++ TREE_OVERFLOW (t) = 1; ++ /* Regular overflow, conversion produced an infinity in a mode that ++ can't represent them. */ ++ else if (!MODE_HAS_INFINITIES (TYPE_MODE (type)) ++ && REAL_VALUE_ISINF (value) ++ && !REAL_VALUE_ISINF (TREE_REAL_CST (arg1))) ++ TREE_OVERFLOW (t) = 1; ++ else ++ TREE_OVERFLOW (t) = TREE_OVERFLOW (arg1); + return t; + } + +--- a/gcc/function.c ++++ b/gcc/function.c +@@ -73,6 +73,10 @@ along with GCC; see the file COPYING3. + #define LOCAL_ALIGNMENT(TYPE, ALIGNMENT) ALIGNMENT + #endif + ++#ifndef DATA_ALIGNMENT ++#define DATA_ALIGNMENT(TYPE, ALIGNMENT) ALIGNMENT ++#endif ++ + #ifndef STACK_ALIGNMENT_NEEDED + #define STACK_ALIGNMENT_NEEDED 1 + #endif +@@ -419,7 +423,7 @@ assign_stack_local_1 (enum machine_mode + stack slot. */ + type = lang_hooks.types.type_for_mode (mode, 0); + if (type) +- alignment = LOCAL_ALIGNMENT (type, alignment); ++ alignment = calculate_local_alignment (type, alignment); + + alignment /= BITS_PER_UNIT; + } +@@ -625,7 +629,7 @@ assign_stack_temp_for_type (enum machine + type = lang_hooks.types.type_for_mode (mode, 0); + + if (type) +- align = LOCAL_ALIGNMENT (type, align); ++ align = calculate_local_alignment (type, align); + + /* Try to find an available, already-allocated temporary of the proper + mode which meets the size and alignment requirements. 
Choose the +@@ -1530,6 +1534,7 @@ instantiate_virtual_regs_in_insn (rtx in + } + x = simplify_gen_subreg (recog_data.operand_mode[i], new, + GET_MODE (new), SUBREG_BYTE (x)); ++ gcc_assert (x); + break; + + default: +@@ -1845,6 +1850,9 @@ aggregate_value_p (const_tree exp, const + bool + use_register_for_decl (const_tree decl) + { ++ if (!targetm.calls.allocate_stack_slots_for_args()) ++ return true; ++ + /* Honor volatile. */ + if (TREE_SIDE_EFFECTS (decl)) + return false; +@@ -2425,6 +2433,30 @@ assign_parm_adjust_entry_rtl (struct ass + data->entry_parm = entry_parm; + } + ++/* A subroutine of assign_parms. Reconstitute any values which were ++ passed in multiple registers and would fit in a single register. */ ++ ++static void ++assign_parm_remove_parallels (struct assign_parm_data_one *data) ++{ ++ rtx entry_parm = data->entry_parm; ++ ++ /* Convert the PARALLEL to a REG of the same mode as the parallel. ++ This can be done with register operations rather than on the ++ stack, even if we will store the reconstituted parameter on the ++ stack later. */ ++ if (GET_CODE (entry_parm) == PARALLEL ++ && data->passed_mode != BLKmode) ++ { ++ rtx parmreg = gen_reg_rtx (GET_MODE (entry_parm)); ++ emit_group_store (parmreg, entry_parm, NULL_TREE, ++ GET_MODE_SIZE (GET_MODE (entry_parm))); ++ entry_parm = parmreg; ++ } ++ ++ data->entry_parm = entry_parm; ++} ++ + /* A subroutine of assign_parms. Adjust DATA->STACK_RTL such that it's + always valid and properly aligned. */ + +@@ -2470,8 +2502,6 @@ assign_parm_setup_block_p (struct assign + { + if (data->nominal_mode == BLKmode) + return true; +- if (GET_CODE (data->entry_parm) == PARALLEL) +- return true; + + #ifdef BLOCK_REG_PADDING + /* Only assign_parm_setup_block knows how to deal with register arguments +@@ -2497,59 +2527,10 @@ assign_parm_setup_block (struct assign_p + rtx stack_parm = data->stack_parm; + HOST_WIDE_INT size; + HOST_WIDE_INT size_stored; +- rtx orig_entry_parm = entry_parm; + + if (GET_CODE (entry_parm) == PARALLEL) + entry_parm = emit_group_move_into_temps (entry_parm); + +- /* If we've a non-block object that's nevertheless passed in parts, +- reconstitute it in register operations rather than on the stack. */ +- if (GET_CODE (entry_parm) == PARALLEL +- && data->nominal_mode != BLKmode) +- { +- rtx elt0 = XEXP (XVECEXP (orig_entry_parm, 0, 0), 0); +- +- if ((XVECLEN (entry_parm, 0) > 1 +- || hard_regno_nregs[REGNO (elt0)][GET_MODE (elt0)] > 1) +- && use_register_for_decl (parm)) +- { +- rtx parmreg = gen_reg_rtx (data->nominal_mode); +- +- push_to_sequence2 (all->first_conversion_insn, +- all->last_conversion_insn); +- +- /* For values returned in multiple registers, handle possible +- incompatible calls to emit_group_store. +- +- For example, the following would be invalid, and would have to +- be fixed by the conditional below: +- +- emit_group_store ((reg:SF), (parallel:DF)) +- emit_group_store ((reg:SI), (parallel:DI)) +- +- An example of this are doubles in e500 v2: +- (parallel:DF (expr_list (reg:SI) (const_int 0)) +- (expr_list (reg:SI) (const_int 4))). 
*/ +- if (data->nominal_mode != data->passed_mode) +- { +- rtx t = gen_reg_rtx (GET_MODE (entry_parm)); +- emit_group_store (t, entry_parm, NULL_TREE, +- GET_MODE_SIZE (GET_MODE (entry_parm))); +- convert_move (parmreg, t, 0); +- } +- else +- emit_group_store (parmreg, entry_parm, data->nominal_type, +- int_size_in_bytes (data->nominal_type)); +- +- all->first_conversion_insn = get_insns (); +- all->last_conversion_insn = get_last_insn (); +- end_sequence (); +- +- SET_DECL_RTL (parm, parmreg); +- return; +- } +- } +- + size = int_size_in_bytes (data->passed_type); + size_stored = CEIL_ROUND (size, UNITS_PER_WORD); + if (stack_parm == 0) +@@ -2714,6 +2695,8 @@ assign_parm_setup_reg (struct assign_par + else + SET_DECL_RTL (parm, parmreg); + ++ assign_parm_remove_parallels (data); ++ + /* Copy the value into the register. */ + if (data->nominal_mode != data->passed_mode + || promoted_nominal_mode != data->promoted_mode) +@@ -2876,6 +2859,8 @@ assign_parm_setup_stack (struct assign_p + execution. */ + bool to_conversion = false; + ++ assign_parm_remove_parallels (data); ++ + if (data->promoted_mode != data->nominal_mode) + { + /* Conversion is required. */ +@@ -5560,6 +5545,77 @@ current_function_assembler_name (void) + { + return IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (cfun->decl)); + } ++ ++/* Helper function for below. This function adjusts alignments as ++ appropriate according to the setting of -falign-arrays. If that is ++ specified then the minimum alignment for array variables is set to be ++ the largest power of two less than or equal to their total storage size, ++ or the biggest alignment used on the machine, whichever is smaller. */ ++ ++static unsigned int ++alignment_for_aligned_arrays (tree ty, unsigned int existing_alignment) ++{ ++ unsigned int min_alignment; ++ tree size; ++ ++ /* Return the existing alignment if not using -falign-arrays or if ++ the type is not an array type. */ ++ if (!flag_align_arrays || TREE_CODE (ty) != ARRAY_TYPE) ++ return existing_alignment; ++ ++ /* Extract the total storage size of the array in bits. */ ++ size = TYPE_SIZE (ty); ++ gcc_assert (size); ++ ++ /* At least for variable-length arrays, TREE_CODE (size) might not be an ++ integer constant; check it now. If it is not, give the array at ++ least BIGGEST_ALIGNMENT just to be safe. Furthermore, we assume that ++ alignments always fit into a host integer. So if we can't fit the ++ size of the array in bits into a host integer, it must also be large ++ enough to deserve at least BIGGEST_ALIGNMENT (see below). */ ++ if (TREE_CODE (size) != INTEGER_CST || !host_integerp (size, 1)) ++ min_alignment = BIGGEST_ALIGNMENT; ++ else ++ { ++ unsigned HOST_WIDE_INT bits = TREE_INT_CST_LOW (size); ++ bits = (bits ? bits : 1); ++ ++ /* An array with size greater than BIGGEST_ALIGNMENT is assigned ++ at least that alignment. In all other cases the minimum ++ alignment of the array is set to be the largest power of two ++ less than or equal to the total storage size of the array. ++ We assume that BIGGEST_ALIGNMENT fits in "unsigned int"; thus, ++ the shift below will not overflow. */ ++ if (bits >= BIGGEST_ALIGNMENT) ++ min_alignment = BIGGEST_ALIGNMENT; ++ else ++ min_alignment = 1 << (floor_log2 (bits)); ++ } ++ ++ /* Having computed the minimum permissible alignment, enlarge it ++ if EXISTING_ALIGNMENT is greater. */ ++ return MAX (min_alignment, existing_alignment); ++} ++ ++/* Return the alignment in bits to be used for a local variable ++ of type TY whose usual alignment would be EXISTING_ALIGNMENT. 
*/ ++ ++unsigned int ++calculate_local_alignment (tree ty, unsigned int existing_alignment) ++{ ++ return alignment_for_aligned_arrays (ty, ++ LOCAL_ALIGNMENT (ty, existing_alignment)); ++} ++ ++/* Return the alignment in bits to be used for a global variable ++ of type TY whose usual alignment would be EXISTING_ALIGNMENT. */ ++ ++unsigned int ++calculate_global_alignment (tree ty, unsigned int existing_alignment) ++{ ++ return alignment_for_aligned_arrays (ty, ++ DATA_ALIGNMENT (ty, existing_alignment)); ++} + + + static unsigned int +--- a/gcc/function.h ++++ b/gcc/function.h +@@ -594,4 +594,10 @@ extern bool reference_callee_copied (CUM + extern void used_types_insert (tree); + + extern int get_next_funcdef_no (void); ++ ++extern unsigned int calculate_local_alignment ( ++ tree ty, unsigned int existing_alignment); ++extern unsigned int calculate_global_alignment ( ++ tree ty, unsigned int existing_alignment); ++ + #endif /* GCC_FUNCTION_H */ +--- a/gcc/gcc.c ++++ b/gcc/gcc.c +@@ -155,6 +155,8 @@ static const char *print_prog_name = NUL + + static int print_multi_directory; + ++static int print_sysroot; ++ + /* Flag saying to print the relative path we'd use to + find OS libraries given the current compiler flags. */ + +@@ -643,8 +645,32 @@ proper position among the other output f + + /* config.h can define SWITCHES_NEED_SPACES to control which options + require spaces between the option and the argument. */ ++/* GCC Bugzilla PR11810 indicates that GCC does not correctly handle ++ "-ofoo.o", in that it records "-ofoo.o" as a temporary file to ++ delete, rather than "foo.o". ++ ++ Unfortunately, Eclipse's makefile generators use the "-ofoo.o" ++ form. See also CS Issue #3433. So, although most users probably ++ use "-o foo.o", the "-ofoo.o" form is used in practice. ++ ++ See this email thread for additional information: ++ ++ http://gcc.gnu.org/ml/gcc/2008-07/msg00395.html ++ ++ Therefore, we define SWITCHES_NEED_SPACES to include "o" by ++ default. This causes "-ofoo.o" to be split into "-o foo.o" during ++ the initial processing of the command-line, before being seen by ++ the specs machinery. ++ ++ A risk of this change is that tools which *require* the "-ofoo.o" ++ form will no longer work. However, we know of no such tools, and ++ they would not have worked with the "-o foo.o" form anyhow. ++ ++ If this general strategy is acceptable upstream, the best approach ++ might be simply to eliminate this macro, since the only definitions ++ in target files are also to the value "o". */ + #ifndef SWITCHES_NEED_SPACES +-#define SWITCHES_NEED_SPACES "" ++#define SWITCHES_NEED_SPACES "o" + #endif + + /* config.h can define ENDFILE_SPEC to override the default crtn files. 
*/ +@@ -720,6 +746,8 @@ proper position among the other output f + %{!fsyntax-only:%{!c:%{!M:%{!MM:%{!E:%{!S:\ + %(linker) %l " LINK_PIE_SPEC "%X %{o*} %{A} %{d} %{e*} %{m} %{N} %{n} %{r}\ + %{s} %{t} %{u*} %{x} %{z} %{Z} %{!A:%{!nostdlib:%{!nostartfiles:%S}}}\ ++ %{Wno-poison-system-directories:--no-poison-system-directories}\ ++ %{Werror=poison-system-directories:--error-poison-system-directories}\ + %{static:} %{L*} %(mfwrap) %(link_libgcc) %o\ + %{fopenmp|ftree-parallelize-loops=*:%:include(libgomp.spec)%(link_gomp)} %(mflib)\ + %{fprofile-arcs|fprofile-generate|coverage:-lgcov}\ +@@ -874,7 +902,7 @@ static const char *const multilib_defaul + #endif + + static const char *const driver_self_specs[] = { +- DRIVER_SELF_SPECS, GOMP_SELF_SPECS ++ DRIVER_SELF_SPECS, CONFIGURE_SPECS, GOMP_SELF_SPECS + }; + + #ifndef OPTION_DEFAULT_SPECS +@@ -1150,6 +1178,7 @@ static const struct option_map option_ma + {"--print-multi-directory", "-print-multi-directory", 0}, + {"--print-multi-os-directory", "-print-multi-os-directory", 0}, + {"--print-prog-name", "-print-prog-name=", "aj"}, ++ {"--print-sysroot", "-print-sysroot", 0}, + {"--print-sysroot-headers-suffix", "-print-sysroot-headers-suffix", 0}, + {"--profile", "-p", 0}, + {"--profile-blocks", "-a", 0}, +@@ -3224,6 +3253,7 @@ display_help (void) + -print-multi-lib Display the mapping between command line options and\n\ + multiple library search directories\n"), stdout); + fputs (_(" -print-multi-os-directory Display the relative path to OS libraries\n"), stdout); ++ fputs (_(" -print-sysroot Display the target libraries directory\n"), stdout); + fputs (_(" -print-sysroot-headers-suffix Display the sysroot suffix used to find headers\n"), stdout); + fputs (_(" -Wa, Pass comma-separated on to the assembler\n"), stdout); + fputs (_(" -Wp, Pass comma-separated on to the preprocessor\n"), stdout); +@@ -3668,6 +3698,8 @@ warranty; not even for MERCHANTABILITY o + print_multi_lib = 1; + else if (! strcmp (argv[i], "-print-multi-directory")) + print_multi_directory = 1; ++ else if (! strcmp (argv[i], "-print-sysroot")) ++ print_sysroot = 1; + else if (! strcmp (argv[i], "-print-multi-os-directory")) + print_multi_os_directory = 1; + else if (! strcmp (argv[i], "-print-sysroot-headers-suffix")) +@@ -4099,6 +4131,8 @@ warranty; not even for MERCHANTABILITY o + ; + else if (! strcmp (argv[i], "-print-multi-directory")) + ; ++ else if (! strcmp (argv[i], "-print-sysroot")) ++ ; + else if (! strcmp (argv[i], "-print-multi-os-directory")) + ; + else if (! strcmp (argv[i], "-print-sysroot-headers-suffix")) +@@ -4518,28 +4552,51 @@ do_self_spec (const char *spec) + + if (argbuf_index > 0) + { +- int i, first; ++ int i, first, n; + + first = n_switches; +- n_switches += argbuf_index; +- switches = xrealloc (switches, +- sizeof (struct switchstr) * (n_switches + 1)); +- +- switches[n_switches] = switches[first]; ++ n = n_switches + argbuf_index; ++ switches = xrealloc (switches, sizeof (struct switchstr) * (n + 1)); ++ switches[n] = switches[first]; + for (i = 0; i < argbuf_index; i++) + { + struct switchstr *sw; ++ const char *p = &argbuf[i][1]; ++ int c = *p; + + /* Each switch should start with '-'. */ + if (argbuf[i][0] != '-') + fatal ("switch '%s' does not start with '-'", argbuf[i]); + +- sw = &switches[i + first]; ++ sw = &switches[n_switches]; + sw->part1 = &argbuf[i][1]; + sw->args = 0; + sw->live_cond = 0; + sw->validated = 0; + sw->ordering = 0; ++ ++ /* Deal with option arguments in separate argv elements. 
*/ ++ if ((SWITCH_TAKES_ARG (c) > (p[1] != 0)) ++ || WORD_SWITCH_TAKES_ARG (p)) ++ { ++ int j = 0; ++ int n_args = WORD_SWITCH_TAKES_ARG (p); ++ ++ if (n_args == 0) ++ { ++ /* Count only the option arguments in separate argv elements. */ ++ n_args = SWITCH_TAKES_ARG (c) - (p[1] != 0); ++ } ++ if (i + n_args >= argbuf_index) ++ fatal ("argument to '-%s' is missing", p); ++ switches[n_switches].args ++ = XNEWVEC (const char *, n_args + 1); ++ while (j < n_args) ++ switches[n_switches].args[j++] = argbuf[++i]; ++ /* Null-terminate the vector. */ ++ switches[n_switches].args[j] = 0; ++ } ++ n_switches++; + } + } + } +@@ -6455,6 +6512,18 @@ main (int argc, char **argv) + return (0); + } + ++ if (print_sysroot) ++ { ++ if (target_system_root) ++ { ++ if (target_sysroot_suffix) ++ printf ("%s%s\n", target_system_root, target_sysroot_suffix); ++ else ++ printf ("%s\n", target_system_root); ++ } ++ return (0); ++ } ++ + if (print_multi_os_directory) + { + if (multilib_os_dir == NULL) +@@ -7949,7 +8018,7 @@ include_spec_function (int argc, const c + if (argc != 1) + abort (); + +- file = find_a_file (&startfile_prefixes, argv[0], R_OK, 0); ++ file = find_a_file (&startfile_prefixes, argv[0], R_OK, true); + read_specs (file ? file : argv[0], FALSE); + + return NULL; +--- a/gcc/gengtype-lex.c ++++ /dev/null +@@ -1,2636 +0,0 @@ +-#line 2 "gengtype-lex.c" +- +-#line 4 "gengtype-lex.c" +- +-#define YY_INT_ALIGNED short int +- +-/* A lexical scanner generated by flex */ +- +-#define FLEX_SCANNER +-#define YY_FLEX_MAJOR_VERSION 2 +-#define YY_FLEX_MINOR_VERSION 5 +-#define YY_FLEX_SUBMINOR_VERSION 35 +-#if YY_FLEX_SUBMINOR_VERSION > 0 +-#define FLEX_BETA +-#endif +- +-/* First, we deal with platform-specific or compiler-specific issues. */ +- +-/* begin standard C headers. */ +-#include +-#include +-#include +-#include +- +-/* end standard C headers. */ +- +-/* flex integer type definitions */ +- +-#ifndef FLEXINT_H +-#define FLEXINT_H +- +-/* C99 systems have . Non-C99 systems may or may not. */ +- +-#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L +- +-/* C99 says to define __STDC_LIMIT_MACROS before including stdint.h, +- * if you want the limit (max/min) macros for int types. +- */ +-#ifndef __STDC_LIMIT_MACROS +-#define __STDC_LIMIT_MACROS 1 +-#endif +- +-#include +-typedef int8_t flex_int8_t; +-typedef uint8_t flex_uint8_t; +-typedef int16_t flex_int16_t; +-typedef uint16_t flex_uint16_t; +-typedef int32_t flex_int32_t; +-typedef uint32_t flex_uint32_t; +-#else +-typedef signed char flex_int8_t; +-typedef short int flex_int16_t; +-typedef int flex_int32_t; +-typedef unsigned char flex_uint8_t; +-typedef unsigned short int flex_uint16_t; +-typedef unsigned int flex_uint32_t; +-#endif /* ! C99 */ +- +-/* Limits of integral types. */ +-#ifndef INT8_MIN +-#define INT8_MIN (-128) +-#endif +-#ifndef INT16_MIN +-#define INT16_MIN (-32767-1) +-#endif +-#ifndef INT32_MIN +-#define INT32_MIN (-2147483647-1) +-#endif +-#ifndef INT8_MAX +-#define INT8_MAX (127) +-#endif +-#ifndef INT16_MAX +-#define INT16_MAX (32767) +-#endif +-#ifndef INT32_MAX +-#define INT32_MAX (2147483647) +-#endif +-#ifndef UINT8_MAX +-#define UINT8_MAX (255U) +-#endif +-#ifndef UINT16_MAX +-#define UINT16_MAX (65535U) +-#endif +-#ifndef UINT32_MAX +-#define UINT32_MAX (4294967295U) +-#endif +- +-#endif /* ! FLEXINT_H */ +- +-#ifdef __cplusplus +- +-/* The "const" storage-class-modifier is valid. */ +-#define YY_USE_CONST +- +-#else /* ! __cplusplus */ +- +-/* C99 requires __STDC__ to be defined as 1. 
*/ +-#if defined (__STDC__) +- +-#define YY_USE_CONST +- +-#endif /* defined (__STDC__) */ +-#endif /* ! __cplusplus */ +- +-#ifdef YY_USE_CONST +-#define yyconst const +-#else +-#define yyconst +-#endif +- +-/* Returned upon end-of-file. */ +-#define YY_NULL 0 +- +-/* Promotes a possibly negative, possibly signed char to an unsigned +- * integer for use as an array index. If the signed char is negative, +- * we want to instead treat it as an 8-bit unsigned char, hence the +- * double cast. +- */ +-#define YY_SC_TO_UI(c) ((unsigned int) (unsigned char) c) +- +-/* Enter a start condition. This macro really ought to take a parameter, +- * but we do it the disgusting crufty way forced on us by the ()-less +- * definition of BEGIN. +- */ +-#define BEGIN (yy_start) = 1 + 2 * +- +-/* Translate the current start state into a value that can be later handed +- * to BEGIN to return to the state. The YYSTATE alias is for lex +- * compatibility. +- */ +-#define YY_START (((yy_start) - 1) / 2) +-#define YYSTATE YY_START +- +-/* Action number for EOF rule of a given start state. */ +-#define YY_STATE_EOF(state) (YY_END_OF_BUFFER + state + 1) +- +-/* Special action meaning "start processing a new file". */ +-#define YY_NEW_FILE yyrestart(yyin ) +- +-#define YY_END_OF_BUFFER_CHAR 0 +- +-/* Size of default input buffer. */ +-#ifndef YY_BUF_SIZE +-#define YY_BUF_SIZE 16384 +-#endif +- +-/* The state buf must be large enough to hold one state per character in the main buffer. +- */ +-#define YY_STATE_BUF_SIZE ((YY_BUF_SIZE + 2) * sizeof(yy_state_type)) +- +-#ifndef YY_TYPEDEF_YY_BUFFER_STATE +-#define YY_TYPEDEF_YY_BUFFER_STATE +-typedef struct yy_buffer_state *YY_BUFFER_STATE; +-#endif +- +-extern int yyleng; +- +-extern FILE *yyin, *yyout; +- +-#define EOB_ACT_CONTINUE_SCAN 0 +-#define EOB_ACT_END_OF_FILE 1 +-#define EOB_ACT_LAST_MATCH 2 +- +- #define YY_LESS_LINENO(n) +- +-/* Return all but the first "n" matched characters back to the input stream. */ +-#define yyless(n) \ +- do \ +- { \ +- /* Undo effects of setting up yytext. */ \ +- int yyless_macro_arg = (n); \ +- YY_LESS_LINENO(yyless_macro_arg);\ +- *yy_cp = (yy_hold_char); \ +- YY_RESTORE_YY_MORE_OFFSET \ +- (yy_c_buf_p) = yy_cp = yy_bp + yyless_macro_arg - YY_MORE_ADJ; \ +- YY_DO_BEFORE_ACTION; /* set up yytext again */ \ +- } \ +- while ( 0 ) +- +-#define unput(c) yyunput( c, (yytext_ptr) ) +- +-#ifndef YY_TYPEDEF_YY_SIZE_T +-#define YY_TYPEDEF_YY_SIZE_T +-typedef size_t yy_size_t; +-#endif +- +-#ifndef YY_STRUCT_YY_BUFFER_STATE +-#define YY_STRUCT_YY_BUFFER_STATE +-struct yy_buffer_state +- { +- FILE *yy_input_file; +- +- char *yy_ch_buf; /* input buffer */ +- char *yy_buf_pos; /* current position in input buffer */ +- +- /* Size of input buffer in bytes, not including room for EOB +- * characters. +- */ +- yy_size_t yy_buf_size; +- +- /* Number of characters read into yy_ch_buf, not including EOB +- * characters. +- */ +- int yy_n_chars; +- +- /* Whether we "own" the buffer - i.e., we know we created it, +- * and can realloc() it to grow it, and should free() it to +- * delete it. +- */ +- int yy_is_our_buffer; +- +- /* Whether this is an "interactive" input source; if so, and +- * if we're using stdio for input, then we want to use getc() +- * instead of fread(), to make sure we stop fetching input after +- * each newline. +- */ +- int yy_is_interactive; +- +- /* Whether we're considered to be at the beginning of a line. +- * If so, '^' rules will be active on the next match, otherwise +- * not. 
+- */ +- int yy_at_bol; +- +- int yy_bs_lineno; /**< The line count. */ +- int yy_bs_column; /**< The column count. */ +- +- /* Whether to try to fill the input buffer when we reach the +- * end of it. +- */ +- int yy_fill_buffer; +- +- int yy_buffer_status; +- +-#define YY_BUFFER_NEW 0 +-#define YY_BUFFER_NORMAL 1 +- /* When an EOF's been seen but there's still some text to process +- * then we mark the buffer as YY_EOF_PENDING, to indicate that we +- * shouldn't try reading from the input source any more. We might +- * still have a bunch of tokens to match, though, because of +- * possible backing-up. +- * +- * When we actually see the EOF, we change the status to "new" +- * (via yyrestart()), so that the user can continue scanning by +- * just pointing yyin at a new input file. +- */ +-#define YY_BUFFER_EOF_PENDING 2 +- +- }; +-#endif /* !YY_STRUCT_YY_BUFFER_STATE */ +- +-/* Stack of input buffers. */ +-static size_t yy_buffer_stack_top = 0; /**< index of top of stack. */ +-static size_t yy_buffer_stack_max = 0; /**< capacity of stack. */ +-static YY_BUFFER_STATE * yy_buffer_stack = 0; /**< Stack as an array. */ +- +-/* We provide macros for accessing buffer states in case in the +- * future we want to put the buffer states in a more general +- * "scanner state". +- * +- * Returns the top of the stack, or NULL. +- */ +-#define YY_CURRENT_BUFFER ( (yy_buffer_stack) \ +- ? (yy_buffer_stack)[(yy_buffer_stack_top)] \ +- : NULL) +- +-/* Same as previous macro, but useful when we know that the buffer stack is not +- * NULL or when we need an lvalue. For internal use only. +- */ +-#define YY_CURRENT_BUFFER_LVALUE (yy_buffer_stack)[(yy_buffer_stack_top)] +- +-/* yy_hold_char holds the character lost when yytext is formed. */ +-static char yy_hold_char; +-static int yy_n_chars; /* number of characters read into yy_ch_buf */ +-int yyleng; +- +-/* Points to current character in buffer. */ +-static char *yy_c_buf_p = (char *) 0; +-static int yy_init = 0; /* whether we need to initialize */ +-static int yy_start = 0; /* start state number */ +- +-/* Flag which is used to allow yywrap()'s to do buffer switches +- * instead of setting up a fresh yyin. A bit of a hack ... +- */ +-static int yy_did_buffer_switch_on_eof; +- +-void yyrestart (FILE *input_file ); +-void yy_switch_to_buffer (YY_BUFFER_STATE new_buffer ); +-YY_BUFFER_STATE yy_create_buffer (FILE *file,int size ); +-void yy_delete_buffer (YY_BUFFER_STATE b ); +-void yy_flush_buffer (YY_BUFFER_STATE b ); +-void yypush_buffer_state (YY_BUFFER_STATE new_buffer ); +-void yypop_buffer_state (void ); +- +-static void yyensure_buffer_stack (void ); +-static void yy_load_buffer_state (void ); +-static void yy_init_buffer (YY_BUFFER_STATE b,FILE *file ); +- +-#define YY_FLUSH_BUFFER yy_flush_buffer(YY_CURRENT_BUFFER ) +- +-YY_BUFFER_STATE yy_scan_buffer (char *base,yy_size_t size ); +-YY_BUFFER_STATE yy_scan_string (yyconst char *yy_str ); +-YY_BUFFER_STATE yy_scan_bytes (yyconst char *bytes,int len ); +- +-void *yyalloc (yy_size_t ); +-void *yyrealloc (void *,yy_size_t ); +-void yyfree (void * ); +- +-#define yy_new_buffer yy_create_buffer +- +-#define yy_set_interactive(is_interactive) \ +- { \ +- if ( ! YY_CURRENT_BUFFER ){ \ +- yyensure_buffer_stack (); \ +- YY_CURRENT_BUFFER_LVALUE = \ +- yy_create_buffer(yyin,YY_BUF_SIZE ); \ +- } \ +- YY_CURRENT_BUFFER_LVALUE->yy_is_interactive = is_interactive; \ +- } +- +-#define yy_set_bol(at_bol) \ +- { \ +- if ( ! 
YY_CURRENT_BUFFER ){\ +- yyensure_buffer_stack (); \ +- YY_CURRENT_BUFFER_LVALUE = \ +- yy_create_buffer(yyin,YY_BUF_SIZE ); \ +- } \ +- YY_CURRENT_BUFFER_LVALUE->yy_at_bol = at_bol; \ +- } +- +-#define YY_AT_BOL() (YY_CURRENT_BUFFER_LVALUE->yy_at_bol) +- +-/* Begin user sect3 */ +- +-#define yywrap(n) 1 +-#define YY_SKIP_YYWRAP +- +-typedef unsigned char YY_CHAR; +- +-FILE *yyin = (FILE *) 0, *yyout = (FILE *) 0; +- +-typedef int yy_state_type; +- +-extern int yylineno; +- +-int yylineno = 1; +- +-extern char *yytext; +-#define yytext_ptr yytext +- +-static yy_state_type yy_get_previous_state (void ); +-static yy_state_type yy_try_NUL_trans (yy_state_type current_state ); +-static int yy_get_next_buffer (void ); +-static void yy_fatal_error (yyconst char msg[] ); +- +-/* Done after the current pattern has been matched and before the +- * corresponding action - sets up yytext. +- */ +-#define YY_DO_BEFORE_ACTION \ +- (yytext_ptr) = yy_bp; \ +- yyleng = (size_t) (yy_cp - yy_bp); \ +- (yy_hold_char) = *yy_cp; \ +- *yy_cp = '\0'; \ +- (yy_c_buf_p) = yy_cp; +- +-#define YY_NUM_RULES 49 +-#define YY_END_OF_BUFFER 50 +-/* This struct is not used in this scanner, +- but its presence is necessary. */ +-struct yy_trans_info +- { +- flex_int32_t yy_verify; +- flex_int32_t yy_nxt; +- }; +-static yyconst flex_int16_t yy_accept[445] = +- { 0, +- 0, 0, 0, 0, 0, 0, 0, 0, 50, 36, +- 36, 33, 45, 36, 45, 34, 36, 36, 34, 34, +- 34, 34, 34, 31, 10, 10, 31, 29, 31, 31, +- 31, 20, 31, 31, 31, 31, 31, 31, 31, 31, +- 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, +- 31, 10, 31, 41, 39, 46, 46, 0, 0, 0, +- 37, 0, 0, 0, 38, 32, 34, 0, 0, 0, +- 0, 0, 0, 0, 0, 0, 34, 34, 34, 34, +- 34, 10, 0, 25, 0, 0, 0, 0, 9, 20, +- 24, 0, 0, 0, 0, 0, 0, 0, 0, 26, +- +- 11, 0, 0, 0, 0, 0, 0, 0, 0, 0, +- 0, 0, 0, 0, 0, 10, 0, 0, 0, 0, +- 42, 44, 43, 0, 35, 0, 0, 0, 0, 0, +- 0, 34, 34, 34, 34, 34, 34, 27, 28, 0, +- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +- 0, 0, 0, 30, 0, 0, 0, 0, 0, 0, +- 0, 0, 0, 0, 34, 34, 34, 34, 34, 34, +- 0, 0, 0, 13, 0, 14, 0, 0, 0, 0, +- 22, 22, 0, 0, 0, 0, 0, 0, 0, 0, +- +- 0, 0, 0, 48, 0, 0, 0, 0, 0, 0, +- 0, 34, 34, 34, 34, 34, 34, 0, 0, 0, +- 0, 0, 17, 0, 0, 0, 0, 0, 0, 0, +- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +- 0, 0, 0, 0, 0, 0, 0, 0, 0, 34, +- 34, 34, 34, 34, 3, 0, 0, 0, 0, 12, +- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +- 0, 0, 0, 0, 0, 0, 15, 0, 0, 0, +- 0, 0, 0, 0, 34, 4, 5, 2, 34, 0, +- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +- +- 0, 0, 0, 0, 0, 0, 0, 0, 0, 16, +- 0, 0, 0, 0, 34, 1, 0, 0, 0, 0, +- 0, 0, 0, 0, 0, 22, 22, 0, 0, 0, +- 0, 0, 0, 0, 0, 0, 0, 34, 34, 34, +- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +- 21, 0, 0, 0, 0, 0, 0, 34, 7, 6, +- 0, 0, 0, 0, 0, 0, 0, 0, 0, 18, +- 0, 0, 0, 34, 0, 0, 0, 0, 0, 0, +- 0, 0, 19, 0, 0, 47, 34, 0, 0, 0, +- 0, 0, 0, 0, 0, 0, 0, 34, 0, 0, +- +- 0, 0, 0, 0, 0, 0, 34, 0, 24, 24, +- 0, 0, 0, 0, 0, 0, 0, 34, 0, 0, +- 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, +- 0, 23, 0, 0, 0, 0, 0, 40, 0, 0, +- 0, 0, 0, 0 +- } ; +- +-static yyconst flex_int32_t yy_ec[256] = +- { 0, +- 1, 1, 1, 1, 1, 1, 1, 1, 2, 3, +- 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, +- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +- 1, 2, 1, 4, 5, 1, 6, 1, 7, 8, +- 9, 10, 1, 6, 6, 11, 12, 13, 13, 13, +- 13, 13, 13, 13, 13, 13, 13, 6, 6, 6, +- 6, 6, 1, 1, 14, 15, 16, 17, 18, 19, +- 20, 21, 22, 23, 23, 24, 25, 26, 27, 28, +- 23, 29, 30, 31, 32, 33, 34, 23, 35, 23, +- 36, 37, 38, 1, 39, 1, 40, 41, 42, 43, +- +- 44, 45, 46, 47, 48, 49, 49, 50, 51, 52, +- 53, 54, 49, 55, 56, 57, 58, 59, 49, 60, +- 61, 62, 6, 6, 6, 1, 1, 1, 1, 1, +- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +- 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +- +- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +- 1, 1, 1, 1, 1 +- } ; +- +-static yyconst flex_int32_t yy_meta[63] = +- { 0, +- 1, 2, 3, 1, 1, 1, 1, 1, 4, 5, +- 1, 1, 6, 7, 7, 7, 7, 7, 7, 7, +- 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, +- 7, 7, 7, 7, 7, 8, 1, 1, 9, 9, +- 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, +- 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, +- 9, 9 +- } ; +- +-static yyconst flex_int16_t yy_base[483] = +- { 0, +- 0, 38, 96, 12, 12, 13, 15, 16, 1028, 1444, +- 32, 51, 20, 990, 1016, 0, 157, 18, 1007, 964, +- 966, 961, 969, 1444, 25, 27, 27, 1444, 983, 1008, +- 1008, 1004, 215, 253, 5, 32, 29, 974, 45, 962, +- 996, 35, 38, 39, 40, 41, 134, 42, 136, 137, +- 138, 75, 996, 0, 1444, 985, 984, 166, 964, 162, +- 1444, 0, 987, 990, 1444, 1444, 0, 186, 165, 974, +- 931, 933, 928, 936, 168, 943, 967, 928, 140, 930, +- 935, 87, 167, 1444, 979, 974, 977, 968, 1444, 950, +- 1444, 935, 934, 145, 52, 46, 148, 165, 922, 1444, +- +- 1444, 152, 156, 155, 170, 173, 175, 182, 183, 185, +- 211, 214, 222, 218, 221, 269, 957, 956, 291, 0, +- 1444, 1444, 1444, 922, 1444, 937, 898, 195, 900, 905, +- 907, 912, 906, 892, 890, 903, 893, 1444, 1444, 209, +- 254, 251, 353, 248, 391, 354, 350, 351, 340, 355, +- 341, 429, 339, 356, 344, 347, 360, 390, 43, 361, +- 391, 395, 429, 1444, 0, 0, 280, 906, 900, 886, +- 884, 897, 872, 876, 890, 867, 873, 878, 876, 866, +- 381, 348, 382, 1444, 384, 1444, 389, 397, 491, 398, +- 1444, 528, 418, 399, 420, 477, 478, 422, 421, 480, +- +- 479, 0, 449, 1444, 884, 861, 867, 872, 870, 860, +- 859, 892, 857, 866, 850, 862, 586, 493, 496, 494, +- 484, 624, 1444, 0, 878, 876, 876, 834, 839, 841, +- 832, 830, 199, 830, 490, 499, 486, 492, 488, 489, +- 662, 0, 863, 828, 837, 821, 833, 0, 832, 859, +- 700, 738, 776, 829, 1444, 431, 258, 437, 515, 1444, +- 846, 844, 841, 817, 829, 809, 319, 815, 813, 478, +- 809, 512, 528, 520, 525, 814, 1444, 0, 833, 0, +- 0, 0, 803, 551, 808, 1444, 1444, 1444, 852, 383, +- 521, 530, 539, 822, 829, 813, 793, 787, 802, 801, +- +- 556, 793, 783, 785, 792, 787, 523, 545, 535, 1444, +- 0, 795, 0, 561, 585, 1444, 555, 343, 581, 584, +- 794, 811, 792, 773, 772, 1444, 0, 771, 783, 772, +- 764, 552, 890, 558, 0, 623, 778, 784, 928, 966, +- 583, 593, 594, 613, 792, 792, 771, 761, 746, 591, +- 1444, 1004, 0, 778, 0, 0, 766, 776, 1444, 1444, +- 620, 621, 626, 627, 653, 777, 769, 775, 1042, 1444, +- 0, 772, 787, 767, 556, 577, 615, 649, 629, 762, +- 753, 774, 1444, 0, 763, 1444, 773, 632, 659, 662, +- 656, 654, 754, 742, 753, 0, 754, 729, 665, 688, +- +- 667, 744, 742, 683, 0, 695, 692, 689, 715, 722, +- 699, 711, 701, 666, 673, 0, 705, 1080, 704, 749, +- 751, 753, 756, 663, 658, 618, 593, 0, 0, 1444, +- 758, 1444, 760, 600, 588, 543, 483, 1444, 439, 386, +- 247, 206, 167, 1444, 1118, 1127, 1136, 1145, 1154, 1158, +- 1167, 1176, 1185, 1194, 1202, 1211, 1220, 1229, 1238, 1247, +- 1256, 1265, 1273, 1282, 1290, 1298, 1306, 1314, 1323, 1331, +- 1340, 1349, 1357, 1365, 1374, 1383, 1392, 1400, 1409, 1417, +- 1426, 1435 +- } ; +- +-static yyconst flex_int16_t yy_def[483] = +- { 0, +- 445, 445, 444, 3, 446, 446, 446, 446, 444, 444, +- 444, 444, 447, 448, 449, 450, 444, 444, 450, 450, +- 450, 450, 450, 444, 444, 444, 451, 444, 452, 444, +- 444, 444, 453, 453, 34, 34, 34, 34, 34, 454, +- 444, 
34, 34, 34, 34, 34, 34, 34, 34, 34, +- 34, 444, 455, 456, 444, 457, 457, 444, 444, 447, +- 444, 447, 444, 448, 444, 444, 450, 444, 444, 444, +- 444, 444, 444, 444, 444, 444, 450, 450, 450, 450, +- 450, 444, 451, 444, 451, 444, 452, 444, 444, 444, +- 444, 34, 34, 34, 34, 34, 34, 34, 454, 444, +- +- 444, 34, 34, 34, 34, 34, 34, 34, 34, 34, +- 34, 34, 34, 34, 34, 444, 455, 455, 444, 458, +- 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, +- 444, 450, 450, 450, 450, 450, 450, 444, 444, 34, +- 34, 34, 453, 34, 453, 34, 34, 34, 34, 34, +- 34, 453, 34, 34, 34, 34, 34, 34, 34, 34, +- 34, 34, 119, 444, 119, 459, 444, 444, 444, 444, +- 444, 444, 444, 444, 450, 450, 450, 450, 450, 450, +- 34, 34, 34, 444, 34, 444, 34, 34, 453, 34, +- 444, 444, 34, 34, 34, 34, 34, 34, 34, 34, +- +- 34, 460, 444, 444, 444, 444, 444, 444, 444, 444, +- 444, 450, 450, 450, 450, 450, 450, 34, 34, 34, +- 34, 453, 444, 192, 444, 444, 444, 444, 444, 444, +- 444, 444, 444, 444, 34, 34, 34, 34, 34, 34, +- 453, 461, 444, 444, 444, 444, 444, 462, 444, 450, +- 450, 450, 450, 450, 444, 34, 34, 34, 34, 444, +- 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, +- 444, 34, 34, 34, 34, 453, 444, 463, 444, 464, +- 465, 466, 444, 444, 450, 444, 444, 444, 450, 34, +- 34, 34, 34, 444, 444, 444, 444, 444, 444, 444, +- +- 467, 444, 444, 444, 444, 444, 34, 34, 34, 444, +- 468, 444, 469, 444, 450, 444, 34, 34, 34, 34, +- 444, 444, 444, 444, 444, 444, 192, 444, 444, 444, +- 444, 34, 453, 34, 470, 444, 444, 450, 450, 450, +- 34, 34, 34, 34, 444, 444, 444, 444, 444, 34, +- 444, 453, 471, 444, 472, 473, 444, 450, 444, 444, +- 34, 34, 34, 34, 34, 444, 444, 444, 453, 444, +- 474, 444, 444, 450, 34, 34, 34, 34, 34, 444, +- 444, 444, 444, 475, 444, 444, 450, 34, 34, 34, +- 34, 34, 444, 444, 444, 476, 444, 450, 34, 34, +- +- 34, 444, 444, 444, 477, 444, 450, 34, 444, 478, +- 34, 444, 444, 444, 444, 479, 444, 450, 34, 444, +- 478, 478, 480, 444, 444, 444, 444, 481, 482, 444, +- 444, 444, 480, 444, 444, 444, 444, 444, 444, 444, +- 444, 444, 444, 0, 444, 444, 444, 444, 444, 444, +- 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, +- 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, +- 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, +- 444, 444 +- } ; +- +-static yyconst flex_int16_t yy_nxt[1507] = +- { 0, +- 10, 11, 12, 13, 10, 10, 14, 10, 10, 10, +- 10, 15, 10, 52, 55, 55, 53, 55, 55, 75, +- 444, 56, 56, 61, 57, 57, 82, 82, 82, 82, +- 84, 92, 94, 58, 58, 10, 10, 10, 10, 17, +- 12, 13, 18, 10, 14, 10, 10, 10, 10, 15, +- 10, 59, 58, 58, 19, 92, 62, 95, 92, 96, +- 76, 92, 98, 85, 92, 92, 92, 92, 92, 92, +- 59, 92, 92, 10, 10, 10, 116, 82, 92, 117, +- 143, 20, 105, 142, 103, 109, 198, 102, 82, 82, +- 104, 106, 107, 21, 22, 23, 24, 25, 26, 27, +- +- 24, 28, 29, 28, 28, 28, 30, 31, 32, 33, +- 34, 35, 33, 36, 33, 37, 38, 33, 33, 33, +- 33, 33, 33, 33, 33, 33, 33, 33, 39, 33, +- 33, 40, 41, 24, 33, 33, 42, 43, 44, 45, +- 33, 33, 33, 46, 33, 47, 33, 48, 33, 49, +- 33, 50, 33, 51, 33, 33, 33, 33, 68, 58, +- 92, 69, 92, 92, 92, 61, 75, 58, 58, 75, +- 84, 92, 141, 70, 92, 110, 59, 144, 92, 134, +- 145, 92, 92, 112, 113, 59, 108, 68, 58, 115, +- 69, 92, 111, 114, 135, 147, 92, 301, 62, 92, +- +- 71, 92, 70, 85, 146, 59, 148, 76, 92, 92, +- 76, 92, 72, 73, 74, 91, 91, 91, 91, 91, +- 91, 91, 91, 91, 91, 91, 91, 151, 149, 71, +- 150, 152, 181, 153, 170, 92, 301, 92, 154, 155, +- 92, 72, 73, 74, 92, 269, 270, 92, 92, 171, +- 91, 91, 91, 91, 91, 91, 91, 91, 91, 91, +- 91, 91, 91, 91, 91, 156, 157, 158, 161, 182, +- 116, 82, 160, 117, 92, 183, 162, 92, 185, 93, +- 92, 
203, 203, 159, 92, 443, 291, 204, 91, 91, +- 91, 163, 163, 164, 163, 163, 163, 163, 163, 163, +- +- 163, 163, 163, 163, 163, 163, 163, 163, 163, 163, +- 163, 163, 163, 163, 163, 163, 163, 163, 163, 163, +- 163, 163, 163, 163, 163, 163, 163, 163, 163, 165, +- 165, 165, 165, 165, 165, 165, 165, 165, 165, 165, +- 165, 165, 165, 165, 165, 165, 165, 165, 165, 165, +- 165, 165, 165, 184, 184, 184, 184, 184, 184, 184, +- 184, 184, 184, 184, 184, 92, 92, 92, 219, 92, +- 92, 300, 342, 92, 92, 301, 92, 92, 188, 190, +- 92, 92, 92, 194, 152, 195, 92, 92, 184, 184, +- 184, 186, 186, 186, 186, 186, 186, 186, 186, 186, +- +- 186, 186, 186, 152, 152, 189, 187, 92, 92, 92, +- 92, 442, 193, 317, 196, 92, 92, 92, 199, 218, +- 220, 92, 221, 92, 92, 92, 186, 186, 186, 191, +- 192, 192, 191, 191, 191, 191, 191, 191, 191, 191, +- 191, 197, 201, 200, 92, 222, 92, 92, 92, 236, +- 203, 203, 290, 152, 152, 441, 204, 92, 292, 237, +- 239, 235, 240, 92, 191, 191, 191, 163, 163, 163, +- 163, 163, 163, 163, 163, 163, 163, 163, 163, 163, +- 163, 163, 163, 163, 163, 163, 163, 163, 163, 163, +- 163, 223, 223, 223, 223, 223, 223, 223, 223, 223, +- +- 223, 223, 223, 92, 92, 92, 92, 256, 258, 257, +- 92, 273, 92, 301, 92, 92, 92, 259, 92, 92, +- 92, 238, 92, 304, 158, 92, 223, 223, 223, 224, +- 224, 241, 272, 152, 152, 275, 293, 274, 92, 305, +- 273, 92, 225, 226, 152, 276, 92, 92, 227, 92, +- 307, 92, 314, 314, 92, 320, 92, 327, 327, 318, +- 319, 92, 314, 314, 440, 92, 274, 308, 228, 229, +- 230, 92, 309, 341, 334, 231, 332, 232, 92, 388, +- 337, 92, 92, 233, 92, 234, 255, 255, 255, 255, +- 255, 255, 255, 255, 255, 255, 255, 255, 338, 343, +- +- 333, 344, 389, 92, 361, 439, 339, 92, 350, 92, +- 92, 340, 340, 352, 362, 363, 301, 92, 437, 92, +- 92, 255, 255, 255, 260, 260, 260, 260, 260, 260, +- 260, 260, 260, 260, 260, 260, 354, 375, 390, 92, +- 376, 92, 364, 377, 355, 369, 92, 92, 152, 356, +- 356, 365, 92, 92, 392, 92, 436, 378, 92, 260, +- 260, 260, 277, 277, 277, 277, 277, 277, 277, 277, +- 277, 277, 277, 277, 379, 92, 399, 401, 400, 92, +- 92, 408, 92, 435, 152, 92, 434, 391, 92, 409, +- 409, 92, 411, 92, 427, 410, 426, 277, 277, 277, +- +- 286, 286, 286, 286, 286, 286, 286, 286, 286, 286, +- 286, 286, 414, 418, 92, 92, 420, 420, 418, 418, +- 425, 415, 421, 422, 422, 92, 429, 419, 424, 152, +- 92, 429, 429, 417, 152, 286, 286, 286, 287, 287, +- 287, 287, 287, 287, 287, 287, 287, 287, 287, 287, +- 420, 420, 422, 422, 422, 422, 421, 431, 431, 431, +- 431, 431, 431, 413, 432, 412, 432, 407, 432, 406, +- 404, 403, 402, 287, 287, 287, 288, 288, 288, 288, +- 288, 288, 288, 288, 288, 288, 288, 288, 398, 397, +- 395, 394, 393, 387, 386, 385, 382, 381, 380, 374, +- +- 373, 372, 301, 301, 368, 367, 366, 358, 357, 304, +- 349, 288, 288, 288, 310, 310, 310, 310, 310, 310, +- 310, 310, 310, 310, 310, 310, 348, 301, 301, 301, +- 347, 346, 345, 336, 331, 330, 329, 328, 301, 325, +- 324, 301, 301, 323, 322, 321, 315, 313, 312, 310, +- 310, 310, 316, 316, 316, 316, 316, 316, 316, 316, +- 316, 316, 316, 316, 306, 303, 302, 299, 298, 297, +- 296, 295, 294, 289, 285, 284, 283, 282, 281, 280, +- 279, 271, 268, 267, 266, 265, 264, 316, 316, 316, +- 351, 351, 351, 351, 351, 351, 351, 351, 351, 351, +- +- 351, 351, 263, 262, 261, 254, 253, 252, 251, 250, +- 249, 248, 247, 246, 245, 244, 243, 217, 216, 215, +- 214, 213, 212, 211, 210, 351, 351, 351, 359, 359, +- 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, +- 209, 208, 207, 206, 205, 180, 179, 178, 177, 176, +- 175, 174, 173, 172, 169, 168, 167, 118, 118, 100, +- 140, 92, 90, 359, 359, 
359, 360, 360, 360, 360, +- 360, 360, 360, 360, 360, 360, 360, 360, 139, 444, +- 138, 444, 137, 136, 133, 132, 131, 130, 129, 128, +- 127, 126, 444, 125, 124, 123, 122, 118, 101, 100, +- +- 97, 360, 360, 360, 370, 370, 370, 370, 370, 370, +- 370, 370, 370, 370, 370, 370, 90, 89, 88, 87, +- 81, 80, 79, 78, 77, 66, 64, 444, 444, 444, +- 444, 444, 444, 444, 444, 444, 444, 444, 444, 370, +- 370, 370, 383, 383, 383, 383, 383, 383, 383, 383, +- 383, 383, 383, 383, 444, 444, 444, 444, 444, 444, +- 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, +- 444, 444, 444, 444, 444, 444, 444, 383, 383, 383, +- 430, 430, 430, 430, 430, 430, 430, 430, 430, 430, +- 430, 430, 444, 444, 444, 444, 444, 444, 444, 444, +- +- 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, +- 444, 444, 444, 444, 444, 430, 430, 430, 16, 16, +- 16, 16, 16, 16, 16, 16, 16, 54, 54, 54, +- 54, 54, 54, 54, 54, 54, 60, 60, 60, 60, +- 60, 60, 60, 60, 60, 63, 63, 63, 63, 63, +- 63, 63, 63, 63, 65, 65, 65, 65, 65, 65, +- 65, 65, 65, 67, 67, 444, 67, 83, 83, 83, +- 83, 83, 83, 83, 83, 83, 86, 86, 86, 86, +- 86, 86, 86, 86, 86, 92, 92, 92, 92, 92, +- 92, 92, 92, 92, 99, 99, 99, 99, 99, 99, +- +- 99, 444, 99, 119, 444, 444, 444, 444, 444, 444, +- 119, 120, 120, 444, 120, 444, 120, 120, 120, 120, +- 121, 121, 121, 121, 121, 121, 121, 121, 121, 166, +- 166, 444, 166, 444, 166, 166, 166, 166, 202, 202, +- 444, 202, 444, 202, 202, 202, 202, 242, 242, 444, +- 242, 444, 242, 242, 242, 242, 278, 278, 444, 278, +- 444, 278, 278, 278, 278, 255, 255, 255, 255, 255, +- 444, 444, 255, 311, 311, 444, 311, 444, 311, 311, +- 311, 311, 286, 286, 286, 286, 286, 444, 444, 286, +- 287, 287, 287, 287, 287, 444, 444, 287, 288, 288, +- +- 288, 288, 288, 444, 444, 288, 326, 326, 326, 326, +- 326, 444, 444, 326, 335, 335, 444, 335, 444, 335, +- 335, 335, 335, 316, 316, 316, 316, 316, 444, 444, +- 316, 353, 353, 444, 353, 444, 353, 353, 353, 353, +- 371, 371, 444, 371, 444, 371, 371, 371, 371, 359, +- 359, 359, 359, 359, 444, 444, 359, 360, 360, 360, +- 360, 360, 444, 444, 360, 384, 384, 444, 384, 444, +- 384, 384, 384, 384, 396, 396, 444, 396, 444, 396, +- 396, 396, 396, 405, 405, 444, 405, 444, 405, 405, +- 405, 405, 416, 416, 444, 416, 444, 416, 416, 416, +- +- 416, 423, 423, 444, 444, 444, 423, 444, 423, 428, +- 428, 444, 428, 444, 428, 428, 428, 428, 433, 433, +- 433, 444, 433, 433, 444, 433, 438, 438, 444, 438, +- 444, 438, 438, 438, 438, 430, 430, 430, 430, 430, +- 444, 444, 430, 9, 444, 444, 444, 444, 444, 444, +- 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, +- 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, +- 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, +- 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, +- 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, +- +- 444, 444, 444, 444, 444, 444 +- } ; +- +-static yyconst flex_int16_t yy_chk[1507] = +- { 0, +- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +- 1, 1, 1, 4, 5, 6, 4, 7, 8, 18, +- 0, 5, 6, 13, 7, 8, 25, 25, 26, 26, +- 27, 35, 35, 11, 11, 1, 1, 1, 2, 2, +- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, +- 2, 11, 12, 12, 2, 37, 13, 36, 36, 37, +- 18, 42, 39, 27, 43, 44, 45, 46, 48, 159, +- 12, 39, 96, 2, 2, 2, 52, 52, 95, 52, +- 96, 2, 44, 95, 43, 48, 159, 42, 82, 82, +- 43, 45, 46, 2, 2, 2, 3, 3, 3, 3, +- +- 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, +- 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, +- 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, +- 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, +- 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, +- 3, 3, 3, 3, 3, 3, 3, 3, 17, 17, +- 47, 17, 49, 50, 51, 60, 69, 58, 58, 75, +- 83, 94, 94, 17, 97, 49, 17, 97, 102, 79, +- 98, 104, 103, 50, 50, 58, 47, 
68, 68, 51, +- 68, 98, 49, 50, 79, 103, 105, 443, 60, 106, +- +- 17, 107, 68, 83, 102, 68, 104, 69, 108, 109, +- 75, 110, 17, 17, 17, 33, 33, 33, 33, 33, +- 33, 33, 33, 33, 33, 33, 33, 107, 105, 68, +- 106, 107, 140, 108, 128, 140, 442, 111, 109, 110, +- 112, 68, 68, 68, 114, 233, 233, 115, 113, 128, +- 33, 33, 33, 34, 34, 34, 34, 34, 34, 34, +- 34, 34, 34, 34, 34, 111, 112, 113, 115, 141, +- 116, 116, 114, 116, 144, 142, 115, 142, 144, 34, +- 141, 167, 167, 113, 257, 441, 257, 167, 34, 34, +- 34, 119, 119, 119, 119, 119, 119, 119, 119, 119, +- +- 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, +- 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, +- 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, +- 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, +- 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, +- 119, 119, 119, 143, 143, 143, 143, 143, 143, 143, +- 143, 143, 143, 143, 143, 153, 149, 151, 182, 318, +- 155, 267, 318, 156, 182, 267, 147, 148, 149, 151, +- 146, 150, 154, 155, 153, 156, 157, 160, 143, 143, +- 143, 145, 145, 145, 145, 145, 145, 145, 145, 145, +- +- 145, 145, 145, 146, 147, 150, 148, 181, 183, 290, +- 185, 440, 154, 290, 157, 187, 158, 161, 160, 181, +- 183, 162, 185, 188, 190, 194, 145, 145, 145, 152, +- 152, 152, 152, 152, 152, 152, 152, 152, 152, 152, +- 152, 158, 162, 161, 193, 187, 195, 199, 198, 194, +- 203, 203, 256, 188, 190, 439, 203, 256, 258, 195, +- 198, 193, 199, 258, 152, 152, 152, 163, 163, 163, +- 163, 163, 163, 163, 163, 163, 163, 163, 163, 163, +- 163, 163, 163, 163, 163, 163, 163, 163, 163, 163, +- 163, 189, 189, 189, 189, 189, 189, 189, 189, 189, +- +- 189, 189, 189, 196, 197, 201, 200, 218, 220, 219, +- 221, 236, 237, 437, 239, 240, 235, 221, 238, 218, +- 220, 197, 219, 270, 201, 236, 189, 189, 189, 192, +- 192, 200, 235, 196, 238, 237, 259, 236, 272, 270, +- 273, 259, 192, 192, 239, 240, 274, 291, 192, 307, +- 272, 275, 284, 284, 273, 293, 292, 301, 301, 291, +- 292, 309, 314, 314, 436, 293, 273, 274, 192, 192, +- 192, 308, 275, 317, 309, 192, 307, 192, 332, 375, +- 314, 317, 375, 192, 334, 192, 217, 217, 217, 217, +- 217, 217, 217, 217, 217, 217, 217, 217, 315, 319, +- +- 308, 320, 376, 376, 341, 435, 315, 319, 332, 341, +- 320, 315, 315, 334, 342, 343, 434, 350, 427, 342, +- 343, 217, 217, 217, 222, 222, 222, 222, 222, 222, +- 222, 222, 222, 222, 222, 222, 336, 361, 377, 344, +- 362, 377, 344, 363, 336, 350, 361, 362, 388, 336, +- 336, 344, 363, 364, 379, 379, 426, 364, 388, 222, +- 222, 222, 241, 241, 241, 241, 241, 241, 241, 241, +- 241, 241, 241, 241, 365, 378, 389, 391, 390, 365, +- 392, 399, 391, 425, 392, 389, 424, 378, 390, 400, +- 400, 399, 401, 401, 415, 400, 414, 241, 241, 241, +- +- 251, 251, 251, 251, 251, 251, 251, 251, 251, 251, +- 251, 251, 404, 407, 400, 408, 409, 409, 407, 407, +- 413, 404, 409, 410, 410, 411, 417, 408, 412, 411, +- 419, 417, 417, 406, 419, 251, 251, 251, 252, 252, +- 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, +- 420, 420, 421, 421, 422, 422, 420, 423, 423, 431, +- 431, 433, 433, 403, 423, 402, 431, 398, 433, 397, +- 395, 394, 393, 252, 252, 252, 253, 253, 253, 253, +- 253, 253, 253, 253, 253, 253, 253, 253, 387, 385, +- 382, 381, 380, 374, 373, 372, 368, 367, 366, 358, +- +- 357, 354, 349, 348, 347, 346, 345, 338, 337, 331, +- 330, 253, 253, 253, 276, 276, 276, 276, 276, 276, +- 276, 276, 276, 276, 276, 276, 329, 328, 325, 324, +- 323, 322, 321, 312, 306, 305, 304, 303, 302, 300, +- 299, 298, 297, 296, 295, 294, 285, 283, 279, 276, +- 276, 276, 289, 289, 289, 289, 289, 289, 289, 289, +- 289, 289, 289, 
289, 271, 269, 268, 266, 265, 264, +- 263, 262, 261, 254, 250, 249, 247, 246, 245, 244, +- 243, 234, 232, 231, 230, 229, 228, 289, 289, 289, +- 333, 333, 333, 333, 333, 333, 333, 333, 333, 333, +- +- 333, 333, 227, 226, 225, 216, 215, 214, 213, 212, +- 211, 210, 209, 208, 207, 206, 205, 180, 179, 178, +- 177, 176, 175, 174, 173, 333, 333, 333, 339, 339, +- 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, +- 172, 171, 170, 169, 168, 137, 136, 135, 134, 133, +- 132, 131, 130, 129, 127, 126, 124, 118, 117, 99, +- 93, 92, 90, 339, 339, 339, 340, 340, 340, 340, +- 340, 340, 340, 340, 340, 340, 340, 340, 88, 87, +- 86, 85, 81, 80, 78, 77, 76, 74, 73, 72, +- 71, 70, 64, 63, 59, 57, 56, 53, 41, 40, +- +- 38, 340, 340, 340, 352, 352, 352, 352, 352, 352, +- 352, 352, 352, 352, 352, 352, 32, 31, 30, 29, +- 23, 22, 21, 20, 19, 15, 14, 9, 0, 0, +- 0, 0, 0, 0, 0, 0, 0, 0, 0, 352, +- 352, 352, 369, 369, 369, 369, 369, 369, 369, 369, +- 369, 369, 369, 369, 0, 0, 0, 0, 0, 0, +- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +- 0, 0, 0, 0, 0, 0, 0, 369, 369, 369, +- 418, 418, 418, 418, 418, 418, 418, 418, 418, 418, +- 418, 418, 0, 0, 0, 0, 0, 0, 0, 0, +- +- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +- 0, 0, 0, 0, 0, 418, 418, 418, 445, 445, +- 445, 445, 445, 445, 445, 445, 445, 446, 446, 446, +- 446, 446, 446, 446, 446, 446, 447, 447, 447, 447, +- 447, 447, 447, 447, 447, 448, 448, 448, 448, 448, +- 448, 448, 448, 448, 449, 449, 449, 449, 449, 449, +- 449, 449, 449, 450, 450, 0, 450, 451, 451, 451, +- 451, 451, 451, 451, 451, 451, 452, 452, 452, 452, +- 452, 452, 452, 452, 452, 453, 453, 453, 453, 453, +- 453, 453, 453, 453, 454, 454, 454, 454, 454, 454, +- +- 454, 0, 454, 455, 0, 0, 0, 0, 0, 0, +- 455, 456, 456, 0, 456, 0, 456, 456, 456, 456, +- 457, 457, 457, 457, 457, 457, 457, 457, 457, 458, +- 458, 0, 458, 0, 458, 458, 458, 458, 459, 459, +- 0, 459, 0, 459, 459, 459, 459, 460, 460, 0, +- 460, 0, 460, 460, 460, 460, 461, 461, 0, 461, +- 0, 461, 461, 461, 461, 462, 462, 462, 462, 462, +- 0, 0, 462, 463, 463, 0, 463, 0, 463, 463, +- 463, 463, 464, 464, 464, 464, 464, 0, 0, 464, +- 465, 465, 465, 465, 465, 0, 0, 465, 466, 466, +- +- 466, 466, 466, 0, 0, 466, 467, 467, 467, 467, +- 467, 0, 0, 467, 468, 468, 0, 468, 0, 468, +- 468, 468, 468, 469, 469, 469, 469, 469, 0, 0, +- 469, 470, 470, 0, 470, 0, 470, 470, 470, 470, +- 471, 471, 0, 471, 0, 471, 471, 471, 471, 472, +- 472, 472, 472, 472, 0, 0, 472, 473, 473, 473, +- 473, 473, 0, 0, 473, 474, 474, 0, 474, 0, +- 474, 474, 474, 474, 475, 475, 0, 475, 0, 475, +- 475, 475, 475, 476, 476, 0, 476, 0, 476, 476, +- 476, 476, 477, 477, 0, 477, 0, 477, 477, 477, +- +- 477, 478, 478, 0, 0, 0, 478, 0, 478, 479, +- 479, 0, 479, 0, 479, 479, 479, 479, 480, 480, +- 480, 0, 480, 480, 0, 480, 481, 481, 0, 481, +- 0, 481, 481, 481, 481, 482, 482, 482, 482, 482, +- 0, 0, 482, 444, 444, 444, 444, 444, 444, 444, +- 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, +- 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, +- 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, +- 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, +- 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, +- +- 444, 444, 444, 444, 444, 444 +- } ; +- +-static yy_state_type yy_last_accepting_state; +-static char *yy_last_accepting_cpos; +- +-extern int yy_flex_debug; +-int yy_flex_debug = 0; +- +-/* The intent behind this definition is that it'll catch +- * any uses of REJECT which flex missed. 
+- */ +-#define REJECT reject_used_but_not_detected +-#define yymore() yymore_used_but_not_detected +-#define YY_MORE_ADJ 0 +-#define YY_RESTORE_YY_MORE_OFFSET +-char *yytext; +-#line 1 "/abuild/rguenther/tmp/gcc-4.3.3/gcc-4.3.3/gcc/gengtype-lex.l" +-/* -*- indented-text -*- */ +-/* Process source files and output type information. +- Copyright (C) 2002, 2003, 2004, 2005, 2007 Free Software Foundation, Inc. +- +-This file is part of GCC. +- +-GCC is free software; you can redistribute it and/or modify it under +-the terms of the GNU General Public License as published by the Free +-Software Foundation; either version 3, or (at your option) any later +-version. +- +-GCC is distributed in the hope that it will be useful, but WITHOUT ANY +-WARRANTY; without even the implied warranty of MERCHANTABILITY or +-FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +-for more details. +- +-You should have received a copy of the GNU General Public License +-along with GCC; see the file COPYING3. If not see +-. */ +-#line 22 "/abuild/rguenther/tmp/gcc-4.3.3/gcc-4.3.3/gcc/gengtype-lex.l" +-#include "bconfig.h" +-#include "system.h" +- +-#define malloc xmalloc +-#define realloc xrealloc +- +-#include "gengtype.h" +- +-#define YY_DECL int yylex (const char **yylval) +-#define yyterminate() return EOF_TOKEN +- +-struct fileloc lexer_line; +-int lexer_toplevel_done; +- +-static void +-update_lineno (const char *l, size_t len) +-{ +- while (len-- > 0) +- if (*l++ == '\n') +- lexer_line.line++; +-} +- +- +-#line 984 "gengtype-lex.c" +- +-#define INITIAL 0 +-#define in_struct 1 +-#define in_struct_comment 2 +-#define in_comment 3 +- +-#ifndef YY_NO_UNISTD_H +-/* Special case for "unistd.h", since it is non-ANSI. We include it way +- * down here because we want the user's section 1 to have been scanned first. +- * The user has a chance to override it with an option. +- */ +-#include +-#endif +- +-#ifndef YY_EXTRA_TYPE +-#define YY_EXTRA_TYPE void * +-#endif +- +-static int yy_init_globals (void ); +- +-/* Accessor methods to globals. +- These are made visible to non-reentrant scanners for convenience. */ +- +-int yylex_destroy (void ); +- +-int yyget_debug (void ); +- +-void yyset_debug (int debug_flag ); +- +-YY_EXTRA_TYPE yyget_extra (void ); +- +-void yyset_extra (YY_EXTRA_TYPE user_defined ); +- +-FILE *yyget_in (void ); +- +-void yyset_in (FILE * in_str ); +- +-FILE *yyget_out (void ); +- +-void yyset_out (FILE * out_str ); +- +-int yyget_leng (void ); +- +-char *yyget_text (void ); +- +-int yyget_lineno (void ); +- +-void yyset_lineno (int line_number ); +- +-/* Macros after this point can all be overridden by user definitions in +- * section 1. +- */ +- +-#ifndef YY_SKIP_YYWRAP +-#ifdef __cplusplus +-extern "C" int yywrap (void ); +-#else +-extern int yywrap (void ); +-#endif +-#endif +- +-#ifndef yytext_ptr +-static void yy_flex_strncpy (char *,yyconst char *,int ); +-#endif +- +-#ifdef YY_NEED_STRLEN +-static int yy_flex_strlen (yyconst char * ); +-#endif +- +-#ifndef YY_NO_INPUT +- +-#ifdef __cplusplus +-static int yyinput (void ); +-#else +-static int input (void ); +-#endif +- +-#endif +- +-/* Amount of stuff to slurp up with each read. */ +-#ifndef YY_READ_BUF_SIZE +-#define YY_READ_BUF_SIZE 8192 +-#endif +- +-/* Copy whatever the last rule matched to the standard output. */ +-#ifndef ECHO +-/* This used to be an fputs(), but since the string might contain NUL's, +- * we now use fwrite(). 
+- */ +-#define ECHO fwrite( yytext, yyleng, 1, yyout ) +-#endif +- +-/* Gets input and stuffs it into "buf". number of characters read, or YY_NULL, +- * is returned in "result". +- */ +-#ifndef YY_INPUT +-#define YY_INPUT(buf,result,max_size) \ +- if ( YY_CURRENT_BUFFER_LVALUE->yy_is_interactive ) \ +- { \ +- int c = '*'; \ +- int n; \ +- for ( n = 0; n < max_size && \ +- (c = getc( yyin )) != EOF && c != '\n'; ++n ) \ +- buf[n] = (char) c; \ +- if ( c == '\n' ) \ +- buf[n++] = (char) c; \ +- if ( c == EOF && ferror( yyin ) ) \ +- YY_FATAL_ERROR( "input in flex scanner failed" ); \ +- result = n; \ +- } \ +- else \ +- { \ +- errno=0; \ +- while ( (result = fread(buf, 1, max_size, yyin))==0 && ferror(yyin)) \ +- { \ +- if( errno != EINTR) \ +- { \ +- YY_FATAL_ERROR( "input in flex scanner failed" ); \ +- break; \ +- } \ +- errno=0; \ +- clearerr(yyin); \ +- } \ +- }\ +-\ +- +-#endif +- +-/* No semi-colon after return; correct usage is to write "yyterminate();" - +- * we don't want an extra ';' after the "return" because that will cause +- * some compilers to complain about unreachable statements. +- */ +-#ifndef yyterminate +-#define yyterminate() return YY_NULL +-#endif +- +-/* Number of entries by which start-condition stack grows. */ +-#ifndef YY_START_STACK_INCR +-#define YY_START_STACK_INCR 25 +-#endif +- +-/* Report a fatal error. */ +-#ifndef YY_FATAL_ERROR +-#define YY_FATAL_ERROR(msg) yy_fatal_error( msg ) +-#endif +- +-/* end tables serialization structures and prototypes */ +- +-/* Default declaration of generated scanner - a define so the user can +- * easily add parameters. +- */ +-#ifndef YY_DECL +-#define YY_DECL_IS_OURS 1 +- +-extern int yylex (void); +- +-#define YY_DECL int yylex (void) +-#endif /* !YY_DECL */ +- +-/* Code executed at the beginning of each rule, after yytext and yyleng +- * have been set up. +- */ +-#ifndef YY_USER_ACTION +-#define YY_USER_ACTION +-#endif +- +-/* Code executed at the end of each rule. */ +-#ifndef YY_BREAK +-#define YY_BREAK break; +-#endif +- +-#define YY_RULE_SETUP \ +- if ( yyleng > 0 ) \ +- YY_CURRENT_BUFFER_LVALUE->yy_at_bol = \ +- (yytext[yyleng - 1] == '\n'); \ +- YY_USER_ACTION +- +-/** The main scanner function which does all the work. +- */ +-YY_DECL +-{ +- register yy_state_type yy_current_state; +- register char *yy_cp, *yy_bp; +- register int yy_act; +- +-#line 56 "/abuild/rguenther/tmp/gcc-4.3.3/gcc-4.3.3/gcc/gengtype-lex.l" +- +- /* Do this on entry to yylex(): */ +- *yylval = 0; +- if (lexer_toplevel_done) +- { +- BEGIN(INITIAL); +- lexer_toplevel_done = 0; +- } +- +- /* Things we look for in skipping mode: */ +-#line 1181 "gengtype-lex.c" +- +- if ( !(yy_init) ) +- { +- (yy_init) = 1; +- +-#ifdef YY_USER_INIT +- YY_USER_INIT; +-#endif +- +- if ( ! (yy_start) ) +- (yy_start) = 1; /* first start state */ +- +- if ( ! yyin ) +- yyin = stdin; +- +- if ( ! yyout ) +- yyout = stdout; +- +- if ( ! YY_CURRENT_BUFFER ) { +- yyensure_buffer_stack (); +- YY_CURRENT_BUFFER_LVALUE = +- yy_create_buffer(yyin,YY_BUF_SIZE ); +- } +- +- yy_load_buffer_state( ); +- } +- +- while ( 1 ) /* loops until end-of-file is reached */ +- { +- yy_cp = (yy_c_buf_p); +- +- /* Support of yytext. */ +- *yy_cp = (yy_hold_char); +- +- /* yy_bp points to the position in yy_ch_buf of the start of +- * the current run. 
+- */ +- yy_bp = yy_cp; +- +- yy_current_state = (yy_start); +- yy_current_state += YY_AT_BOL(); +-yy_match: +- do +- { +- register YY_CHAR yy_c = yy_ec[YY_SC_TO_UI(*yy_cp)]; +- if ( yy_accept[yy_current_state] ) +- { +- (yy_last_accepting_state) = yy_current_state; +- (yy_last_accepting_cpos) = yy_cp; +- } +- while ( yy_chk[yy_base[yy_current_state] + yy_c] != yy_current_state ) +- { +- yy_current_state = (int) yy_def[yy_current_state]; +- if ( yy_current_state >= 445 ) +- yy_c = yy_meta[(unsigned int) yy_c]; +- } +- yy_current_state = yy_nxt[yy_base[yy_current_state] + (unsigned int) yy_c]; +- ++yy_cp; +- } +- while ( yy_current_state != 444 ); +- yy_cp = (yy_last_accepting_cpos); +- yy_current_state = (yy_last_accepting_state); +- +-yy_find_action: +- yy_act = yy_accept[yy_current_state]; +- +- YY_DO_BEFORE_ACTION; +- +-do_action: /* This label is used only to access EOF actions. */ +- +- switch ( yy_act ) +- { /* beginning of action switch */ +- case 0: /* must back up */ +- /* undo the effects of YY_DO_BEFORE_ACTION */ +- *yy_cp = (yy_hold_char); +- yy_cp = (yy_last_accepting_cpos); +- yy_current_state = (yy_last_accepting_state); +- goto yy_find_action; +- +-case 1: +-/* rule 1 can match eol */ +-*yy_cp = (yy_hold_char); /* undo effects of setting up yytext */ +-(yy_c_buf_p) = yy_cp -= 1; +-YY_DO_BEFORE_ACTION; /* set up yytext again */ +-YY_RULE_SETUP +-#line 67 "/abuild/rguenther/tmp/gcc-4.3.3/gcc-4.3.3/gcc/gengtype-lex.l" +-{ +- BEGIN(in_struct); +- return TYPEDEF; +-} +- YY_BREAK +-case 2: +-/* rule 2 can match eol */ +-*yy_cp = (yy_hold_char); /* undo effects of setting up yytext */ +-(yy_c_buf_p) = yy_cp -= 1; +-YY_DO_BEFORE_ACTION; /* set up yytext again */ +-YY_RULE_SETUP +-#line 71 "/abuild/rguenther/tmp/gcc-4.3.3/gcc-4.3.3/gcc/gengtype-lex.l" +-{ +- BEGIN(in_struct); +- return STRUCT; +-} +- YY_BREAK +-case 3: +-/* rule 3 can match eol */ +-*yy_cp = (yy_hold_char); /* undo effects of setting up yytext */ +-(yy_c_buf_p) = yy_cp -= 1; +-YY_DO_BEFORE_ACTION; /* set up yytext again */ +-YY_RULE_SETUP +-#line 75 "/abuild/rguenther/tmp/gcc-4.3.3/gcc-4.3.3/gcc/gengtype-lex.l" +-{ +- BEGIN(in_struct); +- return UNION; +-} +- YY_BREAK +-case 4: +-/* rule 4 can match eol */ +-*yy_cp = (yy_hold_char); /* undo effects of setting up yytext */ +-(yy_c_buf_p) = yy_cp -= 1; +-YY_DO_BEFORE_ACTION; /* set up yytext again */ +-YY_RULE_SETUP +-#line 79 "/abuild/rguenther/tmp/gcc-4.3.3/gcc-4.3.3/gcc/gengtype-lex.l" +-{ +- BEGIN(in_struct); +- return EXTERN; +-} +- YY_BREAK +-case 5: +-/* rule 5 can match eol */ +-*yy_cp = (yy_hold_char); /* undo effects of setting up yytext */ +-(yy_c_buf_p) = yy_cp -= 1; +-YY_DO_BEFORE_ACTION; /* set up yytext again */ +-YY_RULE_SETUP +-#line 83 "/abuild/rguenther/tmp/gcc-4.3.3/gcc-4.3.3/gcc/gengtype-lex.l" +-{ +- BEGIN(in_struct); +- return STATIC; +-} +- YY_BREAK +-case 6: +-/* rule 6 can match eol */ +-*yy_cp = (yy_hold_char); /* undo effects of setting up yytext */ +-(yy_c_buf_p) = yy_cp -= 1; +-YY_DO_BEFORE_ACTION; /* set up yytext again */ +-YY_RULE_SETUP +-#line 88 "/abuild/rguenther/tmp/gcc-4.3.3/gcc-4.3.3/gcc/gengtype-lex.l" +-{ +- BEGIN(in_struct); +- return DEFVEC_OP; +-} +- YY_BREAK +-case 7: +-/* rule 7 can match eol */ +-*yy_cp = (yy_hold_char); /* undo effects of setting up yytext */ +-(yy_c_buf_p) = yy_cp -= 1; +-YY_DO_BEFORE_ACTION; /* set up yytext again */ +-YY_RULE_SETUP +-#line 92 "/abuild/rguenther/tmp/gcc-4.3.3/gcc-4.3.3/gcc/gengtype-lex.l" +-{ +- BEGIN(in_struct); +- return DEFVEC_I; +-} +- YY_BREAK +-case 8: +-/* rule 8 can match eol 
*/ +-*yy_cp = (yy_hold_char); /* undo effects of setting up yytext */ +-(yy_c_buf_p) = yy_cp -= 1; +-YY_DO_BEFORE_ACTION; /* set up yytext again */ +-YY_RULE_SETUP +-#line 96 "/abuild/rguenther/tmp/gcc-4.3.3/gcc-4.3.3/gcc/gengtype-lex.l" +-{ +- BEGIN(in_struct); +- return DEFVEC_ALLOC; +-} +- YY_BREAK +- +- +-case 9: +-YY_RULE_SETUP +-#line 104 "/abuild/rguenther/tmp/gcc-4.3.3/gcc-4.3.3/gcc/gengtype-lex.l" +-{ BEGIN(in_struct_comment); } +- YY_BREAK +-case 10: +-/* rule 10 can match eol */ +-YY_RULE_SETUP +-#line 106 "/abuild/rguenther/tmp/gcc-4.3.3/gcc-4.3.3/gcc/gengtype-lex.l" +-{ update_lineno (yytext, yyleng); } +- YY_BREAK +-case 11: +-/* rule 11 can match eol */ +-YY_RULE_SETUP +-#line 107 "/abuild/rguenther/tmp/gcc-4.3.3/gcc-4.3.3/gcc/gengtype-lex.l" +-{ lexer_line.line++; } +- YY_BREAK +-case 12: +-/* rule 12 can match eol */ +-*yy_cp = (yy_hold_char); /* undo effects of setting up yytext */ +-(yy_c_buf_p) = yy_cp = yy_bp + 5; +-YY_DO_BEFORE_ACTION; /* set up yytext again */ +-YY_RULE_SETUP +-#line 109 "/abuild/rguenther/tmp/gcc-4.3.3/gcc-4.3.3/gcc/gengtype-lex.l" +-/* don't care */ +- YY_BREAK +-case 13: +-/* rule 13 can match eol */ +-*yy_cp = (yy_hold_char); /* undo effects of setting up yytext */ +-(yy_c_buf_p) = yy_cp = yy_bp + 3; +-YY_DO_BEFORE_ACTION; /* set up yytext again */ +-YY_RULE_SETUP +-#line 110 "/abuild/rguenther/tmp/gcc-4.3.3/gcc-4.3.3/gcc/gengtype-lex.l" +-{ return GTY_TOKEN; } +- YY_BREAK +-case 14: +-/* rule 14 can match eol */ +-*yy_cp = (yy_hold_char); /* undo effects of setting up yytext */ +-(yy_c_buf_p) = yy_cp = yy_bp + 3; +-YY_DO_BEFORE_ACTION; /* set up yytext again */ +-YY_RULE_SETUP +-#line 111 "/abuild/rguenther/tmp/gcc-4.3.3/gcc-4.3.3/gcc/gengtype-lex.l" +-{ return VEC_TOKEN; } +- YY_BREAK +-case 15: +-/* rule 15 can match eol */ +-*yy_cp = (yy_hold_char); /* undo effects of setting up yytext */ +-(yy_c_buf_p) = yy_cp = yy_bp + 5; +-YY_DO_BEFORE_ACTION; /* set up yytext again */ +-YY_RULE_SETUP +-#line 112 "/abuild/rguenther/tmp/gcc-4.3.3/gcc-4.3.3/gcc/gengtype-lex.l" +-{ return UNION; } +- YY_BREAK +-case 16: +-/* rule 16 can match eol */ +-*yy_cp = (yy_hold_char); /* undo effects of setting up yytext */ +-(yy_c_buf_p) = yy_cp = yy_bp + 6; +-YY_DO_BEFORE_ACTION; /* set up yytext again */ +-YY_RULE_SETUP +-#line 113 "/abuild/rguenther/tmp/gcc-4.3.3/gcc-4.3.3/gcc/gengtype-lex.l" +-{ return STRUCT; } +- YY_BREAK +-case 17: +-/* rule 17 can match eol */ +-*yy_cp = (yy_hold_char); /* undo effects of setting up yytext */ +-(yy_c_buf_p) = yy_cp = yy_bp + 4; +-YY_DO_BEFORE_ACTION; /* set up yytext again */ +-YY_RULE_SETUP +-#line 114 "/abuild/rguenther/tmp/gcc-4.3.3/gcc-4.3.3/gcc/gengtype-lex.l" +-{ return ENUM; } +- YY_BREAK +-case 18: +-/* rule 18 can match eol */ +-*yy_cp = (yy_hold_char); /* undo effects of setting up yytext */ +-(yy_c_buf_p) = yy_cp = yy_bp + 9; +-YY_DO_BEFORE_ACTION; /* set up yytext again */ +-YY_RULE_SETUP +-#line 115 "/abuild/rguenther/tmp/gcc-4.3.3/gcc-4.3.3/gcc/gengtype-lex.l" +-{ return PTR_ALIAS; } +- YY_BREAK +-case 19: +-/* rule 19 can match eol */ +-*yy_cp = (yy_hold_char); /* undo effects of setting up yytext */ +-(yy_c_buf_p) = yy_cp = yy_bp + 10; +-YY_DO_BEFORE_ACTION; /* set up yytext again */ +-YY_RULE_SETUP +-#line 116 "/abuild/rguenther/tmp/gcc-4.3.3/gcc-4.3.3/gcc/gengtype-lex.l" +-{ return NESTED_PTR; } +- YY_BREAK +-case 20: +-YY_RULE_SETUP +-#line 117 "/abuild/rguenther/tmp/gcc-4.3.3/gcc-4.3.3/gcc/gengtype-lex.l" +-{ return NUM; } +- YY_BREAK +-case 21: +-/* rule 21 can match eol */ +-*yy_cp = (yy_hold_char); /* 
undo effects of setting up yytext */ +-(yy_c_buf_p) = yy_cp -= 1; +-YY_DO_BEFORE_ACTION; /* set up yytext again */ +-YY_RULE_SETUP +-#line 118 "/abuild/rguenther/tmp/gcc-4.3.3/gcc-4.3.3/gcc/gengtype-lex.l" +-{ +- *yylval = xmemdup (yytext, yyleng, yyleng+1); +- return PARAM_IS; +-} +- YY_BREAK +-case 22: +-/* rule 22 can match eol */ +-*yy_cp = (yy_hold_char); /* undo effects of setting up yytext */ +-(yy_c_buf_p) = yy_cp -= 1; +-YY_DO_BEFORE_ACTION; /* set up yytext again */ +-#line 124 "/abuild/rguenther/tmp/gcc-4.3.3/gcc-4.3.3/gcc/gengtype-lex.l" +-case 23: +-/* rule 23 can match eol */ +-YY_RULE_SETUP +-#line 124 "/abuild/rguenther/tmp/gcc-4.3.3/gcc-4.3.3/gcc/gengtype-lex.l" +-{ +- size_t len; +- +- for (len = yyleng; ISSPACE (yytext[len-1]); len--) +- ; +- +- *yylval = xmemdup (yytext, len, len+1); +- update_lineno (yytext, yyleng); +- return SCALAR; +-} +- YY_BREAK +-case 24: +-/* rule 24 can match eol */ +-*yy_cp = (yy_hold_char); /* undo effects of setting up yytext */ +-(yy_c_buf_p) = yy_cp -= 1; +-YY_DO_BEFORE_ACTION; /* set up yytext again */ +-YY_RULE_SETUP +-#line 136 "/abuild/rguenther/tmp/gcc-4.3.3/gcc-4.3.3/gcc/gengtype-lex.l" +-{ +- *yylval = xmemdup (yytext, yyleng, yyleng+1); +- return ID; +-} +- YY_BREAK +-case 25: +-/* rule 25 can match eol */ +-YY_RULE_SETUP +-#line 141 "/abuild/rguenther/tmp/gcc-4.3.3/gcc-4.3.3/gcc/gengtype-lex.l" +-{ +- *yylval = xmemdup (yytext+1, yyleng-2, yyleng-1); +- return STRING; +-} +- YY_BREAK +-/* This "terminal" avoids having to parse integer constant expressions. */ +-case 26: +-/* rule 26 can match eol */ +-YY_RULE_SETUP +-#line 146 "/abuild/rguenther/tmp/gcc-4.3.3/gcc-4.3.3/gcc/gengtype-lex.l" +-{ +- *yylval = xmemdup (yytext+1, yyleng-2, yyleng-1); +- return ARRAY; +-} +- YY_BREAK +-case 27: +-/* rule 27 can match eol */ +-YY_RULE_SETUP +-#line 150 "/abuild/rguenther/tmp/gcc-4.3.3/gcc-4.3.3/gcc/gengtype-lex.l" +-{ +- *yylval = xmemdup (yytext+1, yyleng-2, yyleng); +- return CHAR; +-} +- YY_BREAK +-case 28: +-YY_RULE_SETUP +-#line 155 "/abuild/rguenther/tmp/gcc-4.3.3/gcc-4.3.3/gcc/gengtype-lex.l" +-{ return ELLIPSIS; } +- YY_BREAK +-case 29: +-YY_RULE_SETUP +-#line 156 "/abuild/rguenther/tmp/gcc-4.3.3/gcc-4.3.3/gcc/gengtype-lex.l" +-{ return yytext[0]; } +- YY_BREAK +-/* ignore pp-directives */ +-case 30: +-/* rule 30 can match eol */ +-YY_RULE_SETUP +-#line 159 "/abuild/rguenther/tmp/gcc-4.3.3/gcc-4.3.3/gcc/gengtype-lex.l" +-{lexer_line.line++;} +- YY_BREAK +-case 31: +-YY_RULE_SETUP +-#line 161 "/abuild/rguenther/tmp/gcc-4.3.3/gcc-4.3.3/gcc/gengtype-lex.l" +-{ +- error_at_line (&lexer_line, "unexpected character `%s'", yytext); +-} +- YY_BREAK +- +-case 32: +-YY_RULE_SETUP +-#line 166 "/abuild/rguenther/tmp/gcc-4.3.3/gcc-4.3.3/gcc/gengtype-lex.l" +-{ BEGIN(in_comment); } +- YY_BREAK +-case 33: +-/* rule 33 can match eol */ +-YY_RULE_SETUP +-#line 167 "/abuild/rguenther/tmp/gcc-4.3.3/gcc-4.3.3/gcc/gengtype-lex.l" +-{ lexer_line.line++; } +- YY_BREAK +-case 34: +-#line 169 "/abuild/rguenther/tmp/gcc-4.3.3/gcc-4.3.3/gcc/gengtype-lex.l" +-case 35: +-/* rule 35 can match eol */ +-#line 170 "/abuild/rguenther/tmp/gcc-4.3.3/gcc-4.3.3/gcc/gengtype-lex.l" +-case 36: +-/* rule 36 can match eol */ +-YY_RULE_SETUP +-#line 170 "/abuild/rguenther/tmp/gcc-4.3.3/gcc-4.3.3/gcc/gengtype-lex.l" +-/* do nothing */ +- YY_BREAK +-case 37: +-/* rule 37 can match eol */ +-YY_RULE_SETUP +-#line 171 "/abuild/rguenther/tmp/gcc-4.3.3/gcc-4.3.3/gcc/gengtype-lex.l" +-{ update_lineno (yytext, yyleng); } +- YY_BREAK +-case 38: +-/* rule 38 can match eol */ +-*yy_cp 
= (yy_hold_char); /* undo effects of setting up yytext */ +-(yy_c_buf_p) = yy_cp = yy_bp + 1; +-YY_DO_BEFORE_ACTION; /* set up yytext again */ +-YY_RULE_SETUP +-#line 172 "/abuild/rguenther/tmp/gcc-4.3.3/gcc-4.3.3/gcc/gengtype-lex.l" +-/* do nothing */ +- YY_BREAK +- +-case 39: +-/* rule 39 can match eol */ +-YY_RULE_SETUP +-#line 175 "/abuild/rguenther/tmp/gcc-4.3.3/gcc-4.3.3/gcc/gengtype-lex.l" +-{ lexer_line.line++; } +- YY_BREAK +-case 40: +-#line 177 "/abuild/rguenther/tmp/gcc-4.3.3/gcc-4.3.3/gcc/gengtype-lex.l" +-case 41: +-YY_RULE_SETUP +-#line 177 "/abuild/rguenther/tmp/gcc-4.3.3/gcc-4.3.3/gcc/gengtype-lex.l" +-/* do nothing */ +- YY_BREAK +-case 42: +-/* rule 42 can match eol */ +-*yy_cp = (yy_hold_char); /* undo effects of setting up yytext */ +-(yy_c_buf_p) = yy_cp = yy_bp + 1; +-YY_DO_BEFORE_ACTION; /* set up yytext again */ +-YY_RULE_SETUP +-#line 178 "/abuild/rguenther/tmp/gcc-4.3.3/gcc-4.3.3/gcc/gengtype-lex.l" +-/* do nothing */ +- YY_BREAK +- +-case 43: +-YY_RULE_SETUP +-#line 180 "/abuild/rguenther/tmp/gcc-4.3.3/gcc-4.3.3/gcc/gengtype-lex.l" +-{ BEGIN(INITIAL); } +- YY_BREAK +-case 44: +-YY_RULE_SETUP +-#line 181 "/abuild/rguenther/tmp/gcc-4.3.3/gcc-4.3.3/gcc/gengtype-lex.l" +-{ BEGIN(in_struct); } +- YY_BREAK +-case 45: +-#line 184 "/abuild/rguenther/tmp/gcc-4.3.3/gcc-4.3.3/gcc/gengtype-lex.l" +-case 46: +-YY_RULE_SETUP +-#line 184 "/abuild/rguenther/tmp/gcc-4.3.3/gcc-4.3.3/gcc/gengtype-lex.l" +-{ +- error_at_line (&lexer_line, +- "unterminated comment or string; unexpected EOF"); +-} +- YY_BREAK +-case 47: +-/* rule 47 can match eol */ +-YY_RULE_SETUP +-#line 189 "/abuild/rguenther/tmp/gcc-4.3.3/gcc-4.3.3/gcc/gengtype-lex.l" +-/* do nothing */ +- YY_BREAK +-case 48: +-/* rule 48 can match eol */ +-YY_RULE_SETUP +-#line 190 "/abuild/rguenther/tmp/gcc-4.3.3/gcc-4.3.3/gcc/gengtype-lex.l" +-{ +- error_at_line (&lexer_line, "stray GTY marker"); +-} +- YY_BREAK +-case 49: +-YY_RULE_SETUP +-#line 194 "/abuild/rguenther/tmp/gcc-4.3.3/gcc-4.3.3/gcc/gengtype-lex.l" +-YY_FATAL_ERROR( "flex scanner jammed" ); +- YY_BREAK +-#line 1651 "gengtype-lex.c" +-case YY_STATE_EOF(INITIAL): +-case YY_STATE_EOF(in_struct): +-case YY_STATE_EOF(in_struct_comment): +-case YY_STATE_EOF(in_comment): +- yyterminate(); +- +- case YY_END_OF_BUFFER: +- { +- /* Amount of text matched not including the EOB char. */ +- int yy_amount_of_matched_text = (int) (yy_cp - (yytext_ptr)) - 1; +- +- /* Undo the effects of YY_DO_BEFORE_ACTION. */ +- *yy_cp = (yy_hold_char); +- YY_RESTORE_YY_MORE_OFFSET +- +- if ( YY_CURRENT_BUFFER_LVALUE->yy_buffer_status == YY_BUFFER_NEW ) +- { +- /* We're scanning a new file or input source. It's +- * possible that this happened because the user +- * just pointed yyin at a new source and called +- * yylex(). If so, then we have to assure +- * consistency between YY_CURRENT_BUFFER and our +- * globals. Here is the right place to do so, because +- * this is the first action (other than possibly a +- * back-up) that will match for the new input source. +- */ +- (yy_n_chars) = YY_CURRENT_BUFFER_LVALUE->yy_n_chars; +- YY_CURRENT_BUFFER_LVALUE->yy_input_file = yyin; +- YY_CURRENT_BUFFER_LVALUE->yy_buffer_status = YY_BUFFER_NORMAL; +- } +- +- /* Note that here we test for yy_c_buf_p "<=" to the position +- * of the first EOB in the buffer, since yy_c_buf_p will +- * already have been incremented past the NUL character +- * (since all states make transitions on EOB to the +- * end-of-buffer state). Contrast this with the test +- * in input(). 
+- */ +- if ( (yy_c_buf_p) <= &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars)] ) +- { /* This was really a NUL. */ +- yy_state_type yy_next_state; +- +- (yy_c_buf_p) = (yytext_ptr) + yy_amount_of_matched_text; +- +- yy_current_state = yy_get_previous_state( ); +- +- /* Okay, we're now positioned to make the NUL +- * transition. We couldn't have +- * yy_get_previous_state() go ahead and do it +- * for us because it doesn't know how to deal +- * with the possibility of jamming (and we don't +- * want to build jamming into it because then it +- * will run more slowly). +- */ +- +- yy_next_state = yy_try_NUL_trans( yy_current_state ); +- +- yy_bp = (yytext_ptr) + YY_MORE_ADJ; +- +- if ( yy_next_state ) +- { +- /* Consume the NUL. */ +- yy_cp = ++(yy_c_buf_p); +- yy_current_state = yy_next_state; +- goto yy_match; +- } +- +- else +- { +- yy_cp = (yy_last_accepting_cpos); +- yy_current_state = (yy_last_accepting_state); +- goto yy_find_action; +- } +- } +- +- else switch ( yy_get_next_buffer( ) ) +- { +- case EOB_ACT_END_OF_FILE: +- { +- (yy_did_buffer_switch_on_eof) = 0; +- +- if ( yywrap( ) ) +- { +- /* Note: because we've taken care in +- * yy_get_next_buffer() to have set up +- * yytext, we can now set up +- * yy_c_buf_p so that if some total +- * hoser (like flex itself) wants to +- * call the scanner after we return the +- * YY_NULL, it'll still work - another +- * YY_NULL will get returned. +- */ +- (yy_c_buf_p) = (yytext_ptr) + YY_MORE_ADJ; +- +- yy_act = YY_STATE_EOF(YY_START); +- goto do_action; +- } +- +- else +- { +- if ( ! (yy_did_buffer_switch_on_eof) ) +- YY_NEW_FILE; +- } +- break; +- } +- +- case EOB_ACT_CONTINUE_SCAN: +- (yy_c_buf_p) = +- (yytext_ptr) + yy_amount_of_matched_text; +- +- yy_current_state = yy_get_previous_state( ); +- +- yy_cp = (yy_c_buf_p); +- yy_bp = (yytext_ptr) + YY_MORE_ADJ; +- goto yy_match; +- +- case EOB_ACT_LAST_MATCH: +- (yy_c_buf_p) = +- &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars)]; +- +- yy_current_state = yy_get_previous_state( ); +- +- yy_cp = (yy_c_buf_p); +- yy_bp = (yytext_ptr) + YY_MORE_ADJ; +- goto yy_find_action; +- } +- break; +- } +- +- default: +- YY_FATAL_ERROR( +- "fatal flex scanner internal error--no action found" ); +- } /* end of action switch */ +- } /* end of scanning one token */ +-} /* end of yylex */ +- +-/* yy_get_next_buffer - try to read in a new buffer +- * +- * Returns a code representing an action: +- * EOB_ACT_LAST_MATCH - +- * EOB_ACT_CONTINUE_SCAN - continue scanning from current position +- * EOB_ACT_END_OF_FILE - end of file +- */ +-static int yy_get_next_buffer (void) +-{ +- register char *dest = YY_CURRENT_BUFFER_LVALUE->yy_ch_buf; +- register char *source = (yytext_ptr); +- register int number_to_move, i; +- int ret_val; +- +- if ( (yy_c_buf_p) > &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars) + 1] ) +- YY_FATAL_ERROR( +- "fatal flex scanner internal error--end of buffer missed" ); +- +- if ( YY_CURRENT_BUFFER_LVALUE->yy_fill_buffer == 0 ) +- { /* Don't try to fill the buffer, so this is an EOF. */ +- if ( (yy_c_buf_p) - (yytext_ptr) - YY_MORE_ADJ == 1 ) +- { +- /* We matched a single character, the EOB, so +- * treat this as a final EOF. +- */ +- return EOB_ACT_END_OF_FILE; +- } +- +- else +- { +- /* We matched some text prior to the EOB, first +- * process it. +- */ +- return EOB_ACT_LAST_MATCH; +- } +- } +- +- /* Try to read more data. */ +- +- /* First move last chars to start of buffer. 
*/ +- number_to_move = (int) ((yy_c_buf_p) - (yytext_ptr)) - 1; +- +- for ( i = 0; i < number_to_move; ++i ) +- *(dest++) = *(source++); +- +- if ( YY_CURRENT_BUFFER_LVALUE->yy_buffer_status == YY_BUFFER_EOF_PENDING ) +- /* don't do the read, it's not guaranteed to return an EOF, +- * just force an EOF +- */ +- YY_CURRENT_BUFFER_LVALUE->yy_n_chars = (yy_n_chars) = 0; +- +- else +- { +- int num_to_read = +- YY_CURRENT_BUFFER_LVALUE->yy_buf_size - number_to_move - 1; +- +- while ( num_to_read <= 0 ) +- { /* Not enough room in the buffer - grow it. */ +- +- /* just a shorter name for the current buffer */ +- YY_BUFFER_STATE b = YY_CURRENT_BUFFER; +- +- int yy_c_buf_p_offset = +- (int) ((yy_c_buf_p) - b->yy_ch_buf); +- +- if ( b->yy_is_our_buffer ) +- { +- int new_size = b->yy_buf_size * 2; +- +- if ( new_size <= 0 ) +- b->yy_buf_size += b->yy_buf_size / 8; +- else +- b->yy_buf_size *= 2; +- +- b->yy_ch_buf = (char *) +- /* Include room in for 2 EOB chars. */ +- yyrealloc((void *) b->yy_ch_buf,b->yy_buf_size + 2 ); +- } +- else +- /* Can't grow it, we don't own it. */ +- b->yy_ch_buf = 0; +- +- if ( ! b->yy_ch_buf ) +- YY_FATAL_ERROR( +- "fatal error - scanner input buffer overflow" ); +- +- (yy_c_buf_p) = &b->yy_ch_buf[yy_c_buf_p_offset]; +- +- num_to_read = YY_CURRENT_BUFFER_LVALUE->yy_buf_size - +- number_to_move - 1; +- +- } +- +- if ( num_to_read > YY_READ_BUF_SIZE ) +- num_to_read = YY_READ_BUF_SIZE; +- +- /* Read in more data. */ +- YY_INPUT( (&YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[number_to_move]), +- (yy_n_chars), (size_t) num_to_read ); +- +- YY_CURRENT_BUFFER_LVALUE->yy_n_chars = (yy_n_chars); +- } +- +- if ( (yy_n_chars) == 0 ) +- { +- if ( number_to_move == YY_MORE_ADJ ) +- { +- ret_val = EOB_ACT_END_OF_FILE; +- yyrestart(yyin ); +- } +- +- else +- { +- ret_val = EOB_ACT_LAST_MATCH; +- YY_CURRENT_BUFFER_LVALUE->yy_buffer_status = +- YY_BUFFER_EOF_PENDING; +- } +- } +- +- else +- ret_val = EOB_ACT_CONTINUE_SCAN; +- +- if ((yy_size_t) ((yy_n_chars) + number_to_move) > YY_CURRENT_BUFFER_LVALUE->yy_buf_size) { +- /* Extend the array by 50%, plus the number we really need. */ +- yy_size_t new_size = (yy_n_chars) + number_to_move + ((yy_n_chars) >> 1); +- YY_CURRENT_BUFFER_LVALUE->yy_ch_buf = (char *) yyrealloc((void *) YY_CURRENT_BUFFER_LVALUE->yy_ch_buf,new_size ); +- if ( ! YY_CURRENT_BUFFER_LVALUE->yy_ch_buf ) +- YY_FATAL_ERROR( "out of dynamic memory in yy_get_next_buffer()" ); +- } +- +- (yy_n_chars) += number_to_move; +- YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars)] = YY_END_OF_BUFFER_CHAR; +- YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars) + 1] = YY_END_OF_BUFFER_CHAR; +- +- (yytext_ptr) = &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[0]; +- +- return ret_val; +-} +- +-/* yy_get_previous_state - get the state just before the EOB char was reached */ +- +- static yy_state_type yy_get_previous_state (void) +-{ +- register yy_state_type yy_current_state; +- register char *yy_cp; +- +- yy_current_state = (yy_start); +- yy_current_state += YY_AT_BOL(); +- +- for ( yy_cp = (yytext_ptr) + YY_MORE_ADJ; yy_cp < (yy_c_buf_p); ++yy_cp ) +- { +- register YY_CHAR yy_c = (*yy_cp ? 
yy_ec[YY_SC_TO_UI(*yy_cp)] : 1); +- if ( yy_accept[yy_current_state] ) +- { +- (yy_last_accepting_state) = yy_current_state; +- (yy_last_accepting_cpos) = yy_cp; +- } +- while ( yy_chk[yy_base[yy_current_state] + yy_c] != yy_current_state ) +- { +- yy_current_state = (int) yy_def[yy_current_state]; +- if ( yy_current_state >= 445 ) +- yy_c = yy_meta[(unsigned int) yy_c]; +- } +- yy_current_state = yy_nxt[yy_base[yy_current_state] + (unsigned int) yy_c]; +- } +- +- return yy_current_state; +-} +- +-/* yy_try_NUL_trans - try to make a transition on the NUL character +- * +- * synopsis +- * next_state = yy_try_NUL_trans( current_state ); +- */ +- static yy_state_type yy_try_NUL_trans (yy_state_type yy_current_state ) +-{ +- register int yy_is_jam; +- register char *yy_cp = (yy_c_buf_p); +- +- register YY_CHAR yy_c = 1; +- if ( yy_accept[yy_current_state] ) +- { +- (yy_last_accepting_state) = yy_current_state; +- (yy_last_accepting_cpos) = yy_cp; +- } +- while ( yy_chk[yy_base[yy_current_state] + yy_c] != yy_current_state ) +- { +- yy_current_state = (int) yy_def[yy_current_state]; +- if ( yy_current_state >= 445 ) +- yy_c = yy_meta[(unsigned int) yy_c]; +- } +- yy_current_state = yy_nxt[yy_base[yy_current_state] + (unsigned int) yy_c]; +- yy_is_jam = (yy_current_state == 444); +- +- return yy_is_jam ? 0 : yy_current_state; +-} +- +-#ifndef YY_NO_INPUT +-#ifdef __cplusplus +- static int yyinput (void) +-#else +- static int input (void) +-#endif +- +-{ +- int c; +- +- *(yy_c_buf_p) = (yy_hold_char); +- +- if ( *(yy_c_buf_p) == YY_END_OF_BUFFER_CHAR ) +- { +- /* yy_c_buf_p now points to the character we want to return. +- * If this occurs *before* the EOB characters, then it's a +- * valid NUL; if not, then we've hit the end of the buffer. +- */ +- if ( (yy_c_buf_p) < &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars)] ) +- /* This was really a NUL. */ +- *(yy_c_buf_p) = '\0'; +- +- else +- { /* need more input */ +- int offset = (yy_c_buf_p) - (yytext_ptr); +- ++(yy_c_buf_p); +- +- switch ( yy_get_next_buffer( ) ) +- { +- case EOB_ACT_LAST_MATCH: +- /* This happens because yy_g_n_b() +- * sees that we've accumulated a +- * token and flags that we need to +- * try matching the token before +- * proceeding. But for input(), +- * there's no matching to consider. +- * So convert the EOB_ACT_LAST_MATCH +- * to EOB_ACT_END_OF_FILE. +- */ +- +- /* Reset buffer status. */ +- yyrestart(yyin ); +- +- /*FALLTHROUGH*/ +- +- case EOB_ACT_END_OF_FILE: +- { +- if ( yywrap( ) ) +- return EOF; +- +- if ( ! (yy_did_buffer_switch_on_eof) ) +- YY_NEW_FILE; +-#ifdef __cplusplus +- return yyinput(); +-#else +- return input(); +-#endif +- } +- +- case EOB_ACT_CONTINUE_SCAN: +- (yy_c_buf_p) = (yytext_ptr) + offset; +- break; +- } +- } +- } +- +- c = *(unsigned char *) (yy_c_buf_p); /* cast for 8-bit char's */ +- *(yy_c_buf_p) = '\0'; /* preserve yytext */ +- (yy_hold_char) = *++(yy_c_buf_p); +- +- YY_CURRENT_BUFFER_LVALUE->yy_at_bol = (c == '\n'); +- +- return c; +-} +-#endif /* ifndef YY_NO_INPUT */ +- +-/** Immediately switch to a different input stream. +- * @param input_file A readable stream. +- * +- * @note This function does not reset the start condition to @c INITIAL . +- */ +- void yyrestart (FILE * input_file ) +-{ +- +- if ( ! YY_CURRENT_BUFFER ){ +- yyensure_buffer_stack (); +- YY_CURRENT_BUFFER_LVALUE = +- yy_create_buffer(yyin,YY_BUF_SIZE ); +- } +- +- yy_init_buffer(YY_CURRENT_BUFFER,input_file ); +- yy_load_buffer_state( ); +-} +- +-/** Switch to a different input buffer. 
+- * @param new_buffer The new input buffer. +- * +- */ +- void yy_switch_to_buffer (YY_BUFFER_STATE new_buffer ) +-{ +- +- /* TODO. We should be able to replace this entire function body +- * with +- * yypop_buffer_state(); +- * yypush_buffer_state(new_buffer); +- */ +- yyensure_buffer_stack (); +- if ( YY_CURRENT_BUFFER == new_buffer ) +- return; +- +- if ( YY_CURRENT_BUFFER ) +- { +- /* Flush out information for old buffer. */ +- *(yy_c_buf_p) = (yy_hold_char); +- YY_CURRENT_BUFFER_LVALUE->yy_buf_pos = (yy_c_buf_p); +- YY_CURRENT_BUFFER_LVALUE->yy_n_chars = (yy_n_chars); +- } +- +- YY_CURRENT_BUFFER_LVALUE = new_buffer; +- yy_load_buffer_state( ); +- +- /* We don't actually know whether we did this switch during +- * EOF (yywrap()) processing, but the only time this flag +- * is looked at is after yywrap() is called, so it's safe +- * to go ahead and always set it. +- */ +- (yy_did_buffer_switch_on_eof) = 1; +-} +- +-static void yy_load_buffer_state (void) +-{ +- (yy_n_chars) = YY_CURRENT_BUFFER_LVALUE->yy_n_chars; +- (yytext_ptr) = (yy_c_buf_p) = YY_CURRENT_BUFFER_LVALUE->yy_buf_pos; +- yyin = YY_CURRENT_BUFFER_LVALUE->yy_input_file; +- (yy_hold_char) = *(yy_c_buf_p); +-} +- +-/** Allocate and initialize an input buffer state. +- * @param file A readable stream. +- * @param size The character buffer size in bytes. When in doubt, use @c YY_BUF_SIZE. +- * +- * @return the allocated buffer state. +- */ +- YY_BUFFER_STATE yy_create_buffer (FILE * file, int size ) +-{ +- YY_BUFFER_STATE b; +- +- b = (YY_BUFFER_STATE) yyalloc(sizeof( struct yy_buffer_state ) ); +- if ( ! b ) +- YY_FATAL_ERROR( "out of dynamic memory in yy_create_buffer()" ); +- +- b->yy_buf_size = size; +- +- /* yy_ch_buf has to be 2 characters longer than the size given because +- * we need to put in 2 end-of-buffer characters. +- */ +- b->yy_ch_buf = (char *) yyalloc(b->yy_buf_size + 2 ); +- if ( ! b->yy_ch_buf ) +- YY_FATAL_ERROR( "out of dynamic memory in yy_create_buffer()" ); +- +- b->yy_is_our_buffer = 1; +- +- yy_init_buffer(b,file ); +- +- return b; +-} +- +-/** Destroy the buffer. +- * @param b a buffer created with yy_create_buffer() +- * +- */ +- void yy_delete_buffer (YY_BUFFER_STATE b ) +-{ +- +- if ( ! b ) +- return; +- +- if ( b == YY_CURRENT_BUFFER ) /* Not sure if we should pop here. */ +- YY_CURRENT_BUFFER_LVALUE = (YY_BUFFER_STATE) 0; +- +- if ( b->yy_is_our_buffer ) +- yyfree((void *) b->yy_ch_buf ); +- +- yyfree((void *) b ); +-} +- +-/* Initializes or reinitializes a buffer. +- * This function is sometimes called more than once on the same buffer, +- * such as during a yyrestart() or at EOF. +- */ +- static void yy_init_buffer (YY_BUFFER_STATE b, FILE * file ) +- +-{ +- int oerrno = errno; +- +- yy_flush_buffer(b ); +- +- b->yy_input_file = file; +- b->yy_fill_buffer = 1; +- +- /* If b is the current buffer, then yy_init_buffer was _probably_ +- * called from yyrestart() or through yy_get_next_buffer. +- * In that case, we don't want to reset the lineno or column. +- */ +- if (b != YY_CURRENT_BUFFER){ +- b->yy_bs_lineno = 1; +- b->yy_bs_column = 0; +- } +- +- b->yy_is_interactive = 0; +- +- errno = oerrno; +-} +- +-/** Discard all buffered characters. On the next scan, YY_INPUT will be called. +- * @param b the buffer state to be flushed, usually @c YY_CURRENT_BUFFER. +- * +- */ +- void yy_flush_buffer (YY_BUFFER_STATE b ) +-{ +- if ( ! b ) +- return; +- +- b->yy_n_chars = 0; +- +- /* We always need two end-of-buffer characters. The first causes +- * a transition to the end-of-buffer state. 
The second causes +- * a jam in that state. +- */ +- b->yy_ch_buf[0] = YY_END_OF_BUFFER_CHAR; +- b->yy_ch_buf[1] = YY_END_OF_BUFFER_CHAR; +- +- b->yy_buf_pos = &b->yy_ch_buf[0]; +- +- b->yy_at_bol = 1; +- b->yy_buffer_status = YY_BUFFER_NEW; +- +- if ( b == YY_CURRENT_BUFFER ) +- yy_load_buffer_state( ); +-} +- +-/** Pushes the new state onto the stack. The new state becomes +- * the current state. This function will allocate the stack +- * if necessary. +- * @param new_buffer The new state. +- * +- */ +-void yypush_buffer_state (YY_BUFFER_STATE new_buffer ) +-{ +- if (new_buffer == NULL) +- return; +- +- yyensure_buffer_stack(); +- +- /* This block is copied from yy_switch_to_buffer. */ +- if ( YY_CURRENT_BUFFER ) +- { +- /* Flush out information for old buffer. */ +- *(yy_c_buf_p) = (yy_hold_char); +- YY_CURRENT_BUFFER_LVALUE->yy_buf_pos = (yy_c_buf_p); +- YY_CURRENT_BUFFER_LVALUE->yy_n_chars = (yy_n_chars); +- } +- +- /* Only push if top exists. Otherwise, replace top. */ +- if (YY_CURRENT_BUFFER) +- (yy_buffer_stack_top)++; +- YY_CURRENT_BUFFER_LVALUE = new_buffer; +- +- /* copied from yy_switch_to_buffer. */ +- yy_load_buffer_state( ); +- (yy_did_buffer_switch_on_eof) = 1; +-} +- +-/** Removes and deletes the top of the stack, if present. +- * The next element becomes the new top. +- * +- */ +-void yypop_buffer_state (void) +-{ +- if (!YY_CURRENT_BUFFER) +- return; +- +- yy_delete_buffer(YY_CURRENT_BUFFER ); +- YY_CURRENT_BUFFER_LVALUE = NULL; +- if ((yy_buffer_stack_top) > 0) +- --(yy_buffer_stack_top); +- +- if (YY_CURRENT_BUFFER) { +- yy_load_buffer_state( ); +- (yy_did_buffer_switch_on_eof) = 1; +- } +-} +- +-/* Allocates the stack if it does not exist. +- * Guarantees space for at least one push. +- */ +-static void yyensure_buffer_stack (void) +-{ +- int num_to_alloc; +- +- if (!(yy_buffer_stack)) { +- +- /* First allocation is just for 2 elements, since we don't know if this +- * scanner will even need a stack. We use 2 instead of 1 to avoid an +- * immediate realloc on the next call. +- */ +- num_to_alloc = 1; +- (yy_buffer_stack) = (struct yy_buffer_state**)yyalloc +- (num_to_alloc * sizeof(struct yy_buffer_state*) +- ); +- if ( ! (yy_buffer_stack) ) +- YY_FATAL_ERROR( "out of dynamic memory in yyensure_buffer_stack()" ); +- +- memset((yy_buffer_stack), 0, num_to_alloc * sizeof(struct yy_buffer_state*)); +- +- (yy_buffer_stack_max) = num_to_alloc; +- (yy_buffer_stack_top) = 0; +- return; +- } +- +- if ((yy_buffer_stack_top) >= ((yy_buffer_stack_max)) - 1){ +- +- /* Increase the buffer to prepare for a possible push. */ +- int grow_size = 8 /* arbitrary grow size */; +- +- num_to_alloc = (yy_buffer_stack_max) + grow_size; +- (yy_buffer_stack) = (struct yy_buffer_state**)yyrealloc +- ((yy_buffer_stack), +- num_to_alloc * sizeof(struct yy_buffer_state*) +- ); +- if ( ! (yy_buffer_stack) ) +- YY_FATAL_ERROR( "out of dynamic memory in yyensure_buffer_stack()" ); +- +- /* zero only the new slots.*/ +- memset((yy_buffer_stack) + (yy_buffer_stack_max), 0, grow_size * sizeof(struct yy_buffer_state*)); +- (yy_buffer_stack_max) = num_to_alloc; +- } +-} +- +-/** Setup the input buffer state to scan directly from a user-specified character buffer. +- * @param base the character buffer +- * @param size the size in bytes of the character buffer +- * +- * @return the newly allocated buffer state object. 
+- */ +-YY_BUFFER_STATE yy_scan_buffer (char * base, yy_size_t size ) +-{ +- YY_BUFFER_STATE b; +- +- if ( size < 2 || +- base[size-2] != YY_END_OF_BUFFER_CHAR || +- base[size-1] != YY_END_OF_BUFFER_CHAR ) +- /* They forgot to leave room for the EOB's. */ +- return 0; +- +- b = (YY_BUFFER_STATE) yyalloc(sizeof( struct yy_buffer_state ) ); +- if ( ! b ) +- YY_FATAL_ERROR( "out of dynamic memory in yy_scan_buffer()" ); +- +- b->yy_buf_size = size - 2; /* "- 2" to take care of EOB's */ +- b->yy_buf_pos = b->yy_ch_buf = base; +- b->yy_is_our_buffer = 0; +- b->yy_input_file = 0; +- b->yy_n_chars = b->yy_buf_size; +- b->yy_is_interactive = 0; +- b->yy_at_bol = 1; +- b->yy_fill_buffer = 0; +- b->yy_buffer_status = YY_BUFFER_NEW; +- +- yy_switch_to_buffer(b ); +- +- return b; +-} +- +-/** Setup the input buffer state to scan a string. The next call to yylex() will +- * scan from a @e copy of @a str. +- * @param yystr a NUL-terminated string to scan +- * +- * @return the newly allocated buffer state object. +- * @note If you want to scan bytes that may contain NUL values, then use +- * yy_scan_bytes() instead. +- */ +-YY_BUFFER_STATE yy_scan_string (yyconst char * yystr ) +-{ +- +- return yy_scan_bytes(yystr,strlen(yystr) ); +-} +- +-/** Setup the input buffer state to scan the given bytes. The next call to yylex() will +- * scan from a @e copy of @a bytes. +- * @param bytes the byte buffer to scan +- * @param len the number of bytes in the buffer pointed to by @a bytes. +- * +- * @return the newly allocated buffer state object. +- */ +-YY_BUFFER_STATE yy_scan_bytes (yyconst char * yybytes, int _yybytes_len ) +-{ +- YY_BUFFER_STATE b; +- char *buf; +- yy_size_t n; +- int i; +- +- /* Get memory for full buffer, including space for trailing EOB's. */ +- n = _yybytes_len + 2; +- buf = (char *) yyalloc(n ); +- if ( ! buf ) +- YY_FATAL_ERROR( "out of dynamic memory in yy_scan_bytes()" ); +- +- for ( i = 0; i < _yybytes_len; ++i ) +- buf[i] = yybytes[i]; +- +- buf[_yybytes_len] = buf[_yybytes_len+1] = YY_END_OF_BUFFER_CHAR; +- +- b = yy_scan_buffer(buf,n ); +- if ( ! b ) +- YY_FATAL_ERROR( "bad buffer in yy_scan_bytes()" ); +- +- /* It's okay to grow etc. this buffer, and we should throw it +- * away when we're done. +- */ +- b->yy_is_our_buffer = 1; +- +- return b; +-} +- +-#ifndef YY_EXIT_FAILURE +-#define YY_EXIT_FAILURE 2 +-#endif +- +-static void yy_fatal_error (yyconst char* msg ) +-{ +- (void) fprintf( stderr, "%s\n", msg ); +- exit( YY_EXIT_FAILURE ); +-} +- +-/* Redefine yyless() so it works in section 3 code. */ +- +-#undef yyless +-#define yyless(n) \ +- do \ +- { \ +- /* Undo effects of setting up yytext. */ \ +- int yyless_macro_arg = (n); \ +- YY_LESS_LINENO(yyless_macro_arg);\ +- yytext[yyleng] = (yy_hold_char); \ +- (yy_c_buf_p) = yytext + yyless_macro_arg; \ +- (yy_hold_char) = *(yy_c_buf_p); \ +- *(yy_c_buf_p) = '\0'; \ +- yyleng = yyless_macro_arg; \ +- } \ +- while ( 0 ) +- +-/* Accessor methods (get/set functions) to struct members. */ +- +-/** Get the current line number. +- * +- */ +-int yyget_lineno (void) +-{ +- +- return yylineno; +-} +- +-/** Get the input stream. +- * +- */ +-FILE *yyget_in (void) +-{ +- return yyin; +-} +- +-/** Get the output stream. +- * +- */ +-FILE *yyget_out (void) +-{ +- return yyout; +-} +- +-/** Get the length of the current token. +- * +- */ +-int yyget_leng (void) +-{ +- return yyleng; +-} +- +-/** Get the current token. +- * +- */ +- +-char *yyget_text (void) +-{ +- return yytext; +-} +- +-/** Set the current line number. 
+- * @param line_number +- * +- */ +-void yyset_lineno (int line_number ) +-{ +- +- yylineno = line_number; +-} +- +-/** Set the input stream. This does not discard the current +- * input buffer. +- * @param in_str A readable stream. +- * +- * @see yy_switch_to_buffer +- */ +-void yyset_in (FILE * in_str ) +-{ +- yyin = in_str ; +-} +- +-void yyset_out (FILE * out_str ) +-{ +- yyout = out_str ; +-} +- +-int yyget_debug (void) +-{ +- return yy_flex_debug; +-} +- +-void yyset_debug (int bdebug ) +-{ +- yy_flex_debug = bdebug ; +-} +- +-static int yy_init_globals (void) +-{ +- /* Initialization is the same as for the non-reentrant scanner. +- * This function is called from yylex_destroy(), so don't allocate here. +- */ +- +- (yy_buffer_stack) = 0; +- (yy_buffer_stack_top) = 0; +- (yy_buffer_stack_max) = 0; +- (yy_c_buf_p) = (char *) 0; +- (yy_init) = 0; +- (yy_start) = 0; +- +-/* Defined in main.c */ +-#ifdef YY_STDINIT +- yyin = stdin; +- yyout = stdout; +-#else +- yyin = (FILE *) 0; +- yyout = (FILE *) 0; +-#endif +- +- /* For future reference: Set errno on error, since we are called by +- * yylex_init() +- */ +- return 0; +-} +- +-/* yylex_destroy is for both reentrant and non-reentrant scanners. */ +-int yylex_destroy (void) +-{ +- +- /* Pop the buffer stack, destroying each element. */ +- while(YY_CURRENT_BUFFER){ +- yy_delete_buffer(YY_CURRENT_BUFFER ); +- YY_CURRENT_BUFFER_LVALUE = NULL; +- yypop_buffer_state(); +- } +- +- /* Destroy the stack itself. */ +- yyfree((yy_buffer_stack) ); +- (yy_buffer_stack) = NULL; +- +- /* Reset the globals. This is important in a non-reentrant scanner so the next time +- * yylex() is called, initialization will occur. */ +- yy_init_globals( ); +- +- return 0; +-} +- +-/* +- * Internal utility routines. +- */ +- +-#ifndef yytext_ptr +-static void yy_flex_strncpy (char* s1, yyconst char * s2, int n ) +-{ +- register int i; +- for ( i = 0; i < n; ++i ) +- s1[i] = s2[i]; +-} +-#endif +- +-#ifdef YY_NEED_STRLEN +-static int yy_flex_strlen (yyconst char * s ) +-{ +- register int n; +- for ( n = 0; s[n]; ++n ) +- ; +- +- return n; +-} +-#endif +- +-void *yyalloc (yy_size_t size ) +-{ +- return (void *) malloc( size ); +-} +- +-void *yyrealloc (void * ptr, yy_size_t size ) +-{ +- /* The cast to (char *) in the following accommodates both +- * implementations that use char* generic pointers, and those +- * that use void* generic pointers. It works with the latter +- * because both ANSI C and C++ allow castless assignment from +- * any pointer type to void*, and deal with argument conversions +- * as though doing an assignment. +- */ +- return (void *) realloc( (char *) ptr, size ); +-} +- +-void yyfree (void * ptr ) +-{ +- free( (char *) ptr ); /* see yyrealloc() for (char *) cast */ +-} +- +-#define YYTABLES_NAME "yytables" +- +-#line 194 "/abuild/rguenther/tmp/gcc-4.3.3/gcc-4.3.3/gcc/gengtype-lex.l" +- +- +- +-void +-yybegin (const char *fname) +-{ +- yyin = fopen (fname, "r"); +- if (yyin == NULL) +- { +- perror (fname); +- exit (1); +- } +- lexer_line.file = fname; +- lexer_line.line = 1; +-} +- +-void +-yyend (void) +-{ +- fclose (yyin); +-} +- +--- a/gcc/genmultilib ++++ b/gcc/genmultilib +@@ -73,6 +73,20 @@ + # the os directory names are used exclusively. Use the mapping when + # there is no one-to-one equivalence between GCC levels and the OS. + ++# The optional eighth option is a list of multilib aliases. This takes the ++# same form as the third argument. It specifies that the second multilib is ++# a synonym for the first. 
This allows a suitable multilib to be selected ++# for all option combinations while only building a subset of all possible ++# multilibs. ++# For example: ++# genmultilib "mbig-endian mthumb" "eb thumb" "" "" "" "" "" \ ++# "mbig-endian=mbig-endian/mthumb" yes ++# This produces: ++# ". !mbig-endian !mthumb;", ++# "be mbig-endian !mthumb;", ++# "be mbig-endian mthumb;", ++# "thumb !mbig-endian mthumb;", ++ + # The last option should be "yes" if multilibs are enabled. If it is not + # "yes", all GCC multilib dir names will be ".". + +@@ -121,7 +135,8 @@ exceptions=$4 + extra=$5 + exclusions=$6 + osdirnames=$7 +-enable_multilib=$8 ++aliases=$8 ++enable_multilib=$9 + + echo "static const char *const multilib_raw[] = {" + +@@ -129,6 +144,23 @@ mkdir tmpmultilib.$$ || exit 1 + # Use cd ./foo to avoid CDPATH output. + cd ./tmpmultilib.$$ || exit 1 + ++# Handle aliases ++cat >tmpmultilib3 <<\EOF ++#!/bin/sh ++# Output a list of aliases (including the original name) for a multilib. ++ ++echo $1 ++EOF ++for a in ${aliases}; do ++ l=`echo $a | sed -e 's/=.*$//' -e 's/?/=/g'` ++ r=`echo $a | sed -e 's/^.*=//' -e 's/?/=/g'` ++ echo "[ \$1 == /$l/ ] && echo /$r/" >>tmpmultilib3 ++ ++ # Also add the alias to the exclusion list ++ exceptions="${exceptions} $r" ++done ++chmod +x tmpmultilib3 ++ + # What we want to do is select all combinations of the sets in + # options. Each combination which includes a set of mutually + # exclusive options must then be output multiple times, once for each +@@ -195,6 +227,21 @@ EOF + combinations=`./tmpmultilib2 ${combinations}` + fi + ++# Check that all the aliases actually exist ++for a in ${aliases}; do ++ l=`echo $a | sed -e 's/=.*$//' -e 's/?/=/g'` ++ for c in ${combinations}; do ++ if [ "/$l/" = "$c" ]; then ++ l="" ++ break; ++ fi ++ done ++ if [ -n "$l" ] ;then ++ echo "Missing multilib $l for alias $a" 1>&2 ++ exit 1 ++ fi ++done ++ + # Construct a sed pattern which will convert option names to directory + # names. + todirnames= +@@ -343,23 +390,25 @@ for combo in ${combinations}; do + fi + fi + +- # Look through the options. We must output each option that is +- # present, and negate each option that is not present. +- optout= +- for set in ${options}; do +- setopts=`echo ${set} | sed -e 's_[/|]_ _g'` +- for opt in ${setopts}; do +- if expr "${combo} " : ".*/${opt}/.*" > /dev/null; then +- optout="${optout} ${opt}" +- else +- optout="${optout} !${opt}" +- fi ++ for optcombo in `./tmpmultilib3 ${combo}`; do ++ # Look through the options. We must output each option that is ++ # present, and negate each option that is not present. ++ optout= ++ for set in ${options}; do ++ setopts=`echo ${set} | sed -e 's_[/|]_ _g'` ++ for opt in ${setopts}; do ++ if expr "${optcombo} " : ".*/${opt}/.*" > /dev/null; then ++ optout="${optout} ${opt}" ++ else ++ optout="${optout} !${opt}" ++ fi ++ done + done +- done +- optout=`echo ${optout} | sed -e 's/^ //'` ++ optout=`echo ${optout} | sed -e 's/^ //'` + +- # Output the line with all appropriate matches. +- dirout="${dirout}" optout="${optout}" ./tmpmultilib2 ++ # Output the line with all appropriate matches. ++ dirout="${dirout}" optout="${optout}" ./tmpmultilib2 ++ done + done + + # Terminate the list of string. +--- a/gcc/haifa-sched.c ++++ b/gcc/haifa-sched.c +@@ -1846,6 +1846,23 @@ move_insn (rtx insn) + SCHED_GROUP_P (insn) = 0; + } + ++/* Return true if scheduling INSN will finish current clock cycle. 
*/ ++static bool ++insn_finishes_cycle_p (rtx insn) ++{ ++ if (SCHED_GROUP_P (insn)) ++ /* After issuing INSN, rest of the sched_group will be forced to issue ++ in order. Don't make any plans for the rest of cycle. */ ++ return true; ++ ++ /* Finishing the block will, apparently, finish the cycle. */ ++ if (current_sched_info->insn_finishes_block_p ++ && current_sched_info->insn_finishes_block_p (insn)) ++ return true; ++ ++ return false; ++} ++ + /* The following structure describe an entry of the stack of choices. */ + struct choice_entry + { +@@ -1902,13 +1919,15 @@ static int + max_issue (struct ready_list *ready, int *index, int max_points) + { + int n, i, all, n_ready, best, delay, tries_num, points = -1; ++ int rest; + struct choice_entry *top; + rtx insn; + + best = 0; + memcpy (choice_stack->state, curr_state, dfa_state_size); + top = choice_stack; +- top->rest = cached_first_cycle_multipass_dfa_lookahead; ++ /* Add +1 to account the empty initial state. */ ++ top->rest = cached_first_cycle_multipass_dfa_lookahead + 1; + top->n = 0; + n_ready = ready->n_ready; + for (all = i = 0; i < n_ready; i++) +@@ -1918,7 +1937,10 @@ max_issue (struct ready_list *ready, int + tries_num = 0; + for (;;) + { +- if (top->rest == 0 || i >= n_ready) ++ if (/* Enough instructions are issued (or we won't issue more). */ ++ top->rest == 0 ++ /* Or there's nothing left to try. */ ++ || i >= n_ready) + { + if (top == choice_stack) + break; +@@ -1942,17 +1964,27 @@ max_issue (struct ready_list *ready, int + break; + insn = ready_element (ready, i); + delay = state_transition (curr_state, insn); ++ + if (delay < 0) + { +- if (state_dead_lock_p (curr_state)) +- top->rest = 0; ++ rest = top->rest; ++ if (state_dead_lock_p (curr_state) ++ || insn_finishes_cycle_p (insn)) ++ /* We won't issue any more instructions in the next ++ choice_state. */ ++ rest = 0; + else +- top->rest--; ++ rest--; ++ + n = top->n; + if (memcmp (top->state, curr_state, dfa_state_size) != 0) + n += ISSUE_POINTS (insn); ++ ++ /* Go to next choice_state. */ + top++; +- top->rest = cached_first_cycle_multipass_dfa_lookahead; ++ ++ /* Initialize it. */ ++ top->rest = rest; + top->index = i; + top->n = n; + memcpy (top->state, curr_state, dfa_state_size); +--- a/gcc/hooks.c ++++ b/gcc/hooks.c +@@ -34,6 +34,12 @@ hook_void_void (void) + { + } + ++/* Generic hook that receives an int * and does nothing. */ ++void ++hook_intp_void (int *p ATTRIBUTE_UNUSED) ++{ ++} ++ + /* Generic hook that takes no arguments and returns false. */ + bool + hook_bool_void_false (void) +@@ -319,3 +325,10 @@ hook_constcharptr_int_const_tree_const_t + { + return NULL; + } ++ ++/* Generic hook that takes a const_tree and returns NULL_TREE. 
*/ ++tree ++hook_tree_const_tree_null (const_tree t ATTRIBUTE_UNUSED) ++{ ++ return NULL; ++} +--- a/gcc/hooks.h ++++ b/gcc/hooks.h +@@ -51,6 +51,7 @@ extern bool hook_bool_tree_tree_false (t + extern bool hook_bool_tree_bool_false (tree, bool); + + extern void hook_void_void (void); ++extern void hook_intp_void (int *); + extern void hook_void_constcharptr (const char *); + extern void hook_void_FILEptr_constcharptr (FILE *, const char *); + extern void hook_void_tree (tree); +@@ -62,6 +63,8 @@ extern int hook_int_rtx_0 (rtx); + extern int hook_int_size_t_constcharptr_int_0 (size_t, const char *, int); + extern int hook_int_void_no_regs (void); + ++extern tree hook_tree_const_tree_null (const_tree); ++ + extern tree hook_tree_tree_tree_null (tree, tree); + extern tree hook_tree_tree_tree_tree_3rd_identity (tree, tree, tree); + extern tree hook_tree_tree_tree_bool_null (tree, tree, bool); +--- a/gcc/integrate.c ++++ b/gcc/integrate.c +@@ -81,8 +81,9 @@ function_attribute_inlinable_p (const_tr + int i; + + for (i = 0; targetm.attribute_table[i].name != NULL; i++) +- if (is_attribute_p (targetm.attribute_table[i].name, name)) +- return targetm.function_attribute_inlinable_p (fndecl); ++ if (is_attribute_p (targetm.attribute_table[i].name, name) ++ && !targetm.function_attribute_inlinable_p (fndecl)) ++ return false; + } + } + +--- a/gcc/java/Make-lang.in ++++ b/gcc/java/Make-lang.in +@@ -303,11 +303,13 @@ java/jcf-io.o: java/jcf-io.c $(CONFIG_H) + $(CC) -c $(ALL_CFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) $(ZLIBINC) \ + $(srcdir)/java/jcf-io.c $(OUTPUT_OPTION) + ++# This must match the setting in libjava/Makefile.am. ++jardir = $(prefix)/$(target_noncanonical)/share/java + # jcf-path.o needs a -D. + java/jcf-path.o: java/jcf-path.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) \ + java/jcf.h + $(CC) -c $(ALL_CFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) \ +- -DLIBGCJ_ZIP_FILE='"$(datadir)/java/libgcj-$(version).jar"' \ ++ -DLIBGCJ_ZIP_FILE='"$(jardir)/libgcj-$(version).jar"' \ + -DDEFAULT_TARGET_VERSION=\"$(version)\" \ + $(srcdir)/java/jcf-path.c $(OUTPUT_OPTION) + +--- a/gcc/jump.c ++++ b/gcc/jump.c +@@ -1551,12 +1551,22 @@ rtx_renumbered_equal_p (const_rtx x, con + + if (reg_renumber[reg_x] >= 0) + { ++ if (!subreg_offset_representable_p (reg_renumber[reg_x], ++ GET_MODE (SUBREG_REG (x)), ++ byte_x, ++ GET_MODE (x))) ++ return 0; + reg_x = subreg_regno_offset (reg_renumber[reg_x], + GET_MODE (SUBREG_REG (x)), + byte_x, + GET_MODE (x)); + byte_x = 0; + } ++ else if (!subreg_offset_representable_p (reg_x, ++ GET_MODE (SUBREG_REG (x)), ++ byte_x, ++ GET_MODE (x))) ++ return 0; + } + else + { +@@ -1572,12 +1582,22 @@ rtx_renumbered_equal_p (const_rtx x, con + + if (reg_renumber[reg_y] >= 0) + { ++ if (!subreg_offset_representable_p (reg_renumber[reg_y], ++ GET_MODE (SUBREG_REG (y)), ++ byte_y, ++ GET_MODE (y))) ++ return 0; + reg_y = subreg_regno_offset (reg_renumber[reg_y], + GET_MODE (SUBREG_REG (y)), + byte_y, + GET_MODE (y)); + byte_y = 0; + } ++ else if (!subreg_offset_representable_p (reg_y, ++ GET_MODE (SUBREG_REG (y)), ++ byte_y, ++ GET_MODE (y))) ++ return 0; + } + else + { +--- a/gcc/libgcc2.c ++++ b/gcc/libgcc2.c +@@ -1830,6 +1830,7 @@ CTYPE + CONCAT3(__mul,MODE,3) (MTYPE a, MTYPE b, MTYPE c, MTYPE d) + { + MTYPE ac, bd, ad, bc, x, y; ++ CTYPE res; + + ac = a * c; + bd = b * d; +@@ -1886,7 +1887,9 @@ CONCAT3(__mul,MODE,3) (MTYPE a, MTYPE b, + } + } + +- return x + I * y; ++ __real__ res = x; ++ __imag__ res = y; ++ return res; + } + #endif /* complex multiply */ + +@@ -1897,6 +1900,7 @@ CTYPE + 
CONCAT3(__div,MODE,3) (MTYPE a, MTYPE b, MTYPE c, MTYPE d) + { + MTYPE denom, ratio, x, y; ++ CTYPE res; + + /* ??? We can get better behavior from logarithmic scaling instead of + the division. But that would mean starting to link libgcc against +@@ -1942,7 +1946,9 @@ CONCAT3(__div,MODE,3) (MTYPE a, MTYPE b, + } + } + +- return x + I * y; ++ __real__ res = x; ++ __imag__ res = y; ++ return res; + } + #endif /* complex divide */ + +@@ -2137,7 +2143,8 @@ __do_global_dtors (void) + (*(p-1)) (); + } + #endif +-#if defined (EH_FRAME_SECTION_NAME) && !defined (HAS_INIT_SECTION) ++#if defined (EH_FRAME_SECTION_NAME) && !defined (HAS_INIT_SECTION) \ ++ && !defined (__MINGW32__) + { + static int completed = 0; + if (! completed) +@@ -2156,14 +2163,14 @@ __do_global_dtors (void) + void + __do_global_ctors (void) + { +-#ifdef EH_FRAME_SECTION_NAME ++ atexit (__do_global_dtors); ++#if defined (EH_FRAME_SECTION_NAME) && !defined (__MINGW32__) + { + static struct object object; + __register_frame_info (__EH_FRAME_BEGIN__, &object); + } + #endif + DO_GLOBAL_CTORS_BODY; +- atexit (__do_global_dtors); + } + #endif /* no HAS_INIT_SECTION */ + +--- a/gcc/modulo-sched.c ++++ b/gcc/modulo-sched.c +@@ -268,6 +268,7 @@ static struct sched_info sms_sched_info + sms_print_insn, + NULL, + compute_jump_reg_dependencies, ++ NULL, /* insn_finishes_block_p */ + NULL, NULL, + NULL, NULL, + 0, 0, 0, +--- a/gcc/optabs.c ++++ b/gcc/optabs.c +@@ -2140,6 +2140,10 @@ expand_binop (enum machine_mode mode, op + && GET_MODE (op0) != mode) + op0 = convert_to_mode (mode, op0, unsignedp); + ++ /* Force things into registers so subreg handling comes out right. */ ++ op0 = force_reg (mode, op0); ++ op1x = force_reg (op1_mode, op1x); ++ + /* Pass 1 for NO_QUEUE so we don't lose any increments + if the libcall is cse'd or moved. */ + value = emit_library_call_value (libfunc, +@@ -3281,7 +3285,8 @@ expand_unop (enum machine_mode mode, opt + if (unoptab == ffs_optab || unoptab == clz_optab || unoptab == ctz_optab + || unoptab == popcount_optab || unoptab == parity_optab) + outmode +- = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node))); ++ = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node), ++ optab_libfunc (unoptab, mode))); + + start_sequence (); + +@@ -4508,10 +4513,12 @@ prepare_float_lib_cmp (rtx *px, rtx *py, + mode != VOIDmode; + mode = GET_MODE_WIDER_MODE (mode)) + { +- if ((libfunc = optab_libfunc (code_to_optab[comparison], mode))) ++ if (code_to_optab[comparison] ++ && (libfunc = optab_libfunc (code_to_optab[comparison], mode))) + break; + +- if ((libfunc = optab_libfunc (code_to_optab[swapped] , mode))) ++ if (code_to_optab[swapped] ++ && (libfunc = optab_libfunc (code_to_optab[swapped], mode))) + { + rtx tmp; + tmp = x; x = y; y = tmp; +@@ -4519,7 +4526,8 @@ prepare_float_lib_cmp (rtx *px, rtx *py, + break; + } + +- if ((libfunc = optab_libfunc (code_to_optab[reversed], mode)) ++ if (code_to_optab[reversed] ++ && (libfunc = optab_libfunc (code_to_optab[reversed], mode)) + && FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, reversed)) + { + comparison = reversed; +--- a/gcc/opts.c ++++ b/gcc/opts.c +@@ -1787,6 +1787,10 @@ common_handle_option (size_t scode, cons + /* These are no-ops, preserved for backward compatibility. */ + break; + ++ case OPT_feglibc_: ++ /* This is a no-op at the moment. */ ++ break; ++ + default: + /* If the flag was handled in a standard way, assume the lack of + processing here is intentional. 
*/ +--- a/gcc/passes.c ++++ b/gcc/passes.c +@@ -522,6 +522,7 @@ init_optimization_passes (void) + NEXT_PASS (pass_cleanup_cfg); + NEXT_PASS (pass_rename_ssa_copies); + NEXT_PASS (pass_ccp); ++ NEXT_PASS (pass_promote_short_indices); + NEXT_PASS (pass_forwprop); + NEXT_PASS (pass_update_address_taken); + NEXT_PASS (pass_simple_dse); +@@ -561,6 +562,7 @@ init_optimization_passes (void) + execute TODO_rebuild_alias at this point even if + pass_create_structure_vars was disabled. */ + NEXT_PASS (pass_build_alias); ++ NEXT_PASS (pass_remove_local_statics); + NEXT_PASS (pass_return_slot); + NEXT_PASS (pass_rename_ssa_copies); + +--- a/gcc/pointer-set.c ++++ b/gcc/pointer-set.c +@@ -181,6 +181,23 @@ void pointer_set_traverse (const struct + break; + } + ++/* Return the number of elements in PSET. */ ++ ++size_t ++pointer_set_n_elements (struct pointer_set_t *pset) ++{ ++ return pset->n_elements; ++} ++ ++/* Remove all entries from PSET. */ ++ ++void ++pointer_set_clear (struct pointer_set_t *pset) ++{ ++ pset->n_elements = 0; ++ memset (pset->slots, 0, sizeof (pset->slots[0]) * pset->n_slots); ++} ++ + + /* A pointer map is represented the same way as a pointer_set, so + the hash code is based on the address of the key, rather than +@@ -301,3 +318,20 @@ void pointer_map_traverse (const struct + if (pmap->keys[i] && !fn (pmap->keys[i], &pmap->values[i], data)) + break; + } ++ ++/* Return the number of elements in PMAP. */ ++ ++size_t ++pointer_map_n_elements (struct pointer_map_t *pmap) ++{ ++ return pmap->n_elements; ++} ++ ++/* Remove all entries from PMAP. */ ++ ++void pointer_map_clear (struct pointer_map_t *pmap) ++{ ++ pmap->n_elements = 0; ++ memset (pmap->keys, 0, sizeof (pmap->keys[0]) * pmap->n_slots); ++ memset (pmap->values, 0, sizeof (pmap->values[0]) * pmap->n_slots); ++} +--- a/gcc/pointer-set.h ++++ b/gcc/pointer-set.h +@@ -29,6 +29,8 @@ int pointer_set_insert (struct pointer_s + void pointer_set_traverse (const struct pointer_set_t *, + bool (*) (const void *, void *), + void *); ++size_t pointer_set_n_elements (struct pointer_set_t *); ++void pointer_set_clear (struct pointer_set_t *); + + struct pointer_map_t; + struct pointer_map_t *pointer_map_create (void); +@@ -38,5 +40,7 @@ void **pointer_map_contains (const struc + void **pointer_map_insert (struct pointer_map_t *pmap, const void *p); + void pointer_map_traverse (const struct pointer_map_t *, + bool (*) (const void *, void **, void *), void *); ++size_t pointer_map_n_elements (struct pointer_map_t *); ++void pointer_map_clear (struct pointer_map_t *); + + #endif /* POINTER_SET_H */ +--- a/gcc/postreload.c ++++ b/gcc/postreload.c +@@ -46,6 +46,7 @@ along with GCC; see the file COPYING3. + #include "tree.h" + #include "timevar.h" + #include "tree-pass.h" ++#include "addresses.h" + #include "df.h" + #include "dbgcnt.h" + +@@ -705,17 +706,19 @@ reload_combine (void) + int last_label_ruid; + int min_labelno, n_labels; + HARD_REG_SET ever_live_at_start, *label_live; ++ enum reg_class index_regs; + + /* If reg+reg can be used in offsetable memory addresses, the main chunk of + reload has already used it where appropriate, so there is no use in + trying to generate it now. */ +- if (double_reg_address_ok && INDEX_REG_CLASS != NO_REGS) ++ index_regs = index_reg_class (VOIDmode); ++ if (double_reg_address_ok && index_regs != NO_REGS) + return; + + /* To avoid wasting too much time later searching for an index register, + determine the minimum and maximum index register numbers. 
*/ + for (r = 0; r < FIRST_PSEUDO_REGISTER; r++) +- if (TEST_HARD_REG_BIT (reg_class_contents[INDEX_REG_CLASS], r)) ++ if (TEST_HARD_REG_BIT (reg_class_contents[index_regs], r)) + { + if (first_index_reg == -1) + first_index_reg = r; +@@ -823,8 +826,8 @@ reload_combine (void) + substitute uses of REG (typically in MEMs) with. + First check REG and BASE for being index registers; + we can use them even if they are not dead. */ +- if (TEST_HARD_REG_BIT (reg_class_contents[INDEX_REG_CLASS], regno) +- || TEST_HARD_REG_BIT (reg_class_contents[INDEX_REG_CLASS], ++ if (TEST_HARD_REG_BIT (reg_class_contents[index_regs], regno) ++ || TEST_HARD_REG_BIT (reg_class_contents[index_regs], + REGNO (base))) + { + const_reg = reg; +@@ -838,8 +841,7 @@ reload_combine (void) + two registers. */ + for (i = first_index_reg; i <= last_index_reg; i++) + { +- if (TEST_HARD_REG_BIT (reg_class_contents[INDEX_REG_CLASS], +- i) ++ if (TEST_HARD_REG_BIT (reg_class_contents[index_regs], i) + && reg_state[i].use_index == RELOAD_COMBINE_MAX_USES + && reg_state[i].store_ruid <= reg_state[regno].use_ruid + && hard_regno_nregs[i][GET_MODE (reg)] == 1) +--- a/gcc/real.c ++++ b/gcc/real.c +@@ -4379,6 +4379,165 @@ const struct real_format decimal_quad_fo + false + }; + ++/* Encode half-precision floats. This routine is used both for the IEEE ++ ARM alternative encodings. */ ++static void ++encode_ieee_half (const struct real_format *fmt, long *buf, ++ const REAL_VALUE_TYPE *r) ++{ ++ unsigned long image, sig, exp; ++ unsigned long sign = r->sign; ++ bool denormal = (r->sig[SIGSZ-1] & SIG_MSB) == 0; ++ ++ image = sign << 15; ++ sig = (r->sig[SIGSZ-1] >> (HOST_BITS_PER_LONG - 11)) & 0x3ff; ++ ++ switch (r->cl) ++ { ++ case rvc_zero: ++ break; ++ ++ case rvc_inf: ++ if (fmt->has_inf) ++ image |= 31 << 10; ++ else ++ image |= 0x7fff; ++ break; ++ ++ case rvc_nan: ++ if (fmt->has_nans) ++ { ++ if (r->canonical) ++ sig = (fmt->canonical_nan_lsbs_set ? (1 << 9) - 1 : 0); ++ if (r->signalling == fmt->qnan_msb_set) ++ sig &= ~(1 << 9); ++ else ++ sig |= 1 << 9; ++ if (sig == 0) ++ sig = 1 << 8; ++ ++ image |= 31 << 10; ++ image |= sig; ++ } ++ else ++ image |= 0x3ff; ++ break; ++ ++ case rvc_normal: ++ /* Recall that IEEE numbers are interpreted as 1.F x 2**exp, ++ whereas the intermediate representation is 0.F x 2**exp. ++ Which means we're off by one. */ ++ if (denormal) ++ exp = 0; ++ else ++ exp = REAL_EXP (r) + 15 - 1; ++ image |= exp << 10; ++ image |= sig; ++ break; ++ ++ default: ++ gcc_unreachable (); ++ } ++ ++ buf[0] = image; ++} ++ ++/* Decode half-precision floats. This routine is used both for the IEEE ++ ARM alternative encodings. 
*/ ++static void ++decode_ieee_half (const struct real_format *fmt, REAL_VALUE_TYPE *r, ++ const long *buf) ++{ ++ unsigned long image = buf[0] & 0xffff; ++ bool sign = (image >> 15) & 1; ++ int exp = (image >> 10) & 0x1f; ++ ++ memset (r, 0, sizeof (*r)); ++ image <<= HOST_BITS_PER_LONG - 11; ++ image &= ~SIG_MSB; ++ ++ if (exp == 0) ++ { ++ if (image && fmt->has_denorm) ++ { ++ r->cl = rvc_normal; ++ r->sign = sign; ++ SET_REAL_EXP (r, -14); ++ r->sig[SIGSZ-1] = image << 1; ++ normalize (r); ++ } ++ else if (fmt->has_signed_zero) ++ r->sign = sign; ++ } ++ else if (exp == 31 && (fmt->has_nans || fmt->has_inf)) ++ { ++ if (image) ++ { ++ r->cl = rvc_nan; ++ r->sign = sign; ++ r->signalling = (((image >> (HOST_BITS_PER_LONG - 2)) & 1) ++ ^ fmt->qnan_msb_set); ++ r->sig[SIGSZ-1] = image; ++ } ++ else ++ { ++ r->cl = rvc_inf; ++ r->sign = sign; ++ } ++ } ++ else ++ { ++ r->cl = rvc_normal; ++ r->sign = sign; ++ SET_REAL_EXP (r, exp - 15 + 1); ++ r->sig[SIGSZ-1] = image | SIG_MSB; ++ } ++} ++ ++/* Half-precision format, as specified in IEEE 754R. */ ++const struct real_format ieee_half_format = ++ { ++ encode_ieee_half, ++ decode_ieee_half, ++ 2, ++ 11, ++ 11, ++ -13, ++ 16, ++ 15, ++ 15, ++ false, ++ true, ++ true, ++ true, ++ true, ++ true, ++ false ++ }; ++ ++/* ARM's alternative half-precision format, similar to IEEE but with ++ no reserved exponent value for NaNs and infinities; rather, it just ++ extends the range of exponents by one. */ ++const struct real_format arm_half_format = ++ { ++ encode_ieee_half, ++ decode_ieee_half, ++ 2, ++ 11, ++ 11, ++ -13, ++ 17, ++ 15, ++ 15, ++ false, ++ false, ++ false, ++ true, ++ true, ++ false, ++ false ++ }; ++ + /* A synthetic "format" for internal arithmetic. It's the size of the + internal significand minus the two bits needed for proper rounding. + The encode and decode routines exist only to satisfy our paranoia +--- a/gcc/real.h ++++ b/gcc/real.h +@@ -286,6 +286,8 @@ extern const struct real_format real_int + extern const struct real_format decimal_single_format; + extern const struct real_format decimal_double_format; + extern const struct real_format decimal_quad_format; ++extern const struct real_format ieee_half_format; ++extern const struct real_format arm_half_format; + + + /* ====================================================================== */ +--- a/gcc/recog.c ++++ b/gcc/recog.c +@@ -587,6 +587,7 @@ validate_replace_rtx_1 (rtx *loc, rtx fr + simplifications, as it is not our job. */ + + if (SWAPPABLE_OPERANDS_P (x) ++ && !reload_in_progress + && swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1))) + { + validate_unshare_change (object, loc, +--- a/gcc/regclass.c ++++ b/gcc/regclass.c +@@ -468,6 +468,24 @@ init_reg_sets_1 (void) + inv_reg_alloc_order[reg_alloc_order[i]] = i; + #endif + ++#ifdef REG_ALLOC_ORDER ++ /* Allow the target to change the allocation order based on ++ supplied flags. */ ++ targetm.adjust_reg_alloc_order (reg_alloc_order); ++ ++ /* Now the contents of reg_alloc_order are fixed, calculate the ++ inverse map. */ ++ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) ++ inv_reg_alloc_order[reg_alloc_order[i]] = i; ++#endif ++ ++ restore_register_info (); ++ ++#ifdef REG_ALLOC_ORDER ++ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) ++ inv_reg_alloc_order[reg_alloc_order[i]] = i; ++#endif ++ + /* This macro allows the fixed or call-used registers + and the register classes to depend on target flags. 
*/ + +@@ -995,10 +1013,10 @@ static void reg_scan_mark_refs (rtx, rtx + /* Wrapper around REGNO_OK_FOR_INDEX_P, to allow pseudo registers. */ + + static inline bool +-ok_for_index_p_nonstrict (rtx reg) ++ok_for_index_p_nonstrict (rtx reg, enum machine_mode mode) + { + unsigned regno = REGNO (reg); +- return regno >= FIRST_PSEUDO_REGISTER || REGNO_OK_FOR_INDEX_P (regno); ++ return regno >= FIRST_PSEUDO_REGISTER || ok_for_index_p_1 (regno, mode); + } + + /* A version of regno_ok_for_base_p for use during regclass, when all pseudos +@@ -2073,7 +2091,7 @@ record_address_regs (enum machine_mode m + enum reg_class class; + + if (context == 1) +- class = INDEX_REG_CLASS; ++ class = index_reg_class (mode); + else + class = base_reg_class (mode, outer_code, index_code); + +@@ -2123,7 +2141,8 @@ record_address_regs (enum machine_mode m + as well as in the tests below, that all addresses are in + canonical form. */ + +- else if (INDEX_REG_CLASS == base_reg_class (VOIDmode, PLUS, SCRATCH)) ++ else if (index_reg_class (mode) ++ == base_reg_class (mode, PLUS, SCRATCH)) + { + record_address_regs (mode, arg0, context, PLUS, code1, scale); + if (! CONSTANT_P (arg1)) +@@ -2149,7 +2168,7 @@ record_address_regs (enum machine_mode m + else if (code0 == REG && code1 == REG + && REGNO (arg0) < FIRST_PSEUDO_REGISTER + && (ok_for_base_p_nonstrict (arg0, mode, PLUS, REG) +- || ok_for_index_p_nonstrict (arg0))) ++ || ok_for_index_p_nonstrict (arg0, mode))) + record_address_regs (mode, arg1, + ok_for_base_p_nonstrict (arg0, mode, PLUS, REG) + ? 1 : 0, +@@ -2157,7 +2176,7 @@ record_address_regs (enum machine_mode m + else if (code0 == REG && code1 == REG + && REGNO (arg1) < FIRST_PSEUDO_REGISTER + && (ok_for_base_p_nonstrict (arg1, mode, PLUS, REG) +- || ok_for_index_p_nonstrict (arg1))) ++ || ok_for_index_p_nonstrict (arg1, mode))) + record_address_regs (mode, arg0, + ok_for_base_p_nonstrict (arg1, mode, PLUS, REG) + ? 
1 : 0, +--- a/gcc/regrename.c ++++ b/gcc/regrename.c +@@ -566,14 +566,14 @@ scan_rtx_address (rtx insn, rtx *loc, en + int index_op; + unsigned regno0 = REGNO (op0), regno1 = REGNO (op1); + +- if (REGNO_OK_FOR_INDEX_P (regno1) ++ if (regno_ok_for_index_p (regno1, mode) + && regno_ok_for_base_p (regno0, mode, PLUS, REG)) + index_op = 1; +- else if (REGNO_OK_FOR_INDEX_P (regno0) ++ else if (regno_ok_for_index_p (regno0, mode) + && regno_ok_for_base_p (regno1, mode, PLUS, REG)) + index_op = 0; + else if (regno_ok_for_base_p (regno0, mode, PLUS, REG) +- || REGNO_OK_FOR_INDEX_P (regno1)) ++ || regno_ok_for_index_p (regno1, mode)) + index_op = 1; + else if (regno_ok_for_base_p (regno1, mode, PLUS, REG)) + index_op = 0; +@@ -598,7 +598,7 @@ scan_rtx_address (rtx insn, rtx *loc, en + } + + if (locI) +- scan_rtx_address (insn, locI, INDEX_REG_CLASS, action, mode); ++ scan_rtx_address (insn, locI, index_reg_class (mode), action, mode); + if (locB) + scan_rtx_address (insn, locB, base_reg_class (mode, PLUS, index_code), + action, mode); +@@ -820,7 +820,7 @@ build_def_use (basic_block bb) + OP_IN, 0); + + for (i = 0; i < recog_data.n_dups; i++) +- *recog_data.dup_loc[i] = copy_rtx (old_dups[i]); ++ *recog_data.dup_loc[i] = old_dups[i]; + for (i = 0; i < n_ops; i++) + *recog_data.operand_loc[i] = old_operands[i]; + if (recog_data.n_dups) +@@ -1486,14 +1486,14 @@ replace_oldest_value_addr (rtx *loc, enu + int index_op; + unsigned regno0 = REGNO (op0), regno1 = REGNO (op1); + +- if (REGNO_OK_FOR_INDEX_P (regno1) ++ if (regno_ok_for_index_p (regno1, mode) + && regno_ok_for_base_p (regno0, mode, PLUS, REG)) + index_op = 1; +- else if (REGNO_OK_FOR_INDEX_P (regno0) ++ else if (regno_ok_for_index_p (regno0, mode) + && regno_ok_for_base_p (regno1, mode, PLUS, REG)) + index_op = 0; + else if (regno_ok_for_base_p (regno0, mode, PLUS, REG) +- || REGNO_OK_FOR_INDEX_P (regno1)) ++ || regno_ok_for_index_p (regno1, mode)) + index_op = 1; + else if (regno_ok_for_base_p (regno1, mode, PLUS, REG)) + index_op = 0; +@@ -1518,8 +1518,8 @@ replace_oldest_value_addr (rtx *loc, enu + } + + if (locI) +- changed |= replace_oldest_value_addr (locI, INDEX_REG_CLASS, mode, +- insn, vd); ++ changed |= replace_oldest_value_addr (locI, index_reg_class (mode), ++ mode, insn, vd); + if (locB) + changed |= replace_oldest_value_addr (locB, + base_reg_class (mode, PLUS, +--- a/gcc/reload.c ++++ b/gcc/reload.c +@@ -5034,7 +5034,7 @@ find_reloads_address (enum machine_mode + loc = &XEXP (*loc, 0); + } + +- if (double_reg_address_ok) ++ if (double_reg_address_ok && index_reg_class (mode) != NO_REGS) + { + /* Unshare the sum as well. */ + *loc = ad = copy_rtx (ad); +@@ -5042,8 +5042,8 @@ find_reloads_address (enum machine_mode + /* Reload the displacement into an index reg. + We assume the frame pointer or arg pointer is a base reg. */ + find_reloads_address_part (XEXP (ad, 1), &XEXP (ad, 1), +- INDEX_REG_CLASS, GET_MODE (ad), opnum, +- type, ind_levels); ++ index_reg_class (mode), GET_MODE (ad), ++ opnum, type, ind_levels); + return 0; + } + else +@@ -5436,13 +5436,13 @@ find_reloads_address_1 (enum machine_mod + #define REG_OK_FOR_CONTEXT(CONTEXT, REGNO, MODE, OUTER, INDEX) \ + ((CONTEXT) == 0 \ + ? 
regno_ok_for_base_p (REGNO, MODE, OUTER, INDEX) \ +- : REGNO_OK_FOR_INDEX_P (REGNO)) ++ : regno_ok_for_index_p (REGNO, MODE)) + + enum reg_class context_reg_class; + RTX_CODE code = GET_CODE (x); + + if (context == 1) +- context_reg_class = INDEX_REG_CLASS; ++ context_reg_class = index_reg_class (mode); + else + context_reg_class = base_reg_class (mode, outer_code, index_code); + +@@ -5534,17 +5534,17 @@ find_reloads_address_1 (enum machine_mod + + else if (code0 == REG && code1 == REG) + { +- if (REGNO_OK_FOR_INDEX_P (REGNO (op1)) ++ if (regno_ok_for_index_p (REGNO (op1), mode) + && regno_ok_for_base_p (REGNO (op0), mode, PLUS, REG)) + return 0; +- else if (REGNO_OK_FOR_INDEX_P (REGNO (op0)) ++ else if (regno_ok_for_index_p (REGNO (op0), mode) + && regno_ok_for_base_p (REGNO (op1), mode, PLUS, REG)) + return 0; + else if (regno_ok_for_base_p (REGNO (op0), mode, PLUS, REG)) + find_reloads_address_1 (mode, orig_op1, 1, PLUS, SCRATCH, + &XEXP (x, 1), opnum, type, ind_levels, + insn); +- else if (REGNO_OK_FOR_INDEX_P (REGNO (op1))) ++ else if (regno_ok_for_index_p (REGNO (op1), mode)) + find_reloads_address_1 (mode, orig_op0, 0, PLUS, REG, + &XEXP (x, 0), opnum, type, ind_levels, + insn); +@@ -5552,7 +5552,7 @@ find_reloads_address_1 (enum machine_mod + find_reloads_address_1 (mode, orig_op0, 1, PLUS, SCRATCH, + &XEXP (x, 0), opnum, type, ind_levels, + insn); +- else if (REGNO_OK_FOR_INDEX_P (REGNO (op0))) ++ else if (regno_ok_for_index_p (REGNO (op0), mode)) + find_reloads_address_1 (mode, orig_op1, 0, PLUS, REG, + &XEXP (x, 1), opnum, type, ind_levels, + insn); +@@ -5622,7 +5622,7 @@ find_reloads_address_1 (enum machine_mod + need to live longer than a TYPE reload normally would, so be + conservative and class it as RELOAD_OTHER. */ + if ((REG_P (XEXP (op1, 1)) +- && !REGNO_OK_FOR_INDEX_P (REGNO (XEXP (op1, 1)))) ++ && !regno_ok_for_index_p (REGNO (XEXP (op1, 1)), mode)) + || GET_CODE (XEXP (op1, 1)) == PLUS) + find_reloads_address_1 (mode, XEXP (op1, 1), 1, code, SCRATCH, + &XEXP (op1, 1), opnum, RELOAD_OTHER, +--- a/gcc/reload1.c ++++ b/gcc/reload1.c +@@ -7677,6 +7677,9 @@ emit_reload_insns (struct insn_chain *ch + } + } + ++ if (i < 0 && rld[r].in != NULL_RTX && rld[r].reg_rtx != NULL_RTX) ++ forget_old_reloads_1 (rld[r].reg_rtx, NULL_RTX, NULL); ++ + /* The following if-statement was #if 0'd in 1.34 (or before...). + It's reenabled in 1.35 because supposedly nothing else + deals with this problem. */ +--- a/gcc/rtl-factoring.c ++++ b/gcc/rtl-factoring.c +@@ -444,15 +444,17 @@ collect_pattern_seqs (void) + htab_iterator hti0, hti1, hti2; + p_hash_bucket hash_bucket; + p_hash_elem e0, e1; +-#ifdef STACK_REGS ++#if defined STACK_REGS || defined HAVE_cc0 + basic_block bb; +- bitmap_head stack_reg_live; ++ bitmap_head dont_collect; + + /* Extra initialization step to ensure that no stack registers (if present) +- are live across abnormal edges. Set a flag in STACK_REG_LIVE for an insn +- if a stack register is live after the insn. */ +- bitmap_initialize (&stack_reg_live, NULL); ++ or cc0 code (if present) are live across abnormal edges. ++ Set a flag in DONT_COLLECT for an insn if a stack register is live ++ after the insn or the insn is cc0 setter or user. 
*/ ++ bitmap_initialize (&dont_collect, NULL); + ++#ifdef STACK_REGS + FOR_EACH_BB (bb) + { + regset_head live; +@@ -476,7 +478,7 @@ collect_pattern_seqs (void) + { + if (REGNO_REG_SET_P (&live, reg)) + { +- bitmap_set_bit (&stack_reg_live, INSN_UID (insn)); ++ bitmap_set_bit (&dont_collect, INSN_UID (insn)); + break; + } + } +@@ -493,6 +495,28 @@ collect_pattern_seqs (void) + } + #endif + ++#ifdef HAVE_cc0 ++ /* Mark CC0 setters and users as ineligible for collection into sequences. ++ This is an over-conservative fix, since it is OK to include ++ a cc0_setter, but only if we also include the corresponding cc0_user, ++ and vice versa. */ ++ FOR_EACH_BB (bb) ++ { ++ rtx insn; ++ rtx next_tail; ++ ++ next_tail = NEXT_INSN (BB_END (bb)); ++ ++ for (insn = BB_HEAD (bb); insn != next_tail; insn = NEXT_INSN (insn)) ++ { ++ if (INSN_P (insn) && reg_mentioned_p (cc0_rtx, PATTERN (insn))) ++ bitmap_set_bit (&dont_collect, INSN_UID (insn)); ++ } ++ } ++#endif ++ ++#endif /* defined STACK_REGS || defined HAVE_cc0 */ ++ + /* Initialize PATTERN_SEQS to empty. */ + pattern_seqs = 0; + +@@ -505,15 +529,15 @@ collect_pattern_seqs (void) + FOR_EACH_HTAB_ELEMENT (hash_bucket->seq_candidates, e1, p_hash_elem, + hti2) + if (e0 != e1 +-#ifdef STACK_REGS +- && !bitmap_bit_p (&stack_reg_live, INSN_UID (e0->insn)) +- && !bitmap_bit_p (&stack_reg_live, INSN_UID (e1->insn)) ++#if defined STACK_REGS || defined HAVE_cc0 ++ && !bitmap_bit_p (&dont_collect, INSN_UID (e0->insn)) ++ && !bitmap_bit_p (&dont_collect, INSN_UID (e1->insn)) + #endif + ) + match_seqs (e0, e1); +-#ifdef STACK_REGS ++#if defined STACK_REGS || defined HAVE_cc0 + /* Free unused data. */ +- bitmap_clear (&stack_reg_live); ++ bitmap_clear (&dont_collect); + #endif + } + +--- a/gcc/sched-ebb.c ++++ b/gcc/sched-ebb.c +@@ -271,6 +271,7 @@ static struct sched_info ebb_sched_info + ebb_print_insn, + contributes_to_priority, + compute_jump_reg_dependencies, ++ NULL, /* insn_finishes_block_p */ + + NULL, NULL, + NULL, NULL, +--- a/gcc/sched-int.h ++++ b/gcc/sched-int.h +@@ -376,6 +376,10 @@ struct sched_info + the jump in the regset. */ + void (*compute_jump_reg_dependencies) (rtx, regset, regset, regset); + ++ /* Return true if scheduling insn (passed as the parameter) will trigger ++ finish of scheduling current block. */ ++ bool (*insn_finishes_block_p) (rtx); ++ + /* The boundaries of the set of insns to be scheduled. */ + rtx prev_head, next_tail; + +--- a/gcc/sched-rgn.c ++++ b/gcc/sched-rgn.c +@@ -2210,6 +2210,19 @@ compute_jump_reg_dependencies (rtx insn + add_branch_dependences. */ + } + ++/* Return true if scheduling INSN will trigger finish of scheduling ++ current block. */ ++static bool ++rgn_insn_finishes_block_p (rtx insn) ++{ ++ if (INSN_BB (insn) == target_bb ++ && sched_target_n_insns + 1 == target_n_insns) ++ /* INSN is the last not-scheduled instruction in the current block. */ ++ return true; ++ ++ return false; ++} ++ + /* Used in schedule_insns to initialize current_sched_info for scheduling + regions (or single basic blocks). 
*/ + +@@ -2223,6 +2236,7 @@ static struct sched_info region_sched_in + rgn_print_insn, + contributes_to_priority, + compute_jump_reg_dependencies, ++ rgn_insn_finishes_block_p, + + NULL, NULL, + NULL, NULL, +--- a/gcc/sdbout.c ++++ b/gcc/sdbout.c +@@ -336,6 +336,7 @@ const struct gcc_debug_hooks sdb_debug_h + debug_nothing_int, /* handle_pch */ + debug_nothing_rtx, /* var_location */ + debug_nothing_void, /* switch_text_section */ ++ debug_nothing_tree_tree, /* set_name */ + 0 /* start_end_main_source_file */ + }; + +--- a/gcc/target-def.h ++++ b/gcc/target-def.h +@@ -461,6 +461,7 @@ + #define TARGET_CANNOT_MODIFY_JUMPS_P hook_bool_void_false + #define TARGET_BRANCH_TARGET_REGISTER_CLASS hook_int_void_no_regs + #define TARGET_BRANCH_TARGET_REGISTER_CALLEE_SAVED hook_bool_bool_false ++#define TARGET_ADJUST_REG_ALLOC_ORDER hook_intp_void + #define TARGET_CANNOT_FORCE_CONST_MEM hook_bool_rtx_false + #define TARGET_CANNOT_COPY_INSN_P NULL + #define TARGET_COMMUTATIVE_P hook_bool_const_rtx_commutative_p +@@ -512,6 +513,10 @@ + #define TARGET_INVALID_CONVERSION hook_constcharptr_const_tree_const_tree_null + #define TARGET_INVALID_UNARY_OP hook_constcharptr_int_const_tree_null + #define TARGET_INVALID_BINARY_OP hook_constcharptr_int_const_tree_const_tree_null ++#define TARGET_INVALID_PARAMETER_TYPE hook_constcharptr_const_tree_null ++#define TARGET_INVALID_RETURN_TYPE hook_constcharptr_const_tree_null ++#define TARGET_PROMOTED_TYPE hook_tree_const_tree_null ++#define TARGET_CONVERT_TO_TYPE hook_tree_tree_tree_null + + #define TARGET_FIXED_CONDITION_CODE_REGS hook_bool_uintp_uintp_false + +@@ -568,7 +573,9 @@ + #define TARGET_ARG_PARTIAL_BYTES hook_int_CUMULATIVE_ARGS_mode_tree_bool_0 + + #define TARGET_FUNCTION_VALUE default_function_value ++#define TARGET_LIBCALL_VALUE default_libcall_value + #define TARGET_INTERNAL_ARG_POINTER default_internal_arg_pointer ++#define TARGET_ALLOCATE_STACK_SLOTS_FOR_ARGS hook_bool_void_true + + #define TARGET_CALLS { \ + TARGET_PROMOTE_FUNCTION_ARGS, \ +@@ -588,7 +595,9 @@ + TARGET_ARG_PARTIAL_BYTES, \ + TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN, \ + TARGET_FUNCTION_VALUE, \ +- TARGET_INTERNAL_ARG_POINTER \ ++ TARGET_LIBCALL_VALUE, \ ++ TARGET_INTERNAL_ARG_POINTER, \ ++ TARGET_ALLOCATE_STACK_SLOTS_FOR_ARGS \ + } + + #ifndef TARGET_UNWIND_TABLES_DEFAULT +@@ -731,6 +740,7 @@ + TARGET_CANNOT_MODIFY_JUMPS_P, \ + TARGET_BRANCH_TARGET_REGISTER_CLASS, \ + TARGET_BRANCH_TARGET_REGISTER_CALLEE_SAVED, \ ++ TARGET_ADJUST_REG_ALLOC_ORDER, \ + TARGET_CANNOT_FORCE_CONST_MEM, \ + TARGET_CANNOT_COPY_INSN_P, \ + TARGET_COMMUTATIVE_P, \ +@@ -782,6 +792,10 @@ + TARGET_INVALID_CONVERSION, \ + TARGET_INVALID_UNARY_OP, \ + TARGET_INVALID_BINARY_OP, \ ++ TARGET_INVALID_PARAMETER_TYPE, \ ++ TARGET_INVALID_RETURN_TYPE, \ ++ TARGET_PROMOTED_TYPE, \ ++ TARGET_CONVERT_TO_TYPE, \ + TARGET_SECONDARY_RELOAD, \ + TARGET_EXPAND_TO_RTL_HOOK, \ + TARGET_INSTANTIATE_DECLS, \ +--- a/gcc/target.h ++++ b/gcc/target.h +@@ -555,6 +555,11 @@ struct gcc_target + already been generated. */ + bool (* branch_target_register_callee_saved) (bool after_pe_gen); + ++ /* Called only if REG_ALLOC_ORDER is defined. Given an array that has ++ been initialized from REG_ALLOC_ORDER, make any target-specific ++ adjustments that cannot be expressed in the definition of that macro. */ ++ void (* adjust_reg_alloc_order) (int *); ++ + /* True if the constant X cannot be placed in the constant pool. 
*/ + bool (* cannot_force_const_mem) (rtx); + +@@ -830,9 +835,18 @@ struct gcc_target + rtx (*function_value) (const_tree ret_type, const_tree fn_decl_or_type, + bool outgoing); + ++ /* Return the rtx for the result of a libcall of mode MODE, ++ calling the function FN_NAME. */ ++ rtx (*libcall_value) (enum machine_mode, rtx); ++ + /* Return an rtx for the argument pointer incoming to the + current function. */ + rtx (*internal_arg_pointer) (void); ++ ++ /* Return true if all function parameters should be spilled to the ++ stack. */ ++ bool (*allocate_stack_slots_for_args) (void); ++ + } calls; + + /* Return the diagnostic message string if conversion from FROMTYPE +@@ -847,6 +861,24 @@ struct gcc_target + is not permitted on TYPE1 and TYPE2, NULL otherwise. */ + const char *(*invalid_binary_op) (int op, const_tree type1, const_tree type2); + ++ /* Return the diagnostic message string if TYPE is not valid as a ++ function parameter type, NULL otherwise. */ ++ const char *(*invalid_parameter_type) (const_tree type); ++ ++ /* Return the diagnostic message string if TYPE is not valid as a ++ function return type, NULL otherwise. */ ++ const char *(*invalid_return_type) (const_tree type); ++ ++ /* If values of TYPE are promoted to some other type when used in ++ expressions (analogous to the integer promotions), return that type, ++ or NULL_TREE otherwise. */ ++ tree (*promoted_type) (const_tree type); ++ ++ /* Convert EXPR to TYPE, if target-specific types with special conversion ++ rules are involved. Return the converted expression, or NULL to apply ++ the standard conversion rules. */ ++ tree (*convert_to_type) (tree type, tree expr); ++ + /* Return the class for a secondary reload, and fill in extra information. */ + enum reg_class (*secondary_reload) (bool, rtx, enum reg_class, + enum machine_mode, +--- a/gcc/targhooks.c ++++ b/gcc/targhooks.c +@@ -565,6 +565,12 @@ default_function_value (const_tree ret_t + } + + rtx ++default_libcall_value (enum machine_mode mode, rtx fun ATTRIBUTE_UNUSED) ++{ ++ return LIBCALL_VALUE (mode); ++} ++ ++rtx + default_internal_arg_pointer (void) + { + /* If the reg that the virtual arg pointer will be translated into is +--- a/gcc/targhooks.h ++++ b/gcc/targhooks.h +@@ -87,6 +87,7 @@ extern const char *hook_invalid_arg_for_ + (const_tree, const_tree, const_tree); + extern bool hook_bool_const_rtx_commutative_p (const_rtx, int); + extern rtx default_function_value (const_tree, const_tree, bool); ++extern rtx default_libcall_value (enum machine_mode, rtx); + extern rtx default_internal_arg_pointer (void); + extern enum reg_class default_secondary_reload (bool, rtx, enum reg_class, + enum machine_mode, +--- a/gcc/timevar.def ++++ b/gcc/timevar.def +@@ -129,6 +129,7 @@ DEFTIMEVAR (TV_TREE_LOOP_IVOPTS , " + DEFTIMEVAR (TV_PREDCOM , "predictive commoning") + DEFTIMEVAR (TV_TREE_LOOP_INIT , "tree loop init") + DEFTIMEVAR (TV_TREE_LOOP_FINI , "tree loop fini") ++DEFTIMEVAR (TV_TREE_LOOP_PROMOTE , "tree loop index promotion") + DEFTIMEVAR (TV_TREE_CH , "tree copy headers") + DEFTIMEVAR (TV_TREE_SSA_UNCPROP , "tree SSA uncprop") + DEFTIMEVAR (TV_TREE_SSA_TO_NORMAL , "tree SSA to normal") +@@ -136,6 +137,7 @@ DEFTIMEVAR (TV_TREE_NRV , "tree NR + DEFTIMEVAR (TV_TREE_COPY_RENAME , "tree rename SSA copies") + DEFTIMEVAR (TV_TREE_SSA_VERIFY , "tree SSA verifier") + DEFTIMEVAR (TV_TREE_STMT_VERIFY , "tree STMT verifier") ++DEFTIMEVAR (TV_TREE_RLS , "tree local static removal") + DEFTIMEVAR (TV_CGRAPH_VERIFY , "callgraph verifier") + DEFTIMEVAR (TV_DOM_FRONTIERS , 
"dominance frontiers") + DEFTIMEVAR (TV_DOMINANCE , "dominance computation") +--- a/gcc/toplev.h ++++ b/gcc/toplev.h +@@ -131,6 +131,7 @@ extern int flag_unroll_loops; + extern int flag_unroll_all_loops; + extern int flag_unswitch_loops; + extern int flag_cprop_registers; ++extern int flag_remove_local_statics; + extern int time_report; + + /* Things to do with target switches. */ +--- a/gcc/tree-pass.h ++++ b/gcc/tree-pass.h +@@ -264,6 +264,7 @@ extern struct tree_opt_pass pass_iv_cano + extern struct tree_opt_pass pass_scev_cprop; + extern struct tree_opt_pass pass_empty_loop; + extern struct tree_opt_pass pass_record_bounds; ++extern struct tree_opt_pass pass_promote_short_indices; + extern struct tree_opt_pass pass_if_conversion; + extern struct tree_opt_pass pass_vectorize; + extern struct tree_opt_pass pass_complete_unroll; +@@ -328,6 +329,7 @@ extern struct tree_opt_pass pass_reassoc + extern struct tree_opt_pass pass_rebuild_cgraph_edges; + extern struct tree_opt_pass pass_build_cgraph_edges; + extern struct tree_opt_pass pass_reset_cc_flags; ++extern struct tree_opt_pass pass_remove_local_statics; + + /* IPA Passes */ + extern struct tree_opt_pass pass_ipa_matrix_reorg; +--- a/gcc/tree-predcom.c ++++ b/gcc/tree-predcom.c +@@ -1294,6 +1294,7 @@ ref_at_iteration (struct loop *loop, tre + { + tree idx, *idx_p, type, val, op0 = NULL_TREE, ret; + affine_iv iv; ++ tree fs; + bool ok; + + if (handled_component_p (ref)) +@@ -1341,7 +1342,10 @@ ref_at_iteration (struct loop *loop, tre + else + return NULL_TREE; + +- ok = simple_iv (loop, first_stmt (loop->header), idx, &iv, true); ++ fs = first_stmt (loop->header); ++ if (!fs) ++ return NULL_TREE; ++ ok = simple_iv (loop, fs, idx, &iv, true); + if (!ok) + return NULL_TREE; + iv.base = expand_simple_operations (iv.base); +--- a/gcc/tree-ssa-loop-ivopts.c ++++ b/gcc/tree-ssa-loop-ivopts.c +@@ -1391,10 +1391,75 @@ idx_record_use (tree base, tree *idx, + return true; + } + +-/* Returns true if memory reference REF may be unaligned. */ ++/* If we can prove that TOP = cst * BOT for some constant cst, ++ store cst to MUL and return true. Otherwise return false. ++ The returned value is always sign-extended, regardless of the ++ signedness of TOP and BOT. 
*/ + + static bool +-may_be_unaligned_p (tree ref) ++constant_multiple_of (tree top, tree bot, double_int *mul) ++{ ++ tree mby; ++ enum tree_code code; ++ double_int res, p0, p1; ++ unsigned precision = TYPE_PRECISION (TREE_TYPE (top)); ++ ++ STRIP_NOPS (top); ++ STRIP_NOPS (bot); ++ ++ if (operand_equal_p (top, bot, 0)) ++ { ++ *mul = double_int_one; ++ return true; ++ } ++ ++ code = TREE_CODE (top); ++ switch (code) ++ { ++ case MULT_EXPR: ++ mby = TREE_OPERAND (top, 1); ++ if (TREE_CODE (mby) != INTEGER_CST) ++ return false; ++ ++ if (!constant_multiple_of (TREE_OPERAND (top, 0), bot, &res)) ++ return false; ++ ++ *mul = double_int_sext (double_int_mul (res, tree_to_double_int (mby)), ++ precision); ++ return true; ++ ++ case PLUS_EXPR: ++ case MINUS_EXPR: ++ if (!constant_multiple_of (TREE_OPERAND (top, 0), bot, &p0) ++ || !constant_multiple_of (TREE_OPERAND (top, 1), bot, &p1)) ++ return false; ++ ++ if (code == MINUS_EXPR) ++ p1 = double_int_neg (p1); ++ *mul = double_int_sext (double_int_add (p0, p1), precision); ++ return true; ++ ++ case INTEGER_CST: ++ if (TREE_CODE (bot) != INTEGER_CST) ++ return false; ++ ++ p0 = double_int_sext (tree_to_double_int (top), precision); ++ p1 = double_int_sext (tree_to_double_int (bot), precision); ++ if (double_int_zero_p (p1)) ++ return false; ++ *mul = double_int_sext (double_int_sdivmod (p0, p1, FLOOR_DIV_EXPR, &res), ++ precision); ++ return double_int_zero_p (res); ++ ++ default: ++ return false; ++ } ++} ++ ++/* Returns true if memory reference REF with step STEP may be unaligned. */ ++ ++static bool ++may_be_unaligned_p (tree ref, tree step) + { + tree base; + tree base_type; +@@ -1418,11 +1483,20 @@ may_be_unaligned_p (tree ref) + base_type = TREE_TYPE (base); + base_align = TYPE_ALIGN (base_type); + +- if (mode != BLKmode +- && (base_align < GET_MODE_ALIGNMENT (mode) ++ if (mode != BLKmode) ++ { ++ double_int mul; ++ tree al = build_int_cst (TREE_TYPE (step), ++ GET_MODE_ALIGNMENT (mode) / BITS_PER_UNIT); ++ ++ if (base_align < GET_MODE_ALIGNMENT (mode) + || bitpos % GET_MODE_ALIGNMENT (mode) != 0 +- || bitpos % BITS_PER_UNIT != 0)) +- return true; ++ || bitpos % BITS_PER_UNIT != 0) ++ return true; ++ ++ if (! constant_multiple_of (step, al, &mul)) ++ return true; ++ } + + return false; + } +@@ -1549,7 +1623,7 @@ find_interesting_uses_address (struct iv + + /* Moreover, on strict alignment platforms, check that it is + sufficiently aligned. */ +- if (STRICT_ALIGNMENT && may_be_unaligned_p (base)) ++ if (STRICT_ALIGNMENT && may_be_unaligned_p (base, step)) + goto fail; + + base = build_fold_addr_expr (base); +@@ -2585,71 +2659,6 @@ tree_int_cst_sign_bit (const_tree t) + return (w >> bitno) & 1; + } + +-/* If we can prove that TOP = cst * BOT for some constant cst, +- store cst to MUL and return true. Otherwise return false. +- The returned value is always sign-extended, regardless of the +- signedness of TOP and BOT. 
*/ +- +-static bool +-constant_multiple_of (tree top, tree bot, double_int *mul) +-{ +- tree mby; +- enum tree_code code; +- double_int res, p0, p1; +- unsigned precision = TYPE_PRECISION (TREE_TYPE (top)); +- +- STRIP_NOPS (top); +- STRIP_NOPS (bot); +- +- if (operand_equal_p (top, bot, 0)) +- { +- *mul = double_int_one; +- return true; +- } +- +- code = TREE_CODE (top); +- switch (code) +- { +- case MULT_EXPR: +- mby = TREE_OPERAND (top, 1); +- if (TREE_CODE (mby) != INTEGER_CST) +- return false; +- +- if (!constant_multiple_of (TREE_OPERAND (top, 0), bot, &res)) +- return false; +- +- *mul = double_int_sext (double_int_mul (res, tree_to_double_int (mby)), +- precision); +- return true; +- +- case PLUS_EXPR: +- case MINUS_EXPR: +- if (!constant_multiple_of (TREE_OPERAND (top, 0), bot, &p0) +- || !constant_multiple_of (TREE_OPERAND (top, 1), bot, &p1)) +- return false; +- +- if (code == MINUS_EXPR) +- p1 = double_int_neg (p1); +- *mul = double_int_sext (double_int_add (p0, p1), precision); +- return true; +- +- case INTEGER_CST: +- if (TREE_CODE (bot) != INTEGER_CST) +- return false; +- +- p0 = double_int_sext (tree_to_double_int (top), precision); +- p1 = double_int_sext (tree_to_double_int (bot), precision); +- if (double_int_zero_p (p1)) +- return false; +- *mul = double_int_sext (double_int_sdivmod (p0, p1, FLOOR_DIV_EXPR, &res), +- precision); +- return double_int_zero_p (res); +- +- default: +- return false; +- } +-} +- + /* If A is (TYPE) BA and B is (TYPE) BB, and the types of BA and BB have the + same precision that is at least as wide as the precision of TYPE, stores + BA to A and BB to B, and returns the type of BA. Otherwise, returns the +--- /dev/null ++++ b/gcc/tree-ssa-loop-promote.c +@@ -0,0 +1,1555 @@ ++/* Promotion of shorter-than-word-size loop indices. ++ Copyright (C) 2009 Free Software Foundation, Inc. ++ ++This file is part of GCC. ++ ++GCC is free software; you can redistribute it and/or modify it ++under the terms of the GNU General Public License as published by the ++Free Software Foundation; either version 3, or (at your option) any ++later version. ++ ++GCC is distributed in the hope that it will be useful, but WITHOUT ++ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++for more details. ++ ++You should have received a copy of the GNU General Public License ++along with GCC; see the file COPYING3. If not see ++. */ ++ ++/* This pass finds loop indices that are declared as ++ shorter-than-word-size and replaces them with word-sized loop ++ indices. (It assumes that word-sized quantities are the most ++ efficient type on which to do arithmetic.) The loop optimization ++ machinery has a difficult time seeing through the casts required to ++ promote such indices to word-sized quantities for memory addressing ++ and/or preserving the semantics of the source language (such as C). ++ The transformation also helps eliminate unnecessary ++ {sign,zero}-extensions required for the same. ++ ++ Although this is most naturally expressed as a loop optimization ++ pass, we choose to place this pass some ways before the loop ++ optimization passes proper, so that other scalar optimizations will ++ run on our "cleaned-up" code. This decision has the negative of ++ requiring us to build and destroy all the loop optimization ++ infrastructure. ++ ++ The algorithm is relatively simple. For each single-exit loop, we ++ identify the loop index variable. 
If the loop index variable is ++ shorter than the word size, then we have a candidate for promotion. ++ We determine whether the scalar evolution of the loop index fits a ++ particular pattern (incremented by 1, compared against a ++ similarly-typed loop bound, and only modified by a single increment ++ within the loop), as well as examining the uses of the loop index to ++ ensure we are able to safely promote those uses (e.g. the loop index ++ must not be stored to memory or passed to function calls). If these ++ conditions are satisfied, we create an appropriate word-sized type ++ and replace all uses and defs of the loop index variable with the new ++ variable. */ ++ ++#include "config.h" ++#include "system.h" ++#include "coretypes.h" ++#include "tm.h" ++ ++#include "toplev.h" ++#include "rtl.h" ++#include "tm_p.h" ++#include "hard-reg-set.h" ++#include "obstack.h" ++#include "basic-block.h" ++#include "pointer-set.h" ++#include "intl.h" ++ ++#include "tree.h" ++#include "tree-gimple.h" ++#include "hashtab.h" ++#include "diagnostic.h" ++#include "tree-flow.h" ++#include "tree-dump.h" ++#include "cfgloop.h" ++#include "flags.h" ++#include "timevar.h" ++#include "tree-pass.h" ++#include "tree-chrec.h" ++#include "tree-scalar-evolution.h" ++#include "tree-inline.h" ++ ++struct promote_info { ++ /* The loop being analyzed. */ ++ struct loop *loop; ++ ++ /* The COND_EXPR controlling exit from the loop. */ ++ tree exit_expr; ++ ++ /* The loop index variable's SSA_NAME that is defined in a phi node in ++ LOOP->HEADER. Note that this SSA_NAME may be different than the ++ one appearing in EXIT_EXPR. */ ++ tree loop_index_name; ++ ++ /* The bound of the loop. */ ++ tree loop_limit; ++ ++ /* Whether we've warned about things with ++ warn_unsafe_loop_optimizations. */ ++ bool warned; ++ ++ /* LOOP_INDEX_NAME's underlying VAR_DECL. */ ++ tree var_decl; ++ ++ /* The types to which defs/uses of LOOP_INDEX_NAME are cast via ++ NOP_EXPRs. */ ++ VEC(tree, heap) *cast_types; ++ ++ /* The number of times we have seen a cast to the corresponding type ++ (as determined by types_compatible_p) in CAST_TYPES. */ ++ VEC(int, heap) *cast_counts; ++ ++ /* Whether LOOP_INDEX_NAME is suitable for promotion. */ ++ bool can_be_promoted_p; ++ ++ /* If CAN_BE_PROMOTED_P, the promoted type. */ ++ tree promoted_type; ++ ++ /* If CAN_BE_PROMOTED_P, the promoted VAR_DECL. */ ++ tree promoted_var; ++}; ++ ++/* A set of `struct promote_info'. */ ++ ++static struct pointer_set_t *promotion_info; ++ ++/* A set of all potentially promotable SSA_NAMEs, used for quick ++decision-making during analysis. */ ++ ++static struct pointer_set_t *promotable_names; ++ ++/* A map from SSA_NAMEs to the VAR_DECL to which they will be ++ promoted. */ ++ ++static struct pointer_map_t *variable_map; ++ ++/* A set of the stmts that we have already rebuilt with promoted variables. */ ++ ++static struct pointer_set_t *promoted_stmts; ++ ++ ++/* Add CASTED to PI->CAST_TYPES if we haven't seen CASTED before. */ ++ ++static void ++add_casted_type (struct promote_info *pi, tree casted) ++{ ++ int i; ++ tree type; ++ ++ /* For this information to be useful later, CASTED must be wider than ++ the type of the variable. 
*/ ++ if (TYPE_PRECISION (casted) <= TYPE_PRECISION (TREE_TYPE (pi->var_decl))) ++ return; ++ ++ for (i = 0; VEC_iterate (tree, pi->cast_types, i, type); i++) ++ if (types_compatible_p (casted, type)) ++ { ++ int c = VEC_index(int, pi->cast_counts, i); ++ VEC_replace(int, pi->cast_counts, i, ++c); ++ return; ++ } ++ ++ /* Haven't see the type before. */ ++ VEC_safe_push (tree, heap, pi->cast_types, casted); ++ VEC_safe_push (int, heap, pi->cast_counts, 1); ++} ++ ++/* Return the most-casted-to type in PI->CAST_TYPES. Return an ++ appropriately signed variant of size_type_node if the variable wasn't ++ cast in some fashion. */ ++ ++static tree ++choose_profitable_promoted_type (struct promote_info *pi) ++{ ++ int i; ++ int count; ++ tree type = NULL_TREE; ++ int maxuse = -1; ++ ++ for (i = 0; VEC_iterate (int, pi->cast_counts, i, count); i++) ++ if (count > maxuse) ++ { ++ maxuse = count; ++ type = VEC_index (tree, pi->cast_types, i); ++ } ++ ++ if (type == NULL_TREE) ++ { ++ if (dump_file) ++ { ++ fprintf (dump_file, "Warning, failed to find upcast type for "); ++ print_generic_expr (dump_file, pi->loop_index_name, 0); ++ fprintf (dump_file, "\n"); ++ } ++ return (TYPE_UNSIGNED (TREE_TYPE (pi->var_decl)) ++ ? size_type_node ++ : signed_type_for (size_type_node)); ++ } ++ else ++ return signed_type_for (type); ++} ++ ++/* Intuit the loop index for LOOP from PHI. There must be a path that ++ only goes through NOP_EXPRs or CONVERT_EXPRs from the result of PHI ++ to one of the operands of COND. If such a path cannot be found, ++ return NULL_TREE. If LIMIT is not NULL and a path can be found, ++ store the other operand of COND into LIMIT. */ ++ ++static tree ++find_promotion_candidate_from_phi (struct loop *loop, tree cond, ++ tree phi, tree *limit) ++{ ++ tree op0, op1; ++ tree result, candidate; ++ ++ result = candidate = PHI_RESULT (phi); ++ /* Must be an integer variable. */ ++ if (TREE_CODE (TREE_TYPE (candidate)) != INTEGER_TYPE) ++ return NULL_TREE; ++ ++ op0 = TREE_OPERAND (cond, 0); ++ op1 = TREE_OPERAND (cond, 1); ++ ++ /* See if there's a path from CANDIDATE to an operand of COND. */ ++ while (true) ++ { ++ use_operand_p use; ++ imm_use_iterator iui; ++ tree use_stmt = NULL_TREE; ++ ++ if (candidate == op0) ++ { ++ if (limit) *limit = op1; ++ break; ++ } ++ if (candidate == op1) ++ { ++ if (limit) *limit = op0; ++ break; ++ } ++ ++ /* Find a single use in the loop header. Give up if there's ++ multiple ones. */ ++ FOR_EACH_IMM_USE_FAST (use, iui, candidate) ++ { ++ tree stmt = USE_STMT (use); ++ ++ if (bb_for_stmt (stmt) == loop->header) ++ { ++ if (use_stmt) ++ { ++ if (dump_file) ++ { ++ fprintf (dump_file, "Rejecting "); ++ print_generic_expr (dump_file, candidate, 0); ++ fprintf (dump_file, " because it has multiple uses in the loop header (bb #%d).\n", ++ loop->header->index); ++ fprintf (dump_file, "first use: "); ++ print_generic_expr (dump_file, use_stmt, 0); ++ fprintf (dump_file, "\nsecond use: "); ++ print_generic_expr (dump_file, stmt, 0); ++ fprintf (dump_file, "\n(possibly more, but unanalyzed)\n"); ++ } ++ return NULL_TREE; ++ } ++ else ++ use_stmt = stmt; ++ } ++ } ++ ++ /* No uses in the loop header, bail. 
*/ ++ if (use_stmt == NULL_TREE) ++ return NULL_TREE; ++ ++ if (TREE_CODE (use_stmt) != GIMPLE_MODIFY_STMT ++ || TREE_CODE (GIMPLE_STMT_OPERAND (use_stmt, 0)) != SSA_NAME ++ || (TREE_CODE (GIMPLE_STMT_OPERAND (use_stmt, 1)) != NOP_EXPR ++ && TREE_CODE (GIMPLE_STMT_OPERAND (use_stmt, 1)) != CONVERT_EXPR)) ++ { ++ if (dump_file) ++ { ++ fprintf (dump_file, "Rejecting "); ++ print_generic_expr (dump_file, candidate, 0); ++ fprintf (dump_file, " because of use in "); ++ print_generic_expr (dump_file, use_stmt, 0); ++ fprintf (dump_file, "\n"); ++ } ++ return NULL_TREE; ++ } ++ ++ candidate = GIMPLE_STMT_OPERAND (use_stmt, 0); ++ } ++ ++ /* CANDIDATE is now what we believe to be the loop index variable. There ++ are two possibilities: ++ ++ - CANDIDATE is not the "true" loop index variable, but rather is a ++ promoted version of RESULT, done for purposes of satisfying a ++ language's semantics; ++ ++ - CANDIDATE is the "true" loop index variable. */ ++ if (!types_compatible_p (TREE_TYPE (result), TREE_TYPE (candidate))) ++ candidate = result; ++ ++ /* The type of candidate must be "short" to consider promoting it. */ ++ if (TREE_CODE (TREE_TYPE (candidate)) != INTEGER_TYPE ++ || TYPE_PRECISION (TREE_TYPE (candidate)) >= TYPE_PRECISION (size_type_node)) ++ return NULL_TREE; ++ ++ return candidate; ++} ++ ++/* Find the loop index variable of LOOP. LOOP's exit is controlled by ++ the COND_EXPR EXPR. IF we can't determine what the loop index ++ variable is, or EXPR does not appear to be analyzable, then return ++ NULL_TREE. */ ++ ++static tree ++find_promotion_candidate (struct loop *loop, tree expr, tree *limit) ++{ ++ tree cond = COND_EXPR_COND (expr); ++ tree phi; ++ tree candidate = NULL_TREE; ++ ++ switch (TREE_CODE (cond)) ++ { ++ case GT_EXPR: ++ case GE_EXPR: ++ case NE_EXPR: ++ case LT_EXPR: ++ case LE_EXPR: ++ break; ++ ++ default: ++ return NULL_TREE; ++ } ++ ++ /* We'd like to examine COND and intuit the loop index variable from ++ there. Instead, we're going to start from the phi nodes in BB and ++ attempt to work our way forwards to one of the operands of COND, ++ since starting from COND might yield an upcast loop index. If we ++ find multiple phi nodes whose results reach COND, then give up. */ ++ for (phi = phi_nodes (loop->header); phi != NULL_TREE; phi = PHI_CHAIN (phi)) ++ { ++ tree t = find_promotion_candidate_from_phi (loop, cond, phi, limit); ++ ++ if (t == NULL_TREE) ++ continue; ++ else if (candidate == NULL_TREE) ++ candidate = t; ++ else ++ { ++ if (dump_file) ++ { ++ fprintf (dump_file, "Can't find a candidate from "); ++ print_generic_expr (dump_file, expr, 0); ++ fprintf (dump_file, "\n because too many phi node results reach the condition.\n"); ++ } ++ return NULL_TREE; ++ } ++ } ++ ++ return candidate; ++} ++ ++/* Return true if X is something that could be promoted. */ ++ ++static bool ++could_be_promoted (tree x) ++{ ++ return (TREE_CODE (x) == INTEGER_CST ++ || (TREE_CODE (x) == SSA_NAME ++ && pointer_set_contains (promotable_names, x))); ++} ++ ++/* Examine EXPR's suitability with respect to being able to promote VAR. ++ ASSIGNED_TO is true if EXPR is being assigned to VAR; otherwise, EXPR ++ contains a use of VAR. 
*/ ++ ++static bool ++check_expr_for_promotability (struct promote_info *pi, tree var, ++ tree expr, bool assigning_to) ++{ ++ tree type = TREE_TYPE (expr); ++ bool ok = true; ++ ++ switch (TREE_CODE (expr)) ++ { ++ case PLUS_EXPR: ++ case MINUS_EXPR: ++ case MULT_EXPR: ++ case EQ_EXPR: ++ case NE_EXPR: ++ case LT_EXPR: ++ case LE_EXPR: ++ case GT_EXPR: ++ case GE_EXPR: ++ { ++ tree op0 = TREE_OPERAND (expr, 0); ++ tree op1 = TREE_OPERAND (expr, 1); ++ ++ ok = ((op0 == var && could_be_promoted (op1)) ++ || (op1 == var && could_be_promoted (op0))); ++ break; ++ } ++ case COND_EXPR: ++ if (TREE_TYPE (expr) == NULL ++ || TREE_TYPE (expr) == void_type_node) ++ ok = true; ++ else ++ /* This is conservative; it's possible that these sorts of nodes ++ could be promoted, but we'd have to be very careful about ++ checking in which parts of the COND_EXPR the promotable ++ variable(s) are. */ ++ ok = false; ++ break; ++ case SSA_NAME: ++ ok = (expr == var || could_be_promoted (expr)); ++ break; ++ case NOP_EXPR: ++ case CONVERT_EXPR: ++ if (!assigning_to) ++ { ++ add_casted_type (pi, type); ++ break; ++ } ++ /* Fallthrough. */ ++ default: ++ ok = false; ++ } ++ ++ return ok; ++} ++ ++/* Analyze the loop index VAR for promotability. The rules for ++ promotability are: ++ ++ For uses: ++ ++ - The underlying variable may be used in NOP_EXPRs. ++ ++ - The underlying variable may be used in simple arithmmetic ++ expressions so long as the other parts are potentially promotable ++ variables or constants (so we don't go willy-nilly on promoting ++ things). ++ ++ - The underlying variable may not be stored to memory. ++ ++ - All uses must occur inside the loop. ++ ++ For defs: ++ ++ - The underlying variable may not be loaded from memory; and ++ ++ - The underlying variable may only be formed from expressions ++ involving potentially promotable varibles or constants. ++ ++ Note that defs may occur outside of the loop; we do this to handle ++ initial conditions before entering the loop. */ ++ ++static void ++analyze_loop_index_uses (tree var, struct promote_info *pi) ++{ ++ imm_use_iterator iui; ++ use_operand_p use; ++ tree rhs; ++ tree bad_stmt = NULL_TREE; ++ const char *reason = NULL; ++ ++ FOR_EACH_IMM_USE_FAST (use, iui, var) ++ { ++ basic_block bb; ++ tree use_stmt = USE_STMT (use); ++ ++ /* Uses must exist only within the loop. */ ++ bb = bb_for_stmt (use_stmt); ++ ++ if (dump_file) ++ { ++ fprintf (dump_file, "Checking "); ++ print_generic_expr (dump_file, use_stmt, 0); ++ fprintf (dump_file, "\n"); ++ } ++ ++ if (!flow_bb_inside_loop_p (pi->loop, bb)) ++ { ++ bad_stmt = use_stmt; ++ reason = " is involved in stmt outside loop "; ++ break; ++ } ++ ++ /* We cannot store the index to memory. */ ++ if (stmt_references_memory_p (use_stmt)) ++ { ++ bad_stmt = use_stmt; ++ reason = " is stored to memory in "; ++ break; ++ } ++ ++ /* We cannot pass the variable to a function. 
*/ ++ if (get_call_expr_in (use_stmt)) ++ { ++ bad_stmt = use_stmt; ++ reason = " is passed to function in "; ++ break; ++ } ++ ++ if (TREE_CODE (use_stmt) == GIMPLE_MODIFY_STMT) ++ { ++ tree lhs = GIMPLE_STMT_OPERAND (use_stmt, 0); ++ rhs = GIMPLE_STMT_OPERAND (use_stmt, 1); ++ ++ if (!check_expr_for_promotability (pi, var, rhs, ++ /*is_assign=*/false)) ++ { ++ bad_stmt = rhs; ++ reason = " is involved in non-promotable expression "; ++ break; ++ } ++ else if ((TREE_CODE_CLASS (TREE_CODE (rhs)) == tcc_binary ++ || TREE_CODE (rhs) == SSA_NAME) ++ && !check_expr_for_promotability (pi, var, lhs, ++ /*is_assign=*/true)) ++ { ++ bad_stmt = lhs; ++ reason = " is being assigned to non-promotable variable "; ++ break; ++ } ++ } ++ else if (TREE_CODE (use_stmt) != COND_EXPR ++ && TREE_CODE (use_stmt) != PHI_NODE) ++ { ++ /* Use of the variable in some statement we don't know how to ++ analyze. */ ++ bad_stmt = use_stmt; ++ reason = " is used in unanalyzable expression in "; ++ break; ++ } ++ } ++ ++ if (bad_stmt && reason) ++ { ++ if (dump_file) ++ { ++ fprintf (dump_file, "Loop index "); ++ print_generic_expr (dump_file, var, 0); ++ fprintf (dump_file, "%s", reason); ++ print_generic_expr (dump_file, bad_stmt, 0); ++ fprintf (dump_file, "\n"); ++ } ++ pi->can_be_promoted_p = false; ++ } ++} ++ ++/* Check that the uses and def of VAR, defined in STMT, conform to the ++ rules given above. */ ++ ++static bool ++analyze_loop_index (tree var, tree stmt, void *data) ++{ ++ struct promote_info *pi = data; ++ tree t; ++ ++ if (dump_file) ++ { ++ fprintf (dump_file, "Analyzing loop index "); ++ print_generic_expr (dump_file, var, 0); ++ fprintf (dump_file, " defined in "); ++ print_generic_expr (dump_file, stmt, 0); ++ fprintf (dump_file, "\n"); ++ } ++ ++ /* Check the definition. */ ++ switch (TREE_CODE (stmt)) ++ { ++ case PHI_NODE: ++ /* Phi nodes are OK. */ ++ break; ++ ++ case GIMPLE_MODIFY_STMT: ++ t = GIMPLE_STMT_OPERAND (stmt, 1); ++ if (!check_expr_for_promotability (pi, var, t, ++ /*is_assign=*/true)) ++ break; ++ /* Fallthrough. */ ++ ++ default: ++ /* Something we can't handle or the variable is being loaded from ++ memory. */ ++ pi->can_be_promoted_p = false; ++ goto done; ++ } ++ ++ if (TREE_CODE (stmt) == PHI_NODE) ++ { ++ int i; ++ ++ for (i = 0; i < PHI_NUM_ARGS (stmt); i++) ++ { ++ tree arg = PHI_ARG_DEF (stmt, i); ++ ++ if (TREE_CODE (arg) == SSA_NAME) ++ pointer_set_insert (promotable_names, arg); ++ } ++ ++ analyze_loop_index_uses (PHI_RESULT (stmt), pi); ++ } ++ else ++ analyze_loop_index_uses (var, pi); ++ ++ /* Only worth continuing if we think the loop index can be ++ promoted. */ ++ done: ++ if (dump_file) ++ { ++ fprintf (dump_file, "Done analyzing "); ++ print_generic_expr (dump_file, var, 0); ++ fprintf (dump_file, " defined in "); ++ print_generic_expr (dump_file, stmt, 0); ++ fprintf (dump_file, "...%s to analyze\n\n", ++ pi->can_be_promoted_p ? "continuing" : "not continuing"); ++ } ++ return !pi->can_be_promoted_p; ++} ++ ++/* Check for the idiom: ++ ++ short x, y; ++ unsigned short x.2, y.2, tmp; ++ ... ++ x.2 = (unsigned short) x; ++ y.2 = (unsigned short) y; ++ tmp = x.2 + y.2; ++ x = (short) tmp; ++ ++ which is generated by convert for avoiding signed arithmetic ++ overflow. RHS is "(short) tmp" in the above statement. If RHS is ++ defined via such an idiom, store x and y into OP0 and OP1, ++ respectively. We permit y.2 to be a constant if necessary. 
*/ ++ ++static tree ++upcast_operand_p (tree t) ++{ ++ tree def, nop; ++ ++ if (TREE_CODE (t) == INTEGER_CST) ++ return t; ++ ++ if (TREE_CODE (t) != SSA_NAME ++ || !has_single_use (t)) ++ return NULL_TREE; ++ ++ def = SSA_NAME_DEF_STMT (t); ++ if (TREE_CODE (def) != GIMPLE_MODIFY_STMT) ++ return NULL_TREE; ++ ++ nop = GIMPLE_STMT_OPERAND (def, 1); ++ if (TREE_CODE (nop) != CONVERT_EXPR ++ && TREE_CODE (nop) != NOP_EXPR) ++ return NULL_TREE; ++ ++ return TREE_OPERAND (nop, 0); ++} ++ ++static bool ++signed_arithmetic_overflow_idiom_p (tree rhs, tree *op0, tree *op1) ++{ ++ tree tmp = TREE_OPERAND (rhs, 0); ++ tree op_stmt = SSA_NAME_DEF_STMT (tmp); ++ tree expr, x2, y2; ++ bool yes = false; ++ enum tree_code code; ++ ++ if (!has_single_use (tmp) ++ || TREE_CODE (op_stmt) != GIMPLE_MODIFY_STMT) ++ goto done; ++ expr = GIMPLE_STMT_OPERAND (op_stmt, 1); ++ ++ /* This could probably profitably be expanded to consider ++ MINUS_EXPR, MULT_EXPR, etc. */ ++ code = TREE_CODE (expr); ++ if (code != PLUS_EXPR) ++ goto done; ++ x2 = TREE_OPERAND (expr, 0); ++ y2 = TREE_OPERAND (expr, 1); ++ ++ x2 = upcast_operand_p (x2); ++ if (x2 == NULL_TREE) ++ goto done; ++ y2 = upcast_operand_p (y2); ++ if (y2 == NULL_TREE) ++ goto done; ++ ++ *op0 = x2; ++ *op1 = y2; ++ yes = true; ++ ++ done: ++ return yes; ++} ++ ++/* The loop index should have a specific usage pattern: ++ ++ - It should be defined in a phi node with two incoming values: ++ ++ LI_phi = PHI (LI_out, LI_in) ++ ++ - One incoming value, LI_out, should be from outside the loop. ++ ++ - The other incoming value, LI_in, should be defined thusly: ++ ++ LI_in = LI_phi + increment ++ ++ - increment should be 1. We permit other increments with ++ -funsafe-loop-optimizations. ++ ++ - Finally, in the comparison to exit the loop, the loop index must be ++ compared against a variable that has a type at least as precise as ++ the loop index's type. For instance, something like: ++ ++ char limit; ++ short i; ++ ++ for (i = 0; i < limit; i++) ... ++ ++ would not be permitted. */ ++ ++static bool ++stmt_in_loop_p (tree t, struct loop *loop) ++{ ++ basic_block bb; ++ ++ if (t == NULL_TREE) ++ return false; ++ ++ bb = bb_for_stmt (t); ++ if (bb == NULL) ++ return false; ++ ++ return flow_bb_inside_loop_p (loop, bb); ++} ++ ++static bool ++analyze_loop_index_definition_pattern (struct promote_info *pi) ++{ ++ tree phi = SSA_NAME_DEF_STMT (pi->loop_index_name); ++ bool ok = false, warn = false; ++ tree in0, in1; ++ bool inside0, inside1; ++ tree def0, def1, rhs, op0, op1, increment = NULL_TREE; ++ ++ if (TREE_CODE (phi) != PHI_NODE ++ || PHI_NUM_ARGS (phi) != 2) ++ goto done; ++ ++ in0 = PHI_ARG_DEF (phi, 0); ++ in1 = PHI_ARG_DEF (phi, 1); ++ ++ /* Figure out which value comes from outside the loop. */ ++ def0 = SSA_NAME_DEF_STMT (in0); ++ def1 = SSA_NAME_DEF_STMT (in1); ++ ++ inside0 = stmt_in_loop_p (def0, pi->loop); ++ inside1 = stmt_in_loop_p (def1, pi->loop); ++ ++ if (inside0 && inside1) ++ goto done; ++ else if (inside0) ++ { ++ tree t = in0; ++ in0 = in1; ++ in1 = t; ++ t = def0; ++ def0 = def1; ++ def1 = t; ++ } ++ else if (!inside1) ++ goto done; ++ ++ /* IN0 comes from outside the loop, IN1 from inside. Analyze IN1. 
*/ ++ if (TREE_CODE (def1) != GIMPLE_MODIFY_STMT) ++ goto done; ++ ++ rhs = GIMPLE_STMT_OPERAND (def1, 1); ++ ++ switch (TREE_CODE (rhs)) ++ { ++ case CONVERT_EXPR: ++ case NOP_EXPR: ++ if (!signed_arithmetic_overflow_idiom_p (rhs, &op0, &op1)) ++ goto done; ++ goto plus; ++ case PLUS_EXPR: ++ op0 = TREE_OPERAND (rhs, 0); ++ op1 = TREE_OPERAND (rhs, 1); ++ plus: ++ { ++ bool op0_li = op0 == PHI_RESULT (phi); ++ bool op1_li = op1 == PHI_RESULT (phi); ++ if (op0_li && op1_li) ++ /* This is weird, and definitely is not a case we can support ++ for promotion. */ ++ goto done; ++ else if (op0_li) ++ increment = op1; ++ else if (op1_li) ++ increment = op0; ++ else ++ goto done; ++ break; ++ } ++ default: ++ break; ++ } ++ ++ ++ /* Check that the exit condition for the loop is OK. */ ++ { ++ tree cond = COND_EXPR_COND (pi->exit_expr); ++ enum tree_code code = TREE_CODE (cond); ++ ++ op0 = TREE_OPERAND (cond, 0); ++ op1 = TREE_OPERAND (cond, 1); ++ ++ if (op0 == pi->loop_limit) ++ { ++ tree t = op0; ++ op0 = op1; ++ op1 = t; ++ code = swap_tree_comparison (code); ++ } ++ ++ if (code != LT_EXPR && code != LE_EXPR) ++ goto done; ++ ++ if (!types_compatible_p (TREE_TYPE (pi->loop_index_name), ++ TREE_TYPE (pi->loop_limit))) ++ { ++ switch (TREE_CODE (pi->loop_limit)) ++ { ++ case INTEGER_CST: ++ if (!int_fits_type_p (pi->loop_limit, ++ TREE_TYPE (pi->loop_index_name))) ++ goto done; ++ break; ++ case SSA_NAME: ++ { ++ tree v = pi->loop_limit; ++ tree def = SSA_NAME_DEF_STMT (v); ++ ++ /* Backtrack through CONVERT_EXPRs and/or NOP_EXPRs to ++ determine if the variables "started out" as the same ++ type. */ ++ while (TREE_CODE (def) == GIMPLE_MODIFY_STMT) ++ { ++ tree rhs = GIMPLE_STMT_OPERAND (def, 1); ++ ++ if (TREE_CODE (rhs) != NOP_EXPR ++ && TREE_CODE (rhs) != CONVERT_EXPR) ++ break; ++ ++ v = TREE_OPERAND (rhs, 0); ++ def = SSA_NAME_DEF_STMT (v); ++ } ++ /* Permit comparisons between non-compatible types with ++ flag_unsafe_loop_optimizations, since we can assume the ++ loop index does not overflow. */ ++ if (types_compatible_p (TREE_TYPE (pi->loop_index_name), ++ TREE_TYPE (v)) ++ || flag_unsafe_loop_optimizations) ++ break; ++ /* Fallthrough. */ ++ default: ++ goto done; ++ } ++ } ++ } ++ } ++ ++ if (increment == NULL_TREE) ++ goto done; ++ if (TREE_CODE (increment) != INTEGER_CST ++ || compare_tree_int (increment, 1) != 0) ++ { ++ warn = true; ++ if (!flag_unsafe_loop_optimizations) ++ goto done; ++ } ++ ++ ok = true; ++ done: ++ if (warn && !pi->warned) ++ { ++ pi->warned = true; ++ /* We can promote unsigned indices only if -funsafe-loop-optimizations ++ is in effect, since the user might be depending on the modulo ++ wraparound behavior of unsigned types. */ ++ if (warn_unsafe_loop_optimizations) ++ { ++ const char *wording; ++ ++ wording = (flag_unsafe_loop_optimizations ++ ? N_("assuming that the loop counter does not overflow") ++ : N_("cannot optimize loop, the loop counter may overflow")); ++ warning (OPT_Wunsafe_loop_optimizations, "%s", gettext (wording)); ++ } ++ } ++ ++ return ok; ++} ++ ++/* Analyze the loop associated with PI_ to see if its loop index can be ++ promoted. */ ++ ++static bool ++analyze_loop (const void *pi_, void *data) ++{ ++ struct promote_info *pi = CONST_CAST (struct promote_info *, ++ (const struct promote_info *) pi_); ++ bool *changed = data; ++ ++ /* We previously determined we can't promote this; go ahead and ++ continue iterating. 
*/ ++ if (pi->loop_index_name == NULL_TREE) ++ return true; ++ ++ /* Assume we can always promote the loop index, even if it doesn't ++ exist. */ ++ pi->can_be_promoted_p = true; ++ ++ if (dump_file) ++ { ++ fprintf (dump_file, "Analyzing "); ++ print_generic_expr (dump_file, pi->loop_index_name, 0); ++ fprintf (dump_file, "\n"); ++ } ++ ++ if (pi->loop_index_name ++ && analyze_loop_index_definition_pattern (pi)) ++ { ++ /* Clear any previously gathered information. */ ++ VEC_truncate (tree, pi->cast_types, 0); ++ VEC_truncate (int, pi->cast_counts, 0); ++ ++ walk_use_def_chains (pi->loop_index_name, analyze_loop_index, pi, false); ++ } ++ else ++ pi->can_be_promoted_p = false; ++ ++ /* If we determined the loop index is used in strange ways, clear it ++ so we don't examine it again. */ ++ if (!pi->can_be_promoted_p) ++ pi->loop_index_name = NULL_TREE; ++ ++ /* Let our caller know whether to re-do the analysis. */ ++ *changed = *changed || !pi->can_be_promoted_p; ++ /* Continue if PI is promotable. */ ++ return pi->can_be_promoted_p; ++} ++ ++/* Add PI_->LOOP_INDEX_NAME to the set of variables, DATA, that we are ++ considering for promotion. */ ++ ++static bool ++add_variable (const void *pi_, void *data ATTRIBUTE_UNUSED) ++{ ++ const struct promote_info *pi = (const struct promote_info *) pi_; ++ struct pointer_set_t *pset = (struct pointer_set_t *) data; ++ int presentp; ++ ++ if (pi->loop_index_name != NULL_TREE) ++ { ++ presentp = pointer_set_insert (pset, pi->loop_index_name); ++ gcc_assert (!presentp); ++ } ++ ++ /* Continue traversal. */ ++ return true; ++} ++ ++/* For each promotable variable: ++ ++ - create a new, promoted VAR_DECL; ++ ++ - walk through all the uses and defs and create new statements using ++ the promoted variables. We don't create new phi nodes; post-pass ++ SSA update will handle those for us. */ ++ ++/* Make dump files readable. */ ++#define PROMOTED_VAR_SUFFIX ".promoted" ++ ++/* Create a variable NAME with TYPE and do the necessary work to inform ++ the SSA machinery about it. */ ++ ++static tree ++create_pli_var (tree type, char *name) ++{ ++ tree var = create_tmp_var (type, name); ++ create_var_ann (var); ++ mark_sym_for_renaming (var); ++ add_referenced_var (var); ++ return var; ++} ++ ++/* Associate the SSA_NAME VAR with the promoted variable DATA. */ ++ ++static bool ++associate_name_with_var (tree var, tree def_stmt, void *data) ++{ ++ tree promoted_var = (tree) data; ++ void **p; ++ ++ gcc_assert (promoted_var != NULL_TREE); ++ ++ if (TREE_CODE (def_stmt) == PHI_NODE) ++ var = PHI_RESULT (def_stmt); ++ ++ p = pointer_map_insert (variable_map, var); ++ ++ if (!*p) ++ { ++ if (dump_file) ++ { ++ fprintf (dump_file, "Associating "); ++ print_generic_expr (dump_file, var, 0); ++ fprintf (dump_file, " with "); ++ print_generic_expr (dump_file, promoted_var, 0); ++ fprintf (dump_file, "\n\n"); ++ } ++ *(tree *)p = promoted_var; ++ } ++ ++ /* Continue traversal. */ ++ return false; ++} ++ ++/* Create a promoted variable for the variable from PI_. 
*/ ++ ++static bool ++create_promoted_variable (const void *pi_, void *data ATTRIBUTE_UNUSED) ++{ ++ struct promote_info *pi = CONST_CAST (struct promote_info *, ++ (const struct promote_info *) pi_); ++ ++ if (pi->can_be_promoted_p) ++ { ++ tree type = choose_profitable_promoted_type (pi); ++ tree orig_name = DECL_NAME (pi->var_decl); ++ size_t id_len = IDENTIFIER_LENGTH (orig_name); ++ size_t name_len = id_len + strlen (PROMOTED_VAR_SUFFIX) + 1; ++ char *name; ++ ++ name = alloca (name_len); ++ strcpy (name, IDENTIFIER_POINTER (orig_name)); ++ strcpy (name + id_len, PROMOTED_VAR_SUFFIX); ++ ++ pi->promoted_type = type; ++ pi->promoted_var = create_pli_var (type, name); ++ ++ if (dump_file) ++ { ++ fprintf (dump_file, "Created new variable "); ++ print_generic_expr (dump_file, pi->promoted_var, 0); ++ fprintf (dump_file, " to stand in for "); ++ print_generic_expr (dump_file, pi->loop_index_name, 0); ++ fprintf (dump_file, "\n\n"); ++ } ++ ++ walk_use_def_chains (pi->loop_index_name, ++ associate_name_with_var, ++ pi->promoted_var, false); ++ } ++ ++ /* Continue traversal. */ ++ return true; ++} ++ ++/* Rebuild T with newly promoted variables; STMT is the original ++ statement in which T appeared and may be equivalent to T. TYPE is ++ non-null when rebuilding the rhs of a GIMPLE_MODIFY_STMT and ++ indicates the type of the lhs. */ ++ ++static tree ++rebuild_with_promotion_1 (tree t, tree stmt, tree type, ++ block_stmt_iterator bsi, ++ struct promote_info *pi) ++{ ++ tree op0, op1; ++ ++ switch (TREE_CODE (t)) ++ { ++ case GIMPLE_MODIFY_STMT: ++ { ++ tree orig_op0 = GIMPLE_STMT_OPERAND (t, 0); ++ tree orig_op1 = GIMPLE_STMT_OPERAND (t, 1); ++ tree x, y; ++ void **v; ++ ++ /* If we are defining a promotable variable, check for special ++ idioms. */ ++ v = pointer_map_contains (variable_map, orig_op0); ++ if (v != NULL ++ && *(tree *)v == pi->promoted_var ++ && (TREE_CODE (orig_op1) == NOP_EXPR ++ || TREE_CODE (orig_op1) == CONVERT_EXPR) ++ && signed_arithmetic_overflow_idiom_p (orig_op1, &x, &y)) ++ { ++ tree tmp = TREE_OPERAND (orig_op1, 0); ++ void **xp; ++ void **yp; ++ ++ if (TYPE_PRECISION (TREE_TYPE (tmp)) ++ >= TYPE_PRECISION (pi->promoted_type)) ++ goto done; ++ ++ /* It's possible that we've already promoted the operands of ++ one or both of the NOP_EXPRs. In that case, we can ++ bypass the logic below and go straight to rebuilding the ++ rhs that we really want to transform. */ ++ if (TREE_CODE (x) == VAR_DECL ++ || TREE_CODE (y) == VAR_DECL) ++ goto build_fake; ++ xp = pointer_map_contains (variable_map, x); ++ yp = pointer_map_contains (variable_map, y); ++ ++ /* Nothing to see here. */ ++ if (!types_compatible_p (TREE_TYPE (x), ++ TREE_TYPE (y)) ++ || (xp == NULL && yp == NULL)) ++ goto done; ++ x = (xp == NULL ? NULL_TREE : *(tree *)xp); ++ y = (yp == NULL ? NULL_TREE : *(tree *)yp); ++ ++ if (x != pi->promoted_var && y != pi->promoted_var) ++ goto done; ++ ++ ++ build_fake: ++ orig_op1 = build2 (PLUS_EXPR, TREE_TYPE (x), x, y); ++ if (dump_file) ++ { ++ fprintf (dump_file, "Substituting "); ++ print_generic_expr (dump_file, orig_op1, 0); ++ fprintf (dump_file, " for rhs of original statement\n"); ++ } ++ done: ++ ; ++ } ++ ++ op0 = rebuild_with_promotion_1 (orig_op0, stmt, type, bsi, pi); ++ op1 = rebuild_with_promotion_1 (orig_op1, stmt, TREE_TYPE (op0), bsi, pi); ++ /* Something must have been rebuilt. 
*/ ++ gcc_assert ((op0 != orig_op0) || (op1 != orig_op1)); ++ if (op0 != orig_op0) ++ GIMPLE_STMT_OPERAND (t, 0) = op0; ++ if (op1 != orig_op1) ++ GIMPLE_STMT_OPERAND (t, 1) = op1; ++ return t; ++ } ++ case NOP_EXPR: ++ case CONVERT_EXPR: ++ { ++ tree pvar = rebuild_with_promotion_1 (TREE_OPERAND (t, 0), stmt, type, bsi, pi); ++ ++ if (types_compatible_p (type, TREE_TYPE (pvar))) ++ return pvar; ++ else ++ return build1 (TREE_CODE (t), type, pvar); ++ } ++ case INTEGER_CST: ++ { ++ return build_int_cst_wide (pi->promoted_type, ++ TREE_INT_CST_LOW (t), ++ TREE_INT_CST_HIGH (t)); ++ } ++ case COND_EXPR: ++ { ++ tree orig_op0 = TREE_OPERAND (t, 0); ++ op0 = rebuild_with_promotion_1 (orig_op0, stmt, type, bsi, pi); ++ gcc_assert (orig_op0 != op0); ++ TREE_OPERAND (t, 0) = op0; ++ return t; ++ } ++ case PLUS_EXPR: ++ case MINUS_EXPR: ++ case MULT_EXPR: ++ type = pi->promoted_type; ++ goto binary_expr; ++ case EQ_EXPR: ++ case NE_EXPR: ++ case LT_EXPR: ++ case LE_EXPR: ++ case GT_EXPR: ++ case GE_EXPR: ++ type = TREE_TYPE (t); ++ binary_expr: ++ op0 = TREE_OPERAND (t, 0); ++ op1 = TREE_OPERAND (t, 1); ++ op0 = rebuild_with_promotion_1 (op0, stmt, type, bsi, pi); ++ op1 = rebuild_with_promotion_1 (op1, stmt, type, bsi, pi); ++ return build2 (TREE_CODE (t), type, op0, op1); ++ case SSA_NAME: ++ { ++ void **p = pointer_map_contains (variable_map, t); ++ ++ if (p == NULL) ++ { ++ /* This is unexpected, but it does happen if we were dealing ++ with COND_EXPRs and such. Just go ahead and create a ++ temporary for it. */ ++ if (types_compatible_p (TREE_TYPE (t), pi->promoted_type) ++ || SSA_NAME_DEF_STMT (t) == stmt) ++ return t; ++ else ++ goto insert_cast; ++ } ++ else ++ return *(tree *)p; ++ } ++ case VAR_DECL: ++ return t; ++ default: ++ insert_cast: ++ { ++ tree tmp, nop, cast; ++ tree to_upcast = t; ++ ++ /* If we are dealing with a memory reference, then we can't have ++ wrap it in a NOP_EXPR; we need to load the value from memory ++ first, then convert it. */ ++ if (!is_gimple_reg (to_upcast)) ++ { ++ tree tmp = create_pli_var (TREE_TYPE (to_upcast), ++ CONST_CAST (char *, "loadtmp")); ++ tree stmt = build_gimple_modify_stmt (tmp, to_upcast); ++ bsi_insert_before (&bsi, stmt, BSI_SAME_STMT); ++ to_upcast = tmp; ++ } ++ ++ tmp = create_pli_var (pi->promoted_type, ++ CONST_CAST (char *, "promotetmp")); ++ nop = build1 (NOP_EXPR, pi->promoted_type, to_upcast); ++ cast = build_gimple_modify_stmt (tmp, nop); ++ if (dump_file) ++ { ++ fprintf (dump_file, "Inserting cast "); ++ print_generic_expr (dump_file, cast, 0); ++ fprintf (dump_file, " prior to "); ++ print_generic_expr (dump_file, stmt, 0); ++ fprintf (dump_file, "\n"); ++ } ++ bsi_insert_before (&bsi, cast, BSI_SAME_STMT); ++ return tmp; ++ } ++ } ++} ++ ++/* Rebuild STMT, which contains uses or a def of the promotable variable ++ associated with PI. */ ++ ++static void ++rebuild_with_promotion (tree stmt, struct promote_info *pi) ++{ ++ tree rebuilt; ++ block_stmt_iterator bsi; ++ ++ if (pointer_set_insert (promoted_stmts, stmt)) ++ return; ++ ++ if (dump_file) ++ { ++ fprintf (dump_file, "Rebuilding stmt "); ++ print_generic_expr (dump_file, stmt, 0); ++ fprintf (dump_file, "\n"); ++ } ++ ++ bsi = bsi_for_stmt (stmt); ++ rebuilt = rebuild_with_promotion_1 (stmt, stmt, NULL, bsi, pi); ++ if (dump_file) ++ { ++ fprintf (dump_file, "Converted stmt "); ++ print_generic_expr (dump_file, rebuilt, 0); ++ fprintf (dump_file, "\n\n"); ++ } ++ update_stmt (rebuilt); ++} ++ ++/* Helper function for promote_variable that walks over use/def ++ chains. 
*/ ++ ++static bool ++promote_variable_1 (tree var, tree stmt, void *data) ++{ ++ struct promote_info *pi = (struct promote_info *) data; ++ imm_use_iterator imi; ++ tree use_stmt; ++ ++ /* Due to the way walk_use_def_chains works, when STMT is a PHI_NODE, ++ VAR is actually an argument to the phi node, not the result of it. ++ Rebuild uses of the phi node's result after handle integer constant ++ inputs to the phi node. */ ++ if (TREE_CODE (stmt) == PHI_NODE) ++ { ++ if (TREE_CODE (var) == INTEGER_CST) ++ { ++ edge e = loop_preheader_edge (pi->loop); ++ basic_block preheader = e->src; ++ block_stmt_iterator bsi = bsi_last (preheader); ++ tree cst = build_int_cst_wide (pi->promoted_type, ++ TREE_INT_CST_LOW (var), ++ TREE_INT_CST_HIGH (var)); ++ tree assign = build_gimple_modify_stmt (pi->promoted_var, cst); ++ bsi_insert_after (&bsi, assign, BSI_NEW_STMT); ++ } ++ var = PHI_RESULT (stmt); ++ } ++ else ++ rebuild_with_promotion (stmt, pi); ++ ++ FOR_EACH_IMM_USE_STMT (use_stmt, imi, var) ++ { ++ if (TREE_CODE (use_stmt) != PHI_NODE) ++ rebuild_with_promotion (use_stmt, pi); ++ } ++ ++ return false; ++} ++ ++/* Convert all uses and defs of PI_->LOOP_INDEX_NAME as linked by ++ use-def chains to uses and defs of PI_->PROMOTED_VAR. */ ++ ++static bool ++promote_variable (const void *pi_, void *data ATTRIBUTE_UNUSED) ++{ ++ const struct promote_info *pi = (const struct promote_info *) pi_; ++ ++ if (pi->can_be_promoted_p) ++ { ++ walk_use_def_chains (pi->loop_index_name, promote_variable_1, ++ CONST_CAST (struct promote_info *, pi), false); ++ } ++ ++ /* Continue traversal. */ ++ return true; ++} ++ ++/* Free PI_ and its associated data. */ ++ ++static bool ++free_pi_entries (const void *pi_, void *data ATTRIBUTE_UNUSED) ++{ ++ struct promote_info *pi = CONST_CAST (struct promote_info *, ++ (const struct promote_info *) pi_); ++ ++ VEC_free (tree, heap, pi->cast_types); ++ VEC_free (int, heap, pi->cast_counts); ++ free (pi); ++ ++ /* Continue traversal. */ ++ return true; ++} ++ ++/* Collect information about variables that we believe to be loop ++ indices in PROMOTION_INFO. */ ++ ++static void ++collect_promotion_candidates (void) ++{ ++ loop_iterator li; ++ struct loop *loop; ++ ++ FOR_EACH_LOOP (li, loop, 0) ++ { ++ basic_block header = loop->header; ++ tree exit_cond = last_stmt (header); ++ ++ if (exit_cond && TREE_CODE (exit_cond) == COND_EXPR) ++ { ++ tree loop_index; ++ tree limit; ++ struct promote_info *pi; ++ ++ loop_index = find_promotion_candidate (loop, exit_cond, &limit); ++ if (loop_index == NULL_TREE) ++ continue; ++ ++ if (dump_file) ++ { ++ fprintf (dump_file, "Found loop index "); ++ print_generic_expr (dump_file, loop_index, 0); ++ fprintf (dump_file, " involved in "); ++ print_generic_expr (dump_file, exit_cond, 0); ++ fprintf (dump_file, "\n\n"); ++ } ++ ++ pi = XCNEW (struct promote_info); ++ pi->loop = loop; ++ pi->exit_expr = exit_cond; ++ pi->loop_index_name = loop_index; ++ pi->loop_limit = limit; ++ pi->var_decl = SSA_NAME_VAR (loop_index); ++ /* We think so, anyway... */ ++ pi->can_be_promoted_p = true; ++ pointer_set_insert (promotion_info, pi); ++ } ++ else if (dump_file) ++ { ++ fprintf (dump_file, "\nSkipping analysis of loop %d (header bb #%d)\n", ++ loop->num, loop->header->index); ++ if (exit_cond) ++ { ++ fprintf (dump_file, "Exit condition was "); ++ print_generic_expr (dump_file, exit_cond, 0); ++ fprintf (dump_file, "\n"); ++ } ++ } ++ } ++} ++ ++/* Free memory associated with global variables that we used. 
*/ ++ ++static void ++pli_cleanup (void) ++{ ++ if (promoted_stmts) ++ { ++ pointer_set_destroy (promoted_stmts); ++ promoted_stmts = NULL; ++ } ++ if (variable_map) ++ { ++ pointer_map_destroy (variable_map); ++ variable_map = NULL; ++ } ++ if (promotable_names) ++ { ++ pointer_set_destroy (promotable_names); ++ promotable_names = NULL; ++ } ++ if (promotion_info) ++ { ++ pointer_set_traverse (promotion_info, free_pi_entries, NULL); ++ pointer_set_destroy (promotion_info); ++ promotion_info = NULL; ++ } ++} ++ ++/* The guts of the pass. */ ++ ++static unsigned int ++promote_short_indices (void) ++{ ++ bool did_something = false; ++ bool changed; ++ size_t max_iterations, i, n_promoted; ++ ++ promotion_info = pointer_set_create (); ++ collect_promotion_candidates (); ++ ++ if (dump_file) ++ fprintf (dump_file, "Found %d candidates for promotion\n", ++ (int) pointer_set_n_elements (promotion_info)); ++ ++ /* Nothing to do. */ ++ if (pointer_set_n_elements (promotion_info) == 0) ++ goto cleanup; ++ ++ /* We have information about which variables are loop index variables. ++ We now need to determine the promotability of the loop indices. ++ Since the promotability of loop indices may depend on other loop ++ indices, we need to repeat this until we reach a fixed point. */ ++ changed = true; ++ max_iterations = pointer_set_n_elements (promotion_info); ++ i = 0; ++ ++ promotable_names = pointer_set_create (); ++ ++ while (changed) ++ { ++ changed = false; ++ pointer_set_clear (promotable_names); ++ pointer_set_traverse (promotion_info, add_variable, ++ promotable_names); ++ n_promoted = pointer_set_n_elements (promotable_names); ++ ++ if (dump_file) ++ fprintf (dump_file, "\nIteration %d, have %d variables to consider\n", ++ (int) i, (int) n_promoted); ++ ++ if (n_promoted == 0) ++ break; ++ gcc_assert (i < max_iterations); ++ pointer_set_traverse (promotion_info, analyze_loop, &changed); ++ i++; ++ } ++ ++ if (dump_file) ++ fprintf (dump_file, "Promoting %d variables\n", ++ (int) n_promoted); ++ ++ if (n_promoted != 0) ++ { ++ did_something = true; ++ variable_map = pointer_map_create (); ++ promoted_stmts = pointer_set_create (); ++ pointer_set_traverse (promotion_info, create_promoted_variable, NULL); ++ pointer_set_traverse (promotion_info, promote_variable, NULL); ++ } ++ ++ cleanup: ++ pli_cleanup (); ++ return did_something ? TODO_update_ssa : 0; ++} ++ ++/* Entry point for the short loop index promotion pass. */ ++ ++static unsigned int ++tree_short_index_promotion (void) ++{ ++ unsigned int changed = 0; ++ ++ /* Initialize all the necessary loop infrastructure. */ ++ loop_optimizer_init (LOOPS_HAVE_PREHEADERS | LOOPS_HAVE_SIMPLE_LATCHES | LOOPS_HAVE_RECORDED_EXITS); ++ add_noreturn_fake_exit_edges (); ++ connect_infinite_loops_to_exit (); ++ ++ if (number_of_loops () > 1) ++ changed = promote_short_indices (); ++ ++ /* Tear down loop optimization infrastructure. 
*/ ++ remove_fake_exit_edges (); ++ free_numbers_of_iterations_estimates (); ++ loop_optimizer_finalize (); ++ ++ return changed; ++} ++ ++static bool ++gate_short_index_promotion (void) ++{ ++ return flag_promote_loop_indices; ++} ++ ++struct tree_opt_pass pass_promote_short_indices = ++{ ++ "promoteshort", /* name */ ++ gate_short_index_promotion, /* gate */ ++ tree_short_index_promotion, /* execute */ ++ NULL, /* sub */ ++ NULL, /* next */ ++ 0, /* static_pass_number */ ++ TV_TREE_LOOP_PROMOTE, /* tv_id */ ++ PROP_cfg | PROP_ssa, /* properties_required */ ++ 0, /* properties_provided */ ++ 0, /* properties_destroyed */ ++ 0, /* todo_flags_start */ ++ TODO_dump_func | TODO_verify_loops ++ | TODO_ggc_collect, /* todo_flags_finish */ ++ 0 /* letter */ ++}; +--- a/gcc/tree-ssa-pre.c ++++ b/gcc/tree-ssa-pre.c +@@ -2006,7 +2006,7 @@ compute_antic (void) + fprintf (dump_file, "Starting iteration %d\n", num_iterations); + num_iterations++; + changed = false; +- for (i = 0; i < last_basic_block - NUM_FIXED_BLOCKS; i++) ++ for (i = 0; i < n_basic_blocks - NUM_FIXED_BLOCKS; i++) + { + if (TEST_BIT (changed_blocks, postorder[i])) + { +@@ -2038,7 +2038,7 @@ compute_antic (void) + fprintf (dump_file, "Starting iteration %d\n", num_iterations); + num_iterations++; + changed = false; +- for (i = 0; i < last_basic_block - NUM_FIXED_BLOCKS; i++) ++ for (i = 0; i < n_basic_blocks - NUM_FIXED_BLOCKS; i++) + { + if (TEST_BIT (changed_blocks, postorder[i])) + { +@@ -2345,6 +2345,10 @@ create_expression_by_pieces (basic_block + tree op2 = TREE_OPERAND (expr, 1); + tree genop1 = find_or_generate_expression (block, op1, stmts); + tree genop2 = find_or_generate_expression (block, op2, stmts); ++ /* Ensure op2 is a sizetype for POINTER_PLUS_EXPR. It ++ may be a constant with the wrong type. */ ++ if (TREE_CODE(expr) == POINTER_PLUS_EXPR) ++ genop2 = fold_convert (sizetype, genop2); + folded = fold_build2 (TREE_CODE (expr), TREE_TYPE (expr), + genop1, genop2); + break; +--- /dev/null ++++ b/gcc/tree-ssa-remove-local-statics.c +@@ -0,0 +1,813 @@ ++/* Local static variable elimination pass. ++ Copyright (C) 2007 Free Software Foundation, Inc. ++ Contributed by Nathan Froyd ++ ++This file is part of GCC. ++ ++GCC is free software; you can redistribute it and/or modify it ++under the terms of the GNU General Public License as published by the ++Free Software Foundation; either version 3, or (at your option) any ++later version. ++ ++GCC is distributed in the hope that it will be useful, but WITHOUT ++ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++for more details. ++ ++You should have received a copy of the GNU General Public License ++along with GCC; see the file COPYING3. If not see ++. */ ++ ++/* Converting static function-local variables to automatic variables. ++ ++ The motivating example is a function like: ++ ++ void ++ foo (unsigned n) ++ { ++ static int var; ++ unsigned i; ++ ++ for (i = 0; i != n; i++) ++ { ++ var = ... ++ ++ do other things with var... ++ } ++ } ++ ++ Because VAR is static, doing things like code motion to loads and ++ stores of VAR is difficult. Furthermore, accesses to VAR are ++ inefficient. This pass aims to recognize the cases where it is not ++ necessary for VAR to be static and modify the code so that later ++ passes will do the appropriate optimizations. ++ ++ The criteria for a static function-local variable V in a function F ++ being converted to an automatic variable are: ++ ++ 1. 
F does not call setjmp; and ++ 2. V's address is never taken; and ++ 3. V is not declared volatile; and ++ 4. V is not used in any nested function; ++ 5. V is not an aggregate value (union, struct, array, etc.); and ++ 6. Every use of V is defined along all paths leading to the use. ++ ++ NOTE: For ease of implementation, we currently treat a function call ++ as killing all previous definitions of static variables, since we ++ could have: ++ ++ static void ++ foo (...) ++ { ++ static int x; ++ ++ x = ...; (1) ++ ++ f (...); (2) ++ ++ ... = x; (3) ++ } ++ ++ The use at (3) needs to pick up a possible definition made by the ++ call at (2). If the call at (2) does not call back into 'foo', ++ then the call is not a killing call. We currently treat it as ++ though it is. */ ++ ++#include "config.h" ++#include "system.h" ++#include "coretypes.h" ++#include "tm.h" ++ ++#include "rtl.h" ++#include "tm_p.h" ++#include "hard-reg-set.h" ++#include "obstack.h" ++#include "basic-block.h" ++ ++#include "tree.h" ++#include "tree-gimple.h" ++#include "hashtab.h" ++#include "diagnostic.h" ++#include "tree-flow.h" ++#include "tree-dump.h" ++#include "flags.h" ++#include "timevar.h" ++#include "tree-pass.h" ++ ++struct rls_decl_info ++{ ++ /* The variable declaration. */ ++ tree orig_var; ++ ++ /* Its index in rls_block_local_data. */ ++ int index; ++ ++ /* Whether we can optimize this variable. */ ++ bool optimizable_p; ++ ++ /* The new variable declaration, if we can optimize away the staticness ++ of 'orig_var'. */ ++ tree new_var; ++}; ++ ++/* Filled with 'struct rls_decl_info'; keyed off ORIG_VAR. */ ++static htab_t static_variables; ++ ++struct rls_stmt_info ++{ ++ /* The variable declaration. */ ++ tree var; ++ ++ /* The statement in which we found a def or a use of the variable. */ ++ tree stmt; ++ ++ /* Whether STMT represents a use of VAR. */ ++ bool use_p; ++ ++ /* A bitmap whose entries denote what variables have been defined ++ when execution arrives at STMT. This field is only used when ++ USE_P is true. */ ++ sbitmap defined; ++}; ++ ++/* Filled with 'struct rls_stmt_info'; keyed off STMT. */ ++static htab_t defuse_statements; ++ ++static struct ++{ ++ /* The number of static variables we found. */ ++ size_t n_statics; ++ ++ /* The number of optimizable variables we found. */ ++ size_t n_optimizable; ++} stats; ++ ++struct rls_block_dataflow_data { ++ /* A bitmap whose entries denote what variables have been defined on ++ entry to this block. */ ++ sbitmap defined_in; ++ ++ /* A bitmap whose entries denote what variables have been defined on ++ exit from this block. */ ++ sbitmap defined_out; ++}; ++ ++/* Parameters for the 'static_variables' hash table. */ ++ ++static hashval_t ++rls_hash_decl_info (const void *x) ++{ ++ return htab_hash_pointer ++ ((const void *) ((const struct rls_decl_info *) x)->orig_var); ++} ++ ++static int ++rls_eq_decl_info (const void *x, const void *y) ++{ ++ const struct rls_decl_info *a = x; ++ const struct rls_decl_info *b = y; ++ ++ return a->orig_var == b->orig_var; ++} ++ ++static void ++rls_free_decl_info (void *info) ++{ ++ free (info); ++} ++ ++/* Parameters for the 'defuse_statements' hash table. 
*/ ++ ++static hashval_t ++rls_hash_use_info (const void *x) ++{ ++ return htab_hash_pointer ++ ((const void *) ((const struct rls_stmt_info *) x)->stmt); ++} ++ ++static int ++rls_eq_use_info (const void *x, const void *y) ++{ ++ const struct rls_stmt_info *a = x; ++ const struct rls_stmt_info *b = y; ++ ++ return a->stmt == b->stmt; ++} ++ ++static void ++rls_free_use_info (void *info) ++{ ++ struct rls_stmt_info *stmt_info = info; ++ ++ if (stmt_info->defined) ++ sbitmap_free (stmt_info->defined); ++ ++ free (stmt_info); ++} ++ ++/* Initialize data structures and statistics. */ ++ ++static void ++rls_init (void) ++{ ++ basic_block bb; ++ ++ /* We expect relatively few static variables, hence the small ++ initial size for the hash table. */ ++ static_variables = htab_create (8, rls_hash_decl_info, ++ rls_eq_decl_info, rls_free_decl_info); ++ ++ /* We expect quite a few statements. */ ++ defuse_statements = htab_create (128, rls_hash_use_info, ++ rls_eq_use_info, rls_free_use_info); ++ ++ FOR_ALL_BB (bb) ++ { ++ struct rls_block_dataflow_data *data; ++ ++ data = XNEW (struct rls_block_dataflow_data); ++ memset (data, 0, sizeof (*data)); ++ bb->aux = data; ++ } ++ ++ stats.n_statics = 0; ++ stats.n_optimizable = 0; ++} ++ ++/* Free data structures. */ ++ ++static void ++rls_done (void) ++{ ++ basic_block bb; ++ ++ htab_delete (static_variables); ++ htab_delete (defuse_statements); ++ ++ FOR_ALL_BB (bb) ++ { ++ struct rls_block_dataflow_data *data = bb->aux; ++ ++ gcc_assert (data); ++ ++ if (data->defined_in) ++ sbitmap_free (data->defined_in); ++ if (data->defined_out) ++ sbitmap_free (data->defined_out); ++ free (data); ++ bb->aux = NULL; ++ } ++} ++ ++ ++/* Doing the initial work to find static variables. */ ++ ++/* Examine the defining statement for VAR and determine whether it is a ++ static variable we could potentially optimize. If so, stick in it ++ in the 'static_variables' hashtable. ++ ++ STMT is the statement in which a definition or use of VAR occurs. ++ USE_P indicates whether VAR is used or defined in STMT. Enter STMT ++ into 'defuse_statements' as well for use during dataflow ++ analysis. */ ++ ++static void ++maybe_discover_new_declaration (tree var, tree stmt, bool use_p) ++{ ++ tree def_stmt = SSA_NAME_VAR (var); ++ ++ if (TREE_CODE (def_stmt) == VAR_DECL ++ && DECL_CONTEXT (def_stmt) != NULL_TREE ++ && TREE_CODE (DECL_CONTEXT (def_stmt)) == FUNCTION_DECL ++ && TREE_STATIC (def_stmt) ++ /* We cannot optimize away aggregate statics, as we would have to ++ prove that definitions of every field of the aggregate dominate ++ uses. */ ++ && !AGGREGATE_TYPE_P (TREE_TYPE (def_stmt)) ++ /* GCC doesn't normally treat vectors as aggregates; we need to, ++ though, since a user could use intrinsics to read/write ++ particular fields of the vector, thereby treating it as an ++ array. */ ++ && TREE_CODE (TREE_TYPE (def_stmt)) != VECTOR_TYPE ++ && !TREE_ADDRESSABLE (def_stmt) ++ && !TREE_THIS_VOLATILE (def_stmt)) ++ { ++ struct rls_decl_info dummy; ++ void **slot; ++ ++ dummy.orig_var = def_stmt; ++ slot = htab_find_slot (static_variables, &dummy, INSERT); ++ ++ if (*slot == NULL) ++ { ++ /* Found a use or a def of a new declaration. */ ++ struct rls_decl_info *info = XNEW (struct rls_decl_info); ++ ++ info->orig_var = def_stmt; ++ info->index = stats.n_statics++; ++ /* Optimistically assume that we can optimize. */ ++ info->optimizable_p = true; ++ info->new_var = NULL_TREE; ++ *slot = (void *) info; ++ } ++ ++ /* Enter the statement into DEFUSE_STATEMENTS. 
*/ ++ { ++ struct rls_stmt_info dummy; ++ struct rls_stmt_info *info; ++ ++ dummy.stmt = stmt; ++ slot = htab_find_slot (defuse_statements, &dummy, INSERT); ++ ++ /* We should never insert the same statement into the ++ hashtable twice. */ ++ gcc_assert (*slot == NULL); ++ ++ info = XNEW (struct rls_stmt_info); ++ info->var = def_stmt; ++ info->stmt = stmt; ++ if (dump_file) ++ { ++ fprintf (dump_file, "entering as %s ", use_p ? "use" : "def"); ++ print_generic_stmt (dump_file, stmt, TDF_DETAILS); ++ } ++ info->use_p = use_p; ++ /* We don't know how big to make the bitmap yet. */ ++ info->defined = NULL; ++ *slot = (void *) info; ++ } ++ } ++} ++ ++/* Grovel through all the statements in the program, looking for ++ SSA_NAMEs whose SSA_NAME_VAR is a VAR_DECL. We look at both use and ++ def SSA_NAMEs. */ ++ ++static void ++find_static_nonvolatile_declarations (void) ++{ ++ basic_block bb; ++ ++ FOR_EACH_BB (bb) ++ { ++ block_stmt_iterator i; ++ ++ for (i = bsi_start (bb); !bsi_end_p (i); bsi_next (&i)) ++ { ++ tree var; ++ ssa_op_iter iter; ++ tree stmt = bsi_stmt (i); ++ ++ /* If there's a call expression in STMT, then previous passes ++ will have determined if the call transitively defines some ++ static variable. However, we need more precise ++ information--we need to know whether static variables are ++ live out after the call. ++ ++ Since we'll never see something like: ++ ++ staticvar = foo (bar, baz); ++ ++ in GIMPLE (the result of the call will be assigned to a ++ normal, non-static local variable which is then assigned to ++ STATICVAR in a subsequent statement), don't bother finding ++ new declarations if we see a CALL_EXPR. */ ++ if (get_call_expr_in (stmt) == NULL_TREE) ++ FOR_EACH_SSA_TREE_OPERAND (var, stmt, iter, SSA_OP_VDEF) ++ { ++ maybe_discover_new_declaration (var, stmt, false); ++ } ++ ++ FOR_EACH_SSA_TREE_OPERAND (var, stmt, iter, SSA_OP_VUSE) ++ { ++ maybe_discover_new_declaration (var, stmt, true); ++ } ++ } ++ } ++} ++ ++ ++/* Determining if we have anything to optimize. */ ++ ++/* Examine *SLOT (which is a 'struct rls_decl_info *') to see whether ++ the associated variable is optimizable. If it is, create a new, ++ non-static declaration for the variable; this new variable will be ++ used during a subsequent rewrite of the function. */ ++ ++#define NEW_VAR_PREFIX ".unstatic" ++ ++static int ++maybe_create_new_variable (void **slot, void *data ATTRIBUTE_UNUSED) ++{ ++ struct rls_decl_info *info = *slot; ++ tree id_node = DECL_NAME (info->orig_var); ++ size_t id_len = IDENTIFIER_LENGTH (id_node); ++ size_t name_len = id_len + strlen (NEW_VAR_PREFIX) + 1; ++ char *name; ++ ++ /* Don't create a new variable multiple times. */ ++ gcc_assert (!info->new_var); ++ ++ /* Tie the new name to the old one to aid debugging dumps. */ ++ name = alloca (name_len); ++ strcpy (name, IDENTIFIER_POINTER (id_node)); ++ strcpy (name + id_len, NEW_VAR_PREFIX); ++ info->new_var = create_tmp_var (TREE_TYPE (info->orig_var), name); ++ ++ if (dump_file) ++ { ++ fprintf (dump_file, "new variable "); ++ print_generic_stmt (dump_file, info->new_var, TDF_DETAILS); ++ } ++ ++ /* Inform SSA about this new variable. */ ++ create_var_ann (info->new_var); ++ mark_sym_for_renaming (info->new_var); ++ add_referenced_var (info->new_var); ++ ++ /* Always continue scanning. */ ++ return 1; ++} ++ ++#undef NEW_VAR_PREFIX ++ ++/* Traverse the 'defuse_statements' hash table. For every use, ++ determine if the associated variable is defined along all paths ++ leading to said use. 
Remove the associated variable from ++ 'static_variables' if it is not. */ ++ ++static int ++check_definedness (void **slot, void *data ATTRIBUTE_UNUSED) ++{ ++ struct rls_stmt_info *info = *slot; ++ struct rls_decl_info dummy; ++ ++ /* We don't need to look at definitions. Continue scanning. */ ++ if (!info->use_p) ++ return 1; ++ ++ dummy.orig_var = info->var; ++ slot = htab_find_slot (static_variables, &dummy, INSERT); ++ ++ /* Might not be there because we deleted it already. */ ++ if (*slot) ++ { ++ struct rls_decl_info *decl = *slot; ++ ++ if (!TEST_BIT (info->defined, decl->index)) ++ { ++ if (dump_file) ++ { ++ fprintf (dump_file, "not optimizing "); ++ print_generic_stmt (dump_file, decl->orig_var, TDF_DETAILS); ++ fprintf (dump_file, "due to uncovered use in "); ++ print_generic_stmt (dump_file, info->stmt, TDF_DETAILS); ++ fprintf (dump_file, "\n"); ++ } ++ ++ htab_clear_slot (static_variables, slot); ++ stats.n_optimizable--; ++ } ++ } ++ ++ /* Continue scan. */ ++ return 1; ++} ++ ++/* Check all statements in 'defuse_statements' to see if all the ++ statements that use a static variable have that variable defined ++ along all paths leading to the statement. Once that's done, go ++ through and create new, non-static variables for any static variables ++ that can be optimized. */ ++ ++static size_t ++determine_optimizable_statics (void) ++{ ++ htab_traverse (defuse_statements, check_definedness, NULL); ++ ++ htab_traverse (static_variables, maybe_create_new_variable, NULL); ++ ++ return stats.n_optimizable; ++} ++ ++/* Look at STMT to see if we have uses or defs of a static variable. ++ STMT is passed in DATA. Definitions of a static variable are found ++ by the presence of a V_MUST_DEF, while uses are found by the presence ++ of a VUSE. */ ++ ++static int ++unstaticize_variable (void **slot, void *data) ++{ ++ struct rls_decl_info *info = *slot; ++ tree stmt = (tree) data; ++ tree vdef; ++ tree vuse; ++ ++ /* We should have removed unoptimizable variables during an earlier ++ traversal. */ ++ gcc_assert (info->optimizable_p); ++ ++ /* Check for virtual definitions first. */ ++ vdef = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_VDEF); ++ ++ if (vdef != NULL ++ && ZERO_SSA_OPERANDS (stmt, SSA_OP_DEF) ++ && TREE_CODE (stmt) == GIMPLE_MODIFY_STMT ++ && TREE_CODE (GIMPLE_STMT_OPERAND (stmt, 0)) == VAR_DECL ++ && GIMPLE_STMT_OPERAND (stmt, 0) == info->orig_var) ++ { ++ /* Make the statement define the new name. The new name has ++ already been marked for renaming, so no need to do that ++ here. */ ++ GIMPLE_STMT_OPERAND (stmt, 0) = info->new_var; ++ ++ update_stmt (stmt); ++ ++ /* None of the other optimizable static variables can occur ++ in this statement. Stop the scan. */ ++ return 0; ++ } ++ ++ /* Check for virtual uses. */ ++ vuse = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_VUSE); ++ ++ if (vuse != NULL ++ && TREE_CODE (stmt) == GIMPLE_MODIFY_STMT ++ && TREE_CODE (GIMPLE_STMT_OPERAND (stmt, 1)) == VAR_DECL ++ && GIMPLE_STMT_OPERAND (stmt, 1) == info->orig_var) ++ { ++ /* Make the statement use the new name. */ ++ GIMPLE_STMT_OPERAND (stmt, 1) = info->new_var; ++ ++ update_stmt (stmt); ++ ++ /* None of the other optimizable static variables can occur ++ in this statement. Stop the scan. */ ++ return 0; ++ } ++ ++ /* Continue scanning. */ ++ return 1; ++} ++ ++/* Determine if we have any static variables we can optimize. If so, ++ replace any defs or uses of those variables in their defining/using ++ statements. 
*/ ++ ++static void ++maybe_remove_static_from_declarations (void) ++{ ++ size_t n_optimizable = determine_optimizable_statics (); ++ basic_block bb; ++ ++ if (n_optimizable) ++ /* Replace any optimizable variables with new, non-static variables. */ ++ FOR_EACH_BB (bb) ++ { ++ block_stmt_iterator bsi; ++ ++ for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi)) ++ { ++ tree stmt = bsi_stmt (bsi); ++ ++ htab_traverse (static_variables, unstaticize_variable, stmt); ++ } ++ } ++} ++ ++/* Callback for htab_traverse to initialize the bitmap for *SLOT, which ++ is a 'struct rls_stmt_info'. */ ++ ++static int ++initialize_statement_dataflow (void **slot, void *data ATTRIBUTE_UNUSED) ++{ ++ struct rls_stmt_info *info = *slot; ++ ++ gcc_assert (!info->defined); ++ ++ if (info->use_p) ++ { ++ info->defined = sbitmap_alloc (stats.n_statics); ++ /* Assume defined along all paths until otherwise informed. */ ++ sbitmap_ones (info->defined); ++ } ++ ++ /* Continue traversal. */ ++ return 1; ++} ++ ++/* We have N_STATICS static variables to consider. Go through all the ++ blocks and all the use statements to initialize their bitmaps. */ ++ ++static void ++initialize_block_and_statement_dataflow (size_t n_statics) ++{ ++ basic_block bb; ++ ++ FOR_ALL_BB (bb) ++ { ++ struct rls_block_dataflow_data *data = bb->aux; ++ ++ gcc_assert (data); ++ ++ data->defined_in = sbitmap_alloc (n_statics); ++ sbitmap_zero (data->defined_in); ++ data->defined_out = sbitmap_alloc (n_statics); ++ sbitmap_zero (data->defined_out); ++ } ++ ++ htab_traverse (defuse_statements, initialize_statement_dataflow, NULL); ++} ++ ++/* Apply the individual effects of the stmts in BB to update the ++ dataflow analysis information for BB. */ ++ ++static void ++compute_definedness_for_block (basic_block bb) ++{ ++ bool changed_p = false; ++ struct rls_block_dataflow_data *data = bb->aux; ++ block_stmt_iterator bsi; ++ ++ sbitmap_copy (data->defined_out, data->defined_in); ++ ++ for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi)) ++ { ++ tree stmt = bsi_stmt (bsi); ++ struct rls_stmt_info dummy; ++ void **slot; ++ ++ /* First see if this statement uses or defines a static variable. */ ++ dummy.stmt = stmt; ++ slot = htab_find_slot (defuse_statements, &dummy, INSERT); ++ ++ /* Check for uses. */ ++ if (*slot != NULL) ++ { ++ struct rls_stmt_info *info = *slot; ++ ++ gcc_assert (get_call_expr_in (stmt) == NULL_TREE); ++ ++ if (info->use_p) ++ { ++ gcc_assert (info->defined); ++ ++ /* Found a statement that uses a function-local static ++ variable. Copy the current state of definedness. */ ++ sbitmap_copy (info->defined, data->defined_out); ++ } ++ else ++ { ++ struct rls_decl_info dummy; ++ struct rls_decl_info *decl; ++ ++ gcc_assert (!info->defined); ++ ++ /* Found a statement that defines a function-local static ++ variable. Look up the associated variable's information ++ and mark it as defined in the block. */ ++ dummy.orig_var = info->var; ++ slot = htab_find_slot (static_variables, &dummy, INSERT); ++ ++ gcc_assert (*slot); ++ ++ decl = (struct rls_decl_info *) *slot; ++ ++ SET_BIT (data->defined_out, decl->index); ++ changed_p |= true; ++ } ++ } ++ else if (get_call_expr_in (stmt) != NULL_TREE) ++ /* If there's a call expression in STMT, then previous passes ++ will have determined if the call transitively defines some ++ static variable. However, we need more precise ++ information--we need to know whether static variables are ++ live out after the call. 
In the absence of such information, ++ simply declare that all static variables are clobbered by the ++ call. A better analysis would be interprocedural and compute ++ the liveness information we require, but for now, we're being ++ pessimistic. */ ++ sbitmap_zero (data->defined_out); ++ } ++} ++ ++/* Solve the dataflow equations: ++ ++ DEFINED_IN(b) = intersect DEFINED_OUT(p) for p in preds(b) ++ DEFINED_OUT(b) = VARIABLES_DEFINED (b, DEFINED_IN (b)) ++ ++ via a simple iterative solver. VARIABLES_DEFINED is computed by ++ 'compute_definedness_for_block'. */ ++ ++static void ++compute_definedness (void) ++{ ++ basic_block bb; ++ bool changed_p; ++ sbitmap tmp_bitmap = sbitmap_alloc (stats.n_statics); ++ ++ /* Compute initial sets. */ ++ FOR_EACH_BB (bb) ++ { ++ compute_definedness_for_block (bb); ++ } ++ ++ /* Iterate. */ ++ do { ++ changed_p = false; ++ ++ FOR_EACH_BB (bb) ++ { ++ edge e; ++ edge_iterator ei; ++ struct rls_block_dataflow_data *data = bb->aux; ++ bool bitmap_changed_p = false; ++ ++ sbitmap_ones (tmp_bitmap); ++ ++ gcc_assert (data); ++ ++ /* We require information about whether a variable was defined ++ over all paths leading to a particular use. Therefore, we ++ intersect the DEFINED sets of all predecessors. */ ++ FOR_EACH_EDGE (e, ei, bb->preds) ++ { ++ struct rls_block_dataflow_data *pred_data = e->src->aux; ++ ++ gcc_assert (pred_data); ++ ++ sbitmap_a_and_b (tmp_bitmap, tmp_bitmap, pred_data->defined_out); ++ } ++ ++ bitmap_changed_p = !sbitmap_equal (tmp_bitmap, data->defined_in); ++ ++ if (bitmap_changed_p) ++ { ++ sbitmap_copy (data->defined_in, tmp_bitmap); ++ compute_definedness_for_block (bb); ++ } ++ ++ changed_p |= bitmap_changed_p; ++ } ++ } while (changed_p); ++ ++ sbitmap_free (tmp_bitmap); ++} ++ ++static unsigned int ++execute_rls (void) ++{ ++ rls_init (); ++ ++ find_static_nonvolatile_declarations (); ++ ++ /* Can we optimize anything? */ ++ if (stats.n_statics != 0) ++ { ++ stats.n_optimizable = stats.n_statics; ++ ++ if (dump_file) ++ fprintf (dump_file, "found %d static variables to consider\n", ++ stats.n_statics); ++ ++ initialize_block_and_statement_dataflow (stats.n_statics); ++ ++ compute_definedness (); ++ ++ maybe_remove_static_from_declarations (); ++ ++ if (dump_file) ++ fprintf (dump_file, "removed %d static variables\n", ++ stats.n_optimizable); ++ } ++ ++ rls_done (); ++ ++ return 0; ++} ++ ++static bool ++gate_rls (void) ++{ ++ return (flag_remove_local_statics != 0 ++ && !current_function_calls_setjmp ++ && !cgraph_node (current_function_decl)->ever_was_nested); ++} ++ ++struct tree_opt_pass pass_remove_local_statics = ++{ ++ "remlocstatic", /* name */ ++ gate_rls, /* gate */ ++ execute_rls, /* execute */ ++ NULL, /* sub */ ++ NULL, /* next */ ++ 0, /* static_pass_number */ ++ TV_TREE_RLS, /* tv_id */ ++ PROP_cfg | PROP_ssa, /* properties_required */ ++ 0, /* properties_provided */ ++ 0, /* properties_destroyed */ ++ 0, /* todo_flags_start */ ++ TODO_dump_func | TODO_verify_ssa | TODO_verify_stmts ++ | TODO_update_ssa, /* todo_flags_finish */ ++ 0 /* letter */ ++}; +--- a/gcc/tree-ssa-sink.c ++++ b/gcc/tree-ssa-sink.c +@@ -458,6 +458,46 @@ sink_code_in_bb (basic_block bb) + last = false; + continue; + } ++ ++ /* We cannot move statements that contain references to block-scope ++ variables out of that block, as this may lead to incorrect aliasing ++ when we lay out the stack frame in cfgexpand.c. 
++ In lieu of more sophisticated analysis, be very conservative here ++ and prohibit moving any statement that references memory out of a ++ block with variables. */ ++ if (stmt_references_memory_p (stmt)) ++ { ++ tree fromblock = TREE_BLOCK (stmt); ++ while (fromblock ++ && fromblock != current_function_decl ++ && !BLOCK_VARS (fromblock)) ++ fromblock = BLOCK_SUPERCONTEXT (fromblock); ++ if (fromblock && fromblock != current_function_decl) ++ { ++ tree tostmt; ++ tree toblock; ++ if (bsi_end_p (tobsi)) ++ tostmt = last_stmt (tobb); ++ else ++ tostmt = bsi_stmt (tobsi); ++ if (tostmt) ++ toblock = TREE_BLOCK (tostmt); ++ else ++ toblock = NULL; ++ while (toblock ++ && toblock != current_function_decl ++ && toblock != fromblock) ++ toblock = BLOCK_SUPERCONTEXT (toblock); ++ if (!toblock || toblock != fromblock) ++ { ++ if (!bsi_end_p (bsi)) ++ bsi_prev (&bsi); ++ last = false; ++ continue; ++ } ++ } ++ } ++ + if (dump_file) + { + fprintf (dump_file, "Sinking "); +--- a/gcc/tree-vect-transform.c ++++ b/gcc/tree-vect-transform.c +@@ -1366,22 +1366,41 @@ vect_get_constant_vectors (slp_tree slp_ + tree stmt = VEC_index (tree, stmts, 0); + stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt); + tree vectype = STMT_VINFO_VECTYPE (stmt_vinfo); +- int nunits = TYPE_VECTOR_SUBPARTS (vectype); ++ int nunits; + tree vec_cst; + tree t = NULL_TREE; + int j, number_of_places_left_in_vector; + tree vector_type; +- tree op, vop, operation; ++ tree op, vop; + int group_size = VEC_length (tree, stmts); + unsigned int vec_num, i; + int number_of_copies = 1; +- bool is_store = false; + unsigned int number_of_vectors = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node); + VEC (tree, heap) *voprnds = VEC_alloc (tree, heap, number_of_vectors); +- bool constant_p; ++ bool constant_p, is_store; + ++ op = GIMPLE_STMT_OPERAND (stmt, 1); + if (STMT_VINFO_DATA_REF (stmt_vinfo)) + is_store = true; ++ else ++ { ++ is_store = false; ++ op = TREE_OPERAND (op, op_num); ++ } ++ ++ if (CONSTANT_CLASS_P (op)) ++ { ++ vector_type = vectype; ++ constant_p = true; ++ } ++ else ++ { ++ vector_type = get_vectype_for_scalar_type (TREE_TYPE (op)); ++ gcc_assert (vector_type); ++ constant_p = false; ++ } ++ ++ nunits = TYPE_VECTOR_SUBPARTS (vector_type); + + /* NUMBER_OF_COPIES is the number of times we need to use the same values in + created vectors. It is greater than 1 if unrolling is performed. +@@ -1402,18 +1421,13 @@ vect_get_constant_vectors (slp_tree slp_ + number_of_copies = least_common_multiple (nunits, group_size) / group_size; + + number_of_places_left_in_vector = nunits; +- constant_p = true; + for (j = 0; j < number_of_copies; j++) + { + for (i = group_size - 1; VEC_iterate (tree, stmts, i, stmt); i--) + { +- operation = GIMPLE_STMT_OPERAND (stmt, 1); +- if (is_store) +- op = operation; +- else +- op = TREE_OPERAND (operation, op_num); +- if (!CONSTANT_CLASS_P (op)) +- constant_p = false; ++ op = GIMPLE_STMT_OPERAND (stmt, 1); ++ if (!STMT_VINFO_DATA_REF (stmt_vinfo)) ++ op = TREE_OPERAND (op, op_num); + + /* Create 'vect_ = {op0,op1,...,opn}'. 
*/ + t = tree_cons (NULL_TREE, op, t); +@@ -1424,16 +1438,12 @@ vect_get_constant_vectors (slp_tree slp_ + { + number_of_places_left_in_vector = nunits; + +- vector_type = get_vectype_for_scalar_type (TREE_TYPE (op)); +- gcc_assert (vector_type); + if (constant_p) + vec_cst = build_vector (vector_type, t); + else + vec_cst = build_constructor_from_list (vector_type, t); +- constant_p = true; + VEC_quick_push (tree, voprnds, +- vect_init_vector (stmt, vec_cst, vector_type, +- NULL)); ++ vect_init_vector (stmt, vec_cst, vector_type, NULL)); + t = NULL_TREE; + } + } +@@ -1829,7 +1839,7 @@ vect_get_vec_def_for_operand (tree op, t + stmt_vec_info def_stmt_info = NULL; + stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt); + tree vectype = STMT_VINFO_VECTYPE (stmt_vinfo); +- int nunits = TYPE_VECTOR_SUBPARTS (vectype); ++ unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype); + loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo); + tree vec_inv; + tree vec_cst; +@@ -1878,16 +1888,17 @@ vect_get_vec_def_for_operand (tree op, t + { + t = tree_cons (NULL_TREE, op, t); + } +- vector_type = get_vectype_for_scalar_type (TREE_TYPE (op)); +- gcc_assert (vector_type); +- vec_cst = build_vector (vector_type, t); +- +- return vect_init_vector (stmt, vec_cst, vector_type, NULL); ++ vec_cst = build_vector (vectype, t); ++ return vect_init_vector (stmt, vec_cst, vectype, NULL); + } + + /* Case 2: operand is defined outside the loop - loop invariant. */ + case vect_invariant_def: + { ++ vector_type = get_vectype_for_scalar_type (TREE_TYPE (def)); ++ gcc_assert (vector_type); ++ nunits = TYPE_VECTOR_SUBPARTS (vector_type); ++ + if (scalar_def) + *scalar_def = def; + +@@ -1901,8 +1912,6 @@ vect_get_vec_def_for_operand (tree op, t + } + + /* FIXME: use build_constructor directly. */ +- vector_type = get_vectype_for_scalar_type (TREE_TYPE (def)); +- gcc_assert (vector_type); + vec_inv = build_constructor_from_list (vector_type, t); + return vect_init_vector (stmt, vec_inv, vector_type, NULL); + } +@@ -2167,6 +2176,7 @@ get_initial_def_for_reduction (tree stmt + struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); + tree vectype = STMT_VINFO_VECTYPE (stmt_vinfo); + int nunits = TYPE_VECTOR_SUBPARTS (vectype); ++ tree scalar_type = TREE_TYPE (vectype); + enum tree_code code = TREE_CODE (GIMPLE_STMT_OPERAND (stmt, 1)); + tree type = TREE_TYPE (init_val); + tree vecdef; +@@ -2174,7 +2184,6 @@ get_initial_def_for_reduction (tree stmt + tree init_def; + tree t = NULL_TREE; + int i; +- tree vector_type; + bool nested_in_vect_loop = false; + + gcc_assert (POINTER_TYPE_P (type) || INTEGRAL_TYPE_P (type) || SCALAR_FLOAT_TYPE_P (type)); +@@ -2195,15 +2204,14 @@ get_initial_def_for_reduction (tree stmt + else + *adjustment_def = init_val; + /* Create a vector of zeros for init_def. 
*/ +- if (SCALAR_FLOAT_TYPE_P (type)) +- def_for_init = build_real (type, dconst0); ++ if (SCALAR_FLOAT_TYPE_P (scalar_type)) ++ def_for_init = build_real (scalar_type, dconst0); + else +- def_for_init = build_int_cst (type, 0); ++ def_for_init = build_int_cst (scalar_type, 0); ++ + for (i = nunits - 1; i >= 0; --i) + t = tree_cons (NULL_TREE, def_for_init, t); +- vector_type = get_vectype_for_scalar_type (TREE_TYPE (def_for_init)); +- gcc_assert (vector_type); +- init_def = build_vector (vector_type, t); ++ init_def = build_vector (vectype, t); + break; + + case MIN_EXPR: +--- a/gcc/tree.c ++++ b/gcc/tree.c +@@ -4031,6 +4031,7 @@ handle_dll_attribute (tree * pnode, tree + bool *no_add_attrs) + { + tree node = *pnode; ++ bool is_dllimport; + + /* These attributes may apply to structure and union types being created, + but otherwise should pass to the declaration involved. */ +@@ -4078,9 +4079,11 @@ handle_dll_attribute (tree * pnode, tree + return NULL_TREE; + } + ++ is_dllimport = is_attribute_p ("dllimport", name); ++ + /* Report error on dllimport ambiguities seen now before they cause + any damage. */ +- else if (is_attribute_p ("dllimport", name)) ++ if (is_dllimport) + { + /* Honor any target-specific overrides. */ + if (!targetm.valid_dllimport_attribute_p (node)) +@@ -4122,6 +4125,9 @@ handle_dll_attribute (tree * pnode, tree + if (*no_add_attrs == false) + DECL_DLLIMPORT_P (node) = 1; + } ++ else if (DECL_DECLARED_INLINE_P (node)) ++ /* An exported function, even if inline, must be emitted. */ ++ DECL_EXTERNAL (node) = 0; + + /* Report error if symbol is not accessible at global scope. */ + if (!TREE_PUBLIC (node) +--- a/gcc/tree.h ++++ b/gcc/tree.h +@@ -399,7 +399,10 @@ struct tree_base GTY(()) + unsigned lang_flag_6 : 1; + unsigned visited : 1; + +- unsigned spare : 23; ++ /* For tree_type. */ ++ unsigned packed_flag : 1; ++ ++ unsigned spare : 22; + + /* FIXME tuples: Eventually, we need to move this somewhere external to + the trees. */ +@@ -2287,7 +2290,7 @@ struct tree_block GTY(()) + + /* Indicated that objects of this type should be laid out in as + compact a way as possible. */ +-#define TYPE_PACKED(NODE) (TYPE_CHECK (NODE)->type.packed_flag) ++#define TYPE_PACKED(NODE) (TYPE_CHECK (NODE)->common.base.packed_flag) + + /* Used by type_contains_placeholder_p to avoid recomputation. + Values are: 0 (unknown), 1 (false), 2 (true). Never access +@@ -2306,17 +2309,16 @@ struct tree_type GTY(()) + tree attributes; + unsigned int uid; + +- unsigned int precision : 9; +- ENUM_BITFIELD(machine_mode) mode : 7; +- +- unsigned string_flag : 1; ++ unsigned int precision : 10; + unsigned no_force_blk_flag : 1; + unsigned needs_constructing_flag : 1; + unsigned transparent_union_flag : 1; +- unsigned packed_flag : 1; + unsigned restrict_flag : 1; + unsigned contains_placeholder_bits : 2; + ++ ENUM_BITFIELD(machine_mode) mode : 7; ++ unsigned string_flag : 1; ++ + unsigned lang_flag_0 : 1; + unsigned lang_flag_1 : 1; + unsigned lang_flag_2 : 1; +--- a/gcc/unwind-dw2.c ++++ b/gcc/unwind-dw2.c +@@ -1402,16 +1402,12 @@ uw_advance_context (struct _Unwind_Conte + /* Fill in CONTEXT for top-of-stack. The only valid registers at this + level will be the return address and the CFA. */ + +-#define uw_init_context(CONTEXT) \ +- do \ +- { \ +- /* Do any necessary initialization to access arbitrary stack frames. \ +- On the SPARC, this means flushing the register windows. 
*/ \ +- __builtin_unwind_init (); \ +- uw_init_context_1 (CONTEXT, __builtin_dwarf_cfa (), \ +- __builtin_return_address (0)); \ +- } \ +- while (0) ++#define uw_init_context(CONTEXT) \ ++ /* Do any necessary initialization to access arbitrary stack frames. \ ++ On the SPARC, this means flushing the register windows. */ \ ++ (__builtin_unwind_init (), \ ++ uw_init_context_1 ((CONTEXT), __builtin_dwarf_cfa (), \ ++ __builtin_return_address (0))) + + static inline void + init_dwarf_reg_size_table (void) +@@ -1419,7 +1415,7 @@ init_dwarf_reg_size_table (void) + __builtin_init_dwarf_reg_size_table (dwarf_reg_size_table); + } + +-static void ++static _Unwind_Reason_Code + uw_init_context_1 (struct _Unwind_Context *context, + void *outer_cfa, void *outer_ra) + { +@@ -1433,7 +1429,8 @@ uw_init_context_1 (struct _Unwind_Contex + context->flags = EXTENDED_CONTEXT_BIT; + + code = uw_frame_state_for (context, &fs); +- gcc_assert (code == _URC_NO_REASON); ++ if (code != _URC_NO_REASON) ++ return code; + + #if __GTHREADS + { +@@ -1459,6 +1456,8 @@ uw_init_context_1 (struct _Unwind_Contex + initialization context, then we can't see it in the given + call frame data. So have the initialization context tell us. */ + context->ra = __builtin_extract_return_addr (outer_ra); ++ ++ return _URC_NO_REASON; + } + + +--- a/gcc/unwind-sjlj.c ++++ b/gcc/unwind-sjlj.c +@@ -297,10 +297,11 @@ uw_advance_context (struct _Unwind_Conte + uw_update_context (context, fs); + } + +-static inline void ++static inline _Unwind_Reason_Code + uw_init_context (struct _Unwind_Context *context) + { + context->fc = _Unwind_SjLj_GetContext (); ++ return _URC_NO_REASON; + } + + static void __attribute__((noreturn)) +--- a/gcc/unwind.inc ++++ b/gcc/unwind.inc +@@ -90,7 +90,8 @@ _Unwind_RaiseException(struct _Unwind_Ex + _Unwind_Reason_Code code; + + /* Set up this_context to describe the current stack frame. */ +- uw_init_context (&this_context); ++ code = uw_init_context (&this_context); ++ gcc_assert (code == _URC_NO_REASON); + cur_context = this_context; + + /* Phase 1: Search. 
Unwind the stack, calling the personality routine +@@ -203,7 +204,8 @@ _Unwind_ForcedUnwind (struct _Unwind_Exc + struct _Unwind_Context this_context, cur_context; + _Unwind_Reason_Code code; + +- uw_init_context (&this_context); ++ code = uw_init_context (&this_context); ++ gcc_assert (code == _URC_NO_REASON); + cur_context = this_context; + + exc->private_1 = (_Unwind_Ptr) stop; +@@ -226,7 +228,8 @@ _Unwind_Resume (struct _Unwind_Exception + struct _Unwind_Context this_context, cur_context; + _Unwind_Reason_Code code; + +- uw_init_context (&this_context); ++ code = uw_init_context (&this_context); ++ gcc_assert (code == _URC_NO_REASON); + cur_context = this_context; + + /* Choose between continuing to process _Unwind_RaiseException +@@ -256,7 +259,8 @@ _Unwind_Resume_or_Rethrow (struct _Unwin + if (exc->private_1 == 0) + return _Unwind_RaiseException (exc); + +- uw_init_context (&this_context); ++ code = uw_init_context (&this_context); ++ gcc_assert (code == _URC_NO_REASON); + cur_context = this_context; + + code = _Unwind_ForcedUnwind_Phase2 (exc, &cur_context); +@@ -285,7 +289,9 @@ _Unwind_Backtrace(_Unwind_Trace_Fn trace + struct _Unwind_Context context; + _Unwind_Reason_Code code; + +- uw_init_context (&context); ++ code = uw_init_context (&context); ++ if (code != _URC_NO_REASON) ++ return _URC_FATAL_PHASE1_ERROR; + + while (1) + { +--- a/gcc/varasm.c ++++ b/gcc/varasm.c +@@ -555,7 +555,7 @@ get_section (const char *name, unsigned + static bool + use_object_blocks_p (void) + { +- return flag_section_anchors; ++ return flag_section_anchors && flag_toplevel_reorder; + } + + /* Return the object_block structure for section SECT. Create a new +@@ -1095,13 +1095,12 @@ align_variable (tree decl, bool dont_out + /* On some machines, it is good to increase alignment sometimes. */ + if (! DECL_USER_ALIGN (decl)) + { +-#ifdef DATA_ALIGNMENT +- unsigned int data_align = DATA_ALIGNMENT (TREE_TYPE (decl), align); ++ unsigned int data_align = ++ calculate_global_alignment (TREE_TYPE (decl), align); + /* Don't increase alignment too much for TLS variables - TLS space + is too precious. */ + if (! DECL_THREAD_LOCAL_P (decl) || data_align <= BITS_PER_WORD) + align = data_align; +-#endif + #ifdef CONSTANT_ALIGNMENT + if (DECL_INITIAL (decl) != 0 && DECL_INITIAL (decl) != error_mark_node) + { +--- a/gcc/vmsdbgout.c ++++ b/gcc/vmsdbgout.c +@@ -211,6 +211,7 @@ const struct gcc_debug_hooks vmsdbg_debu + debug_nothing_int, /* handle_pch */ + debug_nothing_rtx, /* var_location */ + debug_nothing_void, /* switch_text_section */ ++ debug_nothing_tree_tree, /* set_name */ + 0 /* start_end_main_source_file */ + }; + +--- a/include/libiberty.h ++++ b/include/libiberty.h +@@ -583,6 +583,10 @@ extern int pexecute (const char *, char + + extern int pwait (int, int *, int); + ++/* Convert a Cygwin path to a Windows path. */ ++ ++extern int cygpath (const char *, char []); ++ + #if !HAVE_DECL_ASPRINTF + /* Like sprintf but provides a pointer to malloc'd storage, which must + be freed by the caller. 
*/ +--- a/libcpp/Makefile.in ++++ b/libcpp/Makefile.in +@@ -119,7 +119,7 @@ stamp-h1: $(srcdir)/config.in config.sta + -rm -f stamp-h1 + $(SHELL) ./config.status config.h + +-$(srcdir)/config.in: @MAINT@ $(srcdir)/configure ++$(srcdir)/config.in: @MAINT@ $(srcdir)/configure.ac + cd $(srcdir) && $(AUTOHEADER) + -rm -f stamp-h1 + +--- a/libcpp/configure ++++ b/libcpp/configure +@@ -8311,6 +8311,7 @@ case $target in + sparc64*-*-* | ultrasparc-*-freebsd* | \ + sparcv9-*-solaris2* | \ + sparc-*-solaris2.[789] | sparc-*-solaris2.1[0-9]* | \ ++ sparc-wrs-linux-gnu | \ + spu-*-* | \ + sh[123456789l]*-*-*) + need_64bit_hwint=yes ;; +--- a/libcpp/configure.ac ++++ b/libcpp/configure.ac +@@ -129,6 +129,7 @@ case $target in + sparc64*-*-* | ultrasparc-*-freebsd* | \ + sparcv9-*-solaris2* | \ + sparc-*-solaris2.[789] | sparc-*-solaris2.1[0-9]* | \ ++ sparc-wrs-linux-gnu | \ + spu-*-* | \ + sh[123456789l]*-*-*) + need_64bit_hwint=yes ;; +--- a/libcpp/lex.c ++++ b/libcpp/lex.c +@@ -1240,7 +1240,7 @@ cpp_token_len (const cpp_token *token) + + switch (TOKEN_SPELL (token)) + { +- default: len = 4; break; ++ default: len = 6; break; + case SPELL_LITERAL: len = token->val.str.len; break; + case SPELL_IDENT: len = NODE_LEN (token->val.node) * 10; break; + } +--- a/libffi/Makefile.am ++++ b/libffi/Makefile.am +@@ -156,7 +156,9 @@ nodist_libffi_convenience_la_SOURCES = $ + + AM_CFLAGS = -Wall -g -fexceptions + +-libffi_la_LDFLAGS = -version-info `grep -v '^\#' $(srcdir)/libtool-version` ++LTLDFLAGS = $(shell $(SHELL) $(top_srcdir)/../libtool-ldflags $(LDFLAGS)) ++ ++libffi_la_LDFLAGS = -version-info `grep -v '^\#' $(srcdir)/libtool-version` $(LTLDFLAGS) + + AM_CPPFLAGS = -I. -I$(top_srcdir)/include -Iinclude -I$(top_srcdir)/src + AM_CCASFLAGS = $(AM_CPPFLAGS) +--- a/libffi/Makefile.in ++++ b/libffi/Makefile.in +@@ -439,7 +439,8 @@ nodist_libffi_la_SOURCES = $(am__append_ + libffi_convenience_la_SOURCES = $(libffi_la_SOURCES) + nodist_libffi_convenience_la_SOURCES = $(nodist_libffi_la_SOURCES) + AM_CFLAGS = -Wall -g -fexceptions +-libffi_la_LDFLAGS = -version-info `grep -v '^\#' $(srcdir)/libtool-version` ++LTLDFLAGS = $(shell $(SHELL) $(top_srcdir)/../libtool-ldflags $(LDFLAGS)) ++libffi_la_LDFLAGS = -version-info `grep -v '^\#' $(srcdir)/libtool-version` $(LTLDFLAGS) + AM_CPPFLAGS = -I. -I$(top_srcdir)/include -Iinclude -I$(top_srcdir)/src + AM_CCASFLAGS = $(AM_CPPFLAGS) + all: fficonfig.h +--- a/libgcc/Makefile.in ++++ b/libgcc/Makefile.in +@@ -388,18 +388,24 @@ libgcc-s-objects += $(patsubst %,%_s$(ob + endif + endif + ++ifeq ($(LIB2_DIVMOD_EXCEPTION_FLAGS),) ++# Provide default flags for compiling divmod functions, if they haven't been ++# set already by a target-specific Makefile fragment. ++LIB2_DIVMOD_EXCEPTION_FLAGS := -fexceptions -fnon-call-exceptions ++endif ++ + # Build LIB2_DIVMOD_FUNCS. 
+ lib2-divmod-o = $(patsubst %,%$(objext),$(LIB2_DIVMOD_FUNCS)) + $(lib2-divmod-o): %$(objext): $(gcc_srcdir)/libgcc2.c + $(gcc_compile) -DL$* -c $(gcc_srcdir)/libgcc2.c \ +- -fexceptions -fnon-call-exceptions $(vis_hide) ++ $(LIB2_DIVMOD_EXCEPTION_FLAGS) $(vis_hide) + libgcc-objects += $(lib2-divmod-o) + + ifeq ($(enable_shared),yes) + lib2-divmod-s-o = $(patsubst %,%_s$(objext),$(LIB2_DIVMOD_FUNCS)) + $(lib2-divmod-s-o): %_s$(objext): $(gcc_srcdir)/libgcc2.c + $(gcc_s_compile) -DL$* -c $(gcc_srcdir)/libgcc2.c \ +- -fexceptions -fnon-call-exceptions ++ $(LIB2_DIVMOD_EXCEPTION_FLAGS) + libgcc-s-objects += $(lib2-divmod-s-o) + endif + +--- a/libgcc/config.host ++++ b/libgcc/config.host +@@ -223,12 +223,15 @@ arm*-*-netbsdelf*) + arm*-*-netbsd*) + ;; + arm*-*-linux*) # ARM GNU/Linux with ELF ++ tmake_file="${tmake_file} arm/t-divmod-ef" + ;; + arm*-*-uclinux*) # ARM ucLinux ++ tmake_file="${tmake_file} arm/t-divmod-ef" + ;; + arm*-*-ecos-elf) + ;; + arm*-*-eabi* | arm*-*-symbianelf* ) ++ tmake_file="${tmake_file} arm/t-divmod-ef" + ;; + arm*-*-rtems*) + ;; +@@ -438,8 +441,12 @@ mips-sgi-irix[56]*) + mips*-*-netbsd*) # NetBSD/mips, either endian. + ;; + mips64*-*-linux*) ++ extra_parts="$extra_parts crtfastmath.o" ++ tmake_file="{$tmake_file} mips/t-crtfm" + ;; + mips*-*-linux*) # Linux MIPS, either endian. ++ extra_parts="$extra_parts crtfastmath.o" ++ tmake_file="{$tmake_file} mips/t-crtfm" + ;; + mips*-*-openbsd*) + ;; +@@ -461,6 +468,10 @@ mips64vr-*-elf* | mips64vrel-*-elf*) + ;; + mips64orion-*-elf* | mips64orionel-*-elf*) + ;; ++mips64octeon-wrs-elf* | mips64octeonel-wrs-elf*) ++ ;; ++mips64octeon-montavista-elf*) ++ ;; + mips*-*-rtems*) + ;; + mips-wrs-vxworks) +--- /dev/null ++++ b/libgcc/config/arm/t-divmod-ef +@@ -0,0 +1,4 @@ ++# On ARM, specifying -fnon-call-exceptions will needlessly pull in ++# the unwinder in simple programs which use 64-bit division. Omitting ++# the option is safe. ++LIB2_DIVMOD_EXCEPTION_FLAGS := -fexceptions +--- /dev/null ++++ b/libgcc/config/mips/t-crtfm +@@ -0,0 +1,3 @@ ++crtfastmath.o: $(gcc_srcdir)/config/mips/crtfastmath.c ++ $(gcc_compile) -c $(gcc_srcdir)/config/mips/crtfastmath.c ++ +--- a/libgcc/config/rs6000/t-ppccomm ++++ b/libgcc/config/rs6000/t-ppccomm +@@ -1,5 +1,21 @@ +-EXTRA_PARTS += ecrti$(objext) ecrtn$(objext) ncrti$(objext) ncrtn$(objext) \ +- crtsavres$(objext) ++LIB2ADD_ST += crtsavfpr.S crtresfpr.S \ ++ crtsavgpr.S crtresgpr.S \ ++ crtresxfpr.S crtresxgpr.S \ ++ e500crtres32gpr.S \ ++ e500crtres64gpr.S \ ++ e500crtres64gprctr.S \ ++ e500crtrest32gpr.S \ ++ e500crtrest64gpr.S \ ++ e500crtresx32gpr.S \ ++ e500crtresx64gpr.S \ ++ e500crtsav32gpr.S \ ++ e500crtsav64gpr.S \ ++ e500crtsav64gprctr.S \ ++ e500crtsavg32gpr.S \ ++ e500crtsavg64gpr.S \ ++ e500crtsavg64gprctr.S ++ ++EXTRA_PARTS += ecrti$(objext) ecrtn$(objext) ncrti$(objext) ncrtn$(objext) + + # We build {e,n}crti.o and {e,n}crtn.o, which serve to add begin and + # end labels to all of the special sections used when we link using gcc. 
+@@ -17,8 +33,62 @@ ncrti.S: $(gcc_srcdir)/config/rs6000/sol + ncrtn.S: $(gcc_srcdir)/config/rs6000/sol-cn.asm + cat $(gcc_srcdir)/config/rs6000/sol-cn.asm >ncrtn.S + +-crtsavres.S: $(gcc_srcdir)/config/rs6000/crtsavres.asm +- cat $(gcc_srcdir)/config/rs6000/crtsavres.asm >crtsavres.S ++crtsavfpr.S: $(gcc_srcdir)/config/rs6000/crtsavfpr.asm ++ cat $(gcc_srcdir)/config/rs6000/crtsavfpr.asm >crtsavfpr.S ++ ++crtresfpr.S: $(gcc_srcdir)/config/rs6000/crtresfpr.asm ++ cat $(gcc_srcdir)/config/rs6000/crtresfpr.asm >crtresfpr.S ++ ++crtsavgpr.S: $(gcc_srcdir)/config/rs6000/crtsavgpr.asm ++ cat $(gcc_srcdir)/config/rs6000/crtsavgpr.asm >crtsavgpr.S ++ ++crtresgpr.S: $(gcc_srcdir)/config/rs6000/crtresgpr.asm ++ cat $(gcc_srcdir)/config/rs6000/crtresgpr.asm >crtresgpr.S ++ ++crtresxfpr.S: $(gcc_srcdir)/config/rs6000/crtresxfpr.asm ++ cat $(gcc_srcdir)/config/rs6000/crtresxfpr.asm >crtresxfpr.S ++ ++crtresxgpr.S: $(gcc_srcdir)/config/rs6000/crtresxgpr.asm ++ cat $(gcc_srcdir)/config/rs6000/crtresxgpr.asm >crtresxgpr.S ++ ++e500crtres32gpr.S: $(gcc_srcdir)/config/rs6000/e500crtres32gpr.asm ++ cat $(gcc_srcdir)/config/rs6000/e500crtres32gpr.asm >e500crtres32gpr.S ++ ++e500crtres64gpr.S: $(gcc_srcdir)/config/rs6000/e500crtres64gpr.asm ++ cat $(gcc_srcdir)/config/rs6000/e500crtres64gpr.asm >e500crtres64gpr.S ++ ++e500crtres64gprctr.S: $(gcc_srcdir)/config/rs6000/e500crtres64gprctr.asm ++ cat $(gcc_srcdir)/config/rs6000/e500crtres64gprctr.asm >e500crtres64gprctr.S ++ ++e500crtrest32gpr.S: $(gcc_srcdir)/config/rs6000/e500crtrest32gpr.asm ++ cat $(gcc_srcdir)/config/rs6000/e500crtrest32gpr.asm >e500crtrest32gpr.S ++ ++e500crtrest64gpr.S: $(gcc_srcdir)/config/rs6000/e500crtrest64gpr.asm ++ cat $(gcc_srcdir)/config/rs6000/e500crtrest64gpr.asm >e500crtrest64gpr.S ++ ++e500crtresx32gpr.S: $(gcc_srcdir)/config/rs6000/e500crtresx32gpr.asm ++ cat $(gcc_srcdir)/config/rs6000/e500crtresx32gpr.asm >e500crtresx32gpr.S ++ ++e500crtresx64gpr.S: $(gcc_srcdir)/config/rs6000/e500crtresx64gpr.asm ++ cat $(gcc_srcdir)/config/rs6000/e500crtresx64gpr.asm >e500crtresx64gpr.S ++ ++e500crtsav32gpr.S: $(gcc_srcdir)/config/rs6000/e500crtsav32gpr.asm ++ cat $(gcc_srcdir)/config/rs6000/e500crtsav32gpr.asm >e500crtsav32gpr.S ++ ++e500crtsav64gpr.S: $(gcc_srcdir)/config/rs6000/e500crtsav64gpr.asm ++ cat $(gcc_srcdir)/config/rs6000/e500crtsav64gpr.asm >e500crtsav64gpr.S ++ ++e500crtsav64gprctr.S: $(gcc_srcdir)/config/rs6000/e500crtsav64gprctr.asm ++ cat $(gcc_srcdir)/config/rs6000/e500crtsav64gprctr.asm >e500crtsav64gprctr.S ++ ++e500crtsavg32gpr.S: $(gcc_srcdir)/config/rs6000/e500crtsavg32gpr.asm ++ cat $(gcc_srcdir)/config/rs6000/e500crtsavg32gpr.asm >e500crtsavg32gpr.S ++ ++e500crtsavg64gpr.S: $(gcc_srcdir)/config/rs6000/e500crtsavg64gpr.asm ++ cat $(gcc_srcdir)/config/rs6000/e500crtsavg64gpr.asm >e500crtsavg64gpr.S ++ ++e500crtsavg64gprctr.S: $(gcc_srcdir)/config/rs6000/e500crtsavg64gprctr.asm ++ cat $(gcc_srcdir)/config/rs6000/e500crtsavg64gprctr.asm >e500crtsavg64gprctr.S + + ecrti$(objext): ecrti.S + $(crt_compile) -c ecrti.S +@@ -34,3 +104,60 @@ ncrtn$(objext): ncrtn.S + + crtsavres$(objext): crtsavres.S + $(crt_compile) -c crtsavres.S ++ ++crtsavfpr$(objext): crtsavfpr.S ++ $(crt_compile) -c crtsavfpr.S ++ ++crtresfpr$(objext): crtresfpr.S ++ $(crt_compile) -c crtresfpr.S ++ ++crtsavgpr$(objext): crtsavgpr.S ++ $(crt_compile) -c crtsavgpr.S ++ ++crtresgpr$(objext): crtresgpr.S ++ $(crt_compile) -c crtresgpr.S ++ ++crtresxfpr$(objext): crtresxfpr.S ++ $(crt_compile) -c crtresxfpr.S ++ ++crtresxgpr$(objext): crtresxgpr.S ++ 
$(crt_compile) -c crtresxgpr.S ++ ++e500crtres32gpr$(objext): e500crtres32gpr.S ++ $(crt_compile) -c e500crtres32gpr.S ++ ++e500crtres64gpr$(objext): e500crtres64gpr.S ++ $(crt_compile) -c e500crtres64gpr.S ++ ++e500crtres64gprctr$(objext): e500crtres64gprctr.S ++ $(crt_compile) -c e500crtres64gprctr.S ++ ++e500crtrest32gpr$(objext): e500crtrest32gpr.S ++ $(crt_compile) -c e500crtrest32gpr.S ++ ++e500crtrest64gpr$(objext): e500crtrest64gpr.S ++ $(crt_compile) -c e500crtrest64gpr.S ++ ++e500crtresx32gpr$(objext): e500crtresx32gpr.S ++ $(crt_compile) -c e500crtresx32gpr.S ++ ++e500crtresx64gpr$(objext): e500crtresx64gpr.S ++ $(crt_compile) -c e500crtresx64gpr.S ++ ++e500crtsav32gpr$(objext): e500crtsav32gpr.S ++ $(crt_compile) -c e500crtsav32gpr.S ++ ++e500crtsav64gpr$(objext): e500crtsav64gpr.S ++ $(crt_compile) -c e500crtsav64gpr.S ++ ++e500crtsav64gprctr$(objext): e500crtsav64gprctr.S ++ $(crt_compile) -c e500crtsav64gprctr.S ++ ++e500crtsavg32gpr$(objext): e500crtsavg32gpr.S ++ $(crt_compile) -c e500crtsavg32gpr.S ++ ++e500crtsavg64gpr$(objext): e500crtsavg64gpr.S ++ $(crt_compile) -c e500crtsavg64gpr.S ++ ++e500crtsavg64gprctr$(objext): e500crtsavg64gprctr.S ++ $(crt_compile) -c e500crtsavg64gprctr.S +--- a/libgcc/shared-object.mk ++++ b/libgcc/shared-object.mk +@@ -8,11 +8,13 @@ base := $(basename $(notdir $o)) + + ifeq ($(suffix $o),.c) + ++c_flags-$(base)$(objext) := $(c_flags) + $(base)$(objext): $o +- $(gcc_compile) $(c_flags) -c $< $(vis_hide) ++ $(gcc_compile) $(c_flags-$@) -c $< $(vis_hide) + ++c_flags-$(base)_s$(objext) := $(c_flags) + $(base)_s$(objext): $o +- $(gcc_s_compile) $(c_flags) -c $< ++ $(gcc_s_compile) $(c_flags-$@) -c $< + + else + +--- a/libgcc/static-object.mk ++++ b/libgcc/static-object.mk +@@ -8,8 +8,9 @@ base := $(basename $(notdir $o)) + + ifeq ($(suffix $o),.c) + ++c_flags-$(base)$(objext) := $(c_flags) + $(base)$(objext): $o +- $(gcc_compile) $(c_flags) -c $< $(vis_hide) ++ $(gcc_compile) $(c_flags-$@) -c $< $(vis_hide) + + else + +--- a/libgomp/Makefile.am ++++ b/libgomp/Makefile.am +@@ -1,5 +1,10 @@ + ## Process this file with automake to produce Makefile.in + ++datarootdir = @datarootdir@ ++docdir = @docdir@ ++htmldir = @htmldir@ ++pdfdir = @pdfdir@ ++ + ACLOCAL_AMFLAGS = -I .. 
-I ../config + SUBDIRS = testsuite + +@@ -39,6 +44,12 @@ if USE_FORTRAN + nodist_finclude_HEADERS = omp_lib.h omp_lib.f90 omp_lib.mod omp_lib_kinds.mod + endif + ++LTLDFLAGS = $(shell $(SHELL) $(top_srcdir)/../libtool-ldflags $(LDFLAGS)) ++ ++LINK = $(LIBTOOL) --tag CC --mode=link $(CCLD) $(AM_CCFLAGS) $(CFLAGS) \ ++ $(AM_LDFLAGS) $(LTLDFLAGS) -o $@ ++ ++ + omp_lib_kinds.mod: omp_lib.mod + : + omp_lib.mod: omp_lib.f90 +@@ -48,10 +59,31 @@ fortran.o: libgomp_f.h + env.lo: libgomp_f.h + env.o: libgomp_f.h + ++HTMLS_INSTALL=libgomp ++HTMLS_BUILD=libgomp/index.html + +-# No install-html or install-pdf support in automake yet +-.PHONY: install-html install-pdf +-install-html: ++$(HTMLS_BUILD): $(info_TEXINFOS) ++ $(TEXI2HTML) $(MAKEINFOFLAGS) -I$(srcdir) -o $(@D) $< ++ ++html__strip_dir = `echo $$p | sed -e 's|^.*/||'`; ++ ++install-data-local: install-html ++install-html: $(HTMLS_BUILD) ++ @$(NORMAL_INSTALL) ++ test -z "$(htmldir)" || $(mkinstalldirs) "$(DESTDIR)$(htmldir)" ++ @list='$(HTMLS_INSTALL)'; for p in $$list; do \ ++ if test -f "$$p" || test -d "$$p"; then d=""; else d="$(srcdir)/"; fi; \ ++ f=$(html__strip_dir) \ ++ if test -d "$$d$$p"; then \ ++ echo " $(mkinstalldirs) '$(DESTDIR)$(htmldir)/$$f'"; \ ++ $(mkinstalldirs) "$(DESTDIR)$(htmldir)/$$f" || exit 1; \ ++ echo " $(INSTALL_DATA) '$$d$$p'/* '$(DESTDIR)$(htmldir)/$$f'"; \ ++ $(INSTALL_DATA) "$$d$$p"/* "$(DESTDIR)$(htmldir)/$$f"; \ ++ else \ ++ echo " $(INSTALL_DATA) '$$d$$p' '$(DESTDIR)$(htmldir)/$$f'"; \ ++ $(INSTALL_DATA) "$$d$$p" "$(DESTDIR)$(htmldir)/$$f"; \ ++ fi; \ ++ done + + install-pdf: $(PDFS) + @$(NORMAL_INSTALL) +@@ -69,6 +101,7 @@ install-pdf: $(PDFS) + # `texinfo.tex' for your package. The value of this variable should be + # the relative path from the current `Makefile.am' to `texinfo.tex'. 
+ TEXINFO_TEX = ../gcc/doc/include/texinfo.tex ++TEXI2HTML = $(MAKEINFO) --html + + # Defines info, dvi, pdf and html targets + MAKEINFOFLAGS = -I $(srcdir)/../gcc/doc/include +--- a/libgomp/Makefile.in ++++ b/libgomp/Makefile.in +@@ -94,8 +94,6 @@ LTCOMPILE = $(LIBTOOL) --tag=CC --mode=c + $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ + $(AM_CFLAGS) $(CFLAGS) + CCLD = $(CC) +-LINK = $(LIBTOOL) --tag=CC --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ +- $(AM_LDFLAGS) $(LDFLAGS) -o $@ + SOURCES = $(libgomp_la_SOURCES) + DIST_SOURCES = $(libgomp_la_SOURCES) + MULTISRCTOP = +@@ -217,9 +215,12 @@ USE_FORTRAN_TRUE = @USE_FORTRAN_TRUE@ + VERSION = @VERSION@ + XCFLAGS = @XCFLAGS@ + XLDFLAGS = @XLDFLAGS@ ++ac_ct_AR = @ac_ct_AR@ + ac_ct_CC = @ac_ct_CC@ + ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ + ac_ct_FC = @ac_ct_FC@ ++ac_ct_RANLIB = @ac_ct_RANLIB@ ++ac_ct_STRIP = @ac_ct_STRIP@ + am__fastdepCC_FALSE = @am__fastdepCC_FALSE@ + am__fastdepCC_TRUE = @am__fastdepCC_TRUE@ + am__include = @am__include@ +@@ -237,7 +238,6 @@ config_path = @config_path@ + datadir = @datadir@ + datarootdir = @datarootdir@ + docdir = @docdir@ +-dvidir = @dvidir@ + enable_shared = @enable_shared@ + enable_static = @enable_static@ + exec_prefix = @exec_prefix@ +@@ -254,7 +254,6 @@ libdir = @libdir@ + libexecdir = @libexecdir@ + libtool_VERSION = @libtool_VERSION@ + link_gomp = @link_gomp@ +-localedir = @localedir@ + localstatedir = @localstatedir@ + lt_ECHO = @lt_ECHO@ + mandir = @mandir@ +@@ -264,7 +263,6 @@ oldincludedir = @oldincludedir@ + pdfdir = @pdfdir@ + prefix = @prefix@ + program_transform_name = @program_transform_name@ +-psdir = @psdir@ + sbindir = @sbindir@ + sharedstatedir = @sharedstatedir@ + sysconfdir = @sysconfdir@ +@@ -297,6 +295,13 @@ libgomp_la_SOURCES = alloc.c barrier.c c + nodist_noinst_HEADERS = libgomp_f.h + nodist_libsubinclude_HEADERS = omp.h + @USE_FORTRAN_TRUE@nodist_finclude_HEADERS = omp_lib.h omp_lib.f90 omp_lib.mod omp_lib_kinds.mod ++LTLDFLAGS = $(shell $(SHELL) $(top_srcdir)/../libtool-ldflags $(LDFLAGS)) ++LINK = $(LIBTOOL) --tag CC --mode=link $(CCLD) $(AM_CCFLAGS) $(CFLAGS) \ ++ $(AM_LDFLAGS) $(LTLDFLAGS) -o $@ ++ ++HTMLS_INSTALL = libgomp ++HTMLS_BUILD = libgomp/index.html ++html__strip_dir = `echo $$p | sed -e 's|^.*/||'`; + + # Automake Documentation: + # If your package has Texinfo files in many directories, you can use the +@@ -304,6 +309,7 @@ nodist_libsubinclude_HEADERS = omp.h + # `texinfo.tex' for your package. The value of this variable should be + # the relative path from the current `Makefile.am' to `texinfo.tex'. 
+ TEXINFO_TEX = ../gcc/doc/include/texinfo.tex ++TEXI2HTML = $(MAKEINFO) --html + + # Defines info, dvi, pdf and html targets + MAKEINFOFLAGS = -I $(srcdir)/../gcc/doc/include +@@ -545,13 +551,10 @@ dist-info: $(INFO_DEPS) + $(srcdir)/*) base=`echo "$$base" | sed "s|^$$srcdirstrip/||"`;; \ + esac; \ + if test -f $$base; then d=.; else d=$(srcdir); fi; \ +- base_i=`echo "$$base" | sed 's|\.info$$||;s|$$|.i|'`; \ +- for file in $$d/$$base $$d/$$base-[0-9] $$d/$$base-[0-9][0-9] $$d/$$base_i[0-9] $$d/$$base_i[0-9][0-9]; do \ +- if test -f $$file; then \ +- relfile=`expr "$$file" : "$$d/\(.*\)"`; \ +- test -f $(distdir)/$$relfile || \ +- cp -p $$file $(distdir)/$$relfile; \ +- else :; fi; \ ++ for file in $$d/$$base*; do \ ++ relfile=`expr "$$file" : "$$d/\(.*\)"`; \ ++ test -f $(distdir)/$$relfile || \ ++ cp -p $$file $(distdir)/$$relfile; \ + done; \ + done + +@@ -955,7 +958,8 @@ info: info-recursive + + info-am: $(INFO_DEPS) + +-install-data-am: install-info-am install-nodist_fincludeHEADERS \ ++install-data-am: install-data-local install-info-am \ ++ install-nodist_fincludeHEADERS \ + install-nodist_libsubincludeHEADERS + + install-exec-am: install-multi install-nodist_toolexeclibHEADERS \ +@@ -1035,9 +1039,9 @@ uninstall-info: uninstall-info-recursive + distclean-multi distclean-recursive distclean-tags \ + distcleancheck distdir distuninstallcheck dvi dvi-am html \ + html-am info info-am install install-am install-data \ +- install-data-am install-exec install-exec-am install-info \ +- install-info-am install-man install-multi \ +- install-nodist_fincludeHEADERS \ ++ install-data-am install-data-local install-exec \ ++ install-exec-am install-info install-info-am install-man \ ++ install-multi install-nodist_fincludeHEADERS \ + install-nodist_libsubincludeHEADERS \ + install-nodist_toolexeclibHEADERS install-strip \ + install-toolexeclibLTLIBRARIES installcheck installcheck-am \ +@@ -1064,9 +1068,26 @@ fortran.o: libgomp_f.h + env.lo: libgomp_f.h + env.o: libgomp_f.h + +-# No install-html or install-pdf support in automake yet +-.PHONY: install-html install-pdf +-install-html: ++$(HTMLS_BUILD): $(info_TEXINFOS) ++ $(TEXI2HTML) $(MAKEINFOFLAGS) -I$(srcdir) -o $(@D) $< ++ ++install-data-local: install-html ++install-html: $(HTMLS_BUILD) ++ @$(NORMAL_INSTALL) ++ test -z "$(htmldir)" || $(mkinstalldirs) "$(DESTDIR)$(htmldir)" ++ @list='$(HTMLS_INSTALL)'; for p in $$list; do \ ++ if test -f "$$p" || test -d "$$p"; then d=""; else d="$(srcdir)/"; fi; \ ++ f=$(html__strip_dir) \ ++ if test -d "$$d$$p"; then \ ++ echo " $(mkinstalldirs) '$(DESTDIR)$(htmldir)/$$f'"; \ ++ $(mkinstalldirs) "$(DESTDIR)$(htmldir)/$$f" || exit 1; \ ++ echo " $(INSTALL_DATA) '$$d$$p'/* '$(DESTDIR)$(htmldir)/$$f'"; \ ++ $(INSTALL_DATA) "$$d$$p"/* "$(DESTDIR)$(htmldir)/$$f"; \ ++ else \ ++ echo " $(INSTALL_DATA) '$$d$$p' '$(DESTDIR)$(htmldir)/$$f'"; \ ++ $(INSTALL_DATA) "$$d$$p" "$(DESTDIR)$(htmldir)/$$f"; \ ++ fi; \ ++ done + + install-pdf: $(PDFS) + @$(NORMAL_INSTALL) +--- /dev/null ++++ b/libgomp/config/linux/mips/futex.h +@@ -0,0 +1,75 @@ ++/* Copyright (C) 2005, 2008 Free Software Foundation, Inc. ++ Contributed by Ilie Garbacea , Chao-ying Fu . ++ ++ This file is part of the GNU OpenMP Library (libgomp). ++ ++ Libgomp is free software; you can redistribute it and/or modify it ++ under the terms of the GNU Lesser General Public License as published by ++ the Free Software Foundation; either version 2.1 of the License, or ++ (at your option) any later version. 
++ ++ Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY ++ WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS ++ FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for ++ more details. ++ ++ You should have received a copy of the GNU Lesser General Public License ++ along with libgomp; see the file COPYING.LIB. If not, write to the ++ Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, ++ MA 02110-1301, USA. */ ++ ++/* As a special exception, if you link this library with other files, some ++ of which are compiled with GCC, to produce an executable, this library ++ does not by itself cause the resulting executable to be covered by the ++ GNU General Public License. This exception does not however invalidate ++ any other reasons why the executable file might be covered by the GNU ++ General Public License. */ ++ ++/* Provide target-specific access to the futex system call. */ ++ ++#include ++#define FUTEX_WAIT 0 ++#define FUTEX_WAKE 1 ++ ++static inline void ++sys_futex0 (int *addr, int op, int val) ++{ ++ register unsigned long __v0 asm("$2") = (unsigned long) SYS_futex; ++ register unsigned long __a0 asm("$4") = (unsigned long) addr; ++ register unsigned long __a1 asm("$5") = (unsigned long) op; ++ register unsigned long __a2 asm("$6") = (unsigned long) val; ++ register unsigned long __a3 asm("$7") = 0; ++ ++ __asm volatile ("syscall" ++ /* returns $a3 (errno), $v0 (return value) */ ++ : "=r" (__v0), "=r" (__a3) ++ /* arguments in v0 (syscall) a0-a3 */ ++ : "r" (__v0), "r" (__a0), "r" (__a1), "r" (__a2), "r" (__a3) ++ /* clobbers at, v1, t0-t9, memory */ ++ : "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13", "$14", ++ "$15", "$24", "$25", "memory"); ++} ++ ++static inline void ++futex_wait (int *addr, int val) ++{ ++ sys_futex0 (addr, FUTEX_WAIT, val); ++} ++ ++static inline void ++futex_wake (int *addr, int count) ++{ ++ sys_futex0 (addr, FUTEX_WAKE, count); ++} ++ ++static inline void ++cpu_relax (void) ++{ ++ __asm volatile ("" : : : "memory"); ++} ++ ++static inline void ++atomic_write_barrier (void) ++{ ++ __sync_synchronize (); ++} +--- a/libgomp/configure ++++ b/libgomp/configure +@@ -457,7 +457,7 @@ ac_includes_default="\ + # include + #endif" + +-ac_subst_vars='SHELL PATH_SEPARATOR PACKAGE_NAME PACKAGE_TARNAME PACKAGE_VERSION PACKAGE_STRING PACKAGE_BUGREPORT exec_prefix prefix program_transform_name bindir sbindir libexecdir datadir sysconfdir sharedstatedir localstatedir libdir includedir oldincludedir infodir mandir build_alias host_alias target_alias DEFS ECHO_C ECHO_N ECHO_T LIBS GENINSRC_TRUE GENINSRC_FALSE build build_cpu build_vendor build_os host host_cpu host_vendor host_os target target_cpu target_vendor target_os INSTALL_PROGRAM INSTALL_SCRIPT INSTALL_DATA CYGPATH_W PACKAGE VERSION ACLOCAL AUTOCONF AUTOMAKE AUTOHEADER MAKEINFO install_sh STRIP ac_ct_STRIP INSTALL_STRIP_PROGRAM mkdir_p AWK SET_MAKE am__leading_dot AMTAR am__tar am__untar multi_basedir toolexecdir toolexeclibdir CC ac_ct_CC EXEEXT OBJEXT DEPDIR am__include am__quote AMDEP_TRUE AMDEP_FALSE AMDEPBACKSLASH CCDEPMODE am__fastdepCC_TRUE am__fastdepCC_FALSE CFLAGS AR ac_ct_AR RANLIB ac_ct_RANLIB PERL BUILD_INFO_TRUE BUILD_INFO_FALSE LIBTOOL SED EGREP FGREP GREP LD DUMPBIN ac_ct_DUMPBIN NM LN_S lt_ECHO CPP CPPFLAGS enable_shared enable_static MAINTAINER_MODE_TRUE MAINTAINER_MODE_FALSE MAINT FC FCFLAGS LDFLAGS ac_ct_FC libtool_VERSION SECTION_LDFLAGS OPT_LDFLAGS LIBGOMP_BUILD_VERSIONED_SHLIB_TRUE 
LIBGOMP_BUILD_VERSIONED_SHLIB_FALSE config_path XCFLAGS XLDFLAGS link_gomp USE_FORTRAN_TRUE USE_FORTRAN_FALSE OMP_LOCK_SIZE OMP_LOCK_ALIGN OMP_NEST_LOCK_SIZE OMP_NEST_LOCK_ALIGN OMP_LOCK_KIND OMP_NEST_LOCK_KIND LIBOBJS LTLIBOBJS' ++ac_subst_vars='SHELL PATH_SEPARATOR PACKAGE_NAME PACKAGE_TARNAME PACKAGE_VERSION PACKAGE_STRING PACKAGE_BUGREPORT exec_prefix prefix program_transform_name bindir sbindir libexecdir datadir sysconfdir sharedstatedir localstatedir libdir includedir oldincludedir infodir mandir build_alias host_alias target_alias DEFS ECHO_C ECHO_N ECHO_T LIBS GENINSRC_TRUE GENINSRC_FALSE build build_cpu build_vendor build_os host host_cpu host_vendor host_os target target_cpu target_vendor target_os INSTALL_PROGRAM INSTALL_SCRIPT INSTALL_DATA CYGPATH_W PACKAGE VERSION ACLOCAL AUTOCONF AUTOMAKE AUTOHEADER MAKEINFO install_sh STRIP ac_ct_STRIP INSTALL_STRIP_PROGRAM mkdir_p AWK SET_MAKE am__leading_dot AMTAR am__tar am__untar multi_basedir toolexecdir toolexeclibdir datarootdir docdir pdfdir htmldir CC ac_ct_CC EXEEXT OBJEXT DEPDIR am__include am__quote AMDEP_TRUE AMDEP_FALSE AMDEPBACKSLASH CCDEPMODE am__fastdepCC_TRUE am__fastdepCC_FALSE CFLAGS AR ac_ct_AR RANLIB ac_ct_RANLIB PERL BUILD_INFO_TRUE BUILD_INFO_FALSE LIBTOOL SED EGREP FGREP GREP LD DUMPBIN ac_ct_DUMPBIN NM LN_S lt_ECHO CPP CPPFLAGS enable_shared enable_static MAINTAINER_MODE_TRUE MAINTAINER_MODE_FALSE MAINT FC FCFLAGS LDFLAGS ac_ct_FC libtool_VERSION SECTION_LDFLAGS OPT_LDFLAGS LIBGOMP_BUILD_VERSIONED_SHLIB_TRUE LIBGOMP_BUILD_VERSIONED_SHLIB_FALSE config_path XCFLAGS XLDFLAGS link_gomp USE_FORTRAN_TRUE USE_FORTRAN_FALSE OMP_LOCK_SIZE OMP_LOCK_ALIGN OMP_NEST_LOCK_SIZE OMP_NEST_LOCK_ALIGN OMP_LOCK_KIND OMP_NEST_LOCK_KIND LIBOBJS LTLIBOBJS' + ac_subst_files='' + + # Initialize some variables set by options. +@@ -1028,6 +1028,10 @@ Optional Features: + Optional Packages: + --with-PACKAGE[=ARG] use PACKAGE [ARG=yes] + --without-PACKAGE do not use PACKAGE (same as --with-PACKAGE=no) ++ --with-datarootdir=DIR Use DIR as the data root [PREFIX/share] ++ --with-docdir=DIR Install documentation in DIR [DATAROOTDIR] ++ --with-pdfdir install pdf in this directory. ++ --with-htmldir=DIR html documentation in in DIR [DOCDIR] + --with-pic try to use only PIC/non-PIC objects [default=use + both] + --with-gnu-ld assume the C compiler uses GNU ld [default=no] +@@ -2174,6 +2178,46 @@ esac + + + ++ ++# Check whether --with-datarootdir or --without-datarootdir was given. ++if test "${with_datarootdir+set}" = set; then ++ withval="$with_datarootdir" ++ datarootdir="\${prefix}/$with_datarootdir" ++else ++ datarootdir='$(prefix)/share' ++fi; ++ ++ ++ ++# Check whether --with-docdir or --without-docdir was given. ++if test "${with_docdir+set}" = set; then ++ withval="$with_docdir" ++ docdir="\${prefix}/$with_docdir" ++else ++ docdir='$(datarootdir)' ++fi; ++ ++ ++ ++# Check whether --with-pdfdir or --without-pdfdir was given. ++if test "${with_pdfdir+set}" = set; then ++ withval="$with_pdfdir" ++ pdfdir="\${prefix}/${withval}" ++else ++ pdfdir="\${docdir}" ++fi; ++ ++ ++ ++# Check whether --with-htmldir or --without-htmldir was given. ++if test "${with_htmldir+set}" = set; then ++ withval="$with_htmldir" ++ htmldir="\${prefix}/$with_htmldir" ++else ++ htmldir='$(docdir)' ++fi; ++ ++ + # Check the compiler. + # The same as in boehm-gc and libstdc++. Have to borrow it from there. 
+ # We must force CC to /not/ be precious variables; otherwise +@@ -4219,13 +4263,13 @@ if test "${lt_cv_nm_interface+set}" = se + else + lt_cv_nm_interface="BSD nm" + echo "int some_variable = 0;" > conftest.$ac_ext +- (eval echo "\"\$as_me:4242: $ac_compile\"" >&5) ++ (eval echo "\"\$as_me:4266: $ac_compile\"" >&5) + (eval "$ac_compile" 2>conftest.err) + cat conftest.err >&5 +- (eval echo "\"\$as_me:4245: $NM \\\"conftest.$ac_objext\\\"\"" >&5) ++ (eval echo "\"\$as_me:4269: $NM \\\"conftest.$ac_objext\\\"\"" >&5) + (eval "$NM \"conftest.$ac_objext\"" 2>conftest.err > conftest.out) + cat conftest.err >&5 +- (eval echo "\"\$as_me:4248: output\"" >&5) ++ (eval echo "\"\$as_me:4272: output\"" >&5) + cat conftest.out >&5 + if $GREP 'External.*some_variable' conftest.out > /dev/null; then + lt_cv_nm_interface="MS dumpbin" +@@ -5281,7 +5325,7 @@ ia64-*-hpux*) + ;; + *-*-irix6*) + # Find out which ABI we are using. +- echo '#line 5304 "configure"' > conftest.$ac_ext ++ echo '#line 5328 "configure"' > conftest.$ac_ext + if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>&5 + ac_status=$? +@@ -6381,11 +6425,11 @@ else + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` +- (eval echo "\"\$as_me:6404: $lt_compile\"" >&5) ++ (eval echo "\"\$as_me:6428: $lt_compile\"" >&5) + (eval "$lt_compile" 2>conftest.err) + ac_status=$? + cat conftest.err >&5 +- echo "$as_me:6408: \$? = $ac_status" >&5 ++ echo "$as_me:6432: \$? = $ac_status" >&5 + if (exit $ac_status) && test -s "$ac_outfile"; then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings other than the usual output. +@@ -6703,11 +6747,11 @@ else + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` +- (eval echo "\"\$as_me:6726: $lt_compile\"" >&5) ++ (eval echo "\"\$as_me:6750: $lt_compile\"" >&5) + (eval "$lt_compile" 2>conftest.err) + ac_status=$? + cat conftest.err >&5 +- echo "$as_me:6730: \$? = $ac_status" >&5 ++ echo "$as_me:6754: \$? = $ac_status" >&5 + if (exit $ac_status) && test -s "$ac_outfile"; then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings other than the usual output. +@@ -6808,11 +6852,11 @@ else + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` +- (eval echo "\"\$as_me:6831: $lt_compile\"" >&5) ++ (eval echo "\"\$as_me:6855: $lt_compile\"" >&5) + (eval "$lt_compile" 2>out/conftest.err) + ac_status=$? + cat out/conftest.err >&5 +- echo "$as_me:6835: \$? = $ac_status" >&5 ++ echo "$as_me:6859: \$? = $ac_status" >&5 + if (exit $ac_status) && test -s out/conftest2.$ac_objext + then + # The compiler can only warn and ignore the option if not recognized +@@ -6863,11 +6907,11 @@ else + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` +- (eval echo "\"\$as_me:6886: $lt_compile\"" >&5) ++ (eval echo "\"\$as_me:6910: $lt_compile\"" >&5) + (eval "$lt_compile" 2>out/conftest.err) + ac_status=$? + cat out/conftest.err >&5 +- echo "$as_me:6890: \$? = $ac_status" >&5 ++ echo "$as_me:6914: \$? 
= $ac_status" >&5 + if (exit $ac_status) && test -s out/conftest2.$ac_objext + then + # The compiler can only warn and ignore the option if not recognized +@@ -9660,7 +9704,7 @@ else + lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 + lt_status=$lt_dlunknown + cat > conftest.$ac_ext <<_LT_EOF +-#line 9683 "configure" ++#line 9707 "configure" + #include "confdefs.h" + + #if HAVE_DLFCN_H +@@ -9760,7 +9804,7 @@ else + lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 + lt_status=$lt_dlunknown + cat > conftest.$ac_ext <<_LT_EOF +-#line 9783 "configure" ++#line 9807 "configure" + #include "confdefs.h" + + #if HAVE_DLFCN_H +@@ -10115,7 +10159,7 @@ fi + + + # Provide some information about the compiler. +-echo "$as_me:10138:" \ ++echo "$as_me:10162:" \ + "checking for Fortran compiler version" >&5 + ac_compiler=`set X $ac_compile; echo $2` + { (eval echo "$as_me:$LINENO: \"$ac_compiler --version &5\"") >&5 +@@ -10351,7 +10395,7 @@ fi + + + # Provide some information about the compiler. +-echo "$as_me:10374:" \ ++echo "$as_me:10398:" \ + "checking for Fortran compiler version" >&5 + ac_compiler=`set X $ac_compile; echo $2` + { (eval echo "$as_me:$LINENO: \"$ac_compiler --version &5\"") >&5 +@@ -11067,11 +11111,11 @@ else + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` +- (eval echo "\"\$as_me:11090: $lt_compile\"" >&5) ++ (eval echo "\"\$as_me:11114: $lt_compile\"" >&5) + (eval "$lt_compile" 2>conftest.err) + ac_status=$? + cat conftest.err >&5 +- echo "$as_me:11094: \$? = $ac_status" >&5 ++ echo "$as_me:11118: \$? = $ac_status" >&5 + if (exit $ac_status) && test -s "$ac_outfile"; then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings other than the usual output. +@@ -11166,11 +11210,11 @@ else + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` +- (eval echo "\"\$as_me:11189: $lt_compile\"" >&5) ++ (eval echo "\"\$as_me:11213: $lt_compile\"" >&5) + (eval "$lt_compile" 2>out/conftest.err) + ac_status=$? + cat out/conftest.err >&5 +- echo "$as_me:11193: \$? = $ac_status" >&5 ++ echo "$as_me:11217: \$? = $ac_status" >&5 + if (exit $ac_status) && test -s out/conftest2.$ac_objext + then + # The compiler can only warn and ignore the option if not recognized +@@ -11218,11 +11262,11 @@ else + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` +- (eval echo "\"\$as_me:11241: $lt_compile\"" >&5) ++ (eval echo "\"\$as_me:11265: $lt_compile\"" >&5) + (eval "$lt_compile" 2>out/conftest.err) + ac_status=$? + cat out/conftest.err >&5 +- echo "$as_me:11245: \$? = $ac_status" >&5 ++ echo "$as_me:11269: \$? = $ac_status" >&5 + if (exit $ac_status) && test -s out/conftest2.$ac_objext + then + # The compiler can only warn and ignore the option if not recognized +@@ -16777,7 +16821,7 @@ fi + rm -f conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + ;; +- yes) ++ yes) + cat >conftest.$ac_ext <<_ACEOF + /* confdefs.h. 
*/ + _ACEOF +@@ -20485,6 +20529,10 @@ s,@am__untar@,$am__untar,;t t + s,@multi_basedir@,$multi_basedir,;t t + s,@toolexecdir@,$toolexecdir,;t t + s,@toolexeclibdir@,$toolexeclibdir,;t t ++s,@datarootdir@,$datarootdir,;t t ++s,@docdir@,$docdir,;t t ++s,@pdfdir@,$pdfdir,;t t ++s,@htmldir@,$htmldir,;t t + s,@CC@,$CC,;t t + s,@ac_ct_CC@,$ac_ct_CC,;t t + s,@EXEEXT@,$EXEEXT,;t t +--- a/libgomp/configure.ac ++++ b/libgomp/configure.ac +@@ -94,6 +94,30 @@ esac + AC_SUBST(toolexecdir) + AC_SUBST(toolexeclibdir) + ++AC_ARG_WITH(datarootdir, ++[ --with-datarootdir=DIR Use DIR as the data root [[PREFIX/share]]], ++datarootdir="\${prefix}/$with_datarootdir", ++datarootdir='$(prefix)/share') ++AC_SUBST(datarootdir) ++ ++AC_ARG_WITH(docdir, ++[ --with-docdir=DIR Install documentation in DIR [[DATAROOTDIR]]], ++docdir="\${prefix}/$with_docdir", ++docdir='$(datarootdir)') ++AC_SUBST(docdir) ++ ++AC_ARG_WITH(pdfdir, ++[ --with-pdfdir install pdf in this directory.], ++[pdfdir="\${prefix}/${withval}"], ++[pdfdir="\${docdir}"]) ++AC_SUBST(pdfdir) ++ ++AC_ARG_WITH(htmldir, ++[ --with-htmldir=DIR html documentation in in DIR [[DOCDIR]]], ++htmldir="\${prefix}/$with_htmldir", ++htmldir='$(docdir)') ++AC_SUBST(htmldir) ++ + # Check the compiler. + # The same as in boehm-gc and libstdc++. Have to borrow it from there. + # We must force CC to /not/ be precious variables; otherwise +--- a/libgomp/configure.tgt ++++ b/libgomp/configure.tgt +@@ -35,6 +35,10 @@ if test $enable_linux_futex = yes; then + config_path="linux/ia64 linux posix" + ;; + ++ mips*-*-linux*) ++ config_path="linux/mips linux posix" ++ ;; ++ + powerpc*-*-linux*) + config_path="linux/powerpc linux posix" + ;; +--- a/libgomp/libgomp.texi ++++ b/libgomp/libgomp.texi +@@ -95,7 +95,7 @@ for multi-platform shared-memory paralle + How you can copy and share this manual. + * Funding:: How to help assure continued work for free + software. +-* Index:: Index of this documentation. ++* Library Index:: Index of this documentation. + @end menu + + +@@ -1367,8 +1367,8 @@ Bugs in the GNU OpenMP implementation sh + @c Index + @c --------------------------------------------------------------------- + +-@node Index +-@unnumbered Index ++@node Library Index ++@unnumbered Library Index + + @printindex cp + +--- a/libiberty/Makefile.in ++++ b/libiberty/Makefile.in +@@ -124,7 +124,7 @@ COMPILE.c = $(CC) -c @DEFS@ $(LIBCFLAGS) + CFILES = alloca.c argv.c asprintf.c atexit.c \ + basename.c bcmp.c bcopy.c bsearch.c bzero.c \ + calloc.c choose-temp.c clock.c concat.c cp-demangle.c \ +- cp-demint.c cplus-dem.c \ ++ cp-demint.c cplus-dem.c cygpath.c \ + dyn-string.c \ + fdmatch.c ffs.c fibheap.c filename_cmp.c floatformat.c \ + fnmatch.c fopen_unlocked.c \ +@@ -180,7 +180,7 @@ REQUIRED_OFILES = ./regex.o ./cplus-dem. + # maint-missing" and "make check". 
+ CONFIGURED_OFILES = ./asprintf.o ./atexit.o \ + ./basename.o ./bcmp.o ./bcopy.o ./bsearch.o ./bzero.o \ +- ./calloc.o ./clock.o ./copysign.o \ ++ ./calloc.o ./clock.o ./copysign.o ./cygpath.o \ + ./_doprnt.o \ + ./ffs.o \ + ./getcwd.o ./getpagesize.o ./gettimeofday.o \ +@@ -615,6 +615,13 @@ $(CONFIGURED_OFILES): stamp-picdir + else true; fi + $(COMPILE.c) $(srcdir)/cplus-dem.c $(OUTPUT_OPTION) + ++./cygpath.o: $(srcdir)/cygpath.c stamp-h $(INCDIR)/ansidecl.h \ ++ $(INCDIR)/libiberty.h ++ if [ x"$(PICFLAG)" != x ]; then \ ++ $(COMPILE.c) $(PICFLAG) $(srcdir)/cygpath.c -o pic/$@; \ ++ else true; fi ++ $(COMPILE.c) $(srcdir)/cygpath.c $(OUTPUT_OPTION) ++ + ./dyn-string.o: $(srcdir)/dyn-string.c stamp-h $(INCDIR)/ansidecl.h \ + $(INCDIR)/dyn-string.h $(INCDIR)/libiberty.h + if [ x"$(PICFLAG)" != x ]; then \ +--- a/libiberty/configure ++++ b/libiberty/configure +@@ -8524,6 +8524,20 @@ case "${host}" in + esac + + ++# On MinGW, add support for Cygwin paths. ++case "${host}" in ++ *-*-mingw*) ++ case $LIBOBJS in ++ "cygpath.$ac_objext" | \ ++ *" cygpath.$ac_objext" | \ ++ "cygpath.$ac_objext "* | \ ++ *" cygpath.$ac_objext "* ) ;; ++ *) LIBOBJS="$LIBOBJS cygpath.$ac_objext" ;; ++esac ++ ++ ;; ++esac ++ + if test x$gcc_no_link = xyes; then + if test "x${ac_cv_func_mmap_fixed_mapped+set}" != xset; then + ac_cv_func_mmap_fixed_mapped=no +--- a/libiberty/configure.ac ++++ b/libiberty/configure.ac +@@ -686,6 +686,13 @@ case "${host}" in + esac + AC_SUBST(pexecute) + ++# On MinGW, add support for Cygwin paths. ++case "${host}" in ++ *-*-mingw*) ++ AC_LIBOBJ([cygpath]) ++ ;; ++esac ++ + libiberty_AC_FUNC_STRNCMP + + # Install a library built with a cross compiler in $(tooldir) rather +--- a/libiberty/cp-demangle.c ++++ b/libiberty/cp-demangle.c +@@ -1885,6 +1885,11 @@ cplus_demangle_builtin_types[D_BUILTIN_T + }; + + CP_STATIC_IF_GLIBCPP_V3 ++const struct demangle_builtin_type_info ++cplus_demangle_builtin_Dh_type = ++ { NL ("__fp16"), NL ("__fp16"), D_PRINT_DEFAULT }; ++ ++CP_STATIC_IF_GLIBCPP_V3 + struct demangle_component * + cplus_demangle_type (struct d_info *di) + { +@@ -1936,6 +1941,21 @@ cplus_demangle_type (struct d_info *di) + d_advance (di, 1); + break; + ++ case 'D': ++ d_advance (di, 1); ++ switch (d_peek_char (di)) ++ { ++ case 'h': ++ ret = d_make_builtin_type (di, &cplus_demangle_builtin_Dh_type); ++ di->expansion += ret->u.s_builtin.type->len; ++ can_subst = 0; ++ d_advance (di, 1); ++ break; ++ default: ++ return NULL; ++ } ++ break; ++ + case 'u': + d_advance (di, 1); + ret = d_make_comp (di, DEMANGLE_COMPONENT_VENDOR_TYPE, +--- /dev/null ++++ b/libiberty/cygpath.c +@@ -0,0 +1,591 @@ ++/* Support Cygwin paths under MinGW. ++ Copyright (C) 2006 Free Software Foundation, Inc. ++ Written by CodeSourcery. ++ ++This file is part of the libiberty library. ++Libiberty is free software; you can redistribute it and/or modify it ++under the terms of the GNU Library General Public License as published ++by the Free Software Foundation; either version 2 of the License, or ++(at your option) any later version. ++ ++Libiberty is distributed in the hope that it will be useful, ++but WITHOUT ANY WARRANTY; without even the implied warranty of ++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++Library General Public License for more details. ++ ++You should have received a copy of the GNU Library General Public ++License along with libiberty; see the file COPYING.LIB. 
If not, write ++to the Free Software Foundation, Inc., 51 Franklin Street - Fifth ++Floor, Boston, MA 02110-1301, USA. */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include "libiberty.h" ++ ++/* If non-zero, we have attempted to use cygpath. CYGPATH_PEX may ++ still be NULL, if cygpath is unavailable. */ ++static int cygpath_initialized; ++ ++/* If non-NULL, an instance of cygpath connected via a pipe. */ ++static struct pex_obj *cygpath_pex; ++ ++/* The input to cygpath. */ ++static FILE *cygpath_in; ++ ++/* The output from cygpath. */ ++static FILE *cygpath_out; ++ ++/* If non-NULL, a file to which path translations are logged. */ ++static FILE *cygpath_log; ++ ++/* Record MESSAGE in the CYGPATH_LOG. MESSAGE is a format string, ++ which is expected to have a single "%s" field, to be replaced by ++ ARG. */ ++static void ++cygpath_log_msg_arg (const char *message, const char *arg) ++{ ++ if (!cygpath_log) ++ return; ++ fprintf (cygpath_log, "[%d] cygpath: ", _getpid ()); ++ fprintf (cygpath_log, message, arg); ++ fprintf (cygpath_log, "\n"); ++ fflush (cygpath_log); ++} ++ ++/* Record MESSAGE in the CYGPATH_LOG. */ ++static void ++cygpath_log_msg (const char *message) ++{ ++ cygpath_log_msg_arg ("%s", message); ++} ++ ++/* An error has occured. Add the MESSAGE to the CYGPATH_LOG, noting ++ the cause of the error based on errno. */ ++static void ++cygpath_perror (const char *message) ++{ ++ if (!cygpath_log) ++ return; ++ fprintf (cygpath_log, "[%d] cygpath: error: %s: %s\n", ++ _getpid(), message, strerror (errno)); ++ fflush (cygpath_log); ++} ++ ++/* Closes CYGPATH_PEX and frees all associated ++ resoures. */ ++static void ++cygpath_close (void) ++{ ++ /* Free resources. */ ++ if (cygpath_out) ++ { ++ fclose (cygpath_out); ++ cygpath_out = NULL; ++ } ++ if (cygpath_in) ++ { ++ fclose (cygpath_in); ++ cygpath_in = NULL; ++ } ++ if (cygpath_pex) ++ { ++ pex_free (cygpath_pex); ++ cygpath_pex = NULL; ++ } ++ if (cygpath_log) ++ { ++ cygpath_log_msg ("end"); ++ cygpath_log = NULL; ++ } ++} ++ ++/* CYG_PATH is a pointer to a Cygwin path. This function converts the ++ Cygwin path to a Windows path, storing the result in ++ WIN32_PATH. Returns true if the conversion was successful; false ++ otherwise. */ ++int ++cygpath (const char *cyg_path, char win32_path[MAX_PATH + 1]) ++{ ++ bool ok; ++ bool retrying; ++ ++ /* Special-case the empty path. cygpath cannot handle the empty ++ path correctly. It ignores the empty line, waiting for a ++ non-empty line, which in turn causes an application using this ++ function to appear stuck. */ ++ if (cyg_path[0] == '\0') ++ { ++ win32_path[0] = '\0'; ++ return true; ++ } ++ ++ retrying = false; ++ ++ retry: ++ if (!cygpath_initialized) ++ { ++ const char *argv[] = { "cygpath", "-w", "-f", "-", NULL }; ++ const char *cygpath_path; ++ const char *log; ++ int err; ++ ++ /* If we are unable to invoke cygpath, we do not want to try ++ again. So, we set the initialized flag at this point; if ++ errors occur during the invocation, it will remain set. */ ++ cygpath_initialized = 1; ++ /* Check to see if the user wants cygpath support. */ ++ cygpath_path = getenv ("CYGPATH"); ++ if (!cygpath_path) ++ /* The user doesn't need to support Cygwin paths. */ ++ goto error; ++ /* If debugging, open the log file. */ ++ log = getenv ("CSL_DEBUG_CYGPATH"); ++ if (log && log[0]) ++ { ++ /* The log file is opened for "append" so that multiple ++ processes (perhaps invoked from "make") can share it. 
*/ ++ cygpath_log = fopen (log, "a"); ++ if (cygpath_log) ++ cygpath_log_msg ("begin"); ++ } ++ /* If the environment variable is set to a non-empty string, use ++ that string as the path to cygpath. */ ++ if (cygpath_path[0] != '\0') ++ argv[0] = cygpath_path; ++ /* Create the pex object. */ ++ cygpath_pex = pex_init (PEX_SEARCH | PEX_USE_PIPES, ++ "cygpath", NULL); ++ if (!cygpath_pex) ++ goto error; ++ /* Get the FILE we will use to write to the child. */ ++ cygpath_in = pex_input_pipe (cygpath_pex, /*binary=*/0); ++ if (!cygpath_in) ++ goto error; ++ /* Start the child process. */ ++ if (pex_run (cygpath_pex, PEX_SEARCH | PEX_USE_PIPES, ++ argv[0], (char**) argv, ++ NULL, NULL, ++ &err) != NULL) ++ goto error; ++ /* Get the FILE we will use to read from the child. */ ++ cygpath_out = pex_read_output (cygpath_pex, /*binary=*/1); ++ if (!cygpath_out) ++ goto error; ++ } ++ else if (!cygpath_pex) ++ /* We previously tried to use cygpath, but something went wrong. */ ++ return false; ++ ++ /* Write CYG_PATH to the child, on a line by itself. */ ++ cygpath_log_msg_arg ("-> %s", cyg_path); ++ if (fprintf (cygpath_in, "%s\n", cyg_path) < 0) ++ { ++ cygpath_perror ("write failed"); ++ goto error; ++ } ++ /* Flush the output. (We cannot set the stream into line-buffered ++ mode with setvbuf because Windows treats _IOLBF as a synonym for ++ _IOFBF.) */ ++ if (fflush (cygpath_in)) ++ cygpath_perror ("flush failed"); ++ /* Read the output. */ ++ ok = true; ++ while (1) ++ { ++ size_t pathlen; ++ if (!fgets (win32_path, MAX_PATH, cygpath_out)) ++ { ++ if (ferror (cygpath_out)) ++ cygpath_perror ("read failed"); ++ else ++ { ++ cygpath_log_msg ("error: EOF"); ++ /* Unfortunately, cygpath sometimes crashes for no ++ apparent reason. We give it two chances... */ ++ if (!retrying) ++ { ++ retrying = true; ++ cygpath_log_msg ("retrying"); ++ cygpath_close (); ++ cygpath_initialized = 0; ++ goto retry; ++ } ++ } ++ goto error; ++ } ++ pathlen = strlen (win32_path); ++ if (pathlen == 0 && ok) ++ /* This isn't a well-formed response from cygpath. */ ++ goto error; ++ if (win32_path[pathlen - 1] == '\n') ++ { ++ win32_path[pathlen - 1] = '\0'; ++ cygpath_log_msg_arg ("<- %s", win32_path); ++ break; ++ } ++ /* We didn't reach the end of the line. There's no point in ++ trying to use this output, since we know the length of ++ paths are limited to MAX_PATH characters, but we read the ++ entire line so that we are still in sync with ++ cygpath. */ ++ ok = false; ++ if (cygpath_log) ++ cygpath_log_msg_arg ("error: invalid response: %s", ++ win32_path); ++ } ++ ++ return ok; ++ ++ error: ++ cygpath_close(); ++ return false; ++} ++ ++/* Returns the handle for the MVCRT DLL, or NULL if it is not ++ available. */ ++static HMODULE ++msvcrt_dll (void) ++{ ++ static HMODULE dll = (HMODULE)(-1); ++ ++ /* After we call LoadLibrary, DLL will be either a valid handle or ++ NULL, so this check ensures that we only try to load the library ++ once. */ ++ if (dll == (HMODULE)(-1)) ++ dll = LoadLibrary ("msvcrt.dll"); ++ ++ return dll; ++} ++ ++/* Call the underlying MSVCRT fopen with PATH and MODE, and return ++ what it returns. */ ++static FILE * ++msvcrt_fopen (const char *path, const char *mode) ++{ ++ typedef FILE *(fopen_type)(const char *path, ++ const char *mode); ++ ++ static fopen_type *f = NULL; ++ ++ /* Get the address of "fopen". 
*/ ++ if (!f) ++ { ++ HMODULE dll = msvcrt_dll (); ++ if (!dll) ++ { ++ errno = ENOSYS; ++ return NULL; ++ } ++ f = (fopen_type *) GetProcAddress (dll, "fopen"); ++ if (!f) ++ { ++ errno = ENOSYS; ++ return NULL; ++ } ++ } ++ ++ /* Call fopen. */ ++ return (*f)(path, mode); ++} ++ ++FILE * ++fopen (const char *path, const char *mode) ++{ ++ FILE *f; ++ char win32_path[MAX_PATH + 1]; ++ ++ /* Assume PATH is a Windows path. */ ++ f = msvcrt_fopen (path, mode); ++ if (f || errno != ENOENT) ++ return f; ++ /* Perhaps it is a Cygwin path? */ ++ if (cygpath (path, win32_path)) ++ f = msvcrt_fopen (win32_path, mode); ++ return f; ++} ++ ++int ++open (const char *path, int oflag, ...) ++{ ++ int fd; ++ char win32_path[MAX_PATH + 1]; ++ int pmode = 0; ++ ++ if ((oflag & _O_CREAT)) ++ { ++ va_list ap; ++ va_start (ap, oflag); ++ pmode = va_arg (ap, int); ++ va_end (ap); ++ } ++ ++ /* Assume PATH is a Windows path. */ ++ fd = _open (path, oflag, pmode); ++ if (fd != -1 || errno != ENOENT) ++ return fd; ++ /* Perhaps it is a Cygwin path? */ ++ if (cygpath (path, win32_path)) ++ fd = _open (win32_path, oflag, pmode); ++ return fd; ++} ++ ++int ++stat (const char *path, struct stat *buffer) ++{ ++ int r; ++ char win32_path[MAX_PATH + 1]; ++ ++ /* Assume PATH is a Windows path. */ ++ r = _stat (path, (struct _stat *) buffer); ++ if (r != -1 || errno != ENOENT) ++ return r; ++ /* Perhaps it is a Cygwin path? */ ++ if (cygpath (path, win32_path)) ++ r = _stat (win32_path, (struct _stat *) buffer); ++ return r; ++} ++ ++int ++access (const char *path, int mode) ++{ ++ int r; ++ char win32_path[MAX_PATH + 1]; ++ ++#ifdef _WIN32 ++ /* Some GNU tools mistakenly defined X_OK to 1 on Windows. */ ++ mode = mode & ~1; ++#endif ++ /* Assume PATH is a Windows path. */ ++ r = _access (path, mode); ++ if (r != -1 || errno != ENOENT) ++ return r; ++ /* Perhaps it is a Cygwin path? */ ++ if (cygpath (path, win32_path)) ++ r = _access (win32_path, mode); ++ return r; ++} ++ ++/* Given the WINDOWS_CODE (typically the result of GetLastError), set ++ ERRNO to the corresponding error code. If there is no obvious ++ correspondence, ERRNO will be set to EACCES. 
*/ ++static void ++set_errno_from_windows_code (DWORD windows_code) ++{ ++ int mapping[][2] = { ++ {ERROR_ACCESS_DENIED, EACCES}, ++ {ERROR_ACCOUNT_DISABLED, EACCES}, ++ {ERROR_ACCOUNT_RESTRICTION, EACCES}, ++ {ERROR_ALREADY_ASSIGNED, EBUSY}, ++ {ERROR_ALREADY_EXISTS, EEXIST}, ++ {ERROR_ARITHMETIC_OVERFLOW, ERANGE}, ++ {ERROR_BAD_COMMAND, EIO}, ++ {ERROR_BAD_DEVICE, ENODEV}, ++ {ERROR_BAD_DRIVER_LEVEL, ENXIO}, ++ {ERROR_BAD_EXE_FORMAT, ENOEXEC}, ++ {ERROR_BAD_FORMAT, ENOEXEC}, ++ {ERROR_BAD_LENGTH, EINVAL}, ++ {ERROR_BAD_PATHNAME, ENOENT}, ++ {ERROR_BAD_PIPE, EPIPE}, ++ {ERROR_BAD_UNIT, ENODEV}, ++ {ERROR_BAD_USERNAME, EINVAL}, ++ {ERROR_BROKEN_PIPE, EPIPE}, ++ {ERROR_BUFFER_OVERFLOW, ENOMEM}, ++ {ERROR_BUSY, EBUSY}, ++ {ERROR_BUSY_DRIVE, EBUSY}, ++ {ERROR_CALL_NOT_IMPLEMENTED, ENOSYS}, ++ {ERROR_CRC, EIO}, ++ {ERROR_CURRENT_DIRECTORY, EINVAL}, ++ {ERROR_DEVICE_IN_USE, EBUSY}, ++ {ERROR_DIR_NOT_EMPTY, EEXIST}, ++ {ERROR_DIRECTORY, ENOENT}, ++ {ERROR_DISK_CHANGE, EIO}, ++ {ERROR_DISK_FULL, ENOSPC}, ++ {ERROR_DRIVE_LOCKED, EBUSY}, ++ {ERROR_ENVVAR_NOT_FOUND, EINVAL}, ++ {ERROR_EXE_MARKED_INVALID, ENOEXEC}, ++ {ERROR_FILE_EXISTS, EEXIST}, ++ {ERROR_FILE_INVALID, ENODEV}, ++ {ERROR_FILE_NOT_FOUND, ENOENT}, ++ {ERROR_FILENAME_EXCED_RANGE, ENAMETOOLONG}, ++ {ERROR_GEN_FAILURE, EIO}, ++ {ERROR_HANDLE_DISK_FULL, ENOSPC}, ++ {ERROR_INSUFFICIENT_BUFFER, ENOMEM}, ++ {ERROR_INVALID_ACCESS, EINVAL}, ++ {ERROR_INVALID_ADDRESS, EFAULT}, ++ {ERROR_INVALID_BLOCK, EFAULT}, ++ {ERROR_INVALID_DATA, EINVAL}, ++ {ERROR_INVALID_DRIVE, ENODEV}, ++ {ERROR_INVALID_EXE_SIGNATURE, ENOEXEC}, ++ {ERROR_INVALID_FLAGS, EINVAL}, ++ {ERROR_INVALID_FUNCTION, ENOSYS}, ++ {ERROR_INVALID_HANDLE, EBADF}, ++ {ERROR_INVALID_LOGON_HOURS, EACCES}, ++ {ERROR_INVALID_NAME, ENOENT}, ++ {ERROR_INVALID_OWNER, EINVAL}, ++ {ERROR_INVALID_PARAMETER, EINVAL}, ++ {ERROR_INVALID_PASSWORD, EPERM}, ++ {ERROR_INVALID_PRIMARY_GROUP, EINVAL}, ++ {ERROR_INVALID_SIGNAL_NUMBER, EINVAL}, ++ {ERROR_INVALID_TARGET_HANDLE, EIO}, ++ {ERROR_INVALID_WORKSTATION, EACCES}, ++ {ERROR_IO_DEVICE, EIO}, ++ {ERROR_IO_INCOMPLETE, EINTR}, ++ {ERROR_LOCKED, EBUSY}, ++ {ERROR_LOGON_FAILURE, EACCES}, ++ {ERROR_MAPPED_ALIGNMENT, EINVAL}, ++ {ERROR_META_EXPANSION_TOO_LONG, E2BIG}, ++ {ERROR_MORE_DATA, EPIPE}, ++ {ERROR_NEGATIVE_SEEK, ESPIPE}, ++ {ERROR_NO_DATA, EPIPE}, ++ {ERROR_NO_MORE_SEARCH_HANDLES, EIO}, ++ {ERROR_NO_PROC_SLOTS, EAGAIN}, ++ {ERROR_NO_SUCH_PRIVILEGE, EACCES}, ++ {ERROR_NOACCESS, EFAULT}, ++ {ERROR_NONE_MAPPED, EINVAL}, ++ {ERROR_NOT_ENOUGH_MEMORY, ENOMEM}, ++ {ERROR_NOT_READY, ENODEV}, ++ {ERROR_NOT_SAME_DEVICE, EXDEV}, ++ {ERROR_OPEN_FAILED, EIO}, ++ {ERROR_OPERATION_ABORTED, EINTR}, ++ {ERROR_OUTOFMEMORY, ENOMEM}, ++ {ERROR_PASSWORD_EXPIRED, EACCES}, ++ {ERROR_PATH_BUSY, EBUSY}, ++ {ERROR_PATH_NOT_FOUND, ENOTDIR}, ++ {ERROR_PIPE_BUSY, EBUSY}, ++ {ERROR_PIPE_CONNECTED, EPIPE}, ++ {ERROR_PIPE_LISTENING, EPIPE}, ++ {ERROR_PIPE_NOT_CONNECTED, EPIPE}, ++ {ERROR_PRIVILEGE_NOT_HELD, EACCES}, ++ {ERROR_READ_FAULT, EIO}, ++ {ERROR_SEEK, ESPIPE}, ++ {ERROR_SEEK_ON_DEVICE, ESPIPE}, ++ {ERROR_SHARING_BUFFER_EXCEEDED, ENFILE}, ++ {ERROR_STACK_OVERFLOW, ENOMEM}, ++ {ERROR_SWAPERROR, ENOENT}, ++ {ERROR_TOO_MANY_MODULES, EMFILE}, ++ {ERROR_TOO_MANY_OPEN_FILES, EMFILE}, ++ {ERROR_UNRECOGNIZED_MEDIA, ENXIO}, ++ {ERROR_UNRECOGNIZED_VOLUME, ENODEV}, ++ {ERROR_WAIT_NO_CHILDREN, ECHILD}, ++ {ERROR_WRITE_FAULT, EIO}, ++ {ERROR_WRITE_PROTECT, EROFS} ++/* MinGW does not define ETXTBSY as yet. 
++ {ERROR_LOCK_VIOLATION, ETXTBSY}, ++ {ERROR_SHARING_VIOLATION, ETXTBSY}, ++*/ ++ }; ++ ++ size_t i; ++ ++ for (i = 0; i < sizeof (mapping)/sizeof (mapping[0]); ++i) ++ if (mapping[i][0] == windows_code) ++ { ++ errno = mapping[i][1]; ++ return; ++ } ++ ++ /* Unrecognized error. Use EACCESS to have some error code, ++ not misleading "No error" thing. */ ++ errno = EACCES; ++} ++ ++int rename (const char *oldpath, const char *newpath) ++{ ++ BOOL r; ++ int oldpath_converted = 0; ++ char win32_oldpath[MAX_PATH + 1]; ++ char win32_newpath[MAX_PATH + 1]; ++ ++ /* Older versions of the cygpath program called FindFirstFile, but ++ not FindClose. As a result, a long-running cygpath program ends ++ up leaking these handles, and, as a result, the Windows kernel ++ will not let us remove or rename things in directories. Therefore, ++ we kill the child cygpath program now. ++ ++ The defect in cygpath was corrected by this patch: ++ ++ http://cygwin.com/ml/cygwin-patches/2007-q1/msg00033.html ++ ++ but older versions of cygpath will be in use for the forseeable ++ future. */ ++ ++ cygpath_close (); ++ cygpath_initialized = 0; ++ ++ /* Assume all paths are Windows paths. */ ++ r = MoveFileEx (oldpath, newpath, MOVEFILE_REPLACE_EXISTING); ++ if (r) ++ return 0; ++ else if (GetLastError () != ERROR_PATH_NOT_FOUND) ++ goto error; ++ ++ /* Perhaps the old path is a cygwin path? */ ++ if (cygpath (oldpath, win32_oldpath)) ++ { ++ oldpath_converted = 1; ++ r = MoveFileEx (win32_oldpath, newpath, MOVEFILE_REPLACE_EXISTING); ++ if (r) ++ return 0; ++ else if (GetLastError () != ERROR_PATH_NOT_FOUND) ++ goto error; ++ } ++ ++ /* Perhaps the new path is a cygwin path? */ ++ if (cygpath (newpath, win32_newpath)) ++ { ++ r = MoveFileEx (oldpath_converted ? win32_oldpath : oldpath, ++ win32_newpath, MOVEFILE_REPLACE_EXISTING); ++ if (r == TRUE) ++ return 0; ++ } ++error: ++ set_errno_from_windows_code (GetLastError ()); ++ return -1; ++} ++ ++int remove (const char *pathname) ++{ ++ int r; ++ char win32_path[MAX_PATH + 1]; ++ ++ cygpath_close (); ++ cygpath_initialized = 0; ++ ++ /* Assume PATH is a Windows path. */ ++ r = _unlink (pathname); ++ if (r != -1 || errno != ENOENT) ++ return r; ++ /* Perhaps it is a Cygwin path? */ ++ if (cygpath (pathname, win32_path)) ++ r = _unlink (win32_path); ++ return r; ++} ++ ++int unlink(const char *pathname) ++{ ++ return remove (pathname); ++} ++ ++int ++chdir (const char *path) ++{ ++ int ret; ++ char win32_path[MAX_PATH + 1]; ++ ++ /* Assume PATH is a Windows path. */ ++ ret = _chdir (path); ++ if (ret != -1 || errno != ENOENT) ++ return ret; ++ /* Perhaps it is a Cygwin path? */ ++ if (cygpath (path, win32_path)) ++ ret = _chdir (win32_path); ++ return ret; ++} +--- a/libiberty/make-temp-file.c ++++ b/libiberty/make-temp-file.c +@@ -23,6 +23,7 @@ Boston, MA 02110-1301, USA. */ + + #include /* May get P_tmpdir. */ + #include ++#include + #ifdef HAVE_UNISTD_H + #include + #endif +@@ -35,6 +36,9 @@ Boston, MA 02110-1301, USA. */ + #ifdef HAVE_SYS_FILE_H + #include /* May get R_OK, etc. on some systems. */ + #endif ++#if defined(_WIN32) && !defined(__CYGWIN__) ++#include ++#endif + + #ifndef R_OK + #define R_OK 4 +@@ -55,6 +59,8 @@ extern int mkstemps (char *, int); + #define TEMP_FILE "ccXXXXXX" + #define TEMP_FILE_LEN (sizeof(TEMP_FILE) - 1) + ++#if !defined(_WIN32) || defined(__CYGWIN__) ++ + /* Subroutine of choose_tmpdir. + If BASE is non-NULL, return it. + Otherwise it checks if DIR is a usable directory. 
+@@ -80,6 +86,8 @@ static const char usrtmp[] = + static const char vartmp[] = + { DIR_SEPARATOR, 'v', 'a', 'r', DIR_SEPARATOR, 't', 'm', 'p', 0 }; + ++#endif ++ + static char *memoized_tmpdir; + + /* +@@ -96,40 +104,58 @@ files in. + char * + choose_tmpdir (void) + { +- const char *base = 0; +- char *tmpdir; +- unsigned int len; +- +- if (memoized_tmpdir) +- return memoized_tmpdir; +- +- base = try_dir (getenv ("TMPDIR"), base); +- base = try_dir (getenv ("TMP"), base); +- base = try_dir (getenv ("TEMP"), base); +- ++ if (!memoized_tmpdir) ++ { ++#if !defined(_WIN32) || defined(__CYGWIN__) ++ const char *base = 0; ++ char *tmpdir; ++ unsigned int len; ++ ++ base = try_dir (getenv ("TMPDIR"), base); ++ base = try_dir (getenv ("TMP"), base); ++ base = try_dir (getenv ("TEMP"), base); ++ + #ifdef P_tmpdir +- base = try_dir (P_tmpdir, base); ++ base = try_dir (P_tmpdir, base); + #endif + +- /* Try /var/tmp, /usr/tmp, then /tmp. */ +- base = try_dir (vartmp, base); +- base = try_dir (usrtmp, base); +- base = try_dir (tmp, base); +- +- /* If all else fails, use the current directory! */ +- if (base == 0) +- base = "."; +- +- /* Append DIR_SEPARATOR to the directory we've chosen +- and return it. */ +- len = strlen (base); +- tmpdir = XNEWVEC (char, len + 2); +- strcpy (tmpdir, base); +- tmpdir[len] = DIR_SEPARATOR; +- tmpdir[len+1] = '\0'; ++ /* Try /var/tmp, /usr/tmp, then /tmp. */ ++ base = try_dir (vartmp, base); ++ base = try_dir (usrtmp, base); ++ base = try_dir (tmp, base); ++ ++ /* If all else fails, use the current directory! */ ++ if (base == 0) ++ base = "."; ++ /* Append DIR_SEPARATOR to the directory we've chosen ++ and return it. */ ++ len = strlen (base); ++ tmpdir = XNEWVEC (char, len + 2); ++ strcpy (tmpdir, base); ++ tmpdir[len] = DIR_SEPARATOR; ++ tmpdir[len+1] = '\0'; ++ memoized_tmpdir = tmpdir; ++#else /* defined(_WIN32) && !defined(__CYGWIN__) */ ++ DWORD len; ++ ++ /* Figure out how much space we need. */ ++ len = GetTempPath(0, NULL); ++ if (len) ++ { ++ memoized_tmpdir = XNEWVEC (char, len); ++ if (!GetTempPath(len, memoized_tmpdir)) ++ { ++ XDELETEVEC (memoized_tmpdir); ++ memoized_tmpdir = NULL; ++ } ++ } ++ if (!memoized_tmpdir) ++ /* If all else fails, use the current directory. */ ++ memoized_tmpdir = xstrdup (".\\"); ++#endif /* defined(_WIN32) && !defined(__CYGWIN__) */ ++ } + +- memoized_tmpdir = tmpdir; +- return tmpdir; ++ return memoized_tmpdir; + } + + /* +@@ -166,11 +192,14 @@ make_temp_file (const char *suffix) + strcpy (temp_filename + base_len + TEMP_FILE_LEN, suffix); + + fd = mkstemps (temp_filename, suffix_len); +- /* If mkstemps failed, then something bad is happening. Maybe we should +- issue a message about a possible security attack in progress? */ ++ /* Mkstemps failed. It may be EPERM, ENOSPC etc. */ + if (fd == -1) +- abort (); +- /* Similarly if we can not close the file. */ ++ { ++ fprintf (stderr, "Cannot create temporary file in %s: %s\n", ++ base, strerror (errno)); ++ abort (); ++ } ++ /* We abort on failed close out of sheer paranoia. */ + if (close (fd)) + abort (); + return temp_filename; +--- a/libiberty/mkstemps.c ++++ b/libiberty/mkstemps.c +@@ -127,6 +127,13 @@ mkstemps (char *pattern, int suffix_len) + if (fd >= 0) + /* The file does not exist. */ + return fd; ++ if (errno != EEXIST ++#ifdef EISDIR ++ && errno != EISDIR ++#endif ++ ) ++ /* Fatal error (EPERM, ENOSPC etc). Doesn't make sense to loop. */ ++ break; + + /* This is a random value. 
It is only necessary that the next + TMP_MAX values generated by adding 7777 to VALUE are different +--- a/libiberty/pex-win32.c ++++ b/libiberty/pex-win32.c +@@ -119,7 +119,7 @@ static int + pex_win32_open_read (struct pex_obj *obj ATTRIBUTE_UNUSED, const char *name, + int binary) + { +- return _open (name, _O_RDONLY | (binary ? _O_BINARY : _O_TEXT)); ++ return open (name, _O_RDONLY | (binary ? _O_BINARY : _O_TEXT)); + } + + /* Open a file for writing. */ +@@ -130,10 +130,10 @@ pex_win32_open_write (struct pex_obj *ob + { + /* Note that we can't use O_EXCL here because gcc may have already + created the temporary file via make_temp_file. */ +- return _open (name, +- (_O_WRONLY | _O_CREAT | _O_TRUNC +- | (binary ? _O_BINARY : _O_TEXT)), +- _S_IREAD | _S_IWRITE); ++ return open (name, ++ (_O_WRONLY | _O_CREAT | _O_TRUNC ++ | (binary ? _O_BINARY : _O_TEXT)), ++ _S_IREAD | _S_IWRITE); + } + + /* Close a file. */ +--- a/libjava/Makefile.am ++++ b/libjava/Makefile.am +@@ -48,9 +48,14 @@ endif + + dbexec_LTLIBRARIES = libjvm.la + +-pkgconfigdir = $(libdir)/pkgconfig ++# Install the pkgconfig file in a target-specific directory, since the ++# libraries it indicates + +-jardir = $(datadir)/java ++pkgconfigdir = $(toolexeclibdir)/pkgconfig ++ ++# We install the JAR in a target-specific directory so that toolchains ++# build from different sources can be installed in the same directory. ++jardir = $(prefix)/$(target_noncanonical)/share/java + jar_DATA = libgcj-$(gcc_version).jar libgcj-tools-$(gcc_version).jar + if INSTALL_ECJ_JAR + jar_DATA += $(ECJ_BUILD_JAR) +@@ -81,7 +86,7 @@ bin_PROGRAMS = jv-convert gij grmic grmi + dbexec_DATA = $(db_name) + endif + +-bin_SCRIPTS = addr2name.awk ++bin_SCRIPTS = + + if BUILD_ECJ1 + ## We build ecjx and not ecj1 because in one mode, ecjx will not work +@@ -107,12 +112,15 @@ if ANONVERSCRIPT + extra_ldflags_libjava += -Wl,--version-script=$(srcdir)/libgcj.ver + endif + ++LTLDFLAGS = $(shell $(top_srcdir)/../libtool-ldflags $(LDFLAGS)) + GCJLINK = $(LIBTOOL) --tag=GCJ --mode=link $(GCJ) -L$(here) $(JC1FLAGS) \ +- $(LDFLAGS) -o $@ ++ $(LTLDFLAGS) -o $@ + GCJ_FOR_ECJX = @GCJ_FOR_ECJX@ + GCJ_FOR_ECJX_LINK = $(GCJ_FOR_ECJX) -o $@ + LIBLINK = $(LIBTOOL) --tag=CXX --mode=link $(CXX) -L$(here) $(JC1FLAGS) \ +- $(LDFLAGS) $(extra_ldflags_libjava) $(extra_ldflags) -o $@ ++ $(LTLDFLAGS) $(extra_ldflags_libjava) $(extra_ldflags) -o $@ ++CXXLINK = $(LIBTOOL) --tag=CXX --mode=link $(CXXLD) $(AM_CXXFLAGS) \ ++ $(CXXFLAGS) $(AM_LDFLAGS) $(LTLDFLAGS) -o $@ + + GCC_UNWIND_INCLUDE = @GCC_UNWIND_INCLUDE@ + +--- a/libjava/Makefile.in ++++ b/libjava/Makefile.in +@@ -508,8 +508,6 @@ LTCXXCOMPILE = $(LIBTOOL) --tag=CXX --mo + $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ + $(AM_CXXFLAGS) $(CXXFLAGS) + CXXLD = $(CXX) +-CXXLINK = $(LIBTOOL) --tag=CXX --mode=link $(CXXLD) $(AM_CXXFLAGS) \ +- $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ + GCJCOMPILE = $(GCJ) $(AM_GCJFLAGS) $(GCJFLAGS) + LTGCJCOMPILE = $(LIBTOOL) --tag=GCJ --mode=compile $(GCJ) \ + $(AM_GCJFLAGS) $(GCJFLAGS) +@@ -836,8 +834,14 @@ toolexeclib_LTLIBRARIES = libgcj.la libg + $(am__append_2) $(am__append_3) + toolexecmainlib_DATA = libgcj.spec + dbexec_LTLIBRARIES = libjvm.la +-pkgconfigdir = $(libdir)/pkgconfig +-jardir = $(datadir)/java ++ ++# Install the pkgconfig file in a target-specific directory, since the ++# libraries it indicates ++pkgconfigdir = $(toolexeclibdir)/pkgconfig ++ ++# We install the JAR in a target-specific directory so that toolchains ++# build from different sources can be installed in the 
same directory. ++jardir = $(prefix)/$(target_noncanonical)/share/java + jar_DATA = libgcj-$(gcc_version).jar libgcj-tools-$(gcc_version).jar \ + $(am__append_4) + @JAVA_HOME_SET_FALSE@JAVA_HOME_DIR = $(prefix) +@@ -847,14 +851,18 @@ jar_DATA = libgcj-$(gcc_version).jar lib + db_name = classmap.db + db_pathtail = $(gcjsubdir)/$(db_name) + @NATIVE_TRUE@dbexec_DATA = $(db_name) +-bin_SCRIPTS = addr2name.awk ++bin_SCRIPTS = + GCJ_WITH_FLAGS = $(GCJ) --encoding=UTF-8 -Wno-deprecated ++LTLDFLAGS = $(shell $(top_srcdir)/../libtool-ldflags $(LDFLAGS)) + GCJLINK = $(LIBTOOL) --tag=GCJ --mode=link $(GCJ) -L$(here) $(JC1FLAGS) \ +- $(LDFLAGS) -o $@ ++ $(LTLDFLAGS) -o $@ + + GCJ_FOR_ECJX_LINK = $(GCJ_FOR_ECJX) -o $@ + LIBLINK = $(LIBTOOL) --tag=CXX --mode=link $(CXX) -L$(here) $(JC1FLAGS) \ +- $(LDFLAGS) $(extra_ldflags_libjava) $(extra_ldflags) -o $@ ++ $(LTLDFLAGS) $(extra_ldflags_libjava) $(extra_ldflags) -o $@ ++ ++CXXLINK = $(LIBTOOL) --tag=CXX --mode=link $(CXXLD) $(AM_CXXFLAGS) \ ++ $(CXXFLAGS) $(AM_LDFLAGS) $(LTLDFLAGS) -o $@ + + WARNINGS = -Wextra -Wall + AM_CXXFLAGS = \ +--- a/libjava/classpath/Makefile.in ++++ b/libjava/classpath/Makefile.in +@@ -357,9 +357,12 @@ sysconfdir = @sysconfdir@ + target = @target@ + target_alias = @target_alias@ + target_cpu = @target_cpu@ ++target_noncanonical = @target_noncanonical@ + target_os = @target_os@ + target_vendor = @target_vendor@ ++toolexecdir = @toolexecdir@ + toolexeclibdir = @toolexeclibdir@ ++toolexecmainlibdir = @toolexecmainlibdir@ + vm_classes = @vm_classes@ + + # lib first, to compile .class files before native code, last examples +--- a/libjava/classpath/configure ++++ b/libjava/classpath/configure +@@ -461,7 +461,7 @@ ac_includes_default="\ + # include + #endif" + +-ac_subst_vars='SHELL PATH_SEPARATOR PACKAGE_NAME PACKAGE_TARNAME PACKAGE_VERSION PACKAGE_STRING PACKAGE_BUGREPORT exec_prefix prefix program_transform_name bindir sbindir libexecdir datadir sysconfdir sharedstatedir localstatedir libdir includedir oldincludedir infodir mandir build_alias host_alias target_alias DEFS ECHO_C ECHO_N ECHO_T LIBS build build_cpu build_vendor build_os host host_cpu host_vendor host_os target target_cpu target_vendor target_os JAVA_MAINTAINER_MODE_TRUE JAVA_MAINTAINER_MODE_FALSE GENINSRC_TRUE GENINSRC_FALSE multi_basedir LIBVERSION CLASSPATH_MODULE CLASSPATH_CONVENIENCE INSTALL_PROGRAM INSTALL_SCRIPT INSTALL_DATA CYGPATH_W PACKAGE VERSION ACLOCAL AUTOCONF AUTOMAKE AUTOHEADER MAKEINFO install_sh STRIP ac_ct_STRIP INSTALL_STRIP_PROGRAM mkdir_p AWK SET_MAKE am__leading_dot AMTAR am__tar am__untar CREATE_COLLECTIONS_TRUE CREATE_COLLECTIONS_FALSE CREATE_JNI_LIBRARIES_TRUE CREATE_JNI_LIBRARIES_FALSE CREATE_CORE_JNI_LIBRARIES_TRUE CREATE_CORE_JNI_LIBRARIES_FALSE CREATE_GCONF_PEER_LIBRARIES_TRUE CREATE_GCONF_PEER_LIBRARIES_FALSE CREATE_GSTREAMER_PEER_LIBRARIES_TRUE CREATE_GSTREAMER_PEER_LIBRARIES_FALSE default_toolkit CREATE_XMLJ_LIBRARY_TRUE CREATE_XMLJ_LIBRARY_FALSE CC CFLAGS LDFLAGS CPPFLAGS ac_ct_CC EXEEXT OBJEXT DEPDIR am__include am__quote AMDEP_TRUE AMDEP_FALSE AMDEPBACKSLASH CCDEPMODE am__fastdepCC_TRUE am__fastdepCC_FALSE CPP EGREP CREATE_ALSA_LIBRARIES_TRUE CREATE_ALSA_LIBRARIES_FALSE CREATE_DSSI_LIBRARIES_TRUE CREATE_DSSI_LIBRARIES_FALSE CREATE_GTK_PEER_LIBRARIES_TRUE CREATE_GTK_PEER_LIBRARIES_FALSE CREATE_QT_PEER_LIBRARIES_TRUE CREATE_QT_PEER_LIBRARIES_FALSE CREATE_PLUGIN_TRUE CREATE_PLUGIN_FALSE toolexeclibdir nativeexeclibdir glibjdir VM_BINARY CREATE_JNI_HEADERS_TRUE CREATE_JNI_HEADERS_FALSE CREATE_WRAPPERS_TRUE CREATE_WRAPPERS_FALSE LN_S 
LIBTOOL SED FGREP GREP LD DUMPBIN ac_ct_DUMPBIN NM AR ac_ct_AR RANLIB ac_ct_RANLIB lt_ECHO CXX CXXFLAGS ac_ct_CXX CXXDEPMODE am__fastdepCXX_TRUE am__fastdepCXX_FALSE CXXCPP PERL COLLECTIONS_PREFIX LIBMAGIC LIBICONV LTLIBICONV WARNING_CFLAGS STRICT_WARNING_CFLAGS ERROR_CFLAGS PKG_CONFIG XML_CFLAGS XML_LIBS XSLT_CFLAGS XSLT_LIBS X_CFLAGS X_PRE_LIBS X_LIBS X_EXTRA_LIBS GTK_CFLAGS GTK_LIBS FREETYPE2_CFLAGS FREETYPE2_LIBS PANGOFT2_CFLAGS PANGOFT2_LIBS CAIRO_CFLAGS CAIRO_LIBS XTEST_LIBS GCONF_CFLAGS GCONF_LIBS GDK_CFLAGS GDK_LIBS GSTREAMER_CFLAGS GSTREAMER_LIBS GSTREAMER_BASE_CFLAGS GSTREAMER_BASE_LIBS GSTREAMER_PLUGINS_BASE_CFLAGS GSTREAMER_PLUGINS_BASE_LIBS GST_PLUGIN_LDFLAGS GSTREAMER_FILE_READER GSTREAMER_MIXER_PROVIDER QT_CFLAGS QT_LIBS MOC MOZILLA_CFLAGS MOZILLA_LIBS GLIB_CFLAGS GLIB_LIBS PLUGIN_DIR USER_JAVAH CLASSPATH_INCLUDES GCJ JIKES JIKESENCODING JIKESWARNINGS KJC ECJ JAVAC FOUND_GCJ_TRUE FOUND_GCJ_FALSE FOUND_JIKES_TRUE FOUND_JIKES_FALSE FOUND_ECJ_TRUE FOUND_ECJ_FALSE FOUND_JAVAC_TRUE FOUND_JAVAC_FALSE FOUND_KJC_TRUE FOUND_KJC_FALSE USER_CLASSLIB USER_SPECIFIED_CLASSLIB_TRUE USER_SPECIFIED_CLASSLIB_FALSE vm_classes MAINTAINER_MODE_TRUE MAINTAINER_MODE_FALSE MAINT LIBDEBUG INIT_LOAD_LIBRARY ECJ_JAR JAVA_LANG_SYSTEM_EXPLICIT_INITIALIZATION REMOVE MKDIR CP DATE FIND ZIP FASTJAR INSTALL_GLIBJ_ZIP_TRUE INSTALL_GLIBJ_ZIP_FALSE INSTALL_CLASS_FILES_TRUE INSTALL_CLASS_FILES_FALSE BUILD_CLASS_FILES_TRUE BUILD_CLASS_FILES_FALSE EXAMPLESDIR GJDOC CREATE_API_DOCS_TRUE CREATE_API_DOCS_FALSE JAY JAY_SKELETON REGEN_PARSERS_TRUE REGEN_PARSERS_FALSE USE_PREBUILT_GLIBJ_ZIP_TRUE USE_PREBUILT_GLIBJ_ZIP_FALSE PATH_TO_GLIBJ_ZIP USE_ESCHER_TRUE USE_ESCHER_FALSE PATH_TO_ESCHER ENABLE_LOCAL_SOCKETS_TRUE ENABLE_LOCAL_SOCKETS_FALSE DEFAULT_PREFS_PEER LIBOBJS LTLIBOBJS' ++ac_subst_vars='SHELL PATH_SEPARATOR PACKAGE_NAME PACKAGE_TARNAME PACKAGE_VERSION PACKAGE_STRING PACKAGE_BUGREPORT exec_prefix prefix program_transform_name bindir sbindir libexecdir datadir sysconfdir sharedstatedir localstatedir libdir includedir oldincludedir infodir mandir build_alias host_alias target_alias DEFS ECHO_C ECHO_N ECHO_T LIBS build build_cpu build_vendor build_os host host_cpu host_vendor host_os target target_cpu target_vendor target_os JAVA_MAINTAINER_MODE_TRUE JAVA_MAINTAINER_MODE_FALSE GENINSRC_TRUE GENINSRC_FALSE multi_basedir LIBVERSION CLASSPATH_MODULE CLASSPATH_CONVENIENCE INSTALL_PROGRAM INSTALL_SCRIPT INSTALL_DATA CYGPATH_W PACKAGE VERSION ACLOCAL AUTOCONF AUTOMAKE AUTOHEADER MAKEINFO install_sh STRIP ac_ct_STRIP INSTALL_STRIP_PROGRAM mkdir_p AWK SET_MAKE am__leading_dot AMTAR am__tar am__untar CREATE_COLLECTIONS_TRUE CREATE_COLLECTIONS_FALSE CREATE_JNI_LIBRARIES_TRUE CREATE_JNI_LIBRARIES_FALSE CREATE_CORE_JNI_LIBRARIES_TRUE CREATE_CORE_JNI_LIBRARIES_FALSE CREATE_GCONF_PEER_LIBRARIES_TRUE CREATE_GCONF_PEER_LIBRARIES_FALSE CREATE_GSTREAMER_PEER_LIBRARIES_TRUE CREATE_GSTREAMER_PEER_LIBRARIES_FALSE default_toolkit CREATE_XMLJ_LIBRARY_TRUE CREATE_XMLJ_LIBRARY_FALSE CC CFLAGS LDFLAGS CPPFLAGS ac_ct_CC EXEEXT OBJEXT DEPDIR am__include am__quote AMDEP_TRUE AMDEP_FALSE AMDEPBACKSLASH CCDEPMODE am__fastdepCC_TRUE am__fastdepCC_FALSE CPP EGREP CREATE_ALSA_LIBRARIES_TRUE CREATE_ALSA_LIBRARIES_FALSE CREATE_DSSI_LIBRARIES_TRUE CREATE_DSSI_LIBRARIES_FALSE CREATE_GTK_PEER_LIBRARIES_TRUE CREATE_GTK_PEER_LIBRARIES_FALSE CREATE_QT_PEER_LIBRARIES_TRUE CREATE_QT_PEER_LIBRARIES_FALSE CREATE_PLUGIN_TRUE CREATE_PLUGIN_FALSE target_noncanonical toolexecdir toolexecmainlibdir toolexeclibdir nativeexeclibdir glibjdir VM_BINARY 
CREATE_JNI_HEADERS_TRUE CREATE_JNI_HEADERS_FALSE CREATE_WRAPPERS_TRUE CREATE_WRAPPERS_FALSE LN_S LIBTOOL SED FGREP GREP LD DUMPBIN ac_ct_DUMPBIN NM AR ac_ct_AR RANLIB ac_ct_RANLIB lt_ECHO CXX CXXFLAGS ac_ct_CXX CXXDEPMODE am__fastdepCXX_TRUE am__fastdepCXX_FALSE CXXCPP PERL COLLECTIONS_PREFIX LIBMAGIC LIBICONV LTLIBICONV WARNING_CFLAGS STRICT_WARNING_CFLAGS ERROR_CFLAGS PKG_CONFIG XML_CFLAGS XML_LIBS XSLT_CFLAGS XSLT_LIBS X_CFLAGS X_PRE_LIBS X_LIBS X_EXTRA_LIBS GTK_CFLAGS GTK_LIBS FREETYPE2_CFLAGS FREETYPE2_LIBS PANGOFT2_CFLAGS PANGOFT2_LIBS CAIRO_CFLAGS CAIRO_LIBS XTEST_LIBS GCONF_CFLAGS GCONF_LIBS GDK_CFLAGS GDK_LIBS GSTREAMER_CFLAGS GSTREAMER_LIBS GSTREAMER_BASE_CFLAGS GSTREAMER_BASE_LIBS GSTREAMER_PLUGINS_BASE_CFLAGS GSTREAMER_PLUGINS_BASE_LIBS GST_PLUGIN_LDFLAGS GSTREAMER_FILE_READER GSTREAMER_MIXER_PROVIDER QT_CFLAGS QT_LIBS MOC MOZILLA_CFLAGS MOZILLA_LIBS GLIB_CFLAGS GLIB_LIBS PLUGIN_DIR USER_JAVAH CLASSPATH_INCLUDES GCJ JIKES JIKESENCODING JIKESWARNINGS KJC ECJ JAVAC FOUND_GCJ_TRUE FOUND_GCJ_FALSE FOUND_JIKES_TRUE FOUND_JIKES_FALSE FOUND_ECJ_TRUE FOUND_ECJ_FALSE FOUND_JAVAC_TRUE FOUND_JAVAC_FALSE FOUND_KJC_TRUE FOUND_KJC_FALSE USER_CLASSLIB USER_SPECIFIED_CLASSLIB_TRUE USER_SPECIFIED_CLASSLIB_FALSE vm_classes MAINTAINER_MODE_TRUE MAINTAINER_MODE_FALSE MAINT LIBDEBUG INIT_LOAD_LIBRARY ECJ_JAR JAVA_LANG_SYSTEM_EXPLICIT_INITIALIZATION REMOVE MKDIR CP DATE FIND ZIP FASTJAR INSTALL_GLIBJ_ZIP_TRUE INSTALL_GLIBJ_ZIP_FALSE INSTALL_CLASS_FILES_TRUE INSTALL_CLASS_FILES_FALSE BUILD_CLASS_FILES_TRUE BUILD_CLASS_FILES_FALSE EXAMPLESDIR GJDOC CREATE_API_DOCS_TRUE CREATE_API_DOCS_FALSE JAY JAY_SKELETON REGEN_PARSERS_TRUE REGEN_PARSERS_FALSE USE_PREBUILT_GLIBJ_ZIP_TRUE USE_PREBUILT_GLIBJ_ZIP_FALSE PATH_TO_GLIBJ_ZIP USE_ESCHER_TRUE USE_ESCHER_FALSE PATH_TO_ESCHER ENABLE_LOCAL_SOCKETS_TRUE ENABLE_LOCAL_SOCKETS_FALSE DEFAULT_PREFS_PEER LIBOBJS LTLIBOBJS' + ac_subst_files='' + + # Initialize some variables set by options. +@@ -1058,6 +1058,9 @@ Optional Features: + default=no + --disable-plugin compile gcjwebplugin (disabled by --disable-plugin) + default=yes ++ --enable-version-specific-runtime-libs ++ specify that runtime libraries should be installed ++ in a compiler-specific directory + --enable-regen-headers automatically regenerate JNI headers default=no + --enable-tool-wrappers create tool wrapper binaries default=no + --enable-static[=PKGS] +@@ -4753,16 +4756,64 @@ else + fi + + ++case ${host_alias} in ++ "") host_noncanonical=${build_noncanonical} ;; ++ *) host_noncanonical=${host_alias} ;; ++esac ++case ${target_alias} in ++ "") target_noncanonical=${host_noncanonical} ;; ++ *) target_noncanonical=${target_alias} ;; ++esac ++ ++ ++# Check whether --enable-version-specific-runtime-libs or --disable-version-specific-runtime-libs was given. ++if test "${enable_version_specific_runtime_libs+set}" = set; then ++ enableval="$enable_version_specific_runtime_libs" ++ case "$enableval" in ++ yes) version_specific_libs=yes ;; ++ no) version_specific_libs=no ;; ++ *) { { echo "$as_me:$LINENO: error: Unknown argument to enable/disable version-specific libs" >&5 ++echo "$as_me: error: Unknown argument to enable/disable version-specific libs" >&2;} ++ { (exit 1); exit 1; }; };; ++ esac ++else ++ version_specific_libs=no ++ ++fi; + +- multi_os_directory=`$CC -print-multi-os-directory` +- case $multi_os_directory in +- .) toolexeclibdir=${libdir} ;; # Avoid trailing /. 
+- *) toolexeclibdir=${libdir}/${multi_os_directory} ;; ++ case ${version_specific_libs} in ++ yes) ++ # Need the gcc compiler version to know where to install libraries ++ # and header files if --enable-version-specific-runtime-libs option ++ # is selected. ++ includedir='$(libdir)/gcc/$(target_noncanonical)/$(gcc_version)/include/' ++ toolexecdir='$(libdir)/gcc/$(target_noncanonical)' ++ toolexecmainlibdir='$(toolexecdir)/$(gcc_version)$(MULTISUBDIR)' ++ toolexeclibdir=$toolexecmainlibdir ++ ;; ++ no) ++ if test -n "$with_cross_host" && ++ test x"$with_cross_host" != x"no"; then ++ # Install a library built with a cross compiler in tooldir, not libdir. ++ toolexecdir='$(exec_prefix)/$(target_noncanonical)' ++ toolexecmainlibdir='$(toolexecdir)/lib' ++ else ++ toolexecdir='$(libdir)/gcc-lib/$(target_noncanonical)' ++ toolexecmainlibdir='$(libdir)' ++ fi ++ multi_os_directory=`$CC -print-multi-os-directory` ++ case $multi_os_directory in ++ .) toolexeclibdir=$toolexecmainlibdir ;; # Avoid trailing /. ++ *) toolexeclibdir=$toolexecmainlibdir/$multi_os_directory ;; ++ esac ++ ;; + esac + + + + ++ ++ + # Check whether --with-native-libdir or --without-native-libdir was given. + if test "${with_native_libdir+set}" = set; then + withval="$with_native_libdir" +@@ -5702,13 +5753,13 @@ if test "${lt_cv_nm_interface+set}" = se + else + lt_cv_nm_interface="BSD nm" + echo "int some_variable = 0;" > conftest.$ac_ext +- (eval echo "\"\$as_me:5705: $ac_compile\"" >&5) ++ (eval echo "\"\$as_me:5756: $ac_compile\"" >&5) + (eval "$ac_compile" 2>conftest.err) + cat conftest.err >&5 +- (eval echo "\"\$as_me:5708: $NM \\\"conftest.$ac_objext\\\"\"" >&5) ++ (eval echo "\"\$as_me:5759: $NM \\\"conftest.$ac_objext\\\"\"" >&5) + (eval "$NM \"conftest.$ac_objext\"" 2>conftest.err > conftest.out) + cat conftest.err >&5 +- (eval echo "\"\$as_me:5711: output\"" >&5) ++ (eval echo "\"\$as_me:5762: output\"" >&5) + cat conftest.out >&5 + if $GREP 'External.*some_variable' conftest.out > /dev/null; then + lt_cv_nm_interface="MS dumpbin" +@@ -6752,7 +6803,7 @@ ia64-*-hpux*) + ;; + *-*-irix6*) + # Find out which ABI we are using. +- echo '#line 6755 "configure"' > conftest.$ac_ext ++ echo '#line 6806 "configure"' > conftest.$ac_ext + if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>&5 + ac_status=$? +@@ -7384,11 +7435,11 @@ else + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` +- (eval echo "\"\$as_me:7387: $lt_compile\"" >&5) ++ (eval echo "\"\$as_me:7438: $lt_compile\"" >&5) + (eval "$lt_compile" 2>conftest.err) + ac_status=$? + cat conftest.err >&5 +- echo "$as_me:7391: \$? = $ac_status" >&5 ++ echo "$as_me:7442: \$? = $ac_status" >&5 + if (exit $ac_status) && test -s "$ac_outfile"; then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings other than the usual output. +@@ -7706,11 +7757,11 @@ else + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` +- (eval echo "\"\$as_me:7709: $lt_compile\"" >&5) ++ (eval echo "\"\$as_me:7760: $lt_compile\"" >&5) + (eval "$lt_compile" 2>conftest.err) + ac_status=$? + cat conftest.err >&5 +- echo "$as_me:7713: \$? = $ac_status" >&5 ++ echo "$as_me:7764: \$? 
= $ac_status" >&5 + if (exit $ac_status) && test -s "$ac_outfile"; then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings other than the usual output. +@@ -7811,11 +7862,11 @@ else + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` +- (eval echo "\"\$as_me:7814: $lt_compile\"" >&5) ++ (eval echo "\"\$as_me:7865: $lt_compile\"" >&5) + (eval "$lt_compile" 2>out/conftest.err) + ac_status=$? + cat out/conftest.err >&5 +- echo "$as_me:7818: \$? = $ac_status" >&5 ++ echo "$as_me:7869: \$? = $ac_status" >&5 + if (exit $ac_status) && test -s out/conftest2.$ac_objext + then + # The compiler can only warn and ignore the option if not recognized +@@ -7866,11 +7917,11 @@ else + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` +- (eval echo "\"\$as_me:7869: $lt_compile\"" >&5) ++ (eval echo "\"\$as_me:7920: $lt_compile\"" >&5) + (eval "$lt_compile" 2>out/conftest.err) + ac_status=$? + cat out/conftest.err >&5 +- echo "$as_me:7873: \$? = $ac_status" >&5 ++ echo "$as_me:7924: \$? = $ac_status" >&5 + if (exit $ac_status) && test -s out/conftest2.$ac_objext + then + # The compiler can only warn and ignore the option if not recognized +@@ -10718,7 +10769,7 @@ else + lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 + lt_status=$lt_dlunknown + cat > conftest.$ac_ext <<_LT_EOF +-#line 10721 "configure" ++#line 10772 "configure" + #include "confdefs.h" + + #if HAVE_DLFCN_H +@@ -10818,7 +10869,7 @@ else + lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 + lt_status=$lt_dlunknown + cat > conftest.$ac_ext <<_LT_EOF +-#line 10821 "configure" ++#line 10872 "configure" + #include "confdefs.h" + + #if HAVE_DLFCN_H +@@ -15215,11 +15266,11 @@ else + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` +- (eval echo "\"\$as_me:15218: $lt_compile\"" >&5) ++ (eval echo "\"\$as_me:15269: $lt_compile\"" >&5) + (eval "$lt_compile" 2>conftest.err) + ac_status=$? + cat conftest.err >&5 +- echo "$as_me:15222: \$? = $ac_status" >&5 ++ echo "$as_me:15273: \$? = $ac_status" >&5 + if (exit $ac_status) && test -s "$ac_outfile"; then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings other than the usual output. +@@ -15314,11 +15365,11 @@ else + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` +- (eval echo "\"\$as_me:15317: $lt_compile\"" >&5) ++ (eval echo "\"\$as_me:15368: $lt_compile\"" >&5) + (eval "$lt_compile" 2>out/conftest.err) + ac_status=$? + cat out/conftest.err >&5 +- echo "$as_me:15321: \$? = $ac_status" >&5 ++ echo "$as_me:15372: \$? = $ac_status" >&5 + if (exit $ac_status) && test -s out/conftest2.$ac_objext + then + # The compiler can only warn and ignore the option if not recognized +@@ -15366,11 +15417,11 @@ else + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` +- (eval echo "\"\$as_me:15369: $lt_compile\"" >&5) ++ (eval echo "\"\$as_me:15420: $lt_compile\"" >&5) + (eval "$lt_compile" 2>out/conftest.err) + ac_status=$? + cat out/conftest.err >&5 +- echo "$as_me:15373: \$? = $ac_status" >&5 ++ echo "$as_me:15424: \$? 
= $ac_status" >&5 + if (exit $ac_status) && test -s out/conftest2.$ac_objext + then + # The compiler can only warn and ignore the option if not recognized +@@ -30956,6 +31007,9 @@ s,@CREATE_QT_PEER_LIBRARIES_TRUE@,$CREAT + s,@CREATE_QT_PEER_LIBRARIES_FALSE@,$CREATE_QT_PEER_LIBRARIES_FALSE,;t t + s,@CREATE_PLUGIN_TRUE@,$CREATE_PLUGIN_TRUE,;t t + s,@CREATE_PLUGIN_FALSE@,$CREATE_PLUGIN_FALSE,;t t ++s,@target_noncanonical@,$target_noncanonical,;t t ++s,@toolexecdir@,$toolexecdir,;t t ++s,@toolexecmainlibdir@,$toolexecmainlibdir,;t t + s,@toolexeclibdir@,$toolexeclibdir,;t t + s,@nativeexeclibdir@,$nativeexeclibdir,;t t + s,@glibjdir@,$glibjdir,;t t +--- a/libjava/classpath/configure.ac ++++ b/libjava/classpath/configure.ac +@@ -289,6 +289,16 @@ dnl defined to the same value for all mu + dnl so that we can refer to the multilib installation directories from + dnl classpath's build files. + dnl ----------------------------------------------------------- ++AC_ARG_ENABLE(version-specific-runtime-libs, ++ AS_HELP_STRING([--enable-version-specific-runtime-libs], ++ [specify that runtime libraries should be installed in a compiler-specific directory]), ++ [case "$enableval" in ++ yes) version_specific_libs=yes ;; ++ no) version_specific_libs=no ;; ++ *) AC_MSG_ERROR([Unknown argument to enable/disable version-specific libs]);; ++ esac], ++ [version_specific_libs=no] ++) + CLASSPATH_TOOLEXECLIBDIR + + dnl ----------------------------------------------------------- +--- a/libjava/classpath/doc/Makefile.in ++++ b/libjava/classpath/doc/Makefile.in +@@ -334,9 +334,12 @@ sysconfdir = @sysconfdir@ + target = @target@ + target_alias = @target_alias@ + target_cpu = @target_cpu@ ++target_noncanonical = @target_noncanonical@ + target_os = @target_os@ + target_vendor = @target_vendor@ ++toolexecdir = @toolexecdir@ + toolexeclibdir = @toolexeclibdir@ ++toolexecmainlibdir = @toolexecmainlibdir@ + vm_classes = @vm_classes@ + SUBDIRS = api + EXTRA_DIST = README.jaxp texi2pod.pl $(man_MANS) +--- a/libjava/classpath/doc/api/Makefile.in ++++ b/libjava/classpath/doc/api/Makefile.in +@@ -311,9 +311,12 @@ sysconfdir = @sysconfdir@ + target = @target@ + target_alias = @target_alias@ + target_cpu = @target_cpu@ ++target_noncanonical = @target_noncanonical@ + target_os = @target_os@ + target_vendor = @target_vendor@ ++toolexecdir = @toolexecdir@ + toolexeclibdir = @toolexeclibdir@ ++toolexecmainlibdir = @toolexecmainlibdir@ + vm_classes = @vm_classes@ + @CREATE_API_DOCS_TRUE@noinst_DATA = html + sourcepath = $(top_builddir):$(top_srcdir):$(top_srcdir)/vm/reference:$(top_srcdir)/external/w3c_dom:$(top_srcdir)/external/sax +--- a/libjava/classpath/examples/Makefile.in ++++ b/libjava/classpath/examples/Makefile.in +@@ -320,9 +320,12 @@ sysconfdir = @sysconfdir@ + target = @target@ + target_alias = @target_alias@ + target_cpu = @target_cpu@ ++target_noncanonical = @target_noncanonical@ + target_os = @target_os@ + target_vendor = @target_vendor@ ++toolexecdir = @toolexecdir@ + toolexeclibdir = @toolexeclibdir@ ++toolexecmainlibdir = @toolexecmainlibdir@ + vm_classes = @vm_classes@ + GLIBJ_CLASSPATH = '$(top_builddir)/lib':'$(top_builddir)/lib/glibj.zip':'$(top_builddir)/tools/tools.zip' + @FOUND_ECJ_FALSE@@FOUND_JAVAC_TRUE@JCOMPILER = $(JAVAC) -encoding UTF-8 -bootclasspath $(GLIBJ_CLASSPATH) -classpath . 
+--- a/libjava/classpath/external/Makefile.in ++++ b/libjava/classpath/external/Makefile.in +@@ -318,9 +318,12 @@ sysconfdir = @sysconfdir@ + target = @target@ + target_alias = @target_alias@ + target_cpu = @target_cpu@ ++target_noncanonical = @target_noncanonical@ + target_os = @target_os@ + target_vendor = @target_vendor@ ++toolexecdir = @toolexecdir@ + toolexeclibdir = @toolexeclibdir@ ++toolexecmainlibdir = @toolexecmainlibdir@ + vm_classes = @vm_classes@ + SUBDIRS = sax w3c_dom relaxngDatatype jsr166 + EXTRA_DIST = README +--- a/libjava/classpath/external/jsr166/Makefile.in ++++ b/libjava/classpath/external/jsr166/Makefile.in +@@ -309,9 +309,12 @@ sysconfdir = @sysconfdir@ + target = @target@ + target_alias = @target_alias@ + target_cpu = @target_cpu@ ++target_noncanonical = @target_noncanonical@ + target_os = @target_os@ + target_vendor = @target_vendor@ ++toolexecdir = @toolexecdir@ + toolexeclibdir = @toolexeclibdir@ ++toolexecmainlibdir = @toolexecmainlibdir@ + vm_classes = @vm_classes@ + EXTRA_DIST = IMPORTING \ + readme \ +--- a/libjava/classpath/external/relaxngDatatype/Makefile.in ++++ b/libjava/classpath/external/relaxngDatatype/Makefile.in +@@ -309,9 +309,12 @@ sysconfdir = @sysconfdir@ + target = @target@ + target_alias = @target_alias@ + target_cpu = @target_cpu@ ++target_noncanonical = @target_noncanonical@ + target_os = @target_os@ + target_vendor = @target_vendor@ ++toolexecdir = @toolexecdir@ + toolexeclibdir = @toolexeclibdir@ ++toolexecmainlibdir = @toolexecmainlibdir@ + vm_classes = @vm_classes@ + EXTRA_DIST = README.txt \ + copying.txt \ +--- a/libjava/classpath/external/sax/Makefile.in ++++ b/libjava/classpath/external/sax/Makefile.in +@@ -309,9 +309,12 @@ sysconfdir = @sysconfdir@ + target = @target@ + target_alias = @target_alias@ + target_cpu = @target_cpu@ ++target_noncanonical = @target_noncanonical@ + target_os = @target_os@ + target_vendor = @target_vendor@ ++toolexecdir = @toolexecdir@ + toolexeclibdir = @toolexeclibdir@ ++toolexecmainlibdir = @toolexecmainlibdir@ + vm_classes = @vm_classes@ + EXTRA_DIST = README \ + org/xml/sax/ext/Attributes2.java \ +--- a/libjava/classpath/external/w3c_dom/Makefile.in ++++ b/libjava/classpath/external/w3c_dom/Makefile.in +@@ -309,9 +309,12 @@ sysconfdir = @sysconfdir@ + target = @target@ + target_alias = @target_alias@ + target_cpu = @target_cpu@ ++target_noncanonical = @target_noncanonical@ + target_os = @target_os@ + target_vendor = @target_vendor@ ++toolexecdir = @toolexecdir@ + toolexeclibdir = @toolexeclibdir@ ++toolexecmainlibdir = @toolexecmainlibdir@ + vm_classes = @vm_classes@ + EXTRA_DIST = README \ + org/w3c/dom/Attr.java \ +--- a/libjava/classpath/include/Makefile.in ++++ b/libjava/classpath/include/Makefile.in +@@ -310,9 +310,12 @@ sysconfdir = @sysconfdir@ + target = @target@ + target_alias = @target_alias@ + target_cpu = @target_cpu@ ++target_noncanonical = @target_noncanonical@ + target_os = @target_os@ + target_vendor = @target_vendor@ ++toolexecdir = @toolexecdir@ + toolexeclibdir = @toolexeclibdir@ ++toolexecmainlibdir = @toolexecmainlibdir@ + vm_classes = @vm_classes@ + DISTCLEANFILES = jni_md.h config-int.h + ARG_JNI_JAVAH = -jni +--- a/libjava/classpath/lib/Makefile.in ++++ b/libjava/classpath/lib/Makefile.in +@@ -314,9 +314,12 @@ sysconfdir = @sysconfdir@ + target = @target@ + target_alias = @target_alias@ + target_cpu = @target_cpu@ ++target_noncanonical = @target_noncanonical@ + target_os = @target_os@ + target_vendor = @target_vendor@ ++toolexecdir = @toolexecdir@ + toolexeclibdir = 
@toolexeclibdir@ ++toolexecmainlibdir = @toolexecmainlibdir@ + vm_classes = @vm_classes@ + JAVA_DEPEND = java.dep + compile_classpath = $(vm_classes):$(top_srcdir):$(top_srcdir)/external/w3c_dom:$(top_srcdir)/external/sax:$(top_srcdir)/external/relaxngDatatype:$(top_srcdir)/external/jsr166:.:$(USER_CLASSLIB):$(PATH_TO_ESCHER) +--- a/libjava/classpath/m4/acinclude.m4 ++++ b/libjava/classpath/m4/acinclude.m4 +@@ -427,11 +427,45 @@ dnl GCJ LOCAL: Calculate toolexeclibdir + dnl ----------------------------------------------------------- + AC_DEFUN([CLASSPATH_TOOLEXECLIBDIR], + [ +- multi_os_directory=`$CC -print-multi-os-directory` +- case $multi_os_directory in +- .) toolexeclibdir=${libdir} ;; # Avoid trailing /. +- *) toolexeclibdir=${libdir}/${multi_os_directory} ;; ++ case ${host_alias} in ++ "") host_noncanonical=${build_noncanonical} ;; ++ *) host_noncanonical=${host_alias} ;; + esac ++ case ${target_alias} in ++ "") target_noncanonical=${host_noncanonical} ;; ++ *) target_noncanonical=${target_alias} ;; ++ esac ++ AC_SUBST(target_noncanonical) ++ ++ case ${version_specific_libs} in ++ yes) ++ # Need the gcc compiler version to know where to install libraries ++ # and header files if --enable-version-specific-runtime-libs option ++ # is selected. ++ includedir='$(libdir)/gcc/$(target_noncanonical)/$(gcc_version)/include/' ++ toolexecdir='$(libdir)/gcc/$(target_noncanonical)' ++ toolexecmainlibdir='$(toolexecdir)/$(gcc_version)$(MULTISUBDIR)' ++ toolexeclibdir=$toolexecmainlibdir ++ ;; ++ no) ++ if test -n "$with_cross_host" && ++ test x"$with_cross_host" != x"no"; then ++ # Install a library built with a cross compiler in tooldir, not libdir. ++ toolexecdir='$(exec_prefix)/$(target_noncanonical)' ++ toolexecmainlibdir='$(toolexecdir)/lib' ++ else ++ toolexecdir='$(libdir)/gcc-lib/$(target_noncanonical)' ++ toolexecmainlibdir='$(libdir)' ++ fi ++ multi_os_directory=`$CC -print-multi-os-directory` ++ case $multi_os_directory in ++ .) toolexeclibdir=$toolexecmainlibdir ;; # Avoid trailing /. 
++ *) toolexeclibdir=$toolexecmainlibdir/$multi_os_directory ;; ++ esac ++ ;; ++ esac ++ AC_SUBST(toolexecdir) ++ AC_SUBST(toolexecmainlibdir) + AC_SUBST(toolexeclibdir) + ]) + +--- a/libjava/classpath/native/Makefile.in ++++ b/libjava/classpath/native/Makefile.in +@@ -317,9 +317,12 @@ sysconfdir = @sysconfdir@ + target = @target@ + target_alias = @target_alias@ + target_cpu = @target_cpu@ ++target_noncanonical = @target_noncanonical@ + target_os = @target_os@ + target_vendor = @target_vendor@ ++toolexecdir = @toolexecdir@ + toolexeclibdir = @toolexeclibdir@ ++toolexecmainlibdir = @toolexecmainlibdir@ + vm_classes = @vm_classes@ + @CREATE_JNI_LIBRARIES_TRUE@JNIDIR = jni + @CREATE_GTK_PEER_LIBRARIES_TRUE@JAWTDIR = jawt +--- a/libjava/classpath/native/fdlibm/Makefile.in ++++ b/libjava/classpath/native/fdlibm/Makefile.in +@@ -336,9 +336,12 @@ sysconfdir = @sysconfdir@ + target = @target@ + target_alias = @target_alias@ + target_cpu = @target_cpu@ ++target_noncanonical = @target_noncanonical@ + target_os = @target_os@ + target_vendor = @target_vendor@ ++toolexecdir = @toolexecdir@ + toolexeclibdir = @toolexeclibdir@ ++toolexecmainlibdir = @toolexecmainlibdir@ + vm_classes = @vm_classes@ + noinst_LTLIBRARIES = libfdlibm.la + libfdlibm_la_SOURCES = \ +--- a/libjava/classpath/native/jawt/Makefile.in ++++ b/libjava/classpath/native/jawt/Makefile.in +@@ -336,9 +336,12 @@ sysconfdir = @sysconfdir@ + target = @target@ + target_alias = @target_alias@ + target_cpu = @target_cpu@ ++target_noncanonical = @target_noncanonical@ + target_os = @target_os@ + target_vendor = @target_vendor@ ++toolexecdir = @toolexecdir@ + toolexeclibdir = @toolexeclibdir@ ++toolexecmainlibdir = @toolexecmainlibdir@ + vm_classes = @vm_classes@ + nativeexeclib_LTLIBRARIES = libjawt.la + libjawt_la_SOURCES = jawt.c +--- a/libjava/classpath/native/jni/Makefile.in ++++ b/libjava/classpath/native/jni/Makefile.in +@@ -317,9 +317,12 @@ sysconfdir = @sysconfdir@ + target = @target@ + target_alias = @target_alias@ + target_cpu = @target_cpu@ ++target_noncanonical = @target_noncanonical@ + target_os = @target_os@ + target_vendor = @target_vendor@ ++toolexecdir = @toolexecdir@ + toolexeclibdir = @toolexeclibdir@ ++toolexecmainlibdir = @toolexecmainlibdir@ + vm_classes = @vm_classes@ + @CREATE_CORE_JNI_LIBRARIES_TRUE@JNIDIRS = native-lib java-io java-lang java-net java-nio java-util + @CREATE_ALSA_LIBRARIES_TRUE@ALSADIR = midi-alsa +--- a/libjava/classpath/native/jni/classpath/Makefile.in ++++ b/libjava/classpath/native/jni/classpath/Makefile.in +@@ -327,9 +327,12 @@ sysconfdir = @sysconfdir@ + target = @target@ + target_alias = @target_alias@ + target_cpu = @target_cpu@ ++target_noncanonical = @target_noncanonical@ + target_os = @target_os@ + target_vendor = @target_vendor@ ++toolexecdir = @toolexecdir@ + toolexeclibdir = @toolexeclibdir@ ++toolexecmainlibdir = @toolexecmainlibdir@ + vm_classes = @vm_classes@ + + # Header needed for jawt implementations such as the one found in ../gtk-peer. 
+--- a/libjava/classpath/native/jni/gconf-peer/Makefile.in ++++ b/libjava/classpath/native/jni/gconf-peer/Makefile.in +@@ -336,9 +336,12 @@ sysconfdir = @sysconfdir@ + target = @target@ + target_alias = @target_alias@ + target_cpu = @target_cpu@ ++target_noncanonical = @target_noncanonical@ + target_os = @target_os@ + target_vendor = @target_vendor@ ++toolexecdir = @toolexecdir@ + toolexeclibdir = @toolexeclibdir@ ++toolexecmainlibdir = @toolexecmainlibdir@ + vm_classes = @vm_classes@ + nativeexeclib_LTLIBRARIES = libgconfpeer.la + libgconfpeer_la_SOURCES = GConfNativePeer.c +--- a/libjava/classpath/native/jni/gstreamer-peer/Makefile.in ++++ b/libjava/classpath/native/jni/gstreamer-peer/Makefile.in +@@ -337,9 +337,12 @@ sysconfdir = @sysconfdir@ + target = @target@ + target_alias = @target_alias@ + target_cpu = @target_cpu@ ++target_noncanonical = @target_noncanonical@ + target_os = @target_os@ + target_vendor = @target_vendor@ ++toolexecdir = @toolexecdir@ + toolexeclibdir = @toolexeclibdir@ ++toolexecmainlibdir = @toolexecmainlibdir@ + vm_classes = @vm_classes@ + nativeexeclib_LTLIBRARIES = libgstreamerpeer.la + libgstreamerpeer_la_SOURCES = GStreamerIOPeer.c \ +--- a/libjava/classpath/native/jni/gtk-peer/Makefile.in ++++ b/libjava/classpath/native/jni/gtk-peer/Makefile.in +@@ -374,9 +374,12 @@ sysconfdir = @sysconfdir@ + target = @target@ + target_alias = @target_alias@ + target_cpu = @target_cpu@ ++target_noncanonical = @target_noncanonical@ + target_os = @target_os@ + target_vendor = @target_vendor@ ++toolexecdir = @toolexecdir@ + toolexeclibdir = @toolexeclibdir@ ++toolexecmainlibdir = @toolexecmainlibdir@ + vm_classes = @vm_classes@ + nativeexeclib_LTLIBRARIES = libgtkpeer.la + +--- a/libjava/classpath/native/jni/java-io/Makefile.in ++++ b/libjava/classpath/native/jni/java-io/Makefile.in +@@ -338,9 +338,12 @@ sysconfdir = @sysconfdir@ + target = @target@ + target_alias = @target_alias@ + target_cpu = @target_cpu@ ++target_noncanonical = @target_noncanonical@ + target_os = @target_os@ + target_vendor = @target_vendor@ ++toolexecdir = @toolexecdir@ + toolexeclibdir = @toolexeclibdir@ ++toolexecmainlibdir = @toolexecmainlibdir@ + vm_classes = @vm_classes@ + nativeexeclib_LTLIBRARIES = libjavaio.la + libjavaio_la_SOURCES = java_io_VMFile.c \ +--- a/libjava/classpath/native/jni/java-lang/Makefile.in ++++ b/libjava/classpath/native/jni/java-lang/Makefile.in +@@ -352,9 +352,12 @@ sysconfdir = @sysconfdir@ + target = @target@ + target_alias = @target_alias@ + target_cpu = @target_cpu@ ++target_noncanonical = @target_noncanonical@ + target_os = @target_os@ + target_vendor = @target_vendor@ ++toolexecdir = @toolexecdir@ + toolexeclibdir = @toolexeclibdir@ ++toolexecmainlibdir = @toolexecmainlibdir@ + vm_classes = @vm_classes@ + nativeexeclib_LTLIBRARIES = libjavalang.la libjavalangreflect.la libjavalangmanagement.la + libjavalang_la_SOURCES = java_lang_VMSystem.c \ +--- a/libjava/classpath/native/jni/java-net/Makefile.in ++++ b/libjava/classpath/native/jni/java-net/Makefile.in +@@ -348,9 +348,12 @@ sysconfdir = @sysconfdir@ + target = @target@ + target_alias = @target_alias@ + target_cpu = @target_cpu@ ++target_noncanonical = @target_noncanonical@ + target_os = @target_os@ + target_vendor = @target_vendor@ ++toolexecdir = @toolexecdir@ + toolexeclibdir = @toolexeclibdir@ ++toolexecmainlibdir = @toolexecmainlibdir@ + vm_classes = @vm_classes@ + nativeexeclib_LTLIBRARIES = libjavanet.la + @ENABLE_LOCAL_SOCKETS_FALSE@local_sources = gnu_java_net_local_LocalSocketImpl.c +--- 
a/libjava/classpath/native/jni/java-nio/Makefile.in ++++ b/libjava/classpath/native/jni/java-nio/Makefile.in +@@ -346,9 +346,12 @@ sysconfdir = @sysconfdir@ + target = @target@ + target_alias = @target_alias@ + target_cpu = @target_cpu@ ++target_noncanonical = @target_noncanonical@ + target_os = @target_os@ + target_vendor = @target_vendor@ ++toolexecdir = @toolexecdir@ + toolexeclibdir = @toolexeclibdir@ ++toolexecmainlibdir = @toolexecmainlibdir@ + vm_classes = @vm_classes@ + nativeexeclib_LTLIBRARIES = libjavanio.la + libjavanio_la_SOURCES = gnu_java_nio_VMPipe.c \ +--- a/libjava/classpath/native/jni/java-util/Makefile.in ++++ b/libjava/classpath/native/jni/java-util/Makefile.in +@@ -335,9 +335,12 @@ sysconfdir = @sysconfdir@ + target = @target@ + target_alias = @target_alias@ + target_cpu = @target_cpu@ ++target_noncanonical = @target_noncanonical@ + target_os = @target_os@ + target_vendor = @target_vendor@ ++toolexecdir = @toolexecdir@ + toolexeclibdir = @toolexeclibdir@ ++toolexecmainlibdir = @toolexecmainlibdir@ + vm_classes = @vm_classes@ + nativeexeclib_LTLIBRARIES = libjavautil.la + libjavautil_la_SOURCES = java_util_VMTimeZone.c +--- a/libjava/classpath/native/jni/midi-alsa/Makefile.in ++++ b/libjava/classpath/native/jni/midi-alsa/Makefile.in +@@ -338,9 +338,12 @@ sysconfdir = @sysconfdir@ + target = @target@ + target_alias = @target_alias@ + target_cpu = @target_cpu@ ++target_noncanonical = @target_noncanonical@ + target_os = @target_os@ + target_vendor = @target_vendor@ ++toolexecdir = @toolexecdir@ + toolexeclibdir = @toolexeclibdir@ ++toolexecmainlibdir = @toolexecmainlibdir@ + vm_classes = @vm_classes@ + nativeexeclib_LTLIBRARIES = libgjsmalsa.la + libgjsmalsa_la_SOURCES = gnu_javax_sound_midi_alsa_AlsaMidiSequencerDevice.c \ +--- a/libjava/classpath/native/jni/midi-dssi/Makefile.in ++++ b/libjava/classpath/native/jni/midi-dssi/Makefile.in +@@ -338,9 +338,12 @@ sysconfdir = @sysconfdir@ + target = @target@ + target_alias = @target_alias@ + target_cpu = @target_cpu@ ++target_noncanonical = @target_noncanonical@ + target_os = @target_os@ + target_vendor = @target_vendor@ ++toolexecdir = @toolexecdir@ + toolexeclibdir = @toolexeclibdir@ ++toolexecmainlibdir = @toolexecmainlibdir@ + vm_classes = @vm_classes@ + nativeexeclib_LTLIBRARIES = libgjsmdssi.la + libgjsmdssi_la_SOURCES = gnu_javax_sound_midi_dssi_DSSIMidiDeviceProvider.c \ +--- a/libjava/classpath/native/jni/native-lib/Makefile.in ++++ b/libjava/classpath/native/jni/native-lib/Makefile.in +@@ -327,9 +327,12 @@ sysconfdir = @sysconfdir@ + target = @target@ + target_alias = @target_alias@ + target_cpu = @target_cpu@ ++target_noncanonical = @target_noncanonical@ + target_os = @target_os@ + target_vendor = @target_vendor@ ++toolexecdir = @toolexecdir@ + toolexeclibdir = @toolexeclibdir@ ++toolexecmainlibdir = @toolexecmainlibdir@ + vm_classes = @vm_classes@ + noinst_LTLIBRARIES = libclasspathnative.la + libclasspathnative_la_SOURCES = cpnet.c \ +--- a/libjava/classpath/native/jni/qt-peer/Makefile.in ++++ b/libjava/classpath/native/jni/qt-peer/Makefile.in +@@ -353,9 +353,12 @@ sysconfdir = @sysconfdir@ + target = @target@ + target_alias = @target_alias@ + target_cpu = @target_cpu@ ++target_noncanonical = @target_noncanonical@ + target_os = @target_os@ + target_vendor = @target_vendor@ ++toolexecdir = @toolexecdir@ + toolexeclibdir = @toolexeclibdir@ ++toolexecmainlibdir = @toolexecmainlibdir@ + vm_classes = @vm_classes@ + noinst_LTLIBRARIES = libqtpeer.la + AM_LDFLAGS = @CLASSPATH_MODULE@ @QT_LIBS@ +--- 
a/libjava/classpath/native/jni/xmlj/Makefile.in ++++ b/libjava/classpath/native/jni/xmlj/Makefile.in +@@ -337,9 +337,12 @@ sysconfdir = @sysconfdir@ + target = @target@ + target_alias = @target_alias@ + target_cpu = @target_cpu@ ++target_noncanonical = @target_noncanonical@ + target_os = @target_os@ + target_vendor = @target_vendor@ ++toolexecdir = @toolexecdir@ + toolexeclibdir = @toolexeclibdir@ ++toolexecmainlibdir = @toolexecmainlibdir@ + vm_classes = @vm_classes@ + nativeexeclib_LTLIBRARIES = libxmlj.la + libxmlj_la_SOURCES = \ +--- a/libjava/classpath/native/plugin/Makefile.in ++++ b/libjava/classpath/native/plugin/Makefile.in +@@ -335,9 +335,12 @@ sysconfdir = @sysconfdir@ + target = @target@ + target_alias = @target_alias@ + target_cpu = @target_cpu@ ++target_noncanonical = @target_noncanonical@ + target_os = @target_os@ + target_vendor = @target_vendor@ ++toolexecdir = @toolexecdir@ + toolexeclibdir = @toolexeclibdir@ ++toolexecmainlibdir = @toolexecmainlibdir@ + vm_classes = @vm_classes@ + nativeexeclib_LTLIBRARIES = libgcjwebplugin.la + libgcjwebplugin_la_SOURCES = gcjwebplugin.cc +--- a/libjava/classpath/resource/Makefile.in ++++ b/libjava/classpath/resource/Makefile.in +@@ -320,9 +320,12 @@ sysconfdir = @sysconfdir@ + target = @target@ + target_alias = @target_alias@ + target_cpu = @target_cpu@ ++target_noncanonical = @target_noncanonical@ + target_os = @target_os@ + target_vendor = @target_vendor@ ++toolexecdir = @toolexecdir@ + toolexeclibdir = @toolexeclibdir@ ++toolexecmainlibdir = @toolexecmainlibdir@ + vm_classes = @vm_classes@ + logging_DATA = java/util/logging/logging.properties + loggingdir = $(toolexeclibdir) +--- a/libjava/classpath/scripts/Makefile.in ++++ b/libjava/classpath/scripts/Makefile.in +@@ -310,9 +310,12 @@ sysconfdir = @sysconfdir@ + target = @target@ + target_alias = @target_alias@ + target_cpu = @target_cpu@ ++target_noncanonical = @target_noncanonical@ + target_os = @target_os@ + target_vendor = @target_vendor@ ++toolexecdir = @toolexecdir@ + toolexeclibdir = @toolexeclibdir@ ++toolexecmainlibdir = @toolexecmainlibdir@ + vm_classes = @vm_classes@ + EXTRA_DIST = check_jni_methods.sh generate-locale-list.sh import-cacerts.sh + all: all-am +--- a/libjava/classpath/tools/Makefile.in ++++ b/libjava/classpath/tools/Makefile.in +@@ -412,9 +412,12 @@ sysconfdir = @sysconfdir@ + target = @target@ + target_alias = @target_alias@ + target_cpu = @target_cpu@ ++target_noncanonical = @target_noncanonical@ + target_os = @target_os@ + target_vendor = @target_vendor@ ++toolexecdir = @toolexecdir@ + toolexeclibdir = @toolexeclibdir@ ++toolexecmainlibdir = @toolexecmainlibdir@ + vm_classes = @vm_classes@ + GLIBJ_BOOTCLASSPATH = '$(top_srcdir)/lib' + GLIBJ_CLASSPATH = $(srcdir)/asm +--- a/libjava/configure ++++ b/libjava/configure +@@ -18552,6 +18552,9 @@ if { (eval echo "$as_me:$LINENO: \"$ac_c + enable_sjlj_exceptions=yes + elif grep _Unwind_Resume conftest.s >/dev/null 2>&1 ; then + enable_sjlj_exceptions=no ++ elif grep __cxa_end_cleanup conftest.s >/dev/null 2>&1 ; then ++ # ARM EH ABI. ++ enable_sjlj_exceptions=no + fi + fi + CXXFLAGS="$old_CXXFLAGS" +@@ -26229,10 +26232,10 @@ gcjsubdir=gcj-$gcjversion-$libgcj_sovers + multi_os_directory=`$CC -print-multi-os-directory` + case $multi_os_directory in + .) +- dbexecdir='$(libdir)/'$gcjsubdir # Avoid /. ++ dbexecdir='$(toolexeclibdir)/'$gcjsubdir # Avoid /. 
+ ;; + *) +- dbexecdir='$(libdir)/'$multi_os_directory/$gcjsubdir ++ dbexecdir='$(toolexeclibdir)/'$multi_os_directory/$gcjsubdir + ;; + esac + +--- a/libjava/configure.ac ++++ b/libjava/configure.ac +@@ -605,6 +605,9 @@ if AC_TRY_EVAL(ac_compile); then + enable_sjlj_exceptions=yes + elif grep _Unwind_Resume conftest.s >/dev/null 2>&1 ; then + enable_sjlj_exceptions=no ++ elif grep __cxa_end_cleanup conftest.s >/dev/null 2>&1 ; then ++ # ARM EH ABI. ++ enable_sjlj_exceptions=no + fi + fi + CXXFLAGS="$old_CXXFLAGS" +@@ -1406,10 +1409,10 @@ gcjsubdir=gcj-$gcjversion-$libgcj_sovers + multi_os_directory=`$CC -print-multi-os-directory` + case $multi_os_directory in + .) +- dbexecdir='$(libdir)/'$gcjsubdir # Avoid /. ++ dbexecdir='$(toolexeclibdir)/'$gcjsubdir # Avoid /. + ;; + *) +- dbexecdir='$(libdir)/'$multi_os_directory/$gcjsubdir ++ dbexecdir='$(toolexeclibdir)/'$multi_os_directory/$gcjsubdir + ;; + esac + AC_SUBST(dbexecdir) +--- a/libstdc++-v3/Makefile.in ++++ b/libstdc++-v3/Makefile.in +@@ -189,6 +189,8 @@ LIBMATHOBJS = @LIBMATHOBJS@ + LIBOBJS = @LIBOBJS@ + LIBS = @LIBS@ + LIBSUPCXX_PICFLAGS = @LIBSUPCXX_PICFLAGS@ ++LIBSUPCXX_PRONLY_FALSE = @LIBSUPCXX_PRONLY_FALSE@ ++LIBSUPCXX_PRONLY_TRUE = @LIBSUPCXX_PRONLY_TRUE@ + LIBTOOL = @LIBTOOL@ + LN_S = @LN_S@ + LTLIBICONV = @LTLIBICONV@ +--- a/libstdc++-v3/config.h.in ++++ b/libstdc++-v3/config.h.in +@@ -114,12 +114,6 @@ + /* Define to 1 if you have the `frexpl' function. */ + #undef HAVE_FREXPL + +-/* Define to 1 if you have the header file. */ +-#undef HAVE_GCONF_H +- +-/* Define to 1 if you have the header file. */ +-#undef HAVE_GCONV_H +- + /* Define if _Unwind_GetIPInfo is available. */ + #undef HAVE_GETIPINFO + +--- a/libstdc++-v3/config/cpu/mips/atomicity.h ++++ b/libstdc++-v3/config/cpu/mips/atomicity.h +@@ -41,16 +41,18 @@ _GLIBCXX_BEGIN_NAMESPACE(__gnu_cxx) + + __asm__ __volatile__ + ("/* Inline exchange & add */\n\t" +- "1:\n\t" + ".set push\n\t" + #if _MIPS_SIM == _ABIO32 + ".set mips2\n\t" + #endif ++ "sync \n\t" ++ "1:\n\t" + "ll %0,0(%2)\n\t" + "addu %1,%3,%0\n\t" + "sc %1,0(%2)\n\t" +- ".set pop\n\t" + "beqz %1,1b\n\t" ++ "sync \n\t" ++ ".set pop\n\t" + "/* End exchange & add */" + : "=&r"(__result), "=&r"(__tmp) + : "r"(__mem), "r"(__val) +@@ -67,16 +69,18 @@ _GLIBCXX_BEGIN_NAMESPACE(__gnu_cxx) + + __asm__ __volatile__ + ("/* Inline atomic add */\n\t" +- "1:\n\t" + ".set push\n\t" + #if _MIPS_SIM == _ABIO32 + ".set mips2\n\t" + #endif ++ "sync \n\t" ++ "1:\n\t" + "ll %0,0(%1)\n\t" + "addu %0,%2,%0\n\t" + "sc %0,0(%1)\n\t" +- ".set pop\n\t" + "beqz %0,1b\n\t" ++ "sync \n\t" ++ ".set pop\n\t" + "/* End atomic add */" + : "=&r"(__result) + : "r"(__mem), "r"(__val) +--- a/libstdc++-v3/config/cpu/sh/atomicity.h ++++ b/libstdc++-v3/config/cpu/sh/atomicity.h +@@ -30,47 +30,48 @@ + + #ifdef __SH4A__ + +-#ifndef _GLIBCXX_ATOMICITY_H +-#define _GLIBCXX_ATOMICITY_H 1 ++#include + +-typedef int _Atomic_word; ++_GLIBCXX_BEGIN_NAMESPACE(__gnu_cxx) + +-static inline _Atomic_word +-__attribute__ ((__unused__)) +-__exchange_and_add (volatile _Atomic_word* __mem, int __val) +-{ +- _Atomic_word __result; ++ typedef int _Atomic_word; + +- __asm__ __volatile__ +- ("0:\n" +- "\tmovli.l\t@%2,r0\n" +- "\tmov\tr0,%1\n" +- "\tadd\t%3,r0\n" +- "\tmovco.l\tr0,@%2\n" +- "\tbf\t0b" +- : "+m" (*__mem), "=r" (__result) +- : "r" (__mem), "rI08" (__val) +- : "r0"); +- +- return __result; +-} +- +- +-static inline void +-__attribute__ ((__unused__)) +-__atomic_add (volatile _Atomic_word* __mem, int __val) +-{ +- asm("0:\n" +- "\tmovli.l\t@%1,r0\n" +- 
"\tadd\t%2,r0\n" +- "\tmovco.l\tr0,@%1\n" +- "\tbf\t0b" +- : "+m" (*__mem) +- : "r" (__mem), "rI08" (__val) +- : "r0"); +-} ++ _Atomic_word ++ __attribute__ ((__unused__)) ++ __exchange_and_add (volatile _Atomic_word* __mem, int __val) ++ { ++ _Atomic_word __result; + +-#endif ++ __asm__ __volatile__ ++ ("0:\n" ++ "\tmovli.l\t@%2,r0\n" ++ "\tmov\tr0,%1\n" ++ "\tadd\t%3,r0\n" ++ "\tmovco.l\tr0,@%2\n" ++ "\tbf\t0b" ++ : "+m" (*__mem), "=r" (__result) ++ : "r" (__mem), "rI08" (__val) ++ : "r0"); ++ ++ return __result; ++ } ++ ++ ++ void ++ __attribute__ ((__unused__)) ++ __atomic_add (volatile _Atomic_word* __mem, int __val) ++ { ++ asm("0:\n" ++ "\tmovli.l\t@%1,r0\n" ++ "\tadd\t%2,r0\n" ++ "\tmovco.l\tr0,@%1\n" ++ "\tbf\t0b" ++ : "+m" (*__mem) ++ : "r" (__mem), "rI08" (__val) ++ : "r0"); ++ } ++ ++_GLIBCXX_END_NAMESPACE + + #else /* !__SH4A__ */ + +--- /dev/null ++++ b/libstdc++-v3/config/os/gnu-linux/arm-eabi-extra.ver +@@ -0,0 +1,18 @@ ++# Appended to version file. ++ ++CXXABI_ARM_1.3.3 { ++ # ARM ABI helper functions provided in libsupc++. ++ __aeabi_atexit; ++ __aeabi_vec_ctor_nocookie_nodtor; ++ __aeabi_vec_ctor_cookie_nodtor; ++ __aeabi_vec_cctor_nocookie_nodtor; ++ __aeabi_vec_new_cookie_noctor; ++ __aeabi_vec_new_nocookie; ++ __aeabi_vec_new_cookie_nodtor; ++ __aeabi_vec_new_cookie; ++ __aeabi_vec_dtor; ++ __aeabi_vec_dtor_cookie; ++ __aeabi_vec_delete; ++ __aeabi_vec_delete3; ++ __aeabi_vec_delete3_nodtor; ++}; +--- a/libstdc++-v3/configure ++++ b/libstdc++-v3/configure +@@ -458,7 +458,7 @@ ac_includes_default="\ + # include + #endif" + +-ac_subst_vars='SHELL PATH_SEPARATOR PACKAGE_NAME PACKAGE_TARNAME PACKAGE_VERSION PACKAGE_STRING PACKAGE_BUGREPORT exec_prefix prefix program_transform_name bindir sbindir libexecdir datadir sysconfdir sharedstatedir localstatedir libdir includedir oldincludedir infodir mandir build_alias host_alias target_alias DEFS ECHO_C ECHO_N ECHO_T LIBS libtool_VERSION multi_basedir build build_cpu build_vendor build_os host host_cpu host_vendor host_os target target_cpu target_vendor target_os INSTALL_PROGRAM INSTALL_SCRIPT INSTALL_DATA CYGPATH_W PACKAGE VERSION ACLOCAL AUTOCONF AUTOMAKE AUTOHEADER MAKEINFO install_sh STRIP ac_ct_STRIP INSTALL_STRIP_PROGRAM mkdir_p AWK SET_MAKE am__leading_dot AMTAR am__tar am__untar glibcxx_builddir glibcxx_srcdir toplevel_srcdir CC ac_ct_CC EXEEXT OBJEXT CXX ac_ct_CXX CFLAGS CXXFLAGS LN_S AS ac_ct_AS AR ac_ct_AR RANLIB ac_ct_RANLIB MAINTAINER_MODE_TRUE MAINTAINER_MODE_FALSE MAINT CPP CPPFLAGS EGREP LIBTOOL SED FGREP GREP LD DUMPBIN ac_ct_DUMPBIN NM lt_ECHO LDFLAGS CXXCPP enable_shared enable_static GLIBCXX_HOSTED_TRUE GLIBCXX_HOSTED_FALSE GLIBCXX_BUILD_PCH_TRUE GLIBCXX_BUILD_PCH_FALSE glibcxx_PCHFLAGS CSTDIO_H BASIC_FILE_H BASIC_FILE_CC check_msgfmt glibcxx_MOFILES glibcxx_POFILES glibcxx_localedir USE_NLS CLOCALE_H CMESSAGES_H CCODECVT_CC CCOLLATE_CC CCTYPE_CC CMESSAGES_CC CMONEY_CC CNUMERIC_CC CTIME_H CTIME_CC CLOCALE_CC CLOCALE_INTERNAL_H ALLOCATOR_H ALLOCATOR_NAME C_INCLUDE_DIR GLIBCXX_C_HEADERS_C_TRUE GLIBCXX_C_HEADERS_C_FALSE GLIBCXX_C_HEADERS_C_STD_TRUE GLIBCXX_C_HEADERS_C_STD_FALSE GLIBCXX_C_HEADERS_C_GLOBAL_TRUE GLIBCXX_C_HEADERS_C_GLOBAL_FALSE GLIBCXX_C_HEADERS_COMPATIBILITY_TRUE GLIBCXX_C_HEADERS_COMPATIBILITY_FALSE GLIBCXX_C_HEADERS_EXTRA_TRUE GLIBCXX_C_HEADERS_EXTRA_FALSE DEBUG_FLAGS GLIBCXX_BUILD_DEBUG_TRUE GLIBCXX_BUILD_DEBUG_FALSE ENABLE_PARALLEL_TRUE ENABLE_PARALLEL_FALSE EXTRA_CXX_FLAGS glibcxx_thread_h WERROR SECTION_FLAGS SECTION_LDFLAGS OPT_LDFLAGS LIBMATHOBJS LIBICONV LTLIBICONV SYMVER_FILE 
port_specific_symbol_files ENABLE_SYMVERS_TRUE ENABLE_SYMVERS_FALSE ENABLE_SYMVERS_GNU_TRUE ENABLE_SYMVERS_GNU_FALSE ENABLE_SYMVERS_GNU_NAMESPACE_TRUE ENABLE_SYMVERS_GNU_NAMESPACE_FALSE ENABLE_SYMVERS_DARWIN_TRUE ENABLE_SYMVERS_DARWIN_FALSE ENABLE_VISIBILITY_TRUE ENABLE_VISIBILITY_FALSE GLIBCXX_LDBL_COMPAT_TRUE GLIBCXX_LDBL_COMPAT_FALSE baseline_dir ATOMICITY_SRCDIR ATOMIC_WORD_SRCDIR ATOMIC_FLAGS CPU_DEFINES_SRCDIR ABI_TWEAKS_SRCDIR OS_INC_SRCDIR ERROR_CONSTANTS_SRCDIR glibcxx_prefixdir gxx_include_dir glibcxx_toolexecdir glibcxx_toolexeclibdir GLIBCXX_INCLUDES TOPLEVEL_INCLUDES OPTIMIZE_CXXFLAGS WARN_FLAGS LIBSUPCXX_PICFLAGS LIBOBJS LTLIBOBJS' ++ac_subst_vars='SHELL PATH_SEPARATOR PACKAGE_NAME PACKAGE_TARNAME PACKAGE_VERSION PACKAGE_STRING PACKAGE_BUGREPORT exec_prefix prefix program_transform_name bindir sbindir libexecdir datadir sysconfdir sharedstatedir localstatedir libdir includedir oldincludedir infodir mandir build_alias host_alias target_alias DEFS ECHO_C ECHO_N ECHO_T LIBS libtool_VERSION multi_basedir build build_cpu build_vendor build_os host host_cpu host_vendor host_os target target_cpu target_vendor target_os INSTALL_PROGRAM INSTALL_SCRIPT INSTALL_DATA CYGPATH_W PACKAGE VERSION ACLOCAL AUTOCONF AUTOMAKE AUTOHEADER MAKEINFO install_sh STRIP ac_ct_STRIP INSTALL_STRIP_PROGRAM mkdir_p AWK SET_MAKE am__leading_dot AMTAR am__tar am__untar glibcxx_builddir glibcxx_srcdir toplevel_srcdir CC ac_ct_CC EXEEXT OBJEXT CXX ac_ct_CXX CFLAGS CXXFLAGS LN_S AS ac_ct_AS AR ac_ct_AR RANLIB ac_ct_RANLIB MAINTAINER_MODE_TRUE MAINTAINER_MODE_FALSE MAINT CPP CPPFLAGS EGREP LIBTOOL SED FGREP GREP LD DUMPBIN ac_ct_DUMPBIN NM lt_ECHO LDFLAGS CXXCPP enable_shared enable_static GLIBCXX_HOSTED_TRUE GLIBCXX_HOSTED_FALSE GLIBCXX_BUILD_PCH_TRUE GLIBCXX_BUILD_PCH_FALSE glibcxx_PCHFLAGS CSTDIO_H BASIC_FILE_H BASIC_FILE_CC check_msgfmt glibcxx_MOFILES glibcxx_POFILES glibcxx_localedir USE_NLS CLOCALE_H CMESSAGES_H CCODECVT_CC CCOLLATE_CC CCTYPE_CC CMESSAGES_CC CMONEY_CC CNUMERIC_CC CTIME_H CTIME_CC CLOCALE_CC CLOCALE_INTERNAL_H ALLOCATOR_H ALLOCATOR_NAME C_INCLUDE_DIR GLIBCXX_C_HEADERS_C_TRUE GLIBCXX_C_HEADERS_C_FALSE GLIBCXX_C_HEADERS_C_STD_TRUE GLIBCXX_C_HEADERS_C_STD_FALSE GLIBCXX_C_HEADERS_C_GLOBAL_TRUE GLIBCXX_C_HEADERS_C_GLOBAL_FALSE GLIBCXX_C_HEADERS_COMPATIBILITY_TRUE GLIBCXX_C_HEADERS_COMPATIBILITY_FALSE GLIBCXX_C_HEADERS_EXTRA_TRUE GLIBCXX_C_HEADERS_EXTRA_FALSE DEBUG_FLAGS GLIBCXX_BUILD_DEBUG_TRUE GLIBCXX_BUILD_DEBUG_FALSE ENABLE_PARALLEL_TRUE ENABLE_PARALLEL_FALSE EXTRA_CXX_FLAGS glibcxx_thread_h WERROR SECTION_FLAGS SECTION_LDFLAGS OPT_LDFLAGS LIBMATHOBJS LIBICONV LTLIBICONV SYMVER_FILE port_specific_symbol_files ENABLE_SYMVERS_TRUE ENABLE_SYMVERS_FALSE ENABLE_SYMVERS_GNU_TRUE ENABLE_SYMVERS_GNU_FALSE ENABLE_SYMVERS_GNU_NAMESPACE_TRUE ENABLE_SYMVERS_GNU_NAMESPACE_FALSE ENABLE_SYMVERS_DARWIN_TRUE ENABLE_SYMVERS_DARWIN_FALSE ENABLE_VISIBILITY_TRUE ENABLE_VISIBILITY_FALSE GLIBCXX_LDBL_COMPAT_TRUE GLIBCXX_LDBL_COMPAT_FALSE baseline_dir ATOMICITY_SRCDIR ATOMIC_WORD_SRCDIR ATOMIC_FLAGS CPU_DEFINES_SRCDIR ABI_TWEAKS_SRCDIR OS_INC_SRCDIR ERROR_CONSTANTS_SRCDIR LIBSUPCXX_PRONLY_TRUE LIBSUPCXX_PRONLY_FALSE glibcxx_prefixdir gxx_include_dir glibcxx_toolexecdir glibcxx_toolexeclibdir GLIBCXX_INCLUDES TOPLEVEL_INCLUDES OPTIMIZE_CXXFLAGS WARN_FLAGS LIBSUPCXX_PICFLAGS LIBOBJS LTLIBOBJS' + ac_subst_files='' + + # Initialize some variables set by options. 
+@@ -17195,9 +17195,8 @@ if $GLIBCXX_IS_NATIVE; then + + + +- + for ac_header in nan.h ieeefp.h endian.h sys/isa_defs.h machine/endian.h \ +- machine/param.h sys/machine.h fp.h locale.h float.h inttypes.h gconv.h \ ++ machine/param.h sys/machine.h fp.h locale.h float.h inttypes.h \ + sys/types.h sys/ipc.h sys/sem.h + do + as_ac_Header=`echo "ac_cv_header_$ac_header" | $as_tr_sh` +@@ -62933,9 +62932,8 @@ _ACEOF + + + +- + for ac_header in nan.h ieeefp.h endian.h sys/isa_defs.h machine/endian.h \ +- machine/param.h sys/machine.h fp.h locale.h float.h inttypes.h gconv.h \ ++ machine/param.h sys/machine.h fp.h locale.h float.h inttypes.h \ + sys/types.h + do + as_ac_Header=`echo "ac_cv_header_$ac_header" | $as_tr_sh` +@@ -86333,11 +86331,10 @@ _ACEOF + + + +- + for ac_header in nan.h ieeefp.h endian.h sys/isa_defs.h \ + machine/endian.h machine/param.h sys/machine.h sys/types.h \ + fp.h float.h endian.h inttypes.h locale.h float.h stdint.h \ +- sys/ipc.h sys/sem.h gconf.h ++ sys/ipc.h sys/sem.h + do + as_ac_Header=`echo "ac_cv_header_$ac_header" | $as_tr_sh` + if eval "test \"\${$as_ac_Header+set}\" = set"; then +@@ -108853,6 +108850,223 @@ ac_compiler_gnu=$ac_cv_c_compiler_gnu + + + ++ cat >>confdefs.h <<\_ACEOF ++#define _GLIBCXX_USE_RANDOM_TR1 1 ++_ACEOF ++ ++ ++ ++ if test "${ac_cv_header_locale_h+set}" = set; then ++ echo "$as_me:$LINENO: checking for locale.h" >&5 ++echo $ECHO_N "checking for locale.h... $ECHO_C" >&6 ++if test "${ac_cv_header_locale_h+set}" = set; then ++ echo $ECHO_N "(cached) $ECHO_C" >&6 ++fi ++echo "$as_me:$LINENO: result: $ac_cv_header_locale_h" >&5 ++echo "${ECHO_T}$ac_cv_header_locale_h" >&6 ++else ++ # Is the header compilable? ++echo "$as_me:$LINENO: checking locale.h usability" >&5 ++echo $ECHO_N "checking locale.h usability... $ECHO_C" >&6 ++cat >conftest.$ac_ext <<_ACEOF ++/* confdefs.h. */ ++_ACEOF ++cat confdefs.h >>conftest.$ac_ext ++cat >>conftest.$ac_ext <<_ACEOF ++/* end confdefs.h. */ ++$ac_includes_default ++#include ++_ACEOF ++rm -f conftest.$ac_objext ++if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 ++ (eval $ac_compile) 2>conftest.er1 ++ ac_status=$? ++ grep -v '^ *+' conftest.er1 >conftest.err ++ rm -f conftest.er1 ++ cat conftest.err >&5 ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); } && ++ { ac_try='test -z "$ac_c_werror_flag" ++ || test ! -s conftest.err' ++ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 ++ (eval $ac_try) 2>&5 ++ ac_status=$? ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); }; } && ++ { ac_try='test -s conftest.$ac_objext' ++ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 ++ (eval $ac_try) 2>&5 ++ ac_status=$? ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); }; }; then ++ ac_header_compiler=yes ++else ++ echo "$as_me: failed program was:" >&5 ++sed 's/^/| /' conftest.$ac_ext >&5 ++ ++ac_header_compiler=no ++fi ++rm -f conftest.err conftest.$ac_objext conftest.$ac_ext ++echo "$as_me:$LINENO: result: $ac_header_compiler" >&5 ++echo "${ECHO_T}$ac_header_compiler" >&6 ++ ++# Is the header present? ++echo "$as_me:$LINENO: checking locale.h presence" >&5 ++echo $ECHO_N "checking locale.h presence... $ECHO_C" >&6 ++cat >conftest.$ac_ext <<_ACEOF ++/* confdefs.h. */ ++_ACEOF ++cat confdefs.h >>conftest.$ac_ext ++cat >>conftest.$ac_ext <<_ACEOF ++/* end confdefs.h. */ ++#include ++_ACEOF ++if { (eval echo "$as_me:$LINENO: \"$ac_cpp conftest.$ac_ext\"") >&5 ++ (eval $ac_cpp conftest.$ac_ext) 2>conftest.er1 ++ ac_status=$? 
++ grep -v '^ *+' conftest.er1 >conftest.err ++ rm -f conftest.er1 ++ cat conftest.err >&5 ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); } >/dev/null; then ++ if test -s conftest.err; then ++ ac_cpp_err=$ac_c_preproc_warn_flag ++ ac_cpp_err=$ac_cpp_err$ac_c_werror_flag ++ else ++ ac_cpp_err= ++ fi ++else ++ ac_cpp_err=yes ++fi ++if test -z "$ac_cpp_err"; then ++ ac_header_preproc=yes ++else ++ echo "$as_me: failed program was:" >&5 ++sed 's/^/| /' conftest.$ac_ext >&5 ++ ++ ac_header_preproc=no ++fi ++rm -f conftest.err conftest.$ac_ext ++echo "$as_me:$LINENO: result: $ac_header_preproc" >&5 ++echo "${ECHO_T}$ac_header_preproc" >&6 ++ ++# So? What about this header? ++case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in ++ yes:no: ) ++ { echo "$as_me:$LINENO: WARNING: locale.h: accepted by the compiler, rejected by the preprocessor!" >&5 ++echo "$as_me: WARNING: locale.h: accepted by the compiler, rejected by the preprocessor!" >&2;} ++ { echo "$as_me:$LINENO: WARNING: locale.h: proceeding with the compiler's result" >&5 ++echo "$as_me: WARNING: locale.h: proceeding with the compiler's result" >&2;} ++ ac_header_preproc=yes ++ ;; ++ no:yes:* ) ++ { echo "$as_me:$LINENO: WARNING: locale.h: present but cannot be compiled" >&5 ++echo "$as_me: WARNING: locale.h: present but cannot be compiled" >&2;} ++ { echo "$as_me:$LINENO: WARNING: locale.h: check for missing prerequisite headers?" >&5 ++echo "$as_me: WARNING: locale.h: check for missing prerequisite headers?" >&2;} ++ { echo "$as_me:$LINENO: WARNING: locale.h: see the Autoconf documentation" >&5 ++echo "$as_me: WARNING: locale.h: see the Autoconf documentation" >&2;} ++ { echo "$as_me:$LINENO: WARNING: locale.h: section \"Present But Cannot Be Compiled\"" >&5 ++echo "$as_me: WARNING: locale.h: section \"Present But Cannot Be Compiled\"" >&2;} ++ { echo "$as_me:$LINENO: WARNING: locale.h: proceeding with the preprocessor's result" >&5 ++echo "$as_me: WARNING: locale.h: proceeding with the preprocessor's result" >&2;} ++ { echo "$as_me:$LINENO: WARNING: locale.h: in the future, the compiler will take precedence" >&5 ++echo "$as_me: WARNING: locale.h: in the future, the compiler will take precedence" >&2;} ++ ( ++ cat <<\_ASBOX ++## ----------------------------------------- ## ++## Report this to the package-unused lists. ## ++## ----------------------------------------- ## ++_ASBOX ++ ) | ++ sed "s/^/$as_me: WARNING: /" >&2 ++ ;; ++esac ++echo "$as_me:$LINENO: checking for locale.h" >&5 ++echo $ECHO_N "checking for locale.h... $ECHO_C" >&6 ++if test "${ac_cv_header_locale_h+set}" = set; then ++ echo $ECHO_N "(cached) $ECHO_C" >&6 ++else ++ ac_cv_header_locale_h=$ac_header_preproc ++fi ++echo "$as_me:$LINENO: result: $ac_cv_header_locale_h" >&5 ++echo "${ECHO_T}$ac_cv_header_locale_h" >&6 ++ ++fi ++if test $ac_cv_header_locale_h = yes; then ++ ++ echo "$as_me:$LINENO: checking for LC_MESSAGES" >&5 ++echo $ECHO_N "checking for LC_MESSAGES... $ECHO_C" >&6 ++if test "${ac_cv_val_LC_MESSAGES+set}" = set; then ++ echo $ECHO_N "(cached) $ECHO_C" >&6 ++else ++ if test x$gcc_no_link = xyes; then ++ { { echo "$as_me:$LINENO: error: Link tests are not allowed after GCC_NO_EXECUTABLES." >&5 ++echo "$as_me: error: Link tests are not allowed after GCC_NO_EXECUTABLES." >&2;} ++ { (exit 1); exit 1; }; } ++fi ++cat >conftest.$ac_ext <<_ACEOF ++/* confdefs.h. */ ++_ACEOF ++cat confdefs.h >>conftest.$ac_ext ++cat >>conftest.$ac_ext <<_ACEOF ++/* end confdefs.h. 
*/ ++#include ++int ++main () ++{ ++return LC_MESSAGES ++ ; ++ return 0; ++} ++_ACEOF ++rm -f conftest.$ac_objext conftest$ac_exeext ++if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 ++ (eval $ac_link) 2>conftest.er1 ++ ac_status=$? ++ grep -v '^ *+' conftest.er1 >conftest.err ++ rm -f conftest.er1 ++ cat conftest.err >&5 ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); } && ++ { ac_try='test -z "$ac_c_werror_flag" ++ || test ! -s conftest.err' ++ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 ++ (eval $ac_try) 2>&5 ++ ac_status=$? ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); }; } && ++ { ac_try='test -s conftest$ac_exeext' ++ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 ++ (eval $ac_try) 2>&5 ++ ac_status=$? ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); }; }; then ++ ac_cv_val_LC_MESSAGES=yes ++else ++ echo "$as_me: failed program was:" >&5 ++sed 's/^/| /' conftest.$ac_ext >&5 ++ ++ac_cv_val_LC_MESSAGES=no ++fi ++rm -f conftest.err conftest.$ac_objext \ ++ conftest$ac_exeext conftest.$ac_ext ++fi ++echo "$as_me:$LINENO: result: $ac_cv_val_LC_MESSAGES" >&5 ++echo "${ECHO_T}$ac_cv_val_LC_MESSAGES" >&6 ++ if test $ac_cv_val_LC_MESSAGES = yes; then ++ ++cat >>confdefs.h <<\_ACEOF ++#define HAVE_LC_MESSAGES 1 ++_ACEOF ++ ++ fi ++ ++fi ++ ++ ++ ++ + # Check for sigsetjmp + cat >conftest.$ac_ext <<_ACEOF + /* confdefs.h. */ +@@ -108905,6 +109119,266 @@ sed 's/^/| /' conftest.$ac_ext >&5 + + fi + rm -f conftest.err conftest.$ac_objext conftest.$ac_ext ++ ++ cat >>confdefs.h <<\_ACEOF ++#define HAVE_MMAP 1 ++_ACEOF ++ ++ ++ # For iconv support. ++ ++ ++ ++ ++ ++ am_save_CPPFLAGS="$CPPFLAGS" ++ ++ for element in $INCICONV; do ++ haveit= ++ for x in $CPPFLAGS; do ++ ++ acl_save_prefix="$prefix" ++ prefix="$acl_final_prefix" ++ acl_save_exec_prefix="$exec_prefix" ++ exec_prefix="$acl_final_exec_prefix" ++ eval x=\"$x\" ++ exec_prefix="$acl_save_exec_prefix" ++ prefix="$acl_save_prefix" ++ ++ if test "X$x" = "X$element"; then ++ haveit=yes ++ break ++ fi ++ done ++ if test -z "$haveit"; then ++ CPPFLAGS="${CPPFLAGS}${CPPFLAGS:+ }$element" ++ fi ++ done ++ ++ ++ echo "$as_me:$LINENO: checking for iconv" >&5 ++echo $ECHO_N "checking for iconv... $ECHO_C" >&6 ++if test "${am_cv_func_iconv+set}" = set; then ++ echo $ECHO_N "(cached) $ECHO_C" >&6 ++else ++ ++ am_cv_func_iconv="no, consider installing GNU libiconv" ++ am_cv_lib_iconv=no ++ if test x$gcc_no_link = xyes; then ++ { { echo "$as_me:$LINENO: error: Link tests are not allowed after GCC_NO_EXECUTABLES." >&5 ++echo "$as_me: error: Link tests are not allowed after GCC_NO_EXECUTABLES." >&2;} ++ { (exit 1); exit 1; }; } ++fi ++cat >conftest.$ac_ext <<_ACEOF ++/* confdefs.h. */ ++_ACEOF ++cat confdefs.h >>conftest.$ac_ext ++cat >>conftest.$ac_ext <<_ACEOF ++/* end confdefs.h. */ ++#include ++#include ++int ++main () ++{ ++iconv_t cd = iconv_open("",""); ++ iconv(cd,NULL,NULL,NULL,NULL); ++ iconv_close(cd); ++ ; ++ return 0; ++} ++_ACEOF ++rm -f conftest.$ac_objext conftest$ac_exeext ++if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 ++ (eval $ac_link) 2>conftest.er1 ++ ac_status=$? ++ grep -v '^ *+' conftest.er1 >conftest.err ++ rm -f conftest.er1 ++ cat conftest.err >&5 ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); } && ++ { ac_try='test -z "$ac_c_werror_flag" ++ || test ! -s conftest.err' ++ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 ++ (eval $ac_try) 2>&5 ++ ac_status=$? ++ echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 ++ (exit $ac_status); }; } && ++ { ac_try='test -s conftest$ac_exeext' ++ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 ++ (eval $ac_try) 2>&5 ++ ac_status=$? ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); }; }; then ++ am_cv_func_iconv=yes ++else ++ echo "$as_me: failed program was:" >&5 ++sed 's/^/| /' conftest.$ac_ext >&5 ++ ++fi ++rm -f conftest.err conftest.$ac_objext \ ++ conftest$ac_exeext conftest.$ac_ext ++ if test "$am_cv_func_iconv" != yes; then ++ am_save_LIBS="$LIBS" ++ LIBS="$LIBS $LIBICONV" ++ if test x$gcc_no_link = xyes; then ++ { { echo "$as_me:$LINENO: error: Link tests are not allowed after GCC_NO_EXECUTABLES." >&5 ++echo "$as_me: error: Link tests are not allowed after GCC_NO_EXECUTABLES." >&2;} ++ { (exit 1); exit 1; }; } ++fi ++cat >conftest.$ac_ext <<_ACEOF ++/* confdefs.h. */ ++_ACEOF ++cat confdefs.h >>conftest.$ac_ext ++cat >>conftest.$ac_ext <<_ACEOF ++/* end confdefs.h. */ ++#include ++#include ++int ++main () ++{ ++iconv_t cd = iconv_open("",""); ++ iconv(cd,NULL,NULL,NULL,NULL); ++ iconv_close(cd); ++ ; ++ return 0; ++} ++_ACEOF ++rm -f conftest.$ac_objext conftest$ac_exeext ++if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 ++ (eval $ac_link) 2>conftest.er1 ++ ac_status=$? ++ grep -v '^ *+' conftest.er1 >conftest.err ++ rm -f conftest.er1 ++ cat conftest.err >&5 ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); } && ++ { ac_try='test -z "$ac_c_werror_flag" ++ || test ! -s conftest.err' ++ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 ++ (eval $ac_try) 2>&5 ++ ac_status=$? ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); }; } && ++ { ac_try='test -s conftest$ac_exeext' ++ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 ++ (eval $ac_try) 2>&5 ++ ac_status=$? ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); }; }; then ++ am_cv_lib_iconv=yes ++ am_cv_func_iconv=yes ++else ++ echo "$as_me: failed program was:" >&5 ++sed 's/^/| /' conftest.$ac_ext >&5 ++ ++fi ++rm -f conftest.err conftest.$ac_objext \ ++ conftest$ac_exeext conftest.$ac_ext ++ LIBS="$am_save_LIBS" ++ fi ++ ++fi ++echo "$as_me:$LINENO: result: $am_cv_func_iconv" >&5 ++echo "${ECHO_T}$am_cv_func_iconv" >&6 ++ if test "$am_cv_func_iconv" = yes; then ++ ++cat >>confdefs.h <<\_ACEOF ++#define HAVE_ICONV 1 ++_ACEOF ++ ++ fi ++ if test "$am_cv_lib_iconv" = yes; then ++ echo "$as_me:$LINENO: checking how to link with libiconv" >&5 ++echo $ECHO_N "checking how to link with libiconv... $ECHO_C" >&6 ++ echo "$as_me:$LINENO: result: $LIBICONV" >&5 ++echo "${ECHO_T}$LIBICONV" >&6 ++ else ++ CPPFLAGS="$am_save_CPPFLAGS" ++ LIBICONV= ++ LTLIBICONV= ++ fi ++ ++ ++ ++ if test "$am_cv_func_iconv" = yes; then ++ echo "$as_me:$LINENO: checking for iconv declaration" >&5 ++echo $ECHO_N "checking for iconv declaration... $ECHO_C" >&6 ++ if test "${am_cv_proto_iconv+set}" = set; then ++ echo $ECHO_N "(cached) $ECHO_C" >&6 ++else ++ ++ cat >conftest.$ac_ext <<_ACEOF ++/* confdefs.h. */ ++_ACEOF ++cat confdefs.h >>conftest.$ac_ext ++cat >>conftest.$ac_ext <<_ACEOF ++/* end confdefs.h. 
*/ ++ ++#include ++#include ++extern ++#ifdef __cplusplus ++"C" ++#endif ++#if defined(__STDC__) || defined(__cplusplus) ++size_t iconv (iconv_t cd, char * *inbuf, size_t *inbytesleft, char * *outbuf, size_t *outbytesleft); ++#else ++size_t iconv(); ++#endif ++ ++int ++main () ++{ ++ ++ ; ++ return 0; ++} ++_ACEOF ++rm -f conftest.$ac_objext ++if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 ++ (eval $ac_compile) 2>conftest.er1 ++ ac_status=$? ++ grep -v '^ *+' conftest.er1 >conftest.err ++ rm -f conftest.er1 ++ cat conftest.err >&5 ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); } && ++ { ac_try='test -z "$ac_c_werror_flag" ++ || test ! -s conftest.err' ++ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 ++ (eval $ac_try) 2>&5 ++ ac_status=$? ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); }; } && ++ { ac_try='test -s conftest.$ac_objext' ++ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5 ++ (eval $ac_try) 2>&5 ++ ac_status=$? ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); }; }; then ++ am_cv_proto_iconv_arg1="" ++else ++ echo "$as_me: failed program was:" >&5 ++sed 's/^/| /' conftest.$ac_ext >&5 ++ ++am_cv_proto_iconv_arg1="const" ++fi ++rm -f conftest.err conftest.$ac_objext conftest.$ac_ext ++ am_cv_proto_iconv="extern size_t iconv (iconv_t cd, $am_cv_proto_iconv_arg1 char * *inbuf, size_t *inbytesleft, char * *outbuf, size_t *outbytesleft);" ++fi ++ ++ am_cv_proto_iconv=`echo "$am_cv_proto_iconv" | tr -s ' ' | sed -e 's/( /(/'` ++ echo "$as_me:$LINENO: result: ${ac_t:- ++ }$am_cv_proto_iconv" >&5 ++echo "${ECHO_T}${ac_t:- ++ }$am_cv_proto_iconv" >&6 ++ ++cat >>confdefs.h <<_ACEOF ++#define ICONV_CONST $am_cv_proto_iconv_arg1 ++_ACEOF ++ ++ fi ++ + ;; + *-mingw32*) + +@@ -109059,6 +109533,14 @@ fi + + done + ++ cat >>confdefs.h <<\_ACEOF ++#define HAVE_STRTOF 1 ++_ACEOF ++ ++ cat >>confdefs.h <<\_ACEOF ++#define HAVE_STRTOLD 1 ++_ACEOF ++ + + # If we're not using GNU ld, then there's no point in even trying these + # tests. Check for that first. We should have already tested for gld +@@ -115963,6 +116445,24 @@ ABI_TWEAKS_SRCDIR=config/${abi_tweaks_di + + + ++# For SymbianOS, we use a highly cut-down libsupc++. This lets us ++# conditionalise libsupc++'s Makefile.am to include only the necessary sources. ++case "$target" in ++ *arm*-symbianelf) ++ LIBSUPCXX_PRONLY=yes;; ++ *);; ++esac ++ ++ ++if test x$LIBSUPCXX_PRONLY = xyes; then ++ LIBSUPCXX_PRONLY_TRUE= ++ LIBSUPCXX_PRONLY_FALSE='#' ++else ++ LIBSUPCXX_PRONLY_TRUE='#' ++ LIBSUPCXX_PRONLY_FALSE= ++fi ++ ++ + # Determine cross-compile flags and AM_CONDITIONALs. + #AC_SUBST(GLIBCXX_IS_NATIVE) + #AM_CONDITIONAL(CANADIAN, test $CANADIAN = yes) +@@ -116543,6 +117043,13 @@ echo "$as_me: error: conditional \"GLIBC + Usually this means the macro was only invoked conditionally." >&2;} + { (exit 1); exit 1; }; } + fi ++if test -z "${LIBSUPCXX_PRONLY_TRUE}" && test -z "${LIBSUPCXX_PRONLY_FALSE}"; then ++ { { echo "$as_me:$LINENO: error: conditional \"LIBSUPCXX_PRONLY\" was never defined. ++Usually this means the macro was only invoked conditionally." >&5 ++echo "$as_me: error: conditional \"LIBSUPCXX_PRONLY\" was never defined. ++Usually this means the macro was only invoked conditionally." 
>&2;} ++ { (exit 1); exit 1; }; } ++fi + + : ${CONFIG_STATUS=./config.status} + ac_clean_files_save=$ac_clean_files +@@ -117580,6 +118087,8 @@ s,@CPU_DEFINES_SRCDIR@,$CPU_DEFINES_SRCD + s,@ABI_TWEAKS_SRCDIR@,$ABI_TWEAKS_SRCDIR,;t t + s,@OS_INC_SRCDIR@,$OS_INC_SRCDIR,;t t + s,@ERROR_CONSTANTS_SRCDIR@,$ERROR_CONSTANTS_SRCDIR,;t t ++s,@LIBSUPCXX_PRONLY_TRUE@,$LIBSUPCXX_PRONLY_TRUE,;t t ++s,@LIBSUPCXX_PRONLY_FALSE@,$LIBSUPCXX_PRONLY_FALSE,;t t + s,@glibcxx_prefixdir@,$glibcxx_prefixdir,;t t + s,@gxx_include_dir@,$gxx_include_dir,;t t + s,@glibcxx_toolexecdir@,$glibcxx_toolexecdir,;t t +--- a/libstdc++-v3/configure.ac ++++ b/libstdc++-v3/configure.ac +@@ -138,7 +138,7 @@ if $GLIBCXX_IS_NATIVE; then + + # Check for available headers. + AC_CHECK_HEADERS([nan.h ieeefp.h endian.h sys/isa_defs.h machine/endian.h \ +- machine/param.h sys/machine.h fp.h locale.h float.h inttypes.h gconv.h \ ++ machine/param.h sys/machine.h fp.h locale.h float.h inttypes.h \ + sys/types.h sys/ipc.h sys/sem.h]) + + GLIBCXX_CHECK_LINKER_FEATURES +@@ -344,13 +344,22 @@ AC_SUBST(OS_INC_SRCDIR) + AC_SUBST(ERROR_CONSTANTS_SRCDIR) + + ++# For SymbianOS, we use a highly cut-down libsupc++. This lets us ++# conditionalise libsupc++'s Makefile.am to include only the necessary sources. ++case "$target" in ++ *arm*-symbianelf) ++ LIBSUPCXX_PRONLY=yes;; ++ *);; ++esac ++AM_CONDITIONAL(LIBSUPCXX_PRONLY, test x$LIBSUPCXX_PRONLY = xyes) ++ + # Determine cross-compile flags and AM_CONDITIONALs. + #AC_SUBST(GLIBCXX_IS_NATIVE) + #AM_CONDITIONAL(CANADIAN, test $CANADIAN = yes) + # from GLIBCXX_CHECK_COMPLEX_MATH_SUPPORT: + #AM_CONDITIONAL(GLIBCXX_BUILD_LIBMATH, test $need_libmath = yes) + GLIBCXX_EVALUATE_CONDITIONALS +- ++ + AC_CACHE_SAVE + + if test ${multilib} = yes; then +--- a/libstdc++-v3/configure.host ++++ b/libstdc++-v3/configure.host +@@ -320,6 +320,11 @@ case "${host}" in + abi_baseline_pair=${try_cpu}-linux-gnu + fi + esac ++ case "${host}" in ++ arm*-*-linux-*eabi) ++ port_specific_symbol_files="\$(srcdir)/../config/os/gnu-linux/arm-eabi-extra.ver" ++ ;; ++ esac + ;; + mips*-*-*) + case "${host_os}" in +--- a/libstdc++-v3/crossconfig.m4 ++++ b/libstdc++-v3/crossconfig.m4 +@@ -46,7 +46,7 @@ case "${host}" in + # so we just check for all the features here. + # Check for available headers. + AC_CHECK_HEADERS([nan.h ieeefp.h endian.h sys/isa_defs.h machine/endian.h \ +- machine/param.h sys/machine.h fp.h locale.h float.h inttypes.h gconv.h \ ++ machine/param.h sys/machine.h fp.h locale.h float.h inttypes.h \ + sys/types.h]) + + # Don't call GLIBCXX_CHECK_LINKER_FEATURES, Darwin doesn't have a GNU ld +@@ -197,7 +197,7 @@ case "${host}" in + AC_CHECK_HEADERS([nan.h ieeefp.h endian.h sys/isa_defs.h \ + machine/endian.h machine/param.h sys/machine.h sys/types.h \ + fp.h float.h endian.h inttypes.h locale.h float.h stdint.h \ +- sys/ipc.h sys/sem.h gconf.h]) ++ sys/ipc.h sys/sem.h]) + SECTION_FLAGS='-ffunction-sections -fdata-sections' + AC_SUBST(SECTION_FLAGS) + GLIBCXX_CHECK_COMPILER_FEATURES +@@ -223,6 +223,10 @@ case "${host}" in + # For C99 support to TR1. + GLIBCXX_CHECK_C99_TR1 + ++ AC_DEFINE(_GLIBCXX_USE_RANDOM_TR1) ++ ++ AC_LC_MESSAGES ++ + # Check for sigsetjmp + AC_TRY_COMPILE( + [#include ], +@@ -231,9 +235,16 @@ case "${host}" in + siglongjmp (env, 1); + ], + [AC_DEFINE(HAVE_SIGSETJMP, 1, [Define if sigsetjmp is available.])]) ++ ++ AC_DEFINE(HAVE_MMAP) ++ ++ # For iconv support. 
++ AM_ICONV + ;; + *-mingw32*) + AC_CHECK_HEADERS([sys/types.h locale.h float.h]) ++ AC_DEFINE(HAVE_STRTOF) ++ AC_DEFINE(HAVE_STRTOLD) + GLIBCXX_CHECK_LINKER_FEATURES + GLIBCXX_CHECK_COMPLEX_MATH_SUPPORT + ;; +--- a/libstdc++-v3/doc/Makefile.in ++++ b/libstdc++-v3/doc/Makefile.in +@@ -157,6 +157,8 @@ LIBMATHOBJS = @LIBMATHOBJS@ + LIBOBJS = @LIBOBJS@ + LIBS = @LIBS@ + LIBSUPCXX_PICFLAGS = @LIBSUPCXX_PICFLAGS@ ++LIBSUPCXX_PRONLY_FALSE = @LIBSUPCXX_PRONLY_FALSE@ ++LIBSUPCXX_PRONLY_TRUE = @LIBSUPCXX_PRONLY_TRUE@ + LIBTOOL = @LIBTOOL@ + LN_S = @LN_S@ + LTLIBICONV = @LTLIBICONV@ +--- a/libstdc++-v3/include/Makefile.am ++++ b/libstdc++-v3/include/Makefile.am +@@ -1103,8 +1103,14 @@ ${pch3_output}: ${pch3_source} ${pch2_ou + if GLIBCXX_HOSTED + install-data-local: install-headers + else ++if LIBSUPCXX_PRONLY ++# Don't install any headers if we're only putting eh_personality in ++# libsupc++ (e.g. on SymbianOS) ++install-data-local: ++else + install-data-local: install-freestanding-headers + endif ++endif + + # This is a subset of the full install-headers rule. We only need , + # , , , , , , and any +--- a/libstdc++-v3/include/Makefile.in ++++ b/libstdc++-v3/include/Makefile.in +@@ -157,6 +157,8 @@ LIBMATHOBJS = @LIBMATHOBJS@ + LIBOBJS = @LIBOBJS@ + LIBS = @LIBS@ + LIBSUPCXX_PICFLAGS = @LIBSUPCXX_PICFLAGS@ ++LIBSUPCXX_PRONLY_FALSE = @LIBSUPCXX_PRONLY_FALSE@ ++LIBSUPCXX_PRONLY_TRUE = @LIBSUPCXX_PRONLY_TRUE@ + LIBTOOL = @LIBTOOL@ + LN_S = @LN_S@ + LTLIBICONV = @LTLIBICONV@ +@@ -1492,7 +1494,10 @@ ${pch3_output}: ${pch3_source} ${pch2_ou + # the rest are taken from the original source tree. + + @GLIBCXX_HOSTED_TRUE@install-data-local: install-headers +-@GLIBCXX_HOSTED_FALSE@install-data-local: install-freestanding-headers ++# Don't install any headers if we're only putting eh_personality in ++# libsupc++ (e.g. on SymbianOS) ++@GLIBCXX_HOSTED_FALSE@@LIBSUPCXX_PRONLY_TRUE@install-data-local: ++@GLIBCXX_HOSTED_FALSE@@LIBSUPCXX_PRONLY_FALSE@install-data-local: install-freestanding-headers + + # This is a subset of the full install-headers rule. We only need , + # , , , , , , and any +--- a/libstdc++-v3/include/std/type_traits ++++ b/libstdc++-v3/include/std/type_traits +@@ -455,15 +455,18 @@ namespace std + struct __make_unsigned_selector<_Tp, false, true> + { + private: +- // GNU enums start with sizeof short. +- typedef unsigned short __smallest; +- static const bool __b1 = sizeof(_Tp) <= sizeof(__smallest); ++ // With -fshort-enums, an enum may be as small as a char. ++ typedef unsigned char __smallest; ++ static const bool __b0 = sizeof(_Tp) <= sizeof(__smallest); ++ static const bool __b1 = sizeof(_Tp) <= sizeof(unsigned short); + static const bool __b2 = sizeof(_Tp) <= sizeof(unsigned int); +- typedef conditional<__b2, unsigned int, unsigned long> __cond; +- typedef typename __cond::type __cond_type; ++ typedef conditional<__b2, unsigned int, unsigned long> __cond2; ++ typedef typename __cond2::type __cond2_type; ++ typedef conditional<__b1, unsigned short, __cond2_type> __cond1; ++ typedef typename __cond1::type __cond1_type; + + public: +- typedef typename conditional<__b1, __smallest, __cond_type>::type __type; ++ typedef typename conditional<__b0, __smallest, __cond1_type>::type __type; + }; + + // Given an integral/enum type, return the corresponding unsigned +@@ -530,15 +533,18 @@ namespace std + struct __make_signed_selector<_Tp, false, true> + { + private: +- // GNU enums start with sizeof short. 
+- typedef signed short __smallest; +- static const bool __b1 = sizeof(_Tp) <= sizeof(__smallest); ++ // With -fshort-enums, an enum may be as small as a char. ++ typedef signed char __smallest; ++ static const bool __b0 = sizeof(_Tp) <= sizeof(__smallest); ++ static const bool __b1 = sizeof(_Tp) <= sizeof(signed short); + static const bool __b2 = sizeof(_Tp) <= sizeof(signed int); +- typedef conditional<__b2, signed int, signed long> __cond; +- typedef typename __cond::type __cond_type; ++ typedef conditional<__b2, signed int, signed long> __cond2; ++ typedef typename __cond2::type __cond2_type; ++ typedef conditional<__b1, signed short, __cond2_type> __cond1; ++ typedef typename __cond1::type __cond1_type; + + public: +- typedef typename conditional<__b1, __smallest, __cond_type>::type __type; ++ typedef typename conditional<__b0, __smallest, __cond1_type>::type __type; + }; + + // Given an integral/enum type, return the corresponding signed +--- a/libstdc++-v3/libmath/Makefile.in ++++ b/libstdc++-v3/libmath/Makefile.in +@@ -172,6 +172,8 @@ LIBMATHOBJS = @LIBMATHOBJS@ + LIBOBJS = @LIBOBJS@ + LIBS = @LIBS@ + LIBSUPCXX_PICFLAGS = @LIBSUPCXX_PICFLAGS@ ++LIBSUPCXX_PRONLY_FALSE = @LIBSUPCXX_PRONLY_FALSE@ ++LIBSUPCXX_PRONLY_TRUE = @LIBSUPCXX_PRONLY_TRUE@ + + # Only compiling "C" sources in this directory. + LIBTOOL = @LIBTOOL@ --tag CC +--- a/libstdc++-v3/libsupc++/Makefile.am ++++ b/libstdc++-v3/libsupc++/Makefile.am +@@ -31,6 +31,11 @@ toolexeclib_LTLIBRARIES = libsupc++.la + # 2) integrated libsupc++convenience.la that is to be a part of libstdc++.a + noinst_LTLIBRARIES = libsupc++convenience.la + ++if LIBSUPCXX_PRONLY ++sources = \ ++ eh_personality.cc ++ ++else + + headers = \ + exception new typeinfo cxxabi.h cxxabi-forced.h exception_defines.h +@@ -83,6 +88,7 @@ sources = \ + vec.cc \ + vmi_class_type_info.cc \ + vterminate.cc ++endif + + libsupc___la_SOURCES = $(sources) $(c_sources) + libsupc__convenience_la_SOURCES = $(sources) $(c_sources) +--- a/libstdc++-v3/libsupc++/Makefile.in ++++ b/libstdc++-v3/libsupc++/Makefile.in +@@ -38,7 +38,7 @@ POST_UNINSTALL = : + build_triplet = @build@ + host_triplet = @host@ + target_triplet = @target@ +-DIST_COMMON = $(glibcxxinstall_HEADERS) $(srcdir)/Makefile.am \ ++DIST_COMMON = $(am__glibcxxinstall_HEADERS_DIST) $(srcdir)/Makefile.am \ + $(srcdir)/Makefile.in $(top_srcdir)/fragment.am + subdir = libsupc++ + ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +@@ -84,19 +84,29 @@ am__libsupc___la_SOURCES_DIST = array_ty + pmem_type_info.cc pointer_type_info.cc pure.cc \ + si_class_type_info.cc tinfo.cc tinfo2.cc vec.cc \ + vmi_class_type_info.cc vterminate.cc cp-demangle.c +-am__objects_1 = array_type_info.lo atexit_arm.lo bad_cast.lo \ +- bad_typeid.lo class_type_info.lo del_op.lo del_opnt.lo \ +- del_opv.lo del_opvnt.lo dyncast.lo eh_alloc.lo eh_arm.lo \ +- eh_aux_runtime.lo eh_call.lo eh_catch.lo eh_exception.lo \ +- eh_globals.lo eh_personality.lo eh_term_handler.lo \ +- eh_terminate.lo eh_throw.lo eh_type.lo eh_unex_handler.lo \ +- enum_type_info.lo function_type_info.lo \ +- fundamental_type_info.lo guard.lo new_handler.lo new_op.lo \ +- new_opnt.lo new_opv.lo new_opvnt.lo pbase_type_info.lo \ +- pmem_type_info.lo pointer_type_info.lo pure.lo \ +- si_class_type_info.lo tinfo.lo tinfo2.lo vec.lo \ +- vmi_class_type_info.lo vterminate.lo +-@GLIBCXX_HOSTED_TRUE@am__objects_2 = cp-demangle.lo ++@LIBSUPCXX_PRONLY_FALSE@am__objects_1 = array_type_info.lo \ ++@LIBSUPCXX_PRONLY_FALSE@ atexit_arm.lo bad_cast.lo \ ++@LIBSUPCXX_PRONLY_FALSE@ bad_typeid.lo 
class_type_info.lo \ ++@LIBSUPCXX_PRONLY_FALSE@ del_op.lo del_opnt.lo del_opv.lo \ ++@LIBSUPCXX_PRONLY_FALSE@ del_opvnt.lo dyncast.lo eh_alloc.lo \ ++@LIBSUPCXX_PRONLY_FALSE@ eh_arm.lo eh_aux_runtime.lo eh_call.lo \ ++@LIBSUPCXX_PRONLY_FALSE@ eh_catch.lo eh_exception.lo \ ++@LIBSUPCXX_PRONLY_FALSE@ eh_globals.lo eh_personality.lo \ ++@LIBSUPCXX_PRONLY_FALSE@ eh_term_handler.lo eh_terminate.lo \ ++@LIBSUPCXX_PRONLY_FALSE@ eh_throw.lo eh_type.lo \ ++@LIBSUPCXX_PRONLY_FALSE@ eh_unex_handler.lo enum_type_info.lo \ ++@LIBSUPCXX_PRONLY_FALSE@ function_type_info.lo \ ++@LIBSUPCXX_PRONLY_FALSE@ fundamental_type_info.lo guard.lo \ ++@LIBSUPCXX_PRONLY_FALSE@ new_handler.lo new_op.lo new_opnt.lo \ ++@LIBSUPCXX_PRONLY_FALSE@ new_opv.lo new_opvnt.lo \ ++@LIBSUPCXX_PRONLY_FALSE@ pbase_type_info.lo pmem_type_info.lo \ ++@LIBSUPCXX_PRONLY_FALSE@ pointer_type_info.lo pure.lo \ ++@LIBSUPCXX_PRONLY_FALSE@ si_class_type_info.lo tinfo.lo \ ++@LIBSUPCXX_PRONLY_FALSE@ tinfo2.lo vec.lo \ ++@LIBSUPCXX_PRONLY_FALSE@ vmi_class_type_info.lo vterminate.lo ++@LIBSUPCXX_PRONLY_TRUE@am__objects_1 = eh_personality.lo ++@GLIBCXX_HOSTED_TRUE@@LIBSUPCXX_PRONLY_FALSE@am__objects_2 = \ ++@GLIBCXX_HOSTED_TRUE@@LIBSUPCXX_PRONLY_FALSE@ cp-demangle.lo + am_libsupc___la_OBJECTS = $(am__objects_1) $(am__objects_2) + libsupc___la_OBJECTS = $(am_libsupc___la_OBJECTS) + libsupc__convenience_la_LIBADD = +@@ -129,6 +139,8 @@ CXXLD = $(CXX) + SOURCES = $(libsupc___la_SOURCES) $(libsupc__convenience_la_SOURCES) + DIST_SOURCES = $(am__libsupc___la_SOURCES_DIST) \ + $(am__libsupc__convenience_la_SOURCES_DIST) ++am__glibcxxinstall_HEADERS_DIST = exception new typeinfo cxxabi.h \ ++ cxxabi-forced.h exception_defines.h + glibcxxinstallHEADERS_INSTALL = $(INSTALL_HEADER) + HEADERS = $(glibcxxinstall_HEADERS) + ETAGS = etags +@@ -227,6 +239,8 @@ LIBMATHOBJS = @LIBMATHOBJS@ + LIBOBJS = @LIBOBJS@ + LIBS = @LIBS@ + LIBSUPCXX_PICFLAGS = @LIBSUPCXX_PICFLAGS@ ++LIBSUPCXX_PRONLY_FALSE = @LIBSUPCXX_PRONLY_FALSE@ ++LIBSUPCXX_PRONLY_TRUE = @LIBSUPCXX_PRONLY_TRUE@ + LIBTOOL = @LIBTOOL@ + LN_S = @LN_S@ + LTLIBICONV = @LTLIBICONV@ +@@ -350,55 +364,58 @@ AM_CPPFLAGS = $(GLIBCXX_INCLUDES) + toolexeclib_LTLIBRARIES = libsupc++.la + # 2) integrated libsupc++convenience.la that is to be a part of libstdc++.a + noinst_LTLIBRARIES = libsupc++convenience.la +-headers = \ +- exception new typeinfo cxxabi.h cxxabi-forced.h exception_defines.h ++@LIBSUPCXX_PRONLY_FALSE@sources = \ ++@LIBSUPCXX_PRONLY_FALSE@ array_type_info.cc \ ++@LIBSUPCXX_PRONLY_FALSE@ atexit_arm.cc \ ++@LIBSUPCXX_PRONLY_FALSE@ bad_cast.cc \ ++@LIBSUPCXX_PRONLY_FALSE@ bad_typeid.cc \ ++@LIBSUPCXX_PRONLY_FALSE@ class_type_info.cc \ ++@LIBSUPCXX_PRONLY_FALSE@ del_op.cc \ ++@LIBSUPCXX_PRONLY_FALSE@ del_opnt.cc \ ++@LIBSUPCXX_PRONLY_FALSE@ del_opv.cc \ ++@LIBSUPCXX_PRONLY_FALSE@ del_opvnt.cc \ ++@LIBSUPCXX_PRONLY_FALSE@ dyncast.cc \ ++@LIBSUPCXX_PRONLY_FALSE@ eh_alloc.cc \ ++@LIBSUPCXX_PRONLY_FALSE@ eh_arm.cc \ ++@LIBSUPCXX_PRONLY_FALSE@ eh_aux_runtime.cc \ ++@LIBSUPCXX_PRONLY_FALSE@ eh_call.cc \ ++@LIBSUPCXX_PRONLY_FALSE@ eh_catch.cc \ ++@LIBSUPCXX_PRONLY_FALSE@ eh_exception.cc \ ++@LIBSUPCXX_PRONLY_FALSE@ eh_globals.cc \ ++@LIBSUPCXX_PRONLY_FALSE@ eh_personality.cc \ ++@LIBSUPCXX_PRONLY_FALSE@ eh_term_handler.cc \ ++@LIBSUPCXX_PRONLY_FALSE@ eh_terminate.cc \ ++@LIBSUPCXX_PRONLY_FALSE@ eh_throw.cc \ ++@LIBSUPCXX_PRONLY_FALSE@ eh_type.cc \ ++@LIBSUPCXX_PRONLY_FALSE@ eh_unex_handler.cc \ ++@LIBSUPCXX_PRONLY_FALSE@ enum_type_info.cc \ ++@LIBSUPCXX_PRONLY_FALSE@ function_type_info.cc \ 
++@LIBSUPCXX_PRONLY_FALSE@ fundamental_type_info.cc \ ++@LIBSUPCXX_PRONLY_FALSE@ guard.cc \ ++@LIBSUPCXX_PRONLY_FALSE@ new_handler.cc \ ++@LIBSUPCXX_PRONLY_FALSE@ new_op.cc \ ++@LIBSUPCXX_PRONLY_FALSE@ new_opnt.cc \ ++@LIBSUPCXX_PRONLY_FALSE@ new_opv.cc \ ++@LIBSUPCXX_PRONLY_FALSE@ new_opvnt.cc \ ++@LIBSUPCXX_PRONLY_FALSE@ pbase_type_info.cc \ ++@LIBSUPCXX_PRONLY_FALSE@ pmem_type_info.cc \ ++@LIBSUPCXX_PRONLY_FALSE@ pointer_type_info.cc \ ++@LIBSUPCXX_PRONLY_FALSE@ pure.cc \ ++@LIBSUPCXX_PRONLY_FALSE@ si_class_type_info.cc \ ++@LIBSUPCXX_PRONLY_FALSE@ tinfo.cc \ ++@LIBSUPCXX_PRONLY_FALSE@ tinfo2.cc \ ++@LIBSUPCXX_PRONLY_FALSE@ vec.cc \ ++@LIBSUPCXX_PRONLY_FALSE@ vmi_class_type_info.cc \ ++@LIBSUPCXX_PRONLY_FALSE@ vterminate.cc + +-@GLIBCXX_HOSTED_TRUE@c_sources = \ +-@GLIBCXX_HOSTED_TRUE@ cp-demangle.c ++@LIBSUPCXX_PRONLY_TRUE@sources = \ ++@LIBSUPCXX_PRONLY_TRUE@ eh_personality.cc + +-sources = \ +- array_type_info.cc \ +- atexit_arm.cc \ +- bad_cast.cc \ +- bad_typeid.cc \ +- class_type_info.cc \ +- del_op.cc \ +- del_opnt.cc \ +- del_opv.cc \ +- del_opvnt.cc \ +- dyncast.cc \ +- eh_alloc.cc \ +- eh_arm.cc \ +- eh_aux_runtime.cc \ +- eh_call.cc \ +- eh_catch.cc \ +- eh_exception.cc \ +- eh_globals.cc \ +- eh_personality.cc \ +- eh_term_handler.cc \ +- eh_terminate.cc \ +- eh_throw.cc \ +- eh_type.cc \ +- eh_unex_handler.cc \ +- enum_type_info.cc \ +- function_type_info.cc \ +- fundamental_type_info.cc \ +- guard.cc \ +- new_handler.cc \ +- new_op.cc \ +- new_opnt.cc \ +- new_opv.cc \ +- new_opvnt.cc \ +- pbase_type_info.cc \ +- pmem_type_info.cc \ +- pointer_type_info.cc \ +- pure.cc \ +- si_class_type_info.cc \ +- tinfo.cc \ +- tinfo2.cc \ +- vec.cc \ +- vmi_class_type_info.cc \ +- vterminate.cc ++@LIBSUPCXX_PRONLY_FALSE@headers = \ ++@LIBSUPCXX_PRONLY_FALSE@ exception new typeinfo cxxabi.h cxxabi-forced.h exception_defines.h ++ ++@GLIBCXX_HOSTED_TRUE@@LIBSUPCXX_PRONLY_FALSE@c_sources = \ ++@GLIBCXX_HOSTED_TRUE@@LIBSUPCXX_PRONLY_FALSE@ cp-demangle.c + + libsupc___la_SOURCES = $(sources) $(c_sources) + libsupc__convenience_la_SOURCES = $(sources) $(c_sources) +--- a/libstdc++-v3/libsupc++/eh_arm.cc ++++ b/libstdc++-v3/libsupc++/eh_arm.cc +@@ -46,12 +46,14 @@ __cxa_type_match(_Unwind_Exception* ue_h + bool is_reference __attribute__((__unused__)), + void** thrown_ptr_p) + { +- bool foreign_exception = !__is_gxx_exception_class(ue_header->exception_class); ++ bool forced_unwind = __is_gxx_forced_unwind_class(ue_header->exception_class); ++ bool foreign_exception = !forced_unwind && !__is_gxx_exception_class(ue_header->exception_class); + __cxa_exception* xh = __get_exception_header_from_ue(ue_header); + const std::type_info* throw_type; + +- // XXX What to do with forced unwind? +- if (foreign_exception) ++ if (forced_unwind) ++ throw_type = &typeid(abi::__forced_unwind); ++ else if (foreign_exception) + throw_type = &typeid(abi::__foreign_exception); + else + throw_type = xh->exceptionType; +--- a/libstdc++-v3/libsupc++/eh_personality.cc ++++ b/libstdc++-v3/libsupc++/eh_personality.cc +@@ -544,8 +544,12 @@ PERSONALITY_FUNCTION (int version, + + #ifdef __ARM_EABI_UNWINDER__ + throw_type = ue_header; +- if ((actions & _UA_FORCE_UNWIND) +- || foreign_exception) ++ if (actions & _UA_FORCE_UNWIND) ++ { ++ __GXX_INIT_FORCED_UNWIND_CLASS(ue_header->exception_class); ++ thrown_ptr = 0; ++ } ++ else if (foreign_exception) + thrown_ptr = 0; + #else + // During forced unwinding, match a magic exception type. 
+--- a/libstdc++-v3/libsupc++/unwind-cxx.h ++++ b/libstdc++-v3/libsupc++/unwind-cxx.h +@@ -201,6 +201,32 @@ __GXX_INIT_EXCEPTION_CLASS(_Unwind_Excep + c[7] = '\0'; + } + ++static inline bool ++__is_gxx_forced_unwind_class(_Unwind_Exception_Class c) ++{ ++ return c[0] == 'G' ++ && c[1] == 'N' ++ && c[2] == 'U' ++ && c[3] == 'C' ++ && c[4] == 'F' ++ && c[5] == 'O' ++ && c[6] == 'R' ++ && c[7] == '\0'; ++} ++ ++static inline void ++__GXX_INIT_FORCED_UNWIND_CLASS(_Unwind_Exception_Class c) ++{ ++ c[0] = 'G'; ++ c[1] = 'N'; ++ c[2] = 'U'; ++ c[3] = 'C'; ++ c[4] = 'F'; ++ c[5] = 'O'; ++ c[6] = 'R'; ++ c[7] = '\0'; ++} ++ + static inline void* + __gxx_caught_object(_Unwind_Exception* eo) + { +--- a/libstdc++-v3/libsupc++/vec.cc ++++ b/libstdc++-v3/libsupc++/vec.cc +@@ -461,6 +461,9 @@ namespace __aeabiv1 + __aeabi_vec_dtor_cookie (void *array_address, + abi::__cxa_cdtor_type destructor) + { ++ if (!array_address) ++ return NULL; ++ + abi::__cxa_vec_dtor (array_address, + reinterpret_cast(array_address)[-1], + reinterpret_cast(array_address)[-2], +@@ -473,6 +476,9 @@ namespace __aeabiv1 + __aeabi_vec_delete (void *array_address, + abi::__cxa_cdtor_type destructor) + { ++ if (!array_address) ++ return; ++ + abi::__cxa_vec_delete (array_address, + reinterpret_cast(array_address)[-2], + 2 * sizeof (std::size_t), +@@ -484,6 +490,9 @@ namespace __aeabiv1 + abi::__cxa_cdtor_type destructor, + void (*dealloc) (void *, std::size_t)) + { ++ if (!array_address) ++ return; ++ + abi::__cxa_vec_delete3 (array_address, + reinterpret_cast(array_address)[-2], + 2 * sizeof (std::size_t), +@@ -494,6 +503,9 @@ namespace __aeabiv1 + __aeabi_vec_delete3_nodtor (void *array_address, + void (*dealloc) (void *, std::size_t)) + { ++ if (!array_address) ++ return; ++ + abi::__cxa_vec_delete3 (array_address, + reinterpret_cast(array_address)[-2], + 2 * sizeof (std::size_t), +--- a/libstdc++-v3/po/Makefile.in ++++ b/libstdc++-v3/po/Makefile.in +@@ -157,6 +157,8 @@ LIBMATHOBJS = @LIBMATHOBJS@ + LIBOBJS = @LIBOBJS@ + LIBS = @LIBS@ + LIBSUPCXX_PICFLAGS = @LIBSUPCXX_PICFLAGS@ ++LIBSUPCXX_PRONLY_FALSE = @LIBSUPCXX_PRONLY_FALSE@ ++LIBSUPCXX_PRONLY_TRUE = @LIBSUPCXX_PRONLY_TRUE@ + LIBTOOL = @LIBTOOL@ + LN_S = @LN_S@ + LTLIBICONV = @LTLIBICONV@ +--- a/libstdc++-v3/src/Makefile.in ++++ b/libstdc++-v3/src/Makefile.in +@@ -211,6 +211,8 @@ LIBMATHOBJS = @LIBMATHOBJS@ + LIBOBJS = @LIBOBJS@ + LIBS = @LIBS@ + LIBSUPCXX_PICFLAGS = @LIBSUPCXX_PICFLAGS@ ++LIBSUPCXX_PRONLY_FALSE = @LIBSUPCXX_PRONLY_FALSE@ ++LIBSUPCXX_PRONLY_TRUE = @LIBSUPCXX_PRONLY_TRUE@ + LIBTOOL = @LIBTOOL@ + LN_S = @LN_S@ + LTLIBICONV = @LTLIBICONV@ diff --git a/toolchain/gcc/patches/4.3.3+cs/105-libtool.patch b/toolchain/gcc/patches/4.3.3+cs/105-libtool.patch new file mode 100644 index 0000000000..015f28dfe1 --- /dev/null +++ b/toolchain/gcc/patches/4.3.3+cs/105-libtool.patch @@ -0,0 +1,84 @@ +2008-03-02 Ralf Wildenhues + + Backport from upstream Libtool: + + 2007-10-12 Eric Blake + + Deal with Autoconf 2.62's semantic change in m4_append. + * ltsugar.m4 (lt_append): Replace broken versions of + m4_append. + (lt_if_append_uniq): Don't require separator to be overquoted, and + avoid broken m4_append. + (lt_dict_add): Fix typo. + * libtool.m4 (_LT_DECL): Don't overquote separator. + +diff --git a/libtool.m4 b/libtool.m4 +index e86cd02..26a039a 100644 +--- a/libtool.m4 ++++ b/libtool.m4 +@@ -319,7 +319,7 @@ m4_bpatsubst([m4_bpatsubst([$1], [^ *], [# ])], + # VALUE may be 0, 1 or 2 for a computed quote escaped value based on + # VARNAME. Any other value will be used directly. 
+ m4_define([_LT_DECL], +-[lt_if_append_uniq([lt_decl_varnames], [$2], [[, ]], ++[lt_if_append_uniq([lt_decl_varnames], [$2], [, ], + [lt_dict_add_subkey([lt_decl_dict], [$2], [libtool_name], + [m4_ifval([$1], [$1], [$2])]) + lt_dict_add_subkey([lt_decl_dict], [$2], [value], [$3]) +diff --git a/ltsugar.m4 b/ltsugar.m4 +index fc51dc7..dd4f871 100644 +--- a/ltsugar.m4 ++++ b/ltsugar.m4 +@@ -1,13 +1,13 @@ + # ltsugar.m4 -- libtool m4 base layer. -*-Autoconf-*- + # +-# Copyright (C) 2004, 2005 Free Software Foundation, Inc. ++# Copyright (C) 2004, 2005, 2007 Free Software Foundation, Inc. + # Written by Gary V. Vaughan. + # + # This file is free software; the Free Software Foundation gives + # unlimited permission to copy and/or distribute it, with or without + # modifications, as long as this notice is preserved. + +-# serial 3 ltsugar.m4 ++# serial 4 ltsugar.m4 + + # This is to help aclocal find these macros, as it can't see m4_define. + AC_DEFUN([LTSUGAR_VERSION], [m4_if([0.1])]) +@@ -46,6 +46,20 @@ m4_define([lt_cdr], + m4_define([lt_unquote], $1) + + ++# lt_append(MACRO-NAME, STRING, [SEPARATOR]) ++# ------------------------------------------ ++# Redefine MACRO-NAME to hold its former content plus `SEPARATOR'`STRING'. ++# Note that neither SEPARATOR nor STRING are expanded. No SEPARATOR is ++# output if MACRO-NAME was previously undefined (different than defined ++# and empty). ++# This macro is needed until we can rely on Autoconf 2.62, since earlier ++# versions of m4 mistakenly expanded SEPARATOR. ++m4_define([lt_append], ++[m4_define([$1], ++ m4_ifdef([$1], [m4_defn([$1])[$3]])[$2])]) ++ ++ ++ + # lt_combine(SEP, PREFIX-LIST, INFIX, SUFFIX1, [SUFFIX2...]) + # ---------------------------------------------------------- + # Produce a SEP delimited list of all paired combinations of elements of +@@ -67,10 +81,10 @@ m4_define([lt_combine], + # by SEPARATOR if supplied) and expand UNIQ, else NOT-UNIQ. + m4_define([lt_if_append_uniq], + [m4_ifdef([$1], +- [m4_bmatch($3[]m4_defn([$1])$3, $3[]m4_re_escape([$2])$3, +- [$5], +- [m4_append([$1], [$2], [$3])$4])], +- [m4_append([$1], [$2], [$3])$4])]) ++ [m4_if(m4_index([$3]m4_defn([$1])[$3], [$3$2$3]), [-1], ++ [lt_append([$1], [$2], [$3])$4], ++ [$5])], ++ [lt_append([$1], [$2], [$3])$4])]) + + + # lt_dict_add(DICT, KEY, VALUE) + diff --git a/toolchain/gcc/patches/4.3.3+cs/106-fix_linker_error.patch b/toolchain/gcc/patches/4.3.3+cs/106-fix_linker_error.patch new file mode 100644 index 0000000000..4dd83db20e --- /dev/null +++ b/toolchain/gcc/patches/4.3.3+cs/106-fix_linker_error.patch @@ -0,0 +1,11 @@ +--- a/gcc/cp/Make-lang.in ++++ b/gcc/cp/Make-lang.in +@@ -73,7 +73,7 @@ g++-cross$(exeext): g++$(exeext) + CXX_C_OBJS = attribs.o c-common.o c-format.o c-pragma.o c-semantics.o c-lex.o \ + c-dump.o $(CXX_TARGET_OBJS) c-pretty-print.o c-opts.o c-pch.o \ + c-incpath.o cppdefault.o c-ppoutput.o c-cppbuiltin.o prefix.o \ +- c-gimplify.o c-omp.o tree-inline.o ++ c-gimplify.o c-omp.o + + # Language-specific object files for C++ and Objective C++. 
+ CXX_AND_OBJCXX_OBJS = cp/call.o cp/decl.o cp/expr.o cp/pt.o cp/typeck2.o \ diff --git a/toolchain/gcc/patches/4.3.3+cs/301-missing-execinfo_h.patch b/toolchain/gcc/patches/4.3.3+cs/301-missing-execinfo_h.patch new file mode 100644 index 0000000000..b3f1e68d3b --- /dev/null +++ b/toolchain/gcc/patches/4.3.3+cs/301-missing-execinfo_h.patch @@ -0,0 +1,11 @@ +--- a/boehm-gc/include/gc.h ++++ b/boehm-gc/include/gc.h +@@ -503,7 +503,7 @@ GC_API GC_PTR GC_malloc_atomic_ignore_of + #if defined(__linux__) || defined(__GLIBC__) + # include + # if (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 1 || __GLIBC__ > 2) \ +- && !defined(__ia64__) ++ && !defined(__ia64__) && !defined(__UCLIBC__) + # ifndef GC_HAVE_BUILTIN_BACKTRACE + # define GC_HAVE_BUILTIN_BACKTRACE + # endif diff --git a/toolchain/gcc/patches/4.3.3+cs/302-c99-snprintf.patch b/toolchain/gcc/patches/4.3.3+cs/302-c99-snprintf.patch new file mode 100644 index 0000000000..ba51a0e1d4 --- /dev/null +++ b/toolchain/gcc/patches/4.3.3+cs/302-c99-snprintf.patch @@ -0,0 +1,13 @@ +Index: gcc-4.3.0/libstdc++-v3/include/c_global/cstdio +=================================================================== +--- gcc-4.3.0/libstdc++-v3/include/c_global/cstdio (revision 129202) ++++ gcc-4.3.0/libstdc++-v3/include/c_global/cstdio (working copy) +@@ -144,7 +144,7 @@ + + _GLIBCXX_END_NAMESPACE + +-#if _GLIBCXX_USE_C99 ++#if _GLIBCXX_USE_C99 || defined __UCLIBC__ + + #undef snprintf + #undef vfscanf diff --git a/toolchain/gcc/patches/4.3.3+cs/305-libmudflap-susv3-legacy.patch b/toolchain/gcc/patches/4.3.3+cs/305-libmudflap-susv3-legacy.patch new file mode 100644 index 0000000000..374b1f8659 --- /dev/null +++ b/toolchain/gcc/patches/4.3.3+cs/305-libmudflap-susv3-legacy.patch @@ -0,0 +1,49 @@ +Index: gcc-4.2/libmudflap/mf-hooks2.c +=================================================================== +--- gcc-4.2/libmudflap/mf-hooks2.c (revision 119834) ++++ gcc-4.2/libmudflap/mf-hooks2.c (working copy) +@@ -427,7 +427,7 @@ + { + TRACE ("%s\n", __PRETTY_FUNCTION__); + MF_VALIDATE_EXTENT(s, n, __MF_CHECK_WRITE, "bzero region"); +- bzero (s, n); ++ memset (s, 0, n); + } + + +@@ -437,7 +437,7 @@ + TRACE ("%s\n", __PRETTY_FUNCTION__); + MF_VALIDATE_EXTENT(src, n, __MF_CHECK_READ, "bcopy src"); + MF_VALIDATE_EXTENT(dest, n, __MF_CHECK_WRITE, "bcopy dest"); +- bcopy (src, dest, n); ++ memmove (dest, src, n); + } + + +@@ -447,7 +447,7 @@ + TRACE ("%s\n", __PRETTY_FUNCTION__); + MF_VALIDATE_EXTENT(s1, n, __MF_CHECK_READ, "bcmp 1st arg"); + MF_VALIDATE_EXTENT(s2, n, __MF_CHECK_READ, "bcmp 2nd arg"); +- return bcmp (s1, s2, n); ++ return n == 0 ? 
0 : memcmp (s1, s2, n); + } + + +@@ -456,7 +456,7 @@ + size_t n = strlen (s); + TRACE ("%s\n", __PRETTY_FUNCTION__); + MF_VALIDATE_EXTENT(s, CLAMPADD(n, 1), __MF_CHECK_READ, "index region"); +- return index (s, c); ++ return strchr (s, c); + } + + +@@ -465,7 +465,7 @@ + size_t n = strlen (s); + TRACE ("%s\n", __PRETTY_FUNCTION__); + MF_VALIDATE_EXTENT(s, CLAMPADD(n, 1), __MF_CHECK_READ, "rindex region"); +- return rindex (s, c); ++ return strrchr (s, c); + } + + /* XXX: stpcpy, memccpy */ diff --git a/toolchain/gcc/patches/4.3.3+cs/410-fix_pr37436.patch b/toolchain/gcc/patches/4.3.3+cs/410-fix_pr37436.patch new file mode 100644 index 0000000000..e86768e4d1 --- /dev/null +++ b/toolchain/gcc/patches/4.3.3+cs/410-fix_pr37436.patch @@ -0,0 +1,71 @@ +--- a/gcc/config/arm/arm.c ++++ b/gcc/config/arm/arm.c +@@ -5073,6 +5073,7 @@ arm_legitimate_address_p (enum machine_m + rtx xop1 = XEXP (x, 1); + + return ((arm_address_register_rtx_p (xop0, strict_p) ++ && GET_CODE(xop1) == CONST_INT + && arm_legitimate_index_p (mode, xop1, outer, strict_p)) + || (arm_address_register_rtx_p (xop1, strict_p) + && arm_legitimate_index_p (mode, xop0, outer, strict_p))); +--- a/gcc/config/arm/arm.md ++++ b/gcc/config/arm/arm.md +@@ -4683,7 +4683,7 @@ + + (define_expand "extendqihi2" + [(set (match_dup 2) +- (ashift:SI (match_operand:QI 1 "general_operand" "") ++ (ashift:SI (match_operand:QI 1 "arm_reg_or_extendqisi_mem_op" "") + (const_int 24))) + (set (match_operand:HI 0 "s_register_operand" "") + (ashiftrt:SI (match_dup 2) +@@ -4708,7 +4708,7 @@ + + (define_insn "*arm_extendqihi_insn" + [(set (match_operand:HI 0 "s_register_operand" "=r") +- (sign_extend:HI (match_operand:QI 1 "memory_operand" "Uq")))] ++ (sign_extend:HI (match_operand:QI 1 "arm_extendqisi_mem_op" "Uq")))] + "TARGET_ARM && arm_arch4" + "ldr%(sb%)\\t%0, %1" + [(set_attr "type" "load_byte") +@@ -4719,7 +4719,7 @@ + + (define_expand "extendqisi2" + [(set (match_dup 2) +- (ashift:SI (match_operand:QI 1 "general_operand" "") ++ (ashift:SI (match_operand:QI 1 "arm_reg_or_extendqisi_mem_op" "") + (const_int 24))) + (set (match_operand:SI 0 "s_register_operand" "") + (ashiftrt:SI (match_dup 2) +@@ -4751,7 +4751,7 @@ + + (define_insn "*arm_extendqisi" + [(set (match_operand:SI 0 "s_register_operand" "=r") +- (sign_extend:SI (match_operand:QI 1 "memory_operand" "Uq")))] ++ (sign_extend:SI (match_operand:QI 1 "arm_extendqisi_mem_op" "Uq")))] + "TARGET_ARM && arm_arch4 && !arm_arch6" + "ldr%(sb%)\\t%0, %1" + [(set_attr "type" "load_byte") +@@ -4762,7 +4762,8 @@ + + (define_insn "*arm_extendqisi_v6" + [(set (match_operand:SI 0 "s_register_operand" "=r,r") +- (sign_extend:SI (match_operand:QI 1 "nonimmediate_operand" "r,Uq")))] ++ (sign_extend:SI ++ (match_operand:QI 1 "arm_reg_or_extendqisi_mem_op" "r,Uq")))] + "TARGET_ARM && arm_arch6" + "@ + sxtb%?\\t%0, %1 +--- a/gcc/config/arm/predicates.md ++++ b/gcc/config/arm/predicates.md +@@ -239,6 +239,10 @@ + (match_test "arm_legitimate_address_p (mode, XEXP (op, 0), SIGN_EXTEND, + 0)"))) + ++(define_special_predicate "arm_reg_or_extendqisi_mem_op" ++ (ior (match_operand 0 "arm_extendqisi_mem_op") ++ (match_operand 0 "s_register_operand"))) ++ + (define_predicate "power_of_two_operand" + (match_code "const_int") + { diff --git a/toolchain/gcc/patches/4.3.3+cs/420-fix_pr26515.patch b/toolchain/gcc/patches/4.3.3+cs/420-fix_pr26515.patch new file mode 100644 index 0000000000..24b3720726 --- /dev/null +++ b/toolchain/gcc/patches/4.3.3+cs/420-fix_pr26515.patch @@ -0,0 +1,13 @@ +--- a/gcc/config/cris/cris.md ++++ 
b/gcc/config/cris/cris.md +@@ -4920,7 +4920,9 @@ + "REGNO (operands[2]) == REGNO (operands[0]) + && INTVAL (operands[3]) <= 65535 && INTVAL (operands[3]) >= 0 + && !CONST_OK_FOR_LETTER_P (INTVAL (operands[3]), 'I') +- && !side_effects_p (operands[1])" ++ && !side_effects_p (operands[1]) ++ && (!REG_P (operands[1]) ++ || REGNO (operands[1]) <= CRIS_LAST_GENERAL_REGISTER)" + ;; FIXME: CC0 valid except for M (i.e. CC_NOT_NEGATIVE). + [(set (match_dup 0) (match_dup 4)) + (set (match_dup 5) (match_dup 6))] diff --git a/toolchain/gcc/patches/4.3.3+cs/810-arm-softfloat-libgcc.patch b/toolchain/gcc/patches/4.3.3+cs/810-arm-softfloat-libgcc.patch new file mode 100644 index 0000000000..e3385d2ed0 --- /dev/null +++ b/toolchain/gcc/patches/4.3.3+cs/810-arm-softfloat-libgcc.patch @@ -0,0 +1,25 @@ +--- a/gcc/config/arm/t-linux ++++ b/gcc/config/arm/t-linux +@@ -3,7 +3,10 @@ + TARGET_LIBGCC2_CFLAGS = -fomit-frame-pointer -fPIC + + LIB1ASMSRC = arm/lib1funcs.asm +-LIB1ASMFUNCS = _udivsi3 _divsi3 _umodsi3 _modsi3 _dvmd_lnx ++LIB1ASMFUNCS = _udivsi3 _divsi3 _umodsi3 _modsi3 _dvmd_lnx \ ++ _negdf2 _addsubdf3 _muldivdf3 _cmpdf2 _unorddf2 _fixdfsi _fixunsdfsi \ ++ _truncdfsf2 _negsf2 _addsubsf3 _muldivsf3 _cmpsf2 _unordsf2 \ ++ _fixsfsi _fixunssfsi _floatdidf _floatundidf _floatdisf _floatundisf + + # MULTILIB_OPTIONS = mhard-float/msoft-float + # MULTILIB_DIRNAMES = hard-float soft-float +--- a/gcc/config/arm/linux-elf.h ++++ b/gcc/config/arm/linux-elf.h +@@ -60,7 +60,7 @@ + %{shared:-lc} \ + %{!shared:%{profile:-lc_p}%{!profile:-lc}}" + +-#define LIBGCC_SPEC "%{msoft-float:-lfloat} %{mfloat-abi=soft*:-lfloat} -lgcc" ++#define LIBGCC_SPEC "-lgcc" + + #define GLIBC_DYNAMIC_LINKER "/lib/ld-linux.so.2" + diff --git a/toolchain/gcc/patches/4.3.3+cs/910-mbsd_multi.patch b/toolchain/gcc/patches/4.3.3+cs/910-mbsd_multi.patch new file mode 100644 index 0000000000..17a0173e8f --- /dev/null +++ b/toolchain/gcc/patches/4.3.3+cs/910-mbsd_multi.patch @@ -0,0 +1,270 @@ + + This patch brings over a few features from MirBSD: + * -fhonour-copts + If this option is not given, it's warned (depending + on environment variables). This is to catch errors + of misbuilt packages which override CFLAGS themselves. + * -Werror-maybe-reset + Has the effect of -Wno-error if GCC_NO_WERROR is + set and not '0', a no-operation otherwise. This is + to be able to use -Werror in "make" but prevent + GNU autoconf generated configure scripts from + freaking out. + * Make -fno-strict-aliasing and -fno-delete-null-pointer-checks + the default for -O2/-Os, because they trigger gcc bugs + and can delete code with security implications. + + This patch was authored by Thorsten Glaser + with copyright assignment to the FSF in effect. + +--- a/gcc/c-opts.c ++++ b/gcc/c-opts.c +@@ -108,6 +108,9 @@ static size_t deferred_count; + /* Number of deferred options scanned for -include. */ + static size_t include_cursor; + ++/* Check if a port honours COPTS. 
*/ ++static int honour_copts = 0; ++ + static void set_Wimplicit (int); + static void handle_OPT_d (const char *); + static void set_std_cxx98 (int); +@@ -462,6 +465,14 @@ c_common_handle_option (size_t scode, co + enable_warning_as_error ("implicit-function-declaration", value, CL_C | CL_ObjC); + break; + ++ case OPT_Werror_maybe_reset: ++ { ++ char *ev = getenv ("GCC_NO_WERROR"); ++ if ((ev != NULL) && (*ev != '0')) ++ cpp_opts->warnings_are_errors = 0; ++ } ++ break; ++ + case OPT_Wformat: + set_Wformat (value); + break; +@@ -708,6 +719,12 @@ c_common_handle_option (size_t scode, co + flag_exceptions = value; + break; + ++ case OPT_fhonour_copts: ++ if (c_language == clk_c) { ++ honour_copts++; ++ } ++ break; ++ + case OPT_fimplement_inlines: + flag_implement_inlines = value; + break; +@@ -1248,6 +1265,47 @@ c_common_init (void) + /* Has to wait until now so that cpplib has its hash table. */ + init_pragma (); + ++ if (c_language == clk_c) { ++ char *ev = getenv ("GCC_HONOUR_COPTS"); ++ int evv; ++ if (ev == NULL) ++ evv = -1; ++ else if ((*ev == '0') || (*ev == '\0')) ++ evv = 0; ++ else if (*ev == '1') ++ evv = 1; ++ else if (*ev == '2') ++ evv = 2; ++ else if (*ev == 's') ++ evv = -1; ++ else { ++ warning (0, "unknown GCC_HONOUR_COPTS value, assuming 1"); ++ evv = 1; /* maybe depend this on something like MIRBSD_NATIVE? */ ++ } ++ if (evv == 1) { ++ if (honour_copts == 0) { ++ error ("someone does not honour COPTS at all in lenient mode"); ++ return false; ++ } else if (honour_copts != 1) { ++ warning (0, "someone does not honour COPTS correctly, passed %d times", ++ honour_copts); ++ } ++ } else if (evv == 2) { ++ if (honour_copts == 0) { ++ error ("someone does not honour COPTS at all in strict mode"); ++ return false; ++ } else if (honour_copts != 1) { ++ error ("someone does not honour COPTS correctly, passed %d times", ++ honour_copts); ++ return false; ++ } ++ } else if (evv == 0) { ++ if (honour_copts != 1) ++ inform ("someone does not honour COPTS correctly, passed %d times", ++ honour_copts); ++ } ++ } ++ + return true; + } + +--- a/gcc/c.opt ++++ b/gcc/c.opt +@@ -207,6 +207,10 @@ Werror-implicit-function-declaration + C ObjC RejectNegative Warning + This switch is deprecated; use -Werror=implicit-function-declaration instead + ++Werror-maybe-reset ++C ObjC C++ ObjC++ ++; Documented in common.opt ++ + Wfloat-equal + C ObjC C++ ObjC++ Var(warn_float_equal) Warning + Warn if testing floating point numbers for equality +@@ -590,6 +594,9 @@ C++ ObjC++ Optimization + fhonor-std + C++ ObjC++ + ++fhonour-copts ++C ObjC C++ ObjC++ RejectNegative ++ + fhosted + C ObjC + Assume normal C execution environment +--- a/gcc/common.opt ++++ b/gcc/common.opt +@@ -102,6 +102,10 @@ Werror= + Common Joined + Treat specified warning as error + ++Werror-maybe-reset ++Common ++If environment variable GCC_NO_WERROR is set, act as -Wno-error ++ + Wextra + Common Warning + Print extra (possibly unwanted) warnings +@@ -542,6 +546,9 @@ fguess-branch-probability + Common Report Var(flag_guess_branch_prob) Optimization + Enable guessing of branch probabilities + ++fhonour-copts ++Common RejectNegative ++ + ; Nonzero means ignore `#ident' directives. 0 means handle them. 
+ ; Generate position-independent code for executables if possible + ; On SVR4 targets, it also controls whether or not to emit a +--- a/gcc/opts.c ++++ b/gcc/opts.c +@@ -830,9 +830,6 @@ decode_options (unsigned int argc, const + flag_schedule_insns_after_reload = 1; + #endif + flag_regmove = 1; +- flag_strict_aliasing = 1; +- flag_strict_overflow = 1; +- flag_delete_null_pointer_checks = 1; + flag_reorder_blocks = 1; + flag_reorder_functions = 1; + flag_tree_store_ccp = 1; +@@ -853,6 +850,10 @@ decode_options (unsigned int argc, const + + if (optimize >= 3) + { ++ flag_strict_aliasing = 1; ++ flag_strict_overflow = 1; ++ flag_delete_null_pointer_checks = 1; ++ + flag_predictive_commoning = 1; + flag_inline_functions = 1; + flag_unswitch_loops = 1; +@@ -1444,6 +1445,17 @@ common_handle_option (size_t scode, cons + enable_warning_as_error (arg, value, lang_mask); + break; + ++ case OPT_Werror_maybe_reset: ++ { ++ char *ev = getenv ("GCC_NO_WERROR"); ++ if ((ev != NULL) && (*ev != '0')) ++ warnings_are_errors = 0; ++ } ++ break; ++ ++ case OPT_fhonour_copts: ++ break; ++ + case OPT_Wextra: + set_Wextra (value); + break; +--- a/gcc/doc/cppopts.texi ++++ b/gcc/doc/cppopts.texi +@@ -168,6 +168,11 @@ in older programs. This warning is on b + Make all warnings into hard errors. Source code which triggers warnings + will be rejected. + ++ at item -Werror-maybe-reset ++ at opindex Werror-maybe-reset ++Act like @samp{-Wno-error} if the @env{GCC_NO_WERROR} environment ++variable is set to anything other than 0 or empty. ++ + @item -Wsystem-headers + @opindex Wsystem-headers + Issue warnings for code in system headers. These are normally unhelpful +--- a/gcc/doc/invoke.texi ++++ b/gcc/doc/invoke.texi +@@ -233,7 +233,7 @@ Objective-C and Objective-C++ Dialects}. + -Wconversion -Wcoverage-mismatch -Wno-deprecated-declarations @gol + -Wdisabled-optimization -Wno-div-by-zero @gol + -Wempty-body -Wno-endif-labels @gol +--Werror -Werror=* @gol ++-Werror -Werror=* -Werror-maybe-reset @gol + -Wfatal-errors -Wfloat-equal -Wformat -Wformat=2 @gol + -Wno-format-extra-args -Wformat-nonliteral @gol + -Wformat-security -Wformat-y2k -Wignored-qualifiers @gol +@@ -4047,6 +4047,22 @@ This option is only supported for C and + @option{-Wall} and by @option{-pedantic}, which can be disabled with + @option{-Wno-pointer-sign}. + ++ at item -Werror-maybe-reset ++ at opindex Werror-maybe-reset ++Act like @samp{-Wno-error} if the @env{GCC_NO_WERROR} environment ++variable is set to anything other than 0 or empty. ++ ++ at item -fhonour-copts ++ at opindex fhonour-copts ++If @env{GCC_HONOUR_COPTS} is set to 1, abort if this option is not ++given at least once, and warn if it is given more than once. ++If @env{GCC_HONOUR_COPTS} is set to 2, abort if this option is not ++given exactly once. ++If @env{GCC_HONOUR_COPTS} is set to 0 or unset, warn if this option ++is not given exactly once. ++The warning is quelled if @env{GCC_HONOUR_COPTS} is set to @samp{s}. ++This flag and environment variable only affect the C language. ++ + @item -Wstack-protector + @opindex Wstack-protector + @opindex Wno-stack-protector +@@ -5522,7 +5538,7 @@ so, the first branch is redirected to ei + second branch or a point immediately following it, depending on whether + the condition is known to be true or false. + +-Enabled at levels @option{-O2}, @option{-O3}, @option{-Os}. ++Enabled at levels @option{-O3}. + + @item -fsplit-wide-types + @opindex fsplit-wide-types +@@ -5667,7 +5683,7 @@ safely dereference null pointers. 
Use + @option{-fno-delete-null-pointer-checks} to disable this optimization + for programs which depend on that behavior. + +-Enabled at levels @option{-O2}, @option{-O3}, @option{-Os}. ++Enabled at levels @option{-O3}. + + @item -fexpensive-optimizations + @opindex fexpensive-optimizations +--- a/gcc/java/jvspec.c ++++ b/gcc/java/jvspec.c +@@ -670,6 +670,7 @@ lang_specific_pre_link (void) + class name. Append dummy `.c' that can be stripped by set_input so %b + is correct. */ + set_input (concat (main_class_name, "main.c", NULL)); ++ putenv ("GCC_HONOUR_COPTS=s"); /* XXX hack! */ + err = do_spec (jvgenmain_spec); + if (err == 0) + { diff --git a/toolchain/gcc/patches/4.3.3+cs/993-arm_insn-opinit-RTX_CODE-fixup.patch b/toolchain/gcc/patches/4.3.3+cs/993-arm_insn-opinit-RTX_CODE-fixup.patch new file mode 100644 index 0000000000..cbe9f1b576 --- /dev/null +++ b/toolchain/gcc/patches/4.3.3+cs/993-arm_insn-opinit-RTX_CODE-fixup.patch @@ -0,0 +1,32 @@ +gcc/ChangeLog +2007-11-27 Bernhard Fischer <> + + * config/arm/arm-protos.h (arm_vector_mode_supported_p, + arm_hard_regno_mode_ok, const_ok_for_arm): Do not hide non-rtx related + function prototypes in RTX_CODE. + * genopinit.c: Include tm_p.h. + +--- a/gcc/config/arm/arm-protos.h ++++ b/gcc/config/arm/arm-protos.h +@@ -43,10 +43,10 @@ extern unsigned int arm_dbx_register_num + extern void arm_output_fn_unwind (FILE *, bool); + + +-#ifdef RTX_CODE + extern bool arm_vector_mode_supported_p (enum machine_mode); + extern int arm_hard_regno_mode_ok (unsigned int, enum machine_mode); + extern int const_ok_for_arm (HOST_WIDE_INT); ++#ifdef RTX_CODE + extern int arm_split_constant (RTX_CODE, enum machine_mode, rtx, + HOST_WIDE_INT, rtx, rtx, int); + extern RTX_CODE arm_canonicalize_comparison (RTX_CODE, enum machine_mode, +--- a/gcc/genopinit.c ++++ b/gcc/genopinit.c +@@ -487,6 +487,7 @@ from the machine description file `md'. + printf ("#include \"expr.h\"\n"); + printf ("#include \"optabs.h\"\n"); + printf ("#include \"reload.h\"\n\n"); ++ printf ("#include \"tm_p.h\"\n\n"); + + printf ("void\ninit_all_optabs (void)\n{\n"); + diff --git a/toolchain/gcc/patches/4.3.3+cs/998-gcc-4.3.0-fix-header.00.patch b/toolchain/gcc/patches/4.3.3+cs/998-gcc-4.3.0-fix-header.00.patch new file mode 100644 index 0000000000..fd1987370b --- /dev/null +++ b/toolchain/gcc/patches/4.3.3+cs/998-gcc-4.3.0-fix-header.00.patch @@ -0,0 +1,13 @@ +\\\\ +\\ gcc PR33200 +--- a/gcc/config.gcc ++++ b/gcc/config.gcc +@@ -1460,7 +1460,7 @@ i[34567]86-*-uwin*) + if test x$enable_threads = xyes; then + thread_file='win32' + fi +- use_fixproto=yes ++ # XXX: why? use_fixproto=yes + ;; + i[34567]86-*-interix3*) + tm_file="${tm_file} i386/unix.h i386/bsd.h i386/gas.h i386/i386-interix.h i386/i386-interix3.h interix.h interix3.h" diff --git a/toolchain/gcc/patches/4.3.3+cs/999-coldfire.patch b/toolchain/gcc/patches/4.3.3+cs/999-coldfire.patch new file mode 100644 index 0000000000..0e2a8c19e4 --- /dev/null +++ b/toolchain/gcc/patches/4.3.3+cs/999-coldfire.patch @@ -0,0 +1,10 @@ +--- a/gcc/config.gcc ++++ b/gcc/config.gcc +@@ -1678,6 +1678,7 @@ m68k-*-linux*) # Motorola m68k's runnin + if test x$sjlj != x1; then + tmake_file="$tmake_file m68k/t-slibgcc-elf-ver" + fi ++ tmake_file="m68k/t-floatlib m68k/t-m68kbare m68k/t-m68kelf" + ;; + m68k-*-rtems*) + default_m68k_cpu=68020 -- 2.30.2
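
A brief usage sketch of the two options introduced by 910-mbsd_multi.patch above may help readers evaluating this variant; the build commands and CFLAGS values below are illustrative placeholders, not part of this change:

    # -Werror-maybe-reset acts like -Wno-error whenever GCC_NO_WERROR is set
    # to a value other than 0, so autoconf-generated configure scripts are
    # not derailed by a -Werror added to the package build flags.
    GCC_NO_WERROR=1 ./configure
    make CFLAGS="-O2 -Werror -Werror-maybe-reset"

    # With GCC_HONOUR_COPTS=2 (strict mode) the C front end reports an error
    # unless -fhonour-copts was passed exactly once, which catches packages
    # that silently override COPTS/CFLAGS; only the C language is affected.
    GCC_HONOUR_COPTS=2 make CFLAGS="-O2 -fhonour-copts"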