[Committed] S/390: Fix trailing whitespaces

Andreas Krebbel krebbel@linux.vnet.ibm.com
Mon May 18 12:40:00 GMT 2009


Hi,

I've committed the attached patch fixing several trailing whitespace
issues in the S/390 back-end which crept in over time.  I personally
don't care enough about these kinds of things to fix it.  But there are
actually people complaining about this.  Since it is that easy to make
them happy ...

Before applying I've made sure that the back-end still compiles.

Bye,

-Andreas-

2009-05-18  Andreas Krebbel  <krebbel1@de.ibm.com>

	* config/s390/2064.md: Remove trailing whitespaces.
	* config/s390/2084.md: Likewise.
	* config/s390/constraints.md: Likewise.
	* config/s390/fixdfdi.h: Likewise.
	* config/s390/libgcc-glibc.ver: Likewise.
	* config/s390/s390-modes.def: Likewise.
	* config/s390/s390-protos.h: Likewise.
	* config/s390/s390.c: Likewise.
	* config/s390/s390.h: Likewise.
	* config/s390/s390.md: Likewise.
	* config/s390/tpf-unwind.h: Likewise.


Index: gcc/gcc/config/s390/2064.md
===================================================================
--- gcc.orig/gcc/config/s390/2064.md
+++ gcc/gcc/config/s390/2064.md
@@ -21,22 +21,22 @@
 
 ;;
 ;; References:
-;;   The microarchitecture of the IBM eServer z900 processor. 
+;;   The microarchitecture of the IBM eServer z900 processor.
 ;;   E.M. Schwarz et al.
 ;;   IBM Journal of Research and Development Vol. 46 No 4/5, 2002.
-;; 
+;;
 ;;            z900 (cpu 2064) pipeline
-;;     
+;;
 ;;                 dec
 ;;              --> | <---
 ;;  LA bypass  |  agen    |
-;;             |    |     | 
+;;             |    |     |
 ;;              --- c1    |  Load bypass
-;;                  |     | 
+;;                  |     |
 ;;                  c2----
 ;;                  |
-;;                  e1 
-;;                  | 
+;;                  e1
+;;                  |
 ;;                  wr
 
 ;; This scheduler description is also used for the g5 and g6.
@@ -46,12 +46,12 @@
 (define_cpu_unit "z_wr"   "z_ipu")
 
 
-(define_insn_reservation "z_la" 1 
+(define_insn_reservation "z_la" 1
   (and (eq_attr "cpu" "z900,g5,g6")
        (eq_attr "type" "la"))
   "z_e1,z_wr")
 
-(define_insn_reservation "z_larl" 1 
+(define_insn_reservation "z_larl" 1
   (and (eq_attr "cpu" "z900,g5,g6")
        (eq_attr "type" "larl"))
   "z_e1,z_wr")
@@ -101,32 +101,32 @@
   "z_e1,z_wr")
 
 ;;
-;; s390_agen_dep_p returns 1, if a register is set in the 
+;; s390_agen_dep_p returns 1, if a register is set in the
 ;; first insn and used in the dependent insn to form a address.
 ;;
 
 ;;
 ;; If an instruction uses a register to address memory, it needs
 ;; to be set 5 cycles in advance.
-;; 
+;;
 
-(define_bypass 5 "z_int,z_agen" 
+(define_bypass 5 "z_int,z_agen"
 	       "z_agen,z_la,z_call,z_load,z_store" "s390_agen_dep_p")
 
 ;;
-;; A load type instruction uses a bypass to feed the result back	
-;; to the address generation pipeline stage. 
+;; A load type instruction uses a bypass to feed the result back
+;; to the address generation pipeline stage.
 ;;
 
-(define_bypass 3 "z_load"    
+(define_bypass 3 "z_load"
 	         "z_agen,z_la,z_call,z_load,z_store" "s390_agen_dep_p")
 
 ;;
-;; A load address type instruction uses a bypass to feed the 
-;; result back to the address generation pipeline stage. 
+;; A load address type instruction uses a bypass to feed the
+;; result back to the address generation pipeline stage.
 ;;
 
-(define_bypass 2 "z_larl,z_la" 
+(define_bypass 2 "z_larl,z_la"
 	         "z_agen,z_la,z_call,z_load,z_store" "s390_agen_dep_p")
 
 
Index: gcc/gcc/config/s390/2084.md
===================================================================
--- gcc.orig/gcc/config/s390/2084.md
+++ gcc/gcc/config/s390/2084.md
@@ -76,38 +76,38 @@
 (define_insn_reservation "x_lr" 1
   (and (eq_attr "cpu" "z990,z9_109")
        (eq_attr "type" "lr"))
-  "x-e1-st,x-wr-st") 
+  "x-e1-st,x-wr-st")
 
-(define_insn_reservation "x_la" 1 
+(define_insn_reservation "x_la" 1
   (and (eq_attr "cpu" "z990,z9_109")
        (eq_attr "type" "la"))
-  "x-e1-st,x-wr-st") 
+  "x-e1-st,x-wr-st")
 
-(define_insn_reservation "x_larl" 1 
+(define_insn_reservation "x_larl" 1
   (and (eq_attr "cpu" "z990,z9_109")
        (eq_attr "type" "larl"))
-  "x-e1-st,x-wr-st") 
+  "x-e1-st,x-wr-st")
 
-(define_insn_reservation "x_load" 1 
+(define_insn_reservation "x_load" 1
   (and (eq_attr "cpu" "z990,z9_109")
        (eq_attr "type" "load"))
-  "x-e1-st+x-mem,x-wr-st") 
+  "x-e1-st+x-mem,x-wr-st")
 
-(define_insn_reservation "x_store" 1 
+(define_insn_reservation "x_store" 1
   (and (eq_attr "cpu" "z990,z9_109")
        (eq_attr "type" "store"))
-  "x-e1-st+x_store_tok,x-wr-st") 
+  "x-e1-st+x_store_tok,x-wr-st")
 
-(define_insn_reservation "x_branch" 1 
+(define_insn_reservation "x_branch" 1
   (and (eq_attr "cpu" "z990,z9_109")
        (eq_attr "type" "branch"))
-  "x_e1_r,x_wr_r") 
+  "x_e1_r,x_wr_r")
 
-(define_insn_reservation "x_call" 5 
+(define_insn_reservation "x_call" 5
   (and (eq_attr "cpu" "z990,z9_109")
        (eq_attr "type" "jsr"))
   "x-e1-np*5,x-wr-np")
- 
+
 (define_insn_reservation "x_mul_hi" 2
   (and (eq_attr "cpu" "z990,z9_109")
        (eq_attr "type" "imulhi"))
@@ -123,162 +123,162 @@
        (eq_attr "type" "idiv"))
   "x-e1-np*10,x-wr-np")
 
-(define_insn_reservation "x_sem" 17 
+(define_insn_reservation "x_sem" 17
   (and (eq_attr "cpu" "z990,z9_109")
        (eq_attr "type" "sem"))
-  "x-e1-np+x-mem,x-e1-np*16,x-wr-st") 
+  "x-e1-np+x-mem,x-e1-np*16,x-wr-st")
 
 ;;
 ;; Multicycle insns
 ;;
 
-(define_insn_reservation "x_cs" 1 
+(define_insn_reservation "x_cs" 1
   (and (eq_attr "cpu" "z990,z9_109")
        (eq_attr "type" "cs"))
-  "x-e1-np,x-wr-np") 
+  "x-e1-np,x-wr-np")
 
-(define_insn_reservation "x_vs" 1 
+(define_insn_reservation "x_vs" 1
   (and (eq_attr "cpu" "z990,z9_109")
        (eq_attr "type" "vs"))
-  "x-e1-np*10,x-wr-np") 
+  "x-e1-np*10,x-wr-np")
 
-(define_insn_reservation "x_stm" 1 
+(define_insn_reservation "x_stm" 1
   (and (eq_attr "cpu" "z990,z9_109")
        (eq_attr "type" "stm"))
-  "(x-e1-np+x_store_tok)*10,x-wr-np") 
+  "(x-e1-np+x_store_tok)*10,x-wr-np")
 
-(define_insn_reservation "x_lm" 1 
+(define_insn_reservation "x_lm" 1
   (and (eq_attr "cpu" "z990,z9_109")
        (eq_attr "type" "lm"))
-  "x-e1-np*10,x-wr-np") 
+  "x-e1-np*10,x-wr-np")
 
-(define_insn_reservation "x_other" 1 
+(define_insn_reservation "x_other" 1
   (and (eq_attr "cpu" "z990,z9_109")
        (eq_attr "type" "other"))
-  "x-e1-np,x-wr-np") 
+  "x-e1-np,x-wr-np")
 
 ;;
 ;; Floating point insns
 ;;
 
-(define_insn_reservation "x_fsimptf" 7 
+(define_insn_reservation "x_fsimptf" 7
   (and (eq_attr "cpu" "z990,z9_109")
        (eq_attr "type" "fsimptf"))
-  "x_e1_t*2,x-wr-fp") 
+  "x_e1_t*2,x-wr-fp")
 
-(define_insn_reservation "x_fsimpdf" 6 
+(define_insn_reservation "x_fsimpdf" 6
   (and (eq_attr "cpu" "z990,z9_109")
        (eq_attr "type" "fsimpdf,fmuldf"))
-  "x_e1_t,x-wr-fp") 
+  "x_e1_t,x-wr-fp")
 
-(define_insn_reservation "x_fsimpsf" 6 
+(define_insn_reservation "x_fsimpsf" 6
   (and (eq_attr "cpu" "z990,z9_109")
        (eq_attr "type" "fsimpsf,fmulsf"))
-  "x_e1_t,x-wr-fp") 
+  "x_e1_t,x-wr-fp")
 
 
 (define_insn_reservation "x_fmultf" 33
   (and (eq_attr "cpu" "z990,z9_109")
        (eq_attr "type" "fmultf"))
-  "x_e1_t*27,x-wr-fp") 
+  "x_e1_t*27,x-wr-fp")
 
 
 (define_insn_reservation "x_fdivtf" 82
   (and (eq_attr "cpu" "z990,z9_109")
        (eq_attr "type" "fdivtf,fsqrttf"))
-  "x_e1_t*76,x-wr-fp") 
+  "x_e1_t*76,x-wr-fp")
 
 (define_insn_reservation "x_fdivdf" 36
   (and (eq_attr "cpu" "z990,z9_109")
        (eq_attr "type" "fdivdf,fsqrtdf"))
-  "x_e1_t*30,x-wr-fp") 
+  "x_e1_t*30,x-wr-fp")
 
-(define_insn_reservation "x_fdivsf" 36 
+(define_insn_reservation "x_fdivsf" 36
   (and (eq_attr "cpu" "z990,z9_109")
        (eq_attr "type" "fdivsf,fsqrtsf"))
-  "x_e1_t*30,x-wr-fp") 
+  "x_e1_t*30,x-wr-fp")
 
 
-(define_insn_reservation "x_floadtf" 6 
+(define_insn_reservation "x_floadtf" 6
   (and (eq_attr "cpu" "z990,z9_109")
        (eq_attr "type" "floadtf"))
-  "x_e1_t,x-wr-fp") 
+  "x_e1_t,x-wr-fp")
 
-(define_insn_reservation "x_floaddf" 6 
+(define_insn_reservation "x_floaddf" 6
   (and (eq_attr "cpu" "z990,z9_109")
        (eq_attr "type" "floaddf"))
-  "x_e1_t,x-wr-fp") 
+  "x_e1_t,x-wr-fp")
 
-(define_insn_reservation "x_floadsf" 6 
+(define_insn_reservation "x_floadsf" 6
   (and (eq_attr "cpu" "z990,z9_109")
        (eq_attr "type" "floadsf"))
-  "x_e1_t,x-wr-fp") 
+  "x_e1_t,x-wr-fp")
 
 
-(define_insn_reservation "x_fstoredf" 1 
+(define_insn_reservation "x_fstoredf" 1
   (and (eq_attr "cpu" "z990,z9_109")
        (eq_attr "type" "fstoredf"))
-  "x_e1_t,x-wr-fp") 
+  "x_e1_t,x-wr-fp")
 
-(define_insn_reservation "x_fstoresf" 1 
+(define_insn_reservation "x_fstoresf" 1
   (and (eq_attr "cpu" "z990,z9_109")
        (eq_attr "type" "fstoresf"))
-  "x_e1_t,x-wr-fp") 
+  "x_e1_t,x-wr-fp")
 
 
 (define_insn_reservation "x_ftrunctf" 16
   (and (eq_attr "cpu" "z990,z9_109")
        (eq_attr "type" "ftrunctf"))
-  "x_e1_t*10,x-wr-fp") 
+  "x_e1_t*10,x-wr-fp")
 
 (define_insn_reservation "x_ftruncdf" 11
   (and (eq_attr "cpu" "z990,z9_109")
        (eq_attr "type" "ftruncdf"))
-  "x_e1_t*5,x-wr-fp") 
+  "x_e1_t*5,x-wr-fp")
 
 
-(define_insn_reservation "x_ftoi" 1 
+(define_insn_reservation "x_ftoi" 1
   (and (eq_attr "cpu" "z990,z9_109")
        (eq_attr "type" "ftoi"))
-  "x_e1_t*3,x-wr-fp") 
+  "x_e1_t*3,x-wr-fp")
 
-(define_insn_reservation "x_itof" 7 
+(define_insn_reservation "x_itof" 7
   (and (eq_attr "cpu" "z990,z9_109")
        (eq_attr "type" "itoftf,itofdf,itofsf"))
-  "x_e1_t*3,x-wr-fp") 
+  "x_e1_t*3,x-wr-fp")
 
 (define_bypass 1 "x_fsimpdf" "x_fstoredf")
 
 (define_bypass 1 "x_fsimpsf" "x_fstoresf")
 
 (define_bypass 1 "x_floaddf" "x_fsimpdf,x_fstoredf,x_floaddf")
-	         
+
 (define_bypass 1 "x_floadsf" "x_fsimpsf,x_fstoresf,x_floadsf")
 
 ;;
-;; s390_agen_dep_p returns 1, if a register is set in the 
+;; s390_agen_dep_p returns 1, if a register is set in the
 ;; first insn and used in the dependent insn to form a address.
 ;;
 
 ;;
 ;; If an instruction uses a register to address memory, it needs
 ;; to be set 5 cycles in advance.
-;; 
+;;
 
-(define_bypass 5 "x_int,x_agen,x_lr" 
+(define_bypass 5 "x_int,x_agen,x_lr"
                  "x_agen,x_la,x_branch,x_call,x_load,x_store,x_cs,x_stm,x_lm,x_other"
 	         "s390_agen_dep_p")
 
-(define_bypass 9 "x_int,x_agen,x_lr" 
+(define_bypass 9 "x_int,x_agen,x_lr"
                  "x_floadtf, x_floaddf, x_floadsf, x_fstoredf, x_fstoresf,\
 		  x_fsimpdf, x_fsimpsf, x_fdivdf, x_fdivsf"
 	         "s390_agen_dep_p")
 ;;
-;; A load type instruction uses a bypass to feed the result back	
-;; to the address generation pipeline stage. 
+;; A load type instruction uses a bypass to feed the result back
+;; to the address generation pipeline stage.
 ;;
 
-(define_bypass 4 "x_load"    
+(define_bypass 4 "x_load"
                  "x_agen,x_la,x_branch,x_call,x_load,x_store,x_cs,x_stm,x_lm,x_other"
 	         "s390_agen_dep_p")
 
@@ -288,11 +288,11 @@
 	         "s390_agen_dep_p")
 
 ;;
-;; A load address type instruction uses a bypass to feed the 
-;; result back to the address generation pipeline stage. 
+;; A load address type instruction uses a bypass to feed the
+;; result back to the address generation pipeline stage.
 ;;
 
-(define_bypass 3 "x_larl,x_la" 
+(define_bypass 3 "x_larl,x_la"
                  "x_agen,x_la,x_branch,x_call,x_load,x_store,x_cs,x_stm,x_lm,x_other"
 	         "s390_agen_dep_p")
 
Index: gcc/gcc/config/s390/constraints.md
===================================================================
--- gcc.orig/gcc/config/s390/constraints.md
+++ gcc/gcc/config/s390/constraints.md
@@ -76,27 +76,27 @@
 ;;  Register constraints.
 ;;
 
-(define_register_constraint "a" 
+(define_register_constraint "a"
   "ADDR_REGS"
   "Any address register from 1 to 15.")
 
 
-(define_register_constraint "c" 
+(define_register_constraint "c"
   "CC_REGS"
   "Condition code register 33")
 
 
-(define_register_constraint "d" 
+(define_register_constraint "d"
   "GENERAL_REGS"
   "Any register from 0 to 15")
 
 
-(define_register_constraint "f" 
+(define_register_constraint "f"
   "FP_REGS"
   "Floating point registers")
 
 
-(define_register_constraint "t" 
+(define_register_constraint "t"
   "ACCESS_REGS"
   "@internal
    Access registers 36 and 37")
@@ -187,9 +187,9 @@
 ;;         is specified instead of a part number, the constraint matches
 ;;         if there is any single part with non-default value.
 ;;
-;; The following patterns define only those constraints that are actually 
-;; used in s390.md.  If you need an additional one, simply add it in the 
-;; obvious way.  Function s390_N_constraint_str is ready to handle all 
+;; The following patterns define only those constraints that are actually
+;; used in s390.md.  If you need an additional one, simply add it in the
+;; obvious way.  Function s390_N_constraint_str is ready to handle all
 ;; combinations.
 ;;
 
@@ -409,53 +409,53 @@ constraint."
                || s390_mem_constraint (\"T\", op)"))
 
 (define_memory_constraint "AQ"
-  "@internal 
+  "@internal
    Offsettable memory reference without index register and with short displacement"
   (match_test "s390_mem_constraint (\"AQ\", op)"))
 
 
 (define_memory_constraint "AR"
-  "@internal 
+  "@internal
    Offsettable memory reference with index register and short displacement"
   (match_test "s390_mem_constraint (\"AR\", op)"))
 
 
 (define_memory_constraint "AS"
-  "@internal 
+  "@internal
    Offsettable memory reference without index register but with long displacement"
   (match_test "s390_mem_constraint (\"AS\", op)"))
 
 
 (define_memory_constraint "AT"
-  "@internal 
+  "@internal
    Offsettable memory reference with index register and long displacement"
   (match_test "s390_mem_constraint (\"AT\", op)"))
 
 
 
 (define_constraint "BQ"
-  "@internal 
-   Memory reference without index register and with short 
+  "@internal
+   Memory reference without index register and with short
    displacement that does *not* refer to a literal pool entry."
   (match_test "s390_mem_constraint (\"BQ\", op)"))
 
 
 (define_constraint "BR"
-  "@internal 
+  "@internal
    Memory reference with index register and short displacement that
    does *not* refer to a literal pool entry. "
   (match_test "s390_mem_constraint (\"BR\", op)"))
 
 
 (define_constraint "BS"
-  "@internal 
+  "@internal
    Memory reference without index register but with long displacement
    that does *not* refer to a literal pool entry. "
   (match_test "s390_mem_constraint (\"BS\", op)"))
 
 
 (define_constraint "BT"
-  "@internal 
+  "@internal
    Memory reference with index register and long displacement that
    does *not* refer to a literal pool entry. "
   (match_test "s390_mem_constraint (\"BT\", op)"))
Index: gcc/gcc/config/s390/fixdfdi.h
===================================================================
--- gcc.orig/gcc/config/s390/fixdfdi.h
+++ gcc/gcc/config/s390/fixdfdi.h
@@ -63,12 +63,12 @@ __fixunstfdi (long double a1)
     if (!EXPD (dl1) || SIGND(dl1))
       return 0;
 
-    /* The exponent - considered the binary point at the right end of 
+    /* The exponent - considered the binary point at the right end of
        the mantissa.  */
     exp = EXPD (dl1) - EXPONENT_BIAS - MANTISSA_BITS;
 
     /* number < 1: If the mantissa would need to be right-shifted more bits than
-       its size (plus the implied one bit on the left) the result would be 
+       its size (plus the implied one bit on the left) the result would be
        zero.  */
     if (exp <= -PRECISION)
       return 0;
@@ -238,7 +238,7 @@ __fixunsdfdi (double a1)
     /* shift down until exp < 12 or l = 0 */
     if (exp > 0)
       l <<= exp;
-    else 
+    else
       l >>= -exp;
 
     return l;
@@ -313,7 +313,7 @@ __fixdfdi (double a1)
     /* shift down until exp < 12 or l = 0 */
     if (exp > 0)
       l <<= exp;
-    else 
+    else
       l >>= -exp;
 
     return (SIGND (dl1) ? -l : l);
@@ -381,7 +381,7 @@ __fixunssfdi (float a1)
 
     if (exp > 0)
       l <<= exp;
-    else 
+    else
       l >>= -exp;
 
     return l;
@@ -452,7 +452,7 @@ __fixsfdi (float a1)
 
     if (exp > 0)
       l <<= exp;
-    else 
+    else
       l >>= -exp;
 
     return (SIGN (fl1) ? -l : l);
Index: gcc/gcc/config/s390/libgcc-glibc.ver
===================================================================
--- gcc.orig/gcc/config/s390/libgcc-glibc.ver
+++ gcc/gcc/config/s390/libgcc-glibc.ver
@@ -23,7 +23,7 @@
 # to defer implementation of these routines to libgcc.so via DT_AUXILIARY.
 
 # Note that we cannot use the default libgcc-glibc.ver file on s390x,
-# because GLIBC_2.0 does not exist on this architecture, as the first 
+# because GLIBC_2.0 does not exist on this architecture, as the first
 # ever glibc release on the platform was GLIBC_2.2.
 
 %ifndef __s390x__
Index: gcc/gcc/config/s390/s390-modes.def
===================================================================
--- gcc.orig/gcc/config/s390/s390-modes.def
+++ gcc/gcc/config/s390/s390-modes.def
@@ -45,8 +45,8 @@ Signed compares
 
 CCS:  EQ          LT           GT          UNORDERED  (LTGFR, LTGR, LTR, ICM/Y,
                                                        LTDBR, LTDR, LTEBR, LTER,
-                                                       CG/R, C/R/Y, CGHI, CHI, 
-                                                       CDB/R, CD/R, CEB/R, CE/R, 
+                                                       CG/R, C/R/Y, CGHI, CHI,
+                                                       CDB/R, CD/R, CEB/R, CE/R,
                                                        ADB/R, AEB/R, SDB/R, SEB/R,
                                                        SRAG, SRA, SRDA)
 CCSR: EQ          GT           LT          UNORDERED  (CGF/R, CH/Y)
@@ -60,7 +60,7 @@ CCAN: EQ          LT           GT       
 Condition codes of unsigned adds and subs
 
 CCL:  EQ          NE           EQ          NE         (ALGF/R, ALG/R, AL/R/Y,
-                                                       ALCG/R, ALC/R, 
+                                                       ALCG/R, ALC/R,
                                                        SLGF/R, SLG/R, SL/R/Y,
                                                        SLBG/R, SLB/R)
 CCL1: GEU         GEU          LTU         LTU        (ALG/R, AL/R/Y)
@@ -69,14 +69,14 @@ CCL3: EQ          LTU          EQ       
 
 Test under mask checks
 
-CCT:  EQ          NE           NE          NE         (ICM/Y, TML, CG/R, CGHI, 
+CCT:  EQ          NE           NE          NE         (ICM/Y, TML, CG/R, CGHI,
                                                        C/R/Y, CHI, NG/R, N/R/Y,
                                                        OG/R, O/R/Y, XG/R, X/R/Y)
 CCT1: NE          EQ           NE          NE         (TMH, TML)
 CCT2: NE          NE           EQ          NE         (TMH, TML)
 CCT3: NE          NE           NE          EQ         (TMH, TML)
 
-CCA and CCT modes are request only modes. These modes are never returned by 
+CCA and CCT modes are request only modes. These modes are never returned by
 s390_select_cc_mode. They are only intended to match other modes.
 
 Requested mode            -> Destination CC register mode
@@ -89,11 +89,11 @@ CCA                       -> CCAP, CCAN
 
 CCAP, CCAN
 
-The CC obtained from add instruction usually can't be used for comparisons 
+The CC obtained from add instruction usually can't be used for comparisons
 because its coupling with overflow flag. In case of an overflow the
 less than/greater than data are lost. Nevertheless a comparison can be done
 whenever immediate values are involved because they are known at compile time.
-If you know whether the used constant is positive or negative you can predict 
+If you know whether the used constant is positive or negative you can predict
 the sign of the result even in case of an overflow.
 
 
@@ -103,7 +103,7 @@ If bits of an integer masked with an AND
 mask instructions turn out to be very handy for a set of special cases.
 The simple cases are checks whether all masked bits are zero or ones:
 
-  int a; 
+  int a;
   if ((a & (16 + 128)) == 0)          -> CCT/CCZ
   if ((a & (16 + 128)) == 16 + 128)   -> CCT3
 
@@ -120,15 +120,15 @@ CCSR, CCUR
 
 There are several instructions comparing 32 bit with 64-bit unsigned/signed
 values. Such instructions can be considered to have a builtin zero/sign_extend.
-The problem is that in the RTL (to be canonical) the zero/sign extended operand 
-has to be the first one but the machine instructions like it the other way 
-around. The following both modes can be considered as CCS and CCU modes with 
+The problem is that in the RTL (to be canonical) the zero/sign extended operand
+has to be the first one but the machine instructions like it the other way
+around. The following both modes can be considered as CCS and CCU modes with
 exchanged operands.
 
 
 CCL1, CCL2
 
-These modes represent the result of overflow checks. 
+These modes represent the result of overflow checks.
 
 if (a + b < a) -> CCL1 state of the carry bit   (CC2 | CC3)
 if (a - b > a) -> CCL2 state of the borrow bit  (CC0 | CC1)
@@ -142,7 +142,7 @@ CCL3
 
 A logical subtract instruction sets the borrow bit in case of an overflow.
 The resulting condition code of those instructions is represented by the
-CCL3 mode. Together with the CCU mode this mode is used for jumpless 
+CCL3 mode. Together with the CCU mode this mode is used for jumpless
 implementations of several if-constructs - see s390_expand_addcc for more
 details.
 
@@ -152,7 +152,7 @@ The compare and swap instructions sets t
 operands were equal/unequal. The CCZ1 mode ensures the result can be
 effectively placed into a register.
 
-*/   
+*/
 
 
 CC_MODE (CCZ);
Index: gcc/gcc/config/s390/s390-protos.h
===================================================================
--- gcc.orig/gcc/config/s390/s390-protos.h
+++ gcc/gcc/config/s390/s390-protos.h
@@ -22,7 +22,7 @@ along with GCC; see the file COPYING3.  
 
 
 
-/* Prototypes of functions used for constraint evaluation in 
+/* Prototypes of functions used for constraint evaluation in
    constraints.c.  */
 
 extern int s390_mem_constraint (const char *str, rtx op);
@@ -92,7 +92,7 @@ extern void s390_expand_cmpmem (rtx, rtx
 extern bool s390_expand_addcc (enum rtx_code, rtx, rtx, rtx, rtx, rtx);
 extern bool s390_expand_insv (rtx, rtx, rtx, rtx);
 extern void s390_expand_cs_hqi (enum machine_mode, rtx, rtx, rtx, rtx);
-extern void s390_expand_atomic (enum machine_mode, enum rtx_code, 
+extern void s390_expand_atomic (enum machine_mode, enum rtx_code,
 				rtx, rtx, rtx, bool);
 extern rtx s390_return_addr_rtx (int, rtx);
 extern rtx s390_back_chain_rtx (void);
Index: gcc/gcc/config/s390/s390.c
===================================================================
--- gcc.orig/gcc/config/s390/s390.c
+++ gcc/gcc/config/s390/s390.c
@@ -56,7 +56,7 @@ along with GCC; see the file COPYING3.  
 
 /* Define the specific costs for a given cpu.  */
 
-struct processor_costs 
+struct processor_costs
 {
   /* multiplication */
   const int m;        /* cost of an M instruction.  */
@@ -94,7 +94,7 @@ struct processor_costs 
 const struct processor_costs *s390_cost;
 
 static const
-struct processor_costs z900_cost = 
+struct processor_costs z900_cost =
 {
   COSTS_N_INSNS (5),     /* M     */
   COSTS_N_INSNS (10),    /* MGHI  */
@@ -126,7 +126,7 @@ struct processor_costs z900_cost = 
 };
 
 static const
-struct processor_costs z990_cost = 
+struct processor_costs z990_cost =
 {
   COSTS_N_INSNS (4),     /* M     */
   COSTS_N_INSNS (2),     /* MGHI  */
@@ -158,7 +158,7 @@ struct processor_costs z990_cost = 
 };
 
 static const
-struct processor_costs z9_109_cost = 
+struct processor_costs z9_109_cost =
 {
   COSTS_N_INSNS (4),     /* M     */
   COSTS_N_INSNS (2),     /* MGHI  */
@@ -252,7 +252,7 @@ HOST_WIDE_INT s390_warn_framesize = 0;
 HOST_WIDE_INT s390_stack_size = 0;
 HOST_WIDE_INT s390_stack_guard = 0;
 
-/* The following structure is embedded in the machine 
+/* The following structure is embedded in the machine
    specific part of struct function.  */
 
 struct GTY (()) s390_frame_layout
@@ -275,8 +275,8 @@ struct GTY (()) s390_frame_layout
   int last_save_gpr;
   int last_restore_gpr;
 
-  /* Bits standing for floating point registers. Set, if the 
-     respective register has to be saved. Starting with reg 16 (f0) 
+  /* Bits standing for floating point registers. Set, if the
+     respective register has to be saved. Starting with reg 16 (f0)
      at the rightmost bit.
      Bit 15 -  8  7  6  5  4  3  2  1  0
      fpr 15 -  8  7  5  3  1  6  4  2  0
@@ -400,7 +400,7 @@ s390_cc_modes_compatible (enum machine_m
     case CCZ1mode:
       if (m2 == CCZmode)
 	return m1;
-      
+
       return VOIDmode;
 
     default:
@@ -510,7 +510,7 @@ s390_tm_ccmode (rtx op1, rtx op2, bool m
   if (INTVAL (op2) == 0)
     return CCTmode;
 
-  /* Selected bits all one: CC3. 
+  /* Selected bits all one: CC3.
      e.g.: int a; if ((a & (16 + 128)) == 16 + 128) */
   if (INTVAL (op2) == INTVAL (op1))
     return CCT3mode;
@@ -582,7 +582,7 @@ s390_select_ccmode (enum rtx_code code, 
       case GT:
 	/* The only overflow condition of NEG and ABS happens when
 	   -INT_MAX is used as parameter, which stays negative. So
-	   we have an overflow from a positive value to a negative. 
+	   we have an overflow from a positive value to a negative.
 	   Using CCAP mode the resulting cc can be used for comparisons.  */
 	if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
 	    && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
@@ -591,7 +591,7 @@ s390_select_ccmode (enum rtx_code code, 
  	/* If constants are involved in an add instruction it is possible to use
  	   the resulting cc for comparisons with zero. Knowing the sign of the
 	   constant the overflow behavior gets predictable. e.g.:
- 	     int a, b; if ((b = a + c) > 0)  
+ 	     int a, b; if ((b = a + c) > 0)
  	   with c as a constant value: c < 0 -> CCAN and c >= 0 -> CCAP  */
 	if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
 	    && CONST_OK_FOR_K (INTVAL (XEXP (op0, 1))))
@@ -714,7 +714,7 @@ s390_canonicalize_comparison (enum rtx_c
       && GET_CODE (*op1) == CONST_INT
       && INTVAL (*op1) == 0xffff
       && SCALAR_INT_MODE_P (GET_MODE (*op0))
-      && (nonzero_bits (*op0, GET_MODE (*op0)) 
+      && (nonzero_bits (*op0, GET_MODE (*op0))
 	  & ~(unsigned HOST_WIDE_INT) 0xffff) == 0)
     {
       *op0 = gen_lowpart (HImode, *op0);
@@ -822,7 +822,7 @@ s390_emit_compare (enum rtx_code code, r
       emit_insn (gen_rtx_SET (VOIDmode, cc, gen_rtx_COMPARE (mode, op0, op1)));
     }
 
-  return gen_rtx_fmt_ee (code, VOIDmode, cc, const0_rtx); 
+  return gen_rtx_fmt_ee (code, VOIDmode, cc, const0_rtx);
 }
 
 /* Emit a SImode compare and swap instruction setting MEM to NEW_RTX if OLD
@@ -1296,9 +1296,9 @@ s390_overlap_p (rtx mem1, rtx mem2, HOST
 
   /* This overlapping check is used by peepholes merging memory block operations.
      Overlapping operations would otherwise be recognized by the S/390 hardware
-     and would fall back to a slower implementation. Allowing overlapping 
+     and would fall back to a slower implementation. Allowing overlapping
      operations would lead to slow code but not to wrong code. Therefore we are
-     somewhat optimistic if we cannot prove that the memory blocks are 
+     somewhat optimistic if we cannot prove that the memory blocks are
      overlapping.
      That's why we return false here although this may accept operations on
      overlapping memory areas.  */
@@ -1621,7 +1621,7 @@ override_options (void)
 	error ("stack size must not be greater than 64k");
     }
   else if (s390_stack_guard)
-    error ("-mstack-guard implies use of -mstack-size"); 
+    error ("-mstack-guard implies use of -mstack-size");
 
 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
   if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
@@ -1804,7 +1804,7 @@ s390_decompose_address (rtx addr, struct
 	  {
 	  case UNSPEC_LTREF:
 	    if (!disp)
-	      disp = gen_rtx_UNSPEC (Pmode, 
+	      disp = gen_rtx_UNSPEC (Pmode,
 				     gen_rtvec (1, XVECEXP (base, 0, 0)),
 				     UNSPEC_LTREL_OFFSET);
 	    else
@@ -1824,8 +1824,8 @@ s390_decompose_address (rtx addr, struct
 	    return false;
 	  }
 
-      if (!REG_P (base) 
-	  || (GET_MODE (base) != SImode 
+      if (!REG_P (base)
+	  || (GET_MODE (base) != SImode
 	      && GET_MODE (base) != Pmode))
 	return false;
 
@@ -1852,7 +1852,7 @@ s390_decompose_address (rtx addr, struct
 	  {
 	  case UNSPEC_LTREF:
 	    if (!disp)
-	      disp = gen_rtx_UNSPEC (Pmode, 
+	      disp = gen_rtx_UNSPEC (Pmode,
 				     gen_rtvec (1, XVECEXP (indx, 0, 0)),
 				     UNSPEC_LTREL_OFFSET);
 	    else
@@ -1872,7 +1872,7 @@ s390_decompose_address (rtx addr, struct
 	    return false;
 	  }
 
-      if (!REG_P (indx) 
+      if (!REG_P (indx)
 	  || (GET_MODE (indx) != SImode
 	      && GET_MODE (indx) != Pmode))
 	return false;
@@ -1904,21 +1904,21 @@ s390_decompose_address (rtx addr, struct
   /* Validate displacement.  */
   if (!disp)
     {
-      /* If virtual registers are involved, the displacement will change later 
-	 anyway as the virtual registers get eliminated.  This could make a 
-	 valid displacement invalid, but it is more likely to make an invalid 
-	 displacement valid, because we sometimes access the register save area 
+      /* If virtual registers are involved, the displacement will change later
+	 anyway as the virtual registers get eliminated.  This could make a
+	 valid displacement invalid, but it is more likely to make an invalid
+	 displacement valid, because we sometimes access the register save area
 	 via negative offsets to one of those registers.
 	 Thus we don't check the displacement for validity here.  If after
 	 elimination the displacement turns out to be invalid after all,
 	 this is fixed up by reload in any case.  */
-      if (base != arg_pointer_rtx 
-	  && indx != arg_pointer_rtx 
-	  && base != return_address_pointer_rtx 
+      if (base != arg_pointer_rtx
+	  && indx != arg_pointer_rtx
+	  && base != return_address_pointer_rtx
 	  && indx != return_address_pointer_rtx
-	  && base != frame_pointer_rtx 
+	  && base != frame_pointer_rtx
 	  && indx != frame_pointer_rtx
-	  && base != virtual_stack_vars_rtx 
+	  && base != virtual_stack_vars_rtx
 	  && indx != virtual_stack_vars_rtx)
 	if (!DISP_IN_RANGE (offset))
 	  return false;
@@ -2271,8 +2271,8 @@ s390_float_const_zero_p (rtx value)
 
 /* Compute a (partial) cost for rtx X.  Return true if the complete
    cost has been computed, and false if subexpressions should be
-   scanned.  In either case, *TOTAL contains the cost result.  
-   CODE contains GET_CODE (x), OUTER_CODE contains the code 
+   scanned.  In either case, *TOTAL contains the cost result.
+   CODE contains GET_CODE (x), OUTER_CODE contains the code
    of the superexpression of x.  */
 
 static bool
@@ -2323,7 +2323,7 @@ s390_rtx_costs (rtx x, int code, int out
       *total = COSTS_N_INSNS (1);
       return false;
 
-    case MULT:      
+    case MULT:
       switch (GET_MODE (x))
 	{
 	case SImode:
@@ -3104,11 +3104,11 @@ s390_legitimate_address_p (enum machine_
     }
   else
     {
-      if (ad.base 
+      if (ad.base
 	  && !(REGNO (ad.base) >= FIRST_PSEUDO_REGISTER
 	       || REGNO_REG_CLASS (REGNO (ad.base)) == ADDR_REGS))
 	return false;
-      
+
       if (ad.indx
 	  && !(REGNO (ad.indx) >= FIRST_PSEUDO_REGISTER
 	       || REGNO_REG_CLASS (REGNO (ad.indx)) == ADDR_REGS))
@@ -3354,7 +3354,7 @@ legitimize_pic_address (rtx orig, rtx re
                     gcc_unreachable ();
                 }
 	    }
-	  else 
+	  else
 	    gcc_assert (GET_CODE (addr) == PLUS);
 	}
       if (GET_CODE (addr) == PLUS)
@@ -3743,7 +3743,7 @@ s390_legitimize_address (rtx x, rtx oldx
 	return x;
     }
   else if (GET_CODE (x) == PLUS
-	   && (TLS_SYMBOLIC_CONST (XEXP (x, 0)) 
+	   && (TLS_SYMBOLIC_CONST (XEXP (x, 0))
 	       || TLS_SYMBOLIC_CONST (XEXP (x, 1))))
     {
       return x;
@@ -3822,7 +3822,7 @@ s390_legitimize_address (rtx x, rtx oldx
    MODE is the mode of the enclosing MEM.  OPNUM is the operand number
    and TYPE is the reload type of the current reload.  */
 
-rtx 
+rtx
 legitimize_reload_address (rtx ad, enum machine_mode mode ATTRIBUTE_UNUSED,
 			   int opnum, int type)
 {
@@ -3854,7 +3854,7 @@ legitimize_reload_address (rtx ad, enum 
       new_rtx = gen_rtx_PLUS (Pmode, tem, GEN_INT (lower));
 
       push_reload (XEXP (tem, 1), 0, &XEXP (tem, 1), 0,
-		   BASE_REG_CLASS, Pmode, VOIDmode, 0, 0, 
+		   BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
 		   opnum, (enum reload_type) type);
       return new_rtx;
     }
@@ -3952,7 +3952,7 @@ s390_expand_setmem (rtx dst, rtx len, rt
     return;
 
   gcc_assert (GET_CODE (val) == CONST_INT || GET_MODE (val) == QImode);
-  
+
   if (GET_CODE (len) == CONST_INT && INTVAL (len) > 0 && INTVAL (len) <= 257)
     {
       if (val == const0_rtx && INTVAL (len) <= 256)
@@ -3961,7 +3961,7 @@ s390_expand_setmem (rtx dst, rtx len, rt
 	{
 	  /* Initialize memory by storing the first byte.  */
 	  emit_move_insn (adjust_address (dst, QImode, 0), val);
-	  
+
 	  if (INTVAL (len) > 1)
 	    {
 	      /* Initiate 1 byte overlap move.
@@ -3972,7 +3972,7 @@ s390_expand_setmem (rtx dst, rtx len, rt
 	      rtx dstp1 = adjust_address (dst, VOIDmode, 1);
 	      set_mem_size (dst, const1_rtx);
 
-	      emit_insn (gen_movmem_short (dstp1, dst, 
+	      emit_insn (gen_movmem_short (dstp1, dst,
 					   GEN_INT (INTVAL (len) - 2)));
 	    }
 	}
@@ -4018,7 +4018,7 @@ s390_expand_setmem (rtx dst, rtx len, rt
 
 	  /* Initialize memory by storing the first byte.  */
 	  emit_move_insn (adjust_address (dst, QImode, 0), val);
-	  
+
 	  /* If count is 1 we are done.  */
 	  emit_cmp_and_jump_insns (count, const1_rtx,
 				   EQ, NULL_RTX, mode, 1, end_label);
@@ -4268,9 +4268,9 @@ s390_expand_addcc (enum rtx_code cmp_cod
 	}
 
       p = rtvec_alloc (2);
-      RTVEC_ELT (p, 0) = 
+      RTVEC_ELT (p, 0) =
         gen_rtx_SET (VOIDmode, dst, op_res);
-      RTVEC_ELT (p, 1) = 
+      RTVEC_ELT (p, 1) =
 	gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
       emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
 
@@ -4329,15 +4329,15 @@ s390_expand_addcc (enum rtx_code cmp_cod
       if (!register_operand (src, GET_MODE (dst)))
 	src = force_reg (GET_MODE (dst), src);
 
-      op_res = gen_rtx_MINUS (GET_MODE (dst), 
-			      gen_rtx_MINUS (GET_MODE (dst), src, const0_rtx), 
-			      gen_rtx_fmt_ee (cmp_code, GET_MODE (dst), 
-					      gen_rtx_REG (cc_mode, CC_REGNUM), 
+      op_res = gen_rtx_MINUS (GET_MODE (dst),
+			      gen_rtx_MINUS (GET_MODE (dst), src, const0_rtx),
+			      gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
+					      gen_rtx_REG (cc_mode, CC_REGNUM),
 					      const0_rtx));
       p = rtvec_alloc (2);
-      RTVEC_ELT (p, 0) = 
+      RTVEC_ELT (p, 0) =
         gen_rtx_SET (VOIDmode, dst, op_res);
-      RTVEC_ELT (p, 1) = 
+      RTVEC_ELT (p, 1) =
 	gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
       emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
 
@@ -4397,7 +4397,7 @@ s390_expand_insv (rtx dest, rtx op1, rtx
 	  set_mem_size (dest, GEN_INT (size));
 	  s390_expand_movmem (dest, src_mem, GEN_INT (size));
 	}
-	  
+
       /* (set (ze (mem)) (reg)).  */
       else if (register_operand (src, word_mode))
 	{
@@ -4410,7 +4410,7 @@ s390_expand_insv (rtx dest, rtx op1, rtx
 	      int stcmh_width = bitsize - GET_MODE_BITSIZE (SImode);
 	      int size = stcmh_width / BITS_PER_UNIT;
 
-	      emit_move_insn (adjust_address (dest, SImode, size), 
+	      emit_move_insn (adjust_address (dest, SImode, size),
 			      gen_lowpart (SImode, src));
 	      set_mem_size (dest, GEN_INT (size));
 	      emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest, GEN_INT
@@ -4427,7 +4427,7 @@ s390_expand_insv (rtx dest, rtx op1, rtx
 
   /* (set (ze (reg)) (const_int)).  */
   if (TARGET_ZARCH
-      && register_operand (dest, word_mode) 
+      && register_operand (dest, word_mode)
       && (bitpos % 16) == 0
       && (bitsize % 16) == 0
       && const_int_operand (src, VOIDmode))
@@ -4447,9 +4447,9 @@ s390_expand_insv (rtx dest, rtx op1, rtx
 
 	  putsize = GET_MODE_BITSIZE (putmode);
 	  regpos -= putsize;
-	  emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest, 
+	  emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
 						GEN_INT (putsize),
-						GEN_INT (regpos)), 
+						GEN_INT (regpos)),
 			  gen_int_mode (val, putmode));
 	  val >>= putsize;
 	}
@@ -4468,16 +4468,16 @@ s390_expand_mask_and_shift (rtx val, enu
 {
   val = expand_simple_binop (SImode, AND, val, GEN_INT (GET_MODE_MASK (mode)),
 			     NULL_RTX, 1, OPTAB_DIRECT);
-  return expand_simple_binop (SImode, ASHIFT, val, count, 
+  return expand_simple_binop (SImode, ASHIFT, val, count,
 			      NULL_RTX, 1, OPTAB_DIRECT);
 }
 
 /* Structure to hold the initial parameters for a compare_and_swap operation
-   in HImode and QImode.  */ 
+   in HImode and QImode.  */
 
 struct alignment_context
 {
-  rtx memsi;	  /* SI aligned memory location.  */ 
+  rtx memsi;	  /* SI aligned memory location.  */
   rtx shift;	  /* Bit offset with regard to lsb.  */
   rtx modemask;	  /* Mask of the HQImode shifted by SHIFT bits.  */
   rtx modemaski;  /* ~modemask */
@@ -4529,7 +4529,7 @@ init_alignment_context (struct alignment
   ac->shift = expand_simple_binop (SImode, MULT, ac->shift, GEN_INT (BITS_PER_UNIT),
 				  NULL_RTX, 1, OPTAB_DIRECT);
   /* Calculate masks.  */
-  ac->modemask = expand_simple_binop (SImode, ASHIFT, 
+  ac->modemask = expand_simple_binop (SImode, ASHIFT,
 				     GEN_INT (GET_MODE_MASK (mode)), ac->shift,
 				     NULL_RTX, 1, OPTAB_DIRECT);
   ac->modemaski = expand_simple_unop (SImode, NOT, ac->modemask, NULL_RTX, 1);
@@ -4567,9 +4567,9 @@ s390_expand_cs_hqi (enum machine_mode mo
 
   /* Start CS loop.  */
   emit_label (csloop);
-  /* val = "<mem>00..0<mem>" 
+  /* val = "<mem>00..0<mem>"
    * cmp = "00..0<cmp>00..0"
-   * new = "00..0<new>00..0" 
+   * new = "00..0<new>00..0"
    */
 
   /* Patch cmp and new with val at correct position.  */
@@ -4595,17 +4595,17 @@ s390_expand_cs_hqi (enum machine_mode mo
 						     cmpv, newv));
 
   /* Check for changes outside mode.  */
-  resv = expand_simple_binop (SImode, AND, res, ac.modemaski, 
+  resv = expand_simple_binop (SImode, AND, res, ac.modemaski,
 			      NULL_RTX, 1, OPTAB_DIRECT);
-  cc = s390_emit_compare (NE, resv, val); 
+  cc = s390_emit_compare (NE, resv, val);
   emit_move_insn (val, resv);
   /* Loop internal if so.  */
   s390_emit_jump (csloop, cc);
 
   emit_label (csend);
-  
+
   /* Return the correct part of the bitfield.  */
-  convert_move (target, expand_simple_binop (SImode, LSHIFTRT, res, ac.shift, 
+  convert_move (target, expand_simple_binop (SImode, LSHIFTRT, res, ac.shift,
 					     NULL_RTX, 1, OPTAB_DIRECT), 1);
 }
 
@@ -4659,7 +4659,7 @@ s390_expand_atomic (enum machine_mode mo
       val = expand_simple_binop (SImode, AND, val, ac.modemask,
 				 NULL_RTX, 1, OPTAB_DIRECT);
       /* FALLTHRU */
-    case SET: 
+    case SET:
       if (ac.aligned && MEM_P (val))
 	store_bit_field (new_rtx, GET_MODE_BITSIZE (mode), 0, SImode, val);
       else
@@ -5511,8 +5511,8 @@ s390_split_branches (void)
 }
 
 
-/* Find an annotated literal pool symbol referenced in RTX X, 
-   and store it at REF.  Will abort if X contains references to 
+/* Find an annotated literal pool symbol referenced in RTX X,
+   and store it at REF.  Will abort if X contains references to
    more than one such pool symbol; multiple references to the same
    symbol are allowed, however.
 
@@ -5545,7 +5545,7 @@ find_constant_pool_ref (rtx x, rtx *ref)
 
       if (*ref == NULL_RTX)
 	*ref = sym;
-      else 
+      else
 	gcc_assert (*ref == sym);
 
       return;
@@ -5566,7 +5566,7 @@ find_constant_pool_ref (rtx x, rtx *ref)
     }
 }
 
-/* Replace every reference to the annotated literal pool 
+/* Replace every reference to the annotated literal pool
    symbol REF in X by its base plus OFFSET.  */
 
 static void
@@ -6511,7 +6511,7 @@ s390_chunkify_start (void)
 
   for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
     {
-      rtx new_insn = gen_reload_base (cfun->machine->base_reg, 
+      rtx new_insn = gen_reload_base (cfun->machine->base_reg,
 				      curr_pool->label);
       rtx insn = curr_pool->first_insn;
       INSN_ADDRESSES_NEW (emit_insn_before (new_insn, insn), -1);
@@ -6526,7 +6526,7 @@ s390_chunkify_start (void)
 	struct constant_pool *pool = s390_find_pool (pool_list, insn);
 	if (pool)
 	  {
-	    rtx new_insn = gen_reload_base (cfun->machine->base_reg, 
+	    rtx new_insn = gen_reload_base (cfun->machine->base_reg,
 					    pool->label);
 	    INSN_ADDRESSES_NEW (emit_insn_after (new_insn, insn), -1);
 	  }
@@ -6763,7 +6763,7 @@ find_unused_clobbered_reg (void)
 }
 
 
-/* Helper function for s390_regs_ever_clobbered.  Sets the fields in DATA for all 
+/* Helper function for s390_regs_ever_clobbered.  Sets the fields in DATA for all
    clobbered hard regs in SETREG.  */
 
 static void
@@ -6821,8 +6821,8 @@ s390_regs_ever_clobbered (int *regs_ever
      deal with this automatically.  */
   if (crtl->calls_eh_return || cfun->machine->has_landing_pad_p)
     for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM ; i++)
-      if (crtl->calls_eh_return 
-	  || (cfun->machine->has_landing_pad_p 
+      if (crtl->calls_eh_return
+	  || (cfun->machine->has_landing_pad_p
 	      && df_regs_ever_live_p (EH_RETURN_DATA_REGNO (i))))
 	regs_ever_clobbered[EH_RETURN_DATA_REGNO (i)] = 1;
 
@@ -6841,16 +6841,16 @@ s390_regs_ever_clobbered (int *regs_ever
 	{
 	  if (INSN_P (cur_insn))
 	    note_stores (PATTERN (cur_insn),
-			 s390_reg_clobbered_rtx, 
+			 s390_reg_clobbered_rtx,
 			 regs_ever_clobbered);
 	}
     }
 }
 
-/* Determine the frame area which actually has to be accessed 
-   in the function epilogue. The values are stored at the 
+/* Determine the frame area which actually has to be accessed
+   in the function epilogue. The values are stored at the
    given pointers AREA_BOTTOM (address of the lowest used stack
-   address) and AREA_TOP (address of the first item which does 
+   address) and AREA_TOP (address of the first item which does
    not belong to the stack frame).  */
 
 static void
@@ -6884,7 +6884,7 @@ s390_frame_area (int *area_bottom, int *
 	  b = MIN (b, cfun_frame_layout.f4_offset + (i - 2) * 8);
 	  t = MAX (t, cfun_frame_layout.f4_offset + (i - 1) * 8);
 	}
-  
+
   *area_bottom = b;
   *area_top = t;
 }
@@ -6923,10 +6923,10 @@ s390_register_info (int clobbered_regs[]
     clobbered_regs[HARD_FRAME_POINTER_REGNUM] = 1;
 
   if (flag_pic)
-    clobbered_regs[PIC_OFFSET_TABLE_REGNUM] 
+    clobbered_regs[PIC_OFFSET_TABLE_REGNUM]
       |= df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM);
 
-  clobbered_regs[BASE_REGNUM] 
+  clobbered_regs[BASE_REGNUM]
     |= (cfun->machine->base_reg
         && REGNO (cfun->machine->base_reg) == BASE_REGNUM);
 
@@ -6969,8 +6969,8 @@ s390_register_info (int clobbered_regs[]
       cfun_frame_layout.first_save_gpr_slot = i;
       cfun_frame_layout.last_save_gpr_slot = j;
 
-      for (i = cfun_frame_layout.first_save_gpr_slot; 
-	   i < cfun_frame_layout.last_save_gpr_slot + 1; 
+      for (i = cfun_frame_layout.first_save_gpr_slot;
+	   i < cfun_frame_layout.last_save_gpr_slot + 1;
 	   i++)
 	if (clobbered_regs[i])
 	  break;
@@ -6978,7 +6978,7 @@ s390_register_info (int clobbered_regs[]
       for (j = cfun_frame_layout.last_save_gpr_slot; j > i; j--)
 	if (clobbered_regs[j])
 	  break;
-      
+
       if (i == cfun_frame_layout.last_save_gpr_slot + 1)
 	{
 	  /* Nothing to save/restore.  */
@@ -7058,7 +7058,7 @@ s390_frame_info (void)
   cfun_frame_layout.frame_size = get_frame_size ();
   if (!TARGET_64BIT && cfun_frame_layout.frame_size > 0x7fff0000)
     fatal_error ("total size of local variables exceeds architecture limit");
-  
+
   if (!TARGET_PACKED_STACK)
     {
       cfun_frame_layout.backchain_offset = 0;
@@ -7072,46 +7072,46 @@ s390_frame_info (void)
     {
       cfun_frame_layout.backchain_offset = (STACK_POINTER_OFFSET
 					    - UNITS_PER_WORD);
-      cfun_frame_layout.gprs_offset 
-	= (cfun_frame_layout.backchain_offset 
+      cfun_frame_layout.gprs_offset
+	= (cfun_frame_layout.backchain_offset
 	   - (STACK_POINTER_REGNUM - cfun_frame_layout.first_save_gpr_slot + 1)
 	   * UNITS_PER_WORD);
-	  
+
       if (TARGET_64BIT)
 	{
-	  cfun_frame_layout.f4_offset 
+	  cfun_frame_layout.f4_offset
 	    = (cfun_frame_layout.gprs_offset
 	       - 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));
-	  
-	  cfun_frame_layout.f0_offset 
-	    = (cfun_frame_layout.f4_offset 
+
+	  cfun_frame_layout.f0_offset
+	    = (cfun_frame_layout.f4_offset
 	       - 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));
 	}
       else
 	{
 	  /* On 31 bit we have to care about alignment of the
 	     floating point regs to provide fastest access.  */
-	  cfun_frame_layout.f0_offset 
-	    = ((cfun_frame_layout.gprs_offset 
+	  cfun_frame_layout.f0_offset
+	    = ((cfun_frame_layout.gprs_offset
 		& ~(STACK_BOUNDARY / BITS_PER_UNIT - 1))
 	       - 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));
-	  
-	  cfun_frame_layout.f4_offset 
+
+	  cfun_frame_layout.f4_offset
 	    = (cfun_frame_layout.f0_offset
 	       - 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));
 	}
     }
   else /* no backchain */
     {
-      cfun_frame_layout.f4_offset 
+      cfun_frame_layout.f4_offset
 	= (STACK_POINTER_OFFSET
 	   - 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));
-      
-      cfun_frame_layout.f0_offset 
+
+      cfun_frame_layout.f0_offset
 	= (cfun_frame_layout.f4_offset
 	   - 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));
-      
-      cfun_frame_layout.gprs_offset 
+
+      cfun_frame_layout.gprs_offset
 	= cfun_frame_layout.f0_offset - cfun_gprs_save_area_size;
     }
 
@@ -7132,7 +7132,7 @@ s390_frame_info (void)
       if (TARGET_BACKCHAIN)
 	cfun_frame_layout.frame_size += UNITS_PER_WORD;
 
-      /* No alignment trouble here because f8-f15 are only saved under 
+      /* No alignment trouble here because f8-f15 are only saved under
 	 64 bit.  */
       cfun_frame_layout.f8_offset = (MIN (MIN (cfun_frame_layout.f0_offset,
 					       cfun_frame_layout.f4_offset),
@@ -7144,9 +7144,9 @@ s390_frame_info (void)
       for (i = 0; i < 8; i++)
 	if (cfun_fpr_bit_p (i))
 	  cfun_frame_layout.frame_size += 8;
-      
+
       cfun_frame_layout.frame_size += cfun_gprs_save_area_size;
-      
+
       /* If under 31 bit an odd number of gprs has to be saved we have to adjust
 	 the frame size to sustain 8 byte alignment of stack frames.  */
       cfun_frame_layout.frame_size = ((cfun_frame_layout.frame_size +
@@ -7213,11 +7213,11 @@ s390_update_frame_layout (void)
 
   s390_register_info (clobbered_regs);
 
-  df_set_regs_ever_live (BASE_REGNUM, 
+  df_set_regs_ever_live (BASE_REGNUM,
 			 clobbered_regs[BASE_REGNUM] ? true : false);
-  df_set_regs_ever_live (RETURN_REGNUM, 
+  df_set_regs_ever_live (RETURN_REGNUM,
 			 clobbered_regs[RETURN_REGNUM] ? true : false);
-  df_set_regs_ever_live (STACK_POINTER_REGNUM, 
+  df_set_regs_ever_live (STACK_POINTER_REGNUM,
 			 clobbered_regs[STACK_POINTER_REGNUM] ? true : false);
 
   if (cfun->machine->base_reg)
@@ -7249,10 +7249,10 @@ s390_hard_regno_mode_ok (unsigned int re
     case GENERAL_REGS:
       if (REGNO_PAIR_OK (regno, mode))
 	{
-	  if (TARGET_64BIT 
+	  if (TARGET_64BIT
 	      || (mode != TFmode && mode != TCmode && mode != TDmode))
 	    return true;
-	}	  
+	}
       break;
     case CC_REGS:
       if (GET_MODE_CLASS (mode) == MODE_CC)
@@ -7268,7 +7268,7 @@ s390_hard_regno_mode_ok (unsigned int re
     default:
       return false;
     }
-  
+
   return false;
 }
 
@@ -7365,7 +7365,7 @@ s390_initial_elimination_offset (int fro
   switch (from)
     {
     case FRAME_POINTER_REGNUM:
-      offset = (get_frame_size() 
+      offset = (get_frame_size()
 		+ STACK_POINTER_OFFSET
 		+ crtl->outgoing_args_size);
       break;
@@ -7460,7 +7460,7 @@ save_gprs (rtx base, int offset, int fir
     for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
       {
 	rtx mem = XEXP (XVECEXP (PATTERN (insn), 0, i), 0);
-	
+
 	if (first + i <= 6)
 	  set_mem_alias_set (mem, get_varargs_alias_set ());
       }
@@ -7624,8 +7624,8 @@ s390_emit_prologue (void)
   /* Choose best register to use for temp use within prologue.
      See below for why TPF must use the register 1.  */
 
-  if (!has_hard_reg_initial_val (Pmode, RETURN_REGNUM) 
-      && !current_function_is_leaf 
+  if (!has_hard_reg_initial_val (Pmode, RETURN_REGNUM)
+      && !current_function_is_leaf
       && !TARGET_TPF_PROFILING)
     temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
   else
@@ -7634,11 +7634,11 @@ s390_emit_prologue (void)
   /* Save call saved gprs.  */
   if (cfun_frame_layout.first_save_gpr != -1)
     {
-      insn = save_gprs (stack_pointer_rtx, 
-			cfun_frame_layout.gprs_offset + 
-			UNITS_PER_WORD * (cfun_frame_layout.first_save_gpr 
+      insn = save_gprs (stack_pointer_rtx,
+			cfun_frame_layout.gprs_offset +
+			UNITS_PER_WORD * (cfun_frame_layout.first_save_gpr
 					  - cfun_frame_layout.first_save_gpr_slot),
-			cfun_frame_layout.first_save_gpr, 
+			cfun_frame_layout.first_save_gpr,
 			cfun_frame_layout.last_save_gpr);
       emit_insn (insn);
     }
@@ -7691,14 +7691,14 @@ s390_emit_prologue (void)
 	if (cfun_fpr_bit_p (i))
 	  {
 	    insn = save_fpr (stack_pointer_rtx, offset, i + 16);
-	    	       
+
 	    RTX_FRAME_RELATED_P (insn) = 1;
 	    offset -= 8;
 	  }
       if (offset >= cfun_frame_layout.f8_offset)
 	next_fpr = i + 16;
     }
-  
+
   if (!TARGET_PACKED_STACK)
     next_fpr = cfun_save_high_fprs_p ? 31 : 0;
 
@@ -7750,9 +7750,9 @@ s390_emit_prologue (void)
 	    }
   	}
 
-      if (s390_warn_framesize > 0 
+      if (s390_warn_framesize > 0
 	  && cfun_frame_layout.frame_size >= s390_warn_framesize)
-	warning (0, "frame size of %qs is " HOST_WIDE_INT_PRINT_DEC " bytes", 
+	warning (0, "frame size of %qs is " HOST_WIDE_INT_PRINT_DEC " bytes",
 		 current_function_name (), cfun_frame_layout.frame_size);
 
       if (s390_warn_dynamicstack_p && cfun->calls_alloca)
@@ -7767,7 +7767,7 @@ s390_emit_prologue (void)
       if (DISP_IN_RANGE (INTVAL (frame_off)))
 	{
 	  insn = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
-			      gen_rtx_PLUS (Pmode, stack_pointer_rtx, 
+			      gen_rtx_PLUS (Pmode, stack_pointer_rtx,
 					    frame_off));
 	  insn = emit_insn (insn);
 	}
@@ -7792,11 +7792,11 @@ s390_emit_prologue (void)
       if (TARGET_BACKCHAIN)
 	{
 	  if (cfun_frame_layout.backchain_offset)
-	    addr = gen_rtx_MEM (Pmode, 
-				plus_constant (stack_pointer_rtx, 
+	    addr = gen_rtx_MEM (Pmode,
+				plus_constant (stack_pointer_rtx,
 				  cfun_frame_layout.backchain_offset));
 	  else
-	    addr = gen_rtx_MEM (Pmode, stack_pointer_rtx);  
+	    addr = gen_rtx_MEM (Pmode, stack_pointer_rtx);
 	  set_mem_alias_set (addr, get_frame_alias_set ());
 	  insn = emit_insn (gen_move_insn (addr, temp_reg));
 	}
@@ -7821,7 +7821,7 @@ s390_emit_prologue (void)
 	 moved below the use of the stack slots.  */
       s390_emit_stack_tie ();
 
-      insn = emit_insn (gen_add2_insn (temp_reg, 
+      insn = emit_insn (gen_add2_insn (temp_reg,
 				       GEN_INT (cfun_frame_layout.f8_offset)));
 
       offset = 0;
@@ -7833,7 +7833,7 @@ s390_emit_prologue (void)
 				      cfun_frame_layout.frame_size
 				      + cfun_frame_layout.f8_offset
 				      + offset);
-	    
+
 	    insn = save_fpr (temp_reg, offset, i);
 	    offset += 8;
 	    RTX_FRAME_RELATED_P (insn) = 1;
@@ -7904,7 +7904,7 @@ s390_emit_epilogue (bool sibcall)
 
   /* Check whether to use frame or stack pointer for restore.  */
 
-  frame_pointer = (frame_pointer_needed 
+  frame_pointer = (frame_pointer_needed
 		   ? hard_frame_pointer_rtx : stack_pointer_rtx);
 
   s390_frame_area (&area_bottom, &area_top);
@@ -7962,7 +7962,7 @@ s390_emit_epilogue (bool sibcall)
 		}
 	    }
 	}
-	      
+
     }
   else
     {
@@ -7978,7 +7978,7 @@ s390_emit_epilogue (bool sibcall)
 	  else if (!TARGET_PACKED_STACK)
 	    next_offset += 8;
 	}
-      
+
     }
 
   /* Return register.  */
@@ -8010,7 +8010,7 @@ s390_emit_epilogue (bool sibcall)
 	  if (global_regs[i])
 	    {
 	      addr = plus_constant (frame_pointer,
-				    offset + cfun_frame_layout.gprs_offset 
+				    offset + cfun_frame_layout.gprs_offset
 				    + (i - cfun_frame_layout.first_save_gpr_slot)
 				    * UNITS_PER_WORD);
 	      addr = gen_rtx_MEM (Pmode, addr);
@@ -8035,7 +8035,7 @@ s390_emit_epilogue (bool sibcall)
 
 	      addr = plus_constant (frame_pointer,
 				    offset + cfun_frame_layout.gprs_offset
-				    + (RETURN_REGNUM 
+				    + (RETURN_REGNUM
 				       - cfun_frame_layout.first_save_gpr_slot)
 				    * UNITS_PER_WORD);
 	      addr = gen_rtx_MEM (Pmode, addr);
@@ -8046,7 +8046,7 @@ s390_emit_epilogue (bool sibcall)
 
       insn = restore_gprs (frame_pointer,
 			   offset + cfun_frame_layout.gprs_offset
-			   + (cfun_frame_layout.first_restore_gpr 
+			   + (cfun_frame_layout.first_restore_gpr
 			      - cfun_frame_layout.first_save_gpr_slot)
 			   * UNITS_PER_WORD,
 			   cfun_frame_layout.first_restore_gpr,
@@ -8456,7 +8456,7 @@ s390_va_start (tree valist, rtx nextarg 
       t = make_tree (TREE_TYPE (sav), return_address_pointer_rtx);
       t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (sav), t,
 	          size_int (-RETURN_REGNUM * UNITS_PER_WORD));
-  
+
       t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
       TREE_SIDE_EFFECTS (t) = 1;
       expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
@@ -8488,7 +8488,7 @@ s390_va_start (tree valist, rtx nextarg 
    } */
 
 static tree
-s390_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p, 
+s390_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
 		      gimple_seq *post_p ATTRIBUTE_UNUSED)
 {
   tree f_gpr, f_fpr, f_ovf, f_sav;
@@ -8588,9 +8588,9 @@ s390_gimplify_va_arg (tree valist, tree 
   t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
   gimplify_and_add (t, pre_p);
 
-  t = build2 (POINTER_PLUS_EXPR, ptr_type_node, sav, 
+  t = build2 (POINTER_PLUS_EXPR, ptr_type_node, sav,
 	      size_int (sav_ofs));
-  u = build2 (MULT_EXPR, TREE_TYPE (reg), reg, 
+  u = build2 (MULT_EXPR, TREE_TYPE (reg), reg,
 	      fold_convert (TREE_TYPE (reg), size_int (sav_scale)));
   t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t, fold_convert (sizetype, u));
 
@@ -8605,14 +8605,14 @@ s390_gimplify_va_arg (tree valist, tree 
 
   t = ovf;
   if (size < UNITS_PER_WORD)
-    t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t, 
+    t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t,
 		size_int (UNITS_PER_WORD - size));
 
   gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
 
   gimplify_assign (addr, t, pre_p);
 
-  t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t, 
+  t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t,
 	      size_int (size));
   gimplify_assign (ovf, t, pre_p);
 
@@ -9268,7 +9268,7 @@ s390_fixed_condition_code_regs (unsigned
 {
   *p1 = CC_REGNUM;
   *p2 = INVALID_REGNUM;
- 
+
   return true;
 }
 
@@ -9434,10 +9434,10 @@ s390_optimize_prologue (void)
   /* If all special registers are in fact used, there's nothing we
      can do, so no point in walking the insn list.  */
 
-  if (cfun_frame_layout.first_save_gpr <= BASE_REGNUM 
+  if (cfun_frame_layout.first_save_gpr <= BASE_REGNUM
       && cfun_frame_layout.last_save_gpr >= BASE_REGNUM
-      && (TARGET_CPU_ZARCH 
-          || (cfun_frame_layout.first_save_gpr <= RETURN_REGNUM 
+      && (TARGET_CPU_ZARCH
+          || (cfun_frame_layout.first_save_gpr <= RETURN_REGNUM
               && cfun_frame_layout.last_save_gpr >= RETURN_REGNUM)))
     return;
 
@@ -9477,9 +9477,9 @@ s390_optimize_prologue (void)
 
 	  if (cfun_frame_layout.first_save_gpr != -1)
 	    {
-	      new_insn 	= save_gprs (base, 
+	      new_insn 	= save_gprs (base,
 				     off + (cfun_frame_layout.first_save_gpr
-					    - first) * UNITS_PER_WORD, 
+					    - first) * UNITS_PER_WORD,
 				     cfun_frame_layout.first_save_gpr,
 				     cfun_frame_layout.last_save_gpr);
 	      new_insn = emit_insn_before (new_insn, insn);
@@ -9538,9 +9538,9 @@ s390_optimize_prologue (void)
 
 	  if (cfun_frame_layout.first_restore_gpr != -1)
 	    {
-	      new_insn = restore_gprs (base, 
+	      new_insn = restore_gprs (base,
 				       off + (cfun_frame_layout.first_restore_gpr
-					      - first) * UNITS_PER_WORD, 
+					      - first) * UNITS_PER_WORD,
 				       cfun_frame_layout.first_restore_gpr,
 				       cfun_frame_layout.last_restore_gpr);
 	      new_insn = emit_insn_before (new_insn, insn);
Index: gcc/gcc/config/s390/s390.h
===================================================================
--- gcc.orig/gcc/config/s390/s390.h
+++ gcc/gcc/config/s390/s390.h
@@ -180,7 +180,7 @@ extern int s390_arch_flags;
 #define S390_TDC_POSITIVE_NORMALIZED_DFP_NUMBER   (1 << 7)
 #define S390_TDC_NEGATIVE_NORMALIZED_DFP_NUMBER   (1 << 6)
 
-/* For signbit, the BFP-DFP-difference makes no difference. */ 
+/* For signbit, the BFP-DFP-difference makes no difference. */
 #define S390_TDC_SIGNBIT_SET (S390_TDC_NEGATIVE_ZERO \
                           | S390_TDC_NEGATIVE_NORMALIZED_BFP_NUMBER \
                           | S390_TDC_NEGATIVE_DENORMALIZED_BFP_NUMBER\
@@ -298,10 +298,10 @@ if (INTEGRAL_MODE_P (MODE) &&	        	 
    correspond to actual hardware:
    Reg 32: Argument pointer
    Reg 33: Condition code
-   Reg 34: Frame pointer  
+   Reg 34: Frame pointer
    Reg 35: Return address pointer
 
-   Registers 36 and 37 are mapped to access registers 
+   Registers 36 and 37 are mapped to access registers
    0 and 1, used to implement thread-local storage.  */
 
 #define FIRST_PSEUDO_REGISTER 38
@@ -455,7 +455,7 @@ if (INTEGRAL_MODE_P (MODE) &&	        	 
 enum reg_class
 {
   NO_REGS, CC_REGS, ADDR_REGS, GENERAL_REGS, ACCESS_REGS,
-  ADDR_CC_REGS, GENERAL_CC_REGS, 
+  ADDR_CC_REGS, GENERAL_CC_REGS,
   FP_REGS, ADDR_FP_REGS, GENERAL_FP_REGS,
   ALL_REGS, LIM_REG_CLASSES
 };
@@ -575,7 +575,7 @@ extern const enum reg_class regclass_map
    the argument area.  */
 #define FIRST_PARM_OFFSET(FNDECL) 0
 
-/* Defining this macro makes __builtin_frame_address(0) and 
+/* Defining this macro makes __builtin_frame_address(0) and
    __builtin_return_address(0) work with -fomit-frame-pointer.  */
 #define INITIAL_FRAME_ADDRESS_RTX                                             \
   (plus_constant (arg_pointer_rtx, -STACK_POINTER_OFFSET))
@@ -615,7 +615,7 @@ extern const enum reg_class regclass_map
 /* Describe how we implement __builtin_eh_return.  */
 #define EH_RETURN_DATA_REGNO(N) ((N) < 4 ? (N) + 6 : INVALID_REGNUM)
 #define EH_RETURN_HANDLER_RTX gen_rtx_MEM (Pmode, return_address_pointer_rtx)
-       
+
 /* Select a format to encode pointers in exception handling data.  */
 #define ASM_PREFERRED_EH_DATA_FORMAT(CODE, GLOBAL)			    \
   (flag_pic								    \
@@ -807,7 +807,7 @@ do {									\
 #define SLOW_BYTE_ACCESS 1
 
 /* An integer expression for the size in bits of the largest integer machine
-   mode that should actually be used.  We allow pairs of registers.  */ 
+   mode that should actually be used.  We allow pairs of registers.  */
 #define MAX_FIXED_MODE_SIZE GET_MODE_BITSIZE (TARGET_64BIT ? TImode : DImode)
 
 /* The maximum number of bytes that a single instruction can move quickly
Index: gcc/gcc/config/s390/s390.md
===================================================================
--- gcc.orig/gcc/config/s390/s390.md
+++ gcc/gcc/config/s390/s390.md
@@ -37,7 +37,7 @@
 ;;     %N: print the second word of a DImode operand.
 ;;     %M: print the second word of a TImode operand.
 ;;     %Y: print shift count operand.
-;;  
+;;
 ;;     %b: print integer X as if it's an unsigned byte.
 ;;     %c: print integer X as if it's an signed byte.
 ;;     %x: print integer X as if it's an unsigned halfword.
@@ -154,7 +154,7 @@
    (RETURN_REGNUM		14)
    ; Condition code register.
    (CC_REGNUM			33)
-   ; Thread local storage pointer register. 
+   ; Thread local storage pointer register.
    (TP_REGNUM			36)
   ])
 
@@ -220,7 +220,7 @@
 ;;   reg: Instruction does not use the agen unit
 
 (define_attr "atype" "agen,reg"
-  (if_then_else (eq_attr "op_type" "E,RR,RI,RRE")  
+  (if_then_else (eq_attr "op_type" "E,RR,RI,RRE")
 		(const_string "reg")
 		(const_string "agen")))
 
@@ -319,7 +319,7 @@
 
 ;; These mode iterators allow floating point patterns to be generated from the
 ;; same template.
-(define_mode_iterator FP_ALL [TF DF SF (TD "TARGET_HARD_DFP") (DD "TARGET_HARD_DFP") 
+(define_mode_iterator FP_ALL [TF DF SF (TD "TARGET_HARD_DFP") (DD "TARGET_HARD_DFP")
                               (SD "TARGET_HARD_DFP")])
 (define_mode_iterator FP [TF DF SF (TD "TARGET_HARD_DFP") (DD "TARGET_HARD_DFP")])
 (define_mode_iterator FPALL [TF DF SF TD DD SD])
@@ -360,15 +360,15 @@
 
 ;; This iterator and attribute allow to combine most atomic operations.
 (define_code_iterator ATOMIC [and ior xor plus minus mult])
-(define_code_attr atomic [(and "and") (ior "ior") (xor "xor") 
+(define_code_attr atomic [(and "and") (ior "ior") (xor "xor")
 			  (plus "add") (minus "sub") (mult "nand")])
 
-;; In FP templates, a string like "lt<de>br" will expand to "ltxbr" in 
+;; In FP templates, a string like "lt<de>br" will expand to "ltxbr" in
 ;; TF/TDmode, "ltdbr" in DF/DDmode, and "ltebr" in SF/SDmode.
 (define_mode_attr xde [(TF "x") (DF "d") (SF "e") (TD "x") (DD "d") (SD "e")])
 
-;; In FP templates, a <dee> in "m<dee><bt>r" will expand to "mx<bt>r" in 
-;; TF/TDmode, "md<bt>r" in DF/DDmode, "mee<bt>r" in SFmode and "me<bt>r in 
+;; In FP templates, a <dee> in "m<dee><bt>r" will expand to "mx<bt>r" in
+;; TF/TDmode, "md<bt>r" in DF/DDmode, "mee<bt>r" in SFmode and "me<bt>r in
 ;; SDmode.
 (define_mode_attr xdee [(TF "x") (DF "d") (SF "ee") (TD "x") (DD "d") (SD "e")])
 
@@ -382,14 +382,14 @@
 ;; dfp variants in a single insn definition.
 
 ;; This attribute is used to set op_type accordingly.
-(define_mode_attr RRer [(TF "RRE") (DF "RRE") (SF "RRE") (TD "RRR") 
+(define_mode_attr RRer [(TF "RRE") (DF "RRE") (SF "RRE") (TD "RRR")
                         (DD "RRR") (SD "RRR")])
 
-;; This attribute is used in the operand constraint list in order to have the 
+;; This attribute is used in the operand constraint list in order to have the
 ;; first and the second operand match for bfp modes.
 (define_mode_attr f0 [(TF "0") (DF "0") (SF "0") (TD "f") (DD "f") (DD "f")])
 
-;; This attribute is used in the operand list of the instruction to have an 
+;; This attribute is used in the operand list of the instruction to have an
 ;; additional operand for the dfp instructions.
 (define_mode_attr op1 [(TF "") (DF "") (SF "")
                        (TD "%1,") (DD "%1,") (SD "%1,")])
Index: gcc/gcc/config/s390/tpf-unwind.h
===================================================================
--- gcc.orig/gcc/config/s390/tpf-unwind.h
+++ gcc/gcc/config/s390/tpf-unwind.h
@@ -46,7 +46,7 @@ __isPATrange (void *addr)
 /* TPF return address offset from start of stack frame.  */
 #define TPFRA_OFFSET 168
 
-/* Exceptions macro defined for TPF so that functions without 
+/* Exceptions macro defined for TPF so that functions without
    dwarf frame information can be used with exceptions.  */
 #define MD_FALLBACK_FRAME_STATE_FOR s390_fallback_frame_state
 
@@ -165,20 +165,20 @@ __tpf_eh_return (void *target)
 
       /* Begin looping through stack frames.  Stop if invalid
          code information is retrieved or if a match between the
-         current stack frame iteration shared object's address 
+         current stack frame iteration shared object's address
          matches that of the target, calculated above.  */
       do
         {
           /* Get return address based on our stackptr iterator.  */
-          current = (void *) *((unsigned long int *) 
+          current = (void *) *((unsigned long int *)
                       (stackptr+RA_OFFSET));
 
           /* Is it a Pat Stub?  */
-          if (__isPATrange (current)) 
+          if (__isPATrange (current))
             {
-              /* Yes it was, get real return address 
+              /* Yes it was, get real return address
                  in TPF stack area.  */
-              current = (void *) *((unsigned long int *) 
+              current = (void *) *((unsigned long int *)
                           (stackptr+TPFRA_OFFSET));
               is_a_stub = 1;
             }
@@ -198,7 +198,7 @@ __tpf_eh_return (void *target)
                /* Yes! They are in the same module.
                   Force copy of TPF private stack area to
                   destination stack frame TPF private area. */
-               destination_frame = (void *) *((unsigned long int *) 
+               destination_frame = (void *) *((unsigned long int *)
                    (*PREVIOUS_STACK_PTR() + R15_OFFSET));
 
                /* Copy TPF linkage area from current frame to
@@ -209,24 +209,24 @@ __tpf_eh_return (void *target)
                /* Now overlay the
                   real target address into the TPF stack area of
                   the target frame we are jumping to.  */
-               *((unsigned long int *) (destination_frame + 
+               *((unsigned long int *) (destination_frame +
                    TPFRA_OFFSET)) = (unsigned long int) target;
 
                /* Before returning the desired pat stub address to
-                  the exception handling unwinder so that it can 
-                  actually do the "leap" shift out the low order 
+                  the exception handling unwinder so that it can
+                  actually do the "leap" shift out the low order
                   bit designated to determine if we are in 64BIT mode.
                   This is necessary for CTOA stubs.
-                  Otherwise we leap one byte past where we want to 
+                  Otherwise we leap one byte past where we want to
                   go to in the TPF pat stub linkage code.  */
-               shifter = *((unsigned long int *) 
+               shifter = *((unsigned long int *)
                      (stackptr + RA_OFFSET));
 
                shifter &= ~1ul;
 
                /* Store Pat Stub Address in destination Stack Frame.  */
                *((unsigned long int *) (destination_frame +
-                   RA_OFFSET)) = shifter;               
+                   RA_OFFSET)) = shifter;
 
                /* Re-adjust pat stub address to go to correct place
                   in linkage.  */



More information about the Gcc-patches mailing list