Another movdf_hardfloat32 patch

David Edelsohn dje@watson.ibm.com
Tue Jun 22 21:15:00 GMT 1999


	Does this patch catch all of the non-offsettable yet non-indexed
cases which were causing the problem?  The indexed version of the code
should be avoided wherever it can be -- including for LO_SUM addresses.
A LO_SUM address is required to be the equivalent of an offsettable
address: reg+const.
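
	For reference, the test added in cases 1 and 2 amounts to the
check sketched below.  This is only an illustrative sketch -- the
helper name is mine, and it assumes GCC's internal rtl.h environment;
the actual patch open-codes the condition on operands[1] (case 1) and
operands[0] (case 2).

    /* Sketch only: accept either a genuinely offsettable MEM or a MEM
       whose address is a LO_SUM.  An address such as
       (lo_sum (reg 9) (symbol_ref "x")) -- assembled roughly as
       x@l(r9) -- behaves like reg+const, so the second word of the
       DFmode value can be reached 4 bytes higher through the same
       base register, with no index register needed.  */
    static int
    offsettable_or_lo_sum_mem_p (rtx op)
    {
      return (offsettable_memref_p (op)
	      || (GET_CODE (op) == MEM
		  && GET_CODE (XEXP (op, 0)) == LO_SUM));
    }

	With such an address, the offsettable templates in cases 1 and 2
can emit two plain load/store instructions, the second one addressing
the word 4 bytes higher, instead of falling back to the indexed
lwzx/stwx sequences that must adjust the base register with {cal|la}.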

David

	* rs6000.md (movdf_hardfloat32): Revert previous patch.
	Handle LO_SUM the same as offsettable in cases 1 and 2.

Index: rs6000.md
===================================================================
RCS file: /egcs/carton/cvsfiles/egcs/gcc/config/rs6000/rs6000.md,v
retrieving revision 1.55.4.4
diff -c -p -r1.55.4.4 rs6000.md
*** rs6000.md	1999/06/22 00:59:45	1.55.4.4
--- rs6000.md	1999/06/23 04:03:46
***************
*** 6327,6333 ****
        else
  	return \"mr %0,%1\;mr %L0,%L1\";
      case 1:
!       if (offsettable_memref_p (operands[1]))
  	{
  	  /* If the low-address word is used in the address, we must load
  	     it last.  Otherwise, load it first.  Note that we cannot have
--- 6327,6335 ----
        else
  	return \"mr %0,%1\;mr %L0,%L1\";
      case 1:
!       if (offsettable_memref_p (operands[1])
! 	  || (GET_CODE (operands[1]) == MEM
! 	      && GET_CODE (XEXP (operands[1], 0)) == LO_SUM))
  	{
  	  /* If the low-address word is used in the address, we must load
  	     it last.  Otherwise, load it first.  Note that we cannot have
***************
*** 6353,6373 ****
  				 operands[1], 0))
  	    {
  	      output_asm_insn (\"{cal|la} %0,4(%0)\", &addreg);
! 	      output_asm_insn (\"{l%X1|lwz%X1} %L0,%1\", operands);
  	      output_asm_insn (\"{cal|la} %0,-4(%0)\", &addreg);
! 	      return \"{l%X1|lwz%X1} %0,%1\";
  	    }
  	  else
  	    {
! 	      output_asm_insn (\"{l%X1|lwz%X1} %0,%1\", operands);
  	      output_asm_insn (\"{cal|la} %0,4(%0)\", &addreg);
! 	      output_asm_insn (\"{l%X1|lwz%X1} %L0,%1\", operands);
  	      output_asm_insn (\"{cal|la} %0,-4(%0)\", &addreg);
  	      return \"\";
  	    }
  	}
      case 2:
!       if (offsettable_memref_p (operands[0]))
  	return \"{st%U0|stw%U0} %1,%0\;{st|stw} %L1,%L0\";
        else
  	{
--- 6355,6377 ----
  				 operands[1], 0))
  	    {
  	      output_asm_insn (\"{cal|la} %0,4(%0)\", &addreg);
! 	      output_asm_insn (\"{lx|lwzx} %L0,%1\", operands);
  	      output_asm_insn (\"{cal|la} %0,-4(%0)\", &addreg);
! 	      return \"{lx|lwzx} %0,%1\";
  	    }
  	  else
  	    {
! 	      output_asm_insn (\"{lx|lwzx} %0,%1\", operands);
  	      output_asm_insn (\"{cal|la} %0,4(%0)\", &addreg);
! 	      output_asm_insn (\"{lx|lwzx} %L0,%1\", operands);
  	      output_asm_insn (\"{cal|la} %0,-4(%0)\", &addreg);
  	      return \"\";
  	    }
  	}
      case 2:
!       if (offsettable_memref_p (operands[0])
! 	  || (GET_CODE (operands[0]) == MEM
! 	      && GET_CODE (XEXP (operands[0], 0)) == LO_SUM))
  	return \"{st%U0|stw%U0} %1,%0\;{st|stw} %L1,%L0\";
        else
  	{
***************
*** 6378,6386 ****
  	    abort ();
  
  	  addreg = find_addr_reg (XEXP (operands[0], 0));
! 	  output_asm_insn (\"{st%X0|stw%X0} %1,%0\", operands);
  	  output_asm_insn (\"{cal|la} %0,4(%0)\", &addreg);
! 	  output_asm_insn (\"{st%X0|stw%X0} %L1,%0\", operands);
  	  output_asm_insn (\"{cal|la} %0,-4(%0)\", &addreg);
  	  return \"\";
  	}
--- 6382,6390 ----
  	    abort ();
  
  	  addreg = find_addr_reg (XEXP (operands[0], 0));
! 	  output_asm_insn (\"{stx|stwx} %1,%0\", operands);
  	  output_asm_insn (\"{cal|la} %0,4(%0)\", &addreg);
! 	  output_asm_insn (\"{stx|stwx} %L1,%0\", operands);
  	  output_asm_insn (\"{cal|la} %0,-4(%0)\", &addreg);
  	  return \"\";
  	}

