[PATCH 18/30] Changes to mips
acsawdey@linux.ibm.com
Tue Jun 25 20:30:00 GMT 2019
From: Aaron Sawdey <acsawdey@linux.ibm.com>
* config/mips/mips.c (mips_use_by_pieces_infrastructure_p):
Change movmem to cpymem.
* config/mips/mips.h: Change movmem to cpymem.
* config/mips/mips.md (movmemsi): Change name to cpymemsi.
---
gcc/config/mips/mips.c | 10 +++++-----
gcc/config/mips/mips.h | 10 +++++-----
gcc/config/mips/mips.md | 2 +-
3 files changed, 11 insertions(+), 11 deletions(-)
diff --git a/gcc/config/mips/mips.c b/gcc/config/mips/mips.c
index 0e1a68a..cbebb45 100644
--- a/gcc/config/mips/mips.c
+++ b/gcc/config/mips/mips.c
@@ -7938,15 +7938,15 @@ mips_use_by_pieces_infrastructure_p (unsigned HOST_WIDE_INT size,
{
if (op == STORE_BY_PIECES)
return mips_store_by_pieces_p (size, align);
- if (op == MOVE_BY_PIECES && HAVE_movmemsi)
+ if (op == MOVE_BY_PIECES && HAVE_cpymemsi)
{
- /* movmemsi is meant to generate code that is at least as good as
- move_by_pieces. However, movmemsi effectively uses a by-pieces
+ /* cpymemsi is meant to generate code that is at least as good as
+ move_by_pieces. However, cpymemsi effectively uses a by-pieces
implementation both for moves smaller than a word and for
word-aligned moves of no more than MIPS_MAX_MOVE_BYTES_STRAIGHT
bytes. We should allow the tree-level optimisers to do such
moves by pieces, as it often exposes other optimization
- opportunities. We might as well continue to use movmemsi at
+ opportunities. We might as well continue to use cpymemsi at
the rtl level though, as it produces better code when
scheduling is disabled (such as at -O). */
if (currently_expanding_to_rtl)
@@ -8165,7 +8165,7 @@ mips_block_move_loop (rtx dest, rtx src, HOST_WIDE_INT length,
emit_insn (gen_nop ());
}
-/* Expand a movmemsi instruction, which copies LENGTH bytes from
+/* Expand a cpymemsi instruction, which copies LENGTH bytes from
memory reference SRC to memory reference DEST. */
bool
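For readers skimming the hunk above, the policy the hook now expresses can be sketched roughly as follows. This is an illustrative paraphrase, not the exact mips.c code; the alignment and size checks after the currently_expanding_to_rtl test are assumptions based on the comment's description of cpymemsi's straight-line behaviour.

/* Sketch of the by-pieces policy described above (illustrative
   paraphrase, not the real sources).  At the tree level, small copies
   stay with the by-pieces infrastructure so later optimisers can see
   the individual moves; once RTL expansion has started, the cpymemsi
   expander is preferred because it produces better code when
   scheduling is disabled.  */
static bool
mips_move_by_pieces_sketch_p (unsigned HOST_WIDE_INT size,
                              unsigned int align, bool speed_p)
{
  if (!HAVE_cpymemsi)
    /* Without the expander, defer to the generic heuristics.  */
    return default_use_by_pieces_infrastructure_p (size, align,
                                                   MOVE_BY_PIECES, speed_p);

  if (currently_expanding_to_rtl)
    /* At the RTL level, let the cpymemsi expander handle the copy.  */
    return false;

  /* At the tree level, use by-pieces for whatever cpymemsi would
     itself handle with a straight-line sequence (assumed thresholds).  */
  if (align < BITS_PER_WORD)
    return size < UNITS_PER_WORD;
  return size <= MIPS_MAX_MOVE_BYTES_STRAIGHT;
}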
diff --git a/gcc/config/mips/mips.h b/gcc/config/mips/mips.h
index 953d82e..a5be7fa3 100644
--- a/gcc/config/mips/mips.h
+++ b/gcc/config/mips/mips.h
@@ -3099,12 +3099,12 @@ while (0)
#define MIPS_MIN_MOVE_MEM_ALIGN 16
/* The maximum number of bytes that can be copied by one iteration of
- a movmemsi loop; see mips_block_move_loop. */
+ a cpymemsi loop; see mips_block_move_loop. */
#define MIPS_MAX_MOVE_BYTES_PER_LOOP_ITER \
(UNITS_PER_WORD * 4)
/* The maximum number of bytes that can be copied by a straight-line
- implementation of movmemsi; see mips_block_move_straight. We want
+ implementation of cpymemsi; see mips_block_move_straight. We want
to make sure that any loop-based implementation will iterate at
least twice. */
#define MIPS_MAX_MOVE_BYTES_STRAIGHT \
@@ -3119,11 +3119,11 @@ while (0)
#define MIPS_CALL_RATIO 8
-/* Any loop-based implementation of movmemsi will have at least
+/* Any loop-based implementation of cpymemsi will have at least
MIPS_MAX_MOVE_BYTES_STRAIGHT / UNITS_PER_WORD memory-to-memory
moves, so allow individual copies of fewer elements.
- When movmemsi is not available, use a value approximating
+ When cpymemsi is not available, use a value approximating
the length of a memcpy call sequence, so that move_by_pieces
will generate inline code if it is shorter than a function call.
Since move_by_pieces_ninsns counts memory-to-memory moves, but
@@ -3131,7 +3131,7 @@ while (0)
value of MIPS_CALL_RATIO to take that into account. */
#define MOVE_RATIO(speed) \
- (HAVE_movmemsi \
+ (HAVE_cpymemsi \
? MIPS_MAX_MOVE_BYTES_STRAIGHT / MOVE_MAX \
: MIPS_CALL_RATIO / 2)
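To make the MOVE_RATIO arithmetic above concrete, here is a small standalone sketch. The numeric values are assumptions chosen purely for illustration (UNITS_PER_WORD of 8 for a 64-bit target, MOVE_MAX equal to UNITS_PER_WORD, and MIPS_MAX_MOVE_BYTES_STRAIGHT taken as twice the per-iteration loop limit); they are not verified against the real mips.h.

/* Standalone illustration of the MOVE_RATIO selection above; all
   concrete values are assumptions, not taken from the GCC sources.  */
#include <stdio.h>

#define UNITS_PER_WORD 8                  /* assumed 64-bit target */
#define MOVE_MAX UNITS_PER_WORD           /* assumed */
#define MIPS_MAX_MOVE_BYTES_PER_LOOP_ITER (UNITS_PER_WORD * 4)
#define MIPS_MAX_MOVE_BYTES_STRAIGHT \
  (MIPS_MAX_MOVE_BYTES_PER_LOOP_ITER * 2) /* assumed multiplier */
#define MIPS_CALL_RATIO 8

#define HAVE_cpymemsi 1                   /* flip to 0 to compare */
#define MOVE_RATIO(speed) \
  (HAVE_cpymemsi \
   ? MIPS_MAX_MOVE_BYTES_STRAIGHT / MOVE_MAX \
   : MIPS_CALL_RATIO / 2)

int
main (void)
{
  /* With cpymemsi: 64 / 8 = 8 word-sized moves before the expander is
     preferred.  Without it: MIPS_CALL_RATIO / 2 = 4.  */
  printf ("MOVE_RATIO = %d\n", MOVE_RATIO (1));
  return 0;
}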
diff --git a/gcc/config/mips/mips.md b/gcc/config/mips/mips.md
index 2ae1f7e..d260cf9 100644
--- a/gcc/config/mips/mips.md
+++ b/gcc/config/mips/mips.md
@@ -5638,7 +5638,7 @@
;; Argument 2 is the length
;; Argument 3 is the alignment
-(define_expand "movmemsi"
+(define_expand "cpymemsi"
[(parallel [(set (match_operand:BLK 0 "general_operand")
(match_operand:BLK 1 "general_operand"))
(use (match_operand:SI 2 ""))
--
2.7.4