/* Loop Vectorization
   Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com> and
   Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "ggc.h"
#include "tree.h"
#include "basic-block.h"
#include "tree-pretty-print.h"
#include "gimple-pretty-print.h"
#include "tree-flow.h"
#include "tree-dump.h"
#include "cfgloop.h"
#include "cfglayout.h"
#include "expr.h"
#include "recog.h"
#include "optabs.h"
#include "params.h"
#include "diagnostic-core.h"
#include "tree-chrec.h"
#include "tree-scalar-evolution.h"
#include "tree-vectorizer.h"
#include "target.h"

/* Loop Vectorization Pass.

   This pass tries to vectorize loops.

   For example, the vectorizer transforms the following simple loop:

        short a[N]; short b[N]; short c[N]; int i;

        for (i=0; i<N; i++){
          a[i] = b[i] + c[i];
        }

   as if it was manually vectorized by rewriting the source code into:

        typedef int __attribute__((mode(V8HI))) v8hi;
        short a[N]; short b[N]; short c[N]; int i;
        v8hi *pa = (v8hi*)a, *pb = (v8hi*)b, *pc = (v8hi*)c;
        v8hi va, vb, vc;

        for (i=0; i<N/8; i++){
          vb = pb[i];
          vc = pc[i];
          va = vb + vc;
          pa[i] = va;
        }

   The main entry to this pass is vectorize_loops(), in which
   the vectorizer applies a set of analyses on a given set of loops,
   followed by the actual vectorization transformation for the loops that
   had successfully passed the analysis phase.
   Throughout this pass we make a distinction between two types of
   data: scalars (which are represented by SSA_NAMES), and memory references
   ("data-refs").  These two types of data require different handling both
   during analysis and transformation.  The types of data-refs that the
   vectorizer currently supports are ARRAY_REFS whose base is an array DECL
   (not a pointer), and INDIRECT_REFS through pointers; both array and pointer
   accesses are required to have a simple (consecutive) access pattern.

   Analysis phase:
   ===============
   The driver for the analysis phase is vect_analyze_loop().
   It applies a set of analyses, some of which rely on the scalar evolution
   analyzer (scev) developed by Sebastian Pop.

   During the analysis phase the vectorizer records some information
   per stmt in a "stmt_vec_info" struct which is attached to each stmt in the
   loop, as well as general information about the loop as a whole, which is
   recorded in a "loop_vec_info" struct attached to each loop.

   Transformation phase:
   =====================
   The loop transformation phase scans all the stmts in the loop, and
   creates a vector stmt (or a sequence of stmts) for each scalar stmt S in
   the loop that needs to be vectorized.  It inserts the vector code sequence
   just before the scalar stmt S, and records a pointer to the vector code
   in STMT_VINFO_VEC_STMT (stmt_info) (stmt_info is the stmt_vec_info struct
   attached to S).  This pointer will be used for the vectorization of
   following stmts which use the def of stmt S.  Stmt S is removed if it
   writes to memory; otherwise, we rely on dead code elimination for
   removing it.

   For example, say stmt S1 was vectorized into stmt VS1:

   VS1: vb = px[i];
   S1:  b = x[i];    STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
   S2:  a = b;

   To vectorize stmt S2, the vectorizer first finds the stmt that defines
   the operand 'b' (S1), and gets the relevant vector def 'vb' from the
   vector stmt VS1 pointed to by STMT_VINFO_VEC_STMT (stmt_info (S1)).  The
   resulting sequence would be:

   VS1: vb = px[i];
   S1:  b = x[i];    STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
   VS2: va = vb;
   S2:  a = b;       STMT_VINFO_VEC_STMT (stmt_info (S2)) = VS2

   Operands that are not SSA_NAMEs, are data-refs that appear in
   load/store operations (like 'x[i]' in S1), and are handled differently.

   Target modeling:
   =================
   Currently the only target specific information that is used is the
   size of the vector (in bytes) - "TARGET_VECTORIZE_UNITS_PER_SIMD_WORD".
   Targets that can support different sizes of vectors, for now will need
   to specify one value for "TARGET_VECTORIZE_UNITS_PER_SIMD_WORD".  More
   flexibility will be added in the future.

   Since we only vectorize operations whose vector form can be
   expressed using existing tree codes, to verify that an operation is
   supported, the vectorizer checks the relevant optab at the relevant
   machine_mode (e.g., optab_handler (add_optab, V8HImode)).  If
   the value found is CODE_FOR_nothing, then there's no target support, and
   we can't vectorize the stmt.

   For additional information on this project see:
   http://gcc.gnu.org/projects/tree-ssa/vectorization.html
*/

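/* For illustration only (a sketch, not code used by this pass): the optab
   query described above boils down to a check along the lines of

     optab op = optab_for_tree_code (PLUS_EXPR, vectype, optab_default);
     if (!op || optab_handler (op, TYPE_MODE (vectype)) == CODE_FOR_nothing)
       ... no target support - the stmt cannot be vectorized ...

   where any result other than CODE_FOR_nothing means the target provides
   an instruction pattern for the operation in that vector mode.  */
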
/* Function vect_determine_vectorization_factor

   Determine the vectorization factor (VF).  VF is the number of data elements
   that are operated upon in parallel in a single iteration of the vectorized
   loop.  For example, when vectorizing a loop that operates on 4-byte
   elements, on a target with vector size (VS) of 16 bytes, the VF is set to
   4, since 4 elements can fit in a single vector register.

   We currently support vectorization of loops in which all types operated
   upon are of the same size.  Therefore this function currently sets VF
   according to the size of the types operated upon, and fails if there are
   multiple sizes in the loop.

   VF is also the factor by which the loop iterations are strip-mined, e.g.:
   original loop:
        for (i=0; i<N; i++){
          a[i] = b[i] + c[i];
        }

   vectorized loop:
        for (i=0; i<N; i+=VF){
          a[i:VF] = b[i:VF] + c[i:VF];
        }
*/
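
/* For illustration only (a sketch): with a 16-byte vector size, a loop
   operating on 2-byte 'short' elements gets VF = 16/2 = 8 (eight elements
   per vector iteration, e.g. one V8HI operation), whereas 4-byte 'int'
   elements would give VF = 16/4 = 4.  A loop mixing both element sizes is
   rejected by this function, per the restriction above.  */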

static bool
vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  int nbbs = loop->num_nodes;
  gimple_stmt_iterator si;
  unsigned int vectorization_factor = 0;
  tree scalar_type;
  gimple phi;
  tree vectype;
  unsigned int nunits;
  stmt_vec_info stmt_info;
  int i;
  HOST_WIDE_INT dummy;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== vect_determine_vectorization_factor ===");

  for (i = 0; i < nbbs; i++)
    {
      basic_block bb = bbs[i];

      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
        {
          phi = gsi_stmt (si);
          stmt_info = vinfo_for_stmt (phi);
          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "==> examining phi: ");
              print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
            }

          gcc_assert (stmt_info);

          if (STMT_VINFO_RELEVANT_P (stmt_info))
            {
              gcc_assert (!STMT_VINFO_VECTYPE (stmt_info));
              scalar_type = TREE_TYPE (PHI_RESULT (phi));

              if (vect_print_dump_info (REPORT_DETAILS))
                {
                  fprintf (vect_dump, "get vectype for scalar type: ");
                  print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
                }

              vectype = get_vectype_for_scalar_type (scalar_type);
              if (!vectype)
                {
                  if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                    {
                      fprintf (vect_dump,
                               "not vectorized: unsupported data-type ");
                      print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
                    }
                  return false;
                }
              STMT_VINFO_VECTYPE (stmt_info) = vectype;

              if (vect_print_dump_info (REPORT_DETAILS))
                {
                  fprintf (vect_dump, "vectype: ");
                  print_generic_expr (vect_dump, vectype, TDF_SLIM);
                }

              nunits = TYPE_VECTOR_SUBPARTS (vectype);
              if (vect_print_dump_info (REPORT_DETAILS))
                fprintf (vect_dump, "nunits = %d", nunits);

              if (!vectorization_factor
                  || (nunits > vectorization_factor))
                vectorization_factor = nunits;
            }
        }

      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
        {
          tree vf_vectype;
          gimple stmt = gsi_stmt (si);
          stmt_info = vinfo_for_stmt (stmt);

          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "==> examining statement: ");
              print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
            }

          gcc_assert (stmt_info);

          /* Skip stmts which do not need to be vectorized.  */
          if (!STMT_VINFO_RELEVANT_P (stmt_info)
              && !STMT_VINFO_LIVE_P (stmt_info))
            {
              if (vect_print_dump_info (REPORT_DETAILS))
                fprintf (vect_dump, "skip.");
              continue;
            }

          if (gimple_get_lhs (stmt) == NULL_TREE)
            {
              if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                {
                  fprintf (vect_dump, "not vectorized: irregular stmt.");
                  print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
                }
              return false;
            }

          if (VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))))
            {
              if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                {
                  fprintf (vect_dump, "not vectorized: vector stmt in loop:");
                  print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
                }
              return false;
            }

          if (STMT_VINFO_VECTYPE (stmt_info))
            {
              /* The only case when a vectype has already been set is for
                 stmts that contain a dataref, or for "pattern-stmts" (stmts
                 generated by the vectorizer to represent/replace a certain
                 idiom).  */
              gcc_assert (STMT_VINFO_DATA_REF (stmt_info)
                          || is_pattern_stmt_p (stmt_info));
              vectype = STMT_VINFO_VECTYPE (stmt_info);
            }
          else
            {
              gcc_assert (!STMT_VINFO_DATA_REF (stmt_info)
                          && !is_pattern_stmt_p (stmt_info));

              scalar_type = TREE_TYPE (gimple_get_lhs (stmt));
              if (vect_print_dump_info (REPORT_DETAILS))
                {
                  fprintf (vect_dump, "get vectype for scalar type: ");
                  print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
                }
              vectype = get_vectype_for_scalar_type (scalar_type);
              if (!vectype)
                {
                  if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                    {
                      fprintf (vect_dump,
                               "not vectorized: unsupported data-type ");
                      print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
                    }
                  return false;
                }

              STMT_VINFO_VECTYPE (stmt_info) = vectype;
            }

          /* The vectorization factor is according to the smallest
             scalar type (or the largest vector size, but we only
             support one vector size per loop).  */
          scalar_type = vect_get_smallest_scalar_type (stmt, &dummy,
                                                       &dummy);
          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "get vectype for scalar type: ");
              print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
            }
          vf_vectype = get_vectype_for_scalar_type (scalar_type);
          if (!vf_vectype)
            {
              if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                {
                  fprintf (vect_dump,
                           "not vectorized: unsupported data-type ");
                  print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
                }
              return false;
            }

          if ((GET_MODE_SIZE (TYPE_MODE (vectype))
               != GET_MODE_SIZE (TYPE_MODE (vf_vectype))))
            {
              if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                {
                  fprintf (vect_dump,
                           "not vectorized: different sized vector "
                           "types in statement, ");
                  print_generic_expr (vect_dump, vectype, TDF_SLIM);
                  fprintf (vect_dump, " and ");
                  print_generic_expr (vect_dump, vf_vectype, TDF_SLIM);
                }
              return false;
            }

          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "vectype: ");
              print_generic_expr (vect_dump, vf_vectype, TDF_SLIM);
            }

          nunits = TYPE_VECTOR_SUBPARTS (vf_vectype);
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "nunits = %d", nunits);

          if (!vectorization_factor
              || (nunits > vectorization_factor))
            vectorization_factor = nunits;
        }
    }

  /* TODO: Analyze cost.  Decide if worthwhile to vectorize.  */
  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "vectorization factor = %d", vectorization_factor);
  if (vectorization_factor <= 1)
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: unsupported data-type");
      return false;
    }
  LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;

  return true;
}


/* Function vect_is_simple_iv_evolution.

   FORNOW: A simple evolution of an induction variable in the loop is
   considered a polynomial evolution with constant step.  */

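/* For illustration only (a sketch in scev's chrec notation): in

     for (i = 0; i < N; i++)
       s += i;

   the evolution of 'i' in the loop (say loop 1) is {0, +, 1}_1 - initial
   value 0, constant step 1 - which is "simple" in the above sense.  By
   contrast, 's' evolves as {0, +, {0, +, 1}_1}_1 (the step itself varies),
   a degree-2 polynomial that the test below rejects.  */
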
static bool
vect_is_simple_iv_evolution (unsigned loop_nb, tree access_fn, tree * init,
                             tree * step)
{
  tree init_expr;
  tree step_expr;
  tree evolution_part = evolution_part_in_loop_num (access_fn, loop_nb);

  /* When there is no evolution in this loop, the evolution function
     is not "simple".  */
  if (evolution_part == NULL_TREE)
    return false;

  /* When the evolution is a polynomial of degree >= 2
     the evolution function is not "simple".  */
  if (tree_is_chrec (evolution_part))
    return false;

  step_expr = evolution_part;
  init_expr = unshare_expr (initial_condition_in_loop_num (access_fn, loop_nb));

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "step: ");
      print_generic_expr (vect_dump, step_expr, TDF_SLIM);
      fprintf (vect_dump, ",  init: ");
      print_generic_expr (vect_dump, init_expr, TDF_SLIM);
    }

  *init = init_expr;
  *step = step_expr;

  if (TREE_CODE (step_expr) != INTEGER_CST)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "step unknown.");
      return false;
    }

  return true;
}

/* Function vect_analyze_scalar_cycles_1.

   Examine the cross iteration def-use cycles of scalar variables
   in LOOP.  LOOP_VINFO represents the loop that is now being
   considered for vectorization (can be LOOP, or an outer-loop
   enclosing LOOP).  */

static void
vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, struct loop *loop)
{
  basic_block bb = loop->header;
  tree dumy;
  VEC(gimple,heap) *worklist = VEC_alloc (gimple, heap, 64);
  gimple_stmt_iterator gsi;
  bool double_reduc;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== vect_analyze_scalar_cycles ===");

  /* First - identify all inductions.  Reduction detection assumes that all
     the inductions have been identified, therefore, this order must not be
     changed.  */
  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);
      tree access_fn = NULL;
      tree def = PHI_RESULT (phi);
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (phi);

      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "Analyze phi: ");
          print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
        }

      /* Skip virtual phi's.  The data dependences that are associated with
         virtual defs/uses (i.e., memory accesses) are analyzed elsewhere.  */
      if (!is_gimple_reg (SSA_NAME_VAR (def)))
        continue;

      STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_unknown_def_type;

      /* Analyze the evolution function.  */
      access_fn = analyze_scalar_evolution (loop, def);
      if (access_fn)
        STRIP_NOPS (access_fn);
      if (access_fn && vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "Access function of PHI: ");
          print_generic_expr (vect_dump, access_fn, TDF_SLIM);
        }

      if (!access_fn
          || !vect_is_simple_iv_evolution (loop->num, access_fn, &dumy, &dumy))
        {
          VEC_safe_push (gimple, heap, worklist, phi);
          continue;
        }

      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "Detected induction.");
      STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_induction_def;
    }


  /* Second - identify all reductions and nested cycles.  */
  while (VEC_length (gimple, worklist) > 0)
    {
      gimple phi = VEC_pop (gimple, worklist);
      tree def = PHI_RESULT (phi);
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (phi);
      gimple reduc_stmt;
      bool nested_cycle;

      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "Analyze phi: ");
          print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
        }

      gcc_assert (is_gimple_reg (SSA_NAME_VAR (def)));
      gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_unknown_def_type);

      nested_cycle = (loop != LOOP_VINFO_LOOP (loop_vinfo));
      reduc_stmt = vect_force_simple_reduction (loop_vinfo, phi, !nested_cycle,
                                                &double_reduc);
      if (reduc_stmt)
        {
          if (double_reduc)
            {
              if (vect_print_dump_info (REPORT_DETAILS))
                fprintf (vect_dump, "Detected double reduction.");

              STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_double_reduction_def;
              STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
                                                    vect_double_reduction_def;
            }
          else
            {
              if (nested_cycle)
                {
                  if (vect_print_dump_info (REPORT_DETAILS))
                    fprintf (vect_dump, "Detected vectorizable nested cycle.");

                  STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_nested_cycle;
                  STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
                                                             vect_nested_cycle;
                }
              else
                {
                  if (vect_print_dump_info (REPORT_DETAILS))
                    fprintf (vect_dump, "Detected reduction.");

                  STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_reduction_def;
                  STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
                                                           vect_reduction_def;
                  /* Store the reduction cycles for possible vectorization in
                     loop-aware SLP.  */
                  VEC_safe_push (gimple, heap,
                                 LOOP_VINFO_REDUCTIONS (loop_vinfo),
                                 reduc_stmt);
                }
            }
        }
      else
        if (vect_print_dump_info (REPORT_DETAILS))
          fprintf (vect_dump, "Unknown def-use cycle pattern.");
    }

  VEC_free (gimple, heap, worklist);
}


/* Function vect_analyze_scalar_cycles.

   Examine the cross iteration def-use cycles of scalar variables, by
   analyzing the loop-header PHIs of scalar variables.  Classify each
   cycle as one of the following: invariant, induction, reduction, unknown.
   We do that for the loop represented by LOOP_VINFO, and also for its
   inner-loop, if it exists.
   Examples for scalar cycles:

   Example1: reduction:

              loop1:
              for (i=0; i<N; i++)
                 sum += a[i];

   Example2: induction:

              loop2:
              for (i=0; i<N; i++)
                 a[i] = i;  */

static void
vect_analyze_scalar_cycles (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);

  vect_analyze_scalar_cycles_1 (loop_vinfo, loop);

  /* When vectorizing an outer-loop, the inner-loop is executed sequentially.
     Reductions in such an inner-loop therefore have different properties
     than the reductions in the nest that gets vectorized:
     1. When vectorized, they are executed in the same order as in the
        original scalar loop, so we can't change the order of computation
        when vectorizing them.
     2. FIXME: Inner-loop reductions can be used in the inner-loop, so the
        current checks are too strict.  */

  if (loop->inner)
    vect_analyze_scalar_cycles_1 (loop_vinfo, loop->inner);
}

/* Function vect_get_loop_niters.

   Determine how many iterations the loop is executed.
   If an expression that represents the number of iterations
   can be constructed, place it in NUMBER_OF_ITERATIONS.
   Return the loop exit condition.  */

static gimple
vect_get_loop_niters (struct loop *loop, tree *number_of_iterations)
{
  tree niters;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== get_loop_niters ===");

  niters = number_of_exit_cond_executions (loop);

  if (niters != NULL_TREE
      && niters != chrec_dont_know)
    {
      *number_of_iterations = niters;

      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "==> get_loop_niters:");
          print_generic_expr (vect_dump, *number_of_iterations, TDF_SLIM);
        }
    }

  return get_loop_exit_condition (loop);
}


/* Function bb_in_loop_p

   Used as predicate for dfs order traversal of the loop bbs.  */

static bool
bb_in_loop_p (const_basic_block bb, const void *data)
{
  const struct loop *const loop = (const struct loop *)data;
  if (flow_bb_inside_loop_p (loop, bb))
    return true;
  return false;
}


/* Function new_loop_vec_info.

   Create and initialize a new loop_vec_info struct for LOOP, as well as
   stmt_vec_info structs for all the stmts in LOOP.  */

static loop_vec_info
new_loop_vec_info (struct loop *loop)
{
  loop_vec_info res;
  basic_block *bbs;
  gimple_stmt_iterator si;
  unsigned int i, nbbs;

  res = (loop_vec_info) xcalloc (1, sizeof (struct _loop_vec_info));
  LOOP_VINFO_LOOP (res) = loop;

  bbs = get_loop_body (loop);

  /* Create/Update stmt_info for all stmts in the loop.  */
  for (i = 0; i < loop->num_nodes; i++)
    {
      basic_block bb = bbs[i];

      /* BBs in a nested inner-loop will have been already processed (because
         we will have called vect_analyze_loop_form for any nested inner-loop).
         Therefore, for stmts in an inner-loop we just want to update the
         STMT_VINFO_LOOP_VINFO field of their stmt_info to point to the new
         loop_info of the outer-loop we are currently considering to vectorize
         (instead of the loop_info of the inner-loop).
         For stmts in other BBs we need to create a stmt_info from scratch.  */
      if (bb->loop_father != loop)
        {
          /* Inner-loop bb.  */
          gcc_assert (loop->inner && bb->loop_father == loop->inner);
          for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
            {
              gimple phi = gsi_stmt (si);
              stmt_vec_info stmt_info = vinfo_for_stmt (phi);
              loop_vec_info inner_loop_vinfo =
                STMT_VINFO_LOOP_VINFO (stmt_info);
              gcc_assert (loop->inner == LOOP_VINFO_LOOP (inner_loop_vinfo));
              STMT_VINFO_LOOP_VINFO (stmt_info) = res;
            }
          for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
            {
              gimple stmt = gsi_stmt (si);
              stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
              loop_vec_info inner_loop_vinfo =
                STMT_VINFO_LOOP_VINFO (stmt_info);
              gcc_assert (loop->inner == LOOP_VINFO_LOOP (inner_loop_vinfo));
              STMT_VINFO_LOOP_VINFO (stmt_info) = res;
            }
        }
      else
        {
          /* bb in current nest.  */
          for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
            {
              gimple phi = gsi_stmt (si);
              gimple_set_uid (phi, 0);
              set_vinfo_for_stmt (phi, new_stmt_vec_info (phi, res, NULL));
            }

          for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
            {
              gimple stmt = gsi_stmt (si);
              gimple_set_uid (stmt, 0);
              set_vinfo_for_stmt (stmt, new_stmt_vec_info (stmt, res, NULL));
            }
        }
    }

  /* CHECKME: We want to visit all BBs before their successors (except for
     latch blocks, for which this assertion wouldn't hold).  In the simple
     case of the loop forms we allow, a dfs order of the BBs would be the
     same as reversed postorder traversal, so we are safe.  */

  free (bbs);
  bbs = XCNEWVEC (basic_block, loop->num_nodes);
  nbbs = dfs_enumerate_from (loop->header, 0, bb_in_loop_p,
                             bbs, loop->num_nodes, loop);
  gcc_assert (nbbs == loop->num_nodes);

  LOOP_VINFO_BBS (res) = bbs;
  LOOP_VINFO_NITERS (res) = NULL;
  LOOP_VINFO_NITERS_UNCHANGED (res) = NULL;
  LOOP_VINFO_COST_MODEL_MIN_ITERS (res) = 0;
  LOOP_VINFO_VECTORIZABLE_P (res) = 0;
  LOOP_PEELING_FOR_ALIGNMENT (res) = 0;
  LOOP_VINFO_VECT_FACTOR (res) = 0;
  LOOP_VINFO_LOOP_NEST (res) = VEC_alloc (loop_p, heap, 3);
  LOOP_VINFO_DATAREFS (res) = VEC_alloc (data_reference_p, heap, 10);
  LOOP_VINFO_DDRS (res) = VEC_alloc (ddr_p, heap, 10 * 10);
  LOOP_VINFO_UNALIGNED_DR (res) = NULL;
  LOOP_VINFO_MAY_MISALIGN_STMTS (res) =
    VEC_alloc (gimple, heap,
               PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIGNMENT_CHECKS));
  LOOP_VINFO_MAY_ALIAS_DDRS (res) =
    VEC_alloc (ddr_p, heap,
               PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS));
  LOOP_VINFO_STRIDED_STORES (res) = VEC_alloc (gimple, heap, 10);
  LOOP_VINFO_REDUCTIONS (res) = VEC_alloc (gimple, heap, 10);
  LOOP_VINFO_SLP_INSTANCES (res) = VEC_alloc (slp_instance, heap, 10);
  LOOP_VINFO_SLP_UNROLLING_FACTOR (res) = 1;
  LOOP_VINFO_PEELING_HTAB (res) = NULL;

  return res;
}


/* Function destroy_loop_vec_info.

   Free LOOP_VINFO struct, as well as all the stmt_vec_info structs of all the
   stmts in the loop.  */

void
destroy_loop_vec_info (loop_vec_info loop_vinfo, bool clean_stmts)
{
  struct loop *loop;
  basic_block *bbs;
  int nbbs;
  gimple_stmt_iterator si;
  int j;
  VEC (slp_instance, heap) *slp_instances;
  slp_instance instance;

  if (!loop_vinfo)
    return;

  loop = LOOP_VINFO_LOOP (loop_vinfo);

  bbs = LOOP_VINFO_BBS (loop_vinfo);
  nbbs = loop->num_nodes;

  if (!clean_stmts)
    {
      free (LOOP_VINFO_BBS (loop_vinfo));
      free_data_refs (LOOP_VINFO_DATAREFS (loop_vinfo));
      free_dependence_relations (LOOP_VINFO_DDRS (loop_vinfo));
      VEC_free (loop_p, heap, LOOP_VINFO_LOOP_NEST (loop_vinfo));
      VEC_free (gimple, heap, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo));
      VEC_free (ddr_p, heap, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo));

      free (loop_vinfo);
      loop->aux = NULL;
      return;
    }

  for (j = 0; j < nbbs; j++)
    {
      basic_block bb = bbs[j];
      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
        free_stmt_vec_info (gsi_stmt (si));

      for (si = gsi_start_bb (bb); !gsi_end_p (si); )
        {
          gimple stmt = gsi_stmt (si);
          stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

          if (stmt_info)
            {
              /* Check if this is a "pattern stmt" (introduced by the
                 vectorizer during the pattern recognition pass).  */
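              /* For illustration only (a sketch): pattern recognition may
                 have replaced something like  s += (int) a[i] * (int) b[i]
                 with a DOT_PROD_EXPR pattern stmt; the original stmt then
                 has STMT_VINFO_IN_PATTERN_P set, and the vectorizer-made
                 pattern stmt is the dead one removed below.  */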
              bool remove_stmt_p = false;
              gimple orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
              if (orig_stmt)
                {
                  stmt_vec_info orig_stmt_info = vinfo_for_stmt (orig_stmt);
                  if (orig_stmt_info
                      && STMT_VINFO_IN_PATTERN_P (orig_stmt_info))
                    remove_stmt_p = true;
                }

              /* Free stmt_vec_info.  */
              free_stmt_vec_info (stmt);

              /* Remove dead "pattern stmts".  */
              if (remove_stmt_p)
                gsi_remove (&si, true);
            }
          gsi_next (&si);
        }
    }

  free (LOOP_VINFO_BBS (loop_vinfo));
  free_data_refs (LOOP_VINFO_DATAREFS (loop_vinfo));
  free_dependence_relations (LOOP_VINFO_DDRS (loop_vinfo));
  VEC_free (loop_p, heap, LOOP_VINFO_LOOP_NEST (loop_vinfo));
  VEC_free (gimple, heap, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo));
  VEC_free (ddr_p, heap, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo));
  slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
  FOR_EACH_VEC_ELT (slp_instance, slp_instances, j, instance)
    vect_free_slp_instance (instance);

  VEC_free (slp_instance, heap, LOOP_VINFO_SLP_INSTANCES (loop_vinfo));
  VEC_free (gimple, heap, LOOP_VINFO_STRIDED_STORES (loop_vinfo));
  VEC_free (gimple, heap, LOOP_VINFO_REDUCTIONS (loop_vinfo));

  if (LOOP_VINFO_PEELING_HTAB (loop_vinfo))
    htab_delete (LOOP_VINFO_PEELING_HTAB (loop_vinfo));

  free (loop_vinfo);
  loop->aux = NULL;
}


/* Function vect_analyze_loop_1.

   Apply a set of analyses on LOOP, and create a loop_vec_info struct
   for it.  The different analyses will record information in the
   loop_vec_info struct.  This is a subset of the analyses applied in
   vect_analyze_loop, to be applied on an inner-loop nested in the loop
   that is now considered for (outer-loop) vectorization.  */

static loop_vec_info
vect_analyze_loop_1 (struct loop *loop)
{
  loop_vec_info loop_vinfo;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "===== analyze_loop_nest_1 =====");

  /* Check the CFG characteristics of the loop (nesting, entry/exit, etc.).  */

  loop_vinfo = vect_analyze_loop_form (loop);
  if (!loop_vinfo)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad inner-loop form.");
      return NULL;
    }

  return loop_vinfo;
}


/* Function vect_analyze_loop_form.

   Verify that certain CFG restrictions hold, including:
   - the loop has a pre-header
   - the loop has a single entry and exit
   - the loop exit condition is simple enough, and the number of iterations
     can be analyzed (a countable loop).  */

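/* For illustration only (a sketch): a countable single-exit loop such as

     i = 0;
     do { a[i] = 0; i++; } while (i < n);

   satisfies these restrictions, while a loop with an additional
   data-dependent exit (e.g. an early 'break' when a[i] != 0) fails the
   single-exit check below.  */
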
loop_vec_info
vect_analyze_loop_form (struct loop *loop)
{
  loop_vec_info loop_vinfo;
  gimple loop_cond;
  tree number_of_iterations = NULL;
  loop_vec_info inner_loop_vinfo = NULL;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== vect_analyze_loop_form ===");

  /* Different restrictions apply when we are considering an inner-most loop,
     vs. an outer (nested) loop.
     (FORNOW. May want to relax some of these restrictions in the future).  */

  if (!loop->inner)
    {
      /* Inner-most loop.  We currently require that the number of BBs is
         exactly 2 (the header and latch).  Vectorizable inner-most loops
         look like this:

                        (pre-header)
                           |
                          header <--------+
                           | |            |
                           | +--> latch --+
                           |
                        (exit-bb)  */

      if (loop->num_nodes != 2)
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump, "not vectorized: control flow in loop.");
          return NULL;
        }

      if (empty_block_p (loop->header))
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump, "not vectorized: empty loop.");
          return NULL;
        }
    }
  else
    {
      struct loop *innerloop = loop->inner;
      edge entryedge;

      /* Nested loop.  We currently require that the loop is doubly-nested,
         contains a single inner loop, and the number of BBs is exactly 5.
         Vectorizable outer-loops look like this:

                        (pre-header)
                           |
                          header <---+
                           |         |
                          inner-loop |
                           |         |
                          tail ------+
                           |
                        (exit-bb)

         The inner-loop has the properties expected of inner-most loops
         as described above.  */

      if ((loop->inner)->inner || (loop->inner)->next)
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump, "not vectorized: multiple nested loops.");
          return NULL;
        }

      /* Analyze the inner-loop.  */
      inner_loop_vinfo = vect_analyze_loop_1 (loop->inner);
      if (!inner_loop_vinfo)
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump, "not vectorized: Bad inner loop.");
          return NULL;
        }

      if (!expr_invariant_in_loop_p (loop,
                                     LOOP_VINFO_NITERS (inner_loop_vinfo)))
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump,
                     "not vectorized: inner-loop count not invariant.");
          destroy_loop_vec_info (inner_loop_vinfo, true);
          return NULL;
        }

      if (loop->num_nodes != 5)
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump, "not vectorized: control flow in loop.");
          destroy_loop_vec_info (inner_loop_vinfo, true);
          return NULL;
        }

      gcc_assert (EDGE_COUNT (innerloop->header->preds) == 2);
      entryedge = EDGE_PRED (innerloop->header, 0);
      if (EDGE_PRED (innerloop->header, 0)->src == innerloop->latch)
        entryedge = EDGE_PRED (innerloop->header, 1);

      if (entryedge->src != loop->header
          || !single_exit (innerloop)
          || single_exit (innerloop)->dest != EDGE_PRED (loop->latch, 0)->src)
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump, "not vectorized: unsupported outerloop form.");
          destroy_loop_vec_info (inner_loop_vinfo, true);
          return NULL;
        }

      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "Considering outer-loop vectorization.");
    }

  if (!single_exit (loop)
      || EDGE_COUNT (loop->header->preds) != 2)
    {
      if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
        {
          if (!single_exit (loop))
            fprintf (vect_dump, "not vectorized: multiple exits.");
          else if (EDGE_COUNT (loop->header->preds) != 2)
            fprintf (vect_dump, "not vectorized: too many incoming edges.");
        }
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, true);
      return NULL;
    }

  /* We assume that the loop exit condition is at the end of the loop, i.e.,
     that the loop is represented as a do-while (with a proper if-guard
     before the loop if needed), where the loop header contains all the
     executable statements, and the latch is empty.  */
  if (!empty_block_p (loop->latch)
      || !gimple_seq_empty_p (phi_nodes (loop->latch)))
    {
      if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
        fprintf (vect_dump, "not vectorized: unexpected loop form.");
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, true);
      return NULL;
    }

  /* Make sure there exists a single-predecessor exit bb:  */
  if (!single_pred_p (single_exit (loop)->dest))
    {
      edge e = single_exit (loop);
      if (!(e->flags & EDGE_ABNORMAL))
        {
          split_loop_exit_edge (e);
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "split exit edge.");
        }
      else
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump, "not vectorized: abnormal loop exit edge.");
          if (inner_loop_vinfo)
            destroy_loop_vec_info (inner_loop_vinfo, true);
          return NULL;
        }
    }

  loop_cond = vect_get_loop_niters (loop, &number_of_iterations);
  if (!loop_cond)
    {
      if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
        fprintf (vect_dump, "not vectorized: complicated exit condition.");
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, true);
      return NULL;
    }

  if (!number_of_iterations)
    {
      if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
        fprintf (vect_dump,
                 "not vectorized: number of iterations cannot be computed.");
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, true);
      return NULL;
    }

  if (chrec_contains_undetermined (number_of_iterations))
    {
      if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
        fprintf (vect_dump, "Infinite number of iterations.");
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, true);
      return NULL;
    }

  if (!NITERS_KNOWN_P (number_of_iterations))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "Symbolic number of iterations is ");
          print_generic_expr (vect_dump, number_of_iterations, TDF_DETAILS);
        }
    }
  else if (TREE_INT_CST_LOW (number_of_iterations) == 0)
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: number of iterations = 0.");
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, false);
      return NULL;
    }

  loop_vinfo = new_loop_vec_info (loop);
  LOOP_VINFO_NITERS (loop_vinfo) = number_of_iterations;
  LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo) = number_of_iterations;

  STMT_VINFO_TYPE (vinfo_for_stmt (loop_cond)) = loop_exit_ctrl_vec_info_type;

  /* CHECKME: May want to keep it around in the future.  */
  if (inner_loop_vinfo)
    destroy_loop_vec_info (inner_loop_vinfo, false);

  gcc_assert (!loop->aux);
  loop->aux = loop_vinfo;
  return loop_vinfo;
}


/* Get cost by calling cost target builtin.  */

static inline int
vect_get_cost (enum vect_cost_for_stmt type_of_cost)
{
  tree dummy_type = NULL;
  int dummy = 0;

  return targetm.vectorize.builtin_vectorization_cost (type_of_cost,
                                                       dummy_type, dummy);
}

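/* For illustration only (a sketch): comparing, say,
   vect_get_cost (scalar_stmt) against vect_get_cost (vector_stmt) yields
   the target's relative cost estimates; the actual weights come from the
   target's targetm.vectorize.builtin_vectorization_cost hook, with
   generic defaults when the target does not override it.  */
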
/* Function vect_analyze_loop_operations.

   Scan the loop stmts and make sure they are all vectorizable.  */

static bool
vect_analyze_loop_operations (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  int nbbs = loop->num_nodes;
  gimple_stmt_iterator si;
  unsigned int vectorization_factor = 0;
  int i;
  gimple phi;
  stmt_vec_info stmt_info;
  bool need_to_vectorize = false;
  int min_profitable_iters;
  int min_scalar_loop_bound;
  unsigned int th;
  bool only_slp_in_loop = true, ok;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== vect_analyze_loop_operations ===");

  gcc_assert (LOOP_VINFO_VECT_FACTOR (loop_vinfo));
  vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);

  for (i = 0; i < nbbs; i++)
    {
      basic_block bb = bbs[i];

      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
        {
          phi = gsi_stmt (si);
          ok = true;

          stmt_info = vinfo_for_stmt (phi);
          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "examining phi: ");
              print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
            }

          if (! is_loop_header_bb_p (bb))
            {
              /* Inner-loop loop-closed exit phi in outer-loop vectorization
                 (i.e., a phi in the tail of the outer-loop).
                 FORNOW: we currently don't support the case that these phis
                 are not used in the outer-loop (unless it is double reduction,
                 i.e., this phi is vect_reduction_def), because this case
                 requires us to actually do something here.  */
              if ((!STMT_VINFO_RELEVANT_P (stmt_info)
                   || STMT_VINFO_LIVE_P (stmt_info))
                  && STMT_VINFO_DEF_TYPE (stmt_info)
                     != vect_double_reduction_def)
                {
                  if (vect_print_dump_info (REPORT_DETAILS))
                    fprintf (vect_dump,
                             "Unsupported loop-closed phi in outer-loop.");
                  return false;
                }
              continue;
            }

          gcc_assert (stmt_info);

          if (STMT_VINFO_LIVE_P (stmt_info))
            {
              /* FORNOW: not yet supported.  */
              if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                fprintf (vect_dump, "not vectorized: value used after loop.");
              return false;
            }

          if (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_scope
              && STMT_VINFO_DEF_TYPE (stmt_info) != vect_induction_def)
            {
              /* A scalar-dependence cycle that we don't support.  */
              if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                fprintf (vect_dump, "not vectorized: scalar dependence cycle.");
              return false;
            }

          if (STMT_VINFO_RELEVANT_P (stmt_info))
            {
              need_to_vectorize = true;
              if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def)
                ok = vectorizable_induction (phi, NULL, NULL);
            }

          if (!ok)
            {
              if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                {
                  fprintf (vect_dump,
                           "not vectorized: relevant phi not supported: ");
                  print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
                }
              return false;
            }
        }

      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
        {
          gimple stmt = gsi_stmt (si);
          stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

          gcc_assert (stmt_info);

          if (!vect_analyze_stmt (stmt, &need_to_vectorize, NULL))
            return false;

          if ((STMT_VINFO_RELEVANT_P (stmt_info)
               || VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_info)))
              && !PURE_SLP_STMT (stmt_info))
            /* STMT needs both SLP and loop-based vectorization.  */
            only_slp_in_loop = false;
        }
    } /* bbs */

  /* All operations in the loop are either irrelevant (deal with loop
     control, or dead), or only used outside the loop and can be moved
     out of the loop (e.g. invariants, inductions).  The loop can be
     optimized away by scalar optimizations.  We're better off not
     touching this loop.  */
  if (!need_to_vectorize)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump,
                 "All the computation can be taken out of the loop.");
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump,
                 "not vectorized: redundant loop. no profit to vectorize.");
      return false;
    }

  /* If all the stmts in the loop can be SLPed, we perform only SLP, and the
     vectorization factor of the loop is the unrolling factor required by
     the SLP instances.  If that unrolling factor is 1, we say that we
     perform pure SLP on the loop - cross iteration parallelism is not
     exploited.  */
  if (only_slp_in_loop)
    vectorization_factor = LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo);
  else
    vectorization_factor = least_common_multiple (vectorization_factor,
                                LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo));

  LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;

  if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
      && vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump,
             "vectorization_factor = %d, niters = " HOST_WIDE_INT_PRINT_DEC,
             vectorization_factor, LOOP_VINFO_INT_NITERS (loop_vinfo));

  if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
      && (LOOP_VINFO_INT_NITERS (loop_vinfo) < vectorization_factor))
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: iteration count too small.");
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "not vectorized: iteration count smaller than "
                 "vectorization factor.");
      return false;
    }

  /* Analyze cost.  Decide if worthwhile to vectorize.  */

  /* Once VF is set, SLP costs should be updated since the number of created
     vector stmts depends on VF.  */
  vect_update_slp_costs_according_to_vf (loop_vinfo);

  min_profitable_iters = vect_estimate_min_profitable_iters (loop_vinfo);
  LOOP_VINFO_COST_MODEL_MIN_ITERS (loop_vinfo) = min_profitable_iters;

  if (min_profitable_iters < 0)
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: vectorization not profitable.");
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "not vectorized: vector version will never be "
                 "profitable.");
      return false;
    }

  min_scalar_loop_bound = ((PARAM_VALUE (PARAM_MIN_VECT_LOOP_BOUND)
                            * vectorization_factor) - 1);

  /* Use the cost model only if it is more conservative than user specified
     threshold.  */

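  /* For illustration only (a sketch, with made-up numbers): with
     --param min-vect-loop-bound=2 and VF = 4, MIN_SCALAR_LOOP_BOUND is
     2*4 - 1 = 7; if the cost model computed MIN_PROFITABLE_ITERS = 10,
     the more conservative value 10 becomes the threshold TH below.  */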
  th = (unsigned) min_scalar_loop_bound;
  if (min_profitable_iters
      && (!min_scalar_loop_bound
          || min_profitable_iters > min_scalar_loop_bound))
    th = (unsigned) min_profitable_iters;

  if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
      && LOOP_VINFO_INT_NITERS (loop_vinfo) <= th)
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: vectorization not "
                 "profitable.");
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "not vectorized: iteration count smaller than "
                 "user specified loop bound parameter or minimum "
                 "profitable iterations (whichever is more conservative).");
      return false;
    }

  if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
      || LOOP_VINFO_INT_NITERS (loop_vinfo) % vectorization_factor != 0
      || LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "epilog loop required.");
      if (!vect_can_advance_ivs_p (loop_vinfo))
        {
          if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
            fprintf (vect_dump,
                     "not vectorized: can't create epilog loop 1.");
          return false;
        }
      if (!slpeel_can_duplicate_loop_p (loop, single_exit (loop)))
        {
          if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
            fprintf (vect_dump,
                     "not vectorized: can't create epilog loop 2.");
          return false;
        }
    }

  return true;
}


/* Function vect_analyze_loop_2.

   Apply a set of analyses on LOOP, and create a loop_vec_info struct
   for it.  The different analyses will record information in the
   loop_vec_info struct.  */
static bool
vect_analyze_loop_2 (loop_vec_info loop_vinfo)
{
  bool ok, dummy;
  int max_vf = MAX_VECTORIZATION_FACTOR;
  int min_vf = 2;

  /* Find all data references in the loop (which correspond to vdefs/vuses)
     and analyze their evolution in the loop.  Also adjust the minimal
     vectorization factor according to the loads and stores.

     FORNOW: Handle only simple, array references, whose
     alignment can be forced, and aligned pointer-references.  */

  ok = vect_analyze_data_refs (loop_vinfo, NULL, &min_vf);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad data references.");
      return false;
    }

  /* Classify all cross-iteration scalar data-flow cycles.
     Cross-iteration cycles caused by virtual phis are analyzed separately.  */

  vect_analyze_scalar_cycles (loop_vinfo);

  vect_pattern_recog (loop_vinfo);

  /* Data-flow analysis to detect stmts that do not need to be vectorized.  */

  ok = vect_mark_stmts_to_be_vectorized (loop_vinfo);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "unexpected pattern.");
      return false;
    }

  /* Analyze data dependences between the data-refs in the loop
     and adjust the maximum vectorization factor according to
     the dependences.
     FORNOW: fail at the first data dependence that we encounter.  */

  ok = vect_analyze_data_ref_dependences (loop_vinfo, NULL, &max_vf, &dummy);
  if (!ok
      || max_vf < min_vf)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad data dependence.");
      return false;
    }

  ok = vect_determine_vectorization_factor (loop_vinfo);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "can't determine vectorization factor.");
      return false;
    }
  if (max_vf < LOOP_VINFO_VECT_FACTOR (loop_vinfo))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad data dependence.");
      return false;
    }

  /* Analyze the alignment of the data-refs in the loop.
     Fail if a data reference is found that cannot be vectorized.  */

  ok = vect_analyze_data_refs_alignment (loop_vinfo, NULL);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad data alignment.");
      return false;
    }

  /* Analyze the access patterns of the data-refs in the loop (consecutive,
     complex, etc.).  FORNOW: Only handle consecutive access pattern.  */

  ok = vect_analyze_data_ref_accesses (loop_vinfo, NULL);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad data access.");
      return false;
    }

  /* Prune the list of ddrs to be tested at run-time by versioning for alias.
     It is important to call pruning after vect_analyze_data_ref_accesses,
     since we use grouping information gathered by interleaving analysis.  */
  ok = vect_prune_runtime_alias_test_list (loop_vinfo);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "too long list of versioning for alias "
                 "run-time tests.");
      return false;
    }

  /* This pass will decide on using loop versioning and/or loop peeling in
     order to enhance the alignment of data references in the loop.  */

  ok = vect_enhance_data_refs_alignment (loop_vinfo);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad data alignment.");
      return false;
    }

  /* Check the SLP opportunities in the loop, analyze and build SLP trees.  */
  ok = vect_analyze_slp (loop_vinfo, NULL);
  if (ok)
    {
      /* Decide which possible SLP instances to SLP.  */
      vect_make_slp_decision (loop_vinfo);

      /* Find stmts that need to be both vectorized and SLPed.  */
      vect_detect_hybrid_slp (loop_vinfo);
    }

  /* Scan all the operations in the loop and make sure they are
     vectorizable.  */

  ok = vect_analyze_loop_operations (loop_vinfo);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad operation or unsupported loop bound.");
      return false;
    }

  return true;
}

/* Function vect_analyze_loop.

   Apply a set of analyses on LOOP, and create a loop_vec_info struct
   for it.  The different analyses will record information in the
   loop_vec_info struct.  */
loop_vec_info
vect_analyze_loop (struct loop *loop)
{
  loop_vec_info loop_vinfo;
  unsigned int vector_sizes;

  /* Autodetect first vector size we try.  */
  current_vector_size = 0;
  vector_sizes = targetm.vectorize.autovectorize_vector_sizes ();

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "===== analyze_loop_nest =====");

  if (loop_outer (loop)
      && loop_vec_info_for_loop (loop_outer (loop))
      && LOOP_VINFO_VECTORIZABLE_P (loop_vec_info_for_loop (loop_outer (loop))))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "outer-loop already vectorized.");
      return NULL;
    }

  while (1)
    {
      /* Check the CFG characteristics of the loop (nesting, entry/exit).  */
      loop_vinfo = vect_analyze_loop_form (loop);
      if (!loop_vinfo)
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "bad loop form.");
          return NULL;
        }

      if (vect_analyze_loop_2 (loop_vinfo))
        {
          LOOP_VINFO_VECTORIZABLE_P (loop_vinfo) = 1;

          return loop_vinfo;
        }

      destroy_loop_vec_info (loop_vinfo, true);

      vector_sizes &= ~current_vector_size;
      if (vector_sizes == 0
          || current_vector_size == 0)
        return NULL;

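      /* For illustration only (a sketch, with assumed target numbers): if
         the target advertises 32-byte and 16-byte vectors,
         autovectorize_vector_sizes () returns the mask 0x30; after a
         failed attempt with current_vector_size == 32, the mask becomes
         0x10, and 1 << floor_log2 (0x10) selects 16 for the retry.  */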
      /* Try the next biggest vector size.  */
      current_vector_size = 1 << floor_log2 (vector_sizes);
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "***** Re-trying analysis with "
                 "vector size %d\n", current_vector_size);
    }
}


/* Function reduction_code_for_scalar_code

   Input:
   CODE - tree_code of a reduction operation.

   Output:
   REDUC_CODE - the corresponding tree-code to be used to reduce the
      vector of partial results into a single scalar result (which
      will also reside in a vector) or ERROR_MARK if the operation is
      a supported reduction operation, but does not have such a tree-code.

   Return FALSE if CODE currently cannot be vectorized as a reduction.  */

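/* For illustration only (a sketch): for a sum reduction the vector loop
   accumulates a vector of partial sums {s0,s1,s2,s3}; REDUC_PLUS_EXPR then
   folds it into the single scalar s0+s1+s2+s3 in the loop epilogue.  For
   the codes mapped to ERROR_MARK below no such tree-code exists, and the
   epilogue instead reduces the vector with a sequence of shifts and
   element-wise operations.  */
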
static bool
reduction_code_for_scalar_code (enum tree_code code,
                                enum tree_code *reduc_code)
{
  switch (code)
    {
      case MAX_EXPR:
        *reduc_code = REDUC_MAX_EXPR;
        return true;

      case MIN_EXPR:
        *reduc_code = REDUC_MIN_EXPR;
        return true;

      case PLUS_EXPR:
        *reduc_code = REDUC_PLUS_EXPR;
        return true;

      case MULT_EXPR:
      case MINUS_EXPR:
      case BIT_IOR_EXPR:
      case BIT_XOR_EXPR:
      case BIT_AND_EXPR:
        *reduc_code = ERROR_MARK;
        return true;

      default:
        return false;
    }
}


/* Error reporting helper for vect_is_simple_reduction below.  GIMPLE
   statement STMT is printed with a message MSG.  */

static void
report_vect_op (gimple stmt, const char *msg)
{
  fprintf (vect_dump, "%s", msg);
  print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
}


/* Function vect_is_simple_reduction_1

   (1) Detect a cross-iteration def-use cycle that represents a simple
   reduction computation.  We look for the following pattern:

   loop_header:
     a1 = phi < a0, a2 >
     a3 = ...
     a2 = operation (a3, a1)

   such that:
   1. operation is commutative and associative and it is safe to
      change the order of the computation (if CHECK_REDUCTION is true)
   2. no uses for a2 in the loop (a2 is used out of the loop)
   3. no uses of a1 in the loop besides the reduction operation.

   Condition 1 is tested here.
   Conditions 2,3 are tested in vect_mark_stmts_to_be_vectorized.

   (2) Detect a cross-iteration def-use cycle in nested loops, i.e.,
   nested cycles, if CHECK_REDUCTION is false.

   (3) Detect cycles of phi nodes in outer-loop vectorization, i.e., double
   reductions:

     a1 = phi < a0, a2 >
     inner loop (def of a3)
     a2 = phi < a3 >

   If MODIFY is true it tries also to rework the code in-place to enable
   detection of more reduction patterns.  For the time being we rewrite
   "res -= RHS" into "res += -RHS" when it seems worthwhile.  */
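
/* For illustration only (a sketch): a double reduction as in (3) typically
   arises from source like

     int sum = 0;
     for (j = 0; j < M; j++)
       for (i = 0; i < N; i++)
         sum += a[j][i];

   where the outer-loop phi of 'sum' is fed by the inner-loop's reduction
   phi.  */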

static gimple
vect_is_simple_reduction_1 (loop_vec_info loop_info, gimple phi,
                            bool check_reduction, bool *double_reduc,
                            bool modify)
{
  struct loop *loop = (gimple_bb (phi))->loop_father;
  struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
  edge latch_e = loop_latch_edge (loop);
  tree loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
  gimple def_stmt, def1 = NULL, def2 = NULL;
  enum tree_code orig_code, code;
  tree op1, op2, op3 = NULL_TREE, op4 = NULL_TREE;
  tree type;
  int nloop_uses;
  tree name;
  imm_use_iterator imm_iter;
  use_operand_p use_p;
  bool phi_def;

  *double_reduc = false;

  /* If CHECK_REDUCTION is true, we assume inner-most loop vectorization,
     otherwise, we assume outer loop vectorization.  */
  gcc_assert ((check_reduction && loop == vect_loop)
              || (!check_reduction && flow_loop_nested_p (vect_loop, loop)));

  name = PHI_RESULT (phi);
  nloop_uses = 0;
  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, name)
    {
      gimple use_stmt = USE_STMT (use_p);
      if (is_gimple_debug (use_stmt))
        continue;
      if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt))
          && vinfo_for_stmt (use_stmt)
          && !is_pattern_stmt_p (vinfo_for_stmt (use_stmt)))
        nloop_uses++;
      if (nloop_uses > 1)
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "reduction used in loop.");
          return NULL;
        }
    }

  if (TREE_CODE (loop_arg) != SSA_NAME)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "reduction: not ssa_name: ");
          print_generic_expr (vect_dump, loop_arg, TDF_SLIM);
        }
      return NULL;
    }

  def_stmt = SSA_NAME_DEF_STMT (loop_arg);
  if (!def_stmt)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "reduction: no def_stmt.");
      return NULL;
    }

  if (!is_gimple_assign (def_stmt) && gimple_code (def_stmt) != GIMPLE_PHI)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        print_gimple_stmt (vect_dump, def_stmt, 0, TDF_SLIM);
      return NULL;
    }

  if (is_gimple_assign (def_stmt))
    {
      name = gimple_assign_lhs (def_stmt);
      phi_def = false;
    }
  else
    {
      name = PHI_RESULT (def_stmt);
      phi_def = true;
    }

  nloop_uses = 0;
  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, name)
    {
      gimple use_stmt = USE_STMT (use_p);
      if (is_gimple_debug (use_stmt))
        continue;
      if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt))
          && vinfo_for_stmt (use_stmt)
          && !is_pattern_stmt_p (vinfo_for_stmt (use_stmt)))
        nloop_uses++;
      if (nloop_uses > 1)
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "reduction used in loop.");
          return NULL;
        }
    }

  /* If DEF_STMT is a phi node itself, we expect it to have a single argument
     defined in the inner loop.  */
  if (phi_def)
    {
      op1 = PHI_ARG_DEF (def_stmt, 0);

      if (gimple_phi_num_args (def_stmt) != 1
          || TREE_CODE (op1) != SSA_NAME)
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "unsupported phi node definition.");

          return NULL;
        }

      def1 = SSA_NAME_DEF_STMT (op1);
      if (flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
          && loop->inner
          && flow_bb_inside_loop_p (loop->inner, gimple_bb (def1))
          && is_gimple_assign (def1))
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            report_vect_op (def_stmt, "detected double reduction: ");

          *double_reduc = true;
          return def_stmt;
        }

      return NULL;
    }

  code = orig_code = gimple_assign_rhs_code (def_stmt);

  /* We can handle "res -= x[i]", which is non-associative, by simply
     rewriting this into "res += -x[i]".  Avoid changing the gimple
     instruction for the first simple tests and only do this
     if we're allowed to change code at all.  */
  if (code == MINUS_EXPR
      && modify
      && (op1 = gimple_assign_rhs1 (def_stmt))
      && TREE_CODE (op1) == SSA_NAME
      && SSA_NAME_DEF_STMT (op1) == phi)
    code = PLUS_EXPR;

  if (check_reduction
      && (!commutative_tree_code (code) || !associative_tree_code (code)))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        report_vect_op (def_stmt, "reduction: not commutative/associative: ");
      return NULL;
    }

  if (get_gimple_rhs_class (code) != GIMPLE_BINARY_RHS)
    {
      if (code != COND_EXPR)
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            report_vect_op (def_stmt, "reduction: not binary operation: ");

          return NULL;
        }

      op3 = TREE_OPERAND (gimple_assign_rhs1 (def_stmt), 0);
      if (COMPARISON_CLASS_P (op3))
        {
          op4 = TREE_OPERAND (op3, 1);
          op3 = TREE_OPERAND (op3, 0);
        }

      op1 = TREE_OPERAND (gimple_assign_rhs1 (def_stmt), 1);
      op2 = TREE_OPERAND (gimple_assign_rhs1 (def_stmt), 2);

      if (TREE_CODE (op1) != SSA_NAME && TREE_CODE (op2) != SSA_NAME)
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            report_vect_op (def_stmt, "reduction: uses not ssa_names: ");

          return NULL;
        }
    }
  else
    {
      op1 = gimple_assign_rhs1 (def_stmt);
      op2 = gimple_assign_rhs2 (def_stmt);

      if (TREE_CODE (op1) != SSA_NAME || TREE_CODE (op2) != SSA_NAME)
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            report_vect_op (def_stmt, "reduction: uses not ssa_names: ");

          return NULL;
        }
    }

  type = TREE_TYPE (gimple_assign_lhs (def_stmt));
  if ((TREE_CODE (op1) == SSA_NAME
       && !types_compatible_p (type, TREE_TYPE (op1)))
      || (TREE_CODE (op2) == SSA_NAME
          && !types_compatible_p (type, TREE_TYPE (op2)))
      || (op3 && TREE_CODE (op3) == SSA_NAME
          && !types_compatible_p (type, TREE_TYPE (op3)))
      || (op4 && TREE_CODE (op4) == SSA_NAME
          && !types_compatible_p (type, TREE_TYPE (op4))))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "reduction: multiple types: operation type: ");
          print_generic_expr (vect_dump, type, TDF_SLIM);
          fprintf (vect_dump, ", operands types: ");
          print_generic_expr (vect_dump, TREE_TYPE (op1), TDF_SLIM);
          fprintf (vect_dump, ",");
          print_generic_expr (vect_dump, TREE_TYPE (op2), TDF_SLIM);
          if (op3)
            {
              fprintf (vect_dump, ",");
              print_generic_expr (vect_dump, TREE_TYPE (op3), TDF_SLIM);
            }

          if (op4)
            {
              fprintf (vect_dump, ",");
              print_generic_expr (vect_dump, TREE_TYPE (op4), TDF_SLIM);
            }
        }

      return NULL;
    }

  /* Check that it's ok to change the order of the computation.
     Generally, when vectorizing a reduction we change the order of the
     computation.  This may change the behavior of the program in some
     cases, so we need to check that this is ok.  One exception is when
     vectorizing an outer-loop: the inner-loop is executed sequentially,
     and therefore vectorizing reductions in the inner-loop during
     outer-loop vectorization is safe.  */

  /* CHECKME: check for !flag_finite_math_only too?  */
  if (SCALAR_FLOAT_TYPE_P (type) && !flag_associative_math
      && check_reduction)
    {
      /* Changing the order of operations changes the semantics.  */
      if (vect_print_dump_info (REPORT_DETAILS))
        report_vect_op (def_stmt, "reduction: unsafe fp math optimization: ");
      return NULL;
    }
  else if (INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_TRAPS (type)
           && check_reduction)
    {
      /* Changing the order of operations changes the semantics.  */
      if (vect_print_dump_info (REPORT_DETAILS))
        report_vect_op (def_stmt, "reduction: unsafe int math optimization: ");
      return NULL;
    }
  else if (SAT_FIXED_POINT_TYPE_P (type) && check_reduction)
    {
      /* Changing the order of operations changes the semantics.  */
      if (vect_print_dump_info (REPORT_DETAILS))
        report_vect_op (def_stmt,
                        "reduction: unsafe fixed-point math optimization: ");
      return NULL;
    }

  /* If we detected "res -= x[i]" earlier, rewrite it into
     "res += -x[i]" now.  If this turns out to be useless reassoc
     will clean it up again.  */
  if (orig_code == MINUS_EXPR)
    {
      tree rhs = gimple_assign_rhs2 (def_stmt);
      tree negrhs = make_ssa_name (SSA_NAME_VAR (rhs), NULL);
      gimple negate_stmt = gimple_build_assign_with_ops (NEGATE_EXPR, negrhs,
                                                         rhs, NULL);
      gimple_stmt_iterator gsi = gsi_for_stmt (def_stmt);
      set_vinfo_for_stmt (negate_stmt, new_stmt_vec_info (negate_stmt,
                                                          loop_info, NULL));
      gsi_insert_before (&gsi, negate_stmt, GSI_NEW_STMT);
      gimple_assign_set_rhs2 (def_stmt, negrhs);
      gimple_assign_set_rhs_code (def_stmt, PLUS_EXPR);
      update_stmt (def_stmt);
    }
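
  /* For illustration (a sketch, not in the original source): the rewrite
     above turns

         res_2 = res_1 - t_3;            <-- MINUS_EXPR

     into

         t_4 = -t_3;                     <-- new NEGATE_EXPR stmt
         res_2 = res_1 + t_4;            <-- PLUS_EXPR

     giving the commutative/associative form that the rest of the
     reduction machinery expects.  */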

  /* Reduction is safe.  We're dealing with one of the following:
     1) integer arithmetic and no trapv
     2) floating point arithmetic, and special flags permit this optimization
     3) nested cycle (i.e., outer loop vectorization).  */
  if (TREE_CODE (op1) == SSA_NAME)
    def1 = SSA_NAME_DEF_STMT (op1);

  if (TREE_CODE (op2) == SSA_NAME)
    def2 = SSA_NAME_DEF_STMT (op2);

  if (code != COND_EXPR
      && (!def1 || !def2 || gimple_nop_p (def1) || gimple_nop_p (def2)))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        report_vect_op (def_stmt, "reduction: no defs for operands: ");
      return NULL;
    }

  /* Check that one def is the reduction def, defined by PHI,
     the other def is either defined in the loop ("vect_internal_def"),
     or it's an induction (defined by a loop-header phi-node).  */

  if (def2 && def2 == phi
      && (code == COND_EXPR
          || (def1 && flow_bb_inside_loop_p (loop, gimple_bb (def1))
              && (is_gimple_assign (def1)
                  || is_gimple_call (def1)
                  || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def1))
                     == vect_induction_def
                  || (gimple_code (def1) == GIMPLE_PHI
                      && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def1))
                         == vect_internal_def
                      && !is_loop_header_bb_p (gimple_bb (def1)))))))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        report_vect_op (def_stmt, "detected reduction: ");
      return def_stmt;
    }
  else if (def1 && def1 == phi
           && (code == COND_EXPR
               || (def2 && flow_bb_inside_loop_p (loop, gimple_bb (def2))
                   && (is_gimple_assign (def2)
                       || is_gimple_call (def2)
                       || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def2))
                          == vect_induction_def
                       || (gimple_code (def2) == GIMPLE_PHI
                           && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def2))
                              == vect_internal_def
                           && !is_loop_header_bb_p (gimple_bb (def2)))))))
    {
      if (check_reduction)
        {
          /* Swap operands (just for simplicity - so that the rest of the code
             can assume that the reduction variable is always the last (second)
             argument).  */
          if (vect_print_dump_info (REPORT_DETAILS))
            report_vect_op (def_stmt,
                            "detected reduction: need to swap operands: ");

          swap_tree_operands (def_stmt, gimple_assign_rhs1_ptr (def_stmt),
                              gimple_assign_rhs2_ptr (def_stmt));
        }
      else
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            report_vect_op (def_stmt, "detected reduction: ");
        }

      return def_stmt;
    }
  else
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        report_vect_op (def_stmt, "reduction: unknown pattern: ");

      return NULL;
    }
}

/* Wrapper around vect_is_simple_reduction_1, that won't modify code
   in-place.  Arguments as for that function.  */

static gimple
vect_is_simple_reduction (loop_vec_info loop_info, gimple phi,
                          bool check_reduction, bool *double_reduc)
{
  return vect_is_simple_reduction_1 (loop_info, phi, check_reduction,
                                     double_reduc, false);
}

/* Wrapper around vect_is_simple_reduction_1, which will modify code
   in-place if it enables detection of more reductions.  Arguments
   as for that function.  */

gimple
vect_force_simple_reduction (loop_vec_info loop_info, gimple phi,
                             bool check_reduction, bool *double_reduc)
{
  return vect_is_simple_reduction_1 (loop_info, phi, check_reduction,
                                     double_reduc, true);
}

/* Calculate the cost of one scalar iteration of the loop.  */
int
vect_get_single_scalar_iteraion_cost (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  int nbbs = loop->num_nodes, factor, scalar_single_iter_cost = 0;
  int innerloop_iters, i, stmt_cost;

  /* Count statements in scalar loop.  Using this as scalar cost for a single
     iteration for now.

     TODO: Add outer loop support.

     TODO: Consider assigning different costs to different scalar
     statements.  */

  /* FORNOW.  */
  innerloop_iters = 1;
  if (loop->inner)
    innerloop_iters = 50; /* FIXME */

  for (i = 0; i < nbbs; i++)
    {
      gimple_stmt_iterator si;
      basic_block bb = bbs[i];

      if (bb->loop_father == loop->inner)
        factor = innerloop_iters;
      else
        factor = 1;

      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
        {
          gimple stmt = gsi_stmt (si);
          stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

          if (!is_gimple_assign (stmt) && !is_gimple_call (stmt))
            continue;

          /* Skip stmts that are not vectorized inside the loop.  */
          if (stmt_info
              && !STMT_VINFO_RELEVANT_P (stmt_info)
              && (!STMT_VINFO_LIVE_P (stmt_info)
                  || STMT_VINFO_DEF_TYPE (stmt_info) != vect_reduction_def))
            continue;

          if (STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt)))
            {
              if (DR_IS_READ (STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt))))
                stmt_cost = vect_get_cost (scalar_load);
              else
                stmt_cost = vect_get_cost (scalar_store);
            }
          else
            stmt_cost = vect_get_cost (scalar_stmt);

          scalar_single_iter_cost += stmt_cost * factor;
        }
    }
  return scalar_single_iter_cost;
}
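
/* Worked example for the function above (illustrative only, with made-up
   target costs): for a single-basic-block loop body containing one load,
   one add and one store, on a target where vect_get_cost returns 1 for
   scalar_load, scalar_stmt and scalar_store alike, the scalar
   single-iteration cost is 1 + 1 + 1 = 3.  Statements in an inner loop
   would additionally be weighted by the FORNOW factor of 50.  */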

/* Calculate cost of peeling the loop PEEL_ITERS_PROLOGUE times.  */
int
vect_get_known_peeling_cost (loop_vec_info loop_vinfo, int peel_iters_prologue,
                             int *peel_iters_epilogue,
                             int scalar_single_iter_cost)
{
  int peel_guard_costs = 0;
  int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);

  if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
    {
      *peel_iters_epilogue = vf/2;
      if (vect_print_dump_info (REPORT_COST))
        fprintf (vect_dump, "cost model: "
                 "epilogue peel iters set to vf/2 because "
                 "loop iterations are unknown.");

      /* If peeled iterations are known but the number of scalar loop
         iterations is unknown, count a taken branch per peeled loop.  */
      peel_guard_costs = 2 * vect_get_cost (cond_branch_taken);
    }
  else
    {
      int niters = LOOP_VINFO_INT_NITERS (loop_vinfo);
      peel_iters_prologue = niters < peel_iters_prologue ?
                            niters : peel_iters_prologue;
      *peel_iters_epilogue = (niters - peel_iters_prologue) % vf;
    }

  return (peel_iters_prologue * scalar_single_iter_cost)
         + (*peel_iters_epilogue * scalar_single_iter_cost)
         + peel_guard_costs;
}
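
/* Worked example for the function above (illustrative only): with a
   compile-time-known NITERS of 100, VF = 4 and PEEL_ITERS_PROLOGUE = 2,
   the epilogue gets (100 - 2) % 4 = 2 iterations, so for a scalar
   single-iteration cost of 3 the function returns 2*3 + 2*3 + 0 = 12.
   When NITERS is unknown, the epilogue count is instead guessed as vf/2
   and two taken-branch guard costs are added.  */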

/* Function vect_estimate_min_profitable_iters

   Return the number of iterations required for the vector version of the
   loop to be profitable relative to the cost of the scalar version of the
   loop.

   TODO: Take profile info into account before making vectorization
   decisions, if available.  */

int
vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo)
{
  int i;
  int min_profitable_iters;
  int peel_iters_prologue;
  int peel_iters_epilogue;
  int vec_inside_cost = 0;
  int vec_outside_cost = 0;
  int scalar_single_iter_cost = 0;
  int scalar_outside_cost = 0;
  int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  int nbbs = loop->num_nodes;
  int npeel = LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo);
  int peel_guard_costs = 0;
  int innerloop_iters = 0, factor;
  VEC (slp_instance, heap) *slp_instances;
  slp_instance instance;

  /* Cost model disabled.  */
  if (!flag_vect_cost_model)
    {
      if (vect_print_dump_info (REPORT_COST))
        fprintf (vect_dump, "cost model disabled.");
      return 0;
    }

  /* Requires loop versioning tests to handle misalignment.  */
  if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo))
    {
      /* FIXME: Make cost depend on complexity of individual check.  */
      vec_outside_cost +=
        VEC_length (gimple, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo));
      if (vect_print_dump_info (REPORT_COST))
        fprintf (vect_dump, "cost model: Adding cost of checks for loop "
                 "versioning to treat misalignment.\n");
    }

  /* Requires loop versioning with alias checks.  */
  if (LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
    {
      /* FIXME: Make cost depend on complexity of individual check.  */
      vec_outside_cost +=
        VEC_length (ddr_p, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo));
      if (vect_print_dump_info (REPORT_COST))
        fprintf (vect_dump, "cost model: Adding cost of checks for loop "
                 "versioning aliasing.\n");
    }

  if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
      || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
    vec_outside_cost += vect_get_cost (cond_branch_taken);

  /* Count statements in scalar loop.  Using this as scalar cost for a single
     iteration for now.

     TODO: Add outer loop support.

     TODO: Consider assigning different costs to different scalar
     statements.  */

  /* FORNOW.  */
  if (loop->inner)
    innerloop_iters = 50; /* FIXME */

  for (i = 0; i < nbbs; i++)
    {
      gimple_stmt_iterator si;
      basic_block bb = bbs[i];

      if (bb->loop_father == loop->inner)
        factor = innerloop_iters;
      else
        factor = 1;

      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
        {
          gimple stmt = gsi_stmt (si);
          stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
          /* Skip stmts that are not vectorized inside the loop.  */
          if (!STMT_VINFO_RELEVANT_P (stmt_info)
              && (!STMT_VINFO_LIVE_P (stmt_info)
                  || STMT_VINFO_DEF_TYPE (stmt_info) != vect_reduction_def))
            continue;
          vec_inside_cost += STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info) * factor;
          /* FIXME: for stmts in the inner-loop in outer-loop vectorization,
             some of the "outside" costs are generated inside the outer-loop.  */
          vec_outside_cost += STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info);
        }
    }

  scalar_single_iter_cost = vect_get_single_scalar_iteraion_cost (loop_vinfo);

  /* Add additional cost for the peeled instructions in prologue and epilogue
     loop.

     FORNOW: If we don't know the value of peel_iters for prologue or epilogue
     at compile-time - we assume it's vf/2 (the worst would be vf-1).

     TODO: Build an expression that represents peel_iters for prologue and
     epilogue to be used in a run-time test.  */

  if (npeel < 0)
    {
      peel_iters_prologue = vf/2;
      if (vect_print_dump_info (REPORT_COST))
        fprintf (vect_dump, "cost model: "
                 "prologue peel iters set to vf/2.");

      /* If peeling for alignment is unknown, the loop bound of the main loop
         becomes unknown.  */
      peel_iters_epilogue = vf/2;
      if (vect_print_dump_info (REPORT_COST))
        fprintf (vect_dump, "cost model: "
                 "epilogue peel iters set to vf/2 because "
                 "peeling for alignment is unknown.");

      /* If peeled iterations are unknown, count a taken branch and a not taken
         branch per peeled loop.  Even if scalar loop iterations are known,
         vector iterations are not known since peeled prologue iterations are
         not known.  Hence guards remain the same.  */
      peel_guard_costs += 2 * (vect_get_cost (cond_branch_taken)
                               + vect_get_cost (cond_branch_not_taken));
      vec_outside_cost += (peel_iters_prologue * scalar_single_iter_cost)
                          + (peel_iters_epilogue * scalar_single_iter_cost)
                          + peel_guard_costs;
    }
  else
    {
      peel_iters_prologue = npeel;
      vec_outside_cost += vect_get_known_peeling_cost (loop_vinfo,
                                    peel_iters_prologue, &peel_iters_epilogue,
                                    scalar_single_iter_cost);
    }

  /* FORNOW: The scalar outside cost is incremented in one of the
     following ways:

     1. The vectorizer checks for alignment and aliasing and generates
     a condition that allows dynamic vectorization.  A cost model
     check is ANDed with the versioning condition.  Hence the scalar code
     path now has the added cost of the versioning check.

       if (cost > th & versioning_check)
         jmp to vector code

     Hence the run-time scalar cost is incremented by the not-taken branch
     cost.

     2. The vectorizer then checks if a prologue is required.  If the
     cost model check was not done before during versioning, it has to
     be done before the prologue check.

       if (cost <= th)
         prologue = scalar_iters
       if (prologue == 0)
         jmp to vector code
       else
         execute prologue
       if (prologue == num_iters)
         go to exit

     Hence the run-time scalar cost is incremented by a taken branch,
     plus a not-taken branch, plus a taken branch cost.

     3. The vectorizer then checks if an epilogue is required.  If the
     cost model check was not done before during the prologue check, it
     has to be done with the epilogue check.

       if (prologue == 0)
         jmp to vector code
       else
         execute prologue
       if (prologue == num_iters)
         go to exit
       vector code:
         if ((cost <= th) | (scalar_iters-prologue-epilogue == 0))
           jmp to epilogue

     Hence the run-time scalar cost should be incremented by 2 taken
     branches.

     TODO: The back end may reorder the BBs differently and reverse
     conditions/branch directions.  Change the estimates below to
     something more reasonable.  */

  /* If the number of iterations is known and we do not do versioning, we can
     decide whether to vectorize at compile time.  Hence the scalar version
     does not carry cost model guard costs.  */
  if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
      || LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
      || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
    {
      /* Cost model check occurs at versioning.  */
      if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
          || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
        scalar_outside_cost += vect_get_cost (cond_branch_not_taken);
      else
        {
          /* Cost model check occurs at prologue generation.  */
          if (LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo) < 0)
            scalar_outside_cost += 2 * vect_get_cost (cond_branch_taken)
                                   + vect_get_cost (cond_branch_not_taken);
          /* Cost model check occurs at epilogue generation.  */
          else
            scalar_outside_cost += 2 * vect_get_cost (cond_branch_taken);
        }
    }

  /* Add SLP costs.  */
  slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
  FOR_EACH_VEC_ELT (slp_instance, slp_instances, i, instance)
    {
      vec_outside_cost += SLP_INSTANCE_OUTSIDE_OF_LOOP_COST (instance);
      vec_inside_cost += SLP_INSTANCE_INSIDE_OF_LOOP_COST (instance);
    }

  /* Calculate the number of iterations required to make the vector version
     profitable, relative to the loop bodies only.  The following condition
     must hold true:
     SIC * niters + SOC > VIC * ((niters-PL_ITERS-EP_ITERS)/VF) + VOC
     where
     SIC = scalar iteration cost, VIC = vector iteration cost,
     VOC = vector outside cost, VF = vectorization factor,
     PL_ITERS = prologue iterations, EP_ITERS = epilogue iterations,
     SOC = scalar outside cost for run-time cost model check.  */

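  /* Worked example (illustrative only, with made-up costs): for SIC = 3,
     VIC = 6, VOC = 10, SOC = 0, VF = 4 and PL_ITERS = EP_ITERS = 2, the
     division below gives (10*4 - 6*2 - 6*2) / (3*4 - 6) = 16/6 = 2; the
     correction step bumps this to 3, the clamp against VF further below
     raises it to 4, and the final decrement returns 3 as the runtime
     threshold.  */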
  if ((scalar_single_iter_cost * vf) > vec_inside_cost)
    {
      if (vec_outside_cost <= 0)
        min_profitable_iters = 1;
      else
        {
          min_profitable_iters = ((vec_outside_cost - scalar_outside_cost) * vf
                                  - vec_inside_cost * peel_iters_prologue
                                  - vec_inside_cost * peel_iters_epilogue)
                                 / ((scalar_single_iter_cost * vf)
                                    - vec_inside_cost);

          if ((scalar_single_iter_cost * vf * min_profitable_iters)
              <= ((vec_inside_cost * min_profitable_iters)
                  + ((vec_outside_cost - scalar_outside_cost) * vf)))
            min_profitable_iters++;
        }
    }
  /* The vector version will never be profitable.  */
  else
    {
      if (vect_print_dump_info (REPORT_COST))
        fprintf (vect_dump, "cost model: the vector iteration cost = %d "
                 "divided by the scalar iteration cost = %d "
                 "is greater than or equal to the vectorization factor = %d.",
                 vec_inside_cost, scalar_single_iter_cost, vf);
      return -1;
    }

  if (vect_print_dump_info (REPORT_COST))
    {
      fprintf (vect_dump, "Cost model analysis: \n");
      fprintf (vect_dump, "  Vector inside of loop cost: %d\n",
               vec_inside_cost);
      fprintf (vect_dump, "  Vector outside of loop cost: %d\n",
               vec_outside_cost);
      fprintf (vect_dump, "  Scalar iteration cost: %d\n",
               scalar_single_iter_cost);
      fprintf (vect_dump, "  Scalar outside cost: %d\n", scalar_outside_cost);
      fprintf (vect_dump, "  prologue iterations: %d\n",
               peel_iters_prologue);
      fprintf (vect_dump, "  epilogue iterations: %d\n",
               peel_iters_epilogue);
      fprintf (vect_dump, "  Calculated minimum iters for profitability: %d\n",
               min_profitable_iters);
    }

  min_profitable_iters =
        min_profitable_iters < vf ? vf : min_profitable_iters;

  /* Because the condition we create is:
     if (niters <= min_profitable_iters)
       then skip the vectorized loop.  */
  min_profitable_iters--;

  if (vect_print_dump_info (REPORT_COST))
    fprintf (vect_dump, "  Profitability threshold = %d\n",
             min_profitable_iters);

  return min_profitable_iters;
}


/* TODO: Close dependency between vect_model_*_cost and vectorizable_*
   functions.  Design better to avoid maintenance issues.  */

/* Function vect_model_reduction_cost.

   Models cost for a reduction operation, including the vector ops
   generated within the strip-mine loop, the initial definition before
   the loop, and the epilogue code that must be generated.  */

static bool
vect_model_reduction_cost (stmt_vec_info stmt_info, enum tree_code reduc_code,
                           int ncopies)
{
  int outer_cost = 0;
  enum tree_code code;
  optab optab;
  tree vectype;
  gimple stmt, orig_stmt;
  tree reduction_op;
  enum machine_mode mode;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);

  /* Cost of reduction op inside loop.  */
  STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info)
    += ncopies * vect_get_cost (vector_stmt);

  stmt = STMT_VINFO_STMT (stmt_info);

  switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
    {
    case GIMPLE_SINGLE_RHS:
      gcc_assert (TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt)) == ternary_op);
      reduction_op = TREE_OPERAND (gimple_assign_rhs1 (stmt), 2);
      break;
    case GIMPLE_UNARY_RHS:
      reduction_op = gimple_assign_rhs1 (stmt);
      break;
    case GIMPLE_BINARY_RHS:
      reduction_op = gimple_assign_rhs2 (stmt);
      break;
    default:
      gcc_unreachable ();
    }

  vectype = get_vectype_for_scalar_type (TREE_TYPE (reduction_op));
  if (!vectype)
    {
      if (vect_print_dump_info (REPORT_COST))
        {
          fprintf (vect_dump, "unsupported data-type ");
          print_generic_expr (vect_dump, TREE_TYPE (reduction_op), TDF_SLIM);
        }
      return false;
    }

  mode = TYPE_MODE (vectype);
  orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);

  if (!orig_stmt)
    orig_stmt = STMT_VINFO_STMT (stmt_info);

  code = gimple_assign_rhs_code (orig_stmt);

  /* Add in cost for initial definition.  */
  outer_cost += vect_get_cost (scalar_to_vec);

  /* Determine cost of epilogue code.

     We have a reduction operator that will reduce the vector in one statement.
     Also requires scalar extract.  */

  if (!nested_in_vect_loop_p (loop, orig_stmt))
    {
      if (reduc_code != ERROR_MARK)
        outer_cost += vect_get_cost (vector_stmt)
                      + vect_get_cost (vec_to_scalar);
      else
        {
          int vec_size_in_bits = tree_low_cst (TYPE_SIZE (vectype), 1);
          tree bitsize =
            TYPE_SIZE (TREE_TYPE (gimple_assign_lhs (orig_stmt)));
          int element_bitsize = tree_low_cst (bitsize, 1);
          int nelements = vec_size_in_bits / element_bitsize;

          optab = optab_for_tree_code (code, vectype, optab_default);

          /* We have a whole vector shift available.  */
          if (VECTOR_MODE_P (mode)
              && optab_handler (optab, mode) != CODE_FOR_nothing
              && optab_handler (vec_shr_optab, mode) != CODE_FOR_nothing)
            /* Final reduction via vector shifts and the reduction operator.
               Also requires scalar extract.  */
            outer_cost += ((exact_log2 (nelements) * 2)
                           * vect_get_cost (vector_stmt)
                           + vect_get_cost (vec_to_scalar));
          else
            /* Use extracts and reduction op for final reduction.  For N
               elements, we have N extracts and N-1 reduction ops.  */
            outer_cost += ((nelements + nelements - 1)
                           * vect_get_cost (vector_stmt));
        }
    }

  STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info) = outer_cost;

  if (vect_print_dump_info (REPORT_COST))
    fprintf (vect_dump, "vect_model_reduction_cost: inside_cost = %d, "
             "outside_cost = %d .", STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info),
             STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info));

  return true;
}
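
/* Worked example for the epilogue costs above (illustrative only, unit
   costs assumed): for a four-element vector without a REDUC_* operation
   but with whole-vector shifts available, nelements = 4 and the epilogue
   costs exact_log2 (4) * 2 = 4 vector_stmt units plus one vec_to_scalar
   extract; without shifts it would instead cost 4 + 3 = 7 vector_stmt
   units (4 extracts plus 3 scalar combining ops).  */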


/* Function vect_model_induction_cost.

   Models cost for induction operations.  */

static void
vect_model_induction_cost (stmt_vec_info stmt_info, int ncopies)
{
  /* Loop cost for vec_loop.  */
  STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info)
    = ncopies * vect_get_cost (vector_stmt);
  /* Prologue cost for vec_init and vec_step.  */
  STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info)
    = 2 * vect_get_cost (scalar_to_vec);

  if (vect_print_dump_info (REPORT_COST))
    fprintf (vect_dump, "vect_model_induction_cost: inside_cost = %d, "
             "outside_cost = %d .", STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info),
             STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info));
}


/* Function get_initial_def_for_induction

   Input:
   STMT - a stmt that performs an induction operation in the loop.
   IV_PHI - the initial value of the induction variable

   Output:
   Return a vector variable, initialized with the first VF values of
   the induction variable.  E.g., for an iv with IV_PHI='X' and
   evolution S, for a vector of 4 units, we want to return:
   [X, X + S, X + 2*S, X + 3*S].  */
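
/* Concrete instance (illustrative only): for an iv with X = 0, S = 1 and
   VF = 4, the initial vector built below is [0, 1, 2, 3] and the step
   vector is [VF*S, VF*S, VF*S, VF*S] = [4, 4, 4, 4], so each vector
   iteration advances all four lanes of the induction by one strip.  */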

static tree
get_initial_def_for_induction (gimple iv_phi)
{
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (iv_phi);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  tree scalar_type;
  tree vectype;
  int nunits;
  edge pe = loop_preheader_edge (loop);
  struct loop *iv_loop;
  basic_block new_bb;
  tree vec, vec_init, vec_step, t;
  tree access_fn;
  tree new_var;
  tree new_name;
  gimple init_stmt, induction_phi, new_stmt;
  tree induc_def, vec_def, vec_dest;
  tree init_expr, step_expr;
  int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  int i;
  bool ok;
  int ncopies;
  tree expr;
  stmt_vec_info phi_info = vinfo_for_stmt (iv_phi);
  bool nested_in_vect_loop = false;
  gimple_seq stmts = NULL;
  imm_use_iterator imm_iter;
  use_operand_p use_p;
  gimple exit_phi;
  edge latch_e;
  tree loop_arg;
  gimple_stmt_iterator si;
  basic_block bb = gimple_bb (iv_phi);
  tree stepvectype;
  tree resvectype;

  /* Is phi in an inner-loop, while vectorizing an enclosing outer-loop?  */
  if (nested_in_vect_loop_p (loop, iv_phi))
    {
      nested_in_vect_loop = true;
      iv_loop = loop->inner;
    }
  else
    iv_loop = loop;
  gcc_assert (iv_loop == (gimple_bb (iv_phi))->loop_father);

  latch_e = loop_latch_edge (iv_loop);
  loop_arg = PHI_ARG_DEF_FROM_EDGE (iv_phi, latch_e);

  access_fn = analyze_scalar_evolution (iv_loop, PHI_RESULT (iv_phi));
  gcc_assert (access_fn);
  STRIP_NOPS (access_fn);
  ok = vect_is_simple_iv_evolution (iv_loop->num, access_fn,
                                    &init_expr, &step_expr);
  gcc_assert (ok);
  pe = loop_preheader_edge (iv_loop);

  scalar_type = TREE_TYPE (init_expr);
  vectype = get_vectype_for_scalar_type (scalar_type);
  resvectype = get_vectype_for_scalar_type (TREE_TYPE (PHI_RESULT (iv_phi)));
  gcc_assert (vectype);
  nunits = TYPE_VECTOR_SUBPARTS (vectype);
  ncopies = vf / nunits;

  gcc_assert (phi_info);
  gcc_assert (ncopies >= 1);

  /* Find the first insertion point in the BB.  */
  si = gsi_after_labels (bb);

  /* Create the vector that holds the initial_value of the induction.  */
  if (nested_in_vect_loop)
    {
      /* iv_loop is nested in the loop to be vectorized.  init_expr had already
         been created during vectorization of previous stmts.  We obtain it
         from the STMT_VINFO_VEC_STMT of the defining stmt.  */
      tree iv_def = PHI_ARG_DEF_FROM_EDGE (iv_phi,
                                           loop_preheader_edge (iv_loop));
      vec_init = vect_get_vec_def_for_operand (iv_def, iv_phi, NULL);
    }
  else
    {
      /* iv_loop is the loop to be vectorized.  Create:
         vec_init = [X, X+S, X+2*S, X+3*S] (S = step_expr, X = init_expr)  */
      new_var = vect_get_new_vect_var (scalar_type, vect_scalar_var, "var_");
      add_referenced_var (new_var);

      new_name = force_gimple_operand (init_expr, &stmts, false, new_var);
      if (stmts)
        {
          new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
          gcc_assert (!new_bb);
        }

      t = NULL_TREE;
      t = tree_cons (NULL_TREE, new_name, t);
      for (i = 1; i < nunits; i++)
        {
          /* Create: new_name_i = new_name + step_expr  */
          enum tree_code code = POINTER_TYPE_P (scalar_type)
                                ? POINTER_PLUS_EXPR : PLUS_EXPR;
          init_stmt = gimple_build_assign_with_ops (code, new_var,
                                                    new_name, step_expr);
          new_name = make_ssa_name (new_var, init_stmt);
          gimple_assign_set_lhs (init_stmt, new_name);

          new_bb = gsi_insert_on_edge_immediate (pe, init_stmt);
          gcc_assert (!new_bb);

          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "created new init_stmt: ");
              print_gimple_stmt (vect_dump, init_stmt, 0, TDF_SLIM);
            }
          t = tree_cons (NULL_TREE, new_name, t);
        }
      /* Create a vector from [new_name_0, new_name_1, ..., new_name_nunits-1]  */
      vec = build_constructor_from_list (vectype, nreverse (t));
      vec_init = vect_init_vector (iv_phi, vec, vectype, NULL);
    }


  /* Create the vector that holds the step of the induction.  */
  if (nested_in_vect_loop)
    /* iv_loop is nested in the loop to be vectorized.  Generate:
       vec_step = [S, S, S, S]  */
    new_name = step_expr;
  else
    {
      /* iv_loop is the loop to be vectorized.  Generate:
         vec_step = [VF*S, VF*S, VF*S, VF*S]  */
      expr = build_int_cst (TREE_TYPE (step_expr), vf);
      new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
                              expr, step_expr);
    }

  t = unshare_expr (new_name);
  gcc_assert (CONSTANT_CLASS_P (new_name));
  stepvectype = get_vectype_for_scalar_type (TREE_TYPE (new_name));
  gcc_assert (stepvectype);
  vec = build_vector_from_val (stepvectype, t);
  vec_step = vect_init_vector (iv_phi, vec, stepvectype, NULL);


  /* Create the following def-use cycle:
     loop prolog:
         vec_init = ...
         vec_step = ...
     loop:
         vec_iv = PHI <vec_init, vec_loop>
         ...
         STMT
         ...
         vec_loop = vec_iv + vec_step;  */

  /* Create the induction-phi that defines the induction-operand.  */
  vec_dest = vect_get_new_vect_var (vectype, vect_simple_var, "vec_iv_");
  add_referenced_var (vec_dest);
  induction_phi = create_phi_node (vec_dest, iv_loop->header);
  set_vinfo_for_stmt (induction_phi,
                      new_stmt_vec_info (induction_phi, loop_vinfo, NULL));
  induc_def = PHI_RESULT (induction_phi);

  /* Create the iv update inside the loop.  */
  new_stmt = gimple_build_assign_with_ops (PLUS_EXPR, vec_dest,
                                           induc_def, vec_step);
  vec_def = make_ssa_name (vec_dest, new_stmt);
  gimple_assign_set_lhs (new_stmt, vec_def);
  gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
  set_vinfo_for_stmt (new_stmt, new_stmt_vec_info (new_stmt, loop_vinfo,
                                                   NULL));

  /* Set the arguments of the phi node:  */
  add_phi_arg (induction_phi, vec_init, pe, UNKNOWN_LOCATION);
  add_phi_arg (induction_phi, vec_def, loop_latch_edge (iv_loop),
               UNKNOWN_LOCATION);


  /* In case that vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  For more details see documentation
     in vectorizable_operation.  */

  if (ncopies > 1)
    {
      stmt_vec_info prev_stmt_vinfo;
      /* FORNOW. This restriction should be relaxed.  */
      gcc_assert (!nested_in_vect_loop);

      /* Create the vector that holds the step of the induction.  */
      expr = build_int_cst (TREE_TYPE (step_expr), nunits);
      new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
                              expr, step_expr);
      t = unshare_expr (new_name);
      gcc_assert (CONSTANT_CLASS_P (new_name));
      vec = build_vector_from_val (stepvectype, t);
      vec_step = vect_init_vector (iv_phi, vec, stepvectype, NULL);

      vec_def = induc_def;
      prev_stmt_vinfo = vinfo_for_stmt (induction_phi);
      for (i = 1; i < ncopies; i++)
        {
          /* vec_i = vec_prev + vec_step  */
          new_stmt = gimple_build_assign_with_ops (PLUS_EXPR, vec_dest,
                                                   vec_def, vec_step);
          vec_def = make_ssa_name (vec_dest, new_stmt);
          gimple_assign_set_lhs (new_stmt, vec_def);

          gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
          if (!useless_type_conversion_p (resvectype, vectype))
            {
              new_stmt = gimple_build_assign_with_ops
                  (VIEW_CONVERT_EXPR,
                   vect_get_new_vect_var (resvectype, vect_simple_var,
                                          "vec_iv_"),
                   build1 (VIEW_CONVERT_EXPR, resvectype,
                           gimple_assign_lhs (new_stmt)), NULL_TREE);
              gimple_assign_set_lhs (new_stmt,
                                     make_ssa_name
                                       (gimple_assign_lhs (new_stmt), new_stmt));
              gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
            }
          set_vinfo_for_stmt (new_stmt,
                              new_stmt_vec_info (new_stmt, loop_vinfo, NULL));
          STMT_VINFO_RELATED_STMT (prev_stmt_vinfo) = new_stmt;
          prev_stmt_vinfo = vinfo_for_stmt (new_stmt);
        }
    }

  if (nested_in_vect_loop)
    {
      /* Find the loop-closed exit-phi of the induction, and record
         the final vector of induction results:  */
      exit_phi = NULL;
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, loop_arg)
        {
          if (!flow_bb_inside_loop_p (iv_loop, gimple_bb (USE_STMT (use_p))))
            {
              exit_phi = USE_STMT (use_p);
              break;
            }
        }
      if (exit_phi)
        {
          stmt_vec_info stmt_vinfo = vinfo_for_stmt (exit_phi);
          /* FORNOW. Currently not supporting the case that an inner-loop
             induction is not used in the outer-loop (i.e. only outside the
             outer-loop).  */
          gcc_assert (STMT_VINFO_RELEVANT_P (stmt_vinfo)
                      && !STMT_VINFO_LIVE_P (stmt_vinfo));

          STMT_VINFO_VEC_STMT (stmt_vinfo) = new_stmt;
          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "vector of inductions after inner-loop:");
              print_gimple_stmt (vect_dump, new_stmt, 0, TDF_SLIM);
            }
        }
    }


  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "transform induction: created def-use cycle: ");
      print_gimple_stmt (vect_dump, induction_phi, 0, TDF_SLIM);
      fprintf (vect_dump, "\n");
      print_gimple_stmt (vect_dump, SSA_NAME_DEF_STMT (vec_def), 0, TDF_SLIM);
    }

  STMT_VINFO_VEC_STMT (phi_info) = induction_phi;
  if (!useless_type_conversion_p (resvectype, vectype))
    {
      new_stmt = gimple_build_assign_with_ops
         (VIEW_CONVERT_EXPR,
          vect_get_new_vect_var (resvectype, vect_simple_var, "vec_iv_"),
          build1 (VIEW_CONVERT_EXPR, resvectype, induc_def), NULL_TREE);
      induc_def = make_ssa_name (gimple_assign_lhs (new_stmt), new_stmt);
      gimple_assign_set_lhs (new_stmt, induc_def);
      si = gsi_start_bb (bb);
      gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
    }

  return induc_def;
}


/* Function get_initial_def_for_reduction

   Input:
   STMT - a stmt that performs a reduction operation in the loop.
   INIT_VAL - the initial value of the reduction variable

   Output:
   ADJUSTMENT_DEF - a tree that holds a value to be added to the final result
     of the reduction (used for adjusting the epilog - see below).
   Return a vector variable, initialized according to the operation that STMT
     performs.  This vector will be used as the initial value of the
     vector of partial results.

   Option1 (adjust in epilog): Initialize the vector as follows:
     add/bit or/xor:    [0,0,...,0,0]
     mult/bit and:      [1,1,...,1,1]
     min/max/cond_expr: [init_val,init_val,..,init_val,init_val]
   and when necessary (e.g. add/mult case) let the caller know
   that it needs to adjust the result by init_val.

   Option2: Initialize the vector as follows:
     add/bit or/xor:    [init_val,0,0,...,0]
     mult/bit and:      [init_val,1,1,...,1]
     min/max/cond_expr: [init_val,init_val,...,init_val]
   and no adjustments are needed.

   For example, for the following code:

   s = init_val;
   for (i=0;i<n;i++)
     s = s + a[i];

   STMT is 's = s + a[i]', and the reduction variable is 's'.
   For a vector of 4 units, we want to return either [0,0,0,init_val],
   or [0,0,0,0] and let the caller know that it needs to adjust
   the result at the end by 'init_val'.

   FORNOW, we use the 'adjust in epilog' scheme (Option1) when ADJUSTMENT_DEF
   is not NULL, because this way the initialization vector is simpler (the
   same element in all entries), and Option2 otherwise.

   A cost model should help decide between these two schemes.  */

tree
get_initial_def_for_reduction (gimple stmt, tree init_val,
                               tree *adjustment_def)
{
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  tree scalar_type = TREE_TYPE (init_val);
  tree vectype = get_vectype_for_scalar_type (scalar_type);
  int nunits;
  enum tree_code code = gimple_assign_rhs_code (stmt);
  tree def_for_init;
  tree init_def;
  tree t = NULL_TREE;
  int i;
  bool nested_in_vect_loop = false;
  tree init_value;
  REAL_VALUE_TYPE real_init_val = dconst0;
  int int_init_val = 0;
  gimple def_stmt = NULL;

  gcc_assert (vectype);
  nunits = TYPE_VECTOR_SUBPARTS (vectype);

  gcc_assert (POINTER_TYPE_P (scalar_type) || INTEGRAL_TYPE_P (scalar_type)
              || SCALAR_FLOAT_TYPE_P (scalar_type));

  if (nested_in_vect_loop_p (loop, stmt))
    nested_in_vect_loop = true;
  else
    gcc_assert (loop == (gimple_bb (stmt))->loop_father);

  /* In case of double reduction we only create a vector variable to be put
     in the reduction phi node.  The actual statement creation is done in
     vect_create_epilog_for_reduction.  */
  if (adjustment_def && nested_in_vect_loop
      && TREE_CODE (init_val) == SSA_NAME
      && (def_stmt = SSA_NAME_DEF_STMT (init_val))
      && gimple_code (def_stmt) == GIMPLE_PHI
      && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
      && vinfo_for_stmt (def_stmt)
      && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
         == vect_double_reduction_def)
    {
      *adjustment_def = NULL;
      return vect_create_destination_var (init_val, vectype);
    }

  if (TREE_CONSTANT (init_val))
    {
      if (SCALAR_FLOAT_TYPE_P (scalar_type))
        init_value = build_real (scalar_type, TREE_REAL_CST (init_val));
      else
        init_value = build_int_cst (scalar_type, TREE_INT_CST_LOW (init_val));
    }
  else
    init_value = init_val;

  switch (code)
    {
    case WIDEN_SUM_EXPR:
    case DOT_PROD_EXPR:
    case PLUS_EXPR:
    case MINUS_EXPR:
    case BIT_IOR_EXPR:
    case BIT_XOR_EXPR:
    case MULT_EXPR:
    case BIT_AND_EXPR:
      /* ADJUSTMENT_DEF is NULL when called from
         vect_create_epilog_for_reduction to vectorize double reduction.  */
      if (adjustment_def)
        {
          if (nested_in_vect_loop)
            *adjustment_def = vect_get_vec_def_for_operand (init_val, stmt,
                                                            NULL);
          else
            *adjustment_def = init_val;
        }

      if (code == MULT_EXPR)
        {
          real_init_val = dconst1;
          int_init_val = 1;
        }

      if (code == BIT_AND_EXPR)
        int_init_val = -1;

      if (SCALAR_FLOAT_TYPE_P (scalar_type))
        def_for_init = build_real (scalar_type, real_init_val);
      else
        def_for_init = build_int_cst (scalar_type, int_init_val);

      /* Create a vector of '0' or '1', except for the first element.  */
      for (i = nunits - 2; i >= 0; --i)
        t = tree_cons (NULL_TREE, def_for_init, t);

      /* Option1: the first element is '0' or '1' as well.  */
      if (adjustment_def)
        {
          t = tree_cons (NULL_TREE, def_for_init, t);
          init_def = build_vector (vectype, t);
          break;
        }

      /* Option2: the first element is INIT_VAL.  */
      t = tree_cons (NULL_TREE, init_value, t);
      if (TREE_CONSTANT (init_val))
        init_def = build_vector (vectype, t);
      else
        init_def = build_constructor_from_list (vectype, t);

      break;

    case MIN_EXPR:
    case MAX_EXPR:
    case COND_EXPR:
      if (adjustment_def)
        {
          *adjustment_def = NULL_TREE;
          init_def = vect_get_vec_def_for_operand (init_val, stmt, NULL);
          break;
        }

      init_def = build_vector_from_val (vectype, init_value);
      break;

    default:
      gcc_unreachable ();
    }

  return init_def;
}


/* Function vect_create_epilog_for_reduction

   Create code at the loop-epilog to finalize the result of a reduction
   computation.

   VECT_DEFS is the list of vectors of partial results, i.e., the lhs's of
     vector reduction statements.
   STMT is the scalar reduction stmt that is being vectorized.
   NCOPIES is > 1 in case the vectorization factor (VF) is bigger than the
     number of elements that we can fit in a vectype (nunits).  In this case
     we have to generate more than one vector stmt - i.e - we need to "unroll"
     the vector stmt by a factor VF/nunits.  For more details see documentation
     in vectorizable_operation.
   REDUC_CODE is the tree-code for the epilog reduction.
   REDUCTION_PHIS is a list of the phi-nodes that carry the reduction
     computation.
   REDUC_INDEX is the index of the operand in the right hand side of the
     statement that is defined by REDUCTION_PHI.
   DOUBLE_REDUC is TRUE if double reduction phi nodes should be handled.
   SLP_NODE is an SLP node containing a group of reduction statements.  The
     first one in this group is STMT.

   This function:
   1. Creates the reduction def-use cycles: sets the arguments for
      REDUCTION_PHIS:
      The loop-entry argument is the vectorized initial-value of the reduction.
      The loop-latch argument is taken from VECT_DEFS - the vector of partial
      sums.
   2. "Reduces" each vector of partial results VECT_DEFS into a single result,
      by applying the operation specified by REDUC_CODE if available, or by
      other means (whole-vector shifts or a scalar loop).
      The function also creates a new phi node at the loop exit to preserve
      loop-closed form, as illustrated below.

   The flow at the entry to this function:

        loop:
          vec_def = phi <null, null>            # REDUCTION_PHI
          VECT_DEF = vector_stmt                # vectorized form of STMT
          s_loop = scalar_stmt                  # (scalar) STMT
        loop_exit:
          s_out0 = phi <s_loop>                 # (scalar) EXIT_PHI
          use <s_out0>
          use <s_out0>

   The above is transformed by this function into:

        loop:
          vec_def = phi <vec_init, VECT_DEF>    # REDUCTION_PHI
          VECT_DEF = vector_stmt                # vectorized form of STMT
          s_loop = scalar_stmt                  # (scalar) STMT
        loop_exit:
          s_out0 = phi <s_loop>                 # (scalar) EXIT_PHI
          v_out1 = phi <VECT_DEF>               # NEW_EXIT_PHI
          v_out2 = reduce <v_out1>
          s_out3 = extract_field <v_out2, 0>
          s_out4 = adjust_result <s_out3>
          use <s_out4>
          use <s_out4>  */

3125static void
b5aeb3bb
IR
3126vect_create_epilog_for_reduction (VEC (tree, heap) *vect_defs, gimple stmt,
3127 int ncopies, enum tree_code reduc_code,
3128 VEC (gimple, heap) *reduction_phis,
3129 int reduc_index, bool double_reduc,
3130 slp_tree slp_node)
ebfd146a
IR
3131{
3132 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3133 stmt_vec_info prev_phi_info;
3134 tree vectype;
3135 enum machine_mode mode;
3136 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
06066f92 3137 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo), *outer_loop = NULL;
ebfd146a
IR
3138 basic_block exit_bb;
3139 tree scalar_dest;
3140 tree scalar_type;
3141 gimple new_phi = NULL, phi;
3142 gimple_stmt_iterator exit_gsi;
3143 tree vec_dest;
b5aeb3bb 3144 tree new_temp = NULL_TREE, new_dest, new_name, new_scalar_dest;
ebfd146a 3145 gimple epilog_stmt = NULL;
b5aeb3bb 3146 enum tree_code code = gimple_assign_rhs_code (stmt);
ebfd146a 3147 gimple exit_phi;
0f900dfa 3148 tree bitsize, bitpos;
b5aeb3bb
IR
3149 tree adjustment_def = NULL;
3150 tree vec_initial_def = NULL;
3151 tree reduction_op, expr, def;
3152 tree orig_name, scalar_result;
61869cc9
IR
3153 imm_use_iterator imm_iter, phi_imm_iter;
3154 use_operand_p use_p, phi_use_p;
ebfd146a 3155 bool extract_scalar_result = false;
b5aeb3bb 3156 gimple use_stmt, orig_stmt, reduction_phi = NULL;
ebfd146a 3157 bool nested_in_vect_loop = false;
b5aeb3bb 3158 VEC (gimple, heap) *new_phis = NULL;
ebfd146a
IR
3159 enum vect_def_type dt = vect_unknown_def_type;
3160 int j, i;
b5aeb3bb 3161 VEC (tree, heap) *scalar_results = NULL;
74500b3e 3162 unsigned int group_size = 1, k, ratio;
b5aeb3bb
IR
3163 VEC (tree, heap) *vec_initial_defs = NULL;
3164 VEC (gimple, heap) *phis;
3165
3166 if (slp_node)
3167 group_size = VEC_length (gimple, SLP_TREE_SCALAR_STMTS (slp_node));
b8698a0f 3168
ebfd146a
IR
3169 if (nested_in_vect_loop_p (loop, stmt))
3170 {
06066f92 3171 outer_loop = loop;
ebfd146a
IR
3172 loop = loop->inner;
3173 nested_in_vect_loop = true;
b5aeb3bb 3174 gcc_assert (!slp_node);
ebfd146a 3175 }
b8698a0f 3176
ebfd146a
IR
3177 switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
3178 {
3179 case GIMPLE_SINGLE_RHS:
b8698a0f 3180 gcc_assert (TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt))
7c5222ff
IR
3181 == ternary_op);
3182 reduction_op = TREE_OPERAND (gimple_assign_rhs1 (stmt), reduc_index);
ebfd146a
IR
3183 break;
3184 case GIMPLE_UNARY_RHS:
3185 reduction_op = gimple_assign_rhs1 (stmt);
3186 break;
3187 case GIMPLE_BINARY_RHS:
b8698a0f 3188 reduction_op = reduc_index ?
7c5222ff 3189 gimple_assign_rhs2 (stmt) : gimple_assign_rhs1 (stmt);
ebfd146a
IR
3190 break;
3191 default:
3192 gcc_unreachable ();
3193 }
3194
3195 vectype = get_vectype_for_scalar_type (TREE_TYPE (reduction_op));
3196 gcc_assert (vectype);
3197 mode = TYPE_MODE (vectype);
3198
b5aeb3bb
IR
3199 /* 1. Create the reduction def-use cycle:
3200 Set the arguments of REDUCTION_PHIS, i.e., transform
b8698a0f 3201
b5aeb3bb
IR
3202 loop:
3203 vec_def = phi <null, null> # REDUCTION_PHI
3204 VECT_DEF = vector_stmt # vectorized form of STMT
3205 ...
ebfd146a 3206
b5aeb3bb
IR
3207 into:
3208
3209 loop:
3210 vec_def = phi <vec_init, VECT_DEF> # REDUCTION_PHI
3211 VECT_DEF = vector_stmt # vectorized form of STMT
3212 ...
3213
3214 (in case of SLP, do it for all the phis). */

  /* Get the loop-entry arguments.  */
  if (slp_node)
    vect_get_slp_defs (reduction_op, NULL_TREE, slp_node, &vec_initial_defs,
                       NULL, reduc_index);
  else
    {
      vec_initial_defs = VEC_alloc (tree, heap, 1);
      /* For the case of reduction, vect_get_vec_def_for_operand returns
         the scalar def before the loop, that defines the initial value
         of the reduction variable.  */
      vec_initial_def = vect_get_vec_def_for_operand (reduction_op, stmt,
                                                      &adjustment_def);
      VEC_quick_push (tree, vec_initial_defs, vec_initial_def);
    }

  /* Set phi nodes arguments.  */
  FOR_EACH_VEC_ELT (gimple, reduction_phis, i, phi)
    {
      tree vec_init_def = VEC_index (tree, vec_initial_defs, i);
      tree def = VEC_index (tree, vect_defs, i);
      for (j = 0; j < ncopies; j++)
        {
          /* Set the loop-entry arg of the reduction-phi.  */
          add_phi_arg (phi, vec_init_def, loop_preheader_edge (loop),
                       UNKNOWN_LOCATION);

          /* Set the loop-latch arg for the reduction-phi.  */
          if (j > 0)
            def = vect_get_vec_def_for_stmt_copy (vect_unknown_def_type, def);

          add_phi_arg (phi, def, loop_latch_edge (loop), UNKNOWN_LOCATION);

          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "transform reduction: created def-use"
                       " cycle: ");
              print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
              fprintf (vect_dump, "\n");
              print_gimple_stmt (vect_dump, SSA_NAME_DEF_STMT (def), 0,
                                 TDF_SLIM);
            }

          phi = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (phi));
        }
    }

  VEC_free (tree, heap, vec_initial_defs);

  /* 2. Create epilog code.
     The reduction epilog code operates across the elements of the vector
     of partial results computed by the vectorized loop.
     The reduction epilog code consists of:

       step 1: compute the scalar result in a vector (v_out2)
       step 2: extract the scalar result (s_out3) from the vector (v_out2)
       step 3: adjust the scalar result (s_out3) if needed.

     Step 1 can be accomplished using one of the following three schemes:
       (scheme 1) using reduc_code, if available.
       (scheme 2) using whole-vector shifts, if available.
       (scheme 3) using a scalar loop.  In this case steps 1+2 above are
                  combined.

     The overall epilog code looks like this:

       s_out0 = phi <s_loop>               # original EXIT_PHI
       v_out1 = phi <VECT_DEF>             # NEW_EXIT_PHI
       v_out2 = reduce <v_out1>            # step 1
       s_out3 = extract_field <v_out2, 0>  # step 2
       s_out4 = adjust_result <s_out3>     # step 3

     (step 3 is optional, and steps 1 and 2 may be combined).
     Lastly, the uses of s_out0 are replaced by s_out4.  */
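  /* Illustrative sketch (exposition only, not code from this pass): what
     the three epilog steps compute for a PLUS reduction with a 4-element
     vector of partial sums, written as plain C.  v_out1 stands for the
     vector of partial results, adj for adjustment_def:

         int
         epilog_model (const int v_out1[4], int adj)
         {
           int v_out2 = 0, s_out3, s_out4, i;
           for (i = 0; i < 4; i++)  // step 1 (scheme 3): reduce the
             v_out2 += v_out1[i];   // partial results
           s_out3 = v_out2;         // step 2: extract the scalar result
           s_out4 = s_out3 + adj;   // step 3: adjust by the initial value
           return s_out4;
         }
  */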

  /* 2.1 Create new loop-exit-phis to preserve loop-closed form:
        v_out1 = phi <VECT_DEF>
     Store them in NEW_PHIS.  */

  exit_bb = single_exit (loop)->dest;
  prev_phi_info = NULL;
  new_phis = VEC_alloc (gimple, heap, VEC_length (tree, vect_defs));
  FOR_EACH_VEC_ELT (tree, vect_defs, i, def)
    {
      for (j = 0; j < ncopies; j++)
        {
          phi = create_phi_node (SSA_NAME_VAR (def), exit_bb);
          set_vinfo_for_stmt (phi, new_stmt_vec_info (phi, loop_vinfo, NULL));
          if (j == 0)
            VEC_quick_push (gimple, new_phis, phi);
          else
            {
              def = vect_get_vec_def_for_stmt_copy (dt, def);
              STMT_VINFO_RELATED_STMT (prev_phi_info) = phi;
            }

          SET_PHI_ARG_DEF (phi, single_exit (loop)->dest_idx, def);
          prev_phi_info = vinfo_for_stmt (phi);
        }
    }

  /* The epilogue is created for the outer-loop, i.e., for the loop being
     vectorized.  */
  if (double_reduc)
    {
      loop = outer_loop;
      exit_bb = single_exit (loop)->dest;
    }

  exit_gsi = gsi_after_labels (exit_bb);

  /* 2.2 Get the relevant tree-code to use in the epilog for schemes 2,3
     (i.e. when reduc_code is not available) and in the final adjustment
     code (if needed).  Also get the original scalar reduction variable as
     defined in the loop.  In case STMT is a "pattern-stmt" (i.e. - it
     represents a reduction pattern), the tree-code and scalar-def are
     taken from the original stmt that the pattern-stmt (STMT) replaces.
     Otherwise (it is a regular reduction) - the tree-code and scalar-def
     are taken from STMT.  */

  orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
  if (!orig_stmt)
    {
      /* Regular reduction.  */
      orig_stmt = stmt;
    }
  else
    {
      /* Reduction pattern.  */
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (orig_stmt);
      gcc_assert (STMT_VINFO_IN_PATTERN_P (stmt_vinfo));
      gcc_assert (STMT_VINFO_RELATED_STMT (stmt_vinfo) == stmt);
    }

  code = gimple_assign_rhs_code (orig_stmt);
  /* For MINUS_EXPR the initial vector is [init_val,0,...,0], therefore,
     partial results are added and not subtracted.  */
  if (code == MINUS_EXPR)
    code = PLUS_EXPR;

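  /* Illustrative sketch (exposition only): why MINUS_EXPR partial results
     are combined with PLUS.  For s -= a[i], each vector lane accumulates
     negated elements starting from [init_val, 0, ..., 0], so the lanes
     must be added, never subtracted, to obtain the correct total.
     Two-lane model (for even n):

         int
         minus_reduction_model (const int *a, int n, int init_val)
         {
           int lane0 = init_val, lane1 = 0, i;
           for (i = 0; i + 1 < n; i += 2)
             {
               lane0 -= a[i];
               lane1 -= a[i + 1];
             }
           return lane0 + lane1;  // epilog ADDS the partial results
         }
  */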
  scalar_dest = gimple_assign_lhs (orig_stmt);
  scalar_type = TREE_TYPE (scalar_dest);
  scalar_results = VEC_alloc (tree, heap, group_size);
  new_scalar_dest = vect_create_destination_var (scalar_dest, NULL);
  bitsize = TYPE_SIZE (scalar_type);

  /* In case this is a reduction in an inner-loop while vectorizing an outer
     loop - we don't need to extract a single scalar result at the end of the
     inner-loop (unless it is a double reduction, i.e., the use of the
     reduction is outside the outer-loop).  The final vector of partial
     results will be used in the vectorized outer-loop, or reduced to a
     scalar result at the end of the outer-loop.  */
  if (nested_in_vect_loop && !double_reduc)
    goto vect_finalize_reduction;

  /* 2.3 Create the reduction code, using one of the three schemes described
     above.  In SLP we simply need to extract all the elements from the
     vector (without reducing them), so we use scalar shifts.  */
  if (reduc_code != ERROR_MARK && !slp_node)
    {
      tree tmp;

      /*** Case 1: Create:
           v_out2 = reduc_expr <v_out1>  */

      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "Reduce using direct vector reduction.");

      vec_dest = vect_create_destination_var (scalar_dest, vectype);
      new_phi = VEC_index (gimple, new_phis, 0);
      tmp = build1 (reduc_code, vectype, PHI_RESULT (new_phi));
      epilog_stmt = gimple_build_assign (vec_dest, tmp);
      new_temp = make_ssa_name (vec_dest, epilog_stmt);
      gimple_assign_set_lhs (epilog_stmt, new_temp);
      gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);

      extract_scalar_result = true;
    }
  else
    {
      enum tree_code shift_code = ERROR_MARK;
      bool have_whole_vector_shift = true;
      int bit_offset;
      int element_bitsize = tree_low_cst (bitsize, 1);
      int vec_size_in_bits = tree_low_cst (TYPE_SIZE (vectype), 1);
      tree vec_temp;

      if (optab_handler (vec_shr_optab, mode) != CODE_FOR_nothing)
        shift_code = VEC_RSHIFT_EXPR;
      else
        have_whole_vector_shift = false;

      /* Regardless of whether we have a whole vector shift, if we're
         emulating the operation via tree-vect-generic, we don't want
         to use it.  Only the first round of the reduction is likely
         to still be profitable via emulation.  */
      /* ??? It might be better to emit a reduction tree code here, so that
         tree-vect-generic can expand the first round via bit tricks.  */
      if (!VECTOR_MODE_P (mode))
        have_whole_vector_shift = false;
      else
        {
          optab optab = optab_for_tree_code (code, vectype, optab_default);
          if (optab_handler (optab, mode) == CODE_FOR_nothing)
            have_whole_vector_shift = false;
        }

      if (have_whole_vector_shift && !slp_node)
        {
          /*** Case 2: Create:
               for (offset = VS/2; offset >= element_size; offset/=2)
                 {
                   Create:  va' = vec_shift <va, offset>
                   Create:  va = vop <va, va'>
                 }  */

          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "Reduce using vector shifts");

          vec_dest = vect_create_destination_var (scalar_dest, vectype);
          new_phi = VEC_index (gimple, new_phis, 0);
          new_temp = PHI_RESULT (new_phi);
          for (bit_offset = vec_size_in_bits/2;
               bit_offset >= element_bitsize;
               bit_offset /= 2)
            {
              tree bitpos = size_int (bit_offset);

              epilog_stmt = gimple_build_assign_with_ops (shift_code,
                                              vec_dest, new_temp, bitpos);
              new_name = make_ssa_name (vec_dest, epilog_stmt);
              gimple_assign_set_lhs (epilog_stmt, new_name);
              gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);

              epilog_stmt = gimple_build_assign_with_ops (code, vec_dest,
                                                          new_name, new_temp);
              new_temp = make_ssa_name (vec_dest, epilog_stmt);
              gimple_assign_set_lhs (epilog_stmt, new_temp);
              gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
            }
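          /* Illustrative sketch (exposition only): the scalar effect of the
             shift-based loop just emitted, for an 8-element integer vector.
             After log2(8) halving steps the full reduction value sits in
             element 0, which is why a single extraction suffices later:

                 int
                 shift_reduce_model (int va[8])
                 {
                   int off, i;
                   for (off = 8 / 2; off >= 1; off /= 2)
                     for (i = 0; i < off; i++)
                       va[i] += va[i + off];  // va = vop <va, shift<va,off>>
                   return va[0];
                 }
          */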

          extract_scalar_result = true;
        }
      else
        {
          tree rhs;

          /*** Case 3: Create:
               s = extract_field <v_out2, 0>
               for (offset = element_size;
                    offset < vector_size;
                    offset += element_size;)
                 {
                   Create:  s' = extract_field <v_out2, offset>
                   Create:  s = op <s, s'>  // For non SLP cases
                 }  */

          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "Reduce using scalar code. ");

          vec_size_in_bits = tree_low_cst (TYPE_SIZE (vectype), 1);
          FOR_EACH_VEC_ELT (gimple, new_phis, i, new_phi)
            {
              vec_temp = PHI_RESULT (new_phi);
              rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp, bitsize,
                            bitsize_zero_node);
              epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
              new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
              gimple_assign_set_lhs (epilog_stmt, new_temp);
              gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);

              /* In SLP we don't need to apply reduction operation, so we just
                 collect s' values in SCALAR_RESULTS.  */
              if (slp_node)
                VEC_safe_push (tree, heap, scalar_results, new_temp);

              for (bit_offset = element_bitsize;
                   bit_offset < vec_size_in_bits;
                   bit_offset += element_bitsize)
                {
                  tree bitpos = bitsize_int (bit_offset);
                  tree rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp,
                                     bitsize, bitpos);

                  epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
                  new_name = make_ssa_name (new_scalar_dest, epilog_stmt);
                  gimple_assign_set_lhs (epilog_stmt, new_name);
                  gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);

                  if (slp_node)
                    {
                      /* In SLP we don't need to apply reduction operation, so
                         we just collect s' values in SCALAR_RESULTS.  */
                      new_temp = new_name;
                      VEC_safe_push (tree, heap, scalar_results, new_name);
                    }
                  else
                    {
                      epilog_stmt = gimple_build_assign_with_ops (code,
                                          new_scalar_dest, new_name, new_temp);
                      new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
                      gimple_assign_set_lhs (epilog_stmt, new_temp);
                      gsi_insert_before (&exit_gsi, epilog_stmt,
                                         GSI_SAME_STMT);
                    }
                }
            }

          /* The only case where we need to reduce scalar results in SLP, is
             unrolling.  If the size of SCALAR_RESULTS is greater than
             GROUP_SIZE, we reduce them by combining elements modulo
             GROUP_SIZE.  */
          if (slp_node)
            {
              tree res, first_res, new_res;
              gimple new_stmt;

              /* Reduce multiple scalar results in case of SLP unrolling.  */
              for (j = group_size; VEC_iterate (tree, scalar_results, j, res);
                   j++)
                {
                  first_res = VEC_index (tree, scalar_results, j % group_size);
                  new_stmt = gimple_build_assign_with_ops (code,
                                              new_scalar_dest, first_res, res);
                  new_res = make_ssa_name (new_scalar_dest, new_stmt);
                  gimple_assign_set_lhs (new_stmt, new_res);
                  gsi_insert_before (&exit_gsi, new_stmt, GSI_SAME_STMT);
                  VEC_replace (tree, scalar_results, j % group_size, new_res);
                }
            }
          else
            /* Not SLP - we have one scalar to keep in SCALAR_RESULTS.  */
            VEC_safe_push (tree, heap, scalar_results, new_temp);

          extract_scalar_result = false;
        }
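      /* Illustrative sketch (exposition only): the modulo-GROUP_SIZE
         combining performed above for SLP unrolling, as plain C.  With
         group_size = 2 and four collected scalar results, results 2 and 3
         are folded back into results 0 and 1:

             void
             slp_combine_model (int scalar_results[], int n, int group_size)
             {
               int j;
               for (j = group_size; j < n; j++)
                 scalar_results[j % group_size] += scalar_results[j];
             }
      */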
    }

  /* 2.4  Extract the final scalar result.  Create:
          s_out3 = extract_field <v_out2, bitpos>  */

  if (extract_scalar_result)
    {
      tree rhs;

      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "extract scalar result");

      if (BYTES_BIG_ENDIAN)
        bitpos = size_binop (MULT_EXPR,
                             bitsize_int (TYPE_VECTOR_SUBPARTS (vectype) - 1),
                             TYPE_SIZE (scalar_type));
      else
        bitpos = bitsize_zero_node;

      rhs = build3 (BIT_FIELD_REF, scalar_type, new_temp, bitsize, bitpos);
      epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
      new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
      gimple_assign_set_lhs (epilog_stmt, new_temp);
      gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
      VEC_safe_push (tree, heap, scalar_results, new_temp);
    }
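  /* Illustrative sketch (exposition only): where the scalar result lives.
     On little-endian targets it is element 0 of v_out2; on big-endian
     targets it is the last element, hence the (nunits - 1) * element_size
     bit position computed above.  For a V4SI vector:

         unsigned
         result_bitpos_model (int big_endian)
         {
           const unsigned nunits = 4, elem_bits = 32;
           return big_endian ? (nunits - 1) * elem_bits : 0;  // 96 or 0
         }
  */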

vect_finalize_reduction:

  if (double_reduc)
    loop = loop->inner;

  /* 2.5 Adjust the final result by the initial value of the reduction
     variable.  (When such adjustment is not needed, then
     'adjustment_def' is zero).  For example, if code is PLUS we create:
     new_temp = loop_exit_def + adjustment_def  */

  if (adjustment_def)
    {
      gcc_assert (!slp_node);
      if (nested_in_vect_loop)
        {
          new_phi = VEC_index (gimple, new_phis, 0);
          gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) == VECTOR_TYPE);
          expr = build2 (code, vectype, PHI_RESULT (new_phi), adjustment_def);
          new_dest = vect_create_destination_var (scalar_dest, vectype);
        }
      else
        {
          new_temp = VEC_index (tree, scalar_results, 0);
          gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) != VECTOR_TYPE);
          expr = build2 (code, scalar_type, new_temp, adjustment_def);
          new_dest = vect_create_destination_var (scalar_dest, scalar_type);
        }

      epilog_stmt = gimple_build_assign (new_dest, expr);
      new_temp = make_ssa_name (new_dest, epilog_stmt);
      gimple_assign_set_lhs (epilog_stmt, new_temp);
      SSA_NAME_DEF_STMT (new_temp) = epilog_stmt;
      gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
      if (nested_in_vect_loop)
        {
          set_vinfo_for_stmt (epilog_stmt,
                              new_stmt_vec_info (epilog_stmt, loop_vinfo,
                                                 NULL));
          STMT_VINFO_RELATED_STMT (vinfo_for_stmt (epilog_stmt)) =
                STMT_VINFO_RELATED_STMT (vinfo_for_stmt (new_phi));

          if (!double_reduc)
            VEC_quick_push (tree, scalar_results, new_temp);
          else
            VEC_replace (tree, scalar_results, 0, new_temp);
        }
      else
        VEC_replace (tree, scalar_results, 0, new_temp);

      VEC_replace (gimple, new_phis, 0, epilog_stmt);
    }

  /* 2.6  Handle the loop-exit phis.  Replace the uses of scalar loop-exit
          phis with new adjusted scalar results, i.e., replace use <s_out0>
          with use <s_out4>.

     Transform:
        loop_exit:
          s_out0 = phi <s_loop>               # (scalar) EXIT_PHI
          v_out1 = phi <VECT_DEF>             # NEW_EXIT_PHI
          v_out2 = reduce <v_out1>
          s_out3 = extract_field <v_out2, 0>
          s_out4 = adjust_result <s_out3>
          use <s_out0>
          use <s_out0>

     into:

        loop_exit:
          s_out0 = phi <s_loop>               # (scalar) EXIT_PHI
          v_out1 = phi <VECT_DEF>             # NEW_EXIT_PHI
          v_out2 = reduce <v_out1>
          s_out3 = extract_field <v_out2, 0>
          s_out4 = adjust_result <s_out3>
          use <s_out4>
          use <s_out4>  */

  /* In SLP we may have several statements in NEW_PHIS and REDUCTION_PHIS (in
     case that GROUP_SIZE is greater than vectorization factor).  Therefore,
     we need to match SCALAR_RESULTS with corresponding statements.  The first
     (GROUP_SIZE / number of new vector stmts) scalar results correspond to
     the first vector stmt, etc.
     (RATIO is equal to (GROUP_SIZE / number of new vector stmts)).  */
  if (group_size > VEC_length (gimple, new_phis))
    {
      ratio = group_size / VEC_length (gimple, new_phis);
      gcc_assert (!(group_size % VEC_length (gimple, new_phis)));
    }
  else
    ratio = 1;
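  /* Illustrative sketch (exposition only): how RATIO maps scalar results to
     vector stmts.  With group_size = 8 and two new phis, ratio = 4, so
     scalar results 0-3 belong to phi 0 and results 4-7 to phi 1:

         #include <stdio.h>

         int
         main (void)
         {
           unsigned group_size = 8, nphis = 2, k;
           unsigned ratio = group_size / nphis;
           for (k = 0; k < group_size; k++)
             printf ("scalar result %u -> vector stmt %u\n", k, k / ratio);
           return 0;
         }
  */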

  for (k = 0; k < group_size; k++)
    {
      if (k % ratio == 0)
        {
          epilog_stmt = VEC_index (gimple, new_phis, k / ratio);
          reduction_phi = VEC_index (gimple, reduction_phis, k / ratio);
        }

      if (slp_node)
        {
          gimple current_stmt = VEC_index (gimple,
                                       SLP_TREE_SCALAR_STMTS (slp_node), k);

          orig_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (current_stmt));
          /* SLP statements can't participate in patterns.  */
          gcc_assert (!orig_stmt);
          scalar_dest = gimple_assign_lhs (current_stmt);
        }

      phis = VEC_alloc (gimple, heap, 3);
      /* Find the loop-closed-use at the loop exit of the original scalar
         result.  (The reduction result is expected to have two immediate
         uses - one at the latch block, and one at the loop exit).  */
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
        if (!flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (use_p))))
          VEC_safe_push (gimple, heap, phis, USE_STMT (use_p));

      /* We expect to have found an exit_phi because of loop-closed-ssa
         form.  */
      gcc_assert (!VEC_empty (gimple, phis));

      FOR_EACH_VEC_ELT (gimple, phis, i, exit_phi)
        {
          if (outer_loop)
            {
              stmt_vec_info exit_phi_vinfo = vinfo_for_stmt (exit_phi);
              gimple vect_phi;

              /* FORNOW. Currently not supporting the case that an inner-loop
                 reduction is not used in the outer-loop (but only outside the
                 outer-loop), unless it is a double reduction.  */
              gcc_assert ((STMT_VINFO_RELEVANT_P (exit_phi_vinfo)
                           && !STMT_VINFO_LIVE_P (exit_phi_vinfo))
                          || double_reduc);

              STMT_VINFO_VEC_STMT (exit_phi_vinfo) = epilog_stmt;
              if (!double_reduc
                  || STMT_VINFO_DEF_TYPE (exit_phi_vinfo)
                      != vect_double_reduction_def)
                continue;

              /* Handle double reduction:

                 stmt1: s1 = phi <s0, s2>  - double reduction phi (outer loop)
                 stmt2: s3 = phi <s1, s4>  - (regular) reduc phi (inner loop)
                 stmt3: s4 = use (s3)      - (regular) reduc stmt (inner loop)
                 stmt4: s2 = phi <s4>      - double reduction stmt (outer loop)

                 At that point the regular reduction (stmt2 and stmt3) is
                 already vectorized, as well as the exit phi node, stmt4.
                 Here we vectorize the phi node of double reduction, stmt1,
                 and update all relevant statements.  */
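              /* Illustrative sketch (exposition only): the source-level
                 shape of a double reduction, i.e. an inner-loop reduction
                 whose result is itself accumulated by the outer loop.
                 s plays the role of s1/s2 above, t that of s3/s4:

                     int
                     double_reduction_example (int a[8][8])
                     {
                       int s = 0, i, j;     // s1 = phi <s0, s2>  (outer)
                       for (i = 0; i < 8; i++)
                         {
                           int t = s;       // s3 = phi <s1, s4>  (inner)
                           for (j = 0; j < 8; j++)
                             t += a[i][j];  // s4 = use (s3)      (inner)
                           s = t;           // s2 = phi <s4>      (outer)
                         }
                       return s;
                     }
              */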

              /* Go through all the uses of s2 to find double reduction phi
                 node, i.e., stmt1 above.  */
              orig_name = PHI_RESULT (exit_phi);
              FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name)
                {
                  stmt_vec_info use_stmt_vinfo = vinfo_for_stmt (use_stmt);
                  stmt_vec_info new_phi_vinfo;
                  tree vect_phi_init, preheader_arg, vect_phi_res, init_def;
                  basic_block bb = gimple_bb (use_stmt);
                  gimple use;

                  /* Check that USE_STMT is really a double reduction phi
                     node.  */
                  if (gimple_code (use_stmt) != GIMPLE_PHI
                      || gimple_phi_num_args (use_stmt) != 2
                      || !use_stmt_vinfo
                      || STMT_VINFO_DEF_TYPE (use_stmt_vinfo)
                          != vect_double_reduction_def
                      || bb->loop_father != outer_loop)
                    continue;

                  /* Create vector phi node for double reduction:
                     vs1 = phi <vs0, vs2>
                     vs1 was created previously in this function by a call to
                     vect_get_vec_def_for_operand and is stored in
                     vec_initial_def;
                     vs2 is defined by EPILOG_STMT, the vectorized EXIT_PHI;
                     vs0 is created here.  */

                  /* Create vector phi node.  */
                  vect_phi = create_phi_node (vec_initial_def, bb);
                  new_phi_vinfo = new_stmt_vec_info (vect_phi,
                                    loop_vec_info_for_loop (outer_loop), NULL);
                  set_vinfo_for_stmt (vect_phi, new_phi_vinfo);

                  /* Create vs0 - initial def of the double reduction phi.  */
                  preheader_arg = PHI_ARG_DEF_FROM_EDGE (use_stmt,
                                             loop_preheader_edge (outer_loop));
                  init_def = get_initial_def_for_reduction (stmt,
                                                          preheader_arg, NULL);
                  vect_phi_init = vect_init_vector (use_stmt, init_def,
                                                    vectype, NULL);

                  /* Update phi node arguments with vs0 and vs2.  */
                  add_phi_arg (vect_phi, vect_phi_init,
                               loop_preheader_edge (outer_loop),
                               UNKNOWN_LOCATION);
                  add_phi_arg (vect_phi, PHI_RESULT (epilog_stmt),
                               loop_latch_edge (outer_loop), UNKNOWN_LOCATION);
                  if (vect_print_dump_info (REPORT_DETAILS))
                    {
                      fprintf (vect_dump, "created double reduction phi "
                               "node: ");
                      print_gimple_stmt (vect_dump, vect_phi, 0, TDF_SLIM);
                    }

                  vect_phi_res = PHI_RESULT (vect_phi);

                  /* Replace the use, i.e., set the correct vs1 in the regular
                     reduction phi node.  FORNOW, NCOPIES is always 1, so the
                     loop is redundant.  */
                  use = reduction_phi;
                  for (j = 0; j < ncopies; j++)
                    {
                      edge pr_edge = loop_preheader_edge (loop);
                      SET_PHI_ARG_DEF (use, pr_edge->dest_idx, vect_phi_res);
                      use = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (use));
                    }
                }
            }
        }

      VEC_free (gimple, heap, phis);
      if (nested_in_vect_loop)
        {
          if (double_reduc)
            loop = outer_loop;
          else
            continue;
        }

      phis = VEC_alloc (gimple, heap, 3);
      /* Find the loop-closed-use at the loop exit of the original scalar
         result.  (The reduction result is expected to have two immediate
         uses, one at the latch block, and one at the loop exit).  For double
         reductions we are looking for exit phis of the outer loop.  */
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
        {
          if (!flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (use_p))))
            VEC_safe_push (gimple, heap, phis, USE_STMT (use_p));
          else
            {
              if (double_reduc && gimple_code (USE_STMT (use_p)) == GIMPLE_PHI)
                {
                  tree phi_res = PHI_RESULT (USE_STMT (use_p));

                  FOR_EACH_IMM_USE_FAST (phi_use_p, phi_imm_iter, phi_res)
                    {
                      if (!flow_bb_inside_loop_p (loop,
                                             gimple_bb (USE_STMT (phi_use_p))))
                        VEC_safe_push (gimple, heap, phis,
                                       USE_STMT (phi_use_p));
                    }
                }
            }
        }

      FOR_EACH_VEC_ELT (gimple, phis, i, exit_phi)
        {
          /* Replace the uses:  */
          orig_name = PHI_RESULT (exit_phi);
          scalar_result = VEC_index (tree, scalar_results, k);
          FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name)
            FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
              SET_USE (use_p, scalar_result);
        }

      VEC_free (gimple, heap, phis);
    }

  VEC_free (tree, heap, scalar_results);
  VEC_free (gimple, heap, new_phis);
}


/* Function vectorizable_reduction.

   Check if STMT performs a reduction operation that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at GSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.

   This function also handles reduction idioms (patterns) that have been
   recognized in advance during vect_pattern_recog.  In this case, STMT may be
   of this form:
     X = pattern_expr (arg0, arg1, ..., X)
   and its STMT_VINFO_RELATED_STMT points to the last stmt in the original
   sequence that had been detected and replaced by the pattern-stmt (STMT).

   In some cases of reduction patterns, the type of the reduction variable X
   is different from the type of the other arguments of STMT.
   In such cases, the vectype that is used when transforming STMT into a
   vector stmt is different from the vectype that is used to determine the
   vectorization factor, because it consists of a different number of elements
   than the actual number of elements that are being operated upon in
   parallel.

   For example, consider an accumulation of shorts into an int accumulator.
   On some targets it's possible to vectorize this pattern operating on 8
   shorts at a time (hence, the vectype for purposes of determining the
   vectorization factor should be V8HI); on the other hand, the vectype that
   is used to create the vector form is actually V4SI (the type of the
   result).

   Upon entry to this function, STMT_VINFO_VECTYPE records the vectype that
   indicates what is the actual level of parallelism (V8HI in the example), so
   that the right vectorization factor would be derived.  This vectype
   corresponds to the type of arguments to the reduction stmt, and should
   *NOT* be used to create the vectorized stmt.  The right vectype for the
   vectorized stmt is obtained from the type of the result X:
      get_vectype_for_scalar_type (TREE_TYPE (X))

   This means that, contrary to "regular" reductions (or "regular" stmts in
   general), the following equation:
      STMT_VINFO_VECTYPE == get_vectype_for_scalar_type (TREE_TYPE (X))
   does *NOT* necessarily hold for reduction patterns.  */
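
/* Illustrative sketch (exposition only): the source pattern behind the
   short-to-int example above.  Eight shorts are consumed per vector
   iteration (V8HI fixes the vectorization factor), while the vectorized
   statement produces four ints (V4SI), so the two vectypes differ:

       int
       widen_sum_example (const short *sa, int n)
       {
         int acc = 0, i;
         for (i = 0; i < n; i++)
           acc += sa[i];  // detected as int_acc = widen_sum <short_a, int_acc>
         return acc;
       }
*/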

bool
vectorizable_reduction (gimple stmt, gimple_stmt_iterator *gsi,
                        gimple *vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree loop_vec_def0 = NULL_TREE, loop_vec_def1 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree vectype_out = STMT_VINFO_VECTYPE (stmt_info);
  tree vectype_in = NULL_TREE;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  enum tree_code code, orig_code, epilog_reduc_code;
  enum machine_mode vec_mode;
  int op_type;
  optab optab, reduc_optab;
  tree new_temp = NULL_TREE;
  tree def;
  gimple def_stmt;
  enum vect_def_type dt;
  gimple new_phi = NULL;
  tree scalar_type;
  bool is_simple_use;
  gimple orig_stmt;
  stmt_vec_info orig_stmt_info;
  tree expr = NULL_TREE;
  int i;
  int ncopies;
  int epilog_copies;
  stmt_vec_info prev_stmt_info, prev_phi_info;
  bool single_defuse_cycle = false;
  tree reduc_def = NULL_TREE;
  gimple new_stmt = NULL;
  int j;
  tree ops[3];
  bool nested_cycle = false, found_nested_cycle_def = false;
  gimple reduc_def_stmt = NULL;
  /* The default is that the reduction variable is the last in statement.  */
  int reduc_index = 2;
  bool double_reduc = false, dummy;
  basic_block def_bb;
  struct loop * def_stmt_loop, *outer_loop = NULL;
  tree def_arg;
  gimple def_arg_stmt;
  VEC (tree, heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL, *vect_defs = NULL;
  VEC (gimple, heap) *phis = NULL;
  int vec_num;
  tree def0, def1, tem;

  if (nested_in_vect_loop_p (loop, stmt))
    {
      outer_loop = loop;
      loop = loop->inner;
      nested_cycle = true;
    }

  /* 1. Is vectorizable reduction?  */
  /* Not supportable if the reduction variable is used in the loop.  */
  if (STMT_VINFO_RELEVANT (stmt_info) > vect_used_in_outer)
    return false;

  /* Reductions that are not used even in an enclosing outer-loop,
     are expected to be "live" (used out of the loop).  */
  if (STMT_VINFO_RELEVANT (stmt_info) == vect_unused_in_scope
      && !STMT_VINFO_LIVE_P (stmt_info))
    return false;

  /* Make sure it was already recognized as a reduction computation.  */
  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_reduction_def
      && STMT_VINFO_DEF_TYPE (stmt_info) != vect_nested_cycle)
    return false;

  /* 2. Has this been recognized as a reduction pattern?

     Check if STMT represents a pattern that has been recognized
     in earlier analysis stages.  For stmts that represent a pattern,
     the STMT_VINFO_RELATED_STMT field records the last stmt in
     the original sequence that constitutes the pattern.  */

  orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
  if (orig_stmt)
    {
      orig_stmt_info = vinfo_for_stmt (orig_stmt);
      gcc_assert (STMT_VINFO_RELATED_STMT (orig_stmt_info) == stmt);
      gcc_assert (STMT_VINFO_IN_PATTERN_P (orig_stmt_info));
      gcc_assert (!STMT_VINFO_IN_PATTERN_P (stmt_info));
    }

  /* 3. Check the operands of the operation.  The first operands are defined
     inside the loop body.  The last operand is the reduction variable,
     which is defined by the loop-header-phi.  */

  gcc_assert (is_gimple_assign (stmt));

  /* Flatten RHS.  */
  switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
    {
    case GIMPLE_SINGLE_RHS:
      op_type = TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt));
      if (op_type == ternary_op)
        {
          tree rhs = gimple_assign_rhs1 (stmt);
          ops[0] = TREE_OPERAND (rhs, 0);
          ops[1] = TREE_OPERAND (rhs, 1);
          ops[2] = TREE_OPERAND (rhs, 2);
          code = TREE_CODE (rhs);
        }
      else
        return false;
      break;

    case GIMPLE_BINARY_RHS:
      code = gimple_assign_rhs_code (stmt);
      op_type = TREE_CODE_LENGTH (code);
      gcc_assert (op_type == binary_op);
      ops[0] = gimple_assign_rhs1 (stmt);
      ops[1] = gimple_assign_rhs2 (stmt);
      break;

    case GIMPLE_UNARY_RHS:
      return false;

    default:
      gcc_unreachable ();
    }

  scalar_dest = gimple_assign_lhs (stmt);
  scalar_type = TREE_TYPE (scalar_dest);
  if (!POINTER_TYPE_P (scalar_type) && !INTEGRAL_TYPE_P (scalar_type)
      && !SCALAR_FLOAT_TYPE_P (scalar_type))
    return false;

  /* All uses but the last are expected to be defined in the loop.
     The last use is the reduction variable.  In case of nested cycle this
     assumption is not true: we use reduc_index to record the index of the
     reduction variable.  */
  for (i = 0; i < op_type-1; i++)
    {
      /* The condition of COND_EXPR is checked in vectorizable_condition().  */
      if (i == 0 && code == COND_EXPR)
        continue;

      is_simple_use = vect_is_simple_use_1 (ops[i], loop_vinfo, NULL,
                                            &def_stmt, &def, &dt, &tem);
      if (!vectype_in)
        vectype_in = tem;
      gcc_assert (is_simple_use);
      if (dt != vect_internal_def
          && dt != vect_external_def
          && dt != vect_constant_def
          && dt != vect_induction_def
          && !(dt == vect_nested_cycle && nested_cycle))
        return false;

      if (dt == vect_nested_cycle)
        {
          found_nested_cycle_def = true;
          reduc_def_stmt = def_stmt;
          reduc_index = i;
        }
    }

  is_simple_use = vect_is_simple_use_1 (ops[i], loop_vinfo, NULL, &def_stmt,
                                        &def, &dt, &tem);
  if (!vectype_in)
    vectype_in = tem;
  gcc_assert (is_simple_use);
  gcc_assert (dt == vect_reduction_def
              || dt == vect_nested_cycle
              || ((dt == vect_internal_def || dt == vect_external_def
                   || dt == vect_constant_def || dt == vect_induction_def)
                  && nested_cycle && found_nested_cycle_def));
  if (!found_nested_cycle_def)
    reduc_def_stmt = def_stmt;

  gcc_assert (gimple_code (reduc_def_stmt) == GIMPLE_PHI);
  if (orig_stmt)
    gcc_assert (orig_stmt == vect_is_simple_reduction (loop_vinfo,
                                                       reduc_def_stmt,
                                                       !nested_cycle,
                                                       &dummy));
  else
    gcc_assert (stmt == vect_is_simple_reduction (loop_vinfo, reduc_def_stmt,
                                                  !nested_cycle, &dummy));

  if (STMT_VINFO_LIVE_P (vinfo_for_stmt (reduc_def_stmt)))
    return false;

  if (slp_node)
    ncopies = 1;
  else
    ncopies = (LOOP_VINFO_VECT_FACTOR (loop_vinfo)
               / TYPE_VECTOR_SUBPARTS (vectype_in));

  gcc_assert (ncopies >= 1);

  vec_mode = TYPE_MODE (vectype_in);

  if (code == COND_EXPR)
    {
      if (!vectorizable_condition (stmt, gsi, NULL, ops[reduc_index], 0))
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "unsupported condition in reduction");

          return false;
        }
    }
  else
    {
      /* 4. Supportable by target?  */

      /* 4.1. check support for the operation in the loop  */
      optab = optab_for_tree_code (code, vectype_in, optab_default);
      if (!optab)
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "no optab.");

          return false;
        }

      if (optab_handler (optab, vec_mode) == CODE_FOR_nothing)
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "op not supported by target.");

          if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
              || LOOP_VINFO_VECT_FACTOR (loop_vinfo)
                 < vect_min_worthwhile_factor (code))
            return false;

          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "proceeding using word mode.");
        }

      /* Worthwhile without SIMD support?  */
      if (!VECTOR_MODE_P (TYPE_MODE (vectype_in))
          && LOOP_VINFO_VECT_FACTOR (loop_vinfo)
             < vect_min_worthwhile_factor (code))
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "not worthwhile without SIMD support.");

          return false;
        }
    }

  /* 4.2. Check support for the epilog operation.

          If STMT represents a reduction pattern, then the type of the
          reduction variable may be different from the type of the rest
          of the arguments.  For example, consider the case of accumulation
          of shorts into an int accumulator; The original code:
                        S1: int_a = (int) short_a;
          orig_stmt->   S2: int_acc = plus <int_a ,int_acc>;

          was replaced with:
                        STMT: int_acc = widen_sum <short_a, int_acc>

          This means that:
          1. The tree-code that is used to create the vector operation in the
             epilog code (that reduces the partial results) is not the
             tree-code of STMT, but is rather the tree-code of the original
             stmt from the pattern that STMT is replacing.  I.e., in the
             example above we want to use 'widen_sum' in the loop, but 'plus'
             in the epilog.
          2. The type (mode) we use to check available target support
             for the vector operation to be created in the *epilog*, is
             determined by the type of the reduction variable (in the example
             above we'd check this:
             optab_handler (plus_optab, vect_int_mode)).
             However the type (mode) we use to check available target support
             for the vector operation to be created *inside the loop*, is
             determined by the type of the other arguments to STMT (in the
             example we'd check this: optab_handler (widen_sum_optab,
             vect_short_mode)).

          This is contrary to "regular" reductions, in which the types of all
          the arguments are the same as the type of the reduction variable.
          For "regular" reductions we can therefore use the same vector type
          (and also the same tree-code) when generating the epilog code and
          when generating the code inside the loop.  */

  if (orig_stmt)
    {
      /* This is a reduction pattern: get the vectype from the type of the
         reduction variable, and get the tree-code from orig_stmt.  */
      orig_code = gimple_assign_rhs_code (orig_stmt);
      gcc_assert (vectype_out);
      vec_mode = TYPE_MODE (vectype_out);
    }
  else
    {
      /* Regular reduction: use the same vectype and tree-code as used for
         the vector code inside the loop can be used for the epilog code.  */
      orig_code = code;
    }

  if (nested_cycle)
    {
      def_bb = gimple_bb (reduc_def_stmt);
      def_stmt_loop = def_bb->loop_father;
      def_arg = PHI_ARG_DEF_FROM_EDGE (reduc_def_stmt,
                                       loop_preheader_edge (def_stmt_loop));
      if (TREE_CODE (def_arg) == SSA_NAME
          && (def_arg_stmt = SSA_NAME_DEF_STMT (def_arg))
          && gimple_code (def_arg_stmt) == GIMPLE_PHI
          && flow_bb_inside_loop_p (outer_loop, gimple_bb (def_arg_stmt))
          && vinfo_for_stmt (def_arg_stmt)
          && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_arg_stmt))
              == vect_double_reduction_def)
        double_reduc = true;
    }

  epilog_reduc_code = ERROR_MARK;
  if (reduction_code_for_scalar_code (orig_code, &epilog_reduc_code))
    {
      reduc_optab = optab_for_tree_code (epilog_reduc_code, vectype_out,
                                         optab_default);
      if (!reduc_optab)
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "no optab for reduction.");

          epilog_reduc_code = ERROR_MARK;
        }

      if (reduc_optab
          && optab_handler (reduc_optab, vec_mode) == CODE_FOR_nothing)
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "reduc op not supported by target.");

          epilog_reduc_code = ERROR_MARK;
        }
    }
  else
    {
      if (!nested_cycle || double_reduc)
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "no reduc code for scalar code.");

          return false;
        }
    }

  if (double_reduc && ncopies > 1)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "multiple types in double reduction");

      return false;
    }

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type;
      if (!vect_model_reduction_cost (stmt_info, epilog_reduc_code, ncopies))
        return false;
      return true;
    }

  /** Transform.  **/

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "transform reduction.");

  /* FORNOW: Multiple types are not supported for condition.  */
  if (code == COND_EXPR)
    gcc_assert (ncopies == 1);

  /* Create the destination vector.  */
  vec_dest = vect_create_destination_var (scalar_dest, vectype_out);

  /* In case the vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e., we need to "unroll" the
     vector stmt by a factor VF/nunits.  For more details see documentation
     in vectorizable_operation.  */

  /* If the reduction is used in an outer loop we need to generate
     VF intermediate results, like so (e.g. for ncopies=2):
        r0 = phi (init, r0)
        r1 = phi (init, r1)
        r0 = x0 + r0;
        r1 = x1 + r1;
    (i.e. we generate VF results in 2 registers).
    In this case we have a separate def-use cycle for each copy, and therefore
    for each copy we get the vector def for the reduction variable from the
    respective phi node created for this copy.

    Otherwise (the reduction is unused in the loop nest), we can combine
    together intermediate results, like so (e.g. for ncopies=2):
        r = phi (init, r)
        r = x0 + r;
        r = x1 + r;
    (i.e. we generate VF/2 results in a single register).
    In this case for each copy we get the vector def for the reduction
    variable from the vectorized reduction operation generated in the
    previous iteration.  */
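  /* Illustrative sketch (exposition only): the two def-use shapes described
     above for ncopies=2, written as scalar C:

         // Reduction used in an outer loop: one cycle per copy.
         int two_cycles (const int *x)
         {
           int r0 = 0, r1 = 0;
           r0 = x[0] + r0;
           r1 = x[1] + r1;
           return r0 + r1;
         }

         // Reduction unused in the loop nest: a single combined cycle.
         int one_cycle (const int *x)
         {
           int r = 0;
           r = x[0] + r;
           r = x[1] + r;
           return r;
         }
  */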

  if (STMT_VINFO_RELEVANT (stmt_info) == vect_unused_in_scope)
    {
      single_defuse_cycle = true;
      epilog_copies = 1;
    }
  else
    epilog_copies = ncopies;

  prev_stmt_info = NULL;
  prev_phi_info = NULL;
  if (slp_node)
    {
      vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
      gcc_assert (TYPE_VECTOR_SUBPARTS (vectype_out)
                  == TYPE_VECTOR_SUBPARTS (vectype_in));
    }
  else
    {
      vec_num = 1;
      vec_oprnds0 = VEC_alloc (tree, heap, 1);
      if (op_type == ternary_op)
        vec_oprnds1 = VEC_alloc (tree, heap, 1);
    }

  phis = VEC_alloc (gimple, heap, vec_num);
  vect_defs = VEC_alloc (tree, heap, vec_num);
  if (!slp_node)
    VEC_quick_push (tree, vect_defs, NULL_TREE);

  for (j = 0; j < ncopies; j++)
    {
      if (j == 0 || !single_defuse_cycle)
        {
          for (i = 0; i < vec_num; i++)
            {
              /* Create the reduction-phi that defines the reduction
                 operand.  */
              new_phi = create_phi_node (vec_dest, loop->header);
              set_vinfo_for_stmt (new_phi,
                                  new_stmt_vec_info (new_phi, loop_vinfo,
                                                     NULL));
              if (j == 0 || slp_node)
                VEC_quick_push (gimple, phis, new_phi);
            }
        }

      if (code == COND_EXPR)
        {
          gcc_assert (!slp_node);
          vectorizable_condition (stmt, gsi, vec_stmt,
                                  PHI_RESULT (VEC_index (gimple, phis, 0)),
                                  reduc_index);
          /* Multiple types are not supported for condition.  */
          break;
        }

      /* Handle uses.  */
      if (j == 0)
        {
          tree op0, op1 = NULL_TREE;

          op0 = ops[!reduc_index];
          if (op_type == ternary_op)
            {
              if (reduc_index == 0)
                op1 = ops[2];
              else
                op1 = ops[1];
            }

          if (slp_node)
            vect_get_slp_defs (op0, op1, slp_node, &vec_oprnds0, &vec_oprnds1,
                               -1);
          else
            {
              loop_vec_def0 = vect_get_vec_def_for_operand (ops[!reduc_index],
                                                            stmt, NULL);
              VEC_quick_push (tree, vec_oprnds0, loop_vec_def0);
              if (op_type == ternary_op)
                {
                  loop_vec_def1 = vect_get_vec_def_for_operand (op1, stmt,
                                                                NULL);
                  VEC_quick_push (tree, vec_oprnds1, loop_vec_def1);
                }
            }
        }
      else
        {
          if (!slp_node)
            {
              enum vect_def_type dt = vect_unknown_def_type; /* Dummy */
              loop_vec_def0 = vect_get_vec_def_for_stmt_copy (dt,
                                                              loop_vec_def0);
              VEC_replace (tree, vec_oprnds0, 0, loop_vec_def0);
              if (op_type == ternary_op)
                {
                  loop_vec_def1 = vect_get_vec_def_for_stmt_copy (dt,
                                                                loop_vec_def1);
                  VEC_replace (tree, vec_oprnds1, 0, loop_vec_def1);
                }
            }

          if (single_defuse_cycle)
            reduc_def = gimple_assign_lhs (new_stmt);

          STMT_VINFO_RELATED_STMT (prev_phi_info) = new_phi;
        }

      FOR_EACH_VEC_ELT (tree, vec_oprnds0, i, def0)
        {
          if (slp_node)
            reduc_def = PHI_RESULT (VEC_index (gimple, phis, i));
          else
            {
              if (!single_defuse_cycle || j == 0)
                reduc_def = PHI_RESULT (new_phi);
            }

          def1 = ((op_type == ternary_op)
                  ? VEC_index (tree, vec_oprnds1, i) : NULL);
          if (op_type == binary_op)
            {
              if (reduc_index == 0)
                expr = build2 (code, vectype_out, reduc_def, def0);
              else
                expr = build2 (code, vectype_out, def0, reduc_def);
            }
          else
            {
              if (reduc_index == 0)
                expr = build3 (code, vectype_out, reduc_def, def0, def1);
              else
                {
                  if (reduc_index == 1)
                    expr = build3 (code, vectype_out, def0, reduc_def, def1);
                  else
                    expr = build3 (code, vectype_out, def0, def1, reduc_def);
                }
            }

          new_stmt = gimple_build_assign (vec_dest, expr);
          new_temp = make_ssa_name (vec_dest, new_stmt);
          gimple_assign_set_lhs (new_stmt, new_temp);
          vect_finish_stmt_generation (stmt, new_stmt, gsi);
          if (slp_node)
            {
              VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
              VEC_quick_push (tree, vect_defs, new_temp);
            }
          else
            VEC_replace (tree, vect_defs, 0, new_temp);
        }

      if (slp_node)
        continue;

      if (j == 0)
        STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
        STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

      prev_stmt_info = vinfo_for_stmt (new_stmt);
      prev_phi_info = vinfo_for_stmt (new_phi);
    }

  /* Finalize the reduction-phi (set its arguments) and create the
     epilog reduction code.  */
  if ((!single_defuse_cycle || code == COND_EXPR) && !slp_node)
    {
      new_temp = gimple_assign_lhs (*vec_stmt);
      VEC_replace (tree, vect_defs, 0, new_temp);
    }

  vect_create_epilog_for_reduction (vect_defs, stmt, epilog_copies,
                                    epilog_reduc_code, phis, reduc_index,
                                    double_reduc, slp_node);

  VEC_free (gimple, heap, phis);
  VEC_free (tree, heap, vec_oprnds0);
  if (vec_oprnds1)
    VEC_free (tree, heap, vec_oprnds1);

  return true;
}

/* Function vect_min_worthwhile_factor.

   For a loop where we could vectorize the operation indicated by CODE,
   return the minimum vectorization factor that makes it worthwhile
   to use generic vectors.  */
int
vect_min_worthwhile_factor (enum tree_code code)
{
  switch (code)
    {
    case PLUS_EXPR:
    case MINUS_EXPR:
    case NEGATE_EXPR:
      return 4;

    case BIT_AND_EXPR:
    case BIT_IOR_EXPR:
    case BIT_XOR_EXPR:
    case BIT_NOT_EXPR:
      return 2;

    default:
      return INT_MAX;
    }
}
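
/* Illustrative sketch (exposition only): how the thresholds above are read
   by callers in this file.  Vectorization with emulated (non-SIMD) vectors
   is rejected when the vectorization factor is below the per-operation
   minimum, so a PLUS_EXPR reduction pays off at VF >= 4, bitwise operations
   at VF >= 2, and anything else never does (INT_MAX):

       static int
       worthwhile_without_simd_p (int vectorization_factor,
                                  enum tree_code code)
       {
         return vectorization_factor >= vect_min_worthwhile_factor (code);
       }
*/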


/* Function vectorizable_induction

   Check if PHI performs an induction computation that can be vectorized.
   If VEC_STMT is also passed, vectorize the induction PHI: create a
   vectorized phi to replace it, put it in VEC_STMT, and add it to the same
   basic block.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

bool
vectorizable_induction (gimple phi, gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
                        gimple *vec_stmt)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (phi);
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  int nunits = TYPE_VECTOR_SUBPARTS (vectype);
  int ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
  tree vec_def;

  gcc_assert (ncopies >= 1);
  /* FORNOW. This restriction should be relaxed.  */
  if (nested_in_vect_loop_p (loop, phi) && ncopies > 1)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "multiple types in nested loop.");
      return false;
    }

  if (!STMT_VINFO_RELEVANT_P (stmt_info))
    return false;

  /* FORNOW: SLP not supported.  */
  if (STMT_SLP_TYPE (stmt_info))
    return false;

  gcc_assert (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def);

  if (gimple_code (phi) != GIMPLE_PHI)
    return false;

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = induc_vec_info_type;
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "=== vectorizable_induction ===");
      vect_model_induction_cost (stmt_info, ncopies);
      return true;
    }

  /** Transform.  **/

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "transform induction phi.");

  vec_def = get_initial_def_for_induction (phi);
  *vec_stmt = SSA_NAME_DEF_STMT (vec_def);
  return true;
}

/* Function vectorizable_live_operation.

   STMT computes a value that is used outside the loop.  Check if
   it can be supported.  */

bool
vectorizable_live_operation (gimple stmt,
                             gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
                             gimple *vec_stmt ATTRIBUTE_UNUSED)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  int i;
  int op_type;
  tree op;
  tree def;
  gimple def_stmt;
  enum vect_def_type dt;
  enum tree_code code;
  enum gimple_rhs_class rhs_class;

  gcc_assert (STMT_VINFO_LIVE_P (stmt_info));

  if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def)
    return false;

  if (!is_gimple_assign (stmt))
    return false;

  if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
    return false;

  /* FORNOW. CHECKME.  */
  if (nested_in_vect_loop_p (loop, stmt))
    return false;

  code = gimple_assign_rhs_code (stmt);
  op_type = TREE_CODE_LENGTH (code);
  rhs_class = get_gimple_rhs_class (code);
  gcc_assert (rhs_class != GIMPLE_UNARY_RHS || op_type == unary_op);
  gcc_assert (rhs_class != GIMPLE_BINARY_RHS || op_type == binary_op);

  /* FORNOW: support only if all uses are invariant.  This means
     that the scalar operations can remain in place, unvectorized.
     The original last scalar value that they compute will be used.  */

  for (i = 0; i < op_type; i++)
    {
      if (rhs_class == GIMPLE_SINGLE_RHS)
        op = TREE_OPERAND (gimple_op (stmt, 1), i);
      else
        op = gimple_op (stmt, i + 1);
      if (op
          && !vect_is_simple_use (op, loop_vinfo, NULL, &def_stmt, &def, &dt))
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "use not simple.");
          return false;
        }

      if (dt != vect_external_def && dt != vect_constant_def)
        return false;
    }

  /* No transformation is required for the cases we currently support.  */
  return true;
}

/* Kill any debug uses outside LOOP of SSA names defined in STMT.  */

static void
vect_loop_kill_debug_uses (struct loop *loop, gimple stmt)
{
  ssa_op_iter op_iter;
  imm_use_iterator imm_iter;
  def_operand_p def_p;
  gimple ustmt;

  FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
    {
      FOR_EACH_IMM_USE_STMT (ustmt, imm_iter, DEF_FROM_PTR (def_p))
        {
          basic_block bb;

          if (!is_gimple_debug (ustmt))
            continue;

          bb = gimple_bb (ustmt);

          if (!flow_bb_inside_loop_p (loop, bb))
            {
              if (gimple_debug_bind_p (ustmt))
                {
                  if (vect_print_dump_info (REPORT_DETAILS))
                    fprintf (vect_dump, "killing debug use");

                  gimple_debug_bind_reset_value (ustmt);
                  update_stmt (ustmt);
                }
              else
                gcc_unreachable ();
            }
        }
    }
}

/* Function vect_transform_loop.

   The analysis phase has determined that the loop is vectorizable.
   Vectorize the loop - create vectorized stmts to replace the scalar
   stmts in the loop, and update the loop exit condition.  */

void
vect_transform_loop (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  int nbbs = loop->num_nodes;
  gimple_stmt_iterator si;
  int i;
  tree ratio = NULL;
  int vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  bool strided_store;
  bool slp_scheduled = false;
  unsigned int nunits;
  tree cond_expr = NULL_TREE;
  gimple_seq cond_expr_stmt_list = NULL;
  bool do_peeling_for_loop_bound;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== vec_transform_loop ===");

  /* Peel the loop if there are data refs with unknown alignment.
     Only one data ref with unknown store is allowed.  */

  if (LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo))
    vect_do_peeling_for_alignment (loop_vinfo);

  do_peeling_for_loop_bound
    = (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
       || (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
           && LOOP_VINFO_INT_NITERS (loop_vinfo) % vectorization_factor != 0));

  if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
      || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
    vect_loop_versioning (loop_vinfo,
                          !do_peeling_for_loop_bound,
                          &cond_expr, &cond_expr_stmt_list);

  /* If the loop has a symbolic number of iterations 'n' (i.e. it's not a
     compile time constant), or it is a constant that doesn't divide by the
     vectorization factor, then an epilog loop needs to be created.
     We therefore duplicate the loop: the original loop will be vectorized,
     and will compute the first (n/VF) iterations.  The second copy of the
     loop will remain scalar and will compute the remaining (n%VF)
     iterations.  (VF is the vectorization factor).  */
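
  /* Illustrative sketch (exposition only): the peeled shape described
     above, in scalar C for VF = 4.  The first loop is what gets vectorized
     (n/VF full vector iterations); the second is the scalar epilog covering
     the remaining n%VF iterations:

         void
         peeled_shape (int *a, const int *b, const int *c, int n)
         {
           int i;
           for (i = 0; i < (n / 4) * 4; i++)  // vectorized part
             a[i] = b[i] + c[i];
           for (; i < n; i++)                 // scalar epilog
             a[i] = b[i] + c[i];
         }
  */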

  if (do_peeling_for_loop_bound)
    vect_do_peeling_for_loop_bound (loop_vinfo, &ratio,
                                    cond_expr, cond_expr_stmt_list);
  else
    ratio = build_int_cst (TREE_TYPE (LOOP_VINFO_NITERS (loop_vinfo)),
                LOOP_VINFO_INT_NITERS (loop_vinfo) / vectorization_factor);

  /* 1) Make sure the loop header has exactly two entries
     2) Make sure we have a preheader basic block.  */

  gcc_assert (EDGE_COUNT (loop->header->preds) == 2);

  split_edge (loop_preheader_edge (loop));

  /* FORNOW: the vectorizer supports only loops whose body consists
     of one basic block (header + empty latch).  When the vectorizer will
     support more involved loop forms, the order by which the BBs are
     traversed needs to be reconsidered.  */

  for (i = 0; i < nbbs; i++)
    {
      basic_block bb = bbs[i];
      stmt_vec_info stmt_info;
      gimple phi;

      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
        {
          phi = gsi_stmt (si);
          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "------>vectorizing phi: ");
              print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
            }
          stmt_info = vinfo_for_stmt (phi);
          if (!stmt_info)
            continue;

          if (MAY_HAVE_DEBUG_STMTS && !STMT_VINFO_LIVE_P (stmt_info))
            vect_loop_kill_debug_uses (loop, phi);

          if (!STMT_VINFO_RELEVANT_P (stmt_info)
              && !STMT_VINFO_LIVE_P (stmt_info))
            continue;

          if ((TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info))
               != (unsigned HOST_WIDE_INT) vectorization_factor)
              && vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "multiple-types.");

          if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def)
            {
              if (vect_print_dump_info (REPORT_DETAILS))
                fprintf (vect_dump, "transform phi.");
              vect_transform_stmt (phi, NULL, NULL, NULL, NULL);
            }
        }

      for (si = gsi_start_bb (bb); !gsi_end_p (si);)
        {
          gimple stmt = gsi_stmt (si);
          bool is_store;

          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "------>vectorizing statement: ");
              print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
            }

          stmt_info = vinfo_for_stmt (stmt);

          /* vector stmts created in the outer-loop during vectorization of
             stmts in an inner-loop may not have a stmt_info, and do not
             need to be vectorized.  */
          if (!stmt_info)
            {
              gsi_next (&si);
              continue;
            }

          if (MAY_HAVE_DEBUG_STMTS && !STMT_VINFO_LIVE_P (stmt_info))
            vect_loop_kill_debug_uses (loop, stmt);

          if (!STMT_VINFO_RELEVANT_P (stmt_info)
              && !STMT_VINFO_LIVE_P (stmt_info))
            {
              gsi_next (&si);
              continue;
            }

          gcc_assert (STMT_VINFO_VECTYPE (stmt_info));
          nunits =
            (unsigned int) TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
          if (!STMT_SLP_TYPE (stmt_info)
              && nunits != (unsigned int) vectorization_factor
              && vect_print_dump_info (REPORT_DETAILS))
            /* For SLP VF is set according to unrolling factor, and not to
               vector size, hence for SLP this print is not valid.  */
            fprintf (vect_dump, "multiple-types.");

          /* SLP.  Schedule all the SLP instances when the first SLP stmt is
             reached.  */
          if (STMT_SLP_TYPE (stmt_info))
            {
              if (!slp_scheduled)
                {
                  slp_scheduled = true;

                  if (vect_print_dump_info (REPORT_DETAILS))
                    fprintf (vect_dump, "=== scheduling SLP instances ===");

                  vect_schedule_slp (loop_vinfo, NULL);
                }

              /* Hybrid SLP stmts must be vectorized in addition to SLP.  */
              if (!vinfo_for_stmt (stmt) || PURE_SLP_STMT (stmt_info))
                {
                  gsi_next (&si);
                  continue;
                }
            }

          /* -------- vectorize statement ------------ */
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "transform statement.");

          strided_store = false;
          is_store = vect_transform_stmt (stmt, &si, &strided_store, NULL,
                                          NULL);
          if (is_store)
            {
              if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
                {
                  /* Interleaving.  If IS_STORE is TRUE, the vectorization of
                     the interleaving chain was completed - free all the
                     stores in the chain.  */
                  vect_remove_stores (DR_GROUP_FIRST_DR (stmt_info));
                  gsi_remove (&si, true);
                  continue;
                }
              else
                {
                  /* Free the attached stmt_vec_info and remove the stmt.  */
                  free_stmt_vec_info (stmt);
                  gsi_remove (&si, true);
                  continue;
                }
            }
          gsi_next (&si);
        } /* stmts in BB */
    } /* BBs in loop */

  slpeel_make_loop_iterate_ntimes (loop, ratio);

  /* The memory tags and pointers in vectorized statements need to
     have their SSA forms updated.  FIXME, why can't this be delayed
     until all the loops have been transformed?  */
  update_ssa (TODO_update_ssa);

  if (vect_print_dump_info (REPORT_VECTORIZED_LOCATIONS))
    fprintf (vect_dump, "LOOP VECTORIZED.");
  if (loop->inner && vect_print_dump_info (REPORT_VECTORIZED_LOCATIONS))
    fprintf (vect_dump, "OUTER LOOP VECTORIZED.");
}