gcc/ipa-modref-tree.cc
1 /* Data structure for the modref pass.
2 Copyright (C) 2020-2022 Free Software Foundation, Inc.
3 Contributed by David Cepelik and Jan Hubicka
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "tree.h"
26 #include "ipa-modref-tree.h"
27 #include "selftest.h"
28 #include "tree-ssa-alias.h"
29 #include "gimple.h"
30 #include "cgraph.h"
31 #include "tree-streamer.h"
32
33 /* Return true if both accesses are the same. */
34 bool
35 modref_access_node::operator == (modref_access_node &a) const
36 {
37 if (parm_index != a.parm_index)
38 return false;
39 if (parm_index != MODREF_UNKNOWN_PARM
40 && parm_index != MODREF_GLOBAL_MEMORY_PARM)
41 {
42 if (parm_offset_known != a.parm_offset_known)
43 return false;
44 if (parm_offset_known
45 && !known_eq (parm_offset, a.parm_offset))
46 return false;
47 }
48 if (range_info_useful_p () != a.range_info_useful_p ())
49 return false;
50 if (range_info_useful_p ()
51 && (!known_eq (a.offset, offset)
52 || !known_eq (a.size, size)
53 || !known_eq (a.max_size, max_size)))
54 return false;
55 return true;
56 }
57
58 /* Return true if A is a subaccess. */
59 bool
60 modref_access_node::contains (const modref_access_node &a) const
61 {
62 poly_int64 aoffset_adj = 0;
63 if (parm_index != MODREF_UNKNOWN_PARM)
64 {
65 if (parm_index != a.parm_index)
66 return false;
67 if (parm_offset_known)
68 {
69 if (!a.parm_offset_known)
70 return false;
71 /* Accesses are never below parm_offset, so look
72 for a smaller offset.
73 If access ranges are known, still allow merging
74 when the bit offset comparison passes. */
75 if (!known_le (parm_offset, a.parm_offset)
76 && !range_info_useful_p ())
77 return false;
78 /* We allow negative aoffset_adj here in case
79 there is a useful range. This is because adding
80 a.offset may result in non-negative offset again.
81 Ubsan fails on val << LOG_BITS_PER_UNIT where val
82 is negative. */
83 aoffset_adj = (a.parm_offset - parm_offset)
84 * BITS_PER_UNIT;
85 }
86 }
87 if (range_info_useful_p ())
88 {
89 if (!a.range_info_useful_p ())
90 return false;
91 /* Sizes of stores are used to check that the object is big enough
92 to fit the store, so a smaller or unknown store is more general
93 than a large one. */
94 if (known_size_p (size)
95 && (!known_size_p (a.size)
96 || !known_le (size, a.size)))
97 return false;
98 if (known_size_p (max_size))
99 return known_subrange_p (a.offset + aoffset_adj,
100 a.max_size, offset, max_size);
101 else
102 return known_le (offset, a.offset + aoffset_adj);
103 }
104 return true;
105 }
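
/* A worked example (values purely illustrative, assuming 8-bit units):
   an access with parm_offset 0, offset 0, size 8 and max_size 32
   contains an access with parm_offset 2, offset 0, size 8 and
   max_size 16, because after adjusting the second access by
   (2 - 0) * BITS_PER_UNIT its bit range [16, 32) is a subrange of
   [0, 32) and its size is at least as large as ours.  */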
106
107 /* Update access range to new parameters.
108 If RECORD_ADJUSTMENTS is true, record the number of changes in the access
109 and if the threshold is exceeded start dropping precision,
110 so that only a constant number of updates is possible. This makes the
111 dataflow converge. */
112 void
113 modref_access_node::update (poly_int64 parm_offset1,
114 poly_int64 offset1, poly_int64 size1,
115 poly_int64 max_size1, bool record_adjustments)
116 {
117 if (known_eq (parm_offset, parm_offset1)
118 && known_eq (offset, offset1)
119 && known_eq (size, size1)
120 && known_eq (max_size, max_size1))
121 return;
122 if (!record_adjustments
123 || (++adjustments) < param_modref_max_adjustments)
124 {
125 parm_offset = parm_offset1;
126 offset = offset1;
127 size = size1;
128 max_size = max_size1;
129 }
130 else
131 {
132 if (dump_file)
133 fprintf (dump_file, "--param modref-max-adjustments limit reached:");
134 if (!known_eq (parm_offset, parm_offset1))
135 {
136 if (dump_file)
137 fprintf (dump_file, " parm_offset cleared");
138 parm_offset_known = false;
139 }
140 if (!known_eq (size, size1))
141 {
142 size = -1;
143 if (dump_file)
144 fprintf (dump_file, " size cleared");
145 }
146 if (!known_eq (max_size, max_size1))
147 {
148 max_size = -1;
149 if (dump_file)
150 fprintf (dump_file, " max_size cleared");
151 }
152 if (!known_eq (offset, offset1))
153 {
154 offset = 0;
155 if (dump_file)
156 fprintf (dump_file, " offset cleared");
157 }
158 if (dump_file)
159 fprintf (dump_file, "\n");
160 }
161 }
162
163 /* Merge in access A if it is possible to do so without losing
164 precision. Return true if successful.
165 If RECORD_ADJUSTMENTS is true, remember how many times intervals
166 were prolonged and punt when there are too many. */
167 bool
168 modref_access_node::merge (const modref_access_node &a,
169 bool record_adjustments)
170 {
171 poly_int64 offset1 = 0;
172 poly_int64 aoffset1 = 0;
173 poly_int64 new_parm_offset = 0;
174
175 /* We assume that containment was tested earlier. */
176 gcc_checking_assert (!contains (a) && !a.contains (*this));
177 if (parm_index != MODREF_UNKNOWN_PARM)
178 {
179 if (parm_index != a.parm_index)
180 return false;
181 if (parm_offset_known)
182 {
183 if (!a.parm_offset_known)
184 return false;
185 if (!combined_offsets (a, &new_parm_offset, &offset1, &aoffset1))
186 return false;
187 }
188 }
189 /* See if we can merge ranges. */
190 if (range_info_useful_p ())
191 {
192 /* In this case we would have containment, which should have been
193 handled earlier. */
194 gcc_checking_assert (a.range_info_useful_p ());
195
196 /* If a.size is less specific than size, merge only
197 if the intervals are otherwise equivalent. */
198 if (known_size_p (size)
199 && (!known_size_p (a.size) || known_lt (a.size, size)))
200 {
201 if (((known_size_p (max_size) || known_size_p (a.max_size))
202 && !known_eq (max_size, a.max_size))
203 || !known_eq (offset1, aoffset1))
204 return false;
205 update (new_parm_offset, offset1, a.size, max_size,
206 record_adjustments);
207 return true;
208 }
209 /* If the sizes are the same, we can extend the interval. */
210 if ((known_size_p (size) || known_size_p (a.size))
211 && !known_eq (size, a.size))
212 return false;
213 if (known_le (offset1, aoffset1))
214 {
215 if (!known_size_p (max_size)
216 || known_ge (offset1 + max_size, aoffset1))
217 {
218 update2 (new_parm_offset, offset1, size, max_size,
219 aoffset1, a.size, a.max_size,
220 record_adjustments);
221 return true;
222 }
223 }
224 else if (known_le (aoffset1, offset1))
225 {
226 if (!known_size_p (a.max_size)
227 || known_ge (aoffset1 + a.max_size, offset1))
228 {
229 update2 (new_parm_offset, offset1, size, max_size,
230 aoffset1, a.size, a.max_size,
231 record_adjustments);
232 return true;
233 }
234 }
235 return false;
236 }
237 update (new_parm_offset, offset1,
238 size, max_size, record_adjustments);
239 return true;
240 }
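
/* Illustrative example of a lossless merge (made-up values): two
   accesses to the same parameter with equal parm_offset and equal
   size, one covering bits [0, 32) and the other bits [32, 64), merge
   into a single access covering [0, 64), since the intervals touch
   and no information is lost.  */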
241
242 /* Return true if merging A1 and B1 loses less information
243 than merging A2 and B2 would.
244 Assume that no containment or lossless merging is possible. */
245 bool
246 modref_access_node::closer_pair_p (const modref_access_node &a1,
247 const modref_access_node &b1,
248 const modref_access_node &a2,
249 const modref_access_node &b2)
250 {
251 /* Merging different parm indexes amounts to a complete loss
252 of range info. */
253 if (a1.parm_index != b1.parm_index)
254 return false;
255 if (a2.parm_index != b2.parm_index)
256 return true;
257 /* If parm is known and parm indexes are the same we should
258 already have containment. */
259 gcc_checking_assert (a1.parm_offset_known && b1.parm_offset_known);
260 gcc_checking_assert (a2.parm_offset_known && b2.parm_offset_known);
261
262 /* First normalize offsets for parm offsets. */
263 poly_int64 new_parm_offset, offseta1, offsetb1, offseta2, offsetb2;
264 if (!a1.combined_offsets (b1, &new_parm_offset, &offseta1, &offsetb1)
265 || !a2.combined_offsets (b2, &new_parm_offset, &offseta2, &offsetb2))
266 gcc_unreachable ();
267
268
269 /* Now compute distance of the intervals. */
270 poly_int64 dist1, dist2;
271 if (known_le (offseta1, offsetb1))
272 {
273 if (!known_size_p (a1.max_size))
274 dist1 = 0;
275 else
276 dist1 = offsetb1 - offseta1 - a1.max_size;
277 }
278 else
279 {
280 if (!known_size_p (b1.max_size))
281 dist1 = 0;
282 else
283 dist1 = offseta1 - offsetb1 - b1.max_size;
284 }
285 if (known_le (offseta2, offsetb2))
286 {
287 if (!known_size_p (a2.max_size))
288 dist2 = 0;
289 else
290 dist2 = offsetb2 - offseta2 - a2.max_size;
291 }
292 else
293 {
294 if (!known_size_p (b2.max_size))
295 dist2 = 0;
296 else
297 dist2 = offseta2 - offsetb2 - b2.max_size;
298 }
299 /* It may happen that the intervals overlap when the sizes
300 differ. Prefer overlap to non-overlap. */
301 if (known_lt (dist1, 0) && known_ge (dist2, 0))
302 return true;
303 if (known_lt (dist2, 0) && known_ge (dist1, 0))
304 return false;
305 if (known_lt (dist1, 0))
306 /* If both overlap, minimize the overlap. */
307 return known_le (dist2, dist1);
308 else
309 /* If both are disjoint, look for the smaller distance. */
310 return known_le (dist1, dist2);
311 }
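
/* Illustrative example (made-up offsets, both pairs referring to the
   same parameter and already normalized): if A1 covers bits [0, 32)
   and B1 bits [48, 64), then dist1 = 48 - 0 - 32 = 16; if A2 covers
   [0, 8) and B2 [64, 72), then dist2 = 56.  Both pairs are disjoint
   and dist1 < dist2, so the first pair is closer and the function
   returns true.  */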
312
313 /* Merge in access A while losing precision. */
314 void
315 modref_access_node::forced_merge (const modref_access_node &a,
316 bool record_adjustments)
317 {
318 if (parm_index != a.parm_index)
319 {
320 gcc_checking_assert (parm_index != MODREF_UNKNOWN_PARM);
321 parm_index = MODREF_UNKNOWN_PARM;
322 return;
323 }
324
325 /* We assume that containment and lossless merging
326 were tested earlier. */
327 gcc_checking_assert (!contains (a) && !a.contains (*this)
328 && !merge (a, record_adjustments));
329 gcc_checking_assert (parm_offset_known && a.parm_offset_known);
330
331 poly_int64 new_parm_offset, offset1, aoffset1;
332 if (!combined_offsets (a, &new_parm_offset, &offset1, &aoffset1))
333 {
334 parm_offset_known = false;
335 return;
336 }
337 gcc_checking_assert (range_info_useful_p ()
338 && a.range_info_useful_p ());
339 if (record_adjustments)
340 adjustments += a.adjustments;
341 update2 (new_parm_offset,
342 offset1, size, max_size,
343 aoffset1, a.size, a.max_size,
344 record_adjustments);
345 }
346
347 /* Merge two ranges both starting at parm_offset1 and update THIS
348 with result. */
349 void
350 modref_access_node::update2 (poly_int64 parm_offset1,
351 poly_int64 offset1, poly_int64 size1,
352 poly_int64 max_size1,
353 poly_int64 offset2, poly_int64 size2,
354 poly_int64 max_size2,
355 bool record_adjustments)
356 {
357 poly_int64 new_size = size1;
358
359 if (!known_size_p (size2)
360 || known_le (size2, size1))
361 new_size = size2;
362 else
363 gcc_checking_assert (known_le (size1, size2));
364
365 if (known_le (offset1, offset2))
366 ;
367 else if (known_le (offset2, offset1))
368 {
369 std::swap (offset1, offset2);
370 std::swap (max_size1, max_size2);
371 }
372 else
373 gcc_unreachable ();
374
375 poly_int64 new_max_size;
376
377 if (!known_size_p (max_size1))
378 new_max_size = max_size1;
379 else if (!known_size_p (max_size2))
380 new_max_size = max_size2;
381 else
382 {
383 new_max_size = max_size2 + offset2 - offset1;
384 if (known_le (new_max_size, max_size1))
385 new_max_size = max_size1;
386 }
387
388 update (parm_offset1, offset1,
389 new_size, new_max_size, record_adjustments);
390 }
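
/* For instance (illustrative numbers): merging a range with offset 0
   and max_size 32 with a range at offset 16 and max_size 32 yields
   offset 0 and max_size 16 + 32 = 48, i.e. the union of the two bit
   ranges, while the resulting size is the smaller (more general) of
   the two sizes.  */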
391
392 /* Given access nodes THIS and A, return true if their ranges can
393 be expressed using a common parm_offset. In this case return
394 the common parm offset in NEW_PARM_OFFSET, the start of the
395 range in THIS in NEW_OFFSET and the start of the range in A
396 in NEW_AOFFSET. */
397 bool
398 modref_access_node::combined_offsets (const modref_access_node &a,
399 poly_int64 *new_parm_offset,
400 poly_int64 *new_offset,
401 poly_int64 *new_aoffset) const
402 {
403 gcc_checking_assert (parm_offset_known && a.parm_offset_known);
404 if (known_le (a.parm_offset, parm_offset))
405 {
406 *new_offset = offset
407 + ((parm_offset - a.parm_offset)
408 << LOG2_BITS_PER_UNIT);
409 *new_aoffset = a.offset;
410 *new_parm_offset = a.parm_offset;
411 return true;
412 }
413 else if (known_le (parm_offset, a.parm_offset))
414 {
415 *new_aoffset = a.offset
416 + ((a.parm_offset - parm_offset)
417 << LOG2_BITS_PER_UNIT);
418 *new_offset = offset;
419 *new_parm_offset = parm_offset;
420 return true;
421 }
422 else
423 return false;
424 }
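
/* For example (illustrative values, assuming 8-bit units): if THIS
   has parm_offset 4 and A has parm_offset 2, the common parm_offset
   becomes 2; the bit offset of THIS is increased by
   (4 - 2) << LOG2_BITS_PER_UNIT == 16 while A's offset is unchanged,
   so both ranges end up relative to the same byte of the parameter.  */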
425
426 /* Try to optimize the access list ACCESSES after entry INDEX was modified. */
427 void
428 modref_access_node::try_merge_with (vec <modref_access_node, va_gc> *&accesses,
429 size_t index)
430 {
431 size_t i;
432
433 for (i = 0; i < accesses->length ();)
434 if (i != index)
435 {
436 bool found = false, restart = false;
437 modref_access_node *a = &(*accesses)[i];
438 modref_access_node *n = &(*accesses)[index];
439
440 if (n->contains (*a))
441 found = true;
442 if (!found && n->merge (*a, false))
443 found = restart = true;
444 gcc_checking_assert (found || !a->merge (*n, false));
445 if (found)
446 {
447 accesses->unordered_remove (i);
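/* unordered_remove moved the former last element into slot I; if that
   element was the one at INDEX, update INDEX so it keeps tracking the
   node we are merging into.  */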
448 if (index == accesses->length ())
449 {
450 index = i;
451 i++;
452 }
453 if (restart)
454 i = 0;
455 }
456 else
457 i++;
458 }
459 else
460 i++;
461 }
462
463 /* Stream out to OB. */
464
465 void
466 modref_access_node::stream_out (struct output_block *ob) const
467 {
468 streamer_write_hwi (ob, parm_index);
469 if (parm_index != MODREF_UNKNOWN_PARM)
470 {
471 streamer_write_uhwi (ob, parm_offset_known);
472 if (parm_offset_known)
473 {
474 streamer_write_poly_int64 (ob, parm_offset);
475 streamer_write_poly_int64 (ob, offset);
476 streamer_write_poly_int64 (ob, size);
477 streamer_write_poly_int64 (ob, max_size);
478 }
479 }
480 }
481
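/* Stream in a modref_access_node from IB.  */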
482 modref_access_node
483 modref_access_node::stream_in (struct lto_input_block *ib)
484 {
485 int parm_index = streamer_read_hwi (ib);
486 bool parm_offset_known = false;
487 poly_int64 parm_offset = 0;
488 poly_int64 offset = 0;
489 poly_int64 size = -1;
490 poly_int64 max_size = -1;
491
492 if (parm_index != MODREF_UNKNOWN_PARM)
493 {
494 parm_offset_known = streamer_read_uhwi (ib);
495 if (parm_offset_known)
496 {
497 parm_offset = streamer_read_poly_int64 (ib);
498 offset = streamer_read_poly_int64 (ib);
499 size = streamer_read_poly_int64 (ib);
500 max_size = streamer_read_poly_int64 (ib);
501 }
502 }
503 return {offset, size, max_size, parm_offset, parm_index,
504 parm_offset_known, false};
505 }
506
507 /* Insert access A into the access list ACCESSES.
508 Limit the list to at most MAX_ACCESSES entries.
509 If RECORD_ADJUSTMENTS is true avoid too many interval extensions.
510 The return value indicates whether the list was changed:
511
512 return 0 if nothing changed, 1 if insert was successful and -1
513 if entries should be collapsed. */
514 int
515 modref_access_node::insert (vec <modref_access_node, va_gc> *&accesses,
516 modref_access_node a, size_t max_accesses,
517 bool record_adjustments)
518 {
519 size_t i, j;
520 modref_access_node *a2;
521
522 /* Verify that list does not contain redundant accesses. */
523 if (flag_checking)
524 {
525 size_t i, i2;
526 modref_access_node *a, *a2;
527
528 FOR_EACH_VEC_SAFE_ELT (accesses, i, a)
529 {
530 FOR_EACH_VEC_SAFE_ELT (accesses, i2, a2)
531 if (i != i2)
532 gcc_assert (!a->contains (*a2));
533 }
534 }
535
536 FOR_EACH_VEC_SAFE_ELT (accesses, i, a2)
537 {
538 if (a2->contains (a))
539 return 0;
540 if (a.contains (*a2))
541 {
542 a.adjustments = 0;
543 a2->parm_index = a.parm_index;
544 a2->parm_offset_known = a.parm_offset_known;
545 a2->update (a.parm_offset, a.offset, a.size, a.max_size,
546 record_adjustments);
547 modref_access_node::try_merge_with (accesses, i);
548 return 1;
549 }
550 if (a2->merge (a, record_adjustments))
551 {
552 modref_access_node::try_merge_with (accesses, i);
553 return 1;
554 }
555 gcc_checking_assert (!(a == *a2));
556 }
557
558 /* If this base->ref pair has too many accesses stored, merge the
559 least harmful pair, or return -1 to request collapsing the list. */
560 if (accesses && accesses->length () >= max_accesses)
561 {
562 if (max_accesses < 2)
563 return -1;
564 /* Find least harmful merge and perform it. */
565 int best1 = -1, best2 = -1;
566 FOR_EACH_VEC_SAFE_ELT (accesses, i, a2)
567 {
568 for (j = i + 1; j < accesses->length (); j++)
569 if (best1 < 0
570 || modref_access_node::closer_pair_p
571 (*a2, (*accesses)[j],
572 (*accesses)[best1],
573 best2 < 0 ? a : (*accesses)[best2]))
574 {
575 best1 = i;
576 best2 = j;
577 }
578 if (modref_access_node::closer_pair_p
579 (*a2, a,
580 (*accesses)[best1],
581 best2 < 0 ? a : (*accesses)[best2]))
582 {
583 best1 = i;
584 best2 = -1;
585 }
586 }
587 (*accesses)[best1].forced_merge (best2 < 0 ? a : (*accesses)[best2],
588 record_adjustments);
589 /* Check that merging indeed merged ranges. */
590 gcc_checking_assert ((*accesses)[best1].contains
591 (best2 < 0 ? a : (*accesses)[best2]));
592 if (!(*accesses)[best1].useful_p ())
593 return -1;
594 if (dump_file && best2 >= 0)
595 fprintf (dump_file,
596 "--param modref-max-accesses limit reached;"
597 " merging %i and %i\n", best1, best2);
598 else if (dump_file)
599 fprintf (dump_file,
600 "--param modref-max-accesses limit reached;"
601 " merging with %i\n", best1);
602 modref_access_node::try_merge_with (accesses, best1);
603 if (best2 >= 0)
604 insert (accesses, a, max_accesses, record_adjustments);
605 return 1;
606 }
607 a.adjustments = 0;
608 vec_safe_push (accesses, a);
609 return 1;
610 }
611
612 /* Return true if range info is useful. */
613 bool
614 modref_access_node::range_info_useful_p () const
615 {
616 return parm_index != MODREF_UNKNOWN_PARM
617 && parm_index != MODREF_GLOBAL_MEMORY_PARM
618 && parm_offset_known
619 && (known_size_p (size)
620 || known_size_p (max_size)
621 || known_ge (offset, 0));
622 }
623
624 /* Dump range to OUT. */
625 void
626 modref_access_node::dump (FILE *out)
627 {
628 if (parm_index != MODREF_UNKNOWN_PARM)
629 {
630 if (parm_index == MODREF_GLOBAL_MEMORY_PARM)
631 fprintf (out, " Base in global memory");
632 else if (parm_index >= 0)
633 fprintf (out, " Parm %i", parm_index);
634 else if (parm_index == MODREF_STATIC_CHAIN_PARM)
635 fprintf (out, " Static chain");
636 else
637 gcc_unreachable ();
638 if (parm_offset_known)
639 {
640 fprintf (out, " param offset:");
641 print_dec ((poly_int64_pod)parm_offset, out, SIGNED);
642 }
643 }
644 if (range_info_useful_p ())
645 {
646 fprintf (out, " offset:");
647 print_dec ((poly_int64_pod)offset, out, SIGNED);
648 fprintf (out, " size:");
649 print_dec ((poly_int64_pod)size, out, SIGNED);
650 fprintf (out, " max_size:");
651 print_dec ((poly_int64_pod)max_size, out, SIGNED);
652 if (adjustments)
653 fprintf (out, " adjusted %i times", adjustments);
654 }
655 fprintf (out, "\n");
656 }
657
658 /* Return tree corresponding to parameter of the range in STMT. */
659 tree
660 modref_access_node::get_call_arg (const gcall *stmt) const
661 {
662 if (parm_index == MODREF_UNKNOWN_PARM
663 || parm_index == MODREF_GLOBAL_MEMORY_PARM)
664 return NULL;
665 if (parm_index == MODREF_STATIC_CHAIN_PARM)
666 return gimple_call_chain (stmt);
667 /* MODREF_RETSLOT_PARM should not happen in access trees since the store
668 is seen explicitly in the caller. */
669 gcc_checking_assert (parm_index >= 0);
670 if (parm_index >= (int)gimple_call_num_args (stmt))
671 return NULL;
672 return gimple_call_arg (stmt, parm_index);
673 }
674
675 /* Initialize REF to the range accessed by this node in call STMT; return true on success. */
676 bool
677 modref_access_node::get_ao_ref (const gcall *stmt, ao_ref *ref) const
678 {
679 tree arg;
680
681 if (!parm_offset_known
682 || !(arg = get_call_arg (stmt))
683 || !POINTER_TYPE_P (TREE_TYPE (arg)))
684 return false;
685 poly_offset_int off = (poly_offset_int)offset
686 + ((poly_offset_int)parm_offset << LOG2_BITS_PER_UNIT);
687 poly_int64 off2;
688 if (!off.to_shwi (&off2))
689 return false;
690 ao_ref_init_from_ptr_and_range (ref, arg, true, off2, size, max_size);
691 return true;
692 }
693
694 /* Return true if A is a subkill. */
695 bool
696 modref_access_node::contains_for_kills (const modref_access_node &a) const
697 {
698 poly_int64 aoffset_adj = 0;
699
700 gcc_checking_assert (parm_index != MODREF_UNKNOWN_PARM
701 && a.parm_index != MODREF_UNKNOWN_PARM);
702 if (parm_index != a.parm_index)
703 return false;
704 gcc_checking_assert (parm_offset_known && a.parm_offset_known);
705 aoffset_adj = (a.parm_offset - parm_offset)
706 * BITS_PER_UNIT;
707 gcc_checking_assert (range_info_useful_p () && a.range_info_useful_p ());
708 return known_subrange_p (a.offset + aoffset_adj,
709 a.max_size, offset, max_size);
710 }
711
712 /* Merge two ranges both starting at PARM_OFFSET1 and update THIS
713 with the result. Return true if THIS was updated. */
714 bool
715 modref_access_node::update_for_kills (poly_int64 parm_offset1,
716 poly_int64 offset1,
717 poly_int64 max_size1,
718 poly_int64 offset2,
719 poly_int64 max_size2,
720 bool record_adjustments)
721 {
722 if (known_le (offset1, offset2))
723 ;
724 else if (known_le (offset2, offset1))
725 {
726 std::swap (offset1, offset2);
727 std::swap (max_size1, max_size2);
728 }
729 else
730 gcc_unreachable ();
731
732 poly_int64 new_max_size = max_size2 + offset2 - offset1;
733 if (known_le (new_max_size, max_size1))
734 new_max_size = max_size1;
735 if (known_eq (parm_offset, parm_offset1)
736 && known_eq (offset, offset1)
737 && known_eq (size, new_max_size)
738 && known_eq (max_size, new_max_size))
739 return false;
740
741 if (!record_adjustments
742 || (++adjustments) < param_modref_max_adjustments)
743 {
744 parm_offset = parm_offset1;
745 offset = offset1;
746 max_size = new_max_size;
747 size = new_max_size;
748 gcc_checking_assert (useful_for_kill_p ());
749 return true;
750 }
751 return false;
752 }
753
754 /* Merge in access A if it is possible to do so without losing
755 precision. Return true if successful.
756 Unlike merge, assume that both accesses are always executed
757 and merge size the same way as max_size. */
758 bool
759 modref_access_node::merge_for_kills (const modref_access_node &a,
760 bool record_adjustments)
761 {
762 poly_int64 offset1 = 0;
763 poly_int64 aoffset1 = 0;
764 poly_int64 new_parm_offset = 0;
765
766 /* We assume that containment was tested earlier. */
767 gcc_checking_assert (!contains_for_kills (a) && !a.contains_for_kills (*this)
768 && useful_for_kill_p () && a.useful_for_kill_p ());
769
770 if (parm_index != a.parm_index
771 || !combined_offsets (a, &new_parm_offset, &offset1, &aoffset1))
772 return false;
773
774 if (known_le (offset1, aoffset1))
775 {
776 if (!known_size_p (max_size)
777 || known_ge (offset1 + max_size, aoffset1))
778 return update_for_kills (new_parm_offset, offset1, max_size,
779 aoffset1, a.max_size, record_adjustments);
780 }
781 else if (known_le (aoffset1, offset1))
782 {
783 if (!known_size_p (a.max_size)
784 || known_ge (aoffset1 + a.max_size, offset1))
785 return update_for_kills (new_parm_offset, offset1, max_size,
786 aoffset1, a.max_size, record_adjustments);
787 }
788 return false;
789 }
790
791 /* Insert new kill A into KILLS. If RECORD_ADJUSTMENTS is true limit number
792 of changes to each entry. Return true if something changed. */
793
794 bool
795 modref_access_node::insert_kill (vec<modref_access_node> &kills,
796 modref_access_node &a, bool record_adjustments)
797 {
798 size_t index;
799 modref_access_node *a2;
800 bool merge = false;
801
802 gcc_checking_assert (a.useful_for_kill_p ());
803
804 /* See if we already have a corresponding entry or can merge with
805 a neighboring entry. */
806 FOR_EACH_VEC_ELT (kills, index, a2)
807 {
808 if (a2->contains_for_kills (a))
809 return false;
810 if (a.contains_for_kills (*a2))
811 {
812 a.adjustments = 0;
813 *a2 = a;
814 merge = true;
815 break;
816 }
817 if (a2->merge_for_kills (a, record_adjustments))
818 {
819 merge = true;
820 break;
821 }
822 }
823 /* If entry was not found, insert it. */
824 if (!merge)
825 {
826 if ((int)kills.length () >= param_modref_max_accesses)
827 {
828 if (dump_file)
829 fprintf (dump_file, "--param modref-max-accesses limit reached\n");
830 return false;
831 }
832 a.adjustments = 0;
833 kills.safe_push (a);
834 return true;
835 }
836 /* Extending range in an entry may make it possible to merge it with
837 other entries. */
838 size_t i;
839
840 for (i = 0; i < kills.length ();)
841 if (i != index)
842 {
843 bool found = false, restart = false;
844 modref_access_node *a = &kills[i];
845 modref_access_node *n = &kills[index];
846
847 if (n->contains_for_kills (*a))
848 found = true;
849 if (!found && n->merge_for_kills (*a, false))
850 found = restart = true;
851 gcc_checking_assert (found || !a->merge_for_kills (*n, false));
852 if (found)
853 {
854 kills.unordered_remove (i);
855 if (index == kills.length ())
856 {
857 index = i;
858 i++;
859 }
860 if (restart)
861 i = 0;
862 }
863 else
864 i++;
865 }
866 else
867 i++;
868 return true;
869 }
870
871
872 #if CHECKING_P
873
874 namespace selftest {
875
876 static void
877 test_insert_search_collapse ()
878 {
879 modref_base_node<alias_set_type> *base_node;
880 modref_ref_node<alias_set_type> *ref_node;
881 modref_access_node a = unspecified_modref_access_node;
882
883 modref_tree<alias_set_type> *t = new modref_tree<alias_set_type>();
884 ASSERT_FALSE (t->every_base);
885
886 /* Insert into an empty tree. */
887 t->insert (1, 2, 2, 1, 2, a, false);
888 ASSERT_NE (t->bases, NULL);
889 ASSERT_EQ (t->bases->length (), 1);
890 ASSERT_FALSE (t->every_base);
891 ASSERT_EQ (t->search (2), NULL);
892
893 base_node = t->search (1);
894 ASSERT_NE (base_node, NULL);
895 ASSERT_EQ (base_node->base, 1);
896 ASSERT_NE (base_node->refs, NULL);
897 ASSERT_EQ (base_node->refs->length (), 1);
898 ASSERT_EQ (base_node->search (1), NULL);
899
900 ref_node = base_node->search (2);
901 ASSERT_NE (ref_node, NULL);
902 ASSERT_EQ (ref_node->ref, 2);
903
904 /* Insert when base exists but ref does not. */
905 t->insert (1, 2, 2, 1, 3, a, false);
906 ASSERT_NE (t->bases, NULL);
907 ASSERT_EQ (t->bases->length (), 1);
908 ASSERT_EQ (t->search (1), base_node);
909 ASSERT_EQ (t->search (2), NULL);
910 ASSERT_NE (base_node->refs, NULL);
911 ASSERT_EQ (base_node->refs->length (), 2);
912
913 ref_node = base_node->search (3);
914 ASSERT_NE (ref_node, NULL);
915
916 /* Insert when base and ref exist, but access is not dominated by nor
917 dominates other accesses. */
918 t->insert (1, 2, 2, 1, 2, a, false);
919 ASSERT_EQ (t->bases->length (), 1);
920 ASSERT_EQ (t->search (1), base_node);
921
922 ref_node = base_node->search (2);
923 ASSERT_NE (ref_node, NULL);
924
925 /* Insert when base and ref exist and access is dominated. */
926 t->insert (1, 2, 2, 1, 2, a, false);
927 ASSERT_EQ (t->search (1), base_node);
928 ASSERT_EQ (base_node->search (2), ref_node);
929
930 /* Insert ref to trigger ref list collapse for base 1. */
931 t->insert (1, 2, 2, 1, 4, a, false);
932 ASSERT_EQ (t->search (1), base_node);
933 ASSERT_EQ (base_node->refs, NULL);
934 ASSERT_EQ (base_node->search (2), NULL);
935 ASSERT_EQ (base_node->search (3), NULL);
936 ASSERT_TRUE (base_node->every_ref);
937
938 /* Further inserts to collapsed ref list are ignored. */
939 t->insert (1, 2, 2, 1, 5, a, false);
940 ASSERT_EQ (t->search (1), base_node);
941 ASSERT_EQ (base_node->refs, NULL);
942 ASSERT_EQ (base_node->search (2), NULL);
943 ASSERT_EQ (base_node->search (3), NULL);
944 ASSERT_TRUE (base_node->every_ref);
945
946 /* Insert base to trigger base list collapse. */
947 t->insert (1, 2, 2, 5, 0, a, false);
948 ASSERT_TRUE (t->every_base);
949 ASSERT_EQ (t->bases, NULL);
950 ASSERT_EQ (t->search (1), NULL);
951
952 /* Further inserts to collapsed base list are ignored. */
953 t->insert (1, 2, 2, 7, 8, a, false);
954 ASSERT_TRUE (t->every_base);
955 ASSERT_EQ (t->bases, NULL);
956 ASSERT_EQ (t->search (1), NULL);
957
958 delete t;
959 }
960
961 static void
962 test_merge ()
963 {
964 modref_tree<alias_set_type> *t1, *t2;
965 modref_base_node<alias_set_type> *base_node;
966 modref_access_node a = unspecified_modref_access_node;
967
968 t1 = new modref_tree<alias_set_type>();
969 t1->insert (3, 4, 1, 1, 1, a, false);
970 t1->insert (3, 4, 1, 1, 2, a, false);
971 t1->insert (3, 4, 1, 1, 3, a, false);
972 t1->insert (3, 4, 1, 2, 1, a, false);
973 t1->insert (3, 4, 1, 3, 1, a, false);
974
975 t2 = new modref_tree<alias_set_type>();
976 t2->insert (10, 10, 10, 1, 2, a, false);
977 t2->insert (10, 10, 10, 1, 3, a, false);
978 t2->insert (10, 10, 10, 1, 4, a, false);
979 t2->insert (10, 10, 10, 3, 2, a, false);
980 t2->insert (10, 10, 10, 3, 3, a, false);
981 t2->insert (10, 10, 10, 3, 4, a, false);
982 t2->insert (10, 10, 10, 3, 5, a, false);
983
984 t1->merge (3, 4, 1, t2, NULL, NULL, false);
985
986 ASSERT_FALSE (t1->every_base);
987 ASSERT_NE (t1->bases, NULL);
988 ASSERT_EQ (t1->bases->length (), 3);
989
990 base_node = t1->search (1);
991 ASSERT_NE (base_node->refs, NULL);
992 ASSERT_FALSE (base_node->every_ref);
993 ASSERT_EQ (base_node->refs->length (), 4);
994
995 base_node = t1->search (2);
996 ASSERT_NE (base_node->refs, NULL);
997 ASSERT_FALSE (base_node->every_ref);
998 ASSERT_EQ (base_node->refs->length (), 1);
999
1000 base_node = t1->search (3);
1001 ASSERT_EQ (base_node->refs, NULL);
1002 ASSERT_TRUE (base_node->every_ref);
1003
1004 delete t1;
1005 delete t2;
1006 }
1007
1008
1009 void
1010 ipa_modref_tree_cc_tests ()
1011 {
1012 test_insert_search_collapse ();
1013 test_merge ();
1014 }
1015
1016 } // namespace selftest
1017
1018 #endif
1019
1020 void
1021 gt_ggc_mx (modref_tree < int >*const &tt)
1022 {
1023 if (tt->bases)
1024 {
1025 ggc_test_and_set_mark (tt->bases);
1026 gt_ggc_mx (tt->bases);
1027 }
1028 }
1029
1030 void
1031 gt_ggc_mx (modref_tree < tree_node * >*const &tt)
1032 {
1033 if (tt->bases)
1034 {
1035 ggc_test_and_set_mark (tt->bases);
1036 gt_ggc_mx (tt->bases);
1037 }
1038 }
1039
1040 void gt_pch_nx (modref_tree<int>* const&) {}
1041 void gt_pch_nx (modref_tree<tree_node*>* const&) {}
1042 void gt_pch_nx (modref_tree<int>* const&, gt_pointer_operator, void *) {}
1043 void gt_pch_nx (modref_tree<tree_node*>* const&, gt_pointer_operator, void *) {}
1044
1045 void gt_ggc_mx (modref_base_node<int>* &b)
1046 {
1047 ggc_test_and_set_mark (b);
1048 if (b->refs)
1049 {
1050 ggc_test_and_set_mark (b->refs);
1051 gt_ggc_mx (b->refs);
1052 }
1053 }
1054
1055 void gt_ggc_mx (modref_base_node<tree_node*>* &b)
1056 {
1057 ggc_test_and_set_mark (b);
1058 if (b->refs)
1059 {
1060 ggc_test_and_set_mark (b->refs);
1061 gt_ggc_mx (b->refs);
1062 }
1063 if (b->base)
1064 gt_ggc_mx (b->base);
1065 }
1066
1067 void gt_pch_nx (modref_base_node<int>*) {}
1068 void gt_pch_nx (modref_base_node<tree_node*>*) {}
1069 void gt_pch_nx (modref_base_node<int>*, gt_pointer_operator, void *) {}
1070 void gt_pch_nx (modref_base_node<tree_node*>*, gt_pointer_operator, void *) {}
1071
1072 void gt_ggc_mx (modref_ref_node<int>* &r)
1073 {
1074 ggc_test_and_set_mark (r);
1075 if (r->accesses)
1076 {
1077 ggc_test_and_set_mark (r->accesses);
1078 gt_ggc_mx (r->accesses);
1079 }
1080 }
1081
1082 void gt_ggc_mx (modref_ref_node<tree_node*>* &r)
1083 {
1084 ggc_test_and_set_mark (r);
1085 if (r->accesses)
1086 {
1087 ggc_test_and_set_mark (r->accesses);
1088 gt_ggc_mx (r->accesses);
1089 }
1090 if (r->ref)
1091 gt_ggc_mx (r->ref);
1092 }
1093
1094 void gt_pch_nx (modref_ref_node<int>* ) {}
1095 void gt_pch_nx (modref_ref_node<tree_node*>*) {}
1096 void gt_pch_nx (modref_ref_node<int>*, gt_pointer_operator, void *) {}
1097 void gt_pch_nx (modref_ref_node<tree_node*>*, gt_pointer_operator, void *) {}
1098
1099 void gt_ggc_mx (modref_access_node &)
1100 {
1101 }