[tree-ssa libmudflap] fixes, cleanups
- From: "Frank Ch. Eigler" <fche at redhat dot com>
- To: gcc-patches at gcc dot gnu dot org
- Date: Mon, 16 Sep 2002 10:49:43 -0400
- Subject: [tree-ssa libmudflap] fixes, cleanups
Hi -
I'm about to commit:
2002-09-16 Frank Ch. Eigler <fche@redhat.com>
* test/pass20-frag.c: New test file.
* Makefile.am (TESTS): Reorganize. Add pass20 test.
* Makefile.in: Regenerated.
* mf-impl.h (TRACE_IN, TRACE_OUT): Remove macros. Update callers.
* mf-hooks.c (BEGIN_PROTECT): Add hook tracing here.
* mf-heuristics.c (__mf_heuristic_check): Track seen /proc/self/maps
entries to avoid repeat registration.
* mf-runtime.c (__mf_object_cemetary): Don't bother burying GUESS regions.
(__mf_register, __mf_unregister): Rewrite GUESS handling logic.
Index: Makefile.am
===================================================================
RCS file: /cvs/gcc/gcc/libmudflap/Attic/Makefile.am,v
retrieving revision 1.1.2.10
diff -c -w -s -p -r1.1.2.10 Makefile.am
*** Makefile.am 9 Sep 2002 21:29:14 -0000 1.1.2.10
--- Makefile.am 16 Sep 2002 14:46:42 -0000
*************** MAINT_CHARSET = latin1
*** 9,22 ****
TESTS_ENVIRONMENT = LD_LIBRARY_PATH='.libs:../../gcc' MUDFLAP_OPTIONS='-mode-check -viol-abort -no-heur-proc-map'
! TESTS = test/fail1.x test/fail10.x test/fail11.x test/fail12.x \
! test/fail13.x test/fail14.x test/fail15.x test/fail16.x test/fail17.x \
! test/fail2.x test/fail3.x test/fail4.x test/fail5.x test/fail6.x \
! test/fail7.x test/fail8.x test/fail9.x test/pass1.x test/pass10.x \
test/pass11.x test/pass12.x test/pass13.x test/pass14.x test/pass15.x \
! test/pass16.x test/pass17.x test/pass18.x test/pass19.x test/pass2.x test/pass3.x test/pass4.x test/pass5.x \
! test/pass6.x test/pass7.x test/pass8.x test/pass9.x
!
test/%.c: test/%-frag.c test/mf-driver.c
@mkdir -p test
--- 9,22 ----
TESTS_ENVIRONMENT = LD_LIBRARY_PATH='.libs:../../gcc' MUDFLAP_OPTIONS='-mode-check -viol-abort -no-heur-proc-map'
! TESTS = test/fail1.x test/fail2.x test/fail3.x test/fail4.x test/fail5.x \
! test/fail6.x test/fail7.x test/fail8.x test/fail9.x test/fail10.x \
! test/fail11.x test/fail12.x test/fail13.x test/fail14.x test/fail15.x \
! test/fail16.x test/fail17.x \
! test/pass1.x test/pass2.x test/pass3.x test/pass4.x test/pass5.x \
! test/pass6.x test/pass7.x test/pass8.x test/pass9.x test/pass10.x \
test/pass11.x test/pass12.x test/pass13.x test/pass14.x test/pass15.x \
! test/pass16.x test/pass17.x test/pass18.x test/pass19.x test/pass20.x
test/%.c: test/%-frag.c test/mf-driver.c
@mkdir -p test
Index: mf-heuristics.c
===================================================================
RCS file: /cvs/gcc/gcc/libmudflap/Attic/mf-heuristics.c,v
retrieving revision 1.1.2.8
diff -c -w -s -p -r1.1.2.8 mf-heuristics.c
*** mf-heuristics.c 6 Sep 2002 15:26:12 -0000 1.1.2.8
--- mf-heuristics.c 16 Sep 2002 14:46:42 -0000
*************** is_stack_address (uintptr_t addr)
*** 34,47 ****
-
-
/* Run some quick validation of the given region. If successful, return non-zero.
If the result is cacheworthy, return something positive. */
int
__mf_heuristic_check (uintptr_t ptr, uintptr_t ptr_high)
{
! TRACE_IN;
/* The first heuristic is to check stack bounds. Since this is a
transient condition and quick to check, don't cache its
--- 34,45 ----
/* Run some quick validation of the given region. If successful, return non-zero.
If the result is cacheworthy, return something positive. */
int
__mf_heuristic_check (uintptr_t ptr, uintptr_t ptr_high)
{
! VERBOSE_TRACE ("mf: heuristic check\n");
/* The first heuristic is to check stack bounds. Since this is a
transient condition and quick to check, don't cache its
*************** __mf_heuristic_check (uintptr_t ptr, uin
*** 57,74 ****
ptr >= stack_top_guess &&
ptr_high >= ptr)
{
- TRACE_OUT;
return 1; /* uncacheable */
}
}
/* The second heuristic is to scan the range of memory regions
listed in /proc/self/maps, a special file provided by the Linux
kernel. Its results may be cached, and in fact, a GUESS object
may as well be recorded for interesting matching sections. */
-
if (__mf_opts.heur_proc_map)
{
char buf[512];
char flags[4];
void *low, *high;
--- 55,99 ----
ptr >= stack_top_guess &&
ptr_high >= ptr)
{
return 1; /* uncacheable */
}
}
+
/* The second heuristic is to scan the range of memory regions
listed in /proc/self/maps, a special file provided by the Linux
kernel. Its results may be cached, and in fact, a GUESS object
may as well be recorded for interesting matching sections. */
if (__mf_opts.heur_proc_map)
{
+ /* Keep a record of seen records from /proc/self/map. */
+ enum { max_entries = 500 };
+ struct proc_self_map_entry
+ {
+ uintptr_t low;
+ uintptr_t high;
+ };
+ static struct proc_self_map_entry entry [max_entries];
+ static unsigned entry_used [max_entries];
+
+ /* Look for a known proc_self_map entry that may cover this
+ region. If one exists, then this heuristic has already run,
+ and should not be run again. The check should be allowed to
+ fail. */
+ unsigned i;
+ unsigned deja_vu = 0;
+ for (i=0; i<max_entries; i++)
+ {
+ if (entry_used[i] &&
+ (entry[i].low >= ptr) &&
+ (entry[i].high <= ptr_high))
+ deja_vu = 1;
+ }
+
+ if (! deja_vu)
+ {
+ /* Time to run the heuristic. Rescan /proc/self/maps; update the
+ entry[] array; remove expired entries, add new ones. */
char buf[512];
char flags[4];
void *low, *high;
*************** __mf_heuristic_check (uintptr_t ptr, uin
*** 84,91 ****
if ((uintptr_t) low <= ptr &&
(uintptr_t) high >= ptr_high)
{
! VERBOSE_TRACE ("mf: registering region %08lx-%08lx given %s",
! (uintptr_t) low, (uintptr_t) high, buf);
/* XXX: bad hack; permit __mf_register to do its job. */
__mf_state = active;
--- 109,127 ----
if ((uintptr_t) low <= ptr &&
(uintptr_t) high >= ptr_high)
{
! for (i=0; i<max_entries; i++)
! {
! if (! entry_used[i])
! {
! entry[i].low = (uintptr_t) low;
! entry[i].high = (uintptr_t) high;
! break;
! }
! }
!
! VERBOSE_TRACE ("mf: registering region #%d "
! "%08lx-%08lx given %s",
! i, (uintptr_t) low, (uintptr_t) high, buf);
/* XXX: bad hack; permit __mf_register to do its job. */
__mf_state = active;
*************** __mf_heuristic_check (uintptr_t ptr, uin
*** 94,121 ****
"/proc/self/maps segment");
__mf_state = reentrant;
! TRACE_OUT;
! return 0; /* undecided tending to cachable yes */
}
}
}
fclose (fp);
}
}
/* The third heuristic is to approve all accesses between _start and _end,
which should include all text and initialized data. */
-
if (__mf_opts.heur_start_end)
- {
if (ptr >= (uintptr_t) & _start && ptr_high <= (uintptr_t) & _end)
- {
- TRACE_OUT;
return 1; /* uncacheable */
- }
- }
- TRACE_OUT;
return -1; /* hard failure */
}
--- 130,151 ----
"/proc/self/maps segment");
__mf_state = reentrant;
! return 0; /* undecided (tending to cachable) */
}
}
}
fclose (fp);
}
}
+ }
+
/* The third heuristic is to approve all accesses between _start and _end,
which should include all text and initialized data. */
if (__mf_opts.heur_start_end)
if (ptr >= (uintptr_t) & _start && ptr_high <= (uintptr_t) & _end)
return 1; /* uncacheable */
return -1; /* hard failure */
}
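For illustration, the seen-entry bookkeeping introduced above boils down to the
shape below. This is a minimal standalone sketch, not the committed code: the
coverage test follows the intent stated in the patch comment (an entry that
covers the queried region), and the entry_used bookkeeping is filled in here
for completeness.

/* Standalone sketch of the /proc/self/maps deduplication idea.  */
#include <stdint.h>
#include <stdio.h>

enum { max_entries = 500 };
static struct { uintptr_t low, high; } entry [max_entries];
static unsigned entry_used [max_entries];

/* Return nonzero if [ptr, ptr_high] is already covered by a recorded
   /proc/self/maps segment, so the heuristic need not rescan the file.  */
static int
already_seen (uintptr_t ptr, uintptr_t ptr_high)
{
  unsigned i;
  for (i = 0; i < max_entries; i++)
    if (entry_used[i] && entry[i].low <= ptr && entry[i].high >= ptr_high)
      return 1;
  return 0;
}

/* Record a segment after a successful scan, in the first free slot.  */
static void
record_segment (uintptr_t low, uintptr_t high)
{
  unsigned i;
  for (i = 0; i < max_entries; i++)
    if (! entry_used[i])
      {
        entry[i].low = low;
        entry[i].high = high;
        entry_used[i] = 1;
        break;
      }
}

int
main (void)
{
  record_segment (0x1000, 0x2000);
  printf ("covered: %d\n", already_seen (0x1100, 0x1200)); /* prints 1 */
  printf ("covered: %d\n", already_seen (0x3000, 0x3100)); /* prints 0 */
  return 0;
}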
Index: mf-hooks.c
===================================================================
RCS file: /cvs/gcc/gcc/libmudflap/Attic/mf-hooks.c,v
retrieving revision 1.1.2.14
diff -c -w -s -p -r1.1.2.14 mf-hooks.c
*** mf-hooks.c 6 Sep 2002 15:26:12 -0000 1.1.2.14
--- mf-hooks.c 16 Sep 2002 14:46:42 -0000
*************** XXX: libgcc license?
*** 45,51 ****
} \
else \
{ \
! TRACE_IN; \
old_state = __mf_state; \
__mf_state = reentrant; \
}
--- 45,51 ----
} \
else \
{ \
! TRACE ("mf: %s\n", __PRETTY_FUNCTION__); \
old_state = __mf_state; \
__mf_state = reentrant; \
}
*************** XXX: libgcc license?
*** 53,59 ****
#define END_PROTECT(ty, fname, ...) \
result = (ty) CALL_REAL(fname, __VA_ARGS__); \
__mf_state = old_state; \
- TRACE_OUT; \
return result;
--- 53,58 ----
*************** WRAPPER(void *, malloc, size_t c)
*** 81,87 ****
__MF_TYPE_HEAP, "malloc region");
}
- TRACE_OUT;
return result;
}
#endif
--- 80,85 ----
*************** WRAPPER(void *, calloc, size_t c, size_t
*** 114,120 ****
__MF_TYPE_HEAP, "calloc region");
}
- TRACE_OUT;
return result;
}
#endif
--- 112,117 ----
*************** WRAPPER(void *, realloc, void *buf, size
*** 148,154 ****
__MF_TYPE_HEAP, "realloc region");
}
- TRACE_OUT;
return result;
}
#endif
--- 145,150 ----
*************** WRAPPER(void, free, void *buf)
*** 181,187 ****
if (UNLIKELY(buf == NULL))
return;
! TRACE_IN;
__mf_unregister ((uintptr_t) buf, 0);
--- 177,183 ----
if (UNLIKELY(buf == NULL))
return;
! TRACE ("mf: %s\n", __PRETTY_FUNCTION__);
__mf_unregister ((uintptr_t) buf, 0);
*************** WRAPPER(void, free, void *buf)
*** 224,230 ****
}
__mf_state = old_state;
- TRACE_OUT;
}
#endif
--- 220,225 ----
*************** WRAPPER(void *, dlopen, const char *file
*** 236,242 ****
BEGIN_PROTECT(void *, dlopen, filename, flag);
result = CALL_REAL(dlopen, filename, flag);
__mf_state = old_state;
- TRACE_OUT;
return result;
}
#endif
--- 231,236 ----
*************** WRAPPER(void *, mmap,
*** 269,275 ****
"(heuristic) mmap region");
}
- TRACE_OUT;
return result;
}
#endif
--- 263,268 ----
*************** WRAPPER(void *, alloca, size_t c)
*** 311,317 ****
char *result;
struct alloca_tracking *track;
! TRACE_IN;
VERBOSE_TRACE ("mf: alloca stack level %08lx\n", (uintptr_t) stack);
/* Free any previously alloca'd blocks that belong to deeper-nested functions,
--- 304,310 ----
char *result;
struct alloca_tracking *track;
! TRACE ("mf: %s\n", __PRETTY_FUNCTION__);
VERBOSE_TRACE ("mf: alloca stack level %08lx\n", (uintptr_t) stack);
/* Free any previously alloca'd blocks that belong to deeper-nested functions,
*************** WRAPPER(void *, alloca, size_t c)
*** 350,356 ****
}
}
- TRACE_OUT;
return result;
}
#endif
--- 343,348 ----
*************** WRAPPER(char *, strdup, const char *s)
*** 604,610 ****
if (UNLIKELY(!result))
{
__mf_state = old_state;
- TRACE_OUT;
return result;
}
--- 596,601 ----
*************** WRAPPER(char *, strdup, const char *s)
*** 615,621 ****
__mf_register ((uintptr_t) result, CLAMPADD(n,1),
__MF_TYPE_HEAP, "strdup region");
__mf_state = old_state;
- TRACE_OUT;
return result;
}
#endif
--- 606,611 ----
*************** WRAPPER(char *, strndup, const char *s,
*** 642,648 ****
if (UNLIKELY(!result))
{
__mf_state = old_state;
- TRACE_OUT;
return result;
}
--- 632,637 ----
*************** WRAPPER(char *, strndup, const char *s,
*** 653,659 ****
__mf_register ((uintptr_t) result, CLAMPADD(n,1),
__MF_TYPE_HEAP, "strndup region");
__mf_state = old_state;
- TRACE_OUT;
return result;
}
#endif
--- 642,647 ----
*************** WRAPPER(size_t, strlen, const char *s)
*** 727,733 ****
result = CALL_REAL(strlen, s);
MF_VALIDATE_EXTENT(s, CLAMPADD(result, 1), "strlen region");
__mf_state = old_state;
- TRACE_OUT;
return result;
}
#endif
--- 715,720 ----
*************** WRAPPER(size_t, strnlen, const char *s,
*** 741,747 ****
result = CALL_REAL(strnlen, s, n);
MF_VALIDATE_EXTENT(s, result, "strnlen region");
__mf_state = old_state;
- TRACE_OUT;
return result;
}
#endif
--- 728,733 ----
*************** WRAPPER(void, bzero, void *s, size_t n)
*** 757,769 ****
CALL_REAL(bzero, s, n);
return;
}
! TRACE_IN;
old_state = __mf_state;
__mf_state = reentrant;
MF_VALIDATE_EXTENT(s, n, "bzero region");
CALL_REAL(bzero, s, n);
__mf_state = old_state;
- TRACE_OUT;
}
#endif
--- 743,754 ----
CALL_REAL(bzero, s, n);
return;
}
! TRACE ("mf: %s\n", __PRETTY_FUNCTION__);
old_state = __mf_state;
__mf_state = reentrant;
MF_VALIDATE_EXTENT(s, n, "bzero region");
CALL_REAL(bzero, s, n);
__mf_state = old_state;
}
#endif
*************** WRAPPER(void, bcopy, const void *src, vo
*** 778,791 ****
CALL_REAL(bcopy, src, dest, n);
return;
}
! TRACE_IN;
old_state = __mf_state;
__mf_state = reentrant;
MF_VALIDATE_EXTENT(src, n, "bcopy src");
MF_VALIDATE_EXTENT(dest, n, "bcopy dest");
CALL_REAL(bcopy, src, dest, n);
__mf_state = old_state;
- TRACE_OUT;
}
#endif
--- 763,775 ----
CALL_REAL(bcopy, src, dest, n);
return;
}
! TRACE ("mf: %s\n", __PRETTY_FUNCTION__);
old_state = __mf_state;
__mf_state = reentrant;
MF_VALIDATE_EXTENT(src, n, "bcopy src");
MF_VALIDATE_EXTENT(dest, n, "bcopy dest");
CALL_REAL(bcopy, src, dest, n);
__mf_state = old_state;
}
#endif
Index: mf-impl.h
===================================================================
RCS file: /cvs/gcc/gcc/libmudflap/Attic/mf-impl.h,v
retrieving revision 1.1.2.4
diff -c -w -s -p -r1.1.2.4 mf-impl.h
*** mf-impl.h 3 Sep 2002 15:48:09 -0000 1.1.2.4
--- mf-impl.h 16 Sep 2002 14:46:42 -0000
*************** extern struct __mf_dynamic __mf_dynamic;
*** 185,194 ****
if (UNLIKELY (__mf_opts.trace_mf_calls)) \
fprintf (stderr, __VA_ARGS__);
- #define TRACE_IN VERBOSE_TRACE ("mf: enter %s\n", __PRETTY_FUNCTION__)
- #define TRACE_OUT VERBOSE_TRACE ("mf: exit %s\n", __PRETTY_FUNCTION__)
-
-
#define __MF_PERSIST_MAX 256
#define __MF_FREEQ_MAX 256
--- 185,190 ----
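To make the macro change concrete: after this hunk the paired TRACE_IN /
TRACE_OUT calls are gone, and callers emit a single TRACE or VERBOSE_TRACE
line on entry, as in the mf-hooks.c hunks above. A minimal standalone sketch
of that interface follows; only the trace_mf_calls-guarded body appears in the
hunk context (presumably the tail of TRACE), so the verbose_trace flag and the
VERBOSE_TRACE body here are assumptions.

/* Sketch of the tracing interface left after removing TRACE_IN/TRACE_OUT.  */
#include <stdio.h>

static struct { int trace_mf_calls; int verbose_trace; } __mf_opts = { 1, 1 };

#define UNLIKELY(e) (e)

#define TRACE(...) \
  if (UNLIKELY (__mf_opts.trace_mf_calls)) \
    fprintf (stderr, __VA_ARGS__);

#define VERBOSE_TRACE(...) \
  if (UNLIKELY (__mf_opts.verbose_trace)) \
    fprintf (stderr, __VA_ARGS__);

int
main (void)
{
  /* A caller that used to bracket itself with TRACE_IN/TRACE_OUT now
     emits a single entry line.  */
  TRACE ("mf: %s\n", __PRETTY_FUNCTION__);
  VERBOSE_TRACE ("mf: heuristic check\n");
  return 0;
}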
Index: mf-runtime.c
===================================================================
RCS file: /cvs/gcc/gcc/libmudflap/Attic/mf-runtime.c,v
retrieving revision 1.1.2.13
diff -c -w -s -p -r1.1.2.13 mf-runtime.c
*** mf-runtime.c 6 Sep 2002 15:26:12 -0000 1.1.2.13
--- mf-runtime.c 16 Sep 2002 14:46:42 -0000
*************** resolve_single_dynamic (void **target, c
*** 357,363 ****
void
__mf_resolve_dynamics ()
{
- TRACE_IN;
#define RESOLVE(fname) \
resolve_single_dynamic (&__mf_dynamic.dyn_ ## fname, #fname)
RESOLVE(bcmp);
--- 357,362 ----
*************** resolve_single_dynamic (&__mf_dynamic.dy
*** 395,401 ****
RESOLVE(strrchr);
RESOLVE(strstr);
#undef RESOLVE
- TRACE_OUT;
}
#endif /* PIC */
--- 394,399 ----
*************** void __mf_init ()
*** 427,443 ****
}
__mf_state = active;
-
- TRACE_OUT;
}
extern void __mf_fini () DTOR;
void __mf_fini ()
{
! TRACE_IN;
__mf_report ();
- TRACE_OUT;
}
/* ------------------------------------------------------------------------ */
--- 425,438 ----
}
__mf_state = active;
}
extern void __mf_fini () DTOR;
void __mf_fini ()
{
! TRACE ("mf: __mf_fini\n");
__mf_report ();
}
/* ------------------------------------------------------------------------ */
*************** typedef struct __mf_object_tree
*** 484,492 ****
/* Live objects: binary tree on __mf_object_t.low */
__mf_object_tree_t *__mf_object_root;
! /* Dead objects: circular arrays */
unsigned __mf_object_dead_head[__MF_TYPE_GUESS+1]; /* next empty spot */
! __mf_object_tree_t *__mf_object_cemetary[__MF_TYPE_GUESS+1][__MF_PERSIST_MAX];
static __mf_object_tree_t *__mf_find_object (uintptr_t low, uintptr_t high);
static unsigned __mf_find_objects (uintptr_t ptr_low, uintptr_t ptr_high,
--- 479,487 ----
/* Live objects: binary tree on __mf_object_t.low */
__mf_object_tree_t *__mf_object_root;
! /* Dead objects: circular arrays; exclude __MF_TYPE_GUESS. */
unsigned __mf_object_dead_head[__MF_TYPE_GUESS+1]; /* next empty spot */
! __mf_object_tree_t *__mf_object_cemetary[__MF_TYPE_GUESS][__MF_PERSIST_MAX];
static __mf_object_tree_t *__mf_find_object (uintptr_t low, uintptr_t high);
static unsigned __mf_find_objects (uintptr_t ptr_low, uintptr_t ptr_high,
*************** void __mf_check (uintptr_t ptr, uintptr_
*** 590,596 ****
__MF_VIOL_CHECK);
}
- /* __mf_register */
static __mf_object_tree_t *
__mf_insert_new_object (uintptr_t low, uintptr_t high, int type,
--- 585,590 ----
*************** __mf_register (uintptr_t ptr, uintptr_t
*** 650,655 ****
--- 644,657 ----
TRACE ("mf: register p=%08lx s=%lu t=%d n='%s'\n", ptr, sz,
type, name ? name : "");
+ if (__mf_opts.collect_stats)
+ {
+ __mf_count_register ++;
+ __mf_total_register_size [(type < 0) ? 0 :
+ (type > __MF_TYPE_MAX) ? 0 :
+ type] += sz;
+ }
+
switch (__mf_opts.mudflap_mode)
{
case mode_nop:
*************** __mf_register (uintptr_t ptr, uintptr_t
*** 669,676 ****
case mode_check:
{
! enum { max_objs = 1 };
! __mf_object_tree_t *ovr_obj [max_objs];
unsigned num_overlapping_objs;
uintptr_t low = ptr;
uintptr_t high = CLAMPSZ (ptr, sz);
--- 671,677 ----
case mode_check:
{
! __mf_object_tree_t *ovr_objs [1];
unsigned num_overlapping_objs;
uintptr_t low = ptr;
uintptr_t high = CLAMPSZ (ptr, sz);
*************** __mf_register (uintptr_t ptr, uintptr_t
*** 681,816 ****
/* Treat unknown size indication as 1. */
if (UNLIKELY (sz == 0)) sz = 1;
! num_overlapping_objs = __mf_find_objects (low, high, ovr_obj, max_objs);
if (UNLIKELY (num_overlapping_objs > 0))
{
! /* Normally, this would be a violation. However, accept a
! single duplicate registration for static objects, since these
! may come from distinct compilation units. */
if (type == __MF_TYPE_STATIC &&
! num_overlapping_objs == 1 &&
! ovr_obj[0]->data.type == __MF_TYPE_STATIC &&
! ovr_obj[0]->data.low == low &&
! ovr_obj[0]->data.high == high)
{
/* do nothing */
! VERBOSE_TRACE ("mf: duplicate static reg %08lx\n", low);
END_RECURSION_PROTECT;
return;
}
- else if (type == __MF_TYPE_GUESS)
- {
- unsigned i;
- int all_guesses = 1;
! /* XXX: need generalization for max_objs > 1 */
! for (i = 0; i < min (num_overlapping_objs, max_objs); ++i)
! {
! if (ovr_obj[i]->data.type != __MF_TYPE_GUESS)
{
! all_guesses = 0;
! break;
! }
}
! /* XXX: the following logic is too restrictive.
! We should handle the case of inserting a big GUESS
! on top of a little (say) HEAP area. The new GUESS
! thingie should be split up the same way as if the
! little HEAPie was added second. */
! if (all_guesses)
! {
! VERBOSE_TRACE ("mf: replacing %d existing guess%s at %08lx "
! "with %08lx - %08lx\n",
! num_overlapping_objs,
! (num_overlapping_objs > 1 ? "es" : ""),
! low,
! low, high);
!
! for (i = 0; i < min (max_objs, num_overlapping_objs); ++i)
{
! DECLARE (void, free, void *ptr);
! __mf_remove_old_object (ovr_obj[i]);
! CALL_REAL (free, ovr_obj[i]->data.alloc_backtrace);
! CALL_REAL (free, ovr_obj[i]);
! }
! __mf_insert_new_object (low, high, __MF_TYPE_GUESS,
! name, pc);
! }
! else
! {
! VERBOSE_TRACE ("mf: preserving %d regions at %08lx\n",
! num_overlapping_objs, low);
! }
END_RECURSION_PROTECT;
return;
}
- else
- {
- unsigned i;
- for (i = 0; i < min (num_overlapping_objs, max_objs) ; ++i)
- {
- if (ovr_obj[i]->data.type == __MF_TYPE_GUESS)
- {
- /* We're going to split our existing guess
- into 2 and put this new region in the
- middle. */
-
- uintptr_t guess1_low, guess1_high;
- uintptr_t guess2_low, guess2_high;
- uintptr_t guess_pc;
- const char *guess_name;
- DECLARE (void, free, void *ptr);
-
- guess_pc = ovr_obj[i]->data.alloc_pc;
- guess_name = ovr_obj[i]->data.name;
-
- guess1_low = ovr_obj[i]->data.low;
- guess1_high = CLAMPSUB (low, (1 + __mf_opts.crumple_zone));
-
- guess2_low = CLAMPADD (high, (1 + __mf_opts.crumple_zone));
- guess2_high = ovr_obj[i]->data.high;
-
- VERBOSE_TRACE ("mf: splitting guess region %08lx-%08lx\n",
- guess1_low, guess2_high);
-
- /* NB: split regions may disappear if low > high. */
! __mf_remove_old_object (ovr_obj[i]);
! CALL_REAL(free, ovr_obj[i]->data.alloc_backtrace);
! CALL_REAL(free, ovr_obj[i]);
! ovr_obj[i] = NULL;
!
! /* XXX: preserve other information: stats? backtraces */
!
! if (guess1_low <= guess1_high)
! {
! __mf_insert_new_object (guess1_low, guess1_high,
! __MF_TYPE_GUESS,
! guess_name, guess_pc);
}
! if (guess2_low <= guess2_high)
! {
! __mf_insert_new_object (guess2_low, guess2_high,
! __MF_TYPE_GUESS,
! guess_name, guess_pc);
! }
! }
else
{
/* Two or more *real* mappings here. */
! __mf_violation
! (ptr, sz,
(uintptr_t) __builtin_return_address (0), NULL,
__MF_VIOL_REGISTER);
}
}
- }
- }
__mf_insert_new_object (low, high, type, name, pc);
/* We could conceivably call __mf_check() here to prime the cache,
but then the check_count field is not reliable. */
--- 682,813 ----
/* Treat unknown size indication as 1. */
if (UNLIKELY (sz == 0)) sz = 1;
! num_overlapping_objs = __mf_find_objects (low, high, ovr_objs, 1);
!
! /* Handle overlaps. */
if (UNLIKELY (num_overlapping_objs > 0))
{
! __mf_object_tree_t *ovr_obj = ovr_objs[0];
!
! /* Quietly accept a single duplicate registration for
! static objects, since these may come from distinct
! compilation units. */
if (type == __MF_TYPE_STATIC &&
! ovr_obj->data.type == __MF_TYPE_STATIC &&
! ovr_obj->data.low == low &&
! ovr_obj->data.high == high)
{
/* do nothing */
! VERBOSE_TRACE ("mf: duplicate static reg %08lx-%08lx\n", low, high);
END_RECURSION_PROTECT;
return;
}
! /* Quietly accept a single duplicate registration for
! guess objects too. */
! if (type == __MF_TYPE_GUESS &&
! ovr_obj->data.type == __MF_TYPE_GUESS &&
! ovr_obj->data.low == low &&
! ovr_obj->data.high == high)
{
! /* do nothing */
! VERBOSE_TRACE ("mf: duplicate guess reg %08lx-%08lx\n", low, high);
! END_RECURSION_PROTECT;
! return;
}
! /* Quietly accept new a guess registration that overlaps
! at least one existing object. Trim it down to size. */
! else if (type == __MF_TYPE_GUESS)
{
! /* We need to split this new GUESS region into some
! smaller ones. Or we may not need to insert it at
! all if it is covered by the overlapping region. */
!
! /* First, identify all the overlapping objects. */
! __mf_object_tree_t **all_ovr_objs;
! unsigned num_ovr_objs, n;
! uintptr_t next_low;
! DECLARE (void *, malloc, size_t c);
! DECLARE (void, free, void *p);
!
! all_ovr_objs = CALL_REAL (malloc, (sizeof (__mf_object_tree_t *) *
! num_overlapping_objs));
! if (all_ovr_objs == NULL) abort ();
! num_ovr_objs = __mf_find_objects (low, high, all_ovr_objs,
! num_overlapping_objs);
! /* assert (num_ovr_objs == num_overlapping_objs); */
! VERBOSE_TRACE ("mf: splitting guess %08lx-%08lx, # overlaps: %u\n",
! low, high, num_ovr_objs);
!
! /* Add GUESS regions between the holes: before each
! overlapping region. */
END_RECURSION_PROTECT;
+ next_low = low;
+ /* This makes use of the assumption that __mf_find_objects() returns
+ overlapping objects in an increasing sequence. */
+ for (n=0; n < min (num_ovr_objs, num_overlapping_objs); n++)
+ {
+ if (all_ovr_objs[n]->data.low > next_low) /* Gap? */
+ {
+ uintptr_t next_high = CLAMPSUB (all_ovr_objs[n]->data.low, 1);
+ __mf_register (next_low, next_high-next_low+1,
+ __MF_TYPE_GUESS, name);
+ }
+ next_low = CLAMPADD (all_ovr_objs[n]->data.high, 1);
+ }
+ /* Add in any leftover room at the top. */
+ if (next_low <= high)
+ __mf_register (next_low, high-next_low+1,
+ __MF_TYPE_GUESS, name);
+
+ /* XXX: future optimization: allow consecutive GUESS regions to
+ be glued together. */
+ CALL_REAL (free, all_ovr_objs);
return;
}
! /* Quietly accept a non-GUESS region overlaying a GUESS
! region. Handle it by removing the GUESS region
! temporarily, then recursively adding this new object,
! and then the GUESS back. The latter will be split up
! by the recursive process above. */
! else if (ovr_obj->data.type == __MF_TYPE_GUESS)
! {
! uintptr_t old_low = ovr_obj->data.low;
! uintptr_t old_high = ovr_obj->data.high;
! const char* old_name = ovr_obj->data.name;
!
! /* Now to recursively remove the guess piece, and
! reinsert them in the opposite order. Recursion
! should bottom out if another non-GUESS overlapping
! region is found for this new object (resulting in a
! violation), or if no further overlap occurs. The
! located GUESS region should end up being split up
! in any case. */
! END_RECURSION_PROTECT;
! __mf_unregister (old_low, old_high-old_low+1);
! __mf_register (low, sz, type, name);
! __mf_register (old_low, old_high-old_low+1, __MF_TYPE_GUESS, old_name);
! return;
}
! /* Alas, a genuine violation. */
else
{
/* Two or more *real* mappings here. */
! __mf_violation (ptr, sz,
(uintptr_t) __builtin_return_address (0), NULL,
__MF_VIOL_REGISTER);
}
}
+ /* No overlapping objects: AOK. */
+ else
+ {
__mf_insert_new_object (low, high, type, name, pc);
+ }
/* We could conceivably call __mf_check() here to prime the cache,
but then the check_count field is not reliable. */
*************** __mf_register (uintptr_t ptr, uintptr_t
*** 818,836 ****
END_RECURSION_PROTECT;
break;
}
-
} /* end switch (__mf_opts.mudflap_mode) */
-
- if (__mf_opts.collect_stats)
- {
- __mf_count_register ++;
- __mf_total_register_size [(type < 0) ? 0 :
- (type > __MF_TYPE_MAX) ? 0 :
- type] += sz;
- }
}
! /* __mf_unregister */
void
__mf_unregister (uintptr_t ptr, uintptr_t sz)
--- 815,824 ----
END_RECURSION_PROTECT;
break;
}
} /* end switch (__mf_opts.mudflap_mode) */
}
!
void
__mf_unregister (uintptr_t ptr, uintptr_t sz)
*************** __mf_unregister (uintptr_t ptr, uintptr_
*** 868,886 ****
num_overlapping_objs = __mf_find_objects (ptr, CLAMPSZ (ptr, sz), objs, 1);
! {
! /* do not unregister guessed regions */
! unsigned i;
! for (i = 0; i < num_overlapping_objs; ++i)
! {
! if (objs[i]->data.type == __MF_TYPE_GUESS)
! {
! VERBOSE_TRACE ("mf: ignored guess unreg %08lx\n", objs[i]->data.low);
! END_RECURSION_PROTECT;
! return;
! }
! }
! }
if (UNLIKELY (num_overlapping_objs != 1))
{
--- 856,863 ----
num_overlapping_objs = __mf_find_objects (ptr, CLAMPSZ (ptr, sz), objs, 1);
! /* XXX: handle unregistration of big old GUESS region, that has since
! been splintered. */
if (UNLIKELY (num_overlapping_objs != 1))
{
*************** __mf_unregister (uintptr_t ptr, uintptr_
*** 894,905 ****
old_obj = objs[0];
- VERBOSE_TRACE ("mf: removing %08lx-%08lx\n",
- old_obj->data.low, old_obj->data.high);
-
__mf_remove_old_object (old_obj);
! if (__mf_opts.persistent_count > 0)
{
old_obj->data.deallocated_p = 1;
old_obj->left = old_obj->right = NULL;
--- 871,880 ----
old_obj = objs[0];
__mf_remove_old_object (old_obj);
! if (__mf_opts.persistent_count > 0 &&
! old_obj->data.type != __MF_TYPE_GUESS)
{
old_obj->data.deallocated_p = 1;
old_obj->left = old_obj->right = NULL;
*************** __mf_unregister (uintptr_t ptr, uintptr_
*** 911,922 ****
__mf_backtrace (& old_obj->data.dealloc_backtrace,
NULL, 2);
-
/* Put this object into the cemetary. This may require this plot to
be recycled, and the previous resident to be designated del_obj. */
assert (old_obj->data.type >= __MF_TYPE_UNKNOWN &&
! old_obj->data.type <= __MF_TYPE_GUESS);
{
unsigned row = old_obj->data.type;
unsigned plot = __mf_object_dead_head [row];
--- 886,896 ----
__mf_backtrace (& old_obj->data.dealloc_backtrace,
NULL, 2);
/* Put this object into the cemetary. This may require this plot to
be recycled, and the previous resident to be designated del_obj. */
assert (old_obj->data.type >= __MF_TYPE_UNKNOWN &&
! old_obj->data.type < __MF_TYPE_GUESS);
{
unsigned row = old_obj->data.type;
unsigned plot = __mf_object_dead_head [row];
*************** __mf_unregister (uintptr_t ptr, uintptr_
*** 929,937 ****
__mf_object_dead_head [row] = plot;
}
- } else {
- del_obj = old_obj;
}
if (__mf_opts.print_leaks)
{
--- 903,911 ----
__mf_object_dead_head [row] = plot;
}
}
+ else
+ del_obj = old_obj;
if (__mf_opts.print_leaks)
{
*************** __mf_validate_object_cemetary ()
*** 1001,1007 ****
unsigned cls;
unsigned i;
! for (cls = __MF_TYPE_UNKNOWN; cls <= __MF_TYPE_GUESS; cls++)
{
assert (__mf_object_dead_head [cls] >= 0 &&
__mf_object_dead_head [cls] < __mf_opts.persistent_count);
--- 975,981 ----
unsigned cls;
unsigned i;
! for (cls = __MF_TYPE_UNKNOWN; cls < __MF_TYPE_GUESS; cls++)
{
assert (__mf_object_dead_head [cls] >= 0 &&
__mf_object_dead_head [cls] < __mf_opts.persistent_count);
*************** __mf_find_dead_objects (uintptr_t low, u
*** 1246,1252 ****
{
count = 0;
! for (row = __MF_TYPE_UNKNOWN; row <= __MF_TYPE_GUESS; row ++)
{
unsigned plot;
unsigned i;
--- 1220,1226 ----
{
count = 0;
! for (row = __MF_TYPE_UNKNOWN; row < __MF_TYPE_GUESS; row ++)
{
unsigned plot;
unsigned i;
*************** __mf_report ()
*** 1406,1412 ****
{
unsigned dead_count = 0;
unsigned row, plot;
! for (row = __MF_TYPE_UNKNOWN; row <= __MF_TYPE_GUESS; row ++)
for (plot = 0 ; plot < __mf_opts.persistent_count; plot ++)
if (__mf_object_cemetary [row][plot] != 0)
dead_count ++;
--- 1380,1386 ----
{
unsigned dead_count = 0;
unsigned row, plot;
! for (row = __MF_TYPE_UNKNOWN; row < __MF_TYPE_GUESS; row ++)
for (plot = 0 ; plot < __mf_opts.persistent_count; plot ++)
if (__mf_object_cemetary [row][plot] != 0)
dead_count ++;
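The interesting part of the __mf_register rewrite is the gap-filling pass for
a new GUESS registration that overlaps existing objects: guess pieces are
registered only in the holes before, between, and after the overlapping
objects. A standalone sketch of just that arithmetic (assuming, as the patch
comment does, that overlapping objects come back sorted by ascending low
address; the CLAMPSUB/CLAMPADD overflow clamping and the recursive
__mf_register calls are omitted):

/* Sketch of the GUESS gap-filling arithmetic.  */
#include <stdint.h>
#include <stdio.h>

struct region { uintptr_t low, high; };

static void
register_guess_pieces (uintptr_t low, uintptr_t high,
                       const struct region *overlaps, unsigned n_overlaps)
{
  uintptr_t next_low = low;
  unsigned n;
  for (n = 0; n < n_overlaps; n++)
    {
      if (overlaps[n].low > next_low)        /* gap before this object?  */
        printf ("guess piece %#lx-%#lx\n",
                (unsigned long) next_low,
                (unsigned long) (overlaps[n].low - 1));
      next_low = overlaps[n].high + 1;       /* skip past the object */
    }
  if (next_low <= high)                      /* leftover room at the top */
    printf ("guess piece %#lx-%#lx\n",
            (unsigned long) next_low, (unsigned long) high);
}

int
main (void)
{
  /* A guess spanning 0x1000-0x1fff with two real objects inside it.  */
  struct region objs[] = { { 0x1200, 0x12ff }, { 0x1800, 0x18ff } };
  register_guess_pieces (0x1000, 0x1fff, objs, 2);
  return 0;
}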
Index: test/pass20-frag.c
===================================================================
RCS file: test/pass20-frag.c
diff -N test/pass20-frag.c
*** /dev/null 1 Jan 1970 00:00:00 -0000
--- test/pass20-frag.c 16 Sep 2002 14:46:42 -0000
***************
*** 0 ****
--- 1,6 ----
+ struct bar {int stuff; int array[10]; };
+
+ struct bar *foo = malloc (sizeof (struct bar));
+ foo->array[5] = 4;
+ free (foo);
+
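Note that pass20-frag.c is only a fragment; the test/%.c rule in Makefile.am
builds test/pass20.c from this fragment together with test/mf-driver.c. A
rough standalone equivalent of what the generated test exercises (an
approximation, not the generated file) would be:

#include <stdlib.h>

struct bar { int stuff; int array[10]; };

int
main (void)
{
  struct bar *foo = malloc (sizeof (struct bar));
  foo->array[5] = 4;
  free (foo);
  return 0;
}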