This is the mail archive of the
gcc-patches@gcc.gnu.org
mailing list for the GCC project.
Re: [tree-ssa libmudflap] mmap fixes
- From: "Frank Ch. Eigler" <fche at tooth dot toronto dot redhat dot com>
- To: gcc-patches at gcc dot gnu dot org
- Date: Tue, 01 Oct 2002 15:50:11 -0400
- Subject: Re: [tree-ssa libmudflap] mmap fixes
Sorry, the patch:
+ 2002-10-01 Frank Ch. Eigler <fche@redhat.com>
+
+ * test/pass23-frag.c: New test for bit_field_ref expressions.
+ * mf-hooks.c (mmap, munmap): Rewrite to track individual pages.
+ (MF_VALIDATE_EXTENT): Accept zero-size mem/str operations.
+ * mf-runtime.c (__mf_init): Register errno global.
+ (__mf_find_object): Removed function.
+ (__mf_check): Rewrite logic to support accesses across some
+ contiguous but distinctly registered objects.
+ (__mf_remove_old_object): Tolerate cache entries that span
+ contiguous objects.
+
Index: Makefile.am
===================================================================
RCS file: /cvs/gcc/gcc/libmudflap/Attic/Makefile.am,v
retrieving revision 1.1.2.13
diff -c -p -r1.1.2.13 Makefile.am
*** Makefile.am 30 Sep 2002 15:13:19 -0000 1.1.2.13
--- Makefile.am 1 Oct 2002 19:41:19 -0000
*************** TESTS = test/fail1.x test/fail2.x test/f
*** 17,23 ****
test/pass6.x test/pass7.x test/pass8.x test/pass9.x test/pass10.x \
test/pass11.x test/pass12.x test/pass13.x test/pass14.x test/pass15.x \
test/pass16.x test/pass17.x test/pass18.x test/pass19.x test/pass20.x \
! test/pass21.x test/pass22.x
test/%.c: test/%-frag.c test/mf-driver.c
@mkdir -p test
--- 17,23 ----
test/pass6.x test/pass7.x test/pass8.x test/pass9.x test/pass10.x \
test/pass11.x test/pass12.x test/pass13.x test/pass14.x test/pass15.x \
test/pass16.x test/pass17.x test/pass18.x test/pass19.x test/pass20.x \
! test/pass21.x test/pass22.x test/pass23.x
test/%.c: test/%-frag.c test/mf-driver.c
@mkdir -p test
Index: mf-hooks.c
===================================================================
RCS file: /cvs/gcc/gcc/libmudflap/Attic/mf-hooks.c,v
retrieving revision 1.1.2.16
diff -c -p -r1.1.2.16 mf-hooks.c
*** mf-hooks.c 30 Sep 2002 15:13:19 -0000 1.1.2.16
--- mf-hooks.c 1 Oct 2002 19:41:19 -0000
*************** XXX: libgcc license?
*** 25,31 ****
#define MF_VALIDATE_EXTENT(value,size,context) \
{ \
! if (UNLIKELY (__MF_CACHE_MISS_P (value, size))) \
{ \
enum __mf_state resume_state = old_state; \
__mf_state = old_state; \
--- 25,31 ----
#define MF_VALIDATE_EXTENT(value,size,context) \
{ \
! if (UNLIKELY (size > 0 && __MF_CACHE_MISS_P (value, size))) \
{ \
enum __mf_state resume_state = old_state; \
__mf_state = old_state; \
*************** WRAPPER(void *, mmap,
*** 250,266 ****
result = CALL_REAL(mmap, start, length, prot,
flags, fd, offset);
__mf_state = old_state;
! if ((uintptr_t)result != -1)
{
! __mf_register ((uintptr_t) result,
! (uintptr_t) CLAMPADD(((uintptr_t)result),
! ((uintptr_t)
! CLAMPSUB(((uintptr_t)length),
! ((uintptr_t)1)))),
! __MF_TYPE_GUESS,
! "(heuristic) mmap region");
}
return result;
--- 250,281 ----
result = CALL_REAL(mmap, start, length, prot,
flags, fd, offset);
+ /*
+ VERBOSE_TRACE ("mf: mmap (%08lx, %08lx, ...) => %08lx\n",
+ (uintptr_t) start, (uintptr_t) length,
+ (uintptr_t) result);
+ */
+
__mf_state = old_state;
! if (result != (void *)-1)
{
! /* Register each page as a heap object. Why not register it all
! as a single segment? That's so that a later munmap() call
! can unmap individual pages. XXX: would __MF_TYPE_GUESS make
! this more automatic? */
! size_t ps = getpagesize ();
! uintptr_t base = (uintptr_t) result;
! uintptr_t offset;
!
! for (offset=0; offset<length; offset+=ps)
! {
! /* XXX: We could map PROT_NONE to __MF_TYPE_NOACCESS. */
! /* XXX: Unaccessed HEAP pages are reported as leaks. Is this
! appropriate for unaccessed mmap pages? */
! __mf_register (CLAMPADD (base, offset), ps,
! __MF_TYPE_HEAP, "mmap page");
! }
}
return result;
*************** WRAPPER(int , munmap, void *start, size_
*** 276,283 ****
result = CALL_REAL(munmap, start, length);
__mf_state = old_state;
! __mf_unregister ((uintptr_t)start, length);
return result;
}
#endif
--- 291,314 ----
result = CALL_REAL(munmap, start, length);
+ /*
+ VERBOSE_TRACE ("mf: munmap (%08lx, %08lx, ...) => %08lx\n",
+ (uintptr_t) start, (uintptr_t) length,
+ (uintptr_t) result);
+ */
+
__mf_state = old_state;
!
! if (result == 0)
! {
! /* Unregister each page as a heap object. */
! size_t ps = getpagesize ();
! uintptr_t base = (uintptr_t) start & (~ (ps - 1)); /* page align */
! uintptr_t offset;
!
! for (offset=0; offset<length; offset+=ps)
! __mf_unregister (CLAMPADD (base, offset), ps);
! }
return result;
}
#endif
Index: mf-runtime.c
===================================================================
RCS file: /cvs/gcc/gcc/libmudflap/Attic/mf-runtime.c,v
retrieving revision 1.1.2.17
diff -c -p -r1.1.2.17 mf-runtime.c
*** mf-runtime.c 26 Sep 2002 18:59:33 -0000 1.1.2.17
--- mf-runtime.c 1 Oct 2002 19:41:20 -0000
*************** XXX: libgcc license?
*** 19,24 ****
--- 19,25 ----
#include <limits.h>
#include <sys/types.h>
#include <signal.h>
+ #include <errno.h>
#include "mf-runtime.h"
#include "mf-impl.h"
*************** void __mf_init ()
*** 447,452 ****
--- 448,458 ----
(uintptr_t) 0xC0000000 - (uintptr_t) (& foo),
__MF_TYPE_GUESS,
"argv/environ area");
+ /* XXX: separate heuristic? */
+ __mf_register ((uintptr_t) & errno,
+ (uintptr_t) sizeof (errno),
+ __MF_TYPE_GUESS,
+ "errno area");
}
}
*************** __mf_object_tree_t *__mf_object_root;
*** 507,513 ****
unsigned __mf_object_dead_head[__MF_TYPE_MAX_CEM]; /* next empty spot */
__mf_object_tree_t *__mf_object_cemetary[__MF_TYPE_MAX_CEM][__MF_PERSIST_MAX];
- static __mf_object_tree_t *__mf_find_object (uintptr_t low, uintptr_t high);
static unsigned __mf_find_objects (uintptr_t ptr_low, uintptr_t ptr_high,
__mf_object_tree_t **objs, unsigned max_objs);
static unsigned __mf_find_dead_objects (uintptr_t ptr_low, uintptr_t ptr_high,
--- 513,518 ----
*************** void __mf_check (uintptr_t ptr, uintptr_
*** 525,531 ****
{
unsigned entry_idx = __MF_CACHE_INDEX (ptr);
struct __mf_cache *entry = & __mf_lookup_cache [entry_idx];
! int violation_p = 0;
uintptr_t ptr_high = CLAMPSZ (ptr, sz);
struct __mf_cache old_entry = *entry;
--- 530,536 ----
{
unsigned entry_idx = __MF_CACHE_INDEX (ptr);
struct __mf_cache *entry = & __mf_lookup_cache [entry_idx];
! int judgement = 0; /* 0=undecided; <0=violation; >0=okay */
uintptr_t ptr_high = CLAMPSZ (ptr, sz);
struct __mf_cache old_entry = *entry;
*************** void __mf_check (uintptr_t ptr, uintptr_
*** 536,630 ****
switch (__mf_opts.mudflap_mode)
{
case mode_nop:
break;
case mode_populate:
entry->low = ptr;
entry->high = ptr_high;
break;
case mode_check:
{
unsigned heuristics = 0;
! /* Looping only occurs if heuristics were triggered. */
! while (1)
{
! /* XXX: This search embodied by __mf_find_object prevents
! an access that spans contiguous objects. Spanning two
! GUESS regions should be accepted, but can that happen?
! Maybe a heuristic that glues them together is
! needed. */
! __mf_object_tree_t *node = __mf_find_object (ptr, ptr_high);
! __mf_object_t *obj = (node != NULL ? (& node->data) : NULL);
!
! if (LIKELY (obj && ptr >= obj->low && ptr_high <= obj->high))
{
! obj->check_count ++; /* XXX: what about overflow? */
! /* Handle tree liveness aging and cache adaptation. */
! {
! static unsigned aging_count;
! static unsigned adapt_count;
! aging_count ++;
! adapt_count ++;
! if (UNLIKELY (__mf_opts.tree_aging > 0 &&
! aging_count > __mf_opts.tree_aging))
! {
! aging_count = 0;
! __mf_age_tree (__mf_object_root);
! }
! obj->liveness ++;
! if (UNLIKELY (__mf_opts.adapt_cache > 0 &&
! adapt_count > __mf_opts.adapt_cache))
! {
! adapt_count = 0;
! __mf_adapt_cache ();
! }
! }
! if (UNLIKELY (obj->type == __MF_TYPE_NOACCESS))
! {
! violation_p = 1;
! }
! else
{
! entry->low = obj->low;
! entry->high = obj->high;
}
! break;
}
! else if (heuristics++ < 2) /* XXX parametrize this number? */
{
! int judgement = __mf_heuristic_check (ptr, ptr_high);
! if (judgement < 0)
! {
! violation_p = 1;
! break;
! }
! else if (judgement > 0)
! {
! violation_p = 0;
! break;
! }
! else if (judgement == 0)
{
! /* Undecided: try again. Most likely, the heuristics function
! has deposited an object in the database and is expecting us
! to find it the next time around. */
! continue;
}
}
! else /* no more heuristics iterations allowed */
{
! violation_p = 1;
! break;
}
}
}
break;
case mode_violate:
! violation_p = 1;
break;
}
--- 541,696 ----
switch (__mf_opts.mudflap_mode)
{
case mode_nop:
+ judgement = 1;
break;
case mode_populate:
entry->low = ptr;
entry->high = ptr_high;
+ judgement = 1;
break;
case mode_check:
{
unsigned heuristics = 0;
!
! /* Advance aging/adaptation counters. */
! if (__mf_object_root)
{
! static unsigned aging_count;
! static unsigned adapt_count;
! aging_count ++;
! adapt_count ++;
! if (UNLIKELY (__mf_opts.tree_aging > 0 &&
! aging_count > __mf_opts.tree_aging))
{
! aging_count = 0;
! __mf_age_tree (__mf_object_root);
! }
! if (UNLIKELY (__mf_opts.adapt_cache > 0 &&
! adapt_count > __mf_opts.adapt_cache))
! {
! adapt_count = 0;
! __mf_adapt_cache ();
! }
! }
! /* Looping only occurs if heuristics were triggered. */
! while (judgement == 0)
! {
! __mf_object_tree_t* ovr_obj[1];
! unsigned obj_count;
! obj_count = __mf_find_objects (ptr, ptr_high, ovr_obj, 1);
!
! if (LIKELY (obj_count == 1)) /* A single hit! */
! {
! __mf_object_t *obj = & ovr_obj[0]->data;
! assert (obj != NULL);
! if (LIKELY (ptr >= obj->low && ptr_high <= obj->high))
{
! obj->check_count ++; /* XXX: what about overflow? */
! obj->liveness ++;
!
! if (UNLIKELY (obj->type == __MF_TYPE_NOACCESS))
! judgement = -1;
! else
! {
! /* Valid access. */
! entry->low = obj->low;
! entry->high = obj->high;
! judgement = 1;
! }
}
! /* The object did not cover the entire accessed region. */
}
! else if (LIKELY (obj_count > 1))
{
! __mf_object_tree_t **all_ovr_objs;
! unsigned n;
! DECLARE (void *, malloc, size_t c);
! DECLARE (void, free, void *p);
!
! all_ovr_objs = CALL_REAL (malloc, (sizeof (__mf_object_tree_t *) *
! obj_count));
! if (all_ovr_objs == NULL) abort ();
! n = __mf_find_objects (ptr, ptr_high, all_ovr_objs, obj_count);
! assert (n == obj_count);
!
! /* Confirm that accessed range is covered by first/last object. */
! if (LIKELY ((ptr >= all_ovr_objs[0]->data.low) &&
! (ptr_high <= all_ovr_objs[obj_count-1]->data.high)))
{
! /* Presume valid access. */
! judgement = 1;
!
! /* Confirm that intermediate objects are
! contiguous and share a single name. Thus they
! are likely split up GUESS regions, or mmap
! pages. The idea of the name check is to
! prevent an oversize access to a
! stack-registered object (followed by some GUESS
! type) from being accepted as a hit. */
! for (n=0; n<obj_count-1; n++)
! {
! __mf_object_t *obj = & (all_ovr_objs[n]->data);
! __mf_object_t *nextobj = & (all_ovr_objs[n+1]->data);
!
! if (UNLIKELY (obj->type == __MF_TYPE_NOACCESS))
! judgement = -1; /* Force error. */
!
! if (UNLIKELY (judgement == 1 &&
! (obj->high + 1 != nextobj->low)))
! judgement = 0; /* Cancel presumption. */
!
! if (UNLIKELY (judgement == 1 &&
! (obj->name != nextobj->name)))
! judgement = 0; /* Cancel presumption. */
! /* NB: strcmp above is not necessary since the
! same literal string pointer is normally
! used when creating regions. */
!
! obj->check_count ++; /* XXX: what about overflow? */
! obj->liveness ++;
! }
!
! /* Fill out the cache with the bounds of the first
! object and the last object that covers this
! cache line (== includes the same __MF_CACHE_INDEX).
! This could let this cache line span *two* distinct
! registered objects: a peculiar but reasonable
! situation. The cache line may not include the
! entire object though. */
! if (judgement > 0)
! {
! unsigned i;
! entry->low = all_ovr_objs[0]->data.low;
! for (i=0; i<obj_count; i++)
! {
! uintptr_t high = all_ovr_objs[i]->data.high;
! if (__MF_CACHE_INDEX (high) == entry_idx)
! entry->high = high;
! }
! }
}
+
+ CALL_REAL (free, all_ovr_objs);
}
!
! if (judgement == 0)
{
! if (heuristics++ < 2) /* XXX parametrize this number? */
! judgement = __mf_heuristic_check (ptr, ptr_high);
! else
! judgement = -1;
}
}
+
}
break;
case mode_violate:
! judgement = -1;
break;
}
*************** void __mf_check (uintptr_t ptr, uintptr_
*** 639,645 ****
END_RECURSION_PROTECT;
! if (UNLIKELY (violation_p))
__mf_violation (ptr, sz,
(uintptr_t) __builtin_return_address (0), location,
__MF_VIOL_CHECK);
--- 705,711 ----
END_RECURSION_PROTECT;
! if (UNLIKELY (judgement < 0))
__mf_violation (ptr, sz,
(uintptr_t) __builtin_return_address (0), location,
__MF_VIOL_CHECK);
*************** __mf_remove_old_object (__mf_object_tree
*** 687,693 ****
for (i = idx_low; i <= idx_high; i++)
{
struct __mf_cache *entry = & __mf_lookup_cache [i];
! if (entry->low == low && entry->high == high)
{
entry->low = entry->high = (uintptr_t) 0;
}
--- 753,763 ----
for (i = idx_low; i <= idx_high; i++)
{
struct __mf_cache *entry = & __mf_lookup_cache [i];
! /* NB: the "||" in the following test permits this code to
! tolerate the situation introduced by __mf_check over
! contiguous objects, where a cache entry spans several
! objects. */
! if (entry->low == low || entry->high == high)
{
entry->low = entry->high = (uintptr_t) 0;
}
*************** __mf_register (uintptr_t ptr, uintptr_t
*** 798,804 ****
if (all_ovr_objs == NULL) abort ();
num_ovr_objs = __mf_find_objects (low, high, all_ovr_objs,
num_overlapping_objs);
! /* assert (num_ovr_objs == num_overlapping_objs); */
VERBOSE_TRACE ("mf: splitting guess %08lx-%08lx, # overlaps: %u\n",
low, high, num_ovr_objs);
--- 868,874 ----
if (all_ovr_objs == NULL) abort ();
num_ovr_objs = __mf_find_objects (low, high, all_ovr_objs,
num_overlapping_objs);
! assert (num_ovr_objs == num_overlapping_objs);
VERBOSE_TRACE ("mf: splitting guess %08lx-%08lx, # overlaps: %u\n",
low, high, num_ovr_objs);
*************** __mf_find_objects_rec (uintptr_t low, ui
*** 1262,1284 ****
}
return count;
- }
-
-
- __mf_object_tree_t *
- __mf_find_object (uintptr_t low, uintptr_t high)
- {
- __mf_object_tree_t* objects[1]; /* Find at most one. */
- unsigned count;
-
- if (UNLIKELY(__mf_opts.internal_checking))
- __mf_validate_objects ();
-
- count = __mf_find_objects_rec (low, high, & __mf_object_root, objects, 1);
- if (count == 1)
- return objects[0];
- else
- return NULL;
}
--- 1332,1337 ----
Index: test/pass23-frag.c
===================================================================
RCS file: test/pass23-frag.c
diff -N test/pass23-frag.c
*** /dev/null 1 Jan 1970 00:00:00 -0000
--- test/pass23-frag.c 1 Oct 2002 19:41:20 -0000
***************
*** 0 ****
--- 1,20 ----
+ struct foo {
+ int part1: 8;
+ int nothing : 1;
+ int part2 : 5;
+ int lots_more_nothing : 3;
+ float some_more_nothing;
+ double yet_more_nothing;
+ };
+
+ #define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *) 0)->MEMBER)
+
+ struct foo* q = (void *) malloc (offsetof (struct foo, some_more_nothing));
+ /* The RHS of the following expression is meant to trigger a fold-const.c
+ transformation mapping the expression to a BIT_FIELD_REF. It glues
+ together the accesses to the two non-neighbouring bitfields into a
+ single bigger boolean test. */
+ q->lots_more_nothing = (q->part1 == 13 && q->part2 == 7);
+ free (q);
+
+