/* Mudflap: narrow-pointer bounds-checking by tree rewriting.
   Copyright (C) 2002, 2003, 2004, 2005, 2006, 2008, 2009, 2010
   Free Software Foundation, Inc.
   Contributed by Frank Ch. Eigler <fche@redhat.com>
   and Graydon Hoare <graydon@redhat.com>
   Splay Tree code originally by Mark Mitchell <mark@markmitchell.com>,
   adapted from libiberty.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.

You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
<http://www.gnu.org/licenses/>.  */

#include "config.h"

/* These attempt to coax various unix flavours to declare all our
   needed tidbits in the system headers.  */
#if !defined(__FreeBSD__) && !defined(__APPLE__)
#define _POSIX_SOURCE
#endif /* Some BSDs break <sys/socket.h> if this is defined. */
#define _GNU_SOURCE
#define _XOPEN_SOURCE
#define _BSD_TYPES
#define __EXTENSIONS__
#define _ALL_SOURCE
#define _LARGE_FILE_API
#define _XOPEN_SOURCE_EXTENDED 1

#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/time.h>
#include <time.h>
#include <unistd.h>
#ifdef HAVE_EXECINFO_H
#include <execinfo.h>
#endif
#ifdef HAVE_SIGNAL_H
#include <signal.h>
#endif
#include <assert.h>

#include <string.h>
#include <limits.h>
#include <sys/types.h>
#include <signal.h>
#include <errno.h>
#include <ctype.h>

#include "mf-runtime.h"
#include "mf-impl.h"


/* ------------------------------------------------------------------------ */
/* Splay-tree implementation. */

typedef uintptr_t mfsplay_tree_key;
typedef void *mfsplay_tree_value;

/* Forward declaration for a node in the tree. */
typedef struct mfsplay_tree_node_s *mfsplay_tree_node;

/* The type of a function used to iterate over the tree. */
typedef int (*mfsplay_tree_foreach_fn) (mfsplay_tree_node, void *);

/* The nodes in the splay tree. */
struct mfsplay_tree_node_s
{
  /* Data. */
  mfsplay_tree_key key;
  mfsplay_tree_value value;
  /* Children. */
  mfsplay_tree_node left;
  mfsplay_tree_node right;
  /* XXX: The addition of a parent pointer may eliminate some recursion. */
};

/* The splay tree itself. */
struct mfsplay_tree_s
{
  /* The root of the tree. */
  mfsplay_tree_node root;

  /* The last key value for which the tree has been splayed, but not
     since modified. */
  mfsplay_tree_key last_splayed_key;
  int last_splayed_key_p;

  /* Statistics. */
  unsigned num_keys;

  /* Traversal recursion control flags. */
  unsigned max_depth;
  unsigned depth;
  unsigned rebalance_p;
};
typedef struct mfsplay_tree_s *mfsplay_tree;

static mfsplay_tree mfsplay_tree_new (void);
static mfsplay_tree_node mfsplay_tree_insert (mfsplay_tree, mfsplay_tree_key, mfsplay_tree_value);
static void mfsplay_tree_remove (mfsplay_tree, mfsplay_tree_key);
static mfsplay_tree_node mfsplay_tree_lookup (mfsplay_tree, mfsplay_tree_key);
static mfsplay_tree_node mfsplay_tree_predecessor (mfsplay_tree, mfsplay_tree_key);
static mfsplay_tree_node mfsplay_tree_successor (mfsplay_tree, mfsplay_tree_key);
static int mfsplay_tree_foreach (mfsplay_tree, mfsplay_tree_foreach_fn, void *);
static void mfsplay_tree_rebalance (mfsplay_tree sp);

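/* Illustrative sketch (not compiled): how the runtime below drives this
   splay-tree API.  Keys are object base addresses, values are
   __mf_object_t pointers; a key's tree neighbors are the overlap
   candidates.  The function name is hypothetical. */
#if 0
static void
example_mfsplay_usage (mfsplay_tree t, uintptr_t base, void *obj)
{
  mfsplay_tree_node n;
  mfsplay_tree_insert (t, (mfsplay_tree_key) base, (mfsplay_tree_value) obj);
  n = mfsplay_tree_lookup (t, (mfsplay_tree_key) base);    /* n->value == obj */
  n = mfsplay_tree_successor (t, (mfsplay_tree_key) base); /* next-higher base */
  mfsplay_tree_remove (t, (mfsplay_tree_key) base);
}
#endif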
/* ------------------------------------------------------------------------ */
/* Utility macros */

#define CTOR  __attribute__ ((constructor))
#define DTOR  __attribute__ ((destructor))


/* Codes to describe the context in which a violation occurs. */
#define __MF_VIOL_UNKNOWN 0
#define __MF_VIOL_READ 1
#define __MF_VIOL_WRITE 2
#define __MF_VIOL_REGISTER 3
#define __MF_VIOL_UNREGISTER 4
#define __MF_VIOL_WATCH 5

/* Protect against recursive calls. */

static void
begin_recursion_protect1 (const char *pf)
{
  if (__mf_get_state () == reentrant)
    {
      write (2, "mf: erroneous reentrancy detected in `", 38);
      write (2, pf, strlen(pf));
      write (2, "'\n", 2);
      abort ();
    }
  __mf_set_state (reentrant);
}

#define BEGIN_RECURSION_PROTECT() \
  begin_recursion_protect1 (__PRETTY_FUNCTION__)

#define END_RECURSION_PROTECT() \
  __mf_set_state (active)


/* ------------------------------------------------------------------------ */
/* Required globals. */

#define LOOKUP_CACHE_MASK_DFL 1023
#define LOOKUP_CACHE_SIZE_MAX 65536 /* Allows max CACHE_MASK 0xFFFF */
#define LOOKUP_CACHE_SHIFT_DFL 2

struct __mf_cache __mf_lookup_cache [LOOKUP_CACHE_SIZE_MAX];
uintptr_t __mf_lc_mask = LOOKUP_CACHE_MASK_DFL;
unsigned char __mf_lc_shift = LOOKUP_CACHE_SHIFT_DFL;
#define LOOKUP_CACHE_SIZE (__mf_lc_mask + 1)
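
/* A minimal sketch of how a pointer maps to a cache slot, assuming
   __MF_CACHE_INDEX in mf-runtime.h is essentially shift-then-mask as
   below.  Fragment shown as it would appear inside a function; not
   compiled here. */
#if 0
  /* e.g. ptr = 0x804a10c, __mf_lc_shift = 2, __mf_lc_mask = 1023:
     idx = (0x804a10c >> 2) & 0x3ff = 0x2012843 & 0x3ff = 0x43.  */
  unsigned idx = (((uintptr_t) ptr) >> __mf_lc_shift) & __mf_lc_mask;
  struct __mf_cache *entry = & __mf_lookup_cache [idx];
  int hit = (entry->low <= (uintptr_t) ptr
             && entry->high >= (uintptr_t) ptr + sz - 1);
#endif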

struct __mf_options __mf_opts;
int __mf_starting_p = 1;

#ifdef LIBMUDFLAPTH
#if defined(HAVE_TLS) && !defined(USE_EMUTLS)
__thread enum __mf_state_enum __mf_state_1 = reentrant;
#endif
#else
enum __mf_state_enum __mf_state_1 = reentrant;
#endif

#ifdef LIBMUDFLAPTH
pthread_mutex_t __mf_biglock =
#ifdef PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP
       PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP;
#else
       PTHREAD_MUTEX_INITIALIZER;
#endif
#endif

/* Use HAVE_PTHREAD_H here instead of LIBMUDFLAPTH, so that even
   the libmudflap.la (no threading support) can diagnose whether
   the application is linked with -lpthread.  See __mf_usage() below. */
#if HAVE_PTHREAD_H
#ifdef _POSIX_THREADS
#pragma weak pthread_join
#else
#define pthread_join NULL
#endif
#endif


/* ------------------------------------------------------------------------ */
/* stats-related globals.  */

static unsigned long __mf_count_check;
static unsigned long __mf_lookup_cache_reusecount [LOOKUP_CACHE_SIZE_MAX];
static unsigned long __mf_count_register;
static unsigned long __mf_total_register_size [__MF_TYPE_MAX+1];
static unsigned long __mf_count_unregister;
static unsigned long __mf_total_unregister_size;
static unsigned long __mf_count_violation [__MF_VIOL_WATCH+1];
static unsigned long __mf_sigusr1_received;
static unsigned long __mf_sigusr1_handled;
/* not static */ unsigned long __mf_reentrancy;
#ifdef LIBMUDFLAPTH
/* not static */ unsigned long __mf_lock_contention;
#endif


/* ------------------------------------------------------------------------ */
/* mode-check-related globals.  */

typedef struct __mf_object
{
  uintptr_t low, high; /* __mf_register parameters */
  const char *name;
  char type; /* __MF_TYPE_something */
  char watching_p; /* Trigger a VIOL_WATCH on access? */
  unsigned read_count; /* Number of times __mf_check/read was called on this object. */
  unsigned write_count; /* Likewise for __mf_check/write. */
  unsigned liveness; /* A measure of recent checking activity. */
  unsigned description_epoch; /* Last epoch __mf_describe_object printed this. */

  uintptr_t alloc_pc;
  struct timeval alloc_time;
  char **alloc_backtrace;
  size_t alloc_backtrace_size;
#ifdef LIBMUDFLAPTH
  pthread_t alloc_thread;
#endif

  int deallocated_p;
  uintptr_t dealloc_pc;
  struct timeval dealloc_time;
  char **dealloc_backtrace;
  size_t dealloc_backtrace_size;
#ifdef LIBMUDFLAPTH
  pthread_t dealloc_thread;
#endif
} __mf_object_t;
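
/* Sketch of the inclusive [low, high] convention used throughout: an
   object registered at ptr with size sz spans low = (uintptr_t) ptr and
   high = low + sz - 1 (cf. CLAMPSZ in mf-impl.h, which also guards the
   addition against wraparound).  Not compiled. */
#if 0
  char buf[16];                           /* hypothetical registration */
  uintptr_t low  = (uintptr_t) buf;       /* first valid byte */
  uintptr_t high = low + sizeof buf - 1;  /* last valid byte, inclusive */
#endif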

/* Live objects: splay trees, separated by type, ordered on .low (base address). */
/* Actually stored as static vars within lookup function below. */

/* Dead objects: circular arrays; _MIN_CEM .. _MAX_CEM only */
static unsigned __mf_object_dead_head[__MF_TYPE_MAX_CEM+1]; /* next empty spot */
static __mf_object_t *__mf_object_cemetary[__MF_TYPE_MAX_CEM+1][__MF_PERSIST_MAX];


/* ------------------------------------------------------------------------ */
/* Forward function declarations */

void __mf_init () CTOR;
static void __mf_sigusr1_respond ();
static unsigned __mf_find_objects (uintptr_t ptr_low, uintptr_t ptr_high,
                                   __mf_object_t **objs, unsigned max_objs);
static unsigned __mf_find_objects2 (uintptr_t ptr_low, uintptr_t ptr_high,
                                    __mf_object_t **objs, unsigned max_objs, int type);
static unsigned __mf_find_dead_objects (uintptr_t ptr_low, uintptr_t ptr_high,
                                        __mf_object_t **objs, unsigned max_objs);
static void __mf_adapt_cache ();
static void __mf_describe_object (__mf_object_t *obj);
static unsigned __mf_watch_or_not (void *ptr, size_t sz, char flag);
static mfsplay_tree __mf_object_tree (int type);
static void __mf_link_object (__mf_object_t *node);
static void __mf_unlink_object (__mf_object_t *node);


/* ------------------------------------------------------------------------ */
/* Configuration engine */

static void
__mf_set_default_options ()
{
  memset (& __mf_opts, 0, sizeof (__mf_opts));

  __mf_opts.adapt_cache = 1000003;
  __mf_opts.abbreviate = 1;
  __mf_opts.verbose_violations = 1;
  __mf_opts.free_queue_length = 4;
  __mf_opts.persistent_count = 100;
  __mf_opts.crumple_zone = 32;
  __mf_opts.backtrace = 4;
  __mf_opts.timestamps = 1;
  __mf_opts.mudflap_mode = mode_check;
  __mf_opts.violation_mode = viol_nop;
#ifdef HAVE___LIBC_FREERES
  __mf_opts.call_libc_freeres = 1;
#endif
  __mf_opts.heur_std_data = 1;
#ifdef LIBMUDFLAPTH
  __mf_opts.thread_stack = 0;
#endif

  /* PR41443: Beware that the above flags will be applied to
     setuid/setgid binaries, and cannot be overridden with
     $MUDFLAP_OPTIONS.  So the defaults must be non-exploitable.

     Should we consider making the default violation_mode something
     harsher than viol_nop?  OTOH, glibc's MALLOC_CHECK_ is disabled
     by default for these same programs. */
}

static struct mudoption
{
  char *name;
  char *description;
  enum
    {
      set_option,
      read_integer_option,
    } type;
  unsigned value;
  unsigned *target;
}
options [] =
  {
    {"mode-nop",
     "mudflaps do nothing",
     set_option, (unsigned)mode_nop, (unsigned *)&__mf_opts.mudflap_mode},
    {"mode-populate",
     "mudflaps populate object tree",
     set_option, (unsigned)mode_populate, (unsigned *)&__mf_opts.mudflap_mode},
    {"mode-check",
     "mudflaps check for memory violations",
     set_option, (unsigned)mode_check, (unsigned *)&__mf_opts.mudflap_mode},
    {"mode-violate",
     "mudflaps always cause violations (diagnostic)",
     set_option, (unsigned)mode_violate, (unsigned *)&__mf_opts.mudflap_mode},

    {"viol-nop",
     "violations do not change program execution",
     set_option, (unsigned)viol_nop, (unsigned *)&__mf_opts.violation_mode},
    {"viol-abort",
     "violations cause a call to abort()",
     set_option, (unsigned)viol_abort, (unsigned *)&__mf_opts.violation_mode},
    {"viol-segv",
     "violations are promoted to SIGSEGV signals",
     set_option, (unsigned)viol_segv, (unsigned *)&__mf_opts.violation_mode},
    {"viol-gdb",
     "violations fork a gdb process attached to current program",
     set_option, (unsigned)viol_gdb, (unsigned *)&__mf_opts.violation_mode},
    {"trace-calls",
     "trace calls to mudflap runtime library",
     set_option, 1, &__mf_opts.trace_mf_calls},
    {"verbose-trace",
     "trace internal events within mudflap runtime library",
     set_option, 1, &__mf_opts.verbose_trace},
    {"collect-stats",
     "collect statistics on mudflap's operation",
     set_option, 1, &__mf_opts.collect_stats},
#ifdef SIGUSR1
    {"sigusr1-report",
     "print report upon SIGUSR1",
     set_option, 1, &__mf_opts.sigusr1_report},
#endif
    {"internal-checking",
     "perform more expensive internal checking",
     set_option, 1, &__mf_opts.internal_checking},
    {"print-leaks",
     "print any memory leaks at program shutdown",
     set_option, 1, &__mf_opts.print_leaks},
#ifdef HAVE___LIBC_FREERES
    {"libc-freeres",
     "call glibc __libc_freeres at shutdown for better leak data",
     set_option, 1, &__mf_opts.call_libc_freeres},
#endif
    {"check-initialization",
     "detect uninitialized object reads",
     set_option, 1, &__mf_opts.check_initialization},
    {"verbose-violations",
     "print verbose messages when memory violations occur",
     set_option, 1, &__mf_opts.verbose_violations},
    {"abbreviate",
     "abbreviate repetitive listings",
     set_option, 1, &__mf_opts.abbreviate},
    {"timestamps",
     "track object lifetime timestamps",
     set_option, 1, &__mf_opts.timestamps},
    {"ignore-reads",
     "ignore read accesses - assume okay",
     set_option, 1, &__mf_opts.ignore_reads},
    {"wipe-stack",
     "wipe stack objects at unwind",
     set_option, 1, &__mf_opts.wipe_stack},
    {"wipe-heap",
     "wipe heap objects at free",
     set_option, 1, &__mf_opts.wipe_heap},
    {"heur-proc-map",
     "support /proc/self/map heuristics",
     set_option, 1, &__mf_opts.heur_proc_map},
    {"heur-stack-bound",
     "enable a simple upper stack bound heuristic",
     set_option, 1, &__mf_opts.heur_stack_bound},
    {"heur-start-end",
     "support _start.._end heuristics",
     set_option, 1, &__mf_opts.heur_start_end},
    {"heur-stdlib",
     "register standard library data (argv, errno, stdin, ...)",
     set_option, 1, &__mf_opts.heur_std_data},
    {"free-queue-length",
     "queue N deferred free() calls before performing them",
     read_integer_option, 0, &__mf_opts.free_queue_length},
    {"persistent-count",
     "keep a history of N unregistered regions",
     read_integer_option, 0, &__mf_opts.persistent_count},
    {"crumple-zone",
     "surround allocations with crumple zones of N bytes",
     read_integer_option, 0, &__mf_opts.crumple_zone},
    /* XXX: not type-safe.
    {"lc-mask",
     "set lookup cache size mask to N (2**M - 1)",
     read_integer_option, 0, (int *)(&__mf_lc_mask)},
    {"lc-shift",
     "set lookup cache pointer shift",
     read_integer_option, 0, (int *)(&__mf_lc_shift)},
    */
    {"lc-adapt",
     "adapt mask/shift parameters after N cache misses",
     read_integer_option, 1, &__mf_opts.adapt_cache},
    {"backtrace",
     "keep an N-level stack trace of each call context",
     read_integer_option, 0, &__mf_opts.backtrace},
#ifdef LIBMUDFLAPTH
    {"thread-stack",
     "override thread stacks allocation: N kB",
     read_integer_option, 0, &__mf_opts.thread_stack},
#endif
    {0, 0, set_option, 0, NULL}
  };

static void
__mf_usage ()
{
  struct mudoption *opt;

  fprintf (stderr,
           "This is a %s%sGCC \"mudflap\" memory-checked binary.\n"
           "Mudflap is Copyright (C) 2002-2010 Free Software Foundation, Inc.\n"
           "\n"
           "Unless setuid, a program's mudflap options can be set by an environment variable:\n"
           "\n"
           "$ export MUDFLAP_OPTIONS='<options>'\n"
           "$ <mudflapped_program>\n"
           "\n"
           "where <options> is a space-separated list of \n"
           "any of the following options.  Use `-no-OPTION' to disable options.\n"
           "\n",
#if HAVE_PTHREAD_H
           (pthread_join ? "multi-threaded " : "single-threaded "),
#else
           "",
#endif
#if LIBMUDFLAPTH
           "thread-aware "
#else
           "thread-unaware "
#endif
           );
  /* XXX: The multi-threaded thread-unaware combination is bad. */

  for (opt = options; opt->name; opt++)
    {
      int default_p = (opt->value == * opt->target);

      switch (opt->type)
        {
          char buf[128];
        case set_option:
          fprintf (stderr, "-%-23.23s %s", opt->name, opt->description);
          if (default_p)
            fprintf (stderr, " [active]\n");
          else
            fprintf (stderr, "\n");
          break;
        case read_integer_option:
          strncpy (buf, opt->name, 128);
          strncpy (buf + strlen (opt->name), "=N", 2);
          fprintf (stderr, "-%-23.23s %s", buf, opt->description);
          fprintf (stderr, " [%d]\n", * opt->target);
          break;
        default: abort();
        }
    }

  fprintf (stderr, "\n");
}


int
__mf_set_options (const char *optstr)
{
  int rc;
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  rc = __mfu_set_options (optstr);
  /* XXX: It's not really that easy.  A change to a bunch of parameters
     can require updating auxiliary state or risk crashing:
     free_queue_length, crumple_zone ... */
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
  return rc;
}
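
/* Hedged usage sketch: __mf_set_options is the public entry point an
   instrumented program may call at run time; strings follow the options[]
   table above, space-separated, with `-no-' to negate.  Not compiled. */
#if 0
  /* Equivalent to MUDFLAP_OPTIONS='-mode-check -viol-abort -backtrace=8'. */
  if (__mf_set_options ("-mode-check -viol-abort -backtrace=8") < 0)
    fprintf (stderr, "mf: bad option string\n");
#endif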

int
__mfu_set_options (const char *optstr)
{
  struct mudoption *opts = 0;
  char *nxt = 0;
  long tmp = 0;
  int rc = 0;
  const char *saved_optstr = optstr;

  /* XXX: bounds-check for optstr! */

  while (*optstr)
    {
      switch (*optstr) {
      case ' ':
      case '\t':
      case '\n':
        optstr++;
        break;

      case '-':
        if (*(optstr+1))
          {
            int negate = 0;
            optstr++;

            if (*optstr == '?' ||
                strncmp (optstr, "help", 4) == 0)
              {
                /* Caller will print help and exit. */
                return -1;
              }

            if (strncmp (optstr, "no-", 3) == 0)
              {
                negate = 1;
                optstr = & optstr[3];
              }

            for (opts = options; opts->name; opts++)
              {
                if (strncmp (optstr, opts->name, strlen (opts->name)) == 0)
                  {
                    optstr += strlen (opts->name);
                    assert (opts->target);
                    switch (opts->type)
                      {
                      case set_option:
                        if (negate)
                          *(opts->target) = 0;
                        else
                          *(opts->target) = opts->value;
                        break;
                      case read_integer_option:
                        if (! negate && (*optstr == '=' && *(optstr+1)))
                          {
                            optstr++;
                            tmp = strtol (optstr, &nxt, 10);
                            if ((optstr != nxt) && (tmp != LONG_MAX))
                              {
                                optstr = nxt;
                                *(opts->target) = (int)tmp;
                              }
                          }
                        else if (negate)
                          * opts->target = 0;
                        break;
                      }
                  }
              }
          }
        break;

      default:
        fprintf (stderr,
                 "warning: unrecognized string '%s' in mudflap options\n",
                 optstr);
        optstr += strlen (optstr);
        rc = -1;
        break;
      }
    }

  /* Special post-processing: bound __mf_lc_mask and free_queue_length for security. */
  __mf_lc_mask &= (LOOKUP_CACHE_SIZE_MAX - 1);
  __mf_opts.free_queue_length &= (__MF_FREEQ_MAX - 1);

  /* Clear the lookup cache, in case the parameters got changed. */
  /* XXX: race */
  memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
  /* void slot 0 */
  __mf_lookup_cache[0].low = MAXPTR;

  TRACE ("set options from `%s'\n", saved_optstr);

  /* Call this unconditionally, in case -sigusr1-report was toggled. */
  __mf_sigusr1_respond ();

  return rc;
}


#ifdef PIC

void
__mf_resolve_single_dynamic (struct __mf_dynamic_entry *e)
{
  char *err;

  assert (e);
  if (e->pointer) return;

#if HAVE_DLVSYM
  if (e->version != NULL && e->version[0] != '\0') /* non-null/empty */
    e->pointer = dlvsym (RTLD_NEXT, e->name, e->version);
  else
#endif
    e->pointer = dlsym (RTLD_NEXT, e->name);

  err = dlerror ();

  if (err)
    {
      fprintf (stderr, "mf: error in dlsym(\"%s\"): %s\n",
               e->name, err);
      abort ();
    }
  if (! e->pointer)
    {
      fprintf (stderr, "mf: dlsym(\"%s\") = NULL\n", e->name);
      abort ();
    }
}


static void
__mf_resolve_dynamics ()
{
  int i;
  for (i = 0; i < dyn_INITRESOLVE; i++)
    __mf_resolve_single_dynamic (& __mf_dynamic[i]);
}


/* NB: order must match enums in mf-impl.h */
struct __mf_dynamic_entry __mf_dynamic [] =
{
  {NULL, "calloc", NULL},
  {NULL, "free", NULL},
  {NULL, "malloc", NULL},
  {NULL, "mmap", NULL},
  {NULL, "munmap", NULL},
  {NULL, "realloc", NULL},
  {NULL, "DUMMY", NULL}, /* dyn_INITRESOLVE */
#ifdef LIBMUDFLAPTH
  {NULL, "pthread_create", PTHREAD_CREATE_VERSION},
  {NULL, "pthread_join", NULL},
  {NULL, "pthread_exit", NULL}
#endif
};

#endif /* PIC */



/* ------------------------------------------------------------------------ */

/* Lookup & manage automatic initialization of the five or so splay trees. */
static mfsplay_tree
__mf_object_tree (int type)
{
  static mfsplay_tree trees [__MF_TYPE_MAX+1];
  assert (type >= 0 && type <= __MF_TYPE_MAX);
  if (UNLIKELY (trees[type] == NULL))
    trees[type] = mfsplay_tree_new ();
  return trees[type];
}


/* not static */void
__mf_init ()
{
  char *ov = 0;

  /* Return if initialization has already been done. */
  if (LIKELY (__mf_starting_p == 0))
    return;

#if defined(__FreeBSD__) && defined(LIBMUDFLAPTH)
  pthread_self();
  LOCKTH ();
  UNLOCKTH ();
#endif /* Prime mutex which calls calloc upon first lock to avoid deadlock. */

  /* This initial bootstrap phase requires that __mf_starting_p = 1. */
#ifdef PIC
  __mf_resolve_dynamics ();
#endif
  __mf_starting_p = 0;

  __mf_set_state (active);

  __mf_set_default_options ();

  if (getuid () == geteuid () && getgid () == getegid ()) /* PR41433, not setuid */
    ov = getenv ("MUDFLAP_OPTIONS");
  if (ov)
    {
      int rc = __mfu_set_options (ov);
      if (rc < 0)
        {
          __mf_usage ();
          exit (1);
        }
    }

  /* Initialize to a non-zero description epoch. */
  __mf_describe_object (NULL);

#define REG_RESERVED(obj) \
  __mf_register (& obj, sizeof(obj), __MF_TYPE_NOACCESS, # obj)

  REG_RESERVED (__mf_lookup_cache);
  REG_RESERVED (__mf_lc_mask);
  REG_RESERVED (__mf_lc_shift);
  /* XXX: others of our statics? */

  /* Prevent access to *NULL. */
  __mf_register (MINPTR, 1, __MF_TYPE_NOACCESS, "NULL");
  __mf_lookup_cache[0].low = (uintptr_t) -1;
}
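
/* Hedged sketch of manual registration, as e.g. a custom pool allocator
   might do with this public API (the malloc/free hooks elsewhere in
   libmudflap do the same thing automatically).  get_pool_block is a
   hypothetical allocator; not compiled. */
#if 0
  void *p = get_pool_block (1024);
  __mf_register (p, 1024, __MF_TYPE_HEAP, "pool block");
  /* ... checked accesses to p ... */
  __mf_unregister (p, 1024, __MF_TYPE_HEAP);
#endif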



int
__wrap_main (int argc, char* argv[])
{
  extern char **environ;
  extern int main ();
  extern int __real_main ();
  static int been_here = 0;

  if (__mf_opts.heur_std_data && ! been_here)
    {
      unsigned i;

      been_here = 1;
      __mf_register (argv, sizeof(char *)*(argc+1), __MF_TYPE_STATIC, "argv[]");
      for (i=0; i<argc; i++)
        {
          unsigned j = strlen (argv[i]);
          __mf_register (argv[i], j+1, __MF_TYPE_STATIC, "argv element");
        }

      for (i=0; ; i++)
        {
          char *e = environ[i];
          unsigned j;
          if (e == NULL) break;
          j = strlen (environ[i]);
          __mf_register (environ[i], j+1, __MF_TYPE_STATIC, "environ element");
        }
      __mf_register (environ, sizeof(char *)*(i+1), __MF_TYPE_STATIC, "environ[]");

      __mf_register (& errno, sizeof (errno), __MF_TYPE_STATIC, "errno area");

      __mf_register (stdin, sizeof (*stdin), __MF_TYPE_STATIC, "stdin");
      __mf_register (stdout, sizeof (*stdout), __MF_TYPE_STATIC, "stdout");
      __mf_register (stderr, sizeof (*stderr), __MF_TYPE_STATIC, "stderr");

      /* Make some effort to register ctype.h static arrays. */
      /* XXX: e.g., on Solaris, may need to register __ctype, _ctype, __ctype_mask, __toupper, etc. */
      /* On modern Linux GLIBC, these are thread-specific and changeable, and are dealt
         with in mf-hooks2.c. */
    }

#ifdef PIC
  return main (argc, argv, environ);
#else
  return __real_main (argc, argv, environ);
#endif
}
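
/* How __wrap_main is reached (a hedged note, not stated in this file):
   the gcc driver arranges the linker option --wrap=main when linking
   with -fmudflap, so calls to main resolve to __wrap_main above and the
   original becomes __real_main.  General ld --wrap semantics, shown with
   a hypothetical symbol foo: */
#if 0
int __real_foo (int x);   /* binds to the original foo() definition */
int __wrap_foo (int x)    /* interposer, analogous to __wrap_main */
{
  return __real_foo (x) + 1;
}
#endif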


extern void __mf_fini () DTOR;
void __mf_fini ()
{
  TRACE ("__mf_fini\n");
  __mfu_report ();

#ifndef PIC
/* Since we didn't populate the tree for allocations in constructors
   before __mf_init, we cannot check destructors after __mf_fini. */
  __mf_opts.mudflap_mode = mode_nop;
#endif
}



/* ------------------------------------------------------------------------ */
/* __mf_check */

void __mf_check (void *ptr, size_t sz, int type, const char *location)
{
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  __mfu_check (ptr, sz, type, location);
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
}
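
/* Hedged sketch of what the -fmudflap tree rewriter conceptually emits
   around a pointer dereference (the exact location-string format is the
   compiler's business; the one below is made up for illustration): */
#if 0
  /* source:       int x = p->count;  */
  /* instrumented: */
  __mf_check (&p->count, sizeof (p->count), __MF_CHECK_READ,
              "example.c:42 (get_count)");
  int x = p->count;
#endif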

void __mfu_check (void *ptr, size_t sz, int type, const char *location)
{
  unsigned entry_idx = __MF_CACHE_INDEX (ptr);
  struct __mf_cache *entry = & __mf_lookup_cache [entry_idx];
  int judgement = 0; /* 0=undecided; <0=violation; >0=okay */
  uintptr_t ptr_low = (uintptr_t) ptr;
  uintptr_t ptr_high = CLAMPSZ (ptr, sz);
  struct __mf_cache old_entry = *entry;

  if (UNLIKELY (__mf_opts.sigusr1_report))
    __mf_sigusr1_respond ();
  if (UNLIKELY (__mf_opts.ignore_reads && type == 0))
    return;

  TRACE ("check ptr=%p b=%u size=%lu %s location=`%s'\n",
         ptr, entry_idx, (unsigned long)sz,
         (type == 0 ? "read" : "write"), location);

  switch (__mf_opts.mudflap_mode)
    {
    case mode_nop:
      /* It is tempting to poison the cache here similarly to
         mode_populate.  However that eliminates a valuable
         distinction between these two modes.  mode_nop is useful to
         let a user count & trace every single check / registration
         call.  mode_populate is useful to let a program run fast
         while unchecked.
      */
      judgement = 1;
      break;

    case mode_populate:
      entry->low = ptr_low;
      entry->high = ptr_high;
      judgement = 1;
      break;

    case mode_check:
      {
        unsigned heuristics = 0;

        /* Advance aging/adaptation counters. */
        static unsigned adapt_count;
        adapt_count ++;
        if (UNLIKELY (__mf_opts.adapt_cache > 0 &&
                      adapt_count > __mf_opts.adapt_cache))
          {
            adapt_count = 0;
            __mf_adapt_cache ();
          }

        /* Looping only occurs if heuristics were triggered. */
        while (judgement == 0)
          {
            DECLARE (void, free, void *p);
            __mf_object_t* ovr_obj[1];
            unsigned obj_count;
            __mf_object_t** all_ovr_obj = NULL;
            __mf_object_t** dealloc_me = NULL;
            unsigned i;

            /* Find all overlapping objects.  Be optimistic that there is just one. */
            obj_count = __mf_find_objects (ptr_low, ptr_high, ovr_obj, 1);
            if (UNLIKELY (obj_count > 1))
              {
                /* Allocate a real buffer and do the search again. */
                DECLARE (void *, malloc, size_t c);
                unsigned n;
                all_ovr_obj = CALL_REAL (malloc, (sizeof (__mf_object_t *) *
                                                  obj_count));
                if (all_ovr_obj == NULL) abort ();
                n = __mf_find_objects (ptr_low, ptr_high, all_ovr_obj, obj_count);
                assert (n == obj_count);
                dealloc_me = all_ovr_obj;
              }
            else
              {
                all_ovr_obj = ovr_obj;
                dealloc_me = NULL;
              }

            /* Update object statistics. */
            for (i = 0; i < obj_count; i++)
              {
                __mf_object_t *obj = all_ovr_obj[i];
                assert (obj != NULL);
                if (type == __MF_CHECK_READ)
                  obj->read_count ++;
                else
                  obj->write_count ++;
                obj->liveness ++;
              }

            /* Iterate over the various objects.  There are a number of special cases. */
            for (i = 0; i < obj_count; i++)
              {
                __mf_object_t *obj = all_ovr_obj[i];

                /* Any __MF_TYPE_NOACCESS hit is bad. */
                if (UNLIKELY (obj->type == __MF_TYPE_NOACCESS))
                  judgement = -1;

                /* Any object with a watch flag is bad. */
                if (UNLIKELY (obj->watching_p))
                  judgement = -2; /* trigger VIOL_WATCH */

                /* A read from an uninitialized object is bad. */
                if (UNLIKELY (__mf_opts.check_initialization
                              /* reading */
                              && type == __MF_CHECK_READ
                              /* not written */
                              && obj->write_count == 0
                              /* uninitialized (heap) */
                              && obj->type == __MF_TYPE_HEAP))
                  judgement = -1;
              }

            /* We now know that the access spans no invalid objects. */
            if (LIKELY (judgement >= 0))
              for (i = 0; i < obj_count; i++)
                {
                  __mf_object_t *obj = all_ovr_obj[i];

                  /* Is this access entirely contained within this object? */
                  if (LIKELY (ptr_low >= obj->low && ptr_high <= obj->high))
                    {
                      /* Valid access. */
                      entry->low = obj->low;
                      entry->high = obj->high;
                      judgement = 1;
                    }
                }

            /* This access runs off the end of one valid object.  That
               could be okay, if other valid objects fill in all the
               holes.  We allow this only for HEAP and GUESS type
               objects.  Accesses to STATIC and STACK variables
               should not be allowed to span. */
            if (UNLIKELY ((judgement == 0) && (obj_count > 1)))
              {
                unsigned uncovered = 0;
                for (i = 0; i < obj_count; i++)
                  {
                    __mf_object_t *obj = all_ovr_obj[i];
                    int j, uncovered_low_p, uncovered_high_p;
                    uintptr_t ptr_lower, ptr_higher;

                    uncovered_low_p = ptr_low < obj->low;
                    ptr_lower = CLAMPSUB (obj->low, 1);
                    uncovered_high_p = ptr_high > obj->high;
                    ptr_higher = CLAMPADD (obj->high, 1);

                    for (j = 0; j < obj_count; j++)
                      {
                        __mf_object_t *obj2 = all_ovr_obj[j];

                        if (i == j) continue;

                        /* Filter out objects that cannot be spanned across. */
                        if (obj2->type == __MF_TYPE_STACK
                            || obj2->type == __MF_TYPE_STATIC)
                          continue;

                        /* Consider a side "covered" if obj2 includes
                           the next byte on that side. */
                        if (uncovered_low_p
                            && (ptr_lower >= obj2->low && ptr_lower <= obj2->high))
                          uncovered_low_p = 0;
                        if (uncovered_high_p
                            && (ptr_higher >= obj2->low && ptr_higher <= obj2->high))
                          uncovered_high_p = 0;
                      }

                    if (uncovered_low_p || uncovered_high_p)
                      uncovered ++;
                  }

                /* Success if no overlapping objects are uncovered. */
                if (uncovered == 0)
                  judgement = 1;
              }


            if (dealloc_me != NULL)
              CALL_REAL (free, dealloc_me);

            /* If the judgement is still unknown at this stage, loop
               around at most one more time. */
            if (judgement == 0)
              {
                if (heuristics++ < 2) /* XXX parametrize this number? */
                  judgement = __mf_heuristic_check (ptr_low, ptr_high);
                else
                  judgement = -1;
              }
          }

      }
      break;

    case mode_violate:
      judgement = -1;
      break;
    }

  if (__mf_opts.collect_stats)
    {
      __mf_count_check ++;

      if (LIKELY (old_entry.low != entry->low || old_entry.high != entry->high))
        /* && (old_entry.low != 0) && (old_entry.high != 0)) */
        __mf_lookup_cache_reusecount [entry_idx] ++;
    }

  if (UNLIKELY (judgement < 0))
    __mf_violation (ptr, sz,
                    (uintptr_t) __builtin_return_address (0), location,
                    ((judgement == -1) ?
                     (type == __MF_CHECK_READ ? __MF_VIOL_READ : __MF_VIOL_WRITE) :
                     __MF_VIOL_WATCH));
}


static __mf_object_t *
__mf_insert_new_object (uintptr_t low, uintptr_t high, int type,
                        const char *name, uintptr_t pc)
{
  DECLARE (void *, calloc, size_t c, size_t n);

  __mf_object_t *new_obj;
  new_obj = CALL_REAL (calloc, 1, sizeof(__mf_object_t));
  new_obj->low = low;
  new_obj->high = high;
  new_obj->type = type;
  new_obj->name = name;
  new_obj->alloc_pc = pc;
#if HAVE_GETTIMEOFDAY
  if (__mf_opts.timestamps)
    gettimeofday (& new_obj->alloc_time, NULL);
#endif
#if LIBMUDFLAPTH
  new_obj->alloc_thread = pthread_self ();
#endif

  if (__mf_opts.backtrace > 0 && (type == __MF_TYPE_HEAP || type == __MF_TYPE_HEAP_I))
    new_obj->alloc_backtrace_size =
      __mf_backtrace (& new_obj->alloc_backtrace,
                      (void *) pc, 2);

  __mf_link_object (new_obj);
  return new_obj;
}


static void
__mf_uncache_object (__mf_object_t *old_obj)
{
  /* Remove any low/high pointers for this object from the lookup cache. */

  /* Can it possibly exist in the cache? */
  if (LIKELY (old_obj->read_count + old_obj->write_count))
    {
      uintptr_t low = old_obj->low;
      uintptr_t high = old_obj->high;
      struct __mf_cache *entry;
      unsigned i;
      if ((high - low) >= (__mf_lc_mask << __mf_lc_shift))
        {
          /* For large objects (>= cache size - 1) check the whole cache. */
          entry = & __mf_lookup_cache [0];
          for (i = 0; i <= __mf_lc_mask; i++, entry++)
            {
              /* NB: the "||" in the following test permits this code to
                 tolerate the situation introduced by __mf_check over
                 contiguous objects, where a cache entry spans several
                 objects. */
              if (entry->low == low || entry->high == high)
                {
                  entry->low = MAXPTR;
                  entry->high = MINPTR;
                }
            }
        }
      else
        {
          /* Object is now smaller than cache size. */
          unsigned entry_low_idx = __MF_CACHE_INDEX (low);
          unsigned entry_high_idx = __MF_CACHE_INDEX (high);
          if (entry_low_idx <= entry_high_idx)
            {
              entry = & __mf_lookup_cache [entry_low_idx];
              for (i = entry_low_idx; i <= entry_high_idx; i++, entry++)
                {
                  /* NB: as above, "||" tolerates cache entries that span
                     several contiguous objects. */
                  if (entry->low == low || entry->high == high)
                    {
                      entry->low = MAXPTR;
                      entry->high = MINPTR;
                    }
                }
            }
          else
            {
              /* Object wrapped around the end of the cache.  First search
                 from low to end of cache and then from 0 to high. */
              entry = & __mf_lookup_cache [entry_low_idx];
              for (i = entry_low_idx; i <= __mf_lc_mask; i++, entry++)
                {
                  /* NB: as above, "||" tolerates cache entries that span
                     several contiguous objects. */
                  if (entry->low == low || entry->high == high)
                    {
                      entry->low = MAXPTR;
                      entry->high = MINPTR;
                    }
                }
              entry = & __mf_lookup_cache [0];
              for (i = 0; i <= entry_high_idx; i++, entry++)
                {
                  /* NB: as above, "||" tolerates cache entries that span
                     several contiguous objects. */
                  if (entry->low == low || entry->high == high)
                    {
                      entry->low = MAXPTR;
                      entry->high = MINPTR;
                    }
                }
            }
        }
    }
}


void
__mf_register (void *ptr, size_t sz, int type, const char *name)
{
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  __mfu_register (ptr, sz, type, name);
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
}


void
__mfu_register (void *ptr, size_t sz, int type, const char *name)
{
  TRACE ("register ptr=%p size=%lu type=%x name='%s'\n",
         ptr, (unsigned long) sz, type, name ? name : "");

  if (__mf_opts.collect_stats)
    {
      __mf_count_register ++;
      __mf_total_register_size [(type < 0) ? 0 :
                                (type > __MF_TYPE_MAX) ? 0 :
                                type] += sz;
    }

  if (UNLIKELY (__mf_opts.sigusr1_report))
    __mf_sigusr1_respond ();

  switch (__mf_opts.mudflap_mode)
    {
    case mode_nop:
      break;

    case mode_violate:
      __mf_violation (ptr, sz, (uintptr_t) __builtin_return_address (0), NULL,
                      __MF_VIOL_REGISTER);
      break;

    case mode_populate:
      /* Clear the cache. */
      /* XXX: why the entire cache? */
      /* XXX: race */
      memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
      /* void slot 0 */
      __mf_lookup_cache[0].low = MAXPTR;
      break;

    case mode_check:
      {
        __mf_object_t *ovr_objs [1];
        unsigned num_overlapping_objs;
        uintptr_t low = (uintptr_t) ptr;
        uintptr_t high = CLAMPSZ (ptr, sz);
        uintptr_t pc = (uintptr_t) __builtin_return_address (0);

        /* Treat unknown size indication as 1. */
        if (UNLIKELY (sz == 0)) sz = 1;

        /* Look for objects only of the same type.  This will e.g. permit a registration
           of a STATIC overlapping with a GUESS, and a HEAP with a NOACCESS.  At
           __mf_check time however harmful overlaps will be detected. */
        num_overlapping_objs = __mf_find_objects2 (low, high, ovr_objs, 1, type);

        /* Handle overlaps. */
        if (UNLIKELY (num_overlapping_objs > 0))
          {
            __mf_object_t *ovr_obj = ovr_objs[0];

            /* Accept certain specific duplication pairs. */
            if (((type == __MF_TYPE_STATIC) || (type == __MF_TYPE_GUESS))
                && ovr_obj->low == low
                && ovr_obj->high == high
                && ovr_obj->type == type)
              {
                /* Duplicate registration for static objects may come
                   from distinct compilation units. */
                VERBOSE_TRACE ("harmless duplicate reg %p-%p `%s'\n",
                               (void *) low, (void *) high,
                               (ovr_obj->name ? ovr_obj->name : ""));
                break;
              }

            /* Alas, a genuine violation. */
            else
              {
                /* Two or more *real* mappings here. */
                __mf_violation ((void *) ptr, sz,
                                (uintptr_t) __builtin_return_address (0), NULL,
                                __MF_VIOL_REGISTER);
              }
          }
        else /* No overlapping objects: AOK. */
          __mf_insert_new_object (low, high, type, name, pc);

        /* We could conceivably call __mf_check() here to prime the cache,
           but then the read_count/write_count field is not reliable. */
        break;
      }
    } /* end switch (__mf_opts.mudflap_mode) */
}


void
__mf_unregister (void *ptr, size_t sz, int type)
{
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  __mfu_unregister (ptr, sz, type);
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
}


void
__mfu_unregister (void *ptr, size_t sz, int type)
{
  DECLARE (void, free, void *ptr);

  if (UNLIKELY (__mf_opts.sigusr1_report))
    __mf_sigusr1_respond ();

  TRACE ("unregister ptr=%p size=%lu type=%x\n", ptr, (unsigned long) sz, type);

  switch (__mf_opts.mudflap_mode)
    {
    case mode_nop:
      break;

    case mode_violate:
      __mf_violation (ptr, sz,
                      (uintptr_t) __builtin_return_address (0), NULL,
                      __MF_VIOL_UNREGISTER);
      break;

    case mode_populate:
      /* Clear the cache. */
      /* XXX: race */
      memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
      /* void slot 0 */
      __mf_lookup_cache[0].low = MAXPTR;
      break;

    case mode_check:
      {
        __mf_object_t *old_obj = NULL;
        __mf_object_t *del_obj = NULL;  /* Object to actually delete. */
        __mf_object_t *objs[1] = {NULL};
        unsigned num_overlapping_objs;

        num_overlapping_objs = __mf_find_objects2 ((uintptr_t) ptr,
                                                   CLAMPSZ (ptr, sz), objs, 1, type);

        /* Special case for HEAP_I - see free & realloc hook.  They don't
           know whether the input region was HEAP or HEAP_I before
           unmapping it.  Here we give HEAP a try in case HEAP_I
           failed. */
        if ((type == __MF_TYPE_HEAP_I) && (num_overlapping_objs == 0))
          {
            num_overlapping_objs = __mf_find_objects2 ((uintptr_t) ptr,
                                                       CLAMPSZ (ptr, sz), objs, 1, __MF_TYPE_HEAP);
          }

        old_obj = objs[0];
        if (UNLIKELY ((num_overlapping_objs != 1) /* more than one overlap */
                      || ((sz == 0) ? 0 : (sz != (old_obj->high - old_obj->low + 1))) /* size mismatch */
                      || ((uintptr_t) ptr != old_obj->low))) /* base mismatch */
          {
            __mf_violation (ptr, sz,
                            (uintptr_t) __builtin_return_address (0), NULL,
                            __MF_VIOL_UNREGISTER);
            break;
          }

        __mf_unlink_object (old_obj);
        __mf_uncache_object (old_obj);

        /* Wipe buffer contents if desired. */
        if ((__mf_opts.wipe_stack && old_obj->type == __MF_TYPE_STACK)
            || (__mf_opts.wipe_heap && (old_obj->type == __MF_TYPE_HEAP
                                        || old_obj->type == __MF_TYPE_HEAP_I)))
          {
            memset ((void *) old_obj->low,
                    0,
                    (size_t) (old_obj->high - old_obj->low + 1));
          }

        /* Manage the object cemetery. */
        if (__mf_opts.persistent_count > 0
            && (unsigned) old_obj->type <= __MF_TYPE_MAX_CEM)
          {
            old_obj->deallocated_p = 1;
            old_obj->dealloc_pc = (uintptr_t) __builtin_return_address (0);
#if HAVE_GETTIMEOFDAY
            if (__mf_opts.timestamps)
              gettimeofday (& old_obj->dealloc_time, NULL);
#endif
#ifdef LIBMUDFLAPTH
            old_obj->dealloc_thread = pthread_self ();
#endif

            if (__mf_opts.backtrace > 0 && old_obj->type == __MF_TYPE_HEAP)
              old_obj->dealloc_backtrace_size =
                __mf_backtrace (& old_obj->dealloc_backtrace,
                                NULL, 2);

            /* Encourage this object to be displayed again in current epoch. */
            old_obj->description_epoch --;

            /* Put this object into the cemetery.  This may require this plot to
               be recycled, and the previous resident to be designated del_obj. */
            {
              unsigned row = old_obj->type;
              unsigned plot = __mf_object_dead_head [row];

              del_obj = __mf_object_cemetary [row][plot];
              __mf_object_cemetary [row][plot] = old_obj;
              plot ++;
              if (plot == __mf_opts.persistent_count) plot = 0;
              __mf_object_dead_head [row] = plot;
            }
          }
        else
          del_obj = old_obj;

        if (__mf_opts.print_leaks)
          {
            if ((old_obj->read_count + old_obj->write_count) == 0 &&
                (old_obj->type == __MF_TYPE_HEAP
                 || old_obj->type == __MF_TYPE_HEAP_I))
              {
                /* The problem with a warning message here is that we may not
                   be privy to accesses to such objects that occur within
                   uninstrumented libraries. */
#if 0
                fprintf (stderr,
                         "*******\n"
                         "mudflap warning: unaccessed registered object:\n");
                __mf_describe_object (old_obj);
#endif
              }
          }

        if (del_obj != NULL) /* May or may not equal old_obj. */
          {
            if (__mf_opts.backtrace > 0)
              {
                CALL_REAL(free, del_obj->alloc_backtrace);
                if (__mf_opts.persistent_count > 0)
                  {
                    CALL_REAL(free, del_obj->dealloc_backtrace);
                  }
              }
            CALL_REAL(free, del_obj);
          }

        break;
      }
    } /* end switch (__mf_opts.mudflap_mode) */


  if (__mf_opts.collect_stats)
    {
      __mf_count_unregister ++;
      __mf_total_unregister_size += sz;
    }
}


struct tree_stats
{
  unsigned obj_count;
  unsigned long total_size;
  unsigned live_obj_count;
  double total_weight;
  double weighted_size;
  unsigned long weighted_address_bits [sizeof (uintptr_t) * 8][2];
};


static int
__mf_adapt_cache_fn (mfsplay_tree_node n, void *param)
{
  __mf_object_t *obj = (__mf_object_t *) n->value;
  struct tree_stats *s = (struct tree_stats *) param;

  assert (obj != NULL && s != NULL);

  /* Exclude never-accessed objects. */
  if (obj->read_count + obj->write_count)
    {
      s->obj_count ++;
      s->total_size += (obj->high - obj->low + 1);

      if (obj->liveness)
        {
          unsigned i;
          uintptr_t addr;

          /* VERBOSE_TRACE ("analyze low=%p live=%u name=`%s'\n",
             (void *) obj->low, obj->liveness, obj->name); */

          s->live_obj_count ++;
          s->total_weight += (double) obj->liveness;
          s->weighted_size +=
            (double) (obj->high - obj->low + 1) *
            (double) obj->liveness;

          addr = obj->low;
          for (i=0; i<sizeof(uintptr_t) * 8; i++)
            {
              unsigned bit = addr & 1;
              s->weighted_address_bits[i][bit] += obj->liveness;
              addr = addr >> 1;
            }

          /* Age the liveness value. */
          obj->liveness >>= 1;
        }
    }

  return 0;
}


static void
__mf_adapt_cache ()
{
  struct tree_stats s;
  uintptr_t new_mask = 0;
  unsigned char new_shift;
  float cache_utilization;
  float max_value;
  static float smoothed_new_shift = -1.0;
  unsigned i;

  memset (&s, 0, sizeof (s));

  mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP), __mf_adapt_cache_fn, (void *) & s);
  mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP_I), __mf_adapt_cache_fn, (void *) & s);
  mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_STACK), __mf_adapt_cache_fn, (void *) & s);
  mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_STATIC), __mf_adapt_cache_fn, (void *) & s);
  mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_GUESS), __mf_adapt_cache_fn, (void *) & s);

  /* Maybe we're dealing with funny aging/adaptation parameters, or an
     empty tree.  Just leave the cache alone in such cases, rather
     than risk dying by division-by-zero. */
  if (! (s.obj_count > 0 && s.live_obj_count > 0 && s.total_weight > 0.0))
    return;

  /* Guess a good value for the shift parameter by finding an address bit that is a
     good discriminant of lively objects. */
  max_value = 0.0;
  for (i=0; i<sizeof (uintptr_t)*8; i++)
    {
      float value = (float) s.weighted_address_bits[i][0] * (float) s.weighted_address_bits[i][1];
      if (max_value < value) max_value = value;
    }
  for (i=0; i<sizeof (uintptr_t)*8; i++)
    {
      float shoulder_factor = 0.7;  /* Include slightly less popular bits too. */
      float value = (float) s.weighted_address_bits[i][0] * (float) s.weighted_address_bits[i][1];
      if (value >= max_value * shoulder_factor)
        break;
    }
  if (smoothed_new_shift < 0) smoothed_new_shift = __mf_lc_shift;
  /* Converge toward this slowly to reduce flapping. */
  smoothed_new_shift = 0.9*smoothed_new_shift + 0.1*i;
  new_shift = (unsigned) (smoothed_new_shift + 0.5);
  assert (new_shift < sizeof (uintptr_t)*8);

  /* Count number of used buckets. */
  cache_utilization = 0.0;
  for (i = 0; i < (1 + __mf_lc_mask); i++)
    if (__mf_lookup_cache[i].low != 0 || __mf_lookup_cache[i].high != 0)
      cache_utilization += 1.0;
  cache_utilization /= (1 + __mf_lc_mask);

  new_mask |= 0xffff; /* XXX: force a large cache. */
  new_mask &= (LOOKUP_CACHE_SIZE_MAX - 1);

  VERBOSE_TRACE ("adapt cache obj=%u/%u sizes=%lu/%.0f/%.0f => "
                 "util=%u%% m=%p s=%u\n",
                 s.obj_count, s.live_obj_count, s.total_size, s.total_weight, s.weighted_size,
                 (unsigned)(cache_utilization*100.0), (void *) new_mask, new_shift);

  /* We should reinitialize cache if its parameters have changed. */
  if (new_mask != __mf_lc_mask ||
      new_shift != __mf_lc_shift)
    {
      __mf_lc_mask = new_mask;
      __mf_lc_shift = new_shift;
      /* XXX: race */
      memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
      /* void slot 0 */
      __mf_lookup_cache[0].low = MAXPTR;
    }
}
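
/* Worked illustration (hypothetical numbers) of the shift heuristic
   above.  Two hot objects at 0x1000 and 0x1010, each with liveness 100,
   differ only in address bit 4, so weighted_address_bits[4][0] ==
   weighted_address_bits[4][1] == 100 and the product 10000 dominates all
   other bits; the scan then stops near i == 4 and new_shift drifts
   toward 4, whereupon (0x1000 >> 4) and (0x1010 >> 4) index distinct
   cache slots. */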



/* __mf_find_object[s] */

/* Find overlapping live objects between [low,high].  Return up to
   max_objs of their pointers in objs[].  Return total count of
   overlaps (may exceed max_objs). */

unsigned
__mf_find_objects2 (uintptr_t ptr_low, uintptr_t ptr_high,
                    __mf_object_t **objs, unsigned max_objs, int type)
{
  unsigned count = 0;
  mfsplay_tree t = __mf_object_tree (type);
  mfsplay_tree_key k = (mfsplay_tree_key) ptr_low;
  int direction;

  mfsplay_tree_node n = mfsplay_tree_lookup (t, k);
  /* An exact match for base address implies a hit. */
  if (n != NULL)
    {
      if (count < max_objs)
        objs[count] = (__mf_object_t *) n->value;
      count ++;
    }

  /* Iterate left then right near this key value to find all overlapping objects. */
  for (direction = 0; direction < 2; direction ++)
    {
      /* Reset search origin. */
      k = (mfsplay_tree_key) ptr_low;

      while (1)
        {
          __mf_object_t *obj;

          n = (direction == 0 ? mfsplay_tree_successor (t, k) : mfsplay_tree_predecessor (t, k));
          if (n == NULL) break;
          obj = (__mf_object_t *) n->value;

          if (! (obj->low <= ptr_high && obj->high >= ptr_low)) /* No overlap? */
            break;

          if (count < max_objs)
            objs[count] = (__mf_object_t *) n->value;
          count ++;

          k = (mfsplay_tree_key) obj->low;
        }
    }

  return count;
}
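
/* Hedged usage sketch mirroring the two-pass pattern in __mfu_check:
   probe with a 1-element buffer, and if the returned count exceeds it,
   allocate that many slots and search again (low/high are assumed
   inclusive bounds).  Not compiled. */
#if 0
  __mf_object_t *first[1];
  unsigned n = __mf_find_objects2 (low, high, first, 1, __MF_TYPE_HEAP);
  if (n > 1)
    {
      /* first[] was too small: allocate n pointers, then re-run
         __mf_find_objects2 (low, high, bigbuf, n, __MF_TYPE_HEAP). */
    }
#endif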


unsigned
__mf_find_objects (uintptr_t ptr_low, uintptr_t ptr_high,
                   __mf_object_t **objs, unsigned max_objs)
{
  int type;
  unsigned count = 0;

  /* Search each splay tree for overlaps. */
  for (type = __MF_TYPE_NOACCESS; type <= __MF_TYPE_GUESS; type++)
    {
      unsigned c = __mf_find_objects2 (ptr_low, ptr_high, objs, max_objs, type);
      if (c > max_objs)
        {
          max_objs = 0;
          objs = NULL;
        }
      else /* NB: c may equal 0 */
        {
          max_objs -= c;
          objs += c;
        }
      count += c;
    }

  return count;
}



/* __mf_link_object */

static void
__mf_link_object (__mf_object_t *node)
{
  mfsplay_tree t = __mf_object_tree (node->type);
  mfsplay_tree_insert (t, (mfsplay_tree_key) node->low, (mfsplay_tree_value) node);
}

/* __mf_unlink_object */

static void
__mf_unlink_object (__mf_object_t *node)
{
  mfsplay_tree t = __mf_object_tree (node->type);
  mfsplay_tree_remove (t, (mfsplay_tree_key) node->low);
}

/* __mf_find_dead_objects */

/* Find overlapping dead objects between [low,high].  Return up to
   max_objs of their pointers in objs[].  Return total count of
   overlaps (may exceed max_objs). */

static unsigned
__mf_find_dead_objects (uintptr_t low, uintptr_t high,
                        __mf_object_t **objs, unsigned max_objs)
{
  if (__mf_opts.persistent_count > 0)
    {
      unsigned count = 0;
      unsigned recollection = 0;
      unsigned row = 0;

      assert (low <= high);
      assert (max_objs == 0 || objs != NULL);

      /* Widen the search from the most recent plots in each row, looking
         backward in time. */
      recollection = 0;
      while (recollection < __mf_opts.persistent_count)
        {
          count = 0;

          for (row = 0; row <= __MF_TYPE_MAX_CEM; row ++)
            {
              unsigned plot;
              unsigned i;

              plot = __mf_object_dead_head [row];
              for (i = 0; i <= recollection; i ++)
                {
                  __mf_object_t *obj;

                  /* Look backward through row: it's a circular buffer. */
                  if (plot > 0) plot --;
                  else plot = __mf_opts.persistent_count - 1;

                  obj = __mf_object_cemetary [row][plot];
                  if (obj && obj->low <= high && obj->high >= low)
                    {
                      /* Found an overlapping dead object! */
                      if (count < max_objs)
                        objs [count] = obj;
                      count ++;
                    }
                }
            }

          if (count)
            break;

          /* Look farther back in time. */
          recollection = (recollection * 2) + 1;
        }

      return count;
    }
  else
    return 0;
}

/* __mf_describe_object */

static void
__mf_describe_object (__mf_object_t *obj)
{
  static unsigned epoch = 0;
  if (obj == NULL)
    {
      epoch ++;
      return;
    }

  if (__mf_opts.abbreviate && obj->description_epoch == epoch)
    {
      fprintf (stderr,
               "mudflap %sobject %p: name=`%s'\n",
               (obj->deallocated_p ? "dead " : ""),
               (void *) obj, (obj->name ? obj->name : ""));
      return;
    }
  else
    obj->description_epoch = epoch;

  fprintf (stderr,
           "mudflap %sobject %p: name=`%s'\n"
           "bounds=[%p,%p] size=%lu area=%s check=%ur/%uw liveness=%u%s\n"
           "alloc time=%lu.%06lu pc=%p"
#ifdef LIBMUDFLAPTH
           " thread=%u"
#endif
           "\n",
           (obj->deallocated_p ? "dead " : ""),
           (void *) obj, (obj->name ? obj->name : ""),
           (void *) obj->low, (void *) obj->high,
           (unsigned long) (obj->high - obj->low + 1),
           (obj->type == __MF_TYPE_NOACCESS ? "no-access" :
            obj->type == __MF_TYPE_HEAP ? "heap" :
            obj->type == __MF_TYPE_HEAP_I ? "heap-init" :
            obj->type == __MF_TYPE_STACK ? "stack" :
            obj->type == __MF_TYPE_STATIC ? "static" :
            obj->type == __MF_TYPE_GUESS ? "guess" :
            "unknown"),
           obj->read_count, obj->write_count, obj->liveness,
           obj->watching_p ? " watching" : "",
           obj->alloc_time.tv_sec, obj->alloc_time.tv_usec,
           (void *) obj->alloc_pc
#ifdef LIBMUDFLAPTH
           , (unsigned) obj->alloc_thread
#endif
           );

  if (__mf_opts.backtrace > 0)
    {
      unsigned i;
      for (i=0; i<obj->alloc_backtrace_size; i++)
        fprintf (stderr, "      %s\n", obj->alloc_backtrace[i]);
    }

  if (__mf_opts.persistent_count > 0)
    {
      if (obj->deallocated_p)
        {
          fprintf (stderr, "dealloc time=%lu.%06lu pc=%p"
#ifdef LIBMUDFLAPTH
                   " thread=%u"
#endif
                   "\n",
                   obj->dealloc_time.tv_sec, obj->dealloc_time.tv_usec,
                   (void *) obj->dealloc_pc
#ifdef LIBMUDFLAPTH
                   , (unsigned) obj->dealloc_thread
#endif
                   );


          if (__mf_opts.backtrace > 0)
            {
              unsigned i;
              for (i=0; i<obj->dealloc_backtrace_size; i++)
                fprintf (stderr, "      %s\n", obj->dealloc_backtrace[i]);
            }
        }
    }
}
1821

static int
__mf_report_leaks_fn (mfsplay_tree_node n, void *param)
{
  __mf_object_t *node = (__mf_object_t *) n->value;
  unsigned *count = (unsigned *) param;

  if (count != NULL)
    (*count) ++;

  fprintf (stderr, "Leaked object %u:\n", (*count));
  __mf_describe_object (node);

  return 0;
}

static unsigned
__mf_report_leaks ()
{
  unsigned count = 0;

  (void) mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP),
                               __mf_report_leaks_fn, & count);
  (void) mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP_I),
                               __mf_report_leaks_fn, & count);

  return count;
}

/* ------------------------------------------------------------------------ */
/* __mf_report */

void
__mf_report ()
{
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  __mfu_report ();
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
}

void
__mfu_report ()
{
  if (__mf_opts.collect_stats)
    {
      fprintf (stderr,
               "*******\n"
               "mudflap stats:\n"
               "calls to __mf_check: %lu\n"
               " __mf_register: %lu [%luB, %luB, %luB, %luB, %luB]\n"
               " __mf_unregister: %lu [%luB]\n"
               " __mf_violation: [%lu, %lu, %lu, %lu, %lu]\n",
               __mf_count_check,
               __mf_count_register,
               __mf_total_register_size[0], __mf_total_register_size[1],
               __mf_total_register_size[2], __mf_total_register_size[3],
               __mf_total_register_size[4], /* XXX */
               __mf_count_unregister, __mf_total_unregister_size,
               __mf_count_violation[0], __mf_count_violation[1],
               __mf_count_violation[2], __mf_count_violation[3],
               __mf_count_violation[4]);

      fprintf (stderr,
               "calls with reentrancy: %lu\n", __mf_reentrancy);
#ifdef LIBMUDFLAPTH
      fprintf (stderr,
               " lock contention: %lu\n", __mf_lock_contention);
#endif

      /* Lookup cache stats.  */
      {
        unsigned i;
        unsigned max_reuse = 0;
        unsigned num_used = 0;
        unsigned num_unused = 0;

        for (i = 0; i < LOOKUP_CACHE_SIZE; i++)
          {
            if (__mf_lookup_cache_reusecount[i])
              num_used ++;
            else
              num_unused ++;
            if (max_reuse < __mf_lookup_cache_reusecount[i])
              max_reuse = __mf_lookup_cache_reusecount[i];
          }
        fprintf (stderr, "lookup cache slots used: %u unused: %u peak-reuse: %u\n",
                 num_used, num_unused, max_reuse);
      }

      {
        unsigned live_count;
        live_count = __mf_find_objects (MINPTR, MAXPTR, NULL, 0);
        fprintf (stderr, "number of live objects: %u\n", live_count);
      }

      if (__mf_opts.persistent_count > 0)
        {
          unsigned dead_count = 0;
          unsigned row, plot;
          for (row = 0; row <= __MF_TYPE_MAX_CEM; row ++)
            for (plot = 0 ; plot < __mf_opts.persistent_count; plot ++)
              if (__mf_object_cemetary [row][plot] != 0)
                dead_count ++;
          fprintf (stderr, " zombie objects: %u\n", dead_count);
        }
    }
  if (__mf_opts.print_leaks && (__mf_opts.mudflap_mode == mode_check))
    {
      unsigned l;
      extern void * __mf_wrap_alloca_indirect (size_t c);

      /* Free up any remaining alloca()'d blocks.  */
      __mf_wrap_alloca_indirect (0);
#ifdef HAVE___LIBC_FREERES
      if (__mf_opts.call_libc_freeres)
        {
          extern void __libc_freeres (void);
          __libc_freeres ();
        }
#endif

      __mf_describe_object (NULL); /* Reset description epoch.  */
      l = __mf_report_leaks ();
      fprintf (stderr, "number of leaked objects: %u\n", l);
    }
}

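#if 0
/* Illustrative sketch (not part of the runtime): __mf_report is the
   public, locking entry point declared in mf-runtime.h, so an
   instrumented application (built with -fmudflap and linked with
   -lmudflap) may dump these statistics on demand, e.g. from a
   debugging hook.  */
#include <mf-runtime.h>

void
dump_mudflap_stats (void)
{
  __mf_report ();   /* Prints stats and leaks according to __mf_opts.  */
}
#endif
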
/* __mf_backtrace */

size_t
__mf_backtrace (char ***symbols, void *guess_pc, unsigned guess_omit_levels)
{
  void ** pc_array;
  unsigned pc_array_size = __mf_opts.backtrace + guess_omit_levels;
  unsigned remaining_size;
  unsigned omitted_size = 0;
  unsigned i;
  DECLARE (void, free, void *ptr);
  DECLARE (void *, calloc, size_t c, size_t n);
  DECLARE (void *, malloc, size_t n);

  pc_array = CALL_REAL (calloc, pc_array_size, sizeof (void *) );
#ifdef HAVE_BACKTRACE
  pc_array_size = backtrace (pc_array, pc_array_size);
#else
#define FETCH(n) do { if (pc_array_size >= n) { \
      pc_array[n] = __builtin_return_address(n); \
      if (pc_array[n] == 0) pc_array_size = n; } } while (0)

  /* Unroll some calls to __builtin_return_address, because it
     only accepts a literal integer parameter.  */
  FETCH (0);
#if 0
  /* XXX: __builtin_return_address sometimes crashes (!) on >0 arguments,
     rather than simply returning 0.  :-(  */
  FETCH (1);
  FETCH (2);
  FETCH (3);
  FETCH (4);
  FETCH (5);
  FETCH (6);
  FETCH (7);
  FETCH (8);
  if (pc_array_size > 8) pc_array_size = 9;
#else
  if (pc_array_size > 0) pc_array_size = 1;
#endif

#undef FETCH
#endif

  /* We want to trim the first few levels of the stack traceback,
     since they contain libmudflap wrappers and junk.  If pc_array[]
     ends up containing a non-NULL guess_pc, then trim everything
     before that.  Otherwise, omit the first guess_omit_levels
     entries.  */

  if (guess_pc != NULL)
    for (i=0; i<pc_array_size; i++)
      if (pc_array [i] == guess_pc)
        omitted_size = i;

  if (omitted_size == 0) /* No match?  */
    if (pc_array_size > guess_omit_levels)
      omitted_size = guess_omit_levels;

  remaining_size = pc_array_size - omitted_size;

#ifdef HAVE_BACKTRACE_SYMBOLS
  *symbols = backtrace_symbols (pc_array + omitted_size, remaining_size);
#else
  {
    /* Let's construct a buffer by hand.  It will have <remaining_size>
       char*'s at the front, pointing at individual strings immediately
       afterwards.  */
    void *buffer;
    char *chars;
    char **pointers;
    enum { perline = 30 };
    buffer = CALL_REAL (malloc, remaining_size * (perline + sizeof(char *)));
    pointers = (char **) buffer;
    chars = (char *)buffer + (remaining_size * sizeof (char *));
    for (i = 0; i < remaining_size; i++)
      {
        pointers[i] = chars;
        sprintf (chars, "[0x%p]", pc_array [omitted_size + i]);
        chars = chars + perline;
      }
    *symbols = pointers;
  }
#endif
  CALL_REAL (free, pc_array);

  return remaining_size;
}

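#if 0
/* Illustrative sketch (not part of the runtime): the glibc primitives
   this function builds on, used directly.  backtrace() fills an array
   of return addresses; backtrace_symbols() returns one malloc'd block
   holding the symbolized strings.  */
#include <execinfo.h>
#include <stdio.h>
#include <stdlib.h>

static void
print_backtrace (void)
{
  void *pcs[16];
  int n = backtrace (pcs, 16);
  char **syms = backtrace_symbols (pcs, n);
  if (syms != NULL)
    {
      int i;
      for (i = 0; i < n; i++)
        printf ("%s\n", syms[i]);
      free (syms);   /* One free() releases the whole block.  */
    }
}
#endif
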
/* ------------------------------------------------------------------------ */
/* __mf_violation */

void
__mf_violation (void *ptr, size_t sz, uintptr_t pc,
                const char *location, int type)
{
  char buf [128];
  static unsigned violation_number;
  DECLARE(void, free, void *ptr);

  TRACE ("violation pc=%p location=%s type=%d ptr=%p size=%lu\n",
         (void *) pc,
         (location != NULL ? location : ""), type, ptr, (unsigned long) sz);

  if (__mf_opts.collect_stats)
    __mf_count_violation [(type < 0) ? 0 :
                          (type > __MF_VIOL_WATCH) ? 0 :
                          type] ++;

  /* Print out a basic warning message.  */
  if (__mf_opts.verbose_violations)
    {
      unsigned dead_p;
      unsigned num_helpful = 0;
      struct timeval now = { 0, 0 };
#if HAVE_GETTIMEOFDAY
      gettimeofday (& now, NULL);
#endif

      violation_number ++;
      fprintf (stderr,
               "*******\n"
               "mudflap violation %u (%s): time=%lu.%06lu "
               "ptr=%p size=%lu\npc=%p%s%s%s\n",
               violation_number,
               ((type == __MF_VIOL_READ) ? "check/read" :
                (type == __MF_VIOL_WRITE) ? "check/write" :
                (type == __MF_VIOL_REGISTER) ? "register" :
                (type == __MF_VIOL_UNREGISTER) ? "unregister" :
                (type == __MF_VIOL_WATCH) ? "watch" : "unknown"),
               now.tv_sec, now.tv_usec,
               (void *) ptr, (unsigned long)sz, (void *) pc,
               (location != NULL ? " location=`" : ""),
               (location != NULL ? location : ""),
               (location != NULL ? "'" : ""));

      if (__mf_opts.backtrace > 0)
        {
          char ** symbols;
          unsigned i, num;

          num = __mf_backtrace (& symbols, (void *) pc, 2);
          /* Note: backtrace_symbols calls malloc().  But since we're in
             __mf_violation and presumably __mf_check, it'll detect
             recursion, and not put the new string into the database.  */

          for (i=0; i<num; i++)
            fprintf (stderr, " %s\n", symbols[i]);

          /* Calling free() here would trigger a violation.  */
          CALL_REAL(free, symbols);
        }


      /* Look for nearby objects.  For this, we start with s_low/s_high
         pointing to the given area, looking for overlapping objects.
         If none show up, widen the search area and keep looking.  */

      if (sz == 0) sz = 1;

      for (dead_p = 0; dead_p <= 1; dead_p ++) /* for dead_p in 0 1 */
        {
          enum {max_objs = 3}; /* magic */
          __mf_object_t *objs[max_objs];
          unsigned num_objs = 0;
          uintptr_t s_low, s_high;
          unsigned tries = 0;
          unsigned i;

          s_low = (uintptr_t) ptr;
          s_high = CLAMPSZ (ptr, sz);

          while (tries < 16) /* magic */
            {
              if (dead_p)
                num_objs = __mf_find_dead_objects (s_low, s_high, objs, max_objs);
              else
                num_objs = __mf_find_objects (s_low, s_high, objs, max_objs);

              if (num_objs) /* good enough */
                break;

              tries ++;

              /* XXX: tune this search strategy.  It's too dependent on
                 sz, which can vary from 1 to very big (when array index
                 checking) numbers.  */
              s_low = CLAMPSUB (s_low, (sz * tries * tries));
              s_high = CLAMPADD (s_high, (sz * tries * tries));
            }

          for (i = 0; i < min (num_objs, max_objs); i++)
            {
              __mf_object_t *obj = objs[i];
              uintptr_t low = (uintptr_t) ptr;
              uintptr_t high = CLAMPSZ (ptr, sz);
              unsigned before1 = (low < obj->low) ? obj->low - low : 0;
              unsigned after1 = (low > obj->high) ? low - obj->high : 0;
              unsigned into1 = (high >= obj->low && low <= obj->high) ? low - obj->low : 0;
              unsigned before2 = (high < obj->low) ? obj->low - high : 0;
              unsigned after2 = (high > obj->high) ? high - obj->high : 0;
              unsigned into2 = (high >= obj->low && low <= obj->high) ? high - obj->low : 0;

              fprintf (stderr, "Nearby object %u: checked region begins %uB %s and ends %uB %s\n",
                       num_helpful + i + 1,
                       (before1 ? before1 : after1 ? after1 : into1),
                       (before1 ? "before" : after1 ? "after" : "into"),
                       (before2 ? before2 : after2 ? after2 : into2),
                       (before2 ? "before" : after2 ? "after" : "into"));
              __mf_describe_object (obj);
            }
          num_helpful += num_objs;
        }

      fprintf (stderr, "number of nearby objects: %u\n", num_helpful);
    }

  /* How to finally handle this violation?  */
  switch (__mf_opts.violation_mode)
    {
    case viol_nop:
      break;
    case viol_segv:
      kill (getpid(), SIGSEGV);
      break;
    case viol_abort:
      abort ();
      break;
    case viol_gdb:

      snprintf (buf, 128, "gdb --pid=%u", (unsigned) getpid ());
      system (buf);
      /* XXX: should probably fork() && sleep(GDB_WAIT_PARAMETER)
         instead, and let the forked child execlp() gdb.  That way, this
         subject process can be resumed under the supervision of gdb.
         This can't happen now, since system() only returns when gdb
         dies.  In that case, we need to beware of starting a second
         concurrent gdb child upon the next violation.  (But if the first
         gdb dies, then starting a new one is appropriate.)  */
      break;
    }
}

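#if 0
/* Illustrative sketch (not compiled, not part of the runtime): the
   retry loop above widens the window cumulatively by sz*tries*tries
   on each side, so the extents grow quadratically with each failed
   attempt.  */
#include <stdio.h>

int
main (void)
{
  unsigned long sz = 8, low = 1000, high = 1007;
  unsigned tries;
  for (tries = 1; tries <= 4; tries ++)
    {
      low -= sz * tries * tries;   /* cumulative, as in __mf_violation */
      high += sz * tries * tries;
      printf ("try %u: [%lu,%lu]\n", tries, low, high);
      /* Prints [992,1015], [960,1047], [888,1119], [760,1247].  */
    }
  return 0;
}
#endif
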
/* ------------------------------------------------------------------------ */


unsigned __mf_watch (void *ptr, size_t sz)
{
  unsigned rc;
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  rc = __mf_watch_or_not (ptr, sz, 1);
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
  return rc;
}

unsigned __mf_unwatch (void *ptr, size_t sz)
{
  unsigned rc;
  LOCKTH ();
  rc = __mf_watch_or_not (ptr, sz, 0);
  UNLOCKTH ();
  return rc;
}


static unsigned
__mf_watch_or_not (void *ptr, size_t sz, char flag)
{
  uintptr_t ptr_high = CLAMPSZ (ptr, sz);
  uintptr_t ptr_low = (uintptr_t) ptr;
  unsigned count = 0;

  TRACE ("%s ptr=%p size=%lu\n",
         (flag ? "watch" : "unwatch"), ptr, (unsigned long) sz);

  switch (__mf_opts.mudflap_mode)
    {
    case mode_nop:
    case mode_populate:
    case mode_violate:
      count = 0;
      break;

    case mode_check:
      {
        __mf_object_t **all_ovr_objs;
        unsigned obj_count;
        unsigned n;
        DECLARE (void *, malloc, size_t c);
        DECLARE (void, free, void *p);

        obj_count = __mf_find_objects (ptr_low, ptr_high, NULL, 0);
        VERBOSE_TRACE (" %u:", obj_count);

        all_ovr_objs = CALL_REAL (malloc, (sizeof (__mf_object_t *) * obj_count));
        if (all_ovr_objs == NULL) abort ();
        n = __mf_find_objects (ptr_low, ptr_high, all_ovr_objs, obj_count);
        assert (n == obj_count);

        for (n = 0; n < obj_count; n ++)
          {
            __mf_object_t *obj = all_ovr_objs[n];

            VERBOSE_TRACE (" [%p]", (void *) obj);
            if (obj->watching_p != flag)
              {
                obj->watching_p = flag;
                count ++;

                /* Remove object from cache, to ensure next access
                   goes through __mf_check().  */
                if (flag)
                  __mf_uncache_object (obj);
              }
          }
        CALL_REAL (free, all_ovr_objs);
      }
      break;
    }

  return count;
}

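#if 0
/* Illustrative sketch (not part of the runtime): __mf_watch and
   __mf_unwatch are public entry points declared in mf-runtime.h.
   Marking a region "watched" evicts it from the lookup cache, so the
   next instrumented access reaches __mf_check and raises a
   __MF_VIOL_WATCH violation -- a data watchpoint without a debugger.  */
#include <mf-runtime.h>

static int interesting[4];

void
trap_next_access (void)
{
  unsigned hits = __mf_watch (interesting, sizeof (interesting));
  /* hits == number of registered objects newly marked as watched.  */

  /* ... and later, to stand down: */
  __mf_unwatch (interesting, sizeof (interesting));
}
#endif
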
void
__mf_sigusr1_handler (int num)
{
  __mf_sigusr1_received ++;
}

/* Install or remove SIGUSR1 handler as necessary.
   Also, respond to a received pending SIGUSR1.  */
void
__mf_sigusr1_respond ()
{
  static int handler_installed;

#ifdef SIGUSR1
  /* Manage handler */
  if (__mf_opts.sigusr1_report && ! handler_installed)
    {
      signal (SIGUSR1, __mf_sigusr1_handler);
      handler_installed = 1;
    }
  else if (! __mf_opts.sigusr1_report && handler_installed)
    {
      signal (SIGUSR1, SIG_DFL);
      handler_installed = 0;
    }
#endif

  /* Manage enqueued signals */
  if (__mf_sigusr1_received > __mf_sigusr1_handled)
    {
      __mf_sigusr1_handled ++;
      assert (__mf_get_state () == reentrant);
      __mfu_report ();
      handler_installed = 0; /* We may need to re-enable signal; this might be a SysV library.  */
    }
}

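/* Illustrative usage sketch (not part of the runtime): with the
   SIGUSR1-report feature enabled via MUDFLAP_OPTIONS (the option
   corresponds to __mf_opts.sigusr1_report; consult the libmudflap
   documentation for the exact spelling), an operator can ask a running
   instrumented process for a report from the shell:

     $ MUDFLAP_OPTIONS='-sigusr1-report' ./a.out &
     $ kill -USR1 $!      # report appears on stderr

   Note the handler above only counts signals; __mf_sigusr1_respond is
   invoked from the runtime's own entry paths, so __mfu_report runs at
   a safe point rather than inside the signal context.  */
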
/* XXX: provide an alternative __assert_fail function that cannot
   fail due to libmudflap infinite recursion.  */
#ifndef NDEBUG

static void
write_itoa (int fd, unsigned n)
{
  enum x { bufsize = sizeof(n)*4 };
  char buf [bufsize];
  unsigned i;

  for (i=0; i<bufsize-1; i++)
    {
      unsigned digit = n % 10;
      buf[bufsize-2-i] = digit + '0';
      n /= 10;
      if (n == 0)
        {
          char *m = & buf [bufsize-2-i];
          buf[bufsize-1] = '\0';
          write (fd, m, strlen(m));
          break;
        }
    }
}

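#if 0
/* Illustrative sketch (not compiled): write_itoa fills buf[] from the
   right, so digits come out most-significant first, and
   bufsize = sizeof(n)*4 comfortably exceeds the 10 digits of a 32-bit
   unsigned.  */
static void
write_itoa_demo (void)
{
  write_itoa (2, 407);          /* writes "407" to stderr */
  write_itoa (2, 0);            /* writes "0" */
  write_itoa (2, 4294967295u);  /* writes "4294967295" */
}
#endif
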
void
__assert_fail (const char *msg, const char *file, unsigned line, const char *func)
{
#define write2(string) write (2, (string), strlen ((string)));
  write2("mf");
#ifdef LIBMUDFLAPTH
  write2("(");
  write_itoa (2, (unsigned) pthread_self ());
  write2(")");
#endif
  write2(": assertion failure: `");
  write (2, msg, strlen (msg));
  write2("' in ");
  write (2, func, strlen (func));
  write2(" at ");
  write (2, file, strlen (file));
  write2(":");
  write_itoa (2, line);
  write2("\n");
#undef write2
  abort ();
}


#endif



/* Adapted splay tree code, originally from libiberty.  It has been
   specialized for libmudflap as requested by RMS.  */

static void
mfsplay_tree_free (void *p)
{
  DECLARE (void, free, void *p);
  CALL_REAL (free, p);
}

static void *
mfsplay_tree_xmalloc (size_t s)
{
  DECLARE (void *, malloc, size_t s);
  return CALL_REAL (malloc, s);
}


static void mfsplay_tree_splay (mfsplay_tree, mfsplay_tree_key);
static mfsplay_tree_node mfsplay_tree_splay_helper (mfsplay_tree,
                                                    mfsplay_tree_key,
                                                    mfsplay_tree_node *,
                                                    mfsplay_tree_node *,
                                                    mfsplay_tree_node *);

/* Help splay SP around KEY.  PARENT and GRANDPARENT are the parent
   and grandparent, respectively, of NODE.  */

static mfsplay_tree_node
mfsplay_tree_splay_helper (mfsplay_tree sp,
                           mfsplay_tree_key key,
                           mfsplay_tree_node * node,
                           mfsplay_tree_node * parent,
                           mfsplay_tree_node * grandparent)
{
  mfsplay_tree_node *next;
  mfsplay_tree_node n;
  int comparison;

  n = *node;

  if (!n)
    return *parent;

  comparison = ((key > n->key) ? 1 : ((key < n->key) ? -1 : 0));

  if (comparison == 0)
    /* We've found the target.  */
    next = 0;
  else if (comparison < 0)
    /* The target is to the left.  */
    next = &n->left;
  else
    /* The target is to the right.  */
    next = &n->right;

  if (next)
    {
      /* Check whether our recursion depth is too high.  Abort this search,
         and signal that a rebalance is required to continue.  */
      if (sp->depth > sp->max_depth)
        {
          sp->rebalance_p = 1;
          return n;
        }

      /* Continue down the tree.  */
      sp->depth ++;
      n = mfsplay_tree_splay_helper (sp, key, next, node, parent);
      sp->depth --;

      /* The recursive call will change the place to which NODE
         points.  */
      if (*node != n || sp->rebalance_p)
        return n;
    }

  if (!parent)
    /* NODE is the root.  We are done.  */
    return n;

  /* First, handle the case where there is no grandparent (i.e.,
     *PARENT is the root of the tree.)  */
  if (!grandparent)
    {
      if (n == (*parent)->left)
        {
          *node = n->right;
          n->right = *parent;
        }
      else
        {
          *node = n->left;
          n->left = *parent;
        }
      *parent = n;
      return n;
    }

  /* Next handle the cases where both N and *PARENT are left children,
     or where both are right children.  */
  if (n == (*parent)->left && *parent == (*grandparent)->left)
    {
      mfsplay_tree_node p = *parent;

      (*grandparent)->left = p->right;
      p->right = *grandparent;
      p->left = n->right;
      n->right = p;
      *grandparent = n;
      return n;
    }
  else if (n == (*parent)->right && *parent == (*grandparent)->right)
    {
      mfsplay_tree_node p = *parent;

      (*grandparent)->right = p->left;
      p->left = *grandparent;
      p->right = n->left;
      n->left = p;
      *grandparent = n;
      return n;
    }

  /* Finally, deal with the case where N is a left child, but *PARENT
     is a right child, or vice versa.  */
  if (n == (*parent)->left)
    {
      (*parent)->left = n->right;
      n->right = *parent;
      (*grandparent)->right = n->left;
      n->left = *grandparent;
      *grandparent = n;
      return n;
    }
  else
    {
      (*parent)->right = n->left;
      n->left = *parent;
      (*grandparent)->left = n->right;
      n->right = *grandparent;
      *grandparent = n;
      return n;
    }
}

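#if 0
/* Illustrative sketch (not compiled): the three rotation cases above,
   moving node N toward the root.

   Zig (P is the root):        Zig-zig (N, P both left children):
       P          N                  G            N
      / \        / \                / \          / \
     N   c  ->  a   P              P   d        a   P
    / \            / \            / \      ->      / \
   a   b          b   c          N   c            b   G
                                / \                  / \
                               a   b                c   d

   Zig-zag (N is a left child, P is a right child):
       G                 N
      / \              /   \
     a   P            G     P
        / \    ->    / \   / \
       N   d        a   b c   d
      / \
     b   c
*/
#endif
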

static int
mfsplay_tree_rebalance_helper1 (mfsplay_tree_node n, void *array_ptr)
{
  mfsplay_tree_node **p = array_ptr;
  *(*p) = n;
  (*p)++;
  return 0;
}


static mfsplay_tree_node
mfsplay_tree_rebalance_helper2 (mfsplay_tree_node * array, unsigned low,
                                unsigned high)
{
  unsigned middle = low + (high - low) / 2;
  mfsplay_tree_node n = array[middle];

  /* Note that since we're producing a balanced binary tree, it is not a problem
     that this function is recursive.  */
  if (low + 1 <= middle)
    n->left = mfsplay_tree_rebalance_helper2 (array, low, middle - 1);
  else
    n->left = NULL;

  if (middle + 1 <= high)
    n->right = mfsplay_tree_rebalance_helper2 (array, middle + 1, high);
  else
    n->right = NULL;

  return n;
}


/* Rebalance the entire tree.  Do this by copying all the node
   pointers into an array, then cleverly re-linking them.  */
static void
mfsplay_tree_rebalance (mfsplay_tree sp)
{
  mfsplay_tree_node *all_nodes, *all_nodes_1;

  if (sp->num_keys <= 2)
    return;

  all_nodes = mfsplay_tree_xmalloc (sizeof (mfsplay_tree_node) * sp->num_keys);

  /* Traverse all nodes to copy their addresses into this array.  */
  all_nodes_1 = all_nodes;
  mfsplay_tree_foreach (sp, mfsplay_tree_rebalance_helper1,
                        (void *) &all_nodes_1);

  /* Relink all the nodes.  */
  sp->root = mfsplay_tree_rebalance_helper2 (all_nodes, 0, sp->num_keys - 1);

  mfsplay_tree_free (all_nodes);
}

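#if 0
/* Illustrative sketch (not compiled): rooting each subtree at the
   midpoint of its array slice yields a balanced tree.  For seven
   in-order nodes, mfsplay_tree_rebalance_helper2 (array, 0, 6) picks
   index 3 as the root, then indices 1 and 5, then the leaves:

            a[3]
           /    \
        a[1]    a[5]
        /  \    /  \
     a[0] a[2] a[4] a[6]
*/
#endif
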

/* Splay SP around KEY.  */
static void
mfsplay_tree_splay (mfsplay_tree sp, mfsplay_tree_key key)
{
  if (sp->root == 0)
    return;

  /* If we just splayed the tree with the same key, do nothing.  */
  if (sp->last_splayed_key_p &&
      (sp->last_splayed_key == key))
    return;

  /* Compute a maximum recursion depth for a splay tree with NUM nodes.
     The idea is to limit excessive stack usage if we're facing
     degenerate access patterns.  Unfortunately such patterns can occur
     e.g. during static initialization, where many static objects might
     be registered in increasing address sequence, or during a case where
     large tree-like heap data structures are allocated quickly.

     On x86, this corresponds to roughly 200K of stack usage.
     XXX: For libmudflapth, this could be a function of __mf_opts.thread_stack.  */
  sp->max_depth = 2500;
  sp->rebalance_p = sp->depth = 0;

  mfsplay_tree_splay_helper (sp, key, &sp->root, NULL, NULL);
  if (sp->rebalance_p)
    {
      mfsplay_tree_rebalance (sp);

      sp->rebalance_p = sp->depth = 0;
      mfsplay_tree_splay_helper (sp, key, &sp->root, NULL, NULL);

      if (sp->rebalance_p)
        abort ();
    }


  /* Cache this splay key.  */
  sp->last_splayed_key = key;
  sp->last_splayed_key_p = 1;
}


/* Allocate a new splay tree.  */
static mfsplay_tree
mfsplay_tree_new ()
{
  mfsplay_tree sp = mfsplay_tree_xmalloc (sizeof (struct mfsplay_tree_s));
  sp->root = NULL;
  sp->last_splayed_key_p = 0;
  sp->num_keys = 0;

  return sp;
}


/* Insert a new node (associating KEY with DATA) into SP.  If a
   previous node with the indicated KEY exists, its data is replaced
   with the new value.  Returns the new node.  */
static mfsplay_tree_node
mfsplay_tree_insert (mfsplay_tree sp, mfsplay_tree_key key, mfsplay_tree_value value)
{
  int comparison = 0;

  mfsplay_tree_splay (sp, key);

  if (sp->root)
    comparison = ((sp->root->key > key) ? 1 :
                  ((sp->root->key < key) ? -1 : 0));

  if (sp->root && comparison == 0)
    {
      /* If the root of the tree already has the indicated KEY, just
         replace the value with VALUE.  */
      sp->root->value = value;
    }
  else
    {
      /* Create a new node, and insert it at the root.  */
      mfsplay_tree_node node;

      node = mfsplay_tree_xmalloc (sizeof (struct mfsplay_tree_node_s));
      node->key = key;
      node->value = value;
      sp->num_keys++;
      if (!sp->root)
        node->left = node->right = 0;
      else if (comparison < 0)
        {
          node->left = sp->root;
          node->right = node->left->right;
          node->left->right = 0;
        }
      else
        {
          node->right = sp->root;
          node->left = node->right->left;
          node->right->left = 0;
        }

      sp->root = node;
      sp->last_splayed_key_p = 0;
    }

  return sp->root;
}

/* Remove KEY from SP.  It is not an error if it did not exist.  */

static void
mfsplay_tree_remove (mfsplay_tree sp, mfsplay_tree_key key)
{
  mfsplay_tree_splay (sp, key);
  sp->last_splayed_key_p = 0;
  if (sp->root && (sp->root->key == key))
    {
      mfsplay_tree_node left, right;
      left = sp->root->left;
      right = sp->root->right;
      /* Delete the root node itself.  */
      mfsplay_tree_free (sp->root);
      sp->num_keys--;
      /* One of the children is now the root.  Doesn't matter much
         which, so long as we preserve the properties of the tree.  */
      if (left)
        {
          sp->root = left;
          /* If there was a right child as well, hang it off the
             right-most leaf of the left child.  */
          if (right)
            {
              while (left->right)
                left = left->right;
              left->right = right;
            }
        }
      else
        sp->root = right;
    }
}

/* Lookup KEY in SP, returning VALUE if present, and NULL
   otherwise.  */

static mfsplay_tree_node
mfsplay_tree_lookup (mfsplay_tree sp, mfsplay_tree_key key)
{
  mfsplay_tree_splay (sp, key);
  if (sp->root && (sp->root->key == key))
    return sp->root;
  else
    return 0;
}

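#if 0
/* Illustrative sketch (not compiled): exercising this file's
   splay-tree API directly.  Keys are uintptr_t values (libmudflap
   uses object low addresses, per __mf_link_object); the values here
   are arbitrary.  */
static void
mfsplay_demo (void)
{
  mfsplay_tree t = mfsplay_tree_new ();
  mfsplay_tree_node n;

  mfsplay_tree_insert (t, (mfsplay_tree_key) 0x1000, (mfsplay_tree_value) "low");
  mfsplay_tree_insert (t, (mfsplay_tree_key) 0x2000, (mfsplay_tree_value) "high");

  /* Lookup splays the key to the root and returns its node, or 0.  */
  n = mfsplay_tree_lookup (t, (mfsplay_tree_key) 0x1000);
  /* Here n != 0 and n->value points to "low".  */

  mfsplay_tree_remove (t, (mfsplay_tree_key) 0x1000);  /* no-op if absent */
}
#endif
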

/* Return the immediate predecessor KEY, or NULL if there is no
   predecessor.  KEY need not be present in the tree.  */

static mfsplay_tree_node
mfsplay_tree_predecessor (mfsplay_tree sp, mfsplay_tree_key key)
{
  int comparison;
  mfsplay_tree_node node;
  /* If the tree is empty, there is certainly no predecessor.  */
  if (!sp->root)
    return NULL;
  /* Splay the tree around KEY.  That will leave either the KEY
     itself, its predecessor, or its successor at the root.  */
  mfsplay_tree_splay (sp, key);
  comparison = ((sp->root->key > key) ? 1 :
                ((sp->root->key < key) ? -1 : 0));

  /* If the predecessor is at the root, just return it.  */
  if (comparison < 0)
    return sp->root;
  /* Otherwise, find the rightmost element of the left subtree.  */
  node = sp->root->left;
  if (node)
    while (node->right)
      node = node->right;
  return node;
}

/* Return the immediate successor KEY, or NULL if there is no
   successor.  KEY need not be present in the tree.  */

static mfsplay_tree_node
mfsplay_tree_successor (mfsplay_tree sp, mfsplay_tree_key key)
{
  int comparison;
  mfsplay_tree_node node;
  /* If the tree is empty, there is certainly no successor.  */
  if (!sp->root)
    return NULL;
  /* Splay the tree around KEY.  That will leave either the KEY
     itself, its predecessor, or its successor at the root.  */
  mfsplay_tree_splay (sp, key);
  comparison = ((sp->root->key > key) ? 1 :
                ((sp->root->key < key) ? -1 : 0));
  /* If the successor is at the root, just return it.  */
  if (comparison > 0)
    return sp->root;
  /* Otherwise, find the leftmost element of the right subtree.  */
  node = sp->root->right;
  if (node)
    while (node->left)
      node = node->left;
  return node;
}

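#if 0
/* Illustrative sketch (not compiled): predecessor/successor let a
   caller find the nearest key around an arbitrary address even when
   that address is not itself in the tree -- e.g. "which registered
   object, if any, starts at or below this pointer?".  */
static __mf_object_t *
nearest_object_at_or_below (mfsplay_tree t, uintptr_t addr)
{
  mfsplay_tree_node n;

  /* Exact hit?  */
  n = mfsplay_tree_lookup (t, (mfsplay_tree_key) addr);
  if (n == NULL)
    /* No: take the greatest key strictly below addr.  */
    n = mfsplay_tree_predecessor (t, (mfsplay_tree_key) addr);
  return n ? (__mf_object_t *) n->value : NULL;
}
#endif
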
/* Call FN, passing it the DATA, for every node in SP, following an
   in-order traversal.  If FN ever returns a non-zero value, the
   iteration ceases immediately, and the value is returned.
   Otherwise, this function returns 0.

   This function simulates recursion using dynamically allocated
   arrays, since it may be called from mfsplay_tree_rebalance(), which
   in turn means that the tree is already uncomfortably deep for stack
   space limits.  */
static int
mfsplay_tree_foreach (mfsplay_tree st, mfsplay_tree_foreach_fn fn, void *data)
{
  mfsplay_tree_node *stack1;
  char *stack2;
  unsigned sp;
  int val = 0;
  enum s { s_left, s_here, s_right, s_up };

  if (st->root == NULL) /* => num_keys == 0 */
    return 0;

  stack1 = mfsplay_tree_xmalloc (sizeof (mfsplay_tree_node) * st->num_keys);
  stack2 = mfsplay_tree_xmalloc (sizeof (char) * st->num_keys);

  sp = 0;
  stack1 [sp] = st->root;
  stack2 [sp] = s_left;

  while (1)
    {
      mfsplay_tree_node n;
      enum s s;

      n = stack1 [sp];
      s = stack2 [sp];

      /* Handle each of the four possible states separately.  */

      /* 1: We're here to traverse the left subtree (if any).  */
      if (s == s_left)
        {
          stack2 [sp] = s_here;
          if (n->left != NULL)
            {
              sp ++;
              stack1 [sp] = n->left;
              stack2 [sp] = s_left;
            }
        }

      /* 2: We're here to traverse this node.  */
      else if (s == s_here)
        {
          stack2 [sp] = s_right;
          val = (*fn) (n, data);
          if (val) break;
        }

      /* 3: We're here to traverse the right subtree (if any).  */
      else if (s == s_right)
        {
          stack2 [sp] = s_up;
          if (n->right != NULL)
            {
              sp ++;
              stack1 [sp] = n->right;
              stack2 [sp] = s_left;
            }
        }

      /* 4: We're here after both subtrees (if any) have been traversed.  */
      else if (s == s_up)
        {
          /* Pop the stack.  */
          if (sp == 0) break; /* Popping off the root node: we're finished!  */
          sp --;
        }

      else
        abort ();
    }

  mfsplay_tree_free (stack1);
  mfsplay_tree_free (stack2);
  return val;
}