]> gcc.gnu.org Git - gcc.git/blob - libmudflap/mf-hooks1.c
All files: Update FSF address.
[gcc.git] / libmudflap / mf-hooks1.c
1 /* Mudflap: narrow-pointer bounds-checking by tree rewriting.
2 Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
3 Contributed by Frank Ch. Eigler <fche@redhat.com>
4 and Graydon Hoare <graydon@redhat.com>
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 2, or (at your option) any later
11 version.
12
13 In addition to the permissions in the GNU General Public License, the
14 Free Software Foundation gives you unlimited permission to link the
15 compiled version of this file into combinations with other programs,
16 and to distribute those combinations without any restriction coming
17 from the use of this file. (The General Public License restrictions
18 do apply in other respects; for example, they cover modification of
19 the file, and distribution when not linked into a combine
20 executable.)
21
22 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
23 WARRANTY; without even the implied warranty of MERCHANTABILITY or
24 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
25 for more details.
26
27 You should have received a copy of the GNU General Public License
28 along with GCC; see the file COPYING. If not, write to the Free
29 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
30 02110-1301, USA. */
31
32
33 #include "config.h"
34
35 #ifndef HAVE_SOCKLEN_T
36 #define socklen_t int
37 #endif
38
39
40 /* These attempt to coax various unix flavours to declare all our
41 needed tidbits in the system headers. */
42 #if !defined(__FreeBSD__) && !defined(__APPLE__)
43 #define _POSIX_SOURCE
44 #endif /* Some BSDs break <sys/socket.h> if this is defined. */
45 #define _GNU_SOURCE
46 #define _XOPEN_SOURCE
47 #define _BSD_TYPES
48 #define __EXTENSIONS__
49 #define _ALL_SOURCE
50 #define _LARGE_FILE_API
51 #define _XOPEN_SOURCE_EXTENDED 1
52
53 #include <string.h>
54 #include <stdio.h>
55 #include <stdlib.h>
56 #include <sys/time.h>
57 #include <sys/types.h>
58 #include <unistd.h>
59 #include <assert.h>
60 #include <errno.h>
61 #include <limits.h>
62 #include <time.h>
63
64 #include "mf-runtime.h"
65 #include "mf-impl.h"
66
67 #ifdef _MUDFLAP
68 #error "Do not compile this file with -fmudflap!"
69 #endif
70
71
72 /* Memory allocation related hook functions. Some of these are
73 intercepted via linker wrapping or symbol interposition. Others
74 use plain macros in mf-runtime.h. */
75
76
#if PIC
/* A special bootstrap variant.  During early startup (while the
   dynamic linker is still resolving the real malloc) this hands out
   slots from a small static pool.  Slots are never reclaimed
   (bufs_used is never cleared); NULL is returned when the pool is
   exhausted or the request does not fit a slot.  NOTE(review): the
   pool is a plain char array, so its alignment may be weaker than
   what malloc guarantees — TODO confirm callers only need byte
   alignment at bootstrap time.  */
void *
__mf_0fn_malloc (size_t c)
{
  enum foo { BS = 4096, NB = 10 };
  static char bufs[NB][BS];
  static unsigned bufs_used[NB];
  unsigned i;

  /* A request that doesn't fit a slot can never be satisfied; reject
     it once up front rather than re-testing the size on every loop
     iteration as the original did.  */
  if (c >= BS)
    return NULL;

  for (i = 0; i < NB; i++)
    {
      if (! bufs_used[i])
	{
	  bufs_used[i] = 1;
	  return & bufs[i][0];
	}
    }
  return NULL;
}
#endif
98
99
100 #undef malloc
101 WRAPPER(void *, malloc, size_t c)
102 {
103 size_t size_with_crumple_zones;
104 DECLARE(void *, malloc, size_t c);
105 void *result;
106 BEGIN_PROTECT (malloc, c);
107
108 size_with_crumple_zones =
109 CLAMPADD(c,CLAMPADD(__mf_opts.crumple_zone,
110 __mf_opts.crumple_zone));
111 result = (char *) CALL_REAL (malloc, size_with_crumple_zones);
112
113 if (LIKELY(result))
114 {
115 result += __mf_opts.crumple_zone;
116 __mf_register (result, c, __MF_TYPE_HEAP, "malloc region");
117 /* XXX: register __MF_TYPE_NOACCESS for crumple zones. */
118 }
119
120 return result;
121 }
122
123
#ifdef PIC
/* A special bootstrap variant.  The backing __mf_0fn_malloc pool is
   static storage and each slot is handed out at most once, so a fresh
   slot is already zeroed as calloc requires.  */
void *
__mf_0fn_calloc (size_t c, size_t n)
{
  /* Reject multiplication overflow: a wrapped product would silently
     return a buffer smaller than the caller asked for.  */
  if (n != 0 && c > ((size_t) -1) / n)
    return NULL;
  return __mf_0fn_malloc (c * n);
}
#endif
132
133
134 #undef calloc
135 WRAPPER(void *, calloc, size_t c, size_t n)
136 {
137 size_t size_with_crumple_zones;
138 DECLARE(void *, calloc, size_t, size_t);
139 DECLARE(void *, malloc, size_t);
140 DECLARE(void *, memset, void *, int, size_t);
141 char *result;
142 BEGIN_PROTECT (calloc, c, n);
143
144 size_with_crumple_zones =
145 CLAMPADD((c * n), /* XXX: CLAMPMUL */
146 CLAMPADD(__mf_opts.crumple_zone,
147 __mf_opts.crumple_zone));
148 result = (char *) CALL_REAL (malloc, size_with_crumple_zones);
149
150 if (LIKELY(result))
151 memset (result, 0, size_with_crumple_zones);
152
153 if (LIKELY(result))
154 {
155 result += __mf_opts.crumple_zone;
156 __mf_register (result, c*n /* XXX: clamp */, __MF_TYPE_HEAP_I, "calloc region");
157 /* XXX: register __MF_TYPE_NOACCESS for crumple zones. */
158 }
159
160 return result;
161 }
162
163
#if PIC
/* A special bootstrap variant.  Resizing is not supported at
   bootstrap time; always report failure.  */
void *
__mf_0fn_realloc (void *buf, size_t c)
{
  (void) buf;
  (void) c;
  return NULL;
}
#endif
172
173
/* Wrapper for realloc.  Translates the user pointer back to the real
   allocation base (before the leading crumple zone), resizes, then
   swaps the runtime registration from the old region to the new one
   while heap wiping is temporarily disabled.  */
#undef realloc
WRAPPER(void *, realloc, void *buf, size_t c)
{
  DECLARE(void * , realloc, void *, size_t);
  size_t size_with_crumple_zones;
  char *base = buf;
  unsigned saved_wipe_heap;
  char *result;
  BEGIN_PROTECT (realloc, buf, c);

  /* Step back over the leading crumple zone: `base' is the address
     the underlying allocator actually returned for `buf'.  A NULL
     buf (pure-allocation realloc) is passed through untouched.  */
  if (LIKELY(buf))
    base -= __mf_opts.crumple_zone;

  /* Pad the new size with a crumple zone on each side; CLAMPADD
     saturates rather than wrapping.  */
  size_with_crumple_zones =
    CLAMPADD(c, CLAMPADD(__mf_opts.crumple_zone,
			 __mf_opts.crumple_zone));
  result = (char *) CALL_REAL (realloc, base, size_with_crumple_zones);

  /* Ensure heap wiping doesn't occur during this peculiar
     unregister/reregister pair.  The lock is held and the state set
     to `reentrant' so the unlocked __mfu_* entry points are safe.  */
  LOCKTH ();
  __mf_set_state (reentrant);
  saved_wipe_heap = __mf_opts.wipe_heap;
  __mf_opts.wipe_heap = 0;

  /* Drop the old registration even if the resize failed — the real
     realloc may still have freed/moved the block on success paths,
     and this mirrors the original ordering.  */
  if (LIKELY(buf))
    __mfu_unregister (buf, 0, __MF_TYPE_HEAP_I);
  /* NB: underlying region may have been __MF_TYPE_HEAP. */

  if (LIKELY(result))
    {
      /* Hand back the address past the leading zone and register the
	 c-byte user region.  */
      result += __mf_opts.crumple_zone;
      __mfu_register (result, c, __MF_TYPE_HEAP_I, "realloc region");
      /* XXX: register __MF_TYPE_NOACCESS for crumple zones. */
    }

  /* Restore previous setting. */
  __mf_opts.wipe_heap = saved_wipe_heap;

  __mf_set_state (active);
  UNLOCKTH ();

  return result;
}
218
219
#if PIC
/* A special bootstrap variant.  Bootstrap allocations come from a
   static pool that is never reclaimed, so freeing is a no-op.  */
void
__mf_0fn_free (void *buf)
{
  (void) buf;
}
#endif
228
/* Wrapper for free.  Unregisters the region, then either frees the
   real allocation base immediately or parks the pointer in a delay
   queue so that premature reuse of recently-freed memory can still be
   caught by the runtime.  */
#undef free
WRAPPER(void, free, void *buf)
{
  /* Use a circular queue to delay some number (__mf_opts.free_queue_length) of free()s. */
  static void *free_queue [__MF_FREEQ_MAX];
  static unsigned free_ptr = 0;
  static int freeq_initialized = 0;
  DECLARE(void, free, void *);

  BEGIN_PROTECT (free, buf);

  /* free(NULL) is a no-op per the C standard.  */
  if (UNLIKELY(buf == NULL))
    return;

  /* Lazily zero the queue once, under the lock.  */
  LOCKTH ();
  if (UNLIKELY(!freeq_initialized))
    {
      memset (free_queue, 0,
	      __MF_FREEQ_MAX * sizeof (void *));
      freeq_initialized = 1;
    }
  UNLOCKTH ();

  __mf_unregister (buf, 0, __MF_TYPE_HEAP_I);
  /* NB: underlying region may have been __MF_TYPE_HEAP. */

  if (UNLIKELY(__mf_opts.free_queue_length > 0))
    {
      /* Deferred mode: evict the oldest queued pointer (if any) and
	 enqueue this one in its place.  The eviction candidate is
	 adjusted back to the real allocation base before release.  */
      char *freeme = NULL;
      LOCKTH ();
      if (free_queue [free_ptr] != NULL)
	{
	  freeme = free_queue [free_ptr];
	  freeme -= __mf_opts.crumple_zone;
	}
      free_queue [free_ptr] = buf;
      /* Advance the ring cursor, wrapping at the configured length.  */
      free_ptr = (free_ptr == (__mf_opts.free_queue_length-1) ? 0 : free_ptr + 1);
      UNLOCKTH ();
      /* The real free happens outside the lock.  */
      if (freeme)
	{
	  if (__mf_opts.trace_mf_calls)
	    {
	      VERBOSE_TRACE ("freeing deferred pointer %p (crumple %u)\n",
			     (void *) freeme,
			     __mf_opts.crumple_zone);
	    }
	  CALL_REAL (free, freeme);
	}
    }
  else
    {
      /* back pointer up a bit to the beginning of crumple zone */
      char *base = (char *)buf;
      base -= __mf_opts.crumple_zone;
      if (__mf_opts.trace_mf_calls)
	{
	  VERBOSE_TRACE ("freeing pointer %p = %p - %u\n",
			 (void *) base,
			 (void *) buf,
			 __mf_opts.crumple_zone);
	}
      CALL_REAL (free, base);
    }
}
293
294
#if PIC
/* A special bootstrap variant.  mmap cannot be emulated before the
   real symbol is resolved; always report MAP_FAILED.  */
void *
__mf_0fn_mmap (void *start, size_t l, int prot, int f, int fd, off_t off)
{
  (void) start;
  (void) l;
  (void) prot;
  (void) f;
  (void) fd;
  (void) off;
  return (void *) -1;
}
#endif
303
304
305 #undef mmap
306 WRAPPER(void *, mmap,
307 void *start, size_t length, int prot,
308 int flags, int fd, off_t offset)
309 {
310 DECLARE(void *, mmap, void *, size_t, int,
311 int, int, off_t);
312 void *result;
313 BEGIN_PROTECT (mmap, start, length, prot, flags, fd, offset);
314
315 result = CALL_REAL (mmap, start, length, prot,
316 flags, fd, offset);
317
318 /*
319 VERBOSE_TRACE ("mmap (%08lx, %08lx, ...) => %08lx\n",
320 (uintptr_t) start, (uintptr_t) length,
321 (uintptr_t) result);
322 */
323
324 if (result != (void *)-1)
325 {
326 /* Register each page as a heap object. Why not register it all
327 as a single segment? That's so that a later munmap() call
328 can unmap individual pages. XXX: would __MF_TYPE_GUESS make
329 this more automatic? */
330 size_t ps = getpagesize ();
331 uintptr_t base = (uintptr_t) result;
332 uintptr_t offset;
333
334 for (offset=0; offset<length; offset+=ps)
335 {
336 /* XXX: We could map PROT_NONE to __MF_TYPE_NOACCESS. */
337 /* XXX: Unaccessed HEAP pages are reported as leaks. Is this
338 appropriate for unaccessed mmap pages? */
339 __mf_register ((void *) CLAMPADD (base, offset), ps,
340 __MF_TYPE_HEAP_I, "mmap page");
341 }
342 }
343
344 return result;
345 }
346
347
#if PIC
/* A special bootstrap variant.  Unmapping cannot be emulated at
   bootstrap time; always report failure.  */
int
__mf_0fn_munmap (void *start, size_t length)
{
  (void) start;
  (void) length;
  return -1;
}
#endif
356
357
358 #undef munmap
359 WRAPPER(int , munmap, void *start, size_t length)
360 {
361 DECLARE(int, munmap, void *, size_t);
362 int result;
363 BEGIN_PROTECT (munmap, start, length);
364
365 result = CALL_REAL (munmap, start, length);
366
367 /*
368 VERBOSE_TRACE ("munmap (%08lx, %08lx, ...) => %08lx\n",
369 (uintptr_t) start, (uintptr_t) length,
370 (uintptr_t) result);
371 */
372
373 if (result == 0)
374 {
375 /* Unregister each page as a heap object. */
376 size_t ps = getpagesize ();
377 uintptr_t base = (uintptr_t) start & (~ (ps - 1)); /* page align */
378 uintptr_t offset;
379
380 for (offset=0; offset<length; offset+=ps)
381 __mf_unregister ((void *) CLAMPADD (base, offset), ps, __MF_TYPE_HEAP_I);
382 }
383 return result;
384 }
385
386
/* This wrapper is a little different, as it's called indirectly from
   __mf_fini also to clean up pending allocations.  It emulates
   alloca() with heap storage: each call first releases tracked blocks
   belonging to stack frames that have since returned, then allocates
   and registers a new block (when c > 0).  Calling it with c == 0
   therefore acts as a pure garbage-collection pass.  */
void *
__mf_wrap_alloca_indirect (size_t c)
{
  DECLARE (void *, malloc, size_t);
  DECLARE (void, free, void *);

  /* This struct, a linked list, tracks alloca'd objects.  The newest
     object is at the head of the list.  If we detect that we've
     popped a few levels of stack, then the listed objects are freed
     as needed.  NB: The tracking struct is allocated with
     real_malloc; the user data with wrap_malloc.
  */
  struct alloca_tracking { void *ptr; void *stack; struct alloca_tracking* next; };
  static struct alloca_tracking *alloca_history = NULL;

  /* Current frame address, used as a proxy for stack depth when
     deciding which tracked blocks are stale.  */
  void *stack = __builtin_frame_address (0);
  void *result;
  struct alloca_tracking *track;

  TRACE ("%s\n", __PRETTY_FUNCTION__);
  VERBOSE_TRACE ("alloca stack level %p\n", (void *) stack);

  /* XXX: thread locking! */

  /* Free any previously alloca'd blocks that belong to deeper-nested functions,
     which must therefore have exited by now.  */

  /* NOTE(review): hard-codes a downward-growing stack; correct for
     x86 per the comment, unverified elsewhere.  */
#define DEEPER_THAN < /* XXX: for x86; steal find_stack_direction() from libiberty/alloca.c */

  while (alloca_history &&
	 ((uintptr_t) alloca_history->stack DEEPER_THAN (uintptr_t) stack))
    {
      struct alloca_tracking *next = alloca_history->next;
      /* Unregister and release both the user block and its tracking
	 node before unlinking it from the history list.  */
      __mf_unregister (alloca_history->ptr, 0, __MF_TYPE_HEAP);
      CALL_REAL (free, alloca_history->ptr);
      CALL_REAL (free, alloca_history);
      alloca_history = next;
    }

  /* Allocate new block.  */
  result = NULL;
  if (LIKELY (c > 0)) /* alloca(0) causes no allocation.  */
    {
      /* Allocate the tracking node first so a failure leaves no
	 half-tracked user block behind.  */
      track = (struct alloca_tracking *) CALL_REAL (malloc,
						    sizeof (struct alloca_tracking));
      if (LIKELY (track != NULL))
	{
	  result = CALL_REAL (malloc, c);
	  if (UNLIKELY (result == NULL))
	    {
	      CALL_REAL (free, track);
	      /* Too bad.  XXX: What about errno? */
	    }
	  else
	    {
	      /* Register the user block and push it onto the head of
		 the tracking list (newest first).  */
	      __mf_register (result, c, __MF_TYPE_HEAP, "alloca region");
	      track->ptr = result;
	      track->stack = stack;
	      track->next = alloca_history;
	      alloca_history = track;
	    }
	}
    }

  return result;
}
455
456
457 #undef alloca
458 WRAPPER(void *, alloca, size_t c)
459 {
460 return __mf_wrap_alloca_indirect (c);
461 }
462
This page took 0.05426 seconds and 6 git commands to generate.