/* Mudflap: narrow-pointer bounds-checking by tree rewriting.
   Copyright (C) 2002, 2003, 2004, 2009 Free Software Foundation, Inc.
   Contributed by Frank Ch. Eigler <fche@redhat.com>
   and Graydon Hoare <graydon@redhat.com>

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it under
   the terms of the GNU General Public License as published by the Free
   Software Foundation; either version 3, or (at your option) any later
   version.

   GCC is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or
   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   for more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */
#include "config.h"

#ifndef HAVE_SOCKLEN_T
#define socklen_t int
#endif


/* These attempt to coax various unix flavours to declare all our
   needed tidbits in the system headers.  */
#if !defined(__FreeBSD__) && !defined(__APPLE__)
#define _POSIX_SOURCE
#endif /* Some BSDs break <sys/socket.h> if this is defined. */
#define _GNU_SOURCE
#define _XOPEN_SOURCE
#define _BSD_TYPES
#define __EXTENSIONS__
#define _ALL_SOURCE
#define _LARGE_FILE_API
#define _XOPEN_SOURCE_EXTENDED 1

#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <sys/types.h>
#include <unistd.h>
#include <assert.h>
#include <errno.h>
#include <limits.h>
#include <time.h>

#include "mf-runtime.h"
#include "mf-impl.h"

#ifdef _MUDFLAP
#error "Do not compile this file with -fmudflap!"
#endif


/* Memory allocation related hook functions.  Some of these are
   intercepted via linker wrapping or symbol interposition.  Others
   use plain macros in mf-runtime.h.  */

#if PIC

/* Bootstrap allocation pool.  In PIC builds, allocation calls may
   arrive (e.g. from the dynamic linker) before the real malloc has
   been located; they are satisfied from this static pool.  Buffers
   are handed out at most once and never recycled; the free() wrapper
   recognizes pointers into __mf_0fn_bufs and ignores them.  */
enum { BS = 4096, NB = 10 };
static char __mf_0fn_bufs[NB][BS];
static unsigned __mf_0fn_bufs_used[NB];


/* A special bootstrap variant of malloc.  Returns a zero-initialized
   static buffer, or NULL when the request exceeds BS bytes or the
   pool is exhausted.  */
void *
__mf_0fn_malloc (size_t c)
{
  unsigned i;

  /* Hoisted out of the loop: an oversized request can never be
     satisfied, so don't scan the pool for it.  Accepts the same
     sizes as the original `c < BS' per-iteration test.  */
  if (c >= BS)
    return NULL;

  for (i = 0; i < NB; i++)
    {
      if (! __mf_0fn_bufs_used[i])
        {
          __mf_0fn_bufs_used[i] = 1;
          return & __mf_0fn_bufs[i][0];
        }
    }
  return NULL;
}
#endif
7954e85c | 96 | |
6de9cd9a DN |
97 | #undef malloc |
98 | WRAPPER(void *, malloc, size_t c) | |
99 | { | |
100 | size_t size_with_crumple_zones; | |
101 | DECLARE(void *, malloc, size_t c); | |
102 | void *result; | |
103 | BEGIN_PROTECT (malloc, c); | |
104 | ||
fb925a51 | 105 | size_with_crumple_zones = |
6de9cd9a DN |
106 | CLAMPADD(c,CLAMPADD(__mf_opts.crumple_zone, |
107 | __mf_opts.crumple_zone)); | |
2483ad58 | 108 | BEGIN_MALLOC_PROTECT (); |
6de9cd9a | 109 | result = (char *) CALL_REAL (malloc, size_with_crumple_zones); |
2483ad58 | 110 | END_MALLOC_PROTECT (); |
fb925a51 | 111 | |
6de9cd9a DN |
112 | if (LIKELY(result)) |
113 | { | |
114 | result += __mf_opts.crumple_zone; | |
115 | __mf_register (result, c, __MF_TYPE_HEAP, "malloc region"); | |
116 | /* XXX: register __MF_TYPE_NOACCESS for crumple zones. */ | |
117 | } | |
118 | ||
119 | return result; | |
120 | } | |
6de9cd9a DN |
121 | |
122 | ||


#ifdef PIC
/* A special bootstrap variant of calloc.  Guards the element-count
   multiplication against overflow (a wrapped c*n would silently
   under-allocate) and zeroes the storage explicitly instead of
   relying on the bootstrap pool's static zero-initialization.  */
void *
__mf_0fn_calloc (size_t c, size_t n)
{
  void *result;

  /* Overflow check per CERT INT30-C: fail rather than wrap.  */
  if (n != 0 && c > (size_t) -1 / n)
    return NULL;

  result = __mf_0fn_malloc (c * n);
  if (result)
    memset (result, 0, c * n);
  return result;
}
#endif
7954e85c | 132 | |
6de9cd9a DN |
133 | #undef calloc |
134 | WRAPPER(void *, calloc, size_t c, size_t n) | |
135 | { | |
136 | size_t size_with_crumple_zones; | |
137 | DECLARE(void *, calloc, size_t, size_t); | |
138 | DECLARE(void *, malloc, size_t); | |
139 | DECLARE(void *, memset, void *, int, size_t); | |
140 | char *result; | |
141 | BEGIN_PROTECT (calloc, c, n); | |
fb925a51 MS |
142 | |
143 | size_with_crumple_zones = | |
6de9cd9a DN |
144 | CLAMPADD((c * n), /* XXX: CLAMPMUL */ |
145 | CLAMPADD(__mf_opts.crumple_zone, | |
fb925a51 | 146 | __mf_opts.crumple_zone)); |
2483ad58 | 147 | BEGIN_MALLOC_PROTECT (); |
6de9cd9a | 148 | result = (char *) CALL_REAL (malloc, size_with_crumple_zones); |
2483ad58 | 149 | END_MALLOC_PROTECT (); |
fb925a51 | 150 | |
6de9cd9a DN |
151 | if (LIKELY(result)) |
152 | memset (result, 0, size_with_crumple_zones); | |
fb925a51 | 153 | |
6de9cd9a DN |
154 | if (LIKELY(result)) |
155 | { | |
156 | result += __mf_opts.crumple_zone; | |
157 | __mf_register (result, c*n /* XXX: clamp */, __MF_TYPE_HEAP_I, "calloc region"); | |
158 | /* XXX: register __MF_TYPE_NOACCESS for crumple zones. */ | |
159 | } | |
fb925a51 | 160 | |
6de9cd9a DN |
161 | return result; |
162 | } | |
6de9cd9a | 163 | |


#if PIC
/* A special bootstrap variant of realloc: always fails.  Bootstrap
   buffers are fixed-size and never resized.  */
void *
__mf_0fn_realloc (void *buf, size_t c)
{
  return NULL;
}
#endif


#undef realloc
/* Wrapper for realloc.  Translates the user pointer back to the real
   allocation base (one crumple zone earlier), resizes, then swaps the
   mudflap registration from the old region to the new one.  */
WRAPPER(void *, realloc, void *buf, size_t c)
{
  DECLARE(void * , realloc, void *, size_t);
  size_t size_with_crumple_zones;
  char *base = buf;
  unsigned saved_wipe_heap;
  char *result;
  BEGIN_PROTECT (realloc, buf, c);

  /* The real block starts one crumple zone before the pointer the
     user was handed by the malloc wrapper.  */
  if (LIKELY(buf))
    base -= __mf_opts.crumple_zone;

  size_with_crumple_zones =
    CLAMPADD(c, CLAMPADD(__mf_opts.crumple_zone,
                         __mf_opts.crumple_zone));
  BEGIN_MALLOC_PROTECT ();
  result = (char *) CALL_REAL (realloc, base, size_with_crumple_zones);
  END_MALLOC_PROTECT ();

  /* Ensure heap wiping doesn't occur during this peculiar
     unregister/reregister pair.  */
  LOCKTH ();
  __mf_set_state (reentrant);
  saved_wipe_heap = __mf_opts.wipe_heap;
  __mf_opts.wipe_heap = 0;

  /* Unregister the old extent even on realloc failure — the real
     realloc may already have moved/freed it.  Uses the unlocked
     __mfu_* variants since we hold the lock.  */
  if (LIKELY(buf))
    __mfu_unregister (buf, 0, __MF_TYPE_HEAP_I);
  /* NB: underlying region may have been __MF_TYPE_HEAP. */

  if (LIKELY(result))
    {
      result += __mf_opts.crumple_zone;
      __mfu_register (result, c, __MF_TYPE_HEAP_I, "realloc region");
      /* XXX: register __MF_TYPE_NOACCESS for crumple zones. */
    }

  /* Restore previous setting. */
  __mf_opts.wipe_heap = saved_wipe_heap;

  __mf_set_state (active);
  UNLOCKTH ();

  return result;
}


#if PIC
/* A special bootstrap variant of free: a deliberate no-op, since the
   bootstrap pool lives in static storage and is never recycled.  */
void
__mf_0fn_free (void *buf)
{
  return;
}
#endif

#undef free
/* Wrapper for free.  Unregisters the region, then either frees the
   underlying block immediately or parks it in a circular deferral
   queue so that use-after-free of recently freed memory can still be
   detected.  */
WRAPPER(void, free, void *buf)
{
  /* Use a circular queue to delay some number (__mf_opts.free_queue_length) of free()s.  */
  static void *free_queue [__MF_FREEQ_MAX];
  static unsigned free_ptr = 0;
  static int freeq_initialized = 0;
  DECLARE(void, free, void *);

  BEGIN_PROTECT (free, buf);

  /* free(NULL) is a no-op.  */
  if (UNLIKELY(buf == NULL))
    return;

#if PIC
  /* Check whether the given buffer might have come from a
     __mf_0fn_malloc/calloc call that for whatever reason was not
     redirected back to __mf_0fn_free.  If so, we just ignore the
     call. */
  if (UNLIKELY((uintptr_t) buf >= (uintptr_t) __mf_0fn_bufs &&
               (uintptr_t) buf < ((uintptr_t) __mf_0fn_bufs + sizeof(__mf_0fn_bufs))))
    {
      VERBOSE_TRACE ("skipping free of boot (0fn) alloc buffer %p\n", buf);
      return;
    }
#endif

  /* Lazily zero the deferral queue under the lock.  */
  LOCKTH ();
  if (UNLIKELY(!freeq_initialized))
    {
      memset (free_queue, 0,
              __MF_FREEQ_MAX * sizeof (void *));
      freeq_initialized = 1;
    }
  UNLOCKTH ();

  __mf_unregister (buf, 0, __MF_TYPE_HEAP_I);
  /* NB: underlying region may have been __MF_TYPE_HEAP. */

  if (UNLIKELY(__mf_opts.free_queue_length > 0))
    {
      /* Deferred mode: swap BUF into the queue slot and actually free
         whatever pointer it displaces (if any).  */
      char *freeme = NULL;
      LOCKTH ();
      if (free_queue [free_ptr] != NULL)
        {
          freeme = free_queue [free_ptr];
          /* Step back to the true allocation base.  */
          freeme -= __mf_opts.crumple_zone;
        }
      free_queue [free_ptr] = buf;
      free_ptr = (free_ptr == (__mf_opts.free_queue_length-1) ? 0 : free_ptr + 1);
      UNLOCKTH ();
      if (freeme)
        {
          if (__mf_opts.trace_mf_calls)
            {
              VERBOSE_TRACE ("freeing deferred pointer %p (crumple %u)\n",
                             (void *) freeme,
                             __mf_opts.crumple_zone);
            }
          BEGIN_MALLOC_PROTECT ();
          CALL_REAL (free, freeme);
          END_MALLOC_PROTECT ();
        }
    }
  else
    {
      /* back pointer up a bit to the beginning of crumple zone */
      char *base = (char *)buf;
      base -= __mf_opts.crumple_zone;
      if (__mf_opts.trace_mf_calls)
        {
          VERBOSE_TRACE ("freeing pointer %p = %p - %u\n",
                         (void *) base,
                         (void *) buf,
                         __mf_opts.crumple_zone);
        }
      BEGIN_MALLOC_PROTECT ();
      CALL_REAL (free, base);
      END_MALLOC_PROTECT ();
    }
}

f1dff13a JW |
315 | /* We can only wrap mmap if the target supports it. Likewise for munmap. |
316 | We assume we have both if we have mmap. */ | |
317 | #ifdef HAVE_MMAP | |
318 | ||
6de9cd9a DN |
319 | #if PIC |
320 | /* A special bootstrap variant. */ | |
321 | void * | |
322 | __mf_0fn_mmap (void *start, size_t l, int prot, int f, int fd, off_t off) | |
323 | { | |
f05816a5 LR |
324 | #if defined(__FreeBSD__) |
325 | if (f == 0x1000 && fd == -1 && prot == 0 && off == 0) | |
326 | return 0; | |
327 | #endif /* Ignore red zone allocation request for initial thread's stack. */ | |
328 | ||
6de9cd9a DN |
329 | return (void *) -1; |
330 | } | |
331 | #endif | |
332 | ||
333 | ||
334 | #undef mmap | |
fb925a51 MS |
335 | WRAPPER(void *, mmap, |
336 | void *start, size_t length, int prot, | |
6de9cd9a DN |
337 | int flags, int fd, off_t offset) |
338 | { | |
fb925a51 | 339 | DECLARE(void *, mmap, void *, size_t, int, |
6de9cd9a DN |
340 | int, int, off_t); |
341 | void *result; | |
342 | BEGIN_PROTECT (mmap, start, length, prot, flags, fd, offset); | |
343 | ||
fb925a51 | 344 | result = CALL_REAL (mmap, start, length, prot, |
6de9cd9a DN |
345 | flags, fd, offset); |
346 | ||
347 | /* | |
fb925a51 | 348 | VERBOSE_TRACE ("mmap (%08lx, %08lx, ...) => %08lx\n", |
6de9cd9a DN |
349 | (uintptr_t) start, (uintptr_t) length, |
350 | (uintptr_t) result); | |
351 | */ | |
352 | ||
353 | if (result != (void *)-1) | |
354 | { | |
355 | /* Register each page as a heap object. Why not register it all | |
356 | as a single segment? That's so that a later munmap() call | |
357 | can unmap individual pages. XXX: would __MF_TYPE_GUESS make | |
358 | this more automatic? */ | |
359 | size_t ps = getpagesize (); | |
360 | uintptr_t base = (uintptr_t) result; | |
361 | uintptr_t offset; | |
362 | ||
363 | for (offset=0; offset<length; offset+=ps) | |
364 | { | |
365 | /* XXX: We could map PROT_NONE to __MF_TYPE_NOACCESS. */ | |
366 | /* XXX: Unaccessed HEAP pages are reported as leaks. Is this | |
367 | appropriate for unaccessed mmap pages? */ | |
368 | __mf_register ((void *) CLAMPADD (base, offset), ps, | |
369 | __MF_TYPE_HEAP_I, "mmap page"); | |
370 | } | |
371 | } | |
372 | ||
373 | return result; | |
374 | } | |
6de9cd9a | 375 | |


#if PIC
/* A special bootstrap variant of munmap: always fails.  */
int
__mf_0fn_munmap (void *start, size_t length)
{
  return -1;
}
#endif

386 | ||
387 | #undef munmap | |
388 | WRAPPER(int , munmap, void *start, size_t length) | |
389 | { | |
390 | DECLARE(int, munmap, void *, size_t); | |
391 | int result; | |
392 | BEGIN_PROTECT (munmap, start, length); | |
fb925a51 | 393 | |
6de9cd9a DN |
394 | result = CALL_REAL (munmap, start, length); |
395 | ||
396 | /* | |
fb925a51 | 397 | VERBOSE_TRACE ("munmap (%08lx, %08lx, ...) => %08lx\n", |
6de9cd9a DN |
398 | (uintptr_t) start, (uintptr_t) length, |
399 | (uintptr_t) result); | |
400 | */ | |
401 | ||
402 | if (result == 0) | |
403 | { | |
404 | /* Unregister each page as a heap object. */ | |
405 | size_t ps = getpagesize (); | |
406 | uintptr_t base = (uintptr_t) start & (~ (ps - 1)); /* page align */ | |
407 | uintptr_t offset; | |
408 | ||
409 | for (offset=0; offset<length; offset+=ps) | |
cfbd22d7 | 410 | __mf_unregister ((void *) CLAMPADD (base, offset), ps, __MF_TYPE_HEAP_I); |
6de9cd9a DN |
411 | } |
412 | return result; | |
413 | } | |
f1dff13a | 414 | #endif /* HAVE_MMAP */ |
6de9cd9a | 415 | |


/* This wrapper is a little different, as it's called indirectly from
   __mf_fini also to clean up pending allocations.  */
void *
__mf_wrap_alloca_indirect (size_t c)
{
  DECLARE (void *, malloc, size_t);
  DECLARE (void, free, void *);

  /* This struct, a linked list, tracks alloca'd objects.  The newest
     object is at the head of the list.  If we detect that we've
     popped a few levels of stack, then the listed objects are freed
     as needed.  NB: The tracking struct is allocated with
     real_malloc; the user data with wrap_malloc.
  */
  struct alloca_tracking { void *ptr; void *stack; struct alloca_tracking* next; };
  static struct alloca_tracking *alloca_history = NULL;

  void *stack = __builtin_frame_address (0);
  void *result;
  struct alloca_tracking *track;

  TRACE ("%s\n", __PRETTY_FUNCTION__);
  VERBOSE_TRACE ("alloca stack level %p\n", (void *) stack);

  /* XXX: thread locking! */

  /* Free any previously alloca'd blocks that belong to deeper-nested functions,
     which must therefore have exited by now.  */

#define DEEPER_THAN < /* XXX: for x86; steal find_stack_direction() from libiberty/alloca.c */

  /* NOTE(review): assumes a downward-growing stack (see DEEPER_THAN
     above) — on upward-growing targets deferred frees would never
     trigger.  */
  while (alloca_history &&
         ((uintptr_t) alloca_history->stack DEEPER_THAN (uintptr_t) stack))
    {
      struct alloca_tracking *next = alloca_history->next;
      __mf_unregister (alloca_history->ptr, 0, __MF_TYPE_HEAP);
      BEGIN_MALLOC_PROTECT ();
      CALL_REAL (free, alloca_history->ptr);
      CALL_REAL (free, alloca_history);
      END_MALLOC_PROTECT ();
      alloca_history = next;
    }

  /* Allocate new block.  */
  result = NULL;
  if (LIKELY (c > 0)) /* alloca(0) causes no allocation.  */
    {
      /* Allocate the tracking node first; only then the user data.  */
      BEGIN_MALLOC_PROTECT ();
      track = (struct alloca_tracking *) CALL_REAL (malloc,
                                                    sizeof (struct alloca_tracking));
      END_MALLOC_PROTECT ();
      if (LIKELY (track != NULL))
        {
          BEGIN_MALLOC_PROTECT ();
          result = CALL_REAL (malloc, c);
          END_MALLOC_PROTECT ();
          if (UNLIKELY (result == NULL))
            {
              /* User-data allocation failed: release the node so
                 nothing dangling enters the history list.  */
              BEGIN_MALLOC_PROTECT ();
              CALL_REAL (free, track);
              END_MALLOC_PROTECT ();
              /* Too bad.  XXX: What about errno?  */
            }
          else
            {
              /* Success: register and push onto the history list.  */
              __mf_register (result, c, __MF_TYPE_HEAP, "alloca region");
              track->ptr = result;
              track->stack = stack;
              track->next = alloca_history;
              alloca_history = track;
            }
        }
    }

  return result;
}

495 | #undef alloca | |
496 | WRAPPER(void *, alloca, size_t c) | |
497 | { | |
498 | return __mf_wrap_alloca_indirect (c); | |
499 | } | |
500 |