libsanitizer merge from upstream r175042

Jakub Jelinek jakub@redhat.com
Wed Feb 13 15:19:00 GMT 2013


On Wed, Feb 13, 2013 at 05:39:15PM +0400, Konstantin Serebryany wrote:
> > No.  You can disable it for the whole system (prelink -ua), but that is not
> > a sane requirement to running sanitized programs.
> 
> Why not?
> :)

Because that is a system-wide operation that requires root access, etc.
The fact that some user wants to test one of his programs with Asan
shouldn't need to affect other users.

> This we can deal with.
> We already setenv+reexec on Mac to solve similar issue with Mac's
> dynamic run-time.

The reexec is problematic: what if the program, in constructors that run
before __asan_init (perhaps ctors of other libraries etc.), already does
something that really shouldn't be done twice?

> > Sure, but it will be then slower, I thought you are looking for ASAN speed
> > improvements.
> 
> Yes, and we already achieved it on ubuntu :)

AFAIK prelink is available even on Ubuntu, though perhaps not enabled by default.

> > I'll try to implement it eventually and
> > try to convince you ;)
> 
> That's surely not hard to implement, but very hard to support.

Why?

Here is the patch; it works just fine for me here during asan.exp testing.
You can verify it in one of several ways: install and enable prelink on one
of your x86_64-linux testing boxes; or just install prelink and add a test
that runs `prelink -r 0x3600000000` on some test shared library and then
uses that library in a sanitized program (which will also verify that you
can mmap libraries in that range); or even just write a test that, in a
non-instrumented ctor with lower priority than asan's, mmaps a few pages
at 0x3000000000 and close to 0x3fffff0000, and then stores some data into
those buffers later on in sanitized code.

--- asan_mapping.h.jj	2013-02-13 11:53:43.000000000 +0100
+++ asan_mapping.h	2013-02-13 16:00:22.821413836 +0100
@@ -61,13 +61,31 @@ extern SANITIZER_INTERFACE_ATTRIBUTE upt
 #define kHighShadowBeg  MEM_TO_SHADOW(kHighMemBeg)
 #define kHighShadowEnd  MEM_TO_SHADOW(kHighMemEnd)
 
+#if ASAN_LINUX && defined(__x86_64__)
+# define kMidMemBeg	(kLowMemEnd < 0x3000000000ULL ? 0x3000000000ULL : 0)
+# define kMidMemEnd	(kLowMemEnd < 0x3000000000ULL ? 0x3fffffffffULL : 0)
+# define kMidShadowBeg	MEM_TO_SHADOW(kMidMemBeg)
+# define kMidShadowEnd	MEM_TO_SHADOW(kMidMemEnd)
+#else
+# define kMidMemBeg	0
+# define kMidMemEnd	0
+# define kMidShadowBeg	0
+# define kMidShadowEnd	0
+#endif
+
 // With the zero shadow base we can not actually map pages starting from 0.
 // This constant is somewhat arbitrary.
 #define kZeroBaseShadowStart (1 << 18)
 
 #define kShadowGapBeg   (kLowShadowEnd ? kLowShadowEnd + 1 \
                                        : kZeroBaseShadowStart)
-#define kShadowGapEnd   (kHighShadowBeg - 1)
+#define kShadowGapEnd   ((kMidMemBeg ? kMidShadowBeg : kHighShadowBeg) - 1)
+
+#define kShadowGap2Beg	(kMidMemBeg ? kMidShadowEnd + 1 : 0)
+#define kShadowGap2End	(kMidMemBeg ? kMidMemBeg - 1 : 0)
+
+#define kShadowGap3Beg	(kMidMemBeg ? kMidMemEnd + 1 : 0)
+#define kShadowGap3End	(kMidMemBeg ? kHighShadowBeg - 1 : 0)
 
 namespace __asan {
 
@@ -86,8 +104,12 @@ static inline bool AddrIsInHighMem(uptr
   return a >= kHighMemBeg && a <= kHighMemEnd;
 }
 
+static inline bool AddrIsInMidMem(uptr a) {
+  return kMidMemBeg && a >= kMidMemBeg && a <= kMidMemEnd;
+}
+
 static inline bool AddrIsInMem(uptr a) {
-  return AddrIsInLowMem(a) || AddrIsInHighMem(a);
+  return AddrIsInLowMem(a) || AddrIsInMidMem(a) || AddrIsInHighMem(a);
 }
 
 static inline uptr MemToShadow(uptr p) {
@@ -99,11 +121,22 @@ static inline bool AddrIsInHighShadow(up
   return a >= kHighShadowBeg && a <=  kHighMemEnd;
 }
 
+static inline bool AddrIsInMidShadow(uptr a) {
+  return kMidMemBeg && a >= kMidShadowBeg && a <= kMidMemEnd;
+}
+
 static inline bool AddrIsInShadow(uptr a) {
-  return AddrIsInLowShadow(a) || AddrIsInHighShadow(a);
+  return AddrIsInLowShadow(a) || AddrIsInMidShadow(a) || AddrIsInHighShadow(a);
 }
 
 static inline bool AddrIsInShadowGap(uptr a) {
+  if (kMidMemBeg)
+    {
+      if (a <= kShadowGapEnd)
+	return SHADOW_OFFSET == 0 || a >= kShadowGapBeg;
+      return (a >= kShadowGap2Beg && a <= kShadowGap2End)
+	     || (a >= kShadowGap3Beg && a <= kShadowGap3End);
+    }
   // In zero-based shadow mode we treat addresses near zero as addresses
   // in shadow gap as well.
   if (SHADOW_OFFSET == 0)
--- asan_rtl.cc.jj	2013-02-13 11:53:44.000000000 +0100
+++ asan_rtl.cc	2013-02-13 16:00:10.815483846 +0100
@@ -35,8 +35,14 @@ static void AsanDie() {
     Report("Sleeping for %d second(s)\n", flags()->sleep_before_dying);
     SleepForSeconds(flags()->sleep_before_dying);
   }
-  if (flags()->unmap_shadow_on_exit)
-    UnmapOrDie((void*)kLowShadowBeg, kHighShadowEnd - kLowShadowBeg);
+  if (flags()->unmap_shadow_on_exit) {
+    if (!kMidMemBeg)
+      UnmapOrDie((void*)kLowShadowBeg, kHighShadowEnd - kLowShadowBeg);
+    else {
+      UnmapOrDie((void*)kLowShadowBeg, kMidMemBeg - kLowShadowBeg);
+      UnmapOrDie((void*)kMidMemEnd, kHighShadowEnd - kMidMemEnd);
+    }
+  }
   if (death_callback)
     death_callback();
   if (flags()->abort_on_error)
@@ -357,17 +363,33 @@ void __asan_init() {
            (void*)kHighMemBeg, (void*)kHighMemEnd);
     Printf("|| `[%p, %p]` || HighShadow ||\n",
            (void*)kHighShadowBeg, (void*)kHighShadowEnd);
+    if (kMidMemBeg) {
+      Printf("|| `[%p, %p]` || ShadowGap  ||\n",
+             (void*)kShadowGap3Beg, (void*)kShadowGap3End);
+      Printf("|| `[%p, %p]` || MidMem     ||\n",
+             (void*)kMidMemBeg, (void*)kMidMemEnd);
+      Printf("|| `[%p, %p]` || ShadowGap  ||\n",
+             (void*)kShadowGap2Beg, (void*)kShadowGap2End);
+      Printf("|| `[%p, %p]` || MidShadow  ||\n",
+             (void*)kMidShadowBeg, (void*)kMidShadowEnd);
+    }
     Printf("|| `[%p, %p]` || ShadowGap  ||\n",
            (void*)kShadowGapBeg, (void*)kShadowGapEnd);
     Printf("|| `[%p, %p]` || LowShadow  ||\n",
            (void*)kLowShadowBeg, (void*)kLowShadowEnd);
     Printf("|| `[%p, %p]` || LowMem     ||\n",
            (void*)kLowMemBeg, (void*)kLowMemEnd);
-    Printf("MemToShadow(shadow): %p %p %p %p\n",
+    Printf("MemToShadow(shadow): %p %p %p %p",
            (void*)MEM_TO_SHADOW(kLowShadowBeg),
            (void*)MEM_TO_SHADOW(kLowShadowEnd),
            (void*)MEM_TO_SHADOW(kHighShadowBeg),
            (void*)MEM_TO_SHADOW(kHighShadowEnd));
+    if (kMidMemBeg) {
+      Printf(" %p %p",
+           (void*)MEM_TO_SHADOW(kMidShadowBeg),
+           (void*)MEM_TO_SHADOW(kMidShadowEnd));
+    }
+    Printf("\n");
     Printf("red_zone=%zu\n", (uptr)flags()->redzone);
     Printf("malloc_context_size=%zu\n", (uptr)flags()->malloc_context_size);
 
@@ -375,6 +397,9 @@ void __asan_init() {
     Printf("SHADOW_GRANULARITY: %zx\n", (uptr)SHADOW_GRANULARITY);
     Printf("SHADOW_OFFSET: %zx\n", (uptr)SHADOW_OFFSET);
     CHECK(SHADOW_SCALE >= 3 && SHADOW_SCALE <= 7);
+    if (kMidMemBeg)
+      CHECK(kMidShadowBeg > kLowShadowEnd && kMidMemBeg > kMidShadowEnd
+            && kHighShadowBeg > kMidMemEnd);
   }
 
   if (flags()->disable_core) {
@@ -384,7 +409,26 @@ void __asan_init() {
   uptr shadow_start = kLowShadowBeg;
   if (kLowShadowBeg > 0) shadow_start -= GetMmapGranularity();
   uptr shadow_end = kHighShadowEnd;
-  if (MemoryRangeIsAvailable(shadow_start, shadow_end)) {
+  if (kMidMemBeg
+      && MemoryRangeIsAvailable(shadow_start, kMidMemBeg - 1)
+      && MemoryRangeIsAvailable(kMidMemEnd + 1, shadow_end)) {
+    if (kLowShadowBeg != kLowShadowEnd) {
+      // mmap the low shadow plus at least one page.
+      ReserveShadowMemoryRange(kLowShadowBeg - GetMmapGranularity(),
+                               kLowShadowEnd);
+    }
+    // mmap the mid shadow.
+    ReserveShadowMemoryRange(kMidShadowBeg, kMidShadowEnd);
+    // mmap the high shadow.
+    ReserveShadowMemoryRange(kHighShadowBeg, kHighShadowEnd);
+    // protect the gaps
+    void *prot = Mprotect(kShadowGapBeg, kShadowGapEnd - kShadowGapBeg + 1);
+    CHECK(prot == (void*)kShadowGapBeg);
+    prot = Mprotect(kShadowGap2Beg, kShadowGap2End - kShadowGap2Beg + 1);
+    CHECK(prot == (void*)kShadowGap2Beg);
+    prot = Mprotect(kShadowGap3Beg, kShadowGap3End - kShadowGap3Beg + 1);
+    CHECK(prot == (void*)kShadowGap3Beg);
+  } else if (!kMidMemBeg && MemoryRangeIsAvailable(shadow_start, shadow_end)) {
     if (kLowShadowBeg != kLowShadowEnd) {
       // mmap the low shadow plus at least one page.
       ReserveShadowMemoryRange(kLowShadowBeg - GetMmapGranularity(),


	Jakub



More information about the Gcc-patches mailing list