This is the mail archive of the gcc-patches@gcc.gnu.org mailing list for the GCC project.



Re: [PATCH] Do not build libsanitizer also for powerpc*-*-linux*


On Fri, May 30, 2014 at 08:09:22AM -0500, Peter Bergner wrote:
> On Thu, 2014-05-29 at 14:07 -0500, Peter Bergner wrote:
> > On Wed, 2014-05-28 at 09:36 +0200, Thomas Schwinge wrote:
> > > This is being discussed in the thread at
> > > <http://gcc.gnu.org/ml/gcc-patches/2014-05/msg02031.html>.  Until that
> > > has been resolved, I agree with checking in the following patch (I have
> > > tested it successfully, but cannot formally approve it for commit, so I'm
> > > copying the libsanitizer maintainers):
> > 
> > The re-enablement patch was submitted to the llvm mailing list here:
> > 
> >   http://lists.cs.uiuc.edu/pipermail/llvm-commits/Week-of-Mon-20140526/219249.html
> > 
> > Once that is committed and merged into gcc, we can re-enable building
> > libsanitizer for powerpc*-linux.
> 
> Ok, it was committed upstream as revision 209879.  Kostya, can we get
> this merged into gcc trunk and powerpc*-linux re-enabled?  Thanks.

I've cherry-picked the two upstream commits and, after testing on
x86_64-linux and i686-linux, committed them to trunk.

2014-05-30  Jakub Jelinek  <jakub@redhat.com>

	* sanitizer_common/sanitizer_stacktrace.cc: Cherry pick upstream
	r209879.
	* sanitizer_common/sanitizer_common.h: Likewise.
	* asan/asan_mapping.h: Likewise.
	* asan/asan_linux.cc: Likewise.
	* tsan/tsan_mman.cc: Cherry pick upstream r209744.
	* sanitizer_common/sanitizer_allocator.h: Likewise.

--- libsanitizer/sanitizer_common/sanitizer_stacktrace.cc	(revision 209878)
+++ libsanitizer/sanitizer_common/sanitizer_stacktrace.cc	(revision 209879)
@@ -18,11 +18,13 @@
 namespace __sanitizer {
 
 uptr StackTrace::GetPreviousInstructionPc(uptr pc) {
-#ifdef __arm__
+#if defined(__arm__)
   // Cancel Thumb bit.
   pc = pc & (~1);
-#endif
-#if defined(__sparc__)
+#elif defined(__powerpc__) || defined(__powerpc64__)
+  // PCs are always 4 byte aligned.
+  return pc - 4;
+#elif defined(__sparc__)
   return pc - 8;
 #else
   return pc - 1;
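
The heuristic behind this hunk: unwinders record return addresses, but
symbolization wants the call site, so each PC is stepped back into the
preceding instruction.  On a fixed-width ISA like powerpc that is exactly
one 4-byte word, sparc's 8-byte step also accounts for the branch delay
slot, and on variable-width ISAs any byte inside the call suffices, hence
pc - 1.  A minimal standalone sketch of the same logic (arm's Thumb-bit
masking omitted; uintptr_t stands in for the sanitizers' uptr):

    #include <cstdint>

    uintptr_t PreviousInstructionPc(uintptr_t pc) {
    #if defined(__powerpc__) || defined(__powerpc64__)
      return pc - 4;  // fixed 4-byte instructions: the call is one word back
    #elif defined(__sparc__)
      return pc - 8;  // call instruction plus its delay slot
    #else
      return pc - 1;  // variable-width ISA: any byte inside the call works
    #endif
    }
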
--- libsanitizer/sanitizer_common/sanitizer_common.h	(revision 209878)
+++ libsanitizer/sanitizer_common/sanitizer_common.h	(revision 209879)
@@ -28,7 +28,11 @@ struct StackTrace;
 const uptr kWordSize = SANITIZER_WORDSIZE / 8;
 const uptr kWordSizeInBits = 8 * kWordSize;
 
-const uptr kCacheLineSize = 64;
+#if defined(__powerpc__) || defined(__powerpc64__)
+  const uptr kCacheLineSize = 128;
+#else
+  const uptr kCacheLineSize = 64;
+#endif
 
 const uptr kMaxPathLength = 512;
 
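
kCacheLineSize exists so the runtime can pad hot, concurrently written data
out to whole cache lines; 64 understates the 128-byte lines used by POWER
processors, which would quietly reintroduce false sharing there.  A
hypothetical illustration of the pattern the constant serves (PaddedCounter
and per_cpu_counters are made-up names, not sanitizer code):

    #include <atomic>
    #include <cstddef>

    #if defined(__powerpc__) || defined(__powerpc64__)
    constexpr std::size_t kCacheLineSize = 128;  // POWER: 128-byte lines
    #else
    constexpr std::size_t kCacheLineSize = 64;
    #endif

    // alignas pads each counter to a full line; with an understated line
    // size two counters could share a line and ping-pong it between cores.
    struct alignas(kCacheLineSize) PaddedCounter {
      std::atomic<std::size_t> value{0};
    };

    PaddedCounter per_cpu_counters[8];
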
--- libsanitizer/asan/asan_mapping.h	(revision 209878)
+++ libsanitizer/asan/asan_mapping.h	(revision 209879)
@@ -87,6 +87,7 @@ static const u64 kDefaultShadowOffset64
 static const u64 kDefaultShort64bitShadowOffset = 0x7FFF8000;  // < 2G.
 static const u64 kAArch64_ShadowOffset64 = 1ULL << 36;
 static const u64 kMIPS32_ShadowOffset32 = 0x0aaa8000;
+static const u64 kPPC64_ShadowOffset64 = 1ULL << 41;
 static const u64 kFreeBSD_ShadowOffset32 = 1ULL << 30;  // 0x40000000
 static const u64 kFreeBSD_ShadowOffset64 = 1ULL << 46;  // 0x400000000000
 
@@ -109,6 +110,8 @@ static const u64 kFreeBSD_ShadowOffset64
 # else
 #  if defined(__aarch64__)
 #    define SHADOW_OFFSET kAArch64_ShadowOffset64
+#  elif defined(__powerpc64__)
+#    define SHADOW_OFFSET kPPC64_ShadowOffset64
 #  elif SANITIZER_FREEBSD
 #    define SHADOW_OFFSET kFreeBSD_ShadowOffset64
 #  elif SANITIZER_MAC
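
The offset decides where ASan's shadow region lives: every 8 application
bytes map to one shadow byte via Shadow = (Mem >> 3) + SHADOW_OFFSET, and
the new constant places the ppc64 shadow at 1ULL << 41.  A worked example
of the mapping (the sample address and the MemToShadow helper are
illustrative only):

    #include <cstdint>
    #include <cstdio>

    constexpr std::uint64_t kPPC64_ShadowOffset64 = 1ULL << 41;
    constexpr int kShadowScale = 3;  // one shadow byte per 8 app bytes

    std::uint64_t MemToShadow(std::uint64_t addr) {
      return (addr >> kShadowScale) + kPPC64_ShadowOffset64;
    }

    int main() {
      std::uint64_t addr = 0x00000fff00000000ULL;  // made-up heap address
      std::printf("shadow(0x%llx) = 0x%llx\n",
                  (unsigned long long)addr,
                  (unsigned long long)MemToShadow(addr));
    }
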
--- libsanitizer/asan/asan_linux.cc	(revision 209878)
+++ libsanitizer/asan/asan_linux.cc	(revision 209879)
@@ -188,6 +188,13 @@ void GetPcSpBp(void *context, uptr *pc,
   *bp = ucontext->uc_mcontext.gregs[REG_EBP];
   *sp = ucontext->uc_mcontext.gregs[REG_ESP];
 # endif
+#elif defined(__powerpc__) || defined(__powerpc64__)
+  ucontext_t *ucontext = (ucontext_t*)context;
+  *pc = ucontext->uc_mcontext.regs->nip;
+  *sp = ucontext->uc_mcontext.regs->gpr[PT_R1];
+  // The powerpc{,64}-linux ABIs do not specify r31 as the frame
+  // pointer, but GCC always uses r31 when we need a frame pointer.
+  *bp = ucontext->uc_mcontext.regs->gpr[PT_R31];
 #elif defined(__sparc__)
   ucontext_t *ucontext = (ucontext_t*)context;
   uptr *stk_ptr;
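
These registers come from the signal context the kernel passes to an
SA_SIGINFO handler; on powerpc*-linux the register file is reached through
uc_mcontext.regs, with r1 as the ABI stack pointer and r31 as GCC's chosen
frame pointer.  A minimal powerpc*-linux-only sketch of how a handler gets
hold of that ucontext_t (a demo, not the sanitizer's actual setup code):

    #include <csignal>
    #include <cstdio>
    #include <cstdlib>
    #include <ucontext.h>

    static void Handler(int, siginfo_t *, void *context) {
      ucontext_t *uc = static_cast<ucontext_t *>(context);
    #if defined(__powerpc__) || defined(__powerpc64__)
      unsigned long pc = uc->uc_mcontext.regs->nip;     // faulting insn
      unsigned long sp = uc->uc_mcontext.regs->gpr[1];  // r1: stack pointer
      unsigned long bp = uc->uc_mcontext.regs->gpr[31]; // r31: frame pointer
      // fprintf is not async-signal-safe; acceptable in a demo only.
      std::fprintf(stderr, "pc=%#lx sp=%#lx bp=%#lx\n", pc, sp, bp);
    #endif
      std::_Exit(1);
    }

    int main() {
      struct sigaction sa = {};
      sa.sa_sigaction = Handler;
      sa.sa_flags = SA_SIGINFO;  // hand the handler a ucontext_t*
      sigaction(SIGSEGV, &sa, nullptr);
      *(volatile int *)nullptr = 0;  // fault on purpose to run the handler
    }
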
--- libsanitizer/tsan/tsan_mman.cc	(revision 209743)
+++ libsanitizer/tsan/tsan_mman.cc	(revision 209744)
@@ -217,19 +217,15 @@ using namespace __tsan;
 
 extern "C" {
 uptr __tsan_get_current_allocated_bytes() {
-  u64 stats[AllocatorStatCount];
+  uptr stats[AllocatorStatCount];
   allocator()->GetStats(stats);
-  u64 m = stats[AllocatorStatMalloced];
-  u64 f = stats[AllocatorStatFreed];
-  return m >= f ? m - f : 1;
+  return stats[AllocatorStatAllocated];
 }
 
 uptr __tsan_get_heap_size() {
-  u64 stats[AllocatorStatCount];
+  uptr stats[AllocatorStatCount];
   allocator()->GetStats(stats);
-  u64 m = stats[AllocatorStatMmapped];
-  u64 f = stats[AllocatorStatUnmapped];
-  return m >= f ? m - f : 1;
+  return stats[AllocatorStatMapped];
 }
 
 uptr __tsan_get_free_bytes() {
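
The pattern change: the old code subtracted two monotonically growing
totals (malloced/freed, mmapped/unmapped) and clamped at query time; the
new code keeps one running counter per statistic, updated with Add and Sub,
so each query is a single array read.  In miniature (both helper functions
are hypothetical):

    #include <cstdint>
    using uptr = std::uintptr_t;

    enum { AllocatorStatAllocated, AllocatorStatMapped, AllocatorStatCount };

    // Old scheme: difference of two ever-growing totals, clamped to 1.
    uptr CurrentAllocatedOld(uptr malloced, uptr freed) {
      return malloced >= freed ? malloced - freed : 1;
    }

    // New scheme: the allocator maintains the difference itself.
    uptr CurrentAllocatedNew(const uptr stats[AllocatorStatCount]) {
      return stats[AllocatorStatAllocated];
    }
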
--- libsanitizer/sanitizer_common/sanitizer_allocator.h	(revision 209743)
+++ libsanitizer/sanitizer_common/sanitizer_allocator.h	(revision 209744)
@@ -198,14 +198,12 @@ template<class SizeClassAllocator> struc
 
 // Memory allocator statistics
 enum AllocatorStat {
-  AllocatorStatMalloced,
-  AllocatorStatFreed,
-  AllocatorStatMmapped,
-  AllocatorStatUnmapped,
+  AllocatorStatAllocated,
+  AllocatorStatMapped,
   AllocatorStatCount
 };
 
-typedef u64 AllocatorStatCounters[AllocatorStatCount];
+typedef uptr AllocatorStatCounters[AllocatorStatCount];
 
 // Per-thread stats, live in per-thread cache.
 class AllocatorStats {
@@ -214,16 +212,21 @@ class AllocatorStats {
     internal_memset(this, 0, sizeof(*this));
   }
 
-  void Add(AllocatorStat i, u64 v) {
+  void Add(AllocatorStat i, uptr v) {
     v += atomic_load(&stats_[i], memory_order_relaxed);
     atomic_store(&stats_[i], v, memory_order_relaxed);
   }
 
-  void Set(AllocatorStat i, u64 v) {
+  void Sub(AllocatorStat i, uptr v) {
+    v = atomic_load(&stats_[i], memory_order_relaxed) - v;
     atomic_store(&stats_[i], v, memory_order_relaxed);
   }
 
-  u64 Get(AllocatorStat i) const {
+  void Set(AllocatorStat i, uptr v) {
+    atomic_store(&stats_[i], v, memory_order_relaxed);
+  }
+
+  uptr Get(AllocatorStat i) const {
     return atomic_load(&stats_[i], memory_order_relaxed);
   }
 
@@ -231,7 +234,7 @@ class AllocatorStats {
   friend class AllocatorGlobalStats;
   AllocatorStats *next_;
   AllocatorStats *prev_;
-  atomic_uint64_t stats_[AllocatorStatCount];
+  atomic_uintptr_t stats_[AllocatorStatCount];
 };
 
 // Global stats, used for aggregation and querying.
@@ -260,7 +263,7 @@ class AllocatorGlobalStats : public Allo
   }
 
   void Get(AllocatorStatCounters s) const {
-    internal_memset(s, 0, AllocatorStatCount * sizeof(u64));
+    internal_memset(s, 0, AllocatorStatCount * sizeof(uptr));
     SpinMutexLock l(&mu_);
     const AllocatorStats *stats = this;
     for (;;) {
@@ -270,6 +273,9 @@ class AllocatorGlobalStats : public Allo
       if (stats == this)
         break;
     }
+    // All stats must be positive.
+    for (int i = 0; i < AllocatorStatCount; i++)
+      s[i] = ((sptr)s[i]) > 0 ? s[i] : 1;
   }
 
  private:
@@ -522,7 +528,7 @@ class SizeClassAllocator64 {
         map_size += kUserMapSize;
       CHECK_GE(region->mapped_user + map_size, end_idx);
       MapWithCallback(region_beg + region->mapped_user, map_size);
-      stat->Add(AllocatorStatMmapped, map_size);
+      stat->Add(AllocatorStatMapped, map_size);
       region->mapped_user += map_size;
     }
     uptr total_count = (region->mapped_user - beg_idx - size)
@@ -841,7 +847,7 @@ class SizeClassAllocator32 {
     uptr res = reinterpret_cast<uptr>(MmapAlignedOrDie(kRegionSize, kRegionSize,
                                       "SizeClassAllocator32"));
     MapUnmapCallback().OnMap(res, kRegionSize);
-    stat->Add(AllocatorStatMmapped, kRegionSize);
+    stat->Add(AllocatorStatMapped, kRegionSize);
     CHECK_EQ(0U, (res & (kRegionSize - 1)));
     possible_regions.set(ComputeRegionId(res), static_cast<u8>(class_id));
     return res;
@@ -907,7 +913,7 @@ struct SizeClassAllocatorLocalCache {
   void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
     CHECK_NE(class_id, 0UL);
     CHECK_LT(class_id, kNumClasses);
-    stats_.Add(AllocatorStatMalloced, SizeClassMap::Size(class_id));
+    stats_.Add(AllocatorStatAllocated, SizeClassMap::Size(class_id));
     PerClass *c = &per_class_[class_id];
     if (UNLIKELY(c->count == 0))
       Refill(allocator, class_id);
@@ -922,7 +928,7 @@ struct SizeClassAllocatorLocalCache {
     // If the first allocator call on a new thread is a deallocation, then
     // max_count will be zero, leading to check failure.
     InitCache();
-    stats_.Add(AllocatorStatFreed, SizeClassMap::Size(class_id));
+    stats_.Sub(AllocatorStatAllocated, SizeClassMap::Size(class_id));
     PerClass *c = &per_class_[class_id];
     CHECK_NE(c->max_count, 0UL);
     if (UNLIKELY(c->count == c->max_count))
@@ -1033,8 +1039,8 @@ class LargeMmapAllocator {
       stats.currently_allocated += map_size;
       stats.max_allocated = Max(stats.max_allocated, stats.currently_allocated);
       stats.by_size_log[size_log]++;
-      stat->Add(AllocatorStatMalloced, map_size);
-      stat->Add(AllocatorStatMmapped, map_size);
+      stat->Add(AllocatorStatAllocated, map_size);
+      stat->Add(AllocatorStatMapped, map_size);
     }
     return reinterpret_cast<void*>(res);
   }
@@ -1052,8 +1058,8 @@ class LargeMmapAllocator {
       chunks_sorted_ = false;
       stats.n_frees++;
       stats.currently_allocated -= h->map_size;
-      stat->Add(AllocatorStatFreed, h->map_size);
-      stat->Add(AllocatorStatUnmapped, h->map_size);
+      stat->Sub(AllocatorStatAllocated, h->map_size);
+      stat->Sub(AllocatorStatMapped, h->map_size);
     }
     MapUnmapCallback().OnUnmap(h->map_beg, h->map_size);
     UnmapOrDie(reinterpret_cast<void*>(h->map_beg), h->map_size);
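
Taken together, the allocator.h hunks turn the stats into per-thread
relaxed atomic counters.  Because a block can be freed on a different
thread than the one that allocated it, a single thread's Allocated counter
may wrap below zero; the totals only make sense after summing every
thread's contribution, which is why AllocatorGlobalStats::Get clamps the
aggregated values to stay positive.  A condensed sketch of that scheme
(upstream updates counters with relaxed load/store pairs; fetch_add and
fetch_sub below are a shorthand with the same effect for this purpose):

    #include <atomic>
    #include <cstdint>

    using uptr = std::uintptr_t;
    using sptr = std::intptr_t;

    enum AllocatorStat { AllocatorStatAllocated, AllocatorStatMapped,
                         AllocatorStatCount };

    struct ThreadStats {
      std::atomic<uptr> stats_[AllocatorStatCount] = {};
      void Add(AllocatorStat i, uptr v) {
        stats_[i].fetch_add(v, std::memory_order_relaxed);
      }
      void Sub(AllocatorStat i, uptr v) {
        // May wrap: this thread can free what another thread allocated.
        stats_[i].fetch_sub(v, std::memory_order_relaxed);
      }
    };

    // Sum across threads, clamping transiently negative totals to 1,
    // mirroring AllocatorGlobalStats::Get in the hunk above.
    uptr Aggregate(const ThreadStats *threads, int n, AllocatorStat i) {
      uptr sum = 0;
      for (int t = 0; t < n; t++)
        sum += threads[t].stats_[i].load(std::memory_order_relaxed);
      return (sptr)sum > 0 ? sum : 1;
    }
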


	Jakub

