This is the mail archive of the gcc-patches@gcc.gnu.org mailing list for the GCC project.



[PATCH][libstdc++-v3 parallel mode] Tackle further XXX todos


Okay, let's continue:

types.h  // XXX need to use <cstdint>
         // XXX numeric_limits::digits?

How do you like the attached patch?


Tested x86_64-unknown-linux-gnu: No regressions

Please approve for mainline.

2009-09-28  Johannes Singler  <singler@ira.uka.de>
        * include/parallel/base.h: Take integer types from
        <tr1/cstdint>.
        * include/parallel/compatibility.h: Likewise.
        * include/parallel/multiseq_selection.h: Likewise.
        * include/parallel/random_shuffle.h: Likewise.
        * include/parallel/random_number.h: Likewise;
        use numeric_limits<>::digits instead of sizeof()*8.
        * include/parallel/types.h: Likewise.
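
For illustration (not part of the patch), a minimal standalone snippet
showing the two replacements: the exact-width typedefs now come from
<tr1/cstdint>, and the bit count from numeric_limits<>::digits rather
than sizeof()*8.  It assumes <tr1/cstdint> makes the C99 names visible
unqualified, which the patch itself relies on.

  // Standalone sketch, assuming <tr1/cstdint> exposes the C99
  // exact-width names unqualified (libstdc++ pulls in <stdint.h>).
  #include <tr1/cstdint>
  #include <limits>
  #include <cassert>

  int main()
  {
    // For unsigned types, digits is the number of value bits, the
    // same as sizeof() * 8 on the usual 8-bit-char platforms.
    assert(std::numeric_limits<uint32_t>::digits == 32);
    assert(sizeof(uint32_t) * 8 == 32);

    // For signed types, digits excludes the sign bit: 63 for
    // int64_t, while sizeof() * 8 still counts all 64 bits.
    assert(std::numeric_limits<int64_t>::digits == 63);
    assert(sizeof(int64_t) * 8 == 64);

    return 0;
  }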



// XXX atomics interface?

Very good idea, but how do I bring it in?  Even just

#include <cstdatomic>

or

#include <stdatomic.h>

makes the compilation fail.  Is there any way to get access to these
functions without enabling C++0x mode?  Hmm, declaring the functions
volatile does not seem backwards-compatible.
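
For reference, the GCC __sync builtins that compatibility.h already
falls back to do work without C++0x mode; a rough standalone sketch
(not part of the patch, names are only illustrative):

  #include <tr1/cstdint>

  // Sketch of the kind of fallback compatibility.h uses on GCC.
  inline int32_t
  fetch_and_add_32(volatile int32_t* ptr, int32_t addend)
  { return __sync_fetch_and_add(ptr, addend); }

  inline bool
  compare_and_swap_32(volatile int32_t* ptr, int32_t comparand,
                      int32_t replacement)
  { return __sync_bool_compare_and_swap(ptr, comparand, replacement); }

  int main()
  {
    volatile int32_t counter = 0;
    fetch_and_add_32(&counter, 5);                        // counter == 5
    return compare_and_swap_32(&counter, 5, 7) ? 0 : 1;   // swaps to 7
  }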

Johannes

Index: include/parallel/types.h
===================================================================
--- include/parallel/types.h	(revision 152234)
+++ include/parallel/types.h	(working copy)
@@ -33,6 +33,8 @@
 #define _GLIBCXX_PARALLEL_TYPES_H 1
 
 #include <cstdlib>
+#include <limits>
+#include <tr1/cstdint>
 
 namespace __gnu_parallel
 {
@@ -108,45 +110,24 @@
       EQUAL_SPLIT 
     };
 
-  /// _Integer Types.
-  // XXX need to use <cstdint>
-  /** @brief 16-bit signed integer. */
-  typedef short int16;
-
-  /** @brief 16-bit unsigned integer. */
-  typedef unsigned short uint16;
-
-  /** @brief 32-bit signed integer. */
-  typedef int int32;
-
-  /** @brief 32-bit unsigned integer. */
-  typedef unsigned int uint32;
-
-  /** @brief 64-bit signed integer. */
-  typedef long long int64;
-
-  /** @brief 64-bit unsigned integer. */
-  typedef unsigned long long uint64;
-
   /**
    * @brief Unsigned integer to index __elements.
    * The total number of elements for each algorithm must fit into this type.
    */
-  typedef uint64 _SequenceIndex;
+  typedef uint64_t _SequenceIndex;
 
   /**
    * @brief Unsigned integer to index a thread number.
    * The maximum thread number (for each processor) must fit into this type.
    */
-  typedef uint16 _ThreadIndex;
+  typedef uint16_t _ThreadIndex;
 
   // XXX atomics interface?
   /// Longest compare-and-swappable integer type on this platform.
-  typedef int64 _CASable;
+  typedef int64_t _CASable;
 
-  // XXX numeric_limits::digits?
-  /// Number of bits of ::_CASable.
-  static const int _CASable_bits = sizeof(_CASable) * 8;
+  /// Number of bits of _CASable.
+  static const int _CASable_bits = std::numeric_limits<_CASable>::digits;
 
   /// ::_CASable with the right half of bits set to 1.
   static const _CASable _CASable_mask =
Index: include/parallel/multiseq_selection.h
===================================================================
--- include/parallel/multiseq_selection.h	(revision 152234)
+++ include/parallel/multiseq_selection.h	(working copy)
@@ -273,7 +273,7 @@
             }
           
           _DifferenceType __skew = static_cast<_DifferenceType>
-            (static_cast<uint64>(__total) * __rank / __N - __leftsize);
+            (static_cast<uint64_t>(__total) * __rank / __N - __leftsize);
 
           if (__skew > 0)
             {
Index: include/parallel/base.h
===================================================================
--- include/parallel/base.h	(revision 152234)
+++ include/parallel/base.h	(working copy)
@@ -392,8 +392,8 @@
   public:
     typedef _DifferenceTp _DifferenceType;
 
-    // Better case down to uint64, than up to _DifferenceTp.
-    typedef _PseudoSequenceIterator<_Tp, uint64> iterator;
+    // Better cast down to uint64_t, than up to _DifferenceTp.
+    typedef _PseudoSequenceIterator<_Tp, uint64_t> iterator;
 
     /** @brief Constructor.
       *  @param _M_val Element of the sequence.
Index: include/parallel/random_shuffle.h
===================================================================
--- include/parallel/random_shuffle.h	(revision 152234)
+++ include/parallel/random_shuffle.h	(working copy)
@@ -100,7 +100,7 @@
     _BinIndex __bins_end;
 
     /** @brief Random _M_seed for this thread. */
-    uint32 _M_seed;
+    uint32_t _M_seed;
 
     /** @brief Pointer to global data. */
     _DRandomShufflingGlobalData<_RAIter>* _M_sd;
@@ -372,7 +372,8 @@
                 for (; __j < bin_cursor; ++__j)
                   _M_sd._M_bin_proc[__j] = __i;
                 __pus[__i]._M_num_threads = __num_threads;
-                __pus[__i]._M_seed = __rng(std::numeric_limits<uint32>::max());
+                __pus[__i]._M_seed =
+                        __rng(std::numeric_limits<uint32_t>::max());
                 __pus[__i]._M_sd = &_M_sd;
               }
             _M_starts[__num_threads] = __start;
Index: include/parallel/compatibility.h
===================================================================
--- include/parallel/compatibility.h	(revision 152234)
+++ include/parallel/compatibility.h	(working copy)
@@ -62,7 +62,7 @@
 {
 #if defined(__ICC)
   template<typename _MustBeInt = int>
-  int32 __faa32(int32* __x, int32 __inc)
+  int32_t __faa32(int32_t* __x, int32_t __inc)
   {
     asm volatile("lock xadd %0,%1"
                  : "=__r" (__inc), "=__m" (*__x)
@@ -72,7 +72,7 @@
   }
 #if defined(__x86_64)
   template<typename _MustBeInt = int>
-  int64 __faa64(int64* __x, int64 __inc)
+  int64_t __faa64(int64_t* __x, int64_t __inc)
   {
     asm volatile("lock xadd %0,%1"
                  : "=__r" (__inc), "=__m" (*__x)
@@ -91,8 +91,8 @@
    *  @param __ptr Pointer to a 32-bit signed integer.
    *  @param __addend Value to add.
    */
-  inline int32
-  __fetch_and_add_32(volatile int32* __ptr, int32 __addend)
+  inline int32_t
+  __fetch_and_add_32(volatile int32_t* __ptr, int32_t __addend)
   {
 #if defined(__ICC)      //x86 version
     return _InterlockedExchangeAdd((void*)__ptr, __addend);
@@ -104,7 +104,7 @@
 #elif defined(__GNUC__)
     return __sync_fetch_and_add(__ptr, __addend);
 #elif defined(__SUNPRO_CC) && defined(__sparc)
-    volatile int32 __before, __after;
+    volatile int32_t __before, __after;
     do
       {
         __before = *__ptr;
@@ -114,7 +114,7 @@
     return __before;
 #else   //fallback, slow
 #pragma message("slow __fetch_and_add_32")
-    int32 __res;
+    int32_t __res;
 #pragma omp critical
     {
       __res = *__ptr;
@@ -130,11 +130,11 @@
    *  @param __ptr Pointer to a 64-bit signed integer.
    *  @param __addend Value to add.
    */
-  inline int64
-  __fetch_and_add_64(volatile int64* __ptr, int64 __addend)
+  inline int64_t
+  __fetch_and_add_64(volatile int64_t* __ptr, int64_t __addend)
   {
 #if defined(__ICC) && defined(__x86_64) //x86 version
-    return __faa64<int>((int64*)__ptr, __addend);
+    return __faa64<int>((int64_t*)__ptr, __addend);
 #elif defined(__ECC)    //IA-64 version
     return _InterlockedExchangeAdd64((void*)__ptr, __addend);
 #elif defined(__ICL) || defined(_MSC_VER)
@@ -150,7 +150,7 @@
   (defined(__i686) || defined(__pentium4) || defined(__athlon))
     return __sync_fetch_and_add(__ptr, __addend);
 #elif defined(__SUNPRO_CC) && defined(__sparc)
-    volatile int64 __before, __after;
+    volatile int64_t __before, __after;
     do
       {
         __before = *__ptr;
@@ -164,7 +164,7 @@
     //#warning "please compile with -march=i686 or better"
 #endif
 #pragma message("slow __fetch_and_add_64")
-    int64 __res;
+    int64_t __res;
 #pragma omp critical
     {
       __res = *__ptr;
@@ -184,10 +184,12 @@
   inline _Tp
   __fetch_and_add(volatile _Tp* __ptr, _Tp __addend)
   {
-    if (sizeof(_Tp) == sizeof(int32))
-      return (_Tp)__fetch_and_add_32((volatile int32*) __ptr, (int32)__addend);
-    else if (sizeof(_Tp) == sizeof(int64))
-      return (_Tp)__fetch_and_add_64((volatile int64*) __ptr, (int64)__addend);
+    if (sizeof(_Tp) == sizeof(int32_t))
+      return
+        (_Tp)__fetch_and_add_32((volatile int32_t*) __ptr, (int32_t)__addend);
+    else if (sizeof(_Tp) == sizeof(int64_t))
+      return
+        (_Tp)__fetch_and_add_64((volatile int64_t*) __ptr, (int64_t)__addend);
     else
       _GLIBCXX_PARALLEL_ASSERT(false);
   }
@@ -196,10 +198,10 @@
 #if defined(__ICC)
 
   template<typename _MustBeInt = int>
-  inline int32
-  __cas32(volatile int32* __ptr, int32 __old, int32 __nw)
+  inline int32_t
+  __cas32(volatile int32_t* __ptr, int32_t __old, int32_t __nw)
   {
-    int32 __before;
+    int32_t __before;
     __asm__ __volatile__("lock; cmpxchgl %1,%2"
                          : "=a"(__before)
                          : "q"(__nw), "__m"(*(volatile long long*)(__ptr)),
@@ -210,10 +212,10 @@
 
 #if defined(__x86_64)
   template<typename _MustBeInt = int>
-  inline int64
-  __cas64(volatile int64 *__ptr, int64 __old, int64 __nw)
+  inline int64_t
+  __cas64(volatile int64_t *__ptr, int64_t __old, int64_t __nw)
   {
-    int64 __before;
+    int64_t __before;
     __asm__ __volatile__("lock; cmpxchgq %1,%2"
                          : "=a"(__before)
                          : "q"(__nw), "__m"(*(volatile long long*)(__ptr)),
@@ -234,8 +236,8 @@
    *  @param __replacement Replacement value.
    */
   inline bool
-  __compare_and_swap_32(volatile int32* __ptr, int32 __comparand,
-                        int32 __replacement)
+  __compare_and_swap_32(volatile int32_t* __ptr, int32_t __comparand,
+                        int32_t __replacement)
   {
 #if defined(__ICC)      //x86 version
     return _InterlockedCompareExchange((void*)__ptr, __replacement,
@@ -277,8 +279,8 @@
    *  @param __replacement Replacement value.
    */
   inline bool
-  __compare_and_swap_64(volatile int64* __ptr, int64 __comparand,
-                        int64 __replacement)
+  __compare_and_swap_64(volatile int64_t* __ptr, int64_t __comparand,
+                        int64_t __replacement)
   {
 #if defined(__ICC) && defined(__x86_64) //x86 version
     return __cas64<int>(__ptr, __comparand, __replacement) == __comparand;
@@ -332,12 +334,14 @@
   inline bool
   __compare_and_swap(volatile _Tp* __ptr, _Tp __comparand, _Tp __replacement)
   {
-    if (sizeof(_Tp) == sizeof(int32))
-      return __compare_and_swap_32((volatile int32*) __ptr, (int32)__comparand,
-                                   (int32)__replacement);
-    else if (sizeof(_Tp) == sizeof(int64))
-      return __compare_and_swap_64((volatile int64*) __ptr, (int64)__comparand,
-                                   (int64)__replacement);
+    if (sizeof(_Tp) == sizeof(int32_t))
+      return __compare_and_swap_32((volatile int32_t*) __ptr,
+                                   (int32_t)__comparand,
+                                   (int32_t)__replacement);
+    else if (sizeof(_Tp) == sizeof(int64_t))
+      return __compare_and_swap_64((volatile int64_t*) __ptr,
+                                   (int64_t)__comparand,
+                                   (int64_t)__replacement);
     else
       _GLIBCXX_PARALLEL_ASSERT(false);
   }
Index: include/parallel/random_number.h
===================================================================
--- include/parallel/random_number.h	(revision 152234)
+++ include/parallel/random_number.h	(working copy)
@@ -34,6 +34,7 @@
 
 #include <parallel/types.h>
 #include <tr1/random>
+#include <limits>
 
 namespace __gnu_parallel
 {
@@ -42,29 +43,29 @@
   {
   private:
     std::tr1::mt19937   _M_mt;
-    uint64              _M_supremum;
-    uint64              _M_rand_sup;
+    uint64_t            _M_supremum;
+    uint64_t            _M_rand_sup;
     double              _M_supremum_reciprocal;
     double              _M_rand_sup_reciprocal;
 
     // Assumed to be twice as long as the usual random number.
-    uint64              __cache;  
+    uint64_t              __cache;
 
     // Bit results.
     int __bits_left;
     
-    static uint32
-    __scale_down(uint64 __x,
+    static uint32_t
+    __scale_down(uint64_t __x,
 #if _GLIBCXX_SCALE_DOWN_FPU
-               uint64 /*_M_supremum*/, double _M_supremum_reciprocal)
+               uint64_t /*_M_supremum*/, double _M_supremum_reciprocal)
 #else
-               uint64 _M_supremum, double /*_M_supremum_reciprocal*/)
+               uint64_t _M_supremum, double /*_M_supremum_reciprocal*/)
 #endif
         {
 #if _GLIBCXX_SCALE_DOWN_FPU
-          return uint32(__x * _M_supremum_reciprocal);
+          return uint32_t(__x * _M_supremum_reciprocal);
 #else
-          return static_cast<uint32>(__x % _M_supremum);
+          return static_cast<uint32_t>(__x % _M_supremum);
 #endif
         }
 
@@ -72,7 +73,7 @@
     /** @brief Default constructor. Seed with 0. */
     _RandomNumber()
     : _M_mt(0), _M_supremum(0x100000000ULL),
-      _M_rand_sup(1ULL << (sizeof(uint32) * 8)),
+      _M_rand_sup(1ULL << std::numeric_limits<uint32_t>::digits),
       _M_supremum_reciprocal(double(_M_supremum) / double(_M_rand_sup)),
       _M_rand_sup_reciprocal(1.0 / double(_M_rand_sup)),
       __cache(0), __bits_left(0) { }
@@ -81,22 +82,22 @@
      *  @param __seed Random __seed.
      *  @param _M_supremum Generate integer random numbers in the
      *                  interval @__c [0,_M_supremum). */
-    _RandomNumber(uint32 __seed, uint64 _M_supremum = 0x100000000ULL)
+    _RandomNumber(uint32_t __seed, uint64_t _M_supremum = 0x100000000ULL)
     : _M_mt(__seed), _M_supremum(_M_supremum),
-      _M_rand_sup(1ULL << (sizeof(uint32) * 8)),
+      _M_rand_sup(1ULL << std::numeric_limits<uint32_t>::digits),
       _M_supremum_reciprocal(double(_M_supremum) / double(_M_rand_sup)),
       _M_rand_sup_reciprocal(1.0 / double(_M_rand_sup)),
       __cache(0), __bits_left(0) { }
 
     /** @brief Generate unsigned random 32-bit integer. */
-    uint32
+    uint32_t
     operator()()
     { return __scale_down(_M_mt(), _M_supremum, _M_supremum_reciprocal); }
 
     /** @brief Generate unsigned random 32-bit integer in the
         interval @__c [0,local_supremum). */
-    uint32
-    operator()(uint64 local_supremum)
+    uint32_t
+    operator()(uint64_t local_supremum)
     {
       return __scale_down(_M_mt(), local_supremum,
                         double(local_supremum * _M_rand_sup_reciprocal));
@@ -112,7 +113,7 @@
       __bits_left -= __bits;
       if (__bits_left < 32)
         {
-          __cache |= ((uint64(_M_mt())) << __bits_left);
+          __cache |= ((uint64_t(_M_mt())) << __bits_left);
           __bits_left += 32;
         }
       return __res;

