Attachment 'libatomic.c'


/* Basic implementation of libatomic for GCC.
   This basic implementation currently assumes everything is aligned.

   Copyright (C) 2010, 2011
   Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by the
   Free Software Foundation; either version 3, or (at your option) any
   later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */


#include <stdbool.h>
#include <string.h>
#include <stdint.h>
#include <stddef.h>

/* These defines should be set based on the configuration of what the target
   supports.  Changing these #defines is all that is required to use this
   file.  If your target supports an unsigned integer type of the appropriate
   number of bytes, simply define the corresponding macro as 1.  Note that for
   I16 you may also have to change the typedef from __int128_t to whichever
   128-bit integer type is appropriate for your target.

   Also note that you can expect to see a warning from the compiler similar to:
warning: conflicting types for built-in function ‘__atomic_compare_exchange_1’
   This is expected behaviour.  */


#ifndef  __LIBATOMIC_SUPPORTS_I1
#define __LIBATOMIC_SUPPORTS_I1		1
#endif

#ifndef  __LIBATOMIC_SUPPORTS_I2
#define __LIBATOMIC_SUPPORTS_I2		1
#endif

#ifndef  __LIBATOMIC_SUPPORTS_I4
#define __LIBATOMIC_SUPPORTS_I4		1
#endif

#ifndef  __LIBATOMIC_SUPPORTS_I8
#define __LIBATOMIC_SUPPORTS_I8		1
#endif

#ifndef  __LIBATOMIC_SUPPORTS_I16
#define __LIBATOMIC_SUPPORTS_I16	1
#endif
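
/* For example, a target that lacks a 128-bit integer type could disable the
   16-byte routines by pre-defining the macro on the command line
   (illustrative invocation):

     gcc -c libatomic.c -D__LIBATOMIC_SUPPORTS_I16=0

   The #ifndef guards above exist so that such overrides take effect.  */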

/* Define types for all supported sizes.  */

#if __LIBATOMIC_SUPPORTS_I1
typedef uint8_t		I1;
#endif
#if __LIBATOMIC_SUPPORTS_I2
typedef uint16_t	I2;
#endif
#if __LIBATOMIC_SUPPORTS_I4
typedef uint32_t	I4;
#endif
#if __LIBATOMIC_SUPPORTS_I8
typedef uint64_t	I8;
#endif
#if __LIBATOMIC_SUPPORTS_I16
typedef __int128_t	I16;
#endif

/* For testing the locked implementation, define this to make all routines use
   locks and run the testsuites.  */

#if __LIBATOMIC_ALWAYS_LOCKED
#define __atomic_always_lock_free(S,P)	false
#endif

/* This defines the number of unique locks which are available for mapping
   an address to.  */
#define __LIBATOMIC_N_LOCKS	(1 << 4)


/* Return a pointer to a boolean test_and_set flag for ADDR.  */

static bool *
__libatomic_flag_for_address (void *addr)
{
  static bool flag_table[__LIBATOMIC_N_LOCKS]
		      = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
  uintptr_t p = (uintptr_t)addr;

  p += (p >> 2) + (p << 4);
  p += (p >> 7) + (p << 5);
  p += (p >> 17) + (p << 13);
  if (sizeof(void *) > 4)
    p += (p >> 31);
  p &= (__LIBATOMIC_N_LOCKS - 1);
  return flag_table + p;
}

/* If the specified memory MODEL can act as a release fence, issue the
   appropriate barrier.  Specify it such that it is a compile time constant.  */

static inline void
maybe_release_fence (int model)
{
  switch (model)
  {
    case __ATOMIC_RELEASE:
      __atomic_thread_fence (__ATOMIC_RELEASE);
      break;
    case __ATOMIC_ACQ_REL:
      __atomic_thread_fence (__ATOMIC_ACQ_REL);
      break;
    case __ATOMIC_SEQ_CST:
      __atomic_thread_fence (__ATOMIC_SEQ_CST);
      break;
    default:
      break;
  }
}

/* If the specified memory MODEL can act as an acquire fence, issue the
   appropriate barrier.  Specify it such that it is a compile time constant.  */

static inline void
maybe_acquire_fence (int model)
{
  switch (model)
  {
    case __ATOMIC_ACQUIRE:
      __atomic_thread_fence (__ATOMIC_ACQUIRE);
      break;
    case __ATOMIC_ACQ_REL:
      __atomic_thread_fence (__ATOMIC_ACQ_REL);
      break;
    case __ATOMIC_SEQ_CST:
      __atomic_thread_fence (__ATOMIC_SEQ_CST);
      break;
    default:
      break;
  }
}

/* Acquire the spin lock for ADDR, and issue any barrier which might be
   required.  */

static inline void
get_lock (void *addr, int model)
{
  bool *lock_ptr = __libatomic_flag_for_address (addr);

  maybe_release_fence (model);
  while (__atomic_test_and_set (lock_ptr, __ATOMIC_ACQUIRE) == 1)
    ;
}

/* Release the spin lock for ADDR, and issue any barrier which might be
   required.  */

static inline void
free_lock (void *addr, int model)
{
  bool *lock_ptr = __libatomic_flag_for_address (addr);

  __atomic_clear (lock_ptr, __ATOMIC_RELEASE);
  maybe_acquire_fence (model);
}


/* Return whether a size is lock free or not.  PTR is currently unused since
   we're assuming alignment for the moment.  */

bool
__atomic_is_lock_free (size_t size, void *ptr __attribute__ ((unused)))
{
  /* __atomic_always_lock_free requires a compile time constant to evaluate
     properly, so provide individual cases and simply fill in the constant.  */
  switch (size)
    {
      case 1:
	return __atomic_always_lock_free (1, 0);
      case 2:
	return __atomic_always_lock_free (2, 0);
      case 4:
	return __atomic_always_lock_free (4, 0);
      case 8:
	return __atomic_always_lock_free (8, 0);
      case 16:
	return __atomic_always_lock_free (16, 0);
      default:
        break;
    }
  return false;
}

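/* For example (illustrative of a target with lock-free 4-byte but no
   lock-free 16-byte operations): __atomic_is_lock_free (4, 0) would return
   true, while __atomic_is_lock_free (16, 0) would return false and 16-byte
   objects would be handled by the locked paths below.  */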

/* If SIZE is lockfree, issue a lockfree sequence for the load, otherwise
   break from the switch element.  */
#define LOAD(SIZE)  						\
  if (__atomic_always_lock_free (SIZE, 0)) 			\
    { 								\
      I ## SIZE tmp = __atomic_load_ ## SIZE (mem, model);	\
      memcpy (ret, &tmp, SIZE); 				\
      return;							\
    }								\
  else								\
    break;


/* Implement a generic atomic load for an object of SIZE, copying the value
   from MEM into RET using MODEL.  */

void
__atomic_load (size_t size, void *mem, void *ret, int model)
{
  switch (size)
  {
#if __LIBATOMIC_SUPPORTS_I1
    case 1:
      LOAD (1);
#endif
#if __LIBATOMIC_SUPPORTS_I2
    case 2:
      LOAD (2);
#endif
#if __LIBATOMIC_SUPPORTS_I4
    case 4:
      LOAD (4);
#endif
#if __LIBATOMIC_SUPPORTS_I8
    case 8:
      LOAD (8);
#endif
#if __LIBATOMIC_SUPPORTS_I16
    case 16:
      LOAD (16);
#endif
    default:
      break;
  }

  /* If control gets here, a lock is needed.  */
  get_lock (mem, model);
  memcpy (ret, mem, size);
  free_lock (mem, model);
}

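/* As an illustration (hypothetical object, not part of the interface):
   a generic call on an object larger than 16 bytes, e.g.

     struct big { char c[32]; } src, dst;
     __atomic_load (sizeof (src), &src, &dst, __ATOMIC_SEQ_CST);

   matches none of the cases above and therefore falls through to the
   locked memcpy path.  */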

/* If SIZE is lockfree, issue a lockfree sequence for the store, otherwise
   break from the switch element.  */
#define STORE(SIZE)  					\
  if (__atomic_always_lock_free (SIZE, 0)) 		\
    { 							\
      I ## SIZE tmp;					\
      memcpy (&tmp, val, SIZE); 			\
      __atomic_store_ ## SIZE (mem, tmp, model); 	\
      return;						\
    }							\
  else							\
    break;

/* Perform an atomic store for an object of SIZE.  Store VAL into MEM using
   MODEL.  */

void
__atomic_store (size_t size, void *mem, void *val, int model)
{
  switch (size)
  {
#if __LIBATOMIC_SUPPORTS_I1
    case 1:
      STORE (1);
#endif
#if __LIBATOMIC_SUPPORTS_I2
    case 2:
      STORE (2);
#endif
#if __LIBATOMIC_SUPPORTS_I4
    case 4:
      STORE (4);
#endif
#if __LIBATOMIC_SUPPORTS_I8
    case 8:
      STORE (8);
#endif
#if __LIBATOMIC_SUPPORTS_I16
    case 16:
      STORE (16);
#endif
    default:
      break;
  }

  /* If control gets here, a lock is needed.  */
  get_lock (mem, model);
  memcpy (mem, val, size);
  free_lock (mem, model);
}


/* If SIZE is lockfree, issue a lockfree sequence for the exchange, otherwise
   break from the switch element.  */
#define EXCHANGE(SIZE)						\
  if (__atomic_always_lock_free (SIZE, 0))			\
    { 						     		\
      I ## SIZE tmp1, tmp2;					\
      memcpy (&tmp2, val, SIZE);				\
      tmp1 = __atomic_exchange_ ## SIZE (mem, tmp2, model);	\
      memcpy (ret, &tmp1, SIZE); 				\
      return;					     		\
    }								\
  else						     		\
    break;

/* Perform an atomic exchange for an object of SIZE.  Store VAL into MEM using
   MODEL, and return the previous value of MEM in RET.  */

void
__atomic_exchange (size_t size, void *mem, void *val, void *ret, int model)
{
  switch (size)
  {
#if __LIBATOMIC_SUPPORTS_I1
    case 1:
      EXCHANGE (1);
#endif
#if __LIBATOMIC_SUPPORTS_I2
    case 2:
      EXCHANGE (2);
#endif
#if __LIBATOMIC_SUPPORTS_I4
    case 4:
      EXCHANGE (4);
#endif
#if __LIBATOMIC_SUPPORTS_I8
    case 8:
      EXCHANGE (8);
#endif
#if __LIBATOMIC_SUPPORTS_I16
    case 16:
      EXCHANGE (16);
#endif
    default:
      break;
  }

  /* If control gets here, a lock is needed.  */
  get_lock (mem, model);
  memcpy (ret, mem, size);
  memcpy (mem, val, size);
  free_lock (mem, model);
}


/* If SIZE is lockfree, issue a lockfree sequence for the compare_exchange,
   otherwise break from the switch element.  */
#define COMPARE_EXCHANGE(SIZE)						\
  if (__atomic_always_lock_free (SIZE, 0))				\
    { 						     			\
      bool ret;								\
      I ## SIZE tmp;							\
      memcpy (&tmp, desired, SIZE); 					\
      ret = __atomic_compare_exchange_ ## SIZE (mem, expect, tmp, 0,	\
						success, failure);	\
      return ret;				     			\
    }									\
  else						     			\
    break;

/* Perform an atomic compare_exchange for an object of SIZE.  If MEM contains
   the value in EXPECT, copy DESIRED into MEM utilizing memory model SUCCESS
   and return true.  Otherwise copy the contents of MEM into EXPECT using
   memory model FAILURE and return false.  */

bool
__atomic_compare_exchange (size_t size, void *mem, void *expect, void *desired,
			   int success, int failure)
{
  switch (size)
  {
#if __LIBATOMIC_SUPPORTS_I1
    case 1:
      COMPARE_EXCHANGE (1);
#endif
#if __LIBATOMIC_SUPPORTS_I2
    case 2:
      COMPARE_EXCHANGE (2);
#endif
#if __LIBATOMIC_SUPPORTS_I4
    case 4:
      COMPARE_EXCHANGE (4);
#endif
#if __LIBATOMIC_SUPPORTS_I8
    case 8:
      COMPARE_EXCHANGE (8);
#endif
#if __LIBATOMIC_SUPPORTS_I16
    case 16:
      COMPARE_EXCHANGE (16);
#endif
    default:
      break;
  }

  /* If control gets here, a lock is needed.  */
  get_lock (mem, success);
  if (memcmp (mem, expect, size) == 0)
    {
      memcpy (mem, desired, size);
      free_lock (mem, success);
      return true;
    }
  memcpy (expect, mem, size);
  free_lock (mem, failure);
  return false;
}



/* Issue a SIZE specific __atomic_load_N function.  */
#define ATOMIC_LOAD(SIZE) I ## SIZE			\
__atomic_load_ ## SIZE   (I ## SIZE *mem, int model)	\
{							\
  I ## SIZE ret;					\
  if (__atomic_always_lock_free (sizeof (ret), 0))	\
    return __atomic_load_n (mem, model);		\
  get_lock (mem, model);				\
  ret = *mem;						\
  free_lock (mem, model);				\
  return ret;						\
}


/* Issue a SIZE specific __atomic_store_N function.  */
#define ATOMIC_STORE(SIZE) void						\
__atomic_store_ ## SIZE (I ## SIZE *mem, I ## SIZE val, int model)	\
{									\
  if (__atomic_always_lock_free (sizeof (val), 0))			\
    __atomic_store_n (mem, val, model);					\
  else									\
    {									\
      get_lock (mem, model);						\
      *mem = val;							\
      free_lock (mem, model);						\
    }									\
}

/* Issue a SIZE specific __atomic_exchange_N function.  */
#define ATOMIC_EXCHANGE(SIZE) I ## SIZE					\
__atomic_exchange_ ## SIZE   (I ## SIZE *mem, I ## SIZE val, int model)	\
{									\
  I ## SIZE ret;							\
  if (__atomic_always_lock_free (sizeof (ret), 0))			\
    return __atomic_exchange_n (mem, val, model);			\
  get_lock (mem, model);						\
  ret = *mem;								\
  *mem = val;								\
  free_lock (mem, model);						\
  return ret;								\
}

/* Issue a SIZE specific __atomic_compare_exchange_N function.
   Note the compiler complains when compiling these since these functions
   do not have the boolean weak parameter, so the parameters don't match
   the builtin exactly.  */

#define ATOMIC_COMPARE_EXCHANGE(SIZE) bool			\
__atomic_compare_exchange_ ## SIZE (I ## SIZE *mem,		\
				    I ## SIZE *expect,		\
				    I ## SIZE desired,		\
				    int success, int failure)	\
{								\
  bool ret;							\
  if (__atomic_always_lock_free (sizeof (desired), 0))		\
    return __atomic_compare_exchange_n (mem, expect, desired, 0,\
				        success, failure);	\
  get_lock (mem, success);					\
  if (*mem == *expect)						\
    {								\
      *mem = desired;						\
      free_lock (mem, success);					\
      return true;						\
    }								\
  *expect = *mem;						\
  free_lock (mem, failure);					\
  return false;							\
}


/* Issue a SIZE specific __atomic_fetch_<op>_N function.  OP is the operation
   name including its trailing underscore (e.g. add_), and SYM is the C
   operator applied to *MEM under the lock when the sequence is not
   lock free.  */
#define ATOMIC_FETCH(SIZE,OP,SYM) I ## SIZE				\
__atomic_fetch_## OP ## SIZE (I ## SIZE *mem, I ## SIZE val, int model) \
{									\
  I ## SIZE ret;							\
  if (__atomic_always_lock_free (sizeof (ret), 0))			\
    return __atomic_fetch_ ## OP ## SIZE (mem, val, model);		\
  get_lock (mem, model);						\
  ret = *mem;								\
  *mem SYM ## =  val;							\
  free_lock (mem, model);						\
  return ret;								\
}

/* Issue a SIZE specific __atomic_fetch_nand_N function.  */
#define ATOMIC_FETCH_NAND(SIZE) I ## SIZE				\
__atomic_fetch_nand_ ## SIZE (I ## SIZE *mem, I ## SIZE val, int model)	\
{									\
  I ## SIZE ret;							\
  if (__atomic_always_lock_free (sizeof (ret), 0))			\
    return __atomic_fetch_nand_ ## SIZE (mem, val, model);		\
  get_lock (mem, model);						\
  ret = *mem;								\
  *mem =  ~(*mem & val);						\
  free_lock (mem, model);						\
  return ret;								\
}

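/* For instance, ATOMIC_FETCH (4, add_, +) below expands to

     I4 __atomic_fetch_add_4 (I4 *mem, I4 val, int model)

   which uses the lock-free builtin form when available and otherwise
   performs "*mem += val" while holding the lock for MEM.  */
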
#if __LIBATOMIC_SUPPORTS_I1
ATOMIC_LOAD (1)
ATOMIC_STORE (1)
ATOMIC_EXCHANGE (1)
ATOMIC_COMPARE_EXCHANGE (1)
ATOMIC_FETCH (1, add_, +)
ATOMIC_FETCH (1, sub_, -)
ATOMIC_FETCH (1, and_, &)
ATOMIC_FETCH (1, or_, |)
ATOMIC_FETCH (1, xor_, ^)
ATOMIC_FETCH_NAND (1)
#endif

#if __LIBATOMIC_SUPPORTS_I2
ATOMIC_LOAD (2)
ATOMIC_STORE (2)
ATOMIC_EXCHANGE (2)
ATOMIC_COMPARE_EXCHANGE (2)
ATOMIC_FETCH (2, add_, +)
ATOMIC_FETCH (2, sub_, -)
ATOMIC_FETCH (2, and_, &)
ATOMIC_FETCH (2, or_, |)
ATOMIC_FETCH (2, xor_, ^)
ATOMIC_FETCH_NAND (2)
#endif

#if __LIBATOMIC_SUPPORTS_I4
ATOMIC_LOAD (4)
ATOMIC_STORE (4)
ATOMIC_EXCHANGE (4)
ATOMIC_COMPARE_EXCHANGE (4)
ATOMIC_FETCH (4, add_, +)
ATOMIC_FETCH (4, sub_, -)
ATOMIC_FETCH (4, and_, &)
ATOMIC_FETCH (4, or_, |)
ATOMIC_FETCH (4, xor_, ^)
ATOMIC_FETCH_NAND (4)
#endif

#if __LIBATOMIC_SUPPORTS_I8
ATOMIC_LOAD (8)
ATOMIC_STORE (8)
ATOMIC_EXCHANGE (8)
ATOMIC_COMPARE_EXCHANGE (8)
ATOMIC_FETCH (8, add_, +)
ATOMIC_FETCH (8, sub_, -)
ATOMIC_FETCH (8, and_, &)
ATOMIC_FETCH (8, or_, |)
ATOMIC_FETCH (8, xor_, ^)
ATOMIC_FETCH_NAND (8)
#endif

#if __LIBATOMIC_SUPPORTS_I16
ATOMIC_LOAD (16)
ATOMIC_STORE (16)
ATOMIC_EXCHANGE (16)
ATOMIC_COMPARE_EXCHANGE (16)
ATOMIC_FETCH (16, add_, +)
ATOMIC_FETCH (16, sub_, -)
ATOMIC_FETCH (16, and_, &)
ATOMIC_FETCH (16, or_, |)
ATOMIC_FETCH (16, xor_, ^)
ATOMIC_FETCH_NAND (16)
#endif
