[patch] 5/n: trans-mem: runtime
Aldy Hernandez
aldyh@redhat.com
Thu Nov 3 17:55:00 GMT 2011
Index: libitm/method-gl.cc
===================================================================
--- libitm/method-gl.cc (.../trunk) (revision 0)
+++ libitm/method-gl.cc (.../branches/transactional-memory) (revision 180773)
@@ -0,0 +1,303 @@
+/* Copyright (C) 2011 Free Software Foundation, Inc.
+ Contributed by Torvald Riegel <triegel@redhat.com>.
+
+ This file is part of the GNU Transactional Memory Library (libitm).
+
+ Libitm is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ Libitm is distributed in the hope that it will be useful, but WITHOUT ANY
+ WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include "libitm_i.h"
+
+using namespace GTM;
+
+namespace {
+
+// This group consists of all TM methods that synchronize via just a single
+// global lock (or ownership record).
+struct gl_mg : public method_group
+{
+ static const gtm_word LOCK_BIT = (~(gtm_word)0 >> 1) + 1;
+ // We can't use the full bitrange because ~0 in gtm_thread::shared_state has
+ // special meaning.
+ static const gtm_word VERSION_MAX = (~(gtm_word)0 >> 1) - 1;
+ static bool is_locked(gtm_word l) { return l & LOCK_BIT; }
+ static gtm_word set_locked(gtm_word l) { return l | LOCK_BIT; }
+ static gtm_word clear_locked(gtm_word l) { return l & ~LOCK_BIT; }
+
+ // The global ownership record.
+ gtm_word orec;
+ virtual void init()
+ {
+ orec = 0;
+ }
+ virtual void fini() { }
+};
+
+static gl_mg o_gl_mg;
+
+
+// The global lock, write-through TM method.
+// Acquires the orec eagerly before the first write, and then writes through.
+// Reads abort if the global orec's version number changed or if it is locked.
+// Currently, writes require undo-logging to prevent deadlock between the
+// serial lock and the global orec (writer txn acquires orec, reader txn
+// upgrades to serial and waits for all other txns, writer tries to upgrade to
+// serial too but cannot, writer cannot abort either, deadlock). We could
+// avoid this if the serial lock would allow us to prevent other threads from
+// going to serial mode, but this probably is too much additional complexity
+// just to optimize this TM method.
+// gtm_thread::shared_state is used to store a transaction's current
+// snapshot time (or commit time). The serial lock uses ~0 for inactive
+// transactions and 0 for active ones. Thus, we always have a meaningful
+// timestamp in shared_state that can be used to implement quiescence-based
+// privatization safety. This even holds if a writing transaction has the
+// lock bit set in its shared_state because this is fine for both the serial
+// lock (the value will be smaller than ~0) and privatization safety (we
+// validate that no other update transaction committed before we acquired the
+// orec, so we have the most recent timestamp and no other transaction can
+// commit until we have committed).
+// However, we therefore cannot use this method for a serial transaction
+// (because shared_state needs to remain at ~0) and we have to be careful
+// when switching to serial mode (see the special handling in trycommit() and
+// rollback()).
+// ??? This sharing adds some complexity wrt. serial mode. Just use a separate
+// state variable?
+class gl_wt_dispatch : public abi_dispatch
+{
+protected:
+ static void pre_write(const void *addr, size_t len)
+ {
+ gtm_thread *tx = gtm_thr();
+ if (unlikely(!gl_mg::is_locked(tx->shared_state)))
+ {
+ // Check for and handle version number overflow.
+ if (unlikely(tx->shared_state >= gl_mg::VERSION_MAX))
+ tx->restart(RESTART_INIT_METHOD_GROUP);
+
+ // CAS global orec from our snapshot time to the locked state.
+ // This validates that we have a consistent snapshot, which is also needed
+ // for making privatization safety work (see the class' comments).
+ gtm_word now = o_gl_mg.orec;
+ if (now != tx->shared_state)
+ tx->restart(RESTART_VALIDATE_WRITE);
+ if (__sync_val_compare_and_swap(&o_gl_mg.orec, now,
+ gl_mg::set_locked(now)) != now)
+ tx->restart(RESTART_LOCKED_WRITE);
+
+ // Set shared_state to new value. The CAS is a full barrier, so the
+ // acquisition of the global orec is visible before this store here,
+ // and the store will not be visible before earlier data loads, which
+ // is required to correctly ensure privatization safety (see
+ // begin_or_restart() and release_orec() for further comments).
+ tx->shared_state = gl_mg::set_locked(now);
+ }
+
+ // TODO Ensure that this gets inlined: Use internal log interface and LTO.
+ GTM_LB(addr, len);
+ }
+
+ static void validate()
+ {
+ // Check that snapshot is consistent. The barrier ensures that this
+ // happens after previous data loads.
+ atomic_read_barrier();
+ gtm_thread *tx = gtm_thr();
+ gtm_word l = o_gl_mg.orec;
+ if (l != tx->shared_state)
+ tx->restart(RESTART_VALIDATE_READ);
+ }
+
+ template <typename V> static V load(const V* addr, ls_modifier mod)
+ {
+ // Read-for-write should be unlikely, but we need to handle it or we will
+ // break later WaW optimizations.
+ if (unlikely(mod == RfW))
+ {
+ pre_write(addr, sizeof(V));
+ return *addr;
+ }
+ V v = *addr;
+ if (likely(mod != RaW))
+ validate();
+ return v;
+ }
+
+ template <typename V> static void store(V* addr, const V value,
+ ls_modifier mod)
+ {
+ if (unlikely(mod != WaW))
+ pre_write(addr, sizeof(V));
+ *addr = value;
+ }
+
+public:
+ static void memtransfer_static(void *dst, const void* src, size_t size,
+ bool may_overlap, ls_modifier dst_mod, ls_modifier src_mod)
+ {
+ if ((dst_mod != WaW && src_mod != RaW)
+ && (dst_mod != NONTXNAL || src_mod == RfW))
+ pre_write(dst, size);
+
+ if (!may_overlap)
+ ::memcpy(dst, src, size);
+ else
+ ::memmove(dst, src, size);
+
+ if (src_mod != RfW && src_mod != RaW && src_mod != NONTXNAL
+ && dst_mod != WaW)
+ validate();
+ }
+
+ static void memset_static(void *dst, int c, size_t size, ls_modifier mod)
+ {
+ if (mod != WaW)
+ pre_write(dst, size);
+ ::memset(dst, c, size);
+ }
+
+ virtual gtm_restart_reason begin_or_restart()
+ {
+ // We don't need to do anything for nested transactions.
+ gtm_thread *tx = gtm_thr();
+ if (tx->parent_txns.size() > 0)
+ return NO_RESTART;
+
+ // Spin until global orec is not locked.
+ // TODO This is not necessary if there are no pure loads (check txn props).
+ gtm_word v;
+ unsigned i = 0;
+ while (gl_mg::is_locked(v = o_gl_mg.orec))
+ {
+ // TODO need method-specific max spin count
+ if (++i > gtm_spin_count_var) return RESTART_VALIDATE_READ;
+ cpu_relax();
+ }
+ // This barrier ensures that we have read the global orec before later
+ // data loads.
+ atomic_read_barrier();
+
+ // Everything is okay, we have a snapshot time.
+ // We don't need to enforce any ordering for the following store. There
+ // are no earlier data loads in this transaction, so the store cannot
+ // become visible before those (which could lead to the violation of
+ // privatization safety). The store can become visible after later loads
+ // but this does not matter because the previous value will have been
+ // smaller or equal (the serial lock will set shared_state to zero when
+ // marking the transaction as active, and restarts enforce immediate
+ // visibility of a smaller or equal value with a barrier (see
+ // release_orec()).
+ tx->shared_state = v;
+ return NO_RESTART;
+ }
+
+ virtual bool trycommit(gtm_word& priv_time)
+ {
+ gtm_thread* tx = gtm_thr();
+ gtm_word v = tx->shared_state;
+
+ // Special case: If shared_state is ~0, then we have acquired the
+ // serial lock (tx->state is not updated yet). In this case, the previous
+ // value isn't available anymore, so grab it from the global lock, which
+ // must have a meaningful value because no other transactions are active
+ // anymore. In particular, if it is locked, then we are an update
+ // transaction, which is all we care about for commit.
+ if (v == ~(typeof v)0)
+ v = o_gl_mg.orec;
+
+ // Release the orec but do not reset shared_state, which will be modified
+ // by the serial lock right after our commit anyway. Also, resetting
+ // shared state here would interfere with the serial lock's use of this
+ // location.
+ if (gl_mg::is_locked(v))
+ {
+ // Release the global orec, increasing its version number / timestamp.
+ // TODO replace with C++0x-style atomics (a release in this case)
+ atomic_write_barrier();
+ v = gl_mg::clear_locked(v) + 1;
+ o_gl_mg.orec = v;
+
+ // Need to ensure privatization safety. Every other transaction must
+ // have a snapshot time that is at least as high as our commit time
+ // (i.e., our commit must be visible to them).
+ priv_time = v;
+ }
+ return true;
+ }
+
+ virtual void rollback(gtm_transaction_cp *cp)
+ {
+ // We don't do anything for rollbacks of nested transactions.
+ if (cp != 0)
+ return;
+
+ gtm_thread *tx = gtm_thr();
+ gtm_word v = tx->shared_state;
+ // Special case: If shared_state is ~0, then we have acquired the
+ // serial lock (tx->state is not updated yet). In this case, the previous
+ // value isn't available anymore, so grab it from the global lock, which
+ // must have a meaningful value because no other transactions are active
+ // anymore. In particular, if it is locked, then we are an update
+ // transaction, which is all we care about for rollback.
+ if (v == ~(typeof v)0)
+ v = o_gl_mg.orec;
+
+ // Release lock and increment version number to prevent dirty reads.
+ // Also reset shared state here, so that begin_or_restart() can expect a
+ // value that is correct wrt. privatization safety.
+ if (gl_mg::is_locked(v))
+ {
+ // Release the global orec, increasing its version number / timestamp.
+ // TODO replace with C++0x-style atomics (a release in this case)
+ atomic_write_barrier();
+ v = gl_mg::clear_locked(v) + 1;
+ o_gl_mg.orec = v;
+
+ // Also reset the timestamp published via shared_state.
+ // Special case: Only do this if we are not a serial transaction
+ // because otherwise, we would interfere with the serial lock.
+ if (tx->shared_state != ~(typeof tx->shared_state)0)
+ tx->shared_state = v;
+
+ // We need a store-load barrier after this store to prevent it
+ // from becoming visible after later data loads because the
+ // previous value of shared_state has been higher than the actual
+ // snapshot time (the lock bit had been set), which could break
+ // privatization safety. We do not need a barrier before this
+ // store (see pre_write() for an explanation).
+ __sync_synchronize();
+ }
+
+ }
+
+ CREATE_DISPATCH_METHODS(virtual, )
+ CREATE_DISPATCH_METHODS_MEM()
+
+ gl_wt_dispatch() : abi_dispatch(false, true, false, false, &o_gl_mg)
+ { }
+};
+
+} // anon namespace
+
+static const gl_wt_dispatch o_gl_wt_dispatch;
+
+abi_dispatch *
+GTM::dispatch_gl_wt ()
+{
+ return const_cast<gl_wt_dispatch *>(&o_gl_wt_dispatch);
+}
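
The comments in method-gl.cc above describe a single global ownership record
whose most significant bit acts as a lock flag and whose remaining bits form a
version number / timestamp. The following standalone sketch (not part of the
patch; the names g_orec and tx_* are invented for illustration, and barriers
are reduced to the bare minimum) shows the begin/write/validate/commit protocol
those comments describe:

#include <stdint.h>

typedef uintptr_t word;
static const word LOCK_BIT = (~(word)0 >> 1) + 1;   /* most significant bit */
static word g_orec = 0;                              /* global ownership record */

struct tx_state { word snapshot; bool is_writer; };

/* Begin: take the current (unlocked) orec value as our snapshot time.  */
static bool tx_begin (tx_state *tx)
{
  word v = g_orec;
  if (v & LOCK_BIT)
    return false;                       /* a writer is active; spin/restart */
  tx->snapshot = v;
  tx->is_writer = false;
  return true;
}

/* First write: CAS the orec from our snapshot to the locked state.  Failure
   means some other transaction committed or locked it after our snapshot.  */
static bool tx_pre_write (tx_state *tx)
{
  if (tx->is_writer)
    return true;
  if (__sync_val_compare_and_swap (&g_orec, tx->snapshot,
                                   tx->snapshot | LOCK_BIT) != tx->snapshot)
    return false;                       /* restart the transaction */
  tx->is_writer = true;
  return true;
}

/* After a transactional load: the snapshot is consistent iff the orec is
   unchanged (neither locked nor advanced).  */
static bool tx_validate (const tx_state *tx)
{
  return g_orec == tx->snapshot;
}

/* Commit: a writer releases the orec with an incremented version number,
   which forces all concurrent readers to revalidate.  */
static void tx_commit (tx_state *tx)
{
  if (tx->is_writer)
    {
      __sync_synchronize ();            /* release barrier, as in trycommit() */
      g_orec = tx->snapshot + 1;
    }
}
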
Index: libitm/config/linux/futex.cc
===================================================================
--- libitm/config/linux/futex.cc (.../trunk) (revision 0)
+++ libitm/config/linux/futex.cc (.../branches/transactional-memory) (revision 180773)
@@ -0,0 +1,82 @@
+/* Copyright (C) 2008, 2009, 2011 Free Software Foundation, Inc.
+ Contributed by Richard Henderson <rth@redhat.com>.
+
+ This file is part of the GNU Transactional Memory Library (libitm).
+
+ Libitm is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ Libitm is distributed in the hope that it will be useful, but WITHOUT ANY
+ WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+/* Provide access to the futex system call. */
+
+#include "libitm_i.h"
+#include "futex.h"
+#include <errno.h>
+
+namespace GTM HIDDEN {
+
+#define FUTEX_WAIT 0
+#define FUTEX_WAKE 1
+#define FUTEX_PRIVATE_FLAG 128L
+
+
+static long int gtm_futex_wait = FUTEX_WAIT | FUTEX_PRIVATE_FLAG;
+static long int gtm_futex_wake = FUTEX_WAKE | FUTEX_PRIVATE_FLAG;
+
+
+void
+futex_wait (int *addr, int val)
+{
+ long res;
+
+ res = sys_futex0 (addr, gtm_futex_wait, val);
+ if (__builtin_expect (res == -ENOSYS, 0))
+ {
+ gtm_futex_wait = FUTEX_WAIT;
+ gtm_futex_wake = FUTEX_WAKE;
+ res = sys_futex0 (addr, FUTEX_WAIT, val);
+ }
+ if (__builtin_expect (res < 0, 0))
+ {
+ if (res == -EWOULDBLOCK || res == -ETIMEDOUT)
+ ;
+ else if (res == -EFAULT)
+ GTM_fatal ("futex failed (EFAULT %p)", addr);
+ else
+ GTM_fatal ("futex failed (%s)", strerror(-res));
+ }
+}
+
+
+long
+futex_wake (int *addr, int count)
+{
+ long res = sys_futex0 (addr, gtm_futex_wake, count);
+ if (__builtin_expect (res == -ENOSYS, 0))
+ {
+ gtm_futex_wait = FUTEX_WAIT;
+ gtm_futex_wake = FUTEX_WAKE;
+ res = sys_futex0 (addr, FUTEX_WAKE, count);
+ }
+ if (__builtin_expect (res < 0, 0))
+ GTM_fatal ("futex failed (%s)", strerror(-res));
+ else
+ return res;
+}
+
+} // namespace GTM
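
futex_wait() above blocks only if the futex word still contains the expected
value, and falls back from the private-futex operations to the shared ones on
kernels that lack FUTEX_PRIVATE_FLAG. A minimal usage sketch of the wait/wake
pairing (hypothetical caller, not from the patch):

/* Hypothetical caller of the wrappers above: a one-shot ready flag.  */
static int ready_flag;                  /* 0 = not ready, 1 = ready */

static void wait_until_ready ()
{
  while (ready_flag == 0)
    GTM::futex_wait (&ready_flag, 0);   /* returns at once if flag is no longer 0 */
}

static void signal_ready ()
{
  ready_flag = 1;
  __sync_synchronize ();                /* publish the store before waking */
  GTM::futex_wake (&ready_flag, 1);     /* wake one waiter, if any */
}
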
Index: libitm/config/linux/futex.h
===================================================================
--- libitm/config/linux/futex.h (.../trunk) (revision 0)
+++ libitm/config/linux/futex.h (.../branches/transactional-memory) (revision 180773)
@@ -0,0 +1,39 @@
+/* Copyright (C) 2008, 2009 Free Software Foundation, Inc.
+ Contributed by Richard Henderson <rth@redhat.com>.
+
+ This file is part of the GNU Transactional Memory Library (libitm).
+
+ Libitm is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ Libitm is distributed in the hope that it will be useful, but WITHOUT ANY
+ WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+/* Provide access to the futex system call. */
+
+#ifndef GTM_FUTEX_H
+#define GTM_FUTEX_H 1
+
+namespace GTM HIDDEN {
+
+#include "futex_bits.h"
+
+extern void futex_wait (int *addr, int val);
+extern long futex_wake (int *addr, int count);
+
+}
+
+#endif /* GTM_FUTEX_H */
Index: libitm/config/linux/alpha/futex_bits.h
===================================================================
--- libitm/config/linux/alpha/futex_bits.h (.../trunk) (revision 0)
+++ libitm/config/linux/alpha/futex_bits.h (.../branches/transactional-memory) (revision 180773)
@@ -0,0 +1,56 @@
+/* Copyright (C) 2008, 2009 Free Software Foundation, Inc.
+ Contributed by Richard Henderson <rth@redhat.com>.
+
+ This file is part of the GNU Transactional Memory Library (libitm).
+
+ Libitm is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ Libitm is distributed in the hope that it will be useful, but WITHOUT ANY
+ WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+/* Provide target-specific access to the futex system call. */
+
+#ifndef SYS_futex
+#define SYS_futex 394
+#endif
+
+static inline long
+sys_futex0 (int *addr, long op, long val)
+{
+ register long sc_0 __asm__("$0");
+ register long sc_16 __asm__("$16");
+ register long sc_17 __asm__("$17");
+ register long sc_18 __asm__("$18");
+ register long sc_19 __asm__("$19");
+ long res;
+
+ sc_0 = SYS_futex;
+ sc_16 = (long) addr;
+ sc_17 = op;
+ sc_18 = val;
+ sc_19 = 0;
+ __asm volatile ("callsys"
+ : "=r" (sc_0), "=r"(sc_19)
+ : "0"(sc_0), "r" (sc_16), "r"(sc_17), "r"(sc_18), "1"(sc_19)
+ : "$1", "$2", "$3", "$4", "$5", "$6", "$7", "$8",
+ "$22", "$23", "$24", "$25", "$27", "$28", "memory");
+
+ res = sc_0;
+ if (__builtin_expect (sc_19, 0))
+ res = -res;
+ return res;
+}
Index: libitm/config/linux/rwlock.cc
===================================================================
--- libitm/config/linux/rwlock.cc (.../trunk) (revision 0)
+++ libitm/config/linux/rwlock.cc (.../branches/transactional-memory) (revision 180773)
@@ -0,0 +1,235 @@
+/* Copyright (C) 2011 Free Software Foundation, Inc.
+ Contributed by Torvald Riegel <triegel@redhat.com>.
+
+ This file is part of the GNU Transactional Memory Library (libitm).
+
+ Libitm is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ Libitm is distributed in the hope that it will be useful, but WITHOUT ANY
+ WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include "libitm_i.h"
+#include "futex.h"
+#include <limits.h>
+
+namespace GTM HIDDEN {
+
+// Acquire a RW lock for reading.
+
+void
+gtm_rwlock::read_lock (gtm_thread *tx)
+{
+ for (;;)
+ {
+ // Fast path: first announce our intent to read, then check for
+ // conflicting intents to write. The barrier makes sure that this
+ // happens in exactly this order.
+ tx->shared_state = 0;
+ __sync_synchronize();
+ if (likely(writers == 0))
+ return;
+
+ // There seems to be an active, waiting, or confirmed writer, so enter
+ // the futex-based slow path.
+
+ // Before waiting, we clear our read intent and check whether there are any
+ // writers that might potentially wait for readers. If so, wake them.
+ // We need the barrier here for the same reason that we need it in
+ // read_unlock().
+ // TODO Potentially too many wake-ups. See comments in read_unlock().
+ tx->shared_state = ~(typeof tx->shared_state)0;
+ __sync_synchronize();
+ if (writer_readers > 0)
+ {
+ writer_readers = 0;
+ futex_wake(&writer_readers, 1);
+ }
+
+ // Signal that there are waiting readers and wait until there is no
+ // writer anymore.
+ // TODO Spin here on writers for a while. Consider whether we woke
+ // any writers before?
+ while (writers)
+ {
+ // An active writer. Wait until it has finished. To avoid lost
+ // wake-ups, we need to use Dekker-like synchronization.
+ // Note that we cannot reset readers to zero when we see that there
+ // are no writers anymore after the barrier because this pending
+ // store could then lead to lost wake-ups at other readers.
+ readers = 1;
+ __sync_synchronize();
+ if (writers)
+ futex_wait(&readers, 1);
+ }
+
+ // And we try again to acquire a read lock.
+ }
+}
+
+
+// Acquire a RW lock for writing. Generic version that also works for
+// upgrades.
+// Note that an upgrade might fail (and thus waste previous work done during
+// this transaction) if there is another thread that tried to go into serial
+// mode earlier (i.e., upgrades do not have higher priority than pure writers).
+// However, this seems rare enough to not consider it further as we need both
+// a non-upgrade writer and a writer to happen to switch to serial mode
+// concurrently. If we'd want to handle this, a writer waiting for readers
+// would have to coordinate with later arriving upgrades and hand over the
+// lock to them, including the reader-waiting state. We can try to support
+// this if this will actually happen often enough in real workloads.
+
+bool
+gtm_rwlock::write_lock_generic (gtm_thread *tx)
+{
+ // Try to acquire the write lock.
+ unsigned int w;
+ if (unlikely((w = __sync_val_compare_and_swap(&writers, 0, 1)) != 0))
+ {
+ // If this is an upgrade, we must not wait for other writers or
+ // upgrades.
+ if (tx != 0)
+ return false;
+
+ // There is already a writer. If there are no other waiting writers,
+ // switch to contended mode.
+ // Note that this is actually an atomic exchange, not a TAS. Also,
+ // it's only guaranteed to have acquire semantics, whereas we need a
+ // full barrier to make the Dekker-style synchronization work. However,
+ // we rely on the xchg being a full barrier on the architectures that we
+ // consider here.
+ // ??? Use C++0x atomics as soon as they are available.
+ if (w != 2)
+ w = __sync_lock_test_and_set(&writers, 2);
+ while (w != 0)
+ {
+ futex_wait(&writers, 2);
+ w = __sync_lock_test_and_set(&writers, 2);
+ }
+ }
+
+ // We have acquired the writer side of the R/W lock. Now wait for any
+ // readers that might still be active.
+ // We don't need an extra barrier here because the CAS and the xchg
+ // operations have full barrier semantics already.
+
+ // If this is an upgrade, we are not a reader anymore. This is only safe to
+ // do after we have acquired the writer lock.
+ // TODO In the worst case, this requires one wait/wake pair for each
+ // active reader. Reduce this!
+ if (tx != 0)
+ tx->shared_state = ~(typeof tx->shared_state)0;
+
+ for (gtm_thread *it = gtm_thread::list_of_threads; it != 0;
+ it = it->next_thread)
+ {
+ // Use a loop here to check reader flags again after waiting.
+ while (it->shared_state != ~(typeof it->shared_state)0)
+ {
+ // An active reader. Wait until it has finished. To avoid lost
+ // wake-ups, we need to use Dekker-like synchronization.
+ // Note that we can reset writer_readers to zero when we see after
+ // the barrier that the reader has finished in the meantime;
+ // however, this is only possible because we are the only writer.
+ // TODO Spin for a while on this reader flag.
+ writer_readers = 1;
+ __sync_synchronize();
+ if (it->shared_state != ~(typeof it->shared_state)0)
+ futex_wait(&writer_readers, 1);
+ else
+ writer_readers = 0;
+ }
+ }
+
+ return true;
+}
+
+// Acquire a RW lock for writing.
+
+void
+gtm_rwlock::write_lock ()
+{
+ write_lock_generic (0);
+}
+
+
+// Upgrade a RW lock that has been locked for reading to a writing lock.
+// Do this without possibility of another writer incoming. Return false
+// if this attempt fails (i.e. another thread also upgraded).
+
+bool
+gtm_rwlock::write_upgrade (gtm_thread *tx)
+{
+ return write_lock_generic (tx);
+}
+
+
+// Release a RW lock from reading.
+
+void
+gtm_rwlock::read_unlock (gtm_thread *tx)
+{
+ tx->shared_state = ~(typeof tx->shared_state)0;
+
+ // If there is a writer waiting for readers, wake it up. We need the barrier
+ // to avoid lost wake-ups.
+ // ??? We might not be the last active reader, so the wake-up might happen
+ // too early. How do we avoid this without slowing down readers too much?
+ // Each reader could scan the list of txns for other active readers but
+ // this can result in many cache misses. Use combining instead?
+ // TODO Sends out one wake-up for each reader in the worst case.
+ __sync_synchronize();
+ if (unlikely(writer_readers > 0))
+ {
+ writer_readers = 0;
+ futex_wake(&writer_readers, 1);
+ }
+}
+
+
+// Release a RW lock from writing.
+
+void
+gtm_rwlock::write_unlock ()
+{
+ // This is supposed to be a full barrier.
+ if (__sync_fetch_and_sub(&writers, 1) == 2)
+ {
+ // There might be waiting writers, so wake them.
+ writers = 0;
+ if (futex_wake(&writers, 1) == 0)
+ {
+ // If we did not wake any waiting writers, we might indeed be the
+ // last writer (this can happen because write_lock_generic()
+ // exchanges 0 or 1 to 2 and thus might go to contended mode even if
+ // no other thread holds the write lock currently). Therefore, we
+ // have to wake up readers here as well.
+ futex_wake(&readers, INT_MAX);
+ }
+ return;
+ }
+ // No waiting writers, so wake up all waiting readers.
+ // Because the fetch_and_sub is a full barrier already, we don't need
+ // another barrier here (as in read_unlock()).
+ if (readers > 0)
+ {
+ readers = 0;
+ futex_wake(&readers, INT_MAX);
+ }
+}
+
+} // namespace GTM
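
The read_lock()/write_lock_generic() fast paths above depend on a Dekker-style
"store my flag, full barrier, load the other side's flag" pattern, which
guarantees that a reader and a writer can never both take their fast paths at
the same time. A stripped-down sketch of just that pattern (illustrative only;
reader_active and writer_active are invented stand-ins for shared_state and the
writers futex word):

static int reader_active;   /* stands in for tx->shared_state == 0 */
static int writer_active;   /* stands in for the 'writers' futex word */

/* Reader fast path: announce intent, then check for a writer.  */
static bool try_read_fast_path ()
{
  reader_active = 1;
  __sync_synchronize ();                /* store before load, as in read_lock() */
  if (writer_active == 0)
    return true;                        /* no writer: read lock acquired */
  reader_active = 0;                    /* back out; take the futex slow path */
  return false;
}

/* Writer side: announce intent, then wait until no reader flag is visible.  */
static void write_path_wait_for_readers ()
{
  writer_active = 1;
  __sync_synchronize ();                /* same Dekker ordering, writer side */
  while (reader_active != 0)
    ;                                   /* the real code futex-waits instead of spinning */
}
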
Index: libitm/config/linux/x86/futex_bits.h
===================================================================
--- libitm/config/linux/x86/futex_bits.h (.../trunk) (revision 0)
+++ libitm/config/linux/x86/futex_bits.h (.../branches/transactional-memory) (revision 180773)
@@ -0,0 +1,82 @@
+/* Copyright (C) 2008, 2009, 2011 Free Software Foundation, Inc.
+ Contributed by Richard Henderson <rth@redhat.com>.
+
+ This file is part of the GNU Transactional Memory Library (libitm).
+
+ Libitm is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ Libitm is distributed in the hope that it will be useful, but WITHOUT ANY
+ WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifdef __x86_64__
+# ifndef SYS_futex
+# define SYS_futex 202
+# endif
+
+static inline long
+sys_futex0 (int *addr, long op, long val)
+{
+ register long r10 __asm__("%r10") = 0;
+ long res;
+
+ __asm volatile ("syscall"
+ : "=a" (res)
+ : "0" (SYS_futex), "D" (addr), "S" (op), "d" (val), "r" (r10)
+ : "r11", "rcx", "memory");
+
+ return res;
+}
+
+#else
+# ifndef SYS_futex
+# define SYS_futex 240
+# endif
+
+# ifdef __PIC__
+
+static inline long
+sys_futex0 (int *addr, int op, int val)
+{
+ long res;
+
+ __asm volatile ("xchgl\t%%ebx, %2\n\t"
+ "int\t$0x80\n\t"
+ "xchgl\t%%ebx, %2"
+ : "=a" (res)
+ : "0"(SYS_futex), "r" (addr), "c"(op),
+ "d"(val), "S"(0)
+ : "memory");
+ return res;
+}
+
+# else
+
+static inline long
+sys_futex0 (int *addr, int op, int val)
+{
+ long res;
+
+ __asm volatile ("int $0x80"
+ : "=a" (res)
+ : "0"(SYS_futex), "b" (addr), "c"(op),
+ "d"(val), "S"(0)
+ : "memory");
+ return res;
+}
+
+# endif /* __PIC__ */
+#endif /* __x86_64__ */
Index: libitm/config/linux/x86/tls.h
===================================================================
--- libitm/config/linux/x86/tls.h (.../trunk) (revision 0)
+++ libitm/config/linux/x86/tls.h (.../branches/transactional-memory) (revision 180773)
@@ -0,0 +1,105 @@
+/* Copyright (C) 2008, 2009, 2011 Free Software Foundation, Inc.
+ Contributed by Richard Henderson <rth@redhat.com>.
+
+ This file is part of the GNU Transactional Memory Library (libitm).
+
+ Libitm is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ Libitm is distributed in the hope that it will be useful, but WITHOUT ANY
+ WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef LIBITM_X86_TLS_H
+#define LIBITM_X86_TLS_H 1
+
+#if defined(__GLIBC_PREREQ) && __GLIBC_PREREQ(2, 10)
+/* Use slots in the TCB head rather than __thread lookups.
+ GLIBC has reserved words 10 through 13 for TM. */
+#define HAVE_ARCH_GTM_THREAD 1
+#define HAVE_ARCH_GTM_THREAD_DISP 1
+#endif
+
+#include "config/generic/tls.h"
+
+#if defined(__GLIBC_PREREQ) && __GLIBC_PREREQ(2, 10)
+namespace GTM HIDDEN {
+
+#ifdef __x86_64__
+#ifdef __LP64__
+# define SEG_READ(OFS) "movq\t%%fs:(" #OFS "*8),%0"
+# define SEG_WRITE(OFS) "movq\t%0,%%fs:(" #OFS "*8)"
+# define SEG_DECODE_READ(OFS) SEG_READ(OFS) "\n\t" \
+ "rorq\t$17,%0\n\t" \
+ "xorq\t%%fs:48,%0"
+# define SEG_ENCODE_WRITE(OFS) "xorq\t%%fs:48,%0\n\t" \
+ "rolq\t$17,%0\n\t" \
+ SEG_WRITE(OFS)
+#else
+// For X32.
+# define SEG_READ(OFS) "movl\t%%fs:(" #OFS "*4),%0"
+# define SEG_WRITE(OFS) "movl\t%0,%%fs:(" #OFS "*4)"
+# define SEG_DECODE_READ(OFS) SEG_READ(OFS) "\n\t" \
+ "rorl\t$9,%0\n\t" \
+ "xorl\t%%fs:24,%0"
+# define SEG_ENCODE_WRITE(OFS) "xorl\t%%fs:24,%0\n\t" \
+ "roll\t$9,%0\n\t" \
+ SEG_WRITE(OFS)
+#endif
+#else
+# define SEG_READ(OFS) "movl\t%%gs:(" #OFS "*4),%0"
+# define SEG_WRITE(OFS) "movl\t%0,%%gs:(" #OFS "*4)"
+# define SEG_DECODE_READ(OFS) SEG_READ(OFS) "\n\t" \
+ "rorl\t$9,%0\n\t" \
+ "xorl\t%%gs:24,%0"
+# define SEG_ENCODE_WRITE(OFS) "xorl\t%%gs:24,%0\n\t" \
+ "roll\t$9,%0\n\t" \
+ SEG_WRITE(OFS)
+#endif
+
+static inline struct gtm_thread *gtm_thr(void)
+{
+ struct gtm_thread *r;
+ asm volatile (SEG_READ(10) : "=r"(r));
+ return r;
+}
+
+static inline void set_gtm_thr(struct gtm_thread *x)
+{
+ asm volatile (SEG_WRITE(10) : : "r"(x));
+}
+
+static inline struct abi_dispatch *abi_disp(void)
+{
+ struct abi_dispatch *r;
+ asm volatile (SEG_DECODE_READ(11) : "=r"(r));
+ return r;
+}
+
+static inline void set_abi_disp(struct abi_dispatch *x)
+{
+ void *scratch;
+ asm volatile (SEG_ENCODE_WRITE(11) : "=r"(scratch) : "0"(x));
+}
+
+#undef SEG_READ
+#undef SEG_WRITE
+#undef SEG_DECODE_READ
+#undef SEG_ENCODE_WRITE
+
+} // namespace GTM
+#endif /* >= GLIBC 2.10 */
+
+#endif // LIBITM_X86_TLS_H
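
The SEG_ENCODE_WRITE and SEG_DECODE_READ macros above store the dispatch
pointer in its TCB slot using glibc's pointer-guard scheme: xor with the
per-process guard value (at %fs:48 or %gs:24) followed by a 17- or 9-bit
rotate. A plain-C model of the 64-bit pair (a sketch only; 'guard' stands for
the value read from %fs:48):

#include <stdint.h>

static inline uint64_t ptr_encode (uint64_t ptr, uint64_t guard)
{
  uint64_t x = ptr ^ guard;                /* xorq %fs:48,%0 */
  return (x << 17) | (x >> 47);            /* rolq $17,%0 */
}

static inline uint64_t ptr_decode (uint64_t val, uint64_t guard)
{
  uint64_t x = (val >> 17) | (val << 47);  /* rorq $17,%0 */
  return x ^ guard;                        /* xorq %fs:48,%0 */
}
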
Index: libitm/config/linux/rwlock.h
===================================================================
--- libitm/config/linux/rwlock.h (.../trunk) (revision 0)
+++ libitm/config/linux/rwlock.h (.../branches/transactional-memory) (revision 180773)
@@ -0,0 +1,66 @@
+/* Copyright (C) 2011 Free Software Foundation, Inc.
+ Contributed by Torvald Riegel <triegel@redhat.com>.
+
+ This file is part of the GNU Transactional Memory Library (libitm).
+
+ Libitm is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ Libitm is distributed in the hope that it will be useful, but WITHOUT ANY
+ WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef GTM_RWLOCK_H
+#define GTM_RWLOCK_H
+
+#include "common.h"
+
+namespace GTM HIDDEN {
+
+struct gtm_thread;
+
+// This data structure is the blocking, futex-based version of the Dekker-style
+// reader-writer lock used to provide mutual exclusion between active and
+// serial transactions.
+// See libitm's documentation for further details.
+//
+// In this implementation, writers are given highest priority access but
+// read-to-write upgrades do not have a higher priority than writers.
+
+class gtm_rwlock
+{
+ // TODO Put futexes on different cachelines?
+ int writers; // Writers' futex.
+ int writer_readers; // A confirmed writer waits here for readers.
+ int readers; // Readers wait here for writers (iff true).
+
+ public:
+ gtm_rwlock() : writers(0), writer_readers(0), readers(0) {};
+
+ void read_lock (gtm_thread *tx);
+ void read_unlock (gtm_thread *tx);
+
+ void write_lock ();
+ void write_unlock ();
+
+ bool write_upgrade (gtm_thread *tx);
+
+ protected:
+ bool write_lock_generic (gtm_thread *tx);
+};
+
+} // namespace GTM
+
+#endif // GTM_RWLOCK_H
Index: libitm/config/posix/cachepage.cc
===================================================================
--- libitm/config/posix/cachepage.cc (.../trunk) (revision 0)
+++ libitm/config/posix/cachepage.cc (.../branches/transactional-memory) (revision 180773)
@@ -0,0 +1,183 @@
+/* Copyright (C) 2009 Free Software Foundation, Inc.
+ Contributed by Richard Henderson <rth@redhat.com>.
+
+ This file is part of the GNU Transactional Memory Library (libitm).
+
+ Libitm is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ Libitm is distributed in the hope that it will be useful, but WITHOUT ANY
+ WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include "libitm_i.h"
+#include <pthread.h>
+
+//
+// We have three possibilities for allocation: mmap, memalign, posix_memalign
+//
+
+#if defined(HAVE_MMAP_ANON) || defined(HAVE_MMAP_DEV_ZERO)
+#include <sys/mman.h>
+#include <fcntl.h>
+#endif
+#ifdef HAVE_MALLOC_H
+#include <malloc.h>
+#endif
+
+namespace GTM HIDDEN {
+
+#if defined(HAVE_MMAP_ANON)
+# if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
+# define MAP_ANONYMOUS MAP_ANON
+# endif
+# define dev_zero -1
+#elif defined(HAVE_MMAP_DEV_ZERO)
+# ifndef MAP_ANONYMOUS
+# define MAP_ANONYMOUS 0
+# endif
+static int dev_zero = -1;
+#endif
+
+#if defined(HAVE_MMAP_ANON) || defined(HAVE_MMAP_DEV_ZERO)
+/* If we get here, we've already opened /dev/zero and verified that
+ PAGE_SIZE is valid for the system. */
+static gtm_cacheline_page * alloc_mmap (void) UNUSED;
+static gtm_cacheline_page *
+alloc_mmap (void)
+{
+ gtm_cacheline_page *r;
+ r = (gtm_cacheline_page *) mmap (NULL, PAGE_SIZE, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, dev_zero, 0);
+ if (r == (gtm_cacheline_page *) MAP_FAILED)
+ abort ();
+ return r;
+}
+#endif /* MMAP_ANON | MMAP_DEV_ZERO */
+
+#ifdef HAVE_MEMALIGN
+static gtm_cacheline_page * alloc_memalign (void) UNUSED;
+static gtm_cacheline_page *
+alloc_memalign (void)
+{
+ gtm_cacheline_page *r;
+ r = (gtm_cacheline_page *) memalign (PAGE_SIZE, PAGE_SIZE);
+ if (r == NULL)
+ abort ();
+ return r;
+}
+#endif /* MEMALIGN */
+
+#ifdef HAVE_POSIX_MEMALIGN
+static gtm_cacheline_page *alloc_posix_memalign (void) UNUSED;
+static gtm_cacheline_page *
+alloc_posix_memalign (void)
+{
+ void *r;
+ if (posix_memalign (&r, PAGE_SIZE, PAGE_SIZE))
+ abort ();
+ return (gtm_cacheline_page *) r;
+}
+#endif /* POSIX_MEMALIGN */
+
+#if defined(HAVE_MMAP_ANON) && defined(FIXED_PAGE_SIZE)
+# define alloc_page alloc_mmap
+#elif defined(HAVE_MMAP_DEV_ZERO) && defined(FIXED_PAGE_SIZE)
+static gtm_cacheline_page *
+alloc_page (void)
+{
+ if (dev_zero < 0)
+ {
+ dev_zero = open ("/dev/zero", O_RDWR);
+ assert (dev_zero >= 0);
+ }
+ return alloc_mmap ();
+}
+#elif defined(HAVE_MMAP_ANON) || defined(HAVE_MMAP_DEV_ZERO)
+static gtm_cacheline_page * (*alloc_page) (void);
+static void __attribute__((constructor))
+init_alloc_page (void)
+{
+ size_t page_size = getpagesize ();
+ if (page_size <= PAGE_SIZE && PAGE_SIZE % page_size == 0)
+ {
+# ifndef HAVE_MMAP_ANON
+ dev_zero = open ("/dev/zero", O_RDWR);
+ assert (dev_zero >= 0);
+# endif
+ alloc_page = alloc_mmap;
+ return;
+ }
+# ifdef HAVE_MEMALIGN
+ alloc_page = alloc_memalign;
+# elif defined(HAVE_POSIX_MEMALIGN)
+ alloc_page = alloc_posix_memalign;
+# else
+# error "No fallback aligned memory allocation method"
+# endif
+}
+#elif defined(HAVE_MEMALIGN)
+# define alloc_page alloc_memalign
+#elif defined(HAVE_POSIX_MEMALIGN)
+# define alloc_page alloc_posix_memalign
+#else
+# error "No aligned memory allocation method"
+#endif
+
+static gtm_cacheline_page *free_pages;
+static pthread_mutex_t free_page_lock = PTHREAD_MUTEX_INITIALIZER;
+
+void *
+gtm_cacheline_page::operator new (size_t size)
+{
+ assert (size == sizeof (gtm_cacheline_page));
+ assert (size <= PAGE_SIZE);
+
+ pthread_mutex_lock(&free_page_lock);
+
+ gtm_cacheline_page *r = free_pages;
+ free_pages = r ? r->prev : NULL;
+
+ pthread_mutex_unlock(&free_page_lock);
+
+ if (r == NULL)
+ r = alloc_page ();
+
+ return r;
+}
+
+void
+gtm_cacheline_page::operator delete (void *xhead)
+{
+ gtm_cacheline_page *head = static_cast<gtm_cacheline_page *>(xhead);
+ gtm_cacheline_page *tail;
+
+ if (head == 0)
+ return;
+
+ /* ??? We should eventually really free some of these. */
+
+ for (tail = head; tail->prev != 0; tail = tail->prev)
+ continue;
+
+ pthread_mutex_lock(&free_page_lock);
+
+ tail->prev = free_pages;
+ free_pages = head;
+
+ pthread_mutex_unlock(&free_page_lock);
+}
+
+} // namespace GTM
Index: libitm/config/posix/rwlock.cc
===================================================================
--- libitm/config/posix/rwlock.cc (.../trunk) (revision 0)
+++ libitm/config/posix/rwlock.cc (.../branches/transactional-memory) (revision 180773)
@@ -0,0 +1,288 @@
+/* Copyright (C) 2008, 2009, 2011 Free Software Foundation, Inc.
+ Contributed by Richard Henderson <rth@redhat.com>.
+
+ This file is part of the GNU Transactional Memory Library (libitm).
+
+ Libitm is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ Libitm is distributed in the hope that it will be useful, but WITHOUT ANY
+ WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include "libitm_i.h"
+
+namespace GTM HIDDEN {
+
+// Initialize a new RW lock.
+// ??? Move this back to the header file when constexpr is implemented.
+
+gtm_rwlock::gtm_rwlock()
+ : mutex (PTHREAD_MUTEX_INITIALIZER),
+ c_readers (PTHREAD_COND_INITIALIZER),
+ c_writers (PTHREAD_COND_INITIALIZER),
+ c_confirmed_writers (PTHREAD_COND_INITIALIZER),
+ summary (0),
+ a_readers (0),
+ w_readers (0),
+ w_writers (0)
+{ }
+
+gtm_rwlock::~gtm_rwlock()
+{
+ pthread_mutex_destroy (&this->mutex);
+ pthread_cond_destroy (&this->c_readers);
+ pthread_cond_destroy (&this->c_writers);
+}
+
+// Acquire a RW lock for reading.
+
+void
+gtm_rwlock::read_lock (gtm_thread *tx)
+{
+ // Fast path: first announce our intent to read, then check for conflicting
+ // intents to write. The barrier makes sure that this happens in exactly
+ // this order.
+ tx->shared_state = 0;
+ __sync_synchronize();
+ unsigned int sum = this->summary;
+ if (likely(!(sum & (a_writer | w_writer))))
+ return;
+
+ // There seems to be an active, waiting, or confirmed writer, so enter the
+ // mutex-based slow path. To try to keep the number of readers small that
+ // the writer will see, we clear our read flag right away before entering
+ // the critical section. Otherwise, the writer would have to wait for us to
+ // get into the critical section. (Note that for correctness, this only has
+ // to happen before we leave the slow path and before we wait for any
+ // writer).
+ // ??? Add a barrier to enforce early visibility of this?
+ tx->shared_state = ~(typeof tx->shared_state)0;
+
+ pthread_mutex_lock (&this->mutex);
+
+ // Read summary again after acquiring the mutex because it might have
+ // changed during waiting for the mutex to become free.
+ sum = this->summary;
+
+ // If there is a writer waiting for readers, wake it up. Only do that if we
+ // might be the last reader that could do the wake-up, otherwise skip the
+ // wake-up but decrease a_readers to show that we have entered the slow path.
+ // This has to happen before we wait for any writers or upgraders.
+ // See write_lock_generic() for further explanations.
+ if (this->a_readers > 0)
+ {
+ this->a_readers--;
+ if (this->a_readers == 0)
+ pthread_cond_signal(&this->c_confirmed_writers);
+ }
+
+ // If there is an active or waiting writer, we must wait.
+ while (sum & (a_writer | w_writer))
+ {
+ this->summary = sum | w_reader;
+ this->w_readers++;
+ pthread_cond_wait (&this->c_readers, &this->mutex);
+ sum = this->summary;
+ if (--this->w_readers == 0)
+ sum &= ~w_reader;
+ }
+
+ // Otherwise we can acquire the lock for read.
+ tx->shared_state = 0;
+
+ pthread_mutex_unlock(&this->mutex);
+}
+
+
+// Acquire a RW lock for writing. Generic version that also works for
+// upgrades.
+// Note that an upgrade might fail (and thus waste previous work done during
+// this transaction) if there is another thread that tried to go into serial
+// mode earlier (i.e., upgrades do not have higher priority than pure writers).
+// However, this seems rare enough to not consider it further as we need both
+// a non-upgrade writer and a writer to happen to switch to serial mode
+// concurrently. If we'd want to handle this, a writer waiting for readers
+// would have to coordinate with later arriving upgrades and hand over the
+// lock to them, including the reader-waiting state. We can try to support
+// this if this will actually happen often enough in real workloads.
+
+bool
+gtm_rwlock::write_lock_generic (gtm_thread *tx)
+{
+ pthread_mutex_lock (&this->mutex);
+
+ unsigned int sum = this->summary;
+
+ // If there is an active writer, wait.
+ while (sum & a_writer)
+ {
+ if (tx != 0)
+ {
+ // If this is an upgrade, we must not wait for other writers or
+ // upgrades that already have gone in.
+ pthread_mutex_unlock (&this->mutex);
+ return false;
+ }
+
+ this->summary = sum | w_writer;
+ this->w_writers++;
+ pthread_cond_wait (&this->c_writers, &this->mutex);
+ sum = this->summary;
+ if (--this->w_writers == 0)
+ sum &= ~w_writer;
+ }
+
+ // Otherwise we can acquire the lock for write. As a writer, we have
+ // priority, so we don't need to take this back.
+ this->summary = sum | a_writer;
+
+ // We still need to wait for active readers to finish. The barrier makes
+ // sure that we first set our write intent and check for active readers
+ // after that, in strictly this order (similar to the barrier in the fast
+ // path of read_lock()).
+ __sync_synchronize();
+
+ // If this is an upgrade, we are not a reader anymore.
+ if (tx != 0)
+ tx->shared_state = ~(typeof tx->shared_state)0;
+
+ // Count the number of active readers to be able to decrease the number of
+ // wake-ups and wait calls that are necessary.
+ //
+ // This number is an upper bound of the number of readers that actually
+ // are still active and which we need to wait for:
+ // - We set our write flag before checking the reader flags, and readers
+ // check our write flag after clearing their read flags in read_unlock().
+ // Therefore, they will enter the slow path whenever we have seen them.
+ // - Readers will have cleared their read flags before leaving the slow
+ // path in read_lock() (prevents lost wake-ups), and before waiting for
+ // any writer (prevents deadlocks).
+ //
+ // However, this number is also just a lower bound of the number of readers
+ // that will actually enter the slow path in read_unlock() or read_lock():
+ // - Because the read flag is cleared outside of a critical section, writers
+ // can see it as cleared while the reader still goes into the slow path.
+ //
+ // Therefore, readers can skip (lower bound - 1) wake-ups, but we do need
+ // the following loop to check that the readers that we wanted to wait for
+ // are actually those that entered the slow path so far (and either skipped
+ // or sent a wake-up).
+ //
+ // ??? Do we need to optimize further? (The writer could publish a list of
+ // readers that it suspects to be active. Readers could check this list and
+ // only decrement a_readers if they are in this list.)
+ for (;;)
+ {
+ // ??? Keep a list of active readers that we saw and update it on the
+ // next retry instead? This might reduce the number of cache misses that
+ // we get when checking reader flags.
+ int readers = 0;
+ for (gtm_thread *it = gtm_thread::list_of_threads; it != 0;
+ it = it->next_thread)
+ {
+ // Don't count ourself if this is an upgrade.
+ if (it->shared_state != ~(typeof it->shared_state)0)
+ readers++;
+ }
+
+ // If we have not seen any readers, we will not wait.
+ if (readers == 0)
+ break;
+
+ // We've seen a number of readers, so we publish this number and wait.
+ this->a_readers = readers;
+ pthread_cond_wait (&this->c_confirmed_writers, &this->mutex);
+ }
+
+ pthread_mutex_unlock (&this->mutex);
+ return true;
+}
+
+// Acquire a RW lock for writing.
+
+void
+gtm_rwlock::write_lock ()
+{
+ write_lock_generic (0);
+}
+
+
+// Upgrade a RW lock that has been locked for reading to a writing lock.
+// Do this without possibility of another writer incoming. Return false
+// if this attempt fails (i.e. another thread also upgraded).
+
+bool
+gtm_rwlock::write_upgrade (gtm_thread *tx)
+{
+ return write_lock_generic (tx);
+}
+
+
+// Release a RW lock from reading.
+
+void
+gtm_rwlock::read_unlock (gtm_thread *tx)
+{
+ tx->shared_state = ~(typeof tx->shared_state)0;
+ __sync_synchronize();
+ unsigned int sum = this->summary;
+ if (likely(!(sum & (a_writer | w_writer))))
+ return;
+
+ // There is a writer, either active or waiting for other readers or writers.
+ // Thus, enter the mutex-based slow path.
+ pthread_mutex_lock (&this->mutex);
+
+ // If there is a writer waiting for readers, wake it up. Only do that if we
+ // might be the last reader that could do the wake-up, otherwise skip the
+ // wake-up and decrease a_readers to publish that we have entered the slow
+ // path but skipped the wake-up.
+ if (this->a_readers > 0)
+ {
+ this->a_readers--;
+ if (this->a_readers == 0)
+ pthread_cond_signal(&this->c_confirmed_writers);
+ }
+
+ // We don't need to wake up any writers waiting for other writers. Active
+ // writers will take care of that.
+
+ pthread_mutex_unlock (&this->mutex);
+}
+
+
+// Release a RW lock from writing.
+
+void
+gtm_rwlock::write_unlock ()
+{
+ pthread_mutex_lock (&this->mutex);
+
+ unsigned int sum = this->summary;
+ this->summary = sum & ~a_writer;
+
+ // If there is a waiting writer, wake it.
+ if (unlikely (sum & w_writer))
+ pthread_cond_signal (&this->c_writers);
+
+ // If there are waiting readers, wake them.
+ else if (unlikely (sum & w_reader))
+ pthread_cond_broadcast (&this->c_readers);
+
+ pthread_mutex_unlock (&this->mutex);
+}
+
+} // namespace GTM
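
The a_readers handshake described in the comments of write_lock_generic() and
the read_lock()/read_unlock() slow paths above can be summarized as: the writer
counts the reader flags it saw, publishes that count, and sleeps on
c_confirmed_writers; each reader that subsequently enters a slow path decrements
the count and signals once it reaches zero, after which the writer re-scans. A
simplified, self-contained model (not the patch code; the reader flags are
reduced to a toy array, and the flag is cleared inside the critical section,
which the real code avoids):

#include <pthread.h>

struct model_lock
{
  pthread_mutex_t mutex;
  pthread_cond_t c_confirmed_writers;
  int a_readers;               /* count published by the waiting writer */
  bool reader_flag[8];         /* toy stand-in for per-transaction shared_state */
};

/* Writer: count the readers it saw, publish the count, sleep until they
   have confirmed, then re-scan.  */
static void writer_wait_for_readers (model_lock *l)
{
  pthread_mutex_lock (&l->mutex);
  for (;;)
    {
      int n = 0;
      for (int i = 0; i < 8; i++)
        if (l->reader_flag[i])
          n++;
      if (n == 0)
        break;                 /* no readers left to wait for */
      l->a_readers = n;
      pthread_cond_wait (&l->c_confirmed_writers, &l->mutex);
    }
  pthread_mutex_unlock (&l->mutex);
}

/* Reader slow path: clear the flag, then decrement the published count and
   signal the writer once it drops to zero.  */
static void reader_enter_slow_path (model_lock *l, int self)
{
  pthread_mutex_lock (&l->mutex);
  l->reader_flag[self] = false;
  if (l->a_readers > 0 && --l->a_readers == 0)
    pthread_cond_signal (&l->c_confirmed_writers);
  pthread_mutex_unlock (&l->mutex);
}
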
Index: libitm/config/posix/rwlock.h
===================================================================
--- libitm/config/posix/rwlock.h (.../trunk) (revision 0)
+++ libitm/config/posix/rwlock.h (.../branches/transactional-memory) (revision 180773)
@@ -0,0 +1,81 @@
+/* Copyright (C) 2009, 2011 Free Software Foundation, Inc.
+ Contributed by Richard Henderson <rth@redhat.com>.
+
+ This file is part of the GNU Transactional Memory Library (libitm).
+
+ Libitm is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ Libitm is distributed in the hope that it will be useful, but WITHOUT ANY
+ WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef GTM_RWLOCK_H
+#define GTM_RWLOCK_H
+
+#include <pthread.h>
+
+namespace GTM HIDDEN {
+
+struct gtm_thread;
+
+// This data structure is the blocking, mutex-based side of the Dekker-style
+// reader-writer lock used to provide mutual exclusion between active and
+// serial transactions. It has similarities to POSIX pthread_rwlock_t except
+// that we also provide for upgrading a reader->writer lock, with a
+// positive indication of failure (another writer acquired the lock
+// before we were able to acquire). While the writer flag (a_writer below) is
+// global and protected by the mutex, there are per-transaction reader flags,
+// which are stored in a transaction's shared state.
+// See libitm's documentation for further details.
+//
+// In this implementation, writers are given highest priority access but
+// read-to-write upgrades do not have a higher priority than writers.
+
+class gtm_rwlock
+{
+ pthread_mutex_t mutex; // Held if manipulating any field.
+ pthread_cond_t c_readers; // Readers wait here
+ pthread_cond_t c_writers; // Writers wait here for writers
+ pthread_cond_t c_confirmed_writers; // Writers wait here for readers
+
+ static const unsigned a_writer = 1; // An active writer.
+ static const unsigned w_writer = 2; // The w_writers field != 0
+ static const unsigned w_reader = 4; // The w_readers field != 0
+
+ unsigned int summary; // Bitmask of the above.
+ unsigned int a_readers; // Nr active readers as observed by a writer
+ unsigned int w_readers; // Nr waiting readers
+ unsigned int w_writers; // Nr waiting writers
+
+ public:
+ gtm_rwlock();
+ ~gtm_rwlock();
+
+ void read_lock (gtm_thread *tx);
+ void read_unlock (gtm_thread *tx);
+
+ void write_lock ();
+ void write_unlock ();
+
+ bool write_upgrade (gtm_thread *tx);
+
+ protected:
+ bool write_lock_generic (gtm_thread *tx);
+};
+
+} // namespace GTM
+
+#endif // GTM_RWLOCK_H
Index: libitm/config/alpha/cacheline.h
===================================================================
--- libitm/config/alpha/cacheline.h (.../trunk) (revision 0)
+++ libitm/config/alpha/cacheline.h (.../branches/transactional-memory) (revision 180773)
@@ -0,0 +1,122 @@
+/* Copyright (C) 2009 Free Software Foundation, Inc.
+ Contributed by Richard Henderson <rth@redhat.com>.
+
+ This file is part of the GNU Transactional Memory Library (libitm).
+
+ Libitm is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ Libitm is distributed in the hope that it will be useful, but WITHOUT ANY
+ WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef LIBITM_ALPHA_CACHELINE_H
+#define LIBITM_ALPHA_CACHELINE_H 1
+
+// A cacheline is the smallest unit with which locks are associated.
+// The current implementation of the _ITM_[RW] barriers assumes that
+// all data types can fit (aligned) within a cacheline, which means
+// in practice sizeof(complex long double) is the smallest cacheline size.
+// It ought to be small enough for efficient manipulation of the
+// modification mask, below.
+#define CACHELINE_SIZE 64
+
+#ifdef __alpha_bwx__
+# include "config/generic/cacheline.h"
+#else
+// If we don't have byte-word stores, then we'll never be able to
+// adjust *all* of the byte loads/stores to be truly atomic. So
+// only guarantee 4-byte aligned values atomically stored, exactly
+// like the native system. Use byte zap instructions to accelerate
+// sub-word masked stores.
+
+namespace GTM HIDDEN {
+
+// A gtm_cacheline_mask stores a modified bit for every modified byte
+// in the cacheline with which it is associated.
+typedef sized_integral<CACHELINE_SIZE / 8>::type gtm_cacheline_mask;
+
+union gtm_cacheline
+{
+ // Byte access to the cacheline.
+ unsigned char b[CACHELINE_SIZE] __attribute__((aligned(CACHELINE_SIZE)));
+
+ // Larger sized access to the cacheline.
+ uint16_t u16[CACHELINE_SIZE / sizeof(uint16_t)];
+ uint32_t u32[CACHELINE_SIZE / sizeof(uint32_t)];
+ uint64_t u64[CACHELINE_SIZE / sizeof(uint64_t)];
+ gtm_word w[CACHELINE_SIZE / sizeof(gtm_word)];
+
+ // Store S into D, but only the bytes specified by M.
+ static void store_mask(uint32_t *d, uint32_t s, uint8_t m);
+ static void store_mask(uint64_t *d, uint64_t s, uint8_t m);
+
+ // Copy S to D, but only the bytes specified by M.
+ static void copy_mask (gtm_cacheline * __restrict d,
+ const gtm_cacheline * __restrict s,
+ gtm_cacheline_mask m);
+
+ // A write barrier to emit after (a series of) copy_mask.
+ static void copy_mask_wb () { atomic_write_barrier(); }
+};
+
+inline void ALWAYS_INLINE
+gtm_cacheline::store_mask (uint32_t *d, uint32_t s, uint8_t m)
+{
+ const uint8_t tm = (1 << sizeof(uint32_t)) - 1;
+
+ m &= tm;
+ if (__builtin_expect (m, tm))
+ {
+ if (__builtin_expect (m == tm, 1))
+ *d = s;
+ else
+ *d = __builtin_alpha_zap (*d, m) | __builtin_alpha_zapnot (s, m);
+ }
+}
+
+inline void ALWAYS_INLINE
+gtm_cacheline::store_mask (uint64_t *d, uint64_t s, uint8_t m)
+{
+ if (__builtin_expect (m, 0xff))
+ {
+ if (__builtin_expect (m == 0xff, 1))
+ *d = s;
+ else
+ {
+ typedef uint32_t *p32 __attribute__((may_alias));
+ p32 d32 = reinterpret_cast<p32>(d);
+
+ if ((m & 0x0f) == 0x0f)
+ {
+ d32[0] = s;
+ m &= 0xf0;
+ }
+ else if ((m & 0xf0) == 0xf0)
+ {
+ d32[1] = s >> 32;
+ m &= 0x0f;
+ }
+
+ if (m)
+ *d = __builtin_alpha_zap (*d, m) | __builtin_alpha_zapnot (s, m);
+ }
+ }
+}
+
+} // namespace GTM
+
+#endif // __alpha_bwx__
+#endif // LIBITM_ALPHA_CACHELINE_H
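For readers without the Alpha ISA reference handy, here is a minimal portable
sketch of the byte-zap merge that store_mask relies on, under the usual
assumption that zapnot keeps byte i of its operand iff bit i of the mask is
set and zap clears exactly those bytes (the helper names are stand-ins for
the builtins, not part of the patch):

  #include <stdint.h>

  // Keep only the bytes of X selected by the 8-bit mask M.
  static uint64_t zapnot_sketch (uint64_t x, unsigned m)
  {
    uint64_t r = 0;
    for (int i = 0; i < 8; ++i)
      if (m & (1u << i))
        r |= x & ((uint64_t) 0xff << (i * 8));
    return r;
  }

  // Clear the bytes of X selected by M.
  static uint64_t zap_sketch (uint64_t x, unsigned m)
  {
    return zapnot_sketch (x, ~m & 0xff);
  }

  // The partial-store path of store_mask then amounts to
  //   *d = zap_sketch (*d, m) | zapnot_sketch (s, m);
  // destination bytes survive where the mask bit is clear, source bytes are
  // taken where it is set, and no byte-sized store is ever issued.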
Index: libitm/config/alpha/sjlj.S
===================================================================
--- libitm/config/alpha/sjlj.S (.../trunk) (revision 0)
+++ libitm/config/alpha/sjlj.S (.../branches/transactional-memory) (revision 180773)
@@ -0,0 +1,108 @@
+/* Copyright (C) 2009 Free Software Foundation, Inc.
+ Contributed by Richard Henderson <rth@redhat.com>.
+
+ This file is part of the GNU Transactional Memory Library (libitm).
+
+ Libitm is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ Libitm is distributed in the hope that it will be useful, but WITHOUT ANY
+ WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+ .text
+ .align 4
+ .globl _ITM_beginTransaction
+ .ent _ITM_beginTransaction
+
+#define FRAME 144
+
+_ITM_beginTransaction:
+ ldgp $29, 0($27)
+ subq $30, FRAME, $30
+ .frame $30, FRAME, $26, 0
+ .mask 0x04000000, 0
+ stq $26, 0($30)
+ .prologue 1
+
+ stq $9, 8($30)
+ stq $10, 16($30)
+ addq $30, FRAME, $0
+ stq $11, 24($30)
+
+ stq $12, 32($30)
+ stq $13, 40($30)
+ stq $14, 48($30)
+ stq $15, 56($30)
+
+ stq $0, 64($30)
+ stt $f2, 72($30)
+ stt $f3, 80($30)
+ stt $f4, 88($30)
+
+ stt $f5, 96($30)
+ stt $f6, 104($30)
+ stt $f7, 112($30)
+ stt $f8, 120($30)
+
+ stt $f9, 128($30)
+ mov $30, $17
+#ifdef __PIC__
+ unop
+ bsr $26, GTM_begin_transaction !samegp
+#else
+ jsr $26, GTM_begin_transaction
+ ldgp $29, 0($26)
+#endif
+
+ ldq $26, 0($30)
+ addq $30, FRAME, $30
+ ret
+.end _ITM_beginTransaction
+
+ .align 4
+ .globl GTM_longjmp
+ .hidden GTM_longjmp
+ .ent GTM_longjmp
+
+GTM_longjmp:
+ .prologue 0
+ ldq $26, 0($16)
+ ldq $9, 8($16)
+ ldq $10, 16($16)
+ ldq $11, 24($16)
+
+ ldq $12, 32($16)
+ ldq $13, 40($16)
+ ldq $14, 48($16)
+ ldq $15, 56($16)
+
+ ldq $1, 64($16)
+ ldt $f2, 72($16)
+ ldt $f3, 80($16)
+ ldt $f4, 88($16)
+
+ ldt $f5, 96($16)
+ ldt $f6, 104($16)
+ ldt $f7, 112($16)
+ ldt $f8, 120($16)
+
+ ldt $f9, 128($16)
+ mov $17, $0
+ mov $1, $30
+ ret
+.end GTM_longjmp
+
+.section .note.GNU-stack, "", @progbits
Index: libitm/config/alpha/target.h
===================================================================
--- libitm/config/alpha/target.h (.../trunk) (revision 0)
+++ libitm/config/alpha/target.h (.../branches/transactional-memory) (revision 180773)
@@ -0,0 +1,60 @@
+/* Copyright (C) 2009, 2011 Free Software Foundation, Inc.
+ Contributed by Richard Henderson <rth@redhat.com>.
+
+ This file is part of the GNU Transactional Memory Library (libitm).
+
+ Libitm is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ Libitm is distributed in the hope that it will be useful, but WITHOUT ANY
+ WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+namespace GTM HIDDEN {
+
+typedef struct gtm_jmpbuf
+{
+ unsigned long pc;
+ unsigned long s[7];
+ void *cfa;
+ unsigned long f[8];
+} gtm_jmpbuf;
+
+/* Alpha generally uses a fixed page size of 8K. */
+#define PAGE_SIZE 8192
+#define FIXED_PAGE_SIZE 1
+
+/* The size of one line in hardware caches (in bytes). */
+#define HW_CACHELINE_SIZE 64
+
+static inline void
+cpu_relax (void)
+{
+ __asm volatile ("" : : : "memory");
+}
+
+static inline void
+atomic_read_barrier (void)
+{
+ __sync_synchronize ();
+}
+
+static inline void
+atomic_write_barrier (void)
+{
+ __asm volatile ("wmb" : : : "memory");
+}
+
+} // namespace GTM
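The layout above is what the save sequence in config/alpha/sjlj.S assumes; a
small illustrative cross-check (not part of the patch, and it presumes
static_assert/offsetof are usable in this context):

  #include <cstddef>

  // $26 (ra) -> pc, $9..$15 -> s[0..6], incoming $sp -> cfa, $f2..$f9 -> f[0..7].
  static_assert (offsetof (GTM::gtm_jmpbuf, pc)  == 0,  "stq $26, 0($30)");
  static_assert (offsetof (GTM::gtm_jmpbuf, s)   == 8,  "stq $9..$15, 8..56($30)");
  static_assert (offsetof (GTM::gtm_jmpbuf, cfa) == 64, "stq $0, 64($30)");
  static_assert (offsetof (GTM::gtm_jmpbuf, f)   == 72, "stt $f2..$f9, 72..128($30)");

FRAME (144) in sjlj.S is this 136-byte layout rounded up to the 16-byte stack
alignment.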
Index: libitm/config/alpha/unaligned.h
===================================================================
--- libitm/config/alpha/unaligned.h (.../trunk) (revision 0)
+++ libitm/config/alpha/unaligned.h (.../branches/transactional-memory) (revision 180773)
@@ -0,0 +1,118 @@
+/* Copyright (C) 2009 Free Software Foundation, Inc.
+ Contributed by Richard Henderson <rth@redhat.com>.
+
+ This file is part of the GNU Transactional Memory Library (libitm).
+
+ Libitm is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ Libitm is distributed in the hope that it will be useful, but WITHOUT ANY
+ WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef LIBITM_ALPHA_UNALIGNED_H
+#define LIBITM_ALPHA_UNALIGNED_H 1
+
+#define HAVE_ARCH_UNALIGNED_LOAD2_U2 1
+#define HAVE_ARCH_UNALIGNED_LOAD2_U4 1
+#define HAVE_ARCH_UNALIGNED_LOAD2_U8 1
+
+#ifndef __alpha_bwx__
+#define HAVE_ARCH_UNALIGNED_STORE2_U2 1
+#endif
+#define HAVE_ARCH_UNALIGNED_STORE2_U4 1
+#define HAVE_ARCH_UNALIGNED_STORE2_U8 1
+
+#include "config/generic/unaligned.h"
+
+namespace GTM HIDDEN {
+
+template<>
+inline uint16_t ALWAYS_INLINE
+unaligned_load2<uint16_t>(const gtm_cacheline *c1,
+ const gtm_cacheline *c2, size_t ofs)
+{
+ uint64_t v1 = c1->u64[CACHELINE_SIZE / sizeof(uint64_t) - 1];
+ uint64_t v2 = c2->u64[0];
+
+ return __builtin_alpha_extwl (v1, ofs) | __builtin_alpha_extwh (v2, ofs);
+}
+
+template<>
+inline uint32_t ALWAYS_INLINE
+unaligned_load2<uint32_t>(const gtm_cacheline *c1,
+ const gtm_cacheline *c2, size_t ofs)
+{
+ uint64_t v1 = c1->u64[CACHELINE_SIZE / sizeof(uint64_t) - 1];
+ uint64_t v2 = c2->u64[0];
+
+ return __builtin_alpha_extll (v1, ofs) + __builtin_alpha_extlh (v2, ofs);
+}
+
+template<>
+inline uint64_t ALWAYS_INLINE
+unaligned_load2<uint64_t>(const gtm_cacheline *c1,
+ const gtm_cacheline *c2, size_t ofs)
+{
+ uint64_t v1 = c1->u64[CACHELINE_SIZE / sizeof(uint64_t) - 1];
+ uint64_t v2 = c2->u64[0];
+
+ return __builtin_alpha_extql (v1, ofs) | __builtin_alpha_extqh (v2, ofs);
+}
+
+#ifndef __alpha_bwx__
+template<>
+inline void
+unaligned_store2<uint16_t>(gtm_cacheline *c1, gtm_cacheline *c2,
+ size_t ofs, uint16_t val)
+{
+  // The low byte of VAL lands in byte 3 of the last word of C1; the high
+  // byte lands in byte 0 of the first word of C2.
+  uint32_t vl = (uint32_t)val << 24, vh = val >> 8;
+
+  gtm_cacheline::store_mask (&c1->u32[CACHELINE_SIZE / 4 - 1], vl, 8);
+  gtm_cacheline::store_mask (&c2->u32[0], vh, 1);
+}
+#endif
+
+template<>
+inline void
+unaligned_store2<uint32_t>(gtm_cacheline *c1, gtm_cacheline *c2,
+ size_t ofs, uint32_t val)
+{
+  uint64_t vl = __builtin_alpha_insll (val, ofs);
+  uint64_t vh = __builtin_alpha_inslh (val, ofs);
+  // store_mask expects a bit-per-byte mask, not the byte-wise mask that
+  // insll/inslh of ~0 would produce.
+  uint8_t ml = (0xf << (ofs & 7)) & 0xff;
+  uint8_t mh = 0xf >> (8 - (ofs & 7));
+
+  gtm_cacheline::store_mask (&c1->u64[CACHELINE_SIZE / 8 - 1], vl, ml);
+  gtm_cacheline::store_mask (&c2->u64[0], vh, mh);
+}
+
+template<>
+inline void
+unaligned_store2<uint64_t>(gtm_cacheline *c1, gtm_cacheline *c2,
+ size_t ofs, uint64_t val)
+{
+  uint64_t vl = __builtin_alpha_insql (val, ofs);
+  uint64_t vh = __builtin_alpha_insqh (val, ofs);
+  // As above, convert the byte offset into bit-per-byte masks for store_mask.
+  uint8_t ml = (0xff << (ofs & 7)) & 0xff;
+  uint8_t mh = 0xff >> (8 - (ofs & 7));
+
+  gtm_cacheline::store_mask (&c1->u64[CACHELINE_SIZE / 8 - 1], vl, ml);
+  gtm_cacheline::store_mask (&c2->u64[0], vh, mh);
+}
+
+} // namespace GTM
+
+#endif // LIBITM_ALPHA_UNALIGNED_H
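As a reading aid, here is a portable little-endian sketch of what the
extql/extqh pair in unaligned_load2<uint64_t> computes, assuming ofs is the
byte offset of the value within its cacheline (the builtins do the same work
in two instructions, and the sketch relies on the gtm_cacheline definition
above):

  #include <stdint.h>

  // The 8-byte value that starts (ofs % 8) bytes into the final quadword of
  // C1 and spills into the first quadword of C2.
  static uint64_t
  unaligned_load2_u8_sketch (const GTM::gtm_cacheline *c1,
                             const GTM::gtm_cacheline *c2, size_t ofs)
  {
    unsigned shift = (ofs % 8) * 8;
    uint64_t lo = c1->u64[CACHELINE_SIZE / 8 - 1] >> shift;
    uint64_t hi = shift ? c2->u64[0] << (64 - shift) : 0;
    return lo | hi;
  }

The 16- and 32-bit variants are the same idea with narrower extracts.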
Index: libitm/config/x86/cacheline.cc
===================================================================
--- libitm/config/x86/cacheline.cc (.../trunk) (revision 0)
+++ libitm/config/x86/cacheline.cc (.../branches/transactional-memory) (revision 180773)
@@ -0,0 +1,73 @@
+/* Copyright (C) 2009 Free Software Foundation, Inc.
+ Contributed by Richard Henderson <rth@redhat.com>.
+
+ This file is part of the GNU Transactional Memory Library (libitm).
+
+ Libitm is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ Libitm is distributed in the hope that it will be useful, but WITHOUT ANY
+ WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include "libitm_i.h"
+
+namespace GTM HIDDEN {
+
+uint32_t const gtm_bit_to_byte_mask[16] =
+{
+ 0x00000000,
+ 0x000000ff,
+ 0x0000ff00,
+ 0x0000ffff,
+ 0x00ff0000,
+ 0x00ff00ff,
+ 0x00ffff00,
+ 0x00ffffff,
+ 0xff000000,
+ 0xff0000ff,
+ 0xff00ff00,
+ 0xff00ffff,
+ 0xffff0000,
+ 0xffff00ff,
+ 0xffffff00,
+ 0xffffffff
+};
+
+#ifdef __SSE2__
+# define MEMBER m128i
+#else
+# define MEMBER w
+#endif
+
+void
+gtm_cacheline::copy_mask (gtm_cacheline * __restrict d,
+ const gtm_cacheline * __restrict s,
+ gtm_cacheline_mask m)
+{
+ if (m == (gtm_cacheline_mask)-1)
+ {
+ *d = *s;
+ return;
+ }
+ if (__builtin_expect (m == 0, 0))
+ return;
+
+ size_t n = sizeof(d->MEMBER[0]);
+ for (size_t i = 0; i < CACHELINE_SIZE / n; ++i, m >>= n)
+ store_mask (&d->MEMBER[i], s->MEMBER[i], m);
+}
+
+} // namespace GTM
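The gtm_bit_to_byte_mask table is just a precomputed expansion of a 4-bit
byte-enable nibble into a 32-bit byte mask; an equivalent computation, handy
for checking the entries (the function name is hypothetical, the table is
what the code actually uses):

  #include <stdint.h>

  // gtm_bit_to_byte_mask[m] == bit_to_byte_mask (m) for every m < 16.
  static uint32_t bit_to_byte_mask (unsigned m)
  {
    uint32_t r = 0;
    for (int i = 0; i < 4; ++i)
      if (m & (1u << i))
        r |= (uint32_t) 0xff << (i * 8);   // 0xff for each enabled byte
    return r;
  }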
Index: libitm/config/x86/cacheline.h
===================================================================
--- libitm/config/x86/cacheline.h (.../trunk) (revision 0)
+++ libitm/config/x86/cacheline.h (.../branches/transactional-memory) (revision 180773)
@@ -0,0 +1,242 @@
+/* Copyright (C) 2009 Free Software Foundation, Inc.
+ Contributed by Richard Henderson <rth@redhat.com>.
+
+ This file is part of the GNU Transactional Memory Library (libitm).
+
+ Libitm is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ Libitm is distributed in the hope that it will be useful, but WITHOUT ANY
+ WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef LIBITM_CACHELINE_H
+#define LIBITM_CACHELINE_H 1
+
+// Minimum cacheline size is 32, due to both complex long double and __m256.
+// There's no requirement that 64-bit use a 64-byte cacheline size, but do
+// so for now to make sure everything is parameterized properly.
+#ifdef __x86_64__
+# define CACHELINE_SIZE 64
+#else
+# define CACHELINE_SIZE 32
+#endif
+
+namespace GTM HIDDEN {
+
+// A gtm_cacheline_mask stores a modified bit for every modified byte
+// in the cacheline with which it is associated.
+typedef sized_integral<CACHELINE_SIZE / 8>::type gtm_cacheline_mask;
+
+extern uint32_t const gtm_bit_to_byte_mask[16];
+
+union gtm_cacheline
+{
+ // Byte access to the cacheline.
+ unsigned char b[CACHELINE_SIZE] __attribute__((aligned(CACHELINE_SIZE)));
+
+ // Larger sized access to the cacheline.
+ uint16_t u16[CACHELINE_SIZE / sizeof(uint16_t)];
+ uint32_t u32[CACHELINE_SIZE / sizeof(uint32_t)];
+ uint64_t u64[CACHELINE_SIZE / sizeof(uint64_t)];
+ gtm_word w[CACHELINE_SIZE / sizeof(gtm_word)];
+
+#ifdef __MMX__
+ __m64 m64[CACHELINE_SIZE / sizeof(__m64)];
+#endif
+#ifdef __SSE__
+ __m128 m128[CACHELINE_SIZE / sizeof(__m128)];
+#endif
+#ifdef __SSE2__
+ __m128i m128i[CACHELINE_SIZE / sizeof(__m128i)];
+#endif
+#ifdef __AVX__
+ __m256 m256[CACHELINE_SIZE / sizeof(__m256)];
+ __m256i m256i[CACHELINE_SIZE / sizeof(__m256i)];
+#endif
+
+ // Store S into D, but only the bytes specified by M.
+ static void store_mask (uint32_t *d, uint32_t s, uint8_t m);
+ static void store_mask (uint64_t *d, uint64_t s, uint8_t m);
+#ifdef __SSE2__
+ static void store_mask (__m128i *d, __m128i s, uint16_t m);
+#endif
+
+ // Copy S to D, but only the bytes specified by M.
+ static void copy_mask (gtm_cacheline * __restrict d,
+ const gtm_cacheline * __restrict s,
+ gtm_cacheline_mask m);
+
+ // A write barrier to emit after (a series of) copy_mask.
+ // When we're emitting non-temporal stores, the normal strong
+ // ordering of the machine doesn't apply.
+ static void copy_mask_wb ();
+
+#if defined(__SSE__) || defined(__AVX__)
+ // Copy S to D; only bother defining if we can do this more efficiently
+ // than the compiler-generated default implementation.
+ gtm_cacheline& operator= (const gtm_cacheline &s);
+#endif // SSE, AVX
+};
+
+inline void
+gtm_cacheline::copy_mask_wb ()
+{
+#ifdef __SSE2__
+ _mm_sfence ();
+#endif
+}
+
+#if defined(__SSE__) || defined(__AVX__)
+inline gtm_cacheline& ALWAYS_INLINE
+gtm_cacheline::operator= (const gtm_cacheline & __restrict s)
+{
+#ifdef __AVX__
+# define CP m256
+# define TYPE __m256
+#else
+# define CP m128
+# define TYPE __m128
+#endif
+
+ TYPE w, x, y, z;
+
+ // ??? Wouldn't it be nice to have a pragma to tell the compiler
+ // to completely unroll a given loop?
+ switch (CACHELINE_SIZE / sizeof(TYPE))
+ {
+ case 1:
+ this->CP[0] = s.CP[0];
+ break;
+ case 2:
+ x = s.CP[0];
+ y = s.CP[1];
+ this->CP[0] = x;
+ this->CP[1] = y;
+ break;
+ case 4:
+ w = s.CP[0];
+ x = s.CP[1];
+ y = s.CP[2];
+ z = s.CP[3];
+ this->CP[0] = w;
+ this->CP[1] = x;
+ this->CP[2] = y;
+ this->CP[3] = z;
+ break;
+ default:
+ __builtin_trap ();
+ }
+
+ return *this;
+}
+#endif
+
+// ??? Support masked integer stores more efficiently with an unlocked
+// cmpxchg insn.  My reasoning is that while we write to locations that
+// we do not wish to modify, we do it in an uninterruptible insn, and so
+// we either truly write back the original data or the insn fails --
+// unlike with a load/and/or/write sequence which can be interrupted
+// either by a kernel task switch or an unlucky cacheline steal by
+// another processor.  Avoiding the LOCK prefix improves performance by
+// a factor of 10, and we don't need the memory barrier semantics implied
+// by that prefix.
+
+inline void ALWAYS_INLINE
+gtm_cacheline::store_mask (uint32_t *d, uint32_t s, uint8_t m)
+{
+ gtm_cacheline_mask tm = (1 << sizeof (s)) - 1;
+ if (__builtin_expect (m & tm, tm))
+ {
+ if (__builtin_expect ((m & tm) == tm, 1))
+ *d = s;
+ else
+ {
+ gtm_cacheline_mask bm = gtm_bit_to_byte_mask[m & 15];
+ gtm_word n, o = *d;
+
+ __asm("\n0:\t"
+ "mov %[o], %[n]\n\t"
+ "and %[m], %[n]\n\t"
+ "or %[s], %[n]\n\t"
+ "cmpxchg %[n], %[d]\n\t"
+ "jnz,pn 0b"
+ : [d] "+m"(*d), [n] "=&r" (n), [o] "+a"(o)
+ : [s] "r" (s & bm), [m] "r" (~bm));
+ }
+ }
+}
+
+inline void ALWAYS_INLINE
+gtm_cacheline::store_mask (uint64_t *d, uint64_t s, uint8_t m)
+{
+ gtm_cacheline_mask tm = (1 << sizeof (s)) - 1;
+ if (__builtin_expect (m & tm, tm))
+ {
+ if (__builtin_expect ((m & tm) == tm, 1))
+ *d = s;
+ else
+ {
+#ifdef __x86_64__
+ uint32_t bl = gtm_bit_to_byte_mask[m & 15];
+ uint32_t bh = gtm_bit_to_byte_mask[(m >> 4) & 15];
+ gtm_cacheline_mask bm = bl | ((gtm_cacheline_mask)bh << 31 << 1);
+ uint64_t n, o = *d;
+ __asm("\n0:\t"
+ "mov %[o], %[n]\n\t"
+ "and %[m], %[n]\n\t"
+ "or %[s], %[n]\n\t"
+ "cmpxchg %[n], %[d]\n\t"
+ "jnz,pn 0b"
+ : [d] "+m"(*d), [n] "=&r" (n), [o] "+a"(o)
+ : [s] "r" (s & bm), [m] "r" (~bm));
+#else
+ /* ??? While it's possible to perform this operation with
+ cmpxchg8b, the sequence requires all 7 general registers
+ and thus cannot be performed with -fPIC. Don't even try. */
+ uint32_t *d32 = reinterpret_cast<uint32_t *>(d);
+ store_mask (d32, s, m);
+ store_mask (d32 + 1, s >> 32, m >> 4);
+#endif
+ }
+ }
+}
+
+#ifdef __SSE2__
+inline void ALWAYS_INLINE
+gtm_cacheline::store_mask (__m128i *d, __m128i s, uint16_t m)
+{
+ if (__builtin_expect (m == 0, 0))
+ return;
+ if (__builtin_expect (m == 0xffff, 1))
+ *d = s;
+ else
+ {
+ __m128i bm0, bm1, bm2, bm3;
+ bm0 = _mm_set_epi32 (0, 0, 0, gtm_bit_to_byte_mask[m & 15]); m >>= 4;
+ bm1 = _mm_set_epi32 (0, 0, 0, gtm_bit_to_byte_mask[m & 15]); m >>= 4;
+ bm2 = _mm_set_epi32 (0, 0, 0, gtm_bit_to_byte_mask[m & 15]); m >>= 4;
+ bm3 = _mm_set_epi32 (0, 0, 0, gtm_bit_to_byte_mask[m & 15]); m >>= 4;
+ bm0 = _mm_unpacklo_epi32 (bm0, bm1);
+ bm2 = _mm_unpacklo_epi32 (bm2, bm3);
+ bm0 = _mm_unpacklo_epi64 (bm0, bm2);
+
+ _mm_maskmoveu_si128 (s, bm0, (char *)d);
+ }
+}
+#endif // SSE2
+
+} // namespace GTM
+
+#endif // LIBITM_CACHELINE_H
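To make the cmpxchg loops above easier to audit, here is the same merge
written out in plain C++ (a sketch only: __sync_val_compare_and_swap emits a
LOCK prefix, which is precisely the cost the hand-written asm avoids):

  #include <stdint.h>

  static void store_mask_sketch (uint32_t *d, uint32_t s, uint8_t m)
  {
    uint32_t bm = GTM::gtm_bit_to_byte_mask[m & 15];   // byte-enable mask
    uint32_t o = *d;
    for (;;)
      {
        uint32_t n = (o & ~bm) | (s & bm);   // keep old bytes, splice in new
        uint32_t prev = __sync_val_compare_and_swap (d, o, n);
        if (prev == o)
          break;        // the word did not change under us; store succeeded
        o = prev;       // somebody else wrote the word; retry with its value
      }
  }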
Index: libitm/config/x86/sjlj.S
===================================================================
--- libitm/config/x86/sjlj.S (.../trunk) (revision 0)
+++ libitm/config/x86/sjlj.S (.../branches/transactional-memory) (revision 180773)
@@ -0,0 +1,105 @@
+/* Copyright (C) 2008, 2009, 2011 Free Software Foundation, Inc.
+ Contributed by Richard Henderson <rth@redhat.com>.
+
+ This file is part of the GNU Transactional Memory Library (libitm).
+
+ Libitm is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ Libitm is distributed in the hope that it will be useful, but WITHOUT ANY
+ WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+ .text
+ .p2align 4
+ .globl _ITM_beginTransaction
+ .type _ITM_beginTransaction, @function
+
+_ITM_beginTransaction:
+ .cfi_startproc
+#ifdef __x86_64__
+ leaq 8(%rsp), %rax
+ movq (%rsp), %r8
+ subq $72, %rsp
+ .cfi_def_cfa_offset 80
+ movq %rax, (%rsp)
+ movq %r8, 8(%rsp)
+ movq %rbx, 16(%rsp)
+ movq %rbp, 24(%rsp)
+ movq %r12, 32(%rsp)
+ movq %r13, 40(%rsp)
+ movq %r14, 48(%rsp)
+ movq %r15, 56(%rsp)
+ movq %rsp, %rsi
+ call GTM_begin_transaction
+ addq $72, %rsp
+ .cfi_def_cfa_offset 8
+ ret
+#else
+ leal 4(%esp), %ecx
+ subl $28, %esp
+ .cfi_def_cfa_offset 32
+ movl %ecx, 8(%esp)
+ movl %ebx, 12(%esp)
+ movl %esi, 16(%esp)
+ movl %edi, 20(%esp)
+ movl %ebp, 24(%esp)
+ leal 8(%esp), %edx
+ call GTM_begin_transaction
+ addl $28, %esp
+ .cfi_def_cfa_offset 4
+ ret
+#endif
+ .cfi_endproc
+ .size _ITM_beginTransaction, .-_ITM_beginTransaction
+
+ .p2align 4
+ .globl GTM_longjmp
+ .type GTM_longjmp, @function
+ .hidden GTM_longjmp
+
+GTM_longjmp:
+ .cfi_startproc
+#ifdef __x86_64__
+ movq (%rdi), %rcx
+ movq 8(%rdi), %rdx
+ movq 16(%rdi), %rbx
+ movq 24(%rdi), %rbp
+ movq 32(%rdi), %r12
+ movq 40(%rdi), %r13
+ movq 48(%rdi), %r14
+ movq 56(%rdi), %r15
+ movl %esi, %eax
+ .cfi_def_cfa %rcx, 0
+ .cfi_register %rip, %rdx
+ movq %rcx, %rsp
+ jmp *%rdx
+#else
+ xchgl %eax, %edx
+ movl (%edx), %ecx
+ movl 4(%edx), %ebx
+ movl 8(%edx), %esi
+ movl 12(%edx), %edi
+ movl 16(%edx), %ebp
+ movl 20(%edx), %edx
+ .cfi_def_cfa %ecx, 0
+ .cfi_register %eip, %edx
+ movl %ecx, %esp
+ jmp *%edx
+#endif
+ .cfi_endproc
+ .size GTM_longjmp, .-GTM_longjmp
+
+.section .note.GNU-stack, "", @progbits
Index: libitm/config/x86/x86_sse.cc
===================================================================
--- libitm/config/x86/x86_sse.cc (.../trunk) (revision 0)
+++ libitm/config/x86/x86_sse.cc (.../branches/transactional-memory) (revision 180773)
@@ -0,0 +1,122 @@
+/* Copyright (C) 2009, 2011 Free Software Foundation, Inc.
+ Contributed by Richard Henderson <rth@redhat.com>.
+
+ This file is part of the GNU Transactional Memory Library (libitm).
+
+ Libitm is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ Libitm is distributed in the hope that it will be useful, but WITHOUT ANY
+ WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include "libitm_i.h"
+#include "dispatch.h"
+
+// ??? Use memcpy for now, until we have figured out how to best
+// instantiate these loads/stores.
+CREATE_DISPATCH_FUNCTIONS_T_MEMCPY(M64, GTM::abi_disp()->, )
+CREATE_DISPATCH_FUNCTIONS_T_MEMCPY(M128, GTM::abi_disp()->, )
+
+void ITM_REGPARM
+_ITM_LM64 (const _ITM_TYPE_M64 *ptr)
+{
+ GTM::GTM_LB (ptr, sizeof (*ptr));
+}
+
+void ITM_REGPARM
+_ITM_LM128 (const _ITM_TYPE_M128 *ptr)
+{
+ GTM::GTM_LB (ptr, sizeof (*ptr));
+}
+
+// Helpers for re-aligning two 128-bit values.
+#ifdef __SSSE3__
+# define INSN0 "movdqa %xmm1, %xmm0"
+# define INSN(N) "palignr $" #N ", %xmm1, %xmm0"
+# define TABLE_ENT_0 INSN0 "\n\tret\n\t"
+# define TABLE_ENT(N) ".balign 8\n\t" INSN(N) "\n\tret\n\t"
+
+asm(".pushsection .text\n\
+ .balign 16\n\
+ .globl GTM_palignr_table\n\
+ .hidden GTM_palignr_table\n\
+ .type GTM_palignr_table, @function\n\
+GTM_palignr_table:\n\t"
+ TABLE_ENT_0
+ TABLE_ENT(1)
+ TABLE_ENT(2)
+ TABLE_ENT(3)
+ TABLE_ENT(4)
+ TABLE_ENT(5)
+ TABLE_ENT(6)
+ TABLE_ENT(7)
+ TABLE_ENT(8)
+ TABLE_ENT(9)
+ TABLE_ENT(10)
+ TABLE_ENT(11)
+ TABLE_ENT(12)
+ TABLE_ENT(13)
+ TABLE_ENT(14)
+ TABLE_ENT(15)
+ ".balign 8\n\
+ .size GTM_palignr_table, .-GTM_palignr_table\n\
+ .popsection");
+
+# undef INSN0
+# undef INSN
+# undef TABLE_ENT_0
+# undef TABLE_ENT
+#elif defined(__SSE2__)
+# define INSNS_8 "punpcklqdq %xmm1, %xmm0"
+# define INSNS(N) "psrldq $"#N", %xmm0\n\t" \
+ "pslldq $(16-"#N"), %xmm1\n\t" \
+ "por %xmm1, %xmm0"
+# define TABLE_ENT_0 "ret\n\t"
+# define TABLE_ENT_8 ".balign 16\n\t" INSNS_8 "\n\tret\n\t"
+# define TABLE_ENT(N) ".balign 16\n\t" INSNS(N) "\n\tret\n\t"
+
+asm(".pushsection .text\n\
+ .balign 16\n\
+ .globl GTM_pshift_table\n\
+ .hidden GTM_pshift_table\n\
+ .type GTM_pshift_table, @function\n\
+GTM_pshift_table:\n\t"
+ TABLE_ENT_0
+ TABLE_ENT(1)
+ TABLE_ENT(2)
+ TABLE_ENT(3)
+ TABLE_ENT(4)
+ TABLE_ENT(5)
+ TABLE_ENT(6)
+ TABLE_ENT(7)
+ TABLE_ENT_8
+ TABLE_ENT(9)
+ TABLE_ENT(10)
+ TABLE_ENT(11)
+ TABLE_ENT(12)
+ TABLE_ENT(13)
+ TABLE_ENT(14)
+ TABLE_ENT(15)
+ ".balign 8\n\
+ .size GTM_pshift_table, .-GTM_pshift_table\n\
+ .popsection");
+
+# undef INSNS_8
+# undef INSNS
+# undef TABLE_ENT_0
+# undef TABLE_ENT_8
+# undef TABLE_ENT
+#endif
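Each entry N in these tables produces the 16 bytes that start at byte offset
N of two consecutive 16-byte chunks; in portable terms the operation is the
following (sketch only; the parameter names are made up, and the register and
calling convention of the real table entries is left to their callers):

  // A 16-byte window at byte offset OFS into the 32-byte concatenation
  // of LO (lower-addressed chunk) and HI (higher-addressed chunk).
  static void realign16_sketch (unsigned char out[16],
                                const unsigned char lo[16],
                                const unsigned char hi[16], unsigned ofs)
  {
    for (unsigned i = 0; i < 16; ++i)
      out[i] = (i + ofs < 16) ? lo[i + ofs] : hi[i + ofs - 16];
  }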
Index: libitm/config/x86/x86_avx.cc
===================================================================
--- libitm/config/x86/x86_avx.cc (.../trunk) (revision 0)
+++ libitm/config/x86/x86_avx.cc (.../branches/transactional-memory) (revision 180773)
@@ -0,0 +1,95 @@
+/* Copyright (C) 2009, 2011 Free Software Foundation, Inc.
+ Contributed by Richard Henderson <rth@redhat.com>.
+
+ This file is part of the GNU Transactional Memory Library (libitm).
+
+ Libitm is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ Libitm is distributed in the hope that it will be useful, but WITHOUT ANY
+ WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include "libitm_i.h"
+#include "dispatch.h"
+
+// ??? Use memcpy for now, until we have figured out how to best
+// instantiate these loads/stores.
+CREATE_DISPATCH_FUNCTIONS_T_MEMCPY(M256, GTM::abi_disp()->, )
+
+void ITM_REGPARM
+_ITM_LM256 (const _ITM_TYPE_M256 *ptr)
+{
+ GTM::GTM_LB (ptr, sizeof (*ptr));
+}
+
+// Helpers for re-aligning two 128-bit values.
+#ifdef __XOP__
+const __v16qi GTM::GTM_vpperm_shift[16] =
+{
+ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
+ { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16 },
+ { 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17 },
+ { 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18 },
+ { 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19 },
+ { 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20 },
+ { 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21 },
+ { 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22 },
+ { 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23 },
+ { 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
+ { 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25 },
+ { 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26 },
+ { 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27 },
+ { 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28 },
+ { 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29 },
+ { 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30 },
+};
+#else
+# define INSN0 "movdqa %xmm1, %xmm0"
+# define INSN(N) "vpalignr $" #N ", %xmm0, %xmm1, %xmm0"
+# define TABLE_ENT_0 INSN0 "\n\tret\n\t"
+# define TABLE_ENT(N) ".balign 8\n\t" INSN(N) "\n\tret\n\t"
+
+asm(".pushsection .text\n\
+ .balign 16\n\
+ .globl GTM_vpalignr_table\n\
+ .hidden GTM_vpalignr_table\n\
+ .type GTM_vpalignr_table, @function\n\
+GTM_vpalignr_table:\n\t"
+ TABLE_ENT_0
+ TABLE_ENT(1)
+ TABLE_ENT(2)
+ TABLE_ENT(3)
+ TABLE_ENT(4)
+ TABLE_ENT(5)
+ TABLE_ENT(6)
+ TABLE_ENT(7)
+ TABLE_ENT(8)
+ TABLE_ENT(9)
+ TABLE_ENT(10)
+ TABLE_ENT(11)
+ TABLE_ENT(12)
+ TABLE_ENT(13)
+ TABLE_ENT(14)
+ TABLE_ENT(15)
+ ".balign 8\n\
+ .size GTM_vpalignr_table, .-GTM_vpalignr_table\n\
+ .popsection");
+
+# undef INSN0
+# undef INSN
+# undef TABLE_ENT_0
+# undef TABLE_ENT
+#endif
Index: libitm/config/x86/target.h
===================================================================
--- libitm/config/x86/target.h (.../trunk) (revision 0)
+++ libitm/config/x86/target.h (.../branches/transactional-memory) (revision 180773)
@@ -0,0 +1,98 @@
+/* Copyright (C) 2008, 2009, 2011 Free Software Foundation, Inc.
+ Contributed by Richard Henderson <rth@redhat.com>.
+
+ This file is part of the GNU Transactional Memory Library (libitm).
+
+ Libitm is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ Libitm is distributed in the hope that it will be useful, but WITHOUT ANY
+ WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+namespace GTM HIDDEN {
+
+#ifdef __x86_64__
+/* ??? This doesn't work for Win64. */
+typedef struct gtm_jmpbuf
+{
+ void *cfa;
+ unsigned long rip;
+ unsigned long rbx;
+ unsigned long rbp;
+ unsigned long r12;
+ unsigned long r13;
+ unsigned long r14;
+ unsigned long r15;
+} gtm_jmpbuf;
+#else
+typedef struct gtm_jmpbuf
+{
+ void *cfa;
+ unsigned long ebx;
+ unsigned long esi;
+ unsigned long edi;
+ unsigned long ebp;
+ unsigned long eip;
+} gtm_jmpbuf;
+#endif
+
+/* x86 doesn't require strict alignment for the basic types. */
+#define STRICT_ALIGNMENT 0
+
+/* x86 uses a fixed page size of 4K. */
+#define PAGE_SIZE 4096
+#define FIXED_PAGE_SIZE 1
+
+/* The size of one line in hardware caches (in bytes). */
+#define HW_CACHELINE_SIZE 64
+
+
+static inline void
+cpu_relax (void)
+{
+ __asm volatile ("rep; nop" : : : "memory");
+}
+
+static inline void
+atomic_read_barrier (void)
+{
+ /* x86 is a strong memory ordering machine. */
+ __asm volatile ("" : : : "memory");
+}
+
+static inline void
+atomic_write_barrier (void)
+{
+ /* x86 is a strong memory ordering machine. */
+ __asm volatile ("" : : : "memory");
+}
+
+} // namespace GTM
+
+// We'll be using some of the cpu builtins, and their associated types.
+#ifndef __cplusplus
+/* ??? It's broken for C++. */
+#include <x86intrin.h>
+#else
+# ifdef __SSE2__
+# include <emmintrin.h>
+# elif defined(__SSE__)
+# include <xmmintrin.h>
+# endif
+# ifdef __AVX__
+# include <immintrin.h>
+# endif
+#endif
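As with the Alpha port, this gtm_jmpbuf layout mirrors the save slots written
by _ITM_beginTransaction in config/x86/sjlj.S; an illustrative cross-check for
the 64-bit case (the asserts are not part of the patch):

  #include <cstddef>

  static_assert (offsetof (GTM::gtm_jmpbuf, cfa) == 0,  "movq %rax, (%rsp)");
  static_assert (offsetof (GTM::gtm_jmpbuf, rip) == 8,  "movq %r8, 8(%rsp)");
  static_assert (offsetof (GTM::gtm_jmpbuf, rbx) == 16, "movq %rbx, 16(%rsp)");
  static_assert (offsetof (GTM::gtm_jmpbuf, r15) == 56, "movq %r15, 56(%rsp)");

GTM_longjmp restores the slots in the same order and jumps through the saved
rip, so the two must stay in sync if either side changes.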