Rework SGAtomic, move more into the implementation.

Mainly move many of the platform-specific cases into the implementation file.
Mathias Froehlich 2011-10-23 23:03:15 +02:00
parent 6250f675db
commit 83772c87ac
2 changed files with 127 additions and 81 deletions
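
In short, the platform dispatch that used to sit inline in the header now hides behind a single SGATOMIC_USE_LIBRARY_FUNCTIONS switch: the header only declares the operations, and the #if cascade over Win32, the gcc builtins, i386 inline assembly and the mutex fallback moves into the implementation file. Below is a minimal sketch of that split under hypothetical names (Counter, counter.hxx, counter.cxx), not the actual SimGear sources:

// counter.hxx -- hypothetical miniature of the pattern: the header only
// declares the operation, the platform-specific code lives out of line.
class Counter {
public:
    Counter(unsigned value = 0) : mValue(value) {}
    unsigned increment();            // defined per platform in counter.cxx
private:
    unsigned mValue;
};

// counter.cxx -- one translation unit owns the platform dispatch, so a
// compiler or -march change only forces this file to be recompiled.
#include "counter.hxx"

unsigned
Counter::increment()
{
#if defined(__GNUC__)
    return __sync_add_and_fetch(&mValue, 1);   // gcc >= 4.1 builtin
#else
    return ++mValue;   // placeholder; the real class falls back to a mutex here
#endif
}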

SGAtomic.cxx

@@ -1,6 +1,6 @@
/* -*-c++-*-
*
* Copyright (C) 2005-2009 Mathias Froehlich
* Copyright (C) 2005-2009,2011 Mathias Froehlich
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
@@ -17,62 +17,104 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
*/
#ifdef HAVE_CONFIG_H
# include <simgear_config.h>
#endif
#include "SGAtomic.hxx"
#if defined(SGATOMIC_USE_GCC4_BUILTINS) && !defined (GCC_ATOMIC_BUILTINS_FOUND)
#if defined(SGATOMIC_USE_LIBRARY_FUNCTIONS)
// Usually the appropriate functions are inlined by gcc.
// But if gcc is called with something equivalent to -march=i386,
// it cannot assume that lock; xadd/cmpxchg instructions are available
// and instead calls this pair of functions. We provide them here for that case.
// Note that this assembler code will no longer work on an i386 chip.
// But I firmly believe that we can assume to be running on at least an i486 ...
#if defined(_WIN32)
# include <windows.h>
#elif defined(GCC_ATOMIC_BUILTINS_FOUND)
#elif defined(__GNUC__) && defined(__i386__)
#else
# include <simgear/threads/SGGuard.hxx>
#endif
extern "C" {
unsigned __sync_sub_and_fetch_4(volatile void *ptr, unsigned value)
unsigned
SGAtomic::operator++()
{
register volatile unsigned* mem = reinterpret_cast<volatile unsigned*>(ptr);
register unsigned result;
__asm__ __volatile__("lock; xadd{l} {%0,%1|%1,%0}"
: "=r" (result), "=m" (*mem)
: "0" (-value), "m" (*mem)
: "memory");
return result - value;
#if defined(_WIN32)
return InterlockedIncrement(reinterpret_cast<long volatile*>(&mValue));
#elif defined(GCC_ATOMIC_BUILTINS_FOUND)
return __sync_add_and_fetch(&mValue, 1);
#elif defined(__GNUC__) && defined(__i386__)
register volatile unsigned* mem = reinterpret_cast<volatile unsigned*>(&mValue);
register unsigned result;
__asm__ __volatile__("lock; xadd{l} {%0,%1|%1,%0}"
: "=r" (result), "=m" (*mem)
: "0" (1), "m" (*mem)
: "memory");
return result + 1;
#else
SGGuard<SGMutex> lock(mMutex);
return ++mValue;
#endif
}
unsigned __sync_add_and_fetch_4(volatile void *ptr, unsigned value)
unsigned
SGAtomic::operator--()
{
register volatile unsigned* mem = reinterpret_cast<volatile unsigned*>(ptr);
register unsigned result;
__asm__ __volatile__("lock; xadd{l} {%0,%1|%1,%0}"
: "=r" (result), "=m" (*mem)
: "0" (value), "m" (*mem)
: "memory");
return result + value;
#if defined(_WIN32)
return InterlockedDecrement(reinterpret_cast<long volatile*>(&mValue));
#elif defined(GCC_ATOMIC_BUILTINS_FOUND)
return __sync_sub_and_fetch(&mValue, 1);
#elif defined(__GNUC__) && defined(__i386__)
register volatile unsigned* mem = reinterpret_cast<volatile unsigned*>(&mValue);
register unsigned result;
__asm__ __volatile__("lock; xadd{l} {%0,%1|%1,%0}"
: "=r" (result), "=m" (*mem)
: "0" (-1), "m" (*mem)
: "memory");
return result - 1;
#else
SGGuard<SGMutex> lock(mMutex);
return --mValue;
#endif
}
unsigned __sync_bool_compare_and_swap_4(volatile void *ptr,
unsigned oldValue, unsigned newValue)
SGAtomic::operator unsigned() const
{
register volatile unsigned* mem = reinterpret_cast<volatile unsigned*>(ptr);
unsigned before;
__asm__ __volatile__("lock; cmpxchg{l} {%1,%2|%1,%2}"
: "=a"(before)
: "q"(newValue), "m"(*mem), "0"(oldValue)
: "memory");
return before == oldValue;
#if defined(_WIN32)
return static_cast<unsigned const volatile &>(mValue);
#elif defined(GCC_ATOMIC_BUILTINS_FOUND)
__sync_synchronize();
return mValue;
#elif defined(__GNUC__) && defined(__i386__)
__asm__ __volatile__("": : : "memory");
return mValue;
#else
SGGuard<SGMutex> lock(mMutex);
return mValue;
#endif
}
void __sync_synchronize()
bool
SGAtomic::compareAndExchange(unsigned oldValue, unsigned newValue)
{
__asm__ __volatile__("": : : "memory");
#if defined(_WIN32)
long volatile* lvPtr = reinterpret_cast<long volatile*>(&mValue);
return oldValue == InterlockedCompareExchange(lvPtr, newValue, oldValue);
#elif defined(GCC_ATOMIC_BUILTINS_FOUND)
return __sync_bool_compare_and_swap(&mValue, oldValue, newValue);
#elif defined(__GNUC__) && defined(__i386__)
register volatile unsigned* mem = reinterpret_cast<volatile unsigned*>(&mValue);
unsigned before;
__asm__ __volatile__("lock; cmpxchg{l} {%1,%2|%1,%2}"
: "=a"(before)
: "q"(newValue), "m"(*mem), "0"(oldValue)
: "memory");
return before == oldValue;
#else
SGGuard<SGMutex> lock(mMutex);
if (mValue != oldValue)
return false;
mValue = newValue;
return true;
#endif
}
} // extern "C"
#endif
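
For completeness, a hedged usage sketch of the compare-and-swap entry point defined above: a hypothetical one-shot initialization guard. The <simgear/structure/SGAtomic.hxx> include path is an assumption about the installed layout, and none of this code is part of the commit itself:

// Hypothetical usage sketch (not from this commit): claim a one-time
// initialization slot with SGAtomic::compareAndExchange().
#include <simgear/structure/SGAtomic.hxx>   // assumed install path

static SGAtomic initState(0);   // 0 = not initialized, 1 = claimed

void initOnce()
{
    // Only the thread that flips 0 -> 1 performs the setup;
    // compareAndExchange() returns true exactly when the swap happened.
    if (initState.compareAndExchange(0, 1)) {
        // ... one-time setup ...
    }
}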

SGAtomic.hxx

@@ -1,6 +1,6 @@
/* -*-c++-*-
*
* Copyright (C) 2005-2009 Mathias Froehlich
* Copyright (C) 2005-2009,2011 Mathias Froehlich
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
@@ -21,85 +21,89 @@
#ifndef SGAtomic_HXX
#define SGAtomic_HXX
#if defined(__GNUC__) && ((4 < __GNUC__)||(4 == __GNUC__ && 1 <= __GNUC_MINOR__)) \
&& (defined(__i386__) || defined(__x86_64__))
#if defined(__GNUC__) && ((4 < __GNUC__)||(4 == __GNUC__ && 1 <= __GNUC_MINOR__)) && \
defined(__x86_64__)
// Nothing to include here; this is a compiler API ...
# define SGATOMIC_USE_GCC4_BUILTINS
#elif defined(__GNUC__) && defined(__i386__)
# define SGATOMIC_USE_LIBRARY_FUNCTIONS
#elif defined(__sgi) && defined(_COMPILER_VERSION) && (_COMPILER_VERSION>=730)
// Nothing to include here; this is a compiler API ...
# define SGATOMIC_USE_MIPSPRO_BUILTINS
#elif defined(_WIN32)
# include <windows.h>
# define SGATOMIC_USE_WIN32_INTERLOCKED
# define SGATOMIC_USE_LIBRARY_FUNCTIONS
#else
// The sledge hammer ...
# define SGATOMIC_USE_LIBRARY_FUNCTIONS
# include <simgear/threads/SGThread.hxx>
# include <simgear/threads/SGGuard.hxx>
#endif
class SGAtomic {
public:
SGAtomic(unsigned value = 0) : mValue(value)
{ }
#if defined(SGATOMIC_USE_LIBRARY_FUNCTIONS)
unsigned operator++();
#else
unsigned operator++()
{
#if defined(SGATOMIC_USE_GCC4_BUILTINS)
# if defined(SGATOMIC_USE_GCC4_BUILTINS)
return __sync_add_and_fetch(&mValue, 1);
#elif defined(SGATOMIC_USE_MIPSPRO_BUILTINS)
# elif defined(SGATOMIC_USE_MIPSPRO_BUILTINS)
return __add_and_fetch(&mValue, 1);
#elif defined(SGATOMIC_USE_WIN32_INTERLOCKED)
return InterlockedIncrement(reinterpret_cast<long volatile*>(&mValue));
#else
SGGuard<SGMutex> lock(mMutex);
return ++mValue;
#endif
# else
# error
# endif
}
#endif
#if defined(SGATOMIC_USE_LIBRARY_FUNCTIONS)
unsigned operator--();
#else
unsigned operator--()
{
#if defined(SGATOMIC_USE_GCC4_BUILTINS)
# if defined(SGATOMIC_USE_GCC4_BUILTINS)
return __sync_sub_and_fetch(&mValue, 1);
#elif defined(SGATOMIC_USE_MIPSPRO_BUILTINS)
# elif defined(SGATOMIC_USE_MIPSPRO_BUILTINS)
return __sub_and_fetch(&mValue, 1);
#elif defined(SGATOMIC_USE_WIN32_INTERLOCKED)
return InterlockedDecrement(reinterpret_cast<long volatile*>(&mValue));
#else
SGGuard<SGMutex> lock(mMutex);
return --mValue;
#endif
# else
# error
# endif
}
#endif
#if defined(SGATOMIC_USE_LIBRARY_FUNCTIONS)
operator unsigned() const;
#else
operator unsigned() const
{
#if defined(SGATOMIC_USE_GCC4_BUILTINS)
# if defined(SGATOMIC_USE_GCC4_BUILTINS)
__sync_synchronize();
return mValue;
#elif defined(SGATOMIC_USE_MIPSPRO_BUILTINS)
# elif defined(SGATOMIC_USE_MIPSPRO_BUILTINS)
__synchronize();
return mValue;
#elif defined(SGATOMIC_USE_WIN32_INTERLOCKED)
return static_cast<unsigned const volatile &>(mValue);
#else
SGGuard<SGMutex> lock(mMutex);
return mValue;
#endif
# else
# error
# endif
}
#endif
#if defined(SGATOMIC_USE_LIBRARY_FUNCTIONS)
bool compareAndExchange(unsigned oldValue, unsigned newValue);
#else
bool compareAndExchange(unsigned oldValue, unsigned newValue)
{
#if defined(SGATOMIC_USE_GCC4_BUILTINS)
# if defined(SGATOMIC_USE_GCC4_BUILTINS)
return __sync_bool_compare_and_swap(&mValue, oldValue, newValue);
#elif defined(SGATOMIC_USE_MIPSPRO_BUILTINS)
# elif defined(SGATOMIC_USE_MIPSPRO_BUILTINS)
return __compare_and_swap(&mValue, oldValue, newValue);
#elif defined(SGATOMIC_USE_WIN32_INTERLOCKED)
long volatile* lvPtr = reinterpret_cast<long volatile*>(&mValue);
return oldValue == InterlockedCompareExchange(lvPtr, newValue, oldValue);
#else
SGGuard<SGMutex> lock(mMutex);
if (mValue != oldValue)
return false;
mValue = newValue;
return true;
#endif
# else
# error
# endif
}
#endif
private:
SGAtomic(const SGAtomic&);