Rework SGAtomic, move more into the implementation.

Mainly move many cases into the implementation file.
Mathias Froehlich 2011-10-23 23:03:15 +02:00
parent 6250f675db
commit 83772c87ac
2 changed files with 127 additions and 81 deletions
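For orientation, the pattern applied throughout this commit is sketched below in condensed form. The class name Counter and its single increment member are made up for illustration and are not part of SimGear; only the shape of the change mirrors the diff that follows: when SGATOMIC_USE_LIBRARY_FUNCTIONS is selected, the header carries a bare declaration and the per-platform branches are collected in one out-of-line definition in the implementation file. Both halves are merged into one translation unit here so the sketch compiles on its own.

// Condensed, hypothetical sketch of the header/implementation split --
// "Counter" stands in for SGAtomic and is not part of SimGear.

// --- header part: only declare when the library-function path is selected ---
#define SGATOMIC_USE_LIBRARY_FUNCTIONS 1  // pretend we are on such a platform

class Counter {
public:
  Counter(unsigned value = 0) : mValue(value)
  { }
#if defined(SGATOMIC_USE_LIBRARY_FUNCTIONS)
  unsigned operator++();                  // platform cases live in the implementation file
#else
  unsigned operator++()                   // compiler-builtin path stays inline
  { return __sync_add_and_fetch(&mValue, 1); }
#endif
private:
  unsigned mValue;
};

// --- implementation part: the platform #if ladder moves here, out of the header ---
#if defined(SGATOMIC_USE_LIBRARY_FUNCTIONS)
unsigned
Counter::operator++()
{
  // the real SGAtomic.cxx dispatches on _WIN32 / gcc builtins / i386 asm / mutex here
  return ++mValue;                        // placeholder body, not thread-safe
}
#endif

int main()
{
  Counter c;
  return (++c == 1) ? 0 : 1;              // exercises the out-of-line definition
}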

SGAtomic.cxx

@@ -1,6 +1,6 @@
 /* -*-c++-*-
  *
- * Copyright (C) 2005-2009 Mathias Froehlich
+ * Copyright (C) 2005-2009,2011 Mathias Froehlich
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License as
@@ -17,62 +17,104 @@
  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
  *
  */
 #ifdef HAVE_CONFIG_H
 # include <simgear_config.h>
 #endif
 #include "SGAtomic.hxx"
-#if defined(SGATOMIC_USE_GCC4_BUILTINS) && !defined (GCC_ATOMIC_BUILTINS_FOUND)
-// Usually the appropriate functions are inlined by gcc.
-// But if gcc is called with something equivalent to -march=i386,
-// it will not assume that there is a lock instruction and instead
-// calls this pair of functions. We will provide them here in this case.
-// Note that this assembler code will not work on a i386 chip anymore.
-// But I firmly believe that we can assume to run at least on a i486 ...
-extern "C" {
-unsigned __sync_sub_and_fetch_4(volatile void *ptr, unsigned value)
+#if defined(SGATOMIC_USE_LIBRARY_FUNCTIONS)
+#if defined(_WIN32)
+# include <windows.h>
+#elif defined(GCC_ATOMIC_BUILTINS_FOUND)
+#elif defined(__GNUC__) && defined(__i386__)
+#else
+# include <simgear/threads/SGGuard.hxx>
+#endif
+unsigned
+SGAtomic::operator++()
 {
-  register volatile unsigned* mem = reinterpret_cast<volatile unsigned*>(ptr);
+#if defined(_WIN32)
+  return InterlockedIncrement(reinterpret_cast<long volatile*>(&mValue));
+#elif defined(GCC_ATOMIC_BUILTINS_FOUND)
+  return __sync_add_and_fetch(&mValue, 1);
+#elif defined(__GNUC__) && defined(__i386__)
+  register volatile unsigned* mem = reinterpret_cast<volatile unsigned*>(&mValue);
   register unsigned result;
   __asm__ __volatile__("lock; xadd{l} {%0,%1|%1,%0}"
                        : "=r" (result), "=m" (*mem)
-                       : "0" (-value), "m" (*mem)
+                       : "0" (1), "m" (*mem)
                        : "memory");
-  return result - value;
+  return result + 1;
+#else
+  SGGuard<SGMutex> lock(mMutex);
+  return ++mValue;
+#endif
 }
-unsigned __sync_add_and_fetch_4(volatile void *ptr, unsigned value)
+unsigned
+SGAtomic::operator--()
 {
-  register volatile unsigned* mem = reinterpret_cast<volatile unsigned*>(ptr);
+#if defined(_WIN32)
+  return InterlockedDecrement(reinterpret_cast<long volatile*>(&mValue));
+#elif defined(GCC_ATOMIC_BUILTINS_FOUND)
+  return __sync_sub_and_fetch(&mValue, 1);
+#elif defined(__GNUC__) && defined(__i386__)
+  register volatile unsigned* mem = reinterpret_cast<volatile unsigned*>(&mValue);
   register unsigned result;
   __asm__ __volatile__("lock; xadd{l} {%0,%1|%1,%0}"
                        : "=r" (result), "=m" (*mem)
-                       : "0" (value), "m" (*mem)
+                       : "0" (-1), "m" (*mem)
                        : "memory");
-  return result + value;
+  return result - 1;
+#else
+  SGGuard<SGMutex> lock(mMutex);
+  return --mValue;
+#endif
 }
-unsigned __sync_bool_compare_and_swap_4(volatile void *ptr,
-                                        unsigned oldValue, unsigned newValue)
+SGAtomic::operator unsigned() const
 {
-  register volatile unsigned* mem = reinterpret_cast<volatile unsigned*>(ptr);
+#if defined(_WIN32)
+  return static_cast<unsigned const volatile &>(mValue);
+#elif defined(GCC_ATOMIC_BUILTINS_FOUND)
+  __sync_synchronize();
+  return mValue;
+#elif defined(__GNUC__) && defined(__i386__)
+  __asm__ __volatile__("": : : "memory");
+  return mValue;
+#else
+  SGGuard<SGMutex> lock(mMutex);
+  return mValue;
+#endif
+}
+bool
+SGAtomic::compareAndExchange(unsigned oldValue, unsigned newValue)
+{
+#if defined(_WIN32)
+  long volatile* lvPtr = reinterpret_cast<long volatile*>(&mValue);
+  return oldValue == InterlockedCompareExchange(lvPtr, newValue, oldValue);
+#elif defined(GCC_ATOMIC_BUILTINS_FOUND)
+  return __sync_bool_compare_and_swap(&mValue, oldValue, newValue);
+#elif defined(__GNUC__) && defined(__i386__)
+  register volatile unsigned* mem = reinterpret_cast<volatile unsigned*>(&mValue);
   unsigned before;
   __asm__ __volatile__("lock; cmpxchg{l} {%1,%2|%1,%2}"
                        : "=a"(before)
                        : "q"(newValue), "m"(*mem), "0"(oldValue)
                        : "memory");
   return before == oldValue;
+#else
+  SGGuard<SGMutex> lock(mMutex);
+  if (mValue != oldValue)
+    return false;
+  mValue = newValue;
+  return true;
+#endif
 }
-void __sync_synchronize()
-{
-  __asm__ __volatile__("": : : "memory");
-}
-} // extern "C"
 #endif
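The four members defined above (increment, decrement, read, compare-and-exchange) are the operations SGAtomic offers, so a minimal usage sketch looks like the following. It is illustrative only and assumes the header is reachable as "SGAtomic.hxx" on the include path; the diff does not show where the header installs.

// Minimal usage sketch for the interface implemented above (illustrative only).
#include "SGAtomic.hxx"   // assumed include path

#include <iostream>

int main()
{
  SGAtomic count(0);
  ++count;                               // atomic increment, returns the new value
  ++count;
  --count;                               // atomic decrement
  std::cout << "count = " << static_cast<unsigned>(count) << std::endl;   // prints 1

  // compareAndExchange stores newValue only if the current value equals oldValue.
  bool swapped = count.compareAndExchange(1, 5);
  std::cout << "swapped = " << swapped << ", count = "
            << static_cast<unsigned>(count) << std::endl;                 // swapped = 1, count = 5
  return 0;
}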

SGAtomic.hxx

@@ -1,6 +1,6 @@
 /* -*-c++-*-
  *
- * Copyright (C) 2005-2009 Mathias Froehlich
+ * Copyright (C) 2005-2009,2011 Mathias Froehlich
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License as
@@ -21,52 +21,61 @@
 #ifndef SGAtomic_HXX
 #define SGAtomic_HXX
-#if defined(__GNUC__) && ((4 < __GNUC__)||(4 == __GNUC__ && 1 <= __GNUC_MINOR__)) \
-  && (defined(__i386__) || defined(__x86_64__))
+#if defined(__GNUC__) && ((4 < __GNUC__)||(4 == __GNUC__ && 1 <= __GNUC_MINOR__)) && \
+  defined(__x86_64__)
 // No need to include something. Is a Compiler API ...
 # define SGATOMIC_USE_GCC4_BUILTINS
+#elif defined(__GNUC__) && defined(__i386__)
+# define SGATOMIC_USE_LIBRARY_FUNCTIONS
 #elif defined(__sgi) && defined(_COMPILER_VERSION) && (_COMPILER_VERSION>=730)
 // No need to include something. Is a Compiler API ...
 # define SGATOMIC_USE_MIPSPRO_BUILTINS
 #elif defined(_WIN32)
-# include <windows.h>
-# define SGATOMIC_USE_WIN32_INTERLOCKED
+# define SGATOMIC_USE_LIBRARY_FUNCTIONS
 #else
 // The sledge hammer ...
+# define SGATOMIC_USE_LIBRARY_FUNCTIONS
 # include <simgear/threads/SGThread.hxx>
-# include <simgear/threads/SGGuard.hxx>
 #endif
 class SGAtomic {
 public:
   SGAtomic(unsigned value = 0) : mValue(value)
   { }
+#if defined(SGATOMIC_USE_LIBRARY_FUNCTIONS)
+  unsigned operator++();
+#else
   unsigned operator++()
   {
 # if defined(SGATOMIC_USE_GCC4_BUILTINS)
     return __sync_add_and_fetch(&mValue, 1);
 # elif defined(SGATOMIC_USE_MIPOSPRO_BUILTINS)
     return __add_and_fetch(&mValue, 1);
-#elif defined(SGATOMIC_USE_WIN32_INTERLOCKED)
-    return InterlockedIncrement(reinterpret_cast<long volatile*>(&mValue));
 # else
-    SGGuard<SGMutex> lock(mMutex);
-    return ++mValue;
+# error
 # endif
   }
+#endif
+#if defined(SGATOMIC_USE_LIBRARY_FUNCTIONS)
+  unsigned operator--();
+#else
   unsigned operator--()
   {
 # if defined(SGATOMIC_USE_GCC4_BUILTINS)
     return __sync_sub_and_fetch(&mValue, 1);
 # elif defined(SGATOMIC_USE_MIPOSPRO_BUILTINS)
     return __sub_and_fetch(&mValue, 1);
-#elif defined(SGATOMIC_USE_WIN32_INTERLOCKED)
-    return InterlockedDecrement(reinterpret_cast<long volatile*>(&mValue));
 # else
-    SGGuard<SGMutex> lock(mMutex);
-    return --mValue;
+# error
 # endif
   }
+#endif
+#if defined(SGATOMIC_USE_LIBRARY_FUNCTIONS)
+  operator unsigned() const;
+#else
   operator unsigned() const
   {
 # if defined(SGATOMIC_USE_GCC4_BUILTINS)
@@ -75,31 +84,26 @@ public:
 # elif defined(SGATOMIC_USE_MIPOSPRO_BUILTINS)
     __synchronize();
     return mValue;
-#elif defined(SGATOMIC_USE_WIN32_INTERLOCKED)
-    return static_cast<unsigned const volatile &>(mValue);
 # else
-    SGGuard<SGMutex> lock(mMutex);
-    return mValue;
+# error
 # endif
   }
+#endif
+#if defined(SGATOMIC_USE_LIBRARY_FUNCTIONS)
+  bool compareAndExchange(unsigned oldValue, unsigned newValue);
+#else
   bool compareAndExchange(unsigned oldValue, unsigned newValue)
   {
 # if defined(SGATOMIC_USE_GCC4_BUILTINS)
     return __sync_bool_compare_and_swap(&mValue, oldValue, newValue);
 # elif defined(SGATOMIC_USE_MIPOSPRO_BUILTINS)
     return __compare_and_swap(&mValue, oldValue, newValue);
-#elif defined(SGATOMIC_USE_WIN32_INTERLOCKED)
-    long volatile* lvPtr = reinterpret_cast<long volatile*>(&mValue);
-    return oldValue == InterlockedCompareExchange(lvPtr, newValue, oldValue);
 # else
-    SGGuard<SGMutex> lock(mMutex);
-    if (mValue != oldValue)
-      return false;
-    mValue = newValue;
-    return true;
+# error
 # endif
   }
+#endif
 private:
   SGAtomic(const SGAtomic&);
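Since compareAndExchange is the only general read-modify-write primitive the class exposes besides the fixed increment and decrement, richer operations have to be built as retry loops on top of it. The helper atomicAdd below is a hypothetical sketch of that pattern and is not part of SGAtomic or of this commit; the include path is assumed as above.

// Illustrative retry loop built on SGAtomic::compareAndExchange.
#include "SGAtomic.hxx"   // assumed include path

unsigned atomicAdd(SGAtomic& value, unsigned amount)
{
  for (;;) {
    unsigned expected = value;                   // snapshot via operator unsigned()
    unsigned desired = expected + amount;
    if (value.compareAndExchange(expected, desired))
      return desired;                            // no other thread interfered
    // another thread changed the value between the read and the swap; retry
  }
}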