atomic.h

/*
    Copyright 2005-2009 Intel Corporation.  All Rights Reserved.

    The source code contained or described herein and all documents related
    to the source code ("Material") are owned by Intel Corporation or its
    suppliers or licensors.  Title to the Material remains with Intel
    Corporation or its suppliers and licensors.  The Material is protected
    by worldwide copyright laws and treaty provisions.  No part of the
    Material may be used, copied, reproduced, modified, published, uploaded,
    posted, transmitted, distributed, or disclosed in any way without
    Intel's prior express written permission.

    No license under any patent, copyright, trade secret or other
    intellectual property right is granted to or conferred upon you by
    disclosure or delivery of the Materials, either expressly, by
    implication, inducement, estoppel or otherwise.  Any license under such
    intellectual property rights must be express and approved by Intel in
    writing.
*/

#ifndef __TBB_atomic_H
#define __TBB_atomic_H

#include <cstddef>
#include "tbb_stddef.h"

#if _MSC_VER
#define __TBB_LONG_LONG __int64
#else
#define __TBB_LONG_LONG long long
#endif /* _MSC_VER */

#include "tbb_machine.h"

#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
    // Workaround for overzealous compiler warnings
    #pragma warning (push)
    #pragma warning (disable: 4244 4267)
#endif

namespace tbb {

//! Specifies memory fencing.
enum memory_semantics {
    //! Full fence (the default for all operations).
    __TBB_full_fence,
    //! Acquire fence
    acquire,
    //! Release fence
    release
};
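
/* Illustrative sketch (not part of the original header): a memory_semantics
   value is supplied as a template argument to choose the fence used by an
   individual operation; operations without an explicit argument use
   __TBB_full_fence.  Assuming a previously declared tbb::atomic<int> x:

       int prev = x.fetch_and_add<tbb::acquire>(1);   // acquire fence
       x.fetch_and_store<tbb::release>(0);            // release fence
       int old  = x.fetch_and_add(1);                 // full fence (default)
*/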

namespace internal {

template<size_t Size, memory_semantics M>
struct atomic_traits {       // Primary template
};

template<size_t Size>
struct atomic_word {             // Primary template
    typedef intptr word;
};

template<typename I>            // Primary template
struct atomic_base {
    I my_value;
};

#if __GNUC__ || __SUNPRO_CC
#define __TBB_DECL_ATOMIC_FIELD(t,f,a) t f  __attribute__ ((aligned(a)));
#elif defined(__INTEL_COMPILER)||_MSC_VER >= 1300
#define __TBB_DECL_ATOMIC_FIELD(t,f,a) __declspec(align(a)) t f;
#else
#error Do not know syntax for forcing alignment.
#endif /* __GNUC__ */

template<>
struct atomic_word<8> {          // Specialization
    typedef int64_t word;
};

#if _WIN32 && __TBB_x86_64
// ATTENTION: On 64-bit Windows, we currently have to specialize atomic_word
// for every size to avoid type conversion warnings
// See declarations of atomic primitives in machine/windows_em64t.h
template<>
struct atomic_word<1> {          // Specialization
    typedef int8_t word;
};
template<>
struct atomic_word<2> {          // Specialization
    typedef int16_t word;
};
template<>
struct atomic_word<4> {          // Specialization
    typedef int32_t word;
};
#endif

template<>
struct atomic_base<uint64_t> {   // Specialization
    __TBB_DECL_ATOMIC_FIELD(uint64_t,my_value,8)
};

template<>
struct atomic_base<int64_t> {    // Specialization
    __TBB_DECL_ATOMIC_FIELD(int64_t,my_value,8)
};

#define __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(S,M)                         \
    template<> struct atomic_traits<S,M> {                               \
        typedef atomic_word<S>::word word;                               \
        inline static word compare_and_swap( volatile void* location, word new_value, word comparand ) {\
            return __TBB_CompareAndSwap##S##M(location,new_value,comparand);    \
        }                                                                       \
        inline static word fetch_and_add( volatile void* location, word addend ) { \
            return __TBB_FetchAndAdd##S##M(location,addend);                    \
        }                                                                       \
        inline static word fetch_and_store( volatile void* location, word value ) {\
            return __TBB_FetchAndStore##S##M(location,value);                   \
        }                                                                       \
    };

#define __TBB_DECL_ATOMIC_PRIMITIVES(S)                                  \
    template<memory_semantics M>                                         \
    struct atomic_traits<S,M> {                                          \
        typedef atomic_word<S>::word word;                               \
        inline static word compare_and_swap( volatile void* location, word new_value, word comparand ) {\
            return __TBB_CompareAndSwap##S(location,new_value,comparand);       \
        }                                                                       \
        inline static word fetch_and_add( volatile void* location, word addend ) { \
            return __TBB_FetchAndAdd##S(location,addend);                       \
        }                                                                       \
        inline static word fetch_and_store( volatile void* location, word value ) {\
            return __TBB_FetchAndStore##S(location,value);                      \
        }                                                                       \
    };

#if __TBB_DECL_FENCED_ATOMICS
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(1,__TBB_full_fence)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(2,__TBB_full_fence)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(4,__TBB_full_fence)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(8,__TBB_full_fence)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(1,acquire)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(2,acquire)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(4,acquire)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(8,acquire)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(1,release)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(2,release)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(4,release)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(8,release)
#else
__TBB_DECL_ATOMIC_PRIMITIVES(1)
__TBB_DECL_ATOMIC_PRIMITIVES(2)
__TBB_DECL_ATOMIC_PRIMITIVES(4)
__TBB_DECL_ATOMIC_PRIMITIVES(8)
#endif
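
/* Illustrative note (not part of the original header): each invocation above
   produces an atomic_traits specialization for one operand size that simply
   forwards to the primitives supplied by tbb_machine.h.  For example,
   __TBB_DECL_ATOMIC_PRIMITIVES(4) expands to roughly

       template<memory_semantics M>
       struct atomic_traits<4,M> {
           typedef atomic_word<4>::word word;
           inline static word compare_and_swap( volatile void* location, word new_value, word comparand ) {
               return __TBB_CompareAndSwap4(location,new_value,comparand);
           }
           // ...fetch_and_add and fetch_and_store forward likewise...
       };
*/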

//! Additive inverse of 1 for type T, computed with binary minus to avoid compiler warnings.
#define __TBB_MINUS_ONE(T) (T(T(0)-T(1)))

//! Implementation of atomic operations for a type I with difference type D.
/** StepType determines the scaling of the addend: arithmetic advances in units
    of sizeof(StepType), so integral atomics use char while atomic<T*> uses T
    to obtain pointer arithmetic in whole elements. */
template<typename I, typename D, typename StepType>
struct atomic_impl: private atomic_base<I> {
private:
    typedef typename atomic_word<sizeof(I)>::word word;
public:
    typedef I value_type;

    template<memory_semantics M>
    value_type fetch_and_add( D addend ) {
        return value_type(internal::atomic_traits<sizeof(value_type),M>::fetch_and_add( &this->my_value, addend*sizeof(StepType) ));
    }

    value_type fetch_and_add( D addend ) {
        return fetch_and_add<__TBB_full_fence>(addend);
    }

    template<memory_semantics M>
    value_type fetch_and_increment() {
        return fetch_and_add<M>(1);
    }

    value_type fetch_and_increment() {
        return fetch_and_add(1);
    }

    template<memory_semantics M>
    value_type fetch_and_decrement() {
        return fetch_and_add<M>(__TBB_MINUS_ONE(D));
    }

    value_type fetch_and_decrement() {
        return fetch_and_add(__TBB_MINUS_ONE(D));
    }

    template<memory_semantics M>
    value_type fetch_and_store( value_type value ) {
        return value_type(internal::atomic_traits<sizeof(value_type),M>::fetch_and_store(&this->my_value,word(value)));
    }

    value_type fetch_and_store( value_type value ) {
        return fetch_and_store<__TBB_full_fence>(value);
    }

    template<memory_semantics M>
    value_type compare_and_swap( value_type value, value_type comparand ) {
        return value_type(internal::atomic_traits<sizeof(value_type),M>::compare_and_swap(&this->my_value,word(value),word(comparand)));
    }

    value_type compare_and_swap( value_type value, value_type comparand ) {
        return compare_and_swap<__TBB_full_fence>(value,comparand);
    }

    operator value_type() const volatile {                // volatile qualifier here for backwards compatibility
        return __TBB_load_with_acquire( this->my_value );
    }

protected:
    value_type store_with_release( value_type rhs ) {
        __TBB_store_with_release(this->my_value,rhs);
        return rhs;
    }

public:
    value_type operator+=( D addend ) {
        return fetch_and_add(addend)+addend;
    }

    value_type operator-=( D addend ) {
        // Additive inverse of addend computed using binary minus,
        // instead of unary minus, for sake of avoiding compiler warnings.
        return operator+=(D(0)-addend);
    }

    value_type operator++() {
        return fetch_and_add(1)+1;
    }

    value_type operator--() {
        return fetch_and_add(__TBB_MINUS_ONE(D))-1;
    }

    value_type operator++(int) {
        return fetch_and_add(1);
    }

    value_type operator--(int) {
        return fetch_and_add(__TBB_MINUS_ONE(D));
    }
};
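
/* Illustrative note (not part of the original header): the operators above are
   thin wrappers over the fetch-and-add members.  For a previously declared
   tbb::atomic<int> x,

       ++x;      // same as x.fetch_and_add(1)+1, returns the new value
       x += 5;   // same as x.fetch_and_add(5)+5
       x--;      // same as x.fetch_and_add(-1), returns the old value

   each with full-fence semantics; an explicit memory_semantics template
   argument, e.g. x.fetch_and_add<tbb::release>(1), selects a weaker fence. */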

#if __TBB_WORDSIZE == 4
// Platforms with 32-bit hardware require special effort for 64-bit loads and stores.
#if defined(__INTEL_COMPILER)||!defined(_MSC_VER)||_MSC_VER>=1400

template<>
inline atomic_impl<__TBB_LONG_LONG,__TBB_LONG_LONG,char>::operator atomic_impl<__TBB_LONG_LONG,__TBB_LONG_LONG,char>::value_type() const volatile {
    return __TBB_Load8(&this->my_value);
}

template<>
inline atomic_impl<unsigned __TBB_LONG_LONG,unsigned __TBB_LONG_LONG,char>::operator atomic_impl<unsigned __TBB_LONG_LONG,unsigned __TBB_LONG_LONG,char>::value_type() const volatile {
    return __TBB_Load8(&this->my_value);
}

template<>
inline atomic_impl<__TBB_LONG_LONG,__TBB_LONG_LONG,char>::value_type atomic_impl<__TBB_LONG_LONG,__TBB_LONG_LONG,char>::store_with_release( value_type rhs ) {
    __TBB_Store8(&this->my_value,rhs);
    return rhs;
}

template<>
inline atomic_impl<unsigned __TBB_LONG_LONG,unsigned __TBB_LONG_LONG,char>::value_type atomic_impl<unsigned __TBB_LONG_LONG,unsigned __TBB_LONG_LONG,char>::store_with_release( value_type rhs ) {
    __TBB_Store8(&this->my_value,rhs);
    return rhs;
}

#endif /* defined(__INTEL_COMPILER)||!defined(_MSC_VER)||_MSC_VER>=1400 */
#endif /* __TBB_WORDSIZE==4 */

} // namespace internal

//! Primary template for atomic.
/** Intentionally empty; only the specializations declared below provide atomic operations. */
template<typename T>
struct atomic {
};

#define __TBB_DECL_ATOMIC(T) \
    template<> struct atomic<T>: internal::atomic_impl<T,T,char> {  \
        T operator=( T rhs ) {return store_with_release(rhs);}  \
        atomic<T>& operator=( const atomic<T>& rhs ) {store_with_release(rhs); return *this;}  \
    };

#if defined(__INTEL_COMPILER)||!defined(_MSC_VER)||_MSC_VER>=1400
__TBB_DECL_ATOMIC(__TBB_LONG_LONG)
__TBB_DECL_ATOMIC(unsigned __TBB_LONG_LONG)
#else
// Some old versions of MSVC cannot correctly compile templates with "long long".
#endif /* defined(__INTEL_COMPILER)||!defined(_MSC_VER)||_MSC_VER>=1400 */

__TBB_DECL_ATOMIC(long)
__TBB_DECL_ATOMIC(unsigned long)

#if defined(_MSC_VER) && __TBB_WORDSIZE==4
/* Special version of __TBB_DECL_ATOMIC that avoids gratuitous warnings from cl /Wp64 option.
   It is identical to __TBB_DECL_ATOMIC(unsigned) except that it replaces operator=(T)
   with an operator=(U) that explicitly converts the U to a T.  Types T and U should be
   type synonyms on the platform.  Type U should be the wider variant of T from the
   perspective of /Wp64. */
#define __TBB_DECL_ATOMIC_ALT(T,U) \
    template<> struct atomic<T>: internal::atomic_impl<T,T,char> {  \
        T operator=( U rhs ) {return store_with_release(T(rhs));}  \
        atomic<T>& operator=( const atomic<T>& rhs ) {store_with_release(rhs); return *this;}  \
    };
__TBB_DECL_ATOMIC_ALT(unsigned,size_t)
__TBB_DECL_ATOMIC_ALT(int,ptrdiff_t)
#else
__TBB_DECL_ATOMIC(unsigned)
__TBB_DECL_ATOMIC(int)
#endif /* defined(_MSC_VER) && __TBB_WORDSIZE==4 */

__TBB_DECL_ATOMIC(unsigned short)
__TBB_DECL_ATOMIC(short)
__TBB_DECL_ATOMIC(char)
__TBB_DECL_ATOMIC(signed char)
__TBB_DECL_ATOMIC(unsigned char)

#if !defined(_MSC_VER)||defined(_NATIVE_WCHAR_T_DEFINED)
__TBB_DECL_ATOMIC(wchar_t)
#endif /* !defined(_MSC_VER)||defined(_NATIVE_WCHAR_T_DEFINED) */
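
/* Usage sketch (illustrative, not part of the original header): each
   __TBB_DECL_ATOMIC invocation above defines a full specialization that
   behaves like the underlying integer with atomic read-modify-write
   semantics, e.g.

       tbb::atomic<long> hits;
       hits = 0;                                // release store
       long before = hits.fetch_and_add(10);    // returns the value prior to the add
       long now    = ++hits;                    // atomic pre-increment, returns the new value
       if( hits.compare_and_swap(0,11)==11 ) {
           // the stored value was 11 and has been atomically replaced with 0
       }
*/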

//! Specialization for pointers, providing pointer arithmetic and operator->.
template<typename T> struct atomic<T*>: internal::atomic_impl<T*,ptrdiff_t,T> {
    T* operator=( T* rhs ) {
        // "this" required here in strict ISO C++ because store_with_release is a dependent name
        return this->store_with_release(rhs);
    }
    atomic<T*>& operator=( const atomic<T*>& rhs ) {this->store_with_release(rhs); return *this;}
    T* operator->() const {
        return (*this);
    }
};
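
/* Illustrative sketch (not part of the original header): because StepType is T,
   arithmetic on atomic<T*> advances by whole elements (the addend is scaled by
   sizeof(T) inside fetch_and_add).  For example:

       struct Node { int payload; };
       Node buffer[16];
       tbb::atomic<Node*> cursor;
       cursor = buffer;
       Node* claimed = cursor.fetch_and_add(1);   // atomically claims one element
       int v = cursor->payload;                   // operator-> reads through the current pointer
*/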

//! Specialization for atomic<void*>, which provides no arithmetic or operator->.
template<>
struct atomic<void*> {
private:
    void* my_value;

public:
    typedef void* value_type;

    template<memory_semantics M>
    value_type compare_and_swap( value_type value, value_type comparand ) {
        return value_type(internal::atomic_traits<sizeof(value_type),M>::compare_and_swap(&my_value,internal::intptr(value),internal::intptr(comparand)));
    }

    value_type compare_and_swap( value_type value, value_type comparand ) {
        return compare_and_swap<__TBB_full_fence>(value,comparand);
    }

    template<memory_semantics M>
    value_type fetch_and_store( value_type value ) {
        return value_type(internal::atomic_traits<sizeof(value_type),M>::fetch_and_store(&my_value,internal::intptr(value)));
    }

    value_type fetch_and_store( value_type value ) {
        return fetch_and_store<__TBB_full_fence>(value);
    }

    operator value_type() const {
        return __TBB_load_with_acquire(my_value);
    }

    value_type operator=( value_type rhs ) {
        __TBB_store_with_release(my_value,rhs);
        return rhs;
    }

    atomic<void*>& operator=( const atomic<void*>& rhs ) {
        __TBB_store_with_release(my_value,rhs);
        return *this;
    }
};
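
/* Illustrative sketch (not part of the original header): atomic<void*> supports
   load, store, fetch_and_store and compare_and_swap, which is enough to publish
   a pointer exactly once, e.g.

       tbb::atomic<void*> slot;
       slot = NULL;
       void* mine = operator new(64);
       if( slot.compare_and_swap(mine,NULL)!=NULL )
           operator delete(mine);   // another thread published its pointer first
*/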

//! Specialization for atomic<bool>.
template<>
struct atomic<bool> {
private:
    bool my_value;
    typedef internal::atomic_word<sizeof(bool)>::word word;
public:
    typedef bool value_type;
    template<memory_semantics M>
    value_type compare_and_swap( value_type value, value_type comparand ) {
        return internal::atomic_traits<sizeof(value_type),M>::compare_and_swap(&my_value,word(value),word(comparand))!=0;
    }

    value_type compare_and_swap( value_type value, value_type comparand ) {
        return compare_and_swap<__TBB_full_fence>(value,comparand);
    }

    template<memory_semantics M>
    value_type fetch_and_store( value_type value ) {
        return internal::atomic_traits<sizeof(value_type),M>::fetch_and_store(&my_value,word(value))!=0;
    }

    value_type fetch_and_store( value_type value ) {
        return fetch_and_store<__TBB_full_fence>(value);
    }

    operator value_type() const {
        return __TBB_load_with_acquire(my_value);
    }

    value_type operator=( value_type rhs ) {
        __TBB_store_with_release(my_value,rhs);
        return rhs;
    }

    atomic<bool>& operator=( const atomic<bool>& rhs ) {
        __TBB_store_with_release(my_value,rhs);
        return *this;
    }
};
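
/* Illustrative sketch (not part of the original header): fetch_and_store on
   atomic<bool> is sufficient for a simple once-flag, e.g.

       tbb::atomic<bool> initialized;
       initialized = false;
       // ... later, possibly from several threads ...
       if( !initialized.fetch_and_store(true) ) {
           // exactly one thread observes false here and performs the initialization
       }
*/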

} // namespace tbb

#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
    #pragma warning (pop)
#endif // warnings 4244, 4267 are back

#endif /* __TBB_atomic_H */
