concurrent_hash_map.h

/*
    Copyright 2005-2009 Intel Corporation.  All Rights Reserved.

    The source code contained or described herein and all documents related
    to the source code ("Material") are owned by Intel Corporation or its
    suppliers or licensors.  Title to the Material remains with Intel
    Corporation or its suppliers and licensors.  The Material is protected
    by worldwide copyright laws and treaty provisions.  No part of the
    Material may be used, copied, reproduced, modified, published, uploaded,
    posted, transmitted, distributed, or disclosed in any way without
    Intel's prior express written permission.

    No license under any patent, copyright, trade secret or other
    intellectual property right is granted to or conferred upon you by
    disclosure or delivery of the Materials, either expressly, by
    implication, inducement, estoppel or otherwise.  Any license under such
    intellectual property rights must be express and approved by Intel in
    writing.
*/

#ifndef __TBB_concurrent_hash_map_H
#define __TBB_concurrent_hash_map_H

#include <stdexcept>
#include <iterator>
#include <utility>      // Need std::pair
#include <cstring>      // Need std::memset
#include <string>
#include "tbb_stddef.h"
#include "cache_aligned_allocator.h"
#include "tbb_allocator.h"
#include "spin_rw_mutex.h"
#include "atomic.h"
#if TBB_USE_PERFORMANCE_WARNINGS
#include <typeinfo>
#endif

namespace tbb {

template<typename T> struct tbb_hash_compare;
template<typename Key, typename T, typename HashCompare = tbb_hash_compare<Key>, typename A = tbb_allocator<std::pair<Key, T> > >
class concurrent_hash_map;

namespace internal {
    // Type of a hash code.
    typedef size_t hashcode_t;
    // Base class of concurrent_hash_map.
    class hash_map_base {
    public:
        // Mutex types for each layer of the container
        typedef spin_rw_mutex node_mutex_t;
        typedef spin_rw_mutex chain_mutex_t;
        typedef spin_rw_mutex segment_mutex_t;

        // Type of a hash code.
        typedef internal::hashcode_t hashcode_t;
        // Log2 of the number of segments.
        static const size_t n_segment_bits = 6;
        // Number of segments.
        static const size_t n_segment = size_t(1)<<n_segment_bits;
        // Maximum size of a segment's array of chains.
        static const size_t max_physical_size = size_t(1)<<(8*sizeof(hashcode_t)-n_segment_bits);
    };

    template<typename Iterator>
    class hash_map_range;

    struct hash_map_segment_base {
        // Mutex protecting this segment.
        hash_map_base::segment_mutex_t my_mutex;

        // Number of nodes
        atomic<size_t> my_logical_size;

        // Size of the array of chains; always zero or a power of two.
        size_t my_physical_size;

        // True if the segment should grow; out-of-line to support thread-checking tools.
        bool __TBB_EXPORTED_METHOD internal_grow_predicate() const;
    };

    // Meets the requirements of a forward iterator for STL.
    template<typename Container, typename Value>
    class hash_map_iterator
        : public std::iterator<std::forward_iterator_tag,Value>
    {
        typedef typename Container::node node;
        typedef typename Container::chain chain;
        typedef typename Container::segment segment;

        // concurrent_hash_map over which we are iterating.
        Container* my_table;

        // Pointer to the node that holds the current item.
        node* my_node;

        // Index of the chain within the current segment.
        size_t my_array_index;

        // Index of the current segment.
        size_t my_segment_index;

        template<typename C, typename T, typename U>
        friend bool operator==( const hash_map_iterator<C,T>& i, const hash_map_iterator<C,U>& j );

        template<typename C, typename T, typename U>
        friend bool operator!=( const hash_map_iterator<C,T>& i, const hash_map_iterator<C,U>& j );

        template<typename C, typename T, typename U>
        friend ptrdiff_t operator-( const hash_map_iterator<C,T>& i, const hash_map_iterator<C,U>& j );

        template<typename C, typename U>
        friend class internal::hash_map_iterator;

        template<typename I>
        friend class internal::hash_map_range;

        void advance_to_next_node() {
            size_t i = my_array_index+1;
            do {
                segment &s = my_table->my_segment[my_segment_index];
                while( i<s.my_physical_size ) {
                    my_node = s.my_array[i].node_list;
                    if( my_node ) goto done;
                    ++i;
                }
                i = 0;
            } while( ++my_segment_index<my_table->n_segment );
        done:
            my_array_index = i;
        }
#if !defined(_MSC_VER) || defined(__INTEL_COMPILER)
        template<typename Key, typename T, typename HashCompare, typename A>
        friend class tbb::concurrent_hash_map;
#else
    public: // workaround
#endif
        hash_map_iterator( const Container& table, size_t segment_index, size_t array_index=0, node* b=NULL );
    public:
        // Construct an undefined iterator.
        hash_map_iterator() {}
        hash_map_iterator( const hash_map_iterator<Container,typename Container::value_type>& other ) :
            my_table(other.my_table),
            my_node(other.my_node),
            my_array_index(other.my_array_index),
            my_segment_index(other.my_segment_index)
        {}
        Value& operator*() const {
            __TBB_ASSERT( my_node, "iterator uninitialized or at end of container?" );
            return my_node->item;
        }
        Value* operator->() const {return &operator*();}
        hash_map_iterator& operator++();

        // Post increment: advances, returning a pointer to the old item.
        Value* operator++(int) {
            Value* result = &operator*();
            operator++();
            return result;
        }
    };

    template<typename Container, typename Value>
    hash_map_iterator<Container,Value>::hash_map_iterator( const Container& table, size_t segment_index, size_t array_index, node* b ) :
        my_table(const_cast<Container*>(&table)),
        my_node(b),
        my_array_index(array_index),
        my_segment_index(segment_index)
    {
        if( segment_index<my_table->n_segment ) {
            if( !my_node ) {
                segment &s = my_table->my_segment[segment_index];
                chain* first_chain = s.my_array;
                if( first_chain && my_array_index < s.my_physical_size)
                    my_node = first_chain[my_array_index].node_list;
            }
            if( !my_node ) advance_to_next_node();
        }
    }

    template<typename Container, typename Value>
    hash_map_iterator<Container,Value>& hash_map_iterator<Container,Value>::operator++() {
        my_node=my_node->next;
        if( !my_node ) advance_to_next_node();
        return *this;
    }

    template<typename Container, typename T, typename U>
    bool operator==( const hash_map_iterator<Container,T>& i, const hash_map_iterator<Container,U>& j ) {
        return i.my_node==j.my_node;
    }

    template<typename Container, typename T, typename U>
    bool operator!=( const hash_map_iterator<Container,T>& i, const hash_map_iterator<Container,U>& j ) {
        return i.my_node!=j.my_node;
    }

    // Range over a hash map, suitable for tbb parallel algorithms.
    template<typename Iterator>
    class hash_map_range {
    private:
        Iterator my_begin;
        Iterator my_end;
        mutable Iterator my_midpoint;
        size_t my_grainsize;
        // Set my_midpoint to the approximate midpoint of the range.
        void set_midpoint() const;
        template<typename U> friend class hash_map_range;
    public:
        typedef std::size_t size_type;
        typedef typename Iterator::value_type value_type;
        typedef typename Iterator::reference reference;
        typedef typename Iterator::difference_type difference_type;
        typedef Iterator iterator;

        // True if range is empty.
        bool empty() const {return my_begin==my_end;}

        // True if range can be partitioned into two subranges.
        bool is_divisible() const {
            return my_midpoint!=my_end;
        }
        // Split range.
        hash_map_range( hash_map_range& r, split ) :
            my_end(r.my_end),
            my_grainsize(r.my_grainsize)
        {
            r.my_end = my_begin = r.my_midpoint;
            __TBB_ASSERT( my_begin!=my_end, "splitting an indivisible range?" );
            __TBB_ASSERT( r.my_begin!=r.my_end, "splitting an indivisible range?" );
            set_midpoint();
            r.set_midpoint();
        }
        // Type conversion (e.g. from a non-const to a const iterator range).
        template<typename U>
        hash_map_range( hash_map_range<U>& r) :
            my_begin(r.my_begin),
            my_end(r.my_end),
            my_midpoint(r.my_midpoint),
            my_grainsize(r.my_grainsize)
        {}
        // Construct range from a pair of iterators and a grain size.
        hash_map_range( const Iterator& begin_, const Iterator& end_, size_type grainsize = 1 ) :
            my_begin(begin_),
            my_end(end_),
            my_grainsize(grainsize)
        {
            set_midpoint();
            __TBB_ASSERT( grainsize>0, "grainsize must be positive" );
        }
        const Iterator& begin() const {return my_begin;}
        const Iterator& end() const {return my_end;}
        // The grain size for this range.
        size_type grainsize() const {return my_grainsize;}
    };

    template<typename Iterator>
    void hash_map_range<Iterator>::set_midpoint() const {
        size_t n = my_end.my_segment_index - my_begin.my_segment_index;
        if( n > 1 || (n == 1 && my_end.my_array_index > my_grainsize/2) ) {
            // Split by groups of segments
            my_midpoint = Iterator(*my_begin.my_table,(my_end.my_segment_index+my_begin.my_segment_index+1)/2u);
        } else {
            // Split by groups of nodes
            size_t m = my_end.my_array_index-my_begin.my_array_index;
            if( n ) m += my_begin.my_table->my_segment[my_begin.my_segment_index].my_physical_size;
            if( m > my_grainsize ) {
                my_midpoint = Iterator(*my_begin.my_table,my_begin.my_segment_index,my_begin.my_array_index + m/2u);
            } else {
                my_midpoint = my_end;
            }
        }
        __TBB_ASSERT( my_begin.my_segment_index < my_midpoint.my_segment_index
            || (my_begin.my_segment_index == my_midpoint.my_segment_index
            && my_begin.my_array_index <= my_midpoint.my_array_index),
            "my_begin is after my_midpoint" );
        __TBB_ASSERT( my_midpoint.my_segment_index < my_end.my_segment_index
            || (my_midpoint.my_segment_index == my_end.my_segment_index
            && my_midpoint.my_array_index <= my_end.my_array_index),
            "my_midpoint is after my_end" );
        __TBB_ASSERT( my_begin != my_midpoint || my_begin == my_end,
            "[my_begin, my_midpoint) range should not be empty" );
    }
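
    // Illustrative sketch (an assumption of typical use, not part of the original
    // header): because hash_map_range models the TBB Range concept, a table can be
    // traversed in parallel with tbb::parallel_for (from "tbb/parallel_for.h"),
    // provided no concurrent insert or erase runs meanwhile. MyTable and Body
    // below are hypothetical names:
    //
    //     typedef tbb::concurrent_hash_map<int,int> MyTable;
    //     struct Body {
    //         void operator()( const MyTable::range_type& r ) const {
    //             for( MyTable::range_type::iterator i=r.begin(); i!=r.end(); ++i )
    //                 ++i->second;    // update values; table structure is unchanged
    //         }
    //     };
    //     // usage: tbb::parallel_for( table.range(), Body() );
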
    // Constant for multiplicative hashing, derived from the golden ratio.
    static const hashcode_t hash_multiplier = sizeof(hashcode_t)==4? 2654435769U : 11400714819323198485ULL;
    // Hashers for values convertible to hashcode_t, pointers, strings, and pairs.
    template<typename T>
    inline static hashcode_t hasher( const T& t ) {
        return static_cast<hashcode_t>( t ) * hash_multiplier;
    }
    template<typename P>
    inline static hashcode_t hasher( P* ptr ) {
        hashcode_t const h = reinterpret_cast<hashcode_t>( ptr );
        return (h >> 3) ^ h;
    }
    template<typename E, typename S, typename A>
    inline static hashcode_t hasher( const std::basic_string<E,S,A>& s ) {
        hashcode_t h = 0;
        for( const E* c = s.c_str(); *c; c++ )
            h = static_cast<hashcode_t>(*c) ^ (h * hash_multiplier);
        return h;
    }
    template<typename F, typename S>
    inline static hashcode_t hasher( const std::pair<F,S>& p ) {
        return hasher(p.first) ^ hasher(p.second);
    }
} // namespace internal

// Default HashCompare: hashes via internal::hasher, compares via operator==.
template<typename T>
struct tbb_hash_compare {
    static internal::hashcode_t hash( const T& t ) { return internal::hasher(t); }
    static bool equal( const T& a, const T& b ) { return a == b; }
};
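
// Illustrative sketch (not from the original header): a user-supplied
// HashCompare must provide the same two static members. For example, a
// hypothetical comparator for C-string keys could look like:
//
//     struct cstring_hash_compare {
//         static size_t hash( const char* s ) {
//             size_t h = 0;
//             for( ; *s; ++s )
//                 h = size_t(*s) ^ (h * 1027);   // simple multiplicative mix
//             return h;
//         }
//         static bool equal( const char* a, const char* b ) {
//             return std::strcmp(a,b)==0;        // <cstring> is already included
//         }
//     };
//     // usage: tbb::concurrent_hash_map<const char*,int,cstring_hash_compare>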

// Unordered map from Key to T that permits concurrent insertion, lookup, and
// erasure. Element access is mediated by accessors, which hold a per-element
// lock for as long as they are attached to the element.
template<typename Key, typename T, typename HashCompare, typename A>
class concurrent_hash_map : protected internal::hash_map_base {
    template<typename Container, typename Value>
    friend class internal::hash_map_iterator;

    template<typename I>
    friend class internal::hash_map_range;

    struct node;
    friend struct node;
    typedef typename A::template rebind<node>::other node_allocator_type;

public:
    class const_accessor;
    friend class const_accessor;
    class accessor;

    typedef Key key_type;
    typedef T mapped_type;
    typedef std::pair<const Key,T> value_type;
    typedef size_t size_type;
    typedef ptrdiff_t difference_type;
    typedef value_type *pointer;
    typedef const value_type *const_pointer;
    typedef value_type &reference;
    typedef const value_type &const_reference;
    typedef internal::hash_map_iterator<concurrent_hash_map,value_type> iterator;
    typedef internal::hash_map_iterator<concurrent_hash_map,const value_type> const_iterator;
    typedef internal::hash_map_range<iterator> range_type;
    typedef internal::hash_map_range<const_iterator> const_range_type;
    typedef A allocator_type;

    // Combines data access, locking, and garbage collection: provides read-only
    // access to an element while holding a lock on it.
    class const_accessor {
        friend class concurrent_hash_map<Key,T,HashCompare,A>;
        friend class accessor;
        void operator=( const accessor& ) const; // Deny access
        const_accessor( const accessor& );       // Deny access
    public:
        // Type of value.
        typedef const std::pair<const Key,T> value_type;

        // True if the accessor is not attached to an element.
        bool empty() const {return !my_node;}

        // Release the lock and detach the accessor from its element.
        void release() {
            if( my_node ) {
                my_lock.release();
                my_node = NULL;
            }
        }

        // Return reference to the associated item in the hash table.
        const_reference operator*() const {
            __TBB_ASSERT( my_node, "attempt to dereference empty accessor" );
            return my_node->item;
        }

        // Return pointer to the associated item in the hash table.
        const_pointer operator->() const {
            return &operator*();
        }

        // Create an empty accessor.
        const_accessor() : my_node(NULL) {}

        // Destroy the accessor, releasing any held lock.
        ~const_accessor() {
            my_node = NULL; // my_lock.release() is called in scoped_lock destructor
        }
    private:
        node* my_node;
        node_mutex_t::scoped_lock my_lock;
        hashcode_t my_hash;
    };

    // Like const_accessor, but provides read-write access and holds a write lock.
    class accessor: public const_accessor {
    public:
        // Type of value.
        typedef std::pair<const Key,T> value_type;

        // Return reference to the associated item in the hash table.
        reference operator*() const {
            __TBB_ASSERT( this->my_node, "attempt to dereference empty accessor" );
            return this->my_node->item;
        }

        // Return pointer to the associated item in the hash table.
        pointer operator->() const {
            return &operator*();
        }
    };

    // Construct an empty table.
    concurrent_hash_map(const allocator_type &a = allocator_type())
        : my_allocator(a)
    {
        initialize();
    }

    // Copy constructor.
    concurrent_hash_map( const concurrent_hash_map& table, const allocator_type &a = allocator_type())
        : my_allocator(a)
    {
        initialize();
        internal_copy(table);
    }

    // Construct a table containing copies of the elements in [first,last).
    template<typename I>
    concurrent_hash_map(I first, I last, const allocator_type &a = allocator_type())
        : my_allocator(a)
    {
        initialize();
        internal_copy(first, last);
    }

    // Assignment (not thread-safe).
    concurrent_hash_map& operator=( const concurrent_hash_map& table ) {
        if( this!=&table ) {
            clear();
            internal_copy(table);
        }
        return *this;
    }

    // Clear the table (not thread-safe).
    void clear();

    // Clear and destroy the table.
    ~concurrent_hash_map();

    //------------------------------------------------------------------------
    // Parallel algorithm support
    //------------------------------------------------------------------------
    range_type range( size_type grainsize=1 ) {
        return range_type( begin(), end(), grainsize );
    }
    const_range_type range( size_type grainsize=1 ) const {
        return const_range_type( begin(), end(), grainsize );
    }

    //------------------------------------------------------------------------
    // STL support - not thread-safe methods
    //------------------------------------------------------------------------
    iterator begin() {return iterator(*this,0);}
    iterator end() {return iterator(*this,n_segment);}
    const_iterator begin() const {return const_iterator(*this,0);}
    const_iterator end() const {return const_iterator(*this,n_segment);}
    std::pair<iterator, iterator> equal_range( const Key& key ) { return internal_equal_range(key, end()); }
    std::pair<const_iterator, const_iterator> equal_range( const Key& key ) const { return internal_equal_range(key, end()); }

    // Number of items in the table.
    size_type size() const;

    // True if the table is empty (size()==0).
    bool empty() const;

    // Upper bound on the number of items.
    size_type max_size() const {return (~size_type(0))/sizeof(node);}

    // Return a copy of the allocator.
    allocator_type get_allocator() const { return this->my_allocator; }

    // Swap contents with another table (not thread-safe).
    void swap(concurrent_hash_map &table);

    //------------------------------------------------------------------------
    // concurrent map operations
    //------------------------------------------------------------------------

    // Return count of occurrences of the key (0 or 1).
    size_type count( const Key& key ) const {
        return const_cast<concurrent_hash_map*>(this)->lookup</*insert*/false>(NULL, key, /*write=*/false, NULL );
    }

    // Find item and acquire a read lock on it; returns true if the item was found.
    bool find( const_accessor& result, const Key& key ) const {
        return const_cast<concurrent_hash_map*>(this)->lookup</*insert*/false>(&result, key, /*write=*/false, NULL );
    }

    // Find item and acquire a write lock on it; returns true if the item was found.
    bool find( accessor& result, const Key& key ) {
        return lookup</*insert*/false>(&result, key, /*write=*/true, NULL );
    }

    // Insert item (if not already present) and acquire a read lock on it.
    // Returns true if the item is new.
    bool insert( const_accessor& result, const Key& key ) {
        return lookup</*insert*/true>(&result, key, /*write=*/false, NULL );
    }

    // Insert item (if not already present) and acquire a write lock on it.
    // Returns true if the item is new.
    bool insert( accessor& result, const Key& key ) {
        return lookup</*insert*/true>(&result, key, /*write=*/true, NULL );
    }

    // Insert item by copying if the key is not already present; acquires a read lock.
    // Returns true if the item is new.
    bool insert( const_accessor& result, const value_type& value ) {
        return lookup</*insert*/true>(&result, value.first, /*write=*/false, &value.second );
    }

    // Insert item by copying if the key is not already present; acquires a write lock.
    // Returns true if the item is new.
    bool insert( accessor& result, const value_type& value ) {
        return lookup</*insert*/true>(&result, value.first, /*write=*/true, &value.second );
    }

    // Insert item by copying if the key is not already present; no lock is retained.
    // Returns true if the item is new.
    bool insert( const value_type& value ) {
        return lookup</*insert*/true>(NULL, value.first, /*write=*/false, &value.second );
    }

    // Insert range [first,last).
    template<typename I>
    void insert(I first, I last) {
        for(; first != last; ++first)
            insert( *first );
    }

    // Erase item by key. Returns true if the item was erased by this call.
    bool erase( const Key& key );

    // Erase the item pointed to by a const_accessor.
    // Returns true if the item was erased by this call.
    bool erase( const_accessor& item_accessor ) {
        return exclude( item_accessor, /*readonly=*/ true );
    }

    // Erase the item pointed to by an accessor.
    // Returns true if the item was erased by this call.
    bool erase( accessor& item_accessor ) {
        return exclude( item_accessor, /*readonly=*/ false );
    }
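
    // Illustrative sketch (not part of the original header): typical accessor
    // discipline. The element lock is held while the accessor is attached and
    // released when it goes out of scope or release() is called:
    //
    //     tbb::concurrent_hash_map<int,int> table;   // hypothetical instance
    //     {
    //         tbb::concurrent_hash_map<int,int>::accessor a;
    //         if( table.insert( a, 5 ) )   // write lock on the new element
    //             a->second = 0;           // initialize it while locked
    //         ++a->second;                 // still locked, so this is race-free
    //     }                                // lock released here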

private:
    // Basic unit of storage: a key-value item, its mutex, and a list link.
    struct node: internal::no_copy {
        node* next;
        node_mutex_t mutex;
        value_type item;
        node( const Key& key ) : item(key, T()) {}
        node( const Key& key, const T& t ) : item(key, t) {}
        // exception-safe allocation, see C++ Standard 2003, clause 5.3.4p17
        void* operator new( size_t /*size*/, node_allocator_type& a ) {
            void *ptr = a.allocate(1);
            if(!ptr) throw std::bad_alloc();
            return ptr;
        }
        // match placement-new form above to be called if exception thrown in constructor
        void operator delete( void* ptr, node_allocator_type& a ) {return a.deallocate(static_cast<node*>(ptr),1); }
    };

    struct chain;
    friend struct chain;

    // A linked list of nodes, protected by its own mutex.
    struct chain {
        void push_front( node& b ) {
            b.next = node_list;
            node_list = &b;
        }
        chain_mutex_t mutex;
        node* node_list;
    };

    struct segment;
    friend struct segment;

    // Segment of the table: an array of chains plus bookkeeping.
    // The table is partitioned into n_segment disjoint segments.
    struct segment: internal::hash_map_segment_base {
#if TBB_USE_ASSERT
        ~segment() {
            __TBB_ASSERT( !my_array, "should have been cleared earlier" );
        }
#endif /* TBB_USE_ASSERT */

        // Pointer to array of chains
        chain* my_array;

        // Get chain in this segment that corresponds to given hash code.
        chain& get_chain( hashcode_t hashcode, size_t n_segment_bits ) {
            return my_array[(hashcode>>n_segment_bits)&(my_physical_size-1)];
        }

        // Allocate an array of at least new_size chains, rounded up to a power of two.
        void allocate_array( size_t new_size ) {
            size_t n=(internal::NFS_GetLineSize()+sizeof(chain)-1)/sizeof(chain);
            __TBB_ASSERT((n&(n-1))==0, NULL);
            while( n<new_size ) n<<=1;
            chain* array = cache_aligned_allocator<chain>().allocate( n );
            // Storing the new size early may help concurrent threads avoid
            // false positives when deducing "bool grow".
            __TBB_store_with_release(my_physical_size, n);
            std::memset( array, 0, n*sizeof(chain) );
            my_array = array;
        }
    };

    segment& get_segment( hashcode_t hashcode ) {
        return my_segment[hashcode&(n_segment-1)];
    }
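
    // Illustrative note (not from the original header): the low n_segment_bits
    // bits of a hash code select the segment; the next bits select the chain
    // within the segment's power-of-two array, e.g.:
    //
    //     hashcode_t h = my_hash_compare.hash( key );
    //     segment& s = my_segment[h & (n_segment-1)];            // low 6 bits
    //     chain&   c = s.my_array[(h >> n_segment_bits)
    //                             & (s.my_physical_size-1)];     // next bits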

    node_allocator_type my_allocator;

    HashCompare my_hash_compare;

    segment* my_segment;

    node* create_node(const Key& key, const T* t) {
        // exception-safe allocation and construction
        if(t) return new( my_allocator ) node(key, *t);
        else  return new( my_allocator ) node(key);
    }

    void delete_node(node* b) {
        my_allocator.destroy(b);
        my_allocator.deallocate(b, 1);
    }

    node* search_list( const Key& key, chain& c ) const {
        node* b = c.node_list;
        while( b && !my_hash_compare.equal(key, b->item.first) )
            b = b->next;
        return b;
    }

    // Perform the equal_range lookup shared by the const and non-const overloads.
    template<typename I>
    std::pair<I, I> internal_equal_range( const Key& key, I end ) const;

    // Unlink the element referenced by the accessor from the table and delete it.
    bool exclude( const_accessor& item_accessor, bool readonly );

    // Grow the segment's chain array; downgrades the segment lock to a reader lock.
    void grow_segment( segment_mutex_t::scoped_lock& segment_lock, segment& s );

    // Combined implementation of find, count, and insert.
    template<bool op_insert>
    bool lookup( const_accessor* result, const Key& key, bool write, const T* t );

    // Allocate and zero the array of segments.
    void initialize() {
        my_segment = cache_aligned_allocator<segment>().allocate(n_segment);
        std::memset( my_segment, 0, sizeof(segment)*n_segment );
    }

    // Copy the contents of another table; the caller must have cleared this one.
    void internal_copy( const concurrent_hash_map& source );

    template<typename I>
    void internal_copy(I first, I last);
};

template<typename Key, typename T, typename HashCompare, typename A>
concurrent_hash_map<Key,T,HashCompare,A>::~concurrent_hash_map() {
    clear();
    cache_aligned_allocator<segment>().deallocate( my_segment, n_segment );
}

template<typename Key, typename T, typename HashCompare, typename A>
typename concurrent_hash_map<Key,T,HashCompare,A>::size_type concurrent_hash_map<Key,T,HashCompare,A>::size() const {
    size_type result = 0;
    for( size_t k=0; k<n_segment; ++k )
        result += my_segment[k].my_logical_size;
    return result;
}

template<typename Key, typename T, typename HashCompare, typename A>
bool concurrent_hash_map<Key,T,HashCompare,A>::empty() const {
    for( size_t k=0; k<n_segment; ++k )
        if( my_segment[k].my_logical_size )
            return false;
    return true;
}

#if _MSC_VER && !defined(__INTEL_COMPILER)
    // Suppress "conditional expression is constant" warning.
    #pragma warning( push )
    #pragma warning( disable: 4127 )
#endif

template<typename Key, typename T, typename HashCompare, typename A>
template<bool op_insert>
bool concurrent_hash_map<Key,T,HashCompare,A>::lookup( const_accessor* result, const Key& key, bool write, const T* t ) {
    if( result )
        result->release();
    const hashcode_t h = my_hash_compare.hash( key );
    segment& s = get_segment(h);
restart:
    bool return_value = false;
    // first check in double-check sequence
#if TBB_USE_THREADING_TOOLS
    bool grow = op_insert && s.internal_grow_predicate();
#else
    bool grow = op_insert && s.my_logical_size >= s.my_physical_size
        && s.my_physical_size < max_physical_size; // check whether there are free bits
#endif /* TBB_USE_THREADING_TOOLS */
    segment_mutex_t::scoped_lock segment_lock( s.my_mutex, /*write=*/grow );
    if( grow ) { // Load factor is too high
        grow_segment( segment_lock, s );
    }
    if( !s.my_array ) {
        __TBB_ASSERT( !op_insert, NULL );
        return false;
    }
    __TBB_ASSERT( (s.my_physical_size&(s.my_physical_size-1))==0, NULL );
    chain& c = s.get_chain( h, n_segment_bits );
    chain_mutex_t::scoped_lock chain_lock( c.mutex, /*write=*/false );

    node* b = search_list( key, c );
    if( op_insert ) {
        if( !b ) { // Search failed; insert a new node.
            b = create_node(key, t);
            if( !chain_lock.upgrade_to_writer() ) {
                // Rerun search_list, in case another thread inserted the item during the upgrade.
                node* b_temp = search_list( key, c );
                if( b_temp ) { // unfortunately, it did
                    chain_lock.downgrade_to_reader();
                    delete_node( b );
                    b = b_temp;
                    goto done;
                }
            }
            ++s.my_logical_size; // we can't change it earlier due to correctness of size() and exception safety of equal()
            return_value = true;
            c.push_front( *b );
        }
    } else { // find or count
        if( !b ) return false;
        return_value = true;
    }
done:
    if( !result ) return return_value;
    if( !result->my_lock.try_acquire( b->mutex, write ) ) {
        // we are unlucky, prepare for longer wait
        internal::AtomicBackoff trials;
        do {
            if( !trials.bounded_pause() ) {
                // the wait takes really long, restart the operation
                chain_lock.release(); segment_lock.release();
                __TBB_Yield();
                goto restart;
            }
        } while( !result->my_lock.try_acquire( b->mutex, write ) );
    }
    result->my_node = b;
    result->my_hash = h;
    return return_value;
}

#if _MSC_VER && !defined(__INTEL_COMPILER)
    #pragma warning( pop )
#endif // warning 4127 is back

template<typename Key, typename T, typename HashCompare, typename A>
template<typename I>
std::pair<I, I> concurrent_hash_map<Key,T,HashCompare,A>::internal_equal_range( const Key& key, I end ) const {
    hashcode_t h = my_hash_compare.hash( key );
    size_t segment_index = h&(n_segment-1);
    segment& s = my_segment[segment_index];
    size_t chain_index = (h>>n_segment_bits)&(s.my_physical_size-1);
    if( !s.my_array )
        return std::make_pair(end, end);
    chain& c = s.my_array[chain_index];
    node* b = search_list( key, c );
    if( !b )
        return std::make_pair(end, end);
    iterator lower(*this, segment_index, chain_index, b), upper(lower);
    return std::make_pair(lower, ++upper);
}

template<typename Key, typename T, typename HashCompare, typename A>
bool concurrent_hash_map<Key,T,HashCompare,A>::erase( const Key &key ) {
    hashcode_t h = my_hash_compare.hash( key );
    segment& s = get_segment( h );
    node* b=NULL; // explicitly initialized to prevent compiler warnings
    {
        bool chain_locked_for_write = false;
        segment_mutex_t::scoped_lock segment_lock( s.my_mutex, /*write=*/false );
        if( !s.my_array ) return false;
        __TBB_ASSERT( (s.my_physical_size&(s.my_physical_size-1))==0, NULL );
        chain& c = s.get_chain( h, n_segment_bits );
        chain_mutex_t::scoped_lock chain_lock( c.mutex, /*write=*/false );
    search:
        node** p = &c.node_list;
        b = *p;
        while( b && !my_hash_compare.equal(key, b->item.first ) ) {
            p = &b->next;
            b = *p;
        }
        if( !b ) return false;
        if( !chain_locked_for_write && !chain_lock.upgrade_to_writer() ) {
            chain_locked_for_write = true;
            goto search;
        }
        *p = b->next;
        --s.my_logical_size;
    }
    {
        node_mutex_t::scoped_lock item_locker( b->mutex, /*write=*/true );
    }
    // Note: by now no other thread can be contending for this mutex,
    // so do not try to upgrade a const_accessor here.
    delete_node( b ); // Only one thread can delete it due to write lock on the chain_mutex
    return true;
}

template<typename Key, typename T, typename HashCompare, typename A>
bool concurrent_hash_map<Key,T,HashCompare,A>::exclude( const_accessor &item_accessor, bool readonly ) {
    __TBB_ASSERT( item_accessor.my_node, NULL );
    const hashcode_t h = item_accessor.my_hash;
    node *const b = item_accessor.my_node;
    item_accessor.my_node = NULL; // the accessor must be released in any case
    segment& s = get_segment( h );
    {
        segment_mutex_t::scoped_lock segment_lock( s.my_mutex, /*write=*/false );
        __TBB_ASSERT( s.my_array, NULL );
        __TBB_ASSERT( (s.my_physical_size&(s.my_physical_size-1))==0, NULL );
        chain& c = s.get_chain( h, n_segment_bits );
        chain_mutex_t::scoped_lock chain_lock( c.mutex, /*write=*/true );
        node** p = &c.node_list;
        while( *p && *p != b )
            p = &(*p)->next;
        if( !*p ) { // someone else removed the node first
            item_accessor.my_lock.release();
            return false;
        }
        __TBB_ASSERT( *p == b, NULL );
        *p = b->next;
        --s.my_logical_size;
    }
    if( readonly ) // need to get an exclusive lock
        item_accessor.my_lock.upgrade_to_writer(); // return value means nothing here
    item_accessor.my_lock.release();
    delete_node( b ); // Only one thread can delete it due to write lock on the chain_mutex
    return true;
}

template<typename Key, typename T, typename HashCompare, typename A>
void concurrent_hash_map<Key,T,HashCompare,A>::swap(concurrent_hash_map<Key,T,HashCompare,A> &table) {
    std::swap(this->my_allocator, table.my_allocator);
    std::swap(this->my_hash_compare, table.my_hash_compare);
    std::swap(this->my_segment, table.my_segment);
}

template<typename Key, typename T, typename HashCompare, typename A>
void concurrent_hash_map<Key,T,HashCompare,A>::clear() {
#if TBB_USE_PERFORMANCE_WARNINGS
    size_t total_physical_size = 0, min_physical_size = size_t(-1L), max_physical_size = 0; //< usage statistics
    static bool reported = false;
#endif
    for( size_t i=0; i<n_segment; ++i ) {
        segment& s = my_segment[i];
        size_t n = s.my_physical_size;
        if( chain* array = s.my_array ) {
            s.my_array = NULL;
            s.my_physical_size = 0;
            s.my_logical_size = 0;
            for( size_t j=0; j<n; ++j ) {
                while( node* b = array[j].node_list ) {
                    array[j].node_list = b->next;
                    delete_node(b);
                }
            }
            cache_aligned_allocator<chain>().deallocate( array, n );
        }
#if TBB_USE_PERFORMANCE_WARNINGS
        total_physical_size += n;
        if(min_physical_size > n) min_physical_size = n;
        if(max_physical_size < n) max_physical_size = n;
    }
    if( !reported
        && ( (total_physical_size >= n_segment*48 && min_physical_size < total_physical_size/n_segment/2)
         || (total_physical_size >= n_segment*128 && max_physical_size > total_physical_size/n_segment*2) ) )
    {
        reported = true;
        internal::runtime_warning(
            "Performance is not optimal because the hash function produces bad randomness in lower bits in %s",
            typeid(*this).name() );
#endif
    }
}

template<typename Key, typename T, typename HashCompare, typename A>
void concurrent_hash_map<Key,T,HashCompare,A>::grow_segment( segment_mutex_t::scoped_lock& segment_lock, segment& s ) {
    // The following is the second check of the double-check sequence.
    if( s.my_logical_size >= s.my_physical_size ) {
        chain* old_array = s.my_array;
        size_t old_size = s.my_physical_size;
        s.allocate_array( s.my_logical_size+1 );
        for( size_t k=0; k<old_size; ++k )
            while( node* b = old_array[k].node_list ) {
                old_array[k].node_list = b->next;
                hashcode_t h = my_hash_compare.hash( b->item.first );
                __TBB_ASSERT( &get_segment(h)==&s, "hash function changed?" );
                s.get_chain(h,n_segment_bits).push_front(*b);
            }
        cache_aligned_allocator<chain>().deallocate( old_array, old_size );
    }
    segment_lock.downgrade_to_reader();
}

template<typename Key, typename T, typename HashCompare, typename A>
void concurrent_hash_map<Key,T,HashCompare,A>::internal_copy( const concurrent_hash_map& source ) {
    for( size_t i=0; i<n_segment; ++i ) {
        segment& s = source.my_segment[i];
        __TBB_ASSERT( !my_segment[i].my_array, "caller should have cleared" );
        if( s.my_logical_size ) {
            segment& d = my_segment[i];
            d.allocate_array( s.my_logical_size );
            d.my_logical_size = s.my_logical_size;
            size_t s_size = s.my_physical_size;
            chain* s_array = s.my_array;
            chain* d_array = d.my_array;
            for( size_t k=0; k<s_size; ++k )
                for( node* b = s_array[k].node_list; b; b=b->next ) {
                    __TBB_ASSERT( &get_segment(my_hash_compare.hash( b->item.first ))==&d, "hash function changed?" );
                    node* b_new = create_node(b->item.first, &b->item.second);
                    d_array[k].push_front(*b_new); // hash code, segment, and physical size are unchanged, so the chain index is the same
                }
        }
    }
}

template<typename Key, typename T, typename HashCompare, typename A>
template<typename I>
void concurrent_hash_map<Key,T,HashCompare,A>::internal_copy(I first, I last) {
    for(; first != last; ++first)
        insert( *first );
}

template<typename Key, typename T, typename HashCompare, typename A1, typename A2>
inline bool operator==(const concurrent_hash_map<Key, T, HashCompare, A1> &a, const concurrent_hash_map<Key, T, HashCompare, A2> &b) {
    if(a.size() != b.size()) return false;
    typename concurrent_hash_map<Key, T, HashCompare, A1>::const_iterator i(a.begin()), i_end(a.end());
    typename concurrent_hash_map<Key, T, HashCompare, A2>::const_iterator j, j_end(b.end());
    for(; i != i_end; ++i) {
        j = b.equal_range(i->first).first;
        if( j == j_end || !(i->second == j->second) ) return false;
    }
    return true;
}

template<typename Key, typename T, typename HashCompare, typename A1, typename A2>
inline bool operator!=(const concurrent_hash_map<Key, T, HashCompare, A1> &a, const concurrent_hash_map<Key, T, HashCompare, A2> &b)
{    return !(a == b); }

template<typename Key, typename T, typename HashCompare, typename A>
inline void swap(concurrent_hash_map<Key, T, HashCompare, A> &a, concurrent_hash_map<Key, T, HashCompare, A> &b)
{    a.swap( b ); }

} // namespace tbb

#endif /* __TBB_concurrent_hash_map_H */
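
A minimal usage sketch (not part of the header; it assumes the header is reachable as "tbb/concurrent_hash_map.h" and that the default tbb_hash_compare<std::string> shown above is in effect):

#include <cstdio>
#include <string>
#include "tbb/concurrent_hash_map.h"

typedef tbb::concurrent_hash_map<std::string,int> StringTable;

int main() {
    StringTable table;
    // insert() with an accessor takes a write lock on the element.
    {
        StringTable::accessor a;
        if( table.insert( a, "apple" ) )  // true: key was new
            a->second = 0;
        ++a->second;
    } // element lock released when 'a' is destroyed
    // find() with a const_accessor takes a read lock, so readers can overlap.
    {
        StringTable::const_accessor ca;
        if( table.find( ca, "apple" ) )
            std::printf( "%s -> %d\n", ca->first.c_str(), ca->second );
    }
    // Iteration is in the "not thread-safe" group: use it only while no
    // concurrent operations are in flight.
    for( StringTable::iterator i=table.begin(); i!=table.end(); ++i )
        std::printf( "%s -> %d\n", i->first.c_str(), i->second );
    table.erase( "apple" );
    return 0;
}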