Intel(R) Threading Building Blocks Doxygen Documentation  version 4.2.3
tbb::interface5::internal::hash_map_base Class Reference

base class of concurrent_hash_map More...

#include <concurrent_hash_map.h>

Inheritance diagram for tbb::interface5::internal::hash_map_base:
Collaboration diagram for tbb::interface5::internal::hash_map_base:

Classes

struct  bucket
 Bucket type. More...
 
struct  enable_segment_failsafe
 Exception safety helper. More...
 

Public Types

typedef size_t size_type
 Size type. More...
 
typedef size_t hashcode_t
 Type of a hash code. More...
 
typedef size_t segment_index_t
 Segment index type. More...
 
typedef hash_map_node_base node_base
 Node base type. More...
 
typedef bucket * segment_ptr_t
 Segment pointer. More...
 
typedef segment_ptr_t segments_table_t[pointers_per_table]
 Segment pointers table type. More...
 

Public Member Functions

 hash_map_base ()
 Constructor. More...
 
template<typename Allocator >
void enable_segment (segment_index_t k, const Allocator &allocator, bool is_initial=false)
 Enable segment. More...
 
template<typename Allocator >
void delete_segment (segment_index_t s, const Allocator &allocator)
 
bucket * get_bucket (hashcode_t h) const throw ()
 Get bucket by (masked) hashcode. More...
 
void mark_rehashed_levels (hashcode_t h) throw ()
 
bool check_mask_race (const hashcode_t h, hashcode_t &m) const
 Check for mask race. More...
 
bool check_rehashing_collision (const hashcode_t h, hashcode_t m_old, hashcode_t m) const
 Process mask race, check for rehashing collision. More...
 
segment_index_t insert_new_node (bucket *b, node_base *n, hashcode_t mask)
 Insert a node and check for load factor. More...
 
template<typename Allocator >
void reserve (size_type buckets, const Allocator &allocator)
 Prepare enough segments for number of buckets. More...
 
void internal_swap (hash_map_base &table)
 Swap hash_map_bases. More...
 
void internal_move (hash_map_base &&other)
 

Static Public Member Functions

static segment_index_t segment_index_of (size_type index)
 
static segment_index_t segment_base (segment_index_t k)
 
static size_type segment_size (segment_index_t k)
 
static bool is_valid (void *ptr)
 
static void init_buckets (segment_ptr_t ptr, size_type sz, bool is_initial)
 Initialize buckets. More...
 
static void add_to_bucket (bucket *b, node_base *n)
 Add node. More...
 

Public Attributes

atomic< hashcode_t > my_mask
 Hash mask = sum of allocated segment sizes - 1. More...
 
segments_table_t my_table
 Segment pointers table. Also prevents false sharing between my_mask and my_size. More...
 
atomic< size_type > my_size
 Size of container in stored items. More...
 
bucket my_embedded_segment [embedded_buckets]
 Zero segment. More...
 

Static Public Attributes

static size_type const embedded_block = 1
 Count of embedded segments. More...
 
static size_type const embedded_buckets = 1<<embedded_block
 Count of buckets in the embedded segments. More...
 
 
static size_type const first_block = 8
 Count of segments in the first block. More...
 
static size_type const pointers_per_table = sizeof(segment_index_t) * 8
 Size of a pointer / table size. More...
 

Detailed Description

base class of concurrent_hash_map

Definition at line 79 of file concurrent_hash_map.h.

Member Typedef Documentation

◆ hashcode_t

Type of a hash code.

Definition at line 84 of file concurrent_hash_map.h.

◆ node_base

◆ segment_index_t

Segment index type.

Definition at line 86 of file concurrent_hash_map.h.

◆ segment_ptr_t

Segment pointer.

Definition at line 107 of file concurrent_hash_map.h.

◆ segments_table_t

typedef segment_ptr_t tbb::interface5::internal::hash_map_base::segments_table_t[pointers_per_table]

Segment pointers table type.

Definition at line 109 of file concurrent_hash_map.h.

◆ size_type

Size type.

Definition at line 82 of file concurrent_hash_map.h.

Constructor & Destructor Documentation

◆ hash_map_base()

tbb::interface5::internal::hash_map_base::hash_map_base ( )
inline

Constructor.

Definition at line 124 of file concurrent_hash_map.h.

124  {
125  std::memset( this, 0, pointers_per_table*sizeof(segment_ptr_t) // 32*4=128 or 64*8=512
126  + sizeof(my_size) + sizeof(my_mask) // 4+4 or 8+8
127  + embedded_buckets*sizeof(bucket) ); // n*8 or n*16
128  for( size_type i = 0; i < embedded_block; i++ ) // fill the table
131  __TBB_ASSERT( embedded_block <= first_block, "The first block number must include embedded blocks");
132 #if __TBB_STATISTICS
133  my_info_resizes = 0; // concurrent ones
134  my_info_restarts = 0; // race collisions
135  my_info_rehashes = 0; // invocations of rehash_bucket
136 #endif
137  }
static size_type const first_block
Count of segments in the first block.
atomic< size_type > my_size
Size of container in stored items.
bucket my_embedded_segment[embedded_buckets]
Zero segment.
static size_type const pointers_per_table
Size of a pointer / table size.
static size_type const embedded_buckets
Count of segments in the first block.
atomic< hashcode_t > my_mask
Hash mask = sum of allocated segment sizes - 1.
static segment_index_t segment_base(segment_index_t k)
segments_table_t my_table
Segment pointers table. Also prevents false sharing between my_mask and my_size.
#define __TBB_ASSERT(predicate, comment)
No-op version of __TBB_ASSERT.
Definition: tbb_stddef.h:165
static size_type const embedded_block
Count of segments in the first block.

Member Function Documentation

◆ add_to_bucket()

static void tbb::interface5::internal::hash_map_base::add_to_bucket ( bucket * b,
node_base * n 
)
inlinestatic

Add node.

Adds node n to bucket b.

Definition at line 169 of file concurrent_hash_map.h.

169  {
170  __TBB_ASSERT(b->node_list != rehash_req, NULL);
171  n->next = b->node_list;
172  b->node_list = n; // its under lock and flag is set
173  }
static hash_map_node_base *const rehash_req
Incompleteness flag value.
#define __TBB_ASSERT(predicate, comment)
No-op version of __TBB_ASSERT.
Definition: tbb_stddef.h:165

References __TBB_ASSERT, tbb::interface5::internal::hash_map_node_base::next, tbb::interface5::internal::hash_map_base::bucket::node_list, and tbb::interface5::internal::rehash_req.

Referenced by tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::rehash_bucket().

Here is the caller graph for this function:

◆ check_mask_race()

bool tbb::interface5::internal::hash_map_base::check_mask_race ( const hashcode_t  h,
hashcode_t & m 
) const
inline

Check for mask race.

Definition at line 250 of file concurrent_hash_map.h.

250  {
251  hashcode_t m_now, m_old = m;
253  if( m_old != m_now )
254  return check_rehashing_collision( h, m_old, m = m_now );
255  return false;
256  }
atomic< hashcode_t > my_mask
Hash mask = sum of allocated segment sizes - 1.
bool check_rehashing_collision(const hashcode_t h, hashcode_t m_old, hashcode_t m) const
Process mask race, check for rehashing collision.
T itt_load_word_with_acquire(const tbb::atomic< T > &src)
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task __itt_model_task_instance ITT_FORMAT p void ITT_FORMAT p void ITT_FORMAT p void size_t ITT_FORMAT d void ITT_FORMAT p const wchar_t ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s no args void ITT_FORMAT p size_t ITT_FORMAT d no args const wchar_t const wchar_t ITT_FORMAT s __itt_heap_function h

References h, and tbb::internal::itt_load_word_with_acquire().

Referenced by tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::internal_fast_find().

Here is the call graph for this function:
Here is the caller graph for this function:

◆ check_rehashing_collision()

bool tbb::interface5::internal::hash_map_base::check_rehashing_collision ( const hashcode_t  h,
hashcode_t  m_old,
hashcode_t  m 
) const
inline

Process mask race, check for rehashing collision.

Definition at line 259 of file concurrent_hash_map.h.

259  {
260  __TBB_ASSERT(m_old != m, NULL); // TODO?: m arg could be optimized out by passing h = h&m
261  if( (h & m_old) != (h & m) ) { // mask changed for this hashcode, rare event
262  // condition above proves that 'h' has some other bits set beside 'm_old'
263  // find next applicable mask after m_old //TODO: look at bsl instruction
264  for( ++m_old; !(h & m_old); m_old <<= 1 ) // at maximum few rounds depending on the first block size
265  ;
266  m_old = (m_old<<1) - 1; // get full mask from a bit
267  __TBB_ASSERT((m_old&(m_old+1))==0 && m_old <= m, NULL);
268  // check whether it is rehashing/ed
269  if( itt_load_word_with_acquire(get_bucket(h & m_old)->node_list) != rehash_req )
270  {
271 #if __TBB_STATISTICS
272  my_info_restarts++; // race collisions
273 #endif
274  return true;
275  }
276  }
277  return false;
278  }
static hash_map_node_base *const rehash_req
Incompleteness flag value.
#define __TBB_ASSERT(predicate, comment)
No-op version of __TBB_ASSERT.
Definition: tbb_stddef.h:165
bucket * get_bucket(hashcode_t h) const
Get bucket by (masked) hashcode.
T itt_load_word_with_acquire(const tbb::atomic< T > &src)
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task __itt_model_task_instance ITT_FORMAT p void ITT_FORMAT p void ITT_FORMAT p void size_t ITT_FORMAT d void ITT_FORMAT p const wchar_t ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s no args void ITT_FORMAT p size_t ITT_FORMAT d no args const wchar_t const wchar_t ITT_FORMAT s __itt_heap_function h

References __TBB_ASSERT, h, tbb::internal::itt_load_word_with_acquire(), and tbb::interface5::internal::rehash_req.

Here is the call graph for this function:

◆ delete_segment()

template<typename Allocator >
void tbb::interface5::internal::hash_map_base::delete_segment ( segment_index_t  s,
const Allocator &  allocator 
)
inline

Definition at line 214 of file concurrent_hash_map.h.

214  {
215  typedef typename tbb::internal::allocator_rebind<Allocator, bucket>::type bucket_allocator_type;
216  typedef tbb::internal::allocator_traits<bucket_allocator_type> bucket_allocator_traits;
217  bucket_allocator_type bucket_allocator(allocator);
218  segment_ptr_t buckets_ptr = my_table[s];
219  size_type sz = segment_size( s ? s : 1 );
220 
221  if( s >= first_block) // the first segment or the next
222  bucket_allocator_traits::deallocate(bucket_allocator, buckets_ptr, sz);
223  else if( s == embedded_block && embedded_block != first_block )
224  bucket_allocator_traits::deallocate(bucket_allocator, buckets_ptr,
226  if( s >= embedded_block ) my_table[s] = 0;
227  }
static size_type const first_block
Count of segments in the first block.
void const char const char int ITT_FORMAT __itt_group_sync s
static size_type const embedded_buckets
Count of segments in the first block.
segments_table_t my_table
Segment pointers table. Also prevents false sharing between my_mask and my_size.
static size_type segment_size(segment_index_t k)
allocator_traits< Alloc >::template rebind_alloc< T >::other type
static size_type const embedded_block
Count of segments in the first block.

References s.

◆ enable_segment()

template<typename Allocator >
void tbb::interface5::internal::hash_map_base::enable_segment ( segment_index_t  k,
const Allocator &  allocator,
bool  is_initial = false 
)
inline

Enable segment.

Definition at line 186 of file concurrent_hash_map.h.

186  {
187  typedef typename tbb::internal::allocator_rebind<Allocator, bucket>::type bucket_allocator_type;
188  typedef tbb::internal::allocator_traits<bucket_allocator_type> bucket_allocator_traits;
189  bucket_allocator_type bucket_allocator(allocator);
190  __TBB_ASSERT( k, "Zero segment must be embedded" );
191  enable_segment_failsafe watchdog( my_table, k );
192  size_type sz;
193  __TBB_ASSERT( !is_valid(my_table[k]), "Wrong concurrent assignment");
194  if( k >= first_block ) {
195  sz = segment_size( k );
196  segment_ptr_t ptr = bucket_allocator_traits::allocate(bucket_allocator, sz);
197  init_buckets( ptr, sz, is_initial );
198  itt_hide_store_word( my_table[k], ptr );
199  sz <<= 1;// double it to get entire capacity of the container
200  } else { // the first block
201  __TBB_ASSERT( k == embedded_block, "Wrong segment index" );
202  sz = segment_size( first_block );
203  segment_ptr_t ptr = bucket_allocator_traits::allocate(bucket_allocator, sz - embedded_buckets);
204  init_buckets( ptr, sz - embedded_buckets, is_initial );
206  for(segment_index_t i = embedded_block; i < first_block; i++) // calc the offsets
207  itt_hide_store_word( my_table[i], ptr + segment_base(i) );
208  }
210  watchdog.my_segment_ptr = 0;
211  }
static size_type const first_block
Count of segments in the first block.
void itt_hide_store_word(T &dst, T src)
static size_type const embedded_buckets
Count of segments in the first block.
atomic< hashcode_t > my_mask
Hash mask = sum of allocated segment sizes - 1.
static segment_index_t segment_base(segment_index_t k)
segments_table_t my_table
Segment pointers table. Also prevents false sharing between my_mask and my_size.
void itt_store_word_with_release(tbb::atomic< T > &dst, U src)
#define __TBB_ASSERT(predicate, comment)
No-op version of __TBB_ASSERT.
Definition: tbb_stddef.h:165
static size_type segment_size(segment_index_t k)
allocator_traits< Alloc >::template rebind_alloc< T >::other type
static void init_buckets(segment_ptr_t ptr, size_type sz, bool is_initial)
Initialize buckets.
static size_type const embedded_block
Count of segments in the first block.

References __TBB_ASSERT, tbb::internal::itt_hide_store_word(), tbb::internal::itt_store_word_with_release(), and tbb::interface5::internal::hash_map_base::enable_segment_failsafe::my_segment_ptr.

Here is the call graph for this function:

◆ get_bucket()

bucket* tbb::interface5::internal::hash_map_base::get_bucket ( hashcode_t  h) const
throw (
)
inline

Get bucket by (masked) hashcode.

Definition at line 230 of file concurrent_hash_map.h.

230  { // TODO: add throw() everywhere?
232  h -= segment_base(s);
233  segment_ptr_t seg = my_table[s];
234  __TBB_ASSERT( is_valid(seg), "hashcode must be cut by valid mask for allocated segments" );
235  return &seg[h];
236  }
void const char const char int ITT_FORMAT __itt_group_sync s
static segment_index_t segment_index_of(size_type index)
static segment_index_t segment_base(segment_index_t k)
segments_table_t my_table
Segment pointers table. Also prevents false sharing between my_mask and my_size.
#define __TBB_ASSERT(predicate, comment)
No-op version of __TBB_ASSERT.
Definition: tbb_stddef.h:165
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task __itt_model_task_instance ITT_FORMAT p void ITT_FORMAT p void ITT_FORMAT p void size_t ITT_FORMAT d void ITT_FORMAT p const wchar_t ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s no args void ITT_FORMAT p size_t ITT_FORMAT d no args const wchar_t const wchar_t ITT_FORMAT s __itt_heap_function h

References __TBB_ASSERT, h, and s.

Referenced by tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::bucket_accessor::acquire(), tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::internal_copy(), and tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::internal_fast_find().

Here is the caller graph for this function:

◆ init_buckets()

static void tbb::interface5::internal::hash_map_base::init_buckets ( segment_ptr_t  ptr,
size_type  sz,
bool  is_initial 
)
inlinestatic

Initialize buckets.

Definition at line 160 of file concurrent_hash_map.h.

160  {
161  if( is_initial ) std::memset( static_cast<void*>(ptr), 0, sz*sizeof(bucket) );
162  else for(size_type i = 0; i < sz; i++, ptr++) {
163  *reinterpret_cast<intptr_t*>(&ptr->mutex) = 0;
164  ptr->node_list = rehash_req;
165  }
166  }
static hash_map_node_base *const rehash_req
Incompleteness flag value.

References tbb::interface5::internal::hash_map_base::bucket::mutex, tbb::interface5::internal::hash_map_base::bucket::node_list, and tbb::interface5::internal::rehash_req.

◆ insert_new_node()

segment_index_t tbb::interface5::internal::hash_map_base::insert_new_node ( bucket * b,
node_base * n,
hashcode_t  mask 
)
inline

Insert a node and check for load factor.

Returns
segment index to enable.

Definition at line 281 of file concurrent_hash_map.h.

281  {
282  size_type sz = ++my_size; // prefix form is to enforce allocation after the first item inserted
283  add_to_bucket( b, n );
284  // check load factor
285  if( sz >= mask ) { // TODO: add custom load_factor
286  segment_index_t new_seg = __TBB_Log2( mask+1 ); //optimized segment_index_of
287  __TBB_ASSERT( is_valid(my_table[new_seg-1]), "new allocations must not publish new mask until segment has allocated");
288  static const segment_ptr_t is_allocating = (segment_ptr_t)2;
289  if( !itt_hide_load_word(my_table[new_seg])
290  && as_atomic(my_table[new_seg]).compare_and_swap(is_allocating, NULL) == NULL )
291  return new_seg; // The value must be processed
292  }
293  return 0;
294  }
atomic< size_type > my_size
Size of container in stored items.
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int mask
segments_table_t my_table
Segment pointers table. Also prevents false sharing between my_mask and my_size.
atomic< T > & as_atomic(T &t)
Definition: atomic.h:543
static void add_to_bucket(bucket *b, node_base *n)
Add node.
T itt_hide_load_word(const T &src)
#define __TBB_ASSERT(predicate, comment)
No-op version of __TBB_ASSERT.
Definition: tbb_stddef.h:165
intptr_t __TBB_Log2(uintptr_t x)
Definition: tbb_machine.h:860

References __TBB_ASSERT, __TBB_Log2(), tbb::internal::as_atomic(), tbb::internal::itt_hide_load_word(), and mask.

Here is the call graph for this function:

◆ internal_move()

void tbb::interface5::internal::hash_map_base::internal_move ( hash_map_base &&  other)
inline

Definition at line 316 of file concurrent_hash_map.h.

316  {
317  my_mask = other.my_mask;
318  other.my_mask = embedded_buckets - 1;
319  my_size = other.my_size;
320  other.my_size = 0;
321 
322  for(size_type i = 0; i < embedded_buckets; ++i) {
323  my_embedded_segment[i].node_list = other.my_embedded_segment[i].node_list;
324  other.my_embedded_segment[i].node_list = NULL;
325  }
326 
327  for(size_type i = embedded_block; i < pointers_per_table; ++i) {
328  my_table[i] = other.my_table[i];
329  other.my_table[i] = NULL;
330  }
331  }
atomic< size_type > my_size
Size of container in stored items.
bucket my_embedded_segment[embedded_buckets]
Zero segment.
static size_type const pointers_per_table
Size of a pointer / table size.
static size_type const embedded_buckets
Count of segments in the first block.
atomic< hashcode_t > my_mask
Hash mask = sum of allocated segment sizes - 1.
segments_table_t my_table
Segment pointers table. Also prevents false sharing between my_mask and my_size.
static size_type const embedded_block
Count of segments in the first block.

References tbb::interface5::internal::hash_map_base::bucket::node_list.

Referenced by tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::concurrent_hash_map(), and tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::internal_move_assign().

Here is the caller graph for this function:

◆ internal_swap()

void tbb::interface5::internal::hash_map_base::internal_swap ( hash_map_base & table)
inline

Swap hash_map_bases.

Definition at line 305 of file concurrent_hash_map.h.

305  {
306  using std::swap;
307  swap(this->my_mask, table.my_mask);
308  swap(this->my_size, table.my_size);
309  for(size_type i = 0; i < embedded_buckets; i++)
310  swap(this->my_embedded_segment[i].node_list, table.my_embedded_segment[i].node_list);
311  for(size_type i = embedded_block; i < pointers_per_table; i++)
312  swap(this->my_table[i], table.my_table[i]);
313  }
atomic< size_type > my_size
Size of container in stored items.
bucket my_embedded_segment[embedded_buckets]
Zero segment.
static size_type const pointers_per_table
Size of a pointer / table size.
static size_type const embedded_buckets
Count of segments in the first block.
atomic< hashcode_t > my_mask
Hash mask = sum of allocated segment sizes - 1.
segments_table_t my_table
Segment pointers table. Also prevents false sharing between my_mask and my_size.
void swap(concurrent_hash_map< Key, T, HashCompare, A > &a, concurrent_hash_map< Key, T, HashCompare, A > &b)
static size_type const embedded_block
Count of segments in the first block.

References my_embedded_segment, my_mask, my_size, my_table, tbb::interface5::internal::hash_map_base::bucket::node_list, and tbb::swap().

Here is the call graph for this function:

◆ is_valid()

static bool tbb::interface5::internal::hash_map_base::is_valid ( void * ptr)
inlinestatic
Returns
true if ptr is a valid pointer

Definition at line 155 of file concurrent_hash_map.h.

155  {
156  return reinterpret_cast<uintptr_t>(ptr) > uintptr_t(63);
157  }

Referenced by tbb::interface5::internal::hash_map_iterator< Container, Value >::advance_to_next_bucket(), tbb::interface5::internal::hash_map_iterator< Container, Value >::hash_map_iterator(), tbb::interface5::internal::hash_map_iterator< Container, Value >::operator*(), tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::rehash_bucket(), and tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::search_bucket().

Here is the caller graph for this function:

◆ mark_rehashed_levels()

void tbb::interface5::internal::hash_map_base::mark_rehashed_levels ( hashcode_t  h)
throw (
)
inline

Definition at line 239 of file concurrent_hash_map.h.

239  {
241  while( segment_ptr_t seg = my_table[++s] )
242  if( seg[h].node_list == rehash_req ) {
243  seg[h].node_list = empty_rehashed;
244  mark_rehashed_levels( h + ((hashcode_t)1<<s) ); // optimized segment_base(s)
245  }
246  }
static hash_map_node_base *const empty_rehashed
Rehashed empty bucket flag.
static hash_map_node_base *const rehash_req
Incompleteness flag value.
void const char const char int ITT_FORMAT __itt_group_sync s
static segment_index_t segment_index_of(size_type index)
segments_table_t my_table
Segment pointers table. Also prevents false sharing between my_mask and my_size.
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task __itt_model_task_instance ITT_FORMAT p void ITT_FORMAT p void ITT_FORMAT p void size_t ITT_FORMAT d void ITT_FORMAT p const wchar_t ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s no args void ITT_FORMAT p size_t ITT_FORMAT d no args const wchar_t const wchar_t ITT_FORMAT s __itt_heap_function h

References tbb::interface5::internal::empty_rehashed, h, tbb::interface5::internal::rehash_req, and s.

◆ reserve()

template<typename Allocator >
void tbb::interface5::internal::hash_map_base::reserve ( size_type  buckets,
const Allocator &  allocator 
)
inline

Prepare enough segments for number of buckets.

Definition at line 298 of file concurrent_hash_map.h.

298  {
299  if( !buckets-- ) return;
300  bool is_initial = !my_size;
301  for( size_type m = my_mask; buckets > m; m = my_mask )
302  enable_segment( segment_index_of( m+1 ), allocator, is_initial );
303  }
atomic< size_type > my_size
Size of container in stored items.
static segment_index_t segment_index_of(size_type index)
atomic< hashcode_t > my_mask
Hash mask = sum of allocated segment sizes - 1.
void enable_segment(segment_index_t k, const Allocator &allocator, bool is_initial=false)
Enable segment.

Referenced by tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::concurrent_hash_map().

Here is the caller graph for this function:

◆ segment_base()

static segment_index_t tbb::interface5::internal::hash_map_base::segment_base ( segment_index_t  k)
inlinestatic
Returns
the first array index of given segment

Definition at line 145 of file concurrent_hash_map.h.

145  {
146  return (segment_index_t(1)<<k & ~segment_index_t(1));
147  }

◆ segment_index_of()

static segment_index_t tbb::interface5::internal::hash_map_base::segment_index_of ( size_type  index)
inlinestatic
Returns
segment index of given index in the array

Definition at line 140 of file concurrent_hash_map.h.

140  {
141  return segment_index_t( __TBB_Log2( index|1 ) );
142  }
intptr_t __TBB_Log2(uintptr_t x)
Definition: tbb_machine.h:860

References __TBB_Log2().

Here is the call graph for this function:

◆ segment_size()

static size_type tbb::interface5::internal::hash_map_base::segment_size ( segment_index_t  k)
inlinestatic
Returns
segment size; returns a fake value for k == 0

Definition at line 150 of file concurrent_hash_map.h.

150  {
151  return size_type(1)<<k; // fake value for k==0
152  }

Member Data Documentation

◆ embedded_block

size_type const tbb::interface5::internal::hash_map_base::embedded_block = 1
static

Count of embedded segments.

Definition at line 99 of file concurrent_hash_map.h.

◆ embedded_buckets

size_type const tbb::interface5::internal::hash_map_base::embedded_buckets = 1<<embedded_block
static

Count of buckets in the embedded segments.

Definition at line 101 of file concurrent_hash_map.h.

◆ first_block

size_type const tbb::interface5::internal::hash_map_base::first_block = 8
static

Count of segments in the first block.

Definition at line 103 of file concurrent_hash_map.h.

◆ my_embedded_segment

bucket tbb::interface5::internal::hash_map_base::my_embedded_segment[embedded_buckets]

◆ my_mask

◆ my_size

◆ my_table

segments_table_t tbb::interface5::internal::hash_map_base::my_table

Segment pointers table. Also prevents false sharing between my_mask and my_size.

Definition at line 113 of file concurrent_hash_map.h.

Referenced by internal_swap().

◆ pointers_per_table

size_type const tbb::interface5::internal::hash_map_base::pointers_per_table = sizeof(segment_index_t) * 8
static

Size of a pointer / table size.

Definition at line 105 of file concurrent_hash_map.h.


The documentation for this class was generated from the following file:

Copyright © 2005-2019 Intel Corporation. All Rights Reserved.

Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are registered trademarks or trademarks of Intel Corporation or its subsidiaries in the United States and other countries.

* Other names and brands may be claimed as the property of others.