#if __TBB_STATISTICS_STDOUT
// ...
#if __TBB_TASK_GROUP_CONTEXT
// ...
#if __TBB_TASK_PRIORITY
    // Scheduler attach: adopt the arena's top priority and reload epoch as
    // this thread's reference points.
    my_ref_top_priority = &a->my_top_priority;
    my_ref_reload_epoch = &a->my_reload_epoch;
    // ...
    my_local_reload_epoch = *my_ref_reload_epoch;
    // arena::occupy_slot: claim a free slot with a single compare-and-swap;
    // only succeeds while the slot still holds NULL.
    return !slot && as_atomic( slot ).compare_and_swap( &s, NULL ) == NULL;
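The claim is decided by that one CAS: it installs the scheduler pointer only while the slot still holds NULL, so at most one thread can win. A minimal standalone sketch of the same idea, using std::atomic in place of TBB's as_atomic wrapper (illustrative names, not TBB source):

#include <atomic>

struct scheduler;  // stand-in for generic_scheduler

bool try_claim( std::atomic<scheduler*>& slot, scheduler& s ) {
    scheduler* expected = NULL;
    // The cheap load filters out obviously occupied slots; the CAS is what
    // actually decides ownership.
    return slot.load( std::memory_order_relaxed ) == NULL
           && slot.compare_exchange_strong( expected, &s );
}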
    // occupy_free_slot_in_range: prefer the slot this thread held last time;
    // otherwise pick a random start inside [lower, upper), then scan with
    // wrap-around.
    size_t index = s.my_arena_index;
    if ( index < lower || index >= upper ) index = s.my_random.get() % (upper - lower) + lower;
    // ...
    for ( size_t i = index; i < upper; ++i )
        // ...
    for ( size_t i = lower; i < index; ++i )
        // ...
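The scan order matters for locality: starting at the previously held slot lets repeated entries reclaim the same slot, and the wrap-around pass covers the rest of the range. The same loop shape in isolation (assumed names, not TBB source):

#include <cstddef>

template<typename TryOccupy>
std::size_t find_slot( std::size_t preferred, std::size_t lower, std::size_t upper,
                       std::size_t random_value, TryOccupy try_occupy ) {
    const std::size_t out_of_range = ~std::size_t(0);
    std::size_t index = preferred;
    if ( index < lower || index >= upper )
        index = random_value % (upper - lower) + lower;   // random fallback start
    for ( std::size_t i = index; i < upper; ++i )          // preferred index to end
        if ( try_occupy(i) ) return i;
    for ( std::size_t i = lower; i < index; ++i )          // wrap around
        if ( try_occupy(i) ) return i;
    return out_of_range;
}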
template <bool as_worker>
size_t arena::occupy_free_slot( generic_scheduler& s ) {
    // ...
    __TBB_ASSERT( s.my_innermost_running_task == s.my_dummy_task, NULL );
    // ...
    s.attach_arena( this, index, /*is_master*/false );
#if !__TBB_FP_CONTEXT
    // ...
#if __TBB_ARENA_OBSERVER
    __TBB_ASSERT( !s.my_last_local_observer,
                  "There cannot be notified local observers when entering arena" );
    my_observers.notify_entry_observers( s.my_last_local_observer, /*worker=*/true );
    // ...
    s.local_wait_for_all( *s.my_dummy_task, NULL );
    // ...
    __TBB_ASSERT( s.my_innermost_running_task == s.my_dummy_task, NULL );
138 "Worker cannot leave arena while its task pool is not reset" );
144 || recall_by_mandatory_request()
    s.my_innermost_running_task = s.my_dummy_task;
    s.local_wait_for_all( *s.my_dummy_task, t );
#if __TBB_ARENA_OBSERVER
    my_observers.notify_exit_observers( s.my_last_local_observer, /*worker=*/true );
    s.my_last_local_observer = NULL;
#if __TBB_TASK_PRIORITY
    if ( s.my_offloaded_tasks )
        orphan_offloaded_tasks( s );
    // ...
    ++s.my_counters.arena_roundtrips;
    *my_slots[index].my_counters += s.my_counters;
    s.my_counters.reset();
    // ...
    __TBB_ASSERT( s.my_innermost_running_task == s.my_dummy_task, NULL );
    // ...
    on_thread_leaving<ref_worker>();
    __TBB_ASSERT( !my_guard, "improperly allocated arena?" );
    // ...
#if __TBB_TASK_PRIORITY
    __TBB_ASSERT( !my_reload_epoch && !my_orphaned_tasks && !my_skipped_fifo_priority,
                  "New arena object is not zeroed" );
    // ...
#if __TBB_TASK_PRIORITY
    my_bottom_priority = my_top_priority = normalized_normal_priority;
    // ...
#if __TBB_ARENA_OBSERVER
    my_observers.my_arena = this;
#if __TBB_PREVIEW_CRITICAL_TASKS
    // ...
    my_slots[i].my_counters = new ( NFS_Allocate(1, sizeof(statistics_counters), NULL) ) statistics_counters;
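Objects built this way, with placement new on cache-line-aligned memory from NFS_Allocate, have to be torn down by an explicit destructor call followed by NFS_Free; the arena teardown fragment further below does exactly that for my_default_ctx. A sketch of the pairing with a stand-in type (MyCounters is hypothetical; NFS_Allocate/NFS_Free are the TBB entry points listed in the references at the end):

#include <new>       // placement new
#include <cstddef>

void* NFS_Allocate( std::size_t n_element, std::size_t element_size, void* hint );
void  NFS_Free( void* );

struct MyCounters { /* ... */ };   // stands in for statistics_counters

MyCounters* make_counters() {
    void* mem = NFS_Allocate( 1, sizeof(MyCounters), NULL );  // aligned raw memory
    return new ( mem ) MyCounters;                            // construct in place
}

void destroy_counters( MyCounters* c ) {
    c->~MyCounters();  // placement-new'ed objects need an explicit destructor call
    NFS_Free( c );     // then the raw block goes back to the allocator
}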
#if __TBB_PREVIEW_CRITICAL_TASKS
    ITT_SYNC_CREATE(&my_critical_task_stream, SyncType_Scheduler, SyncObj_CriticalTaskStream);
#if __TBB_ENQUEUE_ENFORCED_CONCURRENCY
    my_concurrency_mode = cm_normal;
#if !__TBB_FP_CONTEXT
    // ...
    unsigned char* storage = (unsigned char*)NFS_Allocate( 1, n, NULL );
    // ...
    memset( storage, 0, n );
    // ...
#if __TBB_ENQUEUE_ENFORCED_CONCURRENCY
    __TBB_ASSERT( my_concurrency_mode != cm_enforced_global, NULL );
#if !__TBB_STATISTICS_EARLY_DUMP
    // ...
    intptr_t drained = 0;
    // ...
#if __TBB_PREVIEW_CRITICAL_TASKS
    __TBB_ASSERT( my_critical_task_stream.drain()==0, "Not all critical tasks were executed");
#if __TBB_COUNT_TASK_NODES
    my_market->update_task_node_count( -drained );
    // ...
#if __TBB_TASK_GROUP_CONTEXT
    __TBB_ASSERT( my_default_ctx, "Master thread never entered the arena?" );
    // The default context was constructed in place, so only its destructor runs here.
    my_default_ctx->~task_group_context();
    // ...
#if __TBB_ARENA_OBSERVER
    if ( !my_observers.empty() )
        my_observers.clear();
#if TBB_USE_ASSERT > 1
// ...
void arena::dump_arena_statistics () {
    statistics_counters total;
#if __TBB_STATISTICS_EARLY_DUMP
    // ...
    *my_slots[i].my_counters += s->my_counters;
    // ...
    dump_statistics( *my_slots[i].my_counters, i );
    // ...
    dump_statistics( *my_slots[0].my_counters, 0 );
#if __TBB_STATISTICS_STDOUT
#if !__TBB_STATISTICS_TOTALS_ONLY
    printf( "----------------------------------------------\n" );
    // ...
    dump_statistics( total, workers_counters_total );
    // ...
    dump_statistics( total, arena_counters_total );
#if !__TBB_STATISTICS_TOTALS_ONLY
    printf( "==============================================\n" );
#if __TBB_TASK_PRIORITY
// may_have_tasks: conservatively reports whether the given scheduler may still
// hold tasks for this arena, and whether it could dequeue FIFO tasks.
inline bool arena::may_have_tasks ( generic_scheduler* s, bool& tasks_present, bool& dequeuing_possible ) {
    if ( !s || s->my_arena != this )
        // ...
    dequeuing_possible |= s->worker_outermost_level();
    if ( s->my_pool_reshuffling_pending ) {
        // ...
        tasks_present = true;
        // ...
    }
    if ( s->my_offloaded_tasks ) {
        tasks_present = true;
        if ( s->my_local_reload_epoch < *s->my_ref_reload_epoch ) {
            // ...
        }
    }
    // ...
void arena::orphan_offloaded_tasks(generic_scheduler& s) {
    // ...
    ++my_abandonment_epoch;
    __TBB_ASSERT( s.my_offloaded_task_list_tail_link && !*s.my_offloaded_task_list_tail_link, NULL );
    // ...
    do {
        // Splice the scheduler's offloaded list in front of the current orphan
        // list, then publish the new head with a CAS; retry on contention.
        orphans = const_cast<task*>(my_orphaned_tasks);
        *s.my_offloaded_task_list_tail_link = orphans;
    } while ( as_atomic(my_orphaned_tasks).compare_and_swap(s.my_offloaded_tasks, orphans) != orphans );
    s.my_offloaded_tasks = NULL;
    // ...
    s.my_offloaded_task_list_tail_link = NULL;
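The loop above is the classic lock-free list push: point the tail of the incoming chain at the current head, then publish the new head with a CAS, retrying if another thread won the race. The same pattern reduced to a standalone splice (std::atomic instead of as_atomic; illustrative, not TBB source):

#include <atomic>

struct node { node* next; };

// Splices the chain [first .. last] onto the front of a lock-free list.
// last->next plays the role of *my_offloaded_task_list_tail_link above.
void splice_front( std::atomic<node*>& head, node* first, node* last ) {
    node* old_head = head.load( std::memory_order_relaxed );
    do {
        last->next = old_head;  // link the current head behind our tail
    } while ( !head.compare_exchange_weak( old_head, first ) );
    // compare_exchange_weak reloads old_head on failure, so each retry
    // re-links against the fresh head before trying to publish again.
}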
    advertise_new_work<work_enqueued>();
#if __TBB_TASK_PRIORITY
    // ...
    if ( p < my_bottom_priority || p > my_top_priority )
#if __TBB_TASK_PRIORITY
    // ...
    intptr_t top_priority = my_top_priority;
    // ...
    for( k=0; k<n; ++k ) {
        // ...
    }
    bool work_absent = k == n;
#if __TBB_PREVIEW_CRITICAL_TASKS
    bool no_critical_tasks = my_critical_task_stream.empty(0);
    work_absent &= no_critical_tasks;
#if __TBB_TASK_PRIORITY
    // ...
    bool tasks_present = !work_absent || my_orphaned_tasks;
    bool dequeuing_possible = false;
    // ...
    // Remember the epoch before scanning; it is rechecked afterwards to
    // detect concurrent task abandonment.
    uintptr_t abandonment_epoch = my_abandonment_epoch;
    // ...
    the_context_state_propagation_mutex.lock();
    work_absent = !may_have_tasks( my_slots[0].my_scheduler, tasks_present, dequeuing_possible );
    the_context_state_propagation_mutex.unlock();
    // ...
    for( k = 1; work_absent && k < n; ++k ) {
        // ...
        work_absent = !may_have_tasks( my_slots[k].my_scheduler, tasks_present, dequeuing_possible );
        // ...
    }
    work_absent = work_absent
        // ...
        && abandonment_epoch == my_abandonment_epoch;
    // ...
#if __TBB_TASK_PRIORITY
    work_absent = work_absent && (!dequeuing_possible || no_fifo_tasks)
        && top_priority == my_top_priority && reload_epoch == my_reload_epoch;
    // ...
    work_absent = work_absent && no_fifo_tasks;
#if __TBB_TASK_PRIORITY
    if ( top_priority > my_bottom_priority ) {
        if ( my_market->lower_arena_priority(*this, top_priority - 1, reload_epoch)
            // ...
            atomic_update( my_skipped_fifo_priority, top_priority, std::less<intptr_t>());
        // ...
    }
    else if ( !tasks_present && !my_orphaned_tasks && no_fifo_tasks ) {
        // ...
    }
#if __TBB_ENQUEUE_ENFORCED_CONCURRENCY
    if( my_concurrency_mode==cm_enforced_global ) {
        // ...
        my_market->mandatory_concurrency_disable( this );
        // ...
    }
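The emptiness verdict is trusted only if the epochs observed before the scan (abandonment epoch, and with priorities also the reload epoch and top priority) are unchanged after it; any concurrent orphaning or priority reload invalidates the pass. The validation pattern in isolation (assumed names, not TBB source):

#include <atomic>

// A producer that publishes work bumps the epoch; a changed epoch means the
// scan may have missed freshly published tasks, so a negative result is
// discarded rather than acted upon.
template<typename Scan>
bool definitely_no_work( const std::atomic<unsigned long>& epoch, Scan scan_found_work ) {
    unsigned long before = epoch.load( std::memory_order_acquire );
    bool work_absent = !scan_found_work();
    return work_absent && epoch.load( std::memory_order_acquire ) == before;
}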
#if __TBB_TASK_PRIORITY
// ...
#if __TBB_COUNT_TASK_NODES
intptr_t arena::workers_task_node_count() {
    // ...
    result += s->my_task_node_count;
#if __TBB_RECYCLE_TO_ENQUEUE
    // ...
    __TBB_ASSERT( ref_count!=0,
                  "attempt to enqueue task whose parent has a ref_count==0 (forgot to set_ref_count?)" );
    __TBB_ASSERT( ref_count>0, "attempt to enqueue task whose parent has a ref_count<0" );
    // ...
#if __TBB_PREVIEW_CRITICAL_TASKS
    if( s && s->my_arena_slot ) {
        // ...
#if __TBB_TASK_ISOLATION
        // A critical task inherits the isolation tag of the currently running task.
        t.prefix().isolation = s->my_innermost_running_task->prefix().isolation;
        // ...
        unsigned& lane = s->my_arena_slot->hint_for_critical;
        // ...
    }
    // ...
    my_critical_task_stream.push( &t, 0, internal::random_lane_selector(random) );
    // ...
    advertise_new_work<work_spawned>();
#if __TBB_TASK_PRIORITY
    intptr_t p = prio ? normalize_priority(priority_t(prio)) : normalized_normal_priority;
    assert_priority_valid(p);
#if __TBB_PREVIEW_CRITICAL_TASKS && __TBB_CPF_BUILD
    // ...
    if ( p != my_top_priority )
        // ...
    __TBB_ASSERT_EX(prio == 0, "the library is not configured to respect the task priority");
#if __TBB_PREVIEW_CRITICAL_TASKS && __TBB_CPF_BUILD
    // ...
    advertise_new_work<work_enqueued>();
#if __TBB_TASK_PRIORITY
    if ( p != my_top_priority )
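Seen from the public API, this is the path taken by task::enqueue with an explicit priority: the raw prio value is normalized here, while builds without __TBB_TASK_PRIORITY hit the __TBB_ASSERT_EX above instead. A minimal caller-side example (assuming a TBB build with task priority enabled; HelloTask is hypothetical):

#include "tbb/task.h"

struct HelloTask : tbb::task {
    tbb::task* execute() __TBB_override {
        // ... user work ...
        return NULL;
    }
};

void enqueue_with_priority() {
    tbb::task& t = *new( tbb::task::allocate_root() ) HelloTask;
    // prio reaches arena::enqueue_task via task::enqueue and is normalized there.
    tbb::task::enqueue( t, tbb::priority_high );
}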
    s->nested_arena_entry(a, slot_index);
#if __TBB_TASK_GROUP_CONTEXT
// ...
#if __TBB_TASK_PRIORITY
// ...
#if __TBB_PREVIEW_CRITICAL_TASKS
// ...
#if __TBB_TASK_GROUP_CONTEXT
// ...
#if __TBB_TASK_PRIORITY
    if ( my_offloaded_tasks )
        my_arena->orphan_offloaded_tasks( *this );
    my_offloaded_tasks = NULL;
    // ...
#if __TBB_ARENA_OBSERVER
    my_last_local_observer = 0;
    my_arena->my_observers.notify_entry_observers( my_last_local_observer, /*worker=*/false );
    // ...
#if __TBB_ARENA_OBSERVER
    my_arena->my_observers.notify_exit_observers( my_last_local_observer, /*worker=*/false );
#if __TBB_TASK_PRIORITY
    if ( my_offloaded_tasks )
        my_arena->orphan_offloaded_tasks( *this );
namespace interface7 {
// ...
#if __TBB_TASK_GROUP_CONTEXT
    // ...
    new_arena->my_default_ctx->capture_fp_settings();
    // ...
#if __TBB_TASK_GROUP_CONTEXT
    // ...
    my_arena->my_market->release( /*is_public*/true, /*blocking_terminate*/false );
#if __TBB_TASK_GROUP_CONTEXT
    // ...
    if( s && s->my_arena ) {
        // ...
#if __TBB_TASK_GROUP_CONTEXT
    // ...
        "The task will not be executed because default task_group_context of task_arena is cancelled. Has previously enqueued task thrown an exception?");
    my_arena->enqueue_task( t, prio, s->my_random );
    __TBB_ASSERT( s.outermost_level(), "expected to be enqueued and received on the outermost level");
    struct outermost_context : internal::no_copy {
        // ...
        // Constructor: save the scheduler state that delegated execution clobbers.
        : t(_t), s(_s), orig_dummy(s.my_dummy_task), orig_props(s.my_properties)
        {
            // ...
#if __TBB_TASK_GROUP_CONTEXT
            orig_ctx = t->prefix().context;
            t->prefix().context = s.my_arena->my_default_ctx;
            // ...
        }
        ~outermost_context() {
#if __TBB_TASK_GROUP_CONTEXT
            // ...
            t->prefix().context = orig_ctx;
            // ...
            s.my_properties = orig_props;
            s.my_dummy_task = orig_dummy;
        }
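outermost_context is a save/restore guard: the constructor stashes the dummy task, the scheduler properties, and (with task groups) the context, and the destructor puts them back even when the delegated functor throws. Reduced to its generic shape (illustrative sketch, not TBB source):

// Save-in-constructor / restore-in-destructor, the idiom outermost_context uses.
template<typename T>
class restore_on_exit {
    T& ref;
    T saved;
public:
    explicit restore_on_exit( T& r ) : ref(r), saved(r) {}
    ~restore_on_exit() { ref = saved; }  // also runs during stack unwinding
};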
    bool same_arena = s->my_arena == my_arena;
    size_t index1 = s->my_arena_index;
    // ...
    index1 = my_arena->occupy_free_slot</*as_worker*/false>(*s);
    // ...
#if __TBB_USE_OPTIONAL_RTTI
    // ...
    internal::delegated_function< graph_funct, void >* deleg_funct =
        dynamic_cast< internal::delegated_function< graph_funct, void>* >(&d);
    // ...
    (internal::forward< graph_funct >(deleg_funct->my_func)), 0);
    // ...
#if __TBB_TASK_GROUP_CONTEXT
    // ...
    // No free slot: register on the exit monitor and wait for one.
    my_arena->my_exit_monitors.prepare_wait(waiter, (uintptr_t)&d);
    // ...
    my_arena->my_exit_monitors.cancel_wait(waiter);
    // ...
    index2 = my_arena->occupy_free_slot</*as_worker*/false>(*s);
    // ...
    my_arena->my_exit_monitors.cancel_wait(waiter);
    // ...
    s->local_wait_for_all(root, NULL);
#if TBB_USE_EXCEPTIONS
    // ...
    my_arena->my_exit_monitors.commit_wait(waiter);
    // ...
    my_arena->my_exit_monitors.notify_one();
    // ...
#if TBB_USE_EXCEPTIONS
    // ...
    TbbRethrowException(pe);
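From the caller's side, all of the slot occupation and exit-monitor waiting above is hidden behind task_arena::execute: the calling thread either joins a free slot and runs the functor there, or parks on my_exit_monitors until it can. A typical use:

#include "tbb/task_arena.h"
#include "tbb/parallel_for.h"

void run_in_arena() {
    tbb::task_arena arena( 4 );  // arena limited to 4 threads
    arena.execute( []{
        // Runs inside the arena; the arena's workers may join in.
        tbb::parallel_for( 0, 1000, []( int ) { /* ... */ } );
    } );
}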
#if __TBB_USE_OPTIONAL_RTTI
// ...
#if TBB_USE_EXCEPTIONS
// ...
#if TBB_USE_EXCEPTIONS
// ...
    __TBB_ASSERT( s->outermost_level(), "The enqueued task can be processed only on outermost level" );
    if ( s->is_worker() ) {
        // ...
        s->my_innermost_running_task = s->my_dummy_task;
        s->local_wait_for_all( *s->my_dummy_task, NULL );
        s->my_innermost_running_task = this;
    } else s->my_arena->is_out_of_work();
    __TBB_ASSERT( s->my_arena != my_arena || s->my_arena_index == 0,
                  "task_arena::wait_until_empty() is not supported within a worker context" );
    // ...
    if( !s->my_arena_index )
        while( my_arena->num_workers_active() )
            s->wait_until_empty();
    // ...
        && as_atomic(my_arena->my_slots[0].my_scheduler).compare_and_swap(s, NULL) == NULL ) {
        // ...
        s->wait_until_empty();
        // ...
    }
    if( !my_arena->num_workers_active() && !my_arena->my_slots[0].my_scheduler )
        // ...
    return s ? int(s->my_arena_index) : -1;
#if __TBB_TASK_ISOLATION
// ...
// RAII guard: restores the previous isolation tag on scope exit.
isolation_guard( isolation_tag &isolation ) : guarded( isolation ), previous_value( isolation ) {}
~isolation_guard() {
    guarded = previous_value;
}
// ...
__TBB_ASSERT( s, "this_task_arena::isolate() needs an initialized scheduler" );
// ...
isolation_tag& current_isolation = s->my_innermost_running_task->prefix().isolation;
// ...
// The address of the delegate serves as a unique isolation tag for this region.
isolation_guard guard( current_isolation );
current_isolation = reinterpret_cast<isolation_tag>(&d);
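isolate_within_arena tags the innermost running task with the delegate's address and restores the previous tag via the guard; the net effect, through the public wrapper, is that a thread blocked inside the isolated region will not steal unrelated tasks from outside it. Caller-side view:

#include "tbb/task_arena.h"
#include "tbb/parallel_for.h"

void isolated_region() {
    tbb::this_task_arena::isolate( []{
        // Work stealing inside this lambda is confined to tasks that carry
        // the same isolation tag.
        tbb::parallel_for( 0, 100, []( int ) { /* ... */ } );
    } );
}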
bool has_enqueued_tasks()
Check for the presence of enqueued tasks at all priority levels.
size_t my_arena_index
Index of the arena slot the scheduler occupies now, or occupied last time.
T1 atomic_update(tbb::atomic< T1 > &dst, T2 newValue, Pred compare)
Atomically replaces the value of dst with newValue when they satisfy the condition of the compare predicate (a sketch of this helper follows the reference list).
void __TBB_EXPORTED_METHOD internal_attach()
Class representing where mail is put.
static const intptr_t num_priority_levels
static void assume_scheduler(generic_scheduler *s)
Temporarily set TLS slot to the given scheduler.
T __TBB_load_relaxed(const volatile T &location)
static market & global_market(bool is_public, unsigned max_num_workers=0, size_t stack_size=0)
Factory method creating new market object.
static const int priority_critical
virtual void local_wait_for_all(task &parent, task *child)=0
nested_arena_context(generic_scheduler *s, arena *a, size_t slot_index, bool type, bool same)
task * my_dummy_task
Fake root task created by slave threads.
internal::tbb_exception_ptr exception_container_type
internal::delegate_base & my_delegate
exception_container_type * my_exception
Pointer to the container storing exception being propagated across this task group.
mail_outbox & mailbox(affinity_id id)
Get reference to mailbox corresponding to given affinity_id.
bool is_idle_state(bool value) const
Indicate whether thread that reads this mailbox is idle.
atomic< unsigned > my_limit
The maximal number of currently busy slots.
intptr_t reference_count
A reference count.
void make_critical(task &t)
void set_ctx(__TBB_CONTEXT_ARG1(task_group_context *))
value_type compare_and_swap(value_type value, value_type comparand)
void nested_arena_entry(arena *, size_t)
void free_task_pool()
Deallocate task pool that was allocated by means of allocate_task_pool.
#define ITT_SYNC_CREATE(obj, type, name)
void spin_wait_while_eq(const volatile T &location, U value)
Spin WHILE the value of the variable is equal to a given value.
delegated_task(internal::delegate_base &d, concurrent_monitor &s, task *t)
task * execute() __TBB_override
Should be overridden by derived classes.
unsigned my_master_slots
Reserved master slots.
static arena & allocate_arena(market &, unsigned num_slots, unsigned num_reserved_slots)
Allocate an instance of arena.
cpu_ctl_env my_cpu_ctl_env
FPU control settings of arena's master thread captured at the moment of arena instantiation.
#define GATHER_STATISTIC(x)
intptr_t drain()
Destroys all remaining tasks in every lane. Returns the number of destroyed tasks.
unsigned num_workers_active()
The number of workers active in the arena.
internal::arena * my_arena
NULL if not currently initialized.
state_type state() const
Current execution state.
void restore_priority_if_need()
If enqueued tasks are found, restore the arena's priority and task presence status.
scheduler_properties my_properties
static void one_time_init()
void attach_mailbox(affinity_id id)
void __TBB_EXPORTED_FUNC isolate_within_arena(delegate_base &d, intptr_t reserved=0)
Used to form groups of tasks.
arena * my_arena
The arena that I own (if master) or am servicing at the moment (if worker)
task object is freshly allocated or recycled.
A fast random number generator.
Bit-field representing properties of a scheduler.
bool empty(int level)
Checks existence of a task.
intptr_t my_version_and_traits
Special settings.
void construct()
Construct *this as a mailbox from zeroed memory.
concurrent_monitor & my_monitor
static int __TBB_EXPORTED_FUNC internal_max_concurrency(const task_arena *)
internal::task_prefix & prefix(internal::version_tag *=NULL) const
Get reference to corresponding task_prefix.
int my_max_concurrency
Concurrency level for deferred initialization.
void __TBB_EXPORTED_FUNC NFS_Free(void *)
Free memory allocated by NFS_Allocate.
static arena * create_arena(int num_slots, int num_reserved_slots, size_t stack_size)
Creates an arena object.
unsigned short affinity_id
An id as used for specifying affinity.
unsigned hint_for_pop
Hint provided for operations with the container of starvation-resistant tasks.
static unsigned num_arena_slots(unsigned num_slots)
Base class for types that should not be copied or assigned.
unsigned my_num_slots
The number of slots in the arena.
atomic< T > & as_atomic(T &t)
void set_is_idle(bool value)
Indicate whether thread that reads this mailbox is idle.
generic_scheduler * my_scheduler
Scheduler of the thread attached to the slot.
binary_semaphore & my_signal
static generic_scheduler * local_scheduler_weak()
int my_num_workers_requested
The number of workers that are currently requested from the resource manager.
static bool is_set(generic_scheduler *s)
Used to check validity of the local scheduler TLS contents.
size_t occupy_free_slot(generic_scheduler &s)
Tries to occupy a slot in the arena. On success, returns the slot index; if no slot is available,...
void mimic_outermost_level(arena *a, bool type)
task_group_context * my_orig_ctx
__TBB_atomic reference_count ref_count
Reference count used for synchronization.
A functor that spawns a task.
intptr_t drain()
Drain the mailbox.
unsigned my_num_workers_allotted
The number of workers that have been marked out by the resource manager to service the arena.
task_group_context * my_context
default context of the arena
bool type
Indicates that a scheduler acts as a master or a worker.
scheduler_state my_orig_state
#define __TBB_ASSERT(predicate, comment)
No-op version of __TBB_ASSERT.
static const pool_state_t SNAPSHOT_FULL
At least one task has been offered for stealing since the last snapshot started.
binary_semaphore for concurrent monitor
void on_thread_leaving()
Notification that worker or master leaves its arena.
atomic< unsigned > my_references
Reference counter for the arena.
wait_task(binary_semaphore &sema)
bool outermost
Indicates that a scheduler is on outermost level.
uintptr_t my_arenas_aba_epoch
ABA prevention marker to assign to newly created arenas.
void adjust_demand(arena &, int delta)
Request that arena's need in workers should be adjusted.
void process(generic_scheduler &)
Registers the worker with the arena and enters TBB scheduler dispatch loop.
#define __TBB_CONTEXT_ARG(arg1, context)
Base class for user-defined tasks.
Work stealing task scheduler.
void __TBB_store_with_release(volatile T &location, V value)
void attach_arena(arena *, size_t index, bool is_master)
market * my_market
The market that owns this arena.
unsigned my_num_reserved_slots
The number of reserved slots (can be occupied only by masters).
#define __TBB_ASSERT_EX(predicate, comment)
"Extended" version is useful to suppress warnings if a variable is only used with an assert
static internal::allocate_root_proxy allocate_root()
Returns proxy for overloaded new that allocates a root task.
static unsigned default_num_threads()
void free_arena()
Completes arena shutdown, destructs and deallocates it.
T __TBB_load_with_acquire(const volatile T &location)
#define __TBB_CONTEXT_ARG1(context)
#define ITT_NOTIFY(name, obj)
bool release(bool is_public, bool blocking_terminate)
Decrements market's refcount and destroys it in the end.
concurrent_monitor my_exit_monitors
Waiting object for master threads that cannot join the arena.
static int allocation_size(unsigned num_slots)
static const unsigned ref_external
Reference increment values for externals and workers.
static int __TBB_EXPORTED_FUNC internal_current_slot()
void copy_fp_settings(const task_group_context &src)
Copies FPU control setting from another context.
arena(market &, unsigned max_num_workers, unsigned num_reserved_slots)
Constructor.
bool is_critical(task &t)
Smart holder for the empty task class with automatic destruction.
task_stream< num_priority_levels > my_task_stream
Task pool for the tasks scheduled via task::enqueue() method.
void push(task *source, int level, FastRandom &random)
Push a task into a lane.
market * my_market
The market I am in.
Set if ref_count might be changed by another thread. Used for debugging.
intptr_t isolation_tag
A tag for task isolation.
void __TBB_EXPORTED_METHOD internal_terminate()
task * parent() const
task on whose behalf this task is working, or NULL if this is a root.
arena_slot * my_arena_slot
Pointer to the slot in the arena we own at the moment.
uintptr_t my_version_and_traits
Version for run-time checks and behavioral traits of the context.
static const size_t out_of_arena
#define __TBB_ENQUEUE_ENFORCED_CONCURRENCY
bool operator()(uintptr_t ctx) const
size_t occupy_free_slot_in_range(generic_scheduler &s, size_t lower, size_t upper)
Tries to occupy a slot in the specified range.
uintptr_t my_aba_epoch
ABA prevention marker.
#define __TBB_ISOLATION_ARG(arg1, isolation)
const isolation_tag no_isolation
static bool occupy_slot(generic_scheduler *&slot, generic_scheduler &s)
static generic_scheduler * local_scheduler_if_initialized()
bool is_out_of_work()
Check if there is job anywhere in arena.
static const int automatic
Typedef for number of threads that is automatic.
task is in ready pool, or is going to be put there, or was just taken off.
void notify(const P &predicate)
Notify waiting threads of the event that satisfies the given predicate.
void __TBB_EXPORTED_METHOD internal_enqueue(task &, intptr_t) const
bool is_worker() const
True if running on a worker thread, false otherwise.
static const pool_state_t SNAPSHOT_EMPTY
No tasks to steal since last snapshot was taken.
size_t __TBB_EXPORTED_FUNC NFS_GetLineSize()
Cache/sector line size.
void notify_one()
Notify one thread about the event.
task * execute() __TBB_override
Should be overridden by derived classes.
const size_t NFS_MaxLineSize
Compile-time constant that is upper bound on cache line/sector size.
void enqueue_task(task &, intptr_t, FastRandom &)
Enqueue a task into the starvation-resistant queue.
void __TBB_EXPORTED_METHOD internal_wait() const
void initialize(unsigned n_lanes)
task * my_innermost_running_task
Innermost task whose task::execute() is running. A dummy task on the outermost level.
void __TBB_EXPORTED_METHOD register_pending_exception()
Records the pending exception, and cancels the task group.
generic_scheduler & my_scheduler
unsigned my_max_num_workers
The number of workers requested by the master thread owning the arena.
void __TBB_EXPORTED_METHOD internal_initialize()
void *__TBB_EXPORTED_FUNC NFS_Allocate(size_t n_element, size_t element_size, void *hint)
Allocate memory on cache/sector line boundary.
void __TBB_EXPORTED_METHOD internal_execute(delegate_base &) const
int ref_count() const
The internal reference count.
tbb::atomic< uintptr_t > my_pool_state
Current task pool state and estimate of available tasks amount.
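The atomic_update entry above is the helper is_out_of_work uses for my_skipped_fifo_priority. A plausible shape for it, as a CAS retry loop (a sketch consistent with the description, not verbatim TBB source):

#include "tbb/atomic.h"
#include <functional>

template<typename T1, typename T2, typename Pred>
T1 atomic_update( tbb::atomic<T1>& dst, T2 newValue, Pred compare ) {
    T1 oldValue = dst;
    // Keep trying while the predicate says dst should be replaced; stop as
    // soon as a CAS succeeds or another thread installs a winning value.
    while ( compare( oldValue, newValue ) ) {
        if ( dst.compare_and_swap( (T1)newValue, oldValue ) == oldValue )
            break;
        oldValue = dst;
    }
    return oldValue;
}

// Example: raise a watermark monotonically, as in
//   atomic_update( my_skipped_fifo_priority, top_priority, std::less<intptr_t>() );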