00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013
00014
00015
00016
00017
00018
00019
00020
00021 #ifndef __TBB_task_H
00022 #define __TBB_task_H
00023
00024 #include "tbb_stddef.h"
00025
00026 #if __TBB_EXCEPTIONS
00027 #include "cache_aligned_allocator.h"
00028 #endif
00029
00030 namespace tbb {
00031
00032 class task;
00033 class task_list;
00034 #if __TBB_EXCEPTIONS
00035 class task_group_context;
00036 class tbb_exception;
00037 #endif
00038
00040 namespace internal {
00041
//! Abstract interface to the task-scheduler implementation.
/** For internal use only.  The concrete scheduler lives in the TBB runtime
    library; this header exposes only the virtual dispatch surface that the
    inline methods of class task call through task_prefix::owner.
    NOTE(review): do not reorder or add virtual methods — vtable layout is
    part of the binary interface with the runtime library. */
class scheduler: no_copy {
public:
    //! For internal use only.  Schedule first task, plus any tasks linked via task_prefix::next.
    virtual void spawn( task& first, task*& next ) = 0;

    //! For internal use only.  Wait until parent's reference count drains; child, if non-NULL, is executed first (presumably — confirm against runtime).
    virtual void wait_for_all( task& parent, task* child ) = 0;

    //! For internal use only.  Spawn a root task (plus siblings linked via next) and wait for them to finish.
    virtual void spawn_root_and_wait( task& first, task*& next ) = 0;

    //! Pure virtual destructor: keeps the class abstract; definition is supplied by the runtime library.
    virtual ~scheduler() = 0;
};
00057
00059
//! A reference count: pointer-sized so it can count every task that can exist at once.
typedef intptr reference_count;

//! An id as used for specifying affinity (see task::set_affinity / task::affinity).
typedef unsigned short affinity_id;
00064
00065 #if __TBB_EXCEPTIONS
//! Node in a doubly-linked intrusive list of task_group_context objects.
/** Embedded as task_group_context::my_node; list maintenance is done by the
    runtime library, this header only declares the layout. */
struct context_list_node_t {
    context_list_node_t *my_prev,
                        *my_next;
};
00070
//! Proxy returned by task::allocate_root(task_group_context&).
/** Carries the cancellation group the new root task will be associated with;
    the matching overloads of global operator new/delete forward to
    allocate()/free(), which are implemented in the runtime library. */
class allocate_root_with_context_proxy: no_assign {
    task_group_context& my_context;
public:
    allocate_root_with_context_proxy ( task_group_context& ctx ) : my_context(ctx) {}
    //! Allocate a root task of the given size bound to my_context; implemented by the runtime library.
    task& __TBB_EXPORTED_METHOD allocate( size_t size ) const;
    //! Free a task previously obtained from allocate(); called if the task's constructor throws.
    void __TBB_EXPORTED_METHOD free( task& ) const;
};
00078 #endif
00079
//! Stateless proxy returned by task::allocate_root().
/** The matching overloads of global operator new/delete forward to the static
    allocate()/free(), which are implemented in the runtime library. */
class allocate_root_proxy: no_assign {
public:
    static task& __TBB_EXPORTED_FUNC allocate( size_t size );
    static void __TBB_EXPORTED_FUNC free( task& );
};
00085
//! Proxy for allocating a continuation task.
/** Stateless: task::allocate_continuation() reinterprets the task's own
    address as this proxy, so allocate()/free() (runtime library) can recover
    the originating task from `this`. */
class allocate_continuation_proxy: no_assign {
public:
    task& __TBB_EXPORTED_METHOD allocate( size_t size ) const;
    void __TBB_EXPORTED_METHOD free( task& ) const;
};
00091
//! Proxy for allocating a child task.
/** Stateless: task::allocate_child() reinterprets the task's own address as
    this proxy, so allocate()/free() (runtime library) can recover the parent
    task from `this`. */
class allocate_child_proxy: no_assign {
public:
    task& __TBB_EXPORTED_METHOD allocate( size_t size ) const;
    void __TBB_EXPORTED_METHOD free( task& ) const;
};
00097
//! Proxy for task::allocate_additional_child_of.
/** Unlike the other proxies this one carries state: the allocating task and
    the task that becomes the new child's parent. */
class allocate_additional_child_of_proxy: no_assign {
    task& self;    // the task performing the allocation
    task& parent;  // the task that will be the new child's parent
public:
    allocate_additional_child_of_proxy( task& self_, task& parent_ ) : self(self_), parent(parent_) {}
    task& __TBB_EXPORTED_METHOD allocate( size_t size ) const;
    void __TBB_EXPORTED_METHOD free( task& ) const;
};
00106
00108
//! Memory prefix to a task object.
/** This class is internal to the library.
    Every instance of class task is immediately preceded in memory by an
    instance of task_prefix: method task() returns *(this+1), and
    task::prefix() indexes [-1] from the task.  The layout is therefore part
    of the binary interface with the runtime library — do not reorder fields. */
class task_prefix {
private:
    friend class tbb::task;
    friend class tbb::task_list;
    friend class internal::scheduler;
    friend class internal::allocate_root_proxy;
    friend class internal::allocate_child_proxy;
    friend class internal::allocate_continuation_proxy;
    friend class internal::allocate_additional_child_of_proxy;

#if __TBB_EXCEPTIONS
    //! Cancellation group this task belongs to.
    /** Propagated from the new parent by task::recycle_as_child_of; otherwise
        set by the allocation proxies (runtime library). */
    task_group_context *context;
#endif

    //! The scheduler that allocated the task.
    /** Name suggests the task's memory is returned here when freed — TODO confirm against runtime. */
    scheduler* origin;

    //! The scheduler that owns the task; task's inline spawn/wait methods dispatch through it.
    scheduler* owner;

    //! The task whose reference count includes this task.
    /** NULL for a root task, or while recycled as a child (asserted in task::recycle_as_child_of). */
    tbb::task* parent;

    //! Reference count used for synchronization.
    /** Typically the number of child tasks plus waiters — see task::set_ref_count. */
    reference_count ref_count;

    //! Scheduling depth, exposed via task::depth()/set_depth()/add_to_depth.
    int depth;

    //! A task::state_type, stored as a byte to keep the prefix small.
    unsigned char state;

    //! Miscellaneous flags reserved for the scheduler.
    /** task's constructor sets it to 1 — meaning is internal to the runtime library. */
    unsigned char extra_state;

    //! Affinity hint, exposed via task::set_affinity/affinity.
    affinity_id affinity;

    //! "next" field for list of tasks (used by task_list and the spawn methods).
    tbb::task* next;

    //! The task corresponding to this task_prefix (laid out immediately after it).
    tbb::task& task() {return *reinterpret_cast<tbb::task*>(this+1);}
};
00177
00178 }
00180
00181 #if __TBB_EXCEPTIONS
00182
00183
00185
//! Used to form groups of tasks for cancellation.
/** The object is split into two cache-line-sized halves by the padding
    members (sizing arithmetic uses internal::NFS_MaxLineSize), presumably so
    that fields written by other threads do not share a cache line with
    frequently-read state — confirm before changing any member or its order. */
class task_group_context : internal::no_copy
{
public:
    enum kind_type {
        isolated,   // context is not bound to a parent group
        bound       // context binds to its nearest ancestor context (see binding_required below)
    };

private:
    union {
        //! Flavor of this context: bound or isolated.
        kind_type my_kind;
        //! Ensures the union (and thus my_kind) occupies a full machine word.
        uintptr_t _my_kind_aligner;
    };

    //! Pointer to the context of the parent cancellation group. NULL for isolated contexts.
    task_group_context *my_parent;

    //! Node linking this context into a list of contexts (maintained by the runtime library).
    internal::context_list_node_t my_node;

    //! Padding: fills the first half of the object out to a cache line.
    char _leading_padding[internal::NFS_MaxLineSize - 2 * sizeof(uintptr_t)- sizeof(void*) - sizeof(internal::context_list_node_t)];

    //! Nonzero when cancellation of this group has been requested (see cancel_group_execution).
    uintptr_t my_cancellation_requested;

    //! Version of the structure, for binary compatibility checks.
    /** Set to 0 by the constructor; interpretation is up to the runtime library. */
    uintptr_t my_version;

    //! Pointer to the exception being propagated across this task group, if any.
    tbb_exception *my_exception;

    //! Opaque owner handle, used by the runtime library.
    /** Presumably identifies the scheduler/thread that registered the context — TODO confirm. */
    void *my_owner;

    //! Padding: fills the second half of the object out to a cache line.
    char _trailing_padding[internal::NFS_MaxLineSize - sizeof(intptr_t) - 2 * sizeof(void*)];

public:

    //! Construct a context, by default one that will bind to its nearest ancestor group.
    /** Remaining members (my_parent, my_node, cancellation state, ...) are
        initialized by init(), which is implemented in the runtime library. */
    task_group_context ( kind_type relation_with_parent = bound )
        : my_kind(relation_with_parent)
        , my_version(0)
    {
        init();
    }

    __TBB_EXPORTED_METHOD ~task_group_context ();

    //! Forcefully reinitializes the context after the task tree it was associated with is completed.
    void __TBB_EXPORTED_METHOD reset ();

    //! Initiates cancellation of all tasks in this cancellation group and its subordinate groups.
    /** Returns whether this call actually requested the cancellation (see is_group_execution_cancelled). */
    bool __TBB_EXPORTED_METHOD cancel_group_execution ();

    //! Returns true if the context received cancellation request.
    bool __TBB_EXPORTED_METHOD is_group_execution_cancelled () const;

protected:
    //! Out-of-line part of the constructor; implemented in the runtime library.
    void __TBB_EXPORTED_METHOD init ();

private:
    friend class task;
    friend class internal::allocate_root_with_context_proxy;

    //! Pseudo-states used by the runtime to track binding of a `bound` context.
    static const kind_type binding_required = bound;
    static const kind_type binding_completed = kind_type(bound+1);

    //! Propagates a pending cancellation request from ancestor contexts into this one.
    void propagate_cancellation_from_ancestors ();
};
00320
00321 #endif
00322
00324
//! Base class for user-defined tasks.
/** Derive from it and override execute().  The object is always preceded in
    memory by an internal::task_prefix (see prefix() below), which holds the
    scheduler bookkeeping: parent, ref_count, depth, state, affinity. */
class task: internal::no_copy {
    //! Set reference count with checking enabled; implemented in the runtime library.
    void __TBB_EXPORTED_METHOD internal_set_ref_count( int count );

protected:
    //! Default constructor.
    /** extra_state is a scheduler-reserved byte; the meaning of value 1 is internal to the runtime. */
    task() {prefix().extra_state=1;}

public:
    //! Destructor.
    virtual ~task() {}

    //! Should be overridden by derived task class.
    /** May return NULL, or a pointer to the next task to execute (bypassing the ready pool — presumably; confirm against scheduler docs). */
    virtual task* execute() = 0;

    //! Enumeration of task states that the scheduler considers.
    enum state_type {
        //! task is running, and will be destroyed after method execute() completes
        executing,
        //! task to be rescheduled after execute() completes (set by recycle_to_reexecute)
        reexecute,
        //! task is in the ready pool, or is going to be put there, or was just taken off
        ready,
        //! task object is freshly allocated or recycled
        allocated,
        //! task object is on the free list, or is going to be put there, or was just taken off
        freed,
        //! task to be recycled as a safe continuation (set by recycle_as_safe_continuation)
        recycle
    };

    //------------------------------------------------------------------------
    // Allocating tasks: use as `new( task::allocate_root() ) T(...)` etc.
    //------------------------------------------------------------------------

    //! Returns proxy for overloaded new that allocates a root task.
    static internal::allocate_root_proxy allocate_root() {
        return internal::allocate_root_proxy();
    }

#if __TBB_EXCEPTIONS
    //! Returns proxy for overloaded new that allocates a root task associated with the cancellation group ctx.
    static internal::allocate_root_with_context_proxy allocate_root( task_group_context& ctx ) {
        return internal::allocate_root_with_context_proxy(ctx);
    }
#endif

    //! Returns proxy for overloaded new that allocates a continuation task of *this.
    /** The proxy is stateless; it aliases this task's address so the runtime can find the originator. */
    internal::allocate_continuation_proxy& allocate_continuation() {
        return *reinterpret_cast<internal::allocate_continuation_proxy*>(this);
    }

    //! Returns proxy for overloaded new that allocates a child task of *this.
    /** Stateless alias of this task's address, like allocate_continuation(). */
    internal::allocate_child_proxy& allocate_child() {
        return *reinterpret_cast<internal::allocate_child_proxy*>(this);
    }

    //! Like allocate_child, except that the new task's parent becomes t, not *this.
    /** Caller is responsible for t's reference count accounting — TODO confirm exact contract against runtime docs. */
    internal::allocate_additional_child_of_proxy allocate_additional_child_of( task& t ) {
        return internal::allocate_additional_child_of_proxy(*this,t);
    }

    //! Destroy victim without executing it; implemented in the runtime library.
    void __TBB_EXPORTED_METHOD destroy( task& victim );

    //------------------------------------------------------------------------
    // Recycling of tasks: must be called from inside execute()
    //------------------------------------------------------------------------

    //! Change this task so that when execute() returns, the task acts as a continuation instead of dying.
    void recycle_as_continuation() {
        __TBB_ASSERT( prefix().state==executing, "execute not running?" );
        prefix().state = allocated;
    }

    //! Recycle as continuation in a way that is safe even if children complete while execute() is still running.
    /** Uses state `recycle` rather than `allocated`; the distinction is interpreted by the runtime. */
    void recycle_as_safe_continuation() {
        __TBB_ASSERT( prefix().state==executing, "execute not running?" );
        prefix().state = recycle;
    }

    //! Change this task so that when execute() returns, the task becomes a child of new_parent.
    void recycle_as_child_of( task& new_parent ) {
        internal::task_prefix& p = prefix();
        __TBB_ASSERT( prefix().state==executing||prefix().state==allocated, "execute not running, or already recycled" );
        __TBB_ASSERT( prefix().ref_count==0, "no child tasks allowed when recycled as a child" );
        __TBB_ASSERT( p.parent==NULL, "parent must be null" );
        __TBB_ASSERT( new_parent.prefix().state<=recycle, "corrupt parent's state" );
        __TBB_ASSERT( new_parent.prefix().state!=freed, "parent already freed" );
        p.state = allocated;
        p.parent = &new_parent;
        // Child runs one level deeper, in the parent's cancellation group.
        p.depth = new_parent.prefix().depth+1;
#if __TBB_EXCEPTIONS
        p.context = new_parent.prefix().context;
#endif
    }

    //! Change this task so that when execute() returns, it is rescheduled for execution.
    void recycle_to_reexecute() {
        __TBB_ASSERT( prefix().state==executing, "execute not running, or already recycled" );
        __TBB_ASSERT( prefix().ref_count==0, "no child tasks allowed when recycled for reexecution" );
        prefix().state = reexecute;
    }

    //! Type of the scheduling depth (signed, pointer-sized).
    typedef internal::intptr depth_type;

    //! Scheduling depth of the task.
    depth_type depth() const {return prefix().depth;}

    //! Set scheduling depth; depth is stored internally as int, hence the overflow assertion.
    void set_depth( depth_type new_depth ) {
        __TBB_ASSERT( state()!=ready, "cannot change depth of ready task" );
        __TBB_ASSERT( new_depth>=0, "depth cannot be negative" );
        __TBB_ASSERT( new_depth==int(new_depth), "integer overflow error");
        prefix().depth = int(new_depth);
    }

    //! Change scheduling depth by given amount (may be negative, but result must not be).
    void add_to_depth( int delta ) {
        __TBB_ASSERT( state()!=ready, "cannot change depth of ready task" );
        __TBB_ASSERT( prefix().depth>=-delta, "depth cannot be negative" );
        prefix().depth+=delta;
    }

    //------------------------------------------------------------------------
    // Synchronization
    //------------------------------------------------------------------------

    //! Set reference count.
    /** Routed through the out-of-line checked version when threading tools or assertions are on. */
    void set_ref_count( int count ) {
#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT
        internal_set_ref_count(count);
#else
        prefix().ref_count = count;
#endif
    }

    //! Schedule child for execution via the scheduler that owns *this.
    /** Must be called from the thread that owns *this (asserted). */
    void spawn( task& child ) {
        __TBB_ASSERT( is_owned_by_current_thread(), "'this' not owned by current thread" );
        prefix().owner->spawn( child, child.prefix().next );
    }

    //! Spawn multiple tasks and clear the list; defined out of line below.
    void spawn( task_list& list );

    //! Spawn child and wait until this task's reference count drains.
    void spawn_and_wait_for_all( task& child ) {
        __TBB_ASSERT( is_owned_by_current_thread(), "'this' not owned by current thread" );
        prefix().owner->wait_for_all( *this, &child );
    }

    //! List form of spawn_and_wait_for_all; implemented in the runtime library.
    void __TBB_EXPORTED_METHOD spawn_and_wait_for_all( task_list& list );

    //! Spawn root task and wait for it to complete.
    static void spawn_root_and_wait( task& root ) {
        __TBB_ASSERT( root.is_owned_by_current_thread(), "root not owned by current thread" );
        root.prefix().owner->spawn_root_and_wait( root, root.prefix().next );
    }

    //! Spawn all root tasks in root_list and wait for them; defined out of line below.
    static void spawn_root_and_wait( task_list& root_list );

    //! Wait for this task's reference count to drain (without spawning anything new).
    void wait_for_all() {
        __TBB_ASSERT( is_owned_by_current_thread(), "'this' not owned by current thread" );
        prefix().owner->wait_for_all( *this, NULL );
    }

    //! The innermost task being executed by the current thread; implemented in the runtime library.
    static task& __TBB_EXPORTED_FUNC self();

    //! The task whose reference count includes *this (NULL for a root task).
    task* parent() const {return prefix().parent;}

    //! True if the task was stolen: owner differs from the parent's owner.
    /** Dereferences parent() — precondition is that this task has a parent. */
    bool is_stolen_task() const {
        internal::task_prefix& p = prefix();
        internal::task_prefix& q = parent()->prefix();
        return p.owner!=q.owner;
    }

    //------------------------------------------------------------------------
    // Debugging / introspection
    //------------------------------------------------------------------------

    //! Current execution state (stored as a byte in the prefix).
    state_type state() const {return state_type(prefix().state);}

    //! The internal reference count, narrowed to int (overflow asserted in debug builds).
    int ref_count() const {
#if TBB_USE_ASSERT
        internal::reference_count ref_count = prefix().ref_count;
        __TBB_ASSERT( ref_count==int(ref_count), "integer overflow error");
#endif
        return int(prefix().ref_count);
    }

    //! True if this task is owned by the calling thread; implemented in the runtime library.
    bool __TBB_EXPORTED_METHOD is_owned_by_current_thread() const;

    //------------------------------------------------------------------------
    // Affinity
    //------------------------------------------------------------------------

    //! An id as used for specifying affinity.
    /** 0 means no affinity — presumably; confirm against scheduler docs. */
    typedef internal::affinity_id affinity_id;

    //! Set affinity hint for this task.
    void set_affinity( affinity_id id ) {prefix().affinity = id;}

    //! Current affinity of this task.
    affinity_id affinity() const {return prefix().affinity;}

    //! Invoked by the scheduler when it decides where the task runs; default does nothing (runtime library).
    /** Override to be notified of the actual placement. */
    virtual void __TBB_EXPORTED_METHOD note_affinity( affinity_id id );

#if __TBB_EXCEPTIONS
    //! Initiate cancellation of the group this task belongs to.
    bool cancel_group_execution () { return prefix().context->cancel_group_execution(); }

    //! True if the group this task belongs to has received a cancellation request.
    bool is_cancelled () const { return prefix().context->is_group_execution_cancelled(); }
#endif

private:
    friend class task_list;
    friend class internal::scheduler;
    friend class internal::allocate_root_proxy;
#if __TBB_EXCEPTIONS
    friend class internal::allocate_root_with_context_proxy;
#endif
    friend class internal::allocate_continuation_proxy;
    friend class internal::allocate_child_proxy;
    friend class internal::allocate_additional_child_of_proxy;

    //! Get reference to the corresponding task_prefix.
    /** The prefix is laid out immediately before the task object, hence index [-1]. */
    internal::task_prefix& prefix( internal::version_tag* = NULL ) const {
        return reinterpret_cast<internal::task_prefix*>(const_cast<task*>(this))[-1];
    }
};
00603
00605
00606 class empty_task: public task {
00607 task* execute() {
00608 return NULL;
00609 }
00610 };
00611
00613
00615 class task_list: internal::no_copy {
00616 private:
00617 task* first;
00618 task** next_ptr;
00619 friend class task;
00620 public:
00622 task_list() : first(NULL), next_ptr(&first) {}
00623
00625 ~task_list() {}
00626
00628 bool empty() const {return !first;}
00629
00631 void push_back( task& task ) {
00632 task.prefix().next = NULL;
00633 *next_ptr = &task;
00634 next_ptr = &task.prefix().next;
00635 }
00636
00638 task& pop_front() {
00639 __TBB_ASSERT( !empty(), "attempt to pop item from empty task_list" );
00640 task* result = first;
00641 first = result->prefix().next;
00642 if( !first ) next_ptr = &first;
00643 return *result;
00644 }
00645
00647 void clear() {
00648 first=NULL;
00649 next_ptr=&first;
00650 }
00651 };
00652
00653 inline void task::spawn( task_list& list ) {
00654 __TBB_ASSERT( is_owned_by_current_thread(), "'this' not owned by current thread" );
00655 if( task* t = list.first ) {
00656 prefix().owner->spawn( *t, *list.next_ptr );
00657 list.clear();
00658 }
00659 }
00660
00661 inline void task::spawn_root_and_wait( task_list& root_list ) {
00662 if( task* t = root_list.first ) {
00663 __TBB_ASSERT( t->is_owned_by_current_thread(), "'this' not owned by current thread" );
00664 t->prefix().owner->spawn_root_and_wait( *t, *root_list.next_ptr );
00665 root_list.clear();
00666 }
00667 }
00668
00669 }
00670
00671 inline void *operator new( size_t bytes, const tbb::internal::allocate_root_proxy& ) {
00672 return &tbb::internal::allocate_root_proxy::allocate(bytes);
00673 }
00674
00675 inline void operator delete( void* task, const tbb::internal::allocate_root_proxy& ) {
00676 tbb::internal::allocate_root_proxy::free( *static_cast<tbb::task*>(task) );
00677 }
00678
#if __TBB_EXCEPTIONS
//! Placement new used by `new( task::allocate_root(ctx) ) T(...)`.
inline void *operator new( size_t bytes, const tbb::internal::allocate_root_with_context_proxy& p ) {
    tbb::task& t = p.allocate(bytes);
    return &t;
}

//! Matching placement delete: invoked only if the constructor called via the new above throws.
inline void operator delete( void* ptr, const tbb::internal::allocate_root_with_context_proxy& p ) {
    tbb::task* t = static_cast<tbb::task*>(ptr);
    p.free( *t );
}
#endif
00688
00689 inline void *operator new( size_t bytes, const tbb::internal::allocate_continuation_proxy& p ) {
00690 return &p.allocate(bytes);
00691 }
00692
00693 inline void operator delete( void* task, const tbb::internal::allocate_continuation_proxy& p ) {
00694 p.free( *static_cast<tbb::task*>(task) );
00695 }
00696
00697 inline void *operator new( size_t bytes, const tbb::internal::allocate_child_proxy& p ) {
00698 return &p.allocate(bytes);
00699 }
00700
00701 inline void operator delete( void* task, const tbb::internal::allocate_child_proxy& p ) {
00702 p.free( *static_cast<tbb::task*>(task) );
00703 }
00704
00705 inline void *operator new( size_t bytes, const tbb::internal::allocate_additional_child_of_proxy& p ) {
00706 return &p.allocate(bytes);
00707 }
00708
00709 inline void operator delete( void* task, const tbb::internal::allocate_additional_child_of_proxy& p ) {
00710 p.free( *static_cast<tbb::task*>(task) );
00711 }
00712
00713 #endif