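Every entry in the listing below reaches team state through the t member of union kmp_team (kmp_team_t), which is why each hit has the shape team->t.t_xxx. As context for the listing, here is a heavily abridged sketch of that layout; the field names and the union-with-padding idiom follow kmp.h, but the exact field set, ordering, types, and alignment/padding macros are simplified and should not be treated as the real definition.

  /* Abridged sketch only -- see kmp.h for the real definition. */
  typedef struct ident ident_t;               /* source location descriptor */
  typedef union kmp_info kmp_info_t;          /* per-thread descriptor      */
  typedef struct kmp_taskdata kmp_taskdata_t; /* task descriptor            */
  union kmp_team;                             /* forward declaration        */

  typedef struct kmp_base_team {
    int t_master_tid;           /* tid of the master within the parent team */
    union kmp_team *t_parent;   /* enclosing (parent) team                  */
    int t_nproc;                /* number of threads in the team            */
    int t_serialized;           /* nesting depth of serialized regions      */
    int t_level;                /* nesting level of the parallel region     */
    int t_active_level;         /* nesting level of active regions only     */
    ident_t *t_ident;           /* location of the enclosing construct      */
    kmp_info_t **t_threads;     /* t_threads[tid] -> member's kmp_info      */
    kmp_taskdata_t *t_implicit_task_taskdata; /* one implicit task per tid  */
    /* ... plus barrier state (t_bar), dispatch buffers (t_disp_buffer,
     *     t_dispatch), task teams (t_task_team[2]), argv, ICVs, ...        */
  } kmp_base_team_t;

  /* The team object is a union so it can be padded to a cache line; all
   * real state lives in the t member, hence team->t.t_xxx everywhere.     */
  typedef union kmp_team {
    kmp_base_team_t t;
    double t_align;                      /* force worst-case alignment      */
    char t_pad[sizeof(kmp_base_team_t)]; /* padding (rounded up in kmp.h)   */
  } kmp_team_t;

With that layout in mind, an entry such as kmp.h line 3067 in the list below, return team->t.t_threads[tid]->th.th_info.ds.ds_gtid;, walks from the team to the tid-th member's thread descriptor and reads its global thread id.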

References

projects/openmp/runtime/src/kmp.h
 3067   return team->t.t_threads[tid]->th.th_info.ds.ds_gtid;
projects/openmp/runtime/src/kmp_barrier.cpp
   54   kmp_info_t **other_threads = team->t.t_threads;
   86     kmp_balign_team_t *team_bar = &team->t.t_bar[bt];
  166     other_threads = team->t.t_threads;
  179             __kmp_init_implicit_task(team->t.t_ident, team->t.t_threads[i],
  181             ngo_store_icvs(&team->t.t_implicit_task_taskdata[i].td_icvs,
  182                            &team->t.t_implicit_task_taskdata[0].td_icvs);
  299   kmp_info_t **other_threads = team->t.t_threads;
  324     new_state = team->t.t_bar[bt].b_arrived + KMP_BARRIER_STATE_BUMP;
  388       team->t.t_bar[bt].b_arrived = new_state;
  390       team->t.t_bar[bt].b_arrived += KMP_BARRIER_STATE_BUMP;
  464     kmp_info_t **other_threads = team->t.t_threads;
  481           __kmp_init_implicit_task(team->t.t_ident,
  482                                    team->t.t_threads[child_tid], team,
  484           copy_icvs(&team->t.t_implicit_task_taskdata[child_tid].td_icvs,
  485                     &team->t.t_implicit_task_taskdata[0].td_icvs);
  516   kmp_info_t **other_threads = team->t.t_threads;
  567       new_state = team->t.t_bar[bt].b_arrived + KMP_BARRIER_STATE_BUMP;
  614       team->t.t_bar[bt].b_arrived += KMP_BARRIER_STATE_BUMP;
  616       team->t.t_bar[bt].b_arrived = new_state;
  657                 &team->t.t_implicit_task_taskdata[tid].td_icvs);
  699   other_threads = team->t.t_threads;
  774     __kmp_init_implicit_task(team->t.t_ident, team->t.t_threads[tid], team, tid,
  776     copy_icvs(&team->t.t_implicit_task_taskdata[tid].td_icvs,
  839         &team->t.t_threads[thr_bar->parent_tid]->th.th_bar[bt].bb;
  844         &team->t.t_threads[thr_bar->parent_tid]->th.th_bar[bt].bb;
  868   kmp_info_t **other_threads = team->t.t_threads;
  871   int level = team->t.t_level;
  899         (kmp_uint64)team->t.t_bar[bt].b_arrived + KMP_BARRIER_STATE_BUMP;
  907                 : team->t.t_bar[bt].b_arrived | thr_bar->leaf_state;
 1020       thr_bar->b_arrived = team->t.t_bar[bt].b_arrived + KMP_BARRIER_STATE_BUMP;
 1026     team->t.t_bar[bt].b_arrived = new_state;
 1099   int level = team->t.t_level;
 1100   if (team->t.t_threads[0]
 1102     if (team->t.t_pkfn != (microtask_t)__kmp_teams_master &&
 1125     __kmp_init_implicit_task(team->t.t_ident, team->t.t_threads[tid], team, tid,
 1130                 &team->t.t_implicit_task_taskdata[tid].td_icvs);
 1136         copy_icvs(&team->t.t_implicit_task_taskdata[tid].td_icvs,
 1144         copy_icvs(&team->t.t_implicit_task_taskdata[tid].td_icvs,
 1168               &team->t.t_threads[child_tid]->th.th_bar[bt].bb;
 1198             kmp_info_t *child_thr = team->t.t_threads[child_tid];
 1225           kmp_info_t *child_thr = team->t.t_threads[child_tid];
 1243       copy_icvs(&team->t.t_implicit_task_taskdata[tid].td_icvs,
 1307     my_parallel_data = OMPT_CUR_TEAM_DATA(this_thr);
 1328   if (!team->t.t_serialized) {
 1355       this_thr->th.th_team_bt_intervals = KMP_BLOCKTIME_INTERVAL(team, tid);
 1425         kmp_int32 cancel_request = KMP_ATOMIC_LD_RLX(&team->t.t_cancel_request);
 1429           KMP_ATOMIC_ST_RLX(&team->t.t_cancel_request, cancel_noreq);
 1445           team->t.t_active_level == 1) {
 1448         kmp_info_t **other_threads = team->t.t_threads;
 1624   if (!team->t.t_serialized) {
 1714       codeptr = team->t.ompt_team_info.master_return_address;
 1716     my_parallel_data = OMPT_CUR_TEAM_DATA(this_thr);
 1763     this_thr->th.th_team_bt_intervals = KMP_BLOCKTIME_INTERVAL(team, tid);
 1805       KMP_CHECK_UPDATE(team->t.t_display_affinity, 0);
 1831         team->t.t_active_level == 1) {
 1833       ident_t *loc = team->t.t_ident;
 1834       kmp_info_t **other_threads = team->t.t_threads;
 1957       this_thr->th.th_team_bt_intervals = KMP_BLOCKTIME_INTERVAL(team, tid);
 1999       codeptr = team->t.ompt_team_info.master_return_address;
 2072   kmp_proc_bind_t proc_bind = team->t.t_proc_bind;
 2075     if (__kmp_affinity_type == affinity_balanced && team->t.t_size_changed) {
 2076       __kmp_balanced_affinity(this_thr, team->t.t_nproc);
 2090     if (team->t.t_display_affinity
 2092         || (__kmp_affinity_type == affinity_balanced && team->t.t_size_changed)
 2097       this_thr->th.th_prev_num_threads = team->t.t_nproc;
 2098       this_thr->th.th_prev_level = team->t.t_level;
 2102     KMP_CHECK_UPDATE(this_thr->th.th_def_allocator, team->t.t_def_allocator);
projects/openmp/runtime/src/kmp_cancel.cpp
   52         this_team->t.t_cancel_request.compare_exchange_strong(old, cncl_kind);
  159         if (this_team->t.t_cancel_request) {
  160           if (cncl_kind == this_team->t.t_cancel_request) {
  257     switch (KMP_ATOMIC_LD_RLX(&(this_team->t.t_cancel_request))) {
  263       this_team->t.t_cancel_request = cancel_noreq;
  273       this_team->t.t_cancel_request = cancel_noreq;
  318       return this_team->t.t_cancel_request == cancel_kind;
projects/openmp/runtime/src/kmp_csupport.cpp
  149   return __kmp_entry_thread()->th.th_team->t.t_nproc;
  295       ompt_lw_taskteam_t *lwt = parent_team->t.ompt_serialized_team_info;
  301             parent_team->t.t_implicit_task_taskdata[tid].ompt_task_info.frame);
  393       this_thr->th.th_team->t.t_level; // AC: can be >0 on host
  399     parent_team->t.t_implicit_task_taskdata[tid]
  525   KMP_ASSERT(serial_team->t.t_serialized);
  547           &(serial_team->t.ompt_team_info.parallel_data), parent_task_data,
  558   top = serial_team->t.t_control_stack_top;
  559   if (top && top->serial_nesting_level == serial_team->t.t_serialized) {
  560     copy_icvs(&serial_team->t.t_threads[0]->th.th_current_task->td_icvs, top);
  561     serial_team->t.t_control_stack_top = top->next;
  566   serial_team->t.t_level--;
  572         serial_team->t.t_dispatch->th_disp_buffer;
  573     serial_team->t.t_dispatch->th_disp_buffer =
  574         serial_team->t.t_dispatch->th_disp_buffer->next;
  577   this_thr->th.th_def_allocator = serial_team->t.t_def_allocator; // restore
  579   --serial_team->t.t_serialized;
  580   if (serial_team->t.t_serialized == 0) {
  585     if (__kmp_inherit_fp_control && serial_team->t.t_fp_control_saved) {
  587       __kmp_load_x87_fpu_control_word(&serial_team->t.t_x87_fpu_control_word);
  588       __kmp_load_mxcsr(&serial_team->t.t_mxcsr);
  592     this_thr->th.th_team = serial_team->t.t_parent;
  593     this_thr->th.th_info.ds.ds_tid = serial_team->t.t_master_tid;
  596     this_thr->th.th_team_nproc = serial_team->t.t_parent->t.t_nproc; /*  JPH */
  598         serial_team->t.t_parent->t.t_threads[0]; /* JPH */
  599     this_thr->th.th_team_serialized = this_thr->th.th_team->t.t_serialized;
  603         &this_thr->th.th_team->t.t_dispatch[serial_team->t.t_master_tid];
  613           this_thr->th.th_team->t.t_task_team[this_thr->th.th_task_state];
  788           ompt_scope_begin, &(team->t.ompt_team_info.parallel_data),
  789           &(team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_data),
  832         ompt_scope_end, &(team->t.ompt_team_info.parallel_data),
  833         &(team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_data),
  880     lck = (ompt_wait_id_t)(uintptr_t)&team->t.t_ordered.dt.t_value;
  950             ->t.t_ordered.dt.t_value,
 1758             &(team->t.ompt_team_info.parallel_data),
 1759             &(team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_data),
 1766             &(team->t.ompt_team_info.parallel_data),
 1767             &(team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_data),
 1771             &(team->t.ompt_team_info.parallel_data),
 1772             &(team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_data),
 1803         &(team->t.ompt_team_info.parallel_data),
 1804         &(team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_data), 1,
 2076   data_ptr = &__kmp_team_from_gtid(gtid)->t.t_copypriv_data;
 3332     if (team->t.t_level == th->th.th_teams_level) {
 3336       th->th.th_info.ds.ds_tid = team->t.t_master_tid;
 3337       th->th.th_team = team->t.t_parent;
 3338       th->th.th_team_nproc = th->th.th_team->t.t_nproc;
 3339       th->th.th_task_team = th->th.th_team->t.t_task_team[0];
 3354   th->th.th_team_nproc = team->t.t_nproc;
 3355   th->th.th_task_team = team->t.t_task_team[task_state];
 3884   if (team->t.t_serialized) {
 3891   sh_buf = &team->t.t_disp_buffer[idx % __kmp_dispatch_num_buffers];
 3997   if (team->t.t_serialized) {
 4090   if (team->t.t_serialized) {
 4141   if (team->t.t_serialized) {
 4150         &team->t.t_disp_buffer[idx % __kmp_dispatch_num_buffers];
projects/openmp/runtime/src/kmp_dispatch.cpp
  133   active = !team->t.t_serialized;
  139       team->t.t_active_level == 1;
  179       schedule = team->t.t_sched.r_sched_type;
  191       chunk = team->t.t_sched.chunk;
  246       schedule = team->t.t_sched.r_sched_type;
  258         chunk = team->t.t_sched.chunk * chunk;
  800   active = !team->t.t_serialized;
  848       team->t.t_active_level == 1;
  864         &team->t.t_disp_buffer[my_buffer_index % __kmp_dispatch_num_buffers]);
 1002   if (!th->th.th_team->t.t_serialized) {
 1065   if (!th->th.th_team->t.t_serialized) {
 1209         kmp_info_t **other_threads = team->t.t_threads;
 1307         kmp_info_t **other_threads = team->t.t_threads;
 1910   if (team->t.t_serialized) {
 2070           kmp_info_t **other_threads = team->t.t_threads;
 2192   team_id = team->t.t_master_tid;
projects/openmp/runtime/src/kmp_dispatch.h
  340   if (!th->th.th_team->t.t_serialized) {
  412   if (!th->th.th_team->t.t_serialized) {
projects/openmp/runtime/src/kmp_ftn_entry.h
  136   else if (__kmp_zero_bt && !get__bt_set(team, tid)) {
  145     return get__blocktime(team, tid);
  655     return (th->th.th_team->t.t_active_level ? 1 : 0);
  704   return __kmp_entry_thread()->th.th_team->t.t_active_level;
  713   return __kmp_entry_thread()->th.th_team->t.t_level;
projects/openmp/runtime/src/kmp_gsupport.cpp
  138             &(team->t.ompt_team_info.parallel_data),
  139             &(team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_data),
  146             &(team->t.ompt_team_info.parallel_data),
  147             &(team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_data),
  151             &(team->t.ompt_team_info.parallel_data),
  152             &(team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_data),
  193   retval = __kmp_team_from_gtid(gtid)->t.t_copypriv_data;
  216   __kmp_team_from_gtid(gtid)->t.t_copypriv_data = data;
  401       ompt_team_size = __kmp_team_from_gtid(gtid)->t.t_nproc;
  466   if (!thr->th.th_team->t.t_serialized) {
projects/openmp/runtime/src/kmp_itt.inl
   86   if (team->t.t_active_level > 1) {
  190     if (team->t.t_active_level + serialized > 1) {
  439   if (team->t.t_active_level > 1) {
  501         team->t.t_bar[bt].b_arrived / KMP_BARRIER_STATE_BUMP + delta;
  566         loc = team->t.t_ident;
  924     if (!t->t.t_serialized) {
  936     if (!t->t.t_serialized) {
  948     if (!t->t.t_serialized) {
projects/openmp/runtime/src/kmp_runtime.cpp
  489   int num_disp_buff = team->t.t_max_nproc > 1 ? __kmp_dispatch_num_buffers : 2;
  493   __kmp_print_storage_map_gtid(-1, &team->t.t_bar[0],
  494                                &team->t.t_bar[bs_last_barrier],
  498   __kmp_print_storage_map_gtid(-1, &team->t.t_bar[bs_plain_barrier],
  499                                &team->t.t_bar[bs_plain_barrier + 1],
  503   __kmp_print_storage_map_gtid(-1, &team->t.t_bar[bs_forkjoin_barrier],
  504                                &team->t.t_bar[bs_forkjoin_barrier + 1],
  509   __kmp_print_storage_map_gtid(-1, &team->t.t_bar[bs_reduction_barrier],
  510                                &team->t.t_bar[bs_reduction_barrier + 1],
  516       -1, &team->t.t_dispatch[0], &team->t.t_dispatch[num_thr],
  520       -1, &team->t.t_threads[0], &team->t.t_threads[num_thr],
  523   __kmp_print_storage_map_gtid(-1, &team->t.t_disp_buffer[0],
  524                                &team->t.t_disp_buffer[num_disp_buff],
  671   if (!team->t.t_serialized) {
  673     KMP_WAIT(&team->t.t_ordered.dt.t_value, __kmp_tid_from_gtid(gtid), KMP_EQ,
  693   if (!team->t.t_serialized) {
  698     team->t.t_ordered.dt.t_value = ((tid + 1) % team->t.t_nproc);
  723   if (team->t.t_serialized) {
  732     if (team->t.t_construct == old_this) {
  733       status = __kmp_atomic_compare_store_acq(&team->t.t_construct, old_this,
  739         team->t.t_active_level ==
  782   kmp_info_t *this_thr = parent_team->t.t_threads[master_tid];
  787   if (!get__dynamic_2(parent_team, master_tid)) {
  808                    (root->r.r_active ? 1 : root->r.r_hot_team->t.t_nproc);
  824       new_nthreads = __kmp_get_random(parent_team->t.t_threads[master_tid]);
  844           (root->r.r_active ? 1 : root->r.r_hot_team->t.t_nproc) >
  847                       (root->r.r_active ? 1 : root->r.r_hot_team->t.t_nproc);
  853     if (!get__dynamic_2(parent_team, master_tid) && (!__kmp_reserve_warn)) {
  875           (root->r.r_active ? 1 : root->r.r_hot_team->t.t_nproc) >
  878                       (root->r.r_active ? 1 : root->r.r_hot_team->t.t_nproc);
  884     if (!get__dynamic_2(parent_team, master_tid) && (!__kmp_reserve_warn)) {
  910           (root->r.r_active ? 1 : root->r.r_hot_team->t.t_nproc) >
  914                         (root->r.r_active ? 1 : root->r.r_hot_team->t.t_nproc) -
  923       if (!get__dynamic_2(parent_team, master_tid) && (!__kmp_reserve_warn)) {
  969   master_th->th.th_team_nproc = team->t.t_nproc;
  972   master_th->th.th_dispatch = &team->t.t_dispatch[0];
  980     int level = team->t.t_active_level - 1; // index in array of hot teams
  986       if (team->t.t_pkfn != (microtask_t)__kmp_teams_master &&
  987           master_th->th.th_teams_level == team->t.t_level) {
 1000         hot_teams[level].hot_team_nth = team->t.t_nproc;
 1012     team->t.t_threads[0] = master_th;
 1016     for (i = 1; i < team->t.t_nproc; i++) {
 1020       team->t.t_threads[i] = thr;
 1035         kmp_balign_t *balign = team->t.t_threads[i]->th.th_bar;
 1037           balign[b].bb.b_arrived = team->t.t_bar[b].b_arrived;
 1051   if (__kmp_display_affinity && team->t.t_display_affinity != 1) {
 1052     for (i = 0; i < team->t.t_nproc; i++) {
 1053       kmp_info_t *thr = team->t.t_threads[i];
 1054       if (thr->th.th_prev_num_threads != team->t.t_nproc ||
 1055           thr->th.th_prev_level != team->t.t_level) {
 1056         team->t.t_display_affinity = 1;
 1088     KMP_CHECK_UPDATE(team->t.t_x87_fpu_control_word, x87_fpu_control_word);
 1089     KMP_CHECK_UPDATE(team->t.t_mxcsr, mxcsr);
 1092     KMP_CHECK_UPDATE(team->t.t_fp_control_saved, TRUE);
 1096     KMP_CHECK_UPDATE(team->t.t_fp_control_saved, FALSE);
 1103   if (__kmp_inherit_fp_control && team->t.t_fp_control_saved) {
 1112     if (team->t.t_x87_fpu_control_word != x87_fpu_control_word) {
 1114       __kmp_load_x87_fpu_control_word(&team->t.t_x87_fpu_control_word);
 1117     if (team->t.t_mxcsr != mxcsr) {
 1118       __kmp_load_mxcsr(&team->t.t_mxcsr);
 1201     int level = this_thr->th.th_team->t.t_level;
 1203     if (serial_team->t.t_serialized) {
 1221       new_team->t.t_threads[0] = this_thr;
 1222       new_team->t.t_parent = this_thr->th.th_team;
 1245     serial_team->t.t_ident = loc;
 1246     serial_team->t.t_serialized = 1;
 1247     serial_team->t.t_nproc = 1;
 1248     serial_team->t.t_parent = this_thr->th.th_team;
 1249     serial_team->t.t_sched.sched = this_thr->th.th_team->t.t_sched.sched;
 1251     serial_team->t.t_master_tid = this_thr->th.th_info.ds.ds_tid;
 1289     serial_team->t.t_level = serial_team->t.t_parent->t.t_level + 1;
 1290     serial_team->t.t_active_level = serial_team->t.t_parent->t.t_active_level;
 1291     serial_team->t.t_def_allocator = this_thr->th.th_def_allocator; // save
 1297     if (!serial_team->t.t_dispatch->th_disp_buffer) {
 1298       serial_team->t.t_dispatch->th_disp_buffer =
 1302     this_thr->th.th_dispatch = serial_team->t.t_dispatch;
 1312     ++serial_team->t.t_serialized;
 1313     this_thr->th.th_team_serialized = serial_team->t.t_serialized;
 1316     int level = this_thr->th.th_team->t.t_level;
 1323     serial_team->t.t_level++;
 1334       disp_buffer->next = serial_team->t.t_dispatch->th_disp_buffer;
 1335       serial_team->t.t_dispatch->th_disp_buffer = disp_buffer;
 1337     this_thr->th.th_dispatch = serial_team->t.t_dispatch;
 1341   KMP_CHECK_UPDATE(serial_team->t.t_cancel_request, cancel_noreq);
 1346     if (this_thr->th.th_prev_level != serial_team->t.t_level ||
 1350       this_thr->th.th_prev_level = serial_team->t.t_level;
 1358   serial_team->t.ompt_team_info.master_return_address = codeptr;
 1374           ompt_scope_begin, OMPT_CUR_TEAM_DATA(this_thr),
 1462     level = parent_team->t.t_level;
 1464     active_level = parent_team->t.t_active_level;
 1483                             : get__nproc_2(parent_team, master_tid);
 1503       parent_team->t.t_ident = loc;
 1505       parent_team->t.t_argc = argc;
 1506       argv = (void **)parent_team->t.t_argv;
 1540                 ompt_scope_begin, OMPT_CUR_TEAM_DATA(master_th),
 1553         parent_team->t.t_serialized--;
 1558           __kmp_invoke_microtask(microtask, gtid, 0, argc, parent_team->t.t_argv
 1575           ompt_parallel_data = *OMPT_CUR_TEAM_DATA(master_th);
 1589       parent_team->t.t_pkfn = microtask;
 1590       parent_team->t.t_invoke = invoker;
 1592       parent_team->t.t_active_level++;
 1593       parent_team->t.t_level++;
 1594       parent_team->t.t_def_allocator = master_th->th.th_def_allocator; // save
 1609           kmp_info_t **other_threads = parent_team->t.t_threads;
 1610           parent_team->t.t_nproc = master_set_numthreads;
 1640       if (!parent_team->t.t_invoke(gtid)) {
 1659     if (parent_team->t.t_active_level >=
 1668               : get__nproc_2(
 1725         master_th->th.th_serial_team->t.t_ident = loc;
 1728           master_th->th.th_serial_team->t.t_level--;
 1751                   ompt_scope_begin, OMPT_CUR_TEAM_DATA(master_th),
 1768                                    parent_team->t.t_argv
 1785             ompt_parallel_data = *OMPT_CUR_TEAM_DATA(master_th);
 1801           team->t.t_invoke = invoker;
 1803           team->t.t_argc = argc;
 1804           argv = (void **)team->t.t_argv;
 1816               argv[i] = parent_team->t.t_argv[i];
 1820           team->t.t_level--;
 1870                   ompt_scope_begin, OMPT_CUR_TEAM_DATA(master_th),
 1905             ompt_parallel_data = *OMPT_CUR_TEAM_DATA(master_th);
 2027     KMP_CHECK_UPDATE(team->t.t_master_tid, master_tid);
 2028     KMP_CHECK_UPDATE(team->t.t_master_this_cons, master_this_cons);
 2029     KMP_CHECK_UPDATE(team->t.t_ident, loc);
 2030     KMP_CHECK_UPDATE(team->t.t_parent, parent_team);
 2031     KMP_CHECK_UPDATE_SYNC(team->t.t_pkfn, microtask);
 2033     KMP_CHECK_UPDATE_SYNC(team->t.ompt_team_info.master_return_address,
 2036     KMP_CHECK_UPDATE(team->t.t_invoke, invoker); // TODO move to root, maybe
 2039       int new_level = parent_team->t.t_level + 1;
 2040       KMP_CHECK_UPDATE(team->t.t_level, new_level);
 2041       new_level = parent_team->t.t_active_level + 1;
 2042       KMP_CHECK_UPDATE(team->t.t_active_level, new_level);
 2045       int new_level = parent_team->t.t_level;
 2046       KMP_CHECK_UPDATE(team->t.t_level, new_level);
 2047       new_level = parent_team->t.t_active_level;
 2048       KMP_CHECK_UPDATE(team->t.t_active_level, new_level);
 2050     kmp_r_sched_t new_sched = get__sched_2(parent_team, master_tid);
 2052     KMP_CHECK_UPDATE(team->t.t_sched.sched, new_sched.sched);
 2054     KMP_CHECK_UPDATE(team->t.t_cancel_request, cancel_noreq);
 2055     KMP_CHECK_UPDATE(team->t.t_def_allocator, master_th->th.th_def_allocator);
 2130     argv = (void **)team->t.t_argv;
 2145         KMP_CHECK_UPDATE(argv[i], team->t.t_parent->t.t_argv[i]);
 2150     KMP_CHECK_UPDATE(team->t.t_master_active, master_active);
 2165     if (team->t.t_active_level == 1 // only report frames at level 1
 2177           team->t.t_region_time = tmp_time;
 2184         __kmp_itt_region_forking(gtid, team->t.t_nproc, 0);
 2198       team->t.t_stack_id =
 2231   if (!team->t.t_invoke(gtid)) {
 2262       ((team->t.t_serialized) ? ompt_state_work_serial
 2300   parent_team = team->t.t_parent;
 2305   void *team_microtask = (void *)team->t.t_pkfn;
 2323   if (team->t.t_serialized) {
 2326       int level = team->t.t_level;
 2331         team->t.t_level++;
 2336         team->t.t_serialized++;
 2350   master_active = team->t.t_master_active;
 2364   ompt_data_t *parallel_data = &(team->t.ompt_team_info.parallel_data);
 2365   void *codeptr = team->t.ompt_team_info.master_return_address;
 2371         (__itt_caller)team->t
 2376   if (team->t.t_active_level == 1 &&
 2383       __kmp_itt_frame_submit(gtid, team->t.t_region_time,
 2393       team->t.t_pkfn != (microtask_t)__kmp_teams_master &&
 2394       team->t.t_level == master_th->th.th_teams_level + 1) {
 2403         int ompt_team_size = team->t.t_nproc;
 2410       ompt_parallel_data = *OMPT_CUR_TEAM_DATA(master_th);
 2415     team->t.t_level--;
 2416     team->t.t_active_level--;
 2426       kmp_info_t **other_threads = team->t.t_threads;
 2427       team->t.t_nproc = new_num;
 2437           balign[b].bb.b_arrived = team->t.t_bar[b].b_arrived;
 2461   master_th->th.th_info.ds.ds_tid = team->t.t_master_tid;
 2462   master_th->th.th_local.this_construct = team->t.t_master_this_cons;
 2464   master_th->th.th_dispatch = &parent_team->t.t_dispatch[team->t.t_master_tid];
 2472       team->t.t_level > master_th->th.th_teams_level) {
 2485       int ompt_team_size = (flags == ompt_task_initial) ? 0 : team->t.t_nproc;
 2501   master_th->th.th_first_place = team->t.t_first_place;
 2502   master_th->th.th_last_place = team->t.t_last_place;
 2504   master_th->th.th_def_allocator = team->t.t_def_allocator;
 2520   master_th->th.th_team_nproc = parent_team->t.t_nproc;
 2521   master_th->th.th_team_master = parent_team->t.t_threads[0];
 2522   master_th->th.th_team_serialized = parent_team->t.t_serialized;
 2525   if (parent_team->t.t_serialized &&
 2548         parent_team->t.t_task_team[master_th->th.th_task_state];
 2584   if (thread->th.th_team->t.t_serialized > 1) {
 2587     if (thread->th.th_team->t.t_control_stack_top == NULL) {
 2590       if (thread->th.th_team->t.t_control_stack_top->serial_nesting_level !=
 2591           thread->th.th_team->t.t_serialized) {
 2602       control->serial_nesting_level = thread->th.th_team->t.t_serialized;
 2604       control->next = thread->th.th_team->t.t_control_stack_top;
 2605       thread->th.th_team->t.t_control_stack_top = control;
 2637       (root->r.r_hot_team->t.t_nproc > new_nth)
 2648     for (f = new_nth; f < hot_team->t.t_nproc; f++) {
 2653         hot_team->t.t_threads[f]->th.th_task_team = NULL;
 2655       __kmp_free_thread(hot_team->t.t_threads[f]);
 2656       hot_team->t.t_threads[f] = NULL;
 2658     hot_team->t.t_nproc = new_nth;
 2671       hot_team->t.t_threads[f]->th.th_team_nproc = new_nth;
 2674     hot_team->t.t_size_changed = -1;
 2864   ii = team->t.t_level;
 2887   dd = team->t.t_serialized;
 2890     for (dd = team->t.t_serialized; (dd > 0) && (ii > level); dd--, ii--) {
 2892     if ((team->t.t_serialized) && (!dd)) {
 2893       team = team->t.t_parent;
 2897       team = team->t.t_parent;
 2898       dd = team->t.t_serialized;
 2903   return (dd > 1) ? (0) : (team->t.t_master_tid);
 2922   ii = team->t.t_level;
 2943     for (dd = team->t.t_serialized; (dd > 0) && (ii > level); dd--, ii--) {
 2945     if (team->t.t_serialized && (!dd)) {
 2946       team = team->t.t_parent;
 2950       team = team->t.t_parent;
 2955   return team->t.t_nproc;
 2997   if (!realloc || argc > team->t.t_max_argc) {
 3003     if (realloc && team->t.t_argv != &team->t.t_inline_argv[0])
 3004       __kmp_free((void *)team->t.t_argv);
 3008       team->t.t_max_argc = KMP_INLINE_ARGV_ENTRIES;
 3012       team->t.t_argv = &team->t.t_inline_argv[0];
 3015             -1, &team->t.t_inline_argv[0],
 3016             &team->t.t_inline_argv[KMP_INLINE_ARGV_ENTRIES],
 3018             team->t.t_id);
 3022       team->t.t_max_argc = (argc <= (KMP_MIN_MALLOC_ARGV_ENTRIES >> 1))
 3028       team->t.t_argv =
 3029           (void **)__kmp_page_allocate(sizeof(void *) * team->t.t_max_argc);
 3031         __kmp_print_storage_map_gtid(-1, &team->t.t_argv[0],
 3032                                      &team->t.t_argv[team->t.t_max_argc],
 3033                                      sizeof(void *) * team->t.t_max_argc,
 3034                                      "team_%d.t_argv", team->t.t_id);
 3043   team->t.t_threads =
 3045   team->t.t_disp_buffer = (dispatch_shared_info_t *)__kmp_allocate(
 3047   team->t.t_dispatch =
 3049   team->t.t_implicit_task_taskdata =
 3051   team->t.t_max_nproc = max_nth;
 3055     team->t.t_disp_buffer[i].buffer_index = i;
 3056     team->t.t_disp_buffer[i].doacross_buf_idx = i;
 3063   for (i = 0; i < team->t.t_max_nproc; ++i) {
 3064     if (team->t.t_dispatch[i].th_disp_buffer != NULL) {
 3065       __kmp_free(team->t.t_dispatch[i].th_disp_buffer);
 3066       team->t.t_dispatch[i].th_disp_buffer = NULL;
 3072   __kmp_free(team->t.t_threads);
 3073   __kmp_free(team->t.t_disp_buffer);
 3074   __kmp_free(team->t.t_dispatch);
 3075   __kmp_free(team->t.t_implicit_task_taskdata);
 3076   team->t.t_threads = NULL;
 3077   team->t.t_disp_buffer = NULL;
 3078   team->t.t_dispatch = NULL;
 3079   team->t.t_implicit_task_taskdata = 0;
 3083   kmp_info_t **oldThreads = team->t.t_threads;
 3085   __kmp_free(team->t.t_disp_buffer);
 3086   __kmp_free(team->t.t_dispatch);
 3087   __kmp_free(team->t.t_implicit_task_taskdata);
 3090   KMP_MEMCPY(team->t.t_threads, oldThreads,
 3091              team->t.t_nproc * sizeof(kmp_info_t *));
 3135   copy_icvs(&gx_icvs, &team->t.t_threads[0]->th.th_current_task->td_icvs);
 3183   root_team->t.t_control_stack_top = NULL;
 3186   root_team->t.t_threads[0] = NULL;
 3187   root_team->t.t_nproc = 1;
 3188   root_team->t.t_serialized = 1;
 3190   root_team->t.t_sched.sched = r_sched.sched;
 3214   root_team->t.t_control_stack_top = NULL;
 3217   hot_team->t.t_parent = root_team;
 3220   hot_team_max_nth = hot_team->t.t_max_nproc;
 3222     hot_team->t.t_threads[f] = NULL;
 3224   hot_team->t.t_nproc = 1;
 3226   hot_team->t.t_sched.sched = r_sched.sched;
 3227   hot_team->t.t_size_changed = 0;
 3728   root->r.r_root_team->t.t_threads[0] = root_thread;
 3729   root->r.r_hot_team->t.t_threads[0] = root_thread;
 3730   root_thread->th.th_serial_team->t.t_threads[0] = root_thread;
 3732   root_thread->th.th_serial_team->t.t_serialized = 0;
 3837       kmp_info_t *th = team->t.t_threads[i];
 3855   int n = hot_team->t.t_nproc;
 3868     for (i = 0; i < hot_team->t.t_nproc; ++i) {
 3869       kmp_info_t *th = hot_team->t.t_threads[i];
 4035   kmp_info_t *master = team->t.t_threads[0];
 4063   this_thr->th.th_team_nproc = team->t.t_nproc;
 4065   this_thr->th.th_team_serialized = team->t.t_serialized;
 4082   this_thr->th.th_dispatch = &team->t.t_dispatch[tid];
 4130         (team->t.t_max_nproc == 1 ? 1 : __kmp_dispatch_num_buffers);
 4146             &dispatch->th_disp_buffer[team->t.t_max_nproc == 1
 4151             gtid, team->t.t_id, gtid);
 4321   serial_team->t.t_serialized = 0; // AC: the team created in reserve, not for
 4323   serial_team->t.t_threads[0] = new_thr;
 4429   KMP_CHECK_UPDATE(team->t.t_ident, loc);
 4431   KMP_CHECK_UPDATE(team->t.t_id, KMP_GEN_TEAM_ID());
 4433   __kmp_init_implicit_task(loc, team->t.t_threads[0], team, 0, FALSE);
 4434   copy_icvs(&team->t.t_implicit_task_taskdata[0].td_icvs, new_icvs);
 4454   team->t.t_master_tid = 0; /* not needed */
 4456   team->t.t_serialized = new_nproc > 1 ? 0 : 1;
 4457   team->t.t_nproc = new_nproc;
 4460   team->t.t_next_pool = NULL;
 4464   TCW_SYNC_PTR(team->t.t_pkfn, NULL); /* not needed */
 4465   team->t.t_invoke = NULL; /* not needed */
 4468   team->t.t_sched.sched = new_icvs->sched.sched;
 4471   team->t.t_fp_control_saved = FALSE; /* not needed */
 4472   team->t.t_x87_fpu_control_word = 0; /* not needed */
 4473   team->t.t_mxcsr = 0; /* not needed */
 4476   team->t.t_construct = 0;
 4478   team->t.t_ordered.dt.t_value = 0;
 4479   team->t.t_master_active = FALSE;
 4488   team->t.t_control_stack_top = NULL;
 4523   kmp_info_t *master_th = team->t.t_threads[0];
 4525   kmp_proc_bind_t proc_bind = team->t.t_proc_bind;
 4529   team->t.t_first_place = first_place;
 4530   team->t.t_last_place = last_place;
 4547     int n_th = team->t.t_nproc;
 4549       kmp_info_t *th = team->t.t_threads[f];
 4555           team->t.t_display_affinity != 1) {
 4556         team->t.t_display_affinity = 1;
 4568     int n_th = team->t.t_nproc;
 4578         kmp_info_t *th = team->t.t_threads[f];
 4592             team->t.t_display_affinity != 1) {
 4593           team->t.t_display_affinity = 1;
 4610         kmp_info_t *th = team->t.t_threads[f];
 4617             team->t.t_display_affinity != 1) {
 4618           team->t.t_display_affinity = 1;
 4660     int n_th = team->t.t_nproc;
 4683           kmp_info_t *th = team->t.t_threads[f];
 4689               team->t.t_display_affinity != 1) {
 4690             team->t.t_display_affinity = 1;
 4778             th = team->t.t_threads[f];
 4784                 team->t.t_display_affinity != 1) {
 4785               team->t.t_display_affinity = 1;
 4809         kmp_info_t *th = team->t.t_threads[f];
 4816             team->t.t_display_affinity != 1) {
 4817           team->t.t_display_affinity = 1;
 4891     level = team->t.t_active_level;
 4895               team->t.t_pkfn ==
 4898                   team->t.t_level)) { // or nested parallel inside the teams
 4932     if (team->t.t_nproc == new_nproc) { // Check changes in number of threads
 4936       if (team->t.t_size_changed == -1) {
 4937         team->t.t_size_changed = 1;
 4939         KMP_CHECK_UPDATE(team->t.t_size_changed, 0);
 4945       KMP_CHECK_UPDATE(team->t.t_sched.sched, new_sched.sched);
 4952       __kmp_push_current_task_to_thread(team->t.t_threads[0], team, 0);
 4955       if ((team->t.t_size_changed == 0) &&
 4956           (team->t.t_proc_bind == new_proc_bind)) {
 4966         KMP_CHECK_UPDATE(team->t.t_proc_bind, new_proc_bind);
 4972     } else if (team->t.t_nproc > new_nproc) {
 4977       team->t.t_size_changed = 1;
 4986         for (f = new_nproc; f < team->t.t_nproc; f++) {
 4991             team->t.t_threads[f]->th.th_task_team = NULL;
 4993           __kmp_free_thread(team->t.t_threads[f]);
 4994           team->t.t_threads[f] = NULL;
 5001         for (f = new_nproc; f < team->t.t_nproc; ++f) {
 5003           kmp_balign_t *balign = team->t.t_threads[f]->th.th_bar;
 5013       team->t.t_nproc = new_nproc;
 5015       KMP_CHECK_UPDATE(team->t.t_sched.sched, new_icvs->sched.sched);
 5021         team->t.t_threads[f]->th.th_team_nproc = new_nproc;
 5029       __kmp_push_current_task_to_thread(team->t.t_threads[0], team, 0);
 5039       KMP_CHECK_UPDATE(team->t.t_proc_bind, new_proc_bind);
 5055       team->t.t_size_changed = 1;
 5061       kmp_info_t **other_threads = team->t.t_threads;
 5062       for (f = team->t.t_nproc; f < avail_threads; ++f) {
 5068           balign[b].bb.b_arrived = team->t.t_bar[b].b_arrived;
 5079         team->t.t_nproc = new_nproc; // just get reserved threads involved
 5082         team->t.t_nproc =
 5087         if (team->t.t_max_nproc < new_nproc) {
 5102         for (f = team->t.t_nproc; f < new_nproc; f++) {
 5105           team->t.t_threads[f] = new_worker;
 5118               balign[b].bb.b_arrived = team->t.t_bar[b].b_arrived;
 5139       int old_nproc = team->t.t_nproc; // save old value and use to update only
 5146       for (f = 0; f < team->t.t_nproc; ++f)
 5147         __kmp_initialize_info(team->t.t_threads[f], team, f,
 5156         for (f = old_nproc; f < team->t.t_nproc; ++f)
 5157           team->t.t_threads[f]->th.th_task_state =
 5158               team->t.t_threads[0]->th.th_task_state_memo_stack[level];
 5161             team->t.t_threads[0]->th.th_task_state; // copy master's state
 5162         for (f = old_nproc; f < team->t.t_nproc; ++f)
 5163           team->t.t_threads[f]->th.th_task_state = old_state;
 5174       KMP_CHECK_UPDATE(team->t.t_proc_bind, new_proc_bind);
 5180     kmp_info_t *master = team->t.t_threads[0];
 5184         kmp_info_t *thr = team->t.t_threads[f];
 5195         kmp_info_t *thr = team->t.t_threads[f];
 5199           balign[b].bb.b_arrived = team->t.t_bar[b].b_arrived;
 5211     KMP_CHECK_UPDATE(team->t.t_argc, argc);
 5239     if (team->t.t_max_nproc >= max_nproc) {
 5241       __kmp_team_pool = team->t.t_next_pool;
 5249       team->t.t_task_team[0] = NULL;
 5250       team->t.t_task_team[1] = NULL;
 5254       KMP_CHECK_UPDATE(team->t.t_argc, argc);
 5262           team->t.t_bar[b].b_arrived = KMP_INIT_BARRIER_STATE;
 5270       team->t.t_proc_bind = new_proc_bind;
 5297   team->t.t_max_nproc = max_nproc;
 5308   team->t.t_task_team[0] = NULL; // to be removed, as __kmp_allocate zeroes
 5310   team->t.t_task_team[1] = NULL; // to be removed, as __kmp_allocate zeroes
 5314     __kmp_print_team_storage_map("team", team, team->t.t_id, new_nproc);
 5319   team->t.t_argc = argc;
 5327       team->t.t_bar[b].b_arrived = KMP_INIT_BARRIER_STATE;
 5335   team->t.t_proc_bind = new_proc_bind;
 5339   team->t.ompt_serialized_team_info = NULL;
 5372     level = team->t.t_active_level - 1;
 5378       if (team->t.t_pkfn != (microtask_t)__kmp_teams_master &&
 5379           master->th.th_teams_level == team->t.t_level) {
 5393   TCW_SYNC_PTR(team->t.t_pkfn,
 5404       for (f = 1; f < team->t.t_nproc; ++f) {
 5406         kmp_info_t *th = team->t.t_threads[f];
 5428         kmp_task_team_t *task_team = team->t.t_task_team[tt_idx];
 5430           for (f = 0; f < team->t.t_nproc; ++f) { // threads unref task teams
 5432             team->t.t_threads[f]->th.th_task_team = NULL;
 5441           team->t.t_task_team[tt_idx] = NULL;
 5447     team->t.t_parent = NULL;
 5448     team->t.t_level = 0;
 5449     team->t.t_active_level = 0;
 5452     for (f = 1; f < team->t.t_nproc; ++f) {
 5454       __kmp_free_thread(team->t.t_threads[f]);
 5455       team->t.t_threads[f] = NULL;
 5460     team->t.t_next_pool = CCAST(kmp_team_t *, __kmp_team_pool);
 5466     if (team->t.t_threads[1]->th.th_cg_roots->cg_root == team->t.t_threads[1]) {
 5468       for (f = 1; f < team->t.t_nproc; ++f) {
 5469         kmp_info_t *thr = team->t.t_threads[f];
 5495   kmp_team_t *next_pool = team->t.t_next_pool;
 5507   if (team->t.t_argv != &team->t.t_inline_argv[0])
 5508     __kmp_free((void *)team->t.t_argv);
 5713       if (TCR_SYNC_PTR((*pteam)->t.t_pkfn) != NULL) {
 5728         rc = (*pteam)->t.t_invoke(gtid);
 6036       __kmp_team_pool = team->t.t_next_pool;
 6038       team->t.t_next_pool = NULL;
 6984     __kmp_push_parallel(gtid, team->t.t_ident);
 6992     __kmp_pop_parallel(gtid, team->t.t_ident);
 7008             team->t.t_stack_id); // inform ittnotify about entering user's code
 7024         team->t.t_implicit_task_taskdata[tid].ompt_task_info.frame.exit_frame.ptr);
 7030       &(team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_data);
 7031   my_parallel_data = &(team->t.ompt_team_info.parallel_data);
 7033     ompt_team_size = team->t.t_nproc;
 7051   rc = __kmp_invoke_microtask((microtask_t)TCR_SYNC_PTR(team->t.t_pkfn), gtid,
 7052                               tid, (int)team->t.t_argc, (void **)team->t.t_argv
 7074             team->t.t_stack_id); // inform ittnotify about leaving user's code
 7086   ident_t *loc = team->t.t_ident;
 7110   __kmp_fork_call(loc, gtid, fork_context_intel, team->t.t_argc,
 7142       &team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_data;
 7143   ompt_data_t *parallel_data = &team->t.ompt_team_info.parallel_data;
 7146         ompt_scope_begin, parallel_data, task_data, team->t.t_nproc, tid,
 7255   team->t.t_construct = 0; /* no single directives seen yet */
 7256   team->t.t_ordered.dt.t_value =
 7261   if (team->t.t_max_nproc > 1) {
 7264       team->t.t_disp_buffer[i].buffer_index = i;
 7265       team->t.t_disp_buffer[i].doacross_buf_idx = i;
 7268     team->t.t_disp_buffer[0].buffer_index = 0;
 7269     team->t.t_disp_buffer[0].doacross_buf_idx = 0;
 7323       codeptr = OMPT_CUR_TEAM_INFO(this_thr)->master_return_address;
 7363     return hot_team->t.t_nproc - 1; // Don't count master thread
 7368   for (i = 1; i < hot_team->t.t_nproc; i++) {
 7369     if (hot_team->t.t_threads[i]->th.th_active) {
 7421              (root->r.r_active ? 1 : root->r.r_hot_team->t.t_nproc);
 7692     int ii = team->t.t_level;
 7693     teams_serialized = team->t.t_serialized;
 7697       for (teams_serialized = team->t.t_serialized;
 7700       if (team->t.t_serialized && (!teams_serialized)) {
 7701         team = team->t.t_parent;
 7705         team = team->t.t_parent;
 7721       return team->t.t_master_tid;
 7734       return team->t.t_parent->t.t_nproc;
 7906     rc = __kmp_str_buf_print(field_buffer, format, th->th.th_team->t.t_level);
 7924     rc = __kmp_str_buf_print(field_buffer, format, th->th.th_team->t.t_nproc);
 7928         __kmp_get_ancestor_thread_num(gtid, th->th.th_team->t.t_level - 1);
 8032   set__blocktime_team(thread->th.th_team, tid, blocktime);
 8033   set__blocktime_team(thread->th.th_serial_team, 0, blocktime);
 8046   set__bt_set_team(thread->th.th_team, tid, bt_set);
 8047   set__bt_set_team(thread->th.th_serial_team, 0, bt_set);
 8104   team_size = __kmp_get_team_num_threads(global_tid);
projects/openmp/runtime/src/kmp_sched.cpp
  189     tid = th->th.th_team->t.t_master_tid;
  190     team = th->th.th_team->t.t_parent;
  197   if (team->t.t_serialized) {
  229   nth = team->t.t_nproc;
  382       team->t.t_active_level == 1) {
  481   team_id = team->t.t_master_tid;
  722   team_id = team->t.t_master_tid;
projects/openmp/runtime/src/kmp_tasking.cpp
  469     if (this_thr->th.th_current_task != &team->t.t_implicit_task_taskdata[0]) {
  470       team->t.t_implicit_task_taskdata[0].td_parent =
  472       this_thr->th.th_current_task = &team->t.t_implicit_task_taskdata[0];
  475     team->t.t_implicit_task_taskdata[tid].td_parent =
  476         team->t.t_implicit_task_taskdata[0].td_parent;
  477     this_thr->th.th_current_task = &team->t.t_implicit_task_taskdata[tid];
 1036   kmp_taskdata_t *task = &team->t.t_implicit_task_taskdata[tid];
 1059   task->td_flags.team_serial = (team->t.t_serialized) ? 1 : 0;
 1178   if (flags->tiedness == TASK_UNTIED && !team->t.t_serialized) {
 1205       thread->th.th_task_team = team->t.t_task_team[thread->th.th_task_state];
 1299   taskdata->td_flags.team_serial = (team->t.t_serialized) ? 1 : 0;
 1472         (this_team->t.t_cancel_request == cancel_parallel)) {
 1813       my_parallel_data = OMPT_CUR_TEAM_DATA(thread);
 2317   reduce_data = KMP_ATOMIC_LD_RLX(&team->t.t_tg_reduce_data[is_ws]);
 2319       __kmp_atomic_compare_store(&team->t.t_tg_reduce_data[is_ws], reduce_data,
 2330     KMP_ATOMIC_ST_REL(&team->t.t_tg_reduce_data[is_ws], reduce_data);
 2333         (reduce_data = KMP_ATOMIC_LD_ACQ(&team->t.t_tg_reduce_data[is_ws])) ==
 2420     ompt_data_t my_parallel_data = team->t.ompt_team_info.parallel_data;
 2446     my_parallel_data = team->t.ompt_team_info.parallel_data;
 2513     if ((reduce_data = KMP_ATOMIC_LD_ACQ(&t->t.t_tg_reduce_data[0])) != NULL &&
 2516       cnt = KMP_ATOMIC_INC(&t->t.t_tg_fini_counter[0]);
 2524         KMP_ATOMIC_ST_REL(&t->t.t_tg_reduce_data[0], NULL);
 2525         KMP_ATOMIC_ST_REL(&t->t.t_tg_fini_counter[0], 0);
 2531     } else if ((reduce_data = KMP_ATOMIC_LD_ACQ(&t->t.t_tg_reduce_data[1])) !=
 2535       cnt = KMP_ATOMIC_INC(&t->t.t_tg_fini_counter[1]);
 2542         KMP_ATOMIC_ST_REL(&t->t.t_tg_reduce_data[1], NULL);
 2543         KMP_ATOMIC_ST_REL(&t->t.t_tg_fini_counter[1], 0);
 3261       thread_data->td.td_thr = team->t.t_threads[i];
 3335   task_team->tt.tt_nproc = nthreads = team->t.t_nproc;
 3460   if (team->t.t_task_team[this_thr->th.th_task_state] == NULL &&
 3461       (always || team->t.t_nproc > 1)) {
 3462     team->t.t_task_team[this_thr->th.th_task_state] =
 3479   if (team->t.t_nproc > 1) {
 3481     if (team->t.t_task_team[other_team] == NULL) { // setup other team as well
 3482       team->t.t_task_team[other_team] =
 3491       kmp_task_team_t *task_team = team->t.t_task_team[other_team];
 3493           team->t.t_nproc != task_team->tt.tt_nproc) {
 3494         TCW_4(task_team->tt.tt_nproc, team->t.t_nproc);
 3498                           team->t.t_nproc);
 3524           team->t.t_task_team[this_thr->th.th_task_state]);
 3542   kmp_task_team_t *task_team = team->t.t_task_team[this_thr->th.th_task_state];
 3586       &team->t.t_task_team[thread->th.th_task_state]->tt.tt_unfinished_threads);
 3804   kmp_int32 nthreads = team->t.t_nproc;
 3815     thread = team->t.t_threads[k];
projects/openmp/runtime/src/kmp_wait_release.h
  190     if (team && team->t.t_cancel_request == cancel_parallel)
  261           this_thr->th.th_team->t.ompt_serialized_team_info;
  380       if (team && team->t.t_cancel_request == cancel_parallel)
  463     if (team && team->t.t_cancel_request == cancel_parallel) {
projects/openmp/runtime/src/ompt-specific.cpp
   59     ompt_lw_taskteam_t *next_lwt = LWT_FROM_TEAM(team), *lwt = NULL;
   73           team = team->t.t_parent;
   75             next_lwt = LWT_FROM_TEAM(team);
   93         *size = team->t.t_nproc;
   96       return &team->t.ompt_team_info;
  110                        *next_lwt = LWT_FROM_TEAM(taskdata->td_team);
  126             next_lwt = LWT_FROM_TEAM(taskdata->td_team);
  151                        *next_lwt = LWT_FROM_TEAM(taskdata->td_team);
  171             next_lwt = LWT_FROM_TEAM(taskdata->td_team);
  275       thr->th.th_team->t.t_serialized >
  285     link_lwt->ompt_team_info = *OMPT_CUR_TEAM_INFO(thr);
  286     *OMPT_CUR_TEAM_INFO(thr) = tmp_team;
  294         thr->th.th_team->t.ompt_serialized_team_info;
  296     thr->th.th_team->t.ompt_serialized_team_info = link_lwt;
  300     *OMPT_CUR_TEAM_INFO(thr) = lwt->ompt_team_info;
  306   ompt_lw_taskteam_t *lwtask = thr->th.th_team->t.ompt_serialized_team_info;
  308     thr->th.th_team->t.ompt_serialized_team_info = lwtask->parent;
  311     lwtask->ompt_team_info = *OMPT_CUR_TEAM_INFO(thr);
  312     *OMPT_CUR_TEAM_INFO(thr) = tmp_team;
  355                        *next_lwt = LWT_FROM_TEAM(taskdata->td_team),
  380           team = team->t.t_parent;
  382             next_lwt = LWT_FROM_TEAM(taskdata->td_team);
  397       team_info = &team->t.ompt_team_info;
  424         *thread_num = prev_team->t.t_master_tid;
  469   team->t.ompt_team_info.parallel_data = ompt_pid;