Cross-reference hits for the per-thread task deque fields (td.td_deque, td_deque_head, td_deque_tail, td_deque_ntasks, td_deque_size, td_deque_lock, td_deque_last_stolen) in the LLVM OpenMP runtime's kmp_tasking.cpp, one match per source line:

300   kmp_int32 size = TASK_DEQUE_SIZE(thread_data->td);
311   for (i = thread_data->td.td_deque_head, j = 0; j < size;
312        i = (i + 1) & TASK_DEQUE_MASK(thread_data->td), j++)
313     new_deque[j] = thread_data->td.td_deque[i];
315   __kmp_free(thread_data->td.td_deque);
317   thread_data->td.td_deque_head = 0;
318   thread_data->td.td_deque_tail = size;
319   thread_data->td.td_deque = new_deque;
320   thread_data->td.td_deque_size = new_size;
366   if (thread_data->td.td_deque == NULL) {
372   if (TCR_4(thread_data->td.td_deque_ntasks) >=
373       TASK_DEQUE_SIZE(thread_data->td)) {
382   __kmp_acquire_bootstrap_lock(&thread_data->td.td_deque_lock);
390   __kmp_acquire_bootstrap_lock(&thread_data->td.td_deque_lock);
392   if (TCR_4(thread_data->td.td_deque_ntasks) >=
393       TASK_DEQUE_SIZE(thread_data->td)) {
397   __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
412   thread_data->td.td_deque[thread_data->td.td_deque_tail] =
415   thread_data->td.td_deque_tail =
416       (thread_data->td.td_deque_tail + 1) & TASK_DEQUE_MASK(thread_data->td);
417   TCW_4(thread_data->td.td_deque_ntasks,
418         TCR_4(thread_data->td.td_deque_ntasks) + 1); // Adjust task count
425   __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
1218  if (thread_data->td.td_deque == NULL) {
2590  if (TCR_4(thread_data->td.td_deque_ntasks) == 0) {
2599  __kmp_acquire_bootstrap_lock(&thread_data->td.td_deque_lock);
2601  if (TCR_4(thread_data->td.td_deque_ntasks) == 0) {
2602  __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
2611  tail = (thread_data->td.td_deque_tail - 1) &
2612         TASK_DEQUE_MASK(thread_data->td); // Wrap index.
2613  taskdata = thread_data->td.td_deque[tail];
2618  __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
2627  thread_data->td.td_deque_tail = tail;
2628  TCW_4(thread_data->td.td_deque_ntasks, thread_data->td.td_deque_ntasks - 1);
2630  __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
2670  if (TCR_4(victim_td->td.td_deque_ntasks) == 0) {
2679  __kmp_acquire_bootstrap_lock(&victim_td->td.td_deque_lock);
2681  int ntasks = TCR_4(victim_td->td.td_deque_ntasks);
2684  __kmp_release_bootstrap_lock(&victim_td->td.td_deque_lock);
2694  taskdata = victim_td->td.td_deque[victim_td->td.td_deque_head];
2697  victim_td->td.td_deque_head =
2698      (victim_td->td.td_deque_head + 1) & TASK_DEQUE_MASK(victim_td->td);
2702  __kmp_release_bootstrap_lock(&victim_td->td.td_deque_lock);
2711  target = victim_td->td.td_deque_head;
2714  target = (target + 1) & TASK_DEQUE_MASK(victim_td->td);
2715  taskdata = victim_td->td.td_deque[target];
2724  __kmp_release_bootstrap_lock(&victim_td->td.td_deque_lock);
2734  target = (target + 1) & TASK_DEQUE_MASK(victim_td->td);
2735  victim_td->td.td_deque[prev] = victim_td->td.td_deque[target];
2741  victim_td->td.td_deque_tail = target; // tail -= 1 (wrapped)
2758  TCW_4(victim_td->td.td_deque_ntasks, ntasks - 1);
2760  __kmp_release_bootstrap_lock(&victim_td->td.td_deque_lock);
2827  victim_tid = threads_data[tid].td.td_deque_last_stolen;
2830  other_thread = threads_data[victim_tid].td.td_thr;
2845  other_thread = threads_data[victim_tid].td.td_thr;
2879  if (threads_data[tid].td.td_deque_last_stolen != victim_tid) {
2880  threads_data[tid].td.td_deque_last_stolen = victim_tid;
2887  KMP_CHECK_UPDATE(threads_data[tid].td.td_deque_last_stolen, -1);
2928  if (!use_own_tasks && TCR_4(threads_data[tid].td.td_deque_ntasks) != 0) {
3055  kmp_info_t *thread = threads_data[i].td.td_thr;
3127  __kmp_init_bootstrap_lock(&thread_data->td.td_deque_lock);
3131  thread_data->td.td_deque_last_stolen = -1;
3144  thread_data->td.td_deque = (kmp_taskdata_t **)__kmp_allocate(
3146  thread_data->td.td_deque_size = INITIAL_TASK_DEQUE_SIZE;
3153  if (thread_data->td.td_deque != NULL) {
3154  __kmp_acquire_bootstrap_lock(&thread_data->td.td_deque_lock);
3155  TCW_4(thread_data->td.td_deque_ntasks, 0);
3156  __kmp_free(thread_data->td.td_deque);
3157  thread_data->td.td_deque = NULL;
3158  __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
3261  thread_data->td.td_thr = team->t.t_threads[i];
3263  if (thread_data->td.td_deque_last_stolen >= nthreads) {
3267  thread_data->td.td_deque_last_stolen = -1;
3632  if (thread_data->td.td_deque == NULL) {
3641  if (TCR_4(thread_data->td.td_deque_ntasks) >=
3642      TASK_DEQUE_SIZE(thread_data->td)) {
3650  if (TASK_DEQUE_SIZE(thread_data->td) / INITIAL_TASK_DEQUE_SIZE >= pass)
3653  __kmp_acquire_bootstrap_lock(&thread_data->td.td_deque_lock);
3658  __kmp_acquire_bootstrap_lock(&thread_data->td.td_deque_lock);
3660  if (TCR_4(thread_data->td.td_deque_ntasks) >=
3661      TASK_DEQUE_SIZE(thread_data->td)) {
3668  if (TASK_DEQUE_SIZE(thread_data->td) / INITIAL_TASK_DEQUE_SIZE >= pass)
3677  thread_data->td.td_deque[thread_data->td.td_deque_tail] = taskdata;
3679  thread_data->td.td_deque_tail =
3680      (thread_data->td.td_deque_tail + 1) & TASK_DEQUE_MASK(thread_data->td);
3681  TCW_4(thread_data->td.td_deque_ntasks,
3682        TCR_4(thread_data->td.td_deque_ntasks) + 1);
3689  __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
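Taken together, these hits trace a fixed-capacity ring-buffer deque: td_deque_tail is the owner's push/pop end, td_deque_head is the end thieves steal from, indices wrap with TASK_DEQUE_MASK (which only works because the capacity is a power of two), td_deque_ntasks tracks occupancy, and a full deque is grown by copying its entries in order and resetting head to 0 (lines 300-320). The sketch below is a minimal single-threaded illustration of that pattern, not the runtime's API: the names SimpleTaskDeque, push_tail, pop_tail, steal_head, and grow are hypothetical, and it omits the td_deque_lock bootstrap lock and the TCR_4/TCW_4 volatile accessors the real code uses to coordinate the owner and thieves.

#include <cassert>
#include <cstdint>
#include <vector>

// Hypothetical illustration of the td_deque ring-buffer pattern; the real
// runtime stores kmp_taskdata_t pointers and serializes on td_deque_lock.
struct SimpleTaskDeque {
  std::vector<void *> buf; // stands in for td_deque
  uint32_t head = 0;       // td_deque_head: thieves take from here
  uint32_t tail = 0;       // td_deque_tail: the owner pushes/pops here
  uint32_t ntasks = 0;     // td_deque_ntasks: current occupancy

  explicit SimpleTaskDeque(uint32_t size) : buf(size) {
    // The mask trick requires a power-of-two capacity.
    assert(size != 0 && (size & (size - 1)) == 0);
  }
  uint32_t mask() const { return (uint32_t)buf.size() - 1; } // TASK_DEQUE_MASK

  bool push_tail(void *task) { // cf. lines 412-418 and 3677-3682
    if (ntasks >= buf.size())
      return false; // full: the caller grows the deque or falls back
    buf[tail] = task;
    tail = (tail + 1) & mask(); // wrap with the power-of-two mask
    ++ntasks;
    return true;
  }
  void *pop_tail() { // owner takes the newest task, cf. lines 2611-2628
    if (ntasks == 0)
      return nullptr;
    tail = (tail - 1) & mask(); // unsigned underflow + mask wraps to size-1
    --ntasks;
    return buf[tail];
  }
  void *steal_head() { // a thief takes the oldest task, cf. lines 2694-2698
    if (ntasks == 0)
      return nullptr;
    void *task = buf[head];
    head = (head + 1) & mask();
    --ntasks;
    return task;
  }
  void grow() { // cf. lines 300-320: copy in order, then reset head to 0
    std::vector<void *> wider(buf.size() * 2);
    for (uint32_t i = head, j = 0; j < ntasks; i = (i + 1) & mask(), ++j)
      wider[j] = buf[i]; // mask() still reflects the old capacity here
    buf.swap(wider);
    head = 0;
    tail = ntasks;
  }
};

int main() {
  SimpleTaskDeque dq(4); // small power-of-two capacity for the demo
  int a = 0, b = 1, c = 2;
  dq.push_tail(&a);
  dq.push_tail(&b);
  dq.push_tail(&c);
  assert(dq.steal_head() == &a); // thief gets the oldest entry
  assert(dq.pop_tail() == &c);   // owner gets the newest entry
  assert(dq.ntasks == 1);        // &b is still queued
  return 0;
}

The masking also explains the pop path: decrementing an unsigned tail of 0 wraps to 0xFFFFFFFF, and the mask lands it on size - 1, so no explicit underflow check is needed. Note also the guards at lines 3650 and 3668: reallocation is only attempted while TASK_DEQUE_SIZE / INITIAL_TASK_DEQUE_SIZE is below the pass count, which bounds how far a persistently full deque can expand.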