//===---- parallel.cu - NVPTX OpenMP parallel implementation ----- CUDA -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Parallel implementation on the GPU. Here is the pattern:
//
//    while (not finished) {
//
//    if (master) {
//      sequential code, decide which par loop to do, or if finished
//     __kmpc_kernel_prepare_parallel() // exec by master only
//    }
//    syncthreads // A
//    __kmpc_kernel_parallel() // exec by all
//    if (this thread is included in the parallel) {
//      switch () for all parallel loops
//      __kmpc_kernel_end_parallel() // exec only by threads in parallel
//    }
//    }
//
//    The reason we don't execute end_parallel for the threads not
//    included in the parallel loop is that, for each barrier in the
//    parallel region, these non-included threads cycle back through
//    syncthreads A. They must therefore preserve their current
//    threadId, which is larger than the number of threads in the team.
//
//    To make a long story short...
//
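//    As an illustration only (not part of this file), the non-SPMD
//    worker loop generated by the compiler could drive these entry
//    points roughly as sketched below. The termination-by-null-WorkFn
//    convention follows the code in this file; __kmpc_impl_syncthreads
//    stands in for the block-wide barrier and the cast of WorkFn is
//    schematic:
//
//      void *WorkFn;
//      for (;;) {
//        __kmpc_impl_syncthreads();                  // syncthreads A
//        bool Active = __kmpc_kernel_parallel(&WorkFn, /*Init=*/1);
//        if (!WorkFn)
//          return;                                   // termination signal
//        if (Active) {                               // in the team
//          ((void (*)())WorkFn)();                   // outlined region
//          __kmpc_kernel_end_parallel();
//        }
//      }
//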
//===----------------------------------------------------------------------===//

#include "omptarget-nvptx.h"
#include "target_impl.h"

typedef struct ConvergentSimdJob {
  omptarget_nvptx_TaskDescr taskDescr;
  omptarget_nvptx_TaskDescr *convHeadTaskDescr;
  uint16_t slimForNextSimd;
} ConvergentSimdJob;

////////////////////////////////////////////////////////////////////////////////
// support for convergent simd (team of threads in a warp only)
////////////////////////////////////////////////////////////////////////////////
EXTERN bool __kmpc_kernel_convergent_simd(void *buffer,
                                          __kmpc_impl_lanemask_t Mask,
                                          bool *IsFinal, int32_t *LaneSource,
                                          int32_t *LaneId, int32_t *NumLanes) {
  PRINT0(LD_IO, "call to __kmpc_kernel_convergent_simd\n");
  __kmpc_impl_lanemask_t ConvergentMask = Mask;
  int32_t ConvergentSize = __kmpc_impl_popc(ConvergentMask);
  __kmpc_impl_lanemask_t WorkRemaining = ConvergentMask >> (*LaneSource + 1);
  *LaneSource += __kmpc_impl_ffs(WorkRemaining);
  *IsFinal = __kmpc_impl_popc(WorkRemaining) == 1;
  __kmpc_impl_lanemask_t lanemask_lt = __kmpc_impl_lanemask_lt();
  *LaneId = __kmpc_impl_popc(ConvergentMask & lanemask_lt);
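  // Worked example (assuming the caller initializes *LaneSource to -1
  // before the first call): with Mask = 0b1111, the first call shifts by
  // 0, ffs picks bit 1, and *LaneSource becomes 0, so the lanes first
  // serve lane 0's simd region. Each later call shifts past the lanes
  // already served, until a single bit remains and *IsFinal becomes
  // true. *LaneId is each lane's dense rank within the convergent lanes.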

  int threadId = GetLogicalThreadIdInBlock(isSPMDMode());
  int sourceThreadId = (threadId & ~(WARPSIZE - 1)) + *LaneSource;

  ConvergentSimdJob *job = (ConvergentSimdJob *)buffer;
  int32_t SimdLimit =
      omptarget_nvptx_threadPrivateContext->SimdLimitForNextSimd(threadId);
  job->slimForNextSimd = SimdLimit;

  int32_t SimdLimitSource = __kmpc_impl_shfl_sync(Mask, SimdLimit, *LaneSource);
  // reset simdlimit to avoid propagating to successive #simd
  if (SimdLimitSource > 0 && threadId == sourceThreadId)
    omptarget_nvptx_threadPrivateContext->SimdLimitForNextSimd(threadId) = 0;

  // We cannot have more than the # of convergent threads.
  if (SimdLimitSource > 0)
    *NumLanes = min(ConvergentSize, SimdLimitSource);
  else
    *NumLanes = ConvergentSize;
  ASSERT(LT_FUSSY, *NumLanes > 0, "bad thread request of %d threads",
         (int)*NumLanes);

  // Set to true for lanes participating in the simd region.
  bool isActive = false;
  // Initialize state for active threads.
  if (*LaneId < *NumLanes) {
    omptarget_nvptx_TaskDescr *currTaskDescr =
        omptarget_nvptx_threadPrivateContext->GetTopLevelTaskDescr(threadId);
    omptarget_nvptx_TaskDescr *sourceTaskDescr =
        omptarget_nvptx_threadPrivateContext->GetTopLevelTaskDescr(
            sourceThreadId);
    job->convHeadTaskDescr = currTaskDescr;
    // install top descriptor from the thread for which the lanes are working.
    omptarget_nvptx_threadPrivateContext->SetTopLevelTaskDescr(threadId,
                                                               sourceTaskDescr);
    isActive = true;
  }

  // requires a memory fence between threads of a warp
  return isActive;
}

EXTERN void __kmpc_kernel_end_convergent_simd(void *buffer) {
  PRINT0(LD_IO | LD_PAR, "call to __kmpc_kernel_end_convergent_simd\n");
  // pop stack
  int threadId = GetLogicalThreadIdInBlock(isSPMDMode());
  ConvergentSimdJob *job = (ConvergentSimdJob *)buffer;
  omptarget_nvptx_threadPrivateContext->SimdLimitForNextSimd(threadId) =
      job->slimForNextSimd;
  omptarget_nvptx_threadPrivateContext->SetTopLevelTaskDescr(
      threadId, job->convHeadTaskDescr);
}

typedef struct ConvergentParallelJob {
  omptarget_nvptx_TaskDescr taskDescr;
  omptarget_nvptx_TaskDescr *convHeadTaskDescr;
  uint16_t tnumForNextPar;
} ConvergentParallelJob;

////////////////////////////////////////////////////////////////////////////////
// support for convergent parallelism (team of threads in a warp only)
////////////////////////////////////////////////////////////////////////////////
EXTERN bool __kmpc_kernel_convergent_parallel(void *buffer,
                                              __kmpc_impl_lanemask_t Mask,
                                              bool *IsFinal,
                                              int32_t *LaneSource) {
  PRINT0(LD_IO, "call to __kmpc_kernel_convergent_parallel\n");
  __kmpc_impl_lanemask_t ConvergentMask = Mask;
  int32_t ConvergentSize = __kmpc_impl_popc(ConvergentMask);
  __kmpc_impl_lanemask_t WorkRemaining = ConvergentMask >> (*LaneSource + 1);
  *LaneSource += __kmpc_impl_ffs(WorkRemaining);
  *IsFinal = __kmpc_impl_popc(WorkRemaining) == 1;
  __kmpc_impl_lanemask_t lanemask_lt = __kmpc_impl_lanemask_lt();
  uint32_t OmpId = __kmpc_impl_popc(ConvergentMask & lanemask_lt);
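  // OmpId is this lane's dense rank among the convergent lanes, e.g. with
  // ConvergentMask = 0b1010, lane 1 gets OmpId 0 and lane 3 gets OmpId 1.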

  int threadId = GetLogicalThreadIdInBlock(isSPMDMode());
  int sourceThreadId = (threadId & ~(WARPSIZE - 1)) + *LaneSource;

  ConvergentParallelJob *job = (ConvergentParallelJob *)buffer;
  int32_t NumThreadsClause =
      omptarget_nvptx_threadPrivateContext->NumThreadsForNextParallel(threadId);
  job->tnumForNextPar = NumThreadsClause;

  int32_t NumThreadsSource =
      __kmpc_impl_shfl_sync(Mask, NumThreadsClause, *LaneSource);
  // reset numthreads to avoid propagating to successive #parallel
  if (NumThreadsSource > 0 && threadId == sourceThreadId)
    omptarget_nvptx_threadPrivateContext->NumThreadsForNextParallel(threadId) =
        0;

  // We cannot have more than the # of convergent threads.
  uint16_t NumThreads;
  if (NumThreadsSource > 0)
    NumThreads = min(ConvergentSize, NumThreadsSource);
  else
    NumThreads = ConvergentSize;
  ASSERT(LT_FUSSY, NumThreads > 0, "bad thread request of %d threads",
         (int)NumThreads);

  // Set to true for workers participating in the parallel region.
  bool isActive = false;
  // Initialize state for active threads.
  if (OmpId < NumThreads) {
    // init L2 task descriptor and storage for the L1 parallel task descriptor.
    omptarget_nvptx_TaskDescr *newTaskDescr = &job->taskDescr;
    ASSERT0(LT_FUSSY, newTaskDescr, "expected a task descr");
    omptarget_nvptx_TaskDescr *currTaskDescr =
        omptarget_nvptx_threadPrivateContext->GetTopLevelTaskDescr(threadId);
    omptarget_nvptx_TaskDescr *sourceTaskDescr =
        omptarget_nvptx_threadPrivateContext->GetTopLevelTaskDescr(
            sourceThreadId);
    job->convHeadTaskDescr = currTaskDescr;
    newTaskDescr->CopyConvergentParent(sourceTaskDescr, OmpId, NumThreads);
    // install new top descriptor
    omptarget_nvptx_threadPrivateContext->SetTopLevelTaskDescr(threadId,
                                                               newTaskDescr);
    isActive = true;
  }

  // requires a memory fence between threads of a warp
  return isActive;
}

EXTERN void __kmpc_kernel_end_convergent_parallel(void *buffer) {
  PRINT0(LD_IO | LD_PAR, "call to __kmpc_kernel_end_convergent_parallel\n");
  // pop stack
  int threadId = GetLogicalThreadIdInBlock(isSPMDMode());
  ConvergentParallelJob *job = (ConvergentParallelJob *)buffer;
  omptarget_nvptx_threadPrivateContext->SetTopLevelTaskDescr(
      threadId, job->convHeadTaskDescr);
  omptarget_nvptx_threadPrivateContext->NumThreadsForNextParallel(threadId) =
      job->tnumForNextPar;
}

////////////////////////////////////////////////////////////////////////////////
// support for parallel that goes parallel (1 static level only)
////////////////////////////////////////////////////////////////////////////////

INLINE static uint16_t determineNumberOfThreads(uint16_t NumThreadsClause,
                                                uint16_t NThreadsICV,
                                                uint16_t ThreadLimit) {
  uint16_t ThreadsRequested = NThreadsICV;
  if (NumThreadsClause != 0) {
    ThreadsRequested = NumThreadsClause;
  }

  uint16_t ThreadsAvailable = GetNumberOfWorkersInTeam();
  if (ThreadLimit != 0 && ThreadLimit < ThreadsAvailable) {
    ThreadsAvailable = ThreadLimit;
  }

  uint16_t NumThreads = ThreadsAvailable;
  if (ThreadsRequested != 0 && ThreadsRequested < NumThreads) {
    NumThreads = ThreadsRequested;
  }

#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700
  // On Volta and newer architectures we require that all lanes in
  // a warp participate in the parallel region.  Round down to a
  // multiple of WARPSIZE since it is legal to do so in OpenMP.
  if (NumThreads < WARPSIZE) {
    NumThreads = 1;
  } else {
    NumThreads = (NumThreads & ~((uint16_t)WARPSIZE - 1));
  }
#endif

  return NumThreads;
}
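
// Worked example: with NumThreadsClause = 0, NThreadsICV = 50,
// ThreadLimit = 40, and (say) 96 workers in the team, ThreadsAvailable
// is clamped to 40 and ThreadsRequested is 50, so NumThreads = 40; on
// sm_70+ it is then rounded down to 32, a multiple of WARPSIZE.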

// This routine is always called by the team master.
EXTERN void __kmpc_kernel_prepare_parallel(void *WorkFn,
                                           int16_t IsOMPRuntimeInitialized) {
  PRINT0(LD_IO, "call to __kmpc_kernel_prepare_parallel\n");
  ASSERT0(LT_FUSSY, IsOMPRuntimeInitialized, "Expected initialized runtime.");

  omptarget_nvptx_workFn = WorkFn;

  // This routine is only called by the team master.  The team master is
  // the first thread of the last warp.  It always has the logical thread
  // id of 0 (since it is a shadow for the first worker thread).
  const int threadId = 0;
  omptarget_nvptx_TaskDescr *currTaskDescr =
      omptarget_nvptx_threadPrivateContext->GetTopLevelTaskDescr(threadId);
  ASSERT0(LT_FUSSY, currTaskDescr, "expected a top task descr");
  ASSERT0(LT_FUSSY, !currTaskDescr->InParallelRegion(),
          "cannot be called in a parallel region.");
  if (currTaskDescr->InParallelRegion()) {
    PRINT0(LD_PAR, "already in parallel: go seq\n");
    return;
  }

  uint16_t &NumThreadsClause =
      omptarget_nvptx_threadPrivateContext->NumThreadsForNextParallel(threadId);

  uint16_t NumThreads =
      determineNumberOfThreads(NumThreadsClause, nThreads, threadLimit);

  if (NumThreadsClause != 0) {
    // Reset request to avoid propagating to successive #parallel
    NumThreadsClause = 0;
  }

  ASSERT(LT_FUSSY, NumThreads > 0, "bad thread request of %d threads",
         (int)NumThreads);
  ASSERT0(LT_FUSSY, GetThreadIdInBlock() == GetMasterThreadID(),
          "only team master can create parallel");

  // Set number of threads on work descriptor.
  omptarget_nvptx_WorkDescr &workDescr = getMyWorkDescriptor();
  workDescr.WorkTaskDescr()->CopyToWorkDescr(currTaskDescr);
  threadsInTeam = NumThreads;
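  // threadsInTeam is read by the workers in __kmpc_kernel_parallel, after
  // the syncthreads that follows this call, to decide who participates.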
}

// All workers call this function.  Deactivate those not needed.
// WorkFn - returns the outlined work function to execute.
// Returns true if this thread is active, else false.
//
// Only the worker threads call this routine.
EXTERN bool __kmpc_kernel_parallel(void **WorkFn,
                                   int16_t IsOMPRuntimeInitialized) {
  PRINT0(LD_IO | LD_PAR, "call to __kmpc_kernel_parallel\n");

  ASSERT0(LT_FUSSY, IsOMPRuntimeInitialized, "Expected initialized runtime.");

  // Work function and arguments for L1 parallel region.
  *WorkFn = omptarget_nvptx_workFn;

  // If this is the termination signal from the master, quit early.
  if (!*WorkFn) {
    PRINT0(LD_IO | LD_PAR, "call to __kmpc_kernel_parallel finished\n");
    return false;
  }

  // Only the worker threads call this routine and the master warp
  // never arrives here.  Therefore, use the nvptx thread id.
  int threadId = GetThreadIdInBlock();
  omptarget_nvptx_WorkDescr &workDescr = getMyWorkDescriptor();
  // Set to true for workers participating in the parallel region.
  bool isActive = false;
  // Initialize state for active threads.
  if (threadId < threadsInTeam) {
    // Initialize the task descriptor from the work descriptor.
    omptarget_nvptx_TaskDescr *newTaskDescr =
        omptarget_nvptx_threadPrivateContext->Level1TaskDescr(threadId);
    ASSERT0(LT_FUSSY, newTaskDescr, "expected a task descr");
    newTaskDescr->CopyFromWorkDescr(workDescr.WorkTaskDescr());
    // install new top descriptor
    omptarget_nvptx_threadPrivateContext->SetTopLevelTaskDescr(threadId,
                                                               newTaskDescr);
    // init private from int value
    PRINT(LD_PAR,
          "thread will execute parallel region with id %d in a team of "
          "%d threads\n",
          (int)newTaskDescr->ThreadId(), (int)nThreads);

    isActive = true;
    // Reconverge the threads at the end of the parallel region to correctly
    // handle parallel levels.
    // On CUDA 9+ in non-SPMD mode we have either 1 worker thread or the
    // whole warp. If only 1 thread is active, there is no need to
    // reconverge. If we have the whole warp, reconverge all the threads in
    // the warp before changing the parallel level; otherwise the level can
    // be updated incorrectly because of thread divergence.
    bool IsActiveParallelRegion = threadsInTeam != 1;
    IncParallelLevel(IsActiveParallelRegion,
                     IsActiveParallelRegion ? __kmpc_impl_all_lanes : 1u);
  }

  return isActive;
}

EXTERN void __kmpc_kernel_end_parallel() {
  // pop stack
  PRINT0(LD_IO | LD_PAR, "call to __kmpc_kernel_end_parallel\n");
  ASSERT0(LT_FUSSY, isRuntimeInitialized(), "Expected initialized runtime.");

  // Only the worker threads call this routine and the master warp
  // never arrives here.  Therefore, use the nvptx thread id.
  int threadId = GetThreadIdInBlock();
  omptarget_nvptx_TaskDescr *currTaskDescr = getMyTopTaskDescriptor(threadId);
  omptarget_nvptx_threadPrivateContext->SetTopLevelTaskDescr(
      threadId, currTaskDescr->GetPrevTaskDescr());

  // Reconverge the threads at the end of the parallel region to correctly
  // handle parallel levels.
  // On CUDA 9+ in non-SPMD mode we have either 1 worker thread or the
  // whole warp. If only 1 thread is active, there is no need to
  // reconverge. If we have the whole warp, reconverge all the threads in
  // the warp before changing the parallel level; otherwise the level can
  // be updated incorrectly because of thread divergence.
  bool IsActiveParallelRegion = threadsInTeam != 1;
  DecParallelLevel(IsActiveParallelRegion,
                   IsActiveParallelRegion ? __kmpc_impl_all_lanes : 1u);
}

////////////////////////////////////////////////////////////////////////////////
// support for parallel that goes sequential
////////////////////////////////////////////////////////////////////////////////
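
// A nested "#pragma omp parallel" below an already-active level is
// serialized: the compiler typically brackets the outlined body with the
// two entry points below, and the body then runs on the encountering
// thread with thread id 0. A schematic lowering (outlined_body is a
// hypothetical name):
//
//   __kmpc_serialized_parallel(&loc, gtid);
//   outlined_body(...);
//   __kmpc_end_serialized_parallel(&loc, gtid);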

EXTERN void __kmpc_serialized_parallel(kmp_Ident *loc, uint32_t global_tid) {
  PRINT0(LD_IO, "call to __kmpc_serialized_parallel\n");

  IncParallelLevel(/*ActiveParallel=*/false, __kmpc_impl_activemask());

  if (checkRuntimeUninitialized(loc)) {
    ASSERT0(LT_FUSSY, checkSPMDMode(loc),
            "Expected SPMD mode with uninitialized runtime.");
    return;
  }

  // assume this is only called for nested parallel
  int threadId = GetLogicalThreadIdInBlock(checkSPMDMode(loc));

  // unlike actual parallel, threads in the same team do not share
  // the workTaskDescr in this case and num threads is fixed to 1

  // get current task
  omptarget_nvptx_TaskDescr *currTaskDescr = getMyTopTaskDescriptor(threadId);
  currTaskDescr->SaveLoopData();

  // Allocate a new task descriptor, copy values from the current one, and
  // set its prev to the current one.
  omptarget_nvptx_TaskDescr *newTaskDescr =
      (omptarget_nvptx_TaskDescr *)SafeMalloc(sizeof(omptarget_nvptx_TaskDescr),
                                              "new seq parallel task");
  newTaskDescr->CopyParent(currTaskDescr);

  // tweak values for serialized parallel case:
  // - each thread becomes ID 0 in its serialized parallel, and
  // - there is only one thread per team
  newTaskDescr->ThreadId() = 0;

  // set new task descriptor as top
  omptarget_nvptx_threadPrivateContext->SetTopLevelTaskDescr(threadId,
                                                             newTaskDescr);
}

EXTERN void __kmpc_end_serialized_parallel(kmp_Ident *loc,
                                           uint32_t global_tid) {
  PRINT0(LD_IO, "call to __kmpc_end_serialized_parallel\n");

  DecParallelLevel(/*ActiveParallel=*/false, __kmpc_impl_activemask());

  if (checkRuntimeUninitialized(loc)) {
    ASSERT0(LT_FUSSY, checkSPMDMode(loc),
            "Expected SPMD mode with uninitialized runtime.");
    return;
  }

  // pop stack
  int threadId = GetLogicalThreadIdInBlock(checkSPMDMode(loc));
  omptarget_nvptx_TaskDescr *currTaskDescr = getMyTopTaskDescriptor(threadId);
  // set new top
  omptarget_nvptx_threadPrivateContext->SetTopLevelTaskDescr(
      threadId, currTaskDescr->GetPrevTaskDescr());
  // free
  SafeFree(currTaskDescr, (char *)"new seq parallel task");
  currTaskDescr = getMyTopTaskDescriptor(threadId);
  currTaskDescr->RestoreLoopData();
}

EXTERN uint16_t __kmpc_parallel_level(kmp_Ident *loc, uint32_t global_tid) {
  PRINT0(LD_IO, "call to __kmpc_parallel_level\n");

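  // The counter packs "active" information at and above the
  // OMP_ACTIVE_PARALLEL_LEVEL bit; masking with
  // (OMP_ACTIVE_PARALLEL_LEVEL - 1) leaves just the nesting depth.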
  return parallelLevel[GetWarpId()] & (OMP_ACTIVE_PARALLEL_LEVEL - 1);
}

// This kmpc call returns the thread id across all teams. Its value is
// cached by the compiler and used when calling the runtime. On nvptx
// it's cheap to recalculate this value so we never use the result
// of this call.
EXTERN int32_t __kmpc_global_thread_num(kmp_Ident *loc) {
  int tid = GetLogicalThreadIdInBlock(checkSPMDMode(loc));
  return GetOmpThreadId(tid, checkSPMDMode(loc));
}

////////////////////////////////////////////////////////////////////////////////
// push params
////////////////////////////////////////////////////////////////////////////////
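
// These entry points record clause values in thread-private state; the
// next parallel or simd construct consumes and then resets them. For
// example, "#pragma omp parallel num_threads(4)" is typically lowered to
// a call like
//
//   __kmpc_push_num_threads(&loc, tid, 4);
//
// immediately before the parallel region is started.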

EXTERN void __kmpc_push_num_threads(kmp_Ident *loc, int32_t tid,
                                    int32_t num_threads) {
  PRINT(LD_IO, "call kmpc_push_num_threads %d\n", num_threads);
  ASSERT0(LT_FUSSY, checkRuntimeInitialized(loc),
          "Runtime must be initialized.");
  tid = GetLogicalThreadIdInBlock(checkSPMDMode(loc));
  omptarget_nvptx_threadPrivateContext->NumThreadsForNextParallel(tid) =
      num_threads;
}

EXTERN void __kmpc_push_simd_limit(kmp_Ident *loc, int32_t tid,
                                   int32_t simd_limit) {
  PRINT(LD_IO, "call kmpc_push_simd_limit %d\n", (int)simd_limit);
  ASSERT0(LT_FUSSY, checkRuntimeInitialized(loc),
          "Runtime must be initialized.");
  tid = GetLogicalThreadIdInBlock(checkSPMDMode(loc));
  omptarget_nvptx_threadPrivateContext->SimdLimitForNextSimd(tid) = simd_limit;
}

// Do nothing. The host guarantees we started the requested number of
// teams; on the device we only need to inspect gridDim.

EXTERN void __kmpc_push_num_teams(kmp_Ident *loc, int32_t tid,
                                  int32_t num_teams, int32_t thread_limit) {
  PRINT(LD_IO, "call kmpc_push_num_teams %d\n", (int)num_teams);
  ASSERT0(LT_FUSSY, 0,
          "should never have anything with new teams on device");
}

EXTERN void __kmpc_push_proc_bind(kmp_Ident *loc, uint32_t tid,
                                  int proc_bind) {
  PRINT(LD_IO, "call kmpc_push_proc_bind %d\n", (int)proc_bind);
}