//===-- MemorySSAUpdater.cpp - Memory SSA Updater--------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------===//
//
// This file implements the MemorySSAUpdater class.
//
//===----------------------------------------------------------------===//
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Analysis/IteratedDominanceFrontier.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/FormattedStream.h"
#include <algorithm>

#define DEBUG_TYPE "memoryssa"
using namespace llvm;

// This is the marker algorithm from "Simple and Efficient Construction of
// Static Single Assignment Form".
// The simple, non-marker algorithm places phi nodes at every join point.
// Here, we place markers, and only place phi nodes if they end up being
// necessary. They are only necessary if they break a cycle (i.e. we
// recursively visit ourselves again), or we discover, while getting the value
// of the operands, that there are two or more definitions needing to be
// merged.
// This will still leave non-minimal form in the case of irreducible control
// flow, where phi nodes may be in cycles with themselves but unnecessary.
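//
// A small illustration (a sketch, not taken from the paper): in a diamond
//
//   entry -> B -> D
//   entry -> C -> D
//
// asking for the previous definition at D visits both B and C; if the two
// paths yield different definitions, the operands disagree and a MemoryPhi is
// placed in D. In a loop
//
//   entry -> L -> L (back edge)
//
// the walk from L reaches L again, i.e. a cycle, so a marker MemoryPhi is
// created in L to break the recursion and serve as the operand.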
MemoryAccess *MemorySSAUpdater::getPreviousDefRecursive(
    BasicBlock *BB,
    DenseMap<BasicBlock *, TrackingVH<MemoryAccess>> &CachedPreviousDef) {
  // First, do a cache lookup. Without this cache, certain CFG structures
  // (like a series of if statements) take exponential time to visit.
  auto Cached = CachedPreviousDef.find(BB);
  if (Cached != CachedPreviousDef.end())
    return Cached->second;

  // If this method is called from an unreachable block, return LoE.
  if (!MSSA->DT->isReachableFromEntry(BB))
    return MSSA->getLiveOnEntryDef();

  if (BasicBlock *Pred = BB->getUniquePredecessor()) {
    VisitedBlocks.insert(BB);
    // Single predecessor case: just recurse, since we can only have one
    // definition.
    MemoryAccess *Result = getPreviousDefFromEnd(Pred, CachedPreviousDef);
    CachedPreviousDef.insert({BB, Result});
    return Result;
  }

  if (VisitedBlocks.count(BB)) {
    // We hit our node again, meaning we had a cycle; we must insert a phi
    // node to break it so we have an operand. The only case this will
    // insert useless phis is if we have irreducible control flow.
    MemoryAccess *Result = MSSA->createMemoryPhi(BB);
    CachedPreviousDef.insert({BB, Result});
    return Result;
  }

  if (VisitedBlocks.insert(BB).second) {
    // Mark us visited so we can detect a cycle
    SmallVector<TrackingVH<MemoryAccess>, 8> PhiOps;

    // Recurse to get the values in our predecessors for placement of a
    // potential phi node. This will insert phi nodes if we cycle in order to
    // break the cycle and have an operand.
    bool UniqueIncomingAccess = true;
    MemoryAccess *SingleAccess = nullptr;
    for (auto *Pred : predecessors(BB)) {
      if (MSSA->DT->isReachableFromEntry(Pred)) {
        auto *IncomingAccess = getPreviousDefFromEnd(Pred, CachedPreviousDef);
        if (!SingleAccess)
          SingleAccess = IncomingAccess;
        else if (IncomingAccess != SingleAccess)
          UniqueIncomingAccess = false;
        PhiOps.push_back(IncomingAccess);
      } else
        PhiOps.push_back(MSSA->getLiveOnEntryDef());
    }

    // Now try to simplify the ops to avoid placing a phi.
    // This may return null if we never created a phi yet; that's okay.
    MemoryPhi *Phi = dyn_cast_or_null<MemoryPhi>(MSSA->getMemoryAccess(BB));

    // See if we can avoid the phi by simplifying it.
    auto *Result = tryRemoveTrivialPhi(Phi, PhiOps);
    // If we couldn't simplify, we may have to create a phi
    if (Result == Phi && UniqueIncomingAccess && SingleAccess) {
      // A concrete Phi only exists if we created an empty one to break a cycle.
      if (Phi) {
        assert(Phi->operands().empty() && "Expected empty Phi");
        Phi->replaceAllUsesWith(SingleAccess);
        removeMemoryAccess(Phi);
      }
      Result = SingleAccess;
    } else if (Result == Phi && !(UniqueIncomingAccess && SingleAccess)) {
      if (!Phi)
        Phi = MSSA->createMemoryPhi(BB);

      // See if the existing phi operands match what we need.
      // Unlike normal SSA, we only allow one phi node per block, so we can't just
      // create a new one.
      if (Phi->getNumOperands() != 0) {
        // FIXME: Figure out whether this is dead code and if so remove it.
        if (!std::equal(Phi->op_begin(), Phi->op_end(), PhiOps.begin())) {
          // These will have been filled in by the recursive read we did above.
          llvm::copy(PhiOps, Phi->op_begin());
          std::copy(pred_begin(BB), pred_end(BB), Phi->block_begin());
        }
      } else {
        unsigned i = 0;
        for (auto *Pred : predecessors(BB))
          Phi->addIncoming(&*PhiOps[i++], Pred);
        InsertedPHIs.push_back(Phi);
      }
      Result = Phi;
    }

    // Set ourselves up for the next variable by resetting visited state.
    VisitedBlocks.erase(BB);
    CachedPreviousDef.insert({BB, Result});
    return Result;
  }
  llvm_unreachable("Should have hit one of the three cases above");
}

// This starts at the memory access, and goes backwards in the block to find the
// previous definition. If a definition is not found in the block of the access,
// it continues globally, creating phi nodes to ensure we have a single
// definition.
MemoryAccess *MemorySSAUpdater::getPreviousDef(MemoryAccess *MA) {
  if (auto *LocalResult = getPreviousDefInBlock(MA))
    return LocalResult;
  DenseMap<BasicBlock *, TrackingVH<MemoryAccess>> CachedPreviousDef;
  return getPreviousDefRecursive(MA->getBlock(), CachedPreviousDef);
}

// This starts at the memory access, and goes backwards in the block to find
// the previous definition. If the definition is not found in the block of the
// access, it returns nullptr.
MemoryAccess *MemorySSAUpdater::getPreviousDefInBlock(MemoryAccess *MA) {
  auto *Defs = MSSA->getWritableBlockDefs(MA->getBlock());

  // It's possible there are no defs, or we got handed the first def to start.
  if (Defs) {
    // If this is a def, we can just use the def iterators.
    if (!isa<MemoryUse>(MA)) {
      auto Iter = MA->getReverseDefsIterator();
      ++Iter;
      if (Iter != Defs->rend())
        return &*Iter;
    } else {
      // Otherwise, we have to walk the all-access iterator.
      auto End = MSSA->getWritableBlockAccesses(MA->getBlock())->rend();
      for (auto &U : make_range(++MA->getReverseIterator(), End))
        if (!isa<MemoryUse>(U))
          return cast<MemoryAccess>(&U);
      // Note that if MA comes before Defs->begin(), we won't hit a def.
      return nullptr;
    }
  }
  return nullptr;
}

// This starts at the end of the block, returning the last definition in the
// block if there is one, and otherwise recursing into the predecessors.
MemoryAccess *MemorySSAUpdater::getPreviousDefFromEnd(
    BasicBlock *BB,
    DenseMap<BasicBlock *, TrackingVH<MemoryAccess>> &CachedPreviousDef) {
  auto *Defs = MSSA->getWritableBlockDefs(BB);

  if (Defs) {
    CachedPreviousDef.insert({BB, &*Defs->rbegin()});
    return &*Defs->rbegin();
  }

  return getPreviousDefRecursive(BB, CachedPreviousDef);
}
// Recurse over a set of phi uses to eliminate the trivial ones
MemoryAccess *MemorySSAUpdater::recursePhi(MemoryAccess *Phi) {
  if (!Phi)
    return nullptr;
  TrackingVH<MemoryAccess> Res(Phi);
  SmallVector<TrackingVH<Value>, 8> Uses;
  std::copy(Phi->user_begin(), Phi->user_end(), std::back_inserter(Uses));
  for (auto &U : Uses)
    if (MemoryPhi *UsePhi = dyn_cast<MemoryPhi>(&*U))
      tryRemoveTrivialPhi(UsePhi);
  return Res;
}

// Eliminate trivial phis
// Phis are trivial if they are defined either by themselves, or all the same
// argument.
// I.e. phi(a, a) or b = phi(a, b) or c = phi(a, a, c).
// We recursively try to remove them.
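// For example (a hypothetical chain, purely illustrative): b = phi(a, b) only
// ever sees 'a' or itself, so it is replaced by 'a'. If another phi was
// c = phi(a, b), that use of b now becomes 'a', turning it into c = phi(a, a),
// which is trivial as well; recursePhi() walks the users so such cascades get
// cleaned up too.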
MemoryAccess *MemorySSAUpdater::tryRemoveTrivialPhi(MemoryPhi *Phi) {
  assert(Phi && "Can only remove concrete Phi.");
  auto OperRange = Phi->operands();
  return tryRemoveTrivialPhi(Phi, OperRange);
}
template <class RangeType>
MemoryAccess *MemorySSAUpdater::tryRemoveTrivialPhi(MemoryPhi *Phi,
                                                    RangeType &Operands) {
  // Bail out on non-opt Phis.
  if (NonOptPhis.count(Phi))
    return Phi;

  // Detect equal or self arguments
  MemoryAccess *Same = nullptr;
  for (auto &Op : Operands) {
    // If the same or self, good so far
    if (Op == Phi || Op == Same)
      continue;
    // not the same, return the phi since it's not eliminatable by us
    if (Same)
      return Phi;
    Same = cast<MemoryAccess>(&*Op);
  }
  // Never found a non-self reference, the phi is undef
  if (Same == nullptr)
    return MSSA->getLiveOnEntryDef();
  if (Phi) {
    Phi->replaceAllUsesWith(Same);
    removeMemoryAccess(Phi);
  }

  // We should only end up recursing in case we replaced something, in which
  // case, we may have made other Phis trivial.
  return recursePhi(Same);
}

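// A minimal usage sketch for the insertion entry points (hypothetical caller,
// kept as a comment; it assumes an updater MSSAU constructed over the current
// MemorySSA and the createMemoryAccessInBB helper declared in
// MemorySSAUpdater.h):
//
//   LoadInst *NewLoad = ...; // already inserted into the IR
//   MemoryAccess *NewAccess = MSSAU->createMemoryAccessInBB(
//       NewLoad, /*Definition=*/nullptr, NewLoad->getParent(), MemorySSA::End);
//   MSSAU->insertUse(cast<MemoryUse>(NewAccess), /*RenameUses=*/true);
//
// A new store would instead be wrapped in a MemoryDef the same way and handed
// to insertDef() below.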
void MemorySSAUpdater::insertUse(MemoryUse *MU, bool RenameUses) {
  InsertedPHIs.clear();
  MU->setDefiningAccess(getPreviousDef(MU));

  // In cases without unreachable blocks, because uses do not create new
  // may-defs, there are only two cases:
  // 1. There was a def already below us, and therefore, we should not have
  // created a phi node because it was already needed for the def.
  //
  // 2. There is no def below us, and therefore, there is no extra renaming work
  // to do.

  // In cases with unreachable blocks, where the unnecessary Phis were
  // optimized out, adding the Use may re-insert those Phis. Hence, when
  // inserting Uses outside of the MSSA creation process, if new Phis were
  // added, rename all uses if we are asked.

  if (!RenameUses && !InsertedPHIs.empty()) {
    auto *Defs = MSSA->getBlockDefs(MU->getBlock());
    (void)Defs;
    assert((!Defs || (++Defs->begin() == Defs->end())) &&
           "Block may have only a Phi or no defs");
  }

  if (RenameUses && InsertedPHIs.size()) {
    SmallPtrSet<BasicBlock *, 16> Visited;
    BasicBlock *StartBlock = MU->getBlock();

    if (auto *Defs = MSSA->getWritableBlockDefs(StartBlock)) {
      MemoryAccess *FirstDef = &*Defs->begin();
      // Convert to incoming value if it's a memorydef. A phi *is* already an
      // incoming value.
      if (auto *MD = dyn_cast<MemoryDef>(FirstDef))
        FirstDef = MD->getDefiningAccess();

      MSSA->renamePass(MU->getBlock(), FirstDef, Visited);
    }
    // We just inserted a phi into this block, so the incoming value will
    // become the phi anyway, so it does not matter what we pass.
    for (auto &MP : InsertedPHIs)
      if (MemoryPhi *Phi = cast_or_null<MemoryPhi>(MP))
        MSSA->renamePass(Phi->getBlock(), nullptr, Visited);
  }
}

// Set every incoming edge {BB, MP->getBlock()} of MemoryPhi MP to NewDef.
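// For illustration (hypothetical IR): a switch in BB with two cases branching
// to MP->getBlock() contributes two identical {BB, MP->getBlock()} edges, and
// the Phi carries one operand per such edge; the loop below therefore keeps
// setting operands while the incoming block is still BB, rather than updating
// a single one.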
static void setMemoryPhiValueForBlock(MemoryPhi *MP, const BasicBlock *BB,
                                      MemoryAccess *NewDef) {
  // Replace the incoming value of every operand whose incoming block is BB
  // with the new defining access.
  int i = MP->getBasicBlockIndex(BB);
  assert(i != -1 && "Should have found the basic block in the phi");
  // We can't just compare i against getNumOperands since one is signed and the
  // other not. So use it to index into the block iterator.
  for (auto BBIter = MP->block_begin() + i; BBIter != MP->block_end();
       ++BBIter) {
    if (*BBIter != BB)
      break;
    MP->setIncomingValue(i, NewDef);
    ++i;
  }
}

// A brief description of the algorithm:
// First, we compute what should define the new def, using the SSA
// construction algorithm.
// Then, we update the defs below us (and any new phi nodes) in the graph to
// point to the correct new defs, to ensure we only have one variable, and no
// disconnected stores.
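// For illustration (a sketch of the same-block case): if a block contains
//   1 = MemoryDef(liveOnEntry)
//   2 = MemoryDef(1)
// and a new store is inserted between them, the new access becomes defined by
// 1, while access 2 (and any MemoryPhi that used 1) is retargeted to the new
// def; MemoryUses are deliberately left alone and only renamed if requested.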
void MemorySSAUpdater::insertDef(MemoryDef *MD, bool RenameUses) {
  InsertedPHIs.clear();

  // See if we had a local def, and if not, go hunting.
  MemoryAccess *DefBefore = getPreviousDef(MD);
  bool DefBeforeSameBlock = false;
  if (DefBefore->getBlock() == MD->getBlock() &&
      !(isa<MemoryPhi>(DefBefore) &&
        std::find(InsertedPHIs.begin(), InsertedPHIs.end(), DefBefore) !=
            InsertedPHIs.end()))
    DefBeforeSameBlock = true;

  // There is a def before us, which means we can replace any store/phi uses
  // of that thing with us, since we are in the way of whatever was there
  // before.
  // We now define that def's memorydefs and memoryphis
  if (DefBeforeSameBlock) {
    DefBefore->replaceUsesWithIf(MD, [MD](Use &U) {
      // Leave the MemoryUses alone.
      // Also make sure we skip ourselves to avoid self references.
      User *Usr = U.getUser();
      return !isa<MemoryUse>(Usr) && Usr != MD;
      // Defs are automatically unoptimized when the user is set to MD below,
      // because the isOptimized() call will fail to find the same ID.
    });
  }

  // and that def is now our defining access.
  MD->setDefiningAccess(DefBefore);

  SmallVector<WeakVH, 8> FixupList(InsertedPHIs.begin(), InsertedPHIs.end());

  // Remember the index where we may insert new phis.
  unsigned NewPhiIndex = InsertedPHIs.size();
  if (!DefBeforeSameBlock) {
    // If there was a local def before us, we must have the same effect it
    // did. Because every may-def is the same, any phis/etc we would create, it
    // would also have created.  If there was no local def before us, we
    // performed a global update, and have to search all successors and make
    // sure we update the first def in each of them (following all paths until
    // we hit the first def along each path). This may also insert phi nodes.
    // TODO: There are other cases we can skip this work, such as when we have a
    // single successor, and only used a straight line of single pred blocks
    // backwards to find the def.  To make that work, we'd have to track whether
    // getDefRecursive only ever used the single predecessor case.  These types
    // of paths also only exist in between CFG simplifications.

    // If this is the first def in the block and this insert is in an arbitrary
    // place, compute IDF and place phis.
    SmallPtrSet<BasicBlock *, 2> DefiningBlocks;

    // If this is the last Def in the block, also compute IDF based on MD,
    // since this may be a newly added Def and we may need additional Phis.
    auto Iter = MD->getDefsIterator();
    ++Iter;
    auto IterEnd = MSSA->getBlockDefs(MD->getBlock())->end();
    if (Iter == IterEnd)
      DefiningBlocks.insert(MD->getBlock());

    for (const auto &VH : InsertedPHIs)
      if (const auto *RealPHI = cast_or_null<MemoryPhi>(VH))
        DefiningBlocks.insert(RealPHI->getBlock());
    ForwardIDFCalculator IDFs(*MSSA->DT);
    SmallVector<BasicBlock *, 32> IDFBlocks;
    IDFs.setDefiningBlocks(DefiningBlocks);
    IDFs.calculate(IDFBlocks);
    SmallVector<AssertingVH<MemoryPhi>, 4> NewInsertedPHIs;
    for (auto *BBIDF : IDFBlocks) {
      auto *MPhi = MSSA->getMemoryAccess(BBIDF);
      if (!MPhi) {
        MPhi = MSSA->createMemoryPhi(BBIDF);
        NewInsertedPHIs.push_back(MPhi);
      }
      // Add the phis created into the IDF blocks to NonOptPhis, so they are not
      // optimized out as trivial by the call to getPreviousDefFromEnd below.
      // Once they are complete, all these Phis are added to the FixupList, and
      // removed from NonOptPhis inside fixupDefs(). Existing Phis in IDF may
      // need fixing as well, and potentially be trivial before this insertion,
      // hence add all IDF Phis. See PR43044.
      NonOptPhis.insert(MPhi);
    }
    for (auto &MPhi : NewInsertedPHIs) {
      auto *BBIDF = MPhi->getBlock();
      for (auto *Pred : predecessors(BBIDF)) {
        DenseMap<BasicBlock *, TrackingVH<MemoryAccess>> CachedPreviousDef;
        MPhi->addIncoming(getPreviousDefFromEnd(Pred, CachedPreviousDef), Pred);
      }
    }

    // Re-take the index where we're adding the new phis, because the above
    // call to getPreviousDefFromEnd may have inserted into InsertedPHIs.
    NewPhiIndex = InsertedPHIs.size();
    for (auto &MPhi : NewInsertedPHIs) {
      InsertedPHIs.push_back(&*MPhi);
      FixupList.push_back(&*MPhi);
    }

    FixupList.push_back(MD);
  }

  // Remember the index where we stopped inserting new phis above, since the
  // fixupDefs call in the loop below may insert more phis that are already
  // minimal.
  unsigned NewPhiIndexEnd = InsertedPHIs.size();

  while (!FixupList.empty()) {
    unsigned StartingPHISize = InsertedPHIs.size();
    fixupDefs(FixupList);
    FixupList.clear();
    // Put any new phis on the fixup list, and process them
    FixupList.append(InsertedPHIs.begin() + StartingPHISize, InsertedPHIs.end());
  }

  // Optimize potentially non-minimal phis added in this method.
  unsigned NewPhiSize = NewPhiIndexEnd - NewPhiIndex;
  if (NewPhiSize)
    tryRemoveTrivialPhis(ArrayRef<WeakVH>(&InsertedPHIs[NewPhiIndex], NewPhiSize));

  // Now that all fixups are done, rename all uses if we are asked.
  if (RenameUses) {
    SmallPtrSet<BasicBlock *, 16> Visited;
    BasicBlock *StartBlock = MD->getBlock();
    // We are guaranteed there is a def in the block, because we just got it
    // handed to us in this function.
    MemoryAccess *FirstDef = &*MSSA->getWritableBlockDefs(StartBlock)->begin();
    // Convert to incoming value if it's a memorydef. A phi *is* already an
    // incoming value.
    if (auto *MD = dyn_cast<MemoryDef>(FirstDef))
      FirstDef = MD->getDefiningAccess();

    MSSA->renamePass(MD->getBlock(), FirstDef, Visited);
    // We just inserted a phi into this block, so the incoming value will become
    // the phi anyway, so it does not matter what we pass.
    for (auto &MP : InsertedPHIs) {
      MemoryPhi *Phi = dyn_cast_or_null<MemoryPhi>(MP);
      if (Phi)
        MSSA->renamePass(Phi->getBlock(), nullptr, Visited);
    }
  }
}

void MemorySSAUpdater::fixupDefs(const SmallVectorImpl<WeakVH> &Vars) {
  SmallPtrSet<const BasicBlock *, 8> Seen;
  SmallVector<const BasicBlock *, 16> Worklist;
  for (auto &Var : Vars) {
    MemoryAccess *NewDef = dyn_cast_or_null<MemoryAccess>(Var);
    if (!NewDef)
      continue;
    // First, see if there is a local def after the operand.
    auto *Defs = MSSA->getWritableBlockDefs(NewDef->getBlock());
    auto DefIter = NewDef->getDefsIterator();

    // The temporary Phi is being fixed; unmark it so it can be optimized.
    if (MemoryPhi *Phi = dyn_cast<MemoryPhi>(NewDef))
      NonOptPhis.erase(Phi);

    // If there is a local def after us, we only have to rename that.
    if (++DefIter != Defs->end()) {
      cast<MemoryDef>(DefIter)->setDefiningAccess(NewDef);
      continue;
    }

    // Otherwise, we need to search down through the CFG.
    // For each of our successors, handle it directly if there is a phi, or
    // place it on the fixup worklist.
    for (const auto *S : successors(NewDef->getBlock())) {
      if (auto *MP = MSSA->getMemoryAccess(S))
        setMemoryPhiValueForBlock(MP, NewDef->getBlock(), NewDef);
      else
        Worklist.push_back(S);
    }

    while (!Worklist.empty()) {
      const BasicBlock *FixupBlock = Worklist.back();
      Worklist.pop_back();

      // Get the first def in the block that isn't a phi node.
      if (auto *Defs = MSSA->getWritableBlockDefs(FixupBlock)) {
        auto *FirstDef = &*Defs->begin();
        // The loop above and below should have taken care of phi nodes
        assert(!isa<MemoryPhi>(FirstDef) &&
               "Should have already handled phi nodes!");
        // We are now this def's defining access, make sure we actually dominate
        // it
        assert(MSSA->dominates(NewDef, FirstDef) &&
               "Should have dominated the new access");

        // This may insert new phi nodes, because we are not guaranteed the
        // block we are processing has a single pred, and depending where the
        // store was inserted, it may require phi nodes below it.
        cast<MemoryDef>(FirstDef)->setDefiningAccess(getPreviousDef(FirstDef));
        return;
      }
      // We didn't find a def, so we must continue.
      for (const auto *S : successors(FixupBlock)) {
        // If there is a phi node, handle it.
        // Otherwise, put the block on the worklist
        if (auto *MP = MSSA->getMemoryAccess(S))
          setMemoryPhiValueForBlock(MP, FixupBlock, NewDef);
        else {
          // If we cycle, we should have ended up at a phi node that we already
          // processed.  FIXME: Double check this
          if (!Seen.insert(S).second)
            continue;
          Worklist.push_back(S);
        }
      }
    }
  }
}

void MemorySSAUpdater::removeEdge(BasicBlock *From, BasicBlock *To) {
  if (MemoryPhi *MPhi = MSSA->getMemoryAccess(To)) {
    MPhi->unorderedDeleteIncomingBlock(From);
    tryRemoveTrivialPhi(MPhi);
  }
}

void MemorySSAUpdater::removeDuplicatePhiEdgesBetween(const BasicBlock *From,
                                                      const BasicBlock *To) {
  if (MemoryPhi *MPhi = MSSA->getMemoryAccess(To)) {
    bool Found = false;
    MPhi->unorderedDeleteIncomingIf([&](const MemoryAccess *, BasicBlock *B) {
      if (From != B)
        return false;
      if (Found)
        return true;
      Found = true;
      return false;
    });
    tryRemoveTrivialPhi(MPhi);
  }
}

static MemoryAccess *getNewDefiningAccessForClone(MemoryAccess *MA,
                                                  const ValueToValueMapTy &VMap,
                                                  PhiToDefMap &MPhiMap,
                                                  bool CloneWasSimplified,
                                                  MemorySSA *MSSA) {
  MemoryAccess *InsnDefining = MA;
  if (MemoryDef *DefMUD = dyn_cast<MemoryDef>(InsnDefining)) {
    if (!MSSA->isLiveOnEntryDef(DefMUD)) {
      Instruction *DefMUDI = DefMUD->getMemoryInst();
      assert(DefMUDI && "Found MemoryUseOrDef with no Instruction.");
      if (Instruction *NewDefMUDI =
              cast_or_null<Instruction>(VMap.lookup(DefMUDI))) {
        InsnDefining = MSSA->getMemoryAccess(NewDefMUDI);
        if (!CloneWasSimplified)
          assert(InsnDefining && "Defining instruction cannot be nullptr.");
        else if (!InsnDefining || isa<MemoryUse>(InsnDefining)) {
          // The clone was simplified and is no longer a MemoryDef; look
          // further up the defs list.
          auto DefIt = DefMUD->getDefsIterator();
          // Since simplified clones only occur in single block cloning, a
          // previous definition must exist, otherwise NewDefMUDI would not
          // have been found in VMap.
          assert(DefIt != MSSA->getBlockDefs(DefMUD->getBlock())->begin() &&
                 "Previous def must exist");
          InsnDefining = getNewDefiningAccessForClone(
              &*(--DefIt), VMap, MPhiMap, CloneWasSimplified, MSSA);
        }
      }
    }
  } else {
    MemoryPhi *DefPhi = cast<MemoryPhi>(InsnDefining);
    if (MemoryAccess *NewDefPhi = MPhiMap.lookup(DefPhi))
      InsnDefining = NewDefPhi;
  }
  assert(InsnDefining && "Defining instruction cannot be nullptr.");
  return InsnDefining;
}

void MemorySSAUpdater::cloneUsesAndDefs(BasicBlock *BB, BasicBlock *NewBB,
                                        const ValueToValueMapTy &VMap,
                                        PhiToDefMap &MPhiMap,
                                        bool CloneWasSimplified) {
  const MemorySSA::AccessList *Acc = MSSA->getBlockAccesses(BB);
  if (!Acc)
    return;
  for (const MemoryAccess &MA : *Acc) {
    if (const MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(&MA)) {
      Instruction *Insn = MUD->getMemoryInst();
      // Entry does not exist if the clone of the block did not clone all
      // instructions. This occurs in LoopRotate when cloning instructions
      // from the old header to the old preheader. The cloned instruction may
      // also be a simplified Value, not an Instruction (see LoopRotate).
      // Also in LoopRotate, even when it's an instruction, due to it being
      // simplified, it may be a Use rather than a Def, so we cannot use MUD as
      // template. Calls coming from updateForClonedBlockIntoPred ensure this.
      if (Instruction *NewInsn =
              dyn_cast_or_null<Instruction>(VMap.lookup(Insn))) {
        MemoryAccess *NewUseOrDef = MSSA->createDefinedAccess(
            NewInsn,
            getNewDefiningAccessForClone(MUD->getDefiningAccess(), VMap,
                                         MPhiMap, CloneWasSimplified, MSSA),
            /*Template=*/CloneWasSimplified ? nullptr : MUD,
            /*CreationMustSucceed=*/CloneWasSimplified ? false : true);
        if (NewUseOrDef)
          MSSA->insertIntoListsForBlock(NewUseOrDef, NewBB, MemorySSA::End);
      }
    }
  }
}

void MemorySSAUpdater::updatePhisWhenInsertingUniqueBackedgeBlock(
    BasicBlock *Header, BasicBlock *Preheader, BasicBlock *BEBlock) {
  auto *MPhi = MSSA->getMemoryAccess(Header);
  if (!MPhi)
    return;

  // Create phi node in the backedge block and populate it with the same
  // incoming values as MPhi. Skip incoming values coming from Preheader.
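  // For illustration (hypothetical loop shape): with latches L1 and L2,
  //   Preheader -> Header <- L1, L2
  // becomes
  //   Preheader -> Header <- BEBlock <- L1, L2
  // so the Header Phi keeps only {Preheader, BEBlock} as incoming blocks and
  // NewMPhi in BEBlock collects the old incoming values from L1 and L2.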
  auto *NewMPhi = MSSA->createMemoryPhi(BEBlock);
  bool HasUniqueIncomingValue = true;
  MemoryAccess *UniqueValue = nullptr;
  for (unsigned I = 0, E = MPhi->getNumIncomingValues(); I != E; ++I) {
    BasicBlock *IBB = MPhi->getIncomingBlock(I);
    MemoryAccess *IV = MPhi->getIncomingValue(I);
    if (IBB != Preheader) {
      NewMPhi->addIncoming(IV, IBB);
      if (HasUniqueIncomingValue) {
        if (!UniqueValue)
          UniqueValue = IV;
        else if (UniqueValue != IV)
          HasUniqueIncomingValue = false;
      }
    }
  }

  // Update incoming edges into MPhi. Remove all but the incoming edge from
  // Preheader. Add an edge from NewMPhi.
  auto *AccFromPreheader = MPhi->getIncomingValueForBlock(Preheader);
  MPhi->setIncomingValue(0, AccFromPreheader);
  MPhi->setIncomingBlock(0, Preheader);
  for (unsigned I = MPhi->getNumIncomingValues() - 1; I >= 1; --I)
    MPhi->unorderedDeleteIncoming(I);
  MPhi->addIncoming(NewMPhi, BEBlock);

  // If NewMPhi is a trivial phi, remove it. Its use in the header MPhi will be
  // replaced with the unique value.
  tryRemoveTrivialPhi(NewMPhi);
}

void MemorySSAUpdater::updateForClonedLoop(const LoopBlocksRPO &LoopBlocks,
                                           ArrayRef<BasicBlock *> ExitBlocks,
                                           const ValueToValueMapTy &VMap,
                                           bool IgnoreIncomingWithNoClones) {
  PhiToDefMap MPhiMap;

  auto FixPhiIncomingValues = [&](MemoryPhi *Phi, MemoryPhi *NewPhi) {
    assert(Phi && NewPhi && "Invalid Phi nodes.");
    BasicBlock *NewPhiBB = NewPhi->getBlock();
    SmallPtrSet<BasicBlock *, 4> NewPhiBBPreds(pred_begin(NewPhiBB),
                                               pred_end(NewPhiBB));
    for (unsigned It = 0, E = Phi->getNumIncomingValues(); It < E; ++It) {
      MemoryAccess *IncomingAccess = Phi->getIncomingValue(It);
      BasicBlock *IncBB = Phi->getIncomingBlock(It);

      if (BasicBlock *NewIncBB = cast_or_null<BasicBlock>(VMap.lookup(IncBB)))
        IncBB = NewIncBB;
      else if (IgnoreIncomingWithNoClones)
        continue;

      // Now we have IncBB, and will need to add incoming from it to NewPhi.

      // If IncBB is not a predecessor of NewPhiBB, then do not add it.
      // NewPhiBB was cloned without that edge.
      if (!NewPhiBBPreds.count(IncBB))
        continue;

      // Determine incoming value and add it as incoming from IncBB.
      if (MemoryUseOrDef *IncMUD = dyn_cast<MemoryUseOrDef>(IncomingAccess)) {
        if (!MSSA->isLiveOnEntryDef(IncMUD)) {
          Instruction *IncI = IncMUD->getMemoryInst();
          assert(IncI && "Found MemoryUseOrDef with no Instruction.");
          if (Instruction *NewIncI =
                  cast_or_null<Instruction>(VMap.lookup(IncI))) {
            IncMUD = MSSA->getMemoryAccess(NewIncI);
            assert(IncMUD &&
                   "MemoryUseOrDef cannot be null, all preds processed.");
          }
        }
        NewPhi->addIncoming(IncMUD, IncBB);
      } else {
        MemoryPhi *IncPhi = cast<MemoryPhi>(IncomingAccess);
        if (MemoryAccess *NewDefPhi = MPhiMap.lookup(IncPhi))
          NewPhi->addIncoming(NewDefPhi, IncBB);
        else
          NewPhi->addIncoming(IncPhi, IncBB);
      }
    }
  };

  auto ProcessBlock = [&](BasicBlock *BB) {
    BasicBlock *NewBlock = cast_or_null<BasicBlock>(VMap.lookup(BB));
    if (!NewBlock)
      return;

    assert(!MSSA->getWritableBlockAccesses(NewBlock) &&
           "Cloned block should have no accesses");

    // Add MemoryPhi.
    if (MemoryPhi *MPhi = MSSA->getMemoryAccess(BB)) {
      MemoryPhi *NewPhi = MSSA->createMemoryPhi(NewBlock);
      MPhiMap[MPhi] = NewPhi;
    }
    // Update Uses and Defs.
    cloneUsesAndDefs(BB, NewBlock, VMap, MPhiMap);
  };

  for (auto BB : llvm::concat<BasicBlock *const>(LoopBlocks, ExitBlocks))
    ProcessBlock(BB);

  for (auto BB : llvm::concat<BasicBlock *const>(LoopBlocks, ExitBlocks))
    if (MemoryPhi *MPhi = MSSA->getMemoryAccess(BB))
      if (MemoryAccess *NewPhi = MPhiMap.lookup(MPhi))
        FixPhiIncomingValues(MPhi, cast<MemoryPhi>(NewPhi));
}

void MemorySSAUpdater::updateForClonedBlockIntoPred(
    BasicBlock *BB, BasicBlock *P1, const ValueToValueMapTy &VM) {
  // All defs/phis from outside BB that are used in BB are valid uses in P1,
  // since those defs/phis must have dominated BB and thus also dominate P1.
  // Defs from BB being used in BB will be replaced with the cloned defs from
  // VM. The uses of BB's Phi (if it exists) in BB will be replaced by the
  // incoming def into the Phi from P1.
  // Instructions cloned into the predecessor are in practice sometimes
  // simplified, so disable the use of the template, and create an access from
  // scratch.
  PhiToDefMap MPhiMap;
  if (MemoryPhi *MPhi = MSSA->getMemoryAccess(BB))
    MPhiMap[MPhi] = MPhi->getIncomingValueForBlock(P1);
  cloneUsesAndDefs(BB, P1, VM, MPhiMap, /*CloneWasSimplified=*/true);
}

template <typename Iter>
void MemorySSAUpdater::privateUpdateExitBlocksForClonedLoop(
    ArrayRef<BasicBlock *> ExitBlocks, Iter ValuesBegin, Iter ValuesEnd,
    DominatorTree &DT) {
  SmallVector<CFGUpdate, 4> Updates;
  // Update/insert phis in all successors of exit blocks.
  for (auto *Exit : ExitBlocks)
    for (const ValueToValueMapTy *VMap : make_range(ValuesBegin, ValuesEnd))
      if (BasicBlock *NewExit = cast_or_null<BasicBlock>(VMap->lookup(Exit))) {
        BasicBlock *ExitSucc = NewExit->getTerminator()->getSuccessor(0);
        Updates.push_back({DT.Insert, NewExit, ExitSucc});
      }
  applyInsertUpdates(Updates, DT);
}

void MemorySSAUpdater::updateExitBlocksForClonedLoop(
    ArrayRef<BasicBlock *> ExitBlocks, const ValueToValueMapTy &VMap,
    DominatorTree &DT) {
  const ValueToValueMapTy *const Arr[] = {&VMap};
  privateUpdateExitBlocksForClonedLoop(ExitBlocks, std::begin(Arr),
                                       std::end(Arr), DT);
}

void MemorySSAUpdater::updateExitBlocksForClonedLoop(
    ArrayRef<BasicBlock *> ExitBlocks,
    ArrayRef<std::unique_ptr<ValueToValueMapTy>> VMaps, DominatorTree &DT) {
  auto GetPtr = [&](const std::unique_ptr<ValueToValueMapTy> &I) {
    return I.get();
  };
  using MappedIteratorType =
      mapped_iterator<const std::unique_ptr<ValueToValueMapTy> *,
                      decltype(GetPtr)>;
  auto MapBegin = MappedIteratorType(VMaps.begin(), GetPtr);
  auto MapEnd = MappedIteratorType(VMaps.end(), GetPtr);
  privateUpdateExitBlocksForClonedLoop(ExitBlocks, MapBegin, MapEnd, DT);
}

void MemorySSAUpdater::applyUpdates(ArrayRef<CFGUpdate> Updates,
                                    DominatorTree &DT) {
  SmallVector<CFGUpdate, 4> RevDeleteUpdates;
  SmallVector<CFGUpdate, 4> InsertUpdates;
  for (auto &Update : Updates) {
    if (Update.getKind() == DT.Insert)
      InsertUpdates.push_back({DT.Insert, Update.getFrom(), Update.getTo()});
    else
      RevDeleteUpdates.push_back({DT.Insert, Update.getFrom(), Update.getTo()});
  }

  if (!RevDeleteUpdates.empty()) {
    // Update for inserted edges: use newDT and snapshot CFG as if deletes had
    // not occurred.
    // FIXME: This creates a new DT, so it's more expensive to do mixed
    // deletes/inserts vs just inserts. We can do an incremental update on the
    // DT to revert deletes, then re-delete the edges. Teaching DT to do this
    // is part of a pending cleanup.
    DominatorTree NewDT(DT, RevDeleteUpdates);
    GraphDiff<BasicBlock *> GD(RevDeleteUpdates);
    applyInsertUpdates(InsertUpdates, NewDT, &GD);
  } else {
    GraphDiff<BasicBlock *> GD;
    applyInsertUpdates(InsertUpdates, DT, &GD);
  }

  // Update for deleted edges
  for (auto &Update : RevDeleteUpdates)
    removeEdge(Update.getFrom(), Update.getTo());
}

void MemorySSAUpdater::applyInsertUpdates(ArrayRef<CFGUpdate> Updates,
                                          DominatorTree &DT) {
  GraphDiff<BasicBlock *> GD;
  applyInsertUpdates(Updates, DT, &GD);
}

void MemorySSAUpdater::applyInsertUpdates(ArrayRef<CFGUpdate> Updates,
                                          DominatorTree &DT,
                                          const GraphDiff<BasicBlock *> *GD) {
  // Get recursive last Def, assuming well formed MSSA and updated DT.
  auto GetLastDef = [&](BasicBlock *BB) -> MemoryAccess * {
    while (true) {
      MemorySSA::DefsList *Defs = MSSA->getWritableBlockDefs(BB);
      // Return last Def or Phi in BB, if it exists.
      if (Defs)
        return &*(--Defs->end());

      // Check number of predecessors, we only care if there's more than one.
      unsigned Count = 0;
      BasicBlock *Pred = nullptr;
      for (auto &Pair : children<GraphDiffInvBBPair>({GD, BB})) {
        Pred = Pair.second;
        Count++;
        if (Count == 2)
          break;
      }

      // If BB has multiple predecessors, get last definition from IDom.
      if (Count != 1) {
        // [SimpleLoopUnswitch] If BB is a dead block, about to be deleted, its
        // DT node is invalidated. Return LoE as its last def. This will be
        // added to the MemoryPhi node, and later deleted when the block is
        // deleted.
        if (!DT.getNode(BB))
          return MSSA->getLiveOnEntryDef();
        if (auto *IDom = DT.getNode(BB)->getIDom())
          if (IDom->getBlock() != BB) {
            BB = IDom->getBlock();
            continue;
          }
        return MSSA->getLiveOnEntryDef();
      } else {
        // Single predecessor, BB cannot be dead. GetLastDef of Pred.
        assert(Count == 1 && Pred && "Single predecessor expected.");
        // BB can be unreachable though, return LoE if that is the case.
        if (!DT.getNode(BB))
          return MSSA->getLiveOnEntryDef();
        BB = Pred;
      }
    };
    llvm_unreachable("Unable to get last definition.");
  };

  // Get nearest IDom given a set of blocks.
  // TODO: this can be optimized by starting the search at the node with the
  // lowest level (highest in the tree).
  auto FindNearestCommonDominator =
      [&](const SmallSetVector<BasicBlock *, 2> &BBSet) -> BasicBlock * {
    BasicBlock *PrevIDom = *BBSet.begin();
    for (auto *BB : BBSet)
      PrevIDom = DT.findNearestCommonDominator(PrevIDom, BB);
    return PrevIDom;
  };

  // Get all blocks that dominate PrevIDom, stop when reaching CurrIDom. Do not
  // include CurrIDom.
  auto GetNoLongerDomBlocks =
      [&](BasicBlock *PrevIDom, BasicBlock *CurrIDom,
          SmallVectorImpl<BasicBlock *> &BlocksPrevDom) {
        if (PrevIDom == CurrIDom)
          return;
        BlocksPrevDom.push_back(PrevIDom);
        BasicBlock *NextIDom = PrevIDom;
        while (BasicBlock *UpIDom =
                   DT.getNode(NextIDom)->getIDom()->getBlock()) {
          if (UpIDom == CurrIDom)
            break;
          BlocksPrevDom.push_back(UpIDom);
          NextIDom = UpIDom;
        }
      };

  // Map a BB to its predecessors: added + previously existing. To get a
  // deterministic order, store predecessors as SetVectors. The order in each
  // will be defined by the order in Updates (fixed) and the order given by
  // children<> (also fixed). Since we further iterate over these ordered sets,
  // we lose the information of multiple edges possibly existing between two
  // blocks, so we'll keep an EdgeCount map for that.
  // An alternate implementation could keep an unordered set for the predecessors,
  // traverse either Updates or children<> each time to get the deterministic
  // order, and drop the usage of EdgeCount. This alternate approach would still
  // require querying the maps for each predecessor, and children<> call has
  // additional computation inside for creating the snapshot-graph predecessors.
  // As such, we favor using a little additional storage and less compute time.
  // This decision can be revisited if we find the alternative more favorable.
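  // For illustration (hypothetical CFG): if a switch in P has two cases
  // branching to BB, children<> reports P twice, so EdgeCountMap[{P, BB}]
  // becomes 2 and the Phi in BB later receives two identical incoming entries
  // for P, matching the number of edges in the IR.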

  struct PredInfo {
    SmallSetVector<BasicBlock *, 2> Added;
    SmallSetVector<BasicBlock *, 2> Prev;
  };
  SmallDenseMap<BasicBlock *, PredInfo> PredMap;

  for (auto &Edge : Updates) {
    BasicBlock *BB = Edge.getTo();
    auto &AddedBlockSet = PredMap[BB].Added;
    AddedBlockSet.insert(Edge.getFrom());
  }

  // Store all existing predecessors for each BB; at least one must exist.
  SmallDenseMap<std::pair<BasicBlock *, BasicBlock *>, int> EdgeCountMap;
  SmallPtrSet<BasicBlock *, 2> NewBlocks;
  for (auto &BBPredPair : PredMap) {
    auto *BB = BBPredPair.first;
    const auto &AddedBlockSet = BBPredPair.second.Added;
    auto &PrevBlockSet = BBPredPair.second.Prev;
    for (auto &Pair : children<GraphDiffInvBBPair>({GD, BB})) {
      BasicBlock *Pi = Pair.second;
      if (!AddedBlockSet.count(Pi))
        PrevBlockSet.insert(Pi);
      EdgeCountMap[{Pi, BB}]++;
    }

    if (PrevBlockSet.empty()) {
      assert(pred_size(BB) == AddedBlockSet.size() && "Duplicate edges added.");
      LLVM_DEBUG(
          dbgs()
          << "Adding a predecessor to a block with no predecessors. "
             "This must be an edge added to a new, likely cloned, block. "
             "Its memory accesses must be already correct, assuming completed "
             "via the updateExitBlocksForClonedLoop API. "
             "Assert a single such edge is added so no phi addition or "
             "additional processing is required.\n");
      assert(AddedBlockSet.size() == 1 &&
             "Can only handle adding one predecessor to a new block.");
      // Need to remove new blocks from PredMap. Remove below to not invalidate
      // iterator here.
      NewBlocks.insert(BB);
    }
  }
  // Nothing to process for new/cloned blocks.
  for (auto *BB : NewBlocks)
    PredMap.erase(BB);

  SmallVector<BasicBlock *, 16> BlocksWithDefsToReplace;
  SmallVector<WeakVH, 8> InsertedPhis;

  // First create MemoryPhis in all blocks that don't have one. Create in the
  // order found in Updates, not in PredMap, to get deterministic numbering.
  for (auto &Edge : Updates) {
    BasicBlock *BB = Edge.getTo();
    if (PredMap.count(BB) && !MSSA->getMemoryAccess(BB))
      InsertedPhis.push_back(MSSA->createMemoryPhi(BB));
  }

  // Now we'll fill in the MemoryPhis with the right incoming values.
  for (auto &BBPredPair : PredMap) {
    auto *BB = BBPredPair.first;
    const auto &PrevBlockSet = BBPredPair.second.Prev;
    const auto &AddedBlockSet = BBPredPair.second.Added;
    assert(!PrevBlockSet.empty() &&
           "At least one previous predecessor must exist.");

    // TODO: if this becomes a bottleneck, we can save on GetLastDef calls by
    // keeping this map before the loop. We can reuse already populated entries
    // if an edge is added from the same predecessor to two different blocks,
    // and this does happen in rotate. Note that the map needs to be updated
    // when deleting non-necessary phis below, if the phi is in the map by
    // replacing the value with DefP1.
    SmallDenseMap<BasicBlock *, MemoryAccess *> LastDefAddedPred;
    for (auto *AddedPred : AddedBlockSet) {
      auto *DefPn = GetLastDef(AddedPred);
      assert(DefPn != nullptr && "Unable to find last definition.");
      LastDefAddedPred[AddedPred] = DefPn;
    }

    MemoryPhi *NewPhi = MSSA->getMemoryAccess(BB);
    // If Phi is not empty, add an incoming edge from each added pred. Must
    // still compute blocks with defs to replace for this block below.
    if (NewPhi->getNumOperands()) {
      for (auto *Pred : AddedBlockSet) {
        auto *LastDefForPred = LastDefAddedPred[Pred];
        for (int I = 0, E = EdgeCountMap[{Pred, BB}]; I < E; ++I)
          NewPhi->addIncoming(LastDefForPred, Pred);
      }
    } else {
      // Pick any existing predecessor and get its definition. All other
      // existing predecessors should have the same one, since no phi existed.
      auto *P1 = *PrevBlockSet.begin();
      MemoryAccess *DefP1 = GetLastDef(P1);

      // Check DefP1 against all Defs in LastDefAddedPred. If all the same,
      // nothing to add.
      bool InsertPhi = false;
      for (auto LastDefPredPair : LastDefAddedPred)
        if (DefP1 != LastDefPredPair.second) {
          InsertPhi = true;
          break;
        }
      if (!InsertPhi) {
        // Since NewPhi may be used in other newly added Phis, replace all uses
        // of NewPhi with the definition coming from all predecessors (DefP1),
        // before deleting it.
        NewPhi->replaceAllUsesWith(DefP1);
        removeMemoryAccess(NewPhi);
        continue;
      }

      // Update Phi with new values for new predecessors and old value for all
      // other predecessors. Since AddedBlockSet and PrevBlockSet are ordered
      // sets, the order of entries in NewPhi is deterministic.
      for (auto *Pred : AddedBlockSet) {
        auto *LastDefForPred = LastDefAddedPred[Pred];
        for (int I = 0, E = EdgeCountMap[{Pred, BB}]; I < E; ++I)
          NewPhi->addIncoming(LastDefForPred, Pred);
      }
      for (auto *Pred : PrevBlockSet)
        for (int I = 0, E = EdgeCountMap[{Pred, BB}]; I < E; ++I)
          NewPhi->addIncoming(DefP1, Pred);
    }

    // Get all blocks that used to dominate BB and no longer do after adding
    // AddedBlockSet, where PrevBlockSet are the previously known predecessors.
    assert(DT.getNode(BB)->getIDom() && "BB does not have valid idom");
    BasicBlock *PrevIDom = FindNearestCommonDominator(PrevBlockSet);
    assert(PrevIDom && "Previous IDom should exist");
    BasicBlock *NewIDom = DT.getNode(BB)->getIDom()->getBlock();
    assert(NewIDom && "BB should have a new valid idom");
    assert(DT.dominates(NewIDom, PrevIDom) &&
           "New idom should dominate old idom");
    GetNoLongerDomBlocks(PrevIDom, NewIDom, BlocksWithDefsToReplace);
  }

  tryRemoveTrivialPhis(InsertedPhis);
  // Create the set of blocks that now have a definition. We'll use this to
  // compute IDF and add Phis there next.
  SmallVector<BasicBlock *, 8> BlocksToProcess;
  for (auto &VH : InsertedPhis)
    if (auto *MPhi = cast_or_null<MemoryPhi>(VH))
      BlocksToProcess.push_back(MPhi->getBlock());

  // Compute IDF and add Phis in all IDF blocks that do not have one.
  SmallVector<BasicBlock *, 32> IDFBlocks;
  if (!BlocksToProcess.empty()) {
    ForwardIDFCalculator IDFs(DT, GD);
    SmallPtrSet<BasicBlock *, 16> DefiningBlocks(BlocksToProcess.begin(),
                                                 BlocksToProcess.end());
    IDFs.setDefiningBlocks(DefiningBlocks);
    IDFs.calculate(IDFBlocks);

    SmallSetVector<MemoryPhi *, 4> PhisToFill;
    // First create all needed Phis.
    for (auto *BBIDF : IDFBlocks)
      if (!MSSA->getMemoryAccess(BBIDF)) {
        auto *IDFPhi = MSSA->createMemoryPhi(BBIDF);
        InsertedPhis.push_back(IDFPhi);
        PhisToFill.insert(IDFPhi);
      }
    // Then update or insert their correct incoming values.
    for (auto *BBIDF : IDFBlocks) {
      auto *IDFPhi = MSSA->getMemoryAccess(BBIDF);
      assert(IDFPhi && "Phi must exist");
      if (!PhisToFill.count(IDFPhi)) {
        // Update existing Phi.
        // FIXME: some updates may be redundant, try to optimize and skip some.
        for (unsigned I = 0, E = IDFPhi->getNumIncomingValues(); I < E; ++I)
          IDFPhi->setIncomingValue(I, GetLastDef(IDFPhi->getIncomingBlock(I)));
      } else {
        for (auto &Pair : children<GraphDiffInvBBPair>({GD, BBIDF})) {
          BasicBlock *Pi = Pair.second;
          IDFPhi->addIncoming(GetLastDef(Pi), Pi);
        }
      }
    }
  }

  // Now for all defs in BlocksWithDefsToReplace, if there are uses they no
  // longer dominate, replace those with the closest dominating def.
  // This will also update optimized accesses, as they're also uses.
  for (auto *BlockWithDefsToReplace : BlocksWithDefsToReplace) {
    if (auto DefsList = MSSA->getWritableBlockDefs(BlockWithDefsToReplace)) {
      for (auto &DefToReplaceUses : *DefsList) {
        BasicBlock *DominatingBlock = DefToReplaceUses.getBlock();
        Value::use_iterator UI = DefToReplaceUses.use_begin(),
                            E = DefToReplaceUses.use_end();
        for (; UI != E;) {
          Use &U = *UI;
          ++UI;
          MemoryAccess *Usr = cast<MemoryAccess>(U.getUser());
          if (MemoryPhi *UsrPhi = dyn_cast<MemoryPhi>(Usr)) {
            BasicBlock *DominatedBlock = UsrPhi->getIncomingBlock(U);
            if (!DT.dominates(DominatingBlock, DominatedBlock))
              U.set(GetLastDef(DominatedBlock));
          } else {
            BasicBlock *DominatedBlock = Usr->getBlock();
            if (!DT.dominates(DominatingBlock, DominatedBlock)) {
              if (auto *DomBlPhi = MSSA->getMemoryAccess(DominatedBlock))
                U.set(DomBlPhi);
              else {
                auto *IDom = DT.getNode(DominatedBlock)->getIDom();
                assert(IDom && "Block must have a valid IDom.");
                U.set(GetLastDef(IDom->getBlock()));
              }
              cast<MemoryUseOrDef>(Usr)->resetOptimized();
            }
          }
        }
      }
    }
  }
  tryRemoveTrivialPhis(InsertedPhis);
}

// Move What to the given position (Where) within basic block BB, in the
// MemorySSA IR.
template <class WhereType>
void MemorySSAUpdater::moveTo(MemoryUseOrDef *What, BasicBlock *BB,
                              WhereType Where) {
  // Mark MemoryPhi users of What not to be optimized.
  for (auto *U : What->users())
    if (MemoryPhi *PhiUser = dyn_cast<MemoryPhi>(U))
      NonOptPhis.insert(PhiUser);

  // Replace all our users with our defining access.
  What->replaceAllUsesWith(What->getDefiningAccess());

  // Let MemorySSA take care of moving it around in the lists.
  MSSA->moveTo(What, BB, Where);

  // Now reinsert it into the IR and do whatever fixups needed.
  if (auto *MD = dyn_cast<MemoryDef>(What))
    insertDef(MD, /*RenameUses=*/true);
  else
    insertUse(cast<MemoryUse>(What), /*RenameUses=*/true);

  // Clear dangling pointers. We added all MemoryPhi users, but not all
  // of them are removed by fixupDefs().
  NonOptPhis.clear();
}

// Move What before Where in the MemorySSA IR.
void MemorySSAUpdater::moveBefore(MemoryUseOrDef *What, MemoryUseOrDef *Where) {
  moveTo(What, Where->getBlock(), Where->getIterator());
}

// Move What after Where in the MemorySSA IR.
void MemorySSAUpdater::moveAfter(MemoryUseOrDef *What, MemoryUseOrDef *Where) {
  moveTo(What, Where->getBlock(), ++Where->getIterator());
}

void MemorySSAUpdater::moveToPlace(MemoryUseOrDef *What, BasicBlock *BB,
                                   MemorySSA::InsertionPlace Where) {
  return moveTo(What, BB, Where);
}

// The instructions from Start onward now live in To, but their accesses are
// still listed in From. Move them to the end of To and update the access
// lists.
void MemorySSAUpdater::moveAllAccesses(BasicBlock *From, BasicBlock *To,
                                       Instruction *Start) {

  MemorySSA::AccessList *Accs = MSSA->getWritableBlockAccesses(From);
  if (!Accs)
    return;

  assert(Start->getParent() == To && "Incorrect Start instruction");
  MemoryAccess *FirstInNew = nullptr;
  for (Instruction &I : make_range(Start->getIterator(), To->end()))
    if ((FirstInNew = MSSA->getMemoryAccess(&I)))
      break;
  if (FirstInNew) {
    auto *MUD = cast<MemoryUseOrDef>(FirstInNew);
    do {
      auto NextIt = ++MUD->getIterator();
      MemoryUseOrDef *NextMUD = (!Accs || NextIt == Accs->end())
                                    ? nullptr
                                    : cast<MemoryUseOrDef>(&*NextIt);
      MSSA->moveTo(MUD, To, MemorySSA::End);
      // Moving MUD from Accs in the moveTo above may delete Accs, so we need
      // to retrieve it again.
      Accs = MSSA->getWritableBlockAccesses(From);
      MUD = NextMUD;
    } while (MUD);
  }

  // If all accesses were moved and only a trivial Phi remains, we try to remove
  // that Phi. This is needed when From is going to be deleted.
  auto *Defs = MSSA->getWritableBlockDefs(From);
  if (Defs && !Defs->empty())
    if (auto *Phi = dyn_cast<MemoryPhi>(&*Defs->begin()))
      tryRemoveTrivialPhi(Phi);
}

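// The two entry points below mirror different CFG updates: the splice variant
// expects To to be a fresh block (free of accesses) that received the tail of
// From starting at Start, while the merge variant expects To to be From's
// unique predecessor into which From is being folded. Both then retarget
// MemoryPhi incoming entries that still name From to name To instead.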
void MemorySSAUpdater::moveAllAfterSpliceBlocks(BasicBlock *From,
                                                BasicBlock *To,
                                                Instruction *Start) {
  assert(MSSA->getBlockAccesses(To) == nullptr &&
         "To block is expected to be free of MemoryAccesses.");
  moveAllAccesses(From, To, Start);
  for (BasicBlock *Succ : successors(To))
    if (MemoryPhi *MPhi = MSSA->getMemoryAccess(Succ))
      MPhi->setIncomingBlock(MPhi->getBasicBlockIndex(From), To);
}

void MemorySSAUpdater::moveAllAfterMergeBlocks(BasicBlock *From, BasicBlock *To,
                                               Instruction *Start) {
  assert(From->getUniquePredecessor() == To &&
         "From block is expected to have a single predecessor (To).");
  moveAllAccesses(From, To, Start);
  for (BasicBlock *Succ : successors(From))
    if (MemoryPhi *MPhi = MSSA->getMemoryAccess(Succ))
      MPhi->setIncomingBlock(MPhi->getBasicBlockIndex(From), To);
}

/// If all incoming arguments of a MemoryPhi are the same MemoryAccess, return
/// that access; otherwise return nullptr.
static MemoryAccess *onlySingleValue(MemoryPhi *MP) {
  MemoryAccess *MA = nullptr;

  for (auto &Arg : MP->operands()) {
    if (!MA)
      MA = cast<MemoryAccess>(Arg);
    else if (MA != Arg)
      return nullptr;
  }
  return MA;
}
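// For example, a MemoryPhi whose incoming values are all the same access MA
// yields MA, while one with two distinct incoming accesses yields nullptr.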

void MemorySSAUpdater::wireOldPredecessorsToNewImmediatePredecessor(
    BasicBlock *Old, BasicBlock *New, ArrayRef<BasicBlock *> Preds,
    bool IdenticalEdgesWereMerged) {
  assert(!MSSA->getWritableBlockAccesses(New) &&
         "Access list should be null for a new block.");
  MemoryPhi *Phi = MSSA->getMemoryAccess(Old);
  if (!Phi)
    return;
  if (Old->hasNPredecessors(1)) {
    assert(pred_size(New) == Preds.size() &&
           "Should have moved all predecessors.");
    MSSA->moveTo(Phi, New, MemorySSA::Beginning);
  } else {
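    // Only some of Old's predecessors were rerouted through New. Create a
    // MemoryPhi in New, move the incoming values for those predecessors into
    // it, and let it feed Old's Phi through the new edge from New.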
    assert(!Preds.empty() && "Must be moving at least one predecessor to the "
                             "new immediate predecessor.");
    MemoryPhi *NewPhi = MSSA->createMemoryPhi(New);
    SmallPtrSet<BasicBlock *, 16> PredsSet(Preds.begin(), Preds.end());
    // When identical edges were not merged, we currently only support moving a
    // single incoming edge per predecessor, so Preds must not contain
    // duplicates.
    if (!IdenticalEdgesWereMerged)
      assert(PredsSet.size() == Preds.size() &&
             "If identical edges were not merged, we cannot have duplicate "
             "blocks in the predecessors");
    Phi->unorderedDeleteIncomingIf([&](MemoryAccess *MA, BasicBlock *B) {
      if (PredsSet.count(B)) {
        NewPhi->addIncoming(MA, B);
        if (!IdenticalEdgesWereMerged)
          PredsSet.erase(B);
        return true;
      }
      return false;
    });
    Phi->addIncoming(NewPhi, New);
    tryRemoveTrivialPhi(NewPhi);
  }
}

void MemorySSAUpdater::removeMemoryAccess(MemoryAccess *MA, bool OptimizePhis) {
  assert(!MSSA->isLiveOnEntryDef(MA) &&
         "Trying to remove the live on entry def");
  // We can only delete phi nodes if they have no uses, or we can replace all
  // uses with a single definition.
  MemoryAccess *NewDefTarget = nullptr;
  if (MemoryPhi *MP = dyn_cast<MemoryPhi>(MA)) {
    // Note that it is sufficient to know that all edges of the phi node have
    // the same argument.  If they do, by the definition of dominance frontiers
    // (which we used to place this phi), that argument must dominate this phi,
    // and thus, must dominate the phi's uses, and so we will not hit the assert
    // below.
    NewDefTarget = onlySingleValue(MP);
    assert((NewDefTarget || MP->use_empty()) &&
           "We can't delete this memory phi");
  } else {
    NewDefTarget = cast<MemoryUseOrDef>(MA)->getDefiningAccess();
  }

  SmallSetVector<MemoryPhi *, 4> PhisToCheck;

  // Re-point the uses at our defining access
  if (!isa<MemoryUse>(MA) && !MA->use_empty()) {
    // Reset optimized on users of this store, and reset the uses.
    // A few notes:
    // 1. This is a slightly modified version of RAUW to avoid walking the
    // uses twice here.
    // 2. If we wanted to be complete, we would have to reset the optimized
    // flags on users of phi nodes if doing the below makes a phi node have all
    // the same arguments. Instead, we expect callers to invoke
    // removeMemoryAccess on those phi nodes, because doing it here would be
    // N^3.
    if (MA->hasValueHandle())
      ValueHandleBase::ValueIsRAUWd(MA, NewDefTarget);
    // Note: We assume MemorySSA is not used in metadata since it's not really
    // part of the IR.

    while (!MA->use_empty()) {
      Use &U = *MA->use_begin();
      if (auto *MUD = dyn_cast<MemoryUseOrDef>(U.getUser()))
        MUD->resetOptimized();
      if (OptimizePhis)
        if (MemoryPhi *MP = dyn_cast<MemoryPhi>(U.getUser()))
          PhisToCheck.insert(MP);
      U.set(NewDefTarget);
    }
  }

  // The calls below will destroy MA, so we can't change the order in which we
  // do things here.
  MSSA->removeFromLookups(MA);
  MSSA->removeFromLists(MA);

  // Optionally optimize Phi uses. This will recursively remove trivial phis.
  if (!PhisToCheck.empty()) {
    SmallVector<WeakVH, 16> PhisToOptimize{PhisToCheck.begin(),
                                           PhisToCheck.end()};
    PhisToCheck.clear();

    unsigned PhisSize = PhisToOptimize.size();
    while (PhisSize-- > 0)
      if (MemoryPhi *MP =
              cast_or_null<MemoryPhi>(PhisToOptimize.pop_back_val()))
        tryRemoveTrivialPhi(MP);
  }
}
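// Illustrative usage with hypothetical names: a pass holding this updater as
// MSSAU and the analysis as MSSA, deleting a dead store DeadSI, would
// typically drop its access before erasing the instruction:
//   if (MemoryAccess *MA = MSSA->getMemoryAccess(DeadSI))
//     MSSAU.removeMemoryAccess(MA, /*OptimizePhis=*/true);
//   DeadSI->eraseFromParent();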

void MemorySSAUpdater::removeBlocks(
    const SmallSetVector<BasicBlock *, 8> &DeadBlocks) {
  // First delete all uses of BB in MemoryPhis.
  for (BasicBlock *BB : DeadBlocks) {
    Instruction *TI = BB->getTerminator();
    assert(TI && "Basic block expected to have a terminator instruction");
    for (BasicBlock *Succ : successors(TI))
      if (!DeadBlocks.count(Succ))
        if (MemoryPhi *MP = MSSA->getMemoryAccess(Succ)) {
          MP->unorderedDeleteIncomingBlock(BB);
          tryRemoveTrivialPhi(MP);
        }
    // Drop all references of all accesses in BB. This is done for every dead
    // block before any access is actually deleted, so that accesses that
    // reference each other across dead blocks can be removed safely below.
    if (MemorySSA::AccessList *Acc = MSSA->getWritableBlockAccesses(BB))
      for (MemoryAccess &MA : *Acc)
        MA.dropAllReferences();
  }

  // Next, delete all memory accesses in each block.
  for (BasicBlock *BB : DeadBlocks) {
    MemorySSA::AccessList *Acc = MSSA->getWritableBlockAccesses(BB);
    if (!Acc)
      continue;
    for (auto AB = Acc->begin(), AE = Acc->end(); AB != AE;) {
      MemoryAccess *MA = &*AB;
      ++AB;
      MSSA->removeFromLookups(MA);
      MSSA->removeFromLists(MA);
    }
  }
}

void MemorySSAUpdater::tryRemoveTrivialPhis(ArrayRef<WeakVH> UpdatedPHIs) {
  for (auto &VH : UpdatedPHIs)
    if (auto *MPhi = cast_or_null<MemoryPhi>(VH))
      tryRemoveTrivialPhi(MPhi);
}

void MemorySSAUpdater::changeToUnreachable(const Instruction *I) {
  const BasicBlock *BB = I->getParent();
  // Remove memory accesses in BB for I and all following instructions.
  auto BBI = I->getIterator(), BBE = BB->end();
  // FIXME: If this becomes too expensive, iterate until the first instruction
  // with a memory access, then iterate over MemoryAccesses.
  while (BBI != BBE)
    removeMemoryAccess(&*(BBI++));
  // Update phis in BB's successors to remove BB.
  SmallVector<WeakVH, 16> UpdatedPHIs;
  for (const BasicBlock *Successor : successors(BB)) {
    removeDuplicatePhiEdgesBetween(BB, Successor);
    if (MemoryPhi *MPhi = MSSA->getMemoryAccess(Successor)) {
      MPhi->unorderedDeleteIncomingBlock(BB);
      UpdatedPHIs.push_back(MPhi);
    }
  }
  // Optimize trivial phis.
  tryRemoveTrivialPhis(UpdatedPHIs);
}

void MemorySSAUpdater::changeCondBranchToUnconditionalTo(const BranchInst *BI,
                                                         const BasicBlock *To) {
  const BasicBlock *BB = BI->getParent();
  SmallVector<WeakVH, 16> UpdatedPHIs;
  for (const BasicBlock *Succ : successors(BB)) {
    removeDuplicatePhiEdgesBetween(BB, Succ);
    if (Succ != To)
      if (auto *MPhi = MSSA->getMemoryAccess(Succ)) {
        MPhi->unorderedDeleteIncomingBlock(BB);
        UpdatedPHIs.push_back(MPhi);
      }
  }
  // Optimize trivial phis.
  tryRemoveTrivialPhis(UpdatedPHIs);
}

MemoryAccess *MemorySSAUpdater::createMemoryAccessInBB(
    Instruction *I, MemoryAccess *Definition, const BasicBlock *BB,
    MemorySSA::InsertionPlace Point) {
  MemoryUseOrDef *NewAccess = MSSA->createDefinedAccess(I, Definition);
  MSSA->insertIntoListsForBlock(NewAccess, BB, Point);
  return NewAccess;
}

MemoryUseOrDef *MemorySSAUpdater::createMemoryAccessBefore(
    Instruction *I, MemoryAccess *Definition, MemoryUseOrDef *InsertPt) {
  assert(I->getParent() == InsertPt->getBlock() &&
         "New and old access must be in the same block");
  MemoryUseOrDef *NewAccess = MSSA->createDefinedAccess(I, Definition);
  MSSA->insertIntoListsBefore(NewAccess, InsertPt->getBlock(),
                              InsertPt->getIterator());
  return NewAccess;
}

MemoryUseOrDef *MemorySSAUpdater::createMemoryAccessAfter(
    Instruction *I, MemoryAccess *Definition, MemoryAccess *InsertPt) {
  assert(I->getParent() == InsertPt->getBlock() &&
         "New and old access must be in the same block");
  MemoryUseOrDef *NewAccess = MSSA->createDefinedAccess(I, Definition);
  MSSA->insertIntoListsBefore(NewAccess, InsertPt->getBlock(),
                              ++InsertPt->getIterator());
  return NewAccess;
}
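// Illustrative usage with hypothetical names: a pass that cloned a store
// CloneSI right after an existing store OrigSI, whose MemoryDef is OrigMA,
// could create and wire up the new access along the lines of:
//   MemoryAccess *NewMA =
//       MSSAU.createMemoryAccessAfter(CloneSI, OrigMA, OrigMA);
//   MSSAU.insertDef(cast<MemoryDef>(NewMA), /*RenameUses=*/true);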