//===---------------------- ExecuteStage.cpp --------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This file defines the execution stage of an instruction pipeline.
///
/// The ExecuteStage is responsible for managing the hardware scheduler
/// and issuing notifications that an instruction has been executed.
///
//===----------------------------------------------------------------------===//

#include "llvm/MCA/Stages/ExecuteStage.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/MathExtras.h"

#define DEBUG_TYPE "llvm-mca"

namespace llvm {
namespace mca {
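// Maps a Scheduler::Status stall reason to the corresponding generic
// HWStallEvent type, so that listeners can be notified in a uniform way.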
HWStallEvent::GenericEventType toHWStallEventType(Scheduler::Status Status) {
switch (Status) {
case Scheduler::SC_LOAD_QUEUE_FULL:
return HWStallEvent::LoadQueueFull;
case Scheduler::SC_STORE_QUEUE_FULL:
return HWStallEvent::StoreQueueFull;
case Scheduler::SC_BUFFERS_FULL:
return HWStallEvent::SchedulerQueueFull;
case Scheduler::SC_DISPATCH_GROUP_STALL:
return HWStallEvent::DispatchGroupStall;
case Scheduler::SC_AVAILABLE:
return HWStallEvent::Invalid;
}
llvm_unreachable("Don't know how to process this StallKind!");
}
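// Returns true if the scheduler can accept instruction IR during this cycle.
// Otherwise, listeners are notified with a HWStallEvent describing the reason
// for the stall, and false is returned.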
bool ExecuteStage::isAvailable(const InstRef &IR) const {
if (Scheduler::Status S = HWS.isAvailable(IR)) {
HWStallEvent::GenericEventType ET = toHWStallEventType(S);
notifyEvent<HWStallEvent>(HWStallEvent(ET, IR));
return false;
}
return true;
}
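// Issues IR to the underlying pipelines and notifies listeners about every
// state change triggered by the issue event: buffer release, issue,
// execution (for zero-latency cases), and any dependent instructions that
// became pending or ready.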
Error ExecuteStage::issueInstruction(InstRef &IR) {
SmallVector<std::pair<ResourceRef, ResourceCycles>, 4> Used;
SmallVector<InstRef, 4> Pending;
SmallVector<InstRef, 4> Ready;
HWS.issueInstruction(IR, Used, Pending, Ready);
Instruction &IS = *IR.getInstruction();
NumIssuedOpcodes += IS.getNumMicroOps();
notifyReservedOrReleasedBuffers(IR, /* Reserved */ false);
notifyInstructionIssued(IR, Used);
if (IS.isExecuted()) {
notifyInstructionExecuted(IR);
// FIXME: add a buffer of executed instructions.
if (Error S = moveToTheNextStage(IR))
return S;
}
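  // Issuing IR may have unblocked dependent instructions. Notify listeners
  // about instructions that transitioned to the pending or ready state as a
  // side effect of this issue event.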
for (const InstRef &I : Pending)
notifyInstructionPending(I);
for (const InstRef &I : Ready)
notifyInstructionReady(I);
return ErrorSuccess();
}
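// Issues instructions from the scheduler's ready set until no more
// instructions can be issued during this cycle.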
Error ExecuteStage::issueReadyInstructions() {
InstRef IR = HWS.select();
while (IR) {
if (Error Err = issueInstruction(IR))
return Err;
// Select the next instruction to issue.
IR = HWS.select();
}
return ErrorSuccess();
}
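// Notifies the scheduler that a new cycle has started, then propagates the
// resulting state changes to listeners: freed resources, instructions that
// completed execution, and instructions that became pending or ready.
// Finally, it tries to issue instructions from the ready set.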
Error ExecuteStage::cycleStart() {
SmallVector<ResourceRef, 8> Freed;
SmallVector<InstRef, 4> Executed;
SmallVector<InstRef, 4> Pending;
SmallVector<InstRef, 4> Ready;
HWS.cycleEvent(Freed, Executed, Pending, Ready);
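  // Reset the per-cycle counters used by cycleEnd() to detect an increase in
  // backpressure.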
NumDispatchedOpcodes = 0;
NumIssuedOpcodes = 0;
for (const ResourceRef &RR : Freed)
notifyResourceAvailable(RR);
for (InstRef &IR : Executed) {
notifyInstructionExecuted(IR);
// FIXME: add a buffer of executed instructions.
if (Error S = moveToTheNextStage(IR))
return S;
}
for (const InstRef &IR : Pending)
notifyInstructionPending(IR);
for (const InstRef &IR : Ready)
notifyInstructionReady(IR);
return issueReadyInstructions();
}
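// If pressure events are enabled, reports backpressure events to listeners
// whenever dispatch stalled on scheduler resources during this cycle, or
// whenever more micro opcodes were dispatched than issued.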
Error ExecuteStage::cycleEnd() {
if (!EnablePressureEvents)
return ErrorSuccess();
// Always conservatively report any backpressure events if the dispatch logic
// was stalled due to unavailable scheduler resources.
if (!HWS.hadTokenStall() && NumDispatchedOpcodes <= NumIssuedOpcodes)
return ErrorSuccess();
SmallVector<InstRef, 8> Insts;
uint64_t Mask = HWS.analyzeResourcePressure(Insts);
if (Mask) {
LLVM_DEBUG(dbgs() << "[E] Backpressure increased because of unavailable "
"pipeline resources: "
<< format_hex(Mask, 16) << '\n');
HWPressureEvent Ev(HWPressureEvent::RESOURCES, Insts, Mask);
notifyEvent(Ev);
}
SmallVector<InstRef, 8> RegDeps;
SmallVector<InstRef, 8> MemDeps;
HWS.analyzeDataDependencies(RegDeps, MemDeps);
if (RegDeps.size()) {
LLVM_DEBUG(
dbgs() << "[E] Backpressure increased by register dependencies\n");
HWPressureEvent Ev(HWPressureEvent::REGISTER_DEPS, RegDeps);
notifyEvent(Ev);
}
if (MemDeps.size()) {
LLVM_DEBUG(dbgs() << "[E] Backpressure increased by memory dependencies\n");
HWPressureEvent Ev(HWPressureEvent::MEMORY_DEPS, MemDeps);
notifyEvent(Ev);
}
return ErrorSuccess();
}
#ifndef NDEBUG
static void verifyInstructionEliminated(const InstRef &IR) {
const Instruction &Inst = *IR.getInstruction();
assert(Inst.isEliminated() && "Instruction was not eliminated!");
assert(Inst.isReady() && "Instruction in an inconsistent state!");
// Ensure that instructions eliminated at register renaming stage are in a
// consistent state.
const InstrDesc &Desc = Inst.getDesc();
assert(!Desc.MayLoad && !Desc.MayStore && "Cannot eliminate a memory op!");
}
#endif
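// Instructions eliminated at register renaming are never issued to the
// underlying pipelines. Fast-forward IR through the remaining states by
// firing the full sequence of state-change notifications, then move it to
// the next stage.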
Error ExecuteStage::handleInstructionEliminated(InstRef &IR) {
#ifndef NDEBUG
verifyInstructionEliminated(IR);
#endif
notifyInstructionPending(IR);
notifyInstructionReady(IR);
notifyInstructionIssued(IR, {});
IR.getInstruction()->forceExecuted();
notifyInstructionExecuted(IR);
return moveToTheNextStage(IR);
}
// Schedule the instruction for execution on the hardware.
Error ExecuteStage::execute(InstRef &IR) {
assert(isAvailable(IR) && "Scheduler is not available!");
#ifndef NDEBUG
// Ensure that the HWS has not stored this instruction in its queues.
HWS.sanityCheck(IR);
#endif
if (IR.getInstruction()->isEliminated())
return handleInstructionEliminated(IR);
  // Reserve a slot in each buffered resource. Also, mark units with
  // BufferSize=0 as reserved. Resources with a buffer size of zero will only
  // be released after the instruction is issued, and all the ResourceCycles
  // for those units have been consumed.
bool IsReadyInstruction = HWS.dispatch(IR);
const Instruction &Inst = *IR.getInstruction();
unsigned NumMicroOps = Inst.getNumMicroOps();
NumDispatchedOpcodes += NumMicroOps;
notifyReservedOrReleasedBuffers(IR, /* Reserved */ true);
if (!IsReadyInstruction) {
if (Inst.isPending())
notifyInstructionPending(IR);
return ErrorSuccess();
}
notifyInstructionPending(IR);
// If we did not return early, then the scheduler is ready for execution.
notifyInstructionReady(IR);
// If we cannot issue immediately, the HWS will add IR to its ready queue for
// execution later, so we must return early here.
if (!HWS.mustIssueImmediately(IR))
return ErrorSuccess();
// Issue IR to the underlying pipelines.
return issueInstruction(IR);
}
void ExecuteStage::notifyInstructionExecuted(const InstRef &IR) const {
LLVM_DEBUG(dbgs() << "[E] Instruction Executed: #" << IR << '\n');
notifyEvent<HWInstructionEvent>(
HWInstructionEvent(HWInstructionEvent::Executed, IR));
}
void ExecuteStage::notifyInstructionPending(const InstRef &IR) const {
LLVM_DEBUG(dbgs() << "[E] Instruction Pending: #" << IR << '\n');
notifyEvent<HWInstructionEvent>(
HWInstructionEvent(HWInstructionEvent::Pending, IR));
}
void ExecuteStage::notifyInstructionReady(const InstRef &IR) const {
LLVM_DEBUG(dbgs() << "[E] Instruction Ready: #" << IR << '\n');
notifyEvent<HWInstructionEvent>(
HWInstructionEvent(HWInstructionEvent::Ready, IR));
}
void ExecuteStage::notifyResourceAvailable(const ResourceRef &RR) const {
LLVM_DEBUG(dbgs() << "[E] Resource Available: [" << RR.first << '.'
<< RR.second << "]\n");
for (HWEventListener *Listener : getListeners())
Listener->onResourceAvailable(RR);
}
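// Notifies listeners that IR has been issued, together with the set of
// resources consumed by the issue event.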
void ExecuteStage::notifyInstructionIssued(
const InstRef &IR,
MutableArrayRef<std::pair<ResourceRef, ResourceCycles>> Used) const {
LLVM_DEBUG({
dbgs() << "[E] Instruction Issued: #" << IR << '\n';
for (const std::pair<ResourceRef, ResourceCycles> &Resource : Used) {
assert(Resource.second.getDenominator() == 1 && "Invalid cycles!");
dbgs() << "[E] Resource Used: [" << Resource.first.first << '.'
<< Resource.first.second << "], ";
dbgs() << "cycles: " << Resource.second.getNumerator() << '\n';
}
});
// Replace resource masks with valid resource processor IDs.
for (std::pair<ResourceRef, ResourceCycles> &Use : Used)
Use.first.first = HWS.getResourceID(Use.first.first);
notifyEvent<HWInstructionEvent>(HWInstructionIssuedEvent(IR, Used));
}
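// Decodes the UsedBuffers mask of IR into a list of buffer resource IDs, and
// notifies listeners that those buffers were either reserved (at dispatch
// time) or released (at issue time).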
void ExecuteStage::notifyReservedOrReleasedBuffers(const InstRef &IR,
bool Reserved) const {
uint64_t UsedBuffers = IR.getInstruction()->getDesc().UsedBuffers;
if (!UsedBuffers)
return;
SmallVector<unsigned, 4> BufferIDs(countPopulation(UsedBuffers), 0);
for (unsigned I = 0, E = BufferIDs.size(); I < E; ++I) {
uint64_t CurrentBufferMask = UsedBuffers & (-UsedBuffers);
BufferIDs[I] = HWS.getResourceID(CurrentBufferMask);
UsedBuffers ^= CurrentBufferMask;
}
if (Reserved) {
for (HWEventListener *Listener : getListeners())
Listener->onReservedBuffers(IR, BufferIDs);
return;
}
for (HWEventListener *Listener : getListeners())
Listener->onReleasedBuffers(IR, BufferIDs);
}
} // namespace mca
} // namespace llvm