; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV64I %s

; Check indexed and unindexed, sext, zext and anyext loads
define i64 @lb(i8 *%a) nounwind {
; RV64I-LABEL: lb:
; RV64I: # %bb.0:
; RV64I-NEXT: lb a1, 1(a0)
; RV64I-NEXT: lb a0, 0(a0)
; RV64I-NEXT: mv a0, a1
; RV64I-NEXT: ret
%1 = getelementptr i8, i8* %a, i32 1
%2 = load i8, i8* %1
%3 = sext i8 %2 to i64
; the unused load will produce an anyext for selection
%4 = load volatile i8, i8* %a
ret i64 %3
}

define i64 @lh(i16 *%a) nounwind {
; RV64I-LABEL: lh:
; RV64I: # %bb.0:
; RV64I-NEXT: lh a1, 4(a0)
; RV64I-NEXT: lh a0, 0(a0)
; RV64I-NEXT: mv a0, a1
; RV64I-NEXT: ret
%1 = getelementptr i16, i16* %a, i32 2
%2 = load i16, i16* %1
%3 = sext i16 %2 to i64
; the unused load will produce an anyext for selection
%4 = load volatile i16, i16* %a
ret i64 %3
}

define i64 @lw(i32 *%a) nounwind {
; RV64I-LABEL: lw:
; RV64I: # %bb.0:
; RV64I-NEXT: lw a1, 12(a0)
; RV64I-NEXT: lw a0, 0(a0)
; RV64I-NEXT: mv a0, a1
; RV64I-NEXT: ret
%1 = getelementptr i32, i32* %a, i32 3
%2 = load i32, i32* %1
%3 = sext i32 %2 to i64
; the unused load will produce an anyext for selection
%4 = load volatile i32, i32* %a
ret i64 %3
}

define i64 @lbu(i8 *%a) nounwind {
; RV64I-LABEL: lbu:
; RV64I: # %bb.0:
; RV64I-NEXT: lbu a1, 4(a0)
; RV64I-NEXT: lbu a0, 0(a0)
; RV64I-NEXT: add a0, a1, a0
; RV64I-NEXT: ret
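; both load results are used, so both are selected as zero-extending loads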
%1 = getelementptr i8, i8* %a, i32 4
%2 = load i8, i8* %1
%3 = zext i8 %2 to i64
%4 = load volatile i8, i8* %a
%5 = zext i8 %4 to i64
%6 = add i64 %3, %5
ret i64 %6
}

define i64 @lhu(i16 *%a) nounwind {
; RV64I-LABEL: lhu:
; RV64I: # %bb.0:
; RV64I-NEXT: lhu a1, 10(a0)
; RV64I-NEXT: lhu a0, 0(a0)
; RV64I-NEXT: add a0, a1, a0
; RV64I-NEXT: ret
%1 = getelementptr i16, i16* %a, i32 5
%2 = load i16, i16* %1
%3 = zext i16 %2 to i64
%4 = load volatile i16, i16* %a
%5 = zext i16 %4 to i64
%6 = add i64 %3, %5
ret i64 %6
}

define i64 @lwu(i32 *%a) nounwind {
; RV64I-LABEL: lwu:
; RV64I: # %bb.0:
; RV64I-NEXT: lwu a1, 24(a0)
; RV64I-NEXT: lwu a0, 0(a0)
; RV64I-NEXT: add a0, a1, a0
; RV64I-NEXT: ret
%1 = getelementptr i32, i32* %a, i32 6
%2 = load i32, i32* %1
%3 = zext i32 %2 to i64
%4 = load volatile i32, i32* %a
%5 = zext i32 %4 to i64
%6 = add i64 %3, %5
ret i64 %6
}

; Check indexed and unindexed stores
define void @sb(i8 *%a, i8 %b) nounwind {
; RV64I-LABEL: sb:
; RV64I: # %bb.0:
; RV64I-NEXT: sb a1, 0(a0)
; RV64I-NEXT: sb a1, 7(a0)
; RV64I-NEXT: ret
store i8 %b, i8* %a
%1 = getelementptr i8, i8* %a, i32 7
store i8 %b, i8* %1
ret void
}

define void @sh(i16 *%a, i16 %b) nounwind {
; RV64I-LABEL: sh:
; RV64I: # %bb.0:
; RV64I-NEXT: sh a1, 0(a0)
; RV64I-NEXT: sh a1, 16(a0)
; RV64I-NEXT: ret
store i16 %b, i16* %a
%1 = getelementptr i16, i16* %a, i32 8
store i16 %b, i16* %1
ret void
}

define void @sw(i32 *%a, i32 %b) nounwind {
; RV64I-LABEL: sw:
; RV64I: # %bb.0:
; RV64I-NEXT: sw a1, 0(a0)
; RV64I-NEXT: sw a1, 36(a0)
; RV64I-NEXT: ret
store i32 %b, i32* %a
%1 = getelementptr i32, i32* %a, i32 9
store i32 %b, i32* %1
ret void
}

; 64-bit loads and stores
define i64 @ld(i64 *%a) nounwind {
; RV64I-LABEL: ld:
; RV64I: # %bb.0:
; RV64I-NEXT: ld a1, 80(a0)
; RV64I-NEXT: ld a0, 0(a0)
; RV64I-NEXT: mv a0, a1
; RV64I-NEXT: ret
%1 = getelementptr i64, i64* %a, i32 10
%2 = load i64, i64* %1
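; the unused load is volatile so it is not removed before selection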
%3 = load volatile i64, i64* %a
ret i64 %2
}

define void @sd(i64 *%a, i64 %b) nounwind {
; RV64I-LABEL: sd:
; RV64I: # %bb.0:
; RV64I-NEXT: sd a1, 0(a0)
; RV64I-NEXT: sd a1, 88(a0)
; RV64I-NEXT: ret
store i64 %b, i64* %a
%1 = getelementptr i64, i64* %a, i32 11
store i64 %b, i64* %1
ret void
}

; Check load and store to an i1 location
define i64 @load_sext_zext_anyext_i1(i1 *%a) nounwind {
; RV64I-LABEL: load_sext_zext_anyext_i1:
; RV64I: # %bb.0:
; RV64I-NEXT: lbu a1, 1(a0)
; RV64I-NEXT: lbu a2, 2(a0)
; RV64I-NEXT: lb a0, 0(a0)
; RV64I-NEXT: sub a0, a2, a1
; RV64I-NEXT: ret
; sextload i1
%1 = getelementptr i1, i1* %a, i32 1
%2 = load i1, i1* %1
%3 = sext i1 %2 to i64
; zextload i1
%4 = getelementptr i1, i1* %a, i32 2
%5 = load i1, i1* %4
%6 = zext i1 %5 to i64
%7 = add i64 %3, %6
; extload i1 (anyext). Produced as the load is unused.
%8 = load volatile i1, i1* %a
ret i64 %7
}

define i16 @load_sext_zext_anyext_i1_i16(i1 *%a) nounwind {
; RV64I-LABEL: load_sext_zext_anyext_i1_i16:
; RV64I: # %bb.0:
; RV64I-NEXT: lbu a1, 1(a0)
; RV64I-NEXT: lbu a2, 2(a0)
; RV64I-NEXT: lb a0, 0(a0)
; RV64I-NEXT: sub a0, a2, a1
; RV64I-NEXT: ret
; sextload i1
%1 = getelementptr i1, i1* %a, i32 1
%2 = load i1, i1* %1
%3 = sext i1 %2 to i16
; zextload i1
%4 = getelementptr i1, i1* %a, i32 2
%5 = load i1, i1* %4
%6 = zext i1 %5 to i16
%7 = add i16 %3, %6
; extload i1 (anyext). Produced as the load is unused.
%8 = load volatile i1, i1* %a
ret i16 %7
}

; Check load and store to a global
@G = global i64 0

define i64 @ld_sd_global(i64 %a) nounwind {
; RV64I-LABEL: ld_sd_global:
; RV64I: # %bb.0:
; RV64I-NEXT: lui a2, %hi(G)
; RV64I-NEXT: ld a1, %lo(G)(a2)
; RV64I-NEXT: sd a0, %lo(G)(a2)
; RV64I-NEXT: addi a2, a2, %lo(G)
; RV64I-NEXT: ld a3, 72(a2)
; RV64I-NEXT: sd a0, 72(a2)
; RV64I-NEXT: mv a0, a1
; RV64I-NEXT: ret
%1 = load volatile i64, i64* @G
store i64 %a, i64* @G
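; also access the global at a constant offset (element 9, byte offset 72)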
%2 = getelementptr i64, i64* @G, i64 9
%3 = load volatile i64, i64* %2
store i64 %a, i64* %2
ret i64 %1
}