; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.8.0"
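; The four scalar stores to A[0..3] below are expected to be SLP-vectorized into a
; single <4 x i32> store; the mul and shl lanes are combined as alternating opcodes
; through a shufflevector, as the autogenerated CHECK lines show. %add also has two
; scalar users outside the vector tree (%externaluse1 and %externaluse2), so the
; cost model should charge the extractelement for it only once.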
define i32 @foo(i32* nocapture %A, i32 %n, i32 %m) {
; CHECK-LABEL: @foo(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = insertelement <4 x i32> undef, i32 [[N:%.*]], i32 0
; CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x i32> [[TMP0]], i32 [[N]], i32 1
; CHECK-NEXT: [[TMP2:%.*]] = insertelement <4 x i32> [[TMP1]], i32 [[N]], i32 2
; CHECK-NEXT: [[TMP3:%.*]] = insertelement <4 x i32> [[TMP2]], i32 [[N]], i32 3
; CHECK-NEXT: [[TMP4:%.*]] = mul nsw <4 x i32> [[TMP3]], <i32 5, i32 9, i32 3, i32 10>
; CHECK-NEXT: [[TMP5:%.*]] = shl <4 x i32> [[TMP3]], <i32 5, i32 9, i32 3, i32 10>
; CHECK-NEXT: [[TMP6:%.*]] = shufflevector <4 x i32> [[TMP4]], <4 x i32> [[TMP5]], <4 x i32> <i32 0, i32 1, i32 6, i32 3>
; CHECK-NEXT: [[TMP7:%.*]] = add nsw <4 x i32> [[TMP6]], <i32 9, i32 9, i32 9, i32 9>
; CHECK-NEXT: [[TMP8:%.*]] = bitcast i32* [[A:%.*]] to <4 x i32>*
; CHECK-NEXT: store <4 x i32> [[TMP7]], <4 x i32>* [[TMP8]], align 4
; CHECK-NEXT: [[TMP9:%.*]] = extractelement <4 x i32> [[TMP7]], i32 0
; CHECK-NEXT: [[EXTERNALUSE1:%.*]] = add nsw i32 [[TMP9]], [[M:%.*]]
; CHECK-NEXT: [[EXTERNALUSE2:%.*]] = mul nsw i32 [[TMP9]], [[M]]
; CHECK-NEXT: [[ADD10:%.*]] = add nsw i32 [[EXTERNALUSE1]], [[EXTERNALUSE2]]
; CHECK-NEXT: ret i32 [[ADD10]]
;
entry:
%mul = mul nsw i32 %n, 5
%add = add nsw i32 %mul, 9
store i32 %add, i32* %A, align 4
%mul1 = mul nsw i32 %n, 9
%add2 = add nsw i32 %mul1, 9
%arrayidx3 = getelementptr inbounds i32, i32* %A, i64 1
store i32 %add2, i32* %arrayidx3, align 4
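; n * 8 is written as a shift here, which forces the vectorizer down the
; alternating-opcode (mul/shl) path checked above.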
%mul4 = shl i32 %n, 3
%add5 = add nsw i32 %mul4, 9
%arrayidx6 = getelementptr inbounds i32, i32* %A, i64 2
store i32 %add5, i32* %arrayidx6, align 4
%mul7 = mul nsw i32 %n, 10
%add8 = add nsw i32 %mul7, 9
%arrayidx9 = getelementptr inbounds i32, i32* %A, i64 3
store i32 %add8, i32* %arrayidx9, align 4
%externaluse1 = add nsw i32 %add, %m
  %externaluse2 = mul nsw i32 %add, %m ; the extract cost for %add should be counted only once, and the stores should still be vectorized
%add10 = add nsw i32 %externaluse1, %externaluse2
ret i32 %add10
}