; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S -slp-vectorizer %s | FileCheck %s
target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
target triple = "arm64-apple-ios5.0.0"
; Two horizontal signed add-long reductions that use *different* overloads
; of the same intrinsic (saddlv.i64.v4i32 on %in1 vs saddlv.i64.v2i32 on
; %in2). The autogenerated CHECK-NEXT lines pin the optimizer output to
; this exact scalar form: both intrinsic calls, the icmp, and the sext
; must survive the SLP vectorizer pass (see the RUN line) unchanged.
define i64 @mismatched_intrinsics(<4 x i32> %in1, <2 x i32> %in2) nounwind {
; CHECK-LABEL: @mismatched_intrinsics(
; CHECK-NEXT: [[VADDLVQ_S32_I:%.*]] = tail call i64 @llvm.arm64.neon.saddlv.i64.v4i32(<4 x i32> [[IN1:%.*]])
; CHECK-NEXT: [[VADDLV_S32_I:%.*]] = tail call i64 @llvm.arm64.neon.saddlv.i64.v2i32(<2 x i32> [[IN2:%.*]])
; CHECK-NEXT: [[TST:%.*]] = icmp sgt i64 [[VADDLVQ_S32_I]], [[VADDLV_S32_I]]
; CHECK-NEXT: [[EQUAL:%.*]] = sext i1 [[TST]] to i64
; CHECK-NEXT: ret i64 [[EQUAL]]
;
  ; Mismatched element counts (<4 x i32> vs <2 x i32>) mean these two
  ; reductions cannot be paired into a single vectorized operation.
%vaddlvq_s32.i = tail call i64 @llvm.arm64.neon.saddlv.i64.v4i32(<4 x i32> %in1) #2
%vaddlv_s32.i = tail call i64 @llvm.arm64.neon.saddlv.i64.v2i32(<2 x i32> %in2) #2
%tst = icmp sgt i64 %vaddlvq_s32.i, %vaddlv_s32.i
  ; Widen the i1 comparison result to the i64 return type.
%equal = sext i1 %tst to i64
ret i64 %equal
}
; Declarations of the two saddlv intrinsic overloads exercised by the test
; (4-element and 2-element i32 source vectors, both reducing to i64).
declare i64 @llvm.arm64.neon.saddlv.i64.v4i32(<4 x i32> %in1)
declare i64 @llvm.arm64.neon.saddlv.i64.v2i32(<2 x i32> %in1)