From 198259becbcf14fb8dd9abdf246c5e49a253a91e Mon Sep 17 00:00:00 2001
From: David Green
Date: Tue, 31 Aug 2021 14:24:08 +0100
Subject: [PATCH] [ARM] Test for VMINNM/VMAXNM in tail predicated loops.

---
 .../CodeGen/Thumb2/mve-vmaxnma-tailpred.ll | 257 ++++++++++++++++++
 1 file changed, 257 insertions(+)
 create mode 100644 llvm/test/CodeGen/Thumb2/mve-vmaxnma-tailpred.ll

diff --git a/llvm/test/CodeGen/Thumb2/mve-vmaxnma-tailpred.ll b/llvm/test/CodeGen/Thumb2/mve-vmaxnma-tailpred.ll
new file mode 100644
index 000000000000..d0de94e7e074
--- /dev/null
+++ b/llvm/test/CodeGen/Thumb2/mve-vmaxnma-tailpred.ll
@@ -0,0 +1,257 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp %s -o - | FileCheck %s
+
+define float @minf32(float* noalias nocapture readonly %s1, float* noalias nocapture readonly %s2, float* noalias nocapture %d, i32 %n) {
+; CHECK-LABEL: minf32:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: .save {r7, lr}
+; CHECK-NEXT: push {r7, lr}
+; CHECK-NEXT: cmp r3, #1
+; CHECK-NEXT: it lt
+; CHECK-NEXT: poplt {r7, pc}
+; CHECK-NEXT: .LBB0_1: @ %vector.ph
+; CHECK-NEXT: add.w r12, r3, #3
+; CHECK-NEXT: mov.w lr, #1
+; CHECK-NEXT: bic r12, r12, #3
+; CHECK-NEXT: sub.w r12, r12, #4
+; CHECK-NEXT: add.w r12, lr, r12, lsr #2
+; CHECK-NEXT: dls lr, r12
+; CHECK-NEXT: .LBB0_2: @ %vector.body
+; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vctp.32 r3
+; CHECK-NEXT: vpstt
+; CHECK-NEXT: vldrwt.u32 q0, [r0], #16
+; CHECK-NEXT: vldrwt.u32 q1, [r1], #16
+; CHECK-NEXT: subs r3, #4
+; CHECK-NEXT: vabs.f32 q0, q0
+; CHECK-NEXT: vminnm.f32 q0, q0, q1
+; CHECK-NEXT: vpst
+; CHECK-NEXT: vstrwt.32 q0, [r2], #16
+; CHECK-NEXT: le lr, .LBB0_2
+; CHECK-NEXT: @ %bb.3: @ %for.cond.cleanup
+; CHECK-NEXT: pop {r7, pc}
+entry:
+ %cmp8 = icmp sgt i32 %n, 0
+ br i1 %cmp8, label %vector.ph, label %for.cond.cleanup
+
+vector.ph: ; preds = %entry
+ %n.rnd.up = add i32 %n, 3
+ %n.vec = and i32 %n.rnd.up, -4
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %n)
+ %0 = getelementptr inbounds float, float* %s1, i32 %index
+ %1 = bitcast float* %0 to <4 x float>*
+ %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %1, i32 4, <4 x i1> %active.lane.mask, <4 x float> poison)
+ %2 = call fast <4 x float> @llvm.fabs.v4f32(<4 x float> %wide.masked.load)
+ %3 = getelementptr inbounds float, float* %s2, i32 %index
+ %4 = bitcast float* %3 to <4 x float>*
+ %wide.masked.load10 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %4, i32 4, <4 x i1> %active.lane.mask, <4 x float> poison)
+ %5 = call fast <4 x float> @llvm.minnum.v4f32(<4 x float> %2, <4 x float> %wide.masked.load10)
+ %6 = getelementptr inbounds float, float* %d, i32 %index
+ %7 = bitcast float* %6 to <4 x float>*
+ call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %5, <4 x float>* %7, i32 4, <4 x i1> %active.lane.mask)
+ %index.next = add i32 %index, 4
+ %8 = icmp eq i32 %index.next, %n.vec
+ br i1 %8, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup: ; preds = %vector.body, %entry
+ ret float undef
+}
+
+define float @maxaf32(float* noalias nocapture readonly %s1, float* noalias nocapture readonly %s2, float* noalias nocapture %d, i32 %n) {
+; CHECK-LABEL: maxaf32:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: .save {r7, 
lr} +; CHECK-NEXT: push {r7, lr} +; CHECK-NEXT: cmp r3, #1 +; CHECK-NEXT: it lt +; CHECK-NEXT: poplt {r7, pc} +; CHECK-NEXT: .LBB1_1: @ %vector.ph +; CHECK-NEXT: add.w r12, r3, #3 +; CHECK-NEXT: mov.w lr, #1 +; CHECK-NEXT: bic r12, r12, #3 +; CHECK-NEXT: sub.w r12, r12, #4 +; CHECK-NEXT: add.w r12, lr, r12, lsr #2 +; CHECK-NEXT: dls lr, r12 +; CHECK-NEXT: .LBB1_2: @ %vector.body +; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: vctp.32 r3 +; CHECK-NEXT: subs r3, #4 +; CHECK-NEXT: vpstt +; CHECK-NEXT: vldrwt.u32 q0, [r1], #16 +; CHECK-NEXT: vldrwt.u32 q1, [r0], #16 +; CHECK-NEXT: vmaxnma.f32 q1, q0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vstrwt.32 q1, [r2], #16 +; CHECK-NEXT: le lr, .LBB1_2 +; CHECK-NEXT: @ %bb.3: @ %for.cond.cleanup +; CHECK-NEXT: pop {r7, pc} +entry: + %cmp8 = icmp sgt i32 %n, 0 + br i1 %cmp8, label %vector.ph, label %for.cond.cleanup + +vector.ph: ; preds = %entry + %n.rnd.up = add i32 %n, 3 + %n.vec = and i32 %n.rnd.up, -4 + br label %vector.body + +vector.body: ; preds = %vector.body, %vector.ph + %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ] + %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %n) + %0 = getelementptr inbounds float, float* %s1, i32 %index + %1 = bitcast float* %0 to <4 x float>* + %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %1, i32 4, <4 x i1> %active.lane.mask, <4 x float> poison) + %2 = call fast <4 x float> @llvm.fabs.v4f32(<4 x float> %wide.masked.load) + %3 = getelementptr inbounds float, float* %s2, i32 %index + %4 = bitcast float* %3 to <4 x float>* + %wide.masked.load10 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %4, i32 4, <4 x i1> %active.lane.mask, <4 x float> poison) + %5 = call fast <4 x float> @llvm.fabs.v4f32(<4 x float> %wide.masked.load10) + %6 = call fast <4 x float> @llvm.maxnum.v4f32(<4 x float> %2, <4 x float> %5) + %7 = getelementptr inbounds float, float* %d, i32 %index + %8 = bitcast float* %7 to <4 x float>* + call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %6, <4 x float>* %8, i32 4, <4 x i1> %active.lane.mask) + %index.next = add i32 %index, 4 + %9 = icmp eq i32 %index.next, %n.vec + br i1 %9, label %for.cond.cleanup, label %vector.body + +for.cond.cleanup: ; preds = %vector.body, %entry + ret float undef +} + + +define half @maxf16(half* noalias nocapture readonly %s1, half* noalias nocapture readonly %s2, half* noalias nocapture %d, i32 %n) { +; CHECK-LABEL: maxf16: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: .save {r7, lr} +; CHECK-NEXT: push {r7, lr} +; CHECK-NEXT: cmp r3, #1 +; CHECK-NEXT: it lt +; CHECK-NEXT: poplt {r7, pc} +; CHECK-NEXT: .LBB2_1: @ %vector.ph +; CHECK-NEXT: add.w r12, r3, #7 +; CHECK-NEXT: mov.w lr, #1 +; CHECK-NEXT: bic r12, r12, #7 +; CHECK-NEXT: sub.w r12, r12, #8 +; CHECK-NEXT: add.w r12, lr, r12, lsr #3 +; CHECK-NEXT: dls lr, r12 +; CHECK-NEXT: .LBB2_2: @ %vector.body +; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: vctp.16 r3 +; CHECK-NEXT: vpst +; CHECK-NEXT: vldrht.u16 q0, [r0], #16 +; CHECK-NEXT: subs r3, #8 +; CHECK-NEXT: vabs.f16 q0, q0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vldrht.u16 q1, [r1], #16 +; CHECK-NEXT: vmaxnm.f16 q0, q0, q1 +; CHECK-NEXT: vpst +; CHECK-NEXT: vstrht.16 q0, [r2], #16 +; CHECK-NEXT: le lr, .LBB2_2 +; CHECK-NEXT: @ %bb.3: @ %for.cond.cleanup +; CHECK-NEXT: pop {r7, pc} +entry: + %cmp10 = icmp sgt i32 %n, 0 + br i1 %cmp10, label %vector.ph, label %for.cond.cleanup + +vector.ph: ; preds = %entry + %n.rnd.up = add i32 %n, 7 + 
%n.vec = and i32 %n.rnd.up, -8 + br label %vector.body + +vector.body: ; preds = %vector.body, %vector.ph + %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ] + %active.lane.mask = call <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32 %index, i32 %n) + %0 = getelementptr inbounds half, half* %s1, i32 %index + %1 = bitcast half* %0 to <8 x half>* + %wide.masked.load = call <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>* %1, i32 2, <8 x i1> %active.lane.mask, <8 x half> poison) + %2 = call fast <8 x half> @llvm.fabs.v8f16(<8 x half> %wide.masked.load) + %3 = getelementptr inbounds half, half* %s2, i32 %index + %4 = bitcast half* %3 to <8 x half>* + %wide.masked.load12 = call <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>* %4, i32 2, <8 x i1> %active.lane.mask, <8 x half> poison) + %5 = call fast <8 x half> @llvm.maxnum.v8f16(<8 x half> %2, <8 x half> %wide.masked.load12) + %6 = getelementptr inbounds half, half* %d, i32 %index + %7 = bitcast half* %6 to <8 x half>* + call void @llvm.masked.store.v8f16.p0v8f16(<8 x half> %5, <8 x half>* %7, i32 2, <8 x i1> %active.lane.mask) + %index.next = add i32 %index, 8 + %8 = icmp eq i32 %index.next, %n.vec + br i1 %8, label %for.cond.cleanup, label %vector.body + +for.cond.cleanup: ; preds = %vector.body, %entry + ret half undef +} + +define half @minaf16(half* noalias nocapture readonly %s1, half* noalias nocapture readonly %s2, half* noalias nocapture %d, i32 %n) { +; CHECK-LABEL: minaf16: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: .save {r7, lr} +; CHECK-NEXT: push {r7, lr} +; CHECK-NEXT: cmp r3, #1 +; CHECK-NEXT: it lt +; CHECK-NEXT: poplt {r7, pc} +; CHECK-NEXT: .LBB3_1: @ %vector.ph +; CHECK-NEXT: add.w r12, r3, #7 +; CHECK-NEXT: mov.w lr, #1 +; CHECK-NEXT: bic r12, r12, #7 +; CHECK-NEXT: sub.w r12, r12, #8 +; CHECK-NEXT: add.w r12, lr, r12, lsr #3 +; CHECK-NEXT: dls lr, r12 +; CHECK-NEXT: .LBB3_2: @ %vector.body +; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: vctp.16 r3 +; CHECK-NEXT: subs r3, #8 +; CHECK-NEXT: vpstt +; CHECK-NEXT: vldrht.u16 q0, [r1], #16 +; CHECK-NEXT: vldrht.u16 q1, [r0], #16 +; CHECK-NEXT: vminnma.f16 q1, q0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vstrht.16 q1, [r2], #16 +; CHECK-NEXT: le lr, .LBB3_2 +; CHECK-NEXT: @ %bb.3: @ %for.cond.cleanup +; CHECK-NEXT: pop {r7, pc} +entry: + %cmp10 = icmp sgt i32 %n, 0 + br i1 %cmp10, label %vector.ph, label %for.cond.cleanup + +vector.ph: ; preds = %entry + %n.rnd.up = add i32 %n, 7 + %n.vec = and i32 %n.rnd.up, -8 + br label %vector.body + +vector.body: ; preds = %vector.body, %vector.ph + %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ] + %active.lane.mask = call <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32 %index, i32 %n) + %0 = getelementptr inbounds half, half* %s1, i32 %index + %1 = bitcast half* %0 to <8 x half>* + %wide.masked.load = call <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>* %1, i32 2, <8 x i1> %active.lane.mask, <8 x half> poison) + %2 = call fast <8 x half> @llvm.fabs.v8f16(<8 x half> %wide.masked.load) + %3 = getelementptr inbounds half, half* %s2, i32 %index + %4 = bitcast half* %3 to <8 x half>* + %wide.masked.load12 = call <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>* %4, i32 2, <8 x i1> %active.lane.mask, <8 x half> poison) + %5 = call fast <8 x half> @llvm.fabs.v8f16(<8 x half> %wide.masked.load12) + %6 = call fast <8 x half> @llvm.minnum.v8f16(<8 x half> %2, <8 x half> %5) + %7 = getelementptr inbounds half, half* %d, i32 %index + %8 = bitcast half* %7 to <8 x half>* + call void 
@llvm.masked.store.v8f16.p0v8f16(<8 x half> %6, <8 x half>* %8, i32 2, <8 x i1> %active.lane.mask) + %index.next = add i32 %index, 8 + %9 = icmp eq i32 %index.next, %n.vec + br i1 %9, label %for.cond.cleanup, label %vector.body + +for.cond.cleanup: ; preds = %vector.body, %entry + ret half undef +} + +declare <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32, i32) +declare <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>*, i32 immarg, <4 x i1>, <4 x float>) +declare <4 x float> @llvm.fabs.v4f32(<4 x float>) +declare <4 x float> @llvm.minnum.v4f32(<4 x float>, <4 x float>) +declare <4 x float> @llvm.maxnum.v4f32(<4 x float>, <4 x float>) +declare void @llvm.masked.store.v4f32.p0v4f32(<4 x float>, <4 x float>*, i32 immarg, <4 x i1>) +declare <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32, i32) +declare <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>*, i32 immarg, <8 x i1>, <8 x half>) +declare <8 x half> @llvm.fabs.v8f16(<8 x half>) +declare <8 x half> @llvm.minnum.v8f16(<8 x half>, <8 x half>) +declare <8 x half> @llvm.maxnum.v8f16(<8 x half>, <8 x half>) +declare void @llvm.masked.store.v8f16.p0v8f16(<8 x half>, <8 x half>*, i32 immarg, <8 x i1>)