| OLD | NEW |
| 1 ; This tests parsing NaCl intrinsics not related to atomic operations. | 1 ; This tests parsing NaCl intrinsics not related to atomic operations. |
| 2 | 2 |
| 3 ; RUN: %p2i -i %s --insts | FileCheck %s | 3 ; RUN: %p2i -i %s --insts --args -allow-externally-defined-symbols \ |
| 4 ; RUN: | FileCheck %s |
| 4 ; RUN: %if --need=allow_disable_ir_gen --command \ | 5 ; RUN: %if --need=allow_disable_ir_gen --command \ |
| 5 ; RUN: %p2i -i %s --args -notranslate -timing -no-ir-gen \ | 6 ; RUN: %p2i -i %s --args -notranslate -timing -no-ir-gen \ |
| 7 ; RUN: -allow-externally-defined-symbols \ |
| 6 ; RUN: | %if --need=allow_disable_ir_gen --command \ | 8 ; RUN: | %if --need=allow_disable_ir_gen --command \ |
| 7 ; RUN: FileCheck --check-prefix=NOIR %s | 9 ; RUN: FileCheck --check-prefix=NOIR %s |
| 8 | 10 |
| 9 declare i8* @llvm.nacl.read.tp() | 11 declare i8* @llvm.nacl.read.tp() |
| 10 declare void @llvm.memcpy.p0i8.p0i8.i32(i8*, i8*, i32, i32, i1) | 12 declare void @llvm.memcpy.p0i8.p0i8.i32(i8*, i8*, i32, i32, i1) |
| 11 declare void @llvm.memmove.p0i8.p0i8.i32(i8*, i8*, i32, i32, i1) | 13 declare void @llvm.memmove.p0i8.p0i8.i32(i8*, i8*, i32, i32, i1) |
| 12 declare void @llvm.memset.p0i8.i32(i8*, i8, i32, i32, i1) | 14 declare void @llvm.memset.p0i8.i32(i8*, i8, i32, i32, i1) |
| 13 declare void @llvm.nacl.longjmp(i8*, i32) | 15 declare void @llvm.nacl.longjmp(i8*, i32) |
| 14 declare i32 @llvm.nacl.setjmp(i8*) | 16 declare i32 @llvm.nacl.setjmp(i8*) |
| 15 declare float @llvm.sqrt.f32(float) | 17 declare float @llvm.sqrt.f32(float) |
| 16 declare double @llvm.sqrt.f64(double) | 18 declare double @llvm.sqrt.f64(double) |
| 17 declare float @llvm.fabs.f32(float) | 19 declare float @llvm.fabs.f32(float) |
| 18 declare double @llvm.fabs.f64(double) | 20 declare double @llvm.fabs.f64(double) |
| 19 declare <4 x float> @llvm.fabs.v4f32(<4 x float>) | 21 declare <4 x float> @llvm.fabs.v4f32(<4 x float>) |
| 20 declare void @llvm.trap() | 22 declare void @llvm.trap() |
| 21 declare i16 @llvm.bswap.i16(i16) | 23 declare i16 @llvm.bswap.i16(i16) |
| 22 declare i32 @llvm.bswap.i32(i32) | 24 declare i32 @llvm.bswap.i32(i32) |
| 23 declare i64 @llvm.bswap.i64(i64) | 25 declare i64 @llvm.bswap.i64(i64) |
| 24 declare i32 @llvm.ctlz.i32(i32, i1) | 26 declare i32 @llvm.ctlz.i32(i32, i1) |
| 25 declare i64 @llvm.ctlz.i64(i64, i1) | 27 declare i64 @llvm.ctlz.i64(i64, i1) |
| 26 declare i32 @llvm.cttz.i32(i32, i1) | 28 declare i32 @llvm.cttz.i32(i32, i1) |
| 27 declare i64 @llvm.cttz.i64(i64, i1) | 29 declare i64 @llvm.cttz.i64(i64, i1) |
| 28 declare i32 @llvm.ctpop.i32(i32) | 30 declare i32 @llvm.ctpop.i32(i32) |
| 29 declare i64 @llvm.ctpop.i64(i64) | 31 declare i64 @llvm.ctpop.i64(i64) |
| 30 declare i8* @llvm.stacksave() | 32 declare i8* @llvm.stacksave() |
| 31 declare void @llvm.stackrestore(i8*) | 33 declare void @llvm.stackrestore(i8*) |
| 32 | 34 |
| 33 define i32 @test_nacl_read_tp() { | 35 define internal i32 @test_nacl_read_tp() { |
| 34 entry: | 36 entry: |
| 35 %ptr = call i8* @llvm.nacl.read.tp() | 37 %ptr = call i8* @llvm.nacl.read.tp() |
| 36 %__1 = ptrtoint i8* %ptr to i32 | 38 %__1 = ptrtoint i8* %ptr to i32 |
| 37 ret i32 %__1 | 39 ret i32 %__1 |
| 38 } | 40 } |
| 39 | 41 |
| 40 ; CHECK: define i32 @test_nacl_read_tp() { | 42 ; CHECK: define internal i32 @test_nacl_read_tp() { |
| 41 ; CHECK-NEXT: entry: | 43 ; CHECK-NEXT: entry: |
| 42 ; CHECK-NEXT: %ptr = call i32 @llvm.nacl.read.tp() | 44 ; CHECK-NEXT: %ptr = call i32 @llvm.nacl.read.tp() |
| 43 ; CHECK-NEXT: ret i32 %ptr | 45 ; CHECK-NEXT: ret i32 %ptr |
| 44 ; CHECK-NEXT: } | 46 ; CHECK-NEXT: } |
| 45 | 47 |
| 46 define void @test_memcpy(i32 %iptr_dst, i32 %iptr_src, i32 %len) { | 48 define internal void @test_memcpy(i32 %iptr_dst, i32 %iptr_src, i32 %len) { |
| 47 entry: | 49 entry: |
| 48 %dst = inttoptr i32 %iptr_dst to i8* | 50 %dst = inttoptr i32 %iptr_dst to i8* |
| 49 %src = inttoptr i32 %iptr_src to i8* | 51 %src = inttoptr i32 %iptr_src to i8* |
| 50 call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %src, | 52 call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %src, |
| 51 i32 %len, i32 1, i1 false) | 53 i32 %len, i32 1, i1 false) |
| 52 ret void | 54 ret void |
| 53 } | 55 } |
| 54 | 56 |
| 55 ; CHECK-NEXT: define void @test_memcpy(i32 %iptr_dst, i32 %iptr_src, i32 %len) { | 57 ; CHECK-NEXT: define internal void @test_memcpy(i32 %iptr_dst, i32 %iptr_src, i32 %len) { |
| 56 ; CHECK-NEXT: entry: | 58 ; CHECK-NEXT: entry: |
| 57 ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i32 %iptr_dst, i32 %iptr_src, i32 %len, i32 1, i1 false) | 59 ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i32 %iptr_dst, i32 %iptr_src, i32 %len, i32 1, i1 false) |
| 58 ; CHECK-NEXT: ret void | 60 ; CHECK-NEXT: ret void |
| 59 ; CHECK-NEXT: } | 61 ; CHECK-NEXT: } |
| 60 | 62 |
| 61 define void @test_memmove(i32 %iptr_dst, i32 %iptr_src, i32 %len) { | 63 define internal void @test_memmove(i32 %iptr_dst, i32 %iptr_src, i32 %len) { |
| 62 entry: | 64 entry: |
| 63 %dst = inttoptr i32 %iptr_dst to i8* | 65 %dst = inttoptr i32 %iptr_dst to i8* |
| 64 %src = inttoptr i32 %iptr_src to i8* | 66 %src = inttoptr i32 %iptr_src to i8* |
| 65 call void @llvm.memmove.p0i8.p0i8.i32(i8* %dst, i8* %src, | 67 call void @llvm.memmove.p0i8.p0i8.i32(i8* %dst, i8* %src, |
| 66 i32 %len, i32 1, i1 false) | 68 i32 %len, i32 1, i1 false) |
| 67 ret void | 69 ret void |
| 68 } | 70 } |
| 69 | 71 |
| 70 ; CHECK-NEXT: define void @test_memmove(i32 %iptr_dst, i32 %iptr_src, i32 %len) { | 72 ; CHECK-NEXT: define internal void @test_memmove(i32 %iptr_dst, i32 %iptr_src, i32 %len) { |
| 71 ; CHECK-NEXT: entry: | 73 ; CHECK-NEXT: entry: |
| 72 ; CHECK-NEXT: call void @llvm.memmove.p0i8.p0i8.i32(i32 %iptr_dst, i32 %iptr_src, i32 %len, i32 1, i1 false) | 74 ; CHECK-NEXT: call void @llvm.memmove.p0i8.p0i8.i32(i32 %iptr_dst, i32 %iptr_src, i32 %len, i32 1, i1 false) |
| 73 ; CHECK-NEXT: ret void | 75 ; CHECK-NEXT: ret void |
| 74 ; CHECK-NEXT: } | 76 ; CHECK-NEXT: } |
| 75 | 77 |
| 76 define void @test_memset(i32 %iptr_dst, i32 %wide_val, i32 %len) { | 78 define internal void @test_memset(i32 %iptr_dst, i32 %wide_val, i32 %len) { |
| 77 entry: | 79 entry: |
| 78 %val = trunc i32 %wide_val to i8 | 80 %val = trunc i32 %wide_val to i8 |
| 79 %dst = inttoptr i32 %iptr_dst to i8* | 81 %dst = inttoptr i32 %iptr_dst to i8* |
| 80 call void @llvm.memset.p0i8.i32(i8* %dst, i8 %val, | 82 call void @llvm.memset.p0i8.i32(i8* %dst, i8 %val, |
| 81 i32 %len, i32 1, i1 false) | 83 i32 %len, i32 1, i1 false) |
| 82 ret void | 84 ret void |
| 83 } | 85 } |
| 84 | 86 |
| 85 ; CHECK-NEXT: define void @test_memset(i32 %iptr_dst, i32 %wide_val, i32 %len) { | 87 ; CHECK-NEXT: define internal void @test_memset(i32 %iptr_dst, i32 %wide_val, i32 %len) { |
| 86 ; CHECK-NEXT: entry: | 88 ; CHECK-NEXT: entry: |
| 87 ; CHECK-NEXT: %val = trunc i32 %wide_val to i8 | 89 ; CHECK-NEXT: %val = trunc i32 %wide_val to i8 |
| 88 ; CHECK-NEXT: call void @llvm.memset.p0i8.i32(i32 %iptr_dst, i8 %val, i32 %len, i32 1, i1 false) | 90 ; CHECK-NEXT: call void @llvm.memset.p0i8.i32(i32 %iptr_dst, i8 %val, i32 %len, i32 1, i1 false) |
| 89 ; CHECK-NEXT: ret void | 91 ; CHECK-NEXT: ret void |
| 90 ; CHECK-NEXT: } | 92 ; CHECK-NEXT: } |
| 91 | 93 |
| 92 define i32 @test_setjmplongjmp(i32 %iptr_env) { | 94 define internal i32 @test_setjmplongjmp(i32 %iptr_env) { |
| 93 entry: | 95 entry: |
| 94 %env = inttoptr i32 %iptr_env to i8* | 96 %env = inttoptr i32 %iptr_env to i8* |
| 95 %i = call i32 @llvm.nacl.setjmp(i8* %env) | 97 %i = call i32 @llvm.nacl.setjmp(i8* %env) |
| 96 %r1 = icmp eq i32 %i, 0 | 98 %r1 = icmp eq i32 %i, 0 |
| 97 br i1 %r1, label %Zero, label %NonZero | 99 br i1 %r1, label %Zero, label %NonZero |
| 98 Zero: | 100 Zero: |
| 99 ; Redundant inttoptr, to make --pnacl cast-eliding/re-insertion happy. | 101 ; Redundant inttoptr, to make --pnacl cast-eliding/re-insertion happy. |
| 100 %env2 = inttoptr i32 %iptr_env to i8* | 102 %env2 = inttoptr i32 %iptr_env to i8* |
| 101 call void @llvm.nacl.longjmp(i8* %env2, i32 1) | 103 call void @llvm.nacl.longjmp(i8* %env2, i32 1) |
| 102 ret i32 0 | 104 ret i32 0 |
| 103 NonZero: | 105 NonZero: |
| 104 ret i32 1 | 106 ret i32 1 |
| 105 } | 107 } |
| 106 | 108 |
| 107 ; CHECK-NEXT: define i32 @test_setjmplongjmp(i32 %iptr_env) { | 109 ; CHECK-NEXT: define internal i32 @test_setjmplongjmp(i32 %iptr_env) { |
| 108 ; CHECK-NEXT: entry: | 110 ; CHECK-NEXT: entry: |
| 109 ; CHECK-NEXT: %i = call i32 @llvm.nacl.setjmp(i32 %iptr_env) | 111 ; CHECK-NEXT: %i = call i32 @llvm.nacl.setjmp(i32 %iptr_env) |
| 110 ; CHECK-NEXT: %r1 = icmp eq i32 %i, 0 | 112 ; CHECK-NEXT: %r1 = icmp eq i32 %i, 0 |
| 111 ; CHECK-NEXT: br i1 %r1, label %Zero, label %NonZero | 113 ; CHECK-NEXT: br i1 %r1, label %Zero, label %NonZero |
| 112 ; CHECK-NEXT: Zero: | 114 ; CHECK-NEXT: Zero: |
| 113 ; CHECK-NEXT: call void @llvm.nacl.longjmp(i32 %iptr_env, i32 1) | 115 ; CHECK-NEXT: call void @llvm.nacl.longjmp(i32 %iptr_env, i32 1) |
| 114 ; CHECK-NEXT: ret i32 0 | 116 ; CHECK-NEXT: ret i32 0 |
| 115 ; CHECK-NEXT: NonZero: | 117 ; CHECK-NEXT: NonZero: |
| 116 ; CHECK-NEXT: ret i32 1 | 118 ; CHECK-NEXT: ret i32 1 |
| 117 ; CHECK-NEXT: } | 119 ; CHECK-NEXT: } |
| 118 | 120 |
| 119 define float @test_sqrt_float(float %x, i32 %iptr) { | 121 define internal float @test_sqrt_float(float %x, i32 %iptr) { |
| 120 entry: | 122 entry: |
| 121 %r = call float @llvm.sqrt.f32(float %x) | 123 %r = call float @llvm.sqrt.f32(float %x) |
| 122 %r2 = call float @llvm.sqrt.f32(float %r) | 124 %r2 = call float @llvm.sqrt.f32(float %r) |
| 123 %r3 = call float @llvm.sqrt.f32(float -0.0) | 125 %r3 = call float @llvm.sqrt.f32(float -0.0) |
| 124 %r4 = fadd float %r2, %r3 | 126 %r4 = fadd float %r2, %r3 |
| 125 ret float %r4 | 127 ret float %r4 |
| 126 } | 128 } |
| 127 | 129 |
| 128 ; CHECK-NEXT: define float @test_sqrt_float(float %x, i32 %iptr) { | 130 ; CHECK-NEXT: define internal float @test_sqrt_float(float %x, i32 %iptr) { |
| 129 ; CHECK-NEXT: entry: | 131 ; CHECK-NEXT: entry: |
| 130 ; CHECK-NEXT: %r = call float @llvm.sqrt.f32(float %x) | 132 ; CHECK-NEXT: %r = call float @llvm.sqrt.f32(float %x) |
| 131 ; CHECK-NEXT: %r2 = call float @llvm.sqrt.f32(float %r) | 133 ; CHECK-NEXT: %r2 = call float @llvm.sqrt.f32(float %r) |
| 132 ; CHECK-NEXT: %r3 = call float @llvm.sqrt.f32(float -0.000000e+00) | 134 ; CHECK-NEXT: %r3 = call float @llvm.sqrt.f32(float -0.000000e+00) |
| 133 ; CHECK-NEXT: %r4 = fadd float %r2, %r3 | 135 ; CHECK-NEXT: %r4 = fadd float %r2, %r3 |
| 134 ; CHECK-NEXT: ret float %r4 | 136 ; CHECK-NEXT: ret float %r4 |
| 135 ; CHECK-NEXT: } | 137 ; CHECK-NEXT: } |
| 136 | 138 |
| 137 define double @test_sqrt_double(double %x, i32 %iptr) { | 139 define internal double @test_sqrt_double(double %x, i32 %iptr) { |
| 138 entry: | 140 entry: |
| 139 %r = call double @llvm.sqrt.f64(double %x) | 141 %r = call double @llvm.sqrt.f64(double %x) |
| 140 %r2 = call double @llvm.sqrt.f64(double %r) | 142 %r2 = call double @llvm.sqrt.f64(double %r) |
| 141 %r3 = call double @llvm.sqrt.f64(double -0.0) | 143 %r3 = call double @llvm.sqrt.f64(double -0.0) |
| 142 %r4 = fadd double %r2, %r3 | 144 %r4 = fadd double %r2, %r3 |
| 143 ret double %r4 | 145 ret double %r4 |
| 144 } | 146 } |
| 145 | 147 |
| 146 ; CHECK-NEXT: define double @test_sqrt_double(double %x, i32 %iptr) { | 148 ; CHECK-NEXT: define internal double @test_sqrt_double(double %x, i32 %iptr) { |
| 147 ; CHECK-NEXT: entry: | 149 ; CHECK-NEXT: entry: |
| 148 ; CHECK-NEXT: %r = call double @llvm.sqrt.f64(double %x) | 150 ; CHECK-NEXT: %r = call double @llvm.sqrt.f64(double %x) |
| 149 ; CHECK-NEXT: %r2 = call double @llvm.sqrt.f64(double %r) | 151 ; CHECK-NEXT: %r2 = call double @llvm.sqrt.f64(double %r) |
| 150 ; CHECK-NEXT: %r3 = call double @llvm.sqrt.f64(double -0.000000e+00) | 152 ; CHECK-NEXT: %r3 = call double @llvm.sqrt.f64(double -0.000000e+00) |
| 151 ; CHECK-NEXT: %r4 = fadd double %r2, %r3 | 153 ; CHECK-NEXT: %r4 = fadd double %r2, %r3 |
| 152 ; CHECK-NEXT: ret double %r4 | 154 ; CHECK-NEXT: ret double %r4 |
| 153 ; CHECK-NEXT: } | 155 ; CHECK-NEXT: } |
| 154 | 156 |
| 155 define float @test_fabs_float(float %x) { | 157 define internal float @test_fabs_float(float %x) { |
| 156 entry: | 158 entry: |
| 157 %r = call float @llvm.fabs.f32(float %x) | 159 %r = call float @llvm.fabs.f32(float %x) |
| 158 %r2 = call float @llvm.fabs.f32(float %r) | 160 %r2 = call float @llvm.fabs.f32(float %r) |
| 159 %r3 = call float @llvm.fabs.f32(float -0.0) | 161 %r3 = call float @llvm.fabs.f32(float -0.0) |
| 160 %r4 = fadd float %r2, %r3 | 162 %r4 = fadd float %r2, %r3 |
| 161 ret float %r4 | 163 ret float %r4 |
| 162 } | 164 } |
| 163 | 165 |
| 164 ; CHECK-NEXT: define float @test_fabs_float(float %x) { | 166 ; CHECK-NEXT: define internal float @test_fabs_float(float %x) { |
| 165 ; CHECK-NEXT: entry: | 167 ; CHECK-NEXT: entry: |
| 166 ; CHECK-NEXT: %r = call float @llvm.fabs.f32(float %x) | 168 ; CHECK-NEXT: %r = call float @llvm.fabs.f32(float %x) |
| 167 ; CHECK-NEXT: %r2 = call float @llvm.fabs.f32(float %r) | 169 ; CHECK-NEXT: %r2 = call float @llvm.fabs.f32(float %r) |
| 168 ; CHECK-NEXT: %r3 = call float @llvm.fabs.f32(float -0.000000e+00) | 170 ; CHECK-NEXT: %r3 = call float @llvm.fabs.f32(float -0.000000e+00) |
| 169 ; CHECK-NEXT: %r4 = fadd float %r2, %r3 | 171 ; CHECK-NEXT: %r4 = fadd float %r2, %r3 |
| 170 ; CHECK-NEXT: ret float %r4 | 172 ; CHECK-NEXT: ret float %r4 |
| 171 ; CHECK-NEXT: } | 173 ; CHECK-NEXT: } |
| 172 | 174 |
| 173 define double @test_fabs_double(double %x) { | 175 define internal double @test_fabs_double(double %x) { |
| 174 entry: | 176 entry: |
| 175 %r = call double @llvm.fabs.f64(double %x) | 177 %r = call double @llvm.fabs.f64(double %x) |
| 176 %r2 = call double @llvm.fabs.f64(double %r) | 178 %r2 = call double @llvm.fabs.f64(double %r) |
| 177 %r3 = call double @llvm.fabs.f64(double -0.0) | 179 %r3 = call double @llvm.fabs.f64(double -0.0) |
| 178 %r4 = fadd double %r2, %r3 | 180 %r4 = fadd double %r2, %r3 |
| 179 ret double %r4 | 181 ret double %r4 |
| 180 } | 182 } |
| 181 | 183 |
| 182 ; CHECK-NEXT: define double @test_fabs_double(double %x) { | 184 ; CHECK-NEXT: define internal double @test_fabs_double(double %x) { |
| 183 ; CHECK-NEXT: entry: | 185 ; CHECK-NEXT: entry: |
| 184 ; CHECK-NEXT: %r = call double @llvm.fabs.f64(double %x) | 186 ; CHECK-NEXT: %r = call double @llvm.fabs.f64(double %x) |
| 185 ; CHECK-NEXT: %r2 = call double @llvm.fabs.f64(double %r) | 187 ; CHECK-NEXT: %r2 = call double @llvm.fabs.f64(double %r) |
| 186 ; CHECK-NEXT: %r3 = call double @llvm.fabs.f64(double -0.000000e+00) | 188 ; CHECK-NEXT: %r3 = call double @llvm.fabs.f64(double -0.000000e+00) |
| 187 ; CHECK-NEXT: %r4 = fadd double %r2, %r3 | 189 ; CHECK-NEXT: %r4 = fadd double %r2, %r3 |
| 188 ; CHECK-NEXT: ret double %r4 | 190 ; CHECK-NEXT: ret double %r4 |
| 189 ; CHECK-NEXT: } | 191 ; CHECK-NEXT: } |
| 190 | 192 |
| 191 define <4 x float> @test_fabs_v4f32(<4 x float> %x) { | 193 define internal <4 x float> @test_fabs_v4f32(<4 x float> %x) { |
| 192 entry: | 194 entry: |
| 193 %r = call <4 x float> @llvm.fabs.v4f32(<4 x float> %x) | 195 %r = call <4 x float> @llvm.fabs.v4f32(<4 x float> %x) |
| 194 %r2 = call <4 x float> @llvm.fabs.v4f32(<4 x float> %r) | 196 %r2 = call <4 x float> @llvm.fabs.v4f32(<4 x float> %r) |
| 195 %r3 = call <4 x float> @llvm.fabs.v4f32(<4 x float> undef) | 197 %r3 = call <4 x float> @llvm.fabs.v4f32(<4 x float> undef) |
| 196 %r4 = fadd <4 x float> %r2, %r3 | 198 %r4 = fadd <4 x float> %r2, %r3 |
| 197 ret <4 x float> %r4 | 199 ret <4 x float> %r4 |
| 198 } | 200 } |
| 199 | 201 |
| 200 ; CHECK-NEXT: define <4 x float> @test_fabs_v4f32(<4 x float> %x) { | 202 ; CHECK-NEXT: define internal <4 x float> @test_fabs_v4f32(<4 x float> %x) { |
| 201 ; CHECK-NEXT: entry: | 203 ; CHECK-NEXT: entry: |
| 202 ; CHECK-NEXT: %r = call <4 x float> @llvm.fabs.v4f32(<4 x float> %x) | 204 ; CHECK-NEXT: %r = call <4 x float> @llvm.fabs.v4f32(<4 x float> %x) |
| 203 ; CHECK-NEXT: %r2 = call <4 x float> @llvm.fabs.v4f32(<4 x float> %r) | 205 ; CHECK-NEXT: %r2 = call <4 x float> @llvm.fabs.v4f32(<4 x float> %r) |
| 204 ; CHECK-NEXT: %r3 = call <4 x float> @llvm.fabs.v4f32(<4 x float> undef) | 206 ; CHECK-NEXT: %r3 = call <4 x float> @llvm.fabs.v4f32(<4 x float> undef) |
| 205 ; CHECK-NEXT: %r4 = fadd <4 x float> %r2, %r3 | 207 ; CHECK-NEXT: %r4 = fadd <4 x float> %r2, %r3 |
| 206 ; CHECK-NEXT: ret <4 x float> %r4 | 208 ; CHECK-NEXT: ret <4 x float> %r4 |
| 207 ; CHECK-NEXT: } | 209 ; CHECK-NEXT: } |
| 208 | 210 |
| 209 define i32 @test_trap(i32 %br) { | 211 define internal i32 @test_trap(i32 %br) { |
| 210 entry: | 212 entry: |
| 211 %r1 = icmp eq i32 %br, 0 | 213 %r1 = icmp eq i32 %br, 0 |
| 212 br i1 %r1, label %Zero, label %NonZero | 214 br i1 %r1, label %Zero, label %NonZero |
| 213 Zero: | 215 Zero: |
| 214 call void @llvm.trap() | 216 call void @llvm.trap() |
| 215 unreachable | 217 unreachable |
| 216 NonZero: | 218 NonZero: |
| 217 ret i32 1 | 219 ret i32 1 |
| 218 } | 220 } |
| 219 | 221 |
| 220 ; CHECK-NEXT: define i32 @test_trap(i32 %br) { | 222 ; CHECK-NEXT: define internal i32 @test_trap(i32 %br) { |
| 221 ; CHECK-NEXT: entry: | 223 ; CHECK-NEXT: entry: |
| 222 ; CHECK-NEXT: %r1 = icmp eq i32 %br, 0 | 224 ; CHECK-NEXT: %r1 = icmp eq i32 %br, 0 |
| 223 ; CHECK-NEXT: br i1 %r1, label %Zero, label %NonZero | 225 ; CHECK-NEXT: br i1 %r1, label %Zero, label %NonZero |
| 224 ; CHECK-NEXT: Zero: | 226 ; CHECK-NEXT: Zero: |
| 225 ; CHECK-NEXT: call void @llvm.trap() | 227 ; CHECK-NEXT: call void @llvm.trap() |
| 226 ; CHECK-NEXT: unreachable | 228 ; CHECK-NEXT: unreachable |
| 227 ; CHECK-NEXT: NonZero: | 229 ; CHECK-NEXT: NonZero: |
| 228 ; CHECK-NEXT: ret i32 1 | 230 ; CHECK-NEXT: ret i32 1 |
| 229 ; CHECK-NEXT: } | 231 ; CHECK-NEXT: } |
| 230 | 232 |
| 231 define i32 @test_bswap_16(i32 %x) { | 233 define internal i32 @test_bswap_16(i32 %x) { |
| 232 entry: | 234 entry: |
| 233 %x_trunc = trunc i32 %x to i16 | 235 %x_trunc = trunc i32 %x to i16 |
| 234 %r = call i16 @llvm.bswap.i16(i16 %x_trunc) | 236 %r = call i16 @llvm.bswap.i16(i16 %x_trunc) |
| 235 %r_zext = zext i16 %r to i32 | 237 %r_zext = zext i16 %r to i32 |
| 236 ret i32 %r_zext | 238 ret i32 %r_zext |
| 237 } | 239 } |
| 238 | 240 |
| 239 ; CHECK-NEXT: define i32 @test_bswap_16(i32 %x) { | 241 ; CHECK-NEXT: define internal i32 @test_bswap_16(i32 %x) { |
| 240 ; CHECK-NEXT: entry: | 242 ; CHECK-NEXT: entry: |
| 241 ; CHECK-NEXT: %x_trunc = trunc i32 %x to i16 | 243 ; CHECK-NEXT: %x_trunc = trunc i32 %x to i16 |
| 242 ; CHECK-NEXT: %r = call i16 @llvm.bswap.i16(i16 %x_trunc) | 244 ; CHECK-NEXT: %r = call i16 @llvm.bswap.i16(i16 %x_trunc) |
| 243 ; CHECK-NEXT: %r_zext = zext i16 %r to i32 | 245 ; CHECK-NEXT: %r_zext = zext i16 %r to i32 |
| 244 ; CHECK-NEXT: ret i32 %r_zext | 246 ; CHECK-NEXT: ret i32 %r_zext |
| 245 ; CHECK-NEXT: } | 247 ; CHECK-NEXT: } |
| 246 | 248 |
| 247 define i32 @test_bswap_32(i32 %x) { | 249 define internal i32 @test_bswap_32(i32 %x) { |
| 248 entry: | 250 entry: |
| 249 %r = call i32 @llvm.bswap.i32(i32 %x) | 251 %r = call i32 @llvm.bswap.i32(i32 %x) |
| 250 ret i32 %r | 252 ret i32 %r |
| 251 } | 253 } |
| 252 | 254 |
| 253 ; CHECK-NEXT: define i32 @test_bswap_32(i32 %x) { | 255 ; CHECK-NEXT: define internal i32 @test_bswap_32(i32 %x) { |
| 254 ; CHECK-NEXT: entry: | 256 ; CHECK-NEXT: entry: |
| 255 ; CHECK-NEXT: %r = call i32 @llvm.bswap.i32(i32 %x) | 257 ; CHECK-NEXT: %r = call i32 @llvm.bswap.i32(i32 %x) |
| 256 ; CHECK-NEXT: ret i32 %r | 258 ; CHECK-NEXT: ret i32 %r |
| 257 ; CHECK-NEXT: } | 259 ; CHECK-NEXT: } |
| 258 | 260 |
| 259 define i64 @test_bswap_64(i64 %x) { | 261 define internal i64 @test_bswap_64(i64 %x) { |
| 260 entry: | 262 entry: |
| 261 %r = call i64 @llvm.bswap.i64(i64 %x) | 263 %r = call i64 @llvm.bswap.i64(i64 %x) |
| 262 ret i64 %r | 264 ret i64 %r |
| 263 } | 265 } |
| 264 | 266 |
| 265 ; CHECK-NEXT: define i64 @test_bswap_64(i64 %x) { | 267 ; CHECK-NEXT: define internal i64 @test_bswap_64(i64 %x) { |
| 266 ; CHECK-NEXT: entry: | 268 ; CHECK-NEXT: entry: |
| 267 ; CHECK-NEXT: %r = call i64 @llvm.bswap.i64(i64 %x) | 269 ; CHECK-NEXT: %r = call i64 @llvm.bswap.i64(i64 %x) |
| 268 ; CHECK-NEXT: ret i64 %r | 270 ; CHECK-NEXT: ret i64 %r |
| 269 ; CHECK-NEXT: } | 271 ; CHECK-NEXT: } |
| 270 | 272 |
| 271 define i32 @test_ctlz_32(i32 %x) { | 273 define internal i32 @test_ctlz_32(i32 %x) { |
| 272 entry: | 274 entry: |
| 273 %r = call i32 @llvm.ctlz.i32(i32 %x, i1 false) | 275 %r = call i32 @llvm.ctlz.i32(i32 %x, i1 false) |
| 274 ret i32 %r | 276 ret i32 %r |
| 275 } | 277 } |
| 276 | 278 |
| 277 ; CHECK-NEXT: define i32 @test_ctlz_32(i32 %x) { | 279 ; CHECK-NEXT: define internal i32 @test_ctlz_32(i32 %x) { |
| 278 ; CHECK-NEXT: entry: | 280 ; CHECK-NEXT: entry: |
| 279 ; CHECK-NEXT: %r = call i32 @llvm.ctlz.i32(i32 %x, i1 false) | 281 ; CHECK-NEXT: %r = call i32 @llvm.ctlz.i32(i32 %x, i1 false) |
| 280 ; CHECK-NEXT: ret i32 %r | 282 ; CHECK-NEXT: ret i32 %r |
| 281 ; CHECK-NEXT: } | 283 ; CHECK-NEXT: } |
| 282 | 284 |
| 283 define i64 @test_ctlz_64(i64 %x) { | 285 define internal i64 @test_ctlz_64(i64 %x) { |
| 284 entry: | 286 entry: |
| 285 %r = call i64 @llvm.ctlz.i64(i64 %x, i1 false) | 287 %r = call i64 @llvm.ctlz.i64(i64 %x, i1 false) |
| 286 ret i64 %r | 288 ret i64 %r |
| 287 } | 289 } |
| 288 | 290 |
| 289 ; CHECK-NEXT: define i64 @test_ctlz_64(i64 %x) { | 291 ; CHECK-NEXT: define internal i64 @test_ctlz_64(i64 %x) { |
| 290 ; CHECK-NEXT: entry: | 292 ; CHECK-NEXT: entry: |
| 291 ; CHECK-NEXT: %r = call i64 @llvm.ctlz.i64(i64 %x, i1 false) | 293 ; CHECK-NEXT: %r = call i64 @llvm.ctlz.i64(i64 %x, i1 false) |
| 292 ; CHECK-NEXT: ret i64 %r | 294 ; CHECK-NEXT: ret i64 %r |
| 293 ; CHECK-NEXT: } | 295 ; CHECK-NEXT: } |
| 294 | 296 |
| 295 define i32 @test_cttz_32(i32 %x) { | 297 define internal i32 @test_cttz_32(i32 %x) { |
| 296 entry: | 298 entry: |
| 297 %r = call i32 @llvm.cttz.i32(i32 %x, i1 false) | 299 %r = call i32 @llvm.cttz.i32(i32 %x, i1 false) |
| 298 ret i32 %r | 300 ret i32 %r |
| 299 } | 301 } |
| 300 | 302 |
| 301 ; CHECK-NEXT: define i32 @test_cttz_32(i32 %x) { | 303 ; CHECK-NEXT: define internal i32 @test_cttz_32(i32 %x) { |
| 302 ; CHECK-NEXT: entry: | 304 ; CHECK-NEXT: entry: |
| 303 ; CHECK-NEXT: %r = call i32 @llvm.cttz.i32(i32 %x, i1 false) | 305 ; CHECK-NEXT: %r = call i32 @llvm.cttz.i32(i32 %x, i1 false) |
| 304 ; CHECK-NEXT: ret i32 %r | 306 ; CHECK-NEXT: ret i32 %r |
| 305 ; CHECK-NEXT: } | 307 ; CHECK-NEXT: } |
| 306 | 308 |
| 307 define i64 @test_cttz_64(i64 %x) { | 309 define internal i64 @test_cttz_64(i64 %x) { |
| 308 entry: | 310 entry: |
| 309 %r = call i64 @llvm.cttz.i64(i64 %x, i1 false) | 311 %r = call i64 @llvm.cttz.i64(i64 %x, i1 false) |
| 310 ret i64 %r | 312 ret i64 %r |
| 311 } | 313 } |
| 312 | 314 |
| 313 ; CHECK-NEXT: define i64 @test_cttz_64(i64 %x) { | 315 ; CHECK-NEXT: define internal i64 @test_cttz_64(i64 %x) { |
| 314 ; CHECK-NEXT: entry: | 316 ; CHECK-NEXT: entry: |
| 315 ; CHECK-NEXT: %r = call i64 @llvm.cttz.i64(i64 %x, i1 false) | 317 ; CHECK-NEXT: %r = call i64 @llvm.cttz.i64(i64 %x, i1 false) |
| 316 ; CHECK-NEXT: ret i64 %r | 318 ; CHECK-NEXT: ret i64 %r |
| 317 ; CHECK-NEXT: } | 319 ; CHECK-NEXT: } |
| 318 | 320 |
| 319 define i32 @test_popcount_32(i32 %x) { | 321 define internal i32 @test_popcount_32(i32 %x) { |
| 320 entry: | 322 entry: |
| 321 %r = call i32 @llvm.ctpop.i32(i32 %x) | 323 %r = call i32 @llvm.ctpop.i32(i32 %x) |
| 322 ret i32 %r | 324 ret i32 %r |
| 323 } | 325 } |
| 324 | 326 |
| 325 ; CHECK-NEXT: define i32 @test_popcount_32(i32 %x) { | 327 ; CHECK-NEXT: define internal i32 @test_popcount_32(i32 %x) { |
| 326 ; CHECK-NEXT: entry: | 328 ; CHECK-NEXT: entry: |
| 327 ; CHECK-NEXT: %r = call i32 @llvm.ctpop.i32(i32 %x) | 329 ; CHECK-NEXT: %r = call i32 @llvm.ctpop.i32(i32 %x) |
| 328 ; CHECK-NEXT: ret i32 %r | 330 ; CHECK-NEXT: ret i32 %r |
| 329 ; CHECK-NEXT: } | 331 ; CHECK-NEXT: } |
| 330 | 332 |
| 331 define i64 @test_popcount_64(i64 %x) { | 333 define internal i64 @test_popcount_64(i64 %x) { |
| 332 entry: | 334 entry: |
| 333 %r = call i64 @llvm.ctpop.i64(i64 %x) | 335 %r = call i64 @llvm.ctpop.i64(i64 %x) |
| 334 ret i64 %r | 336 ret i64 %r |
| 335 } | 337 } |
| 336 | 338 |
| 337 ; CHECK-NEXT: define i64 @test_popcount_64(i64 %x) { | 339 ; CHECK-NEXT: define internal i64 @test_popcount_64(i64 %x) { |
| 338 ; CHECK-NEXT: entry: | 340 ; CHECK-NEXT: entry: |
| 339 ; CHECK-NEXT: %r = call i64 @llvm.ctpop.i64(i64 %x) | 341 ; CHECK-NEXT: %r = call i64 @llvm.ctpop.i64(i64 %x) |
| 340 ; CHECK-NEXT: ret i64 %r | 342 ; CHECK-NEXT: ret i64 %r |
| 341 ; CHECK-NEXT: } | 343 ; CHECK-NEXT: } |
| 342 | 344 |
| 343 define void @test_stacksave_noalloca() { | 345 define internal void @test_stacksave_noalloca() { |
| 344 entry: | 346 entry: |
| 345 %sp = call i8* @llvm.stacksave() | 347 %sp = call i8* @llvm.stacksave() |
| 346 call void @llvm.stackrestore(i8* %sp) | 348 call void @llvm.stackrestore(i8* %sp) |
| 347 ret void | 349 ret void |
| 348 } | 350 } |
| 349 | 351 |
| 350 ; CHECK-NEXT: define void @test_stacksave_noalloca() { | 352 ; CHECK-NEXT: define internal void @test_stacksave_noalloca() { |
| 351 ; CHECK-NEXT: entry: | 353 ; CHECK-NEXT: entry: |
| 352 ; CHECK-NEXT: %sp = call i32 @llvm.stacksave() | 354 ; CHECK-NEXT: %sp = call i32 @llvm.stacksave() |
| 353 ; CHECK-NEXT: call void @llvm.stackrestore(i32 %sp) | 355 ; CHECK-NEXT: call void @llvm.stackrestore(i32 %sp) |
| 354 ; CHECK-NEXT: ret void | 356 ; CHECK-NEXT: ret void |
| 355 ; CHECK-NEXT: } | 357 ; CHECK-NEXT: } |
| 356 | 358 |
| 357 declare i32 @foo(i32 %x) | 359 declare i32 @foo(i32 %x) |
| 358 | 360 |
| 359 define void @test_stacksave_multiple(i32 %x) { | 361 define internal void @test_stacksave_multiple(i32 %x) { |
| 360 entry: | 362 entry: |
| 361 %x_4 = mul i32 %x, 4 | 363 %x_4 = mul i32 %x, 4 |
| 362 %sp1 = call i8* @llvm.stacksave() | 364 %sp1 = call i8* @llvm.stacksave() |
| 363 %tmp1 = alloca i8, i32 %x_4, align 4 | 365 %tmp1 = alloca i8, i32 %x_4, align 4 |
| 364 | 366 |
| 365 %sp2 = call i8* @llvm.stacksave() | 367 %sp2 = call i8* @llvm.stacksave() |
| 366 %tmp2 = alloca i8, i32 %x_4, align 4 | 368 %tmp2 = alloca i8, i32 %x_4, align 4 |
| 367 | 369 |
| 368 %y = call i32 @foo(i32 %x) | 370 %y = call i32 @foo(i32 %x) |
| 369 | 371 |
| 370 %sp3 = call i8* @llvm.stacksave() | 372 %sp3 = call i8* @llvm.stacksave() |
| 371 %tmp3 = alloca i8, i32 %x_4, align 4 | 373 %tmp3 = alloca i8, i32 %x_4, align 4 |
| 372 | 374 |
| 373 %__9 = bitcast i8* %tmp1 to i32* | 375 %__9 = bitcast i8* %tmp1 to i32* |
| 374 store i32 %y, i32* %__9, align 1 | 376 store i32 %y, i32* %__9, align 1 |
| 375 | 377 |
| 376 %__10 = bitcast i8* %tmp2 to i32* | 378 %__10 = bitcast i8* %tmp2 to i32* |
| 377 store i32 %x, i32* %__10, align 1 | 379 store i32 %x, i32* %__10, align 1 |
| 378 | 380 |
| 379 %__11 = bitcast i8* %tmp3 to i32* | 381 %__11 = bitcast i8* %tmp3 to i32* |
| 380 store i32 %x, i32* %__11, align 1 | 382 store i32 %x, i32* %__11, align 1 |
| 381 | 383 |
| 382 call void @llvm.stackrestore(i8* %sp1) | 384 call void @llvm.stackrestore(i8* %sp1) |
| 383 ret void | 385 ret void |
| 384 } | 386 } |
| 385 | 387 |
| 386 ; CHECK-NEXT: define void @test_stacksave_multiple(i32 %x) { | 388 ; CHECK-NEXT: define internal void @test_stacksave_multiple(i32 %x) { |
| 387 ; CHECK-NEXT: entry: | 389 ; CHECK-NEXT: entry: |
| 388 ; CHECK-NEXT: %x_4 = mul i32 %x, 4 | 390 ; CHECK-NEXT: %x_4 = mul i32 %x, 4 |
| 389 ; CHECK-NEXT: %sp1 = call i32 @llvm.stacksave() | 391 ; CHECK-NEXT: %sp1 = call i32 @llvm.stacksave() |
| 390 ; CHECK-NEXT: %tmp1 = alloca i8, i32 %x_4, align 4 | 392 ; CHECK-NEXT: %tmp1 = alloca i8, i32 %x_4, align 4 |
| 391 ; CHECK-NEXT: %sp2 = call i32 @llvm.stacksave() | 393 ; CHECK-NEXT: %sp2 = call i32 @llvm.stacksave() |
| 392 ; CHECK-NEXT: %tmp2 = alloca i8, i32 %x_4, align 4 | 394 ; CHECK-NEXT: %tmp2 = alloca i8, i32 %x_4, align 4 |
| 393 ; CHECK-NEXT: %y = call i32 @foo(i32 %x) | 395 ; CHECK-NEXT: %y = call i32 @foo(i32 %x) |
| 394 ; CHECK-NEXT: %sp3 = call i32 @llvm.stacksave() | 396 ; CHECK-NEXT: %sp3 = call i32 @llvm.stacksave() |
| 395 ; CHECK-NEXT: %tmp3 = alloca i8, i32 %x_4, align 4 | 397 ; CHECK-NEXT: %tmp3 = alloca i8, i32 %x_4, align 4 |
| 396 ; CHECK-NEXT: store i32 %y, i32* %tmp1, align 1 | 398 ; CHECK-NEXT: store i32 %y, i32* %tmp1, align 1 |
| 397 ; CHECK-NEXT: store i32 %x, i32* %tmp2, align 1 | 399 ; CHECK-NEXT: store i32 %x, i32* %tmp2, align 1 |
| 398 ; CHECK-NEXT: store i32 %x, i32* %tmp3, align 1 | 400 ; CHECK-NEXT: store i32 %x, i32* %tmp3, align 1 |
| 399 ; CHECK-NEXT: call void @llvm.stackrestore(i32 %sp1) | 401 ; CHECK-NEXT: call void @llvm.stackrestore(i32 %sp1) |
| 400 ; CHECK-NEXT: ret void | 402 ; CHECK-NEXT: ret void |
| 401 ; CHECK-NEXT: } | 403 ; CHECK-NEXT: } |
| 402 | 404 |
| 403 ; NOIR: Total across all functions | 405 ; NOIR: Total across all functions |