Diffstat (limited to 'test/Bitcode')
-rw-r--r-- | test/Bitcode/Inputs/invalid-gep-mismatched-explicit-type.bc          | bin | 0 -> 448 bytes
-rw-r--r-- | test/Bitcode/Inputs/invalid-gep-operator-mismatched-explicit-type.bc | bin | 0 -> 492 bytes
-rw-r--r-- | test/Bitcode/Inputs/invalid-load-mismatched-explicit-type.bc         | bin | 0 -> 432 bytes
-rw-r--r-- | test/Bitcode/arm32_neon_vcnt_upgrade.ll                              |   4
-rw-r--r-- | test/Bitcode/case-ranges-3.3.ll                                      |   4
-rw-r--r-- | test/Bitcode/constantsTest.3.2.ll                                    |   8
-rw-r--r-- | test/Bitcode/function-encoding-rel-operands.ll                       |   4
-rw-r--r-- | test/Bitcode/highLevelStructure.3.2.ll                               |   2
-rw-r--r-- | test/Bitcode/invalid.test                                            |   9
-rw-r--r-- | test/Bitcode/memInstructions.3.2.ll                                  | 142
-rw-r--r-- | test/Bitcode/metadata-2.ll                                           |   6
-rw-r--r-- | test/Bitcode/old-aliases.ll                                          |   4
-rw-r--r-- | test/Bitcode/upgrade-loop-metadata.ll                                |   4
-rw-r--r-- | test/Bitcode/use-list-order.ll                                       |  14
14 files changed, 105 insertions, 96 deletions
diff --git a/test/Bitcode/Inputs/invalid-gep-mismatched-explicit-type.bc b/test/Bitcode/Inputs/invalid-gep-mismatched-explicit-type.bc
new file mode 100644
index 0000000..0d828e8
--- /dev/null
+++ b/test/Bitcode/Inputs/invalid-gep-mismatched-explicit-type.bc
Binary files differ
diff --git a/test/Bitcode/Inputs/invalid-gep-operator-mismatched-explicit-type.bc b/test/Bitcode/Inputs/invalid-gep-operator-mismatched-explicit-type.bc
new file mode 100644
index 0000000..3af687f
--- /dev/null
+++ b/test/Bitcode/Inputs/invalid-gep-operator-mismatched-explicit-type.bc
Binary files differ
diff --git a/test/Bitcode/Inputs/invalid-load-mismatched-explicit-type.bc b/test/Bitcode/Inputs/invalid-load-mismatched-explicit-type.bc
new file mode 100644
index 0000000..1b8cbc7
--- /dev/null
+++ b/test/Bitcode/Inputs/invalid-load-mismatched-explicit-type.bc
Binary files differ
diff --git a/test/Bitcode/arm32_neon_vcnt_upgrade.ll b/test/Bitcode/arm32_neon_vcnt_upgrade.ll
index ed3981b..0032c4a 100644
--- a/test/Bitcode/arm32_neon_vcnt_upgrade.ll
+++ b/test/Bitcode/arm32_neon_vcnt_upgrade.ll
@@ -4,7 +4,7 @@
 
 define <4 x i16> @vclz16(<4 x i16>* %A) nounwind {
 ;CHECK: @vclz16
-  %tmp1 = load <4 x i16>* %A
+  %tmp1 = load <4 x i16>, <4 x i16>* %A
   %tmp2 = call <4 x i16> @llvm.arm.neon.vclz.v4i16(<4 x i16> %tmp1)
 ;CHECK: {{call.*@llvm.ctlz.v4i16\(<4 x i16>.*, i1 false}}
   ret <4 x i16> %tmp2
@@ -12,7 +12,7 @@ define <4 x i16> @vclz16(<4 x i16>* %A) nounwind {
 
 define <8 x i8> @vcnt8(<8 x i8>* %A) nounwind {
 ;CHECK: @vcnt8
-  %tmp1 = load <8 x i8>* %A
+  %tmp1 = load <8 x i8>, <8 x i8>* %A
   %tmp2 = call <8 x i8> @llvm.arm.neon.vcnt.v8i8(<8 x i8> %tmp1)
 ;CHECK: call <8 x i8> @llvm.ctpop.v8i8(<8 x i8>
   ret <8 x i8> %tmp2
diff --git a/test/Bitcode/case-ranges-3.3.ll b/test/Bitcode/case-ranges-3.3.ll
index 020b37f..eb55ef1 100644
--- a/test/Bitcode/case-ranges-3.3.ll
+++ b/test/Bitcode/case-ranges-3.3.ll
@@ -10,7 +10,7 @@ define i32 @foo(i32 %x) nounwind ssp uwtable {
   %1 = alloca i32, align 4
   %2 = alloca i32, align 4
   store i32 %x, i32* %2, align 4
-  %3 = load i32* %2, align 4
+  %3 = load i32, i32* %2, align 4
   switch i32 %3, label %9 [
 ; CHECK: switch i32 %3, label %9
     i32 -3, label %4
@@ -63,6 +63,6 @@ define i32 @foo(i32 %x) nounwind ssp uwtable {
   br label %11
 
 ; <label>:11
-  %12 = load i32* %1
+  %12 = load i32, i32* %1
   ret i32 %12
 }
diff --git a/test/Bitcode/constantsTest.3.2.ll b/test/Bitcode/constantsTest.3.2.ll
index b4973cf..3dea935 100644
--- a/test/Bitcode/constantsTest.3.2.ll
+++ b/test/Bitcode/constantsTest.3.2.ll
@@ -99,10 +99,10 @@ entry:
   inttoptr i8 1 to i8*
 ; CHECK-NEXT: bitcast i32 1 to <2 x i16>
   bitcast i32 1 to <2 x i16>
-; CHECK-NEXT: getelementptr i32* @X, i32 0
-  getelementptr i32* @X, i32 0
-; CHECK-NEXT: getelementptr inbounds i32* @X, i32 0
-  getelementptr inbounds i32* @X, i32 0
+; CHECK-NEXT: getelementptr i32, i32* @X, i32 0
+  getelementptr i32, i32* @X, i32 0
+; CHECK-NEXT: getelementptr inbounds i32, i32* @X, i32 0
+  getelementptr inbounds i32, i32* @X, i32 0
 ; CHECK: select i1 true, i32 1, i32 0
   select i1 true ,i32 1, i32 0
 ; CHECK-NEXT: icmp eq i32 1, 0
diff --git a/test/Bitcode/function-encoding-rel-operands.ll b/test/Bitcode/function-encoding-rel-operands.ll
index a96253b..1307dd4 100644
--- a/test/Bitcode/function-encoding-rel-operands.ll
+++ b/test/Bitcode/function-encoding-rel-operands.ll
@@ -43,8 +43,8 @@ define double @test_float_binops(i32 %a) nounwind {
 ; CHECK: INST_RET {{.*}}op0=1
 define i1 @test_load(i32 %a, {i32, i32}* %ptr) nounwind {
 entry:
-  %0 = getelementptr inbounds {i32, i32}* %ptr, i32 %a, i32 0
-  %1 = load i32* %0
+  %0 = getelementptr inbounds {i32, i32}, {i32, i32}* %ptr, i32 %a, i32 0
+  %1 = load i32, i32* %0
   %2 = icmp eq i32 %1, %a
   ret i1 %2
 }
diff --git a/test/Bitcode/highLevelStructure.3.2.ll b/test/Bitcode/highLevelStructure.3.2.ll
index 88fb340..54356b9f 100644
--- a/test/Bitcode/highLevelStructure.3.2.ll
+++ b/test/Bitcode/highLevelStructure.3.2.ll
@@ -5,7 +5,7 @@
 ; older bitcode files.
 
 ; Data Layout Test
-; CHECK: target datalayout = "e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-f80:32-n8:16:32-S32"
+; CHECK: target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v16:16:16-v24:32:32-v32:32:32-v48:64:64-v64:64:64-v96:128:128-v128:128:128-v192:256:256-v256:256:256-v512:512:512-v1024:1024:1024-a0:0:64-f80:32:32-n8:16:32-S32"
 target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v16:16:16-v24:32:32-v32:32:32-v48:64:64-v64:64:64-v96:128:128-v128:128:128-v192:256:256-v256:256:256-v512:512:512-v1024:1024:1024-a0:0:64-f80:32:32-n8:16:32-S32"
 
 ; Module-Level Inline Assembly Test
diff --git a/test/Bitcode/invalid.test b/test/Bitcode/invalid.test
index fb81888..0eacb9d 100644
--- a/test/Bitcode/invalid.test
+++ b/test/Bitcode/invalid.test
@@ -12,6 +12,12 @@ RUN: not llvm-dis -disable-output %p/Inputs/invalid-bitwidth.bc 2>&1 | \
 RUN:   FileCheck --check-prefix=BAD-BITWIDTH %s
 RUN: not llvm-dis -disable-output %p/Inputs/invalid-align.bc 2>&1 | \
 RUN:   FileCheck --check-prefix=BAD-ALIGN %s
+RUN: not llvm-dis -disable-output %p/Inputs/invalid-gep-mismatched-explicit-type.bc 2>&1 | \
+RUN:   FileCheck --check-prefix=MISMATCHED-EXPLICIT-GEP %s
+RUN: not llvm-dis -disable-output %p/Inputs/invalid-load-mismatched-explicit-type.bc 2>&1 | \
+RUN:   FileCheck --check-prefix=MISMATCHED-EXPLICIT-LOAD %s
+RUN: not llvm-dis -disable-output %p/Inputs/invalid-gep-operator-mismatched-explicit-type.bc 2>&1 | \
+RUN:   FileCheck --check-prefix=MISMATCHED-EXPLICIT-GEP-OPERATOR %s
 
 INVALID-ENCODING: Invalid encoding
 BAD-ABBREV: Abbreviation starts with an Array or a Blob
@@ -20,6 +26,9 @@ BAD-ABBREV-NUMBER: Invalid abbrev number
 BAD-TYPE-TABLE-FORWARD-REF: Invalid TYPE table: Only named structs can be forward referenced
 BAD-BITWIDTH: Bitwidth for integer type out of range
 BAD-ALIGN: Invalid alignment value
+MISMATCHED-EXPLICIT-GEP: Explicit gep type does not match pointee type of pointer operand
+MISMATCHED-EXPLICIT-LOAD: Explicit load type does not match pointee type of pointer operand
+MISMATCHED-EXPLICIT-GEP-OPERATOR: Explicit gep operator type does not match pointee type of pointer operand
 
 RUN: not llvm-dis -disable-output %p/Inputs/invalid-extractval-array-idx.bc 2>&1 | \
 RUN:   FileCheck --check-prefix=EXTRACT-ARRAY %s
diff --git a/test/Bitcode/memInstructions.3.2.ll b/test/Bitcode/memInstructions.3.2.ll
index d826dd1..1ab05b6 100644
--- a/test/Bitcode/memInstructions.3.2.ll
+++ b/test/Bitcode/memInstructions.3.2.ll
@@ -27,53 +27,53 @@ entry:
   %ptr1 = alloca i8
   store i8 2, i8* %ptr1
 
-; CHECK: %res1 = load i8* %ptr1
-  %res1 = load i8* %ptr1
+; CHECK: %res1 = load i8, i8* %ptr1
+  %res1 = load i8, i8* %ptr1
 
-; CHECK-NEXT: %res2 = load volatile i8* %ptr1
-  %res2 = load volatile i8* %ptr1
+; CHECK-NEXT: %res2 = load volatile i8, i8* %ptr1
+  %res2 = load volatile i8, i8* %ptr1
 
-; CHECK-NEXT: %res3 = load i8* %ptr1, align 1
-  %res3 = load i8* %ptr1, align 1
+; CHECK-NEXT: %res3 = load i8, i8* %ptr1, align 1
+  %res3 = load i8, i8* %ptr1, align 1
 
-; CHECK-NEXT: %res4 = load volatile i8* %ptr1, align 1
-  %res4 = load volatile i8* %ptr1, align 1
+; CHECK-NEXT: %res4 = load volatile i8, i8* %ptr1, align 1
+  %res4 = load volatile i8, i8* %ptr1, align 1
 
-; CHECK-NEXT: %res5 = load i8* %ptr1, !nontemporal !0
-  %res5 = load i8* %ptr1, !nontemporal !0
+; CHECK-NEXT: %res5 = load i8, i8* %ptr1, !nontemporal !0
+  %res5 = load i8, i8* %ptr1, !nontemporal !0
 
-; CHECK-NEXT: %res6 = load volatile i8* %ptr1, !nontemporal !0
-  %res6 = load volatile i8* %ptr1, !nontemporal !0
+; CHECK-NEXT: %res6 = load volatile i8, i8* %ptr1, !nontemporal !0
+  %res6 = load volatile i8, i8* %ptr1, !nontemporal !0
 
-; CHECK-NEXT: %res7 = load i8* %ptr1, align 1, !nontemporal !0
-  %res7 = load i8* %ptr1, align 1, !nontemporal !0
+; CHECK-NEXT: %res7 = load i8, i8* %ptr1, align 1, !nontemporal !0
+  %res7 = load i8, i8* %ptr1, align 1, !nontemporal !0
 
-; CHECK-NEXT: %res8 = load volatile i8* %ptr1, align 1, !nontemporal !0
-  %res8 = load volatile i8* %ptr1, align 1, !nontemporal !0
+; CHECK-NEXT: %res8 = load volatile i8, i8* %ptr1, align 1, !nontemporal !0
+  %res8 = load volatile i8, i8* %ptr1, align 1, !nontemporal !0
 
-; CHECK-NEXT: %res9 = load i8* %ptr1, !invariant.load !1
-  %res9 = load i8* %ptr1, !invariant.load !1
+; CHECK-NEXT: %res9 = load i8, i8* %ptr1, !invariant.load !1
+  %res9 = load i8, i8* %ptr1, !invariant.load !1
 
-; CHECK-NEXT: %res10 = load volatile i8* %ptr1, !invariant.load !1
-  %res10 = load volatile i8* %ptr1, !invariant.load !1
+; CHECK-NEXT: %res10 = load volatile i8, i8* %ptr1, !invariant.load !1
+  %res10 = load volatile i8, i8* %ptr1, !invariant.load !1
 
-; CHECK-NEXT: %res11 = load i8* %ptr1, align 1, !invariant.load !1
-  %res11 = load i8* %ptr1, align 1, !invariant.load !1
+; CHECK-NEXT: %res11 = load i8, i8* %ptr1, align 1, !invariant.load !1
+  %res11 = load i8, i8* %ptr1, align 1, !invariant.load !1
 
-; CHECK-NEXT: %res12 = load volatile i8* %ptr1, align 1, !invariant.load !1
-  %res12 = load volatile i8* %ptr1, align 1, !invariant.load !1
+; CHECK-NEXT: %res12 = load volatile i8, i8* %ptr1, align 1, !invariant.load !1
+  %res12 = load volatile i8, i8* %ptr1, align 1, !invariant.load !1
 
-; CHECK-NEXT: %res13 = load i8* %ptr1, {{[(!nontemporal !0, !invariant.load !1) | (!invariant.load !1, !nontemporal !0)]}}
-  %res13 = load i8* %ptr1, !nontemporal !0, !invariant.load !1
+; CHECK-NEXT: %res13 = load i8, i8* %ptr1, {{[(!nontemporal !0, !invariant.load !1) | (!invariant.load !1, !nontemporal !0)]}}
+  %res13 = load i8, i8* %ptr1, !nontemporal !0, !invariant.load !1
 
-; CHECK-NEXT: %res14 = load volatile i8* %ptr1, {{[(!nontemporal !0, !invariant.load !1) | (!invariant.load !1, !nontemporal !0)]}}
-  %res14 = load volatile i8* %ptr1, !nontemporal !0, !invariant.load !1
+; CHECK-NEXT: %res14 = load volatile i8, i8* %ptr1, {{[(!nontemporal !0, !invariant.load !1) | (!invariant.load !1, !nontemporal !0)]}}
+  %res14 = load volatile i8, i8* %ptr1, !nontemporal !0, !invariant.load !1
 
-; CHECK-NEXT: %res15 = load i8* %ptr1, align 1, {{[(!nontemporal !0, !invariant.load !1) | (!invariant.load !1, !nontemporal !0)]}}
-  %res15 = load i8* %ptr1, align 1, !nontemporal !0, !invariant.load !1
+; CHECK-NEXT: %res15 = load i8, i8* %ptr1, align 1, {{[(!nontemporal !0, !invariant.load !1) | (!invariant.load !1, !nontemporal !0)]}}
+  %res15 = load i8, i8* %ptr1, align 1, !nontemporal !0, !invariant.load !1
 
-; CHECK-NEXT: %res16 = load volatile i8* %ptr1, align 1, {{[(!nontemporal !0, !invariant.load !1) | (!invariant.load !1, !nontemporal !0)]}}
-  %res16 = load volatile i8* %ptr1, align 1, !nontemporal !0, !invariant.load !1
+; CHECK-NEXT: %res16 = load volatile i8, i8* %ptr1, align 1, {{[(!nontemporal !0, !invariant.load !1) | (!invariant.load !1, !nontemporal !0)]}}
+  %res16 = load volatile i8, i8* %ptr1, align 1, !nontemporal !0, !invariant.load !1
 
   ret void
 }
@@ -83,53 +83,53 @@ entry:
   %ptr1 = alloca i8
   store i8 2, i8* %ptr1
 
-; CHECK: %res1 = load atomic i8* %ptr1 unordered, align 1
-  %res1 = load atomic i8* %ptr1 unordered, align 1
+; CHECK: %res1 = load atomic i8, i8* %ptr1 unordered, align 1
+  %res1 = load atomic i8, i8* %ptr1 unordered, align 1
 
-; CHECK-NEXT: %res2 = load atomic i8* %ptr1 monotonic, align 1
-  %res2 = load atomic i8* %ptr1 monotonic, align 1
+; CHECK-NEXT: %res2 = load atomic i8, i8* %ptr1 monotonic, align 1
+  %res2 = load atomic i8, i8* %ptr1 monotonic, align 1
 
-; CHECK-NEXT: %res3 = load atomic i8* %ptr1 acquire, align 1
-  %res3 = load atomic i8* %ptr1 acquire, align 1
+; CHECK-NEXT: %res3 = load atomic i8, i8* %ptr1 acquire, align 1
+  %res3 = load atomic i8, i8* %ptr1 acquire, align 1
 
-; CHECK-NEXT: %res4 = load atomic i8* %ptr1 seq_cst, align 1
-  %res4 = load atomic i8* %ptr1 seq_cst, align 1
+; CHECK-NEXT: %res4 = load atomic i8, i8* %ptr1 seq_cst, align 1
+  %res4 = load atomic i8, i8* %ptr1 seq_cst, align 1
 
-; CHECK-NEXT: %res5 = load atomic volatile i8* %ptr1 unordered, align 1
-  %res5 = load atomic volatile i8* %ptr1 unordered, align 1
+; CHECK-NEXT: %res5 = load atomic volatile i8, i8* %ptr1 unordered, align 1
+  %res5 = load atomic volatile i8, i8* %ptr1 unordered, align 1
 
-; CHECK-NEXT: %res6 = load atomic volatile i8* %ptr1 monotonic, align 1
-  %res6 = load atomic volatile i8* %ptr1 monotonic, align 1
+; CHECK-NEXT: %res6 = load atomic volatile i8, i8* %ptr1 monotonic, align 1
+  %res6 = load atomic volatile i8, i8* %ptr1 monotonic, align 1
 
-; CHECK-NEXT: %res7 = load atomic volatile i8* %ptr1 acquire, align 1
-  %res7 = load atomic volatile i8* %ptr1 acquire, align 1
+; CHECK-NEXT: %res7 = load atomic volatile i8, i8* %ptr1 acquire, align 1
+  %res7 = load atomic volatile i8, i8* %ptr1 acquire, align 1
 
-; CHECK-NEXT: %res8 = load atomic volatile i8* %ptr1 seq_cst, align 1
-  %res8 = load atomic volatile i8* %ptr1 seq_cst, align 1
+; CHECK-NEXT: %res8 = load atomic volatile i8, i8* %ptr1 seq_cst, align 1
+  %res8 = load atomic volatile i8, i8* %ptr1 seq_cst, align 1
 
-; CHECK-NEXT: %res9 = load atomic i8* %ptr1 singlethread unordered, align 1
-  %res9 = load atomic i8* %ptr1 singlethread unordered, align 1
+; CHECK-NEXT: %res9 = load atomic i8, i8* %ptr1 singlethread unordered, align 1
+  %res9 = load atomic i8, i8* %ptr1 singlethread unordered, align 1
 
-; CHECK-NEXT: %res10 = load atomic i8* %ptr1 singlethread monotonic, align 1
-  %res10 = load atomic i8* %ptr1 singlethread monotonic, align 1
+; CHECK-NEXT: %res10 = load atomic i8, i8* %ptr1 singlethread monotonic, align 1
+  %res10 = load atomic i8, i8* %ptr1 singlethread monotonic, align 1
 
-; CHECK-NEXT: %res11 = load atomic i8* %ptr1 singlethread acquire, align 1
-  %res11 = load atomic i8* %ptr1 singlethread acquire, align 1
+; CHECK-NEXT: %res11 = load atomic i8, i8* %ptr1 singlethread acquire, align 1
+  %res11 = load atomic i8, i8* %ptr1 singlethread acquire, align 1
 
-; CHECK-NEXT: %res12 = load atomic i8* %ptr1 singlethread seq_cst, align 1
-  %res12 = load atomic i8* %ptr1 singlethread seq_cst, align 1
+; CHECK-NEXT: %res12 = load atomic i8, i8* %ptr1 singlethread seq_cst, align 1
+  %res12 = load atomic i8, i8* %ptr1 singlethread seq_cst, align 1
 
-; CHECK-NEXT: %res13 = load atomic volatile i8* %ptr1 singlethread unordered, align 1
-  %res13 = load atomic volatile i8* %ptr1 singlethread unordered, align 1
+; CHECK-NEXT: %res13 = load atomic volatile i8, i8* %ptr1 singlethread unordered, align 1
+  %res13 = load atomic volatile i8, i8* %ptr1 singlethread unordered, align 1
 
-; CHECK-NEXT: %res14 = load atomic volatile i8* %ptr1 singlethread monotonic, align 1
-  %res14 = load atomic volatile i8* %ptr1 singlethread monotonic, align 1
+; CHECK-NEXT: %res14 = load atomic volatile i8, i8* %ptr1 singlethread monotonic, align 1
+  %res14 = load atomic volatile i8, i8* %ptr1 singlethread monotonic, align 1
 
-; CHECK-NEXT: %res15 = load atomic volatile i8* %ptr1 singlethread acquire, align 1
-  %res15 = load atomic volatile i8* %ptr1 singlethread acquire, align 1
+; CHECK-NEXT: %res15 = load atomic volatile i8, i8* %ptr1 singlethread acquire, align 1
+  %res15 = load atomic volatile i8, i8* %ptr1 singlethread acquire, align 1
 
-; CHECK-NEXT: %res16 = load atomic volatile i8* %ptr1 singlethread seq_cst, align 1
-  %res16 = load atomic volatile i8* %ptr1 singlethread seq_cst, align 1
+; CHECK-NEXT: %res16 = load atomic volatile i8, i8* %ptr1 singlethread seq_cst, align 1
+  %res16 = load atomic volatile i8, i8* %ptr1 singlethread seq_cst, align 1
 
   ret void
 }
@@ -311,16 +311,16 @@ entry:
   ret void
 }
 
-define void @getelementptr({i8, i8}* %s, <4 x i8*> %ptrs, <4 x i64> %offsets ){
+define void @getelementptr({i8, i8}, {i8, i8}* %s, <4 x i8*> %ptrs, <4 x i64> %offsets ){
 entry:
-; CHECK: %res1 = getelementptr { i8, i8 }* %s, i32 1, i32 1
-  %res1 = getelementptr {i8, i8}* %s, i32 1, i32 1
+; CHECK: %res1 = getelementptr { i8, i8 }, { i8, i8 }* %s, i32 1, i32 1
+  %res1 = getelementptr {i8, i8}, {i8, i8}* %s, i32 1, i32 1
 
-; CHECK-NEXT: %res2 = getelementptr inbounds { i8, i8 }* %s, i32 1, i32 1
-  %res2 = getelementptr inbounds {i8, i8}* %s, i32 1, i32 1
+; CHECK-NEXT: %res2 = getelementptr inbounds { i8, i8 }, { i8, i8 }* %s, i32 1, i32 1
+  %res2 = getelementptr inbounds {i8, i8}, {i8, i8}* %s, i32 1, i32 1
 
-; CHECK-NEXT: %res3 = getelementptr <4 x i8*> %ptrs, <4 x i64> %offsets
-  %res3 = getelementptr <4 x i8*> %ptrs, <4 x i64> %offsets
+; CHECK-NEXT: %res3 = getelementptr i8, <4 x i8*> %ptrs, <4 x i64> %offsets
+  %res3 = getelementptr i8, <4 x i8*> %ptrs, <4 x i64> %offsets
 
   ret void
 }
diff --git a/test/Bitcode/metadata-2.ll b/test/Bitcode/metadata-2.ll
index 07371a3..e906526 100644
--- a/test/Bitcode/metadata-2.ll
+++ b/test/Bitcode/metadata-2.ll
@@ -21,7 +21,7 @@
 %"void*[]" = type { i64, i8** }
 @_D10ModuleInfo6__vtblZ = external constant %object.ModuleInfo.__vtbl  ; <%object.ModuleInfo.__vtbl*> [#uses=1]
 @.str = internal constant [20 x i8] c"tango.core.BitManip\00"  ; <[20 x i8]*> [#uses=1]
-@_D5tango4core8BitManip8__ModuleZ = global %0 { %object.ModuleInfo.__vtbl* @_D10ModuleInfo6__vtblZ, i8* null, %"byte[]" { i64 19, i8* getelementptr ([20 x i8]* @.str, i32 0, i32 0) }, %1 zeroinitializer, %"ClassInfo[]" zeroinitializer, i32 4, void ()* null, void ()* null, void ()* null, i8* null, void ()* null }  ; <%0*> [#uses=1]
+@_D5tango4core8BitManip8__ModuleZ = global %0 { %object.ModuleInfo.__vtbl* @_D10ModuleInfo6__vtblZ, i8* null, %"byte[]" { i64 19, i8* getelementptr ([20 x i8], [20 x i8]* @.str, i32 0, i32 0) }, %1 zeroinitializer, %"ClassInfo[]" zeroinitializer, i32 4, void ()* null, void ()* null, void ()* null, i8* null, void ()* null }  ; <%0*> [#uses=1]
 @_D5tango4core8BitManip11__moduleRefZ = internal global %ModuleReference { %ModuleReference* null, %object.ModuleInfo* bitcast (%0* @_D5tango4core8BitManip8__ModuleZ to %object.ModuleInfo*) }  ; <%ModuleReference*> [#uses=2]
 @_Dmodule_ref = external global %ModuleReference*  ; <%ModuleReference**> [#uses=2]
 @llvm.global_ctors = appending constant [1 x %2] [%2 { i32 65535, void ()* @_D5tango4core8BitManip16__moduleinfoCtorZ }]  ; <[1 x %2]*> [#uses=0]
@@ -77,8 +77,8 @@ entry:
 
 define internal void @_D5tango4core8BitManip16__moduleinfoCtorZ() nounwind {
 moduleinfoCtorEntry:
-  %current = load %ModuleReference** @_Dmodule_ref  ; <%ModuleReference*> [#uses=1]
-  store %ModuleReference* %current, %ModuleReference** getelementptr (%ModuleReference* @_D5tango4core8BitManip11__moduleRefZ, i32 0, i32 0)
+  %current = load %ModuleReference*, %ModuleReference** @_Dmodule_ref  ; <%ModuleReference*> [#uses=1]
+  store %ModuleReference* %current, %ModuleReference** getelementptr (%ModuleReference, %ModuleReference* @_D5tango4core8BitManip11__moduleRefZ, i32 0, i32 0)
   store %ModuleReference* @_D5tango4core8BitManip11__moduleRefZ, %ModuleReference** @_Dmodule_ref
   ret void
 }
diff --git a/test/Bitcode/old-aliases.ll b/test/Bitcode/old-aliases.ll
index b73b1a9..8527f07 100644
--- a/test/Bitcode/old-aliases.ll
+++ b/test/Bitcode/old-aliases.ll
@@ -13,8 +13,8 @@
 @v3 = alias bitcast (i32* @v1 to i16*)
 ; CHECK: @v3 = alias bitcast (i32* @v1 to i16*)
 
-@v4 = alias getelementptr ([1 x i32]* @v2, i32 0, i32 0)
-; CHECK: @v4 = alias getelementptr inbounds ([1 x i32]* @v2, i32 0, i32 0)
+@v4 = alias getelementptr ([1 x i32], [1 x i32]* @v2, i32 0, i32 0)
+; CHECK: @v4 = alias getelementptr inbounds ([1 x i32], [1 x i32]* @v2, i32 0, i32 0)
 
 @v5 = alias i32 addrspace(2)* addrspacecast (i32 addrspace(0)* @v1 to i32 addrspace(2)*)
 ; CHECK: @v5 = alias addrspacecast (i32* @v1 to i32 addrspace(2)*)
diff --git a/test/Bitcode/upgrade-loop-metadata.ll b/test/Bitcode/upgrade-loop-metadata.ll
index be2a99a..8dee907 100644
--- a/test/Bitcode/upgrade-loop-metadata.ll
+++ b/test/Bitcode/upgrade-loop-metadata.ll
@@ -10,7 +10,7 @@ entry:
   br label %for.cond
 
 for.cond:                                         ; preds = %for.inc, %entry
-  %0 = load i32* %i, align 4
+  %0 = load i32, i32* %i, align 4
   %cmp = icmp slt i32 %0, 16
   br i1 %cmp, label %for.body, label %for.end, !llvm.loop !1
 
@@ -18,7 +18,7 @@ for.body:                                         ; preds = %for.cond
   br label %for.inc
 
 for.inc:                                          ; preds = %for.body
-  %1 = load i32* %i, align 4
+  %1 = load i32, i32* %i, align 4
   %inc = add nsw i32 %1, 1
   store i32 %inc, i32* %i, align 4
   br label %for.cond
diff --git a/test/Bitcode/use-list-order.ll b/test/Bitcode/use-list-order.ll
index 6617b9c5..09ec448 100644
--- a/test/Bitcode/use-list-order.ll
+++ b/test/Bitcode/use-list-order.ll
@@ -1,7 +1,7 @@
 ; RUN: verify-uselistorder < %s
 
 @a = global [4 x i1] [i1 0, i1 1, i1 0, i1 1]
-@b = alias i1* getelementptr ([4 x i1]* @a, i64 0, i64 2)
+@b = alias i1* getelementptr ([4 x i1], [4 x i1]* @a, i64 0, i64 2)
 
 ; Check use-list order of constants used by globals.
 @glob1 = global i5 7
@@ -79,13 +79,13 @@ entry:
 
 define i1 @loadb() {
 entry:
-  %b = load i1* @b
+  %b = load i1, i1* @b
   ret i1 %b
 }
 
 define i1 @loada() {
 entry:
-  %a = load i1* getelementptr ([4 x i1]* @a, i64 0, i64 2)
+  %a = load i1, i1* getelementptr ([4 x i1], [4 x i1]* @a, i64 0, i64 2)
   ret i1 %a
 }
 
@@ -115,7 +115,7 @@ first:
 
 define i4 @globalAndFunctionFunctionUser() {
 entry:
-  %local = load i4* @globalAndFunction
+  %local = load i4, i4* @globalAndFunction
   ret i4 %local
 }
 
@@ -134,11 +134,11 @@ loop2:
 
 ; Check that block addresses work.
 @ba1 = constant i8* blockaddress (@bafunc1, %bb)
-@ba2 = constant i8* getelementptr (i8* blockaddress (@bafunc2, %bb), i61 0)
-@ba3 = constant i8* getelementptr (i8* blockaddress (@bafunc2, %bb), i61 0)
+@ba2 = constant i8* getelementptr (i8, i8* blockaddress (@bafunc2, %bb), i61 0)
+@ba3 = constant i8* getelementptr (i8, i8* blockaddress (@bafunc2, %bb), i61 0)
 
 define i8* @babefore() {
-  ret i8* getelementptr (i8* blockaddress (@bafunc2, %bb), i61 0)
+  ret i8* getelementptr (i8, i8* blockaddress (@bafunc2, %bb), i61 0)
 bb1:
   ret i8* blockaddress (@bafunc1, %bb)
 bb2:
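
For reference, the pattern these updated tests exercise is the explicit result type on load and the explicit source element type on getelementptr. The module below is only an illustrative sketch, not part of the patch; the names @g and @example are made up, and it assumes an assembler that accepts the new explicit-type syntax.

; Illustrative sketch (hypothetical names) of the explicit-type syntax.
@g = global [4 x i32] zeroinitializer

define i32 @example(i32* %p) {
entry:
  ; load now spells out the loaded type before the pointer operand
  %v = load i32, i32* %p, align 4
  ; getelementptr now spells out the source element type before the pointer operand
  %slot = getelementptr inbounds [4 x i32], [4 x i32]* @g, i64 0, i64 2
  %w = load i32, i32* %slot, align 4
  %sum = add i32 %v, %w
  ret i32 %sum
}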