Diffstat (limited to 'test/CodeGen/X86/vec_anyext.ll')
-rw-r--r-- | test/CodeGen/X86/vec_anyext.ll | 24
1 file changed, 12 insertions, 12 deletions
diff --git a/test/CodeGen/X86/vec_anyext.ll b/test/CodeGen/X86/vec_anyext.ll
index d2a4c7f..c088d7f 100644
--- a/test/CodeGen/X86/vec_anyext.ll
+++ b/test/CodeGen/X86/vec_anyext.ll
@@ -2,9 +2,9 @@
 
 ; PR 9267
 define<4 x i16> @func_16_32() {
-  %F = load <4 x i32>* undef
+  %F = load <4 x i32>, <4 x i32>* undef
   %G = trunc <4 x i32> %F to <4 x i16>
-  %H = load <4 x i32>* undef
+  %H = load <4 x i32>, <4 x i32>* undef
   %Y = trunc <4 x i32> %H to <4 x i16>
   %T = add <4 x i16> %Y, %G
   store <4 x i16>%T , <4 x i16>* undef
@@ -12,9 +12,9 @@ define<4 x i16> @func_16_32() {
 }
 
 define<4 x i16> @func_16_64() {
-  %F = load <4 x i64>* undef
+  %F = load <4 x i64>, <4 x i64>* undef
   %G = trunc <4 x i64> %F to <4 x i16>
-  %H = load <4 x i64>* undef
+  %H = load <4 x i64>, <4 x i64>* undef
   %Y = trunc <4 x i64> %H to <4 x i16>
   %T = xor <4 x i16> %Y, %G
   store <4 x i16>%T , <4 x i16>* undef
@@ -22,36 +22,36 @@ define<4 x i16> @func_16_64() {
 }
 
 define<4 x i32> @func_32_64() {
-  %F = load <4 x i64>* undef
+  %F = load <4 x i64>, <4 x i64>* undef
   %G = trunc <4 x i64> %F to <4 x i32>
-  %H = load <4 x i64>* undef
+  %H = load <4 x i64>, <4 x i64>* undef
   %Y = trunc <4 x i64> %H to <4 x i32>
   %T = or <4 x i32> %Y, %G
   ret <4 x i32> %T
 }
 
 define<4 x i8> @func_8_16() {
-  %F = load <4 x i16>* undef
+  %F = load <4 x i16>, <4 x i16>* undef
   %G = trunc <4 x i16> %F to <4 x i8>
-  %H = load <4 x i16>* undef
+  %H = load <4 x i16>, <4 x i16>* undef
   %Y = trunc <4 x i16> %H to <4 x i8>
   %T = add <4 x i8> %Y, %G
   ret <4 x i8> %T
 }
 
 define<4 x i8> @func_8_32() {
-  %F = load <4 x i32>* undef
+  %F = load <4 x i32>, <4 x i32>* undef
   %G = trunc <4 x i32> %F to <4 x i8>
-  %H = load <4 x i32>* undef
+  %H = load <4 x i32>, <4 x i32>* undef
   %Y = trunc <4 x i32> %H to <4 x i8>
   %T = sub <4 x i8> %Y, %G
   ret <4 x i8> %T
 }
 
 define<4 x i8> @func_8_64() {
-  %F = load <4 x i64>* undef
+  %F = load <4 x i64>, <4 x i64>* undef
   %G = trunc <4 x i64> %F to <4 x i8>
-  %H = load <4 x i64>* undef
+  %H = load <4 x i64>, <4 x i64>* undef
   %Y = trunc <4 x i64> %H to <4 x i8>
   %T = add <4 x i8> %Y, %G
   ret <4 x i8> %T