aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorJakob Stoklund Olesen <stoklund@2pi.dk>2012-08-31 15:34:59 +0000
committerJakob Stoklund Olesen <stoklund@2pi.dk>2012-08-31 15:34:59 +0000
commit908c0c01f634798a4c1b335a6aca0870ad1fce77 (patch)
treef1222d5f0e0d1af37095e14f6a8a738732687a80
parentcb5f63d7fa717b67a666712a3a0d7eebd4d8fb8b (diff)
downloadexternal_llvm-908c0c01f634798a4c1b335a6aca0870ad1fce77.zip
external_llvm-908c0c01f634798a4c1b335a6aca0870ad1fce77.tar.gz
external_llvm-908c0c01f634798a4c1b335a6aca0870ad1fce77.tar.bz2
Don't enforce ordered inline asm operands.
I was too optimistic; inline asm can have tied operands that don't follow the def order. Fixes PR13742. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@162998 91177308-0d34-0410-b5e6-96231b3b80d8
-rw-r--r--lib/CodeGen/SelectionDAG/InstrEmitter.cpp10
-rw-r--r--test/CodeGen/X86/inline-asm-tied.ll9
2 files changed, 9 insertions, 10 deletions
diff --git a/lib/CodeGen/SelectionDAG/InstrEmitter.cpp b/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
index 4d09c44..5399a51 100644
--- a/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
+++ b/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
@@ -895,7 +895,6 @@ EmitSpecialNode(SDNode *Node, bool IsClone, bool IsCloned,
// Remember the operand index of the group flags.
SmallVector<unsigned, 8> GroupIdx;
- unsigned PrevDefGroup = 0;
// Add all of the operand registers to the instruction.
for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) {
@@ -944,15 +943,6 @@ EmitSpecialNode(SDNode *Node, bool IsClone, bool IsCloned,
if (InlineAsm::getKind(Flags) == InlineAsm::Kind_RegUse) {
unsigned DefGroup = 0;
if (InlineAsm::isUseOperandTiedToDef(Flags, DefGroup)) {
- // Check that the def groups are monotonically increasing.
- // Otherwise, the tied uses and defs won't line up, and
- // MI::findTiedOperandIdx() will find the wrong operand. This
- // should be automatically enforced by the front ends when
- // translating "+" constraints into tied def+use pairs.
- assert(DefGroup >= PrevDefGroup &&
- "Tied inline asm operands must be in increasing order.");
- PrevDefGroup = DefGroup;
-
unsigned DefIdx = GroupIdx[DefGroup] + 1;
unsigned UseIdx = GroupIdx.back() + 1;
for (unsigned j = 0; j != NumVals; ++j) {
diff --git a/test/CodeGen/X86/inline-asm-tied.ll b/test/CodeGen/X86/inline-asm-tied.ll
index 91576fb..597236e 100644
--- a/test/CodeGen/X86/inline-asm-tied.ll
+++ b/test/CodeGen/X86/inline-asm-tied.ll
@@ -19,3 +19,12 @@ entry:
%1 = load i64* %retval ; <i64> [#uses=1]
ret i64 %1
}
+
+; The tied operands are not necessarily in the same order as the defs.
+; PR13742
+define i64 @swapped(i64 %x, i64 %y) nounwind {
+entry:
+ %x0 = call { i64, i64 } asm "foo", "=r,=r,1,0,~{dirflag},~{fpsr},~{flags}"(i64 %x, i64 %y) nounwind
+ %x1 = extractvalue { i64, i64 } %x0, 0
+ ret i64 %x1
+}