author     Tim Northover <Tim.Northover@arm.com>  2013-04-20 12:32:43 +0000
committer  Tim Northover <Tim.Northover@arm.com>  2013-04-20 12:32:43 +0000
commit     8b71994fde0f0fcdf7a8260dc773fb7376b1231f (patch)
tree       d1f2a8fb857f69681fa26a311e1709a581cf6825 /lib/Target
parent     6265d5c91a18b2fb6499eb581c488315880c044d (diff)
Remove unused ShouldFoldAtomicFences flag.
I think it's almost impossible to fold atomic fences profitably under
LLVM/C++11 semantics. As a result, this is now unused and just
cluttering up the target interface.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@179940 91177308-0d34-0410-b5e6-96231b3b80d8
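For context, the pattern ShouldFoldAtomicFences targeted is a standalone fence next to a relaxed atomic access. The C++ sketch below is purely illustrative (Flag and publish are made-up names, not from this commit): under C++11 semantics a standalone release fence orders prior writes against every later atomic store, not just the adjacent one, so merging it into the neighbouring relaxed store would in general weaken the program's guarantees, which is why the fold is rarely, if ever, profitable.

// Illustrative sketch only; names are hypothetical.
// The standalone release fence below covers *all* subsequent atomic
// stores, so it cannot in general be folded into Flag.store() as a
// release store without changing the guarantees the program relies on.
#include <atomic>

std::atomic<int> Flag{0};

void publish(int Value) {
  std::atomic_thread_fence(std::memory_order_release); // standalone fence
  Flag.store(Value, std::memory_order_relaxed);        // relaxed atomic store
}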
Diffstat (limited to 'lib/Target')
 lib/Target/AArch64/AArch64ISelLowering.cpp | 4 ----
 lib/Target/ARM/ARMISelLowering.cpp         | 2 --
 lib/Target/X86/X86ISelLowering.cpp         | 7 -------
 3 files changed, 0 insertions(+), 13 deletions(-)
diff --git a/lib/Target/AArch64/AArch64ISelLowering.cpp b/lib/Target/AArch64/AArch64ISelLowering.cpp
index 6deae75..786b1ba 100644
--- a/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -59,10 +59,6 @@ AArch64TargetLowering::AArch64TargetLowering(AArch64TargetMachine &TM)
   computeRegisterProperties();
 
-  // We have particularly efficient implementations of atomic fences if they can
-  // be combined with nearby atomic loads and stores.
-  setShouldFoldAtomicFences(true);
-
   // We combine OR nodes for bitfield and NEON BSL operations.
   setTargetDAGCombine(ISD::OR);
diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp
index 698c8a7..23d7ef1 100644
--- a/lib/Target/ARM/ARMISelLowering.cpp
+++ b/lib/Target/ARM/ARMISelLowering.cpp
@@ -763,8 +763,6 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
     // Unordered/Monotonic case.
     setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Custom);
     setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom);
-    // Since the libcalls include locking, fold in the fences
-    setShouldFoldAtomicFences(true);
   }
 
   setOperationAction(ISD::PREFETCH, MVT::Other, Custom);
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index 94370ae..b7ba0b8 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -528,13 +528,6 @@ void X86TargetLowering::resetOperationActions() {
   setOperationAction(ISD::ATOMIC_FENCE , MVT::Other, Custom);
 
-  // On X86 and X86-64, atomic operations are lowered to locked instructions.
-  // Locked instructions, in turn, have implicit fence semantics (all memory
-  // operations are flushed before issuing the locked instruction, and they
-  // are not buffered), so we can fold away the common pattern of
-  // fence-atomic-fence.
-  setShouldFoldAtomicFences(true);
-
   // Expand certain atomics
   for (unsigned i = 0; i != array_lengthof(IntVTs); ++i) {
     MVT VT = IntVTs[i];
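As a rough illustration of the X86 comment removed above (Counter and bump are hypothetical names, not part of the patch): a sequentially consistent read-modify-write such as fetch_add is lowered to a LOCK-prefixed instruction, which already acts as a full memory barrier on x86, so explicit fences around it were redundant and were what the removed hook allowed the backend to fold away.

// Hypothetical example; on x86-64 this fetch_add typically compiles
// to a single "lock xadd", which is itself a full barrier, so no
// separate mfence needs to be emitted around it.
#include <atomic>

std::atomic<int> Counter{0};

int bump() {
  return Counter.fetch_add(1, std::memory_order_seq_cst);
}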