| author | Chris Lattner <sabre@nondot.org> | 2010-08-23 17:30:29 +0000 |
|---|---|---|
| committer | Chris Lattner <sabre@nondot.org> | 2010-08-23 17:30:29 +0000 |
| commit | b7f243a638c60e32c4f576d13b03687b0f90cfee (patch) | |
| tree | 6754601c653927d295ad10f22ccb8f65c1a1b8c6 /lib/Target/X86 | |
| parent | f90ab07c351ffd2f82c424fd310de2b8664417e4 (diff) | |
random improvement for variable shift codegen.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@111813 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'lib/Target/X86')
-rw-r--r-- | lib/Target/X86/README-SSE.txt | 16 |
1 file changed, 14 insertions(+), 2 deletions(-)
diff --git a/lib/Target/X86/README-SSE.txt b/lib/Target/X86/README-SSE.txt
index b6aba93..85cdd17 100644
--- a/lib/Target/X86/README-SSE.txt
+++ b/lib/Target/X86/README-SSE.txt
@@ -2,8 +2,20 @@
 // Random ideas for the X86 backend: SSE-specific stuff.
 //===---------------------------------------------------------------------===//
 
-- Consider eliminating the unaligned SSE load intrinsics, replacing them with
-  unaligned LLVM load instructions.
+//===---------------------------------------------------------------------===//
+
+SSE Variable shift can be custom lowered to something like this, which uses a
+small table + unaligned load + shuffle instead of going through memory.
+
+__m128i_shift_right:
+	.byte  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15
+	.byte -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
+
+...
+__m128i shift_right(__m128i value, unsigned long offset) {
+  return _mm_shuffle_epi8(value,
+               _mm_loadu_si128((__m128 *) (___m128i_shift_right + offset)));
+}
 
 //===---------------------------------------------------------------------===//
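For anyone who wants to experiment with the idea in the new README entry, here is a self-contained C sketch of the same table + unaligned load + pshufb approach. It is illustrative, not the commit's code: the table and function names (kShiftRightTable, shift_right_bytes) and the main() harness are made up here, the table is written with a full 16 bytes of -1 in its second row, and the load casts to __m128i * because that is the pointer type _mm_loadu_si128 takes. It needs SSSE3, e.g. gcc -mssse3.

```c
/* Hypothetical standalone sketch of the README's "small table + unaligned
   load + pshufb" variable byte shift; names are illustrative, not LLVM's. */
#include <stdint.h>
#include <stdio.h>
#include <tmmintrin.h>   /* SSSE3: _mm_shuffle_epi8 (also pulls in SSE2 load/store) */

/* 32-byte shuffle-control table: the first 16 entries are the identity
   indices 0..15, the next 16 are -1 (high bit set), which pshufb treats as
   "write a zero byte".  Loading any 16-byte window starting at offset 0..16
   yields a mask that shifts right by that many bytes and fills with zeros. */
static const int8_t kShiftRightTable[32] = {
     0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
};

/* Shift the 16 bytes of `value` toward lane 0 by `offset` bytes (0..16):
   one unaligned load of the table window plus one pshufb, with no round
   trip through memory for the data itself. */
static __m128i shift_right_bytes(__m128i value, unsigned long offset) {
    __m128i mask = _mm_loadu_si128((const __m128i *)(kShiftRightTable + offset));
    return _mm_shuffle_epi8(value, mask);
}

int main(void) {
    uint8_t in[16], out[16];
    for (int i = 0; i < 16; ++i)
        in[i] = (uint8_t)(i + 1);            /* 1, 2, ..., 16 */

    __m128i v = _mm_loadu_si128((const __m128i *)in);
    _mm_storeu_si128((__m128i *)out, shift_right_bytes(v, 3));

    for (int i = 0; i < 16; ++i)
        printf("%u ", out[i]);               /* prints 4 5 ... 16 0 0 0 */
    printf("\n");
    return 0;
}
```

The table is deliberately read with an unaligned load, so no branching or variable-index extraction is needed per shift; the only cost is the 32-byte constant plus one movdqu feeding pshufb, which is the point of the README note.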