; RUN: llc < %s -march=x86-64 -mcpu=bdver1 | FileCheck %s

; clang -Oz -c test1.cpp -emit-llvm -S
; Verify that we generate the shld instruction when optimizing for size,
; even for X86_64 processors that are known to have poor latency double
; precision shift instructions.
; uint64_t lshift10(uint64_t a, uint64_t b)
; {
;     return (a << 10) | (b >> 54);
; }
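;
; Roughly, shld performs exactly this double-precision shift in one
; instruction: dst = (dst << imm) | (src >> (64 - imm)). Assuming the
; SysV x86-64 calling convention (register choices below are illustrative,
; only the opcode and immediate are checked), lshift10 can lower to
;   shldq $10, %rsi, %rdi    ; %rdi = (a << 10) | (b >> 54)
; which is the pattern the CHECK line verifies.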

; Function Attrs: minsize nounwind optsize readnone uwtable
define i64 @_Z8lshift10mm(i64 %a, i64 %b) #0 {
entry:
; CHECK:   shldq   $10
  %shl = shl i64 %a, 10
  %shr = lshr i64 %b, 54
  %or = or i64 %shr, %shl
  ret i64 %or
}

attributes #0 = { minsize nounwind optsize readnone uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }


; clang -Os -c test2.cpp -emit-llvm -S
; Verify that we generate the shld instruction when optimizing for size,
; even for X86_64 processors that are known to have poor latency double
; precision shift instructions.
; uint64_t lshift11(uint64_t a, uint64_t b)
; {
;     return (a << 11) | (b >> 53);
; }

; Function Attrs: nounwind optsize readnone uwtable
define i64 @_Z8lshift11mm(i64 %a, i64 %b) #1 {
entry:
; CHECK:   shldq   $11
  %shl = shl i64 %a, 11
  %shr = lshr i64 %b, 53
  %or = or i64 %shr, %shl
  ret i64 %or
}

attributes #1 = { nounwind optsize readnone uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }

; clang -O2 -c test2.cpp -emit-llvm -S
; Verify that we do not generate the shld instruction when not optimizing
; for size on X86_64 processors that are known to have poor latency double
; precision shift instructions.
; uint64_t lshift12(uint64_t a, uint64_t b)
; {
;     return (a << 12) | (b >> 52);
; }
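;
; A rough sketch of the expected -O2 lowering on bdver1 (register choices
; are illustrative and not checked): the double shift is split into two
; independent single shifts plus an or, e.g.
;   shlq $12, %rdi
;   shrq $52, %rsi
;   orq  %rdi, %rsi    ; result then moved/combined into the return register
; since separate shifts avoid the poor shld latency on this CPU.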

; Function Attrs: nounwind readnone uwtable
define i64 @_Z8lshift12mm(i64 %a, i64 %b) #2 {
entry:
; CHECK:       shlq    $12
; CHECK-NEXT:  shrq    $52
  %shl = shl i64 %a, 12
  %shr = lshr i64 %b, 52
  %or = or i64 %shr, %shl
  ret i64 %or
}

attributes #2 = { nounwind readnone uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }