; Test 64-bit byteswaps from memory to registers.
;
; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s

declare i64 @llvm.bswap.i64(i64 %a)
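
; LRVG ("load reversed", 64-bit) is an RXY-format instruction with a signed
; 20-bit displacement, so it can address offsets in the range [-524288,
; 524287] from its base (and optional index) register.  The tests below
; probe both ends of that range.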

; Check LRVG with no displacement.
define i64 @f1(i64 *%src) {
; CHECK-LABEL: f1:
; CHECK: lrvg %r2, 0(%r2)
; CHECK: br %r14
  %a = load i64, i64 *%src
  %swapped = call i64 @llvm.bswap.i64(i64 %a)
  ret i64 %swapped
}

; Check the high end of the aligned LRVG range.
define i64 @f2(i64 *%src) {
; CHECK-LABEL: f2:
; CHECK: lrvg %r2, 524280(%r2)
; CHECK: br %r14
  %ptr = getelementptr i64, i64 *%src, i64 65535
  %a = load i64, i64 *%ptr
  %swapped = call i64 @llvm.bswap.i64(i64 %a)
  ret i64 %swapped
}
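
; 524280 = 65535 * 8 is the largest multiple of 8 that fits in the signed
; 20-bit displacement field, whose maximum value is 524287.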

; Check the next doubleword up, which needs separate address logic.
; Other sequences besides this one would be OK.
define i64 @f3(i64 *%src) {
; CHECK-LABEL: f3:
; CHECK: agfi %r2, 524288
; CHECK: lrvg %r2, 0(%r2)
; CHECK: br %r14
  %ptr = getelementptr i64, i64 *%src, i64 65536
  %a = load i64, i64 *%ptr
  %swapped = call i64 @llvm.bswap.i64(i64 %a)
  ret i64 %swapped
}
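
; 65536 * 8 = 524288 is one byte beyond the maximum displacement, so the
; offset has to be folded into the base register first, here with AGFI
; (add 32-bit signed immediate).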

; Check the high end of the negative aligned LRVG range.
define i64 @f4(i64 *%src) {
; CHECK-LABEL: f4:
; CHECK: lrvg %r2, -8(%r2)
; CHECK: br %r14
  %ptr = getelementptr i64, i64 *%src, i64 -1
  %a = load i64, i64 *%ptr
  %swapped = call i64 @llvm.bswap.i64(i64 %a)
  ret i64 %swapped
}

; Check the low end of the LRVG range.
define i64 @f5(i64 *%src) {
; CHECK-LABEL: f5:
; CHECK: lrvg %r2, -524288(%r2)
; CHECK: br %r14
  %ptr = getelementptr i64, i64 *%src, i64 -65536
  %a = load i64, i64 *%ptr
  %swapped = call i64 @llvm.bswap.i64(i64 %a)
  ret i64 %swapped
}
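
; -65536 * 8 = -524288 is exactly the minimum displacement, so a plain
; LRVG still works.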

; Check the next doubleword down, which needs separate address logic.
; Other sequences besides this one would be OK.
define i64 @f6(i64 *%src) {
; CHECK-LABEL: f6:
; CHECK: agfi %r2, -524296
; CHECK: lrvg %r2, 0(%r2)
; CHECK: br %r14
  %ptr = getelementptr i64, i64 *%src, i64 -65537
  %a = load i64, i64 *%ptr
  %swapped = call i64 @llvm.bswap.i64(i64 %a)
  ret i64 %swapped
}
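
; -65537 * 8 = -524296 lies below the minimum displacement, so as in f3
; the offset must be applied with AGFI before the LRVG.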

; Check that LRVG allows an index.
define i64 @f7(i64 %src, i64 %index) {
; CHECK-LABEL: f7:
; CHECK: lrvg %r2, 524287({{%r3,%r2|%r2,%r3}})
; CHECK: br %r14
  %add1 = add i64 %src, %index
  %add2 = add i64 %add1, 524287
  %ptr = inttoptr i64 %add2 to i64 *
  %a = load i64, i64 *%ptr
  %swapped = call i64 @llvm.bswap.i64(i64 %a)
  ret i64 %swapped
}
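
; Base and index play symmetric roles in the address calculation, which is
; why the CHECK pattern accepts the two registers in either order.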

; Check that volatile accesses do not use LRVG, which might access the
; storage multiple times.
define i64 @f8(i64 *%src) {
; CHECK-LABEL: f8:
; CHECK: lg [[REG:%r[0-5]]], 0(%r2)
; CHECK: lrvgr %r2, [[REG]]
; CHECK: br %r14
  %a = load volatile i64, i64 *%src
  %swapped = call i64 @llvm.bswap.i64(i64 %a)
  ret i64 %swapped
}
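
; The plain LG reads the doubleword exactly once and LRVGR then swaps it
; in a register, so the volatile location is accessed a single time.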

; Test a case where we spill the source of at least one LRVGR.  We want
; to use LRVG if possible.
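; Sixteen i64 values are live across the byteswaps, more than fit in the
; general-purpose registers, so some must be spilled; 160(%r15) is the
; first spill slot above the 160-byte register save area in the s390x
; ELF stack frame.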
define void @f9(i64 *%ptr) {
; CHECK-LABEL: f9:
; CHECK: lrvg {{%r[0-9]+}}, 160(%r15)
; CHECK: br %r14
  %val0 = load volatile i64, i64 *%ptr
  %val1 = load volatile i64, i64 *%ptr
  %val2 = load volatile i64, i64 *%ptr
  %val3 = load volatile i64, i64 *%ptr
  %val4 = load volatile i64, i64 *%ptr
  %val5 = load volatile i64, i64 *%ptr
  %val6 = load volatile i64, i64 *%ptr
  %val7 = load volatile i64, i64 *%ptr
  %val8 = load volatile i64, i64 *%ptr
  %val9 = load volatile i64, i64 *%ptr
  %val10 = load volatile i64, i64 *%ptr
  %val11 = load volatile i64, i64 *%ptr
  %val12 = load volatile i64, i64 *%ptr
  %val13 = load volatile i64, i64 *%ptr
  %val14 = load volatile i64, i64 *%ptr
  %val15 = load volatile i64, i64 *%ptr

  %swapped0 = call i64 @llvm.bswap.i64(i64 %val0)
  %swapped1 = call i64 @llvm.bswap.i64(i64 %val1)
  %swapped2 = call i64 @llvm.bswap.i64(i64 %val2)
  %swapped3 = call i64 @llvm.bswap.i64(i64 %val3)
  %swapped4 = call i64 @llvm.bswap.i64(i64 %val4)
  %swapped5 = call i64 @llvm.bswap.i64(i64 %val5)
  %swapped6 = call i64 @llvm.bswap.i64(i64 %val6)
  %swapped7 = call i64 @llvm.bswap.i64(i64 %val7)
  %swapped8 = call i64 @llvm.bswap.i64(i64 %val8)
  %swapped9 = call i64 @llvm.bswap.i64(i64 %val9)
  %swapped10 = call i64 @llvm.bswap.i64(i64 %val10)
  %swapped11 = call i64 @llvm.bswap.i64(i64 %val11)
  %swapped12 = call i64 @llvm.bswap.i64(i64 %val12)
  %swapped13 = call i64 @llvm.bswap.i64(i64 %val13)
  %swapped14 = call i64 @llvm.bswap.i64(i64 %val14)
  %swapped15 = call i64 @llvm.bswap.i64(i64 %val15)

  store volatile i64 %val0, i64 *%ptr
  store volatile i64 %val1, i64 *%ptr
  store volatile i64 %val2, i64 *%ptr
  store volatile i64 %val3, i64 *%ptr
  store volatile i64 %val4, i64 *%ptr
  store volatile i64 %val5, i64 *%ptr
  store volatile i64 %val6, i64 *%ptr
  store volatile i64 %val7, i64 *%ptr
  store volatile i64 %val8, i64 *%ptr
  store volatile i64 %val9, i64 *%ptr
  store volatile i64 %val10, i64 *%ptr
  store volatile i64 %val11, i64 *%ptr
  store volatile i64 %val12, i64 *%ptr
  store volatile i64 %val13, i64 *%ptr
  store volatile i64 %val14, i64 *%ptr
  store volatile i64 %val15, i64 *%ptr

  store volatile i64 %swapped0, i64 *%ptr
  store volatile i64 %swapped1, i64 *%ptr
  store volatile i64 %swapped2, i64 *%ptr
  store volatile i64 %swapped3, i64 *%ptr
  store volatile i64 %swapped4, i64 *%ptr
  store volatile i64 %swapped5, i64 *%ptr
  store volatile i64 %swapped6, i64 *%ptr
  store volatile i64 %swapped7, i64 *%ptr
  store volatile i64 %swapped8, i64 *%ptr
  store volatile i64 %swapped9, i64 *%ptr
  store volatile i64 %swapped10, i64 *%ptr
  store volatile i64 %swapped11, i64 *%ptr
  store volatile i64 %swapped12, i64 *%ptr
  store volatile i64 %swapped13, i64 *%ptr
  store volatile i64 %swapped14, i64 *%ptr
  store volatile i64 %swapped15, i64 *%ptr

  ret void
}