@@ -4,6 +4,8 @@ target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:3
 target datalayout = "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64"
 #endif
 
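+; Used by the load/store helpers below that trap instead of performing the access.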
+declare void @llvm.trap()
+
 define i32 @__clc__atomic_load_global_4_unordered(i32 addrspace(1)* nocapture %ptr) nounwind alwaysinline {
 entry:
   %0 = load atomic volatile i32, i32 addrspace(1)* %ptr unordered, align 4
@@ -54,99 +56,99 @@ entry:
 
 define i32 @__clc__atomic_load_global_4_acquire(i32 addrspace(1)* nocapture %ptr) nounwind alwaysinline {
 entry:
-  %0 = load atomic volatile i32, i32 addrspace(1)* %ptr acquire, align 4
-  ret i32 %0
+  tail call void @llvm.trap()
+  unreachable
 }
 
 define i32 @__clc__atomic_load_local_4_acquire(i32 addrspace(3)* nocapture %ptr) nounwind alwaysinline {
 entry:
-  %0 = load atomic volatile i32, i32 addrspace(3)* %ptr acquire, align 4
-  ret i32 %0
+  tail call void @llvm.trap()
+  unreachable
 }
 
 define i64 @__clc__atomic_load_global_8_acquire(i64 addrspace(1)* nocapture %ptr) nounwind alwaysinline {
 entry:
-  %0 = load atomic volatile i64, i64 addrspace(1)* %ptr acquire, align 8
-  ret i64 %0
+  tail call void @llvm.trap()
+  unreachable
 }
 
 define i64 @__clc__atomic_load_local_8_acquire(i64 addrspace(3)* nocapture %ptr) nounwind alwaysinline {
 entry:
-  %0 = load atomic volatile i64, i64 addrspace(3)* %ptr acquire, align 8
-  ret i64 %0
+  tail call void @llvm.trap()
+  unreachable
 }
 
 define i32 @__clc__atomic_uload_global_4_acquire(i32 addrspace(1)* nocapture %ptr) nounwind alwaysinline {
 entry:
-  %0 = load atomic volatile i32, i32 addrspace(1)* %ptr acquire, align 4
-  ret i32 %0
+  tail call void @llvm.trap()
+  unreachable
 }
 
 define i32 @__clc__atomic_uload_local_4_acquire(i32 addrspace(3)* nocapture %ptr) nounwind alwaysinline {
 entry:
-  %0 = load atomic volatile i32, i32 addrspace(3)* %ptr acquire, align 4
-  ret i32 %0
+  tail call void @llvm.trap()
+  unreachable
 }
 
 define i64 @__clc__atomic_uload_global_8_acquire(i64 addrspace(1)* nocapture %ptr) nounwind alwaysinline {
 entry:
-  %0 = load atomic volatile i64, i64 addrspace(1)* %ptr acquire, align 8
-  ret i64 %0
+  tail call void @llvm.trap()
+  unreachable
 }
 
 define i64 @__clc__atomic_uload_local_8_acquire(i64 addrspace(3)* nocapture %ptr) nounwind alwaysinline {
 entry:
-  %0 = load atomic volatile i64, i64 addrspace(3)* %ptr acquire, align 8
-  ret i64 %0
+  tail call void @llvm.trap()
+  unreachable
 }
 
 
 define i32 @__clc__atomic_load_global_4_seq_cst(i32 addrspace(1)* nocapture %ptr) nounwind alwaysinline {
 entry:
-  %0 = load atomic volatile i32, i32 addrspace(1)* %ptr seq_cst, align 4
-  ret i32 %0
+  tail call void @llvm.trap()
+  unreachable
 }
 
 define i32 @__clc__atomic_load_local_4_seq_cst(i32 addrspace(3)* nocapture %ptr) nounwind alwaysinline {
 entry:
-  %0 = load atomic volatile i32, i32 addrspace(3)* %ptr seq_cst, align 4
-  ret i32 %0
+  tail call void @llvm.trap()
+  unreachable
 }
 
 define i64 @__clc__atomic_load_global_8_seq_cst(i64 addrspace(1)* nocapture %ptr) nounwind alwaysinline {
 entry:
-  %0 = load atomic volatile i64, i64 addrspace(1)* %ptr seq_cst, align 8
-  ret i64 %0
+  tail call void @llvm.trap()
+  unreachable
 }
 
 define i64 @__clc__atomic_load_local_8_seq_cst(i64 addrspace(3)* nocapture %ptr) nounwind alwaysinline {
 entry:
-  %0 = load atomic volatile i64, i64 addrspace(3)* %ptr seq_cst, align 8
-  ret i64 %0
+  tail call void @llvm.trap()
+  unreachable
 }
 
 define i32 @__clc__atomic_uload_global_4_seq_cst(i32 addrspace(1)* nocapture %ptr) nounwind alwaysinline {
 entry:
-  %0 = load atomic volatile i32, i32 addrspace(1)* %ptr seq_cst, align 4
-  ret i32 %0
+  tail call void @llvm.trap()
+  unreachable
 }
 
 define i32 @__clc__atomic_uload_local_4_seq_cst(i32 addrspace(3)* nocapture %ptr) nounwind alwaysinline {
 entry:
-  %0 = load atomic volatile i32, i32 addrspace(3)* %ptr seq_cst, align 4
-  ret i32 %0
+  tail call void @llvm.trap()
+  unreachable
 }
 
 define i64 @__clc__atomic_uload_global_8_seq_cst(i64 addrspace(1)* nocapture %ptr) nounwind alwaysinline {
 entry:
-  %0 = load atomic volatile i64, i64 addrspace(1)* %ptr seq_cst, align 8
-  ret i64 %0
+  tail call void @llvm.trap()
+  unreachable
 }
 
 define i64 @__clc__atomic_uload_local_8_seq_cst(i64 addrspace(3)* nocapture %ptr) nounwind alwaysinline {
 entry:
-  %0 = load atomic volatile i64, i64 addrspace(3)* %ptr seq_cst, align 8
-  ret i64 %0
+  tail call void @llvm.trap()
+  unreachable
 }
 
 define void @__clc__atomic_store_global_4_unordered(i32 addrspace(1)* nocapture %ptr, i32 %value) nounwind alwaysinline {
@@ -199,96 +201,96 @@ entry:
 
 define void @__clc__atomic_store_global_4_release(i32 addrspace(1)* nocapture %ptr, i32 %value) nounwind alwaysinline {
 entry:
-  store atomic volatile i32 %value, i32 addrspace(1)* %ptr release, align 4
-  ret void
+  tail call void @llvm.trap()
+  unreachable
 }
 
 define void @__clc__atomic_store_local_4_release(i32 addrspace(3)* nocapture %ptr, i32 %value) nounwind alwaysinline {
 entry:
-  store atomic volatile i32 %value, i32 addrspace(3)* %ptr release, align 4
-  ret void
+  tail call void @llvm.trap()
+  unreachable
 }
 
 define void @__clc__atomic_store_global_8_release(i64 addrspace(1)* nocapture %ptr, i64 %value) nounwind alwaysinline {
 entry:
-  store atomic volatile i64 %value, i64 addrspace(1)* %ptr release, align 8
-  ret void
+  tail call void @llvm.trap()
+  unreachable
 }
 
 define void @__clc__atomic_store_local_8_release(i64 addrspace(3)* nocapture %ptr, i64 %value) nounwind alwaysinline {
 entry:
-  store atomic volatile i64 %value, i64 addrspace(3)* %ptr release, align 8
-  ret void
+  tail call void @llvm.trap()
+  unreachable
 }
 
 define void @__clc__atomic_ustore_global_4_release(i32 addrspace(1)* nocapture %ptr, i32 %value) nounwind alwaysinline {
 entry:
-  store atomic volatile i32 %value, i32 addrspace(1)* %ptr release, align 4
-  ret void
+  tail call void @llvm.trap()
+  unreachable
 }
 
 define void @__clc__atomic_ustore_local_4_release(i32 addrspace(3)* nocapture %ptr, i32 %value) nounwind alwaysinline {
 entry:
-  store atomic volatile i32 %value, i32 addrspace(3)* %ptr release, align 4
-  ret void
+  tail call void @llvm.trap()
+  unreachable
 }
 
 define void @__clc__atomic_ustore_global_8_release(i64 addrspace(1)* nocapture %ptr, i64 %value) nounwind alwaysinline {
 entry:
-  store atomic volatile i64 %value, i64 addrspace(1)* %ptr release, align 8
-  ret void
+  tail call void @llvm.trap()
+  unreachable
 }
 
 define void @__clc__atomic_ustore_local_8_release(i64 addrspace(3)* nocapture %ptr, i64 %value) nounwind alwaysinline {
 entry:
-  store atomic volatile i64 %value, i64 addrspace(3)* %ptr release, align 8
-  ret void
+  tail call void @llvm.trap()
+  unreachable
 }
 
 define void @__clc__atomic_store_global_4_seq_cst(i32 addrspace(1)* nocapture %ptr, i32 %value) nounwind alwaysinline {
 entry:
-  store atomic volatile i32 %value, i32 addrspace(1)* %ptr seq_cst, align 4
-  ret void
+  tail call void @llvm.trap()
+  unreachable
 }
 
 define void @__clc__atomic_store_local_4_seq_cst(i32 addrspace(3)* nocapture %ptr, i32 %value) nounwind alwaysinline {
 entry:
-  store atomic volatile i32 %value, i32 addrspace(3)* %ptr seq_cst, align 4
-  ret void
+  tail call void @llvm.trap()
+  unreachable
 }
 
 define void @__clc__atomic_store_global_8_seq_cst(i64 addrspace(1)* nocapture %ptr, i64 %value) nounwind alwaysinline {
 entry:
-  store atomic volatile i64 %value, i64 addrspace(1)* %ptr seq_cst, align 8
-  ret void
+  tail call void @llvm.trap()
+  unreachable
 }
 
 define void @__clc__atomic_store_local_8_seq_cst(i64 addrspace(3)* nocapture %ptr, i64 %value) nounwind alwaysinline {
 entry:
-  store atomic volatile i64 %value, i64 addrspace(3)* %ptr seq_cst, align 8
-  ret void
+  tail call void @llvm.trap()
+  unreachable
 }
 
 define void @__clc__atomic_ustore_global_4_seq_cst(i32 addrspace(1)* nocapture %ptr, i32 %value) nounwind alwaysinline {
 entry:
-  store atomic volatile i32 %value, i32 addrspace(1)* %ptr seq_cst, align 4
-  ret void
+  tail call void @llvm.trap()
+  unreachable
 }
 
 define void @__clc__atomic_ustore_local_4_seq_cst(i32 addrspace(3)* nocapture %ptr, i32 %value) nounwind alwaysinline {
 entry:
-  store atomic volatile i32 %value, i32 addrspace(3)* %ptr seq_cst, align 4
-  ret void
+  tail call void @llvm.trap()
+  unreachable
 }
 
 define void @__clc__atomic_ustore_global_8_seq_cst(i64 addrspace(1)* nocapture %ptr, i64 %value) nounwind alwaysinline {
 entry:
-  store atomic volatile i64 %value, i64 addrspace(1)* %ptr seq_cst, align 8
-  ret void
+  tail call void @llvm.trap()
+  unreachable
 }
 
 define void @__clc__atomic_ustore_local_8_seq_cst(i64 addrspace(3)* nocapture %ptr, i64 %value) nounwind alwaysinline {
 entry:
-  store atomic volatile i64 %value, i64 addrspace(3)* %ptr seq_cst, align 8
-  ret void
+  tail call void @llvm.trap()
+  unreachable
 }