// QUDA v0.7.0 — A library for QCD on GPUs
// File: tmc_dslash_dagger_g80_core.h (generated dslash core; Doxygen listing chrome removed)
// *** CUDA DSLASH DAGGER ***

// Number of output-spinor floats each thread stages in shared memory
// (indices 0..18 — see the o??_re/o??_im defines below; the remaining
// components live in registers on these G80-class targets).
#define DSLASH_SHARED_FLOATS_PER_THREAD 19


// Compiler workaround: the legacy Open64 device compiler needs `volatile`
// on the shared-memory spinor pointer; the NVVM compiler does not.
#if ((CUDA_VERSION >= 4010) && (__COMPUTE_CAPABILITY__ >= 200)) // NVVM compiler
#define VOLATILE
#else // Open64 compiler
#define VOLATILE volatile
#endif
// Input spinor: map named components i<spin><color>_{re,im} (4 spins x
// 3 colors, complex) onto the vector registers I0.. that the READ_SPINOR /
// READ_INTERMEDIATE_SPINOR macros (io_spinor.h) fill.  The acc??_* names
// map the accumulator spinor (accum0..) with the same layout.
#ifdef SPINOR_DOUBLE
#define spinorFloat double
// double precision: one double2 register per complex component (I0..I11)
#define i00_re I0.x
#define i00_im I0.y
#define i01_re I1.x
#define i01_im I1.y
#define i02_re I2.x
#define i02_im I2.y
#define i10_re I3.x
#define i10_im I3.y
#define i11_re I4.x
#define i11_im I4.y
#define i12_re I5.x
#define i12_im I5.y
#define i20_re I6.x
#define i20_im I6.y
#define i21_re I7.x
#define i21_im I7.y
#define i22_re I8.x
#define i22_im I8.y
#define i30_re I9.x
#define i30_im I9.y
#define i31_re I10.x
#define i31_im I10.y
#define i32_re I11.x
#define i32_im I11.y
// accumulator spinor (accum0..accum11), same component layout
#define acc00_re accum0.x
#define acc00_im accum0.y
#define acc01_re accum1.x
#define acc01_im accum1.y
#define acc02_re accum2.x
#define acc02_im accum2.y
#define acc10_re accum3.x
#define acc10_im accum3.y
#define acc11_re accum4.x
#define acc11_im accum4.y
#define acc12_re accum5.x
#define acc12_im accum5.y
#define acc20_re accum6.x
#define acc20_im accum6.y
#define acc21_re accum7.x
#define acc21_im accum7.y
#define acc22_re accum8.x
#define acc22_im accum8.y
#define acc30_re accum9.x
#define acc30_im accum9.y
#define acc31_re accum10.x
#define acc31_im accum10.y
#define acc32_re accum11.x
#define acc32_im accum11.y
#else
#define spinorFloat float
// single precision: two complex components per float4 register (I0..I5)
#define i00_re I0.x
#define i00_im I0.y
#define i01_re I0.z
#define i01_im I0.w
#define i02_re I1.x
#define i02_im I1.y
#define i10_re I1.z
#define i10_im I1.w
#define i11_re I2.x
#define i11_im I2.y
#define i12_re I2.z
#define i12_im I2.w
#define i20_re I3.x
#define i20_im I3.y
#define i21_re I3.z
#define i21_im I3.w
#define i22_re I4.x
#define i22_im I4.y
#define i30_re I4.z
#define i30_im I4.w
#define i31_re I5.x
#define i31_im I5.y
#define i32_re I5.z
#define i32_im I5.w
// accumulator spinor (accum0..accum5), same component layout
#define acc00_re accum0.x
#define acc00_im accum0.y
#define acc01_re accum0.z
#define acc01_im accum0.w
#define acc02_re accum1.x
#define acc02_im accum1.y
#define acc10_re accum1.z
#define acc10_im accum1.w
#define acc11_re accum2.x
#define acc11_im accum2.y
#define acc12_re accum2.z
#define acc12_im accum2.w
#define acc20_re accum3.x
#define acc20_im accum3.y
#define acc21_re accum3.z
#define acc21_im accum3.w
#define acc22_re accum4.x
#define acc22_im accum4.y
#define acc30_re accum4.z
#define acc30_im accum4.w
#define acc31_re accum5.x
#define acc31_im accum5.y
#define acc32_re accum5.z
#define acc32_im accum5.w
#endif // SPINOR_DOUBLE
113 
// Gauge link: map g<row><col>_{re,im} (3x3 complex SU(3) matrix) onto the
// registers filled by READ_GAUGE_MATRIX (read_gauge.h).
#ifdef GAUGE_FLOAT2
// float2/double2 storage: one register per complex entry (G0..G8)
#define g00_re G0.x
#define g00_im G0.y
#define g01_re G1.x
#define g01_im G1.y
#define g02_re G2.x
#define g02_im G2.y
#define g10_re G3.x
#define g10_im G3.y
#define g11_re G4.x
#define g11_im G4.y
#define g12_re G5.x
#define g12_im G5.y
#define g20_re G6.x
#define g20_im G6.y
#define g21_re G7.x
#define g21_im G7.y
#define g22_re G8.x
#define g22_im G8.y

#else
// float4 storage: two complex entries per register (G0..G4; G4.z/.w unused
// by these mappings)
#define g00_re G0.x
#define g00_im G0.y
#define g01_re G0.z
#define g01_im G0.w
#define g02_re G1.x
#define g02_im G1.y
#define g10_re G1.z
#define g10_im G1.w
#define g11_re G2.x
#define g11_im G2.y
#define g12_re G2.z
#define g12_im G2.w
#define g20_re G3.x
#define g20_im G3.y
#define g21_re G3.z
#define g21_im G3.w
#define g22_re G4.x
#define g22_im G4.y

#endif // GAUGE_FLOAT2
156 
// Conjugated gauge link: gT is the Hermitian conjugate of g,
// i.e. gT[i][j] = conj(g[j][i]) — used for hops in the negative direction.
#define gT00_re (+g00_re)
#define gT00_im (-g00_im)
#define gT01_re (+g10_re)
#define gT01_im (-g10_im)
#define gT02_re (+g20_re)
#define gT02_im (-g20_im)
#define gT10_re (+g01_re)
#define gT10_im (-g01_im)
#define gT11_re (+g11_re)
#define gT11_im (-g11_im)
#define gT12_re (+g21_re)
#define gT12_im (-g21_im)
#define gT20_re (+g02_re)
#define gT20_im (-g02_im)
#define gT21_re (+g12_re)
#define gT21_im (-g12_im)
#define gT22_re (+g22_re)
#define gT22_im (-g22_im)
176 
// First chiral block of the clover term: a Hermitian 6x6 matrix stored as
// 6 real diagonal entries plus the 15 complex lower-triangular entries,
// packed into C0..C17 (double2) or C0..C8 (float4).  Names are
// c<row>_<col> with row/col in {00,01,02,10,11,12} (spin-color pairs).
#ifdef CLOVER_DOUBLE
// diagonal (real)
#define c00_00_re C0.x
#define c01_01_re C0.y
#define c02_02_re C1.x
#define c10_10_re C1.y
#define c11_11_re C2.x
#define c12_12_re C2.y
// lower triangle (complex)
#define c01_00_re C3.x
#define c01_00_im C3.y
#define c02_00_re C4.x
#define c02_00_im C4.y
#define c10_00_re C5.x
#define c10_00_im C5.y
#define c11_00_re C6.x
#define c11_00_im C6.y
#define c12_00_re C7.x
#define c12_00_im C7.y
#define c02_01_re C8.x
#define c02_01_im C8.y
#define c10_01_re C9.x
#define c10_01_im C9.y
#define c11_01_re C10.x
#define c11_01_im C10.y
#define c12_01_re C11.x
#define c12_01_im C11.y
#define c10_02_re C12.x
#define c10_02_im C12.y
#define c11_02_re C13.x
#define c11_02_im C13.y
#define c12_02_re C14.x
#define c12_02_im C14.y
#define c11_10_re C15.x
#define c11_10_im C15.y
#define c12_10_re C16.x
#define c12_10_im C16.y
#define c12_11_re C17.x
#define c12_11_im C17.y
#else
// diagonal (real)
#define c00_00_re C0.x
#define c01_01_re C0.y
#define c02_02_re C0.z
#define c10_10_re C0.w
#define c11_11_re C1.x
#define c12_12_re C1.y
// lower triangle (complex)
#define c01_00_re C1.z
#define c01_00_im C1.w
#define c02_00_re C2.x
#define c02_00_im C2.y
#define c10_00_re C2.z
#define c10_00_im C2.w
#define c11_00_re C3.x
#define c11_00_im C3.y
#define c12_00_re C3.z
#define c12_00_im C3.w
#define c02_01_re C4.x
#define c02_01_im C4.y
#define c10_01_re C4.z
#define c10_01_im C4.w
#define c11_01_re C5.x
#define c11_01_im C5.y
#define c12_01_re C5.z
#define c12_01_im C5.w
#define c10_02_re C6.x
#define c10_02_im C6.y
#define c11_02_re C6.z
#define c11_02_im C6.w
#define c12_02_re C7.x
#define c12_02_im C7.y
#define c11_10_re C7.z
#define c11_10_im C7.w
#define c12_10_re C8.x
#define c12_10_im C8.y
#define c12_11_re C8.z
#define c12_11_im C8.w
#endif // CLOVER_DOUBLE
253 
// Upper triangle of the (Hermitian) clover block, obtained by conjugating
// the stored lower-triangular entries: c<i>_<j> = conj(c<j>_<i>).
#define c00_01_re (+c01_00_re)
#define c00_01_im (-c01_00_im)
#define c00_02_re (+c02_00_re)
#define c00_02_im (-c02_00_im)
#define c01_02_re (+c02_01_re)
#define c01_02_im (-c02_01_im)
#define c00_10_re (+c10_00_re)
#define c00_10_im (-c10_00_im)
#define c01_10_re (+c10_01_re)
#define c01_10_im (-c10_01_im)
#define c02_10_re (+c10_02_re)
#define c02_10_im (-c10_02_im)
#define c00_11_re (+c11_00_re)
#define c00_11_im (-c11_00_im)
#define c01_11_re (+c11_01_re)
#define c01_11_im (-c11_01_im)
#define c02_11_re (+c11_02_re)
#define c02_11_im (-c11_02_im)
#define c10_11_re (+c11_10_re)
#define c10_11_im (-c11_10_im)
#define c00_12_re (+c12_00_re)
#define c00_12_im (-c12_00_im)
#define c01_12_re (+c12_01_re)
#define c01_12_im (-c12_01_im)
#define c02_12_re (+c12_02_re)
#define c02_12_im (-c12_02_im)
#define c10_12_re (+c12_10_re)
#define c10_12_im (-c12_10_im)
#define c11_12_re (+c12_11_re)
#define c11_12_im (-c12_11_im)
284 
// Second chiral block of the clover term: pure aliases of the first-block
// names (the same C registers are reloaded with the second block's data
// before use), with spin indices shifted by 2: rows/cols {20..32} map to
// {00..12}.  Entries omitted below (e.g. c21_21_im) are the purely real
// diagonal elements.
#define c20_20_re c00_00_re
#define c21_20_re c01_00_re
#define c21_20_im c01_00_im
#define c22_20_re c02_00_re
#define c22_20_im c02_00_im
#define c30_20_re c10_00_re
#define c30_20_im c10_00_im
#define c31_20_re c11_00_re
#define c31_20_im c11_00_im
#define c32_20_re c12_00_re
#define c32_20_im c12_00_im
#define c20_21_re c00_01_re
#define c20_21_im c00_01_im
#define c21_21_re c01_01_re
#define c22_21_re c02_01_re
#define c22_21_im c02_01_im
#define c30_21_re c10_01_re
#define c30_21_im c10_01_im
#define c31_21_re c11_01_re
#define c31_21_im c11_01_im
#define c32_21_re c12_01_re
#define c32_21_im c12_01_im
#define c20_22_re c00_02_re
#define c20_22_im c00_02_im
#define c21_22_re c01_02_re
#define c21_22_im c01_02_im
#define c22_22_re c02_02_re
#define c30_22_re c10_02_re
#define c30_22_im c10_02_im
#define c31_22_re c11_02_re
#define c31_22_im c11_02_im
#define c32_22_re c12_02_re
#define c32_22_im c12_02_im
#define c20_30_re c00_10_re
#define c20_30_im c00_10_im
#define c21_30_re c01_10_re
#define c21_30_im c01_10_im
#define c22_30_re c02_10_re
#define c22_30_im c02_10_im
#define c30_30_re c10_10_re
#define c31_30_re c11_10_re
#define c31_30_im c11_10_im
#define c32_30_re c12_10_re
#define c32_30_im c12_10_im
#define c20_31_re c00_11_re
#define c20_31_im c00_11_im
#define c21_31_re c01_11_re
#define c21_31_im c01_11_im
#define c22_31_re c02_11_re
#define c22_31_im c02_11_im
#define c30_31_re c10_11_re
#define c30_31_im c10_11_im
#define c31_31_re c11_11_re
#define c32_31_re c12_11_re
#define c32_31_im c12_11_im
#define c20_32_re c00_12_re
#define c20_32_im c00_12_im
#define c21_32_re c01_12_re
#define c21_32_im c01_12_im
#define c22_32_re c02_12_re
#define c22_32_im c02_12_im
#define c30_32_re c10_12_re
#define c30_32_im c10_12_im
#define c31_32_re c11_12_re
#define c31_32_im c11_12_im
#define c32_32_re c12_12_re
352 
353 
// First chiral block of the INVERTED clover term, same Hermitian packing as
// the clover block above: 6 real diagonal entries plus 15 complex
// lower-triangular entries in C0..C17 (double2) or C0..C8 (float4).
#ifdef CLOVER_DOUBLE
// diagonal (real)
#define cinv00_00_re C0.x
#define cinv01_01_re C0.y
#define cinv02_02_re C1.x
#define cinv10_10_re C1.y
#define cinv11_11_re C2.x
#define cinv12_12_re C2.y
// lower triangle (complex)
#define cinv01_00_re C3.x
#define cinv01_00_im C3.y
#define cinv02_00_re C4.x
#define cinv02_00_im C4.y
#define cinv10_00_re C5.x
#define cinv10_00_im C5.y
#define cinv11_00_re C6.x
#define cinv11_00_im C6.y
#define cinv12_00_re C7.x
#define cinv12_00_im C7.y
#define cinv02_01_re C8.x
#define cinv02_01_im C8.y
#define cinv10_01_re C9.x
#define cinv10_01_im C9.y
#define cinv11_01_re C10.x
#define cinv11_01_im C10.y
#define cinv12_01_re C11.x
#define cinv12_01_im C11.y
#define cinv10_02_re C12.x
#define cinv10_02_im C12.y
#define cinv11_02_re C13.x
#define cinv11_02_im C13.y
#define cinv12_02_re C14.x
#define cinv12_02_im C14.y
#define cinv11_10_re C15.x
#define cinv11_10_im C15.y
#define cinv12_10_re C16.x
#define cinv12_10_im C16.y
#define cinv12_11_re C17.x
#define cinv12_11_im C17.y
#else
// diagonal (real)
#define cinv00_00_re C0.x
#define cinv01_01_re C0.y
#define cinv02_02_re C0.z
#define cinv10_10_re C0.w
#define cinv11_11_re C1.x
#define cinv12_12_re C1.y
// lower triangle (complex)
#define cinv01_00_re C1.z
#define cinv01_00_im C1.w
#define cinv02_00_re C2.x
#define cinv02_00_im C2.y
#define cinv10_00_re C2.z
#define cinv10_00_im C2.w
#define cinv11_00_re C3.x
#define cinv11_00_im C3.y
#define cinv12_00_re C3.z
#define cinv12_00_im C3.w
#define cinv02_01_re C4.x
#define cinv02_01_im C4.y
#define cinv10_01_re C4.z
#define cinv10_01_im C4.w
#define cinv11_01_re C5.x
#define cinv11_01_im C5.y
#define cinv12_01_re C5.z
#define cinv12_01_im C5.w
#define cinv10_02_re C6.x
#define cinv10_02_im C6.y
#define cinv11_02_re C6.z
#define cinv11_02_im C6.w
#define cinv12_02_re C7.x
#define cinv12_02_im C7.y
#define cinv11_10_re C7.z
#define cinv11_10_im C7.w
#define cinv12_10_re C8.x
#define cinv12_10_im C8.y
#define cinv12_11_re C8.z
#define cinv12_11_im C8.w
#endif // CLOVER_DOUBLE
430 
// Upper triangle of the (Hermitian) inverted-clover block, obtained by
// conjugating the stored lower-triangular entries: cinv<i>_<j> = conj(cinv<j>_<i>).
#define cinv00_01_re (+cinv01_00_re)
#define cinv00_01_im (-cinv01_00_im)
#define cinv00_02_re (+cinv02_00_re)
#define cinv00_02_im (-cinv02_00_im)
#define cinv01_02_re (+cinv02_01_re)
#define cinv01_02_im (-cinv02_01_im)
#define cinv00_10_re (+cinv10_00_re)
#define cinv00_10_im (-cinv10_00_im)
#define cinv01_10_re (+cinv10_01_re)
#define cinv01_10_im (-cinv10_01_im)
#define cinv02_10_re (+cinv10_02_re)
#define cinv02_10_im (-cinv10_02_im)
#define cinv00_11_re (+cinv11_00_re)
#define cinv00_11_im (-cinv11_00_im)
#define cinv01_11_re (+cinv11_01_re)
#define cinv01_11_im (-cinv11_01_im)
#define cinv02_11_re (+cinv11_02_re)
#define cinv02_11_im (-cinv11_02_im)
#define cinv10_11_re (+cinv11_10_re)
#define cinv10_11_im (-cinv11_10_im)
#define cinv00_12_re (+cinv12_00_re)
#define cinv00_12_im (-cinv12_00_im)
#define cinv01_12_re (+cinv12_01_re)
#define cinv01_12_im (-cinv12_01_im)
#define cinv02_12_re (+cinv12_02_re)
#define cinv02_12_im (-cinv12_02_im)
#define cinv10_12_re (+cinv12_10_re)
#define cinv10_12_im (-cinv12_10_im)
#define cinv11_12_re (+cinv12_11_re)
#define cinv11_12_im (-cinv12_11_im)
461 
// Second chiral block of the inverted clover term: aliases of the
// first-block names (the same C registers are reloaded with the second
// block's data before use), spin indices shifted by 2.
#define cinv20_20_re cinv00_00_re
#define cinv21_20_re cinv01_00_re
#define cinv21_20_im cinv01_00_im
#define cinv22_20_re cinv02_00_re
#define cinv22_20_im cinv02_00_im
#define cinv30_20_re cinv10_00_re
#define cinv30_20_im cinv10_00_im
#define cinv31_20_re cinv11_00_re
#define cinv31_20_im cinv11_00_im
#define cinv32_20_re cinv12_00_re
#define cinv32_20_im cinv12_00_im
#define cinv20_21_re cinv00_01_re
#define cinv20_21_im cinv00_01_im
#define cinv21_21_re cinv01_01_re
#define cinv22_21_re cinv02_01_re
#define cinv22_21_im cinv02_01_im
#define cinv30_21_re cinv10_01_re
#define cinv30_21_im cinv10_01_im
#define cinv31_21_re cinv11_01_re
#define cinv31_21_im cinv11_01_im
#define cinv32_21_re cinv12_01_re
#define cinv32_21_im cinv12_01_im
#define cinv20_22_re cinv00_02_re
#define cinv20_22_im cinv00_02_im
#define cinv21_22_re cinv01_02_re
#define cinv21_22_im cinv01_02_im
#define cinv22_22_re cinv02_02_re
#define cinv30_22_re cinv10_02_re
#define cinv30_22_im cinv10_02_im
#define cinv31_22_re cinv11_02_re
#define cinv31_22_im cinv11_02_im
#define cinv32_22_re cinv12_02_re
#define cinv32_22_im cinv12_02_im
#define cinv20_30_re cinv00_10_re
#define cinv20_30_im cinv00_10_im
#define cinv21_30_re cinv01_10_re
#define cinv21_30_im cinv01_10_im
#define cinv22_30_re cinv02_10_re
#define cinv22_30_im cinv02_10_im
#define cinv30_30_re cinv10_10_re
#define cinv31_30_re cinv11_10_re
#define cinv31_30_im cinv11_10_im
#define cinv32_30_re cinv12_10_re
#define cinv32_30_im cinv12_10_im
#define cinv20_31_re cinv00_11_re
#define cinv20_31_im cinv00_11_im
#define cinv21_31_re cinv01_11_re
#define cinv21_31_im cinv01_11_im
#define cinv22_31_re cinv02_11_re
#define cinv22_31_im cinv02_11_im
#define cinv30_31_re cinv10_11_re
#define cinv30_31_im cinv10_11_im
#define cinv31_31_re cinv11_11_re
#define cinv32_31_re cinv12_11_re
#define cinv32_31_im cinv12_11_im
#define cinv20_32_re cinv00_12_re
#define cinv20_32_im cinv00_12_im
#define cinv21_32_re cinv01_12_re
#define cinv21_32_im cinv01_12_im
#define cinv22_32_re cinv02_12_re
#define cinv22_32_im cinv02_12_im
#define cinv30_32_re cinv10_12_re
#define cinv30_32_im cinv10_12_im
#define cinv31_32_re cinv11_12_re
#define cinv31_32_im cinv11_12_im
#define cinv32_32_re cinv12_12_re
529 
530 
#ifndef CLOVER_TWIST_INV_DSLASH

// Declare the clover storage registers C0.. up front so the ASSN-style
// clover macros can assign into them (instead of the READ variants which
// declare them inline).
#ifdef CLOVER_DOUBLE
double2 C0;
double2 C1;
double2 C2;
double2 C3;
double2 C4;
double2 C5;
double2 C6;
double2 C7;
double2 C8;
double2 C9;
double2 C10;
double2 C11;
double2 C12;
double2 C13;
double2 C14;
double2 C15;
double2 C16;
double2 C17;
#else
float4 C0;
float4 C1;
float4 C2;
float4 C3;
float4 C4;
float4 C5;
float4 C6;
float4 C7;
float4 C8;

// half precision additionally carries a per-site clover norm factor
#if (DD_PREC==2)
float K;
#endif

#endif // CLOVER_DOUBLE
#endif
570 
// Output spinor: the first DSLASH_SHARED_FLOATS_PER_THREAD (= 19) components
// are staged in shared memory through pointer `s`, one float per thread per
// slot, strided by SHARED_STRIDE so consecutive threads hit distinct banks.
// (The remaining components are held in registers; their declarations follow
// outside this span.)
#define o00_re s[0*SHARED_STRIDE]
#define o00_im s[1*SHARED_STRIDE]
#define o01_re s[2*SHARED_STRIDE]
#define o01_im s[3*SHARED_STRIDE]
#define o02_re s[4*SHARED_STRIDE]
#define o02_im s[5*SHARED_STRIDE]
#define o10_re s[6*SHARED_STRIDE]
#define o10_im s[7*SHARED_STRIDE]
#define o11_re s[8*SHARED_STRIDE]
#define o11_im s[9*SHARED_STRIDE]
#define o12_re s[10*SHARED_STRIDE]
#define o12_im s[11*SHARED_STRIDE]
#define o20_re s[12*SHARED_STRIDE]
#define o20_im s[13*SHARED_STRIDE]
#define o21_re s[14*SHARED_STRIDE]
#define o21_im s[15*SHARED_STRIDE]
#define o22_re s[16*SHARED_STRIDE]
#define o22_im s[17*SHARED_STRIDE]
#define o30_re s[18*SHARED_STRIDE]
596 
// Stride between a thread's consecutive shared-memory spinor slots:
// 8 doubles / 16 floats (= 64 bytes) per original comment.
#ifdef SPINOR_DOUBLE
#define SHARED_STRIDE 8 // to avoid bank conflicts on G80 and GT200
#else
#define SHARED_STRIDE 16 // to avoid bank conflicts on G80 and GT200
#endif

// dynamically sized shared-memory arena; `s` (declared below) points into it
extern __shared__ char s_data[];
604 
606  + (threadIdx.x % SHARED_STRIDE);
607 
// Device-code fragments supplying READ_GAUGE_MATRIX / gauge reconstruction,
// spinor I/O (READ_SPINOR, READ_HALF_SPINOR, ...), clover reads, and the
// twisted-mass-clover core macros used by the kernel body below.
#include "read_gauge.h"
#include "io_spinor.h"
#include "read_clover.h"
#include "tmc_core.h"
612 
613 int x1, x2, x3, x4;
614 int X;
615 
616 #if (defined MULTI_GPU) && (DD_PREC==2) // half precision
617 int sp_norm_idx;
618 #endif // MULTI_GPU half precision
619 
620 int sid;
621 
622 #ifdef MULTI_GPU
623 int face_idx;
625 #endif
626 
627  sid = blockIdx.x*blockDim.x + threadIdx.x;
628  if (sid >= param.threads) return;
629 
630  // Inline by hand for the moment and assume even dimensions
631  const int dims[] = {X1, X2, X3, X4};
632  coordsFromIndex<EVEN_X>(X, x1, x2, x3, x4, sid, param.parity, dims);
633 
634  o00_re = 0; o00_im = 0;
635  o01_re = 0; o01_im = 0;
636  o02_re = 0; o02_im = 0;
637  o10_re = 0; o10_im = 0;
638  o11_re = 0; o11_im = 0;
639  o12_re = 0; o12_im = 0;
640  o20_re = 0; o20_im = 0;
641  o21_re = 0; o21_im = 0;
642  o22_re = 0; o22_im = 0;
643  o30_re = 0; o30_im = 0;
644  o31_re = 0; o31_im = 0;
645  o32_re = 0; o32_im = 0;
646 
647 #ifdef MULTI_GPU
648 } else { // exterior kernel
649 
650  sid = blockIdx.x*blockDim.x + threadIdx.x;
651  if (sid >= param.threads) return;
652 
653  const int dim = static_cast<int>(kernel_type);
654  const int face_volume = (param.threads >> 1); // volume of one face
655  const int face_num = (sid >= face_volume); // is this thread updating face 0 or 1
656  face_idx = sid - face_num*face_volume; // index into the respective face
657 
658  // ghostOffset is scaled to include body (includes stride) and number of FloatN arrays (SPINOR_HOP)
659  // face_idx not sid since faces are spin projected and share the same volume index (modulo UP/DOWN reading)
660  //sp_idx = face_idx + param.ghostOffset[dim];
661 
662 #if (DD_PREC==2) // half precision
663  sp_norm_idx = sid + param.ghostNormOffset[static_cast<int>(kernel_type)];
664 #endif
665 
666  const int dims[] = {X1, X2, X3, X4};
667  coordsFromFaceIndex<1>(X, sid, x1, x2, x3, x4, face_idx, face_volume, dim, face_num, param.parity, dims);
668 
669  READ_INTERMEDIATE_SPINOR(INTERTEX, param.sp_stride, sid, sid);
670 
671  o00_re = i00_re; o00_im = i00_im;
672  o01_re = i01_re; o01_im = i01_im;
673  o02_re = i02_re; o02_im = i02_im;
674  o10_re = i10_re; o10_im = i10_im;
675  o11_re = i11_re; o11_im = i11_im;
676  o12_re = i12_re; o12_im = i12_im;
677  o20_re = i20_re; o20_im = i20_im;
678  o21_re = i21_re; o21_im = i21_im;
679  o22_re = i22_re; o22_im = i22_im;
680  o30_re = i30_re; o30_im = i30_im;
681  o31_re = i31_re; o31_im = i31_im;
682  o32_re = i32_re; o32_im = i32_im;
683 }
684 #endif // MULTI_GPU
685 
686 
687 #ifdef MULTI_GPU
688 if ( (kernel_type == INTERIOR_KERNEL && (!param.ghostDim[0] || x1<X1m1)) ||
689  (kernel_type == EXTERIOR_KERNEL_X && x1==X1m1) )
690 #endif
691 {
692  // Projector P0+
693  // 1 0 0 i
694  // 0 1 i 0
695  // 0 -i 1 0
696  // -i 0 0 1
697 
698 #ifdef MULTI_GPU
699  const int sp_idx = (kernel_type == INTERIOR_KERNEL) ? (x1==X1m1 ? X-X1m1 : X+1) >> 1 :
700  face_idx + param.ghostOffset[static_cast<int>(kernel_type)];
701 #else
702  const int sp_idx = (x1==X1m1 ? X-X1m1 : X+1) >> 1;
703 #endif
704 
705  const int ga_idx = sid;
706 
713 
714 #ifdef MULTI_GPU
715  if (kernel_type == INTERIOR_KERNEL) {
716 #endif
717 
718  // read spinor from device memory
719  READ_SPINOR(SPINORTEX, param.sp_stride, sp_idx, sp_idx);
720 
721  // project spinor into half spinors
722  a0_re = +i00_re-i30_im;
723  a0_im = +i00_im+i30_re;
724  a1_re = +i01_re-i31_im;
725  a1_im = +i01_im+i31_re;
726  a2_re = +i02_re-i32_im;
727  a2_im = +i02_im+i32_re;
728  b0_re = +i10_re-i20_im;
729  b0_im = +i10_im+i20_re;
730  b1_re = +i11_re-i21_im;
731  b1_im = +i11_im+i21_re;
732  b2_re = +i12_re-i22_im;
733  b2_im = +i12_im+i22_re;
734 
735 #ifdef MULTI_GPU
736  } else {
737 
738  const int sp_stride_pad = ghostFace[static_cast<int>(kernel_type)];
739 
740  // read half spinor from device memory
741  READ_HALF_SPINOR(SPINORTEX, sp_stride_pad, sp_idx + (SPINOR_HOP/2)*sp_stride_pad, sp_norm_idx);
742 
743  a0_re = i00_re; a0_im = i00_im;
744  a1_re = i01_re; a1_im = i01_im;
745  a2_re = i02_re; a2_im = i02_im;
746  b0_re = i10_re; b0_im = i10_im;
747  b1_re = i11_re; b1_im = i11_im;
748  b2_re = i12_re; b2_im = i12_im;
749 
750  }
751 #endif // MULTI_GPU
752 
753  // read gauge matrix from device memory
754  READ_GAUGE_MATRIX(G, GAUGE0TEX, 0, ga_idx, ga_stride);
755 
756  // reconstruct gauge matrix
758 
759  // multiply row 0
761  A0_re += g00_re * a0_re;
762  A0_re -= g00_im * a0_im;
763  A0_re += g01_re * a1_re;
764  A0_re -= g01_im * a1_im;
765  A0_re += g02_re * a2_re;
766  A0_re -= g02_im * a2_im;
768  A0_im += g00_re * a0_im;
769  A0_im += g00_im * a0_re;
770  A0_im += g01_re * a1_im;
771  A0_im += g01_im * a1_re;
772  A0_im += g02_re * a2_im;
773  A0_im += g02_im * a2_re;
775  B0_re += g00_re * b0_re;
776  B0_re -= g00_im * b0_im;
777  B0_re += g01_re * b1_re;
778  B0_re -= g01_im * b1_im;
779  B0_re += g02_re * b2_re;
780  B0_re -= g02_im * b2_im;
782  B0_im += g00_re * b0_im;
783  B0_im += g00_im * b0_re;
784  B0_im += g01_re * b1_im;
785  B0_im += g01_im * b1_re;
786  B0_im += g02_re * b2_im;
787  B0_im += g02_im * b2_re;
788 
789  // multiply row 1
791  A1_re += g10_re * a0_re;
792  A1_re -= g10_im * a0_im;
793  A1_re += g11_re * a1_re;
794  A1_re -= g11_im * a1_im;
795  A1_re += g12_re * a2_re;
796  A1_re -= g12_im * a2_im;
798  A1_im += g10_re * a0_im;
799  A1_im += g10_im * a0_re;
800  A1_im += g11_re * a1_im;
801  A1_im += g11_im * a1_re;
802  A1_im += g12_re * a2_im;
803  A1_im += g12_im * a2_re;
805  B1_re += g10_re * b0_re;
806  B1_re -= g10_im * b0_im;
807  B1_re += g11_re * b1_re;
808  B1_re -= g11_im * b1_im;
809  B1_re += g12_re * b2_re;
810  B1_re -= g12_im * b2_im;
812  B1_im += g10_re * b0_im;
813  B1_im += g10_im * b0_re;
814  B1_im += g11_re * b1_im;
815  B1_im += g11_im * b1_re;
816  B1_im += g12_re * b2_im;
817  B1_im += g12_im * b2_re;
818 
819  // multiply row 2
821  A2_re += g20_re * a0_re;
822  A2_re -= g20_im * a0_im;
823  A2_re += g21_re * a1_re;
824  A2_re -= g21_im * a1_im;
825  A2_re += g22_re * a2_re;
826  A2_re -= g22_im * a2_im;
828  A2_im += g20_re * a0_im;
829  A2_im += g20_im * a0_re;
830  A2_im += g21_re * a1_im;
831  A2_im += g21_im * a1_re;
832  A2_im += g22_re * a2_im;
833  A2_im += g22_im * a2_re;
835  B2_re += g20_re * b0_re;
836  B2_re -= g20_im * b0_im;
837  B2_re += g21_re * b1_re;
838  B2_re -= g21_im * b1_im;
839  B2_re += g22_re * b2_re;
840  B2_re -= g22_im * b2_im;
842  B2_im += g20_re * b0_im;
843  B2_im += g20_im * b0_re;
844  B2_im += g21_re * b1_im;
845  B2_im += g21_im * b1_re;
846  B2_im += g22_re * b2_im;
847  B2_im += g22_im * b2_re;
848 
849  o00_re += A0_re;
850  o00_im += A0_im;
851  o10_re += B0_re;
852  o10_im += B0_im;
853  o20_re += B0_im;
854  o20_im -= B0_re;
855  o30_re += A0_im;
856  o30_im -= A0_re;
857 
858  o01_re += A1_re;
859  o01_im += A1_im;
860  o11_re += B1_re;
861  o11_im += B1_im;
862  o21_re += B1_im;
863  o21_im -= B1_re;
864  o31_re += A1_im;
865  o31_im -= A1_re;
866 
867  o02_re += A2_re;
868  o02_im += A2_im;
869  o12_re += B2_re;
870  o12_im += B2_im;
871  o22_re += B2_im;
872  o22_im -= B2_re;
873  o32_re += A2_im;
874  o32_im -= A2_re;
875 
876 }
877 
878 #ifdef MULTI_GPU
879 if ( (kernel_type == INTERIOR_KERNEL && (!param.ghostDim[0] || x1>0)) ||
880  (kernel_type == EXTERIOR_KERNEL_X && x1==0) )
881 #endif
882 {
883  // Projector P0-
884  // 1 0 0 -i
885  // 0 1 -i 0
886  // 0 i 1 0
887  // i 0 0 1
888 
889 #ifdef MULTI_GPU
890  const int sp_idx = (kernel_type == INTERIOR_KERNEL) ? (x1==0 ? X+X1m1 : X-1) >> 1 :
891  face_idx + param.ghostOffset[static_cast<int>(kernel_type)];
892 #else
893  const int sp_idx = (x1==0 ? X+X1m1 : X-1) >> 1;
894 #endif
895 
896 #ifdef MULTI_GPU
897  const int ga_idx = ((kernel_type == INTERIOR_KERNEL) ? sp_idx : Vh+face_idx);
898 #else
899  const int ga_idx = sp_idx;
900 #endif
901 
908 
909 #ifdef MULTI_GPU
910  if (kernel_type == INTERIOR_KERNEL) {
911 #endif
912 
913  // read spinor from device memory
914  READ_SPINOR(SPINORTEX, param.sp_stride, sp_idx, sp_idx);
915 
916  // project spinor into half spinors
917  a0_re = +i00_re+i30_im;
918  a0_im = +i00_im-i30_re;
919  a1_re = +i01_re+i31_im;
920  a1_im = +i01_im-i31_re;
921  a2_re = +i02_re+i32_im;
922  a2_im = +i02_im-i32_re;
923  b0_re = +i10_re+i20_im;
924  b0_im = +i10_im-i20_re;
925  b1_re = +i11_re+i21_im;
926  b1_im = +i11_im-i21_re;
927  b2_re = +i12_re+i22_im;
928  b2_im = +i12_im-i22_re;
929 
930 #ifdef MULTI_GPU
931  } else {
932 
933  const int sp_stride_pad = ghostFace[static_cast<int>(kernel_type)];
934 
935  // read half spinor from device memory
936  READ_HALF_SPINOR(SPINORTEX, sp_stride_pad, sp_idx, sp_norm_idx);
937 
938  a0_re = i00_re; a0_im = i00_im;
939  a1_re = i01_re; a1_im = i01_im;
940  a2_re = i02_re; a2_im = i02_im;
941  b0_re = i10_re; b0_im = i10_im;
942  b1_re = i11_re; b1_im = i11_im;
943  b2_re = i12_re; b2_im = i12_im;
944 
945  }
946 #endif // MULTI_GPU
947 
948  // read gauge matrix from device memory
949  READ_GAUGE_MATRIX(G, GAUGE1TEX, 1, ga_idx, ga_stride);
950 
951  // reconstruct gauge matrix
953 
954  // multiply row 0
955  spinorFloat A0_re = 0;
956  A0_re += gT00_re * a0_re;
957  A0_re -= gT00_im * a0_im;
958  A0_re += gT01_re * a1_re;
959  A0_re -= gT01_im * a1_im;
960  A0_re += gT02_re * a2_re;
961  A0_re -= gT02_im * a2_im;
962  spinorFloat A0_im = 0;
963  A0_im += gT00_re * a0_im;
964  A0_im += gT00_im * a0_re;
965  A0_im += gT01_re * a1_im;
966  A0_im += gT01_im * a1_re;
967  A0_im += gT02_re * a2_im;
968  A0_im += gT02_im * a2_re;
969  spinorFloat B0_re = 0;
970  B0_re += gT00_re * b0_re;
971  B0_re -= gT00_im * b0_im;
972  B0_re += gT01_re * b1_re;
973  B0_re -= gT01_im * b1_im;
974  B0_re += gT02_re * b2_re;
975  B0_re -= gT02_im * b2_im;
976  spinorFloat B0_im = 0;
977  B0_im += gT00_re * b0_im;
978  B0_im += gT00_im * b0_re;
979  B0_im += gT01_re * b1_im;
980  B0_im += gT01_im * b1_re;
981  B0_im += gT02_re * b2_im;
982  B0_im += gT02_im * b2_re;
983 
984  // multiply row 1
985  spinorFloat A1_re = 0;
986  A1_re += gT10_re * a0_re;
987  A1_re -= gT10_im * a0_im;
988  A1_re += gT11_re * a1_re;
989  A1_re -= gT11_im * a1_im;
990  A1_re += gT12_re * a2_re;
991  A1_re -= gT12_im * a2_im;
992  spinorFloat A1_im = 0;
993  A1_im += gT10_re * a0_im;
994  A1_im += gT10_im * a0_re;
995  A1_im += gT11_re * a1_im;
996  A1_im += gT11_im * a1_re;
997  A1_im += gT12_re * a2_im;
998  A1_im += gT12_im * a2_re;
999  spinorFloat B1_re = 0;
1000  B1_re += gT10_re * b0_re;
1001  B1_re -= gT10_im * b0_im;
1002  B1_re += gT11_re * b1_re;
1003  B1_re -= gT11_im * b1_im;
1004  B1_re += gT12_re * b2_re;
1005  B1_re -= gT12_im * b2_im;
1006  spinorFloat B1_im = 0;
1007  B1_im += gT10_re * b0_im;
1008  B1_im += gT10_im * b0_re;
1009  B1_im += gT11_re * b1_im;
1010  B1_im += gT11_im * b1_re;
1011  B1_im += gT12_re * b2_im;
1012  B1_im += gT12_im * b2_re;
1013 
1014  // multiply row 2
1015  spinorFloat A2_re = 0;
1016  A2_re += gT20_re * a0_re;
1017  A2_re -= gT20_im * a0_im;
1018  A2_re += gT21_re * a1_re;
1019  A2_re -= gT21_im * a1_im;
1020  A2_re += gT22_re * a2_re;
1021  A2_re -= gT22_im * a2_im;
1022  spinorFloat A2_im = 0;
1023  A2_im += gT20_re * a0_im;
1024  A2_im += gT20_im * a0_re;
1025  A2_im += gT21_re * a1_im;
1026  A2_im += gT21_im * a1_re;
1027  A2_im += gT22_re * a2_im;
1028  A2_im += gT22_im * a2_re;
1029  spinorFloat B2_re = 0;
1030  B2_re += gT20_re * b0_re;
1031  B2_re -= gT20_im * b0_im;
1032  B2_re += gT21_re * b1_re;
1033  B2_re -= gT21_im * b1_im;
1034  B2_re += gT22_re * b2_re;
1035  B2_re -= gT22_im * b2_im;
1036  spinorFloat B2_im = 0;
1037  B2_im += gT20_re * b0_im;
1038  B2_im += gT20_im * b0_re;
1039  B2_im += gT21_re * b1_im;
1040  B2_im += gT21_im * b1_re;
1041  B2_im += gT22_re * b2_im;
1042  B2_im += gT22_im * b2_re;
1043 
1044  o00_re += A0_re;
1045  o00_im += A0_im;
1046  o10_re += B0_re;
1047  o10_im += B0_im;
1048  o20_re -= B0_im;
1049  o20_im += B0_re;
1050  o30_re -= A0_im;
1051  o30_im += A0_re;
1052 
1053  o01_re += A1_re;
1054  o01_im += A1_im;
1055  o11_re += B1_re;
1056  o11_im += B1_im;
1057  o21_re -= B1_im;
1058  o21_im += B1_re;
1059  o31_re -= A1_im;
1060  o31_im += A1_re;
1061 
1062  o02_re += A2_re;
1063  o02_im += A2_im;
1064  o12_re += B2_re;
1065  o12_im += B2_im;
1066  o22_re -= B2_im;
1067  o22_im += B2_re;
1068  o32_re -= A2_im;
1069  o32_im += A2_re;
1070 
1071 }
1072 
// --------------------------------------------------------------------------
// Hop from the forward Y neighbour: gather the neighbour spinor, apply the
// spin projector shown below to halve it, multiply by the gauge link stored
// at this site (ga_idx = sid), and accumulate into the output spinor (o??).
// NOTE(review): this listing elides original lines 1093-1098 and 1143 —
// the half-spinor declarations (a?_re/b?_im, ...) and the
// RECONSTRUCT_GAUGE_MATRIX step are not visible in this excerpt.
// --------------------------------------------------------------------------
1073 #ifdef MULTI_GPU
1074 if ( (kernel_type == INTERIOR_KERNEL && (!param.ghostDim[1] || x2<X2m1)) ||
1075  (kernel_type == EXTERIOR_KERNEL_Y && x2==X2m1) )
1076 #endif
1077 {
1078  // Projector P1+
1079  // 1 0 0 1
1080  // 0 1 -1 0
1081  // 0 -1 1 0
1082  // 1 0 0 1
1083 
 // Interior: neighbour index with periodic wrap at x2==X2m1; exterior:
 // index into the ghost zone received from the neighbouring GPU.
1084 #ifdef MULTI_GPU
1085  const int sp_idx = (kernel_type == INTERIOR_KERNEL) ? (x2==X2m1 ? X-X2X1mX1 : X+X1) >> 1 :
1086  face_idx + param.ghostOffset[static_cast<int>(kernel_type)];
1087 #else
1088  const int sp_idx = (x2==X2m1 ? X-X2X1mX1 : X+X1) >> 1;
1089 #endif
1090 
1091  const int ga_idx = sid;
1092 
1099 
 // Interior kernel: read and project the full neighbour spinor.  Exterior
 // kernel: the half spinor was already projected on the sending GPU, so it
 // is simply copied out of the ghost buffer (upper half, hence the
 // (SPINOR_HOP/2)*sp_stride_pad offset below).
1100 #ifdef MULTI_GPU
1101  if (kernel_type == INTERIOR_KERNEL) {
1102 #endif
1103 
1104  // read spinor from device memory
1105  READ_SPINOR(SPINORTEX, param.sp_stride, sp_idx, sp_idx);
1106 
1107  // project spinor into half spinors
1108  a0_re = +i00_re+i30_re;
1109  a0_im = +i00_im+i30_im;
1110  a1_re = +i01_re+i31_re;
1111  a1_im = +i01_im+i31_im;
1112  a2_re = +i02_re+i32_re;
1113  a2_im = +i02_im+i32_im;
1114  b0_re = +i10_re-i20_re;
1115  b0_im = +i10_im-i20_im;
1116  b1_re = +i11_re-i21_re;
1117  b1_im = +i11_im-i21_im;
1118  b2_re = +i12_re-i22_re;
1119  b2_im = +i12_im-i22_im;
1120 
1121 #ifdef MULTI_GPU
1122  } else {
1123 
1124  const int sp_stride_pad = ghostFace[static_cast<int>(kernel_type)];
1125 
1126  // read half spinor from device memory
1127  READ_HALF_SPINOR(SPINORTEX, sp_stride_pad, sp_idx + (SPINOR_HOP/2)*sp_stride_pad, sp_norm_idx);
1128 
1129  a0_re = i00_re; a0_im = i00_im;
1130  a1_re = i01_re; a1_im = i01_im;
1131  a2_re = i02_re; a2_im = i02_im;
1132  b0_re = i10_re; b0_im = i10_im;
1133  b1_re = i11_re; b1_im = i11_im;
1134  b2_re = i12_re; b2_im = i12_im;
1135 
1136  }
1137 #endif // MULTI_GPU
1138 
1139  // read gauge matrix from device memory
1140  READ_GAUGE_MATRIX(G, GAUGE0TEX, 2, ga_idx, ga_stride);
1141 
1142  // reconstruct gauge matrix
1144 
 // Complex 3x3 (link) times 3-vector multiply, fully unrolled: A? is the
 // link applied to half spinor a, B? the link applied to half spinor b.
1145  // multiply row 0
1146  spinorFloat A0_re = 0;
1147  A0_re += g00_re * a0_re;
1148  A0_re -= g00_im * a0_im;
1149  A0_re += g01_re * a1_re;
1150  A0_re -= g01_im * a1_im;
1151  A0_re += g02_re * a2_re;
1152  A0_re -= g02_im * a2_im;
1153  spinorFloat A0_im = 0;
1154  A0_im += g00_re * a0_im;
1155  A0_im += g00_im * a0_re;
1156  A0_im += g01_re * a1_im;
1157  A0_im += g01_im * a1_re;
1158  A0_im += g02_re * a2_im;
1159  A0_im += g02_im * a2_re;
1160  spinorFloat B0_re = 0;
1161  B0_re += g00_re * b0_re;
1162  B0_re -= g00_im * b0_im;
1163  B0_re += g01_re * b1_re;
1164  B0_re -= g01_im * b1_im;
1165  B0_re += g02_re * b2_re;
1166  B0_re -= g02_im * b2_im;
1167  spinorFloat B0_im = 0;
1168  B0_im += g00_re * b0_im;
1169  B0_im += g00_im * b0_re;
1170  B0_im += g01_re * b1_im;
1171  B0_im += g01_im * b1_re;
1172  B0_im += g02_re * b2_im;
1173  B0_im += g02_im * b2_re;
1174 
1175  // multiply row 1
1176  spinorFloat A1_re = 0;
1177  A1_re += g10_re * a0_re;
1178  A1_re -= g10_im * a0_im;
1179  A1_re += g11_re * a1_re;
1180  A1_re -= g11_im * a1_im;
1181  A1_re += g12_re * a2_re;
1182  A1_re -= g12_im * a2_im;
1183  spinorFloat A1_im = 0;
1184  A1_im += g10_re * a0_im;
1185  A1_im += g10_im * a0_re;
1186  A1_im += g11_re * a1_im;
1187  A1_im += g11_im * a1_re;
1188  A1_im += g12_re * a2_im;
1189  A1_im += g12_im * a2_re;
1190  spinorFloat B1_re = 0;
1191  B1_re += g10_re * b0_re;
1192  B1_re -= g10_im * b0_im;
1193  B1_re += g11_re * b1_re;
1194  B1_re -= g11_im * b1_im;
1195  B1_re += g12_re * b2_re;
1196  B1_re -= g12_im * b2_im;
1197  spinorFloat B1_im = 0;
1198  B1_im += g10_re * b0_im;
1199  B1_im += g10_im * b0_re;
1200  B1_im += g11_re * b1_im;
1201  B1_im += g11_im * b1_re;
1202  B1_im += g12_re * b2_im;
1203  B1_im += g12_im * b2_re;
1204 
1205  // multiply row 2
1206  spinorFloat A2_re = 0;
1207  A2_re += g20_re * a0_re;
1208  A2_re -= g20_im * a0_im;
1209  A2_re += g21_re * a1_re;
1210  A2_re -= g21_im * a1_im;
1211  A2_re += g22_re * a2_re;
1212  A2_re -= g22_im * a2_im;
1213  spinorFloat A2_im = 0;
1214  A2_im += g20_re * a0_im;
1215  A2_im += g20_im * a0_re;
1216  A2_im += g21_re * a1_im;
1217  A2_im += g21_im * a1_re;
1218  A2_im += g22_re * a2_im;
1219  A2_im += g22_im * a2_re;
1220  spinorFloat B2_re = 0;
1221  B2_re += g20_re * b0_re;
1222  B2_re -= g20_im * b0_im;
1223  B2_re += g21_re * b1_re;
1224  B2_re -= g21_im * b1_im;
1225  B2_re += g22_re * b2_re;
1226  B2_re -= g22_im * b2_im;
1227  spinorFloat B2_im = 0;
1228  B2_im += g20_re * b0_im;
1229  B2_im += g20_im * b0_re;
1230  B2_im += g21_re * b1_im;
1231  B2_im += g21_im * b1_re;
1232  B2_im += g22_re * b2_im;
1233  B2_im += g22_im * b2_re;
1234 
 // Reconstruct the full spinor from the two link-rotated half spinors and
 // accumulate; the lower spin components reuse A?/B? with the signs of the
 // projector's lower rows (rows 2 and 3 of the P1+ matrix above).
1235  o00_re += A0_re;
1236  o00_im += A0_im;
1237  o10_re += B0_re;
1238  o10_im += B0_im;
1239  o20_re -= B0_re;
1240  o20_im -= B0_im;
1241  o30_re += A0_re;
1242  o30_im += A0_im;
1243 
1244  o01_re += A1_re;
1245  o01_im += A1_im;
1246  o11_re += B1_re;
1247  o11_im += B1_im;
1248  o21_re -= B1_re;
1249  o21_im -= B1_im;
1250  o31_re += A1_re;
1251  o31_im += A1_im;
1252 
1253  o02_re += A2_re;
1254  o02_im += A2_im;
1255  o12_re += B2_re;
1256  o12_im += B2_im;
1257  o22_re -= B2_re;
1258  o22_im -= B2_im;
1259  o32_re += A2_re;
1260  o32_im += A2_im;
1261 
1262 }
1263 
// --------------------------------------------------------------------------
// Hop from the backward Y neighbour: same structure as the forward hop, but
// the gauge link is read at the neighbour site (ga_idx = sp_idx, or
// Vh+face_idx for the exterior kernel) and the transposed link elements
// (gT??) are used in the multiply.  The ghost half spinor is read without
// the (SPINOR_HOP/2) offset used by the forward hop.
// NOTE(review): original lines 1288-1293 (half-spinor declarations) and
// 1338 (gauge reconstruction) are elided from this listing.
// --------------------------------------------------------------------------
1264 #ifdef MULTI_GPU
1265 if ( (kernel_type == INTERIOR_KERNEL && (!param.ghostDim[1] || x2>0)) ||
1266  (kernel_type == EXTERIOR_KERNEL_Y && x2==0) )
1267 #endif
1268 {
1269  // Projector P1-
1270  // 1 0 0 -1
1271  // 0 1 1 0
1272  // 0 1 1 0
1273  // -1 0 0 1
1274 
1275 #ifdef MULTI_GPU
1276  const int sp_idx = (kernel_type == INTERIOR_KERNEL) ? (x2==0 ? X+X2X1mX1 : X-X1) >> 1 :
1277  face_idx + param.ghostOffset[static_cast<int>(kernel_type)];
1278 #else
1279  const int sp_idx = (x2==0 ? X+X2X1mX1 : X-X1) >> 1;
1280 #endif
1281 
1282 #ifdef MULTI_GPU
1283  const int ga_idx = ((kernel_type == INTERIOR_KERNEL) ? sp_idx : Vh+face_idx);
1284 #else
1285  const int ga_idx = sp_idx;
1286 #endif
1287 
1294 
 // Interior: read and project the full neighbour spinor; exterior: copy the
 // pre-projected half spinor out of the ghost buffer.
1295 #ifdef MULTI_GPU
1296  if (kernel_type == INTERIOR_KERNEL) {
1297 #endif
1298 
1299  // read spinor from device memory
1300  READ_SPINOR(SPINORTEX, param.sp_stride, sp_idx, sp_idx);
1301 
1302  // project spinor into half spinors
1303  a0_re = +i00_re-i30_re;
1304  a0_im = +i00_im-i30_im;
1305  a1_re = +i01_re-i31_re;
1306  a1_im = +i01_im-i31_im;
1307  a2_re = +i02_re-i32_re;
1308  a2_im = +i02_im-i32_im;
1309  b0_re = +i10_re+i20_re;
1310  b0_im = +i10_im+i20_im;
1311  b1_re = +i11_re+i21_re;
1312  b1_im = +i11_im+i21_im;
1313  b2_re = +i12_re+i22_re;
1314  b2_im = +i12_im+i22_im;
1315 
1316 #ifdef MULTI_GPU
1317  } else {
1318 
1319  const int sp_stride_pad = ghostFace[static_cast<int>(kernel_type)];
1320 
1321  // read half spinor from device memory
1322  READ_HALF_SPINOR(SPINORTEX, sp_stride_pad, sp_idx, sp_norm_idx);
1323 
1324  a0_re = i00_re; a0_im = i00_im;
1325  a1_re = i01_re; a1_im = i01_im;
1326  a2_re = i02_re; a2_im = i02_im;
1327  b0_re = i10_re; b0_im = i10_im;
1328  b1_re = i11_re; b1_im = i11_im;
1329  b2_re = i12_re; b2_im = i12_im;
1330 
1331  }
1332 #endif // MULTI_GPU
1333 
1334  // read gauge matrix from device memory
1335  READ_GAUGE_MATRIX(G, GAUGE1TEX, 3, ga_idx, ga_stride);
1336 
1337  // reconstruct gauge matrix
1339 
 // Unrolled complex multiply by the transposed link (gT??) elements.
1340  // multiply row 0
1341  spinorFloat A0_re = 0;
1342  A0_re += gT00_re * a0_re;
1343  A0_re -= gT00_im * a0_im;
1344  A0_re += gT01_re * a1_re;
1345  A0_re -= gT01_im * a1_im;
1346  A0_re += gT02_re * a2_re;
1347  A0_re -= gT02_im * a2_im;
1348  spinorFloat A0_im = 0;
1349  A0_im += gT00_re * a0_im;
1350  A0_im += gT00_im * a0_re;
1351  A0_im += gT01_re * a1_im;
1352  A0_im += gT01_im * a1_re;
1353  A0_im += gT02_re * a2_im;
1354  A0_im += gT02_im * a2_re;
1355  spinorFloat B0_re = 0;
1356  B0_re += gT00_re * b0_re;
1357  B0_re -= gT00_im * b0_im;
1358  B0_re += gT01_re * b1_re;
1359  B0_re -= gT01_im * b1_im;
1360  B0_re += gT02_re * b2_re;
1361  B0_re -= gT02_im * b2_im;
1362  spinorFloat B0_im = 0;
1363  B0_im += gT00_re * b0_im;
1364  B0_im += gT00_im * b0_re;
1365  B0_im += gT01_re * b1_im;
1366  B0_im += gT01_im * b1_re;
1367  B0_im += gT02_re * b2_im;
1368  B0_im += gT02_im * b2_re;
1369 
1370  // multiply row 1
1371  spinorFloat A1_re = 0;
1372  A1_re += gT10_re * a0_re;
1373  A1_re -= gT10_im * a0_im;
1374  A1_re += gT11_re * a1_re;
1375  A1_re -= gT11_im * a1_im;
1376  A1_re += gT12_re * a2_re;
1377  A1_re -= gT12_im * a2_im;
1378  spinorFloat A1_im = 0;
1379  A1_im += gT10_re * a0_im;
1380  A1_im += gT10_im * a0_re;
1381  A1_im += gT11_re * a1_im;
1382  A1_im += gT11_im * a1_re;
1383  A1_im += gT12_re * a2_im;
1384  A1_im += gT12_im * a2_re;
1385  spinorFloat B1_re = 0;
1386  B1_re += gT10_re * b0_re;
1387  B1_re -= gT10_im * b0_im;
1388  B1_re += gT11_re * b1_re;
1389  B1_re -= gT11_im * b1_im;
1390  B1_re += gT12_re * b2_re;
1391  B1_re -= gT12_im * b2_im;
1392  spinorFloat B1_im = 0;
1393  B1_im += gT10_re * b0_im;
1394  B1_im += gT10_im * b0_re;
1395  B1_im += gT11_re * b1_im;
1396  B1_im += gT11_im * b1_re;
1397  B1_im += gT12_re * b2_im;
1398  B1_im += gT12_im * b2_re;
1399 
1400  // multiply row 2
1401  spinorFloat A2_re = 0;
1402  A2_re += gT20_re * a0_re;
1403  A2_re -= gT20_im * a0_im;
1404  A2_re += gT21_re * a1_re;
1405  A2_re -= gT21_im * a1_im;
1406  A2_re += gT22_re * a2_re;
1407  A2_re -= gT22_im * a2_im;
1408  spinorFloat A2_im = 0;
1409  A2_im += gT20_re * a0_im;
1410  A2_im += gT20_im * a0_re;
1411  A2_im += gT21_re * a1_im;
1412  A2_im += gT21_im * a1_re;
1413  A2_im += gT22_re * a2_im;
1414  A2_im += gT22_im * a2_re;
1415  spinorFloat B2_re = 0;
1416  B2_re += gT20_re * b0_re;
1417  B2_re -= gT20_im * b0_im;
1418  B2_re += gT21_re * b1_re;
1419  B2_re -= gT21_im * b1_im;
1420  B2_re += gT22_re * b2_re;
1421  B2_re -= gT22_im * b2_im;
1422  spinorFloat B2_im = 0;
1423  B2_im += gT20_re * b0_im;
1424  B2_im += gT20_im * b0_re;
1425  B2_im += gT21_re * b1_im;
1426  B2_im += gT21_im * b1_re;
1427  B2_im += gT22_re * b2_im;
1428  B2_im += gT22_im * b2_re;
1429 
 // Accumulate with the sign pattern of the P1- projector's lower rows.
1430  o00_re += A0_re;
1431  o00_im += A0_im;
1432  o10_re += B0_re;
1433  o10_im += B0_im;
1434  o20_re += B0_re;
1435  o20_im += B0_im;
1436  o30_re -= A0_re;
1437  o30_im -= A0_im;
1438 
1439  o01_re += A1_re;
1440  o01_im += A1_im;
1441  o11_re += B1_re;
1442  o11_im += B1_im;
1443  o21_re += B1_re;
1444  o21_im += B1_im;
1445  o31_re -= A1_re;
1446  o31_im -= A1_im;
1447 
1448  o02_re += A2_re;
1449  o02_im += A2_im;
1450  o12_re += B2_re;
1451  o12_im += B2_im;
1452  o22_re += B2_re;
1453  o22_im += B2_im;
1454  o32_re -= A2_re;
1455  o32_im -= A2_im;
1456 
1457 }
1458 
// --------------------------------------------------------------------------
// Hop from the forward Z neighbour: link at this site (ga_idx = sid),
// projector P2+ below mixes real and imaginary parts (the +/-i entries),
// which shows up as the re/im cross terms in the projection and in the
// lower-component accumulation at the end.
// NOTE(review): original lines 1479-1484 (half-spinor declarations) and
// 1529 (gauge reconstruction) are elided from this listing.
// --------------------------------------------------------------------------
1459 #ifdef MULTI_GPU
1460 if ( (kernel_type == INTERIOR_KERNEL && (!param.ghostDim[2] || x3<X3m1)) ||
1461  (kernel_type == EXTERIOR_KERNEL_Z && x3==X3m1) )
1462 #endif
1463 {
1464  // Projector P2+
1465  // 1 0 i 0
1466  // 0 1 0 -i
1467  // -i 0 1 0
1468  // 0 i 0 1
1469 
1470 #ifdef MULTI_GPU
1471  const int sp_idx = (kernel_type == INTERIOR_KERNEL) ? (x3==X3m1 ? X-X3X2X1mX2X1 : X+X2X1) >> 1 :
1472  face_idx + param.ghostOffset[static_cast<int>(kernel_type)];
1473 #else
1474  const int sp_idx = (x3==X3m1 ? X-X3X2X1mX2X1 : X+X2X1) >> 1;
1475 #endif
1476 
1477  const int ga_idx = sid;
1478 
1485 
 // Interior: full spinor read + projection; exterior: pre-projected half
 // spinor copied from the upper half of the ghost buffer.
1486 #ifdef MULTI_GPU
1487  if (kernel_type == INTERIOR_KERNEL) {
1488 #endif
1489 
1490  // read spinor from device memory
1491  READ_SPINOR(SPINORTEX, param.sp_stride, sp_idx, sp_idx);
1492 
1493  // project spinor into half spinors
1494  a0_re = +i00_re-i20_im;
1495  a0_im = +i00_im+i20_re;
1496  a1_re = +i01_re-i21_im;
1497  a1_im = +i01_im+i21_re;
1498  a2_re = +i02_re-i22_im;
1499  a2_im = +i02_im+i22_re;
1500  b0_re = +i10_re+i30_im;
1501  b0_im = +i10_im-i30_re;
1502  b1_re = +i11_re+i31_im;
1503  b1_im = +i11_im-i31_re;
1504  b2_re = +i12_re+i32_im;
1505  b2_im = +i12_im-i32_re;
1506 
1507 #ifdef MULTI_GPU
1508  } else {
1509 
1510  const int sp_stride_pad = ghostFace[static_cast<int>(kernel_type)];
1511 
1512  // read half spinor from device memory
1513  READ_HALF_SPINOR(SPINORTEX, sp_stride_pad, sp_idx + (SPINOR_HOP/2)*sp_stride_pad, sp_norm_idx);
1514 
1515  a0_re = i00_re; a0_im = i00_im;
1516  a1_re = i01_re; a1_im = i01_im;
1517  a2_re = i02_re; a2_im = i02_im;
1518  b0_re = i10_re; b0_im = i10_im;
1519  b1_re = i11_re; b1_im = i11_im;
1520  b2_re = i12_re; b2_im = i12_im;
1521 
1522  }
1523 #endif // MULTI_GPU
1524 
1525  // read gauge matrix from device memory
1526  READ_GAUGE_MATRIX(G, GAUGE0TEX, 4, ga_idx, ga_stride);
1527 
1528  // reconstruct gauge matrix
1530 
1531  // multiply row 0
1532  spinorFloat A0_re = 0;
1533  A0_re += g00_re * a0_re;
1534  A0_re -= g00_im * a0_im;
1535  A0_re += g01_re * a1_re;
1536  A0_re -= g01_im * a1_im;
1537  A0_re += g02_re * a2_re;
1538  A0_re -= g02_im * a2_im;
1539  spinorFloat A0_im = 0;
1540  A0_im += g00_re * a0_im;
1541  A0_im += g00_im * a0_re;
1542  A0_im += g01_re * a1_im;
1543  A0_im += g01_im * a1_re;
1544  A0_im += g02_re * a2_im;
1545  A0_im += g02_im * a2_re;
1546  spinorFloat B0_re = 0;
1547  B0_re += g00_re * b0_re;
1548  B0_re -= g00_im * b0_im;
1549  B0_re += g01_re * b1_re;
1550  B0_re -= g01_im * b1_im;
1551  B0_re += g02_re * b2_re;
1552  B0_re -= g02_im * b2_im;
1553  spinorFloat B0_im = 0;
1554  B0_im += g00_re * b0_im;
1555  B0_im += g00_im * b0_re;
1556  B0_im += g01_re * b1_im;
1557  B0_im += g01_im * b1_re;
1558  B0_im += g02_re * b2_im;
1559  B0_im += g02_im * b2_re;
1560 
1561  // multiply row 1
1562  spinorFloat A1_re = 0;
1563  A1_re += g10_re * a0_re;
1564  A1_re -= g10_im * a0_im;
1565  A1_re += g11_re * a1_re;
1566  A1_re -= g11_im * a1_im;
1567  A1_re += g12_re * a2_re;
1568  A1_re -= g12_im * a2_im;
1569  spinorFloat A1_im = 0;
1570  A1_im += g10_re * a0_im;
1571  A1_im += g10_im * a0_re;
1572  A1_im += g11_re * a1_im;
1573  A1_im += g11_im * a1_re;
1574  A1_im += g12_re * a2_im;
1575  A1_im += g12_im * a2_re;
1576  spinorFloat B1_re = 0;
1577  B1_re += g10_re * b0_re;
1578  B1_re -= g10_im * b0_im;
1579  B1_re += g11_re * b1_re;
1580  B1_re -= g11_im * b1_im;
1581  B1_re += g12_re * b2_re;
1582  B1_re -= g12_im * b2_im;
1583  spinorFloat B1_im = 0;
1584  B1_im += g10_re * b0_im;
1585  B1_im += g10_im * b0_re;
1586  B1_im += g11_re * b1_im;
1587  B1_im += g11_im * b1_re;
1588  B1_im += g12_re * b2_im;
1589  B1_im += g12_im * b2_re;
1590 
1591  // multiply row 2
1592  spinorFloat A2_re = 0;
1593  A2_re += g20_re * a0_re;
1594  A2_re -= g20_im * a0_im;
1595  A2_re += g21_re * a1_re;
1596  A2_re -= g21_im * a1_im;
1597  A2_re += g22_re * a2_re;
1598  A2_re -= g22_im * a2_im;
1599  spinorFloat A2_im = 0;
1600  A2_im += g20_re * a0_im;
1601  A2_im += g20_im * a0_re;
1602  A2_im += g21_re * a1_im;
1603  A2_im += g21_im * a1_re;
1604  A2_im += g22_re * a2_im;
1605  A2_im += g22_im * a2_re;
1606  spinorFloat B2_re = 0;
1607  B2_re += g20_re * b0_re;
1608  B2_re -= g20_im * b0_im;
1609  B2_re += g21_re * b1_re;
1610  B2_re -= g21_im * b1_im;
1611  B2_re += g22_re * b2_re;
1612  B2_re -= g22_im * b2_im;
1613  spinorFloat B2_im = 0;
1614  B2_im += g20_re * b0_im;
1615  B2_im += g20_im * b0_re;
1616  B2_im += g21_re * b1_im;
1617  B2_im += g21_im * b1_re;
1618  B2_im += g22_re * b2_im;
1619  B2_im += g22_im * b2_re;
1620 
 // Accumulate: lower components pick up the -i/+i factors of the
 // projector's lower rows, i.e. swapped re/im with sign flips.
1621  o00_re += A0_re;
1622  o00_im += A0_im;
1623  o10_re += B0_re;
1624  o10_im += B0_im;
1625  o20_re += A0_im;
1626  o20_im -= A0_re;
1627  o30_re -= B0_im;
1628  o30_im += B0_re;
1629 
1630  o01_re += A1_re;
1631  o01_im += A1_im;
1632  o11_re += B1_re;
1633  o11_im += B1_im;
1634  o21_re += A1_im;
1635  o21_im -= A1_re;
1636  o31_re -= B1_im;
1637  o31_im += B1_re;
1638 
1639  o02_re += A2_re;
1640  o02_im += A2_im;
1641  o12_re += B2_re;
1642  o12_im += B2_im;
1643  o22_re += A2_im;
1644  o22_im -= A2_re;
1645  o32_re -= B2_im;
1646  o32_im += B2_re;
1647 
1648 }
1649 
// --------------------------------------------------------------------------
// Hop from the backward Z neighbour: link read at the neighbour site
// (ga_idx = sp_idx / Vh+face_idx), transposed link elements (gT??) used.
// Projector P2- is the i -> -i mirror of P2+, visible in the flipped
// re/im cross terms below.
// NOTE(review): original lines 1674-1679 (half-spinor declarations) and
// 1724 (gauge reconstruction) are elided from this listing.
// --------------------------------------------------------------------------
1650 #ifdef MULTI_GPU
1651 if ( (kernel_type == INTERIOR_KERNEL && (!param.ghostDim[2] || x3>0)) ||
1652  (kernel_type == EXTERIOR_KERNEL_Z && x3==0) )
1653 #endif
1654 {
1655  // Projector P2-
1656  // 1 0 -i 0
1657  // 0 1 0 i
1658  // i 0 1 0
1659  // 0 -i 0 1
1660 
1661 #ifdef MULTI_GPU
1662  const int sp_idx = (kernel_type == INTERIOR_KERNEL) ? (x3==0 ? X+X3X2X1mX2X1 : X-X2X1) >> 1 :
1663  face_idx + param.ghostOffset[static_cast<int>(kernel_type)];
1664 #else
1665  const int sp_idx = (x3==0 ? X+X3X2X1mX2X1 : X-X2X1) >> 1;
1666 #endif
1667 
1668 #ifdef MULTI_GPU
1669  const int ga_idx = ((kernel_type == INTERIOR_KERNEL) ? sp_idx : Vh+face_idx);
1670 #else
1671  const int ga_idx = sp_idx;
1672 #endif
1673 
1680 
 // Interior: full spinor read + projection; exterior: pre-projected half
 // spinor from the ghost buffer (no SPINOR_HOP offset for backward hops).
1681 #ifdef MULTI_GPU
1682  if (kernel_type == INTERIOR_KERNEL) {
1683 #endif
1684 
1685  // read spinor from device memory
1686  READ_SPINOR(SPINORTEX, param.sp_stride, sp_idx, sp_idx);
1687 
1688  // project spinor into half spinors
1689  a0_re = +i00_re+i20_im;
1690  a0_im = +i00_im-i20_re;
1691  a1_re = +i01_re+i21_im;
1692  a1_im = +i01_im-i21_re;
1693  a2_re = +i02_re+i22_im;
1694  a2_im = +i02_im-i22_re;
1695  b0_re = +i10_re-i30_im;
1696  b0_im = +i10_im+i30_re;
1697  b1_re = +i11_re-i31_im;
1698  b1_im = +i11_im+i31_re;
1699  b2_re = +i12_re-i32_im;
1700  b2_im = +i12_im+i32_re;
1701 
1702 #ifdef MULTI_GPU
1703  } else {
1704 
1705  const int sp_stride_pad = ghostFace[static_cast<int>(kernel_type)];
1706 
1707  // read half spinor from device memory
1708  READ_HALF_SPINOR(SPINORTEX, sp_stride_pad, sp_idx, sp_norm_idx);
1709 
1710  a0_re = i00_re; a0_im = i00_im;
1711  a1_re = i01_re; a1_im = i01_im;
1712  a2_re = i02_re; a2_im = i02_im;
1713  b0_re = i10_re; b0_im = i10_im;
1714  b1_re = i11_re; b1_im = i11_im;
1715  b2_re = i12_re; b2_im = i12_im;
1716 
1717  }
1718 #endif // MULTI_GPU
1719 
1720  // read gauge matrix from device memory
1721  READ_GAUGE_MATRIX(G, GAUGE1TEX, 5, ga_idx, ga_stride);
1722 
1723  // reconstruct gauge matrix
1725 
1726  // multiply row 0
1727  spinorFloat A0_re = 0;
1728  A0_re += gT00_re * a0_re;
1729  A0_re -= gT00_im * a0_im;
1730  A0_re += gT01_re * a1_re;
1731  A0_re -= gT01_im * a1_im;
1732  A0_re += gT02_re * a2_re;
1733  A0_re -= gT02_im * a2_im;
1734  spinorFloat A0_im = 0;
1735  A0_im += gT00_re * a0_im;
1736  A0_im += gT00_im * a0_re;
1737  A0_im += gT01_re * a1_im;
1738  A0_im += gT01_im * a1_re;
1739  A0_im += gT02_re * a2_im;
1740  A0_im += gT02_im * a2_re;
1741  spinorFloat B0_re = 0;
1742  B0_re += gT00_re * b0_re;
1743  B0_re -= gT00_im * b0_im;
1744  B0_re += gT01_re * b1_re;
1745  B0_re -= gT01_im * b1_im;
1746  B0_re += gT02_re * b2_re;
1747  B0_re -= gT02_im * b2_im;
1748  spinorFloat B0_im = 0;
1749  B0_im += gT00_re * b0_im;
1750  B0_im += gT00_im * b0_re;
1751  B0_im += gT01_re * b1_im;
1752  B0_im += gT01_im * b1_re;
1753  B0_im += gT02_re * b2_im;
1754  B0_im += gT02_im * b2_re;
1755 
1756  // multiply row 1
1757  spinorFloat A1_re = 0;
1758  A1_re += gT10_re * a0_re;
1759  A1_re -= gT10_im * a0_im;
1760  A1_re += gT11_re * a1_re;
1761  A1_re -= gT11_im * a1_im;
1762  A1_re += gT12_re * a2_re;
1763  A1_re -= gT12_im * a2_im;
1764  spinorFloat A1_im = 0;
1765  A1_im += gT10_re * a0_im;
1766  A1_im += gT10_im * a0_re;
1767  A1_im += gT11_re * a1_im;
1768  A1_im += gT11_im * a1_re;
1769  A1_im += gT12_re * a2_im;
1770  A1_im += gT12_im * a2_re;
1771  spinorFloat B1_re = 0;
1772  B1_re += gT10_re * b0_re;
1773  B1_re -= gT10_im * b0_im;
1774  B1_re += gT11_re * b1_re;
1775  B1_re -= gT11_im * b1_im;
1776  B1_re += gT12_re * b2_re;
1777  B1_re -= gT12_im * b2_im;
1778  spinorFloat B1_im = 0;
1779  B1_im += gT10_re * b0_im;
1780  B1_im += gT10_im * b0_re;
1781  B1_im += gT11_re * b1_im;
1782  B1_im += gT11_im * b1_re;
1783  B1_im += gT12_re * b2_im;
1784  B1_im += gT12_im * b2_re;
1785 
1786  // multiply row 2
1787  spinorFloat A2_re = 0;
1788  A2_re += gT20_re * a0_re;
1789  A2_re -= gT20_im * a0_im;
1790  A2_re += gT21_re * a1_re;
1791  A2_re -= gT21_im * a1_im;
1792  A2_re += gT22_re * a2_re;
1793  A2_re -= gT22_im * a2_im;
1794  spinorFloat A2_im = 0;
1795  A2_im += gT20_re * a0_im;
1796  A2_im += gT20_im * a0_re;
1797  A2_im += gT21_re * a1_im;
1798  A2_im += gT21_im * a1_re;
1799  A2_im += gT22_re * a2_im;
1800  A2_im += gT22_im * a2_re;
1801  spinorFloat B2_re = 0;
1802  B2_re += gT20_re * b0_re;
1803  B2_re -= gT20_im * b0_im;
1804  B2_re += gT21_re * b1_re;
1805  B2_re -= gT21_im * b1_im;
1806  B2_re += gT22_re * b2_re;
1807  B2_re -= gT22_im * b2_im;
1808  spinorFloat B2_im = 0;
1809  B2_im += gT20_re * b0_im;
1810  B2_im += gT20_im * b0_re;
1811  B2_im += gT21_re * b1_im;
1812  B2_im += gT21_im * b1_re;
1813  B2_im += gT22_re * b2_im;
1814  B2_im += gT22_im * b2_re;
1815 
 // Accumulate with the opposite i-signs relative to the +Z hop.
1816  o00_re += A0_re;
1817  o00_im += A0_im;
1818  o10_re += B0_re;
1819  o10_im += B0_im;
1820  o20_re -= A0_im;
1821  o20_im += A0_re;
1822  o30_re += B0_im;
1823  o30_im -= B0_re;
1824 
1825  o01_re += A1_re;
1826  o01_im += A1_im;
1827  o11_re += B1_re;
1828  o11_im += B1_im;
1829  o21_re -= A1_im;
1830  o21_im += A1_re;
1831  o31_re += B1_im;
1832  o31_im -= B1_re;
1833 
1834  o02_re += A2_re;
1835  o02_im += A2_im;
1836  o12_re += B2_re;
1837  o12_im += B2_im;
1838  o22_re -= A2_im;
1839  o22_im += A2_re;
1840  o32_re += B2_im;
1841  o32_im -= B2_re;
1842 
1843 }
1844 
// --------------------------------------------------------------------------
// Hop from the forward T neighbour.  Projector P3+ zeroes the lower spin
// components (factor 2 on the upper ones), so only the upper half of the
// output is accumulated.  The section splits into two branches: with a
// temporal-gauge-fixed link (identity matrix, no gauge read/multiply) and
// the generic link-multiply path.
// NOTE(review): this listing elides the branch condition itself (original
// line 1865, presumably the gauge_fixed test — compare line 2123 in the
// backward-T hop) as well as lines 1867-1872, 1915-1920 (identity-link
// A?/B? assignments), 1938-1943 and 1989.  TODO confirm against the full
// generated source.
// --------------------------------------------------------------------------
1845 #ifdef MULTI_GPU
1846 if ( (kernel_type == INTERIOR_KERNEL && (!param.ghostDim[3] || x4<X4m1)) ||
1847  (kernel_type == EXTERIOR_KERNEL_T && x4==X4m1) )
1848 #endif
1849 {
1850  // Projector P3+
1851  // 2 0 0 0
1852  // 0 2 0 0
1853  // 0 0 0 0
1854  // 0 0 0 0
1855 
1856 #ifdef MULTI_GPU
1857  const int sp_idx = (kernel_type == INTERIOR_KERNEL) ? (x4==X4m1 ? X-X4X3X2X1mX3X2X1 : X+X3X2X1) >> 1 :
1858  face_idx + param.ghostOffset[static_cast<int>(kernel_type)];
1859 #else
1860  const int sp_idx = (x4==X4m1 ? X-X4X3X2X1mX3X2X1 : X+X3X2X1) >> 1;
1861 #endif
1862 
1863  const int ga_idx = sid;
1864 
 // Branch 1 (condition elided in this listing): gauge-fixed link == identity.
1866  {
1873 
1874 #ifdef MULTI_GPU
1875  if (kernel_type == INTERIOR_KERNEL) {
1876 #endif
1877 
 // Only the upper spin components are needed (READ_SPINOR_UP).
1878  // read spinor from device memory
1879  READ_SPINOR_UP(SPINORTEX, param.sp_stride, sp_idx, sp_idx);
1880 
1881  // project spinor into half spinors
1882  a0_re = +2*i00_re;
1883  a0_im = +2*i00_im;
1884  a1_re = +2*i01_re;
1885  a1_im = +2*i01_im;
1886  a2_re = +2*i02_re;
1887  a2_im = +2*i02_im;
1888  b0_re = +2*i10_re;
1889  b0_im = +2*i10_im;
1890  b1_re = +2*i11_re;
1891  b1_im = +2*i11_im;
1892  b2_re = +2*i12_re;
1893  b2_im = +2*i12_im;
1894 
1895 #ifdef MULTI_GPU
1896  } else {
1897 
1898  const int sp_stride_pad = ghostFace[static_cast<int>(kernel_type)];
1899  const int t_proj_scale = TPROJSCALE;
1900 
1901  // read half spinor from device memory
1902  READ_HALF_SPINOR(SPINORTEX, sp_stride_pad, sp_idx + (SPINOR_HOP/2)*sp_stride_pad, sp_norm_idx);
1903 
1904  a0_re = t_proj_scale*i00_re; a0_im = t_proj_scale*i00_im;
1905  a1_re = t_proj_scale*i01_re; a1_im = t_proj_scale*i01_im;
1906  a2_re = t_proj_scale*i02_re; a2_im = t_proj_scale*i02_im;
1907  b0_re = t_proj_scale*i10_re; b0_im = t_proj_scale*i10_im;
1908  b1_re = t_proj_scale*i11_re; b1_im = t_proj_scale*i11_im;
1909  b2_re = t_proj_scale*i12_re; b2_im = t_proj_scale*i12_im;
1910 
1911  }
1912 #endif // MULTI_GPU
1913 
 // NOTE(review): lines 1915-1920 (A?/B? = a?/b? pass-through for the
 // identity link) are elided here.
1914  // identity gauge matrix
1921 
1922  o00_re += A0_re;
1923  o00_im += A0_im;
1924  o10_re += B0_re;
1925  o10_im += B0_im;
1926 
1927  o01_re += A1_re;
1928  o01_im += A1_im;
1929  o11_re += B1_re;
1930  o11_im += B1_im;
1931 
1932  o02_re += A2_re;
1933  o02_im += A2_im;
1934  o12_re += B2_re;
1935  o12_im += B2_im;
1936 
 // Branch 2: generic link — read, reconstruct and multiply as usual.
1937  } else {
1944 
1945 #ifdef MULTI_GPU
1946  if (kernel_type == INTERIOR_KERNEL) {
1947 #endif
1948 
1949  // read spinor from device memory
1950  READ_SPINOR_UP(SPINORTEX, param.sp_stride, sp_idx, sp_idx);
1951 
1952  // project spinor into half spinors
1953  a0_re = +2*i00_re;
1954  a0_im = +2*i00_im;
1955  a1_re = +2*i01_re;
1956  a1_im = +2*i01_im;
1957  a2_re = +2*i02_re;
1958  a2_im = +2*i02_im;
1959  b0_re = +2*i10_re;
1960  b0_im = +2*i10_im;
1961  b1_re = +2*i11_re;
1962  b1_im = +2*i11_im;
1963  b2_re = +2*i12_re;
1964  b2_im = +2*i12_im;
1965 
1966 #ifdef MULTI_GPU
1967  } else {
1968 
1969  const int sp_stride_pad = ghostFace[static_cast<int>(kernel_type)];
1970  const int t_proj_scale = TPROJSCALE;
1971 
1972  // read half spinor from device memory
1973  READ_HALF_SPINOR(SPINORTEX, sp_stride_pad, sp_idx + (SPINOR_HOP/2)*sp_stride_pad, sp_norm_idx);
1974 
1975  a0_re = t_proj_scale*i00_re; a0_im = t_proj_scale*i00_im;
1976  a1_re = t_proj_scale*i01_re; a1_im = t_proj_scale*i01_im;
1977  a2_re = t_proj_scale*i02_re; a2_im = t_proj_scale*i02_im;
1978  b0_re = t_proj_scale*i10_re; b0_im = t_proj_scale*i10_im;
1979  b1_re = t_proj_scale*i11_re; b1_im = t_proj_scale*i11_im;
1980  b2_re = t_proj_scale*i12_re; b2_im = t_proj_scale*i12_im;
1981 
1982  }
1983 #endif // MULTI_GPU
1984 
1985  // read gauge matrix from device memory
1986  READ_GAUGE_MATRIX(G, GAUGE0TEX, 6, ga_idx, ga_stride);
1987 
1988  // reconstruct gauge matrix
1990 
1991  // multiply row 0
1992  spinorFloat A0_re = 0;
1993  A0_re += g00_re * a0_re;
1994  A0_re -= g00_im * a0_im;
1995  A0_re += g01_re * a1_re;
1996  A0_re -= g01_im * a1_im;
1997  A0_re += g02_re * a2_re;
1998  A0_re -= g02_im * a2_im;
1999  spinorFloat A0_im = 0;
2000  A0_im += g00_re * a0_im;
2001  A0_im += g00_im * a0_re;
2002  A0_im += g01_re * a1_im;
2003  A0_im += g01_im * a1_re;
2004  A0_im += g02_re * a2_im;
2005  A0_im += g02_im * a2_re;
2006  spinorFloat B0_re = 0;
2007  B0_re += g00_re * b0_re;
2008  B0_re -= g00_im * b0_im;
2009  B0_re += g01_re * b1_re;
2010  B0_re -= g01_im * b1_im;
2011  B0_re += g02_re * b2_re;
2012  B0_re -= g02_im * b2_im;
2013  spinorFloat B0_im = 0;
2014  B0_im += g00_re * b0_im;
2015  B0_im += g00_im * b0_re;
2016  B0_im += g01_re * b1_im;
2017  B0_im += g01_im * b1_re;
2018  B0_im += g02_re * b2_im;
2019  B0_im += g02_im * b2_re;
2020 
2021  // multiply row 1
2022  spinorFloat A1_re = 0;
2023  A1_re += g10_re * a0_re;
2024  A1_re -= g10_im * a0_im;
2025  A1_re += g11_re * a1_re;
2026  A1_re -= g11_im * a1_im;
2027  A1_re += g12_re * a2_re;
2028  A1_re -= g12_im * a2_im;
2029  spinorFloat A1_im = 0;
2030  A1_im += g10_re * a0_im;
2031  A1_im += g10_im * a0_re;
2032  A1_im += g11_re * a1_im;
2033  A1_im += g11_im * a1_re;
2034  A1_im += g12_re * a2_im;
2035  A1_im += g12_im * a2_re;
2036  spinorFloat B1_re = 0;
2037  B1_re += g10_re * b0_re;
2038  B1_re -= g10_im * b0_im;
2039  B1_re += g11_re * b1_re;
2040  B1_re -= g11_im * b1_im;
2041  B1_re += g12_re * b2_re;
2042  B1_re -= g12_im * b2_im;
2043  spinorFloat B1_im = 0;
2044  B1_im += g10_re * b0_im;
2045  B1_im += g10_im * b0_re;
2046  B1_im += g11_re * b1_im;
2047  B1_im += g11_im * b1_re;
2048  B1_im += g12_re * b2_im;
2049  B1_im += g12_im * b2_re;
2050 
2051  // multiply row 2
2052  spinorFloat A2_re = 0;
2053  A2_re += g20_re * a0_re;
2054  A2_re -= g20_im * a0_im;
2055  A2_re += g21_re * a1_re;
2056  A2_re -= g21_im * a1_im;
2057  A2_re += g22_re * a2_re;
2058  A2_re -= g22_im * a2_im;
2059  spinorFloat A2_im = 0;
2060  A2_im += g20_re * a0_im;
2061  A2_im += g20_im * a0_re;
2062  A2_im += g21_re * a1_im;
2063  A2_im += g21_im * a1_re;
2064  A2_im += g22_re * a2_im;
2065  A2_im += g22_im * a2_re;
2066  spinorFloat B2_re = 0;
2067  B2_re += g20_re * b0_re;
2068  B2_re -= g20_im * b0_im;
2069  B2_re += g21_re * b1_re;
2070  B2_re -= g21_im * b1_im;
2071  B2_re += g22_re * b2_re;
2072  B2_re -= g22_im * b2_im;
2073  spinorFloat B2_im = 0;
2074  B2_im += g20_re * b0_im;
2075  B2_im += g20_im * b0_re;
2076  B2_im += g21_re * b1_im;
2077  B2_im += g21_im * b1_re;
2078  B2_im += g22_re * b2_im;
2079  B2_im += g22_im * b2_re;
2080 
 // Only the upper spin components receive contributions (P3+ lower rows
 // are zero).
2081  o00_re += A0_re;
2082  o00_im += A0_im;
2083  o10_re += B0_re;
2084  o10_im += B0_im;
2085 
2086  o01_re += A1_re;
2087  o01_im += A1_im;
2088  o11_re += B1_re;
2089  o11_im += B1_im;
2090 
2091  o02_re += A2_re;
2092  o02_im += A2_im;
2093  o12_re += B2_re;
2094  o12_im += B2_im;
2095 
2096  }
2097 }
2098 
// --------------------------------------------------------------------------
// Hop from the backward T neighbour.  Projector P3- zeroes the upper spin
// components, so only the lower half (o2?, o3?) is accumulated.  As for
// the other backward hops, the link lives at the neighbour site and the
// transposed elements (gT??) are used; a temporal-gauge-fixed branch skips
// the link multiply entirely (identity matrix).
// NOTE(review): original lines 2125-2130, 2173-2178 (identity-link A?/B?
// assignments), 2196-2201 and 2247 are elided from this listing.
// --------------------------------------------------------------------------
2099 #ifdef MULTI_GPU
2100 if ( (kernel_type == INTERIOR_KERNEL && (!param.ghostDim[3] || x4>0)) ||
2101  (kernel_type == EXTERIOR_KERNEL_T && x4==0) )
2102 #endif
2103 {
2104  // Projector P3-
2105  // 0 0 0 0
2106  // 0 0 0 0
2107  // 0 0 2 0
2108  // 0 0 0 2
2109 
2110 #ifdef MULTI_GPU
2111  const int sp_idx = (kernel_type == INTERIOR_KERNEL) ? (x4==0 ? X+X4X3X2X1mX3X2X1 : X-X3X2X1) >> 1 :
2112  face_idx + param.ghostOffset[static_cast<int>(kernel_type)];
2113 #else
2114  const int sp_idx = (x4==0 ? X+X4X3X2X1mX3X2X1 : X-X3X2X1) >> 1;
2115 #endif
2116 
2117 #ifdef MULTI_GPU
2118  const int ga_idx = ((kernel_type == INTERIOR_KERNEL) ? sp_idx : Vh+face_idx);
2119 #else
2120  const int ga_idx = sp_idx;
2121 #endif
2122 
 // Gauge-fixed branch: link is identity for sites away from the last
 // time slice (ga_idx < X4X3X2X1hmX3X2X1h), so skip the gauge multiply.
2123  if (gauge_fixed && ga_idx < X4X3X2X1hmX3X2X1h)
2124  {
2131 
2132 #ifdef MULTI_GPU
2133  if (kernel_type == INTERIOR_KERNEL) {
2134 #endif
2135 
 // Only the lower spin components are needed (READ_SPINOR_DOWN).
2136  // read spinor from device memory
2137  READ_SPINOR_DOWN(SPINORTEX, param.sp_stride, sp_idx, sp_idx);
2138 
2139  // project spinor into half spinors
2140  a0_re = +2*i20_re;
2141  a0_im = +2*i20_im;
2142  a1_re = +2*i21_re;
2143  a1_im = +2*i21_im;
2144  a2_re = +2*i22_re;
2145  a2_im = +2*i22_im;
2146  b0_re = +2*i30_re;
2147  b0_im = +2*i30_im;
2148  b1_re = +2*i31_re;
2149  b1_im = +2*i31_im;
2150  b2_re = +2*i32_re;
2151  b2_im = +2*i32_im;
2152 
2153 #ifdef MULTI_GPU
2154  } else {
2155 
2156  const int sp_stride_pad = ghostFace[static_cast<int>(kernel_type)];
2157  const int t_proj_scale = TPROJSCALE;
2158 
2159  // read half spinor from device memory
2160  READ_HALF_SPINOR(SPINORTEX, sp_stride_pad, sp_idx, sp_norm_idx);
2161 
 // Ghost half spinors always arrive in the i0?/i1? slots regardless
 // of direction, hence the index change relative to the interior read.
2162  a0_re = t_proj_scale*i00_re; a0_im = t_proj_scale*i00_im;
2163  a1_re = t_proj_scale*i01_re; a1_im = t_proj_scale*i01_im;
2164  a2_re = t_proj_scale*i02_re; a2_im = t_proj_scale*i02_im;
2165  b0_re = t_proj_scale*i10_re; b0_im = t_proj_scale*i10_im;
2166  b1_re = t_proj_scale*i11_re; b1_im = t_proj_scale*i11_im;
2167  b2_re = t_proj_scale*i12_re; b2_im = t_proj_scale*i12_im;
2168 
2169  }
2170 #endif // MULTI_GPU
2171 
 // NOTE(review): lines 2173-2178 (A?/B? = a?/b? pass-through for the
 // identity link) are elided here.
2172  // identity gauge matrix
2179 
2180  o20_re += A0_re;
2181  o20_im += A0_im;
2182  o30_re += B0_re;
2183  o30_im += B0_im;
2184 
2185  o21_re += A1_re;
2186  o21_im += A1_im;
2187  o31_re += B1_re;
2188  o31_im += B1_im;
2189 
2190  o22_re += A2_re;
2191  o22_im += A2_im;
2192  o32_re += B2_re;
2193  o32_im += B2_im;
2194 
 // Generic branch: read, reconstruct and multiply by the transposed link.
2195  } else {
2202 
2203 #ifdef MULTI_GPU
2204  if (kernel_type == INTERIOR_KERNEL) {
2205 #endif
2206 
2207  // read spinor from device memory
2208  READ_SPINOR_DOWN(SPINORTEX, param.sp_stride, sp_idx, sp_idx);
2209 
2210  // project spinor into half spinors
2211  a0_re = +2*i20_re;
2212  a0_im = +2*i20_im;
2213  a1_re = +2*i21_re;
2214  a1_im = +2*i21_im;
2215  a2_re = +2*i22_re;
2216  a2_im = +2*i22_im;
2217  b0_re = +2*i30_re;
2218  b0_im = +2*i30_im;
2219  b1_re = +2*i31_re;
2220  b1_im = +2*i31_im;
2221  b2_re = +2*i32_re;
2222  b2_im = +2*i32_im;
2223 
2224 #ifdef MULTI_GPU
2225  } else {
2226 
2227  const int sp_stride_pad = ghostFace[static_cast<int>(kernel_type)];
2228  const int t_proj_scale = TPROJSCALE;
2229 
2230  // read half spinor from device memory
2231  READ_HALF_SPINOR(SPINORTEX, sp_stride_pad, sp_idx, sp_norm_idx);
2232 
2233  a0_re = t_proj_scale*i00_re; a0_im = t_proj_scale*i00_im;
2234  a1_re = t_proj_scale*i01_re; a1_im = t_proj_scale*i01_im;
2235  a2_re = t_proj_scale*i02_re; a2_im = t_proj_scale*i02_im;
2236  b0_re = t_proj_scale*i10_re; b0_im = t_proj_scale*i10_im;
2237  b1_re = t_proj_scale*i11_re; b1_im = t_proj_scale*i11_im;
2238  b2_re = t_proj_scale*i12_re; b2_im = t_proj_scale*i12_im;
2239 
2240  }
2241 #endif // MULTI_GPU
2242 
2243  // read gauge matrix from device memory
2244  READ_GAUGE_MATRIX(G, GAUGE1TEX, 7, ga_idx, ga_stride);
2245 
2246  // reconstruct gauge matrix
2248 
2249  // multiply row 0
2250  spinorFloat A0_re = 0;
2251  A0_re += gT00_re * a0_re;
2252  A0_re -= gT00_im * a0_im;
2253  A0_re += gT01_re * a1_re;
2254  A0_re -= gT01_im * a1_im;
2255  A0_re += gT02_re * a2_re;
2256  A0_re -= gT02_im * a2_im;
2257  spinorFloat A0_im = 0;
2258  A0_im += gT00_re * a0_im;
2259  A0_im += gT00_im * a0_re;
2260  A0_im += gT01_re * a1_im;
2261  A0_im += gT01_im * a1_re;
2262  A0_im += gT02_re * a2_im;
2263  A0_im += gT02_im * a2_re;
2264  spinorFloat B0_re = 0;
2265  B0_re += gT00_re * b0_re;
2266  B0_re -= gT00_im * b0_im;
2267  B0_re += gT01_re * b1_re;
2268  B0_re -= gT01_im * b1_im;
2269  B0_re += gT02_re * b2_re;
2270  B0_re -= gT02_im * b2_im;
2271  spinorFloat B0_im = 0;
2272  B0_im += gT00_re * b0_im;
2273  B0_im += gT00_im * b0_re;
2274  B0_im += gT01_re * b1_im;
2275  B0_im += gT01_im * b1_re;
2276  B0_im += gT02_re * b2_im;
2277  B0_im += gT02_im * b2_re;
2278 
2279  // multiply row 1
2280  spinorFloat A1_re = 0;
2281  A1_re += gT10_re * a0_re;
2282  A1_re -= gT10_im * a0_im;
2283  A1_re += gT11_re * a1_re;
2284  A1_re -= gT11_im * a1_im;
2285  A1_re += gT12_re * a2_re;
2286  A1_re -= gT12_im * a2_im;
2287  spinorFloat A1_im = 0;
2288  A1_im += gT10_re * a0_im;
2289  A1_im += gT10_im * a0_re;
2290  A1_im += gT11_re * a1_im;
2291  A1_im += gT11_im * a1_re;
2292  A1_im += gT12_re * a2_im;
2293  A1_im += gT12_im * a2_re;
2294  spinorFloat B1_re = 0;
2295  B1_re += gT10_re * b0_re;
2296  B1_re -= gT10_im * b0_im;
2297  B1_re += gT11_re * b1_re;
2298  B1_re -= gT11_im * b1_im;
2299  B1_re += gT12_re * b2_re;
2300  B1_re -= gT12_im * b2_im;
2301  spinorFloat B1_im = 0;
2302  B1_im += gT10_re * b0_im;
2303  B1_im += gT10_im * b0_re;
2304  B1_im += gT11_re * b1_im;
2305  B1_im += gT11_im * b1_re;
2306  B1_im += gT12_re * b2_im;
2307  B1_im += gT12_im * b2_re;
2308 
2309  // multiply row 2
2310  spinorFloat A2_re = 0;
2311  A2_re += gT20_re * a0_re;
2312  A2_re -= gT20_im * a0_im;
2313  A2_re += gT21_re * a1_re;
2314  A2_re -= gT21_im * a1_im;
2315  A2_re += gT22_re * a2_re;
2316  A2_re -= gT22_im * a2_im;
2317  spinorFloat A2_im = 0;
2318  A2_im += gT20_re * a0_im;
2319  A2_im += gT20_im * a0_re;
2320  A2_im += gT21_re * a1_im;
2321  A2_im += gT21_im * a1_re;
2322  A2_im += gT22_re * a2_im;
2323  A2_im += gT22_im * a2_re;
2324  spinorFloat B2_re = 0;
2325  B2_re += gT20_re * b0_re;
2326  B2_re -= gT20_im * b0_im;
2327  B2_re += gT21_re * b1_re;
2328  B2_re -= gT21_im * b1_im;
2329  B2_re += gT22_re * b2_re;
2330  B2_re -= gT22_im * b2_im;
2331  spinorFloat B2_im = 0;
2332  B2_im += gT20_re * b0_im;
2333  B2_im += gT20_im * b0_re;
2334  B2_im += gT21_re * b1_im;
2335  B2_im += gT21_im * b1_re;
2336  B2_im += gT22_re * b2_im;
2337  B2_im += gT22_im * b2_re;
2338 
 // Only the lower spin components receive contributions (P3- upper rows
 // are zero).
2339  o20_re += A0_re;
2340  o20_im += A0_im;
2341  o30_re += B0_re;
2342  o30_im += B0_im;
2343 
2344  o21_re += A1_re;
2345  o21_im += A1_im;
2346  o31_re += B1_re;
2347  o31_im += B1_im;
2348 
2349  o22_re += A2_re;
2350  o22_im += A2_im;
2351  o32_re += B2_re;
2352  o32_im += B2_im;
2353 
2354  }
2355 }
2356 
2357 #ifdef MULTI_GPU
2358 
2359 int incomplete = 0; // Have all 8 contributions been computed for this site?
2360 
2361 switch(kernel_type) { // intentional fall-through
2362 case INTERIOR_KERNEL:
2363  incomplete = incomplete || (param.commDim[3] && (x4==0 || x4==X4m1));
2364 case EXTERIOR_KERNEL_T:
2365  incomplete = incomplete || (param.commDim[2] && (x3==0 || x3==X3m1));
2366 case EXTERIOR_KERNEL_Z:
2367  incomplete = incomplete || (param.commDim[1] && (x2==0 || x2==X2m1));
2368 case EXTERIOR_KERNEL_Y:
2369  incomplete = incomplete || (param.commDim[0] && (x1==0 || x1==X1m1));
2370 }
2371 
2372 if (!incomplete)
2373 #endif // MULTI_GPU
2374 {
2375 #ifdef DSLASH_XPAY
2376  READ_ACCUM(ACCUMTEX, param.sp_stride)
2377 
2378 #ifndef CLOVER_TWIST_INV_DSLASH
2379 #ifndef CLOVER_TWIST_XPAY
2380  //perform invert twist first:
2381  APPLY_CLOVER_TWIST_INV(c, cinv, -a, o);
2382 #else
2383  APPLY_CLOVER_TWIST(c, -a, acc);
2384 #endif
2385 #endif
2386  o00_re = b*o00_re + acc00_re;
2387  o00_im = b*o00_im + acc00_im;
2388  o01_re = b*o01_re + acc01_re;
2389  o01_im = b*o01_im + acc01_im;
2390  o02_re = b*o02_re + acc02_re;
2391  o02_im = b*o02_im + acc02_im;
2392  o10_re = b*o10_re + acc10_re;
2393  o10_im = b*o10_im + acc10_im;
2394  o11_re = b*o11_re + acc11_re;
2395  o11_im = b*o11_im + acc11_im;
2396  o12_re = b*o12_re + acc12_re;
2397  o12_im = b*o12_im + acc12_im;
2398  o20_re = b*o20_re + acc20_re;
2399  o20_im = b*o20_im + acc20_im;
2400  o21_re = b*o21_re + acc21_re;
2401  o21_im = b*o21_im + acc21_im;
2402  o22_re = b*o22_re + acc22_re;
2403  o22_im = b*o22_im + acc22_im;
2404  o30_re = b*o30_re + acc30_re;
2405  o30_im = b*o30_im + acc30_im;
2406  o31_re = b*o31_re + acc31_re;
2407  o31_im = b*o31_im + acc31_im;
2408  o32_re = b*o32_re + acc32_re;
2409  o32_im = b*o32_im + acc32_im;
2410 #else //no XPAY
2411 #ifndef CLOVER_TWIST_INV_DSLASH
2412  APPLY_CLOVER_TWIST_INV(c, cinv, -a, o);
2413 #endif
2414 #endif
2415 }
2416 
2417 // write spinor field back to device memory
2418 WRITE_SPINOR(param.sp_stride);
2419 
2420 // undefine to prevent warning when precision is changed
2421 #undef spinorFloat
2422 #undef SHARED_STRIDE
2423 
2424 #undef g00_re
2425 #undef g00_im
2426 #undef g01_re
2427 #undef g01_im
2428 #undef g02_re
2429 #undef g02_im
2430 #undef g10_re
2431 #undef g10_im
2432 #undef g11_re
2433 #undef g11_im
2434 #undef g12_re
2435 #undef g12_im
2436 #undef g20_re
2437 #undef g20_im
2438 #undef g21_re
2439 #undef g21_im
2440 #undef g22_re
2441 #undef g22_im
2442 
2443 #undef i00_re
2444 #undef i00_im
2445 #undef i01_re
2446 #undef i01_im
2447 #undef i02_re
2448 #undef i02_im
2449 #undef i10_re
2450 #undef i10_im
2451 #undef i11_re
2452 #undef i11_im
2453 #undef i12_re
2454 #undef i12_im
2455 #undef i20_re
2456 #undef i20_im
2457 #undef i21_re
2458 #undef i21_im
2459 #undef i22_re
2460 #undef i22_im
2461 #undef i30_re
2462 #undef i30_im
2463 #undef i31_re
2464 #undef i31_im
2465 #undef i32_re
2466 #undef i32_im
2467 
2468 #undef c00_00_re
2469 #undef c01_01_re
2470 #undef c02_02_re
2471 #undef c10_10_re
2472 #undef c11_11_re
2473 #undef c12_12_re
2474 #undef c01_00_re
2475 #undef c01_00_im
2476 #undef c02_00_re
2477 #undef c02_00_im
2478 #undef c10_00_re
2479 #undef c10_00_im
2480 #undef c11_00_re
2481 #undef c11_00_im
2482 #undef c12_00_re
2483 #undef c12_00_im
2484 #undef c02_01_re
2485 #undef c02_01_im
2486 #undef c10_01_re
2487 #undef c10_01_im
2488 #undef c11_01_re
2489 #undef c11_01_im
2490 #undef c12_01_re
2491 #undef c12_01_im
2492 #undef c10_02_re
2493 #undef c10_02_im
2494 #undef c11_02_re
2495 #undef c11_02_im
2496 #undef c12_02_re
2497 #undef c12_02_im
2498 #undef c11_10_re
2499 #undef c11_10_im
2500 #undef c12_10_re
2501 #undef c12_10_im
2502 #undef c12_11_re
2503 #undef c12_11_im
2504 
2505 #undef cinv00_00_re
2506 #undef cinv01_01_re
2507 #undef cinv02_02_re
2508 #undef cinv10_10_re
2509 #undef cinv11_11_re
2510 #undef cinv12_12_re
2511 #undef cinv01_00_re
2512 #undef cinv01_00_im
2513 #undef cinv02_00_re
2514 #undef cinv02_00_im
2515 #undef cinv10_00_re
2516 #undef cinv10_00_im
2517 #undef cinv11_00_re
2518 #undef cinv11_00_im
2519 #undef cinv12_00_re
2520 #undef cinv12_00_im
2521 #undef cinv02_01_re
2522 #undef cinv02_01_im
2523 #undef cinv10_01_re
2524 #undef cinv10_01_im
2525 #undef cinv11_01_re
2526 #undef cinv11_01_im
2527 #undef cinv12_01_re
2528 #undef cinv12_01_im
2529 #undef cinv10_02_re
2530 #undef cinv10_02_im
2531 #undef cinv11_02_re
2532 #undef cinv11_02_im
2533 #undef cinv12_02_re
2534 #undef cinv12_02_im
2535 #undef cinv11_10_re
2536 #undef cinv11_10_im
2537 #undef cinv12_10_re
2538 #undef cinv12_10_im
2539 #undef cinv12_11_re
2540 #undef cinv12_11_im
2541 
2542 #undef acc00_re
2543 #undef acc00_im
2544 #undef acc01_re
2545 #undef acc01_im
2546 #undef acc02_re
2547 #undef acc02_im
2548 #undef acc10_re
2549 #undef acc10_im
2550 #undef acc11_re
2551 #undef acc11_im
2552 #undef acc12_re
2553 #undef acc12_im
2554 #undef acc20_re
2555 #undef acc20_im
2556 #undef acc21_re
2557 #undef acc21_im
2558 #undef acc22_re
2559 #undef acc22_im
2560 #undef acc30_re
2561 #undef acc30_im
2562 #undef acc31_re
2563 #undef acc31_im
2564 #undef acc32_re
2565 #undef acc32_im
2566 
2567 
2568 #undef o00_re
2569 #undef o00_im
2570 #undef o01_re
2571 #undef o01_im
2572 #undef o02_re
2573 #undef o02_im
2574 #undef o10_re
2575 #undef o10_im
2576 #undef o11_re
2577 #undef o11_im
2578 #undef o12_re
2579 #undef o12_im
2580 #undef o20_re
2581 #undef o20_im
2582 #undef o21_re
2583 #undef o21_im
2584 #undef o22_re
2585 #undef o22_im
2586 #undef o30_re
2587 
2588 #undef VOLATILE
READ_SPINOR_UP(SPINORTEX, param.sp_stride, sp_idx, sp_idx)
#define o02_re
spinorFloat a1_re
#define i22_im
#define g21_re
#define gT02_im
__constant__ int Vh
#define i00_re
READ_SPINOR(SPINORTEX, param.sp_stride, sp_idx, sp_idx)
#define APPLY_CLOVER_TWIST(c, a, reg)
Definition: tmc_core.h:1
__constant__ int X2
#define o21_im
#define acc01_re
spinorFloat B2_im
#define g21_im
#define i20_im
#define SHARED_STRIDE
__constant__ int X2X1mX1
#define g01_re
#define i30_re
__constant__ int X3X2X1mX2X1
#define gT10_re
#define i11_im
#define g22_re
__constant__ int X1
#define READ_INTERMEDIATE_SPINOR
Definition: covDev.h:144
#define g11_im
#define acc01_im
#define g10_im
int sp_idx
#define acc30_im
#define gT20_im
#define gT10_im
spinorFloat b2_re
VOLATILE spinorFloat o32_re
spinorFloat b1_re
#define o12_re
#define gT12_re
#define acc20_re
__constant__ int X3X2X1
spinorFloat A2_im
RECONSTRUCT_GAUGE_MATRIX(0)
#define gT21_re
spinorFloat B1_re
#define i32_im
#define i31_re
spinorFloat B0_re
READ_GAUGE_MATRIX(G, GAUGE0TEX, 0, ga_idx, ga_stride)
#define gT11_re
spinorFloat A1_re
#define g12_re
spinorFloat b0_re
#define g00_re
#define o22_im
const int ga_idx
#define g20_im
#define i21_im
#define i12_re
#define i30_im
#define i31_im
#define i01_re
#define g11_re
#define acc21_im
#define acc02_re
#define acc00_im
spinorFloat a2_re
QudaGaugeParam param
Definition: pack_test.cpp:17
#define gT01_im
#define acc12_re
#define o10_re
__constant__ int ghostFace[QUDA_MAX_DIM+1]
spinorFloat B1_im
#define APPLY_CLOVER_TWIST_INV(c, cinv, a, reg)
Definition: tmc_core.h:432
#define gT22_im
#define acc02_im
spinorFloat B2_re
#define gT21_im
spinorFloat b2_im
#define o20_re
#define o11_im
#define gT20_re
#define GAUGE0TEX
Definition: covDev.h:112
#define acc31_im
#define o22_re
#define acc31_re
#define acc11_re
VOLATILE spinorFloat o31_re
__shared__ char s_data[]
#define o00_re
#define gT00_im
spinorFloat A0_im
spinorFloat A0_re
spinorFloat a2_im
spinorFloat a0_im
#define i12_im
spinorFloat a0_re
spinorFloat b0_im
__constant__ int X2m1
#define i00_im
#define VOLATILE
#define SPINORTEX
Definition: clover_def.h:40
#define o21_re
READ_SPINOR_DOWN(SPINORTEX, param.sp_stride, sp_idx, sp_idx)
__constant__ int gauge_fixed
#define o01_im
#define gT00_re
__constant__ int X4X3X2X1mX3X2X1
const int dims[]
VOLATILE spinorFloat o31_im
#define acc11_im
spinorFloat A2_re
#define SPINOR_HOP
Definition: covDev.h:158
#define o00_im
#define g02_re
#define o12_im
#define i32_re
__constant__ int ga_stride
#define o30_re
#define g02_im
#define i01_im
#define spinorFloat
__constant__ int X1m1
#define gT11_im
__constant__ int X3
#define i02_im
#define acc32_im
#define acc20_im
#define gT02_re
#define acc30_re
#define i02_re
#define i11_re
#define o01_re
spinorFloat A1_im
#define acc10_re
#define acc10_im
VOLATILE spinorFloat o30_im
#define o20_im
#define o02_im
#define DSLASH_SHARED_FLOATS_PER_THREAD
#define GAUGE1TEX
Definition: covDev.h:113
#define i22_re
#define i10_im
__constant__ int X4m1
spinorFloat B0_im
#define i10_re
#define acc32_re
#define gT01_re
spinorFloat b1_im
#define gT12_im
#define g01_im
spinorFloat a1_im
#define READ_HALF_SPINOR
Definition: io_spinor.h:390
#define o11_re
#define INTERTEX
Definition: covDev.h:149
#define acc12_im
#define i20_re
#define acc00_re
#define o10_im
__constant__ int X4X3X2X1hmX3X2X1h
#define g22_im
#define acc22_im
#define g12_im
#define g20_re
coordsFromIndex< EVEN_X >(X, x1, x2, x3, x4, sid, param.parity, dims)
#define i21_re
KernelType kernel_type
#define gT22_re
#define acc22_re
WRITE_SPINOR(param.sp_stride)
VOLATILE spinorFloat * s
#define acc21_re
__constant__ int X4
__constant__ int X3m1
#define g10_re
#define TPROJSCALE
Definition: covDev.h:101
#define g00_im
VOLATILE spinorFloat o32_im
__constant__ int X2X1