QUDA  v0.7.0
A library for QCD on GPUs
All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Pages
tmc_dslash_dagger_fermi_core.h
Go to the documentation of this file.
// NOTE(review): auto-generated QUDA twisted-clover dslash fragment, rendered
// with embedded original source line numbers. This file is textually
// #included into a kernel body, so the fragments below are not
// self-contained functions; fixes would normally go to the code generator.
1 // *** CUDA DSLASH DAGGER ***
2 
// Shared-memory floats reserved per thread: 24 reals = the 12 complex
// spinor components stashed by WRITE_SPINOR_SHARED below.
3 #define DSLASH_SHARED_FLOATS_PER_THREAD 24
4 
5 
// The NVVM back end (CUDA >= 4.1, sm_20+) needs no volatile qualifier on the
// shared-memory accesses; the older Open64 back end does (generator decision
// -- presumably to inhibit unwanted optimisation; not verifiable here).
6 #if ((CUDA_VERSION >= 4010) && (__COMPUTE_CAPABILITY__ >= 200)) // NVVM compiler
7 #define VOLATILE
8 #else // Open64 compiler
9 #define VOLATILE volatile
10 #endif
// Map the 12 complex input-spinor components i{spin}{color}_{re,im} (spin
// 0..3, color 0..2) and the matching accumulator components
// acc{spin}{color}_{re,im} onto the vector registers I#/accum# filled by the
// spinor-read macros (defined elsewhere, not visible in this fragment).
// Double precision packs one complex number per double2 (I0..I11); single
// precision packs two per float4 (I0..I5).
11 // input spinor
12 #ifdef SPINOR_DOUBLE
13 #define spinorFloat double
14 #define WRITE_SPINOR_SHARED WRITE_SPINOR_SHARED_DOUBLE2
15 #define READ_SPINOR_SHARED READ_SPINOR_SHARED_DOUBLE2
16 #define i00_re I0.x
17 #define i00_im I0.y
18 #define i01_re I1.x
19 #define i01_im I1.y
20 #define i02_re I2.x
21 #define i02_im I2.y
22 #define i10_re I3.x
23 #define i10_im I3.y
24 #define i11_re I4.x
25 #define i11_im I4.y
26 #define i12_re I5.x
27 #define i12_im I5.y
28 #define i20_re I6.x
29 #define i20_im I6.y
30 #define i21_re I7.x
31 #define i21_im I7.y
32 #define i22_re I8.x
33 #define i22_im I8.y
34 #define i30_re I9.x
35 #define i30_im I9.y
36 #define i31_re I10.x
37 #define i31_im I10.y
38 #define i32_re I11.x
39 #define i32_im I11.y
40 #define acc00_re accum0.x
41 #define acc00_im accum0.y
42 #define acc01_re accum1.x
43 #define acc01_im accum1.y
44 #define acc02_re accum2.x
45 #define acc02_im accum2.y
46 #define acc10_re accum3.x
47 #define acc10_im accum3.y
48 #define acc11_re accum4.x
49 #define acc11_im accum4.y
50 #define acc12_re accum5.x
51 #define acc12_im accum5.y
52 #define acc20_re accum6.x
53 #define acc20_im accum6.y
54 #define acc21_re accum7.x
55 #define acc21_im accum7.y
56 #define acc22_re accum8.x
57 #define acc22_im accum8.y
58 #define acc30_re accum9.x
59 #define acc30_im accum9.y
60 #define acc31_re accum10.x
61 #define acc31_im accum10.y
62 #define acc32_re accum11.x
63 #define acc32_im accum11.y
64 #else
// Single precision: float4 layout, two complex components per register.
65 #define spinorFloat float
66 #define WRITE_SPINOR_SHARED WRITE_SPINOR_SHARED_FLOAT4
67 #define READ_SPINOR_SHARED READ_SPINOR_SHARED_FLOAT4
68 #define i00_re I0.x
69 #define i00_im I0.y
70 #define i01_re I0.z
71 #define i01_im I0.w
72 #define i02_re I1.x
73 #define i02_im I1.y
74 #define i10_re I1.z
75 #define i10_im I1.w
76 #define i11_re I2.x
77 #define i11_im I2.y
78 #define i12_re I2.z
79 #define i12_im I2.w
80 #define i20_re I3.x
81 #define i20_im I3.y
82 #define i21_re I3.z
83 #define i21_im I3.w
84 #define i22_re I4.x
85 #define i22_im I4.y
86 #define i30_re I4.z
87 #define i30_im I4.w
88 #define i31_re I5.x
89 #define i31_im I5.y
90 #define i32_re I5.z
91 #define i32_im I5.w
92 #define acc00_re accum0.x
93 #define acc00_im accum0.y
94 #define acc01_re accum0.z
95 #define acc01_im accum0.w
96 #define acc02_re accum1.x
97 #define acc02_im accum1.y
98 #define acc10_re accum1.z
99 #define acc10_im accum1.w
100 #define acc11_re accum2.x
101 #define acc11_im accum2.y
102 #define acc12_re accum2.z
103 #define acc12_im accum2.w
104 #define acc20_re accum3.x
105 #define acc20_im accum3.y
106 #define acc21_re accum3.z
107 #define acc21_im accum3.w
108 #define acc22_re accum4.x
109 #define acc22_im accum4.y
110 #define acc30_re accum4.z
111 #define acc30_im accum4.w
112 #define acc31_re accum5.x
113 #define acc31_im accum5.y
114 #define acc32_re accum5.z
115 #define acc32_im accum5.w
116 #endif // SPINOR_DOUBLE
117 
// Map the 3x3 complex gauge-link entries g{row}{col}_{re,im} onto the G#
// registers filled by READ_GAUGE_MATRIX. The FLOAT2 layout stores one
// complex number per 2-vector (G0..G8); the float4 layout packs two per
// register (G0..G4 -- only 18 reals, consistent with a reconstructed link).
118 // gauge link
119 #ifdef GAUGE_FLOAT2
120 #define g00_re G0.x
121 #define g00_im G0.y
122 #define g01_re G1.x
123 #define g01_im G1.y
124 #define g02_re G2.x
125 #define g02_im G2.y
126 #define g10_re G3.x
127 #define g10_im G3.y
128 #define g11_re G4.x
129 #define g11_im G4.y
130 #define g12_re G5.x
131 #define g12_im G5.y
132 #define g20_re G6.x
133 #define g20_im G6.y
134 #define g21_re G7.x
135 #define g21_im G7.y
136 #define g22_re G8.x
137 #define g22_im G8.y
138 
139 #else
140 #define g00_re G0.x
141 #define g00_im G0.y
142 #define g01_re G0.z
143 #define g01_im G0.w
144 #define g02_re G1.x
145 #define g02_im G1.y
146 #define g10_re G1.z
147 #define g10_im G1.w
148 #define g11_re G2.x
149 #define g11_im G2.y
150 #define g12_re G2.z
151 #define g12_im G2.w
152 #define g20_re G3.x
153 #define g20_im G3.y
154 #define g21_re G3.z
155 #define g21_im G3.w
156 #define g22_re G4.x
157 #define g22_im G4.y
158 
159 #endif // GAUGE_FLOAT2
160 
// Hermitian conjugate (dagger) of the link: gT[i][j] = conj(g[j][i]).
// Used when hopping in the backward direction.
161 // conjugated gauge link
162 #define gT00_re (+g00_re)
163 #define gT00_im (-g00_im)
164 #define gT01_re (+g10_re)
165 #define gT01_im (-g10_im)
166 #define gT02_re (+g20_re)
167 #define gT02_im (-g20_im)
168 #define gT10_re (+g01_re)
169 #define gT10_im (-g01_im)
170 #define gT11_re (+g11_re)
171 #define gT11_im (-g11_im)
172 #define gT12_re (+g21_re)
173 #define gT12_im (-g21_im)
174 #define gT20_re (+g02_re)
175 #define gT20_im (-g02_im)
176 #define gT21_re (+g12_re)
177 #define gT21_im (-g12_im)
178 #define gT22_re (+g22_re)
179 #define gT22_im (-g22_im)
180 
// Clover-term storage. The first chiral (upper 6x6) block is Hermitian:
// the 6 diagonal entries are stored as reals (c00_00_re..c12_12_re) and the
// 15 lower-triangle entries as complex pairs, packed into the C# registers
// read by the clover-read macros. Indices are {spin}{color} pairs.
181 // first chiral block of clover term
182 #ifdef CLOVER_DOUBLE
183 #define c00_00_re C0.x
184 #define c01_01_re C0.y
185 #define c02_02_re C1.x
186 #define c10_10_re C1.y
187 #define c11_11_re C2.x
188 #define c12_12_re C2.y
189 #define c01_00_re C3.x
190 #define c01_00_im C3.y
191 #define c02_00_re C4.x
192 #define c02_00_im C4.y
193 #define c10_00_re C5.x
194 #define c10_00_im C5.y
195 #define c11_00_re C6.x
196 #define c11_00_im C6.y
197 #define c12_00_re C7.x
198 #define c12_00_im C7.y
199 #define c02_01_re C8.x
200 #define c02_01_im C8.y
201 #define c10_01_re C9.x
202 #define c10_01_im C9.y
203 #define c11_01_re C10.x
204 #define c11_01_im C10.y
205 #define c12_01_re C11.x
206 #define c12_01_im C11.y
207 #define c10_02_re C12.x
208 #define c10_02_im C12.y
209 #define c11_02_re C13.x
210 #define c11_02_im C13.y
211 #define c12_02_re C14.x
212 #define c12_02_im C14.y
213 #define c11_10_re C15.x
214 #define c11_10_im C15.y
215 #define c12_10_re C16.x
216 #define c12_10_im C16.y
217 #define c12_11_re C17.x
218 #define c12_11_im C17.y
219 #else
220 #define c00_00_re C0.x
221 #define c01_01_re C0.y
222 #define c02_02_re C0.z
223 #define c10_10_re C0.w
224 #define c11_11_re C1.x
225 #define c12_12_re C1.y
226 #define c01_00_re C1.z
227 #define c01_00_im C1.w
228 #define c02_00_re C2.x
229 #define c02_00_im C2.y
230 #define c10_00_re C2.z
231 #define c10_00_im C2.w
232 #define c11_00_re C3.x
233 #define c11_00_im C3.y
234 #define c12_00_re C3.z
235 #define c12_00_im C3.w
236 #define c02_01_re C4.x
237 #define c02_01_im C4.y
238 #define c10_01_re C4.z
239 #define c10_01_im C4.w
240 #define c11_01_re C5.x
241 #define c11_01_im C5.y
242 #define c12_01_re C5.z
243 #define c12_01_im C5.w
244 #define c10_02_re C6.x
245 #define c10_02_im C6.y
246 #define c11_02_re C6.z
247 #define c11_02_im C6.w
248 #define c12_02_re C7.x
249 #define c12_02_im C7.y
250 #define c11_10_re C7.z
251 #define c11_10_im C7.w
252 #define c12_10_re C8.x
253 #define c12_10_im C8.y
254 #define c12_11_re C8.z
255 #define c12_11_im C8.w
256 #endif // CLOVER_DOUBLE
257 
// Upper-triangle entries derived by Hermitian symmetry:
// c{a}_{b} = conj(c{b}_{a}).
258 #define c00_01_re (+c01_00_re)
259 #define c00_01_im (-c01_00_im)
260 #define c00_02_re (+c02_00_re)
261 #define c00_02_im (-c02_00_im)
262 #define c01_02_re (+c02_01_re)
263 #define c01_02_im (-c02_01_im)
264 #define c00_10_re (+c10_00_re)
265 #define c00_10_im (-c10_00_im)
266 #define c01_10_re (+c10_01_re)
267 #define c01_10_im (-c10_01_im)
268 #define c02_10_re (+c10_02_re)
269 #define c02_10_im (-c10_02_im)
270 #define c00_11_re (+c11_00_re)
271 #define c00_11_im (-c11_00_im)
272 #define c01_11_re (+c11_01_re)
273 #define c01_11_im (-c11_01_im)
274 #define c02_11_re (+c11_02_re)
275 #define c02_11_im (-c11_02_im)
276 #define c10_11_re (+c11_10_re)
277 #define c10_11_im (-c11_10_im)
278 #define c00_12_re (+c12_00_re)
279 #define c00_12_im (-c12_00_im)
280 #define c01_12_re (+c12_01_re)
281 #define c01_12_im (-c12_01_im)
282 #define c02_12_re (+c12_02_re)
283 #define c02_12_im (-c12_02_im)
284 #define c10_12_re (+c12_10_re)
285 #define c10_12_im (-c12_10_im)
286 #define c11_12_re (+c12_11_re)
287 #define c11_12_im (-c12_11_im)
288 
// The second chiral block (spins 2,3) aliases the same registers as the
// first: the spin index is shifted by 2 and the block is re-read before use.
289 // second chiral block of clover term (reuses C0,...,C9)
290 #define c20_20_re c00_00_re
291 #define c21_20_re c01_00_re
292 #define c21_20_im c01_00_im
293 #define c22_20_re c02_00_re
294 #define c22_20_im c02_00_im
295 #define c30_20_re c10_00_re
296 #define c30_20_im c10_00_im
297 #define c31_20_re c11_00_re
298 #define c31_20_im c11_00_im
299 #define c32_20_re c12_00_re
300 #define c32_20_im c12_00_im
301 #define c20_21_re c00_01_re
302 #define c20_21_im c00_01_im
303 #define c21_21_re c01_01_re
304 #define c22_21_re c02_01_re
305 #define c22_21_im c02_01_im
306 #define c30_21_re c10_01_re
307 #define c30_21_im c10_01_im
308 #define c31_21_re c11_01_re
309 #define c31_21_im c11_01_im
310 #define c32_21_re c12_01_re
311 #define c32_21_im c12_01_im
312 #define c20_22_re c00_02_re
313 #define c20_22_im c00_02_im
314 #define c21_22_re c01_02_re
315 #define c21_22_im c01_02_im
316 #define c22_22_re c02_02_re
317 #define c30_22_re c10_02_re
318 #define c30_22_im c10_02_im
319 #define c31_22_re c11_02_re
320 #define c31_22_im c11_02_im
321 #define c32_22_re c12_02_re
322 #define c32_22_im c12_02_im
323 #define c20_30_re c00_10_re
324 #define c20_30_im c00_10_im
325 #define c21_30_re c01_10_re
326 #define c21_30_im c01_10_im
327 #define c22_30_re c02_10_re
328 #define c22_30_im c02_10_im
329 #define c30_30_re c10_10_re
330 #define c31_30_re c11_10_re
331 #define c31_30_im c11_10_im
332 #define c32_30_re c12_10_re
333 #define c32_30_im c12_10_im
334 #define c20_31_re c00_11_re
335 #define c20_31_im c00_11_im
336 #define c21_31_re c01_11_re
337 #define c21_31_im c01_11_im
338 #define c22_31_re c02_11_re
339 #define c22_31_im c02_11_im
340 #define c30_31_re c10_11_re
341 #define c30_31_im c10_11_im
342 #define c31_31_re c11_11_re
343 #define c32_31_re c12_11_re
344 #define c32_31_im c12_11_im
345 #define c20_32_re c00_12_re
346 #define c20_32_im c00_12_im
347 #define c21_32_re c01_12_re
348 #define c21_32_im c01_12_im
349 #define c22_32_re c02_12_re
350 #define c22_32_im c02_12_im
351 #define c30_32_re c10_12_re
352 #define c30_32_im c10_12_im
353 #define c31_32_re c11_12_re
354 #define c31_32_im c11_12_im
355 #define c32_32_re c12_12_re
356 
357 
// Inverted clover term: identical Hermitian packing to the direct term
// above, with a cinv prefix. NOTE(review): cinv## aliases the SAME C#
// registers as c##, so the direct and inverted terms cannot be held
// simultaneously -- confirm against the usage in tmc_core.h.
358 // first chiral block of inverted clover term
359 #ifdef CLOVER_DOUBLE
360 #define cinv00_00_re C0.x
361 #define cinv01_01_re C0.y
362 #define cinv02_02_re C1.x
363 #define cinv10_10_re C1.y
364 #define cinv11_11_re C2.x
365 #define cinv12_12_re C2.y
366 #define cinv01_00_re C3.x
367 #define cinv01_00_im C3.y
368 #define cinv02_00_re C4.x
369 #define cinv02_00_im C4.y
370 #define cinv10_00_re C5.x
371 #define cinv10_00_im C5.y
372 #define cinv11_00_re C6.x
373 #define cinv11_00_im C6.y
374 #define cinv12_00_re C7.x
375 #define cinv12_00_im C7.y
376 #define cinv02_01_re C8.x
377 #define cinv02_01_im C8.y
378 #define cinv10_01_re C9.x
379 #define cinv10_01_im C9.y
380 #define cinv11_01_re C10.x
381 #define cinv11_01_im C10.y
382 #define cinv12_01_re C11.x
383 #define cinv12_01_im C11.y
384 #define cinv10_02_re C12.x
385 #define cinv10_02_im C12.y
386 #define cinv11_02_re C13.x
387 #define cinv11_02_im C13.y
388 #define cinv12_02_re C14.x
389 #define cinv12_02_im C14.y
390 #define cinv11_10_re C15.x
391 #define cinv11_10_im C15.y
392 #define cinv12_10_re C16.x
393 #define cinv12_10_im C16.y
394 #define cinv12_11_re C17.x
395 #define cinv12_11_im C17.y
396 #else
397 #define cinv00_00_re C0.x
398 #define cinv01_01_re C0.y
399 #define cinv02_02_re C0.z
400 #define cinv10_10_re C0.w
401 #define cinv11_11_re C1.x
402 #define cinv12_12_re C1.y
403 #define cinv01_00_re C1.z
404 #define cinv01_00_im C1.w
405 #define cinv02_00_re C2.x
406 #define cinv02_00_im C2.y
407 #define cinv10_00_re C2.z
408 #define cinv10_00_im C2.w
409 #define cinv11_00_re C3.x
410 #define cinv11_00_im C3.y
411 #define cinv12_00_re C3.z
412 #define cinv12_00_im C3.w
413 #define cinv02_01_re C4.x
414 #define cinv02_01_im C4.y
415 #define cinv10_01_re C4.z
416 #define cinv10_01_im C4.w
417 #define cinv11_01_re C5.x
418 #define cinv11_01_im C5.y
419 #define cinv12_01_re C5.z
420 #define cinv12_01_im C5.w
421 #define cinv10_02_re C6.x
422 #define cinv10_02_im C6.y
423 #define cinv11_02_re C6.z
424 #define cinv11_02_im C6.w
425 #define cinv12_02_re C7.x
426 #define cinv12_02_im C7.y
427 #define cinv11_10_re C7.z
428 #define cinv11_10_im C7.w
429 #define cinv12_10_re C8.x
430 #define cinv12_10_im C8.y
431 #define cinv12_11_re C8.z
432 #define cinv12_11_im C8.w
433 #endif // CLOVER_DOUBLE
434 
// Upper triangle by Hermitian symmetry: cinv{a}_{b} = conj(cinv{b}_{a}).
435 #define cinv00_01_re (+cinv01_00_re)
436 #define cinv00_01_im (-cinv01_00_im)
437 #define cinv00_02_re (+cinv02_00_re)
438 #define cinv00_02_im (-cinv02_00_im)
439 #define cinv01_02_re (+cinv02_01_re)
440 #define cinv01_02_im (-cinv02_01_im)
441 #define cinv00_10_re (+cinv10_00_re)
442 #define cinv00_10_im (-cinv10_00_im)
443 #define cinv01_10_re (+cinv10_01_re)
444 #define cinv01_10_im (-cinv10_01_im)
445 #define cinv02_10_re (+cinv10_02_re)
446 #define cinv02_10_im (-cinv10_02_im)
447 #define cinv00_11_re (+cinv11_00_re)
448 #define cinv00_11_im (-cinv11_00_im)
449 #define cinv01_11_re (+cinv11_01_re)
450 #define cinv01_11_im (-cinv11_01_im)
451 #define cinv02_11_re (+cinv11_02_re)
452 #define cinv02_11_im (-cinv11_02_im)
453 #define cinv10_11_re (+cinv11_10_re)
454 #define cinv10_11_im (-cinv11_10_im)
455 #define cinv00_12_re (+cinv12_00_re)
456 #define cinv00_12_im (-cinv12_00_im)
457 #define cinv01_12_re (+cinv12_01_re)
458 #define cinv01_12_im (-cinv12_01_im)
459 #define cinv02_12_re (+cinv12_02_re)
460 #define cinv02_12_im (-cinv12_02_im)
461 #define cinv10_12_re (+cinv12_10_re)
462 #define cinv10_12_im (-cinv12_10_im)
463 #define cinv11_12_re (+cinv12_11_re)
464 #define cinv11_12_im (-cinv12_11_im)
465 
// Second chiral block aliases the first (spin index shifted by 2).
466 // second chiral block of inverted clover term (reuses C0,...,C9)
467 #define cinv20_20_re cinv00_00_re
468 #define cinv21_20_re cinv01_00_re
469 #define cinv21_20_im cinv01_00_im
470 #define cinv22_20_re cinv02_00_re
471 #define cinv22_20_im cinv02_00_im
472 #define cinv30_20_re cinv10_00_re
473 #define cinv30_20_im cinv10_00_im
474 #define cinv31_20_re cinv11_00_re
475 #define cinv31_20_im cinv11_00_im
476 #define cinv32_20_re cinv12_00_re
477 #define cinv32_20_im cinv12_00_im
478 #define cinv20_21_re cinv00_01_re
479 #define cinv20_21_im cinv00_01_im
480 #define cinv21_21_re cinv01_01_re
481 #define cinv22_21_re cinv02_01_re
482 #define cinv22_21_im cinv02_01_im
483 #define cinv30_21_re cinv10_01_re
484 #define cinv30_21_im cinv10_01_im
485 #define cinv31_21_re cinv11_01_re
486 #define cinv31_21_im cinv11_01_im
487 #define cinv32_21_re cinv12_01_re
488 #define cinv32_21_im cinv12_01_im
489 #define cinv20_22_re cinv00_02_re
490 #define cinv20_22_im cinv00_02_im
491 #define cinv21_22_re cinv01_02_re
492 #define cinv21_22_im cinv01_02_im
493 #define cinv22_22_re cinv02_02_re
494 #define cinv30_22_re cinv10_02_re
495 #define cinv30_22_im cinv10_02_im
496 #define cinv31_22_re cinv11_02_re
497 #define cinv31_22_im cinv11_02_im
498 #define cinv32_22_re cinv12_02_re
499 #define cinv32_22_im cinv12_02_im
500 #define cinv20_30_re cinv00_10_re
501 #define cinv20_30_im cinv00_10_im
502 #define cinv21_30_re cinv01_10_re
503 #define cinv21_30_im cinv01_10_im
504 #define cinv22_30_re cinv02_10_re
505 #define cinv22_30_im cinv02_10_im
506 #define cinv30_30_re cinv10_10_re
507 #define cinv31_30_re cinv11_10_re
508 #define cinv31_30_im cinv11_10_im
509 #define cinv32_30_re cinv12_10_re
510 #define cinv32_30_im cinv12_10_im
511 #define cinv20_31_re cinv00_11_re
512 #define cinv20_31_im cinv00_11_im
513 #define cinv21_31_re cinv01_11_re
514 #define cinv21_31_im cinv01_11_im
515 #define cinv22_31_re cinv02_11_re
516 #define cinv22_31_im cinv02_11_im
517 #define cinv30_31_re cinv10_11_re
518 #define cinv30_31_im cinv10_11_im
519 #define cinv31_31_re cinv11_11_re
520 #define cinv32_31_re cinv12_11_re
521 #define cinv32_31_im cinv12_11_im
522 #define cinv20_32_re cinv00_12_re
523 #define cinv20_32_im cinv00_12_im
524 #define cinv21_32_re cinv01_12_re
525 #define cinv21_32_im cinv01_12_im
526 #define cinv22_32_re cinv02_12_re
527 #define cinv22_32_im cinv02_12_im
528 #define cinv30_32_re cinv10_12_re
529 #define cinv30_32_im cinv10_12_im
530 #define cinv31_32_re cinv11_12_re
531 #define cinv31_32_im cinv11_12_im
532 #define cinv32_32_re cinv12_12_re
533 
534 
// Registers backing the clover chiral block: 18 double2 words (C0..C17) in
// double precision, or 9 float4 words (C0..C8) plus K (presumably the
// half-precision norm factor -- only declared for DD_PREC==2) otherwise.
// Declared up front so the ASSN (assignment) form of the clover-read macro
// can be used instead of a declaring READ. Skipped entirely when the
// twisted-clover-inverse variant owns these declarations.
535 #ifndef CLOVER_TWIST_INV_DSLASH
536 
537 // declare C## here and use ASSN below instead of READ
538 #ifdef CLOVER_DOUBLE
539 double2 C0;
540 double2 C1;
541 double2 C2;
542 double2 C3;
543 double2 C4;
544 double2 C5;
545 double2 C6;
546 double2 C7;
547 double2 C8;
548 double2 C9;
549 double2 C10;
550 double2 C11;
551 double2 C12;
552 double2 C13;
553 double2 C14;
554 double2 C15;
555 double2 C16;
556 double2 C17;
557 #else
558 float4 C0;
559 float4 C1;
560 float4 C2;
561 float4 C3;
562 float4 C4;
563 float4 C5;
564 float4 C6;
565 float4 C7;
566 float4 C8;
567 
568 #if (DD_PREC==2)
569 float K;
570 #endif
571 
572 #endif // CLOVER_DOUBLE
573 #endif
574 
// NOTE(review): original source lines 576-599 (the output-spinor register
// definitions o##_re / o##_im used throughout the code below) are omitted
// from this rendering -- note the jump 575 -> 600.
575 // output spinor
600 
// Stride between per-thread shared-memory arrays: 16 doubles / 32 floats
// (128 bytes either way), chosen to avoid bank conflicts (see comments).
601 #ifdef SPINOR_DOUBLE
602 #define SHARED_STRIDE 16 // to avoid bank conflicts on Fermi
603 #else
604 #define SHARED_STRIDE 32 // to avoid bank conflicts on Fermi
605 #endif
606 
// Project-local helpers supplying READ_GAUGE_MATRIX, READ_SPINOR,
// READ_CLOVER and the twisted-clover core macros used below.
607 #include "read_gauge.h"
608 #include "io_spinor.h"
609 #include "read_clover.h"
610 #include "tmc_core.h"
611 
// Per-thread state: 4-d lattice coordinates (x1..x4), full linear site
// index X, checkerboard thread index sid, and -- for multi-GPU halos --
// the face index and (half precision only) the ghost-norm index.
// NOTE(review): rendering omits original line 623 (numbering jumps 622->624).
612 int x1, x2, x3, x4;
613 int X;
614 
615 #if (defined MULTI_GPU) && (DD_PREC==2) // half precision
616 int sp_norm_idx;
617 #endif // MULTI_GPU half precision
618 
619 int sid;
620 
621 #ifdef MULTI_GPU
622 int face_idx;
624 #endif
625 
626  // Inline by hand for the moment and assume even dimensions
627  const int dims[] = {X1, X2, X3, X4};
628  coordsFromIndex3D<EVEN_X>(X, x1, x2, x3, x4, sid, param.parity, dims);
629 
630  // only need to check Y and Z dims currently since X and T set to match exactly
631  if (x2 >= X2) return;
632  if (x3 >= X3) return;
633 
// Zero the 12 complex output-spinor accumulators (o## are defined in the
// rendering-omitted lines 576-599); each direction hop below adds into them.
634  o00_re = 0; o00_im = 0;
635  o01_re = 0; o01_im = 0;
636  o02_re = 0; o02_im = 0;
637  o10_re = 0; o10_im = 0;
638  o11_re = 0; o11_im = 0;
639  o12_re = 0; o12_im = 0;
640  o20_re = 0; o20_im = 0;
641  o21_re = 0; o21_im = 0;
642  o22_re = 0; o22_im = 0;
643  o30_re = 0; o30_im = 0;
644  o31_re = 0; o31_im = 0;
645  o32_re = 0; o32_im = 0;
646 
// Exterior (halo) kernel path. The `}` below closes an interior-kernel
// brace opened in a part of the file not visible in this rendering.
// Recomputes sid, splits it into a face number (0/1) and an index within
// that face, then seeds the output accumulators from the partial result
// written by the interior kernel.
647 #ifdef MULTI_GPU
648 } else { // exterior kernel
649 
650  sid = blockIdx.x*blockDim.x + threadIdx.x;
651  if (sid >= param.threads) return;
652 
653  const int dim = static_cast<int>(kernel_type);
654  const int face_volume = (param.threads >> 1); // volume of one face
655  const int face_num = (sid >= face_volume); // is this thread updating face 0 or 1
656  face_idx = sid - face_num*face_volume; // index into the respective face
657 
658  // ghostOffset is scaled to include body (includes stride) and number of FloatN arrays (SPINOR_HOP)
659  // face_idx not sid since faces are spin projected and share the same volume index (modulo UP/DOWN reading)
660  //sp_idx = face_idx + param.ghostOffset[dim];
661 
662 #if (DD_PREC==2) // half precision
663  sp_norm_idx = sid + param.ghostNormOffset[static_cast<int>(kernel_type)];
664 #endif
665 
666  const int dims[] = {X1, X2, X3, X4};
667  coordsFromFaceIndex<1>(X, sid, x1, x2, x3, x4, face_idx, face_volume, dim, face_num, param.parity, dims);
668 
// Load the interior kernel's intermediate result and start accumulating
// from it.
669  READ_INTERMEDIATE_SPINOR(INTERTEX, param.sp_stride, sid, sid);
670 
671  o00_re = i00_re; o00_im = i00_im;
672  o01_re = i01_re; o01_im = i01_im;
673  o02_re = i02_re; o02_im = i02_im;
674  o10_re = i10_re; o10_im = i10_im;
675  o11_re = i11_re; o11_im = i11_im;
676  o12_re = i12_re; o12_im = i12_im;
677  o20_re = i20_re; o20_im = i20_im;
678  o21_re = i21_re; o21_im = i21_im;
679  o22_re = i22_re; o22_im = i22_im;
680  o30_re = i30_re; o30_im = i30_im;
681  o31_re = i31_re; o31_im = i31_im;
682  o32_re = i32_re; o32_im = i32_im;
683 }
684 #endif // MULTI_GPU
685 
686 
// Hop in the +X direction: spin-project with P0+, multiply the half spinor
// by the forward gauge link at this site, and accumulate into o## with the
// projector's reconstruction pattern.
// NOTE(review): this rendering omits several original lines (numbering gaps
// at 706-712, 760, 763, 770, 777, ...), presumably the gauge-matrix
// reconstruction macro and the `spinorFloat A0_re = 0;`-style declarations
// that the -X block below shows explicitly.
687 #ifdef MULTI_GPU
688 if ( (kernel_type == INTERIOR_KERNEL && (!param.ghostDim[0] || x1<X1m1)) ||
689  (kernel_type == EXTERIOR_KERNEL_X && x1==X1m1) )
690 #endif
691 {
692  // Projector P0+
693  // 1 0 0 i
694  // 0 1 i 0
695  // 0 -i 1 0
696  // -i 0 0 1
697 
// Neighbor checkerboard index: wrap at the lattice edge (x1==X1m1) for the
// interior kernel, or address the ghost zone for the exterior kernel.
698 #ifdef MULTI_GPU
699  const int sp_idx = (kernel_type == INTERIOR_KERNEL) ? (x1==X1m1 ? X-X1m1 : X+1) >> 1 :
700  face_idx + param.ghostOffset[static_cast<int>(kernel_type)];
701 #else
702  const int sp_idx = (x1==X1m1 ? X-X1m1 : X+1) >> 1;
703 #endif
704 
705  const int ga_idx = sid;
706 
713 
714 #ifdef MULTI_GPU
715  if (kernel_type == INTERIOR_KERNEL) {
716 #endif
717 
718  // read spinor from device memory
719  READ_SPINOR(SPINORTEX, param.sp_stride, sp_idx, sp_idx);
720 
// Cache the full spinor in shared memory so neighboring threads can reuse
// it for their own hops (see READ_SPINOR_SHARED in later blocks).
721  // store spinor into shared memory
722  WRITE_SPINOR_SHARED(threadIdx.x, threadIdx.y, threadIdx.z, i);
723 
724  // project spinor into half spinors
725  a0_re = +i00_re-i30_im;
726  a0_im = +i00_im+i30_re;
727  a1_re = +i01_re-i31_im;
728  a1_im = +i01_im+i31_re;
729  a2_re = +i02_re-i32_im;
730  a2_im = +i02_im+i32_re;
731  b0_re = +i10_re-i20_im;
732  b0_im = +i10_im+i20_re;
733  b1_re = +i11_re-i21_im;
734  b1_im = +i11_im+i21_re;
735  b2_re = +i12_re-i22_im;
736  b2_im = +i12_im+i22_re;
737 
738 #ifdef MULTI_GPU
739  } else {
740 
741  const int sp_stride_pad = ghostFace[static_cast<int>(kernel_type)];
742 
// Exterior kernel: the neighbor's spinor arrives already spin-projected in
// the ghost buffer, so read a half spinor and copy it straight through.
743  // read half spinor from device memory
744  READ_HALF_SPINOR(SPINORTEX, sp_stride_pad, sp_idx + (SPINOR_HOP/2)*sp_stride_pad, sp_norm_idx);
745 
746  a0_re = i00_re; a0_im = i00_im;
747  a1_re = i01_re; a1_im = i01_im;
748  a2_re = i02_re; a2_im = i02_im;
749  b0_re = i10_re; b0_im = i10_im;
750  b1_re = i11_re; b1_im = i11_im;
751  b2_re = i12_re; b2_im = i12_im;
752 
753  }
754 #endif // MULTI_GPU
755 
756  // read gauge matrix from device memory
757  READ_GAUGE_MATRIX(G, GAUGE0TEX, 0, ga_idx, ga_stride);
758 
759  // reconstruct gauge matrix
761 
// Complex 3x3 matrix-vector products: (A,B) = U * (a,b), expanded as
// fused multiply-adds on the real and imaginary parts.
762  // multiply row 0
764  A0_re += g00_re * a0_re;
765  A0_re -= g00_im * a0_im;
766  A0_re += g01_re * a1_re;
767  A0_re -= g01_im * a1_im;
768  A0_re += g02_re * a2_re;
769  A0_re -= g02_im * a2_im;
771  A0_im += g00_re * a0_im;
772  A0_im += g00_im * a0_re;
773  A0_im += g01_re * a1_im;
774  A0_im += g01_im * a1_re;
775  A0_im += g02_re * a2_im;
776  A0_im += g02_im * a2_re;
778  B0_re += g00_re * b0_re;
779  B0_re -= g00_im * b0_im;
780  B0_re += g01_re * b1_re;
781  B0_re -= g01_im * b1_im;
782  B0_re += g02_re * b2_re;
783  B0_re -= g02_im * b2_im;
785  B0_im += g00_re * b0_im;
786  B0_im += g00_im * b0_re;
787  B0_im += g01_re * b1_im;
788  B0_im += g01_im * b1_re;
789  B0_im += g02_re * b2_im;
790  B0_im += g02_im * b2_re;
791 
792  // multiply row 1
794  A1_re += g10_re * a0_re;
795  A1_re -= g10_im * a0_im;
796  A1_re += g11_re * a1_re;
797  A1_re -= g11_im * a1_im;
798  A1_re += g12_re * a2_re;
799  A1_re -= g12_im * a2_im;
801  A1_im += g10_re * a0_im;
802  A1_im += g10_im * a0_re;
803  A1_im += g11_re * a1_im;
804  A1_im += g11_im * a1_re;
805  A1_im += g12_re * a2_im;
806  A1_im += g12_im * a2_re;
808  B1_re += g10_re * b0_re;
809  B1_re -= g10_im * b0_im;
810  B1_re += g11_re * b1_re;
811  B1_re -= g11_im * b1_im;
812  B1_re += g12_re * b2_re;
813  B1_re -= g12_im * b2_im;
815  B1_im += g10_re * b0_im;
816  B1_im += g10_im * b0_re;
817  B1_im += g11_re * b1_im;
818  B1_im += g11_im * b1_re;
819  B1_im += g12_re * b2_im;
820  B1_im += g12_im * b2_re;
821 
822  // multiply row 2
824  A2_re += g20_re * a0_re;
825  A2_re -= g20_im * a0_im;
826  A2_re += g21_re * a1_re;
827  A2_re -= g21_im * a1_im;
828  A2_re += g22_re * a2_re;
829  A2_re -= g22_im * a2_im;
831  A2_im += g20_re * a0_im;
832  A2_im += g20_im * a0_re;
833  A2_im += g21_re * a1_im;
834  A2_im += g21_im * a1_re;
835  A2_im += g22_re * a2_im;
836  A2_im += g22_im * a2_re;
838  B2_re += g20_re * b0_re;
839  B2_re -= g20_im * b0_im;
840  B2_re += g21_re * b1_re;
841  B2_re -= g21_im * b1_im;
842  B2_re += g22_re * b2_re;
843  B2_re -= g22_im * b2_im;
845  B2_im += g20_re * b0_im;
846  B2_im += g20_im * b0_re;
847  B2_im += g21_re * b1_im;
848  B2_im += g21_im * b1_re;
849  B2_im += g22_re * b2_im;
850  B2_im += g22_im * b2_re;
851 
// Reconstruct the full spinor contribution from the half spinor: spins 2,3
// are (+/-i) times the transformed upper components, per P0+ above.
852  o00_re += A0_re;
853  o00_im += A0_im;
854  o10_re += B0_re;
855  o10_im += B0_im;
856  o20_re += B0_im;
857  o20_im -= B0_re;
858  o30_re += A0_im;
859  o30_im -= A0_re;
860 
861  o01_re += A1_re;
862  o01_im += A1_im;
863  o11_re += B1_re;
864  o11_im += B1_im;
865  o21_re += B1_im;
866  o21_im -= B1_re;
867  o31_re += A1_im;
868  o31_im -= A1_re;
869 
870  o02_re += A2_re;
871  o02_im += A2_im;
872  o12_re += B2_re;
873  o12_im += B2_im;
874  o22_re += B2_im;
875  o22_im -= B2_re;
876  o32_re += A2_im;
877  o32_im -= A2_re;
878 
879 }
880 
// Hop in the -X direction: spin-project with P0-, multiply by the daggered
// link (gT) stored at the backward-neighbor site, and accumulate.
// NOTE(review): rendering omits original lines 904-910 and 957 (presumably
// the half-spinor declarations and the gauge reconstruction macro).
881 #ifdef MULTI_GPU
882 if ( (kernel_type == INTERIOR_KERNEL && (!param.ghostDim[0] || x1>0)) ||
883  (kernel_type == EXTERIOR_KERNEL_X && x1==0) )
884 #endif
885 {
886  // Projector P0-
887  // 1 0 0 -i
888  // 0 1 -i 0
889  // 0 i 1 0
890  // i 0 0 1
891 
// Backward neighbor: wrap at x1==0, or ghost zone for the exterior kernel.
892 #ifdef MULTI_GPU
893  const int sp_idx = (kernel_type == INTERIOR_KERNEL) ? (x1==0 ? X+X1m1 : X-1) >> 1 :
894  face_idx + param.ghostOffset[static_cast<int>(kernel_type)];
895 #else
896  const int sp_idx = (x1==0 ? X+X1m1 : X-1) >> 1;
897 #endif
898 
// The backward link lives at the neighbor site (exterior: in the Vh-offset
// ghost-link region).
899 #ifdef MULTI_GPU
900  const int ga_idx = ((kernel_type == INTERIOR_KERNEL) ? sp_idx : Vh+face_idx);
901 #else
902  const int ga_idx = sp_idx;
903 #endif
904 
911 
912 #ifdef MULTI_GPU
913  if (kernel_type == INTERIOR_KERNEL) {
914 #endif
915 
// Interior: the x-1 neighbor's spinor was cached in shared memory by the
// +X block; sync before reading it from the adjacent thread's slot.
916  // load spinor from shared memory
917  int tx = (threadIdx.x > 0) ? threadIdx.x-1 : blockDim.x-1;
918  __syncthreads();
919  READ_SPINOR_SHARED(tx, threadIdx.y, threadIdx.z);
920 
921  // project spinor into half spinors
922  a0_re = +i00_re+i30_im;
923  a0_im = +i00_im-i30_re;
924  a1_re = +i01_re+i31_im;
925  a1_im = +i01_im-i31_re;
926  a2_re = +i02_re+i32_im;
927  a2_im = +i02_im-i32_re;
928  b0_re = +i10_re+i20_im;
929  b0_im = +i10_im-i20_re;
930  b1_re = +i11_re+i21_im;
931  b1_im = +i11_im-i21_re;
932  b2_re = +i12_re+i22_im;
933  b2_im = +i12_im-i22_re;
934 
935 #ifdef MULTI_GPU
936  } else {
937 
938  const int sp_stride_pad = ghostFace[static_cast<int>(kernel_type)];
939 
940  // read half spinor from device memory
941  READ_HALF_SPINOR(SPINORTEX, sp_stride_pad, sp_idx, sp_norm_idx);
942 
943  a0_re = i00_re; a0_im = i00_im;
944  a1_re = i01_re; a1_im = i01_im;
945  a2_re = i02_re; a2_im = i02_im;
946  b0_re = i10_re; b0_im = i10_im;
947  b1_re = i11_re; b1_im = i11_im;
948  b2_re = i12_re; b2_im = i12_im;
949 
950  }
951 #endif // MULTI_GPU
952 
953  // read gauge matrix from device memory
954  READ_GAUGE_MATRIX(G, GAUGE1TEX, 1, ga_idx, ga_stride);
955 
956  // reconstruct gauge matrix
958 
// (A,B) = U^dagger * (a,b), using the conjugate-transpose macros gT##.
959  // multiply row 0
960  spinorFloat A0_re = 0;
961  A0_re += gT00_re * a0_re;
962  A0_re -= gT00_im * a0_im;
963  A0_re += gT01_re * a1_re;
964  A0_re -= gT01_im * a1_im;
965  A0_re += gT02_re * a2_re;
966  A0_re -= gT02_im * a2_im;
967  spinorFloat A0_im = 0;
968  A0_im += gT00_re * a0_im;
969  A0_im += gT00_im * a0_re;
970  A0_im += gT01_re * a1_im;
971  A0_im += gT01_im * a1_re;
972  A0_im += gT02_re * a2_im;
973  A0_im += gT02_im * a2_re;
974  spinorFloat B0_re = 0;
975  B0_re += gT00_re * b0_re;
976  B0_re -= gT00_im * b0_im;
977  B0_re += gT01_re * b1_re;
978  B0_re -= gT01_im * b1_im;
979  B0_re += gT02_re * b2_re;
980  B0_re -= gT02_im * b2_im;
981  spinorFloat B0_im = 0;
982  B0_im += gT00_re * b0_im;
983  B0_im += gT00_im * b0_re;
984  B0_im += gT01_re * b1_im;
985  B0_im += gT01_im * b1_re;
986  B0_im += gT02_re * b2_im;
987  B0_im += gT02_im * b2_re;
988 
989  // multiply row 1
990  spinorFloat A1_re = 0;
991  A1_re += gT10_re * a0_re;
992  A1_re -= gT10_im * a0_im;
993  A1_re += gT11_re * a1_re;
994  A1_re -= gT11_im * a1_im;
995  A1_re += gT12_re * a2_re;
996  A1_re -= gT12_im * a2_im;
997  spinorFloat A1_im = 0;
998  A1_im += gT10_re * a0_im;
999  A1_im += gT10_im * a0_re;
1000  A1_im += gT11_re * a1_im;
1001  A1_im += gT11_im * a1_re;
1002  A1_im += gT12_re * a2_im;
1003  A1_im += gT12_im * a2_re;
1004  spinorFloat B1_re = 0;
1005  B1_re += gT10_re * b0_re;
1006  B1_re -= gT10_im * b0_im;
1007  B1_re += gT11_re * b1_re;
1008  B1_re -= gT11_im * b1_im;
1009  B1_re += gT12_re * b2_re;
1010  B1_re -= gT12_im * b2_im;
1011  spinorFloat B1_im = 0;
1012  B1_im += gT10_re * b0_im;
1013  B1_im += gT10_im * b0_re;
1014  B1_im += gT11_re * b1_im;
1015  B1_im += gT11_im * b1_re;
1016  B1_im += gT12_re * b2_im;
1017  B1_im += gT12_im * b2_re;
1018 
1019  // multiply row 2
1020  spinorFloat A2_re = 0;
1021  A2_re += gT20_re * a0_re;
1022  A2_re -= gT20_im * a0_im;
1023  A2_re += gT21_re * a1_re;
1024  A2_re -= gT21_im * a1_im;
1025  A2_re += gT22_re * a2_re;
1026  A2_re -= gT22_im * a2_im;
1027  spinorFloat A2_im = 0;
1028  A2_im += gT20_re * a0_im;
1029  A2_im += gT20_im * a0_re;
1030  A2_im += gT21_re * a1_im;
1031  A2_im += gT21_im * a1_re;
1032  A2_im += gT22_re * a2_im;
1033  A2_im += gT22_im * a2_re;
1034  spinorFloat B2_re = 0;
1035  B2_re += gT20_re * b0_re;
1036  B2_re -= gT20_im * b0_im;
1037  B2_re += gT21_re * b1_re;
1038  B2_re -= gT21_im * b1_im;
1039  B2_re += gT22_re * b2_re;
1040  B2_re -= gT22_im * b2_im;
1041  spinorFloat B2_im = 0;
1042  B2_im += gT20_re * b0_im;
1043  B2_im += gT20_im * b0_re;
1044  B2_im += gT21_re * b1_im;
1045  B2_im += gT21_im * b1_re;
1046  B2_im += gT22_re * b2_im;
1047  B2_im += gT22_im * b2_re;
1048 
// Reconstruction for P0-: signs on the lower spin components are opposite
// to the +X block, matching the projector matrix above.
1049  o00_re += A0_re;
1050  o00_im += A0_im;
1051  o10_re += B0_re;
1052  o10_im += B0_im;
1053  o20_re -= B0_im;
1054  o20_im += B0_re;
1055  o30_re -= A0_im;
1056  o30_im += A0_re;
1057 
1058  o01_re += A1_re;
1059  o01_im += A1_im;
1060  o11_re += B1_re;
1061  o11_im += B1_im;
1062  o21_re -= B1_im;
1063  o21_im += B1_re;
1064  o31_re -= A1_im;
1065  o31_im += A1_re;
1066 
1067  o02_re += A2_re;
1068  o02_im += A2_im;
1069  o12_re += B2_re;
1070  o12_im += B2_im;
1071  o22_re -= B2_im;
1072  o22_im += B2_re;
1073  o32_re -= A2_im;
1074  o32_im += A2_re;
1075 
1076 }
1077 
// Hop in the +Y direction (projector P1+). Interior threads read the y+1
// neighbor's spinor from shared memory when it lies inside the thread
// block, otherwise re-read it from device memory. This fragment is
// TRUNCATED: the chunk ends inside the row-1 multiply and the computation
// continues beyond this view (rendering also omits lines 1098-1103, 1169).
1078 #ifdef MULTI_GPU
1079 if ( (kernel_type == INTERIOR_KERNEL && (!param.ghostDim[1] || x2<X2m1)) ||
1080  (kernel_type == EXTERIOR_KERNEL_Y && x2==X2m1) )
1081 #endif
1082 {
1083  // Projector P1+
1084  // 1 0 0 1
1085  // 0 1 -1 0
1086  // 0 -1 1 0
1087  // 1 0 0 1
1088 
1089 #ifdef MULTI_GPU
1090  const int sp_idx = (kernel_type == INTERIOR_KERNEL) ? (x2==X2m1 ? X-X2X1mX1 : X+X1) >> 1 :
1091  face_idx + param.ghostOffset[static_cast<int>(kernel_type)];
1092 #else
1093  const int sp_idx = (x2==X2m1 ? X-X2X1mX1 : X+X1) >> 1;
1094 #endif
1095 
1096  const int ga_idx = sid;
1097 
1104 
1105 #ifdef MULTI_GPU
1106  if (kernel_type == INTERIOR_KERNEL) {
1107 #endif
1108 
// Last y-row of the block (when the block doesn't span X2): the neighbor
// wasn't cached by this block, so fall back to a device-memory read.
1109  if (threadIdx.y == blockDim.y-1 && blockDim.y < X2 ) {
1110  // read spinor from device memory
1111  READ_SPINOR(SPINORTEX, param.sp_stride, sp_idx, sp_idx);
1112 
1113  // project spinor into half spinors
1114  a0_re = +i00_re+i30_re;
1115  a0_im = +i00_im+i30_im;
1116  a1_re = +i01_re+i31_re;
1117  a1_im = +i01_im+i31_im;
1118  a2_re = +i02_re+i32_re;
1119  a2_im = +i02_im+i32_im;
1120  b0_re = +i10_re-i20_re;
1121  b0_im = +i10_im-i20_im;
1122  b1_re = +i11_re-i21_re;
1123  b1_im = +i11_im-i21_im;
1124  b2_re = +i12_re-i22_re;
1125  b2_im = +i12_im-i22_im;
1126  } else {
// The x-offset accounts for the checkerboard shift between y-rows.
1127  // load spinor from shared memory
1128  int tx = (threadIdx.x + blockDim.x - ((x1+1)&1) ) % blockDim.x;
1129  int ty = (threadIdx.y < blockDim.y - 1) ? threadIdx.y + 1 : 0;
1130  READ_SPINOR_SHARED(tx, ty, threadIdx.z);
1131 
1132  // project spinor into half spinors
1133  a0_re = +i00_re+i30_re;
1134  a0_im = +i00_im+i30_im;
1135  a1_re = +i01_re+i31_re;
1136  a1_im = +i01_im+i31_im;
1137  a2_re = +i02_re+i32_re;
1138  a2_im = +i02_im+i32_im;
1139  b0_re = +i10_re-i20_re;
1140  b0_im = +i10_im-i20_im;
1141  b1_re = +i11_re-i21_re;
1142  b1_im = +i11_im-i21_im;
1143  b2_re = +i12_re-i22_re;
1144  b2_im = +i12_im-i22_im;
1145  }
1146 
1147 #ifdef MULTI_GPU
1148  } else {
1149 
1150  const int sp_stride_pad = ghostFace[static_cast<int>(kernel_type)];
1151 
1152  // read half spinor from device memory
1153  READ_HALF_SPINOR(SPINORTEX, sp_stride_pad, sp_idx + (SPINOR_HOP/2)*sp_stride_pad, sp_norm_idx);
1154 
1155  a0_re = i00_re; a0_im = i00_im;
1156  a1_re = i01_re; a1_im = i01_im;
1157  a2_re = i02_re; a2_im = i02_im;
1158  b0_re = i10_re; b0_im = i10_im;
1159  b1_re = i11_re; b1_im = i11_im;
1160  b2_re = i12_re; b2_im = i12_im;
1161 
1162  }
1163 #endif // MULTI_GPU
1164 
1165  // read gauge matrix from device memory
1166  READ_GAUGE_MATRIX(G, GAUGE0TEX, 2, ga_idx, ga_stride);
1167 
1168  // reconstruct gauge matrix
1170 
// (A,B) = U * (a,b) for the +Y link; continues past the end of this chunk.
1171  // multiply row 0
1172  spinorFloat A0_re = 0;
1173  A0_re += g00_re * a0_re;
1174  A0_re -= g00_im * a0_im;
1175  A0_re += g01_re * a1_re;
1176  A0_re -= g01_im * a1_im;
1177  A0_re += g02_re * a2_re;
1178  A0_re -= g02_im * a2_im;
1179  spinorFloat A0_im = 0;
1180  A0_im += g00_re * a0_im;
1181  A0_im += g00_im * a0_re;
1182  A0_im += g01_re * a1_im;
1183  A0_im += g01_im * a1_re;
1184  A0_im += g02_re * a2_im;
1185  A0_im += g02_im * a2_re;
1186  spinorFloat B0_re = 0;
1187  B0_re += g00_re * b0_re;
1188  B0_re -= g00_im * b0_im;
1189  B0_re += g01_re * b1_re;
1190  B0_re -= g01_im * b1_im;
1191  B0_re += g02_re * b2_re;
1192  B0_re -= g02_im * b2_im;
1193  spinorFloat B0_im = 0;
1194  B0_im += g00_re * b0_im;
1195  B0_im += g00_im * b0_re;
1196  B0_im += g01_re * b1_im;
1197  B0_im += g01_im * b1_re;
1198  B0_im += g02_re * b2_im;
1199  B0_im += g02_im * b2_re;
1200 
1201  // multiply row 1
1202  spinorFloat A1_re = 0;
1203  A1_re += g10_re * a0_re;
1204  A1_re -= g10_im * a0_im;
1205  A1_re += g11_re * a1_re;
1206  A1_re -= g11_im * a1_im;
1207  A1_re += g12_re * a2_re;
1208  A1_re -= g12_im * a2_im;
1209  spinorFloat A1_im = 0;
1210  A1_im += g10_re * a0_im;
1211  A1_im += g10_im * a0_re;
1212  A1_im += g11_re * a1_im;
1213  A1_im += g11_im * a1_re;
1214  A1_im += g12_re * a2_im;
1215  A1_im += g12_im * a2_re;
1216  spinorFloat B1_re = 0;
1217  B1_re += g10_re * b0_re;
1218  B1_re -= g10_im * b0_im;
1219  B1_re += g11_re * b1_re;
1220  B1_re -= g11_im * b1_im;
1221  B1_re += g12_re * b2_re;
1222  B1_re -= g12_im * b2_im;
1223  spinorFloat B1_im = 0;
1224  B1_im += g10_re * b0_im;
1225  B1_im += g10_im * b0_re;
1226  B1_im += g11_re * b1_im;
1227  B1_im += g11_im * b1_re;
1228  B1_im += g12_re * b2_im;
1229  B1_im += g12_im * b2_re;
1230 
1231  // multiply row 2
1232  spinorFloat A2_re = 0;
1233  A2_re += g20_re * a0_re;
1234  A2_re -= g20_im * a0_im;
1235  A2_re += g21_re * a1_re;
1236  A2_re -= g21_im * a1_im;
1237  A2_re += g22_re * a2_re;
1238  A2_re -= g22_im * a2_im;
1239  spinorFloat A2_im = 0;
1240  A2_im += g20_re * a0_im;
1241  A2_im += g20_im * a0_re;
1242  A2_im += g21_re * a1_im;
1243  A2_im += g21_im * a1_re;
1244  A2_im += g22_re * a2_im;
1245  A2_im += g22_im * a2_re;
1246  spinorFloat B2_re = 0;
1247  B2_re += g20_re * b0_re;
1248  B2_re -= g20_im * b0_im;
1249  B2_re += g21_re * b1_re;
1250  B2_re -= g21_im * b1_im;
1251  B2_re += g22_re * b2_re;
1252  B2_re -= g22_im * b2_im;
1253  spinorFloat B2_im = 0;
1254  B2_im += g20_re * b0_im;
1255  B2_im += g20_im * b0_re;
1256  B2_im += g21_re * b1_im;
1257  B2_im += g21_im * b1_re;
1258  B2_im += g22_re * b2_im;
1259  B2_im += g22_im * b2_re;
1260 
1261  o00_re += A0_re;
1262  o00_im += A0_im;
1263  o10_re += B0_re;
1264  o10_im += B0_im;
1265  o20_re -= B0_re;
1266  o20_im -= B0_im;
1267  o30_re += A0_re;
1268  o30_im += A0_im;
1269 
1270  o01_re += A1_re;
1271  o01_im += A1_im;
1272  o11_re += B1_re;
1273  o11_im += B1_im;
1274  o21_re -= B1_re;
1275  o21_im -= B1_im;
1276  o31_re += A1_re;
1277  o31_im += A1_im;
1278 
1279  o02_re += A2_re;
1280  o02_im += A2_im;
1281  o12_re += B2_re;
1282  o12_im += B2_im;
1283  o22_re -= B2_re;
1284  o22_im -= B2_im;
1285  o32_re += A2_re;
1286  o32_im += A2_im;
1287 
1288 }
1289 
// ---------------------------------------------------------------------------
// Y-direction backward hop (dagger): gather the neighbour spinor at x2-1
// (or the Y-face ghost half spinor when MULTI_GPU and this is the boundary),
// apply the spin projector P1- (matrix shown below), multiply by the
// transposed link gT, and accumulate the result into the output spinor o**.
// Note: i00_re etc. are macros over the spinor registers (see file head).
// ---------------------------------------------------------------------------
1290 #ifdef MULTI_GPU
// Interior pass handles this hop unless the neighbour lives in the Y ghost
// zone; the EXTERIOR_KERNEL_Y pass then handles exactly the x2==0 sites.
1291 if ( (kernel_type == INTERIOR_KERNEL && (!param.ghostDim[1] || x2>0)) ||
1292  (kernel_type == EXTERIOR_KERNEL_Y && x2==0) )
1293 #endif
1294 {
1295  // Projector P1-
1296  // 1 0 0 -1
1297  // 0 1 1 0
1298  // 0 1 1 0
1299  // -1 0 0 1
1300 
1301 #ifdef MULTI_GPU
 // Neighbour (checkerboarded) site index: wrap around in Y for the interior
 // case; for the exterior pass index straight into the ghost buffer.
1302  const int sp_idx = (kernel_type == INTERIOR_KERNEL) ? (x2==0 ? X+X2X1mX1 : X-X1) >> 1 :
1303  face_idx + param.ghostOffset[static_cast<int>(kernel_type)];
1304 #else
1305  const int sp_idx = (x2==0 ? X+X2X1mX1 : X-X1) >> 1;
1306 #endif
1307 
1308 #ifdef MULTI_GPU
 // Backward hop uses the link stored at the NEIGHBOUR site; ghost links
 // live past the Vh body links.
1309  const int ga_idx = ((kernel_type == INTERIOR_KERNEL) ? sp_idx : Vh+face_idx);
1310 #else
1311  const int ga_idx = sp_idx;
1312 #endif
1313 
 // NOTE(review): doxygen rendering collapses lines 1314-1319 here
 // (presumably the spinorFloat a0..b2 declarations) — not visible in this view.
1320 
1321 #ifdef MULTI_GPU
1322  if (kernel_type == INTERIOR_KERNEL) {
1323 #endif
1324 
 // Neighbour outside this thread block's Y tile: fetch from device memory.
1325  if (threadIdx.y == 0 && blockDim.y < X2) {
1326  // read spinor from device memory
1327  READ_SPINOR(SPINORTEX, param.sp_stride, sp_idx, sp_idx);
1328 
1329  // project spinor into half spinors
1330  a0_re = +i00_re-i30_re;
1331  a0_im = +i00_im-i30_im;
1332  a1_re = +i01_re-i31_re;
1333  a1_im = +i01_im-i31_im;
1334  a2_re = +i02_re-i32_re;
1335  a2_im = +i02_im-i32_im;
1336  b0_re = +i10_re+i20_re;
1337  b0_im = +i10_im+i20_im;
1338  b1_re = +i11_re+i21_re;
1339  b1_im = +i11_im+i21_im;
1340  b2_re = +i12_re+i22_re;
1341  b2_im = +i12_im+i22_im;
1342  } else {
1343  // load spinor from shared memory
 // Neighbour thread (ty = threadIdx.y-1, wrapped) already staged its spinor
 // in shared memory; reuse it instead of re-reading global memory.
1344  int tx = (threadIdx.x + blockDim.x - ((x1+1)&1)) % blockDim.x;
1345  int ty = (threadIdx.y > 0) ? threadIdx.y - 1 : blockDim.y - 1;
1346  READ_SPINOR_SHARED(tx, ty, threadIdx.z);
1347 
1348  // project spinor into half spinors
1349  a0_re = +i00_re-i30_re;
1350  a0_im = +i00_im-i30_im;
1351  a1_re = +i01_re-i31_re;
1352  a1_im = +i01_im-i31_im;
1353  a2_re = +i02_re-i32_re;
1354  a2_im = +i02_im-i32_im;
1355  b0_re = +i10_re+i20_re;
1356  b0_im = +i10_im+i20_im;
1357  b1_re = +i11_re+i21_re;
1358  b1_im = +i11_im+i21_im;
1359  b2_re = +i12_re+i22_re;
1360  b2_im = +i12_im+i22_im;
1361  }
1362 
1363 #ifdef MULTI_GPU
1364  } else {
1365 
1366  const int sp_stride_pad = ghostFace[static_cast<int>(kernel_type)];
1367 
1368  // read half spinor from device memory
 // NOTE(review): only i00..i12 are consumed, so the ghost buffer appears to
 // hold the already-projected half spinor — confirm against the face packer.
 // Backward direction reads at offset 0 (forward hops add SPINOR_HOP/2*stride).
1369  READ_HALF_SPINOR(SPINORTEX, sp_stride_pad, sp_idx, sp_norm_idx);
1370 
1371  a0_re = i00_re; a0_im = i00_im;
1372  a1_re = i01_re; a1_im = i01_im;
1373  a2_re = i02_re; a2_im = i02_im;
1374  b0_re = i10_re; b0_im = i10_im;
1375  b1_re = i11_re; b1_im = i11_im;
1376  b2_re = i12_re; b2_im = i12_im;
1377 
1378  }
1379 #endif // MULTI_GPU
1380 
1381  // read gauge matrix from device memory
1382  READ_GAUGE_MATRIX(G, GAUGE1TEX, 3, ga_idx, ga_stride);
1383 
1384  // reconstruct gauge matrix
 // NOTE(review): rendering collapses line 1385 (the RECONSTRUCT macro call).
1386 
 // Complex 3x3 matrix-vector products: (A,B) = gT * (a,b), expanded so the
 // compiler can schedule independent FMAs.
1387  // multiply row 0
1388  spinorFloat A0_re = 0;
1389  A0_re += gT00_re * a0_re;
1390  A0_re -= gT00_im * a0_im;
1391  A0_re += gT01_re * a1_re;
1392  A0_re -= gT01_im * a1_im;
1393  A0_re += gT02_re * a2_re;
1394  A0_re -= gT02_im * a2_im;
1395  spinorFloat A0_im = 0;
1396  A0_im += gT00_re * a0_im;
1397  A0_im += gT00_im * a0_re;
1398  A0_im += gT01_re * a1_im;
1399  A0_im += gT01_im * a1_re;
1400  A0_im += gT02_re * a2_im;
1401  A0_im += gT02_im * a2_re;
1402  spinorFloat B0_re = 0;
1403  B0_re += gT00_re * b0_re;
1404  B0_re -= gT00_im * b0_im;
1405  B0_re += gT01_re * b1_re;
1406  B0_re -= gT01_im * b1_im;
1407  B0_re += gT02_re * b2_re;
1408  B0_re -= gT02_im * b2_im;
1409  spinorFloat B0_im = 0;
1410  B0_im += gT00_re * b0_im;
1411  B0_im += gT00_im * b0_re;
1412  B0_im += gT01_re * b1_im;
1413  B0_im += gT01_im * b1_re;
1414  B0_im += gT02_re * b2_im;
1415  B0_im += gT02_im * b2_re;
1416 
1417  // multiply row 1
1418  spinorFloat A1_re = 0;
1419  A1_re += gT10_re * a0_re;
1420  A1_re -= gT10_im * a0_im;
1421  A1_re += gT11_re * a1_re;
1422  A1_re -= gT11_im * a1_im;
1423  A1_re += gT12_re * a2_re;
1424  A1_re -= gT12_im * a2_im;
1425  spinorFloat A1_im = 0;
1426  A1_im += gT10_re * a0_im;
1427  A1_im += gT10_im * a0_re;
1428  A1_im += gT11_re * a1_im;
1429  A1_im += gT11_im * a1_re;
1430  A1_im += gT12_re * a2_im;
1431  A1_im += gT12_im * a2_re;
1432  spinorFloat B1_re = 0;
1433  B1_re += gT10_re * b0_re;
1434  B1_re -= gT10_im * b0_im;
1435  B1_re += gT11_re * b1_re;
1436  B1_re -= gT11_im * b1_im;
1437  B1_re += gT12_re * b2_re;
1438  B1_re -= gT12_im * b2_im;
1439  spinorFloat B1_im = 0;
1440  B1_im += gT10_re * b0_im;
1441  B1_im += gT10_im * b0_re;
1442  B1_im += gT11_re * b1_im;
1443  B1_im += gT11_im * b1_re;
1444  B1_im += gT12_re * b2_im;
1445  B1_im += gT12_im * b2_re;
1446 
1447  // multiply row 2
1448  spinorFloat A2_re = 0;
1449  A2_re += gT20_re * a0_re;
1450  A2_re -= gT20_im * a0_im;
1451  A2_re += gT21_re * a1_re;
1452  A2_re -= gT21_im * a1_im;
1453  A2_re += gT22_re * a2_re;
1454  A2_re -= gT22_im * a2_im;
1455  spinorFloat A2_im = 0;
1456  A2_im += gT20_re * a0_im;
1457  A2_im += gT20_im * a0_re;
1458  A2_im += gT21_re * a1_im;
1459  A2_im += gT21_im * a1_re;
1460  A2_im += gT22_re * a2_im;
1461  A2_im += gT22_im * a2_re;
1462  spinorFloat B2_re = 0;
1463  B2_re += gT20_re * b0_re;
1464  B2_re -= gT20_im * b0_im;
1465  B2_re += gT21_re * b1_re;
1466  B2_re -= gT21_im * b1_im;
1467  B2_re += gT22_re * b2_re;
1468  B2_re -= gT22_im * b2_im;
1469  spinorFloat B2_im = 0;
1470  B2_im += gT20_re * b0_im;
1471  B2_im += gT20_im * b0_re;
1472  B2_im += gT21_re * b1_im;
1473  B2_im += gT21_im * b1_re;
1474  B2_im += gT22_re * b2_im;
1475  B2_im += gT22_im * b2_re;
1476 
 // Reassemble the four spin components from the half spinor: for P1- the
 // lower components are (+B, -A), matching rows 2 and 3 of the matrix above.
1477  o00_re += A0_re;
1478  o00_im += A0_im;
1479  o10_re += B0_re;
1480  o10_im += B0_im;
1481  o20_re += B0_re;
1482  o20_im += B0_im;
1483  o30_re -= A0_re;
1484  o30_im -= A0_im;
1485 
1486  o01_re += A1_re;
1487  o01_im += A1_im;
1488  o11_re += B1_re;
1489  o11_im += B1_im;
1490  o21_re += B1_re;
1491  o21_im += B1_im;
1492  o31_re -= A1_re;
1493  o31_im -= A1_im;
1494 
1495  o02_re += A2_re;
1496  o02_im += A2_im;
1497  o12_re += B2_re;
1498  o12_im += B2_im;
1499  o22_re += B2_re;
1500  o22_im += B2_im;
1501  o32_re -= A2_re;
1502  o32_im -= A2_im;
1503 
1504 }
1505 
// ---------------------------------------------------------------------------
// Z-direction forward hop (dagger): gather the neighbour spinor at x3+1
// (or the Z-face ghost half spinor when MULTI_GPU and this is the boundary),
// apply the spin projector P2+ (matrix below, with imaginary couplings),
// multiply by the forward link g at this site, and accumulate into o**.
// ---------------------------------------------------------------------------
1506 #ifdef MULTI_GPU
// Interior pass handles this hop unless the neighbour is in the Z ghost
// zone; the EXTERIOR_KERNEL_Z pass then handles exactly the x3==X3m1 sites.
1507 if ( (kernel_type == INTERIOR_KERNEL && (!param.ghostDim[2] || x3<X3m1)) ||
1508  (kernel_type == EXTERIOR_KERNEL_Z && x3==X3m1) )
1509 #endif
1510 {
1511  // Projector P2+
1512  // 1 0 i 0
1513  // 0 1 0 -i
1514  // -i 0 1 0
1515  // 0 i 0 1
1516 
1517 #ifdef MULTI_GPU
 // Neighbour (checkerboarded) site index with Z wrap-around; exterior pass
 // indexes the ghost buffer instead.
1518  const int sp_idx = (kernel_type == INTERIOR_KERNEL) ? (x3==X3m1 ? X-X3X2X1mX2X1 : X+X2X1) >> 1 :
1519  face_idx + param.ghostOffset[static_cast<int>(kernel_type)];
1520 #else
1521  const int sp_idx = (x3==X3m1 ? X-X3X2X1mX2X1 : X+X2X1) >> 1;
1522 #endif
1523 
 // Forward hop uses the link stored at THIS site.
1524  const int ga_idx = sid;
1525 
 // NOTE(review): rendering collapses lines 1526-1531 here (presumably the
 // spinorFloat a0..b2 declarations) — not visible in this view.
1532 
1533 #ifdef MULTI_GPU
1534  if (kernel_type == INTERIOR_KERNEL) {
1535 #endif
1536 
 // Neighbour outside this thread block's Z tile: fetch from device memory.
1537  if (threadIdx.z == blockDim.z-1 && blockDim.z < X3) {
1538  // read spinor from device memory
1539  READ_SPINOR(SPINORTEX, param.sp_stride, sp_idx, sp_idx);
1540 
 // project spinor into half spinors; the +/-i couplings of P2+ turn into
 // real/imaginary component swaps with sign flips.
1541  // project spinor into half spinors
1542  a0_re = +i00_re-i20_im;
1543  a0_im = +i00_im+i20_re;
1544  a1_re = +i01_re-i21_im;
1545  a1_im = +i01_im+i21_re;
1546  a2_re = +i02_re-i22_im;
1547  a2_im = +i02_im+i22_re;
1548  b0_re = +i10_re+i30_im;
1549  b0_im = +i10_im-i30_re;
1550  b1_re = +i11_re+i31_im;
1551  b1_im = +i11_im-i31_re;
1552  b2_re = +i12_re+i32_im;
1553  b2_im = +i12_im-i32_re;
1554  } else {
1555  // load spinor from shared memory
 // Neighbour thread (tz = threadIdx.z+1, wrapped) already staged its spinor
 // in shared memory; reuse it instead of re-reading global memory.
1556  int tx = (threadIdx.x + blockDim.x - ((x1+1)&1) ) % blockDim.x;
1557  int tz = (threadIdx.z < blockDim.z - 1) ? threadIdx.z + 1 : 0;
1558  READ_SPINOR_SHARED(tx, threadIdx.y, tz);
1559 
1560  // project spinor into half spinors
1561  a0_re = +i00_re-i20_im;
1562  a0_im = +i00_im+i20_re;
1563  a1_re = +i01_re-i21_im;
1564  a1_im = +i01_im+i21_re;
1565  a2_re = +i02_re-i22_im;
1566  a2_im = +i02_im+i22_re;
1567  b0_re = +i10_re+i30_im;
1568  b0_im = +i10_im-i30_re;
1569  b1_re = +i11_re+i31_im;
1570  b1_im = +i11_im-i31_re;
1571  b2_re = +i12_re+i32_im;
1572  b2_im = +i12_im-i32_re;
1573  }
1574 
1575 #ifdef MULTI_GPU
1576  } else {
1577 
1578  const int sp_stride_pad = ghostFace[static_cast<int>(kernel_type)];
1579 
1580  // read half spinor from device memory
 // Forward direction reads the second half of the ghost buffer, hence the
 // (SPINOR_HOP/2)*sp_stride_pad offset. Only i00..i12 are consumed, so the
 // ghost data appears to be pre-projected — confirm against the face packer.
1581  READ_HALF_SPINOR(SPINORTEX, sp_stride_pad, sp_idx + (SPINOR_HOP/2)*sp_stride_pad, sp_norm_idx);
1582 
1583  a0_re = i00_re; a0_im = i00_im;
1584  a1_re = i01_re; a1_im = i01_im;
1585  a2_re = i02_re; a2_im = i02_im;
1586  b0_re = i10_re; b0_im = i10_im;
1587  b1_re = i11_re; b1_im = i11_im;
1588  b2_re = i12_re; b2_im = i12_im;
1589 
1590  }
1591 #endif // MULTI_GPU
1592 
1593  // read gauge matrix from device memory
1594  READ_GAUGE_MATRIX(G, GAUGE0TEX, 4, ga_idx, ga_stride);
1595 
1596  // reconstruct gauge matrix
 // NOTE(review): rendering collapses line 1597 (the RECONSTRUCT macro call).
1598 
 // Complex 3x3 matrix-vector products: (A,B) = g * (a,b).
1599  // multiply row 0
1600  spinorFloat A0_re = 0;
1601  A0_re += g00_re * a0_re;
1602  A0_re -= g00_im * a0_im;
1603  A0_re += g01_re * a1_re;
1604  A0_re -= g01_im * a1_im;
1605  A0_re += g02_re * a2_re;
1606  A0_re -= g02_im * a2_im;
1607  spinorFloat A0_im = 0;
1608  A0_im += g00_re * a0_im;
1609  A0_im += g00_im * a0_re;
1610  A0_im += g01_re * a1_im;
1611  A0_im += g01_im * a1_re;
1612  A0_im += g02_re * a2_im;
1613  A0_im += g02_im * a2_re;
1614  spinorFloat B0_re = 0;
1615  B0_re += g00_re * b0_re;
1616  B0_re -= g00_im * b0_im;
1617  B0_re += g01_re * b1_re;
1618  B0_re -= g01_im * b1_im;
1619  B0_re += g02_re * b2_re;
1620  B0_re -= g02_im * b2_im;
1621  spinorFloat B0_im = 0;
1622  B0_im += g00_re * b0_im;
1623  B0_im += g00_im * b0_re;
1624  B0_im += g01_re * b1_im;
1625  B0_im += g01_im * b1_re;
1626  B0_im += g02_re * b2_im;
1627  B0_im += g02_im * b2_re;
1628 
1629  // multiply row 1
1630  spinorFloat A1_re = 0;
1631  A1_re += g10_re * a0_re;
1632  A1_re -= g10_im * a0_im;
1633  A1_re += g11_re * a1_re;
1634  A1_re -= g11_im * a1_im;
1635  A1_re += g12_re * a2_re;
1636  A1_re -= g12_im * a2_im;
1637  spinorFloat A1_im = 0;
1638  A1_im += g10_re * a0_im;
1639  A1_im += g10_im * a0_re;
1640  A1_im += g11_re * a1_im;
1641  A1_im += g11_im * a1_re;
1642  A1_im += g12_re * a2_im;
1643  A1_im += g12_im * a2_re;
1644  spinorFloat B1_re = 0;
1645  B1_re += g10_re * b0_re;
1646  B1_re -= g10_im * b0_im;
1647  B1_re += g11_re * b1_re;
1648  B1_re -= g11_im * b1_im;
1649  B1_re += g12_re * b2_re;
1650  B1_re -= g12_im * b2_im;
1651  spinorFloat B1_im = 0;
1652  B1_im += g10_re * b0_im;
1653  B1_im += g10_im * b0_re;
1654  B1_im += g11_re * b1_im;
1655  B1_im += g11_im * b1_re;
1656  B1_im += g12_re * b2_im;
1657  B1_im += g12_im * b2_re;
1658 
1659  // multiply row 2
1660  spinorFloat A2_re = 0;
1661  A2_re += g20_re * a0_re;
1662  A2_re -= g20_im * a0_im;
1663  A2_re += g21_re * a1_re;
1664  A2_re -= g21_im * a1_im;
1665  A2_re += g22_re * a2_re;
1666  A2_re -= g22_im * a2_im;
1667  spinorFloat A2_im = 0;
1668  A2_im += g20_re * a0_im;
1669  A2_im += g20_im * a0_re;
1670  A2_im += g21_re * a1_im;
1671  A2_im += g21_im * a1_re;
1672  A2_im += g22_re * a2_im;
1673  A2_im += g22_im * a2_re;
1674  spinorFloat B2_re = 0;
1675  B2_re += g20_re * b0_re;
1676  B2_re -= g20_im * b0_im;
1677  B2_re += g21_re * b1_re;
1678  B2_re -= g21_im * b1_im;
1679  B2_re += g22_re * b2_re;
1680  B2_re -= g22_im * b2_im;
1681  spinorFloat B2_im = 0;
1682  B2_im += g20_re * b0_im;
1683  B2_im += g20_im * b0_re;
1684  B2_im += g21_re * b1_im;
1685  B2_im += g21_im * b1_re;
1686  B2_im += g22_re * b2_im;
1687  B2_im += g22_im * b2_re;
1688 
 // Reassemble the four spin components: for P2+ the lower components are
 // (-i*A, +i*B), i.e. (re,im) -> (+im,-re) and (-im,+re) respectively.
1689  o00_re += A0_re;
1690  o00_im += A0_im;
1691  o10_re += B0_re;
1692  o10_im += B0_im;
1693  o20_re += A0_im;
1694  o20_im -= A0_re;
1695  o30_re -= B0_im;
1696  o30_im += B0_re;
1697 
1698  o01_re += A1_re;
1699  o01_im += A1_im;
1700  o11_re += B1_re;
1701  o11_im += B1_im;
1702  o21_re += A1_im;
1703  o21_im -= A1_re;
1704  o31_re -= B1_im;
1705  o31_im += B1_re;
1706 
1707  o02_re += A2_re;
1708  o02_im += A2_im;
1709  o12_re += B2_re;
1710  o12_im += B2_im;
1711  o22_re += A2_im;
1712  o22_im -= A2_re;
1713  o32_re -= B2_im;
1714  o32_im += B2_re;
1715 
1716 }
1717 
// ---------------------------------------------------------------------------
// Z-direction backward hop (dagger): gather the neighbour spinor at x3-1
// (or the Z-face ghost half spinor when MULTI_GPU and this is the boundary),
// apply the spin projector P2- (matrix below), multiply by the transposed
// link gT at the neighbour site, and accumulate into o**.
// ---------------------------------------------------------------------------
1718 #ifdef MULTI_GPU
// Interior pass handles this hop unless the neighbour is in the Z ghost
// zone; the EXTERIOR_KERNEL_Z pass then handles exactly the x3==0 sites.
1719 if ( (kernel_type == INTERIOR_KERNEL && (!param.ghostDim[2] || x3>0)) ||
1720  (kernel_type == EXTERIOR_KERNEL_Z && x3==0) )
1721 #endif
1722 {
1723  // Projector P2-
1724  // 1 0 -i 0
1725  // 0 1 0 i
1726  // i 0 1 0
1727  // 0 -i 0 1
1728 
1729 #ifdef MULTI_GPU
 // Neighbour (checkerboarded) site index with Z wrap-around; exterior pass
 // indexes the ghost buffer instead.
1730  const int sp_idx = (kernel_type == INTERIOR_KERNEL) ? (x3==0 ? X+X3X2X1mX2X1 : X-X2X1) >> 1 :
1731  face_idx + param.ghostOffset[static_cast<int>(kernel_type)];
1732 #else
1733  const int sp_idx = (x3==0 ? X+X3X2X1mX2X1 : X-X2X1) >> 1;
1734 #endif
1735 
1736 #ifdef MULTI_GPU
 // Backward hop uses the link stored at the NEIGHBOUR site; ghost links
 // live past the Vh body links.
1737  const int ga_idx = ((kernel_type == INTERIOR_KERNEL) ? sp_idx : Vh+face_idx);
1738 #else
1739  const int ga_idx = sp_idx;
1740 #endif
1741 
 // NOTE(review): rendering collapses lines 1742-1747 here (presumably the
 // spinorFloat a0..b2 declarations) — not visible in this view.
1748 
1749 #ifdef MULTI_GPU
1750  if (kernel_type == INTERIOR_KERNEL) {
1751 #endif
1752 
 // Neighbour outside this thread block's Z tile: fetch from device memory.
1753  if (threadIdx.z == 0 && blockDim.z < X3) {
1754  // read spinor from device memory
1755  READ_SPINOR(SPINORTEX, param.sp_stride, sp_idx, sp_idx);
1756 
 // project spinor into half spinors; P2- couples with the opposite signs
 // to P2+ above.
1757  // project spinor into half spinors
1758  a0_re = +i00_re+i20_im;
1759  a0_im = +i00_im-i20_re;
1760  a1_re = +i01_re+i21_im;
1761  a1_im = +i01_im-i21_re;
1762  a2_re = +i02_re+i22_im;
1763  a2_im = +i02_im-i22_re;
1764  b0_re = +i10_re-i30_im;
1765  b0_im = +i10_im+i30_re;
1766  b1_re = +i11_re-i31_im;
1767  b1_im = +i11_im+i31_re;
1768  b2_re = +i12_re-i32_im;
1769  b2_im = +i12_im+i32_re;
1770  } else {
1771  // load spinor from shared memory
 // Neighbour thread (tz = threadIdx.z-1, wrapped) already staged its spinor
 // in shared memory; reuse it instead of re-reading global memory.
1772  int tx = (threadIdx.x + blockDim.x - ((x1+1)&1)) % blockDim.x;
1773  int tz = (threadIdx.z > 0) ? threadIdx.z - 1 : blockDim.z - 1;
1774  READ_SPINOR_SHARED(tx, threadIdx.y, tz);
1775 
1776  // project spinor into half spinors
1777  a0_re = +i00_re+i20_im;
1778  a0_im = +i00_im-i20_re;
1779  a1_re = +i01_re+i21_im;
1780  a1_im = +i01_im-i21_re;
1781  a2_re = +i02_re+i22_im;
1782  a2_im = +i02_im-i22_re;
1783  b0_re = +i10_re-i30_im;
1784  b0_im = +i10_im+i30_re;
1785  b1_re = +i11_re-i31_im;
1786  b1_im = +i11_im+i31_re;
1787  b2_re = +i12_re-i32_im;
1788  b2_im = +i12_im+i32_re;
1789  }
1790 
1791 #ifdef MULTI_GPU
1792  } else {
1793 
1794  const int sp_stride_pad = ghostFace[static_cast<int>(kernel_type)];
1795 
1796  // read half spinor from device memory
 // Backward direction reads at offset 0 in the ghost buffer; only i00..i12
 // are consumed (pre-projected half spinor — confirm against face packer).
1797  READ_HALF_SPINOR(SPINORTEX, sp_stride_pad, sp_idx, sp_norm_idx);
1798 
1799  a0_re = i00_re; a0_im = i00_im;
1800  a1_re = i01_re; a1_im = i01_im;
1801  a2_re = i02_re; a2_im = i02_im;
1802  b0_re = i10_re; b0_im = i10_im;
1803  b1_re = i11_re; b1_im = i11_im;
1804  b2_re = i12_re; b2_im = i12_im;
1805 
1806  }
1807 #endif // MULTI_GPU
1808 
1809  // read gauge matrix from device memory
1810  READ_GAUGE_MATRIX(G, GAUGE1TEX, 5, ga_idx, ga_stride);
1811 
1812  // reconstruct gauge matrix
 // NOTE(review): rendering collapses line 1813 (the RECONSTRUCT macro call).
1814 
 // Complex 3x3 matrix-vector products: (A,B) = gT * (a,b).
1815  // multiply row 0
1816  spinorFloat A0_re = 0;
1817  A0_re += gT00_re * a0_re;
1818  A0_re -= gT00_im * a0_im;
1819  A0_re += gT01_re * a1_re;
1820  A0_re -= gT01_im * a1_im;
1821  A0_re += gT02_re * a2_re;
1822  A0_re -= gT02_im * a2_im;
1823  spinorFloat A0_im = 0;
1824  A0_im += gT00_re * a0_im;
1825  A0_im += gT00_im * a0_re;
1826  A0_im += gT01_re * a1_im;
1827  A0_im += gT01_im * a1_re;
1828  A0_im += gT02_re * a2_im;
1829  A0_im += gT02_im * a2_re;
1830  spinorFloat B0_re = 0;
1831  B0_re += gT00_re * b0_re;
1832  B0_re -= gT00_im * b0_im;
1833  B0_re += gT01_re * b1_re;
1834  B0_re -= gT01_im * b1_im;
1835  B0_re += gT02_re * b2_re;
1836  B0_re -= gT02_im * b2_im;
1837  spinorFloat B0_im = 0;
1838  B0_im += gT00_re * b0_im;
1839  B0_im += gT00_im * b0_re;
1840  B0_im += gT01_re * b1_im;
1841  B0_im += gT01_im * b1_re;
1842  B0_im += gT02_re * b2_im;
1843  B0_im += gT02_im * b2_re;
1844 
1845  // multiply row 1
1846  spinorFloat A1_re = 0;
1847  A1_re += gT10_re * a0_re;
1848  A1_re -= gT10_im * a0_im;
1849  A1_re += gT11_re * a1_re;
1850  A1_re -= gT11_im * a1_im;
1851  A1_re += gT12_re * a2_re;
1852  A1_re -= gT12_im * a2_im;
1853  spinorFloat A1_im = 0;
1854  A1_im += gT10_re * a0_im;
1855  A1_im += gT10_im * a0_re;
1856  A1_im += gT11_re * a1_im;
1857  A1_im += gT11_im * a1_re;
1858  A1_im += gT12_re * a2_im;
1859  A1_im += gT12_im * a2_re;
1860  spinorFloat B1_re = 0;
1861  B1_re += gT10_re * b0_re;
1862  B1_re -= gT10_im * b0_im;
1863  B1_re += gT11_re * b1_re;
1864  B1_re -= gT11_im * b1_im;
1865  B1_re += gT12_re * b2_re;
1866  B1_re -= gT12_im * b2_im;
1867  spinorFloat B1_im = 0;
1868  B1_im += gT10_re * b0_im;
1869  B1_im += gT10_im * b0_re;
1870  B1_im += gT11_re * b1_im;
1871  B1_im += gT11_im * b1_re;
1872  B1_im += gT12_re * b2_im;
1873  B1_im += gT12_im * b2_re;
1874 
1875  // multiply row 2
1876  spinorFloat A2_re = 0;
1877  A2_re += gT20_re * a0_re;
1878  A2_re -= gT20_im * a0_im;
1879  A2_re += gT21_re * a1_re;
1880  A2_re -= gT21_im * a1_im;
1881  A2_re += gT22_re * a2_re;
1882  A2_re -= gT22_im * a2_im;
1883  spinorFloat A2_im = 0;
1884  A2_im += gT20_re * a0_im;
1885  A2_im += gT20_im * a0_re;
1886  A2_im += gT21_re * a1_im;
1887  A2_im += gT21_im * a1_re;
1888  A2_im += gT22_re * a2_im;
1889  A2_im += gT22_im * a2_re;
1890  spinorFloat B2_re = 0;
1891  B2_re += gT20_re * b0_re;
1892  B2_re -= gT20_im * b0_im;
1893  B2_re += gT21_re * b1_re;
1894  B2_re -= gT21_im * b1_im;
1895  B2_re += gT22_re * b2_re;
1896  B2_re -= gT22_im * b2_im;
1897  spinorFloat B2_im = 0;
1898  B2_im += gT20_re * b0_im;
1899  B2_im += gT20_im * b0_re;
1900  B2_im += gT21_re * b1_im;
1901  B2_im += gT21_im * b1_re;
1902  B2_im += gT22_re * b2_im;
1903  B2_im += gT22_im * b2_re;
1904 
 // Reassemble the four spin components: for P2- the lower components are
 // (+i*A, -i*B), the sign-mirror of the P2+ block above.
1905  o00_re += A0_re;
1906  o00_im += A0_im;
1907  o10_re += B0_re;
1908  o10_im += B0_im;
1909  o20_re -= A0_im;
1910  o20_im += A0_re;
1911  o30_re += B0_im;
1912  o30_im -= B0_re;
1913 
1914  o01_re += A1_re;
1915  o01_im += A1_im;
1916  o11_re += B1_re;
1917  o11_im += B1_im;
1918  o21_re -= A1_im;
1919  o21_im += A1_re;
1920  o31_re += B1_im;
1921  o31_im -= B1_re;
1922 
1923  o02_re += A2_re;
1924  o02_im += A2_im;
1925  o12_re += B2_re;
1926  o12_im += B2_im;
1927  o22_re -= A2_im;
1928  o22_im += A2_re;
1929  o32_re += B2_im;
1930  o32_im -= B2_re;
1931 
1932 }
1933 
// ---------------------------------------------------------------------------
// T-direction forward hop (dagger): with (anti)periodic-T projector P3+ only
// the UPPER two spin components survive (matrix below), so only a half
// spinor is read (READ_SPINOR_UP) and only o00..o12 are updated. When the
// gauge is temporally fixed (gauge_fixed) and the site is away from the
// T boundary, the link is the identity and the matrix multiply is skipped.
// ---------------------------------------------------------------------------
1934 #ifdef MULTI_GPU
// Interior pass handles this hop unless the neighbour is in the T ghost
// zone; the EXTERIOR_KERNEL_T pass then handles exactly the x4==X4m1 sites.
1935 if ( (kernel_type == INTERIOR_KERNEL && (!param.ghostDim[3] || x4<X4m1)) ||
1936  (kernel_type == EXTERIOR_KERNEL_T && x4==X4m1) )
1937 #endif
1938 {
1939  // Projector P3+
1940  // 2 0 0 0
1941  // 0 2 0 0
1942  // 0 0 0 0
1943  // 0 0 0 0
1944 
1945 #ifdef MULTI_GPU
 // Neighbour (checkerboarded) site index with T wrap-around; exterior pass
 // indexes the ghost buffer instead.
1946  const int sp_idx = (kernel_type == INTERIOR_KERNEL) ? (x4==X4m1 ? X-X4X3X2X1mX3X2X1 : X+X3X2X1) >> 1 :
1947  face_idx + param.ghostOffset[static_cast<int>(kernel_type)];
1948 #else
1949  const int sp_idx = (x4==X4m1 ? X-X4X3X2X1mX3X2X1 : X+X3X2X1) >> 1;
1950 #endif
1951 
 // Forward hop uses the link stored at THIS site.
1952  const int ga_idx = sid;
1953 
 // Temporal-gauge fast path: links are the identity except on the last
 // time slice (ga_idx >= X4X3X2X1hmX3X2X1h).
1954  if (gauge_fixed && ga_idx < X4X3X2X1hmX3X2X1h)
1955  {
 // NOTE(review): rendering collapses lines 1956-1961 here (presumably the
 // spinorFloat a0..b2 declarations) — not visible in this view.
1962 
1963 #ifdef MULTI_GPU
1964  if (kernel_type == INTERIOR_KERNEL) {
1965 #endif
1966 
1967  // read spinor from device memory
1968  READ_SPINOR_UP(SPINORTEX, param.sp_stride, sp_idx, sp_idx);
1969 
 // P3+ scales the surviving upper components by 2.
1970  // project spinor into half spinors
1971  a0_re = +2*i00_re;
1972  a0_im = +2*i00_im;
1973  a1_re = +2*i01_re;
1974  a1_im = +2*i01_im;
1975  a2_re = +2*i02_re;
1976  a2_im = +2*i02_im;
1977  b0_re = +2*i10_re;
1978  b0_im = +2*i10_im;
1979  b1_re = +2*i11_re;
1980  b1_im = +2*i11_im;
1981  b2_re = +2*i12_re;
1982  b2_im = +2*i12_im;
1983 
1984 #ifdef MULTI_GPU
1985  } else {
1986 
1987  const int sp_stride_pad = ghostFace[static_cast<int>(kernel_type)];
 // NOTE(review): TPROJSCALE presumably carries the factor-of-2 projector
 // normalization split between packing and this kernel — confirm.
1988  const int t_proj_scale = TPROJSCALE;
1989 
1990  // read half spinor from device memory
 // Forward direction reads the second half of the ghost buffer.
1991  READ_HALF_SPINOR(SPINORTEX, sp_stride_pad, sp_idx + (SPINOR_HOP/2)*sp_stride_pad, sp_norm_idx);
1992 
1993  a0_re = t_proj_scale*i00_re; a0_im = t_proj_scale*i00_im;
1994  a1_re = t_proj_scale*i01_re; a1_im = t_proj_scale*i01_im;
1995  a2_re = t_proj_scale*i02_re; a2_im = t_proj_scale*i02_im;
1996  b0_re = t_proj_scale*i10_re; b0_im = t_proj_scale*i10_im;
1997  b1_re = t_proj_scale*i11_re; b1_im = t_proj_scale*i11_im;
1998  b2_re = t_proj_scale*i12_re; b2_im = t_proj_scale*i12_im;
1999 
2000  }
2001 #endif // MULTI_GPU
2002 
2003  // identity gauge matrix
 // NOTE(review): rendering collapses lines 2004-2009 here (presumably the
 // trivial copies A*=a*, B*=b* used in place of the matrix multiply).
2010 
 // Only the upper two spin components receive contributions from P3+.
2011  o00_re += A0_re;
2012  o00_im += A0_im;
2013  o10_re += B0_re;
2014  o10_im += B0_im;
2015 
2016  o01_re += A1_re;
2017  o01_im += A1_im;
2018  o11_re += B1_re;
2019  o11_im += B1_im;
2020 
2021  o02_re += A2_re;
2022  o02_im += A2_im;
2023  o12_re += B2_re;
2024  o12_im += B2_im;
2025 
 // General path: full link multiply (non-gauge-fixed, or boundary slice).
2026  } else {
 // NOTE(review): rendering collapses lines 2027-2032 here (presumably the
 // spinorFloat a0..b2 declarations) — not visible in this view.
2033 
2034 #ifdef MULTI_GPU
2035  if (kernel_type == INTERIOR_KERNEL) {
2036 #endif
2037 
2038  // read spinor from device memory
2039  READ_SPINOR_UP(SPINORTEX, param.sp_stride, sp_idx, sp_idx);
2040 
2041  // project spinor into half spinors
2042  a0_re = +2*i00_re;
2043  a0_im = +2*i00_im;
2044  a1_re = +2*i01_re;
2045  a1_im = +2*i01_im;
2046  a2_re = +2*i02_re;
2047  a2_im = +2*i02_im;
2048  b0_re = +2*i10_re;
2049  b0_im = +2*i10_im;
2050  b1_re = +2*i11_re;
2051  b1_im = +2*i11_im;
2052  b2_re = +2*i12_re;
2053  b2_im = +2*i12_im;
2054 
2055 #ifdef MULTI_GPU
2056  } else {
2057 
2058  const int sp_stride_pad = ghostFace[static_cast<int>(kernel_type)];
2059  const int t_proj_scale = TPROJSCALE;
2060 
2061  // read half spinor from device memory
2062  READ_HALF_SPINOR(SPINORTEX, sp_stride_pad, sp_idx + (SPINOR_HOP/2)*sp_stride_pad, sp_norm_idx);
2063 
2064  a0_re = t_proj_scale*i00_re; a0_im = t_proj_scale*i00_im;
2065  a1_re = t_proj_scale*i01_re; a1_im = t_proj_scale*i01_im;
2066  a2_re = t_proj_scale*i02_re; a2_im = t_proj_scale*i02_im;
2067  b0_re = t_proj_scale*i10_re; b0_im = t_proj_scale*i10_im;
2068  b1_re = t_proj_scale*i11_re; b1_im = t_proj_scale*i11_im;
2069  b2_re = t_proj_scale*i12_re; b2_im = t_proj_scale*i12_im;
2070 
2071  }
2072 #endif // MULTI_GPU
2073 
2074  // read gauge matrix from device memory
2075  READ_GAUGE_MATRIX(G, GAUGE0TEX, 6, ga_idx, ga_stride);
2076 
2077  // reconstruct gauge matrix
 // NOTE(review): rendering collapses line 2078 (the RECONSTRUCT macro call).
2079 
 // Complex 3x3 matrix-vector products: (A,B) = g * (a,b).
2080  // multiply row 0
2081  spinorFloat A0_re = 0;
2082  A0_re += g00_re * a0_re;
2083  A0_re -= g00_im * a0_im;
2084  A0_re += g01_re * a1_re;
2085  A0_re -= g01_im * a1_im;
2086  A0_re += g02_re * a2_re;
2087  A0_re -= g02_im * a2_im;
2088  spinorFloat A0_im = 0;
2089  A0_im += g00_re * a0_im;
2090  A0_im += g00_im * a0_re;
2091  A0_im += g01_re * a1_im;
2092  A0_im += g01_im * a1_re;
2093  A0_im += g02_re * a2_im;
2094  A0_im += g02_im * a2_re;
2095  spinorFloat B0_re = 0;
2096  B0_re += g00_re * b0_re;
2097  B0_re -= g00_im * b0_im;
2098  B0_re += g01_re * b1_re;
2099  B0_re -= g01_im * b1_im;
2100  B0_re += g02_re * b2_re;
2101  B0_re -= g02_im * b2_im;
2102  spinorFloat B0_im = 0;
2103  B0_im += g00_re * b0_im;
2104  B0_im += g00_im * b0_re;
2105  B0_im += g01_re * b1_im;
2106  B0_im += g01_im * b1_re;
2107  B0_im += g02_re * b2_im;
2108  B0_im += g02_im * b2_re;
2109 
2110  // multiply row 1
2111  spinorFloat A1_re = 0;
2112  A1_re += g10_re * a0_re;
2113  A1_re -= g10_im * a0_im;
2114  A1_re += g11_re * a1_re;
2115  A1_re -= g11_im * a1_im;
2116  A1_re += g12_re * a2_re;
2117  A1_re -= g12_im * a2_im;
2118  spinorFloat A1_im = 0;
2119  A1_im += g10_re * a0_im;
2120  A1_im += g10_im * a0_re;
2121  A1_im += g11_re * a1_im;
2122  A1_im += g11_im * a1_re;
2123  A1_im += g12_re * a2_im;
2124  A1_im += g12_im * a2_re;
2125  spinorFloat B1_re = 0;
2126  B1_re += g10_re * b0_re;
2127  B1_re -= g10_im * b0_im;
2128  B1_re += g11_re * b1_re;
2129  B1_re -= g11_im * b1_im;
2130  B1_re += g12_re * b2_re;
2131  B1_re -= g12_im * b2_im;
2132  spinorFloat B1_im = 0;
2133  B1_im += g10_re * b0_im;
2134  B1_im += g10_im * b0_re;
2135  B1_im += g11_re * b1_im;
2136  B1_im += g11_im * b1_re;
2137  B1_im += g12_re * b2_im;
2138  B1_im += g12_im * b2_re;
2139 
2140  // multiply row 2
2141  spinorFloat A2_re = 0;
2142  A2_re += g20_re * a0_re;
2143  A2_re -= g20_im * a0_im;
2144  A2_re += g21_re * a1_re;
2145  A2_re -= g21_im * a1_im;
2146  A2_re += g22_re * a2_re;
2147  A2_re -= g22_im * a2_im;
2148  spinorFloat A2_im = 0;
2149  A2_im += g20_re * a0_im;
2150  A2_im += g20_im * a0_re;
2151  A2_im += g21_re * a1_im;
2152  A2_im += g21_im * a1_re;
2153  A2_im += g22_re * a2_im;
2154  A2_im += g22_im * a2_re;
2155  spinorFloat B2_re = 0;
2156  B2_re += g20_re * b0_re;
2157  B2_re -= g20_im * b0_im;
2158  B2_re += g21_re * b1_re;
2159  B2_re -= g21_im * b1_im;
2160  B2_re += g22_re * b2_re;
2161  B2_re -= g22_im * b2_im;
2162  spinorFloat B2_im = 0;
2163  B2_im += g20_re * b0_im;
2164  B2_im += g20_im * b0_re;
2165  B2_im += g21_re * b1_im;
2166  B2_im += g21_im * b1_re;
2167  B2_im += g22_re * b2_im;
2168  B2_im += g22_im * b2_re;
2169 
 // Only the upper two spin components receive contributions from P3+.
2170  o00_re += A0_re;
2171  o00_im += A0_im;
2172  o10_re += B0_re;
2173  o10_im += B0_im;
2174 
2175  o01_re += A1_re;
2176  o01_im += A1_im;
2177  o11_re += B1_re;
2178  o11_im += B1_im;
2179 
2180  o02_re += A2_re;
2181  o02_im += A2_im;
2182  o12_re += B2_re;
2183  o12_im += B2_im;
2184 
2185  }
2186 }
2187 
2188 #ifdef MULTI_GPU
2189 if ( (kernel_type == INTERIOR_KERNEL && (!param.ghostDim[3] || x4>0)) ||
2190  (kernel_type == EXTERIOR_KERNEL_T && x4==0) )
2191 #endif
2192 {
2193  // Projector P3-
2194  // 0 0 0 0
2195  // 0 0 0 0
2196  // 0 0 2 0
2197  // 0 0 0 2
2198 
2199 #ifdef MULTI_GPU
2200  const int sp_idx = (kernel_type == INTERIOR_KERNEL) ? (x4==0 ? X+X4X3X2X1mX3X2X1 : X-X3X2X1) >> 1 :
2201  face_idx + param.ghostOffset[static_cast<int>(kernel_type)];
2202 #else
2203  const int sp_idx = (x4==0 ? X+X4X3X2X1mX3X2X1 : X-X3X2X1) >> 1;
2204 #endif
2205 
2206 #ifdef MULTI_GPU
2207  const int ga_idx = ((kernel_type == INTERIOR_KERNEL) ? sp_idx : Vh+face_idx);
2208 #else
2209  const int ga_idx = sp_idx;
2210 #endif
2211 
2212  if (gauge_fixed && ga_idx < X4X3X2X1hmX3X2X1h)
2213  {
2220 
2221 #ifdef MULTI_GPU
2222  if (kernel_type == INTERIOR_KERNEL) {
2223 #endif
2224 
2225  // read spinor from device memory
2226  READ_SPINOR_DOWN(SPINORTEX, param.sp_stride, sp_idx, sp_idx);
2227 
2228  // project spinor into half spinors
2229  a0_re = +2*i20_re;
2230  a0_im = +2*i20_im;
2231  a1_re = +2*i21_re;
2232  a1_im = +2*i21_im;
2233  a2_re = +2*i22_re;
2234  a2_im = +2*i22_im;
2235  b0_re = +2*i30_re;
2236  b0_im = +2*i30_im;
2237  b1_re = +2*i31_re;
2238  b1_im = +2*i31_im;
2239  b2_re = +2*i32_re;
2240  b2_im = +2*i32_im;
2241 
2242 #ifdef MULTI_GPU
2243  } else {
2244 
2245  const int sp_stride_pad = ghostFace[static_cast<int>(kernel_type)];
2246  const int t_proj_scale = TPROJSCALE;
2247 
2248  // read half spinor from device memory
2249  READ_HALF_SPINOR(SPINORTEX, sp_stride_pad, sp_idx, sp_norm_idx);
2250 
2251  a0_re = t_proj_scale*i00_re; a0_im = t_proj_scale*i00_im;
2252  a1_re = t_proj_scale*i01_re; a1_im = t_proj_scale*i01_im;
2253  a2_re = t_proj_scale*i02_re; a2_im = t_proj_scale*i02_im;
2254  b0_re = t_proj_scale*i10_re; b0_im = t_proj_scale*i10_im;
2255  b1_re = t_proj_scale*i11_re; b1_im = t_proj_scale*i11_im;
2256  b2_re = t_proj_scale*i12_re; b2_im = t_proj_scale*i12_im;
2257 
2258  }
2259 #endif // MULTI_GPU
2260 
2261  // identity gauge matrix
2268 
2269  o20_re += A0_re;
2270  o20_im += A0_im;
2271  o30_re += B0_re;
2272  o30_im += B0_im;
2273 
2274  o21_re += A1_re;
2275  o21_im += A1_im;
2276  o31_re += B1_re;
2277  o31_im += B1_im;
2278 
2279  o22_re += A2_re;
2280  o22_im += A2_im;
2281  o32_re += B2_re;
2282  o32_im += B2_im;
2283 
2284  } else {
2291 
2292 #ifdef MULTI_GPU
2293  if (kernel_type == INTERIOR_KERNEL) {
2294 #endif
2295 
2296  // read spinor from device memory
2297  READ_SPINOR_DOWN(SPINORTEX, param.sp_stride, sp_idx, sp_idx);
2298 
2299  // project spinor into half spinors
2300  a0_re = +2*i20_re;
2301  a0_im = +2*i20_im;
2302  a1_re = +2*i21_re;
2303  a1_im = +2*i21_im;
2304  a2_re = +2*i22_re;
2305  a2_im = +2*i22_im;
2306  b0_re = +2*i30_re;
2307  b0_im = +2*i30_im;
2308  b1_re = +2*i31_re;
2309  b1_im = +2*i31_im;
2310  b2_re = +2*i32_re;
2311  b2_im = +2*i32_im;
2312 
2313 #ifdef MULTI_GPU
2314  } else {
2315 
2316  const int sp_stride_pad = ghostFace[static_cast<int>(kernel_type)];
2317  const int t_proj_scale = TPROJSCALE;
2318 
2319  // read half spinor from device memory
2320  READ_HALF_SPINOR(SPINORTEX, sp_stride_pad, sp_idx, sp_norm_idx);
2321 
2322  a0_re = t_proj_scale*i00_re; a0_im = t_proj_scale*i00_im;
2323  a1_re = t_proj_scale*i01_re; a1_im = t_proj_scale*i01_im;
2324  a2_re = t_proj_scale*i02_re; a2_im = t_proj_scale*i02_im;
2325  b0_re = t_proj_scale*i10_re; b0_im = t_proj_scale*i10_im;
2326  b1_re = t_proj_scale*i11_re; b1_im = t_proj_scale*i11_im;
2327  b2_re = t_proj_scale*i12_re; b2_im = t_proj_scale*i12_im;
2328 
2329  }
2330 #endif // MULTI_GPU
2331 
2332  // read gauge matrix from device memory
2333  READ_GAUGE_MATRIX(G, GAUGE1TEX, 7, ga_idx, ga_stride);
2334 
2335  // reconstruct gauge matrix
2337 
2338  // multiply row 0
2339  spinorFloat A0_re = 0;
2340  A0_re += gT00_re * a0_re;
2341  A0_re -= gT00_im * a0_im;
2342  A0_re += gT01_re * a1_re;
2343  A0_re -= gT01_im * a1_im;
2344  A0_re += gT02_re * a2_re;
2345  A0_re -= gT02_im * a2_im;
2346  spinorFloat A0_im = 0;
2347  A0_im += gT00_re * a0_im;
2348  A0_im += gT00_im * a0_re;
2349  A0_im += gT01_re * a1_im;
2350  A0_im += gT01_im * a1_re;
2351  A0_im += gT02_re * a2_im;
2352  A0_im += gT02_im * a2_re;
2353  spinorFloat B0_re = 0;
2354  B0_re += gT00_re * b0_re;
2355  B0_re -= gT00_im * b0_im;
2356  B0_re += gT01_re * b1_re;
2357  B0_re -= gT01_im * b1_im;
2358  B0_re += gT02_re * b2_re;
2359  B0_re -= gT02_im * b2_im;
2360  spinorFloat B0_im = 0;
2361  B0_im += gT00_re * b0_im;
2362  B0_im += gT00_im * b0_re;
2363  B0_im += gT01_re * b1_im;
2364  B0_im += gT01_im * b1_re;
2365  B0_im += gT02_re * b2_im;
2366  B0_im += gT02_im * b2_re;
2367 
2368  // multiply row 1
2369  spinorFloat A1_re = 0;
2370  A1_re += gT10_re * a0_re;
2371  A1_re -= gT10_im * a0_im;
2372  A1_re += gT11_re * a1_re;
2373  A1_re -= gT11_im * a1_im;
2374  A1_re += gT12_re * a2_re;
2375  A1_re -= gT12_im * a2_im;
2376  spinorFloat A1_im = 0;
2377  A1_im += gT10_re * a0_im;
2378  A1_im += gT10_im * a0_re;
2379  A1_im += gT11_re * a1_im;
2380  A1_im += gT11_im * a1_re;
2381  A1_im += gT12_re * a2_im;
2382  A1_im += gT12_im * a2_re;
2383  spinorFloat B1_re = 0;
2384  B1_re += gT10_re * b0_re;
2385  B1_re -= gT10_im * b0_im;
2386  B1_re += gT11_re * b1_re;
2387  B1_re -= gT11_im * b1_im;
2388  B1_re += gT12_re * b2_re;
2389  B1_re -= gT12_im * b2_im;
2390  spinorFloat B1_im = 0;
2391  B1_im += gT10_re * b0_im;
2392  B1_im += gT10_im * b0_re;
2393  B1_im += gT11_re * b1_im;
2394  B1_im += gT11_im * b1_re;
2395  B1_im += gT12_re * b2_im;
2396  B1_im += gT12_im * b2_re;
2397 
2398  // multiply row 2
2399  spinorFloat A2_re = 0;
2400  A2_re += gT20_re * a0_re;
2401  A2_re -= gT20_im * a0_im;
2402  A2_re += gT21_re * a1_re;
2403  A2_re -= gT21_im * a1_im;
2404  A2_re += gT22_re * a2_re;
2405  A2_re -= gT22_im * a2_im;
2406  spinorFloat A2_im = 0;
2407  A2_im += gT20_re * a0_im;
2408  A2_im += gT20_im * a0_re;
2409  A2_im += gT21_re * a1_im;
2410  A2_im += gT21_im * a1_re;
2411  A2_im += gT22_re * a2_im;
2412  A2_im += gT22_im * a2_re;
2413  spinorFloat B2_re = 0;
2414  B2_re += gT20_re * b0_re;
2415  B2_re -= gT20_im * b0_im;
2416  B2_re += gT21_re * b1_re;
2417  B2_re -= gT21_im * b1_im;
2418  B2_re += gT22_re * b2_re;
2419  B2_re -= gT22_im * b2_im;
2420  spinorFloat B2_im = 0;
2421  B2_im += gT20_re * b0_im;
2422  B2_im += gT20_im * b0_re;
2423  B2_im += gT21_re * b1_im;
2424  B2_im += gT21_im * b1_re;
2425  B2_im += gT22_re * b2_im;
2426  B2_im += gT22_im * b2_re;
2427 
2428  o20_re += A0_re;
2429  o20_im += A0_im;
2430  o30_re += B0_re;
2431  o30_im += B0_im;
2432 
2433  o21_re += A1_re;
2434  o21_im += A1_im;
2435  o31_re += B1_re;
2436  o31_im += B1_im;
2437 
2438  o22_re += A2_re;
2439  o22_im += A2_im;
2440  o32_re += B2_re;
2441  o32_im += B2_im;
2442 
2443  }
2444 }
2445 
2446 #ifdef MULTI_GPU
2447 
2448 int incomplete = 0; // Have all 8 contributions been computed for this site?
2449 
2450 switch(kernel_type) { // intentional fall-through
2451 case INTERIOR_KERNEL:
2452  incomplete = incomplete || (param.commDim[3] && (x4==0 || x4==X4m1));
2453 case EXTERIOR_KERNEL_T:
2454  incomplete = incomplete || (param.commDim[2] && (x3==0 || x3==X3m1));
2455 case EXTERIOR_KERNEL_Z:
2456  incomplete = incomplete || (param.commDim[1] && (x2==0 || x2==X2m1));
2457 case EXTERIOR_KERNEL_Y:
2458  incomplete = incomplete || (param.commDim[0] && (x1==0 || x1==X1m1));
2459 }
2460 
2461 if (!incomplete)
2462 #endif // MULTI_GPU
2463 {
2464 #ifdef DSLASH_XPAY
2465  READ_ACCUM(ACCUMTEX, param.sp_stride)
2466 
2467 #ifndef CLOVER_TWIST_INV_DSLASH
2468 #ifndef CLOVER_TWIST_XPAY
2469  //perform invert twist first:
2470  APPLY_CLOVER_TWIST_INV(c, cinv, -a, o);
2471 #else
2472  APPLY_CLOVER_TWIST(c, -a, acc);
2473 #endif
2474 #endif
2475  o00_re = b*o00_re + acc00_re;
2476  o00_im = b*o00_im + acc00_im;
2477  o01_re = b*o01_re + acc01_re;
2478  o01_im = b*o01_im + acc01_im;
2479  o02_re = b*o02_re + acc02_re;
2480  o02_im = b*o02_im + acc02_im;
2481  o10_re = b*o10_re + acc10_re;
2482  o10_im = b*o10_im + acc10_im;
2483  o11_re = b*o11_re + acc11_re;
2484  o11_im = b*o11_im + acc11_im;
2485  o12_re = b*o12_re + acc12_re;
2486  o12_im = b*o12_im + acc12_im;
2487  o20_re = b*o20_re + acc20_re;
2488  o20_im = b*o20_im + acc20_im;
2489  o21_re = b*o21_re + acc21_re;
2490  o21_im = b*o21_im + acc21_im;
2491  o22_re = b*o22_re + acc22_re;
2492  o22_im = b*o22_im + acc22_im;
2493  o30_re = b*o30_re + acc30_re;
2494  o30_im = b*o30_im + acc30_im;
2495  o31_re = b*o31_re + acc31_re;
2496  o31_im = b*o31_im + acc31_im;
2497  o32_re = b*o32_re + acc32_re;
2498  o32_im = b*o32_im + acc32_im;
2499 #else //no XPAY
2500 #ifndef CLOVER_TWIST_INV_DSLASH
2501  APPLY_CLOVER_TWIST_INV(c, cinv, -a, o);
2502 #endif
2503 #endif
2504 }
2505 
2506 // write spinor field back to device memory
2507 WRITE_SPINOR(param.sp_stride);
2508 
2509 // undefine to prevent warning when precision is changed
2510 #undef spinorFloat
2511 #undef WRITE_SPINOR_SHARED
2512 #undef READ_SPINOR_SHARED
2513 #undef SHARED_STRIDE
2514 
2515 #undef g00_re
2516 #undef g00_im
2517 #undef g01_re
2518 #undef g01_im
2519 #undef g02_re
2520 #undef g02_im
2521 #undef g10_re
2522 #undef g10_im
2523 #undef g11_re
2524 #undef g11_im
2525 #undef g12_re
2526 #undef g12_im
2527 #undef g20_re
2528 #undef g20_im
2529 #undef g21_re
2530 #undef g21_im
2531 #undef g22_re
2532 #undef g22_im
2533 
2534 #undef i00_re
2535 #undef i00_im
2536 #undef i01_re
2537 #undef i01_im
2538 #undef i02_re
2539 #undef i02_im
2540 #undef i10_re
2541 #undef i10_im
2542 #undef i11_re
2543 #undef i11_im
2544 #undef i12_re
2545 #undef i12_im
2546 #undef i20_re
2547 #undef i20_im
2548 #undef i21_re
2549 #undef i21_im
2550 #undef i22_re
2551 #undef i22_im
2552 #undef i30_re
2553 #undef i30_im
2554 #undef i31_re
2555 #undef i31_im
2556 #undef i32_re
2557 #undef i32_im
2558 
2559 #undef c00_00_re
2560 #undef c01_01_re
2561 #undef c02_02_re
2562 #undef c10_10_re
2563 #undef c11_11_re
2564 #undef c12_12_re
2565 #undef c01_00_re
2566 #undef c01_00_im
2567 #undef c02_00_re
2568 #undef c02_00_im
2569 #undef c10_00_re
2570 #undef c10_00_im
2571 #undef c11_00_re
2572 #undef c11_00_im
2573 #undef c12_00_re
2574 #undef c12_00_im
2575 #undef c02_01_re
2576 #undef c02_01_im
2577 #undef c10_01_re
2578 #undef c10_01_im
2579 #undef c11_01_re
2580 #undef c11_01_im
2581 #undef c12_01_re
2582 #undef c12_01_im
2583 #undef c10_02_re
2584 #undef c10_02_im
2585 #undef c11_02_re
2586 #undef c11_02_im
2587 #undef c12_02_re
2588 #undef c12_02_im
2589 #undef c11_10_re
2590 #undef c11_10_im
2591 #undef c12_10_re
2592 #undef c12_10_im
2593 #undef c12_11_re
2594 #undef c12_11_im
2595 
2596 #undef cinv00_00_re
2597 #undef cinv01_01_re
2598 #undef cinv02_02_re
2599 #undef cinv10_10_re
2600 #undef cinv11_11_re
2601 #undef cinv12_12_re
2602 #undef cinv01_00_re
2603 #undef cinv01_00_im
2604 #undef cinv02_00_re
2605 #undef cinv02_00_im
2606 #undef cinv10_00_re
2607 #undef cinv10_00_im
2608 #undef cinv11_00_re
2609 #undef cinv11_00_im
2610 #undef cinv12_00_re
2611 #undef cinv12_00_im
2612 #undef cinv02_01_re
2613 #undef cinv02_01_im
2614 #undef cinv10_01_re
2615 #undef cinv10_01_im
2616 #undef cinv11_01_re
2617 #undef cinv11_01_im
2618 #undef cinv12_01_re
2619 #undef cinv12_01_im
2620 #undef cinv10_02_re
2621 #undef cinv10_02_im
2622 #undef cinv11_02_re
2623 #undef cinv11_02_im
2624 #undef cinv12_02_re
2625 #undef cinv12_02_im
2626 #undef cinv11_10_re
2627 #undef cinv11_10_im
2628 #undef cinv12_10_re
2629 #undef cinv12_10_im
2630 #undef cinv12_11_re
2631 #undef cinv12_11_im
2632 
2633 #undef acc00_re
2634 #undef acc00_im
2635 #undef acc01_re
2636 #undef acc01_im
2637 #undef acc02_re
2638 #undef acc02_im
2639 #undef acc10_re
2640 #undef acc10_im
2641 #undef acc11_re
2642 #undef acc11_im
2643 #undef acc12_re
2644 #undef acc12_im
2645 #undef acc20_re
2646 #undef acc20_im
2647 #undef acc21_re
2648 #undef acc21_im
2649 #undef acc22_re
2650 #undef acc22_im
2651 #undef acc30_re
2652 #undef acc30_im
2653 #undef acc31_re
2654 #undef acc31_im
2655 #undef acc32_re
2656 #undef acc32_im
2657 
2658 
2659 #undef o00_re
2660 #undef o00_im
2661 #undef o01_re
2662 #undef o01_im
2663 #undef o02_re
2664 #undef o02_im
2665 #undef o10_re
2666 #undef o10_im
2667 #undef o11_re
2668 #undef o11_im
2669 #undef o12_re
2670 #undef o12_im
2671 #undef o20_re
2672 #undef o20_im
2673 #undef o21_re
2674 #undef o21_im
2675 #undef o22_re
2676 #undef o22_im
2677 #undef o30_re
2678 #undef o30_im
2679 #undef o31_re
2680 #undef o31_im
2681 #undef o32_re
2682 #undef o32_im
2683 
2684 #undef VOLATILE
VOLATILE spinorFloat o11_im
#define i10_im
spinorFloat a1_re
VOLATILE spinorFloat o01_re
#define acc10_re
__constant__ int Vh
#define i00_im
VOLATILE spinorFloat o30_im
#define APPLY_CLOVER_TWIST(c, a, reg)
Definition: tmc_core.h:1
__constant__ int X2
#define i01_im
spinorFloat B1_re
__constant__ int X2X1mX1
#define acc11_re
VOLATILE spinorFloat o20_re
VOLATILE spinorFloat o02_re
#define i12_im
spinorFloat B2_re
#define acc32_re
VOLATILE spinorFloat o10_re
spinorFloat A2_im
READ_SPINOR(SPINORTEX, param.sp_stride, sp_idx, sp_idx)
#define acc11_im
__constant__ int X3X2X1mX2X1
#define i11_im
VOLATILE spinorFloat o21_re
spinorFloat a0_im
__constant__ int X1
#define acc12_im
#define READ_INTERMEDIATE_SPINOR
Definition: covDev.h:144
int sp_idx
#define i20_im
#define i22_im
spinorFloat B0_im
#define i01_re
spinorFloat b2_re
#define i00_re
#define i12_re
spinorFloat A2_re
#define acc00_im
__constant__ int X3X2X1
#define i21_re
spinorFloat A0_re
spinorFloat b2_im
#define acc00_re
WRITE_SPINOR(param.sp_stride)
#define acc01_re
#define acc31_im
VOLATILE spinorFloat o31_im
#define acc22_im
VOLATILE spinorFloat o01_im
VOLATILE spinorFloat o22_re
spinorFloat A0_im
coordsFromIndex3D< EVEN_X >(X, x1, x2, x3, x4, sid, param.parity, dims)
#define acc02_re
#define acc21_im
QudaGaugeParam param
Definition: pack_test.cpp:17
__constant__ int ghostFace[QUDA_MAX_DIM+1]
#define acc21_re
#define i31_re
VOLATILE spinorFloat o30_re
#define acc20_re
#define APPLY_CLOVER_TWIST_INV(c, cinv, a, reg)
Definition: tmc_core.h:432
spinorFloat B1_im
spinorFloat b1_im
VOLATILE spinorFloat o31_re
spinorFloat b1_re
VOLATILE spinorFloat o11_re
#define acc30_im
#define GAUGE0TEX
Definition: covDev.h:112
#define acc31_re
spinorFloat a0_re
VOLATILE spinorFloat o02_im
#define i31_im
spinorFloat b0_re
#define i21_im
#define VOLATILE
__constant__ int X2m1
spinorFloat A1_re
VOLATILE spinorFloat o10_im
#define acc32_im
#define SPINORTEX
Definition: clover_def.h:40
__constant__ int gauge_fixed
#define acc01_im
__constant__ int X4X3X2X1mX3X2X1
READ_SPINOR_UP(SPINORTEX, param.sp_stride, sp_idx, sp_idx)
#define SPINOR_HOP
Definition: covDev.h:158
VOLATILE spinorFloat o21_im
#define i30_re
#define i20_re
__constant__ int ga_stride
READ_GAUGE_MATRIX(G, GAUGE0TEX, 0, ga_idx, ga_stride)
spinorFloat B0_re
VOLATILE spinorFloat o22_im
#define WRITE_SPINOR_SHARED
__constant__ int X1m1
__constant__ int X3
spinorFloat a1_im
spinorFloat b0_im
#define i02_im
VOLATILE spinorFloat o00_re
#define spinorFloat
#define acc20_im
#define i11_re
#define acc12_re
spinorFloat a2_re
#define i22_re
#define GAUGE1TEX
Definition: covDev.h:113
#define i02_re
VOLATILE spinorFloat o32_im
#define acc22_re
#define i10_re
VOLATILE spinorFloat o00_im
const int ga_idx
__constant__ int X4m1
spinorFloat B2_im
spinorFloat A1_im
READ_SPINOR_DOWN(SPINORTEX, param.sp_stride, sp_idx, sp_idx)
VOLATILE spinorFloat o20_im
const int dims[]
#define READ_SPINOR_SHARED
#define READ_HALF_SPINOR
Definition: io_spinor.h:390
#define INTERTEX
Definition: covDev.h:149
VOLATILE spinorFloat o12_im
VOLATILE spinorFloat o12_re
__constant__ int X4X3X2X1hmX3X2X1h
#define i32_im
__syncthreads()
#define i30_im
RECONSTRUCT_GAUGE_MATRIX(0)
KernelType kernel_type
#define acc10_im
#define acc30_re
VOLATILE spinorFloat o32_re
#define i32_re
__constant__ int X4
__constant__ int X3m1
#define TPROJSCALE
Definition: covDev.h:101
spinorFloat a2_im
#define acc02_im
__constant__ int X2X1