QUDA  0.9.0
tmc_dslash_dagger_fermi_core.h
Go to the documentation of this file.
1 // *** CUDA DSLASH DAGGER ***
2 
// Shared-memory budget per thread: 24 floats = the 12 complex components of
// one full spinor staged in shared memory for neighbor reuse.
3 #define DSLASH_SHARED_FLOATS_PER_THREAD 24
4 
5 
// The NVVM back-end (CUDA >= 4.1 targeting Fermi, sm_20+) does not need the
// 'volatile' qualifier on the staging registers, so VOLATILE expands to
// nothing; the older Open64 back-end does, to inhibit harmful reordering.
6 #if ((CUDA_VERSION >= 4010) && (__COMPUTE_CAPABILITY__ >= 200)) // NVVM compiler
7 #define VOLATILE
8 #else // Open64 compiler
9 #define VOLATILE volatile
10 #endif
11 // input spinor
// Naming scheme: i<spin><color>_{re,im} are the 12 complex components of the
// input spinor just loaded, and acc<spin><color>_{re,im} those of the
// accumulator spinor.  They alias the raw load registers I#/accum#.
// Double precision: one complex number per double2 register (I0..I11).
12 #ifdef SPINOR_DOUBLE
13 #define spinorFloat double
14 #define WRITE_SPINOR_SHARED WRITE_SPINOR_SHARED_DOUBLE2
15 #define READ_SPINOR_SHARED READ_SPINOR_SHARED_DOUBLE2
16 #define i00_re I0.x
17 #define i00_im I0.y
18 #define i01_re I1.x
19 #define i01_im I1.y
20 #define i02_re I2.x
21 #define i02_im I2.y
22 #define i10_re I3.x
23 #define i10_im I3.y
24 #define i11_re I4.x
25 #define i11_im I4.y
26 #define i12_re I5.x
27 #define i12_im I5.y
28 #define i20_re I6.x
29 #define i20_im I6.y
30 #define i21_re I7.x
31 #define i21_im I7.y
32 #define i22_re I8.x
33 #define i22_im I8.y
34 #define i30_re I9.x
35 #define i30_im I9.y
36 #define i31_re I10.x
37 #define i31_im I10.y
38 #define i32_re I11.x
39 #define i32_im I11.y
40 #define acc00_re accum0.x
41 #define acc00_im accum0.y
42 #define acc01_re accum1.x
43 #define acc01_im accum1.y
44 #define acc02_re accum2.x
45 #define acc02_im accum2.y
46 #define acc10_re accum3.x
47 #define acc10_im accum3.y
48 #define acc11_re accum4.x
49 #define acc11_im accum4.y
50 #define acc12_re accum5.x
51 #define acc12_im accum5.y
52 #define acc20_re accum6.x
53 #define acc20_im accum6.y
54 #define acc21_re accum7.x
55 #define acc21_im accum7.y
56 #define acc22_re accum8.x
57 #define acc22_im accum8.y
58 #define acc30_re accum9.x
59 #define acc30_im accum9.y
60 #define acc31_re accum10.x
61 #define acc31_im accum10.y
62 #define acc32_re accum11.x
63 #define acc32_im accum11.y
// Single precision: two complex numbers packed per float4 register, so only
// I0..I5 / accum0..accum5 are used.
64 #else
65 #define spinorFloat float
66 #define WRITE_SPINOR_SHARED WRITE_SPINOR_SHARED_FLOAT4
67 #define READ_SPINOR_SHARED READ_SPINOR_SHARED_FLOAT4
68 #define i00_re I0.x
69 #define i00_im I0.y
70 #define i01_re I0.z
71 #define i01_im I0.w
72 #define i02_re I1.x
73 #define i02_im I1.y
74 #define i10_re I1.z
75 #define i10_im I1.w
76 #define i11_re I2.x
77 #define i11_im I2.y
78 #define i12_re I2.z
79 #define i12_im I2.w
80 #define i20_re I3.x
81 #define i20_im I3.y
82 #define i21_re I3.z
83 #define i21_im I3.w
84 #define i22_re I4.x
85 #define i22_im I4.y
86 #define i30_re I4.z
87 #define i30_im I4.w
88 #define i31_re I5.x
89 #define i31_im I5.y
90 #define i32_re I5.z
91 #define i32_im I5.w
92 #define acc00_re accum0.x
93 #define acc00_im accum0.y
94 #define acc01_re accum0.z
95 #define acc01_im accum0.w
96 #define acc02_re accum1.x
97 #define acc02_im accum1.y
98 #define acc10_re accum1.z
99 #define acc10_im accum1.w
100 #define acc11_re accum2.x
101 #define acc11_im accum2.y
102 #define acc12_re accum2.z
103 #define acc12_im accum2.w
104 #define acc20_re accum3.x
105 #define acc20_im accum3.y
106 #define acc21_re accum3.z
107 #define acc21_im accum3.w
108 #define acc22_re accum4.x
109 #define acc22_im accum4.y
110 #define acc30_re accum4.z
111 #define acc30_im accum4.w
112 #define acc31_re accum5.x
113 #define acc31_im accum5.y
114 #define acc32_re accum5.z
115 #define acc32_im accum5.w
116 #endif // SPINOR_DOUBLE
117 
118 // gauge link
// g<row><col>_{re,im} alias the nine complex entries of the 3x3 SU(3) link
// matrix held in the raw load registers G#.  Two complex numbers per float4
// in the non-FLOAT2 layout, one per float2/double2 in the FLOAT2 layout.
119 #ifdef GAUGE_FLOAT2
120 #define g00_re G0.x
121 #define g00_im G0.y
122 #define g01_re G1.x
123 #define g01_im G1.y
124 #define g02_re G2.x
125 #define g02_im G2.y
126 #define g10_re G3.x
127 #define g10_im G3.y
128 #define g11_re G4.x
129 #define g11_im G4.y
130 #define g12_re G5.x
131 #define g12_im G5.y
132 #define g20_re G6.x
133 #define g20_im G6.y
134 #define g21_re G7.x
135 #define g21_im G7.y
136 #define g22_re G8.x
137 #define g22_im G8.y
138 
139 #else
140 #define g00_re G0.x
141 #define g00_im G0.y
142 #define g01_re G0.z
143 #define g01_im G0.w
144 #define g02_re G1.x
145 #define g02_im G1.y
146 #define g10_re G1.z
147 #define g10_im G1.w
148 #define g11_re G2.x
149 #define g11_im G2.y
150 #define g12_re G2.z
151 #define g12_im G2.w
152 #define g20_re G3.x
153 #define g20_im G3.y
154 #define g21_re G3.z
155 #define g21_im G3.w
156 #define g22_re G4.x
157 #define g22_im G4.y
158 
159 #endif // GAUGE_DOUBLE
160 
161 // conjugated gauge link
// gT is the Hermitian conjugate of g: gT[i][j] = conj(g[j][i]).  Used below
// for the backward-direction hop, where the link of the neighboring site
// must be applied daggered.
162 #define gT00_re (+g00_re)
163 #define gT00_im (-g00_im)
164 #define gT01_re (+g10_re)
165 #define gT01_im (-g10_im)
166 #define gT02_re (+g20_re)
167 #define gT02_im (-g20_im)
168 #define gT10_re (+g01_re)
169 #define gT10_im (-g01_im)
170 #define gT11_re (+g11_re)
171 #define gT11_im (-g11_im)
172 #define gT12_re (+g21_re)
173 #define gT12_im (-g21_im)
174 #define gT20_re (+g02_re)
175 #define gT20_im (-g02_im)
176 #define gT21_re (+g12_re)
177 #define gT21_im (-g12_im)
178 #define gT22_re (+g22_re)
179 #define gT22_im (-g22_im)
180 
181 // first chiral block of clover term
// The clover term is chiral-block-diagonal; each 6x6 chiral block is
// Hermitian, so only the real diagonal plus the lower triangle is stored.
// c<sc1>_<sc2> denotes the (spin-color sc1, spin-color sc2) matrix entry.
// Double precision: one complex (or two reals for the diagonal) per double2.
182 #ifdef CLOVER_DOUBLE
183 #define c00_00_re C0.x
184 #define c01_01_re C0.y
185 #define c02_02_re C1.x
186 #define c10_10_re C1.y
187 #define c11_11_re C2.x
188 #define c12_12_re C2.y
189 #define c01_00_re C3.x
190 #define c01_00_im C3.y
191 #define c02_00_re C4.x
192 #define c02_00_im C4.y
193 #define c10_00_re C5.x
194 #define c10_00_im C5.y
195 #define c11_00_re C6.x
196 #define c11_00_im C6.y
197 #define c12_00_re C7.x
198 #define c12_00_im C7.y
199 #define c02_01_re C8.x
200 #define c02_01_im C8.y
201 #define c10_01_re C9.x
202 #define c10_01_im C9.y
203 #define c11_01_re C10.x
204 #define c11_01_im C10.y
205 #define c12_01_re C11.x
206 #define c12_01_im C11.y
207 #define c10_02_re C12.x
208 #define c10_02_im C12.y
209 #define c11_02_re C13.x
210 #define c11_02_im C13.y
211 #define c12_02_re C14.x
212 #define c12_02_im C14.y
213 #define c11_10_re C15.x
214 #define c11_10_im C15.y
215 #define c12_10_re C16.x
216 #define c12_10_im C16.y
217 #define c12_11_re C17.x
218 #define c12_11_im C17.y
// Single precision: same 36 reals packed four per float4 (C0..C8).
219 #else
220 #define c00_00_re C0.x
221 #define c01_01_re C0.y
222 #define c02_02_re C0.z
223 #define c10_10_re C0.w
224 #define c11_11_re C1.x
225 #define c12_12_re C1.y
226 #define c01_00_re C1.z
227 #define c01_00_im C1.w
228 #define c02_00_re C2.x
229 #define c02_00_im C2.y
230 #define c10_00_re C2.z
231 #define c10_00_im C2.w
232 #define c11_00_re C3.x
233 #define c11_00_im C3.y
234 #define c12_00_re C3.z
235 #define c12_00_im C3.w
236 #define c02_01_re C4.x
237 #define c02_01_im C4.y
238 #define c10_01_re C4.z
239 #define c10_01_im C4.w
240 #define c11_01_re C5.x
241 #define c11_01_im C5.y
242 #define c12_01_re C5.z
243 #define c12_01_im C5.w
244 #define c10_02_re C6.x
245 #define c10_02_im C6.y
246 #define c11_02_re C6.z
247 #define c11_02_im C6.w
248 #define c12_02_re C7.x
249 #define c12_02_im C7.y
250 #define c11_10_re C7.z
251 #define c11_10_im C7.w
252 #define c12_10_re C8.x
253 #define c12_10_im C8.y
254 #define c12_11_re C8.z
255 #define c12_11_im C8.w
256 #endif // CLOVER_DOUBLE
257 
// Upper triangle reconstructed from the stored lower triangle via
// Hermiticity: c[a][b] = conj(c[b][a]).
258 #define c00_01_re (+c01_00_re)
259 #define c00_01_im (-c01_00_im)
260 #define c00_02_re (+c02_00_re)
261 #define c00_02_im (-c02_00_im)
262 #define c01_02_re (+c02_01_re)
263 #define c01_02_im (-c02_01_im)
264 #define c00_10_re (+c10_00_re)
265 #define c00_10_im (-c10_00_im)
266 #define c01_10_re (+c10_01_re)
267 #define c01_10_im (-c10_01_im)
268 #define c02_10_re (+c10_02_re)
269 #define c02_10_im (-c10_02_im)
270 #define c00_11_re (+c11_00_re)
271 #define c00_11_im (-c11_00_im)
272 #define c01_11_re (+c11_01_re)
273 #define c01_11_im (-c11_01_im)
274 #define c02_11_re (+c11_02_re)
275 #define c02_11_im (-c11_02_im)
276 #define c10_11_re (+c11_10_re)
277 #define c10_11_im (-c11_10_im)
278 #define c00_12_re (+c12_00_re)
279 #define c00_12_im (-c12_00_im)
280 #define c01_12_re (+c12_01_re)
281 #define c01_12_im (-c12_01_im)
282 #define c02_12_re (+c12_02_re)
283 #define c02_12_im (-c12_02_im)
284 #define c10_12_re (+c12_10_re)
285 #define c10_12_im (-c12_10_im)
286 #define c11_12_re (+c12_11_re)
287 #define c11_12_im (-c12_11_im)
288 
289 // second chiral block of clover term (reuses C0,...,C9)
// The spin-2/3 block aliases the same registers as the spin-0/1 block; the
// second block's data is loaded into C# before these names are used.
290 #define c20_20_re c00_00_re
291 #define c21_20_re c01_00_re
292 #define c21_20_im c01_00_im
293 #define c22_20_re c02_00_re
294 #define c22_20_im c02_00_im
295 #define c30_20_re c10_00_re
296 #define c30_20_im c10_00_im
297 #define c31_20_re c11_00_re
298 #define c31_20_im c11_00_im
299 #define c32_20_re c12_00_re
300 #define c32_20_im c12_00_im
301 #define c20_21_re c00_01_re
302 #define c20_21_im c00_01_im
303 #define c21_21_re c01_01_re
304 #define c22_21_re c02_01_re
305 #define c22_21_im c02_01_im
306 #define c30_21_re c10_01_re
307 #define c30_21_im c10_01_im
308 #define c31_21_re c11_01_re
309 #define c31_21_im c11_01_im
310 #define c32_21_re c12_01_re
311 #define c32_21_im c12_01_im
312 #define c20_22_re c00_02_re
313 #define c20_22_im c00_02_im
314 #define c21_22_re c01_02_re
315 #define c21_22_im c01_02_im
316 #define c22_22_re c02_02_re
317 #define c30_22_re c10_02_re
318 #define c30_22_im c10_02_im
319 #define c31_22_re c11_02_re
320 #define c31_22_im c11_02_im
321 #define c32_22_re c12_02_re
322 #define c32_22_im c12_02_im
323 #define c20_30_re c00_10_re
324 #define c20_30_im c00_10_im
325 #define c21_30_re c01_10_re
326 #define c21_30_im c01_10_im
327 #define c22_30_re c02_10_re
328 #define c22_30_im c02_10_im
329 #define c30_30_re c10_10_re
330 #define c31_30_re c11_10_re
331 #define c31_30_im c11_10_im
332 #define c32_30_re c12_10_re
333 #define c32_30_im c12_10_im
334 #define c20_31_re c00_11_re
335 #define c20_31_im c00_11_im
336 #define c21_31_re c01_11_re
337 #define c21_31_im c01_11_im
338 #define c22_31_re c02_11_re
339 #define c22_31_im c02_11_im
340 #define c30_31_re c10_11_re
341 #define c30_31_im c10_11_im
342 #define c31_31_re c11_11_re
343 #define c32_31_re c12_11_re
344 #define c32_31_im c12_11_im
345 #define c20_32_re c00_12_re
346 #define c20_32_im c00_12_im
347 #define c21_32_re c01_12_re
348 #define c21_32_im c01_12_im
349 #define c22_32_re c02_12_re
350 #define c22_32_im c02_12_im
351 #define c30_32_re c10_12_re
352 #define c30_32_im c10_12_im
353 #define c31_32_re c11_12_re
354 #define c31_32_im c11_12_im
355 #define c32_32_re c12_12_re
356 
357 
358 // first chiral block of inverted clover term
// Identical register layout and Hermitian storage convention as the clover
// term above, but for the inverse clover matrix (cinv prefix).  Note the
// cinv names alias the SAME C# registers as the c names: only one of the two
// matrices is resident at a time.
359 #ifdef CLOVER_DOUBLE
360 #define cinv00_00_re C0.x
361 #define cinv01_01_re C0.y
362 #define cinv02_02_re C1.x
363 #define cinv10_10_re C1.y
364 #define cinv11_11_re C2.x
365 #define cinv12_12_re C2.y
366 #define cinv01_00_re C3.x
367 #define cinv01_00_im C3.y
368 #define cinv02_00_re C4.x
369 #define cinv02_00_im C4.y
370 #define cinv10_00_re C5.x
371 #define cinv10_00_im C5.y
372 #define cinv11_00_re C6.x
373 #define cinv11_00_im C6.y
374 #define cinv12_00_re C7.x
375 #define cinv12_00_im C7.y
376 #define cinv02_01_re C8.x
377 #define cinv02_01_im C8.y
378 #define cinv10_01_re C9.x
379 #define cinv10_01_im C9.y
380 #define cinv11_01_re C10.x
381 #define cinv11_01_im C10.y
382 #define cinv12_01_re C11.x
383 #define cinv12_01_im C11.y
384 #define cinv10_02_re C12.x
385 #define cinv10_02_im C12.y
386 #define cinv11_02_re C13.x
387 #define cinv11_02_im C13.y
388 #define cinv12_02_re C14.x
389 #define cinv12_02_im C14.y
390 #define cinv11_10_re C15.x
391 #define cinv11_10_im C15.y
392 #define cinv12_10_re C16.x
393 #define cinv12_10_im C16.y
394 #define cinv12_11_re C17.x
395 #define cinv12_11_im C17.y
396 #else
397 #define cinv00_00_re C0.x
398 #define cinv01_01_re C0.y
399 #define cinv02_02_re C0.z
400 #define cinv10_10_re C0.w
401 #define cinv11_11_re C1.x
402 #define cinv12_12_re C1.y
403 #define cinv01_00_re C1.z
404 #define cinv01_00_im C1.w
405 #define cinv02_00_re C2.x
406 #define cinv02_00_im C2.y
407 #define cinv10_00_re C2.z
408 #define cinv10_00_im C2.w
409 #define cinv11_00_re C3.x
410 #define cinv11_00_im C3.y
411 #define cinv12_00_re C3.z
412 #define cinv12_00_im C3.w
413 #define cinv02_01_re C4.x
414 #define cinv02_01_im C4.y
415 #define cinv10_01_re C4.z
416 #define cinv10_01_im C4.w
417 #define cinv11_01_re C5.x
418 #define cinv11_01_im C5.y
419 #define cinv12_01_re C5.z
420 #define cinv12_01_im C5.w
421 #define cinv10_02_re C6.x
422 #define cinv10_02_im C6.y
423 #define cinv11_02_re C6.z
424 #define cinv11_02_im C6.w
425 #define cinv12_02_re C7.x
426 #define cinv12_02_im C7.y
427 #define cinv11_10_re C7.z
428 #define cinv11_10_im C7.w
429 #define cinv12_10_re C8.x
430 #define cinv12_10_im C8.y
431 #define cinv12_11_re C8.z
432 #define cinv12_11_im C8.w
433 #endif // CLOVER_DOUBLE
434 
// Upper triangle from Hermiticity: cinv[a][b] = conj(cinv[b][a]).
435 #define cinv00_01_re (+cinv01_00_re)
436 #define cinv00_01_im (-cinv01_00_im)
437 #define cinv00_02_re (+cinv02_00_re)
438 #define cinv00_02_im (-cinv02_00_im)
439 #define cinv01_02_re (+cinv02_01_re)
440 #define cinv01_02_im (-cinv02_01_im)
441 #define cinv00_10_re (+cinv10_00_re)
442 #define cinv00_10_im (-cinv10_00_im)
443 #define cinv01_10_re (+cinv10_01_re)
444 #define cinv01_10_im (-cinv10_01_im)
445 #define cinv02_10_re (+cinv10_02_re)
446 #define cinv02_10_im (-cinv10_02_im)
447 #define cinv00_11_re (+cinv11_00_re)
448 #define cinv00_11_im (-cinv11_00_im)
449 #define cinv01_11_re (+cinv11_01_re)
450 #define cinv01_11_im (-cinv11_01_im)
451 #define cinv02_11_re (+cinv11_02_re)
452 #define cinv02_11_im (-cinv11_02_im)
453 #define cinv10_11_re (+cinv11_10_re)
454 #define cinv10_11_im (-cinv11_10_im)
455 #define cinv00_12_re (+cinv12_00_re)
456 #define cinv00_12_im (-cinv12_00_im)
457 #define cinv01_12_re (+cinv12_01_re)
458 #define cinv01_12_im (-cinv12_01_im)
459 #define cinv02_12_re (+cinv12_02_re)
460 #define cinv02_12_im (-cinv12_02_im)
461 #define cinv10_12_re (+cinv12_10_re)
462 #define cinv10_12_im (-cinv12_10_im)
463 #define cinv11_12_re (+cinv12_11_re)
464 #define cinv11_12_im (-cinv12_11_im)
465 
466 // second chiral block of inverted clover term (reuses C0,...,C9)
// Spin-2/3 block aliases the spin-0/1 names; the second block's data must be
// loaded into C# before these names are used.
467 #define cinv20_20_re cinv00_00_re
468 #define cinv21_20_re cinv01_00_re
469 #define cinv21_20_im cinv01_00_im
470 #define cinv22_20_re cinv02_00_re
471 #define cinv22_20_im cinv02_00_im
472 #define cinv30_20_re cinv10_00_re
473 #define cinv30_20_im cinv10_00_im
474 #define cinv31_20_re cinv11_00_re
475 #define cinv31_20_im cinv11_00_im
476 #define cinv32_20_re cinv12_00_re
477 #define cinv32_20_im cinv12_00_im
478 #define cinv20_21_re cinv00_01_re
479 #define cinv20_21_im cinv00_01_im
480 #define cinv21_21_re cinv01_01_re
481 #define cinv22_21_re cinv02_01_re
482 #define cinv22_21_im cinv02_01_im
483 #define cinv30_21_re cinv10_01_re
484 #define cinv30_21_im cinv10_01_im
485 #define cinv31_21_re cinv11_01_re
486 #define cinv31_21_im cinv11_01_im
487 #define cinv32_21_re cinv12_01_re
488 #define cinv32_21_im cinv12_01_im
489 #define cinv20_22_re cinv00_02_re
490 #define cinv20_22_im cinv00_02_im
491 #define cinv21_22_re cinv01_02_re
492 #define cinv21_22_im cinv01_02_im
493 #define cinv22_22_re cinv02_02_re
494 #define cinv30_22_re cinv10_02_re
495 #define cinv30_22_im cinv10_02_im
496 #define cinv31_22_re cinv11_02_re
497 #define cinv31_22_im cinv11_02_im
498 #define cinv32_22_re cinv12_02_re
499 #define cinv32_22_im cinv12_02_im
500 #define cinv20_30_re cinv00_10_re
501 #define cinv20_30_im cinv00_10_im
502 #define cinv21_30_re cinv01_10_re
503 #define cinv21_30_im cinv01_10_im
504 #define cinv22_30_re cinv02_10_re
505 #define cinv22_30_im cinv02_10_im
506 #define cinv30_30_re cinv10_10_re
507 #define cinv31_30_re cinv11_10_re
508 #define cinv31_30_im cinv11_10_im
509 #define cinv32_30_re cinv12_10_re
510 #define cinv32_30_im cinv12_10_im
511 #define cinv20_31_re cinv00_11_re
512 #define cinv20_31_im cinv00_11_im
513 #define cinv21_31_re cinv01_11_re
514 #define cinv21_31_im cinv01_11_im
515 #define cinv22_31_re cinv02_11_re
516 #define cinv22_31_im cinv02_11_im
517 #define cinv30_31_re cinv10_11_re
518 #define cinv30_31_im cinv10_11_im
519 #define cinv31_31_re cinv11_11_re
520 #define cinv32_31_re cinv12_11_re
521 #define cinv32_31_im cinv12_11_im
522 #define cinv20_32_re cinv00_12_re
523 #define cinv20_32_im cinv00_12_im
524 #define cinv21_32_re cinv01_12_re
525 #define cinv21_32_im cinv01_12_im
526 #define cinv22_32_re cinv02_12_re
527 #define cinv22_32_im cinv02_12_im
528 #define cinv30_32_re cinv10_12_re
529 #define cinv30_32_im cinv10_12_im
530 #define cinv31_32_re cinv11_12_re
531 #define cinv31_32_im cinv11_12_im
532 #define cinv32_32_re cinv12_12_re
533 
534 
// Pre-declare the clover load registers so later code can use assignment
// (ASSN) macros instead of declaring-and-reading (READ) macros; skipped when
// the inverse-applying variant owns these declarations.
535 #ifndef CLOVER_TWIST_INV_DSLASH
536 
537 // declare C## here and use ASSN below instead of READ
538 #ifdef CLOVER_DOUBLE
539 double2 C0;
540 double2 C1;
541 double2 C2;
542 double2 C3;
543 double2 C4;
544 double2 C5;
545 double2 C6;
546 double2 C7;
547 double2 C8;
548 double2 C9;
549 double2 C10;
550 double2 C11;
551 double2 C12;
552 double2 C13;
553 double2 C14;
554 double2 C15;
555 double2 C16;
556 double2 C17;
557 #else
558 float4 C0;
559 float4 C1;
560 float4 C2;
561 float4 C3;
562 float4 C4;
563 float4 C5;
564 float4 C6;
565 float4 C7;
566 float4 C8;
567 
// NOTE(review): K appears to be the per-site norm scale needed only at half
// precision (DD_PREC==2) — confirm against read_clover.h.
568 #if (DD_PREC==2)
569 float K;
570 #endif
571 
572 #endif // CLOVER_DOUBLE
573 #endif
574 
575 // output spinor
// NOTE(review): original file lines 576-599 (the o<spin><color>_{re,im}
// output-spinor register definitions) are not shown in this listing.
600 
// Shared-memory stride between successive threads' spinor components, chosen
// per the original comments to avoid shared-memory bank conflicts on Fermi;
// it differs because double2 and float4 elements have different widths.
601 #ifdef SPINOR_DOUBLE
602 #define SHARED_STRIDE 16 // to avoid bank conflicts on Fermi
603 #else
604 #define SHARED_STRIDE 32 // to avoid bank conflicts on Fermi
605 #endif
606 
607 #include "read_gauge.h"
608 #include "io_spinor.h"
609 #include "read_clover.h"
610 #include "tmc_core.h"
611 
// coord[]: the site's lattice coordinates; X: its full linear lattice index;
// sid: this thread's site/face index.
612 int coord[5];
613 int X;
614 
615 int sid;
616 
// Interior threads: compute coordinates, cull out-of-range Y/Z threads, and
// zero the output spinor accumulator.  Exterior (face) threads: derive the
// face index and seed the output with the partially-accumulated spinor.
617 #ifdef MULTI_GPU
618 int face_idx;
619 if (kernel_type == INTERIOR_KERNEL) {
620 #endif
621 
// NOTE(review): original file line 623 (the interior-kernel coordinate/index
// computation that fills sid, X and coord[]) is not shown in this listing.
622  // Assume even dimensions
624 
625  // only need to check Y and Z dims currently since X and T set to match exactly
626  if (coord[1] >= param.dc.X[1]) return;
627  if (coord[2] >= param.dc.X[2]) return;
628 
// Zero all 24 output accumulator components before summing the hop terms.
629  o00_re = 0; o00_im = 0;
630  o01_re = 0; o01_im = 0;
631  o02_re = 0; o02_im = 0;
632  o10_re = 0; o10_im = 0;
633  o11_re = 0; o11_im = 0;
634  o12_re = 0; o12_im = 0;
635  o20_re = 0; o20_im = 0;
636  o21_re = 0; o21_im = 0;
637  o22_re = 0; o22_im = 0;
638  o30_re = 0; o30_im = 0;
639  o31_re = 0; o31_im = 0;
640  o32_re = 0; o32_im = 0;
641 
642 #ifdef MULTI_GPU
643 } else { // exterior kernel
644 
645  sid = blockIdx.x*blockDim.x + threadIdx.x;
646  if (sid >= param.threads) return;
647 
// Each exterior launch covers both faces of a dimension; split sid into a
// face number (0 = back, 1 = front) and an index within that face.
648  const int face_volume = (param.threads >> 1); // volume of one face
649  const int face_num = (sid >= face_volume); // is this thread updating face 0 or 1
650  face_idx = sid - face_num*face_volume; // index into the respective face
651 
652  // ghostOffset is scaled to include body (includes stride) and number of FloatN arrays (SPINOR_HOP)
653  // face_idx not sid since faces are spin projected and share the same volume index (modulo UP/DOWN reading)
654  //sp_idx = face_idx + param.ghostOffset[dim];
655 
656  coordsFromFaceIndex<4,QUDA_4D_PC,kernel_type,1>(X, sid, coord, face_idx, face_num, param);
657 
// NOTE(review): original file line 658 (presumably the read of the interior
// kernel's intermediate result into the i## registers) is not shown here.
659 
// Seed the accumulator with the already-computed partial result so this
// face kernel only adds its boundary hop contributions.
660  o00_re = i00_re; o00_im = i00_im;
661  o01_re = i01_re; o01_im = i01_im;
662  o02_re = i02_re; o02_im = i02_im;
663  o10_re = i10_re; o10_im = i10_im;
664  o11_re = i11_re; o11_im = i11_im;
665  o12_re = i12_re; o12_im = i12_im;
666  o20_re = i20_re; o20_im = i20_im;
667  o21_re = i21_re; o21_im = i21_im;
668  o22_re = i22_re; o22_im = i22_im;
669  o30_re = i30_re; o30_im = i30_im;
670  o31_re = i31_re; o31_im = i31_im;
671  o32_re = i32_re; o32_im = i32_im;
672 }
673 #endif // MULTI_GPU
674 
675 
// +X hop: apply projector P0+ to the forward x-neighbor spinor, multiply the
// projected half spinor by the local gauge link U_x(x), and accumulate into
// the output.  Interior threads handle interior neighbors; the EXTERIOR_X
// kernel handles sites on the forward x boundary using ghost-zone data.
676 #ifdef MULTI_GPU
677 if ( (kernel_type == INTERIOR_KERNEL && (!param.ghostDim[0] || coord[0]<(param.dc.X[0]-1))) ||
678  (kernel_type == EXTERIOR_KERNEL_X && coord[0]==(param.dc.X[0]-1)) )
679 #endif
680 {
681  // Projector P0+
682  // 1 0 0 i
683  // 0 1 i 0
684  // 0 -i 1 0
685  // -i 0 0 1
686 
// Neighbor spinor index: x+1 with periodic wraparound at the lattice edge,
// halved (>>1) because spinors are stored on checkerboarded half-lattices;
// exterior threads instead index into the ghost buffer ([1] = forward face).
687 #ifdef MULTI_GPU
688  const int sp_idx = (kernel_type == INTERIOR_KERNEL) ? (coord[0]==(param.dc.X[0]-1) ? X-(param.dc.X[0]-1) : X+1) >> 1 :
689  face_idx + param.ghostOffset[static_cast<int>(kernel_type)][1];
690 #if (DD_PREC==2) // half precision
691  const int sp_norm_idx = face_idx + param.ghostNormOffset[static_cast<int>(kernel_type)][1];
692 #endif
693 #else
694  const int sp_idx = (coord[0]==(param.dc.X[0]-1) ? X-(param.dc.X[0]-1) : X+1) >> 1;
695 #endif
696 
// Forward hop uses the link attached to THIS site.
697  const int ga_idx = sid;
698 
// NOTE(review): original file lines 699-704 (declarations of the half-spinor
// registers a0..a2 / b0..b2) are not shown in this listing.
705 
706 #ifdef MULTI_GPU
707  if (kernel_type == INTERIOR_KERNEL) {
708 #endif
709 
710  // read spinor from device memory
711  READ_SPINOR(SPINORTEX, param.sp_stride, sp_idx, sp_idx);
712 
713  // store spinor into shared memory
// Staged so the backward-x pass below can fetch its neighbor from shared
// memory instead of global memory; the matching __syncthreads() is issued
// there, before the shared read.
714  WRITE_SPINOR_SHARED(threadIdx.x, threadIdx.y, threadIdx.z, i);
715 
716  // project spinor into half spinors
// a = (1 + i*gamma) upper component pair, b = lower pair, per the P0+
// matrix listed above: a_c = i0c + i*i3c, b_c = i1c + i*i2c.
717  a0_re = +i00_re-i30_im;
718  a0_im = +i00_im+i30_re;
719  a1_re = +i01_re-i31_im;
720  a1_im = +i01_im+i31_re;
721  a2_re = +i02_re-i32_im;
722  a2_im = +i02_im+i32_re;
723  b0_re = +i10_re-i20_im;
724  b0_im = +i10_im+i20_re;
725  b1_re = +i11_re-i21_im;
726  b1_im = +i11_im+i21_re;
727  b2_re = +i12_re-i22_im;
728  b2_im = +i12_im+i22_re;
729 
730 #ifdef MULTI_GPU
731  } else {
732 
733  const int sp_stride_pad = param.dc.ghostFace[static_cast<int>(kernel_type)];
734 
735  // read half spinor from device memory
// Ghost spinors arrive already spin-projected, so the half spinor is copied
// straight into a/b with no projection arithmetic.
736  READ_SPINOR_GHOST(GHOSTSPINORTEX, sp_stride_pad, sp_idx, sp_norm_idx, 0);
737 
738  a0_re = i00_re; a0_im = i00_im;
739  a1_re = i01_re; a1_im = i01_im;
740  a2_re = i02_re; a2_im = i02_im;
741  b0_re = i10_re; b0_im = i10_im;
742  b1_re = i11_re; b1_im = i11_im;
743  b2_re = i12_re; b2_im = i12_im;
744 
745  }
746 #endif // MULTI_GPU
747 
748  // read gauge matrix from device memory
749  READ_GAUGE_MATRIX(G, GAUGE0TEX, 0, ga_idx, param.gauge_stride);
750 
751  // reconstruct gauge matrix
// NOTE(review): original file line 752 (RECONSTRUCT_GAUGE_MATRIX and, in
// this unrolled region, lines 755/762/769/... declaring A#/B# accumulators)
// are not shown in this listing.
753 
754  // multiply row 0
// Complex 3x3 matrix-vector products: A = U * a, B = U * b, expanded into
// fused multiply-add-friendly real arithmetic.
756  A0_re += g00_re * a0_re;
757  A0_re -= g00_im * a0_im;
758  A0_re += g01_re * a1_re;
759  A0_re -= g01_im * a1_im;
760  A0_re += g02_re * a2_re;
761  A0_re -= g02_im * a2_im;
763  A0_im += g00_re * a0_im;
764  A0_im += g00_im * a0_re;
765  A0_im += g01_re * a1_im;
766  A0_im += g01_im * a1_re;
767  A0_im += g02_re * a2_im;
768  A0_im += g02_im * a2_re;
770  B0_re += g00_re * b0_re;
771  B0_re -= g00_im * b0_im;
772  B0_re += g01_re * b1_re;
773  B0_re -= g01_im * b1_im;
774  B0_re += g02_re * b2_re;
775  B0_re -= g02_im * b2_im;
777  B0_im += g00_re * b0_im;
778  B0_im += g00_im * b0_re;
779  B0_im += g01_re * b1_im;
780  B0_im += g01_im * b1_re;
781  B0_im += g02_re * b2_im;
782  B0_im += g02_im * b2_re;
783 
784  // multiply row 1
786  A1_re += g10_re * a0_re;
787  A1_re -= g10_im * a0_im;
788  A1_re += g11_re * a1_re;
789  A1_re -= g11_im * a1_im;
790  A1_re += g12_re * a2_re;
791  A1_re -= g12_im * a2_im;
793  A1_im += g10_re * a0_im;
794  A1_im += g10_im * a0_re;
795  A1_im += g11_re * a1_im;
796  A1_im += g11_im * a1_re;
797  A1_im += g12_re * a2_im;
798  A1_im += g12_im * a2_re;
800  B1_re += g10_re * b0_re;
801  B1_re -= g10_im * b0_im;
802  B1_re += g11_re * b1_re;
803  B1_re -= g11_im * b1_im;
804  B1_re += g12_re * b2_re;
805  B1_re -= g12_im * b2_im;
807  B1_im += g10_re * b0_im;
808  B1_im += g10_im * b0_re;
809  B1_im += g11_re * b1_im;
810  B1_im += g11_im * b1_re;
811  B1_im += g12_re * b2_im;
812  B1_im += g12_im * b2_re;
813 
814  // multiply row 2
816  A2_re += g20_re * a0_re;
817  A2_re -= g20_im * a0_im;
818  A2_re += g21_re * a1_re;
819  A2_re -= g21_im * a1_im;
820  A2_re += g22_re * a2_re;
821  A2_re -= g22_im * a2_im;
823  A2_im += g20_re * a0_im;
824  A2_im += g20_im * a0_re;
825  A2_im += g21_re * a1_im;
826  A2_im += g21_im * a1_re;
827  A2_im += g22_re * a2_im;
828  A2_im += g22_im * a2_re;
830  B2_re += g20_re * b0_re;
831  B2_re -= g20_im * b0_im;
832  B2_re += g21_re * b1_re;
833  B2_re -= g21_im * b1_im;
834  B2_re += g22_re * b2_re;
835  B2_re -= g22_im * b2_im;
837  B2_im += g20_re * b0_im;
838  B2_im += g20_im * b0_re;
839  B2_im += g21_re * b1_im;
840  B2_im += g21_im * b1_re;
841  B2_im += g22_re * b2_im;
842  B2_im += g22_im * b2_re;
843 
// Expand the half spinor back to four spin components and accumulate: the
// lower spins are (-i)*the upper ones, per the P0+ projector structure
// (o2c += -i*B_c, o3c += -i*A_c in complex form).
844  o00_re += A0_re;
845  o00_im += A0_im;
846  o10_re += B0_re;
847  o10_im += B0_im;
848  o20_re += B0_im;
849  o20_im -= B0_re;
850  o30_re += A0_im;
851  o30_im -= A0_re;
852 
853  o01_re += A1_re;
854  o01_im += A1_im;
855  o11_re += B1_re;
856  o11_im += B1_im;
857  o21_re += B1_im;
858  o21_im -= B1_re;
859  o31_re += A1_im;
860  o31_im -= A1_re;
861 
862  o02_re += A2_re;
863  o02_im += A2_im;
864  o12_re += B2_re;
865  o12_im += B2_im;
866  o22_re += B2_im;
867  o22_im -= B2_re;
868  o32_re += A2_im;
869  o32_im -= A2_re;
870 
871 }
872 
// -X hop: apply projector P0- to the backward x-neighbor spinor, multiply by
// the daggered link attached to that neighbor (gT## macros), and accumulate.
873 #ifdef MULTI_GPU
874 if ( (kernel_type == INTERIOR_KERNEL && (!param.ghostDim[0] || coord[0]>0)) ||
875  (kernel_type == EXTERIOR_KERNEL_X && coord[0]==0) )
876 #endif
877 {
878  // Projector P0-
879  // 1 0 0 -i
880  // 0 1 -i 0
881  // 0 i 1 0
882  // i 0 0 1
883 
// Neighbor index: x-1 with periodic wraparound, halved for checkerboarding;
// exterior threads read the backward ghost face ([0]).
884 #ifdef MULTI_GPU
885  const int sp_idx = (kernel_type == INTERIOR_KERNEL) ? (coord[0]==0 ? X+(param.dc.X[0]-1) : X-1) >> 1 :
886  face_idx + param.ghostOffset[static_cast<int>(kernel_type)][0];
887 #if (DD_PREC==2) // half precision
888  const int sp_norm_idx = face_idx + param.ghostNormOffset[static_cast<int>(kernel_type)][0];
889 #endif
890 #else
891  const int sp_idx = (coord[0]==0 ? X+(param.dc.X[0]-1) : X-1) >> 1;
892 #endif
893 
// Backward hop uses the link attached to the NEIGHBOR site (sp_idx); on the
// exterior pass that link lives in the ghost gauge field at Vh+face_idx.
894 #ifdef MULTI_GPU
895  const int ga_idx = ((kernel_type == INTERIOR_KERNEL) ? sp_idx : param.dc.Vh+face_idx);
896 #else
897  const int ga_idx = sp_idx;
898 #endif
899 
// NOTE(review): original file lines 900-905 (declarations of the half-spinor
// registers a0..a2 / b0..b2) are not shown in this listing.
906 
907 #ifdef MULTI_GPU
908  if (kernel_type == INTERIOR_KERNEL) {
909 #endif
910 
911  // load spinor from shared memory
// The +X pass staged each thread's spinor in shared memory; the x-1 neighbor
// is the thread at tx-1 (wrapping within the block).  The barrier guarantees
// every thread's WRITE_SPINOR_SHARED above is visible before this read.
912  int tx = (threadIdx.x > 0) ? threadIdx.x-1 : blockDim.x-1;
913  __syncthreads();
914  READ_SPINOR_SHARED(tx, threadIdx.y, threadIdx.z);
915 
916  // project spinor into half spinors
// P0- projection: a_c = i0c - i*i3c, b_c = i1c - i*i2c (signs opposite to
// the P0+ block above).
917  a0_re = +i00_re+i30_im;
918  a0_im = +i00_im-i30_re;
919  a1_re = +i01_re+i31_im;
920  a1_im = +i01_im-i31_re;
921  a2_re = +i02_re+i32_im;
922  a2_im = +i02_im-i32_re;
923  b0_re = +i10_re+i20_im;
924  b0_im = +i10_im-i20_re;
925  b1_re = +i11_re+i21_im;
926  b1_im = +i11_im-i21_re;
927  b2_re = +i12_re+i22_im;
928  b2_im = +i12_im-i22_re;
929 
930 #ifdef MULTI_GPU
931  } else {
932 
933  const int sp_stride_pad = param.dc.ghostFace[static_cast<int>(kernel_type)];
934 
935  // read half spinor from device memory
// Ghost data is pre-projected; copy straight into the half-spinor registers.
936  READ_SPINOR_GHOST(GHOSTSPINORTEX, sp_stride_pad, sp_idx, sp_norm_idx, 1);
937 
938  a0_re = i00_re; a0_im = i00_im;
939  a1_re = i01_re; a1_im = i01_im;
940  a2_re = i02_re; a2_im = i02_im;
941  b0_re = i10_re; b0_im = i10_im;
942  b1_re = i11_re; b1_im = i11_im;
943  b2_re = i12_re; b2_im = i12_im;
944 
945  }
946 #endif // MULTI_GPU
947 
948  // read gauge matrix from device memory
949  READ_GAUGE_MATRIX(G, GAUGE1TEX, 1, ga_idx, param.gauge_stride);
950 
951  // reconstruct gauge matrix
// NOTE(review): original file line 952 (RECONSTRUCT_GAUGE_MATRIX) is not
// shown in this listing.
953 
954  // multiply row 0
// A = U^dagger * a, B = U^dagger * b, using the conjugated-link gT macros.
955  spinorFloat A0_re = 0;
956  A0_re += gT00_re * a0_re;
957  A0_re -= gT00_im * a0_im;
958  A0_re += gT01_re * a1_re;
959  A0_re -= gT01_im * a1_im;
960  A0_re += gT02_re * a2_re;
961  A0_re -= gT02_im * a2_im;
962  spinorFloat A0_im = 0;
963  A0_im += gT00_re * a0_im;
964  A0_im += gT00_im * a0_re;
965  A0_im += gT01_re * a1_im;
966  A0_im += gT01_im * a1_re;
967  A0_im += gT02_re * a2_im;
968  A0_im += gT02_im * a2_re;
969  spinorFloat B0_re = 0;
970  B0_re += gT00_re * b0_re;
971  B0_re -= gT00_im * b0_im;
972  B0_re += gT01_re * b1_re;
973  B0_re -= gT01_im * b1_im;
974  B0_re += gT02_re * b2_re;
975  B0_re -= gT02_im * b2_im;
976  spinorFloat B0_im = 0;
977  B0_im += gT00_re * b0_im;
978  B0_im += gT00_im * b0_re;
979  B0_im += gT01_re * b1_im;
980  B0_im += gT01_im * b1_re;
981  B0_im += gT02_re * b2_im;
982  B0_im += gT02_im * b2_re;
983 
984  // multiply row 1
985  spinorFloat A1_re = 0;
986  A1_re += gT10_re * a0_re;
987  A1_re -= gT10_im * a0_im;
988  A1_re += gT11_re * a1_re;
989  A1_re -= gT11_im * a1_im;
990  A1_re += gT12_re * a2_re;
991  A1_re -= gT12_im * a2_im;
992  spinorFloat A1_im = 0;
993  A1_im += gT10_re * a0_im;
994  A1_im += gT10_im * a0_re;
995  A1_im += gT11_re * a1_im;
996  A1_im += gT11_im * a1_re;
997  A1_im += gT12_re * a2_im;
998  A1_im += gT12_im * a2_re;
999  spinorFloat B1_re = 0;
1000  B1_re += gT10_re * b0_re;
1001  B1_re -= gT10_im * b0_im;
1002  B1_re += gT11_re * b1_re;
1003  B1_re -= gT11_im * b1_im;
1004  B1_re += gT12_re * b2_re;
1005  B1_re -= gT12_im * b2_im;
1006  spinorFloat B1_im = 0;
1007  B1_im += gT10_re * b0_im;
1008  B1_im += gT10_im * b0_re;
1009  B1_im += gT11_re * b1_im;
1010  B1_im += gT11_im * b1_re;
1011  B1_im += gT12_re * b2_im;
1012  B1_im += gT12_im * b2_re;
1013 
1014  // multiply row 2
1015  spinorFloat A2_re = 0;
1016  A2_re += gT20_re * a0_re;
1017  A2_re -= gT20_im * a0_im;
1018  A2_re += gT21_re * a1_re;
1019  A2_re -= gT21_im * a1_im;
1020  A2_re += gT22_re * a2_re;
1021  A2_re -= gT22_im * a2_im;
1022  spinorFloat A2_im = 0;
1023  A2_im += gT20_re * a0_im;
1024  A2_im += gT20_im * a0_re;
1025  A2_im += gT21_re * a1_im;
1026  A2_im += gT21_im * a1_re;
1027  A2_im += gT22_re * a2_im;
1028  A2_im += gT22_im * a2_re;
1029  spinorFloat B2_re = 0;
1030  B2_re += gT20_re * b0_re;
1031  B2_re -= gT20_im * b0_im;
1032  B2_re += gT21_re * b1_re;
1033  B2_re -= gT21_im * b1_im;
1034  B2_re += gT22_re * b2_re;
1035  B2_re -= gT22_im * b2_im;
1036  spinorFloat B2_im = 0;
1037  B2_im += gT20_re * b0_im;
1038  B2_im += gT20_im * b0_re;
1039  B2_im += gT21_re * b1_im;
1040  B2_im += gT21_im * b1_re;
1041  B2_im += gT22_re * b2_im;
1042  B2_im += gT22_im * b2_re;
1043 
// Reconstruct and accumulate: lower spins are (+i)*the upper ones here
// (o2c += +i*B_c, o3c += +i*A_c), mirroring the P0- projector structure.
1044  o00_re += A0_re;
1045  o00_im += A0_im;
1046  o10_re += B0_re;
1047  o10_im += B0_im;
1048  o20_re -= B0_im;
1049  o20_im += B0_re;
1050  o30_re -= A0_im;
1051  o30_im += A0_re;
1052 
1053  o01_re += A1_re;
1054  o01_im += A1_im;
1055  o11_re += B1_re;
1056  o11_im += B1_im;
1057  o21_re -= B1_im;
1058  o21_im += B1_re;
1059  o31_re -= A1_im;
1060  o31_im += A1_re;
1061 
1062  o02_re += A2_re;
1063  o02_im += A2_im;
1064  o12_re += B2_re;
1065  o12_im += B2_im;
1066  o22_re -= B2_im;
1067  o22_im += B2_re;
1068  o32_re -= A2_im;
1069  o32_im += A2_re;
1070 
1071 }
1072 
1073 #ifdef MULTI_GPU
1074 if ( (kernel_type == INTERIOR_KERNEL && (!param.ghostDim[1] || coord[1]<(param.dc.X[1]-1))) ||
1075  (kernel_type == EXTERIOR_KERNEL_Y && coord[1]==(param.dc.X[1]-1)) )
1076 #endif
1077 {
1078  // Projector P1+
1079  // 1 0 0 1
1080  // 0 1 -1 0
1081  // 0 -1 1 0
1082  // 1 0 0 1
1083 
1084 #ifdef MULTI_GPU
1085  const int sp_idx = (kernel_type == INTERIOR_KERNEL) ? (coord[1]==(param.dc.X[1]-1) ? X-param.dc.X2X1mX1 : X+param.dc.X[0]) >> 1 :
1086  face_idx + param.ghostOffset[static_cast<int>(kernel_type)][1];
1087 #if (DD_PREC==2) // half precision
1088  const int sp_norm_idx = face_idx + param.ghostNormOffset[static_cast<int>(kernel_type)][1];
1089 #endif
1090 #else
1091  const int sp_idx = (coord[1]==(param.dc.X[1]-1) ? X-param.dc.X2X1mX1 : X+param.dc.X[0]) >> 1;
1092 #endif
1093 
1094  const int ga_idx = sid;
1095 
1102 
1103 #ifdef MULTI_GPU
1104  if (kernel_type == INTERIOR_KERNEL) {
1105 #endif
1106 
1107  if (threadIdx.y == blockDim.y-1 && blockDim.y < param.dc.X[1] ) {
1108  // read spinor from device memory
1109  READ_SPINOR(SPINORTEX, param.sp_stride, sp_idx, sp_idx);
1110 
1111  // project spinor into half spinors
1112  a0_re = +i00_re+i30_re;
1113  a0_im = +i00_im+i30_im;
1114  a1_re = +i01_re+i31_re;
1115  a1_im = +i01_im+i31_im;
1116  a2_re = +i02_re+i32_re;
1117  a2_im = +i02_im+i32_im;
1118  b0_re = +i10_re-i20_re;
1119  b0_im = +i10_im-i20_im;
1120  b1_re = +i11_re-i21_re;
1121  b1_im = +i11_im-i21_im;
1122  b2_re = +i12_re-i22_re;
1123  b2_im = +i12_im-i22_im;
1124  } else {
1125  // load spinor from shared memory
1126  int tx = (threadIdx.x + blockDim.x - ((coord[0]+1)&1) ) % blockDim.x;
1127  int ty = (threadIdx.y < blockDim.y - 1) ? threadIdx.y + 1 : 0;
1128  READ_SPINOR_SHARED(tx, ty, threadIdx.z);
1129 
1130  // project spinor into half spinors
1131  a0_re = +i00_re+i30_re;
1132  a0_im = +i00_im+i30_im;
1133  a1_re = +i01_re+i31_re;
1134  a1_im = +i01_im+i31_im;
1135  a2_re = +i02_re+i32_re;
1136  a2_im = +i02_im+i32_im;
1137  b0_re = +i10_re-i20_re;
1138  b0_im = +i10_im-i20_im;
1139  b1_re = +i11_re-i21_re;
1140  b1_im = +i11_im-i21_im;
1141  b2_re = +i12_re-i22_re;
1142  b2_im = +i12_im-i22_im;
1143  }
1144 
1145 #ifdef MULTI_GPU
1146  } else {
1147 
1148  const int sp_stride_pad = param.dc.ghostFace[static_cast<int>(kernel_type)];
1149 
1150  // read half spinor from device memory
1151  READ_SPINOR_GHOST(GHOSTSPINORTEX, sp_stride_pad, sp_idx, sp_norm_idx, 2);
1152 
1153  a0_re = i00_re; a0_im = i00_im;
1154  a1_re = i01_re; a1_im = i01_im;
1155  a2_re = i02_re; a2_im = i02_im;
1156  b0_re = i10_re; b0_im = i10_im;
1157  b1_re = i11_re; b1_im = i11_im;
1158  b2_re = i12_re; b2_im = i12_im;
1159 
1160  }
1161 #endif // MULTI_GPU
1162 
1163  // read gauge matrix from device memory
1164  READ_GAUGE_MATRIX(G, GAUGE0TEX, 2, ga_idx, param.gauge_stride);
1165 
1166  // reconstruct gauge matrix
1168 
1169  // multiply row 0
1170  spinorFloat A0_re = 0;
1171  A0_re += g00_re * a0_re;
1172  A0_re -= g00_im * a0_im;
1173  A0_re += g01_re * a1_re;
1174  A0_re -= g01_im * a1_im;
1175  A0_re += g02_re * a2_re;
1176  A0_re -= g02_im * a2_im;
1177  spinorFloat A0_im = 0;
1178  A0_im += g00_re * a0_im;
1179  A0_im += g00_im * a0_re;
1180  A0_im += g01_re * a1_im;
1181  A0_im += g01_im * a1_re;
1182  A0_im += g02_re * a2_im;
1183  A0_im += g02_im * a2_re;
1184  spinorFloat B0_re = 0;
1185  B0_re += g00_re * b0_re;
1186  B0_re -= g00_im * b0_im;
1187  B0_re += g01_re * b1_re;
1188  B0_re -= g01_im * b1_im;
1189  B0_re += g02_re * b2_re;
1190  B0_re -= g02_im * b2_im;
1191  spinorFloat B0_im = 0;
1192  B0_im += g00_re * b0_im;
1193  B0_im += g00_im * b0_re;
1194  B0_im += g01_re * b1_im;
1195  B0_im += g01_im * b1_re;
1196  B0_im += g02_re * b2_im;
1197  B0_im += g02_im * b2_re;
1198 
1199  // multiply row 1
1200  spinorFloat A1_re = 0;
1201  A1_re += g10_re * a0_re;
1202  A1_re -= g10_im * a0_im;
1203  A1_re += g11_re * a1_re;
1204  A1_re -= g11_im * a1_im;
1205  A1_re += g12_re * a2_re;
1206  A1_re -= g12_im * a2_im;
1207  spinorFloat A1_im = 0;
1208  A1_im += g10_re * a0_im;
1209  A1_im += g10_im * a0_re;
1210  A1_im += g11_re * a1_im;
1211  A1_im += g11_im * a1_re;
1212  A1_im += g12_re * a2_im;
1213  A1_im += g12_im * a2_re;
1214  spinorFloat B1_re = 0;
1215  B1_re += g10_re * b0_re;
1216  B1_re -= g10_im * b0_im;
1217  B1_re += g11_re * b1_re;
1218  B1_re -= g11_im * b1_im;
1219  B1_re += g12_re * b2_re;
1220  B1_re -= g12_im * b2_im;
1221  spinorFloat B1_im = 0;
1222  B1_im += g10_re * b0_im;
1223  B1_im += g10_im * b0_re;
1224  B1_im += g11_re * b1_im;
1225  B1_im += g11_im * b1_re;
1226  B1_im += g12_re * b2_im;
1227  B1_im += g12_im * b2_re;
1228 
1229  // multiply row 2
1230  spinorFloat A2_re = 0;
1231  A2_re += g20_re * a0_re;
1232  A2_re -= g20_im * a0_im;
1233  A2_re += g21_re * a1_re;
1234  A2_re -= g21_im * a1_im;
1235  A2_re += g22_re * a2_re;
1236  A2_re -= g22_im * a2_im;
1237  spinorFloat A2_im = 0;
1238  A2_im += g20_re * a0_im;
1239  A2_im += g20_im * a0_re;
1240  A2_im += g21_re * a1_im;
1241  A2_im += g21_im * a1_re;
1242  A2_im += g22_re * a2_im;
1243  A2_im += g22_im * a2_re;
1244  spinorFloat B2_re = 0;
1245  B2_re += g20_re * b0_re;
1246  B2_re -= g20_im * b0_im;
1247  B2_re += g21_re * b1_re;
1248  B2_re -= g21_im * b1_im;
1249  B2_re += g22_re * b2_re;
1250  B2_re -= g22_im * b2_im;
1251  spinorFloat B2_im = 0;
1252  B2_im += g20_re * b0_im;
1253  B2_im += g20_im * b0_re;
1254  B2_im += g21_re * b1_im;
1255  B2_im += g21_im * b1_re;
1256  B2_im += g22_re * b2_im;
1257  B2_im += g22_im * b2_re;
1258 
1259  o00_re += A0_re;
1260  o00_im += A0_im;
1261  o10_re += B0_re;
1262  o10_im += B0_im;
1263  o20_re -= B0_re;
1264  o20_im -= B0_im;
1265  o30_re += A0_re;
1266  o30_im += A0_im;
1267 
1268  o01_re += A1_re;
1269  o01_im += A1_im;
1270  o11_re += B1_re;
1271  o11_im += B1_im;
1272  o21_re -= B1_re;
1273  o21_im -= B1_im;
1274  o31_re += A1_re;
1275  o31_im += A1_im;
1276 
1277  o02_re += A2_re;
1278  o02_im += A2_im;
1279  o12_re += B2_re;
1280  o12_im += B2_im;
1281  o22_re -= B2_re;
1282  o22_im -= B2_im;
1283  o32_re += A2_re;
1284  o32_im += A2_im;
1285 
1286 }
1287 
// --- Y dimension, backward hop (dagger): gather psi(x - yhat), apply the
// --- spin projector P1- and U_y^dagger at the neighbour site, accumulate
// --- into the output spinor o??.  Auto-generated unrolled code: regenerate
// --- from the dslash generator rather than hand-editing the arithmetic.
// NOTE(review): this text is a doc extraction; some original lines were
// dropped (source numbering jumps 1314 -> 1321 and 1385 -> 1387, presumably
// the half-spinor declarations and the RECONSTRUCT_GAUGE_MATRIX invocation).
// Confirm against the generated file before treating this as compilable.
1288 #ifdef MULTI_GPU
1289 if ( (kernel_type == INTERIOR_KERNEL && (!param.ghostDim[1] || coord[1]>0)) ||
1290  (kernel_type == EXTERIOR_KERNEL_Y && coord[1]==0) )
1291 #endif
1292 {
1293  // Projector P1-
1294  // 1 0 0 -1
1295  // 0 1 1 0
1296  // 0 1 1 0
1297  // -1 0 0 1
1298 
// sp_idx: checkerboard (even/odd) index of the backward-Y neighbour, with
// lattice wrap-around at coord[1]==0; in the exterior kernel the neighbour
// is read from the ghost zone (ghostOffset slot [0] = backward face).
1299 #ifdef MULTI_GPU
1300  const int sp_idx = (kernel_type == INTERIOR_KERNEL) ? (coord[1]==0 ? X+param.dc.X2X1mX1 : X-param.dc.X[0]) >> 1 :
1301  face_idx + param.ghostOffset[static_cast<int>(kernel_type)][0];
1302 #if (DD_PREC==2) // half precision
1303  const int sp_norm_idx = face_idx + param.ghostNormOffset[static_cast<int>(kernel_type)][0];
1304 #endif
1305 #else
1306  const int sp_idx = (coord[1]==0 ? X+param.dc.X2X1mX1 : X-param.dc.X[0]) >> 1;
1307 #endif
1308 
// Backward hop uses the link stored at the neighbour site; for exterior
// kernels the link lives in the padded ghost-link region at Vh+face_idx.
1309 #ifdef MULTI_GPU
1310  const int ga_idx = ((kernel_type == INTERIOR_KERNEL) ? sp_idx : param.dc.Vh+face_idx);
1311 #else
1312  const int ga_idx = sp_idx;
1313 #endif
1314 
1321 
1322 #ifdef MULTI_GPU
1323  if (kernel_type == INTERIOR_KERNEL) {
1324 #endif
1325 
// Interior: a thread in the first y-row of the block has its neighbour
// outside the shared-memory tile, so it re-reads from global memory;
// all other threads reuse the spinor already staged in shared memory.
1326  if (threadIdx.y == 0 && blockDim.y < param.dc.X[1]) {
1327  // read spinor from device memory
1328  READ_SPINOR(SPINORTEX, param.sp_stride, sp_idx, sp_idx);
1329 
1330  // project spinor into half spinors
1331  a0_re = +i00_re-i30_re;
1332  a0_im = +i00_im-i30_im;
1333  a1_re = +i01_re-i31_re;
1334  a1_im = +i01_im-i31_im;
1335  a2_re = +i02_re-i32_re;
1336  a2_im = +i02_im-i32_im;
1337  b0_re = +i10_re+i20_re;
1338  b0_im = +i10_im+i20_im;
1339  b1_re = +i11_re+i21_re;
1340  b1_im = +i11_im+i21_im;
1341  b2_re = +i12_re+i22_re;
1342  b2_im = +i12_im+i22_im;
1343  } else {
1344  // load spinor from shared memory
// tx compensates for the x-checkerboard shift of the staged tile;
// ty wraps to the last row when reading the backward neighbour of row 0.
1345  int tx = (threadIdx.x + blockDim.x - ((coord[0]+1)&1)) % blockDim.x;
1346  int ty = (threadIdx.y > 0) ? threadIdx.y - 1 : blockDim.y - 1;
1347  READ_SPINOR_SHARED(tx, ty, threadIdx.z);
1348 
1349  // project spinor into half spinors
1350  a0_re = +i00_re-i30_re;
1351  a0_im = +i00_im-i30_im;
1352  a1_re = +i01_re-i31_re;
1353  a1_im = +i01_im-i31_im;
1354  a2_re = +i02_re-i32_re;
1355  a2_im = +i02_im-i32_im;
1356  b0_re = +i10_re+i20_re;
1357  b0_im = +i10_im+i20_im;
1358  b1_re = +i11_re+i21_re;
1359  b1_im = +i11_im+i21_im;
1360  b2_re = +i12_re+i22_re;
1361  b2_im = +i12_im+i22_im;
1362  }
1363 
1364 #ifdef MULTI_GPU
1365  } else {
1366 
1367  const int sp_stride_pad = param.dc.ghostFace[static_cast<int>(kernel_type)];
1368 
// Exterior: the neighbour arrived as an already-projected half spinor,
// so it is copied straight into (a,b) with no projection step.
1369  // read half spinor from device memory
1370  READ_SPINOR_GHOST(GHOSTSPINORTEX, sp_stride_pad, sp_idx, sp_norm_idx, 3);
1371 
1372  a0_re = i00_re; a0_im = i00_im;
1373  a1_re = i01_re; a1_im = i01_im;
1374  a2_re = i02_re; a2_im = i02_im;
1375  b0_re = i10_re; b0_im = i10_im;
1376  b1_re = i11_re; b1_im = i11_im;
1377  b2_re = i12_re; b2_im = i12_im;
1378 
1379  }
1380 #endif // MULTI_GPU
1381 
1382  // read gauge matrix from device memory
1383  READ_GAUGE_MATRIX(G, GAUGE1TEX, 3, ga_idx, param.gauge_stride);
1384 
1385  // reconstruct gauge matrix
1387 
// Multiply both half spinors by U^dagger: gTxy_* are the elements of the
// conjugate-transposed link provided by the gauge macros above.
1388  // multiply row 0
1389  spinorFloat A0_re = 0;
1390  A0_re += gT00_re * a0_re;
1391  A0_re -= gT00_im * a0_im;
1392  A0_re += gT01_re * a1_re;
1393  A0_re -= gT01_im * a1_im;
1394  A0_re += gT02_re * a2_re;
1395  A0_re -= gT02_im * a2_im;
1396  spinorFloat A0_im = 0;
1397  A0_im += gT00_re * a0_im;
1398  A0_im += gT00_im * a0_re;
1399  A0_im += gT01_re * a1_im;
1400  A0_im += gT01_im * a1_re;
1401  A0_im += gT02_re * a2_im;
1402  A0_im += gT02_im * a2_re;
1403  spinorFloat B0_re = 0;
1404  B0_re += gT00_re * b0_re;
1405  B0_re -= gT00_im * b0_im;
1406  B0_re += gT01_re * b1_re;
1407  B0_re -= gT01_im * b1_im;
1408  B0_re += gT02_re * b2_re;
1409  B0_re -= gT02_im * b2_im;
1410  spinorFloat B0_im = 0;
1411  B0_im += gT00_re * b0_im;
1412  B0_im += gT00_im * b0_re;
1413  B0_im += gT01_re * b1_im;
1414  B0_im += gT01_im * b1_re;
1415  B0_im += gT02_re * b2_im;
1416  B0_im += gT02_im * b2_re;
1417 
1418  // multiply row 1
1419  spinorFloat A1_re = 0;
1420  A1_re += gT10_re * a0_re;
1421  A1_re -= gT10_im * a0_im;
1422  A1_re += gT11_re * a1_re;
1423  A1_re -= gT11_im * a1_im;
1424  A1_re += gT12_re * a2_re;
1425  A1_re -= gT12_im * a2_im;
1426  spinorFloat A1_im = 0;
1427  A1_im += gT10_re * a0_im;
1428  A1_im += gT10_im * a0_re;
1429  A1_im += gT11_re * a1_im;
1430  A1_im += gT11_im * a1_re;
1431  A1_im += gT12_re * a2_im;
1432  A1_im += gT12_im * a2_re;
1433  spinorFloat B1_re = 0;
1434  B1_re += gT10_re * b0_re;
1435  B1_re -= gT10_im * b0_im;
1436  B1_re += gT11_re * b1_re;
1437  B1_re -= gT11_im * b1_im;
1438  B1_re += gT12_re * b2_re;
1439  B1_re -= gT12_im * b2_im;
1440  spinorFloat B1_im = 0;
1441  B1_im += gT10_re * b0_im;
1442  B1_im += gT10_im * b0_re;
1443  B1_im += gT11_re * b1_im;
1444  B1_im += gT11_im * b1_re;
1445  B1_im += gT12_re * b2_im;
1446  B1_im += gT12_im * b2_re;
1447 
1448  // multiply row 2
1449  spinorFloat A2_re = 0;
1450  A2_re += gT20_re * a0_re;
1451  A2_re -= gT20_im * a0_im;
1452  A2_re += gT21_re * a1_re;
1453  A2_re -= gT21_im * a1_im;
1454  A2_re += gT22_re * a2_re;
1455  A2_re -= gT22_im * a2_im;
1456  spinorFloat A2_im = 0;
1457  A2_im += gT20_re * a0_im;
1458  A2_im += gT20_im * a0_re;
1459  A2_im += gT21_re * a1_im;
1460  A2_im += gT21_im * a1_re;
1461  A2_im += gT22_re * a2_im;
1462  A2_im += gT22_im * a2_re;
1463  spinorFloat B2_re = 0;
1464  B2_re += gT20_re * b0_re;
1465  B2_re -= gT20_im * b0_im;
1466  B2_re += gT21_re * b1_re;
1467  B2_re -= gT21_im * b1_im;
1468  B2_re += gT22_re * b2_re;
1469  B2_re -= gT22_im * b2_im;
1470  spinorFloat B2_im = 0;
1471  B2_im += gT20_re * b0_im;
1472  B2_im += gT20_im * b0_re;
1473  B2_im += gT21_re * b1_im;
1474  B2_im += gT21_im * b1_re;
1475  B2_im += gT22_re * b2_im;
1476  B2_im += gT22_im * b2_re;
1477 
// Reconstruct the full spinor from the half spinors using the P1- column
// structure: rows 2 reuse +B and rows 3 reuse -A (matrix shown at the top).
1478  o00_re += A0_re;
1479  o00_im += A0_im;
1480  o10_re += B0_re;
1481  o10_im += B0_im;
1482  o20_re += B0_re;
1483  o20_im += B0_im;
1484  o30_re -= A0_re;
1485  o30_im -= A0_im;
1486 
1487  o01_re += A1_re;
1488  o01_im += A1_im;
1489  o11_re += B1_re;
1490  o11_im += B1_im;
1491  o21_re += B1_re;
1492  o21_im += B1_im;
1493  o31_re -= A1_re;
1494  o31_im -= A1_im;
1495 
1496  o02_re += A2_re;
1497  o02_im += A2_im;
1498  o12_re += B2_re;
1499  o12_im += B2_im;
1500  o22_re += B2_re;
1501  o22_im += B2_im;
1502  o32_re -= A2_re;
1503  o32_im -= A2_im;
1504 
1505 }
1506 
// --- Z dimension, forward hop (dagger): gather psi(x + zhat), apply the
// --- spin projector P2+ and U_z(x), accumulate into the output spinor.
// --- Auto-generated unrolled code: regenerate rather than hand-edit.
1507 #ifdef MULTI_GPU
1508 if ( (kernel_type == INTERIOR_KERNEL && (!param.ghostDim[2] || coord[2]<(param.dc.X[2]-1))) ||
1509  (kernel_type == EXTERIOR_KERNEL_Z && coord[2]==(param.dc.X[2]-1)) )
1510 #endif
1511 {
1512  // Projector P2+
1513  // 1 0 i 0
1514  // 0 1 0 -i
1515  // -i 0 1 0
1516  // 0 i 0 1
1517 
// sp_idx: checkerboard index of the forward-Z neighbour, wrapping at the
// lattice edge; exterior kernels read the ghost zone instead
// (ghostOffset slot [1] = forward face).
1518 #ifdef MULTI_GPU
1519  const int sp_idx = (kernel_type == INTERIOR_KERNEL) ? (coord[2]==(param.dc.X[2]-1) ? X-param.dc.X3X2X1mX2X1 : X+param.dc.X2X1) >> 1 :
1520  face_idx + param.ghostOffset[static_cast<int>(kernel_type)][1];
1521 #if (DD_PREC==2) // half precision
1522  const int sp_norm_idx = face_idx + param.ghostNormOffset[static_cast<int>(kernel_type)][1];
1523 #endif
1524 #else
1525  const int sp_idx = (coord[2]==(param.dc.X[2]-1) ? X-param.dc.X3X2X1mX2X1 : X+param.dc.X2X1) >> 1;
1526 #endif
1527 
// Forward hop uses the link stored at this site (sid).
1528  const int ga_idx = sid;
1529 
// NOTE(review): original source lines 1530-1535 were dropped by the doc
// extraction -- confirm against the generated file.
1536 
1537 #ifdef MULTI_GPU
1538  if (kernel_type == INTERIOR_KERNEL) {
1539 #endif
1540 
// Interior: last z-slice of the block has its neighbour outside the
// shared-memory tile, so it re-reads from global memory.
// NOTE(review): the extent check uses the legacy macro X3 here while the
// Y direction uses param.dc.X[1] -- presumably X3 == param.dc.X[2]; confirm.
1541  if (threadIdx.z == blockDim.z-1 && blockDim.z < X3) {
1542  // read spinor from device memory
1543  READ_SPINOR(SPINORTEX, param.sp_stride, sp_idx, sp_idx);
1544 
1545  // project spinor into half spinors
1546  a0_re = +i00_re-i20_im;
1547  a0_im = +i00_im+i20_re;
1548  a1_re = +i01_re-i21_im;
1549  a1_im = +i01_im+i21_re;
1550  a2_re = +i02_re-i22_im;
1551  a2_im = +i02_im+i22_re;
1552  b0_re = +i10_re+i30_im;
1553  b0_im = +i10_im-i30_re;
1554  b1_re = +i11_re+i31_im;
1555  b1_im = +i11_im-i31_re;
1556  b2_re = +i12_re+i32_im;
1557  b2_im = +i12_im-i32_re;
1558  } else {
1559  // load spinor from shared memory
1560  int tx = (threadIdx.x + blockDim.x - ((coord[0]+1)&1) ) % blockDim.x;
1561  int tz = (threadIdx.z < blockDim.z - 1) ? threadIdx.z + 1 : 0;
1562  READ_SPINOR_SHARED(tx, threadIdx.y, tz);
1563 
1564  // project spinor into half spinors
1565  a0_re = +i00_re-i20_im;
1566  a0_im = +i00_im+i20_re;
1567  a1_re = +i01_re-i21_im;
1568  a1_im = +i01_im+i21_re;
1569  a2_re = +i02_re-i22_im;
1570  a2_im = +i02_im+i22_re;
1571  b0_re = +i10_re+i30_im;
1572  b0_im = +i10_im-i30_re;
1573  b1_re = +i11_re+i31_im;
1574  b1_im = +i11_im-i31_re;
1575  b2_re = +i12_re+i32_im;
1576  b2_im = +i12_im-i32_re;
1577  }
1578 
1579 #ifdef MULTI_GPU
1580  } else {
1581 
1582  const int sp_stride_pad = param.dc.ghostFace[static_cast<int>(kernel_type)];
1583 
// Exterior: neighbour arrives pre-projected as a half spinor.
1584  // read half spinor from device memory
1585  READ_SPINOR_GHOST(GHOSTSPINORTEX, sp_stride_pad, sp_idx, sp_norm_idx, 4);
1586 
1587  a0_re = i00_re; a0_im = i00_im;
1588  a1_re = i01_re; a1_im = i01_im;
1589  a2_re = i02_re; a2_im = i02_im;
1590  b0_re = i10_re; b0_im = i10_im;
1591  b1_re = i11_re; b1_im = i11_im;
1592  b2_re = i12_re; b2_im = i12_im;
1593 
1594  }
1595 #endif // MULTI_GPU
1596 
1597  // read gauge matrix from device memory
1598  READ_GAUGE_MATRIX(G, GAUGE0TEX, 4, ga_idx, param.gauge_stride);
1599 
// NOTE(review): the RECONSTRUCT macro line (original 1601) was dropped by
// the doc extraction.
1600  // reconstruct gauge matrix
1602 
// Multiply both half spinors by the (un-daggered) link U for the forward hop.
1603  // multiply row 0
1604  spinorFloat A0_re = 0;
1605  A0_re += g00_re * a0_re;
1606  A0_re -= g00_im * a0_im;
1607  A0_re += g01_re * a1_re;
1608  A0_re -= g01_im * a1_im;
1609  A0_re += g02_re * a2_re;
1610  A0_re -= g02_im * a2_im;
1611  spinorFloat A0_im = 0;
1612  A0_im += g00_re * a0_im;
1613  A0_im += g00_im * a0_re;
1614  A0_im += g01_re * a1_im;
1615  A0_im += g01_im * a1_re;
1616  A0_im += g02_re * a2_im;
1617  A0_im += g02_im * a2_re;
1618  spinorFloat B0_re = 0;
1619  B0_re += g00_re * b0_re;
1620  B0_re -= g00_im * b0_im;
1621  B0_re += g01_re * b1_re;
1622  B0_re -= g01_im * b1_im;
1623  B0_re += g02_re * b2_re;
1624  B0_re -= g02_im * b2_im;
1625  spinorFloat B0_im = 0;
1626  B0_im += g00_re * b0_im;
1627  B0_im += g00_im * b0_re;
1628  B0_im += g01_re * b1_im;
1629  B0_im += g01_im * b1_re;
1630  B0_im += g02_re * b2_im;
1631  B0_im += g02_im * b2_re;
1632 
1633  // multiply row 1
1634  spinorFloat A1_re = 0;
1635  A1_re += g10_re * a0_re;
1636  A1_re -= g10_im * a0_im;
1637  A1_re += g11_re * a1_re;
1638  A1_re -= g11_im * a1_im;
1639  A1_re += g12_re * a2_re;
1640  A1_re -= g12_im * a2_im;
1641  spinorFloat A1_im = 0;
1642  A1_im += g10_re * a0_im;
1643  A1_im += g10_im * a0_re;
1644  A1_im += g11_re * a1_im;
1645  A1_im += g11_im * a1_re;
1646  A1_im += g12_re * a2_im;
1647  A1_im += g12_im * a2_re;
1648  spinorFloat B1_re = 0;
1649  B1_re += g10_re * b0_re;
1650  B1_re -= g10_im * b0_im;
1651  B1_re += g11_re * b1_re;
1652  B1_re -= g11_im * b1_im;
1653  B1_re += g12_re * b2_re;
1654  B1_re -= g12_im * b2_im;
1655  spinorFloat B1_im = 0;
1656  B1_im += g10_re * b0_im;
1657  B1_im += g10_im * b0_re;
1658  B1_im += g11_re * b1_im;
1659  B1_im += g11_im * b1_re;
1660  B1_im += g12_re * b2_im;
1661  B1_im += g12_im * b2_re;
1662 
1663  // multiply row 2
1664  spinorFloat A2_re = 0;
1665  A2_re += g20_re * a0_re;
1666  A2_re -= g20_im * a0_im;
1667  A2_re += g21_re * a1_re;
1668  A2_re -= g21_im * a1_im;
1669  A2_re += g22_re * a2_re;
1670  A2_re -= g22_im * a2_im;
1671  spinorFloat A2_im = 0;
1672  A2_im += g20_re * a0_im;
1673  A2_im += g20_im * a0_re;
1674  A2_im += g21_re * a1_im;
1675  A2_im += g21_im * a1_re;
1676  A2_im += g22_re * a2_im;
1677  A2_im += g22_im * a2_re;
1678  spinorFloat B2_re = 0;
1679  B2_re += g20_re * b0_re;
1680  B2_re -= g20_im * b0_im;
1681  B2_re += g21_re * b1_re;
1682  B2_re -= g21_im * b1_im;
1683  B2_re += g22_re * b2_re;
1684  B2_re -= g22_im * b2_im;
1685  spinorFloat B2_im = 0;
1686  B2_im += g20_re * b0_im;
1687  B2_im += g20_im * b0_re;
1688  B2_im += g21_re * b1_im;
1689  B2_im += g21_im * b1_re;
1690  B2_im += g22_re * b2_im;
1691  B2_im += g22_im * b2_re;
1692 
// Reconstruct using the P2+ column structure: row 2 gets -i*A and row 3
// gets +i*B (real/imag swapped with sign per the matrix at the top).
1693  o00_re += A0_re;
1694  o00_im += A0_im;
1695  o10_re += B0_re;
1696  o10_im += B0_im;
1697  o20_re += A0_im;
1698  o20_im -= A0_re;
1699  o30_re -= B0_im;
1700  o30_im += B0_re;
1701 
1702  o01_re += A1_re;
1703  o01_im += A1_im;
1704  o11_re += B1_re;
1705  o11_im += B1_im;
1706  o21_re += A1_im;
1707  o21_im -= A1_re;
1708  o31_re -= B1_im;
1709  o31_im += B1_re;
1710 
1711  o02_re += A2_re;
1712  o02_im += A2_im;
1713  o12_re += B2_re;
1714  o12_im += B2_im;
1715  o22_re += A2_im;
1716  o22_im -= A2_re;
1717  o32_re -= B2_im;
1718  o32_im += B2_re;
1719 
1720 }
1721 
// --- Z dimension, backward hop (dagger): gather psi(x - zhat), apply the
// --- spin projector P2- and U_z^dagger at the neighbour site, accumulate
// --- into the output spinor.  Auto-generated unrolled code.
1722 #ifdef MULTI_GPU
1723 if ( (kernel_type == INTERIOR_KERNEL && (!param.ghostDim[2] || coord[2]>0)) ||
1724  (kernel_type == EXTERIOR_KERNEL_Z && coord[2]==0) )
1725 #endif
1726 {
1727  // Projector P2-
1728  // 1 0 -i 0
1729  // 0 1 0 i
1730  // i 0 1 0
1731  // 0 -i 0 1
1732 
// sp_idx: checkerboard index of the backward-Z neighbour with wrap-around
// at coord[2]==0; exterior kernels use ghostOffset slot [0] (backward face).
1733 #ifdef MULTI_GPU
1734  const int sp_idx = (kernel_type == INTERIOR_KERNEL) ? (coord[2]==0 ? X+param.dc.X3X2X1mX2X1 : X-param.dc.X2X1) >> 1 :
1735  face_idx + param.ghostOffset[static_cast<int>(kernel_type)][0];
1736 #if (DD_PREC==2) // half precision
1737  const int sp_norm_idx = face_idx + param.ghostNormOffset[static_cast<int>(kernel_type)][0];
1738 #endif
1739 #else
1740  const int sp_idx = (coord[2]==0 ? X+param.dc.X3X2X1mX2X1 : X-param.dc.X2X1) >> 1;
1741 #endif
1742 
// Backward hop: link lives at the neighbour site, or in the padded
// ghost-link region Vh+face_idx for exterior kernels.
1743 #ifdef MULTI_GPU
1744  const int ga_idx = ((kernel_type == INTERIOR_KERNEL) ? sp_idx : param.dc.Vh+face_idx);
1745 #else
1746  const int ga_idx = sp_idx;
1747 #endif
1748 
// NOTE(review): original source lines 1749-1754 were dropped by the doc
// extraction -- confirm against the generated file.
1755 
1756 #ifdef MULTI_GPU
1757  if (kernel_type == INTERIOR_KERNEL) {
1758 #endif
1759 
// Interior: first z-slice of the block must re-read its neighbour from
// global memory; other threads reuse the shared-memory tile.
// NOTE(review): legacy macro X3 used for the extent check -- presumably
// X3 == param.dc.X[2]; confirm.
1760  if (threadIdx.z == 0 && blockDim.z < X3) {
1761  // read spinor from device memory
1762  READ_SPINOR(SPINORTEX, param.sp_stride, sp_idx, sp_idx);
1763 
1764  // project spinor into half spinors
1765  a0_re = +i00_re+i20_im;
1766  a0_im = +i00_im-i20_re;
1767  a1_re = +i01_re+i21_im;
1768  a1_im = +i01_im-i21_re;
1769  a2_re = +i02_re+i22_im;
1770  a2_im = +i02_im-i22_re;
1771  b0_re = +i10_re-i30_im;
1772  b0_im = +i10_im+i30_re;
1773  b1_re = +i11_re-i31_im;
1774  b1_im = +i11_im+i31_re;
1775  b2_re = +i12_re-i32_im;
1776  b2_im = +i12_im+i32_re;
1777  } else {
1778  // load spinor from shared memory
1779  int tx = (threadIdx.x + blockDim.x - ((coord[0]+1)&1)) % blockDim.x;
1780  int tz = (threadIdx.z > 0) ? threadIdx.z - 1 : blockDim.z - 1;
1781  READ_SPINOR_SHARED(tx, threadIdx.y, tz);
1782 
1783  // project spinor into half spinors
1784  a0_re = +i00_re+i20_im;
1785  a0_im = +i00_im-i20_re;
1786  a1_re = +i01_re+i21_im;
1787  a1_im = +i01_im-i21_re;
1788  a2_re = +i02_re+i22_im;
1789  a2_im = +i02_im-i22_re;
1790  b0_re = +i10_re-i30_im;
1791  b0_im = +i10_im+i30_re;
1792  b1_re = +i11_re-i31_im;
1793  b1_im = +i11_im+i31_re;
1794  b2_re = +i12_re-i32_im;
1795  b2_im = +i12_im+i32_re;
1796  }
1797 
1798 #ifdef MULTI_GPU
1799  } else {
1800 
1801  const int sp_stride_pad = param.dc.ghostFace[static_cast<int>(kernel_type)];
1802 
// Exterior: neighbour arrives pre-projected as a half spinor.
1803  // read half spinor from device memory
1804  READ_SPINOR_GHOST(GHOSTSPINORTEX, sp_stride_pad, sp_idx, sp_norm_idx, 5);
1805 
1806  a0_re = i00_re; a0_im = i00_im;
1807  a1_re = i01_re; a1_im = i01_im;
1808  a2_re = i02_re; a2_im = i02_im;
1809  b0_re = i10_re; b0_im = i10_im;
1810  b1_re = i11_re; b1_im = i11_im;
1811  b2_re = i12_re; b2_im = i12_im;
1812 
1813  }
1814 #endif // MULTI_GPU
1815 
1816  // read gauge matrix from device memory
1817  READ_GAUGE_MATRIX(G, GAUGE1TEX, 5, ga_idx, param.gauge_stride);
1818 
// NOTE(review): the RECONSTRUCT macro line (original 1820) was dropped by
// the doc extraction.
1819  // reconstruct gauge matrix
1821 
// Multiply both half spinors by U^dagger (gT elements) for the backward hop.
1822  // multiply row 0
1823  spinorFloat A0_re = 0;
1824  A0_re += gT00_re * a0_re;
1825  A0_re -= gT00_im * a0_im;
1826  A0_re += gT01_re * a1_re;
1827  A0_re -= gT01_im * a1_im;
1828  A0_re += gT02_re * a2_re;
1829  A0_re -= gT02_im * a2_im;
1830  spinorFloat A0_im = 0;
1831  A0_im += gT00_re * a0_im;
1832  A0_im += gT00_im * a0_re;
1833  A0_im += gT01_re * a1_im;
1834  A0_im += gT01_im * a1_re;
1835  A0_im += gT02_re * a2_im;
1836  A0_im += gT02_im * a2_re;
1837  spinorFloat B0_re = 0;
1838  B0_re += gT00_re * b0_re;
1839  B0_re -= gT00_im * b0_im;
1840  B0_re += gT01_re * b1_re;
1841  B0_re -= gT01_im * b1_im;
1842  B0_re += gT02_re * b2_re;
1843  B0_re -= gT02_im * b2_im;
1844  spinorFloat B0_im = 0;
1845  B0_im += gT00_re * b0_im;
1846  B0_im += gT00_im * b0_re;
1847  B0_im += gT01_re * b1_im;
1848  B0_im += gT01_im * b1_re;
1849  B0_im += gT02_re * b2_im;
1850  B0_im += gT02_im * b2_re;
1851 
1852  // multiply row 1
1853  spinorFloat A1_re = 0;
1854  A1_re += gT10_re * a0_re;
1855  A1_re -= gT10_im * a0_im;
1856  A1_re += gT11_re * a1_re;
1857  A1_re -= gT11_im * a1_im;
1858  A1_re += gT12_re * a2_re;
1859  A1_re -= gT12_im * a2_im;
1860  spinorFloat A1_im = 0;
1861  A1_im += gT10_re * a0_im;
1862  A1_im += gT10_im * a0_re;
1863  A1_im += gT11_re * a1_im;
1864  A1_im += gT11_im * a1_re;
1865  A1_im += gT12_re * a2_im;
1866  A1_im += gT12_im * a2_re;
1867  spinorFloat B1_re = 0;
1868  B1_re += gT10_re * b0_re;
1869  B1_re -= gT10_im * b0_im;
1870  B1_re += gT11_re * b1_re;
1871  B1_re -= gT11_im * b1_im;
1872  B1_re += gT12_re * b2_re;
1873  B1_re -= gT12_im * b2_im;
1874  spinorFloat B1_im = 0;
1875  B1_im += gT10_re * b0_im;
1876  B1_im += gT10_im * b0_re;
1877  B1_im += gT11_re * b1_im;
1878  B1_im += gT11_im * b1_re;
1879  B1_im += gT12_re * b2_im;
1880  B1_im += gT12_im * b2_re;
1881 
1882  // multiply row 2
1883  spinorFloat A2_re = 0;
1884  A2_re += gT20_re * a0_re;
1885  A2_re -= gT20_im * a0_im;
1886  A2_re += gT21_re * a1_re;
1887  A2_re -= gT21_im * a1_im;
1888  A2_re += gT22_re * a2_re;
1889  A2_re -= gT22_im * a2_im;
1890  spinorFloat A2_im = 0;
1891  A2_im += gT20_re * a0_im;
1892  A2_im += gT20_im * a0_re;
1893  A2_im += gT21_re * a1_im;
1894  A2_im += gT21_im * a1_re;
1895  A2_im += gT22_re * a2_im;
1896  A2_im += gT22_im * a2_re;
1897  spinorFloat B2_re = 0;
1898  B2_re += gT20_re * b0_re;
1899  B2_re -= gT20_im * b0_im;
1900  B2_re += gT21_re * b1_re;
1901  B2_re -= gT21_im * b1_im;
1902  B2_re += gT22_re * b2_re;
1903  B2_re -= gT22_im * b2_im;
1904  spinorFloat B2_im = 0;
1905  B2_im += gT20_re * b0_im;
1906  B2_im += gT20_im * b0_re;
1907  B2_im += gT21_re * b1_im;
1908  B2_im += gT21_im * b1_re;
1909  B2_im += gT22_re * b2_im;
1910  B2_im += gT22_im * b2_re;
1911 
// Reconstruct using the P2- column structure: row 2 gets +i*A and row 3
// gets -i*B (opposite signs to the P2+ branch above).
1912  o00_re += A0_re;
1913  o00_im += A0_im;
1914  o10_re += B0_re;
1915  o10_im += B0_im;
1916  o20_re -= A0_im;
1917  o20_im += A0_re;
1918  o30_re += B0_im;
1919  o30_im -= B0_re;
1920 
1921  o01_re += A1_re;
1922  o01_im += A1_im;
1923  o11_re += B1_re;
1924  o11_im += B1_im;
1925  o21_re -= A1_im;
1926  o21_im += A1_re;
1927  o31_re += B1_im;
1928  o31_im -= B1_re;
1929 
1930  o02_re += A2_re;
1931  o02_im += A2_im;
1932  o12_re += B2_re;
1933  o12_im += B2_im;
1934  o22_re -= A2_im;
1935  o22_im += A2_re;
1936  o32_re += B2_im;
1937  o32_im -= B2_re;
1938 
1939 }
1940 
// --- T dimension, forward hop (dagger): gather psi(x + that), apply the
// --- spin projector P3+ (which zeroes the lower two spin components, so
// --- only the upper half spinor is read/written) and U_t(x), accumulate.
// --- A temporal-gauge-fixed fast path skips the link multiply entirely.
// --- Auto-generated unrolled code: regenerate rather than hand-edit.
// NOTE(review): the doc extraction dropped several original lines in this
// section (numbering jumps 1965->1972, 2013->2020, 2036->2043, 2088 missing);
// the missing lines are presumably the half-spinor declarations, the
// identity-gauge assignments A?_?? = a?_??; B?_?? = b?_??;, and the
// RECONSTRUCT macro -- confirm against the generated file; the accumulation
// below reads A0_re etc., which must be defined by those stripped lines.
1941 #ifdef MULTI_GPU
1942 if ( (kernel_type == INTERIOR_KERNEL && (!param.ghostDim[3] || coord[3]<(param.dc.X[3]-1))) ||
1943  (kernel_type == EXTERIOR_KERNEL_T && coord[3]==(param.dc.X[3]-1)) )
1944 #endif
1945 {
1946  // Projector P3+
1947  // 2 0 0 0
1948  // 0 2 0 0
1949  // 0 0 0 0
1950  // 0 0 0 0
1951 
// sp_idx: checkerboard index of the forward-T neighbour with wrap-around
// at the temporal boundary; exterior kernels read the forward ghost face.
1952 #ifdef MULTI_GPU
1953  const int sp_idx = (kernel_type == INTERIOR_KERNEL) ? (coord[3]==(param.dc.X[3]-1) ? X-param.dc.X4X3X2X1mX3X2X1 : X+param.dc.X3X2X1) >> 1 :
1954  face_idx + param.ghostOffset[static_cast<int>(kernel_type)][1];
1955 #if (DD_PREC==2) // half precision
1956  const int sp_norm_idx = face_idx + param.ghostNormOffset[static_cast<int>(kernel_type)][1];
1957 #endif
1958 #else
1959  const int sp_idx = (coord[3]==(param.dc.X[3]-1) ? X-param.dc.X4X3X2X1mX3X2X1 : X+param.dc.X3X2X1) >> 1;
1960 #endif
1961 
1962  const int ga_idx = sid;
1963 
// Temporal gauge fixing: links are the identity everywhere except on the
// last time slice, so sites below that slice skip the SU(3) multiply.
1964  if (param.gauge_fixed && ga_idx < param.dc.X4X3X2X1hmX3X2X1h)
1965  {
1972 
1973 #ifdef MULTI_GPU
1974  if (kernel_type == INTERIOR_KERNEL) {
1975 #endif
1976 
// Only the upper half spinor is needed (P3+ zeroes components 2,3),
// hence the lighter READ_SPINOR_UP.
1977  // read spinor from device memory
1978  READ_SPINOR_UP(SPINORTEX, param.sp_stride, sp_idx, sp_idx);
1979 
1980  // project spinor into half spinors
1981  a0_re = +2*i00_re;
1982  a0_im = +2*i00_im;
1983  a1_re = +2*i01_re;
1984  a1_im = +2*i01_im;
1985  a2_re = +2*i02_re;
1986  a2_im = +2*i02_im;
1987  b0_re = +2*i10_re;
1988  b0_im = +2*i10_im;
1989  b1_re = +2*i11_re;
1990  b1_im = +2*i11_im;
1991  b2_re = +2*i12_re;
1992  b2_im = +2*i12_im;
1993 
1994 #ifdef MULTI_GPU
1995  } else {
1996 
1997  const int sp_stride_pad = param.dc.ghostFace[static_cast<int>(kernel_type)];
// t_proj_scale absorbs the factor of 2 handling between interior and
// exterior projection conventions (value supplied by the TPROJSCALE macro).
1998  const int t_proj_scale = TPROJSCALE;
1999 
2000  // read half spinor from device memory
2001  READ_SPINOR_GHOST(GHOSTSPINORTEX, sp_stride_pad, sp_idx, sp_norm_idx, 6);
2002 
2003  a0_re = t_proj_scale*i00_re; a0_im = t_proj_scale*i00_im;
2004  a1_re = t_proj_scale*i01_re; a1_im = t_proj_scale*i01_im;
2005  a2_re = t_proj_scale*i02_re; a2_im = t_proj_scale*i02_im;
2006  b0_re = t_proj_scale*i10_re; b0_im = t_proj_scale*i10_im;
2007  b1_re = t_proj_scale*i11_re; b1_im = t_proj_scale*i11_im;
2008  b2_re = t_proj_scale*i12_re; b2_im = t_proj_scale*i12_im;
2009 
2010  }
2011 #endif // MULTI_GPU
2012 
// Gauge-fixed path: link is the identity, so A = a and B = b (the
// assignments were on the extraction-stripped lines 2014-2019).
2013  // identity gauge matrix
2020 
2021  o00_re += A0_re;
2022  o00_im += A0_im;
2023  o10_re += B0_re;
2024  o10_im += B0_im;
2025 
2026  o01_re += A1_re;
2027  o01_im += A1_im;
2028  o11_re += B1_re;
2029  o11_im += B1_im;
2030 
2031  o02_re += A2_re;
2032  o02_im += A2_im;
2033  o12_re += B2_re;
2034  o12_im += B2_im;
2035 
2036  } else {
2043 
2044 #ifdef MULTI_GPU
2045  if (kernel_type == INTERIOR_KERNEL) {
2046 #endif
2047 
2048  // read spinor from device memory
2049  READ_SPINOR_UP(SPINORTEX, param.sp_stride, sp_idx, sp_idx);
2050 
2051  // project spinor into half spinors
2052  a0_re = +2*i00_re;
2053  a0_im = +2*i00_im;
2054  a1_re = +2*i01_re;
2055  a1_im = +2*i01_im;
2056  a2_re = +2*i02_re;
2057  a2_im = +2*i02_im;
2058  b0_re = +2*i10_re;
2059  b0_im = +2*i10_im;
2060  b1_re = +2*i11_re;
2061  b1_im = +2*i11_im;
2062  b2_re = +2*i12_re;
2063  b2_im = +2*i12_im;
2064 
2065 #ifdef MULTI_GPU
2066  } else {
2067 
2068  const int sp_stride_pad = param.dc.ghostFace[static_cast<int>(kernel_type)];
2069  const int t_proj_scale = TPROJSCALE;
2070 
2071  // read half spinor from device memory
2072  READ_SPINOR_GHOST(GHOSTSPINORTEX, sp_stride_pad, sp_idx, sp_norm_idx, 6);
2073 
2074  a0_re = t_proj_scale*i00_re; a0_im = t_proj_scale*i00_im;
2075  a1_re = t_proj_scale*i01_re; a1_im = t_proj_scale*i01_im;
2076  a2_re = t_proj_scale*i02_re; a2_im = t_proj_scale*i02_im;
2077  b0_re = t_proj_scale*i10_re; b0_im = t_proj_scale*i10_im;
2078  b1_re = t_proj_scale*i11_re; b1_im = t_proj_scale*i11_im;
2079  b2_re = t_proj_scale*i12_re; b2_im = t_proj_scale*i12_im;
2080 
2081  }
2082 #endif // MULTI_GPU
2083 
2084  // read gauge matrix from device memory
2085  READ_GAUGE_MATRIX(G, GAUGE0TEX, 6, ga_idx, param.gauge_stride);
2086 
2087  // reconstruct gauge matrix
2089 
// General path: full SU(3) multiply of both half spinors by U.
2090  // multiply row 0
2091  spinorFloat A0_re = 0;
2092  A0_re += g00_re * a0_re;
2093  A0_re -= g00_im * a0_im;
2094  A0_re += g01_re * a1_re;
2095  A0_re -= g01_im * a1_im;
2096  A0_re += g02_re * a2_re;
2097  A0_re -= g02_im * a2_im;
2098  spinorFloat A0_im = 0;
2099  A0_im += g00_re * a0_im;
2100  A0_im += g00_im * a0_re;
2101  A0_im += g01_re * a1_im;
2102  A0_im += g01_im * a1_re;
2103  A0_im += g02_re * a2_im;
2104  A0_im += g02_im * a2_re;
2105  spinorFloat B0_re = 0;
2106  B0_re += g00_re * b0_re;
2107  B0_re -= g00_im * b0_im;
2108  B0_re += g01_re * b1_re;
2109  B0_re -= g01_im * b1_im;
2110  B0_re += g02_re * b2_re;
2111  B0_re -= g02_im * b2_im;
2112  spinorFloat B0_im = 0;
2113  B0_im += g00_re * b0_im;
2114  B0_im += g00_im * b0_re;
2115  B0_im += g01_re * b1_im;
2116  B0_im += g01_im * b1_re;
2117  B0_im += g02_re * b2_im;
2118  B0_im += g02_im * b2_re;
2119 
2120  // multiply row 1
2121  spinorFloat A1_re = 0;
2122  A1_re += g10_re * a0_re;
2123  A1_re -= g10_im * a0_im;
2124  A1_re += g11_re * a1_re;
2125  A1_re -= g11_im * a1_im;
2126  A1_re += g12_re * a2_re;
2127  A1_re -= g12_im * a2_im;
2128  spinorFloat A1_im = 0;
2129  A1_im += g10_re * a0_im;
2130  A1_im += g10_im * a0_re;
2131  A1_im += g11_re * a1_im;
2132  A1_im += g11_im * a1_re;
2133  A1_im += g12_re * a2_im;
2134  A1_im += g12_im * a2_re;
2135  spinorFloat B1_re = 0;
2136  B1_re += g10_re * b0_re;
2137  B1_re -= g10_im * b0_im;
2138  B1_re += g11_re * b1_re;
2139  B1_re -= g11_im * b1_im;
2140  B1_re += g12_re * b2_re;
2141  B1_re -= g12_im * b2_im;
2142  spinorFloat B1_im = 0;
2143  B1_im += g10_re * b0_im;
2144  B1_im += g10_im * b0_re;
2145  B1_im += g11_re * b1_im;
2146  B1_im += g11_im * b1_re;
2147  B1_im += g12_re * b2_im;
2148  B1_im += g12_im * b2_re;
2149 
2150  // multiply row 2
2151  spinorFloat A2_re = 0;
2152  A2_re += g20_re * a0_re;
2153  A2_re -= g20_im * a0_im;
2154  A2_re += g21_re * a1_re;
2155  A2_re -= g21_im * a1_im;
2156  A2_re += g22_re * a2_re;
2157  A2_re -= g22_im * a2_im;
2158  spinorFloat A2_im = 0;
2159  A2_im += g20_re * a0_im;
2160  A2_im += g20_im * a0_re;
2161  A2_im += g21_re * a1_im;
2162  A2_im += g21_im * a1_re;
2163  A2_im += g22_re * a2_im;
2164  A2_im += g22_im * a2_re;
2165  spinorFloat B2_re = 0;
2166  B2_re += g20_re * b0_re;
2167  B2_re -= g20_im * b0_im;
2168  B2_re += g21_re * b1_re;
2169  B2_re -= g21_im * b1_im;
2170  B2_re += g22_re * b2_re;
2171  B2_re -= g22_im * b2_im;
2172  spinorFloat B2_im = 0;
2173  B2_im += g20_re * b0_im;
2174  B2_im += g20_im * b0_re;
2175  B2_im += g21_re * b1_im;
2176  B2_im += g21_im * b1_re;
2177  B2_im += g22_re * b2_im;
2178  B2_im += g22_im * b2_re;
2179 
// P3+ writes only the upper two spin components (lower half is zero).
2180  o00_re += A0_re;
2181  o00_im += A0_im;
2182  o10_re += B0_re;
2183  o10_im += B0_im;
2184 
2185  o01_re += A1_re;
2186  o01_im += A1_im;
2187  o11_re += B1_re;
2188  o11_im += B1_im;
2189 
2190  o02_re += A2_re;
2191  o02_im += A2_im;
2192  o12_re += B2_re;
2193  o12_im += B2_im;
2194 
2195  }
2196 }
2197 
2198 #ifdef MULTI_GPU
2199 if ( (kernel_type == INTERIOR_KERNEL && (!param.ghostDim[3] || coord[3]>0)) ||
2200  (kernel_type == EXTERIOR_KERNEL_T && coord[3]==0) )
2201 #endif
2202 {
2203  // Projector P3-
2204  // 0 0 0 0
2205  // 0 0 0 0
2206  // 0 0 2 0
2207  // 0 0 0 2
2208 
2209 #ifdef MULTI_GPU
2210  const int sp_idx = (kernel_type == INTERIOR_KERNEL) ? (coord[3]==0 ? X+param.dc.X4X3X2X1mX3X2X1 : X-param.dc.X3X2X1) >> 1 :
2211  face_idx + param.ghostOffset[static_cast<int>(kernel_type)][0];
2212 #if (DD_PREC==2) // half precision
2213  const int sp_norm_idx = face_idx + param.ghostNormOffset[static_cast<int>(kernel_type)][0];
2214 #endif
2215 #else
2216  const int sp_idx = (coord[3]==0 ? X+param.dc.X4X3X2X1mX3X2X1 : X-param.dc.X3X2X1) >> 1;
2217 #endif
2218 
2219 #ifdef MULTI_GPU
2220  const int ga_idx = ((kernel_type == INTERIOR_KERNEL) ? sp_idx : param.dc.Vh+face_idx);
2221 #else
2222  const int ga_idx = sp_idx;
2223 #endif
2224 
// --- Backward-T hop, temporal-gauge-fixed fast path -----------------------
// When the gauge is fixed and this link lies in the bulk (index below
// X4X3X2X1hmX3X2X1h, i.e. not on the final time slice), the temporal link is
// the identity, so the gauge-matrix load and multiply are skipped entirely.
2225  if (param.gauge_fixed && ga_idx < param.dc.X4X3X2X1hmX3X2X1h)
2226  {
// NOTE(review): listing lines 2227-2232 are elided in this excerpt
// (presumably the a0..b2 half-spinor declarations) -- see the full file.
2233 
2234 #ifdef MULTI_GPU
2235  if (kernel_type == INTERIOR_KERNEL) {
2236 #endif
2237 
2238  // read spinor from device memory
// NOTE(review): the READ_SPINOR call itself (listing lines 2239-2240) is
// elided from this excerpt.
2240 
// Dagger projector for the backward-T direction: (1 + gamma_4) in this basis
// zeroes the upper two spin components and doubles the lower two, so only
// i2*/i3* contribute.
2241  // project spinor into half spinors
2242  a0_re = +2*i20_re;
2243  a0_im = +2*i20_im;
2244  a1_re = +2*i21_re;
2245  a1_im = +2*i21_im;
2246  a2_re = +2*i22_re;
2247  a2_im = +2*i22_im;
2248  b0_re = +2*i30_re;
2249  b0_im = +2*i30_im;
2250  b1_re = +2*i31_re;
2251  b1_im = +2*i31_im;
2252  b2_re = +2*i32_re;
2253  b2_im = +2*i32_im;
2254 
2255 #ifdef MULTI_GPU
// Exterior (halo) path: the neighbor lives on another GPU, so read the
// already-projected half spinor from the ghost buffer instead.
2256  } else {
2257 
2258  const int sp_stride_pad = param.dc.ghostFace[static_cast<int>(kernel_type)];
// NOTE(review): declared int -- TPROJSCALE is expected to expand to an
// integral value here (e.g. 1 or 2); a fractional scale would be silently
// truncated. Confirm against the macro's definition in the dslash headers.
2259  const int t_proj_scale = TPROJSCALE;
2260 
// Face direction argument 7 = backward T; sp_norm_idx is only meaningful in
// half precision (see its guarded definition above).
2261  // read half spinor from device memory
2262  READ_SPINOR_GHOST(GHOSTSPINORTEX, sp_stride_pad, sp_idx, sp_norm_idx, 7);
2263 
// Ghost half spinors arrive pre-projected in components i0*/i1*; only a
// rescale by t_proj_scale is needed.
2264  a0_re = t_proj_scale*i00_re; a0_im = t_proj_scale*i00_im;
2265  a1_re = t_proj_scale*i01_re; a1_im = t_proj_scale*i01_im;
2266  a2_re = t_proj_scale*i02_re; a2_im = t_proj_scale*i02_im;
2267  b0_re = t_proj_scale*i10_re; b0_im = t_proj_scale*i10_im;
2268  b1_re = t_proj_scale*i11_re; b1_im = t_proj_scale*i11_im;
2269  b2_re = t_proj_scale*i12_re; b2_im = t_proj_scale*i12_im;
2270 
2271  }
2272 #endif // MULTI_GPU
2273 
// Identity link: A = a, B = b with no SU(3) multiply. NOTE(review): the
// assignments themselves (listing lines 2275-2280) are elided in this
// excerpt; only the accumulation into the output spinor is visible below.
2274  // identity gauge matrix
2281 
// Accumulate the reconstructed lower spin components into the output spinor
// (dagger projector contributes only to o2*/o3* for this direction).
2282  o20_re += A0_re;
2283  o20_im += A0_im;
2284  o30_re += B0_re;
2285  o30_im += B0_im;
2286 
2287  o21_re += A1_re;
2288  o21_im += A1_im;
2289  o31_re += B1_re;
2290  o31_im += B1_im;
2291 
2292  o22_re += A2_re;
2293  o22_im += A2_im;
2294  o32_re += B2_re;
2295  o32_im += B2_im;
2296 
// --- Backward-T hop, generic path (link is not identity) ------------------
// Same projection as the gauge-fixed branch above (duplication is generator
// output), followed by multiplication with the conjugate transpose of the
// stored link U (the gT** entries), as required for a backward hop.
2297  } else {
2304 
2305 #ifdef MULTI_GPU
2306  if (kernel_type == INTERIOR_KERNEL) {
2307 #endif
2308 
2309  // read spinor from device memory
// NOTE(review): the READ_SPINOR call (listing line 2310) is elided here.
2311 
// (1 + gamma_4) dagger projection: only the lower spin components survive,
// doubled.
2312  // project spinor into half spinors
2313  a0_re = +2*i20_re;
2314  a0_im = +2*i20_im;
2315  a1_re = +2*i21_re;
2316  a1_im = +2*i21_im;
2317  a2_re = +2*i22_re;
2318  a2_im = +2*i22_im;
2319  b0_re = +2*i30_re;
2320  b0_im = +2*i30_im;
2321  b1_re = +2*i31_re;
2322  b1_im = +2*i31_im;
2323  b2_re = +2*i32_re;
2324  b2_im = +2*i32_im;
2325 
2326 #ifdef MULTI_GPU
// Halo path: pre-projected half spinor from the ghost buffer (see the
// gauge-fixed branch for the same pattern).
2327  } else {
2328 
2329  const int sp_stride_pad = param.dc.ghostFace[static_cast<int>(kernel_type)];
// NOTE(review): int declaration truncates a fractional TPROJSCALE -- assumed
// integral; confirm against the macro definition.
2330  const int t_proj_scale = TPROJSCALE;
2331 
2332  // read half spinor from device memory
2333  READ_SPINOR_GHOST(GHOSTSPINORTEX, sp_stride_pad, sp_idx, sp_norm_idx, 7);
2334 
2335  a0_re = t_proj_scale*i00_re; a0_im = t_proj_scale*i00_im;
2336  a1_re = t_proj_scale*i01_re; a1_im = t_proj_scale*i01_im;
2337  a2_re = t_proj_scale*i02_re; a2_im = t_proj_scale*i02_im;
2338  b0_re = t_proj_scale*i10_re; b0_im = t_proj_scale*i10_im;
2339  b1_re = t_proj_scale*i11_re; b1_im = t_proj_scale*i11_im;
2340  b2_re = t_proj_scale*i12_re; b2_im = t_proj_scale*i12_im;
2341 
2342  }
2343 #endif // MULTI_GPU
2344 
// Direction argument 7 = backward T; GAUGE1TEX holds the odd-parity links.
2345  // read gauge matrix from device memory
2346  READ_GAUGE_MATRIX(G, GAUGE1TEX, 7, ga_idx, param.gauge_stride);
2347 
// NOTE(review): the RECONSTRUCT_GAUGE_MATRIX call (listing line 2349) is
// elided here; it rebuilds the full SU(3) matrix from the compressed storage.
2348  // reconstruct gauge matrix
2350 
// A = U^dagger * a, B = U^dagger * b. Each +/- pair below is one complex
// multiply-accumulate: Re += gRe*xRe - gIm*xIm, Im += gRe*xIm + gIm*xRe,
// using the transposed-entry macros gT** so the net effect is conj(U)^T.
2351  // multiply row 0
2352  spinorFloat A0_re = 0;
2353  A0_re += gT00_re * a0_re;
2354  A0_re -= gT00_im * a0_im;
2355  A0_re += gT01_re * a1_re;
2356  A0_re -= gT01_im * a1_im;
2357  A0_re += gT02_re * a2_re;
2358  A0_re -= gT02_im * a2_im;
2359  spinorFloat A0_im = 0;
2360  A0_im += gT00_re * a0_im;
2361  A0_im += gT00_im * a0_re;
2362  A0_im += gT01_re * a1_im;
2363  A0_im += gT01_im * a1_re;
2364  A0_im += gT02_re * a2_im;
2365  A0_im += gT02_im * a2_re;
2366  spinorFloat B0_re = 0;
2367  B0_re += gT00_re * b0_re;
2368  B0_re -= gT00_im * b0_im;
2369  B0_re += gT01_re * b1_re;
2370  B0_re -= gT01_im * b1_im;
2371  B0_re += gT02_re * b2_re;
2372  B0_re -= gT02_im * b2_im;
2373  spinorFloat B0_im = 0;
2374  B0_im += gT00_re * b0_im;
2375  B0_im += gT00_im * b0_re;
2376  B0_im += gT01_re * b1_im;
2377  B0_im += gT01_im * b1_re;
2378  B0_im += gT02_re * b2_im;
2379  B0_im += gT02_im * b2_re;
2380 
2381  // multiply row 1
2382  spinorFloat A1_re = 0;
2383  A1_re += gT10_re * a0_re;
2384  A1_re -= gT10_im * a0_im;
2385  A1_re += gT11_re * a1_re;
2386  A1_re -= gT11_im * a1_im;
2387  A1_re += gT12_re * a2_re;
2388  A1_re -= gT12_im * a2_im;
2389  spinorFloat A1_im = 0;
2390  A1_im += gT10_re * a0_im;
2391  A1_im += gT10_im * a0_re;
2392  A1_im += gT11_re * a1_im;
2393  A1_im += gT11_im * a1_re;
2394  A1_im += gT12_re * a2_im;
2395  A1_im += gT12_im * a2_re;
2396  spinorFloat B1_re = 0;
2397  B1_re += gT10_re * b0_re;
2398  B1_re -= gT10_im * b0_im;
2399  B1_re += gT11_re * b1_re;
2400  B1_re -= gT11_im * b1_im;
2401  B1_re += gT12_re * b2_re;
2402  B1_re -= gT12_im * b2_im;
2403  spinorFloat B1_im = 0;
2404  B1_im += gT10_re * b0_im;
2405  B1_im += gT10_im * b0_re;
2406  B1_im += gT11_re * b1_im;
2407  B1_im += gT11_im * b1_re;
2408  B1_im += gT12_re * b2_im;
2409  B1_im += gT12_im * b2_re;
2410 
2411  // multiply row 2
2412  spinorFloat A2_re = 0;
2413  A2_re += gT20_re * a0_re;
2414  A2_re -= gT20_im * a0_im;
2415  A2_re += gT21_re * a1_re;
2416  A2_re -= gT21_im * a1_im;
2417  A2_re += gT22_re * a2_re;
2418  A2_re -= gT22_im * a2_im;
2419  spinorFloat A2_im = 0;
2420  A2_im += gT20_re * a0_im;
2421  A2_im += gT20_im * a0_re;
2422  A2_im += gT21_re * a1_im;
2423  A2_im += gT21_im * a1_re;
2424  A2_im += gT22_re * a2_im;
2425  A2_im += gT22_im * a2_re;
2426  spinorFloat B2_re = 0;
2427  B2_re += gT20_re * b0_re;
2428  B2_re -= gT20_im * b0_im;
2429  B2_re += gT21_re * b1_re;
2430  B2_re -= gT21_im * b1_im;
2431  B2_re += gT22_re * b2_re;
2432  B2_re -= gT22_im * b2_im;
2433  spinorFloat B2_im = 0;
2434  B2_im += gT20_re * b0_im;
2435  B2_im += gT20_im * b0_re;
2436  B2_im += gT21_re * b1_im;
2437  B2_im += gT21_im * b1_re;
2438  B2_im += gT22_re * b2_im;
2439  B2_im += gT22_im * b2_re;
2440 
// Accumulate into the lower spin components of the output spinor, mirroring
// the identity-link branch above.
2441  o20_re += A0_re;
2442  o20_im += A0_im;
2443  o30_re += B0_re;
2444  o30_im += B0_im;
2445 
2446  o21_re += A1_re;
2447  o21_im += A1_im;
2448  o31_re += B1_re;
2449  o31_im += B1_im;
2450 
2451  o22_re += A2_re;
2452  o22_im += A2_im;
2453  o32_re += B2_re;
2454  o32_im += B2_im;
2455 
2456  }
2457 }
2458 
// --- Completeness check and twisted-clover finale -------------------------
2459 #ifdef MULTI_GPU
2460 
2461 int incomplete = 0; // Have all 8 contributions been computed for this site?
2462 
// Each case deliberately falls through to the next: a kernel of a given type
// must also account for boundary faces in all "later" (lower-index)
// partitioned dimensions. A site on any still-pending partitioned boundary is
// marked incomplete and the finale below is skipped for it.
2463 switch(kernel_type) { // intentional fall-through
2464 
2465 case INTERIOR_KERNEL:
2466  incomplete = incomplete || (param.commDim[3] && (coord[3]==0 || coord[3]==(param.dc.X[3]-1)));
2467 case EXTERIOR_KERNEL_T:
2468  incomplete = incomplete || (param.commDim[2] && (coord[2]==0 || coord[2]==(param.dc.X[2]-1)));
2469 case EXTERIOR_KERNEL_Z:
2470  incomplete = incomplete || (param.commDim[1] && (coord[1]==0 || coord[1]==(param.dc.X[1]-1)));
2471 case EXTERIOR_KERNEL_Y:
2472  incomplete = incomplete || (param.commDim[0] && (coord[0]==0 || coord[0]==(param.dc.X[0]-1)));
2473 }
2474 
2475 if (!incomplete)
2476 #endif // MULTI_GPU
2477 {
// Twist coefficient a: only needed when this kernel is NOT the pre-inverted
// clover-twist variant. _f suffix selects the single-precision copy of the
// parameter.
2478 #if !defined(CLOVER_TWIST_INV_DSLASH)
2479 #ifdef SPINOR_DOUBLE
2480  spinorFloat a = param.a;
2481 #else
2482  spinorFloat a = param.a_f;
2483 #endif
2484 #endif
2485 #ifdef DSLASH_XPAY
// XPAY variant: out = b*Dslash(in) + accum, with the clover twist applied
// first.
2486 #ifdef SPINOR_DOUBLE
2487  spinorFloat b = param.b;
2488 #else
2489  spinorFloat b = param.b_f;
2490 #endif
2491  READ_ACCUM(ACCUMTEX, param.sp_stride)
2492 
2493 #ifndef CLOVER_TWIST_INV_DSLASH
2494 #ifndef CLOVER_TWIST_XPAY
2495  //perform invert twist first:
2496 #ifndef DYNAMIC_CLOVER
// Apply the precomputed inverse clover-twist matrix cinv to the output o.
2497  APPLY_CLOVER_TWIST_INV(c, cinv, -a, o);
2498 #else
// NOTE(review): listing line 2499 (presumably APPLY_CLOVER_TWIST_DYN_INV,
// which inverts the clover term on the fly) is elided from this excerpt.
2500 #endif
2501 #else
// CLOVER_TWIST_XPAY: twist is applied to the accumulator instead.
2502  APPLY_CLOVER_TWIST(c, -a, acc);
2503 #endif
2504 #endif
// axpy finale: o = b*o + acc, component by component.
2505  o00_re = b*o00_re + acc00_re;
2506  o00_im = b*o00_im + acc00_im;
2507  o01_re = b*o01_re + acc01_re;
2508  o01_im = b*o01_im + acc01_im;
2509  o02_re = b*o02_re + acc02_re;
2510  o02_im = b*o02_im + acc02_im;
2511  o10_re = b*o10_re + acc10_re;
2512  o10_im = b*o10_im + acc10_im;
2513  o11_re = b*o11_re + acc11_re;
2514  o11_im = b*o11_im + acc11_im;
2515  o12_re = b*o12_re + acc12_re;
2516  o12_im = b*o12_im + acc12_im;
2517  o20_re = b*o20_re + acc20_re;
2518  o20_im = b*o20_im + acc20_im;
2519  o21_re = b*o21_re + acc21_re;
2520  o21_im = b*o21_im + acc21_im;
2521  o22_re = b*o22_re + acc22_re;
2522  o22_im = b*o22_im + acc22_im;
2523  o30_re = b*o30_re + acc30_re;
2524  o30_im = b*o30_im + acc30_im;
2525  o31_re = b*o31_re + acc31_re;
2526  o31_im = b*o31_im + acc31_im;
2527  o32_re = b*o32_re + acc32_re;
2528  o32_im = b*o32_im + acc32_im;
2529 #else //no XPAY
// Plain dslash: apply only the inverse clover twist (when required).
2530 #ifndef CLOVER_TWIST_INV_DSLASH
2531 #ifndef DYNAMIC_CLOVER
2532  APPLY_CLOVER_TWIST_INV(c, cinv, -a, o);
2533 #else
// NOTE(review): listing line 2534 (dynamic-clover inverse) is elided here.
2535 #endif
2536 #endif
2537 #endif
2538 }
2539 
// Store the finished output spinor for this site.
2540 // write spinor field back to device memory
2541 WRITE_SPINOR(param.sp_stride);
2542 
2543 // undefine to prevent warning when precision is changed
2544 #undef spinorFloat
2545 #undef WRITE_SPINOR_SHARED
2546 #undef READ_SPINOR_SHARED
2547 #undef SHARED_STRIDE
2548 
2549 #undef g00_re
2550 #undef g00_im
2551 #undef g01_re
2552 #undef g01_im
2553 #undef g02_re
2554 #undef g02_im
2555 #undef g10_re
2556 #undef g10_im
2557 #undef g11_re
2558 #undef g11_im
2559 #undef g12_re
2560 #undef g12_im
2561 #undef g20_re
2562 #undef g20_im
2563 #undef g21_re
2564 #undef g21_im
2565 #undef g22_re
2566 #undef g22_im
2567 
2568 #undef i00_re
2569 #undef i00_im
2570 #undef i01_re
2571 #undef i01_im
2572 #undef i02_re
2573 #undef i02_im
2574 #undef i10_re
2575 #undef i10_im
2576 #undef i11_re
2577 #undef i11_im
2578 #undef i12_re
2579 #undef i12_im
2580 #undef i20_re
2581 #undef i20_im
2582 #undef i21_re
2583 #undef i21_im
2584 #undef i22_re
2585 #undef i22_im
2586 #undef i30_re
2587 #undef i30_im
2588 #undef i31_re
2589 #undef i31_im
2590 #undef i32_re
2591 #undef i32_im
2592 
2593 #undef c00_00_re
2594 #undef c01_01_re
2595 #undef c02_02_re
2596 #undef c10_10_re
2597 #undef c11_11_re
2598 #undef c12_12_re
2599 #undef c01_00_re
2600 #undef c01_00_im
2601 #undef c02_00_re
2602 #undef c02_00_im
2603 #undef c10_00_re
2604 #undef c10_00_im
2605 #undef c11_00_re
2606 #undef c11_00_im
2607 #undef c12_00_re
2608 #undef c12_00_im
2609 #undef c02_01_re
2610 #undef c02_01_im
2611 #undef c10_01_re
2612 #undef c10_01_im
2613 #undef c11_01_re
2614 #undef c11_01_im
2615 #undef c12_01_re
2616 #undef c12_01_im
2617 #undef c10_02_re
2618 #undef c10_02_im
2619 #undef c11_02_re
2620 #undef c11_02_im
2621 #undef c12_02_re
2622 #undef c12_02_im
2623 #undef c11_10_re
2624 #undef c11_10_im
2625 #undef c12_10_re
2626 #undef c12_10_im
2627 #undef c12_11_re
2628 #undef c12_11_im
2629 
2630 #undef cinv00_00_re
2631 #undef cinv01_01_re
2632 #undef cinv02_02_re
2633 #undef cinv10_10_re
2634 #undef cinv11_11_re
2635 #undef cinv12_12_re
2636 #undef cinv01_00_re
2637 #undef cinv01_00_im
2638 #undef cinv02_00_re
2639 #undef cinv02_00_im
2640 #undef cinv10_00_re
2641 #undef cinv10_00_im
2642 #undef cinv11_00_re
2643 #undef cinv11_00_im
2644 #undef cinv12_00_re
2645 #undef cinv12_00_im
2646 #undef cinv02_01_re
2647 #undef cinv02_01_im
2648 #undef cinv10_01_re
2649 #undef cinv10_01_im
2650 #undef cinv11_01_re
2651 #undef cinv11_01_im
2652 #undef cinv12_01_re
2653 #undef cinv12_01_im
2654 #undef cinv10_02_re
2655 #undef cinv10_02_im
2656 #undef cinv11_02_re
2657 #undef cinv11_02_im
2658 #undef cinv12_02_re
2659 #undef cinv12_02_im
2660 #undef cinv11_10_re
2661 #undef cinv11_10_im
2662 #undef cinv12_10_re
2663 #undef cinv12_10_im
2664 #undef cinv12_11_re
2665 #undef cinv12_11_im
2666 
2667 #undef acc00_re
2668 #undef acc00_im
2669 #undef acc01_re
2670 #undef acc01_im
2671 #undef acc02_re
2672 #undef acc02_im
2673 #undef acc10_re
2674 #undef acc10_im
2675 #undef acc11_re
2676 #undef acc11_im
2677 #undef acc12_re
2678 #undef acc12_im
2679 #undef acc20_re
2680 #undef acc20_im
2681 #undef acc21_re
2682 #undef acc21_im
2683 #undef acc22_re
2684 #undef acc22_im
2685 #undef acc30_re
2686 #undef acc30_im
2687 #undef acc31_re
2688 #undef acc31_im
2689 #undef acc32_re
2690 #undef acc32_im
2691 
2692 
2693 #undef o00_re
2694 #undef o00_im
2695 #undef o01_re
2696 #undef o01_im
2697 #undef o02_re
2698 #undef o02_im
2699 #undef o10_re
2700 #undef o10_im
2701 #undef o11_re
2702 #undef o11_im
2703 #undef o12_re
2704 #undef o12_im
2705 #undef o20_re
2706 #undef o20_im
2707 #undef o21_re
2708 #undef o21_im
2709 #undef o22_re
2710 #undef o22_im
2711 #undef o30_re
2712 #undef o30_im
2713 #undef o31_re
2714 #undef o31_im
2715 #undef o32_re
2716 #undef o32_im
2717 
2718 #undef VOLATILE
VOLATILE spinorFloat o11_im
#define i10_im
spinorFloat a1_re
VOLATILE spinorFloat o01_re
#define acc10_re
dim3 dim3 blockDim
#define i00_im
VOLATILE spinorFloat o30_im
#define APPLY_CLOVER_TWIST(c, a, reg)
Definition: tmc_core.h:832
#define i01_im
spinorFloat B1_re
#define acc11_re
VOLATILE spinorFloat o20_re
VOLATILE spinorFloat o02_re
#define i12_im
spinorFloat B2_re
#define acc32_re
VOLATILE spinorFloat o10_re
spinorFloat A2_im
READ_SPINOR(SPINORTEX, param.sp_stride, sp_idx, sp_idx)
#define acc11_im
#define i11_im
VOLATILE spinorFloat o21_re
spinorFloat a0_im
coordsFromIndex3D< EVEN_X >(X, coord, sid, param)
#define acc12_im
int sp_idx
#define i20_im
READ_GAUGE_MATRIX(G, GAUGE0TEX, 0, ga_idx, param.gauge_stride)
#define i22_im
spinorFloat B0_im
#define i01_re
spinorFloat b2_re
#define i00_re
#define i12_re
spinorFloat A2_re
#define acc00_im
#define i21_re
#define APPLY_CLOVER_TWIST_DYN_INV(c, a, reg)
Definition: tmc_core.h:2004
spinorFloat A0_re
spinorFloat b2_im
#define acc00_re
WRITE_SPINOR(param.sp_stride)
#define acc01_re
#define acc31_im
#define GAUGE0TEX
VOLATILE spinorFloat o31_im
#define acc22_im
VOLATILE spinorFloat o01_im
VOLATILE spinorFloat o22_re
spinorFloat A0_im
#define acc02_re
#define acc21_im
QudaGaugeParam param
Definition: pack_test.cpp:17
#define b
#define acc21_re
#define i31_re
VOLATILE spinorFloat o30_re
#define acc20_re
spinorFloat B1_im
spinorFloat b1_im
VOLATILE spinorFloat o31_re
spinorFloat b1_re
VOLATILE spinorFloat o11_re
#define acc30_im
#define acc31_re
#define GAUGE1TEX
spinorFloat a0_re
VOLATILE spinorFloat o02_im
#define SPINORTEX
#define i31_im
spinorFloat b0_re
#define i21_im
#define READ_INTERMEDIATE_SPINOR
#define VOLATILE
spinorFloat A1_re
VOLATILE spinorFloat o10_im
#define acc32_im
int X[4]
Definition: quda.h:29
#define acc01_im
READ_SPINOR_UP(SPINORTEX, param.sp_stride, sp_idx, sp_idx)
VOLATILE spinorFloat o21_im
#define READ_SPINOR_GHOST
#define i30_re
#define i20_re
spinorFloat B0_re
APPLY_CLOVER_TWIST_INV(c, cinv, -a, o)
VOLATILE spinorFloat o22_im
#define WRITE_SPINOR_SHARED
spinorFloat a1_im
spinorFloat b0_im
#define i02_im
VOLATILE spinorFloat o00_re
#define spinorFloat
#define acc20_im
#define INTERTEX
#define i11_re
#define acc12_re
spinorFloat a2_re
#define i22_re
int face_idx
#define i02_re
VOLATILE spinorFloat o32_im
#define acc22_re
#define i10_re
VOLATILE spinorFloat o00_im
const void * c
const int ga_idx
const int face_num
#define TPROJSCALE
spinorFloat B2_im
spinorFloat A1_im
READ_SPINOR_DOWN(SPINORTEX, param.sp_stride, sp_idx, sp_idx)
#define GHOSTSPINORTEX
VOLATILE spinorFloat o20_im
#define READ_SPINOR_SHARED
VOLATILE spinorFloat o12_im
VOLATILE spinorFloat o12_re
#define i32_im
__syncthreads()
#define i30_im
#define a
RECONSTRUCT_GAUGE_MATRIX(0)
#define acc10_im
#define acc30_re
VOLATILE spinorFloat o32_re
#define i32_re
spinorFloat a2_im
#define acc02_im