// Per-thread shared-memory footprint, in units of spinorFloat (19 reals/thread).
5 #define DSLASH_SHARED_FLOATS_PER_THREAD 19
// Compiler-dependent VOLATILE qualifier. NOTE(review): the NVVM branch (original
// line 9) is missing from this excerpt; only the Open64 fallback, which needs an
// explicit `volatile` to stop the older compiler from mis-caching shared loads,
// is visible. The matching #endif is also outside this excerpt.
8 #if ((CUDA_VERSION >= 4010) && (__COMPUTE_CAPABILITY__ >= 200)) // NVVM compiler
10 #else // Open64 compiler
11 #define VOLATILE volatile
// Double-precision branch (the opening #if for SPINOR_DOUBLE lies before this
// excerpt): the accumulator spinor is held in twelve double2 registers
// accum0..accum11, one register per complex component. Accessor naming:
// accSC_re / accSC_im = real/imaginary part for spin index S (0..3) and color
// index C (0..2).
15 #define spinorFloat double
40 #define acc00_re accum0.x
41 #define acc00_im accum0.y
42 #define acc01_re accum1.x
43 #define acc01_im accum1.y
44 #define acc02_re accum2.x
45 #define acc02_im accum2.y
46 #define acc10_re accum3.x
47 #define acc10_im accum3.y
48 #define acc11_re accum4.x
49 #define acc11_im accum4.y
50 #define acc12_re accum5.x
51 #define acc12_im accum5.y
52 #define acc20_re accum6.x
53 #define acc20_im accum6.y
54 #define acc21_re accum7.x
55 #define acc21_im accum7.y
56 #define acc22_re accum8.x
57 #define acc22_im accum8.y
58 #define acc30_re accum9.x
59 #define acc30_im accum9.y
60 #define acc31_re accum10.x
61 #define acc31_im accum10.y
62 #define acc32_re accum11.x
63 #define acc32_im accum11.y
// Single-precision branch (the #else separating it from the double branch,
// original line ~64, is not visible here): the same 12 complex components are
// packed two-per-register into six float4s accum0..accum5 (.x/.y = one complex
// value, .z/.w = the next). Same accSC_re/_im naming as the double branch.
65 #define spinorFloat float
90 #define acc00_re accum0.x
91 #define acc00_im accum0.y
92 #define acc01_re accum0.z
93 #define acc01_im accum0.w
94 #define acc02_re accum1.x
95 #define acc02_im accum1.y
96 #define acc10_re accum1.z
97 #define acc10_im accum1.w
98 #define acc11_re accum2.x
99 #define acc11_im accum2.y
100 #define acc12_re accum2.z
101 #define acc12_im accum2.w
102 #define acc20_re accum3.x
103 #define acc20_im accum3.y
104 #define acc21_re accum3.z
105 #define acc21_im accum3.w
106 #define acc22_re accum4.x
107 #define acc22_im accum4.y
108 #define acc30_re accum4.z
109 #define acc30_im accum4.w
110 #define acc31_re accum5.x
111 #define acc31_im accum5.y
112 #define acc32_re accum5.z
113 #define acc32_im accum5.w
114 #endif // SPINOR_DOUBLE
157 #endif // GAUGE_DOUBLE
// Hermitian conjugate (dagger) of the gauge link matrix g: the definitions
// below implement gT[i][j] = conj(g[j][i]) -- indices are swapped and the
// imaginary part is negated. These are the link variables used for hops in
// the negative lattice directions.
160 #define gT00_re (+g00_re)
161 #define gT00_im (-g00_im)
162 #define gT01_re (+g10_re)
163 #define gT01_im (-g10_im)
164 #define gT02_re (+g20_re)
165 #define gT02_im (-g20_im)
166 #define gT10_re (+g01_re)
167 #define gT10_im (-g01_im)
168 #define gT11_re (+g11_re)
169 #define gT11_im (-g11_im)
170 #define gT12_re (+g21_re)
171 #define gT12_im (-g21_im)
172 #define gT20_re (+g02_re)
173 #define gT20_im (-g02_im)
174 #define gT21_re (+g12_re)
175 #define gT21_im (-g12_im)
176 #define gT22_re (+g22_re)
177 #define gT22_im (-g22_im)
// Output-spinor accessors: component k of this thread's output spinor lives in
// shared memory at s[k*SHARED_STRIDE], i.e. components are strided across the
// shared buffer so consecutive threads access consecutive addresses.
// NOTE(review): the accessor list is truncated in this excerpt -- o30_im
// through o32_im (original lines 199-205) are not shown.
180 #define o00_re s[0*SHARED_STRIDE]
181 #define o00_im s[1*SHARED_STRIDE]
182 #define o01_re s[2*SHARED_STRIDE]
183 #define o01_im s[3*SHARED_STRIDE]
184 #define o02_re s[4*SHARED_STRIDE]
185 #define o02_im s[5*SHARED_STRIDE]
186 #define o10_re s[6*SHARED_STRIDE]
187 #define o10_im s[7*SHARED_STRIDE]
188 #define o11_re s[8*SHARED_STRIDE]
189 #define o11_im s[9*SHARED_STRIDE]
190 #define o12_re s[10*SHARED_STRIDE]
191 #define o12_im s[11*SHARED_STRIDE]
192 #define o20_re s[12*SHARED_STRIDE]
193 #define o20_im s[13*SHARED_STRIDE]
194 #define o21_re s[14*SHARED_STRIDE]
195 #define o21_im s[15*SHARED_STRIDE]
196 #define o22_re s[16*SHARED_STRIDE]
197 #define o22_im s[17*SHARED_STRIDE]
198 #define o30_re s[18*SHARED_STRIDE]
// Two alternative stride values appear below; the #if choosing between them
// (original line ~205/207, presumably precision-dependent) is not visible in
// this excerpt -- TODO confirm which condition selects 8 vs 16.
206 #define SHARED_STRIDE 8 // to avoid bank conflicts on G80 and GT200
208 #define SHARED_STRIDE 16 // to avoid bank conflicts on G80 and GT200
// Dynamically sized shared-memory buffer (size supplied at kernel launch);
// the o.. output-spinor accessors index into a spinorFloat view of this buffer.
211 extern __shared__
char s_data[];
// Half-precision-only declarations: the conditional body (original line 223)
// is not visible in this excerpt.
222 #if (DD_PREC==2) // half precision
224 #endif // half precision
// NOTE(review): the remainder is a sparse excerpt of the kernel body -- the
// leading integer on each line is the original file's line number fused in by
// extraction, and many intervening lines are omitted between them.
//
// Checkerboarded face volumes: each 3-d boundary face of the local X1*X2*X3*X4
// lattice, halved (>>1) for even/odd (parity) decomposition.
232 faceVolume[0] = (
X2*
X3*
X4)>>1;
233 faceVolume[1] = (
X1*
X3*
X4)>>1;
234 faceVolume[2] = (
X1*
X2*
X4)>>1;
235 faceVolume[3] = (
X1*
X2*
X4)>>1 is not shown here; flat 1-d global thread index with a tail guard:
237 sid = blockIdx.x*blockDim.x + threadIdx.x;
238 if (sid >=
param.threads)
return;
// Which lattice dimension this thread's face site belongs to.
241 dim = dimFromFaceIndex(sid,
param);
// Decompose sid within its dimension: face_volume = half the thread span for
// this dim; face_num selects back (0) vs forward (1) face; face_idx is the
// index within that face.
243 const int face_volume = ((
param.threadDimMapUpper[
dim] -
param.threadDimMapLower[
dim]) >> 1);
244 const int face_num = (sid >= face_volume);
245 face_idx = sid - face_num*face_volume;
// Recover the 4-d site coordinates (x1,x2,x3,x4) from the face index.
249 coordsFromFaceIndex<1>(
X,
sid,
x1,
x2,
x3,
x4,
face_idx, face_volume,
dim, face_num,
param.parity,
dims);
// Mark the thread active if any of the four forward directions needs
// inter-node communication at this site.
252 for(
int dir=0; dir<4; ++dir){
253 active = active || isActive(dim,dir,+1,x1,x2,x3,x4,
param.commDim,
param.
X);
// --- X direction: ghost-zone spinor (and half-precision norm) indices for the
// forward face; faceVolume[0] offsets past the back-face norms.
281 const int sp_idx = face_idx +
param.ghostOffset[0];
283 sp_norm_idx = face_idx + faceVolume[0] +
param.ghostNormOffset[0];
// Negative-X boundary (x1==0): read from the back-face ghost zone.
433 if (isActive(dim,0,-1,x1,x2,x3,x4,
param.commDim,
param.
X) && x1==0 )
442 const int sp_idx = face_idx +
param.ghostOffset[0];
444 sp_norm_idx = face_idx +
param.ghostNormOffset[0];
// --- Y direction, forward face.
603 const int sp_idx = face_idx +
param.ghostOffset[1];
605 sp_norm_idx = face_idx + faceVolume[1] +
param.ghostNormOffset[1];
608 const int ga_idx =
sid;
// Negative-Y boundary (x2==0).
755 if (isActive(dim,1,-1,x1,x2,x3,x4,
param.commDim,
param.
X) && x2==0 )
764 const int sp_idx = face_idx +
param.ghostOffset[1];
766 sp_norm_idx = face_idx +
param.ghostNormOffset[1];
// --- Z direction, forward face.
925 const int sp_idx = face_idx +
param.ghostOffset[2];
927 sp_norm_idx = face_idx + faceVolume[2] +
param.ghostNormOffset[2];
930 const int ga_idx =
sid;
// Negative-Z boundary (x3==0).
1077 if (isActive(dim,2,-1,x1,x2,x3,x4,
param.commDim,
param.
X) && x3==0 )
1086 const int sp_idx = face_idx +
param.ghostOffset[2];
1088 sp_norm_idx = face_idx +
param.ghostNormOffset[2];
// --- T direction: forward boundary (x4 == X4-1).
1238 if (isActive(dim,3,+1,x1,x2,x3,x4,
param.commDim,
param.
X) && x4==
X4m1 )
1247 const int sp_idx = face_idx +
param.ghostOffset[3];
1249 sp_norm_idx = face_idx + faceVolume[3] +
param.ghostNormOffset[3];
1252 const int ga_idx =
sid;
// Twisted-mass-inverse variants of the projection; conditional bodies are not
// visible in this excerpt.
1270 #ifdef TWIST_INV_DSLASH
1324 #ifdef TWIST_INV_DSLASH
// Negative-T boundary (x4==0).
1454 if (isActive(dim,3,-1,x1,x2,x3,x4,
param.commDim,
param.
X) && x4==0 )
1463 const int sp_idx = face_idx +
param.ghostOffset[3];
1465 sp_norm_idx = face_idx +
param.ghostNormOffset[3];
1486 #ifdef TWIST_INV_DSLASH
1540 #ifdef TWIST_INV_DSLASH
// Load the accumulator spinor (e.g. for xpay/twist application) from the
// accumulator texture with the spinor field stride.
1672 READ_ACCUM(ACCUMTEX,
param.sp_stride)
1675 #ifndef TWIST_INV_DSLASH
1733 #ifndef TWIST_INV_DSLASH
// End of kernel: release the macro so the next included variant can redefine it.
1744 #undef SHARED_STRIDE
// Twist application macros -- signatures only; their bodies (the gamma5/mu
// rotation of register set `reg`) are not visible in this excerpt -- TODO
// confirm against the generating definitions.
#define APPLY_TWIST(a, reg)
#define APPLY_TWIST_INV(a, b, reg)
/************************** only for degenerate twisted mass (deg tm): **************************/
// NOTE(review): the lines below read as a listing of macros and __constant__
// symbols the kernel depends on (missing semicolons/values suggest extraction
// residue rather than literal source) -- verify against the original file.
#define READ_INTERMEDIATE_SPINOR
#define DSLASH_SHARED_FLOATS_PER_THREAD
__constant__ int ghostFace[QUDA_MAX_DIM+1]
#define RECONSTRUCT_GAUGE_MATRIX
__constant__ int gauge_fixed
__constant__ int ga_stride
#define READ_GAUGE_MATRIX
__constant__ int X4X3X2X1hmX3X2X1h