QUDA  v0.7.0
A library for QCD on GPUs
reduce_core.h
__host__ __device__ void zero(double &x) { x = 0.0; }
__host__ __device__ void zero(double2 &x) { x.x = 0.0; x.y = 0.0; }
__host__ __device__ void zero(double3 &x) { x.x = 0.0; x.y = 0.0; x.z = 0.0; }
__device__ void copytoshared(double *s, const int i, const double x, const int block) { s[i] = x; }
__device__ void copytoshared(double *s, const int i, const double2 x, const int block)
{ s[i] = x.x; s[i+block] = x.y; }
__device__ void copytoshared(double *s, const int i, const double3 x, const int block)
{ s[i] = x.x; s[i+block] = x.y; s[i+2*block] = x.z; }
__device__ void copytoshared(volatile double *s, const int i, const double x, const int block) { s[i] = x; }
__device__ void copytoshared(volatile double *s, const int i, const double2 x, const int block)
{ s[i] = x.x; s[i+block] = x.y; }
__device__ void copytoshared(volatile double *s, const int i, const double3 x, const int block)
{ s[i] = x.x; s[i+block] = x.y; s[i+2*block] = x.z; }
__device__ void copyfromshared(double &x, const double *s, const int i, const int block) { x = s[i]; }
__device__ void copyfromshared(double2 &x, const double *s, const int i, const int block)
{ x.x = s[i]; x.y = s[i+block]; }
__device__ void copyfromshared(double3 &x, const double *s, const int i, const int block)
{ x.x = s[i]; x.y = s[i+block]; x.z = s[i+2*block]; }

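// The shared-memory helpers above and the add() specializations below treat an
// N-component ReduceType as N strided arrays of ReduceSimpleType in shared
// memory: component c of element i lives at s[i + c*block].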
template<typename ReduceType, typename ReduceSimpleType>
__device__ void add(ReduceType &sum, ReduceSimpleType *s, const int i, const int block) { }
template<> __device__ void add<double,double>(double &sum, double *s, const int i, const int block)
{ sum += s[i]; }
template<> __device__ void add<double2,double>(double2 &sum, double *s, const int i, const int block)
{ sum.x += s[i]; sum.y += s[i+block]; }
template<> __device__ void add<double3,double>(double3 &sum, double *s, const int i, const int block)
{ sum.x += s[i]; sum.y += s[i+block]; sum.z += s[i+2*block]; }

template<typename ReduceType, typename ReduceSimpleType>
__device__ void add(ReduceSimpleType *s, const int i, const int j, const int block) { }
template<typename ReduceType, typename ReduceSimpleType>
__device__ void add(volatile ReduceSimpleType *s, const int i, const int j, const int block) { }

template<> __device__ void add<double,double>(double *s, const int i, const int j, const int block)
{ s[i] += s[j]; }
template<> __device__ void add<double,double>(volatile double *s, const int i, const int j, const int block)
{ s[i] += s[j]; }

template<> __device__ void add<double2,double>(double *s, const int i, const int j, const int block)
{ s[i] += s[j]; s[i+block] += s[j+block]; }
template<> __device__ void add<double2,double>(volatile double *s, const int i, const int j, const int block)
{ s[i] += s[j]; s[i+block] += s[j+block]; }

template<> __device__ void add<double3,double>(double *s, const int i, const int j, const int block)
{ s[i] += s[j]; s[i+block] += s[j+block]; s[i+2*block] += s[j+2*block]; }
template<> __device__ void add<double3,double>(volatile double *s, const int i, const int j, const int block)
{ s[i] += s[j]; s[i+block] += s[j+block]; s[i+2*block] += s[j+2*block]; }

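// warpReduce performs the final intra-warp reduction through a volatile
// shared-memory pointer, relying on warp-synchronous execution instead of
// __syncthreads(); the block_size template parameter lets unused steps be
// compiled away.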
template<int block_size, typename ReduceType, typename ReduceSimpleType>
__device__ void warpReduce(ReduceSimpleType* s, ReduceType& sum){

  volatile ReduceSimpleType *sv = s;
  copytoshared(sv, 0, sum, block_size);

  if(block_size >= 32) { add<ReduceType>(sv, 0, 16, block_size); }
  if(block_size >= 16) { add<ReduceType>(sv, 0, 8, block_size); }
  if(block_size >= 8) { add<ReduceType>(sv, 0, 4, block_size); }
  if(block_size >= 4) { add<ReduceType>(sv, 0, 2, block_size); }
  if(block_size >= 2) { add<ReduceType>(sv, 0, 1, block_size); }
}

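// On devices without native double precision (compute capability < 1.3) the
// reduction is carried out in the double-single emulation type (doublesingle),
// so the same helper overloads are provided for it here.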
#if (__COMPUTE_CAPABILITY__ < 130)
__host__ __device__ void zero(doublesingle &x) { x = 0.0; }
__host__ __device__ void zero(doublesingle2 &x) { x.x = 0.0; x.y = 0.0; }
__host__ __device__ void zero(doublesingle3 &x) { x.x = 0.0; x.y = 0.0; x.z = 0.0; }
__device__ void copytoshared(doublesingle *s, const int i, const doublesingle x, const int block) { s[i] = x; }
__device__ void copytoshared(doublesingle *s, const int i, const doublesingle2 x, const int block)
{ s[i] = x.x; s[i+block] = x.y; }
__device__ void copytoshared(doublesingle *s, const int i, const doublesingle3 x, const int block)
{ s[i] = x.x; s[i+block] = x.y; s[i+2*block] = x.z; }
__device__ void copytoshared(volatile doublesingle *s, const int i, const doublesingle x, const int block) { s[i].a.x = x.a.x; s[i].a.y = x.a.y; }
__device__ void copytoshared(volatile doublesingle *s, const int i, const doublesingle2 x, const int block)
{ s[i].a.x = x.x.a.x; s[i].a.y = x.x.a.y; s[i+block].a.x = x.y.a.x; s[i+block].a.y = x.y.a.y; }
__device__ void copytoshared(volatile doublesingle *s, const int i, const doublesingle3 x, const int block)
{ s[i].a.x = x.x.a.x; s[i].a.y = x.x.a.y; s[i+block].a.x = x.y.a.x; s[i+block].a.y = x.y.a.y;
  s[i+2*block].a.x = x.z.a.x; s[i+2*block].a.y = x.z.a.y; }
__device__ void copyfromshared(doublesingle &x, const doublesingle *s, const int i, const int block) { x = s[i]; }
__device__ void copyfromshared(doublesingle2 &x, const doublesingle *s, const int i, const int block)
{ x.x = s[i]; x.y = s[i+block]; }
__device__ void copyfromshared(doublesingle3 &x, const doublesingle *s, const int i, const int block)
{ x.x = s[i]; x.y = s[i+block]; x.z = s[i+2*block]; }

template<> __device__ void add<doublesingle,doublesingle>(doublesingle &sum, doublesingle *s, const int i, const int block)
{ sum += s[i]; }
template<> __device__ void add<doublesingle2,doublesingle>(doublesingle2 &sum, doublesingle *s, const int i, const int block)
{ sum.x += s[i]; sum.y += s[i+block]; }
template<> __device__ void add<doublesingle3,doublesingle>(doublesingle3 &sum, doublesingle *s, const int i, const int block)
{ sum.x += s[i]; sum.y += s[i+block]; sum.z += s[i+2*block]; }

template<> __device__ void add<doublesingle,doublesingle>(doublesingle *s, const int i, const int j, const int block)
{ s[i] += s[j]; }
template<> __device__ void add<doublesingle,doublesingle>(volatile doublesingle *s, const int i, const int j, const int block)
{ s[i] += s[j]; }

template<> __device__ void add<doublesingle2,doublesingle>(doublesingle *s, const int i, const int j, const int block)
{ s[i] += s[j]; s[i+block] += s[j+block]; }
template<> __device__ void add<doublesingle2,doublesingle>(volatile doublesingle *s, const int i, const int j, const int block)
{ s[i] += s[j]; s[i+block] += s[j+block]; }

template<> __device__ void add<doublesingle3,doublesingle>(doublesingle *s, const int i, const int j, const int block)
{ s[i] += s[j]; s[i+block] += s[j+block]; s[i+2*block] += s[j+2*block]; }
template<> __device__ void add<doublesingle3,doublesingle>(volatile doublesingle *s, const int i, const int j, const int block)
{ s[i] += s[j]; s[i+block] += s[j+block]; s[i+2*block] += s[j+2*block]; }
#endif

#include <launch_kernel.cuh>

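// Global state used to detect the last block to finish: each block atomically
// increments count after writing its partial result, and the block that sees
// the final value sets isLastBlockDone and performs the second-stage reduction.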
__device__ unsigned int count = 0;
__shared__ bool isLastBlockDone;

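// Kernel argument struct bundling the five spinor accessors, the reduction
// functor, the per-block partial results, the final result buffer, and the
// vector length.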
template <typename ReduceType, typename SpinorX, typename SpinorY,
          typename SpinorZ, typename SpinorW, typename SpinorV, typename Reducer>
struct ReduceArg {
  SpinorX X;
  SpinorY Y;
  SpinorZ Z;
  SpinorW W;
  SpinorV V;
  Reducer r;
  ReduceType *partial;
  ReduceType *complete;
  const int length;
  ReduceArg(SpinorX X, SpinorY Y, SpinorZ Z, SpinorW W, SpinorV V, Reducer r,
            ReduceType *partial, ReduceType *complete, int length)
    : X(X), Y(Y), Z(Z), W(W), V(V), r(r), partial(partial), complete(complete), length(length) { ; }
};

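// Generic reduction kernel.  Each thread accumulates over the vector with a
// grid-stride loop, each block reduces its threads' sums in shared memory
// (warp raking followed by warpReduce) and writes the result to
// arg.partial[blockIdx.x]; the last block to finish then sums all of the
// partial results and writes the final value to arg.complete[0].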
template <int block_size, typename ReduceType, typename ReduceSimpleType,
          typename FloatN, int M, typename SpinorX, typename SpinorY,
          typename SpinorZ, typename SpinorW, typename SpinorV, typename Reducer>
__global__ void reduceKernel(ReduceArg<ReduceType,SpinorX,SpinorY,SpinorZ,SpinorW,SpinorV,Reducer> arg) {
  unsigned int tid = threadIdx.x;
  unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
  unsigned int gridSize = gridDim.x*blockDim.x;

  ReduceType sum;
  zero(sum);
  while (i < arg.length) {
    FloatN x[M], y[M], z[M], w[M], v[M];
    arg.X.load(x, i);
    arg.Y.load(y, i);
    arg.Z.load(z, i);
    arg.W.load(w, i);
    arg.V.load(v, i);

#if (__COMPUTE_CAPABILITY__ >= 200)
    arg.r.pre();
#endif

#pragma unroll
    for (int j=0; j<M; j++) arg.r(sum, x[j], y[j], z[j], w[j], v[j]);

#if (__COMPUTE_CAPABILITY__ >= 200)
    arg.r.post(sum);
#endif

    arg.X.save(x, i);
    arg.Y.save(y, i);
    arg.Z.save(z, i);
    arg.W.save(w, i);
    arg.V.save(v, i);

    i += gridSize;
  }

  extern __shared__ ReduceSimpleType sdata[];
  ReduceSimpleType *s = sdata + tid;
  if (tid >= warpSize) copytoshared(s, 0, sum, block_size);
  __syncthreads();

  // now reduce using the first warp only
  if (tid<warpSize) {
    // Warp raking
#pragma unroll
    for (int i=warpSize; i<block_size; i+=warpSize) { add<ReduceType>(sum, s, i, block_size); }

    warpReduce<block_size>(s, sum);
  }

  // write result for this block to global mem
  if (tid == 0) {
    ReduceType tmp;
    copyfromshared(tmp, s, 0, block_size);
    arg.partial[blockIdx.x] = tmp;

    __threadfence(); // flush result

    // increment global block counter
    unsigned int value = atomicInc(&count, gridDim.x);

    // Determine if this block is the last block to be done
    isLastBlockDone = (value == (gridDim.x-1));
  }

  __syncthreads();

  // Finish the reduction if last block
  if (isLastBlockDone) {
    unsigned int i = threadIdx.x;

    ReduceType sum;
    zero(sum);
    while (i < gridDim.x) {
      sum += arg.partial[i];
      i += block_size;
    }

    extern __shared__ ReduceSimpleType sdata[];
    ReduceSimpleType *s = sdata + tid;
    if (tid >= warpSize) copytoshared(s, 0, sum, block_size);
    __syncthreads();

    // now reduce using the first warp only
    if (tid<warpSize) {
      // Warp raking
#pragma unroll
      for (int i=warpSize; i<block_size; i+=warpSize) { add<ReduceType>(sum, s, i, block_size); }

      warpReduce<block_size>(s, sum);
    }

    // write out the final reduced value
    if (threadIdx.x == 0) {
      ReduceType tmp;
      copyfromshared(tmp, s, 0, block_size);
      arg.complete[0] = tmp;
      count = 0;
    }
  }

}
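// Launches reduceKernel with the tuned launch parameters and returns the
// completed result: when the result buffer is in mapped host memory the host
// busy-waits on an event, otherwise the value is copied back with cudaMemcpy;
// the components of the sum are then passed through reduceDoubleArray.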
template <typename doubleN, typename ReduceType, typename ReduceSimpleType, typename FloatN,
          int M, typename SpinorX, typename SpinorY, typename SpinorZ,
          typename SpinorW, typename SpinorV, typename Reducer>
doubleN reduceLaunch(ReduceArg<ReduceType,SpinorX,SpinorY,SpinorZ,SpinorW,SpinorV,Reducer> &arg,
                     const TuneParam &tp, const cudaStream_t &stream) {
  if (tp.grid.x > REDUCE_MAX_BLOCKS)
    errorQuda("Grid size %d greater than maximum %d\n", tp.grid.x, REDUCE_MAX_BLOCKS);

  LAUNCH_KERNEL(reduceKernel,tp,stream,arg,ReduceType,ReduceSimpleType,FloatN,M);

#if (defined(_MSC_VER) && defined(_WIN64)) || defined(__LP64__)
  if(deviceProp.canMapHostMemory) {
    cudaEventRecord(reduceEnd, stream);
    while (cudaSuccess != cudaEventQuery(reduceEnd)) { ; }
  } else
#endif
    { cudaMemcpy(h_reduce, hd_reduce, sizeof(ReduceType), cudaMemcpyDeviceToHost); }

  doubleN cpu_sum;
  zero(cpu_sum);
  cpu_sum += ((ReduceType*)h_reduce)[0];

  const int Nreduce = sizeof(doubleN) / sizeof(double);
  reduceDoubleArray((double*)&cpu_sum, Nreduce);

  return cpu_sum;
}

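// Tunable wrapper around reduceLaunch.  preTune()/postTune() back up and
// restore the spinor fields on the host so that repeated tuning launches
// leave the field data intact, while flops()/bytes() provide the cost model
// reported to the autotuner.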
template <typename doubleN, typename ReduceType, typename ReduceSimpleType, typename FloatN,
          int M, typename SpinorX, typename SpinorY, typename SpinorZ,
          typename SpinorW, typename SpinorV, typename Reducer>
class ReduceCuda : public Tunable {

private:
  ReduceArg<ReduceType,SpinorX,SpinorY,SpinorZ,SpinorW,SpinorV,Reducer> arg;
  doubleN &result;

  // host pointers used for backing up fields when tuning
  // these can't be curried into the Spinors because of Tesla argument length restriction
  char *X_h, *Y_h, *Z_h, *W_h, *V_h;
  char *Xnorm_h, *Ynorm_h, *Znorm_h, *Wnorm_h, *Vnorm_h;
  const size_t *bytes_;
  const size_t *norm_bytes_;

  unsigned int sharedBytesPerThread() const { return sizeof(ReduceType); }

  // when there is only one warp per block, we need to allocate two warps
  // worth of shared memory so that we don't index shared memory out of bounds
  unsigned int sharedBytesPerBlock(const TuneParam &param) const {
    int warpSize = 32; // FIXME - use device property query
    return 2*warpSize*sizeof(ReduceType);
  }

  virtual bool advanceSharedBytes(TuneParam &param) const
  {
    TuneParam next(param);
    advanceBlockDim(next); // to get next blockDim
    int nthreads = next.block.x * next.block.y * next.block.z;
    param.shared_bytes = sharedBytesPerThread()*nthreads > sharedBytesPerBlock(param) ?
      sharedBytesPerThread()*nthreads : sharedBytesPerBlock(param);
    return false;
  }

public:
  ReduceCuda(doubleN &result, SpinorX &X, SpinorY &Y, SpinorZ &Z,
             SpinorW &W, SpinorV &V, Reducer &r, int length,
             const size_t *bytes, const size_t *norm_bytes) :
    arg(X, Y, Z, W, V, r, (ReduceType*)d_reduce, (ReduceType*)hd_reduce, length),
    result(result), X_h(0), Y_h(0), Z_h(0), W_h(0), V_h(0),
    Xnorm_h(0), Ynorm_h(0), Znorm_h(0), Wnorm_h(0), Vnorm_h(0),
    bytes_(bytes), norm_bytes_(norm_bytes) { }
  virtual ~ReduceCuda() { }

  inline TuneKey tuneKey() const {
    return TuneKey(blasStrings.vol_str, typeid(arg.r).name(), blasStrings.aux_str);
  }

  void apply(const cudaStream_t &stream) {
    TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
    result = reduceLaunch<doubleN,ReduceType,ReduceSimpleType,FloatN,M>(arg, tp, stream);
  }

  void preTune() {
    arg.X.save(&X_h, &Xnorm_h, bytes_[0], norm_bytes_[0]);
    arg.Y.save(&Y_h, &Ynorm_h, bytes_[1], norm_bytes_[1]);
    arg.Z.save(&Z_h, &Znorm_h, bytes_[2], norm_bytes_[2]);
    arg.W.save(&W_h, &Wnorm_h, bytes_[3], norm_bytes_[3]);
    arg.V.save(&V_h, &Vnorm_h, bytes_[4], norm_bytes_[4]);
  }

  void postTune() {
    arg.X.load(&X_h, &Xnorm_h, bytes_[0], norm_bytes_[0]);
    arg.Y.load(&Y_h, &Ynorm_h, bytes_[1], norm_bytes_[1]);
    arg.Z.load(&Z_h, &Znorm_h, bytes_[2], norm_bytes_[2]);
    arg.W.load(&W_h, &Wnorm_h, bytes_[3], norm_bytes_[3]);
    arg.V.load(&V_h, &Vnorm_h, bytes_[4], norm_bytes_[4]);
  }

  long long flops() const { return arg.r.flops()*(sizeof(FloatN)/sizeof(((FloatN*)0)->x))*arg.length*M; }
  long long bytes() const {
    size_t bytes = arg.X.Precision()*(sizeof(FloatN)/sizeof(((FloatN*)0)->x))*M;
    if (arg.X.Precision() == QUDA_HALF_PRECISION) bytes += sizeof(float);
    return arg.r.streams()*bytes*arg.length; }
  int tuningIter() const { return 3; }
};

/*
  Work-per-thread factor M for each precision, written as
  M(siteUnroll=false) / M(siteUnroll=true):

  Wilson
  double double2 M = 1/12
  single float4  M = 1/6
  half   short4  M = 6/6

  Staggered
  double double2 M = 1/3
  single float2  M = 1/3
  half   short2  M = 3/3
*/

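/*
  Illustrative sketch only (not part of this header): the Reducer template
  argument is expected to provide the interface exercised above -- a
  constructor taking the two coefficients, pre()/post() hooks around the
  per-site accumulation, an operator() that folds one set of vector
  components into the running sum, and static flops()/streams() counts used
  for the performance model.  Norm2Sketch and norm2_ below are hypothetical
  names used purely for illustration:

    template <typename ReduceType, typename Float2, typename FloatN>
    struct Norm2Sketch {
      Norm2Sketch(const Float2 &a, const Float2 &b) { ; }
      __device__ void pre() { ; }
      __device__ void operator()(ReduceType &sum, FloatN &x, FloatN &y,
                                 FloatN &z, FloatN &w, FloatN &v)
      { sum += norm2_(x); }  // norm2_ is a placeholder, not defined here
      __device__ void post(ReduceType &sum) { ; }
      static int flops() { return 2; }    // flops per real element
      static int streams() { return 1; }  // number of spinor streams accessed
    };
*/

// reduceCuda dispatches on field precision and spin, instantiating the
// matching Spinor accessors and ReduceCuda object; full-parity fields are
// handled by recursing on the even and odd subsets and summing the results.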
template <typename doubleN, typename ReduceType, typename ReduceSimpleType,
          template <typename ReducerType, typename Float, typename FloatN> class Reducer,
          int writeX, int writeY, int writeZ, int writeW, int writeV, bool siteUnroll>
doubleN reduceCuda(const double2 &a, const double2 &b, cudaColorSpinorField &x,
                   cudaColorSpinorField &y, cudaColorSpinorField &z, cudaColorSpinorField &w,
                   cudaColorSpinorField &v) {
  if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET) {
    doubleN even =
      reduceCuda<doubleN,ReduceType,ReduceSimpleType,Reducer,writeX,
                 writeY,writeZ,writeW,writeV,siteUnroll>
      (a, b, x.Even(), y.Even(), z.Even(), w.Even(), v.Even());
    doubleN odd =
      reduceCuda<doubleN,ReduceType,ReduceSimpleType,Reducer,writeX,
                 writeY,writeZ,writeW,writeV,siteUnroll>
      (a, b, x.Odd(), y.Odd(), z.Odd(), w.Odd(), v.Odd());
    return even + odd;
  }

  checkSpinor(x, y);
  checkSpinor(x, z);
  checkSpinor(x, w);
  checkSpinor(x, v);

  if (!x.isNative()) {
    warningQuda("Reductions on non-native fields are not supported\n");
    doubleN value;
    zero(value);
    return value;
  }

  blasStrings.vol_str = x.VolString();
  blasStrings.aux_str = x.AuxString();

  int reduce_length = siteUnroll ? x.RealLength() : x.Length();
  doubleN value;

  // FIXME: use traits to encapsulate register type for shorts -
  // will reduce template type parameters from 3 to 2

  size_t bytes[] = {x.Bytes(), y.Bytes(), z.Bytes(), w.Bytes(), v.Bytes()};
  size_t norm_bytes[] = {x.NormBytes(), y.NormBytes(), z.NormBytes(), w.NormBytes(), v.NormBytes()};

  if (x.Precision() == QUDA_DOUBLE_PRECISION) {
    if (x.Nspin() == 4){ //wilson
      const int M = siteUnroll ? 12 : 1; // determines how much work per thread to do
      Spinor<double2,double2,double2,M,writeX> X(x);
      Spinor<double2,double2,double2,M,writeY> Y(y);
      Spinor<double2,double2,double2,M,writeZ> Z(z);
      Spinor<double2,double2,double2,M,writeW> W(w);
      Spinor<double2,double2,double2,M,writeV> V(v);
      Reducer<ReduceType, double2, double2> r(a,b);
      ReduceCuda<doubleN,ReduceType,ReduceSimpleType,double2,M,
        Spinor<double2,double2,double2,M,writeX>, Spinor<double2,double2,double2,M,writeY>,
        Spinor<double2,double2,double2,M,writeZ>, Spinor<double2,double2,double2,M,writeW>,
        Spinor<double2,double2,double2,M,writeV>, Reducer<ReduceType, double2, double2> >
        reduce(value, X, Y, Z, W, V, r, reduce_length/(2*M), bytes, norm_bytes);
      reduce.apply(*getBlasStream());
    } else if (x.Nspin() == 1){ //staggered
      const int M = siteUnroll ? 3 : 1; // determines how much work per thread to do
      Spinor<double2,double2,double2,M,writeX> X(x);
      Spinor<double2,double2,double2,M,writeY> Y(y);
      Spinor<double2,double2,double2,M,writeZ> Z(z);
      Spinor<double2,double2,double2,M,writeW> W(w);
      Spinor<double2,double2,double2,M,writeV> V(v);
      Reducer<ReduceType, double2, double2> r(a,b);
      ReduceCuda<doubleN,ReduceType,ReduceSimpleType,double2,M,
        Spinor<double2,double2,double2,M,writeX>, Spinor<double2,double2,double2,M,writeY>,
        Spinor<double2,double2,double2,M,writeZ>, Spinor<double2,double2,double2,M,writeW>,
        Spinor<double2,double2,double2,M,writeV>, Reducer<ReduceType, double2, double2> >
        reduce(value, X, Y, Z, W, V, r, reduce_length/(2*M), bytes, norm_bytes);
      reduce.apply(*getBlasStream());
    } else { errorQuda("ERROR: nSpin=%d is not supported\n", x.Nspin()); }
  } else if (x.Precision() == QUDA_SINGLE_PRECISION) {
    if (x.Nspin() == 4){ //wilson
#if defined(GPU_WILSON_DIRAC) || defined(GPU_DOMAIN_WALL_DIRAC)
      const int M = siteUnroll ? 6 : 1; // determines how much work per thread to do
      Spinor<float4,float4,float4,M,writeX,0> X(x);
      Spinor<float4,float4,float4,M,writeY,1> Y(y);
      Spinor<float4,float4,float4,M,writeZ,2> Z(z);
      Spinor<float4,float4,float4,M,writeW,3> W(w);
      Spinor<float4,float4,float4,M,writeV,4> V(v);
      Reducer<ReduceType, float2, float4> r(make_float2(a.x, a.y), make_float2(b.x, b.y));
      ReduceCuda<doubleN,ReduceType,ReduceSimpleType,float4,M,
        Spinor<float4,float4,float4,M,writeX,0>, Spinor<float4,float4,float4,M,writeY,1>,
        Spinor<float4,float4,float4,M,writeZ,2>, Spinor<float4,float4,float4,M,writeW,3>,
        Spinor<float4,float4,float4,M,writeV,4>, Reducer<ReduceType, float2, float4> >
        reduce(value, X, Y, Z, W, V, r, reduce_length/(4*M), bytes, norm_bytes);
      reduce.apply(*getBlasStream());
#else
      errorQuda("blas has not been built for Nspin=%d fields", x.Nspin());
#endif
    } else if (x.Nspin() == 1) {
#ifdef GPU_STAGGERED_DIRAC
      const int M = siteUnroll ? 3 : 1; // determines how much work per thread to do
      Spinor<float2,float2,float2,M,writeX,0> X(x);
      Spinor<float2,float2,float2,M,writeY,1> Y(y);
      Spinor<float2,float2,float2,M,writeZ,2> Z(z);
      Spinor<float2,float2,float2,M,writeW,3> W(w);
      Spinor<float2,float2,float2,M,writeV,4> V(v);
      Reducer<ReduceType, float2, float2> r(make_float2(a.x, a.y), make_float2(b.x, b.y));
      ReduceCuda<doubleN,ReduceType,ReduceSimpleType,float2,M,
        Spinor<float2,float2,float2,M,writeX,0>, Spinor<float2,float2,float2,M,writeY,1>,
        Spinor<float2,float2,float2,M,writeZ,2>, Spinor<float2,float2,float2,M,writeW,3>,
        Spinor<float2,float2,float2,M,writeV,4>, Reducer<ReduceType, float2, float2> >
        reduce(value, X, Y, Z, W, V, r, reduce_length/(2*M), bytes, norm_bytes);
      reduce.apply(*getBlasStream());
#else
      errorQuda("blas has not been built for Nspin=%d fields", x.Nspin());
#endif
    } else { errorQuda("ERROR: nSpin=%d is not supported\n", x.Nspin()); }
  } else { // half precision
    if (x.Nspin() == 4){ //wilson
#if defined(GPU_WILSON_DIRAC) || defined(GPU_DOMAIN_WALL_DIRAC)
      Spinor<float4,float4,short4,6,writeX,0> X(x);
      Spinor<float4,float4,short4,6,writeY,1> Y(y);
      Spinor<float4,float4,short4,6,writeZ,2> Z(z);
      Spinor<float4,float4,short4,6,writeW,3> W(w);
      Spinor<float4,float4,short4,6,writeV,4> V(v);
      Reducer<ReduceType, float2, float4> r(make_float2(a.x, a.y), make_float2(b.x, b.y));
      ReduceCuda<doubleN,ReduceType,ReduceSimpleType,float4,6,
        Spinor<float4,float4,short4,6,writeX,0>, Spinor<float4,float4,short4,6,writeY,1>,
        Spinor<float4,float4,short4,6,writeZ,2>, Spinor<float4,float4,short4,6,writeW,3>,
        Spinor<float4,float4,short4,6,writeV,4>, Reducer<ReduceType, float2, float4> >
        reduce(value, X, Y, Z, W, V, r, y.Volume(), bytes, norm_bytes);
      reduce.apply(*getBlasStream());
#else
      errorQuda("blas has not been built for Nspin=%d fields", x.Nspin());
#endif
    } else if (x.Nspin() == 1) {//staggered
#ifdef GPU_STAGGERED_DIRAC
      Spinor<float2,float2,short2,3,writeX,0> X(x);
      Spinor<float2,float2,short2,3,writeY,1> Y(y);
      Spinor<float2,float2,short2,3,writeZ,2> Z(z);
      Spinor<float2,float2,short2,3,writeW,3> W(w);
      Spinor<float2,float2,short2,3,writeV,4> V(v);
      Reducer<ReduceType, float2, float2> r(make_float2(a.x, a.y), make_float2(b.x, b.y));
      ReduceCuda<doubleN,ReduceType,ReduceSimpleType,float2,3,
        Spinor<float2,float2,short2,3,writeX,0>, Spinor<float2,float2,short2,3,writeY,1>,
        Spinor<float2,float2,short2,3,writeZ,2>, Spinor<float2,float2,short2,3,writeW,3>,
        Spinor<float2,float2,short2,3,writeV,4>, Reducer<ReduceType, float2, float2> >
        reduce(value, X, Y, Z, W, V, r, y.Volume(), bytes, norm_bytes);
      reduce.apply(*getBlasStream());
#else
      errorQuda("blas has not been built for Nspin=%d fields", x.Nspin());
#endif
    } else { errorQuda("ERROR: nSpin=%d is not supported\n", x.Nspin()); }
    blas_bytes += Reducer<ReduceType,double2,double2>::streams()*(unsigned long long)x.Volume()*sizeof(float);
  }
  blas_bytes += Reducer<ReduceType,double2,double2>::streams()*(unsigned long long)x.RealLength()*x.Precision();
  blas_flops += Reducer<ReduceType,double2,double2>::flops()*(unsigned long long)x.RealLength();

  checkCudaError();

  return value;
}

#include "multi_reduce_core.h"