__host__ __device__ void zero(double &x) { x = 0.0; }
__host__ __device__ void zero(double2 &x) { x.x = 0.0; x.y = 0.0; }
__host__ __device__ void zero(double3 &x) { x.x = 0.0; x.y = 0.0; x.z = 0.0; }
__device__ void copytoshared(double *s, const int i, const double x, const int block) { s[i] = x; }

__device__ void copytoshared(double *s, const int i, const double2 x, const int block)
{ s[i] = x.x; s[i+block] = x.y; }

__device__ void copytoshared(double *s, const int i, const double3 x, const int block)
{ s[i] = x.x; s[i+block] = x.y; s[i+2*block] = x.z; }
__device__ void copytoshared(volatile double *s, const int i, const double x, const int block) { s[i] = x; }

__device__ void copytoshared(volatile double *s, const int i, const double2 x, const int block)
{ s[i] = x.x; s[i+block] = x.y; }

__device__ void copytoshared(volatile double *s, const int i, const double3 x, const int block)
{ s[i] = x.x; s[i+block] = x.y; s[i+2*block] = x.z; }
__device__ void copyfromshared(double &x, const double *s, const int i, const int block) { x = s[i]; }

__device__ void copyfromshared(double2 &x, const double *s, const int i, const int block)
{ x.x = s[i]; x.y = s[i+block]; }

__device__ void copyfromshared(double3 &x, const double *s, const int i, const int block)
{ x.x = s[i]; x.y = s[i+block]; x.z = s[i+2*block]; }
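// Layout note (added for clarity): multi-component sums are stored in shared
// memory component-by-component with stride 'block' (the block size), so for a
// double3 the components of thread 'tid' live at
//
//   sdata[tid], sdata[tid + block], sdata[tid + 2*block]
//
// which keeps the accesses of neighbouring threads contiguous for each component.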
template<typename ReduceType, typename ReduceSimpleType>
__device__ void add(ReduceType &sum, ReduceSimpleType *s, const int i, const int block) { }

template<> __device__ void add<double2,double>(double2 &sum, double *s, const int i, const int block)
{ sum.x += s[i]; sum.y += s[i+block]; }

template<> __device__ void add<double3,double>(double3 &sum, double *s, const int i, const int block)
{ sum.x += s[i]; sum.y += s[i+block]; sum.z += s[i+2*block]; }
template<typename ReduceType, typename ReduceSimpleType>
__device__ void add(ReduceSimpleType *s, const int i, const int j, const int block) { }

template<typename ReduceType, typename ReduceSimpleType>
__device__ void add(volatile ReduceSimpleType *s, const int i, const int j, const int block) { }
template<> __device__ void add<double,double>(volatile double *s, const int i, const int j, const int block)
{ s[i] += s[j]; }
template<> __device__ void add<double2,double>(volatile double *s, const int i, const int j, const int block)
{ s[i] += s[j]; s[i+block] += s[j+block]; }
template<> __device__ void add<double3,double>(double *s, const int i, const int j, const int block)
{ s[i] += s[j]; s[i+block] += s[j+block]; s[i+2*block] += s[j+2*block]; }

template<> __device__ void add<double3,double>(volatile double *s, const int i, const int j, const int block)
{ s[i] += s[j]; s[i+block] += s[j+block]; s[i+2*block] += s[j+2*block]; }
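// Note (added for clarity): the pairwise add() overloads come in plain and
// volatile flavours because the in-warp stage of the reduction below runs
// warp-synchronously, without __syncthreads(); the volatile pointer forces each
// partial sum to be re-read from shared memory instead of being cached in registers.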
#if (__COMPUTE_CAPABILITY__ < 130)

// Pre-sm_13 devices have no native double precision, so the helpers above are
// mirrored for the double-single emulation types (doublesingle, doublesingle2,
// doublesingle3), whose components are stored in a float2 member 'a'.

__device__ void copytoshared(doublesingle *s, const int i, const doublesingle2 x, const int block)
{ s[i] = x.x; s[i+block] = x.y; }

__device__ void copytoshared(doublesingle *s, const int i, const doublesingle3 x, const int block)
{ s[i] = x.x; s[i+block] = x.y; s[i+2*block] = x.z; }

__device__ void copytoshared(volatile doublesingle *s, const int i, const doublesingle2 x, const int block)
{ s[i].a.x = x.x.a.x; s[i].a.y = x.x.a.y; s[i+block].a.x = x.y.a.x; s[i+block].a.y = x.y.a.y; }
__device__ void copytoshared(volatile doublesingle *s, const int i, const doublesingle3 x, const int block)
{ s[i].a.x = x.x.a.x; s[i].a.y = x.x.a.y; s[i+block].a.x = x.y.a.x; s[i+block].a.y = x.y.a.y;
  s[i+2*block].a.x = x.z.a.x; s[i+2*block].a.y = x.z.a.y; }
__device__ void copyfromshared(doublesingle2 &x, const doublesingle *s, const int i, const int block)
{ x.x = s[i]; x.y = s[i+block]; }

__device__ void copyfromshared(doublesingle3 &x, const doublesingle *s, const int i, const int block)
{ x.x = s[i]; x.y = s[i+block]; x.z = s[i+2*block]; }
template<> __device__ void add<doublesingle2,doublesingle>(doublesingle2 &sum, doublesingle *s, const int i, const int block)
{ sum.x += s[i]; sum.y += s[i+block]; }

template<> __device__ void add<doublesingle3,doublesingle>(doublesingle3 &sum, doublesingle *s, const int i, const int block)
{ sum.x += s[i]; sum.y += s[i+block]; sum.z += s[i+2*block]; }
template<> __device__ void add<doublesingle2,doublesingle>(doublesingle *s, const int i, const int j, const int block)
{ s[i] += s[j]; s[i+block] += s[j+block]; }

template<> __device__ void add<doublesingle2,doublesingle>(volatile doublesingle *s, const int i, const int j, const int block)
{ s[i] += s[j]; s[i+block] += s[j+block]; }

template<> __device__ void add<doublesingle3,doublesingle>(doublesingle *s, const int i, const int j, const int block)
{ s[i] += s[j]; s[i+block] += s[j+block]; s[i+2*block] += s[j+2*block]; }

template<> __device__ void add<doublesingle3,doublesingle>(volatile doublesingle *s, const int i, const int j, const int block)
{ s[i] += s[j]; s[i+block] += s[j+block]; s[i+2*block] += s[j+2*block]; }

#endif // __COMPUTE_CAPABILITY__ < 130
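// Background sketch (illustrative only, not the implementation used by these
// types): a double-single value represents one double as an unevaluated sum of
// two floats, head + tail.  Assuming a hypothetical pair of float references,
// an addition looks roughly like this (Knuth two-sum followed by a
// renormalization):
//
//   __device__ void ds_add(float &head, float &tail,
//                          const float bhead, const float btail) {
//     float s = head + bhead;                   // approximate sum of the heads
//     float v = s - head;
//     float e = (bhead - v) + (head - (s - v)); // exact rounding error of s
//     e += tail + btail;                        // fold in both tails
//     head = s + e;                             // renormalize back to head/tail
//     tail = e - (head - s);
//   }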
#include <launch_kernel.cuh>
// counts how many thread blocks have finished writing their partial result
__device__ unsigned int count = 0;
template <typename ReduceType, typename SpinorX, typename SpinorY,
          typename SpinorZ, typename SpinorW, typename SpinorV, typename Reducer>
struct ReduceArg {
  // ...
  ReduceArg(SpinorX X, SpinorY Y, SpinorZ Z, SpinorW W, SpinorV V, Reducer r,
            ReduceType *partial, ReduceType *complete, int length)
    : X(X), Y(Y), Z(Z), W(W), V(V), r(r), partial(partial), complete(complete), length(length) { ; }
};
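// Note (added for clarity): 'partial' appears to hold one ReduceType per thread
// block and 'complete' the final grid-wide result; the dispatch code below wires
// them to the d_reduce and hd_reduce buffers respectively.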
template <int block_size, typename ReduceType, typename ReduceSimpleType,
          typename FloatN, int M, typename SpinorX, typename SpinorY,
          typename SpinorZ, typename SpinorW, typename SpinorV, typename Reducer>
__global__ void reduceKernel(ReduceArg<ReduceType,SpinorX,SpinorY,SpinorZ,SpinorW,SpinorV,Reducer> arg) {

  unsigned int tid = threadIdx.x;
  unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
  unsigned int gridSize = gridDim.x*blockDim.x;

  ReduceType sum;
  zero(sum);

  // ... (grid-stride loop over arg.length begins here)

  // per-thread registers holding M elements of each input spinor
  FloatN x[M], y[M], z[M], w[M], v[M];

  // ... (the fields are loaded into the registers above)
#if (__COMPUTE_CAPABILITY__ >= 200)
  // ...
#endif

  // apply the reduction functor to the M elements handled on this pass
  for (int j=0; j<M; j++) arg.r(sum, x[j], y[j], z[j], w[j], v[j]);

#if (__COMPUTE_CAPABILITY__ >= 200)
  // ...
#endif
  // stash each thread's partial sum in strided shared memory
  extern __shared__ ReduceSimpleType sdata[];
  ReduceSimpleType *s = sdata + tid;
  if (tid >= warpSize) copytoshared(s, 0, sum, block_size);
  __syncthreads();

  // ... (the first warp continues from here)

  // rake the partial sums of the other warps into the first warp
  for (int i=warpSize; i<block_size; i+=warpSize) { add<ReduceType>(sum, s, i, block_size); }

  // ...

  // warp-synchronous tree reduction; the volatile pointer keeps every step's
  // partial sums visible in shared memory
  volatile ReduceSimpleType *sv = s;
  if (block_size >= 32) { add<ReduceType>(sv, 0, 16, block_size); }
  if (block_size >= 16) { add<ReduceType>(sv, 0, 8, block_size); }
  if (block_size >= 8) { add<ReduceType>(sv, 0, 4, block_size); }
  if (block_size >= 4) { add<ReduceType>(sv, 0, 2, block_size); }
  if (block_size >= 2) { add<ReduceType>(sv, 0, 1, block_size); }
  // ... (thread 0 publishes this block's partial result, then takes a ticket;
  //      the block that takes the last ticket finishes the reduction)
  unsigned int value = atomicInc(&count, gridDim.x);

  // ... (only the last block continues below)

    unsigned int i = threadIdx.x;

    // gather the per-block partial results
    while (i < gridDim.x) {
      // ...
    }

    // reduce them with the same two-stage scheme used above
    extern __shared__ ReduceSimpleType sdata[];
    ReduceSimpleType *s = sdata + tid;
    if (tid >= warpSize) copytoshared(s, 0, sum, block_size);
    __syncthreads();

    for (int i=warpSize; i<block_size; i+=warpSize) { add<ReduceType>(sum, s, i, block_size); }

    // ...

    volatile ReduceSimpleType *sv = s;
    if (block_size >= 32) { add<ReduceType>(sv, 0, 16, block_size); }
    if (block_size >= 16) { add<ReduceType>(sv, 0, 8, block_size); }
    if (block_size >= 8) { add<ReduceType>(sv, 0, 4, block_size); }
    if (block_size >= 4) { add<ReduceType>(sv, 0, 2, block_size); }
    if (block_size >= 2) { add<ReduceType>(sv, 0, 1, block_size); }

    // thread 0 of the last block records the completed result
    if (threadIdx.x == 0) {
      // ...
    }
  // ...
}
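// Pattern sketch (generic CUDA idiom, added for clarity; not this file's exact
// code): completing a grid-wide reduction inside a single kernel launch usually
// looks like the following, which is roughly what the elided parts of
// reduceKernel appear to do around the atomicInc on 'count':
//
//   __shared__ bool isLastBlockDone;
//   if (threadIdx.x == 0) {
//     partial[blockIdx.x] = block_sum;              // publish this block's sum
//     __threadfence();                              // make it visible grid-wide
//     unsigned int ticket = atomicInc(&count, gridDim.x);
//     isLastBlockDone = (ticket == gridDim.x - 1);  // am I the last block?
//   }
//   __syncthreads();
//   if (isLastBlockDone) {
//     // reduce partial[0..gridDim.x), write the final result,
//     // and reset count to 0 for the next launch
//   }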
template <typename doubleN, typename ReduceType, typename ReduceSimpleType, typename FloatN,
          int M, typename SpinorX, typename SpinorY, typename SpinorZ,
          typename SpinorW, typename SpinorV, typename Reducer>
doubleN reduceLaunch(ReduceArg<ReduceType,SpinorX,SpinorY,SpinorZ,SpinorW,SpinorV,Reducer> &arg,
                     const TuneParam &tp, const cudaStream_t &stream) {
  // ...

  LAUNCH_KERNEL(reduceKernel,tp,stream,arg,ReduceType,ReduceSimpleType,FloatN,M);
#if (defined(_MSC_VER) && defined(_WIN64)) || defined(__LP64__)
  if (deviceProp.canMapHostMemory) {
    // zero-copy result: spin on an event until the kernel has written hd_reduce
    cudaEventRecord(reduceEnd, stream);
    while (cudaSuccess != cudaEventQuery(reduceEnd)) { ; }
  } else
#endif
    { cudaMemcpy(h_reduce, hd_reduce, sizeof(ReduceType), cudaMemcpyDeviceToHost); }

  // ...
  cpu_sum += ((ReduceType*)h_reduce)[0];
  // number of doubles in the result type, presumably used to reduce the host
  // value across processes via reduceDoubleArray()
  const int Nreduce = sizeof(doubleN) / sizeof(double);

  // ...
  return cpu_sum;
}
template <typename doubleN, typename ReduceType, typename ReduceSimpleType, typename FloatN,
          int M, typename SpinorX, typename SpinorY, typename SpinorZ,
          typename SpinorW, typename SpinorV, typename Reducer>
class ReduceCuda : public Tunable {
  // ...

  // host backups of the fields, used to restore them after autotuning
  char *X_h, *Y_h, *Z_h, *W_h, *V_h;
  char *Xnorm_h, *Ynorm_h, *Znorm_h, *Wnorm_h, *Vnorm_h;
  const size_t *bytes_;
  const size_t *norm_bytes_;
  unsigned int sharedBytesPerThread() const { return sizeof(ReduceType); }

  unsigned int sharedBytesPerBlock(const TuneParam &param) const {
    return 2*warpSize*sizeof(ReduceType);
  }

  virtual bool advanceSharedBytes(TuneParam &param) const
  {
    TuneParam next(param);
    advanceBlockDim(next);
    int nthreads = next.block.x * next.block.y * next.block.z;
    param.shared_bytes = sharedBytesPerThread()*nthreads > sharedBytesPerBlock(param) ?
      sharedBytesPerThread()*nthreads : sharedBytesPerBlock(param);
    return false;
  }
public:
  ReduceCuda(doubleN &result, SpinorX &X, SpinorY &Y, SpinorZ &Z,
             SpinorW &W, SpinorV &V, Reducer &r, int length,
             const size_t *bytes, const size_t *norm_bytes) :
    arg(X, Y, Z, W, V, r, (ReduceType*)d_reduce, (ReduceType*)hd_reduce, length),
    result(result), X_h(0), Y_h(0), Z_h(0), W_h(0), V_h(0),
    Xnorm_h(0), Ynorm_h(0), Znorm_h(0), Wnorm_h(0), Vnorm_h(0),
    bytes_(bytes), norm_bytes_(norm_bytes) { }

  TuneKey tuneKey() const {
    return TuneKey(blasStrings.vol_str, typeid(arg.r).name(), blasStrings.aux_tmp);
  }

  void apply(const cudaStream_t &stream) {
    // ... (TuneParam tp obtained from tuneLaunch())
    result = reduceLaunch<doubleN,ReduceType,ReduceSimpleType,FloatN,M>(arg, tp, stream);
  }
  // back up the fields before autotuning overwrites them
  void preTune() {
    arg.X.save(&X_h, &Xnorm_h, bytes_[0], norm_bytes_[0]);
    arg.Y.save(&Y_h, &Ynorm_h, bytes_[1], norm_bytes_[1]);
    arg.Z.save(&Z_h, &Znorm_h, bytes_[2], norm_bytes_[2]);
    arg.W.save(&W_h, &Wnorm_h, bytes_[3], norm_bytes_[3]);
    arg.V.save(&V_h, &Vnorm_h, bytes_[4], norm_bytes_[4]);
  }

  // restore the fields afterwards
  void postTune() {
    arg.X.load(&X_h, &Xnorm_h, bytes_[0], norm_bytes_[0]);
    arg.Y.load(&Y_h, &Ynorm_h, bytes_[1], norm_bytes_[1]);
    arg.Z.load(&Z_h, &Znorm_h, bytes_[2], norm_bytes_[2]);
    arg.W.load(&W_h, &Wnorm_h, bytes_[3], norm_bytes_[3]);
    arg.V.load(&V_h, &Vnorm_h, bytes_[4], norm_bytes_[4]);
  }
  long long flops() const {
    return arg.r.flops()*(sizeof(FloatN)/sizeof(((FloatN*)0)->x))*arg.length*M;
  }

  long long bytes() const {
    size_t bytes = arg.X.Precision()*(sizeof(FloatN)/sizeof(((FloatN*)0)->x))*M;
    // ...
    return arg.r.streams()*bytes*arg.length;
  }
};
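// Usage sketch (added for orientation; hypothetical driver code, not from this
// file): the dispatch function below instantiates ReduceCuda with concrete
// Spinor and Reducer types and runs it on the blas stream, conceptually:
//
//   doubleN value;
//   Reducer<ReduceType,double2,double2> r(a, b);
//   ReduceCuda<doubleN,ReduceType,ReduceSimpleType,double2,M,
//              /* Spinor types */ ...> reduce(value, X, Y, Z, W, V, r,
//                                             length, bytes, norm_bytes);
//   reduce.apply(*getBlasStream());   // autotunes on first use, then launches
//   // 'value' now holds this process's part of the reduction result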
template <typename doubleN, typename ReduceType, typename ReduceSimpleType,
          template <typename ReducerType, typename Float, typename FloatN> class Reducer,
          int writeX, int writeY, int writeZ, int writeW, int writeV, bool siteUnroll>
doubleN reduceCuda(const double2 &a, const double2 &b, cudaColorSpinorField &x,
                   cudaColorSpinorField &y, cudaColorSpinorField &z, cudaColorSpinorField &w,
                   cudaColorSpinorField &v) {
  // full fields recurse on the even- and odd-parity halves and sum the results
  if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET) {
    doubleN even = reduceCuda<doubleN,ReduceType,ReduceSimpleType,Reducer,writeX,
      writeY,writeZ,writeW,writeV,siteUnroll>(a, b, x.Even(), y.Even(), z.Even(), w.Even(), v.Even());
    doubleN odd = reduceCuda<doubleN,ReduceType,ReduceSimpleType,Reducer,writeX,
      writeY,writeZ,writeW,writeV,siteUnroll>(a, b, x.Odd(), y.Odd(), z.Odd(), w.Odd(), v.Odd());
    return even + odd;
  }
  // ...
  warningQuda("Reductions on non-native fields is not supported\n");
  // volume and aux strings used for the autotuner's TuneKey
  blasStrings.vol_str = x.VolString();
  strcpy(blasStrings.aux_tmp, x.AuxString());
  strcat(blasStrings.aux_tmp, ",");
  strcat(blasStrings.aux_tmp, z.AuxString());
  // raw field sizes, handed to ReduceCuda so it can back up and restore the
  // fields around autotuning
  size_t bytes[] = {x.Bytes(), y.Bytes(), z.Bytes(), w.Bytes(), v.Bytes()};
  size_t norm_bytes[] = {x.NormBytes(), y.NormBytes(), z.NormBytes(), w.NormBytes(), v.NormBytes()};
  // ... (dispatch on the precision of the fields: first combination, double-precision registers)
  if (x.Nspin() == 4) {
#if defined(GPU_WILSON_DIRAC) || defined(GPU_DOMAIN_WALL_DIRAC)
    // ...
    Reducer<ReduceType, double2, double2> r(a,b);
    ReduceCuda<doubleN,ReduceType,ReduceSimpleType,double2,M,
      Spinor<double2,double4,float4,M,writeX>, Spinor<double2,double4,float4,M,writeY>,
      Spinor<double2,double2,double2,M,writeZ>, Spinor<double2,double4,float4,M,writeW>,
      /* ... */ >
      reduce(value, X, Y, Z, W, V, r, y.Volume(), bytes, norm_bytes);
    // ...
#else
    errorQuda("blas has not been built for Nspin=%d fields", x.Nspin());
#endif
  } else if (x.Nspin() == 1) {
#ifdef GPU_STAGGERED_DIRAC
    const int M = siteUnroll ? 3 : 1;
    const int reduce_length = siteUnroll ? x.RealLength() : x.Length();
    // ...
    Reducer<ReduceType, double2, double2> r(a,b);
    ReduceCuda<doubleN,ReduceType,ReduceSimpleType,double2,M,
      Spinor<double2,double2,float2,M,writeX>, Spinor<double2,double2,float2,M,writeY>,
      Spinor<double2,double2,double2,M,writeZ>, Spinor<double2,double2,float2,M,writeW>,
      /* ... */ >
      reduce(value, X, Y, Z, W, V, r, reduce_length/(2*M), bytes, norm_bytes);
    // ...
#else
    errorQuda("blas has not been built for Nspin=%d fields", x.Nspin());
#endif
  } else { errorQuda("ERROR: nSpin=%d is not supported\n", x.Nspin()); }
  // ... (next precision combination: double-precision registers, half/single storage)
  if (x.Nspin() == 4) {
#if defined(GPU_WILSON_DIRAC) || defined(GPU_DOMAIN_WALL_DIRAC)
    // ...
    Reducer<ReduceType, double2, double2> r(a,b);
    ReduceCuda<doubleN,ReduceType,ReduceSimpleType,double2,M,
      Spinor<double2,double4,short4,M,writeX>, Spinor<double2,double4,short4,M,writeY>,
      Spinor<double2,double2,double2,M,writeZ>, Spinor<double2,double4,short4,M,writeW>,
      /* ... */ >
      reduce(value, X, Y, Z, W, V, r, y.Volume(), bytes, norm_bytes);
    // ...
#else
    errorQuda("blas has not been built for Nspin=%d fields", x.Nspin());
#endif
  } else if (x.Nspin() == 1) {
#ifdef GPU_STAGGERED_DIRAC
    const int M = siteUnroll ? 3 : 1;
    const int reduce_length = siteUnroll ? x.RealLength() : x.Length();
    // ...
    Reducer<ReduceType, double2, double2> r(a,b);
    ReduceCuda<doubleN,ReduceType,ReduceSimpleType,double2,M,
      Spinor<double2,double2,float2,M,writeX>, Spinor<double2,double2,float2,M,writeY>,
      Spinor<double2,double2,double2,M,writeZ>, Spinor<double2,double2,float2,M,writeW>,
      /* ... */ >
      reduce(value, X, Y, Z, W, V, r, reduce_length/(2*M), bytes, norm_bytes);
    // ...
#else
    errorQuda("blas has not been built for Nspin=%d fields", x.Nspin());
#endif
  } else { errorQuda("ERROR: nSpin=%d is not supported\n", x.Nspin()); }
  // ... (next precision combination: single-precision registers)
  if (x.Nspin() == 4) {
#if defined(GPU_WILSON_DIRAC) || defined(GPU_DOMAIN_WALL_DIRAC)
    // ...
    Reducer<ReduceType, float2, float4> r(make_float2(a.x, a.y), make_float2(b.x, b.y));
    ReduceCuda<doubleN,ReduceType,ReduceSimpleType,float4,6,
      Spinor<float4,float4,short4,6,writeX,0>, Spinor<float4,float4,short4,6,writeY,1>,
      Spinor<float4,float4,float4,6,writeZ,2>, Spinor<float4,float4,short4,6,writeW,3>,
      /* ... */ >
      reduce(value, X, Y, Z, W, V, r, y.Volume(), bytes, norm_bytes);
    // ...
#else
    errorQuda("blas has not been built for Nspin=%d fields", x.Nspin());
#endif
  } else if (x.Nspin() == 1) {
#ifdef GPU_STAGGERED_DIRAC
    // ...
    Reducer<ReduceType, float2, float2> r(make_float2(a.x, a.y), make_float2(b.x, b.y));
    ReduceCuda<doubleN,ReduceType,ReduceSimpleType,float2,3,
      Spinor<float2,float2,short2,3,writeX,0>, Spinor<float2,float2,short2,3,writeY,1>,
      Spinor<float2,float2,float2,3,writeZ,2>, Spinor<float2,float2,short2,3,writeW,3>,
      /* ... */ >
      reduce(value, X, Y, Z, W, V, r, y.Volume(), bytes, norm_bytes);
    // ...
#else
    errorQuda("blas has not been built for Nspin=%d fields", x.Nspin());
#endif
  } else { errorQuda("ERROR: nSpin=%d is not supported\n", x.Nspin()); }
  blas_flops += Reducer<ReduceType,double2,double2>::flops()*(unsigned long long)x.RealLength();

  // ...
  return value;
}
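// Example sketch (hypothetical, added for orientation; the real reduction
// functors live elsewhere in the library): a concrete routine such as a dot
// product would plug into reduceCuda roughly like this, matching the
// arg.r(sum, x, y, z, w, v), flops(), and streams() usage seen above:
//
//   template <typename ReduceType, typename Float2, typename FloatN>
//   struct Dot {
//     Dot(const Float2 &a, const Float2 &b) { }
//     __device__ void operator()(ReduceType &sum, FloatN &x, FloatN &y,
//                                FloatN &z, FloatN &w, FloatN &v)
//     { sum += (ReduceType)x.x*y.x + (ReduceType)x.y*y.y; }
//     static int streams() { return 2; }  // fields read per element
//     static int flops() { return 2; }    // flops per real element
//   };
//
//   double result = reduceCuda<double,double,double,Dot,0,0,0,0,0,false>
//     (make_double2(0.0,0.0), make_double2(0.0,0.0), x, y, x, x, x);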