__host__ __device__ void zero(double &x) { x = 0.0; }
__host__ __device__ void zero(double2 &x) { x.x = 0.0; x.y = 0.0; }
__host__ __device__ void zero(double3 &x) { x.x = 0.0; x.y = 0.0; x.z = 0.0; }
__device__ void copytoshared(double *s, const int i, const double x, const int block) { s[i] = x; }
__device__ void copytoshared(double *s, const int i, const double2 x, const int block)
{ s[i] = x.x; s[i+block] = x.y; }
__device__ void copytoshared(double *s, const int i, const double3 x, const int block)
{ s[i] = x.x; s[i+block] = x.y; s[i+2*block] = x.z; }
__device__ void copytoshared(volatile double *s, const int i, const double x, const int block) { s[i] = x; }
__device__ void copytoshared(volatile double *s, const int i, const double2 x, const int block)
{ s[i] = x.x; s[i+block] = x.y; }
__device__ void copytoshared(volatile double *s, const int i, const double3 x, const int block)
{ s[i] = x.x; s[i+block] = x.y; s[i+2*block] = x.z; }
__device__ void copyfromshared(double &x, const double *s, const int i, const int block) { x = s[i]; }
__device__ void copyfromshared(double2 &x, const double *s, const int i, const int block)
{ x.x = s[i]; x.y = s[i+block]; }
__device__ void copyfromshared(double3 &x, const double *s, const int i, const int block)
{ x.x = s[i]; x.y = s[i+block]; x.z = s[i+2*block]; }
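
// Illustrative sketch (not part of the original file): the copytoshared/copyfromshared
// helpers above use a structure-of-arrays layout, placing component c of slot i at
// s[i + c*block]. Assuming a hypothetical 128-thread block reducing double3 values,
// a round trip through shared memory looks like this:
__device__ void sharedLayoutExample(double *smem /* at least 3*128 doubles */)
{
  double3 val = make_double3(1.0, 2.0, 3.0);
  copytoshared(smem, threadIdx.x, val, 128);     // writes smem[tid], smem[tid+128], smem[tid+256]
  double3 back;
  copyfromshared(back, smem, threadIdx.x, 128);  // reads the same three slots back into `back`
}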
template<typename ReduceType, typename ReduceSimpleType>
__device__ void add(ReduceType &sum, ReduceSimpleType *s, const int i, const int block) { }

template<> __device__ void add<double,double>(double &sum, double *s, const int i, const int block)
{ sum += s[i]; }
template<> __device__ void add<double2,double>(double2 &sum, double *s, const int i, const int block)
{ sum.x += s[i]; sum.y += s[i+block]; }
template<> __device__ void add<double3,double>(double3 &sum, double *s, const int i, const int block)
{ sum.x += s[i]; sum.y += s[i+block]; sum.z += s[i+2*block]; }
template<typename ReduceType, typename ReduceSimpleType>
__device__ void add(ReduceSimpleType *s, const int i, const int j, const int block) { }
template<typename ReduceType, typename ReduceSimpleType>
__device__ void add(volatile ReduceSimpleType *s, const int i, const int j, const int block) { }
template<> __device__ void add<double,double>(volatile double *s, const int i, const int j, const int block)
{ s[i] += s[j]; }

template<> __device__ void add<double2,double>(double *s, const int i, const int j, const int block)
{ s[i] += s[j]; s[i+block] += s[j+block]; }
template<> __device__ void add<double2,double>(volatile double *s, const int i, const int j, const int block)
{ s[i] += s[j]; s[i+block] += s[j+block]; }

template<> __device__ void add<double3,double>(double *s, const int i, const int j, const int block)
{ s[i] += s[j]; s[i+block] += s[j+block]; s[i+2*block] += s[j+2*block]; }
template<> __device__ void add<double3,double>(volatile double *s, const int i, const int j, const int block)
{ s[i] += s[j]; s[i+block] += s[j+block]; s[i+2*block] += s[j+2*block]; }
template<int block_size, typename ReduceType, typename ReduceSimpleType>
__device__ void warpReduce(ReduceSimpleType* s, ReduceType& sum) {

  volatile ReduceSimpleType *sv = s;
  copytoshared(sv, 0, sum, block_size);

  if (block_size >= 32) { add<ReduceType>(sv, 0, 16, block_size); }
  if (block_size >= 16) { add<ReduceType>(sv, 0, 8, block_size); }
  if (block_size >= 8) { add<ReduceType>(sv, 0, 4, block_size); }
  if (block_size >= 4) { add<ReduceType>(sv, 0, 2, block_size); }
  if (block_size >= 2) { add<ReduceType>(sv, 0, 1, block_size); }
}
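
// Illustrative sketch (not from the original source): a simplified, double-only analogue of
// the warp-level stage above. Each thread's pointer is pre-offset by its thread index, so
// add<ReduceType>(sv, 0, 16, block_size) means "sdata[tid] += sdata[tid+16]". The volatile
// qualifier stops the compiler from caching shared-memory operands in registers between the
// unrolled steps, which is what allows the tree to run without __syncthreads() inside a warp.
template <int block_size>
__device__ void warpReduceDoubleExample(double *sdata, const int tid)
{
  volatile double *sv = sdata + tid;      // per-thread view, as in warpReduce() above
  if (block_size >= 32) sv[0] += sv[16];
  if (block_size >= 16) sv[0] += sv[8];
  if (block_size >=  8) sv[0] += sv[4];
  if (block_size >=  4) sv[0] += sv[2];
  if (block_size >=  2) sv[0] += sv[1];
  // after these steps sdata[0] holds the warp's partial sum; note that, like the original,
  // every thread in the warp reads up to sdata[tid+16], so the buffer must hold at least
  // 2*warpSize entries (see sharedBytesPerBlock() further below)
}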
#if (__COMPUTE_CAPABILITY__ < 130)

// double-single ("doublesingle") overloads used on devices without native double precision

__device__ void copytoshared(doublesingle *s, const int i, const doublesingle2 x, const int block)
{ s[i] = x.x; s[i+block] = x.y; }
__device__ void copytoshared(doublesingle *s, const int i, const doublesingle3 x, const int block)
{ s[i] = x.x; s[i+block] = x.y; s[i+2*block] = x.z; }

__device__ void copytoshared(volatile doublesingle *s, const int i, const doublesingle2 x, const int block)
{ s[i].a.x = x.x.a.x; s[i].a.y = x.x.a.y; s[i+block].a.x = x.y.a.x; s[i+block].a.y = x.y.a.y; }
__device__ void copytoshared(volatile doublesingle *s, const int i, const doublesingle3 x, const int block)
{ s[i].a.x = x.x.a.x; s[i].a.y = x.x.a.y; s[i+block].a.x = x.y.a.x; s[i+block].a.y = x.y.a.y;
  s[i+2*block].a.x = x.z.a.x; s[i+2*block].a.y = x.z.a.y; }

__device__ void copyfromshared(doublesingle2 &x, const doublesingle *s, const int i, const int block)
{ x.x = s[i]; x.y = s[i+block]; }
__device__ void copyfromshared(doublesingle3 &x, const doublesingle *s, const int i, const int block)
{ x.x = s[i]; x.y = s[i+block]; x.z = s[i+2*block]; }

template<> __device__ void add<doublesingle2,doublesingle>(doublesingle2 &sum, doublesingle *s, const int i, const int block)
{ sum.x += s[i]; sum.y += s[i+block]; }
template<> __device__ void add<doublesingle3,doublesingle>(doublesingle3 &sum, doublesingle *s, const int i, const int block)
{ sum.x += s[i]; sum.y += s[i+block]; sum.z += s[i+2*block]; }

template<> __device__ void add<doublesingle2,doublesingle>(doublesingle *s, const int i, const int j, const int block)
{ s[i] += s[j]; s[i+block] += s[j+block]; }
template<> __device__ void add<doublesingle2,doublesingle>(volatile doublesingle *s, const int i, const int j, const int block)
{ s[i] += s[j]; s[i+block] += s[j+block]; }
template<> __device__ void add<doublesingle3,doublesingle>(doublesingle *s, const int i, const int j, const int block)
{ s[i] += s[j]; s[i+block] += s[j+block]; s[i+2*block] += s[j+2*block]; }
template<> __device__ void add<doublesingle3,doublesingle>(volatile doublesingle *s, const int i, const int j, const int block)
{ s[i] += s[j]; s[i+block] += s[j+block]; s[i+2*block] += s[j+2*block]; }

#endif // __COMPUTE_CAPABILITY__ < 130
#include <launch_kernel.cuh>
template <typename ReduceType, typename SpinorX, typename SpinorY,
          typename SpinorZ, typename SpinorW, typename SpinorV, typename Reducer>
struct ReduceArg {
  SpinorX X;
  SpinorY Y;
  SpinorZ Z;
  SpinorW W;
  SpinorV V;
  Reducer r;
  ReduceType *partial;
  ReduceType *complete;
  const int length;
  ReduceArg(SpinorX X, SpinorY Y, SpinorZ Z, SpinorW W, SpinorV V, Reducer r,
            ReduceType *partial, ReduceType *complete, int length)
    : X(X), Y(Y), Z(Z), W(W), V(V), r(r), partial(partial), complete(complete), length(length) { ; }
};
template <int block_size, typename ReduceType, typename ReduceSimpleType,
          typename FloatN, int M, typename SpinorX, typename SpinorY,
          typename SpinorZ, typename SpinorW, typename SpinorV, typename Reducer>
__global__ void reduceKernel(ReduceArg<ReduceType,SpinorX,SpinorY,SpinorZ,SpinorW,SpinorV,Reducer> arg) {

  unsigned int tid = threadIdx.x;
  unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
  unsigned int gridSize = gridDim.x*blockDim.x;

  ReduceType sum;
  zero(sum);

  while (i < arg.length) {
    FloatN x[M], y[M], z[M], w[M], v[M];
    // ... load x, y, z, w, v from the spinor fields at site i ...

#if (__COMPUTE_CAPABILITY__ >= 200)
    // ... reducer pre-processing hook ...
#endif

    for (int j=0; j<M; j++) arg.r(sum, x[j], y[j], z[j], w[j], v[j]);

#if (__COMPUTE_CAPABILITY__ >= 200)
    // ... reducer post-processing hook ...
#endif

    // ... write back any fields the reducer modified ...
    i += gridSize;
  }

  // reduce the per-thread sums within this thread block
  extern __shared__ ReduceSimpleType sdata[];
  ReduceSimpleType *s = sdata + tid;
  if (tid >= warpSize) copytoshared(s, 0, sum, block_size);
  __syncthreads();

  if (tid < warpSize) {
    // warp raking: fold the other warps' values into the first warp, then tree-reduce
    for (int i=warpSize; i<block_size; i+=warpSize) { add<ReduceType>(sum, s, i, block_size); }
    warpReduce<block_size>(s, sum);
  }

  __shared__ bool isLastBlockDone;
  if (tid == 0) {
    // ... write this block's partial result to arg.partial[blockIdx.x] and flush it ...
    unsigned int value = atomicInc(&count, gridDim.x);
    isLastBlockDone = (value == (gridDim.x-1));
  }
  __syncthreads();

  // the last block to finish reduces the gridDim.x partial results
  if (isLastBlockDone) {
    unsigned int i = threadIdx.x;
    ReduceType sum;
    zero(sum);
    while (i < gridDim.x) {
      // ... accumulate arg.partial[i] into sum ...
      i += block_size;
    }

    extern __shared__ ReduceSimpleType sdata[];
    ReduceSimpleType *s = sdata + tid;
    if (tid >= warpSize) copytoshared(s, 0, sum, block_size);
    __syncthreads();

    if (tid < warpSize) {
      for (int i=warpSize; i<block_size; i+=warpSize) { add<ReduceType>(sum, s, i, block_size); }
      warpReduce<block_size>(s, sum);
    }

    if (threadIdx.x == 0) {
      // ... copy the final value from shared memory into arg.complete[0] and reset count ...
    }
  }
}
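
// Illustrative sketch (not from the original source): the structure of the kernel's final
// stage, written out for a plain double reduction. Each block publishes its partial sum,
// and the last block to arrive (detected with atomicInc on a global counter) folds the
// gridDim.x partials into the final result. The names countExample, partialExample and
// completeExample are hypothetical, not QUDA's.
__device__ unsigned int countExample = 0;

__device__ void finishReduceExample(double blockSum, double *partialExample, double *completeExample)
{
  __shared__ bool isLastBlock;

  if (threadIdx.x == 0) {
    partialExample[blockIdx.x] = blockSum;      // publish this block's partial result
    __threadfence();                            // make it visible to the other blocks
    unsigned int ticket = atomicInc(&countExample, gridDim.x);
    isLastBlock = (ticket == gridDim.x - 1);    // true only for the last block to finish
  }
  __syncthreads();

  // the last block serially folds the gridDim.x partials into the final answer
  if (isLastBlock && threadIdx.x == 0) {
    double sum = 0.0;
    for (unsigned int i = 0; i < gridDim.x; i++) sum += partialExample[i];
    *completeExample = sum;
    countExample = 0;                           // reset the counter for the next launch
  }
}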
template <typename doubleN, typename ReduceType, typename ReduceSimpleType, typename FloatN,
          int M, typename SpinorX, typename SpinorY, typename SpinorZ,
          typename SpinorW, typename SpinorV, typename Reducer>
doubleN reduceLaunch(ReduceArg<ReduceType,SpinorX,SpinorY,SpinorZ,SpinorW,SpinorV,Reducer> &arg,
                     const TuneParam &tp, const cudaStream_t &stream) {

  // ...

  LAUNCH_KERNEL(reduceKernel,tp,stream,arg,ReduceType,ReduceSimpleType,FloatN,M);

#if (defined(_MSC_VER) && defined(_WIN64)) || defined(__LP64__)
  if (deviceProp.canMapHostMemory) {
    // the kernel writes the result directly to mapped host memory, so just wait for it
    cudaEventRecord(reduceEnd, stream);
    while (cudaSuccess != cudaEventQuery(reduceEnd)) { ; }
  } else
#endif
    { cudaMemcpy(h_reduce, hd_reduce, sizeof(ReduceType), cudaMemcpyDeviceToHost); }

  doubleN cpu_sum;
  zero(cpu_sum);
  cpu_sum += ((ReduceType*)h_reduce)[0];

  const int Nreduce = sizeof(doubleN) / sizeof(double);
  reduceDoubleArray((double*)&cpu_sum, Nreduce);

  return cpu_sum;
}
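
// Illustrative note (not from the original source): Nreduce just counts the double components
// of the result so they can be reduced across processes as a flat array. For example, with
// doubleN = double3, Nreduce = sizeof(double3)/sizeof(double) = 3, and
// reduceDoubleArray((double*)&cpu_sum, 3) sums the x, y and z components over all ranks.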
template <typename doubleN, typename ReduceType, typename ReduceSimpleType, typename FloatN,
          int M, typename SpinorX, typename SpinorY, typename SpinorZ,
          typename SpinorW, typename SpinorV, typename Reducer>
class ReduceCuda : public Tunable {

private:
  ReduceArg<ReduceType,SpinorX,SpinorY,SpinorZ,SpinorW,SpinorV,Reducer> arg;
  doubleN &result;

  // host buffers used to back up the fields while autotuning
  char *X_h, *Y_h, *Z_h, *W_h, *V_h;
  char *Xnorm_h, *Ynorm_h, *Znorm_h, *Wnorm_h, *Vnorm_h;
  const size_t *bytes_;
  const size_t *norm_bytes_;
  unsigned int sharedBytesPerThread() const { return sizeof(ReduceType); }

  // when there is only one warp per block we allocate two warps' worth of shared
  // memory so the reduction never indexes shared memory out of bounds
  unsigned int sharedBytesPerBlock(const TuneParam &param) const {
    return 2*warpSize*sizeof(ReduceType);
  }

  virtual bool advanceSharedBytes(TuneParam &param) const
  {
    TuneParam next(param);
    advanceBlockDim(next); // to obtain the next block dimension
    int nthreads = next.block.x * next.block.y * next.block.z;
    param.shared_bytes = sharedBytesPerThread()*nthreads > sharedBytesPerBlock(param) ?
      sharedBytesPerThread()*nthreads : sharedBytesPerBlock(param);
    return false;
  }
public:
  ReduceCuda(doubleN &result, SpinorX &X, SpinorY &Y, SpinorZ &Z,
             SpinorW &W, SpinorV &V, Reducer &r, int length,
             const size_t *bytes, const size_t *norm_bytes) :
    arg(X, Y, Z, W, V, r, (ReduceType*)d_reduce, (ReduceType*)hd_reduce, length),
    result(result), X_h(0), Y_h(0), Z_h(0), W_h(0), V_h(0),
    Xnorm_h(0), Ynorm_h(0), Znorm_h(0), Wnorm_h(0), Vnorm_h(0),
    bytes_(bytes), norm_bytes_(norm_bytes) { }
  virtual TuneKey tuneKey() const {
    return TuneKey(blasStrings.vol_str, typeid(arg.r).name(), blasStrings.aux_str);
  }

  void apply(const cudaStream_t &stream) {
    TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
    result = reduceLaunch<doubleN,ReduceType,ReduceSimpleType,FloatN,M>(arg, tp, stream);
  }
  void preTune() {
    arg.X.save(&X_h, &Xnorm_h, bytes_[0], norm_bytes_[0]);
    arg.Y.save(&Y_h, &Ynorm_h, bytes_[1], norm_bytes_[1]);
    arg.Z.save(&Z_h, &Znorm_h, bytes_[2], norm_bytes_[2]);
    arg.W.save(&W_h, &Wnorm_h, bytes_[3], norm_bytes_[3]);
    arg.V.save(&V_h, &Vnorm_h, bytes_[4], norm_bytes_[4]);
  }

  void postTune() {
    arg.X.load(&X_h, &Xnorm_h, bytes_[0], norm_bytes_[0]);
    arg.Y.load(&Y_h, &Ynorm_h, bytes_[1], norm_bytes_[1]);
    arg.Z.load(&Z_h, &Znorm_h, bytes_[2], norm_bytes_[2]);
    arg.W.load(&W_h, &Wnorm_h, bytes_[3], norm_bytes_[3]);
    arg.V.load(&V_h, &Vnorm_h, bytes_[4], norm_bytes_[4]);
  }
  long long flops() const { return arg.r.flops()*(sizeof(FloatN)/sizeof(((FloatN*)0)->x))*arg.length*M; }

  long long bytes() const {
    size_t bytes = arg.X.Precision()*(sizeof(FloatN)/sizeof(((FloatN*)0)->x))*M;
    // ...
    return arg.r.streams()*bytes*arg.length;
  }
};
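
// Illustrative worked example (not from the original source): for a single-precision Wilson
// spinor stored as float4 with M = 6, the per-stream traffic counted in bytes() above is
//   Precision * (sizeof(float4)/sizeof(float)) * M = 4 * 4 * 6 = 96 bytes per site,
// which bytes() then multiplies by the number of streams the reducer touches (arg.r.streams())
// and by the number of sites (arg.length).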
template <typename doubleN, typename ReduceType, typename ReduceSimpleType,
          template <typename ReducerType, typename Float, typename FloatN> class Reducer,
          int writeX, int writeY, int writeZ, int writeW, int writeV, bool siteUnroll>
doubleN reduceCuda(const double2 &a, const double2 &b, cudaColorSpinorField &x,
                   cudaColorSpinorField &y, cudaColorSpinorField &z, cudaColorSpinorField &w,
                   cudaColorSpinorField &v) {

  // for full fields, reduce each parity separately and combine the results
  if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET) {
    doubleN even =
      reduceCuda<doubleN,ReduceType,ReduceSimpleType,Reducer,writeX,
                 writeY,writeZ,writeW,writeV,siteUnroll>
      (a, b, x.Even(), y.Even(), z.Even(), w.Even(), v.Even());
    doubleN odd =
      reduceCuda<doubleN,ReduceType,ReduceSimpleType,Reducer,writeX,
                 writeY,writeZ,writeW,writeV,siteUnroll>
      (a, b, x.Odd(), y.Odd(), z.Odd(), w.Odd(), v.Odd());
    return even + odd;
  }
  // ...
  warningQuda("Reductions on non-native fields are not supported\n");
  blasStrings.vol_str = x.VolString();
  blasStrings.aux_str = x.AuxString();
  int reduce_length = siteUnroll ? x.RealLength() : x.Length();
  size_t bytes[] = {x.Bytes(), y.Bytes(), z.Bytes(), w.Bytes(), v.Bytes()};
  size_t norm_bytes[] = {x.NormBytes(), y.NormBytes(), z.NormBytes(), w.NormBytes(), v.NormBytes()};
  doubleN value;

  if (x.Precision() == QUDA_DOUBLE_PRECISION) {
    if (x.Nspin() == 4) { // wilson
      const int M = siteUnroll ? 12 : 1;
      // ... construct the Spinor accessors X(x), Y(y), Z(z), W(w), V(v) ...
      Reducer<ReduceType, double2, double2> r(a,b);
      ReduceCuda<doubleN,ReduceType,ReduceSimpleType,double2,M,
        Spinor<double2,double2,double2,M,writeX>, Spinor<double2,double2,double2,M,writeY>,
        Spinor<double2,double2,double2,M,writeZ>, Spinor<double2,double2,double2,M,writeW>,
        Spinor<double2,double2,double2,M,writeV>, Reducer<ReduceType,double2,double2> >
        reduce(value, X, Y, Z, W, V, r, reduce_length/(2*M), bytes, norm_bytes);
      reduce.apply(*getBlasStream());
    }
    else if (x.Nspin() == 1) { // staggered
      const int M = siteUnroll ? 3 : 1;
      // ... construct the Spinor accessors X(x), Y(y), Z(z), W(w), V(v) ...
      Reducer<ReduceType, double2, double2> r(a,b);
      ReduceCuda<doubleN,ReduceType,ReduceSimpleType,double2,M,
        Spinor<double2,double2,double2,M,writeX>, Spinor<double2,double2,double2,M,writeY>,
        Spinor<double2,double2,double2,M,writeZ>, Spinor<double2,double2,double2,M,writeW>,
        Spinor<double2,double2,double2,M,writeV>, Reducer<ReduceType,double2,double2> >
        reduce(value, X, Y, Z, W, V, r, reduce_length/(2*M), bytes, norm_bytes);
      reduce.apply(*getBlasStream());
    }
    else { errorQuda("ERROR: nSpin=%d is not supported\n", x.Nspin()); }
  } else if (x.Precision() == QUDA_SINGLE_PRECISION) {
    if (x.Nspin() == 4) { // wilson
#if defined(GPU_WILSON_DIRAC) || defined(GPU_DOMAIN_WALL_DIRAC)
      const int M = siteUnroll ? 6 : 1;
      // ... construct the Spinor accessors X(x), Y(y), Z(z), W(w), V(v) ...
      Reducer<ReduceType, float2, float4> r(make_float2(a.x, a.y), make_float2(b.x, b.y));
      ReduceCuda<doubleN,ReduceType,ReduceSimpleType,float4,M,
        Spinor<float4,float4,float4,M,writeX,0>, Spinor<float4,float4,float4,M,writeY,1>,
        Spinor<float4,float4,float4,M,writeZ,2>, Spinor<float4,float4,float4,M,writeW,3>,
        Spinor<float4,float4,float4,M,writeV,4>, Reducer<ReduceType,float2,float4> >
        reduce(value, X, Y, Z, W, V, r, reduce_length/(4*M), bytes, norm_bytes);
      reduce.apply(*getBlasStream());
#else
      errorQuda("blas has not been built for Nspin=%d fields", x.Nspin());
#endif
    }
    else if (x.Nspin() == 1) { // staggered
#ifdef GPU_STAGGERED_DIRAC
      const int M = siteUnroll ? 3 : 1;
      // ... construct the Spinor accessors X(x), Y(y), Z(z), W(w), V(v) ...
      Reducer<ReduceType, float2, float2> r(make_float2(a.x, a.y), make_float2(b.x, b.y));
      ReduceCuda<doubleN,ReduceType,ReduceSimpleType,float2,M,
        Spinor<float2,float2,float2,M,writeX,0>, Spinor<float2,float2,float2,M,writeY,1>,
        Spinor<float2,float2,float2,M,writeZ,2>, Spinor<float2,float2,float2,M,writeW,3>,
        Spinor<float2,float2,float2,M,writeV,4>, Reducer<ReduceType,float2,float2> >
        reduce(value, X, Y, Z, W, V, r, reduce_length/(2*M), bytes, norm_bytes);
      reduce.apply(*getBlasStream());
#else
      errorQuda("blas has not been built for Nspin=%d fields", x.Nspin());
#endif
    }
    else { errorQuda("ERROR: nSpin=%d is not supported\n", x.Nspin()); }
  } else { // half precision
    if (x.Nspin() == 4) { // wilson
#if defined(GPU_WILSON_DIRAC) || defined(GPU_DOMAIN_WALL_DIRAC)
      // ... construct the Spinor accessors X(x), Y(y), Z(z), W(w), V(v) ...
      Reducer<ReduceType, float2, float4> r(make_float2(a.x, a.y), make_float2(b.x, b.y));
      ReduceCuda<doubleN,ReduceType,ReduceSimpleType,float4,6,
        Spinor<float4,float4,short4,6,writeX,0>, Spinor<float4,float4,short4,6,writeY,1>,
        Spinor<float4,float4,short4,6,writeZ,2>, Spinor<float4,float4,short4,6,writeW,3>,
        Spinor<float4,float4,short4,6,writeV,4>, Reducer<ReduceType,float2,float4> >
        reduce(value, X, Y, Z, W, V, r, y.Volume(), bytes, norm_bytes);
      reduce.apply(*getBlasStream());
#else
      errorQuda("blas has not been built for Nspin=%d fields", x.Nspin());
#endif
    }
    else if (x.Nspin() == 1) { // staggered
#ifdef GPU_STAGGERED_DIRAC
      // ... construct the Spinor accessors X(x), Y(y), Z(z), W(w), V(v) ...
      Reducer<ReduceType, float2, float2> r(make_float2(a.x, a.y), make_float2(b.x, b.y));
      ReduceCuda<doubleN,ReduceType,ReduceSimpleType,float2,3,
        Spinor<float2,float2,short2,3,writeX,0>, Spinor<float2,float2,short2,3,writeY,1>,
        Spinor<float2,float2,short2,3,writeZ,2>, Spinor<float2,float2,short2,3,writeW,3>,
        Spinor<float2,float2,short2,3,writeV,4>, Reducer<ReduceType,float2,float2> >
        reduce(value, X, Y, Z, W, V, r, y.Volume(), bytes, norm_bytes);
      reduce.apply(*getBlasStream());
#else
      errorQuda("blas has not been built for Nspin=%d fields", x.Nspin());
#endif
    }
    else { errorQuda("ERROR: nSpin=%d is not supported\n", x.Nspin()); }
  }
  blas_flops += Reducer<ReduceType,double2,double2>::flops()*(unsigned long long)x.RealLength();
  // ... (blas_bytes accounting and error checks) ...

  return value;
}
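
// Illustrative sketch (not from the original source): the Reducer template parameter is a
// functor that accumulates one FloatN element per call, matching how arg.r(...) is invoked
// inside reduceKernel. A hypothetical norm2-style reducer written against that interface
// (the flops()/streams() conventions below are assumptions, not QUDA's definitions):
template <typename ReduceType, typename Float2, typename FloatN>
struct Norm2Example {
  Norm2Example(const Float2 &a, const Float2 &b) { }     // a and b are unused for a plain norm
  __device__ void operator()(ReduceType &sum, FloatN &x, FloatN &y,
                             FloatN &z, FloatN &w, FloatN &v)
  { sum += (ReduceType)(x.x*x.x + x.y*x.y); }            // accumulate |x|^2 for a two-component FloatN
  static int flops() { return 4; }                       // multiplies and adds per element
  int streams() { return 1; }                            // only the x field is read
};
//
// It would then be dispatched along the lines of
//   double nrm = reduceCuda<double,double,double,Norm2Example,0,0,0,0,0,false>(a, b, x, y, z, w, v);
// where a and b are dummy double2 coefficients.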