QUDA  v0.7.0
A library for QCD on GPUs
All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Pages
shift_quark_field.cu
Go to the documentation of this file.
1 #include <cstdio>
2 #include <cstdlib>
3 #include <cuda.h>
4 #include <quda_internal.h>
5 
6 namespace quda {
7 
8  template<typename Output, typename Input>
10  const unsigned int length;
11  unsigned int X[4];
12 #ifdef MULTI_GPU
13  const usigned int ghostOffset; // depends on the direction
14 #endif
15  const unsigned int parity;
16  const unsigned int dir;
17  bool partitioned[4];
18  const int shift;
19  Input in;
20  Output out;
21  ShiftColorSpinorFieldArg(const unsigned int length,
22  const unsigned int X[4],
23  const unsigned int ghostOffset,
24  const unsigned int parity,
25  const unsigned int dir,
26  const int shift,
27  const Input& in,
28  const Output& out) : length(length),
29 #ifdef MULTI_GPU
30  ghostOffset(ghostOffset),
31 #endif
32  parity(parity), dir(dir), shift(shift), in(in), out(out)
33  {
34  for(int i=0; i<4; ++i) this->X[i] = X[i];
35  for(int i=0; i<4; ++i) partitioned[i] = commDimPartitioned(i) ? true : false;
36  }
37  };
38 
39  template<IndexType idxType, typename Int>
40  __device__ __forceinline__
41  int neighborIndex(const unsigned int& cb_idx, const int (&shift)[4], const bool (&partitioned)[4], const unsigned int& parity){
42 
43  int idx;
44  Int x, y, z, t;
45 
46  coordsFromIndex(full_idx, x, y, z, t, cb_idx, parity);
47 
48 #ifdef MULTI_GPU
49  if(partitioned[0])
50  if( (x+shift[0])<0 || (x+shift[0])>=X1) return -1;
51  if(partitioned[1])
52  if( (y+shift[1])<0 || (y+shift[1])>=X2) return -1;
53  if(partitioned[2])
54  if( (z+shift[2])<0 || (z+shift[2])>=X3) return -1;
55  if(partitioned[3])
56  if( (z+shift[3])<0 || (z+shift[3])>=X4) return -1;
57 #endif
58 
59  x = shift[0] ? (x + shift[0] + X1) % X1 : x;
60  y = shift[1] ? (y + shift[1] + X2) % X2 : y;
61  z = shift[2] ? (z + shift[2] + X3) % X3 : z;
62  t = shift[3] ? (t + shift[3] + X4) % X4 : t;
63  return (((t*X3 + z)*X2 + y)*X1 + x) >> 1;
64  }
65 
66 
67  template <typename FloatN, int N, typename Output, typename Input>
68  __global__ void shiftColorSpinorFieldKernel(ShiftQuarkArg<Output,Input> arg){
69 
70  int shift[4] = {0,0,0,0};
71  shift[arg.dir] = arg.shift;
72 
73  unsigned int idx = blockIdx.x*(blockDim.x) + threadIdx.x;
74  unsigned int gridSize = gridDim.x*blockDim.x;
75 
76  FloatN x[N];
77  while(idx<arg.length){
78  const int new_idx = neighborIndex(idx, shift, arg.partitioned, arg.parity);
79 #ifdef MULTI_GPU
80  if(new_idx > 0){
81 #endif
82  arg.in.load(x, new_idx);
83  arg.out.save(x, idx);
84 #ifdef MULTI_GPU
85  }
86 #endif
87  idx += gridSize;
88  }
89  return;
90  }
91 
92  template<typename FloatN, int N, typename Output, typename Input>
93  __global__ void shiftColorSpinorFieldExternalKernel(ShiftQuarkArg<Output,Input> arg){
94 
95  unsigned int idx = blockIdx.x*(blockDim.x) + threadIdx.x;
96  unsigned int gridSize = gridDim.x*blockDim.x;
97 
98  Float x[N];
99  unsigned int coord[4];
100  while(idx<arg.length){
101 
102  // compute the coordinates in the ghost zone
103  coordsFromIndex<1>(coord, idx, arg.X, arg.dir, arg.parity);
104 
105  unsigned int ghost_idx = arg.ghostOffset + ghostIndexFromCoords<3,3>(arg.X, coord, arg.dir, arg.shift);
106 
107  arg.in.load(x, ghost_idx);
108  arg.out.save(x, idx);
109 
110  idx += gridSize;
111  }
112 
113 
114  return;
115  }
116 
117  template<typename Output, typename Input>
119 
120  private:
122  const int *X; // pointer to lattice dimensions
123 
124  int sharedBytesPerThread() const { return 0; }
125  int sharedBytesPerBlock(const TuneParam &) cont { return 0; }
126 
127  // don't tune the grid dimension
128  bool advanceGridDim(TuneParam & param) const { return false; }
129 
130  bool advanceBlockDim(TuneParam &param) const
131  {
132  const unsigned int max_threads = deviceProp.maxThreadsDim[0];
133  const unsigned int max_blocks = deviceProp.maxGridSize[0];
134  const unsigned int max_shared = 16384;
135  const int step = deviceProp.warpSize;
136  const int threads = arg.length;
137  bool ret;
138 
139  param.block.x += step;
140  if(param.block.x > max_threads || sharedBytesPerThread()*param.block.x > max_shared){
141  param.block = dim3((threads+max_blocks-1)/max_blocks, 1, 1); // ensure the blockDim is large enough given the limit on gridDim
142  param.block.x = ((param.block.x+step-1)/step)*step;
143  if(param.block.x > max_threads) errorQuda("Local lattice volume is too large for device");
144  ret = false;
145  }else{
146  ret = true;
147  }
148  param.grid = dim3((threads+param.block.x-1)/param.block.x,1,1);
149  return ret;
150  }
151 
152 
153  public:
156  : arg(arg), location(location) {}
158 
159  void apply(const cudaStream_t &stream){
161  TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
162  shiftColorSpinorFieldKernel<Output,Input><<<tp.grid,tp.block,tp.shared_bytes>>>(arg);
163 #ifdef MULTI_GPU
164  // Need to perform some communication and call exterior kernel, I guess
165 #endif
166  }else{ // run the CPU code
167  errorQuda("ShiftColorSpinorField is not yet implemented on the CPU\n");
168  }
169  } // apply
170 
171  void preTune(){}
172  void postTune(){}
173 
174  virtual void initTuneParam(TuneParam &param) const
175  {
176  const unsigned int max_threads = deviceProp.maxThreadsDim[0];
177  const unsigned int max_blocks = deviceProp.maxGridSize[0];
178  const int threads = arg.length;
179  const int step = deviceProp.warpSize;
180  param.block = dim3((threads+max_blocks-1)/max_blocks, 1, 1); // ensure the blockDim is large enough, given the limit on gridDim
181  param.block.x = ((param.block.x+step-1) / step) * step; // round up to the nearest "step"
182  if (param.block.x > max_threads) errorQuda("Local lattice volume is too large for device");
183  param.grid = dim3((threads+param.block.x-1)/param.block.x, 1, 1);
184  param.shared_bytes = sharedBytesPerThread()*param.block.x > sharedBytesPerBlock(param) ?
185  sharedBytesPerThread()*param.block.x : sharedBytesPerBlock(param);
186  }
187 
189  void defaultTuneParam(TuneParam &param) const {
190  initTuneParam(param);
191  }
192 
193  long long flops() const { return 0; } // fixme
194  long long bytes() const { return 0; } // fixme
195 
196  TuneKey tuneKey() const {
197  std::stringstream vol, aux;
198  vol << X[0] << "x";
199  vol << X[1] << "x";
200  vol << X[2] << "x";
201  vol << X[3] << "x";
202  aux << "threads=" << 2*arg.in.volumeCB << ",prec=" << sizeof(Complex)/2;
203  aux << "stride=" << arg.in.stride;
204  return TuneKey(vol.str(), typeid(*this).name(), aux.str());
205  }
206  };
207 
208 
 // Should really have a parity
 // Host entry point: shift src by `shift` sites along dimension `dim` into dst.
 // Full-subset fields are split into even/odd halves and recursed; an odd
 // shift swaps the parities (shift&1 test below).
 // NOTE(review): the Doxygen extraction dropped several interior lines here
 // (the double-precision branch opener, the Spinor/Texture declarations named
 // dst_spinor/src_tex, and the ShiftColorSpinorField instantiation named
 // shiftColorSpinor) — this block is reproduced as extracted; confirm against
 // the original source before editing.
 void shiftColorSpinorField(cudaColorSpinorField &dst, const cudaColorSpinorField &src, const unsigned int parity, const unsigned int dim, const int shift) {

   // in-place shift is unsupported: reads and writes would alias
   if(&src == &dst){
     errorQuda("destination field is the same as source field\n");
     return;
   }

   if(src.Nspin() != 1 && src.Nspin() !=4) errorQuda("nSpin(%d) not supported\n", src.Nspin());

   if(src.SiteSubset() != dst.SiteSubset())
     errorQuda("Spinor fields do not have matching subsets\n");

   if(src.SiteSubset() == QUDA_FULL_SITE_SUBSET){
     // an odd shift moves sites across parities; an even shift preserves them
     if(shift&1){
       shiftColorSpinorField(dst.Even(), src.Odd(), 0, dim, shift);
       shiftColorSpinorField(dst.Odd(), src.Even(), 1, dim, shift);
     }else{
       shiftColorSpinorField(dst.Even(), src.Even(), 0, dim, shift);
       shiftColorSpinorField(dst.Odd(), src.Odd(), 1, dim, shift);
     }
     return;
   }

#ifdef MULTI_GPU
   const int dir = (shift>0) ? QUDA_BACKWARDS : QUDA_FORWARDS; // pack the start of the field if shift is positive
   const int offset = (shift>0) ? 0 : 1;
#endif


   if(src.Nspin() == 1){
     ShiftColorSpinorFieldArg arg(src.Volume(), parity, dim, shift, dst_spinor, src_tex);

#ifdef MULTI_GPU
     // overlap interior compute with halo exchange when dim is partitioned
     if(commDimPartitioned(dim) && dim!=3){
       face->pack(src, 1-parity, dagger, dim, dir, streams); // pack in stream[1]
       cudaEventRecord(packEnd, streams[1]);
       cudaStreamWaitEvent(streams[1], packEnd, 0); // wait for pack to end in stream[1]
       face->gather(src, dagger, 2*dim+offset, 1); // copy packed data from device buffer to host and do this in stream[1]
       cudaEventRecord(gatherEnd, streams[1]); // record the completion of face->gather
     }
#endif

     shiftColorSpinor.apply(0); // shift the field in the interior region

#ifdef MULTI_GPU
     if(commDimPartitioned(dim) && dim!=3){
       // busy-wait for the gather, then start the halo communication
       while(1){
         cudaError_t eventQuery = cudaEventQuery(gatherEnd);
         if(eventQuery == cudaSuccess){
           face->commsStart(2*dim + offset); // if argument is even, send backwards, else send forwards
           break;
         }
       }

       // after communication, load data back on to device
       // do this in stream[1]
       while(1){
         if(face->commsQuery(2*dim + offset)){
           face->scatter(src, dagger, 2*dim+offset, 1);
           break;
         }
       } // while(1)
       cudaEventRecord(scatterEnd, streams[1]);
       cudaStreamWaitEvent(streams[1], scatterEnd, 0);
       shiftColorSpinor.apply(1); // exterior update using the received ghost data
     }
#endif

   }else{
     errorQuda("Only staggered fermions are currently supported\n");
   }
   }else if(dst.Precision() == QUDA_SINGLE_PRECISION && src.Precision() == QUDA_SINGLE_PRECISION){
     if(src.Nspin() == 1 ){
       // float2 accessor over the 3-color, 1-spin (staggered) destination field
       Spinor<float2, float2, float2, 3, 1> dst_spinor(dst);
       ShiftColorSpinorFieldArg arg(src.Volume(), parity, dim, shift, dst_spinor, src_tex);
     }else{
       errorQuda("Only staggered fermions are currently supported\n");
     }
   }
   return;
 }
297 
298 
299 } // namespace quda
300 
__device__ __forceinline__ int neighborIndex(const unsigned int &cb_idx, const int(&shift)[4], const bool(&partitioned)[4], const unsigned int &parity)
ShiftColorSpinorField(const ShiftColorSpinorField< Output, Input > &arg, QudaFieldLocation location)
__constant__ int X2
void apply(const cudaStream_t &stream)
int commDimPartitioned(int dir)
int y[4]
cudaDeviceProp deviceProp
QudaVerbosity getVerbosity()
Definition: util_quda.cpp:20
#define errorQuda(...)
Definition: util_quda.h:73
__constant__ int X1
std::complex< double > Complex
Definition: eig_variables.h:13
void shiftColorSpinorField(cudaColorSpinorField &dst, const cudaColorSpinorField &src, const unsigned int parity, const unsigned int dim, const int shift)
cudaStream_t * streams
cudaStream_t * stream
__global__ void const FloatN FloatM FloatM Float Float int threads
Definition: llfat_core.h:1099
virtual void initTuneParam(TuneParam &param) const
__global__ void shiftColorSpinorFieldKernel(ShiftQuarkArg< Output, Input > arg)
QudaDagType dagger
Definition: test_util.cpp:1558
QudaGaugeParam param
Definition: pack_test.cpp:17
cudaColorSpinorField & Odd() const
__global__ void shiftColorSpinorFieldExternalKernel(ShiftQuarkArg< Output, Input > arg)
const QudaFieldLocation location
Definition: pack_test.cpp:46
cudaEvent_t packEnd[Nstream]
Definition: dslash_quda.cu:99
FloatingPoint< float > Float
Definition: gtest.h:7350
ShiftColorSpinorFieldArg(const unsigned int length, const unsigned int X[4], const unsigned int ghostOffset, const unsigned int parity, const unsigned int dir, const int shift, const Input &in, const Output &out)
void defaultTuneParam(TuneParam &param) const
TuneParam & tuneLaunch(Tunable &tunable, QudaTune enabled, QudaVerbosity verbosity)
Definition: tune.cpp:271
cudaEvent_t gatherEnd[Nstream]
Definition: dslash_quda.cu:101
int x[4]
enum QudaFieldLocation_s QudaFieldLocation
__constant__ int X3
QudaPrecision Precision() const
int full_idx
__host__ __device__ ValueType arg(const complex< ValueType > &z)
Returns the phase angle of z.
Definition: complex_quda.h:843
QudaTune getTuning()
Definition: util_quda.cpp:32
cudaEvent_t scatterEnd[Nstream]
Definition: dslash_quda.cu:103
QudaSiteSubset SiteSubset() const
const QudaParity parity
Definition: dslash_test.cpp:29
char aux[TuneKey::aux_n]
Definition: tune_quda.h:136
cudaColorSpinorField & Even() const
__constant__ int X4