def complexify(a):
    """Return the elements of *a* converted to a list of complex numbers."""
    return list(map(complex, a))
10 if a ==
int(a):
return `
int(a)`
14 if a == 0:
return "0i" 15 elif a == -1:
return "-i" 16 elif a == 1:
return "i" 17 else:
return fltToString(a)+
"i" 21 if re == 0
and im == 0:
return "0" 22 elif re == 0:
return imToString(im)
23 elif im == 0:
return fltToString(re)
25 im_str =
"-"+imToString(-im)
if im < 0
else "+"+imToString(im)
26 return fltToString(re)+im_str
75 return [x+y
for (x,y)
in zip(g1,g2)]
78 return [x-y
for (x,y)
in zip(g1,g2)]
def indent(code):
    """Indent generated C code by one space per line.

    Every line of *code* is indented by a single space and terminated with
    a newline, EXCEPT lines beginning with '#': those are C preprocessor
    directives and must stay in column 0 of the emitted kernel.

    Returns the re-joined string (empty input yields the empty string,
    since splitlines() of "" is []).
    """
    def indentline(line):
        # Preprocessor directives stay flush left; everything else is
        # shifted right by one space.  (Replaces the old, unidiomatic
        # `line.count("#", 0, 1) == 0` first-character test.)
        return line if line.startswith("#") else " " + line
    return ''.join([indentline(line) + "\n" for line in code.splitlines()])
102 return "{\n"+
indent(code)+
"}" 106 elif x==-1:
return "-" 107 elif x==+2:
return "+2*" 108 elif x==-2:
return "-2*" 111 return `(n/4)` +
"." + [
"x",
"y",
"z",
"w"][n%4]
114 return `(n/2)` +
"." + [
"x",
"y"][n%2]
# ---------------------------------------------------------------------------
# Variable-name helpers.
#
# Each helper builds the C identifier used in the generated kernel source for
# one real or imaginary component of a spinor / gauge-link quantity.  All
# index arguments are small non-negative integers (spin s, color c/m/n,
# half-spinor index h, direction d/dir).
#
# NOTE: the original used Python-2-only backtick repr expressions (`s`);
# str() is equivalent for integer arguments and works on Python 2 and 3.
# ---------------------------------------------------------------------------

def in_re(s, c):
    """Real part of the input spinor, spin s, color c (e.g. "i01_re")."""
    return "i" + str(s) + str(c) + "_re"

def in_im(s, c):
    """Imaginary part of the input spinor, spin s, color c."""
    return "i" + str(s) + str(c) + "_im"

def g_re(d, m, n):
    """Real part of gauge-link entry (m,n); odd direction d selects the
    conjugate-transposed link "gT"."""
    return ("g" if (d % 2 == 0) else "gT") + str(m) + str(n) + "_re"

def g_im(d, m, n):
    """Imaginary part of gauge-link entry (m,n); odd d -> transposed link."""
    return ("g" if (d % 2 == 0) else "gT") + str(m) + str(n) + "_im"

def out_re(s, c):
    """Real part of the output spinor, spin s, color c."""
    return "o" + str(s) + str(c) + "_re"

def out_im(s, c):
    """Imaginary part of the output spinor, spin s, color c."""
    return "o" + str(s) + str(c) + "_im"

def h1_re(h, c):
    """Real part of the projected half spinor: prefix "a" or "b" by h."""
    return ["a", "b"][h] + str(c) + "_re"

def h1_im(h, c):
    """Imaginary part of the projected half spinor."""
    return ["a", "b"][h] + str(c) + "_im"

def h2_re(h, c):
    """Real part of the gauge-multiplied half spinor: prefix "A"/"B" by h."""
    return ["A", "B"][h] + str(c) + "_re"

def h2_im(h, c):
    """Imaginary part of the gauge-multiplied half spinor."""
    return ["A", "B"][h] + str(c) + "_im"

def a_re(b, s, c):
    """Real part of an accumulator component, flattened spin index s+2*b."""
    return "a" + str(s + 2 * b) + str(c) + "_re"

def a_im(b, s, c):
    """Imaginary part of an accumulator component, flattened index s+2*b."""
    return "a" + str(s + 2 * b) + str(c) + "_im"

def acc_re(s, c):
    """Real part of the xpay accumulator spinor, spin s, color c."""
    return "acc" + str(s) + str(c) + "_re"

def acc_im(s, c):
    """Imaginary part of the xpay accumulator spinor."""
    return "acc" + str(s) + str(c) + "_im"

def tmp_re(s, c):
    """Real part of a temporary spinor component, spin s, color c."""
    return "tmp" + str(s) + str(c) + "_re"

def tmp_im(s, c):
    """Imaginary part of a temporary spinor component."""
    return "tmp" + str(s) + str(c) + "_im"

def spinor(name, s, c, z):
    """Generic component accessor for prefix *name*: z == 0 selects the
    real part, any other z the imaginary part, of <name><s><c>."""
    if z == 0:
        return name + str(s) + str(c) + "_re"
    else:
        return name + str(s) + str(c) + "_im"
"// input spinor\n" 143 str +=
"#ifdef SPINOR_DOUBLE\n" 144 str +=
"#define spinorFloat double\n" 146 str +=
"#define WRITE_SPINOR_SHARED WRITE_SPINOR_SHARED_DOUBLE2\n" 147 str +=
"#define READ_SPINOR_SHARED READ_SPINOR_SHARED_DOUBLE2\n" 154 if dslash
and not pack:
161 str +=
"#define spinorFloat float\n" 163 str +=
"#define WRITE_SPINOR_SHARED WRITE_SPINOR_SHARED_FLOAT4\n" 164 str +=
"#define READ_SPINOR_SHARED READ_SPINOR_SHARED_FLOAT4\n" 170 if dslash
and not pack:
176 str +=
"#endif // SPINOR_DOUBLE\n\n" 182 str =
"// gauge link\n" 183 str +=
"#ifdef GAUGE_FLOAT2\n" 199 str +=
"#endif // GAUGE_DOUBLE\n\n" 201 str +=
"// conjugated gauge link\n" 205 str +=
"#define "+
g_re(1,m,n)+
" (+"+
g_re(0,n,m)+
")\n" 206 str +=
"#define "+
g_im(1,m,n)+
" (-"+
g_im(0,n,m)+
")\n" 216 str =
"// output spinor\n" 220 if 2*i < sharedFloats
and not sharedDslash:
221 str +=
"#define "+
out_re(s,c)+
" s["+`(2*i+0)`+
"*SHARED_STRIDE]\n" 223 str +=
"VOLATILE spinorFloat "+
out_re(s,c)+
";\n" 224 if 2*i+1 < sharedFloats
and not sharedDslash:
225 str +=
"#define "+
out_im(s,c)+
" s["+`(2*i+1)`+
"*SHARED_STRIDE]\n" 227 str +=
"VOLATILE spinorFloat "+
out_im(s,c)+
";\n" 236 prolog_str= (
"// *** CUDA DSLASH ***\n\n" if not dagger
else "// *** CUDA DSLASH DAGGER ***\n\n")
237 prolog_str+=
"#define DSLASH_SHARED_FLOATS_PER_THREAD "+str(sharedFloats)+
"\n\n" 239 print "Undefined prolog" 244 #if ((CUDA_VERSION >= 4010) && (__COMPUTE_CAPABILITY__ >= 200)) // NVVM compiler 246 #else // Open64 compiler 247 #define VOLATILE volatile 252 if dslash ==
True: prolog_str+=
def_gauge()
255 if (sharedFloats > 0):
260 #define SHARED_STRIDE 16 // to avoid bank conflicts on Fermi 262 #define SHARED_STRIDE 32 // to avoid bank conflicts on Fermi 269 #define SHARED_STRIDE 8 // to avoid bank conflicts on G80 and GT200 271 #define SHARED_STRIDE 16 // to avoid bank conflicts on G80 and GT200 277 if sharedFloats > 0
and not sharedDslash:
280 extern __shared__ char s_data[]; 286 VOLATILE spinorFloat *s = (spinorFloat*)s_data + DSLASH_SHARED_FLOATS_PER_THREAD*SHARED_STRIDE*(threadIdx.x/SHARED_STRIDE) 287 + (threadIdx.x % SHARED_STRIDE); 294 #include "read_gauge.h" 295 #include "io_spinor.h" 308 if (kernel_type == INTERIOR_KERNEL) { 311 // Assume even dimensions 312 coordsFromIndex3D<EVEN_X>(X, coord, sid, param); 314 // only need to check Y and Z dims currently since X and T set to match exactly 315 if (coord[1] >= param.dc.X[1]) return; 316 if (cpprd[2] >= param.dc.X[2]) return; 324 if (kernel_type == INTERIOR_KERNEL) { 327 sid = blockIdx.x*blockDim.x + threadIdx.x; 328 if (sid >= param.threads) return; 330 // Assume even dimensions 331 coordsFromIndex<4,QUDA_4D_PC,EVEN_X>(X, coord, sid, param); 344 } else { // exterior kernel 346 sid = blockIdx.x*blockDim.x + threadIdx.x; 347 if (sid >= param.threads) return; 349 const int face_volume = (param.threads >> 1); // volume of one face 350 const int face_num = (sid >= face_volume); // is this thread updating face 0 or 1 351 face_idx = sid - face_num*face_volume; // index into the respective face 353 // ghostOffset is scaled to include body (includes stride) and number of FloatN arrays (SPINOR_HOP) 354 // face_idx not sid since faces are spin projected and share the same volume index (modulo UP/DOWN reading) 355 //sp_idx = face_idx + param.ghostOffset[dim]; 357 coordsFromFaceIndex<4,QUDA_4D_PC,kernel_type,1>(X, sid, coord, face_idx, face_num, param); 359 READ_INTERMEDIATE_SPINOR(INTERTEX, param.sp_stride, sid, sid); 369 prolog_str+=
"#endif // MULTI_GPU\n\n\n" 375 def gen(dir, pack_only=False):
376 projIdx = dir
if not dagger
else dir + (1 - 2*(dir%2))
379 return projectors[projIdx][4*i+j]
386 return (1, proj(i,1))
388 return (0, proj(i,0))
390 boundary = [
"coord[0]==(param.dc.X[0]-1)",
"coord[0]==0",
"coord[1]==(param.dc.X[1]-1)",
"coord[1]==0",
"coord[2]==(param.dc.X[2]-1)",
"coord[2]==0",
"coord[3]==(param.dc.X[3]-1)",
"coord[3]==0"]
391 interior = [
"coord[0]<(param.dc.X[0]-1)",
"coord[0]>0",
"coord[1]<(param.dc.X[1]-1)",
"coord[1]>0",
"coord[2]<(param.dc.X[2]-1)",
"coord[2]>0",
"coord[3]<(param.dc.X[3]-1)",
"coord[3]>0"]
392 dim = [
"X",
"Y",
"Z",
"T"]
395 sp_idx = [
"X+1",
"X-1",
"X+param.dc.X[0]",
"X-param.dc.X[0]",
"X+param.dc.X2X1",
"X-param.dc.X2X1",
"X+param.dc.X3X2X1",
"X-param.dc.X3X2X1"]
398 sp_idx_wrap = [
"X-(param.dc.X[0]-1)",
"X+(param.dc.X[0]-1)",
"X-param.dc.X2X1mX1",
"X+param.dc.X2X1mX1",
"X-param.dc.X3X2X1mX2X1",
"X+param.dc.X3X2X1mX2X1",
399 "X-param.dc.X4X3X2X1mX3X2X1",
"X+param.dc.X4X3X2X1mX3X2X1"]
402 cond +=
"#ifdef MULTI_GPU\n" 403 cond +=
"if ( (kernel_type == INTERIOR_KERNEL && (!param.ghostDim["+`dir/2`+
"] || "+interior[dir]+
")) ||\n" 404 cond +=
" (kernel_type == EXTERIOR_KERNEL_"+dim[dir/2]+
" && "+boundary[dir]+
") )\n" 409 projName =
"P"+`dir/2`+[
"-",
"+"][projIdx%2]
410 str +=
"// Projector "+projName+
"\n" 411 for l
in projStr.splitlines():
415 str +=
"#ifdef MULTI_GPU\n" 416 str +=
"const int sp_idx = (kernel_type == INTERIOR_KERNEL) ? ("+boundary[dir]+
" ? "+sp_idx_wrap[dir]+
" : "+sp_idx[dir]+
") >> 1 :\n" 417 str +=
" face_idx + param.ghostOffset[static_cast<int>(kernel_type)][" + `(dir+1)%2` +
"];\n" 418 str +=
"#if (DD_PREC==2) // half precision\n" 419 str +=
"const int sp_norm_idx = face_idx + param.ghostNormOffset[static_cast<int>(kernel_type)][" + `(dir+1)%2` +
"];\n" 422 str +=
"const int sp_idx = ("+boundary[dir]+
" ? "+sp_idx_wrap[dir]+
" : "+sp_idx[dir]+
") >> 1;\n" 427 str +=
"const int ga_idx = sid;\n" 429 str +=
"#ifdef MULTI_GPU\n" 430 str +=
"const int ga_idx = ((kernel_type == INTERIOR_KERNEL) ? sp_idx : param.dc.Vh+face_idx);\n" 432 str +=
"const int ga_idx = sp_idx;\n" 437 row_cnt = ([0,0,0,0])
442 if re != 0
or im != 0:
444 row_cnt[0] += row_cnt[1]
445 row_cnt[2] += row_cnt[3]
448 for h
in range(0, 2):
449 for c
in range(0, 3):
450 decl_half +=
"spinorFloat "+
h1_re(h,c)+
", "+
h1_im(h,c)+
";\n";
453 load_spinor =
"// read spinor from device memory\n" 455 load_spinor +=
"#ifdef TWIST_INV_DSLASH\n" 456 load_spinor +=
"#ifdef SPINOR_DOUBLE\n" 457 load_spinor +=
"const spinorFloat a = param.a;\n" 458 load_spinor +=
"const spinorFloat b = param.b;\n" 459 load_spinor +=
"#else\n" 460 load_spinor +=
"const spinorFloat a = param.a_f;\n" 461 load_spinor +=
"const spinorFloat b = param.b_f;\n" 462 load_spinor +=
"#endif\n" 463 load_spinor +=
"#endif\n" 467 load_spinor +=
"#ifndef TWIST_INV_DSLASH\n" 468 load_spinor +=
"READ_SPINOR_DOWN(SPINORTEX, param.sp_stride, sp_idx, sp_idx);\n" 469 load_spinor +=
"#else\n" 470 load_spinor +=
"READ_SPINOR(SPINORTEX, param.sp_stride, sp_idx, sp_idx);\n" 472 load_spinor +=
"APPLY_TWIST_INV( a, b, i);\n" 474 load_spinor +=
"APPLY_TWIST_INV(-a, b, i);\n" 476 load_spinor +=
"#endif\n" 477 elif row_cnt[2] == 0:
479 load_spinor +=
"#ifndef TWIST_INV_DSLASH\n" 480 load_spinor +=
"READ_SPINOR_UP(SPINORTEX, param.sp_stride, sp_idx, sp_idx);\n" 481 load_spinor +=
"#else\n" 482 load_spinor +=
"READ_SPINOR(SPINORTEX, param.sp_stride, sp_idx, sp_idx);\n" 484 load_spinor +=
"APPLY_TWIST_INV( a, b, i);\n" 486 load_spinor +=
"APPLY_TWIST_INV(-a, b, i);\n" 488 load_spinor +=
"#endif\n" 490 load_spinor +=
"READ_SPINOR(SPINORTEX, param.sp_stride, sp_idx, sp_idx);\n" 492 load_spinor +=
"#ifdef TWIST_INV_DSLASH\n" 494 load_spinor +=
"APPLY_TWIST_INV( a, b, i);\n" 496 load_spinor +=
"APPLY_TWIST_INV(-a, b, i);\n" 498 load_spinor +=
"#endif\n" 502 load_half +=
"const int sp_stride_pad = param.dc.ghostFace[static_cast<int>(kernel_type)];\n" 507 if dir >= 6: load_half +=
"const int t_proj_scale = TPROJSCALE;\n" 509 load_half +=
"// read half spinor from device memory\n" 513 load_half +=
"READ_SPINOR_GHOST(GHOSTSPINORTEX, sp_stride_pad, sp_idx, sp_norm_idx, "+`dir`+
");\n\n" 516 load_gauge =
"// read gauge matrix from device memory\n" 517 load_gauge +=
"READ_GAUGE_MATRIX(G, GAUGE"+`dir%2`+
"TEX, "+`dir`+
", ga_idx, param.gauge_stride);\n\n" 519 reconstruct_gauge =
"// reconstruct gauge matrix\n" 520 reconstruct_gauge +=
"RECONSTRUCT_GAUGE_MATRIX("+`dir`+
");\n\n" 522 project =
"// project spinor into half spinors\n" 523 for h
in range(0, 2):
524 for c
in range(0, 3):
527 for s
in range(0, 4):
530 if re==0
and im==0: ()
538 for s
in range(0, 4):
539 re = proj(h+2,s).real
540 im = proj(h+2,s).imag
541 if re==0
and im==0: ()
549 project +=
h1_re(h,c)+
" = "+strRe+
";\n" 550 project +=
h1_im(h,c)+
" = "+strIm+
";\n" 553 """// store spinor into shared memory 554 WRITE_SPINOR_SHARED(threadIdx.x, threadIdx.y, threadIdx.z, i);\n 558 """// load spinor from shared memory 559 int tx = (threadIdx.x > 0) ? threadIdx.x-1 : blockDim.x-1; 561 READ_SPINOR_SHARED(tx, threadIdx.y, threadIdx.z);\n 565 """// load spinor from shared memory 566 int tx = (threadIdx.x + blockDim.x - ((coord[0]+1)&1) ) % blockDim.x; 567 int ty = (threadIdx.y < blockDim.y - 1) ? threadIdx.y + 1 : 0; 568 READ_SPINOR_SHARED(tx, ty, threadIdx.z);\n 572 """// load spinor from shared memory 573 int tx = (threadIdx.x + blockDim.x - ((coord[0]+1)&1)) % blockDim.x; 574 int ty = (threadIdx.y > 0) ? threadIdx.y - 1 : blockDim.y - 1; 575 READ_SPINOR_SHARED(tx, ty, threadIdx.z);\n 579 """// load spinor from shared memory 580 int tx = (threadIdx.x + blockDim.x - ((coord[0]+1)&1) ) % blockDim.x; 581 int tz = (threadIdx.z < blockDim.z - 1) ? threadIdx.z + 1 : 0; 582 READ_SPINOR_SHARED(tx, threadIdx.y, tz);\n 586 """// load spinor from shared memory 587 int tx = (threadIdx.x + blockDim.x - ((coord[0]+1)&1)) % blockDim.x; 588 int tz = (threadIdx.z > 0) ? threadIdx.z - 1 : blockDim.z - 1; 589 READ_SPINOR_SHARED(tx, threadIdx.y, tz);\n 594 for h
in range(0, 2):
595 for c
in range(0, 3):
596 copy_half +=
h1_re(h,c)+
" = "+(
"t_proj_scale*" if (dir >= 6)
else "")+
in_re(h,c)+
"; " 597 copy_half +=
h1_im(h,c)+
" = "+(
"t_proj_scale*" if (dir >= 6)
else "")+
in_im(h,c)+
";\n" 601 prep_half +=
"#ifdef MULTI_GPU\n" 602 prep_half +=
"if (kernel_type == INTERIOR_KERNEL) {\n" 603 prep_half +=
"#endif\n" 608 prep_half +=
indent(load_spinor)
609 prep_half +=
indent(write_shared)
610 prep_half +=
indent(project)
612 prep_half +=
indent(load_shared_1)
613 prep_half +=
indent(project)
615 prep_half +=
indent(
"if (threadIdx.y == blockDim.y-1 && blockDim.y < param.dc.X[1] ) {\n")
616 prep_half +=
indent(load_spinor)
617 prep_half +=
indent(project)
618 prep_half +=
indent(
"} else {")
619 prep_half +=
indent(load_shared_2)
620 prep_half +=
indent(project)
623 prep_half +=
indent(
"if (threadIdx.y == 0 && blockDim.y < param.dc.X[1]) {\n")
624 prep_half +=
indent(load_spinor)
625 prep_half +=
indent(project)
626 prep_half +=
indent(
"} else {")
627 prep_half +=
indent(load_shared_3)
628 prep_half +=
indent(project)
631 prep_half +=
indent(
"if (threadIdx.z == blockDim.z-1 && blockDim.z < X3) {\n")
632 prep_half +=
indent(load_spinor)
633 prep_half +=
indent(project)
634 prep_half +=
indent(
"} else {")
635 prep_half +=
indent(load_shared_4)
636 prep_half +=
indent(project)
639 prep_half +=
indent(
"if (threadIdx.z == 0 && blockDim.z < X3) {\n")
640 prep_half +=
indent(load_spinor)
641 prep_half +=
indent(project)
642 prep_half +=
indent(
"} else {")
643 prep_half +=
indent(load_shared_5)
644 prep_half +=
indent(project)
647 prep_half +=
indent(load_spinor)
648 prep_half +=
indent(project)
650 prep_half +=
indent(load_spinor)
651 prep_half +=
indent(project)
654 prep_half +=
"#ifdef MULTI_GPU\n" 655 prep_half +=
"} else {\n" 657 prep_half +=
indent(load_half)
658 prep_half +=
indent(copy_half)
660 prep_half +=
"#endif // MULTI_GPU\n" 663 ident =
"// identity gauge matrix\n" 666 ident +=
"spinorFloat "+
h2_re(h,m)+
" = " +
h1_re(h,m) +
"; " 667 ident +=
"spinorFloat "+
h2_im(h,m)+
" = " +
h1_im(h,m) +
";\n" 672 mult +=
"// multiply row "+`m`+
"\n" 674 re =
"spinorFloat "+
h2_re(h,m)+
" = 0;\n" 675 im =
"spinorFloat "+
h2_im(h,m)+
" = 0;\n" 677 re +=
h2_re(h,m) +
" += " +
g_re(dir,m,c) +
" * "+
h1_re(h,c)+
";\n" 678 re +=
h2_re(h,m) +
" -= " +
g_im(dir,m,c) +
" * "+
h1_im(h,c)+
";\n" 679 im +=
h2_im(h,m) +
" += " +
g_re(dir,m,c) +
" * "+
h1_im(h,c)+
";\n" 680 im +=
h2_im(h,m) +
" += " +
g_im(dir,m,c) +
" * "+
h1_re(h,c)+
";\n" 692 reconstruct +=
out_re(h_out, m) +
" += " +
h2_re(h,m) +
";\n" 693 reconstruct +=
out_im(h_out, m) +
" += " +
h2_im(h,m) +
";\n" 699 if im == 0
and re == 0: ()
701 reconstruct +=
out_re(s, m) +
" " +
sign(re) +
"= " +
h2_re(h,m) +
";\n" 702 reconstruct +=
out_im(s, m) +
" " +
sign(re) +
"= " +
h2_im(h,m) +
";\n" 704 reconstruct +=
out_re(s, m) +
" " +
sign(-im) +
"= " +
h2_im(h,m) +
";\n" 705 reconstruct +=
out_im(s, m) +
" " +
sign(+im) +
"= " +
h2_re(h,m) +
";\n" 710 str +=
"if (param.gauge_fixed && ga_idx < param.dc.X4X3X2X1hmX3X2X1h)\n" 711 str +=
block(decl_half + prep_half + ident + reconstruct)
713 str +=
block(decl_half + prep_half + load_gauge + reconstruct_gauge + mult + reconstruct)
715 str += decl_half + prep_half + load_gauge + reconstruct_gauge + mult + reconstruct
718 out = load_spinor + decl_half + project
719 out = out.replace(
"sp_idx",
"idx")
722 return cond +
block(str)+
"\n\n" 728 if z==0:
return out_re(s,c)
731 if z==0:
return in_re(s,c)
732 else:
return in_im(s,c)
737 str +=
"#ifndef TWIST_INV_DSLASH\n" 738 str +=
"#ifdef SPINOR_DOUBLE\n" 739 str +=
"const spinorFloat a = param.a;\n" 740 str +=
"const spinorFloat b = param.b;\n" 742 str +=
"const spinorFloat a = param.a_f;\n" 743 str +=
"const spinorFloat b = param.b_f;\n" 747 str +=
"#ifdef DSLASH_XPAY\n" 748 str +=
"READ_ACCUM(ACCUMTEX, param.sp_stride)\n\n" 749 str +=
"#ifndef TWIST_XPAY\n" 750 str +=
"#ifndef TWIST_INV_DSLASH\n" 751 str +=
"//perform invert twist first:\n" 753 str +=
"APPLY_TWIST_INV( a, b, o);\n" 755 str +=
"APPLY_TWIST_INV(-a, b, o);\n" 764 str +=
"APPLY_TWIST( a, acc);\n" 766 str +=
"APPLY_TWIST(-a, acc);\n" 767 str +=
"//warning! b is unrelated to the twisted mass parameter in this case!\n\n" 773 str +=
"#endif//TWIST_XPAY\n" 774 str +=
"#else //no XPAY\n" 775 str +=
"#ifndef TWIST_INV_DSLASH\n" 777 str +=
" APPLY_TWIST_INV( a, b, o);\n" 779 str +=
" APPLY_TWIST_INV(-a, b, o);\n" 790 str +=
"#ifdef MULTI_GPU\n" 792 str +=
"#if defined MULTI_GPU && (defined DSLASH_XPAY || defined DSLASH_CLOVER)\n" 795 int incomplete = 0; // Have all 8 contributions been computed for this site? 797 switch(kernel_type) { // intentional fall-through 798 case INTERIOR_KERNEL: 799 incomplete = incomplete || (param.commDim[3] && (coord[3]==0 || coord[3]==(param.dc.X[3]-1))); 800 case EXTERIOR_KERNEL_T: 801 incomplete = incomplete || (param.commDim[2] && (coord[2]==0 || coord[2]==(param.dc.X[2]-1))); 802 case EXTERIOR_KERNEL_Z: 803 incomplete = incomplete || (param.commDim[1] && (coord[1]==0 || coord[1]==(param.dc.X[1]-1))); 804 case EXTERIOR_KERNEL_Y: 805 incomplete = incomplete || (param.commDim[0] && (coord[0]==0 || coord[0]==(param.dc.X[0]-1))); 809 str +=
"if (!incomplete)\n" 810 str +=
"#endif // MULTI_GPU\n" 814 str +=
block( block_str )
817 str +=
"// write spinor field back to device memory\n" 818 str +=
"WRITE_SPINOR(param.sp_stride);\n\n" 820 str +=
"// undefine to prevent warning when precision is changed\n" 821 str +=
"#undef spinorFloat\n" 823 str +=
"#undef WRITE_SPINOR_SHARED\n" 824 str +=
"#undef READ_SPINOR_SHARED\n" 825 if sharedFloats > 0: str +=
"#undef SHARED_STRIDE\n\n" 831 str +=
"#undef "+
g_re(0,m,n)+
"\n" 832 str +=
"#undef "+
g_im(0,m,n)+
"\n" 838 str +=
"#undef "+
in_re(s,c)+
"\n" 839 str +=
"#undef "+
in_im(s,c)+
"\n" 846 str +=
"#undef "+
acc_re(s,c)+
"\n" 847 str +=
"#undef "+
acc_im(s,c)+
"\n" 855 if 2*i < sharedFloats:
856 str +=
"#undef "+
out_re(s,c)+
"\n" 857 if 2*i+1 < sharedFloats:
858 str +=
"#undef "+
out_im(s,c)+
"\n" 861 str +=
"#undef VOLATILE\n" 869 str +=
"switch(dim) {\n" 870 for dim
in range(0,4):
871 str +=
"case "+`dim`+
":\n" 872 proj =
gen(2*dim+facenum, pack_only=
True)
874 proj +=
"// write half spinor back to device memory\n" 875 proj +=
"WRITE_HALF_SPINOR(face_volume, face_idx);\n" 882 assert (sharedFloats == 0)
885 str +=
"#include \"io_spinor.h\"\n\n" 887 str +=
"if (face_num) " 893 str +=
"// undefine to prevent warning when precision is changed\n" 894 str +=
"#undef spinorFloat\n" 895 str +=
"#undef SHARED_STRIDE\n\n" 900 str +=
"#undef "+
in_re(s,c)+
"\n" 901 str +=
"#undef "+
in_im(s,c)+
"\n" 913 print "Generating dslash kernel for sm" + str(arch/10)
936 print "Shared floats set to " + str(sharedFloats)
941 filename =
'dslash_core/tm_dslash_' + name +
'_core.h' 942 print sys.argv[0] +
": generating " + filename;
943 f = open(filename,
'w')
948 filename =
'dslash_core/tm_dslash_dagger_' + name +
'_core.h' 949 print sys.argv[0] +
": generating " + filename +
"\n";
950 f = open(filename,
'w')
979 print sys.argv[0] +
": generating wilson_pack_face_core.h";
980 f = open(
'dslash_core/wilson_pack_twisted_face_core.h',
'w')
985 print sys.argv[0] +
": generating wilson_pack_face_dagger_core.h";
986 f = open(
'dslash_core/wilson_pack_twisted_face_dagger_core.h',
'w')
def generate_dslash_kernels(arch)
def complexify(a)
complex numbers ######################################################################## ...
def input_spinor(s, c, z)
def indent(code)
code generation ######################################################################## ...
def spinor(name, s, c, z)
def gen(dir, pack_only=False)