def complexify(a):
    """Promote every entry of the sequence *a* to a Python complex number."""
    return [complex(value) for value in a]
10 if a ==
int(a):
return `
int(a)`
def imToString(a):
    """Format the pure-imaginary coefficient *a* as a C-source token.

    Returns "i", "-i" or "0i" for the unit/zero cases; any other value
    is rendered through fltToString with an "i" suffix.
    """
    if a == 1:
        return "i"
    if a == -1:
        return "-i"
    if a == 0:
        return "0i"
    # General coefficient: delegate float formatting to the sibling helper.
    return fltToString(a)+"i"
and im == 0:
return "0" 22 elif re == 0:
return imToString(im)
23 elif im == 0:
return fltToString(re)
25 im_str =
"-"+imToString(-im)
if im < 0
else "+"+imToString(im)
26 return fltToString(re)+im_str
75 return [x+y
for (x,y)
in zip(g1,g2)]
78 return [x-y
for (x,y)
in zip(g1,g2)]
def indent(code):
    """Indent each line of *code* by four spaces and terminate it with a
    newline; lines starting with '#' (C preprocessor directives) are kept
    at column zero so the generated kernel still preprocesses correctly."""
    def indentline(line):
        # Preprocessor directives must not be indented.
        return line if line.startswith("#") else "    "+line
    return ''.join(indentline(line)+"\n" for line in code.splitlines())
102 return "{\n"+
indent(code)+
"}" 106 elif x==-1:
return "-" 107 elif x==+2:
return "+2*" 108 elif x==-2:
return "-2*" 111 return `(n/4)` +
"." + [
"x",
"y",
"z",
"w"][n%4]
114 return `(n/2)` +
"." + [
"x",
"y"][n%2]
# --- variable-name helpers -------------------------------------------------
# These build the C identifiers used in the generated kernel.  All index
# arguments are small integers.  Fix: the original used Python-2-only
# backquote repr syntax (`x`), which was removed in Python 3; str() is
# behaviorally identical for integer arguments and portable.

def in_re(s, c):
    """Real part of input-spinor component (spin s, color c)."""
    return "i"+str(s)+str(c)+"_re"
def in_im(s, c):
    """Imaginary part of input-spinor component (spin s, color c)."""
    return "i"+str(s)+str(c)+"_im"
def g_re(d, m, n):
    """Real part of gauge-link element (m,n); odd direction d uses the transpose gT."""
    return ("g" if (d%2==0) else "gT")+str(m)+str(n)+"_re"
def g_im(d, m, n):
    """Imaginary part of gauge-link element (m,n); odd d uses the transpose gT."""
    return ("g" if (d%2==0) else "gT")+str(m)+str(n)+"_im"
def out_re(s, c):
    """Real part of output-spinor component (spin s, color c)."""
    return "o"+str(s)+str(c)+"_re"
def out_im(s, c):
    """Imaginary part of output-spinor component (spin s, color c)."""
    return "o"+str(s)+str(c)+"_im"
def h1_re(h, c):
    """Real part of first projected half-spinor: 'a' for h=0, 'b' for h=1."""
    return ["a", "b"][h]+str(c)+"_re"
def h1_im(h, c):
    """Imaginary part of first projected half-spinor."""
    return ["a", "b"][h]+str(c)+"_im"
def h2_re(h, c):
    """Real part of gauge-multiplied half-spinor: 'A' for h=0, 'B' for h=1."""
    return ["A", "B"][h]+str(c)+"_re"
def h2_im(h, c):
    """Imaginary part of gauge-multiplied half-spinor."""
    return ["A", "B"][h]+str(c)+"_im"
def c_re(b, sm, cm, sn, cn):
    """Real part of clover-term element; chiral block b offsets the spin index by 2."""
    return "c"+str(sm+2*b)+str(cm)+"_"+str(sn+2*b)+str(cn)+"_re"
def c_im(b, sm, cm, sn, cn):
    """Imaginary part of clover-term element (chiral block b)."""
    return "c"+str(sm+2*b)+str(cm)+"_"+str(sn+2*b)+str(cn)+"_im"
def cinv_re(b, sm, cm, sn, cn):
    """Real part of inverted clover-term element (chiral block b)."""
    return "cinv"+str(sm+2*b)+str(cm)+"_"+str(sn+2*b)+str(cn)+"_re"
def cinv_im(b, sm, cm, sn, cn):
    """Imaginary part of inverted clover-term element (chiral block b)."""
    return "cinv"+str(sm+2*b)+str(cm)+"_"+str(sn+2*b)+str(cn)+"_im"
def a_re(b, s, c):
    """Real part of clover-applied spinor component (chiral block b)."""
    return "a"+str(s+2*b)+str(c)+"_re"
def a_im(b, s, c):
    """Imaginary part of clover-applied spinor component (chiral block b)."""
    return "a"+str(s+2*b)+str(c)+"_im"
def acc_re(s, c):
    """Real part of accumulator-spinor component (spin s, color c)."""
    return "acc"+str(s)+str(c)+"_re"
def acc_im(s, c):
    """Imaginary part of accumulator-spinor component (spin s, color c)."""
    return "acc"+str(s)+str(c)+"_im"
def tmp_re(s, c):
    """Real part of temporary-spinor component (spin s, color c)."""
    return "tmp"+str(s)+str(c)+"_re"
def tmp_im(s, c):
    """Imaginary part of temporary-spinor component (spin s, color c)."""
    return "tmp"+str(s)+str(c)+"_im"
def spinor(name, s, c, z):
    """Component of the named spinor: z==0 selects the real part, else imaginary."""
    if z == 0:
        return name+str(s)+str(c)+"_re"
    else:
        return name+str(s)+str(c)+"_im"
"// input spinor\n" 147 str +=
"#ifdef SPINOR_DOUBLE\n" 148 str +=
"#define spinorFloat double\n" 150 str +=
"#define WRITE_SPINOR_SHARED WRITE_SPINOR_SHARED_DOUBLE2\n" 151 str +=
"#define READ_SPINOR_SHARED READ_SPINOR_SHARED_DOUBLE2\n" 158 if dslash
and not pack:
165 str +=
"#define spinorFloat float\n" 167 str +=
"#define WRITE_SPINOR_SHARED WRITE_SPINOR_SHARED_FLOAT4\n" 168 str +=
"#define READ_SPINOR_SHARED READ_SPINOR_SHARED_FLOAT4\n" 174 if dslash
and not pack:
180 str +=
"#endif // SPINOR_DOUBLE\n\n" 186 str =
"// gauge link\n" 187 str +=
"#ifdef GAUGE_FLOAT2\n" 203 str +=
"#endif // GAUGE_DOUBLE\n\n" 205 str +=
"// conjugated gauge link\n" 209 str +=
"#define "+
g_re(1,m,n)+
" (+"+
g_re(0,n,m)+
")\n" 210 str +=
"#define "+
g_im(1,m,n)+
" (-"+
g_im(0,n,m)+
")\n" 217 str =
"// first chiral block of clover term\n" 218 str +=
"#ifdef CLOVER_DOUBLE\n" 228 for m
in range(n+1,6):
232 str +=
"#define "+
c_im(0,sm,cm,sn,cn)+
" C"+
nthFloat2(i+1)+
"\n" 244 for m
in range(n+1,6):
248 str +=
"#define "+
c_im(0,sm,cm,sn,cn)+
" C"+
nthFloat4(i+1)+
"\n" 250 str +=
"#endif // CLOVER_DOUBLE\n\n" 258 str +=
"#define "+
c_re(0,sm,cm,sn,cn)+
" (+"+
c_re(0,sn,cn,sm,cm)+
")\n" 259 str +=
"#define "+
c_im(0,sm,cm,sn,cn)+
" (-"+
c_im(0,sn,cn,sm,cm)+
")\n" 262 str +=
"// second chiral block of clover term (reuses C0,...,C9)\n" 269 str +=
"#define "+
c_re(1,sm,cm,sn,cn)+
" "+
c_re(0,sm,cm,sn,cn)+
"\n" 270 if m != n: str +=
"#define "+
c_im(1,sm,cm,sn,cn)+
" "+
c_im(0,sm,cm,sn,cn)+
"\n" 274 str +=
"// first chiral block of inverted clover term\n" 275 str +=
"#ifdef CLOVER_DOUBLE\n" 285 for m
in range(n+1,6):
301 for m
in range(n+1,6):
307 str +=
"#endif // CLOVER_DOUBLE\n\n" 315 str +=
"#define "+
cinv_re(0,sm,cm,sn,cn)+
" (+"+
cinv_re(0,sn,cn,sm,cm)+
")\n" 316 str +=
"#define "+
cinv_im(0,sm,cm,sn,cn)+
" (-"+
cinv_im(0,sn,cn,sm,cm)+
")\n" 319 str +=
"// second chiral block of inverted clover term (reuses C0,...,C9)\n" 326 str +=
"#define "+
cinv_re(1,sm,cm,sn,cn)+
" "+
cinv_re(0,sm,cm,sn,cn)+
"\n" 327 if m != n: str +=
"#define "+
cinv_im(1,sm,cm,sn,cn)+
" "+
cinv_im(0,sm,cm,sn,cn)+
"\n" 329 if dagger
and not pack_only:
330 str +=
"#ifndef CLOVER_TWIST_INV_DSLASH\n" 334 // declare C## here and use ASSN below instead of READ 369 #endif // CLOVER_DOUBLE 371 if dagger
and not pack_only:
380 str =
"// output spinor\n" 384 if 2*i < sharedFloats
and not sharedDslash:
385 str +=
"#define "+
out_re(s,c)+
" s["+`(2*i+0)`+
"*SHARED_STRIDE]\n" 387 str +=
"VOLATILE spinorFloat "+
out_re(s,c)+
";\n" 388 if 2*i+1 < sharedFloats
and not sharedDslash:
389 str +=
"#define "+
out_im(s,c)+
" s["+`(2*i+1)`+
"*SHARED_STRIDE]\n" 391 str +=
"VOLATILE spinorFloat "+
out_im(s,c)+
";\n" 400 prolog_str= (
"// *** CUDA DSLASH ***\n\n" if not dagger
else "// *** CUDA DSLASH DAGGER ***\n\n")
401 prolog_str+=
"#define DSLASH_SHARED_FLOATS_PER_THREAD "+str(sharedFloats)+
"\n\n" 403 print "Undefined prolog" 408 #if ((CUDA_VERSION >= 4010) && (__COMPUTE_CAPABILITY__ >= 200)) // NVVM compiler 410 #else // Open64 compiler 411 #define VOLATILE volatile 416 if dslash ==
True: prolog_str+=
def_gauge()
420 if (sharedFloats > 0):
425 #define SHARED_STRIDE 16 // to avoid bank conflicts on Fermi 427 #define SHARED_STRIDE 32 // to avoid bank conflicts on Fermi 434 #define SHARED_STRIDE 8 // to avoid bank conflicts on G80 and GT200 436 #define SHARED_STRIDE 16 // to avoid bank conflicts on G80 and GT200 442 if sharedFloats > 0
and not sharedDslash:
445 extern __shared__ char s_data[]; 451 VOLATILE spinorFloat *s = (spinorFloat*)s_data + DSLASH_SHARED_FLOATS_PER_THREAD*SHARED_STRIDE*(threadIdx.x/SHARED_STRIDE) 452 + (threadIdx.x % SHARED_STRIDE); 459 #include "read_gauge.h" 460 #include "io_spinor.h" 461 #include "read_clover.h" 462 #include "tmc_core.h" 475 if (kernel_type == INTERIOR_KERNEL) { 478 // Assume even dimensions 479 coordsFromIndex3D<EVEN_X>(X, coord, sid, param); 481 // only need to check Y and Z dims currently since X and T set to match exactly 482 if (coord[1] >= param.dc.X[1]) return; 483 if (coord[2] >= param.dc.X[2]) return; 491 if (kernel_type == INTERIOR_KERNEL) { 494 sid = blockIdx.x*blockDim.x + threadIdx.x; 495 if (sid >= param.threads) return; 497 // Assume even dimensions 498 coordsFromIndex<4,QUDA_4D_PC,EVEN_X>(X, coord, sid, param); 511 } else { // exterior kernel 513 sid = blockIdx.x*blockDim.x + threadIdx.x; 514 if (sid >= param.threads) return; 516 const int face_volume = (param.threads >> 1); // volume of one face 517 const int face_num = (sid >= face_volume); // is this thread updating face 0 or 1 518 face_idx = sid - face_num*face_volume; // index into the respective face 520 // ghostOffset is scaled to include body (includes stride) and number of FloatN arrays (SPINOR_HOP) 521 // face_idx not sid since faces are spin projected and share the same volume index (modulo UP/DOWN reading) 522 //sp_idx = face_idx + param.ghostOffset[dim]; 524 coordsFromFaceIndex<4,QUDA_4D_PC,kernel_type,1>(X, sid, coord, face_idx, face_num, param); 526 READ_INTERMEDIATE_SPINOR(INTERTEX, param.sp_stride, sid, sid); 536 prolog_str+=
"#endif // MULTI_GPU\n\n\n" 542 def gen(dir, pack_only=False):
543 projIdx = dir
if not dagger
else dir + (1 - 2*(dir%2))
546 return projectors[projIdx][4*i+j]
553 return (1, proj(i,1))
555 return (0, proj(i,0))
# Per-direction lookup tables, indexed by dir = 0..7
# (0:+X, 1:-X, 2:+Y, 3:-Y, 4:+Z, 5:-Z, 6:+T, 7:-T):
#   boundary[dir]     - C condition: the site lies on the face in that direction
#   interior[dir]     - C condition: the site is strictly inside that direction
#   dim[dir//2]       - dimension label for kernel-type macro names
#   sp_idx[dir]       - neighbor spinor index without lattice wrap-around
#   sp_idx_wrap[dir]  - neighbor spinor index wrapped across the lattice
boundary = ["coord[0]==(param.dc.X[0]-1)", "coord[0]==0",
            "coord[1]==(param.dc.X[1]-1)", "coord[1]==0",
            "coord[2]==(param.dc.X[2]-1)", "coord[2]==0",
            "coord[3]==(param.dc.X[3]-1)", "coord[3]==0"]
interior = ["coord[0]<(param.dc.X[0]-1)", "coord[0]>0",
            "coord[1]<(param.dc.X[1]-1)", "coord[1]>0",
            "coord[2]<(param.dc.X[2]-1)", "coord[2]>0",
            "coord[3]<(param.dc.X[3]-1)", "coord[3]>0"]
dim = ["X", "Y", "Z", "T"]
sp_idx = ["X+1", "X-1",
          "X+param.dc.X[0]", "X-param.dc.X[0]",
          "X+param.dc.X2X1", "X-param.dc.X2X1",
          "X+param.dc.X3X2X1", "X-param.dc.X3X2X1"]
sp_idx_wrap = ["X-(param.dc.X[0]-1)", "X+(param.dc.X[0]-1)",
               "X-param.dc.X2X1mX1", "X+param.dc.X2X1mX1",
               "X-param.dc.X3X2X1mX2X1", "X+param.dc.X3X2X1mX2X1",
               "X-param.dc.X4X3X2X1mX3X2X1", "X+param.dc.X4X3X2X1mX3X2X1"]
569 cond +=
"#ifdef MULTI_GPU\n" 570 cond +=
"if ( (kernel_type == INTERIOR_KERNEL && (!param.ghostDim["+`dir/2`+
"] || "+interior[dir]+
")) ||\n" 571 cond +=
" (kernel_type == EXTERIOR_KERNEL_"+dim[dir/2]+
" && "+boundary[dir]+
") )\n" 576 projName =
"P"+`dir/2`+[
"-",
"+"][projIdx%2]
577 str +=
"// Projector "+projName+
"\n" 578 for l
in projStr.splitlines():
582 str +=
"#ifdef MULTI_GPU\n" 583 str +=
"const int sp_idx = (kernel_type == INTERIOR_KERNEL) ? ("+boundary[dir]+
" ? "+sp_idx_wrap[dir]+
" : "+sp_idx[dir]+
") >> 1 :\n" 584 str +=
" face_idx + param.ghostOffset[static_cast<int>(kernel_type)][" + `(dir+1)%2` +
"];\n" 585 str +=
"#if (DD_PREC==2) // half precision\n" 586 str +=
"const int sp_norm_idx = face_idx + param.ghostNormOffset[static_cast<int>(kernel_type)][" + `(dir+1)%2` +
"];\n" 589 str +=
"const int sp_idx = ("+boundary[dir]+
" ? "+sp_idx_wrap[dir]+
" : "+sp_idx[dir]+
") >> 1;\n" 594 str +=
"const int ga_idx = sid;\n" 596 str +=
"#ifdef MULTI_GPU\n" 597 str +=
"const int ga_idx = ((kernel_type == INTERIOR_KERNEL) ? sp_idx : param.dc.Vh+face_idx);\n" 599 str +=
"const int ga_idx = sp_idx;\n" 604 row_cnt = ([0,0,0,0])
609 if re != 0
or im != 0:
611 row_cnt[0] += row_cnt[1]
612 row_cnt[2] += row_cnt[3]
615 for h
in range(0, 2):
616 for c
in range(0, 3):
617 decl_half +=
"spinorFloat "+
h1_re(h,c)+
", "+
h1_im(h,c)+
";\n";
620 load_spinor =
"// read spinor from device memory\n" 622 load_spinor +=
"READ_SPINOR_DOWN(SPINORTEX, param.sp_stride, sp_idx, sp_idx);\n" 623 elif row_cnt[2] == 0:
624 load_spinor +=
"READ_SPINOR_UP(SPINORTEX, param.sp_stride, sp_idx, sp_idx);\n" 626 load_spinor +=
"READ_SPINOR(SPINORTEX, param.sp_stride, sp_idx, sp_idx);\n" 630 load_half +=
"const int sp_stride_pad = param.dc.ghostFace[static_cast<int>(kernel_type)];\n" 638 load_half +=
"const int t_proj_scale = TPROJSCALE;\n" 641 load_half +=
"// read half spinor from device memory\n" 645 load_half +=
"READ_SPINOR_GHOST(GHOSTSPINORTEX, sp_stride_pad, sp_idx, sp_norm_idx, "+`dir`+
");\n\n" 648 load_gauge =
"// read gauge matrix from device memory\n" 649 load_gauge +=
"READ_GAUGE_MATRIX(G, GAUGE"+`dir%2`+
"TEX, "+`dir`+
", ga_idx, param.gauge_stride);\n\n" 651 reconstruct_gauge =
"// reconstruct gauge matrix\n" 652 reconstruct_gauge +=
"RECONSTRUCT_GAUGE_MATRIX("+`dir`+
");\n\n" 654 project =
"// project spinor into half spinors\n" 655 for h
in range(0, 2):
656 for c
in range(0, 3):
659 for s
in range(0, 4):
662 if re==0
and im==0: ()
670 for s
in range(0, 4):
671 re = proj(h+2,s).real
672 im = proj(h+2,s).imag
673 if re==0
and im==0: ()
681 project +=
h1_re(h,c)+
" = "+strRe+
";\n" 682 project +=
h1_im(h,c)+
" = "+strIm+
";\n" 685 """// store spinor into shared memory 686 WRITE_SPINOR_SHARED(threadIdx.x, threadIdx.y, threadIdx.z, i);\n 690 """// load spinor from shared memory 691 int tx = (threadIdx.x > 0) ? threadIdx.x-1 : blockDim.x-1; 693 READ_SPINOR_SHARED(tx, threadIdx.y, threadIdx.z);\n 697 """// load spinor from shared memory 698 int tx = (threadIdx.x + blockDim.x - ((coord[0]+1)&1) ) % blockDim.x; 699 int ty = (threadIdx.y < blockDim.y - 1) ? threadIdx.y + 1 : 0; 700 READ_SPINOR_SHARED(tx, ty, threadIdx.z);\n 704 """// load spinor from shared memory 705 int tx = (threadIdx.x + blockDim.x - ((coord[0]+1)&1)) % blockDim.x; 706 int ty = (threadIdx.y > 0) ? threadIdx.y - 1 : blockDim.y - 1; 707 READ_SPINOR_SHARED(tx, ty, threadIdx.z);\n 711 """// load spinor from shared memory 712 int tx = (threadIdx.x + blockDim.x - ((coord[0]+1)&1) ) % blockDim.x; 713 int tz = (threadIdx.z < blockDim.z - 1) ? threadIdx.z + 1 : 0; 714 READ_SPINOR_SHARED(tx, threadIdx.y, tz);\n 718 """// load spinor from shared memory 719 int tx = (threadIdx.x + blockDim.x - ((coord[0]+1)&1)) % blockDim.x; 720 int tz = (threadIdx.z > 0) ? threadIdx.z - 1 : blockDim.z - 1; 721 READ_SPINOR_SHARED(tx, threadIdx.y, tz);\n 727 for h
in range(0, 2):
728 for c
in range(0, 3):
730 copy_half +=
h1_im(h,c)+
" = "+
in_im(h,c)+
";\n" 732 for h
in range(0, 2):
733 for c
in range(0, 3):
734 copy_half +=
h1_re(h,c)+
" = t_proj_scale*"+
in_re(h,c)+
"; " 735 copy_half +=
h1_im(h,c)+
" = t_proj_scale*"+
in_im(h,c)+
";\n" 739 prep_half +=
"#ifdef MULTI_GPU\n" 740 prep_half +=
"if (kernel_type == INTERIOR_KERNEL) {\n" 741 prep_half +=
"#endif\n" 746 prep_half +=
indent(load_spinor)
747 prep_half +=
indent(write_shared)
748 prep_half +=
indent(project)
750 prep_half +=
indent(load_shared_1)
751 prep_half +=
indent(project)
753 prep_half +=
indent(
"if (threadIdx.y == blockDim.y-1 && blockDim.y < param.dc.X[1] ) {\n")
754 prep_half +=
indent(load_spinor)
755 prep_half +=
indent(project)
756 prep_half +=
indent(
"} else {")
757 prep_half +=
indent(load_shared_2)
758 prep_half +=
indent(project)
761 prep_half +=
indent(
"if (threadIdx.y == 0 && blockDim.y < param.dc.X[1]) {\n")
762 prep_half +=
indent(load_spinor)
763 prep_half +=
indent(project)
764 prep_half +=
indent(
"} else {")
765 prep_half +=
indent(load_shared_3)
766 prep_half +=
indent(project)
769 prep_half +=
indent(
"if (threadIdx.z == blockDim.z-1 && blockDim.z < X3) {\n")
770 prep_half +=
indent(load_spinor)
771 prep_half +=
indent(project)
772 prep_half +=
indent(
"} else {")
773 prep_half +=
indent(load_shared_4)
774 prep_half +=
indent(project)
777 prep_half +=
indent(
"if (threadIdx.z == 0 && blockDim.z < X3) {\n")
778 prep_half +=
indent(load_spinor)
779 prep_half +=
indent(project)
780 prep_half +=
indent(
"} else {")
781 prep_half +=
indent(load_shared_5)
782 prep_half +=
indent(project)
785 prep_half +=
indent(load_spinor)
786 prep_half +=
indent(project)
788 prep_half +=
indent(load_spinor)
789 prep_half +=
indent(project)
792 prep_half +=
"#ifdef MULTI_GPU\n" 793 prep_half +=
"} else {\n" 795 prep_half +=
indent(load_half)
796 prep_half +=
indent(copy_half)
798 prep_half +=
"#endif // MULTI_GPU\n" 801 ident =
"// identity gauge matrix\n" 804 ident +=
"spinorFloat "+
h2_re(h,m)+
" = " +
h1_re(h,m) +
"; " 805 ident +=
"spinorFloat "+
h2_im(h,m)+
" = " +
h1_im(h,m) +
";\n" 810 mult +=
"// multiply row "+`m`+
"\n" 812 re =
"spinorFloat "+
h2_re(h,m)+
" = 0;\n" 813 im =
"spinorFloat "+
h2_im(h,m)+
" = 0;\n" 815 re +=
h2_re(h,m) +
" += " +
g_re(dir,m,c) +
" * "+
h1_re(h,c)+
";\n" 816 re +=
h2_re(h,m) +
" -= " +
g_im(dir,m,c) +
" * "+
h1_im(h,c)+
";\n" 817 im +=
h2_im(h,m) +
" += " +
g_re(dir,m,c) +
" * "+
h1_im(h,c)+
";\n" 818 im +=
h2_im(h,m) +
" += " +
g_im(dir,m,c) +
" * "+
h1_re(h,c)+
";\n" 830 reconstruct +=
out_re(h_out, m) +
" += " +
h2_re(h,m) +
";\n" 831 reconstruct +=
out_im(h_out, m) +
" += " +
h2_im(h,m) +
";\n" 837 if im == 0
and re == 0: ()
839 reconstruct +=
out_re(s, m) +
" " +
sign(re) +
"= " +
h2_re(h,m) +
";\n" 840 reconstruct +=
out_im(s, m) +
" " +
sign(re) +
"= " +
h2_im(h,m) +
";\n" 842 reconstruct +=
out_re(s, m) +
" " +
sign(-im) +
"= " +
h2_im(h,m) +
";\n" 843 reconstruct +=
out_im(s, m) +
" " +
sign(+im) +
"= " +
h2_re(h,m) +
";\n" 848 str +=
"if (param.gauge_fixed && ga_idx < param.dc.X4X3X2X1hmX3X2X1h)\n" 849 str +=
block(decl_half + prep_half + ident + reconstruct)
851 str +=
block(decl_half + prep_half + load_gauge + reconstruct_gauge + mult + reconstruct)
853 str += decl_half + prep_half + load_gauge + reconstruct_gauge + mult + reconstruct
855 return cond +
block(str)+
"\n\n" 861 if z==0:
return out_re(s,c)
864 if z==0:
return in_re(s,c)
865 else:
return in_im(s,c)
871 str +=
"#if !defined(CLOVER_TWIST_INV_DSLASH)\n" 872 str +=
"#ifdef SPINOR_DOUBLE\n" 873 str +=
"spinorFloat a = param.a;\n" 875 str +=
"spinorFloat a = param.a_f;\n" 880 str +=
"#ifdef DSLASH_XPAY\n" 882 str +=
"#ifdef SPINOR_DOUBLE\n" 883 str +=
"spinorFloat b = param.b;\n" 885 str +=
"spinorFloat b = param.b_f;\n" 888 str +=
"READ_ACCUM(ACCUMTEX, param.sp_stride)\n\n" 891 str +=
"#ifndef CLOVER_TWIST_XPAY\n" 892 str +=
"//perform invert twist first:\n" 893 str +=
"#ifndef DYNAMIC_CLOVER\n" 894 str +=
"APPLY_CLOVER_TWIST_INV(c, cinv, a, o);\n" 896 str +=
"APPLY_CLOVER_TWIST_DYN_INV(c, a, o);\n" 904 str +=
"APPLY_CLOVER_TWIST(c, a, acc);\n" 910 str +=
"#endif//CLOVER_TWIST_XPAY\n" 911 str +=
"#else //no XPAY\n" 912 str +=
"#ifndef DYNAMIC_CLOVER\n" 913 str +=
"APPLY_CLOVER_TWIST_INV(c, cinv, a, o);\n" 915 str +=
"APPLY_CLOVER_TWIST_DYN_INV(c, a, o);\n" 919 str +=
"#ifndef CLOVER_TWIST_INV_DSLASH\n" 920 str +=
"#ifndef CLOVER_TWIST_XPAY\n" 921 str +=
"//perform invert twist first:\n" 922 str +=
"#ifndef DYNAMIC_CLOVER\n" 923 str +=
"APPLY_CLOVER_TWIST_INV(c, cinv, -a, o);\n" 925 str +=
"APPLY_CLOVER_TWIST_DYN_INV(c, -a, o);\n" 928 str +=
"APPLY_CLOVER_TWIST(c, -a, acc);\n" 936 str +=
"#else //no XPAY\n" 937 str +=
"#ifndef CLOVER_TWIST_INV_DSLASH\n" 938 str +=
"#ifndef DYNAMIC_CLOVER\n" 939 str +=
"APPLY_CLOVER_TWIST_INV(c, cinv, -a, o);\n" 941 str +=
"APPLY_CLOVER_TWIST_DYN_INV(c, -a, o);\n" 953 str +=
"#ifdef MULTI_GPU\n" 955 str +=
"#if defined MULTI_GPU && (defined DSLASH_XPAY || defined DSLASH_CLOVER)\n" 958 int incomplete = 0; // Have all 8 contributions been computed for this site? 960 switch(kernel_type) { // intentional fall-through 962 case INTERIOR_KERNEL: 963 incomplete = incomplete || (param.commDim[3] && (coord[3]==0 || coord[3]==(param.dc.X[3]-1))); 964 case EXTERIOR_KERNEL_T: 965 incomplete = incomplete || (param.commDim[2] && (coord[2]==0 || coord[2]==(param.dc.X[2]-1))); 966 case EXTERIOR_KERNEL_Z: 967 incomplete = incomplete || (param.commDim[1] && (coord[1]==0 || coord[1]==(param.dc.X[1]-1))); 968 case EXTERIOR_KERNEL_Y: 969 incomplete = incomplete || (param.commDim[0] && (coord[0]==0 || coord[0]==(param.dc.X[0]-1))); 973 str +=
"if (!incomplete)\n" 974 str +=
"#endif // MULTI_GPU\n" 978 str +=
block( block_str )
981 str +=
"// write spinor field back to device memory\n" 982 str +=
"WRITE_SPINOR(param.sp_stride);\n\n" 984 str +=
"// undefine to prevent warning when precision is changed\n" 985 str +=
"#undef spinorFloat\n" 987 str +=
"#undef WRITE_SPINOR_SHARED\n" 988 str +=
"#undef READ_SPINOR_SHARED\n" 989 if sharedFloats > 0: str +=
"#undef SHARED_STRIDE\n\n" 995 str +=
"#undef "+
g_re(0,m,n)+
"\n" 996 str +=
"#undef "+
g_im(0,m,n)+
"\n" 1000 for c
in range(0,3):
1002 str +=
"#undef "+
in_re(s,c)+
"\n" 1003 str +=
"#undef "+
in_im(s,c)+
"\n" 1006 for m
in range(0,6):
1009 str +=
"#undef "+
c_re(0,s,c,s,c)+
"\n" 1010 for n
in range(0,6):
1013 for m
in range(n+1,6):
1016 str +=
"#undef "+
c_re(0,sm,cm,sn,cn)+
"\n" 1017 str +=
"#undef "+
c_im(0,sm,cm,sn,cn)+
"\n" 1020 for m
in range(0,6):
1023 str +=
"#undef "+
cinv_re(0,s,c,s,c)+
"\n" 1024 for n
in range(0,6):
1027 for m
in range(n+1,6):
1030 str +=
"#undef "+
cinv_re(0,sm,cm,sn,cn)+
"\n" 1031 str +=
"#undef "+
cinv_im(0,sm,cm,sn,cn)+
"\n" 1035 for s
in range(0,4):
1036 for c
in range(0,3):
1038 str +=
"#undef "+
acc_re(s,c)+
"\n" 1039 str +=
"#undef "+
acc_im(s,c)+
"\n" 1044 for s
in range(0,4):
1045 for c
in range(0,3):
1047 if 2*i < sharedFloats:
1048 str +=
"#undef "+
out_re(s,c)+
"\n" 1049 if 2*i+1 < sharedFloats:
1050 str +=
"#undef "+
out_im(s,c)+
"\n" 1053 str +=
"#undef VOLATILE\n" 1064 print "Generating dslash kernel for sm" + str(arch/10)
1080 sharedDslash =
False 1084 sharedDslash =
False 1087 print "Shared floats set to " + str(sharedFloats)
1092 filename =
'dslash_core/tmc_dslash_' + name +
'_core.h' 1093 print sys.argv[0] +
": generating " + filename;
1094 f = open(filename,
'w')
1099 filename =
'dslash_core/tmc_dslash_dagger_' + name +
'_core.h' 1100 print sys.argv[0] +
": generating " + filename +
"\n";
1101 f = open(filename,
'w')
1114 sharedDslash =
False def input_spinor(s, c, z)
def indent(code)
code generation ######################################################################## ...
def cinv_re(b, sm, cm, sn, cn)
def gen(dir, pack_only=False)
def clover_twisted_xpay()
def generate_dslash_kernels(arch)
def spinor(name, s, c, z)
def complexify(a)
complex numbers ######################################################################## ...
def def_clover(pack_only=False)
def c_im(b, sm, cm, sn, cn)
def c_re(b, sm, cm, sn, cn)
def cinv_im(b, sm, cm, sn, cn)