def complexify(a):
    """Return the elements of *a* converted to complex numbers.

    NOTE(review): the `def` line was reconstructed from the mangled
    source (signature confirmed by the file's definition index).
    """
    return [complex(x) for x in a]
def complexToStr(c):
    """Format a complex number as a compact C-source style string.

    Examples: (1-1j) -> "1-i", (0+2j) -> "2i", 3 -> "3", 0 -> "0".
    Integer-valued parts print without a decimal point; unit imaginary
    parts print as bare "i"/"-i".

    NOTE(review): reconstructed from mangled Python 2 source (backtick
    repr replaced by str()/repr(); a few connective lines such as the
    `def` header were restored from context) — confirm against the
    original generator.
    """
    def fltToString(a):
        # Integer-valued floats print without a decimal point.
        if a == int(a):
            return str(int(a))
        else:
            return repr(a)

    def imToString(a):
        if a == 0:
            return "0i"
        elif a == -1:
            return "-i"
        elif a == 1:
            return "i"
        else:
            return fltToString(a) + "i"

    re = c.real
    im = c.imag
    if re == 0 and im == 0:
        return "0"
    elif re == 0:
        return imToString(im)
    elif im == 0:
        return fltToString(re)
    else:
        # Fold the sign of the imaginary part into the connecting +/-.
        im_str = "-" + imToString(-im) if im < 0 else "+" + imToString(im)
        return fltToString(re) + im_str
# Chiral projectors 2*P_L and 2*P_R as flat 4x4 element lists, built
# from `id` and `igamma5` — presumably the identity and i*gamma5
# matrices defined earlier in this file (not visible here; confirm).
two_P_L = [id[k] - igamma5[k] / 1j for k in range(16)]
two_P_R = [id[k] + igamma5[k] / 1j for k in range(16)]
def gplus(g1, g2):
    """Element-wise sum of two flat matrices (same length).

    NOTE(review): the `def` header is missing from the mangled source;
    the name was reconstructed from upstream convention — confirm.
    """
    return [x + y for (x, y) in zip(g1, g2)]
def gminus(g1, g2):
    """Element-wise difference of two flat matrices (same length).

    NOTE(review): the `def` header is missing from the mangled source;
    the name was reconstructed from upstream convention — confirm.
    """
    return [x - y for (x, y) in zip(g1, g2)]
def indent(code, n=1):
    """Indent every line of *code* by *n* levels and newline-terminate it.

    Blank lines and preprocessor lines (those starting with '#') are
    left unindented so that generated #ifdef/#define directives stay in
    column 0.

    NOTE(review): the enclosing `def` line was reconstructed from the
    file's definition index, and the indent width (4 spaces per level)
    is not recoverable from the mangled source — confirm against the
    generated headers.
    """
    def indentline(line):
        # Only indent non-empty, non-preprocessor lines.
        if line and not line.startswith("#"):
            return n * "    " + line
        return line

    return ''.join([indentline(line) + "\n" for line in code.splitlines()])
def block(code):
    """Wrap *code* in a C brace block, indenting the body one level.

    NOTE(review): the `def` header was reconstructed from the mangled
    source; relies on the sibling `indent()` helper defined above.
    """
    return "{\n" + indent(code) + "}"
def sign(x):
    """Map a small integer coefficient to its C operator-prefix string.

    Used when emitting accumulation statements such as "a += b" /
    "a -= 2*b".  NOTE(review): the x==1 branch was reconstructed from
    the mangled source (call sites emit sign(re)+"=") — confirm.
    """
    if x == 1:
        return "+"
    elif x == -1:
        return "-"
    elif x == +2:
        return "+2*"
    elif x == -2:
        return "-2*"
def nthFloat4(n):
    """C accessor for the n-th scalar in an array of float4 vectors.

    e.g. 5 -> "1.y" (element 1, component y).  Rewritten from Python 2
    (backtick repr -> str, '/' -> '//') with identical output.
    """
    return str(n // 4) + "." + ["x", "y", "z", "w"][n % 4]
def nthFloat2(n):
    """C accessor for the n-th scalar in an array of float2 vectors.

    e.g. 3 -> "1.y".  Rewritten from Python 2 (backtick repr -> str,
    '/' -> '//') with identical output.
    """
    return str(n // 2) + "." + ["x", "y"][n % 2]
def in_re(s, c):
    """Variable name of the real part of input-spinor component (spin s, color c)."""
    return "i" + str(s) + str(c) + "_re"
def in_im(s, c):
    """Variable name of the imaginary part of input-spinor component (spin s, color c)."""
    return "i" + str(s) + str(c) + "_im"
def g_re(d, m, n):
    """Variable name of the real part of gauge-link element (m, n).

    Odd directions d use the conjugate-transposed link prefix "gT".
    """
    return ("g" if (d % 2 == 0) else "gT") + str(m) + str(n) + "_re"
def g_im(d, m, n):
    """Variable name of the imaginary part of gauge-link element (m, n).

    Odd directions d use the conjugate-transposed link prefix "gT".
    """
    return ("g" if (d % 2 == 0) else "gT") + str(m) + str(n) + "_im"
def out_re(s, c):
    """Variable name of the real part of output-spinor component (spin s, color c)."""
    return "o" + str(s) + str(c) + "_re"
def out_im(s, c):
    """Variable name of the imaginary part of output-spinor component (spin s, color c)."""
    return "o" + str(s) + str(c) + "_im"
def h1_re(h, c):
    """Variable name of the real part of projected half-spinor component h
    (0 -> "a", 1 -> "b"), color c."""
    return ["a", "b"][h] + str(c) + "_re"
def h1_im(h, c):
    """Variable name of the imaginary part of projected half-spinor component h
    (0 -> "a", 1 -> "b"), color c."""
    return ["a", "b"][h] + str(c) + "_im"
def h2_re(h, c):
    """Variable name of the real part of gauge-multiplied half-spinor component h
    (0 -> "A", 1 -> "B"), color c."""
    return ["A", "B"][h] + str(c) + "_re"
def h2_im(h, c):
    """Variable name of the imaginary part of gauge-multiplied half-spinor component h
    (0 -> "A", 1 -> "B"), color c."""
    return ["A", "B"][h] + str(c) + "_im"
def c_re(b, sm, cm, sn, cn):
    """Variable name of the real part of a clover-term element for chiral block b,
    row (spin sm, color cm) x column (spin sn, color cn)."""
    return "c" + str(sm + 2 * b) + str(cm) + "_" + str(sn + 2 * b) + str(cn) + "_re"
def c_im(b, sm, cm, sn, cn):
    """Variable name of the imaginary part of a clover-term element for chiral block b,
    row (spin sm, color cm) x column (spin sn, color cn)."""
    return "c" + str(sm + 2 * b) + str(cm) + "_" + str(sn + 2 * b) + str(cn) + "_im"
def a_re(b, s, c):
    """Variable name of the real part of clover accumulator (chiral block b,
    spin s, color c)."""
    return "a" + str(s + 2 * b) + str(c) + "_re"
def a_im(b, s, c):
    """Variable name of the imaginary part of clover accumulator (chiral block b,
    spin s, color c)."""
    return "a" + str(s + 2 * b) + str(c) + "_im"
def tmp_re(s, c):
    """Variable name of the real part of temporary spinor component (spin s, color c)."""
    return "tmp" + str(s) + str(c) + "_re"
def tmp_im(s, c):
    """Variable name of the imaginary part of temporary spinor component (spin s, color c)."""
    return "tmp" + str(s) + str(c) + "_im"
"_im" 148 str +=
"// input spinor\n" 149 str +=
"#ifdef SPINOR_DOUBLE\n" 150 str +=
"#define spinorFloat double\n" 157 str +=
"#define spinorFloat float\n" 163 str +=
"#endif // SPINOR_DOUBLE\n\n" 169 str =
"// gauge link\n" 170 str +=
"#ifdef GAUGE_FLOAT2\n" 186 str +=
"#endif // GAUGE_DOUBLE\n\n" 188 str +=
"// conjugated gauge link\n" 192 str +=
"#define "+
g_re(1,m,n)+
" (+"+
g_re(0,n,m)+
")\n" 193 str +=
"#define "+
g_im(1,m,n)+
" (-"+
g_im(0,n,m)+
")\n" 201 str =
"// first chiral block of inverted clover term\n" 202 str +=
"#ifdef CLOVER_DOUBLE\n" 212 for m
in range(n+1,6):
216 str +=
"#define "+
c_im(0,sm,cm,sn,cn)+
" C"+
nthFloat2(i+1)+
"\n" 228 for m
in range(n+1,6):
232 str +=
"#define "+
c_im(0,sm,cm,sn,cn)+
" C"+
nthFloat4(i+1)+
"\n" 234 str +=
"#endif // CLOVER_DOUBLE\n\n" 242 str +=
"#define "+
c_re(0,sm,cm,sn,cn)+
" (+"+
c_re(0,sn,cn,sm,cm)+
")\n" 243 str +=
"#define "+
c_im(0,sm,cm,sn,cn)+
" (-"+
c_im(0,sn,cn,sm,cm)+
")\n" 246 str +=
"// second chiral block of inverted clover term (reuses C0,...,C9)\n" 253 str +=
"#define "+
c_re(1,sm,cm,sn,cn)+
" "+
c_re(0,sm,cm,sn,cn)+
"\n" 254 if m != n: str +=
"#define "+
c_im(1,sm,cm,sn,cn)+
" "+
c_im(0,sm,cm,sn,cn)+
"\n" 261 str =
"// output spinor\n" 265 if 2*i < sharedFloats:
266 str +=
"#define "+
out_re(s,c)+
" s["+`(2*i+0)`+
"*SHARED_STRIDE]\n" 268 str +=
"VOLATILE spinorFloat "+
out_re(s,c)+
";\n" 269 if 2*i+1 < sharedFloats:
270 str +=
"#define "+
out_im(s,c)+
" s["+`(2*i+1)`+
"*SHARED_STRIDE]\n" 272 str +=
"VOLATILE spinorFloat "+
out_im(s,c)+
";\n" 279 prolog_str= (
"// *** CUDA DSLASH ***\n\n" if not dagger
else "// *** CUDA DSLASH DAGGER ***\n\n")
280 prolog_str+=
"#define DSLASH_SHARED_FLOATS_PER_THREAD "+str(sharedFloats)+
"\n\n" 282 prolog_str= (
"// *** CUDA CLOVER ***\n\n")
283 prolog_str+=
"#define CLOVER_SHARED_FLOATS_PER_THREAD "+str(sharedFloats)+
"\n\n" 285 print "Undefined prolog" 288 if domain_wall: prolog_str +=
"// NB! Don't trust any MULTI_GPU code\n" 292 #if (CUDA_VERSION >= 4010) 295 #define VOLATILE volatile 300 if dslash ==
True: prolog_str+=
def_gauge()
307 #if (__COMPUTE_CAPABILITY__ >= 200) 308 #define SHARED_STRIDE 16 // to avoid bank conflicts on Fermi 310 #define SHARED_STRIDE 8 // to avoid bank conflicts on G80 and GT200 313 #if (__COMPUTE_CAPABILITY__ >= 200) 314 #define SHARED_STRIDE 32 // to avoid bank conflicts on Fermi 316 #define SHARED_STRIDE 16 // to avoid bank conflicts on G80 and GT200 324 extern __shared__ char s_data[]; 330 VOLATILE spinorFloat *s = (spinorFloat*)s_data + DSLASH_SHARED_FLOATS_PER_THREAD*SHARED_STRIDE*(threadIdx.x/SHARED_STRIDE) 331 + (threadIdx.x % SHARED_STRIDE); 336 VOLATILE spinorFloat *s = (spinorFloat*)s_data + CLOVER_SHARED_FLOATS_PER_THREAD*SHARED_STRIDE*(threadIdx.x/SHARED_STRIDE) 337 + (threadIdx.x % SHARED_STRIDE); 342 prolog_str +=
"\n#include \"read_gauge.h\"\n" 344 prolog_str +=
"#include \"read_clover.h\"\n" 345 prolog_str +=
"#include \"io_spinor.h\"\n" 349 int sid = ((blockIdx.y*blockDim.y + threadIdx.y)*gridDim.x + blockIdx.x)*blockDim.x + threadIdx.x; 350 if (sid >= param.threads*param.dc.Ls) return; 358 if (kernel_type == INTERIOR_KERNEL) { 365 coordsFromIndex<5,QUDA_5D_PC,EVEN_X>(X, coord, sid, param); 366 s_parity = ( sid/param.dc.volume_4d_cb ) % 2; 373 int aux1 = X / param.dc.X[0]1; 374 x1 = X - aux1 * param.dc.X[0]; 375 int aux2 = aux1 / param.dc.X[1]; 376 x2 = aux1 - aux2 * param.dc.X[1]; 377 x4 = aux2 / param.dc.X[2]; 378 x3 = aux2 - x4 * param.dc.X[2]; 379 aux1 = (param.parity + x4 + x3 + x2) & 1; 394 } else { // exterior kernel 396 const int face_volume = (param.threads*param.dc.Ls >> 1); // volume of one face 397 const int face_num = (sid >= face_volume); // is this thread updating face 0 or 1 398 face_idx = sid - face_num*face_volume; // index into the respective face 400 // ghostOffset is scaled to include body (includes stride) and number of FloatN arrays (SPINOR_HOP) 401 // face_idx not sid since faces are spin projected and share the same volume index (modulo UP/DOWN reading) 402 //sp_idx = face_idx + param.ghostOffset[dim]; 404 coordsFromFaceIndex<5,QUDA_5D_PC,kernel_type,1>(X, sid, coord, face_idx, face_num, param); 405 s_parity = ( sid/param.dc.volume_4d_cb ) % 2; 407 READ_INTERMEDIATE_SPINOR(INTERTEX, param.sp_stride, sid, sid); 417 prolog_str+=
"#endif // MULTI_GPU\n" 422 // declare G## here and use ASSN below instead of READ 424 #if (DD_PREC==0) //temporal hack 460 #include "io_spinor.h" 462 int sid = blockIdx.x*blockDim.x + threadIdx.x; 463 if (sid >= param.threads) return; 465 // read spinor from device memory 466 READ_SPINOR(SPINORTEX, param.sp_stride, sid, sid); 472 #include "read_clover.h" 473 #include "io_spinor.h" 475 int sid = blockIdx.x*blockDim.x + threadIdx.x; 476 if (sid >= param.threads) return; 478 // read spinor from device memory 479 READ_SPINOR(SPINORTEX, param.sp_stride, sid, sid); 486 def gen(dir, pack_only=False):
487 projIdx = dir
if not dagger
else dir + ( +1
if dir%2 == 0
else -1 )
490 return projectors[projIdx][4*i+j]
497 return (1, proj(i,1))
499 return (0, proj(i,0))
501 boundary = [
"coord[0]==(param.dc.X[0]-1)",
"coord[0]==0",
"coord[1]==(param.dc.X[1]-1)",
"coord[1]==0",
"coord[2]==(param.dc.X[2]-1)",
"coord[2]==0",
"coord[3]==(param.dc.X[3]-1)",
"coord[3]==0"]
502 interior = [
"coord[0]<(param.dc.X[0]-1)",
"coord[0]>0",
"coord[1]<(param.dc.X[1]-1)",
"coord[1]>0",
"coord[2]<(param.dc.X[2]-1)",
"coord[2]>0",
"coord[3]<(param.dc.X[3]-1)",
"coord[3]>0"]
503 dim = [
"X",
"Y",
"Z",
"T"]
506 sp_idx = [
"X+1",
"X-1",
"X+param.dc.X[0]",
"X-param.dc.X[0]",
"X+param.dc.X2X1",
"X-param.dc.X2X1",
"X+param.dc.X3X2X1",
"X-param.dc.X3X2X1"]
509 sp_idx_wrap = [
"X-(param.dc.X[0]-1)",
"X+(param.dc.X[0]-1)",
"X-param.dc.X2X1mX1",
"X+param.dc.X2X1mX1",
"X-param.dc.X3X2X1mX2X1",
"X+param.dc.X3X2X1mX2X1",
510 "X-param.dc.X4X3X2X1mX3X2X1",
"X+param.dc.X4X3X2X1mX3X2X1"]
513 cond +=
"#ifdef MULTI_GPU\n" 514 cond +=
"if ( (kernel_type == INTERIOR_KERNEL && (!param.ghostDim["+`dir/2`+
"] || "+interior[dir]+
")) ||\n" 515 cond +=
" (kernel_type == EXTERIOR_KERNEL_"+dim[dir/2]+
" && "+boundary[dir]+
") )\n" 520 projName =
"P"+`dir/2`+[
"-",
"+"][projIdx%2]
521 str +=
"// Projector "+projName+
"\n" 522 for l
in projStr.splitlines():
526 str +=
"#ifdef MULTI_GPU\n" 527 str +=
"const int sp_idx = (kernel_type == INTERIOR_KERNEL) ? ("+boundary[dir]+
" ? "+sp_idx_wrap[dir]+
" : "+sp_idx[dir]+
") >> 1 :\n" 528 str +=
" face_idx + param.ghostOffset[static_cast<int>(kernel_type)][" + `(dir+1)%2` +
"];\n" 529 str +=
"#if (DD_PREC==2) // half precision\n" 530 str +=
"const int sp_norm_idx = face_idx + param.ghostNormOffset[static_cast<int>(kernel_type)][" + `(dir+1)%2` +
"];\n" 533 str +=
"const int sp_idx = ("+boundary[dir]+
" ? "+sp_idx_wrap[dir]+
" : "+sp_idx[dir]+
") >> 1;\n" 538 if domain_wall: str +=
"const int ga_idx = sid % param.dc.volume_4d_cb;\n" 539 else: str +=
"const int ga_idx = sid;\n" 541 str +=
"#ifdef MULTI_GPU\n" 542 if domain_wall: str +=
"const int ga_idx = ((kernel_type == INTERIOR_KERNEL) ? sp_idx % param.dc.volume_4d_cb : param.dc.volume_4d_cb+(face_idx % param.dc.ghostFace[static_cast<int>(kernel_type)]));\n" 543 else: str +=
"const int ga_idx = ((kernel_type == INTERIOR_KERNEL) ? sp_idx : param.dc.volume_4d_cb+face_idx);\n" 545 if domain_wall: str +=
"const int ga_idx = sp_idx % param.dc.volume_4d_cb;\n" 546 else: str +=
"const int ga_idx = sp_idx;\n" 551 row_cnt = ([0,0,0,0])
556 if re != 0
or im != 0:
558 row_cnt[0] += row_cnt[1]
559 row_cnt[2] += row_cnt[3]
562 for h
in range(0, 2):
563 for c
in range(0, 3):
564 decl_half +=
"spinorFloat "+
h1_re(h,c)+
", "+
h1_im(h,c)+
";\n";
567 load_spinor =
"// read spinor from device memory\n" 569 load_spinor +=
"READ_SPINOR_DOWN(SPINORTEX, param.sp_stride, sp_idx, sp_idx);\n" 570 elif row_cnt[2] == 0:
571 load_spinor +=
"READ_SPINOR_UP(SPINORTEX, param.sp_stride, sp_idx, sp_idx);\n" 573 load_spinor +=
"READ_SPINOR(SPINORTEX, param.sp_stride, sp_idx, sp_idx);\n" 578 load_half +=
"const int sp_stride_pad = param.dc.Ls*param.dc.ghostFace[static_cast<int>(kernel_type)];\n" 580 load_half +=
"const int sp_stride_pad = param.dc.ghostFace[static_cast<int>(kernel_type)];\n" 585 if dir >= 6: load_half +=
"const int t_proj_scale = TPROJSCALE;\n" 587 load_half +=
"// read half spinor from device memory\n" 591 load_half +=
"READ_SPINOR_GHOST(GHOSTSPINORTEX, sp_stride_pad, sp_idx, sp_norm_idx, "+`dir`+
");\n\n" 594 load_gauge =
"// read gauge matrix from device memory\n" 596 load_gauge +=
"if ( ! s_parity ) { ASSN_GAUGE_MATRIX(G, GAUGE"+`( dir%2)`+
"TEX, "+`dir`+
", ga_idx, param.gauge_stride); }\n" 597 load_gauge +=
"else { ASSN_GAUGE_MATRIX(G, GAUGE"+`(1-dir%2)`+
"TEX, "+`dir`+
", ga_idx, param.gauge_stride); }\n\n" 599 load_gauge +=
"READ_GAUGE_MATRIX(G, GAUGE"+`dir%2`+
"TEX, "+`dir`+
", ga_idx, param.gauge_stride);\n\n" 601 reconstruct_gauge =
"// reconstruct gauge matrix\n" 602 reconstruct_gauge +=
"RECONSTRUCT_GAUGE_MATRIX("+`dir`+
");\n\n" 604 project =
"// project spinor into half spinors\n" 605 for h
in range(0, 2):
606 for c
in range(0, 3):
609 for s
in range(0, 4):
612 if re==0
and im==0: ()
620 for s
in range(0, 4):
621 re = proj(h+2,s).real
622 im = proj(h+2,s).imag
623 if re==0
and im==0: ()
631 project +=
h1_re(h,c)+
" = "+strRe+
";\n" 632 project +=
h1_im(h,c)+
" = "+strIm+
";\n" 635 for h
in range(0, 2):
636 for c
in range(0, 3):
637 copy_half +=
h1_re(h,c)+
" = "+(
"t_proj_scale*" if (dir >= 6)
else "")+
in_re(h,c)+
"; " 638 copy_half +=
h1_im(h,c)+
" = "+(
"t_proj_scale*" if (dir >= 6)
else "")+
in_im(h,c)+
";\n" 642 prep_half +=
"#ifdef MULTI_GPU\n" 643 prep_half +=
"if (kernel_type == INTERIOR_KERNEL) {\n" 644 prep_half +=
"#endif\n" 646 prep_half +=
indent(load_spinor)
647 prep_half +=
indent(project)
649 prep_half +=
"#ifdef MULTI_GPU\n" 650 prep_half +=
"} else {\n" 652 prep_half +=
indent(load_half)
653 prep_half +=
indent(copy_half)
655 prep_half +=
"#endif // MULTI_GPU\n" 658 ident =
"// identity gauge matrix\n" 661 ident +=
"spinorFloat "+
h2_re(h,m)+
" = " +
h1_re(h,m) +
"; " 662 ident +=
"spinorFloat "+
h2_im(h,m)+
" = " +
h1_im(h,m) +
";\n" 667 mult +=
"// multiply row "+`m`+
"\n" 669 re =
"spinorFloat "+
h2_re(h,m)+
" = 0;\n" 670 im =
"spinorFloat "+
h2_im(h,m)+
" = 0;\n" 672 re +=
h2_re(h,m) +
" += " +
g_re(dir,m,c) +
" * "+
h1_re(h,c)+
";\n" 673 re +=
h2_re(h,m) +
" -= " +
g_im(dir,m,c) +
" * "+
h1_im(h,c)+
";\n" 674 im +=
h2_im(h,m) +
" += " +
g_re(dir,m,c) +
" * "+
h1_im(h,c)+
";\n" 675 im +=
h2_im(h,m) +
" += " +
g_im(dir,m,c) +
" * "+
h1_re(h,c)+
";\n" 686 reconstruct +=
out_re(h_out, m) +
" += " +
h2_re(h,m) +
";\n" 687 reconstruct +=
out_im(h_out, m) +
" += " +
h2_im(h,m) +
";\n" 693 if im == 0
and re == 0:
696 reconstruct +=
out_re(s, m) +
" " +
sign(re) +
"= " +
h2_re(h,m) +
";\n" 697 reconstruct +=
out_im(s, m) +
" " +
sign(re) +
"= " +
h2_im(h,m) +
";\n" 699 reconstruct +=
out_re(s, m) +
" " +
sign(-im) +
"= " +
h2_im(h,m) +
";\n" 700 reconstruct +=
out_im(s, m) +
" " +
sign(+im) +
"= " +
h2_re(h,m) +
";\n" 702 if ( m < 2 ): reconstruct +=
"\n" 705 str +=
"if (param.gauge_fixed && ga_idx < param.dc.X4X3X2X1hmX3X2X1h)\n" 706 str +=
block(decl_half + prep_half + ident + reconstruct)
708 str +=
block(load_gauge + decl_half + prep_half + reconstruct_gauge + mult + reconstruct)
710 str += load_gauge + decl_half + prep_half + reconstruct_gauge + mult + reconstruct
713 out = load_spinor + decl_half + project
714 out = out.replace(
"sp_idx",
"idx")
717 return cond +
block(str)+
"\n\n" 722 if dagger: lsign=
'-'; ledge =
'0'; rsign=
'+'; redge=
'param.dc.Ls-1' 723 else: lsign=
'+'; ledge =
'param.dc.Ls-1'; rsign=
'-'; redge=
'0' 726 str +=
"// 5th dimension -- NB: not partitionable!\n" 727 str +=
"#ifdef MULTI_GPU\nif(kernel_type == INTERIOR_KERNEL)\n#endif\n{\n" 728 str +=
"// 2 P_L = 2 P_- = ( ( +1, -1 ), ( -1, +1 ) )\n" 730 str +=
" int sp_idx = ( coord[4] == %s ? X%s(param.dc.Ls-1)*2*param.dc.volume_4d_cb : X%s2*param.dc.volume_4d_cb ) / 2;\n" % (ledge, rsign, lsign)
732 str +=
"// read spinor from device memory\n" 733 str +=
" READ_SPINOR( SPINORTEX, param.sp_stride, sp_idx, sp_idx );\n" 735 str +=
" if ( coord[4] != %s )\n" % ledge
739 return two_P_L[4*i+j]
743 for s1
in range(0,4):
746 re_rhs, im_rhs =
"",
"" 747 for s2
in range(0,4):
748 re, im = proj(s1,s2).real, proj(s1,s2).imag
755 out_L += 3*
" " +
out_re(s1,c) +
" += " + re_rhs +
";" 756 out_L += 3*
" " +
out_im(s1,c) +
" += " + im_rhs +
";\n" 757 if s1 < 3 : out_L +=
"\n" 767 str += out_L.replace(
" += ",
" += -param.mferm*(").replace(
";",
");")
769 str +=
" } // end if ( coord[4]!= %s )\n" % ledge
770 str +=
" } // end P_L\n\n" 771 str +=
" // 2 P_R = 2 P_+ = ( ( +1, +1 ), ( +1, +1 ) )\n" 773 str +=
" int sp_idx = ( coord[4] == %s ? X%s(param.dc.Ls-1)*2*param.dc.volume_4d_cb : X%s2*param.dc.volume_4d_cb ) / 2;\n" % (redge, lsign, rsign)
775 str +=
"// read spinor from device memory\n" 776 str +=
" READ_SPINOR( SPINORTEX, param.sp_stride, sp_idx, sp_idx );\n" 778 str +=
" if ( coord[4] != %s )\n" % redge
782 str += out_L.replace(
"-",
"+")
789 str += out_L.replace(
"-",
"+").replace(
" += ",
" += -param.mferm*(").replace(
";",
");")
791 str +=
" } // end if ( coord[4] != %s )\n" % redge
792 str +=
" } // end P_R\n" 793 str +=
"} // end 5th dimension\n\n\n" 801 if z==0:
return out_re(s,c)
804 if z==0:
return in_re(s,c)
805 else:
return in_im(s,c)
819 for s
in range (0,4):
823 return block(str)+
"\n\n" 831 str +=
"spinorFloat "+
a_re(0,1,c)+
" = -"+
out_re(0,c)+
" - "+
out_re(2,c)+
";\n" 832 str +=
"spinorFloat "+
a_im(0,1,c)+
" = -"+
out_im(0,c)+
" - "+
out_im(2,c)+
";\n" 835 str +=
"spinorFloat "+
a_re(0,3,c)+
" = -"+
out_re(0,c)+
" + "+
out_re(2,c)+
";\n" 836 str +=
"spinorFloat "+
a_im(0,3,c)+
" = -"+
out_im(0,c)+
" + "+
out_im(2,c)+
";\n" 839 for s
in range (0,4):
843 return block(str)+
"\n\n" 848 str =
"READ_CLOVER(CLOVERTEX, "+`chi`+
")\n\n" 850 for s
in range (0,2):
851 for c
in range (0,3):
852 str +=
"spinorFloat "+
a_re(chi,s,c)+
" = 0; spinorFloat "+
a_im(chi,s,c)+
" = 0;\n" 855 for sm
in range (0,2):
856 for cm
in range (0,3):
857 for sn
in range (0,2):
858 for cn
in range (0,3):
859 str +=
a_re(chi,sm,cm)+
" += "+
c_re(chi,sm,cm,sn,cn)+
" * "+
out_re(2*chi+sn,cn)+
";\n" 860 if (sn != sm)
or (cn != cm):
861 str +=
a_re(chi,sm,cm)+
" -= "+
c_im(chi,sm,cm,sn,cn)+
" * "+
out_im(2*chi+sn,cn)+
";\n" 863 str +=
a_im(chi,sm,cm)+
" += "+
c_re(chi,sm,cm,sn,cn)+
" * "+
out_im(2*chi+sn,cn)+
";\n" 864 if (sn != sm)
or (cn != cm):
865 str +=
a_im(chi,sm,cm)+
" += "+
c_im(chi,sm,cm,sn,cn)+
" * "+
out_re(2*chi+sn,cn)+
";\n" 869 for s
in range (0,2):
870 for c
in range (0,3):
871 str +=
out_re(2*chi+s,c)+
" = "+
a_re(chi,s,c)+
"; " 872 str +=
out_im(2*chi+s,c)+
" = "+
a_im(chi,s,c)+
";\n" 875 return block(str)+
"\n\n" 880 if domain_wall:
return "" 882 if dslash: str +=
"#ifdef DSLASH_CLOVER\n\n" 883 str +=
"// change to chiral basis\n" 885 str +=
"// apply first chiral block\n" 887 str +=
"// apply second chiral block\n" 889 str +=
"// change back from chiral basis\n" 890 str +=
"// (note: required factor of 1/2 is included in clover term normalization)\n" 892 if dslash: str +=
"#endif // DSLASH_CLOVER\n\n" 900 str +=
"#if defined MULTI_GPU && defined DSLASH_XPAY\n" 901 str +=
"if (kernel_type == INTERIOR_KERNEL)\n" 904 str +=
"#ifdef DSLASH_XPAY\n" 905 str +=
" READ_ACCUM(ACCUMTEX, param.sp_stride)\n" 906 str +=
"#ifdef SPINOR_DOUBLE\n" 907 str +=
"spinorFloat a_inv = param.a_inv;\n" 909 str +=
"spinorFloat a_inv = param.a_inv_f;\n" 911 str +=
"#ifdef SPINOR_DOUBLE\n" 927 str +=
"#endif // SPINOR_DOUBLE\n\n" 928 str +=
"#endif // DSLASH_XPAY\n" 937 str +=
"#ifdef DSLASH_XPAY\n" 939 str +=
"#ifdef SPINOR_DOUBLE\n" 940 str +=
"spinorFloat a = param.a;\n" 942 str +=
"spinorFloat a = param.a_f;\n" 945 str +=
"#ifdef SPINOR_DOUBLE\n" 961 str +=
"#endif // SPINOR_DOUBLE\n\n" 962 str +=
"#endif // DSLASH_XPAY\n" 972 str +=
"#ifdef MULTI_GPU\n" 976 str +=
"#if defined MULTI_GPU && defined DSLASH_XPAY\n" 978 str +=
"#if defined MULTI_GPU && (defined DSLASH_XPAY || defined DSLASH_CLOVER)\n" 981 int incomplete = 0; // Have all 8 contributions been computed for this site? 983 switch(kernel_type) { // intentional fall-through 984 case INTERIOR_KERNEL: 985 incomplete = incomplete || (param.commDim[3] && (coord[3]==0 || coord[3]==(param.dc.X[3]-1))); 986 case EXTERIOR_KERNEL_T: 987 incomplete = incomplete || (param.commDim[2] && (coord[2]==0 || coord[2]==(param.dc.X[2]-1))); 988 case EXTERIOR_KERNEL_Z: 989 incomplete = incomplete || (param.commDim[1] && (coord[1]==0 || coord[1]==(param.dc.X[1]-1))); 990 case EXTERIOR_KERNEL_Y: 991 incomplete = incomplete || (param.commDim[0] && (coord[0]==0 || coord[0]==(param.dc.X[0]-1))); 995 str +=
"if (!incomplete)\n" 996 str +=
"#endif // MULTI_GPU\n" 1001 str +=
"// write spinor field back to device memory\n" 1002 str +=
"WRITE_SPINOR(param.sp_stride);\n\n" 1004 str +=
"// undefine to prevent warning when precision is changed\n" 1005 str +=
"#undef spinorFloat\n" 1006 str +=
"#undef SHARED_STRIDE\n\n" 1009 for m
in range(0,3):
1010 for n
in range(0,3):
1012 str +=
"#undef "+
g_re(0,m,n)+
"\n" 1013 str +=
"#undef "+
g_im(0,m,n)+
"\n" 1016 for s
in range(0,4):
1017 for c
in range(0,3):
1019 str +=
"#undef "+
in_re(s,c)+
"\n" 1020 str +=
"#undef "+
in_im(s,c)+
"\n" 1024 for m
in range(0,6):
1027 str +=
"#undef "+
c_re(0,s,c,s,c)+
"\n" 1028 for n
in range(0,6):
1031 for m
in range(n+1,6):
1034 str +=
"#undef "+
c_re(0,sm,cm,sn,cn)+
"\n" 1035 str +=
"#undef "+
c_im(0,sm,cm,sn,cn)+
"\n" 1038 for s
in range(0,4):
1039 for c
in range(0,3):
1041 if 2*i < sharedFloats:
1042 str +=
"#undef "+
out_re(s,c)+
"\n" 1043 if 2*i+1 < sharedFloats:
1044 str +=
"#undef "+
out_im(s,c)+
"\n" 1047 str +=
"#undef VOLATILE\n" 1055 str +=
"switch(dim) {\n" 1056 for dim
in range(0,4):
1057 str +=
"case "+`dim`+
":\n" 1058 proj =
gen(2*dim+facenum, pack_only=
True)
1060 proj +=
"// write half spinor back to device memory\n" 1061 proj +=
"WRITE_HALF_SPINOR(face_volume, face_idx);\n" 1068 assert (sharedFloats == 0)
1071 str +=
"#include \"io_spinor.h\"\n\n" 1073 str +=
"if (face_num) " 1079 str +=
"// undefine to prevent warning when precision is changed\n" 1080 str +=
"#undef spinorFloat\n" 1081 str +=
"#undef SHARED_STRIDE\n\n" 1083 for s
in range(0,4):
1084 for c
in range(0,3):
1086 str +=
"#undef "+
in_re(s,c)+
"\n" 1087 str +=
"#undef "+
in_im(s,c)+
"\n" 1096 for i
in range(0,8) :
1110 cloverSharedFloats = 0
1111 if(
len(sys.argv) > 1):
1112 if (sys.argv[1] ==
'--shared'):
1113 sharedFloats =
int(sys.argv[2])
1114 print "Shared floats set to " + str(sharedFloats);
1121 print sys.argv[0] +
": generating dw_dslash_core.h";
1124 f = open(
'dslash_core/dw_dslash_core.h',
'w')
1128 print sys.argv[0] +
": generating dw_dslash_dagger_core.h";
1131 f = open(
'dslash_core/dw_dslash_dagger_core.h',
'w')
def input_spinor(s, c, z)
def complexify(a)
complex numbers ######################################################################## ...
def gen(dir, pack_only=False)
def indent(code, n=1)
code generation ######################################################################## ...
def c_im(b, sm, cm, sn, cn)
def c_re(b, sm, cm, sn, cn)