13 errorQuda("CPU fields do not support half precision");
16 errorQuda("CPU fields do not support quarter precision");
19 errorQuda("CPU fields do not support non-zero padding");
25 errorQuda("10-reconstruction only supported with MILC gauge order");
47 for (int d=0; d<siteDim; d++) {
64 errorQuda("MILC site gauge order only supported for reference fields");
83 for (int i=0; i<nDim; i++) {
113 for (int d=0; d<siteDim; d++) {
127 for (int i=0; i<nDim; i++) {
141 errorQuda("Cannot request exchange of forward links on non-coarse geometry");
144 for (int d=0; d<nDim; d++) {
174 errorQuda("link_direction = %d not supported", link_direction);
194 for (int d=0; d<nDim; d++) {
201 for (int d=0; d<nDim; d++) {
233 memcpy(static_cast<char*>(recv[d])+bytes[d], send[d], bytes[d]);
234 memcpy(recv[d], static_cast<char*>(send[d])+bytes[d], bytes[d]);
241 for (int d=0; d<nDim; d++) {
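The loop and memcpy fragments at source lines 201-241 belong to exchange(), described in the index below as exchanging the buffers across all dimensions in a given direction. A hedged per-dimension sketch built only from primitives listed on this page (comm_dim_partitioned, comm_declare_send_relative, comm_declare_receive_relative, comm_start, comm_wait, comm_free); the direction handling and buffer layout are assumptions:

// Illustrative per-dimension exchange; not the exact QUDA implementation.
for (int d = 0; d < nDim; d++) {
  if (!comm_dim_partitioned(d)) {
    // Dimension is not split across ranks: the "exchange" is a local swap of
    // the two halves of the buffers (cf. source lines 233-234).
    memcpy(static_cast<char*>(recv[d]) + bytes[d], send[d], bytes[d]);
    memcpy(recv[d], static_cast<char*>(send[d]) + bytes[d], bytes[d]);
    continue;
  }
  // Partitioned dimension: post a receive from one neighbour and a send to the
  // other, then wait for completion and release the message handles.
  MsgHandle *mh_recv = comm_declare_receive_relative(recv[d], d, +1, bytes[d]);
  MsgHandle *mh_send = comm_declare_send_relative(send[d], d, -1, bytes[d]);
  comm_start(mh_recv);
  comm_start(mh_send);
  comm_wait(mh_recv);
  comm_wait(mh_send);
  comm_free(mh_recv);
  comm_free(mh_send);
}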
262 if (this == &src) return;
269 errorQuda("fat_link_max has not been computed");
282 src.Bytes(), cudaMemcpyDeviceToHost);
304 qudaMemcpy(((void**)gauge)[d], ((void**)buffer)[d], bytes/geometry, cudaMemcpyDeviceToHost);
312 qudaMemcpy(Ghost()[d], ghost_buffer[d], ghost_bytes[d], cudaMemcpyDeviceToHost);
321 const_cast<void*>(static_cast<const cpuGaugeField&>(src).Gauge_p()));
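The fragments between source lines 262 and 321 come from copy(), which imports data from another gauge field; when the source resides on the device, the data are staged through an intermediate buffer either before or after the layout conversion. A rough sketch of the two staging branches, using helpers listed on this page (reorder_location, create_gauge_buffer, copyGenericGauge, qudaMemcpy, free_gauge_buffer, pool_pinned_malloc, pool_pinned_free); the branch structure and argument lists are assumptions:

// Illustrative device->host import path; not the exact QUDA implementation.
if (reorder_location() == QUDA_CUDA_FIELD_LOCATION) {
  // Reorder on the GPU into a staging buffer in this field's order, then copy
  // the result down to the host (cf. source line 304).
  void *buffer = create_gauge_buffer(bytes, order, geometry);
  copyGenericGauge(*this, src, QUDA_CUDA_FIELD_LOCATION, buffer);
  if (order == QUDA_QDP_GAUGE_ORDER) {
    for (int d = 0; d < geometry; d++) // one allocation per link direction
      qudaMemcpy(((void**)gauge)[d], ((void**)buffer)[d], bytes/geometry, cudaMemcpyDeviceToHost);
  } else {
    qudaMemcpy(gauge, buffer, bytes, cudaMemcpyDeviceToHost);
  }
  free_gauge_buffer(buffer, order, geometry);
} else {
  // Reorder on the CPU: stage the raw device data to pinned host memory first,
  // then let copyGenericGauge do the layout conversion locally (cf. line 282).
  void *buffer = pool_pinned_malloc(src.Bytes());
  qudaMemcpy(buffer, src.Gauge_p(), src.Bytes(), cudaMemcpyDeviceToHost);
  copyGenericGauge(*this, src, QUDA_CPU_FIELD_LOCATION, gauge, buffer);
  pool_pinned_free(buffer);
}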
338 errorQuda("Setting gauge pointer is only allowed when create=QUDA_REFERENCE_FIELD_CREATE type\n");
348 char **buffer = new char*[geometry];
350 buffer[d] = new char[bytes/geometry];
351 memcpy(buffer[d], gauge[d], bytes/geometry);
353 backup_h = reinterpret_cast<char*>(buffer);
367 char **buffer = reinterpret_cast<char**>(backup_h);
369 memcpy(gauge[d], buffer[d], bytes/geometry);
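Taken together, the fragments at source lines 348-369 show backup() and restore(): backup() deep-copies each link direction into a freshly allocated buffer and stashes the buffer array behind the opaque backup_h pointer, and restore() copies that data back. A self-contained plain-C++ sketch of the same pattern (the names geometry, bytes, gauge and backup_h mirror the fragments; the surrounding struct and sizes are purely illustrative):

// Illustrative per-direction backup/restore pattern; not the QUDA class itself.
#include <cstdio>
#include <cstring>

struct FieldBackup {
  static constexpr int geometry = 4;  // number of link directions
  size_t bytes = 4 * 64;              // total payload across all directions
  char *gauge[geometry] = {};         // per-direction data (stand-in for the field)
  char *backup_h = nullptr;           // opaque handle to the backed-up copy

  void backup() {
    char **buffer = new char*[geometry];
    for (int d = 0; d < geometry; d++) {
      buffer[d] = new char[bytes / geometry];
      memcpy(buffer[d], gauge[d], bytes / geometry);  // deep copy each direction
    }
    backup_h = reinterpret_cast<char*>(buffer);       // stash as an opaque pointer
  }

  void restore() {
    char **buffer = reinterpret_cast<char**>(backup_h);
    for (int d = 0; d < geometry; d++) memcpy(gauge[d], buffer[d], bytes / geometry);
  }
};

int main() {
  FieldBackup f;
  for (int d = 0; d < f.geometry; d++) {
    f.gauge[d] = new char[f.bytes / f.geometry];
    memset(f.gauge[d], d + 1, f.bytes / f.geometry);
  }
  f.backup();
  memset(f.gauge[0], 0, f.bytes / f.geometry);   // clobber one direction...
  f.restore();                                   // ...and bring it back
  printf("restored byte: %d\n", f.gauge[0][0]);  // prints 1
  return 0;
}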
#define qudaMemcpy(dst, src, count, kind)
QudaFieldLocation reorder_location()
Return whether data is reordered on the CPU or GPU. This can be set at QUDA initialization using the env...
void extractGaugeGhost(const GaugeField &u, void **ghost, bool extract=true, int offset=0)
void setGauge(void **_gauge)
#define pool_pinned_free(ptr)
void copyGenericGauge(GaugeField &out, const GaugeField &in, QudaFieldLocation location, void *Out=0, void *In=0, void **ghostOut=0, void **ghostIn=0, int type=0)
void exchangeExtendedGhost(const int *R, bool no_comms_fill=false)
This routine will populate the border / halo region of a gauge field that has been created using...
void free_gauge_buffer(void *buffer, QudaGaugeFieldOrder order, QudaFieldGeometry geometry)
enum QudaLinkDirection_s QudaLinkDirection
void free_ghost_buffer(void **buffer, QudaGaugeFieldOrder order, QudaFieldGeometry geometry)
QudaReconstructType reconstruct
double abs_max(int dim=-1) const
Compute the absolute maximum of the field (L-infinity norm).
QudaFieldGeometry geometry
void checkField(const LatticeField &) const
#define comm_declare_send_relative(buffer, dim, dir, nbytes)
#define comm_declare_receive_relative(buffer, dim, dir, nbytes)
void restore() const
Restores the cpuGaugeField.
MsgHandle * mh_recv_back[2][QUDA_MAX_DIM]
void copy(const GaugeField &src)
void extractExtendedGaugeGhost(const GaugeField &u, int dim, const int *R, void **ghost, bool extract)
void comm_start(MsgHandle *mh)
QudaGhostExchange ghostExchange
void exchange(void **recv, void **send, QudaDirection dir) const
Exchange the buffers across all dimensions in a given direction.
void * create_gauge_buffer(size_t bytes, QudaGaugeFieldOrder order, QudaFieldGeometry geometry)
const void ** Ghost() const
enum QudaGaugeFieldOrder_s QudaGaugeFieldOrder
void comm_free(MsgHandle *&mh)
void injectGhost(QudaLinkDirection link_direction=QUDA_LINK_BACKWARDS)
The opposite of exchangeGhost: take the ghost zone on x, send to node x-1, and inject back into the f...
#define safe_malloc(size)
MsgHandle * mh_send_fwd[2][QUDA_MAX_DIM]
void * memset(void *s, int c, size_t n)
cpuGaugeField(const GaugeFieldParam &param)
Constructor for cpuGaugeField from a GaugeFieldParam.
int surface[QUDA_MAX_DIM]
#define pool_pinned_malloc(size)
void backup() const
Backs up the cpuGaugeField.
void exchangeGhost(QudaLinkDirection link_direction=QUDA_LINK_BACKWARDS)
Exchange the ghost and store it in the padded region.
void ** create_ghost_buffer(size_t bytes[], QudaGaugeFieldOrder order, QudaFieldGeometry geometry)
const double & LinkMax() const
enum QudaFieldGeometry_s QudaFieldGeometry
#define QUDA_MAX_DIM
Maximum number of dimensions supported by QUDA. In practice, no routines make use of more than 5...
bool compute_fat_link_max
MsgHandle * mh_recv_fwd[2][QUDA_MAX_DIM]
void comm_wait(MsgHandle *mh)
QudaGaugeFieldOrder order
QudaGhostExchange GhostExchange() const
void *ghost[2 * QUDA_MAX_DIM]
MsgHandle * mh_send_back[2][QUDA_MAX_DIM]
void copyExtendedGauge(GaugeField &out, const GaugeField &in, QudaFieldLocation location, void *Out=0, void *In=0)
int comm_dim_partitioned(int dim)