#ifndef CNTR_DISTRIBUTED_TIMESTEP_ARRAY_IMPL_H
#define CNTR_DISTRIBUTED_TIMESTEP_ARRAY_IMPL_H

// Default constructor (excerpt): query the number of MPI tasks from the
// underlying distributed_array and create one timestep view per block.
ntasks_=data_.ntasks();
G_=std::vector<cntr::herm_matrix_timestep_view<T> >(n_);
// Copy constructor (excerpt): after the data blocks have been copied, every
// view must be re-attached so that it points into this object's own storage.
for(int j=0;j<n_;j++) G_[j].set_to_data(data_.block(j),tstp_,ntau_,size_,sig_);
// Assignment operator (excerpt): guard against self-assignment, copy the
// members and data blocks, then re-attach the views.
if(this==&a) return *this;
// ... member-wise copy of n_, tstp_, ntau_, size_, sig_ and data_ elided ...
for(int j=0;j<n_;j++) G_[j].set_to_data(data_.block(j),tstp_,ntau_,size_,sig_);
return *this;
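The re-attachment loop that closes both the copy constructor and the assignment operator is not redundant: G_ holds non-owning views into data_'s storage, so a freshly copied object needs freshly bound views. A minimal, self-contained sketch of the same pitfall with a plain pointer into a std::vector (illustration only, not library code):

#include <cstddef>
#include <iostream>
#include <vector>

struct Holder {
    std::vector<double> data;
    double* view;                        // stand-in for a non-owning view
    explicit Holder(std::size_t n) : data(n, 0.0), view(data.data()) {}
    Holder(const Holder& a) : data(a.data), view(data.data()) {} // re-attach
    Holder& operator=(const Holder& a) {
        if (this == &a) return *this;    // same self-assignment guard
        data = a.data;
        view = data.data();              // analogue of the set_to_data loop
        return *this;
    }
};

int main() {
    Holder a(4), b(2);
    b = a;
    std::cout << (b.view == b.data.data()) << "\n";  // prints 1
    return 0;
}

Without the re-binding in operator=, b.view would still reference a's storage, which is exactly the stale-view bug the set_to_data loop prevents.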
// Main constructor (excerpt): one complex block per herm_matrix_timestep,
// holding ntau+1 imaginary-time (tv) entries plus nt+1 for each of the two
// real-time tracks, per matrix element.
assert(-1<=nt && 0<=ntau && sig*sig==1 && 1<=size);
int size_tstp=(ntau+1+2*(nt+1))*size*size;
// ... construction of data_ as a distributed_array elided ...
ntasks_=data_.ntasks();
G_=std::vector<cntr::herm_matrix_timestep_view<T> >(n_);
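As a quick check of the block-size arithmetic (the parameter values below are made up for illustration):

#include <cassert>

int main() {
    // Hypothetical parameters: nt=100 real-time steps, ntau=200
    // imaginary-time points, 2x2 matrices.
    int nt=100, ntau=200, size=2;
    int size_tstp=(ntau+1+2*(nt+1))*size*size;
    // tv track: 201 entries; two real-time tracks: 101 each;
    // 4 matrix elements per point.
    assert(size_tstp==(201+202)*4);   // 1612 complex numbers per block
    return 0;
}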
// reset_tstp: resize every data block to the storage needed at timestep
// tstp and re-attach the views.
assert(-1<=tstp && tstp<=nt_);   // tstp may not exceed the maximum nt_
int blocksize=(ntau_+1+2*(tstp+1))*size_*size_;
tstp_=tstp;
data_.reset_blocksize(blocksize);
for(int j=0;j<n_;j++) G_[j].set_to_data(data_.block(j),tstp_,ntau_,size_,sig_);
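The per-timestep resizing keeps memory proportional to the work done at each step; at tstp=-1 (the Matsubara step) only the imaginary-time track is stored. A small stand-alone illustration of how the block size grows:

#include <initializer_list>
#include <iostream>

int main() {
    int ntau=200, size=2;   // hypothetical parameters, as above
    for (int tstp : {-1, 0, 50, 100})
        std::cout << "tstp=" << tstp << "  blocksize="
                  << (ntau+1+2*(tstp+1))*size*size << "\n";
    // prints 804, 812, 1212, 1612: the real-time tracks grow with tstp
    return 0;
}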
// G(int j): bounds-checked access to the view for block j.
assert(0<=j && j<=n_-1);
// mpi_bcast_block(int j): broadcast the j-th block to all ranks.
data_.mpi_bcast_block(j);
// mpi_bcast_all(): allgather, so every rank holds all blocks.
data_.mpi_bcast_all();
Related types (from the accompanying reference documentation):

cntr::herm_matrix_timestep_view<T>
    Serves for interfacing with class herm_matrix_timestep without copying
    the data.
cntr::distributed_array<std::complex<T> >
    Auxiliary data structure for handling a set of data blocks and the usual
    MPI operations on them.
cntr::distributed_timestep_array<T>
    Specialization of the distributed_array in which data blocks are
    associated with herm_matrix_timestep objects.

Members of distributed_timestep_array<T>:

distributed_timestep_array()
    Default constructor.
~distributed_timestep_array()
    Destructor.
distributed_timestep_array<T>& operator=(const distributed_timestep_array &a)
    Assignment operator; re-attaches the views after copying the data.
void clear(void)
    Clear the data.
void reset_tstp(int tstp)
    Reset the data to a new timestep of herm_matrix_timestep.
cntr::herm_matrix_timestep_view<T>& G(int j)
    Get a reference to the herm_matrix_timestep view for the j-th block.
std::vector<cntr::herm_matrix_timestep_view<T> > G(void) const
    Get the vector of all timestep views.
distributed_array<std::complex<T> > data(void) const
    Get the underlying distributed_array of raw data blocks.
void mpi_bcast_block(int j)
    MPI broadcast equivalent for the j-th block.
void mpi_bcast_all(void)
    MPI allgather equivalent.
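Putting the pieces together, a hedged end-to-end sketch of how the class might be driven (the umbrella header cntr/cntr.hpp, the exact constructor signature, and the fill step are assumptions based on the excerpts above, not verified against the library):

#include <mpi.h>
#include <cntr/cntr.hpp>   // assumed NESSi umbrella header

int main(int argc, char** argv) {
    MPI_Init(&argc, &argv);
    {
        // Hypothetical parameters; sig=-1 marks fermionic functions.
        int n = 8, nt = 100, ntau = 200, size = 2, sig = -1;
        // One timestep block per object, distributed over the MPI ranks.
        cntr::distributed_timestep_array<double> A(n, nt, ntau, size, sig,
                                                   /*mpi=*/true);
        for (int tstp = 0; tstp <= nt; tstp++) {
            A.reset_tstp(tstp);   // size all blocks for this timestep
            // ... each rank fills the blocks it owns through A.G(j) ...
            A.mpi_bcast_all();    // allgather: all ranks see all blocks
        }
    } // destroy A before finalizing MPI
    MPI_Finalize();
    return 0;
}

The reset_tstp / fill / mpi_bcast_all pattern mirrors the member list above: each rank works on its own blocks, and the allgather makes the full timestep available everywhere.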