NESSi v1.0.2
The NonEquilibrium Systems Simulation Library
cntr_mpitools_impl.hpp
#ifndef CNTR_MPITOOLS_IMPL_H
#define CNTR_MPITOOLS_IMPL_H

#include "cntr_mpitools_decl.hpp"
// The remaining includes were cut from the listing; the declarations used
// below come from MPI, the standard library, and the NESSi contour classes.
#include <mpi.h>
#include <cassert>
#include <iostream>
#include "cntr_herm_matrix_timestep_decl.hpp"
#include "cntr_herm_matrix_timestep_view_decl.hpp"
#include "cntr_herm_matrix_decl.hpp"
namespace cntr{

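/** \brief MPI reduce for the herm_matrix_timestep to a herm_matrix_timestep:
 *  the timestep data of G are summed elementwise over all ranks into Gred on rank root. */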
template <typename T> void Reduce_timestep(int tstp, int root, herm_matrix_timestep<T> &Gred,
    herm_matrix_timestep<T> &G) {
    assert(tstp == G.tstp());
    int taskid;
    MPI_Comm_rank(MPI_COMM_WORLD, &taskid);
    if (taskid == root) {
        assert(tstp == Gred.tstp());
        assert(G.ntau() == Gred.ntau());
        assert(G.size1() == Gred.size1());
        assert(G.size2() == Gred.size2());
    }

    // Number of doubles in the timestep data: les and ret hold tstp+1 complex
    // entries each, tv holds ntau+1 (for tstp == -1 this reduces to the ntau+1
    // Matsubara entries); the factor 2 counts real and imaginary parts.
    int len = 2 * (2 * (tstp + 1) + G.ntau() + 1) * G.size1() * G.size2();

    if (sizeof(T) == sizeof(double)) {
        MPI_Reduce((double *)G.data_, (double *)Gred.data_, len, MPI_DOUBLE_PRECISION, MPI_SUM, root,
            MPI_COMM_WORLD);
    } else {
        if (taskid == root) std::cerr << "herm_matrix_timestep<T>::MPI_Reduce only for double " << std::endl;
        MPI_Finalize();
        exit(0);
    }
}
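/** \brief MPI reduce for the herm_matrix_timestep_view to a herm_matrix_timestep_view:
 *  the les, ret and tv components of G (or mat, for tstp == -1) are summed
 *  elementwise over all ranks into Gred on rank root. */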
template <typename T> void Reduce_timestep(int tstp, int root, herm_matrix_timestep_view<T> &Gred,
    herm_matrix_timestep_view<T> &G) {
    assert(tstp == G.tstp());
    int taskid;
    MPI_Comm_rank(MPI_COMM_WORLD, &taskid);
    if (taskid == root) {
        assert(tstp == Gred.tstp());
        assert(G.ntau() == Gred.ntau());
        assert(G.size1() == Gred.size1());
        assert(G.size2() == Gred.size2());
    }

    // Lengths in doubles: real-time components hold tstp+1 complex entries,
    // imaginary-time components ntau+1.
    int len_rt = 2 * (tstp + 1) * G.size1() * G.size2();
    int len_it = 2 * (G.ntau() + 1) * G.size1() * G.size2();

    if (sizeof(T) == sizeof(double)) {
        if(tstp == -1){
            MPI_Reduce((double *)G.mat_, (double *)Gred.mat_, len_it, MPI_DOUBLE_PRECISION, MPI_SUM, root,
                MPI_COMM_WORLD);
        } else{
            MPI_Reduce((double *)G.les_, (double *)Gred.les_, len_rt, MPI_DOUBLE_PRECISION, MPI_SUM, root,
                MPI_COMM_WORLD);
            MPI_Reduce((double *)G.ret_, (double *)Gred.ret_, len_rt, MPI_DOUBLE_PRECISION, MPI_SUM, root,
                MPI_COMM_WORLD);
            MPI_Reduce((double *)G.tv_, (double *)Gred.tv_, len_it, MPI_DOUBLE_PRECISION, MPI_SUM, root,
                MPI_COMM_WORLD);
        }

    } else {
        if (taskid == root) std::cerr << "herm_matrix_timestep_view<T>::MPI_Reduce only for double " << std::endl;
        MPI_Finalize();
        exit(0);
    }
}
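/** \brief MPI reduce for the herm_matrix_timestep to a herm_matrix: the reduction
 *  is performed into a temporary timestep, which is then copied into Gred on rank root. */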
template <typename T> void Reduce_timestep(int tstp, int root, herm_matrix<T> &Gred,
    herm_matrix_timestep<T> &G) {
    assert(tstp == G.tstp());
    int taskid;
    MPI_Comm_rank(MPI_COMM_WORLD, &taskid);
    if (taskid == root) {
        assert(tstp <= Gred.nt());
        assert(G.ntau() == Gred.ntau());
        assert(G.size1() == Gred.size1());
        assert(G.size2() == Gred.size2());
    }

    // Only the root rank needs the receive buffer at full size.
    herm_matrix_timestep<T> Gtemp;
    if (taskid == root){
        Gtemp.resize(tstp, G.ntau(), G.size1());
    }

    Reduce_timestep(tstp, root, Gtemp, G);

    if (taskid == root){
        Gred.set_timestep(tstp, Gtemp);
    }
}
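/** \brief MPI reduce for the herm_matrix_timestep_view to a herm_matrix: timestep
 *  tstp of Gred is wrapped in a view and reduced into directly on rank root. */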
template <typename T> void Reduce_timestep(int tstp, int root, herm_matrix<T> &Gred,
    herm_matrix_timestep_view<T> &G) {
    assert(tstp == G.tstp());
    int taskid;
    MPI_Comm_rank(MPI_COMM_WORLD, &taskid);
    if (taskid == root) {
        assert(tstp <= Gred.nt());
        assert(G.ntau() == Gred.ntau());
        assert(G.size1() == Gred.size1());
        assert(G.size2() == Gred.size2());
    }

    // Validate tstp against Gred only on the root rank.
    bool check_tstp = (taskid == root);
    herm_matrix_timestep_view<T> Gred_tmp(tstp, Gred, check_tstp);
    Reduce_timestep(tstp, root, Gred_tmp, G);
}
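/** \brief MPI reduce for the herm_matrix to a herm_matrix_timestep: timestep tstp
 *  of G is copied out and summed over all ranks into Gred on rank root. */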
template <typename T> void Reduce_timestep(int tstp, int root, herm_matrix_timestep<T> &Gred,
    herm_matrix<T> &G){
    assert(tstp <= G.nt());
    int taskid;
    MPI_Comm_rank(MPI_COMM_WORLD, &taskid);
    if (taskid == root) {
        assert(tstp == Gred.tstp());
        assert(G.ntau() == Gred.ntau());
        assert(G.size1() == Gred.size1());
        assert(G.size2() == Gred.size2());
    }

    // Every rank extracts its own copy of timestep tstp from G.
    herm_matrix_timestep<T> Gtemp;
    Gtemp.resize(tstp, G.ntau(), G.size1());
    G.get_timestep(tstp, Gtemp);

    Reduce_timestep(tstp, root, Gred, Gtemp);
}
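/** \brief MPI reduce for the herm_matrix to a herm_matrix_timestep_view: timestep
 *  tstp of G is wrapped in a view and summed over all ranks into Gred on rank root. */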
template <typename T> void Reduce_timestep(int tstp, int root, herm_matrix_timestep_view<T> &Gred,
    herm_matrix<T> &G){
    assert(tstp <= G.nt());
    int taskid;
    MPI_Comm_rank(MPI_COMM_WORLD, &taskid);
    if (taskid == root) {
        assert(tstp == Gred.tstp());
        assert(G.ntau() == Gred.ntau());
        assert(G.size1() == Gred.size1());
        assert(G.size2() == Gred.size2());
    }

    herm_matrix_timestep_view<T> Gtemp(tstp, G);
    Reduce_timestep(tstp, root, Gred, Gtemp);
}
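/** \brief MPI reduce for the herm_matrix to a herm_matrix: timestep tstp of G is
 *  summed over all ranks into the corresponding timestep of Gred on rank root. */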
template <typename T> void Reduce_timestep(int tstp, int root, herm_matrix<T> &Gred,
    herm_matrix<T> &G){
    assert(tstp <= G.nt());
    int taskid;
    MPI_Comm_rank(MPI_COMM_WORLD, &taskid);
    if (taskid == root) {
        assert(tstp <= Gred.nt());
        assert(G.ntau() == Gred.ntau());
        assert(G.size1() == Gred.size1());
        assert(G.size2() == Gred.size2());
    }

    bool check_tstp = (taskid == root);
    herm_matrix_timestep_view<T> Gred_tmp(tstp, Gred, check_tstp);
    herm_matrix_timestep_view<T> Garr_tmp(tstp, G);

    Reduce_timestep(tstp, root, Gred_tmp, Garr_tmp);
}
} // namespace cntr

#endif // CNTR_MPITOOLS_IMPL_H
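Below is a minimal usage sketch (not part of the library source): every rank fills its own herm_matrix_timestep with a partial result, and Reduce_timestep sums the partial results elementwise onto the root rank. The constructor herm_matrix_timestep<double>(tstp, ntau, size1) is assumed here to mirror the resize(tstp, ntau, size1) call used above; adjust the includes and construction to the actual NESSi API of your installation.

#include <mpi.h>
#include "cntr_mpitools_decl.hpp" // assumed entry point; an application would normally include the top-level NESSi header

int main(int argc, char *argv[]) {
    MPI_Init(&argc, &argv);

    int tstp = 10, ntau = 200, size1 = 2;
    int root = 0;

    // Per-rank partial result and the reduction target (assumed constructor,
    // mirroring the resize(tstp, ntau, size1) signature used above).
    cntr::herm_matrix_timestep<double> Gloc(tstp, ntau, size1);
    cntr::herm_matrix_timestep<double> Gtot(tstp, ntau, size1);

    // ... fill Gloc with this rank's contribution, e.g. its share of a
    //     momentum sum ...

    // Elementwise MPI_SUM of Gloc over all ranks into Gtot on rank 0.
    cntr::Reduce_timestep(tstp, root, Gtot, Gloc);

    MPI_Finalize();
    return 0;
}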