NESSi
v1.0.2
The NonEquilibrium Systems Simulation Library
cntr_pseudo_convolution_impl.hpp
Go to the documentation of this file.
1
#ifndef CNTR_PSEUDO_CONVOLUTION_IMPL_H
2
#define CNTR_PSEUDO_CONVOLUTION_IMPL_H
3
4
#include <vector>

#include "cntr_pseudo_convolution_decl.hpp"
//#include "cntr_exception.hpp"
#include "cntr_elements.hpp"
#include "cntr_matsubara_decl.hpp"
#include "cntr_convolution_decl.hpp"
#include "cntr_herm_matrix_decl.hpp"
10
11
namespace
cntr
{
12
13
#define CPLX std::complex<T>
14
15
/* /////////////////////////////////////////////////////////////////////////////////////////
   OVERVIEW:
   ROUTINES FOR COMPUTING THE CONVOLUTION C = gamma * C + alpha * A * ft * B at one timestep

   ** ALSO PSEUDO_CONVOLUTION ROUTINES:
///////////////////////////////////////////////////////////////////////////////////////// */
// Ctv(t,tau) += alpha*Aret*Btv(t,tau) - ii * alpha * int_t1^beta dt1 Atv(t,t1)*Bmat(t1,tau)
26
// Increment the tv (real-time / imaginary-time mixed) component of C at real timestep tstp:
//   Ctv(tstp,m) += alpha * h * [Gregory sum over n of Aret(tstp,n)*ft(n)*Btv(n,m)]
//               +  alpha * dtau * [Matsubara-type tau-integral of Atv(tstp,.) against f0*Bmat]
// for every tau index m = 0..ntau with mask[m] == true.
// ft (one size1 x size1 matrix per timestep) and the Matsubara value f0 optionally
// multiply the data; pass ft == NULL to skip the function entirely.
// Acc holds the conjugate counterpart of A: retarded values Aret(tstp,j) for j > tstp
// (needed when tstp < kt so that kt+1 quadrature points exist) are produced as
// -conj(Acc.retptr(j,tstp)).  Bcc is accepted for interface symmetry but not read here.
// I provides the Gregory integration weights of order kt; beta is the inverse
// temperature, h the real-time step.
template <typename T, class GG, int SIZE1>
void incr_pseudo_convolution_tv(int tstp, std::vector<bool> &mask, CPLX alpha, GG &C, GG &A,
                                GG &Acc, CPLX *f0, CPLX *ft, GG &B, GG &Bcc,
                                integration::Integrator<T> &I, T beta, T h) {
    int ntau = A.ntau();
    int size1 = A.size1();
    int sc = size1 * size1;                  // entries per size1 x size1 matrix element
    bool func = (ft == NULL ? false : true); // true <=> multiply by the function ft/f0
    CPLX adt = alpha * h;                    // prefactor of the real-time (Aret*Btv) sum
    CPLX adtau = alpha * beta * (1.0 / ntau); // prefactor alpha*dtau of the tau integral
    int kt = I.get_k();                      // integrator (quadrature) order
    int n1 = (tstp >= kt ? tstp : kt);       // last index of the quadrature sum: max(tstp, kt)
    {
        // CONVOLUTION OF TV SECTION
        int j, m, n, saf = size1 * size1, sfb = size1 * size1; // element strides (both == sc)
        CPLX *ctemp1 = new CPLX[sc]; // accumulator for the Atv*Bmat contribution
        CPLX *ctemp2 = new CPLX[sc]; // accumulator for the Aret*Btv contribution
        CPLX *bmat;                  // points at (possibly f0-multiplied) Bmat data
        CPLX *aret;                  // points at (possibly ft-multiplied) Aret data
        CPLX *bmat1 = 0;             // owned copy behind bmat, if one had to be built
        CPLX *aret1 = 0;             // owned copy behind aret, if one had to be built
        {
            // aret[j]=Aret(tstp,j)*f(j) j=0 ... n1
            if (func) {
                CPLX *atemp = new CPLX[sc];
                aret1 = new CPLX[(n1 + 1) * saf];
                aret = aret1;
                for (j = 0; j <= tstp; j++) {
                    element_mult<T, SIZE1>(size1, aret1 + saf * j, A.retptr(tstp, j),
                                           ft + j * sc);
                }
                // j > tstp: retarded data of A is not stored there; build it from the
                // conjugate function as -conj(Acc.retptr(j,tstp))
                for (j = tstp + 1; j <= n1; j++) {
                    element_minusconj<T, SIZE1>(size1, atemp, Acc.retptr(j, tstp));
                    element_mult<T, SIZE1>(size1, aret1 + saf * j, atemp, ft + j * sc);
                }
                delete[] atemp;
            } else {
                if (n1 == tstp) {
                    // no function and no extrapolated points needed:
                    // read A's retarded row in place, without copying
                    aret = A.retptr(tstp, 0);
                } else {
                    aret1 = new CPLX[(n1 + 1) * saf];
                    aret = aret1;
                    for (j = 0; j <= tstp; j++)
                        element_set<T, SIZE1>(size1, aret1 + sc * j, A.retptr(tstp, j));
                    for (j = tstp + 1; j <= n1; j++)
                        element_minusconj<T, SIZE1>(size1, aret1 + sc * j,
                                                    Acc.retptr(j, tstp));
                }
            }
            // bmat[m]=f0*Bmat(m)
            if (func) {
                bmat1 = new CPLX[(ntau + 1) * sfb];
                bmat = bmat1;
                for (m = 0; m <= ntau; m++)
                    element_mult<T, SIZE1>(size1, bmat1 + m * sfb, f0, B.matptr(m));
            } else {
                bmat = B.matptr(0); // no function: read B's Matsubara data in place
            }
        }
        for (m = 0; m <= ntau; m++) {
            if (mask[m]) {
                // CONTRIBUTION FROM Atv * Bmat : very similar to computing the matsubara
                // convolution
                // !!!!! Here is the only difference between pseudo_convolution and
                // convolution:
                // matsubara_integral_2_2 instead of matsubara_integral_2
                // NOTE: B.sig() really not needed for matsubara_integral_2_2,
                // other than for matsubara_integral_2
                matsubara_integral_2_2<T, SIZE1>(size1, m, ntau, ctemp1, A.tvptr(tstp, 0),
                                                 bmat, I);
                element_smul<T, SIZE1>(size1, ctemp1, adtau);
                // CONTRIBUTION FROM Aret * Btv
                element_set_zero<T, SIZE1>(size1, ctemp2);
                if (tstp <= 2 * kt + 2) {
                    // short times: use the full Gregory weight table w(tstp,n)
                    for (n = 0; n <= n1; n++) {
                        element_incr<T, SIZE1>(size1, ctemp2, I.gregory_weights(tstp, n),
                                               aret + n * saf, B.tvptr(n, m));
                    }
                } else {
                    // long times: boundary-corrected quadrature -- omega weights at both
                    // ends of the interval, unit weight in the interior
                    for (n = 0; n <= kt; n++) {
                        element_incr<T, SIZE1>(size1, ctemp2, I.gregory_omega(n),
                                               aret + n * saf, B.tvptr(n, m));
                    }
                    for (n = kt + 1; n < tstp - kt; n++) {
                        element_incr<T, SIZE1>(size1, ctemp2, aret + n * saf, B.tvptr(n, m));
                    }
                    for (n = tstp - kt; n <= tstp; n++) {
                        element_incr<T, SIZE1>(size1, ctemp2, I.gregory_omega(tstp - n),
                                               aret + n * saf, B.tvptr(n, m));
                    }
                }
                element_smul<T, SIZE1>(size1, ctemp2, adt);
                element_incr<T, SIZE1>(size1, C.tvptr(tstp, m), ctemp1);
                element_incr<T, SIZE1>(size1, C.tvptr(tstp, m), ctemp2);
            }
        }
        // release work buffers (bmat1/aret1 are non-null only if copies were made above)
        if (bmat1 != 0)
            delete[] bmat1;
        if (aret1 != 0)
            delete[] aret1;
        delete[] ctemp1;
        delete[] ctemp2;
    }
    return;
}
132
template <typename T, class GG, int SIZE1>
133
void incr_pseudo_convolution_mat(std::vector<bool> &mask, CPLX alpha, GG &C, GG &A, CPLX *f0,
134
GG &B, integration::Integrator<T> &I, T beta) {
135
int ntau = A.ntau();
136
int size1 = A.size1();
137
int sc = size1 * size1;
138
bool func = (f0 == NULL ? false : true);
139
CPLX adtau = alpha * beta * (1.0 / ntau);
140
{
141
// CONVOLUTION OF MATSUBARA GREENFUNCTIONS
142
int m, sfb = sc;
143
CPLX *ctemp = new CPLX[sc], *bmat1 = 0;
144
CPLX *btemp;
145
if (func) {
146
bmat1 = new CPLX[(ntau + 1) * sfb];
147
for (m = 0; m <= ntau; m++) {
148
element_mult<T, SIZE1>(size1, bmat1 + m * sfb, f0, B.matptr(m));
149
}
150
btemp = bmat1;
151
} else {
152
btemp = B.matptr(0);
153
}
154
// t1=omp_get_wtime();
155
for (m = 0; m <= ntau; m++) {
156
if (mask[m]) {
157
// compute cmat(m*dtau) = int_0^tau dx amat(tau-x) f0 b(x)
158
// NOTE: HERE IS THE ONLY DIFFERENCE TO THE USUAL MATSUBARA CONVOLUTION
159
// matsubara_integral_1<T,SIZE1>(size1,m,ntau,ctemp,A.matptr(0),btemp,I,A.sig());
160
matsubara_integral_1_1<T, SIZE1>(size1, m, ntau, ctemp, A.matptr(0), btemp,
161
I);
162
element_incr<T, SIZE1>(size1, C.matptr(m), adtau, ctemp);
163
}
164
}
165
// t2=omp_get_wtime();
166
if (func)
167
delete[] bmat1;
168
delete[] ctemp;
169
}
170
// std::cout << "tid " << omp_get_thread_num() << " parallel time " << t2-t1 <<
171
// std::endl;
172
return;
173
}
175
//
// pseudo-particle convolution: only mat and tv are different
//
template <typename T, class GG, int SIZE1>
181
void incr_pseudo_convolution(int tstp, CPLX alpha, GG &C, GG &A, GG &Acc, CPLX *f0, CPLX *ft,
182
GG &B, GG &Bcc, integration::Integrator<T> &I, T beta, T h) {
183
int ntau = A.ntau();
184
// this function is still not on top level, so no asserts!
185
if (tstp == -1) {
186
std::vector<bool> mask(ntau + 1, true);
187
incr_pseudo_convolution_mat<T, GG, SIZE1>(mask, alpha, C, A, f0, B, I, beta);
188
} else if (tstp >= 0) {
189
std::vector<bool> mask_ret(tstp + 1, true);
190
std::vector<bool> mask_tv(ntau + 1, true);
191
std::vector<bool> mask_les(tstp + 1, true);
192
incr_convolution_ret<T, GG, SIZE1>(tstp, mask_ret, alpha, C, A, Acc, ft, B, Bcc, I,
193
h);
194
incr_pseudo_convolution_tv<T, GG, SIZE1>(tstp, mask_tv, alpha, C, A, Acc, f0, ft, B,
195
Bcc, I, beta, h);
196
incr_convolution_les<T, GG, SIZE1>(tstp, mask_les, alpha, C, A, Acc, f0, ft, B, Bcc,
197
I, beta, h);
198
}
199
return;
200
}
202
template <typename T>
203
void pseudo_convolution_timestep(int tstp, herm_pseudo<T> &C, herm_pseudo<T> &A,
204
herm_pseudo<T> &Acc, function<T> &ft, herm_pseudo<T> &B,
205
herm_pseudo<T> &Bcc, integration::Integrator<T> &I, T beta,
206
T h) {
207
int kt = I.k();
208
int ntmin = (tstp == -1 || tstp > kt ? tstp : kt);
209
if (tstp < -1)
210
return;
211
int size1 = A.size1();
212
std::complex<T> *fttemp;
213
assert(C.size1()==size1);
214
assert(ft.size1()==size1);
215
assert(B.size1()==size1);
216
assert(Bcc.size1()==size1);
217
assert(Acc.size1()==size1);
218
assert(kt>=0 && kt<=5);
219
assert(C.ntau()>=kt);
220
assert(C.nt()>=ntmin);
221
assert(A.nt()>=ntmin);
222
assert(Acc.nt()>=ntmin);
223
assert(B.nt()>=ntmin);
224
assert(Bcc.nt()>=ntmin);
225
assert(ft.nt()>=ntmin);
226
assert(C.ntau()==A.ntau());
227
assert(C.ntau()==Acc.ntau());
228
assert(C.ntau()==B.ntau());
229
assert(C.ntau()==Bcc.ntau());
230
C.set_timestep_zero(tstp);
231
fttemp = (tstp == -1 ? ft.ptr(-1) : ft.ptr(0));
232
if (size1 == 1) {
233
incr_pseudo_convolution<T, herm_pseudo<T>, 1>(tstp, CPLX(1, 0), C, A, Acc,
234
ft.ptr(-1), fttemp, B, Bcc,
235
integration::I<T>(kt), beta, h);
236
} else {
237
incr_pseudo_convolution<T, herm_pseudo<T>, LARGESIZE>(
238
tstp, CPLX(1, 0), C, A, Acc, ft.ptr(-1), fttemp, B, Bcc, integration::I<T>(kt),
239
beta, h);
240
}
241
}
243
template <typename T>
244
void pseudo_convolution_timestep(int n, herm_pseudo<T> &C, herm_pseudo<T> &A,
245
function<T> &ft, herm_pseudo<T> &B,
246
integration::Integrator<T> &I, T beta, T h) {
247
pseudo_convolution_timestep<T>(n, C, A, A, ft, B, B, I, beta, h);
248
}
250
template <typename T>
251
void pseudo_convolution_matsubara(herm_pseudo<T> &C, herm_pseudo<T> &A, herm_pseudo<T> &B,
252
integration::Integrator<T> &I, T beta) {
253
pseudo_convolution_timestep<T>(-1, C, A, A, B, B, I, beta, 0.0);
254
}
256
template <typename T>
257
void pseudo_convolution_matsubara(herm_pseudo<T> &C, herm_pseudo<T> &A, function<T> &ft,
258
herm_pseudo<T> &B, integration::Integrator<T> &I, T beta) {
259
pseudo_convolution_timestep<T>(-1, C, A, A, ft, B, B, I, beta, 0.0);
260
}
262
template <typename T>
263
void pseudo_convolution(herm_pseudo<T> &C, herm_pseudo<T> &A, herm_pseudo<T> &Acc,
264
function<T> &ft, herm_pseudo<T> &B, herm_pseudo<T> &Bcc,
265
integration::Integrator<T> &I, T beta, T h) {
266
int tstp;
267
for (tstp = -1; tstp <= C.nt(); tstp++)
268
pseudo_convolution_timestep<T>(tstp, C, A, Acc, ft, B, Bcc, I, beta, h);
269
}
271
template <typename T>
272
void pseudo_convolution_timestep(int tstp, herm_pseudo<T> &C, herm_pseudo<T> &A,
273
herm_pseudo<T> &Acc, herm_pseudo<T> &B, herm_pseudo<T> &Bcc,
274
integration::Integrator<T> &I, T beta, T h) {
275
int kt = I.k();
276
int ntmin = (tstp == -1 || tstp > kt ? tstp : kt);
277
if (tstp < -1)
278
return;
279
int size1 = A.size1();
280
assert(C.size1()==size1);
281
assert(B.size1()==size1);
282
assert(Bcc.size1()==size1);
283
assert(Acc.size1()==size1);
284
assert(kt>=0 && kt <=5);
285
assert(C.ntau()>=kt);
286
assert(C.nt()>=ntmin);
287
assert(A.nt()>=ntmin);
288
assert(Acc.nt()>=ntmin);
289
assert(B.nt()>=ntmin);
290
assert(Bcc.nt()>=ntmin);
291
assert(C.ntau()==A.ntau());
292
assert(C.ntau()==Acc.ntau());
293
assert(C.ntau()==B.ntau());
294
assert(C.ntau()==Bcc.ntau());
295
C.set_timestep_zero(tstp);
296
if (size1 == 1) {
297
incr_pseudo_convolution<T, herm_pseudo<T>, 1>(
298
tstp, CPLX(1, 0), C, A, Acc, NULL, NULL, B, Bcc, integration::I<T>(kt), beta, h);
299
} else {
300
incr_pseudo_convolution<T, herm_pseudo<T>, LARGESIZE>(
301
tstp, CPLX(1, 0), C, A, Acc, NULL, NULL, B, Bcc, integration::I<T>(kt), beta, h);
302
}
303
}
305
template <typename T>
306
void pseudo_convolution_timestep(int n, herm_pseudo<T> &C, herm_pseudo<T> &A,
307
herm_pseudo<T> &B, integration::Integrator<T> &I, T beta,
308
T h) {
309
pseudo_convolution_timestep<T>(n, C, A, A, B, B, I, beta, h);
310
}
312
template <typename T>
313
void pseudo_convolution(herm_pseudo<T> &C, herm_pseudo<T> &A, herm_pseudo<T> &Acc,
314
herm_pseudo<T> &B, herm_pseudo<T> &Bcc,
315
integration::Integrator<T> &I, T beta, T h) {
316
int tstp;
317
for (tstp = -1; tstp <= C.nt(); tstp++)
318
pseudo_convolution_timestep<T>(tstp, C, A, Acc, B, Bcc, I, beta, h);
319
}
320
321
#undef CPLX
322
323
} // namespace cntr
324
325
#endif // CNTR_PSEUDO_CONVOLUTION_IMPL_H
cntr_elements.hpp
cntr_herm_matrix_decl.hpp
cntr_convolution_decl.hpp
cntr_pseudo_convolution_decl.hpp
cntr_matsubara_decl.hpp
cntr
Definition:
cntr_bubble_decl.hpp:6
cntr
cntr_pseudo_convolution_impl.hpp
Generated by
1.8.15