Carmen Code
Parallel.cpp File Reference

Parallel implementation of the MPI communication routines (not working yet).

#include "Carmen.h"

Functions

void CreateMPITopology ()
    Parallel function DOES NOT WORK!

void FillCellAddr (Cell *Mesh4MPI, int d, int &n)

void FillNbAddr (Cell ***Nb, int l, int i, int j, int &n)

void CreateMPIType (FineMesh *Root)

void FreeMPIType ()
    Parallel function DOES NOT WORK!

void CreateMPILinks ()
    Parallel function DOES NOT WORK!

void CPUExchange (FineMesh *Root, int WS)
    Parallel function DOES NOT WORK!

void ReduceIntegralValues ()
    Parallel function DOES NOT WORK!

Detailed Description

Parallel implementation of the MPI communication routines (not working yet).

Function Documentation

void CPUExchange ( FineMesh *  Root,
int  WS 
)

Parallel function DOES NOT WORK!

Parameters
    Root	Fine mesh
    WS	Bitmask selecting which cell elements (Q, Qs, X, dX, D, Grad) are exchanged; copied into WhatSend
Returns
    void
{
    CommTimer.start();
#if defined PARMPI
    int i,k;
    int exNb=0;

    WhatSend=WS;
    CellElementsNb=0;

    // Count the cell elements selected by the WS bitmask.
    for (i=0;i<16;i++) {
        k=1<<i;
        if ((WS & k) != 0) CellElementsNb++;
    }

    static bool ft=true;
//  if (ft==true) {
    CreateMPIType(Root);
//  CreateMPILinks();
//  ft=false;
//  }

//  MPI_Startall(4*Dimension,req);

    // Send ghost-cell data to the lower and upper neighbours in direction i
    // (buffered, standard or synchronous mode, depending on MPISendType).
    switch (MPISendType) {
    case 0:
        MPI_Ibsend(MPI_BOTTOM, 1, MPItypeSiL, rank_il, 100, comm_cart, &req[exNb++]);
        MPI_Ibsend(MPI_BOTTOM, 1, MPItypeSiU, rank_iu, 200, comm_cart, &req[exNb++]);
        break;

    case 10:
        MPI_Isend(MPI_BOTTOM, 1, MPItypeSiL, rank_il, 100, comm_cart, &req[exNb++]);
        MPI_Isend(MPI_BOTTOM, 1, MPItypeSiU, rank_iu, 200, comm_cart, &req[exNb++]);
        break;

    case 20:
        MPI_Issend(MPI_BOTTOM, 1, MPItypeSiL, rank_il, 100, comm_cart, &req[exNb++]);
        MPI_Issend(MPI_BOTTOM, 1, MPItypeSiU, rank_iu, 200, comm_cart, &req[exNb++]);
        break;
    }

    // Same pattern in direction j.
    if (Dimension >= 2) {
        switch (MPISendType) {
        case 0:
            MPI_Ibsend(MPI_BOTTOM, 1, MPItypeSjL, rank_jl, 300, comm_cart, &req[exNb++]);
            MPI_Ibsend(MPI_BOTTOM, 1, MPItypeSjU, rank_ju, 400, comm_cart, &req[exNb++]);
            break;

        case 10:
            MPI_Isend(MPI_BOTTOM, 1, MPItypeSjL, rank_jl, 300, comm_cart, &req[exNb++]);
            MPI_Isend(MPI_BOTTOM, 1, MPItypeSjU, rank_ju, 400, comm_cart, &req[exNb++]);
            break;

        case 20:
            MPI_Issend(MPI_BOTTOM, 1, MPItypeSjL, rank_jl, 300, comm_cart, &req[exNb++]);
            MPI_Issend(MPI_BOTTOM, 1, MPItypeSjU, rank_ju, 400, comm_cart, &req[exNb++]);
            break;
        }
    }

    // Same pattern in direction k.
    if (Dimension == 3) {
        switch (MPISendType) {
        case 0:
            MPI_Ibsend(MPI_BOTTOM, 1, MPItypeSkL, rank_kl, 500, comm_cart, &req[exNb++]);
            MPI_Ibsend(MPI_BOTTOM, 1, MPItypeSkU, rank_ku, 600, comm_cart, &req[exNb++]);
            break;

        case 10:
            MPI_Isend(MPI_BOTTOM, 1, MPItypeSkL, rank_kl, 500, comm_cart, &req[exNb++]);
            MPI_Isend(MPI_BOTTOM, 1, MPItypeSkU, rank_ku, 600, comm_cart, &req[exNb++]);
            break;

        case 20:
            MPI_Issend(MPI_BOTTOM, 1, MPItypeSkL, rank_kl, 500, comm_cart, &req[exNb++]);
            MPI_Issend(MPI_BOTTOM, 1, MPItypeSkU, rank_ku, 600, comm_cart, &req[exNb++]);
            break;
        }
    }

    // Receive from the same neighbours, blocking when MPIRecvType == 0, non-blocking otherwise.
    if (MPIRecvType==0) {
        MPI_Recv(MPI_BOTTOM, 1, MPItypeRiL, rank_il, 200, comm_cart, &st[6]);
        MPI_Recv(MPI_BOTTOM, 1, MPItypeRiU, rank_iu, 100, comm_cart, &st[7]);
    } else {
        MPI_Irecv(MPI_BOTTOM, 1, MPItypeRiL, rank_il, 200, comm_cart, &req[exNb++]);
        MPI_Irecv(MPI_BOTTOM, 1, MPItypeRiU, rank_iu, 100, comm_cart, &req[exNb++]);
    }

    if (Dimension >= 2) {
        if (MPIRecvType==0) {
            MPI_Recv(MPI_BOTTOM, 1, MPItypeRjL, rank_jl, 400, comm_cart, &st[8]);
            MPI_Recv(MPI_BOTTOM, 1, MPItypeRjU, rank_ju, 300, comm_cart, &st[9]);
        } else {
            MPI_Irecv(MPI_BOTTOM, 1, MPItypeRjL, rank_jl, 400, comm_cart, &req[exNb++]);
            MPI_Irecv(MPI_BOTTOM, 1, MPItypeRjU, rank_ju, 300, comm_cart, &req[exNb++]);
        }
    }

    if (Dimension == 3) {
        if (MPIRecvType==0) {
            MPI_Recv(MPI_BOTTOM, 1, MPItypeRkL, rank_kl, 600, comm_cart, &st[10]);
            MPI_Recv(MPI_BOTTOM, 1, MPItypeRkU, rank_ku, 500, comm_cart, &st[11]);
        } else {
            MPI_Irecv(MPI_BOTTOM, 1, MPItypeRkL, rank_kl, 600, comm_cart, &req[exNb++]);
            MPI_Irecv(MPI_BOTTOM, 1, MPItypeRkU, rank_ku, 500, comm_cart, &req[exNb++]);
        }
    }

    // Note: the non-blocking requests stored in req[] are not waited on here
    // before the derived datatypes are freed.
    FreeMPIType();
#endif
    CommTimer.stop();
}
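The pattern above is a standard halo (ghost-cell) exchange. The following minimal sketch, which is not part of Carmen, shows the same pattern for one direction of a Cartesian communicator using plain double buffers instead of the MPItypeS*/MPItypeR* derived datatypes, and adds the MPI_Waitall completion step that CPUExchange currently omits. The function name HaloExchange1D and the buffer layout are illustrative assumptions.

// Minimal sketch (not part of Carmen): the non-blocking exchange pattern used by
// CPUExchange for one direction, with plain double buffers and a completion step.
#include <mpi.h>
#include <vector>

// comm_cart is assumed to be a Cartesian communicator such as the one built by
// CreateMPITopology.
void HaloExchange1D(MPI_Comm comm_cart,
                    std::vector<double>& sendL, std::vector<double>& sendU,
                    std::vector<double>& recvL, std::vector<double>& recvU)
{
    int rank_il, rank_iu;
    MPI_Cart_shift(comm_cart, 0, 1, &rank_il, &rank_iu);  // lower and upper neighbours

    MPI_Request req[4];
    // Tags follow the convention of CPUExchange: 100 towards the lower neighbour,
    // 200 towards the upper one.
    MPI_Isend(sendL.data(), (int)sendL.size(), MPI_DOUBLE, rank_il, 100, comm_cart, &req[0]);
    MPI_Isend(sendU.data(), (int)sendU.size(), MPI_DOUBLE, rank_iu, 200, comm_cart, &req[1]);
    MPI_Irecv(recvL.data(), (int)recvL.size(), MPI_DOUBLE, rank_il, 200, comm_cart, &req[2]);
    MPI_Irecv(recvU.data(), (int)recvU.size(), MPI_DOUBLE, rank_iu, 100, comm_cart, &req[3]);

    // The exchange is complete only after all four requests have finished.
    MPI_Waitall(4, req, MPI_STATUSES_IGNORE);
}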

void CreateMPILinks ( )

Parallel function DOES NOT WORK!

Returns
void
{
    int exNb;
    exNb=0;
#if defined PARMPI

    // Create persistent send requests towards the lower and upper neighbours
    // in direction i (buffered, standard or synchronous mode, depending on MPISendType).
    switch (MPISendType) {
    case 0:
        MPI_Bsend_init(MPI_BOTTOM, 1, MPItypeSiL, rank_il, 100, comm_cart, &req[exNb++]);
        MPI_Bsend_init(MPI_BOTTOM, 1, MPItypeSiU, rank_iu, 200, comm_cart, &req[exNb++]);
        break;

    case 10:
        MPI_Send_init(MPI_BOTTOM, 1, MPItypeSiL, rank_il, 100, comm_cart, &req[exNb++]);
        MPI_Send_init(MPI_BOTTOM, 1, MPItypeSiU, rank_iu, 200, comm_cart, &req[exNb++]);
        break;

    case 20:
        MPI_Ssend_init(MPI_BOTTOM, 1, MPItypeSiL, rank_il, 100, comm_cart, &req[exNb++]);
        MPI_Ssend_init(MPI_BOTTOM, 1, MPItypeSiU, rank_iu, 200, comm_cart, &req[exNb++]);
        break;
    }

    // Same pattern in direction j.
    if (Dimension >= 2) {
        switch (MPISendType) {
        case 0:
            MPI_Bsend_init(MPI_BOTTOM, 1, MPItypeSjL, rank_jl, 300, comm_cart, &req[exNb++]);
            MPI_Bsend_init(MPI_BOTTOM, 1, MPItypeSjU, rank_ju, 400, comm_cart, &req[exNb++]);
            break;

        case 10:
            MPI_Send_init(MPI_BOTTOM, 1, MPItypeSjL, rank_jl, 300, comm_cart, &req[exNb++]);
            MPI_Send_init(MPI_BOTTOM, 1, MPItypeSjU, rank_ju, 400, comm_cart, &req[exNb++]);
            break;

        case 20:
            MPI_Ssend_init(MPI_BOTTOM, 1, MPItypeSjL, rank_jl, 300, comm_cart, &req[exNb++]);
            MPI_Ssend_init(MPI_BOTTOM, 1, MPItypeSjU, rank_ju, 400, comm_cart, &req[exNb++]);
            break;
        }
    }

    // Same pattern in direction k.
    if (Dimension == 3) {
        switch (MPISendType) {
        case 0:
            MPI_Bsend_init(MPI_BOTTOM, 1, MPItypeSkL, rank_kl, 500, comm_cart, &req[exNb++]);
            MPI_Bsend_init(MPI_BOTTOM, 1, MPItypeSkU, rank_ku, 600, comm_cart, &req[exNb++]);
            break;

        case 10:
            MPI_Send_init(MPI_BOTTOM, 1, MPItypeSkL, rank_kl, 500, comm_cart, &req[exNb++]);
            MPI_Send_init(MPI_BOTTOM, 1, MPItypeSkU, rank_ku, 600, comm_cart, &req[exNb++]);
            break;

        case 20:
            MPI_Ssend_init(MPI_BOTTOM, 1, MPItypeSkL, rank_kl, 500, comm_cart, &req[exNb++]);
            MPI_Ssend_init(MPI_BOTTOM, 1, MPItypeSkU, rank_ku, 600, comm_cart, &req[exNb++]);
            break;
        }
    }

    // Create the matching persistent receive requests.
    MPI_Recv_init(MPI_BOTTOM, 1, MPItypeRiL, rank_il, 200, comm_cart, &req[exNb++]);
    MPI_Recv_init(MPI_BOTTOM, 1, MPItypeRiU, rank_iu, 100, comm_cart, &req[exNb++]);

    if (Dimension >= 2) {
        MPI_Recv_init(MPI_BOTTOM, 1, MPItypeRjL, rank_jl, 400, comm_cart, &req[exNb++]);
        MPI_Recv_init(MPI_BOTTOM, 1, MPItypeRjU, rank_ju, 300, comm_cart, &req[exNb++]);
    }

    if (Dimension == 3) {
        MPI_Recv_init(MPI_BOTTOM, 1, MPItypeRkL, rank_kl, 600, comm_cart, &req[exNb++]);
        MPI_Recv_init(MPI_BOTTOM, 1, MPItypeRkU, rank_ku, 500, comm_cart, &req[exNb++]);
    }
#endif
}
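CreateMPILinks only builds persistent requests; starting and completing them is left to the caller (the commented-out MPI_Startall in CPUExchange hints at that intent). Below is a minimal sketch, not part of Carmen, of how such persistent requests are typically driven; ExchangeWithPersistentRequests, reqNb and steps are illustrative names, and req[] is assumed to already hold reqNb initialized requests.

// Minimal sketch (not part of Carmen): driving persistent requests such as those
// created by CreateMPILinks.
#include <mpi.h>

void ExchangeWithPersistentRequests(MPI_Request* req, int reqNb, int steps)
{
    for (int s = 0; s < steps; s++) {
        MPI_Startall(reqNb, req);                      // re-activate all persistent requests
        MPI_Waitall(reqNb, req, MPI_STATUSES_IGNORE);  // wait until this round completes
        // ... use the freshly received ghost-cell data here ...
    }

    // Persistent requests must eventually be released.
    for (int r = 0; r < reqNb; r++)
        MPI_Request_free(&req[r]);
}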
void CreateMPITopology ( )

Parallel function DOES NOT WORK!

Returns
void
{
#if defined PARMPI
    int src;
    int periods[]={1,1,1};
    CartDims[0]=CartDims[1]=CartDims[2]=0;

    // Build a periodic Cartesian topology and obtain the rank and coordinates
    // of this process in it.
    MPI_Dims_create(size,Dimension,CartDims);
    MPI_Cart_create(MPI_COMM_WORLD,Dimension,CartDims,periods,1,&comm_cart);
    MPI_Comm_rank(comm_cart, &rank);
    MPI_Cart_coords(comm_cart,rank,Dimension,coords);

    // Ranks of the lower and upper neighbours in each direction.
    MPI_Cart_shift(comm_cart, 0, -1, &src, &rank_il);
    MPI_Cart_shift(comm_cart, 0,  1, &src, &rank_iu);

    if (Dimension >= 2) {
        MPI_Cart_shift(comm_cart, 1, -1, &src, &rank_jl);
        MPI_Cart_shift(comm_cart, 1,  1, &src, &rank_ju);
    }

    if (Dimension == 3) {
        MPI_Cart_shift(comm_cart, 2, -1, &src, &rank_kl);
        MPI_Cart_shift(comm_cart, 2,  1, &src, &rank_ku);
    }
#endif
}
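For reference, here is a minimal stand-alone program, not part of Carmen, that performs the same set-up on a periodic 2-D grid: MPI_Dims_create chooses the process grid, MPI_Cart_create builds the Cartesian communicator, and MPI_Cart_shift returns the lower/upper neighbour ranks used above.

// Minimal stand-alone sketch (not part of Carmen): periodic 2-D Cartesian topology
// with neighbour ranks, mirroring CreateMPITopology.
#include <mpi.h>
#include <cstdio>

int main(int argc, char** argv)
{
    MPI_Init(&argc, &argv);

    int size, rank;
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    int dims[2] = {0, 0};                        // let MPI choose the grid shape
    int periods[2] = {1, 1};                     // periodic in both directions
    MPI_Dims_create(size, 2, dims);

    MPI_Comm comm_cart;
    MPI_Cart_create(MPI_COMM_WORLD, 2, dims, periods, 1, &comm_cart);
    MPI_Comm_rank(comm_cart, &rank);             // rank may change since reorder = 1

    int coords[2];
    MPI_Cart_coords(comm_cart, rank, 2, coords);

    int src, rank_il, rank_iu, rank_jl, rank_ju;
    MPI_Cart_shift(comm_cart, 0, -1, &src, &rank_il);   // lower neighbour in i
    MPI_Cart_shift(comm_cart, 0,  1, &src, &rank_iu);   // upper neighbour in i
    MPI_Cart_shift(comm_cart, 1, -1, &src, &rank_jl);   // lower neighbour in j
    MPI_Cart_shift(comm_cart, 1,  1, &src, &rank_ju);   // upper neighbour in j

    std::printf("rank %d at (%d,%d): i-neighbours %d/%d, j-neighbours %d/%d\n",
                rank, coords[0], coords[1], rank_il, rank_iu, rank_jl, rank_ju);

    MPI_Comm_free(&comm_cart);
    MPI_Finalize();
    return 0;
}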

void CreateMPIType ( FineMesh *  Root )
{
#if defined PARMPI
    int i,j,k;
    int n,d,l;

    Cell *MeshCell;
    MeshCell=Root->MeshCell;

    // Receive types in direction i: the ghost cells stored in Neighbour_iL / Neighbour_iU.
    n=0;
    for (l=0;l<NeighbourNb;l++)
        for (j=0;j<one_D;j++)
            for (k=0;k<two_D;k++) FillNbAddr(Root->Neighbour_iL,l,j,k,n);
    MPI_Type_hindexed(CellElementsNb*NeighbourNb*one_D*two_D,blocklen,disp,MPI_Type,&MPItypeRiL);
    MPI_Type_commit(&MPItypeRiL);

    n=0;
    for (l=0;l<NeighbourNb;l++)
        for (j=0;j<one_D;j++)
            for (k=0;k<two_D;k++) FillNbAddr(Root->Neighbour_iU,l,j,k,n);
    MPI_Type_hindexed(CellElementsNb*NeighbourNb*one_D*two_D,blocklen,disp,MPI_Type,&MPItypeRiU);
    MPI_Type_commit(&MPItypeRiU);

    // Send types in direction i: the first and last NeighbourNb planes of MeshCell.
    n=0;
    for (l=0;l<NeighbourNb;l++)
        for (j=0;j<one_D;j++)
            for (k=0;k<two_D;k++) {
                i=l;
                d=i + (1<<ScaleNb)*(j + (1<<ScaleNb)*k);
                FillCellAddr(MeshCell,d,n);
            }
    MPI_Type_hindexed(CellElementsNb*NeighbourNb*one_D*two_D,blocklen,disp,MPI_Type,&MPItypeSiL);
    MPI_Type_commit(&MPItypeSiL);

    n=0;
    for (l=0;l<NeighbourNb;l++)
        for (j=0;j<one_D;j++)
            for (k=0;k<two_D;k++) {
                i=(1<<ScaleNb)-NeighbourNb+l;
                d=i + (1<<ScaleNb)*(j + (1<<ScaleNb)*k);
                FillCellAddr(MeshCell,d,n);
            }
    MPI_Type_hindexed(CellElementsNb*NeighbourNb*one_D*two_D,blocklen,disp,MPI_Type,&MPItypeSiU);
    MPI_Type_commit(&MPItypeSiU);

    // Same construction in direction j.
    if (Dimension >= 2) {
        n=0;
        for (l=0;l<NeighbourNb;l++)
            for (i=0;i<one_D;i++)
                for (k=0;k<two_D;k++) FillNbAddr(Root->Neighbour_jL,l,i,k,n);
        MPI_Type_hindexed(CellElementsNb*NeighbourNb*one_D*two_D,blocklen,disp,MPI_Type,&MPItypeRjL);
        MPI_Type_commit(&MPItypeRjL);

        n=0;
        for (l=0;l<NeighbourNb;l++)
            for (i=0;i<one_D;i++)
                for (k=0;k<two_D;k++) FillNbAddr(Root->Neighbour_jU,l,i,k,n);
        MPI_Type_hindexed(CellElementsNb*NeighbourNb*one_D*two_D,blocklen,disp,MPI_Type,&MPItypeRjU);
        MPI_Type_commit(&MPItypeRjU);

        n=0;
        for (l=0;l<NeighbourNb;l++)
            for (i=0;i<one_D;i++)
                for (k=0;k<two_D;k++) {
                    j=l;
                    d=i + (1<<ScaleNb)*(j + (1<<ScaleNb)*k);
                    FillCellAddr(MeshCell,d,n);
                }
        MPI_Type_hindexed(CellElementsNb*NeighbourNb*one_D*two_D,blocklen,disp,MPI_Type,&MPItypeSjL);
        MPI_Type_commit(&MPItypeSjL);

        n=0;
        for (l=0;l<NeighbourNb;l++)
            for (i=0;i<one_D;i++)
                for (k=0;k<two_D;k++) {
                    j=(1<<ScaleNb)-NeighbourNb+l;
                    d=i + (1<<ScaleNb)*(j + (1<<ScaleNb)*k);
                    FillCellAddr(MeshCell,d,n);
                }
        MPI_Type_hindexed(CellElementsNb*NeighbourNb*one_D*two_D,blocklen,disp,MPI_Type,&MPItypeSjU);
        MPI_Type_commit(&MPItypeSjU);
    }

    // Same construction in direction k.
    if (Dimension == 3) {
        n=0;
        for (l=0;l<NeighbourNb;l++)
            for (i=0;i<one_D;i++)
                for (j=0;j<two_D;j++) FillNbAddr(Root->Neighbour_kL,l,i,j,n);
        MPI_Type_hindexed(CellElementsNb*NeighbourNb*one_D*two_D,blocklen,disp,MPI_Type,&MPItypeRkL);
        MPI_Type_commit(&MPItypeRkL);

        n=0;
        for (l=0;l<NeighbourNb;l++)
            for (i=0;i<one_D;i++)
                for (j=0;j<two_D;j++) FillNbAddr(Root->Neighbour_kU,l,i,j,n);
        MPI_Type_hindexed(CellElementsNb*NeighbourNb*one_D*two_D,blocklen,disp,MPI_Type,&MPItypeRkU);
        MPI_Type_commit(&MPItypeRkU);

        n=0;
        for (l=0;l<NeighbourNb;l++)
            for (i=0;i<one_D;i++)
                for (j=0;j<two_D;j++) {
                    k=l;
                    d=i + (1<<ScaleNb)*(j + (1<<ScaleNb)*k);
                    FillCellAddr(MeshCell,d,n);
                }
        MPI_Type_hindexed(CellElementsNb*NeighbourNb*one_D*two_D,blocklen,disp,MPI_Type,&MPItypeSkL);
        MPI_Type_commit(&MPItypeSkL);

        n=0;
        for (l=0;l<NeighbourNb;l++)
            for (i=0;i<one_D;i++)
                for (j=0;j<two_D;j++) {
                    k=(1<<ScaleNb)-NeighbourNb+l;
                    d=i + (1<<ScaleNb)*(j + (1<<ScaleNb)*k);
                    FillCellAddr(MeshCell,d,n);
                }
        MPI_Type_hindexed(CellElementsNb*NeighbourNb*one_D*two_D,blocklen,disp,MPI_Type,&MPItypeSkU);
        MPI_Type_commit(&MPItypeSkU);
    }

#endif
}
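CreateMPIType describes scattered cell fields by their absolute addresses, which is why the sends and receives above pass MPI_BOTTOM as the buffer. The following minimal sketch, not part of Carmen, illustrates the same idea with the MPI-3 names MPI_Get_address and MPI_Type_create_hindexed (the current forms of the deprecated MPI_Address/MPI_Type_hindexed used in the listing); the struct S and the function BuildAbsoluteType are illustrative stand-ins for Cell and the code above.

// Minimal sketch (not part of Carmen): a derived datatype built from absolute
// addresses, as CreateMPIType does for the selected cell fields.
#include <mpi.h>
#include <vector>

struct S { double Q[4]; double X[3]; };   // stand-in for Cell: a few fields per cell

MPI_Datatype BuildAbsoluteType(S* cells, const int* indices, int nb)
{
    std::vector<int>      blocklen(nb);
    std::vector<MPI_Aint> disp(nb);

    for (int n = 0; n < nb; n++) {
        MPI_Get_address(cells[indices[n]].Q, &disp[n]);   // absolute address of the Q block
        blocklen[n] = 4;                                  // 4 doubles per block
    }

    MPI_Datatype t;
    MPI_Type_create_hindexed(nb, blocklen.data(), disp.data(), MPI_DOUBLE, &t);
    MPI_Type_commit(&t);
    // Because the displacements are absolute, communication uses MPI_BOTTOM:
    //   MPI_Isend(MPI_BOTTOM, 1, t, dest, tag, comm, &request);
    return t;
}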

void FillCellAddr ( Cell *  Mesh4MPI,
int  d,
int &  n 
)
{
#if defined PARMPI
    // For each field selected in WhatSend, store the absolute address and the
    // block length of cell Mesh4MPI[d] in the global disp[] and blocklen[] arrays.
    if ((WhatSend & SendQ) != 0) {
        MPI_Address(Mesh4MPI[d].Q.U, &disp[n]);
        blocklen[n++]=Mesh4MPI[d].Q.dimension();
    }

    if ((WhatSend & SendQs) != 0) {
        MPI_Address(Mesh4MPI[d].Qs.U, &disp[n]);
        blocklen[n++]=Mesh4MPI[d].Qs.dimension();
    }

    if ((WhatSend & SendX) != 0) {
        MPI_Address(Mesh4MPI[d].X.U, &disp[n]);
        blocklen[n++]=Mesh4MPI[d].X.dimension();
    }

    if ((WhatSend & SenddX) != 0) {
        MPI_Address(Mesh4MPI[d].dX.U, &disp[n]);
        blocklen[n++]=Mesh4MPI[d].dX.dimension();
    }

    if ((WhatSend & SendD) != 0) {
        MPI_Address(Mesh4MPI[d].D.U, &disp[n]);
        blocklen[n++]=Mesh4MPI[d].D.dimension();
    }

    if ((WhatSend & SendGrad) != 0) {
        MPI_Address(Mesh4MPI[d].Grad.U, &disp[n]);
        blocklen[n++]=Mesh4MPI[d].Grad.columns()*Mesh4MPI[d].Grad.lines();
    }
#endif
}
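The Send* constants tested above are bit flags combined into WhatSend; CPUExchange counts the set bits to obtain CellElementsNb. A minimal sketch of that convention follows, not part of Carmen, with illustrative flag values (the actual values of SendQ, SendQs, SendX, SenddX, SendD and SendGrad are defined in Parameters.cpp).

// Minimal sketch (not part of Carmen): the bitmask convention behind WhatSend.
// The flag values below are assumptions for illustration only.
enum SendFlags { SendQ = 1<<0, SendQs = 1<<1, SendX = 1<<2,
                 SenddX = 1<<3, SendD = 1<<4, SendGrad = 1<<5 };

// Same counting loop as in CPUExchange: one selected element per set bit.
int CountSelectedElements(int WhatSend)
{
    int nb = 0;
    for (int i = 0; i < 16; i++)
        if (WhatSend & (1 << i)) nb++;
    return nb;
}

// Example: exchange only the conservative quantities Q and the cell centres X.
// int WS = SendQ | SendX;
// CPUExchange(Root, WS);   // Root is the FineMesh*, as documented above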

void FillNbAddr ( Cell ***  Nb,
int  l,
int  i,
int  j,
int &  n 
)
{
#if defined PARMPI
    // For each field selected in WhatSend, store the absolute address and the
    // block length of the neighbour cell Nb[l][i][j] in disp[] and blocklen[].
    if ((WhatSend & SendQ) != 0) {
        MPI_Address(Nb[l][i][j].Q.U, &disp[n]);
        blocklen[n++]=Nb[l][i][j].Q.dimension();
    }

    if ((WhatSend & SendQs) != 0) {
        MPI_Address(Nb[l][i][j].Qs.U, &disp[n]);
        blocklen[n++]=Nb[l][i][j].Qs.dimension();
    }

    if ((WhatSend & SendX) != 0) {
        MPI_Address(Nb[l][i][j].X.U, &disp[n]);
        blocklen[n++]=Nb[l][i][j].X.dimension();
    }

    if ((WhatSend & SenddX) != 0) {
        MPI_Address(Nb[l][i][j].dX.U, &disp[n]);
        blocklen[n++]=Nb[l][i][j].dX.dimension();
    }

    if ((WhatSend & SendD) != 0) {
        MPI_Address(Nb[l][i][j].D.U, &disp[n]);
        blocklen[n++]=Nb[l][i][j].D.dimension();
    }

    if ((WhatSend & SendGrad) != 0) {
        MPI_Address(Nb[l][i][j].Grad.U, &disp[n]);
        blocklen[n++]=Nb[l][i][j].Grad.columns()*Nb[l][i][j].Grad.lines();
    }
#endif
}

void FreeMPIType ( )

Parallel function DOES NOT WORK!

Returns
void
{
#if defined PARMPI
    // Free the derived datatypes created by CreateMPIType.
    MPI_Type_free(&MPItypeSiL);
    MPI_Type_free(&MPItypeSiU);
    MPI_Type_free(&MPItypeRiL);
    MPI_Type_free(&MPItypeRiU);

    if (Dimension >= 2) {
        MPI_Type_free(&MPItypeSjL);
        MPI_Type_free(&MPItypeSjU);
        MPI_Type_free(&MPItypeRjL);
        MPI_Type_free(&MPItypeRjU);
    }

    if (Dimension == 3) {
        MPI_Type_free(&MPItypeSkL);
        MPI_Type_free(&MPItypeSkU);
        MPI_Type_free(&MPItypeRkL);
        MPI_Type_free(&MPItypeRkU);
    }
#endif
}

void ReduceIntegralValues ( )

Parallel function DOES NOT WORK!

Returns
void
{
    real rb; // Receive buffer
    rb=0.0;
    CommTimer.start();
#if defined PARMPI
    // Error norms are gathered on rank 0 only.
    MPI_Reduce(&ErrorMax,&rb,1,MPI_Type,MPI_MAX,0,MPI_COMM_WORLD);
    ErrorMax=rb;

    MPI_Reduce(&ErrorMid,&rb,1,MPI_Type,MPI_SUM,0,MPI_COMM_WORLD);
    ErrorMid=rb/size;

    MPI_Reduce(&ErrorL2,&rb,1,MPI_Type,MPI_SUM,0,MPI_COMM_WORLD);
    ErrorL2=rb/size;

    MPI_Reduce(&ErrorNb,&rb,1,MPI_Type,MPI_SUM,0,MPI_COMM_WORLD);
    ErrorNb=rb;

    // Integral quantities needed on every rank use MPI_Allreduce.
    MPI_Allreduce(&FlameVelocity,&rb,1,MPI_Type,MPI_SUM,MPI_COMM_WORLD);
    FlameVelocity=rb;

    MPI_Allreduce(&GlobalMomentum,&rb,1,MPI_Type,MPI_SUM,MPI_COMM_WORLD);
    GlobalMomentum=rb;

    MPI_Allreduce(&GlobalEnergy,&rb,1,MPI_Type,MPI_SUM,MPI_COMM_WORLD);
    GlobalEnergy=rb;

    MPI_Reduce(&ExactMomentum,&rb,1,MPI_Type,MPI_SUM,0,MPI_COMM_WORLD);
    ExactMomentum=rb;

    MPI_Reduce(&ExactEnergy,&rb,1,MPI_Type,MPI_SUM,0,MPI_COMM_WORLD);
    ExactEnergy=rb;

    MPI_Allreduce(&GlobalReactionRate,&rb,1,MPI_Type,MPI_SUM,MPI_COMM_WORLD);
    GlobalReactionRate=rb;

    MPI_Allreduce(&EigenvalueMax, &rb,1,MPI_Type,MPI_MAX,MPI_COMM_WORLD);
    EigenvalueMax=rb;

#endif
    CommTimer.stop();
}
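The function mixes MPI_Reduce, whose result is needed on rank 0 only, with MPI_Allreduce, whose result is returned on every rank. Below is a minimal sketch of the two patterns on plain doubles, not part of Carmen; GlobalMax and RootSum are illustrative names.

// Minimal sketch (not part of Carmen): the two reduction patterns used above.
#include <mpi.h>

// MPI_Allreduce: the reduced value is returned on every rank.
double GlobalMax(double local, MPI_Comm comm)
{
    double result = 0.0;
    MPI_Allreduce(&local, &result, 1, MPI_DOUBLE, MPI_MAX, comm);
    return result;
}

// MPI_Reduce: the reduced value is defined on the root rank (0) only.
double RootSum(double local, MPI_Comm comm)
{
    double result = 0.0;
    MPI_Reduce(&local, &result, 1, MPI_DOUBLE, MPI_SUM, 0, comm);
    return result;
}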