// Extracted from the deal.II 8.4.2 reference documentation:
// petsc_parallel_vector.cc
// ---------------------------------------------------------------------
//
// Copyright (C) 2004 - 2015 by the deal.II authors
//
// This file is part of the deal.II library.
//
// The deal.II library is free software; you can use it, redistribute
// it, and/or modify it under the terms of the GNU Lesser General
// Public License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
// The full text of the license can be found in the file LICENSE at
// the top level of the deal.II distribution.
//
// ---------------------------------------------------------------------
15 
16 #include <deal.II/lac/petsc_parallel_vector.h>
17 
18 #ifdef DEAL_II_WITH_PETSC
19 
20 # include <deal.II/lac/petsc_vector.h>
21 # include <cmath>
22 # include <algorithm>
23 
24 DEAL_II_NAMESPACE_OPEN
25 
26 namespace PETScWrappers
27 {
28  namespace MPI
29  {
30 
32  {
33  // this is an invalid empty vector, so we can just as well create a
34  // sequential one to avoid all the overhead incurred by parallelism
35  const int n = 0;
36  const int ierr
37  = VecCreateSeq (PETSC_COMM_SELF, n, &vector);
38  AssertThrow (ierr == 0, ExcPETScError(ierr));
39  ghosted = false;
40  }
41 
42 
43 
44  Vector::Vector (const MPI_Comm &communicator,
45  const size_type n,
46  const size_type local_size)
47  :
48  communicator (communicator)
49  {
50  Vector::create_vector (n, local_size);
51  }
52 
53 
54 
55  Vector::Vector (const MPI_Comm &communicator,
56  const VectorBase &v,
57  const size_type local_size)
58  :
59  communicator (communicator)
60  {
62 
64  }
65 
66 
67 
68  Vector::Vector (const IndexSet &local,
69  const IndexSet &ghost,
70  const MPI_Comm &communicator)
71  :
72  communicator (communicator)
73  {
74  Assert(local.is_contiguous(), ExcNotImplemented());
75 
76  IndexSet ghost_set = ghost;
77  ghost_set.subtract_set(local);
78 
79  Vector::create_vector(local.size(), local.n_elements(), ghost_set);
80  }
81 
82 
83 
84  Vector::Vector (const IndexSet &local,
85  const MPI_Comm &communicator)
86  :
87  communicator (communicator)
88  {
89  Assert(local.is_contiguous(), ExcNotImplemented());
90  Vector::create_vector(local.size(), local.n_elements());
91  }
92 
93 
94 
95  void
97  {
98  // destroy the PETSc Vec and create an invalid empty vector,
99  // so we can just as well create a sequential one to avoid
100  // all the overhead incurred by parallelism
101  attained_ownership = true;
103 
104  const int n = 0;
105  int ierr = VecCreateSeq (PETSC_COMM_SELF, n, &vector);
106  AssertThrow (ierr == 0, ExcPETScError(ierr));
107  }
108 
109 
110 
111  void
112  Vector::reinit (const MPI_Comm &comm,
113  const size_type n,
114  const size_type local_sz,
115  const bool omit_zeroing_entries)
116  {
117  communicator = comm;
118 
119  // only do something if the sizes
120  // mismatch (may not be true for every proc)
121 
122  int k_global, k = ((size() != n) || (local_size() != local_sz));
123  MPI_Allreduce (&k, &k_global, 1,
124  MPI_INT, MPI_LOR, communicator);
125 
126  if (k_global || has_ghost_elements())
127  {
128  // FIXME: I'd like to use this here,
129  // but somehow it leads to odd errors
130  // somewhere down the line in some of
131  // the tests:
132 // const int ierr = VecSetSizes (vector, n, n);
133 // AssertThrow (ierr == 0, ExcPETScError(ierr));
134 
135  // so let's go the slow way:
136  int ierr;
137 
138 #if DEAL_II_PETSC_VERSION_LT(3,2,0)
139  ierr = VecDestroy (vector);
140 #else
141  ierr = VecDestroy (&vector);
142 #endif
143 
144  AssertThrow (ierr == 0, ExcPETScError(ierr));
145 
146  create_vector (n, local_sz);
147  }
148 
149  // finally clear the new vector if so
150  // desired
151  if (omit_zeroing_entries == false)
152  *this = 0;
153  }
154 
155 
156 
157  void
159  const bool omit_zeroing_entries)
160  {
161  if (v.has_ghost_elements())
162  {
164  if (!omit_zeroing_entries)
165  {
166  int ierr = VecSet(vector, 0.0);
167  AssertThrow (ierr == 0, ExcPETScError(ierr));
168  }
169  }
170  else
171  reinit (v.communicator, v.size(), v.local_size(), omit_zeroing_entries);
172  }
173 
174 
175 
176  void
177  Vector::reinit (const IndexSet &local,
178  const IndexSet &ghost,
179  const MPI_Comm &comm)
180  {
181  int ierr;
182 #if DEAL_II_PETSC_VERSION_LT(3,2,0)
183  ierr = VecDestroy (vector);
184 #else
185  ierr = VecDestroy (&vector);
186 #endif
187  AssertThrow (ierr == 0, ExcPETScError(ierr));
188 
189  communicator = comm;
190 
191  Assert(local.is_contiguous(), ExcNotImplemented());
192 
193  IndexSet ghost_set = ghost;
194  ghost_set.subtract_set(local);
195 
196  create_vector(local.size(), local.n_elements(), ghost_set);
197  }
198 
199  void
200  Vector::reinit (const IndexSet &local,
201  const MPI_Comm &comm)
202  {
203  int ierr;
204 #if DEAL_II_PETSC_VERSION_LT(3,2,0)
205  ierr = VecDestroy (vector);
206 #else
207  ierr = VecDestroy (&vector);
208 #endif
209  AssertThrow (ierr == 0, ExcPETScError(ierr));
210 
211  communicator = comm;
212 
213  Assert(local.is_contiguous(), ExcNotImplemented());
214  Assert(local.size()>0, ExcMessage("can not create vector of size 0."));
215  create_vector(local.size(), local.n_elements());
216  }
217 
218 
219  Vector &
221  {
222  Assert(last_action==VectorOperation::unknown,
223  ExcMessage("Call to compress() required before calling operator=."));
224  //TODO [TH]: can not access v.last_action here. Implement is_compressed()?
225  //Assert(v.last_action==VectorOperation::unknown,
226  // ExcMessage("Call to compress() required before calling operator=."));
227  int ierr;
228 
229  // get a pointer to the local memory of
230  // this vector
231  PetscScalar *dest_array;
232  ierr = VecGetArray (vector, &dest_array);
233  AssertThrow (ierr == 0, ExcPETScError(ierr));
234 
235  // then also a pointer to the source
236  // vector
237  PetscScalar *src_array;
238  ierr = VecGetArray (static_cast<const Vec &>(v), &src_array);
239  AssertThrow (ierr == 0, ExcPETScError(ierr));
240 
241  // then copy:
242  const std::pair<size_type, size_type>
243  local_elements = local_range ();
244  std::copy (src_array + local_elements.first,
245  src_array + local_elements.second,
246  dest_array);
247 
248  // finally restore the arrays
249  ierr = VecRestoreArray (vector, &dest_array);
250  AssertThrow (ierr == 0, ExcPETScError(ierr));
251 
252  ierr = VecRestoreArray (static_cast<const Vec &>(v), &src_array);
253  AssertThrow (ierr == 0, ExcPETScError(ierr));
254 
255  if (has_ghost_elements())
256  {
257  ierr = VecGhostUpdateBegin(vector, INSERT_VALUES, SCATTER_FORWARD);
258  AssertThrow (ierr == 0, ExcPETScError(ierr));
259  ierr = VecGhostUpdateEnd(vector, INSERT_VALUES, SCATTER_FORWARD);
260  AssertThrow (ierr == 0, ExcPETScError(ierr));
261  }
262  return *this;
263  }
264 
265 
266  void
268  const size_type local_size)
269  {
270  (void)n;
271  Assert (local_size <= n, ExcIndexRange (local_size, 0, n));
272  ghosted = false;
273 
274  const int ierr
275  = VecCreateMPI (communicator, local_size, PETSC_DETERMINE,
276  &vector);
277  AssertThrow (ierr == 0, ExcPETScError(ierr));
278 
279  Assert (size() == n,
280  ExcDimensionMismatch (size(), n));
281  }
282 
283 
284 
285  void
287  const size_type local_size,
288  const IndexSet &ghostnodes)
289  {
290  (void)n;
291  Assert (local_size <= n, ExcIndexRange (local_size, 0, n));
292  ghosted = true;
293  ghost_indices = ghostnodes;
294 
295  std::vector<size_type> ghostindices;
296  ghostnodes.fill_index_vector(ghostindices);
297 
298  const PetscInt *ptr
299  = (ghostindices.size() > 0
300  ?
301  (const PetscInt *)(&(ghostindices[0]))
302  :
303  0);
304 
305  int ierr
306  = VecCreateGhost(communicator,
307  local_size,
308  PETSC_DETERMINE,
309  ghostindices.size(),
310  ptr,
311  &vector);
312 
313  AssertThrow (ierr == 0, ExcPETScError(ierr));
314 
315  Assert (size() == n,
316  ExcDimensionMismatch (size(), n));
317 
318 #if DEBUG
319  {
320  // test ghost allocation in debug mode
321  PetscInt begin, end;
322 
323  ierr = VecGetOwnershipRange (vector, &begin, &end);
324 
325  Assert(local_size==(size_type)(end-begin), ExcInternalError());
326 
327  Vec l;
328  ierr = VecGhostGetLocalForm(vector, &l);
329  AssertThrow (ierr == 0, ExcPETScError(ierr));
330 
331  PetscInt lsize;
332  ierr = VecGetSize(l, &lsize);
333  AssertThrow (ierr == 0, ExcPETScError(ierr));
334 
335  ierr = VecGhostRestoreLocalForm(vector, &l);
336  AssertThrow (ierr == 0, ExcPETScError(ierr));
337 
338  Assert (lsize==end-begin+(PetscInt)ghost_indices.n_elements(),
339  ExcInternalError());
340  }
341 #endif
342 
343 
344  // in PETSc versions up to 3.5, VecCreateGhost zeroed out the locally
345  // owned vector elements but forgot about the ghost elements. we need to
346  // do this ourselves
347  //
348  // see https://code.google.com/p/dealii/issues/detail?id=233
349 #if DEAL_II_PETSC_VERSION_LT(3,6,0)
351  zero.reinit (communicator, this->size(), local_size);
352  *this = zero;
353 #endif
354 
355  }
356 
357 
358 
359  bool
361  {
362  unsigned int has_nonzero = VectorBase::all_zero()?0:1;
363 #ifdef DEAL_II_WITH_MPI
364  // in parallel, check that the vector
365  // is zero on _all_ processors.
366  unsigned int num_nonzero = Utilities::MPI::sum(has_nonzero, communicator);
367  return num_nonzero == 0;
368 #else
369  return has_nonzero == 0;
370 #endif
371  }
372 
373 
374  void
375  Vector::print (std::ostream &out,
376  const unsigned int precision,
377  const bool scientific,
378  const bool across) const
379  {
380  AssertThrow (out, ExcIO());
381 
382  // get a representation of the vector and
383  // loop over all the elements
384  PetscScalar *val;
385  PetscInt nlocal, istart, iend;
386 
387  int ierr = VecGetArray (vector, &val);
388 
389  AssertThrow (ierr == 0, ExcPETScError(ierr));
390 
391  ierr = VecGetLocalSize (vector, &nlocal);
392 
393  AssertThrow (ierr == 0, ExcPETScError(ierr));
394 
395  ierr = VecGetOwnershipRange (vector, &istart, &iend);
396 
397  AssertThrow (ierr == 0, ExcPETScError(ierr));
398 
399  // save the state of out stream
400  std::ios::fmtflags old_flags = out.flags();
401  unsigned int old_precision = out.precision (precision);
402 
403  out.precision (precision);
404  if (scientific)
405  out.setf (std::ios::scientific, std::ios::floatfield);
406  else
407  out.setf (std::ios::fixed, std::ios::floatfield);
408 
409  for ( unsigned int i = 0;
411  i++)
412  {
413  // This is slow, but most likely only used to debug.
414  MPI_Barrier(communicator);
416  {
417  if (across)
418  {
419  out << "[Proc" << i << " " << istart << "-" << iend-1 << "]" << ' ';
420  for (PetscInt i=0; i<nlocal; ++i)
421  out << val[i] << ' ';
422  }
423  else
424  {
425  out << "[Proc " << i << " " << istart << "-" << iend-1 << "]" << std::endl;
426  for (PetscInt i=0; i<nlocal; ++i)
427  out << val[i] << std::endl;
428  }
429  out << std::endl;
430  }
431  }
432  // reset output format
433  out.flags (old_flags);
434  out.precision(old_precision);
435 
436  // restore the representation of the
437  // vector
438  ierr = VecRestoreArray (vector, &val);
439  AssertThrow (ierr == 0, ExcPETScError(ierr));
440 
441  AssertThrow (out, ExcIO());
442  }
443 
444  }
445 
446 }
447 
448 DEAL_II_NAMESPACE_CLOSE
449 
450 #endif // DEAL_II_WITH_PETSC
/* Doxygen cross-reference index entries appended by the documentation
   extraction; not part of the original source file. Preserved for reference:

   types::global_dof_index size_type
   Vector & operator=(const Vector &v)
   ::ExceptionBase & ExcMessage(std::string arg1)
   #define AssertThrow(cond, exc)            (exceptions.h:358)
   VectorBase & operator=(const PetscScalar s)
   std::pair< size_type, size_type > local_range() const
   size_type size() const                    (index_set.h:1247)
   T sum(const T &t, const MPI_Comm &mpi_communicator)   (mpi.h:611)
   void subtract_set(const IndexSet &other)  (index_set.cc:269)
   VectorOperation::values last_action
   #define Assert(cond, exc)                 (exceptions.h:294)
   unsigned int n_mpi_processes(const MPI_Comm &mpi_communicator)  (mpi.cc:99)
   void fill_index_vector(std::vector< size_type > &indices) const (index_set.cc:461)
   bool has_ghost_elements() const
   bool is_contiguous() const                (index_set.h:1370)
   void reinit(const MPI_Comm &communicator, const size_type N,
               const size_type local_size,
               const bool omit_zeroing_entries=false)
   IndexSet locally_owned_elements() const
   virtual void create_vector(const size_type n, const size_type local_size)
   unsigned int this_mpi_process(const MPI_Comm &mpi_communicator) (mpi.cc:108)
   void print(std::ostream &out, const unsigned int precision=3,
              const bool scientific=true, const bool across=true) const
   size_type n_elements() const              (index_set.h:1380)
*/