Reference documentation for deal.II version 8.4.2
tria_base.cc
// ---------------------------------------------------------------------
//
// Copyright (C) 2015 by the deal.II authors
//
// This file is part of the deal.II library.
//
// The deal.II library is free software; you can use it, redistribute
// it, and/or modify it under the terms of the GNU Lesser General
// Public License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
// The full text of the license can be found in the file LICENSE at
// the top level of the deal.II distribution.
//
// ---------------------------------------------------------------------


#include <deal.II/base/utilities.h>
#include <deal.II/base/memory_consumption.h>
#include <deal.II/base/logstream.h>
#include <deal.II/lac/sparsity_tools.h>
#include <deal.II/lac/sparsity_pattern.h>
#include <deal.II/grid/tria.h>
#include <deal.II/grid/tria_accessor.h>
#include <deal.II/grid/tria_iterator.h>
#include <deal.II/grid/grid_tools.h>
#include <deal.II/distributed/tria_base.h>


#include <algorithm>
#include <numeric>
#include <iostream>
#include <fstream>


DEAL_II_NAMESPACE_OPEN

namespace parallel
{

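  // Constructor: duplicate the given communicator so that this
  // triangulation owns a private copy, record this process's rank and
  // the total number of ranks, and size the per-rank cell-count cache.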
  template <int dim, int spacedim>
  Triangulation<dim,spacedim>::Triangulation (MPI_Comm mpi_communicator,
                                              const typename dealii::Triangulation<dim,spacedim>::MeshSmoothing smooth_grid,
                                              const bool check_for_distorted_cells)
    :
    dealii::Triangulation<dim,spacedim>(smooth_grid,check_for_distorted_cells),
    mpi_communicator (Utilities::MPI::duplicate_communicator(mpi_communicator)),
    my_subdomain (Utilities::MPI::this_mpi_process (this->mpi_communicator)),
    n_subdomains (Utilities::MPI::n_mpi_processes (mpi_communicator))
  {
#ifndef DEAL_II_WITH_MPI
    Assert(false, ExcMessage("You compiled deal.II without MPI support, for "
                             "which parallel::Triangulation is not available."));
#endif
    number_cache.n_locally_owned_active_cells.resize (n_subdomains);
  }

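  // If the triangulation to copy from is itself a parallel one, adopt
  // (a duplicate of) its communicator.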
  template <int dim, int spacedim>
  void
  Triangulation<dim,spacedim>::copy_triangulation (const dealii::Triangulation<dim, spacedim> &old_tria)
  {
#ifndef DEAL_II_WITH_MPI
    Assert(false, ExcNotImplemented());
#endif
    if (const dealii::parallel::Triangulation<dim,spacedim> *
        old_tria_x = dynamic_cast<const dealii::parallel::Triangulation<dim,spacedim> *>(&old_tria))
      {
        mpi_communicator = Utilities::MPI::duplicate_communicator (old_tria_x->get_communicator ());
      }
  }


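  // Memory footprint: the base class contribution plus the
  // parallel-specific members.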
  template <int dim, int spacedim>
  std::size_t
  Triangulation<dim,spacedim>::memory_consumption () const
  {
    std::size_t mem=
      this->dealii::Triangulation<dim,spacedim>::memory_consumption()
      + MemoryConsumption::memory_consumption(mpi_communicator)
      + MemoryConsumption::memory_consumption(my_subdomain)
      + MemoryConsumption::memory_consumption(number_cache.n_locally_owned_active_cells)
      + MemoryConsumption::memory_consumption(number_cache.n_global_active_cells)
      + MemoryConsumption::memory_consumption(number_cache.n_global_levels);
    return mem;
  }

  template <int dim, int spacedim>
  Triangulation<dim,spacedim>::~Triangulation ()
  {
#ifdef DEAL_II_WITH_MPI
    // get rid of the unique communicator used here again
    MPI_Comm_free (&this->mpi_communicator);
#endif
  }

  template <int dim, int spacedim>
  Triangulation<dim,spacedim>::NumberCache::NumberCache()
    :
    n_global_active_cells(0),
    n_global_levels(0)
  {}

  template <int dim, int spacedim>
  unsigned int
  Triangulation<dim,spacedim>::n_locally_owned_active_cells () const
  {
    return number_cache.n_locally_owned_active_cells[my_subdomain];
  }

  template <int dim, int spacedim>
  unsigned int
  Triangulation<dim,spacedim>::n_global_levels () const
  {
    return number_cache.n_global_levels;
  }

  template <int dim, int spacedim>
  types::global_dof_index
  Triangulation<dim,spacedim>::n_global_active_cells () const
  {
    return number_cache.n_global_active_cells;
  }

  template <int dim, int spacedim>
  const std::vector<unsigned int> &
  Triangulation<dim,spacedim>::n_locally_owned_active_cells_per_processor () const
  {
    return number_cache.n_locally_owned_active_cells;
  }

  template <int dim, int spacedim>
  MPI_Comm
  Triangulation<dim,spacedim>::get_communicator () const
  {
    return mpi_communicator;
  }

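  // Recompute the number cache after the mesh has changed: count the
  // locally owned active cells on each rank, collect the owners of
  // ghost cells, and derive the global number of active cells and of
  // levels across all ranks.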
#ifdef DEAL_II_WITH_MPI
  template <int dim, int spacedim>
  void
  Triangulation<dim,spacedim>::update_number_cache ()
  {
    Assert (number_cache.n_locally_owned_active_cells.size()
            ==
            Utilities::MPI::n_mpi_processes (this->mpi_communicator),
            ExcInternalError());

    std::fill (number_cache.n_locally_owned_active_cells.begin(),
               number_cache.n_locally_owned_active_cells.end(),
               0);

    number_cache.ghost_owners.clear ();
    number_cache.level_ghost_owners.clear ();

    if (this->n_levels() == 0)
      {
        // Skip communication done below if we do not have any cells
        // (meaning the Triangulation is empty on all processors). This will
        // happen when called from the destructor of Triangulation, which
        // can get called during exception handling causing a hang in this
        // function.
        number_cache.n_global_active_cells = 0;
        number_cache.n_global_levels = 0;
        return;
      }


    {
      // find ghost owners
      for (typename Triangulation<dim,spacedim>::active_cell_iterator
           cell = this->begin_active();
           cell != this->end();
           ++cell)
        if (cell->is_ghost())
          number_cache.ghost_owners.insert(cell->subdomain_id());

      Assert(number_cache.ghost_owners.size() <
             Utilities::MPI::n_mpi_processes(mpi_communicator),
             ExcInternalError());
    }

    if (this->n_levels() > 0)
      for (typename Triangulation<dim,spacedim>::active_cell_iterator
           cell = this->begin_active();
           cell != this->end(); ++cell)
        if (cell->subdomain_id() == my_subdomain)
          ++number_cache.n_locally_owned_active_cells[my_subdomain];

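    // Exchange the locally owned cell count of every rank: after the
    // allgather below, each entry of n_locally_owned_active_cells holds
    // the number of active cells owned by the corresponding rank.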
    unsigned int send_value
      = number_cache.n_locally_owned_active_cells[my_subdomain];
    MPI_Allgather (&send_value,
                   1,
                   MPI_UNSIGNED,
                   &number_cache.n_locally_owned_active_cells[0],
                   1,
                   MPI_UNSIGNED,
                   this->mpi_communicator);

    number_cache.n_global_active_cells
      = std::accumulate (number_cache.n_locally_owned_active_cells.begin(),
                         number_cache.n_locally_owned_active_cells.end(),
                         /* ensure sum is computed with correct data type:*/
                         static_cast<types::global_dof_index>(0));
    number_cache.n_global_levels = Utilities::MPI::max(this->n_levels(), this->mpi_communicator);
  }
#else
  template <int dim, int spacedim>
  void
  Triangulation<dim,spacedim>::update_number_cache ()
  {
    Assert (false, ExcNotImplemented());
  }

#endif

  template <int dim, int spacedim>
  types::subdomain_id
  Triangulation<dim,spacedim>::locally_owned_subdomain () const
  {
    Assert (dim > 1, ExcNotImplemented());
    return my_subdomain;
  }

  template <int dim, int spacedim>
  const std::set<unsigned int> &
  Triangulation<dim,spacedim>::ghost_owners () const
  {
    return number_cache.ghost_owners;
  }

  template <int dim, int spacedim>
  const std::set<unsigned int> &
  Triangulation<dim,spacedim>::level_ghost_owners () const
  {
    return number_cache.level_ghost_owners;
  }

} // end namespace parallel




/*-------------- Explicit Instantiations -------------------------------*/
#include "tria_base.inst"

DEAL_II_NAMESPACE_CLOSE
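

parallel::Triangulation itself is only a base class; user programs normally
instantiate a derived class such as parallel::distributed::Triangulation. The
following is a minimal sketch, not part of tria_base.cc, assuming a deal.II
8.4 build with MPI enabled (and p4est for the distributed triangulation); it
exercises the accessors defined above:

#include <deal.II/base/mpi.h>
#include <deal.II/distributed/tria.h>
#include <deal.II/grid/grid_generator.h>

#include <iostream>

int main (int argc, char *argv[])
{
  // Set up MPI (and finalize it automatically on scope exit).
  dealii::Utilities::MPI::MPI_InitFinalize mpi_init (argc, argv, 1);

  // The parallel::Triangulation base class constructor duplicates the
  // communicator passed here for internal use.
  dealii::parallel::distributed::Triangulation<2> tria (MPI_COMM_WORLD);

  dealii::GridGenerator::hyper_cube (tria);
  tria.refine_global (4);

  // Accessors implemented in tria_base.cc:
  std::cout << "rank " << tria.locally_owned_subdomain()
            << " owns " << tria.n_locally_owned_active_cells()
            << " of "   << tria.n_global_active_cells()
            << " active cells; ghost owners: "
            << tria.ghost_owners().size()
            << std::endl;
}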