Reference documentation for deal.II version 8.4.2
mg_level_global_transfer.cc
// ---------------------------------------------------------------------
//
// Copyright (C) 2003 - 2016 by the deal.II authors
//
// This file is part of the deal.II library.
//
// The deal.II library is free software; you can use it, redistribute
// it, and/or modify it under the terms of the GNU Lesser General
// Public License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
// The full text of the license can be found in the file LICENSE at
// the top level of the deal.II distribution.
//
// ---------------------------------------------------------------------


#include <deal.II/base/logstream.h>
#include <deal.II/base/function.h>

#include <deal.II/lac/vector.h>
#include <deal.II/lac/block_vector.h>
#include <deal.II/lac/parallel_vector.h>
#include <deal.II/lac/parallel_block_vector.h>
#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/petsc_block_vector.h>
#include <deal.II/lac/trilinos_vector.h>
#include <deal.II/lac/trilinos_block_vector.h>
#include <deal.II/grid/tria.h>
#include <deal.II/grid/tria_iterator.h>
#include <deal.II/dofs/dof_tools.h>
#include <deal.II/fe/fe.h>
#include <deal.II/dofs/dof_accessor.h>
#include <deal.II/multigrid/mg_tools.h>
#include <deal.II/multigrid/mg_transfer.h>
#include <deal.II/multigrid/mg_transfer.templates.h>

#include <algorithm>

DEAL_II_NAMESPACE_OPEN


namespace
{
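  // A (global index, level index) pair for a degree of freedom on a given
  // level; this is the unit of data that fill_copy_indices() below ships
  // between processors when the two indices are owned by different
  // processors.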
  struct DoFPair
  {
    unsigned int            level;
    types::global_dof_index global_dof_index;
    types::global_dof_index level_dof_index;

    DoFPair(const unsigned int level,
            const types::global_dof_index global_dof_index,
            const types::global_dof_index level_dof_index)
      :
      level(level), global_dof_index(global_dof_index), level_dof_index(level_dof_index)
    {}

    DoFPair()
    {}
  };


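  // Internal helper used by MGLevelGlobalTransfer::fill_and_communicate_copy_indices():
  // for every level, it fills the (global index, level index) pairs that
  // connect the global vector with the level vectors, split according to
  // which processor owns the global and the level index.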
  template <int dim, int spacedim>
  void fill_copy_indices(const DoFHandler<dim,spacedim> &mg_dof,
                         const MGConstrainedDoFs *mg_constrained_dofs,
                         std::vector<std::vector<std::pair<types::global_dof_index, types::global_dof_index> > > &copy_indices,
                         std::vector<std::vector<std::pair<types::global_dof_index, types::global_dof_index> > > &copy_indices_global_mine,
                         std::vector<std::vector<std::pair<types::global_dof_index, types::global_dof_index> > > &copy_indices_level_mine)
  {
    // Now we are filling the variables copy_indices*, which are essentially
    // maps from global to level dof indices for each level, stored as a
    // std::vector of pairs. We need to split this map on each level
    // depending on the ownership of the global and the level dof, so that
    // we do not later access non-local elements in copy_to/from_mg.
    // We keep track in the bitfield dof_touched which global dof has
    // been processed already (on the current level). This is the same as
    // for multigrid running in serial.
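    // Concretely, each locally relevant DoF ends up in exactly one of three
    // buckets on each level:
    //  - copy_indices[level]:             global and level index are both
    //                                     locally owned;
    //  - copy_indices_global_mine[level]: global index locally owned, level
    //                                     index owned by another processor
    //                                     (the pair is also sent to that
    //                                     processor);
    //  - copy_indices_level_mine[level]:  level index locally owned, global
    //                                     index owned elsewhere (such pairs
    //                                     arrive through the MPI exchange
    //                                     further down).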

    // map cpu_index -> vector of data
    // that will be copied into copy_indices_level_mine
    std::vector<DoFPair> send_data_temp;

    const unsigned int n_levels = mg_dof.get_triangulation().n_global_levels();
    copy_indices.resize(n_levels);
    copy_indices_global_mine.resize(n_levels);
    copy_indices_level_mine.resize(n_levels);
    IndexSet globally_relevant;
    DoFTools::extract_locally_relevant_dofs(mg_dof, globally_relevant);

    const unsigned int dofs_per_cell = mg_dof.get_fe().dofs_per_cell;
    std::vector<types::global_dof_index> global_dof_indices (dofs_per_cell);
    std::vector<types::global_dof_index> level_dof_indices  (dofs_per_cell);

    for (unsigned int level=0; level<n_levels; ++level)
      {
        std::vector<bool> dof_touched(globally_relevant.n_elements(), false);
        copy_indices[level].clear();
        copy_indices_level_mine[level].clear();
        copy_indices_global_mine[level].clear();

        typename DoFHandler<dim,spacedim>::active_cell_iterator
        level_cell = mg_dof.begin_active(level);
        const typename DoFHandler<dim,spacedim>::active_cell_iterator
        level_end  = mg_dof.end_active(level);

        for (; level_cell!=level_end; ++level_cell)
          {
            if (mg_dof.get_triangulation().locally_owned_subdomain()!=numbers::invalid_subdomain_id
                && (level_cell->level_subdomain_id()==numbers::artificial_subdomain_id
                    || level_cell->subdomain_id()==numbers::artificial_subdomain_id)
               )
              continue;

            // get the dof numbers of this cell for the global and the
            // level-wise numbering
            level_cell->get_dof_indices (global_dof_indices);
            level_cell->get_mg_dof_indices (level_dof_indices);

            for (unsigned int i=0; i<dofs_per_cell; ++i)
              {
                // we need to ignore if the DoF is on a refinement edge (hanging node)
                if (mg_constrained_dofs != 0
                    && mg_constrained_dofs->at_refinement_edge(level, level_dof_indices[i]))
                  continue;
                types::global_dof_index global_idx = globally_relevant.index_within_set(global_dof_indices[i]);
                // skip if we did this global dof already (on this or a coarser level)
                if (dof_touched[global_idx])
                  continue;
                bool global_mine = mg_dof.locally_owned_dofs().is_element(global_dof_indices[i]);
                bool level_mine = mg_dof.locally_owned_mg_dofs(level).is_element(level_dof_indices[i]);


                if (global_mine && level_mine)
                  {
                    copy_indices[level].push_back(
                      std::make_pair (global_dof_indices[i], level_dof_indices[i]));
                  }
                else if (global_mine)
                  {
                    copy_indices_global_mine[level].push_back(
                      std::make_pair (global_dof_indices[i], level_dof_indices[i]));

                    // send this to the owner of the level_dof:
                    send_data_temp.push_back(DoFPair(level, global_dof_indices[i], level_dof_indices[i]));
                  }
                else
                  {
                    // somebody will send those to me
                  }

                dof_touched[global_idx] = true;
              }
          }
      }

    const parallel::distributed::Triangulation<dim,spacedim> *tria =
      (dynamic_cast<const parallel::distributed::Triangulation<dim,spacedim>*>
       (&mg_dof.get_triangulation()));
    AssertThrow(send_data_temp.size()==0 || tria!=NULL,
                ExcMessage("parallel Multigrid only works with a distributed Triangulation!"));

#ifdef DEAL_II_WITH_MPI
    if (tria)
      {
        // TODO: Searching the owner for every single DoF becomes quite
        // inefficient. Please fix this, Timo.
        // The list of neighbors is symmetric (our neighbors have us as a
        // neighbor), so we can use it to send and to know how many messages
        // we will get.
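        // In other words: every DoFPair whose level index lives on another
        // processor is appended to the message for that neighbor, each
        // neighbor receives exactly one (possibly empty) message with tag 71,
        // and the received pairs are appended to copy_indices_level_mine on
        // the receiving side.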
        std::set<unsigned int> neighbors = tria->level_ghost_owners();
        std::map<int, std::vector<DoFPair> > send_data;

        // * find owners of the level dofs and insert into send_data accordingly
        for (typename std::vector<DoFPair>::iterator dofpair=send_data_temp.begin();
             dofpair != send_data_temp.end(); ++dofpair)
          {
            std::set<unsigned int>::iterator it;
            for (it = neighbors.begin(); it != neighbors.end(); ++it)
              {
                if (mg_dof.locally_owned_mg_dofs_per_processor(dofpair->level)[*it].is_element(dofpair->level_dof_index))
                  {
                    send_data[*it].push_back(*dofpair);
                    break;
                  }
              }
            // Is this level dof not owned by any of our neighbors? That
            // would certainly be a bug!
            Assert(it!=neighbors.end(), ExcMessage("could not find DoF owner."));
          }

        // * send
        std::vector<MPI_Request> requests;
        {
          for (std::set<unsigned int>::iterator it = neighbors.begin(); it != neighbors.end(); ++it)
            {
              requests.push_back(MPI_Request());
              unsigned int dest = *it;
              std::vector<DoFPair> &data = send_data[dest];
              // If there is nothing to send, we still need to send a message,
              // because the receiving end will be waiting. In that case we
              // just send an empty message.
              if (data.size())
                MPI_Isend(&data[0], data.size()*sizeof(data[0]), MPI_BYTE, dest, 71,
                          tria->get_communicator(), &*requests.rbegin());
              else
                MPI_Isend(NULL, 0, MPI_BYTE, dest, 71,
                          tria->get_communicator(), &*requests.rbegin());
            }
        }

        // * receive
        {
          // We should get one message from each of our neighbors
          std::vector<DoFPair> receive_buffer;
          for (unsigned int counter=0; counter<neighbors.size(); ++counter)
            {
              MPI_Status status;
              int len;
              MPI_Probe(MPI_ANY_SOURCE, 71, tria->get_communicator(), &status);
              MPI_Get_count(&status, MPI_BYTE, &len);

              if (len==0)
                {
                  int err = MPI_Recv(NULL, 0, MPI_BYTE, status.MPI_SOURCE, status.MPI_TAG,
                                     tria->get_communicator(), &status);
                  AssertThrow(err==MPI_SUCCESS, ExcInternalError());
                  continue;
                }

              int count = len / sizeof(DoFPair);
              Assert(static_cast<int>(count * sizeof(DoFPair)) == len, ExcInternalError());
              receive_buffer.resize(count);

              void *ptr = &receive_buffer[0];
              int err = MPI_Recv(ptr, len, MPI_BYTE, status.MPI_SOURCE, status.MPI_TAG,
                                 tria->get_communicator(), &status);
              AssertThrow(err==MPI_SUCCESS, ExcInternalError());

              for (unsigned int i=0; i<receive_buffer.size(); ++i)
                {
                  copy_indices_level_mine[receive_buffer[i].level].push_back(
                    std::make_pair (receive_buffer[i].global_dof_index, receive_buffer[i].level_dof_index)
                  );
                }
            }
        }

        // * wait for all MPI_Isend to complete
        if (requests.size() > 0)
          {
            MPI_Waitall(requests.size(), &requests[0], MPI_STATUSES_IGNORE);
            requests.clear();
          }
#ifdef DEBUG
        // Make sure in debug mode that everybody sent/received all messages
        // on this level. If a deadlock occurs here, the list of expected
        // senders is not computed correctly.
        MPI_Barrier(tria->get_communicator());
#endif
      }
#endif

    // Sort the indices. This will produce more reliable debug output for
    // regression tests and likely won't hurt performance even in release mode.
    std::less<std::pair<types::global_dof_index, types::global_dof_index> > compare;
    for (unsigned int level=0; level<copy_indices.size(); ++level)
      std::sort(copy_indices[level].begin(), copy_indices[level].end(), compare);
    for (unsigned int level=0; level<copy_indices_level_mine.size(); ++level)
      std::sort(copy_indices_level_mine[level].begin(), copy_indices_level_mine[level].end(), compare);
    for (unsigned int level=0; level<copy_indices_global_mine.size(); ++level)
      std::sort(copy_indices_global_mine[level].begin(), copy_indices_global_mine[level].end(), compare);
  }
}



/* ------------------ MGLevelGlobalTransfer<VectorType> ----------------- */


template <typename VectorType>
template <int dim, int spacedim>
void
MGLevelGlobalTransfer<VectorType>::fill_and_communicate_copy_indices
(const DoFHandler<dim,spacedim> &mg_dof)
{
  fill_copy_indices(mg_dof, mg_constrained_dofs, copy_indices,
                    copy_indices_global_mine, copy_indices_level_mine);

  // check if we can run a plain copy operation between the global DoFs and
  // the finest level.
  perform_plain_copy =
    (copy_indices.back().size() == mg_dof.locally_owned_dofs().n_elements())
    &&
    (mg_dof.locally_owned_dofs().n_elements() ==
     mg_dof.locally_owned_mg_dofs(mg_dof.get_triangulation().n_global_levels()-1).n_elements());
  if (perform_plain_copy)
    {
      AssertDimension(copy_indices_global_mine.back().size(), 0);
      AssertDimension(copy_indices_level_mine.back().size(), 0);

      // check whether there is a renumbering of degrees of freedom on
      // either the finest level or the global dofs, which means that we
      // cannot apply a plain copy
      for (unsigned int i=0; i<copy_indices.back().size(); ++i)
        if (copy_indices.back()[i].first != copy_indices.back()[i].second)
          {
            perform_plain_copy = false;
            break;
          }
    }
  const parallel::Triangulation<dim, spacedim> *ptria =
    dynamic_cast<const parallel::Triangulation<dim, spacedim> *>
    (&mg_dof.get_tria());
  const MPI_Comm mpi_communicator = ptria != 0 ? ptria->get_communicator() :
                                    MPI_COMM_SELF;
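  // Each processor has so far only looked at its own indices, so reduce the
  // flag over the communicator: a plain copy is only possible if it is
  // possible on every processor.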
  perform_plain_copy =
    Utilities::MPI::min(static_cast<int>(perform_plain_copy),
                        mpi_communicator);

}



template <typename VectorType>
void
MGLevelGlobalTransfer<VectorType>::clear()
{
  sizes.resize(0);
  copy_indices.clear();
  copy_indices_global_mine.clear();
  copy_indices_level_mine.clear();
  component_to_block_map.resize(0);
  mg_constrained_dofs = 0;
}


template <typename VectorType>
void
MGLevelGlobalTransfer<VectorType>::print_indices (std::ostream &os) const
{
  for (unsigned int level = 0; level<copy_indices.size(); ++level)
    {
      for (unsigned int i=0; i<copy_indices[level].size(); ++i)
        os << "copy_indices[" << level
           << "]\t" << copy_indices[level][i].first << '\t' << copy_indices[level][i].second << std::endl;
    }

  for (unsigned int level = 0; level<copy_indices_level_mine.size(); ++level)
    {
      for (unsigned int i=0; i<copy_indices_level_mine[level].size(); ++i)
        os << "copy_ifrom  [" << level
           << "]\t" << copy_indices_level_mine[level][i].first << '\t' << copy_indices_level_mine[level][i].second << std::endl;
    }
  for (unsigned int level = 0; level<copy_indices_global_mine.size(); ++level)
    {
      for (unsigned int i=0; i<copy_indices_global_mine[level].size(); ++i)
        os << "copy_ito    [" << level
           << "]\t" << copy_indices_global_mine[level][i].first << '\t' << copy_indices_global_mine[level][i].second << std::endl;
    }
}


template <typename VectorType>
std::size_t
MGLevelGlobalTransfer<VectorType>::memory_consumption () const
{
  std::size_t result = sizeof(*this);
  result += MemoryConsumption::memory_consumption(sizes);
  result += MemoryConsumption::memory_consumption(copy_indices);
  result += MemoryConsumption::memory_consumption(copy_indices_global_mine);
  result += MemoryConsumption::memory_consumption(copy_indices_level_mine);

  return result;
}


/* ------ MGLevelGlobalTransfer<parallel::distributed::Vector<Number> > ------ */


template <typename Number>
template <int dim, int spacedim>
void
MGLevelGlobalTransfer<parallel::distributed::Vector<Number> >::fill_and_communicate_copy_indices
(const DoFHandler<dim,spacedim> &mg_dof)
{
  // first go to the usual routine...
  std::vector<std::vector<std::pair<types::global_dof_index, types::global_dof_index> > >
  copy_indices;
  std::vector<std::vector<std::pair<types::global_dof_index, types::global_dof_index> > >
  copy_indices_global_mine;
  std::vector<std::vector<std::pair<types::global_dof_index, types::global_dof_index> > >
  copy_indices_level_mine;

  fill_copy_indices(mg_dof, mg_constrained_dofs, copy_indices,
                    copy_indices_global_mine, copy_indices_level_mine);

  // get all degrees of freedom that we need read access to in copy_to_mg
  // and copy_from_mg, respectively. We fill an IndexSet once on each level
  // (for the global_mine indices accessing remote level indices) and once
  // globally (for the level_mine indices accessing remote global indices).

  // the variables index_set and level_index_set are going to define the
  // ghost indices of the respective vectors (due to construction, these are
  // precisely the indices that we need)
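  // These index sets become the ghost entries of ghosted_global_vector and
  // ghosted_level_vector, which are the vectors that later perform the
  // actual MPI data exchange in copy_to_mg()/copy_from_mg().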
  const parallel::Triangulation<dim, spacedim> *ptria =
    dynamic_cast<const parallel::Triangulation<dim, spacedim> *>
    (&mg_dof.get_tria());
  const MPI_Comm mpi_communicator = ptria != 0 ? ptria->get_communicator() :
                                    MPI_COMM_SELF;

  IndexSet index_set(mg_dof.locally_owned_dofs().size());
  std::vector<types::global_dof_index> accessed_indices;
  ghosted_level_vector.resize(0, mg_dof.get_triangulation().n_global_levels()-1);
  std::vector<IndexSet> level_index_set(mg_dof.get_triangulation().n_global_levels());
  for (unsigned int l=0; l<mg_dof.get_triangulation().n_global_levels(); ++l)
    {
      for (unsigned int i=0; i<copy_indices_level_mine[l].size(); ++i)
        accessed_indices.push_back(copy_indices_level_mine[l][i].first);
      std::vector<types::global_dof_index> accessed_level_indices;
      for (unsigned int i=0; i<copy_indices_global_mine[l].size(); ++i)
        accessed_level_indices.push_back(copy_indices_global_mine[l][i].second);
      std::sort(accessed_level_indices.begin(), accessed_level_indices.end());
      level_index_set[l].set_size(mg_dof.locally_owned_mg_dofs(l).size());
      level_index_set[l].add_indices(accessed_level_indices.begin(),
                                     accessed_level_indices.end());
      level_index_set[l].compress();
      ghosted_level_vector[l].reinit(mg_dof.locally_owned_mg_dofs(l),
                                     level_index_set[l],
                                     mpi_communicator);
    }
  std::sort(accessed_indices.begin(), accessed_indices.end());
  index_set.add_indices(accessed_indices.begin(), accessed_indices.end());
  index_set.compress();
  ghosted_global_vector.reinit(mg_dof.locally_owned_dofs(),
                               index_set,
                               mpi_communicator);

  // localize the copy indices for faster access. Since all access will be
  // through the ghosted vector in 'data', we can use this (much faster)
  // option
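  // In the loop below, the pairs are converted from global DoF numbers to
  // local indices into the ghosted global and level vectors, using the
  // respective partitioners.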
  this->copy_indices.resize(mg_dof.get_triangulation().n_global_levels());
  this->copy_indices_level_mine.resize(mg_dof.get_triangulation().n_global_levels());
  this->copy_indices_global_mine.resize(mg_dof.get_triangulation().n_global_levels());
  for (unsigned int level=0; level<mg_dof.get_triangulation().n_global_levels(); ++level)
    {
      const Utilities::MPI::Partitioner &global_partitioner =
        *ghosted_global_vector.get_partitioner();
      const Utilities::MPI::Partitioner &level_partitioner =
        *ghosted_level_vector[level].get_partitioner();
      // owned-owned case: the locally owned indices are going to control
      // the local index
      this->copy_indices[level].resize(copy_indices[level].size());
      for (unsigned int i=0; i<copy_indices[level].size(); ++i)
        this->copy_indices[level][i] =
          std::pair<unsigned int,unsigned int>
          (global_partitioner.global_to_local(copy_indices[level][i].first),
           level_partitioner.global_to_local(copy_indices[level][i].second));

      // remote-owned case: the locally owned indices for the level and the
      // ghost dofs for the global indices set the local index
      this->copy_indices_level_mine[level].
      resize(copy_indices_level_mine[level].size());
      for (unsigned int i=0; i<copy_indices_level_mine[level].size(); ++i)
        this->copy_indices_level_mine[level][i] =
          std::pair<unsigned int,unsigned int>
          (global_partitioner.global_to_local(copy_indices_level_mine[level][i].first),
           level_partitioner.global_to_local(copy_indices_level_mine[level][i].second));

      // owned-remote case: the locally owned indices for the global dofs
      // and the ghost dofs for the level indices set the local index
      this->copy_indices_global_mine[level].
      resize(copy_indices_global_mine[level].size());
      for (unsigned int i=0; i<copy_indices_global_mine[level].size(); ++i)
        this->copy_indices_global_mine[level][i] =
          std::pair<unsigned int,unsigned int>
          (global_partitioner.global_to_local(copy_indices_global_mine[level][i].first),
           level_partitioner.global_to_local(copy_indices_global_mine[level][i].second));
    }

  perform_plain_copy = this->copy_indices.back().size()
                       == mg_dof.locally_owned_dofs().n_elements();
  if (perform_plain_copy)
    {
      AssertDimension(this->copy_indices_global_mine.back().size(), 0);
      AssertDimension(this->copy_indices_level_mine.back().size(), 0);

      // check whether there is a renumbering of degrees of freedom on
      // either the finest level or the global dofs, which means that we
      // cannot apply a plain copy
      for (unsigned int i=0; i<this->copy_indices.back().size(); ++i)
        if (this->copy_indices.back()[i].first !=
            this->copy_indices.back()[i].second)
          {
            perform_plain_copy = false;
            break;
          }
    }
  perform_plain_copy =
    Utilities::MPI::min(static_cast<int>(perform_plain_copy),
                        mpi_communicator);

  // if we do a plain copy, no need to hold additional ghosted vectors
  if (perform_plain_copy)
    {
      ghosted_global_vector.reinit(0);
      ghosted_level_vector.resize(0, 0);
    }
}



template <typename Number>
void
MGLevelGlobalTransfer<parallel::distributed::Vector<Number> >::clear()
{
  sizes.resize(0);
  copy_indices.clear();
  copy_indices_global_mine.clear();
  copy_indices_level_mine.clear();
  component_to_block_map.resize(0);
  mg_constrained_dofs = 0;
  ghosted_global_vector.reinit(0);
  ghosted_level_vector.resize(0, 0);
}



template <typename Number>
void
MGLevelGlobalTransfer<parallel::distributed::Vector<Number> >::print_indices (std::ostream &os) const
{
  for (unsigned int level = 0; level<copy_indices.size(); ++level)
    {
      for (unsigned int i=0; i<copy_indices[level].size(); ++i)
        os << "copy_indices[" << level
           << "]\t" << copy_indices[level][i].first << '\t' << copy_indices[level][i].second << std::endl;
    }

  for (unsigned int level = 0; level<copy_indices_level_mine.size(); ++level)
    {
      for (unsigned int i=0; i<copy_indices_level_mine[level].size(); ++i)
        os << "copy_ifrom  [" << level
           << "]\t" << copy_indices_level_mine[level][i].first << '\t' << copy_indices_level_mine[level][i].second << std::endl;
    }
  for (unsigned int level = 0; level<copy_indices_global_mine.size(); ++level)
    {
      for (unsigned int i=0; i<copy_indices_global_mine[level].size(); ++i)
        os << "copy_ito    [" << level
           << "]\t" << copy_indices_global_mine[level][i].first << '\t' << copy_indices_global_mine[level][i].second << std::endl;
    }
}


template <typename Number>
std::size_t
MGLevelGlobalTransfer<parallel::distributed::Vector<Number> >::memory_consumption () const
{
  std::size_t result = sizeof(*this);
  result += MemoryConsumption::memory_consumption(sizes);
  result += MemoryConsumption::memory_consumption(copy_indices);
  result += MemoryConsumption::memory_consumption(copy_indices_global_mine);
  result += MemoryConsumption::memory_consumption(copy_indices_level_mine);
  result += ghosted_global_vector.memory_consumption();
  for (unsigned int i=ghosted_level_vector.min_level();
       i<=ghosted_level_vector.max_level(); ++i)
    result += ghosted_level_vector[i].memory_consumption();

  return result;
}



// explicit instantiation
#include "mg_level_global_transfer.inst"

// create two additional instantiations currently not supported by the
// automatic template instantiation scheme


DEAL_II_NAMESPACE_CLOSE