// Reference documentation for deal.II version 8.4.2
// trilinos_sparsity_pattern.cc
// ---------------------------------------------------------------------
//
// Copyright (C) 2008 - 2015 by the deal.II authors
//
// This file is part of the deal.II library.
//
// The deal.II library is free software; you can use it, redistribute
// it, and/or modify it under the terms of the GNU Lesser General
// Public License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
// The full text of the license can be found in the file LICENSE at
// the top level of the deal.II distribution.
//
// ---------------------------------------------------------------------

#include <deal.II/lac/trilinos_sparsity_pattern.h>

#ifdef DEAL_II_WITH_TRILINOS

#  include <deal.II/base/utilities.h>
#  include <deal.II/lac/sparsity_pattern.h>
#  include <deal.II/lac/dynamic_sparsity_pattern.h>

DEAL_II_DISABLE_EXTRA_DIAGNOSTICS
#  include <Epetra_Export.h>
DEAL_II_ENABLE_EXTRA_DIAGNOSTICS

DEAL_II_NAMESPACE_OPEN
29 
30 namespace TrilinosWrappers
31 {
32  namespace
33  {
34  // define a helper function that queries the size of an Epetra_Map object
35  // by calling either the 32- or 64-bit function necessary, and returns the
36  // result in the correct data type so that we can use it in calling other
37  // Epetra member functions that are overloaded by index type
38 #ifndef DEAL_II_WITH_64BIT_INDICES
39  int n_global_elements (const Epetra_BlockMap &map)
40  {
41  return map.NumGlobalElements();
42  }
43 
44  int min_my_gid(const Epetra_BlockMap &map)
45  {
46  return map.MinMyGID();
47  }
48 
49  int max_my_gid(const Epetra_BlockMap &map)
50  {
51  return map.MaxMyGID();
52  }
53 
54  int n_global_rows(const Epetra_CrsGraph &graph)
55  {
56  return graph.NumGlobalRows();
57  }
58 
59  int n_global_cols(const Epetra_CrsGraph &graph)
60  {
61  return graph.NumGlobalCols();
62  }
63 
64  int n_global_entries(const Epetra_CrsGraph &graph)
65  {
66  return graph.NumGlobalEntries();
67  }
68 
69  int global_row_index(const Epetra_CrsGraph &graph, int i)
70  {
71  return graph.GRID(i);
72  }
73 #else
74  long long int n_global_elements (const Epetra_BlockMap &map)
75  {
76  return map.NumGlobalElements64();
77  }
78 
79  long long int min_my_gid(const Epetra_BlockMap &map)
80  {
81  return map.MinMyGID64();
82  }
83 
84  long long int max_my_gid(const Epetra_BlockMap &map)
85  {
86  return map.MaxMyGID64();
87  }
88 
89  long long int n_global_rows(const Epetra_CrsGraph &graph)
90  {
91  return graph.NumGlobalRows64();
92  }
93 
94  long long int n_global_cols(const Epetra_CrsGraph &graph)
95  {
96  return graph.NumGlobalCols64();
97  }
98 
99  long long int n_global_entries(const Epetra_CrsGraph &graph)
100  {
101  return graph.NumGlobalEntries64();
102  }
103 
104  long long int global_row_index(const Epetra_CrsGraph &graph, int i)
105  {
106  return graph.GRID64(i);
107  }
108 #endif
109  }
110 
111  namespace SparsityPatternIterators
112  {
113  void
115  {
116  // if we are asked to visit the
117  // past-the-end line, then simply
118  // release all our caches and go on
119  // with life
120  if (this->a_row == sparsity_pattern->n_rows())
121  {
122  colnum_cache.reset ();
123 
124  return;
125  }
126 //TODO: Is this thread safe?
127 
128  // otherwise first flush Trilinos caches
130 
131  // get a representation of the present
132  // row
133  int ncols;
134  // TODO: casting a size_type to an int, could be a problem
135  int colnums = sparsity_pattern->n_cols();
136 
137  int ierr;
138  ierr = sparsity_pattern->graph->ExtractGlobalRowCopy((TrilinosWrappers::types::int_type)this->a_row,
139  colnums,
140  ncols,
141  (TrilinosWrappers::types::int_type *)&(*colnum_cache)[0]);
142  AssertThrow (ierr == 0, ExcTrilinosError(ierr));
143 
144  // copy it into our caches if the
145  // line isn't empty. if it is, then
146  // we've done something wrong, since
147  // we shouldn't have initialized an
148  // iterator for an empty line (what
149  // would it point to?)
150  Assert (ncols != 0, ExcInternalError());
151  colnum_cache.reset (new std::vector<size_type> (colnums,
152  colnums+ncols));
153  }
154  }
155 
156 
157  // The constructor is actually the
158  // only point where we have to check
159  // whether we build a serial or a
160  // parallel Trilinos matrix.
161  // Actually, it does not even matter
162  // how many threads there are, but
163  // only if we use an MPI compiler or
164  // a standard compiler. So, even one
165  // thread on a configuration with
166  // MPI will still get a parallel
167  // interface.
169  {
170  column_space_map.reset(new Epetra_Map (TrilinosWrappers::types::int_type(0),
171  TrilinosWrappers::types::int_type(0),
173  graph.reset (new Epetra_FECrsGraph(View,
174  *column_space_map,
175  *column_space_map,
176  0));
177  graph->FillComplete();
178  }
179 
180 
181  SparsityPattern::SparsityPattern (const Epetra_Map &input_map,
182  const size_type n_entries_per_row)
183  {
184  reinit (input_map, input_map, n_entries_per_row);
185  }
186 
187 
188 
189  SparsityPattern::SparsityPattern (const Epetra_Map &input_map,
190  const std::vector<size_type> &n_entries_per_row)
191  {
192  reinit (input_map, input_map, n_entries_per_row);
193  }
194 
195 
196 
197  SparsityPattern::SparsityPattern (const Epetra_Map &input_row_map,
198  const Epetra_Map &input_col_map,
199  const size_type n_entries_per_row)
200  {
201  reinit (input_row_map, input_col_map, n_entries_per_row);
202  }
203 
204 
205 
206  SparsityPattern::SparsityPattern (const Epetra_Map &input_row_map,
207  const Epetra_Map &input_col_map,
208  const std::vector<size_type> &n_entries_per_row)
209  {
210  reinit (input_row_map, input_col_map, n_entries_per_row);
211  }
212 
213 
214 
216  const size_type n,
217  const size_type n_entries_per_row)
218  {
219  reinit (m, n, n_entries_per_row);
220  }
221 
222 
223 
225  const size_type n,
226  const std::vector<size_type> &n_entries_per_row)
227  {
228  reinit (m, n, n_entries_per_row);
229  }
230 
231 
232  // Copy function only works if the
233  // sparsity pattern is empty.
235  :
236  Subscriptor(),
237  column_space_map (new Epetra_Map(TrilinosWrappers::types::int_type(0),
238  TrilinosWrappers::types::int_type(0),
239  Utilities::Trilinos::comm_self())),
240  graph (new Epetra_FECrsGraph(View,
241  *column_space_map,
242  *column_space_map,
243  0))
244  {
245  (void)input_sparsity;
246  Assert (input_sparsity.n_rows() == 0,
247  ExcMessage ("Copy constructor only works for empty sparsity patterns."));
248  }
249 
250 
251 
252  SparsityPattern::SparsityPattern (const IndexSet &parallel_partitioning,
253  const MPI_Comm &communicator,
254  const size_type n_entries_per_row)
255  {
256  reinit (parallel_partitioning, parallel_partitioning, communicator,
257  n_entries_per_row);
258  }
259 
260 
261 
262  SparsityPattern::SparsityPattern (const IndexSet &parallel_partitioning,
263  const MPI_Comm &communicator,
264  const std::vector<size_type> &n_entries_per_row)
265  {
266  reinit (parallel_partitioning, parallel_partitioning, communicator,
267  n_entries_per_row);
268  }
269 
270 
271 
272  SparsityPattern::SparsityPattern (const IndexSet &row_parallel_partitioning,
273  const IndexSet &col_parallel_partitioning,
274  const MPI_Comm &communicator,
275  const size_type n_entries_per_row)
276  {
277  reinit (row_parallel_partitioning, col_parallel_partitioning,
278  communicator, n_entries_per_row);
279  }
280 
281 
282 
284  SparsityPattern (const IndexSet &row_parallel_partitioning,
285  const IndexSet &col_parallel_partitioning,
286  const MPI_Comm &communicator,
287  const std::vector<size_type> &n_entries_per_row)
288  {
289  reinit (row_parallel_partitioning, col_parallel_partitioning,
290  communicator, n_entries_per_row);
291  }
292 
293 
294 
296  SparsityPattern (const IndexSet &row_parallel_partitioning,
297  const IndexSet &col_parallel_partitioning,
298  const IndexSet &writable_rows,
299  const MPI_Comm &communicator,
300  const size_type n_max_entries_per_row)
301  {
302  reinit (row_parallel_partitioning, col_parallel_partitioning,
303  writable_rows, communicator, n_max_entries_per_row);
304  }
305 
306 
307 
309  {}
310 
311 
312 
313  void
315  const size_type n,
316  const size_type n_entries_per_row)
317  {
318  reinit (complete_index_set(m), complete_index_set(n), MPI_COMM_SELF,
319  n_entries_per_row);
320  }
321 
322 
323 
324  void
326  const size_type n,
327  const std::vector<size_type> &n_entries_per_row)
328  {
329  reinit (complete_index_set(m), complete_index_set(n), MPI_COMM_SELF,
330  n_entries_per_row);
331  }
332 
333 
334 
335  namespace
336  {
338 
339  void
340  reinit_sp (const Epetra_Map &row_map,
341  const Epetra_Map &col_map,
342  const size_type n_entries_per_row,
343  std_cxx11::shared_ptr<Epetra_Map> &column_space_map,
344  std_cxx11::shared_ptr<Epetra_FECrsGraph> &graph,
345  std_cxx11::shared_ptr<Epetra_CrsGraph> &nonlocal_graph)
346  {
347  Assert(row_map.IsOneToOne(),
348  ExcMessage("Row map must be 1-to-1, i.e., no overlap between "
349  "the maps of different processors."));
350  Assert(col_map.IsOneToOne(),
351  ExcMessage("Column map must be 1-to-1, i.e., no overlap between "
352  "the maps of different processors."));
353 
354  nonlocal_graph.reset();
355  graph.reset ();
356  column_space_map.reset (new Epetra_Map (col_map));
357 
358  // for more than one processor, need to specify only row map first and
359  // let the matrix entries decide about the column map (which says which
360  // columns are present in the matrix, not to be confused with the
361  // col_map that tells how the domain dofs of the matrix will be
362  // distributed). for only one processor, we can directly assign the
363  // columns as well. If we use a recent Trilinos version, we can also
364  // require building a non-local graph which gives us thread-safe
365  // initialization.
366  if (row_map.Comm().NumProc() > 1)
367  graph.reset (new Epetra_FECrsGraph(Copy, row_map,
368  n_entries_per_row, false
369  // TODO: Check which new Trilinos
370  // version supports this... Remember
371  // to change tests/trilinos/assemble_matrix_parallel_07
372  // too.
373  //#if DEAL_II_TRILINOS_VERSION_GTE(11,14,0)
374  //, true
375  //#endif
376  ));
377  else
378  graph.reset (new Epetra_FECrsGraph(Copy, row_map, col_map,
379  n_entries_per_row, false));
380  }
381 
382 
383 
384  void
385  reinit_sp (const Epetra_Map &row_map,
386  const Epetra_Map &col_map,
387  const std::vector<size_type> &n_entries_per_row,
388  std_cxx11::shared_ptr<Epetra_Map> &column_space_map,
389  std_cxx11::shared_ptr<Epetra_FECrsGraph> &graph,
390  std_cxx11::shared_ptr<Epetra_CrsGraph> &nonlocal_graph)
391  {
392  Assert(row_map.IsOneToOne(),
393  ExcMessage("Row map must be 1-to-1, i.e., no overlap between "
394  "the maps of different processors."));
395  Assert(col_map.IsOneToOne(),
396  ExcMessage("Column map must be 1-to-1, i.e., no overlap between "
397  "the maps of different processors."));
398 
399  // release memory before reallocation
400  nonlocal_graph.reset();
401  graph.reset ();
402  AssertDimension (n_entries_per_row.size(),
403  static_cast<size_type>(n_global_elements(row_map)));
404 
405  column_space_map.reset (new Epetra_Map (col_map));
406  std::vector<int> local_entries_per_row(max_my_gid(row_map)-
407  min_my_gid(row_map));
408  for (unsigned int i=0; i<local_entries_per_row.size(); ++i)
409  local_entries_per_row[i] = n_entries_per_row[min_my_gid(row_map)+i];
410 
411  if (row_map.Comm().NumProc() > 1)
412  graph.reset(new Epetra_FECrsGraph(Copy, row_map,
413  &local_entries_per_row[0],
414  false
415  // TODO: Check which new Trilinos
416  // version supports this... Remember
417  // to change tests/trilinos/assemble_matrix_parallel_07
418  // too.
419  //#if DEAL_II_TRILINOS_VERSION_GTE(11,14,0)
420  //, true
421  //#endif
422  ));
423  else
424  graph.reset(new Epetra_FECrsGraph(Copy, row_map, col_map,
425  &local_entries_per_row[0],
426  false));
427  }
428 
429 
430 
431  template <typename SparsityPatternType>
432  void
433  reinit_sp (const Epetra_Map &row_map,
434  const Epetra_Map &col_map,
435  const SparsityPatternType &sp,
436  const bool exchange_data,
437  std_cxx11::shared_ptr<Epetra_Map> &column_space_map,
438  std_cxx11::shared_ptr<Epetra_FECrsGraph> &graph,
439  std_cxx11::shared_ptr<Epetra_CrsGraph> &nonlocal_graph)
440  {
441  nonlocal_graph.reset ();
442  graph.reset ();
443 
444  AssertDimension (sp.n_rows(),
445  static_cast<size_type>(n_global_elements(row_map)));
446  AssertDimension (sp.n_cols(),
447  static_cast<size_type>(n_global_elements(col_map)));
448 
449  column_space_map.reset (new Epetra_Map (col_map));
450 
451  Assert (row_map.LinearMap() == true,
452  ExcMessage ("This function only works if the row map is contiguous."));
453 
454  const size_type first_row = min_my_gid(row_map),
455  last_row = max_my_gid(row_map)+1;
456  std::vector<int> n_entries_per_row(last_row - first_row);
457 
458  // Trilinos wants the row length as an int this is hopefully never going
459  // to be a problem.
460  for (size_type row=first_row; row<last_row; ++row)
461  n_entries_per_row[row-first_row] = static_cast<int>(sp.row_length(row));
462 
463  if (row_map.Comm().NumProc() > 1)
464  graph.reset(new Epetra_FECrsGraph(Copy, row_map,
465  &n_entries_per_row[0],
466  false));
467  else
468  graph.reset (new Epetra_FECrsGraph(Copy, row_map, col_map,
469  &n_entries_per_row[0],
470  false));
471 
472  AssertDimension (sp.n_rows(),
473  static_cast<size_type>(n_global_rows(*graph)));
474 
475  std::vector<TrilinosWrappers::types::int_type> row_indices;
476 
477  // Include possibility to exchange data since DynamicSparsityPattern is
478  // able to do so
479  if (exchange_data==false)
480  for (size_type row=first_row; row<last_row; ++row)
481  {
482  const TrilinosWrappers::types::int_type row_length = sp.row_length(row);
483  if (row_length == 0)
484  continue;
485 
486  row_indices.resize (row_length, -1);
487  {
488  typename SparsityPatternType::iterator p = sp.begin(row);
489  // avoid incrementing p over the end of the current row because
490  // it is slow for DynamicSparsityPattern in parallel
491  for (int col=0; col<row_length; )
492  {
493  row_indices[col++] = p->column();
494  if (col < row_length)
495  ++p;
496  }
497  }
498  graph->Epetra_CrsGraph::InsertGlobalIndices (row, row_length,
499  &row_indices[0]);
500  }
501  else
502  for (size_type row=0; row<sp.n_rows(); ++row)
503  {
504  const TrilinosWrappers::types::int_type row_length = sp.row_length(row);
505  if (row_length == 0)
506  continue;
507 
508  row_indices.resize (row_length, -1);
509  {
510  typename SparsityPatternType::iterator p = sp.begin(row);
511  // avoid incrementing p over the end of the current row because
512  // it is slow for DynamicSparsityPattern in parallel
513  for (int col=0; col<row_length; )
514  {
515  row_indices[col++] = p->column();
516  if (col < row_length)
517  ++p;
518  }
519  }
520  graph->InsertGlobalIndices (1,
521  reinterpret_cast<TrilinosWrappers::types::int_type *>(&row),
522  row_length, &row_indices[0]);
523  }
524 
525  int ierr =
526  graph->GlobalAssemble (*column_space_map,
527  static_cast<const Epetra_Map &>(graph->RangeMap()),
528  true);
529  AssertThrow (ierr == 0, ExcTrilinosError(ierr));
530 
531  ierr = graph->OptimizeStorage ();
532  AssertThrow (ierr == 0, ExcTrilinosError(ierr));
533  }
534  }
535 
536 
537  void
538  SparsityPattern::reinit (const Epetra_Map &input_map,
539  const size_type n_entries_per_row)
540  {
541  reinit_sp (input_map, input_map, n_entries_per_row,
543  }
544 
545 
546 
547  void
548  SparsityPattern::reinit (const Epetra_Map &input_row_map,
549  const Epetra_Map &input_col_map,
550  const size_type n_entries_per_row)
551  {
552  reinit_sp (input_row_map, input_col_map, n_entries_per_row,
554  }
555 
556 
557 
558  void
559  SparsityPattern::reinit (const Epetra_Map &input_map,
560  const std::vector<size_type> &n_entries_per_row)
561  {
562  reinit_sp (input_map, input_map, n_entries_per_row,
564  }
565 
566 
567 
568  void
569  SparsityPattern::reinit (const Epetra_Map &input_row_map,
570  const Epetra_Map &input_col_map,
571  const std::vector<size_type> &n_entries_per_row)
572  {
573  reinit_sp (input_row_map, input_col_map, n_entries_per_row,
575  }
576 
577 
578 
579  void
580  SparsityPattern::reinit (const IndexSet &parallel_partitioning,
581  const MPI_Comm &communicator,
582  const size_type n_entries_per_row)
583  {
584  Epetra_Map map = parallel_partitioning.make_trilinos_map (communicator,
585  false);
586  reinit_sp (map, map, n_entries_per_row,
588  }
589 
590 
591 
592  void SparsityPattern::reinit (const IndexSet &parallel_partitioning,
593  const MPI_Comm &communicator,
594  const std::vector<size_type> &n_entries_per_row)
595  {
596  Epetra_Map map = parallel_partitioning.make_trilinos_map (communicator,
597  false);
598  reinit_sp (map, map, n_entries_per_row,
600  }
601 
602 
603 
604  void SparsityPattern::reinit (const IndexSet &row_parallel_partitioning,
605  const IndexSet &col_parallel_partitioning,
606  const MPI_Comm &communicator,
607  const size_type n_entries_per_row)
608  {
609  Epetra_Map row_map =
610  row_parallel_partitioning.make_trilinos_map (communicator, false);
611  Epetra_Map col_map =
612  col_parallel_partitioning.make_trilinos_map (communicator, false);
613  reinit_sp (row_map, col_map, n_entries_per_row,
615  }
616 
617 
618 
619  void
620  SparsityPattern::reinit (const IndexSet &row_parallel_partitioning,
621  const IndexSet &col_parallel_partitioning,
622  const MPI_Comm &communicator,
623  const std::vector<size_type> &n_entries_per_row)
624  {
625  Epetra_Map row_map =
626  row_parallel_partitioning.make_trilinos_map (communicator, false);
627  Epetra_Map col_map =
628  col_parallel_partitioning.make_trilinos_map (communicator, false);
629  reinit_sp (row_map, col_map, n_entries_per_row,
631  }
632 
633 
634 
635  void
636  SparsityPattern::reinit (const IndexSet &row_parallel_partitioning,
637  const IndexSet &col_parallel_partitioning,
638  const IndexSet &writable_rows,
639  const MPI_Comm &communicator,
640  const size_type n_entries_per_row)
641  {
642  Epetra_Map row_map =
643  row_parallel_partitioning.make_trilinos_map (communicator, false);
644  Epetra_Map col_map =
645  col_parallel_partitioning.make_trilinos_map (communicator, false);
646  reinit_sp (row_map, col_map, n_entries_per_row,
648 
649  IndexSet nonlocal_partitioner = writable_rows;
650  AssertDimension(nonlocal_partitioner.size(), row_parallel_partitioning.size());
651 #ifdef DEBUG
652  {
653  IndexSet tmp = writable_rows & row_parallel_partitioning;
654  Assert (tmp == row_parallel_partitioning,
655  ExcMessage("The set of writable rows passed to this method does not "
656  "contain the locally owned rows, which is not allowed."));
657  }
658 #endif
659  nonlocal_partitioner.subtract_set(row_parallel_partitioning);
660  if (Utilities::MPI::n_mpi_processes(communicator) > 1)
661  {
662  Epetra_Map nonlocal_map =
663  nonlocal_partitioner.make_trilinos_map(communicator, true);
664  nonlocal_graph.reset(new Epetra_CrsGraph(Copy, nonlocal_map, 0));
665  }
666  else
667  Assert(nonlocal_partitioner.n_elements() == 0, ExcInternalError());
668  }
669 
670 
671 
672  template<typename SparsityPatternType>
673  void
674  SparsityPattern::reinit (const IndexSet &row_parallel_partitioning,
675  const IndexSet &col_parallel_partitioning,
676  const SparsityPatternType &nontrilinos_sparsity_pattern,
677  const MPI_Comm &communicator,
678  const bool exchange_data)
679  {
680  Epetra_Map row_map =
681  row_parallel_partitioning.make_trilinos_map (communicator, false);
682  Epetra_Map col_map =
683  col_parallel_partitioning.make_trilinos_map (communicator, false);
684  reinit_sp (row_map, col_map, nontrilinos_sparsity_pattern, exchange_data,
686  }
687 
688 
689 
690  template<typename SparsityPatternType>
691  void
692  SparsityPattern::reinit (const IndexSet &parallel_partitioning,
693  const SparsityPatternType &nontrilinos_sparsity_pattern,
694  const MPI_Comm &communicator,
695  const bool exchange_data)
696  {
697  Epetra_Map map = parallel_partitioning.make_trilinos_map (communicator,
698  false);
699  reinit_sp (map, map, nontrilinos_sparsity_pattern, exchange_data,
701  }
702 
703 
704 
705  template <typename SparsityPatternType>
706  void
707  SparsityPattern::reinit (const Epetra_Map &input_map,
708  const SparsityPatternType &sp,
709  const bool exchange_data)
710  {
711  reinit_sp (input_map, input_map, sp, exchange_data,
713  }
714 
715 
716 
717  template <typename SparsityPatternType>
718  void
719  SparsityPattern::reinit (const Epetra_Map &input_row_map,
720  const Epetra_Map &input_col_map,
721  const SparsityPatternType &sp,
722  const bool exchange_data)
723  {
724  reinit_sp (input_row_map, input_col_map, sp, exchange_data,
726 
727  compress();
728  }
729 
730 
731 
734  {
735  Assert (false, ExcNotImplemented());
736  return *this;
737  }
738 
739 
740 
741  template <typename SparsityPatternType>
742  void
743  SparsityPattern::copy_from (const SparsityPatternType &sp)
744  {
745  const Epetra_Map rows (TrilinosWrappers::types::int_type(sp.n_rows()), 0,
747  const Epetra_Map columns (TrilinosWrappers::types::int_type(sp.n_cols()), 0,
749 
750  reinit_sp (rows, columns, sp, false,
752  }
753 
754 
755 
756  void
758  {
759  // When we clear the matrix, reset
760  // the pointer and generate an
761  // empty sparsity pattern.
762  column_space_map.reset (new Epetra_Map (TrilinosWrappers::types::int_type(0),
763  TrilinosWrappers::types::int_type(0),
765  graph.reset (new Epetra_FECrsGraph(View, *column_space_map,
766  *column_space_map, 0));
767  graph->FillComplete();
768 
769  nonlocal_graph.reset();
770  }
771 
772 
773 
774  void
776  {
777  int ierr;
778  Assert (column_space_map.get() != 0, ExcInternalError());
779  if (nonlocal_graph.get() != 0)
780  {
781  if (nonlocal_graph->IndicesAreGlobal() == false &&
782  nonlocal_graph->RowMap().NumMyElements() > 0)
783  {
784  // insert dummy element
785  TrilinosWrappers::types::int_type row = nonlocal_graph->RowMap().MyGID(
786  static_cast<TrilinosWrappers::types::int_type> (0));
787  nonlocal_graph->InsertGlobalIndices(row, 1, &row);
788  }
789  Assert(nonlocal_graph->RowMap().NumMyElements() == 0 ||
790  nonlocal_graph->IndicesAreGlobal() == true,
791  ExcInternalError());
792  nonlocal_graph->FillComplete(*column_space_map,
793  static_cast<const Epetra_Map &>(graph->RangeMap()));
794  nonlocal_graph->OptimizeStorage();
795  Epetra_Export exporter(nonlocal_graph->RowMap(), graph->RowMap());
796  ierr = graph->Export(*nonlocal_graph, exporter, Add);
797  AssertThrow (ierr == 0, ExcTrilinosError(ierr));
798  ierr =
799  graph->FillComplete(*column_space_map,
800  static_cast<const Epetra_Map &>(graph->RangeMap()));
801  }
802  else
803  ierr = graph->GlobalAssemble (*column_space_map,
804  static_cast<const Epetra_Map &>(graph->RangeMap()),
805  true);
806 
807  AssertThrow (ierr == 0, ExcTrilinosError(ierr));
808 
809  ierr = graph->OptimizeStorage ();
810  AssertThrow (ierr == 0, ExcTrilinosError(ierr));
811  }
812 
813 
814 
815  bool
817  const size_type j) const
818  {
819  // Extract local indices in
820  // the matrix.
821  int trilinos_i = graph->LRID(static_cast<TrilinosWrappers::types::int_type>(i)),
822  trilinos_j = graph->LCID(static_cast<TrilinosWrappers::types::int_type>(j));
823 
824  // If the data is not on the
825  // present processor, we throw
826  // an exception. This is on of
827  // the two tiny differences to
828  // the el(i,j) call, which does
829  // not throw any assertions.
830  if (trilinos_i == -1)
831  {
832  return false;
833  }
834  else
835  {
836  // Check whether the matrix
837  // already is transformed to
838  // local indices.
839  if (graph->Filled() == false)
840  {
841  int nnz_present = graph->NumGlobalIndices(i);
842  int nnz_extracted;
843  TrilinosWrappers::types::int_type *col_indices;
844 
845  // Generate the view and make
846  // sure that we have not generated
847  // an error.
848  // TODO: trilinos_i is the local row index -> it is an int but
849  // ExtractGlobalRowView requires trilinos_i to be the global row
850  // index and thus it should be a long long int
851  int ierr = graph->ExtractGlobalRowView(
852  static_cast<TrilinosWrappers::types::int_type>(trilinos_i),
853  nnz_extracted, col_indices);
854  (void)ierr;
855  Assert (ierr==0, ExcTrilinosError(ierr));
856  Assert (nnz_present == nnz_extracted,
857  ExcDimensionMismatch(nnz_present, nnz_extracted));
858 
859  // Search the index
860  TrilinosWrappers::types::int_type *el_find =
861  std::find(col_indices, col_indices + nnz_present, trilinos_j);
862 
863  TrilinosWrappers::types::int_type local_col_index =
864  (TrilinosWrappers::types::int_type)(el_find - col_indices);
865 
866  if (local_col_index == nnz_present)
867  return false;
868  }
869  else
870  {
871  // Prepare pointers for extraction
872  // of a view of the row.
873  int nnz_present = graph->NumGlobalIndices(
874  static_cast<TrilinosWrappers::types::int_type>(i));
875  int nnz_extracted;
876  int *col_indices;
877 
878  // Generate the view and make
879  // sure that we have not generated
880  // an error.
881  int ierr = graph->ExtractMyRowView(trilinos_i,
882  nnz_extracted, col_indices);
883  (void)ierr;
884  Assert (ierr==0, ExcTrilinosError(ierr));
885 
886  Assert (nnz_present == nnz_extracted,
887  ExcDimensionMismatch(nnz_present, nnz_extracted));
888 
889  // Search the index
890  int *el_find = std::find(col_indices, col_indices + nnz_present,
891  static_cast<int>(trilinos_j));
892 
893  int local_col_index = (int)(el_find - col_indices);
894 
895  if (local_col_index == nnz_present)
896  return false;
897  }
898  }
899 
900  return true;
901  }
902 
903 
904 
907  {
908  size_type local_b=0;
909  TrilinosWrappers::types::int_type global_b=0;
910  for (int i=0; i<(int)local_size(); ++i)
911  {
912  int *indices;
913  int num_entries;
914  graph->ExtractMyRowView(i, num_entries, indices);
915  for (unsigned int j=0; j<(unsigned int)num_entries; ++j)
916  {
917  if (static_cast<size_type>(std::abs(static_cast<TrilinosWrappers::types::int_type>(i-indices[j]))) > local_b)
918  local_b = std::abs(static_cast<TrilinosWrappers::types::int_type>(i-indices[j]));
919  }
920  }
921  graph->Comm().MaxAll((TrilinosWrappers::types::int_type *)&local_b, &global_b, 1);
922  return static_cast<size_type>(global_b);
923  }
924 
925 
926 
929  {
930  const TrilinosWrappers::types::int_type n_rows = n_global_rows(*graph);
931  return n_rows;
932  }
933 
934 
935 
938  {
939  TrilinosWrappers::types::int_type n_cols;
940  if (graph->Filled() == true)
941  n_cols = n_global_cols(*graph);
942  else
943  n_cols = n_global_elements(*column_space_map);
944 
945  return n_cols;
946  }
947 
948 
949 
950  unsigned int
952  {
953  int n_rows = graph -> NumMyRows();
954 
955  return n_rows;
956  }
957 
958 
959 
960  std::pair<SparsityPattern::size_type, SparsityPattern::size_type>
962  {
964  begin = min_my_gid(graph->RowMap());
965  end = max_my_gid(graph->RowMap())+1;
966 
967  return std::make_pair (begin, end);
968  }
969 
970 
971 
974  {
975  TrilinosWrappers::types::int_type nnz = n_global_entries(*graph);
976 
977  return static_cast<size_type>(nnz);
978  }
979 
980 
981 
982  unsigned int
984  {
985  int nnz = graph->MaxNumIndices();
986 
987  return static_cast<unsigned int>(nnz);
988  }
989 
990 
991 
994  {
995  Assert (row < n_rows(), ExcInternalError());
996 
997  // get a representation of the
998  // present row
999  TrilinosWrappers::types::int_type ncols = -1;
1000  TrilinosWrappers::types::int_type local_row =
1001  graph->LRID(static_cast<TrilinosWrappers::types::int_type>(row));
1002 
1003  // on the processor who owns this
1004  // row, we'll have a non-negative
1005  // value.
1006  if (local_row >= 0)
1007  ncols = graph->NumMyIndices (local_row);
1008 
1009  return static_cast<size_type>(ncols);
1010  }
1011 
1012 
1013 
1014  const Epetra_Map &
1016  {
1017  return static_cast<const Epetra_Map &>(graph->DomainMap());
1018  }
1019 
1020 
1021 
1022  const Epetra_Map &
1024  {
1025  return static_cast<const Epetra_Map &>(graph->RangeMap());
1026  }
1027 
1028 
1029 
1030  const Epetra_Map &
1032  {
1033  return static_cast<const Epetra_Map &>(graph->RowMap());
1034  }
1035 
1036 
1037 
1038  const Epetra_Map &
1040  {
1041  return static_cast<const Epetra_Map &>(graph->ColMap());
1042  }
1043 
1044 
1045 
1046  const Epetra_Comm &
1048  {
1049  return graph->RangeMap().Comm();
1050  }
1051 
1052 
1053 
1054  MPI_Comm
1056  {
1057 
1058 #ifdef DEAL_II_WITH_MPI
1059 
1060  const Epetra_MpiComm *mpi_comm
1061  = dynamic_cast<const Epetra_MpiComm *>(&graph->RangeMap().Comm());
1062  return mpi_comm->Comm();
1063 #else
1064 
1065  return MPI_COMM_SELF;
1066 
1067 #endif
1068 
1069  }
1070 
1071 
1072 
1073  void
1075  {
1076  Assert (false, ExcNotImplemented());
1077  }
1078 
1079 
1080 
1081  // As of now, no particularly neat
1082  // ouput is generated in case of
1083  // multiple processors.
1084  void
1085  SparsityPattern::print (std::ostream &out,
1086  const bool write_extended_trilinos_info) const
1087  {
1088  if (write_extended_trilinos_info)
1089  out << *graph;
1090  else
1091  {
1092  int *indices;
1093  int num_entries;
1094 
1095  for (int i=0; i<graph->NumMyRows(); ++i)
1096  {
1097  graph->ExtractMyRowView (i, num_entries, indices);
1098  for (int j=0; j<num_entries; ++j)
1099  out << "(" << i << "," << indices[global_row_index(*graph,j)] << ") "
1100  << std::endl;
1101  }
1102  }
1103 
1104  AssertThrow (out, ExcIO());
1105  }
1106 
1107 
1108 
1109  void
1110  SparsityPattern::print_gnuplot (std::ostream &out) const
1111  {
1112  Assert (graph->Filled() == true, ExcInternalError());
1113  for (unsigned int row=0; row<local_size(); ++row)
1114  {
1115  int *indices;
1116  int num_entries;
1117  graph->ExtractMyRowView (row, num_entries, indices);
1118 
1119  for (unsigned int j=0; j<(unsigned int)num_entries; ++j)
1120  // while matrix entries are usually
1121  // written (i,j), with i vertical and
1122  // j horizontal, gnuplot output is
1123  // x-y, that is we have to exchange
1124  // the order of output
1125  out << indices[global_row_index(*graph,static_cast<int>(j))]
1126  << " " << -static_cast<signed int>(row) << std::endl;
1127  }
1128 
1129  AssertThrow (out, ExcIO());
1130  }
1131 
1132 //TODO: Implement!
1133  std::size_t
1135  {
1136  Assert(false, ExcNotImplemented());
1137  return 0;
1138  }
1139 
1140 
1141  // explicit instantiations
1142  //
1143  template void
1144  SparsityPattern::copy_from (const ::SparsityPattern &);
1145  template void
1146  SparsityPattern::copy_from (const ::DynamicSparsityPattern &);
1147 
1148 
1149  template void
1150  SparsityPattern::reinit (const Epetra_Map &,
1151  const ::SparsityPattern &,
1152  bool);
1153  template void
1154  SparsityPattern::reinit (const Epetra_Map &,
1155  const ::DynamicSparsityPattern &,
1156  bool);
1157 
1158  template void
1159  SparsityPattern::reinit (const Epetra_Map &,
1160  const Epetra_Map &,
1161  const ::SparsityPattern &,
1162  bool);
1163  template void
1164  SparsityPattern::reinit (const Epetra_Map &,
1165  const Epetra_Map &,
1166  const ::DynamicSparsityPattern &,
1167  bool);
1168 
1169 
1170  template void
1172  const ::SparsityPattern &,
1173  const MPI_Comm &,
1174  bool);
1175  template void
1177  const ::DynamicSparsityPattern &,
1178  const MPI_Comm &,
1179  bool);
1180 
1181 
1182  template void
1184  const IndexSet &,
1185  const ::SparsityPattern &,
1186  const MPI_Comm &,
1187  bool);
1188  template void
1190  const IndexSet &,
1191  const ::DynamicSparsityPattern &,
1192  const MPI_Comm &,
1193  bool);
1194 
1195 }
1196 
1197 DEAL_II_NAMESPACE_CLOSE
1198 
1199 #endif // DEAL_II_WITH_TRILINOS
// --- Extraction residue: doxygen cross-reference index (kept, commented out
// --- so it no longer masquerades as code) -----------------------------------
// #define AssertDimension(dim1, dim2)            — exceptions.h:1052
// void print(std::ostream &out, const bool write_extended_trilinos_info=false) const
// size_type row_length(const size_type row) const
// ::ExceptionBase & ExcMessage(std::string arg1)
// std_cxx11::shared_ptr< Epetra_FECrsGraph > graph
// std_cxx11::shared_ptr< Epetra_CrsGraph > nonlocal_graph
// #define AssertThrow(cond, exc)                 — exceptions.h:358
// const Epetra_Comm & comm_self()                — utilities.cc:733
// const_iterator begin() const
// Epetra_Map make_trilinos_map(const MPI_Comm &communicator=MPI_COMM_WORLD, const bool overlapping=false) const — index_set.cc:484
// const Epetra_Comm & trilinos_communicator() const DEAL_II_DEPRECATED
// size_type size() const                         — index_set.h:1247
// bool exists(const size_type i, const size_type j) const
// std_cxx11::shared_ptr< Epetra_Map > column_space_map
// const Epetra_Map & row_partitioner() const DEAL_II_DEPRECATED — types.h:30
// std_cxx11::shared_ptr< const std::vector< size_type > > colnum_cache
// void subtract_set(const IndexSet &other)       — index_set.cc:269
// #define Assert(cond, exc)                      — exceptions.h:294
// void copy_from(const SparsityPattern &input_sparsity_pattern)
// void print_gnuplot(std::ostream &out) const
// const_iterator end() const
// const Epetra_Map & range_partitioner() const DEAL_II_DEPRECATED
// unsigned int n_mpi_processes(const MPI_Comm &mpi_communicator) — mpi.cc:99
// const Epetra_Map & domain_partitioner() const DEAL_II_DEPRECATED
// const Epetra_Map & col_partitioner() const DEAL_II_DEPRECATED
// SparsityPattern & operator=(const SparsityPattern &input_sparsity_pattern) — mpi.h:55
// void reinit(const size_type m, const size_type n, const size_type n_entries_per_row=0)
// size_type n_elements() const                   — index_set.h:1380
// std::pair< size_type, size_type > local_range() const