   template <int dim>
   void BodyForce<dim>::vector_value(const Point<dim> & /*p*/,
                                     Vector<double> &values) const
   {
     Assert(values.size() == dim, ExcDimensionMismatch(values.size(), dim));

     const double g   = 9.81;
     const double rho = 7700;

     values          = 0;
     values(dim - 1) = -rho * g;
   }


   template <int dim>
   void BodyForce<dim>::vector_value_list(
     const std::vector<Point<dim>> &points,
     std::vector<Vector<double>> &  value_list) const
   {
     const unsigned int n_points = points.size();

     Assert(value_list.size() == n_points,
            ExcDimensionMismatch(value_list.size(), n_points));

     for (unsigned int p = 0; p < n_points; ++p)
       BodyForce<dim>::vector_value(points[p], value_list[p]);
   }
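As a quick plausibility check (the following snippet is not part of the tutorial
program, it only illustrates the class above), the returned force density should
have magnitude rho*g and point in negative z-direction:

     // Illustration only: evaluate the body force at an arbitrary point.
     BodyForce<3>   body_force;
     Vector<double> values(3);
     body_force.vector_value(Point<3>(), values);
     // values(0) == values(1) == 0, and values(2) == -7700 * 9.81 == -75537,
     // i.e. a force density of about 7.6e4 N/m^3 pointing downward.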
 <a name="ThecodeIncrementalBoundaryValuecodeclass"></a>
 <h3>The <code>IncrementalBoundaryValues</code> class</h3>
 In addition to body forces, movement can be induced by boundary forces
 and forced boundary displacement. The latter case is equivalent to forces
 being chosen in such a way that they induce a certain displacement.

 For quasistatic displacement, typical boundary forces would be pressure
 on a body, or tangential friction against another body. We chose a
 somewhat simpler case here: we prescribe a certain movement of (parts of)
 the boundary, or at least of certain components of the displacement
 vector. We describe this by another vector-valued function that, for a
 given point on the boundary, returns the prescribed displacement.

 Since we have a time-dependent problem, the displacement increment of the
 boundary equals the displacement accumulated during the length of the
 timestep. The class therefore has to know both the present time and the
 length of the present time step, and can then approximate the incremental
 displacement as the present velocity times the present timestep.

 For the purposes of this program, we choose a simple form of boundary
 displacement: we displace the top boundary with constant velocity
 downwards. The rest of the boundary is either going to be fixed (and is
 then described using an object of type <code>Functions::ZeroFunction</code>)
 or free (where nothing special has to be done). The implementation of the
 class describing the constant downward motion should then be obvious
 using the knowledge we gained through all the previous example programs:
   template <int dim>
   class IncrementalBoundaryValues : public Function<dim>
   {
   public:
     IncrementalBoundaryValues(const double present_time,
                               const double present_timestep);

     virtual void vector_value(const Point<dim> &p,
                               Vector<double> &  values) const override;

     virtual void
     vector_value_list(const std::vector<Point<dim>> &points,
                       std::vector<Vector<double>> &  value_list) const override;

   private:
     const double velocity;
     const double present_time;
     const double present_timestep;
   };


   template <int dim>
   IncrementalBoundaryValues<dim>::IncrementalBoundaryValues(
     const double present_time,
     const double present_timestep)
     : Function<dim>(dim)
     , velocity(.08)
     , present_time(present_time)
     , present_timestep(present_timestep)
   {}


   template <int dim>
   void
   IncrementalBoundaryValues<dim>::vector_value(const Point<dim> & /*p*/,
                                                Vector<double> &values) const
   {
     Assert(values.size() == dim, ExcDimensionMismatch(values.size(), dim));

     values    = 0;
     values(2) = -present_timestep * velocity;
   }


   template <int dim>
   void IncrementalBoundaryValues<dim>::vector_value_list(
     const std::vector<Point<dim>> &points,
     std::vector<Vector<double>> &  value_list) const
   {
     const unsigned int n_points = points.size();

     Assert(value_list.size() == n_points,
            ExcDimensionMismatch(value_list.size(), n_points));

     for (unsigned int p = 0; p < n_points; ++p)
       IncrementalBoundaryValues<dim>::vector_value(points[p], value_list[p]);
   }
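For illustration (this snippet is not part of the tutorial program), over a
time step of length 1 every constrained point is prescribed to move by
-velocity*1 in z-direction, regardless of its location:

     IncrementalBoundaryValues<3> boundary_motion(/*present_time=*/1.0,
                                                  /*present_timestep=*/1.0);
     Vector<double> increment(3);
     boundary_motion.vector_value(Point<3>(0, 0, 3), increment);
     // increment(2) now holds -present_timestep * velocity; the x- and
     // y-components are zero.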
 <a name="ImplementationofthecodeTopLevelcodeclass"></a>
 <h3>Implementation of the <code>TopLevel</code> class</h3>

 Now for the implementation of the main class. First, we initialize the
 stress-strain tensor, which we have declared as a static const
 variable. We chose Lamé constants that are appropriate for steel:
   template <int dim>
   const SymmetricTensor<4, dim> TopLevel<dim>::stress_strain_tensor =
     get_stress_strain_tensor<dim>(/*lambda = */ 9.695e10,
                                   /*mu     = */ 7.617e10);
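As a sanity check (not part of the tutorial; it assumes the second argument is
the shear modulus mu, as in the upstream tutorial source), these Lamé constants
can be converted to engineering constants and should come out close to textbook
values for steel (E of roughly 200 GPa, nu of roughly 0.3):

     const double lambda = 9.695e10, mu = 7.617e10;
     const double E  = mu * (3 * lambda + 2 * mu) / (lambda + mu); // ~1.95e11 Pa
     const double nu = lambda / (2 * (lambda + mu));               // ~0.28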
 <a name="Thepublicinterface"></a>
 <h4>The public interface</h4>

 The next step is the definition of constructors and destructors. There
 are no surprises here: we choose linear and continuous finite elements
 for each of the <code>dim</code> vector components of the solution, and a
 Gaussian quadrature formula with 2 points in each coordinate
 direction. The destructor should be obvious:
   template <int dim>
   TopLevel<dim>::TopLevel()
     : triangulation(MPI_COMM_WORLD)
     , fe(FE_Q<dim>(1), dim)
     , dof_handler(triangulation)
     , quadrature_formula(fe.degree + 1)
     , present_time(0.0)
     , present_timestep(1.0)
     , end_time(10.0)
     , timestep_no(0)
     , mpi_communicator(MPI_COMM_WORLD)
     , n_mpi_processes(Utilities::MPI::n_mpi_processes(mpi_communicator))
     , this_mpi_process(Utilities::MPI::this_mpi_process(mpi_communicator))
     , pcout(std::cout, this_mpi_process == 0)
   {}


   template <int dim>
   TopLevel<dim>::~TopLevel()
   {
     dof_handler.clear();
   }
 The last of the public functions is the one that directs all the work,
 <code>run()</code>. It initializes the variables that describe where in
 time we presently are, then runs the first time step, then loops over all
 the other time steps. Note that for simplicity we use a fixed time step,
 whereas a more sophisticated program would of course have to choose it in
 some more reasonable way adaptively:
   template <int dim>
   void TopLevel<dim>::run()
   {
     do_initial_timestep();

     while (present_time < end_time)
       do_timestep();
   }
 <a name="TopLevelcreate_coarse_grid"></a>
 <h4>TopLevel::create_coarse_grid</h4>

 The next function in the order in which they were declared above is the
 one that creates the coarse grid from which we start. For this example
 program, we want to compute the deformation of a cylinder under axial
 compression. The first step therefore is to generate a mesh for a
 cylinder of length 3 and with inner and outer radii of 0.8 and 1,
 respectively. Fortunately, there is a library function for such a mesh.

 In a second step, we have to associate boundary conditions with the
 upper and lower faces of the cylinder. We choose a boundary indicator of
 0 for the boundary faces that are characterized by their midpoints having
 z-coordinate 0 (bottom face), an indicator of 1 for z=3 (top
 face); finally, we use boundary indicator 2 for all faces on the inside
 of the cylinder shell, and 3 for the outside.
   template <int dim>
   void TopLevel<dim>::create_coarse_grid()
   {
     const double inner_radius = 0.8, outer_radius = 1;
     GridGenerator::cylinder_shell(triangulation, 3, inner_radius, outer_radius);
     for (const auto &cell : triangulation.active_cell_iterators())
       for (const auto &face : cell->face_iterators())
         if (face->at_boundary())
           {
             const Point<dim> face_center = face->center();

             if (face_center[2] == 0)
               face->set_boundary_id(0);
             else if (face_center[2] == 3)
               face->set_boundary_id(1);
             else if (std::sqrt(face_center[0] * face_center[0] +
                                face_center[1] * face_center[1]) <
                      (inner_radius + outer_radius) / 2)
               face->set_boundary_id(2);
             else
               face->set_boundary_id(3);
           }
 Once all this is done, we can refine the mesh once globally:

     triangulation.refine_global(1);

 As the final step, we need to set up a clean state of the data that we
 store in the quadrature points on all cells that are treated on the
 present processor:

     setup_quadrature_point_history();
   }
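As a small diagnostic sketch (not part of the tutorial program), one can verify
the boundary indicator assignment made above by counting boundary faces per
indicator; four distinct indicators should appear:

     std::map<types::boundary_id, unsigned int> faces_per_id;
     for (const auto &cell : triangulation.active_cell_iterators())
       for (const auto &face : cell->face_iterators())
         if (face->at_boundary())
           ++faces_per_id[face->boundary_id()];
     for (const auto &entry : faces_per_id)
       std::cout << "boundary id " << static_cast<int>(entry.first) << ": "
                 << entry.second << " faces" << std::endl;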
 <a name="TopLevelsetup_system"></a>
 <h4>TopLevel::setup_system</h4>

 The next function is the one that sets up the data structures for a given
 mesh. This is done in much the same way as in @ref step_17 "step-17": distribute the
 degrees of freedom, then sort these degrees of freedom in such a way that
 each processor gets a contiguous chunk of them. Note that subdivision into
 chunks for each processor is handled in the functions that create or
 refine grids, unlike in the previous example program (the point where
 this happens is mostly a matter of taste; here, we chose to do it when
 grids are created since in the <code>do_initial_timestep</code> and
 <code>do_timestep</code> functions we want to output the number of cells
 on each processor at a point where we haven't called the present function
 yet).

   template <int dim>
   void TopLevel<dim>::setup_system()
   {
     dof_handler.distribute_dofs(fe);
     locally_owned_dofs = dof_handler.locally_owned_dofs();
     DoFTools::extract_locally_relevant_dofs(dof_handler, locally_relevant_dofs);

 The next step is to set up constraints due to hanging nodes. This has
 been handled many times before:

     hanging_node_constraints.clear();
     DoFTools::make_hanging_node_constraints(dof_handler,
                                             hanging_node_constraints);
     hanging_node_constraints.close();

 And then we have to set up the matrix. Here we deviate from @ref step_17 "step-17", in
 which we simply used PETSc's ability to just know about the size of the
 matrix and later allocate those nonzero elements that are being written
 to. While this works just fine from a correctness viewpoint, it is not
 at all efficient: if we don't give PETSc a clue as to which elements
 are written to, it is (at least at the time of this writing) unbearably
 slow when we set the elements in the matrix for the first time (i.e. in
 the first timestep). Later on, when the elements have been allocated,
 everything is much faster. In experiments we made, the first timestep
 can be accelerated by almost two orders of magnitude if we instruct
 PETSc which elements will be used and which are not.

 To do so, we first generate the sparsity pattern of the matrix we are
 going to work with, and make sure that the condensation of hanging node
 constraints adds the necessary additional entries in the sparsity
 pattern:

     DynamicSparsityPattern sparsity_pattern(locally_relevant_dofs);
     DoFTools::make_sparsity_pattern(dof_handler,
                                     sparsity_pattern,
                                     hanging_node_constraints,
                                     /*keep constrained dofs*/ false);
     SparsityTools::distribute_sparsity_pattern(sparsity_pattern,
                                                locally_owned_dofs,
                                                mpi_communicator,
                                                locally_relevant_dofs);

 Note that we have used the <code>DynamicSparsityPattern</code> class
 here that was already introduced in @ref step_11 "step-11", rather than the
 <code>SparsityPattern</code> class that we have used in all other
 cases. The reason for this is that for the latter class to work we have
 to give an initial upper bound for the number of entries in each row, a
 task that is traditionally done by
 <code>DoFHandler::max_couplings_between_dofs()</code>. However, this
 function suffers from a serious problem: it has to compute an upper
 bound to the number of nonzero entries in each row, and this is a
 rather complicated task, in particular in 3d. In effect, while it is
 quite accurate in 2d, it often comes up with much too large a number in
 3d, and in that case the <code>SparsityPattern</code> allocates much
 too much memory at first, often several hundred MBs. This is later
 corrected when <code>DoFTools::make_sparsity_pattern</code> is called
 and we realize that we don't need all that much memory, but at that time
 it is already too late: for large problems, the temporary allocation of
 too much memory can lead to out-of-memory situations.
 In order to avoid this, we resort to the <code>DynamicSparsityPattern</code>
 class that does not require any up-front estimate on the number of nonzero
 entries per row. It therefore only ever allocates as much memory as it needs
 at any given time, and we can build it even for large 3d problems.
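To make the difference concrete, here is a minimal sketch (not from the
tutorial program; it reuses the <code>dof_handler</code> and index sets of the
surrounding code) contrasting the two approaches:

     // The traditional way: reserve space based on an a-priori bandwidth
     // estimate, which can be far too large in 3d...
     SparsityPattern fixed_pattern(dof_handler.n_dofs(),
                                   dof_handler.max_couplings_between_dofs());

     // ...versus the dynamic pattern used here, which grows on demand and
     // needs no estimate at all:
     DynamicSparsityPattern dynamic_pattern(locally_relevant_dofs);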
 It is also worth noting that due to the specifics of
 parallel::shared::Triangulation, the sparsity pattern we construct is
 global, i.e. comprises all degrees of freedom whether they will be
 owned by the processor we are on or another one (in case this program
 is run in %parallel via MPI). This of course is not optimal -- it
 limits the size of the problems we can solve, since storing the entire
 sparsity pattern (even if only for a short time) on each processor does
 not scale well. However, there are several more places in the program
 in which we do this, for example we always keep the global
 triangulation and DoF handler objects around, even if we only work on
 part of them. At present, deal.II does not have the necessary
 facilities to completely distribute these objects (a task that, indeed,
 is very hard to achieve with adaptive meshes, since well-balanced
 subdivisions of a domain tend to become unbalanced as the mesh is
 adaptively refined).
 With this data structure, we can then go to the PETSc sparse matrix and
 tell it to preallocate all the entries we will later want to write to:

     system_matrix.reinit(locally_owned_dofs,
                          locally_owned_dofs,
                          sparsity_pattern,
                          mpi_communicator);

 After this point, no further explicit knowledge of the sparsity pattern
 is required any more and we can let the <code>sparsity_pattern</code>
 variable go out of scope without any problem.
 The last task in this function is then only to reset the right hand
 side vector as well as the solution vector to its correct size;
 remember that the solution vector is a local one, unlike the right hand
 side that is a distributed %parallel one and therefore needs to know
 the MPI communicator over which it is supposed to transmit messages:

     system_rhs.reinit(locally_owned_dofs, mpi_communicator);
     incremental_displacement.reinit(dof_handler.n_dofs());
   }
 <a name="TopLevelassemble_system"></a>
 <h4>TopLevel::assemble_system</h4>

 Again, assembling the system matrix and right hand side follows the same
 structure as in many example programs before. In particular, it is mostly
 equivalent to @ref step_17 "step-17", except for the different right hand side that now
 only has to take into account internal stresses. In addition, assembling
 the matrix is made significantly more transparent by using the
 <code>SymmetricTensor</code> class: note the elegance of forming the
 scalar products of symmetric tensors of rank 2 and 4. The implementation
 is also more general since it is independent of the fact that we may or
 may not be using an isotropic elasticity tensor.

 The first part of the assembly routine is as always:
   template <int dim>
   void TopLevel<dim>::assemble_system()
   {
     system_rhs    = 0;
     system_matrix = 0;

     FEValues<dim> fe_values(fe,
                             quadrature_formula,
                             update_values | update_gradients |
                               update_quadrature_points | update_JxW_values);

     const unsigned int dofs_per_cell = fe.dofs_per_cell;
     const unsigned int n_q_points    = quadrature_formula.size();

     FullMatrix<double> cell_matrix(dofs_per_cell, dofs_per_cell);
     Vector<double>     cell_rhs(dofs_per_cell);

     std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);

     BodyForce<dim>              body_force;
     std::vector<Vector<double>> body_force_values(n_q_points,
                                                   Vector<double>(dim));
 As in @ref step_17 "step-17", we only need to loop over all cells that belong to the
 present processor:

     for (const auto &cell : dof_handler.active_cell_iterators())
       if (cell->is_locally_owned())
         {
           cell_matrix = 0;
           cell_rhs    = 0;

           fe_values.reinit(cell);
 Then loop over all indices i,j and quadrature points and assemble
 the system matrix contributions from this cell. Note how we
 extract the strains of the shape functions
 at a given quadrature point from the <code>FEValues</code>
 object, and the elegance with which we form the triple
 contraction <code>eps_phi_i : C : eps_phi_j</code>; the latter
 needs to be compared to the clumsy computations needed in
 @ref step_17 "step-17", both in the introduction as well as in the respective
 place in the program:

           for (unsigned int i = 0; i < dofs_per_cell; ++i)
             for (unsigned int j = 0; j < dofs_per_cell; ++j)
               for (unsigned int q_point = 0; q_point < n_q_points; ++q_point)
                 {
                   const SymmetricTensor<2, dim>
                     eps_phi_i = get_strain(fe_values, i, q_point),
                     eps_phi_j = get_strain(fe_values, j, q_point);

                   cell_matrix(i, j) += (eps_phi_i *
                                         stress_strain_tensor *
                                         eps_phi_j) *
                                        fe_values.JxW(q_point);
                 }
 Then also assemble the local right hand side contributions. For
 this, we need to access the prior stress value in this quadrature
 point. To get it, we use the user pointer of this cell that
 points into the global array to the quadrature point data
 corresponding to the first quadrature point of the present cell,
 and then add an offset corresponding to the index of the
 quadrature point we presently consider:

           const PointHistory<dim> *local_quadrature_points_data =
             reinterpret_cast<PointHistory<dim> *>(cell->user_pointer());
 In addition, we need the values of the external body forces at
 the quadrature points on this cell:

           body_force.vector_value_list(fe_values.get_quadrature_points(),
                                        body_force_values);
 Then we can loop over all degrees of freedom on this cell and
 compute local contributions to the right hand side:

           for (unsigned int i = 0; i < dofs_per_cell; ++i)
             {
               const unsigned int component_i =
                 fe.system_to_component_index(i).first;

               for (unsigned int q_point = 0; q_point < n_q_points; ++q_point)
                 {
                   const SymmetricTensor<2, dim> &old_stress =
                     local_quadrature_points_data[q_point].old_stress;

                   cell_rhs(i) +=
                     (body_force_values[q_point](component_i) *
                        fe_values.shape_value(i, q_point) -
                      old_stress * get_strain(fe_values, i, q_point)) *
                     fe_values.JxW(q_point);
                 }
             }
 Now that we have the local contributions to the linear system, we
 need to transfer them into the global objects. This is done exactly
 as in @ref step_17 "step-17":

           cell->get_dof_indices(local_dof_indices);

           hanging_node_constraints.distribute_local_to_global(cell_matrix,
                                                               cell_rhs,
                                                               local_dof_indices,
                                                               system_matrix,
                                                               system_rhs);
         }
 The last step is to again fix up boundary values, just as we already
 did in previous programs. A slight complication is that the
 <code>apply_boundary_values</code> function wants to have a solution
 vector compatible with the matrix and right hand side (i.e. here a
 distributed %parallel vector, rather than the sequential vector we use
 in this program) in order to preset the entries of the solution vector
 with the correct boundary values. We provide such a compatible vector
 in the form of a temporary vector which we then copy into the
 sequential one.

 We make up for this complication by showing how boundary values can be
 used flexibly: following the way we create the triangulation, there are
 three distinct boundary indicators used to describe the domain,
 corresponding to the bottom and top faces, as well as the inner/outer
 surfaces. We would like to impose boundary conditions of the following
 type: The inner and outer cylinder surfaces are free of external
 forces, a fact that corresponds to natural (Neumann-type) boundary
 conditions for which we don't have to do anything. At the bottom, we
 want no movement at all, corresponding to the cylinder being clamped or
 cemented in at this part of the boundary. At the top, however, we want
 a prescribed vertical downward motion compressing the cylinder; in
 addition, we only want to restrict the vertical movement, but not the
 horizontal ones -- one can think of this situation as a well-greased
 plate sitting on top of the cylinder pushing it downwards: the atoms of
 the cylinder are forced to move downward, but they are free to slide
 horizontally along the plate.

 The way to describe this is as follows: for boundary indicator zero
 (bottom face) we use a dim-dimensional zero function representing no
 motion in any coordinate direction. For the boundary with indicator 1
 (top surface), we use the <code>IncrementalBoundaryValues</code> class,
 but we specify an additional argument to the
 <code>VectorTools::interpolate_boundary_values</code> function denoting
 which vector components it should apply to; this is a vector of bools
 for each vector component and because we only want to restrict vertical
 motion, it has only its last component set:

     FEValuesExtractors::Scalar z_component(dim - 1);
     std::map<types::global_dof_index, double> boundary_values;
     VectorTools::interpolate_boundary_values(dof_handler,
                                              0,
                                              Functions::ZeroFunction<dim>(dim),
                                              boundary_values);
     VectorTools::interpolate_boundary_values(
       dof_handler,
       1,
       IncrementalBoundaryValues<dim>(present_time, present_timestep),
       boundary_values,
       fe.component_mask(z_component));

     PETScWrappers::MPI::Vector tmp(locally_owned_dofs, mpi_communicator);
     MatrixTools::apply_boundary_values(
       boundary_values, system_matrix, tmp, system_rhs, false);
     incremental_displacement = tmp;
   }

 <a name="TopLevelsolve_timestep"></a>
 <h4>TopLevel::solve_timestep</h4>

 The next function is the one that controls what all has to happen within
 a timestep. The order of things should be relatively self-explanatory
 from the function names:

   template <int dim>
   void TopLevel<dim>::solve_timestep()
   {
     pcout << "    Assembling system..." << std::flush;
     assemble_system();
     pcout << " norm of rhs is " << system_rhs.l2_norm() << std::endl;

     const unsigned int n_iterations = solve_linear_problem();

     pcout << "    Solver converged in " << n_iterations << " iterations."
           << std::endl;

     pcout << "    Updating quadrature point data..." << std::flush;
     update_quadrature_point_history();
     pcout << std::endl;
   }

 <a name="TopLevelsolve_linear_problem"></a>
 <h4>TopLevel::solve_linear_problem</h4>

 Solving the linear system again works mostly as before. The only
 difference is that we want to only keep a complete local copy of the
 solution vector instead of the distributed one that we get as output from
 PETSc's solver routines.
 To this end, we declare a local temporary
 variable for the distributed vector and initialize it with the contents
 of the local variable (remember that the
 <code>apply_boundary_values</code> function called in
 <code>assemble_system</code> preset the values of boundary nodes in this
 vector), solve with it, and at the end of the function copy it again into
 the complete local vector that we declared as a member variable. Hanging
 node constraints are then distributed only on the local copy,
 i.e. independently of each other on each of the processors:

   template <int dim>
   unsigned int TopLevel<dim>::solve_linear_problem()
   {
     PETScWrappers::MPI::Vector distributed_incremental_displacement(
       locally_owned_dofs, mpi_communicator);
     distributed_incremental_displacement = incremental_displacement;

     SolverControl solver_control(dof_handler.n_dofs(),
                                  1e-16 * system_rhs.l2_norm());

     PETScWrappers::SolverCG cg(solver_control, mpi_communicator);

     PETScWrappers::PreconditionBlockJacobi preconditioner(system_matrix);

     cg.solve(system_matrix,
              distributed_incremental_displacement,
              system_rhs,
              preconditioner);

     incremental_displacement = distributed_incremental_displacement;

     hanging_node_constraints.distribute(incremental_displacement);

     return solver_control.last_step();
   }
 <a name="TopLeveloutput_results"></a>
 <h4>TopLevel::output_results</h4>

 This function generates the graphical output in .vtu format as explained
 in the introduction. Each process will only work on the cells it owns,
 and then write the result into a file of its own. Additionally, processor
 0 will write the record files that reference all the .vtu files.

 The crucial part of this function is to give the <code>DataOut</code>
 class a way to only work on the cells that the present process owns.

   template <int dim>
   void TopLevel<dim>::output_results() const
   {
     DataOut<dim> data_out;
     data_out.attach_dof_handler(dof_handler);
 Then, just as in @ref step_17 "step-17", define the names of solution variables (which
 here are the displacement increments) and queue the solution vector for
 output. Note in the following switch how we make sure that if the space
 dimension should be unhandled that we throw an exception saying that we
 haven't implemented this case yet (another case of defensive
 programming):

     std::vector<std::string> solution_names;
     switch (dim)
       {
         case 1:
           solution_names.emplace_back("delta_x");
           break;
         case 2:
           solution_names.emplace_back("delta_x");
           solution_names.emplace_back("delta_y");
           break;
         case 3:
           solution_names.emplace_back("delta_x");
           solution_names.emplace_back("delta_y");
           solution_names.emplace_back("delta_z");
           break;
         default:
           Assert(false, ExcNotImplemented());
       }

     data_out.add_data_vector(incremental_displacement, solution_names);

 The next thing is that we wanted to output something like the average
 norm of the stresses that we have stored in each cell. This may seem
 complicated, since on the present processor we only store the stresses
 in quadrature points on those cells that actually belong to the present
 process. In other words, it seems as if we can't compute the average
 stresses for all cells. However, remember that our class derived from
 <code>DataOut</code> only iterates over those cells that actually do
 belong to the present processor, i.e. we don't have to compute anything
 for all the other cells as this information would not be touched. The
 following little loop does this. We enclose the entire block into a
 pair of braces to make sure that the iterator variables do not remain
 accidentally visible beyond the end of the block in which they are
 declared:

     Vector<double> norm_of_stress(triangulation.n_active_cells());
     {

 Loop over all the cells...

       for (auto &cell : triangulation.active_cell_iterators())
         if (cell->is_locally_owned())
           {

 On these cells, add up the stresses over all quadrature points...

             SymmetricTensor<2, dim> accumulated_stress;
             for (unsigned int q = 0; q < quadrature_formula.size(); ++q)
               accumulated_stress +=
                 reinterpret_cast<PointHistory<dim> *>(cell->user_pointer())[q]
                   .old_stress;

 ...then write the norm of the average to their destination:

             norm_of_stress(cell->active_cell_index()) =
               (accumulated_stress / quadrature_formula.size()).norm();
           }

 And on the cells that we are not interested in, set the respective
 value in the vector to a bogus value (norms must be positive, and a
 large negative value should catch your eye) in order to make sure
 that if we were somehow wrong about our assumption that these
 elements would not appear in the output file, that we would find out
 by looking at the graphical output:

         else
           norm_of_stress(cell->active_cell_index()) = -1e+20;
     }

 Finally attach this vector as well to be treated for output:

     data_out.add_data_vector(norm_of_stress, "norm_of_stress");

 As a last piece of data, let us also add the partitioning of the domain
 into subdomains associated with the processors if this is a parallel
 job. This works in the exact same way as in the @ref step_17 "step-17" program:

     std::vector<types::subdomain_id> partition_int(
       triangulation.n_active_cells());
     GridTools::get_subdomain_association(triangulation, partition_int);
     const Vector<double> partitioning(partition_int.begin(),
                                       partition_int.end());
     data_out.add_data_vector(partitioning, "partitioning");

 Finally, with all this data, we can instruct deal.II to munge the
 information and produce some intermediate data structures that contain
 all these solution and other data vectors:

     data_out.build_patches();

 Let us call a function that opens the necessary output files and writes
 the data we have generated into them. The function automatically
 constructs the file names from the given directory name (the first
 argument) and file name base (second argument). It augments the resulting
 string by pieces that result from the time step number and a "piece
 number" that corresponds to a part of the overall domain that can consist
 of one or more subdomains.

 The function also writes a record file (with suffix `.pvtu`) for Paraview
 that describes how all of these output files combine into the data for
 this single time step:

     const std::string pvtu_master_filename =
       data_out.write_vtu_with_pvtu_record(
         "./", "solution", timestep_no, mpi_communicator, 4);

 The record files must be written only once and not by each processor,
 so we do this on processor 0:

     if (this_mpi_process == 0)
       {

 Finally, we write the paraview record, that references all .pvtu
 files and their respective time.
 Note that the variable
 <code>times_and_names</code> is declared static, so it will retain the entries
 from the previous timesteps.

         static std::vector<std::pair<double, std::string>> times_and_names;
         times_and_names.push_back(
           std::pair<double, std::string>(present_time, pvtu_master_filename));
         std::ofstream pvd_output("solution.pvd");
         DataOutBase::write_pvd_record(pvd_output, times_and_names);
       }
   }

 <a name="TopLeveldo_initial_timestep"></a>
 <h4>TopLevel::do_initial_timestep</h4>

 This and the next function handle the overall structure of the first and
 following timesteps, respectively. The first timestep is slightly more
 involved because we want to compute it multiple times on successively
 refined meshes, each time starting from a clean state. At the end of
 these computations, in which we compute the incremental displacements
 each time, we use the last results obtained for the incremental
 displacements to compute the resulting stress updates and move the mesh
 accordingly. On this new mesh, we then output the solution and any
 additional data we consider important.

 All this is interspersed by generating output to the console to update
 the person watching the screen on what is going on. As in @ref step_17 "step-17", the
 use of <code>pcout</code> instead of <code>std::cout</code> makes sure
 that only one of the parallel processes is actually writing to the
 console, without having to explicitly code an if-statement in each place
 where we generate output:

   template <int dim>
   void TopLevel<dim>::do_initial_timestep()
   {
     present_time += present_timestep;
     ++timestep_no;
     pcout << "Timestep " << timestep_no << " at time " << present_time
           << std::endl;

     for (unsigned int cycle = 0; cycle < 2; ++cycle)
       {
         pcout << "  Cycle " << cycle << ':' << std::endl;
         if (cycle == 0)
           create_coarse_grid();
         else
           refine_initial_grid();

         pcout << "    Number of active cells:       "
               << triangulation.n_active_cells() << " (by partition:";
         for (unsigned int p = 0; p < n_mpi_processes; ++p)
           pcout << (p == 0 ? ' ' : '+')
                 << (GridTools::count_cells_with_subdomain_association(
                      triangulation, p));
         pcout << ")" << std::endl;

         setup_system();

         pcout << "    Number of degrees of freedom: " << dof_handler.n_dofs()
               << " (by partition:";
         for (unsigned int p = 0; p < n_mpi_processes; ++p)
           pcout << (p == 0 ? ' ' : '+')
                 << (DoFTools::count_dofs_with_subdomain_association(dof_handler,
                                                                     p));
         pcout << ")" << std::endl;

         solve_timestep();
       }

     move_mesh();
     output_results();

     pcout << std::endl;
   }

 <a name="TopLeveldo_timestep"></a>
 <h4>TopLevel::do_timestep</h4>

 Subsequent timesteps are simpler, and probably do not require any more
 documentation given the explanations for the previous function above:

   template <int dim>
   void TopLevel<dim>::do_timestep()
   {
     present_time += present_timestep;
     ++timestep_no;
     pcout << "Timestep " << timestep_no << " at time " << present_time
           << std::endl;
     if (present_time > end_time)
       {
         present_timestep -= (present_time - end_time);
         present_time = end_time;
       }

     solve_timestep();

     move_mesh();
     output_results();

     pcout << std::endl;
   }

 <a name="TopLevelrefine_initial_grid"></a>
 <h4>TopLevel::refine_initial_grid</h4>

 The following function is called when solving the first time step on
 successively refined meshes. After each iteration, it computes a
 refinement criterion, refines the mesh, and sets up the history variables
 in each quadrature point again to a clean state.

   template <int dim>
   void TopLevel<dim>::refine_initial_grid()
   {

 First, let each process compute error indicators for the cells it owns:

     Vector<float> error_per_cell(triangulation.n_active_cells());
     KellyErrorEstimator<dim>::estimate(
       dof_handler,
       QGauss<dim - 1>(fe.degree + 1),
       std::map<types::boundary_id, const Function<dim> *>(),
       incremental_displacement,
       error_per_cell,
       ComponentMask(),
       nullptr,
       MultithreadInfo::n_threads(),
       this_mpi_process);

 Then set up a global vector into which we merge the local indicators
 from each of the %parallel processes:

     const unsigned int n_local_cells =
       triangulation.n_locally_owned_active_cells();

     PETScWrappers::MPI::Vector distributed_error_per_cell(
       mpi_communicator, triangulation.n_active_cells(), n_local_cells);

     for (unsigned int i = 0; i < error_per_cell.size(); ++i)
       if (error_per_cell(i) != 0)
         distributed_error_per_cell(i) = error_per_cell(i);
     distributed_error_per_cell.compress(VectorOperation::insert);

 Once we have that, copy it back into local copies on all processors and
 refine the mesh accordingly:

     error_per_cell = distributed_error_per_cell;
     GridRefinement::refine_and_coarsen_fixed_number(triangulation,
                                                     error_per_cell,
                                                     0.35,
                                                     0.03);
     triangulation.execute_coarsening_and_refinement();

 Finally, set up quadrature point data again on the new mesh, and only
 on those cells that we have determined to be ours:

     setup_quadrature_point_history();
   }

 <a name="TopLevelmove_mesh"></a>
 <h4>TopLevel::move_mesh</h4>

 At the end of each time step, we move the nodes of the mesh according to
 the incremental displacements computed in this time step. To do this, we
 keep a vector of flags that indicate for each vertex whether we have
 already moved it around, and then loop over all cells and move those
 vertices of the cell that have not been moved yet. It is worth noting
 that it does not matter from which of the cells adjacent to a vertex we
 move this vertex: since we compute the displacement using a continuous
 finite element, the displacement field is continuous as well and we can
 compute the displacement of a given vertex from each of the adjacent
 cells. We only have to make sure that we move each node exactly once,
 which is why we keep the vector of flags.

 There are two noteworthy things in this function.
 First, how we get the
 displacement field at a given vertex using the
 <code>cell-@>vertex_dof_index(v,d)</code> function that returns the index
 of the <code>d</code>th degree of freedom at vertex <code>v</code> of the
 given cell. In the present case, displacement in the k-th coordinate
 direction corresponds to the k-th component of the finite element. Using a
 function like this bears a certain risk, because it uses knowledge of the
 order of elements that we have taken together for this program in the
 <code>FESystem</code> element. If we decided to add an additional
 variable, for example a pressure variable for stabilization, and happened
 to insert it as the first variable of the element, then the computation
 below will start to produce nonsensical results. In addition, this
 computation rests on other assumptions: first, that the element we use
 has, indeed, degrees of freedom that are associated with vertices. This
 is indeed the case for the present Q1 element, as would be for all Qp
 elements of polynomial order <code>p</code>. However, it would not hold
 for discontinuous elements, or elements for mixed formulations. Secondly,
 it also rests on the assumption that the displacement at a vertex is
 determined solely by the value of the degree of freedom associated with
 this vertex; in other words, all shape functions corresponding to other
 degrees of freedom are zero at this particular vertex. Again, this is the
 case for the present element, but is not so for all elements that are
 presently available in deal.II. Despite its risks, we choose to use this
 way in order to present a way to query individual degrees of freedom
 associated with vertices.

 In this context, it is instructive to point out what a more general way
 would be. For general finite elements, the way to go would be to take a
 quadrature formula with the quadrature points in the vertices of a
 cell. The <code>QTrapez</code> formula for the trapezoidal rule does
 exactly this. With this quadrature formula, we would then initialize an
 <code>FEValues</code> object in each cell, and use the
 <code>FEValues::get_function_values</code> function to obtain the values
 of the solution function in the quadrature points, i.e. the vertices of
 the cell. These are the only values that we really need, i.e. we are not
 at all interested in the weights (or the <code>JxW</code> values)
 associated with this particular quadrature formula, and this can be
 specified as the last argument in the constructor to
 <code>FEValues</code>. The only point of minor inconvenience in this
 scheme is that we have to figure out which quadrature point corresponds
 to the vertex we consider at present, as they may or may not be ordered
 in the same order.
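To make this concrete, here is a sketch of that more general approach (not
part of the tutorial program; it reuses the member variables of this class
and the usual deal.II interfaces):

     const QTrapez<dim> vertex_quadrature;
     FEValues<dim>      fe_vertex_values(fe, vertex_quadrature, update_values);

     std::vector<Vector<double>> vertex_displacements(vertex_quadrature.size(),
                                                      Vector<double>(dim));
     for (const auto &cell : dof_handler.active_cell_iterators())
       {
         fe_vertex_values.reinit(cell);
         fe_vertex_values.get_function_values(incremental_displacement,
                                              vertex_displacements);
         // vertex_displacements[q] now holds the dim displacement components
         // at the q-th quadrature point; one still has to match quadrature
         // point q to vertex v, as the orderings may differ.
       }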
 This inconvenience could be avoided if finite elements have support
 points on vertices (which the one here has; for the concept of support
 points, see @ref GlossSupport "support points"). For such a case, one
 could construct a custom quadrature rule using
 FiniteElement::get_unit_support_points(). The first
 <code>GeometryInfo@<dim@>::%vertices_per_cell*fe.dofs_per_vertex</code>
 quadrature points will then correspond to the vertices of the cell and
 are ordered consistent with <code>cell-@>vertex(i)</code>, taking into
 account that support points for vector elements will be duplicated
 <code>fe.dofs_per_vertex</code> times.

 Another point worth explaining about this short function is the way in
 which the triangulation class exports information about its vertices:
 through the <code>Triangulation::n_vertices</code> function, it
 advertises how many vertices there are in the triangulation. Not all of
 them are actually in use all the time -- some are left-overs from cells
 that have been coarsened previously and remain in existence since deal.II
 never changes the number of a vertex once it has come into existence,
 even if vertices with lower number go away. Secondly, the location
 returned by <code>cell-@>vertex(v)</code> is not only a read-only object
 of type <code>Point@<dim@></code>, but in fact a reference that can be
 written to. This allows us to move around the nodes of a mesh with relative
 ease, but it is worth pointing out that it is the responsibility of an
 application program using this feature to make sure that the resulting
 cells are still useful, i.e. are not distorted so much that the cell is
 degenerated (indicated, for example, by negative Jacobians). Note that we
 do not have any provisions in this function to actually ensure this, we
 just have faith.

 After this lengthy introduction, here are the full 20 or so lines of
 code:

   template <int dim>
   void TopLevel<dim>::move_mesh()
   {
     pcout << "    Moving mesh..." << std::endl;

     std::vector<bool> vertex_touched(triangulation.n_vertices(), false);
     for (auto &cell : dof_handler.active_cell_iterators())
       for (unsigned int v = 0; v < GeometryInfo<dim>::vertices_per_cell; ++v)
         if (vertex_touched[cell->vertex_index(v)] == false)
           {
             vertex_touched[cell->vertex_index(v)] = true;

             Point<dim> vertex_displacement;
             for (unsigned int d = 0; d < dim; ++d)
               vertex_displacement[d] =
                 incremental_displacement(cell->vertex_dof_index(v, d));

             cell->vertex(v) += vertex_displacement;
           }
   }

 <a name="TopLevelsetup_quadrature_point_history"></a>
 <h4>TopLevel::setup_quadrature_point_history</h4>

 At the beginning of our computations, we needed to set up initial values
 of the history variables, such as the existing stresses in the material,
 that we store in each quadrature point. As mentioned above, we use the
 <code>user_pointer</code> for this that is available in each cell.

 To put this into larger perspective, we note that if we had previously
 available stresses in our model (which we assume do not exist for the
 purpose of this program), then we would need to interpolate the field of
 preexisting stresses to the quadrature points. Likewise, if we were to
 simulate elasto-plastic materials with hardening/softening, then we would
 have to store additional history variables like the present yield stress
 or the accumulated plastic strains in each quadrature
 point. Pre-existing hardening or weakening would then be implemented by
 interpolating these variables in the present function as well.
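For illustration, interpolating such a preexisting stress field could look
roughly like the following sketch. The function object <code>initial_stress</code>
is hypothetical (no such field exists in this program), and the loop assumes
the user pointers have already been set up as in the function below:

     FEValues<dim> fe_values(fe, quadrature_formula, update_quadrature_points);
     for (auto &cell : triangulation.active_cell_iterators())
       if (cell->is_locally_owned())
         {
           PointHistory<dim> *local_data =
             reinterpret_cast<PointHistory<dim> *>(cell->user_pointer());
           fe_values.reinit(cell);
           for (unsigned int q = 0; q < quadrature_formula.size(); ++q)
             // initial_stress is assumed to return a SymmetricTensor<2,dim>
             // for a given evaluation point:
             local_data[q].old_stress =
               initial_stress(fe_values.quadrature_point(q));
         }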
   template <int dim>
   void TopLevel<dim>::setup_quadrature_point_history()
   {

 For good measure, we set all user pointers of all cells, whether
 ours or not, to the null pointer. This way, if we ever access the user
 pointer of a cell which we should not have accessed, a segmentation
 fault will let us know that this should not have happened:

     triangulation.clear_user_data();

 Next, allocate the quadrature objects that are within the responsibility
 of this processor. This, of course, equals the number of cells that
 belong to this processor times the number of quadrature points our
 quadrature formula has on each cell. Since the `resize()` function does
 not actually shrink the amount of allocated memory if the requested new
 size is smaller than the old size, we resort to a trick to first free all
 memory, and then reallocate it: we declare an empty vector as a temporary
 variable and then swap the contents of the old vector and this temporary
 variable. This makes sure that the `quadrature_point_history` is now
 really empty, and we can let the temporary variable that now holds the
 previous contents of the vector go out of scope and be destroyed. In the
 next step we can then re-allocate as many elements as we need, with the
 vector default-initializing the `PointHistory` objects, which includes
 setting the stress variables to zero.

     std::vector<PointHistory<dim>> tmp;
     quadrature_point_history.swap(tmp);

     quadrature_point_history.resize(
       triangulation.n_locally_owned_active_cells() * quadrature_formula.size());

 Finally loop over all cells again and set the user pointers from the
 cells that belong to the present processor to point to the first
 quadrature point objects corresponding to this cell in the vector of
 such objects:

     unsigned int history_index = 0;
     for (auto &cell : triangulation.active_cell_iterators())
       if (cell->is_locally_owned())
         {
           cell->set_user_pointer(&quadrature_point_history[history_index]);
           history_index += quadrature_formula.size();
         }

 At the end, for good measure make sure that our count of elements was
 correct and that we have both used up all objects we allocated
 previously, and not point to any objects beyond the end of the
 vector. Such defensive programming strategies are always good checks to
 avoid accidental errors and to guard against future changes to this
 function that forget to update all uses of a variable at the same
 time. Recall that constructs using the <code>Assert</code> macro are
 optimized away in optimized mode, so do not affect the run time of
 optimized runs:

     Assert(history_index == quadrature_point_history.size(),
            ExcInternalError());
   }

 <a name="TopLevelupdate_quadrature_point_history"></a>
 <h4>TopLevel::update_quadrature_point_history</h4>

 At the end of each time step, we should have computed an incremental
 displacement update so that the material in its new configuration
 accommodates for the difference between the external body and boundary
 forces applied during this time step minus the forces exerted through
 preexisting internal stresses.
 In order to have the preexisting
 stresses available at the next time step, we therefore have to update the
 preexisting stresses with the stresses due to the incremental
 displacement computed during the present time step. Ideally, the
 resulting sum of internal stresses would exactly counter all external
 forces. Indeed, a simple experiment can make sure that this is so: if we
 choose boundary conditions and body forces to be time independent, then
 the forcing terms (the sum of external forces and internal stresses)
 should be exactly zero. If you make this experiment, you will realize
 from the output of the norm of the right hand side in each time step that
 this is almost the case: it is not exactly zero, since in the first time
 step the incremental displacement and stress updates were computed
 relative to the undeformed mesh, which was then deformed. In the second
 time step, we again compute displacement and stress updates, but this
 time in the deformed mesh -- there, the resulting updates are very small
 but not quite zero. This can be iterated, and in each such iteration the
 residual, i.e. the norm of the right hand side vector, is reduced; if one
 makes this little experiment, one realizes that the norm of this residual
 decays exponentially with the number of iterations, and after an initial
 very rapid decline is reduced by a factor of about 3.5 in each
 iteration (for one testcase I looked at; other testcases, and other
 numbers of unknowns, change the factor, but not the exponential decay).

 In a sense, this can then be considered as a quasi-timestepping scheme to
 resolve the nonlinear problem of solving large-deformation elasticity on
 a mesh that is moved along in a Lagrangian manner.

 Another complication is that the existing (old) stresses are defined on
 the old mesh, which we will move around after updating the stresses. If
 this mesh update involves rotations of the cell, then we need to also
 rotate the updated stress, since it was computed relative to the
 coordinate system of the old cell.

 Thus, what we need is the following: on each cell which the present
 processor owns, we need to extract the old stress from the data stored
 with each quadrature point, compute the stress update, add the two
 together, and then rotate the result together with the incremental
 rotation computed from the incremental displacement at the present
 quadrature point.
 We will detail these steps below:

   template <int dim>
   void TopLevel<dim>::update_quadrature_point_history()
   {

 First, set up an <code>FEValues</code> object by which we will evaluate
 the incremental displacements and the gradients thereof at the
 quadrature points, together with a vector that will hold this
 information:

     FEValues<dim> fe_values(fe,
                             quadrature_formula,
                             update_values | update_gradients);

     std::vector<std::vector<Tensor<1, dim>>> displacement_increment_grads(
       quadrature_formula.size(), std::vector<Tensor<1, dim>>(dim));

 Then loop over all cells and do the job in the cells that belong to our
 subdomain:

     for (auto &cell : dof_handler.active_cell_iterators())
       if (cell->is_locally_owned())
         {

 Next, get a pointer to the quadrature point history data local to
 the present cell, and, as a defensive measure, make sure that
 this pointer is within the bounds of the global array:

           PointHistory<dim> *local_quadrature_points_history =
             reinterpret_cast<PointHistory<dim> *>(cell->user_pointer());
           Assert(local_quadrature_points_history >=
                    &quadrature_point_history.front(),
                  ExcInternalError());
           Assert(local_quadrature_points_history <=
                    &quadrature_point_history.back(),
                  ExcInternalError());

 Then initialize the <code>FEValues</code> object on the present
 cell, and extract the gradients of the displacement at the
 quadrature points for later computation of the strains:

           fe_values.reinit(cell);
           fe_values.get_function_gradients(incremental_displacement,
                                            displacement_increment_grads);

 Then loop over the quadrature points of this cell:

           for (unsigned int q = 0; q < quadrature_formula.size(); ++q)
             {

 On each quadrature point, compute the strain increment from
 the gradients, and multiply it by the stress-strain tensor to
 get the stress update. Then add this update to the already
 existing stress at this point:

               const SymmetricTensor<2, dim> new_stress =
                 (local_quadrature_points_history[q].old_stress +
                  (stress_strain_tensor *
                   get_strain(displacement_increment_grads[q])));

 Finally, we have to rotate the result. For this, we first
 have to compute a rotation matrix at the present quadrature
 point from the incremental displacements. In fact, it can be
 computed from the gradients, and we already have a function
 for that purpose:

               const Tensor<2, dim> rotation =
                 get_rotation_matrix(displacement_increment_grads[q]);

 Note that the result, a rotation matrix, is in general not a
 symmetric tensor of rank 2, so we must store it as a full
 tensor.

 With this rotation matrix, we can compute the rotated tensor
 by contraction from the left and right, after we expand the
 symmetric tensor <code>new_stress</code> into a full tensor:

               const SymmetricTensor<2, dim> rotated_new_stress =
                 symmetrize(transpose(rotation) *
                            static_cast<Tensor<2, dim>>(new_stress) * rotation);

 Note that while the result of the multiplication of these
 three matrices should be symmetric, it is not due to floating
 point round off: we get an asymmetry on the order of 1e-16 of
 the off-diagonal elements of the result. When assigning the
 result to a <code>SymmetricTensor</code>, the constructor of
 that class checks the symmetry and realizes that it isn't
 exactly symmetric; it will then raise an exception. To avoid
 that, we explicitly symmetrize the result to make it exactly
 symmetric.

 The result of all these operations is then written back into
 the original place:

               local_quadrature_points_history[q].old_stress =
                 rotated_new_stress;
             }
         }
   }
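The <code>get_rotation_matrix</code> function used above is defined earlier in
the program, outside the part shown here. As a reminder of the idea, a 2d
version could look like the following sketch (an illustration of the approach,
not a verbatim copy of the tutorial's function): the scalar curl of the
incremental displacement field yields the linearized rotation angle, from
which the rotation matrix follows.

   Tensor<2, 2> get_rotation_matrix(const std::vector<Tensor<1, 2>> &grad_u)
   {
     // The (scalar) curl of a 2d vector field measures the local rotation
     // of the incremental displacement field:
     const double curl = (grad_u[1][0] - grad_u[0][1]);

     // From the curl we get the (linearized) rotation angle and build the
     // corresponding rotation matrix:
     const double angle = std::atan(curl);
     return Physics::Transformations::Rotations::rotation_matrix_2d(angle);
   }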
 This ends the project specific namespace <code>Step18</code>. The rest is
 as usual and as already shown in @ref step_17 "step-17": A <code>main()</code> function
 that initializes and terminates PETSc, calls the classes that do the
 actual work, and makes sure that we catch all exceptions that propagate
 up to this point:

 int main(int argc, char **argv)
 {
   try
     {
       using namespace dealii;
       using namespace Step18;

       Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv);

       TopLevel<3> elastic_problem;
       elastic_problem.run();
     }
   catch (std::exception &exc)
     {
       std::cerr << std::endl
                 << std::endl
                 << "----------------------------------------------------"
                 << std::endl;
       std::cerr << "Exception on processing: " << std::endl
                 << exc.what() << std::endl
                 << "Aborting!" << std::endl
                 << "----------------------------------------------------"
                 << std::endl;

       return 1;
     }
   catch (...)
     {
       std::cerr << std::endl
                 << std::endl
                 << "----------------------------------------------------"
                 << std::endl;
       std::cerr << "Unknown exception!" << std::endl
                 << "Aborting!" << std::endl
                 << "----------------------------------------------------"
                 << std::endl;
       return 1;
     }

   return 0;
 }

 <a name="Results"></a><h1>Results</h1>
 Running the program takes a good while if one uses debug mode; it takes about
 eleven minutes on my i7 desktop. Fortunately, the version compiled with
 optimizations is much faster; the program only takes about a minute and a half
 after recompiling with the command <tt>make release</tt> on the same machine, a
 much more reasonable time.

 If run, the program prints the following output, explaining what it is
 doing during all that time:
 [ 66%] Built target step-18
 [100%] Run step-18 with Release configuration
 Timestep 1 at time 1
   Cycle 0:
     Number of active cells:       3712 (by partition: 3712)
     Number of degrees of freedom: 17226 (by partition: 17226)
     Assembling system... norm of rhs is 1.88062e+10
     Solver converged in 103 iterations.
     Updating quadrature point data...
   Cycle 1:
     Number of active cells:       12812 (by partition: 12812)
     Number of degrees of freedom: 51738 (by partition: 51738)
     Assembling system... norm of rhs is 1.86145e+10
     Solver converged in 121 iterations.
     Updating quadrature point data...
     Moving mesh...

 Timestep 2 at time 2
     Assembling system... norm of rhs is 1.84169e+10
     Solver converged in 122 iterations.
     Updating quadrature point data...
     Moving mesh...

 Timestep 3 at time 3
     Assembling system... norm of rhs is 1.82355e+10
     Solver converged in 122 iterations.
     Updating quadrature point data...
     Moving mesh...

 Timestep 4 at time 4
     Assembling system... norm of rhs is 1.80728e+10
     Solver converged in 117 iterations.
     Updating quadrature point data...
     Moving mesh...

 Timestep 5 at time 5
     Assembling system... norm of rhs is 1.79318e+10
     Solver converged in 116 iterations.
     Updating quadrature point data...
     Moving mesh...

 Timestep 6 at time 6
     Assembling system... norm of rhs is 1.78171e+10
     Solver converged in 115 iterations.
     Updating quadrature point data...
     Moving mesh...

 Timestep 7 at time 7
     Assembling system... norm of rhs is 1.7737e+10
     Solver converged in 112 iterations.
     Updating quadrature point data...
     Moving mesh...

 Timestep 8 at time 8
     Assembling system... norm of rhs is 1.77127e+10
     Solver converged in 111 iterations.
     Updating quadrature point data...
     Moving mesh...

 Timestep 9 at time 9
     Assembling system... norm of rhs is 1.78207e+10
     Solver converged in 113 iterations.
     Updating quadrature point data...
     Moving mesh...

 Timestep 10 at time 10
     Assembling system... norm of rhs is 1.83544e+10
     Solver converged in 115 iterations.
     Updating quadrature point data...
     Moving mesh...

 [100%] Built target run
 make run  176.82s user 0.15s system 198% cpu 1:28.94 total
 In other words, it is computing on 12,000 cells and with some 52,000
 unknowns. Not a whole lot, but enough for a coupled three-dimensional
 problem to keep a computer busy for a while. At the end of the day,
 this is what we have for output:
 -rw-r--r-- 1 drwells users 1706059 Feb 13 19:36 solution-0010.000.vtu
 -rw-r--r-- 1 drwells users     761 Feb 13 19:36 solution-0010.pvtu
 -rw-r--r-- 1 drwells users      33 Feb 13 19:36 solution-0010.visit
 -rw-r--r-- 1 drwells users 1707907 Feb 13 19:36 solution-0009.000.vtu
 -rw-r--r-- 1 drwells users     761 Feb 13 19:36 solution-0009.pvtu
 -rw-r--r-- 1 drwells users      33 Feb 13 19:36 solution-0009.visit
 -rw-r--r-- 1 drwells users 1703771 Feb 13 19:35 solution-0008.000.vtu
 -rw-r--r-- 1 drwells users     761 Feb 13 19:35 solution-0008.pvtu
 -rw-r--r-- 1 drwells users      33 Feb 13 19:35 solution-0008.visit
 -rw-r--r-- 1 drwells users 1693671 Feb 13 19:35 solution-0007.000.vtu
 -rw-r--r-- 1 drwells users     761 Feb 13 19:35 solution-0007.pvtu
 -rw-r--r-- 1 drwells users      33 Feb 13 19:35 solution-0007.visit
 -rw-r--r-- 1 drwells users 1681847 Feb 13 19:35 solution-0006.000.vtu
 -rw-r--r-- 1 drwells users     761 Feb 13 19:35 solution-0006.pvtu
 -rw-r--r-- 1 drwells users      33 Feb 13 19:35 solution-0006.visit
 -rw-r--r-- 1 drwells users 1670115 Feb 13 19:35 solution-0005.000.vtu
 -rw-r--r-- 1 drwells users     761 Feb 13 19:35 solution-0005.pvtu
 -rw-r--r-- 1 drwells users      33 Feb 13 19:35 solution-0005.visit
 -rw-r--r-- 1 drwells users 1658559 Feb 13 19:35 solution-0004.000.vtu
 -rw-r--r-- 1 drwells users     761 Feb 13 19:35 solution-0004.pvtu
 -rw-r--r-- 1 drwells users      33 Feb 13 19:35 solution-0004.visit
 -rw-r--r-- 1 drwells users 1639983 Feb 13 19:35 solution-0003.000.vtu
 -rw-r--r-- 1 drwells users     761 Feb 13 19:35 solution-0003.pvtu
 -rw-r--r-- 1 drwells users      33 Feb 13 19:35 solution-0003.visit
 -rw-r--r-- 1 drwells users 1625851 Feb 13 19:35 solution-0002.000.vtu
 -rw-r--r-- 1 drwells users     761 Feb 13 19:35 solution-0002.pvtu
 -rw-r--r-- 1 drwells users      33 Feb 13 19:35 solution-0002.visit
 -rw-r--r-- 1 drwells users 1616035 Feb 13 19:34 solution-0001.000.vtu
 -rw-r--r-- 1 drwells users     761 Feb 13 19:34 solution-0001.pvtu
 -rw-r--r-- 1 drwells users      33 Feb 13 19:34 solution-0001.visit
 If we visualize these files with VisIt or Paraview, we get to see the full
 picture of the disaster our forced compression wreaks on the cylinder
 (colors in the images encode the norm of the stress in the material):
<div class="threecolumn" style="width: 80%">
  <div class="parent">
    <div class="img" align="center">
      <img src="https://www.dealii.org/images/steps/developer/step-18.sequential-0002.0000.png"
           alt="Time = 2">
    </div>
    <div class="text" align="center">
      Time = 2
    </div>
  </div>
  <div class="parent">
    <div class="img" align="center">
      <img src="https://www.dealii.org/images/steps/developer/step-18.sequential-0005.0000.png"
           alt="Time = 5">
    </div>
    <div class="text" align="center">
      Time = 5
    </div>
  </div>
  <div class="parent">
    <div class="img" align="center">
      <img src="https://www.dealii.org/images/steps/developer/step-18.sequential-0007.0000.png"
           alt="Time = 7">
    </div>
    <div class="text" align="center">
      Time = 7
    </div>
  </div>
</div>

<div class="threecolumn" style="width: 80%">
  <div class="parent">
    <div class="img" align="center">
      <img src="https://www.dealii.org/images/steps/developer/step-18.sequential-0008.0000.png"
           alt="Time = 8">
    </div>
    <div class="text" align="center">
      Time = 8
    </div>
  </div>
  <div class="parent">
    <div class="img" align="center">
      <img src="https://www.dealii.org/images/steps/developer/step-18.sequential-0009.0000.png"
           alt="Time = 9">
    </div>
    <div class="text" align="center">
      Time = 9
    </div>
  </div>
  <div class="parent">
    <div class="img" align="center">
      <img src="https://www.dealii.org/images/steps/developer/step-18.sequential-0010.0000.png"
           alt="Time = 10">
    </div>
    <div class="text" align="center">
      Time = 10
    </div>
  </div>
</div>
As is clearly visible, as we keep compressing the cylinder, it starts
to bow out near the fully constrained bottom surface and, after about eight
time units, buckle in an azimuthally symmetric manner.
Although the result appears plausible for the symmetric geometry and loading,
it is yet to be established whether or not the computation is fully converged.
In order to see whether it is, we ran the program again with one more global
refinement at the beginning and with the time step halved. This would have
taken a very long time on a single machine, so we used a proper workstation and
ran it on 16 processors in parallel. The beginning of the output now looks like
this:
@code
Timestep 1 at time 0.5
  Number of active cells: 29696 (by partition: 1808+1802+1894+1881+1870+1840+1884+1810+1876+1818+1870+1884+1854+1903+1816+1886)
  Number of degrees of freedom: 113100 (by partition: 6936+6930+7305+7116+7326+6869+7331+6786+7193+6829+7093+7162+6920+7280+6843+7181)
  Assembling system... norm of rhs is 1.10765e+10
  Solver converged in 209 iterations.
  Updating quadrature point data...
  Number of active cells: 102034 (by partition: 6387+6202+6421+6341+6408+6201+6428+6428+6385+6294+6506+6244+6417+6527+6299+6546)
  Number of degrees of freedom: 359337 (by partition: 23255+21308+24774+24019+22304+21415+22430+22184+22298+21796+22396+21592+22325+22553+21977+22711)
  Assembling system... norm of rhs is 1.35759e+10
  Solver converged in 268 iterations.
  Updating quadrature point data...

Timestep 2 at time 1
  Assembling system... norm of rhs is 1.34674e+10
  Solver converged in 267 iterations.
  Updating quadrature point data...

Timestep 3 at time 1.5
  Assembling system... norm of rhs is 1.33607e+10
  Solver converged in 265 iterations.
  Updating quadrature point data...

Timestep 4 at time 2
  Assembling system... norm of rhs is 1.32558e+10
  Solver converged in 263 iterations.
  Updating quadrature point data...

[...]

Timestep 20 at time 10
  Assembling system... norm of rhs is 1.47755e+10
  Solver converged in 425 iterations.
  Updating quadrature point data...
@endcode
That's quite a good number of unknowns, given that we are in 3d. The output of
this program is 16 files for each time step:

@code
$ ls -l solution-0001*
-rw-r--r-- 1 wellsd2 user 761065 Feb 13 21:09 solution-0001.000.vtu
-rw-r--r-- 1 wellsd2 user 759277 Feb 13 21:09 solution-0001.001.vtu
-rw-r--r-- 1 wellsd2 user 761217 Feb 13 21:09 solution-0001.002.vtu
-rw-r--r-- 1 wellsd2 user 761605 Feb 13 21:09 solution-0001.003.vtu
-rw-r--r-- 1 wellsd2 user 756917 Feb 13 21:09 solution-0001.004.vtu
-rw-r--r-- 1 wellsd2 user 752669 Feb 13 21:09 solution-0001.005.vtu
-rw-r--r-- 1 wellsd2 user 735217 Feb 13 21:09 solution-0001.006.vtu
-rw-r--r-- 1 wellsd2 user 750065 Feb 13 21:09 solution-0001.007.vtu
-rw-r--r-- 1 wellsd2 user 760273 Feb 13 21:09 solution-0001.008.vtu
-rw-r--r-- 1 wellsd2 user 777265 Feb 13 21:09 solution-0001.009.vtu
-rw-r--r-- 1 wellsd2 user 772469 Feb 13 21:09 solution-0001.010.vtu
-rw-r--r-- 1 wellsd2 user 760833 Feb 13 21:09 solution-0001.011.vtu
-rw-r--r-- 1 wellsd2 user 782241 Feb 13 21:09 solution-0001.012.vtu
-rw-r--r-- 1 wellsd2 user 748905 Feb 13 21:09 solution-0001.013.vtu
-rw-r--r-- 1 wellsd2 user 738413 Feb 13 21:09 solution-0001.014.vtu
-rw-r--r-- 1 wellsd2 user 762133 Feb 13 21:09 solution-0001.015.vtu
-rw-r--r-- 1 wellsd2 user   1421 Feb 13 21:09 solution-0001.pvtu
-rw-r--r-- 1 wellsd2 user    364 Feb 13 21:09 solution-0001.visit
@endcode

Here are, first, the mesh on which we compute and the partitioning
for the 16 processors:

<div class="twocolumn" style="width: 80%">
  <div class="parent">
    <div class="img" align="center">
      <img src="https://www.dealii.org/images/steps/developer/step-18.parallel-000mesh.png"
           alt="Discretization">
    </div>
    <div class="text" align="center">
      Discretization
    </div>
  </div>
  <div class="parent">
    <div class="img" align="center">
      <img src="https://www.dealii.org/images/steps/developer/step-18.parallel-0002.p.png"
           alt="Parallel partitioning">
    </div>
    <div class="text" align="center">
      Parallel partitioning
    </div>
  </div>
</div>

Finally, here is the same output as we have shown before for the much smaller
sequential case:

<div class="threecolumn" style="width: 80%">
  <div class="parent">
    <div class="img" align="center">
      <img src="https://www.dealii.org/images/steps/developer/step-18.parallel-0002.s.png"
           alt="Time = 2">
    </div>
    <div class="text" align="center">
      Time = 2
    </div>
  </div>
  <div class="parent">
    <div class="img" align="center">
      <img src="https://www.dealii.org/images/steps/developer/step-18.parallel-0005.s.png"
           alt="Time = 5">
    </div>
    <div class="text" align="center">
      Time = 5
    </div>
  </div>
  <div class="parent">
    <div class="img" align="center">
      <img src="https://www.dealii.org/images/steps/developer/step-18.parallel-0007.s.png"
           alt="Time = 7">
    </div>
    <div class="text" align="center">
      Time = 7
    </div>
  </div>
</div>

<div class="threecolumn" style="width: 80%">
  <div class="parent">
    <div class="img" align="center">
      <img src="https://www.dealii.org/images/steps/developer/step-18.parallel-0008.s.png"
           alt="Time = 8">
    </div>
    <div class="text" align="center">
      Time = 8
    </div>
  </div>
  <div class="parent">
    <div class="img" align="center">
      <img src="https://www.dealii.org/images/steps/developer/step-18.parallel-0009.s.png"
           alt="Time = 9">
    </div>
    <div class="text" align="center">
      Time = 9
    </div>
  </div>
  <div class="parent">
    <div class="img" align="center">
      <img src="https://www.dealii.org/images/steps/developer/step-18.parallel-0010.s.png"
           alt="Time = 10">
    </div>
    <div class="text" align="center">
      Time = 10
    </div>
  </div>
</div>

As before, we observe that at high axial compression the cylinder begins
to buckle, but this time ultimately collapses
on itself. In contrast to our first run, towards the end of the simulation
the deflection pattern becomes nonsymmetric (the central bulge deflects
laterally). The model clearly does not provide for this (all our forces and
boundary deflections are symmetric) but the effect is probably physically
correct anyway: in reality, small inhomogeneities in the body's material
properties would lead it to buckle to one side to evade the forcing; in
numerical simulations, small perturbations such as numerical round-off or an
inexact solution of a linear system by an iterative solver could have the
same effect. Another typical source of asymmetries in adaptive computations
is that only a certain fraction of cells is refined in each step, which may
lead to asymmetric meshes even if the original mesh was symmetric.
If one compares this with the previous run, the results are both
qualitatively and quantitatively different. The previous computation was
therefore certainly not converged, though we can't say anything for sure about
the present one. One would need an even finer computation to find out. However,
the point may be moot: looking at the last picture in detail, it is pretty
obvious that not only is the linear small-deformation model we chose completely
inadequate, but for a realistic simulation we would also need to make sure that
the body does not intersect itself during deformation (if we continued
compressing the cylinder we would observe some self-intersection).
Without such a formulation we cannot expect anything to make physical sense,
even if it produces nice pictures!

<a name="Possibledirectionsforextensions"></a><h3>Possible directions for extensions</h3>

The program as is does not really solve an equation that has many applications
in practice: quasi-static material deformation based on a purely elastic law
is almost boring. However, the program may serve as the starting point for
more interesting experiments, and that indeed was the initial motivation for
writing it. Here are some suggestions of what the program is missing and in
what direction it may be extended:

<a name="Plasticitymodels"></a><h5>Plasticity models</h5>

The most obvious extension is to use a more
realistic material model for large-scale quasistatic deformation. The natural
choice for this would be plasticity, in which a nonlinear relationship between
stress and strain replaces equation <a href="#step_18.stress-strain">[stress-strain]</a>. Plasticity
models are usually rather complicated to program since the stress-strain
dependence is generally non-smooth. The material can be thought of as being able
to withstand only a maximal stress (the yield stress) after which it starts to
&ldquo;flow&rdquo;. A mathematical description of this can be given in the form of a
variational inequality, which alternatively can be treated as minimizing the
energy
@f[
  (\varepsilon(\mathbf{u}), C\varepsilon(\mathbf{u}))_{\Omega}
  - (\mathbf{f}, \mathbf{u})_{\Omega} - (\mathbf{b}, \mathbf{u})_{\Gamma_N},
@f]
subject to the constraint
@f[
  f(\sigma(\mathbf{u})) \le 0
@f]
on the stress (a common example of such a yield function @f$f@f$ is the
von Mises criterion, which bounds the norm of the stress deviator by the yield
stress). This extension makes the problem to be solved in each time step
nonlinear, so we need another loop within each time step.

Without going into further details of this model, we refer to the excellent
book by Simo and Hughes on &ldquo;Computational Inelasticity&rdquo; for a
comprehensive overview of computational strategies for solving plastic
models. Alternatively, a brief but concise description of an algorithm for
plasticity is given in an article by S. Commend, A. Truty, and Th. Zimmermann.

<a name="Stabilizationissues"></a><h5>Stabilization issues</h5>

The formulation we have chosen, i.e. using
piecewise (bi-, tri-)linear elements for all components of the displacement
vector, and treating the stress as a variable dependent on the displacement, is
appropriate for most materials. However, this so-called displacement-based
formulation becomes unstable and exhibits spurious modes for incompressible or
nearly-incompressible materials. While fluids are usually not elastic (in most
cases, the stress depends on velocity gradients, not displacement gradients,
although there are exceptions such as electro-rheologic fluids), there are a
few solids that are nearly incompressible, for example rubber. Another case is
that many plasticity models ultimately let the material become incompressible,
although this is outside the scope of the present program.

Incompressibility is characterized by Poisson's ratio
@f[
  \nu = \frac{\lambda}{2(\lambda+\mu)},
@f]
where @f$\lambda,\mu@f$ are the Lamé constants of the material.
Physical constraints indicate that @f$-1\le \nu\le \frac 12@f$ (the condition
also follows from mathematical stability considerations). If @f$\nu@f$
approaches @f$\frac 12@f$, then the material becomes incompressible. In that
case, pure displacement-based formulations are no longer appropriate for the
solution of such problems, and stabilization techniques have to be employed
for a stable and accurate solution. The book and paper cited above give
indications as to how to do this, but there is also a large volume of
literature on this subject; a good start to get an overview of the topic can
be found in the references of the paper by H.-Y. Duan and Q. Lin; @cite DL05.
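
To put numbers to this, here is a small stand-alone snippet (not part of the
tutorial program; the material constants are merely illustrative values of
roughly the magnitude one would use for steel) that evaluates the formula
above:

@code
#include <iostream>

int main()
{
  // Illustrative Lame constants (Pa); these are assumed values, not
  // necessarily the ones used elsewhere:
  const double lambda = 9.7e10; // first Lame constant
  const double mu     = 7.6e10; // shear modulus

  // nu = lambda / (2 (lambda + mu)) tends to 1/2 as lambda/mu grows,
  // i.e. as the material becomes incompressible:
  const double nu = lambda / (2 * (lambda + mu));
  std::cout << "Poisson's ratio nu = " << nu << std::endl; // approx. 0.28
}
@endcode

A value of about 0.28 is comfortably below @f$\frac 12@f$, which is why the
displacement-based formulation used here does not require stabilization.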
<a name="Refinementduringtimesteps"></a><h5>Refinement during timesteps</h5>
In the present form, the program
only refines the initial mesh a number of times, but then never again. For any
kind of realistic simulation, one would want to extend this so that the mesh
is refined and coarsened every few time steps instead. This is not hard to do,
in fact, but has been left for future tutorial programs or as an exercise, if
you like.

The main complication one has to overcome is that one has to
transfer the data that is stored in the quadrature points of the cells of the
old mesh to the new mesh, preferably by some sort of projection scheme. The
general approach to this would go like this:
- At the beginning, the data is only available in the quadrature points of
  individual cells, not as a finite element field that is defined everywhere.

- So let us find a finite element field that <i>is</i> defined everywhere so
  that we can later interpolate it to the quadrature points of the new
  mesh. In general, it will be difficult to find a continuous finite element
  field that matches the values in the quadrature points exactly because the
  number of degrees of freedom of these fields does not match the number of
  quadrature points there are, and the nodal values of this global field will
  either be over- or underdetermined. But it is usually not very difficult to
  find a discontinuous field that matches the values in the quadrature points;
  for example, if you have a QGauss(2) quadrature formula (i.e. 4 points per
  cell in 2d, 8 points in 3d), then one would use a finite element of kind
  FE_DGQ(1), i.e. bi-/tri-linear functions as these have 4 degrees of freedom
  per cell in 2d and 8 in 3d.
- There are functions that can make this conversion from individual points to
  a global field simpler. The following piece of pseudo-code should help if
  you use a QGauss(2) quadrature formula. Note that the multiplication by the
  projection matrix below takes a vector of scalar components, i.e., we can only
  convert one set of scalars at a time from the quadrature points to the degrees
  of freedom and vice versa. So we need to store each component of stress separately,
  which requires <code>dim*dim</code> vectors. We'll store this set of vectors in a 2D array to
  make it easier to read off components in the same way you would the stress tensor.
  Thus, we'll loop over the components of stress on each cell and store
  these values in the global history field. (The prefix <code>history_</code>
  indicates that we work with quantities related to the history variables defined
  in the quadrature points.)
@code
// A discontinuous field with as many dofs per cell as there are
// quadrature points, plus per-cell scratch vectors for the conversion:
FE_DGQ<dim>     history_fe (1);
DoFHandler<dim> history_dof_handler (triangulation);
history_dof_handler.distribute_dofs (history_fe);

std::vector< std::vector< Vector<double> > >
  history_field (dim, std::vector< Vector<double> >(dim)),
  local_history_values_at_qpoints (dim, std::vector< Vector<double> >(dim)),
  local_history_fe_values (dim, std::vector< Vector<double> >(dim));

for (unsigned int i=0; i<dim; ++i)
  for (unsigned int j=0; j<dim; ++j)
    {
      history_field[i][j].reinit(history_dof_handler.n_dofs());
      local_history_values_at_qpoints[i][j].reinit(quadrature.size());
      local_history_fe_values[i][j].reinit(history_fe.dofs_per_cell);
    }

// The matrix that projects the values at the quadrature points of one
// cell onto the dofs of that cell's FE_DGQ element:
FullMatrix<double> qpoint_to_dof_matrix (history_fe.dofs_per_cell,
                                         quadrature.size());
FETools::compute_projection_from_quadrature_points_matrix
  (history_fe,
   quadrature, quadrature,
   qpoint_to_dof_matrix);

// Walk the two DoFHandlers in lockstep and convert the stored stresses
// cell by cell:
typename DoFHandler<dim>::active_cell_iterator
  cell    = dof_handler.begin_active(),
  endc    = dof_handler.end(),
  dg_cell = history_dof_handler.begin_active();

for (; cell!=endc; ++cell, ++dg_cell)
  {
    PointHistory<dim> *local_quadrature_points_history
      = reinterpret_cast<PointHistory<dim> *>(cell->user_pointer());

    Assert (local_quadrature_points_history >=
              &quadrature_point_history.front(),
            ExcInternalError());
    Assert (local_quadrature_points_history <
              &quadrature_point_history.back(),
            ExcInternalError());

    for (unsigned int i=0; i<dim; ++i)
      for (unsigned int j=0; j<dim; ++j)
        {
          // Collect component (i,j) of the stress at all quadrature
          // points of this cell, ...
          for (unsigned int q=0; q<quadrature.size(); ++q)
            local_history_values_at_qpoints[i][j](q)
              = local_quadrature_points_history[q].old_stress[i][j];

          // ...project it onto the cell's FE_DGQ dofs, ...
          qpoint_to_dof_matrix.vmult (local_history_fe_values[i][j],
                                      local_history_values_at_qpoints[i][j]);

          // ...and copy these into the global history field:
          dg_cell->set_dof_values (local_history_fe_values[i][j],
                                   history_field[i][j]);
        }
  }
@endcode
- Now that we have a global field, we can refine the mesh and transfer the
  <code>history_field</code> vector as usual, for example with the
  SolutionTransfer class, which will interpolate everything from the old to
  the new mesh (a sketch follows after this list).
- In a final step, we have to get the data back from the now interpolated
  global field to the quadrature points on the new mesh. The following code
  will do that:

@code
// The matrix that evaluates a cell's FE_DGQ dofs at its quadrature points:
FullMatrix<double> dof_to_qpoint_matrix (quadrature.size(),
                                         history_fe.dofs_per_cell);
FETools::compute_interpolation_to_quadrature_points_matrix
  (history_fe,
   quadrature,
   dof_to_qpoint_matrix);

typename DoFHandler<dim>::active_cell_iterator
  cell    = dof_handler.begin_active(),
  endc    = dof_handler.end(),
  dg_cell = history_dof_handler.begin_active();

for (; cell != endc; ++cell, ++dg_cell)
  {
    PointHistory<dim> *local_quadrature_points_history
      = reinterpret_cast<PointHistory<dim> *>(cell->user_pointer());

    Assert (local_quadrature_points_history >=
              &quadrature_point_history.front(),
            ExcInternalError());
    Assert (local_quadrature_points_history <
              &quadrature_point_history.back(),
            ExcInternalError());

    for (unsigned int i=0; i<dim; ++i)
      for (unsigned int j=0; j<dim; ++j)
        {
          // Read the cell's dof values of one stress component from the
          // global field, ...
          dg_cell->get_dof_values (history_field[i][j],
                                   local_history_fe_values[i][j]);

          // ...evaluate them at the quadrature points, ...
          dof_to_qpoint_matrix.vmult (local_history_values_at_qpoints[i][j],
                                      local_history_fe_values[i][j]);

          // ...and write the result back into the per-cell history data:
          for (unsigned int q=0; q<quadrature.size(); ++q)
            local_quadrature_points_history[q].old_stress[i][j]
              = local_history_values_at_qpoints[i][j](q);
        }
  }
@endcode
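
As for the mesh refinement and transfer step referenced in the list above,
a minimal sketch could look as follows. This is not code from the tutorial;
it assumes the serial case, the objects defined in the pseudo-code above,
and that refinement flags have already been set. Each of the
<code>dim*dim</code> component fields has to be transferred in this way:

@code
// Sketch only: carry the global history field across mesh refinement.
SolutionTransfer<dim> solution_transfer (history_dof_handler);

triangulation.prepare_coarsening_and_refinement ();
// (For brevity, only component (i,j) is shown; in practice one would
// transfer all dim*dim fields, e.g. via the overloads taking a vector
// of vectors.)
solution_transfer.prepare_for_coarsening_and_refinement (history_field[i][j]);

triangulation.execute_coarsening_and_refinement ();

// The DoFHandler must be re-enumerated on the new mesh before the old
// field can be interpolated onto it:
history_dof_handler.distribute_dofs (history_fe);

Vector<double> tmp (history_dof_handler.n_dofs());
solution_transfer.interpolate (history_field[i][j], tmp);
history_field[i][j] = tmp;
@endcode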
It becomes a bit more complicated once we run the program in parallel, since
then each process only stores this data for the cells it owned on the old
mesh. That said, using a parallel vector for <code>history_field</code> will
do the trick if you put a call to <code>compress</code> after the transfer
from quadrature points into the global vector.
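
For instance, under the assumption that <code>history_field</code> then holds
distributed vectors (say, of type <code>PETScWrappers::MPI::Vector</code>),
the quadrature-point-to-field loop above would be followed by a sketch like
this:

@code
// Sketch only: after each process has written the dof values of its own
// cells via set_dof_values(), exchange the entries between processes:
for (unsigned int i=0; i<dim; ++i)
  for (unsigned int j=0; j<dim; ++j)
    history_field[i][j].compress (VectorOperation::insert);
@endcode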
<a name="Ensuringmeshregularity"></a><h5>Ensuring mesh regularity</h5>
At present, the program makes no attempt
to make sure that a cell, after moving its vertices at the end of the time
step, still has a valid geometry (i.e. that its Jacobian determinant is
positive and bounded away from zero everywhere). It is, in fact, not very hard
to set boundary values and forcing terms in such a way that one gets distorted
and inverted cells rather quickly. Certainly, in some cases of large
deformation, this is unavoidable with a mesh of finite mesh size, but in some
other cases this should be preventable by appropriate mesh refinement and/or a
reduction of the time step size. The program does not do that, but a more
sophisticated version definitely should employ some sort of heuristic defining
what amount of deformation of cells is acceptable, and what isn't.
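
A very simple heuristic of this kind could, as a purely illustrative sketch
(none of this is in the program; <code>initial_cell_measure</code> is a
hypothetical array one would have to fill before the first time step, and the
factor 0.1 is an arbitrary choice), compare each cell's current volume to its
initial one after the mesh has been moved:

@code
// Sketch only: flag trouble if any cell has lost most of its volume.
bool mesh_is_acceptable = true;
for (const auto &cell : triangulation.active_cell_iterators())
  if (cell->measure() <
      0.1 * initial_cell_measure[cell->active_cell_index()])
    {
      mesh_is_acceptable = false;
      break;
    }

if (!mesh_is_acceptable)
  {
    // Undo the last mesh motion and repeat the step with a smaller
    // time step (not shown).
  }
@endcode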
<a name="PlainProg"></a>
<h1> The plain program</h1>
@include "step-18.cc"