#include "PhaseFieldSolver.h"

void InitialValues::vector_value(const Point<2> &p,
                                 Vector<double> &values) const
<h1>Annotated version of PhaseFieldSolver.cpp</h1>
#include "PhaseFieldSolver.h"

PhaseFieldSolver::PhaseFieldSolver()
    : mpi_communicator(MPI_COMM_WORLD)
    , pcout(std::cout, (this_mpi_process == 0))
<h1>Annotated version of PhaseFieldSolver.h</h1>
#ifndef KOBAYASHI_PARALLEL_PHASEFIELDSOLVER_H
#define KOBAYASHI_PARALLEL_PHASEFIELDSOLVER_H

#include <deal.II/base/quadrature_lib.h>
#include <deal.II/base/function.h>
#include <deal.II/base/utilities.h>
#include <deal.II/lac/vector.h>
#include <deal.II/lac/full_matrix.h>
#include <deal.II/lac/sparse_matrix.h>
#include <deal.II/lac/sparse_direct.h>
#include <deal.II/lac/dynamic_sparsity_pattern.h>
#include <deal.II/lac/solver_cg.h>
#include <deal.II/lac/precondition.h>
#include <deal.II/lac/affine_constraints.h>
#include <deal.II/grid/tria.h>
#include <deal.II/grid/grid_generator.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/dofs/dof_tools.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/fe/fe_values.h>
#include <deal.II/fe/fe_system.h>
#include <deal.II/numerics/vector_tools.h>
#include <deal.II/numerics/matrix_tools.h>
#include <deal.II/numerics/data_out.h>
#include <deal.II/grid/grid_in.h>

For parallel computation:

#include <deal.II/base/conditional_ostream.h>
#include <deal.II/base/mpi.h>
#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/petsc_sparse_matrix.h>
#include <deal.II/lac/petsc_solver.h>
#include <deal.II/lac/petsc_precondition.h>
#include <deal.II/grid/grid_tools.h>
#include <deal.II/dofs/dof_renumbering.h>

#include <deal.II/distributed/solution_transfer.h>

#include <iostream>
class PhaseFieldSolver {
  PhaseFieldSolver();

  void   make_grid_and_dofs();
  void   assemble_system();
  void   output_results(const unsigned int timestep_number) const;
  double compute_residual();
  void   applying_bc();
  float  get_random_number();

  const double final_time, time_step;
  const double theta;
  const double epsilon, tau, gamma, latent_heat, alpha, t_eq, a;

class InitialValues : public Function<2>
  virtual void vector_value(const Point<2> &p,
                            Vector<double> &values) const;
<h1>Annotated version of applying_bc.cpp</h1>
#include "PhaseFieldSolver.h"

void PhaseFieldSolver::applying_bc(){

  QGauss<2> quadrature_formula(fe.degree + 1);
                        quadrature_formula,

  std::map<types::global_dof_index, double> boundary_values;

Prescribing p = 1 at the left face (this value will be maintained in the subsequent iterations, because a zero boundary condition is applied to the update during the Newton-Raphson iterations):

                                           boundary_values, p_mask);
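The call that fills this map is elided in the listing above; only its trailing arguments survive. In deal.II this is typically done with VectorTools::interpolate_boundary_values restricted to the p component through a ComponentMask. A minimal sketch, in which the boundary id, the constant function, and the extractor's component index are assumptions rather than the program's verified values:

  // Sketch (assumed, not verbatim from the program): set p = 1 on the
  // left boundary, leaving the temperature unconstrained via the mask.
  const FEValuesExtractors::Scalar phase_parameter(0);
  const ComponentMask p_mask = fe.component_mask(phase_parameter);
  VectorTools::interpolate_boundary_values(dof_handler,
                                           1, // assumed id of the left face
                                           Functions::ConstantFunction<2>(1.0, 2),
                                           boundary_values,
                                           p_mask);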
To apply the boundary values only to the solution vector, without touching the Jacobian matrix and the right-hand-side vector:

  for (auto &boundary_value : boundary_values)
    old_solution(boundary_value.first) = boundary_value.second;
<h1>Annotated version of assemble_system.cpp</h1>
#include "PhaseFieldSolver.h"

void PhaseFieldSolver::assemble_system() {

Each solution variable is extracted as a scalar so that its shape functions can be addressed individually:
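The extractor objects this comment refers to are elided from the listing; a minimal sketch of what they presumably look like, assuming p is component 0 and T component 1 of the FESystem:

  // Assumed declarations: scalar extractors for the two solution components.
  const FEValuesExtractors::Scalar phase_parameter(0); // p
  const FEValuesExtractors::Scalar temperature(1);     // T
  // fe_values[phase_parameter] and fe_values[temperature] below then return
  // views that evaluate only the shape functions of that scalar field.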
  QGauss<2> quadrature_formula(fe.degree + 1);
                        quadrature_formula,

  const unsigned int dofs_per_cell = fe.n_dofs_per_cell();
  const unsigned int n_q_points    = quadrature_formula.size();
Values at the old Newton iteration:

  std::vector<Tensor<1, 2>> old_newton_solution_gradients_p(n_q_points);
  std::vector<double>       old_newton_solution_values_p(n_q_points);
  std::vector<Tensor<1, 2>> old_newton_solution_gradients_t(n_q_points);
  std::vector<double>       old_newton_solution_values_t(n_q_points);

Values at the old time step:

  std::vector<Tensor<1, 2>> old_time_solution_gradients_p(n_q_points);
  std::vector<double>       old_time_solution_values_p(n_q_points);
  std::vector<Tensor<1, 2>> old_time_solution_gradients_t(n_q_points);
  std::vector<double>       old_time_solution_values_t(n_q_points);

  std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);
  jacobian_matrix = 0.0;
  system_rhs      = 0.0;
  for (const auto &cell : dof_handler.active_cell_iterators()){
    if (cell->subdomain_id() == this_mpi_process) {
      fe_values.reinit(cell);

Copying the old solution values:

      fe_values[phase_parameter].get_function_values(conv_solution_np, old_newton_solution_values_p);
      fe_values[phase_parameter].get_function_gradients(conv_solution_np, old_newton_solution_gradients_p);
      fe_values[temperature].get_function_values(conv_solution_np, old_newton_solution_values_t);
      fe_values[temperature].get_function_gradients(conv_solution_np, old_newton_solution_gradients_t);
      fe_values[phase_parameter].get_function_values(old_solution_np, old_time_solution_values_p);
      fe_values[phase_parameter].get_function_gradients(old_solution_np, old_time_solution_gradients_p);
      fe_values[temperature].get_function_values(old_solution_np, old_time_solution_values_t);
      fe_values[temperature].get_function_gradients(old_solution_np, old_time_solution_gradients_t);
      for (unsigned int q = 0; q < n_q_points; ++q){
        double khi = get_random_number();

        double p_on      = old_newton_solution_values_p[q];
        auto   grad_p_on = old_newton_solution_gradients_p[q];
        double p_ot      = old_time_solution_values_p[q];
        auto   grad_p_ot = old_time_solution_gradients_p[q];
        double t_on      = old_newton_solution_values_t[q];
        auto   grad_t_on = old_newton_solution_gradients_t[q];
        double t_ot      = old_time_solution_values_t[q];
        auto   grad_t_ot = old_time_solution_gradients_t[q];
        for (unsigned int i = 0; i < dofs_per_cell; ++i){
          double psi_i      = fe_values[phase_parameter].value(i, q);
          auto   grad_psi_i = fe_values[phase_parameter].gradient(i, q);
          double phi_i      = fe_values[temperature].value(i, q);
          auto   grad_phi_i = fe_values[temperature].gradient(i, q);

          for (unsigned int j = 0; j < dofs_per_cell; ++j){
            double psi_j      = fe_values[phase_parameter].value(j, q);
            auto   grad_psi_j = fe_values[phase_parameter].gradient(j, q);
            double phi_j      = fe_values[temperature].value(j, q);
            auto   grad_phi_j = fe_values[temperature].gradient(j, q);

            double mp   = psi_i*(tau*psi_j);
            double kp   = grad_psi_i*(std::pow(epsilon,2)*grad_psi_j);
            double m    = (alpha/M_PI)*std::atan(gamma*(t_eq - t_on));
            double t1   = (1-p_on)*(p_on-0.5+m);
            double t2   = -(p_on)*(p_on-0.5+m);
            double t3   = (p_on)*(1-p_on);
            double nl_p = psi_i*((t1+t2+t3)*psi_j);

Adding random noise at the interface:

            nl_p -= a*khi*psi_i*((1.0 - 2*(p_on))*psi_j);
            double f1_p = mp + time_step*theta*kp - time_step*theta*nl_p;

            double t4   = (p_on)*(1-p_on)*(-(alpha*gamma/(M_PI*(1+std::pow((gamma*(t_eq-t_on)),2)))));
            double nl_t = psi_i*(t4*phi_j);
            double f1_t = -time_step*theta*nl_t;

            double mpt  = phi_i*(latent_heat*psi_j);
            double f2_p = -mpt;

            double mt   = phi_i*(phi_j);
            double kt   = grad_phi_i*(grad_phi_j);
            double f2_t = mt + time_step*theta*kt;

Assembling the Jacobian matrix:

            cell_matrix(i,j) += (f1_p + f1_t + f2_p + f2_t)*fe_values.JxW(q);
Finding f1 and f2 at the previous iteration for the right-hand-side vector:

          double mp_n = psi_i*(tau*p_on);
          double kp_n = grad_psi_i*(std::pow(epsilon,2)*grad_p_on);
          double m_n  = (alpha/M_PI)*std::atan(gamma*(t_eq-t_on));
          double nl_n = psi_i*((p_on)*(1-p_on)*(p_on-0.5+m_n));

          double mp_t = psi_i*(tau*p_ot);
          double kp_t = grad_psi_i*(tau*grad_p_ot);
          double m_t  = (alpha/M_PI)*std::atan(gamma*(t_eq-t_ot));
          double nl_t = psi_i*(p_ot)*(1-p_ot)*(p_ot-0.5+m_t);

Adding random noise at the interface:

          nl_n -= psi_i*(a*khi*(p_on)*(1-p_on));
          nl_t -= psi_i*(a*khi*(p_ot)*(1-p_ot));

          double f1n = mp_n + time_step*theta*kp_n - time_step*theta*nl_n - mp_t + time_step*(1-theta)*kp_t - time_step*(1-theta)*nl_t;

          double mt_n  = phi_i*(t_on);
          double kt_n  = grad_phi_i*(grad_t_on);
          double mpt_n = phi_i*(latent_heat*p_on);

          double mt_t  = phi_i*(t_ot);
          double kt_t  = grad_phi_i*(grad_t_ot);
          double mpt_t = phi_i*(latent_heat*p_ot);

          double f2n = mt_n + time_step*theta*kt_n - mpt_n - mt_t + time_step*(1-theta)*kt_t + mpt_t;

Assembling the RHS vector:

          cell_rhs(i) -= (f1n + f2n)*fe_values.JxW(q);
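For orientation, the terms f1* and f2* assembled above are a θ-scheme time discretization of what appears to be the Kobayashi phase-field model for dendritic solidification. Written in the notation of the member variables (this summary is inferred from the assembled terms, not quoted from the program's documentation):

\[
  \tau \frac{\partial p}{\partial t}
  = \nabla\cdot\left(\epsilon^2 \nabla p\right)
  + p\,(1-p)\left(p - \tfrac12 + m\right),
  \qquad
  m = \frac{\alpha}{\pi}\arctan\!\bigl(\gamma\,(t_{\mathrm{eq}} - T)\bigr),
\]
\[
  \frac{\partial T}{\partial t} = \nabla^2 T + \text{latent\_heat}\,\frac{\partial p}{\partial t},
\]

with an additional stochastic term proportional to \(a\,\chi\,p\,(1-p)\) in the phase equation, where \(\chi\) is the uniform random number khi that perturbs the interface.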
        cell->get_dof_indices(local_dof_indices);
        for (unsigned int i = 0; i < dofs_per_cell; ++i)
          {
            for (unsigned int j = 0; j < dofs_per_cell; ++j)
              jacobian_matrix.add(local_dof_indices[i],
                                  local_dof_indices[j],
                                  cell_matrix(i,j));
            system_rhs(local_dof_indices[i]) += cell_rhs(i);
          }

  std::map<types::global_dof_index, double> boundary_values;
                                     system_rhs,
                                     false);
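Only the last two arguments of the call that consumes this map survive in the listing. The usual deal.II pattern, and presumably what happens here, is to fill the map with zero values for the Newton update and then constrain the Jacobian and right-hand side together; a sketch under that assumption:

  // Sketch (assumed): homogeneous Dirichlet values for the Newton update,
  // applied simultaneously to the matrix and the right-hand side.
  // false: do not eliminate the corresponding matrix columns (the usual
  // choice for distributed PETSc matrices).
  MatrixTools::apply_boundary_values(boundary_values,
                                     jacobian_matrix,
                                     solution_update,
                                     system_rhs,
                                     false);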
<h1>Annotated version of get_random_number.cpp</h1>
#include "PhaseFieldSolver.h"

float PhaseFieldSolver::get_random_number()
{
  static std::default_random_engine e;
  static std::uniform_real_distribution<> dis(-0.5, 0.5);
  return dis(e); // uniform noise in [-0.5, 0.5)
<h1>Annotated version of grid_dof.cpp</h1>
#include "PhaseFieldSolver.h"

void PhaseFieldSolver::make_grid_and_dofs() {

  std::ifstream f("mesh/Kobayashi_mesh100x400.msh");
  gridin.read_msh(f);

  dof_handler.distribute_dofs(fe);

  const std::vector<IndexSet> locally_owned_dofs_per_proc =
  const IndexSet locally_owned_dofs =
  jacobian_matrix.reinit(locally_owned_dofs,
                         locally_owned_dofs,
  old_solution.reinit(locally_owned_dofs, mpi_communicator);
  system_rhs.reinit(locally_owned_dofs, mpi_communicator);
  conv_solution.reinit(locally_owned_dofs, mpi_communicator);
  solution_update.reinit(locally_owned_dofs, mpi_communicator);

  conv_solution_np.reinit(dof_handler.n_dofs());
  old_solution_np.reinit(dof_handler.n_dofs());
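The two truncated initializations in this listing follow the standard pattern for a subdomain-wise parallel layout (as in step-17/step-18); a sketch of what they presumably expand to:

  // Sketch (assumed): each process extracts the set of DoFs it owns from a
  // per-subdomain partition of all degrees of freedom.
  const std::vector<IndexSet> locally_owned_dofs_per_proc =
    DoFTools::locally_owned_dofs_per_subdomain(dof_handler);
  const IndexSet locally_owned_dofs =
    locally_owned_dofs_per_proc[this_mpi_process];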
<h1>Annotated version of main.cpp</h1>
#include <iostream>
#include "PhaseFieldSolver.h"

int main(int argc, char **argv) {
  PhaseFieldSolver phasefieldsolver;
  phasefieldsolver.run();
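One line of main() is elided in this listing; since argc and argv are not otherwise used, it is presumably the MPI (and PETSc) initialization guard that every parallel deal.II program needs, roughly:

  // Presumed content of the elided line: initialize MPI/PETSc for this run
  // and finalize them automatically when main() exits.
  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);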
<h1>Annotated version of output_results.cpp</h1>
#include "PhaseFieldSolver.h"

void PhaseFieldSolver::output_results(const unsigned int timestep_number) const {

Using only one process to output the result:

  if (this_mpi_process == 0)

  std::vector<std::string> solution_names;
  solution_names.emplace_back("p");
  solution_names.emplace_back("T");

  data_out.add_data_vector(localized_solution, solution_names);

  const std::string filename =

  data_out.set_flags(vtk_flags);
  std::ofstream output(filename);
  data_out.build_patches();
  data_out.write_vtk(output);
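The truncated filename line is typically assembled from the time step number; a plausible sketch, in which the prefix, padding, and extension are assumptions rather than the program's verified string:

  // Assumed naming scheme, e.g. "solution-0010.vtk":
  const std::string filename =
    "solution-" + Utilities::int_to_string(timestep_number, 4) + ".vtk";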
<h1>Annotated version of run.cpp</h1>
#include "PhaseFieldSolver.h"

void PhaseFieldSolver::run() {
  make_grid_and_dofs();

  pcout << " Number of degrees of freedom: " << dof_handler.n_dofs()
        << " (by partition:";
  pcout << (p == 0 ? ' ' : '+')
  pcout << ")" << std::endl;

Initialise the solution:

  InitialValues initial_value;

Apply the boundary conditions at t = 0:

The time steps begin here:

  unsigned int timestep_number = 1;
  for (; time <= final_time; time += time_step, ++timestep_number) {

    pcout << "Time step " << timestep_number << " at t=" << time+time_step

    conv_solution = old_solution;

The Newton-Raphson iterations begin here:

    for (unsigned int it = 1; it <= 100; ++it) {
      pcout << "Newton iteration number:" << it << std::endl;

      pcout << "Convergence Failure!!!!!!!!!!!!!!!" << std::endl;

      conv_solution_np = conv_solution;
      old_solution_np  = old_solution;

Initialise the update (delta) solution as zero:

Assemble the Jacobian and the residual:

Solve for the delta solution:

Check for convergence:

      double residual_norm = system_rhs.l2_norm();
      pcout << "Nothing wrong till here!!!!!!" << std::endl;
      pcout << "the residual is:" << residual_norm << std::endl;
      if (residual_norm <= (1e-4)) {
        pcout << "Solution Converged!" << std::endl;

Transfer the converged solution to the old_solution vector in order to plot the output:

    old_solution = conv_solution;

Output the solution only at specific time steps:

    if (timestep_number % 10 == 0)
      output_results(timestep_number);
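Because the listing above is heavily elided, here is a compact sketch of the control flow it implements; the member names are the program's own, while the exact bookkeeping and the handling of non-convergence are simplified:

  // Simplified sketch of PhaseFieldSolver::run() (not verbatim):
  for (; time <= final_time; time += time_step, ++timestep_number)
    {
      conv_solution = old_solution;            // initial guess for Newton
      for (unsigned int it = 1; it <= 100; ++it)
        {
          conv_solution_np = conv_solution;    // localized copies used in assembly
          old_solution_np  = old_solution;
          assemble_system();                   // Jacobian and residual (system_rhs)
          solve();                             // adds the Newton update to conv_solution
          if (system_rhs.l2_norm() <= 1e-4)
            break;                             // converged
        }
      old_solution = conv_solution;            // accept the time step
      if (timestep_number % 10 == 0)
        output_results(timestep_number);
    }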
<h1>Annotated version of solve.cpp</h1>
#include "PhaseFieldSolver.h"

void PhaseFieldSolver::solve(){

  A_direct.solve(jacobian_matrix, solution_update, system_rhs);

Updating the solution by adding the delta solution:

  conv_solution.add(1, solution_update);
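In equation form, each call to this function performs one Newton step. Because the cell contributions were subtracted into system_rhs during assembly, the right-hand side already holds the negative residual, so

\[
  J(u_k)\,\delta u = -R(u_k), \qquad u_{k+1} = u_k + \delta u,
\]

with \(J\) the jacobian_matrix, \(-R\) the system_rhs, \(\delta u\) the solution_update, and \(u_k\) the conv_solution.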