BlackoilModelNldd.hpp
/*
  Copyright 2013, 2015 SINTEF ICT, Applied Mathematics.
  Copyright 2014, 2015 Dr. Blatt - HPC-Simulation-Software & Services
  Copyright 2014, 2015 Statoil ASA.
  Copyright 2015 NTNU
  Copyright 2015, 2016, 2017 IRIS AS

  This file is part of the Open Porous Media project (OPM).

  OPM is free software: you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation, either version 3 of the License, or
  (at your option) any later version.

  OPM is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with OPM. If not, see <http://www.gnu.org/licenses/>.
*/

#ifndef OPM_BLACKOILMODEL_NLDD_HEADER_INCLUDED
#define OPM_BLACKOILMODEL_NLDD_HEADER_INCLUDED

#include <dune/common/timer.hh>

#include <opm/grid/common/SubGridPart.hpp>

#include <opm/simulators/aquifers/AquiferGridUtils.hpp>

#include <opm/simulators/flow/countGlobalCells.hpp>
#include <opm/simulators/flow/partitionCells.hpp>
#include <opm/simulators/flow/priVarsPacking.hpp>
#include <opm/simulators/flow/NonlinearSolver.hpp>
#include <opm/simulators/flow/SubDomain.hpp>

#include <opm/simulators/linalg/extractMatrix.hpp>

#if COMPILE_GPU_BRIDGE
#include <opm/simulators/linalg/ISTLSolverGpuBridge.hpp>
#else
#include <opm/simulators/linalg/ISTLSolver.hpp>
#endif

#include <opm/simulators/timestepping/ConvergenceReport.hpp>
#include <opm/simulators/timestepping/SimulatorReport.hpp>
#include <opm/simulators/timestepping/SimulatorTimerInterface.hpp>

#include <opm/simulators/utils/ComponentName.hpp>

#include <opm/simulators/wells/BlackoilWellModelNldd.hpp>

#include <fmt/format.h>

#include <algorithm>
#include <array>
#include <cassert>
#include <cmath>
#include <cstddef>
#include <filesystem>
#include <fstream>
#include <functional>
#include <iomanip>
#include <ios>
#include <memory>
#include <numeric>
#include <set>
#include <sstream>
#include <string>
#include <type_traits>
#include <utility>
#include <vector>

namespace Opm {

template<class TypeTag> class BlackoilModel;

/// A NLDD implementation for three-phase black oil.
template <class TypeTag>
class BlackoilModelNldd
{
public:
    using ElementContext = GetPropType<TypeTag, Properties::ElementContext>;
    using FluidSystem = GetPropType<TypeTag, Properties::FluidSystem>;
    using Grid = GetPropType<TypeTag, Properties::Grid>;
    using Indices = GetPropType<TypeTag, Properties::Indices>;
    using Scalar = GetPropType<TypeTag, Properties::Scalar>;
    using SolutionVector = GetPropType<TypeTag, Properties::SolutionVector>;

    using BVector = typename BlackoilModel<TypeTag>::BVector;
    using Domain = SubDomain<Grid>;
    using ISTLSolverType = ISTLSolver<TypeTag>;
    using Mat = typename BlackoilModel<TypeTag>::Mat;

    static constexpr int numEq = Indices::numEq;

    //! \brief The constructor sets up the subdomains.
    BlackoilModelNldd(BlackoilModel<TypeTag>& model)
        : model_(model)
        , wellModel_(model.wellModel())
        , rank_(model_.simulator().vanguard().grid().comm().rank())
    {
        // Create partitions.
        const auto& [partition_vector_initial, num_domains_initial] = this->partitionCells();

        auto partition_vector = partition_vector_initial;
        auto num_domains = num_domains_initial;

        // Fix-up for an extreme case: interior cells that do not have any on-rank
        // neighbours. Move all such cells into a single domain on this rank,
        // and mark the domain for skipping. For what it's worth, we've seen this
        // case occur in practice when testing on field cases.
        bool isolated_cells = false;
        for (auto& domainId : partition_vector) {
            if (domainId < 0) {
                domainId = num_domains;
                isolated_cells = true;
            }
        }
        if (isolated_cells) {
            num_domains++;
        }

        // Set nldd handler in main well model
        model.wellModel().setNlddAdapter(&wellModel_);

        // Scan through partitioning to get correct size for each.
        std::vector<int> sizes(num_domains, 0);
        for (const auto& p : partition_vector) {
            ++sizes[p];
        }

        // Set up correctly sized vectors of entity seeds and of indices for each partition.
        using EntitySeed = typename Grid::template Codim<0>::EntitySeed;
        std::vector<std::vector<EntitySeed>> seeds(num_domains);
        std::vector<std::vector<int>> partitions(num_domains);
        for (int domain = 0; domain < num_domains; ++domain) {
            seeds[domain].resize(sizes[domain]);
            partitions[domain].resize(sizes[domain]);
        }

        // Iterate through grid once, setting the seeds of all partitions.
        // Note: owned cells only!
        const auto& grid = model_.simulator().vanguard().grid();

        std::vector<int> count(num_domains, 0);
        const auto& gridView = grid.leafGridView();
        const auto beg = gridView.template begin<0, Dune::Interior_Partition>();
        const auto end = gridView.template end<0, Dune::Interior_Partition>();
        int cell = 0;
        for (auto it = beg; it != end; ++it, ++cell) {
            const int p = partition_vector[cell];
            seeds[p][count[p]] = it->seed();
            partitions[p][count[p]] = cell;
            ++count[p];
        }
        assert(count == sizes);

        // Create the domains.
        for (int index = 0; index < num_domains; ++index) {
            std::vector<bool> interior(partition_vector.size(), false);
            for (int ix : partitions[index]) {
                interior[ix] = true;
            }

            Dune::SubGridPart<Grid> view{grid, std::move(seeds[index])};

            // Mark the last domain for skipping if it contains isolated cells
            const bool skip = isolated_cells && (index == num_domains - 1);
            this->domains_.emplace_back(index,
                                        std::move(partitions[index]),
                                        std::move(interior),
                                        std::move(view),
                                        skip);
        }

        // Set up container for the local system matrices.
        domain_matrices_.resize(num_domains);

        // Set up container for the local linear solvers.
        for (int index = 0; index < num_domains; ++index) {
            // TODO: The ISTLSolver constructor will make
            // parallel structures appropriate for the full grid
            // only. This must be addressed before going parallel.
            const auto& eclState = model_.simulator().vanguard().eclState();
            FlowLinearSolverParameters loc_param;
            loc_param.is_nldd_local_solver_ = true;
            loc_param.init(eclState.getSimulationConfig().useCPR());
            // Override solver type with umfpack if small domain.
            if (domains_[index].cells.size() < 200) {
                loc_param.linsolver_ = "umfpack";
            }
            loc_param.linear_solver_print_json_definition_ = false;
            const bool force_serial = true;
            domain_linsolvers_.emplace_back(model_.simulator(), loc_param, force_serial);
            domain_linsolvers_.back().setDomainIndex(index);
        }

        assert(int(domains_.size()) == num_domains);
    }

    //! \brief Called before starting a time step.
    void prepareStep()
    {
        // Setup domain->well mapping.
        wellModel_.setupDomains(domains_);
    }

    //! \brief Do one non-linear NLDD iteration.
    template <class NonlinearSolverType>
    SimulatorReportSingle nonlinearIterationNldd(const int iteration,
                                                 const SimulatorTimerInterface& timer,
                                                 NonlinearSolverType& nonlinear_solver)
    {
        // ----------- Set up reports and timer -----------
        SimulatorReportSingle report;
        Dune::Timer perfTimer;

        model_.initialLinearization(report, iteration, nonlinear_solver.minIter(), nonlinear_solver.maxIter(), timer);

        if (report.converged) {
            return report;
        }

        // ----------- If not converged, do an NLDD iteration -----------

        auto& solution = model_.simulator().model().solution(0);
        auto initial_solution = solution;
        auto locally_solved = initial_solution;

        // ----------- Decide on an ordering for the domains -----------
        const auto domain_order = this->getSubdomainOrder();

        // ----------- Solve each domain separately -----------
        DeferredLogger logger;
        std::vector<SimulatorReportSingle> domain_reports(domains_.size());
        for (const int domain_index : domain_order) {
            const auto& domain = domains_[domain_index];
            auto& local_report = domain_reports[domain_index];
            if (domain.skip) {
                local_report.converged = true;
                continue;
            }
            try {
                switch (model_.param().local_solve_approach_) {
                case DomainSolveApproach::Jacobi:
                    solveDomainJacobi(solution, locally_solved, local_report, logger,
                                      iteration, timer, domain);
                    break;
                default:
                case DomainSolveApproach::GaussSeidel:
                    solveDomainGaussSeidel(solution, locally_solved, local_report, logger,
                                           iteration, timer, domain);
                    break;
                }
            }
            catch (...) {
                // Something went wrong during local solves.
                local_report.converged = false;
            }
            // This should have updated the global matrix to be
            // dR_i/du_j evaluated at new local solutions for
            // i == j, at old solution for i != j.
            if (!local_report.converged) {
                // TODO: more proper treatment, including in parallel.
                logger.debug(fmt::format("Convergence failure in domain {} on rank {}.", domain.index, rank_));
            }
        }

        // Communicate and log all messages.
        auto global_logger = gatherDeferredLogger(logger, model_.simulator().vanguard().grid().comm());
        global_logger.logMessages();

        // Accumulate local solve data.
        // Putting the counts in a single array to avoid multiple
        // comm.sum() calls. Keeping the named vars for readability.
        std::array<int, 4> counts{ 0, 0, 0, static_cast<int>(domain_reports.size()) };
        int& num_converged = counts[0];
        int& num_converged_already = counts[1];
        int& num_local_newtons = counts[2];
        int& num_domains = counts[3];
        {
            SimulatorReportSingle rep;
            for (const auto& dr : domain_reports) {
                if (dr.converged) {
                    ++num_converged;
                    if (dr.total_newton_iterations == 0) {
                        ++num_converged_already;
                    }
                }
                rep += dr;
            }
            num_local_newtons = rep.total_newton_iterations;
            local_reports_accumulated_ += rep;
        }

        if (model_.param().local_solve_approach_ == DomainSolveApproach::Jacobi) {
            solution = locally_solved;
            model_.simulator().model().invalidateAndUpdateIntensiveQuantities(/*timeIdx=*/0);
        }

#if HAVE_MPI
        // Communicate solutions:
        // With multiple processes, this process' overlap (i.e. not
        // owned) cells' solution values have been modified by local
        // solves in the owning processes, and remain unchanged
        // here. We must therefore receive the updated solution on the
        // overlap cells and update their intensive quantities before
        // we move on.
        const auto& comm = model_.simulator().vanguard().grid().comm();
        if (comm.size() > 1) {
            const auto* ccomm = model_.simulator().model().newtonMethod().linearSolver().comm();

            // Copy numerical values from primary vars.
            ccomm->copyOwnerToAll(solution, solution);

            // Copy flags from primary vars.
            const std::size_t num = solution.size();
            Dune::BlockVector<std::size_t> allmeanings(num);
            for (std::size_t ii = 0; ii < num; ++ii) {
                allmeanings[ii] = PVUtil::pack(solution[ii]);
            }
            ccomm->copyOwnerToAll(allmeanings, allmeanings);
            for (std::size_t ii = 0; ii < num; ++ii) {
                PVUtil::unPack(solution[ii], allmeanings[ii]);
            }

            // Update intensive quantities for our overlap values.
            model_.simulator().model().invalidateAndUpdateIntensiveQuantitiesOverlap(/*timeIdx=*/0);

            // Make total counts of domains converged.
            comm.sum(counts.data(), counts.size());
        }
#endif // HAVE_MPI

        const bool is_iorank = this->rank_ == 0;
        if (is_iorank) {
            OpmLog::debug(fmt::format("Local solves finished. Converged for {}/{} domains. {} domains did no work. {} total local Newton iterations.\n",
                                      num_converged, num_domains, num_converged_already, num_local_newtons));
        }

        // Finish with a Newton step.
        // Note that the "iteration + 100" is a simple way to avoid entering
        // "if (iteration == 0)" and similar blocks, and also makes it a little
        // easier to spot the iteration residuals in the DBG file. A more sophisticated
        // approach can be done later.
        auto rep = model_.nonlinearIterationNewton(iteration + 100, timer, nonlinear_solver);
        report += rep;
        if (rep.converged) {
            report.converged = true;
        }
        return report;
    }

    //! \brief return the statistics if the nonlinearIteration() method failed
    const SimulatorReportSingle& localAccumulatedReports() const
    {
        return local_reports_accumulated_;
    }

    //! \brief Write the partition vector to a file in ResInsight compatible format
    //! for inspection, as well as a partition file per rank.
    void writePartitions(const std::filesystem::path& odir) const
    {
        const auto& elementMapper = this->model_.simulator().model().elementMapper();
        const auto& cartMapper = this->model_.simulator().vanguard().cartesianIndexMapper();
        const auto& dims = cartMapper.cartesianDimensions();
        const auto total_size = dims[0] * dims[1] * dims[2];

        const auto& grid = this->model_.simulator().vanguard().grid();
        const auto& comm = grid.comm();

        // Create a full-sized vector initialized with -1 (indicating inactive cells)
        auto full_partition = std::vector<int>(total_size, -1);

        // Fill active cell values for this rank
        const auto p = this->reconstitutePartitionVector();
        auto i = 0;
        for (const auto& cell : elements(grid.leafGridView(), Dune::Partitions::interior)) {
            full_partition[cartMapper.cartesianIndex(elementMapper.index(cell))] = p[i++];
        }

        // Gather all partitions using max operation
        comm.max(full_partition.data(), full_partition.size());

        // Only rank 0 writes the file
        if (this->rank_ == 0) {
            auto fname = odir / "ResInsight_compatible_partition.txt";
            std::ofstream resInsightFile { fname };

            // Write header
            resInsightFile << "NLDD_DOM" << '\n';

            // Write all cells, including inactive ones
            for (const auto& val : full_partition) {
                resInsightFile << val << '\n';
            }
            resInsightFile << "/" << '\n';
        }

        const auto nDigit = 1 + static_cast<int>(std::floor(std::log10(comm.size())));
        auto partition_fname = odir / fmt::format("{1:0>{0}}", nDigit, comm.rank());
        std::ofstream pfile { partition_fname };

        auto cell_index = 0;
        for (const auto& cell : elements(grid.leafGridView(), Dune::Partitions::interior)) {
            pfile << comm.rank() << ' '
                  << cartMapper.cartesianIndex(elementMapper.index(cell)) << ' '
                  << p[cell_index++] << '\n';
        }
    }

private:
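    //! \brief Solve the nonlinear system for a single subdomain with a local Newton loop.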
    std::pair<SimulatorReportSingle, ConvergenceReport>
    solveDomain(const Domain& domain,
                const SimulatorTimerInterface& timer,
                DeferredLogger& logger,
                [[maybe_unused]] const int global_iteration,
                const bool initial_assembly_required)
    {
        auto& modelSimulator = model_.simulator();

        SimulatorReportSingle report;
        Dune::Timer solveTimer;
        solveTimer.start();
        Dune::Timer detailTimer;

        modelSimulator.model().newtonMethod().setIterationIndex(0);

        // When called, if assembly has already been performed
        // with the initial values, we only need to check
        // for local convergence. Otherwise, we must do a local
        // assembly.
        int iter = 0;
        if (initial_assembly_required) {
            detailTimer.start();
            modelSimulator.model().newtonMethod().setIterationIndex(iter);
            // TODO: we should have a beginIterationLocal function()
            // only handling the well model for now
            wellModel_.assemble(modelSimulator.model().newtonMethod().numIterations(),
                                modelSimulator.timeStepSize(),
                                domain);
            // Assemble reservoir locally.
            report += this->assembleReservoirDomain(domain);
            report.assemble_time += detailTimer.stop();
        }
        detailTimer.reset();
        detailTimer.start();
        std::vector<Scalar> resnorms;
        auto convreport = this->getDomainConvergence(domain, timer, 0, logger, resnorms);
        if (convreport.converged()) {
            // TODO: set more info, timing etc.
            report.converged = true;
            return { report, convreport };
        }

        // We have already assembled for the first iteration,
        // but not done the Schur complement for the wells yet.
        detailTimer.reset();
        detailTimer.start();
        model_.wellModel().linearizeDomain(domain,
                                           modelSimulator.model().linearizer().jacobian(),
                                           modelSimulator.model().linearizer().residual());
        const double tt1 = detailTimer.stop();
        report.assemble_time += tt1;
        report.assemble_time_well += tt1;

        // Local Newton loop.
        const int max_iter = model_.param().max_local_solve_iterations_;
        const auto& grid = modelSimulator.vanguard().grid();
        double damping_factor = 1.0;
        std::vector<std::vector<Scalar>> convergence_history;
        convergence_history.reserve(20);
        convergence_history.push_back(resnorms);
        do {
            // Solve local linear system.
            // Note that x has full size, we expect it to be nonzero only for in-domain cells.
            const int nc = grid.size(0);
            BVector x(nc);
            detailTimer.reset();
            detailTimer.start();
            this->solveJacobianSystemDomain(domain, x);
            model_.wellModel().postSolveDomain(x, domain);
            if (damping_factor != 1.0) {
                x *= damping_factor;
            }
            report.linear_solve_time += detailTimer.stop();
            report.linear_solve_setup_time += model_.linearSolveSetupTime();
            report.total_linear_iterations = model_.linearIterationsLastSolve();

            // Update local solution. // TODO: x is still full size, should we optimize it?
            detailTimer.reset();
            detailTimer.start();
            this->updateDomainSolution(domain, x);
            report.update_time += detailTimer.stop();

            // Assemble well and reservoir.
            detailTimer.reset();
            detailTimer.start();
            ++iter;
            modelSimulator.model().newtonMethod().setIterationIndex(iter);
            // TODO: we should have a beginIterationLocal function()
            // only handling the well model for now
            // Assemble reservoir locally.
            wellModel_.assemble(modelSimulator.model().newtonMethod().numIterations(),
                                modelSimulator.timeStepSize(),
                                domain);
            report += this->assembleReservoirDomain(domain);
            report.assemble_time += detailTimer.stop();

            // Check for local convergence.
            detailTimer.reset();
            detailTimer.start();
            resnorms.clear();
            convreport = this->getDomainConvergence(domain, timer, iter, logger, resnorms);
            convergence_history.push_back(resnorms);

            // Apply the Schur complement of the well model to the
            // reservoir linearized equations.
            detailTimer.reset();
            detailTimer.start();
            model_.wellModel().linearizeDomain(domain,
                                               modelSimulator.model().linearizer().jacobian(),
                                               modelSimulator.model().linearizer().residual());
            const double tt2 = detailTimer.stop();
            report.assemble_time += tt2;
            report.assemble_time_well += tt2;

            // Check if we should dampen. Only do so if wells are converged.
            if (!convreport.converged() && !convreport.wellFailed()) {
                bool oscillate = false;
                bool stagnate = false;
                const int numPhases = convergence_history.front().size();
                detail::detectOscillations(convergence_history, iter, numPhases,
                                           Scalar{0.2}, 1, oscillate, stagnate);
                if (oscillate) {
                    damping_factor *= 0.85;
                    logger.debug(fmt::format("| Damping factor is now {}", damping_factor));
                }
            }
        } while (!convreport.converged() && iter <= max_iter);

        modelSimulator.problem().endIteration();

        report.converged = convreport.converged();
        report.total_newton_iterations = iter;
        report.total_linearizations = iter;
        report.total_time = solveTimer.stop();
        // TODO: set more info, timing etc.
        return { report, convreport };
    }

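    //! \brief Assemble the mass balance equations for the cells of a single subdomain.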
    SimulatorReportSingle assembleReservoirDomain(const Domain& domain)
    {
        // -------- Mass balance equations --------
        model_.simulator().model().linearizer().linearizeDomain(domain);
        return model_.wellModel().lastReport();
    }

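    //! \brief Extract the local linear system for a subdomain, solve it, and scatter
    //! the result into the full-sized vector global_x.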
    void solveJacobianSystemDomain(const Domain& domain, BVector& global_x)
    {
        const auto& modelSimulator = model_.simulator();

        Dune::Timer perfTimer;
        perfTimer.start();

        const Mat& main_matrix = modelSimulator.model().linearizer().jacobian().istlMatrix();
        if (domain_matrices_[domain.index]) {
            Details::copySubMatrix(main_matrix, domain.cells, *domain_matrices_[domain.index]);
        } else {
            domain_matrices_[domain.index] = std::make_unique<Mat>(Details::extractMatrix(main_matrix, domain.cells));
        }
        auto& jac = *domain_matrices_[domain.index];
        auto res = Details::extractVector(modelSimulator.model().linearizer().residual(),
                                          domain.cells);
        auto x = res;

        // set initial guess
        global_x = 0.0;
        x = 0.0;

        auto& linsolver = domain_linsolvers_[domain.index];

        linsolver.prepare(jac, res);
        model_.linearSolveSetupTime() = perfTimer.stop();
        linsolver.setResidual(res);
        linsolver.solve(x);

        Details::setGlobal(x, domain.cells, global_x);
    }

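    //! \brief Apply the Newton update dx to the global solution in the cells of a single subdomain.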
    void updateDomainSolution(const Domain& domain, const BVector& dx)
    {
        auto& simulator = model_.simulator();
        auto& newtonMethod = simulator.model().newtonMethod();
        SolutionVector& solution = simulator.model().solution(/*timeIdx=*/0);

        newtonMethod.update_(/*nextSolution=*/solution,
                             /*curSolution=*/solution,
                             /*update=*/dx,
                             /*resid=*/dx,
                             domain.cells); // the update routines of the black
                                            // oil model do not care about the
                                            // residual

        // if the solution is updated, the intensive quantities need to be recalculated
        simulator.model().invalidateAndUpdateIntensiveQuantities(/*timeIdx=*/0, domain);
    }

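    //! \brief Accumulate per-equation residual sums, maximum coefficients and B averages
    //! over the interior cells of a subdomain; returns the pore volume sums for the
    //! domain and for its numerical-aquifer cells.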
    std::pair<Scalar, Scalar> localDomainConvergenceData(const Domain& domain,
                                                         std::vector<Scalar>& R_sum,
                                                         std::vector<Scalar>& maxCoeff,
                                                         std::vector<Scalar>& B_avg,
                                                         std::vector<int>& maxCoeffCell)
    {
        const auto& modelSimulator = model_.simulator();

        Scalar pvSumLocal = 0.0;
        Scalar numAquiferPvSumLocal = 0.0;
        const auto& model = modelSimulator.model();
        const auto& problem = modelSimulator.problem();

        const auto& modelResid = modelSimulator.model().linearizer().residual();

        ElementContext elemCtx(modelSimulator);
        const auto& gridView = domain.view;
        const auto& elemEndIt = gridView.template end</*codim=*/0>();
        IsNumericalAquiferCell isNumericalAquiferCell(gridView.grid());

        for (auto elemIt = gridView.template begin</*codim=*/0>();
             elemIt != elemEndIt;
             ++elemIt)
        {
            if (elemIt->partitionType() != Dune::InteriorEntity) {
                continue;
            }
            const auto& elem = *elemIt;
            elemCtx.updatePrimaryStencil(elem);
            elemCtx.updatePrimaryIntensiveQuantities(/*timeIdx=*/0);

            const unsigned cell_idx = elemCtx.globalSpaceIndex(/*spaceIdx=*/0, /*timeIdx=*/0);
            const auto& intQuants = elemCtx.intensiveQuantities(/*spaceIdx=*/0, /*timeIdx=*/0);
            const auto& fs = intQuants.fluidState();

            const auto pvValue = problem.referencePorosity(cell_idx, /*timeIdx=*/0) *
                                 model.dofTotalVolume(cell_idx);
            pvSumLocal += pvValue;

            if (isNumericalAquiferCell(elem))
            {
                numAquiferPvSumLocal += pvValue;
            }

            model_.getMaxCoeff(cell_idx, intQuants, fs, modelResid, pvValue,
                               B_avg, R_sum, maxCoeff, maxCoeffCell);
        }

        // compute local average in terms of global number of elements
        const int bSize = B_avg.size();
        for (int i = 0; i < bSize; ++i)
        {
            B_avg[i] /= Scalar(domain.cells.size());
        }

        return {pvSumLocal, numAquiferPvSumLocal};
    }

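    //! \brief Evaluate the reservoir convergence criteria (MB and CNV) for a subdomain
    //! and build the corresponding ConvergenceReport.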
    ConvergenceReport getDomainReservoirConvergence(const double reportTime,
                                                    const double dt,
                                                    const int iteration,
                                                    const Domain& domain,
                                                    DeferredLogger& logger,
                                                    std::vector<Scalar>& B_avg,
                                                    std::vector<Scalar>& residual_norms)
    {
        using Vector = std::vector<Scalar>;

        const int numComp = numEq;
        Vector R_sum(numComp, 0.0);
        Vector maxCoeff(numComp, std::numeric_limits<Scalar>::lowest());
        std::vector<int> maxCoeffCell(numComp, -1);
        const auto [pvSum, numAquiferPvSum]
            = this->localDomainConvergenceData(domain, R_sum, maxCoeff, B_avg, maxCoeffCell);

        auto cnvErrorPvFraction = computeCnvErrorPvLocal(domain, B_avg, dt);
        cnvErrorPvFraction /= (pvSum - numAquiferPvSum);

        // Default value of relaxed_max_pv_fraction_ is 0.03 and min_strict_cnv_iter_ is 0.
        // For each iteration, we need to determine whether to use the relaxed CNV tolerance.
        // To disable the relaxed CNV tolerance, set relaxed_max_pv_fraction_ to 0.
        const bool use_relaxed_cnv = cnvErrorPvFraction < model_.param().relaxed_max_pv_fraction_ &&
                                     iteration >= model_.param().min_strict_cnv_iter_;
        // Tighter bound for local convergence should increase the
        // likelihood of: local convergence => global convergence
        const Scalar tol_cnv = model_.param().local_tolerance_scaling_cnv_
            * (use_relaxed_cnv ? model_.param().tolerance_cnv_relaxed_
                               : model_.param().tolerance_cnv_);

        const bool use_relaxed_mb = iteration >= model_.param().min_strict_mb_iter_;
        const Scalar tol_mb = model_.param().local_tolerance_scaling_mb_
            * (use_relaxed_mb ? model_.param().tolerance_mb_relaxed_ : model_.param().tolerance_mb_);

        // Finish computation
        std::vector<Scalar> CNV(numComp);
        std::vector<Scalar> mass_balance_residual(numComp);
        for (int compIdx = 0; compIdx < numComp; ++compIdx)
        {
            CNV[compIdx] = B_avg[compIdx] * dt * maxCoeff[compIdx];
            mass_balance_residual[compIdx] = std::abs(B_avg[compIdx] * R_sum[compIdx]) * dt / pvSum;
            residual_norms.push_back(CNV[compIdx]);
        }

        // Create convergence report.
        ConvergenceReport report{reportTime};
        using CR = ConvergenceReport;
        for (int compIdx = 0; compIdx < numComp; ++compIdx) {
            Scalar res[2] = { mass_balance_residual[compIdx], CNV[compIdx] };
            CR::ReservoirFailure::Type types[2] = { CR::ReservoirFailure::Type::MassBalance,
                                                    CR::ReservoirFailure::Type::Cnv };
            Scalar tol[2] = { tol_mb, tol_cnv };
            for (int ii : {0, 1}) {
                if (std::isnan(res[ii])) {
                    report.setReservoirFailed({types[ii], CR::Severity::NotANumber, compIdx});
                    logger.debug("NaN residual for " + model_.compNames().name(compIdx) + " equation.");
                } else if (res[ii] > model_.param().max_residual_allowed_) {
                    report.setReservoirFailed({types[ii], CR::Severity::TooLarge, compIdx});
                    logger.debug("Too large residual for " + model_.compNames().name(compIdx) + " equation.");
                } else if (res[ii] < 0.0) {
                    report.setReservoirFailed({types[ii], CR::Severity::Normal, compIdx});
                    logger.debug("Negative residual for " + model_.compNames().name(compIdx) + " equation.");
                } else if (res[ii] > tol[ii]) {
                    report.setReservoirFailed({types[ii], CR::Severity::Normal, compIdx});
                }

                report.setReservoirConvergenceMetric(types[ii], compIdx, res[ii], tol[ii]);
            }
        }

        // Output of residuals. If converged at initial state, log nothing.
        const bool converged_at_initial_state = (report.converged() && iteration == 0);
        if (!converged_at_initial_state) {
            if (iteration == 0) {
                // Log header.
                std::string msg = fmt::format("Domain {} on rank {}, size {}, containing cell {}\n| Iter",
                                              domain.index, this->rank_, domain.cells.size(), domain.cells[0]);
                for (int compIdx = 0; compIdx < numComp; ++compIdx) {
                    msg += " MB(";
                    msg += model_.compNames().name(compIdx)[0];
                    msg += ") ";
                }
                for (int compIdx = 0; compIdx < numComp; ++compIdx) {
                    msg += " CNV(";
                    msg += model_.compNames().name(compIdx)[0];
                    msg += ") ";
                }
                logger.debug(msg);
            }
            // Log convergence data.
            std::ostringstream ss;
            ss << "| ";
            const std::streamsize oprec = ss.precision(3);
            const std::ios::fmtflags oflags = ss.setf(std::ios::scientific);
            ss << std::setw(4) << iteration;
            for (int compIdx = 0; compIdx < numComp; ++compIdx) {
                ss << std::setw(11) << mass_balance_residual[compIdx];
            }
            for (int compIdx = 0; compIdx < numComp; ++compIdx) {
                ss << std::setw(11) << CNV[compIdx];
            }
            ss.precision(oprec);
            ss.flags(oflags);
            logger.debug(ss.str());
        }

        return report;
    }

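    //! \brief Combine reservoir and well convergence checks for a single subdomain.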
    ConvergenceReport getDomainConvergence(const Domain& domain,
                                           const SimulatorTimerInterface& timer,
                                           const int iteration,
                                           DeferredLogger& logger,
                                           std::vector<Scalar>& residual_norms)
    {
        std::vector<Scalar> B_avg(numEq, 0.0);
        auto report = this->getDomainReservoirConvergence(timer.simulationTimeElapsed(),
                                                          timer.currentStepLength(),
                                                          iteration,
                                                          domain,
                                                          logger,
                                                          B_avg,
                                                          residual_norms);
        report += wellModel_.getWellConvergence(domain, B_avg, logger);
        return report;
    }

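    //! \brief Order the subdomains according to the chosen solve approach and ordering measure.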
    std::vector<int> getSubdomainOrder()
    {
        const auto& modelSimulator = model_.simulator();
        const auto& solution = modelSimulator.model().solution(0);

        std::vector<int> domain_order(domains_.size());
        std::iota(domain_order.begin(), domain_order.end(), 0);

        if (model_.param().local_solve_approach_ == DomainSolveApproach::Jacobi) {
            // Do nothing, 0..n-1 order is fine.
            return domain_order;
        } else if (model_.param().local_solve_approach_ == DomainSolveApproach::GaussSeidel) {
            // Calculate the measure used to order the domains.
            std::vector<Scalar> measure_per_domain(domains_.size());
            switch (model_.param().local_domains_ordering_) {
            case DomainOrderingMeasure::AveragePressure: {
                // Use average pressures to order domains.
                for (const auto& domain : domains_) {
                    Scalar press_sum = 0.0;
                    for (const int c : domain.cells) {
                        press_sum += solution[c][Indices::pressureSwitchIdx];
                    }
                    const Scalar avgpress = press_sum / domain.cells.size();
                    measure_per_domain[domain.index] = avgpress;
                }
                break;
            }
            case DomainOrderingMeasure::MaxPressure: {
                // Use max pressures to order domains.
                for (const auto& domain : domains_) {
                    Scalar maxpress = 0.0;
                    for (const int c : domain.cells) {
                        maxpress = std::max(maxpress, solution[c][Indices::pressureSwitchIdx]);
                    }
                    measure_per_domain[domain.index] = maxpress;
                }
                break;
            }
            case DomainOrderingMeasure::Residual: {
                // Use maximum residual to order domains.
                const auto& residual = modelSimulator.model().linearizer().residual();
                const int num_vars = residual[0].size();
                for (const auto& domain : domains_) {
                    Scalar maxres = 0.0;
                    for (const int c : domain.cells) {
                        for (int ii = 0; ii < num_vars; ++ii) {
                            maxres = std::max(maxres, std::fabs(residual[c][ii]));
                        }
                    }
                    measure_per_domain[domain.index] = maxres;
                }
                break;
            }
            } // end of switch (model_.param().local_domains_ordering_)

            // Sort by largest measure, keeping index order if equal.
            const auto& m = measure_per_domain;
            std::stable_sort(domain_order.begin(), domain_order.end(),
                             [&m](const int i1, const int i2) { return m[i1] > m[i2]; });
            return domain_order;
        } else {
            throw std::logic_error("Domain solve approach must be Jacobi or Gauss-Seidel");
        }
    }

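    //! \brief Solve a subdomain in Jacobi mode: the global solution is restored to its
    //! initial values afterwards, so every domain starts from the same state.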
    template<class GlobalEqVector>
    void solveDomainJacobi(GlobalEqVector& solution,
                           GlobalEqVector& locally_solved,
                           SimulatorReportSingle& local_report,
                           DeferredLogger& logger,
                           const int iteration,
                           const SimulatorTimerInterface& timer,
                           const Domain& domain)
    {
        auto initial_local_well_primary_vars = wellModel_.getPrimaryVarsDomain(domain.index);
        auto initial_local_solution = Details::extractVector(solution, domain.cells);
        auto res = solveDomain(domain, timer, logger, iteration, false);
        local_report = res.first;
        if (local_report.converged) {
            auto local_solution = Details::extractVector(solution, domain.cells);
            Details::setGlobal(local_solution, domain.cells, locally_solved);
            Details::setGlobal(initial_local_solution, domain.cells, solution);
            model_.simulator().model().invalidateAndUpdateIntensiveQuantities(/*timeIdx=*/0, domain);
        } else {
            wellModel_.setPrimaryVarsDomain(domain.index, initial_local_well_primary_vars);
            Details::setGlobal(initial_local_solution, domain.cells, solution);
            model_.simulator().model().invalidateAndUpdateIntensiveQuantities(/*timeIdx=*/0, domain);
        }
    }

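    //! \brief Solve a subdomain in Gauss-Seidel mode: a converged (or acceptably close)
    //! local solution is kept in the global solution, so later domains see the update.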
    template<class GlobalEqVector>
    void solveDomainGaussSeidel(GlobalEqVector& solution,
                                GlobalEqVector& locally_solved,
                                SimulatorReportSingle& local_report,
                                DeferredLogger& logger,
                                const int iteration,
                                const SimulatorTimerInterface& timer,
                                const Domain& domain)
    {
        auto initial_local_well_primary_vars = wellModel_.getPrimaryVarsDomain(domain.index);
        auto initial_local_solution = Details::extractVector(solution, domain.cells);
        auto res = solveDomain(domain, timer, logger, iteration, true);
        local_report = res.first;
        if (!local_report.converged) {
            // We look at the detailed convergence report to evaluate
            // if we should accept the unconverged solution.
            const auto& convrep = res.second;
            // We do not accept a solution if the wells are unconverged.
            if (!convrep.wellFailed()) {
                // Calculate the sums of the mb and cnv failures.
                Scalar mb_sum = 0.0;
                Scalar cnv_sum = 0.0;
                for (const auto& rc : convrep.reservoirConvergence()) {
                    if (rc.type() == ConvergenceReport::ReservoirFailure::Type::MassBalance) {
                        mb_sum += rc.value();
                    } else if (rc.type() == ConvergenceReport::ReservoirFailure::Type::Cnv) {
                        cnv_sum += rc.value();
                    }
                }
                // If not too high, we overrule the convergence failure.
                const Scalar acceptable_local_mb_sum = 1e-3;
                const Scalar acceptable_local_cnv_sum = 1.0;
                if (mb_sum < acceptable_local_mb_sum && cnv_sum < acceptable_local_cnv_sum) {
                    local_report.converged = true;
                    logger.debug(fmt::format("Accepting solution in unconverged domain {} on rank {}.", domain.index, rank_));
                    logger.debug(fmt::format("Value of mb_sum: {} cnv_sum: {}", mb_sum, cnv_sum));
                } else {
                    logger.debug("Unconverged local solution.");
                }
            } else {
                logger.debug("Unconverged local solution with well convergence failures:");
                for (const auto& wf : convrep.wellFailures()) {
                    logger.debug(to_string(wf));
                }
            }
        }
        if (local_report.converged) {
            auto local_solution = Details::extractVector(solution, domain.cells);
            Details::setGlobal(local_solution, domain.cells, locally_solved);
        } else {
            wellModel_.setPrimaryVarsDomain(domain.index, initial_local_well_primary_vars);
            Details::setGlobal(initial_local_solution, domain.cells, solution);
            model_.simulator().model().invalidateAndUpdateIntensiveQuantities(/*timeIdx=*/0, domain);
        }
    }

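    //! \brief Sum the pore volume of the subdomain cells whose CNV value violates the strict tolerance.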
    Scalar computeCnvErrorPvLocal(const Domain& domain,
                                  const std::vector<Scalar>& B_avg, double dt) const
    {
        Scalar errorPV{};
        const auto& simulator = model_.simulator();
        const auto& model = simulator.model();
        const auto& problem = simulator.problem();
        const auto& residual = simulator.model().linearizer().residual();

        for (const int cell_idx : domain.cells) {
            const Scalar pvValue = problem.referencePorosity(cell_idx, /*timeIdx=*/0) *
                                   model.dofTotalVolume(cell_idx);
            const auto& cellResidual = residual[cell_idx];
            bool cnvViolated = false;

            for (unsigned eqIdx = 0; eqIdx < cellResidual.size(); ++eqIdx) {
                using std::fabs;
                Scalar CNV = cellResidual[eqIdx] * dt * B_avg[eqIdx] / pvValue;
                cnvViolated = cnvViolated || (fabs(CNV) > model_.param().tolerance_cnv_);
            }

            if (cnvViolated) {
                errorPV += pvValue;
            }
        }
        return errorPV;
    }

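    //! \brief Compute the cell-to-domain partition vector and the resulting number of domains.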
    decltype(auto) partitionCells() const
    {
        const auto& grid = this->model_.simulator().vanguard().grid();

        using GridView = std::remove_cv_t<std::remove_reference_t<
            decltype(grid.leafGridView())>>;

        using Element = std::remove_cv_t<std::remove_reference_t<
            typename GridView::template Codim<0>::Entity>>;

        const auto& param = this->model_.param();

        auto zoltan_ctrl = ZoltanPartitioningControl<Element>{};

        zoltan_ctrl.domain_imbalance = param.local_domains_partition_imbalance_;

        zoltan_ctrl.index =
            [elementMapper = &this->model_.simulator().model().elementMapper()]
            (const Element& element)
        {
            return elementMapper->index(element);
        };

        zoltan_ctrl.local_to_global =
            [cartMapper = &this->model_.simulator().vanguard().cartesianIndexMapper()]
            (const int elemIdx)
        {
            return cartMapper->cartesianIndex(elemIdx);
        };

        // Forming the list of wells is expensive, so do this only if needed.
        const auto need_wells = param.local_domains_partition_method_ == "zoltan";

        const auto wells = need_wells
            ? this->model_.simulator().vanguard().schedule().getWellsatEnd()
            : std::vector<Well>{};

        const auto possibleFutureConnections = need_wells
            ? this->model_.simulator().vanguard().schedule().getPossibleFutureConnections()
            : std::unordered_map<std::string, std::set<int>> {};

        // If defaulted parameter for number of domains, choose a reasonable default.
        constexpr int default_cells_per_domain = 1000;
        const int num_domains = (param.num_local_domains_ > 0)
            ? param.num_local_domains_
            : detail::countGlobalCells(grid) / default_cells_per_domain;

        return ::Opm::partitionCells(param.local_domains_partition_method_,
                                     num_domains, grid.leafGridView(), wells,
                                     possibleFutureConnections, zoltan_ctrl,
                                     param.local_domains_partition_well_neighbor_levels_);
    }

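    //! \brief Rebuild a partition vector with globally unique domain ids for the
    //! interior cells on this rank.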
    std::vector<int> reconstitutePartitionVector() const
    {
        const auto& grid = this->model_.simulator().vanguard().grid();

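        // Count the domains on every rank and prefix-sum the counts, so that each
        // rank can offset its local domain indices into a globally unique numbering.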
        auto numD = std::vector<int>(grid.comm().size() + 1, 0);
        numD[grid.comm().rank() + 1] = static_cast<int>(this->domains_.size());
        grid.comm().sum(numD.data(), numD.size());
        std::partial_sum(numD.begin(), numD.end(), numD.begin());

        auto p = std::vector<int>(grid.size(0));
        auto maxCellIdx = std::numeric_limits<int>::min();

        auto d = numD[grid.comm().rank()];
        for (const auto& domain : this->domains_) {
            for (const auto& cell : domain.cells) {
                p[cell] = d;
                if (cell > maxCellIdx) {
                    maxCellIdx = cell;
                }
            }

            ++d;
        }

        p.erase(p.begin() + maxCellIdx + 1, p.end());
        return p;
    }

    BlackoilModel<TypeTag>& model_;
    BlackoilWellModelNldd<TypeTag> wellModel_;
    std::vector<Domain> domains_;
    std::vector<std::unique_ptr<Mat>> domain_matrices_;
    std::vector<ISTLSolverType> domain_linsolvers_;
    SimulatorReportSingle local_reports_accumulated_;
    int rank_ = 0;
};

} // namespace Opm

#endif // OPM_BLACKOILMODEL_NLDD_HEADER_INCLUDED