#ifndef PORTAGE_DRIVER_FIX_MISMATCH_H_
#define PORTAGE_DRIVER_FIX_MISMATCH_H_

#include <algorithm>
#include <cassert>
#include <cmath>
#include <iostream>
#include <limits>
#include <numeric>
#include <stdexcept>
#include <string>
#include <type_traits>
#include <unordered_set>
#include <vector>

// Portage::vector, Partial_fixup_type and Empty_fixup_type
#include "portage/support/portage.h"
// Wonton::Executor_type, Wonton::MPIExecutor_type and the entity/field enums
#include "wonton/support/wonton.h"

#ifdef PORTAGE_ENABLE_MPI
#include <mpi.h>
#endif

namespace Portage {

using Wonton::Entity_kind;
using Wonton::Entity_type;
using Wonton::Field_type;
using Wonton::Weights_t;
#ifdef PORTAGE_ENABLE_MPI

// In a distributed run the same entity may be listed as owned on more than
// one rank. Build a mask (1 = count it, 0 = skip it) so that each entity is
// counted on exactly one rank: the lowest rank that owns it.
template<Entity_kind onwhat, class Mesh_Wrapper>
void get_unique_entity_masks(Mesh_Wrapper const &mesh,
                             std::vector<int> *unique_mask,
                             MPI_Comm mycomm) {
  int rank = 0, nprocs = 1;
  MPI_Comm_rank(mycomm, &rank);
  MPI_Comm_size(mycomm, &nprocs);

  int nents = (onwhat == Entity_kind::CELL) ?
      mesh.num_owned_cells() : mesh.num_owned_nodes();

  unique_mask->resize(nents, 1);

  // gather the entity counts of all ranks and pad the local global-ID list
  // to the maximum count so that a single Allgather suffices
  std::vector<int> nents_all(nprocs, 0);
  MPI_Allgather(&nents, 1, MPI_INT, &(nents_all[0]), 1, MPI_INT, mycomm);
  int maxents = *std::max_element(nents_all.begin(), nents_all.end());

  std::vector<int> gids(maxents, -1);
  for (int e = 0; e < nents; e++)
    gids[e] = mesh.get_global_id(e, onwhat);

  std::vector<int> gids_all(nprocs*maxents);
  MPI_Allgather(&(gids[0]), maxents, MPI_INT, &(gids_all[0]), maxents,
                MPI_INT, mycomm);

  // collect the global IDs already claimed by lower ranks; rank p's IDs
  // start at offset p*maxents in the gathered array
  std::unordered_set<int> unique_gids;
  for (int p = 0; p < rank; p++)
    for (int e = 0; e < nents_all[p]; e++) {
      int gid = gids_all[p*maxents+e];
      unique_gids.insert(gid);
    }

  // mask out any local entity whose global ID a lower rank already claimed
  for (int e = 0; e < nents; e++) {
    auto itpair = unique_gids.insert(gids[e]);
    if (!itpair.second)  // insertion failed; the ID was already present
      (*unique_mask)[e] = 0;
  }
}
#endif
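// A minimal usage sketch (illustration only, not from the original source):
// given a Wonton-style mesh wrapper `mesh` and a communicator `comm`, the
// mask lets each rank integrate over only the entities it uniquely owns, so
// a global reduction counts every entity exactly once:
//
//   std::vector<int> mask;
//   get_unique_entity_masks<Entity_kind::CELL>(mesh, &mask, comm);
//   double vol = 0.0;
//   for (int c = 0; c < mesh.num_owned_cells(); c++)
//     vol += mask[c]*mesh.cell_volume(c);   // duplicates contribute zero
//   MPI_Allreduce(MPI_IN_PLACE, &vol, 1, MPI_DOUBLE, MPI_SUM, comm);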
// Detects mismatch between the source and target mesh domains and repairs
// remapped fields to account for it.
template<int D, Entity_kind onwhat,
         class SourceMesh_Wrapper, class SourceState_Wrapper,
         class TargetMesh_Wrapper, class TargetState_Wrapper>
class MismatchFixer {
 public:
  MismatchFixer(SourceMesh_Wrapper const& source_mesh,
                SourceState_Wrapper const& source_state,
                TargetMesh_Wrapper const& target_mesh,
                TargetState_Wrapper & target_state,
                Wonton::Executor_type const *executor,
                bool global_check = true) :
      source_mesh_(source_mesh), source_state_(source_state),
      target_mesh_(target_mesh), target_state_(target_state),
      global_check_(global_check) {

#ifdef PORTAGE_ENABLE_MPI
    // run distributed only if handed a valid MPI executor/communicator
    auto mpiexecutor = dynamic_cast<Wonton::MPIExecutor_type const *>(executor);
    if (mpiexecutor && mpiexecutor->mpicomm != MPI_COMM_NULL) {
      mycomm_ = mpiexecutor->mpicomm;
      MPI_Comm_rank(mycomm_, &rank_);
      MPI_Comm_size(mycomm_, &nprocs_);
      if (nprocs_ > 1) distributed_ = true;
    }
#endif
  }
  // Compute (and cache) whether the source and target mesh domains are
  // mismatched, based on the intersection weights produced by the remap.
  bool check_mismatch(Portage::vector<std::vector<Weights_t>> const &
                      source_ents_and_weights) {

    if (computed_mismatch_) return mismatch_;

    // SOURCE SIDE: total volume of the owned source entities

    nsourceents_ = (onwhat == Entity_kind::CELL) ?
        source_mesh_.num_owned_cells() : source_mesh_.num_owned_nodes();

    // mask out duplicate entities so no volume is counted twice across ranks
    std::vector<int> source_ent_masks(nsourceents_, 1);
#ifdef PORTAGE_ENABLE_MPI
    if (distributed_ && global_check_)
      get_unique_entity_masks<onwhat, SourceMesh_Wrapper>(source_mesh_,
                                                          &source_ent_masks,
                                                          mycomm_);
#endif

    source_ent_volumes_.resize(nsourceents_, 0.0);
    for (int s = 0; s < nsourceents_; s++)
      source_ent_volumes_[s] = (onwhat == Entity_kind::CELL) ?
          source_ent_masks[s]*source_mesh_.cell_volume(s) :
          source_ent_masks[s]*source_mesh_.dual_cell_volume(s);
    source_volume_ =
        std::accumulate(source_ent_volumes_.begin(), source_ent_volumes_.end(),
                        0.0);

    global_source_volume_ = source_volume_;
#ifdef PORTAGE_ENABLE_MPI
    if (distributed_ && global_check_)
      MPI_Allreduce(&source_volume_, &global_source_volume_, 1, MPI_DOUBLE,
                    MPI_SUM, mycomm_);
#endif

    // TARGET SIDE: total volume of the owned target entities

    ntargetents_ = (onwhat == Entity_kind::CELL) ?
        target_mesh_.num_owned_cells() : target_mesh_.num_owned_nodes();

    target_ent_volumes_.resize(ntargetents_, 0.0);
    for (int t = 0; t < ntargetents_; t++)
      target_ent_volumes_[t] = (onwhat == Entity_kind::CELL) ?
          target_mesh_.cell_volume(t) : target_mesh_.dual_cell_volume(t);

    target_volume_ = std::accumulate(target_ent_volumes_.begin(),
                                     target_ent_volumes_.end(), 0.0);

    global_target_volume_ = target_volume_;
#ifdef PORTAGE_ENABLE_MPI
    if (distributed_ && global_check_)
      MPI_Allreduce(&target_volume_, &global_target_volume_, 1, MPI_DOUBLE,
                    MPI_SUM, mycomm_);
#endif

    // INTERSECTION: the first weight of each source contribution is the
    // volume of its intersection with the target entity

    xsect_volumes_.resize(ntargetents_, 0.0);
    for (int t = 0; t < ntargetents_; t++) {
      std::vector<Weights_t> const& sw_vec = source_ents_and_weights[t];
      for (auto const& sw : sw_vec)
        xsect_volumes_[t] += sw.weights[0];
    }

    double xsect_volume = std::accumulate(xsect_volumes_.begin(),
                                          xsect_volumes_.end(), 0.0);

    global_xsect_volume_ = xsect_volume;
#ifdef PORTAGE_ENABLE_MPI
    if (distributed_ && global_check_)
      MPI_Allreduce(&xsect_volume, &global_xsect_volume_, 1,
                    MPI_DOUBLE, MPI_SUM, mycomm_);
#endif

    // if the intersection volume is noticeably smaller than the source
    // volume, some source entities are not fully covered by the target mesh

    relvoldiff_source_ =
        fabs(global_xsect_volume_-global_source_volume_)/global_source_volume_;
    if (relvoldiff_source_ > voldifftol_) {
      mismatch_ = true;
      if (rank_ == 0)
        std::cerr << "\n** MESH MISMATCH -" <<
            " some source cells are not fully covered by the target mesh\n";

      // report which source entities are not fully covered
      std::vector<double> source_covered_vol(source_ent_volumes_);
      for (auto it = target_mesh_.begin(onwhat, Entity_type::PARALLEL_OWNED);
           it != target_mesh_.end(onwhat, Entity_type::PARALLEL_OWNED); it++) {
        std::vector<Weights_t> const& sw_vec = source_ents_and_weights[*it];
        for (auto const& sw : sw_vec)
          source_covered_vol[sw.entityID] -= sw.weights[0];
      }

      for (auto it = source_mesh_.begin(onwhat, Entity_type::PARALLEL_OWNED);
           it != source_mesh_.end(onwhat, Entity_type::PARALLEL_OWNED); it++)
        if (source_covered_vol[*it] > voldifftol_) {
          if (onwhat == Entity_kind::CELL)
            std::cerr << "Source cell " << *it <<
                " not fully covered by target cells \n";
          else
            std::cerr << "Source dual cell " << *it <<
                " not fully covered by target dual cells \n";
        }
    }

    // likewise, if the intersection volume is noticeably smaller than the
    // target volume, some target entities are not fully covered by the
    // source mesh

    relvoldiff_target_ =
        fabs(global_xsect_volume_-global_target_volume_)/global_target_volume_;
    if (relvoldiff_target_ > voldifftol_) {
      mismatch_ = true;
      if (rank_ == 0)
        std::cerr << "\n** MESH MISMATCH -" <<
            " some target cells are not fully covered by the source mesh\n";

      for (auto it = target_mesh_.begin(onwhat, Entity_type::PARALLEL_OWNED);
           it != target_mesh_.end(onwhat, Entity_type::PARALLEL_OWNED); it++) {
        int t = *it;
        double covered_vol = 0.0;
        std::vector<Weights_t> const& sw_vec = source_ents_and_weights[t];
        for (auto const& sw : sw_vec)
          covered_vol += sw.weights[0];
        if (fabs(covered_vol-target_ent_volumes_[t])/target_ent_volumes_[t] >
            voldifftol_) {
          if (onwhat == Entity_kind::CELL)
            std::cerr << "Target cell " << *it << " on rank " << rank_ <<
                " not fully covered by source cells \n";
          else
            std::cerr << "Target dual cell " << *it << " on rank " << rank_ <<
                " not fully covered by source dual cells \n";
        }
      }
    }

    computed_mismatch_ = true;

    return mismatch_;
  }
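  // Worked example (not in the original source): remapping a unit-square
  // source mesh onto a target mesh covering [0,1]x[0,1.1] gives
  // global_source_volume_ = 1.0, global_target_volume_ = 1.1 and
  // global_xsect_volume_ = 1.0, so relvoldiff_source_ = 0 (the source is
  // fully covered) while relvoldiff_target_ = 0.1/1.1 ~ 0.091, which flags
  // a target-side mismatch.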
  // Return whether the mesh domains are mismatched
  // (must be called after check_mismatch)
  bool has_mismatch() const {
    assert(computed_mismatch_ && "check_mismatch must be called first!");
    return mismatch_;
  }
  // Repair the remapped field to account for boundary mismatch
  bool fix_mismatch(std::string const & src_var_name,
                    std::string const & trg_var_name,
                    double global_lower_bound = -std::numeric_limits<double>::max(),
                    double global_upper_bound = std::numeric_limits<double>::max(),
                    double conservation_tol =
                        1e2*std::numeric_limits<double>::epsilon(),
                    int maxiter = 5,
                    Partial_fixup_type partial_fixup_type =
                        Partial_fixup_type::SHIFTED_CONSERVATIVE,
                    Empty_fixup_type empty_fixup_type =
                        Empty_fixup_type::EXTRAPOLATE) {

    if (distributed_ && !global_check_ &&
        partial_fixup_type == Partial_fixup_type::SHIFTED_CONSERVATIVE)
      throw std::runtime_error(
          "Cannot implement SHIFTED_CONSERVATIVE in a distributed run without global checks!");

    if (!compute_layers() && empty_fixup_type == Empty_fixup_type::EXTRAPOLATE)
      throw std::runtime_error(
          "Cannot extrapolate into empty cells without computing layers first!");

    if (!computed_mismatch_) {
      throw std::runtime_error("check_mismatch must be called first!");
    }

    if (source_state_.field_type(onwhat, src_var_name) ==
        Field_type::MESH_FIELD)
      return fix_mismatch_meshvar(src_var_name, trg_var_name,
                                  global_lower_bound, global_upper_bound,
                                  conservation_tol, maxiter,
                                  partial_fixup_type, empty_fixup_type);
    return false;
  }
  // Repair a remapped mesh field to account for boundary mismatch
  bool fix_mismatch_meshvar(std::string const & src_var_name,
                            std::string const & trg_var_name,
                            double global_lower_bound,
                            double global_upper_bound,
                            double conservation_tol =
                                1e2*std::numeric_limits<double>::epsilon(),
                            int maxiter = 5,
                            Partial_fixup_type partial_fixup_type =
                                Partial_fixup_type::SHIFTED_CONSERVATIVE,
                            Empty_fixup_type empty_fixup_type =
                                Empty_fixup_type::EXTRAPOLATE) {

    // print the bound-violation warnings only once per run
    static bool hit_lobound = false, hit_hibound = false;

    double const *source_data;
    source_state_.mesh_get_data(onwhat, src_var_name, &source_data);
    double *target_data;
    target_state_.mesh_get_data(onwhat, trg_var_name, &target_data);

    if (partial_fixup_type == Partial_fixup_type::LOCALLY_CONSERVATIVE ||
        partial_fixup_type == Partial_fixup_type::SHIFTED_CONSERVATIVE) {
      // scale each partially covered target entity so that it conserves
      // exactly the amount it actually received from the source mesh
      for (int t = 0; t < ntargetents_; t++) {
        if (!is_cell_empty_[t]) {
          if (fabs(xsect_volumes_[t]-target_ent_volumes_[t])/target_ent_volumes_[t] > voldifftol_)
            target_data[t] *= xsect_volumes_[t]/target_ent_volumes_[t];
        }
      }
    }

    if (empty_fixup_type == Empty_fixup_type::EXTRAPOLATE) {
      // sweep the empty layers from the covered region outward, assigning
      // each empty entity the average of its already-set neighbors
      int curlayernum = 1;
      for (std::vector<int> const& curlayer : emptylayers_) {
        for (int ent : curlayer) {
          std::vector<int> nbrs;
          if (onwhat == Entity_kind::CELL)
            target_mesh_.cell_get_node_adj_cells(ent, Entity_type::PARALLEL_OWNED,
                                                 &nbrs);
          else
            target_mesh_.node_get_cell_adj_nodes(ent, Entity_type::PARALLEL_OWNED,
                                                 &nbrs);

          double aveval = 0.0;
          int nave = 0;
          for (int nbr : nbrs) {
            if (layernum_[nbr] < curlayernum) {
              aveval += target_data[nbr];
              nave++;
            }
          }
          if (nave)
            aveval /= nave;
          else
            std::cerr <<
                "No owned neighbors of empty entity to extrapolate data from\n";
          target_data[ent] = aveval;
        }
        curlayernum++;
      }
    }
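    // Illustration of the extrapolation above (not in the original source):
    // for a 1-D strip of target cells where F is covered by the source mesh
    // and digits are the layer numbers assigned by compute_layers(),
    //
    //     F  F  F  1  2  3  2  1  F  F
    //
    // layer-1 cells average their covered neighbors (layernum 0), layer-2
    // cells average layer-1 and covered neighbors, and so on, marching the
    // field values into the uncovered region.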
    if (partial_fixup_type == Partial_fixup_type::SHIFTED_CONSERVATIVE) {
      // integrate the field over both meshes; the difference is the global
      // conservation deficit to be spread over the target entities
      double source_sum =
          std::inner_product(source_data, source_data + nsourceents_,
                             source_ent_volumes_.begin(), 0.0);

      double global_source_sum = source_sum;
#ifdef PORTAGE_ENABLE_MPI
      if (distributed_ && global_check_)
        MPI_Allreduce(&source_sum, &global_source_sum, 1, MPI_DOUBLE, MPI_SUM,
                      mycomm_);
#endif

      double target_sum =
          std::inner_product(target_data, target_data + ntargetents_,
                             target_ent_volumes_.begin(), 0.0);

      double global_target_sum = target_sum;
#ifdef PORTAGE_ENABLE_MPI
      if (distributed_ && global_check_)
        MPI_Allreduce(&target_sum, &global_target_sum, 1, MPI_DOUBLE, MPI_SUM,
                      mycomm_);
#endif

      double global_diff = global_target_sum - global_source_sum;
      double reldiff = global_diff/global_source_sum;

      if (fabs(reldiff) < conservation_tol)
        return true;  // the discrepancy is within tolerance; nothing to fix

      // volume over which the discrepancy may be spread: with LEAVE_EMPTY,
      // only the covered target entities can absorb it
      double adj_target_volume;
      double global_adj_target_volume;
      double global_covered_target_volume = 0.0;
      if (empty_fixup_type == Empty_fixup_type::LEAVE_EMPTY) {
        double covered_target_volume = 0.0;
        for (int t = 0; t < ntargetents_; t++) {
          if (!is_cell_empty_[t]) {
            covered_target_volume += target_ent_volumes_[t];
          }
        }
        global_covered_target_volume = covered_target_volume;
#ifdef PORTAGE_ENABLE_MPI
        if (distributed_ && global_check_)
          MPI_Allreduce(&covered_target_volume, &global_covered_target_volume,
                        1, MPI_DOUBLE, MPI_SUM, mycomm_);
#endif
        adj_target_volume = covered_target_volume;
        global_adj_target_volume = global_covered_target_volume;
      } else {
        adj_target_volume = target_volume_;
        global_adj_target_volume = global_target_volume_;
      }

      // amount to subtract from each target value, per unit volume
      double udiff = global_diff/global_adj_target_volume;
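      // Worked example (not in the original source): if the source integral
      // is 100 and the target integral after local repair is 106, then
      // global_diff = 6. Spread over an adjustable target volume of 12,
      // udiff = 0.5, so subtracting 0.5 from every target value lowers the
      // target integral by 0.5*12 = 6 and restores conservation, up to
      // entities that hit a bound, which the iteration below handles.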
      int iter = 0;
      while (fabs(reldiff) > conservation_tol && iter < maxiter) {
        for (auto it = target_mesh_.begin(onwhat, Entity_type::PARALLEL_OWNED);
             it != target_mesh_.end(onwhat, Entity_type::PARALLEL_OWNED); it++) {
          int t = *it;
          if (is_cell_empty_[t] &&
              empty_fixup_type == Empty_fixup_type::LEAVE_EMPTY)
            continue;

          if ((target_data[t]-udiff) < global_lower_bound) {
            // clamp to the bound instead of applying the full correction;
            // this entity can absorb no more of the deficit, so remove its
            // volume from the adjustable volume
            target_data[t] = global_lower_bound;
            if (!hit_lobound) {
              std::cerr << "Hit lower bound for cell " << t <<
                  " (and maybe other cells) on rank " << rank_ << "\n";
              hit_lobound = true;
            }
            adj_target_volume -= target_ent_volumes_[t];
          } else if ((target_data[t]-udiff) > global_upper_bound) {
            target_data[t] = global_upper_bound;
            if (!hit_hibound) {
              std::cerr << "Hit upper bound for cell " << t <<
                  " (and maybe other cells) on rank " << rank_ << "\n";
              hit_hibound = true;
            }
            adj_target_volume -= target_ent_volumes_[t];
          } else {
            target_data[t] -= udiff;
          }
        }

        // recompute the discrepancy and the correction for the next pass
        target_sum = std::inner_product(target_data, target_data + ntargetents_,
                                        target_ent_volumes_.begin(), 0.0);

        global_target_sum = target_sum;
#ifdef PORTAGE_ENABLE_MPI
        if (distributed_ && global_check_)
          MPI_Allreduce(&target_sum, &global_target_sum, 1, MPI_DOUBLE, MPI_SUM,
                        mycomm_);
#endif

        global_diff = global_target_sum - global_source_sum;

        global_adj_target_volume = adj_target_volume;
#ifdef PORTAGE_ENABLE_MPI
        if (distributed_ && global_check_)
          MPI_Allreduce(&adj_target_volume, &global_adj_target_volume, 1,
                        MPI_DOUBLE, MPI_SUM, mycomm_);
#endif

        udiff = global_diff/global_adj_target_volume;
        reldiff = global_diff/global_source_sum;

        // reset the adjustable volume for the next iteration
        if (empty_fixup_type == Empty_fixup_type::LEAVE_EMPTY)
          global_adj_target_volume = global_covered_target_volume;
        else
          global_adj_target_volume = global_target_volume_;

        iter++;
      }

      if (fabs(reldiff) > conservation_tol) {
        if (rank_ == 0) {
          std::cerr << "Redistribution not entirely successful for variable " <<
              src_var_name << "\n";
          std::cerr << "Relative conservation error is " << reldiff << "\n";
          std::cerr << "Absolute conservation error is " << global_diff << "\n";
        }
        return false;
      }
      return true;
    } else if (partial_fixup_type == Partial_fixup_type::CONSTANT ||
               partial_fixup_type == Partial_fixup_type::LOCALLY_CONSERVATIVE) {
      return true;  // any local repair already happened above
    } else {
      std::cerr << "Unknown Partial fixup type\n";
      return false;
    }
  }
 private:
  SourceMesh_Wrapper const& source_mesh_;
  SourceState_Wrapper const& source_state_;
  TargetMesh_Wrapper const& target_mesh_;
  TargetState_Wrapper & target_state_;
  int nsourceents_, ntargetents_;
  std::vector<double> source_ent_volumes_, target_ent_volumes_, xsect_volumes_;
  double source_volume_, target_volume_;
  double global_source_volume_, global_target_volume_, global_xsect_volume_;
  double relvoldiff_source_, relvoldiff_target_, relvoldiff_;
  std::vector<int> layernum_;        // empty-layer number of each target entity
  std::vector<bool> is_cell_empty_;  // true if no source entity covers it
  std::vector<std::vector<int>> emptylayers_;
  bool mismatch_ = false;
  int rank_ = 0, nprocs_ = 1;
  // tolerance on relative volume differences (the original declaration fell
  // in a gap; the default here is assumed, mirroring the conservation
  // tolerance used elsewhere in this file)
  double voldifftol_ = 1e2*std::numeric_limits<double>::epsilon();
  bool distributed_ = false;
  bool computed_mismatch_ = false;
  bool global_check_ = true;
  bool computed_layers_ = false;
#ifdef PORTAGE_ENABLE_MPI
  MPI_Comm mycomm_ = MPI_COMM_NULL;
#endif
  // Group the uncovered ("empty") target entities into layers: layer 1 is
  // adjacent to covered entities, layer 2 to layer 1, and so on, so that
  // field values can later be extrapolated inward one layer at a time.
  bool compute_layers() {

    if (computed_layers_) return true;

    // nothing to do without mismatch; and in a distributed run without
    // global checks the entities cannot be classified reliably
    if (!mismatch_ || (distributed_ && !global_check_)) return mismatch_;

    relvoldiff_ = relvoldiff_source_ + relvoldiff_target_;

    // detect target entities that received no contribution from any source
    is_cell_empty_.resize(ntargetents_, false);
    std::vector<int> emptyents;
    for (int t = 0; t < ntargetents_; t++) {
      if (fabs(xsect_volumes_[t]) < std::numeric_limits<double>::epsilon()) {
        emptyents.push_back(t);
        is_cell_empty_[t] = true;
      }
    }
    int nempty = emptyents.size();

    int global_nempty = nempty;
#ifdef PORTAGE_ENABLE_MPI
    if (distributed_ && global_check_) {
      std::vector<int> nempty_all(nprocs_, 0);
      MPI_Gather(&nempty, 1, MPI_INT, &(nempty_all[0]), 1, MPI_INT, 0, mycomm_);
      global_nempty = std::accumulate(nempty_all.begin(), nempty_all.end(), 0);
    }
#endif

    if (rank_ == 0 && global_nempty) {
      if (onwhat == Entity_kind::CELL)
        std::cerr << "One or more target cells are not covered by " <<
            "ANY source cells.\n" <<
            " Will assign values based on their neighborhood\n";
      else
        std::cerr << "One or more target dual cells are not covered by " <<
            "ANY source dual cells.\n" <<
            " Will assign values based on their neighborhood\n";
    }

    layernum_.resize(ntargetents_, 0);

    int nlayers = 0;
    int ntagged = 0, old_ntagged = -1;
    while (ntagged < nempty && ntagged > old_ntagged) {
      old_ntagged = ntagged;

      std::vector<int> curlayerents;

      for (int ent : emptyents) {
        if (layernum_[ent] != 0) continue;  // already assigned to a layer

        // an empty entity joins the current layer if any owned neighbor is
        // covered or belongs to an earlier layer
        std::vector<int> nbrs;
        if (onwhat == Entity_kind::CELL)
          target_mesh_.cell_get_node_adj_cells(ent, Entity_type::ALL, &nbrs);
        else
          target_mesh_.node_get_cell_adj_nodes(ent, Entity_type::ALL, &nbrs);
        for (int nbr : nbrs) {
          if ((onwhat == Entity_kind::CELL &&
               target_mesh_.cell_get_type(nbr) != Entity_type::PARALLEL_OWNED) ||
              (onwhat == Entity_kind::NODE &&
               target_mesh_.node_get_type(nbr) != Entity_type::PARALLEL_OWNED))
            continue;
          if (!is_cell_empty_[nbr] || layernum_[nbr] != 0) {
            curlayerents.push_back(ent);
            break;
          }
        }
      }

      // tag the entities of the current layer and collect them
      for (int ent : curlayerents)
        layernum_[ent] = nlayers+1;
      ntagged += curlayerents.size();

      emptylayers_.push_back(curlayerents);
      nlayers++;
    }

    return computed_layers_ = true;
  }
};  // class MismatchFixer
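// A minimal usage sketch (illustration only; the wrapper types are
// placeholders for whatever Wonton-compatible mesh/state wrappers the
// application uses, and source_ents_and_weights would come from the
// driver's intersection stage):
//
//   MismatchFixer<2, Entity_kind::CELL,
//                 Wonton::Jali_Mesh_Wrapper, Wonton::Jali_State_Wrapper,
//                 Wonton::Jali_Mesh_Wrapper, Wonton::Jali_State_Wrapper>
//       fixer(srcmesh, srcstate, trgmesh, trgstate, executor);
//
//   if (fixer.check_mismatch(source_ents_and_weights) && fixer.has_mismatch())
//     fixer.fix_mismatch("density", "density");  // default SHIFTED_CONSERVATIVE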
}  // namespace Portage

#endif  // PORTAGE_DRIVER_FIX_MISMATCH_H_