#ifndef MPI_BOUNDING_BOXES_H_
#define MPI_BOUNDING_BOXES_H_

#ifdef PORTAGE_ENABLE_MPI

#include <algorithm>
#include <cassert>
#include <iostream>
#include <limits>
#include <map>
#include <numeric>
#include <string>
#include <unordered_map>
#include <vector>

#include <mpi.h>

// GID_t, Entity_kind, Wonton::MPIExecutor_type and to_MPI_Datatype are
// provided by the Wonton/Portage support headers pulled in by this file.
#include "wonton/support/Point.h"
#include "wonton/state/state_vector_uni.h"

using Wonton::Point;
using Wonton::GID_t;
using Wonton::Entity_kind;
using Wonton::to_MPI_Datatype;

class MPI_Bounding_Boxes {
 public:

  //! Constructor: store the MPI communicator of the given executor
  MPI_Bounding_Boxes(Wonton::MPIExecutor_type const *mpiexecutor) {
    comm_ = mpiexecutor->mpicomm;
  }

  //! Destructor
  ~MPI_Bounding_Boxes() = default;

  //! Communication counts for one entity kind
  struct comm_info_t {
    //! number of entities (total / owned) on this rank before redistribution
    int sourceNum = 0, sourceNumOwned = 0;
    //! number of entities (total / owned) on this rank after redistribution
    int newNum = 0, newNumOwned = 0;
    //! per-rank send counts (all / owned entities)
    std::vector<int> sendCounts {}, sendOwnedCounts {};
    //! per-rank receive counts (all / owned entities)
    std::vector<int> recvCounts {}, recvOwnedCounts {};
  };

  /*!
    @brief Check whether this rank needs source data from any other rank.
  */
  template <class Source_Mesh, class Target_Mesh>
  bool is_bob_hungry(const Source_Mesh &source_mesh,
                     const Target_Mesh &target_mesh)
  {
    // Get the MPI communicator size and rank information
    int commSize, commRank;
    MPI_Comm_size(comm_, &commSize);
    MPI_Comm_rank(comm_, &commRank);

    dim_ = source_mesh.space_dimension();
    assert(dim_ == target_mesh.space_dimension());

    // sendFlags: which ranks this rank would send its source data to
    std::vector<bool> sendFlags(commSize);
    compute_sendflags(source_mesh, target_mesh, sendFlags);

    // convert to ints for the all-to-all exchange
    std::vector<int> sendFlagsInt(commSize);
    for (int i=0; i<commSize; ++i) sendFlagsInt[i] = sendFlags[i] ? 1 : 0;

    // each rank tells each other rank whether it is going to send to it
    std::vector<int> recvFlags(commSize);
    MPI_Alltoall(&(sendFlagsInt[0]), 1, MPI_INT,
                 &(recvFlags[0]), 1, MPI_INT, comm_);

    // if any other rank is going to send to this rank, it needs data
    for (int i=0; i<commSize; ++i)
      if (i != commRank && recvFlags[i])
        return true;

    // no other rank is sending anything to this rank
    return false;
  }

  /*!
    @brief Check whether any rank needs data from another rank
    (logical OR of is_bob_hungry over all ranks).
  */
  template <class Source_Mesh, class Target_Mesh>
  bool is_redistribution_needed(const Source_Mesh &source_mesh,
                                const Target_Mesh &target_mesh)
  {
    // does this rank need data from another rank?
    bool r = is_bob_hungry(source_mesh, target_mesh);

    // convert to an int and reduce the logical OR across all ranks
    int ir = r ? 1 : 0;
    int result;
    MPI_Allreduce(&ir, &result, 1, MPI_INT, MPI_LOR, comm_);

    return result;
  }
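
  /*!
    A sketch of the typical calling sequence (illustrative only; the executor
    construction and the flat source mesh/state variable names below are
    assumptions of this example, not defined in this header):

    @code
    Wonton::MPIExecutor_type mpiexecutor(MPI_COMM_WORLD);
    MPI_Bounding_Boxes distributor(&mpiexecutor);

    // redistribute source data only if some rank needs remote source cells
    if (distributor.is_redistribution_needed(source_mesh_flat, target_mesh))
      distributor.distribute(source_mesh_flat, source_state_flat,
                             target_mesh, target_state);
    @endcode
  */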

  /*!
    @brief Redistribute the flat source mesh and state so that each rank holds
    every source entity that may overlap its target partition.
  */
  template <class Source_Mesh, class Source_State, class Target_Mesh, class Target_State>
  void distribute(Source_Mesh &source_mesh_flat, Source_State &source_state_flat,
                  Target_Mesh &target_mesh, Target_State &target_state)
  {
    int commSize, commRank;
    MPI_Comm_size(comm_, &commSize);
    MPI_Comm_rank(comm_, &commRank);

    int dim = dim_ = source_mesh_flat.space_dimension();
    assert(dim == target_mesh.space_dimension());

    std::vector<bool> sendFlags(commSize);
    compute_sendflags(source_mesh_flat, target_mesh, sendFlags);

    // set comm counts for cells
    comm_info_t cellInfo;
    int sourceNumOwnedCells = source_mesh_flat.num_owned_cells();
    int sourceNumCells = sourceNumOwnedCells + source_mesh_flat.num_ghost_cells();
    setSendRecvCounts(&cellInfo, commSize, sendFlags, sourceNumCells, sourceNumOwnedCells);

    // set comm counts for nodes
    comm_info_t nodeInfo;
    int sourceNumOwnedNodes = source_mesh_flat.num_owned_nodes();
    int sourceNumNodes = sourceNumOwnedNodes + source_mesh_flat.num_ghost_nodes();
    setSendRecvCounts(&nodeInfo, commSize, sendFlags, sourceNumNodes, sourceNumOwnedNodes);

    // SEND GLOBAL CELL IDS
    std::vector<GID_t>& sourceCellGlobalIds = source_mesh_flat.get_global_cell_ids();
    std::vector<GID_t> distributedCellGlobalIds(cellInfo.newNum);
    sendField(cellInfo, commRank, commSize, to_MPI_Datatype<GID_t>(), 1,
              sourceCellGlobalIds, &distributedCellGlobalIds);

    // SEND GLOBAL NODE IDS
    std::vector<GID_t>& sourceNodeGlobalIds = source_mesh_flat.get_global_node_ids();
    std::vector<GID_t> distributedNodeGlobalIds(nodeInfo.newNum);
    sendField(nodeInfo, commRank, commSize, to_MPI_Datatype<GID_t>(), 1,
              sourceNodeGlobalIds, &distributedNodeGlobalIds);

    // compress the received global ids, dropping duplicates but keeping the
    // owned/ghost distinction
    compress_with_ghosts(distributedCellGlobalIds, cellInfo.newNumOwned,
                         distributedCellIds_, flatCellGlobalIds_, flatCellNumOwned_);
    compress_with_ghosts(distributedNodeGlobalIds, nodeInfo.newNumOwned,
                         distributedNodeIds_, flatNodeGlobalIds_, flatNodeNumOwned_);

    // create the maps from global id to flat-mesh index
    create_gid_to_flat_map(flatCellGlobalIds_, gidToFlatCellId_);
    create_gid_to_flat_map(flatNodeGlobalIds_, gidToFlatNodeId_);

    // SEND NODE COORDINATES
    std::vector<double>& sourceCoords = source_mesh_flat.get_coords();
    std::vector<double> distributedCoords(dim*nodeInfo.newNum);
    sendField(nodeInfo, commRank, commSize, MPI_DOUBLE, dim,
              sourceCoords, &distributedCoords);

    // merge duplicate node coordinates back into the flat mesh storage
    merge_duplicate_data(distributedCoords, distributedNodeIds_, sourceCoords, dim_);

    // SEND NUMBER OF NODES FOR EACH CELL
    std::vector<int>& sourceCellNodeCounts = source_mesh_flat.get_cell_node_counts();
    std::vector<int> distributedCellNodeCounts(cellInfo.newNum);
    sendField(cellInfo, commRank, commSize, MPI_INT, 1,
              sourceCellNodeCounts, &distributedCellNodeCounts);

    // merge duplicate cell node counts
    merge_duplicate_data(distributedCellNodeCounts, distributedCellIds_, sourceCellNodeCounts);

    // SEND CELL-TO-NODE LISTS
    std::vector<int>& sourceCellNodeOffsets = source_mesh_flat.get_cell_node_offsets();
    std::vector<int>& sourceCellToNodeList = source_mesh_flat.get_cell_to_node_list();

    int sizeCellToNodeList = sourceCellToNodeList.size();
    int sizeOwnedCellToNodeList = (
        sourceNumCells == sourceNumOwnedCells ? sizeCellToNodeList :
        sourceCellNodeOffsets[sourceNumOwnedCells]);

    comm_info_t cellToNodeInfo;
    setSendRecvCounts(&cellToNodeInfo, commSize, sendFlags,
                      sizeCellToNodeList, sizeOwnedCellToNodeList);

    // send the cell-to-node list converted to global node ids
    std::vector<GID_t> distributedCellToNodeList(cellToNodeInfo.newNum);
    sendField(cellToNodeInfo, commRank, commSize, to_MPI_Datatype<GID_t>(), 1,
              to_gid(sourceCellToNodeList, sourceNodeGlobalIds), &distributedCellToNodeList);

    // merge duplicate cell-to-node lists, mapping global ids to flat node indices
    merge_duplicate_lists(distributedCellToNodeList, distributedCellNodeCounts,
                          distributedCellIds_, gidToFlatNodeId_, sourceCellToNodeList);

    // SEND GLOBAL FACE IDS
    int sourceNumOwnedFaces = source_mesh_flat.num_owned_faces();
    int sourceNumFaces = sourceNumOwnedFaces + source_mesh_flat.num_ghost_faces();

    comm_info_t faceInfo;
    setSendRecvCounts(&faceInfo, commSize, sendFlags, sourceNumFaces, sourceNumOwnedFaces);

    std::vector<GID_t>& sourceFaceGlobalIds = source_mesh_flat.get_global_face_ids();
    std::vector<GID_t> distributedFaceGlobalIds(faceInfo.newNum);
    sendField(faceInfo, commRank, commSize, MPI_LONG_LONG, 1,
              sourceFaceGlobalIds, &distributedFaceGlobalIds);

    // compress the received face global ids, keeping the owned/ghost distinction
    compress_with_ghosts(distributedFaceGlobalIds, faceInfo.newNumOwned,
                         distributedFaceIds_, flatFaceGlobalIds_, flatFaceNumOwned_);

    // create the map from face global id to flat-mesh index
    create_gid_to_flat_map(flatFaceGlobalIds_, gidToFlatFaceId_);

    // SEND NUMBER OF FACES FOR EACH CELL
    std::vector<int>& sourceCellFaceOffsets = source_mesh_flat.get_cell_face_offsets();
    std::vector<int>& sourceCellToFaceList = source_mesh_flat.get_cell_to_face_list();

    int sizeCellToFaceList = sourceCellToFaceList.size();
    int sizeOwnedCellToFaceList = (
        sourceNumCells == sourceNumOwnedCells ? sizeCellToFaceList :
        sourceCellFaceOffsets[sourceNumOwnedCells]);

    comm_info_t cellToFaceInfo;
    setSendRecvCounts(&cellToFaceInfo, commSize, sendFlags,
                      sizeCellToFaceList, sizeOwnedCellToFaceList);

    std::vector<int>& sourceCellFaceCounts = source_mesh_flat.get_cell_face_counts();
    std::vector<int> distributedCellFaceCounts(cellInfo.newNum);
    sendField(cellInfo, commRank, commSize, MPI_INT, 1,
              sourceCellFaceCounts, &distributedCellFaceCounts);

    // merge duplicate cell face counts
    merge_duplicate_data(distributedCellFaceCounts, distributedCellIds_, sourceCellFaceCounts);

    // SEND CELL-TO-FACE LISTS
    // convert the face indices to global ids
    std::vector<GID_t> sourceCellToFaceList_ = to_gid(sourceCellToFaceList, sourceFaceGlobalIds);

    // pack the face direction into the low bit of each entry so the id and
    // the direction travel in a single message
    std::vector<bool>& sourceCellToFaceDirs = source_mesh_flat.get_cell_to_face_dirs();
    int const sourceCellToFaceListSize = sourceCellToFaceList.size();

    for (int j = 0; j < sourceCellToFaceListSize; ++j) {
      int f = sourceCellToFaceList_[j];
      int dir = static_cast<int>(sourceCellToFaceDirs[j]);
      sourceCellToFaceList_[j] = (f << 1) | dir;
    }

    std::vector<GID_t> distributedCellToFaceList(cellToFaceInfo.newNum);
    sendField(cellToFaceInfo, commRank, commSize, MPI_LONG_LONG, 1,
              sourceCellToFaceList_, &distributedCellToFaceList);

    // unpack the face directions from the received list
    std::vector<bool> distributedCellToFaceDirs(cellToFaceInfo.newNum);
    int const distributedCellToFaceListSize = distributedCellToFaceList.size();

    for (int j = 0; j < distributedCellToFaceListSize; ++j) {
      int fd = distributedCellToFaceList[j];
      distributedCellToFaceList[j] = fd >> 1;
      distributedCellToFaceDirs[j] = fd & 1;
    }
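
    // Sketch of the packing arithmetic above (an example, no extra logic):
    // a face with global id 5 and direction 1 is sent as (5 << 1) | 1 = 11,
    // and on receipt 11 >> 1 = 5 recovers the id while 11 & 1 = 1 recovers
    // the direction.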

    // merge duplicate cell-to-face lists, mapping global ids to flat face indices
    merge_duplicate_lists(distributedCellToFaceList, distributedCellFaceCounts,
                          distributedCellIds_, gidToFlatFaceId_, sourceCellToFaceList);

    // merge duplicate cell-to-face directions
    merge_duplicate_lists(distributedCellToFaceDirs, distributedCellFaceCounts,
                          distributedCellIds_, sourceCellToFaceDirs);

    // SEND FACE-TO-NODE LISTS
    std::vector<int>& sourceFaceNodeOffsets = source_mesh_flat.get_face_node_offsets();
    std::vector<int>& sourceFaceToNodeList = source_mesh_flat.get_face_to_node_list();

    int sizeFaceToNodeList = sourceFaceToNodeList.size();
    int sizeOwnedFaceToNodeList = (
        sourceNumFaces == sourceNumOwnedFaces ? sizeFaceToNodeList :
        sourceFaceNodeOffsets[sourceNumOwnedFaces]);

    comm_info_t faceToNodeInfo;
    setSendRecvCounts(&faceToNodeInfo, commSize, sendFlags,
                      sizeFaceToNodeList, sizeOwnedFaceToNodeList);

    std::vector<int>& sourceFaceNodeCounts = source_mesh_flat.get_face_node_counts();
    std::vector<int> distributedFaceNodeCounts(faceInfo.newNum);
    sendField(faceInfo, commRank, commSize, MPI_INT, 1,
              sourceFaceNodeCounts, &distributedFaceNodeCounts);

    std::vector<GID_t> distributedFaceToNodeList(faceToNodeInfo.newNum);
    sendField(faceToNodeInfo, commRank, commSize, MPI_LONG_LONG, 1,
              to_gid(sourceFaceToNodeList, sourceNodeGlobalIds), &distributedFaceToNodeList);

    // merge duplicate face node counts
    merge_duplicate_data(distributedFaceNodeCounts, distributedFaceIds_, sourceFaceNodeCounts);

    // merge duplicate face-to-node lists, mapping global ids to flat node indices
    merge_duplicate_lists(distributedFaceToNodeList, distributedFaceNodeCounts,
                          distributedFaceIds_, gidToFlatNodeId_, sourceFaceToNodeList);

    // merge duplicate face global ids
    merge_duplicate_data(distributedFaceGlobalIds, distributedFaceIds_, sourceFaceGlobalIds);

    // set the number of owned faces in the flat mesh
    source_mesh_flat.set_num_owned_faces(flatFaceNumOwned_);

    // multimaterial state info
    int nmats = source_state_flat.num_materials();
    comm_info_t num_mat_cells_info {};

    if (nmats > 0) {

      std::cout << "in distribute, this is a multimaterial problem with "
                << nmats << " materials\n";

      // set the comm counts for the number of materials on each rank
      comm_info_t num_mats_info;
      setSendRecvCounts(&num_mats_info, commSize, sendFlags, nmats, nmats);

      // get the sorted material ids on this rank
      std::vector<int> material_ids = source_state_flat.get_material_ids();

      // resize the post-distribute material id vector
      distributedMaterialIds_.resize(num_mats_info.newNum);

      // send the material ids
      sendData(commRank, commSize, MPI_INT, 1, 0, num_mats_info.sourceNum, 0,
               num_mats_info.sendCounts, num_mats_info.recvCounts,
               material_ids, &distributedMaterialIds_);

      // get the material shapes (number of cells per material) on this rank
      std::vector<int> material_shapes = source_state_flat.get_material_shapes();

      // resize the post-distribute material shape vector
      distributedMaterialShapes_.resize(num_mats_info.newNum);

      // send the material shapes
      sendData(commRank, commSize, MPI_INT, 1, 0, num_mats_info.sourceNum, 0,
               num_mats_info.sendCounts, num_mats_info.recvCounts,
               material_shapes, &distributedMaterialShapes_);

      // get the number of material cells on this rank
      int nmatcells = source_state_flat.num_material_cells();

      // set the comm counts for the number of material cells on each rank
      setSendRecvCounts(&num_mat_cells_info, commSize, sendFlags, nmatcells, nmatcells);

      // get the material cells on this rank
      std::vector<int> material_cells = source_state_flat.get_material_cells();

      // resize the post-distribute material cell vector
      distributedMaterialCells_.resize(num_mat_cells_info.newNum);

      // send the material cells as global cell ids
      sendData(commRank, commSize, to_MPI_Datatype<GID_t>(), 1, 0,
               num_mat_cells_info.sourceNum, 0, num_mat_cells_info.sendCounts,
               num_mat_cells_info.recvCounts, to_gid(material_cells, sourceCellGlobalIds),
               &distributedMaterialCells_);

      // material_indices[mat_id] holds the received global cell ids for that material
      std::unordered_map<int, std::vector<GID_t>> material_indices;

      // running counter into the concatenated material cell list
      int running_counter = 0;

      // loop over the material ids received from all ranks
      int const distributedMaterialIdsSize = distributedMaterialIds_.size();

      for (int i = 0; i < distributedMaterialIdsSize; ++i) {

        // the current material
        int mat_id = distributedMaterialIds_[i];

        // the number of cells this contribution has for the material
        int nmat_cells = distributedMaterialShapes_[i];

        // get or create the cell list for this material
        std::vector<GID_t>& these_material_cells = material_indices[mat_id];

        // append this contribution's cells
        for (int j = 0; j < nmat_cells; ++j) {
          these_material_cells.push_back(distributedMaterialCells_[running_counter++]);
        }
      }

      // material cells are added, not replaced, by the state manager, so
      // start from a clean slate
      source_state_flat.clear_material_cells();

      // loop over the materials
      for (auto &kv : material_indices) {

        // the material id
        int m = kv.first;

        // compress out duplicate cell gids for this material
        compress(kv.second, distributedMaterialCellIds_[m]);

        // convert the kept entries to flat cell indices
        std::vector<int> flatMaterialCellIds;
        flatMaterialCellIds.reserve(distributedMaterialCellIds_[m].size());

        for (auto id : distributedMaterialCellIds_[m])
          flatMaterialCellIds.push_back(gidToFlatCellId_[kv.second[id]]);

        // register the material cells with the state manager
        source_state_flat.mat_add_cells(kv.first, flatMaterialCellIds);
      }
    }  // if (nmats > 0)
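
    // At this point (for a multimaterial run) the received material data sit
    // in parallel arrays: distributedMaterialIds_[i] is a material id from
    // some rank, distributedMaterialShapes_[i] says how many entries of
    // distributedMaterialCells_ belong to it, and distributedMaterialCellIds_
    // keeps, per material, the positions of the unique cells. Illustrative
    // values only: ids {2, 2} with shapes {3, 2} mean the first three entries
    // of the concatenated cell list and the next two all belong to material 2,
    // contributed by two different ranks.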

    // send and receive each field to be remapped
    for (std::string field_name : source_state_flat.names()) {

      // a packed copy of the field values (not a pointer to the original field)
      std::vector<double> sourceField = source_state_flat.pack(field_name);

      // get the field stride (number of doubles per entity)
      int sourceFieldStride = source_state_flat.get_field_stride(field_name);

      // pick the comm counts matching the entity kind of this field
      comm_info_t info;
      if (source_state_flat.get_entity(field_name) == Entity_kind::NODE) {
        // node mesh field
        info = nodeInfo;
      } else if (source_state_flat.field_type(Entity_kind::CELL, field_name) ==
                 Wonton::Field_type::MESH_FIELD) {
        // cell mesh field
        info = cellInfo;
      } else {
        // multimaterial field
        info = num_mat_cells_info;
      }

      // storage for the received values (raw doubles, still to be merged)
      std::vector<double> distributedField(sourceFieldStride*info.newNum);

      // send the field
      sendField(info, commRank, commSize, MPI_DOUBLE, sourceFieldStride,
                sourceField, &distributedField);

      std::vector<double> tempDistributedField;

      if (source_state_flat.get_entity(field_name) == Entity_kind::NODE) {

        // node mesh field: merge duplicates (still raw doubles), then unpack
        merge_duplicate_data(distributedField, distributedNodeIds_, tempDistributedField, sourceFieldStride);
        source_state_flat.unpack(field_name, tempDistributedField);

      } else if (source_state_flat.field_type(Entity_kind::CELL, field_name) ==
                 Wonton::Field_type::MESH_FIELD) {

        // cell mesh field: merge duplicates (still raw doubles), then unpack
        merge_duplicate_data(distributedField, distributedCellIds_, tempDistributedField, sourceFieldStride);
        source_state_flat.unpack(field_name, tempDistributedField);

      } else {

        // multimaterial field: merging and type conversion both happen in the
        // unpack routine, which also needs the material shapes
        source_state_flat.unpack(field_name, distributedField,
                                 distributedMaterialIds_, distributedMaterialShapes_,
                                 distributedMaterialCellIds_);
      }
    }  // for each field

    // merge the global id lists last: the gid conversions above rely on the
    // unmerged lists
    merge_duplicate_data(distributedCellGlobalIds, distributedCellIds_, sourceCellGlobalIds);
    merge_duplicate_data(distributedNodeGlobalIds, distributedNodeIds_, sourceNodeGlobalIds);

    // set counts for cells and nodes in the flat mesh
    source_mesh_flat.set_num_owned_cells(flatCellNumOwned_);
    source_mesh_flat.set_num_owned_nodes(flatNodeNumOwned_);

    // finish initialization using the redistributed data
    source_mesh_flat.finish_init();
  }  // distribute


 private:

  //! The communicator we are using
  MPI_Comm comm_ = MPI_COMM_NULL;

  //! The spatial dimension (set from the source mesh)
  int dim_ = 1;

  //! number of nodes owned by this rank in the flat mesh
  int flatNodeNumOwned_ = 0;
  //! global ids of the nodes in the flat mesh
  std::vector<GID_t> flatNodeGlobalIds_ {};
  //! for each flat-mesh node, its index in the distributed (just-received) data
  std::vector<int> distributedNodeIds_ {};
  //! map from node global id to flat-mesh node index
  std::map<GID_t,int> gidToFlatNodeId_ {};

  //! number of faces owned by this rank in the flat mesh
  int flatFaceNumOwned_ = 0;
  //! global ids of the faces in the flat mesh
  std::vector<GID_t> flatFaceGlobalIds_ {};
  //! for each flat-mesh face, its index in the distributed (just-received) data
  std::vector<int> distributedFaceIds_ {};
  //! map from face global id to flat-mesh face index
  std::map<GID_t,int> gidToFlatFaceId_ {};

  //! number of cells owned by this rank in the flat mesh
  int flatCellNumOwned_ = 0;
  //! global ids of the cells in the flat mesh
  std::vector<GID_t> flatCellGlobalIds_ {};
  //! for each flat-mesh cell, its index in the distributed (just-received) data
  std::vector<int> distributedCellIds_ {};
  //! map from cell global id to flat-mesh cell index
  std::map<GID_t,int> gidToFlatCellId_ {};

  //! received material ids, shapes and cells (multimaterial runs only)
  std::vector<int> distributedMaterialIds_ {};
  std::vector<int> distributedMaterialShapes_ {};
  std::vector<GID_t> distributedMaterialCells_ {};
  //! per material, the positions of the unique material cells kept
  std::map<int, std::vector<int>> distributedMaterialCellIds_ {};


  /*!
    @brief Compute the send/receive counts needed to redistribute one kind of
    entity, given the per-rank send flags.
  */
  void setSendRecvCounts(comm_info_t* info,
                         const int commSize,
                         const std::vector<bool>& sendFlags,
                         const int sourceNum,
                         const int sourceNumOwned)
  {
    // set my counts of all entities and of owned entities
    info->sourceNum = sourceNum;
    info->sourceNumOwned = sourceNumOwned;

    // each rank tells each other rank how many indexes it will send
    info->sendCounts.resize(commSize);
    info->recvCounts.resize(commSize);
    for (int i=0; i<commSize; i++)
      info->sendCounts[i] = sendFlags[i] ? info->sourceNum : 0;
    MPI_Alltoall(&(info->sendCounts[0]), 1, MPI_INT,
                 &(info->recvCounts[0]), 1, MPI_INT, comm_);

    // each rank tells each other rank how many owned indexes it will send
    info->sendOwnedCounts.resize(commSize);
    info->recvOwnedCounts.resize(commSize);
    for (int i=0; i<commSize; i++)
      info->sendOwnedCounts[i] = sendFlags[i] ? info->sourceNumOwned : 0;
    MPI_Alltoall(&(info->sendOwnedCounts[0]), 1, MPI_INT,
                 &(info->recvOwnedCounts[0]), 1, MPI_INT, comm_);

    // total number of entities (and owned entities) this rank will receive
    for (int i=0; i<commSize; i++)
      info->newNum += info->recvCounts[i];
    for (int i=0; i<commSize; i++)
      info->newNumOwned += info->recvOwnedCounts[i];
  }  // setSendRecvCounts
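
  // Illustrative only: on a 2-rank run where rank 0 holds 4 cells (3 owned)
  // with sendFlags = {true, true} and rank 1 holds 5 owned cells with
  // sendFlags = {false, true} (a rank normally "sends" to itself), rank 1
  // ends up with recvCounts = {4, 5} and recvOwnedCounts = {3, 5}, hence
  // newNum = 9 and newNumOwned = 8.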


  /*!
    @brief Send values of one field to all ranks that need them, in two
    rounds: first the owned entities, then the ghosts.
  */
  template<typename T>
  void sendField(const comm_info_t& info,
                 int commRank, int commSize,
                 MPI_Datatype mpiType, int stride,
                 const std::vector<T>& sourceData,
                 std::vector<T>* newData)
  {
    // split the counts into owned and ghost parts
    std::vector<int> recvGhostCounts(commSize);
    std::vector<int> sendGhostCounts(commSize);

    for (int i=0; i<commSize; i++)
    {
      recvGhostCounts[i] = info.recvCounts[i] - info.recvOwnedCounts[i];
      sendGhostCounts[i] = info.sendCounts[i] - info.sendOwnedCounts[i];
    }

    // first round: owned entities, written at the start of newData
    sendData(commRank, commSize, mpiType, stride,
             0, info.sourceNumOwned,
             0,
             info.sendOwnedCounts, info.recvOwnedCounts,
             sourceData, newData);
    // second round: ghost entities, written after all owned entries
    sendData(commRank, commSize, mpiType, stride,
             info.sourceNumOwned, info.sourceNum,
             info.newNumOwned,
             sendGhostCounts, recvGhostCounts,
             sourceData, newData);

#ifdef DEBUG_MPI
    std::cout << "Number of values on rank " << commRank << ": "
              << newData->size() << std::endl;
    for (size_t f=0; f<newData->size(); f++)
      std::cout << (*newData)[f] << " ";
    std::cout << std::endl << std::endl;
#endif
  }  // sendField


  /*!
    @brief Send one slice [sourceStart, sourceEnd) of the source data to every
    rank flagged to receive it, writing received values into newData starting
    at newStart.
  */
  template<typename T>
  void sendData(int commRank, int commSize,
                MPI_Datatype mpiType, int stride,
                int sourceStart, int sourceEnd,
                int newStart,
                const std::vector<int>& curSendCounts,
                const std::vector<int>& curRecvCounts,
                const std::vector<T>& sourceData,
                std::vector<T>* newData)
  {
    // each rank posts a non-blocking receive from every rank that will send to it
    int writeOffset = newStart;
    int myOffset = newStart;
    std::vector<MPI_Request> requests;
    for (int i=0; i<commSize; i++)
    {
      if ((i != commRank) && (curRecvCounts[i] > 0))
      {
        MPI_Request request;
        MPI_Irecv((void *)&((*newData)[stride*writeOffset]),
                  stride*curRecvCounts[i], mpiType, i,
                  MPI_ANY_TAG, comm_, &request);
        requests.push_back(request);
      }
      else if (i == commRank)
      {
        myOffset = writeOffset;
      }
      writeOffset += curRecvCounts[i];
    }

    // copy the values that stay on this rank into their slot in newData
    if (curRecvCounts[commRank] > 0)
      std::copy(sourceData.begin() + stride*sourceStart,
                sourceData.begin() + stride*sourceEnd,
                newData->begin() + stride*myOffset);

    // send this rank's values to every rank flagged to receive them
    for (int i=0; i<commSize; i++)
    {
      if ((i != commRank) && (curSendCounts[i] > 0))
      {
        MPI_Send((void *)&(sourceData[stride*sourceStart]),
                 stride*curSendCounts[i], mpiType, i, 0, comm_);
      }
    }

    // wait for all receives to complete
    if (!requests.empty())
    {
      std::vector<MPI_Status> statuses(requests.size());
      MPI_Waitall(requests.size(), &(requests[0]), &(statuses[0]));
    }
  }  // sendData
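
  // Layout note (a description of the calls above, not new behaviour): with
  // the two start offsets 0 and info.newNumOwned used by sendField, every
  // rank's owned entries land in newData[0 .. newNumOwned) and all ghost
  // entries in newData[newNumOwned .. newNum); within each half the data are
  // ordered by sending rank, and this rank's own contribution is copied
  // locally rather than sent through MPI.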


  /*!
    @brief Compute which ranks this rank must send its source cells to, based
    on the overlap of its source bounding box with every rank's target
    bounding box.
  */
  template <class Source_Mesh, class Target_Mesh>
  void compute_sendflags(Source_Mesh & source_mesh, Target_Mesh &target_mesh,
                         std::vector<bool> &sendFlags){

    // Get the MPI communicator size and rank information
    int commSize, commRank;
    MPI_Comm_size(comm_, &commSize);
    MPI_Comm_rank(comm_, &commRank);

    int dim = source_mesh.space_dimension();

    // Compute the bounding box of this rank's target partition and store it
    // in the slot of an array that will hold every rank's target bounding box
    int targetNumOwnedCells = target_mesh.num_owned_cells();
    std::vector<double> targetBoundingBoxes(2*dim*commSize);
    for (int i=0; i<2*dim; i+=2)
    {
      targetBoundingBoxes[2*dim*commRank+i+0] = std::numeric_limits<double>::max();
      targetBoundingBoxes[2*dim*commRank+i+1] = -std::numeric_limits<double>::max();
    }
    for (int c=0; c<targetNumOwnedCells; ++c)
    {
      std::vector<int> nodes;
      target_mesh.cell_get_nodes(c, &nodes);
      int cellNumNodes = nodes.size();
      for (int j=0; j<cellNumNodes; ++j)
      {
        int n = nodes[j];
        // dim is not known at compile time, so branch on it to get the
        // correctly sized Point
        if (dim == 3)
        {
          Point<3> nodeCoord;
          target_mesh.node_get_coordinates(n, &nodeCoord);
          for (int k=0; k<dim; ++k)
          {
            if (nodeCoord[k] < targetBoundingBoxes[2*dim*commRank+2*k])
              targetBoundingBoxes[2*dim*commRank+2*k] = nodeCoord[k];
            if (nodeCoord[k] > targetBoundingBoxes[2*dim*commRank+2*k+1])
              targetBoundingBoxes[2*dim*commRank+2*k+1] = nodeCoord[k];
          }
        }
        else if (dim == 2)
        {
          Point<2> nodeCoord;
          target_mesh.node_get_coordinates(n, &nodeCoord);
          for (int k=0; k<dim; ++k)
          {
            if (nodeCoord[k] < targetBoundingBoxes[2*dim*commRank+2*k])
              targetBoundingBoxes[2*dim*commRank+2*k] = nodeCoord[k];
            if (nodeCoord[k] > targetBoundingBoxes[2*dim*commRank+2*k+1])
              targetBoundingBoxes[2*dim*commRank+2*k+1] = nodeCoord[k];
          }
        }
      }  // for j
    }  // for c

    // Compute the bounding box of this rank's owned source cells
    int sourceNumOwnedCells = source_mesh.num_owned_cells();

    std::vector<double> sourceBoundingBox(2*dim);
    for (int i=0; i<2*dim; i+=2)
    {
      sourceBoundingBox[i+0] = std::numeric_limits<double>::max();
      sourceBoundingBox[i+1] = -std::numeric_limits<double>::max();
    }
    for (int c=0; c<sourceNumOwnedCells; ++c)
    {
      std::vector<int> nodes;
      source_mesh.cell_get_nodes(c, &nodes);
      int cellNumNodes = nodes.size();
      for (int j=0; j<cellNumNodes; ++j)
      {
        int n = nodes[j];
        if (dim == 3)
        {
          Point<3> nodeCoord;
          source_mesh.node_get_coordinates(n, &nodeCoord);
          for (int k=0; k<dim; ++k)
          {
            if (nodeCoord[k] < sourceBoundingBox[2*k])
              sourceBoundingBox[2*k] = nodeCoord[k];
            if (nodeCoord[k] > sourceBoundingBox[2*k+1])
              sourceBoundingBox[2*k+1] = nodeCoord[k];
          }
        }
        else if (dim == 2)
        {
          Point<2> nodeCoord;
          source_mesh.node_get_coordinates(n, &nodeCoord);
          for (int k=0; k<dim; ++k)
          {
            if (nodeCoord[k] < sourceBoundingBox[2*k])
              sourceBoundingBox[2*k] = nodeCoord[k];
            if (nodeCoord[k] > sourceBoundingBox[2*k+1])
              sourceBoundingBox[2*k+1] = nodeCoord[k];
          }
        }
      }  // for j
    }  // for c

    // Broadcast the target bounding boxes so every rank knows every rank's box
    for (int i=0; i<commSize; i++)
      MPI_Bcast(&(targetBoundingBoxes[0])+i*2*dim, 2*dim, MPI_DOUBLE, i, comm_);

    // Shrink both boxes by a small tolerance so that roundoff-level overlap
    // does not trigger a send
    const double boxOffset = 2.0*std::numeric_limits<double>::epsilon();
    double min2[dim], max2[dim];
    for (int k=0; k<dim; ++k)
    {
      min2[k] = sourceBoundingBox[2*k]+boxOffset;
      max2[k] = sourceBoundingBox[2*k+1]-boxOffset;
    }

    // For each rank whose target bounding box overlaps this rank's source
    // bounding box, flag that we will send it all of our source cells
    for (int i=0; i<commSize; ++i)
    {
      double min1[dim], max1[dim];
      bool sendThis = true;
      for (int k=0; k<dim; ++k)
      {
        min1[k] = targetBoundingBoxes[2*dim*i+2*k]+boxOffset;
        max1[k] = targetBoundingBoxes[2*dim*i+2*k+1]-boxOffset;
        sendThis = sendThis &&
            ((min1[k] <= min2[k] && min2[k] <= max1[k]) ||
             (min2[k] <= min1[k] && min1[k] <= max2[k]));
      }

      sendFlags[i] = sendThis;
    }
  }  // compute_sendflags
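
  // The per-axis test above is the usual 1-D interval overlap check: two
  // intervals intersect exactly when one of them contains the other's left
  // endpoint. For example, source x-range [0, 2] and target x-range [1.5, 3]
  // overlap (1.5 lies inside [0, 2]) while [0, 2] and [2.5, 3] do not; the
  // boxes overlap only if every axis passes the test.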


  /*!
    @brief Convert a list of local entity indices into global ids.
  */
  std::vector<GID_t> to_gid(std::vector<int> const& in,
                            std::vector<GID_t> const& gids) const {
    std::vector<GID_t> result;
    result.reserve(in.size());
    for (auto x : in) result.push_back(gids[x]);
    return result;
  }


  /*!
    @brief Compress a vector of global ids, keeping the position of the first
    occurrence of each gid, sorted by gid.
  */
  void compress(std::vector<GID_t> const& distributedGlobalIds,
                std::vector<int>& distributedIds) const {

    // map from gid to the first position at which it was seen
    std::map<GID_t,int> uniqueGid;

    int const num_distributed_global_ids = distributedGlobalIds.size();

    // loop over the distributed global ids
    for (int i = 0; i < num_distributed_global_ids; ++i){

      // get the current gid
      GID_t gid = distributedGlobalIds[i];

      // record only the first occurrence
      if (uniqueGid.find(gid) == uniqueGid.end()){
        uniqueGid[gid] = i;
      }
    }

    // clear and reserve the result
    distributedIds.clear();
    distributedIds.reserve(uniqueGid.size());

    // push the kept positions, in ascending gid order
    for (auto const& kv : uniqueGid){
      distributedIds.push_back(kv.second);
    }
  }
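
  // Illustrative only: for distributedGlobalIds = {42, 7, 42, 19} the map
  // records the first position of each gid (7 -> 1, 19 -> 3, 42 -> 0), and
  // since std::map iterates in ascending gid order the result is
  // distributedIds = {1, 3, 0}.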


  /*!
    @brief Compress a vector of global ids whose first distributedNumOwned
    entries are owned and whose remaining entries are ghosts, keeping owned
    entities first and dropping ghosts that duplicate an owned entity.
  */
  void compress_with_ghosts(std::vector<GID_t> const& distributedGlobalIds,
                            int const distributedNumOwned, std::vector<int>& distributedIds,
                            std::vector<GID_t>& flatGlobalIds, int &flatNumOwned) const {

    // maps from gid to the first position at which it was seen
    std::map<GID_t,int> uniqueGid, uniqueGhostGid;

    // loop over the owned entities in the distributed global ids
    for (int i=0; i<distributedNumOwned; ++i){

      // get the current gid
      GID_t gid = distributedGlobalIds[i];

      // record only the first occurrence
      if (uniqueGid.find(gid) == uniqueGid.end()){
        uniqueGid[gid] = i;
      }
    }

    // everything in uniqueGid at this point is owned
    flatNumOwned = uniqueGid.size();

    // push the owned gids and their distributed positions
    for (auto const& kv : uniqueGid){
      flatGlobalIds.push_back(kv.first);
      distributedIds.push_back(kv.second);
    }

    // loop over the ghost entities in the distributed global ids
    int const num_distributed_global_ids = distributedGlobalIds.size();

    for (int i = distributedNumOwned; i < num_distributed_global_ids; ++i){

      // get the current gid
      GID_t gid = distributedGlobalIds[i];

      // keep the ghost only if it duplicates neither an owned entity nor a
      // previously seen ghost
      if (uniqueGid.find(gid) == uniqueGid.end() &&
          uniqueGhostGid.find(gid) == uniqueGhostGid.end()){
        uniqueGhostGid[gid] = i;
      }
    }

    // push the ghost gids and their distributed positions
    for (auto const& kv : uniqueGhostGid){
      flatGlobalIds.push_back(kv.first);
      distributedIds.push_back(kv.second);
    }
  }
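
  // Illustrative only: with distributedGlobalIds = {5, 9, 5, 9, 12, 5} and
  // distributedNumOwned = 4, the owned pass keeps gids {5, 9} (flatNumOwned
  // becomes 2) and the ghost pass keeps only 12, so flatGlobalIds = {5, 9, 12}
  // and distributedIds = {0, 1, 4}: owned entities first (sorted by gid),
  // then the ghosts that did not already appear as owned.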


  void create_gid_to_flat_map(std::vector<GID_t> const& flatGlobalIds,
                              std::map<GID_t,int>& gidToFlat) const {

    int const num_flat_global_ids = flatGlobalIds.size();
    for (int i = 0; i < num_flat_global_ids; ++i)
      gidToFlat[flatGlobalIds[i]] = i;
  }


  /*!
    @brief Keep, for each entity selected by distributedIds, its stride
    consecutive values from the received data, writing them into result.
  */
  template<class T>
  void merge_duplicate_data(std::vector<T> const& in, std::vector<int> const& distributedIds,
                            std::vector<T>& result, int stride = 1){

    // start clean, then reserve stride values per kept entity
    result.clear();
    result.reserve(distributedIds.size()*stride);

    // copy the stride values of each kept entity, in flat-mesh order
    for (auto id : distributedIds)
      for (int d=0; d<stride; ++d)
        result.push_back(in[stride*id + d]);
  }
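
  // Illustrative only: merge_duplicate_data({a0,a1, b0,b1, c0,c1}, {2, 0},
  // out, 2) leaves out = {c0, c1, a0, a1}; duplicates received from several
  // ranks collapse to the single copy selected by distributedIds.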


  /*!
    @brief Merge the variable-length lists (e.g. cell-to-node) of the entities
    selected by distributedIds, mapping each global id in the kept lists to
    its flat-mesh index.
  */
  void merge_duplicate_lists(std::vector<GID_t> const& in, std::vector<int> const& counts,
                             std::vector<int> const& distributedIds,
                             std::map<GID_t,int> const& gidToFlatId,
                             std::vector<int>& result){

    // compute the offset of each list (the first element is already zero)
    std::vector<int> offsets(counts.size());
    std::partial_sum(counts.begin(), counts.end()-1, offsets.begin()+1);

    // start clean; reserve a lower-bound estimate of the final size
    result.clear();
    result.reserve(distributedIds.size()*(dim_+1));

    // loop over the kept entities
    for (int distributedId : distributedIds){

      // offset of this entity's list in the received data
      int const thisOffset = offsets[distributedId];

      // map each global id in the list to its flat-mesh index
      for (int j=0; j<counts[distributedId]; ++j)
        result.push_back(gidToFlatId.at(in[thisOffset+j]));
    }
  }


  /*!
    @brief Merge the variable-length lists of the entities selected by
    distributedIds, copying the kept values unchanged (no gid mapping).
  */
  template<class T>
  void merge_duplicate_lists(std::vector<T> const& in, std::vector<int> const& counts,
                             std::vector<int> const& distributedIds, std::vector<T>& result){

    // compute the offset of each list (the first element is already zero)
    std::vector<int> offsets(counts.size());
    std::partial_sum(counts.begin(), counts.end()-1, offsets.begin()+1);

    // start clean; reserve a lower-bound estimate of the final size
    result.clear();
    result.reserve(distributedIds.size()*(dim_+1));

    // loop over the kept entities
    for (int distributedId : distributedIds){

      // offset of this entity's list in the received data
      int const thisOffset = offsets[distributedId];

      // copy the list values
      for (int j=0; j<counts[distributedId]; ++j)
        result.push_back(in[thisOffset+j]);
    }
  }

};  // MPI_Bounding_Boxes

#endif  // PORTAGE_ENABLE_MPI

#endif  // MPI_BOUNDING_BOXES_H_