Interface Documentation
Version: (unspecified)
mapper.hh
Go to the documentation of this file.
1 /*
2  @@@@@@@@ @@ @@@@@@ @@@@@@@@ @@
3  /@@///// /@@ @@////@@ @@////// /@@
4  /@@ /@@ @@@@@ @@ // /@@ /@@
5  /@@@@@@@ /@@ @@///@@/@@ /@@@@@@@@@/@@
6  /@@//// /@@/@@@@@@@/@@ ////////@@/@@
7  /@@ /@@/@@//// //@@ @@ /@@/@@
8  /@@ @@@//@@@@@@ //@@@@@@ @@@@@@@@ /@@
9  // /// ////// ////// //////// //
10 
11  Copyright (c) 2016, Triad National Security, LLC
12  All rights reserved.
13  */
14 #pragma once
15 
18 #include <flecsi-config.h>
19 
20 #if !defined(__FLECSI_PRIVATE__)
21 #error Do not include this file directly!
22 #endif
23 
24 #include "../backend.hh"
25 
26 #if !defined(FLECSI_ENABLE_LEGION)
27 #error FLECSI_ENABLE_LEGION not defined! This file depends on Legion!
28 #endif
29 
30 #include <legion.h>
31 #include <legion/legion_mapping.h>
32 #include <mappers/default_mapper.h>
33 
34 namespace flecsi {
35 
36 inline log::devel_tag legion_mapper_tag("legion_mapper");
37 
38 namespace run {
39 
40 /*
41  The mpi_mapper_t - is a custom mapper that handles mpi-legion
42  interoperability in FLeCSI
43 
44  @ingroup legion-runtime
45 */
46 
47 class mpi_mapper_t : public Legion::Mapping::DefaultMapper
48 {
49 public:
59  mpi_mapper_t(Legion::Machine machine,
60  Legion::Runtime * _runtime,
61  Legion::Processor local)
62  : Legion::Mapping::DefaultMapper(_runtime->get_mapper_runtime(),
63  machine,
64  local,
65  "default"),
66  machine(machine) {
67  using legion_machine = Legion::Machine;
68  using legion_proc = Legion::Processor;
69 
70  legion_machine::ProcessorQuery pq =
71  legion_machine::ProcessorQuery(machine).same_address_space_as(local);
72  for(legion_machine::ProcessorQuery::iterator pqi = pq.begin();
73  pqi != pq.end();
74  ++pqi) {
75  legion_proc p = *pqi;
76  if(p.kind() == legion_proc::LOC_PROC)
77  local_cpus.push_back(p);
78  else if(p.kind() == legion_proc::TOC_PROC)
79  local_gpus.push_back(p);
80  else
81  continue;
82 
83  std::map<Realm::Memory::Kind, Realm::Memory> & mem_map = proc_mem_map[p];
84 
85  legion_machine::MemoryQuery mq =
86  legion_machine::MemoryQuery(machine).has_affinity_to(p);
87  for(legion_machine::MemoryQuery::iterator mqi = mq.begin();
88  mqi != mq.end();
89  ++mqi) {
90  Realm::Memory m = *mqi;
91  mem_map[m.kind()] = m;
92 
93  if(m.kind() == Realm::Memory::SYSTEM_MEM)
94  local_sysmem = m;
95  } // end for
96  } // end for
97 
98  {
99  log::devel_guard guard(legion_mapper_tag);
100  flog_devel(info) << "Mapper constructor" << std::endl
101  << "\tlocal: " << local << std::endl
102  << "\tcpus: " << local_cpus.size() << std::endl
103  << "\tgpus: " << local_gpus.size() << std::endl
104  << "\tsysmem: " << local_sysmem << std::endl;
105  } // scope
106  } // end mpi_mapper_t
107 
111  virtual ~mpi_mapper_t(){};
112 
113  Legion::LayoutConstraintID default_policy_select_layout_constraints(
114  Legion::Mapping::MapperContext ctx,
115  Realm::Memory,
116  const Legion::RegionRequirement &,
117  Legion::Mapping::DefaultMapper::MappingKind,
118  bool /* constraint */,
119  bool & force_new_instances) {
120  // We always set force_new_instances to false since we are
121  // deciding to optimize for minimizing memory usage instead
122  // of avoiding Write-After-Read (WAR) dependences
123  force_new_instances = false;
124  std::vector<Legion::DimensionKind> ordering;
125  ordering.push_back(Legion::DimensionKind::DIM_Y);
126  ordering.push_back(Legion::DimensionKind::DIM_X);
127  ordering.push_back(Legion::DimensionKind::DIM_F); // SOA
128  Legion::OrderingConstraint ordering_constraint(
129  ordering, true /*contiguous*/);
130  Legion::LayoutConstraintSet layout_constraint;
131  layout_constraint.add_constraint(ordering_constraint);
132 
133  // Do the registration
134  Legion::LayoutConstraintID result =
135  runtime->register_layout(ctx, layout_constraint);
136  return result;
137  }
138 
152  virtual void map_task(const Legion::Mapping::MapperContext ctx,
153  const Legion::Task & task,
154  const Legion::Mapping::Mapper::MapTaskInput & input,
155  Legion::Mapping::Mapper::MapTaskOutput & output) {
156  DefaultMapper::map_task(ctx, task, input, output);
157 
158  if((task.tag == FLECSI_MAPPER_COMPACTED_STORAGE) &&
159  (task.regions.size() > 0)) {
160 
161  Legion::Memory target_mem =
162  DefaultMapper::default_policy_select_target_memory(
163  ctx, task.target_proc, task.regions[0]);
164 
165  // check if we get region requirements for "exclusive, shared and ghost"
166  // logical regions for each data handle
167 
168  // Filling out "layout_constraints" with the defaults
169  Legion::LayoutConstraintSet layout_constraints;
170  // No specialization
171  layout_constraints.add_constraint(Legion::SpecializedConstraint());
172  layout_constraints.add_constraint(Legion::OrderingConstraint());
173  // Constrained for the target memory kind
174  layout_constraints.add_constraint(
175  Legion::MemoryConstraint(target_mem.kind()));
176  // Have all the field for the instance available
177  std::vector<Legion::FieldID> all_fields;
178  layout_constraints.add_constraint(Legion::FieldConstraint());
179 
180  // FIXME:: add colocation_constraints
181  Legion::ColocationConstraint colocation_constraints;
182 
183  for(size_t indx = 0; indx < task.regions.size(); indx++) {
184 
185  Legion::Mapping::PhysicalInstance result;
186  std::vector<Legion::LogicalRegion> regions;
187  bool created;
188 
189  if(task.regions[indx].tag == FLECSI_MAPPER_EXCLUSIVE_LR) {
190 
191  flog_assert((task.regions.size() >= (indx + 2)),
192  "ERROR:: wrong number of regions passed to the task wirth \
193  the tag = FLECSI_MAPPER_COMPACTED_STORAGE");
194 
195  flog_assert((!task.regions[indx].region.exists()),
196  "ERROR:: pasing not existing REGION to the mapper");
197  regions.push_back(task.regions[indx].region);
198  regions.push_back(task.regions[indx + 1].region);
199  regions.push_back(task.regions[indx + 2].region);
200 
201  flog_assert(runtime->find_or_create_physical_instance(ctx,
202  target_mem,
203  layout_constraints,
204  regions,
205  result,
206  created,
207  true /*acquire*/,
208  GC_NEVER_PRIORITY),
209  "FLeCSI mapper failed to allocate instance");
210 
211  for(size_t j = 0; j < 3; j++) {
212  output.chosen_instances[indx + j].push_back(result);
213  } // for
214 
215  indx = indx + 2;
216  }
217  else {
218 
219  regions.push_back(task.regions[indx].region);
220 
221  flog_assert(runtime->find_or_create_physical_instance(ctx,
222  target_mem,
223  layout_constraints,
224  regions,
225  result,
226  created,
227  true /*acquire*/,
228  GC_NEVER_PRIORITY),
229  "FLeCSI mapper failed to allocate instance");
230 
231  output.chosen_instances[indx].push_back(result);
232 
233  } // end if
234  } // end for
235 
236  } // end if
237 
238  } // map_task
239 
240  virtual void slice_task(const Legion::Mapping::MapperContext ctx,
241  const Legion::Task & task,
242  const Legion::Mapping::Mapper::SliceTaskInput & input,
243  Legion::Mapping::Mapper::SliceTaskOutput & output) {
244 
245  switch(task.tag) {
246  case FLECSI_MAPPER_SUBRANK_LAUNCH:
247  // expect a 1-D index domain
248  assert(input.domain.get_dim() == 1);
249  // send the whole domain to our local processor
250  output.slices.resize(1);
251  output.slices[0].domain = input.domain;
252  output.slices[0].proc = task.target_proc;
253  break;
254 
255  case FLECSI_MAPPER_FORCE_RANK_MATCH: {
256  // expect a 1-D index domain - each point goes to the corresponding node
257  assert(input.domain.get_dim() == 1);
258  LegionRuntime::Arrays::Rect<1> r = input.domain.get_rect<1>();
259 
260  // go through all the CPU processors and find a representative for each
261  // node (i.e. address space)
262  std::map<int, Legion::Processor> targets;
263 
264  Legion::Machine::ProcessorQuery pq =
265  Legion::Machine::ProcessorQuery(machine).only_kind(
266  Legion::Processor::LOC_PROC);
267  for(Legion::Machine::ProcessorQuery::iterator it = pq.begin();
268  it != pq.end();
269  ++it) {
270  Legion::Processor p = *it;
271  int a = p.address_space();
272  if(targets.count(a) == 0)
273  targets[a] = p;
274  }
275 
276  output.slices.resize(1);
277  for(int a = r.lo[0]; a <= r.hi[0]; a++) {
278  assert(targets.count(a) > 0);
279  output.slices[0].domain = // Legion::Domain::from_rect<1>(
280  Legion::Rect<1>(a, a);
281  output.slices[0].proc = targets[a];
282  }
283  break;
284  }
285 
286  default:
287  DefaultMapper::slice_task(ctx, task, input, output);
288  }
289  }
290 
291 private:
292  std::map<Legion::Processor, std::map<Realm::Memory::Kind, Realm::Memory>>
293  proc_mem_map;
294  Realm::Memory local_sysmem;
295  Realm::Machine machine;
296 };
297 
305 inline void
306 mapper_registration(Legion::Machine machine,
307  Legion::HighLevelRuntime * rt,
308  const std::set<Legion::Processor> & local_procs) {
309  for(std::set<Legion::Processor>::const_iterator it = local_procs.begin();
310  it != local_procs.end();
311  it++) {
312  mpi_mapper_t * mapper = new mpi_mapper_t(machine, rt, *it);
313  rt->replace_default_mapper(mapper, *it);
314  }
315 } // mapper registration
316 
317 } // namespace run
318 } // namespace flecsi
virtual void map_task(const Legion::Mapping::MapperContext ctx, const Legion::Task &task, const Legion::Mapping::Mapper::MapTaskInput &input, Legion::Mapping::Mapper::MapTaskOutput &output)
Definition: mapper.hh:152
mpi_mapper_t(Legion::Machine machine, Legion::Runtime *_runtime, Legion::Processor local)
Definition: mapper.hh:59
#define flog_assert(test, message)
Definition: flog.hh:411
void mapper_registration(Legion::Machine machine, Legion::HighLevelRuntime *rt, const std::set< Legion::Processor > &local_procs)
Definition: mapper.hh:306
Definition: flog.hh:82
virtual ~mpi_mapper_t()
Definition: mapper.hh:111
Definition: mapper.hh:47
Definition: control.hh:31