gem5  v21.1.0.2
Topology.cc
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2020 Advanced Micro Devices, Inc.
3  * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are
8  * met: redistributions of source code must retain the above copyright
9  * notice, this list of conditions and the following disclaimer;
10  * redistributions in binary form must reproduce the above copyright
11  * notice, this list of conditions and the following disclaimer in the
12  * documentation and/or other materials provided with the distribution;
13  * neither the name of the copyright holders nor the names of its
14  * contributors may be used to endorse or promote products derived from
15  * this software without specific prior written permission.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28  */
29 
#include "mem/ruby/network/Topology.hh"

#include <cassert>

#include "base/trace.hh"
#include "debug/RubyNetwork.hh"
#include "mem/ruby/common/NetDest.hh"
#include "mem/ruby/network/BasicLink.hh"
#include "mem/ruby/network/Network.hh"
#include "mem/ruby/slicc_interface/AbstractController.hh"
41 namespace gem5
42 {
43 
44 namespace ruby
45 {
46 
47 const int INFINITE_LATENCY = 10000; // Yes, this is a big hack
48 
49 // Note: In this file, we use the first 2*m_nodes SwitchIDs to
50 // represent the input and output endpoint links. These really are
51 // not 'switches', as they will not have a Switch object allocated for
52 // them. The first m_nodes SwitchIDs are the links into the network,
53  * the second m_nodes set of SwitchIDs represent the output queues
54 // of the network.
55 
56 Topology::Topology(uint32_t num_nodes, uint32_t num_routers,
57  uint32_t num_vnets,
58  const std::vector<BasicExtLink *> &ext_links,
59  const std::vector<BasicIntLink *> &int_links)
60  : m_nodes(MachineType_base_number(MachineType_NUM)),
61  m_number_of_switches(num_routers), m_vnets(num_vnets),
62  m_ext_link_vector(ext_links), m_int_link_vector(int_links)
63 {
64  // Total nodes/controllers in network
65  assert(m_nodes > 1);
66 
67  // analyze both the internal and external links, create data structures.
68  // The python created external links are bi-directional,
69  // and the python created internal links are uni-directional.
70  // The networks and topology utilize uni-directional links.
71  // Thus each external link is converted to two calls to addLink,
72  // one for each direction.
73  //
74  // External Links
75  for (std::vector<BasicExtLink*>::const_iterator i = ext_links.begin();
76  i != ext_links.end(); ++i) {
77  BasicExtLink *ext_link = (*i);
78  AbstractController *abs_cntrl = ext_link->params().ext_node;
79  BasicRouter *router = ext_link->params().int_node;
80 
81  int machine_base_idx = MachineType_base_number(abs_cntrl->getType());
82  int ext_idx1 = machine_base_idx + abs_cntrl->getVersion();
83  int ext_idx2 = ext_idx1 + m_nodes;
84  int int_idx = router->params().router_id + 2*m_nodes;
85 
86  // create the internal uni-directional links in both directions
87  // ext to int
88  addLink(ext_idx1, int_idx, ext_link);
89  // int to ext
90  addLink(int_idx, ext_idx2, ext_link);
91  }
92 
93  // Internal Links
94  for (std::vector<BasicIntLink*>::const_iterator i = int_links.begin();
95  i != int_links.end(); ++i) {
96  BasicIntLink *int_link = (*i);
97  BasicRouter *router_src = int_link->params().src_node;
98  BasicRouter *router_dst = int_link->params().dst_node;
99 
100  PortDirection src_outport = int_link->params().src_outport;
101  PortDirection dst_inport = int_link->params().dst_inport;
102 
103  // Store the IntLink pointers for later
104  m_int_link_vector.push_back(int_link);
105 
106  int src = router_src->params().router_id + 2*m_nodes;
107  int dst = router_dst->params().router_id + 2*m_nodes;
108 
109  // create the internal uni-directional link from src to dst
110  addLink(src, dst, int_link, src_outport, dst_inport);
111  }
112 }
113 
114 void
116 {
117  // Find maximum switchID
118  SwitchID max_switch_id = 0;
119  for (LinkMap::const_iterator i = m_link_map.begin();
120  i != m_link_map.end(); ++i) {
121  std::pair<SwitchID, SwitchID> src_dest = (*i).first;
122  max_switch_id = std::max(max_switch_id, src_dest.first);
123  max_switch_id = std::max(max_switch_id, src_dest.second);
124  }
125 
126  // Initialize weight, latency, and inter switched vectors
127  int num_switches = max_switch_id+1;
128  Matrix topology_weights(m_vnets,
129  std::vector<std::vector<int>>(num_switches,
130  std::vector<int>(num_switches, INFINITE_LATENCY)));
131  Matrix component_latencies(num_switches,
132  std::vector<std::vector<int>>(num_switches,
133  std::vector<int>(m_vnets, -1)));
134  Matrix component_inter_switches(num_switches,
135  std::vector<std::vector<int>>(num_switches,
137 
138  // Set identity weights to zero
139  for (int i = 0; i < topology_weights[0].size(); i++) {
140  for (int v = 0; v < m_vnets; v++) {
141  topology_weights[v][i][i] = 0;
142  }
143  }
144 
145  // Fill in the topology weights and bandwidth multipliers
146  for (auto link_group : m_link_map) {
147  std::pair<int, int> src_dest = link_group.first;
148  std::vector<bool> vnet_done(m_vnets, 0);
149  int src = src_dest.first;
150  int dst = src_dest.second;
151 
152  // Iterate over all links for this source and destination
153  std::vector<LinkEntry> link_entries = link_group.second;
154  for (int l = 0; l < link_entries.size(); l++) {
155  BasicLink* link = link_entries[l].link;
156  if (link->mVnets.size() == 0) {
157  for (int v = 0; v < m_vnets; v++) {
158  // Two links connecting same src and destination
159  // cannot carry same vnets.
160  fatal_if(vnet_done[v], "Two links connecting same src"
161  " and destination cannot support same vnets");
162 
163  component_latencies[src][dst][v] = link->m_latency;
164  topology_weights[v][src][dst] = link->m_weight;
165  vnet_done[v] = true;
166  }
167  } else {
168  for (int v = 0; v < link->mVnets.size(); v++) {
169  int vnet = link->mVnets[v];
170  fatal_if(vnet >= m_vnets, "Not enough virtual networks "
171  "(setting latency and weight for vnet %d)", vnet);
172  // Two links connecting same src and destination
173  // cannot carry same vnets.
174  fatal_if(vnet_done[vnet], "Two links connecting same src"
175  " and destination cannot support same vnets");
176 
177  component_latencies[src][dst][vnet] = link->m_latency;
178  topology_weights[vnet][src][dst] = link->m_weight;
179  vnet_done[vnet] = true;
180  }
181  }
182  }
183  }
184 
185  // Walk topology and hookup the links
186  Matrix dist = shortest_path(topology_weights, component_latencies,
187  component_inter_switches);
188 
189  for (int i = 0; i < topology_weights[0].size(); i++) {
190  for (int j = 0; j < topology_weights[0][i].size(); j++) {
191  std::vector<NetDest> routingMap;
192  routingMap.resize(m_vnets);
193 
194  // Not all sources and destinations are connected
195  // by direct links. We only construct the links
196  // which have been configured in topology.
197  bool realLink = false;
198 
199  for (int v = 0; v < m_vnets; v++) {
200  int weight = topology_weights[v][i][j];
201  if (weight > 0 && weight != INFINITE_LATENCY) {
202  realLink = true;
203  routingMap[v] =
204  shortest_path_to_node(i, j, topology_weights, dist, v);
205  }
206  }
207  // Make one link for each set of vnets between
208  // a given source and destination. We do not
209  // want to create one link for each vnet.
210  if (realLink) {
211  makeLink(net, i, j, routingMap);
212  }
213  }
214  }
215 }
216 
217 void
219  PortDirection src_outport_dirn,
220  PortDirection dst_inport_dirn)
221 {
222  assert(src <= m_number_of_switches+m_nodes+m_nodes);
223  assert(dest <= m_number_of_switches+m_nodes+m_nodes);
224 
225  std::pair<int, int> src_dest_pair;
226  src_dest_pair.first = src;
227  src_dest_pair.second = dest;
228  LinkEntry link_entry;
229 
230  link_entry.link = link;
231  link_entry.src_outport_dirn = src_outport_dirn;
232  link_entry.dst_inport_dirn = dst_inport_dirn;
233 
234  auto lit = m_link_map.find(src_dest_pair);
235  if (lit != m_link_map.end()) {
236  // HeteroGarnet allows multiple links between
237  // same source-destination pair supporting
238  // different vnets. If there is a link already
239  // between a given pair of source and destination
240  // add this new link to it.
241  lit->second.push_back(link_entry);
242  } else {
244  links.push_back(link_entry);
245  m_link_map[src_dest_pair] = links;
246  }
247 }
248 
249 void
251  std::vector<NetDest>& routing_table_entry)
252 {
253  // Make sure we're not trying to connect two end-point nodes
254  // directly together
255  assert(src >= 2 * m_nodes || dest >= 2 * m_nodes);
256 
257  std::pair<int, int> src_dest;
258  LinkEntry link_entry;
259 
260  if (src < m_nodes) {
261  src_dest.first = src;
262  src_dest.second = dest;
263  std::vector<LinkEntry> links = m_link_map[src_dest];
264  for (int l = 0; l < links.size(); l++) {
265  link_entry = links[l];
266  std::vector<NetDest> linkRoute;
267  linkRoute.resize(m_vnets);
268  BasicLink *link = link_entry.link;
269  if (link->mVnets.size() == 0) {
270  net->makeExtInLink(src, dest - (2 * m_nodes), link,
271  routing_table_entry);
272  } else {
273  for (int v = 0; v< link->mVnets.size(); v++) {
274  int vnet = link->mVnets[v];
275  linkRoute[vnet] = routing_table_entry[vnet];
276  }
277  net->makeExtInLink(src, dest - (2 * m_nodes), link,
278  linkRoute);
279  }
280  }
281  } else if (dest < 2*m_nodes) {
282  assert(dest >= m_nodes);
283  NodeID node = dest - m_nodes;
284  src_dest.first = src;
285  src_dest.second = dest;
286  std::vector<LinkEntry> links = m_link_map[src_dest];
287  for (int l = 0; l < links.size(); l++) {
288  link_entry = links[l];
289  std::vector<NetDest> linkRoute;
290  linkRoute.resize(m_vnets);
291  BasicLink *link = link_entry.link;
292  if (link->mVnets.size() == 0) {
293  net->makeExtOutLink(src - (2 * m_nodes), node, link,
294  routing_table_entry);
295  } else {
296  for (int v = 0; v< link->mVnets.size(); v++) {
297  int vnet = link->mVnets[v];
298  linkRoute[vnet] = routing_table_entry[vnet];
299  }
300  net->makeExtOutLink(src - (2 * m_nodes), node, link,
301  linkRoute);
302  }
303  }
304  } else {
305  assert((src >= 2 * m_nodes) && (dest >= 2 * m_nodes));
306  src_dest.first = src;
307  src_dest.second = dest;
308  std::vector<LinkEntry> links = m_link_map[src_dest];
309  for (int l = 0; l < links.size(); l++) {
310  link_entry = links[l];
311  std::vector<NetDest> linkRoute;
312  linkRoute.resize(m_vnets);
313  BasicLink *link = link_entry.link;
314  if (link->mVnets.size() == 0) {
315  net->makeInternalLink(src - (2 * m_nodes),
316  dest - (2 * m_nodes), link, routing_table_entry,
317  link_entry.src_outport_dirn,
318  link_entry.dst_inport_dirn);
319  } else {
320  for (int v = 0; v< link->mVnets.size(); v++) {
321  int vnet = link->mVnets[v];
322  linkRoute[vnet] = routing_table_entry[vnet];
323  }
324  net->makeInternalLink(src - (2 * m_nodes),
325  dest - (2 * m_nodes), link, linkRoute,
326  link_entry.src_outport_dirn,
327  link_entry.dst_inport_dirn);
328  }
329  }
330  }
331 }
332 
333 // The following all-pairs shortest path algorithm is based on the
334 // discussion from Cormen et al., Chapter 26.1.
335 void
336 Topology::extend_shortest_path(Matrix &current_dist, Matrix &latencies,
337  Matrix &inter_switches)
338 {
339  int nodes = current_dist[0].size();
340 
341  // We find the shortest path for each vnet for a given pair of
342  // source and destinations. This is done simply by traversing via
343  // all other nodes and finding the minimum distance.
344  for (int v = 0; v < m_vnets; v++) {
345  // There is a different topology for each vnet. Here we try to
346  // build a topology by finding the minimum number of intermediate
347  // switches needed to reach the destination
348  bool change = true;
349  while (change) {
350  change = false;
351  for (int i = 0; i < nodes; i++) {
352  for (int j = 0; j < nodes; j++) {
353  // We follow an iterative process to build the shortest
354  // path tree:
355  // 1. Start from the direct connection (if there is one,
356  // otherwise assume a hypothetical infinite weight link).
357  // 2. Then we iterate through all other nodes considering
358  // new potential intermediate switches. If we find any
359  // lesser weight combination, we set(update) that as the
360  // new weight between the source and destination.
361  // 3. Repeat for all pairs of nodes.
362  // 4. Go to step 1 if there was any new update done in
363  // Step 2.
364  int minimum = current_dist[v][i][j];
365  int previous_minimum = minimum;
366  int intermediate_switch = -1;
367  for (int k = 0; k < nodes; k++) {
368  minimum = std::min(minimum,
369  current_dist[v][i][k] + current_dist[v][k][j]);
370  if (previous_minimum != minimum) {
371  intermediate_switch = k;
372  inter_switches[i][j][v] =
373  inter_switches[i][k][v] +
374  inter_switches[k][j][v] + 1;
375  }
376  previous_minimum = minimum;
377  }
378  if (current_dist[v][i][j] != minimum) {
379  change = true;
380  current_dist[v][i][j] = minimum;
381  assert(intermediate_switch >= 0);
382  assert(intermediate_switch < latencies[i].size());
383  latencies[i][j][v] =
384  latencies[i][intermediate_switch][v] +
385  latencies[intermediate_switch][j][v];
386  }
387  }
388  }
389  }
390  }
391 }
392 
393 Matrix
394 Topology::shortest_path(const Matrix &weights, Matrix &latencies,
395  Matrix &inter_switches)
396 {
397  Matrix dist = weights;
398  extend_shortest_path(dist, latencies, inter_switches);
399  return dist;
400 }
401 
402 bool
404  SwitchID final, const Matrix &weights,
405  const Matrix &dist, int vnet)
406 {
407  return weights[vnet][src][next] + dist[vnet][next][final] ==
408  dist[vnet][src][final];
409 }
410 
411 NetDest
413  const Matrix &weights, const Matrix &dist,
414  int vnet)
415 {
416  NetDest result;
417  int d = 0;
418  int machines;
419  int max_machines;
420 
421  machines = MachineType_NUM;
422  max_machines = MachineType_base_number(MachineType_NUM);
423 
424  for (int m = 0; m < machines; m++) {
425  for (NodeID i = 0; i < MachineType_base_count((MachineType)m); i++) {
426  // we use "d+max_machines" below since the "destination"
427  // switches for the machines are numbered
428  // [MachineType_base_number(MachineType_NUM)...
429  // 2*MachineType_base_number(MachineType_NUM)-1] for the
430  // component network
431  if (link_is_shortest_path_to_node(src, next, d + max_machines,
432  weights, dist, vnet)) {
433  MachineID mach = {(MachineType)m, i};
434  result.add(mach);
435  }
436  d++;
437  }
438  }
439 
440  DPRINTF(RubyNetwork, "Returning shortest path\n"
441  "(src-(2*max_machines)): %d, (next-(2*max_machines)): %d, "
442  "src: %d, next: %d, vnet:%d result: %s\n",
443  (src-(2*max_machines)), (next-(2*max_machines)),
444  src, next, vnet, result);
445 
446  return result;
447 }
448 
449 } // namespace ruby
450 } // namespace gem5
gem5::ruby::AbstractController::getVersion
NodeID getVersion() const
Definition: AbstractController.hh:89
gem5::ruby::Network::makeExtOutLink
virtual void makeExtOutLink(SwitchID src, NodeID dest, BasicLink *link, std::vector< NetDest > &routing_table_entry)=0
gem5::ruby::LinkEntry::link
BasicLink * link
Definition: Topology.hh:72
gem5::ruby::PortDirection
std::string PortDirection
Definition: Topology.hh:68
gem5::ruby::LinkEntry
Definition: Topology.hh:70
gem5::ruby::BasicRouter
Definition: BasicRouter.hh:45
gem5::ruby::AbstractController::getType
MachineType getType() const
Definition: AbstractController.hh:90
gem5::ruby::Network::makeInternalLink
virtual void makeInternalLink(SwitchID src, SwitchID dest, BasicLink *link, std::vector< NetDest > &routing_table_entry, PortDirection src_outport, PortDirection dst_inport)=0
gem5::ruby::Topology::m_int_link_vector
std::vector< BasicIntLink * > m_int_link_vector
Definition: Topology.hh:118
gem5::ruby::Topology::addLink
void addLink(SwitchID src, SwitchID dest, BasicLink *link, PortDirection src_outport_dirn="", PortDirection dest_inport_dirn="")
Definition: Topology.cc:218
gem5::ruby::Network
Definition: Network.hh:82
gem5::ruby::Topology::createLinks
void createLinks(Network *net)
Definition: Topology.cc:115
AbstractController.hh
gem5::ruby::Topology::Topology
Topology(uint32_t num_nodes, uint32_t num_routers, uint32_t num_vnets, const std::vector< BasicExtLink * > &ext_links, const std::vector< BasicIntLink * > &int_links)
Definition: Topology.cc:56
std::vector
STL vector class.
Definition: stl.hh:37
gem5::ArmISA::i
Bitfield< 7 > i
Definition: misc_types.hh:66
gem5::ruby::INFINITE_LATENCY
const int INFINITE_LATENCY
Definition: Topology.cc:47
gem5::ruby::AbstractController
Definition: AbstractController.hh:82
gem5::statistics::dist
const FlagsType dist
Print the distribution.
Definition: info.hh:66
gem5::ruby::Topology::shortest_path_to_node
NetDest shortest_path_to_node(SwitchID src, SwitchID next, const Matrix &weights, const Matrix &dist, int vnet)
Definition: Topology.cc:412
gem5::ruby::Topology::extend_shortest_path
void extend_shortest_path(Matrix &current_dist, Matrix &latencies, Matrix &inter_switches)
Definition: Topology.cc:336
gem5::ArmISA::j
Bitfield< 24 > j
Definition: misc_types.hh:57
gem5::SimObject::params
const Params & params() const
Definition: sim_object.hh:176
DPRINTF
#define DPRINTF(x,...)
Definition: trace.hh:186
gem5::ArmISA::d
Bitfield< 9 > d
Definition: misc_types.hh:63
gem5::ruby::Network::makeExtInLink
virtual void makeExtInLink(NodeID src, SwitchID dest, BasicLink *link, std::vector< NetDest > &routing_table_entry)=0
gem5::ruby::Topology::m_nodes
const uint32_t m_nodes
Definition: Topology.hh:113
gem5::ArmISA::v
Bitfield< 28 > v
Definition: misc_types.hh:54
gem5::MipsISA::l
Bitfield< 5 > l
Definition: pra_constants.hh:323
gem5::ruby::LinkEntry::src_outport_dirn
PortDirection src_outport_dirn
Definition: Topology.hh:73
gem5::ruby::LinkEntry::dst_inport_dirn
PortDirection dst_inport_dirn
Definition: Topology.hh:74
gem5::ruby::SwitchID
unsigned int SwitchID
Definition: TypeDefines.hh:41
std::pair
STL pair class.
Definition: stl.hh:58
gem5::ruby::Topology::makeLink
void makeLink(Network *net, SwitchID src, SwitchID dest, std::vector< NetDest > &routing_table_entry)
Definition: Topology.cc:250
gem5::ruby::NetDest
Definition: NetDest.hh:45
Topology.hh
Network.hh
gem5::ArmISA::m
Bitfield< 0 > m
Definition: misc_types.hh:394
gem5::ruby::Topology::m_number_of_switches
const uint32_t m_number_of_switches
Definition: Topology.hh:114
gem5::ruby::Topology::m_vnets
int m_vnets
Definition: Topology.hh:115
gem5::ruby::Topology::shortest_path
Matrix shortest_path(const Matrix &weights, Matrix &latencies, Matrix &inter_switches)
Definition: Topology.cc:394
gem5::ruby::NodeID
unsigned int NodeID
Definition: TypeDefines.hh:40
gem5::ruby::NetDest::add
void add(MachineID newElement)
Definition: NetDest.cc:45
gem5::ruby::Topology::link_is_shortest_path_to_node
bool link_is_shortest_path_to_node(SwitchID src, SwitchID next, SwitchID final, const Matrix &weights, const Matrix &dist, int vnet)
Definition: Topology.cc:403
gem5::MipsISA::k
Bitfield< 23 > k
Definition: dt_constants.hh:81
trace.hh
fatal_if
#define fatal_if(cond,...)
Conditional fatal macro that checks the supplied condition and only causes a fatal error if the condi...
Definition: logging.hh:225
gem5
Reference material can be found at the JEDEC website: UFS standard http://www.jedec....
Definition: decoder.cc:40
gem5::ruby::MachineID
Definition: MachineID.hh:56
NetDest.hh
gem5::ruby::Topology::m_link_map
LinkMap m_link_map
Definition: Topology.hh:120

Generated on Tue Sep 21 2021 12:25:41 for gem5 by doxygen 1.8.17