gem5 v20.1.0.0
physical.cc
/*
 * Copyright (c) 2012, 2014, 2018 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "mem/physical.hh"

#include <fcntl.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/user.h>
#include <unistd.h>
#include <zlib.h>

#include <cerrno>
#include <climits>
#include <cstdio>
#include <iostream>
#include <string>

#include "base/trace.hh"
#include "debug/AddrRanges.hh"
#include "debug/Checkpoint.hh"
#include "mem/abstract_mem.hh"

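/*
 * MAP_NORESERVE is not defined on macOS and FreeBSD, so make it a no-op
 * there; on Linux it tells the kernel not to reserve swap space for the
 * mapping.
 */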
#if defined(__APPLE__) || defined(__FreeBSD__)
#ifndef MAP_NORESERVE
#define MAP_NORESERVE 0
#endif
#endif

using namespace std;

PhysicalMemory::PhysicalMemory(const string& _name,
                               const vector<AbstractMemory*>& _memories,
                               bool mmap_using_noreserve,
                               const std::string& shared_backstore) :
    _name(_name), size(0), mmapUsingNoReserve(mmap_using_noreserve),
    sharedBackstore(shared_backstore)
{
    if (mmap_using_noreserve)
        warn("Not reserving swap space. May cause SIGSEGV on actual usage\n");

    // add the memories from the system to the address map as
    // appropriate
    for (const auto& m : _memories) {
        // only add the memory if it is part of the global address map
        if (m->isInAddrMap()) {
            memories.push_back(m);

            // calculate the total size once and for all
            size += m->size();

            // add the range to our interval tree and make sure it does not
            // intersect an existing range
            fatal_if(addrMap.insert(m->getAddrRange(), m) == addrMap.end(),
                     "Memory address range for %s is overlapping\n",
                     m->name());
        } else {
            // this type of memory is used e.g. as reference memory by
            // Ruby; it also needs a backing store, but should not be
            // part of the global address map
            DPRINTF(AddrRanges,
                    "Skipping memory %s that is not in global address map\n",
                    m->name());

            // sanity check
            fatal_if(m->getAddrRange().interleaved(),
                     "Memory %s that is not in the global address map cannot "
                     "be interleaved\n", m->name());

            // create the backing store independently; note that memories
            // of this kind are allowed to overlap in the logical address
            // map
            vector<AbstractMemory*> unmapped_mems{m};
            createBackingStore(m->getAddrRange(), unmapped_mems,
                               m->isConfReported(), m->isInAddrMap(),
                               m->isKvmMap());
        }
    }

    // iterate over the increasing addresses and chunks of contiguous
    // space to be mapped to backing store, create it and inform the
    // memories
    vector<AddrRange> intlv_ranges;
    vector<AbstractMemory*> curr_memories;
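    // the channels of one interleaved region accumulate in intlv_ranges
    // and curr_memories until a range that does not merge with them
    // appears; only then are they merged and given a single backing store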
    for (const auto& r : addrMap) {
        // simply skip past all memories that are null and hence do
        // not need any backing store
        if (!r.second->isNull()) {
            // if the range is interleaved then save it for now
            if (r.first.interleaved()) {
                // if we have already collected interleaved ranges that
                // are not part of the same range, merge them before
                // adding the new one
                if (!intlv_ranges.empty() &&
                    !intlv_ranges.back().mergesWith(r.first)) {
                    AddrRange merged_range(intlv_ranges);

                    AbstractMemory *f = curr_memories.front();
                    for (const auto& c : curr_memories)
                        if (f->isConfReported() != c->isConfReported() ||
                            f->isInAddrMap() != c->isInAddrMap() ||
                            f->isKvmMap() != c->isKvmMap())
                            fatal("Inconsistent flags in an interleaved "
                                  "range\n");

                    createBackingStore(merged_range, curr_memories,
                                       f->isConfReported(), f->isInAddrMap(),
                                       f->isKvmMap());

                    intlv_ranges.clear();
                    curr_memories.clear();
                }
                intlv_ranges.push_back(r.first);
                curr_memories.push_back(r.second);
            } else {
                vector<AbstractMemory*> single_memory{r.second};
                createBackingStore(r.first, single_memory,
                                   r.second->isConfReported(),
                                   r.second->isInAddrMap(),
                                   r.second->isKvmMap());
            }
        }
    }

    // if there are still interleaved ranges waiting to be merged, go
    // ahead and do it
    if (!intlv_ranges.empty()) {
        AddrRange merged_range(intlv_ranges);

        AbstractMemory *f = curr_memories.front();
        for (const auto& c : curr_memories)
            if (f->isConfReported() != c->isConfReported() ||
                f->isInAddrMap() != c->isInAddrMap() ||
                f->isKvmMap() != c->isKvmMap())
                fatal("Inconsistent flags in an interleaved "
                      "range\n");

        createBackingStore(merged_range, curr_memories,
                           f->isConfReported(), f->isInAddrMap(),
                           f->isKvmMap());
    }
}

void
PhysicalMemory::createBackingStore(AddrRange range,
                                   const vector<AbstractMemory*>& _memories,
                                   bool conf_table_reported,
                                   bool in_addr_map, bool kvm_map)
{
    panic_if(range.interleaved(),
             "Cannot create backing store for interleaved range %s\n",
             range.to_string());

    // perform the actual mmap
    DPRINTF(AddrRanges, "Creating backing store for range %s with size %d\n",
            range.to_string(), range.size());

    int shm_fd;
    int map_flags;

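    // a private anonymous mapping is the default; if the user asked for
    // a shared backstore, create (or open) a named POSIX shared-memory
    // object instead and map it MAP_SHARED so that other processes can
    // attach to the simulated memory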
    if (sharedBackstore.empty()) {
        shm_fd = -1;
        map_flags = MAP_ANON | MAP_PRIVATE;
    } else {
        DPRINTF(AddrRanges, "Sharing backing store as %s\n",
                sharedBackstore.c_str());
        shm_fd = shm_open(sharedBackstore.c_str(), O_CREAT | O_RDWR, 0666);
        if (shm_fd == -1)
            panic("Shared memory failed");
        if (ftruncate(shm_fd, range.size()))
            panic("Setting size of shared memory failed");
        map_flags = MAP_SHARED;
    }

    // to be able to simulate very large memories, the user can opt to
    // pass noreserve to mmap
    if (mmapUsingNoReserve) {
        map_flags |= MAP_NORESERVE;
    }

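    // mmap only reserves virtual address space here; physical pages are
    // allocated lazily on first touch, and with MAP_NORESERVE no swap is
    // accounted either, which is what makes very large simulated
    // memories practical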
    uint8_t* pmem = (uint8_t*) mmap(NULL, range.size(),
                                    PROT_READ | PROT_WRITE,
                                    map_flags, shm_fd, 0);

    if (pmem == (uint8_t*) MAP_FAILED) {
        perror("mmap");
        fatal("Could not mmap %d bytes for range %s!\n", range.size(),
              range.to_string());
    }

    // remember this backing store so we can checkpoint it and unmap
    // it appropriately
    backingStore.emplace_back(range, pmem,
                              conf_table_reported, in_addr_map, kvm_map);

    // point the memories to their backing store
    for (const auto& m : _memories) {
        DPRINTF(AddrRanges, "Mapping memory %s to backing store\n",
                m->name());
        m->setBackingStore(pmem);
    }
}

PhysicalMemory::~PhysicalMemory()
{
    // unmap the backing store
    for (auto& s : backingStore)
        munmap((char*)s.pmem, s.range.size());
}

bool
PhysicalMemory::isMemAddr(Addr addr) const
{
    return addrMap.contains(addr) != addrMap.end();
}

AddrRangeList
PhysicalMemory::getConfAddrRanges() const
{
    // this could be done once in the constructor, but since it is unlikely
    // to be called more than once the iteration should not be a problem
    AddrRangeList ranges;
    vector<AddrRange> intlv_ranges;
    for (const auto& r : addrMap) {
        if (r.second->isConfReported()) {
            // if the range is interleaved then save it for now
            if (r.first.interleaved()) {
                // if we have already collected interleaved ranges that
                // are not part of the same range, merge them before
                // adding the new one
                if (!intlv_ranges.empty() &&
                    !intlv_ranges.back().mergesWith(r.first)) {
                    ranges.push_back(AddrRange(intlv_ranges));
                    intlv_ranges.clear();
                }
                intlv_ranges.push_back(r.first);
            } else {
                // keep the current range
                ranges.push_back(r.first);
            }
        }
    }

    // if there are still interleaved ranges waiting to be merged,
    // go ahead and do it
    if (!intlv_ranges.empty()) {
        ranges.push_back(AddrRange(intlv_ranges));
    }

    return ranges;
}

void
PhysicalMemory::access(PacketPtr pkt)
{
    assert(pkt->isRequest());
    const auto& m = addrMap.contains(pkt->getAddrRange());
    assert(m != addrMap.end());
    m->second->access(pkt);
}

void
PhysicalMemory::functionalAccess(PacketPtr pkt)
{
    assert(pkt->isRequest());
    const auto& m = addrMap.contains(pkt->getAddrRange());
    assert(m != addrMap.end());
    m->second->functionalAccess(pkt);
}

void
PhysicalMemory::serialize(CheckpointOut &cp) const
{
    // serialize all the locked addresses and their context ids
    vector<Addr> lal_addr;
    vector<ContextID> lal_cid;

    for (auto& m : memories) {
        const list<LockedAddr>& locked_addrs = m->getLockedAddrList();
        for (const auto& l : locked_addrs) {
            lal_addr.push_back(l.addr);
            lal_cid.push_back(l.contextId);
        }
    }

    SERIALIZE_CONTAINER(lal_addr);
    SERIALIZE_CONTAINER(lal_cid);

    // serialize the backing stores
    unsigned int nbr_of_stores = backingStore.size();
    SERIALIZE_SCALAR(nbr_of_stores);

    unsigned int store_id = 0;
    // store each backing store memory segment in a file
    for (auto& s : backingStore) {
        ScopedCheckpointSection sec(cp, csprintf("store%d", store_id));
        serializeStore(cp, store_id++, s.range, s.pmem);
    }
}

void
PhysicalMemory::serializeStore(CheckpointOut &cp, unsigned int store_id,
                               AddrRange range, uint8_t* pmem) const
{
    // we cannot use the address range for the name as the
    // memories that are not part of the address map can overlap
    string filename = name() + ".store" + to_string(store_id) + ".pmem";
    long range_size = range.size();

    DPRINTF(Checkpoint, "Serializing physical memory %s with size %d\n",
            filename, range_size);

    SERIALIZE_SCALAR(store_id);
    SERIALIZE_SCALAR(filename);
    SERIALIZE_SCALAR(range_size);

    // write the memory contents to a gzip-compressed file in the
    // checkpoint directory
    string filepath = CheckpointIn::dir() + "/" + filename.c_str();
    gzFile compressed_mem = gzopen(filepath.c_str(), "wb");
    if (compressed_mem == NULL)
        fatal("Can't open physical memory checkpoint file '%s'\n",
              filename);

    uint64_t pass_size = 0;

    // gzwrite takes the length as an unsigned int and returns an int,
    // so cap each pass at INT_MAX bytes
    for (uint64_t written = 0; written < range.size();
         written += pass_size) {
        pass_size = (uint64_t)INT_MAX < (range.size() - written) ?
            (uint64_t)INT_MAX : (range.size() - written);

        if (gzwrite(compressed_mem, pmem + written,
                    (unsigned int) pass_size) != (int) pass_size) {
            fatal("Write failed on physical memory checkpoint file '%s'\n",
                  filename);
        }
    }

    // close the compressed stream and check that the exit status
    // is zero
    if (gzclose(compressed_mem))
        fatal("Close failed on physical memory checkpoint file '%s'\n",
              filename);
}

void
PhysicalMemory::unserialize(CheckpointIn &cp)
{
    // unserialize the locked addresses and map them to the
    // appropriate memory controller
    vector<Addr> lal_addr;
    vector<ContextID> lal_cid;
    UNSERIALIZE_CONTAINER(lal_addr);
    UNSERIALIZE_CONTAINER(lal_cid);
    for (size_t i = 0; i < lal_addr.size(); ++i) {
        const auto& m = addrMap.contains(lal_addr[i]);
        m->second->addLockedAddr(LockedAddr(lal_addr[i], lal_cid[i]));
    }

    // unserialize the backing stores
    unsigned int nbr_of_stores;
    UNSERIALIZE_SCALAR(nbr_of_stores);

    for (unsigned int i = 0; i < nbr_of_stores; ++i) {
        ScopedCheckpointSection sec(cp, csprintf("store%d", i));
        unserializeStore(cp);
    }
}

void
PhysicalMemory::unserializeStore(CheckpointIn &cp)
{
    const uint32_t chunk_size = 16384;

    unsigned int store_id;
    UNSERIALIZE_SCALAR(store_id);

    string filename;
    UNSERIALIZE_SCALAR(filename);
    string filepath = cp.getCptDir() + "/" + filename;

    // open the compressed memory file
    gzFile compressed_mem = gzopen(filepath.c_str(), "rb");
    if (compressed_mem == NULL)
        fatal("Can't open physical memory checkpoint file '%s'", filename);

    // we've already got the actual backing store mapped
    uint8_t* pmem = backingStore[store_id].pmem;
    AddrRange range = backingStore[store_id].range;

    long range_size;
    UNSERIALIZE_SCALAR(range_size);

    DPRINTF(Checkpoint, "Unserializing physical memory %s with size %d\n",
            filename, range_size);

    if (range_size != range.size())
        fatal("Memory range size has changed! Saw %lld, expected %lld\n",
              range_size, range.size());

    uint64_t curr_size = 0;
    long* temp_page = new long[chunk_size];
    long* pmem_current;
    uint32_t bytes_read;
    while (curr_size < range.size()) {
        bytes_read = gzread(compressed_mem, temp_page, chunk_size);
        if (bytes_read == 0)
            break;

        assert(bytes_read % sizeof(long) == 0);

        for (uint32_t x = 0; x < bytes_read / sizeof(long); x++) {
            // only copy words that are non-zero; leaving zero words
            // untouched keeps the corresponding backing-store pages
            // unallocated and the mapping sparse
            if (*(temp_page + x) != 0) {
                pmem_current = (long*)(pmem + curr_size + x * sizeof(long));
                *pmem_current = *(temp_page + x);
            }
        }
        curr_size += bytes_read;
    }

    delete[] temp_page;

    if (gzclose(compressed_mem))
        fatal("Close failed on physical memory checkpoint file '%s'\n",
              filename);
}
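
Two short sketches follow to illustrate how the backing store and the checkpoint files above can be used from outside the simulator. Both are illustrative only: the shared-memory name "/gem5_backstore" is an assumption that must match the shared_backstore string passed to PhysicalMemory, and the store filename follows the "<name>.store<N>.pmem" pattern built in serializeStore().

First, a minimal sketch of an external process attaching read-only to the shared backing store that createBackingStore() publishes via shm_open() when a shared backstore name is configured (link with -lrt on older Linux systems):

#include <fcntl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

#include <cstdint>
#include <cstdio>

int main()
{
    // assumed name; must match the shared_backstore passed to gem5
    const char* name = "/gem5_backstore";

    int fd = shm_open(name, O_RDONLY, 0);
    if (fd == -1) { perror("shm_open"); return 1; }

    // the store was sized with ftruncate(), so fstat() recovers its size
    struct stat st;
    if (fstat(fd, &st) == -1) { perror("fstat"); return 1; }

    // map the simulated memory read-only; gem5 writes through its own
    // MAP_SHARED mapping, so this view observes its updates
    uint8_t* mem = (uint8_t*) mmap(NULL, st.st_size, PROT_READ,
                                   MAP_SHARED, fd, 0);
    if (mem == MAP_FAILED) { perror("mmap"); return 1; }

    printf("first byte of simulated memory: 0x%02x\n", mem[0]);

    munmap(mem, st.st_size);
    close(fd);
    return 0;
}

Second, a sketch that decompresses a serialized store file the same way unserializeStore() does, counting the bytes it holds:

#include <zlib.h>

#include <cstdint>
#include <cstdio>
#include <vector>

int main(int argc, char** argv)
{
    if (argc != 2) {
        fprintf(stderr, "usage: %s <name>.store<N>.pmem\n", argv[0]);
        return 1;
    }

    gzFile f = gzopen(argv[1], "rb");
    if (f == NULL) { fprintf(stderr, "gzopen failed\n"); return 1; }

    // read in the same 16 KiB chunks as unserializeStore()
    std::vector<uint8_t> buf(16384);
    uint64_t total = 0;
    int n;
    while ((n = gzread(f, buf.data(), (unsigned int) buf.size())) > 0)
        total += n;

    gzclose(f);
    printf("store holds %llu bytes of simulated memory\n",
           (unsigned long long) total);
    return 0;
}

The byte count reported by the second sketch should equal the range_size scalar recorded in the checkpoint's store<N> section.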