gem5 v20.0.0.0
physical.cc
/*
 * Copyright (c) 2012, 2014, 2018 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "mem/physical.hh"

#include <fcntl.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/user.h>
#include <unistd.h>
#include <zlib.h>

#include <cerrno>
#include <climits>
#include <cstdio>
#include <iostream>
#include <string>

#include "base/trace.hh"
#include "debug/AddrRanges.hh"
#include "debug/Checkpoint.hh"
#include "mem/abstract_mem.hh"

/**
 * A doc comment collapsed by Doxygen preceded this block; its gist,
 * recoverable from the code: MAP_NORESERVE is a Linux-specific mmap
 * flag that is not defined on macOS and FreeBSD, so define it to zero
 * there to keep the mmap call below portable (a no-op on those hosts).
 */
#if defined(__APPLE__) || defined(__FreeBSD__)
#ifndef MAP_NORESERVE
#define MAP_NORESERVE 0
#endif
#endif

using namespace std;

PhysicalMemory::PhysicalMemory(const string& _name,
                               const vector<AbstractMemory*>& _memories,
                               bool mmap_using_noreserve) :
    _name(_name), size(0), mmapUsingNoReserve(mmap_using_noreserve)
{
    if (mmap_using_noreserve)
        warn("Not reserving swap space. May cause SIGSEGV on actual usage\n");

    // add the memories from the system to the address map as appropriate
    for (const auto& m : _memories) {
        // only add the memory if it is part of the global address map
        if (m->isInAddrMap()) {
            memories.push_back(m);

            // calculate the total size once and for all
            size += m->size();

            // add the range to our interval tree and make sure it does
            // not intersect an existing range
            fatal_if(addrMap.insert(m->getAddrRange(), m) == addrMap.end(),
                     "Memory address range for %s is overlapping\n",
                     m->name());
        } else {
            // this type of memory is used e.g. as reference memory by
            // Ruby; it also needs a backing store, but should not be
            // part of the global address map
            DPRINTF(AddrRanges,
                    "Skipping memory %s that is not in global address map\n",
                    m->name());

            // sanity check
            fatal_if(m->getAddrRange().interleaved(),
                     "Memory %s that is not in the global address map cannot "
                     "be interleaved\n", m->name());

            // simply do it independently; note that memories of this
            // kind are allowed to overlap in the logical address map
            vector<AbstractMemory*> unmapped_mems{m};
            createBackingStore(m->getAddrRange(), unmapped_mems,
                               m->isConfReported(), m->isInAddrMap(),
                               m->isKvmMap());
        }
    }

    // iterate over the increasing addresses and chunks of contiguous
    // space to be mapped to backing store, create it and inform the
    // memories
    vector<AddrRange> intlv_ranges;
    vector<AbstractMemory*> curr_memories;
    for (const auto& r : addrMap) {
        // simply skip past all memories that are null and hence do
        // not need any backing store
        if (!r.second->isNull()) {
            // if the range is interleaved then save it for now
            if (r.first.interleaved()) {
                // if we already got interleaved ranges that are not
                // part of the same range, then first do a merge
                // before we add the new one
                if (!intlv_ranges.empty() &&
                    !intlv_ranges.back().mergesWith(r.first)) {
                    AddrRange merged_range(intlv_ranges);

                    AbstractMemory *f = curr_memories.front();
                    for (const auto& c : curr_memories)
                        if (f->isConfReported() != c->isConfReported() ||
                            f->isInAddrMap() != c->isInAddrMap() ||
                            f->isKvmMap() != c->isKvmMap())
                            fatal("Inconsistent flags in an interleaved "
                                  "range\n");

                    createBackingStore(merged_range, curr_memories,
                                       f->isConfReported(), f->isInAddrMap(),
                                       f->isKvmMap());

                    intlv_ranges.clear();
                    curr_memories.clear();
                }
                intlv_ranges.push_back(r.first);
                curr_memories.push_back(r.second);
            } else {
                vector<AbstractMemory*> single_memory{r.second};
                createBackingStore(r.first, single_memory,
                                   r.second->isConfReported(),
                                   r.second->isInAddrMap(),
                                   r.second->isKvmMap());
            }
        }
    }

    // if there are still interleaved ranges waiting to be merged, go
    // ahead and do it
    if (!intlv_ranges.empty()) {
        AddrRange merged_range(intlv_ranges);

        AbstractMemory *f = curr_memories.front();
        for (const auto& c : curr_memories)
            if (f->isConfReported() != c->isConfReported() ||
                f->isInAddrMap() != c->isInAddrMap() ||
                f->isKvmMap() != c->isKvmMap())
                fatal("Inconsistent flags in an interleaved "
                      "range\n");

        createBackingStore(merged_range, curr_memories,
                           f->isConfReported(), f->isInAddrMap(),
                           f->isKvmMap());
    }
}
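
// Illustrative note (editorial, not in the original source): with four
// DRAM channels interleaved on, say, address bits 7:6, the address map
// holds four AddrRanges that all span the same [start, end) interval
// and differ only in their interleaving match value. mergesWith() is
// true across such a set, so the loop above folds them into one merged
// AddrRange and creates a single contiguous backing store shared by
// all four controllers, rather than one mmap per channel.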

void
PhysicalMemory::createBackingStore(AddrRange range,
                                   const vector<AbstractMemory*>& _memories,
                                   bool conf_table_reported,
                                   bool in_addr_map, bool kvm_map)
{
    panic_if(range.interleaved(),
             "Cannot create backing store for interleaved range %s\n",
             range.to_string());

    // perform the actual mmap
    DPRINTF(AddrRanges, "Creating backing store for range %s with size %d\n",
            range.to_string(), range.size());
    int map_flags = MAP_ANON | MAP_PRIVATE;

    // to be able to simulate very large memories, the user can opt to
    // pass noreserve to mmap
    if (mmapUsingNoReserve) {
        map_flags |= MAP_NORESERVE;
    }

    uint8_t* pmem = (uint8_t*) mmap(NULL, range.size(),
                                    PROT_READ | PROT_WRITE,
                                    map_flags, -1, 0);

    if (pmem == (uint8_t*) MAP_FAILED) {
        perror("mmap");
        fatal("Could not mmap %d bytes for range %s!\n", range.size(),
              range.to_string());
    }

    // remember this backing store so we can checkpoint it and unmap
    // it appropriately
    backingStore.emplace_back(range, pmem,
                              conf_table_reported, in_addr_map, kvm_map);

    // point the memories to their backing store
    for (const auto& m : _memories) {
        DPRINTF(AddrRanges, "Mapping memory %s to backing store\n",
                m->name());
        m->setBackingStore(pmem);
    }
}
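
// A minimal standalone sketch (editorial; POSIX host, hypothetical
// size) of the mapping strategy used above -- an anonymous, private,
// lazily populated region:
//
//   size_t sz = size_t(1) << 34;        // e.g. 16 GiB of simulated RAM
//   void *p = mmap(NULL, sz, PROT_READ | PROT_WRITE,
//                  MAP_ANON | MAP_PRIVATE | MAP_NORESERVE, -1, 0);
//
// Host pages are allocated only on first touch, so simulating a large
// memory costs host RAM only for the pages the workload actually uses.
// MAP_NORESERVE additionally skips the swap-space reservation, at the
// price of a possible SIGSEGV if the host later runs out of memory, as
// the warn() in the constructor notes.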

PhysicalMemory::~PhysicalMemory()
{
    // unmap the backing store
    for (auto& s : backingStore)
        munmap((char*)s.pmem, s.range.size());
}

bool
PhysicalMemory::isMemAddr(Addr addr) const
{
    return addrMap.contains(addr) != addrMap.end();
}

AddrRangeList
PhysicalMemory::getConfAddrRanges() const
{
    // this could be done once in the constructor, but since it is
    // unlikely to be called more than once the iteration should not be
    // a problem
    AddrRangeList ranges;
    vector<AddrRange> intlv_ranges;
    for (const auto& r : addrMap) {
        if (r.second->isConfReported()) {
            // if the range is interleaved then save it for now
            if (r.first.interleaved()) {
                // if we already got interleaved ranges that are not
                // part of the same range, then first do a merge
                // before we add the new one
                if (!intlv_ranges.empty() &&
                    !intlv_ranges.back().mergesWith(r.first)) {
                    ranges.push_back(AddrRange(intlv_ranges));
                    intlv_ranges.clear();
                }
                intlv_ranges.push_back(r.first);
            } else {
                // keep the current range
                ranges.push_back(r.first);
            }
        }
    }

    // if there are still interleaved ranges waiting to be merged,
    // go ahead and do it
    if (!intlv_ranges.empty()) {
        ranges.push_back(AddrRange(intlv_ranges));
    }

    return ranges;
}
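
// Usage sketch (editorial, hypothetical caller): the merged ranges are
// what a configuration table or device tree should report, e.g.
//
//   for (const auto &r : physmem.getConfAddrRanges())
//       inform("reported memory range: %s", r.to_string());
//
// Interleaved channel ranges come back collapsed into single
// contiguous ranges, matching what the OS expects a physical memory
// map to look like.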

void
PhysicalMemory::access(PacketPtr pkt)
{
    assert(pkt->isRequest());
    const auto& m = addrMap.contains(pkt->getAddrRange());
    assert(m != addrMap.end());
    m->second->access(pkt);
}

void
PhysicalMemory::functionalAccess(PacketPtr pkt)
{
    assert(pkt->isRequest());
    const auto& m = addrMap.contains(pkt->getAddrRange());
    assert(m != addrMap.end());
    m->second->functionalAccess(pkt);
}
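
// Note (editorial): both methods resolve the packet's address range to
// the owning controller through the interval tree and forward the
// packet. access() performs an untimed, state-changing access (the
// atomic-mode path), while functionalAccess() is the debug path that
// reads or writes memory contents without perturbing any other state.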

void
PhysicalMemory::serialize(CheckpointOut &cp) const
{
    // serialize all the locked addresses and their context ids
    vector<Addr> lal_addr;
    vector<ContextID> lal_cid;

    for (auto& m : memories) {
        const list<LockedAddr>& locked_addrs = m->getLockedAddrList();
        for (const auto& l : locked_addrs) {
            lal_addr.push_back(l.addr);
            lal_cid.push_back(l.contextId);
        }
    }

    SERIALIZE_CONTAINER(lal_addr);
    SERIALIZE_CONTAINER(lal_cid);

    // serialize the backing stores
    unsigned int nbr_of_stores = backingStore.size();
    SERIALIZE_SCALAR(nbr_of_stores);

    unsigned int store_id = 0;
    // store each backing store memory segment in a file
    for (auto& s : backingStore) {
        ScopedCheckpointSection sec(cp, csprintf("store%d", store_id));
        serializeStore(cp, store_id++, s.range, s.pmem);
    }
}
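
// Illustrative checkpoint layout (editorial; hypothetical names, two
// stores). Each ScopedCheckpointSection opens an INI-style section and
// each SERIALIZE_SCALAR emits one key=value line:
//
//   [system.physmem]
//   lal_addr=...
//   lal_cid=...
//   nbr_of_stores=2
//
//   [system.physmem.store0]
//   store_id=0
//   filename=system.physmem.store0.pmem
//   range_size=2147483648
//
// The actual memory contents go to the gzip-compressed .pmem files
// written next to the checkpoint file by serializeStore() below.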

void
PhysicalMemory::serializeStore(CheckpointOut &cp, unsigned int store_id,
                               AddrRange range, uint8_t* pmem) const
{
    // we cannot use the address range for the name as the
    // memories that are not part of the address map can overlap
    string filename = name() + ".store" + to_string(store_id) + ".pmem";
    long range_size = range.size();

    DPRINTF(Checkpoint, "Serializing physical memory %s with size %d\n",
            filename, range_size);

    SERIALIZE_SCALAR(store_id);
    SERIALIZE_SCALAR(filename);
    SERIALIZE_SCALAR(range_size);

    // write memory file
    string filepath = CheckpointIn::dir() + "/" + filename;
    gzFile compressed_mem = gzopen(filepath.c_str(), "wb");
    if (compressed_mem == NULL)
        fatal("Can't open physical memory checkpoint file '%s'\n",
              filename);

    uint64_t pass_size = 0;

    // gzwrite fails if (int)len < 0 (gzwrite returns int)
    for (uint64_t written = 0; written < range.size();
         written += pass_size) {
        pass_size = (uint64_t)INT_MAX < (range.size() - written) ?
            (uint64_t)INT_MAX : (range.size() - written);

        if (gzwrite(compressed_mem, pmem + written,
                    (unsigned int) pass_size) != (int) pass_size) {
            fatal("Write failed on physical memory checkpoint file '%s'\n",
                  filename);
        }
    }

    // close the compressed stream and check that the exit status
    // is zero
    if (gzclose(compressed_mem))
        fatal("Close failed on physical memory checkpoint file '%s'\n",
              filename);
}
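
// Worked example (editorial): gzwrite() takes an unsigned length but
// returns the byte count as a signed int, so each pass is capped at
// INT_MAX (2147483647) bytes. Checkpointing an 8 GiB (8589934592-byte)
// store therefore takes five passes: four of 2147483647 bytes each,
// followed by a final pass of 4 bytes.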

void
PhysicalMemory::unserialize(CheckpointIn &cp)
{
    // unserialize the locked addresses and map them to the
    // appropriate memory controller
    vector<Addr> lal_addr;
    vector<ContextID> lal_cid;
    UNSERIALIZE_CONTAINER(lal_addr);
    UNSERIALIZE_CONTAINER(lal_cid);
    for (size_t i = 0; i < lal_addr.size(); ++i) {
        const auto& m = addrMap.contains(lal_addr[i]);
        m->second->addLockedAddr(LockedAddr(lal_addr[i], lal_cid[i]));
    }

    // unserialize the backing stores
    unsigned int nbr_of_stores;
    UNSERIALIZE_SCALAR(nbr_of_stores);

    for (unsigned int i = 0; i < nbr_of_stores; ++i) {
        ScopedCheckpointSection sec(cp, csprintf("store%d", i));
        unserializeStore(cp);
    }
}

void
PhysicalMemory::unserializeStore(CheckpointIn &cp)
{
    const uint32_t chunk_size = 16384;

    unsigned int store_id;
    UNSERIALIZE_SCALAR(store_id);

    string filename;
    UNSERIALIZE_SCALAR(filename);
    string filepath = cp.getCptDir() + "/" + filename;

    // open the compressed memory file
    gzFile compressed_mem = gzopen(filepath.c_str(), "rb");
    if (compressed_mem == NULL)
        fatal("Can't open physical memory checkpoint file '%s'", filename);

    // we've already got the actual backing store mapped
    uint8_t* pmem = backingStore[store_id].pmem;
    AddrRange range = backingStore[store_id].range;

    long range_size;
    UNSERIALIZE_SCALAR(range_size);

    DPRINTF(Checkpoint, "Unserializing physical memory %s with size %d\n",
            filename, range_size);

    if (range_size != range.size())
        fatal("Memory range size has changed! Saw %lld, expected %lld\n",
              range_size, range.size());

    uint64_t curr_size = 0;
    long* temp_page = new long[chunk_size];
    long* pmem_current;
    uint32_t bytes_read;
    while (curr_size < range.size()) {
        bytes_read = gzread(compressed_mem, temp_page, chunk_size);
        if (bytes_read == 0)
            break;

        assert(bytes_read % sizeof(long) == 0);

        for (uint32_t x = 0; x < bytes_read / sizeof(long); x++) {
            // only copy words that are non-zero, so we don't give
            // the VM system hell
            if (*(temp_page + x) != 0) {
                pmem_current = (long*)(pmem + curr_size + x * sizeof(long));
                *pmem_current = *(temp_page + x);
            }
        }
        curr_size += bytes_read;
    }

    delete[] temp_page;

    if (gzclose(compressed_mem))
        fatal("Close failed on physical memory checkpoint file '%s'\n",
              filename);
}
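
// Note (editorial): the zero-skip in the copy loop above is what keeps
// the restored backing store sparse. Freshly mmap'ed anonymous pages
// already read as zero, so only words that are non-zero in the
// checkpoint are written back; host pages backing regions the
// simulated system never touched are never faulted in at all.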