gem5 v19.0.0.0
physical.cc
/*
 * Copyright (c) 2012, 2014, 2018 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Andreas Hansson
 */

#include "mem/physical.hh"

#include <fcntl.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/user.h>
#include <unistd.h>
#include <zlib.h>

#include <cerrno>
#include <climits>
#include <cstdio>
#include <iostream>
#include <string>

#include "base/trace.hh"
#include "debug/AddrRanges.hh"
#include "debug/Checkpoint.hh"
#include "mem/abstract_mem.hh"

#if defined(__APPLE__) || defined(__FreeBSD__)
#ifndef MAP_NORESERVE
#define MAP_NORESERVE 0
#endif
#endif
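
// Editor's note (a sketch, not in the original file): MAP_NORESERVE is a
// Linux mmap flag that skips reserving swap space, letting the host
// overcommit so that very large simulated memories can be mapped cheaply.
// macOS and FreeBSD do not define it, hence the no-op fallback above.
// Minimal usage sketch:
//
//     // map 1 TiB without reserving host swap; host pages are
//     // allocated lazily on first touch
//     void *p = mmap(NULL, 1ULL << 40, PROT_READ | PROT_WRITE,
//                    MAP_ANON | MAP_PRIVATE | MAP_NORESERVE, -1, 0);
//
// If the host later cannot back a touched page, the process receives a
// SIGSEGV, which is what the warn() in the constructor below refers to.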

using namespace std;

PhysicalMemory::PhysicalMemory(const string& _name,
                               const vector<AbstractMemory*>& _memories,
                               bool mmap_using_noreserve) :
    _name(_name), size(0), mmapUsingNoReserve(mmap_using_noreserve)
{
    if (mmap_using_noreserve)
        warn("Not reserving swap space. May cause SIGSEGV on actual usage\n");

    // add the memories from the system to the address map as
    // appropriate
    for (const auto& m : _memories) {
        // only add the memory if it is part of the global address map
        if (m->isInAddrMap()) {
            memories.push_back(m);

            // calculate the total size once and for all
            size += m->size();

            // add the range to our interval tree and make sure it does not
            // intersect an existing range
            fatal_if(addrMap.insert(m->getAddrRange(), m) == addrMap.end(),
                     "Memory address range for %s is overlapping\n",
                     m->name());
        } else {
            // this type of memory is used e.g. as reference memory by
            // Ruby; it also needs a backing store, but should not be
            // part of the global address map
            DPRINTF(AddrRanges,
                    "Skipping memory %s that is not in global address map\n",
                    m->name());

            // sanity check
            fatal_if(m->getAddrRange().interleaved(),
                     "Memory %s that is not in the global address map cannot "
                     "be interleaved\n", m->name());

            // simply do it independently; note that these memories are
            // allowed to overlap in the logical address map
            vector<AbstractMemory*> unmapped_mems{m};
            createBackingStore(m->getAddrRange(), unmapped_mems,
                               m->isConfReported(), m->isInAddrMap(),
                               m->isKvmMap());
        }
    }

    // iterate over the increasing addresses and chunks of contiguous
    // space to be mapped to backing store, create it and inform the
    // memories
    vector<AddrRange> intlv_ranges;
    vector<AbstractMemory*> curr_memories;
    for (const auto& r : addrMap) {
        // simply skip past all memories that are null and hence do
        // not need any backing store
        if (!r.second->isNull()) {
            // if the range is interleaved then save it for now
            if (r.first.interleaved()) {
                // if we have already collected interleaved ranges that
                // are not part of the same range, then first do a merge
                // before we add the new one
                if (!intlv_ranges.empty() &&
                    !intlv_ranges.back().mergesWith(r.first)) {
                    AddrRange merged_range(intlv_ranges);

                    AbstractMemory *f = curr_memories.front();
                    for (const auto& c : curr_memories)
                        if (f->isConfReported() != c->isConfReported() ||
                            f->isInAddrMap() != c->isInAddrMap() ||
                            f->isKvmMap() != c->isKvmMap())
                            fatal("Inconsistent flags in an interleaved "
                                  "range\n");

                    createBackingStore(merged_range, curr_memories,
                                       f->isConfReported(), f->isInAddrMap(),
                                       f->isKvmMap());

                    intlv_ranges.clear();
                    curr_memories.clear();
                }
                intlv_ranges.push_back(r.first);
                curr_memories.push_back(r.second);
            } else {
                vector<AbstractMemory*> single_memory{r.second};
                createBackingStore(r.first, single_memory,
                                   r.second->isConfReported(),
                                   r.second->isInAddrMap(),
                                   r.second->isKvmMap());
            }
        }
    }

    // if there are still interleaved ranges waiting to be merged, go
    // ahead and do it
    if (!intlv_ranges.empty()) {
        AddrRange merged_range(intlv_ranges);

        AbstractMemory *f = curr_memories.front();
        for (const auto& c : curr_memories)
            if (f->isConfReported() != c->isConfReported() ||
                f->isInAddrMap() != c->isInAddrMap() ||
                f->isKvmMap() != c->isKvmMap())
                fatal("Inconsistent flags in an interleaved "
                      "range\n");

        createBackingStore(merged_range, curr_memories,
                           f->isConfReported(), f->isInAddrMap(),
                           f->isKvmMap());
    }
}
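
// Editor's sketch of the merge performed above (hypothetical values;
// constructor forms per addr_range.hh): two controllers interleaved on
// address bit 6 over the same 512 MiB region contribute
//
//     AddrRange a(0x0, 0x20000000, {1ULL << 6}, 0);  // match 0
//     AddrRange b(0x0, 0x20000000, {1ULL << 6}, 1);  // match 1
//
// Iterating addrMap in address order, a.mergesWith(b) holds, so both are
// collected into intlv_ranges/curr_memories, and
// AddrRange merged_range(intlv_ranges) folds them into one contiguous
// [0, 512 MiB) range backed by a single mmap'ed region.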

void
PhysicalMemory::createBackingStore(AddrRange range,
                                   const vector<AbstractMemory*>& _memories,
                                   bool conf_table_reported,
                                   bool in_addr_map, bool kvm_map)
{
    panic_if(range.interleaved(),
             "Cannot create backing store for interleaved range %s\n",
             range.to_string());

    // perform the actual mmap
    DPRINTF(AddrRanges, "Creating backing store for range %s with size %d\n",
            range.to_string(), range.size());
    int map_flags = MAP_ANON | MAP_PRIVATE;

    // to be able to simulate very large memories, the user can opt to
    // pass noreserve to mmap
    if (mmapUsingNoReserve) {
        map_flags |= MAP_NORESERVE;
    }

    uint8_t* pmem = (uint8_t*) mmap(NULL, range.size(),
                                    PROT_READ | PROT_WRITE,
                                    map_flags, -1, 0);

    if (pmem == (uint8_t*) MAP_FAILED) {
        perror("mmap");
        fatal("Could not mmap %d bytes for range %s!\n", range.size(),
              range.to_string());
    }

    // remember this backing store so we can checkpoint it and unmap
    // it appropriately
    backingStore.emplace_back(range, pmem,
                              conf_table_reported, in_addr_map, kvm_map);

    // point the memories to their backing store
    for (const auto& m : _memories) {
        DPRINTF(AddrRanges, "Mapping memory %s to backing store\n",
                m->name());
        m->setBackingStore(pmem);
    }
}
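
// Editor's note: MAP_ANON mappings read as zeroes, and on typical hosts
// the pages are committed lazily on first write, so the mmap above is
// cheap even for very large ranges; host memory is only consumed where
// the simulated memory is actually written. This is also what makes the
// sparse restore in unserializeStore() below effective.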

PhysicalMemory::~PhysicalMemory()
{
    // unmap the backing store
    for (auto& s : backingStore)
        munmap((char*)s.pmem, s.range.size());
}

bool
PhysicalMemory::isMemAddr(Addr addr) const
{
    return addrMap.contains(addr) != addrMap.end();
}

AddrRangeList
PhysicalMemory::getConfAddrRanges() const
{
    // this could be done once in the constructor, but since it is unlikely
    // to be called more than once the iteration should not be a problem
    AddrRangeList ranges;
    vector<AddrRange> intlv_ranges;
    for (const auto& r : addrMap) {
        if (r.second->isConfReported()) {
            // if the range is interleaved then save it for now
            if (r.first.interleaved()) {
                // if we have already collected interleaved ranges that
                // are not part of the same range, then first do a merge
                // before we add the new one
                if (!intlv_ranges.empty() &&
                    !intlv_ranges.back().mergesWith(r.first)) {
                    ranges.push_back(AddrRange(intlv_ranges));
                    intlv_ranges.clear();
                }
                intlv_ranges.push_back(r.first);
            } else {
                // keep the current range
                ranges.push_back(r.first);
            }
        }
    }

    // if there are still interleaved ranges waiting to be merged,
    // go ahead and do it
    if (!intlv_ranges.empty()) {
        ranges.push_back(AddrRange(intlv_ranges));
    }

    return ranges;
}

void
PhysicalMemory::access(PacketPtr pkt)
{
    assert(pkt->isRequest());
    const auto& m = addrMap.contains(pkt->getAddrRange());
    assert(m != addrMap.end());
    m->second->access(pkt);
}

void
PhysicalMemory::functionalAccess(PacketPtr pkt)
{
    assert(pkt->isRequest());
    const auto& m = addrMap.contains(pkt->getAddrRange());
    assert(m != addrMap.end());
    m->second->functionalAccess(pkt);
}

void
PhysicalMemory::serialize(CheckpointOut &cp) const
{
    // serialize all the locked addresses and their context ids
    vector<Addr> lal_addr;
    vector<ContextID> lal_cid;

    for (auto& m : memories) {
        const list<LockedAddr>& locked_addrs = m->getLockedAddrList();
        for (const auto& l : locked_addrs) {
            lal_addr.push_back(l.addr);
            lal_cid.push_back(l.contextId);
        }
    }

    SERIALIZE_CONTAINER(lal_addr);
    SERIALIZE_CONTAINER(lal_cid);

    // serialize the backing stores
    unsigned int nbr_of_stores = backingStore.size();
    SERIALIZE_SCALAR(nbr_of_stores);

    unsigned int store_id = 0;
    // store each backing store memory segment in a file
    for (auto& s : backingStore) {
        ScopedCheckpointSection sec(cp, csprintf("store%d", store_id));
        serializeStore(cp, store_id++, s.range, s.pmem);
    }
}
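
// Editor's sketch of the resulting checkpoint sections (key names taken
// from the SERIALIZE_* calls above; the section names and values are
// hypothetical):
//
//     [system.physmem]
//     lal_addr=...
//     lal_cid=...
//     nbr_of_stores=2
//
//     [system.physmem.store0]
//     store_id=0
//     filename=system.physmem.store0.pmem
//     range_size=536870912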

void
PhysicalMemory::serializeStore(CheckpointOut &cp, unsigned int store_id,
                               AddrRange range, uint8_t* pmem) const
{
    // we cannot use the address range for the name as the
    // memories that are not part of the address map can overlap
    string filename = name() + ".store" + to_string(store_id) + ".pmem";
    long range_size = range.size();

    DPRINTF(Checkpoint, "Serializing physical memory %s with size %d\n",
            filename, range_size);

    SERIALIZE_SCALAR(store_id);
    SERIALIZE_SCALAR(filename);
    SERIALIZE_SCALAR(range_size);

    // write memory file
    string filepath = CheckpointIn::dir() + "/" + filename;
    gzFile compressed_mem = gzopen(filepath.c_str(), "wb");
    if (compressed_mem == NULL)
        fatal("Can't open physical memory checkpoint file '%s'\n",
              filename);

    uint64_t pass_size = 0;

    // gzwrite fails if (int)len < 0 (gzwrite returns int)
    for (uint64_t written = 0; written < range.size();
         written += pass_size) {
        pass_size = (uint64_t)INT_MAX < (range.size() - written) ?
            (uint64_t)INT_MAX : (range.size() - written);

        if (gzwrite(compressed_mem, pmem + written,
                    (unsigned int) pass_size) != (int) pass_size) {
            fatal("Write failed on physical memory checkpoint file '%s'\n",
                  filename);
        }
    }

    // close the compressed stream and check that the exit status
    // is zero
    if (gzclose(compressed_mem))
        fatal("Close failed on physical memory checkpoint file '%s'\n",
              filename);
}
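
// Editor's note on the chunking above: gzwrite() takes and returns int,
// so one call can move at most INT_MAX (2^31 - 1) bytes. A 4 GiB store
// (4294967296 bytes) is therefore written in three passes:
//
//     pass 1: pass_size = 2147483647 (INT_MAX)
//     pass 2: pass_size = 2147483647
//     pass 3: pass_size = 2 (the remainder)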

void
PhysicalMemory::unserialize(CheckpointIn &cp)
{
    // unserialize the locked addresses and map them to the
    // appropriate memory controller
    vector<Addr> lal_addr;
    vector<ContextID> lal_cid;
    UNSERIALIZE_CONTAINER(lal_addr);
    UNSERIALIZE_CONTAINER(lal_cid);
    for (size_t i = 0; i < lal_addr.size(); ++i) {
        const auto& m = addrMap.contains(lal_addr[i]);
        m->second->addLockedAddr(LockedAddr(lal_addr[i], lal_cid[i]));
    }

    // unserialize the backing stores
    unsigned int nbr_of_stores;
    UNSERIALIZE_SCALAR(nbr_of_stores);

    for (unsigned int i = 0; i < nbr_of_stores; ++i) {
        ScopedCheckpointSection sec(cp, csprintf("store%d", i));
        unserializeStore(cp);
    }
}

void
PhysicalMemory::unserializeStore(CheckpointIn &cp)
{
    const uint32_t chunk_size = 16384;

    unsigned int store_id;
    UNSERIALIZE_SCALAR(store_id);

    string filename;
    UNSERIALIZE_SCALAR(filename);
    string filepath = cp.cptDir + "/" + filename;

    // open the compressed memory file
    gzFile compressed_mem = gzopen(filepath.c_str(), "rb");
    if (compressed_mem == NULL)
        fatal("Can't open physical memory checkpoint file '%s'", filename);

    // we've already got the actual backing store mapped
    uint8_t* pmem = backingStore[store_id].pmem;
    AddrRange range = backingStore[store_id].range;

    long range_size;
    UNSERIALIZE_SCALAR(range_size);

    DPRINTF(Checkpoint, "Unserializing physical memory %s with size %d\n",
            filename, range_size);

    if (range_size != range.size())
        fatal("Memory range size has changed! Saw %lld, expected %lld\n",
              range_size, range.size());

    uint64_t curr_size = 0;
    long* temp_page = new long[chunk_size];
    long* pmem_current;
    // gzread returns the number of bytes read, 0 at end of file, and
    // -1 on error, so use a signed type and stop on anything <= 0
    int bytes_read;
    while (curr_size < range.size()) {
        bytes_read = gzread(compressed_mem, temp_page, chunk_size);
        if (bytes_read <= 0)
            break;

        assert(bytes_read % sizeof(long) == 0);

        for (uint32_t x = 0; x < bytes_read / sizeof(long); x++) {
            // only copy bytes that are non-zero, so we don't give
            // the VM system hell
            if (*(temp_page + x) != 0) {
                pmem_current = (long*)(pmem + curr_size + x * sizeof(long));
                *pmem_current = *(temp_page + x);
            }
        }
        curr_size += bytes_read;
    }

    delete[] temp_page;

    if (gzclose(compressed_mem))
        fatal("Close failed on physical memory checkpoint file '%s'\n",
              filename);
}
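
// Editor's note on the restore loop above: the backing store was created
// with MAP_ANON (see createBackingStore) and therefore already reads as
// zeroes, so skipping zero-valued longs avoids dirtying host pages for
// simulated memory that was never written; only chunks containing
// non-zero data force the host to commit real pages.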