gem5 v24.0.0.0
physical.cc
Go to the documentation of this file.
/*
 * Copyright (c) 2012, 2014, 2018 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "mem/physical.hh"

#include <fcntl.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/user.h>
#include <unistd.h>
#include <zlib.h>

#include <cerrno>
#include <climits>
#include <cstdio>
#include <iostream>
#include <string>

#include "base/intmath.hh"
#include "base/trace.hh"
#include "debug/AddrRanges.hh"
#include "debug/Checkpoint.hh"
#include "mem/abstract_mem.hh"
#include "sim/serialize.hh"
#include "sim/sim_exit.hh"

#if defined(__APPLE__) || defined(__FreeBSD__)
#ifndef MAP_NORESERVE
#define MAP_NORESERVE 0
#endif
#endif

namespace gem5
{

namespace memory
{

PhysicalMemory::PhysicalMemory(const std::string& _name,
                               const std::vector<AbstractMemory*>& _memories,
                               bool mmap_using_noreserve,
                               const std::string& shared_backstore,
                               bool auto_unlink_shared_backstore) :
    _name(_name), size(0), mmapUsingNoReserve(mmap_using_noreserve),
    sharedBackstore(shared_backstore), sharedBackstoreSize(0),
    pageSize(sysconf(_SC_PAGE_SIZE))
{
    // Register cleanup callback if requested.
    if (auto_unlink_shared_backstore && !sharedBackstore.empty()) {
        registerExitCallback([=]() { shm_unlink(shared_backstore.c_str()); });
    }

    if (mmap_using_noreserve)
        warn("Not reserving swap space. May cause SIGSEGV on actual usage\n");

    // add the memories from the system to the address map as
    // appropriate
    for (const auto& m : _memories) {
        // only add the memory if it is part of the global address map
        if (m->isInAddrMap()) {
            memories.push_back(m);

            // calculate the total size once and for all
            size += m->size();

            // add the range to our interval tree and make sure it does not
            // intersect an existing range
            fatal_if(addrMap.insert(m->getAddrRange(), m) == addrMap.end(),
                     "Memory address range for %s is overlapping\n",
                     m->name());
        } else {
            // this type of memory is used e.g. as reference memory by
            // Ruby, and it also needs a backing store, but should
            // not be part of the global address map
            DPRINTF(AddrRanges,
                    "Skipping memory %s that is not in global address map\n",
                    m->name());

            // sanity check
            fatal_if(m->getAddrRange().interleaved(),
                     "Memory %s that is not in the global address map cannot "
                     "be interleaved\n", m->name());
            // simply create it independently; note that memories of this
            // kind are also allowed to overlap in the logical address
            // map
            std::vector<AbstractMemory*> unmapped_mems{m};
            createBackingStore(m->getAddrRange(), unmapped_mems,
                               m->isConfReported(), m->isInAddrMap(),
                               m->isKvmMap());
        }
    }

    // iterate over the increasing addresses and chunks of contiguous
    // space to be mapped to backing store, create it and inform the
    // memories
    std::vector<AddrRange> intlv_ranges;
    std::vector<AbstractMemory*> curr_memories;
    for (const auto& r : addrMap) {
        // simply skip past all memories that are null and hence do
        // not need any backing store
        if (!r.second->isNull()) {
            // if the range is interleaved then save it for now
            if (r.first.interleaved()) {
                // if we already got interleaved ranges that are not
                // part of the same range, then first do a merge
                // before we add the new one
                if (!intlv_ranges.empty() &&
                    !intlv_ranges.back().mergesWith(r.first)) {
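                    // collapse the accumulated interleaved ranges into
                    // the single contiguous range that they jointly cover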
                    AddrRange merged_range(intlv_ranges);

                    AbstractMemory *f = curr_memories.front();
                    for (const auto& c : curr_memories)
                        if (f->isConfReported() != c->isConfReported() ||
                            f->isInAddrMap() != c->isInAddrMap() ||
                            f->isKvmMap() != c->isKvmMap())
                            fatal("Inconsistent flags in an interleaved "
                                  "range\n");

                    createBackingStore(merged_range, curr_memories,
                                       f->isConfReported(), f->isInAddrMap(),
                                       f->isKvmMap());

                    intlv_ranges.clear();
                    curr_memories.clear();
                }
                intlv_ranges.push_back(r.first);
                curr_memories.push_back(r.second);
            } else {
                std::vector<AbstractMemory*> single_memory{r.second};
                createBackingStore(r.first, single_memory,
                                   r.second->isConfReported(),
                                   r.second->isInAddrMap(),
                                   r.second->isKvmMap());
            }
        }
    }
    // if there are still interleaved ranges waiting to be merged, go
    // ahead and do it
    if (!intlv_ranges.empty()) {
        AddrRange merged_range(intlv_ranges);

        AbstractMemory *f = curr_memories.front();
        for (const auto& c : curr_memories)
            if (f->isConfReported() != c->isConfReported() ||
                f->isInAddrMap() != c->isInAddrMap() ||
                f->isKvmMap() != c->isKvmMap())
                fatal("Inconsistent flags in an interleaved "
                      "range\n");

        createBackingStore(merged_range, curr_memories,
                           f->isConfReported(), f->isInAddrMap(),
                           f->isKvmMap());
    }
}

void
PhysicalMemory::createBackingStore(
    AddrRange range, const std::vector<AbstractMemory*>& _memories,
    bool conf_table_reported, bool in_addr_map, bool kvm_map)
{
    panic_if(range.interleaved(),
             "Cannot create backing store for interleaved range %s\n",
             range.to_string());

    // perform the actual mmap
    DPRINTF(AddrRanges, "Creating backing store for range %s with size %d\n",
            range.to_string(), range.size());

    int shm_fd;
    int map_flags;
    off_t map_offset;

    if (sharedBackstore.empty()) {
        shm_fd = -1;
        map_flags = MAP_ANON | MAP_PRIVATE;
        map_offset = 0;
    } else {
        // the newly created backstore will be located after the previous one
        map_offset = sharedBackstoreSize;
        // mmap requires the offset to be a multiple of the page size, so
        // round the range size up to a whole number of pages
        sharedBackstoreSize += roundUp(range.size(), pageSize);
        DPRINTF(AddrRanges, "Sharing backing store as %s at offset %llu\n",
                sharedBackstore.c_str(), (uint64_t)map_offset);
        shm_fd = shm_open(sharedBackstore.c_str(), O_CREAT | O_RDWR, 0666);
        if (shm_fd == -1)
            panic("Shared memory failed");
        if (ftruncate(shm_fd, sharedBackstoreSize))
            panic("Setting size of shared memory failed");
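        // MAP_SHARED makes stores through this mapping visible to any
        // other process that maps the same region of the backstore file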
        map_flags = MAP_SHARED;
    }

    // to be able to simulate very large memories, the user can opt to
    // pass noreserve to mmap
    if (mmapUsingNoReserve) {
        map_flags |= MAP_NORESERVE;
    }

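    // for the private case this creates an anonymous, demand-zero mapping
    // (shm_fd is -1 and map_offset is 0); for the shared case it maps the
    // page-aligned slice of the backstore file reserved above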
    uint8_t* pmem = (uint8_t*) mmap(NULL, range.size(),
                                    PROT_READ | PROT_WRITE,
                                    map_flags, shm_fd, map_offset);

    if (pmem == (uint8_t*) MAP_FAILED) {
        perror("mmap");
        fatal("Could not mmap %d bytes for range %s!\n", range.size(),
              range.to_string());
    }

    // remember this backing store so we can checkpoint it and unmap
    // it appropriately
    backingStore.emplace_back(range, pmem,
                              conf_table_reported, in_addr_map, kvm_map,
                              shm_fd, map_offset);

    // point the memories to their backing store
    for (const auto& m : _memories) {
        DPRINTF(AddrRanges, "Mapping memory %s to backing store\n",
                m->name());
        m->setBackingStore(pmem);
    }
}

PhysicalMemory::~PhysicalMemory()
{
    // unmap the backing store
    for (auto& s : backingStore)
        munmap((char*)s.pmem, s.range.size());
}

bool
PhysicalMemory::isMemAddr(Addr addr) const
{
    return addrMap.contains(addr) != addrMap.end();
}

AddrRangeList
PhysicalMemory::getConfAddrRanges() const
{
    // this could be done once in the constructor, but since it is unlikely
    // to be called more than once the iteration should not be a problem
    AddrRangeList ranges;
    std::vector<AddrRange> intlv_ranges;
    for (const auto& r : addrMap) {
        if (r.second->isConfReported()) {
            // if the range is interleaved then save it for now
            if (r.first.interleaved()) {
                // if we already got interleaved ranges that are not
                // part of the same range, then first do a merge
                // before we add the new one
                if (!intlv_ranges.empty() &&
                    !intlv_ranges.back().mergesWith(r.first)) {
                    ranges.push_back(AddrRange(intlv_ranges));
                    intlv_ranges.clear();
                }
                intlv_ranges.push_back(r.first);
            } else {
                // keep the current range
                ranges.push_back(r.first);
            }
        }
    }

    // if there are still interleaved ranges waiting to be merged,
    // go ahead and do it
    if (!intlv_ranges.empty()) {
        ranges.push_back(AddrRange(intlv_ranges));
    }

    return ranges;
}

void
PhysicalMemory::access(PacketPtr pkt)
{
    assert(pkt->isRequest());
    const auto& m = addrMap.contains(pkt->getAddrRange());
    assert(m != addrMap.end());
    m->second->access(pkt);
}

void
PhysicalMemory::functionalAccess(PacketPtr pkt)
{
    assert(pkt->isRequest());
    const auto& m = addrMap.contains(pkt->getAddrRange());
    assert(m != addrMap.end());
    m->second->functionalAccess(pkt);
}

void
PhysicalMemory::serialize(CheckpointOut &cp) const
{
    // serialize all the locked addresses and their context ids
    std::vector<Addr> lal_addr;
    std::vector<ContextID> lal_cid;

    for (auto& m : memories) {
        const std::list<LockedAddr>& locked_addrs = m->getLockedAddrList();
        for (const auto& l : locked_addrs) {
            lal_addr.push_back(l.addr);
            lal_cid.push_back(l.contextId);
        }
    }

    SERIALIZE_CONTAINER(lal_addr);
    SERIALIZE_CONTAINER(lal_cid);

    // serialize the backing stores
    unsigned int nbr_of_stores = backingStore.size();
    SERIALIZE_SCALAR(nbr_of_stores);

    unsigned int store_id = 0;
    // store each backing store memory segment in a file
    for (auto& s : backingStore) {
        ScopedCheckpointSection sec(cp, csprintf("store%d", store_id));
        serializeStore(cp, store_id++, s.range, s.pmem);
    }
}

void
PhysicalMemory::serializeStore(CheckpointOut &cp, unsigned int store_id,
                               AddrRange range, uint8_t* pmem) const
{
    // we cannot use the address range for the name as the
    // memories that are not part of the address map can overlap
    std::string filename =
        name() + ".store" + std::to_string(store_id) + ".pmem";
    long range_size = range.size();

    DPRINTF(Checkpoint, "Serializing physical memory %s with size %d\n",
            filename, range_size);

    SERIALIZE_SCALAR(store_id);
    SERIALIZE_SCALAR(filename);
    SERIALIZE_SCALAR(range_size);

    // write memory file
    std::string filepath = CheckpointIn::dir() + "/" + filename.c_str();
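    // the image is written gzip-compressed through zlib's gzopen/gzwrite;
    // simulated memory is typically sparse, so it compresses well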
    gzFile compressed_mem = gzopen(filepath.c_str(), "wb");
    if (compressed_mem == NULL)
        fatal("Can't open physical memory checkpoint file '%s'\n",
              filename);

    uint64_t pass_size = 0;

    // gzwrite takes and returns an int, so write at most INT_MAX bytes
    // per pass to avoid overflowing the length argument
    for (uint64_t written = 0; written < range.size();
         written += pass_size) {
        pass_size = (uint64_t)INT_MAX < (range.size() - written) ?
            (uint64_t)INT_MAX : (range.size() - written);

        if (gzwrite(compressed_mem, pmem + written,
                    (unsigned int) pass_size) != (int) pass_size) {
            fatal("Write failed on physical memory checkpoint file '%s'\n",
                  filename);
        }
    }

    // close the compressed stream and check that the exit status
    // is zero
    if (gzclose(compressed_mem))
        fatal("Close failed on physical memory checkpoint file '%s'\n",
              filename);
}

void
PhysicalMemory::unserialize(CheckpointIn &cp)
{
    // unserialize the locked addresses and map them to the
    // appropriate memory controller
    std::vector<Addr> lal_addr;
    std::vector<ContextID> lal_cid;
    UNSERIALIZE_CONTAINER(lal_addr);
    UNSERIALIZE_CONTAINER(lal_cid);
    for (size_t i = 0; i < lal_addr.size(); ++i) {
        const auto& m = addrMap.contains(lal_addr[i]);
        m->second->addLockedAddr(LockedAddr(lal_addr[i], lal_cid[i]));
    }

    // unserialize the backing stores
    unsigned int nbr_of_stores;
    UNSERIALIZE_SCALAR(nbr_of_stores);

    for (unsigned int i = 0; i < nbr_of_stores; ++i) {
        ScopedCheckpointSection sec(cp, csprintf("store%d", i));
        unserializeStore(cp);
    }
}

void
PhysicalMemory::unserializeStore(CheckpointIn &cp)
{
    const uint32_t chunk_size = 16384;

    unsigned int store_id;
    UNSERIALIZE_SCALAR(store_id);

    std::string filename;
    UNSERIALIZE_SCALAR(filename);
    std::string filepath = cp.getCptDir() + "/" + filename;

    // open the compressed memory file
    gzFile compressed_mem = gzopen(filepath.c_str(), "rb");
    if (compressed_mem == NULL)
        fatal("Can't open physical memory checkpoint file '%s'", filename);

    // we've already got the actual backing store mapped
    uint8_t* pmem = backingStore[store_id].pmem;
    AddrRange range = backingStore[store_id].range;

    long range_size;
    UNSERIALIZE_SCALAR(range_size);

    DPRINTF(Checkpoint, "Unserializing physical memory %s with size %d\n",
            filename, range_size);

    if (range_size != range.size())
        fatal("Memory range size has changed! Saw %lld, expected %lld\n",
              range_size, range.size());

    uint64_t curr_size = 0;
    long* temp_page = new long[chunk_size];
    long* pmem_current;
    uint32_t bytes_read;
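    // read the compressed image back in chunk_size-byte pieces; gzread
    // returns the number of uncompressed bytes read, and 0 at end of stream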
    while (curr_size < range.size()) {
        bytes_read = gzread(compressed_mem, temp_page, chunk_size);
        if (bytes_read == 0)
            break;

        assert(bytes_read % sizeof(long) == 0);

        for (uint32_t x = 0; x < bytes_read / sizeof(long); x++) {
            // Only copy bytes that are non-zero, so we don't give
            // the VM system hell
            if (*(temp_page + x) != 0) {
                pmem_current = (long*)(pmem + curr_size + x * sizeof(long));
                *pmem_current = *(temp_page + x);
            }
        }
        curr_size += bytes_read;
    }

    delete[] temp_page;

    if (gzclose(compressed_mem))
        fatal("Close failed on physical memory checkpoint file '%s'\n",
              filename);
}

} // namespace memory
} // namespace gem5