42 #include <sys/types.h> 54 #include "debug/AddrRanges.hh" 55 #include "debug/Checkpoint.hh" 64 #if defined(__APPLE__) || defined(__FreeBSD__) 66 #define MAP_NORESERVE 0 74 bool mmap_using_noreserve) :
75 _name(_name), size(0), mmapUsingNoReserve(mmap_using_noreserve)
77 if (mmap_using_noreserve)
78 warn(
"Not reserving swap space. May cause SIGSEGV on actual usage\n");
82 for (
const auto&
m : _memories) {
84 if (
m->isInAddrMap()) {
85 memories.push_back(
m);
92 fatal_if(addrMap.insert(
m->getAddrRange(),
m) == addrMap.end(),
93 "Memory address range for %s is overlapping\n",
100 "Skipping memory %s that is not in global address map\n",
104 fatal_if(
m->getAddrRange().interleaved(),
105 "Memory %s that is not in the global address map cannot " 106 "be interleaved\n",
m->name());
112 createBackingStore(
m->getAddrRange(), unmapped_mems,
113 m->isConfReported(),
m->isInAddrMap(),
123 for (
const auto&
r : addrMap) {
126 if (!
r.second->isNull()) {
128 if (
r.first.interleaved()) {
132 if (!intlv_ranges.empty() &&
133 !intlv_ranges.back().mergesWith(
r.first)) {
137 for (
const auto&
c : curr_memories)
141 fatal(
"Inconsistent flags in an interleaved " 144 createBackingStore(merged_range, curr_memories,
148 intlv_ranges.clear();
149 curr_memories.clear();
151 intlv_ranges.push_back(
r.first);
152 curr_memories.push_back(
r.second);
155 createBackingStore(
r.first, single_memory,
156 r.second->isConfReported(),
157 r.second->isInAddrMap(),
158 r.second->isKvmMap());
165 if (!intlv_ranges.empty()) {
169 for (
const auto&
c : curr_memories)
173 fatal(
"Inconsistent flags in an interleaved " 176 createBackingStore(merged_range, curr_memories,
185 bool conf_table_reported,
186 bool in_addr_map,
bool kvm_map)
189 "Cannot create backing store for interleaved range %s\n",
193 DPRINTF(AddrRanges,
"Creating backing store for range %s with size %d\n",
195 int map_flags = MAP_ANON | MAP_PRIVATE;
199 if (mmapUsingNoReserve) {
200 map_flags |= MAP_NORESERVE;
203 uint8_t* pmem = (uint8_t*) mmap(NULL, range.
size(),
204 PROT_READ | PROT_WRITE,
207 if (pmem == (uint8_t*) MAP_FAILED) {
209 fatal(
"Could not mmap %d bytes for range %s!\n", range.
size(),
215 backingStore.emplace_back(range, pmem,
216 conf_table_reported, in_addr_map, kvm_map);
219 for (
const auto&
m : _memories) {
220 DPRINTF(AddrRanges,
"Mapping memory %s to backing store\n",
222 m->setBackingStore(pmem);
229 for (
auto&
s : backingStore)
230 munmap((
char*)
s.pmem,
s.range.size());
236 return addrMap.contains(addr) != addrMap.end();
246 for (
const auto&
r : addrMap) {
247 if (
r.second->isConfReported()) {
249 if (
r.first.interleaved()) {
253 if (!intlv_ranges.empty() &&
254 !intlv_ranges.back().mergesWith(
r.first)) {
255 ranges.push_back(
AddrRange(intlv_ranges));
256 intlv_ranges.clear();
258 intlv_ranges.push_back(
r.first);
261 ranges.push_back(
r.first);
268 if (!intlv_ranges.empty()) {
269 ranges.push_back(
AddrRange(intlv_ranges));
280 assert(m != addrMap.end());
281 m->second->access(pkt);
289 assert(m != addrMap.end());
290 m->second->functionalAccess(pkt);
300 for (
auto&
m : memories) {
302 for (
const auto&
l : locked_addrs) {
303 lal_addr.push_back(
l.addr);
304 lal_cid.push_back(
l.contextId);
312 unsigned int nbr_of_stores = backingStore.size();
315 unsigned int store_id = 0;
317 for (
auto&
s : backingStore) {
319 serializeStore(cp, store_id++,
s.range,
s.pmem);
/**
 * Serialize one backing store segment to a gzip-compressed file named
 * after this object and the store id.
 *
 * NOTE(review): reconstructed from a garbled source dump; the
 * SERIALIZE_SCALAR and filepath lines were not visible — verify
 * upstream.
 *
 * @param cp checkpoint output stream
 * @param store_id index of the store within backingStore
 * @param range address range covered by the store
 * @param pmem host pointer to the store's bytes
 */
void
PhysicalMemory::serializeStore(CheckpointOut &cp, unsigned int store_id,
                               AddrRange range, uint8_t* pmem) const
{
    // The address range cannot name the file: memories outside the
    // global address map may overlap, so use the store id instead.
    std::string filename =
        name() + ".store" + std::to_string(store_id) + ".pmem";
    long range_size = range.size();

    DPRINTF(Checkpoint, "Serializing physical memory %s with size %d\n",
            filename, range_size);

    SERIALIZE_SCALAR(store_id);
    SERIALIZE_SCALAR(filename);
    SERIALIZE_SCALAR(range_size);

    // Write the (compressed) memory file.
    std::string filepath = CheckpointIn::dir() + "/" + filename;
    gzFile compressed_mem = gzopen(filepath.c_str(), "wb");
    if (compressed_mem == NULL)
        fatal("Can't open physical memory checkpoint file '%s'\n",
              filename);

    uint64_t pass_size = 0;

    // gzwrite takes an int length, so write in passes of at most
    // INT_MAX bytes.
    for (uint64_t written = 0; written < range.size();
         written += pass_size) {
        pass_size = (uint64_t)INT_MAX < (range.size() - written) ?
            (uint64_t)INT_MAX : (range.size() - written);

        if (gzwrite(compressed_mem, pmem + written,
                    (unsigned int) pass_size) != (int) pass_size) {
            fatal("Write failed on physical memory checkpoint file '%s'\n",
                  filename);
        }
    }

    // Close the compressed stream; a non-zero status is fatal since it
    // means the checkpoint is incomplete.
    if (gzclose(compressed_mem))
        fatal("Close failed on physical memory checkpoint file '%s'\n",
              filename);
}
378 for (
size_t i = 0;
i < lal_addr.size(); ++
i) {
379 const auto&
m = addrMap.contains(lal_addr[
i]);
380 m->second->addLockedAddr(
LockedAddr(lal_addr[i], lal_cid[i]));
384 unsigned int nbr_of_stores;
387 for (
unsigned int i = 0;
i < nbr_of_stores; ++
i) {
389 unserializeStore(cp);
/**
 * Unserialize one backing store, identified by the current checkpoint
 * section, decompressing it chunk by chunk into the already-mapped
 * store. Only non-zero words are written back so untouched pages stay
 * demand-zero for the host VM system.
 *
 * NOTE(review): reconstructed from a garbled source dump; the
 * UNSERIALIZE_SCALAR lines, the zero-read break, and the temp-buffer
 * declarations were not visible — verify upstream. The visible
 * fragments allocate `temp_page` with new[] and never free it; a
 * matching delete[] is added here to fix the leak.
 */
void
PhysicalMemory::unserializeStore(CheckpointIn &cp)
{
    const uint32_t chunk_size = 16384;

    unsigned int store_id;
    UNSERIALIZE_SCALAR(store_id);

    std::string filename;
    UNSERIALIZE_SCALAR(filename);
    std::string filepath = cp.getCptDir() + "/" + filename;

    // Open the compressed memory file.
    gzFile compressed_mem = gzopen(filepath.c_str(), "rb");
    if (compressed_mem == NULL)
        fatal("Can't open physical memory checkpoint file '%s'", filename);

    // The backing store is already mmap'ed; restore into it in place.
    uint8_t* pmem = backingStore[store_id].pmem;
    AddrRange range = backingStore[store_id].range;

    long range_size;
    UNSERIALIZE_SCALAR(range_size);

    DPRINTF(Checkpoint, "Unserializing physical memory %s with size %d\n",
            filename, range_size);

    if (range_size != range.size())
        fatal("Memory range size has changed! Saw %lld, expected %lld\n",
              range_size, range.size());

    uint64_t curr_size = 0;
    long* temp_page = new long[chunk_size];
    long* pmem_current;
    uint32_t bytes_read;
    while (curr_size < range.size()) {
        bytes_read = gzread(compressed_mem, temp_page, chunk_size);
        if (bytes_read == 0)
            break;

        // The stream is written in whole words.
        assert(bytes_read % sizeof(long) == 0);

        for (uint32_t x = 0; x < bytes_read / sizeof(long); x++) {
            // Only copy bytes that are non-zero, so we don't give
            // the VM system hell.
            if (*(temp_page + x) != 0) {
                pmem_current =
                    (long*)(pmem + curr_size + x * sizeof(long));
                *pmem_current = *(temp_page + x);
            }
        }
        curr_size += bytes_read;
    }

    // Release the staging buffer (leaked in the original fragments).
    delete[] temp_page;

    if (gzclose(compressed_mem))
        fatal("Close failed on physical memory checkpoint file '%s'\n",
              filename);
}
void serialize(CheckpointOut &cp) const override
Serialize all the memories in the system.
#define UNSERIALIZE_CONTAINER(member)
#define fatal(...)
This implements a cprintf based fatal() function.
const std::string name() const
Locked address class that represents a physical address and a context id.
bool isConfReported() const
Should this memory be passed to the kernel and part of the OS physical memory layout.
Overload hash function for BasicBlockRange type.
std::unordered_map< int, std::stack< Addr > > locked_addrs
AbstractMemory declaration.
The AddrRange class encapsulates an address range, and supports a number of tests to check if two ran...
#define UNSERIALIZE_SCALAR(scalar)
std::string csprintf(const char *format, const Args &...args)
void createBackingStore(AddrRange range, const std::vector< AbstractMemory *> &_memories, bool conf_table_reported, bool in_addr_map, bool kvm_map)
Create the memory region providing the backing store for a given address range that corresponds to a ...
bool isMemAddr(Addr addr) const
Check if a physical address is within a range of a memory that is part of the global address map...
AddrRangeList getConfAddrRanges() const
Get the memory ranges for all memories that are to be reported to the configuration table...
AddrRange getAddrRange() const
Get address range to which this packet belongs.
#define fatal_if(cond,...)
Conditional fatal macro that checks the supplied condition and only causes a fatal error if the condi...
PhysicalMemory(const PhysicalMemory &)
void unserialize(CheckpointIn &cp) override
Unserialize the memories in the system.
uint64_t Addr
Address type This will probably be moved somewhere else in the near future.
void access(PacketPtr pkt)
Perform an untimed memory access and update all the state (e.g.
A Packet is used to encapsulate a transfer between two objects in the memory system (e.g. the L1 and L2 cache).
#define SERIALIZE_SCALAR(scalar)
static std::string dir()
Get the current checkout directory name.
void serializeStore(CheckpointOut &cp, unsigned int store_id, AddrRange range, uint8_t *pmem) const
Serialize a specific store.
~PhysicalMemory()
Unmap all the backing store we have used.
#define SERIALIZE_CONTAINER(member)
bool interleaved() const
Determine if the range is interleaved or not.
std::ostream CheckpointOut
void functionalAccess(PacketPtr pkt)
Perform an untimed memory read or write without changing anything but the memory itself.
const std::string getCptDir()
An abstract memory represents a contiguous block of physical memory, with an associated address range...
bool isInAddrMap() const
Some memories are used as shadow memories or should for other reasons not be part of the global addre...
Addr size() const
Get the size of the address range.
bool isKvmMap() const
When shadow memories are in use, KVM may want to make one or the other, but cannot map both into the ...
void unserializeStore(CheckpointIn &cp)
Unserialize a specific backing store, identified by a section.
std::string to_string() const
Get a string representation of the range.
Scoped checkpoint section helper class.
#define panic_if(cond,...)
Conditional panic macro that checks the supplied condition and only panics if the condition is true a...
const std::string to_string(sc_enc enc)