gem5 v23.0.0.1
mem_state.cc
1/*
2 * Copyright (c) 2017-2020 Advanced Micro Devices, Inc.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29#include "sim/mem_state.hh"
30
31#include <cassert>
32
33#include "arch/generic/mmu.hh"
34#include "debug/Vma.hh"
 35#include "mem/se_translating_port_proxy.hh"
 36#include "sim/process.hh"
38#include "sim/system.hh"
39#include "sim/vma.hh"
40
41namespace gem5
42{
43
44MemState::MemState(Process *owner, Addr brk_point, Addr stack_base,
45 Addr max_stack_size, Addr next_thread_stack_base,
46 Addr mmap_end)
47 : _ownerProcess(owner),
48 _pageBytes(owner->pTable->pageSize()), _brkPoint(brk_point),
49 _stackBase(stack_base), _stackSize(max_stack_size),
50 _maxStackSize(max_stack_size), _stackMin(stack_base - max_stack_size),
51 _nextThreadStackBase(next_thread_stack_base),
52 _mmapEnd(mmap_end), _endBrkPoint(brk_point)
53{
54}
55
 56MemState &
 57MemState::operator=(const MemState &in)
 58{
59 if (this == &in)
60 return *this;
61
 62 _pageBytes = in._pageBytes;
 63 _brkPoint = in._brkPoint;
 64 _stackBase = in._stackBase;
 65 _stackSize = in._stackSize;
 66 _maxStackSize = in._maxStackSize;
 67 _stackMin = in._stackMin;
 68 _nextThreadStackBase = in._nextThreadStackBase;
 69 _mmapEnd = in._mmapEnd;
 70 _endBrkPoint = in._endBrkPoint;
 71 _vmaList = in._vmaList; /* This assignment does a deep copy. */
72
73 return *this;
74}
75
76void
 77MemState::resetOwner(Process *owner)
 78{
79 _ownerProcess = owner;
80}
81
82bool
83MemState::isUnmapped(Addr start_addr, Addr length)
84{
85 Addr end_addr = start_addr + length;
86 const AddrRange range(start_addr, end_addr);
87 for (const auto &vma : _vmaList) {
88 if (vma.intersects(range))
89 return false;
90 }
91
96 for (auto start = start_addr; start < end_addr;
97 start += _pageBytes) {
98 if (_ownerProcess->pTable->lookup(start) != nullptr) {
99 panic("Someone allocated physical memory at VA %p without "
100 "creating a VMA!\n", start);
101 return false;
102 }
103 }
104 return true;
105}
106
107void
 108MemState::updateBrkRegion(Addr old_brk, Addr new_brk)
 109{
118 if (new_brk < old_brk) {
119 _brkPoint = new_brk;
120 return;
121 }
122
129 auto page_aligned_brk = roundUp(new_brk, _pageBytes);
130
141 if (page_aligned_brk > _endBrkPoint) {
142 auto length = page_aligned_brk - _endBrkPoint;
148 if (!isUnmapped(_endBrkPoint, length)) {
149 return;
150 }
151
159 mapRegion(_endBrkPoint, length, "heap");
160 _endBrkPoint = page_aligned_brk;
161 }
162
163 _brkPoint = new_brk;
164}
165
166void
167MemState::mapRegion(Addr start_addr, Addr length,
168 const std::string& region_name, int sim_fd, Addr offset)
169{
170 DPRINTF(Vma, "memstate: creating vma (%s) [0x%x - 0x%x]\n",
171 region_name.c_str(), start_addr, start_addr + length);
172
177 assert(isUnmapped(start_addr, length));
178
182 _vmaList.emplace_back(AddrRange(start_addr, start_addr + length),
183 _pageBytes, region_name, sim_fd, offset);
184}
185
186void
187MemState::unmapRegion(Addr start_addr, Addr length)
188{
189 Addr end_addr = start_addr + length;
190 const AddrRange range(start_addr, end_addr);
191
192 auto vma = std::begin(_vmaList);
193 while (vma != std::end(_vmaList)) {
194 if (vma->isStrictSuperset(range)) {
195 DPRINTF(Vma, "memstate: split vma [0x%x - 0x%x] into "
196 "[0x%x - 0x%x] and [0x%x - 0x%x]\n",
197 vma->start(), vma->end(),
198 vma->start(), start_addr,
199 end_addr, vma->end());
204 _vmaList.push_back(*vma);
205 _vmaList.back().sliceRegionRight(start_addr);
206
210 vma->sliceRegionLeft(end_addr);
211
216 break;
217 } else if (vma->isSubset(range)) {
218 DPRINTF(Vma, "memstate: destroying vma [0x%x - 0x%x]\n",
219 vma->start(), vma->end());
223 vma = _vmaList.erase(vma);
224
225 continue;
226
227 } else if (vma->intersects(range)) {
231 if (vma->start() < start_addr) {
232 DPRINTF(Vma, "memstate: resizing vma [0x%x - 0x%x] "
233 "into [0x%x - 0x%x]\n",
234 vma->start(), vma->end(),
235 vma->start(), start_addr);
239 vma->sliceRegionRight(start_addr);
240 } else {
241 DPRINTF(Vma, "memstate: resizing vma [0x%x - 0x%x] "
242 "into [0x%x - 0x%x]\n",
243 vma->start(), vma->end(),
244 end_addr, vma->end());
248 vma->sliceRegionLeft(end_addr);
249 }
250 }
251
252 vma++;
253 }
254
263 for (auto *tc: _ownerProcess->system->threads) {
264 tc->getMMUPtr()->flushAll();
265 }
266
267 do {
268 if (!_ownerProcess->pTable->isUnmapped(start_addr, _pageBytes))
269 _ownerProcess->pTable->unmap(start_addr, _pageBytes);
270
271 start_addr += _pageBytes;
272
279 length -= _pageBytes;
280 } while (length > 0);
281}
282
283void
284MemState::remapRegion(Addr start_addr, Addr new_start_addr, Addr length)
285{
286 Addr end_addr = start_addr + length;
287 const AddrRange range(start_addr, end_addr);
288
289 auto vma = std::begin(_vmaList);
290 while (vma != std::end(_vmaList)) {
291 if (vma->isStrictSuperset(range)) {
295 _vmaList.push_back(*vma);
296 _vmaList.back().sliceRegionRight(start_addr);
297
301 _vmaList.push_back(*vma);
302 _vmaList.back().sliceRegionLeft(end_addr);
303
308 vma->sliceRegionLeft(start_addr);
309 vma->sliceRegionRight(end_addr);
310 vma->remap(new_start_addr);
311
316 break;
317 } else if (vma->isSubset(range)) {
321 vma->remap(vma->start() - start_addr + new_start_addr);
322 } else if (vma->intersects(range)) {
326 _vmaList.push_back(*vma);
327
328 if (vma->start() < start_addr) {
332 _vmaList.back().sliceRegionRight(start_addr);
333
337 vma->sliceRegionLeft(start_addr);
338 vma->remap(new_start_addr);
339 } else {
343 _vmaList.back().sliceRegionLeft(end_addr);
344
348 vma->sliceRegionRight(end_addr);
349 vma->remap(new_start_addr + vma->start() - start_addr);
350 }
351 }
352
353 vma++;
354 }
355
364 for (auto *tc: _ownerProcess->system->threads) {
365 tc->getMMUPtr()->flushAll();
366 }
367
368 do {
369 if (!_ownerProcess->pTable->isUnmapped(start_addr, _pageBytes))
370 _ownerProcess->pTable->remap(start_addr, _pageBytes,
371 new_start_addr);
372
373 start_addr += _pageBytes;
374 new_start_addr += _pageBytes;
375
382 length -= _pageBytes;
383 } while (length > 0);
384}
385
386bool
 387MemState::fixupFault(Addr vaddr)
 388{
393 for (const auto &vma : _vmaList) {
394 if (vma.contains(vaddr)) {
395 Addr vpage_start = roundDown(vaddr, _pageBytes);
396 _ownerProcess->allocateMem(vpage_start, _pageBytes);
397
404 if (vma.hasHostBuf()) {
409 for (auto &cid : _ownerProcess->contextIds) {
410 auto *tc = _ownerProcess->system->threads[cid];
 411 SETranslatingPortProxy
 412 virt_mem(tc, SETranslatingPortProxy::Always);
413 vma.fillMemPages(vpage_start, _pageBytes, virt_mem);
414 }
415 }
416 return true;
417 }
418 }
419
 427 if (vaddr >= _stackMin && vaddr < _stackBase) {
 428 _ownerProcess->allocateMem(roundDown(vaddr, _pageBytes), _pageBytes);
 429 return true;
 430 }
431
436 if (vaddr < _stackMin && vaddr >= _stackBase - _maxStackSize) {
 437 while (vaddr < _stackMin) {
 438 _stackMin -= _pageBytes;
 439 if (_stackBase - _stackMin > _maxStackSize) {
 440 fatal("Maximum stack size exceeded\n");
 441 }
 442 _ownerProcess->allocateMem(_stackMin, _pageBytes);
 443 inform("Increasing stack size by one page.");
 444 }
445 return true;
446 }
447
448 return false;
449}
450
451Addr
 452MemState::extendMmap(Addr length)
 453{
454 Addr start = _mmapEnd;
455
 456 if (_ownerProcess->mmapGrowsDown())
 457 start = _mmapEnd - length;
458
459 // Look for a contiguous region of free virtual memory. We can't assume
460 // that the region beyond mmap_end is free because of fixed mappings from
461 // the user.
462 while (!isUnmapped(start, length)) {
463 DPRINTF(Vma, "memstate: cannot extend vma for mmap region at %p. "
464 "Virtual address range is already reserved! Skipping a page "
465 "and trying again!\n", start);
466 start = (_ownerProcess->mmapGrowsDown()) ? start - _pageBytes :
467 start + _pageBytes;
468 }
469
470 DPRINTF(Vma, "memstate: extending mmap region (old %p) (new %p)\n",
471 _mmapEnd,
472 _ownerProcess->mmapGrowsDown() ? start : start + length);
473
474 _mmapEnd = _ownerProcess->mmapGrowsDown() ? start : start + length;
475
476 return start;
477}
478
479std::string
 480MemState::printVmaList()
 481{
482 std::stringstream file_content;
483
484 for (auto vma : _vmaList) {
485 std::stringstream line;
486 line << std::hex << vma.start() << "-";
487 line << std::hex << vma.end() << " ";
488 line << "r-xp 00000000 00:00 0 ";
489 line << "[" << vma.getName() << "]" << std::endl;
490 file_content << line.str();
491 }
492
493 return file_content.str();
494}
495
496} // namespace gem5
#define DPRINTF(x,...)
Definition trace.hh:210
The AddrRange class encapsulates an address range, and supports a number of tests to check if two ran...
Definition addr_range.hh:82
const Entry * lookup(Addr vaddr)
Lookup function.
virtual void remap(Addr vaddr, int64_t size, Addr new_vaddr)
Definition page_table.cc:75
virtual void unmap(Addr vaddr, int64_t size)
virtual bool isUnmapped(Addr vaddr, int64_t size)
Check if any pages in a region are already allocated.
This class holds the memory state for the Process class and all of its derived, architecture-specific...
Definition mem_state.hh:68
std::list< VMA > _vmaList
The _vmaList member is a list of virtual memory areas in the target application space that have been ...
Definition mem_state.hh:297
Process * _ownerProcess
Owner process of MemState.
Definition mem_state.hh:269
Addr extendMmap(Addr length)
Definition mem_state.cc:452
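A minimal usage sketch (not part of the gem5 tree): a hypothetical helper that pairs extendMmap() with mapRegion() the way an mmap-style handler would reserve anonymous address space. The helper name and the MemState pointer are assumptions; only the two MemState calls mirror the interface above.

#include "sim/mem_state.hh"

// Hypothetical helper, for illustration only.
gem5::Addr
reserveAnonRegion(gem5::MemState *mem_state, gem5::Addr length)
{
    // Pick a free, contiguous chunk of virtual address space near the
    // current mmap end; extendMmap() also advances _mmapEnd internally.
    gem5::Addr start = mem_state->extendMmap(length);

    // Record the region as a VMA. No physical pages are touched here;
    // they are allocated lazily when fixupFault() sees the first access.
    mem_state->mapRegion(start, length, "mmap");
    return start;
}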
bool fixupFault(Addr vaddr)
Attempt to fix up a fault at vaddr by allocating a page.
Definition mem_state.cc:387
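A hedged sketch of the caller side, assuming the faulting virtual address has already been extracted and that process->memState is the owning Process' MemState; the hook name is hypothetical.

#include "sim/mem_state.hh"
#include "sim/process.hh"

// Hypothetical fault hook, for illustration only.
bool
handleDataFault(gem5::Process *process, gem5::Addr vaddr)
{
    // Returns true if vaddr hit a known VMA (or grew the stack) and a
    // backing page was allocated; false indicates a genuine segfault.
    return process->memState->fixupFault(vaddr);
}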
Addr _endBrkPoint
Keeps record of the furthest mapped heap location.
Definition mem_state.hh:283
Addr _nextThreadStackBase
Definition mem_state.hh:277
void updateBrkRegion(Addr old_brk, Addr new_brk)
Change the end of a process' program break.
Definition mem_state.cc:108
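A minimal sketch of a brk-style caller (not the gem5 syscall implementation); the handler name is hypothetical, and both break values would normally come from the process state and the syscall argument.

#include "sim/mem_state.hh"

// Hypothetical brk handler, for illustration only.
void
doBrk(gem5::MemState *mem_state, gem5::Addr old_brk, gem5::Addr new_brk)
{
    // Shrinking just lowers the break; growing also maps a "heap" VMA
    // over any newly spanned pages. If the requested range is already
    // occupied, updateBrkRegion() returns without moving the break.
    mem_state->updateBrkRegion(old_brk, new_brk);
}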
std::string printVmaList()
Print the list of VMAs in a format similar to /proc/self/maps.
Definition mem_state.cc:480
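An illustrative dump of the VMA list; the process pointer and the addresses in the sample line are assumptions, but the line format follows directly from the stream code in the listing above.

#include <iostream>

#include "sim/mem_state.hh"
#include "sim/process.hh"

// Hypothetical debug helper, for illustration only.
void
dumpMaps(gem5::Process *process)
{
    // Each VMA prints as "<start>-<end> r-xp 00000000 00:00 0 [<name>]",
    // e.g. "10000000-10002000 r-xp 00000000 00:00 0 [heap]".
    std::cout << process->memState->printVmaList();
}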
bool isUnmapped(Addr start_addr, Addr length)
Check if any page in the virtual address range from start_addr to start_addr + length is already mapp...
Definition mem_state.cc:83
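A hedged sketch of a fixed-address mapping guard, mirroring the assert in mapRegion() above; the function name and region name are illustrative.

#include "sim/mem_state.hh"

// Hypothetical MAP_FIXED-style guard, for illustration only.
bool
tryMapFixed(gem5::MemState *mem_state, gem5::Addr addr, gem5::Addr length)
{
    // Refuse rather than clobber an existing VMA or page-table entry.
    if (!mem_state->isUnmapped(addr, length))
        return false;

    mem_state->mapRegion(addr, length, "mmap_fixed");
    return true;
}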
void resetOwner(Process *owner)
Change the Process owner in case this MemState is copied.
Definition mem_state.cc:77
MemState & operator=(const MemState &in)
Definition mem_state.cc:57
MemState(Process *owner, Addr brk_point, Addr stack_base, Addr max_stack_size, Addr next_thread_stack_base, Addr mmap_end)
Definition mem_state.cc:44
void unmapRegion(Addr start_addr, Addr length)
Unmap a pre-existing region.
Definition mem_state.cc:187
void remapRegion(Addr start_addr, Addr new_start_addr, Addr length)
Remap a pre-existing region.
Definition mem_state.cc:284
void mapRegion(Addr start_addr, Addr length, const std::string &name="anon", int sim_fd=-1, Addr offset=0)
Add a new memory region.
Definition mem_state.cc:167
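A short sketch of creating and partially tearing down a file-backed region, assuming a hypothetical host file descriptor, a free start address, and a 4 KiB page size.

#include "sim/mem_state.hh"

// Illustration only; the addresses and sizes are assumptions.
void
mapAndUnmapExample(gem5::MemState *mem_state, int sim_fd)
{
    const gem5::Addr start  = 0x20000000; // assumed to be unmapped
    const gem5::Addr length = 0x4000;     // four 4 KiB pages

    // Create a VMA backed by the host file descriptor at offset 0.
    mem_state->mapRegion(start, length, "mmap_file", sim_fd, 0);

    // Unmap the middle two pages: unmapRegion() splits the VMA in two
    // and removes any populated page-table entries in that window.
    mem_state->unmapRegion(start + 0x1000, 0x2000);
}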
virtual bool mmapGrowsDown() const
Does mmap region grow upward or downward from mmapEnd? Most platforms grow downward,...
Definition process.hh:146
void allocateMem(Addr vaddr, int64_t size, bool clobber=false)
Definition process.cc:317
std::vector< ContextID > contextIds
Definition process.hh:170
System * system
Definition process.hh:173
EmulationPageTable * pTable
Definition process.hh:184
Threads threads
Definition system.hh:310
static constexpr T roundDown(const T &val, const U &align)
This function is used to align addresses in memory.
Definition intmath.hh:279
static constexpr T roundUp(const T &val, const U &align)
This function is used to align addresses in memory.
Definition intmath.hh:260
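A worked example of the alignment helpers as they are used in this file, assuming a 4 KiB (0x1000-byte) page size.

#include "base/intmath.hh"

// roundDown() clears the offset within the page; roundUp() advances to
// the next page boundary unless the value is already aligned.
static_assert(gem5::roundDown(0x1234, 0x1000) == 0x1000);
static_assert(gem5::roundUp(0x1234, 0x1000) == 0x2000);
static_assert(gem5::roundUp(0x2000, 0x1000) == 0x2000);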
#define panic(...)
This implements a cprintf based panic() function.
Definition logging.hh:188
#define fatal(...)
This implements a cprintf based fatal() function.
Definition logging.hh:200
#define inform(...)
Definition logging.hh:257
Bitfield< 23, 0 > offset
Definition types.hh:144
Reference material can be found at the JEDEC website: UFS standard http://www.jedec....
uint64_t Addr
Address type This will probably be moved somewhere else in the near future.
Definition types.hh:147

Generated on Mon Jul 10 2023 15:32:05 for gem5 by doxygen 1.9.7