gem5  v20.0.0.3
mem_state.cc
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2017-2020 Advanced Micro Devices, Inc.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions are
7  * met: redistributions of source code must retain the above copyright
8  * notice, this list of conditions and the following disclaimer;
9  * redistributions in binary form must reproduce the above copyright
10  * notice, this list of conditions and the following disclaimer in the
11  * documentation and/or other materials provided with the distribution;
12  * neither the name of the copyright holders nor the names of its
13  * contributors may be used to endorse or promote products derived from
14  * this software without specific prior written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27  */
28 
#include "sim/mem_state.hh"

#include <cassert>

#include "arch/generic/tlb.hh"
#include "debug/Vma.hh"
#include "mem/se_translating_port_proxy.hh"
#include "sim/process.hh"
#include "sim/system.hh"
#include "sim/vma.hh"
40 
41 MemState::MemState(Process *owner, Addr brk_point, Addr stack_base,
42  Addr max_stack_size, Addr next_thread_stack_base,
43  Addr mmap_end)
44  : _ownerProcess(owner),
45  _pageBytes(owner->system->getPageBytes()), _brkPoint(brk_point),
46  _stackBase(stack_base), _maxStackSize(max_stack_size),
47  _nextThreadStackBase(next_thread_stack_base),
48  _mmapEnd(mmap_end), _endBrkPoint(brk_point)
49 {
50 }
51 
52 MemState&
54 {
55  if (this == &in)
56  return *this;
57 
59  _brkPoint = in._brkPoint;
63  _stackMin = in._stackMin;
65  _mmapEnd = in._mmapEnd;
67  _vmaList = in._vmaList; /* This assignment does a deep copy. */
68 
69  return *this;
70 }
71 
72 void
74 {
75  _ownerProcess = owner;
76 }
77 
78 bool
80 {
81  Addr end_addr = start_addr + length;
82  const AddrRange range(start_addr, end_addr);
83  for (const auto &vma : _vmaList) {
84  if (vma.intersects(range))
85  return false;
86  }
87 
92  for (auto start = start_addr; start < end_addr;
93  start += _pageBytes) {
94  if (_ownerProcess->pTable->lookup(start) != nullptr) {
95  panic("Someone allocated physical memory at VA %p without "
96  "creating a VMA!\n", start);
97  return false;
98  }
99  }
100  return true;
101 }
102 
103 void
105 {
114  if (new_brk < old_brk) {
115  _brkPoint = new_brk;
116  return;
117  }
118 
125  auto page_aligned_brk = roundUp(new_brk, _pageBytes);
126 
137  if (page_aligned_brk > _endBrkPoint) {
138  auto length = page_aligned_brk - _endBrkPoint;
144  if (!isUnmapped(_endBrkPoint, length)) {
145  return;
146  }
147 
155  mapRegion(_endBrkPoint, length, "heap");
156  _endBrkPoint = page_aligned_brk;
157  }
158 
159  _brkPoint = new_brk;
160 }
161 
162 void
164  const std::string& region_name, int sim_fd, Addr offset)
165 {
166  DPRINTF(Vma, "memstate: creating vma (%s) [0x%x - 0x%x]\n",
167  region_name.c_str(), start_addr, start_addr + length);
168 
173  assert(isUnmapped(start_addr, length));
174 
178  _vmaList.emplace_back(AddrRange(start_addr, start_addr + length),
179  _pageBytes, region_name, sim_fd, offset);
180 }
181 
182 void
184 {
185  Addr end_addr = start_addr + length;
186  const AddrRange range(start_addr, end_addr);
187 
188  auto vma = std::begin(_vmaList);
189  while (vma != std::end(_vmaList)) {
190  if (vma->isStrictSuperset(range)) {
191  DPRINTF(Vma, "memstate: split vma [0x%x - 0x%x] into "
192  "[0x%x - 0x%x] and [0x%x - 0x%x]\n",
193  vma->start(), vma->end(),
194  vma->start(), start_addr,
195  end_addr, vma->end());
200  _vmaList.push_back(*vma);
201  _vmaList.back().sliceRegionRight(start_addr);
202 
206  vma->sliceRegionLeft(end_addr);
207 
212  break;
213  } else if (vma->isSubset(range)) {
214  DPRINTF(Vma, "memstate: destroying vma [0x%x - 0x%x]\n",
215  vma->start(), vma->end());
219  vma = _vmaList.erase(vma);
220 
221  continue;
222 
223  } else if (vma->intersects(range)) {
227  if (vma->start() < start_addr) {
228  DPRINTF(Vma, "memstate: resizing vma [0x%x - 0x%x] "
229  "into [0x%x - 0x%x]\n",
230  vma->start(), vma->end(),
231  vma->start(), start_addr);
235  vma->sliceRegionRight(start_addr);
236  } else {
237  DPRINTF(Vma, "memstate: resizing vma [0x%x - 0x%x] "
238  "into [0x%x - 0x%x]\n",
239  vma->start(), vma->end(),
240  end_addr, vma->end());
244  vma->sliceRegionLeft(end_addr);
245  }
246  }
247 
248  vma++;
249  }
250 
259  for (auto tc : _ownerProcess->system->threadContexts) {
260  tc->getDTBPtr()->flushAll();
261  tc->getITBPtr()->flushAll();
262  }
263 
264  do {
265  if (!_ownerProcess->pTable->isUnmapped(start_addr, _pageBytes))
266  _ownerProcess->pTable->unmap(start_addr, _pageBytes);
267 
268  start_addr += _pageBytes;
269 
276  length -= _pageBytes;
277  } while (length > 0);
278 }
279 
280 void
281 MemState::remapRegion(Addr start_addr, Addr new_start_addr, Addr length)
282 {
283  Addr end_addr = start_addr + length;
284  const AddrRange range(start_addr, end_addr);
285 
286  auto vma = std::begin(_vmaList);
287  while (vma != std::end(_vmaList)) {
288  if (vma->isStrictSuperset(range)) {
292  _vmaList.push_back(*vma);
293  _vmaList.back().sliceRegionRight(start_addr);
294 
298  _vmaList.push_back(*vma);
299  _vmaList.back().sliceRegionLeft(end_addr);
300 
305  vma->sliceRegionLeft(start_addr);
306  vma->sliceRegionRight(end_addr);
307  vma->remap(new_start_addr);
308 
313  break;
314  } else if (vma->isSubset(range)) {
318  vma->remap(vma->start() - start_addr + new_start_addr);
319  } else if (vma->intersects(range)) {
323  _vmaList.push_back(*vma);
324 
325  if (vma->start() < start_addr) {
329  _vmaList.back().sliceRegionRight(start_addr);
330 
334  vma->sliceRegionLeft(start_addr);
335  vma->remap(new_start_addr);
336  } else {
340  _vmaList.back().sliceRegionLeft(end_addr);
341 
345  vma->sliceRegionRight(end_addr);
346  vma->remap(new_start_addr + vma->start() - start_addr);
347  }
348  }
349 
350  vma++;
351  }
352 
361  for (auto tc : _ownerProcess->system->threadContexts) {
362  tc->getDTBPtr()->flushAll();
363  tc->getITBPtr()->flushAll();
364  }
365 
366  do {
367  if (!_ownerProcess->pTable->isUnmapped(start_addr, _pageBytes))
368  _ownerProcess->pTable->remap(start_addr, _pageBytes,
369  new_start_addr);
370 
371  start_addr += _pageBytes;
372  new_start_addr += _pageBytes;
373 
380  length -= _pageBytes;
381  } while (length > 0);
382 }
383 
384 bool
386 {
391  for (const auto &vma : _vmaList) {
392  if (vma.contains(vaddr)) {
393  Addr vpage_start = roundDown(vaddr, _pageBytes);
394  _ownerProcess->allocateMem(vpage_start, _pageBytes);
395 
402  if (vma.hasHostBuf()) {
407  for (auto &cid : _ownerProcess->contextIds) {
408  ThreadContext *tc =
411  virt_mem(tc, SETranslatingPortProxy::Always);
412  vma.fillMemPages(vpage_start, _pageBytes, virt_mem);
413  }
414  }
415  return true;
416  }
417  }
418 
426  if (vaddr >= _stackMin && vaddr < _stackBase) {
428  return true;
429  }
430 
435  if (vaddr < _stackMin && vaddr >= _stackBase - _maxStackSize) {
436  while (vaddr < _stackMin) {
439  fatal("Maximum stack size exceeded\n");
440  }
442  inform("Increasing stack size by one page.");
443  }
444  return true;
445  }
446 
447  return false;
448 }
449 
450 Addr
452 {
453  Addr start = _mmapEnd;
454 
456  start = _mmapEnd - length;
457 
458  // Look for a contiguous region of free virtual memory. We can't assume
459  // that the region beyond mmap_end is free because of fixed mappings from
460  // the user.
461  while (!isUnmapped(start, length)) {
462  DPRINTF(Vma, "memstate: cannot extend vma for mmap region at %p. "
463  "Virtual address range is already reserved! Skipping a page "
464  "and trying again!\n", start);
465  start = (_ownerProcess->mmapGrowsDown()) ? start - _pageBytes :
466  start + _pageBytes;
467  }
468 
469  DPRINTF(Vma, "memstate: extending mmap region (old %p) (new %p)\n",
470  _mmapEnd,
471  _ownerProcess->mmapGrowsDown() ? start : start + length);
472 
473  _mmapEnd = _ownerProcess->mmapGrowsDown() ? start : start + length;
474 
475  return start;
476 }
477 
478 std::string
480 {
481  std::stringstream file_content;
482 
483  for (auto vma : _vmaList) {
484  std::stringstream line;
485  line << std::hex << vma.start() << "-";
486  line << std::hex << vma.end() << " ";
487  line << "r-xp 00000000 00:00 0 ";
488  line << "[" << vma.getName() << "]" << std::endl;
489  file_content << line.str();
490  }
491 
492  return file_content.str();
493 }
#define panic(...)
This implements a cprintf based panic() function.
Definition: logging.hh:163
#define DPRINTF(x,...)
Definition: trace.hh:225
Process * _ownerProcess
Owner process of MemState.
Definition: mem_state.hh:215
virtual void remap(Addr vaddr, int64_t size, Addr new_vaddr)
Definition: page_table.cc:72
#define fatal(...)
This implements a cprintf based fatal() function.
Definition: logging.hh:171
MemState & operator=(const MemState &in)
Definition: mem_state.cc:53
std::vector< ContextID > contextIds
Definition: process.hh:160
void allocateMem(Addr vaddr, int64_t size, bool clobber=false)
Definition: process.cc:326
void mapRegion(Addr start_addr, Addr length, const std::string &name="anon", int sim_fd=-1, Addr offset=0)
Add a new memory region.
Definition: mem_state.cc:163
Addr _stackBase
Definition: mem_state.hh:219
Bitfield< 23, 0 > offset
Definition: types.hh:152
Addr _endBrkPoint
Keeps record of the furthest mapped heap location.
Definition: mem_state.hh:229
T roundUp(const T &val, const U &align)
This function is used to align addresses in memory.
Definition: intmath.hh:114
Addr _stackMin
Definition: mem_state.hh:222
ThreadContext is the external interface to all thread state for anything outside of the CPU...
Addr _stackSize
Definition: mem_state.hh:220
ThreadContext * getThreadContext(ContextID tid) const
Definition: system.hh:186
void updateBrkRegion(Addr old_brk, Addr new_brk)
Change the end of a process&#39; program break.
Definition: mem_state.cc:104
The AddrRange class encapsulates an address range, and supports a number of tests to check if two ran...
Definition: addr_range.hh:68
void resetOwner(Process *owner)
Change the Process owner in case this MemState is copied.
Definition: mem_state.cc:73
#define inform(...)
Definition: logging.hh:209
void unmapRegion(Addr start_addr, Addr length)
Unmap a pre-existing region.
Definition: mem_state.cc:183
bool fixupFault(Addr vaddr)
Attempt to fix up a fault at vaddr by allocating a page.
Definition: mem_state.cc:385
std::string printVmaList()
Print the list of VMAs in a format similar to /proc/self/maps.
Definition: mem_state.cc:479
std::vector< ThreadContext * > threadContexts
Definition: system.hh:182
Addr _nextThreadStackBase
Definition: mem_state.hh:223
System * system
Definition: process.hh:163
Addr _brkPoint
Definition: mem_state.hh:218
T roundDown(const T &val, const U &align)
This function is used to align addresses in memory.
Definition: intmath.hh:131
std::list< VMA > _vmaList
The _vmaList member is a list of virtual memory areas in the target application space that have been ...
Definition: mem_state.hh:243
Addr _mmapEnd
Definition: mem_state.hh:224
uint64_t Addr
Address type This will probably be moved somewhere else in the near future.
Definition: types.hh:140
This class holds the memory state for the Process class and all of its derived, architecture-specific...
Definition: mem_state.hh:62
Bitfield< 15 > system
Definition: misc.hh:997
Addr _maxStackSize
Definition: mem_state.hh:221
EmulationPageTable * pTable
Definition: process.hh:174
virtual bool isUnmapped(Addr vaddr, int64_t size)
Check if any pages in a region are already allocated.
Definition: page_table.cc:117
const Entry * lookup(Addr vaddr)
Lookup function.
Definition: page_table.cc:130
virtual void unmap(Addr vaddr, int64_t size)
Definition: page_table.cc:101
virtual bool mmapGrowsDown() const
Does mmap region grow upward or downward from mmapEnd? Most platforms grow downward, but a few (such as Alpha) grow upward instead, so they can override this method to return false.
Definition: process.hh:136
uint8_t length
Definition: inet.hh:329
Addr _pageBytes
Definition: mem_state.hh:217
bool isUnmapped(Addr start_addr, Addr length)
Check if any page in the virtual address range from start_addr to start_addr + length is already mapp...
Definition: mem_state.cc:79
void remapRegion(Addr start_addr, Addr new_start_addr, Addr length)
Remap a pre-existing region.
Definition: mem_state.cc:281
MemState(Process *owner, Addr brk_point, Addr stack_base, Addr max_stack_size, Addr next_thread_stack_base, Addr mmap_end)
Definition: mem_state.cc:41
Addr extendMmap(Addr length)
Definition: mem_state.cc:451

Generated on Fri Jul 3 2020 15:53:04 for gem5 by doxygen 1.8.13