gem5 v21.2.1.1
atomic.cc
/*
 * Copyright 2014 Google, Inc.
 * Copyright (c) 2012-2013,2015,2017-2020 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "cpu/simple/atomic.hh"

#include "arch/generic/decoder.hh"
#include "base/output.hh"
#include "config/the_isa.hh"
#include "cpu/exetrace.hh"
#include "cpu/utils.hh"
#include "debug/Drain.hh"
#include "debug/ExecFaulting.hh"
#include "debug/SimpleCPU.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "mem/physical.hh"
#include "params/AtomicSimpleCPU.hh"
#include "sim/faults.hh"
#include "sim/full_system.hh"
#include "sim/system.hh"

namespace gem5
{

void
AtomicSimpleCPU::init()
{
    BaseSimpleCPU::init();

    int cid = threadContexts[0]->contextId();
    ifetch_req->setContext(cid);
    data_read_req->setContext(cid);
    data_write_req->setContext(cid);
    data_amo_req->setContext(cid);
}

AtomicSimpleCPU::AtomicSimpleCPU(const AtomicSimpleCPUParams &p)
    : BaseSimpleCPU(p),
      tickEvent([this]{ tick(); }, "AtomicSimpleCPU tick",
                false, Event::CPU_Tick_Pri),
      width(p.width), locked(false),
      simulate_data_stalls(p.simulate_data_stalls),
      simulate_inst_stalls(p.simulate_inst_stalls),
      icachePort(name() + ".icache_port", this),
      dcachePort(name() + ".dcache_port", this),
      dcache_access(false), dcache_latency(0),
      ppCommit(nullptr)
{
    _status = Idle;
    ifetch_req = std::make_shared<Request>();
    data_read_req = std::make_shared<Request>();
    data_write_req = std::make_shared<Request>();
    data_amo_req = std::make_shared<Request>();
}
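
/*
 * A note on the four Request objects allocated above: they are created once
 * here and reused for every fetch, load, store, and AMO this CPU performs;
 * init() and tick() only retarget them with setContext(). A minimal sketch
 * of the reuse pattern, mirroring what readMem()/writeMem() below actually
 * do (addr, size, flags, and inst_addr stand in for the per-access values):
 *
 *     req->taskId(taskId());
 *     req->setVirt(addr, size, flags, dataRequestorId(), inst_addr);
 *
 * Nothing is reallocated between accesses; setVirt() simply overwrites the
 * previous access's state. Avoiding a heap allocation per access is part of
 * what makes the atomic CPU one of the fastest gem5 CPU models.
 */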


AtomicSimpleCPU::~AtomicSimpleCPU()
{
    if (tickEvent.scheduled()) {
        deschedule(tickEvent);
    }
}

DrainState
AtomicSimpleCPU::drain()
{
    // Deschedule any power gating event (if any)
    deschedulePowerGatingEvent();

    if (switchedOut())
        return DrainState::Drained;

    if (!isCpuDrained()) {
        DPRINTF(Drain, "Requesting drain.\n");
        return DrainState::Draining;
    } else {
        if (tickEvent.scheduled())
            deschedule(tickEvent);

        activeThreads.clear();
        DPRINTF(Drain, "Not executing microcode, no need to drain.\n");
        return DrainState::Drained;
    }
}
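
/*
 * The drain handshake, sketched end to end. All names below are from this
 * file; the ordering is inferred from drain(), tick(), and
 * tryCompleteDrain() rather than stated explicitly anywhere:
 *
 *     drain()             // mid-instruction: returns DrainState::Draining
 *     tick()              // finishes the current instruction, then calls
 *     tryCompleteDrain()  // isCpuDrained() now true -> signalDrainDone()
 *
 * If the CPU is already between instructions (or switched out), drain()
 * short-circuits and returns DrainState::Drained immediately.
 */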

void
AtomicSimpleCPU::threadSnoop(PacketPtr pkt, ThreadID sender)
{
    DPRINTF(SimpleCPU, "%s received snoop pkt for addr:%#x %s\n",
            __func__, pkt->getAddr(), pkt->cmdString());

    for (ThreadID tid = 0; tid < numThreads; tid++) {
        if (tid != sender) {
            if (getCpuAddrMonitor(tid)->doMonitor(pkt)) {
                wakeup(tid);
            }

            threadInfo[tid]->thread->getIsaPtr()->handleLockedSnoop(pkt,
                    dcachePort.cacheBlockMask);
        }
    }
}
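
/*
 * What threadSnoop() buys on a multi-threaded atomic CPU, as a hypothetical
 * trace (the address and thread IDs are illustrative only):
 *
 *     // thread 1 armed an address monitor on 0x1000 (x86 monitor/mwait)
 *     // thread 0 stores to 0x1000 -> writeMem() calls threadSnoop(pkt, 0)
 *     //   getCpuAddrMonitor(1)->doMonitor(pkt)  -> true
 *     //   wakeup(1)                             -> thread 1 resumes
 *
 * The handleLockedSnoop() call likewise clears thread 1's LL/SC reservation
 * if the store landed in its locked cache block.
 */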

void
AtomicSimpleCPU::drainResume()
{
    assert(!tickEvent.scheduled());
    if (switchedOut())
        return;

    DPRINTF(SimpleCPU, "Resume\n");
    verifyMemoryMode();

    assert(!threadContexts.empty());

    _status = BaseSimpleCPU::Idle;

    for (ThreadID tid = 0; tid < numThreads; tid++) {
        if (threadInfo[tid]->thread->status() == ThreadContext::Active) {
            threadInfo[tid]->execContextStats.notIdleFraction = 1;
            activeThreads.push_back(tid);
            _status = BaseSimpleCPU::Running;

            // Tick if any threads active
            if (!tickEvent.scheduled()) {
                schedule(tickEvent, nextCycle());
            }
        } else {
            threadInfo[tid]->execContextStats.notIdleFraction = 0;
        }
    }

    // Reschedule any power gating event (if any)
    schedulePowerGatingEvent();
}

bool
AtomicSimpleCPU::tryCompleteDrain()
{
    if (drainState() != DrainState::Draining)
        return false;

    DPRINTF(Drain, "tryCompleteDrain.\n");
    if (!isCpuDrained())
        return false;

    DPRINTF(Drain, "CPU done draining, processing drain event\n");
    signalDrainDone();

    return true;
}


void
AtomicSimpleCPU::switchOut()
{
    BaseSimpleCPU::switchOut();

    assert(!tickEvent.scheduled());
    assert(_status == BaseSimpleCPU::Running || _status == Idle);
    assert(isCpuDrained());
}


void
AtomicSimpleCPU::takeOverFrom(BaseCPU *old_cpu)
{
    BaseSimpleCPU::takeOverFrom(old_cpu);

    // The tick event should have been descheduled by drain()
    assert(!tickEvent.scheduled());
}

void
AtomicSimpleCPU::verifyMemoryMode() const
{
    fatal_if(!system->isAtomicMode(),
             "The atomic CPU requires the memory system to be in "
             "'atomic' mode.");
}

void
AtomicSimpleCPU::activateContext(ThreadID thread_num)
{
    DPRINTF(SimpleCPU, "ActivateContext %d\n", thread_num);

    assert(thread_num < numThreads);

    threadInfo[thread_num]->execContextStats.notIdleFraction = 1;
    Cycles delta = ticksToCycles(threadInfo[thread_num]->thread->lastActivate -
                                 threadInfo[thread_num]->thread->lastSuspend);
    baseStats.numCycles += delta;

    if (!tickEvent.scheduled()) {
        // Make sure ticks are still on multiples of cycles
        schedule(tickEvent, clockEdge(Cycles(0)));
    }
    _status = BaseSimpleCPU::Running;
    if (std::find(activeThreads.begin(), activeThreads.end(), thread_num) ==
        activeThreads.end()) {
        activeThreads.push_back(thread_num);
    }

    BaseCPU::activateContext(thread_num);
}
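
/*
 * Worked example of the idle-cycle accounting above (values illustrative;
 * lastActivate is assumed to have just been set to the current tick when
 * the thread was reactivated, so the difference spans the suspended
 * interval): at the default 1 ps tick resolution, a 1 GHz CPU has a
 * 1000-tick clock period. A thread suspended at tick 10000 and reactivated
 * at tick 14000 yields
 *
 *     delta = ticksToCycles(14000 - 10000);    // 4 cycles
 *
 * credited to baseStats.numCycles, keeping the cycle count consistent with
 * simulated time rather than only counting cycles the CPU actively ticked.
 */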


void
AtomicSimpleCPU::suspendContext(ThreadID thread_num)
{
    DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);

    assert(thread_num < numThreads);
    activeThreads.remove(thread_num);

    if (_status == Idle)
        return;

    assert(_status == BaseSimpleCPU::Running);

    threadInfo[thread_num]->execContextStats.notIdleFraction = 0;

    if (activeThreads.empty()) {
        _status = Idle;

        if (tickEvent.scheduled()) {
            deschedule(tickEvent);
        }
    }

    BaseCPU::suspendContext(thread_num);
}

Tick
AtomicSimpleCPU::sendPacket(RequestPort &port, const PacketPtr &pkt)
{
    return port.sendAtomic(pkt);
}

Tick
AtomicSimpleCPU::AtomicCPUDPort::recvAtomicSnoop(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "%s received atomic snoop pkt for addr:%#x %s\n",
            __func__, pkt->getAddr(), pkt->cmdString());

    // X86 ISA: Snooping an invalidation for monitor/mwait
    AtomicSimpleCPU *cpu = (AtomicSimpleCPU *)(&owner);

    for (ThreadID tid = 0; tid < cpu->numThreads; tid++) {
        if (cpu->getCpuAddrMonitor(tid)->doMonitor(pkt)) {
            cpu->wakeup(tid);
        }
    }

    // If the snoop invalidates, release any associated locks. When run
    // without caches, invalidation packets will not be received, so we
    // must also check whether the incoming packet is a write and wake up
    // the processor accordingly.
    if (pkt->isInvalidate() || pkt->isWrite()) {
        DPRINTF(SimpleCPU, "received invalidation for addr:%#x\n",
                pkt->getAddr());
        for (auto &t_info : cpu->threadInfo) {
            t_info->thread->getIsaPtr()->handleLockedSnoop(pkt,
                    cacheBlockMask);
        }
    }

    return 0;
}
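
/*
 * A worked example of the cacheBlockMask test that handleLockedSnoop()
 * performs (the mask lives in atomic.hh and is assumed here to be
 * ~(cacheLineSize() - 1); the addresses are illustrative): with 64-byte
 * lines, a snooped write to 0x1234 collides with an LL/SC reservation at
 * 0x1210 because both addresses fall in the same block:
 *
 *     (0x1234 & ~Addr(63)) == (0x1210 & ~Addr(63));    // both 0x1200
 *
 * so the reservation is cleared and a later store-conditional fails.
 */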

void
AtomicSimpleCPU::AtomicCPUDPort::recvFunctionalSnoop(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "%s received functional snoop pkt for addr:%#x %s\n",
            __func__, pkt->getAddr(), pkt->cmdString());

    // X86 ISA: Snooping an invalidation for monitor/mwait
    AtomicSimpleCPU *cpu = (AtomicSimpleCPU *)(&owner);
    for (ThreadID tid = 0; tid < cpu->numThreads; tid++) {
        if (cpu->getCpuAddrMonitor(tid)->doMonitor(pkt)) {
            cpu->wakeup(tid);
        }
    }

    // If the snoop invalidates, release any associated locks.
    if (pkt->isInvalidate()) {
        DPRINTF(SimpleCPU, "received invalidation for addr:%#x\n",
                pkt->getAddr());
        for (auto &t_info : cpu->threadInfo) {
            t_info->thread->getIsaPtr()->handleLockedSnoop(pkt,
                    cacheBlockMask);
        }
    }
}

bool
AtomicSimpleCPU::genMemFragmentRequest(const RequestPtr &req, Addr frag_addr,
                                       int size, Request::Flags flags,
                                       const std::vector<bool> &byte_enable,
                                       int &frag_size, int &size_left) const
{
    bool predicate = true;
    Addr inst_addr = threadInfo[curThread]->thread->pcState().instAddr();

    frag_size = std::min(
        cacheLineSize() - addrBlockOffset(frag_addr, cacheLineSize()),
        (Addr)size_left);
    size_left -= frag_size;

    // Set up byte-enable mask for the current fragment
    auto it_start = byte_enable.begin() + (size - (frag_size + size_left));
    auto it_end = byte_enable.begin() + (size - size_left);
    if (isAnyActiveElement(it_start, it_end)) {
        req->setVirt(frag_addr, frag_size, flags, dataRequestorId(),
                     inst_addr);
        req->setByteEnable(std::vector<bool>(it_start, it_end));
    } else {
        predicate = false;
    }

    return predicate;
}
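
/*
 * Worked example of the fragmentation arithmetic above (illustrative
 * values): for a 16-byte access at frag_addr = 0x7c with 64-byte cache
 * lines, addrBlockOffset(0x7c, 64) = 0x3c, so the first call computes
 *
 *     frag_size = std::min(64 - 0x3c, 16);    // 4; size_left becomes 12
 *
 * and the caller's loop issues a second fragment at 0x80 with
 * frag_size = 12 and size_left = 0. The byte-enable iterators pick out the
 * slice of the caller's mask covering the current fragment; if that slice
 * is all false, the fragment is skipped entirely (predicate = false) and
 * no request is generated for it.
 */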

Fault
AtomicSimpleCPU::readMem(Addr addr, uint8_t *data, unsigned size,
                         Request::Flags flags,
                         const std::vector<bool> &byte_enable)
{
    SimpleExecContext &t_info = *threadInfo[curThread];
    SimpleThread *thread = t_info.thread;

    // use the CPU's statically allocated read request and packet objects
    const RequestPtr &req = data_read_req;

    if (traceData)
        traceData->setMem(addr, size, flags);

    dcache_latency = 0;

    req->taskId(taskId());

    Addr frag_addr = addr;
    int frag_size = 0;
    int size_left = size;
    bool predicate;
    Fault fault = NoFault;

    while (1) {
        predicate = genMemFragmentRequest(req, frag_addr, size, flags,
                                          byte_enable, frag_size, size_left);

        // translate to physical address
        if (predicate) {
            fault = thread->mmu->translateAtomic(req, thread->getTC(),
                                                 BaseMMU::Read);
        }

        // Now do the access.
        if (predicate && fault == NoFault &&
            !req->getFlags().isSet(Request::NO_ACCESS)) {
            Packet pkt(req, Packet::makeReadCmd(req));
            pkt.dataStatic(data);

            if (req->isLocalAccess()) {
                dcache_latency += req->localAccessor(thread->getTC(), &pkt);
            } else {
                dcache_latency += sendPacket(dcachePort, &pkt);
            }
            dcache_access = true;

            assert(!pkt.isError());

            if (req->isLLSC()) {
                thread->getIsaPtr()->handleLockedRead(req);
            }
        }

        // If there's a fault, return it (suppressing faults from prefetches).
        if (fault != NoFault)
            return req->isPrefetch() ? NoFault : fault;

        // If we don't need to access further cache lines, stop now.
        if (size_left == 0) {
            if (req->isLockedRMW() && fault == NoFault) {
                assert(!locked);
                locked = true;
            }
            return fault;
        }

        /*
         * Set up for accessing the next cache line.
         */
        frag_addr += frag_size;

        // Move the pointer we're reading into to the correct location.
        data += frag_size;
    }
}
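
/*
 * A minimal usage sketch for readMem() (hypothetical caller; buf and the
 * all-true byte-enable vector are assumptions, not code from this file).
 * An 8-byte load that straddles a cache line comes back through the loop
 * above as two fragments, invisibly to the caller:
 *
 *     uint8_t buf[8];
 *     std::vector<bool> enable(8, true);
 *     Fault fault = readMem(0x7fc, buf, 8, Request::Flags(0), enable);
 *     // fault == NoFault on success; buf holds the loaded bytes
 */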

Fault
AtomicSimpleCPU::writeMem(uint8_t *data, unsigned size, Addr addr,
                          Request::Flags flags, uint64_t *res,
                          const std::vector<bool>& byte_enable)
{
    SimpleExecContext &t_info = *threadInfo[curThread];
    SimpleThread *thread = t_info.thread;
    static uint8_t zero_array[64] = {};

    if (data == NULL) {
        assert(size <= 64);
        assert(flags & Request::STORE_NO_DATA);
        // This must be a cache block cleaning request
        data = zero_array;
    }

    // use the CPU's statically allocated write request and packet objects
    const RequestPtr &req = data_write_req;

    if (traceData)
        traceData->setMem(addr, size, flags);

    dcache_latency = 0;

    req->taskId(taskId());

    Addr frag_addr = addr;
    int frag_size = 0;
    int size_left = size;
    int curr_frag_id = 0;
    bool predicate;
    Fault fault = NoFault;

    while (1) {
        predicate = genMemFragmentRequest(req, frag_addr, size, flags,
                                          byte_enable, frag_size, size_left);

        // translate to physical address
        if (predicate)
            fault = thread->mmu->translateAtomic(req, thread->getTC(),
                                                 BaseMMU::Write);

        // Now do the access.
        if (predicate && fault == NoFault) {
            bool do_access = true;  // flag to suppress cache access

            if (req->isLLSC()) {
                assert(curr_frag_id == 0);
                do_access = thread->getIsaPtr()->handleLockedWrite(req,
                        dcachePort.cacheBlockMask);
            } else if (req->isSwap()) {
                assert(curr_frag_id == 0);
                if (req->isCondSwap()) {
                    assert(res);
                    req->setExtraData(*res);
                }
            }

            if (do_access && !req->getFlags().isSet(Request::NO_ACCESS)) {
                Packet pkt(req, Packet::makeWriteCmd(req));
                pkt.dataStatic(data);

                if (req->isLocalAccess()) {
                    dcache_latency +=
                        req->localAccessor(thread->getTC(), &pkt);
                } else {
                    dcache_latency += sendPacket(dcachePort, &pkt);

                    // Notify other threads on this CPU of write
                    threadSnoop(&pkt, curThread);
                }
                dcache_access = true;
                assert(!pkt.isError());

                if (req->isSwap()) {
                    assert(res && curr_frag_id == 0);
                    memcpy(res, pkt.getConstPtr<uint8_t>(), size);
                }
            }

            if (res && !req->isSwap()) {
                *res = req->getExtraData();
            }
        }

        // If there's a fault or we don't need to access a second cache
        // line, stop now.
        if (fault != NoFault || size_left == 0) {
            if (req->isLockedRMW() && fault == NoFault) {
                assert(!req->isMasked());
                locked = false;
            }

            // Suppress faults from prefetches.
            return req->isPrefetch() ? NoFault : fault;
        }

        /*
         * Set up for accessing the next cache line.
         */
        frag_addr += frag_size;

        // Move the pointer we're writing from to the correct location.
        data += frag_size;

        curr_frag_id++;
    }
}
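
/*
 * Sketch of how the res pointer behaves for a store-conditional
 * (hypothetical caller; src, addr, and enable are assumptions, and the
 * success/failure encoding is ISA-defined inside handleLockedWrite()): if
 * the reservation was lost, handleLockedWrite() returns false, do_access
 * stays false, memory is never touched, and the caller still gets a result
 * via req->getExtraData():
 *
 *     uint64_t res;
 *     Fault f = writeMem(src, 8, addr, Request::LLSC, &res, enable);
 *     // res now encodes SC success/failure per the ISA's convention
 *
 * For conditional swaps the flow is inverted: *res seeds setExtraData()
 * before the access, and for any swap the old memory value is copied back
 * into *res from the packet afterwards.
 */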

Fault
AtomicSimpleCPU::amoMem(Addr addr, uint8_t* data, unsigned size,
                        Request::Flags flags, AtomicOpFunctorPtr amo_op)
{
    SimpleExecContext &t_info = *threadInfo[curThread];
    SimpleThread *thread = t_info.thread;

    // use the CPU's statically allocated amo request and packet objects
    const RequestPtr &req = data_amo_req;

    if (traceData)
        traceData->setMem(addr, size, flags);

    // The address of the second part of this access if it needs to be split
    // across a cache line boundary.
    Addr secondAddr = roundDown(addr + size - 1, cacheLineSize());

    // AMO requests that access across a cache line boundary are not
    // allowed, since the cache does not guarantee that AMO ops are
    // executed atomically across two cache lines.
    // For ISAs such as x86 that require AMO operations to work on
    // accesses that cross cache-line boundaries, the cache needs to be
    // modified to support locking both cache lines to guarantee the
    // atomicity.
    panic_if(secondAddr > addr,
             "AMO request should not access across a cache line boundary.");

    dcache_latency = 0;

    req->taskId(taskId());
    req->setVirt(addr, size, flags, dataRequestorId(),
                 thread->pcState().instAddr(), std::move(amo_op));

    // translate to physical address
    Fault fault = thread->mmu->translateAtomic(
        req, thread->getTC(), BaseMMU::Write);

    // Now do the access.
    if (fault == NoFault && !req->getFlags().isSet(Request::NO_ACCESS)) {
        // We treat AMO accesses as Write accesses with SwapReq command;
        // data will hold the return data of the AMO access.
        Packet pkt(req, Packet::makeWriteCmd(req));
        pkt.dataStatic(data);

        if (req->isLocalAccess()) {
            dcache_latency += req->localAccessor(thread->getTC(), &pkt);
        } else {
            dcache_latency += sendPacket(dcachePort, &pkt);
        }

        dcache_access = true;

        assert(!pkt.isError());
        assert(!req->isLLSC());
    }

    if (fault != NoFault && req->isPrefetch()) {
        return NoFault;
    }

    // If there's a fault and we're not doing a prefetch, return it.
    return fault;
}
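
/*
 * Worked example of the cache-line-boundary check above (illustrative
 * numbers, 64-byte lines): an 8-byte AMO at addr = 0x7c gives
 *
 *     secondAddr = roundDown(0x7c + 8 - 1, 64);    // roundDown(0x83) = 0x80
 *
 * and 0x80 > 0x7c, so the access crosses a line boundary and the panic
 * fires. The same AMO at addr = 0x78 gives roundDown(0x7f, 64) = 0x40,
 * which is not greater than the start address, so the access proceeds.
 */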

void
AtomicSimpleCPU::tick()
{
    DPRINTF(SimpleCPU, "Tick\n");

    // Change thread if multi-threaded
    swapActiveThread();

    // Set memory request ids to current thread
    if (numThreads > 1) {
        ContextID cid = threadContexts[curThread]->contextId();

        ifetch_req->setContext(cid);
        data_read_req->setContext(cid);
        data_write_req->setContext(cid);
        data_amo_req->setContext(cid);
    }

    SimpleExecContext &t_info = *threadInfo[curThread];
    SimpleThread *thread = t_info.thread;

    Tick latency = 0;

    for (int i = 0; i < width || locked; ++i) {
        baseStats.numCycles++;
        updateCycleCounters(BaseCPU::CPU_STATE_ON);

        if (!curStaticInst || !curStaticInst->isDelayedCommit()) {
            checkForInterrupts();
            checkPcEventQueue();
        }

        // We must have just got suspended by a PC event
        if (_status == Idle) {
            tryCompleteDrain();
            return;
        }

        serviceInstCountEvents();

        Fault fault = NoFault;

        const PCStateBase &pc = thread->pcState();

        bool needToFetch = !isRomMicroPC(pc.microPC()) && !curMacroStaticInst;
        if (needToFetch) {
            ifetch_req->taskId(taskId());
            setupFetchRequest(ifetch_req);
            fault = thread->mmu->translateAtomic(ifetch_req, thread->getTC(),
                                                 BaseMMU::Execute);
        }

        if (fault == NoFault) {
            Tick icache_latency = 0;
            bool icache_access = false;
            dcache_access = false; // assume no dcache access

            if (needToFetch) {
                // This is commented out because the decoder would act like
                // a tiny cache otherwise. It wouldn't be flushed when needed
                // like the I cache. It should be flushed, and when that works
                // this code should be uncommented.
                //Fetch more instruction memory if necessary
                //if (decoder.needMoreBytes())
                //{
                icache_access = true;
                icache_latency = fetchInstMem();
                //}
            }

            preExecute();

            Tick stall_ticks = 0;
            if (curStaticInst) {
                fault = curStaticInst->execute(&t_info, traceData);

                // keep an instruction count
                if (fault == NoFault) {
                    countInst();
                    ppCommit->notify(std::make_pair(thread, curStaticInst));
                } else if (traceData) {
                    traceFault();
                }

                if (fault != NoFault &&
                    std::dynamic_pointer_cast<SyscallRetryFault>(fault)) {
                    // Retry execution of system calls after a delay.
                    // Prevents immediate re-execution, since the conditions
                    // which caused the retry are unlikely to change every
                    // tick.
                    stall_ticks += clockEdge(syscallRetryLatency) - curTick();
                }

                postExecute();
            }

            // @todo remove me after debugging with legion done
            if (curStaticInst && (!curStaticInst->isMicroop() ||
                        curStaticInst->isFirstMicroop())) {
                instCnt++;
            }

            if (simulate_inst_stalls && icache_access)
                stall_ticks += icache_latency;

            if (simulate_data_stalls && dcache_access)
                stall_ticks += dcache_latency;

            if (stall_ticks) {
                // the atomic cpu does its accounting in ticks, so
                // keep counting in ticks but round to the clock
                // period
                latency += divCeil(stall_ticks, clockPeriod()) *
                    clockPeriod();
            }

        }
        if (fault != NoFault || !t_info.stayAtPC)
            advancePC(fault);
    }

    if (tryCompleteDrain())
        return;

    // instruction takes at least one cycle
    if (latency < clockPeriod())
        latency = clockPeriod();

    if (_status != Idle)
        reschedule(tickEvent, curTick() + latency, true);
}
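
/*
 * Worked example of the stall-latency rounding near the end of tick()
 * (illustrative numbers): with a 500-tick clock period, stall_ticks = 1300
 * becomes
 *
 *     latency += divCeil(1300, 500) * 500;    // 3 * 500 = 1500 ticks
 *
 * so the next tickEvent still lands on a clock edge even though the memory
 * system reported a latency that is not a multiple of the CPU clock. The
 * "at least one cycle" clamp above then guarantees a minimum latency of one
 * clock period for instructions that incurred no stalls at all.
 */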

Tick
AtomicSimpleCPU::fetchInstMem()
{
    auto &decoder = threadInfo[curThread]->thread->decoder;

    Packet pkt = Packet(ifetch_req, MemCmd::ReadReq);

    // ifetch_req is initialized to read the instruction
    // directly into the CPU object's inst field.
    pkt.dataStatic(decoder->moreBytesPtr());

    Tick latency = sendPacket(icachePort, &pkt);
    assert(!pkt.isError());

    return latency;
}

void
AtomicSimpleCPU::regProbePoints()
{
    BaseCPU::regProbePoints();

    ppCommit = new ProbePointArg<std::pair<SimpleThread*, const StaticInstPtr>>
        (getProbeManager(), "Commit");
}

void
AtomicSimpleCPU::printAddr(Addr a)
{
    dcachePort.printAddr(a);
}

} // namespace gem5