gem5  v21.0.0.0
All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Modules Pages
atomic.cc
Go to the documentation of this file.
1 /*
2  * Copyright 2014 Google, Inc.
3  * Copyright (c) 2012-2013,2015,2017-2020 ARM Limited
4  * All rights reserved.
5  *
6  * The license below extends only to copyright in the software and shall
7  * not be construed as granting a license to any other intellectual
8  * property including but not limited to intellectual property relating
9  * to a hardware implementation of the functionality of the software
10  * licensed hereunder. You may use the software subject to the license
11  * terms below provided that you ensure that this notice is replicated
12  * unmodified and in its entirety in all distributions of the software,
13  * modified or unmodified, in source code or in binary form.
14  *
15  * Copyright (c) 2002-2005 The Regents of The University of Michigan
16  * All rights reserved.
17  *
18  * Redistribution and use in source and binary forms, with or without
19  * modification, are permitted provided that the following conditions are
20  * met: redistributions of source code must retain the above copyright
21  * notice, this list of conditions and the following disclaimer;
22  * redistributions in binary form must reproduce the above copyright
23  * notice, this list of conditions and the following disclaimer in the
24  * documentation and/or other materials provided with the distribution;
25  * neither the name of the copyright holders nor the names of its
26  * contributors may be used to endorse or promote products derived from
27  * this software without specific prior written permission.
28  *
29  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
30  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
31  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
32  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
33  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
34  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
35  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
36  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
37  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
38  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
39  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
40  */
41 
42 #include "cpu/simple/atomic.hh"
43 
44 #include "arch/locked_mem.hh"
45 #include "arch/utility.hh"
46 #include "base/output.hh"
47 #include "config/the_isa.hh"
48 #include "cpu/exetrace.hh"
49 #include "cpu/utils.hh"
50 #include "debug/Drain.hh"
51 #include "debug/ExecFaulting.hh"
52 #include "debug/SimpleCPU.hh"
53 #include "mem/packet.hh"
54 #include "mem/packet_access.hh"
55 #include "mem/physical.hh"
56 #include "params/AtomicSimpleCPU.hh"
57 #include "sim/faults.hh"
58 #include "sim/full_system.hh"
59 #include "sim/system.hh"
60 
// AtomicSimpleCPU::init() -- post-construction initialization hook.
// Stamps the four preallocated memory requests with the context ID of
// hardware thread 0 so the memory system can attribute their accesses.
// NOTE(review): this Doxygen listing dropped source line 62 (the function
// signature -- the member index confirms init() is at atomic.cc:62) and
// line 64 (presumably the BaseSimpleCPU::init() chain call -- confirm
// against the real atomic.cc).
61 void
63 {
65 
// All requests initially belong to thread context 0; tick() re-stamps
// them per-thread when numThreads > 1.
66  int cid = threadContexts[0]->contextId();
67  ifetch_req->setContext(cid);
68  data_read_req->setContext(cid);
69  data_write_req->setContext(cid);
70  data_amo_req->setContext(cid);
71 }
72 
// Constructor: wires up the tick event, configuration knobs, and the two
// CPU-side ports, then preallocates the Request objects that readMem/
// writeMem/amoMem/tick reuse on every access (avoids per-access allocation).
73 AtomicSimpleCPU::AtomicSimpleCPU(const AtomicSimpleCPUParams &p)
74  : BaseSimpleCPU(p),
// The tick event runs tick() at CPU_Tick_Pri so it orders after other
// CPU-associated events (e.g. writebacks) scheduled for the same tick.
75  tickEvent([this]{ tick(); }, "AtomicSimpleCPU tick",
76  false, Event::CPU_Tick_Pri),
// width = instructions attempted per tick; locked tracks an in-progress
// locked RMW sequence (starts false).
77  width(p.width), locked(false),
78  simulate_data_stalls(p.simulate_data_stalls),
79  simulate_inst_stalls(p.simulate_inst_stalls),
80  icachePort(name() + ".icache_port", this),
81  dcachePort(name() + ".dcache_port", this),
82  dcache_access(false), dcache_latency(0),
// ppCommit is the commit probe point; it is created later in
// regProbePoints(), hence nullptr here.
83  ppCommit(nullptr)
84 {
85  _status = Idle;
// Statically allocated requests reused for every fetch/read/write/AMO.
86  ifetch_req = std::make_shared<Request>();
87  data_read_req = std::make_shared<Request>();
88  data_write_req = std::make_shared<Request>();
89  data_amo_req = std::make_shared<Request>();
90 }
91 
92 
// Destructor: tears down any still-scheduled tick event.
// NOTE(review): the listing dropped line 93 (the ~AtomicSimpleCPU()
// signature) and line 96 (the body of the if -- presumably
// tickEvent.squash()/deschedule -- confirm against the real atomic.cc).
94 {
95  if (tickEvent.scheduled()) {
97  }
98 }
99 
// drain() -- Drainable hook: returns Drained if the CPU is switched out or
// already quiescent, otherwise Draining until the in-flight microcode
// sequence finishes.
// NOTE(review): the listing dropped lines 100-101 (the
// "DrainState AtomicSimpleCPU::drain()" signature), line 104 (presumably
// deschedulePowerGatingEvent(), per the comment at 103), and line 114
// (presumably deschedule(tickEvent) under the if at 113) -- confirm
// against the real atomic.cc.
102 {
103  // Deschedule any power gating event (if any)
105 
// A switched-out CPU has no state to drain.
106  if (switchedOut())
107  return DrainState::Drained;
108 
// isCpuDrained() is false mid-microcode; defer completion to
// tryCompleteDrain() called from tick().
109  if (!isCpuDrained()) {
110  DPRINTF(Drain, "Requesting drain.\n");
111  return DrainState::Draining;
112  } else {
113  if (tickEvent.scheduled())
115 
116  activeThreads.clear();
117  DPRINTF(Drain, "Not executing microcode, no need to drain.\n");
118  return DrainState::Drained;
119  }
120 }
121 
// threadSnoop(pkt, sender) -- forward a store made by one local thread
// context to the CPU's other thread contexts: wake any whose address
// monitor (monitor/mwait) matches the packet.
// NOTE(review): the listing dropped line 123 (the signature; the member
// index gives threadSnoop(PacketPtr, ThreadID) at atomic.cc:123) and
// lines 134-135 (presumably the invalidate/locked-access handling for the
// non-sender thread -- confirm against the real atomic.cc).
122 void
124 {
125  DPRINTF(SimpleCPU, "%s received snoop pkt for addr:%#x %s\n",
126  __func__, pkt->getAddr(), pkt->cmdString());
127 
// The originating thread must not snoop its own write.
128  for (ThreadID tid = 0; tid < numThreads; tid++) {
129  if (tid != sender) {
130  if (getCpuAddrMonitor(tid)->doMonitor(pkt)) {
131  wakeup(tid);
132  }
133 
136  }
137  }
138 }
139 
// drainResume() -- restart execution after a successful drain: mark each
// Active thread runnable again and restart the tick event.
// NOTE(review): the listing dropped several hyperlinked lines: 141 (the
// signature), 148 (presumably verifyMemoryMode(), matching the include of
// sim/system.hh), 152/158 (presumably the _status = Idle / Running
// assignments), 162 (presumably the schedule of tickEvent under the if at
// 161), and 170 (presumably schedulePowerGatingEvent(), per the comment
// at 169). Confirm all against the real atomic.cc.
140 void
142 {
143  assert(!tickEvent.scheduled());
// Nothing to resume on a switched-out CPU.
144  if (switchedOut())
145  return;
146 
147  DPRINTF(SimpleCPU, "Resume\n");
149 
150  assert(!threadContexts.empty());
151 
153 
// Re-activate every thread context that was Active when drained.
154  for (ThreadID tid = 0; tid < numThreads; tid++) {
155  if (threadInfo[tid]->thread->status() == ThreadContext::Active) {
156  threadInfo[tid]->execContextStats.notIdleFraction = 1;
157  activeThreads.push_back(tid);
159 
160  // Tick if any threads active
161  if (!tickEvent.scheduled()) {
163  }
164  } else {
165  threadInfo[tid]->execContextStats.notIdleFraction = 0;
166  }
167  }
168 
169  // Reschedule any power gating event (if any)
171 }
172 
// tryCompleteDrain() -- called from tick(); if a drain was requested and
// the CPU has now reached a drained state, signal completion and return
// true so the caller stops ticking.
// NOTE(review): the listing dropped line 174 (the signature) and line 176
// (the guard whose early "return false" survives at 177 -- presumably
// "if (drainState() != DrainState::Draining)"; the index entry for
// Drainable::drainState supports this -- confirm against atomic.cc).
173 bool
175 {
177  return false;
178 
179  DPRINTF(Drain, "tryCompleteDrain.\n");
// Still mid-instruction (e.g. microcode); keep draining.
180  if (!isCpuDrained())
181  return false;
182 
183  DPRINTF(Drain, "CPU done draining, processing drain event\n");
184  signalDrainDone();
185 
186  return true;
187 }
188 
189 
// switchOut() -- prepare for another CPU model to take over; sanity-check
// that the CPU is fully quiescent first.
// NOTE(review): the listing dropped line 191 (the signature) and line 193
// (presumably the BaseCPU::switchOut() chain call) -- confirm against the
// real atomic.cc.
190 void
192 {
194 
195  assert(!tickEvent.scheduled());
196  assert(_status == BaseSimpleCPU::Running || _status == Idle);
197  assert(isCpuDrained());
198 }
199 
200 
// takeOverFrom(old_cpu) -- adopt state from the CPU being switched out.
// NOTE(review): the listing dropped line 202 (the signature; the index
// places takeOverFrom at atomic.cc:202) and line 204 (presumably the
// BaseCPU::takeOverFrom(old_cpu) chain call) -- confirm against the real
// atomic.cc.
201 void
203 {
205 
206  // The tick event should have been descheduled by drain()
207  assert(!tickEvent.scheduled());
208 }
209 
// verifyMemoryMode() -- refuse to run unless the memory system is in
// atomic mode (this CPU model uses sendAtomic exclusively).
// NOTE(review): the listing dropped line 211 (the signature) and line 213
// (the check-and-report line whose message continues at 214-215,
// presumably fatal_if/!system->isAtomicMode()) -- confirm against the
// real atomic.cc.
210 void
212 {
214  "The atomic CPU requires the memory system to be in "
215  "'atomic' mode.");
216 }
217 
// activateContext(thread_num) -- mark a thread context runnable: account
// the idle cycles since it was suspended, restart the tick event if
// needed, and add the thread to the active list (idempotently).
// NOTE(review): the listing dropped line 219 (the signature; the index
// places activateContext at atomic.cc:219), line 232 (presumably the
// cycle-aligned schedule of tickEvent, per the comment at 231), and line
// 234 (presumably the _status = Running assignment) -- confirm against
// the real atomic.cc.
218 void
220 {
221  DPRINTF(SimpleCPU, "ActivateContext %d\n", thread_num);
222 
223  assert(thread_num < numThreads);
224 
225  threadInfo[thread_num]->execContextStats.notIdleFraction = 1;
// Credit the wall-clock gap between last suspend and this activation to
// numCycles so cycle accounting stays continuous across idle periods.
226  Cycles delta = ticksToCycles(threadInfo[thread_num]->thread->lastActivate -
227  threadInfo[thread_num]->thread->lastSuspend);
228  baseStats.numCycles += delta;
229 
230  if (!tickEvent.scheduled()) {
231  //Make sure ticks are still on multiples of cycles
233  }
235  if (std::find(activeThreads.begin(), activeThreads.end(), thread_num) ==
236  activeThreads.end()) {
237  activeThreads.push_back(thread_num);
238  }
239 
240  BaseCPU::activateContext(thread_num);
241 }
242 
243 
// suspendContext(thread_num) -- remove a thread from the active list; if
// no threads remain active, go Idle and stop ticking.
// NOTE(review): the listing dropped line 245 (the signature; the index
// places suspendContext at atomic.cc:245) and line 263 (presumably
// deschedule(tickEvent) inside the if at 262) -- confirm against the real
// atomic.cc.
244 void
246 {
247  DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);
248 
249  assert(thread_num < numThreads);
250  activeThreads.remove(thread_num);
251 
// Already idle: nothing to stop.
252  if (_status == Idle)
253  return;
254 
255  assert(_status == BaseSimpleCPU::Running);
256 
257  threadInfo[thread_num]->execContextStats.notIdleFraction = 0;
258 
// Last active thread just left: quiesce the CPU.
259  if (activeThreads.empty()) {
260  _status = Idle;
261 
262  if (tickEvent.scheduled()) {
264  }
265  }
266 
267  BaseCPU::suspendContext(thread_num);
268 }
269 
// sendPacket(port, pkt) -- single indirection point for all outgoing
// memory traffic; virtual so subclasses/tests can intercept accesses.
// Returns the atomic-mode latency reported by the peer.
// NOTE(review): the listing dropped line 271, the signature; the member
// index confirms it is
// "virtual Tick sendPacket(RequestPort &port, const PacketPtr &pkt)".
270 Tick
272 {
273  return port.sendAtomic(pkt);
274 }
275 
// AtomicCPUDPort::recvAtomicSnoop(pkt) -- atomic-mode snoop from the
// memory system: wake any thread whose monitor/mwait address monitor
// matches, and clear LL/SC reservations on invalidations or writes.
// Always reports zero snoop latency.
// NOTE(review): the listing dropped line 277 (the signature; the index
// places recvAtomicSnoop at atomic.cc:277) and line 283 (presumably the
// "AtomicSimpleCPU *cpu = ..." cast of `owner`, mirroring the one visible
// in recvFunctionalSnoop) -- confirm against the real atomic.cc.
276 Tick
278 {
279  DPRINTF(SimpleCPU, "%s received atomic snoop pkt for addr:%#x %s\n",
280  __func__, pkt->getAddr(), pkt->cmdString());
281 
282  // X86 ISA: Snooping an invalidation for monitor/mwait
284 
285  for (ThreadID tid = 0; tid < cpu->numThreads; tid++) {
286  if (cpu->getCpuAddrMonitor(tid)->doMonitor(pkt)) {
287  cpu->wakeup(tid);
288  }
289  }
290 
291  // if snoop invalidates, release any associated locks
292  // When run without caches, Invalidation packets will not be received
293  // hence we must check if the incoming packets are writes and wakeup
294  // the processor accordingly
295  if (pkt->isInvalidate() || pkt->isWrite()) {
296  DPRINTF(SimpleCPU, "received invalidation for addr:%#x\n",
297  pkt->getAddr());
298  for (auto &t_info : cpu->threadInfo) {
299  TheISA::handleLockedSnoop(t_info->thread, pkt, cacheBlockMask);
300  }
301  }
302 
// Atomic snoops are modeled as free (zero additional latency).
303  return 0;
304 }
305 
// AtomicCPUDPort::recvFunctionalSnoop(pkt) -- functional-path counterpart
// of recvAtomicSnoop: wake matching monitor/mwait threads and release
// LL/SC reservations on invalidations. Unlike the atomic path, plain
// writes do NOT clear reservations here.
// NOTE(review): the listing dropped line 307, the signature; the member
// index confirms "virtual void recvFunctionalSnoop(PacketPtr pkt)" at
// atomic.cc:307.
306 void
308 {
309  DPRINTF(SimpleCPU, "%s received functional snoop pkt for addr:%#x %s\n",
310  __func__, pkt->getAddr(), pkt->cmdString());
311 
312  // X86 ISA: Snooping an invalidation for monitor/mwait
// Recover the owning CPU from the port's generic SimObject owner.
313  AtomicSimpleCPU *cpu = (AtomicSimpleCPU *)(&owner);
314  for (ThreadID tid = 0; tid < cpu->numThreads; tid++) {
315  if (cpu->getCpuAddrMonitor(tid)->doMonitor(pkt)) {
316  cpu->wakeup(tid);
317  }
318  }
319 
320  // if snoop invalidates, release any associated locks
321  if (pkt->isInvalidate()) {
322  DPRINTF(SimpleCPU, "received invalidation for addr:%#x\n",
323  pkt->getAddr());
324  for (auto &t_info : cpu->threadInfo) {
325  TheISA::handleLockedSnoop(t_info->thread, pkt, cacheBlockMask);
326  }
327  }
328 }
329 
// genMemFragmentRequest -- populate `req` for the next cache-line-sized
// fragment of a (possibly line-crossing) access. Clamps the fragment to
// the end of the current cache line, decrements size_left, and applies
// the matching slice of the byte-enable mask. Returns false (predicate
// off) when no byte in the fragment is enabled, in which case `req` is
// left unset and the caller skips translation/access for this fragment.
// NOTE(review): the listing dropped line 331, the first line of the
// signature (the parameter list continues at 332-334; per the surrounding
// code the dropped text is
// "AtomicSimpleCPU::genMemFragmentRequest(const RequestPtr &req, Addr
// frag_addr," -- confirm against the real atomic.cc).
330 bool
332  int size, Request::Flags flags,
333  const std::vector<bool> &byte_enable,
334  int &frag_size, int &size_left) const
335 {
336  bool predicate = true;
337  Addr inst_addr = threadInfo[curThread]->thread->pcState().instAddr();
338 
// Fragment ends at the cache-line boundary or at the end of the access,
// whichever comes first.
339  frag_size = std::min(
340  cacheLineSize() - addrBlockOffset(frag_addr, cacheLineSize()),
341  (Addr)size_left);
342  size_left -= frag_size;
343 
344  // Set up byte-enable mask for the current fragment
// (size - (frag_size + size_left)) is the offset of this fragment within
// the full access, so the iterators select exactly its mask slice.
345  auto it_start = byte_enable.begin() + (size - (frag_size + size_left));
346  auto it_end = byte_enable.begin() + (size - size_left);
347  if (isAnyActiveElement(it_start, it_end)) {
348  req->setVirt(frag_addr, frag_size, flags, dataRequestorId(),
349  inst_addr);
350  req->setByteEnable(std::vector<bool>(it_start, it_end));
351  } else {
// Fully masked-off fragment: nothing to translate or access.
352  predicate = false;
353  }
354 
355  return predicate;
356 }
357 
// readMem -- atomic-mode data read of `size` bytes at virtual `addr` into
// `data`, split into per-cache-line fragments via genMemFragmentRequest.
// Each fragment is translated and accessed in turn; faults abort the loop
// (prefetch faults are suppressed to NoFault). Reuses the preallocated
// data_read_req. Accumulates dcache_latency for the tick() stall model.
// NOTE(review): the listing dropped line 363 (presumably
// "SimpleExecContext &t_info = *threadInfo[curThread];", which the
// surviving line 364 dereferences) and line 401 (presumably
// "dcache_latency += sendPacket(dcachePort, &pkt);", the non-local-access
// branch of the if at 398) -- confirm against the real atomic.cc.
358 Fault
359 AtomicSimpleCPU::readMem(Addr addr, uint8_t *data, unsigned size,
360  Request::Flags flags,
361  const std::vector<bool> &byte_enable)
362 {
364  SimpleThread *thread = t_info.thread;
365 
366  // use the CPU's statically allocated read request and packet objects
367  const RequestPtr &req = data_read_req;
368 
369  if (traceData)
370  traceData->setMem(addr, size, flags);
371 
372  dcache_latency = 0;
373 
374  req->taskId(taskId());
375 
376  Addr frag_addr = addr;
377  int frag_size = 0;
378  int size_left = size;
379  bool predicate;
380  Fault fault = NoFault;
381 
// One iteration per cache-line fragment of the access.
382  while (1) {
383  predicate = genMemFragmentRequest(req, frag_addr, size, flags,
384  byte_enable, frag_size, size_left);
385 
386  // translate to physical address
387  if (predicate) {
388  fault = thread->mmu->translateAtomic(req, thread->getTC(),
389  BaseTLB::Read);
390  }
391 
392  // Now do the access.
393  if (predicate && fault == NoFault &&
394  !req->getFlags().isSet(Request::NO_ACCESS)) {
395  Packet pkt(req, Packet::makeReadCmd(req));
// Packet reads directly into the caller's buffer; no copy afterwards.
396  pkt.dataStatic(data);
397 
398  if (req->isLocalAccess()) {
399  dcache_latency += req->localAccessor(thread->getTC(), &pkt);
400  } else {
402  }
403  dcache_access = true;
404 
405  assert(!pkt.isError());
406 
// Establish an LL/SC reservation for load-linked reads.
407  if (req->isLLSC()) {
408  TheISA::handleLockedRead(thread, req);
409  }
410  }
411 
412  //If there's a fault, return it
413  if (fault != NoFault)
414  return req->isPrefetch() ? NoFault : fault;
415 
416  // If we don't need to access further cache lines, stop now.
417  if (size_left == 0) {
// A completed locked-RMW read opens the locked window closed by the
// matching writeMem.
418  if (req->isLockedRMW() && fault == NoFault) {
419  assert(!locked);
420  locked = true;
421  }
422  return fault;
423  }
424 
425  /*
426  * Set up for accessing the next cache line.
427  */
428  frag_addr += frag_size;
429 
430  //Move the pointer we're reading into to the correct location.
431  data += frag_size;
432  }
433 }
434 
// writeMem -- atomic-mode data write of `size` bytes from `data` to
// virtual `addr`, fragmented per cache line like readMem. Handles
// store-conditional (LL/SC), swap/conditional-swap, cache-block-clean
// (data == NULL with STORE_NO_DATA), and locked-RMW completion. `res`
// receives the SC result / swapped-out old value. Reuses data_write_req.
// NOTE(review): the listing dropped line 440 (presumably
// "SimpleExecContext &t_info = *threadInfo[curThread];"), line 475 (the
// BaseTLB::Write mode argument ending the translateAtomic call at
// 473-474), line 485 (the cacheBlockMask-style argument ending the
// handleLockedWrite call at 483-484), and line 502 (presumably
// "dcache_latency += sendPacket(dcachePort, &pkt);") -- confirm against
// the real atomic.cc.
435 Fault
436 AtomicSimpleCPU::writeMem(uint8_t *data, unsigned size, Addr addr,
437  Request::Flags flags, uint64_t *res,
438  const std::vector<bool>& byte_enable)
439 {
441  SimpleThread *thread = t_info.thread;
// Shared source of zeros for cache-block-clean requests; static so it is
// allocated once for all calls.
442  static uint8_t zero_array[64] = {};
443 
444  if (data == NULL) {
445  assert(size <= 64);
446  assert(flags & Request::STORE_NO_DATA);
447  // This must be a cache block cleaning request
448  data = zero_array;
449  }
450 
451  // use the CPU's statically allocated write request and packet objects
452  const RequestPtr &req = data_write_req;
453 
454  if (traceData)
455  traceData->setMem(addr, size, flags);
456 
457  dcache_latency = 0;
458 
459  req->taskId(taskId());
460 
461  Addr frag_addr = addr;
462  int frag_size = 0;
463  int size_left = size;
464  int curr_frag_id = 0;
465  bool predicate;
466  Fault fault = NoFault;
467 
// One iteration per cache-line fragment of the access.
468  while (1) {
469  predicate = genMemFragmentRequest(req, frag_addr, size, flags,
470  byte_enable, frag_size, size_left);
471 
472  // translate to physical address
473  if (predicate)
474  fault = thread->mmu->translateAtomic(req, thread->getTC(),
476 
477  // Now do the access.
478  if (predicate && fault == NoFault) {
479  bool do_access = true; // flag to suppress cache access
480 
// LL/SC and swaps must be single-fragment (they cannot straddle lines).
481  if (req->isLLSC()) {
482  assert(curr_frag_id == 0);
// A failed store-conditional clears do_access: the memory system is
// never touched and *res reports the failure via extra data below.
483  do_access =
484  TheISA::handleLockedWrite(thread, req,
486  } else if (req->isSwap()) {
487  assert(curr_frag_id == 0);
488  if (req->isCondSwap()) {
489  assert(res);
490  req->setExtraData(*res);
491  }
492  }
493 
494  if (do_access && !req->getFlags().isSet(Request::NO_ACCESS)) {
495  Packet pkt(req, Packet::makeWriteCmd(req));
496  pkt.dataStatic(data);
497 
498  if (req->isLocalAccess()) {
499  dcache_latency +=
500  req->localAccessor(thread->getTC(), &pkt);
501  } else {
503 
504  // Notify other threads on this CPU of write
505  threadSnoop(&pkt, curThread);
506  }
507  dcache_access = true;
508  assert(!pkt.isError());
509 
// Swaps return the old memory value through *res.
510  if (req->isSwap()) {
511  assert(res && curr_frag_id == 0);
512  memcpy(res, pkt.getConstPtr<uint8_t>(), size);
513  }
514  }
515 
// Non-swap callers (store-conditional) read the success flag here.
516  if (res && !req->isSwap()) {
517  *res = req->getExtraData();
518  }
519  }
520 
521  //If there's a fault or we don't need to access a second cache line,
522  //stop now.
523  if (fault != NoFault || size_left == 0) {
// Completing a locked-RMW store closes the window opened by readMem.
524  if (req->isLockedRMW() && fault == NoFault) {
525  assert(!req->isMasked());
526  locked = false;
527  }
528 
529  //Suppress faults from prefetches.
530  return req->isPrefetch() ? NoFault : fault;
531  }
532 
533  /*
534  * Set up for accessing the next cache line.
535  */
536  frag_addr += frag_size;
537 
538  //Move the pointer we're reading into to the correct location.
539  data += frag_size;
540 
541  curr_frag_id++;
542  }
543 }
544 
// amoMem -- atomic read-modify-write (AMO) of `size` bytes at virtual
// `addr`; `amo_op` is attached to the request and applied by the memory
// system, with the pre-AMO value returned through `data`. AMOs must fit
// in a single cache line (panic otherwise). Reuses data_amo_req.
// NOTE(review): the listing dropped line 549 (presumably
// "SimpleExecContext &t_info = *threadInfo[curThread];") and line 592
// (presumably "dcache_latency += sendPacket(dcachePort, &pkt);") --
// confirm against the real atomic.cc.
545 Fault
546 AtomicSimpleCPU::amoMem(Addr addr, uint8_t* data, unsigned size,
547  Request::Flags flags, AtomicOpFunctorPtr amo_op)
548 {
550  SimpleThread *thread = t_info.thread;
551 
552  // use the CPU's statically allocated amo request and packet objects
553  const RequestPtr &req = data_amo_req;
554 
555  if (traceData)
556  traceData->setMem(addr, size, flags);
557 
558  //The address of the second part of this access if it needs to be split
559  //across a cache line boundary.
560  Addr secondAddr = roundDown(addr + size - 1, cacheLineSize());
561 
562  // AMO requests that access across a cache line boundary are not
563  // allowed since the cache does not guarantee AMO ops to be executed
564  // atomically in two cache lines
565  // For ISAs such as x86 that requires AMO operations to work on
566  // accesses that cross cache-line boundaries, the cache needs to be
567  // modified to support locking both cache lines to guarantee the
568  // atomicity.
569  panic_if(secondAddr > addr,
570  "AMO request should not access across a cache line boundary.");
571 
572  dcache_latency = 0;
573 
574  req->taskId(taskId());
// The AMO functor travels with the request; ownership moves here.
575  req->setVirt(addr, size, flags, dataRequestorId(),
576  thread->pcState().instAddr(), std::move(amo_op));
577 
578  // translate to physical address
579  Fault fault = thread->mmu->translateAtomic(
580  req, thread->getTC(), BaseTLB::Write);
581 
582  // Now do the access.
583  if (fault == NoFault && !req->getFlags().isSet(Request::NO_ACCESS)) {
584  // We treat AMO accesses as Write accesses with SwapReq command
585  // data will hold the return data of the AMO access
586  Packet pkt(req, Packet::makeWriteCmd(req));
587  pkt.dataStatic(data);
588 
589  if (req->isLocalAccess()) {
590  dcache_latency += req->localAccessor(thread->getTC(), &pkt);
591  } else {
593  }
594 
595  dcache_access = true;
596 
597  assert(!pkt.isError());
598  assert(!req->isLLSC());
599  }
600 
// Prefetch faults are suppressed, matching readMem/writeMem.
601  if (fault != NoFault && req->isPrefetch()) {
602  return NoFault;
603  }
604 
605  //If there's a fault and we're not doing prefetch, return it
606  return fault;
607 }
608 
// tick() -- main execution loop, run once per scheduled tick event.
// Executes up to `width` instructions (or keeps going while `locked`, so
// a locked-RMW pair is never split across ticks): fetch + translate,
// execute, account icache/dcache stall ticks, advance the PC, then
// reschedule itself at curTick() + latency unless drained or Idle.
// NOTE(review): this is the most gap-riddled block in the listing; the
// dropped hyperlinked lines include 615 (presumably swapActiveThread(),
// per the comment at 614), 627 (presumably the t_info reference that 628
// dereferences), 633-638 (loop-body prologue: presumably
// checkPcEventQueue()/checkForInterrupts() and related, per the member
// index), 643, 652 (the second operand of needToFetch at 651, presumably
// the curMacroStaticInst test), 655 (presumably setupFetchRequest), 657
// (the BaseTLB::Execute-style mode argument of translateAtomic at 656),
// 705 (the isFirstMicroop/isDelayedCommit clause closing the if at 704),
// and 712 (the simulate_data_stalls && dcache_access condition guarding
// 713). Confirm all against the real atomic.cc before relying on
// control-flow details here.
609 void
611 {
612  DPRINTF(SimpleCPU, "Tick\n");
613 
614  // Change thread if multi-threaded
616 
617  // Set memory request ids to current thread
618  if (numThreads > 1) {
619  ContextID cid = threadContexts[curThread]->contextId();
620 
621  ifetch_req->setContext(cid);
622  data_read_req->setContext(cid);
623  data_write_req->setContext(cid);
624  data_amo_req->setContext(cid);
625  }
626 
628  SimpleThread *thread = t_info.thread;
629 
630  Tick latency = 0;
631 
// `|| locked` keeps ticking past `width` so a locked RMW completes
// within one tick.
632  for (int i = 0; i < width || locked; ++i) {
635 
639  }
640 
641  // We must have just got suspended by a PC event
642  if (_status == Idle) {
644  return;
645  }
646 
647  Fault fault = NoFault;
648 
649  TheISA::PCState pcState = thread->pcState();
650 
// ROM microcode does not require an icache fetch.
651  bool needToFetch = !isRomMicroPC(pcState.microPC()) &&
653  if (needToFetch) {
654  ifetch_req->taskId(taskId());
656  fault = thread->mmu->translateAtomic(ifetch_req, thread->getTC(),
658  }
659 
660  if (fault == NoFault) {
661  Tick icache_latency = 0;
662  bool icache_access = false;
663  dcache_access = false; // assume no dcache access
664 
665  if (needToFetch) {
666  // This is commented out because the decoder would act like
667  // a tiny cache otherwise. It wouldn't be flushed when needed
668  // like the I cache. It should be flushed, and when that works
669  // this code should be uncommented.
670  //Fetch more instruction memory if necessary
671  //if (decoder.needMoreBytes())
672  //{
673  icache_access = true;
674  icache_latency = fetchInstMem();
675  //}
676  }
677 
678  preExecute();
679 
680  Tick stall_ticks = 0;
681  if (curStaticInst) {
682  fault = curStaticInst->execute(&t_info, traceData);
683 
684  // keep an instruction count
685  if (fault == NoFault) {
686  countInst();
// Fire the Commit probe with (thread, inst) for listeners (e.g. tracers).
687  ppCommit->notify(std::make_pair(thread, curStaticInst));
688  } else if (traceData) {
689  traceFault();
690  }
691 
692  if (fault != NoFault &&
693  std::dynamic_pointer_cast<SyscallRetryFault>(fault)) {
694  // Retry execution of system calls after a delay.
695  // Prevents immediate re-execution since conditions which
696  // caused the retry are unlikely to change every tick.
697  stall_ticks += clockEdge(syscallRetryLatency) - curTick();
698  }
699 
700  postExecute();
701  }
702 
703  // @todo remove me after debugging with legion done
704  if (curStaticInst && (!curStaticInst->isMicroop() ||
706  instCnt++;
707  }
708 
709  if (simulate_inst_stalls && icache_access)
710  stall_ticks += icache_latency;
711 
713  stall_ticks += dcache_latency;
714 
715  if (stall_ticks) {
716  // the atomic cpu does its accounting in ticks, so
717  // keep counting in ticks but round to the clock
718  // period
719  latency += divCeil(stall_ticks, clockPeriod()) *
720  clockPeriod();
721  }
722 
723  }
// stayAtPC lets an instruction repeat (e.g. pending microcode state).
724  if (fault != NoFault || !t_info.stayAtPC)
725  advancePC(fault);
726  }
727 
728  if (tryCompleteDrain())
729  return;
730 
731  // instruction takes at least one cycle
732  if (latency < clockPeriod())
733  latency = clockPeriod();
734 
735  if (_status != Idle)
736  reschedule(tickEvent, curTick() + latency, true);
737 }
738 
// fetchInstMem() -- issue the instruction fetch built up in ifetch_req
// through the icache port and return the reported latency. The packet's
// data pointer is the CPU's `inst` field, so the fetched bytes land
// directly in the decoder's input.
// NOTE(review): the listing dropped line 740 (the signature; the member
// index does not list fetchInstMem but tick() calls it at 674) and line
// 742 (the Packet construction that 746 operates on -- presumably a
// MemCmd::ReadReq packet over ifetch_req, which the stray index entry for
// MemCmd::ReadReq supports) -- confirm against the real atomic.cc.
739 Tick
741 {
743 
744  // ifetch_req is initialized to read the instruction
745  // directly into the CPU object's inst field.
746  pkt.dataStatic(&inst);
747 
// Route through the virtual sendPacket hook so tests can intercept.
748  Tick latency = sendPacket(icachePort, &pkt);
749  assert(!pkt.isError());
750 
751  return latency;
752 }
753 
// regProbePoints() -- create the "Commit" probe point notified by tick()
// for every committed instruction (ppCommit was left nullptr by the
// constructor until this hook runs).
// NOTE(review): the listing dropped line 755 (the signature; the index
// places regProbePoints at atomic.cc:755), line 757 (presumably the
// BaseCPU::regProbePoints() chain call), and line 759 (the "ppCommit =
// new ProbePointArg<...>" line whose argument list survives at 760) --
// confirm against the real atomic.cc.
754 void
756 {
758 
760  (getProbeManager(), "Commit");
761 }
762 
// printAddr(a) -- debugging aid: inject a PrintReq for address `a` into
// the memory system (per the member index entry at atomic.cc:764).
// NOTE(review): the listing dropped line 764 (the signature) and line 766
// (the body -- presumably dcachePort.printAddr(a), matching the
// RequestPort::printAddr index entry) -- confirm against the real
// atomic.cc.
763 void
765 {
767 }
Packet::isError
bool isError() const
Definition: packet.hh:584
AtomicOpFunctorPtr
std::unique_ptr< AtomicOpFunctor > AtomicOpFunctorPtr
Definition: amo.hh:239
Event::scheduled
bool scheduled() const
Determine if the current event is scheduled.
Definition: eventq.hh:462
AtomicSimpleCPU::activateContext
void activateContext(ThreadID thread_num) override
Notify the CPU that the indicated context is now active.
Definition: atomic.cc:219
ArmISA::handleLockedWrite
bool handleLockedWrite(XC *xc, const RequestPtr &req, Addr cacheBlockMask)
Definition: locked_mem.hh:111
roundDown
T roundDown(const T &val, const U &align)
This function is used to align addresses in memory.
Definition: intmath.hh:150
utils.hh
SimpleExecContext
Definition: exec_context.hh:57
SimpleThread::pcState
TheISA::PCState pcState() const override
Definition: simple_thread.hh:505
BaseSimpleCPU::traceData
Trace::InstRecord * traceData
Definition: base.hh:95
system.hh
BaseTLB::Read
@ Read
Definition: tlb.hh:57
BaseSimpleCPU::curMacroStaticInst
StaticInstPtr curMacroStaticInst
Definition: base.hh:104
EventBase::CPU_Tick_Pri
static const Priority CPU_Tick_Pri
CPU ticks must come after other associated CPU events (such as writebacks).
Definition: eventq.hh:201
data
const char data[]
Definition: circlebuf.test.cc:47
BaseSimpleCPU::_status
Status _status
Definition: base.hh:121
ArmISA::handleLockedSnoop
void handleLockedSnoop(XC *xc, PacketPtr pkt, Addr cacheBlockMask)
Definition: locked_mem.hh:62
Packet::getAddr
Addr getAddr() const
Definition: packet.hh:755
ArmISA::i
Bitfield< 7 > i
Definition: miscregs_types.hh:63
atomic.hh
EventManager::reschedule
void reschedule(Event &event, Tick when, bool always=false)
Definition: eventq.hh:1034
ThreadID
int16_t ThreadID
Thread index/ID type.
Definition: types.hh:233
AtomicSimpleCPU::sendPacket
virtual Tick sendPacket(RequestPort &port, const PacketPtr &pkt)
Definition: atomic.cc:271
AtomicSimpleCPU::ppCommit
ProbePointArg< std::pair< SimpleThread *, const StaticInstPtr > > * ppCommit
Probe Points.
Definition: atomic.hh:171
AtomicSimpleCPU::init
void init() override
init() is called after all C++ SimObjects have been created and all ports are connected.
Definition: atomic.cc:62
Flags< FlagsType >
AtomicSimpleCPU::drainResume
void drainResume() override
Resume execution after a successful drain.
Definition: atomic.cc:141
ArmISA::width
Bitfield< 4 > width
Definition: miscregs_types.hh:68
ContextID
int ContextID
Globally unique thread context ID.
Definition: types.hh:237
AddressMonitor::doMonitor
bool doMonitor(PacketPtr pkt)
Definition: base.cc:684
BaseCPU::cacheLineSize
unsigned int cacheLineSize() const
Get the cache line size of the system.
Definition: base.hh:391
AtomicSimpleCPU::data_amo_req
RequestPtr data_amo_req
Definition: atomic.hh:165
ProbePointArg
ProbePointArg generates a point for the class of Arg.
Definition: thermal_domain.hh:51
MemCmd::ReadReq
@ ReadReq
Definition: packet.hh:83
Tick
uint64_t Tick
Tick count type.
Definition: types.hh:59
AtomicSimpleCPU::AtomicSimpleCPU
AtomicSimpleCPU(const AtomicSimpleCPUParams &params)
Definition: atomic.cc:73
exetrace.hh
Packet::makeReadCmd
static MemCmd makeReadCmd(const RequestPtr &req)
Generate the appropriate read MemCmd based on the Request flags.
Definition: packet.hh:940
SimpleThread::mmu
BaseMMU * mmu
Definition: simple_thread.hh:130
BaseCPU::syscallRetryLatency
Cycles syscallRetryLatency
Definition: base.hh:626
Trace::InstRecord::setMem
void setMem(Addr a, Addr s, unsigned f)
Definition: insttracer.hh:176
BaseSimpleCPU::setupFetchRequest
void setupFetchRequest(const RequestPtr &req)
Definition: base.cc:288
Packet::isInvalidate
bool isInvalidate() const
Definition: packet.hh:572
RequestPtr
std::shared_ptr< Request > RequestPtr
Definition: request.hh:86
AtomicSimpleCPU::tickEvent
EventFunctionWrapper tickEvent
Definition: atomic.hh:60
AtomicSimpleCPU::regProbePoints
void regProbePoints() override
Register probe points for this object.
Definition: atomic.cc:755
EventManager::deschedule
void deschedule(Event &event)
Definition: eventq.hh:1025
BaseCPU::baseStats
BaseCPU::BaseCPUStats baseStats
std::vector< bool >
BaseCPU::deschedulePowerGatingEvent
void deschedulePowerGatingEvent()
Definition: base.cc:448
BaseSimpleCPU::threadInfo
std::vector< SimpleExecContext * > threadInfo
Definition: base.hh:98
BaseSimpleCPU::traceFault
void traceFault()
Handler used when encountering a fault; its purpose is to tear down the InstRecord.
Definition: base.cc:246
AtomicSimpleCPU::readMem
Fault readMem(Addr addr, uint8_t *data, unsigned size, Request::Flags flags, const std::vector< bool > &byte_enable=std::vector< bool >()) override
Definition: atomic.cc:359
AtomicSimpleCPU::AtomicCPUDPort::recvFunctionalSnoop
virtual void recvFunctionalSnoop(PacketPtr pkt)
Receive a functional snoop request packet from the peer.
Definition: atomic.cc:307
BaseSimpleCPU::countInst
void countInst()
Definition: base.cc:158
faults.hh
output.hh
BaseSimpleCPU::postExecute
void postExecute()
Definition: base.cc:390
AtomicSimpleCPU::icachePort
AtomicCPUPort icachePort
Definition: atomic.hh:158
AtomicSimpleCPU::simulate_inst_stalls
const bool simulate_inst_stalls
Definition: atomic.hh:65
BaseCPU::updateCycleCounters
void updateCycleCounters(CPUState state)
base method keeping track of cycle progression
Definition: base.hh:532
packet.hh
SimpleThread
The SimpleThread object provides a combination of the ThreadState object and the ThreadContext interf...
Definition: simple_thread.hh:90
isAnyActiveElement
bool isAnyActiveElement(const std::vector< bool >::const_iterator &it_start, const std::vector< bool >::const_iterator &it_end)
Test if there is any active element in an enablement range.
Definition: utils.hh:86
AtomicSimpleCPU::AtomicCPUDPort::cpu
BaseSimpleCPU * cpu
Definition: atomic.hh:151
DrainState::Drained
@ Drained
Buffers drained, ready for serialization/handover.
BaseSimpleCPU::activeThreads
std::list< ThreadID > activeThreads
Definition: base.hh:99
DrainState
DrainState
Object drain/handover states.
Definition: drain.hh:71
AtomicSimpleCPU::isCpuDrained
bool isCpuDrained() const
Check if a system is in a drained state.
Definition: atomic.hh:89
AtomicSimpleCPU::tick
void tick()
Definition: atomic.cc:610
AtomicSimpleCPU::dcache_latency
Tick dcache_latency
Definition: atomic.hh:168
StaticInst::isFirstMicroop
bool isFirstMicroop() const
Definition: static_inst.hh:208
divCeil
T divCeil(const T &a, const U &b)
Definition: intmath.hh:114
BaseSimpleCPU::checkPcEventQueue
void checkPcEventQueue()
Definition: base.cc:130
EventManager::schedule
void schedule(Event &event, Tick when)
Definition: eventq.hh:1016
ArmISA::a
Bitfield< 8 > a
Definition: miscregs_types.hh:62
StaticInst::isDelayedCommit
bool isDelayedCommit() const
Definition: static_inst.hh:206
AtomicSimpleCPU::threadSnoop
void threadSnoop(PacketPtr pkt, ThreadID sender)
Perform snoop for other cpu-local thread contexts.
Definition: atomic.cc:123
SimpleThread::getTC
ThreadContext * getTC()
Returns the pointer to this SimpleThread's ThreadContext.
Definition: simple_thread.hh:165
DPRINTF
#define DPRINTF(x,...)
Definition: trace.hh:237
BaseCPU::instCnt
Tick instCnt
Instruction count used for SPARC misc register.
Definition: base.hh:110
Packet::cmdString
const std::string & cmdString() const
Return the string name of the cmd field (for debugging and tracing).
Definition: packet.hh:552
AtomicSimpleCPU::ifetch_req
RequestPtr ifetch_req
Definition: atomic.hh:162
Fault
std::shared_ptr< FaultBase > Fault
Definition: types.hh:246
BaseSimpleCPU::Idle
@ Idle
Definition: base.hh:108
BaseSimpleCPU::inst
TheISA::MachInst inst
Current instruction.
Definition: base.hh:102
AtomicSimpleCPU::data_write_req
RequestPtr data_write_req
Definition: atomic.hh:164
BaseCPU::threadContexts
std::vector< ThreadContext * > threadContexts
Definition: base.hh:269
Clocked::clockEdge
Tick clockEdge(Cycles cycles=Cycles(0)) const
Determine the tick when a cycle begins, by default the current one, but the argument also enables the...
Definition: clocked_object.hh:174
AtomicSimpleCPU::AtomicCPUDPort::cacheBlockMask
Addr cacheBlockMask
Definition: atomic.hh:149
AtomicSimpleCPU::takeOverFrom
void takeOverFrom(BaseCPU *old_cpu) override
Load the state of a CPU from the previous CPU object, invoked on all new CPUs that are about to be sw...
Definition: atomic.cc:202
AtomicSimpleCPU::suspendContext
void suspendContext(ThreadID thread_num) override
Notify the CPU that the indicated context is now suspended.
Definition: atomic.cc:245
AtomicSimpleCPU::locked
bool locked
Definition: atomic.hh:63
BaseCPU::activateContext
virtual void activateContext(ThreadID thread_num)
Notify the CPU that the indicated context is now active.
Definition: base.cc:484
SimpleExecContext::thread
SimpleThread * thread
Definition: exec_context.hh:61
BaseMMU::translateAtomic
Fault translateAtomic(const RequestPtr &req, ThreadContext *tc, BaseTLB::Mode mode)
Definition: mmu.hh:79
Drainable::signalDrainDone
void signalDrainDone() const
Signal that an object is drained.
Definition: drain.hh:301
BaseSimpleCPU::init
void init() override
init() is called after all C++ SimObjects have been created and all ports are connected.
Definition: base.cc:119
AtomicSimpleCPU::printAddr
void printAddr(Addr a)
Print state of address in memory system via PrintReq (for debugging).
Definition: atomic.cc:764
BaseSimpleCPU::checkForInterrupts
void checkForInterrupts()
Definition: base.cc:257
AtomicSimpleCPU::amoMem
Fault amoMem(Addr addr, uint8_t *data, unsigned size, Request::Flags flags, AtomicOpFunctorPtr amo_op) override
Definition: atomic.cc:546
BaseSimpleCPU
Definition: base.hh:80
BaseCPU::getCpuAddrMonitor
AddressMonitor * getCpuAddrMonitor(ThreadID tid)
Definition: base.hh:618
addrBlockOffset
Addr addrBlockOffset(Addr addr, Addr block_size)
Calculates the offset of a given address wrt aligned fixed-size blocks.
Definition: utils.hh:50
Packet::makeWriteCmd
static MemCmd makeWriteCmd(const RequestPtr &req)
Generate the appropriate write MemCmd based on the Request flags.
Definition: packet.hh:961
AtomicSimpleCPU
Definition: atomic.hh:50
BaseSimpleCPU::curThread
ThreadID curThread
Definition: base.hh:83
RequestPort
A RequestPort is a specialisation of a Port, which implements the default protocol for the three diff...
Definition: port.hh:74
BaseCPU::BaseCPUStats::numCycles
Stats::Scalar numCycles
Definition: base.hh:606
NoFault
constexpr decltype(nullptr) NoFault
Definition: types.hh:251
AtomicSimpleCPU::switchOut
void switchOut() override
Prepare for another CPU to take over execution.
Definition: atomic.cc:191
ProbePoints::Packet
ProbePointArg< PacketInfo > Packet
Packet probe point.
Definition: mem.hh:103
Addr
uint64_t Addr
Address type This will probably be moved somewhere else in the near future.
Definition: types.hh:148
name
const std::string & name()
Definition: trace.cc:48
isRomMicroPC
static bool isRomMicroPC(MicroPC upc)
Definition: types.hh:167
Clocked::clockPeriod
Tick clockPeriod() const
Definition: clocked_object.hh:214
packet_access.hh
SimObject::getProbeManager
ProbeManager * getProbeManager()
Get the probe manager for this object.
Definition: sim_object.cc:114
full_system.hh
Drainable::drainState
DrainState drainState() const
Return the current drain state of an object.
Definition: drain.hh:320
RequestPort::owner
SimObject & owner
Definition: port.hh:83
StaticInst::isMicroop
bool isMicroop() const
Definition: static_inst.hh:205
AtomicSimpleCPU::AtomicCPUDPort::recvAtomicSnoop
virtual Tick recvAtomicSnoop(PacketPtr pkt)
Receive an atomic snoop request packet from our peer.
Definition: atomic.cc:277
BaseCPU::taskId
uint32_t taskId() const
Get cpu task id.
Definition: base.hh:219
X86ISA::addr
Bitfield< 3 > addr
Definition: types.hh:80
BaseTLB::Write
@ Write
Definition: tlb.hh:57
BaseCPU
Definition: base.hh:104
BaseSimpleCPU::wakeup
void wakeup(ThreadID tid) override
Definition: base.cc:235
BaseSimpleCPU::curStaticInst
StaticInstPtr curStaticInst
Definition: base.hh:103
Request::NO_ACCESS
@ NO_ACCESS
The request should not cause a memory access.
Definition: request.hh:139
BaseSimpleCPU::swapActiveThread
void swapActiveThread()
Definition: base.cc:142
BaseSimpleCPU::advancePC
void advancePC(const Fault &fault)
Definition: base.cc:466
panic_if
#define panic_if(cond,...)
Conditional panic macro that checks the supplied condition and only panics if the condition is true a...
Definition: logging.hh:197
ArmISA::handleLockedRead
void handleLockedRead(XC *xc, const RequestPtr &req)
Definition: locked_mem.hh:91
AtomicSimpleCPU::width
const int width
Definition: atomic.hh:62
MipsISA::PCState
GenericISA::DelaySlotPCState< MachInst > PCState
Definition: types.hh:41
BaseCPU::schedulePowerGatingEvent
void schedulePowerGatingEvent()
Definition: base.cc:456
BaseCPU::system
System * system
Definition: base.hh:386
Clocked::ticksToCycles
Cycles ticksToCycles(Tick t) const
Definition: clocked_object.hh:219
Packet::dataStatic
void dataStatic(T *p)
Set the data pointer to the following value that should not be freed.
Definition: packet.hh:1108
BaseCPU::takeOverFrom
virtual void takeOverFrom(BaseCPU *cpu)
Load the state of a CPU from the previous CPU object, invoked on all new CPUs that are about to be sw...
Definition: base.cc:550
Packet
A Packet is used to encapsulate a transfer between two objects in the memory system (e....
Definition: packet.hh:258
BaseCPU::switchOut
virtual void switchOut()
Prepare for another CPU to take over execution.
Definition: base.cc:536
BaseSimpleCPU::preExecute
void preExecute()
Definition: base.cc:305
physical.hh
RequestPort::printAddr
void printAddr(Addr a)
Inject a PrintReq for the given address to print the state of that address throughout the memory syst...
Definition: port.cc:154
AtomicSimpleCPU::data_read_req
RequestPtr data_read_req
Definition: atomic.hh:163
BaseCPU::numThreads
ThreadID numThreads
Number of threads we're actually simulating (<= SMT_MAX_THREADS).
Definition: base.hh:378
BaseCPU::dataRequestorId
RequestorID dataRequestorId() const
Reads this CPU's unique data requestor ID.
Definition: base.hh:201
Cycles
Cycles is a wrapper class for representing cycle counts, i.e.
Definition: types.hh:79
Packet::isWrite
bool isWrite() const
Definition: packet.hh:558
Clocked::nextCycle
Tick nextCycle() const
Based on the clock of the object, determine the start tick of the first cycle that is at least one cy...
Definition: clocked_object.hh:210
ThreadContext::Active
@ Active
Running.
Definition: thread_context.hh:103
AtomicSimpleCPU::dcache_access
bool dcache_access
Definition: atomic.hh:167
BaseCPU::regProbePoints
void regProbePoints() override
Register probe points for this object.
Definition: base.cc:337
BaseCPU::CPU_STATE_ON
@ CPU_STATE_ON
Definition: base.hh:523
BaseCPU::suspendContext
virtual void suspendContext(ThreadID thread_num)
Notify the CPU that the indicated context is now suspended.
Definition: base.cc:498
curTick
Tick curTick()
The universal simulation clock.
Definition: cur_tick.hh:43
AtomicSimpleCPU::simulate_data_stalls
const bool simulate_data_stalls
Definition: atomic.hh:64
StaticInst::execute
virtual Fault execute(ExecContext *xc, Trace::InstRecord *traceData) const =0
RequestPort::sendAtomic
Tick sendAtomic(PacketPtr pkt)
Send an atomic request packet, where the data is moved and the state is updated in zero time,...
Definition: port.hh:461
MipsISA::p
Bitfield< 0 > p
Definition: pra_constants.hh:323
AtomicSimpleCPU::tryCompleteDrain
bool tryCompleteDrain()
Try to complete a drain request.
Definition: atomic.cc:174
Request::STORE_NO_DATA
static const FlagsType STORE_NO_DATA
Definition: request.hh:237
BaseCPU::switchedOut
bool switchedOut() const
Determine if the CPU is switched out.
Definition: base.hh:367
fatal_if
#define fatal_if(cond,...)
Conditional fatal macro that checks the supplied condition and only causes a fatal error if the condi...
Definition: logging.hh:219
AtomicSimpleCPU::genMemFragmentRequest
bool genMemFragmentRequest(const RequestPtr &req, Addr frag_addr, int size, Request::Flags flags, const std::vector< bool > &byte_enable, int &frag_size, int &size_left) const
Helper function used to set up the request for a single fragment of a memory access.
Definition: atomic.cc:331
BaseTLB::Execute
@ Execute
Definition: tlb.hh:57
System::isAtomicMode
bool isAtomicMode() const
Is the system in atomic mode?
Definition: system.hh:253
Packet::getConstPtr
const T * getConstPtr() const
Definition: packet.hh:1167
AtomicSimpleCPU::verifyMemoryMode
void verifyMemoryMode() const override
Verify that the system is in a memory mode supported by the CPU.
Definition: atomic.cc:211
AtomicSimpleCPU::writeMem
Fault writeMem(uint8_t *data, unsigned size, Addr addr, Request::Flags flags, uint64_t *res, const std::vector< bool > &byte_enable=std::vector< bool >()) override
Definition: atomic.cc:436
SimpleExecContext::stayAtPC
bool stayAtPC
Definition: exec_context.hh:67
AtomicSimpleCPU::~AtomicSimpleCPU
virtual ~AtomicSimpleCPU()
Definition: atomic.cc:93
DrainState::Draining
@ Draining
Draining buffers pending serialization/handover.
AtomicSimpleCPU::fetchInstMem
virtual Tick fetchInstMem()
Definition: atomic.cc:740
BaseSimpleCPU::Running
@ Running
Definition: base.hh:109
AtomicSimpleCPU::drain
DrainState drain() override
Draining is the process of clearing out the states of SimObjects.These are the SimObjects that are pa...
Definition: atomic.cc:101
AtomicSimpleCPU::dcachePort
AtomicCPUDPort dcachePort
Definition: atomic.hh:159

Generated on Tue Mar 23 2021 19:41:25 for gem5 by doxygen 1.8.17