gem5 v22.1.0.0
atomic.cc
/*
 * Copyright 2014 Google, Inc.
 * Copyright (c) 2012-2013,2015,2017-2020 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "cpu/simple/atomic.hh"

#include "arch/generic/decoder.hh"
#include "base/output.hh"
#include "cpu/exetrace.hh"
#include "cpu/utils.hh"
#include "debug/Drain.hh"
#include "debug/ExecFaulting.hh"
#include "debug/SimpleCPU.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "mem/physical.hh"
#include "params/BaseAtomicSimpleCPU.hh"
#include "sim/faults.hh"
#include "sim/full_system.hh"
#include "sim/system.hh"

namespace gem5
{

void
AtomicSimpleCPU::init()
{
    BaseSimpleCPU::init();

    int cid = threadContexts[0]->contextId();
    ifetch_req->setContext(cid);
    data_read_req->setContext(cid);
    data_write_req->setContext(cid);
    data_amo_req->setContext(cid);
}

AtomicSimpleCPU::AtomicSimpleCPU(const BaseAtomicSimpleCPUParams &p)
    : BaseSimpleCPU(p),
      tickEvent([this]{ tick(); }, "AtomicSimpleCPU tick",
                false, Event::CPU_Tick_Pri),
      width(p.width), locked(false),
      simulate_data_stalls(p.simulate_data_stalls),
      simulate_inst_stalls(p.simulate_inst_stalls),
      icachePort(name() + ".icache_port", this),
      dcachePort(name() + ".dcache_port", this),
      dcache_access(false), dcache_latency(0),
      ppCommit(nullptr)
{
    _status = Idle;
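
    // These Request objects are allocated once and then reused for
    // every instruction fetch and data access; see the "statically
    // allocated" comments in readMem(), writeMem() and amoMem().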
    ifetch_req = std::make_shared<Request>();
    data_read_req = std::make_shared<Request>();
    data_write_req = std::make_shared<Request>();
    data_amo_req = std::make_shared<Request>();
}


AtomicSimpleCPU::~AtomicSimpleCPU()
{
    if (tickEvent.scheduled()) {
        deschedule(tickEvent);
    }
}

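// A drain request can complete immediately unless the CPU is in the
// middle of a microcoded instruction; see isCpuDrained() in atomic.hh.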
DrainState
AtomicSimpleCPU::drain()
{
    // Deschedule any power gating event (if any)
    deschedulePowerGatingEvent();

    if (switchedOut())
        return DrainState::Drained;

    if (!isCpuDrained()) {
        DPRINTF(Drain, "Requesting drain.\n");
        return DrainState::Draining;
    } else {
        if (tickEvent.scheduled())
            deschedule(tickEvent);

        activeThreads.clear();
        DPRINTF(Drain, "Not executing microcode, no need to drain.\n");
        return DrainState::Drained;
    }
}

void
AtomicSimpleCPU::threadSnoop(PacketPtr pkt, ThreadID sender)
{
    DPRINTF(SimpleCPU, "%s received snoop pkt for addr:%#x %s\n",
            __func__, pkt->getAddr(), pkt->cmdString());

    for (ThreadID tid = 0; tid < numThreads; tid++) {
        if (tid != sender) {
            if (getCpuAddrMonitor(tid)->doMonitor(pkt)) {
                wakeup(tid);
            }

            threadInfo[tid]->thread->getIsaPtr()->handleLockedSnoop(pkt,
                    dcachePort.cacheBlockMask);
        }
    }
}

void
AtomicSimpleCPU::drainResume()
{
    assert(!tickEvent.scheduled());
    if (switchedOut())
        return;

    DPRINTF(SimpleCPU, "Resume\n");
    verifyMemoryMode();

    assert(!threadContexts.empty());

    _status = BaseSimpleCPU::Idle;

    for (ThreadID tid = 0; tid < numThreads; tid++) {
        if (threadInfo[tid]->thread->status() == ThreadContext::Active) {
            threadInfo[tid]->execContextStats.notIdleFraction = 1;
            activeThreads.push_back(tid);
            _status = BaseSimpleCPU::Running;

            // Tick if any threads active
            if (!tickEvent.scheduled()) {
                schedule(tickEvent, nextCycle());
            }
        } else {
            threadInfo[tid]->execContextStats.notIdleFraction = 0;
        }
    }

    // Reschedule any power gating event (if any)
    schedulePowerGatingEvent();
}

bool
AtomicSimpleCPU::tryCompleteDrain()
{
    if (drainState() != DrainState::Draining)
        return false;

    DPRINTF(Drain, "tryCompleteDrain.\n");
    if (!isCpuDrained())
        return false;

    DPRINTF(Drain, "CPU done draining, processing drain event\n");
    signalDrainDone();

    return true;
}


void
AtomicSimpleCPU::switchOut()
{
    BaseSimpleCPU::switchOut();

    assert(!tickEvent.scheduled());
    assert(_status == BaseSimpleCPU::Running || _status == Idle);
    assert(isCpuDrained());
}


void
AtomicSimpleCPU::takeOverFrom(BaseCPU *old_cpu)
{
    BaseSimpleCPU::takeOverFrom(old_cpu);

    // The tick event should have been descheduled by drain()
    assert(!tickEvent.scheduled());
}

void
AtomicSimpleCPU::verifyMemoryMode() const
{
    fatal_if(!system->isAtomicMode(),
             "The atomic CPU requires the memory system to be in "
             "'atomic' mode.");
}

void
AtomicSimpleCPU::activateContext(ThreadID thread_num)
{
    DPRINTF(SimpleCPU, "ActivateContext %d\n", thread_num);

    assert(thread_num < numThreads);

    threadInfo[thread_num]->execContextStats.notIdleFraction = 1;
    Cycles delta = ticksToCycles(threadInfo[thread_num]->thread->lastActivate -
                                 threadInfo[thread_num]->thread->lastSuspend);
    baseStats.numCycles += delta;

    if (!tickEvent.scheduled()) {
        // Make sure ticks are still on multiples of cycles
        schedule(tickEvent, clockEdge(Cycles(0)));
    }
    _status = BaseSimpleCPU::Running;
    if (std::find(activeThreads.begin(), activeThreads.end(), thread_num) ==
        activeThreads.end()) {
        activeThreads.push_back(thread_num);
    }

    BaseCPU::activateContext(thread_num);
}


void
AtomicSimpleCPU::suspendContext(ThreadID thread_num)
{
    DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);

    assert(thread_num < numThreads);
    activeThreads.remove(thread_num);

    if (_status == Idle)
        return;

    assert(_status == BaseSimpleCPU::Running);

    threadInfo[thread_num]->execContextStats.notIdleFraction = 0;

    if (activeThreads.empty()) {
        _status = Idle;

        if (tickEvent.scheduled()) {
            deschedule(tickEvent);
        }
    }

    BaseCPU::suspendContext(thread_num);
}

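// In atomic mode a memory access is a single synchronous call down the
// port: sendAtomic() moves the data and updates state in zero simulated
// time and returns an estimated access latency in ticks, rather than
// scheduling a response event as a timing-mode port would.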
Tick
AtomicSimpleCPU::sendPacket(RequestPort &port, const PacketPtr &pkt)
{
    return port.sendAtomic(pkt);
}

Tick
AtomicSimpleCPU::AtomicCPUDPort::recvAtomicSnoop(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "%s received atomic snoop pkt for addr:%#x %s\n",
            __func__, pkt->getAddr(), pkt->cmdString());

    // X86 ISA: Snooping an invalidation for monitor/mwait
    AtomicSimpleCPU *cpu = (AtomicSimpleCPU *)(&owner);

    for (ThreadID tid = 0; tid < cpu->numThreads; tid++) {
        if (cpu->getCpuAddrMonitor(tid)->doMonitor(pkt)) {
            cpu->wakeup(tid);
        }
    }

    // If the snoop invalidates, release any associated locks.
    // When run without caches, invalidation packets will not be
    // received; hence we must also check whether the incoming packet
    // is a write and wake up the processor accordingly.
    if (pkt->isInvalidate() || pkt->isWrite()) {
        DPRINTF(SimpleCPU, "received invalidation for addr:%#x\n",
                pkt->getAddr());
        for (auto &t_info : cpu->threadInfo) {
            t_info->thread->getIsaPtr()->handleLockedSnoop(pkt,
                    cacheBlockMask);
        }
    }

    return 0;
}

void
AtomicSimpleCPU::AtomicCPUDPort::recvFunctionalSnoop(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "%s received functional snoop pkt for addr:%#x %s\n",
            __func__, pkt->getAddr(), pkt->cmdString());

    // X86 ISA: Snooping an invalidation for monitor/mwait
    AtomicSimpleCPU *cpu = (AtomicSimpleCPU *)(&owner);
    for (ThreadID tid = 0; tid < cpu->numThreads; tid++) {
        if (cpu->getCpuAddrMonitor(tid)->doMonitor(pkt)) {
            cpu->wakeup(tid);
        }
    }

    // If the snoop invalidates, release any associated locks.
    if (pkt->isInvalidate()) {
        DPRINTF(SimpleCPU, "received invalidation for addr:%#x\n",
                pkt->getAddr());
        for (auto &t_info : cpu->threadInfo) {
            t_info->thread->getIsaPtr()->handleLockedSnoop(pkt,
                    cacheBlockMask);
        }
    }
}

bool
AtomicSimpleCPU::genMemFragmentRequest(const RequestPtr &req, Addr frag_addr,
                                       int size, Request::Flags flags,
                                       const std::vector<bool> &byte_enable,
                                       int &frag_size, int &size_left) const
{
    bool predicate = true;
    Addr inst_addr = threadInfo[curThread]->thread->pcState().instAddr();

    frag_size = std::min(
        cacheLineSize() - addrBlockOffset(frag_addr, cacheLineSize()),
        (Addr)size_left);
    size_left -= frag_size;
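
    // Worked example (illustrative values): with 64-byte cache lines,
    // a 16-byte access at frag_addr 0x3f8 has a block offset of 0x38,
    // so this fragment covers min(64 - 0x38, 16) = 8 bytes, and the
    // caller retries with the remaining 8 bytes at 0x400.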

    // Set up byte-enable mask for the current fragment
    auto it_start = byte_enable.begin() + (size - (frag_size + size_left));
    auto it_end = byte_enable.begin() + (size - size_left);
    if (isAnyActiveElement(it_start, it_end)) {
        req->setVirt(frag_addr, frag_size, flags, dataRequestorId(),
                     inst_addr);
        req->setByteEnable(std::vector<bool>(it_start, it_end));
    } else {
        predicate = false;
    }

    return predicate;
}

Fault
AtomicSimpleCPU::readMem(Addr addr, uint8_t *data, unsigned size,
                         Request::Flags flags,
                         const std::vector<bool> &byte_enable)
{
    SimpleExecContext &t_info = *threadInfo[curThread];
    SimpleThread *thread = t_info.thread;

    // use the CPU's statically allocated read request and packet objects
    const RequestPtr &req = data_read_req;

    if (traceData)
        traceData->setMem(addr, size, flags);

    dcache_latency = 0;

    req->taskId(taskId());

    Addr frag_addr = addr;
    int frag_size = 0;
    int size_left = size;
    bool predicate;
    Fault fault = NoFault;

    while (1) {
        predicate = genMemFragmentRequest(req, frag_addr, size, flags,
                                          byte_enable, frag_size, size_left);

        // translate to physical address
        if (predicate) {
            fault = thread->mmu->translateAtomic(req, thread->getTC(),
                                                 BaseMMU::Read);
        }

        // Now do the access.
        if (predicate && fault == NoFault &&
            !req->getFlags().isSet(Request::NO_ACCESS)) {
            Packet pkt(req, Packet::makeReadCmd(req));
            pkt.dataStatic(data);

            if (req->isLocalAccess()) {
                dcache_latency += req->localAccessor(thread->getTC(), &pkt);
            } else {
                dcache_latency += sendPacket(dcachePort, &pkt);
            }
            dcache_access = true;

            panic_if(pkt.isError(), "Data fetch (%s) failed: %s",
                     pkt.getAddrRange().to_string(), pkt.print());

            if (req->isLLSC()) {
                thread->getIsaPtr()->handleLockedRead(req);
            }
        }

        // If there's a fault, return it
        if (fault != NoFault)
            return req->isPrefetch() ? NoFault : fault;

        // If we don't need to access further cache lines, stop now.
        if (size_left == 0) {
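            // A locked RMW read (e.g. the load half of an x86 locked
            // instruction) sets 'locked'; the matching write in
            // writeMem() clears it, and tick() keeps executing while
            // 'locked' is set so the pair completes atomically.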
            if (req->isLockedRMW() && fault == NoFault) {
                assert(!locked);
                locked = true;
            }
            return fault;
        }

        /*
         * Set up for accessing the next cache line.
         */
        frag_addr += frag_size;

        // Move the pointer we're reading into to the correct location.
        data += frag_size;
    }
}

Fault
AtomicSimpleCPU::writeMem(uint8_t *data, unsigned size, Addr addr,
                          Request::Flags flags, uint64_t *res,
                          const std::vector<bool> &byte_enable)
{
    SimpleExecContext &t_info = *threadInfo[curThread];
    SimpleThread *thread = t_info.thread;
    static uint8_t zero_array[64] = {};

    if (data == NULL) {
        assert(size <= 64);
        assert(flags & Request::STORE_NO_DATA);
        // This must be a cache block cleaning request
        data = zero_array;
    }

    // use the CPU's statically allocated write request and packet objects
    const RequestPtr &req = data_write_req;

    if (traceData)
        traceData->setMem(addr, size, flags);

    dcache_latency = 0;

    req->taskId(taskId());

    Addr frag_addr = addr;
    int frag_size = 0;
    int size_left = size;
    int curr_frag_id = 0;
    bool predicate;
    Fault fault = NoFault;

    while (1) {
        predicate = genMemFragmentRequest(req, frag_addr, size, flags,
                                          byte_enable, frag_size, size_left);

        // translate to physical address
        if (predicate)
            fault = thread->mmu->translateAtomic(req, thread->getTC(),
                                                 BaseMMU::Write);

        // Now do the access.
        if (predicate && fault == NoFault) {
            bool do_access = true;  // flag to suppress cache access

            if (req->isLLSC()) {
                assert(curr_frag_id == 0);
                do_access = thread->getIsaPtr()->handleLockedWrite(req,
                        dcachePort.cacheBlockMask);
            } else if (req->isSwap()) {
                assert(curr_frag_id == 0);
                if (req->isCondSwap()) {
                    assert(res);
                    req->setExtraData(*res);
                }
            }

            if (do_access && !req->getFlags().isSet(Request::NO_ACCESS)) {
                Packet pkt(req, Packet::makeWriteCmd(req));
                pkt.dataStatic(data);

                if (req->isLocalAccess()) {
                    dcache_latency +=
                        req->localAccessor(thread->getTC(), &pkt);
                } else {
                    dcache_latency += sendPacket(dcachePort, &pkt);

                    // Notify other threads on this CPU of write
                    threadSnoop(&pkt, curThread);
                }
                dcache_access = true;
                panic_if(pkt.isError(), "Data write (%s) failed: %s",
                         pkt.getAddrRange().to_string(), pkt.print());
                if (req->isSwap()) {
                    assert(res && curr_frag_id == 0);
                    memcpy(res, pkt.getConstPtr<uint8_t>(), size);
                }
            }

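            // For store-conditional requests, success or failure is
            // reported back through the request's extra data (set by
            // the ISA's handleLockedWrite() or by the memory system)
            // and handed to the instruction through 'res' here.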
            if (res && !req->isSwap()) {
                *res = req->getExtraData();
            }
        }

        // If there's a fault or we don't need to access a second cache
        // line, stop now.
        if (fault != NoFault || size_left == 0) {
            if (req->isLockedRMW() && fault == NoFault) {
                assert(!req->isMasked());
                locked = false;
            }

            // Suppress faults from prefetches.
            return req->isPrefetch() ? NoFault : fault;
        }

        /*
         * Set up for accessing the next cache line.
         */
        frag_addr += frag_size;

        // Move the pointer we're writing from to the correct location.
        data += frag_size;

        curr_frag_id++;
    }
}

Fault
AtomicSimpleCPU::amoMem(Addr addr, uint8_t *data, unsigned size,
                        Request::Flags flags, AtomicOpFunctorPtr amo_op)
{
    SimpleExecContext &t_info = *threadInfo[curThread];
    SimpleThread *thread = t_info.thread;

    // use the CPU's statically allocated amo request and packet objects
    const RequestPtr &req = data_amo_req;

    if (traceData)
        traceData->setMem(addr, size, flags);

    // The address of the second part of this access if it needs to be
    // split across a cache line boundary.
    Addr secondAddr = roundDown(addr + size - 1, cacheLineSize());

    // AMO requests that access across a cache line boundary are not
    // allowed, since the cache does not guarantee that AMO ops are
    // executed atomically across two cache lines.
    // For ISAs such as x86 that require AMO operations to work on
    // accesses that cross cache-line boundaries, the cache needs to be
    // modified to support locking both cache lines to guarantee
    // atomicity.
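    // Example (illustrative values): an 8-byte AMO at addr 0x3c with
    // 64-byte lines gives secondAddr = roundDown(0x43, 64) = 0x40,
    // which is > 0x3c, so the access crosses a line and panics below.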
    panic_if(secondAddr > addr,
             "AMO request should not access across a cache line boundary.");

    dcache_latency = 0;

    req->taskId(taskId());
    req->setVirt(addr, size, flags, dataRequestorId(),
                 thread->pcState().instAddr(), std::move(amo_op));

    // translate to physical address
    Fault fault = thread->mmu->translateAtomic(
        req, thread->getTC(), BaseMMU::Write);

    // Now do the access.
    if (fault == NoFault && !req->getFlags().isSet(Request::NO_ACCESS)) {
        // We treat AMO accesses as Write accesses with SwapReq command;
        // data will hold the return data of the AMO access.
        Packet pkt(req, Packet::makeWriteCmd(req));
        pkt.dataStatic(data);

        if (req->isLocalAccess()) {
            dcache_latency += req->localAccessor(thread->getTC(), &pkt);
        } else {
            dcache_latency += sendPacket(dcachePort, &pkt);
        }

        dcache_access = true;

        panic_if(pkt.isError(), "Atomic access (%s) failed: %s",
                 pkt.getAddrRange().to_string(), pkt.print());
        assert(!req->isLLSC());
    }

    if (fault != NoFault && req->isPrefetch()) {
        return NoFault;
    }

    // If there's a fault and we're not doing prefetch, return it
    return fault;
}

void
AtomicSimpleCPU::tick()
{
    DPRINTF(SimpleCPU, "Tick\n");

    // Change thread if multi-threaded
    swapActiveThread();

    // Set memory request ids to current thread
    if (numThreads > 1) {
        ContextID cid = threadContexts[curThread]->contextId();

        ifetch_req->setContext(cid);
        data_read_req->setContext(cid);
        data_write_req->setContext(cid);
        data_amo_req->setContext(cid);
    }

    SimpleExecContext &t_info = *threadInfo[curThread];
    SimpleThread *thread = t_info.thread;

    Tick latency = 0;

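    // Execute up to 'width' instructions per scheduled tick; while a
    // locked RMW sequence is in flight ('locked' is set by readMem()),
    // keep going until the matching write releases it.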
    for (int i = 0; i < width || locked; ++i) {
        baseStats.numCycles++;
        updateCycleCounters(BaseCPU::CPU_STATE_ON);

        if (!curStaticInst || !curStaticInst->isDelayedCommit()) {
            checkForInterrupts();
            checkPcEventQueue();
        }

        // We must have just got suspended by a PC event
        if (_status == Idle) {
            tryCompleteDrain();
            return;
        }

        serviceInstCountEvents();

        Fault fault = NoFault;

        const PCStateBase &pc = thread->pcState();

        bool needToFetch = !isRomMicroPC(pc.microPC()) && !curMacroStaticInst;
        if (needToFetch) {
            ifetch_req->taskId(taskId());
            setupFetchRequest(ifetch_req);
            fault = thread->mmu->translateAtomic(ifetch_req, thread->getTC(),
                                                 BaseMMU::Execute);
        }

        if (fault == NoFault) {
            Tick icache_latency = 0;
            bool icache_access = false;
            dcache_access = false; // assume no dcache access

            if (needToFetch) {
                // This is commented out because the decoder would act like
                // a tiny cache otherwise. It wouldn't be flushed when needed
                // like the I cache. It should be flushed, and when that works
                // this code should be uncommented.
                // Fetch more instruction memory if necessary
                //if (decoder.needMoreBytes())
                //{
                icache_access = true;
                icache_latency = fetchInstMem();
                //}
            }

            preExecute();

            Tick stall_ticks = 0;
            if (curStaticInst) {
                fault = curStaticInst->execute(&t_info, traceData);

                // keep an instruction count
                if (fault == NoFault) {
                    countInst();
                    ppCommit->notify(std::make_pair(thread, curStaticInst));
                } else if (traceData) {
                    traceFault();
                }

                if (fault != NoFault &&
                    std::dynamic_pointer_cast<SyscallRetryFault>(fault)) {
                    // Retry execution of system calls after a delay.
                    // Prevents immediate re-execution, since the conditions
                    // which caused the retry are unlikely to change every
                    // tick.
                    stall_ticks += clockEdge(syscallRetryLatency) - curTick();
                }

                postExecute();
            }

            // @todo remove me after debugging with legion done
            if (curStaticInst && (!curStaticInst->isMicroop() ||
                        curStaticInst->isFirstMicroop())) {
                instCnt++;
            }

            if (simulate_inst_stalls && icache_access)
                stall_ticks += icache_latency;

            if (simulate_data_stalls && dcache_access)
                stall_ticks += dcache_latency;

            if (stall_ticks) {
                // the atomic cpu does its accounting in ticks, so
                // keep counting in ticks but round to the clock
                // period
                latency += divCeil(stall_ticks, clockPeriod()) *
                    clockPeriod();
            }

        }
        if (fault != NoFault || !t_info.stayAtPC)
            advancePC(fault);
    }

    if (tryCompleteDrain())
        return;

    // instruction takes at least one cycle
    if (latency < clockPeriod())
        latency = clockPeriod();

    if (_status != Idle)
        reschedule(tickEvent, curTick() + latency, true);
}

Tick
AtomicSimpleCPU::fetchInstMem()
{
    auto &decoder = threadInfo[curThread]->thread->decoder;

    Packet pkt = Packet(ifetch_req, MemCmd::ReadReq);

    // ifetch_req is initialized to read the instruction
    // directly into the CPU object's inst field.
    pkt.dataStatic(decoder->moreBytesPtr());

    Tick latency = sendPacket(icachePort, &pkt);
    panic_if(pkt.isError(), "Instruction fetch (%s) failed: %s",
             pkt.getAddrRange().to_string(), pkt.print());

    return latency;
}

void
AtomicSimpleCPU::regProbePoints()
{
    BaseCPU::regProbePoints();

    ppCommit = new ProbePointArg<std::pair<SimpleThread*, const StaticInstPtr>>
                                (getProbeManager(), "Commit");
}

void
AtomicSimpleCPU::printAddr(Addr a)
{
    dcachePort.printAddr(a);
}

} // namespace gem5