gem5 v20.0.0.3
atomic.cc
/*
 * Copyright 2014 Google, Inc.
 * Copyright (c) 2012-2013,2015,2017-2019 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "cpu/simple/atomic.hh"

#include "arch/locked_mem.hh"
#include "arch/utility.hh"
#include "base/output.hh"
#include "config/the_isa.hh"
#include "cpu/exetrace.hh"
#include "cpu/utils.hh"
#include "debug/Drain.hh"
#include "debug/ExecFaulting.hh"
#include "debug/SimpleCPU.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "mem/physical.hh"
#include "params/AtomicSimpleCPU.hh"
#include "sim/faults.hh"
#include "sim/full_system.hh"
#include "sim/system.hh"

using namespace std;
using namespace TheISA;

void
AtomicSimpleCPU::init()
{
    BaseSimpleCPU::init();

    int cid = threadContexts[0]->contextId();
    ifetch_req->setContext(cid);
    data_read_req->setContext(cid);
    data_write_req->setContext(cid);
    data_amo_req->setContext(cid);
}

AtomicSimpleCPU::AtomicSimpleCPU(AtomicSimpleCPUParams *p)
    : BaseSimpleCPU(p),
      tickEvent([this]{ tick(); }, "AtomicSimpleCPU tick",
                false, Event::CPU_Tick_Pri),
      width(p->width), locked(false),
      simulate_data_stalls(p->simulate_data_stalls),
      simulate_inst_stalls(p->simulate_inst_stalls),
      icachePort(name() + ".icache_port", this),
      dcachePort(name() + ".dcache_port", this),
      dcache_access(false), dcache_latency(0),
      ppCommit(nullptr)
{
    _status = Idle;
    ifetch_req = std::make_shared<Request>();
    data_read_req = std::make_shared<Request>();
    data_write_req = std::make_shared<Request>();
    data_amo_req = std::make_shared<Request>();
}
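
// NOTE: the four Request objects built in the constructor above are
// allocated once and reused for every fetch, load, store, and AMO this
// CPU issues (see the "statically allocated" comments in readMem/writeMem).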

AtomicSimpleCPU::~AtomicSimpleCPU()
{
    if (tickEvent.scheduled()) {
        deschedule(tickEvent);
    }
}

DrainState
AtomicSimpleCPU::drain()
{
    // Deschedule any power gating event (if any)
    deschedulePowerGatingEvent();

    if (switchedOut())
        return DrainState::Drained;

    if (!isCpuDrained()) {
        DPRINTF(Drain, "Requesting drain.\n");
        return DrainState::Draining;
    } else {
        if (tickEvent.scheduled())
            deschedule(tickEvent);

        activeThreads.clear();
        DPRINTF(Drain, "Not executing microcode, no need to drain.\n");
        return DrainState::Drained;
    }
}

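// Wake any other hardware thread on this CPU whose address monitor matches
// the snooped packet, and clear any LLSC reservation the access touches.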
void
AtomicSimpleCPU::threadSnoop(PacketPtr pkt, ThreadID sender)
{
    DPRINTF(SimpleCPU, "received snoop pkt for addr:%#x %s\n", pkt->getAddr(),
            pkt->cmdString());

    for (ThreadID tid = 0; tid < numThreads; tid++) {
        if (tid != sender) {
            if (getCpuAddrMonitor(tid)->doMonitor(pkt)) {
                wakeup(tid);
            }

            TheISA::handleLockedSnoop(threadInfo[tid]->thread, pkt,
                                      dcachePort.cacheBlockMask);
        }
    }
}

void
AtomicSimpleCPU::drainResume()
{
    assert(!tickEvent.scheduled());
    if (switchedOut())
        return;

    DPRINTF(SimpleCPU, "Resume\n");
    verifyMemoryMode();

    assert(!threadContexts.empty());

    _status = BaseSimpleCPU::Idle;

    for (ThreadID tid = 0; tid < numThreads; tid++) {
        if (threadInfo[tid]->thread->status() == ThreadContext::Active) {
            threadInfo[tid]->notIdleFraction = 1;
            activeThreads.push_back(tid);
            _status = BaseSimpleCPU::Running;

            // Tick if any threads active
            if (!tickEvent.scheduled()) {
                schedule(tickEvent, nextCycle());
            }
        } else {
            threadInfo[tid]->notIdleFraction = 0;
        }
    }

    // Reschedule any power gating event (if any)
    schedulePowerGatingEvent();
}

bool
AtomicSimpleCPU::tryCompleteDrain()
{
    if (drainState() != DrainState::Draining)
        return false;

    DPRINTF(Drain, "tryCompleteDrain.\n");
    if (!isCpuDrained())
        return false;

    DPRINTF(Drain, "CPU done draining, processing drain event\n");
    signalDrainDone();

    return true;
}


void
AtomicSimpleCPU::switchOut()
{
    BaseSimpleCPU::switchOut();

    assert(!tickEvent.scheduled());
    assert(_status == BaseSimpleCPU::Running || _status == Idle);
    assert(isCpuDrained());
}


void
AtomicSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
{
    BaseSimpleCPU::takeOverFrom(oldCPU);

    // The tick event should have been descheduled by drain()
    assert(!tickEvent.scheduled());
}

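// NOTE: a run only passes the check below if the Python configuration has
// put the memory system in atomic mode (typically by setting
// system.mem_mode = 'atomic' before m5.instantiate()).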
void
AtomicSimpleCPU::verifyMemoryMode() const
{
    if (!system->isAtomicMode()) {
        fatal("The atomic CPU requires the memory system to be in "
              "'atomic' mode.\n");
    }
}

void
AtomicSimpleCPU::activateContext(ThreadID thread_num)
{
    DPRINTF(SimpleCPU, "ActivateContext %d\n", thread_num);

    assert(thread_num < numThreads);

    threadInfo[thread_num]->notIdleFraction = 1;
    Cycles delta = ticksToCycles(threadInfo[thread_num]->thread->lastActivate -
                                 threadInfo[thread_num]->thread->lastSuspend);
    numCycles += delta;

    if (!tickEvent.scheduled()) {
        //Make sure ticks are still on multiples of cycles
        schedule(tickEvent, clockEdge(Cycles(0)));
    }
    _status = BaseSimpleCPU::Running;
    if (std::find(activeThreads.begin(), activeThreads.end(), thread_num)
        == activeThreads.end()) {
        activeThreads.push_back(thread_num);
    }

    BaseCPU::activateContext(thread_num);
}


void
AtomicSimpleCPU::suspendContext(ThreadID thread_num)
{
    DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);

    assert(thread_num < numThreads);
    activeThreads.remove(thread_num);

    if (_status == Idle)
        return;

    assert(_status == BaseSimpleCPU::Running);

    threadInfo[thread_num]->notIdleFraction = 0;

    if (activeThreads.empty()) {
        _status = Idle;

        if (tickEvent.scheduled()) {
            deschedule(tickEvent);
        }
    }

    BaseCPU::suspendContext(thread_num);
}

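// sendPacket() is a virtual hook: derived CPU models can override it to
// change how atomic requests are issued to the memory system.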
Tick
AtomicSimpleCPU::sendPacket(MasterPort &port, const PacketPtr &pkt)
{
    return port.sendAtomic(pkt);
}

Tick
AtomicSimpleCPU::AtomicCPUDPort::recvAtomicSnoop(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "received snoop pkt for addr:%#x %s\n", pkt->getAddr(),
            pkt->cmdString());

    // X86 ISA: Snooping an invalidation for monitor/mwait
    AtomicSimpleCPU *cpu = (AtomicSimpleCPU *)(&owner);

    for (ThreadID tid = 0; tid < cpu->numThreads; tid++) {
        if (cpu->getCpuAddrMonitor(tid)->doMonitor(pkt)) {
            cpu->wakeup(tid);
        }
    }

    // If the snoop invalidates, release any associated locks.
    // When run without caches, invalidation packets will not be received;
    // hence we must check whether the incoming packet is a write and wake
    // up the processor accordingly.
    if (pkt->isInvalidate() || pkt->isWrite()) {
        DPRINTF(SimpleCPU, "received invalidation for addr:%#x\n",
                pkt->getAddr());
        for (auto &t_info : cpu->threadInfo) {
            TheISA::handleLockedSnoop(t_info->thread, pkt, cacheBlockMask);
        }
    }

    return 0;
}

void
AtomicSimpleCPU::AtomicCPUDPort::recvFunctionalSnoop(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "received snoop pkt for addr:%#x %s\n", pkt->getAddr(),
            pkt->cmdString());

    // X86 ISA: Snooping an invalidation for monitor/mwait
    AtomicSimpleCPU *cpu = (AtomicSimpleCPU *)(&owner);
    for (ThreadID tid = 0; tid < cpu->numThreads; tid++) {
        if (cpu->getCpuAddrMonitor(tid)->doMonitor(pkt)) {
            cpu->wakeup(tid);
        }
    }

    // If the snoop invalidates, release any associated locks.
    if (pkt->isInvalidate()) {
        DPRINTF(SimpleCPU, "received invalidation for addr:%#x\n",
                pkt->getAddr());
        for (auto &t_info : cpu->threadInfo) {
            TheISA::handleLockedSnoop(t_info->thread, pkt, cacheBlockMask);
        }
    }
}

bool
AtomicSimpleCPU::genMemFragmentRequest(const RequestPtr &req, Addr frag_addr,
                                       int size, Request::Flags flags,
                                       const std::vector<bool>& byte_enable,
                                       int& frag_size, int& size_left) const
{
    bool predicate = true;
    Addr inst_addr = threadInfo[curThread]->thread->pcState().instAddr();

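    // Clamp this fragment to whatever is left of the current cache line
    // (or to the remainder of the access, if that is smaller).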
    frag_size = std::min(
        cacheLineSize() - addrBlockOffset(frag_addr, cacheLineSize()),
        (Addr) size_left);
    size_left -= frag_size;

    if (!byte_enable.empty()) {
        // Set up byte-enable mask for the current fragment
        auto it_start = byte_enable.begin() + (size - (frag_size + size_left));
        auto it_end = byte_enable.begin() + (size - size_left);
        if (isAnyActiveElement(it_start, it_end)) {
            req->setVirt(frag_addr, frag_size, flags, dataMasterId(),
                         inst_addr);
            req->setByteEnable(std::vector<bool>(it_start, it_end));
        } else {
            predicate = false;
        }
    } else {
        req->setVirt(frag_addr, frag_size, flags, dataMasterId(),
                     inst_addr);
        req->setByteEnable(std::vector<bool>());
    }

    return predicate;
}

Fault
AtomicSimpleCPU::readMem(Addr addr, uint8_t * data, unsigned size,
                         Request::Flags flags,
                         const std::vector<bool>& byte_enable)
{
    SimpleExecContext& t_info = *threadInfo[curThread];
    SimpleThread* thread = t_info.thread;

    // use the CPU's statically allocated read request and packet objects
    const RequestPtr &req = data_read_req;

    if (traceData)
        traceData->setMem(addr, size, flags);

    dcache_latency = 0;

    req->taskId(taskId());

    Addr frag_addr = addr;
    int frag_size = 0;
    int size_left = size;
    bool predicate;
    Fault fault = NoFault;

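    // Walk the access one cache-line-sized fragment at a time: each pass
    // translates and performs a single fragment, and the loop exits once
    // size_left reaches zero or a fault is returned.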
    while (1) {
        predicate = genMemFragmentRequest(req, frag_addr, size, flags,
                                          byte_enable, frag_size, size_left);

        // translate to physical address
        if (predicate) {
            fault = thread->dtb->translateAtomic(req, thread->getTC(),
                                                 BaseTLB::Read);
        }

        // Now do the access.
        if (predicate && fault == NoFault &&
            !req->getFlags().isSet(Request::NO_ACCESS)) {
            Packet pkt(req, Packet::makeReadCmd(req));
            pkt.dataStatic(data);

            if (req->isLocalAccess()) {
                dcache_latency += req->localAccessor(thread->getTC(), &pkt);
            } else {
                dcache_latency += sendPacket(dcachePort, &pkt);
            }
            dcache_access = true;

            assert(!pkt.isError());

            if (req->isLLSC()) {
                TheISA::handleLockedRead(thread, req);
            }
        }

        //If there's a fault, return it
        if (fault != NoFault) {
            if (req->isPrefetch()) {
                return NoFault;
            } else {
                return fault;
            }
        }

        // If we don't need to access further cache lines, stop now.
        if (size_left == 0) {
            if (req->isLockedRMW() && fault == NoFault) {
                assert(!locked);
                // The read half of a locked RMW holds the CPU in the tick()
                // loop (via 'locked') until the paired write completes.
                locked = true;
            }
            return fault;
        }

        /*
         * Set up for accessing the next cache line.
         */
        frag_addr += frag_size;

        //Move the pointer we're reading into to the correct location.
        data += frag_size;
    }
}

Fault
AtomicSimpleCPU::writeMem(uint8_t *data, unsigned size, Addr addr,
                          Request::Flags flags, uint64_t *res,
                          const std::vector<bool>& byte_enable)
{
    SimpleExecContext& t_info = *threadInfo[curThread];
    SimpleThread* thread = t_info.thread;
    static uint8_t zero_array[64] = {};

    if (data == NULL) {
        assert(size <= 64);
        assert(flags & Request::STORE_NO_DATA);
        // This must be a cache block cleaning request
        data = zero_array;
    }

    // use the CPU's statically allocated write request and packet objects
    const RequestPtr &req = data_write_req;

    if (traceData)
        traceData->setMem(addr, size, flags);

    dcache_latency = 0;

    req->taskId(taskId());

    Addr frag_addr = addr;
    int frag_size = 0;
    int size_left = size;
    int curr_frag_id = 0;
    bool predicate;
    Fault fault = NoFault;

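    // Same fragment-by-fragment walk as readMem(); curr_frag_id tracks the
    // fragment index because LLSC and swap requests must fit entirely in
    // the first fragment (see the asserts below).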
    while (1) {
        predicate = genMemFragmentRequest(req, frag_addr, size, flags,
                                          byte_enable, frag_size, size_left);

        // translate to physical address
        if (predicate)
            fault = thread->dtb->translateAtomic(req, thread->getTC(),
                                                 BaseTLB::Write);

        // Now do the access.
        if (predicate && fault == NoFault) {
            bool do_access = true;  // flag to suppress cache access

            if (req->isLLSC()) {
                assert(curr_frag_id == 0);
                do_access =
                    TheISA::handleLockedWrite(thread, req,
                                              dcachePort.cacheBlockMask);
            } else if (req->isSwap()) {
                assert(curr_frag_id == 0);
                if (req->isCondSwap()) {
                    assert(res);
                    req->setExtraData(*res);
                }
            }

            if (do_access && !req->getFlags().isSet(Request::NO_ACCESS)) {
                Packet pkt(req, Packet::makeWriteCmd(req));
                pkt.dataStatic(data);

                if (req->isLocalAccess()) {
                    dcache_latency +=
                        req->localAccessor(thread->getTC(), &pkt);
                } else {
                    dcache_latency += sendPacket(dcachePort, &pkt);

                    // Notify other threads on this CPU of write
                    threadSnoop(&pkt, curThread);
                }
                dcache_access = true;
                assert(!pkt.isError());

                if (req->isSwap()) {
                    assert(res && curr_frag_id == 0);
                    memcpy(res, pkt.getConstPtr<uint8_t>(), size);
                }
            }

            if (res && !req->isSwap()) {
                *res = req->getExtraData();
            }
        }

        //If there's a fault or we don't need to access a second cache line,
        //stop now.
        if (fault != NoFault || size_left == 0)
        {
            if (req->isLockedRMW() && fault == NoFault) {
                assert(!req->isMasked());
                // The write half of the locked RMW releases the CPU.
                locked = false;
            }

            if (fault != NoFault && req->isPrefetch()) {
                return NoFault;
            } else {
                return fault;
            }
        }

        /*
         * Set up for accessing the next cache line.
         */
        frag_addr += frag_size;

        //Move the pointer we're reading into to the correct location.
        data += frag_size;

        curr_frag_id++;
    }
}

Fault
AtomicSimpleCPU::amoMem(Addr addr, uint8_t* data, unsigned size,
                        Request::Flags flags, AtomicOpFunctorPtr amo_op)
{
    SimpleExecContext& t_info = *threadInfo[curThread];
    SimpleThread* thread = t_info.thread;

    // use the CPU's statically allocated amo request and packet objects
    const RequestPtr &req = data_amo_req;

    if (traceData)
        traceData->setMem(addr, size, flags);

    //The address of the second part of this access if it needs to be split
    //across a cache line boundary.
    Addr secondAddr = roundDown(addr + size - 1, cacheLineSize());

    // AMO requests that access across a cache line boundary are not
    // allowed since the cache does not guarantee AMO ops to be executed
    // atomically in two cache lines.
    // For ISAs such as x86 that require AMO operations to work on
    // accesses that cross cache-line boundaries, the cache needs to be
    // modified to support locking both cache lines to guarantee the
    // atomicity.
    if (secondAddr > addr) {
        panic("AMO request should not access across a cache line boundary\n");
    }

    dcache_latency = 0;

    req->taskId(taskId());
    req->setVirt(addr, size, flags, dataMasterId(),
                 thread->pcState().instAddr(), std::move(amo_op));

    // translate to physical address
    Fault fault = thread->dtb->translateAtomic(req, thread->getTC(),
                                               BaseTLB::Write);

    // Now do the access.
    if (fault == NoFault && !req->getFlags().isSet(Request::NO_ACCESS)) {
        // We treat AMO accesses as Write accesses with SwapReq command;
        // data will hold the return data of the AMO access.
        Packet pkt(req, Packet::makeWriteCmd(req));
        pkt.dataStatic(data);

        if (req->isLocalAccess())
            dcache_latency += req->localAccessor(thread->getTC(), &pkt);
        else {
            dcache_latency += sendPacket(dcachePort, &pkt);
        }

        dcache_access = true;

        assert(!pkt.isError());
        assert(!req->isLLSC());
    }

    if (fault != NoFault && req->isPrefetch()) {
        return NoFault;
    }

    //If there's a fault and we're not doing prefetch, return it
    return fault;
}

void
AtomicSimpleCPU::tick()
{
    DPRINTF(SimpleCPU, "Tick\n");

    // Change thread if multi-threaded
    swapActiveThread();

    // Set memory request ids to current thread
    if (numThreads > 1) {
        ContextID cid = threadContexts[curThread]->contextId();

        ifetch_req->setContext(cid);
        data_read_req->setContext(cid);
        data_write_req->setContext(cid);
        data_amo_req->setContext(cid);
    }

    SimpleExecContext& t_info = *threadInfo[curThread];
    SimpleThread* thread = t_info.thread;

    Tick latency = 0;

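    // Execute up to 'width' instructions per tick; if a locked RMW read
    // has set 'locked', keep iterating until the matching write clears it
    // so the read-modify-write pair completes without interruption.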
    for (int i = 0; i < width || locked; ++i) {
        numCycles++;
        updateCycleCounters(BaseCPU::CPU_STATE_ON);

        if (!curStaticInst || !curStaticInst->isDelayedCommit()) {
            checkForInterrupts();
            checkPcEventQueue();
        }

        // We must have just got suspended by a PC event
        if (_status == Idle) {
            tryCompleteDrain();
            return;
        }

        Fault fault = NoFault;

        TheISA::PCState pcState = thread->pcState();

        bool needToFetch = !isRomMicroPC(pcState.microPC()) &&
                           !curMacroStaticInst;
        if (needToFetch) {
            ifetch_req->taskId(taskId());
            setupFetchRequest(ifetch_req);
            fault = thread->itb->translateAtomic(ifetch_req, thread->getTC(),
                                                 BaseTLB::Execute);
        }

        if (fault == NoFault) {
            Tick icache_latency = 0;
            bool icache_access = false;
            dcache_access = false; // assume no dcache access

            if (needToFetch) {
                // This is commented out because the decoder would act like
                // a tiny cache otherwise. It wouldn't be flushed when needed
                // like the I cache. It should be flushed, and when that works
                // this code should be uncommented.
                //Fetch more instruction memory if necessary
                //if (decoder.needMoreBytes())
                //{
                icache_access = true;
                Packet ifetch_pkt = Packet(ifetch_req, MemCmd::ReadReq);
                ifetch_pkt.dataStatic(&inst);

                icache_latency = sendPacket(icachePort, &ifetch_pkt);

                assert(!ifetch_pkt.isError());

                // ifetch_req is initialized to read the instruction directly
                // into the CPU object's inst field.
                //}
            }

            preExecute();

            Tick stall_ticks = 0;
            if (curStaticInst) {
                fault = curStaticInst->execute(&t_info, traceData);

                // keep an instruction count
                if (fault == NoFault) {
                    countInst();
                    ppCommit->notify(std::make_pair(thread, curStaticInst));
                } else if (traceData && !DTRACE(ExecFaulting)) {
                    delete traceData;
                    traceData = NULL;
                }

                if (fault != NoFault &&
                    dynamic_pointer_cast<SyscallRetryFault>(fault)) {
                    // Retry execution of system calls after a delay.
                    // Prevents immediate re-execution since conditions which
                    // caused the retry are unlikely to change every tick.
                    stall_ticks += clockEdge(syscallRetryLatency) - curTick();
                }

                postExecute();
            }

            // @todo remove me after debugging with legion done
            if (curStaticInst && (!curStaticInst->isMicroop() ||
                                  curStaticInst->isFirstMicroop()))
                instCnt++;

            if (simulate_inst_stalls && icache_access)
                stall_ticks += icache_latency;

            if (simulate_data_stalls && dcache_access)
                stall_ticks += dcache_latency;

            if (stall_ticks) {
                // the atomic cpu does its accounting in ticks, so
                // keep counting in ticks but round to the clock
                // period
                latency += divCeil(stall_ticks, clockPeriod()) *
                           clockPeriod();
            }
        }
        if (fault != NoFault || !t_info.stayAtPC)
            advancePC(fault);
    }

    if (tryCompleteDrain())
        return;

    // instruction takes at least one cycle
    if (latency < clockPeriod())
        latency = clockPeriod();

    if (_status != Idle)
        reschedule(tickEvent, curTick() + latency, true);
}

void
AtomicSimpleCPU::regProbePoints()
{
    BaseCPU::regProbePoints();

    ppCommit = new ProbePointArg<pair<SimpleThread*, const StaticInstPtr>>
                   (getProbeManager(), "Commit");
}

void
AtomicSimpleCPU::printAddr(Addr a)
{
    dcachePort.printAddr(a);
}

////////////////////////////////////////////////////////////////////////
//
//  AtomicSimpleCPU Simulation Object
//
AtomicSimpleCPU *
AtomicSimpleCPUParams::create()
{
    return new AtomicSimpleCPU(this);
}