gem5 v19.0.0.0
atomic.cc
/*
 * Copyright 2014 Google, Inc.
 * Copyright (c) 2012-2013,2015,2017-2019 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 */

#include "cpu/simple/atomic.hh"

#include "arch/locked_mem.hh"
#include "arch/mmapped_ipr.hh"
#include "arch/utility.hh"
#include "base/output.hh"
#include "config/the_isa.hh"
#include "cpu/exetrace.hh"
#include "cpu/utils.hh"
#include "debug/Drain.hh"
#include "debug/ExecFaulting.hh"
#include "debug/SimpleCPU.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "mem/physical.hh"
#include "params/AtomicSimpleCPU.hh"
#include "sim/faults.hh"
#include "sim/full_system.hh"
#include "sim/system.hh"

using namespace std;
using namespace TheISA;

void
AtomicSimpleCPU::init()
{
    BaseSimpleCPU::init();

    int cid = threadContexts[0]->contextId();
    ifetch_req->setContext(cid);
    data_read_req->setContext(cid);
    data_write_req->setContext(cid);
    data_amo_req->setContext(cid);
}

AtomicSimpleCPU::AtomicSimpleCPU(AtomicSimpleCPUParams *p)
    : BaseSimpleCPU(p),
      tickEvent([this]{ tick(); }, "AtomicSimpleCPU tick",
                false, Event::CPU_Tick_Pri),
      width(p->width), locked(false),
      simulate_data_stalls(p->simulate_data_stalls),
      simulate_inst_stalls(p->simulate_inst_stalls),
      icachePort(name() + ".icache_port", this),
      dcachePort(name() + ".dcache_port", this),
      dcache_access(false), dcache_latency(0),
      ppCommit(nullptr)
{
    _status = Idle;
    ifetch_req = std::make_shared<Request>();
    data_read_req = std::make_shared<Request>();
    data_write_req = std::make_shared<Request>();
    data_amo_req = std::make_shared<Request>();
}

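// The four Request objects above are allocated once and reused for every
// subsequent fetch and data access (see the "statically allocated"
// comments in readMem()/writeMem() below); this reuse is safe here
// because each atomic access completes before the next request is
// set up.
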
AtomicSimpleCPU::~AtomicSimpleCPU()
{
    if (tickEvent.scheduled()) {
        deschedule(tickEvent);
    }
}

DrainState
AtomicSimpleCPU::drain()
{
    // Deschedule any power gating event (if any)
    deschedulePowerGatingEvent();

    if (switchedOut())
        return DrainState::Drained;

    if (!isCpuDrained()) {
        DPRINTF(Drain, "Requesting drain.\n");
        return DrainState::Draining;
    } else {
        if (tickEvent.scheduled())
            deschedule(tickEvent);

        activeThreads.clear();
        DPRINTF(Drain, "Not executing microcode, no need to drain.\n");
        return DrainState::Drained;
    }
}

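// Drain handshake, roughly: drain() returns Draining while the CPU is
// mid-instruction (e.g. inside a microcode sequence); tick() then calls
// tryCompleteDrain() after each instruction until isCpuDrained() holds
// and signalDrainDone() is issued; drainResume() later restarts the
// tick event.
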
void
AtomicSimpleCPU::threadSnoop(PacketPtr pkt, ThreadID sender)
{
    DPRINTF(SimpleCPU, "received snoop pkt for addr:%#x %s\n", pkt->getAddr(),
            pkt->cmdString());

    for (ThreadID tid = 0; tid < numThreads; tid++) {
        if (tid != sender) {
            if (getCpuAddrMonitor(tid)->doMonitor(pkt)) {
                wakeup(tid);
            }

            TheISA::handleLockedSnoop(threadInfo[tid]->thread,
                                      pkt, dcachePort.cacheBlockMask);
        }
    }
}

void
AtomicSimpleCPU::drainResume()
{
    assert(!tickEvent.scheduled());
    if (switchedOut())
        return;

    DPRINTF(SimpleCPU, "Resume\n");
    verifyMemoryMode();

    assert(!threadContexts.empty());

    _status = BaseSimpleCPU::Idle;

    for (ThreadID tid = 0; tid < numThreads; tid++) {
        if (threadInfo[tid]->thread->status() == ThreadContext::Active) {
            threadInfo[tid]->notIdleFraction = 1;
            activeThreads.push_back(tid);
            _status = BaseSimpleCPU::Running;

            // Tick if any threads active
            if (!tickEvent.scheduled()) {
                schedule(tickEvent, nextCycle());
            }
        } else {
            threadInfo[tid]->notIdleFraction = 0;
        }
    }

    // Reschedule any power gating event (if any)
    schedulePowerGatingEvent();
}

bool
AtomicSimpleCPU::tryCompleteDrain()
{
    if (drainState() != DrainState::Draining)
        return false;

    DPRINTF(Drain, "tryCompleteDrain.\n");
    if (!isCpuDrained())
        return false;

    DPRINTF(Drain, "CPU done draining, processing drain event\n");
    signalDrainDone();

    return true;
}


void
AtomicSimpleCPU::switchOut()
{
    BaseSimpleCPU::switchOut();

    assert(!tickEvent.scheduled());
    assert(_status == BaseSimpleCPU::Running || _status == Idle);
    assert(isCpuDrained());
}


void
AtomicSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
{
    BaseSimpleCPU::takeOverFrom(oldCPU);

    // The tick event should have been descheduled by drain()
    assert(!tickEvent.scheduled());
}

void
AtomicSimpleCPU::verifyMemoryMode() const
{
    if (!system->isAtomicMode()) {
        fatal("The atomic CPU requires the memory system to be in "
              "'atomic' mode.\n");
    }
}

void
AtomicSimpleCPU::activateContext(ThreadID thread_num)
{
    DPRINTF(SimpleCPU, "ActivateContext %d\n", thread_num);

    assert(thread_num < numThreads);

    threadInfo[thread_num]->notIdleFraction = 1;
    Cycles delta = ticksToCycles(threadInfo[thread_num]->thread->lastActivate -
                                 threadInfo[thread_num]->thread->lastSuspend);
    numCycles += delta;

    if (!tickEvent.scheduled()) {
        // Make sure ticks are still on multiples of cycles
        schedule(tickEvent, clockEdge(Cycles(0)));
    }
    _status = BaseSimpleCPU::Running;
    if (std::find(activeThreads.begin(), activeThreads.end(), thread_num)
        == activeThreads.end()) {
        activeThreads.push_back(thread_num);
    }

    BaseCPU::activateContext(thread_num);
}


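// Cycle-credit example (illustrative, assuming a 500-tick clock period):
// a thread that suspended at tick 10000 and re-activates at tick 12000
// yields delta = ticksToCycles(2000) = 4 cycles, so numCycles keeps
// advancing across the idle gap.
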
void
AtomicSimpleCPU::suspendContext(ThreadID thread_num)
{
    DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);

    assert(thread_num < numThreads);
    activeThreads.remove(thread_num);

    if (_status == Idle)
        return;

    assert(_status == BaseSimpleCPU::Running);

    threadInfo[thread_num]->notIdleFraction = 0;

    if (activeThreads.empty()) {
        _status = Idle;

        if (tickEvent.scheduled()) {
            deschedule(tickEvent);
        }
    }

    BaseCPU::suspendContext(thread_num);
}

Tick
AtomicSimpleCPU::sendPacket(MasterPort &port, const PacketPtr &pkt)
{
    return port.sendAtomic(pkt);
}

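// sendPacket() is virtual, so a derived CPU can change how packets reach
// memory. A hypothetical sketch (MyDelayedCPU is not part of this file;
// it only illustrates the hook): a subclass could model a fixed extra
// cost on every access:
//
//     Tick
//     MyDelayedCPU::sendPacket(MasterPort &port, const PacketPtr &pkt)
//     {
//         // add an assumed fixed delay on top of the atomic access
//         return port.sendAtomic(pkt) + clockPeriod();
//     }
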
Tick
AtomicSimpleCPU::AtomicCPUDPort::recvAtomicSnoop(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "received snoop pkt for addr:%#x %s\n", pkt->getAddr(),
            pkt->cmdString());

    // X86 ISA: Snooping an invalidation for monitor/mwait
    AtomicSimpleCPU *cpu = (AtomicSimpleCPU *)(&owner);

    for (ThreadID tid = 0; tid < cpu->numThreads; tid++) {
        if (cpu->getCpuAddrMonitor(tid)->doMonitor(pkt)) {
            cpu->wakeup(tid);
        }
    }

    // If the snoop invalidates, release any associated locks.
    // When running without caches, invalidation packets will not be
    // received; hence we must also check whether the incoming packet is a
    // write and wake up the processor accordingly.
    if (pkt->isInvalidate() || pkt->isWrite()) {
        DPRINTF(SimpleCPU, "received invalidation for addr:%#x\n",
                pkt->getAddr());
        for (auto &t_info : cpu->threadInfo) {
            TheISA::handleLockedSnoop(t_info->thread, pkt, cacheBlockMask);
        }
    }

    return 0;
}

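// Example (assuming 64-byte cache lines): cacheBlockMask would be
// ~(64 - 1) = ~0x3F, so handleLockedSnoop() compares addresses at
// cache-line granularity; e.g. a snooped write to 0x1008 clears an
// LL/SC reservation held anywhere in the line 0x1000-0x103F.
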
void
AtomicSimpleCPU::AtomicCPUDPort::recvFunctionalSnoop(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "received snoop pkt for addr:%#x %s\n", pkt->getAddr(),
            pkt->cmdString());

    // X86 ISA: Snooping an invalidation for monitor/mwait
    AtomicSimpleCPU *cpu = (AtomicSimpleCPU *)(&owner);
    for (ThreadID tid = 0; tid < cpu->numThreads; tid++) {
        if (cpu->getCpuAddrMonitor(tid)->doMonitor(pkt)) {
            cpu->wakeup(tid);
        }
    }

    // If the snoop invalidates, release any associated locks.
    if (pkt->isInvalidate()) {
        DPRINTF(SimpleCPU, "received invalidation for addr:%#x\n",
                pkt->getAddr());
        for (auto &t_info : cpu->threadInfo) {
            TheISA::handleLockedSnoop(t_info->thread, pkt, cacheBlockMask);
        }
    }
}

bool
AtomicSimpleCPU::genMemFragmentRequest(const RequestPtr &req, Addr frag_addr,
                                       int size, Request::Flags flags,
                                       const std::vector<bool>& byte_enable,
                                       int& frag_size, int& size_left) const
{
    bool predicate = true;
    Addr inst_addr = threadInfo[curThread]->thread->pcState().instAddr();

    frag_size = std::min(
        cacheLineSize() - addrBlockOffset(frag_addr, cacheLineSize()),
        (Addr) size_left);
    size_left -= frag_size;

    if (!byte_enable.empty()) {
        // Set up byte-enable mask for the current fragment
        auto it_start = byte_enable.begin() + (size - (frag_size + size_left));
        auto it_end = byte_enable.begin() + (size - size_left);
        if (isAnyActiveElement(it_start, it_end)) {
            req->setVirt(0, frag_addr, frag_size, flags, dataMasterId(),
                         inst_addr);
            req->setByteEnable(std::vector<bool>(it_start, it_end));
        } else {
            predicate = false;
        }
    } else {
        req->setVirt(0, frag_addr, frag_size, flags, dataMasterId(),
                     inst_addr);
        req->setByteEnable(std::vector<bool>());
    }

    return predicate;
}

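// Worked example (assuming 64-byte cache lines): for an access with
// addr = 0x7C and size = 16, the first call computes
// frag_size = min(64 - 0x3C, 16) = 4 and leaves size_left = 12; the
// caller then advances frag_addr to 0x80, and the second call computes
// frag_size = min(64 - 0, 12) = 12 with size_left = 0, so the access is
// split into a 4-byte and a 12-byte fragment on either side of the line
// boundary.
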
Fault
AtomicSimpleCPU::readMem(Addr addr, uint8_t * data, unsigned size,
                         Request::Flags flags,
                         const std::vector<bool>& byte_enable)
{
    SimpleExecContext& t_info = *threadInfo[curThread];
    SimpleThread* thread = t_info.thread;

    // use the CPU's statically allocated read request and packet objects
    const RequestPtr &req = data_read_req;

    if (traceData)
        traceData->setMem(addr, size, flags);

    dcache_latency = 0;

    req->taskId(taskId());

    Addr frag_addr = addr;
    int frag_size = 0;
    int size_left = size;
    bool predicate;
    Fault fault = NoFault;

    while (1) {
        predicate = genMemFragmentRequest(req, frag_addr, size, flags,
                                          byte_enable, frag_size, size_left);

        // translate to physical address
        if (predicate) {
            fault = thread->dtb->translateAtomic(req, thread->getTC(),
                                                 BaseTLB::Read);
        }

        // Now do the access.
        if (predicate && fault == NoFault &&
            !req->getFlags().isSet(Request::NO_ACCESS)) {
            Packet pkt(req, Packet::makeReadCmd(req));
            pkt.dataStatic(data);

            if (req->isMmappedIpr()) {
                dcache_latency += TheISA::handleIprRead(thread->getTC(), &pkt);
            } else {
                dcache_latency += sendPacket(dcachePort, &pkt);
            }
            dcache_access = true;

            assert(!pkt.isError());

            if (req->isLLSC()) {
                TheISA::handleLockedRead(thread, req);
            }
        }

        // If there's a fault, return it
        if (fault != NoFault) {
            if (req->isPrefetch()) {
                return NoFault;
            } else {
                return fault;
            }
        }

        // If we don't need to access further cache lines, stop now.
        if (size_left == 0) {
            if (req->isLockedRMW() && fault == NoFault) {
                assert(!locked);
                locked = true;
            }
            return fault;
        }

        /*
         * Set up for accessing the next cache line.
         */
        frag_addr += frag_size;

        // Move the pointer we're reading into to the correct location.
        data += frag_size;
    }
}

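// Hypothetical caller sketch (for illustration; names and flags are
// assumed, not taken from this file): a load helper inside the CPU might
// use readMem() roughly as
//
//     uint64_t val = 0;
//     Fault fault = readMem(eff_addr, (uint8_t *)&val, sizeof(val),
//                           Request::Flags(0));
//     // on NoFault, val now holds the bytes read from eff_addr
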
Fault
AtomicSimpleCPU::writeMem(uint8_t *data, unsigned size, Addr addr,
                          Request::Flags flags, uint64_t *res,
                          const std::vector<bool>& byte_enable)
{
    SimpleExecContext& t_info = *threadInfo[curThread];
    SimpleThread* thread = t_info.thread;
    static uint8_t zero_array[64] = {};

    if (data == NULL) {
        assert(size <= 64);
        assert(flags & Request::STORE_NO_DATA);
        // This must be a cache block cleaning request
        data = zero_array;
    }

    // use the CPU's statically allocated write request and packet objects
    const RequestPtr &req = data_write_req;

    if (traceData)
        traceData->setMem(addr, size, flags);

    dcache_latency = 0;

    req->taskId(taskId());

    Addr frag_addr = addr;
    int frag_size = 0;
    int size_left = size;
    int curr_frag_id = 0;
    bool predicate;
    Fault fault = NoFault;

    while (1) {
        predicate = genMemFragmentRequest(req, frag_addr, size, flags,
                                          byte_enable, frag_size, size_left);

        // translate to physical address
        if (predicate)
            fault = thread->dtb->translateAtomic(req, thread->getTC(),
                                                 BaseTLB::Write);

        // Now do the access.
        if (predicate && fault == NoFault) {
            bool do_access = true;  // flag to suppress cache access

            if (req->isLLSC()) {
                assert(curr_frag_id == 0);
                do_access =
                    TheISA::handleLockedWrite(thread, req,
                                              dcachePort.cacheBlockMask);
            } else if (req->isSwap()) {
                assert(curr_frag_id == 0);
                if (req->isCondSwap()) {
                    assert(res);
                    req->setExtraData(*res);
                }
            }

            if (do_access && !req->getFlags().isSet(Request::NO_ACCESS)) {
                Packet pkt(req, Packet::makeWriteCmd(req));
                pkt.dataStatic(data);

                if (req->isMmappedIpr()) {
                    dcache_latency +=
                        TheISA::handleIprWrite(thread->getTC(), &pkt);
                } else {
                    dcache_latency += sendPacket(dcachePort, &pkt);

                    // Notify other threads on this CPU of write
                    threadSnoop(&pkt, curThread);
                }
                dcache_access = true;
                assert(!pkt.isError());

                if (req->isSwap()) {
                    assert(res && curr_frag_id == 0);
                    memcpy(res, pkt.getConstPtr<uint8_t>(), size);
                }
            }

            if (res && !req->isSwap()) {
                *res = req->getExtraData();
            }
        }

        // If there's a fault or we don't need to access a second cache
        // line, stop now.
        if (fault != NoFault || size_left == 0)
        {
            if (req->isLockedRMW() && fault == NoFault) {
                assert(!req->isMasked());
                locked = false;
            }

            if (fault != NoFault && req->isPrefetch()) {
                return NoFault;
            } else {
                return fault;
            }
        }

        /*
         * Set up for accessing the next cache line.
         */
        frag_addr += frag_size;

        // Move the pointer we're writing from to the correct location.
        data += frag_size;

        curr_frag_id++;
    }
}

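// Byte-enable example (illustrative): for a predicated 8-byte store
// whose byte_enable entries are all false, genMemFragmentRequest()
// returns a false predicate for every fragment, so translation and the
// cache access are skipped entirely and writeMem() returns NoFault
// without touching memory.
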
Fault
AtomicSimpleCPU::amoMem(Addr addr, uint8_t* data, unsigned size,
                        Request::Flags flags, AtomicOpFunctorPtr amo_op)
{
    SimpleExecContext& t_info = *threadInfo[curThread];
    SimpleThread* thread = t_info.thread;

    // use the CPU's statically allocated amo request and packet objects
    const RequestPtr &req = data_amo_req;

    if (traceData)
        traceData->setMem(addr, size, flags);

    // The address of the second part of this access if it needs to be
    // split across a cache line boundary.
    Addr secondAddr = roundDown(addr + size - 1, cacheLineSize());

    // AMO requests that access across a cache line boundary are not
    // allowed, since the cache does not guarantee that AMO ops are
    // executed atomically across two cache lines.
    // For ISAs such as x86 that require AMO operations to work on
    // accesses that cross cache-line boundaries, the cache needs to be
    // modified to support locking both cache lines to guarantee
    // atomicity.
    if (secondAddr > addr) {
        panic("AMO request should not access across a cache line boundary\n");
    }

    dcache_latency = 0;

    req->taskId(taskId());
    req->setVirt(0, addr, size, flags, dataMasterId(),
                 thread->pcState().instAddr(), std::move(amo_op));

    // translate to physical address
    Fault fault = thread->dtb->translateAtomic(req, thread->getTC(),
                                               BaseTLB::Write);

    // Now do the access.
    if (fault == NoFault && !req->getFlags().isSet(Request::NO_ACCESS)) {
        // We treat AMO accesses as Write accesses with SwapReq command;
        // data will hold the return data of the AMO access.
        Packet pkt(req, Packet::makeWriteCmd(req));
        pkt.dataStatic(data);

        if (req->isMmappedIpr())
            dcache_latency += TheISA::handleIprRead(thread->getTC(), &pkt);
        else {
            dcache_latency += sendPacket(dcachePort, &pkt);
        }

        dcache_access = true;

        assert(!pkt.isError());
        assert(!req->isLLSC());
    }

    if (fault != NoFault && req->isPrefetch()) {
        return NoFault;
    }

    // If there's a fault and we're not doing a prefetch, return it
    return fault;
}

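// Line-crossing check, worked through (assuming 64-byte cache lines):
// for addr = 0x3C and size = 8, roundDown(0x3C + 8 - 1, 64) =
// roundDown(0x43, 64) = 0x40 > 0x3C, so the access straddles two lines
// and the panic above fires; for addr = 0x38 and size = 8,
// roundDown(0x3F, 64) = 0x00 <= 0x38, so the access stays within one
// line and is allowed.
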
void
AtomicSimpleCPU::tick()
{
    DPRINTF(SimpleCPU, "Tick\n");

    // Change thread if multi-threaded
    swapActiveThread();

    // Set memory request ids to current thread
    if (numThreads > 1) {
        ContextID cid = threadContexts[curThread]->contextId();

        ifetch_req->setContext(cid);
        data_read_req->setContext(cid);
        data_write_req->setContext(cid);
        data_amo_req->setContext(cid);
    }

    SimpleExecContext& t_info = *threadInfo[curThread];
    SimpleThread* thread = t_info.thread;

    Tick latency = 0;

    for (int i = 0; i < width || locked; ++i) {
        numCycles++;
        updateCycleCounters(BaseCPU::CPU_STATE_ON);

        if (!curStaticInst || !curStaticInst->isDelayedCommit()) {
            checkForInterrupts();
            checkPcEventQueue();
        }

        // We must have just got suspended by a PC event
        if (_status == Idle) {
            tryCompleteDrain();
            return;
        }

        Fault fault = NoFault;

        TheISA::PCState pcState = thread->pcState();

        bool needToFetch = !isRomMicroPC(pcState.microPC()) &&
                           !curMacroStaticInst;
        if (needToFetch) {
            ifetch_req->taskId(taskId());
            setupFetchRequest(ifetch_req);
            fault = thread->itb->translateAtomic(ifetch_req, thread->getTC(),
                                                 BaseTLB::Execute);
        }

        if (fault == NoFault) {
            Tick icache_latency = 0;
            bool icache_access = false;
            dcache_access = false; // assume no dcache access

            if (needToFetch) {
                // This is commented out because the decoder would act like
                // a tiny cache otherwise. It wouldn't be flushed when needed
                // like the I cache. It should be flushed, and when that works
                // this code should be uncommented.
                // Fetch more instruction memory if necessary
                //if (decoder.needMoreBytes())
                //{
                icache_access = true;
                Packet ifetch_pkt = Packet(ifetch_req, MemCmd::ReadReq);
                ifetch_pkt.dataStatic(&inst);

                icache_latency = sendPacket(icachePort, &ifetch_pkt);

                assert(!ifetch_pkt.isError());

                // ifetch_req is initialized to read the instruction directly
                // into the CPU object's inst field.
                //}
            }

            preExecute();

            Tick stall_ticks = 0;
            if (curStaticInst) {
                fault = curStaticInst->execute(&t_info, traceData);

                // keep an instruction count
                if (fault == NoFault) {
                    countInst();
                    ppCommit->notify(std::make_pair(thread, curStaticInst));
                }
                else if (traceData && !DTRACE(ExecFaulting)) {
                    delete traceData;
                    traceData = NULL;
                }

                if (fault != NoFault &&
                    dynamic_pointer_cast<SyscallRetryFault>(fault)) {
                    // Retry execution of system calls after a delay.
                    // Prevents immediate re-execution since conditions which
                    // caused the retry are unlikely to change every tick.
                    stall_ticks += clockEdge(syscallRetryLatency) - curTick();
                }

                postExecute();
            }

            // @todo remove me after debugging with legion done
            if (curStaticInst && (!curStaticInst->isMicroop() ||
                                  curStaticInst->isFirstMicroop()))
                instCnt++;

            if (simulate_inst_stalls && icache_access)
                stall_ticks += icache_latency;

            if (simulate_data_stalls && dcache_access)
                stall_ticks += dcache_latency;

            if (stall_ticks) {
                // the atomic cpu does its accounting in ticks, so
                // keep counting in ticks but round to the clock
                // period
                latency += divCeil(stall_ticks, clockPeriod()) *
                    clockPeriod();
            }

        }
        if (fault != NoFault || !t_info.stayAtPC)
            advancePC(fault);
    }

    if (tryCompleteDrain())
        return;

    // instruction takes at least one cycle
    if (latency < clockPeriod())
        latency = clockPeriod();

    if (_status != Idle)
        reschedule(tickEvent, curTick() + latency, true);
}

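// Stall accounting, worked through (assuming a 1000-tick clock period):
// if an instruction's fetch and data accesses report stall_ticks = 2300,
// then divCeil(2300, 1000) * 1000 = 3000 ticks are added to latency,
// i.e. the stall is rounded up to a whole number of clock periods before
// the next tick event is scheduled.
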
void
AtomicSimpleCPU::regProbePoints()
{
    BaseCPU::regProbePoints();

    ppCommit = new ProbePointArg<pair<SimpleThread*, const StaticInstPtr>>
                                (getProbeManager(), "Commit");
}

void
AtomicSimpleCPU::printAddr(Addr a)
{
    dcachePort.printAddr(a);
}

////////////////////////////////////////////////////////////////////////
//
//  AtomicSimpleCPU Simulation Object
//
AtomicSimpleCPU *
AtomicSimpleCPUParams::create()
{
    return new AtomicSimpleCPU(this);
}