gem5  v21.1.0.2
base.cc
1 /*
2  * Copyright (c) 2012-2013, 2018-2019 ARM Limited
3  * All rights reserved.
4  *
5  * The license below extends only to copyright in the software and shall
6  * not be construed as granting a license to any other intellectual
7  * property including but not limited to intellectual property relating
8  * to a hardware implementation of the functionality of the software
9  * licensed hereunder. You may use the software subject to the license
10  * terms below provided that you ensure that this notice is replicated
11  * unmodified and in its entirety in all distributions of the software,
12  * modified or unmodified, in source code or in binary form.
13  *
14  * Copyright (c) 2003-2005 The Regents of The University of Michigan
15  * All rights reserved.
16  *
17  * Redistribution and use in source and binary forms, with or without
18  * modification, are permitted provided that the following conditions are
19  * met: redistributions of source code must retain the above copyright
20  * notice, this list of conditions and the following disclaimer;
21  * redistributions in binary form must reproduce the above copyright
22  * notice, this list of conditions and the following disclaimer in the
23  * documentation and/or other materials provided with the distribution;
24  * neither the name of the copyright holders nor the names of its
25  * contributors may be used to endorse or promote products derived from
26  * this software without specific prior written permission.
27  *
28  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39  */
40 
46 #include "mem/cache/base.hh"
47 
48 #include "base/compiler.hh"
49 #include "base/logging.hh"
50 #include "debug/Cache.hh"
51 #include "debug/CacheComp.hh"
52 #include "debug/CachePort.hh"
53 #include "debug/CacheRepl.hh"
54 #include "debug/CacheVerbose.hh"
55 #include "debug/HWPrefetch.hh"
57 #include "mem/cache/mshr.hh"
59 #include "mem/cache/queue_entry.hh"
62 #include "params/BaseCache.hh"
63 #include "params/WriteAllocator.hh"
64 #include "sim/cur_tick.hh"
65 
66 namespace gem5
67 {
68 
69 BaseCache::CacheResponsePort::CacheResponsePort(const std::string &_name,
70  BaseCache *_cache,
71  const std::string &_label)
72  : QueuedResponsePort(_name, _cache, queue),
73  queue(*_cache, *this, true, _label),
74  blocked(false), mustSendRetry(false),
75  sendRetryEvent([this]{ processSendRetry(); }, _name)
76 {
77 }
78 
79 BaseCache::BaseCache(const BaseCacheParams &p, unsigned blk_size)
80  : ClockedObject(p),
81  cpuSidePort (p.name + ".cpu_side_port", this, "CpuSidePort"),
82  memSidePort(p.name + ".mem_side_port", this, "MemSidePort"),
83  mshrQueue("MSHRs", p.mshrs, 0, p.demand_mshr_reserve, p.name),
84  writeBuffer("write buffer", p.write_buffers, p.mshrs, p.name),
85  tags(p.tags),
86  compressor(p.compressor),
87  prefetcher(p.prefetcher),
88  writeAllocator(p.write_allocator),
89  writebackClean(p.writeback_clean),
90  tempBlockWriteback(nullptr),
91  writebackTempBlockAtomicEvent([this]{ writebackTempBlockAtomic(); },
92  name(), false,
93  EventBase::Delayed_Writeback_Pri),
94  blkSize(blk_size),
95  lookupLatency(p.tag_latency),
96  dataLatency(p.data_latency),
97  forwardLatency(p.tag_latency),
98  fillLatency(p.data_latency),
99  responseLatency(p.response_latency),
100  sequentialAccess(p.sequential_access),
101  numTarget(p.tgts_per_mshr),
102  forwardSnoops(true),
103  clusivity(p.clusivity),
104  isReadOnly(p.is_read_only),
105  replaceExpansions(p.replace_expansions),
106  moveContractions(p.move_contractions),
107  blocked(0),
108  order(0),
109  noTargetMSHR(nullptr),
110  missCount(p.max_miss_count),
111  addrRanges(p.addr_ranges.begin(), p.addr_ranges.end()),
112  system(p.system),
113  stats(*this)
114 {
115  // the MSHR queue has no reserve entries as we check the MSHR
116  // queue on every single allocation, whereas the write queue has
117  // as many reserve entries as we have MSHRs, since every MSHR may
118  // eventually require a writeback, and we do not check the write
119  // buffer before committing to an MSHR
120 
121  // forward snoops is overridden in init() once we can query
122  // whether the connected requestor is actually snooping or not
123 
124  tempBlock = new TempCacheBlk(blkSize);
125 
126  tags->tagsInit();
127  if (prefetcher)
128  prefetcher->setCache(this);
129 
130  fatal_if(compressor && !dynamic_cast<CompressedTags*>(tags),
131  "The tags of compressed cache %s must derive from CompressedTags",
132  name());
133  warn_if(!compressor && dynamic_cast<CompressedTags*>(tags),
134  "Compressed cache %s does not have a compression algorithm", name());
135  if (compressor)
136  compressor->setCache(this);
137 }
138 
139 BaseCache::~BaseCache()
140 {
141  delete tempBlock;
142 }
143 
144 void
145 BaseCache::CacheResponsePort::setBlocked()
146 {
147  assert(!blocked);
148  DPRINTF(CachePort, "Port is blocking new requests\n");
149  blocked = true;
150  // if we already scheduled a retry in this cycle, but it has not yet
151  // happened, cancel it
152  if (sendRetryEvent.scheduled()) {
153  owner.deschedule(sendRetryEvent);
154  DPRINTF(CachePort, "Port descheduled retry\n");
155  mustSendRetry = true;
156  }
157 }
158 
159 void
160 BaseCache::CacheResponsePort::clearBlocked()
161 {
162  assert(blocked);
163  DPRINTF(CachePort, "Port is accepting new requests\n");
164  blocked = false;
165  if (mustSendRetry) {
166  // @TODO: need to find a better time (next cycle?)
167  owner.schedule(sendRetryEvent, curTick() + 1);
168  }
169 }
170 
171 void
172 BaseCache::CacheResponsePort::processSendRetry()
173 {
174  DPRINTF(CachePort, "Port is sending retry\n");
175 
176  // reset the flag and call retry
177  mustSendRetry = false;
178  sendRetryReq();
179 }
180 
181 Addr
182 BaseCache::regenerateBlkAddr(CacheBlk* blk)
183 {
184  if (blk != tempBlock) {
185  return tags->regenerateBlkAddr(blk);
186  } else {
187  return tempBlock->getAddr();
188  }
189 }
190 
191 void
192 BaseCache::init()
193 {
194  if (!cpuSidePort.isConnected() || !memSidePort.isConnected())
195  fatal("Cache ports on %s are not connected\n", name());
196  cpuSidePort.sendRangeChange();
197  forwardSnoops = cpuSidePort.isSnooping();
198 }
199 
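// getPort(): map the configuration-visible port names to the CPU-side and
// memory-side ports; any other name is resolved by ClockedObject.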
200 Port &
201 BaseCache::getPort(const std::string &if_name, PortID idx)
202 {
203  if (if_name == "mem_side") {
204  return memSidePort;
205  } else if (if_name == "cpu_side") {
206  return cpuSidePort;
207  } else {
208  return ClockedObject::getPort(if_name, idx);
209  }
210 }
211 
212 bool
213 BaseCache::inRange(Addr addr) const
214 {
215  for (const auto& r : addrRanges) {
216  if (r.contains(addr)) {
217  return true;
218  }
219  }
220  return false;
221 }
222 
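// handleTimingReqHit(): a request satisfied by this cache either becomes a
// timing response scheduled on the CPU-side port at request_time, or, if no
// response is needed (e.g. a CleanEvict or Writeback sunk here), the packet
// is queued for deletion.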
223 void
224 BaseCache::handleTimingReqHit(PacketPtr pkt, CacheBlk *blk, Tick request_time)
225 {
226  if (pkt->needsResponse()) {
227  // These delays should have been consumed by now
228  assert(pkt->headerDelay == 0);
229  assert(pkt->payloadDelay == 0);
230 
231  pkt->makeTimingResponse();
232 
233  // In this case we are considering request_time that takes
234  // into account the delay of the xbar, if any, and just
235  // lat, neglecting responseLatency, modelling hit latency
236  // just as the value of lat overridden by access(), which calls
237  // the calculateAccessLatency() function.
238  cpuSidePort.schedTimingResp(pkt, request_time);
239  } else {
240  DPRINTF(Cache, "%s satisfied %s, no response needed\n", __func__,
241  pkt->print());
242 
243  // queue the packet for deletion, as the sending cache is
244  // still relying on it; if the block is found in access(),
245  // CleanEvict and Writeback messages will be deleted
246  // here as well
247  pendingDelete.reset(pkt);
248  }
249 }
250 
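// handleTimingReqMiss(): a missing request is either coalesced into an
// existing MSHR, sent to the write buffer (evictions and WriteClean), or
// given a newly allocated MSHR, updating the MSHR hit/miss statistics on
// the way.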
251 void
252 BaseCache::handleTimingReqMiss(PacketPtr pkt, MSHR *mshr, CacheBlk *blk,
253  Tick forward_time, Tick request_time)
254 {
255  if (writeAllocator &&
256  pkt && pkt->isWrite() && !pkt->req->isUncacheable()) {
257  writeAllocator->updateMode(pkt->getAddr(), pkt->getSize(),
258  pkt->getBlockAddr(blkSize));
259  }
260 
261  if (mshr) {
265 
266  //@todo remove hw_pf here
267 
268  // Coalesce unless it was a software prefetch (see above).
269  if (pkt) {
270  assert(!pkt->isWriteback());
271  // CleanEvicts corresponding to blocks which have
272  // outstanding requests in MSHRs are simply sunk here
273  if (pkt->cmd == MemCmd::CleanEvict) {
274  pendingDelete.reset(pkt);
275  } else if (pkt->cmd == MemCmd::WriteClean) {
276  // A WriteClean should never coalesce with any
277  // outstanding cache maintenance requests.
278 
279  // We use forward_time here because there is an
280  // uncached memory write, forwarded to WriteBuffer.
281  allocateWriteBuffer(pkt, forward_time);
282  } else {
283  DPRINTF(Cache, "%s coalescing MSHR for %s\n", __func__,
284  pkt->print());
285 
286  assert(pkt->req->requestorId() < system->maxRequestors());
287  stats.cmdStats(pkt).mshrHits[pkt->req->requestorId()]++;
288 
289  // We use forward_time here because it is the same
290  // considering new targets. We have multiple
291  // requests for the same address here. It
292  // specifies the latency to allocate an internal
293  // buffer and to schedule an event to the queued
294  // port and also takes into account the additional
295  // delay of the xbar.
296  mshr->allocateTarget(pkt, forward_time, order++,
297  allocOnFill(pkt->cmd));
298  if (mshr->getNumTargets() == numTarget) {
299  noTargetMSHR = mshr;
300  setBlocked(Blocked_NoTargets);
301  // need to be careful with this... if this mshr isn't
302  // ready yet (i.e. time > curTick()), we don't want to
303  // move it ahead of mshrs that are ready
304  // mshrQueue.moveToFront(mshr);
305  }
306  }
307  }
308  } else {
309  // no MSHR
310  assert(pkt->req->requestorId() < system->maxRequestors());
311  stats.cmdStats(pkt).mshrMisses[pkt->req->requestorId()]++;
312  if (prefetcher && pkt->isDemand())
313  prefetcher->incrDemandMhsrMisses();
314 
315  if (pkt->isEviction() || pkt->cmd == MemCmd::WriteClean) {
316  // We use forward_time here because there is an
317  // writeback or writeclean, forwarded to WriteBuffer.
318  allocateWriteBuffer(pkt, forward_time);
319  } else {
320  if (blk && blk->isValid()) {
321  // If we have a write miss to a valid block, we
322  // need to mark the block non-readable. Otherwise
323  // if we allow reads while there's an outstanding
324  // write miss, the read could return stale data
325  // out of the cache block... a more aggressive
326  // system could detect the overlap (if any) and
327  // forward data out of the MSHRs, but we don't do
328  // that yet. Note that we do need to leave the
329  // block valid so that it stays in the cache, in
330  // case we get an upgrade response (and hence no
331  // new data) when the write miss completes.
332  // As long as CPUs do proper store/load forwarding
333  // internally, and have a sufficiently weak memory
334  // model, this is probably unnecessary, but at some
335  // point it must have seemed like we needed it...
336  assert((pkt->needsWritable() &&
337  !blk->isSet(CacheBlk::WritableBit)) ||
338  pkt->req->isCacheMaintenance());
339  blk->clearCoherenceBits(CacheBlk::ReadableBit);
340  }
341  // Here we are using forward_time, modelling the latency of
342  // a miss (outbound) just as forwardLatency, neglecting the
343  // lookupLatency component.
344  allocateMissBuffer(pkt, forward_time);
345  }
346  }
347 }
348 
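// recvTimingReq(): entry point for timing-mode requests from the CPU side.
// It performs the tags access, schedules any resulting writebacks, charges
// the hit or miss latency, and hands the packet to the hit/miss handlers
// before scheduling the next prefetch, if any.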
349 void
350 BaseCache::recvTimingReq(PacketPtr pkt)
351 {
352  // anything that is merely forwarded pays for the forward latency and
353  // the delay provided by the crossbar
354  Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;
355 
356  Cycles lat;
357  CacheBlk *blk = nullptr;
358  bool satisfied = false;
359  {
360  PacketList writebacks;
361  // Note that lat is passed by reference here. The function
362  // access() will set the lat value.
363  satisfied = access(pkt, blk, lat, writebacks);
364 
365  // After the evicted blocks are selected, they must be forwarded
366  // to the write buffer to ensure they logically precede anything
367  // happening below
368  doWritebacks(writebacks, clockEdge(lat + forwardLatency));
369  }
370 
371  // Here we charge the headerDelay that takes into account the latencies
372  // of the bus, if the packet comes from it.
373  // The latency charged is just the value set by the access() function.
374  // In case of a hit we are neglecting response latency.
375  // In case of a miss we are neglecting forward latency.
376  Tick request_time = clockEdge(lat);
377  // Here we reset the timing of the packet.
378  pkt->headerDelay = pkt->payloadDelay = 0;
379 
380  if (satisfied) {
381  // notify before anything else as later handleTimingReqHit might turn
382  // the packet in a response
383  ppHit->notify(pkt);
384 
385  if (prefetcher && blk && blk->wasPrefetched()) {
386  DPRINTF(Cache, "Hit on prefetch for addr %#x (%s)\n",
387  pkt->getAddr(), pkt->isSecure() ? "s" : "ns");
388  blk->clearPrefetched();
389  }
390 
391  handleTimingReqHit(pkt, blk, request_time);
392  } else {
393  handleTimingReqMiss(pkt, blk, forward_time, request_time);
394 
395  ppMiss->notify(pkt);
396  }
397 
398  if (prefetcher) {
399  // track time of availability of next prefetch, if any
400  Tick next_pf_time = prefetcher->nextPrefetchReadyTime();
401  if (next_pf_time != MaxTick) {
402  schedMemSideSendEvent(next_pf_time);
403  }
404  }
405 }
406 
407 void
408 BaseCache::handleUncacheableWriteResp(PacketPtr pkt)
409 {
410  Tick completion_time = clockEdge(responseLatency) +
411  pkt->headerDelay + pkt->payloadDelay;
412 
413  // Reset the bus additional time as it is now accounted for
414  pkt->headerDelay = pkt->payloadDelay = 0;
415 
416  cpuSidePort.schedTimingResp(pkt, completion_time);
417 }
418 
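// recvTimingResp(): handle a response arriving from the memory side by
// accounting the MSHR miss latency, filling the block if required,
// servicing the MSHR targets, and then deallocating or re-arming the MSHR.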
419 void
420 BaseCache::recvTimingResp(PacketPtr pkt)
421 {
422  assert(pkt->isResponse());
423 
424  // all header delay should be paid for by the crossbar, unless
425  // this is a prefetch response from above
426  panic_if(pkt->headerDelay != 0 && pkt->cmd != MemCmd::HardPFResp,
427  "%s saw a non-zero packet delay\n", name());
428 
429  const bool is_error = pkt->isError();
430 
431  if (is_error) {
432  DPRINTF(Cache, "%s: Cache received %s with error\n", __func__,
433  pkt->print());
434  }
435 
436  DPRINTF(Cache, "%s: Handling response %s\n", __func__,
437  pkt->print());
438 
439  // if this is a write, we should be looking at an uncacheable
440  // write
441  if (pkt->isWrite()) {
442  assert(pkt->req->isUncacheable());
443  handleUncacheableWriteResp(pkt);
444  return;
445  }
446 
447  // we have dealt with any (uncacheable) writes above, from here on
448  // we know we are dealing with an MSHR due to a miss or a prefetch
449  MSHR *mshr = dynamic_cast<MSHR*>(pkt->popSenderState());
450  assert(mshr);
451 
452  if (mshr == noTargetMSHR) {
453  // we always clear at least one target
454  clearBlocked(Blocked_NoTargets);
455  noTargetMSHR = nullptr;
456  }
457 
458  // Initial target is used just for stats
459  const QueueEntry::Target *initial_tgt = mshr->getTarget();
460  const Tick miss_latency = curTick() - initial_tgt->recvTime;
461  if (pkt->req->isUncacheable()) {
462  assert(pkt->req->requestorId() < system->maxRequestors());
463  stats.cmdStats(initial_tgt->pkt)
464  .mshrUncacheableLatency[pkt->req->requestorId()] += miss_latency;
465  } else {
466  assert(pkt->req->requestorId() < system->maxRequestors());
467  stats.cmdStats(initial_tgt->pkt)
468  .mshrMissLatency[pkt->req->requestorId()] += miss_latency;
469  }
470 
471  PacketList writebacks;
472 
473  bool is_fill = !mshr->isForward &&
474  (pkt->isRead() || pkt->cmd == MemCmd::UpgradeResp ||
475  mshr->wasWholeLineWrite);
476 
477  // make sure that if the mshr was due to a whole line write then
478  // the response is an invalidation
479  assert(!mshr->wasWholeLineWrite || pkt->isInvalidate());
480 
481  CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());
482 
483  if (is_fill && !is_error) {
484  DPRINTF(Cache, "Block for addr %#llx being updated in Cache\n",
485  pkt->getAddr());
486 
487  const bool allocate = (writeAllocator && mshr->wasWholeLineWrite) ?
488  writeAllocator->allocate() : mshr->allocOnFill();
489  blk = handleFill(pkt, blk, writebacks, allocate);
490  assert(blk != nullptr);
491  ppFill->notify(pkt);
492  }
493 
494  if (blk && blk->isValid() && pkt->isClean() && !pkt->isInvalidate()) {
495  // The block was marked not readable while there was a pending
496  // cache maintenance operation, restore its flag.
497  blk->setCoherenceBits(CacheBlk::ReadableBit);
498 
499  // This was a cache clean operation (without invalidate)
500  // and we have a copy of the block already. Since there
501  // is no invalidation, we can promote targets that don't
502  // require a writable copy
503  mshr->promoteReadable();
504  }
505 
506  if (blk && blk->isSet(CacheBlk::WritableBit) &&
507  !pkt->req->isCacheInvalidate()) {
508  // If at this point the referenced block is writable and the
509  // response is not a cache invalidate, we promote targets that
510  // were deferred as we couldn't guarantee a writable copy
511  mshr->promoteWritable();
512  }
513 
514  serviceMSHRTargets(mshr, pkt, blk);
515 
516  if (mshr->promoteDeferredTargets()) {
517  // avoid later read getting stale data while write miss is
518  // outstanding.. see comment in timingAccess()
519  if (blk) {
520  blk->clearCoherenceBits(CacheBlk::ReadableBit);
521  }
522  mshrQueue.markPending(mshr);
523  schedMemSideSendEvent(clockEdge() + pkt->payloadDelay);
524  } else {
525  // while we deallocate an mshr from the queue we still have to
526  // check the isFull condition before and after as we might
527  // have been using the reserved entries already
528  const bool was_full = mshrQueue.isFull();
529  mshrQueue.deallocate(mshr);
530  if (was_full && !mshrQueue.isFull()) {
531  clearBlocked(Blocked_NoMSHRs);
532  }
533 
534  // Request the bus for a prefetch if this deallocation freed enough
535  // MSHRs for a prefetch to take place
536  if (prefetcher && mshrQueue.canPrefetch() && !isBlocked()) {
537  Tick next_pf_time = std::max(prefetcher->nextPrefetchReadyTime(),
538  clockEdge());
539  if (next_pf_time != MaxTick)
540  schedMemSideSendEvent(next_pf_time);
541  }
542  }
543 
544  // if we used temp block, check to see if its valid and then clear it out
545  if (blk == tempBlock && tempBlock->isValid()) {
546  evictBlock(blk, writebacks);
547  }
548 
549  const Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;
550  // copy writebacks to write buffer
551  doWritebacks(writebacks, forward_time);
552 
553  DPRINTF(CacheVerbose, "%s: Leaving with %s\n", __func__, pkt->print());
554  delete pkt;
555 }
556 
557 
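// recvAtomic(): atomic-mode access path. The access is performed inline,
// misses are handled through handleAtomicReqMiss(), writebacks are issued
// synchronously, and the accumulated latency is returned in ticks.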
558 Tick
559 BaseCache::recvAtomic(PacketPtr pkt)
560 {
561  // should assert here that there are no outstanding MSHRs or
562  // writebacks... that would mean that someone used an atomic
563  // access in timing mode
564 
565  // We use lookupLatency here because it is used to specify the latency
566  // to access.
567  Cycles lat = lookupLatency;
568 
569  CacheBlk *blk = nullptr;
570  PacketList writebacks;
571  bool satisfied = access(pkt, blk, lat, writebacks);
572 
573  if (pkt->isClean() && blk && blk->isSet(CacheBlk::DirtyBit)) {
574  // A cache clean operation is looking for a dirty
575  // block. If a dirty block is encountered a WriteClean
576  // will update any copies to the path to the memory
577  // until the point of reference.
578  DPRINTF(CacheVerbose, "%s: packet %s found block: %s\n",
579  __func__, pkt->print(), blk->print());
580  PacketPtr wb_pkt = writecleanBlk(blk, pkt->req->getDest(), pkt->id);
581  writebacks.push_back(wb_pkt);
582  pkt->setSatisfied();
583  }
584 
585  // handle writebacks resulting from the access here to ensure they
586  // logically precede anything happening below
587  doWritebacksAtomic(writebacks);
588  assert(writebacks.empty());
589 
590  if (!satisfied) {
591  lat += handleAtomicReqMiss(pkt, blk, writebacks);
592  }
593 
594  // Note that we don't invoke the prefetcher at all in atomic mode.
595  // It's not clear how to do it properly, particularly for
596  // prefetchers that aggressively generate prefetch candidates and
597  // rely on bandwidth contention to throttle them; these will tend
598  // to pollute the cache in atomic mode since there is no bandwidth
599  // contention. If we ever do want to enable prefetching in atomic
600  // mode, though, this is the place to do it... see timingAccess()
601  // for an example (though we'd want to issue the prefetch(es)
602  // immediately rather than calling requestMemSideBus() as we do
603  // there).
604 
605  // do any writebacks resulting from the response handling
606  doWritebacksAtomic(writebacks);
607 
608  // if we used temp block, check to see if its valid and if so
609  // clear it out, but only do so after the call to recvAtomic is
610  // finished so that any downstream observers (such as a snoop
611  // filter), first see the fill, and only then see the eviction
612  if (blk == tempBlock && tempBlock->isValid()) {
613  // the atomic CPU calls recvAtomic for fetch and load/store
614  // sequentially, and we may already have a tempBlock
615  // writeback from the fetch that we have not yet sent
616  if (tempBlockWriteback) {
617  // if that is the case, write the previous one back, and
618  // do not schedule any new event
619  writebackTempBlockAtomic();
620  } else {
621  // the writeback/clean eviction happens after the call to
622  // recvAtomic has finished (but before any successive
623  // calls), so that the response handling from the fill is
624  // allowed to happen first
625  schedule(writebackTempBlockAtomicEvent, curTick());
626  }
627 
628  tempBlockWriteback = evictBlock(blk);
629  }
630 
631  if (pkt->needsResponse()) {
632  pkt->makeAtomicResponse();
633  }
634 
635  return lat * clockPeriod();
636 }
637 
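// functionalAccess(): functional (debug) accesses probe the tags, the MSHRs
// and the write buffer, and are forwarded on to the other side if this
// cache cannot fully satisfy them.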
638 void
639 BaseCache::functionalAccess(PacketPtr pkt, bool from_cpu_side)
640 {
641  Addr blk_addr = pkt->getBlockAddr(blkSize);
642  bool is_secure = pkt->isSecure();
643  CacheBlk *blk = tags->findBlock(pkt->getAddr(), is_secure);
644  MSHR *mshr = mshrQueue.findMatch(blk_addr, is_secure);
645 
646  pkt->pushLabel(name());
647 
648  CacheBlkPrintWrapper cbpw(blk);
649 
650  // Note that just because an L2/L3 has valid data doesn't mean an
651  // L1 doesn't have a more up-to-date modified copy that still
652  // needs to be found. As a result we always update the request if
653  // we have it, but only declare it satisfied if we are the owner.
654 
655  // see if we have data at all (owned or otherwise)
656  bool have_data = blk && blk->isValid()
657  && pkt->trySatisfyFunctional(&cbpw, blk_addr, is_secure, blkSize,
658  blk->data);
659 
660  // data we have is dirty if marked as such or if we have an
661  // in-service MSHR that is pending a modified line
662  bool have_dirty =
663  have_data && (blk->isSet(CacheBlk::DirtyBit) ||
664  (mshr && mshr->inService && mshr->isPendingModified()));
665 
666  bool done = have_dirty ||
667  cpuSidePort.trySatisfyFunctional(pkt) ||
668  mshrQueue.trySatisfyFunctional(pkt) ||
669  writeBuffer.trySatisfyFunctional(pkt) ||
670  memSidePort.trySatisfyFunctional(pkt);
671 
672  DPRINTF(CacheVerbose, "%s: %s %s%s%s\n", __func__, pkt->print(),
673  (blk && blk->isValid()) ? "valid " : "",
674  have_data ? "data " : "", done ? "done " : "");
675 
676  // We're leaving the cache, so pop cache->name() label
677  pkt->popLabel();
678 
679  if (done) {
680  pkt->makeResponse();
681  } else {
682  // if it came as a request from the CPU side then make sure it
683  // continues towards the memory side
684  if (from_cpu_side) {
685  memSidePort.sendFunctional(pkt);
686  } else if (cpuSidePort.isSnooping()) {
687  // if it came from the memory side, it must be a snoop request
688  // and we should only forward it if we are forwarding snoops
689  cpuSidePort.sendFunctionalSnoop(pkt);
690  }
691  }
692 }
693 
694 void
695 BaseCache::updateBlockData(CacheBlk *blk, const PacketPtr cpkt,
696  bool has_old_data)
697 {
698  DataUpdate data_update(regenerateBlkAddr(blk), blk->isSecure());
699  if (ppDataUpdate->hasListeners()) {
700  if (has_old_data) {
701  data_update.oldData = std::vector<uint64_t>(blk->data,
702  blk->data + (blkSize / sizeof(uint64_t)));
703  }
704  }
705 
706  // Actually perform the data update
707  if (cpkt) {
708  cpkt->writeDataToBlock(blk->data, blkSize);
709  }
710 
711  if (ppDataUpdate->hasListeners()) {
712  if (cpkt) {
713  data_update.newData = std::vector<uint64_t>(blk->data,
714  blk->data + (blkSize / sizeof(uint64_t)));
715  }
716  ppDataUpdate->notify(data_update);
717  }
718 }
719 
720 void
721 BaseCache::cmpAndSwap(CacheBlk *blk, PacketPtr pkt)
722 {
723  assert(pkt->isRequest());
724 
725  uint64_t overwrite_val;
726  bool overwrite_mem;
727  uint64_t condition_val64;
728  uint32_t condition_val32;
729 
730  int offset = pkt->getOffset(blkSize);
731  uint8_t *blk_data = blk->data + offset;
732 
733  assert(sizeof(uint64_t) >= pkt->getSize());
734 
735  // Get a copy of the old block's contents for the probe before the update
736  DataUpdate data_update(regenerateBlkAddr(blk), blk->isSecure());
737  if (ppDataUpdate->hasListeners()) {
738  data_update.oldData = std::vector<uint64_t>(blk->data,
739  blk->data + (blkSize / sizeof(uint64_t)));
740  }
741 
742  overwrite_mem = true;
743  // keep a copy of our possible write value, and copy what is at the
744  // memory address into the packet
745  pkt->writeData((uint8_t *)&overwrite_val);
746  pkt->setData(blk_data);
747 
748  if (pkt->req->isCondSwap()) {
749  if (pkt->getSize() == sizeof(uint64_t)) {
750  condition_val64 = pkt->req->getExtraData();
751  overwrite_mem = !std::memcmp(&condition_val64, blk_data,
752  sizeof(uint64_t));
753  } else if (pkt->getSize() == sizeof(uint32_t)) {
754  condition_val32 = (uint32_t)pkt->req->getExtraData();
755  overwrite_mem = !std::memcmp(&condition_val32, blk_data,
756  sizeof(uint32_t));
757  } else
758  panic("Invalid size for conditional read/write\n");
759  }
760 
761  if (overwrite_mem) {
762  std::memcpy(blk_data, &overwrite_val, pkt->getSize());
763  blk->setCoherenceBits(CacheBlk::DirtyBit);
764 
765  if (ppDataUpdate->hasListeners()) {
766  data_update.newData = std::vector<uint64_t>(blk->data,
767  blk->data + (blkSize / sizeof(uint64_t)));
768  ppDataUpdate->notify(data_update);
769  }
770  }
771 }
772 
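// getNextQueueEntry(): choose the next MSHR or write-buffer entry to send
// downstream. A full write buffer takes priority, otherwise misses are
// favoured, and if both queues are idle a hardware prefetch may be issued
// instead.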
773 QueueEntry*
774 BaseCache::getNextQueueEntry()
775 {
776  // Check both MSHR queue and write buffer for potential requests,
777  // note that null does not mean there is no request, it could
778  // simply be that it is not ready
779  MSHR *miss_mshr = mshrQueue.getNext();
780  WriteQueueEntry *wq_entry = writeBuffer.getNext();
781 
782  // If we got a write buffer request ready, first priority is a
783  // full write buffer, otherwise we favour the miss requests
784  if (wq_entry && (writeBuffer.isFull() || !miss_mshr)) {
785  // need to search MSHR queue for conflicting earlier miss.
786  MSHR *conflict_mshr = mshrQueue.findPending(wq_entry);
787 
788  if (conflict_mshr && conflict_mshr->order < wq_entry->order) {
789  // Service misses in order until conflict is cleared.
790  return conflict_mshr;
791 
792  // @todo Note that we ignore the ready time of the conflict here
793  }
794 
795  // No conflicts; issue write
796  return wq_entry;
797  } else if (miss_mshr) {
798  // need to check for conflicting earlier writeback
799  WriteQueueEntry *conflict_mshr = writeBuffer.findPending(miss_mshr);
800  if (conflict_mshr) {
801  // not sure why we don't check order here... it was in the
802  // original code but commented out.
803 
804  // The only way this happens is if we are
805  // doing a write and we didn't have permissions
806  // then subsequently saw a writeback (owned got evicted)
807  // We need to make sure to perform the writeback first
808  // To preserve the dirty data, then we can issue the write
809 
810  // should we return wq_entry here instead? I.e. do we
811  // have to flush writes in order? I don't think so... not
812  // for Alpha anyway. Maybe for x86?
813  return conflict_mshr;
814 
815  // @todo Note that we ignore the ready time of the conflict here
816  }
817 
818  // No conflicts; issue read
819  return miss_mshr;
820  }
821 
822  // fall through... no pending requests. Try a prefetch.
823  assert(!miss_mshr && !wq_entry);
824  if (prefetcher && mshrQueue.canPrefetch() && !isBlocked()) {
825  // If we have a miss queue slot, we can try a prefetch
826  PacketPtr pkt = prefetcher->getPacket();
827  if (pkt) {
828  Addr pf_addr = pkt->getBlockAddr(blkSize);
829  if (tags->findBlock(pf_addr, pkt->isSecure())) {
830  DPRINTF(HWPrefetch, "Prefetch %#x has hit in cache, "
831  "dropped.\n", pf_addr);
833  // free the request and packet
834  delete pkt;
835  } else if (mshrQueue.findMatch(pf_addr, pkt->isSecure())) {
836  DPRINTF(HWPrefetch, "Prefetch %#x has hit in a MSHR, "
837  "dropped.\n", pf_addr);
839  // free the request and packet
840  delete pkt;
841  } else if (writeBuffer.findMatch(pf_addr, pkt->isSecure())) {
842  DPRINTF(HWPrefetch, "Prefetch %#x has hit in the "
843  "Write Buffer, dropped.\n", pf_addr);
845  // free the request and packet
846  delete pkt;
847  } else {
848  // Update statistic on number of prefetches issued
849  // (hwpf_mshr_misses)
850  assert(pkt->req->requestorId() < system->maxRequestors());
851  stats.cmdStats(pkt).mshrMisses[pkt->req->requestorId()]++;
852 
853  // allocate an MSHR and return it, note
854  // that we send the packet straight away, so do not
855  // schedule the send
856  return allocateMissBuffer(pkt, curTick(), false);
857  }
858  }
859  }
860 
861  return nullptr;
862 }
863 
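// handleEvictions(): refuse the eviction (return false) if any valid victim
// still has an outstanding upgrade or clean MSHR; otherwise write back or
// drop every valid victim and bump the replacement count.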
864 bool
865 BaseCache::handleEvictions(std::vector<CacheBlk*> &evict_blks,
866  PacketList &writebacks)
867 {
868  bool replacement = false;
869  for (const auto& blk : evict_blks) {
870  if (blk->isValid()) {
871  replacement = true;
872 
873  const MSHR* mshr =
874  mshrQueue.findMatch(regenerateBlkAddr(blk), blk->isSecure());
875  if (mshr) {
876  // Must be an outstanding upgrade or clean request on a block
877  // we're about to replace
878  assert((!blk->isSet(CacheBlk::WritableBit) &&
879  mshr->needsWritable()) || mshr->isCleaning());
880  return false;
881  }
882  }
883  }
884 
885  // The victim will be replaced by a new entry, so increase the replacement
886  // counter if a valid block is being replaced
887  if (replacement) {
888  stats.replacements++;
889 
890  // Evict valid blocks associated to this victim block
891  for (auto& blk : evict_blks) {
892  if (blk->isValid()) {
893  evictBlock(blk, writebacks);
894  }
895  }
896  }
897 
898  return true;
899 }
900 
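// updateCompressionData(): re-compress a block after it has been
// overwritten; if the new size changes its co-allocatability (a data
// expansion or contraction), evict or move blocks as needed before updating
// the compression metadata.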
901 bool
902 BaseCache::updateCompressionData(CacheBlk *&blk, const uint64_t* data,
903  PacketList &writebacks)
904 {
905  // tempBlock does not exist in the tags, so don't do anything for it.
906  if (blk == tempBlock) {
907  return true;
908  }
909 
910  // The compressor is called to compress the updated data, so that its
911  // metadata can be updated.
912  Cycles compression_lat = Cycles(0);
913  Cycles decompression_lat = Cycles(0);
914  const auto comp_data =
915  compressor->compress(data, compression_lat, decompression_lat);
916  std::size_t compression_size = comp_data->getSizeBits();
917 
918  // Get previous compressed size
919  CompressionBlk* compression_blk = static_cast<CompressionBlk*>(blk);
920  GEM5_VAR_USED const std::size_t prev_size = compression_blk->getSizeBits();
921 
922  // If compressed size didn't change enough to modify its co-allocatability
923  // there is nothing to do. Otherwise we may be facing a data expansion
924  // (block passing from more compressed to less compressed state), or a
925  // data contraction (less to more).
926  bool is_data_expansion = false;
927  bool is_data_contraction = false;
928  const CompressionBlk::OverwriteType overwrite_type =
929  compression_blk->checkExpansionContraction(compression_size);
930  std::string op_name = "";
931  if (overwrite_type == CompressionBlk::DATA_EXPANSION) {
932  op_name = "expansion";
933  is_data_expansion = true;
934  } else if ((overwrite_type == CompressionBlk::DATA_CONTRACTION) &&
935  moveContractions) {
936  op_name = "contraction";
937  is_data_contraction = true;
938  }
939 
940  // If block changed compression state, it was possibly co-allocated with
941  // other blocks and cannot be co-allocated anymore, so one or more blocks
942  // must be evicted to make room for the expanded/contracted block
943  std::vector<CacheBlk*> evict_blks;
944  if (is_data_expansion || is_data_contraction) {
945  std::vector<CacheBlk*> evict_blks;
946  bool victim_itself = false;
947  CacheBlk *victim = nullptr;
948  if (replaceExpansions || is_data_contraction) {
949  victim = tags->findVictim(regenerateBlkAddr(blk),
950  blk->isSecure(), compression_size, evict_blks);
951 
952  // It is valid to return nullptr if there is no victim
953  if (!victim) {
954  return false;
955  }
956 
957  // If the victim block is itself the block won't need to be moved,
958  // and the victim should not be evicted
959  if (blk == victim) {
960  victim_itself = true;
961  auto it = std::find_if(evict_blks.begin(), evict_blks.end(),
962  [&blk](CacheBlk* evict_blk){ return evict_blk == blk; });
963  evict_blks.erase(it);
964  }
965 
966  // Print victim block's information
967  DPRINTF(CacheRepl, "Data %s replacement victim: %s\n",
968  op_name, victim->print());
969  } else {
970  // If we do not move the expanded block, we must make room for
971  // the expansion to happen, so evict every co-allocated block
972  const SuperBlk* superblock = static_cast<const SuperBlk*>(
973  compression_blk->getSectorBlock());
974  for (auto& sub_blk : superblock->blks) {
975  if (sub_blk->isValid() && (blk != sub_blk)) {
976  evict_blks.push_back(sub_blk);
977  }
978  }
979  }
980 
981  // Try to evict blocks; if it fails, give up on update
982  if (!handleEvictions(evict_blks, writebacks)) {
983  return false;
984  }
985 
986  DPRINTF(CacheComp, "Data %s: [%s] from %d to %d bits\n",
987  op_name, blk->print(), prev_size, compression_size);
988 
989  if (!victim_itself && (replaceExpansions || is_data_contraction)) {
990  // Move the block's contents to the invalid block so that it now
991  // co-allocates with the other existing superblock entry
992  tags->moveBlock(blk, victim);
993  blk = victim;
994  compression_blk = static_cast<CompressionBlk*>(blk);
995  }
996  }
997 
998  // Update the number of data expansions/contractions
999  if (is_data_expansion) {
1000  stats.dataExpansions++;
1001  } else if (is_data_contraction) {
1002  stats.dataContractions++;
1003  }
1004 
1005  compression_blk->setSizeBits(compression_size);
1006  compression_blk->setDecompressionLatency(decompression_lat);
1007 
1008  return true;
1009 }
1010 
1011 void
1012 BaseCache::satisfyRequest(PacketPtr pkt, CacheBlk *blk, bool, bool)
1013 {
1014  assert(pkt->isRequest());
1015 
1016  assert(blk && blk->isValid());
1017  // Occasionally this is not true... if we are a lower-level cache
1018  // satisfying a string of Read and ReadEx requests from
1019  // upper-level caches, a Read will mark the block as shared but we
1020  // can satisfy a following ReadEx anyway since we can rely on the
1021  // Read requestor(s) to have buffered the ReadEx snoop and to
1022  // invalidate their blocks after receiving them.
1023  // assert(!pkt->needsWritable() || blk->isSet(CacheBlk::WritableBit));
1024  assert(pkt->getOffset(blkSize) + pkt->getSize() <= blkSize);
1025 
1026  // Check RMW operations first since both isRead() and
1027  // isWrite() will be true for them
1028  if (pkt->cmd == MemCmd::SwapReq) {
1029  if (pkt->isAtomicOp()) {
1030  // Get a copy of the old block's contents for the probe before
1031  // the update
1032  DataUpdate data_update(regenerateBlkAddr(blk), blk->isSecure());
1033  if (ppDataUpdate->hasListeners()) {
1034  data_update.oldData = std::vector<uint64_t>(blk->data,
1035  blk->data + (blkSize / sizeof(uint64_t)));
1036  }
1037 
1038  // extract data from cache and save it into the data field in
1039  // the packet as a return value from this atomic op
1040  int offset = tags->extractBlkOffset(pkt->getAddr());
1041  uint8_t *blk_data = blk->data + offset;
1042  pkt->setData(blk_data);
1043 
1044  // execute AMO operation
1045  (*(pkt->getAtomicOp()))(blk_data);
1046 
1047  // Inform of this block's data contents update
1048  if (ppDataUpdate->hasListeners()) {
1049  data_update.newData = std::vector<uint64_t>(blk->data,
1050  blk->data + (blkSize / sizeof(uint64_t)));
1051  ppDataUpdate->notify(data_update);
1052  }
1053 
1054  // set block status to dirty
1055  blk->setCoherenceBits(CacheBlk::DirtyBit);
1056  } else {
1057  cmpAndSwap(blk, pkt);
1058  }
1059  } else if (pkt->isWrite()) {
1060  // we have the block in a writable state and can go ahead,
1061  // note that the line may be also be considered writable in
1062  // downstream caches along the path to memory, but always
1063  // Exclusive, and never Modified
1064  assert(blk->isSet(CacheBlk::WritableBit));
1065  // Write or WriteLine at the first cache with block in writable state
1066  if (blk->checkWrite(pkt)) {
1067  updateBlockData(blk, pkt, true);
1068  }
1069  // Always mark the line as dirty (and thus transition to the
1070  // Modified state) even if we are a failed StoreCond so we
1071  // supply data to any snoops that have appended themselves to
1072  // this cache before knowing the store will fail.
1073  blk->setCoherenceBits(CacheBlk::DirtyBit);
1074  DPRINTF(CacheVerbose, "%s for %s (write)\n", __func__, pkt->print());
1075  } else if (pkt->isRead()) {
1076  if (pkt->isLLSC()) {
1077  blk->trackLoadLocked(pkt);
1078  }
1079 
1080  // all read responses have a data payload
1081  assert(pkt->hasRespData());
1082  pkt->setDataFromBlock(blk->data, blkSize);
1083  } else if (pkt->isUpgrade()) {
1084  // sanity check
1085  assert(!pkt->hasSharers());
1086 
1087  if (blk->isSet(CacheBlk::DirtyBit)) {
1088  // we were in the Owned state, and a cache above us that
1089  // has the line in Shared state needs to be made aware
1090  // that the data it already has is in fact dirty
1091  pkt->setCacheResponding();
1092  blk->clearCoherenceBits(CacheBlk::DirtyBit);
1093  }
1094  } else if (pkt->isClean()) {
1095  blk->clearCoherenceBits(CacheBlk::DirtyBit);
1096  } else {
1097  assert(pkt->isInvalidate());
1098  invalidateBlock(blk);
1099  DPRINTF(CacheVerbose, "%s for %s (invalidation)\n", __func__,
1100  pkt->print());
1101  }
1102 }
1103 
1104 /////////////////////////////////////////////////////
1105 //
1106 // Access path: requests coming in from the CPU side
1107 //
1108 /////////////////////////////////////////////////////
1109 Cycles
1110 BaseCache::calculateTagOnlyLatency(const uint32_t delay,
1111  const Cycles lookup_lat) const
1112 {
1113  // A tag-only access has to wait for the packet to arrive in order to
1114  // perform the tag lookup.
1115  return ticksToCycles(delay) + lookup_lat;
1116 }
1117 
1118 Cycles
1119 BaseCache::calculateAccessLatency(const CacheBlk* blk, const uint32_t delay,
1120  const Cycles lookup_lat) const
1121 {
1122  Cycles lat(0);
1123 
1124  if (blk != nullptr) {
1125  // As soon as the access arrives, for sequential accesses first access
1126  // tags, then the data entry. In the case of parallel accesses the
1127  // latency is dictated by the slowest of tag and data latencies.
1128  if (sequentialAccess) {
1129  lat = ticksToCycles(delay) + lookup_lat + dataLatency;
1130  } else {
1131  lat = ticksToCycles(delay) + std::max(lookup_lat, dataLatency);
1132  }
1133 
1134  // Check if the block to be accessed is available. If not, apply the
1135  // access latency on top of when the block is ready to be accessed.
1136  const Tick tick = curTick() + delay;
1137  const Tick when_ready = blk->getWhenReady();
1138  if (when_ready > tick &&
1139  ticksToCycles(when_ready - tick) > lat) {
1140  lat += ticksToCycles(when_ready - tick);
1141  }
1142  } else {
1143  // In case of a miss, we neglect the data access in a parallel
1144  // configuration (i.e., the data access will be stopped as soon as
1145  // we find out it is a miss), and use the tag-only latency.
1146  lat = calculateTagOnlyLatency(delay, lookup_lat);
1147  }
1148 
1149  return lat;
1150 }
1151 
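// access(): the main lookup path shared by the timing and atomic modes.
// Returns true if the request can be satisfied by this cache, sets lat to
// the calculated access latency, and appends any generated writebacks.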
1152 bool
1153 BaseCache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat,
1154  PacketList &writebacks)
1155 {
1156  // sanity check
1157  assert(pkt->isRequest());
1158 
1159  chatty_assert(!(isReadOnly && pkt->isWrite()),
1160  "Should never see a write in a read-only cache %s\n",
1161  name());
1162 
1163  // Access block in the tags
1164  Cycles tag_latency(0);
1165  blk = tags->accessBlock(pkt, tag_latency);
1166 
1167  DPRINTF(Cache, "%s for %s %s\n", __func__, pkt->print(),
1168  blk ? "hit " + blk->print() : "miss");
1169 
1170  if (pkt->req->isCacheMaintenance()) {
1171  // A cache maintenance operation is always forwarded to the
1172  // memory below even if the block is found in dirty state.
1173 
1174  // We defer any changes to the state of the block until we
1175  // create and mark as in service the mshr for the downstream
1176  // packet.
1177 
1178  // Calculate access latency on top of when the packet arrives. This
1179  // takes into account the bus delay.
1180  lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency);
1181 
1182  return false;
1183  }
1184 
1185  if (pkt->isEviction()) {
1186  // We check for presence of block in above caches before issuing
1187  // Writeback or CleanEvict to write buffer. Therefore the only
1188  // possible cases can be of a CleanEvict packet coming from above
1189  // encountering a Writeback generated in this cache peer cache and
1190  // waiting in the write buffer. Cases of upper level peer caches
1191  // generating CleanEvict and Writeback or simply CleanEvict and
1192  // CleanEvict almost simultaneously will be caught by snoops sent out
1193  // by crossbar.
1194  WriteQueueEntry *wb_entry = writeBuffer.findMatch(pkt->getAddr(),
1195  pkt->isSecure());
1196  if (wb_entry) {
1197  assert(wb_entry->getNumTargets() == 1);
1198  PacketPtr wbPkt = wb_entry->getTarget()->pkt;
1199  assert(wbPkt->isWriteback());
1200 
1201  if (pkt->isCleanEviction()) {
1202  // The CleanEvict and WritebackClean snoops into other
1203  // peer caches of the same level while traversing the
1204  // crossbar. If a copy of the block is found, the
1205  // packet is deleted in the crossbar. Hence, none of
1206  // the other upper level caches connected to this
1207  // cache have the block, so we can clear the
1208  // BLOCK_CACHED flag in the Writeback if set and
1209  // discard the CleanEvict by returning true.
1210  wbPkt->clearBlockCached();
1211 
1212  // A clean evict does not need to access the data array
1213  lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency);
1214 
1215  return true;
1216  } else {
1217  assert(pkt->cmd == MemCmd::WritebackDirty);
1218  // Dirty writeback from above trumps our clean
1219  // writeback... discard here
1220  // Note: markInService will remove entry from writeback buffer.
1221  markInService(wb_entry);
1222  delete wbPkt;
1223  }
1224  }
1225  }
1226 
1227  // The critical latency part of a write depends only on the tag access
1228  if (pkt->isWrite()) {
1229  lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency);
1230  }
1231 
1232  // Writeback handling is special case. We can write the block into
1233  // the cache without having a writeable copy (or any copy at all).
1234  if (pkt->isWriteback()) {
1235  assert(blkSize == pkt->getSize());
1236 
1237  // we could get a clean writeback while we are having
1238  // outstanding accesses to a block, do the simple thing for
1239  // now and drop the clean writeback so that we do not upset
1240  // any ordering/decisions about ownership already taken
1241  if (pkt->cmd == MemCmd::WritebackClean &&
1242  mshrQueue.findMatch(pkt->getAddr(), pkt->isSecure())) {
1243  DPRINTF(Cache, "Clean writeback %#llx to block with MSHR, "
1244  "dropping\n", pkt->getAddr());
1245 
1246  // A writeback searches for the block, then writes the data.
1247  // As the writeback is being dropped, the data is not touched,
1248  // and we just had to wait for the time to find a match in the
1249  // MSHR. As of now assume a mshr queue search takes as long as
1250  // a tag lookup for simplicity.
1251  return true;
1252  }
1253 
1254  const bool has_old_data = blk && blk->isValid();
1255  if (!blk) {
1256  // need to do a replacement
1257  blk = allocateBlock(pkt, writebacks);
1258  if (!blk) {
1259  // no replaceable block available: give up, fwd to next level.
1260  incMissCount(pkt);
1261  return false;
1262  }
1263 
1264  blk->setCoherenceBits(CacheBlk::ReadableBit);
1265  } else if (compressor) {
1266  // This is an overwrite to an existing block, therefore we need
1267  // to check for data expansion (i.e., block was compressed with
1268  // a smaller size, and now it doesn't fit the entry anymore).
1269  // If that is the case we might need to evict blocks.
1270  if (!updateCompressionData(blk, pkt->getConstPtr<uint64_t>(),
1271  writebacks)) {
1272  invalidateBlock(blk);
1273  return false;
1274  }
1275  }
1276 
1277  // only mark the block dirty if we got a writeback command,
1278  // and leave it as is for a clean writeback
1279  if (pkt->cmd == MemCmd::WritebackDirty) {
1280  // TODO: the coherent cache can assert that the dirty bit is set
1281  blk->setCoherenceBits(CacheBlk::DirtyBit);
1282  }
1283  // if the packet does not have sharers, it is passing
1284  // writable, and we got the writeback in Modified or Exclusive
1285  // state, if not we are in the Owned or Shared state
1286  if (!pkt->hasSharers()) {
1287  blk->setCoherenceBits(CacheBlk::WritableBit);
1288  }
1289  // nothing else to do; writeback doesn't expect response
1290  assert(!pkt->needsResponse());
1291 
1292  updateBlockData(blk, pkt, has_old_data);
1293  DPRINTF(Cache, "%s new state is %s\n", __func__, blk->print());
1294  incHitCount(pkt);
1295 
1296  // When the packet metadata arrives, the tag lookup will be done while
1297  // the payload is arriving. Then the block will be ready to access as
1298  // soon as the fill is done
1299  blk->setWhenReady(clockEdge(fillLatency) + pkt->headerDelay +
1300  std::max(cyclesToTicks(tag_latency), (uint64_t)pkt->payloadDelay));
1301 
1302  return true;
1303  } else if (pkt->cmd == MemCmd::CleanEvict) {
1304  // A CleanEvict does not need to access the data array
1305  lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency);
1306 
1307  if (blk) {
1308  // Found the block in the tags, need to stop CleanEvict from
1309  // propagating further down the hierarchy. Returning true will
1310  // treat the CleanEvict like a satisfied write request and delete
1311  // it.
1312  return true;
1313  }
1314  // We didn't find the block here, propagate the CleanEvict further
1315  // down the memory hierarchy. Returning false will treat the CleanEvict
1316  // like a Writeback which could not find a replaceable block so has to
1317  // go to next level.
1318  return false;
1319  } else if (pkt->cmd == MemCmd::WriteClean) {
1320  // WriteClean handling is a special case. We can allocate a
1321  // block directly if it doesn't exist and we can update the
1322  // block immediately. The WriteClean transfers the ownership
1323  // of the block as well.
1324  assert(blkSize == pkt->getSize());
1325 
1326  const bool has_old_data = blk && blk->isValid();
1327  if (!blk) {
1328  if (pkt->writeThrough()) {
1329  // if this is a write through packet, we don't try to
1330  // allocate if the block is not present
1331  return false;
1332  } else {
1333  // a writeback that misses needs to allocate a new block
1334  blk = allocateBlock(pkt, writebacks);
1335  if (!blk) {
1336  // no replaceable block available: give up, fwd to
1337  // next level.
1338  incMissCount(pkt);
1339  return false;
1340  }
1341 
1342  blk->setCoherenceBits(CacheBlk::ReadableBit);
1343  }
1344  } else if (compressor) {
1345  // This is an overwrite to an existing block, therefore we need
1346  // to check for data expansion (i.e., block was compressed with
1347  // a smaller size, and now it doesn't fit the entry anymore).
1348  // If that is the case we might need to evict blocks.
1349  if (!updateCompressionData(blk, pkt->getConstPtr<uint64_t>(),
1350  writebacks)) {
1351  invalidateBlock(blk);
1352  return false;
1353  }
1354  }
1355 
1356  // at this point either this is a writeback or a write-through
1357  // write clean operation and the block is already in this
1358  // cache, we need to update the data and the block flags
1359  assert(blk);
1360  // TODO: the coherent cache can assert that the dirty bit is set
1361  if (!pkt->writeThrough()) {
1362  blk->setCoherenceBits(CacheBlk::DirtyBit);
1363  }
1364  // nothing else to do; writeback doesn't expect response
1365  assert(!pkt->needsResponse());
1366 
1367  updateBlockData(blk, pkt, has_old_data);
1368  DPRINTF(Cache, "%s new state is %s\n", __func__, blk->print());
1369 
1370  incHitCount(pkt);
1371 
1372  // When the packet metadata arrives, the tag lookup will be done while
1373  // the payload is arriving. Then the block will be ready to access as
1374  // soon as the fill is done
1375  blk->setWhenReady(clockEdge(fillLatency) + pkt->headerDelay +
1376  std::max(cyclesToTicks(tag_latency), (uint64_t)pkt->payloadDelay));
1377 
1378  // If this a write-through packet it will be sent to cache below
1379  return !pkt->writeThrough();
1380  } else if (blk && (pkt->needsWritable() ?
1381  blk->isSet(CacheBlk::WritableBit) :
1382  blk->isSet(CacheBlk::ReadableBit))) {
1383  // OK to satisfy access
1384  incHitCount(pkt);
1385 
1386  // Calculate access latency based on the need to access the data array
1387  if (pkt->isRead()) {
1388  lat = calculateAccessLatency(blk, pkt->headerDelay, tag_latency);
1389 
1390  // When a block is compressed, it must first be decompressed
1391  // before being read. This adds to the access latency.
1392  if (compressor) {
1393  lat += compressor->getDecompressionLatency(blk);
1394  }
1395  } else {
1396  lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency);
1397  }
1398 
1399  satisfyRequest(pkt, blk);
1400  maintainClusivity(pkt->fromCache(), blk);
1401 
1402  return true;
1403  }
1404 
1405  // Can't satisfy access normally... either no block (blk == nullptr)
1406  // or have block but need writable
1407 
1408  incMissCount(pkt);
1409 
1410  lat = calculateAccessLatency(blk, pkt->headerDelay, tag_latency);
1411 
1412  if (!blk && pkt->isLLSC() && pkt->isWrite()) {
1413  // complete miss on store conditional... just give up now
1414  pkt->req->setExtraData(0);
1415  return true;
1416  }
1417 
1418  return false;
1419 }
1420 
1421 void
1422 BaseCache::maintainClusivity(bool from_cache, CacheBlk *blk)
1423 {
1424  if (from_cache && blk && blk->isValid() &&
1425  !blk->isSet(CacheBlk::DirtyBit) && clusivity == enums::mostly_excl) {
1426  // if we have responded to a cache, and our block is still
1427  // valid, but not dirty, and this cache is mostly exclusive
1428  // with respect to the cache above, drop the block
1429  invalidateBlock(blk);
1430  }
1431 }
1432 
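// handleFill(): place response data into an existing block, a newly
// allocated block, or the tempBlock when no allocation is possible, and set
// the coherence state based on the hasSharers/cacheResponding flags.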
1433 CacheBlk*
1434 BaseCache::handleFill(PacketPtr pkt, CacheBlk *blk, PacketList &writebacks,
1435  bool allocate)
1436 {
1437  assert(pkt->isResponse());
1438  Addr addr = pkt->getAddr();
1439  bool is_secure = pkt->isSecure();
1440  const bool has_old_data = blk && blk->isValid();
1441  const std::string old_state = blk ? blk->print() : "";
1442 
1443  // When handling a fill, we should have no writes to this line.
1444  assert(addr == pkt->getBlockAddr(blkSize));
1445  assert(!writeBuffer.findMatch(addr, is_secure));
1446 
1447  if (!blk) {
1448  // better have read new data...
1449  assert(pkt->hasData() || pkt->cmd == MemCmd::InvalidateResp);
1450 
1451  // need to do a replacement if allocating, otherwise we stick
1452  // with the temporary storage
1453  blk = allocate ? allocateBlock(pkt, writebacks) : nullptr;
1454 
1455  if (!blk) {
1456  // No replaceable block or a mostly exclusive
1457  // cache... just use temporary storage to complete the
1458  // current request and then get rid of it
1459  blk = tempBlock;
1460  tempBlock->insert(addr, is_secure);
1461  DPRINTF(Cache, "using temp block for %#llx (%s)\n", addr,
1462  is_secure ? "s" : "ns");
1463  }
1464  } else {
1465  // existing block... probably an upgrade
1466  // don't clear block status... if block is already dirty we
1467  // don't want to lose that
1468  }
1469 
1470  // Block is guaranteed to be valid at this point
1471  assert(blk->isValid());
1472  assert(blk->isSecure() == is_secure);
1473  assert(regenerateBlkAddr(blk) == addr);
1474 
1474 
1475  blk->setCoherenceBits(CacheBlk::ReadableBit);
1476 
1477  // sanity check for whole-line writes, which should always be
1478  // marked as writable as part of the fill, and then later marked
1479  // dirty as part of satisfyRequest
1480  if (pkt->cmd == MemCmd::InvalidateResp) {
1481  assert(!pkt->hasSharers());
1482  }
1483 
1484  // here we deal with setting the appropriate state of the line,
1485  // and we start by looking at the hasSharers flag, and ignore the
1486  // cacheResponding flag (normally signalling dirty data) if the
1487  // packet has sharers, thus the line is never allocated as Owned
1488  // (dirty but not writable), and always ends up being either
1489  // Shared, Exclusive or Modified, see Packet::setCacheResponding
1490  // for more details
1491  if (!pkt->hasSharers()) {
1492  // we could get a writable line from memory (rather than a
1493  // cache) even in a read-only cache, note that we set this bit
1494  // even for a read-only cache, possibly revisit this decision
1495  blk->setCoherenceBits(CacheBlk::WritableBit);
1496 
1497  // check if we got this via cache-to-cache transfer (i.e., from a
1498  // cache that had the block in Modified or Owned state)
1499  if (pkt->cacheResponding()) {
1500  // we got the block in Modified state, and invalidated the
1501  // owners copy
1502  blk->setCoherenceBits(CacheBlk::DirtyBit);
1503 
1504  chatty_assert(!isReadOnly, "Should never see dirty snoop response "
1505  "in read-only cache %s\n", name());
1506 
1507  }
1508  }
1509 
1510  DPRINTF(Cache, "Block addr %#llx (%s) moving from %s to %s\n",
1511  addr, is_secure ? "s" : "ns", old_state, blk->print());
1512 
1513  // if we got new data, copy it in (checking for a read response
1514  // and a response that has data is the same in the end)
1515  if (pkt->isRead()) {
1516  // sanity checks
1517  assert(pkt->hasData());
1518  assert(pkt->getSize() == blkSize);
1519 
1520  updateBlockData(blk, pkt, has_old_data);
1521  }
1522  // The block will be ready when the payload arrives and the fill is done
1523  blk->setWhenReady(clockEdge(fillLatency) + pkt->headerDelay +
1524  pkt->payloadDelay);
1525 
1526  return blk;
1527 }
1528 
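// allocateBlock(): compress the incoming data if a compressor is present,
// select a victim entry (possibly evicting several co-allocated blocks),
// and insert the new block at the victimized location.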
1529 CacheBlk*
1530 BaseCache::allocateBlock(PacketPtr pkt, PacketList &writebacks)
1531 {
1532  // Get address
1533  const Addr addr = pkt->getAddr();
1534 
1535  // Get secure bit
1536  const bool is_secure = pkt->isSecure();
1537 
1538  // Block size and compression related access latency. Only relevant if
1539  // using a compressor, otherwise there is no extra delay, and the block
1540  // is fully sized
1541  std::size_t blk_size_bits = blkSize*8;
1542  Cycles compression_lat = Cycles(0);
1543  Cycles decompression_lat = Cycles(0);
1544 
1545  // If a compressor is being used, it is called to compress data before
1546  // insertion. Although in Gem5 the data is stored uncompressed, even if a
1547  // compressor is used, the compression/decompression methods are called to
1548  // calculate the amount of extra cycles needed to read or write compressed
1549  // blocks.
1550  if (compressor && pkt->hasData()) {
1551  const auto comp_data = compressor->compress(
1552  pkt->getConstPtr<uint64_t>(), compression_lat, decompression_lat);
1553  blk_size_bits = comp_data->getSizeBits();
1554  }
1555 
1556  // Find replacement victim
1557  std::vector<CacheBlk*> evict_blks;
1558  CacheBlk *victim = tags->findVictim(addr, is_secure, blk_size_bits,
1559  evict_blks);
1560 
1561  // It is valid to return nullptr if there is no victim
1562  if (!victim)
1563  return nullptr;
1564 
1565  // Print victim block's information
1566  DPRINTF(CacheRepl, "Replacement victim: %s\n", victim->print());
1567 
1568  // Try to evict blocks; if it fails, give up on allocation
1569  if (!handleEvictions(evict_blks, writebacks)) {
1570  return nullptr;
1571  }
1572 
1573  // Insert new block at victimized entry
1574  tags->insertBlock(pkt, victim);
1575 
1576  // If using a compressor, set compression data. This must be done after
1577  // insertion, as the compression bit may be set.
1578  if (compressor) {
1579  compressor->setSizeBits(victim, blk_size_bits);
1580  compressor->setDecompressionLatency(victim, decompression_lat);
1581  }
1582 
1583  return victim;
1584 }
1585 
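// invalidateBlock(): notify listeners that the data for this address is no
// longer present and invalidate either the tagged block or the tempBlock.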
1586 void
1587 BaseCache::invalidateBlock(CacheBlk *blk)
1588 {
1589  // If block is still marked as prefetched, then it hasn't been used
1590  if (blk->wasPrefetched()) {
1591  prefetcher->prefetchUnused();
1592  }
1593 
1594  // Notify that the data contents for this address are no longer present
1595  updateBlockData(blk, nullptr, blk->isValid());
1596 
1597  // If handling a block present in the Tags, let it do its invalidation
1598  // process, which will update stats and invalidate the block itself
1599  if (blk != tempBlock) {
1600  tags->invalidate(blk);
1601  } else {
1602  tempBlock->invalidate();
1603  }
1604 }
1605 
1606 void
1607 BaseCache::evictBlock(CacheBlk *blk, PacketList &writebacks)
1608 {
1609  PacketPtr pkt = evictBlock(blk);
1610  if (pkt) {
1611  writebacks.push_back(pkt);
1612  }
1613 }
1614 
1615 PacketPtr
1616 BaseCache::writebackBlk(CacheBlk *blk)
1617 {
1618  chatty_assert(!isReadOnly || writebackClean,
1619  "Writeback from read-only cache");
1620  assert(blk && blk->isValid() &&
1621  (blk->isSet(CacheBlk::DirtyBit) || writebackClean));
1622 
1623  stats.writebacks[Request::wbRequestorId]++;
1624 
1625  RequestPtr req = std::make_shared<Request>(
1626  regenerateBlkAddr(blk), blkSize, 0, Request::wbRequestorId);
1627 
1628  if (blk->isSecure())
1629  req->setFlags(Request::SECURE);
1630 
1631  req->taskId(blk->getTaskId());
1632 
1633  PacketPtr pkt =
1634  new Packet(req, blk->isSet(CacheBlk::DirtyBit) ?
1635  MemCmd::WritebackDirty : MemCmd::WritebackClean);
1636 
1637  DPRINTF(Cache, "Create Writeback %s writable: %d, dirty: %d\n",
1638  pkt->print(), blk->isSet(CacheBlk::WritableBit),
1639  blk->isSet(CacheBlk::DirtyBit));
1640 
1641  if (blk->isSet(CacheBlk::WritableBit)) {
1642  // not asserting shared means we pass the block in modified
1643  // state, mark our own block non-writeable
1644  blk->clearCoherenceBits(CacheBlk::WritableBit);
1645  } else {
1646  // we are in the Owned state, tell the receiver
1647  pkt->setHasSharers();
1648  }
1649 
1650  // make sure the block is not marked dirty
1651  blk->clearCoherenceBits(CacheBlk::DirtyBit);
1652 
1653  pkt->allocate();
1654  pkt->setDataFromBlock(blk->data, blkSize);
1655 
1656  // When a block is compressed, it must first be decompressed before being
1657  // sent for writeback.
1658  if (compressor) {
1659  pkt->payloadDelay = compressor->getDecompressionLatency(blk);
1660  }
1661 
1662  return pkt;
1663 }
1664 
1665 PacketPtr
1666 BaseCache::writecleanBlk(CacheBlk *blk, Request::Flags dest, PacketId id)
1667 {
1668  RequestPtr req = std::make_shared<Request>(
1669  regenerateBlkAddr(blk), blkSize, 0, Request::wbRequestorId);
1670 
1671  if (blk->isSecure()) {
1672  req->setFlags(Request::SECURE);
1673  }
1674  req->taskId(blk->getTaskId());
1675 
1676  PacketPtr pkt = new Packet(req, MemCmd::WriteClean, blkSize, id);
1677 
1678  if (dest) {
1679  req->setFlags(dest);
1680  pkt->setWriteThrough();
1681  }
1682 
1683  DPRINTF(Cache, "Create %s writable: %d, dirty: %d\n", pkt->print(),
1684  blk->isSet(CacheBlk::WritableBit), blk->isSet(CacheBlk::DirtyBit));
1685 
1686  if (blk->isSet(CacheBlk::WritableBit)) {
1687  // not asserting shared means we pass the block in modified
1688  // state, mark our own block non-writeable
1689  blk->clearCoherenceBits(CacheBlk::WritableBit);
1690  } else {
1691  // we are in the Owned state, tell the receiver
1692  pkt->setHasSharers();
1693  }
1694 
1695  // make sure the block is not marked dirty
1696  blk->clearCoherenceBits(CacheBlk::DirtyBit);
1697 
1698  pkt->allocate();
1699  pkt->setDataFromBlock(blk->data, blkSize);
1700 
1701  // When a block is compressed, it must first be decompressed before being
1702  // sent for writeback.
1703  if (compressor) {
1704  pkt->payloadDelay = compressor->getDecompressionLatency(blk);
1705  }
1706 
1707  return pkt;
1708 }
1709 
1710 
1711 void
1712 BaseCache::memWriteback()
1713 {
1714  tags->forEachBlk([this](CacheBlk &blk) { writebackVisitor(blk); });
1715 }
1716 
1717 void
1718 BaseCache::memInvalidate()
1719 {
1720  tags->forEachBlk([this](CacheBlk &blk) { invalidateVisitor(blk); });
1721 }
1722 
1723 bool
1724 BaseCache::isDirty() const
1725 {
1726  return tags->anyBlk([](CacheBlk &blk) {
1727  return blk.isSet(CacheBlk::DirtyBit); });
1728 }
1729 
1730 bool
1731 BaseCache::coalesce() const
1732 {
1733  return writeAllocator && writeAllocator->coalesce();
1734 }
1735 
1736 void
1737 BaseCache::writebackVisitor(CacheBlk &blk)
1738 {
1739  if (blk.isSet(CacheBlk::DirtyBit)) {
1740  assert(blk.isValid());
1741 
1742  RequestPtr request = std::make_shared<Request>(
1743  regenerateBlkAddr(&blk), blkSize, 0, Request::funcRequestorId);
1744 
1745  request->taskId(blk.getTaskId());
1746  if (blk.isSecure()) {
1747  request->setFlags(Request::SECURE);
1748  }
1749 
1750  Packet packet(request, MemCmd::WriteReq);
1751  packet.dataStatic(blk.data);
1752 
1753  memSidePort.sendFunctional(&packet);
1754 
1755  blk.clearCoherenceBits(CacheBlk::DirtyBit);
1756  }
1757 }
1758 
1759 void
1760 BaseCache::invalidateVisitor(CacheBlk &blk)
1761 {
1762  if (blk.isSet(CacheBlk::DirtyBit))
1763  warn_once("Invalidating dirty cache lines. " \
1764  "Expect things to break.\n");
1765 
1766  if (blk.isValid()) {
1767  assert(!blk.isSet(CacheBlk::DirtyBit));
1768  invalidateBlock(&blk);
1769  }
1770 }
1771 
1772 Tick
1773 BaseCache::nextQueueReadyTime() const
1774 {
1775  Tick nextReady = std::min(mshrQueue.nextReadyTime(),
1776  writeBuffer.nextReadyTime());
1777 
1778  // Don't signal prefetch ready time if no MSHRs available
1779  // Will signal once enough MSHRs are deallocated
1780  if (prefetcher && mshrQueue.canPrefetch() && !isBlocked()) {
1781  nextReady = std::min(nextReady,
1782  prefetcher->nextPrefetchReadyTime());
1783  }
1784 
1785  return nextReady;
1786 }
1787 
1788 
1789 bool
1790 BaseCache::sendMSHRQueuePacket(MSHR *mshr)
1791 {
1792  assert(mshr);
1793 
1794  // use request from 1st target
1795  PacketPtr tgt_pkt = mshr->getTarget()->pkt;
1796 
1797  DPRINTF(Cache, "%s: MSHR %s\n", __func__, tgt_pkt->print());
1798 
1799  // if the cache is in write coalescing mode or (additionally) in
1800  // no allocation mode, and we have a write packet with an MSHR
1801  // that is not a whole-line write (due to incompatible flags etc),
1802  // then reset the write mode
1803  if (writeAllocator && writeAllocator->coalesce() && tgt_pkt->isWrite()) {
1804  if (!mshr->isWholeLineWrite()) {
1805  // if we are currently write coalescing, hold on the
1806  // MSHR as many cycles extra as we need to completely
1807  // write a cache line
1808  if (writeAllocator->delay(mshr->blkAddr)) {
1809  Tick delay = blkSize / tgt_pkt->getSize() * clockPeriod();
1810  DPRINTF(CacheVerbose, "Delaying pkt %s %llu ticks to allow "
1811  "for write coalescing\n", tgt_pkt->print(), delay);
1812  mshrQueue.delay(mshr, delay);
1813  return false;
1814  } else {
1815  writeAllocator->reset();
1816  }
1817  } else {
1818  writeAllocator->resetDelay(mshr->blkAddr);
1819  }
1820  }
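// Worked example (hypothetical numbers): with blkSize = 64 bytes and 8-byte
// write packets, the delay computed above is 64 / 8 = 8 clock periods, i.e.
// the MSHR is held long enough for eight such sequential writes to coalesce
// into a whole-line write before the miss is sent downstream.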
1821 
1822  CacheBlk *blk = tags->findBlock(mshr->blkAddr, mshr->isSecure);
1823 
1824  // either a prefetch that is not present upstream, or a normal
1825  // MSHR request, proceed to get the packet to send downstream
1826  PacketPtr pkt = createMissPacket(tgt_pkt, blk, mshr->needsWritable(),
1827  mshr->isWholeLineWrite());
1828 
1829  mshr->isForward = (pkt == nullptr);
1830 
1831  if (mshr->isForward) {
1832  // not a cache block request, but a response is expected
1833  // make copy of current packet to forward, keep current
1834  // copy for response handling
1835  pkt = new Packet(tgt_pkt, false, true);
1836  assert(!pkt->isWrite());
1837  }
1838 
1839  // play it safe and append (rather than set) the sender state,
1840  // as forwarded packets may already have existing state
1841  pkt->pushSenderState(mshr);
1842 
1843  if (pkt->isClean() && blk && blk->isSet(CacheBlk::DirtyBit)) {
1844  // A cache clean operation is looking for a dirty block. Mark
1845  // the packet so that the destination xbar can determine that
1846  // there will be a follow-up write packet as well.
1847  pkt->setSatisfied();
1848  }
1849 
1850  if (!memSidePort.sendTimingReq(pkt)) {
1851  // we are awaiting a retry, but we
1852  // delete the packet and will be creating a new packet
1853  // when we get the opportunity
1854  delete pkt;
1855 
1856  // note that we have now masked any requestBus and
1857  // schedSendEvent (we will wait for a retry before
1858  // doing anything), and this is so even if we do not
1859  // care about this packet and might override it before
1860  // it gets retried
1861  return true;
1862  } else {
1863  // As part of the call to sendTimingReq the packet is
1864  // forwarded to all neighbouring caches (and any caches
1865  // above them) as a snoop. Thus at this point we know if
1866  // any of the neighbouring caches are responding, and if
1867  // so, we know it is dirty, and we can determine if it is
1868  // being passed as Modified, making our MSHR the ordering
1869  // point
1870  bool pending_modified_resp = !pkt->hasSharers() &&
1871  pkt->cacheResponding();
1872  markInService(mshr, pending_modified_resp);
1873 
1874  if (pkt->isClean() && blk && blk->isSet(CacheBlk::DirtyBit)) {
1875  // A cache clean operation is looking for a dirty
1876  // block. If a dirty block is encountered a WriteClean
1877  // will update any copies to the path to the memory
1878  // until the point of reference.
1879  DPRINTF(CacheVerbose, "%s: packet %s found block: %s\n",
1880  __func__, pkt->print(), blk->print());
1881  PacketPtr wb_pkt = writecleanBlk(blk, pkt->req->getDest(),
1882  pkt->id);
1883  PacketList writebacks;
1884  writebacks.push_back(wb_pkt);
1885  doWritebacks(writebacks, 0);
1886  }
1887 
1888  return false;
1889  }
1890 }
1891 
1892 bool
1893 BaseCache::sendWriteQueuePacket(WriteQueueEntry *wq_entry)
1894 {
1895  assert(wq_entry);
1896 
1897  // always a single target for write queue entries
1898  PacketPtr tgt_pkt = wq_entry->getTarget()->pkt;
1899 
1900  DPRINTF(Cache, "%s: write %s\n", __func__, tgt_pkt->print());
1901 
1902  // forward as is, both for evictions and uncacheable writes
1903  if (!memSidePort.sendTimingReq(tgt_pkt)) {
1904  // note that we have now masked any requestBus and
1905  // schedSendEvent (we will wait for a retry before
1906  // doing anything), and this is so even if we do not
1907  // care about this packet and might override it before
1908  // it gets retried
1909  return true;
1910  } else {
1911  markInService(wq_entry);
1912  return false;
1913  }
1914 }
1915 
1916 void
1917 BaseCache::serialize(CheckpointOut &cp) const
1918 {
1919  bool dirty(isDirty());
1920 
1921  if (dirty) {
1922  warn("*** The cache still contains dirty data. ***\n");
1923  warn(" Make sure to drain the system using the correct flags.\n");
1924  warn(" This checkpoint will not restore correctly " \
1925  "and dirty data in the cache will be lost!\n");
1926  }
1927 
1928  // Since we don't checkpoint the data in the cache, any dirty data
1929  // will be lost when restoring from a checkpoint of a system that
1930  // wasn't drained properly. Flag the checkpoint as invalid if the
1931  // cache contains dirty data.
1932  bool bad_checkpoint(dirty);
1933  SERIALIZE_SCALAR(bad_checkpoint);
1934 }
1935 
1936 void
1937 BaseCache::unserialize(CheckpointIn &cp)
1938 {
1939  bool bad_checkpoint;
1940  UNSERIALIZE_SCALAR(bad_checkpoint);
1941  if (bad_checkpoint) {
1942  fatal("Restoring from checkpoints with dirty caches is not "
1943  "supported in the classic memory system. Please remove any "
1944  "caches or drain them properly before taking checkpoints.\n");
1945  }
1946 }
1947 
1948 
1949 BaseCache::CacheCmdStats::CacheCmdStats(BaseCache &c,
1950  const std::string &name)
1951  : statistics::Group(&c, name.c_str()), cache(c),
1952  ADD_STAT(hits, statistics::units::Count::get(),
1953  ("number of " + name + " hits").c_str()),
1954  ADD_STAT(misses, statistics::units::Count::get(),
1955  ("number of " + name + " misses").c_str()),
1956  ADD_STAT(missLatency, statistics::units::Tick::get(),
1957  ("number of " + name + " miss ticks").c_str()),
1958  ADD_STAT(accesses, statistics::units::Count::get(),
1959  ("number of " + name + " accesses(hits+misses)").c_str()),
1960  ADD_STAT(missRate, statistics::units::Ratio::get(),
1961  ("miss rate for " + name + " accesses").c_str()),
1962  ADD_STAT(avgMissLatency, statistics::units::Rate<
1963  statistics::units::Tick, statistics::units::Count>::get(),
1964  ("average " + name + " miss latency").c_str()),
1965  ADD_STAT(mshrHits, statistics::units::Count::get(),
1966  ("number of " + name + " MSHR hits").c_str()),
1967  ADD_STAT(mshrMisses, statistics::units::Count::get(),
1968  ("number of " + name + " MSHR misses").c_str()),
1969  ADD_STAT(mshrUncacheable, statistics::units::Count::get(),
1970  ("number of " + name + " MSHR uncacheable").c_str()),
1971  ADD_STAT(mshrMissLatency, statistics::units::Tick::get(),
1972  ("number of " + name + " MSHR miss ticks").c_str()),
1973  ADD_STAT(mshrUncacheableLatency, statistics::units::Tick::get(),
1974  ("number of " + name + " MSHR uncacheable ticks").c_str()),
1975  ADD_STAT(mshrMissRate, statistics::units::Ratio::get(),
1976  ("mshr miss rate for " + name + " accesses").c_str()),
1977  ADD_STAT(avgMshrMissLatency, statistics::units::Rate<
1978  statistics::units::Tick, statistics::units::Count>::get(),
1979  ("average " + name + " mshr miss latency").c_str()),
1980  ADD_STAT(avgMshrUncacheableLatency, statistics::units::Rate<
1981  statistics::units::Tick, statistics::units::Count>::get(),
1982  ("average " + name + " mshr uncacheable latency").c_str())
1983 {
1984 }
1985 
1986 void
1987 BaseCache::CacheCmdStats::regStatsFromParent()
1988 {
1989  using namespace statistics;
1990 
1991  statistics::Group::regStats();
1992  System *system = cache.system;
1993  const auto max_requestors = system->maxRequestors();
1994 
1995  hits
1996  .init(max_requestors)
1997  .flags(total | nozero | nonan)
1998  ;
1999  for (int i = 0; i < max_requestors; i++) {
2000  hits.subname(i, system->getRequestorName(i));
2001  }
2002 
2003  // Miss statistics
2004  misses
2005  .init(max_requestors)
2006  .flags(total | nozero | nonan)
2007  ;
2008  for (int i = 0; i < max_requestors; i++) {
2009  misses.subname(i, system->getRequestorName(i));
2010  }
2011 
2012  // Miss latency statistics
2013  missLatency
2014  .init(max_requestors)
2015  .flags(total | nozero | nonan)
2016  ;
2017  for (int i = 0; i < max_requestors; i++) {
2018  missLatency.subname(i, system->getRequestorName(i));
2019  }
2020 
2021  // access formulas
2022  accesses.flags(total | nozero | nonan);
2023  accesses = hits + misses;
2024  for (int i = 0; i < max_requestors; i++) {
2025  accesses.subname(i, system->getRequestorName(i));
2026  }
2027 
2028  // miss rate formulas
2029  missRate.flags(total | nozero | nonan);
2030  missRate = misses / accesses;
2031  for (int i = 0; i < max_requestors; i++) {
2032  missRate.subname(i, system->getRequestorName(i));
2033  }
2034 
2035  // miss latency formulas
2036  avgMissLatency.flags(total | nozero | nonan);
2037  avgMissLatency = missLatency / misses;
2038  for (int i = 0; i < max_requestors; i++) {
2039  avgMissLatency.subname(i, system->getRequestorName(i));
2040  }
2041 
2042  // MSHR statistics
2043  // MSHR hit statistics
2044  mshrHits
2045  .init(max_requestors)
2046  .flags(total | nozero | nonan)
2047  ;
2048  for (int i = 0; i < max_requestors; i++) {
2049  mshrHits.subname(i, system->getRequestorName(i));
2050  }
2051 
2052  // MSHR miss statistics
2053  mshrMisses
2054  .init(max_requestors)
2055  .flags(total | nozero | nonan)
2056  ;
2057  for (int i = 0; i < max_requestors; i++) {
2058  mshrMisses.subname(i, system->getRequestorName(i));
2059  }
2060 
2061  // MSHR miss latency statistics
2062  mshrMissLatency
2063  .init(max_requestors)
2064  .flags(total | nozero | nonan)
2065  ;
2066  for (int i = 0; i < max_requestors; i++) {
2067  mshrMissLatency.subname(i, system->getRequestorName(i));
2068  }
2069 
2070  // MSHR uncacheable statistics
2071  mshrUncacheable
2072  .init(max_requestors)
2073  .flags(total | nozero | nonan)
2074  ;
2075  for (int i = 0; i < max_requestors; i++) {
2076  mshrUncacheable.subname(i, system->getRequestorName(i));
2077  }
2078 
2079  // MSHR miss latency statistics
2080  mshrUncacheableLatency
2081  .init(max_requestors)
2082  .flags(total | nozero | nonan)
2083  ;
2084  for (int i = 0; i < max_requestors; i++) {
2085  mshrUncacheableLatency.subname(i, system->getRequestorName(i));
2086  }
2087 
2088  // MSHR miss rate formulas
2089  mshrMissRate.flags(total | nozero | nonan);
2090  mshrMissRate = mshrMisses / accesses;
2091 
2092  for (int i = 0; i < max_requestors; i++) {
2093  mshrMissRate.subname(i, system->getRequestorName(i));
2094  }
2095 
2096  // mshrMiss latency formulas
2097  avgMshrMissLatency.flags(total | nozero | nonan);
2098  avgMshrMissLatency = mshrMissLatency / mshrMisses;
2099  for (int i = 0; i < max_requestors; i++) {
2100  avgMshrMissLatency.subname(i, system->getRequestorName(i));
2101  }
2102 
2103  // mshrUncacheable latency formulas
2104  avgMshrUncacheableLatency.flags(total | nozero | nonan);
2105  avgMshrUncacheableLatency = mshrUncacheableLatency / mshrUncacheable;
2106  for (int i = 0; i < max_requestors; i++) {
2107  avgMshrUncacheableLatency.subname(i, system->getRequestorName(i));
2108  }
2109 }
2110 
2111 BaseCache::CacheStats::CacheStats(BaseCache &c)
2112  : statistics::Group(&c), cache(c),
2113 
2114  ADD_STAT(demandHits, statistics::units::Count::get(),
2115  "number of demand (read+write) hits"),
2116  ADD_STAT(overallHits, statistics::units::Count::get(),
2117  "number of overall hits"),
2118  ADD_STAT(demandMisses, statistics::units::Count::get(),
2119  "number of demand (read+write) misses"),
2120  ADD_STAT(overallMisses, statistics::units::Count::get(),
2121  "number of overall misses"),
2122  ADD_STAT(demandMissLatency, statistics::units::Tick::get(),
2123  "number of demand (read+write) miss ticks"),
2124  ADD_STAT(overallMissLatency, statistics::units::Tick::get(),
2125  "number of overall miss ticks"),
2126  ADD_STAT(demandAccesses, statistics::units::Count::get(),
2127  "number of demand (read+write) accesses"),
2128  ADD_STAT(overallAccesses, statistics::units::Count::get(),
2129  "number of overall (read+write) accesses"),
2130  ADD_STAT(demandMissRate, statistics::units::Ratio::get(),
2131  "miss rate for demand accesses"),
2132  ADD_STAT(overallMissRate, statistics::units::Ratio::get(),
2133  "miss rate for overall accesses"),
2134  ADD_STAT(demandAvgMissLatency, statistics::units::Rate<
2135  statistics::units::Cycle, statistics::units::Count>::get(),
2136  "average demand miss latency"),
2137  ADD_STAT(overallAvgMissLatency, statistics::units::Rate<
2138  statistics::units::Cycle, statistics::units::Count>::get(),
2139  "average overall miss latency"),
2140  ADD_STAT(blockedCycles, statistics::units::Cycle::get(),
2141  "number of cycles access was blocked"),
2142  ADD_STAT(blockedCauses, statistics::units::Count::get(),
2143  "number of times access was blocked"),
2144  ADD_STAT(avgBlocked, statistics::units::Rate<
2145  statistics::units::Cycle, statistics::units::Count>::get(),
2146  "average number of cycles each access was blocked"),
2147  ADD_STAT(writebacks, statistics::units::Count::get(),
2148  "number of writebacks"),
2149  ADD_STAT(demandMshrHits, statistics::units::Count::get(),
2150  "number of demand (read+write) MSHR hits"),
2151  ADD_STAT(overallMshrHits, statistics::units::Count::get(),
2152  "number of overall MSHR hits"),
2153  ADD_STAT(demandMshrMisses, statistics::units::Count::get(),
2154  "number of demand (read+write) MSHR misses"),
2155  ADD_STAT(overallMshrMisses, statistics::units::Count::get(),
2156  "number of overall MSHR misses"),
2157  ADD_STAT(overallMshrUncacheable, statistics::units::Count::get(),
2158  "number of overall MSHR uncacheable misses"),
2159  ADD_STAT(demandMshrMissLatency, statistics::units::Tick::get(),
2160  "number of demand (read+write) MSHR miss ticks"),
2161  ADD_STAT(overallMshrMissLatency, statistics::units::Tick::get(),
2162  "number of overall MSHR miss ticks"),
2163  ADD_STAT(overallMshrUncacheableLatency, statistics::units::Tick::get(),
2164  "number of overall MSHR uncacheable ticks"),
2165  ADD_STAT(demandMshrMissRate, statistics::units::Ratio::get(),
2166  "mshr miss ratio for demand accesses"),
2167  ADD_STAT(overallMshrMissRate, statistics::units::Ratio::get(),
2168  "mshr miss ratio for overall accesses"),
2169  ADD_STAT(demandAvgMshrMissLatency, statistics::units::Rate<
2170  statistics::units::Cycle, statistics::units::Count>::get(),
2171  "average demand mshr miss latency"),
2172  ADD_STAT(overallAvgMshrMissLatency, statistics::units::Rate<
2173  statistics::units::Cycle, statistics::units::Count>::get(),
2174  "average overall mshr miss latency"),
2175  ADD_STAT(overallAvgMshrUncacheableLatency, statistics::units::Rate<
2176  statistics::units::Cycle, statistics::units::Count>::get(),
2177  "average overall mshr uncacheable latency"),
2178  ADD_STAT(replacements, statistics::units::Count::get(),
2179  "number of replacements"),
2180  ADD_STAT(dataExpansions, statistics::units::Count::get(),
2181  "number of data expansions"),
2182  ADD_STAT(dataContractions, statistics::units::Count::get(),
2183  "number of data contractions"),
2184  cmd(MemCmd::NUM_MEM_CMDS)
2185 {
2186  for (int idx = 0; idx < MemCmd::NUM_MEM_CMDS; ++idx)
2187  cmd[idx].reset(new CacheCmdStats(c, MemCmd(idx).toString()));
2188 }
2189 
2190 void
2191 BaseCache::CacheStats::regStats()
2192 {
2193  using namespace statistics;
2194 
2195  statistics::Group::regStats();
2196 
2197  System *system = cache.system;
2198  const auto max_requestors = system->maxRequestors();
2199 
2200  for (auto &cs : cmd)
2201  cs->regStatsFromParent();
2202 
2203 // These macros make it easier to sum the right subset of commands and
2204 // to change the subset of commands that are considered "demand" vs
2205 // "non-demand"
2206 #define SUM_DEMAND(s) \
2207  (cmd[MemCmd::ReadReq]->s + cmd[MemCmd::WriteReq]->s + \
2208  cmd[MemCmd::WriteLineReq]->s + cmd[MemCmd::ReadExReq]->s + \
2209  cmd[MemCmd::ReadCleanReq]->s + cmd[MemCmd::ReadSharedReq]->s)
2210 
2211 // should writebacks be included here? prior code was inconsistent...
2212 #define SUM_NON_DEMAND(s) \
2213  (cmd[MemCmd::SoftPFReq]->s + cmd[MemCmd::HardPFReq]->s + \
2214  cmd[MemCmd::SoftPFExReq]->s)
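// For example, demandHits = SUM_DEMAND(hits) below expands to the element-wise
// sum of the per-requestor 'hits' vectors of ReadReq, WriteReq, WriteLineReq,
// ReadExReq, ReadCleanReq and ReadSharedReq, while the overall* formulas fold
// in the prefetch commands via SUM_NON_DEMAND on top of that.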
2215 
2216  demandHits.flags(total | nozero | nonan);
2217  demandHits = SUM_DEMAND(hits);
2218  for (int i = 0; i < max_requestors; i++) {
2219  demandHits.subname(i, system->getRequestorName(i));
2220  }
2221 
2222  overallHits.flags(total | nozero | nonan);
2223  overallHits = demandHits + SUM_NON_DEMAND(hits);
2224  for (int i = 0; i < max_requestors; i++) {
2225  overallHits.subname(i, system->getRequestorName(i));
2226  }
2227 
2228  demandMisses.flags(total | nozero | nonan);
2229  demandMisses = SUM_DEMAND(misses);
2230  for (int i = 0; i < max_requestors; i++) {
2231  demandMisses.subname(i, system->getRequestorName(i));
2232  }
2233 
2234  overallMisses.flags(total | nozero | nonan);
2235  overallMisses = demandMisses + SUM_NON_DEMAND(misses);
2236  for (int i = 0; i < max_requestors; i++) {
2237  overallMisses.subname(i, system->getRequestorName(i));
2238  }
2239 
2240  demandMissLatency.flags(total | nozero | nonan);
2241  demandMissLatency = SUM_DEMAND(missLatency);
2242  for (int i = 0; i < max_requestors; i++) {
2243  demandMissLatency.subname(i, system->getRequestorName(i));
2244  }
2245 
2246  overallMissLatency.flags(total | nozero | nonan);
2247  overallMissLatency = demandMissLatency + SUM_NON_DEMAND(missLatency);
2248  for (int i = 0; i < max_requestors; i++) {
2249  overallMissLatency.subname(i, system->getRequestorName(i));
2250  }
2251 
2252  demandAccesses.flags(total | nozero | nonan);
2253  demandAccesses = demandHits + demandMisses;
2254  for (int i = 0; i < max_requestors; i++) {
2255  demandAccesses.subname(i, system->getRequestorName(i));
2256  }
2257 
2258  overallAccesses.flags(total | nozero | nonan);
2259  overallAccesses = overallHits + overallMisses;
2260  for (int i = 0; i < max_requestors; i++) {
2261  overallAccesses.subname(i, system->getRequestorName(i));
2262  }
2263 
2264  demandMissRate.flags(total | nozero | nonan);
2265  demandMissRate = demandMisses / demandAccesses;
2266  for (int i = 0; i < max_requestors; i++) {
2267  demandMissRate.subname(i, system->getRequestorName(i));
2268  }
2269 
2270  overallMissRate.flags(total | nozero | nonan);
2271  overallMissRate = overallMisses / overallAccesses;
2272  for (int i = 0; i < max_requestors; i++) {
2273  overallMissRate.subname(i, system->getRequestorName(i));
2274  }
2275 
2276  demandAvgMissLatency.flags(total | nozero | nonan);
2277  demandAvgMissLatency = demandMissLatency / demandMisses;
2278  for (int i = 0; i < max_requestors; i++) {
2279  demandAvgMissLatency.subname(i, system->getRequestorName(i));
2280  }
2281 
2282  overallAvgMissLatency.flags(total | nozero | nonan);
2283  overallAvgMissLatency = overallMissLatency / overallMisses;
2284  for (int i = 0; i < max_requestors; i++) {
2285  overallAvgMissLatency.subname(i, system->getRequestorName(i));
2286  }
2287 
2288  blockedCycles.init(NUM_BLOCKED_CAUSES);
2289  blockedCycles
2290  .subname(Blocked_NoMSHRs, "no_mshrs")
2291  .subname(Blocked_NoTargets, "no_targets")
2292  ;
2293 
2294 
2295  blockedCauses.init(NUM_BLOCKED_CAUSES);
2296  blockedCauses
2297  .subname(Blocked_NoMSHRs, "no_mshrs")
2298  .subname(Blocked_NoTargets, "no_targets")
2299  ;
2300 
2301  avgBlocked
2302  .subname(Blocked_NoMSHRs, "no_mshrs")
2303  .subname(Blocked_NoTargets, "no_targets")
2304  ;
2305  avgBlocked = blockedCycles / blockedCauses;
2306 
2307  writebacks
2308  .init(max_requestors)
2309  .flags(total | nozero | nonan)
2310  ;
2311  for (int i = 0; i < max_requestors; i++) {
2312  writebacks.subname(i, system->getRequestorName(i));
2313  }
2314 
2315  demandMshrHits.flags(total | nozero | nonan);
2316  demandMshrHits = SUM_DEMAND(mshrHits);
2317  for (int i = 0; i < max_requestors; i++) {
2318  demandMshrHits.subname(i, system->getRequestorName(i));
2319  }
2320 
2321  overallMshrHits.flags(total | nozero | nonan);
2322  overallMshrHits = demandMshrHits + SUM_NON_DEMAND(mshrHits);
2323  for (int i = 0; i < max_requestors; i++) {
2324  overallMshrHits.subname(i, system->getRequestorName(i));
2325  }
2326 
2327  demandMshrMisses.flags(total | nozero | nonan);
2328  demandMshrMisses = SUM_DEMAND(mshrMisses);
2329  for (int i = 0; i < max_requestors; i++) {
2330  demandMshrMisses.subname(i, system->getRequestorName(i));
2331  }
2332 
2333  overallMshrMisses.flags(total | nozero | nonan);
2334  overallMshrMisses = demandMshrMisses + SUM_NON_DEMAND(mshrMisses);
2335  for (int i = 0; i < max_requestors; i++) {
2336  overallMshrMisses.subname(i, system->getRequestorName(i));
2337  }
2338 
2339  demandMshrMissLatency.flags(total | nozero | nonan);
2340  demandMshrMissLatency = SUM_DEMAND(mshrMissLatency);
2341  for (int i = 0; i < max_requestors; i++) {
2342  demandMshrMissLatency.subname(i, system->getRequestorName(i));
2343  }
2344 
2345  overallMshrMissLatency.flags(total | nozero | nonan);
2346  overallMshrMissLatency =
2347  demandMshrMissLatency + SUM_NON_DEMAND(mshrMissLatency);
2348  for (int i = 0; i < max_requestors; i++) {
2349  overallMshrMissLatency.subname(i, system->getRequestorName(i));
2350  }
2351 
2352  overallMshrUncacheable.flags(total | nozero | nonan);
2353  overallMshrUncacheable =
2354  SUM_DEMAND(mshrUncacheable) + SUM_NON_DEMAND(mshrUncacheable);
2355  for (int i = 0; i < max_requestors; i++) {
2356  overallMshrUncacheable.subname(i, system->getRequestorName(i));
2357  }
2358 
2359 
2360  overallMshrUncacheableLatency.flags(total | nozero | nonan);
2361  overallMshrUncacheableLatency =
2362  SUM_DEMAND(mshrUncacheableLatency) +
2363  SUM_NON_DEMAND(mshrUncacheableLatency);
2364  for (int i = 0; i < max_requestors; i++) {
2365  overallMshrUncacheableLatency.subname(i, system->getRequestorName(i));
2366  }
2367 
2368  demandMshrMissRate.flags(total | nozero | nonan);
2369  demandMshrMissRate = demandMshrMisses / demandAccesses;
2370  for (int i = 0; i < max_requestors; i++) {
2371  demandMshrMissRate.subname(i, system->getRequestorName(i));
2372  }
2373 
2374  overallMshrMissRate.flags(total | nozero | nonan);
2375  overallMshrMissRate = overallMshrMisses / overallAccesses;
2376  for (int i = 0; i < max_requestors; i++) {
2377  overallMshrMissRate.subname(i, system->getRequestorName(i));
2378  }
2379 
2380  demandAvgMshrMissLatency.flags(total | nozero | nonan);
2381  demandAvgMshrMissLatency = demandMshrMissLatency / demandMshrMisses;
2382  for (int i = 0; i < max_requestors; i++) {
2383  demandAvgMshrMissLatency.subname(i, system->getRequestorName(i));
2384  }
2385 
2386  overallAvgMshrMissLatency.flags(total | nozero | nonan);
2387  overallAvgMshrMissLatency = overallMshrMissLatency / overallMshrMisses;
2388  for (int i = 0; i < max_requestors; i++) {
2389  overallAvgMshrMissLatency.subname(i, system->getRequestorName(i));
2390  }
2391 
2392  overallAvgMshrUncacheableLatency.flags(total | nozero | nonan);
2393  overallAvgMshrUncacheableLatency =
2394  overallMshrUncacheableLatency / overallMshrUncacheable;
2395  for (int i = 0; i < max_requestors; i++) {
2396  overallAvgMshrUncacheableLatency.subname(i,
2397  system->getRequestorName(i));
2398  }
2399 
2400  dataExpansions.flags(nozero | nonan);
2401  dataContractions.flags(nozero | nonan);
2402 }
2403 
2404 void
2405 BaseCache::regProbePoints()
2406 {
2407  ppHit = new ProbePointArg<PacketPtr>(this->getProbeManager(), "Hit");
2408  ppMiss = new ProbePointArg<PacketPtr>(this->getProbeManager(), "Miss");
2409  ppFill = new ProbePointArg<PacketPtr>(this->getProbeManager(), "Fill");
2410  ppDataUpdate =
2411  new ProbePointArg<DataUpdate>(this->getProbeManager(), "Data Update");
2412 }
2413 
2415 //
2416 // CpuSidePort
2417 //
2419 bool
2420 BaseCache::CpuSidePort::recvTimingSnoopResp(PacketPtr pkt)
2421 {
2422  // Snoops shouldn't happen when bypassing caches
2423  assert(!cache->system->bypassCaches());
2424 
2425  assert(pkt->isResponse());
2426 
2427  // Express snoop responses from requestor to responder, e.g., from L1 to L2
2428  cache->recvTimingSnoopResp(pkt);
2429  return true;
2430 }
2431 
2432 
2433 bool
2434 BaseCache::CpuSidePort::tryTiming(PacketPtr pkt)
2435 {
2436  if (cache->system->bypassCaches() || pkt->isExpressSnoop()) {
2437  // always let express snoop packets through even if blocked
2438  return true;
2439  } else if (blocked || mustSendRetry) {
2440  // either already committed to send a retry, or blocked
2441  mustSendRetry = true;
2442  return false;
2443  }
2444  mustSendRetry = false;
2445  return true;
2446 }
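// This is the timing-port retry handshake: express snoops are always admitted;
// otherwise, if the cache is blocked or a retry is already owed, mustSendRetry
// is latched so a retry can be issued once the cache unblocks, and the request
// is rejected for now.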
2447 
2448 bool
2449 BaseCache::CpuSidePort::recvTimingReq(PacketPtr pkt)
2450 {
2451  assert(pkt->isRequest());
2452 
2453  if (cache->system->bypassCaches()) {
2454  // Just forward the packet if caches are disabled.
2455  // @todo This should really enqueue the packet rather
2456  GEM5_VAR_USED bool success = cache->memSidePort.sendTimingReq(pkt);
2457  assert(success);
2458  return true;
2459  } else if (tryTiming(pkt)) {
2460  cache->recvTimingReq(pkt);
2461  return true;
2462  }
2463  return false;
2464 }
2465 
2466 Tick
2467 BaseCache::CpuSidePort::recvAtomic(PacketPtr pkt)
2468 {
2469  if (cache->system->bypassCaches()) {
2470  // Forward the request if the system is in cache bypass mode.
2471  return cache->memSidePort.sendAtomic(pkt);
2472  } else {
2473  return cache->recvAtomic(pkt);
2474  }
2475 }
2476 
2477 void
2478 BaseCache::CpuSidePort::recvFunctional(PacketPtr pkt)
2479 {
2480  if (cache->system->bypassCaches()) {
2481  // The cache should be flushed if we are in cache bypass mode,
2482  // so we don't need to check if we need to update anything.
2483  cache->memSidePort.sendFunctional(pkt);
2484  return;
2485  }
2486 
2487  // functional request
2488  cache->functionalAccess(pkt, true);
2489 }
2490 
2491 AddrRangeList
2492 BaseCache::CpuSidePort::getAddrRanges() const
2493 {
2494  return cache->getAddrRanges();
2495 }
2496 
2497 
2498 BaseCache::
2499 CpuSidePort::CpuSidePort(const std::string &_name, BaseCache *_cache,
2500  const std::string &_label)
2501  : CacheResponsePort(_name, _cache, _label), cache(_cache)
2502 {
2503 }
2504 
2506 //
2507 // MemSidePort
2508 //
2510 bool
2511 BaseCache::MemSidePort::recvTimingResp(PacketPtr pkt)
2512 {
2513  cache->recvTimingResp(pkt);
2514  return true;
2515 }
2516 
2517 // Express snooping requests to memside port
2518 void
2519 BaseCache::MemSidePort::recvTimingSnoopReq(PacketPtr pkt)
2520 {
2521  // Snoops shouldn't happen when bypassing caches
2522  assert(!cache->system->bypassCaches());
2523 
2524  // handle snooping requests
2525  cache->recvTimingSnoopReq(pkt);
2526 }
2527 
2528 Tick
2529 BaseCache::MemSidePort::recvAtomicSnoop(PacketPtr pkt)
2530 {
2531  // Snoops shouldn't happen when bypassing caches
2532  assert(!cache->system->bypassCaches());
2533 
2534  return cache->recvAtomicSnoop(pkt);
2535 }
2536 
2537 void
2538 BaseCache::MemSidePort::recvFunctionalSnoop(PacketPtr pkt)
2539 {
2540  // Snoops shouldn't happen when bypassing caches
2541  assert(!cache->system->bypassCaches());
2542 
2543  // functional snoop (note that in contrast to atomic we don't have
2544  // a specific functionalSnoop method, as they have the same
2545  // behaviour regardless)
2546  cache->functionalAccess(pkt, false);
2547 }
2548 
2549 void
2550 BaseCache::CacheReqPacketQueue::sendDeferredPacket()
2551 {
2552  // sanity check
2553  assert(!waitingOnRetry);
2554 
2555  // there should never be any deferred request packets in the
2556  // queue, instead we rely on the cache to provide the packets
2557  // from the MSHR queue or write queue
2558  assert(deferredPacketReadyTime() == MaxTick);
2559 
2560  // check for request packets (requests & writebacks)
2561  QueueEntry* entry = cache.getNextQueueEntry();
2562 
2563  if (!entry) {
2564  // can happen if e.g. we attempt a writeback and fail, but
2565  // before the retry, the writeback is eliminated because
2566  // we snoop another cache's ReadEx.
2567  } else {
2568  // let our snoop responses go first if there are responses to
2569  // the same addresses
2570  if (checkConflictingSnoop(entry->getTarget()->pkt)) {
2571  return;
2572  }
2573  waitingOnRetry = entry->sendPacket(cache);
2574  }
2575 
2576  // if we succeeded and are not waiting for a retry, schedule the
2577  // next send considering when the next queue is ready, note that
2578  // snoop responses have their own packet queue and thus schedule
2579  // their own events
2580  if (!waitingOnRetry) {
2581  schedSendEvent(cache.nextQueueReadyTime());
2582  }
2583 }
2584 
2585 BaseCache::MemSidePort::MemSidePort(const std::string &_name,
2586  BaseCache *_cache,
2587  const std::string &_label)
2588  : CacheRequestPort(_name, _cache, _reqQueue, _snoopRespQueue),
2589  _reqQueue(*_cache, *this, _snoopRespQueue, _label),
2590  _snoopRespQueue(*_cache, *this, true, _label), cache(_cache)
2591 {
2592 }
2593 
2594 void
2595 WriteAllocator::updateMode(Addr write_addr, unsigned write_size,
2596  Addr blk_addr)
2597 {
2598  // check if we are continuing where the last write ended
2599  if (nextAddr == write_addr) {
2600  delayCtr[blk_addr] = delayThreshold;
2601  // stop if we have already saturated
2602  if (mode != WriteMode::NO_ALLOCATE) {
2603  byteCount += write_size;
2604  // switch to streaming mode if we have passed the lower
2605  // threshold
2606  if (mode == WriteMode::ALLOCATE &&
2607  byteCount > coalesceLimit) {
2608  mode = WriteMode::COALESCE;
2609  DPRINTF(Cache, "Switched to write coalescing\n");
2610  } else if (mode == WriteMode::COALESCE &&
2611  byteCount > noAllocateLimit) {
2612  // and continue and switch to non-allocating mode if we
2613  // pass the upper threshold
2614  mode = WriteMode::NO_ALLOCATE;
2615  DPRINTF(Cache, "Switched to write-no-allocate\n");
2616  }
2617  }
2618  } else {
2619  // we did not see a write matching the previous one, start
2620  // over again
2621  byteCount = write_size;
2622  mode = WriteMode::ALLOCATE;
2623  resetDelay(blk_addr);
2624  }
2625  nextAddr = write_addr + write_size;
2626 }
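// Walk-through (hypothetical thresholds): a stream of back-to-back 64-byte
// writes keeps matching nextAddr, so byteCount grows; once it exceeds
// coalesceLimit the allocator moves from ALLOCATE to COALESCE, and past
// noAllocateLimit it stops allocating entirely (NO_ALLOCATE). Any write that
// does not continue at nextAddr resets byteCount and drops back to ALLOCATE.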
2627 
2628 } // namespace gem5
gem5::Packet::getBlockAddr
Addr getBlockAddr(unsigned int blk_size) const
Definition: packet.hh:805
gem5::BaseCache::memWriteback
virtual void memWriteback() override
Write back dirty blocks in the cache using functional accesses.
Definition: base.cc:1712
gem5::curTick
Tick curTick()
The universal simulation clock.
Definition: cur_tick.hh:46
gem5::BaseCache::evictBlock
virtual GEM5_NO_DISCARD PacketPtr evictBlock(CacheBlk *blk)=0
Evict a cache block.
fatal
#define fatal(...)
This implements a cprintf based fatal() function.
Definition: logging.hh:189
gem5::PortID
int16_t PortID
Port index/ID type, and a symbolic name for an invalid port id.
Definition: types.hh:252
gem5::MSHR
Miss Status and handling Register.
Definition: mshr.hh:74
gem5::SimObject::getPort
virtual Port & getPort(const std::string &if_name, PortID idx=InvalidPortID)
Get a port with a given name and index.
Definition: sim_object.cc:126
gem5::Packet::isAtomicOp
bool isAtomicOp() const
Definition: packet.hh:820
gem5::BaseCache::MemSidePort::MemSidePort
MemSidePort(const std::string &_name, BaseCache *_cache, const std::string &_label)
Definition: base.cc:2585
gem5::BaseCache::incMissCount
void incMissCount(PacketPtr pkt)
Definition: base.hh:1287
gem5::BaseCache::cpuSidePort
CpuSidePort cpuSidePort
Definition: base.hh:338
queue_entry.hh
gem5::RequestPort::sendTimingReq
bool sendTimingReq(PacketPtr pkt)
Attempt to send a timing request to the responder port by calling its corresponding receive function.
Definition: port.hh:495
gem5::BaseCache::handleTimingReqMiss
virtual void handleTimingReqMiss(PacketPtr pkt, CacheBlk *blk, Tick forward_time, Tick request_time)=0
gem5::Packet::isRequest
bool isRequest() const
Definition: packet.hh:586
gem5::BaseCache::CacheCmdStats::regStatsFromParent
void regStatsFromParent()
Callback to register stats from parent CacheStats::regStats().
Definition: base.cc:1987
warn
#define warn(...)
Definition: logging.hh:245
gem5::WriteAllocator::reset
void reset()
Reset the write allocator state, meaning that it allocates for writes and has not recorded any inform...
Definition: base.hh:1406
compressed_tags.hh
gem5::BaseCache::~BaseCache
~BaseCache()
Definition: base.cc:139
base.hh
gem5::BaseCache::invalidateBlock
void invalidateBlock(CacheBlk *blk)
Invalidate a cache block.
Definition: base.cc:1587
gem5::MemCmd::WriteClean
@ WriteClean
Definition: packet.hh:94
gem5::BaseCache::DataUpdate::oldData
std::vector< uint64_t > oldData
The stale data contents.
Definition: base.hh:131
gem5::Packet::getOffset
Addr getOffset(unsigned int blk_size) const
Definition: packet.hh:800
gem5::MemCmd::CleanEvict
@ CleanEvict
Definition: packet.hh:95
gem5::BaseCache::CacheStats::replacements
statistics::Scalar replacements
Number of replacements of valid blocks.
Definition: base.hh:1125
gem5::ResponsePort::sendFunctionalSnoop
void sendFunctionalSnoop(PacketPtr pkt) const
Send a functional snoop request packet, where the data is instantly updated everywhere in the memory ...
Definition: port.hh:346
gem5::CacheBlk::clearPrefetched
void clearPrefetched()
Clear the prefetching bit.
Definition: cache_blk.hh:255
gem5::BaseCache::cmpAndSwap
void cmpAndSwap(CacheBlk *blk, PacketPtr pkt)
Handle doing the Compare and Swap function for SPARC.
Definition: base.cc:721
gem5::BaseCache::NUM_BLOCKED_CAUSES
@ NUM_BLOCKED_CAUSES
Definition: base.hh:116
data
const char data[]
Definition: circlebuf.test.cc:48
gem5::BaseCache::ppDataUpdate
ProbePointArg< DataUpdate > * ppDataUpdate
To probe when the contents of a block are updated.
Definition: base.hh:372
gem5::MemCmd::SwapReq
@ SwapReq
Definition: packet.hh:115
UNSERIALIZE_SCALAR
#define UNSERIALIZE_SCALAR(scalar)
Definition: serialize.hh:575
gem5::BaseCache::CacheRequestPort
A cache request port is used for the memory-side port of the cache, and in addition to the basic timi...
Definition: base.hh:152
gem5::BaseCache::access
virtual bool access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat, PacketList &writebacks)
Does all the processing necessary to perform the provided request.
Definition: base.cc:1153
gem5::BaseCache::forwardSnoops
bool forwardSnoops
Do we forward snoops from mem side port through to cpu side port?
Definition: base.hh:928
gem5::QueueEntry::Target::pkt
const PacketPtr pkt
Pending request packet.
Definition: queue_entry.hh:93
gem5::Packet::isCleanEviction
bool isCleanEviction() const
Is this packet a clean eviction, including both actual clean evict packets, but also clean writebacks...
Definition: packet.hh:1394
gem5::BaseCache::CacheCmdStats::mshrHits
statistics::Vector mshrHits
Number of misses that hit in the MSHRs per command and thread.
Definition: base.hh:1020
gem5::TempCacheBlk::invalidate
void invalidate() override
Invalidate the block and clear all state.
Definition: cache_blk.hh:524
gem5::BaseCache::CacheResponsePort::processSendRetry
void processSendRetry()
Definition: base.cc:172
gem5::BaseCache::writeBuffer
WriteQueue writeBuffer
Write/writeback buffer.
Definition: base.hh:347
warn_once
#define warn_once(...)
Definition: logging.hh:249
gem5::Packet::setData
void setData(const uint8_t *p)
Copy data into the packet from the provided pointer.
Definition: packet.hh:1252
gem5::BaseCache::moveContractions
const bool moveContractions
Similar to data expansions, after a block improves its compression, it may need to be moved elsewhere...
Definition: base.hh:959
gem5::Packet::pushSenderState
void pushSenderState(SenderState *sender_state)
Push a new sender state to the packet and make the current sender state the predecessor of the new on...
Definition: packet.cc:316
gem5::BaseCache::recvTimingResp
virtual void recvTimingResp(PacketPtr pkt)
Handles a response (cache line fill/write ack) from the bus.
Definition: base.cc:420
mshr.hh
gem5::BaseCache::CacheResponsePort::CacheResponsePort
CacheResponsePort(const std::string &_name, BaseCache *_cache, const std::string &_label)
Definition: base.cc:69
gem5::Packet::req
RequestPtr req
A pointer to the original request.
Definition: packet.hh:366
gem5::MSHR::allocOnFill
bool allocOnFill() const
Definition: mshr.hh:340
gem5::MSHR::isForward
bool isForward
True if the entry is just a simple forward from an upper level.
Definition: mshr.hh:127
gem5::BaseCache::MemSidePort::recvFunctionalSnoop
virtual void recvFunctionalSnoop(PacketPtr pkt)
Receive a functional snoop request packet from the peer.
Definition: base.cc:2538
gem5::BaseCache::coalesce
bool coalesce() const
Checks if the cache is coalescing writes.
Definition: base.cc:1731
base.hh
gem5::CheckpointIn
Definition: serialize.hh:68
gem5::compression::Base::getDecompressionLatency
Cycles getDecompressionLatency(const CacheBlk *blk)
Get the decompression latency if the block is compressed.
Definition: base.cc:197
gem5::CompressionBlk::setSizeBits
void setSizeBits(const std::size_t size)
Set size, in bits, of this compressed block's data.
Definition: super_blk.cc:99
gem5::EventBase::Delayed_Writeback_Pri
static const Priority Delayed_Writeback_Pri
For some reason "delayed" inter-cluster writebacks are scheduled before regular writebacks (which hav...
Definition: eventq.hh:172
gem5::MSHRQueue::canPrefetch
bool canPrefetch() const
Returns true if sufficient mshrs for prefetch.
Definition: mshr_queue.hh:158
SUM_NON_DEMAND
#define SUM_NON_DEMAND(s)
gem5::Packet::writeThrough
bool writeThrough() const
Definition: packet.hh:729
gem5::statistics::nozero
const FlagsType nozero
Don't print if this is zero.
Definition: info.hh:68
gem5::BaseCache::getPort
Port & getPort(const std::string &if_name, PortID idx=InvalidPortID) override
Get a port with a given name and index.
Definition: base.cc:201
cur_tick.hh
gem5::BaseCache::markInService
void markInService(MSHR *mshr, bool pending_modified_resp)
Mark a request as in service (sent downstream in the memory system), effectively making this MSHR the...
Definition: base.hh:408
gem5::Packet::setCacheResponding
void setCacheResponding()
Snoop flags.
Definition: packet.hh:640
gem5::Packet::isWriteback
bool isWriteback() const
Definition: packet.hh:602
gem5::MSHR::promoteDeferredTargets
bool promoteDeferredTargets()
Definition: mshr.cc:581
gem5::MSHR::promoteWritable
void promoteWritable()
Promotes deferred targets that do not require writable.
Definition: mshr.cc:664
gem5::Packet::cacheResponding
bool cacheResponding() const
Definition: packet.hh:646
gem5::WriteQueueEntry::getTarget
Target * getTarget() override
Returns a reference to the first target.
Definition: write_queue_entry.hh:155
gem5::prefetch::Base::prefetchUnused
void prefetchUnused()
Definition: base.hh:391
gem5::TempCacheBlk
Special instance of CacheBlk for use with tempBlk that deals with its block address regeneration.
Definition: cache_blk.hh:500
gem5::Packet::isUpgrade
bool isUpgrade() const
Definition: packet.hh:585
gem5::MaxTick
const Tick MaxTick
Definition: types.hh:60
gem5::Packet::isWrite
bool isWrite() const
Definition: packet.hh:583
gem5::CacheBlk::clearCoherenceBits
void clearCoherenceBits(unsigned bits)
Clear the corresponding coherence bits.
Definition: cache_blk.hh:231
gem5::Packet::isSecure
bool isSecure() const
Definition: packet.hh:810
gem5::MSHRQueue::delay
void delay(MSHR *mshr, Tick delay_ticks)
Adds a delay to the provided MSHR and moves MSHRs that will be ready earlier than this entry to the t...
Definition: mshr_queue.cc:104
gem5::MemCmd::NUM_MEM_CMDS
@ NUM_MEM_CMDS
Definition: packet.hh:143
gem5::Packet::setWriteThrough
void setWriteThrough()
A writeback/writeclean cmd gets propagated further downstream by the receiver when the flag is set.
Definition: packet.hh:722
gem5::BaseCache::clearBlocked
void clearBlocked(BlockedCause cause)
Marks the cache as unblocked for the given cause.
Definition: base.hh:1246
gem5::BaseCache::dataLatency
const Cycles dataLatency
The latency of data access of a cache.
Definition: base.hh:900
gem5::Packet::hasSharers
bool hasSharers() const
Definition: packet.hh:673
gem5::BaseCache::updateCompressionData
bool updateCompressionData(CacheBlk *&blk, const uint64_t *data, PacketList &writebacks)
When a block is overwritten, its compression information must be updated, and it may need to be recomp...
Definition: base.cc:902
gem5::BaseCache::CacheCmdStats::mshrMisses
statistics::Vector mshrMisses
Number of misses that miss in the MSHRs, per command and thread.
Definition: base.hh:1022
gem5::X86ISA::system
Bitfield< 15 > system
Definition: misc.hh:1003
gem5::CompressionBlk::getSizeBits
std::size_t getSizeBits() const
Definition: super_blk.cc:93
gem5::BaseCache::replaceExpansions
const bool replaceExpansions
when a data expansion of a compressed block happens it will not be able to co-allocate where it is at...
Definition: base.hh:951
gem5::BaseCache::blocked
uint8_t blocked
Bit vector of the blocking reasons for the access path.
Definition: base.hh:965
gem5::QueueEntry::Target
A queue entry is holding packets that will be serviced as soon as resources are available.
Definition: queue_entry.hh:87
gem5::EventManager::schedule
void schedule(Event &event, Tick when)
Definition: eventq.hh:1019
std::vector< uint64_t >
gem5::WriteAllocator::updateMode
void updateMode(Addr write_addr, unsigned write_size, Addr blk_addr)
Update the write mode based on the current write packet.
Definition: base.cc:2595
gem5::statistics::nonan
const FlagsType nonan
Don't print if this is NAN.
Definition: info.hh:70
gem5::CacheBlk::checkWrite
bool checkWrite(PacketPtr pkt)
Handle interaction of load-locked operations and stores.
Definition: cache_blk.hh:412
gem5::BaseCache::sequentialAccess
const bool sequentialAccess
Whether tags and data are accessed sequentially.
Definition: base.hh:922
gem5::BaseCache::BaseCache
BaseCache(const BaseCacheParams &p, unsigned blk_size)
Definition: base.cc:79
gem5::BaseCache::updateBlockData
void updateBlockData(CacheBlk *blk, const PacketPtr cpkt, bool has_old_data)
Update the data contents of a block.
Definition: base.cc:695
gem5::Packet::makeAtomicResponse
void makeAtomicResponse()
Definition: packet.hh:1043
gem5::Packet::setDataFromBlock
void setDataFromBlock(const uint8_t *blk_data, int blkSize)
Copy data into the packet from the provided block pointer, which is aligned to the given block size.
Definition: packet.hh:1271
gem5::BaseCache::pendingDelete
std::unique_ptr< Packet > pendingDelete
Upstream caches need this packet until true is returned, so hold it for deletion until a subsequent c...
Definition: base.hh:402
gem5::BaseCache::CacheResponsePort
A cache response port is used for the CPU-side port of the cache, and it is basically a simple timing...
Definition: base.hh:274
gem5::BaseTags::moveBlock
virtual void moveBlock(CacheBlk *src_blk, CacheBlk *dest_blk)
Move a block's metadata to another location decided by the replacement policy.
Definition: base.cc:130
gem5::CacheBlk::WritableBit
@ WritableBit
write permission
Definition: cache_blk.hh:80
gem5::ArmISA::i
Bitfield< 7 > i
Definition: misc_types.hh:66
gem5::Packet::headerDelay
uint32_t headerDelay
The extra delay from seeing the packet until the header is transmitted.
Definition: packet.hh:420
gem5::SectorSubBlk::getSectorBlock
SectorBlk * getSectorBlock() const
Get sector block associated to this block.
Definition: sector_blk.cc:52
gem5::Packet::setSatisfied
void setSatisfied()
Set when a request hits in a cache and the cache is not going to respond.
Definition: packet.hh:736
gem5::QueueEntry::blkAddr
Addr blkAddr
Block aligned address.
Definition: queue_entry.hh:116
gem5::SectorBlk::blks
std::vector< SectorSubBlk * > blks
List of blocks associated to this sector.
Definition: sector_blk.hh:147
gem5::QueueEntry::order
Counter order
Order number assigned to disambiguate writes and misses.
Definition: queue_entry.hh:113
gem5::CompressionBlk::DATA_EXPANSION
@ DATA_EXPANSION
New data contents are considered larger than previous contents.
Definition: super_blk.hh:81
gem5::CacheBlk
A Basic Cache block.
Definition: cache_blk.hh:70
gem5::BaseCache::tempBlock
TempCacheBlk * tempBlock
Temporary cache block for occasional transitory use.
Definition: base.hh:396
gem5::BaseCache::handleAtomicReqMiss
virtual Cycles handleAtomicReqMiss(PacketPtr pkt, CacheBlk *&blk, PacketList &writebacks)=0
Handle a request in atomic mode that missed in this cache.
gem5::BaseCache::sendWriteQueuePacket
bool sendWriteQueuePacket(WriteQueueEntry *wq_entry)
Similar to sendMSHR, but for a write-queue entry instead.
Definition: base.cc:1893
gem5::BaseCache::CacheCmdStats::mshrMissLatency
statistics::Vector mshrMissLatency
Total cycle latency of each MSHR miss, per command and thread.
Definition: base.hh:1026
gem5::BaseCache::CpuSidePort::recvTimingReq
virtual bool recvTimingReq(PacketPtr pkt) override
Receive a timing request from the peer.
Definition: base.cc:2449
gem5::BaseCache::CacheResponsePort::setBlocked
void setBlocked()
Do not accept any new requests.
Definition: base.cc:145
gem5::BaseCache::isBlocked
bool isBlocked() const
Returns true if the cache is blocked for accesses.
Definition: base.hh:1217
gem5::Cycles
Cycles is a wrapper class for representing cycle counts, i.e.
Definition: types.hh:78
gem5::BaseCache::writeAllocator
WriteAllocator *const writeAllocator
The writeAllocator drive optimizations for streaming writes.
Definition: base.hh:388
gem5::TempCacheBlk::getAddr
Addr getAddr() const
Get block's address.
Definition: cache_blk.hh:542
gem5::MemCmd::WritebackDirty
@ WritebackDirty
Definition: packet.hh:92
gem5::BaseCache::inRange
bool inRange(Addr addr) const
Determine if an address is in the ranges covered by this cache.
Definition: base.cc:213
gem5::Packet::hasData
bool hasData() const
Definition: packet.hh:603
gem5::MemCmd
Definition: packet.hh:75
gem5::BaseCache::DataUpdate::newData
std::vector< uint64_t > newData
The new data contents.
Definition: base.hh:133
gem5::Packet::getAtomicOp
AtomicOpFunctor * getAtomicOp() const
Accessor function to atomic op.
Definition: packet.hh:819
gem5::BaseCache::writebackTempBlockAtomicEvent
EventFunctionWrapper writebackTempBlockAtomicEvent
An event to writeback the tempBlock after recvAtomic finishes.
Definition: base.hh:703
gem5::Packet::payloadDelay
uint32_t payloadDelay
The extra pipelining delay from seeing the packet until the end of payload is transmitted by the comp...
Definition: packet.hh:438
gem5::BaseCache::Blocked_NoTargets
@ Blocked_NoTargets
Definition: base.hh:115
gem5::Packet::dataStatic
void dataStatic(T *p)
Set the data pointer to the following value that should not be freed.
Definition: packet.hh:1134
gem5::BaseCache::DataUpdate
A data contents update is composed of the updated block's address, the old contents,...
Definition: base.hh:124
gem5::QueueEntry::Target::recvTime
const Tick recvTime
Time when request was received (for stats)
Definition: queue_entry.hh:90
gem5::BaseCache::order
uint64_t order
Increasing order number assigned to each incoming request.
Definition: base.hh:968
gem5::CacheBlk::getTaskId
uint32_t getTaskId() const
Get the task id associated to this block.
Definition: cache_blk.hh:285
gem5::Flags< FlagsType >
gem5::BaseCache::serialize
void serialize(CheckpointOut &cp) const override
Serialize the state of the caches.
Definition: base.cc:1917
gem5::BaseCache::ppMiss
ProbePointArg< PacketPtr > * ppMiss
To probe when a cache miss occurs.
Definition: base.hh:362
gem5::System::maxRequestors
RequestorID maxRequestors()
Get the number of requestors registered in the system.
Definition: system.hh:518
gem5::RequestPort::sendFunctional
void sendFunctional(PacketPtr pkt) const
Send a functional request packet, where the data is instantly updated everywhere in the memory system...
Definition: port.hh:485
gem5::CacheBlk::setWhenReady
void setWhenReady(const Tick tick)
Set tick at which block's data will be available for access.
Definition: cache_blk.hh:278
gem5::Packet::isRead
bool isRead() const
Definition: packet.hh:582
gem5::BaseCache::prefetcher
prefetch::Base * prefetcher
Prefetcher.
Definition: base.hh:356
base.hh
gem5::System
Definition: system.hh:77
gem5::BaseCache::tags
BaseTags * tags
Tag and data Storage.
Definition: base.hh:350
gem5::WriteAllocator::allocate
bool allocate() const
Should writes allocate?
Definition: base.hh:1396
gem5::Queue::isFull
bool isFull() const
Definition: queue.hh:150
gem5::BaseCache::CacheStats::regStats
void regStats() override
Callback to set stat parameters.
Definition: base.cc:2191
gem5::Packet::needsWritable
bool needsWritable() const
Definition: packet.hh:588
gem5::BaseCache::ppFill
ProbePointArg< PacketPtr > * ppFill
To probe when a cache fill occurs.
Definition: base.hh:365
gem5::Queue::nextReadyTime
Tick nextReadyTime() const
Definition: queue.hh:229
gem5::MSHR::allocateTarget
void allocateTarget(PacketPtr target, Tick when, Counter order, bool alloc_on_fill)
Add a request to the list of targets.
Definition: mshr.cc:376
gem5::BaseCache::CacheStats::cmd
std::vector< std::unique_ptr< CacheCmdStats > > cmd
Per-command statistics.
Definition: base.hh:1137
gem5::Request::SECURE
@ SECURE
The request targets the secure memory space.
Definition: request.hh:184
gem5::BaseCache::allocateWriteBuffer
void allocateWriteBuffer(PacketPtr pkt, Tick time)
Definition: base.hh:1182
gem5::prefetch::Base::pfHitInMSHR
void pfHitInMSHR()
Definition: base.hh:409
gem5::Named::name
virtual std::string name() const
Definition: named.hh:47
gem5::MSHR::needsWritable
bool needsWritable() const
The pending* and post* flags are only valid if inService is true.
Definition: mshr.hh:319
gem5::BaseCache::MemSidePort::recvTimingResp
virtual bool recvTimingResp(PacketPtr pkt)
Receive a timing response from the peer.
Definition: base.cc:2511
gem5::Packet::print
void print(std::ostream &o, int verbosity=0, const std::string &prefix="") const
Definition: packet.cc:373
gem5::BaseCache::stats
gem5::BaseCache::CacheStats stats
gem5::SuperBlk
A basic compression superblock.
Definition: super_blk.hh:170
gem5::Clocked::cyclesToTicks
Tick cyclesToTicks(Cycles c) const
Definition: clocked_object.hh:227
gem5::BaseCache
A basic cache interface.
Definition: base.hh:95
gem5::QueuedResponsePort
A queued port is a port that has an infinite queue for outgoing packets and thus decouples the module...
Definition: qport.hh:61
DPRINTF
#define DPRINTF(x,...)
Definition: trace.hh:186
ADD_STAT
#define ADD_STAT(n,...)
Convenience macro to add a stat to a statistics group.
Definition: group.hh:75
gem5::PacketId
uint64_t PacketId
Definition: packet.hh:73
gem5::Packet
A Packet is used to encapsulate a transfer between two objects in the memory system (e....
Definition: packet.hh:283
gem5::BaseCache::calculateTagOnlyLatency
Cycles calculateTagOnlyLatency(const uint32_t delay, const Cycles lookup_lat) const
Calculate latency of accesses that only touch the tag array.
Definition: base.cc:1110
gem5::MemCmd::UpgradeResp
@ UpgradeResp
Definition: packet.hh:104
gem5::BaseCache::sendMSHRQueuePacket
virtual bool sendMSHRQueuePacket(MSHR *mshr)
Take an MSHR, turn it into a suitable downstream packet, and send it out.
Definition: base.cc:1790
gem5::MSHR::isPendingModified
bool isPendingModified() const
Definition: mshr.hh:326
gem5::BaseCache::writecleanBlk
PacketPtr writecleanBlk(CacheBlk *blk, Request::Flags dest, PacketId id)
Create a writeclean request for the given block.
Definition: base.cc:1666
gem5::probing::Packet
ProbePointArg< PacketInfo > Packet
Packet probe point.
Definition: mem.hh:109
gem5::MipsISA::p
Bitfield< 0 > p
Definition: pra_constants.hh:326
gem5::BaseCache::blkSize
const unsigned blkSize
Block size of this cache.
Definition: base.hh:888
gem5::Tick
uint64_t Tick
Tick count type.
Definition: types.hh:58
gem5::Queue::getNext
Entry * getNext() const
Returns the WriteQueueEntry at the head of the readyList.
Definition: queue.hh:221
gem5::BaseCache::regenerateBlkAddr
Addr regenerateBlkAddr(CacheBlk *blk)
Regenerate block address using tags.
Definition: base.cc:182
gem5::ResponsePort::isSnooping
bool isSnooping() const
Find out if the peer request port is snooping or not.
Definition: port.hh:291
gem5::BaseCache::ppHit
ProbePointArg< PacketPtr > * ppHit
To probe when a cache hit occurs.
Definition: base.hh:359
gem5::QueueEntry::isSecure
bool isSecure
True if the entry targets the secure memory space.
Definition: queue_entry.hh:122
gem5::BaseCache::CacheResponsePort::clearBlocked
void clearBlocked()
Return to normal operation and accept new requests.
Definition: base.cc:160
gem5::BaseCache::recvAtomic
virtual Tick recvAtomic(PacketPtr pkt)
Performs the access specified by the request.
Definition: base.cc:559
gem5::System::getRequestorName
std::string getRequestorName(RequestorID requestor_id)
Get the name of an object for a given request id.
Definition: system.cc:637
gem5::BaseCache::CpuSidePort::recvTimingSnoopResp
virtual bool recvTimingSnoopResp(PacketPtr pkt) override
Receive a timing snoop response from the peer.
Definition: base.cc:2420
gem5::RequestPtr
std::shared_ptr< Request > RequestPtr
Definition: request.hh:92
gem5::BaseTags::regenerateBlkAddr
virtual Addr regenerateBlkAddr(const CacheBlk *blk) const =0
Regenerate the block address.
gem5::Packet::getConstPtr
const T * getConstPtr() const
Definition: packet.hh:1193
gem5::statistics::reset
void reset()
Definition: statistics.cc:304
gem5::BaseTags::accessBlock
virtual CacheBlk * accessBlock(const PacketPtr pkt, Cycles &lat)=0
Access block and update replacement data.
gem5::CacheBlk::setCoherenceBits
void setCoherenceBits(unsigned bits)
Sets the corresponding coherence bits.
Definition: cache_blk.hh:220
gem5::BaseCache::CacheStats::writebacks
statistics::Vector writebacks
Number of blocks written back per thread.
Definition: base.hh:1088
gem5::CompressedTags
A CompressedTags cache tag store.
Definition: compressed_tags.hh:70
gem5::compression::Base::setSizeBits
static void setSizeBits(CacheBlk *blk, const std::size_t size_bits)
Set the size of the compressed block, in bits.
Definition: base.cc:225
chatty_assert
#define chatty_assert(cond,...)
The chatty assert macro will function like a normal assert, but will allow the specification of addit...
Definition: logging.hh:301
gem5::Packet::isLLSC
bool isLLSC() const
Definition: packet.hh:609
gem5::MemCmd::WritebackClean
@ WritebackClean
Definition: packet.hh:93
gem5::BaseCache::init
void init() override
init() is called after all C++ SimObjects have been created and all ports are connected.
Definition: base.cc:192
gem5::BaseCache::CpuSidePort::CpuSidePort
CpuSidePort(const std::string &_name, BaseCache *_cache, const std::string &_label)
Definition: base.cc:2499
gem5::CacheBlk::print
std::string print() const override
Pretty-print tag, set and way, and interpret state bits to readable form including mapping to a MOESI...
Definition: cache_blk.hh:364
gem5::Queue::findMatch
Entry * findMatch(Addr blk_addr, bool is_secure, bool ignore_uncacheable=true) const
Find the first entry that matches the provided address.
Definition: queue.hh:168
gem5::Packet::clearBlockCached
void clearBlockCached()
Definition: packet.hh:748
gem5::ArmISA::offset
Bitfield< 23, 0 > offset
Definition: types.hh:144
gem5::Port::isConnected
bool isConnected() const
Is this port currently connected to a peer?
Definition: port.hh:133
gem5::BaseCache::CacheStats::dataExpansions
statistics::Scalar dataExpansions
Number of data expansions.
Definition: base.hh:1128
gem5::BaseCache::CacheResponsePort::mustSendRetry
bool mustSendRetry
Definition: base.hh:297
compiler.hh
gem5::Packet::id
const PacketId id
Definition: packet.hh:363
gem5::MSHR::isWholeLineWrite
bool isWholeLineWrite() const
Check if this MSHR contains only compatible writes, and if they span the entire cache line.
Definition: mshr.hh:390
gem5::BaseCache::unserialize
void unserialize(CheckpointIn &cp) override
Unserialize an object.
Definition: base.cc:1937
gem5::BaseCache::allocOnFill
bool allocOnFill(MemCmd cmd) const
Determine whether we should allocate on a fill or not.
Definition: base.hh:441
gem5::BaseCache::incHitCount
void incHitCount(PacketPtr pkt)
Definition: base.hh:1298
gem5::Queue::trySatisfyFunctional
bool trySatisfyFunctional(PacketPtr pkt)
Definition: queue.hh:186
gem5::Packet::pushLabel
void pushLabel(const std::string &lbl)
Push label for PrintReq (safe to call unconditionally).
Definition: packet.hh:1420
gem5::Packet::cmd
MemCmd cmd
The command field of the packet.
Definition: packet.hh:361
gem5::ArmISA::c
Bitfield< 29 > c
Definition: misc_types.hh:53
gem5::Packet::popLabel
void popLabel()
Pop label for PrintReq (safe to call unconditionally).
Definition: packet.hh:1430
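The label push/pop pair brackets functional probing of the cache's buffers; a sketch of that pattern (roughly the shape of BaseCache::functionalAccess, omitting the block-data check the real code performs first):
    pkt->pushLabel(name());
    // Probe buffered packets and queue entries until one satisfies the request.
    const bool done = cpuSidePort.trySatisfyFunctional(pkt) ||
        mshrQueue.trySatisfyFunctional(pkt) ||
        writeBuffer.trySatisfyFunctional(pkt) ||
        memSidePort.trySatisfyFunctional(pkt);
    pkt->popLabel();
    // (If !done, the real code forwards the packet functionally instead.)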
gem5::BaseCache::Blocked_NoMSHRs
@ Blocked_NoMSHRs
Definition: base.hh:113
gem5::BaseCache::CpuSidePort::recvFunctional
virtual void recvFunctional(PacketPtr pkt) override
Receive a functional request packet from the peer.
Definition: base.cc:2478
gem5::Packet::needsResponse
bool needsResponse() const
Definition: packet.hh:597
gem5::Packet::writeData
void writeData(uint8_t *p) const
Copy data from the packet to the memory at the provided pointer.
Definition: packet.hh:1281
gem5::MSHR::wasWholeLineWrite
bool wasWholeLineWrite
Track if we sent this as a whole line write or not.
Definition: mshr.hh:124
gem5::Addr
uint64_t Addr
Address type This will probably be moved somewhere else in the near future.
Definition: types.hh:147
gem5::Packet::isError
bool isError() const
Definition: packet.hh:610
gem5::BaseCache::doWritebacksAtomic
virtual void doWritebacksAtomic(PacketList &writebacks)=0
Send writebacks down the memory hierarchy in atomic mode.
gem5::prefetch::Base::pfHitInWB
void pfHitInWB()
Definition: base.hh:415
name
const std::string & name()
Definition: trace.cc:49
gem5::prefetch::Base::pfHitInCache
void pfHitInCache()
Definition: base.hh:403
SERIALIZE_SCALAR
#define SERIALIZE_SCALAR(scalar)
Definition: serialize.hh:568
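A minimal sketch of the checkpointing idiom behind this macro (MyObject and numCycles are hypothetical; the macro stores the member under its own name so a matching UNSERIALIZE_SCALAR can restore it):
    void
    MyObject::serialize(CheckpointOut &cp) const
    {
        // Writes the scalar member to the checkpoint under the key "numCycles".
        SERIALIZE_SCALAR(numCycles);
    }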
gem5::BaseCache::tempBlockWriteback
PacketPtr tempBlockWriteback
Writebacks from the tempBlock, resulting on the response path in atomic mode, must happen after the c...
Definition: base.hh:684
gem5::CacheBlk::DirtyBit
@ DirtyBit
dirty (modified)
Definition: cache_blk.hh:87
gem5::statistics::Group::regStats
virtual void regStats()
Callback to set stat parameters.
Definition: group.cc:69
gem5::ClockedObject
The ClockedObject class extends the SimObject with a clock and accessor functions to relate ticks to ...
Definition: clocked_object.hh:234
gem5::Clocked::clockEdge
Tick clockEdge(Cycles cycles=Cycles(0)) const
Determine the tick when a cycle begins, by default the current one, but the argument also enables the...
Definition: clocked_object.hh:177
gem5::BaseCache::recvTimingReq
virtual void recvTimingReq(PacketPtr pkt)
Performs the access specified by the request.
Definition: base.cc:350
gem5::ResponsePort::sendRangeChange
void sendRangeChange() const
Called by the owner to send a range change.
Definition: port.hh:296
gem5::MSHRQueue::markPending
void markPending(MSHR *mshr)
Mark an in service entry as pending, used to resend a request.
Definition: mshr_queue.cc:123
gem5::BaseCache::MemSidePort::recvTimingSnoopReq
virtual void recvTimingSnoopReq(PacketPtr pkt)
Receive a timing snoop request from the peer.
Definition: base.cc:2519
gem5::EventManager::deschedule
void deschedule(Event &event)
Definition: eventq.hh:1028
gem5::prefetch::Base::nextPrefetchReadyTime
virtual Tick nextPrefetchReadyTime() const =0
gem5::Packet::popSenderState
SenderState * popSenderState()
Pop the top of the state stack and return a pointer to it.
Definition: packet.cc:324
gem5::MemCmd::InvalidateResp
@ InvalidateResp
Definition: packet.hh:138
gem5::Cache
A coherent cache that can be arranged in flexible topologies.
Definition: cache.hh:67
gem5::BaseCache::MemSidePort::recvAtomicSnoop
virtual Tick recvAtomicSnoop(PacketPtr pkt)
Receive an atomic snoop request packet from our peer.
Definition: base.cc:2529
gem5::WriteQueueEntry
Write queue entry.
Definition: write_queue_entry.hh:67
gem5::BaseCache::schedMemSideSendEvent
void schedMemSideSendEvent(Tick time)
Schedule a send event for the memory-side port.
Definition: base.hh:1265
gem5::ProbePointArg
ProbePointArg generates a point for the class of Arg.
Definition: thermal_domain.hh:54
gem5::CacheBlk::data
uint8_t * data
Contains a copy of the data in this block for easy access.
Definition: cache_blk.hh:103
gem5::BaseCache::maintainClusivity
void maintainClusivity(bool from_cache, CacheBlk *blk)
Maintain the clusivity of this cache by potentially invalidating a block.
Definition: base.cc:1422
gem5::CacheBlk::getWhenReady
Tick getWhenReady() const
Get tick at which block's data will be available for access.
Definition: cache_blk.hh:265
gem5::MSHRQueue::deallocate
void deallocate(MSHR *mshr) override
Deallocate a MSHR and its targets.
Definition: mshr_queue.cc:83
gem5::Packet::isClean
bool isClean() const
Definition: packet.hh:600
gem5::BaseCache::isReadOnly
const bool isReadOnly
Is this cache read only, for example the instruction cache, or table-walker cache.
Definition: base.hh:943
gem5::BaseCache::CacheReqPacketQueue::sendDeferredPacket
virtual void sendDeferredPacket()
Override the normal sendDeferredPacket and do not only consider the transmit list (used for responses...
Definition: base.cc:2550
gem5::Packet::setHasSharers
void setHasSharers()
On fills, the hasSharers flag is used by the caches in combination with the cacheResponding flag,...
Definition: packet.hh:672
gem5::BaseCache::noTargetMSHR
MSHR * noTargetMSHR
Pointer to the MSHR that has no targets.
Definition: base.hh:974
gem5::SimObject::getProbeManager
ProbeManager * getProbeManager()
Get the probe manager for this object.
Definition: sim_object.cc:120
gem5::BaseTags::forEachBlk
virtual void forEachBlk(std::function< void(CacheBlk &)> visitor)=0
Visit each block in the tags and apply a visitor.
gem5::BaseCache::clusivity
const enums::Clusivity clusivity
Clusivity with respect to the upstream cache, determining if we fill into both this cache and the cac...
Definition: base.hh:935
warn_if
#define warn_if(cond,...)
Conditional warning macro that checks the supplied condition and only prints a warning if the conditi...
Definition: logging.hh:272
super_blk.hh
Copyright (c) 2018 Inria All rights reserved.
gem5::ResponsePort::owner
SimObject & owner
Definition: port.hh:279
panic_if
#define panic_if(cond,...)
Conditional panic macro that checks the supplied condition and only panics if the condition is true a...
Definition: logging.hh:203
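A hedged usage sketch of the two conditional logging macros listed here and above (conditions and messages are illustrative only):
    // Non-fatal: emit a warning if the packet is unexpectedly not a response.
    warn_if(!pkt->isResponse(), "Unexpected request packet %s", pkt->print());
    // Programming error: abort the simulation if no block was found.
    panic_if(!blk, "No block found for address %#x", pkt->getAddr());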
gem5::BaseTags::insertBlock
virtual void insertBlock(const PacketPtr pkt, CacheBlk *blk)
Insert the new block into the cache and update stats.
Definition: base.cc:102
gem5::BaseCache::memInvalidate
virtual void memInvalidate() override
Invalidates all blocks in the cache.
Definition: base.cc:1718
gem5::BaseCache::CacheCmdStats::CacheCmdStats
CacheCmdStats(BaseCache &c, const std::string &name)
Definition: base.cc:1949
gem5::Packet::makeTimingResponse
void makeTimingResponse()
Definition: packet.hh:1049
gem5::BaseTags::anyBlk
virtual bool anyBlk(std::function< bool(CacheBlk &)> visitor)=0
Find if any of the blocks satisfies a condition.
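A short sketch of how the two block-visitor hooks (forEachBlk above and anyBlk here) are typically driven from BaseCache; this mirrors the shape of memInvalidate() and isDirty() listed elsewhere on this page:
    // Invalidate every block in the tag store.
    tags->forEachBlk([this](CacheBlk &blk) { invalidateVisitor(blk); });

    // Report whether any block is dirty; anyBlk stops at the first match.
    const bool dirty = tags->anyBlk(
        [](CacheBlk &blk) { return blk.isSet(CacheBlk::DirtyBit); });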
gem5::BaseCache::regProbePoints
void regProbePoints() override
Registers probes.
Definition: base.cc:2405
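A sketch of the registration pattern behind this hook (ppHit is the cache's hit probe point; listeners attach to it by name):
    // In regProbePoints(): create a packet-typed probe point named "Hit" on
    // this object's probe manager.
    ppHit = new ProbePointArg<PacketPtr>(getProbeManager(), "Hit");

    // On the hit path: notify any attached listeners with the observed packet.
    ppHit->notify(pkt);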
gem5::Request::funcRequestorId
@ funcRequestorId
This requestor id is used for functional requests that don't come from a particular device.
Definition: request.hh:260
gem5::Packet::allocate
void allocate()
Allocate memory for the packet.
Definition: packet.hh:1326
gem5::Port
Ports are used to interface objects to each other.
Definition: port.hh:61
gem5::BaseCache::isDirty
bool isDirty() const
Determine if there are any dirty blocks in the cache.
Definition: base.cc:1724
gem5::BaseCache::CacheStats::cmdStats
CacheCmdStats & cmdStats(const PacketPtr p)
Definition: base.hh:1043
gem5::CompressionBlk
A superblock is composed of sub-blocks, and each sub-block has information regarding its superblock a...
Definition: super_blk.hh:51
gem5::BaseCache::allocateBlock
CacheBlk * allocateBlock(const PacketPtr pkt, PacketList &writebacks)
Allocate a new block and perform any necessary writebacks.
Definition: base.cc:1530
gem5::BaseCache::CacheStats::dataContractions
statistics::Scalar dataContractions
Number of data contractions (blocks that had their compression factor improved).
Definition: base.hh:1134
gem5::compression::Base::compress
virtual std::unique_ptr< CompressionData > compress(const std::vector< Chunk > &chunks, Cycles &comp_lat, Cycles &decomp_lat)=0
Apply the compression process to the cache line.
gem5::MSHR::getNumTargets
int getNumTargets() const
Returns the current number of allocated targets.
Definition: mshr.hh:430
gem5::Packet::writeDataToBlock
void writeDataToBlock(uint8_t *blk_data, int blkSize) const
Copy data from the packet to the provided block pointer, which is aligned to the given block size.
Definition: packet.hh:1305
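A compact sketch of how this call satisfies a write hit in place (roughly the write-hit branch of BaseCache::satisfyRequest):
    // Copy the packet's payload into the block-aligned data buffer, mark the
    // block modified, and convert the packet to a response if one is expected.
    pkt->writeDataToBlock(blk->data, blkSize);
    blk->setCoherenceBits(CacheBlk::DirtyBit);
    if (pkt->needsResponse())
        pkt->makeResponse();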
gem5::MSHR::isCleaning
bool isCleaning() const
Definition: mshr.hh:321
gem5::BaseCache::fillLatency
const Cycles fillLatency
The latency to fill a cache block.
Definition: base.hh:910
gem5::Packet::makeResponse
void makeResponse()
Take a request packet and modify it in place to be suitable for returning as a response to that reque...
Definition: packet.hh:1031
gem5::BaseCache::handleTimingReqHit
virtual void handleTimingReqHit(PacketPtr pkt, CacheBlk *blk, Tick request_time)
Definition: base.cc:224
gem5::Clocked::tick
Tick tick
Definition: clocked_object.hh:68
gem5::BaseCache::addrRanges
const AddrRangeList addrRanges
The address range to which the cache responds on the CPU side.
Definition: base.hh:982
gem5::BaseCache::system
System * system
System we are currently operating in.
Definition: base.hh:986
gem5::WriteAllocator::delay
bool delay(Addr blk_addr)
Check whether we need to delay the current write.
Definition: base.hh:1418
gem5::BaseCache::mshrQueue
MSHRQueue mshrQueue
Miss status registers.
Definition: base.hh:344
gem5::BaseCache::memSidePort
MemSidePort memSidePort
Definition: base.hh:339
gem5::CacheBlk::wasPrefetched
bool wasPrefetched() const
Check if this block was the result of a hardware prefetch, yet to be touched.
Definition: cache_blk.hh:249
gem5::BaseCache::handleUncacheableWriteResp
void handleUncacheableWriteResp(PacketPtr pkt)
Handling the special case of uncacheable write responses to make recvTimingResp less cluttered.
Definition: base.cc:408
gem5::BaseCache::writebackClean
const bool writebackClean
Determine if clean lines should be written back or not.
Definition: base.hh:675
gem5::QueuedResponsePort::trySatisfyFunctional
bool trySatisfyFunctional(PacketPtr pkt)
Check the list of buffered packets against the supplied functional request.
Definition: qport.hh:98
gem5::MemCmd::WriteReq
@ WriteReq
Definition: packet.hh:89
gem5::CompressionBlk::checkExpansionContraction
OverwriteType checkExpansionContraction(const std::size_t size) const
Determines if changing the size of the block will cause a data expansion (new size is bigger) or cont...
Definition: super_blk.cc:148
gem5::CacheBlk::trackLoadLocked
void trackLoadLocked(PacketPtr pkt)
Track the fact that a load-locked (LL) was issued to the block.
Definition: cache_blk.hh:327
gem5::TaggedEntry::isSecure
bool isSecure() const
Check if this block holds data from the secure memory space.
Definition: tagged_entry.hh:64
gem5::CacheBlk::isSet
bool isSet(unsigned bits) const
Checks the given coherence bits are set.
Definition: cache_blk.hh:239
gem5::MSHR::promoteReadable
void promoteReadable()
Promotes deferred targets that do not require writable.
Definition: mshr.cc:643
gem5::BaseCache::CacheCmdStats::mshrUncacheableLatency
statistics::Vector mshrUncacheableLatency
Total cycle latency of each MSHR miss, per command and thread.
Definition: base.hh:1028
logging.hh
gem5::QueueEntry
A queue entry base class, to be used by both the MSHRs and write-queue entries.
Definition: queue_entry.hh:62
gem5::statistics::Group
Statistics container.
Definition: group.hh:93
gem5::BaseCache::CacheResponsePort::blocked
bool blocked
Definition: base.hh:295
gem5::TempCacheBlk::insert
void insert(const Addr addr, const bool is_secure) override
Insert the block by assigning it a tag and marking it valid.
Definition: cache_blk.hh:531
gem5::BaseCache::invalidateVisitor
void invalidateVisitor(CacheBlk &blk)
Cache block visitor that invalidates all blocks in the cache.
Definition: base.cc:1760
gem5::Packet::fromCache
bool fromCache() const
Definition: packet.hh:601
gem5::BaseCache::writebackTempBlockAtomic
void writebackTempBlockAtomic()
Send the outstanding tempBlock writeback.
Definition: base.hh:691
gem5::BaseCache::getNextQueueEntry
QueueEntry * getNextQueueEntry()
Return the next queue entry to service, either a pending miss from the MSHR queue,...
Definition: base.cc:774
gem5::Packet::trySatisfyFunctional
bool trySatisfyFunctional(PacketPtr other)
Check a functional request against a memory value stored in another packet (i.e.
Definition: packet.hh:1358
gem5::QueuedRequestPort::trySatisfyFunctional
bool trySatisfyFunctional(PacketPtr pkt)
Check the list of buffered packets against the supplied functional request.
Definition: qport.hh:163
gem5::BaseCache::serviceMSHRTargets
virtual void serviceMSHRTargets(MSHR *mshr, const PacketPtr pkt, CacheBlk *blk)=0
Service non-deferred MSHR targets using the received response.
gem5::BaseCache::lookupLatency
const Cycles lookupLatency
The latency of tag lookup of a cache.
Definition: base.hh:894
gem5::Request::wbRequestorId
@ wbRequestorId
This requestor id is used for writeback requests by the caches.
Definition: request.hh:255
SUM_DEMAND
#define SUM_DEMAND(s)
gem5::WriteQueueEntry::getNumTargets
int getNumTargets() const
Returns the current number of allocated targets.
Definition: write_queue_entry.hh:142
gem5::MipsISA::r
r
Definition: pra_constants.hh:98
gem5::prefetch::Base::incrDemandMhsrMisses
void incrDemandMhsrMisses()
Definition: base.hh:397
gem5::CheckpointOut
std::ostream CheckpointOut
Definition: serialize.hh:66
gem5::BaseCache::allocateMissBuffer
MSHR * allocateMissBuffer(PacketPtr pkt, Tick time, bool sched_send=true)
Definition: base.hh:1164
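A sketch of the miss path that reaches this helper (forward_time is assumed to be the current clock edge plus the cache's forward latency and any accumulated header delay on the packet):
    // Allocate an MSHR for this demand miss and schedule the downstream send.
    const Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;
    allocateMissBuffer(pkt, forward_time);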
gem5::WriteAllocator::coalesce
bool coalesce() const
Should writes be coalesced? This is true if the mode is set to NO_ALLOCATE.
Definition: base.hh:1387
gem5::CacheBlkPrintWrapper
Simple class to provide virtual print() method on cache blocks without allocating a vtable pointer fo...
Definition: cache_blk.hh:554
gem5::CompressionBlk::DATA_CONTRACTION
@ DATA_CONTRACTION
New data contents are considered smaller than previous contents.
Definition: super_blk.hh:77
gem5::BaseTags::findBlock
virtual CacheBlk * findBlock(Addr addr, bool is_secure) const
Finds the block in the cache without touching it.
Definition: base.cc:80
gem5::Packet::isExpressSnoop
bool isExpressSnoop() const
Definition: packet.hh:689
gem5::BaseCache::doWritebacks
virtual void doWritebacks(PacketList &writebacks, Tick forward_time)=0
Insert writebacks into the write buffer.
gem5::BaseCache::setBlocked
void setBlocked(BlockedCause cause)
Marks the access path of the cache as blocked for the given cause.
Definition: base.hh:1227
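A minimal sketch of raising this cause when the MSHR pool runs dry (mshrQueue.isFull() is assumed to be the availability check; a matching BaseCache::clearBlocked with the same cause re-opens the CPU-side port once an MSHR is freed):
    // If allocating this miss consumed the last free MSHR, stop accepting new
    // requests until one is deallocated.
    if (mshrQueue.isFull())
        setBlocked(Blocked_NoMSHRs);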
gem5::BaseTags::invalidate
virtual void invalidate(CacheBlk *blk)
This function updates the tags when a block is invalidated.
Definition: base.hh:254
gem5::Clocked::ticksToCycles
Cycles ticksToCycles(Tick t) const
Definition: clocked_object.hh:222
std::list
STL list class.
Definition: stl.hh:51
gem5::Packet::getAddr
Addr getAddr() const
Definition: packet.hh:781
gem5::BaseCache::writebackVisitor
void writebackVisitor(CacheBlk &blk)
Cache block visitor that writes back dirty cache blocks using functional writes.
Definition: base.cc:1737
gem5::BaseCache::CacheCmdStats
Definition: base.hh:988
gem5::BaseCache::numTarget
const int numTarget
The number of targets for each MSHR.
Definition: base.hh:925
gem5::BaseCache::createMissPacket
virtual PacketPtr createMissPacket(PacketPtr cpu_pkt, CacheBlk *blk, bool needs_writable, bool is_whole_line_write) const =0
Create an appropriate downstream bus request packet.
gem5::BaseTags::extractBlkOffset
int extractBlkOffset(Addr addr) const
Calculate the block offset of an address.
Definition: base.hh:225
gem5::CompressionBlk::OverwriteType
OverwriteType
When an overwrite happens, the data size may change and no longer fit in its current container.
Definition: super_blk.hh:74
gem5::CompressionBlk::setDecompressionLatency
void setDecompressionLatency(const Cycles lat)
Set number of cycles needed to decompress this block.
Definition: super_blk.cc:135
gem5::QueuedResponsePort::schedTimingResp
void schedTimingResp(PacketPtr pkt, Tick when)
Schedule the sending of a timing response.
Definition: qport.hh:93
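A sketch of the timing-response hand-off for a hit (request_time is assumed to be the tick at which the response may leave, e.g. the clock edge plus the lookup latency):
    // Turn the request into a response in place and let the queued CPU-side
    // port send it once request_time is reached.
    pkt->makeTimingResponse();
    cpuSidePort.schedTimingResp(pkt, request_time);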
gem5::compression::Base::setDecompressionLatency
static void setDecompressionLatency(CacheBlk *blk, const Cycles lat)
Set the decompression latency of compressed block.
Definition: base.cc:215
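A small sketch of how these two static helpers record compression metadata once the configured compressor has run (comp_size_bits and decomp_lat are assumed to come from the compressor's result; both helpers expect the block to belong to a CompressedTags store, i.e. to be a CompressionBlk):
    // Record the compressed size (in bits) and the cycles needed to
    // decompress the block.
    compression::Base::setSizeBits(blk, comp_size_bits);
    compression::Base::setDecompressionLatency(blk, decomp_lat);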
fatal_if
#define fatal_if(cond,...)
Conditional fatal macro that checks the supplied condition and only causes a fatal error if the condi...
Definition: logging.hh:225
gem5::BaseCache::functionalAccess
virtual void functionalAccess(PacketPtr pkt, bool from_cpu_side)
Performs the access specified by the request.
Definition: base.cc:639
gem5::prefetch::Base::getPacket
virtual PacketPtr getPacket()=0
gem5::BaseCache::CacheResponsePort::sendRetryEvent
EventFunctionWrapper sendRetryEvent
Definition: base.hh:303
gem5::BaseCache::handleEvictions
bool handleEvictions(std::vector< CacheBlk * > &evict_blks, PacketList &writebacks)
Try to evict the given blocks.
Definition: base.cc:865
gem5
Reference material can be found at the JEDEC website: UFS standard http://www.jedec....
Definition: decoder.cc:40
gem5::QueueEntry::sendPacket
virtual bool sendPacket(BaseCache &cache)=0
Send this queue entry as a downstream packet, with the exact behaviour depending on the specific entr...
gem5::statistics::total
const FlagsType total
Print the total.
Definition: info.hh:60
gem5::BaseCache::forwardLatency
const Cycles forwardLatency
This is the forward latency of the cache.
Definition: base.hh:907
gem5::BaseCache::nextQueueReadyTime
Tick nextQueueReadyTime() const
Find next request ready time from among possible sources.
Definition: base.cc:1773
gem5::MSHR::getTarget
QueueEntry::Target * getTarget() override
Returns a reference to the first target.
Definition: mshr.hh:457
gem5::BaseCache::CpuSidePort::getAddrRanges
virtual AddrRangeList getAddrRanges() const override
Get a list of the non-overlapping address ranges the owner is responsible for.
Definition: base.cc:2492
gem5::QueueEntry::inService
bool inService
True if the entry has been sent downstream.
Definition: queue_entry.hh:110
gem5::BaseCache::CpuSidePort::recvAtomic
virtual Tick recvAtomic(PacketPtr pkt) override
Receive an atomic request packet from the peer.
Definition: base.cc:2467
gem5::Packet::isResponse
bool isResponse() const
Definition: packet.hh:587
gem5::BaseTags::findVictim
virtual CacheBlk * findVictim(Addr addr, const bool is_secure, const std::size_t size, std::vector< CacheBlk * > &evict_blks)=0
Find replacement victim based on address.
gem5::BaseCache::handleFill
CacheBlk * handleFill(PacketPtr pkt, CacheBlk *blk, PacketList &writebacks, bool allocate)
Handle a fill operation caused by a received packet.
Definition: base.cc:1434
gem5::Packet::getSize
unsigned getSize() const
Definition: packet.hh:791
gem5::Named::_name
const std::string _name
Definition: named.hh:41
gem5::BaseCache::CacheStats::CacheStats
CacheStats(BaseCache &c)
Definition: base.cc:2111
gem5::Queue::findPending
Entry * findPending(const QueueEntry *entry) const
Find any pending requests that overlap the given request of a different queue.
Definition: queue.hh:207
gem5::BaseCache::calculateAccessLatency
Cycles calculateAccessLatency(const CacheBlk *blk, const uint32_t delay, const Cycles lookup_lat) const
Calculate access latency in ticks given a tag lookup latency, and whether access was a hit or miss.
Definition: base.cc:1119
gem5::Packet::hasRespData
bool hasRespData() const
Definition: packet.hh:604
gem5::QueueEntry::getTarget
virtual Target * getTarget()=0
Returns a pointer to the first target.
gem5::Event::scheduled
bool scheduled() const
Determine if the current event is scheduled.
Definition: eventq.hh:465
gem5::BaseCache::writebackBlk
PacketPtr writebackBlk(CacheBlk *blk)
Create a writeback request for the given block.
Definition: base.cc:1616
gem5::BaseCache::CpuSidePort::tryTiming
virtual bool tryTiming(PacketPtr pkt) override
Availability request from the peer.
Definition: base.cc:2434
gem5::TaggedEntry::isValid
virtual bool isValid() const
Checks if the entry is valid.
Definition: tagged_entry.hh:57
gem5::WriteAllocator::resetDelay
void resetDelay(Addr blk_addr)
Clear delay counter for the input block.
Definition: base.hh:1432
gem5::Packet::isEviction
bool isEviction() const
Definition: packet.hh:599
gem5::Packet::isInvalidate
bool isInvalidate() const
Definition: packet.hh:598
panic
#define panic(...)
This implements a cprintf based panic() function.
Definition: logging.hh:177
gem5::BaseCache::satisfyRequest
virtual void satisfyRequest(PacketPtr pkt, CacheBlk *blk, bool deferred_response=false, bool pending_downgrade=false)
Perform any necessary updates to the block and perform any data exchange between the packet and the b...
Definition: base.cc:1012
gem5::MemCmd::HardPFResp
@ HardPFResp
Definition: packet.hh:100
gem5::Clocked::clockPeriod
Tick clockPeriod() const
Definition: clocked_object.hh:217
gem5::ArmISA::mode
Bitfield< 4, 0 > mode
Definition: misc_types.hh:73
gem5::X86ISA::addr
Bitfield< 3 > addr
Definition: types.hh:84
gem5::Packet::isDemand
bool isDemand() const
Definition: packet.hh:584
gem5::CacheBlk::ReadableBit
@ ReadableBit
Read permission.
Definition: cache_blk.hh:85
gem5::BaseCache::responseLatency
const Cycles responseLatency
The latency of sending a response to the upper-level cache/core on a line fill.
Definition: base.hh:917
gem5::BaseCache::compressor
compression::Base * compressor
Compression method being used.
Definition: base.hh:353
