gem5  v21.2.1.0
base.cc
1 /*
2  * Copyright (c) 2012-2013, 2018-2019 ARM Limited
3  * All rights reserved.
4  *
5  * The license below extends only to copyright in the software and shall
6  * not be construed as granting a license to any other intellectual
7  * property including but not limited to intellectual property relating
8  * to a hardware implementation of the functionality of the software
9  * licensed hereunder. You may use the software subject to the license
10  * terms below provided that you ensure that this notice is replicated
11  * unmodified and in its entirety in all distributions of the software,
12  * modified or unmodified, in source code or in binary form.
13  *
14  * Copyright (c) 2003-2005 The Regents of The University of Michigan
15  * All rights reserved.
16  *
17  * Redistribution and use in source and binary forms, with or without
18  * modification, are permitted provided that the following conditions are
19  * met: redistributions of source code must retain the above copyright
20  * notice, this list of conditions and the following disclaimer;
21  * redistributions in binary form must reproduce the above copyright
22  * notice, this list of conditions and the following disclaimer in the
23  * documentation and/or other materials provided with the distribution;
24  * neither the name of the copyright holders nor the names of its
25  * contributors may be used to endorse or promote products derived from
26  * this software without specific prior written permission.
27  *
28  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39  */
40 
41 /**
42  * @file
43  * Definitions of BaseCache.
44  */
45 
46 #include "mem/cache/base.hh"
47 
48 #include "base/compiler.hh"
49 #include "base/logging.hh"
50 #include "debug/Cache.hh"
51 #include "debug/CacheComp.hh"
52 #include "debug/CachePort.hh"
53 #include "debug/CacheRepl.hh"
54 #include "debug/CacheVerbose.hh"
55 #include "debug/HWPrefetch.hh"
56 #include "mem/cache/compressors/base.hh"
57 #include "mem/cache/mshr.hh"
58 #include "mem/cache/prefetch/base.hh"
59 #include "mem/cache/queue_entry.hh"
60 #include "mem/cache/tags/compressed_tags.hh"
61 #include "mem/cache/tags/super_blk.hh"
62 #include "params/BaseCache.hh"
63 #include "params/WriteAllocator.hh"
64 #include "sim/cur_tick.hh"
65 
66 namespace gem5
67 {
68 
69 BaseCache::CacheResponsePort::CacheResponsePort(const std::string &_name,
70  BaseCache *_cache,
71  const std::string &_label)
72  : QueuedResponsePort(_name, _cache, queue),
73  queue(*_cache, *this, true, _label),
74  blocked(false), mustSendRetry(false),
75  sendRetryEvent([this]{ processSendRetry(); }, _name)
76 {
77 }
78 
79 BaseCache::BaseCache(const BaseCacheParams &p, unsigned blk_size)
80  : ClockedObject(p),
81  cpuSidePort (p.name + ".cpu_side_port", this, "CpuSidePort"),
82  memSidePort(p.name + ".mem_side_port", this, "MemSidePort"),
83  mshrQueue("MSHRs", p.mshrs, 0, p.demand_mshr_reserve, p.name),
84  writeBuffer("write buffer", p.write_buffers, p.mshrs, p.name),
85  tags(p.tags),
86  compressor(p.compressor),
87  prefetcher(p.prefetcher),
88  writeAllocator(p.write_allocator),
89  writebackClean(p.writeback_clean),
90  tempBlockWriteback(nullptr),
91  writebackTempBlockAtomicEvent([this]{ writebackTempBlockAtomic(); },
92  name(), false,
93  EventBase::Delayed_Writeback_Pri),
94  blkSize(blk_size),
95  lookupLatency(p.tag_latency),
96  dataLatency(p.data_latency),
97  forwardLatency(p.tag_latency),
98  fillLatency(p.data_latency),
99  responseLatency(p.response_latency),
100  sequentialAccess(p.sequential_access),
101  numTarget(p.tgts_per_mshr),
102  forwardSnoops(true),
103  clusivity(p.clusivity),
104  isReadOnly(p.is_read_only),
105  replaceExpansions(p.replace_expansions),
106  moveContractions(p.move_contractions),
107  blocked(0),
108  order(0),
109  noTargetMSHR(nullptr),
110  missCount(p.max_miss_count),
111  addrRanges(p.addr_ranges.begin(), p.addr_ranges.end()),
112  system(p.system),
113  stats(*this)
114 {
115  // the MSHR queue has no reserve entries as we check the MSHR
116  // queue on every single allocation, whereas the write queue has
117  // as many reserve entries as we have MSHRs, since every MSHR may
118  // eventually require a writeback, and we do not check the write
119  // buffer before committing to an MSHR
120 
121  // forward snoops is overridden in init() once we can query
122  // whether the connected requestor is actually snooping or not
123 
124  tempBlock = new TempCacheBlk(blkSize);
125 
126  tags->tagsInit();
127  if (prefetcher)
128  prefetcher->setCache(this);
129 
130  fatal_if(compressor && !dynamic_cast<CompressedTags*>(tags),
131  "The tags of compressed cache %s must derive from CompressedTags",
132  name());
133  warn_if(!compressor && dynamic_cast<CompressedTags*>(tags),
134  "Compressed cache %s does not have a compression algorithm", name());
135  if (compressor)
136  compressor->setCache(this);
137 }
138 
139 BaseCache::~BaseCache()
140 {
141  delete tempBlock;
142 }
143 
144 void
145 BaseCache::CacheResponsePort::setBlocked()
146 {
147  assert(!blocked);
148  DPRINTF(CachePort, "Port is blocking new requests\n");
149  blocked = true;
150  // if we already scheduled a retry in this cycle, but it has not yet
151  // happened, cancel it
152  if (sendRetryEvent.scheduled()) {
153  owner.deschedule(sendRetryEvent);
154  DPRINTF(CachePort, "Port descheduled retry\n");
155  mustSendRetry = true;
156  }
157 }
158 
159 void
160 BaseCache::CacheResponsePort::clearBlocked()
161 {
162  assert(blocked);
163  DPRINTF(CachePort, "Port is accepting new requests\n");
164  blocked = false;
165  if (mustSendRetry) {
166  // @TODO: need to find a better time (next cycle?)
167  owner.schedule(sendRetryEvent, curTick() + 1);
168  }
169 }
170 
171 void
172 BaseCache::CacheResponsePort::processSendRetry()
173 {
174  DPRINTF(CachePort, "Port is sending retry\n");
175 
176  // reset the flag and call retry
177  mustSendRetry = false;
178  sendRetryReq();
179 }
180 
181 Addr
182 BaseCache::regenerateBlkAddr(CacheBlk* blk)
183 {
184  if (blk != tempBlock) {
185  return tags->regenerateBlkAddr(blk);
186  } else {
187  return tempBlock->getAddr();
188  }
189 }
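Editor's note: the address regeneration above simply reverses the tag/set decomposition done on lookup. A minimal standalone sketch of that arithmetic (not gem5 code; the 64-byte line and 256-set geometry are assumptions for illustration):

#include <cstdint>
#include <iostream>

int main()
{
    // assumed geometry: 64-byte blocks (6 offset bits), 256 sets (8 index bits)
    const unsigned set_shift = 6;
    const unsigned tag_shift = set_shift + 8;

    const uint64_t tag = 0x1234, set = 0x2a;
    // regenerate the block-aligned address from its tag and set index
    const uint64_t addr = (tag << tag_shift) | (set << set_shift);
    std::cout << std::hex << "block address: 0x" << addr << '\n';
    return 0;
}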
190 
191 void
192 BaseCache::init()
193 {
194  if (!cpuSidePort.isConnected() || !memSidePort.isConnected())
195  fatal("Cache ports on %s are not connected\n", name());
196  cpuSidePort.sendRangeChange();
197  forwardSnoops = cpuSidePort.isSnooping();
198 }
199 
200 Port &
201 BaseCache::getPort(const std::string &if_name, PortID idx)
202 {
203  if (if_name == "mem_side") {
204  return memSidePort;
205  } else if (if_name == "cpu_side") {
206  return cpuSidePort;
207  } else {
208  return ClockedObject::getPort(if_name, idx);
209  }
210 }
211 
212 bool
213 BaseCache::inRange(Addr addr) const
214 {
215  for (const auto& r : addrRanges) {
216  if (r.contains(addr)) {
217  return true;
218  }
219  }
220  return false;
221 }
222 
223 void
224 BaseCache::handleTimingReqHit(PacketPtr pkt, CacheBlk *blk, Tick request_time)
225 {
226  if (pkt->needsResponse()) {
227  // These delays should have been consumed by now
228  assert(pkt->headerDelay == 0);
229  assert(pkt->payloadDelay == 0);
230 
231  pkt->makeTimingResponse();
232 
233  // In this case we are considering request_time that takes
234  // into account the delay of the xbar, if any, and just
235  // lat, neglecting responseLatency, modelling hit latency
236  // just as the value of lat overridden by access(), which calls
237  // the calculateAccessLatency() function.
238  cpuSidePort.schedTimingResp(pkt, request_time);
239  } else {
240  DPRINTF(Cache, "%s satisfied %s, no response needed\n", __func__,
241  pkt->print());
242 
243  // queue the packet for deletion, as the sending cache is
244  // still relying on it; if the block is found in access(),
245  // CleanEvict and Writeback messages will be deleted
246  // here as well
247  pendingDelete.reset(pkt);
248  }
249 }
250 
251 void
252 BaseCache::handleTimingReqMiss(PacketPtr pkt, MSHR *mshr, CacheBlk *blk,
253  Tick forward_time, Tick request_time)
254 {
255  if (writeAllocator &&
256  pkt && pkt->isWrite() && !pkt->req->isUncacheable()) {
257  writeAllocator->updateMode(pkt->getAddr(), pkt->getSize(),
258  pkt->getBlockAddr(blkSize));
259  }
260 
261  if (mshr) {
265 
266  //@todo remove hw_pf here
267 
268  // Coalesce unless it was a software prefetch (see above).
269  if (pkt) {
270  assert(!pkt->isWriteback());
271  // CleanEvicts corresponding to blocks which have
272  // outstanding requests in MSHRs are simply sunk here
273  if (pkt->cmd == MemCmd::CleanEvict) {
274  pendingDelete.reset(pkt);
275  } else if (pkt->cmd == MemCmd::WriteClean) {
276  // A WriteClean should never coalesce with any
277  // outstanding cache maintenance requests.
278 
279  // We use forward_time here because there is an
280  // uncached memory write, forwarded to WriteBuffer.
281  allocateWriteBuffer(pkt, forward_time);
282  } else {
283  DPRINTF(Cache, "%s coalescing MSHR for %s\n", __func__,
284  pkt->print());
285 
286  assert(pkt->req->requestorId() < system->maxRequestors());
287  stats.cmdStats(pkt).mshrHits[pkt->req->requestorId()]++;
288 
289  // We use forward_time here because it is the same
290  // considering new targets. We have multiple
291  // requests for the same address here. It
292  // specifies the latency to allocate an internal
293  // buffer and to schedule an event to the queued
294  // port and also takes into account the additional
295  // delay of the xbar.
296  mshr->allocateTarget(pkt, forward_time, order++,
297  allocOnFill(pkt->cmd));
298  if (mshr->getNumTargets() >= numTarget) {
299  noTargetMSHR = mshr;
300  setBlocked(Blocked_NoTargets);
301  // need to be careful with this... if this mshr isn't
302  // ready yet (i.e. time > curTick()), we don't want to
303  // move it ahead of mshrs that are ready
304  // mshrQueue.moveToFront(mshr);
305  }
306  }
307  }
308  } else {
309  // no MSHR
310  assert(pkt->req->requestorId() < system->maxRequestors());
311  stats.cmdStats(pkt).mshrMisses[pkt->req->requestorId()]++;
312  if (prefetcher && pkt->isDemand())
313  prefetcher->incrDemandMhsrMisses();
314 
315  if (pkt->isEviction() || pkt->cmd == MemCmd::WriteClean) {
316  // We use forward_time here because there is a
317  // writeback or writeclean, forwarded to WriteBuffer.
318  allocateWriteBuffer(pkt, forward_time);
319  } else {
320  if (blk && blk->isValid()) {
321  // If we have a write miss to a valid block, we
322  // need to mark the block non-readable. Otherwise
323  // if we allow reads while there's an outstanding
324  // write miss, the read could return stale data
325  // out of the cache block... a more aggressive
326  // system could detect the overlap (if any) and
327  // forward data out of the MSHRs, but we don't do
328  // that yet. Note that we do need to leave the
329  // block valid so that it stays in the cache, in
330  // case we get an upgrade response (and hence no
331  // new data) when the write miss completes.
332  // As long as CPUs do proper store/load forwarding
333  // internally, and have a sufficiently weak memory
334  // model, this is probably unnecessary, but at some
335  // point it must have seemed like we needed it...
336  assert((pkt->needsWritable() &&
337  !blk->isSet(CacheBlk::WritableBit)) ||
338  pkt->req->isCacheMaintenance());
339  blk->clearCoherenceBits(CacheBlk::ReadableBit);
340  }
341  // Here we are using forward_time, modelling the latency of
342  // a miss (outbound) just as forwardLatency, neglecting the
343  // lookupLatency component.
344  allocateMissBuffer(pkt, forward_time);
345  }
346  }
347 }
348 
349 void
350 BaseCache::recvTimingReq(PacketPtr pkt)
351 {
352  // anything that is merely forwarded pays for the forward latency and
353  // the delay provided by the crossbar
354  Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;
355 
356  Cycles lat;
357  CacheBlk *blk = nullptr;
358  bool satisfied = false;
359  {
360  PacketList writebacks;
361  // Note that lat is passed by reference here. The function
362  // access() will set the lat value.
363  satisfied = access(pkt, blk, lat, writebacks);
364 
365  // After the evicted blocks are selected, they must be forwarded
366  // to the write buffer to ensure they logically precede anything
367  // happening below
368  doWritebacks(writebacks, clockEdge(lat + forwardLatency));
369  }
370 
371  // Here we charge the headerDelay that takes into account the latencies
372  // of the bus, if the packet comes from it.
373  // The latency charged is just the value set by the access() function.
374  // In case of a hit we are neglecting response latency.
375  // In case of a miss we are neglecting forward latency.
376  Tick request_time = clockEdge(lat);
377  // Here we reset the timing of the packet.
378  pkt->headerDelay = pkt->payloadDelay = 0;
379 
380  if (satisfied) {
381  // notify before anything else as later handleTimingReqHit might turn
382  // the packet in a response
383  ppHit->notify(pkt);
384 
385  if (prefetcher && blk && blk->wasPrefetched()) {
386  DPRINTF(Cache, "Hit on prefetch for addr %#x (%s)\n",
387  pkt->getAddr(), pkt->isSecure() ? "s" : "ns");
388  blk->clearPrefetched();
389  }
390 
391  handleTimingReqHit(pkt, blk, request_time);
392  } else {
393  handleTimingReqMiss(pkt, blk, forward_time, request_time);
394 
395  ppMiss->notify(pkt);
396  }
397 
398  if (prefetcher) {
399  // track time of availability of next prefetch, if any
400  Tick next_pf_time = prefetcher->nextPrefetchReadyTime();
401  if (next_pf_time != MaxTick) {
402  schedMemSideSendEvent(next_pf_time);
403  }
404  }
405 }
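Editor's note: for readers unfamiliar with the two timestamps used above, the following standalone sketch (not gem5 code; the 500-tick clock period, 750-tick header delay and 2/3-cycle latencies are assumptions) shows how forward_time and request_time are derived from clock edges:

#include <cstdint>
#include <iostream>

int main()
{
    const uint64_t period = 500;        // assumed clock period in ticks
    const uint64_t now = 10250;         // assumed current tick (mid-cycle)
    const uint64_t header_delay = 750;  // assumed crossbar header delay
    const uint64_t forward_lat = 2, access_lat = 3;  // assumed cycle counts

    // clock_edge(n): tick of the clock edge n cycles after the next edge
    auto clock_edge = [&](uint64_t cycles) {
        const uint64_t next_edge = ((now + period - 1) / period) * period;
        return next_edge + cycles * period;
    };

    const uint64_t forward_time = clock_edge(forward_lat) + header_delay;
    const uint64_t request_time = clock_edge(access_lat);
    std::cout << "forward_time=" << forward_time
              << " request_time=" << request_time << '\n';
    return 0;
}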
406 
407 void
408 BaseCache::handleUncacheableWriteResp(PacketPtr pkt)
409 {
410  Tick completion_time = clockEdge(responseLatency) +
411  pkt->headerDelay + pkt->payloadDelay;
412 
413  // Reset the bus additional time as it is now accounted for
414  pkt->headerDelay = pkt->payloadDelay = 0;
415 
416  cpuSidePort.schedTimingResp(pkt, completion_time);
417 }
418 
419 void
420 BaseCache::recvTimingResp(PacketPtr pkt)
421 {
422  assert(pkt->isResponse());
423 
424  // all header delay should be paid for by the crossbar, unless
425  // this is a prefetch response from above
426  panic_if(pkt->headerDelay != 0 && pkt->cmd != MemCmd::HardPFResp,
427  "%s saw a non-zero packet delay\n", name());
428 
429  const bool is_error = pkt->isError();
430 
431  if (is_error) {
432  DPRINTF(Cache, "%s: Cache received %s with error\n", __func__,
433  pkt->print());
434  }
435 
436  DPRINTF(Cache, "%s: Handling response %s\n", __func__,
437  pkt->print());
438 
439  // if this is a write, we should be looking at an uncacheable
440  // write
441  if (pkt->isWrite()) {
442  assert(pkt->req->isUncacheable());
443  handleUncacheableWriteResp(pkt);
444  return;
445  }
446 
447  // we have dealt with any (uncacheable) writes above, from here on
448  // we know we are dealing with an MSHR due to a miss or a prefetch
449  MSHR *mshr = dynamic_cast<MSHR*>(pkt->popSenderState());
450  assert(mshr);
451 
452  if (mshr == noTargetMSHR) {
453  // we always clear at least one target
454  clearBlocked(Blocked_NoTargets);
455  noTargetMSHR = nullptr;
456  }
457 
458  // Initial target is used just for stats
459  const QueueEntry::Target *initial_tgt = mshr->getTarget();
460  const Tick miss_latency = curTick() - initial_tgt->recvTime;
461  if (pkt->req->isUncacheable()) {
462  assert(pkt->req->requestorId() < system->maxRequestors());
463  stats.cmdStats(initial_tgt->pkt)
464  .mshrUncacheableLatency[pkt->req->requestorId()] += miss_latency;
465  } else {
466  assert(pkt->req->requestorId() < system->maxRequestors());
467  stats.cmdStats(initial_tgt->pkt)
468  .mshrMissLatency[pkt->req->requestorId()] += miss_latency;
469  }
470 
471  PacketList writebacks;
472 
473  bool is_fill = !mshr->isForward &&
474  (pkt->isRead() || pkt->cmd == MemCmd::UpgradeResp ||
475  mshr->wasWholeLineWrite);
476 
477  // make sure that if the mshr was due to a whole line write then
478  // the response is an invalidation
479  assert(!mshr->wasWholeLineWrite || pkt->isInvalidate());
480 
481  CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());
482 
483  if (is_fill && !is_error) {
484  DPRINTF(Cache, "Block for addr %#llx being updated in Cache\n",
485  pkt->getAddr());
486 
487  const bool allocate = (writeAllocator && mshr->wasWholeLineWrite) ?
488  writeAllocator->allocate() : mshr->allocOnFill();
489  blk = handleFill(pkt, blk, writebacks, allocate);
490  assert(blk != nullptr);
491  ppFill->notify(pkt);
492  }
493 
494  if (blk && blk->isValid() && pkt->isClean() && !pkt->isInvalidate()) {
495  // The block was marked not readable while there was a pending
496  // cache maintenance operation, restore its flag.
497  blk->setCoherenceBits(CacheBlk::ReadableBit);
498 
499  // This was a cache clean operation (without invalidate)
500  // and we have a copy of the block already. Since there
501  // is no invalidation, we can promote targets that don't
502  // require a writable copy
503  mshr->promoteReadable();
504  }
505 
506  if (blk && blk->isSet(CacheBlk::WritableBit) &&
507  !pkt->req->isCacheInvalidate()) {
508  // If at this point the referenced block is writable and the
509  // response is not a cache invalidate, we promote targets that
510  // were deferred as we couldn't guarantee a writable copy
511  mshr->promoteWritable();
512  }
513 
514  serviceMSHRTargets(mshr, pkt, blk);
515 
516  if (mshr->promoteDeferredTargets()) {
517  // avoid later read getting stale data while write miss is
518  // outstanding.. see comment in timingAccess()
519  if (blk) {
520  blk->clearCoherenceBits(CacheBlk::ReadableBit);
521  }
522  mshrQueue.markPending(mshr);
523  schedMemSideSendEvent(clockEdge() + pkt->payloadDelay);
524  } else {
525  // while we deallocate an mshr from the queue we still have to
526  // check the isFull condition before and after as we might
527  // have been using the reserved entries already
528  const bool was_full = mshrQueue.isFull();
529  mshrQueue.deallocate(mshr);
530  if (was_full && !mshrQueue.isFull()) {
531  clearBlocked(Blocked_NoMSHRs);
532  }
533 
534  // Request the bus for a prefetch if this deallocation freed enough
535  // MSHRs for a prefetch to take place
536  if (prefetcher && mshrQueue.canPrefetch() && !isBlocked()) {
537  Tick next_pf_time = std::max(prefetcher->nextPrefetchReadyTime(),
538  clockEdge());
539  if (next_pf_time != MaxTick)
540  schedMemSideSendEvent(next_pf_time);
541  }
542  }
543 
544  // if we used temp block, check to see if its valid and then clear it out
545  if (blk == tempBlock && tempBlock->isValid()) {
546  evictBlock(blk, writebacks);
547  }
548 
549  const Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;
550  // copy writebacks to write buffer
551  doWritebacks(writebacks, forward_time);
552 
553  DPRINTF(CacheVerbose, "%s: Leaving with %s\n", __func__, pkt->print());
554  delete pkt;
555 }
556 
557 
558 Tick
559 BaseCache::recvAtomic(PacketPtr pkt)
560 {
561  // should assert here that there are no outstanding MSHRs or
562  // writebacks... that would mean that someone used an atomic
563  // access in timing mode
564 
565  // We use lookupLatency here because it is used to specify the latency
566  // to access.
567  Cycles lat = lookupLatency;
568 
569  CacheBlk *blk = nullptr;
570  PacketList writebacks;
571  bool satisfied = access(pkt, blk, lat, writebacks);
572 
573  if (pkt->isClean() && blk && blk->isSet(CacheBlk::DirtyBit)) {
574  // A cache clean operation is looking for a dirty
575  // block. If a dirty block is encountered a WriteClean
576  // will update any copies to the path to the memory
577  // until the point of reference.
578  DPRINTF(CacheVerbose, "%s: packet %s found block: %s\n",
579  __func__, pkt->print(), blk->print());
580  PacketPtr wb_pkt = writecleanBlk(blk, pkt->req->getDest(), pkt->id);
581  writebacks.push_back(wb_pkt);
582  pkt->setSatisfied();
583  }
584 
585  // handle writebacks resulting from the access here to ensure they
586  // logically precede anything happening below
587  doWritebacksAtomic(writebacks);
588  assert(writebacks.empty());
589 
590  if (!satisfied) {
591  lat += handleAtomicReqMiss(pkt, blk, writebacks);
592  }
593 
594  // Note that we don't invoke the prefetcher at all in atomic mode.
595  // It's not clear how to do it properly, particularly for
596  // prefetchers that aggressively generate prefetch candidates and
597  // rely on bandwidth contention to throttle them; these will tend
598  // to pollute the cache in atomic mode since there is no bandwidth
599  // contention. If we ever do want to enable prefetching in atomic
600  // mode, though, this is the place to do it... see timingAccess()
601  // for an example (though we'd want to issue the prefetch(es)
602  // immediately rather than calling requestMemSideBus() as we do
603  // there).
604 
605  // do any writebacks resulting from the response handling
606  doWritebacksAtomic(writebacks);
607 
608  // if we used temp block, check to see if its valid and if so
609  // clear it out, but only do so after the call to recvAtomic is
610  // finished so that any downstream observers (such as a snoop
611  // filter), first see the fill, and only then see the eviction
612  if (blk == tempBlock && tempBlock->isValid()) {
613  // the atomic CPU calls recvAtomic for fetch and load/store
614  // sequentially, and we may already have a tempBlock
615  // writeback from the fetch that we have not yet sent
616  if (tempBlockWriteback) {
617  // if that is the case, write the previous one back, and
618  // do not schedule any new event
619  writebackTempBlockAtomic();
620  } else {
621  // the writeback/clean eviction happens after the call to
622  // recvAtomic has finished (but before any successive
623  // calls), so that the response handling from the fill is
624  // allowed to happen first
625  schedule(writebackTempBlockAtomicEvent, curTick());
626  }
627 
628  tempBlockWriteback = evictBlock(blk);
629  }
630 
631  if (pkt->needsResponse()) {
632  pkt->makeAtomicResponse();
633  }
634 
635  return lat * clockPeriod();
636 }
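Editor's note: the atomic return value above converts a cycle count into ticks by multiplying with the clock period. A minimal standalone illustration (assuming a 1 ns period with gem5's default 1 ps tick, i.e. 1000 ticks per cycle; not gem5 code):

#include <cstdint>
#include <iostream>

int main()
{
    const uint64_t clock_period_ticks = 1000;  // assumed 1 GHz clock, 1 ps ticks
    const uint64_t lat_cycles = 4;             // assumed lookup + miss handling
    std::cout << "atomic latency: " << lat_cycles * clock_period_ticks
              << " ticks\n";
    return 0;
}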
637 
638 void
639 BaseCache::functionalAccess(PacketPtr pkt, bool from_cpu_side)
640 {
641  Addr blk_addr = pkt->getBlockAddr(blkSize);
642  bool is_secure = pkt->isSecure();
643  CacheBlk *blk = tags->findBlock(pkt->getAddr(), is_secure);
644  MSHR *mshr = mshrQueue.findMatch(blk_addr, is_secure);
645 
646  pkt->pushLabel(name());
647 
648  CacheBlkPrintWrapper cbpw(blk);
649 
650  // Note that just because an L2/L3 has valid data doesn't mean an
651  // L1 doesn't have a more up-to-date modified copy that still
652  // needs to be found. As a result we always update the request if
653  // we have it, but only declare it satisfied if we are the owner.
654 
655  // see if we have data at all (owned or otherwise)
656  bool have_data = blk && blk->isValid()
657  && pkt->trySatisfyFunctional(&cbpw, blk_addr, is_secure, blkSize,
658  blk->data);
659 
660  // data we have is dirty if marked as such or if we have an
661  // in-service MSHR that is pending a modified line
662  bool have_dirty =
663  have_data && (blk->isSet(CacheBlk::DirtyBit) ||
664  (mshr && mshr->inService && mshr->isPendingModified()));
665 
666  bool done = have_dirty ||
667  cpuSidePort.trySatisfyFunctional(pkt) ||
668  mshrQueue.trySatisfyFunctional(pkt) ||
669  writeBuffer.trySatisfyFunctional(pkt) ||
670  memSidePort.trySatisfyFunctional(pkt);
671 
672  DPRINTF(CacheVerbose, "%s: %s %s%s%s\n", __func__, pkt->print(),
673  (blk && blk->isValid()) ? "valid " : "",
674  have_data ? "data " : "", done ? "done " : "");
675 
676  // We're leaving the cache, so pop cache->name() label
677  pkt->popLabel();
678 
679  if (done) {
680  pkt->makeResponse();
681  } else {
682  // if it came as a request from the CPU side then make sure it
683  // continues towards the memory side
684  if (from_cpu_side) {
685  memSidePort.sendFunctional(pkt);
686  } else if (cpuSidePort.isSnooping()) {
687  // if it came from the memory side, it must be a snoop request
688  // and we should only forward it if we are forwarding snoops
689  cpuSidePort.sendFunctionalSnoop(pkt);
690  }
691  }
692 }
693 
694 void
695 BaseCache::updateBlockData(CacheBlk *blk, const PacketPtr cpkt,
696  bool has_old_data)
697 {
698  DataUpdate data_update(regenerateBlkAddr(blk), blk->isSecure());
699  if (ppDataUpdate->hasListeners()) {
700  if (has_old_data) {
701  data_update.oldData = std::vector<uint64_t>(blk->data,
702  blk->data + (blkSize / sizeof(uint64_t)));
703  }
704  }
705 
706  // Actually perform the data update
707  if (cpkt) {
708  cpkt->writeDataToBlock(blk->data, blkSize);
709  }
710 
711  if (ppDataUpdate->hasListeners()) {
712  if (cpkt) {
713  data_update.newData = std::vector<uint64_t>(blk->data,
714  blk->data + (blkSize / sizeof(uint64_t)));
715  }
716  ppDataUpdate->notify(data_update);
717  }
718 }
719 
720 void
721 BaseCache::cmpAndSwap(CacheBlk *blk, PacketPtr pkt)
722 {
723  assert(pkt->isRequest());
724 
725  uint64_t overwrite_val;
726  bool overwrite_mem;
727  uint64_t condition_val64;
728  uint32_t condition_val32;
729 
730  int offset = pkt->getOffset(blkSize);
731  uint8_t *blk_data = blk->data + offset;
732 
733  assert(sizeof(uint64_t) >= pkt->getSize());
734 
735  // Get a copy of the old block's contents for the probe before the update
736  DataUpdate data_update(regenerateBlkAddr(blk), blk->isSecure());
737  if (ppDataUpdate->hasListeners()) {
738  data_update.oldData = std::vector<uint64_t>(blk->data,
739  blk->data + (blkSize / sizeof(uint64_t)));
740  }
741 
742  overwrite_mem = true;
743  // keep a copy of our possible write value, and copy what is at the
744  // memory address into the packet
745  pkt->writeData((uint8_t *)&overwrite_val);
746  pkt->setData(blk_data);
747 
748  if (pkt->req->isCondSwap()) {
749  if (pkt->getSize() == sizeof(uint64_t)) {
750  condition_val64 = pkt->req->getExtraData();
751  overwrite_mem = !std::memcmp(&condition_val64, blk_data,
752  sizeof(uint64_t));
753  } else if (pkt->getSize() == sizeof(uint32_t)) {
754  condition_val32 = (uint32_t)pkt->req->getExtraData();
755  overwrite_mem = !std::memcmp(&condition_val32, blk_data,
756  sizeof(uint32_t));
757  } else
758  panic("Invalid size for conditional read/write\n");
759  }
760 
761  if (overwrite_mem) {
762  std::memcpy(blk_data, &overwrite_val, pkt->getSize());
763  blk->setCoherenceBits(CacheBlk::DirtyBit);
764 
765  if (ppDataUpdate->hasListeners()) {
766  data_update.newData = std::vector<uint64_t>(blk->data,
767  blk->data + (blkSize / sizeof(uint64_t)));
768  ppDataUpdate->notify(data_update);
769  }
770  }
771 }
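Editor's note: the conditional-swap path above only overwrites memory when the stored value matches the supplied condition value. A minimal standalone sketch of that semantics for 64-bit operands (not gem5 code; the helper name is hypothetical):

#include <cstdint>
#include <cstring>
#include <iostream>

// Returns true (and writes new_val) only if *mem currently equals cond.
static bool cond_swap64(uint8_t *mem, uint64_t cond, uint64_t new_val)
{
    const bool overwrite = std::memcmp(&cond, mem, sizeof(uint64_t)) == 0;
    if (overwrite)
        std::memcpy(mem, &new_val, sizeof(uint64_t));
    return overwrite;
}

int main()
{
    uint64_t word = 42;
    const bool swapped = cond_swap64(reinterpret_cast<uint8_t *>(&word), 42, 7);
    std::cout << "swapped=" << swapped << " word=" << word << '\n';
    return 0;
}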
772 
773 QueueEntry*
774 BaseCache::getNextQueueEntry()
775 {
776  // Check both MSHR queue and write buffer for potential requests,
777  // note that null does not mean there is no request, it could
778  // simply be that it is not ready
779  MSHR *miss_mshr = mshrQueue.getNext();
780  WriteQueueEntry *wq_entry = writeBuffer.getNext();
781 
782  // If we got a write buffer request ready, first priority is a
783  // full write buffer, otherwise we favour the miss requests
784  if (wq_entry && (writeBuffer.isFull() || !miss_mshr)) {
785  // need to search MSHR queue for conflicting earlier miss.
786  MSHR *conflict_mshr = mshrQueue.findPending(wq_entry);
787 
788  if (conflict_mshr && conflict_mshr->order < wq_entry->order) {
789  // Service misses in order until conflict is cleared.
790  return conflict_mshr;
791 
792  // @todo Note that we ignore the ready time of the conflict here
793  }
794 
795  // No conflicts; issue write
796  return wq_entry;
797  } else if (miss_mshr) {
798  // need to check for conflicting earlier writeback
799  WriteQueueEntry *conflict_mshr = writeBuffer.findPending(miss_mshr);
800  if (conflict_mshr) {
801  // not sure why we don't check order here... it was in the
802  // original code but commented out.
803 
804  // The only way this happens is if we are
805  // doing a write and we didn't have permissions
806  // then subsequently saw a writeback (owned got evicted)
807  // We need to make sure to perform the writeback first
808  // To preserve the dirty data, then we can issue the write
809 
810  // should we return wq_entry here instead? I.e. do we
811  // have to flush writes in order? I don't think so... not
812  // for Alpha anyway. Maybe for x86?
813  return conflict_mshr;
814 
815  // @todo Note that we ignore the ready time of the conflict here
816  }
817 
818  // No conflicts; issue read
819  return miss_mshr;
820  }
821 
822  // fall through... no pending requests. Try a prefetch.
823  assert(!miss_mshr && !wq_entry);
824  if (prefetcher && mshrQueue.canPrefetch() && !isBlocked()) {
825  // If we have a miss queue slot, we can try a prefetch
826  PacketPtr pkt = prefetcher->getPacket();
827  if (pkt) {
828  Addr pf_addr = pkt->getBlockAddr(blkSize);
829  if (tags->findBlock(pf_addr, pkt->isSecure())) {
830  DPRINTF(HWPrefetch, "Prefetch %#x has hit in cache, "
831  "dropped.\n", pf_addr);
833  // free the request and packet
834  delete pkt;
835  } else if (mshrQueue.findMatch(pf_addr, pkt->isSecure())) {
836  DPRINTF(HWPrefetch, "Prefetch %#x has hit in a MSHR, "
837  "dropped.\n", pf_addr);
839  // free the request and packet
840  delete pkt;
841  } else if (writeBuffer.findMatch(pf_addr, pkt->isSecure())) {
842  DPRINTF(HWPrefetch, "Prefetch %#x has hit in the "
843  "Write Buffer, dropped.\n", pf_addr);
845  // free the request and packet
846  delete pkt;
847  } else {
848  // Update statistic on number of prefetches issued
849  // (hwpf_mshr_misses)
850  assert(pkt->req->requestorId() < system->maxRequestors());
851  stats.cmdStats(pkt).mshrMisses[pkt->req->requestorId()]++;
852 
853  // allocate an MSHR and return it, note
854  // that we send the packet straight away, so do not
855  // schedule the send
856  return allocateMissBuffer(pkt, curTick(), false);
857  }
858  }
859  }
860 
861  return nullptr;
862 }
863 
864 bool
865 BaseCache::handleEvictions(std::vector<CacheBlk*> &evict_blks,
866  PacketList &writebacks)
867 {
868  bool replacement = false;
869  for (const auto& blk : evict_blks) {
870  if (blk->isValid()) {
871  replacement = true;
872 
873  const MSHR* mshr =
874  mshrQueue.findMatch(regenerateBlkAddr(blk), blk->isSecure());
875  if (mshr) {
876  // Must be an outstanding upgrade or clean request on a block
877  // we're about to replace
878  assert((!blk->isSet(CacheBlk::WritableBit) &&
879  mshr->needsWritable()) || mshr->isCleaning());
880  return false;
881  }
882  }
883  }
884 
885  // The victim will be replaced by a new entry, so increase the replacement
886  // counter if a valid block is being replaced
887  if (replacement) {
888  stats.replacements++;
889 
890  // Evict valid blocks associated to this victim block
891  for (auto& blk : evict_blks) {
892  if (blk->isValid()) {
893  evictBlock(blk, writebacks);
894  }
895  }
896  }
897 
898  return true;
899 }
900 
901 bool
902 BaseCache::updateCompressionData(CacheBlk *&blk, const uint64_t* data,
903  PacketList &writebacks)
904 {
905  // tempBlock does not exist in the tags, so don't do anything for it.
906  if (blk == tempBlock) {
907  return true;
908  }
909 
910  // The compressor is called to compress the updated data, so that its
911  // metadata can be updated.
912  Cycles compression_lat = Cycles(0);
913  Cycles decompression_lat = Cycles(0);
914  const auto comp_data =
915  compressor->compress(data, compression_lat, decompression_lat);
916  std::size_t compression_size = comp_data->getSizeBits();
917 
918  // Get previous compressed size
919  CompressionBlk* compression_blk = static_cast<CompressionBlk*>(blk);
920  [[maybe_unused]] const std::size_t prev_size =
921  compression_blk->getSizeBits();
922 
923  // If compressed size didn't change enough to modify its co-allocatability
924  // there is nothing to do. Otherwise we may be facing a data expansion
925  // (block passing from more compressed to less compressed state), or a
926  // data contraction (less to more).
927  bool is_data_expansion = false;
928  bool is_data_contraction = false;
929  const CompressionBlk::OverwriteType overwrite_type =
930  compression_blk->checkExpansionContraction(compression_size);
931  std::string op_name = "";
932  if (overwrite_type == CompressionBlk::DATA_EXPANSION) {
933  op_name = "expansion";
934  is_data_expansion = true;
935  } else if ((overwrite_type == CompressionBlk::DATA_CONTRACTION) &&
936  moveContractions) {
937  op_name = "contraction";
938  is_data_contraction = true;
939  }
940 
941  // If block changed compression state, it was possibly co-allocated with
942  // other blocks and cannot be co-allocated anymore, so one or more blocks
943  // must be evicted to make room for the expanded/contracted block
944  std::vector<CacheBlk*> evict_blks;
945  if (is_data_expansion || is_data_contraction) {
946  std::vector<CacheBlk*> evict_blks;
947  bool victim_itself = false;
948  CacheBlk *victim = nullptr;
949  if (replaceExpansions || is_data_contraction) {
950  victim = tags->findVictim(regenerateBlkAddr(blk),
951  blk->isSecure(), compression_size, evict_blks);
952 
953  // It is valid to return nullptr if there is no victim
954  if (!victim) {
955  return false;
956  }
957 
958  // If the victim block is itself the block won't need to be moved,
959  // and the victim should not be evicted
960  if (blk == victim) {
961  victim_itself = true;
962  auto it = std::find_if(evict_blks.begin(), evict_blks.end(),
963  [&blk](CacheBlk* evict_blk){ return evict_blk == blk; });
964  evict_blks.erase(it);
965  }
966 
967  // Print victim block's information
968  DPRINTF(CacheRepl, "Data %s replacement victim: %s\n",
969  op_name, victim->print());
970  } else {
971  // If we do not move the expanded block, we must make room for
972  // the expansion to happen, so evict every co-allocated block
973  const SuperBlk* superblock = static_cast<const SuperBlk*>(
974  compression_blk->getSectorBlock());
975  for (auto& sub_blk : superblock->blks) {
976  if (sub_blk->isValid() && (blk != sub_blk)) {
977  evict_blks.push_back(sub_blk);
978  }
979  }
980  }
981 
982  // Try to evict blocks; if it fails, give up on update
983  if (!handleEvictions(evict_blks, writebacks)) {
984  return false;
985  }
986 
987  DPRINTF(CacheComp, "Data %s: [%s] from %d to %d bits\n",
988  op_name, blk->print(), prev_size, compression_size);
989 
990  if (!victim_itself && (replaceExpansions || is_data_contraction)) {
991  // Move the block's contents to the invalid block so that it now
992  // co-allocates with the other existing superblock entry
993  tags->moveBlock(blk, victim);
994  blk = victim;
995  compression_blk = static_cast<CompressionBlk*>(blk);
996  }
997  }
998 
999  // Update the number of data expansions/contractions
1000  if (is_data_expansion) {
1001  stats.dataExpansions++;
1002  } else if (is_data_contraction) {
1003  stats.dataContractions++;
1004  }
1005 
1006  compression_blk->setSizeBits(compression_size);
1007  compression_blk->setDecompressionLatency(decompression_lat);
1008 
1009  return true;
1010 }
1011 
1012 void
1013 BaseCache::satisfyRequest(PacketPtr pkt, CacheBlk *blk, bool, bool)
1014 {
1015  assert(pkt->isRequest());
1016 
1017  assert(blk && blk->isValid());
1018  // Occasionally this is not true... if we are a lower-level cache
1019  // satisfying a string of Read and ReadEx requests from
1020  // upper-level caches, a Read will mark the block as shared but we
1021  // can satisfy a following ReadEx anyway since we can rely on the
1022  // Read requestor(s) to have buffered the ReadEx snoop and to
1023  // invalidate their blocks after receiving them.
1024  // assert(!pkt->needsWritable() || blk->isSet(CacheBlk::WritableBit));
1025  assert(pkt->getOffset(blkSize) + pkt->getSize() <= blkSize);
1026 
1027  // Check RMW operations first since both isRead() and
1028  // isWrite() will be true for them
1029  if (pkt->cmd == MemCmd::SwapReq) {
1030  if (pkt->isAtomicOp()) {
1031  // Get a copy of the old block's contents for the probe before
1032  // the update
1033  DataUpdate data_update(regenerateBlkAddr(blk), blk->isSecure());
1034  if (ppDataUpdate->hasListeners()) {
1035  data_update.oldData = std::vector<uint64_t>(blk->data,
1036  blk->data + (blkSize / sizeof(uint64_t)));
1037  }
1038 
1039  // extract data from cache and save it into the data field in
1040  // the packet as a return value from this atomic op
1041  int offset = tags->extractBlkOffset(pkt->getAddr());
1042  uint8_t *blk_data = blk->data + offset;
1043  pkt->setData(blk_data);
1044 
1045  // execute AMO operation
1046  (*(pkt->getAtomicOp()))(blk_data);
1047 
1048  // Inform of this block's data contents update
1049  if (ppDataUpdate->hasListeners()) {
1050  data_update.newData = std::vector<uint64_t>(blk->data,
1051  blk->data + (blkSize / sizeof(uint64_t)));
1052  ppDataUpdate->notify(data_update);
1053  }
1054 
1055  // set block status to dirty
1056  blk->setCoherenceBits(CacheBlk::DirtyBit);
1057  } else {
1058  cmpAndSwap(blk, pkt);
1059  }
1060  } else if (pkt->isWrite()) {
1061  // we have the block in a writable state and can go ahead,
1062  // note that the line may be also be considered writable in
1063  // downstream caches along the path to memory, but always
1064  // Exclusive, and never Modified
1065  assert(blk->isSet(CacheBlk::WritableBit));
1066  // Write or WriteLine at the first cache with block in writable state
1067  if (blk->checkWrite(pkt)) {
1068  updateBlockData(blk, pkt, true);
1069  }
1070  // Always mark the line as dirty (and thus transition to the
1071  // Modified state) even if we are a failed StoreCond so we
1072  // supply data to any snoops that have appended themselves to
1073  // this cache before knowing the store will fail.
1074  blk->setCoherenceBits(CacheBlk::DirtyBit);
1075  DPRINTF(CacheVerbose, "%s for %s (write)\n", __func__, pkt->print());
1076  } else if (pkt->isRead()) {
1077  if (pkt->isLLSC()) {
1078  blk->trackLoadLocked(pkt);
1079  }
1080 
1081  // all read responses have a data payload
1082  assert(pkt->hasRespData());
1083  pkt->setDataFromBlock(blk->data, blkSize);
1084  } else if (pkt->isUpgrade()) {
1085  // sanity check
1086  assert(!pkt->hasSharers());
1087 
1088  if (blk->isSet(CacheBlk::DirtyBit)) {
1089  // we were in the Owned state, and a cache above us that
1090  // has the line in Shared state needs to be made aware
1091  // that the data it already has is in fact dirty
1092  pkt->setCacheResponding();
1093  blk->clearCoherenceBits(CacheBlk::DirtyBit);
1094  }
1095  } else if (pkt->isClean()) {
1096  blk->clearCoherenceBits(CacheBlk::DirtyBit);
1097  } else {
1098  assert(pkt->isInvalidate());
1099  invalidateBlock(blk);
1100  DPRINTF(CacheVerbose, "%s for %s (invalidation)\n", __func__,
1101  pkt->print());
1102  }
1103 }
1104 
1105 /////////////////////////////////////////////////////
1106 //
1107 // Access path: requests coming in from the CPU side
1108 //
1109 /////////////////////////////////////////////////////
1110 Cycles
1111 BaseCache::calculateTagOnlyLatency(const uint32_t delay,
1112  const Cycles lookup_lat) const
1113 {
1114  // A tag-only access has to wait for the packet to arrive in order to
1115  // perform the tag lookup.
1116  return ticksToCycles(delay) + lookup_lat;
1117 }
1118 
1119 Cycles
1120 BaseCache::calculateAccessLatency(const CacheBlk* blk, const uint32_t delay,
1121  const Cycles lookup_lat) const
1122 {
1123  Cycles lat(0);
1124 
1125  if (blk != nullptr) {
1126  // As soon as the access arrives, for sequential accesses first access
1127  // tags, then the data entry. In the case of parallel accesses the
1128  // latency is dictated by the slowest of tag and data latencies.
1129  if (sequentialAccess) {
1130  lat = ticksToCycles(delay) + lookup_lat + dataLatency;
1131  } else {
1132  lat = ticksToCycles(delay) + std::max(lookup_lat, dataLatency);
1133  }
1134 
1135  // Check if the block to be accessed is available. If not, apply the
1136  // access latency on top of when the block is ready to be accessed.
1137  const Tick tick = curTick() + delay;
1138  const Tick when_ready = blk->getWhenReady();
1139  if (when_ready > tick &&
1140  ticksToCycles(when_ready - tick) > lat) {
1141  lat += ticksToCycles(when_ready - tick);
1142  }
1143  } else {
1144  // In case of a miss, we neglect the data access in a parallel
1145  // configuration (i.e., the data access will be stopped as soon as
1146  // we find out it is a miss), and use the tag-only latency.
1147  lat = calculateTagOnlyLatency(delay, lookup_lat);
1148  }
1149 
1150  return lat;
1151 }
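Editor's note: a worked example of the two access-latency policies above, as a standalone sketch (assumed 2-cycle tag lookup, 3-cycle data access and 1-cycle header delay; not gem5 code):

#include <algorithm>
#include <cstdint>
#include <iostream>

int main()
{
    const uint64_t lookup_lat = 2, data_lat = 3, delay_cycles = 1;

    // sequential tag-then-data access: the latencies add up
    const uint64_t seq_hit = delay_cycles + lookup_lat + data_lat;          // 6
    // parallel tag/data access: the slower of the two dominates
    const uint64_t par_hit = delay_cycles + std::max(lookup_lat, data_lat); // 4
    // a miss only pays the tag-only latency
    const uint64_t miss = delay_cycles + lookup_lat;                        // 3

    std::cout << "sequential hit: " << seq_hit << " cycles, parallel hit: "
              << par_hit << " cycles, miss: " << miss << " cycles\n";
    return 0;
}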
1152 
1153 bool
1154 BaseCache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat,
1155  PacketList &writebacks)
1156 {
1157  // sanity check
1158  assert(pkt->isRequest());
1159 
1160  gem5_assert(!(isReadOnly && pkt->isWrite()),
1161  "Should never see a write in a read-only cache %s\n",
1162  name());
1163 
1164  // Access block in the tags
1165  Cycles tag_latency(0);
1166  blk = tags->accessBlock(pkt, tag_latency);
1167 
1168  DPRINTF(Cache, "%s for %s %s\n", __func__, pkt->print(),
1169  blk ? "hit " + blk->print() : "miss");
1170 
1171  if (pkt->req->isCacheMaintenance()) {
1172  // A cache maintenance operation is always forwarded to the
1173  // memory below even if the block is found in dirty state.
1174 
1175  // We defer any changes to the state of the block until we
1176  // create and mark as in service the mshr for the downstream
1177  // packet.
1178 
1179  // Calculate access latency on top of when the packet arrives. This
1180  // takes into account the bus delay.
1181  lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency);
1182 
1183  return false;
1184  }
1185 
1186  if (pkt->isEviction()) {
1187  // We check for presence of block in above caches before issuing
1188  // Writeback or CleanEvict to write buffer. Therefore the only
1189  // possible cases can be of a CleanEvict packet coming from above
1190  // encountering a Writeback generated in this cache peer cache and
1191  // waiting in the write buffer. Cases of upper level peer caches
1192  // generating CleanEvict and Writeback or simply CleanEvict and
1193  // CleanEvict almost simultaneously will be caught by snoops sent out
1194  // by crossbar.
1195  WriteQueueEntry *wb_entry = writeBuffer.findMatch(pkt->getAddr(),
1196  pkt->isSecure());
1197  if (wb_entry) {
1198  assert(wb_entry->getNumTargets() == 1);
1199  PacketPtr wbPkt = wb_entry->getTarget()->pkt;
1200  assert(wbPkt->isWriteback());
1201 
1202  if (pkt->isCleanEviction()) {
1203  // The CleanEvict and WritebackClean snoops into other
1204  // peer caches of the same level while traversing the
1205  // crossbar. If a copy of the block is found, the
1206  // packet is deleted in the crossbar. Hence, none of
1207  // the other upper level caches connected to this
1208  // cache have the block, so we can clear the
1209  // BLOCK_CACHED flag in the Writeback if set and
1210  // discard the CleanEvict by returning true.
1211  wbPkt->clearBlockCached();
1212 
1213  // A clean evict does not need to access the data array
1214  lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency);
1215 
1216  return true;
1217  } else {
1218  assert(pkt->cmd == MemCmd::WritebackDirty);
1219  // Dirty writeback from above trumps our clean
1220  // writeback... discard here
1221  // Note: markInService will remove entry from writeback buffer.
1222  markInService(wb_entry);
1223  delete wbPkt;
1224  }
1225  }
1226  }
1227 
1228  // The critical latency part of a write depends only on the tag access
1229  if (pkt->isWrite()) {
1230  lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency);
1231  }
1232 
1233  // Writeback handling is special case. We can write the block into
1234  // the cache without having a writeable copy (or any copy at all).
1235  if (pkt->isWriteback()) {
1236  assert(blkSize == pkt->getSize());
1237 
1238  // we could get a clean writeback while we are having
1239  // outstanding accesses to a block, do the simple thing for
1240  // now and drop the clean writeback so that we do not upset
1241  // any ordering/decisions about ownership already taken
1242  if (pkt->cmd == MemCmd::WritebackClean &&
1243  mshrQueue.findMatch(pkt->getAddr(), pkt->isSecure())) {
1244  DPRINTF(Cache, "Clean writeback %#llx to block with MSHR, "
1245  "dropping\n", pkt->getAddr());
1246 
1247  // A writeback searches for the block, then writes the data.
1248  // As the writeback is being dropped, the data is not touched,
1249  // and we just had to wait for the time to find a match in the
1250  // MSHR. As of now assume a mshr queue search takes as long as
1251  // a tag lookup for simplicity.
1252  return true;
1253  }
1254 
1255  const bool has_old_data = blk && blk->isValid();
1256  if (!blk) {
1257  // need to do a replacement
1258  blk = allocateBlock(pkt, writebacks);
1259  if (!blk) {
1260  // no replaceable block available: give up, fwd to next level.
1261  incMissCount(pkt);
1262  return false;
1263  }
1264 
1265  blk->setCoherenceBits(CacheBlk::ReadableBit);
1266  } else if (compressor) {
1267  // This is an overwrite to an existing block, therefore we need
1268  // to check for data expansion (i.e., block was compressed with
1269  // a smaller size, and now it doesn't fit the entry anymore).
1270  // If that is the case we might need to evict blocks.
1271  if (!updateCompressionData(blk, pkt->getConstPtr<uint64_t>(),
1272  writebacks)) {
1273  invalidateBlock(blk);
1274  return false;
1275  }
1276  }
1277 
1278  // only mark the block dirty if we got a writeback command,
1279  // and leave it as is for a clean writeback
1280  if (pkt->cmd == MemCmd::WritebackDirty) {
1281  // TODO: the coherent cache can assert that the dirty bit is set
1282  blk->setCoherenceBits(CacheBlk::DirtyBit);
1283  }
1284  // if the packet does not have sharers, it is passing
1285  // writable, and we got the writeback in Modified or Exclusive
1286  // state, if not we are in the Owned or Shared state
1287  if (!pkt->hasSharers()) {
1288  blk->setCoherenceBits(CacheBlk::WritableBit);
1289  }
1290  // nothing else to do; writeback doesn't expect response
1291  assert(!pkt->needsResponse());
1292 
1293  updateBlockData(blk, pkt, has_old_data);
1294  DPRINTF(Cache, "%s new state is %s\n", __func__, blk->print());
1295  incHitCount(pkt);
1296 
1297  // When the packet metadata arrives, the tag lookup will be done while
1298  // the payload is arriving. Then the block will be ready to access as
1299  // soon as the fill is done
1300  blk->setWhenReady(clockEdge(fillLatency) + pkt->headerDelay +
1301  std::max(cyclesToTicks(tag_latency), (uint64_t)pkt->payloadDelay));
1302 
1303  return true;
1304  } else if (pkt->cmd == MemCmd::CleanEvict) {
1305  // A CleanEvict does not need to access the data array
1306  lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency);
1307 
1308  if (blk) {
1309  // Found the block in the tags, need to stop CleanEvict from
1310  // propagating further down the hierarchy. Returning true will
1311  // treat the CleanEvict like a satisfied write request and delete
1312  // it.
1313  return true;
1314  }
1315  // We didn't find the block here, propagate the CleanEvict further
1316  // down the memory hierarchy. Returning false will treat the CleanEvict
1317  // like a Writeback which could not find a replaceable block so has to
1318  // go to next level.
1319  return false;
1320  } else if (pkt->cmd == MemCmd::WriteClean) {
1321  // WriteClean handling is a special case. We can allocate a
1322  // block directly if it doesn't exist and we can update the
1323  // block immediately. The WriteClean transfers the ownership
1324  // of the block as well.
1325  assert(blkSize == pkt->getSize());
1326 
1327  const bool has_old_data = blk && blk->isValid();
1328  if (!blk) {
1329  if (pkt->writeThrough()) {
1330  // if this is a write through packet, we don't try to
1331  // allocate if the block is not present
1332  return false;
1333  } else {
1334  // a writeback that misses needs to allocate a new block
1335  blk = allocateBlock(pkt, writebacks);
1336  if (!blk) {
1337  // no replaceable block available: give up, fwd to
1338  // next level.
1339  incMissCount(pkt);
1340  return false;
1341  }
1342 
1343  blk->setCoherenceBits(CacheBlk::ReadableBit);
1344  }
1345  } else if (compressor) {
1346  // This is an overwrite to an existing block, therefore we need
1347  // to check for data expansion (i.e., block was compressed with
1348  // a smaller size, and now it doesn't fit the entry anymore).
1349  // If that is the case we might need to evict blocks.
1350  if (!updateCompressionData(blk, pkt->getConstPtr<uint64_t>(),
1351  writebacks)) {
1352  invalidateBlock(blk);
1353  return false;
1354  }
1355  }
1356 
1357  // at this point either this is a writeback or a write-through
1358  // write clean operation and the block is already in this
1359  // cache, we need to update the data and the block flags
1360  assert(blk);
1361  // TODO: the coherent cache can assert that the dirty bit is set
1362  if (!pkt->writeThrough()) {
1363  blk->setCoherenceBits(CacheBlk::DirtyBit);
1364  }
1365  // nothing else to do; writeback doesn't expect response
1366  assert(!pkt->needsResponse());
1367 
1368  updateBlockData(blk, pkt, has_old_data);
1369  DPRINTF(Cache, "%s new state is %s\n", __func__, blk->print());
1370 
1371  incHitCount(pkt);
1372 
1373  // When the packet metadata arrives, the tag lookup will be done while
1374  // the payload is arriving. Then the block will be ready to access as
1375  // soon as the fill is done
1376  blk->setWhenReady(clockEdge(fillLatency) + pkt->headerDelay +
1377  std::max(cyclesToTicks(tag_latency), (uint64_t)pkt->payloadDelay));
1378 
1379  // If this a write-through packet it will be sent to cache below
1380  return !pkt->writeThrough();
1381  } else if (blk && (pkt->needsWritable() ?
1382  blk->isSet(CacheBlk::WritableBit) :
1383  blk->isSet(CacheBlk::ReadableBit))) {
1384  // OK to satisfy access
1385  incHitCount(pkt);
1386 
1387  // Calculate access latency based on the need to access the data array
1388  if (pkt->isRead()) {
1389  lat = calculateAccessLatency(blk, pkt->headerDelay, tag_latency);
1390 
1391  // When a block is compressed, it must first be decompressed
1392  // before being read. This adds to the access latency.
1393  if (compressor) {
1394  lat += compressor->getDecompressionLatency(blk);
1395  }
1396  } else {
1397  lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency);
1398  }
1399 
1400  satisfyRequest(pkt, blk);
1401  maintainClusivity(pkt->fromCache(), blk);
1402 
1403  return true;
1404  }
1405 
1406  // Can't satisfy access normally... either no block (blk == nullptr)
1407  // or have block but need writable
1408 
1409  incMissCount(pkt);
1410 
1411  lat = calculateAccessLatency(blk, pkt->headerDelay, tag_latency);
1412 
1413  if (!blk && pkt->isLLSC() && pkt->isWrite()) {
1414  // complete miss on store conditional... just give up now
1415  pkt->req->setExtraData(0);
1416  return true;
1417  }
1418 
1419  return false;
1420 }
1421 
1422 void
1423 BaseCache::maintainClusivity(bool from_cache, CacheBlk *blk)
1424 {
1425  if (from_cache && blk && blk->isValid() &&
1426  !blk->isSet(CacheBlk::DirtyBit) && clusivity == enums::mostly_excl) {
1427  // if we have responded to a cache, and our block is still
1428  // valid, but not dirty, and this cache is mostly exclusive
1429  // with respect to the cache above, drop the block
1430  invalidateBlock(blk);
1431  }
1432 }
1433 
1434 CacheBlk*
1435 BaseCache::handleFill(PacketPtr pkt, CacheBlk *blk, PacketList &writebacks,
1436  bool allocate)
1437 {
1438  assert(pkt->isResponse());
1439  Addr addr = pkt->getAddr();
1440  bool is_secure = pkt->isSecure();
1441  const bool has_old_data = blk && blk->isValid();
1442  const std::string old_state = (debug::Cache && blk) ? blk->print() : "";
1443 
1444  // When handling a fill, we should have no writes to this line.
1445  assert(addr == pkt->getBlockAddr(blkSize));
1446  assert(!writeBuffer.findMatch(addr, is_secure));
1447 
1448  if (!blk) {
1449  // better have read new data...
1450  assert(pkt->hasData() || pkt->cmd == MemCmd::InvalidateResp);
1451 
1452  // need to do a replacement if allocating, otherwise we stick
1453  // with the temporary storage
1454  blk = allocate ? allocateBlock(pkt, writebacks) : nullptr;
1455 
1456  if (!blk) {
1457  // No replaceable block or a mostly exclusive
1458  // cache... just use temporary storage to complete the
1459  // current request and then get rid of it
1460  blk = tempBlock;
1461  tempBlock->insert(addr, is_secure);
1462  DPRINTF(Cache, "using temp block for %#llx (%s)\n", addr,
1463  is_secure ? "s" : "ns");
1464  }
1465  } else {
1466  // existing block... probably an upgrade
1467  // don't clear block status... if block is already dirty we
1468  // don't want to lose that
1469  }
1470 
1471  // Block is guaranteed to be valid at this point
1472  assert(blk->isValid());
1473  assert(blk->isSecure() == is_secure);
1474  assert(regenerateBlkAddr(blk) == addr);
1475 
1476  blk->setCoherenceBits(CacheBlk::ReadableBit);
1477 
1478  // sanity check for whole-line writes, which should always be
1479  // marked as writable as part of the fill, and then later marked
1480  // dirty as part of satisfyRequest
1481  if (pkt->cmd == MemCmd::InvalidateResp) {
1482  assert(!pkt->hasSharers());
1483  }
1484 
1485  // here we deal with setting the appropriate state of the line,
1486  // and we start by looking at the hasSharers flag, and ignore the
1487  // cacheResponding flag (normally signalling dirty data) if the
1488  // packet has sharers, thus the line is never allocated as Owned
1489  // (dirty but not writable), and always ends up being either
1490  // Shared, Exclusive or Modified, see Packet::setCacheResponding
1491  // for more details
1492  if (!pkt->hasSharers()) {
1493  // we could get a writable line from memory (rather than a
1494  // cache) even in a read-only cache, note that we set this bit
1495  // even for a read-only cache, possibly revisit this decision
1496  blk->setCoherenceBits(CacheBlk::WritableBit);
1497 
1498  // check if we got this via cache-to-cache transfer (i.e., from a
1499  // cache that had the block in Modified or Owned state)
1500  if (pkt->cacheResponding()) {
1501  // we got the block in Modified state, and invalidated the
1502  // owners copy
1503  blk->setCoherenceBits(CacheBlk::DirtyBit);
1504 
1505  gem5_assert(!isReadOnly, "Should never see dirty snoop response "
1506  "in read-only cache %s\n", name());
1507 
1508  }
1509  }
1510 
1511  DPRINTF(Cache, "Block addr %#llx (%s) moving from %s to %s\n",
1512  addr, is_secure ? "s" : "ns", old_state, blk->print());
1513 
1514  // if we got new data, copy it in (checking for a read response
1515  // and a response that has data is the same in the end)
1516  if (pkt->isRead()) {
1517  // sanity checks
1518  assert(pkt->hasData());
1519  assert(pkt->getSize() == blkSize);
1520 
1521  updateBlockData(blk, pkt, has_old_data);
1522  }
1523  // The block will be ready when the payload arrives and the fill is done
1524  blk->setWhenReady(clockEdge(fillLatency) + pkt->headerDelay +
1525  pkt->payloadDelay);
1526 
1527  return blk;
1528 }
1529 
1530 CacheBlk*
1531 BaseCache::allocateBlock(const PacketPtr pkt, PacketList &writebacks)
1532 {
1533  // Get address
1534  const Addr addr = pkt->getAddr();
1535 
1536  // Get secure bit
1537  const bool is_secure = pkt->isSecure();
1538 
1539  // Block size and compression related access latency. Only relevant if
1540  // using a compressor, otherwise there is no extra delay, and the block
1541  // is fully sized
1542  std::size_t blk_size_bits = blkSize*8;
1543  Cycles compression_lat = Cycles(0);
1544  Cycles decompression_lat = Cycles(0);
1545 
1546  // If a compressor is being used, it is called to compress data before
1547  // insertion. Although in Gem5 the data is stored uncompressed, even if a
1548  // compressor is used, the compression/decompression methods are called to
1549  // calculate the amount of extra cycles needed to read or write compressed
1550  // blocks.
1551  if (compressor && pkt->hasData()) {
1552  const auto comp_data = compressor->compress(
1553  pkt->getConstPtr<uint64_t>(), compression_lat, decompression_lat);
1554  blk_size_bits = comp_data->getSizeBits();
1555  }
1556 
1557  // Find replacement victim
1558  std::vector<CacheBlk*> evict_blks;
1559  CacheBlk *victim = tags->findVictim(addr, is_secure, blk_size_bits,
1560  evict_blks);
1561 
1562  // It is valid to return nullptr if there is no victim
1563  if (!victim)
1564  return nullptr;
1565 
1566  // Print victim block's information
1567  DPRINTF(CacheRepl, "Replacement victim: %s\n", victim->print());
1568 
1569  // Try to evict blocks; if it fails, give up on allocation
1570  if (!handleEvictions(evict_blks, writebacks)) {
1571  return nullptr;
1572  }
1573 
1574  // Insert new block at victimized entry
1575  tags->insertBlock(pkt, victim);
1576 
1577  // If using a compressor, set compression data. This must be done after
1578  // insertion, as the compression bit may be set.
1579  if (compressor) {
1580  compressor->setSizeBits(victim, blk_size_bits);
1581  compressor->setDecompressionLatency(victim, decompression_lat);
1582  }
1583 
1584  return victim;
1585 }
1586 
1587 void
1588 BaseCache::invalidateBlock(CacheBlk *blk)
1589 {
1590  // If block is still marked as prefetched, then it hasn't been used
1591  if (blk->wasPrefetched()) {
1592  prefetcher->prefetchUnused();
1593  }
1594 
1595  // Notify that the data contents for this address are no longer present
1596  updateBlockData(blk, nullptr, blk->isValid());
1597 
1598  // If handling a block present in the Tags, let it do its invalidation
1599  // process, which will update stats and invalidate the block itself
1600  if (blk != tempBlock) {
1601  tags->invalidate(blk);
1602  } else {
1603  tempBlock->invalidate();
1604  }
1605 }
1606 
1607 void
1608 BaseCache::evictBlock(CacheBlk *blk, PacketList &writebacks)
1609 {
1610  PacketPtr pkt = evictBlock(blk);
1611  if (pkt) {
1612  writebacks.push_back(pkt);
1613  }
1614 }
1615 
1616 PacketPtr
1617 BaseCache::writebackBlk(CacheBlk *blk)
1618 {
1619  gem5_assert(!isReadOnly || writebackClean,
1620  "Writeback from read-only cache");
1621  assert(blk && blk->isValid() &&
1622  (blk->isSet(CacheBlk::DirtyBit) || writebackClean));
1623 
1624  stats.writebacks[Request::wbRequestorId]++;
1625 
1626  RequestPtr req = std::make_shared<Request>(
1627  regenerateBlkAddr(blk), blkSize, 0, Request::wbRequestorId);
1628 
1629  if (blk->isSecure())
1630  req->setFlags(Request::SECURE);
1631 
1632  req->taskId(blk->getTaskId());
1633 
1634  PacketPtr pkt =
1635  new Packet(req, blk->isSet(CacheBlk::DirtyBit) ?
1636  MemCmd::WritebackDirty : MemCmd::WritebackClean);
1637 
1638  DPRINTF(Cache, "Create Writeback %s writable: %d, dirty: %d\n",
1639  pkt->print(), blk->isSet(CacheBlk::WritableBit),
1640  blk->isSet(CacheBlk::DirtyBit));
1641 
1642  if (blk->isSet(CacheBlk::WritableBit)) {
1643  // not asserting shared means we pass the block in modified
1644  // state, mark our own block non-writeable
1645  blk->clearCoherenceBits(CacheBlk::WritableBit);
1646  } else {
1647  // we are in the Owned state, tell the receiver
1648  pkt->setHasSharers();
1649  }
1650 
1651  // make sure the block is not marked dirty
1652  blk->clearCoherenceBits(CacheBlk::DirtyBit);
1653 
1654  pkt->allocate();
1655  pkt->setDataFromBlock(blk->data, blkSize);
1656 
1657  // When a block is compressed, it must first be decompressed before being
1658  // sent for writeback.
1659  if (compressor) {
1660  pkt->payloadDelay = compressor->getDecompressionLatency(blk);
1661  }
1662 
1663  return pkt;
1664 }
1665 
1666 PacketPtr
1667 BaseCache::writecleanBlk(CacheBlk *blk, Request::Flags dest, PacketId id)
1668 {
1669  RequestPtr req = std::make_shared<Request>(
1670  regenerateBlkAddr(blk), blkSize, 0, Request::wbRequestorId);
1671 
1672  if (blk->isSecure()) {
1673  req->setFlags(Request::SECURE);
1674  }
1675  req->taskId(blk->getTaskId());
1676 
1677  PacketPtr pkt = new Packet(req, MemCmd::WriteClean, blkSize, id);
1678 
1679  if (dest) {
1680  req->setFlags(dest);
1681  pkt->setWriteThrough();
1682  }
1683 
1684  DPRINTF(Cache, "Create %s writable: %d, dirty: %d\n", pkt->print(),
1685  blk->isSet(CacheBlk::WritableBit), blk->isSet(CacheBlk::DirtyBit));
1686 
1687  if (blk->isSet(CacheBlk::WritableBit)) {
1688  // not asserting shared means we pass the block in modified
1689  // state, mark our own block non-writeable
1690  blk->clearCoherenceBits(CacheBlk::WritableBit);
1691  } else {
1692  // we are in the Owned state, tell the receiver
1693  pkt->setHasSharers();
1694  }
1695 
1696  // make sure the block is not marked dirty
1697  blk->clearCoherenceBits(CacheBlk::DirtyBit);
1698 
1699  pkt->allocate();
1700  pkt->setDataFromBlock(blk->data, blkSize);
1701 
1702  // When a block is compressed, it must first be decompressed before being
1703  // sent for writeback.
1704  if (compressor) {
1705  pkt->payloadDelay = compressor->getDecompressionLatency(blk);
1706  }
1707 
1708  return pkt;
1709 }
1710 
1711 
1712 void
1713 BaseCache::memWriteback()
1714 {
1715  tags->forEachBlk([this](CacheBlk &blk) { writebackVisitor(blk); });
1716 }
1717 
1718 void
1719 BaseCache::memInvalidate()
1720 {
1721  tags->forEachBlk([this](CacheBlk &blk) { invalidateVisitor(blk); });
1722 }
1723 
1724 bool
1725 BaseCache::isDirty() const
1726 {
1727  return tags->anyBlk([](CacheBlk &blk) {
1728  return blk.isSet(CacheBlk::DirtyBit); });
1729 }
1730 
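memWriteback(), memInvalidate() and isDirty() all drive the tag store through per-block visitors rather than touching the block array directly. A minimal standalone sketch of the same pattern, with a toy block array standing in for BaseTags (not gem5 code):

#include <algorithm>
#include <array>
#include <cstdio>
#include <functional>

struct ToyBlk { bool dirty; };

struct ToyTags
{
    std::array<ToyBlk, 4> blks{{{false}, {true}, {false}, {true}}};

    // Apply a visitor to every block, like tags->forEachBlk() above.
    void forEachBlk(const std::function<void(ToyBlk &)> &visitor)
    {
        for (auto &blk : blks)
            visitor(blk);
    }

    // True if any block satisfies the predicate, like tags->anyBlk().
    bool anyBlk(const std::function<bool(ToyBlk &)> &pred)
    {
        return std::any_of(blks.begin(), blks.end(), pred);
    }
};

int main()
{
    ToyTags tags;
    bool was_dirty = tags.anyBlk([](ToyBlk &b) { return b.dirty; });
    tags.forEachBlk([](ToyBlk &b) { b.dirty = false; }); // "write back" all
    bool now_dirty = tags.anyBlk([](ToyBlk &b) { return b.dirty; });
    std::printf("was dirty: %d, dirty now: %d\n", was_dirty, now_dirty); // 1 0
}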
1731 bool
1732 BaseCache::coalesce() const
1733 {
1734  return writeAllocator && writeAllocator->coalesce();
1735 }
1736 
1737 void
1738 BaseCache::writebackVisitor(CacheBlk &blk)
1739 {
1740  if (blk.isSet(CacheBlk::DirtyBit)) {
1741  assert(blk.isValid());
1742 
1743  RequestPtr request = std::make_shared<Request>(
1744  regenerateBlkAddr(&blk), blkSize, 0, Request::funcRequestorId);
1745 
1746  request->taskId(blk.getTaskId());
1747  if (blk.isSecure()) {
1748  request->setFlags(Request::SECURE);
1749  }
1750 
1751  Packet packet(request, MemCmd::WriteReq);
1752  packet.dataStatic(blk.data);
1753 
1754  memSidePort.sendFunctional(&packet);
1755 
1757  }
1758 }
1759 
1760 void
1761 BaseCache::invalidateVisitor(CacheBlk &blk)
1762 {
1763  if (blk.isSet(CacheBlk::DirtyBit))
1764  warn_once("Invalidating dirty cache lines. " \
1765  "Expect things to break.\n");
1766 
1767  if (blk.isValid()) {
1768  assert(!blk.isSet(CacheBlk::DirtyBit));
1769  invalidateBlock(&blk);
1770  }
1771 }
1772 
1773 Tick
1774 BaseCache::nextQueueReadyTime() const
1775 {
1776  Tick nextReady = std::min(mshrQueue.nextReadyTime(),
1777  writeBuffer.nextReadyTime());
1778 
1779  // Don't signal prefetch ready time if no MSHRs available
1780  // Will signal once enough MSHRs are deallocated
1781  if (prefetcher && mshrQueue.canPrefetch() && !isBlocked()) {
1782  nextReady = std::min(nextReady,
1783  prefetcher->nextPrefetchReadyTime());
1784  }
1785 
1786  return nextReady;
1787 }
1788 
1789 
1790 bool
1791 BaseCache::sendMSHRQueuePacket(MSHR* mshr)
1792 {
1793  assert(mshr);
1794 
1795  // use request from 1st target
1796  PacketPtr tgt_pkt = mshr->getTarget()->pkt;
1797 
1798  DPRINTF(Cache, "%s: MSHR %s\n", __func__, tgt_pkt->print());
1799 
1800  // if the cache is in write coalescing mode or (additionally) in
1801  // no allocation mode, and we have a write packet with an MSHR
1802  // that is not a whole-line write (due to incompatible flags etc),
1803  // then reset the write mode
1804  if (writeAllocator && writeAllocator->coalesce() && tgt_pkt->isWrite()) {
1805  if (!mshr->isWholeLineWrite()) {
1806  // if we are currently write coalescing, hold on the
1807  // MSHR as many cycles extra as we need to completely
1808  // write a cache line
1809  if (writeAllocator->delay(mshr->blkAddr)) {
1810  Tick delay = blkSize / tgt_pkt->getSize() * clockPeriod();
1811  DPRINTF(CacheVerbose, "Delaying pkt %s %llu ticks to allow "
1812  "for write coalescing\n", tgt_pkt->print(), delay);
1813  mshrQueue.delay(mshr, delay);
1814  return false;
1815  } else {
1816  writeAllocator->reset();
1817  }
1818  } else {
1819  writeAllocator->resetDelay(mshr->blkAddr);
1820  }
1821  }
1822 
1823  CacheBlk *blk = tags->findBlock(mshr->blkAddr, mshr->isSecure);
1824 
1825  // either a prefetch that is not present upstream, or a normal
1826  // MSHR request, proceed to get the packet to send downstream
1827  PacketPtr pkt = createMissPacket(tgt_pkt, blk, mshr->needsWritable(),
1828  mshr->isWholeLineWrite());
1829 
1830  mshr->isForward = (pkt == nullptr);
1831 
1832  if (mshr->isForward) {
1833  // not a cache block request, but a response is expected
1834  // make copy of current packet to forward, keep current
1835  // copy for response handling
1836  pkt = new Packet(tgt_pkt, false, true);
1837  assert(!pkt->isWrite());
1838  }
1839 
1840  // play it safe and append (rather than set) the sender state,
1841  // as forwarded packets may already have existing state
1842  pkt->pushSenderState(mshr);
1843 
1844  if (pkt->isClean() && blk && blk->isSet(CacheBlk::DirtyBit)) {
1845  // A cache clean operation is looking for a dirty block. Mark
1846  // the packet so that the destination xbar can determine that
1847  // there will be a follow-up write packet as well.
1848  pkt->setSatisfied();
1849  }
1850 
1851  if (!memSidePort.sendTimingReq(pkt)) {
1852  // we are awaiting a retry, but we
1853  // delete the packet and will be creating a new packet
1854  // when we get the opportunity
1855  delete pkt;
1856 
1857  // note that we have now masked any requestBus and
1858  // schedSendEvent (we will wait for a retry before
1859  // doing anything), and this is so even if we do not
1860  // care about this packet and might override it before
1861  // it gets retried
1862  return true;
1863  } else {
1864  // As part of the call to sendTimingReq the packet is
1865  // forwarded to all neighbouring caches (and any caches
1866  // above them) as a snoop. Thus at this point we know if
1867  // any of the neighbouring caches are responding, and if
1868  // so, we know it is dirty, and we can determine if it is
1869  // being passed as Modified, making our MSHR the ordering
1870  // point
1871  bool pending_modified_resp = !pkt->hasSharers() &&
1872  pkt->cacheResponding();
1873  markInService(mshr, pending_modified_resp);
1874 
1875  if (pkt->isClean() && blk && blk->isSet(CacheBlk::DirtyBit)) {
1876  // A cache clean operation is looking for a dirty
1877  // block. If a dirty block is encountered a WriteClean
1878  // will update any copies to the path to the memory
1879  // until the point of reference.
1880  DPRINTF(CacheVerbose, "%s: packet %s found block: %s\n",
1881  __func__, pkt->print(), blk->print());
1882  PacketPtr wb_pkt = writecleanBlk(blk, pkt->req->getDest(),
1883  pkt->id);
1884  PacketList writebacks;
1885  writebacks.push_back(wb_pkt);
1886  doWritebacks(writebacks, 0);
1887  }
1888 
1889  return false;
1890  }
1891 }
1892 
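The hold time computed above stretches the MSHR by one clock period per write needed to cover the whole cache line. For example, with an assumed 64-byte line, 8-byte writes and a 500-tick clock period (example values, not taken from this file), the entry is delayed 4000 ticks. A minimal standalone sketch of the same arithmetic:

#include <cstdint>
#include <cstdio>

int main()
{
    const std::uint64_t blk_size = 64;      // assumed line size in bytes
    const std::uint64_t write_size = 8;     // assumed size of each write
    const std::uint64_t clock_period = 500; // assumed ticks per cycle

    // Same arithmetic as "blkSize / tgt_pkt->getSize() * clockPeriod()".
    std::uint64_t delay = blk_size / write_size * clock_period;
    std::printf("delay MSHR by %llu ticks\n",
                (unsigned long long)delay); // 4000
}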
1893 bool
1894 BaseCache::sendWriteQueuePacket(WriteQueueEntry* wq_entry)
1895 {
1896  assert(wq_entry);
1897 
1898  // always a single target for write queue entries
1899  PacketPtr tgt_pkt = wq_entry->getTarget()->pkt;
1900 
1901  DPRINTF(Cache, "%s: write %s\n", __func__, tgt_pkt->print());
1902 
1903  // forward as is, both for evictions and uncacheable writes
1904  if (!memSidePort.sendTimingReq(tgt_pkt)) {
1905  // note that we have now masked any requestBus and
1906  // schedSendEvent (we will wait for a retry before
1907  // doing anything), and this is so even if we do not
1908  // care about this packet and might override it before
1909  // it gets retried
1910  return true;
1911  } else {
1912  markInService(wq_entry);
1913  return false;
1914  }
1915 }
1916 
1917 void
1918 BaseCache::serialize(CheckpointOut &cp) const
1919 {
1920  bool dirty(isDirty());
1921 
1922  if (dirty) {
1923  warn("*** The cache still contains dirty data. ***\n");
1924  warn(" Make sure to drain the system using the correct flags.\n");
1925  warn(" This checkpoint will not restore correctly " \
1926  "and dirty data in the cache will be lost!\n");
1927  }
1928 
1929  // Since we don't checkpoint the data in the cache, any dirty data
1930  // will be lost when restoring from a checkpoint of a system that
1931  // wasn't drained properly. Flag the checkpoint as invalid if the
1932  // cache contains dirty data.
1933  bool bad_checkpoint(dirty);
1934  SERIALIZE_SCALAR(bad_checkpoint);
1935 }
1936 
1937 void
1938 BaseCache::unserialize(CheckpointIn &cp)
1939 {
1940  bool bad_checkpoint;
1941  UNSERIALIZE_SCALAR(bad_checkpoint);
1942  if (bad_checkpoint) {
1943  fatal("Restoring from checkpoints with dirty caches is not "
1944  "supported in the classic memory system. Please remove any "
1945  "caches or drain them properly before taking checkpoints.\n");
1946  }
1947 }
1948 
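A minimal standalone sketch of the bad_checkpoint round trip above, using a plain stream in place of gem5's checkpoint and SERIALIZE_SCALAR/UNSERIALIZE_SCALAR (not gem5 code): a checkpoint taken while the cache held dirty data is flagged, and restoring it simply refuses to continue.

#include <cstdlib>
#include <iostream>
#include <sstream>
#include <string>

// Sketch of the serialize() logic: record whether dirty data was dropped.
void serializeCache(std::ostream &cp, bool is_dirty)
{
    if (is_dirty)
        std::cerr << "warning: cache still contains dirty data\n";
    cp << "bad_checkpoint=" << is_dirty << "\n";
}

// Sketch of the unserialize() logic: refuse to restore a bad checkpoint.
void unserializeCache(std::istream &cp)
{
    std::string key;
    bool bad_checkpoint;
    std::getline(cp, key, '=');
    cp >> bad_checkpoint;
    if (bad_checkpoint) {
        std::cerr << "fatal: dirty caches cannot be restored\n";
        std::exit(1);
    }
}

int main()
{
    std::stringstream cp;
    serializeCache(cp, false); // a properly drained cache
    unserializeCache(cp);      // restores fine
}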
1949 
1950 BaseCache::CacheCmdStats::CacheCmdStats(BaseCache &c,
1951  const std::string &name)
1952  : statistics::Group(&c, name.c_str()), cache(c),
1953  ADD_STAT(hits, statistics::units::Count::get(),
1954  ("number of " + name + " hits").c_str()),
1955  ADD_STAT(misses, statistics::units::Count::get(),
1956  ("number of " + name + " misses").c_str()),
1957  ADD_STAT(hitLatency, statistics::units::Tick::get(),
1958  ("number of " + name + " hit ticks").c_str()),
1959  ADD_STAT(missLatency, statistics::units::Tick::get(),
1960  ("number of " + name + " miss ticks").c_str()),
1961  ADD_STAT(accesses, statistics::units::Count::get(),
1962  ("number of " + name + " accesses(hits+misses)").c_str()),
1963  ADD_STAT(missRate, statistics::units::Ratio::get(),
1964  ("miss rate for " + name + " accesses").c_str()),
1965  ADD_STAT(avgMissLatency, statistics::units::Rate<
1966  statistics::units::Tick, statistics::units::Count>::get(),
1967  ("average " + name + " miss latency").c_str()),
1968  ADD_STAT(mshrHits, statistics::units::Count::get(),
1969  ("number of " + name + " MSHR hits").c_str()),
1970  ADD_STAT(mshrMisses, statistics::units::Count::get(),
1971  ("number of " + name + " MSHR misses").c_str()),
1972  ADD_STAT(mshrUncacheable, statistics::units::Count::get(),
1973  ("number of " + name + " MSHR uncacheable").c_str()),
1974  ADD_STAT(mshrMissLatency, statistics::units::Tick::get(),
1975  ("number of " + name + " MSHR miss ticks").c_str()),
1976  ADD_STAT(mshrUncacheableLatency, statistics::units::Tick::get(),
1977  ("number of " + name + " MSHR uncacheable ticks").c_str()),
1978  ADD_STAT(mshrMissRate, statistics::units::Ratio::get(),
1979  ("mshr miss rate for " + name + " accesses").c_str()),
1980  ADD_STAT(avgMshrMissLatency, statistics::units::Rate<
1981  statistics::units::Tick, statistics::units::Count>::get(),
1982  ("average " + name + " mshr miss latency").c_str()),
1983  ADD_STAT(avgMshrUncacheableLatency, statistics::units::Rate<
1984  statistics::units::Tick, statistics::units::Count>::get(),
1985  ("average " + name + " mshr uncacheable latency").c_str())
1986 {
1987 }
1988 
1989 void
1990 BaseCache::CacheCmdStats::regStatsFromParent()
1991 {
1992  using namespace statistics;
1993 
1994  statistics::Group::regStats();
1995  System *system = cache.system;
1996  const auto max_requestors = system->maxRequestors();
1997 
1998  hits
1999  .init(max_requestors)
2000  .flags(total | nozero | nonan)
2001  ;
2002  for (int i = 0; i < max_requestors; i++) {
2003  hits.subname(i, system->getRequestorName(i));
2004  }
2005 
2006  // Miss statistics
2007  misses
2008  .init(max_requestors)
2009  .flags(total | nozero | nonan)
2010  ;
2011  for (int i = 0; i < max_requestors; i++) {
2012  misses.subname(i, system->getRequestorName(i));
2013  }
2014 
2015  // Hit latency statistics
2016  hitLatency
2017  .init(max_requestors)
2018  .flags(total | nozero | nonan)
2019  ;
2020  for (int i = 0; i < max_requestors; i++) {
2021  hitLatency.subname(i, system->getRequestorName(i));
2022  }
2023 
2024  // Miss latency statistics
2025  missLatency
2026  .init(max_requestors)
2027  .flags(total | nozero | nonan)
2028  ;
2029  for (int i = 0; i < max_requestors; i++) {
2030  missLatency.subname(i, system->getRequestorName(i));
2031  }
2032 
2033  // access formulas
2034  accesses.flags(total | nozero | nonan);
2035  accesses = hits + misses;
2036  for (int i = 0; i < max_requestors; i++) {
2037  accesses.subname(i, system->getRequestorName(i));
2038  }
2039 
2040  // miss rate formulas
2041  missRate.flags(total | nozero | nonan);
2042  missRate = misses / accesses;
2043  for (int i = 0; i < max_requestors; i++) {
2044  missRate.subname(i, system->getRequestorName(i));
2045  }
2046 
2047  // miss latency formulas
2048  avgMissLatency.flags(total | nozero | nonan);
2049  avgMissLatency = missLatency / misses;
2050  for (int i = 0; i < max_requestors; i++) {
2051  avgMissLatency.subname(i, system->getRequestorName(i));
2052  }
2053 
2054  // MSHR statistics
2055  // MSHR hit statistics
2056  mshrHits
2057  .init(max_requestors)
2058  .flags(total | nozero | nonan)
2059  ;
2060  for (int i = 0; i < max_requestors; i++) {
2061  mshrHits.subname(i, system->getRequestorName(i));
2062  }
2063 
2064  // MSHR miss statistics
2065  mshrMisses
2066  .init(max_requestors)
2067  .flags(total | nozero | nonan)
2068  ;
2069  for (int i = 0; i < max_requestors; i++) {
2070  mshrMisses.subname(i, system->getRequestorName(i));
2071  }
2072 
2073  // MSHR miss latency statistics
2074  mshrMissLatency
2075  .init(max_requestors)
2076  .flags(total | nozero | nonan)
2077  ;
2078  for (int i = 0; i < max_requestors; i++) {
2079  mshrMissLatency.subname(i, system->getRequestorName(i));
2080  }
2081 
2082  // MSHR uncacheable statistics
2083  mshrUncacheable
2084  .init(max_requestors)
2085  .flags(total | nozero | nonan)
2086  ;
2087  for (int i = 0; i < max_requestors; i++) {
2088  mshrUncacheable.subname(i, system->getRequestorName(i));
2089  }
2090 
2091  // MSHR miss latency statistics
2092  mshrUncacheableLatency
2093  .init(max_requestors)
2094  .flags(total | nozero | nonan)
2095  ;
2096  for (int i = 0; i < max_requestors; i++) {
2097  mshrUncacheableLatency.subname(i, system->getRequestorName(i));
2098  }
2099 
2100  // MSHR miss rate formulas
2101  mshrMissRate.flags(total | nozero | nonan);
2102  mshrMissRate = mshrMisses / accesses;
2103 
2104  for (int i = 0; i < max_requestors; i++) {
2105  mshrMissRate.subname(i, system->getRequestorName(i));
2106  }
2107 
2108  // mshrMiss latency formulas
2109  avgMshrMissLatency.flags(total | nozero | nonan);
2110  avgMshrMissLatency = mshrMissLatency / mshrMisses;
2111  for (int i = 0; i < max_requestors; i++) {
2112  avgMshrMissLatency.subname(i, system->getRequestorName(i));
2113  }
2114 
2115  // mshrUncacheable latency formulas
2116  avgMshrUncacheableLatency.flags(total | nozero | nonan);
2117  avgMshrUncacheableLatency = mshrUncacheableLatency / mshrUncacheable;
2118  for (int i = 0; i < max_requestors; i++) {
2119  avgMshrUncacheableLatency.subname(i, system->getRequestorName(i));
2120  }
2121 }
2122 
2123 BaseCache::CacheStats::CacheStats(BaseCache &c)
2124  : statistics::Group(&c), cache(c),
2125 
2126  ADD_STAT(demandHits, statistics::units::Count::get(),
2127  "number of demand (read+write) hits"),
2128  ADD_STAT(overallHits, statistics::units::Count::get(),
2129  "number of overall hits"),
2130  ADD_STAT(demandHitLatency, statistics::units::Tick::get(),
2131  "number of demand (read+write) hit ticks"),
2132  ADD_STAT(overallHitLatency, statistics::units::Tick::get(),
2133  "number of overall hit ticks"),
2134  ADD_STAT(demandMisses, statistics::units::Count::get(),
2135  "number of demand (read+write) misses"),
2136  ADD_STAT(overallMisses, statistics::units::Count::get(),
2137  "number of overall misses"),
2138  ADD_STAT(demandMissLatency, statistics::units::Tick::get(),
2139  "number of demand (read+write) miss ticks"),
2140  ADD_STAT(overallMissLatency, statistics::units::Tick::get(),
2141  "number of overall miss ticks"),
2142  ADD_STAT(demandAccesses, statistics::units::Count::get(),
2143  "number of demand (read+write) accesses"),
2144  ADD_STAT(overallAccesses, statistics::units::Count::get(),
2145  "number of overall (read+write) accesses"),
2146  ADD_STAT(demandMissRate, statistics::units::Ratio::get(),
2147  "miss rate for demand accesses"),
2148  ADD_STAT(overallMissRate, statistics::units::Ratio::get(),
2149  "miss rate for overall accesses"),
2150  ADD_STAT(demandAvgMissLatency, statistics::units::Rate<
2151  statistics::units::Cycle, statistics::units::Count>::get(),
2152  "average demand miss latency"),
2153  ADD_STAT(overallAvgMissLatency, statistics::units::Rate<
2154  statistics::units::Cycle, statistics::units::Count>::get(),
2155  "average overall miss latency"),
2156  ADD_STAT(blockedCycles, statistics::units::Cycle::get(),
2157  "number of cycles access was blocked"),
2158  ADD_STAT(blockedCauses, statistics::units::Count::get(),
2159  "number of times access was blocked"),
2160  ADD_STAT(avgBlocked, statistics::units::Rate<
2161  statistics::units::Cycle, statistics::units::Count>::get(),
2162  "average number of cycles each access was blocked"),
2163  ADD_STAT(writebacks, statistics::units::Count::get(),
2164  "number of writebacks"),
2165  ADD_STAT(demandMshrHits, statistics::units::Count::get(),
2166  "number of demand (read+write) MSHR hits"),
2167  ADD_STAT(overallMshrHits, statistics::units::Count::get(),
2168  "number of overall MSHR hits"),
2169  ADD_STAT(demandMshrMisses, statistics::units::Count::get(),
2170  "number of demand (read+write) MSHR misses"),
2171  ADD_STAT(overallMshrMisses, statistics::units::Count::get(),
2172  "number of overall MSHR misses"),
2173  ADD_STAT(overallMshrUncacheable, statistics::units::Count::get(),
2174  "number of overall MSHR uncacheable misses"),
2175  ADD_STAT(demandMshrMissLatency, statistics::units::Tick::get(),
2176  "number of demand (read+write) MSHR miss ticks"),
2177  ADD_STAT(overallMshrMissLatency, statistics::units::Tick::get(),
2178  "number of overall MSHR miss ticks"),
2179  ADD_STAT(overallMshrUncacheableLatency, statistics::units::Tick::get(),
2180  "number of overall MSHR uncacheable ticks"),
2181  ADD_STAT(demandMshrMissRate, statistics::units::Ratio::get(),
2182  "mshr miss ratio for demand accesses"),
2183  ADD_STAT(overallMshrMissRate, statistics::units::Ratio::get(),
2184  "mshr miss ratio for overall accesses"),
2185  ADD_STAT(demandAvgMshrMissLatency, statistics::units::Rate<
2186  statistics::units::Cycle, statistics::units::Count>::get(),
2187  "average demand mshr miss latency"),
2188  ADD_STAT(overallAvgMshrMissLatency, statistics::units::Rate<
2189  statistics::units::Cycle, statistics::units::Count>::get(),
2190  "average overall mshr miss latency"),
2191  ADD_STAT(overallAvgMshrUncacheableLatency, statistics::units::Rate<
2192  statistics::units::Cycle, statistics::units::Count>::get(),
2193  "average overall mshr uncacheable latency"),
2194  ADD_STAT(replacements, statistics::units::Count::get(),
2195  "number of replacements"),
2196  ADD_STAT(dataExpansions, statistics::units::Count::get(),
2197  "number of data expansions"),
2198  ADD_STAT(dataContractions, statistics::units::Count::get(),
2199  "number of data contractions"),
2200  cmd(MemCmd::NUM_MEM_CMDS)
2201 {
2202  for (int idx = 0; idx < MemCmd::NUM_MEM_CMDS; ++idx)
2203  cmd[idx].reset(new CacheCmdStats(c, MemCmd(idx).toString()));
2204 }
2205 
2206 void
2207 BaseCache::CacheStats::regStats()
2208 {
2209  using namespace statistics;
2210 
2211  statistics::Group::regStats();
2212 
2213  System *system = cache.system;
2214  const auto max_requestors = system->maxRequestors();
2215 
2216  for (auto &cs : cmd)
2217  cs->regStatsFromParent();
2218 
2219 // These macros make it easier to sum the right subset of commands and
2220 // to change the subset of commands that are considered "demand" vs
2221 // "non-demand"
2222 #define SUM_DEMAND(s) \
2223  (cmd[MemCmd::ReadReq]->s + cmd[MemCmd::WriteReq]->s + \
2224  cmd[MemCmd::WriteLineReq]->s + cmd[MemCmd::ReadExReq]->s + \
2225  cmd[MemCmd::ReadCleanReq]->s + cmd[MemCmd::ReadSharedReq]->s)
2226 
2227 // should writebacks be included here? prior code was inconsistent...
2228 #define SUM_NON_DEMAND(s) \
2229  (cmd[MemCmd::SoftPFReq]->s + cmd[MemCmd::HardPFReq]->s + \
2230  cmd[MemCmd::SoftPFExReq]->s)
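// For example, SUM_DEMAND(hits) sums the per-requestor hit vectors of the
// six demand commands (ReadReq, WriteReq, WriteLineReq, ReadExReq,
// ReadCleanReq and ReadSharedReq), while SUM_NON_DEMAND(hits) covers the
// three prefetch request commands; the overall* formulas below add the two
// groups together.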
2231 
2232  demandHits.flags(total | nozero | nonan);
2233  demandHits = SUM_DEMAND(hits);
2234  for (int i = 0; i < max_requestors; i++) {
2235  demandHits.subname(i, system->getRequestorName(i));
2236  }
2237 
2238  overallHits.flags(total | nozero | nonan);
2239  overallHits = demandHits + SUM_NON_DEMAND(hits);
2240  for (int i = 0; i < max_requestors; i++) {
2241  overallHits.subname(i, system->getRequestorName(i));
2242  }
2243 
2244  demandMisses.flags(total | nozero | nonan);
2245  demandMisses = SUM_DEMAND(misses);
2246  for (int i = 0; i < max_requestors; i++) {
2247  demandMisses.subname(i, system->getRequestorName(i));
2248  }
2249 
2250  overallMisses.flags(total | nozero | nonan);
2251  overallMisses = demandMisses + SUM_NON_DEMAND(misses);
2252  for (int i = 0; i < max_requestors; i++) {
2253  overallMisses.subname(i, system->getRequestorName(i));
2254  }
2255 
2256  demandMissLatency.flags(total | nozero | nonan);
2257  demandMissLatency = SUM_DEMAND(missLatency);
2258  for (int i = 0; i < max_requestors; i++) {
2259  demandMissLatency.subname(i, system->getRequestorName(i));
2260  }
2261 
2262  overallMissLatency.flags(total | nozero | nonan);
2263  overallMissLatency = demandMissLatency + SUM_NON_DEMAND(missLatency);
2264  for (int i = 0; i < max_requestors; i++) {
2265  overallMissLatency.subname(i, system->getRequestorName(i));
2266  }
2267 
2268  demandHitLatency.flags(total | nozero | nonan);
2269  demandHitLatency = SUM_DEMAND(hitLatency);
2270  for (int i = 0; i < max_requestors; i++) {
2271  demandHitLatency.subname(i, system->getRequestorName(i));
2272  }
2273  overallHitLatency.flags(total | nozero | nonan);
2274  overallHitLatency = demandHitLatency + SUM_NON_DEMAND(hitLatency);
2275  for (int i = 0; i < max_requestors; i++) {
2276  overallHitLatency.subname(i, system->getRequestorName(i));
2277  }
2278 
2279  demandAccesses.flags(total | nozero | nonan);
2280  demandAccesses = demandHits + demandMisses;
2281  for (int i = 0; i < max_requestors; i++) {
2282  demandAccesses.subname(i, system->getRequestorName(i));
2283  }
2284 
2285  overallAccesses.flags(total | nozero | nonan);
2286  overallAccesses = overallHits + overallMisses;
2287  for (int i = 0; i < max_requestors; i++) {
2288  overallAccesses.subname(i, system->getRequestorName(i));
2289  }
2290 
2291  demandMissRate.flags(total | nozero | nonan);
2292  demandMissRate = demandMisses / demandAccesses;
2293  for (int i = 0; i < max_requestors; i++) {
2294  demandMissRate.subname(i, system->getRequestorName(i));
2295  }
2296 
2297  overallMissRate.flags(total | nozero | nonan);
2298  overallMissRate = overallMisses / overallAccesses;
2299  for (int i = 0; i < max_requestors; i++) {
2300  overallMissRate.subname(i, system->getRequestorName(i));
2301  }
2302 
2303  demandAvgMissLatency.flags(total | nozero | nonan);
2304  demandAvgMissLatency = demandMissLatency / demandMisses;
2305  for (int i = 0; i < max_requestors; i++) {
2306  demandAvgMissLatency.subname(i, system->getRequestorName(i));
2307  }
2308 
2309  overallAvgMissLatency.flags(total | nozero | nonan);
2310  overallAvgMissLatency = overallMissLatency / overallMisses;
2311  for (int i = 0; i < max_requestors; i++) {
2312  overallAvgMissLatency.subname(i, system->getRequestorName(i));
2313  }
2314 
2315  blockedCycles.init(NUM_BLOCKED_CAUSES);
2316  blockedCycles
2317  .subname(Blocked_NoMSHRs, "no_mshrs")
2318  .subname(Blocked_NoTargets, "no_targets")
2319  ;
2320 
2321 
2322  blockedCauses.init(NUM_BLOCKED_CAUSES);
2323  blockedCauses
2324  .subname(Blocked_NoMSHRs, "no_mshrs")
2325  .subname(Blocked_NoTargets, "no_targets")
2326  ;
2327 
2328  avgBlocked
2329  .subname(Blocked_NoMSHRs, "no_mshrs")
2330  .subname(Blocked_NoTargets, "no_targets")
2331  ;
2332  avgBlocked = blockedCycles / blockedCauses;
2333 
2334  writebacks
2335  .init(max_requestors)
2336  .flags(total | nozero | nonan)
2337  ;
2338  for (int i = 0; i < max_requestors; i++) {
2339  writebacks.subname(i, system->getRequestorName(i));
2340  }
2341 
2342  demandMshrHits.flags(total | nozero | nonan);
2343  demandMshrHits = SUM_DEMAND(mshrHits);
2344  for (int i = 0; i < max_requestors; i++) {
2345  demandMshrHits.subname(i, system->getRequestorName(i));
2346  }
2347 
2348  overallMshrHits.flags(total | nozero | nonan);
2349  overallMshrHits = demandMshrHits + SUM_NON_DEMAND(mshrHits);
2350  for (int i = 0; i < max_requestors; i++) {
2351  overallMshrHits.subname(i, system->getRequestorName(i));
2352  }
2353 
2354  demandMshrMisses.flags(total | nozero | nonan);
2355  demandMshrMisses = SUM_DEMAND(mshrMisses);
2356  for (int i = 0; i < max_requestors; i++) {
2357  demandMshrMisses.subname(i, system->getRequestorName(i));
2358  }
2359 
2360  overallMshrMisses.flags(total | nozero | nonan);
2361  overallMshrMisses = demandMshrMisses + SUM_NON_DEMAND(mshrMisses);
2362  for (int i = 0; i < max_requestors; i++) {
2363  overallMshrMisses.subname(i, system->getRequestorName(i));
2364  }
2365 
2366  demandMshrMissLatency.flags(total | nozero | nonan);
2367  demandMshrMissLatency = SUM_DEMAND(mshrMissLatency);
2368  for (int i = 0; i < max_requestors; i++) {
2369  demandMshrMissLatency.subname(i, system->getRequestorName(i));
2370  }
2371 
2372  overallMshrMissLatency.flags(total | nozero | nonan);
2373  overallMshrMissLatency =
2374  demandMshrMissLatency + SUM_NON_DEMAND(mshrMissLatency);
2375  for (int i = 0; i < max_requestors; i++) {
2376  overallMshrMissLatency.subname(i, system->getRequestorName(i));
2377  }
2378 
2379  overallMshrUncacheable.flags(total | nozero | nonan);
2380  overallMshrUncacheable =
2381  SUM_DEMAND(mshrUncacheable) + SUM_NON_DEMAND(mshrUncacheable);
2382  for (int i = 0; i < max_requestors; i++) {
2383  overallMshrUncacheable.subname(i, system->getRequestorName(i));
2384  }
2385 
2386 
2387  overallMshrUncacheableLatency.flags(total | nozero | nonan);
2388  overallMshrUncacheableLatency =
2389  SUM_DEMAND(mshrUncacheableLatency) +
2390  SUM_NON_DEMAND(mshrUncacheableLatency);
2391  for (int i = 0; i < max_requestors; i++) {
2392  overallMshrUncacheableLatency.subname(i, system->getRequestorName(i));
2393  }
2394 
2395  demandMshrMissRate.flags(total | nozero | nonan);
2396  demandMshrMissRate = demandMshrMisses / demandAccesses;
2397  for (int i = 0; i < max_requestors; i++) {
2398  demandMshrMissRate.subname(i, system->getRequestorName(i));
2399  }
2400 
2401  overallMshrMissRate.flags(total | nozero | nonan);
2402  overallMshrMissRate = overallMshrMisses / overallAccesses;
2403  for (int i = 0; i < max_requestors; i++) {
2404  overallMshrMissRate.subname(i, system->getRequestorName(i));
2405  }
2406 
2407  demandAvgMshrMissLatency.flags(total | nozero | nonan);
2408  demandAvgMshrMissLatency = demandMshrMissLatency / demandMshrMisses;
2409  for (int i = 0; i < max_requestors; i++) {
2410  demandAvgMshrMissLatency.subname(i, system->getRequestorName(i));
2411  }
2412 
2413  overallAvgMshrMissLatency.flags(total | nozero | nonan);
2414  overallAvgMshrMissLatency = overallMshrMissLatency / overallMshrMisses;
2415  for (int i = 0; i < max_requestors; i++) {
2416  overallAvgMshrMissLatency.subname(i, system->getRequestorName(i));
2417  }
2418 
2419  overallAvgMshrUncacheableLatency.flags(total | nozero | nonan);
2420  overallAvgMshrUncacheableLatency =
2421  overallMshrUncacheableLatency / overallMshrUncacheable;
2422  for (int i = 0; i < max_requestors; i++) {
2423  overallAvgMshrUncacheableLatency.subname(i,
2424  system->getRequestorName(i));
2425  }
2426 
2427  dataExpansions.flags(nozero | nonan);
2428  dataContractions.flags(nozero | nonan);
2429 }
2430 
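The demand/overall formulas registered above are evaluated lazily when statistics are dumped. A minimal standalone sketch of the same roll-up arithmetic with made-up counts (not gem5 code):

#include <cstdio>

int main()
{
    // Assumed example counts for one requestor.
    double demand_hits = 900, demand_misses = 100;
    double pf_hits = 40, pf_misses = 60; // non-demand (prefetch) commands

    double demand_accesses = demand_hits + demand_misses;
    double overall_accesses = demand_accesses + pf_hits + pf_misses;
    double demand_miss_rate = demand_misses / demand_accesses;
    double overall_miss_rate = (demand_misses + pf_misses) / overall_accesses;

    std::printf("demand miss rate %.3f, overall miss rate %.3f\n",
                demand_miss_rate, overall_miss_rate); // 0.100, 0.145
}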
2431 void
2432 BaseCache::regProbePoints()
2433 {
2434  ppHit = new ProbePointArg<PacketPtr>(this->getProbeManager(), "Hit");
2435  ppMiss = new ProbePointArg<PacketPtr>(this->getProbeManager(), "Miss");
2436  ppFill = new ProbePointArg<PacketPtr>(this->getProbeManager(), "Fill");
2437  ppDataUpdate =
2438  new ProbePointArg<DataUpdate>(this->getProbeManager(), "Data Update");
2439 }
2440 
2441 ///////////////
2442 //
2443 // CpuSidePort
2444 //
2445 ///////////////
2446 bool
2447 BaseCache::CpuSidePort::recvTimingSnoopResp(PacketPtr pkt)
2448 {
2449  // Snoops shouldn't happen when bypassing caches
2450  assert(!cache->system->bypassCaches());
2451 
2452  assert(pkt->isResponse());
2453 
2454  // Express snoop responses from requestor to responder, e.g., from L1 to L2
2455  cache->recvTimingSnoopResp(pkt);
2456  return true;
2457 }
2458 
2459 
2460 bool
2461 BaseCache::CpuSidePort::tryTiming(PacketPtr pkt)
2462 {
2463  if (cache->system->bypassCaches() || pkt->isExpressSnoop()) {
2464  // always let express snoop packets through even if blocked
2465  return true;
2466  } else if (blocked || mustSendRetry) {
2467  // either already committed to send a retry, or blocked
2468  mustSendRetry = true;
2469  return false;
2470  }
2471  mustSendRetry = false;
2472  return true;
2473 }
2474 
2475 bool
2476 BaseCache::CpuSidePort::recvTimingReq(PacketPtr pkt)
2477 {
2478  assert(pkt->isRequest());
2479 
2480  if (cache->system->bypassCaches()) {
2481  // Just forward the packet if caches are disabled.
2482  // @todo This should really enqueue the packet rather
2483  [[maybe_unused]] bool success = cache->memSidePort.sendTimingReq(pkt);
2484  assert(success);
2485  return true;
2486  } else if (tryTiming(pkt)) {
2487  cache->recvTimingReq(pkt);
2488  return true;
2489  }
2490  return false;
2491 }
2492 
2493 Tick
2494 BaseCache::CpuSidePort::recvAtomic(PacketPtr pkt)
2495 {
2496  if (cache->system->bypassCaches()) {
2497  // Forward the request if the system is in cache bypass mode.
2498  return cache->memSidePort.sendAtomic(pkt);
2499  } else {
2500  return cache->recvAtomic(pkt);
2501  }
2502 }
2503 
2504 void
2505 BaseCache::CpuSidePort::recvFunctional(PacketPtr pkt)
2506 {
2507  if (cache->system->bypassCaches()) {
2508  // The cache should be flushed if we are in cache bypass mode,
2509  // so we don't need to check if we need to update anything.
2510  cache->memSidePort.sendFunctional(pkt);
2511  return;
2512  }
2513 
2514  // functional request
2515  cache->functionalAccess(pkt, true);
2516 }
2517 
2518 AddrRangeList
2519 BaseCache::CpuSidePort::getAddrRanges() const
2520 {
2521  return cache->getAddrRanges();
2522 }
2523 
2524 
2525 BaseCache::
2526 CpuSidePort::CpuSidePort(const std::string &_name, BaseCache *_cache,
2527  const std::string &_label)
2528  : CacheResponsePort(_name, _cache, _label), cache(_cache)
2529 {
2530 }
2531 
2532 ///////////////
2533 //
2534 // MemSidePort
2535 //
2536 ///////////////
2537 bool
2538 BaseCache::MemSidePort::recvTimingResp(PacketPtr pkt)
2539 {
2540  cache->recvTimingResp(pkt);
2541  return true;
2542 }
2543 
2544 // Express snooping requests to memside port
2545 void
2546 BaseCache::MemSidePort::recvTimingSnoopReq(PacketPtr pkt)
2547 {
2548  // Snoops shouldn't happen when bypassing caches
2549  assert(!cache->system->bypassCaches());
2550 
2551  // handle snooping requests
2552  cache->recvTimingSnoopReq(pkt);
2553 }
2554 
2555 Tick
2556 BaseCache::MemSidePort::recvAtomicSnoop(PacketPtr pkt)
2557 {
2558  // Snoops shouldn't happen when bypassing caches
2559  assert(!cache->system->bypassCaches());
2560 
2561  return cache->recvAtomicSnoop(pkt);
2562 }
2563 
2564 void
2565 BaseCache::MemSidePort::recvFunctionalSnoop(PacketPtr pkt)
2566 {
2567  // Snoops shouldn't happen when bypassing caches
2568  assert(!cache->system->bypassCaches());
2569 
2570  // functional snoop (note that in contrast to atomic we don't have
2571  // a specific functionalSnoop method, as they have the same
2572  // behaviour regardless)
2573  cache->functionalAccess(pkt, false);
2574 }
2575 
2576 void
2577 BaseCache::CacheReqPacketQueue::sendDeferredPacket()
2578 {
2579  // sanity check
2580  assert(!waitingOnRetry);
2581 
2582  // there should never be any deferred request packets in the
2583  // queue, instead we rely on the cache to provide the packets
2584  // from the MSHR queue or write queue
2585  assert(deferredPacketReadyTime() == MaxTick);
2586 
2587  // check for request packets (requests & writebacks)
2588  QueueEntry* entry = cache.getNextQueueEntry();
2589 
2590  if (!entry) {
2591  // can happen if e.g. we attempt a writeback and fail, but
2592  // before the retry, the writeback is eliminated because
2593  // we snoop another cache's ReadEx.
2594  } else {
2595  // let our snoop responses go first if there are responses to
2596  // the same addresses
2597  if (checkConflictingSnoop(entry->getTarget()->pkt)) {
2598  return;
2599  }
2600  waitingOnRetry = entry->sendPacket(cache);
2601  }
2602 
2603  // if we succeeded and are not waiting for a retry, schedule the
2604  // next send considering when the next queue is ready, note that
2605  // snoop responses have their own packet queue and thus schedule
2606  // their own events
2607  if (!waitingOnRetry) {
2608  schedSendEvent(cache.nextQueueReadyTime());
2609  }
2610 }
2611 
2612 BaseCache::MemSidePort::MemSidePort(const std::string &_name,
2613  BaseCache *_cache,
2614  const std::string &_label)
2615  : CacheRequestPort(_name, _cache, _reqQueue, _snoopRespQueue),
2616  _reqQueue(*_cache, *this, _snoopRespQueue, _label),
2617  _snoopRespQueue(*_cache, *this, true, _label), cache(_cache)
2618 {
2619 }
2620 
2621 void
2622 WriteAllocator::updateMode(Addr write_addr, unsigned write_size,
2623  Addr blk_addr)
2624 {
2625  // check if we are continuing where the last write ended
2626  if (nextAddr == write_addr) {
2627  delayCtr[blk_addr] = delayThreshold;
2628  // stop if we have already saturated
2629  if (mode != WriteMode::NO_ALLOCATE) {
2630  byteCount += write_size;
2631  // switch to streaming mode if we have passed the lower
2632  // threshold
2633  if (mode == WriteMode::ALLOCATE &&
2634  byteCount > coalesceLimit) {
2635  mode = WriteMode::COALESCE;
2636  DPRINTF(Cache, "Switched to write coalescing\n");
2637  } else if (mode == WriteMode::COALESCE &&
2638  byteCount > noAllocateLimit) {
2639  // and continue and switch to non-allocating mode if we
2640  // pass the upper threshold
2641  mode = WriteMode::NO_ALLOCATE;
2642  DPRINTF(Cache, "Switched to write-no-allocate\n");
2643  }
2644  }
2645  } else {
2646  // we did not see a write matching the previous one, start
2647  // over again
2648  byteCount = write_size;
2649  mode = WriteMode::ALLOCATE;
2650  resetDelay(blk_addr);
2651  }
2652  nextAddr = write_addr + write_size;
2653 }
2654 
2655 } // namespace gem5
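A minimal standalone sketch of the mode ladder implemented by updateMode() above, assuming example limits of 256 bytes (coalesce) and 768 bytes (no-allocate); the real limits come from the WriteAllocator parameters. Contiguous writes climb ALLOCATE -> COALESCE -> NO_ALLOCATE, and any gap in the stream drops back to ALLOCATE (not gem5 code):

#include <cstdint>
#include <cstdio>

enum class WriteMode { ALLOCATE, COALESCE, NO_ALLOCATE };

struct ToyWriteAllocator
{
    WriteMode mode = WriteMode::ALLOCATE;
    std::uint64_t nextAddr = 0;
    std::uint64_t byteCount = 0;
    // Assumed example thresholds, in bytes.
    static constexpr std::uint64_t coalesceLimit = 256;
    static constexpr std::uint64_t noAllocateLimit = 768;

    void updateMode(std::uint64_t write_addr, unsigned write_size)
    {
        if (write_addr == nextAddr) {
            // Contiguous with the previous write: accumulate and promote.
            if (mode != WriteMode::NO_ALLOCATE) {
                byteCount += write_size;
                if (mode == WriteMode::ALLOCATE && byteCount > coalesceLimit)
                    mode = WriteMode::COALESCE;
                else if (mode == WriteMode::COALESCE &&
                         byteCount > noAllocateLimit)
                    mode = WriteMode::NO_ALLOCATE;
            }
        } else {
            // Stream broken: start counting again and allocate normally.
            byteCount = write_size;
            mode = WriteMode::ALLOCATE;
        }
        nextAddr = write_addr + write_size;
    }
};

int main()
{
    ToyWriteAllocator wa;
    for (std::uint64_t a = 0x1000; a < 0x1000 + 16 * 64; a += 64)
        wa.updateMode(a, 64); // a long sequential write stream
    std::printf("mode after streaming: %d\n", (int)wa.mode); // 2 = NO_ALLOCATE
}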
gem5::Packet::getBlockAddr
Addr getBlockAddr(unsigned int blk_size) const
Definition: packet.hh:805
gem5::BaseCache::memWriteback
virtual void memWriteback() override
Write back dirty blocks in the cache using functional accesses.
Definition: base.cc:1713
gem5::curTick
Tick curTick()
The universal simulation clock.
Definition: cur_tick.hh:46
fatal
#define fatal(...)
This implements a cprintf based fatal() function.
Definition: logging.hh:190
gem5::PortID
int16_t PortID
Port index/ID type, and a symbolic name for an invalid port id.
Definition: types.hh:252
gem5::MSHR
Miss Status and handling Register.
Definition: mshr.hh:74
gem5::SimObject::getPort
virtual Port & getPort(const std::string &if_name, PortID idx=InvalidPortID)
Get a port with a given name and index.
Definition: sim_object.cc:126
gem5::Packet::isAtomicOp
bool isAtomicOp() const
Definition: packet.hh:820
gem5::BaseCache::MemSidePort::MemSidePort
MemSidePort(const std::string &_name, BaseCache *_cache, const std::string &_label)
Definition: base.cc:2612
gem5::BaseCache::incMissCount
void incMissCount(PacketPtr pkt)
Definition: base.hh:1296
gem5::BaseCache::cpuSidePort
CpuSidePort cpuSidePort
Definition: base.hh:338
queue_entry.hh
gem5::RequestPort::sendTimingReq
bool sendTimingReq(PacketPtr pkt)
Attempt to send a timing request to the responder port by calling its corresponding receive function.
Definition: port.hh:495
gem5::BaseCache::handleTimingReqMiss
virtual void handleTimingReqMiss(PacketPtr pkt, CacheBlk *blk, Tick forward_time, Tick request_time)=0
gem5::Packet::isRequest
bool isRequest() const
Definition: packet.hh:586
gem5::BaseCache::CacheCmdStats::regStatsFromParent
void regStatsFromParent()
Callback to register stats from parent CacheStats::regStats().
Definition: base.cc:1990
warn
#define warn(...)
Definition: logging.hh:246
gem5::WriteAllocator::reset
void reset()
Reset the write allocator state, meaning that it allocates for writes and has not recorded any inform...
Definition: base.hh:1415
compressed_tags.hh
gem5::BaseCache::~BaseCache
~BaseCache()
Definition: base.cc:139
base.hh
gem5::BaseCache::invalidateBlock
void invalidateBlock(CacheBlk *blk)
Invalidate a cache block.
Definition: base.cc:1588
gem5::MemCmd::WriteClean
@ WriteClean
Definition: packet.hh:94
gem5::BaseCache::DataUpdate::oldData
std::vector< uint64_t > oldData
The stale data contents.
Definition: base.hh:131
gem5::Packet::getOffset
Addr getOffset(unsigned int blk_size) const
Definition: packet.hh:800
gem5::MemCmd::CleanEvict
@ CleanEvict
Definition: packet.hh:95
gem5::BaseCache::CacheStats::replacements
statistics::Scalar replacements
Number of replacements of valid blocks.
Definition: base.hh:1134
gem5::ResponsePort::sendFunctionalSnoop
void sendFunctionalSnoop(PacketPtr pkt) const
Send a functional snoop request packet, where the data is instantly updated everywhere in the memory ...
Definition: port.hh:346
gem5::CacheBlk::clearPrefetched
void clearPrefetched()
Clear the prefetching bit.
Definition: cache_blk.hh:255
gem5::BaseCache::cmpAndSwap
void cmpAndSwap(CacheBlk *blk, PacketPtr pkt)
Handle doing the Compare and Swap function for SPARC.
Definition: base.cc:721
gem5::BaseCache::NUM_BLOCKED_CAUSES
@ NUM_BLOCKED_CAUSES
Definition: base.hh:116
data
const char data[]
Definition: circlebuf.test.cc:48
gem5::BaseCache::ppDataUpdate
ProbePointArg< DataUpdate > * ppDataUpdate
To probe when the contents of a block are updated.
Definition: base.hh:372
gem5::MemCmd::SwapReq
@ SwapReq
Definition: packet.hh:115
UNSERIALIZE_SCALAR
#define UNSERIALIZE_SCALAR(scalar)
Definition: serialize.hh:575
gem5::BaseCache::CacheRequestPort
A cache request port is used for the memory-side port of the cache, and in addition to the basic timi...
Definition: base.hh:152
gem5::BaseCache::access
virtual bool access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat, PacketList &writebacks)
Does all the processing necessary to perform the provided request.
Definition: base.cc:1154
gem5::BaseCache::forwardSnoops
bool forwardSnoops
Do we forward snoops from mem side port through to cpu side port?
Definition: base.hh:928
gem5::QueueEntry::Target::pkt
const PacketPtr pkt
Pending request packet.
Definition: queue_entry.hh:93
gem5::Packet::isCleanEviction
bool isCleanEviction() const
Is this packet a clean eviction, including both actual clean evict packets, but also clean writebacks...
Definition: packet.hh:1394
gem5::BaseCache::CacheCmdStats::mshrHits
statistics::Vector mshrHits
Number of misses that hit in the MSHRs per command and thread.
Definition: base.hh:1025
gem5::TempCacheBlk::invalidate
void invalidate() override
Invalidate the block and clear all state.
Definition: cache_blk.hh:524
gem5::BaseCache::CacheResponsePort::processSendRetry
void processSendRetry()
Definition: base.cc:172
gem5::BaseCache::writeBuffer
WriteQueue writeBuffer
Write/writeback buffer.
Definition: base.hh:347
warn_once
#define warn_once(...)
Definition: logging.hh:250
gem5::Packet::setData
void setData(const uint8_t *p)
Copy data into the packet from the provided pointer.
Definition: packet.hh:1252
gem5::BaseCache::moveContractions
const bool moveContractions
Similar to data expansions, after a block improves its compression, it may need to be moved elsewhere...
Definition: base.hh:959
gem5::Packet::pushSenderState
void pushSenderState(SenderState *sender_state)
Push a new sender state to the packet and make the current sender state the predecessor of the new on...
Definition: packet.cc:316
gem5::BaseCache::recvTimingResp
virtual void recvTimingResp(PacketPtr pkt)
Handles a response (cache line fill/write ack) from the bus.
Definition: base.cc:420
mshr.hh
gem5::BaseCache::CacheResponsePort::CacheResponsePort
CacheResponsePort(const std::string &_name, BaseCache *_cache, const std::string &_label)
Definition: base.cc:69
gem5::Packet::req
RequestPtr req
A pointer to the original request.
Definition: packet.hh:366
gem5::MSHR::allocOnFill
bool allocOnFill() const
Definition: mshr.hh:340
gem5::MSHR::isForward
bool isForward
True if the entry is just a simple forward from an upper level.
Definition: mshr.hh:127
gem5::BaseCache::MemSidePort::recvFunctionalSnoop
virtual void recvFunctionalSnoop(PacketPtr pkt)
Receive a functional snoop request packet from the peer.
Definition: base.cc:2565
gem5::BaseCache::coalesce
bool coalesce() const
Checks if the cache is coalescing writes.
Definition: base.cc:1732
base.hh
gem5::CheckpointIn
Definition: serialize.hh:68
gem5::compression::Base::getDecompressionLatency
Cycles getDecompressionLatency(const CacheBlk *blk)
Get the decompression latency if the block is compressed.
Definition: base.cc:197
gem5::CompressionBlk::setSizeBits
void setSizeBits(const std::size_t size)
Set size, in bits, of this compressed block's data.
Definition: super_blk.cc:99
gem5::EventBase::Delayed_Writeback_Pri
static const Priority Delayed_Writeback_Pri
For some reason "delayed" inter-cluster writebacks are scheduled before regular writebacks (which hav...
Definition: eventq.hh:172
gem5::MSHRQueue::canPrefetch
bool canPrefetch() const
Returns true if sufficient mshrs for prefetch.
Definition: mshr_queue.hh:158
SUM_NON_DEMAND
#define SUM_NON_DEMAND(s)
gem5::Packet::writeThrough
bool writeThrough() const
Definition: packet.hh:729
gem5::statistics::nozero
const FlagsType nozero
Don't print if this is zero.
Definition: info.hh:68
gem5::BaseCache::getPort
Port & getPort(const std::string &if_name, PortID idx=InvalidPortID) override
Get a port with a given name and index.
Definition: base.cc:201
cur_tick.hh
gem5::Request::SECURE
@ SECURE
The request targets the secure memory space.
Definition: request.hh:186
gem5::BaseCache::markInService
void markInService(MSHR *mshr, bool pending_modified_resp)
Mark a request as in service (sent downstream in the memory system), effectively making this MSHR the...
Definition: base.hh:408
gem5::Packet::setCacheResponding
void setCacheResponding()
Snoop flags.
Definition: packet.hh:640
gem5::Packet::isWriteback
bool isWriteback() const
Definition: packet.hh:602
gem5::MSHR::promoteDeferredTargets
bool promoteDeferredTargets()
Definition: mshr.cc:581
gem5::MSHR::promoteWritable
void promoteWritable()
Promotes deferred targets that do not require writable.
Definition: mshr.cc:664
gem5::Packet::cacheResponding
bool cacheResponding() const
Definition: packet.hh:646
gem5::WriteQueueEntry::getTarget
Target * getTarget() override
Returns a reference to the first target.
Definition: write_queue_entry.hh:155
gem5::prefetch::Base::prefetchUnused
void prefetchUnused()
Definition: base.hh:391
gem5::TempCacheBlk
Special instance of CacheBlk for use with tempBlk that deals with its block address regeneration.
Definition: cache_blk.hh:500
gem5::Packet::isUpgrade
bool isUpgrade() const
Definition: packet.hh:585
gem5::MaxTick
const Tick MaxTick
Definition: types.hh:60
gem5::Packet::isWrite
bool isWrite() const
Definition: packet.hh:583
gem5::CacheBlk::clearCoherenceBits
void clearCoherenceBits(unsigned bits)
Clear the corresponding coherence bits.
Definition: cache_blk.hh:231
gem5::Packet::isSecure
bool isSecure() const
Definition: packet.hh:810
gem5::MSHRQueue::delay
void delay(MSHR *mshr, Tick delay_ticks)
Adds a delay to the provided MSHR and moves MSHRs that will be ready earlier than this entry to the t...
Definition: mshr_queue.cc:104
gem5::MemCmd::NUM_MEM_CMDS
@ NUM_MEM_CMDS
Definition: packet.hh:143
gem5::Packet::setWriteThrough
void setWriteThrough()
A writeback/writeclean cmd gets propagated further downstream by the receiver when the flag is set.
Definition: packet.hh:722
gem5::BaseCache::clearBlocked
void clearBlocked(BlockedCause cause)
Marks the cache as unblocked for the given cause.
Definition: base.hh:1255
gem5::BaseCache::dataLatency
const Cycles dataLatency
The latency of data access of a cache.
Definition: base.hh:900
gem5::Packet::hasSharers
bool hasSharers() const
Definition: packet.hh:673
gem5::BaseCache::updateCompressionData
bool updateCompressionData(CacheBlk *&blk, const uint64_t *data, PacketList &writebacks)
When a block is overwriten, its compression information must be updated, and it may need to be recomp...
Definition: base.cc:902
gem5::BaseCache::CacheCmdStats::mshrMisses
statistics::Vector mshrMisses
Number of misses that miss in the MSHRs, per command and thread.
Definition: base.hh:1027
gem5::X86ISA::system
Bitfield< 15 > system
Definition: misc.hh:1003
gem5::CompressionBlk::getSizeBits
std::size_t getSizeBits() const
Definition: super_blk.cc:93
gem5::BaseCache::replaceExpansions
const bool replaceExpansions
when a data expansion of a compressed block happens it will not be able to co-allocate where it is at...
Definition: base.hh:951
gem5::BaseCache::blocked
uint8_t blocked
Bit vector of the blocking reasons for the access path.
Definition: base.hh:965
gem5::QueueEntry::Target
A queue entry is holding packets that will be serviced as soon as resources are available.
Definition: queue_entry.hh:87
gem5::EventManager::schedule
void schedule(Event &event, Tick when)
Definition: eventq.hh:1019
std::vector< uint64_t >
gem5::WriteAllocator::updateMode
void updateMode(Addr write_addr, unsigned write_size, Addr blk_addr)
Update the write mode based on the current write packet.
Definition: base.cc:2622
gem5::statistics::nonan
const FlagsType nonan
Don't print if this is NAN.
Definition: info.hh:70
gem5::CacheBlk::checkWrite
bool checkWrite(PacketPtr pkt)
Handle interaction of load-locked operations and stores.
Definition: cache_blk.hh:412
gem5::BaseCache::sequentialAccess
const bool sequentialAccess
Whether tags and data are accessed sequentially.
Definition: base.hh:922
gem5::BaseCache::BaseCache
BaseCache(const BaseCacheParams &p, unsigned blk_size)
Definition: base.cc:79
gem5::BaseCache::updateBlockData
void updateBlockData(CacheBlk *blk, const PacketPtr cpkt, bool has_old_data)
Update the data contents of a block.
Definition: base.cc:695
gem5::Packet::makeAtomicResponse
void makeAtomicResponse()
Definition: packet.hh:1043
gem5::Packet::setDataFromBlock
void setDataFromBlock(const uint8_t *blk_data, int blkSize)
Copy data into the packet from the provided block pointer, which is aligned to the given block size.
Definition: packet.hh:1271
gem5::BaseCache::pendingDelete
std::unique_ptr< Packet > pendingDelete
Upstream caches need this packet until true is returned, so hold it for deletion until a subsequent c...
Definition: base.hh:402
gem5::BaseCache::CacheResponsePort
A cache response port is used for the CPU-side port of the cache, and it is basically a simple timing...
Definition: base.hh:274
gem5::BaseTags::moveBlock
virtual void moveBlock(CacheBlk *src_blk, CacheBlk *dest_blk)
Move a block's metadata to another location decided by the replacement policy.
Definition: base.cc:130
gem5::CacheBlk::WritableBit
@ WritableBit
write permission
Definition: cache_blk.hh:80
gem5::ArmISA::i
Bitfield< 7 > i
Definition: misc_types.hh:67
gem5::Packet::headerDelay
uint32_t headerDelay
The extra delay from seeing the packet until the header is transmitted.
Definition: packet.hh:420
gem5::SectorSubBlk::getSectorBlock
SectorBlk * getSectorBlock() const
Get sector block associated to this block.
Definition: sector_blk.cc:52
gem5::Packet::setSatisfied
void setSatisfied()
Set when a request hits in a cache and the cache is not going to respond.
Definition: packet.hh:736
gem5::QueueEntry::blkAddr
Addr blkAddr
Block aligned address.
Definition: queue_entry.hh:116
gem5::SectorBlk::blks
std::vector< SectorSubBlk * > blks
List of blocks associated to this sector.
Definition: sector_blk.hh:147
gem5::Request::funcRequestorId
@ funcRequestorId
This requestor id is used for functional requests that don't come from a particular device.
Definition: request.hh:262
gem5::QueueEntry::order
Counter order
Order number assigned to disambiguate writes and misses.
Definition: queue_entry.hh:113
gem5::CompressionBlk::DATA_EXPANSION
@ DATA_EXPANSION
New data contents are considered larger than previous contents.
Definition: super_blk.hh:81
gem5::CacheBlk
A Basic Cache block.
Definition: cache_blk.hh:70
gem5::BaseCache::tempBlock
TempCacheBlk * tempBlock
Temporary cache block for occasional transitory use.
Definition: base.hh:396
gem5::BaseCache::handleAtomicReqMiss
virtual Cycles handleAtomicReqMiss(PacketPtr pkt, CacheBlk *&blk, PacketList &writebacks)=0
Handle a request in atomic mode that missed in this cache.
gem5::BaseCache::sendWriteQueuePacket
bool sendWriteQueuePacket(WriteQueueEntry *wq_entry)
Similar to sendMSHR, but for a write-queue entry instead.
Definition: base.cc:1894
gem5::BaseCache::CacheCmdStats::mshrMissLatency
statistics::Vector mshrMissLatency
Total cycle latency of each MSHR miss, per command and thread.
Definition: base.hh:1031
gem5::BaseCache::CpuSidePort::recvTimingReq
virtual bool recvTimingReq(PacketPtr pkt) override
Receive a timing request from the peer.
Definition: base.cc:2476
gem5::BaseCache::CacheResponsePort::setBlocked
void setBlocked()
Do not accept any new requests.
Definition: base.cc:145
gem5::BaseCache::isBlocked
bool isBlocked() const
Returns true if the cache is blocked for accesses.
Definition: base.hh:1226
gem5::Cycles
Cycles is a wrapper class for representing cycle counts, i.e.
Definition: types.hh:78
gem5::BaseCache::writeAllocator
WriteAllocator *const writeAllocator
The writeAllocator drive optimizations for streaming writes.
Definition: base.hh:388
gem5::TempCacheBlk::getAddr
Addr getAddr() const
Get block's address.
Definition: cache_blk.hh:542
gem5::MemCmd::WritebackDirty
@ WritebackDirty
Definition: packet.hh:92
gem5::BaseCache::inRange
bool inRange(Addr addr) const
Determine if an address is in the ranges covered by this cache.
Definition: base.cc:213
gem5::Packet::hasData
bool hasData() const
Definition: packet.hh:603
gem5::MemCmd
Definition: packet.hh:75
gem5::BaseCache::DataUpdate::newData
std::vector< uint64_t > newData
The new data contents.
Definition: base.hh:133
gem5::Packet::getAtomicOp
AtomicOpFunctor * getAtomicOp() const
Accessor function to atomic op.
Definition: packet.hh:819
gem5::BaseCache::writebackTempBlockAtomicEvent
EventFunctionWrapper writebackTempBlockAtomicEvent
An event to writeback the tempBlock after recvAtomic finishes.
Definition: base.hh:703
gem5::Packet::payloadDelay
uint32_t payloadDelay
The extra pipelining delay from seeing the packet until the end of payload is transmitted by the comp...
Definition: packet.hh:438
gem5::BaseCache::Blocked_NoTargets
@ Blocked_NoTargets
Definition: base.hh:115
gem5::Packet::dataStatic
void dataStatic(T *p)
Set the data pointer to the following value that should not be freed.
Definition: packet.hh:1134
gem5::BaseCache::DataUpdate
A data contents update is composed of the updated block's address, the old contents,...
Definition: base.hh:124
gem5::QueueEntry::Target::recvTime
const Tick recvTime
Time when request was received (for stats)
Definition: queue_entry.hh:90
gem5::BaseCache::order
uint64_t order
Increasing order number assigned to each incoming request.
Definition: base.hh:968
gem5::CacheBlk::getTaskId
uint32_t getTaskId() const
Get the task id associated to this block.
Definition: cache_blk.hh:285
gem5::Flags< FlagsType >
gem5::BaseCache::serialize
void serialize(CheckpointOut &cp) const override
Serialize the state of the caches.
Definition: base.cc:1918
gem5::BaseCache::ppMiss
ProbePointArg< PacketPtr > * ppMiss
To probe when a cache miss occurs.
Definition: base.hh:362
gem5::System::maxRequestors
RequestorID maxRequestors()
Get the number of requestors registered in the system.
Definition: system.hh:496
gem5::RequestPort::sendFunctional
void sendFunctional(PacketPtr pkt) const
Send a functional request packet, where the data is instantly updated everywhere in the memory system...
Definition: port.hh:485
gem5::CacheBlk::setWhenReady
void setWhenReady(const Tick tick)
Set tick at which block's data will be available for access.
Definition: cache_blk.hh:278
gem5::Packet::isRead
bool isRead() const
Definition: packet.hh:582
gem5::BaseCache::prefetcher
prefetch::Base * prefetcher
Prefetcher.
Definition: base.hh:356
base.hh
gem5::System
Definition: system.hh:75
gem5::BaseCache::tags
BaseTags * tags
Tag and data Storage.
Definition: base.hh:350
gem5::WriteAllocator::allocate
bool allocate() const
Should writes allocate?
Definition: base.hh:1405
gem5::Queue::isFull
bool isFull() const
Definition: queue.hh:150
gem5::BaseCache::CacheStats::regStats
void regStats() override
Callback to set stat parameters.
Definition: base.cc:2207
gem5::Packet::needsWritable
bool needsWritable() const
Definition: packet.hh:588
gem5::BaseCache::ppFill
ProbePointArg< PacketPtr > * ppFill
To probe when a cache fill occurs.
Definition: base.hh:365
gem5::Queue::nextReadyTime
Tick nextReadyTime() const
Definition: queue.hh:229
gem5::MSHR::allocateTarget
void allocateTarget(PacketPtr target, Tick when, Counter order, bool alloc_on_fill)
Add a request to the list of targets.
Definition: mshr.cc:376
gem5::BaseCache::CacheStats::cmd
std::vector< std::unique_ptr< CacheCmdStats > > cmd
Per-command statistics.
Definition: base.hh:1146
gem5::BaseCache::allocateWriteBuffer
void allocateWriteBuffer(PacketPtr pkt, Tick time)
Definition: base.hh:1191
gem5::prefetch::Base::pfHitInMSHR
void pfHitInMSHR()
Definition: base.hh:409
gem5::Named::name
virtual std::string name() const
Definition: named.hh:47
gem5::MSHR::needsWritable
bool needsWritable() const
The pending* and post* flags are only valid if inService is true.
Definition: mshr.hh:319
gem5::BaseCache::MemSidePort::recvTimingResp
virtual bool recvTimingResp(PacketPtr pkt)
Receive a timing response from the peer.
Definition: base.cc:2538
gem5::Packet::print
void print(std::ostream &o, int verbosity=0, const std::string &prefix="") const
Definition: packet.cc:373
gem5::BaseCache::stats
gem5::BaseCache::CacheStats stats
gem5::SuperBlk
A basic compression superblock.
Definition: super_blk.hh:170
gem5::Clocked::cyclesToTicks
Tick cyclesToTicks(Cycles c) const
Definition: clocked_object.hh:227
gem5::BaseCache
A basic cache interface.
Definition: base.hh:95
gem5::QueuedResponsePort
A queued port is a port that has an infinite queue for outgoing packets and thus decouples the module...
Definition: qport.hh:61
DPRINTF
#define DPRINTF(x,...)
Definition: trace.hh:186
ADD_STAT
#define ADD_STAT(n,...)
Convenience macro to add a stat to a statistics group.
Definition: group.hh:75
gem5::PacketId
uint64_t PacketId
Definition: packet.hh:73
gem5::Packet
A Packet is used to encapsulate a transfer between two objects in the memory system (e....
Definition: packet.hh:283
gem5::BaseCache::calculateTagOnlyLatency
Cycles calculateTagOnlyLatency(const uint32_t delay, const Cycles lookup_lat) const
Calculate latency of accesses that only touch the tag array.
Definition: base.cc:1111
gem5::MemCmd::UpgradeResp
@ UpgradeResp
Definition: packet.hh:104
gem5::BaseCache::sendMSHRQueuePacket
virtual bool sendMSHRQueuePacket(MSHR *mshr)
Take an MSHR, turn it into a suitable downstream packet, and send it out.
Definition: base.cc:1791
gem5::MSHR::isPendingModified
bool isPendingModified() const
Definition: mshr.hh:326
gem5::BaseCache::writecleanBlk
PacketPtr writecleanBlk(CacheBlk *blk, Request::Flags dest, PacketId id)
Create a writeclean request for the given block.
Definition: base.cc:1667
gem5::probing::Packet
ProbePointArg< PacketInfo > Packet
Packet probe point.
Definition: mem.hh:109
gem5::MipsISA::p
Bitfield< 0 > p
Definition: pra_constants.hh:326
gem5::BaseCache::blkSize
const unsigned blkSize
Block size of this cache.
Definition: base.hh:888
gem5::Tick
uint64_t Tick
Tick count type.
Definition: types.hh:58
gem5::Queue::getNext
Entry * getNext() const
Returns the WriteQueueEntry at the head of the readyList.
Definition: queue.hh:221
gem5::BaseCache::regenerateBlkAddr
Addr regenerateBlkAddr(CacheBlk *blk)
Regenerate block address using tags.
Definition: base.cc:182
gem5::ResponsePort::isSnooping
bool isSnooping() const
Find out if the peer request port is snooping or not.
Definition: port.hh:291
gem5::BaseCache::ppHit
ProbePointArg< PacketPtr > * ppHit
To probe when a cache hit occurs.
Definition: base.hh:359
gem5::QueueEntry::isSecure
bool isSecure
True if the entry targets the secure memory space.
Definition: queue_entry.hh:122
gem5::BaseCache::CacheResponsePort::clearBlocked
void clearBlocked()
Return to normal operation and accept new requests.
Definition: base.cc:160
gem5::BaseCache::recvAtomic
virtual Tick recvAtomic(PacketPtr pkt)
Performs the access specified by the request.
Definition: base.cc:559
gem5::System::getRequestorName
std::string getRequestorName(RequestorID requestor_id)
Get the name of an object for a given request id.
Definition: system.cc:584
gem5::BaseCache::CpuSidePort::recvTimingSnoopResp
virtual bool recvTimingSnoopResp(PacketPtr pkt) override
Receive a timing snoop response from the peer.
Definition: base.cc:2447
gem5::RequestPtr
std::shared_ptr< Request > RequestPtr
Definition: request.hh:92
gem5::BaseTags::regenerateBlkAddr
virtual Addr regenerateBlkAddr(const CacheBlk *blk) const =0
Regenerate the block address.
gem5::Packet::getConstPtr
const T * getConstPtr() const
Definition: packet.hh:1193
gem5::statistics::reset
void reset()
Definition: statistics.cc:310
gem5::BaseTags::accessBlock
virtual CacheBlk * accessBlock(const PacketPtr pkt, Cycles &lat)=0
Access block and update replacement data.
gem5::CacheBlk::setCoherenceBits
void setCoherenceBits(unsigned bits)
Sets the corresponding coherence bits.
Definition: cache_blk.hh:220
gem5::BaseCache::CacheStats::writebacks
statistics::Vector writebacks
Number of blocks written back per thread.
Definition: base.hh:1097
gem5::CompressedTags
A CompressedTags cache tag store.
Definition: compressed_tags.hh:70
gem5::compression::Base::setSizeBits
static void setSizeBits(CacheBlk *blk, const std::size_t size_bits)
Set the size of the compressed block, in bits.
Definition: base.cc:225
gem5::Packet::isLLSC
bool isLLSC() const
Definition: packet.hh:609
gem5::MemCmd::WritebackClean
@ WritebackClean
Definition: packet.hh:93
gem5::BaseCache::init
void init() override
init() is called after all C++ SimObjects have been created and all ports are connected.
Definition: base.cc:192
gem5::BaseCache::CpuSidePort::CpuSidePort
CpuSidePort(const std::string &_name, BaseCache *_cache, const std::string &_label)
Definition: base.cc:2526
gem5::Request::wbRequestorId
@ wbRequestorId
This requestor id is used for writeback requests by the caches.
Definition: request.hh:257
gem5::CacheBlk::print
std::string print() const override
Pretty-print tag, set and way, and interpret state bits to readable form including mapping to a MOESI...
Definition: cache_blk.hh:364
gem5::Queue::findMatch
Entry * findMatch(Addr blk_addr, bool is_secure, bool ignore_uncacheable=true) const
Find the first entry that matches the provided address.
Definition: queue.hh:168
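
The lookup matches on the block address together with the secure bit, optionally skipping uncacheable entries. A minimal standalone sketch of that idea follows; the Entry struct and linear scan are illustrative assumptions rather than gem5's templated Queue.

    #include <cstdint>
    #include <vector>
    #include <iostream>

    // Standalone sketch of "find the first matching entry": match on block
    // address and secure bit. The Entry type is hypothetical, not gem5's
    // MSHR or write-queue entry classes.
    struct Entry
    {
        uint64_t blkAddr;
        bool isSecure;
        bool isUncacheable;
    };

    Entry *findMatch(std::vector<Entry> &entries, uint64_t blk_addr, bool is_secure,
                     bool ignore_uncacheable = true)
    {
        for (auto &e : entries) {
            if (ignore_uncacheable && e.isUncacheable)
                continue;                       // skip uncacheable entries if asked to
            if (e.blkAddr == blk_addr && e.isSecure == is_secure)
                return &e;                      // first match wins
        }
        return nullptr;
    }

    int main()
    {
        std::vector<Entry> q{{0x1000, false, false}, {0x2000, true, false}};
        std::cout << (findMatch(q, 0x2000, true) != nullptr) << "\n";  // prints 1
        return 0;
    }
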
gem5::Packet::clearBlockCached
void clearBlockCached()
Definition: packet.hh:748
gem5::ArmISA::offset
Bitfield< 23, 0 > offset
Definition: types.hh:144
gem5::Port::isConnected
bool isConnected() const
Is this port currently connected to a peer?
Definition: port.hh:133
gem5::BaseCache::CacheStats::dataExpansions
statistics::Scalar dataExpansions
Number of data expansions.
Definition: base.hh:1137
gem5::BaseCache::CacheResponsePort::mustSendRetry
bool mustSendRetry
Definition: base.hh:297
compiler.hh
gem5::Packet::id
const PacketId id
Definition: packet.hh:363
gem5::MSHR::isWholeLineWrite
bool isWholeLineWrite() const
Check if this MSHR contains only compatible writes, and if they span the entire cache line.
Definition: mshr.hh:390
gem5::BaseCache::unserialize
void unserialize(CheckpointIn &cp) override
Unserialize an object.
Definition: base.cc:1938
gem5::BaseCache::allocOnFill
bool allocOnFill(MemCmd cmd) const
Determine whether we should allocate on a fill or not.
Definition: base.hh:441
gem5::BaseCache::incHitCount
void incHitCount(PacketPtr pkt)
Definition: base.hh:1307
gem5::Queue::trySatisfyFunctional
bool trySatisfyFunctional(PacketPtr pkt)
Definition: queue.hh:186
gem5::Packet::pushLabel
void pushLabel(const std::string &lbl)
Push label for PrintReq (safe to call unconditionally).
Definition: packet.hh:1420
gem5::Packet::cmd
MemCmd cmd
The command field of the packet.
Definition: packet.hh:361
gem5::ArmISA::c
Bitfield< 29 > c
Definition: misc_types.hh:53
gem5::Packet::popLabel
void popLabel()
Pop label for PrintReq (safe to call unconditionally).
Definition: packet.hh:1430
gem5::BaseCache::Blocked_NoMSHRs
@ Blocked_NoMSHRs
Definition: base.hh:113
gem5::BaseCache::CpuSidePort::recvFunctional
virtual void recvFunctional(PacketPtr pkt) override
Receive a functional request packet from the peer.
Definition: base.cc:2505
gem5::Packet::needsResponse
bool needsResponse() const
Definition: packet.hh:597
gem5::Packet::writeData
void writeData(uint8_t *p) const
Copy data from the packet to the memory at the provided pointer.
Definition: packet.hh:1281
gem5::MSHR::wasWholeLineWrite
bool wasWholeLineWrite
Track if we sent this as a whole line write or not.
Definition: mshr.hh:124
gem5::Addr
uint64_t Addr
Address type. This will probably be moved somewhere else in the near future.
Definition: types.hh:147
gem5::Packet::isError
bool isError() const
Definition: packet.hh:610
gem5::BaseCache::doWritebacksAtomic
virtual void doWritebacksAtomic(PacketList &writebacks)=0
Send writebacks down the memory hierarchy in atomic mode.
gem5::prefetch::Base::pfHitInWB
void pfHitInWB()
Definition: base.hh:415
name
const std::string & name()
Definition: trace.cc:49
gem5::prefetch::Base::pfHitInCache
void pfHitInCache()
Definition: base.hh:403
SERIALIZE_SCALAR
#define SERIALIZE_SCALAR(scalar)
Definition: serialize.hh:568
gem5::BaseCache::tempBlockWriteback
PacketPtr tempBlockWriteback
Writebacks from the tempBlock, resulting on the response path in atomic mode, must happen after the c...
Definition: base.hh:684
gem5::CacheBlk::DirtyBit
@ DirtyBit
dirty (modified)
Definition: cache_blk.hh:87
gem5::statistics::Group::regStats
virtual void regStats()
Callback to set stat parameters.
Definition: group.cc:69
gem5::ClockedObject
The ClockedObject class extends the SimObject with a clock and accessor functions to relate ticks to ...
Definition: clocked_object.hh:234
gem5::Clocked::clockEdge
Tick clockEdge(Cycles cycles=Cycles(0)) const
Determine the tick when a cycle begins, by default the current one, but the argument also enables the...
Definition: clocked_object.hh:177
gem5::BaseCache::recvTimingReq
virtual void recvTimingReq(PacketPtr pkt)
Performs the access specified by the request.
Definition: base.cc:350
gem5::ResponsePort::sendRangeChange
void sendRangeChange() const
Called by the owner to send a range change.
Definition: port.hh:296
gem5::MSHRQueue::markPending
void markPending(MSHR *mshr)
Mark an in-service entry as pending; used to resend a request.
Definition: mshr_queue.cc:123
gem5::BaseCache::MemSidePort::recvTimingSnoopReq
virtual void recvTimingSnoopReq(PacketPtr pkt)
Receive a timing snoop request from the peer.
Definition: base.cc:2546
gem5::EventManager::deschedule
void deschedule(Event &event)
Definition: eventq.hh:1028
gem5::prefetch::Base::nextPrefetchReadyTime
virtual Tick nextPrefetchReadyTime() const =0
gem5::Packet::popSenderState
SenderState * popSenderState()
Pop the top of the state stack and return a pointer to it.
Definition: packet.cc:324
gem5::MemCmd::InvalidateResp
@ InvalidateResp
Definition: packet.hh:138
gem5::Cache
A coherent cache that can be arranged in flexible topologies.
Definition: cache.hh:67
gem5::BaseCache::MemSidePort::recvAtomicSnoop
virtual Tick recvAtomicSnoop(PacketPtr pkt)
Receive an atomic snoop request packet from our peer.
Definition: base.cc:2556
gem5::WriteQueueEntry
Write queue entry.
Definition: write_queue_entry.hh:67
gem5::BaseCache::schedMemSideSendEvent
void schedMemSideSendEvent(Tick time)
Schedule a send event for the memory-side port.
Definition: base.hh:1274
gem5::ProbePointArg
ProbePointArg generates a point for the class of Arg.
Definition: thermal_domain.hh:54
gem5::CacheBlk::data
uint8_t * data
Contains a copy of the data in this block for easy access.
Definition: cache_blk.hh:103
gem5::BaseCache::maintainClusivity
void maintainClusivity(bool from_cache, CacheBlk *blk)
Maintain the clusivity of this cache by potentially invalidating a block.
Definition: base.cc:1423
gem5::CacheBlk::getWhenReady
Tick getWhenReady() const
Get tick at which block's data will be available for access.
Definition: cache_blk.hh:265
gem5::MSHRQueue::deallocate
void deallocate(MSHR *mshr) override
Deallocate a MSHR and its targets.
Definition: mshr_queue.cc:83
gem5::Packet::isClean
bool isClean() const
Definition: packet.hh:600
gem5::BaseCache::isReadOnly
const bool isReadOnly
Is this cache read-only, for example the instruction cache or a table-walker cache.
Definition: base.hh:943
gem5::BaseCache::CacheReqPacketQueue::sendDeferredPacket
virtual void sendDeferredPacket()
Override the normal sendDeferredPacket and do not only consider the transmit list (used for responses...
Definition: base.cc:2577
gem5::Packet::setHasSharers
void setHasSharers()
On fills, the hasSharers flag is used by the caches in combination with the cacheResponding flag,...
Definition: packet.hh:672
gem5::BaseCache::noTargetMSHR
MSHR * noTargetMSHR
Pointer to the MSHR that has no targets.
Definition: base.hh:974
gem5::SimObject::getProbeManager
ProbeManager * getProbeManager()
Get the probe manager for this object.
Definition: sim_object.cc:120
gem5::BaseTags::forEachBlk
virtual void forEachBlk(std::function< void(CacheBlk &)> visitor)=0
Visit each block in the tags and apply a visitor.
gem5::BaseCache::clusivity
const enums::Clusivity clusivity
Clusivity with respect to the upstream cache, determining if we fill into both this cache and the cac...
Definition: base.hh:935
warn_if
#define warn_if(cond,...)
Conditional warning macro that checks the supplied condition and only prints a warning if the conditi...
Definition: logging.hh:273
super_blk.hh
Copyright (c) 2018 Inria All rights reserved.
gem5::ResponsePort::owner
SimObject & owner
Definition: port.hh:279
panic_if
#define panic_if(cond,...)
Conditional panic macro that checks the supplied condition and only panics if the condition is true a...
Definition: logging.hh:204
gem5::BaseTags::insertBlock
virtual void insertBlock(const PacketPtr pkt, CacheBlk *blk)
Insert the new block into the cache and update stats.
Definition: base.cc:102
gem5::BaseCache::memInvalidate
virtual void memInvalidate() override
Invalidates all blocks in the cache.
Definition: base.cc:1719
gem5::BaseCache::CacheCmdStats::CacheCmdStats
CacheCmdStats(BaseCache &c, const std::string &name)
Definition: base.cc:1950
gem5::Packet::makeTimingResponse
void makeTimingResponse()
Definition: packet.hh:1049
gem5::BaseTags::anyBlk
virtual bool anyBlk(std::function< bool(CacheBlk &)> visitor)=0
Find if any of the blocks satisfies a condition.
gem5::BaseCache::regProbePoints
void regProbePoints() override
Registers probes.
Definition: base.cc:2432
gem5::Packet::allocate
void allocate()
Allocate memory for the packet.
Definition: packet.hh:1326
gem5::Port
Ports are used to interface objects to each other.
Definition: port.hh:61
gem5::BaseCache::isDirty
bool isDirty() const
Determine if there are any dirty blocks in the cache.
Definition: base.cc:1725
gem5::BaseCache::CacheStats::cmdStats
CacheCmdStats & cmdStats(const PacketPtr p)
Definition: base.hh:1048
gem5::CompressionBlk
A superblock is composed of sub-blocks, and each sub-block has information regarding its superblock a...
Definition: super_blk.hh:51
gem5::BaseCache::allocateBlock
CacheBlk * allocateBlock(const PacketPtr pkt, PacketList &writebacks)
Allocate a new block and perform any necessary writebacks.
Definition: base.cc:1531
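
Allocation boils down to picking a victim frame, queuing a writeback if the victim is dirty, and reusing the frame for the new address. The standalone sketch below shows that flow under deliberately simple assumptions (trivial victim choice, no replacement policy or tags); it is not gem5's implementation.

    #include <cstdint>
    #include <vector>
    #include <list>
    #include <iostream>

    // Standalone sketch of the allocate-a-block flow. The block type and the
    // victim choice (first invalid frame, else frame 0) are illustrative
    // assumptions, not gem5's tags or replacement-policy code.
    struct Blk
    {
        bool valid = false;
        bool dirty = false;
        uint64_t addr = 0;
    };

    Blk *allocateBlock(std::vector<Blk> &set, uint64_t new_addr,
                       std::list<uint64_t> &writebacks)
    {
        Blk *victim = nullptr;
        for (auto &b : set)
            if (!b.valid) { victim = &b; break; }   // prefer an invalid frame
        if (!victim)
            victim = &set.front();                  // otherwise evict frame 0

        if (victim->valid && victim->dirty)
            writebacks.push_back(victim->addr);     // dirty victim needs a writeback

        *victim = Blk{true, false, new_addr};       // frame now holds the new block
        return victim;
    }

    int main()
    {
        std::vector<Blk> set{{true, true, 0x1000}, {true, false, 0x2000}};
        std::list<uint64_t> writebacks;
        allocateBlock(set, 0x3000, writebacks);
        std::cout << writebacks.size() << "\n";  // 1: the dirty block at 0x1000
        return 0;
    }
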
gem5::BaseCache::CacheStats::dataContractions
statistics::Scalar dataContractions
Number of data contractions (blocks that had their compression factor improved).
Definition: base.hh:1143
gem5::compression::Base::compress
virtual std::unique_ptr< CompressionData > compress(const std::vector< Chunk > &chunks, Cycles &comp_lat, Cycles &decomp_lat)=0
Apply the compression process to the cache line.
gem5::MSHR::getNumTargets
int getNumTargets() const
Returns the current number of allocated targets.
Definition: mshr.hh:430
gem5::Packet::writeDataToBlock
void writeDataToBlock(uint8_t *blk_data, int blkSize) const
Copy data from the packet to the provided block pointer, which is aligned to the given block size.
Definition: packet.hh:1305
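
Since the destination buffer holds a whole, block-aligned line, the copy amounts to a memcpy at the packet's offset within that line. A standalone sketch of the idea, with illustrative names rather than the actual Packet member:

    #include <cstdint>
    #include <cstring>
    #include <cassert>

    // Standalone sketch: copy a sub-block write's data into a buffer holding
    // the whole, block-aligned cache line.
    void writeDataToBlock(const uint8_t *pkt_data, uint64_t pkt_addr,
                          unsigned pkt_size, uint8_t *blk_data, unsigned blk_size)
    {
        const unsigned offset = pkt_addr & (blk_size - 1);  // offset within the line
        assert(offset + pkt_size <= blk_size);              // write must fit in the line
        std::memcpy(blk_data + offset, pkt_data, pkt_size);
    }

    int main()
    {
        uint8_t line[64] = {};
        const uint8_t payload[4] = {1, 2, 3, 4};
        writeDataToBlock(payload, 0x1008, sizeof(payload), line, sizeof(line));
        assert(line[8] == 1 && line[11] == 4);
        return 0;
    }
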
gem5::MSHR::isCleaning
bool isCleaning() const
Definition: mshr.hh:321
gem5::BaseCache::fillLatency
const Cycles fillLatency
The latency to fill a cache block.
Definition: base.hh:910
gem5::Packet::makeResponse
void makeResponse()
Take a request packet and modify it in place to be suitable for returning as a response to that reque...
Definition: packet.hh:1031
gem5::BaseCache::handleTimingReqHit
virtual void handleTimingReqHit(PacketPtr pkt, CacheBlk *blk, Tick request_time)
Definition: base.cc:224
gem5::BaseCache::evictBlock
virtual PacketPtr evictBlock(CacheBlk *blk)=0
Evict a cache block.
gem5::Clocked::tick
Tick tick
Definition: clocked_object.hh:68
gem5::BaseCache::addrRanges
const AddrRangeList addrRanges
The address range to which the cache responds on the CPU side.
Definition: base.hh:982
gem5::BaseCache::system
System * system
System we are currently operating in.
Definition: base.hh:986
gem5::WriteAllocator::delay
bool delay(Addr blk_addr)
Check whether we need to delay the current write.
Definition: base.hh:1427
gem5::BaseCache::mshrQueue
MSHRQueue mshrQueue
Miss status registers.
Definition: base.hh:344
gem5::BaseCache::memSidePort
MemSidePort memSidePort
Definition: base.hh:339
gem5::CacheBlk::wasPrefetched
bool wasPrefetched() const
Check if this block was the result of a hardware prefetch, yet to be touched.
Definition: cache_blk.hh:249
gem5::BaseCache::handleUncacheableWriteResp
void handleUncacheableWriteResp(PacketPtr pkt)
Handling the special case of uncacheable write responses to make recvTimingResp less cluttered.
Definition: base.cc:408
gem5::BaseCache::writebackClean
const bool writebackClean
Determine if clean lines should be written back or not.
Definition: base.hh:675
gem5::QueuedResponsePort::trySatisfyFunctional
bool trySatisfyFunctional(PacketPtr pkt)
Check the list of buffered packets against the supplied functional request.
Definition: qport.hh:98
gem5::MemCmd::WriteReq
@ WriteReq
Definition: packet.hh:89
gem5::CompressionBlk::checkExpansionContraction
OverwriteType checkExpansionContraction(const std::size_t size) const
Determines if changing the size of the block will cause a data expansion (new size is bigger) or cont...
Definition: super_blk.cc:148
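
Conceptually the check compares the new compressed size against the space the block currently occupies: bigger means expansion, smaller means contraction. The standalone sketch below uses a plain size comparison as an assumption; the real CompressionBlk logic reasons about the superblock's compression factor.

    #include <cstddef>
    #include <iostream>

    // Simplified standalone sketch of classifying an overwrite as a data
    // expansion or contraction. The fixed size comparison is an assumption,
    // not gem5's compression-factor based check.
    enum class OverwriteType { DATA_CONTRACTION, UNCHANGED, DATA_EXPANSION };

    OverwriteType
    checkExpansionContraction(std::size_t current_size_bits, std::size_t new_size_bits)
    {
        if (new_size_bits > current_size_bits)
            return OverwriteType::DATA_EXPANSION;    // no longer fits; may need evictions
        if (new_size_bits < current_size_bits)
            return OverwriteType::DATA_CONTRACTION;  // frees space in the superblock
        return OverwriteType::UNCHANGED;
    }

    int main()
    {
        // A block compressed to 256 bits overwritten with data that compresses
        // to 384 bits counts as an expansion.
        std::cout << (checkExpansionContraction(256, 384) ==
                      OverwriteType::DATA_EXPANSION) << "\n";  // prints 1
        return 0;
    }
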
gem5::CacheBlk::trackLoadLocked
void trackLoadLocked(PacketPtr pkt)
Track the fact that a load-locked was issued to the block.
Definition: cache_blk.hh:327
gem5::TaggedEntry::isSecure
bool isSecure() const
Check if this block holds data from the secure memory space.
Definition: tagged_entry.hh:64
gem5::CacheBlk::isSet
bool isSet(unsigned bits) const
Checks whether the given coherence bits are set.
Definition: cache_blk.hh:239
gem5::MSHR::promoteReadable
void promoteReadable()
Promotes deferred targets that do not require writable.
Definition: mshr.cc:643
gem5::BaseCache::CacheCmdStats::mshrUncacheableLatency
statistics::Vector mshrUncacheableLatency
Total cycle latency of each MSHR miss, per command and thread.
Definition: base.hh:1033
logging.hh
gem5::QueueEntry
A queue entry base class, to be used by both the MSHRs and write-queue entries.
Definition: queue_entry.hh:62
gem5::statistics::Group
Statistics container.
Definition: group.hh:93
gem5::BaseCache::CacheResponsePort::blocked
bool blocked
Definition: base.hh:295
gem5::TempCacheBlk::insert
void insert(const Addr addr, const bool is_secure) override
Insert the block by assigning it a tag and marking it valid.
Definition: cache_blk.hh:531
gem5::BaseCache::invalidateVisitor
void invalidateVisitor(CacheBlk &blk)
Cache block visitor that invalidates all blocks in the cache.
Definition: base.cc:1761
gem5::Packet::fromCache
bool fromCache() const
Definition: packet.hh:601
gem5::BaseCache::writebackTempBlockAtomic
void writebackTempBlockAtomic()
Send the outstanding tempBlock writeback.
Definition: base.hh:691
gem5::BaseCache::getNextQueueEntry
QueueEntry * getNextQueueEntry()
Return the next queue entry to service, either a pending miss from the MSHR queue,...
Definition: base.cc:774
gem5::Packet::trySatisfyFunctional
bool trySatisfyFunctional(PacketPtr other)
Check a functional request against a memory value stored in another packet (i.e.
Definition: packet.hh:1358
gem5::QueuedRequestPort::trySatisfyFunctional
bool trySatisfyFunctional(PacketPtr pkt)
Check the list of buffered packets against the supplied functional request.
Definition: qport.hh:163
gem5::BaseCache::serviceMSHRTargets
virtual void serviceMSHRTargets(MSHR *mshr, const PacketPtr pkt, CacheBlk *blk)=0
Service non-deferred MSHR targets using the received response.
gem5::BaseCache::lookupLatency
const Cycles lookupLatency
The latency of tag lookup of a cache.
Definition: base.hh:894
SUM_DEMAND
#define SUM_DEMAND(s)
gem5::WriteQueueEntry::getNumTargets
int getNumTargets() const
Returns the current number of allocated targets.
Definition: write_queue_entry.hh:142
gem5::MipsISA::r
r
Definition: pra_constants.hh:98
gem5::prefetch::Base::incrDemandMhsrMisses
void incrDemandMhsrMisses()
Definition: base.hh:397
gem5::CheckpointOut
std::ostream CheckpointOut
Definition: serialize.hh:66
gem5::BaseCache::allocateMissBuffer
MSHR * allocateMissBuffer(PacketPtr pkt, Tick time, bool sched_send=true)
Definition: base.hh:1173
gem5::WriteAllocator::coalesce
bool coalesce() const
Should writes be coalesced? This is true if the mode is set to NO_ALLOCATE.
Definition: base.hh:1396
gem5::CacheBlkPrintWrapper
Simple class to provide virtual print() method on cache blocks without allocating a vtable pointer fo...
Definition: cache_blk.hh:554
gem5::CompressionBlk::DATA_CONTRACTION
@ DATA_CONTRACTION
New data contents are considered smaller than previous contents.
Definition: super_blk.hh:77
gem5::BaseTags::findBlock
virtual CacheBlk * findBlock(Addr addr, bool is_secure) const
Finds the block in the cache without touching it.
Definition: base.cc:80
gem5::Packet::isExpressSnoop
bool isExpressSnoop() const
Definition: packet.hh:689
gem5::BaseCache::doWritebacks
virtual void doWritebacks(PacketList &writebacks, Tick forward_time)=0
Insert writebacks into the write buffer.
gem5::BaseCache::setBlocked
void setBlocked(BlockedCause cause)
Marks the access path of the cache as blocked for the given cause.
Definition: base.hh:1236
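
Blocking is tracked per cause (for example Blocked_NoMSHRs), and the cache stays blocked while any cause is still outstanding. A standalone sketch of that flag-per-cause bookkeeping, with assumed enum values rather than BaseCache's exact ones:

    #include <cstdint>
    #include <cassert>

    // Standalone sketch of per-cause blocking flags: the cache remains
    // blocked as long as any cause's bit is set. Enum values and bookkeeping
    // are illustrative assumptions.
    enum BlockedCause { Blocked_NoMSHRs = 0, Blocked_NoWBBuffers = 1, Blocked_NoTargets = 2 };

    struct BlockedState
    {
        uint8_t blocked = 0;   // one bit per cause

        void setBlocked(BlockedCause cause)   { blocked |= (1 << cause); }
        void clearBlocked(BlockedCause cause) { blocked &= ~(1 << cause); }
        bool isBlocked() const                { return blocked != 0; }
    };

    int main()
    {
        BlockedState s;
        s.setBlocked(Blocked_NoMSHRs);       // MSHRs exhausted: stop accepting requests
        s.setBlocked(Blocked_NoTargets);
        s.clearBlocked(Blocked_NoMSHRs);     // an MSHR was freed
        assert(s.isBlocked());               // still blocked on the other cause
        return 0;
    }
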
gem5::BaseTags::invalidate
virtual void invalidate(CacheBlk *blk)
This function updates the tags when a block is invalidated.
Definition: base.hh:254
gem5_assert
#define gem5_assert(cond,...)
The assert macro will function like a normal assert, but will use panic instead of straight abort().
Definition: logging.hh:318
gem5::Clocked::ticksToCycles
Cycles ticksToCycles(Tick t) const
Definition: clocked_object.hh:222
std::list
STL list class.
Definition: stl.hh:51
gem5::Packet::getAddr
Addr getAddr() const
Definition: packet.hh:781
gem5::BaseCache::writebackVisitor
void writebackVisitor(CacheBlk &blk)
Cache block visitor that writes back dirty cache blocks using functional writes.
Definition: base.cc:1738
gem5::BaseCache::CacheCmdStats
Definition: base.hh:988
gem5::BaseCache::numTarget
const int numTarget
The number of targets for each MSHR.
Definition: base.hh:925
gem5::BaseCache::createMissPacket
virtual PacketPtr createMissPacket(PacketPtr cpu_pkt, CacheBlk *blk, bool needs_writable, bool is_whole_line_write) const =0
Create an appropriate downstream bus request packet.
gem5::BaseTags::extractBlkOffset
int extractBlkOffset(Addr addr) const
Calculate the block offset of an address.
Definition: base.hh:225
gem5::CompressionBlk::OverwriteType
OverwriteType
When an overwrite happens, the data size may change and no longer fit in its current container.
Definition: super_blk.hh:74
gem5::CompressionBlk::setDecompressionLatency
void setDecompressionLatency(const Cycles lat)
Set number of cycles needed to decompress this block.
Definition: super_blk.cc:135
gem5::QueuedResponsePort::schedTimingResp
void schedTimingResp(PacketPtr pkt, Tick when)
Schedule the sending of a timing response.
Definition: qport.hh:93
gem5::compression::Base::setDecompressionLatency
static void setDecompressionLatency(CacheBlk *blk, const Cycles lat)
Set the decompression latency of compressed block.
Definition: base.cc:215
fatal_if
#define fatal_if(cond,...)
Conditional fatal macro that checks the supplied condition and only causes a fatal error if the condi...
Definition: logging.hh:226
gem5::BaseCache::functionalAccess
virtual void functionalAccess(PacketPtr pkt, bool from_cpu_side)
Performs the access specified by the request.
Definition: base.cc:639
gem5::prefetch::Base::getPacket
virtual PacketPtr getPacket()=0
gem5::BaseCache::CacheResponsePort::sendRetryEvent
EventFunctionWrapper sendRetryEvent
Definition: base.hh:303
gem5::BaseCache::handleEvictions
bool handleEvictions(std::vector< CacheBlk * > &evict_blks, PacketList &writebacks)
Try to evict the given blocks.
Definition: base.cc:865
gem5
Reference material can be found at the JEDEC website: UFS standard http://www.jedec....
Definition: tlb.cc:60
gem5::QueueEntry::sendPacket
virtual bool sendPacket(BaseCache &cache)=0
Send this queue entry as a downstream packet, with the exact behaviour depending on the specific entr...
gem5::statistics::total
const FlagsType total
Print the total.
Definition: info.hh:60
gem5::BaseCache::forwardLatency
const Cycles forwardLatency
This is the forward latency of the cache.
Definition: base.hh:907
gem5::BaseCache::nextQueueReadyTime
Tick nextQueueReadyTime() const
Find next request ready time from among possible sources.
Definition: base.cc:1774
gem5::MSHR::getTarget
QueueEntry::Target * getTarget() override
Returns a pointer to the first target.
Definition: mshr.hh:457
gem5::BaseCache::CpuSidePort::getAddrRanges
virtual AddrRangeList getAddrRanges() const override
Get a list of the non-overlapping address ranges the owner is responsible for.
Definition: base.cc:2519
gem5::QueueEntry::inService
bool inService
True if the entry has been sent downstream.
Definition: queue_entry.hh:110
gem5::BaseCache::CpuSidePort::recvAtomic
virtual Tick recvAtomic(PacketPtr pkt) override
Receive an atomic request packet from the peer.
Definition: base.cc:2494
gem5::Packet::isResponse
bool isResponse() const
Definition: packet.hh:587
gem5::BaseTags::findVictim
virtual CacheBlk * findVictim(Addr addr, const bool is_secure, const std::size_t size, std::vector< CacheBlk * > &evict_blks)=0
Find replacement victim based on address.
gem5::BaseCache::handleFill
CacheBlk * handleFill(PacketPtr pkt, CacheBlk *blk, PacketList &writebacks, bool allocate)
Handle a fill operation caused by a received packet.
Definition: base.cc:1435
gem5::Packet::getSize
unsigned getSize() const
Definition: packet.hh:791
gem5::Named::_name
const std::string _name
Definition: named.hh:41
gem5::BaseCache::CacheStats::CacheStats
CacheStats(BaseCache &c)
Definition: base.cc:2123
gem5::Queue::findPending
Entry * findPending(const QueueEntry *entry) const
Find any pending requests that overlap the given request of a different queue.
Definition: queue.hh:207
gem5::BaseCache::calculateAccessLatency
Cycles calculateAccessLatency(const CacheBlk *blk, const uint32_t delay, const Cycles lookup_lat) const
Calculate the access latency in ticks given a tag lookup latency and whether the access was a hit or miss.
Definition: base.cc:1120
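
The latency is a cycle count converted to ticks through the clock period, with a hit paying the data access on top of the tag lookup. The standalone sketch below makes those assumptions explicit and omits the block-ready delay that the real function also folds in.

    #include <cstdint>
    #include <iostream>

    using Tick = uint64_t;
    using Cycles = uint64_t;

    // Standalone sketch: turn a tag-lookup latency (in cycles) into an access
    // latency in ticks. Assumptions: fixed clock period, a hit adds a data
    // access latency, a miss pays only the lookup.
    Tick
    calculateAccessLatency(bool hit, Cycles lookup_lat, Cycles data_lat, Tick clock_period)
    {
        const Cycles total = hit ? (lookup_lat + data_lat) : lookup_lat;
        return total * clock_period;   // cycles -> ticks
    }

    int main()
    {
        const Tick period = 500;  // ticks per cycle, e.g. 2 GHz with 1 ps ticks
        std::cout << calculateAccessLatency(true, 2, 2, period) << "\n";   // 2000 ticks
        std::cout << calculateAccessLatency(false, 2, 2, period) << "\n";  // 1000 ticks
        return 0;
    }
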
gem5::Packet::hasRespData
bool hasRespData() const
Definition: packet.hh:604
gem5::QueueEntry::getTarget
virtual Target * getTarget()=0
Returns a pointer to the first target.
gem5::Event::scheduled
bool scheduled() const
Determine if the current event is scheduled.
Definition: eventq.hh:465
gem5::BaseCache::writebackBlk
PacketPtr writebackBlk(CacheBlk *blk)
Create a writeback request for the given block.
Definition: base.cc:1617
gem5::BaseCache::CpuSidePort::tryTiming
virtual bool tryTiming(PacketPtr pkt) override
Availability request from the peer.
Definition: base.cc:2461
gem5::TaggedEntry::isValid
virtual bool isValid() const
Checks if the entry is valid.
Definition: tagged_entry.hh:57
gem5::WriteAllocator::resetDelay
void resetDelay(Addr blk_addr)
Clear delay counter for the input block.
Definition: base.hh:1441
gem5::Packet::isEviction
bool isEviction() const
Definition: packet.hh:599
gem5::Packet::isInvalidate
bool isInvalidate() const
Definition: packet.hh:598
panic
#define panic(...)
This implements a cprintf based panic() function.
Definition: logging.hh:178
gem5::BaseCache::satisfyRequest
virtual void satisfyRequest(PacketPtr pkt, CacheBlk *blk, bool deferred_response=false, bool pending_downgrade=false)
Perform any necessary updates to the block and perform any data exchange between the packet and the b...
Definition: base.cc:1013
gem5::MemCmd::HardPFResp
@ HardPFResp
Definition: packet.hh:100
gem5::Clocked::clockPeriod
Tick clockPeriod() const
Definition: clocked_object.hh:217
gem5::ArmISA::mode
Bitfield< 4, 0 > mode
Definition: misc_types.hh:74
gem5::X86ISA::addr
Bitfield< 3 > addr
Definition: types.hh:84
gem5::Packet::isDemand
bool isDemand() const
Definition: packet.hh:584
gem5::CacheBlk::ReadableBit
@ ReadableBit
Read permission.
Definition: cache_blk.hh:85
gem5::BaseCache::responseLatency
const Cycles responseLatency
The latency of sending a response to the upper-level cache/core on a linefill.
Definition: base.hh:917
gem5::BaseCache::compressor
compression::Base * compressor
Compression method being used.
Definition: base.hh:353

Generated on Tue Feb 8 2022 11:47:02 for gem5 by doxygen 1.8.17