gem5  v21.2.0.0
cache.cc
1 /*
2  * Copyright (c) 2010-2019 ARM Limited
3  * All rights reserved.
4  *
5  * The license below extends only to copyright in the software and shall
6  * not be construed as granting a license to any other intellectual
7  * property including but not limited to intellectual property relating
8  * to a hardware implementation of the functionality of the software
9  * licensed hereunder. You may use the software subject to the license
10  * terms below provided that you ensure that this notice is replicated
11  * unmodified and in its entirety in all distributions of the software,
12  * modified or unmodified, in source code or in binary form.
13  *
14  * Copyright (c) 2002-2005 The Regents of The University of Michigan
15  * Copyright (c) 2010,2015 Advanced Micro Devices, Inc.
16  * All rights reserved.
17  *
18  * Redistribution and use in source and binary forms, with or without
19  * modification, are permitted provided that the following conditions are
20  * met: redistributions of source code must retain the above copyright
21  * notice, this list of conditions and the following disclaimer;
22  * redistributions in binary form must reproduce the above copyright
23  * notice, this list of conditions and the following disclaimer in the
24  * documentation and/or other materials provided with the distribution;
25  * neither the name of the copyright holders nor the names of its
26  * contributors may be used to endorse or promote products derived from
27  * this software without specific prior written permission.
28  *
29  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
30  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
31  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
32  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
33  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
34  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
35  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
36  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
37  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
38  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
39  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
40  */
41 
42 /**
43  * @file
44  * Cache definitions.
45  */
46 
47 #include "mem/cache/cache.hh"
48 
49 #include <cassert>
50 
51 #include "base/compiler.hh"
52 #include "base/logging.hh"
53 #include "base/trace.hh"
54 #include "base/types.hh"
55 #include "debug/Cache.hh"
56 #include "debug/CacheTags.hh"
57 #include "debug/CacheVerbose.hh"
58 #include "enums/Clusivity.hh"
59 #include "mem/cache/cache_blk.hh"
60 #include "mem/cache/mshr.hh"
61 #include "mem/cache/tags/base.hh"
62 #include "mem/cache/write_queue_entry.hh"
63 #include "mem/request.hh"
64 #include "params/Cache.hh"
65 
66 namespace gem5
67 {
68 
69 Cache::Cache(const CacheParams &p)
70  : BaseCache(p, p.system->cacheLineSize()),
71  doFastWrites(true)
72 {
73  assert(p.tags);
74  assert(p.replacement_policy);
75 }
76 
77 void
78 Cache::satisfyRequest(PacketPtr pkt, CacheBlk *blk,
79  bool deferred_response, bool pending_downgrade)
80 {
81  BaseCache::satisfyRequest(pkt, blk);
82 
83  if (pkt->isRead()) {
84  // determine if this read is from a (coherent) cache or not
85  if (pkt->fromCache()) {
86  assert(pkt->getSize() == blkSize);
87  // special handling for coherent block requests from
88  // upper-level caches
89  if (pkt->needsWritable()) {
90  // sanity check
91  assert(pkt->cmd == MemCmd::ReadExReq ||
92  pkt->cmd == MemCmd::SCUpgradeFailReq);
93  assert(!pkt->hasSharers());
94 
95  // if we have a dirty copy, make sure the recipient
96  // keeps it marked dirty (in the modified state)
97  if (blk->isSet(CacheBlk::DirtyBit)) {
98  pkt->setCacheResponding();
99  blk->clearCoherenceBits(CacheBlk::DirtyBit);
100  }
101  } else if (blk->isSet(CacheBlk::WritableBit) &&
102  !pending_downgrade && !pkt->hasSharers() &&
103  pkt->cmd != MemCmd::ReadCleanReq) {
104  // we can give the requestor a writable copy on a read
105  // request if:
106  // - we have a writable copy at this level (& below)
107  // - we don't have a pending snoop from below
108  // signaling another read request
109  // - no other cache above has a copy (otherwise it
110  // would have set hasSharers flag when
111  // snooping the packet)
112  // - the read has not explicitly asked for a clean
113  // copy of the line
114  if (blk->isSet(CacheBlk::DirtyBit)) {
115  // special considerations if we're owner:
116  if (!deferred_response) {
117  // respond with the line in Modified state
118  // (cacheResponding set, hasSharers not set)
119  pkt->setCacheResponding();
120 
121  // if this cache is mostly inclusive, we
122  // keep the block in the Exclusive state,
123  // and pass it upwards as Modified
124  // (writable and dirty), hence we have
125  // multiple caches, all on the same path
126  // towards memory, all considering the
127  // same block writable, but only one
128  // considering it Modified
129 
130  // we get away with multiple caches (on
131  // the same path to memory) considering
132  // the block writeable as we always enter
133  // the cache hierarchy through a cache,
134  // and first snoop upwards in all other
135  // branches
136  blk->clearCoherenceBits(CacheBlk::DirtyBit);
137  } else {
138  // if we're responding after our own miss,
139  // there's a window where the recipient didn't
140  // know it was getting ownership and may not
141  // have responded to snoops correctly, so we
142  // have to respond with a shared line
143  pkt->setHasSharers();
144  }
145  }
146  } else {
147  // otherwise only respond with a shared copy
148  pkt->setHasSharers();
149  }
150  }
151  }
152 }
153 
155 //
156 // Access path: requests coming in from the CPU side
157 //
159 
160 bool
161 Cache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat,
162  PacketList &writebacks)
163 {
164 
165  if (pkt->req->isUncacheable()) {
166  assert(pkt->isRequest());
167 
168  gem5_assert(!(isReadOnly && pkt->isWrite()),
169  "Should never see a write in a read-only cache %s\n",
170  name());
171 
172  DPRINTF(Cache, "%s for %s\n", __func__, pkt->print());
173 
174  // flush and invalidate any existing block
175  CacheBlk *old_blk(tags->findBlock(pkt->getAddr(), pkt->isSecure()));
176  if (old_blk && old_blk->isValid()) {
177  BaseCache::evictBlock(old_blk, writebacks);
178  }
179 
180  blk = nullptr;
181  // lookupLatency is the latency in case the request is uncacheable.
182  lat = lookupLatency;
183  return false;
184  }
185 
186  return BaseCache::access(pkt, blk, lat, writebacks);
187 }
188 
189 void
190 Cache::doWritebacks(PacketList& writebacks, Tick forward_time)
191 {
192  while (!writebacks.empty()) {
193  PacketPtr wbPkt = writebacks.front();
194  // We use forwardLatency here because we are copying writebacks to
195  // write buffer.
196 
197  // Call isCachedAbove for Writebacks, CleanEvicts and
198  // WriteCleans to discover if the block is cached above.
199  if (isCachedAbove(wbPkt)) {
200  if (wbPkt->cmd == MemCmd::CleanEvict) {
201  // Delete CleanEvict because cached copies exist above. The
202  // packet destructor will delete the request object because
203  // this is a non-snoop request packet which does not require a
204  // response.
205  delete wbPkt;
206  } else if (wbPkt->cmd == MemCmd::WritebackClean) {
207  // clean writeback, do not send since the block is
208  // still cached above
209  assert(writebackClean);
210  delete wbPkt;
211  } else {
212  assert(wbPkt->cmd == MemCmd::WritebackDirty ||
213  wbPkt->cmd == MemCmd::WriteClean);
214  // Set BLOCK_CACHED flag in Writeback and send below, so that
215  // the Writeback does not reset the bit corresponding to this
216  // address in the snoop filter below.
217  wbPkt->setBlockCached();
218  allocateWriteBuffer(wbPkt, forward_time);
219  }
220  } else {
221  // If the block is not cached above, send packet below. Both
222  // CleanEvict and Writeback with BLOCK_CACHED flag cleared will
223  // reset the bit corresponding to this address in the snoop filter
224  // below.
225  allocateWriteBuffer(wbPkt, forward_time);
226  }
227  writebacks.pop_front();
228  }
229 }
230 
231 void
232 Cache::doWritebacksAtomic(PacketList& writebacks)
233 {
234  while (!writebacks.empty()) {
235  PacketPtr wbPkt = writebacks.front();
236  // Call isCachedAbove for both Writebacks and CleanEvicts. If
237  // isCachedAbove returns true we set BLOCK_CACHED flag in Writebacks
238  // and discard CleanEvicts.
239  if (isCachedAbove(wbPkt, false)) {
240  if (wbPkt->cmd == MemCmd::WritebackDirty ||
241  wbPkt->cmd == MemCmd::WriteClean) {
242  // Set BLOCK_CACHED flag in Writeback and send below,
243  // so that the Writeback does not reset the bit
244  // corresponding to this address in the snoop filter
245  // below. We can discard CleanEvicts because cached
246  // copies exist above. Atomic mode isCachedAbove
247  // modifies packet to set BLOCK_CACHED flag
248  memSidePort.sendAtomic(wbPkt);
249  }
250  } else {
251  // If the block is not cached above, send packet below. Both
252  // CleanEvict and Writeback with BLOCK_CACHED flag cleared will
253  // reset the bit corresponding to this address in the snoop filter
254  // below.
255  memSidePort.sendAtomic(wbPkt);
256  }
257  writebacks.pop_front();
258  // In case of CleanEvicts, the packet destructor will delete the
259  // request object because this is a non-snoop request packet which
260  // does not require a response.
261  delete wbPkt;
262  }
263 }
264 
265 
266 void
267 Cache::recvTimingSnoopResp(PacketPtr pkt)
268 {
269  DPRINTF(Cache, "%s for %s\n", __func__, pkt->print());
270 
271  // determine if the response is from a snoop request we created
272  // (in which case it should be in the outstandingSnoop), or if we
273  // merely forwarded someone else's snoop request
274  const bool forwardAsSnoop = outstandingSnoop.find(pkt->req) ==
275  outstandingSnoop.end();
276 
277  if (!forwardAsSnoop) {
278  // the packet came from this cache, so sink it here and do not
279  // forward it
280  assert(pkt->cmd == MemCmd::HardPFResp);
281 
282  outstandingSnoop.erase(pkt->req);
283 
284  DPRINTF(Cache, "Got prefetch response from above for addr "
285  "%#llx (%s)\n", pkt->getAddr(), pkt->isSecure() ? "s" : "ns");
286  recvTimingResp(pkt);
287  return;
288  }
289 
290  // forwardLatency is set here because there is a response from an
291  // upper level cache.
292  // To pay the delay that occurs if the packet comes from the bus,
293  // we also charge the headerDelay.
294  Tick snoop_resp_time = clockEdge(forwardLatency) + pkt->headerDelay;
295  // Reset the timing of the packet.
296  pkt->headerDelay = pkt->payloadDelay = 0;
297  memSidePort.schedTimingSnoopResp(pkt, snoop_resp_time);
298 }
299 
300 void
301 Cache::promoteWholeLineWrites(PacketPtr pkt)
302 {
303  // Cache line clearing instructions
304  if (doFastWrites && (pkt->cmd == MemCmd::WriteReq) &&
305  (pkt->getSize() == blkSize) && (pkt->getOffset(blkSize) == 0) &&
306  !pkt->isMaskedWrite()) {
307  pkt->cmd = MemCmd::WriteLineReq;
308  DPRINTF(Cache, "packet promoted from Write to WriteLineReq\n");
309  }
310 }
311 
312 void
313 Cache::handleTimingReqHit(PacketPtr pkt, CacheBlk *blk, Tick request_time)
314 {
315  // should never be satisfying an uncacheable access as we
316  // flush and invalidate any existing block as part of the
317  // lookup
318  assert(!pkt->req->isUncacheable());
319 
320  BaseCache::handleTimingReqHit(pkt, blk, request_time);
321 }
322 
323 void
324 Cache::handleTimingReqMiss(PacketPtr pkt, CacheBlk *blk, Tick forward_time,
325  Tick request_time)
326 {
327  if (pkt->req->isUncacheable()) {
328  // ignore any existing MSHR if we are dealing with an
329  // uncacheable request
330 
331  // should have flushed and have no valid block
332  assert(!blk || !blk->isValid());
333 
334  stats.cmdStats(pkt).mshrUncacheable[pkt->req->requestorId()]++;
335 
336  if (pkt->isWrite()) {
337  allocateWriteBuffer(pkt, forward_time);
338  } else {
339  assert(pkt->isRead());
340 
341  // uncacheable accesses always allocate a new MSHR
342 
343  // Here we are using forward_time, modelling the latency of
344  // a miss (outbound) just as forwardLatency, neglecting the
345  // lookupLatency component.
346  allocateMissBuffer(pkt, forward_time);
347  }
348 
349  return;
350  }
351 
352  Addr blk_addr = pkt->getBlockAddr(blkSize);
353 
354  MSHR *mshr = mshrQueue.findMatch(blk_addr, pkt->isSecure());
355 
356  // Software prefetch handling:
357  // To keep the core from waiting on data it won't look at
358  // anyway, send back a response with dummy data. Miss handling
359  // will continue asynchronously. Unfortunately, the core will
360  // insist upon freeing the original Packet/Request, so we have to
361  // create a new pair with a different lifecycle. Note that this
362  // processing happens before any MSHR munging on the behalf of
363  // this request because this new Request will be the one stored
364  // into the MSHRs, not the original.
365  if (pkt->cmd.isSWPrefetch()) {
366  assert(pkt->needsResponse());
367  assert(pkt->req->hasPaddr());
368  assert(!pkt->req->isUncacheable());
369 
370  // There's no reason to add a prefetch as an additional target
371  // to an existing MSHR. If an outstanding request is already
372  // in progress, there is nothing for the prefetch to do.
373  // If this is the case, we don't even create a request at all.
374  PacketPtr pf = nullptr;
375 
376  if (!mshr) {
377  // copy the request and create a new SoftPFReq packet
378  RequestPtr req = std::make_shared<Request>(pkt->req->getPaddr(),
379  pkt->req->getSize(),
380  pkt->req->getFlags(),
381  pkt->req->requestorId());
382  pf = new Packet(req, pkt->cmd);
383  pf->allocate();
384  assert(pf->matchAddr(pkt));
385  assert(pf->getSize() == pkt->getSize());
386  }
387 
388  pkt->makeTimingResponse();
389 
390  // request_time is used here, taking into account lat and the delay
391  // charged if the packet comes from the xbar.
392  cpuSidePort.schedTimingResp(pkt, request_time);
393 
394  // If an outstanding request is in progress (we found an
395  // MSHR) this is set to null
396  pkt = pf;
397  }
398 
399  BaseCache::handleTimingReqMiss(pkt, mshr, blk, forward_time, request_time);
400 }
401 
402 void
403 Cache::recvTimingReq(PacketPtr pkt)
404 {
405  DPRINTF(CacheTags, "%s tags:\n%s\n", __func__, tags->print());
406 
407  promoteWholeLineWrites(pkt);
408 
409  if (pkt->cacheResponding()) {
410  // a cache above us (but not where the packet came from) is
411  // responding to the request, in other words it has the line
412  // in Modified or Owned state
413  DPRINTF(Cache, "Cache above responding to %s: not responding\n",
414  pkt->print());
415 
416  // if the packet needs the block to be writable, and the cache
417  // that has promised to respond (setting the cache responding
418  // flag) is not providing writable (it is in Owned rather than
419  // the Modified state), we know that there may be other Shared
420  // copies in the system; go out and invalidate them all
421  assert(pkt->needsWritable() && !pkt->responderHadWritable());
422 
423  // an upstream cache that had the line in Owned state
424  // (dirty, but not writable), is responding and thus
425  // transferring the dirty line from one branch of the
426  // cache hierarchy to another
427 
428  // send out an express snoop and invalidate all other
429  // copies (snooping a packet that needs writable is the
430  // same as an invalidation), thus turning the Owned line
431  // into a Modified line, note that we don't invalidate the
432  // block in the current cache or any other cache on the
433  // path to memory
434 
435  // create a downstream express snoop with cleared packet
436  // flags, there is no need to allocate any data as the
437  // packet is merely used to co-ordinate state transitions
438  Packet *snoop_pkt = new Packet(pkt, true, false);
439 
440  // also reset the bus time that the original packet has
441  // not yet paid for
442  snoop_pkt->headerDelay = snoop_pkt->payloadDelay = 0;
443 
444  // make this an instantaneous express snoop, and let the
445  // other caches in the system know that another cache
446  // is responding, because we have found the authoritative
447  // copy (Modified or Owned) that will supply the right
448  // data
449  snoop_pkt->setExpressSnoop();
450  snoop_pkt->setCacheResponding();
451 
452  // this express snoop travels towards the memory, and at
453  // every crossbar it is snooped upwards thus reaching
454  // every cache in the system
455  [[maybe_unused]] bool success = memSidePort.sendTimingReq(snoop_pkt);
456  // express snoops always succeed
457  assert(success);
458 
459  // main memory will delete the snoop packet
460 
461  // queue for deletion, as opposed to immediate deletion, as
462  // the sending cache is still relying on the packet
463  pendingDelete.reset(pkt);
464 
465  // no need to take any further action in this particular cache
466  // as an upstream cache has already committed to responding,
467  // and we have already sent out any express snoops in the
468  // section above to ensure all other copies in the system are
469  // invalidated
470  return;
471  }
472 
473  BaseCache::recvTimingReq(pkt);
474 }
475 
476 PacketPtr
477 Cache::createMissPacket(PacketPtr cpu_pkt, CacheBlk *blk,
478  bool needsWritable,
479  bool is_whole_line_write) const
480 {
481  // should never see evictions here
482  assert(!cpu_pkt->isEviction());
483 
484  bool blkValid = blk && blk->isValid();
485 
486  if (cpu_pkt->req->isUncacheable() ||
487  (!blkValid && cpu_pkt->isUpgrade()) ||
488  cpu_pkt->cmd == MemCmd::InvalidateReq || cpu_pkt->isClean()) {
489  // uncacheable requests and upgrades from upper-level caches
490  // that missed completely just go through as is
491  return nullptr;
492  }
493 
494  assert(cpu_pkt->needsResponse());
495 
496  MemCmd cmd;
497  // @TODO make useUpgrades a parameter.
498  // Note that ownership protocols require upgrade, otherwise a
499  // write miss on a shared owned block will generate a ReadExcl,
500  // which will clobber the owned copy.
501  const bool useUpgrades = true;
502  assert(cpu_pkt->cmd != MemCmd::WriteLineReq || is_whole_line_write);
503  if (is_whole_line_write) {
504  assert(!blkValid || !blk->isSet(CacheBlk::WritableBit));
505  // forward as invalidate to all other caches, this gives us
506  // the line in Exclusive state, and invalidates all other
507  // copies
508  cmd = MemCmd::InvalidateReq;
509  } else if (blkValid && useUpgrades) {
510  // only reason to be here is that blk is read only and we need
511  // it to be writable
512  assert(needsWritable);
513  assert(!blk->isSet(CacheBlk::WritableBit));
514  cmd = cpu_pkt->isLLSC() ? MemCmd::SCUpgradeReq : MemCmd::UpgradeReq;
515  } else if (cpu_pkt->cmd == MemCmd::SCUpgradeFailReq ||
516  cpu_pkt->cmd == MemCmd::StoreCondFailReq) {
517  // Even though this SC will fail, we still need to send out the
518  // request and get the data to supply it to other snoopers in the case
519  // where the determination that the StoreCond fails is delayed due to
520  // all caches not being on the same local bus.
521  cmd = MemCmd::SCUpgradeFailReq;
522  } else {
523  // block is invalid
524 
525  // If the request does not need a writable there are two cases
526  // where we need to ensure the response will not fetch the
527  // block in dirty state:
528  // * this cache is read only and it does not perform
529  // writebacks,
530  // * this cache is mostly exclusive and will not fill (since
531  // it does not fill it will have to writeback the dirty data
532  // immediately which generates unnecessary writebacks).
533  bool force_clean_rsp = isReadOnly || clusivity == enums::mostly_excl;
534  cmd = needsWritable ? MemCmd::ReadExReq :
535  (force_clean_rsp ? MemCmd::ReadCleanReq : MemCmd::ReadSharedReq);
536  }
537  PacketPtr pkt = new Packet(cpu_pkt->req, cmd, blkSize);
538 
539  // if there are upstream caches that have already marked the
540  // packet as having sharers (not passing writable), pass that info
541  // downstream
542  if (cpu_pkt->hasSharers() && !needsWritable) {
543  // note that cpu_pkt may have spent a considerable time in the
544  // MSHR queue and that the information could possibly be out
545  // of date, however, there is no harm in conservatively
546  // assuming the block has sharers
547  pkt->setHasSharers();
548  DPRINTF(Cache, "%s: passing hasSharers from %s to %s\n",
549  __func__, cpu_pkt->print(), pkt->print());
550  }
551 
552  // the packet should be block aligned
553  assert(pkt->getAddr() == pkt->getBlockAddr(blkSize));
554 
555  pkt->allocate();
556  DPRINTF(Cache, "%s: created %s from %s\n", __func__, pkt->print(),
557  cpu_pkt->print());
558  return pkt;
559 }
560 
561 
562 Cycles
563 Cache::handleAtomicReqMiss(PacketPtr pkt, CacheBlk *&blk,
564  PacketList &writebacks)
565 {
566  // deal with the packets that go through the write path of
567  // the cache, i.e. any evictions and writes
568  if (pkt->isEviction() || pkt->cmd == MemCmd::WriteClean ||
569  (pkt->req->isUncacheable() && pkt->isWrite())) {
570  Cycles latency = ticksToCycles(memSidePort.sendAtomic(pkt));
571 
572  // at this point, if the request was an uncacheable write
573  // request, it has been satisfied by a memory below and the
574  // packet carries the response back
575  assert(!(pkt->req->isUncacheable() && pkt->isWrite()) ||
576  pkt->isResponse());
577 
578  return latency;
579  }
580 
581  // only misses left
582 
583  PacketPtr bus_pkt = createMissPacket(pkt, blk, pkt->needsWritable(),
584  pkt->isWholeLineWrite(blkSize));
585 
586  bool is_forward = (bus_pkt == nullptr);
587 
588  if (is_forward) {
589  // just forwarding the same request to the next level
590  // no local cache operation involved
591  bus_pkt = pkt;
592  }
593 
594  DPRINTF(Cache, "%s: Sending an atomic %s\n", __func__,
595  bus_pkt->print());
596 
597  const std::string old_state = blk ? blk->print() : "";
598 
599  Cycles latency = ticksToCycles(memSidePort.sendAtomic(bus_pkt));
600 
601  bool is_invalidate = bus_pkt->isInvalidate();
602 
603  // We are now dealing with the response handling
604  DPRINTF(Cache, "%s: Receive response: %s for %s\n", __func__,
605  bus_pkt->print(), old_state);
606 
607  // If packet was a forward, the response (if any) is already
608  // in place in the bus_pkt == pkt structure, so we don't need
609  // to do anything. Otherwise, use the separate bus_pkt to
610  // generate response to pkt and then delete it.
611  if (!is_forward) {
612  if (pkt->needsResponse()) {
613  assert(bus_pkt->isResponse());
614  if (bus_pkt->isError()) {
615  pkt->makeAtomicResponse();
616  pkt->copyError(bus_pkt);
617  } else if (pkt->isWholeLineWrite(blkSize)) {
618  // note the use of pkt, not bus_pkt here.
619 
620  // write-line request to the cache that promoted
621  // the write to a whole line
622  const bool allocate = allocOnFill(pkt->cmd) &&
623  (!writeAllocator || writeAllocator->allocate());
624  blk = handleFill(bus_pkt, blk, writebacks, allocate);
625  assert(blk != NULL);
626  is_invalidate = false;
627  satisfyRequest(pkt, blk);
628  } else if (bus_pkt->isRead() ||
629  bus_pkt->cmd == MemCmd::UpgradeResp) {
630  // we're updating cache state to allow us to
631  // satisfy the upstream request from the cache
632  blk = handleFill(bus_pkt, blk, writebacks,
633  allocOnFill(pkt->cmd));
634  satisfyRequest(pkt, blk);
635  maintainClusivity(pkt->fromCache(), blk);
636  } else {
637  // we're satisfying the upstream request without
638  // modifying cache state, e.g., a write-through
639  pkt->makeAtomicResponse();
640  }
641  }
642  delete bus_pkt;
643  }
644 
645  if (is_invalidate && blk && blk->isValid()) {
646  invalidateBlock(blk);
647  }
648 
649  return latency;
650 }
651 
652 Tick
653 Cache::recvAtomic(PacketPtr pkt)
654 {
655  promoteWholeLineWrites(pkt);
656 
657  // follow the same flow as in recvTimingReq, and check if a cache
658  // above us is responding
659  if (pkt->cacheResponding()) {
660  assert(!pkt->req->isCacheInvalidate());
661  DPRINTF(Cache, "Cache above responding to %s: not responding\n",
662  pkt->print());
663 
664  // if a cache is responding, and it had the line in Owned
665  // rather than Modified state, we need to invalidate any
666  // copies that are not on the same path to memory
667  assert(pkt->needsWritable() && !pkt->responderHadWritable());
668 
669  return memSidePort.sendAtomic(pkt);
670  }
671 
672  return BaseCache::recvAtomic(pkt);
673 }
674 
675 
677 //
678 // Response handling: responses from the memory side
679 //
681 
682 
683 void
684 Cache::serviceMSHRTargets(MSHR *mshr, const PacketPtr pkt, CacheBlk *blk)
685 {
686  QueueEntry::Target *initial_tgt = mshr->getTarget();
687  // First offset for critical word first calculations
688  const int initial_offset = initial_tgt->pkt->getOffset(blkSize);
689 
690  const bool is_error = pkt->isError();
691  // allow invalidation responses originating from write-line
692  // requests to be discarded
693  bool is_invalidate = pkt->isInvalidate() &&
694  !mshr->wasWholeLineWrite;
695 
696  MSHR::TargetList targets = mshr->extractServiceableTargets(pkt);
697  for (auto &target: targets) {
698  Packet *tgt_pkt = target.pkt;
699  switch (target.source) {
700  case MSHR::Target::FromCPU:
701  Tick completion_time;
702  // Here we charge on completion_time the delay of the xbar if the
703  // packet comes from it, charged on headerDelay.
704  completion_time = pkt->headerDelay;
705 
706  // Software prefetch handling for cache closest to core
707  if (tgt_pkt->cmd.isSWPrefetch()) {
708  if (tgt_pkt->needsWritable()) {
709  // All other copies of the block were invalidated and we
710  // have an exclusive copy.
711 
712  // The coherence protocol assumes that if we fetched an
713  // exclusive copy of the block, we have the intention to
714  // modify it. Therefore the MSHR for the PrefetchExReq has
715  // been the point of ordering and this cache has commited
716  // to respond to snoops for the block.
717  //
718  // In most cases this is true anyway - a PrefetchExReq
719  // will be followed by a WriteReq. However, if that
720  // doesn't happen, the block is not marked as dirty and
721  // the cache doesn't respond to snoops even though it has
722  // committed to do so.
723  //
724  // To avoid deadlocks in cases where there is a snoop
725  // between the PrefetchExReq and the expected WriteReq, we
726  // proactively mark the block as Dirty.
727  assert(blk);
728  blk->setCoherenceBits(CacheBlk::DirtyBit);
729 
730  panic_if(isReadOnly, "Prefetch exclusive requests from "
731  "read-only cache %s\n", name());
732  }
733 
734  // a software prefetch would have already been ack'd
735  // immediately with dummy data so the core would be able to
736  // retire it. This request completes right here, so we
737  // deallocate it.
738  delete tgt_pkt;
739  break; // skip response
740  }
741 
742  // unlike the other packet flows, where data is found in other
743  // caches or memory and brought back, write-line requests always
744  // have the data right away, so the above check for "is fill?"
745  // cannot actually be determined until examining the stored MSHR
746  // state. We "catch up" with that logic here, which is duplicated
747  // from above.
748  if (tgt_pkt->cmd == MemCmd::WriteLineReq) {
749  assert(!is_error);
750  assert(blk);
751  assert(blk->isSet(CacheBlk::WritableBit));
752  }
753 
754  // Here we decide whether we will satisfy the target using
755  // data from the block or from the response. We use the
756  // block data to satisfy the request when the block is
757  // present and valid, and in addition the response is not
758  // forwarding data to the cache above (we didn't fill
759  // either); otherwise we use the packet data.
760  if (blk && blk->isValid() &&
761  (!mshr->isForward || !pkt->hasData())) {
762  satisfyRequest(tgt_pkt, blk, true, mshr->hasPostDowngrade());
763 
764  // How many bytes past the first request is this one
765  int transfer_offset =
766  tgt_pkt->getOffset(blkSize) - initial_offset;
767  if (transfer_offset < 0) {
768  transfer_offset += blkSize;
769  }
770 
771  // If not critical word (offset) return payloadDelay.
772  // responseLatency is the latency of the return path
773  // from lower level caches/memory to an upper level cache or
774  // the core.
775  completion_time += clockEdge(responseLatency) +
776  (transfer_offset ? pkt->payloadDelay : 0);
777 
778  assert(!tgt_pkt->req->isUncacheable());
779 
780  assert(tgt_pkt->req->requestorId() < system->maxRequestors());
781  stats.cmdStats(tgt_pkt)
782  .missLatency[tgt_pkt->req->requestorId()] +=
783  completion_time - target.recvTime;
784  } else if (pkt->cmd == MemCmd::UpgradeFailResp) {
785  // failed StoreCond upgrade
786  assert(tgt_pkt->cmd == MemCmd::StoreCondReq ||
787  tgt_pkt->cmd == MemCmd::StoreCondFailReq ||
788  tgt_pkt->cmd == MemCmd::SCUpgradeFailReq);
789  // responseLatency is the latency of the return path
790  // from lower level caches/memory to an upper level cache or
791  // the core.
792  completion_time += clockEdge(responseLatency) +
793  pkt->payloadDelay;
794  tgt_pkt->req->setExtraData(0);
795  } else {
796  if (is_invalidate && blk && blk->isValid()) {
797  // We are about to send a response to a cache above
798  // that asked for an invalidation; we need to
799  // invalidate our copy immediately as the most
800  // up-to-date copy of the block will now be in the
801  // cache above. It will also prevent this cache from
802  // responding (if the block was previously dirty) to
803  // snoops as they should snoop the caches above where
804  // they will get the response from.
805  invalidateBlock(blk);
806  }
807  // not a cache fill, just forwarding response
808  // responseLatency is the latency of the return path
809  // from lower level caches/memory to the core.
810  completion_time += clockEdge(responseLatency) +
811  pkt->payloadDelay;
812  if (!is_error) {
813  if (pkt->isRead()) {
814  // sanity check
815  assert(pkt->matchAddr(tgt_pkt));
816  assert(pkt->getSize() >= tgt_pkt->getSize());
817 
818  tgt_pkt->setData(pkt->getConstPtr<uint8_t>());
819  } else {
820  // MSHR targets can read data either from the
821  // block or the response pkt. If we can't get data
822  // from the block (i.e., invalid or has old data)
823  // or the response (did not bring in any data)
824  // then make sure that the target didn't expect
825  // any.
826  assert(!tgt_pkt->hasRespData());
827  }
828  }
829 
830  // this response did not allocate here and therefore
831  // it was not consumed, make sure that any flags are
832  // carried over to cache above
833  tgt_pkt->copyResponderFlags(pkt);
834  }
835  tgt_pkt->makeTimingResponse();
836  // if this packet is an error copy that to the new packet
837  if (is_error)
838  tgt_pkt->copyError(pkt);
839  if (tgt_pkt->cmd == MemCmd::ReadResp &&
840  (is_invalidate || mshr->hasPostInvalidate())) {
841  // If intermediate cache got ReadRespWithInvalidate,
842  // propagate that. Response should not have
843  // isInvalidate() set otherwise.
844  tgt_pkt->cmd = MemCmd::ReadRespWithInvalidate;
845  DPRINTF(Cache, "%s: updated cmd to %s\n", __func__,
846  tgt_pkt->print());
847  }
848  // Reset the bus additional time as it is now accounted for
849  tgt_pkt->headerDelay = tgt_pkt->payloadDelay = 0;
850  cpuSidePort.schedTimingResp(tgt_pkt, completion_time);
851  break;
852 
853  case MSHR::Target::FromPrefetcher:
854  assert(tgt_pkt->cmd == MemCmd::HardPFReq);
855  if (blk)
856  blk->setPrefetched();
857  delete tgt_pkt;
858  break;
859 
860  case MSHR::Target::FromSnoop:
861  // I don't believe that a snoop can be in an error state
862  assert(!is_error);
863  // response to snoop request
864  DPRINTF(Cache, "processing deferred snoop...\n");
865  // If the response is invalidating, a snooping target can
866  // be satisfied if it is also invalidating. If the response is not
867  // only invalidating, but more specifically an InvalidateResp, and
868  // the MSHR was created due to an InvalidateReq then a cache above
869  // is waiting to satisfy a WriteLineReq. In this case even a
870  // non-invalidating snoop is added as a target here since this is
871  // the ordering point. When the InvalidateResp reaches this cache,
872  // the snooping target will snoop further the cache above with the
873  // WriteLineReq.
874  assert(!is_invalidate || pkt->cmd == MemCmd::InvalidateResp ||
875  pkt->req->isCacheMaintenance() ||
876  mshr->hasPostInvalidate());
877  handleSnoop(tgt_pkt, blk, true, true, mshr->hasPostInvalidate());
878  break;
879 
880  default:
881  panic("Illegal target->source enum %d\n", target.source);
882  }
883  }
884 
885  maintainClusivity(targets.hasFromCache, blk);
886 
887  if (blk && blk->isValid()) {
888  // an invalidate response stemming from a write line request
889  // should not invalidate the block, so check if the
890  // invalidation should be discarded
891  if (is_invalidate || mshr->hasPostInvalidate()) {
892  invalidateBlock(blk);
893  } else if (mshr->hasPostDowngrade()) {
894  blk->clearCoherenceBits(CacheBlk::WritableBit);
895  }
896  }
897 }
898 
899 PacketPtr
900 Cache::evictBlock(CacheBlk *blk)
901 {
902  PacketPtr pkt = (blk->isSet(CacheBlk::DirtyBit) || writebackClean) ?
903  writebackBlk(blk) : cleanEvictBlk(blk);
904 
905  invalidateBlock(blk);
906 
907  return pkt;
908 }
909 
910 PacketPtr
911 Cache::cleanEvictBlk(CacheBlk *blk)
912 {
913  assert(!writebackClean);
914  assert(blk && blk->isValid() && !blk->isSet(CacheBlk::DirtyBit));
915 
916  // Creating a zero sized write, a message to the snoop filter
917  RequestPtr req = std::make_shared<Request>(
918  regenerateBlkAddr(blk), blkSize, 0, Request::wbRequestorId);
919 
920  if (blk->isSecure())
921  req->setFlags(Request::SECURE);
922 
923  req->taskId(blk->getTaskId());
924 
925  PacketPtr pkt = new Packet(req, MemCmd::CleanEvict);
926  pkt->allocate();
927  DPRINTF(Cache, "Create CleanEvict %s\n", pkt->print());
928 
929  return pkt;
930 }
931 
933 //
934 // Snoop path: requests coming in from the memory side
935 //
937 
938 void
939 Cache::doTimingSupplyResponse(PacketPtr req_pkt, const uint8_t *blk_data,
940  bool already_copied, bool pending_inval)
941 {
942  // sanity check
943  assert(req_pkt->isRequest());
944  assert(req_pkt->needsResponse());
945 
946  DPRINTF(Cache, "%s: for %s\n", __func__, req_pkt->print());
947  // timing-mode snoop responses require a new packet, unless we
948  // already made a copy...
949  PacketPtr pkt = req_pkt;
950  if (!already_copied)
951  // do not clear flags, and allocate space for data if the
952  // packet needs it (the only packets that carry data are read
953  // responses)
954  pkt = new Packet(req_pkt, false, req_pkt->isRead());
955 
956  assert(req_pkt->req->isUncacheable() || req_pkt->isInvalidate() ||
957  pkt->hasSharers());
958  pkt->makeTimingResponse();
959  if (pkt->isRead()) {
960  pkt->setDataFromBlock(blk_data, blkSize);
961  }
962  if (pkt->cmd == MemCmd::ReadResp && pending_inval) {
963  // Assume we defer a response to a read from a far-away cache
964  // A, then later defer a ReadExcl from a cache B on the same
965  // bus as us. We'll assert cacheResponding in both cases, but
966  // in the latter case cacheResponding will keep the
967  // invalidation from reaching cache A. This special response
968  // tells cache A that it gets the block to satisfy its read,
969  // but must immediately invalidate it.
970  pkt->cmd = MemCmd::ReadRespWithInvalidate;
971  }
972  // Here we consider forward_time, paying for just forward latency and
973  // also charging the delay provided by the xbar.
974  // forward_time is used as the send_time in schedTimingSnoopResp() below.
975  Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;
976  // Here we reset the timing of the packet.
977  pkt->headerDelay = pkt->payloadDelay = 0;
978  DPRINTF(CacheVerbose, "%s: created response: %s tick: %lu\n", __func__,
979  pkt->print(), forward_time);
980  memSidePort.schedTimingSnoopResp(pkt, forward_time);
981 }
982 
983 uint32_t
984 Cache::handleSnoop(PacketPtr pkt, CacheBlk *blk, bool is_timing,
985  bool is_deferred, bool pending_inval)
986 {
987  DPRINTF(CacheVerbose, "%s: for %s\n", __func__, pkt->print());
988  // deferred snoops can only happen in timing mode
989  assert(!(is_deferred && !is_timing));
990  // pending_inval only makes sense on deferred snoops
991  assert(!(pending_inval && !is_deferred));
992  assert(pkt->isRequest());
993 
994  // the packet may get modified if we or a forwarded snooper
995  // responds in atomic mode, so remember a few things about the
996  // original packet up front
997  bool invalidate = pkt->isInvalidate();
998  [[maybe_unused]] bool needs_writable = pkt->needsWritable();
999 
1000  // at the moment we could get an uncacheable write which does not
1001  // have the invalidate flag, and we need a suitable way of dealing
1002  // with this case
1003  panic_if(invalidate && pkt->req->isUncacheable(),
1004  "%s got an invalidating uncacheable snoop request %s",
1005  name(), pkt->print());
1006 
1007  uint32_t snoop_delay = 0;
1008 
1009  if (forwardSnoops) {
1010  // first propagate snoop upward to see if anyone above us wants to
1011  // handle it. save & restore packet src since it will get
1012  // rewritten to be relative to CPU-side bus (if any)
1013  if (is_timing) {
1014  // copy the packet so that we can clear any flags before
1015  // forwarding it upwards, we also allocate data (passing
1016  // the pointer along in case of static data), in case
1017  // there is a snoop hit in upper levels
1018  Packet snoopPkt(pkt, true, true);
1019  snoopPkt.setExpressSnoop();
1020  // the snoop packet does not need to wait any additional
1021  // time
1022  snoopPkt.headerDelay = snoopPkt.payloadDelay = 0;
1023  cpuSidePort.sendTimingSnoopReq(&snoopPkt);
1024 
1025  // add the header delay (including crossbar and snoop
1026  // delays) of the upward snoop to the snoop delay for this
1027  // cache
1028  snoop_delay += snoopPkt.headerDelay;
1029 
1030  // If this request is a prefetch or clean evict and an upper level
1031  // signals block present, make sure to propagate the block
1032  // presence to the requestor.
1033  if (snoopPkt.isBlockCached()) {
1034  pkt->setBlockCached();
1035  }
1036  // If the request was satisfied by snooping the cache
1037  // above, mark the original packet as satisfied too.
1038  if (snoopPkt.satisfied()) {
1039  pkt->setSatisfied();
1040  }
1041 
1042  // Copy over flags from the snoop response to make sure we
1043  // inform the final destination
1044  pkt->copyResponderFlags(&snoopPkt);
1045  } else {
1046  bool already_responded = pkt->cacheResponding();
1047  cpuSidePort.sendAtomicSnoop(pkt);
1048  if (!already_responded && pkt->cacheResponding()) {
1049  // cache-to-cache response from some upper cache:
1050  // forward response to original requestor
1051  assert(pkt->isResponse());
1052  }
1053  }
1054  }
1055 
1056  bool respond = false;
1057  bool blk_valid = blk && blk->isValid();
1058  if (pkt->isClean()) {
1059  if (blk_valid && blk->isSet(CacheBlk::DirtyBit)) {
1060  DPRINTF(CacheVerbose, "%s: packet (snoop) %s found block: %s\n",
1061  __func__, pkt->print(), blk->print());
1062  PacketPtr wb_pkt =
1063  writecleanBlk(blk, pkt->req->getDest(), pkt->id);
1064  PacketList writebacks;
1065  writebacks.push_back(wb_pkt);
1066 
1067  if (is_timing) {
1068  // anything that is merely forwarded pays for the forward
1069  // latency and the delay provided by the crossbar
1070  Tick forward_time = clockEdge(forwardLatency) +
1071  pkt->headerDelay;
1072  doWritebacks(writebacks, forward_time);
1073  } else {
1074  doWritebacksAtomic(writebacks);
1075  }
1076  pkt->setSatisfied();
1077  }
1078  } else if (!blk_valid) {
1079  DPRINTF(CacheVerbose, "%s: snoop miss for %s\n", __func__,
1080  pkt->print());
1081  if (is_deferred) {
1082  // we no longer have the block, and will not respond, but a
1083  // packet was allocated in MSHR::handleSnoop and we have
1084  // to delete it
1085  assert(pkt->needsResponse());
1086 
1087  // we have passed the block to a cache upstream, that
1088  // cache should be responding
1089  assert(pkt->cacheResponding());
1090 
1091  delete pkt;
1092  }
1093  return snoop_delay;
1094  } else {
1095  DPRINTF(Cache, "%s: snoop hit for %s, old state is %s\n", __func__,
1096  pkt->print(), blk->print());
1097 
1098  // We may end up modifying both the block state and the packet (if
1099  // we respond in atomic mode), so just figure out what to do now
1100  // and then do it later. We respond to all snoops that need
1101  // responses provided we have the block in dirty state. The
1102  // invalidation itself is taken care of below. We don't respond to
1103  // cache maintenance operations as this is done by the destination
1104  // xbar.
1105  respond = blk->isSet(CacheBlk::DirtyBit) && pkt->needsResponse();
1106 
1108  "Should never have a dirty block in a read-only cache %s\n",
1109  name());
1110  }
1111 
1112  // Invalidate any prefetch's from below that would strip write permissions
1113  // MemCmd::HardPFReq is only observed by upstream caches. After missing
1114  // above and in its own cache, a new MemCmd::ReadReq is created that
1115  // downstream caches observe.
1116  if (pkt->mustCheckAbove()) {
1117  DPRINTF(Cache, "Found addr %#llx in upper level cache for snoop %s "
1118  "from lower cache\n", pkt->getAddr(), pkt->print());
1119  pkt->setBlockCached();
1120  return snoop_delay;
1121  }
1122 
1123  if (pkt->isRead() && !invalidate) {
1124  // reading without requiring the line in a writable state
1125  assert(!needs_writable);
1126  pkt->setHasSharers();
1127 
1128  // if the requesting packet is uncacheable, retain the line in
1129  // the current state, otherwhise unset the writable flag,
1130  // which means we go from Modified to Owned (and will respond
1131  // below), remain in Owned (and will respond below), from
1132  // Exclusive to Shared, or remain in Shared
1133  if (!pkt->req->isUncacheable()) {
1134  blk->clearCoherenceBits(CacheBlk::WritableBit);
1135  }
1136  DPRINTF(Cache, "new state is %s\n", blk->print());
1137  }
1138 
1139  if (respond) {
1140  // prevent anyone else from responding, cache as well as
1141  // memory, and also prevent any memory from even seeing the
1142  // request
1143  pkt->setCacheResponding();
1144  if (!pkt->isClean() && blk->isSet(CacheBlk::WritableBit)) {
1145  // inform the cache hierarchy that this cache had the line
1146  // in the Modified state so that we avoid unnecessary
1147  // invalidations (see Packet::setResponderHadWritable)
1148  pkt->setResponderHadWritable();
1149 
1150  // in the case of an uncacheable request there is no point
1151  // in setting the responderHadWritable flag, but since the
1152  // recipient does not care there is no harm in doing so
1153  } else {
1154  // if the packet has needsWritable set we invalidate our
1155  // copy below and all other copies will be invalidated
1156  // through express snoops, and if needsWritable is not set
1157  // we already called setHasSharers above
1158  }
1159 
1160  // if we are returning a writable and dirty (Modified) line,
1161  // we should be invalidating the line
1162  panic_if(!invalidate && !pkt->hasSharers(),
1163  "%s is passing a Modified line through %s, "
1164  "but keeping the block", name(), pkt->print());
1165 
1166  if (is_timing) {
1167  doTimingSupplyResponse(pkt, blk->data, is_deferred, pending_inval);
1168  } else {
1169  pkt->makeAtomicResponse();
1170  // packets such as upgrades do not actually have any data
1171  // payload
1172  if (pkt->hasData())
1173  pkt->setDataFromBlock(blk->data, blkSize);
1174  }
1175 
1176  // When a block is compressed, it must first be decompressed before
1177  // being read, and this increases the snoop delay.
1178  if (compressor && pkt->isRead()) {
1179  snoop_delay += compressor->getDecompressionLatency(blk);
1180  }
1181  }
1182 
1183  if (!respond && is_deferred) {
1184  assert(pkt->needsResponse());
1185  delete pkt;
1186  }
1187 
1188  // Do this last in case it deallocates block data or something
1189  // like that
1190  if (blk_valid && invalidate) {
1191  invalidateBlock(blk);
1192  DPRINTF(Cache, "new state is %s\n", blk->print());
1193  }
1194 
1195  return snoop_delay;
1196 }
1197 
1198 
1199 void
1200 Cache::recvTimingSnoopReq(PacketPtr pkt)
1201 {
1202  DPRINTF(CacheVerbose, "%s: for %s\n", __func__, pkt->print());
1203 
1204  // no need to snoop requests that are not in range
1205  if (!inRange(pkt->getAddr())) {
1206  return;
1207  }
1208 
1209  bool is_secure = pkt->isSecure();
1210  CacheBlk *blk = tags->findBlock(pkt->getAddr(), is_secure);
1211 
1212  Addr blk_addr = pkt->getBlockAddr(blkSize);
1213  MSHR *mshr = mshrQueue.findMatch(blk_addr, is_secure);
1214 
1215  // Update the latency cost of the snoop so that the crossbar can
1216  // account for it. Do not overwrite what other neighbouring caches
1217  // have already done, rather take the maximum. The update is
1218  // tentative, for cases where we return before an upward snoop
1219  // happens below.
1220  pkt->snoopDelay = std::max<uint32_t>(pkt->snoopDelay,
1221  lookupLatency * clockPeriod());
1222 
1223  // Inform requests (Prefetch, CleanEvict or Writeback) from below of
1224  // an MSHR hit by setting BLOCK_CACHED.
1225  if (mshr && pkt->mustCheckAbove()) {
1226  DPRINTF(Cache, "Setting block cached for %s from lower cache on "
1227  "mshr hit\n", pkt->print());
1228  pkt->setBlockCached();
1229  return;
1230  }
1231 
1232  // Let the MSHR itself track the snoop and decide whether we want
1233  // to go ahead and do the regular cache snoop
1234  if (mshr && mshr->handleSnoop(pkt, order++)) {
1235  DPRINTF(Cache, "Deferring snoop on in-service MSHR to blk %#llx (%s)."
1236  "mshrs: %s\n", blk_addr, is_secure ? "s" : "ns",
1237  mshr->print());
1238 
1239  if (mshr->getNumTargets() > numTarget)
1240  warn("allocating bonus target for snoop"); //handle later
1241  return;
1242  }
1243 
1244  //We also need to check the writeback buffers and handle those
1245  WriteQueueEntry *wb_entry = writeBuffer.findMatch(blk_addr, is_secure);
1246  if (wb_entry) {
1247  DPRINTF(Cache, "Snoop hit in writeback to addr %#llx (%s)\n",
1248  pkt->getAddr(), is_secure ? "s" : "ns");
1249  // Expect to see only Writebacks and/or CleanEvicts here, both of
1250  // which should not be generated for uncacheable data.
1251  assert(!wb_entry->isUncacheable());
1252  // There should only be a single request responsible for generating
1253  // Writebacks/CleanEvicts.
1254  assert(wb_entry->getNumTargets() == 1);
1255  PacketPtr wb_pkt = wb_entry->getTarget()->pkt;
1256  assert(wb_pkt->isEviction() || wb_pkt->cmd == MemCmd::WriteClean);
1257 
1258  if (pkt->isEviction()) {
1259  // if the block is found in the write queue, set the BLOCK_CACHED
1260  // flag for Writeback/CleanEvict snoop. On return the snoop will
1261  // propagate the BLOCK_CACHED flag in Writeback packets and prevent
1262  // any CleanEvicts from travelling down the memory hierarchy.
1263  pkt->setBlockCached();
1264  DPRINTF(Cache, "%s: Squashing %s from lower cache on writequeue "
1265  "hit\n", __func__, pkt->print());
1266  return;
1267  }
1268 
1269  // conceptually writebacks are no different to other blocks in
1270  // this cache, so the behaviour is modelled after handleSnoop,
1271  // the difference being that instead of querying the block
1272  // state to determine if it is dirty and writable, we use the
1273  // command and fields of the writeback packet
1274  bool respond = wb_pkt->cmd == MemCmd::WritebackDirty &&
1275  pkt->needsResponse();
1276  bool have_writable = !wb_pkt->hasSharers();
1277  bool invalidate = pkt->isInvalidate();
1278 
1279  if (!pkt->req->isUncacheable() && pkt->isRead() && !invalidate) {
1280  assert(!pkt->needsWritable());
1281  pkt->setHasSharers();
1282  wb_pkt->setHasSharers();
1283  }
1284 
1285  if (respond) {
1286  pkt->setCacheResponding();
1287 
1288  if (have_writable) {
1289  pkt->setResponderHadWritable();
1290  }
1291 
1292  doTimingSupplyResponse(pkt, wb_pkt->getConstPtr<uint8_t>(),
1293  false, false);
1294  }
1295 
1296  if (invalidate && wb_pkt->cmd != MemCmd::WriteClean) {
1297  // Invalidation trumps our writeback... discard here
1298  // Note: markInService will remove entry from writeback buffer.
1299  markInService(wb_entry);
1300  delete wb_pkt;
1301  }
1302  }
1303 
1304  // If this was a shared writeback, there may still be
1305  // other shared copies above that require invalidation.
1306  // We could be more selective and return here if the
1307  // request is non-exclusive or if the writeback is
1308  // exclusive.
1309  uint32_t snoop_delay = handleSnoop(pkt, blk, true, false, false);
1310 
1311  // Override what we did when we first saw the snoop, as we now
1312  // also have the cost of the upwards snoops to account for
1313  pkt->snoopDelay = std::max<uint32_t>(pkt->snoopDelay, snoop_delay +
1314  lookupLatency * clockPeriod());
1315 }
1316 
1317 Tick
1318 Cache::recvAtomicSnoop(PacketPtr pkt)
1319 {
1320  // no need to snoop requests that are not in range.
1321  if (!inRange(pkt->getAddr())) {
1322  return 0;
1323  }
1324 
1325  CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());
1326  uint32_t snoop_delay = handleSnoop(pkt, blk, false, false, false);
1327  return snoop_delay + lookupLatency * clockPeriod();
1328 }
1329 
1330 bool
1331 Cache::isCachedAbove(PacketPtr pkt, bool is_timing)
1332 {
1333  if (!forwardSnoops)
1334  return false;
1335  // Mirroring the flow of HardPFReqs, the cache sends CleanEvict and
1336  // Writeback snoops into upper level caches to check for copies of the
1337  // same block. Using the BLOCK_CACHED flag with the Writeback/CleanEvict
1338  // packet, the cache can inform the crossbar below of presence or absence
1339  // of the block.
1340  if (is_timing) {
1341  Packet snoop_pkt(pkt, true, false);
1342  snoop_pkt.setExpressSnoop();
1343  // Assert that packet is either Writeback or CleanEvict and not a
1344  // prefetch request because prefetch requests need an MSHR and may
1345  // generate a snoop response.
1346  assert(pkt->isEviction() || pkt->cmd == MemCmd::WriteClean);
1347  snoop_pkt.senderState = nullptr;
1348  cpuSidePort.sendTimingSnoopReq(&snoop_pkt);
1349  // Writeback/CleanEvict snoops do not generate a snoop response.
1350  assert(!(snoop_pkt.cacheResponding()));
1351  return snoop_pkt.isBlockCached();
1352  } else {
1353  cpuSidePort.sendAtomicSnoop(pkt);
1354  return pkt->isBlockCached();
1355  }
1356 }
1357 
1358 bool
1359 Cache::sendMSHRQueuePacket(MSHR *mshr)
1360 {
1361  assert(mshr);
1362 
1363  // use request from 1st target
1364  PacketPtr tgt_pkt = mshr->getTarget()->pkt;
1365 
1366  if (tgt_pkt->cmd == MemCmd::HardPFReq && forwardSnoops) {
1367  DPRINTF(Cache, "%s: MSHR %s\n", __func__, tgt_pkt->print());
1368 
1369  // we should never have hardware prefetches to allocated
1370  // blocks
1371  assert(!tags->findBlock(mshr->blkAddr, mshr->isSecure));
1372 
1373  // We need to check the caches above us to verify that
1374  // they don't have a copy of this block in the dirty state
1375  // at the moment. Without this check we could get a stale
1376  // copy from memory that might get used in place of the
1377  // dirty one.
1378  Packet snoop_pkt(tgt_pkt, true, false);
1379  snoop_pkt.setExpressSnoop();
1380  // We are sending this packet upwards, but if it hits we will
1381  // get a snoop response that we end up treating just like a
1382  // normal response, hence it needs the MSHR as its sender
1383  // state
1384  snoop_pkt.senderState = mshr;
1385  cpuSidePort.sendTimingSnoopReq(&snoop_pkt);
1386 
1387  // Check to see if the prefetch was squashed by an upper cache
1388  // (to prevent us from grabbing the line), or if a writeback
1389  // arrived between the time the prefetch was placed in the
1390  // MSHRs and when it was selected to be sent.
1391 
1392 
1393  // It is important to check cacheResponding before
1394  // prefetchSquashed. If another cache has committed to
1395  // responding, it will be sending a dirty response which will
1396  // arrive at the MSHR allocated for this request. Checking the
1397  // prefetchSquash first may result in the MSHR being
1398  // prematurely deallocated.
1399  if (snoop_pkt.cacheResponding()) {
1400  [[maybe_unused]] auto r = outstandingSnoop.insert(snoop_pkt.req);
1401  assert(r.second);
1402 
1403  // if we are getting a snoop response with no sharers it
1404  // will be allocated as Modified
1405  bool pending_modified_resp = !snoop_pkt.hasSharers();
1406  markInService(mshr, pending_modified_resp);
1407 
1408  DPRINTF(Cache, "Upward snoop of prefetch for addr"
1409  " %#x (%s) hit\n",
1410  tgt_pkt->getAddr(), tgt_pkt->isSecure()? "s": "ns");
1411  return false;
1412  }
1413 
1414  if (snoop_pkt.isBlockCached()) {
1415  DPRINTF(Cache, "Block present, prefetch squashed by cache. "
1416  "Deallocating mshr target %#x.\n",
1417  mshr->blkAddr);
1418 
1419  // Deallocate the mshr target
1420  if (mshrQueue.forceDeallocateTarget(mshr)) {
1421  // Clear block if this deallocation resulted freed an
1422  // mshr when all had previously been utilized
1423  clearBlocked(Blocked_NoMSHRs);
1424  }
1425 
1426  // given that no response is expected, delete Request and Packet
1427  delete tgt_pkt;
1428 
1429  return false;
1430  }
1431  }
1432 
1433  return BaseCache::sendMSHRQueuePacket(mshr);
1434 }
1435 
1436 } // namespace gem5
gem5::Packet::getBlockAddr
Addr getBlockAddr(unsigned int blk_size) const
Definition: packet.hh:805
gem5::MSHR::print
void print(std::ostream &os, int verbosity=0, const std::string &prefix="") const override
Prints the contents of this MSHR for debugging.
Definition: mshr.cc:712
gem5::MSHR
Miss Status and handling Register.
Definition: mshr.hh:74
gem5::MemCmd::StoreCondReq
@ StoreCondReq
Definition: packet.hh:112
gem5::BaseCache::cpuSidePort
CpuSidePort cpuSidePort
Definition: base.hh:338
gem5::RequestPort::sendTimingReq
bool sendTimingReq(PacketPtr pkt)
Attempt to send a timing request to the responder port by calling its corresponding receive function.
Definition: port.hh:495
gem5::BaseCache::handleTimingReqMiss
virtual void handleTimingReqMiss(PacketPtr pkt, CacheBlk *blk, Tick forward_time, Tick request_time)=0
gem5::Packet::isRequest
bool isRequest() const
Definition: packet.hh:586
warn
#define warn(...)
Definition: logging.hh:246
gem5::BaseCache::invalidateBlock
void invalidateBlock(CacheBlk *blk)
Invalidate a cache block.
Definition: base.cc:1588
gem5::Cache::handleAtomicReqMiss
Cycles handleAtomicReqMiss(PacketPtr pkt, CacheBlk *&blk, PacketList &writebacks) override
Handle a request in atomic mode that missed in this cache.
Definition: cache.cc:563
gem5::MemCmd::WriteClean
@ WriteClean
Definition: packet.hh:94
gem5::Packet::getOffset
Addr getOffset(unsigned int blk_size) const
Definition: packet.hh:800
gem5::MemCmd::CleanEvict
@ CleanEvict
Definition: packet.hh:95
gem5::BaseCache::access
virtual bool access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat, PacketList &writebacks)
Does all the processing necessary to perform the provided request.
Definition: base.cc:1154
gem5::BaseCache::forwardSnoops
bool forwardSnoops
Do we forward snoops from mem side port through to cpu side port?
Definition: base.hh:928
gem5::QueueEntry::Target::pkt
const PacketPtr pkt
Pending request packet.
Definition: queue_entry.hh:93
gem5::BaseCache::writeBuffer
WriteQueue writeBuffer
Write/writeback buffer.
Definition: base.hh:347
cache.hh
gem5::Packet::setData
void setData(const uint8_t *p)
Copy data into the packet from the provided pointer.
Definition: packet.hh:1252
gem5::Cache::sendMSHRQueuePacket
bool sendMSHRQueuePacket(MSHR *mshr) override
Take an MSHR, turn it into a suitable downstream packet, and send it out.
Definition: cache.cc:1359
gem5::BaseCache::recvTimingResp
virtual void recvTimingResp(PacketPtr pkt)
Handles a response (cache line fill/write ack) from the bus.
Definition: base.cc:420
mshr.hh
gem5::Packet::req
RequestPtr req
A pointer to the original request.
Definition: packet.hh:366
gem5::MSHR::isForward
bool isForward
True if the entry is just a simple forward from an upper level.
Definition: mshr.hh:127
gem5::compression::Base::getDecompressionLatency
Cycles getDecompressionLatency(const CacheBlk *blk)
Get the decompression latency if the block is compressed.
Definition: base.cc:197
gem5::RequestPort::sendAtomic
Tick sendAtomic(PacketPtr pkt)
Send an atomic request packet, where the data is moved and the state is updated in zero time,...
Definition: port.hh:464
gem5::BaseCache::CacheCmdStats::mshrUncacheable
statistics::Vector mshrUncacheable
Number of misses that miss in the MSHRs, per command and thread.
Definition: base.hh:1029
gem5::MSHR::hasPostInvalidate
bool hasPostInvalidate() const
Definition: mshr.hh:330
gem5::Request::SECURE
@ SECURE
The request targets the secure memory space.
Definition: request.hh:186
gem5::BaseCache::markInService
void markInService(MSHR *mshr, bool pending_modified_resp)
Mark a request as in service (sent downstream in the memory system), effectively making this MSHR the...
Definition: base.hh:408
gem5::Packet::setCacheResponding
void setCacheResponding()
Snoop flags.
Definition: packet.hh:640
gem5::Packet::cacheResponding
bool cacheResponding() const
Definition: packet.hh:646
gem5::MemCmd::ReadSharedReq
@ ReadSharedReq
Definition: packet.hh:110
gem5::WriteQueueEntry::getTarget
Target * getTarget() override
Returns a reference to the first target.
Definition: write_queue_entry.hh:155
gem5::Packet::copyError
void copyError(Packet *pkt)
Definition: packet.hh:779
gem5::Packet::isUpgrade
bool isUpgrade() const
Definition: packet.hh:585
gem5::Packet::isWrite
bool isWrite() const
Definition: packet.hh:583
gem5::CacheBlk::clearCoherenceBits
void clearCoherenceBits(unsigned bits)
Clear the corresponding coherence bits.
Definition: cache_blk.hh:231
gem5::Packet::isSecure
bool isSecure() const
Definition: packet.hh:810
gem5::BaseCache::clearBlocked
void clearBlocked(BlockedCause cause)
Marks the cache as unblocked for the given cause.
Definition: base.hh:1255
gem5::Packet::hasSharers
bool hasSharers() const
Definition: packet.hh:673
gem5::X86ISA::system
Bitfield< 15 > system
Definition: misc.hh:1003
gem5::Cache::recvAtomic
Tick recvAtomic(PacketPtr pkt) override
Performs the access specified by the request.
Definition: cache.cc:653
gem5::QueueEntry::Target
A queue entry holds packets that will be serviced as soon as resources are available.
Definition: queue_entry.hh:87
gem5::MemCmd::UpgradeReq
@ UpgradeReq
Definition: packet.hh:102
gem5::Cache::evictBlock
PacketPtr evictBlock(CacheBlk *blk) override
Evict a cache block.
Definition: cache.cc:900
gem5::Cache::recvTimingSnoopReq
void recvTimingSnoopReq(PacketPtr pkt) override
Snoops bus transactions to maintain coherence.
Definition: cache.cc:1200
gem5::Packet::makeAtomicResponse
void makeAtomicResponse()
Definition: packet.hh:1043
gem5::Packet::setDataFromBlock
void setDataFromBlock(const uint8_t *blk_data, int blkSize)
Copy data into the packet from the provided block pointer, which is aligned to the given block size.
Definition: packet.hh:1271
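A hedged sketch of how a read hit might be serviced with this call; pkt, blk, and blkSize are assumed in scope, and the real hit path does additional bookkeeping beyond this.
    // Illustrative only: satisfy a read from the block's backing store.
    if (pkt->isRead() && blk && blk->data) {
        pkt->setDataFromBlock(blk->data, blkSize); // copies pkt->getSize() bytes
        pkt->makeTimingResponse();                 // convert the request into a response
    }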
gem5::BaseCache::pendingDelete
std::unique_ptr< Packet > pendingDelete
Upstream caches need this packet until true is returned, so hold it for deletion until a subsequent c...
Definition: base.hh:402
gem5::CacheBlk::WritableBit
@ WritableBit
write permission
Definition: cache_blk.hh:80
gem5::MemCmd::HardPFReq
@ HardPFReq
Definition: packet.hh:98
gem5::Packet::headerDelay
uint32_t headerDelay
The extra delay from seeing the packet until the header is transmitted.
Definition: packet.hh:420
gem5::Cache::handleTimingReqMiss
void handleTimingReqMiss(PacketPtr pkt, CacheBlk *blk, Tick forward_time, Tick request_time) override
Definition: cache.cc:324
gem5::Cache::handleSnoop
uint32_t handleSnoop(PacketPtr pkt, CacheBlk *blk, bool is_timing, bool is_deferred, bool pending_inval)
Perform an upward snoop if needed, and update the block state (possibly invalidating the block).
Definition: cache.cc:984
gem5::QueueEntry::isUncacheable
bool isUncacheable() const
Definition: queue_entry.hh:130
gem5::Packet::setSatisfied
void setSatisfied()
Set when a request hits in a cache and the cache is not going to respond.
Definition: packet.hh:736
gem5::QueueEntry::blkAddr
Addr blkAddr
Block aligned address.
Definition: queue_entry.hh:116
request.hh
gem5::Packet::snoopDelay
uint32_t snoopDelay
Keep track of the extra delay incurred by snooping upwards before sending a request down the memory s...
Definition: packet.hh:428
gem5::CacheBlk::setPrefetched
void setPrefetched()
Marks this blocks as a recently prefetched block.
Definition: cache_blk.hh:258
gem5::CacheBlk
A Basic Cache block.
Definition: cache_blk.hh:70
gem5::MemCmd::ReadCleanReq
@ ReadCleanReq
Definition: packet.hh:109
gem5::Cycles
Cycles is a wrapper class for representing cycle counts, i.e.
Definition: types.hh:78
gem5::BaseCache::writeAllocator
WriteAllocator *const writeAllocator
The writeAllocator drives optimizations for streaming writes.
Definition: base.hh:388
gem5::MemCmd::StoreCondFailReq
@ StoreCondFailReq
Definition: packet.hh:113
gem5::MemCmd::WritebackDirty
@ WritebackDirty
Definition: packet.hh:92
gem5::BaseCache::inRange
bool inRange(Addr addr) const
Determine if an address is in the ranges covered by this cache.
Definition: base.cc:213
gem5::Packet::hasData
bool hasData() const
Definition: packet.hh:603
gem5::MemCmd
Definition: packet.hh:75
gem5::Cache::recvAtomicSnoop
Tick recvAtomicSnoop(PacketPtr pkt) override
Snoop for the provided request in the cache and return the estimated time taken.
Definition: cache.cc:1318
gem5::MSHR::Target::FromCPU
@ FromCPU
Definition: mshr.hh:135
gem5::Packet::mustCheckAbove
bool mustCheckAbove() const
Does the request need to check for cached copies of the same block in the memory hierarchy above?
Definition: packet.hh:1384
gem5::Packet::payloadDelay
uint32_t payloadDelay
The extra pipelining delay from seeing the packet until the end of payload is transmitted by the comp...
Definition: packet.hh:438
gem5::BaseCache::order
uint64_t order
Increasing order number assigned to each incoming request.
Definition: base.hh:968
gem5::CacheBlk::getTaskId
uint32_t getTaskId() const
Get the task id associated to this block.
Definition: cache_blk.hh:285
gem5::System::maxRequestors
RequestorID maxRequestors()
Get the number of requestors registered in the system.
Definition: system.hh:496
gem5::Packet::isRead
bool isRead() const
Definition: packet.hh:582
gem5::BaseCache::tags
BaseTags * tags
Tag and data Storage.
Definition: base.hh:350
gem5::WriteAllocator::allocate
bool allocate() const
Should writes allocate?
Definition: base.hh:1405
gem5::Packet::needsWritable
bool needsWritable() const
Definition: packet.hh:588
gem5::MemCmd::SCUpgradeReq
@ SCUpgradeReq
Definition: packet.hh:103
gem5::BaseCache::allocateWriteBuffer
void allocateWriteBuffer(PacketPtr pkt, Tick time)
Definition: base.hh:1191
gem5::Named::name
virtual std::string name() const
Definition: named.hh:47
gem5::Packet::print
void print(std::ostream &o, int verbosity=0, const std::string &prefix="") const
Definition: packet.cc:373
gem5::BaseCache::stats
gem5::BaseCache::CacheStats stats
gem5::Cache::Cache
Cache(const CacheParams &p)
Instantiates a basic cache object.
Definition: cache.cc:69
gem5::BaseCache
A basic cache interface.
Definition: base.hh:95
DPRINTF
#define DPRINTF(x,...)
Definition: trace.hh:186
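A small, hypothetical trace statement using the Cache debug flag; it assumes the std::string-returning Packet::print() overload rather than the stream overload listed above.
    // Illustrative only: emitted when tracing is compiled in and the
    // 'Cache' debug flag is enabled at run time.
    DPRINTF(Cache, "%s for %s\n", __func__, pkt->print());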
gem5::Packet
A Packet is used to encapsulate a transfer between two objects in the memory system (e....
Definition: packet.hh:283
gem5::MemCmd::UpgradeResp
@ UpgradeResp
Definition: packet.hh:104
gem5::BaseCache::sendMSHRQueuePacket
virtual bool sendMSHRQueuePacket(MSHR *mshr)
Take an MSHR, turn it into a suitable downstream packet, and send it out.
Definition: base.cc:1791
gem5::BaseCache::writecleanBlk
PacketPtr writecleanBlk(CacheBlk *blk, Request::Flags dest, PacketId id)
Create a writeclean request for the given block.
Definition: base.cc:1667
gem5::probing::Packet
ProbePointArg< PacketInfo > Packet
Packet probe point.
Definition: mem.hh:109
gem5::MipsISA::p
Bitfield< 0 > p
Definition: pra_constants.hh:326
gem5::BaseCache::blkSize
const unsigned blkSize
Block size of this cache.
Definition: base.hh:888
gem5::Tick
uint64_t Tick
Tick count type.
Definition: types.hh:58
gem5::Packet::satisfied
bool satisfied() const
Definition: packet.hh:742
gem5::BaseCache::regenerateBlkAddr
Addr regenerateBlkAddr(CacheBlk *blk)
Regenerate block address using tags.
Definition: base.cc:182
gem5::Cache::handleTimingReqHit
void handleTimingReqHit(PacketPtr pkt, CacheBlk *blk, Tick request_time) override
Definition: cache.cc:313
gem5::X86ISA::CacheParams
@ CacheParams
Definition: cpuid.cc:45
gem5::QueueEntry::isSecure
bool isSecure
True if the entry targets the secure memory space.
Definition: queue_entry.hh:122
gem5::BaseCache::recvAtomic
virtual Tick recvAtomic(PacketPtr pkt)
Performs the access specified by the request.
Definition: base.cc:559
gem5::RequestPtr
std::shared_ptr< Request > RequestPtr
Definition: request.hh:92
gem5::Packet::getConstPtr
const T * getConstPtr() const
Definition: packet.hh:1193
gem5::CacheBlk::setCoherenceBits
void setCoherenceBits(unsigned bits)
Sets the corresponding coherence bits.
Definition: cache_blk.hh:220
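The coherence bits form a small per-block bitmask; a hedged sketch of typical transitions using the WritableBit and DirtyBit enumerators listed elsewhere in this index (blk assumed in scope).
    // Illustrative only: grant write permission, mark the line modified,
    // then downgrade it again after a sharing snoop.
    blk->setCoherenceBits(CacheBlk::WritableBit);
    blk->setCoherenceBits(CacheBlk::DirtyBit);
    if (blk->isSet(CacheBlk::DirtyBit)) {
        // a writeback would be needed before this line could be dropped
    }
    blk->clearCoherenceBits(CacheBlk::WritableBit);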
gem5::Packet::isLLSC
bool isLLSC() const
Definition: packet.hh:609
gem5::MemCmd::WritebackClean
@ WritebackClean
Definition: packet.hh:93
gem5::Packet::matchAddr
bool matchAddr(const Addr addr, const bool is_secure) const
Check if packet corresponds to a given address and address space.
Definition: packet.cc:408
gem5::Packet::setExpressSnoop
void setExpressSnoop()
The express snoop flag is used for two purposes.
Definition: packet.hh:688
gem5::Request::wbRequestorId
@ wbRequestorId
This requestor id is used for writeback requests by the caches.
Definition: request.hh:257
gem5::CacheBlk::print
std::string print() const override
Pretty-print tag, set and way, and interpret state bits to readable form including mapping to a MOESI...
Definition: cache_blk.hh:364
gem5::Cache::promoteWholeLineWrites
void promoteWholeLineWrites(PacketPtr pkt)
Turn line-sized writes into WriteInvalidate transactions.
Definition: cache.cc:301
gem5::Queue::findMatch
Entry * findMatch(Addr blk_addr, bool is_secure, bool ignore_uncacheable=true) const
Find the first entry that matches the provided address.
Definition: queue.hh:168
gem5::BaseCache::CacheCmdStats::missLatency
statistics::Vector missLatency
Total number of cycles per thread/command spent waiting for a miss.
Definition: base.hh:1017
base.hh
gem5::ResponsePort::sendTimingSnoopReq
void sendTimingSnoopReq(PacketPtr pkt)
Attempt to send a timing snoop request packet to the request port by calling its corresponding receiv...
Definition: port.hh:387
compiler.hh
gem5::Packet::id
const PacketId id
Definition: packet.hh:363
gem5::BaseCache::allocOnFill
bool allocOnFill(MemCmd cmd) const
Determine whether we should allocate on a fill or not.
Definition: base.hh:441
gem5::Cache::outstandingSnoop
std::unordered_set< RequestPtr > outstandingSnoop
Store the outstanding requests that we are expecting snoop responses from so we can determine which s...
Definition: cache.hh:80
gem5::Packet::setResponderHadWritable
void setResponderHadWritable()
On responding to a snoop request (which only happens for Modified or Owned lines),...
Definition: packet.hh:700
gem5::X86ISA::pf
Bitfield< 2 > pf
Definition: misc.hh:556
gem5::MemCmd::ReadExReq
@ ReadExReq
Definition: packet.hh:107
gem5::Packet::cmd
MemCmd cmd
The command field of the packet.
Definition: packet.hh:361
gem5::MSHRQueue::forceDeallocateTarget
bool forceDeallocateTarget(MSHR *mshr)
Deallocate top target, possibly freeing the MSHR.
Definition: mshr_queue.cc:136
gem5::Cache::serviceMSHRTargets
void serviceMSHRTargets(MSHR *mshr, const PacketPtr pkt, CacheBlk *blk) override
Service non-deferred MSHR targets using the received response.
Definition: cache.cc:684
gem5::Packet::responderHadWritable
bool responderHadWritable() const
Definition: packet.hh:706
gem5::BaseCache::Blocked_NoMSHRs
@ Blocked_NoMSHRs
Definition: base.hh:113
gem5::MSHR::TargetList
Definition: mshr.hh:169
gem5::Packet::needsResponse
bool needsResponse() const
Definition: packet.hh:597
gem5::MSHR::wasWholeLineWrite
bool wasWholeLineWrite
Track if we sent this as a whole line write or not.
Definition: mshr.hh:124
gem5::Addr
uint64_t Addr
Address type. This will probably be moved somewhere else in the near future.
Definition: types.hh:147
gem5::BaseTags::print
std::string print()
Print all tags used.
Definition: base.cc:202
gem5::Packet::isError
bool isError() const
Definition: packet.hh:610
gem5::Packet::senderState
SenderState * senderState
This packet's sender state.
Definition: packet.hh:534
gem5::CacheBlk::DirtyBit
@ DirtyBit
dirty (modified)
Definition: cache_blk.hh:87
gem5::Clocked::clockEdge
Tick clockEdge(Cycles cycles=Cycles(0)) const
Determine the tick when a cycle begins, by default the current one, but the argument also enables the...
Definition: clocked_object.hh:177
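A common latency-accounting sketch built on clockEdge; forward_time is an illustrative name, while forwardLatency and headerDelay appear elsewhere in this index.
    // Illustrative only: charge the cache's forwarding latency plus any
    // header delay already accumulated on the packet, then consume it.
    Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;
    pkt->headerDelay = 0;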
gem5::BaseCache::recvTimingReq
virtual void recvTimingReq(PacketPtr pkt)
Performs the access specified by the request.
Definition: base.cc:350
gem5::MemCmd::SCUpgradeFailReq
@ SCUpgradeFailReq
Definition: packet.hh:105
gem5::MemCmd::InvalidateResp
@ InvalidateResp
Definition: packet.hh:138
gem5::Cache
A coherent cache that can be arranged in flexible topologies.
Definition: cache.hh:67
gem5::WriteQueueEntry
Write queue entry.
Definition: write_queue_entry.hh:67
gem5::CacheBlk::data
uint8_t * data
Contains a copy of the data in this block for easy access.
Definition: cache_blk.hh:103
gem5::Cache::cleanEvictBlk
PacketPtr cleanEvictBlk(CacheBlk *blk)
Create a CleanEvict request for the given block.
Definition: cache.cc:911
gem5::Cache::createMissPacket
PacketPtr createMissPacket(PacketPtr cpu_pkt, CacheBlk *blk, bool needs_writable, bool is_whole_line_write) const override
Create an appropriate downstream bus request packet.
Definition: cache.cc:477
gem5::BaseCache::maintainClusivity
void maintainClusivity(bool from_cache, CacheBlk *blk)
Maintain the clusivity of this cache by potentially invalidating a block.
Definition: base.cc:1423
gem5::Packet::isClean
bool isClean() const
Definition: packet.hh:600
gem5::BaseCache::isReadOnly
const bool isReadOnly
Is this cache read only, for example the instruction cache, or table-walker cache.
Definition: base.hh:943
gem5::Packet::setHasSharers
void setHasSharers()
On fills, the hasSharers flag is used by the caches in combination with the cacheResponding flag,...
Definition: packet.hh:672
gem5::BaseCache::clusivity
const enums::Clusivity clusivity
Clusivity with respect to the upstream cache, determining if we fill into both this cache and the cac...
Definition: base.hh:935
panic_if
#define panic_if(cond,...)
Conditional panic macro that checks the supplied condition and only panics if the condition is true a...
Definition: logging.hh:204
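A hedged example of the condition-plus-cprintf-style usage; the condition and message are made up for illustration.
    // Illustrative only: abort the simulation if an invariant is violated.
    panic_if(blkSize == 0, "%s: cache configured with zero-sized blocks", name());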
gem5::Packet::makeTimingResponse
void makeTimingResponse()
Definition: packet.hh:1049
gem5::Cache::doWritebacks
void doWritebacks(PacketList &writebacks, Tick forward_time) override
Insert writebacks into the write buffer.
Definition: cache.cc:190
cache_blk.hh
gem5::Cache::doFastWrites
const bool doFastWrites
This cache should allocate a block on a line-sized write miss.
Definition: cache.hh:73
gem5::Cache::recvTimingSnoopResp
void recvTimingSnoopResp(PacketPtr pkt) override
Handle a snoop response.
Definition: cache.cc:267
gem5::Packet::allocate
void allocate()
Allocate memory for the packet.
Definition: packet.hh:1326
gem5::MemCmd::isSWPrefetch
bool isSWPrefetch() const
Definition: packet.hh:242
gem5::BaseCache::CacheStats::cmdStats
CacheCmdStats & cmdStats(const PacketPtr p)
Definition: base.hh:1048
types.hh
gem5::Cache::satisfyRequest
void satisfyRequest(PacketPtr pkt, CacheBlk *blk, bool deferred_response=false, bool pending_downgrade=false) override
Perform any necessary updates to the block and perform any data exchange between the packet and the b...
Definition: cache.cc:78
gem5::MSHR::getNumTargets
int getNumTargets() const
Returns the current number of allocated targets.
Definition: mshr.hh:430
gem5::MSHR::Target::FromSnoop
@ FromSnoop
Definition: mshr.hh:136
gem5::MemCmd::ReadResp
@ ReadResp
Definition: packet.hh:87
gem5::BaseCache::handleTimingReqHit
virtual void handleTimingReqHit(PacketPtr pkt, CacheBlk *blk, Tick request_time)
Definition: base.cc:224
gem5::BaseCache::evictBlock
virtual PacketPtr evictBlock(CacheBlk *blk)=0
Evict a cache block.
gem5::MSHR::handleSnoop
bool handleSnoop(PacketPtr target, Counter order)
Definition: mshr.cc:423
gem5::BaseCache::system
System * system
System we are currently operating in.
Definition: base.hh:986
gem5::BaseCache::mshrQueue
MSHRQueue mshrQueue
Miss status registers.
Definition: base.hh:344
gem5::BaseCache::memSidePort
MemSidePort memSidePort
Definition: base.hh:339
gem5::Cache::access
bool access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat, PacketList &writebacks) override
Does all the processing necessary to perform the provided request.
Definition: cache.cc:161
gem5::BaseCache::writebackClean
const bool writebackClean
Determine if clean lines should be written back or not.
Definition: base.hh:675
gem5::MemCmd::WriteReq
@ WriteReq
Definition: packet.hh:89
gem5::TaggedEntry::isSecure
bool isSecure() const
Check if this block holds data from the secure memory space.
Definition: tagged_entry.hh:64
gem5::CacheBlk::isSet
bool isSet(unsigned bits) const
Checks whether the given coherence bits are set.
Definition: cache_blk.hh:239
gem5::Packet::setBlockCached
void setBlockCached()
Definition: packet.hh:746
logging.hh
gem5::MemCmd::InvalidateReq
@ InvalidateReq
Definition: packet.hh:137
gem5::Packet::fromCache
bool fromCache() const
Definition: packet.hh:601
gem5::BaseCache::lookupLatency
const Cycles lookupLatency
The latency of tag lookup of a cache.
Definition: base.hh:894
gem5::WriteQueueEntry::getNumTargets
int getNumTargets() const
Returns the current number of allocated targets.
Definition: write_queue_entry.hh:142
gem5::MipsISA::r
r
Definition: pra_constants.hh:98
gem5::BaseCache::allocateMissBuffer
MSHR * allocateMissBuffer(PacketPtr pkt, Tick time, bool sched_send=true)
Definition: base.hh:1173
gem5::MSHR::extractServiceableTargets
TargetList extractServiceableTargets(PacketPtr pkt)
Extracts the subset of the targets that can be serviced given a received response.
Definition: mshr.cc:547
gem5::BaseTags::findBlock
virtual CacheBlk * findBlock(Addr addr, bool is_secure) const
Finds the block in the cache without touching it.
Definition: base.cc:80
gem5::ResponsePort::sendAtomicSnoop
Tick sendAtomicSnoop(PacketPtr pkt)
Send an atomic snoop request packet, where the data is moved and the state is updated in zero time,...
Definition: port.hh:326
trace.hh
gem5::MSHR::Target::FromPrefetcher
@ FromPrefetcher
Definition: mshr.hh:137
gem5::QueuedRequestPort::schedTimingSnoopResp
void schedTimingSnoopResp(PacketPtr pkt, Tick when)
Schedule the sending of a timing snoop response.
Definition: qport.hh:158
gem5::Clocked::ticksToCycles
Cycles ticksToCycles(Tick t) const
Definition: clocked_object.hh:222
gem5_assert
#define gem5_assert(cond,...)
The assert macro will function like a normal assert, but will use panic instead of straight abort().
Definition: logging.hh:318
gem5::Packet::isMaskedWrite
bool isMaskedWrite() const
Definition: packet.hh:1400
std::list
STL list class.
Definition: stl.hh:51
gem5::Packet::getAddr
Addr getAddr() const
Definition: packet.hh:781
gem5::Packet::copyResponderFlags
void copyResponderFlags(const PacketPtr pkt)
Copy the response flags from an input packet to this packet.
Definition: packet.cc:306
gem5::BaseCache::numTarget
const int numTarget
The number of targets for each MSHR.
Definition: base.hh:925
gem5::QueuedResponsePort::schedTimingResp
void schedTimingResp(PacketPtr pkt, Tick when)
Schedule the sending of a timing response.
Definition: qport.hh:93
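A hedged sketch of scheduling a hit response on a queued CPU-side port; cpuSidePort is assumed here and is not part of this index, and response_time is an illustrative name.
    // Illustrative only: turn the request into a response, charge the
    // response latency and payload delay, and queue it toward the CPU.
    pkt->makeTimingResponse();
    Tick response_time = clockEdge(responseLatency) + pkt->payloadDelay;
    pkt->payloadDelay = 0;
    cpuSidePort.schedTimingResp(pkt, response_time);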
gem5::Cache::isCachedAbove
bool isCachedAbove(PacketPtr pkt, bool is_timing=true)
Send up a snoop request and find cached copies.
Definition: cache.cc:1331
gem5
Reference material can be found at the JEDEC website: UFS standard http://www.jedec....
Definition: tlb.cc:60
gem5::Cache::doWritebacksAtomic
void doWritebacksAtomic(PacketList &writebacks) override
Send writebacks down the memory hierarchy in atomic mode.
Definition: cache.cc:232
gem5::BaseCache::forwardLatency
const Cycles forwardLatency
This is the forward latency of the cache.
Definition: base.hh:907
gem5::MSHR::hasPostDowngrade
bool hasPostDowngrade() const
Definition: mshr.hh:334
gem5::MSHR::getTarget
QueueEntry::Target * getTarget() override
Returns a reference to the first target.
Definition: mshr.hh:457
gem5::MemCmd::ReadRespWithInvalidate
@ ReadRespWithInvalidate
Definition: packet.hh:88
gem5::Packet::isBlockCached
bool isBlockCached() const
Definition: packet.hh:747
gem5::Packet::isResponse
bool isResponse() const
Definition: packet.hh:587
write_queue_entry.hh
gem5::MemCmd::WriteLineReq
@ WriteLineReq
Definition: packet.hh:101
gem5::BaseCache::handleFill
CacheBlk * handleFill(PacketPtr pkt, CacheBlk *blk, PacketList &writebacks, bool allocate)
Handle a fill operation caused by a received packet.
Definition: base.cc:1435
gem5::Packet::getSize
unsigned getSize() const
Definition: packet.hh:791
gem5::MemCmd::UpgradeFailResp
@ UpgradeFailResp
Definition: packet.hh:106
gem5::Packet::isWholeLineWrite
bool isWholeLineWrite(unsigned blk_size)
Definition: packet.hh:614
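A hedged sketch of the promotion check that promoteWholeLineWrites (listed above) is built around; the real function also handles additional flags.
    // Illustrative only: a write covering the whole line can be upgraded so
    // that no stale data has to be fetched from below.
    if (doFastWrites && pkt->cmd == MemCmd::WriteReq &&
        pkt->isWholeLineWrite(blkSize)) {
        pkt->cmd = MemCmd::WriteLineReq;
    }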
gem5::Packet::hasRespData
bool hasRespData() const
Definition: packet.hh:604
gem5::Cache::doTimingSupplyResponse
void doTimingSupplyResponse(PacketPtr req_pkt, const uint8_t *blk_data, bool already_copied, bool pending_inval)
Definition: cache.cc:939
gem5::BaseCache::writebackBlk
PacketPtr writebackBlk(CacheBlk *blk)
Create a writeback request for the given block.
Definition: base.cc:1617
gem5::TaggedEntry::isValid
virtual bool isValid() const
Checks if the entry is valid.
Definition: tagged_entry.hh:57
gem5::Packet::isEviction
bool isEviction() const
Definition: packet.hh:599
gem5::Packet::isInvalidate
bool isInvalidate() const
Definition: packet.hh:598
panic
#define panic(...)
This implements a cprintf based panic() function.
Definition: logging.hh:178
gem5::BaseCache::satisfyRequest
virtual void satisfyRequest(PacketPtr pkt, CacheBlk *blk, bool deferred_response=false, bool pending_downgrade=false)
Perform any necessary updates to the block and perform any data exchange between the packet and the b...
Definition: base.cc:1013
gem5::MemCmd::HardPFResp
@ HardPFResp
Definition: packet.hh:100
gem5::Clocked::clockPeriod
Tick clockPeriod() const
Definition: clocked_object.hh:217
gem5::Cache::recvTimingReq
void recvTimingReq(PacketPtr pkt) override
Performs the access specified by the request.
Definition: cache.cc:403
gem5::BaseCache::responseLatency
const Cycles responseLatency
The latency of sending a response to its upper level cache/core on a linefill.
Definition: base.hh:917
gem5::BaseCache::compressor
compression::Base * compressor
Compression method being used.
Definition: base.hh:353

Generated on Tue Dec 21 2021 11:34:31 for gem5 by doxygen 1.8.17