gem5  v20.1.0.0
cache.cc
/*
 * Copyright (c) 2010-2019 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * Copyright (c) 2010,2015 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * @file
 * Cache definitions.
 */

#include "mem/cache/cache.hh"

#include <cassert>

#include "base/compiler.hh"
#include "base/logging.hh"
#include "base/trace.hh"
#include "base/types.hh"
#include "debug/Cache.hh"
#include "debug/CacheTags.hh"
#include "debug/CacheVerbose.hh"
#include "enums/Clusivity.hh"
#include "mem/cache/cache_blk.hh"
#include "mem/cache/mshr.hh"
#include "mem/cache/tags/base.hh"
#include "mem/cache/write_queue_entry.hh"
#include "mem/request.hh"
#include "params/Cache.hh"

Cache::Cache(const CacheParams *p)
    : BaseCache(p, p->system->cacheLineSize()),
      doFastWrites(true)
{
}
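
// Note that a Cache is not instantiated directly; the simulator builds
// it from a Python-generated CacheParams object through
// CacheParams::create() at the bottom of this file, which also checks
// that a tags object and a replacement policy have been supplied.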

void
Cache::satisfyRequest(PacketPtr pkt, CacheBlk *blk,
                      bool deferred_response, bool pending_downgrade)
{
    BaseCache::satisfyRequest(pkt, blk);

    if (pkt->isRead()) {
        // determine if this read is from a (coherent) cache or not
        if (pkt->fromCache()) {
            assert(pkt->getSize() == blkSize);
            // special handling for coherent block requests from
            // upper-level caches
            if (pkt->needsWritable()) {
                // sanity check
                assert(pkt->cmd == MemCmd::ReadExReq ||
                       pkt->cmd == MemCmd::SCUpgradeFailReq);
                assert(!pkt->hasSharers());

                // if we have a dirty copy, make sure the recipient
                // keeps it marked dirty (in the modified state)
                if (blk->isDirty()) {
                    pkt->setCacheResponding();
                    blk->status &= ~BlkDirty;
                }
            } else if (blk->isWritable() && !pending_downgrade &&
                       !pkt->hasSharers() &&
                       pkt->cmd != MemCmd::ReadCleanReq) {
                // we can give the requestor a writable copy on a read
                // request if:
                // - we have a writable copy at this level (& below)
                // - we don't have a pending snoop from below
                //   signaling another read request
                // - no other cache above has a copy (otherwise it
                //   would have set the hasSharers flag when
                //   snooping the packet)
                // - the read has not explicitly asked for a clean
                //   copy of the line
                if (blk->isDirty()) {
                    // special considerations if we're owner:
                    if (!deferred_response) {
                        // respond with the line in Modified state
                        // (cacheResponding set, hasSharers not set)
                        pkt->setCacheResponding();

                        // if this cache is mostly inclusive, we
                        // keep the block in the Exclusive state,
                        // and pass it upwards as Modified
                        // (writable and dirty), hence we have
                        // multiple caches, all on the same path
                        // towards memory, all considering the
                        // same block writable, but only one
                        // considering it Modified

                        // we get away with multiple caches (on
                        // the same path to memory) considering
                        // the block writable as we always enter
                        // the cache hierarchy through a cache,
                        // and first snoop upwards in all other
                        // branches
                        blk->status &= ~BlkDirty;
                    } else {
                        // if we're responding after our own miss,
                        // there's a window where the recipient didn't
                        // know it was getting ownership and may not
                        // have responded to snoops correctly, so we
                        // have to respond with a shared line
                        pkt->setHasSharers();
                    }
                }
            } else {
                // otherwise only respond with a shared copy
                pkt->setHasSharers();
            }
        }
    }
}
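
// A quick summary of the coherent-read cases above (derived from the
// logic in satisfyRequest, not an exhaustive protocol table): a read
// that needs a writable copy gets cacheResponding if our copy is dirty;
// a plain read is handed a writable copy (hasSharers left clear) only
// if the block is writable here, no downgrade is pending, no upstream
// sharer exists, and the requestor did not ask for a clean copy; in
// every other case the requestor gets a shared copy via setHasSharers().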

/////////////////////////////////////////////////////
//
// Access path: requests coming in from the CPU side
//
/////////////////////////////////////////////////////

bool
Cache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat,
              PacketList &writebacks)
{
    if (pkt->req->isUncacheable()) {
        assert(pkt->isRequest());

        chatty_assert(!(isReadOnly && pkt->isWrite()),
                      "Should never see a write in a read-only cache %s\n",
                      name());

        DPRINTF(Cache, "%s for %s\n", __func__, pkt->print());

        // flush and invalidate any existing block
        CacheBlk *old_blk(tags->findBlock(pkt->getAddr(), pkt->isSecure()));
        if (old_blk && old_blk->isValid()) {
            BaseCache::evictBlock(old_blk, writebacks);
        }

        blk = nullptr;
        // lookupLatency is the latency in case the request is uncacheable.
        lat = lookupLatency;
        return false;
    }

    return BaseCache::access(pkt, blk, lat, writebacks);
}

void
Cache::doWritebacks(PacketList& writebacks, Tick forward_time)
{
    while (!writebacks.empty()) {
        PacketPtr wbPkt = writebacks.front();
        // We use forwardLatency here because we are copying writebacks
        // to the write buffer.

        // Call isCachedAbove for Writebacks, CleanEvicts and
        // WriteCleans to discover if the block is cached above.
        if (isCachedAbove(wbPkt)) {
            if (wbPkt->cmd == MemCmd::CleanEvict) {
                // Delete CleanEvict because cached copies exist above. The
                // packet destructor will delete the request object because
                // this is a non-snoop request packet which does not require
                // a response.
                delete wbPkt;
            } else if (wbPkt->cmd == MemCmd::WritebackClean) {
                // clean writeback, do not send since the block is
                // still cached above
                assert(writebackClean);
                delete wbPkt;
            } else {
                assert(wbPkt->cmd == MemCmd::WritebackDirty ||
                       wbPkt->cmd == MemCmd::WriteClean);
                // Set BLOCK_CACHED flag in Writeback and send below, so that
                // the Writeback does not reset the bit corresponding to this
                // address in the snoop filter below.
                wbPkt->setBlockCached();
                allocateWriteBuffer(wbPkt, forward_time);
            }
        } else {
            // If the block is not cached above, send packet below. Both
            // CleanEvict and Writeback with BLOCK_CACHED flag cleared will
            // reset the bit corresponding to this address in the snoop
            // filter below.
            allocateWriteBuffer(wbPkt, forward_time);
        }
        writebacks.pop_front();
    }
}

void
Cache::doWritebacksAtomic(PacketList& writebacks)
{
    while (!writebacks.empty()) {
        PacketPtr wbPkt = writebacks.front();
        // Call isCachedAbove for both Writebacks and CleanEvicts. If
        // isCachedAbove returns true we set BLOCK_CACHED flag in Writebacks
        // and discard CleanEvicts.
        if (isCachedAbove(wbPkt, false)) {
            if (wbPkt->cmd == MemCmd::WritebackDirty ||
                wbPkt->cmd == MemCmd::WriteClean) {
                // Set BLOCK_CACHED flag in Writeback and send below,
                // so that the Writeback does not reset the bit
                // corresponding to this address in the snoop filter
                // below. We can discard CleanEvicts because cached
                // copies exist above. Atomic mode isCachedAbove
                // modifies packet to set BLOCK_CACHED flag
                memSidePort.sendAtomic(wbPkt);
            }
        } else {
            // If the block is not cached above, send packet below. Both
            // CleanEvict and Writeback with BLOCK_CACHED flag cleared will
            // reset the bit corresponding to this address in the snoop
            // filter below.
            memSidePort.sendAtomic(wbPkt);
        }
        writebacks.pop_front();
        // In case of CleanEvicts, the packet destructor will delete the
        // request object because this is a non-snoop request packet which
        // does not require a response.
        delete wbPkt;
    }
}


void
Cache::recvTimingSnoopResp(PacketPtr pkt)
{
    DPRINTF(Cache, "%s for %s\n", __func__, pkt->print());

    // determine if the response is from a snoop request we created
    // (in which case it should be in the outstandingSnoop), or if we
    // merely forwarded someone else's snoop request
    const bool forwardAsSnoop = outstandingSnoop.find(pkt->req) ==
        outstandingSnoop.end();

    if (!forwardAsSnoop) {
        // the packet came from this cache, so sink it here and do not
        // forward it
        assert(pkt->cmd == MemCmd::HardPFResp);

        outstandingSnoop.erase(pkt->req);

        DPRINTF(Cache, "Got prefetch response from above for addr "
                "%#llx (%s)\n", pkt->getAddr(), pkt->isSecure() ? "s" : "ns");
        recvTimingResp(pkt);
        return;
    }

    // forwardLatency is set here because there is a response from an
    // upper level cache.
    // To pay the delay that occurs if the packet comes from the bus,
    // we charge also headerDelay.
    Tick snoop_resp_time = clockEdge(forwardLatency) + pkt->headerDelay;
    // Reset the timing of the packet.
    pkt->headerDelay = pkt->payloadDelay = 0;
    memSidePort.schedTimingSnoopResp(pkt, snoop_resp_time);
}

void
Cache::promoteWholeLineWrites(PacketPtr pkt)
{
    // Cache line clearing instructions
    if (doFastWrites && (pkt->cmd == MemCmd::WriteReq) &&
        (pkt->getSize() == blkSize) && (pkt->getOffset(blkSize) == 0) &&
        !pkt->isMaskedWrite()) {
        pkt->cmd = MemCmd::WriteLineReq;
        DPRINTF(Cache, "packet promoted from Write to WriteLineReq\n");
    }
}
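
// As a concrete example, assuming a 64-byte block size: a WriteReq of
// 64 bytes at a 64-byte-aligned address, carrying no byte-enable mask,
// is rewritten in place to a WriteLineReq. On a miss this later allows
// createMissPacket() to send an InvalidateReq instead of fetching data
// that is about to be overwritten in full.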

void
Cache::handleTimingReqHit(PacketPtr pkt, CacheBlk *blk, Tick request_time)
{
    // should never be satisfying an uncacheable access as we
    // flush and invalidate any existing block as part of the
    // lookup
    assert(!pkt->req->isUncacheable());

    BaseCache::handleTimingReqHit(pkt, blk, request_time);
}

void
Cache::handleTimingReqMiss(PacketPtr pkt, CacheBlk *blk, Tick forward_time,
                           Tick request_time)
{
    if (pkt->req->isUncacheable()) {
        // ignore any existing MSHR if we are dealing with an
        // uncacheable request

        // should have flushed and have no valid block
        assert(!blk || !blk->isValid());

        stats.cmdStats(pkt).mshr_uncacheable[pkt->req->requestorId()]++;

        if (pkt->isWrite()) {
            allocateWriteBuffer(pkt, forward_time);
        } else {
            assert(pkt->isRead());

            // uncacheable accesses always allocate a new MSHR

            // Here we are using forward_time, modelling the latency of
            // a miss (outbound) just as forwardLatency, neglecting the
            // lookupLatency component.
            allocateMissBuffer(pkt, forward_time);
        }

        return;
    }

    Addr blk_addr = pkt->getBlockAddr(blkSize);

    MSHR *mshr = mshrQueue.findMatch(blk_addr, pkt->isSecure());

    // Software prefetch handling:
    // To keep the core from waiting on data it won't look at
    // anyway, send back a response with dummy data. Miss handling
    // will continue asynchronously. Unfortunately, the core will
    // insist upon freeing the original Packet/Request, so we have to
    // create a new pair with a different lifecycle. Note that this
    // processing happens before any MSHR munging on the behalf of
    // this request because this new Request will be the one stored
    // into the MSHRs, not the original.
    if (pkt->cmd.isSWPrefetch()) {
        assert(pkt->needsResponse());
        assert(pkt->req->hasPaddr());
        assert(!pkt->req->isUncacheable());

        // There's no reason to add a prefetch as an additional target
        // to an existing MSHR. If an outstanding request is already
        // in progress, there is nothing for the prefetch to do.
        // If this is the case, we don't even create a request at all.
        PacketPtr pf = nullptr;

        if (!mshr) {
            // copy the request and create a new SoftPFReq packet
            RequestPtr req = std::make_shared<Request>(pkt->req->getPaddr(),
                                                       pkt->req->getSize(),
                                                       pkt->req->getFlags(),
                                                       pkt->req->requestorId());
            pf = new Packet(req, pkt->cmd);
            pf->allocate();
            assert(pf->matchAddr(pkt));
            assert(pf->getSize() == pkt->getSize());
        }

        pkt->makeTimingResponse();

        // request_time is used here, taking into account lat and the delay
        // charged if the packet comes from the xbar.
        cpuSidePort.schedTimingResp(pkt, request_time);

        // If an outstanding request is in progress (we found an
        // MSHR) this is set to null
        pkt = pf;
    }

    BaseCache::handleTimingReqMiss(pkt, mshr, blk, forward_time, request_time);
}

void
Cache::recvTimingReq(PacketPtr pkt)
{
    DPRINTF(CacheTags, "%s tags:\n%s\n", __func__, tags->print());

    promoteWholeLineWrites(pkt);

    if (pkt->cacheResponding()) {
        // a cache above us (but not where the packet came from) is
        // responding to the request, in other words it has the line
        // in Modified or Owned state
        DPRINTF(Cache, "Cache above responding to %s: not responding\n",
                pkt->print());

        // if the packet needs the block to be writable, and the cache
        // that has promised to respond (setting the cache responding
        // flag) is not providing writable (it is in Owned rather than
        // the Modified state), we know that there may be other Shared
        // copies in the system; go out and invalidate them all
        assert(pkt->needsWritable() && !pkt->responderHadWritable());

        // an upstream cache that had the line in Owned state
        // (dirty, but not writable), is responding and thus
        // transferring the dirty line from one branch of the
        // cache hierarchy to another

        // send out an express snoop and invalidate all other
        // copies (snooping a packet that needs writable is the
        // same as an invalidation), thus turning the Owned line
        // into a Modified line, note that we don't invalidate the
        // block in the current cache or any other cache on the
        // path to memory

        // create a downstream express snoop with cleared packet
        // flags, there is no need to allocate any data as the
        // packet is merely used to co-ordinate state transitions
        Packet *snoop_pkt = new Packet(pkt, true, false);

        // also reset the bus time that the original packet has
        // not yet paid for
        snoop_pkt->headerDelay = snoop_pkt->payloadDelay = 0;

        // make this an instantaneous express snoop, and let the
        // other caches in the system know that another cache
        // is responding, because we have found the authoritative
        // copy (Modified or Owned) that will supply the right
        // data
        snoop_pkt->setExpressSnoop();
        snoop_pkt->setCacheResponding();

        // this express snoop travels towards the memory, and at
        // every crossbar it is snooped upwards thus reaching
        // every cache in the system
        bool M5_VAR_USED success = memSidePort.sendTimingReq(snoop_pkt);
        // express snoops always succeed
        assert(success);

        // main memory will delete the snoop packet

        // queue for deletion, as opposed to immediate deletion, as
        // the sending cache is still relying on the packet
        pendingDelete.reset(pkt);

        // no need to take any further action in this particular cache
        // as an upstream cache has already committed to responding,
        // and we have already sent out any express snoops in the
        // section above to ensure all other copies in the system are
        // invalidated
        return;
    }

    BaseCache::recvTimingReq(pkt);
}

PacketPtr
Cache::createMissPacket(PacketPtr cpu_pkt, CacheBlk *blk,
                        bool needsWritable,
                        bool is_whole_line_write) const
{
    // should never see evictions here
    assert(!cpu_pkt->isEviction());

    bool blkValid = blk && blk->isValid();

    if (cpu_pkt->req->isUncacheable() ||
        (!blkValid && cpu_pkt->isUpgrade()) ||
        cpu_pkt->cmd == MemCmd::InvalidateReq || cpu_pkt->isClean()) {
        // uncacheable requests and upgrades from upper-level caches
        // that missed completely just go through as is
        return nullptr;
    }

    assert(cpu_pkt->needsResponse());

    MemCmd cmd;
    // @TODO make useUpgrades a parameter.
    // Note that ownership protocols require upgrade, otherwise a
    // write miss on a shared owned block will generate a ReadExcl,
    // which will clobber the owned copy.
    const bool useUpgrades = true;
    assert(cpu_pkt->cmd != MemCmd::WriteLineReq || is_whole_line_write);
    if (is_whole_line_write) {
        assert(!blkValid || !blk->isWritable());
        // forward as invalidate to all other caches, this gives us
        // the line in Exclusive state, and invalidates all other
        // copies
        cmd = MemCmd::InvalidateReq;
    } else if (blkValid && useUpgrades) {
        // only reason to be here is that blk is read only and we need
        // it to be writable
        assert(needsWritable);
        assert(!blk->isWritable());
        cmd = cpu_pkt->isLLSC() ? MemCmd::SCUpgradeReq : MemCmd::UpgradeReq;
    } else if (cpu_pkt->cmd == MemCmd::SCUpgradeFailReq ||
               cpu_pkt->cmd == MemCmd::StoreCondFailReq) {
        // Even though this SC will fail, we still need to send out the
        // request and get the data to supply it to other snoopers in the
        // case where the determination that the StoreCond fails is
        // delayed due to all caches not being on the same local bus.
        cmd = MemCmd::SCUpgradeFailReq;
    } else {
        // block is invalid

        // If the request does not need a writable copy there are two
        // cases where we need to ensure the response will not fetch the
        // block in dirty state:
        // * this cache is read only and it does not perform
        //   writebacks,
        // * this cache is mostly exclusive and will not fill (since
        //   it does not fill it will have to writeback the dirty data
        //   immediately which generates unnecessary writebacks).
        bool force_clean_rsp = isReadOnly || clusivity == Enums::mostly_excl;
        cmd = needsWritable ? MemCmd::ReadExReq :
            (force_clean_rsp ? MemCmd::ReadCleanReq : MemCmd::ReadSharedReq);
    }
    PacketPtr pkt = new Packet(cpu_pkt->req, cmd, blkSize);

    // if there are upstream caches that have already marked the
    // packet as having sharers (not passing writable), pass that info
    // downstream
    if (cpu_pkt->hasSharers() && !needsWritable) {
        // note that cpu_pkt may have spent a considerable time in the
        // MSHR queue and that the information could possibly be out
        // of date, however, there is no harm in conservatively
        // assuming the block has sharers
        pkt->setHasSharers();
        DPRINTF(Cache, "%s: passing hasSharers from %s to %s\n",
                __func__, cpu_pkt->print(), pkt->print());
    }

    // the packet should be block aligned
    assert(pkt->getAddr() == pkt->getBlockAddr(blkSize));

    pkt->allocate();
    DPRINTF(Cache, "%s: created %s from %s\n", __func__, pkt->print(),
            cpu_pkt->print());
    return pkt;
}
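
// The command selection above boils down to, in order of precedence:
//   whole-line write                        -> InvalidateReq
//   valid (read-only) block, needsWritable  -> SCUpgradeReq (LL/SC) or
//                                              UpgradeReq
//   failed store-conditional                -> SCUpgradeFailReq
//   invalid block, needsWritable            -> ReadExReq
//   invalid block, clean response forced
//   (read-only or mostly-exclusive cache)   -> ReadCleanReq
//   invalid block, otherwise                -> ReadSharedReq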


Cycles
Cache::handleAtomicReqMiss(PacketPtr pkt, CacheBlk *&blk,
                           PacketList &writebacks)
{
    // deal with the packets that go through the write path of
    // the cache, i.e. any evictions and writes
    if (pkt->isEviction() || pkt->cmd == MemCmd::WriteClean ||
        (pkt->req->isUncacheable() && pkt->isWrite())) {
        Cycles latency = ticksToCycles(memSidePort.sendAtomic(pkt));

        // at this point, if the request was an uncacheable write
        // request, it has been satisfied by a memory below and the
        // packet carries the response back
        assert(!(pkt->req->isUncacheable() && pkt->isWrite()) ||
               pkt->isResponse());

        return latency;
    }

    // only misses left

    PacketPtr bus_pkt = createMissPacket(pkt, blk, pkt->needsWritable(),
                                         pkt->isWholeLineWrite(blkSize));

    bool is_forward = (bus_pkt == nullptr);

    if (is_forward) {
        // just forwarding the same request to the next level
        // no local cache operation involved
        bus_pkt = pkt;
    }

    DPRINTF(Cache, "%s: Sending an atomic %s\n", __func__,
            bus_pkt->print());

#if TRACING_ON
    CacheBlk::State old_state = blk ? blk->status : 0;
#endif

    Cycles latency = ticksToCycles(memSidePort.sendAtomic(bus_pkt));

    bool is_invalidate = bus_pkt->isInvalidate();

    // We are now dealing with the response handling
    DPRINTF(Cache, "%s: Receive response: %s in state %i\n", __func__,
            bus_pkt->print(), old_state);

    // If packet was a forward, the response (if any) is already
    // in place in the bus_pkt == pkt structure, so we don't need
    // to do anything. Otherwise, use the separate bus_pkt to
    // generate response to pkt and then delete it.
    if (!is_forward) {
        if (pkt->needsResponse()) {
            assert(bus_pkt->isResponse());
            if (bus_pkt->isError()) {
                pkt->makeAtomicResponse();
                pkt->copyError(bus_pkt);
            } else if (pkt->isWholeLineWrite(blkSize)) {
                // note the use of pkt, not bus_pkt here.

                // write-line request to the cache that promoted
                // the write to a whole line
                const bool allocate = allocOnFill(pkt->cmd) &&
                    (!writeAllocator || writeAllocator->allocate());
                blk = handleFill(bus_pkt, blk, writebacks, allocate);
                assert(blk != NULL);
                is_invalidate = false;
                satisfyRequest(pkt, blk);
            } else if (bus_pkt->isRead() ||
                       bus_pkt->cmd == MemCmd::UpgradeResp) {
                // we're updating cache state to allow us to
                // satisfy the upstream request from the cache
                blk = handleFill(bus_pkt, blk, writebacks,
                                 allocOnFill(pkt->cmd));
                satisfyRequest(pkt, blk);
                maintainClusivity(pkt->fromCache(), blk);
            } else {
                // we're satisfying the upstream request without
                // modifying cache state, e.g., a write-through
                pkt->makeAtomicResponse();
            }
        }
        delete bus_pkt;
    }

    if (is_invalidate && blk && blk->isValid()) {
        invalidateBlock(blk);
    }

    return latency;
}

Tick
Cache::recvAtomic(PacketPtr pkt)
{
    promoteWholeLineWrites(pkt);

    // follow the same flow as in recvTimingReq, and check if a cache
    // above us is responding
    if (pkt->cacheResponding()) {
        assert(!pkt->req->isCacheInvalidate());
        DPRINTF(Cache, "Cache above responding to %s: not responding\n",
                pkt->print());

        // if a cache is responding, and it had the line in Owned
        // rather than Modified state, we need to invalidate any
        // copies that are not on the same path to memory
        assert(pkt->needsWritable() && !pkt->responderHadWritable());

        return memSidePort.sendAtomic(pkt);
    }

    return BaseCache::recvAtomic(pkt);
}


/////////////////////////////////////////////////////
//
// Response handling: responses from the memory side
//
/////////////////////////////////////////////////////


void
Cache::serviceMSHRTargets(PacketPtr pkt, MSHR *mshr, CacheBlk *blk)
{
    QueueEntry::Target *initial_tgt = mshr->getTarget();
    // First offset for critical word first calculations
    const int initial_offset = initial_tgt->pkt->getOffset(blkSize);

    const bool is_error = pkt->isError();
    // allow invalidation responses originating from write-line
    // requests to be discarded
    bool is_invalidate = pkt->isInvalidate() &&
        !mshr->wasWholeLineWrite;

    MSHR::TargetList targets = mshr->extractServiceableTargets(pkt);
    for (auto &target: targets) {
        Packet *tgt_pkt = target.pkt;
        switch (target.source) {
          case MSHR::Target::FromCPU:
            Tick completion_time;
            // Here we charge on completion_time the delay of the xbar if the
            // packet comes from it, charged on headerDelay.
            completion_time = pkt->headerDelay;

            // Software prefetch handling for cache closest to core
            if (tgt_pkt->cmd.isSWPrefetch()) {
                if (tgt_pkt->needsWritable()) {
                    // All other copies of the block were invalidated and we
                    // have an exclusive copy.

                    // The coherence protocol assumes that if we fetched an
                    // exclusive copy of the block, we have the intention to
                    // modify it. Therefore the MSHR for the PrefetchExReq has
                    // been the point of ordering and this cache has committed
                    // to respond to snoops for the block.
                    //
                    // In most cases this is true anyway - a PrefetchExReq
                    // will be followed by a WriteReq. However, if that
                    // doesn't happen, the block is not marked as dirty and
                    // the cache, despite having committed to do so, doesn't
                    // respond to snoops.
                    //
                    // To avoid deadlocks in cases where there is a snoop
                    // between the PrefetchExReq and the expected WriteReq, we
                    // proactively mark the block as Dirty.
                    assert(blk);
                    blk->status |= BlkDirty;

                    panic_if(isReadOnly, "Prefetch exclusive requests from "
                             "read-only cache %s\n", name());
                }

                // a software prefetch would have already been ack'd
                // immediately with dummy data so the core would be able to
                // retire it. This request completes right here, so we
                // deallocate it.
                delete tgt_pkt;
                break; // skip response
            }

            // unlike the other packet flows, where data is found in other
            // caches or memory and brought back, write-line requests always
            // have the data right away, so the above check for "is fill?"
            // cannot actually be determined until examining the stored MSHR
            // state. We "catch up" with that logic here, which is duplicated
            // from above.
            if (tgt_pkt->cmd == MemCmd::WriteLineReq) {
                assert(!is_error);
                assert(blk);
                assert(blk->isWritable());
            }

            // Here we decide whether we will satisfy the target using
            // data from the block or from the response. We use the
            // block data to satisfy the request when the block is
            // present and valid and in addition the response is not
            // forwarding data to the cache above (we didn't fill
            // either); otherwise we use the packet data.
            if (blk && blk->isValid() &&
                (!mshr->isForward || !pkt->hasData())) {
                satisfyRequest(tgt_pkt, blk, true, mshr->hasPostDowngrade());

                // How many bytes past the first request is this one
                int transfer_offset =
                    tgt_pkt->getOffset(blkSize) - initial_offset;
                if (transfer_offset < 0) {
                    transfer_offset += blkSize;
                }

                // If not critical word (offset) return payloadDelay.
                // responseLatency is the latency of the return path
                // from lower level caches/memory to an upper level cache or
                // the core.
                completion_time += clockEdge(responseLatency) +
                    (transfer_offset ? pkt->payloadDelay : 0);

                assert(!tgt_pkt->req->isUncacheable());

                assert(tgt_pkt->req->requestorId() < system->maxRequestors());
                stats.cmdStats(tgt_pkt)
                    .missLatency[tgt_pkt->req->requestorId()] +=
                    completion_time - target.recvTime;
            } else if (pkt->cmd == MemCmd::UpgradeFailResp) {
                // failed StoreCond upgrade
                assert(tgt_pkt->cmd == MemCmd::StoreCondReq ||
                       tgt_pkt->cmd == MemCmd::StoreCondFailReq ||
                       tgt_pkt->cmd == MemCmd::SCUpgradeFailReq);
                // responseLatency is the latency of the return path
                // from lower level caches/memory to an upper level cache or
                // the core.
                completion_time += clockEdge(responseLatency) +
                    pkt->payloadDelay;
                tgt_pkt->req->setExtraData(0);
            } else {
                if (is_invalidate && blk && blk->isValid()) {
                    // We are about to send a response to a cache above
                    // that asked for an invalidation; we need to
                    // invalidate our copy immediately as the most
                    // up-to-date copy of the block will now be in the
                    // cache above. It will also prevent this cache from
                    // responding (if the block was previously dirty) to
                    // snoops as they should snoop the caches above where
                    // they will get the response from.
                    invalidateBlock(blk);
                }
                // not a cache fill, just forwarding response
                // responseLatency is the latency of the return path
                // from lower level caches/memory to the core.
                completion_time += clockEdge(responseLatency) +
                    pkt->payloadDelay;
                if (!is_error) {
                    if (pkt->isRead()) {
                        // sanity check
                        assert(pkt->matchAddr(tgt_pkt));
                        assert(pkt->getSize() >= tgt_pkt->getSize());

                        tgt_pkt->setData(pkt->getConstPtr<uint8_t>());
                    } else {
                        // MSHR targets can read data either from the
                        // block or the response pkt. If we can't get data
                        // from the block (i.e., invalid or has old data)
                        // or the response (did not bring in any data)
                        // then make sure that the target didn't expect
                        // any.
                        assert(!tgt_pkt->hasRespData());
                    }
                }

                // this response did not allocate here and therefore
                // it was not consumed, make sure that any flags are
                // carried over to cache above
                tgt_pkt->copyResponderFlags(pkt);
            }
            tgt_pkt->makeTimingResponse();
            // if this packet is an error copy that to the new packet
            if (is_error)
                tgt_pkt->copyError(pkt);
            if (tgt_pkt->cmd == MemCmd::ReadResp &&
                (is_invalidate || mshr->hasPostInvalidate())) {
                // If intermediate cache got ReadRespWithInvalidate,
                // propagate that. Response should not have
                // isInvalidate() set otherwise.
                tgt_pkt->cmd = MemCmd::ReadRespWithInvalidate;
                DPRINTF(Cache, "%s: updated cmd to %s\n", __func__,
                        tgt_pkt->print());
            }
            // Reset the bus additional time as it is now accounted for
            tgt_pkt->headerDelay = tgt_pkt->payloadDelay = 0;
            cpuSidePort.schedTimingResp(tgt_pkt, completion_time);
            break;

          case MSHR::Target::FromPrefetcher:
            assert(tgt_pkt->cmd == MemCmd::HardPFReq);
            if (blk)
                blk->status |= BlkHWPrefetched;
            delete tgt_pkt;
            break;

          case MSHR::Target::FromSnoop:
            // I don't believe that a snoop can be in an error state
            assert(!is_error);
            // response to snoop request
            DPRINTF(Cache, "processing deferred snoop...\n");
            // If the response is invalidating, a snooping target can
            // be satisfied if it is also invalidating. If the response is
            // not only invalidating, but more specifically an
            // InvalidateResp and the MSHR was created due to an
            // InvalidateReq then a cache above is waiting to satisfy a
            // WriteLineReq. In this case even a non-invalidating snoop is
            // added as a target here since this is the ordering point.
            // When the InvalidateResp reaches this cache, the snooping
            // target will snoop further the cache above with the
            // WriteLineReq.
            assert(!is_invalidate || pkt->cmd == MemCmd::InvalidateResp ||
                   pkt->req->isCacheMaintenance() ||
                   mshr->hasPostInvalidate());
            handleSnoop(tgt_pkt, blk, true, true, mshr->hasPostInvalidate());
            break;

          default:
            panic("Illegal target->source enum %d\n", target.source);
        }
    }

    maintainClusivity(targets.hasFromCache, blk);

    if (blk && blk->isValid()) {
        // an invalidate response stemming from a write line request
        // should not invalidate the block, so check if the
        // invalidation should be discarded
        if (is_invalidate || mshr->hasPostInvalidate()) {
            invalidateBlock(blk);
        } else if (mshr->hasPostDowngrade()) {
            blk->status &= ~BlkWritable;
        }
    }
}

PacketPtr
Cache::evictBlock(CacheBlk *blk)
{
    PacketPtr pkt = (blk->isDirty() || writebackClean) ?
        writebackBlk(blk) : cleanEvictBlk(blk);

    invalidateBlock(blk);

    return pkt;
}
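
// Whether a clean victim generates any downstream traffic depends on
// the writebackClean parameter: if it is set, clean blocks are evicted
// as WritebackClean packets, otherwise the zero-sized CleanEvict
// message created below merely informs the snoop filter of the
// eviction.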

PacketPtr
Cache::cleanEvictBlk(CacheBlk *blk)
{
    assert(!writebackClean);
    assert(blk && blk->isValid() && !blk->isDirty());

    // Creating a zero sized write, a message to the snoop filter
    RequestPtr req = std::make_shared<Request>(
        regenerateBlkAddr(blk), blkSize, 0, Request::wbRequestorId);

    if (blk->isSecure())
        req->setFlags(Request::SECURE);

    req->taskId(blk->task_id);

    PacketPtr pkt = new Packet(req, MemCmd::CleanEvict);
    pkt->allocate();
    DPRINTF(Cache, "Create CleanEvict %s\n", pkt->print());

    return pkt;
}

/////////////////////////////////////////////////////
//
// Snoop path: requests coming in from the memory side
//
/////////////////////////////////////////////////////

void
Cache::doTimingSupplyResponse(PacketPtr req_pkt, const uint8_t *blk_data,
                              bool already_copied, bool pending_inval)
{
    // sanity check
    assert(req_pkt->isRequest());
    assert(req_pkt->needsResponse());

    DPRINTF(Cache, "%s: for %s\n", __func__, req_pkt->print());
    // timing-mode snoop responses require a new packet, unless we
    // already made a copy...
    PacketPtr pkt = req_pkt;
    if (!already_copied)
        // do not clear flags, and allocate space for data if the
        // packet needs it (the only packets that carry data are read
        // responses)
        pkt = new Packet(req_pkt, false, req_pkt->isRead());

    assert(req_pkt->req->isUncacheable() || req_pkt->isInvalidate() ||
           pkt->hasSharers());
    pkt->makeTimingResponse();
    if (pkt->isRead()) {
        pkt->setDataFromBlock(blk_data, blkSize);
    }
    if (pkt->cmd == MemCmd::ReadResp && pending_inval) {
        // Assume we defer a response to a read from a far-away cache
        // A, then later defer a ReadExcl from a cache B on the same
        // bus as us. We'll assert cacheResponding in both cases, but
        // in the latter case cacheResponding will keep the
        // invalidation from reaching cache A. This special response
        // tells cache A that it gets the block to satisfy its read,
        // but must immediately invalidate it.
        pkt->cmd = MemCmd::ReadRespWithInvalidate;
    }
    // Here we consider forward_time, paying for just forward latency and
    // also charging the delay provided by the xbar.
    // forward_time is used as send_time in next allocateWriteBuffer().
    Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;
    // Here we reset the timing of the packet.
    pkt->headerDelay = pkt->payloadDelay = 0;
    DPRINTF(CacheVerbose, "%s: created response: %s tick: %lu\n", __func__,
            pkt->print(), forward_time);
    memSidePort.schedTimingSnoopResp(pkt, forward_time);
}

uint32_t
Cache::handleSnoop(PacketPtr pkt, CacheBlk *blk, bool is_timing,
                   bool is_deferred, bool pending_inval)
{
    DPRINTF(CacheVerbose, "%s: for %s\n", __func__, pkt->print());
    // deferred snoops can only happen in timing mode
    assert(!(is_deferred && !is_timing));
    // pending_inval only makes sense on deferred snoops
    assert(!(pending_inval && !is_deferred));
    assert(pkt->isRequest());

    // the packet may get modified if we or a forwarded snooper
    // responds in atomic mode, so remember a few things about the
    // original packet up front
    bool invalidate = pkt->isInvalidate();
    bool M5_VAR_USED needs_writable = pkt->needsWritable();

    // at the moment we could get an uncacheable write which does not
    // have the invalidate flag, and we need a suitable way of dealing
    // with this case
    panic_if(invalidate && pkt->req->isUncacheable(),
             "%s got an invalidating uncacheable snoop request %s",
             name(), pkt->print());

    uint32_t snoop_delay = 0;

    if (forwardSnoops) {
        // first propagate snoop upward to see if anyone above us wants to
        // handle it. save & restore packet src since it will get
        // rewritten to be relative to CPU-side bus (if any)
        if (is_timing) {
            // copy the packet so that we can clear any flags before
            // forwarding it upwards, we also allocate data (passing
            // the pointer along in case of static data), in case
            // there is a snoop hit in upper levels
            Packet snoopPkt(pkt, true, true);
            snoopPkt.setExpressSnoop();
            // the snoop packet does not need to wait any additional
            // time
            snoopPkt.headerDelay = snoopPkt.payloadDelay = 0;
            cpuSidePort.sendTimingSnoopReq(&snoopPkt);

            // add the header delay (including crossbar and snoop
            // delays) of the upward snoop to the snoop delay for this
            // cache
            snoop_delay += snoopPkt.headerDelay;

            // If this request is a prefetch or clean evict and an upper level
            // signals block present, make sure to propagate the block
            // presence to the requestor.
            if (snoopPkt.isBlockCached()) {
                pkt->setBlockCached();
            }
            // If the request was satisfied by snooping the cache
            // above, mark the original packet as satisfied too.
            if (snoopPkt.satisfied()) {
                pkt->setSatisfied();
            }

            // Copy over flags from the snoop response to make sure we
            // inform the final destination
            pkt->copyResponderFlags(&snoopPkt);
        } else {
            bool already_responded = pkt->cacheResponding();
            cpuSidePort.sendAtomicSnoop(pkt);
            if (!already_responded && pkt->cacheResponding()) {
                // cache-to-cache response from some upper cache:
                // forward response to original requestor
                assert(pkt->isResponse());
            }
        }
    }

    bool respond = false;
    bool blk_valid = blk && blk->isValid();
    if (pkt->isClean()) {
        if (blk_valid && blk->isDirty()) {
            DPRINTF(CacheVerbose, "%s: packet (snoop) %s found block: %s\n",
                    __func__, pkt->print(), blk->print());
            PacketPtr wb_pkt = writecleanBlk(blk, pkt->req->getDest(), pkt->id);
            PacketList writebacks;
            writebacks.push_back(wb_pkt);

            if (is_timing) {
                // anything that is merely forwarded pays for the forward
                // latency and the delay provided by the crossbar
                Tick forward_time = clockEdge(forwardLatency) +
                    pkt->headerDelay;
                doWritebacks(writebacks, forward_time);
            } else {
                doWritebacksAtomic(writebacks);
            }
            pkt->setSatisfied();
        }
    } else if (!blk_valid) {
        DPRINTF(CacheVerbose, "%s: snoop miss for %s\n", __func__,
                pkt->print());
        if (is_deferred) {
            // we no longer have the block, and will not respond, but a
            // packet was allocated in MSHR::handleSnoop and we have
            // to delete it
            assert(pkt->needsResponse());

            // we have passed the block to a cache upstream, that
            // cache should be responding
            assert(pkt->cacheResponding());

            delete pkt;
        }
        return snoop_delay;
    } else {
        DPRINTF(Cache, "%s: snoop hit for %s, old state is %s\n", __func__,
                pkt->print(), blk->print());

        // We may end up modifying both the block state and the packet (if
        // we respond in atomic mode), so just figure out what to do now
        // and then do it later. We respond to all snoops that need
        // responses provided we have the block in dirty state. The
        // invalidation itself is taken care of below. We don't respond to
        // cache maintenance operations as this is done by the destination
        // xbar.
        respond = blk->isDirty() && pkt->needsResponse();

        chatty_assert(!(isReadOnly && blk->isDirty()), "Should never have "
                      "a dirty block in a read-only cache %s\n", name());
    }

    // Invalidate any prefetches from below that would strip write permissions
    // MemCmd::HardPFReq is only observed by upstream caches. After missing
    // above and in its own cache, a new MemCmd::ReadReq is created that
    // downstream caches observe.
    if (pkt->mustCheckAbove()) {
        DPRINTF(Cache, "Found addr %#llx in upper level cache for snoop %s "
                "from lower cache\n", pkt->getAddr(), pkt->print());
        pkt->setBlockCached();
        return snoop_delay;
    }

    if (pkt->isRead() && !invalidate) {
        // reading without requiring the line in a writable state
        assert(!needs_writable);
        pkt->setHasSharers();

        // if the requesting packet is uncacheable, retain the line in
        // the current state, otherwise unset the writable flag,
        // which means we go from Modified to Owned (and will respond
        // below), remain in Owned (and will respond below), from
        // Exclusive to Shared, or remain in Shared
        if (!pkt->req->isUncacheable())
            blk->status &= ~BlkWritable;
        DPRINTF(Cache, "new state is %s\n", blk->print());
    }

    if (respond) {
        // prevent anyone else from responding, cache as well as
        // memory, and also prevent any memory from even seeing the
        // request
        pkt->setCacheResponding();
        if (!pkt->isClean() && blk->isWritable()) {
            // inform the cache hierarchy that this cache had the line
            // in the Modified state so that we avoid unnecessary
            // invalidations (see Packet::setResponderHadWritable)
            pkt->setResponderHadWritable();

            // in the case of an uncacheable request there is no point
            // in setting the responderHadWritable flag, but since the
            // recipient does not care there is no harm in doing so
        } else {
            // if the packet has needsWritable set we invalidate our
            // copy below and all other copies will be invalidated
            // through express snoops, and if needsWritable is not set
            // we already called setHasSharers above
        }

        // if we are returning a writable and dirty (Modified) line,
        // we should be invalidating the line
        panic_if(!invalidate && !pkt->hasSharers(),
                 "%s is passing a Modified line through %s, "
                 "but keeping the block", name(), pkt->print());

        if (is_timing) {
            doTimingSupplyResponse(pkt, blk->data, is_deferred, pending_inval);
        } else {
            pkt->makeAtomicResponse();
            // packets such as upgrades do not actually have any data
            // payload
            if (pkt->hasData())
                pkt->setDataFromBlock(blk->data, blkSize);
        }

        // When a block is compressed, it must first be decompressed before
        // being read, and this increases the snoop delay.
        if (compressor && pkt->isRead()) {
            snoop_delay += compressor->getDecompressionLatency(blk);
        }
    }

    if (!respond && is_deferred) {
        assert(pkt->needsResponse());
        delete pkt;
    }

    // Do this last in case it deallocates block data or something
    // like that
    if (blk_valid && invalidate) {
        invalidateBlock(blk);
        DPRINTF(Cache, "new state is %s\n", blk->print());
    }

    return snoop_delay;
}
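
// The snoop_delay returned above is folded into pkt->snoopDelay by the
// callers (see recvTimingSnoopReq and recvAtomicSnoop below), so the
// crossbar can account for the header delay of any upward express
// snoop and, for compressed blocks, the decompression latency.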


void
Cache::recvTimingSnoopReq(PacketPtr pkt)
{
    DPRINTF(CacheVerbose, "%s: for %s\n", __func__, pkt->print());

    // no need to snoop requests that are not in range
    if (!inRange(pkt->getAddr())) {
        return;
    }

    bool is_secure = pkt->isSecure();
    CacheBlk *blk = tags->findBlock(pkt->getAddr(), is_secure);

    Addr blk_addr = pkt->getBlockAddr(blkSize);
    MSHR *mshr = mshrQueue.findMatch(blk_addr, is_secure);

    // Update the latency cost of the snoop so that the crossbar can
    // account for it. Do not overwrite what other neighbouring caches
    // have already done, rather take the maximum. The update is
    // tentative, for cases where we return before an upward snoop
    // happens below.
    pkt->snoopDelay = std::max<uint32_t>(pkt->snoopDelay,
                                         lookupLatency * clockPeriod());

    // Inform requests from below (Prefetch, CleanEvict or Writeback) of
    // an MSHR hit by setting BLOCK_CACHED.
    if (mshr && pkt->mustCheckAbove()) {
        DPRINTF(Cache, "Setting block cached for %s from lower cache on "
                "mshr hit\n", pkt->print());
        pkt->setBlockCached();
        return;
    }

    // Let the MSHR itself track the snoop and decide whether we want
    // to go ahead and do the regular cache snoop
    if (mshr && mshr->handleSnoop(pkt, order++)) {
        DPRINTF(Cache, "Deferring snoop on in-service MSHR to blk %#llx (%s)."
                "mshrs: %s\n", blk_addr, is_secure ? "s" : "ns",
                mshr->print());

        if (mshr->getNumTargets() > numTarget)
            warn("allocating bonus target for snoop"); //handle later
        return;
    }

    // We also need to check the writeback buffers and handle those
    WriteQueueEntry *wb_entry = writeBuffer.findMatch(blk_addr, is_secure);
    if (wb_entry) {
        DPRINTF(Cache, "Snoop hit in writeback to addr %#llx (%s)\n",
                pkt->getAddr(), is_secure ? "s" : "ns");
        // Expect to see only Writebacks and/or CleanEvicts here, both of
        // which should not be generated for uncacheable data.
        assert(!wb_entry->isUncacheable());
        // There should only be a single request responsible for generating
        // Writebacks/CleanEvicts.
        assert(wb_entry->getNumTargets() == 1);
        PacketPtr wb_pkt = wb_entry->getTarget()->pkt;
        assert(wb_pkt->isEviction() || wb_pkt->cmd == MemCmd::WriteClean);

        if (pkt->isEviction()) {
            // if the block is found in the write queue, set the BLOCK_CACHED
            // flag for Writeback/CleanEvict snoop. On return the snoop will
            // propagate the BLOCK_CACHED flag in Writeback packets and
            // prevent any CleanEvicts from travelling down the memory
            // hierarchy.
            pkt->setBlockCached();
            DPRINTF(Cache, "%s: Squashing %s from lower cache on writequeue "
                    "hit\n", __func__, pkt->print());
            return;
        }

        // conceptually writebacks are no different to other blocks in
        // this cache, so the behaviour is modelled after handleSnoop,
        // the difference being that instead of querying the block
        // state to determine if it is dirty and writable, we use the
        // command and fields of the writeback packet
        bool respond = wb_pkt->cmd == MemCmd::WritebackDirty &&
            pkt->needsResponse();
        bool have_writable = !wb_pkt->hasSharers();
        bool invalidate = pkt->isInvalidate();

        if (!pkt->req->isUncacheable() && pkt->isRead() && !invalidate) {
            assert(!pkt->needsWritable());
            pkt->setHasSharers();
            wb_pkt->setHasSharers();
        }

        if (respond) {
            pkt->setCacheResponding();

            if (have_writable) {
                pkt->setResponderHadWritable();
            }

            doTimingSupplyResponse(pkt, wb_pkt->getConstPtr<uint8_t>(),
                                   false, false);
        }

        if (invalidate && wb_pkt->cmd != MemCmd::WriteClean) {
            // Invalidation trumps our writeback... discard here
            // Note: markInService will remove entry from writeback buffer.
            markInService(wb_entry);
            delete wb_pkt;
        }
    }

    // If this was a shared writeback, there may still be
    // other shared copies above that require invalidation.
    // We could be more selective and return here if the
    // request is non-exclusive or if the writeback is
    // exclusive.
    uint32_t snoop_delay = handleSnoop(pkt, blk, true, false, false);

    // Override what we did when we first saw the snoop, as we now
    // also have the cost of the upwards snoops to account for
    pkt->snoopDelay = std::max<uint32_t>(pkt->snoopDelay, snoop_delay +
                                         lookupLatency * clockPeriod());
}

Tick
Cache::recvAtomicSnoop(PacketPtr pkt)
{
    // no need to snoop requests that are not in range.
    if (!inRange(pkt->getAddr())) {
        return 0;
    }

    CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());
    uint32_t snoop_delay = handleSnoop(pkt, blk, false, false, false);
    return snoop_delay + lookupLatency * clockPeriod();
}

bool
Cache::isCachedAbove(PacketPtr pkt, bool is_timing)
{
    if (!forwardSnoops)
        return false;
    // Mirroring the flow of HardPFReqs, the cache sends CleanEvict and
    // Writeback snoops into upper level caches to check for copies of the
    // same block. Using the BLOCK_CACHED flag with the Writeback/CleanEvict
    // packet, the cache can inform the crossbar below of presence or absence
    // of the block.
    if (is_timing) {
        Packet snoop_pkt(pkt, true, false);
        snoop_pkt.setExpressSnoop();
        // Assert that packet is either Writeback or CleanEvict and not a
        // prefetch request because prefetch requests need an MSHR and may
        // generate a snoop response.
        assert(pkt->isEviction() || pkt->cmd == MemCmd::WriteClean);
        snoop_pkt.senderState = nullptr;
        cpuSidePort.sendTimingSnoopReq(&snoop_pkt);
        // Writeback/CleanEvict snoops do not generate a snoop response.
        assert(!(snoop_pkt.cacheResponding()));
        return snoop_pkt.isBlockCached();
    } else {
        cpuSidePort.sendAtomicSnoop(pkt);
        return pkt->isBlockCached();
    }
}
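
// In timing mode the probe above is a throw-away express snoop whose
// BLOCK_CACHED result is read off the local snoop_pkt, whereas in
// atomic mode the original packet itself is snooped upwards and the
// flag is read straight off pkt afterwards.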

bool
Cache::sendMSHRQueuePacket(MSHR* mshr)
{
    assert(mshr);

    // use request from 1st target
    PacketPtr tgt_pkt = mshr->getTarget()->pkt;

    if (tgt_pkt->cmd == MemCmd::HardPFReq && forwardSnoops) {
        DPRINTF(Cache, "%s: MSHR %s\n", __func__, tgt_pkt->print());

        // we should never have hardware prefetches to allocated
        // blocks
        assert(!tags->findBlock(mshr->blkAddr, mshr->isSecure));

        // We need to check the caches above us to verify that
        // they don't have a copy of this block in the dirty state
        // at the moment. Without this check we could get a stale
        // copy from memory that might get used in place of the
        // dirty one.
        Packet snoop_pkt(tgt_pkt, true, false);
        snoop_pkt.setExpressSnoop();
        // We are sending this packet upwards, but if it hits we will
        // get a snoop response that we end up treating just like a
        // normal response, hence it needs the MSHR as its sender
        // state
        snoop_pkt.senderState = mshr;
        cpuSidePort.sendTimingSnoopReq(&snoop_pkt);

        // Check to see if the prefetch was squashed by an upper cache
        // (to prevent us from grabbing the line) or if a writeback
        // arrived between the time the prefetch was placed in the
        // MSHRs and when it was selected to be sent.

        // It is important to check cacheResponding before
        // prefetchSquashed. If another cache has committed to
        // responding, it will be sending a dirty response which will
        // arrive at the MSHR allocated for this request. Checking the
        // prefetchSquash first may result in the MSHR being
        // prematurely deallocated.
        if (snoop_pkt.cacheResponding()) {
            auto M5_VAR_USED r = outstandingSnoop.insert(snoop_pkt.req);
            assert(r.second);

            // if we are getting a snoop response with no sharers it
            // will be allocated as Modified
            bool pending_modified_resp = !snoop_pkt.hasSharers();
            markInService(mshr, pending_modified_resp);

            DPRINTF(Cache, "Upward snoop of prefetch for addr"
                    " %#x (%s) hit\n",
                    tgt_pkt->getAddr(), tgt_pkt->isSecure()? "s": "ns");
            return false;
        }

        if (snoop_pkt.isBlockCached()) {
            DPRINTF(Cache, "Block present, prefetch squashed by cache. "
                    "Deallocating mshr target %#x.\n",
                    mshr->blkAddr);

            // Deallocate the mshr target
            if (mshrQueue.forceDeallocateTarget(mshr)) {
                // Clear block if this deallocation freed an mshr
                // when all had previously been utilized
                clearBlocked(Blocked_NoMSHRs);
            }

            // given that no response is expected, delete Request and Packet
            delete tgt_pkt;

            return false;
        }
    }

    return BaseCache::sendMSHRQueuePacket(mshr);
}

Cache*
CacheParams::create()
{
    assert(tags);
    assert(replacement_policy);

    return new Cache(this);
}
Cache::outstandingSnoop
std::unordered_set< RequestPtr > outstandingSnoop
Store the outstanding requests that we are expecting snoop responses from so we can determine which s...
Definition: cache.hh:76
MSHR::Target::FromPrefetcher
@ FromPrefetcher
Definition: mshr.hh:130
Packet::isError
bool isError() const
Definition: packet.hh:583
CacheBlk::isWritable
bool isWritable() const
Checks the write permissions of this block.
Definition: cache_blk.hh:181
MSHR::Target::FromCPU
@ FromCPU
Definition: mshr.hh:128
Packet::makeAtomicResponse
void makeAtomicResponse()
Definition: packet.hh:1016
BaseCache::clearBlocked
void clearBlocked(BlockedCause cause)
Marks the cache as unblocked for the given cause.
Definition: base.hh:1179
BaseCache::writeBuffer
WriteQueue writeBuffer
Write/writeback buffer.
Definition: base.hh:317
BaseCache::maintainClusivity
void maintainClusivity(bool from_cache, CacheBlk *blk)
Maintain the clusivity of this cache by potentially invalidating a block.
Definition: base.cc:1294
Packet::isResponse
bool isResponse() const
Definition: packet.hh:560
MemCmd::WritebackClean
@ WritebackClean
Definition: packet.hh:89
warn
#define warn(...)
Definition: logging.hh:239
MemCmd::isSWPrefetch
bool isSWPrefetch() const
Definition: packet.hh:224
BaseCache::markInService
void markInService(MSHR *mshr, bool pending_modified_resp)
Mark a request as in service (sent downstream in the memory system), effectively making this MSHR the...
Definition: base.hh:371
Packet::makeTimingResponse
void makeTimingResponse()
Definition: packet.hh:1022
Cache::doWritebacksAtomic
void doWritebacksAtomic(PacketList &writebacks) override
Send writebacks down the memory hierarchy in atomic mode.
Definition: cache.cc:227
Packet::cacheResponding
bool cacheResponding() const
Definition: packet.hh:619
QueueEntry::blkAddr
Addr blkAddr
Block aligned address.
Definition: queue_entry.hh:111
MSHR::print
void print(std::ostream &os, int verbosity=0, const std::string &prefix="") const override
Prints the contents of this MSHR for debugging.
Definition: mshr.cc:701
Packet::hasSharers
bool hasSharers() const
Definition: packet.hh:646
Packet::satisfied
bool satisfied() const
Definition: packet.hh:715
Cache::evictBlock
M5_NODISCARD PacketPtr evictBlock(CacheBlk *blk) override
Evict a cache block.
Definition: cache.cc:897
X86ISA::CacheParams
@ CacheParams
Definition: cpuid.cc:40
QueueEntry::Target
A queue entry is holding packets that will be serviced as soon as resources are available.
Definition: queue_entry.hh:83
Packet::copyResponderFlags
void copyResponderFlags(const PacketPtr pkt)
Copy the reponse flags from an input packet to this packet.
Definition: packet.cc:322
BaseCache::handleFill
CacheBlk * handleFill(PacketPtr pkt, CacheBlk *blk, PacketList &writebacks, bool allocate)
Handle a fill operation caused by a received packet.
Definition: base.cc:1306
Packet::getAddr
Addr getAddr() const
Definition: packet.hh:754
MemCmd::HardPFReq
@ HardPFReq
Definition: packet.hh:94
Cache::recvTimingSnoopReq
void recvTimingSnoopReq(PacketPtr pkt) override
Snoops bus transactions to maintain coherence.
Definition: cache.cc:1194
Packet::setCacheResponding
void setCacheResponding()
Snoop flags.
Definition: packet.hh:613
BaseCache::writebackBlk
PacketPtr writebackBlk(CacheBlk *blk)
Create a writeback request for the given block.
Definition: base.cc:1486
Packet::responderHadWritable
bool responderHadWritable() const
Definition: packet.hh:679
Packet::payloadDelay
uint32_t payloadDelay
The extra pipelining delay from seeing the packet until the end of payload is transmitted by the comp...
Definition: packet.hh:412
cache.hh
BaseCache::system
System * system
System we are currently operating in.
Definition: base.hh:921
mshr.hh
MSHR::getNumTargets
int getNumTargets() const
Returns the current number of allocated targets.
Definition: mshr.hh:422
X86ISA::pf
Bitfield< 2 > pf
Definition: misc.hh:550
Packet::setResponderHadWritable
void setResponderHadWritable()
On responding to a snoop request (which only happens for Modified or Owned lines),...
Definition: packet.hh:673
Packet::hasRespData
bool hasRespData() const
Definition: packet.hh:577
BaseCache::blkSize
const unsigned blkSize
Block size of this cache.
Definition: base.hh:839
MSHRQueue::forceDeallocateTarget
bool forceDeallocateTarget(MSHR *mshr)
Deallocate top target, possibly freeing the MSHR.
Definition: mshr_queue.cc:117
Packet::setHasSharers
void setHasSharers()
On fills, the hasSharers flag is used by the caches in combination with the cacheResponding flag,...
Definition: packet.hh:645
MemCmd::UpgradeReq
@ UpgradeReq
Definition: packet.hh:98
Packet::setSatisfied
void setSatisfied()
Set when a request hits in a cache and the cache is not going to respond.
Definition: packet.hh:709
Packet::isRead
bool isRead() const
Definition: packet.hh:556
Packet::fromCache
bool fromCache() const
Definition: packet.hh:574
MemCmd::CleanEvict
@ CleanEvict
Definition: packet.hh:91
BaseTags::print
std::string print()
Print all tags used.
Definition: base.cc:187
BaseCache::CacheCmdStats::missLatency
Stats::Vector missLatency
Total number of cycles per thread/command spent waiting for a miss.
Definition: base.hh:947
Tick
uint64_t Tick
Tick count type.
Definition: types.hh:63
BaseCache::memSidePort
MemSidePort memSidePort
Definition: base.hh:309
ResponsePort::sendAtomicSnoop
Tick sendAtomicSnoop(PacketPtr pkt)
Send an atomic snoop request packet, where the data is moved and the state is updated in zero time,...
Definition: port.hh:323
Cache::recvAtomic
Tick recvAtomic(PacketPtr pkt) override
Performs the access specified by the request.
Definition: cache.cc:650
Packet::needsWritable
bool needsWritable() const
Definition: packet.hh:561
Packet::isInvalidate
bool isInvalidate() const
Definition: packet.hh:571
RequestPtr
std::shared_ptr< Request > RequestPtr
Definition: request.hh:82
BlkDirty
@ BlkDirty
dirty (modified)
Definition: cache_blk.hh:71
Packet::req
RequestPtr req
A pointer to the original request.
Definition: packet.hh:340
Packet::isEviction
bool isEviction() const
Definition: packet.hh:572
Packet::isLLSC
bool isLLSC() const
Definition: packet.hh:582
Cache::handleTimingReqHit
void handleTimingReqHit(PacketPtr pkt, CacheBlk *blk, Tick request_time) override
Definition: cache.cc:308
BaseCache::handleTimingReqHit
virtual void handleTimingReqHit(PacketPtr pkt, CacheBlk *blk, Tick request_time)
Definition: base.cc:211
Packet::setBlockCached
void setBlockCached()
Definition: packet.hh:719
CacheBlk::State
unsigned State
block state: OR of CacheBlkStatusBit
Definition: cache_blk.hh:102
Packet::getSize
unsigned getSize() const
Definition: packet.hh:764
CacheBlk::task_id
uint32_t task_id
Task Id associated with this block.
Definition: cache_blk.hh:88
MemCmd::HardPFResp
@ HardPFResp
Definition: packet.hh:96
Packet::isRequest
bool isRequest() const
Definition: packet.hh:559
QueuedResponsePort::schedTimingResp
void schedTimingResp(PacketPtr pkt, Tick when)
Schedule the sending of a timing response.
Definition: qport.hh:90
BaseCache::clusivity
const Enums::Clusivity clusivity
Clusivity with respect to the upstream cache, determining if we fill into both this cache and the cac...
Definition: base.hh:886
MemCmd::ReadRespWithInvalidate
@ ReadRespWithInvalidate
Definition: packet.hh:84
BaseCache::numTarget
const int numTarget
The number of targets for each MSHR.
Definition: base.hh:876
request.hh
BaseCache::forwardLatency
const Cycles forwardLatency
This is the forward latency of the cache.
Definition: base.hh:858
Packet::headerDelay
uint32_t headerDelay
The extra delay from seeing the packet until the header is transmitted.
Definition: packet.hh:394
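headerDelay and payloadDelay accumulate crossbar and pipelining delay that the receiving cache folds into the scheduling of its own response. A standalone sketch of that bookkeeping (the tick values and the clockEdge() stand-in are illustrative assumptions):

```cpp
#include <cstdint>
#include <iostream>

using Tick = uint64_t;

struct MiniPacket {
    uint32_t headerDelay = 0;   // delay until the header has been seen
    uint32_t payloadDelay = 0;  // extra delay until the payload is through
};

// Stand-in for Clocked::clockEdge(): round up to the next cycle boundary
// and add a latency expressed in cycles (assumed 1000-tick clock period).
constexpr Tick period = 1000;
Tick clockEdge(Tick now, unsigned cycles)
{
    return ((now + period - 1) / period + cycles) * period;
}

int main()
{
    MiniPacket pkt;
    pkt.headerDelay = 250;      // accumulated upstream, e.g. in a crossbar
    pkt.payloadDelay = 500;

    Tick now = 10'100;
    // Charge the accumulated header delay on top of this cache's own
    // response latency, then zero it so it is not charged twice.
    Tick respTime = clockEdge(now, /*responseLatency=*/2) + pkt.headerDelay;
    pkt.headerDelay = 0;

    std::cout << "schedule response at tick " << respTime << '\n';
    return 0;
}
```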
MSHR::extractServiceableTargets
TargetList extractServiceableTargets(PacketPtr pkt)
Extracts the subset of the targets that can be serviced given a received response.
Definition: mshr.cc:536
Packet::print
void print(std::ostream &o, int verbosity=0, const std::string &prefix="") const
Definition: packet.cc:389
Packet::isSecure
bool isSecure() const
Definition: packet.hh:783
Packet::setData
void setData(const uint8_t *p)
Copy data into the packet from the provided pointer.
Definition: packet.hh:1225
QueueEntry::Target::pkt
const PacketPtr pkt
Pending request packet.
Definition: queue_entry.hh:88
Packet::copyError
void copyError(Packet *pkt)
Definition: packet.hh:752
Cache::isCachedAbove
bool isCachedAbove(PacketPtr pkt, bool is_timing=true)
Send up a snoop request and find cached copies.
Definition: cache.cc:1325
MemCmd::WriteReq
@ WriteReq
Definition: packet.hh:85
ResponsePort::sendTimingSnoopReq
void sendTimingSnoopReq(PacketPtr pkt)
Attempt to send a timing snoop request packet to the request port by calling its corresponding receiv...
Definition: port.hh:384
BaseCache::Blocked_NoMSHRs
@ Blocked_NoMSHRs
Definition: base.hh:105
X86ISA::system
Bitfield< 15 > system
Definition: misc.hh:997
QueueEntry::isSecure
bool isSecure
True if the entry targets the secure memory space.
Definition: queue_entry.hh:117
MemCmd::WriteLineReq
@ WriteLineReq
Definition: packet.hh:97
BaseCache::allocateWriteBuffer
void allocateWriteBuffer(PacketPtr pkt, Tick time)
Definition: base.hh:1115
Packet::isWholeLineWrite
bool isWholeLineWrite(unsigned blk_size)
Definition: packet.hh:587
Request::SECURE
@ SECURE
The request targets the secure memory space.
Definition: request.hh:173
BaseCache::mshrQueue
MSHRQueue mshrQueue
Miss status registers.
Definition: base.hh:314
MemCmd::WritebackDirty
@ WritebackDirty
Definition: packet.hh:88
BaseCache::regenerateBlkAddr
Addr regenerateBlkAddr(CacheBlk *blk)
Regenerate block address using tags.
Definition: base.cc:169
Packet::matchAddr
bool matchAddr(const Addr addr, const bool is_secure) const
Check if packet corresponds to a given address and address space.
Definition: packet.cc:424
RequestPort::sendTimingReq
bool sendTimingReq(PacketPtr pkt)
Attempt to send a timing request to the responder port by calling its corresponding receive function.
Definition: port.hh:492
CacheBlk::isDirty
bool isDirty() const
Check to see if a block has been written.
Definition: cache_blk.hh:226
BaseCache::cpuSidePort
CpuSidePort cpuSidePort
Definition: base.hh:308
MemCmd::SCUpgradeFailReq
@ SCUpgradeFailReq
Definition: packet.hh:101
Cache::recvAtomicSnoop
Tick recvAtomicSnoop(PacketPtr pkt) override
Snoop for the provided request in the cache and return the estimated time taken.
Definition: cache.cc:1312
Cache::recvTimingSnoopResp
void recvTimingSnoopResp(PacketPtr pkt) override
Handle a snoop response.
Definition: cache.cc:262
DPRINTF
#define DPRINTF(x,...)
Definition: trace.hh:234
BaseCache::allocOnFill
bool allocOnFill(MemCmd cmd) const
Determine whether we should allocate on a fill or not.
Definition: base.hh:404
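allocOnFill, driven by the clusivity parameter, decides whether a received fill is allocated into this cache at all. The sketch below is a deliberately simplified approximation of that policy, not gem5's exact predicate:

```cpp
#include <iostream>

enum class Clusivity { MostlyIncl, MostlyExcl };

// Simplified fill-allocation policy: mostly-inclusive caches always keep
// the fill; mostly-exclusive caches keep it only when the request needs
// the block resident here (illustrative approximation only).
bool allocOnFill(Clusivity clusivity, bool mustAllocate)
{
    return clusivity == Clusivity::MostlyIncl || mustAllocate;
}

int main()
{
    std::cout << allocOnFill(Clusivity::MostlyExcl, false) << '\n'; // 0
    std::cout << allocOnFill(Clusivity::MostlyIncl, false) << '\n'; // 1
    return 0;
}
```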
MemCmd::ReadCleanReq
@ ReadCleanReq
Definition: packet.hh:105
BaseCache::stats
BaseCache::CacheStats stats
MSHR::handleSnoop
bool handleSnoop(PacketPtr target, Counter order)
Definition: mshr.cc:412
MemCmd
Definition: packet.hh:71
BaseCache::pendingDelete
std::unique_ptr< Packet > pendingDelete
Upstream caches need this packet until true is returned, so hold it for deletion until a subsequent c...
Definition: base.hh:365
Packet::getBlockAddr
Addr getBlockAddr(unsigned int blk_size) const
Definition: packet.hh:778
BaseCache::invalidateBlock
void invalidateBlock(CacheBlk *blk)
Invalidate a cache block.
Definition: base.cc:1460
Request::wbRequestorId
@ wbRequestorId
This requestor id is used for writeback requests by the caches.
Definition: request.hh:243
Cache::recvTimingReq
void recvTimingReq(PacketPtr pkt) override
Performs the access specified by the request.
Definition: cache.cc:398
Cache::handleTimingReqMiss
void handleTimingReqMiss(PacketPtr pkt, CacheBlk *blk, Tick forward_time, Tick request_time) override
Definition: cache.cc:319
Packet::needsResponse
bool needsResponse() const
Definition: packet.hh:570
MipsISA::r
r
Definition: pra_constants.hh:95
BaseCache::recvTimingReq
virtual void recvTimingReq(PacketPtr pkt)
Performs the access specified by the request.
Definition: base.cc:334
Compressor::Base::getDecompressionLatency
Cycles getDecompressionLatency(const CacheBlk *blk)
Get the decompression latency if the block is compressed.
Definition: base.cc:172
Cache::cleanEvictBlk
PacketPtr cleanEvictBlk(CacheBlk *blk)
Create a CleanEvict request for the given block.
Definition: cache.cc:908
QueuedRequestPort::schedTimingSnoopResp
void schedTimingSnoopResp(PacketPtr pkt, Tick when)
Schedule the sending of a timing snoop response.
Definition: qport.hh:155
Clocked::clockEdge
Tick clockEdge(Cycles cycles=Cycles(0)) const
Determine the tick when a cycle begins, by default the current one, but the argument also enables the...
Definition: clocked_object.hh:174
Cache::access
bool access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat, PacketList &writebacks) override
Does all the processing necessary to perform the provided request.
Definition: cache.cc:156
CacheBlk::status
State status
The current status of this block.
Definition: cache_blk.hh:105
Packet::setDataFromBlock
void setDataFromBlock(const uint8_t *blk_data, int blkSize)
Copy data into the packet from the provided block pointer, which is aligned to the given block size.
Definition: packet.hh:1244
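setDataFromBlock copies a packet-sized window out of a block-aligned buffer, using the packet's offset within the block as the starting point. A standalone illustration (the helper name is hypothetical):

```cpp
#include <cassert>
#include <cstdint>
#include <cstring>

using Addr = uint64_t;

// Copy pkt_size bytes for address 'addr' out of a block-aligned buffer,
// mirroring what Packet::setDataFromBlock does with getOffset().
void copyFromBlock(uint8_t *pkt_data, unsigned pkt_size, Addr addr,
                   const uint8_t *blk_data, unsigned blk_size)
{
    Addr offset = addr & (blk_size - 1);
    assert(offset + pkt_size <= blk_size);   // request fits in the block
    std::memcpy(pkt_data, blk_data + offset, pkt_size);
}

int main()
{
    uint8_t blk[64];
    for (unsigned i = 0; i < sizeof(blk); i++)
        blk[i] = uint8_t(i);

    uint8_t word[4];
    copyFromBlock(word, sizeof(word), /*addr=*/0x1008, blk, sizeof(blk));
    assert(word[0] == 8 && word[3] == 11);   // bytes 8..11 of the block
    return 0;
}
```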
BaseCache::forwardSnoops
bool forwardSnoops
Do we forward snoops from mem side port through to cpu side port?
Definition: base.hh:879
chatty_assert
#define chatty_assert(cond,...)
The chatty assert macro will function like a normal assert, but will allow the specification of addit...
Definition: logging.hh:292
Packet::mustCheckAbove
bool mustCheckAbove() const
Does the request need to check for cached copies of the same block in the memory hierarchy above.
Definition: packet.hh:1357
base.hh
BaseCache::writecleanBlk
PacketPtr writecleanBlk(CacheBlk *blk, Request::Flags dest, PacketId id)
Create a writeclean request for the given block.
Definition: base.cc:1534
BaseCache::access
virtual bool access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat, PacketList &writebacks)
Does all the processing necessary to perform the provided request.
Definition: base.cc:1030
BaseCache::isReadOnly
const bool isReadOnly
Is this cache read only, for example the instruction cache, or table-walker cache.
Definition: base.hh:894
Packet::getOffset
Addr getOffset(unsigned int blk_size) const
Definition: packet.hh:773
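getBlockAddr and getOffset are power-of-two mask operations on the packet address. A worked standalone example, assuming the common 64-byte block size:

```cpp
#include <cassert>
#include <cstdint>

using Addr = uint64_t;

// Block-aligned base address: clear the low log2(blk_size) bits.
Addr blockAddr(Addr addr, unsigned blk_size) { return addr & ~Addr(blk_size - 1); }
// Offset within the block: keep only the low bits.
Addr blockOffset(Addr addr, unsigned blk_size) { return addr & Addr(blk_size - 1); }

int main()
{
    const unsigned blkSize = 64;
    Addr a = 0x12345;                       // some byte address
    assert(blockAddr(a, blkSize) == 0x12340);
    assert(blockOffset(a, blkSize) == 0x5);
    return 0;
}
```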
BaseCache::sendMSHRQueuePacket
virtual bool sendMSHRQueuePacket(MSHR *mshr)
Take an MSHR, turn it into a suitable downstream packet, and send it out.
Definition: base.cc:1657
Packet::snoopDelay
uint32_t snoopDelay
Keep track of the extra delay incurred by snooping upwards before sending a request down the memory s...
Definition: packet.hh:402
Packet::isUpgrade
bool isUpgrade() const
Definition: packet.hh:558
CacheBlk::print
std::string print() const override
Pretty-print tag, set and way, and interpret state bits to readable form including mapping to a MOESI...
Definition: cache_blk.hh:346
compiler.hh
Queue::findMatch
Entry * findMatch(Addr blk_addr, bool is_secure, bool ignore_uncacheable=true) const
Find the first entry that matches the provided address.
Definition: queue.hh:162
WriteQueueEntry::getNumTargets
int getNumTargets() const
Returns the current number of allocated targets.
Definition: write_queue_entry.hh:137
Cache::satisfyRequest
void satisfyRequest(PacketPtr pkt, CacheBlk *blk, bool deferred_response=false, bool pending_downgrade=false) override
Perform any necessary updates to the block and perform any data exchange between the packet and the b...
Definition: cache.cc:73
MSHR::TargetList
Definition: mshr.hh:162
BaseCache::lookupLatency
const Cycles lookupLatency
The latency of tag lookup of a cache.
Definition: base.hh:845
BaseCache
A basic cache interface.
Definition: base.hh:89
BaseCache::allocateMissBuffer
MSHR * allocateMissBuffer(PacketPtr pkt, Tick time, bool sched_send=true)
Definition: base.hh:1097
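allocateMissBuffer, together with Queue::findMatch, implements the usual MSHR discipline: coalesce a miss onto an existing entry for the same block, otherwise allocate a new entry and send one request downstream. A miniature standalone model (a bare map of block address to targets; the real MSHRQueue also tracks secure state, ordering, and resource limits):

```cpp
#include <cstdint>
#include <iostream>
#include <map>
#include <vector>

using Addr = uint64_t;
struct MiniPkt { Addr addr; };

struct MiniMSHRQueue {
    // block address -> packets waiting on that block's fill
    std::map<Addr, std::vector<MiniPkt*>> entries;

    // Returns true if this miss started a new downstream request.
    bool handleMiss(MiniPkt *pkt, unsigned blkSize)
    {
        Addr blk_addr = pkt->addr & ~Addr(blkSize - 1);
        auto it = entries.find(blk_addr);
        if (it != entries.end()) {
            it->second.push_back(pkt);   // coalesce: no new request
            return false;
        }
        entries[blk_addr] = {pkt};       // allocate: send one request down
        return true;
    }
};

int main()
{
    MiniMSHRQueue q;
    MiniPkt a{0x1000}, b{0x1020};        // same 64-byte block
    std::cout << q.handleMiss(&a, 64)    // 1: new MSHR
              << q.handleMiss(&b, 64)    // 0: coalesced
              << '\n';
    return 0;
}
```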
BlkHWPrefetched
@ BlkHWPrefetched
block was a hardware prefetch yet unaccessed
Definition: cache_blk.hh:73
Packet::id
const PacketId id
Definition: packet.hh:337
WriteQueueEntry::getTarget
Target * getTarget() override
Returns a reference to the first target.
Definition: write_queue_entry.hh:150
ProbePoints::Packet
ProbePointArg< PacketInfo > Packet
Packet probe point.
Definition: mem.hh:103
Addr
uint64_t Addr
Address type This will probably be moved somewhere else in the near future.
Definition: types.hh:142
Cache::sendMSHRQueuePacket
bool sendMSHRQueuePacket(MSHR *mshr) override
Take an MSHR, turn it into a suitable downstream packet, and send it out.
Definition: cache.cc:1353
BaseCache::satisfyRequest
virtual void satisfyRequest(PacketPtr pkt, CacheBlk *blk, bool deferred_response=false, bool pending_downgrade=false)
Perform any necessary updates to the block and perform any data exchange between the packet and the b...
Definition: base.cc:904
Cache::promoteWholeLineWrites
void promoteWholeLineWrites(PacketPtr pkt)
Turn line-sized writes into WriteInvalidate transactions.
Definition: cache.cc:296
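promoteWholeLineWrites relies on isWholeLineWrite: a write that is block-aligned and block-sized can skip the read-for-ownership and go out as an invalidating whole-line write. A standalone sketch of the predicate (the real check also inspects the command type):

```cpp
#include <cassert>
#include <cstdint>

using Addr = uint64_t;

struct MiniWrite { Addr addr; unsigned size; };

// A write is "whole line" when it is block-aligned and block-sized,
// mirroring Packet::isWholeLineWrite(blk_size).
bool isWholeLineWrite(const MiniWrite &w, unsigned blk_size)
{
    return (w.addr & (blk_size - 1)) == 0 && w.size == blk_size;
}

int main()
{
    assert(isWholeLineWrite({0x1000, 64}, 64));   // promote to WriteLineReq
    assert(!isWholeLineWrite({0x1008, 64}, 64));  // unaligned: normal WriteReq
    return 0;
}
```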
Clocked::clockPeriod
Tick clockPeriod() const
Definition: clocked_object.hh:214
Cache::handleAtomicReqMiss
Cycles handleAtomicReqMiss(PacketPtr pkt, CacheBlk *&blk, PacketList &writebacks) override
Handle a request in atomic mode that missed in this cache.
Definition: cache.cc:558
MemCmd::SCUpgradeReq
@ SCUpgradeReq
Definition: packet.hh:99
Packet::hasData
bool hasData() const
Definition: packet.hh:576
MSHR::getTarget
QueueEntry::Target * getTarget() override
Returns a reference to the first target.
Definition: mshr.hh:449
BaseCache::compressor
Compressor::Base * compressor
Compression method being used.
Definition: base.hh:323
BaseCache::writebackClean
const bool writebackClean
Determine if clean lines should be written back or not.
Definition: base.hh:626
SimObject::name
virtual const std::string name() const
Definition: sim_object.hh:133
Packet::cmd
MemCmd cmd
The command field of the packet.
Definition: packet.hh:335
BaseCache::tags
BaseTags * tags
Tag and data storage.
Definition: base.hh:320
MSHR::wasWholeLineWrite
bool wasWholeLineWrite
Track if we sent this as a whole line write or not.
Definition: mshr.hh:119
CacheBlk::isValid
bool isValid() const
Checks that a block is valid.
Definition: cache_blk.hh:203
panic_if
#define panic_if(cond,...)
Conditional panic macro that checks the supplied condition and only panics if the condition is true a...
Definition: logging.hh:197
BaseCache::evictBlock
virtual M5_NODISCARD PacketPtr evictBlock(CacheBlk *blk)=0
Evict a cache block.
MSHR::Target::FromSnoop
@ FromSnoop
Definition: mshr.hh:129
BlkWritable
@ BlkWritable
write permission
Definition: cache_blk.hh:67
MSHR::hasPostDowngrade
bool hasPostDowngrade() const
Definition: mshr.hh:326
CacheBlk::data
uint8_t * data
Contains a copy of the data in this block for easy access.
Definition: cache_blk.hh:99
cache_blk.hh
Cache::handleSnoop
uint32_t handleSnoop(PacketPtr pkt, CacheBlk *blk, bool is_timing, bool is_deferred, bool pending_inval)
Perform an upward snoop if needed, and update the block state (possibly invalidating the block).
Definition: cache.cc:981
CacheBlk
A Basic Cache block.
Definition: cache_blk.hh:84
System::maxRequestors
RequestorID maxRequestors()
Get the number of requestors registered in the system.
Definition: system.hh:503
WriteQueueEntry
Write queue entry.
Definition: write_queue_entry.hh:65
types.hh
Packet::isClean
bool isClean() const
Definition: packet.hh:573
Clocked::ticksToCycles
Cycles ticksToCycles(Tick t) const
Definition: clocked_object.hh:219
Packet::setExpressSnoop
void setExpressSnoop()
The express snoop flag is used for two purposes.
Definition: packet.hh:661
BaseCache::handleTimingReqMiss
virtual void handleTimingReqMiss(PacketPtr pkt, CacheBlk *blk, Tick forward_time, Tick request_time)=0
Packet
A Packet is used to encapsulate a transfer between two objects in the memory system (e....
Definition: packet.hh:257
BaseCache::writeAllocator
WriteAllocator *const writeAllocator
The writeAllocator drives optimizations for streaming writes.
Definition: base.hh:351
Packet::isMaskedWrite
bool isMaskedWrite() const
Definition: packet.hh:1373
WriteAllocator::allocate
bool allocate() const
Should writes allocate?
Definition: base.hh:1328
MemCmd::InvalidateReq
@ InvalidateReq
Definition: packet.hh:133
BaseTags::findBlock
virtual CacheBlk * findBlock(Addr addr, bool is_secure) const
Finds the block in the cache without touching it.
Definition: base.cc:77
QueueEntry::isUncacheable
bool isUncacheable() const
Definition: queue_entry.hh:124
logging.hh
Cycles
Cycles is a wrapper class for representing cycle counts, i.e. a relative difference between two clock edges.
Definition: types.hh:83
Packet::isWrite
bool isWrite() const
Definition: packet.hh:557
MemCmd::ReadSharedReq
@ ReadSharedReq
Definition: packet.hh:106
MemCmd::ReadResp
@ ReadResp
Definition: packet.hh:83
MSHR::hasPostInvalidate
bool hasPostInvalidate() const
Definition: mshr.hh:322
BaseCache::CacheCmdStats::mshr_uncacheable
Stats::Vector mshr_uncacheable
Number of misses that miss in the MSHRs, per command and thread.
Definition: base.hh:959
MemCmd::InvalidateResp
@ InvalidateResp
Definition: packet.hh:134
Cache::doFastWrites
const bool doFastWrites
This cache should allocate a block on a line-sized write miss.
Definition: cache.hh:69
Cache::createMissPacket
PacketPtr createMissPacket(PacketPtr cpu_pkt, CacheBlk *blk, bool needs_writable, bool is_whole_line_write) const override
Create an appropriate downstream bus request packet.
Definition: cache.cc:472
trace.hh
BaseCache::responseLatency
const Cycles responseLatency
The latency of sending a response to its upper level cache/core on a linefill.
Definition: base.hh:868
MemCmd::WriteClean
@ WriteClean
Definition: packet.hh:90
Cache::doWritebacks
void doWritebacks(PacketList &writebacks, Tick forward_time) override
Insert writebacks into the write buffer.
Definition: cache.cc:185
Packet::senderState
SenderState * senderState
This packet's sender state.
Definition: packet.hh:508
BaseCache::recvTimingResp
virtual void recvTimingResp(PacketPtr pkt)
Handles a response (cache line fill/write ack) from the bus.
Definition: base.cc:402
RequestPort::sendAtomic
Tick sendAtomic(PacketPtr pkt)
Send an atomic request packet, where the data is moved and the state is updated in zero time,...
Definition: port.hh:461
MipsISA::p
Bitfield< 0 > p
Definition: pra_constants.hh:323
std::list
STL list class.
Definition: stl.hh:51
Packet::isBlockCached
bool isBlockCached() const
Definition: packet.hh:720
MSHR::isForward
bool isForward
True if the entry is just a simple forward from an upper level.
Definition: mshr.hh:122
Packet::allocate
void allocate()
Allocate memory for the packet.
Definition: packet.hh:1299
MemCmd::ReadExReq
@ ReadExReq
Definition: packet.hh:103
MemCmd::UpgradeFailResp
@ UpgradeFailResp
Definition: packet.hh:102
Cache
A coherent cache that can be arranged in flexible topologies.
Definition: cache.hh:63
Cache::Cache
Cache(const CacheParams *p)
Instantiates a basic cache object.
Definition: cache.cc:66
BaseCache::recvAtomic
virtual Tick recvAtomic(PacketPtr pkt)
Performs the access specified by the request.
Definition: base.cc:540
Cache::doTimingSupplyResponse
void doTimingSupplyResponse(PacketPtr req_pkt, const uint8_t *blk_data, bool already_copied, bool pending_inval)
Definition: cache.cc:936
Cache::serviceMSHRTargets
void serviceMSHRTargets(MSHR *mshr, const PacketPtr pkt, CacheBlk *blk) override
Service non-deferred MSHR targets using the received response.
Definition: cache.cc:681
MemCmd::StoreCondReq
@ StoreCondReq
Definition: packet.hh:108
Packet::getConstPtr
const T * getConstPtr() const
Definition: packet.hh:1166
BaseCache::CacheStats::cmdStats
CacheCmdStats & cmdStats(const PacketPtr p)
Definition: base.hh:978
BaseCache::inRange
bool inRange(Addr addr) const
Determine if an address is in the ranges covered by this cache.
Definition: base.cc:200
CacheBlk::isSecure
bool isSecure() const
Check if this block holds data from the secure memory space.
Definition: cache_blk.hh:245
MemCmd::UpgradeResp
@ UpgradeResp
Definition: packet.hh:100
write_queue_entry.hh
MemCmd::StoreCondFailReq
@ StoreCondFailReq
Definition: packet.hh:109
panic
#define panic(...)
This implements a cprintf-based panic() function.
Definition: logging.hh:171
MSHR
Miss Status and handling Register.
Definition: mshr.hh:69
BaseCache::order
uint64_t order
Increasing order number assigned to each incoming request.
Definition: base.hh:903
