gem5 v24.0.0.0
cache.cc
1/*
2 * Copyright (c) 2010-2019 ARM Limited
3 * All rights reserved.
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Copyright (c) 2002-2005 The Regents of The University of Michigan
15 * Copyright (c) 2010,2015 Advanced Micro Devices, Inc.
16 * All rights reserved.
17 *
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions are
20 * met: redistributions of source code must retain the above copyright
21 * notice, this list of conditions and the following disclaimer;
22 * redistributions in binary form must reproduce the above copyright
23 * notice, this list of conditions and the following disclaimer in the
24 * documentation and/or other materials provided with the distribution;
25 * neither the name of the copyright holders nor the names of its
26 * contributors may be used to endorse or promote products derived from
27 * this software without specific prior written permission.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
30 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
31 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
32 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
33 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
34 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
35 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
36 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
37 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
38 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
39 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
40 */
41
42/**
43 * @file
44 * Cache definitions.
45 */
46
47#include "mem/cache/cache.hh"
48
49#include <cassert>
50
51#include "base/compiler.hh"
52#include "base/logging.hh"
53#include "base/trace.hh"
54#include "base/types.hh"
55#include "debug/Cache.hh"
56#include "debug/CacheTags.hh"
57#include "debug/CacheVerbose.hh"
58#include "enums/Clusivity.hh"
59#include "mem/cache/cache_blk.hh"
60#include "mem/cache/mshr.hh"
61#include "mem/cache/tags/base.hh"
62#include "mem/cache/write_queue_entry.hh"
63#include "mem/request.hh"
64#include "params/Cache.hh"
65
66namespace gem5
67{
68
69Cache::Cache(const CacheParams &p)
70 : BaseCache(p, p.system->cacheLineSize()),
71 doFastWrites(true)
72{
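    // The coherent Cache expects the params to supply both a tag
    // store and a replacement policy; fail fast if the configuration
    // did not provide them.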
73 assert(p.tags);
74 assert(p.replacement_policy);
75}
76
77void
78Cache::satisfyRequest(PacketPtr pkt, CacheBlk *blk,
79 bool deferred_response, bool pending_downgrade)
80{
81 BaseCache::satisfyRequest(pkt, blk);
82
83 if (pkt->isRead()) {
84 // determine if this read is from a (coherent) cache or not
85 if (pkt->fromCache()) {
86 assert(pkt->getSize() == blkSize);
87 // special handling for coherent block requests from
88 // upper-level caches
89 if (pkt->needsWritable()) {
90 // sanity check
91 assert(pkt->cmd == MemCmd::ReadExReq ||
92 pkt->cmd == MemCmd::SCUpgradeFailReq);
93 assert(!pkt->hasSharers());
94
95 // if we have a dirty copy, make sure the recipient
96 // keeps it marked dirty (in the modified state)
97 if (blk->isSet(CacheBlk::DirtyBit)) {
98 pkt->setCacheResponding();
99 blk->clearCoherenceBits(CacheBlk::DirtyBit);
100 }
101 } else if (blk->isSet(CacheBlk::WritableBit) &&
102 !pending_downgrade && !pkt->hasSharers() &&
103 pkt->cmd != MemCmd::ReadCleanReq) {
104 // we can give the requestor a writable copy on a read
105 // request if:
106 // - we have a writable copy at this level (& below)
107 // - we don't have a pending snoop from below
108 // signaling another read request
109 // - no other cache above has a copy (otherwise it
110 // would have set hasSharers flag when
111 // snooping the packet)
112 // - the read has explicitly asked for a clean
113 // copy of the line
114 if (blk->isSet(CacheBlk::DirtyBit)) {
115 // special considerations if we're owner:
116 if (!deferred_response) {
117 // respond with the line in Modified state
118 // (cacheResponding set, hasSharers not set)
119 pkt->setCacheResponding();
120
121 // if this cache is mostly inclusive, we
122 // keep the block in the Exclusive state,
123 // and pass it upwards as Modified
124 // (writable and dirty), hence we have
125 // multiple caches, all on the same path
126 // towards memory, all considering the
127 // same block writable, but only one
128 // considering it Modified
129
130 // we get away with multiple caches (on
131 // the same path to memory) considering
132 // the block writeable as we always enter
133 // the cache hierarchy through a cache,
134 // and first snoop upwards in all other
135 // branches
136 blk->clearCoherenceBits(CacheBlk::DirtyBit);
137 } else {
138 // if we're responding after our own miss,
139 // there's a window where the recipient didn't
140 // know it was getting ownership and may not
141 // have responded to snoops correctly, so we
142 // have to respond with a shared line
143 pkt->setHasSharers();
144 }
145 }
146 } else {
147 // otherwise only respond with a shared copy
148 pkt->setHasSharers();
149 }
150 }
151 }
152}
153
154/////////////////////////////////////////////////////
155//
156// Access path: requests coming in from the CPU side
157//
158/////////////////////////////////////////////////////
159
160bool
161Cache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat,
162 PacketList &writebacks)
163{
164
165 if (pkt->req->isUncacheable()) {
166 assert(pkt->isRequest());
167
168 gem5_assert(!(isReadOnly && pkt->isWrite()),
169 "Should never see a write in a read-only cache %s\n",
170 name());
171
172 DPRINTF(Cache, "%s for %s\n", __func__, pkt->print());
173
174 // flush and invalidate any existing block
175 CacheBlk *old_blk(tags->findBlock(pkt->getAddr(), pkt->isSecure()));
176 if (old_blk && old_blk->isValid()) {
177 BaseCache::evictBlock(old_blk, writebacks);
178 }
179
180 blk = nullptr;
181 // lookupLatency is the latency in case the request is uncacheable.
182 lat = lookupLatency;
183 return false;
184 }
185
186 return BaseCache::access(pkt, blk, lat, writebacks);
187}
188
189void
190Cache::doWritebacks(PacketList& writebacks, Tick forward_time)
191{
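    // Drain the writeback list: depending on whether the block is
    // still cached above, each packet is either dropped, tagged with
    // BLOCK_CACHED, or sent on to the write buffer below.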
192 while (!writebacks.empty()) {
193 PacketPtr wbPkt = writebacks.front();
194 // We use forwardLatency here because we are copying writebacks to
195 // write buffer.
196
197 // Call isCachedAbove for Writebacks, CleanEvicts and
198 // WriteCleans to discover if the block is cached above.
199 if (isCachedAbove(wbPkt)) {
200 if (wbPkt->cmd == MemCmd::CleanEvict) {
201 // Delete CleanEvict because cached copies exist above. The
202 // packet destructor will delete the request object because
203 // this is a non-snoop request packet which does not require a
204 // response.
205 delete wbPkt;
206 } else if (wbPkt->cmd == MemCmd::WritebackClean) {
207 // clean writeback, do not send since the block is
208 // still cached above
209 assert(writebackClean);
210 delete wbPkt;
211 } else {
212 assert(wbPkt->cmd == MemCmd::WritebackDirty ||
213 wbPkt->cmd == MemCmd::WriteClean);
214 // Set BLOCK_CACHED flag in Writeback and send below, so that
215 // the Writeback does not reset the bit corresponding to this
216 // address in the snoop filter below.
217 wbPkt->setBlockCached();
218 allocateWriteBuffer(wbPkt, forward_time);
219 }
220 } else {
221 // If the block is not cached above, send packet below. Both
222 // CleanEvict and Writeback with BLOCK_CACHED flag cleared will
223 // reset the bit corresponding to this address in the snoop filter
224 // below.
225 allocateWriteBuffer(wbPkt, forward_time);
226 }
227 writebacks.pop_front();
228 }
229}
230
231void
232Cache::doWritebacksAtomic(PacketList& writebacks)
233{
234 while (!writebacks.empty()) {
235 PacketPtr wbPkt = writebacks.front();
236 // Call isCachedAbove for both Writebacks and CleanEvicts. If
237 // isCachedAbove returns true we set BLOCK_CACHED flag in Writebacks
238 // and discard CleanEvicts.
239 if (isCachedAbove(wbPkt, false)) {
240 if (wbPkt->cmd == MemCmd::WritebackDirty ||
241 wbPkt->cmd == MemCmd::WriteClean) {
242 // Set BLOCK_CACHED flag in Writeback and send below,
243 // so that the Writeback does not reset the bit
244 // corresponding to this address in the snoop filter
245 // below. We can discard CleanEvicts because cached
246 // copies exist above. Atomic mode isCachedAbove
247 // modifies packet to set BLOCK_CACHED flag
248 memSidePort.sendAtomic(wbPkt);
249 }
250 } else {
251 // If the block is not cached above, send packet below. Both
252 // CleanEvict and Writeback with BLOCK_CACHED flag cleared will
253 // reset the bit corresponding to this address in the snoop filter
254 // below.
255 memSidePort.sendAtomic(wbPkt);
256 }
257 writebacks.pop_front();
258 // In case of CleanEvicts, the packet destructor will delete the
259 // request object because this is a non-snoop request packet which
260 // does not require a response.
261 delete wbPkt;
262 }
263}
264
265
266void
267Cache::recvTimingSnoopResp(PacketPtr pkt)
268{
269 DPRINTF(Cache, "%s for %s\n", __func__, pkt->print());
270
271 // determine if the response is from a snoop request we created
272 // (in which case it should be in the outstandingSnoop), or if we
273 // merely forwarded someone else's snoop request
274 const bool forwardAsSnoop = outstandingSnoop.find(pkt->req) ==
275 outstandingSnoop.end();
276
277 if (!forwardAsSnoop) {
278 // the packet came from this cache, so sink it here and do not
279 // forward it
280 assert(pkt->cmd == MemCmd::HardPFResp);
281
282 outstandingSnoop.erase(pkt->req);
283
284 DPRINTF(Cache, "Got prefetch response from above for addr "
285 "%#llx (%s)\n", pkt->getAddr(), pkt->isSecure() ? "s" : "ns");
286 recvTimingResp(pkt);
287 return;
288 }
289
290 // forwardLatency is set here because there is a response from an
291 // upper level cache.
292 // To pay the delay that occurs if the packet comes from the bus,
293 // we charge also headerDelay.
294 Tick snoop_resp_time = clockEdge(forwardLatency) + pkt->headerDelay;
295 // Reset the timing of the packet.
296 pkt->headerDelay = pkt->payloadDelay = 0;
297 memSidePort.schedTimingSnoopResp(pkt, snoop_resp_time);
298}
299
300void
301Cache::promoteWholeLineWrites(PacketPtr pkt)
302{
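    // A WriteReq that covers a whole, aligned cache line can be
    // promoted to a WriteLineReq, letting the cache install the line
    // without first fetching data that is about to be overwritten.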
303 // Cache line clearing instructions
304 if (doFastWrites && (pkt->cmd == MemCmd::WriteReq) &&
305 (pkt->getSize() == blkSize) && (pkt->getOffset(blkSize) == 0) &&
306 !pkt->isMaskedWrite()) {
307 pkt->cmd = MemCmd::WriteLineReq;
308 DPRINTF(Cache, "packet promoted from Write to WriteLineReq\n");
309 }
310}
311
312void
313Cache::handleTimingReqHit(PacketPtr pkt, CacheBlk *blk, Tick request_time)
314{
315 // should never be satisfying an uncacheable access as we
316 // flush and invalidate any existing block as part of the
317 // lookup
318 assert(!pkt->req->isUncacheable());
319
320 BaseCache::handleTimingReqHit(pkt, blk, request_time);
321}
322
323void
324Cache::handleTimingReqMiss(PacketPtr pkt, CacheBlk *blk, Tick forward_time,
325 Tick request_time)
326{
327
328 // These should always hit due to the earlier Locked Read
329 assert(pkt->cmd != MemCmd::LockedRMWWriteReq);
330 if (pkt->req->isUncacheable()) {
331 // ignore any existing MSHR if we are dealing with an
332 // uncacheable request
333
334 // should have flushed and have no valid block
335 assert(!blk || !blk->isValid());
336
337 stats.cmdStats(pkt).mshrUncacheable[pkt->req->requestorId()]++;
338
339 if (pkt->isWrite()) {
340 allocateWriteBuffer(pkt, forward_time);
341 } else {
342 // uncacheable accesses always allocate a new MSHR
343
344 // Here we are using forward_time, modelling the latency of
345 // a miss (outbound) just as forwardLatency, neglecting the
346 // lookupLatency component.
347
348 // Here we allow allocating miss buffer for read requests
349 // and x86's clflush requests. A clflush request should be
350 // propagated through all levels of the cache system.
351
352 // Doing clflush in uncacheable regions might sound contradictory;
353 // however, it is entirely possible due to how the Linux kernel
354 // handles page property changes. When the Linux kernel wants to
355 // change a page property, it flushes the related cache lines. The
356 // kernel might change the page property before flushing the cache
357 // lines, so the clflush might occur in an uncacheable
358 // region, where the kernel marks a region uncacheable before
359 // flushing. clflush results in a CleanInvalidReq.
360 assert(pkt->isRead() || pkt->isCleanInvalidateRequest());
361 allocateMissBuffer(pkt, forward_time);
362 }
363
364 return;
365 }
366
367 Addr blk_addr = pkt->getBlockAddr(blkSize);
368
369 MSHR *mshr = mshrQueue.findMatch(blk_addr, pkt->isSecure());
370
371 // Software prefetch handling:
372 // To keep the core from waiting on data it won't look at
373 // anyway, send back a response with dummy data. Miss handling
374 // will continue asynchronously. Unfortunately, the core will
375 // insist upon freeing original Packet/Request, so we have to
376 // create a new pair with a different lifecycle. Note that this
377 // processing happens before any MSHR munging on the behalf of
378 // this request because this new Request will be the one stored
379 // into the MSHRs, not the original.
380 if (pkt->cmd.isSWPrefetch()) {
381 assert(pkt->needsResponse());
382 assert(pkt->req->hasPaddr());
383 assert(!pkt->req->isUncacheable());
384
385 // There's no reason to add a prefetch as an additional target
386 // to an existing MSHR. If an outstanding request is already
387 // in progress, there is nothing for the prefetch to do.
388 // If this is the case, we don't even create a request at all.
389 PacketPtr pf = nullptr;
390
391 if (!mshr) {
392 // copy the request and create a new SoftPFReq packet
393 RequestPtr req = std::make_shared<Request>(pkt->req->getPaddr(),
394 pkt->req->getSize(),
395 pkt->req->getFlags(),
396 pkt->req->requestorId());
397 pf = new Packet(req, pkt->cmd);
398 pf->allocate();
399 assert(pf->matchAddr(pkt));
400 assert(pf->getSize() == pkt->getSize());
401 }
402
403 pkt->makeTimingResponse();
404
405 // request_time is used here, taking into account lat and the delay
406 // charged if the packet comes from the xbar.
407 cpuSidePort.schedTimingResp(pkt, request_time);
408
409 // If an outstanding request is in progress (we found an
410 // MSHR) this is set to null
411 pkt = pf;
412 }
413
414 BaseCache::handleTimingReqMiss(pkt, mshr, blk, forward_time, request_time);
415}
416
417void
418Cache::recvTimingReq(PacketPtr pkt)
419{
420 DPRINTF(CacheTags, "%s tags:\n%s\n", __func__, tags->print());
421
422 promoteWholeLineWrites(pkt);
423
424 if (pkt->cacheResponding()) {
425 // a cache above us (but not where the packet came from) is
426 // responding to the request, in other words it has the line
427 // in Modified or Owned state
428 DPRINTF(Cache, "Cache above responding to %s: not responding\n",
429 pkt->print());
430
431 // if the packet needs the block to be writable, and the cache
432 // that has promised to respond (setting the cache responding
433 // flag) is not providing writable (it is in Owned rather than
434 // the Modified state), we know that there may be other Shared
435 // copies in the system; go out and invalidate them all
436 assert(pkt->needsWritable() && !pkt->responderHadWritable());
437
438 // an upstream cache that had the line in Owned state
439 // (dirty, but not writable), is responding and thus
440 // transferring the dirty line from one branch of the
441 // cache hierarchy to another
442
443 // send out an express snoop and invalidate all other
444 // copies (snooping a packet that needs writable is the
445 // same as an invalidation), thus turning the Owned line
446 // into a Modified line, note that we don't invalidate the
447 // block in the current cache or any other cache on the
448 // path to memory
449
450 // create a downstream express snoop with cleared packet
451 // flags, there is no need to allocate any data as the
452 // packet is merely used to co-ordinate state transitions
453 Packet *snoop_pkt = new Packet(pkt, true, false);
454
455 // also reset the bus time that the original packet has
456 // not yet paid for
457 snoop_pkt->headerDelay = snoop_pkt->payloadDelay = 0;
458
459 // make this an instantaneous express snoop, and let the
460 // other caches in the system know that another cache
461 // is responding, because we have found the authoritative
462 // copy (Modified or Owned) that will supply the right
463 // data
464 snoop_pkt->setExpressSnoop();
465 snoop_pkt->setCacheResponding();
466
467 // this express snoop travels towards the memory, and at
468 // every crossbar it is snooped upwards thus reaching
469 // every cache in the system
470 [[maybe_unused]] bool success = memSidePort.sendTimingReq(snoop_pkt);
471 // express snoops always succeed
472 assert(success);
473
474 // main memory will delete the snoop packet
475
476 // queue for deletion, as opposed to immediate deletion, as
477 // the sending cache is still relying on the packet
478 pendingDelete.reset(pkt);
479
480 // no need to take any further action in this particular cache
481 // as an upstream cache has already committed to responding,
482 // and we have already sent out any express snoops in the
483 // section above to ensure all other copies in the system are
484 // invalidated
485 return;
486 }
487
488 BaseCache::recvTimingReq(pkt);
489}
490
491PacketPtr
492Cache::createMissPacket(PacketPtr cpu_pkt, CacheBlk *blk,
493 bool needsWritable,
494 bool is_whole_line_write) const
495{
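    // Translate a CPU-side packet that missed into the packet sent
    // towards memory; returning nullptr means the original packet
    // should simply be forwarded downstream unchanged.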
496 // should never see evictions here
497 assert(!cpu_pkt->isEviction());
498
499 bool blkValid = blk && blk->isValid();
500
501 if (cpu_pkt->req->isUncacheable() ||
502 (!blkValid && cpu_pkt->isUpgrade()) ||
503 cpu_pkt->cmd == MemCmd::InvalidateReq || cpu_pkt->isClean()) {
504 // uncacheable requests and upgrades from upper-level caches
505 // that missed completely just go through as is
506 return nullptr;
507 }
508
509 assert(cpu_pkt->needsResponse());
510
511 MemCmd cmd;
512 // @TODO make useUpgrades a parameter.
513 // Note that ownership protocols require upgrade, otherwise a
514 // write miss on a shared owned block will generate a ReadExcl,
515 // which will clobber the owned copy.
516 const bool useUpgrades = true;
517 assert(cpu_pkt->cmd != MemCmd::WriteLineReq || is_whole_line_write);
518 if (is_whole_line_write) {
519 assert(!blkValid || !blk->isSet(CacheBlk::WritableBit));
520 // forward as invalidate to all other caches, this gives us
521 // the line in Exclusive state, and invalidates all other
522 // copies
523 cmd = MemCmd::InvalidateReq;
524 } else if (blkValid && useUpgrades) {
525 // only reason to be here is that blk is read only and we need
526 // it to be writable
527 assert(needsWritable);
528 assert(!blk->isSet(CacheBlk::WritableBit));
529 cmd = cpu_pkt->isLLSC() ? MemCmd::SCUpgradeReq : MemCmd::UpgradeReq;
530 } else if (cpu_pkt->cmd == MemCmd::SCUpgradeFailReq ||
531 cpu_pkt->cmd == MemCmd::StoreCondFailReq) {
532 // Even though this SC will fail, we still need to send out the
533 // request and get the data to supply it to other snoopers in the case
534 // where the determination the StoreCond fails is delayed due to
535 // all caches not being on the same local bus.
536 cmd = MemCmd::SCUpgradeFailReq;
537 } else {
538 // block is invalid
539
540 // If the request does not need a writable there are two cases
541 // where we need to ensure the response will not fetch the
542 // block in dirty state:
543 // * this cache is read only and it does not perform
544 // writebacks,
545 // * this cache is mostly exclusive and will not fill (since
546 // it does not fill it will have to writeback the dirty data
547 // immediately, which generates unnecessary writebacks).
548 bool force_clean_rsp = isReadOnly || clusivity == enums::mostly_excl;
549 cmd = needsWritable ? MemCmd::ReadExReq :
550 (force_clean_rsp ? MemCmd::ReadCleanReq : MemCmd::ReadSharedReq);
551 }
552 PacketPtr pkt = new Packet(cpu_pkt->req, cmd, blkSize);
553
554 // if there are upstream caches that have already marked the
555 // packet as having sharers (not passing writable), pass that info
556 // downstream
557 if (cpu_pkt->hasSharers() && !needsWritable) {
558 // note that cpu_pkt may have spent a considerable time in the
559 // MSHR queue and that the information could possibly be out
560 // of date, however, there is no harm in conservatively
561 // assuming the block has sharers
562 pkt->setHasSharers();
563 DPRINTF(Cache, "%s: passing hasSharers from %s to %s\n",
564 __func__, cpu_pkt->print(), pkt->print());
565 }
566
567 // the packet should be block aligned
568 assert(pkt->getAddr() == pkt->getBlockAddr(blkSize));
569
570 pkt->allocate();
571 DPRINTF(Cache, "%s: created %s from %s\n", __func__, pkt->print(),
572 cpu_pkt->print());
573 return pkt;
574}
575
576
577Cycles
578Cache::handleAtomicReqMiss(PacketPtr pkt, CacheBlk *&blk,
579 PacketList &writebacks)
580{
581 // deal with the packets that go through the write path of
582 // the cache, i.e. any evictions and writes
583 if (pkt->isEviction() || pkt->cmd == MemCmd::WriteClean ||
584 (pkt->req->isUncacheable() && pkt->isWrite())) {
585 Cycles latency = ticksToCycles(memSidePort.sendAtomic(pkt));
586
587 // at this point, if the request was an uncacheable write
588 // request, it has been satisfied by a memory below and the
589 // packet carries the response back
590 assert(!(pkt->req->isUncacheable() && pkt->isWrite()) ||
591 pkt->isResponse());
592
593 return latency;
594 }
595
596 // only misses left
597
598 PacketPtr bus_pkt = createMissPacket(pkt, blk, pkt->needsWritable(),
599 pkt->isWholeLineWrite(blkSize));
600
601 bool is_forward = (bus_pkt == nullptr);
602
603 if (is_forward) {
604 // just forwarding the same request to the next level
605 // no local cache operation involved
606 bus_pkt = pkt;
607 }
608
609 DPRINTF(Cache, "%s: Sending an atomic %s\n", __func__,
610 bus_pkt->print());
611
612 const std::string old_state = blk ? blk->print() : "";
613
614 Cycles latency = ticksToCycles(memSidePort.sendAtomic(bus_pkt));
615
616 bool is_invalidate = bus_pkt->isInvalidate();
617
618 // We are now dealing with the response handling
619 DPRINTF(Cache, "%s: Receive response: %s for %s\n", __func__,
620 bus_pkt->print(), old_state);
621
622 // If packet was a forward, the response (if any) is already
623 // in place in the bus_pkt == pkt structure, so we don't need
624 // to do anything. Otherwise, use the separate bus_pkt to
625 // generate response to pkt and then delete it.
626 if (!is_forward) {
627 if (pkt->needsResponse()) {
628 assert(bus_pkt->isResponse());
629 if (bus_pkt->isError()) {
630 pkt->makeAtomicResponse();
631 pkt->copyError(bus_pkt);
632 } else if (pkt->isWholeLineWrite(blkSize)) {
633 // note the use of pkt, not bus_pkt here.
634
635 // write-line request to the cache that promoted
636 // the write to a whole line
637 const bool allocate = allocOnFill(pkt->cmd) &&
638 (!writeAllocator || writeAllocator->allocate());
639 blk = handleFill(bus_pkt, blk, writebacks, allocate);
640 assert(blk != NULL);
641 is_invalidate = false;
642 satisfyRequest(pkt, blk);
643 } else if (bus_pkt->isRead() ||
644 bus_pkt->cmd == MemCmd::UpgradeResp) {
645 // we're updating cache state to allow us to
646 // satisfy the upstream request from the cache
647 blk = handleFill(bus_pkt, blk, writebacks,
648 allocOnFill(pkt->cmd));
649 satisfyRequest(pkt, blk);
650 maintainClusivity(pkt->fromCache(), blk);
651 } else {
652 // we're satisfying the upstream request without
653 // modifying cache state, e.g., a write-through
654 pkt->makeAtomicResponse();
655 }
656 }
657 delete bus_pkt;
658 }
659
660 if (is_invalidate && blk && blk->isValid()) {
661 invalidateBlock(blk);
662 }
663
664 return latency;
665}
666
667Tick
668Cache::recvAtomic(PacketPtr pkt)
669{
670 promoteWholeLineWrites(pkt);
671
672 // follow the same flow as in recvTimingReq, and check if a cache
673 // above us is responding
674 if (pkt->cacheResponding()) {
675 assert(!pkt->req->isCacheInvalidate());
676 DPRINTF(Cache, "Cache above responding to %s: not responding\n",
677 pkt->print());
678
679 // if a cache is responding, and it had the line in Owned
680 // rather than Modified state, we need to invalidate any
681 // copies that are not on the same path to memory
682 assert(pkt->needsWritable() && !pkt->responderHadWritable());
683
684 return memSidePort.sendAtomic(pkt);
685 }
686
687 return BaseCache::recvAtomic(pkt);
688}
689
690
691/////////////////////////////////////////////////////
692//
693// Response handling: responses from the memory side
694//
695/////////////////////////////////////////////////////
696
697
698void
699Cache::serviceMSHRTargets(MSHR *mshr, const PacketPtr pkt, CacheBlk *blk)
700{
701 QueueEntry::Target *initial_tgt = mshr->getTarget();
702 // First offset for critical word first calculations
703 const int initial_offset = initial_tgt->pkt->getOffset(blkSize);
704
705 const bool is_error = pkt->isError();
706 // allow invalidation responses originating from write-line
707 // requests to be discarded
708 bool is_invalidate = pkt->isInvalidate() &&
709 !mshr->wasWholeLineWrite;
710
711 bool from_core = false;
712 bool from_pref = false;
713
714 if (pkt->cmd == MemCmd::LockedRMWWriteResp) {
715 // This is the fake response generated by the write half of the RMW;
716 // see comments in recvTimingReq(). The first target on the list
717 // should be the LockedRMWReadReq which has already been satisfied,
718 // either because it was a hit (and the MSHR was allocated in
719 // recvTimingReq()) or because it was left there after the initial
720 // response in extractServiceableTargets. In either case, we
721 // don't need to respond now, so pop it off to prevent the loop
722 // below from generating another response.
723 assert(initial_tgt->pkt->cmd == MemCmd::LockedRMWReadReq);
724 mshr->popTarget();
725 delete initial_tgt->pkt;
726 initial_tgt = nullptr;
727 }
728
729 MSHR::TargetList targets = mshr->extractServiceableTargets(pkt);
730 for (auto &target: targets) {
731 Packet *tgt_pkt = target.pkt;
732 switch (target.source) {
733 case MSHR::Target::FromCPU:
734 from_core = true;
735
736 Tick completion_time;
737 // Here we charge on completion_time the delay of the xbar if the
738 // packet comes from it, charged on headerDelay.
739 completion_time = pkt->headerDelay;
740
741 // Software prefetch handling for cache closest to core
742 if (tgt_pkt->cmd.isSWPrefetch()) {
743 if (tgt_pkt->needsWritable()) {
744 // All other copies of the block were invalidated and we
745 // have an exclusive copy.
746
747 // The coherence protocol assumes that if we fetched an
748 // exclusive copy of the block, we have the intention to
749 // modify it. Therefore the MSHR for the PrefetchExReq has
750 // been the point of ordering and this cache has committed
751 // to respond to snoops for the block.
752 //
753 // In most cases this is true anyway - a PrefetchExReq
754 // will be followed by a WriteReq. However, if that
755 // doesn't happen, the block is not marked as dirty and
756 // the cache doesn't respond to snoops even though it has
757 // committed to do so.
758 //
759 // To avoid deadlocks in cases where there is a snoop
760 // between the PrefetchExReq and the expected WriteReq, we
761 // proactively mark the block as Dirty.
762 assert(blk);
763 blk->setCoherenceBits(CacheBlk::DirtyBit);
764
765 panic_if(isReadOnly, "Prefetch exclusive requests from "
766 "read-only cache %s\n", name());
767 }
768
769 // a software prefetch would have already been ack'd
770 // immediately with dummy data so the core would be able to
771 // retire it. This request completes right here, so we
772 // deallocate it.
773 delete tgt_pkt;
774 break; // skip response
775 }
776
777 // unlike the other packet flows, where data is found in other
778 // caches or memory and brought back, write-line requests always
779 // have the data right away, so the above check for "is fill?"
780 // cannot actually be determined until examining the stored MSHR
781 // state. We "catch up" with that logic here, which is duplicated
782 // from above.
783 if (tgt_pkt->cmd == MemCmd::WriteLineReq) {
784 assert(!is_error);
785 assert(blk);
786 assert(blk->isSet(CacheBlk::WritableBit));
787 }
788
789 // Here we decide whether we will satisfy the target using
790 // data from the block or from the response. We use the
791 // block data to satisfy the request when the block is
792 // present and valid and in addition the response in not
793 // forwarding data to the cache above (we didn't fill
794 // either); otherwise we use the packet data.
795 if (blk && blk->isValid() &&
796 (!mshr->isForward || !pkt->hasData())) {
797 satisfyRequest(tgt_pkt, blk, true, mshr->hasPostDowngrade());
798
799 // How many bytes past the first request is this one
800 int transfer_offset =
801 tgt_pkt->getOffset(blkSize) - initial_offset;
802 if (transfer_offset < 0) {
803 transfer_offset += blkSize;
804 }
805
806 // If not critical word (offset) return payloadDelay.
807 // responseLatency is the latency of the return path
808 // from lower level caches/memory to an upper level cache or
809 // the core.
810 completion_time += clockEdge(responseLatency) +
811 (transfer_offset ? pkt->payloadDelay : 0);
812
813 assert(!tgt_pkt->req->isUncacheable());
814
815 assert(tgt_pkt->req->requestorId() < system->maxRequestors());
816 stats.cmdStats(tgt_pkt)
817 .missLatency[tgt_pkt->req->requestorId()] +=
818 completion_time - target.recvTime;
819
820 if (tgt_pkt->cmd == MemCmd::LockedRMWReadReq) {
821 // We're going to leave a target in the MSHR until the
822 // write half of the RMW occurs (see comments above in
823 // recvTimingReq()). Since we'll be using the current
824 // request packet (which has the allocated data pointer)
825 // to form the response, we have to allocate a new dummy
826 // packet to save in the MSHR target.
827 mshr->updateLockedRMWReadTarget(tgt_pkt);
828 // skip the rest of target processing after we
829 // send the response
830 // Mark block inaccessible until write arrives
831 blk->clearCoherenceBits(CacheBlk::WritableBit);
832 blk->clearCoherenceBits(CacheBlk::ReadableBit);
833 }
834 } else if (pkt->cmd == MemCmd::UpgradeFailResp) {
835 // failed StoreCond upgrade
836 assert(tgt_pkt->cmd == MemCmd::StoreCondReq ||
837 tgt_pkt->cmd == MemCmd::StoreCondFailReq ||
838 tgt_pkt->cmd == MemCmd::SCUpgradeFailReq);
839 // responseLatency is the latency of the return path
840 // from lower level caches/memory to an upper level cache or
841 // the core.
842 completion_time += clockEdge(responseLatency) +
843 pkt->payloadDelay;
844 tgt_pkt->req->setExtraData(0);
845 } else if (pkt->cmd == MemCmd::LockedRMWWriteResp) {
846 // Fake response on LockedRMW completion, see above.
847 // Since the data is already in the cache, we just use
848 // responseLatency with no extra penalties.
849 completion_time = clockEdge(responseLatency);
850 } else {
851 if (is_invalidate && blk && blk->isValid()) {
852 // We are about to send a response to a cache above
853 // that asked for an invalidation; we need to
854 // invalidate our copy immediately as the most
855 // up-to-date copy of the block will now be in the
856 // cache above. It will also prevent this cache from
857 // responding (if the block was previously dirty) to
858 // snoops as they should snoop the caches above where
859 // they will get the response from.
860 invalidateBlock(blk);
861 }
862 // not a cache fill, just forwarding response
863 // responseLatency is the latency of the return path
864 // from lower level caches/memory to the core.
865 completion_time += clockEdge(responseLatency) +
866 pkt->payloadDelay;
867 if (!is_error) {
868 if (pkt->isRead()) {
869 // sanity check
870 assert(pkt->matchAddr(tgt_pkt));
871 assert(pkt->getSize() >= tgt_pkt->getSize());
872
873 tgt_pkt->setData(pkt->getConstPtr<uint8_t>());
874 } else {
875 // MSHR targets can read data either from the
876 // block or the response pkt. If we can't get data
877 // from the block (i.e., invalid or has old data)
878 // or the response (did not bring in any data)
879 // then make sure that the target didn't expect
880 // any.
881 assert(!tgt_pkt->hasRespData());
882 }
883 }
884
885 // this response did not allocate here and therefore
886 // it was not consumed, make sure that any flags are
887 // carried over to cache above
888 tgt_pkt->copyResponderFlags(pkt);
889 }
890 tgt_pkt->makeTimingResponse();
891 // if this packet is an error copy that to the new packet
892 if (is_error)
893 tgt_pkt->copyError(pkt);
894 if (tgt_pkt->cmd == MemCmd::ReadResp &&
895 (is_invalidate || mshr->hasPostInvalidate())) {
896 // If intermediate cache got ReadRespWithInvalidate,
897 // propagate that. Response should not have
898 // isInvalidate() set otherwise.
899 tgt_pkt->cmd = MemCmd::ReadRespWithInvalidate;
900 DPRINTF(Cache, "%s: updated cmd to %s\n", __func__,
901 tgt_pkt->print());
902 }
903 // Reset the bus additional time as it is now accounted for
904 tgt_pkt->headerDelay = tgt_pkt->payloadDelay = 0;
905 cpuSidePort.schedTimingResp(tgt_pkt, completion_time);
906 break;
907
908 case MSHR::Target::FromPrefetcher:
909 assert(tgt_pkt->cmd == MemCmd::HardPFReq);
910 from_pref = true;
911
912 delete tgt_pkt;
913 break;
914
915 case MSHR::Target::FromSnoop:
916 // I don't believe that a snoop can be in an error state
917 assert(!is_error);
918 // response to snoop request
919 DPRINTF(Cache, "processing deferred snoop...\n");
920 // If the response is invalidating, a snooping target can
921 // be satisfied if it is also invalidating. If the response is not
922 // only invalidating, but more specifically an InvalidateResp, and
923 // the MSHR was created due to an InvalidateReq then a cache above
924 // is waiting to satisfy a WriteLineReq. In this case even a
925 // non-invalidating snoop is added as a target here since this is
926 // the ordering point. When the InvalidateResp reaches this cache,
927 // the snooping target will snoop further the cache above with the
928 // WriteLineReq.
929 assert(!is_invalidate || pkt->cmd == MemCmd::InvalidateResp ||
930 pkt->req->isCacheMaintenance() ||
931 mshr->hasPostInvalidate());
932 handleSnoop(tgt_pkt, blk, true, true, mshr->hasPostInvalidate());
933 break;
934
935 default:
936 panic("Illegal target->source enum %d\n", target.source);
937 }
938 }
939
940 if (blk && !from_core && from_pref) {
941 blk->setPrefetched();
942 }
943
944 if (!mshr->hasLockedRMWReadTarget()) {
945 maintainClusivity(targets.hasFromCache, blk);
946
947 if (blk && blk->isValid()) {
948 // an invalidate response stemming from a write line request
949 // should not invalidate the block, so check if the
950 // invalidation should be discarded
951 if (is_invalidate || mshr->hasPostInvalidate()) {
952 invalidateBlock(blk);
953 } else if (mshr->hasPostDowngrade()) {
954 blk->clearCoherenceBits(CacheBlk::WritableBit);
955 }
956 }
957 }
958}
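// Evict the given block: emit a writeback if the block is dirty (or if
// clean writebacks are enabled), otherwise a CleanEvict message, and
// invalidate the copy held by this cache.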
959
960PacketPtr
961Cache::evictBlock(CacheBlk *blk)
962{
963 PacketPtr pkt = (blk->isSet(CacheBlk::DirtyBit) || writebackClean) ?
964 writebackBlk(blk) : cleanEvictBlk(blk);
965
966 invalidateBlock(blk);
967
968 return pkt;
969}
970
971PacketPtr
972Cache::cleanEvictBlk(CacheBlk *blk)
973{
974 assert(!writebackClean);
975 assert(blk && blk->isValid() && !blk->isSet(CacheBlk::DirtyBit));
976
977 // Creating a zero sized write, a message to the snoop filter
978 RequestPtr req = std::make_shared<Request>(
978 regenerateBlkAddr(blk), blkSize, 0,
979 Request::wbRequestorId);
980
981 if (blk->isSecure())
982 req->setFlags(Request::SECURE);
983
984 req->taskId(blk->getTaskId());
985
986 PacketPtr pkt = new Packet(req, MemCmd::CleanEvict);
987 pkt->allocate();
988 DPRINTF(Cache, "Create CleanEvict %s\n", pkt->print());
989
990 return pkt;
991}
992
993/////////////////////////////////////////////////////
994//
995// Snoop path: requests coming in from the memory side
996//
997/////////////////////////////////////////////////////
998
999void
1000Cache::doTimingSupplyResponse(PacketPtr req_pkt, const uint8_t *blk_data,
1001 bool already_copied, bool pending_inval)
1002{
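    // Build and schedule the timing snoop response that supplies the
    // block data on behalf of this cache, reusing the request packet
    // if the caller already made a copy.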
1003 // sanity check
1004 assert(req_pkt->isRequest());
1005 assert(req_pkt->needsResponse());
1006
1007 DPRINTF(Cache, "%s: for %s\n", __func__, req_pkt->print());
1008 // timing-mode snoop responses require a new packet, unless we
1009 // already made a copy...
1010 PacketPtr pkt = req_pkt;
1011 if (!already_copied)
1012 // do not clear flags, and allocate space for data if the
1013 // packet needs it (the only packets that carry data are read
1014 // responses)
1015 pkt = new Packet(req_pkt, false, req_pkt->isRead());
1016
1017 assert(req_pkt->req->isUncacheable() || req_pkt->isInvalidate() ||
1018 pkt->hasSharers());
1019 pkt->makeTimingResponse();
1020 if (pkt->isRead()) {
1021 pkt->setDataFromBlock(blk_data, blkSize);
1022 }
1023 if (pkt->cmd == MemCmd::ReadResp && pending_inval) {
1024 // Assume we defer a response to a read from a far-away cache
1025 // A, then later defer a ReadExcl from a cache B on the same
1026 // bus as us. We'll assert cacheResponding in both cases, but
1027 // in the latter case cacheResponding will keep the
1028 // invalidation from reaching cache A. This special response
1029 // tells cache A that it gets the block to satisfy its read,
1030 // but must immediately invalidate it.
1031 pkt->cmd = MemCmd::ReadRespWithInvalidate;
1032 }
1033 // Here we consider forward_time, paying for just forward latency and
1034 // also charging the delay provided by the xbar.
1035 // forward_time is used as send_time in next allocateWriteBuffer().
1036 Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;
1037 // Here we reset the timing of the packet.
1038 pkt->headerDelay = pkt->payloadDelay = 0;
1039 DPRINTF(CacheVerbose, "%s: created response: %s tick: %lu\n", __func__,
1040 pkt->print(), forward_time);
1041 memSidePort.schedTimingSnoopResp(pkt, forward_time);
1042}
1043
1044uint32_t
1045Cache::handleSnoop(PacketPtr pkt, CacheBlk *blk, bool is_timing,
1046 bool is_deferred, bool pending_inval)
1047{
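    // Handle a snoop in this cache: forward it to the caches above if
    // needed, supply data if we hold the block dirty, downgrade or
    // invalidate our copy as required, and return the extra delay
    // incurred.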
1048 DPRINTF(CacheVerbose, "%s: for %s\n", __func__, pkt->print());
1049 // deferred snoops can only happen in timing mode
1050 assert(!(is_deferred && !is_timing));
1051 // pending_inval only makes sense on deferred snoops
1052 assert(!(pending_inval && !is_deferred));
1053 assert(pkt->isRequest());
1054
1055 // the packet may get modified if we or a forwarded snooper
1056 // responds in atomic mode, so remember a few things about the
1057 // original packet up front
1058 bool invalidate = pkt->isInvalidate();
1059 [[maybe_unused]] bool needs_writable = pkt->needsWritable();
1060
1061 // at the moment we could get an uncacheable write which does not
1062 // have the invalidate flag, and we need a suitable way of dealing
1063 // with this case
1064 panic_if(invalidate && pkt->req->isUncacheable(),
1065 "%s got an invalidating uncacheable snoop request %s",
1066 name(), pkt->print());
1067
1068 uint32_t snoop_delay = 0;
1069
1070 if (forwardSnoops) {
1071 // first propagate snoop upward to see if anyone above us wants to
1072 // handle it. save & restore packet src since it will get
1073 // rewritten to be relative to CPU-side bus (if any)
1074 if (is_timing) {
1075 // copy the packet so that we can clear any flags before
1076 // forwarding it upwards, we also allocate data (passing
1077 // the pointer along in case of static data), in case
1078 // there is a snoop hit in upper levels
1079 Packet snoopPkt(pkt, true, true);
1080 snoopPkt.setExpressSnoop();
1081 // the snoop packet does not need to wait any additional
1082 // time
1083 snoopPkt.headerDelay = snoopPkt.payloadDelay = 0;
1084 cpuSidePort.sendTimingSnoopReq(&snoopPkt);
1085
1086 // add the header delay (including crossbar and snoop
1087 // delays) of the upward snoop to the snoop delay for this
1088 // cache
1089 snoop_delay += snoopPkt.headerDelay;
1090
1091 // If this request is a prefetch or clean evict and an upper level
1092 // signals block present, make sure to propagate the block
1093 // presence to the requestor.
1094 if (snoopPkt.isBlockCached()) {
1095 pkt->setBlockCached();
1096 }
1097 // If the request was satisfied by snooping the cache
1098 // above, mark the original packet as satisfied too.
1099 if (snoopPkt.satisfied()) {
1100 pkt->setSatisfied();
1101 }
1102
1103 // Copy over flags from the snoop response to make sure we
1104 // inform the final destination
1105 pkt->copyResponderFlags(&snoopPkt);
1106 } else {
1107 bool already_responded = pkt->cacheResponding();
1108 cpuSidePort.sendAtomicSnoop(pkt);
1109 if (!already_responded && pkt->cacheResponding()) {
1110 // cache-to-cache response from some upper cache:
1111 // forward response to original requestor
1112 assert(pkt->isResponse());
1113 }
1114 }
1115 }
1116
1117 bool respond = false;
1118 bool blk_valid = blk && blk->isValid();
1119 if (pkt->isClean()) {
1120 if (blk_valid && blk->isSet(CacheBlk::DirtyBit)) {
1121 DPRINTF(CacheVerbose, "%s: packet (snoop) %s found block: %s\n",
1122 __func__, pkt->print(), blk->print());
1123 PacketPtr wb_pkt =
1124 writecleanBlk(blk, pkt->req->getDest(), pkt->id);
1125 PacketList writebacks;
1126 writebacks.push_back(wb_pkt);
1127
1128 if (is_timing) {
1129 // anything that is merely forwarded pays for the forward
1130 // latency and the delay provided by the crossbar
1131 Tick forward_time = clockEdge(forwardLatency) +
1132 pkt->headerDelay;
1133 doWritebacks(writebacks, forward_time);
1134 } else {
1135 doWritebacksAtomic(writebacks);
1136 }
1137 pkt->setSatisfied();
1138 }
1139 } else if (!blk_valid) {
1140 DPRINTF(CacheVerbose, "%s: snoop miss for %s\n", __func__,
1141 pkt->print());
1142 if (is_deferred) {
1143 // we no longer have the block, and will not respond, but a
1144 // packet was allocated in MSHR::handleSnoop and we have
1145 // to delete it
1146 assert(pkt->needsResponse());
1147
1148 // we have passed the block to a cache upstream, that
1149 // cache should be responding
1150 assert(pkt->cacheResponding());
1151
1152 delete pkt;
1153 }
1154 return snoop_delay;
1155 } else {
1156 DPRINTF(Cache, "%s: snoop hit for %s, old state is %s\n", __func__,
1157 pkt->print(), blk->print());
1158
1159 // We may end up modifying both the block state and the packet (if
1160 // we respond in atomic mode), so just figure out what to do now
1161 // and then do it later. We respond to all snoops that need
1162 // responses provided we have the block in dirty state. The
1163 // invalidation itself is taken care of below. We don't respond to
1164 // cache maintenance operations as this is done by the destination
1165 // xbar.
1166 respond = blk->isSet(CacheBlk::DirtyBit) && pkt->needsResponse();
1167
1168 gem5_assert(!(isReadOnly && blk->isSet(CacheBlk::DirtyBit)),
1169 "Should never have a dirty block in a read-only cache %s\n",
1170 name());
1171 }
1172
1173 // Invalidate any prefetches from below that would strip write permissions
1174 // MemCmd::HardPFReq is only observed by upstream caches. After missing
1175 // above and in its own cache, a new MemCmd::ReadReq is created that
1176 // downstream caches observe.
1177 if (pkt->mustCheckAbove()) {
1178 DPRINTF(Cache, "Found addr %#llx in upper level cache for snoop %s "
1179 "from lower cache\n", pkt->getAddr(), pkt->print());
1180 pkt->setBlockCached();
1181 return snoop_delay;
1182 }
1183
1184 if (pkt->isRead() && !invalidate) {
1185 // reading without requiring the line in a writable state
1186 assert(!needs_writable);
1187 pkt->setHasSharers();
1188
1189 // if the requesting packet is uncacheable, retain the line in
1190 // the current state, otherwise unset the writable flag,
1191 // which means we go from Modified to Owned (and will respond
1192 // below), remain in Owned (and will respond below), from
1193 // Exclusive to Shared, or remain in Shared
1194 if (!pkt->req->isUncacheable()) {
1195 blk->clearCoherenceBits(CacheBlk::WritableBit);
1196 }
1197 DPRINTF(Cache, "new state is %s\n", blk->print());
1198 }
1199
1200 if (respond) {
1201 // prevent anyone else from responding, cache as well as
1202 // memory, and also prevent any memory from even seeing the
1203 // request
1204 pkt->setCacheResponding();
1205 if (!pkt->isClean() && blk->isSet(CacheBlk::WritableBit)) {
1206 // inform the cache hierarchy that this cache had the line
1207 // in the Modified state so that we avoid unnecessary
1208 // invalidations (see Packet::setResponderHadWritable)
1209 pkt->setResponderHadWritable();
1210
1211 // in the case of an uncacheable request there is no point
1212 // in setting the responderHadWritable flag, but since the
1213 // recipient does not care there is no harm in doing so
1214 } else {
1215 // if the packet has needsWritable set we invalidate our
1216 // copy below and all other copies will be invalidates
1217 // through express snoops, and if needsWritable is not set
1218 // we already called setHasSharers above
1219 }
1220
1221 // if we are returning a writable and dirty (Modified) line,
1222 // we should be invalidating the line
1223 panic_if(!invalidate && !pkt->hasSharers(),
1224 "%s is passing a Modified line through %s, "
1225 "but keeping the block", name(), pkt->print());
1226
1227 if (is_timing) {
1228 doTimingSupplyResponse(pkt, blk->data, is_deferred, pending_inval);
1229 } else {
1230 pkt->makeAtomicResponse();
1231 // packets such as upgrades do not actually have any data
1232 // payload
1233 if (pkt->hasData())
1234 pkt->setDataFromBlock(blk->data, blkSize);
1235 }
1236
1237 // When a block is compressed, it must first be decompressed before
1238 // being read, and this increases the snoop delay.
1239 if (compressor && pkt->isRead()) {
1240 snoop_delay += compressor->getDecompressionLatency(blk);
1241 }
1242 }
1243
1244 if (!respond && is_deferred) {
1245 assert(pkt->needsResponse());
1246 delete pkt;
1247 }
1248
1249 // Do this last in case it deallocates block data or something
1250 // like that
1251 if (blk_valid && invalidate) {
1252 invalidateBlock(blk);
1253 DPRINTF(Cache, "new state is %s\n", blk->print());
1254 }
1255
1256 return snoop_delay;
1257}
1258
1259
1260void
1261Cache::recvTimingSnoopReq(PacketPtr pkt)
1262{
1263 DPRINTF(CacheVerbose, "%s: for %s\n", __func__, pkt->print());
1264
1265 // no need to snoop requests that are not in range
1266 if (!inRange(pkt->getAddr())) {
1267 return;
1268 }
1269
1270 bool is_secure = pkt->isSecure();
1271 CacheBlk *blk = tags->findBlock(pkt->getAddr(), is_secure);
1272
1273 Addr blk_addr = pkt->getBlockAddr(blkSize);
1274 MSHR *mshr = mshrQueue.findMatch(blk_addr, is_secure);
1275
1276 // Update the latency cost of the snoop so that the crossbar can
1277 // account for it. Do not overwrite what other neighbouring caches
1278 // have already done, rather take the maximum. The update is
1279 // tentative, for cases where we return before an upward snoop
1280 // happens below.
1281 pkt->snoopDelay = std::max<uint32_t>(pkt->snoopDelay,
1282 lookupLatency * clockPeriod());
1283
1284 // Inform request(Prefetch, CleanEvict or Writeback) from below of
1285 // MSHR hit, set setBlockCached.
1286 if (mshr && pkt->mustCheckAbove()) {
1287 DPRINTF(Cache, "Setting block cached for %s from lower cache on "
1288 "mshr hit\n", pkt->print());
1289 pkt->setBlockCached();
1290 return;
1291 }
1292
1293 // Let the MSHR itself track the snoop and decide whether we want
1294 // to go ahead and do the regular cache snoop
1295 if (mshr && mshr->handleSnoop(pkt, order++)) {
1296 DPRINTF(Cache, "Deferring snoop on in-service MSHR to blk %#llx (%s)."
1297 "mshrs: %s\n", blk_addr, is_secure ? "s" : "ns",
1298 mshr->print());
1299
1300 if (mshr->getNumTargets() > numTarget)
1301 warn("allocating bonus target for snoop"); //handle later
1302 return;
1303 }
1304
1305 //We also need to check the writeback buffers and handle those
1306 WriteQueueEntry *wb_entry = writeBuffer.findMatch(blk_addr, is_secure);
1307 if (wb_entry) {
1308 DPRINTF(Cache, "Snoop hit in writeback to addr %#llx (%s)\n",
1309 pkt->getAddr(), is_secure ? "s" : "ns");
1310 // Expect to see only Writebacks and/or CleanEvicts here, both of
1311 // which should not be generated for uncacheable data.
1312 assert(!wb_entry->isUncacheable());
1313 // There should only be a single request responsible for generating
1314 // Writebacks/CleanEvicts.
1315 assert(wb_entry->getNumTargets() == 1);
1316 PacketPtr wb_pkt = wb_entry->getTarget()->pkt;
1317 assert(wb_pkt->isEviction() || wb_pkt->cmd == MemCmd::WriteClean);
1318
1319 if (pkt->isEviction()) {
1320 // if the block is found in the write queue, set the BLOCK_CACHED
1321 // flag for Writeback/CleanEvict snoop. On return the snoop will
1322 // propagate the BLOCK_CACHED flag in Writeback packets and prevent
1323 // any CleanEvicts from travelling down the memory hierarchy.
1324 pkt->setBlockCached();
1325 DPRINTF(Cache, "%s: Squashing %s from lower cache on writequeue "
1326 "hit\n", __func__, pkt->print());
1327 return;
1328 }
1329
1330 // conceptually writebacks are no different to other blocks in
1331 // this cache, so the behaviour is modelled after handleSnoop,
1332 // the difference being that instead of querying the block
1333 // state to determine if it is dirty and writable, we use the
1334 // command and fields of the writeback packet
1335 bool respond = wb_pkt->cmd == MemCmd::WritebackDirty &&
1336 pkt->needsResponse();
1337 bool have_writable = !wb_pkt->hasSharers();
1338 bool invalidate = pkt->isInvalidate();
1339
1340 if (!pkt->req->isUncacheable() && pkt->isRead() && !invalidate) {
1341 assert(!pkt->needsWritable());
1342 pkt->setHasSharers();
1343 wb_pkt->setHasSharers();
1344 }
1345
1346 if (respond) {
1347 pkt->setCacheResponding();
1348
1349 if (have_writable) {
1350 pkt->setResponderHadWritable();
1351 }
1352
1353 doTimingSupplyResponse(pkt, wb_pkt->getConstPtr<uint8_t>(),
1354 false, false);
1355 }
1356
1357 if (invalidate && wb_pkt->cmd != MemCmd::WriteClean) {
1358 // Invalidation trumps our writeback... discard here
1359 // Note: markInService will remove entry from writeback buffer.
1360 markInService(wb_entry);
1361 delete wb_pkt;
1362 }
1363 }
1364
1365 // If this was a shared writeback, there may still be
1366 // other shared copies above that require invalidation.
1367 // We could be more selective and return here if the
1368 // request is non-exclusive or if the writeback is
1369 // exclusive.
1370 uint32_t snoop_delay = handleSnoop(pkt, blk, true, false, false);
1371
1372 // Override what we did when we first saw the snoop, as we now
1373 // also have the cost of the upwards snoops to account for
1374 pkt->snoopDelay = std::max<uint32_t>(pkt->snoopDelay, snoop_delay +
1375 lookupLatency * clockPeriod());
1376}
1377
1378Tick
1379Cache::recvAtomicSnoop(PacketPtr pkt)
1380{
1381 // no need to snoop requests that are not in range.
1382 if (!inRange(pkt->getAddr())) {
1383 return 0;
1384 }
1385
1386 CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());
1387 uint32_t snoop_delay = handleSnoop(pkt, blk, false, false, false);
1388 return snoop_delay + lookupLatency * clockPeriod();
1389}
1390
1391bool
1392Cache::isCachedAbove(PacketPtr pkt, bool is_timing)
1393{
1394 if (!forwardSnoops)
1395 return false;
1396 // Mirroring the flow of HardPFReqs, the cache sends CleanEvict and
1397 // Writeback snoops into upper level caches to check for copies of the
1398 // same block. Using the BLOCK_CACHED flag with the Writeback/CleanEvict
1399 // packet, the cache can inform the crossbar below of presence or absence
1400 // of the block.
1401 if (is_timing) {
1402 Packet snoop_pkt(pkt, true, false);
1403 snoop_pkt.setExpressSnoop();
1404 // Assert that packet is either Writeback or CleanEvict and not a
1405 // prefetch request because prefetch requests need an MSHR and may
1406 // generate a snoop response.
1407 assert(pkt->isEviction() || pkt->cmd == MemCmd::WriteClean);
1408 snoop_pkt.senderState = nullptr;
1409 cpuSidePort.sendTimingSnoopReq(&snoop_pkt);
1410 // Writeback/CleanEvict snoops do not generate a snoop response.
1411 assert(!(snoop_pkt.cacheResponding()));
1412 return snoop_pkt.isBlockCached();
1413 } else {
1414 cpuSidePort.sendAtomicSnoop(pkt);
1415 return pkt->isBlockCached();
1416 }
1417}
1418
1419bool
1420Cache::sendMSHRQueuePacket(MSHR *mshr)
1421{
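    // For hardware prefetches, first snoop the caches above: if a
    // cache up there is already responding the prefetch waits for
    // that response, and if the block is cached above the prefetch
    // is squashed; otherwise fall through to the base class.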
1422 assert(mshr);
1423
1424 // use request from 1st target
1425 PacketPtr tgt_pkt = mshr->getTarget()->pkt;
1426
1427 if (tgt_pkt->cmd == MemCmd::HardPFReq && forwardSnoops) {
1428 DPRINTF(Cache, "%s: MSHR %s\n", __func__, tgt_pkt->print());
1429
1430 // we should never have hardware prefetches to allocated
1431 // blocks
1432 assert(!tags->findBlock(mshr->blkAddr, mshr->isSecure));
1433
1434 // We need to check the caches above us to verify that
1435 // they don't have a copy of this block in the dirty state
1436 // at the moment. Without this check we could get a stale
1437 // copy from memory that might get used in place of the
1438 // dirty one.
1439 Packet snoop_pkt(tgt_pkt, true, false);
1440 snoop_pkt.setExpressSnoop();
1441 // We are sending this packet upwards, but if it hits we will
1442 // get a snoop response that we end up treating just like a
1443 // normal response, hence it needs the MSHR as its sender
1444 // state
1445 snoop_pkt.senderState = mshr;
1446 cpuSidePort.sendTimingSnoopReq(&snoop_pkt);
1447
1448 // Check to see if the prefetch was squashed by an upper
1449 // cache (to prevent us from grabbing the line), or if a
1450 // writeback arrived between the time the prefetch was
1451 // placed in the MSHRs and when it was selected to be
1452 // sent.
1453
1454 // It is important to check cacheResponding before
1455 // prefetchSquashed. If another cache has committed to
1456 // responding, it will be sending a dirty response which will
1457 // arrive at the MSHR allocated for this request. Checking the
1458 // prefetchSquash first may result in the MSHR being
1459 // prematurely deallocated.
1460 if (snoop_pkt.cacheResponding()) {
1461 [[maybe_unused]] auto r = outstandingSnoop.insert(snoop_pkt.req);
1462 assert(r.second);
1463
1464 // if we are getting a snoop response with no sharers it
1465 // will be allocated as Modified
1466 bool pending_modified_resp = !snoop_pkt.hasSharers();
1467 markInService(mshr, pending_modified_resp);
1468
1469 DPRINTF(Cache, "Upward snoop of prefetch for addr"
1470 " %#x (%s) hit\n",
1471 tgt_pkt->getAddr(), tgt_pkt->isSecure()? "s": "ns");
1472 return false;
1473 }
1474
1475 if (snoop_pkt.isBlockCached()) {
1476 DPRINTF(Cache, "Block present, prefetch squashed by cache. "
1477 "Deallocating mshr target %#x.\n",
1478 mshr->blkAddr);
1479
1480 // Deallocate the mshr target
1481 if (mshrQueue.forceDeallocateTarget(mshr)) {
1482 // Clear the blocked state if this deallocation freed an
1483 // MSHR when all had previously been in use
1484 clearBlocked(Blocked_NoMSHRs);
1485 }
1486
1487 // given that no response is expected, delete Request and Packet
1488 delete tgt_pkt;
1489
1490 return false;
1491 }
1492 }
1493
1494 return BaseCache::sendMSHRQueuePacket(mshr);
1495}
1496
1497} // namespace gem5
#define DPRINTF(x,...)
Definition trace.hh:210
Defines global host-dependent types: Counter, Tick, and (indirectly) {int,uint}{8,...
Describes a cache.
Definitions of a simple cache block class.
A basic cache interface.
Definition base.hh:100
const bool isReadOnly
Is this cache read only, for example the instruction cache, or table-walker cache.
Definition base.hh:949
virtual void recvTimingReq(PacketPtr pkt)
Performs the access specified by the request.
Definition base.cc:407
MSHR * allocateMissBuffer(PacketPtr pkt, Tick time, bool sched_send=true)
Definition base.hh:1179
CpuSidePort cpuSidePort
Definition base.hh:317
void invalidateBlock(CacheBlk *blk)
Invalidate a cache block.
Definition base.cc:1679
const bool writebackClean
Determine if clean lines should be written back or not.
Definition base.hh:681
bool inRange(Addr addr) const
Determine if an address is in the ranges covered by this cache.
Definition base.cc:217
virtual void handleTimingReqMiss(PacketPtr pkt, CacheBlk *blk, Tick forward_time, Tick request_time)=0
bool allocOnFill(MemCmd cmd) const
Determine whether we should allocate on a fill or not.
Definition base.hh:447
bool forwardSnoops
Do we forward snoops from mem side port through to cpu side port?
Definition base.hh:934
uint64_t order
Increasing order number assigned to each incoming request.
Definition base.hh:974
virtual void satisfyRequest(PacketPtr pkt, CacheBlk *blk, bool deferred_response=false, bool pending_downgrade=false)
Perform any necessary updates to the block and perform any data exchange between the packet and the b...
Definition base.cc:1098
gem5::BaseCache::CacheStats stats
const Cycles lookupLatency
The latency of tag lookup of a cache.
Definition base.hh:900
MSHRQueue mshrQueue
Miss status registers.
Definition base.hh:347
const unsigned blkSize
Block size of this cache.
Definition base.hh:894
const Cycles forwardLatency
This is the forward latency of the cache.
Definition base.hh:913
compression::Base * compressor
Compression method being used.
Definition base.hh:356
const Cycles responseLatency
The latency of sending reponse to its upper level cache/core on a linefill.
Definition base.hh:923
PacketPtr writecleanBlk(CacheBlk *blk, Request::Flags dest, PacketId id)
Create a writeclean request for the given block.
Definition base.cc:1758
MemSidePort memSidePort
Definition base.hh:318
virtual void handleTimingReqHit(PacketPtr pkt, CacheBlk *blk, Tick request_time)
Definition base.cc:228
virtual Tick recvAtomic(PacketPtr pkt)
Performs the access specified by the request.
Definition base.cc:638
virtual void recvTimingResp(PacketPtr pkt)
Handles a response (cache line fill/write ack) from the bus.
Definition base.cc:492
virtual bool access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat, PacketList &writebacks)
Does all the processing necessary to perform the provided request.
Definition base.cc:1242
Addr regenerateBlkAddr(CacheBlk *blk)
Regenerate block address using tags.
Definition base.cc:186
std::unique_ptr< Packet > pendingDelete
Upstream caches need this packet until true is returned, so hold it for deletion until a subsequent c...
Definition base.hh:408
const int numTarget
The number of targets for each MSHR.
Definition base.hh:931
WriteAllocator *const writeAllocator
The writeAllocator drive optimizations for streaming writes.
Definition base.hh:394
void markInService(MSHR *mshr, bool pending_modified_resp)
Mark a request as in service (sent downstream in the memory system), effectively making this MSHR the...
Definition base.hh:414
void allocateWriteBuffer(PacketPtr pkt, Tick time)
Definition base.hh:1197
CacheBlk * handleFill(PacketPtr pkt, CacheBlk *blk, PacketList &writebacks, bool allocate)
Handle a fill operation caused by a received packet.
Definition base.cc:1523
WriteQueue writeBuffer
Write/writeback buffer.
Definition base.hh:350
PacketPtr writebackBlk(CacheBlk *blk)
Create a writeback request for the given block.
Definition base.cc:1708
void clearBlocked(BlockedCause cause)
Marks the cache as unblocked for the given cause.
Definition base.hh:1261
virtual PacketPtr evictBlock(CacheBlk *blk)=0
Evict a cache block.
BaseTags * tags
Tag and data Storage.
Definition base.hh:353
const enums::Clusivity clusivity
Clusivity with respect to the upstream cache, determining if we fill into both this cache and the cac...
Definition base.hh:941
virtual bool sendMSHRQueuePacket(MSHR *mshr)
Take an MSHR, turn it into a suitable downstream packet, and send it out.
Definition base.cc:1882
void maintainClusivity(bool from_cache, CacheBlk *blk)
Maintain the clusivity of this cache by potentially invalidating a block.
Definition base.cc:1511
System * system
System we are currently operating in.
Definition base.hh:992
virtual CacheBlk * findBlock(Addr addr, bool is_secure) const
Finds the block in the cache without touching it.
Definition base.cc:82
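findBlock is the entry point for every tag lookup in this file. A minimal sketch of the lookup pattern used throughout cache.cc, assuming member context in a gem5 cache (so tags, and the headers included at the top of this file, are available); the comments only illustrate the calls:

// Illustrative lookup fragment; 'pkt' is the incoming PacketPtr.
CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());
if (blk) {
    // Hit: a valid block for this address and address space was found.
} else {
    // Miss: an MSHR or write-buffer entry is allocated instead.
}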
std::string print()
Print all tags used.
Definition base.cc:206
A Basic Cache block.
Definition cache_blk.hh:72
@ ReadableBit
Read permission.
Definition cache_blk.hh:86
@ WritableBit
Write permission.
Definition cache_blk.hh:81
@ DirtyBit
Dirty (modified).
Definition cache_blk.hh:88
void setPrefetched()
Marks this block as a recently prefetched block.
Definition cache_blk.hh:261
std::string print() const override
Pretty-print tag, set and way, and interpret state bits to readable form including mapping to a MOESI...
Definition cache_blk.hh:372
bool isSet(unsigned bits) const
Checks whether the given coherence bits are set.
Definition cache_blk.hh:242
void clearCoherenceBits(unsigned bits)
Clear the corresponding coherence bits.
Definition cache_blk.hh:234
uint32_t getTaskId() const
Get the task id associated with this block.
Definition cache_blk.hh:288
uint8_t * data
Contains a copy of the data in this block for easy access.
Definition cache_blk.hh:104
void setCoherenceBits(unsigned bits)
Sets the corresponding coherence bits.
Definition cache_blk.hh:223
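The coherence-bit accessors above (isSet, setCoherenceBits, clearCoherenceBits, together with ReadableBit, WritableBit and DirtyBit) are the state-update API a cache uses on hits and snoops. A hedged sketch of two common transitions; the helper names are hypothetical and exist only to illustrate the calls:

#include "mem/cache/cache_blk.hh"

// Hypothetical helper: a write hit leaves the line dirty (Modified);
// the block was already writable for the write to hit.
void markModified(gem5::CacheBlk *blk)
{
    blk->setCoherenceBits(gem5::CacheBlk::DirtyBit);
}

// Hypothetical helper: a non-invalidating snoop hit drops write
// permission but keeps the (possibly dirty/owned) copy readable.
void downgradeToShared(gem5::CacheBlk *blk)
{
    if (blk->isSet(gem5::CacheBlk::WritableBit))
        blk->clearCoherenceBits(gem5::CacheBlk::WritableBit);
}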
virtual bool isValid() const
Checks if the entry is valid.
A coherent cache that can be arranged in flexible topologies.
Definition cache.hh:68
PacketPtr cleanEvictBlk(CacheBlk *blk)
Create a CleanEvict request for the given block.
Definition cache.cc:972
Cache(const CacheParams &p)
Instantiates a basic cache object.
Definition cache.cc:69
void recvTimingSnoopReq(PacketPtr pkt) override
Snoops bus transactions to maintain coherence.
Definition cache.cc:1261
bool isCachedAbove(PacketPtr pkt, bool is_timing=true)
Send up a snoop request and find cached copies.
Definition cache.cc:1392
void promoteWholeLineWrites(PacketPtr pkt)
Turn line-sized writes into WriteInvalidate transactions.
Definition cache.cc:301
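promoteWholeLineWrites is what makes the doFastWrites option (listed further down) effective: a store that covers the whole line never needs the old data, so no fill from below is required. A simplified sketch of the promotion check, not the exact implementation; the free-function form and name are illustrative, and the real member is additionally gated on doFastWrites:

#include "mem/packet.hh"

// Promote a line-sized, line-aligned, unmasked WriteReq to a whole-line write.
void promoteIfWholeLineWrite(gem5::PacketPtr pkt, unsigned blkSize)
{
    using gem5::MemCmd;
    if (pkt->cmd == MemCmd::WriteReq &&
        pkt->getSize() == blkSize &&
        pkt->getOffset(blkSize) == 0 &&
        !pkt->isMaskedWrite()) {
        // The store covers the whole line, so no fill is needed on a miss.
        pkt->cmd = MemCmd::WriteLineReq;
    }
}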
void serviceMSHRTargets(MSHR *mshr, const PacketPtr pkt, CacheBlk *blk) override
Service non-deferred MSHR targets using the received response.
Definition cache.cc:699
Cycles handleAtomicReqMiss(PacketPtr pkt, CacheBlk *&blk, PacketList &writebacks) override
Handle a request in atomic mode that missed in this cache.
Definition cache.cc:578
Tick recvAtomicSnoop(PacketPtr pkt) override
Snoop for the provided request in the cache and return the estimated time taken.
Definition cache.cc:1379
std::unordered_set< RequestPtr > outstandingSnoop
Store the outstanding requests that we are expecting snoop responses from so we can determine which s...
Definition cache.hh:80
void satisfyRequest(PacketPtr pkt, CacheBlk *blk, bool deferred_response=false, bool pending_downgrade=false) override
Perform any necessary updates to the block and perform any data exchange between the packet and the b...
Definition cache.cc:78
void recvTimingSnoopResp(PacketPtr pkt) override
Handle a snoop response.
Definition cache.cc:267
void recvTimingReq(PacketPtr pkt) override
Performs the access specified by the request.
Definition cache.cc:418
Tick recvAtomic(PacketPtr pkt) override
Performs the access specified by the request.
Definition cache.cc:668
void handleTimingReqMiss(PacketPtr pkt, CacheBlk *blk, Tick forward_time, Tick request_time) override
Definition cache.cc:324
bool sendMSHRQueuePacket(MSHR *mshr) override
Take an MSHR, turn it into a suitable downstream packet, and send it out.
Definition cache.cc:1420
void doTimingSupplyResponse(PacketPtr req_pkt, const uint8_t *blk_data, bool already_copied, bool pending_inval)
Definition cache.cc:1000
PacketPtr createMissPacket(PacketPtr cpu_pkt, CacheBlk *blk, bool needs_writable, bool is_whole_line_write) const override
Create an appropriate downstream bus request packet.
Definition cache.cc:492
void handleTimingReqHit(PacketPtr pkt, CacheBlk *blk, Tick request_time) override
Definition cache.cc:313
uint32_t handleSnoop(PacketPtr pkt, CacheBlk *blk, bool is_timing, bool is_deferred, bool pending_inval)
Perform an upward snoop if needed, and update the block state (possibly invalidating the block).
Definition cache.cc:1045
PacketPtr evictBlock(CacheBlk *blk) override
Evict a cache block.
Definition cache.cc:961
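The eviction path ties several of the entries above together: whether a victim leaves as a writeback or as a CleanEvict depends only on its dirty bit and the writebackClean option. A hedged sketch, in member context, that closely mirrors the shape of Cache::evictBlock (the function name here is an illustrative stand-in):

PacketPtr
evictSketch(CacheBlk *blk)   // illustrative stand-in for Cache::evictBlock
{
    // Dirty data (or clean data when writebackClean is set) travels
    // downstream as a writeback; otherwise a CleanEvict just tells the
    // snoop filter below that this copy is gone.
    PacketPtr pkt = (blk->isSet(CacheBlk::DirtyBit) || writebackClean) ?
        writebackBlk(blk) : cleanEvictBlk(blk);
    invalidateBlock(blk);
    return pkt;
}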
const bool doFastWrites
This cache should allocate a block on a line-sized write miss.
Definition cache.hh:73
void doWritebacks(PacketList &writebacks, Tick forward_time) override
Insert writebacks into the write buffer.
Definition cache.cc:190
bool access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat, PacketList &writebacks) override
Does all the processing necessary to perform the provided request.
Definition cache.cc:161
void doWritebacksAtomic(PacketList &writebacks) override
Send writebacks down the memory hierarchy in atomic mode.
Definition cache.cc:232
Tick clockEdge(Cycles cycles=Cycles(0)) const
Determine the tick when a cycle begins, by default the current one, but the argument also enables the...
Tick clockPeriod() const
Cycles ticksToCycles(Tick t) const
Cycles is a wrapper class for representing cycle counts, i.e.
Definition types.hh:79
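The clocked-object helpers above are how the cache turns its configured Cycles latencies into absolute Ticks. A small fragment showing the usual bookkeeping in the timing path (member context assumed; lookupLatency, forwardLatency and headerDelay are the fields documented on this page):

// Tag lookup is charged relative to the next clock edge, on top of any
// delay the packet has already accumulated in its header.
const Tick request_time = clockEdge(lookupLatency) + pkt->headerDelay;
// Requests forwarded downstream pay the forward latency instead.
const Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;
// On a hit the response can then be scheduled at request_time, e.g.:
// cpuSidePort.schedTimingResp(pkt, request_time);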
bool forceDeallocateTarget(MSHR *mshr)
Deallocate top target, possibly freeing the MSHR.
bool hasFromCache
Determine whether there was at least one non-snooping target coming from another cache.
Definition mshr.hh:181
Miss Status and handling Register.
Definition mshr.hh:75
bool wasWholeLineWrite
Track if we sent this as a whole line write or not.
Definition mshr.hh:124
void updateLockedRMWReadTarget(PacketPtr pkt)
Replaces the matching packet in the Targets list with a dummy packet to ensure the MSHR remains alloc...
Definition mshr.cc:786
TargetList extractServiceableTargets(PacketPtr pkt)
Extracts the subset of the targets that can be serviced given a received response.
Definition mshr.cc:548
void popTarget()
Pop first target.
Definition mshr.hh:482
void print(std::ostream &os, int verbosity=0, const std::string &prefix="") const override
Prints the contents of this MSHR for debugging.
Definition mshr.cc:731
int getNumTargets() const
Returns the current number of allocated targets.
Definition mshr.hh:446
bool hasPostDowngrade() const
Definition mshr.hh:334
QueueEntry::Target * getTarget() override
Returns a reference to the first target.
Definition mshr.hh:473
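Responses are distributed to the waiting requesters through the MSHR target API above. A simplified sketch of the loop at the heart of serviceMSHRTargets (error handling, deferred targets and most latency accounting omitted; member context assumed):

// Service every target that the received response 'pkt' can satisfy.
const Tick completion_time = clockEdge(responseLatency) + pkt->payloadDelay;
MSHR::TargetList targets = mshr->extractServiceableTargets(pkt);
for (auto &target : targets) {
    PacketPtr tgt_pkt = target.pkt;              // original upstream request
    // ... satisfy tgt_pkt from the now-valid block ...
    if (tgt_pkt->needsResponse()) {
        tgt_pkt->makeTimingResponse();           // turn the request into a response
        cpuSidePort.schedTimingResp(tgt_pkt, completion_time);
    }
}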
bool handleSnoop(PacketPtr target, Counter order)
Definition mshr.cc:424
bool isForward
True if the entry is just a simple forward from an upper level.
Definition mshr.hh:127
bool hasLockedRMWReadTarget()
Determine if there are any LockedRMWReads in the Targets list.
Definition mshr.cc:794
bool hasPostInvalidate() const
Definition mshr.hh:330
bool isSWPrefetch() const
Definition packet.hh:253
@ ReadRespWithInvalidate
Definition packet.hh:89
@ WritebackDirty
Definition packet.hh:93
@ StoreCondFailReq
Definition packet.hh:114
@ LockedRMWReadReq
Definition packet.hh:116
@ LockedRMWWriteReq
Definition packet.hh:118
@ WritebackClean
Definition packet.hh:94
@ LockedRMWWriteResp
Definition packet.hh:119
@ SCUpgradeFailReq
Definition packet.hh:106
@ UpgradeFailResp
Definition packet.hh:107
virtual std::string name() const
Definition named.hh:47
A Packet is used to encapsulate a transfer between two objects in the memory system (e....
Definition packet.hh:295
void setExpressSnoop()
The express snoop flag is used for two purposes.
Definition packet.hh:701
bool responderHadWritable() const
Definition packet.hh:719
bool isUpgrade() const
Definition packet.hh:596
bool isRead() const
Definition packet.hh:593
bool isSecure() const
Definition packet.hh:836
const PacketId id
Definition packet.hh:374
Addr getAddr() const
Definition packet.hh:807
bool isError() const
Definition packet.hh:622
bool isResponse() const
Definition packet.hh:598
uint32_t snoopDelay
Keep track of the extra delay incurred by snooping upwards before sending a request down the memory s...
Definition packet.hh:439
void makeTimingResponse()
Definition packet.hh:1080
bool needsWritable() const
Definition packet.hh:599
void print(std::ostream &o, int verbosity=0, const std::string &prefix="") const
Definition packet.cc:368
void copyError(Packet *pkt)
Definition packet.hh:805
bool satisfied() const
Definition packet.hh:755
bool needsResponse() const
Definition packet.hh:608
SenderState * senderState
This packet's sender state.
Definition packet.hh:545
uint32_t payloadDelay
The extra pipelining delay from seeing the packet until the end of payload is transmitted by the comp...
Definition packet.hh:449
bool matchAddr(const Addr addr, const bool is_secure) const
Check if packet corresponds to a given address and address space.
Definition packet.cc:403
uint32_t headerDelay
The extra delay from seeing the packet until the header is transmitted.
Definition packet.hh:431
void setResponderHadWritable()
On responding to a snoop request (which only happens for Modified or Owned lines),...
Definition packet.hh:713
Addr getOffset(unsigned int blk_size) const
Definition packet.hh:826
bool mustCheckAbove() const
Does the request need to check for cached copies of the same block in the memory hierarchy above.
Definition packet.hh:1425
bool hasData() const
Definition packet.hh:614
void copyResponderFlags(const PacketPtr pkt)
Copy the response flags from an input packet to this packet.
Definition packet.cc:324
bool hasRespData() const
Definition packet.hh:615
bool fromCache() const
Definition packet.hh:612
void setData(const uint8_t *p)
Copy data into the packet from the provided pointer.
Definition packet.hh:1293
bool isWrite() const
Definition packet.hh:594
void setDataFromBlock(const uint8_t *blk_data, int blkSize)
Copy data into the packet from the provided block pointer, which is aligned to the given block size.
Definition packet.hh:1312
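setData and setDataFromBlock, together with Packet::writeDataToBlock (not listed above), do the actual byte movement behind satisfyRequest. A simplified fragment of that exchange on a hit, assuming member context where blk and blkSize are available:

if (pkt->isRead()) {
    // Read hit: copy the line's bytes into the response packet.
    pkt->setDataFromBlock(blk->data, blkSize);
} else if (pkt->isWrite()) {
    // Write hit: merge the packet's bytes into the line and mark it dirty.
    pkt->writeDataToBlock(blk->data, blkSize);
    blk->setCoherenceBits(CacheBlk::DirtyBit);
}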
Addr getBlockAddr(unsigned int blk_size) const
Definition packet.hh:831
RequestPtr req
A pointer to the original request.
Definition packet.hh:377
unsigned getSize() const
Definition packet.hh:817
bool isCleanInvalidateRequest() const
Is this packet a clean invalidate request, e.g., clflush/clflushopt?
Definition packet.hh:1444
void setCacheResponding()
Snoop flags.
Definition packet.hh:653
bool isClean() const
Definition packet.hh:611
const T * getConstPtr() const
Definition packet.hh:1234
void setHasSharers()
On fills, the hasSharers flag is used by the caches in combination with the cacheResponding flag,...
Definition packet.hh:685
bool isLLSC() const
Definition packet.hh:620
bool cacheResponding() const
Definition packet.hh:659
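setCacheResponding, setHasSharers and cacheResponding implement the upward-snoop handshake: the flags carried back on the snoop tell the caches below who will supply the data and whether shared copies remain. A hedged fragment in the spirit of handleSnoop (simplified; 'blk' holds the snooped line, 'pkt' is the snoop request):

const bool invalidate = pkt->isInvalidate();
if (blk->isSet(CacheBlk::DirtyBit) && pkt->needsResponse()) {
    pkt->setCacheResponding();   // this cache will supply the (dirty) data
}
if (!invalidate) {
    pkt->setHasSharers();        // a copy of the line stays in this cache
}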
void makeAtomicResponse()
Definition packet.hh:1074
void setSatisfied()
Set when a request hits in a cache and the cache is not going to respond.
Definition packet.hh:749
MemCmd cmd
The command field of the packet.
Definition packet.hh:372
bool isMaskedWrite() const
Definition packet.hh:1450
bool isInvalidate() const
Definition packet.hh:609
bool isWholeLineWrite(unsigned blk_size)
Definition packet.hh:626
bool hasSharers() const
Definition packet.hh:686
bool isBlockCached() const
Definition packet.hh:760
void setBlockCached()
Definition packet.hh:759
void allocate()
Allocate memory for the packet.
Definition packet.hh:1367
bool isEviction() const
Definition packet.hh:610
bool isRequest() const
Definition packet.hh:597
A queue entry is holding packets that will be serviced as soon as resources are available.
PacketPtr pkt
Pending request packet.
bool isUncacheable() const
Addr blkAddr
Block aligned address.
bool isSecure
True if the entry targets the secure memory space.
Entry * findMatch(Addr blk_addr, bool is_secure, bool ignore_uncacheable=true) const
Find the first entry that matches the provided address.
Definition queue.hh:168
void schedTimingSnoopResp(PacketPtr pkt, Tick when)
Schedule the sending of a timing snoop response.
Definition qport.hh:159
void schedTimingResp(PacketPtr pkt, Tick when)
Schedule the sending of a timing response.
Definition qport.hh:94
Tick sendAtomic(PacketPtr pkt)
Send an atomic request packet, where the data is moved and the state is updated in zero time,...
Definition port.hh:552
bool sendTimingReq(PacketPtr pkt)
Attempt to send a timing request to the responder port by calling its corresponding receive function.
Definition port.hh:603
@ SECURE
The request targets the secure memory space.
Definition request.hh:186
@ wbRequestorId
This requestor id is used for writeback requests by the caches.
Definition request.hh:274
Tick sendAtomicSnoop(PacketPtr pkt)
Send an atomic snoop request packet, where the data is moved and the state is updated in zero time,...
Definition port.hh:410
void sendTimingSnoopReq(PacketPtr pkt)
Attempt to send a timing snoop request packet to the request port by calling its corresponding receiv...
Definition port.hh:475
RequestorID maxRequestors()
Get the number of requestors registered in the system.
Definition system.hh:495
bool isSecure() const
Check if this block holds data from the secure memory space.
bool allocate() const
Should writes allocate?
Definition base.hh:1414
int getNumTargets() const
Returns the current number of allocated targets.
Target * getTarget() override
Returns a reference to the first target.
Cycles getDecompressionLatency(const CacheBlk *blk)
Get the decompression latency if the block is compressed.
Definition base.cc:196
STL list class.
Definition stl.hh:51
#define panic(...)
This implements a cprintf based panic() function.
Definition logging.hh:188
#define gem5_assert(cond,...)
The assert macro will function like a normal assert, but will use panic instead of straight abort().
Definition logging.hh:317
#define panic_if(cond,...)
Conditional panic macro that checks the supplied condition and only panics if the condition is true a...
Definition logging.hh:214
#define warn(...)
Definition logging.hh:256
Declaration of a common base class for cache tagstore objects.
Miss Status and Handling Register (MSHR) declaration.
Copyright (c) 2024 Pranith Kumar; Copyright (c) 2020 Inria. All rights reserved.
Definition binary32.hh:36
std::shared_ptr< Request > RequestPtr
Definition request.hh:94
uint64_t Addr
Address type. This will probably be moved somewhere else in the near future.
Definition types.hh:147
uint64_t Tick
Tick count type.
Definition types.hh:58
Declaration of a request, the overall memory request consisting of the parts of the request that are ...
statistics::Vector missLatency
Total number of ticks per thread/command spent waiting for a miss.
Definition base.hh:1023
statistics::Vector mshrUncacheable
Number of misses that miss in the MSHRs, per command and thread.
Definition base.hh:1035
CacheCmdStats & cmdStats(const PacketPtr p)
Definition base.hh:1054
Write queue entry.

Generated on Tue Jun 18 2024 16:24:04 for gem5 by doxygen 1.11.0