gem5 v23.0.0.1
cache.cc
1/*
2 * Copyright (c) 2010-2019 ARM Limited
3 * All rights reserved.
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Copyright (c) 2002-2005 The Regents of The University of Michigan
15 * Copyright (c) 2010,2015 Advanced Micro Devices, Inc.
16 * All rights reserved.
17 *
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions are
20 * met: redistributions of source code must retain the above copyright
21 * notice, this list of conditions and the following disclaimer;
22 * redistributions in binary form must reproduce the above copyright
23 * notice, this list of conditions and the following disclaimer in the
24 * documentation and/or other materials provided with the distribution;
25 * neither the name of the copyright holders nor the names of its
26 * contributors may be used to endorse or promote products derived from
27 * this software without specific prior written permission.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
30 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
31 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
32 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
33 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
34 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
35 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
36 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
37 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
38 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
39 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
40 */
41
47#include "mem/cache/cache.hh"
48
49#include <cassert>
50
51#include "base/compiler.hh"
52#include "base/logging.hh"
53#include "base/trace.hh"
54#include "base/types.hh"
55#include "debug/Cache.hh"
56#include "debug/CacheTags.hh"
57#include "debug/CacheVerbose.hh"
58#include "enums/Clusivity.hh"
60#include "mem/cache/mshr.hh"
61#include "mem/cache/tags/base.hh"
62#include "mem/cache/write_queue_entry.hh"
63#include "mem/request.hh"
64#include "params/Cache.hh"
65
66namespace gem5
67{
68
69Cache::Cache(const CacheParams &p)
70 : BaseCache(p, p.system->cacheLineSize()),
71 doFastWrites(true)
72{
73 assert(p.tags);
74 assert(p.replacement_policy);
75}
76
77void
78Cache::satisfyRequest(PacketPtr pkt, CacheBlk *blk,
79 bool deferred_response, bool pending_downgrade)
80{
81 BaseCache::satisfyRequest(pkt, blk);
82
83 if (pkt->isRead()) {
84 // determine if this read is from a (coherent) cache or not
85 if (pkt->fromCache()) {
86 assert(pkt->getSize() == blkSize);
87 // special handling for coherent block requests from
88 // upper-level caches
89 if (pkt->needsWritable()) {
90 // sanity check
91 assert(pkt->cmd == MemCmd::ReadExReq ||
92 pkt->cmd == MemCmd::SCUpgradeFailReq);
93 assert(!pkt->hasSharers());
94
95 // if we have a dirty copy, make sure the recipient
96 // keeps it marked dirty (in the modified state)
97 if (blk->isSet(CacheBlk::DirtyBit)) {
98 pkt->setCacheResponding();
99 blk->clearCoherenceBits(CacheBlk::DirtyBit);
100 }
101 } else if (blk->isSet(CacheBlk::WritableBit) &&
102 !pending_downgrade && !pkt->hasSharers() &&
103 pkt->cmd != MemCmd::ReadCleanReq) {
104 // we can give the requestor a writable copy on a read
105 // request if:
106 // - we have a writable copy at this level (& below)
107 // - we don't have a pending snoop from below
108 // signaling another read request
109 // - no other cache above has a copy (otherwise it
110 // would have set hasSharers flag when
111 // snooping the packet)
112 // - the read has not explicitly asked for a clean
113 // copy of the line
114 if (blk->isSet(CacheBlk::DirtyBit)) {
115 // special considerations if we're owner:
116 if (!deferred_response) {
117 // respond with the line in Modified state
118 // (cacheResponding set, hasSharers not set)
119 pkt->setCacheResponding();
120
121 // if this cache is mostly inclusive, we
122 // keep the block in the Exclusive state,
123 // and pass it upwards as Modified
124 // (writable and dirty), hence we have
125 // multiple caches, all on the same path
126 // towards memory, all considering the
127 // same block writable, but only one
128 // considering it Modified
129
130 // we get away with multiple caches (on
131 // the same path to memory) considering
132 // the block writeable as we always enter
133 // the cache hierarchy through a cache,
134 // and first snoop upwards in all other
135 // branches
136 blk->clearCoherenceBits(CacheBlk::DirtyBit);
137 } else {
138 // if we're responding after our own miss,
139 // there's a window where the recipient didn't
140 // know it was getting ownership and may not
141 // have responded to snoops correctly, so we
142 // have to respond with a shared line
143 pkt->setHasSharers();
144 }
145 }
146 } else {
147 // otherwise only respond with a shared copy
148 pkt->setHasSharers();
149 }
150 }
151 }
152}
153
154/////////////////////////////////////////////////////
155//
156// Access path: requests coming in from the CPU side
157//
158/////////////////////////////////////////////////////
159
160bool
161Cache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat,
162 PacketList &writebacks)
163{
164
165 if (pkt->req->isUncacheable()) {
166 assert(pkt->isRequest());
167
168 gem5_assert(!(isReadOnly && pkt->isWrite()),
169 "Should never see a write in a read-only cache %s\n",
170 name());
171
172 DPRINTF(Cache, "%s for %s\n", __func__, pkt->print());
173
174 // flush and invalidate any existing block
175 CacheBlk *old_blk(tags->findBlock(pkt->getAddr(), pkt->isSecure()));
176 if (old_blk && old_blk->isValid()) {
177 BaseCache::evictBlock(old_blk, writebacks);
178 }
179
180 blk = nullptr;
181 // lookupLatency is the latency in case the request is uncacheable.
182 lat = lookupLatency;
183 return false;
184 }
185
186 return BaseCache::access(pkt, blk, lat, writebacks);
187}
188
189void
190Cache::doWritebacks(PacketList& writebacks, Tick forward_time)
191{
192 while (!writebacks.empty()) {
193 PacketPtr wbPkt = writebacks.front();
194 // We use forwardLatency here because we are copying writebacks to
195 // write buffer.
196
197 // Call isCachedAbove for Writebacks, CleanEvicts and
198 // WriteCleans to discover if the block is cached above.
199 if (isCachedAbove(wbPkt)) {
200 if (wbPkt->cmd == MemCmd::CleanEvict) {
201 // Delete CleanEvict because cached copies exist above. The
202 // packet destructor will delete the request object because
203 // this is a non-snoop request packet which does not require a
204 // response.
205 delete wbPkt;
206 } else if (wbPkt->cmd == MemCmd::WritebackClean) {
207 // clean writeback, do not send since the block is
208 // still cached above
209 assert(writebackClean);
210 delete wbPkt;
211 } else {
212 assert(wbPkt->cmd == MemCmd::WritebackDirty ||
213 wbPkt->cmd == MemCmd::WriteClean);
214 // Set BLOCK_CACHED flag in Writeback and send below, so that
215 // the Writeback does not reset the bit corresponding to this
216 // address in the snoop filter below.
217 wbPkt->setBlockCached();
218 allocateWriteBuffer(wbPkt, forward_time);
219 }
220 } else {
221 // If the block is not cached above, send packet below. Both
222 // CleanEvict and Writeback with BLOCK_CACHED flag cleared will
223 // reset the bit corresponding to this address in the snoop filter
224 // below.
225 allocateWriteBuffer(wbPkt, forward_time);
226 }
227 writebacks.pop_front();
228 }
229}
230
231void
232Cache::doWritebacksAtomic(PacketList& writebacks)
233{
234 while (!writebacks.empty()) {
235 PacketPtr wbPkt = writebacks.front();
236 // Call isCachedAbove for both Writebacks and CleanEvicts. If
237 // isCachedAbove returns true we set BLOCK_CACHED flag in Writebacks
238 // and discard CleanEvicts.
239 if (isCachedAbove(wbPkt, false)) {
240 if (wbPkt->cmd == MemCmd::WritebackDirty ||
241 wbPkt->cmd == MemCmd::WriteClean) {
242 // Set BLOCK_CACHED flag in Writeback and send below,
243 // so that the Writeback does not reset the bit
244 // corresponding to this address in the snoop filter
245 // below. We can discard CleanEvicts because cached
246 // copies exist above. Atomic mode isCachedAbove
247 // modifies packet to set BLOCK_CACHED flag
248 memSidePort.sendAtomic(wbPkt);
249 }
250 } else {
251 // If the block is not cached above, send packet below. Both
252 // CleanEvict and Writeback with BLOCK_CACHED flag cleared will
253 // reset the bit corresponding to this address in the snoop filter
254 // below.
255 memSidePort.sendAtomic(wbPkt);
256 }
257 writebacks.pop_front();
258 // In case of CleanEvicts, the packet destructor will delete the
259 // request object because this is a non-snoop request packet which
260 // does not require a response.
261 delete wbPkt;
262 }
263}
264
265
266void
267Cache::recvTimingSnoopResp(PacketPtr pkt)
268{
269 DPRINTF(Cache, "%s for %s\n", __func__, pkt->print());
270
271 // determine if the response is from a snoop request we created
272 // (in which case it should be in the outstandingSnoop), or if we
273 // merely forwarded someone else's snoop request
274 const bool forwardAsSnoop = outstandingSnoop.find(pkt->req) ==
275 outstandingSnoop.end();
276
277 if (!forwardAsSnoop) {
278 // the packet came from this cache, so sink it here and do not
279 // forward it
280 assert(pkt->cmd == MemCmd::HardPFResp);
281
282 outstandingSnoop.erase(pkt->req);
283
284 DPRINTF(Cache, "Got prefetch response from above for addr "
285 "%#llx (%s)\n", pkt->getAddr(), pkt->isSecure() ? "s" : "ns");
286 recvTimingResp(pkt);
287 return;
288 }
289
290 // forwardLatency is set here because there is a response from an
291 // upper level cache.
292 // To account for the delay incurred if the packet comes from the
293 // bus, we also charge headerDelay.
294 Tick snoop_resp_time = clockEdge(forwardLatency) + pkt->headerDelay;
295 // Reset the timing of the packet.
296 pkt->headerDelay = pkt->payloadDelay = 0;
297 memSidePort.schedTimingSnoopResp(pkt, snoop_resp_time);
298}
299
300void
301Cache::promoteWholeLineWrites(PacketPtr pkt)
302{
303 // Cache line clearing instructions
304 if (doFastWrites && (pkt->cmd == MemCmd::WriteReq) &&
305 (pkt->getSize() == blkSize) && (pkt->getOffset(blkSize) == 0) &&
306 !pkt->isMaskedWrite()) {
307 pkt->cmd = MemCmd::WriteLineReq;
308 DPRINTF(Cache, "packet promoted from Write to WriteLineReq\n");
309 }
310}
311
312void
313Cache::handleTimingReqHit(PacketPtr pkt, CacheBlk *blk, Tick request_time)
314{
315 // should never be satisfying an uncacheable access as we
316 // flush and invalidate any existing block as part of the
317 // lookup
318 assert(!pkt->req->isUncacheable());
319
320 BaseCache::handleTimingReqHit(pkt, blk, request_time);
321}
322
323void
324Cache::handleTimingReqMiss(PacketPtr pkt, CacheBlk *blk, Tick forward_time,
325 Tick request_time)
326{
327
328 // These should always hit due to the earlier Locked Read
329 assert(pkt->cmd != MemCmd::LockedRMWWriteReq);
330 if (pkt->req->isUncacheable()) {
331 // ignore any existing MSHR if we are dealing with an
332 // uncacheable request
333
334 // should have flushed and have no valid block
335 assert(!blk || !blk->isValid());
336
337 stats.cmdStats(pkt).mshrUncacheable[pkt->req->requestorId()]++;
338
339 if (pkt->isWrite()) {
340 allocateWriteBuffer(pkt, forward_time);
341 } else {
342 assert(pkt->isRead());
343
344 // uncacheable accesses always allocate a new MSHR
345
346 // Here we are using forward_time, modelling the latency of
347 // a miss (outbound) just as forwardLatency, neglecting the
348 // lookupLatency component.
349 allocateMissBuffer(pkt, forward_time);
350 }
351
352 return;
353 }
354
355 Addr blk_addr = pkt->getBlockAddr(blkSize);
356
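    // look up any outstanding miss to the same block; a match means a
    // request for this line is already in flight and the new request
    // can be coalesced into the existing MSHR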
357 MSHR *mshr = mshrQueue.findMatch(blk_addr, pkt->isSecure());
358
359 // Software prefetch handling:
360 // To keep the core from waiting on data it won't look at
361 // anyway, send back a response with dummy data. Miss handling
362 // will continue asynchronously. Unfortunately, the core will
363 // insist upon freeing the original Packet/Request, so we have to
364 // create a new pair with a different lifecycle. Note that this
365 // processing happens before any MSHR munging on the behalf of
366 // this request because this new Request will be the one stored
367 // into the MSHRs, not the original.
368 if (pkt->cmd.isSWPrefetch()) {
369 assert(pkt->needsResponse());
370 assert(pkt->req->hasPaddr());
371 assert(!pkt->req->isUncacheable());
372
373 // There's no reason to add a prefetch as an additional target
374 // to an existing MSHR. If an outstanding request is already
375 // in progress, there is nothing for the prefetch to do.
376 // If this is the case, we don't even create a request at all.
377 PacketPtr pf = nullptr;
378
379 if (!mshr) {
380 // copy the request and create a new SoftPFReq packet
381 RequestPtr req = std::make_shared<Request>(pkt->req->getPaddr(),
382 pkt->req->getSize(),
383 pkt->req->getFlags(),
384 pkt->req->requestorId());
385 pf = new Packet(req, pkt->cmd);
386 pf->allocate();
387 assert(pf->matchAddr(pkt));
388 assert(pf->getSize() == pkt->getSize());
389 }
390
391 pkt->makeTimingResponse();
392
393 // request_time is used here, taking into account lat and the delay
394 // charged if the packet comes from the xbar.
395 cpuSidePort.schedTimingResp(pkt, request_time);
396
397 // If an outstanding request is in progress (we found an
398 // MSHR) this is set to null
399 pkt = pf;
400 }
401
402 BaseCache::handleTimingReqMiss(pkt, mshr, blk, forward_time, request_time);
403}
404
405void
407{
408 DPRINTF(CacheTags, "%s tags:\n%s\n", __func__, tags->print());
409
410 promoteWholeLineWrites(pkt);
411
412 if (pkt->cacheResponding()) {
413 // a cache above us (but not where the packet came from) is
414 // responding to the request, in other words it has the line
415 // in Modified or Owned state
416 DPRINTF(Cache, "Cache above responding to %s: not responding\n",
417 pkt->print());
418
419 // if the packet needs the block to be writable, and the cache
420 // that has promised to respond (setting the cache responding
421 // flag) is not providing writable (it is in Owned rather than
422 // the Modified state), we know that there may be other Shared
423 // copies in the system; go out and invalidate them all
424 assert(pkt->needsWritable() && !pkt->responderHadWritable());
425
426 // an upstream cache that had the line in Owned state
427 // (dirty, but not writable), is responding and thus
428 // transferring the dirty line from one branch of the
429 // cache hierarchy to another
430
431 // send out an express snoop and invalidate all other
432 // copies (snooping a packet that needs writable is the
433 // same as an invalidation), thus turning the Owned line
434 // into a Modified line, note that we don't invalidate the
435 // block in the current cache or any other cache on the
436 // path to memory
437
438 // create a downstream express snoop with cleared packet
439 // flags, there is no need to allocate any data as the
440 // packet is merely used to co-ordinate state transitions
441 Packet *snoop_pkt = new Packet(pkt, true, false);
442
443 // also reset the bus time that the original packet has
444 // not yet paid for
445 snoop_pkt->headerDelay = snoop_pkt->payloadDelay = 0;
446
447 // make this an instantaneous express snoop, and let the
448 // other caches in the system know that another cache
449 // is responding, because we have found the authoritative
450 // copy (Modified or Owned) that will supply the right
451 // data
452 snoop_pkt->setExpressSnoop();
453 snoop_pkt->setCacheResponding();
454
455 // this express snoop travels towards the memory, and at
456 // every crossbar it is snooped upwards thus reaching
457 // every cache in the system
458 [[maybe_unused]] bool success = memSidePort.sendTimingReq(snoop_pkt);
459 // express snoops always succeed
460 assert(success);
461
462 // main memory will delete the snoop packet
463
464 // queue for deletion, as opposed to immediate deletion, as
465 // the sending cache is still relying on the packet
466 pendingDelete.reset(pkt);
467
468 // no need to take any further action in this particular cache
469 // as an upstream cache has already committed to responding,
470 // and we have already sent out any express snoops in the
471 // section above to ensure all other copies in the system are
472 // invalidated
473 return;
474 }
475
476 BaseCache::recvTimingReq(pkt);
477}
478
479PacketPtr
480Cache::createMissPacket(PacketPtr cpu_pkt, CacheBlk *blk,
481 bool needsWritable,
482 bool is_whole_line_write) const
483{
484 // should never see evictions here
485 assert(!cpu_pkt->isEviction());
486
487 bool blkValid = blk && blk->isValid();
488
489 if (cpu_pkt->req->isUncacheable() ||
490 (!blkValid && cpu_pkt->isUpgrade()) ||
491 cpu_pkt->cmd == MemCmd::InvalidateReq || cpu_pkt->isClean()) {
492 // uncacheable requests and upgrades from upper-level caches
493 // that missed completely just go through as is
494 return nullptr;
495 }
496
497 assert(cpu_pkt->needsResponse());
498
499 MemCmd cmd;
500 // @TODO make useUpgrades a parameter.
501 // Note that ownership protocols require upgrade, otherwise a
502 // write miss on a shared owned block will generate a ReadExcl,
503 // which will clobber the owned copy.
504 const bool useUpgrades = true;
505 assert(cpu_pkt->cmd != MemCmd::WriteLineReq || is_whole_line_write);
506 if (is_whole_line_write) {
507 assert(!blkValid || !blk->isSet(CacheBlk::WritableBit));
508 // forward as invalidate to all other caches, this gives us
509 // the line in Exclusive state, and invalidates all other
510 // copies
511 cmd = MemCmd::InvalidateReq;
512 } else if (blkValid && useUpgrades) {
513 // only reason to be here is that blk is read only and we need
514 // it to be writable
515 assert(needsWritable);
516 assert(!blk->isSet(CacheBlk::WritableBit));
517 cmd = cpu_pkt->isLLSC() ? MemCmd::SCUpgradeReq : MemCmd::UpgradeReq;
518 } else if (cpu_pkt->cmd == MemCmd::SCUpgradeFailReq ||
519 cpu_pkt->cmd == MemCmd::StoreCondFailReq) {
520 // Even though this SC will fail, we still need to send out the
521 // request and get the data to supply it to other snoopers in the case
522 // where the determination that the StoreCond fails is delayed due to
523 // all caches not being on the same local bus.
524 cmd = MemCmd::SCUpgradeFailReq;
525 } else {
526 // block is invalid
527
528 // If the request does not need a writable there are two cases
529 // where we need to ensure the response will not fetch the
530 // block in dirty state:
531 // * this cache is read only and it does not perform
532 // writebacks,
533 // * this cache is mostly exclusive and will not fill (since
534 // it does not fill, it will have to write back the dirty data
535 // immediately, which generates unnecessary writebacks).
536 bool force_clean_rsp = isReadOnly || clusivity == enums::mostly_excl;
537 cmd = needsWritable ? MemCmd::ReadExReq :
538 (force_clean_rsp ? MemCmd::ReadCleanReq : MemCmd::ReadSharedReq);
539 }
540 PacketPtr pkt = new Packet(cpu_pkt->req, cmd, blkSize);
541
542 // if there are upstream caches that have already marked the
543 // packet as having sharers (not passing writable), pass that info
544 // downstream
545 if (cpu_pkt->hasSharers() && !needsWritable) {
546 // note that cpu_pkt may have spent a considerable time in the
547 // MSHR queue and that the information could possibly be out
548 // of date, however, there is no harm in conservatively
549 // assuming the block has sharers
550 pkt->setHasSharers();
551 DPRINTF(Cache, "%s: passing hasSharers from %s to %s\n",
552 __func__, cpu_pkt->print(), pkt->print());
553 }
554
555 // the packet should be block aligned
556 assert(pkt->getAddr() == pkt->getBlockAddr(blkSize));
557
558 pkt->allocate();
559 DPRINTF(Cache, "%s: created %s from %s\n", __func__, pkt->print(),
560 cpu_pkt->print());
561 return pkt;
562}
563
564
565Cycles
566Cache::handleAtomicReqMiss(PacketPtr pkt, CacheBlk *&blk,
567 PacketList &writebacks)
568{
569 // deal with the packets that go through the write path of
570 // the cache, i.e. any evictions and writes
571 if (pkt->isEviction() || pkt->cmd == MemCmd::WriteClean ||
572 (pkt->req->isUncacheable() && pkt->isWrite())) {
573 Cycles latency = ticksToCycles(memSidePort.sendAtomic(pkt));
574
575 // at this point, if the request was an uncacheable write
576 // request, it has been satisfied by a memory below and the
577 // packet carries the response back
578 assert(!(pkt->req->isUncacheable() && pkt->isWrite()) ||
579 pkt->isResponse());
580
581 return latency;
582 }
583
584 // only misses left
585
586 PacketPtr bus_pkt = createMissPacket(pkt, blk, pkt->needsWritable(),
587 pkt->isWholeLineWrite(blkSize));
588
589 bool is_forward = (bus_pkt == nullptr);
590
591 if (is_forward) {
592 // just forwarding the same request to the next level
593 // no local cache operation involved
594 bus_pkt = pkt;
595 }
596
597 DPRINTF(Cache, "%s: Sending an atomic %s\n", __func__,
598 bus_pkt->print());
599
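    // capture the current block state purely for the debug trace below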
600 const std::string old_state = blk ? blk->print() : "";
601
602 Cycles latency = ticksToCycles(memSidePort.sendAtomic(bus_pkt));
603
604 bool is_invalidate = bus_pkt->isInvalidate();
605
606 // We are now dealing with the response handling
607 DPRINTF(Cache, "%s: Receive response: %s for %s\n", __func__,
608 bus_pkt->print(), old_state);
609
610 // If packet was a forward, the response (if any) is already
611 // in place in the bus_pkt == pkt structure, so we don't need
612 // to do anything. Otherwise, use the separate bus_pkt to
613 // generate response to pkt and then delete it.
614 if (!is_forward) {
615 if (pkt->needsResponse()) {
616 assert(bus_pkt->isResponse());
617 if (bus_pkt->isError()) {
618 pkt->makeAtomicResponse();
619 pkt->copyError(bus_pkt);
620 } else if (pkt->isWholeLineWrite(blkSize)) {
621 // note the use of pkt, not bus_pkt here.
622
623 // write-line request to the cache that promoted
624 // the write to a whole line
625 const bool allocate = allocOnFill(pkt->cmd) &&
626 (!writeAllocator || writeAllocator->allocate());
627 blk = handleFill(bus_pkt, blk, writebacks, allocate);
628 assert(blk != NULL);
629 is_invalidate = false;
630 satisfyRequest(pkt, blk);
631 } else if (bus_pkt->isRead() ||
632 bus_pkt->cmd == MemCmd::UpgradeResp) {
633 // we're updating cache state to allow us to
634 // satisfy the upstream request from the cache
635 blk = handleFill(bus_pkt, blk, writebacks,
636 allocOnFill(pkt->cmd));
637 satisfyRequest(pkt, blk);
638 maintainClusivity(pkt->fromCache(), blk);
639 } else {
640 // we're satisfying the upstream request without
641 // modifying cache state, e.g., a write-through
642 pkt->makeAtomicResponse();
643 }
644 }
645 delete bus_pkt;
646 }
647
648 if (is_invalidate && blk && blk->isValid()) {
649 invalidateBlock(blk);
650 }
651
652 return latency;
653}
654
655Tick
656Cache::recvAtomic(PacketPtr pkt)
657{
658 promoteWholeLineWrites(pkt);
659
660 // follow the same flow as in recvTimingReq, and check if a cache
661 // above us is responding
662 if (pkt->cacheResponding()) {
663 assert(!pkt->req->isCacheInvalidate());
664 DPRINTF(Cache, "Cache above responding to %s: not responding\n",
665 pkt->print());
666
667 // if a cache is responding, and it had the line in Owned
668 // rather than Modified state, we need to invalidate any
669 // copies that are not on the same path to memory
670 assert(pkt->needsWritable() && !pkt->responderHadWritable());
671
672 return memSidePort.sendAtomic(pkt);
673 }
674
675 return BaseCache::recvAtomic(pkt);
676}
677
678
679/////////////////////////////////////////////////////
680//
681// Response handling: responses from the memory side
682//
683/////////////////////////////////////////////////////
684
685
686void
687Cache::serviceMSHRTargets(MSHR *mshr, const PacketPtr pkt, CacheBlk *blk)
688{
689 QueueEntry::Target *initial_tgt = mshr->getTarget();
690 // First offset for critical word first calculations
691 const int initial_offset = initial_tgt->pkt->getOffset(blkSize);
692
693 const bool is_error = pkt->isError();
694 // allow invalidation responses originating from write-line
695 // requests to be discarded
696 bool is_invalidate = pkt->isInvalidate() &&
697 !mshr->wasWholeLineWrite;
698
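    // track where the serviced targets originated; used below to mark the
    // block as prefetched if only the prefetcher asked for it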
699 bool from_core = false;
700 bool from_pref = false;
701
702 if (pkt->cmd == MemCmd::LockedRMWWriteResp) {
703 // This is the fake response generated by the write half of the RMW;
704 // see comments in recvTimingReq(). The first target on the list
705 // should be the LockedRMWReadReq which has already been satisfied,
706 // either because it was a hit (and the MSHR was allocated in
707 // recvTimingReq()) or because it was left there after the initial
708 // response in extractServiceableTargets. In either case, we
709 // don't need to respond now, so pop it off to prevent the loop
710 // below from generating another response.
711 assert(initial_tgt->pkt->cmd == MemCmd::LockedRMWReadReq);
712 mshr->popTarget();
713 delete initial_tgt->pkt;
714 initial_tgt = nullptr;
715 }
716
717 MSHR::TargetList targets = mshr->extractServiceableTargets(pkt);
718 for (auto &target: targets) {
719 Packet *tgt_pkt = target.pkt;
720 switch (target.source) {
721 case MSHR::Target::FromCPU:
722 from_core = true;
723
724 Tick completion_time;
725 // Here we charge on completion_time the delay of the xbar if the
726 // packet comes from it, charged on headerDelay.
727 completion_time = pkt->headerDelay;
728
729 // Software prefetch handling for cache closest to core
730 if (tgt_pkt->cmd.isSWPrefetch()) {
731 if (tgt_pkt->needsWritable()) {
732 // All other copies of the block were invalidated and we
733 // have an exclusive copy.
734
735 // The coherence protocol assumes that if we fetched an
736 // exclusive copy of the block, we have the intention to
737 // modify it. Therefore the MSHR for the PrefetchExReq has
738 // been the point of ordering and this cache has committed
739 // to respond to snoops for the block.
740 //
741 // In most cases this is true anyway - a PrefetchExReq
742 // will be followed by a WriteReq. However, if that
743 // doesn't happen, the block is not marked as dirty and
744 // the cache doesn't respond to snoops even though it has
745 // committed to do so.
746 //
747 // To avoid deadlocks in cases where there is a snoop
748 // between the PrefetchExReq and the expected WriteReq, we
749 // proactively mark the block as Dirty.
750 assert(blk);
751 blk->setCoherenceBits(CacheBlk::DirtyBit);
752
753 panic_if(isReadOnly, "Prefetch exclusive requests from "
754 "read-only cache %s\n", name());
755 }
756
757 // a software prefetch would have already been ack'd
758 // immediately with dummy data so the core would be able to
759 // retire it. This request completes right here, so we
760 // deallocate it.
761 delete tgt_pkt;
762 break; // skip response
763 }
764
765 // unlike the other packet flows, where data is found in other
766 // caches or memory and brought back, write-line requests always
767 // have the data right away, so the above check for "is fill?"
768 // cannot actually be determined until examining the stored MSHR
769 // state. We "catch up" with that logic here, which is duplicated
770 // from above.
771 if (tgt_pkt->cmd == MemCmd::WriteLineReq) {
772 assert(!is_error);
773 assert(blk);
774 assert(blk->isSet(CacheBlk::WritableBit));
775 }
776
777 // Here we decide whether we will satisfy the target using
778 // data from the block or from the response. We use the
779 // block data to satisfy the request when the block is
780 // present and valid and in addition the response is not
781 // forwarding data to the cache above (we didn't fill
782 // either); otherwise we use the packet data.
783 if (blk && blk->isValid() &&
784 (!mshr->isForward || !pkt->hasData())) {
785 satisfyRequest(tgt_pkt, blk, true, mshr->hasPostDowngrade());
786
787 // How many bytes past the first request is this one
788 int transfer_offset =
789 tgt_pkt->getOffset(blkSize) - initial_offset;
790 if (transfer_offset < 0) {
791 transfer_offset += blkSize;
792 }
793
794 // If not critical word (offset) return payloadDelay.
795 // responseLatency is the latency of the return path
796 // from lower level caches/memory to an upper level cache or
797 // the core.
798 completion_time += clockEdge(responseLatency) +
799 (transfer_offset ? pkt->payloadDelay : 0);
800
801 assert(!tgt_pkt->req->isUncacheable());
802
803 assert(tgt_pkt->req->requestorId() < system->maxRequestors());
804 stats.cmdStats(tgt_pkt)
805 .missLatency[tgt_pkt->req->requestorId()] +=
806 completion_time - target.recvTime;
807
808 if (tgt_pkt->cmd == MemCmd::LockedRMWReadReq) {
809 // We're going to leave a target in the MSHR until the
810 // write half of the RMW occurs (see comments above in
811 // recvTimingReq()). Since we'll be using the current
812 // request packet (which has the allocated data pointer)
813 // to form the response, we have to allocate a new dummy
814 // packet to save in the MSHR target.
815 mshr->updateLockedRMWReadTarget(tgt_pkt);
816 // skip the rest of target processing after we
817 // send the response
818 // Mark block inaccessible until write arrives
819 blk->clearCoherenceBits(CacheBlk::WritableBit);
820 blk->clearCoherenceBits(CacheBlk::ReadableBit);
821 }
822 } else if (pkt->cmd == MemCmd::UpgradeFailResp) {
823 // failed StoreCond upgrade
824 assert(tgt_pkt->cmd == MemCmd::StoreCondReq ||
825 tgt_pkt->cmd == MemCmd::StoreCondFailReq ||
826 tgt_pkt->cmd == MemCmd::SCUpgradeFailReq);
827 // responseLatency is the latency of the return path
828 // from lower level caches/memory to an upper level cache or
829 // the core.
830 completion_time += clockEdge(responseLatency) +
831 pkt->payloadDelay;
832 tgt_pkt->req->setExtraData(0);
833 } else if (pkt->cmd == MemCmd::LockedRMWWriteResp) {
834 // Fake response on LockedRMW completion, see above.
835 // Since the data is already in the cache, we just use
836 // responseLatency with no extra penalties.
837 completion_time = clockEdge(responseLatency);
838 } else {
839 if (is_invalidate && blk && blk->isValid()) {
840 // We are about to send a response to a cache above
841 // that asked for an invalidation; we need to
842 // invalidate our copy immediately as the most
843 // up-to-date copy of the block will now be in the
844 // cache above. It will also prevent this cache from
845 // responding (if the block was previously dirty) to
846 // snoops as they should snoop the caches above where
847 // they will get the response from.
848 invalidateBlock(blk);
849 }
850 // not a cache fill, just forwarding response
851 // responseLatency is the latency of the return path
852 // from lower level caches/memory to the core.
853 completion_time += clockEdge(responseLatency) +
854 pkt->payloadDelay;
855 if (!is_error) {
856 if (pkt->isRead()) {
857 // sanity check
858 assert(pkt->matchAddr(tgt_pkt));
859 assert(pkt->getSize() >= tgt_pkt->getSize());
860
861 tgt_pkt->setData(pkt->getConstPtr<uint8_t>());
862 } else {
863 // MSHR targets can read data either from the
864 // block or the response pkt. If we can't get data
865 // from the block (i.e., invalid or has old data)
866 // or the response (did not bring in any data)
867 // then make sure that the target didn't expect
868 // any.
869 assert(!tgt_pkt->hasRespData());
870 }
871 }
872
873 // this response did not allocate here and therefore
874 // it was not consumed, make sure that any flags are
875 // carried over to cache above
876 tgt_pkt->copyResponderFlags(pkt);
877 }
878 tgt_pkt->makeTimingResponse();
879 // if this packet is an error copy that to the new packet
880 if (is_error)
881 tgt_pkt->copyError(pkt);
882 if (tgt_pkt->cmd == MemCmd::ReadResp &&
883 (is_invalidate || mshr->hasPostInvalidate())) {
884 // If intermediate cache got ReadRespWithInvalidate,
885 // propagate that. Response should not have
886 // isInvalidate() set otherwise.
887 tgt_pkt->cmd = MemCmd::ReadRespWithInvalidate;
888 DPRINTF(Cache, "%s: updated cmd to %s\n", __func__,
889 tgt_pkt->print());
890 }
891 // Reset the bus additional time as it is now accounted for
892 tgt_pkt->headerDelay = tgt_pkt->payloadDelay = 0;
893 cpuSidePort.schedTimingResp(tgt_pkt, completion_time);
894 break;
895
896 case MSHR::Target::FromPrefetcher:
897 assert(tgt_pkt->cmd == MemCmd::HardPFReq);
898 from_pref = true;
899
900 delete tgt_pkt;
901 break;
902
903 case MSHR::Target::FromSnoop:
904 // I don't believe that a snoop can be in an error state
905 assert(!is_error);
906 // response to snoop request
907 DPRINTF(Cache, "processing deferred snoop...\n");
908 // If the response is invalidating, a snooping target can
909 // be satisfied if it is also invalidating. If the response is not
910 // only invalidating, but more specifically an InvalidateResp, and
911 // the MSHR was created due to an InvalidateReq then a cache above
912 // is waiting to satisfy a WriteLineReq. In this case even a
913 // non-invalidating snoop is added as a target here since this is
914 // the ordering point. When the InvalidateResp reaches this cache,
915 // the snooping target will snoop further the cache above with the
916 // WriteLineReq.
917 assert(!is_invalidate || pkt->cmd == MemCmd::InvalidateResp ||
918 pkt->req->isCacheMaintenance() ||
919 mshr->hasPostInvalidate());
920 handleSnoop(tgt_pkt, blk, true, true, mshr->hasPostInvalidate());
921 break;
922
923 default:
924 panic("Illegal target->source enum %d\n", target.source);
925 }
926 }
927
928 if (blk && !from_core && from_pref) {
929 blk->setPrefetched();
930 }
931
932 if (!mshr->hasLockedRMWReadTarget()) {
933 maintainClusivity(targets.hasFromCache, blk);
934
935 if (blk && blk->isValid()) {
936 // an invalidate response stemming from a write line request
937 // should not invalidate the block, so check if the
938 // invalidation should be discarded
939 if (is_invalidate || mshr->hasPostInvalidate()) {
940 invalidateBlock(blk);
941 } else if (mshr->hasPostDowngrade()) {
942 blk->clearCoherenceBits(CacheBlk::WritableBit);
943 }
944 }
945 }
946}
947
948PacketPtr
949Cache::evictBlock(CacheBlk *blk)
950{
951 PacketPtr pkt = (blk->isSet(CacheBlk::DirtyBit) || writebackClean) ?
952 writebackBlk(blk) : cleanEvictBlk(blk);
953
954 invalidateBlock(blk);
955
956 return pkt;
957}
958
959PacketPtr
960Cache::cleanEvictBlk(CacheBlk *blk)
961{
962 assert(!writebackClean);
963 assert(blk && blk->isValid() && !blk->isSet(CacheBlk::DirtyBit));
964
965 // Creating a zero sized write, a message to the snoop filter
966 RequestPtr req = std::make_shared<Request>(
967 regenerateBlkAddr(blk), blkSize, 0, Request::wbRequestorId);
968
969 if (blk->isSecure())
970 req->setFlags(Request::SECURE);
971
972 req->taskId(blk->getTaskId());
973
974 PacketPtr pkt = new Packet(req, MemCmd::CleanEvict);
975 pkt->allocate();
976 DPRINTF(Cache, "Create CleanEvict %s\n", pkt->print());
977
978 return pkt;
979}
980
981/////////////////////////////////////////////////////
982//
983// Snoop path: requests coming in from the memory side
984//
985/////////////////////////////////////////////////////
986
987void
988Cache::doTimingSupplyResponse(PacketPtr req_pkt, const uint8_t *blk_data,
989 bool already_copied, bool pending_inval)
990{
991 // sanity check
992 assert(req_pkt->isRequest());
993 assert(req_pkt->needsResponse());
994
995 DPRINTF(Cache, "%s: for %s\n", __func__, req_pkt->print());
996 // timing-mode snoop responses require a new packet, unless we
997 // already made a copy...
998 PacketPtr pkt = req_pkt;
999 if (!already_copied)
1000 // do not clear flags, and allocate space for data if the
1001 // packet needs it (the only packets that carry data are read
1002 // responses)
1003 pkt = new Packet(req_pkt, false, req_pkt->isRead());
1004
1005 assert(req_pkt->req->isUncacheable() || req_pkt->isInvalidate() ||
1006 pkt->hasSharers());
1007 pkt->makeTimingResponse();
1008 if (pkt->isRead()) {
1009 pkt->setDataFromBlock(blk_data, blkSize);
1010 }
1011 if (pkt->cmd == MemCmd::ReadResp && pending_inval) {
1012 // Assume we defer a response to a read from a far-away cache
1013 // A, then later defer a ReadExcl from a cache B on the same
1014 // bus as us. We'll assert cacheResponding in both cases, but
1015 // in the latter case cacheResponding will keep the
1016 // invalidation from reaching cache A. This special response
1017 // tells cache A that it gets the block to satisfy its read,
1018 // but must immediately invalidate it.
1019 pkt->cmd = MemCmd::ReadRespWithInvalidate;
1020 }
1021 // Here we consider forward_time, paying for just forward latency and
1022 // also charging the delay provided by the xbar.
1023 // forward_time is used as send_time in next allocateWriteBuffer().
1024 Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;
1025 // Here we reset the timing of the packet.
1026 pkt->headerDelay = pkt->payloadDelay = 0;
1027 DPRINTF(CacheVerbose, "%s: created response: %s tick: %lu\n", __func__,
1028 pkt->print(), forward_time);
1029 memSidePort.schedTimingSnoopResp(pkt, forward_time);
1030}
1031
1032uint32_t
1033Cache::handleSnoop(PacketPtr pkt, CacheBlk *blk, bool is_timing,
1034 bool is_deferred, bool pending_inval)
1035{
1036 DPRINTF(CacheVerbose, "%s: for %s\n", __func__, pkt->print());
1037 // deferred snoops can only happen in timing mode
1038 assert(!(is_deferred && !is_timing));
1039 // pending_inval only makes sense on deferred snoops
1040 assert(!(pending_inval && !is_deferred));
1041 assert(pkt->isRequest());
1042
1043 // the packet may get modified if we or a forwarded snooper
1044 // responds in atomic mode, so remember a few things about the
1045 // original packet up front
1046 bool invalidate = pkt->isInvalidate();
1047 [[maybe_unused]] bool needs_writable = pkt->needsWritable();
1048
1049 // at the moment we could get an uncacheable write which does not
1050 // have the invalidate flag, and we need a suitable way of dealing
1051 // with this case
1052 panic_if(invalidate && pkt->req->isUncacheable(),
1053 "%s got an invalidating uncacheable snoop request %s",
1054 name(), pkt->print());
1055
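    // additional latency accumulated while snooping the caches above us,
    // returned to the caller so the crossbar can account for it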
1056 uint32_t snoop_delay = 0;
1057
1058 if (forwardSnoops) {
1059 // first propagate snoop upward to see if anyone above us wants to
1060 // handle it. save & restore packet src since it will get
1061 // rewritten to be relative to CPU-side bus (if any)
1062 if (is_timing) {
1063 // copy the packet so that we can clear any flags before
1064 // forwarding it upwards, we also allocate data (passing
1065 // the pointer along in case of static data), in case
1066 // there is a snoop hit in upper levels
1067 Packet snoopPkt(pkt, true, true);
1068 snoopPkt.setExpressSnoop();
1069 // the snoop packet does not need to wait any additional
1070 // time
1071 snoopPkt.headerDelay = snoopPkt.payloadDelay = 0;
1072 cpuSidePort.sendTimingSnoopReq(&snoopPkt);
1073
1074 // add the header delay (including crossbar and snoop
1075 // delays) of the upward snoop to the snoop delay for this
1076 // cache
1077 snoop_delay += snoopPkt.headerDelay;
1078
1079 // If this request is a prefetch or clean evict and an upper level
1080 // signals block present, make sure to propagate the block
1081 // presence to the requestor.
1082 if (snoopPkt.isBlockCached()) {
1083 pkt->setBlockCached();
1084 }
1085 // If the request was satisfied by snooping the cache
1086 // above, mark the original packet as satisfied too.
1087 if (snoopPkt.satisfied()) {
1088 pkt->setSatisfied();
1089 }
1090
1091 // Copy over flags from the snoop response to make sure we
1092 // inform the final destination
1093 pkt->copyResponderFlags(&snoopPkt);
1094 } else {
1095 bool already_responded = pkt->cacheResponding();
1096 cpuSidePort.sendAtomicSnoop(pkt);
1097 if (!already_responded && pkt->cacheResponding()) {
1098 // cache-to-cache response from some upper cache:
1099 // forward response to original requestor
1100 assert(pkt->isResponse());
1101 }
1102 }
1103 }
1104
1105 bool respond = false;
1106 bool blk_valid = blk && blk->isValid();
1107 if (pkt->isClean()) {
1108 if (blk_valid && blk->isSet(CacheBlk::DirtyBit)) {
1109 DPRINTF(CacheVerbose, "%s: packet (snoop) %s found block: %s\n",
1110 __func__, pkt->print(), blk->print());
1111 PacketPtr wb_pkt =
1112 writecleanBlk(blk, pkt->req->getDest(), pkt->id);
1113 PacketList writebacks;
1114 writebacks.push_back(wb_pkt);
1115
1116 if (is_timing) {
1117 // anything that is merely forwarded pays for the forward
1118 // latency and the delay provided by the crossbar
1119 Tick forward_time = clockEdge(forwardLatency) +
1120 pkt->headerDelay;
1121 doWritebacks(writebacks, forward_time);
1122 } else {
1123 doWritebacksAtomic(writebacks);
1124 }
1125 pkt->setSatisfied();
1126 }
1127 } else if (!blk_valid) {
1128 DPRINTF(CacheVerbose, "%s: snoop miss for %s\n", __func__,
1129 pkt->print());
1130 if (is_deferred) {
1131 // we no longer have the block, and will not respond, but a
1132 // packet was allocated in MSHR::handleSnoop and we have
1133 // to delete it
1134 assert(pkt->needsResponse());
1135
1136 // we have passed the block to a cache upstream, that
1137 // cache should be responding
1138 assert(pkt->cacheResponding());
1139
1140 delete pkt;
1141 }
1142 return snoop_delay;
1143 } else {
1144 DPRINTF(Cache, "%s: snoop hit for %s, old state is %s\n", __func__,
1145 pkt->print(), blk->print());
1146
1147 // We may end up modifying both the block state and the packet (if
1148 // we respond in atomic mode), so just figure out what to do now
1149 // and then do it later. We respond to all snoops that need
1150 // responses provided we have the block in dirty state. The
1151 // invalidation itself is taken care of below. We don't respond to
1152 // cache maintenance operations as this is done by the destination
1153 // xbar.
1154 respond = blk->isSet(CacheBlk::DirtyBit) && pkt->needsResponse();
1155
1157 "Should never have a dirty block in a read-only cache %s\n",
1158 name());
1159 }
1160
1161 // Invalidate any prefetches from below that would strip write permissions
1162 // MemCmd::HardPFReq is only observed by upstream caches. After missing
1163 // above and in its own cache, a new MemCmd::ReadReq is created that
1164 // downstream caches observe.
1165 if (pkt->mustCheckAbove()) {
1166 DPRINTF(Cache, "Found addr %#llx in upper level cache for snoop %s "
1167 "from lower cache\n", pkt->getAddr(), pkt->print());
1168 pkt->setBlockCached();
1169 return snoop_delay;
1170 }
1171
1172 if (pkt->isRead() && !invalidate) {
1173 // reading without requiring the line in a writable state
1174 assert(!needs_writable);
1175 pkt->setHasSharers();
1176
1177 // if the requesting packet is uncacheable, retain the line in
1178 // the current state, otherwise unset the writable flag,
1179 // which means we go from Modified to Owned (and will respond
1180 // below), remain in Owned (and will respond below), from
1181 // Exclusive to Shared, or remain in Shared
1182 if (!pkt->req->isUncacheable()) {
1183 blk->clearCoherenceBits(CacheBlk::WritableBit);
1184 }
1185 DPRINTF(Cache, "new state is %s\n", blk->print());
1186 }
1187
1188 if (respond) {
1189 // prevent anyone else from responding, cache as well as
1190 // memory, and also prevent any memory from even seeing the
1191 // request
1192 pkt->setCacheResponding();
1193 if (!pkt->isClean() && blk->isSet(CacheBlk::WritableBit)) {
1194 // inform the cache hierarchy that this cache had the line
1195 // in the Modified state so that we avoid unnecessary
1196 // invalidations (see Packet::setResponderHadWritable)
1197 pkt->setResponderHadWritable();
1198
1199 // in the case of an uncacheable request there is no point
1200 // in setting the responderHadWritable flag, but since the
1201 // recipient does not care there is no harm in doing so
1202 } else {
1203 // if the packet has needsWritable set we invalidate our
1204 // copy below and all other copies will be invalidated
1205 // through express snoops, and if needsWritable is not set
1206 // we already called setHasSharers above
1207 }
1208
1209 // if we are returning a writable and dirty (Modified) line,
1210 // we should be invalidating the line
1211 panic_if(!invalidate && !pkt->hasSharers(),
1212 "%s is passing a Modified line through %s, "
1213 "but keeping the block", name(), pkt->print());
1214
1215 if (is_timing) {
1216 doTimingSupplyResponse(pkt, blk->data, is_deferred, pending_inval);
1217 } else {
1218 pkt->makeAtomicResponse();
1219 // packets such as upgrades do not actually have any data
1220 // payload
1221 if (pkt->hasData())
1222 pkt->setDataFromBlock(blk->data, blkSize);
1223 }
1224
1225 // When a block is compressed, it must first be decompressed before
1226 // being read, and this increases the snoop delay.
1227 if (compressor && pkt->isRead()) {
1228 snoop_delay += compressor->getDecompressionLatency(blk);
1229 }
1230 }
1231
1232 if (!respond && is_deferred) {
1233 assert(pkt->needsResponse());
1234 delete pkt;
1235 }
1236
1237 // Do this last in case it deallocates block data or something
1238 // like that
1239 if (blk_valid && invalidate) {
1240 invalidateBlock(blk);
1241 DPRINTF(Cache, "new state is %s\n", blk->print());
1242 }
1243
1244 return snoop_delay;
1245}
1246
1247
1248void
1249Cache::recvTimingSnoopReq(PacketPtr pkt)
1250{
1251 DPRINTF(CacheVerbose, "%s: for %s\n", __func__, pkt->print());
1252
1253 // no need to snoop requests that are not in range
1254 if (!inRange(pkt->getAddr())) {
1255 return;
1256 }
1257
1258 bool is_secure = pkt->isSecure();
1259 CacheBlk *blk = tags->findBlock(pkt->getAddr(), is_secure);
1260
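    // a snoop must also be checked against in-flight misses and pending
    // writebacks for this block, not just the tag array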
1261 Addr blk_addr = pkt->getBlockAddr(blkSize);
1262 MSHR *mshr = mshrQueue.findMatch(blk_addr, is_secure);
1263
1264 // Update the latency cost of the snoop so that the crossbar can
1265 // account for it. Do not overwrite what other neighbouring caches
1266 // have already done, rather take the maximum. The update is
1267 // tentative, for cases where we return before an upward snoop
1268 // happens below.
1269 pkt->snoopDelay = std::max<uint32_t>(pkt->snoopDelay,
1270 lookupLatency * clockPeriod());
1271
1272 // Inform request(Prefetch, CleanEvict or Writeback) from below of
1273 // MSHR hit, set setBlockCached.
1274 if (mshr && pkt->mustCheckAbove()) {
1275 DPRINTF(Cache, "Setting block cached for %s from lower cache on "
1276 "mshr hit\n", pkt->print());
1277 pkt->setBlockCached();
1278 return;
1279 }
1280
1281 // Let the MSHR itself track the snoop and decide whether we want
1282 // to go ahead and do the regular cache snoop
1283 if (mshr && mshr->handleSnoop(pkt, order++)) {
1284 DPRINTF(Cache, "Deferring snoop on in-service MSHR to blk %#llx (%s)."
1285 "mshrs: %s\n", blk_addr, is_secure ? "s" : "ns",
1286 mshr->print());
1287
1288 if (mshr->getNumTargets() > numTarget)
1289 warn("allocating bonus target for snoop"); //handle later
1290 return;
1291 }
1292
1293 //We also need to check the writeback buffers and handle those
1294 WriteQueueEntry *wb_entry = writeBuffer.findMatch(blk_addr, is_secure);
1295 if (wb_entry) {
1296 DPRINTF(Cache, "Snoop hit in writeback to addr %#llx (%s)\n",
1297 pkt->getAddr(), is_secure ? "s" : "ns");
1298 // Expect to see only Writebacks and/or CleanEvicts here, both of
1299 // which should not be generated for uncacheable data.
1300 assert(!wb_entry->isUncacheable());
1301 // There should only be a single request responsible for generating
1302 // Writebacks/CleanEvicts.
1303 assert(wb_entry->getNumTargets() == 1);
1304 PacketPtr wb_pkt = wb_entry->getTarget()->pkt;
1305 assert(wb_pkt->isEviction() || wb_pkt->cmd == MemCmd::WriteClean);
1306
1307 if (pkt->isEviction()) {
1308 // if the block is found in the write queue, set the BLOCK_CACHED
1309 // flag for Writeback/CleanEvict snoop. On return the snoop will
1310 // propagate the BLOCK_CACHED flag in Writeback packets and prevent
1311 // any CleanEvicts from travelling down the memory hierarchy.
1312 pkt->setBlockCached();
1313 DPRINTF(Cache, "%s: Squashing %s from lower cache on writequeue "
1314 "hit\n", __func__, pkt->print());
1315 return;
1316 }
1317
1318 // conceptually writebacks are no different to other blocks in
1319 // this cache, so the behaviour is modelled after handleSnoop,
1320 // the difference being that instead of querying the block
1321 // state to determine if it is dirty and writable, we use the
1322 // command and fields of the writeback packet
1323 bool respond = wb_pkt->cmd == MemCmd::WritebackDirty &&
1324 pkt->needsResponse();
1325 bool have_writable = !wb_pkt->hasSharers();
1326 bool invalidate = pkt->isInvalidate();
1327
1328 if (!pkt->req->isUncacheable() && pkt->isRead() && !invalidate) {
1329 assert(!pkt->needsWritable());
1330 pkt->setHasSharers();
1331 wb_pkt->setHasSharers();
1332 }
1333
1334 if (respond) {
1335 pkt->setCacheResponding();
1336
1337 if (have_writable) {
1338 pkt->setResponderHadWritable();
1339 }
1340
1341 doTimingSupplyResponse(pkt, wb_pkt->getConstPtr<uint8_t>(),
1342 false, false);
1343 }
1344
1345 if (invalidate && wb_pkt->cmd != MemCmd::WriteClean) {
1346 // Invalidation trumps our writeback... discard here
1347 // Note: markInService will remove entry from writeback buffer.
1348 markInService(wb_entry);
1349 delete wb_pkt;
1350 }
1351 }
1352
1353 // If this was a shared writeback, there may still be
1354 // other shared copies above that require invalidation.
1355 // We could be more selective and return here if the
1356 // request is non-exclusive or if the writeback is
1357 // exclusive.
1358 uint32_t snoop_delay = handleSnoop(pkt, blk, true, false, false);
1359
1360 // Override what we did when we first saw the snoop, as we now
1361 // also have the cost of the upwards snoops to account for
1362 pkt->snoopDelay = std::max<uint32_t>(pkt->snoopDelay, snoop_delay +
1363 lookupLatency * clockPeriod());
1364}
1365
1366Tick
1367Cache::recvAtomicSnoop(PacketPtr pkt)
1368{
1369 // no need to snoop requests that are not in range.
1370 if (!inRange(pkt->getAddr())) {
1371 return 0;
1372 }
1373
1374 CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());
1375 uint32_t snoop_delay = handleSnoop(pkt, blk, false, false, false);
1376 return snoop_delay + lookupLatency * clockPeriod();
1377}
1378
1379bool
1380Cache::isCachedAbove(PacketPtr pkt, bool is_timing)
1381{
1382 if (!forwardSnoops)
1383 return false;
1384 // Mirroring the flow of HardPFReqs, the cache sends CleanEvict and
1385 // Writeback snoops into upper level caches to check for copies of the
1386 // same block. Using the BLOCK_CACHED flag with the Writeback/CleanEvict
1387 // packet, the cache can inform the crossbar below of presence or absence
1388 // of the block.
1389 if (is_timing) {
1390 Packet snoop_pkt(pkt, true, false);
1391 snoop_pkt.setExpressSnoop();
1392 // Assert that packet is either Writeback or CleanEvict and not a
1393 // prefetch request because prefetch requests need an MSHR and may
1394 // generate a snoop response.
1395 assert(pkt->isEviction() || pkt->cmd == MemCmd::WriteClean);
1396 snoop_pkt.senderState = nullptr;
1397 cpuSidePort.sendTimingSnoopReq(&snoop_pkt);
1398 // Writeback/CleanEvict snoops do not generate a snoop response.
1399 assert(!(snoop_pkt.cacheResponding()));
1400 return snoop_pkt.isBlockCached();
1401 } else {
1402 cpuSidePort.sendAtomicSnoop(pkt);
1403 return pkt->isBlockCached();
1404 }
1405}
1406
1407bool
1408Cache::sendMSHRQueuePacket(MSHR *mshr)
1409{
1410 assert(mshr);
1411
1412 // use request from 1st target
1413 PacketPtr tgt_pkt = mshr->getTarget()->pkt;
1414
1415 if (tgt_pkt->cmd == MemCmd::HardPFReq && forwardSnoops) {
1416 DPRINTF(Cache, "%s: MSHR %s\n", __func__, tgt_pkt->print());
1417
1418 // we should never have hardware prefetches to allocated
1419 // blocks
1420 assert(!tags->findBlock(mshr->blkAddr, mshr->isSecure));
1421
1422 // We need to check the caches above us to verify that
1423 // they don't have a copy of this block in the dirty state
1424 // at the moment. Without this check we could get a stale
1425 // copy from memory that might get used in place of the
1426 // dirty one.
1427 Packet snoop_pkt(tgt_pkt, true, false);
1428 snoop_pkt.setExpressSnoop();
1429 // We are sending this packet upwards, but if it hits we will
1430 // get a snoop response that we end up treating just like a
1431 // normal response, hence it needs the MSHR as its sender
1432 // state
1433 snoop_pkt.senderState = mshr;
1434 cpuSidePort.sendTimingSnoopReq(&snoop_pkt);
1435
1436 // Check to see if a writeback arrived between the time the
1437 // prefetch was placed in the MSHRs and when it was selected
1438 // to be sent, or if the prefetch was squashed by an upper
1439 // cache in the meantime (to prevent us from grabbing the
1440 // line).
1441
1442 // It is important to check cacheResponding before
1443 // prefetchSquashed. If another cache has committed to
1444 // responding, it will be sending a dirty response which will
1445 // arrive at the MSHR allocated for this request. Checking the
1446 // prefetchSquash first may result in the MSHR being
1447 // prematurely deallocated.
1448 if (snoop_pkt.cacheResponding()) {
1449 [[maybe_unused]] auto r = outstandingSnoop.insert(snoop_pkt.req);
1450 assert(r.second);
1451
1452 // if we are getting a snoop response with no sharers it
1453 // will be allocated as Modified
1454 bool pending_modified_resp = !snoop_pkt.hasSharers();
1455 markInService(mshr, pending_modified_resp);
1456
1457 DPRINTF(Cache, "Upward snoop of prefetch for addr"
1458 " %#x (%s) hit\n",
1459 tgt_pkt->getAddr(), tgt_pkt->isSecure()? "s": "ns");
1460 return false;
1461 }
1462
1463 if (snoop_pkt.isBlockCached()) {
1464 DPRINTF(Cache, "Block present, prefetch squashed by cache. "
1465 "Deallocating mshr target %#x.\n",
1466 mshr->blkAddr);
1467
1468 // Deallocate the mshr target
1469 if (mshrQueue.forceDeallocateTarget(mshr)) {
1470 // Clear the blocked state if this deallocation freed an
1471 // MSHR when all had previously been utilized
1472 clearBlocked(Blocked_NoMSHRs);
1473 }
1474
1475 // given that no response is expected, delete Request and Packet
1476 delete tgt_pkt;
1477
1478 return false;
1479 }
1480 }
1481
1482 return BaseCache::sendMSHRQueuePacket(mshr);
1483}
1484
1485} // namespace gem5
#define DPRINTF(x,...)
Definition trace.hh:210
Defines global host-dependent types: Counter, Tick, and (indirectly) {int,uint}{8,...
Describes a cache.
Definitions of a simple cache block class.
A basic cache interface.
Definition base.hh:95
const bool isReadOnly
Is this cache read only, for example the instruction cache, or table-walker cache.
Definition base.hh:939
virtual void recvTimingReq(PacketPtr pkt)
Performs the access specified by the request.
Definition base.cc:404
MSHR * allocateMissBuffer(PacketPtr pkt, Tick time, bool sched_send=true)
Definition base.hh:1169
CpuSidePort cpuSidePort
Definition base.hh:334
void invalidateBlock(CacheBlk *blk)
Invalidate a cache block.
Definition base.cc:1664
const bool writebackClean
Determine if clean lines should be written back or not.
Definition base.hh:671
bool inRange(Addr addr) const
Determine if an address is in the ranges covered by this cache.
Definition base.cc:214
virtual void handleTimingReqMiss(PacketPtr pkt, CacheBlk *blk, Tick forward_time, Tick request_time)=0
bool allocOnFill(MemCmd cmd) const
Determine whether we should allocate on a fill or not.
Definition base.hh:437
bool forwardSnoops
Do we forward snoops from mem side port through to cpu side port?
Definition base.hh:924
uint64_t order
Increasing order number assigned to each incoming request.
Definition base.hh:964
virtual void satisfyRequest(PacketPtr pkt, CacheBlk *blk, bool deferred_response=false, bool pending_downgrade=false)
Perform any necessary updates to the block and perform any data exchange between the packet and the b...
Definition base.cc:1089
gem5::BaseCache::CacheStats stats
const Cycles lookupLatency
The latency of tag lookup of a cache.
Definition base.hh:890
MSHRQueue mshrQueue
Miss status registers.
Definition base.hh:340
const unsigned blkSize
Block size of this cache.
Definition base.hh:884
const Cycles forwardLatency
This is the forward latency of the cache.
Definition base.hh:903
compression::Base * compressor
Compression method being used.
Definition base.hh:349
const Cycles responseLatency
The latency of sending reponse to its upper level cache/core on a linefill.
Definition base.hh:913
PacketPtr writecleanBlk(CacheBlk *blk, Request::Flags dest, PacketId id)
Create a writeclean request for the given block.
Definition base.cc:1743
MemSidePort memSidePort
Definition base.hh:335
virtual void handleTimingReqHit(PacketPtr pkt, CacheBlk *blk, Tick request_time)
Definition base.cc:225
virtual Tick recvAtomic(PacketPtr pkt)
Performs the access specified by the request.
Definition base.cc:635
virtual void recvTimingResp(PacketPtr pkt)
Handles a response (cache line fill/write ack) from the bus.
Definition base.cc:489
virtual bool access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat, PacketList &writebacks)
Does all the processing necessary to perform the provided request.
Definition base.cc:1230
Addr regenerateBlkAddr(CacheBlk *blk)
Regenerate block address using tags.
Definition base.cc:183
std::unique_ptr< Packet > pendingDelete
Upstream caches need this packet until true is returned, so hold it for deletion until a subsequent c...
Definition base.hh:398
const int numTarget
The number of targets for each MSHR.
Definition base.hh:921
WriteAllocator *const writeAllocator
The writeAllocator drive optimizations for streaming writes.
Definition base.hh:384
void markInService(MSHR *mshr, bool pending_modified_resp)
Mark a request as in service (sent downstream in the memory system), effectively making this MSHR the...
Definition base.hh:404
void allocateWriteBuffer(PacketPtr pkt, Tick time)
Definition base.hh:1187
CacheBlk * handleFill(PacketPtr pkt, CacheBlk *blk, PacketList &writebacks, bool allocate)
Handle a fill operation caused by a received packet.
Definition base.cc:1511
WriteQueue writeBuffer
Write/writeback buffer.
Definition base.hh:343
PacketPtr writebackBlk(CacheBlk *blk)
Create a writeback request for the given block.
Definition base.cc:1693
void clearBlocked(BlockedCause cause)
Marks the cache as unblocked for the given cause.
Definition base.hh:1251
virtual PacketPtr evictBlock(CacheBlk *blk)=0
Evict a cache block.
BaseTags * tags
Tag and data Storage.
Definition base.hh:346
const enums::Clusivity clusivity
Clusivity with respect to the upstream cache, determining if we fill into both this cache and the cac...
Definition base.hh:931
virtual bool sendMSHRQueuePacket(MSHR *mshr)
Take an MSHR, turn it into a suitable downstream packet, and send it out.
Definition base.cc:1867
void maintainClusivity(bool from_cache, CacheBlk *blk)
Maintain the clusivity of this cache by potentially invalidating a block.
Definition base.cc:1499
System * system
System we are currently operating in.
Definition base.hh:982
virtual CacheBlk * findBlock(Addr addr, bool is_secure) const
Finds the block in the cache without touching it.
Definition base.cc:80
std::string print()
Print all tags used.
Definition base.cc:202
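The tag-store helpers above (findBlock, regenerateBlkAddr) and the blkSize member are typically used together when classifying an incoming packet. The fragment below is an illustrative sketch only, assumed to sit inside a BaseCache-derived member function in a gem5 build; it is not code from cache.cc.

// Illustrative member-function fragment (hypothetical), combining the
// lookup helpers listed above.
CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());
if (blk && blk->isValid()) {
    // The tags can reconstruct the block-aligned address of a hit, which
    // must agree with the block address computed from the packet.
    assert(regenerateBlkAddr(blk) == pkt->getBlockAddr(blkSize));
}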
A basic cache block.
Definition cache_blk.hh:71
@ ReadableBit
Read permission.
Definition cache_blk.hh:85
@ WritableBit
Write permission.
Definition cache_blk.hh:80
@ DirtyBit
Dirty (modified).
Definition cache_blk.hh:87
void setPrefetched()
Marks this block as a recently prefetched block.
Definition cache_blk.hh:258
std::string print() const override
Pretty-print tag, set and way, and interpret state bits to readable form including mapping to a MOESI...
Definition cache_blk.hh:364
bool isSet(unsigned bits) const
Checks whether the given coherence bits are set.
Definition cache_blk.hh:239
void clearCoherenceBits(unsigned bits)
Clear the corresponding coherence bits.
Definition cache_blk.hh:231
uint32_t getTaskId() const
Get the task id associated with this block.
Definition cache_blk.hh:285
uint8_t * data
Contains a copy of the data in this block for easy access.
Definition cache_blk.hh:103
void setCoherenceBits(unsigned bits)
Sets the corresponding coherence bits.
Definition cache_blk.hh:220
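The CacheBlk coherence-bit accessors listed above (setCoherenceBits, clearCoherenceBits, isSet) form a small state-manipulation API. Below is a hedged sketch, not code from cache.cc, of a block being filled in the modified state and later losing write permission; the helper name is hypothetical.

// Hypothetical helper; assumes a valid CacheBlk pointer in a gem5 build.
void
fillModifiedThenDowngrade(CacheBlk *blk)
{
    // Fill completes: the block becomes readable, writable and dirty
    // (Modified, in MOESI terms).
    blk->setCoherenceBits(CacheBlk::ReadableBit | CacheBlk::WritableBit);
    blk->setCoherenceBits(CacheBlk::DirtyBit);

    // A later downgrade (e.g. on a sharing snoop) removes write
    // permission; the dirty data remains until it is written back.
    if (blk->isSet(CacheBlk::WritableBit))
        blk->clearCoherenceBits(CacheBlk::WritableBit);
}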
A coherent cache that can be arranged in flexible topologies.
Definition cache.hh:68
PacketPtr cleanEvictBlk(CacheBlk *blk)
Create a CleanEvict request for the given block.
Definition cache.cc:960
Cache(const CacheParams &p)
Instantiates a basic cache object.
Definition cache.cc:69
void recvTimingSnoopReq(PacketPtr pkt) override
Snoops bus transactions to maintain coherence.
Definition cache.cc:1249
bool isCachedAbove(PacketPtr pkt, bool is_timing=true)
Send up a snoop request and find cached copies.
Definition cache.cc:1380
void promoteWholeLineWrites(PacketPtr pkt)
Turn line-sized writes into WriteInvalidate transactions.
Definition cache.cc:301
void serviceMSHRTargets(MSHR *mshr, const PacketPtr pkt, CacheBlk *blk) override
Service non-deferred MSHR targets using the received response.
Definition cache.cc:687
Cycles handleAtomicReqMiss(PacketPtr pkt, CacheBlk *&blk, PacketList &writebacks) override
Handle a request in atomic mode that missed in this cache.
Definition cache.cc:566
Tick recvAtomicSnoop(PacketPtr pkt) override
Snoop for the provided request in the cache and return the estimated time taken.
Definition cache.cc:1367
std::unordered_set< RequestPtr > outstandingSnoop
Store the outstanding requests that we are expecting snoop responses from so we can determine which s...
Definition cache.hh:80
void satisfyRequest(PacketPtr pkt, CacheBlk *blk, bool deferred_response=false, bool pending_downgrade=false) override
Perform any necessary updates to the block and perform any data exchange between the packet and the b...
Definition cache.cc:78
void recvTimingSnoopResp(PacketPtr pkt) override
Handle a snoop response.
Definition cache.cc:267
void recvTimingReq(PacketPtr pkt) override
Performs the access specified by the request.
Definition cache.cc:406
Tick recvAtomic(PacketPtr pkt) override
Performs the access specified by the request.
Definition cache.cc:656
void handleTimingReqMiss(PacketPtr pkt, CacheBlk *blk, Tick forward_time, Tick request_time) override
Definition cache.cc:324
bool sendMSHRQueuePacket(MSHR *mshr) override
Take an MSHR, turn it into a suitable downstream packet, and send it out.
Definition cache.cc:1408
void doTimingSupplyResponse(PacketPtr req_pkt, const uint8_t *blk_data, bool already_copied, bool pending_inval)
Definition cache.cc:988
PacketPtr createMissPacket(PacketPtr cpu_pkt, CacheBlk *blk, bool needs_writable, bool is_whole_line_write) const override
Create an appropriate downstream bus request packet.
Definition cache.cc:480
void handleTimingReqHit(PacketPtr pkt, CacheBlk *blk, Tick request_time) override
Definition cache.cc:313
uint32_t handleSnoop(PacketPtr pkt, CacheBlk *blk, bool is_timing, bool is_deferred, bool pending_inval)
Perform an upward snoop if needed, and update the block state (possibly invalidating the block).
Definition cache.cc:1033
PacketPtr evictBlock(CacheBlk *blk) override
Evict a cache block.
Definition cache.cc:949
const bool doFastWrites
This cache should allocate a block on a line-sized write miss.
Definition cache.hh:73
void doWritebacks(PacketList &writebacks, Tick forward_time) override
Insert writebacks into the write buffer.
Definition cache.cc:190
bool access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat, PacketList &writebacks) override
Does all the processing necessary to perform the provided request.
Definition cache.cc:161
void doWritebacksAtomic(PacketList &writebacks) override
Send writebacks down the memory hierarchy in atomic mode.
Definition cache.cc:232
Tick clockEdge(Cycles cycles=Cycles(0)) const
Determine the tick when a cycle begins, by default the current one, but the argument also enables the...
Tick clockPeriod() const
Cycles ticksToCycles(Tick t) const
Cycles is a wrapper class for representing cycle counts, i.e.
Definition types.hh:79
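The clock helpers above are what turn the latency members listed earlier (lookupLatency, forwardLatency, responseLatency) into absolute ticks. The lines below are an illustrative fragment, assumed to run in the member context of a cache model; the variable names are not taken from cache.cc.

// Earliest tick at which the cache can act on the request: the next clock
// edge after the tag lookup, plus the header delay already accumulated.
Tick request_time = clockEdge(lookupLatency) + pkt->headerDelay;

// Earliest tick at which a response could reach the upper level,
// additionally paying the response latency and the payload delay.
Tick completion_time = clockEdge(responseLatency) +
    pkt->headerDelay + pkt->payloadDelay;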
bool forceDeallocateTarget(MSHR *mshr)
Deallocate top target, possibly freeing the MSHR.
bool hasFromCache
Determine whether there was at least one non-snooping target coming from another cache.
Definition mshr.hh:181
Miss Status and handling Register.
Definition mshr.hh:75
bool wasWholeLineWrite
Track if we sent this as a whole line write or not.
Definition mshr.hh:124
void updateLockedRMWReadTarget(PacketPtr pkt)
Replaces the matching packet in the Targets list with a dummy packet to ensure the MSHR remains alloc...
Definition mshr.cc:786
TargetList extractServiceableTargets(PacketPtr pkt)
Extracts the subset of the targets that can be serviced given a received response.
Definition mshr.cc:548
void popTarget()
Pop first target.
Definition mshr.hh:482
void print(std::ostream &os, int verbosity=0, const std::string &prefix="") const override
Prints the contents of this MSHR for debugging.
Definition mshr.cc:731
int getNumTargets() const
Returns the current number of allocated targets.
Definition mshr.hh:446
bool hasPostDowngrade() const
Definition mshr.hh:334
QueueEntry::Target * getTarget() override
Returns a reference to the first target.
Definition mshr.hh:473
bool handleSnoop(PacketPtr target, Counter order)
Definition mshr.cc:424
bool isForward
True if the entry is just a simple forward from an upper level.
Definition mshr.hh:127
bool hasLockedRMWReadTarget()
Determine if there are any LockedRMWReads in the Targets list.
Definition mshr.cc:794
bool hasPostInvalidate() const
Definition mshr.hh:330
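The MSHR accessors above are enough to sketch how a received response drains the target list. The fragment below is illustrative only and omits the data movement that the real Cache::serviceMSHRTargets performs.

// Illustrative fragment; mshr and the response handling are assumed.
while (mshr->getNumTargets() > 0) {
    QueueEntry::Target *target = mshr->getTarget();
    PacketPtr tgt_pkt = target->pkt;

    // ... satisfy tgt_pkt using the received response ...

    mshr->popTarget();
}

// Deferred snoops can leave state changes to apply afterwards.
if (mshr->hasPostInvalidate()) {
    // the block must be invalidated
} else if (mshr->hasPostDowngrade()) {
    // the block must lose write permission
}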
bool isSWPrefetch() const
Definition packet.hh:253
@ ReadRespWithInvalidate
Definition packet.hh:89
@ WritebackDirty
Definition packet.hh:93
@ StoreCondFailReq
Definition packet.hh:114
@ LockedRMWReadReq
Definition packet.hh:116
@ LockedRMWWriteReq
Definition packet.hh:118
@ WritebackClean
Definition packet.hh:94
@ LockedRMWWriteResp
Definition packet.hh:119
@ SCUpgradeFailReq
Definition packet.hh:106
@ UpgradeFailResp
Definition packet.hh:107
virtual std::string name() const
Definition named.hh:47
A Packet is used to encapsulate a transfer between two objects in the memory system (e....
Definition packet.hh:295
void setExpressSnoop()
The express snoop flag is used for two purposes.
Definition packet.hh:701
bool responderHadWritable() const
Definition packet.hh:719
bool isUpgrade() const
Definition packet.hh:596
bool isRead() const
Definition packet.hh:593
bool isSecure() const
Definition packet.hh:836
const PacketId id
Definition packet.hh:374
Addr getAddr() const
Definition packet.hh:807
bool isError() const
Definition packet.hh:622
bool isResponse() const
Definition packet.hh:598
uint32_t snoopDelay
Keep track of the extra delay incurred by snooping upwards before sending a request down the memory s...
Definition packet.hh:439
void makeTimingResponse()
Definition packet.hh:1080
bool needsWritable() const
Definition packet.hh:599
void print(std::ostream &o, int verbosity=0, const std::string &prefix="") const
Definition packet.cc:368
void copyError(Packet *pkt)
Definition packet.hh:805
bool satisfied() const
Definition packet.hh:755
bool needsResponse() const
Definition packet.hh:608
SenderState * senderState
This packet's sender state.
Definition packet.hh:545
uint32_t payloadDelay
The extra pipelining delay from seeing the packet until the end of payload is transmitted by the comp...
Definition packet.hh:449
bool matchAddr(const Addr addr, const bool is_secure) const
Check if packet corresponds to a given address and address space.
Definition packet.cc:403
uint32_t headerDelay
The extra delay from seeing the packet until the header is transmitted.
Definition packet.hh:431
void setResponderHadWritable()
On responding to a snoop request (which only happens for Modified or Owned lines),...
Definition packet.hh:713
Addr getOffset(unsigned int blk_size) const
Definition packet.hh:826
bool mustCheckAbove() const
Does the request need to check for cached copies of the same block in the memory hierarchy above.
Definition packet.hh:1425
bool hasData() const
Definition packet.hh:614
void copyResponderFlags(const PacketPtr pkt)
Copy the response flags from an input packet to this packet.
Definition packet.cc:324
bool hasRespData() const
Definition packet.hh:615
bool fromCache() const
Definition packet.hh:612
void setData(const uint8_t *p)
Copy data into the packet from the provided pointer.
Definition packet.hh:1293
bool isWrite() const
Definition packet.hh:594
void setDataFromBlock(const uint8_t *blk_data, int blkSize)
Copy data into the packet from the provided block pointer, which is aligned to the given block size.
Definition packet.hh:1312
Addr getBlockAddr(unsigned int blk_size) const
Definition packet.hh:831
RequestPtr req
A pointer to the original request.
Definition packet.hh:377
unsigned getSize() const
Definition packet.hh:817
void setCacheResponding()
Snoop flags.
Definition packet.hh:653
bool isClean() const
Definition packet.hh:611
const T * getConstPtr() const
Definition packet.hh:1234
void setHasSharers()
On fills, the hasSharers flag is used by the caches in combination with the cacheResponding flag,...
Definition packet.hh:685
bool isLLSC() const
Definition packet.hh:620
bool cacheResponding() const
Definition packet.hh:659
void makeAtomicResponse()
Definition packet.hh:1074
void setSatisfied()
Set when a request hits in a cache and the cache is not going to respond.
Definition packet.hh:749
MemCmd cmd
The command field of the packet.
Definition packet.hh:372
bool isMaskedWrite() const
Definition packet.hh:1441
bool isInvalidate() const
Definition packet.hh:609
bool isWholeLineWrite(unsigned blk_size)
Definition packet.hh:626
bool hasSharers() const
Definition packet.hh:686
bool isBlockCached() const
Definition packet.hh:760
void setBlockCached()
Definition packet.hh:759
void allocate()
Allocate memory for the packet.
Definition packet.hh:1367
bool isEviction() const
Definition packet.hh:610
bool isRequest() const
Definition packet.hh:597
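Most of the Packet members above are query and conversion helpers on a single object. The function below is a hedged, self-contained sketch, not code from cache.cc, showing a read hit being satisfied from a block-sized buffer and converted into a timing response; the function name and parameters are hypothetical.

#include <cassert>

#include "mem/packet.hh"

using namespace gem5;

// Copy the requested bytes into the packet and, if the requestor expects a
// reply, convert the request into the matching response command.
void
satisfyReadHit(PacketPtr pkt, const uint8_t *blk_data, unsigned blk_size)
{
    assert(pkt->isRead());
    assert(pkt->getOffset(blk_size) + pkt->getSize() <= blk_size);

    pkt->setDataFromBlock(blk_data, blk_size);

    if (pkt->needsResponse())
        pkt->makeTimingResponse();
}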
A queue entry holds packets that will be serviced as soon as resources become available.
PacketPtr pkt
Pending request packet.
bool isUncacheable() const
Addr blkAddr
Block-aligned address.
bool isSecure
True if the entry targets the secure memory space.
Entry * findMatch(Addr blk_addr, bool is_secure, bool ignore_uncacheable=true) const
Find the first entry that matches the provided address.
Definition queue.hh:168
void schedTimingSnoopResp(PacketPtr pkt, Tick when)
Schedule the sending of a timing snoop response.
Definition qport.hh:159
void schedTimingResp(PacketPtr pkt, Tick when)
Schedule the sending of a timing response.
Definition qport.hh:94
Tick sendAtomic(PacketPtr pkt)
Send an atomic request packet, where the data is moved and the state is updated in zero time,...
Definition port.hh:487
bool sendTimingReq(PacketPtr pkt)
Attempt to send a timing request to the responder port by calling its corresponding receive function.
Definition port.hh:530
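The two send paths listed above differ in their failure behaviour: sendAtomic completes the access inline and returns the latency, while sendTimingReq may be refused and must then be retried. A minimal illustrative fragment, assuming the member context of BaseCache (memSidePort is listed above):

if (memSidePort.sendTimingReq(pkt)) {
    // Accepted: the downstream component now owns the packet.
} else {
    // Refused: keep the packet and resend it when the port's retry
    // callback (recvReqRetry) is invoked.
}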
@ SECURE
The request targets the secure memory space.
Definition request.hh:186
@ wbRequestorId
This requestor id is used for writeback requests by the caches.
Definition request.hh:274
Tick sendAtomicSnoop(PacketPtr pkt)
Send an atomic snoop request packet, where the data is moved and the state is updated in zero time,...
Definition port.hh:349
void sendTimingSnoopReq(PacketPtr pkt)
Attempt to send a timing snoop request packet to the request port by calling its corresponding receiv...
Definition port.hh:410
RequestorID maxRequestors()
Get the number of requestors registered in the system.
Definition system.hh:495
virtual bool isValid() const
Checks if the entry is valid.
bool isSecure() const
Check if this block holds data from the secure memory space.
bool allocate() const
Should writes allocate?
Definition base.hh:1401
int getNumTargets() const
Returns the current number of allocated targets.
Target * getTarget() override
Returns a reference to the first target.
Cycles getDecompressionLatency(const CacheBlk *blk)
Get the decompression latency if the block is compressed.
Definition base.cc:196
STL list class.
Definition stl.hh:51
#define panic(...)
This implements a cprintf-based panic() function.
Definition logging.hh:188
#define gem5_assert(cond,...)
The assert macro will function like a normal assert, but will use panic instead of straight abort().
Definition logging.hh:317
#define panic_if(cond,...)
Conditional panic macro that checks the supplied condition and only panics if the condition is true a...
Definition logging.hh:214
#define warn(...)
Definition logging.hh:256
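The logging macros above are cprintf-style, so each takes a format string plus arguments. A hypothetical usage sketch follows (the function and its checks are illustrative, not part of gem5):

#include "base/logging.hh"

// Validate a configured line size, escalating from a warning to a panic.
void
checkLineSize(unsigned blk_size)
{
    panic_if(blk_size == 0, "Cache configured with a zero line size");
    gem5_assert((blk_size & (blk_size - 1)) == 0,
                "Line size %u is not a power of two", blk_size);
    if (blk_size < 16)
        warn("Unusually small cache line size: %u bytes", blk_size);
}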
Declaration of a common base class for cache tagstore objects.
Miss Status and Handling Register (MSHR) declaration.
std::shared_ptr< Request > RequestPtr
Definition request.hh:94
uint64_t Addr
Address type. This will probably be moved somewhere else in the near future.
Definition types.hh:147
uint64_t Tick
Tick count type.
Definition types.hh:58
Declaration of a request, the overall memory request consisting of the parts of the request that are ...
statistics::Vector missLatency
Total number of ticks per thread/command spent waiting for a miss.
Definition base.hh:1013
statistics::Vector mshrUncacheable
Number of misses that miss in the MSHRs, per command and thread.
Definition base.hh:1025
CacheCmdStats & cmdStats(const PacketPtr p)
Definition base.hh:1044
Write queue entry.

Generated on Mon Jul 10 2023 15:32:04 for gem5 by doxygen 1.9.7