gem5 v24.0.0.0
Loading...
Searching...
No Matches
mshr.cc
Go to the documentation of this file.
1/*
2 * Copyright (c) 2012-2013, 2015-2019 ARM Limited
3 * All rights reserved.
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Copyright (c) 2002-2005 The Regents of The University of Michigan
15 * Copyright (c) 2010 Advanced Micro Devices, Inc.
16 * All rights reserved.
17 *
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions are
20 * met: redistributions of source code must retain the above copyright
21 * notice, this list of conditions and the following disclaimer;
22 * redistributions in binary form must reproduce the above copyright
23 * notice, this list of conditions and the following disclaimer in the
24 * documentation and/or other materials provided with the distribution;
25 * neither the name of the copyright holders nor the names of its
26 * contributors may be used to endorse or promote products derived from
27 * this software without specific prior written permission.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
30 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
31 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
32 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
33 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
34 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
35 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
36 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
37 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
38 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
39 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
40 */
41
47#include "mem/cache/mshr.hh"
48
49#include <cassert>
50#include <string>
51
52#include "base/logging.hh"
53#include "base/trace.hh"
54#include "base/types.hh"
55#include "debug/MSHR.hh"
56#include "mem/cache/base.hh"
57#include "mem/request.hh"
58
59namespace gem5
60{
61
// Constructor: every status flag starts cleared; the two TargetLists are
// named after this MSHR so their DPRINTF output is attributable.
62MSHR::MSHR(const std::string &name)
// NOTE(review): the doxygen extraction dropped original line 63 (the
// base-class / first-member initializer) -- restore it from the repository
// before attempting to compile this listing.
64 downstreamPending(false),
65 pendingModified(false),
66 postInvalidate(false), postDowngrade(false),
67 wasWholeLineWrite(false), isForward(false),
68 targets(name + ".targets"),
69 deferredTargets(name + ".deferredTargets")
70{
71}
72
// TargetList constructor: all aggregate flags begin in their reset state
// (no writable requirement, no buffered upgrades, no alloc-on-fill, and
// nothing sourced from another cache).
// NOTE(review): the signature line (original line 73) was dropped by the
// doxygen extraction; per this page's cross-references it is
// TargetList(const std::string &name = ".unnamedTargetList") -- confirm
// against the repository.
74 : Named(name),
75 needsWritable(false), hasUpgrade(false),
76 allocOnFill(false), hasFromCache(false)
77{}
78
79
// Fold one packet's properties into this TargetList's aggregate flags
// (needsWritable / hasUpgrade / allocOnFill / write-merge state).
// Snoop-sourced targets are deliberately excluded: they do not represent
// demand requests from above and must not influence these flags.
80void
// NOTE(review): original line 81 (first line of the signature:
// parameters pkt and source) was elided by the doxygen extraction.
82 bool alloc_on_fill)
83{
84 if (source != Target::FromSnoop) {
85 if (pkt->needsWritable()) {
86 needsWritable = true;
87 }
88
89 // StoreCondReq is effectively an upgrade if it's in an MSHR
90 // since it would have been failed already if we didn't have a
91 // read-only copy
92 if (pkt->isUpgrade() || pkt->cmd == MemCmd::StoreCondReq) {
93 hasUpgrade = true;
94 }
95
96 // potentially re-evaluate whether we should allocate on a fill or
97 // not
98 allocOnFill = allocOnFill || alloc_on_fill;
99
100 if (source != Target::FromPrefetcher) {
// NOTE(review): original line 101 (the hasFromCache update; see the
// hasFromCache cross-reference on this page) was elided here.
102
103 updateWriteFlags(pkt);
104 }
105 }
106}
107
// Recompute all aggregate flags from scratch by clearing them and then
// replaying updateFlags() over every target currently in the list. Used
// after the list membership changes (e.g. splices and erases).
108void
// NOTE(review): original line 109 (the signature,
// MSHR::TargetList::populateFlags()) was elided by the doxygen extraction.
110{
111 resetFlags();
112 for (auto& t: *this) {
113 updateFlags(t.pkt, t.source, t.allocOnFill);
114 }
115}
116
// Track whether the accumulated targets form a mergeable, whole-line
// write: compatible write targets mark the bytes they cover in
// writesBitmap; the first incompatible or non-write target stops further
// merging (canMergeWrites), and once the whole line is covered the state
// is frozen so later compatible reads cannot disturb it.
117void
// NOTE(review): original line 118 (the signature, taking PacketPtr pkt)
// was elided by the doxygen extraction.
119{
120 if (isWholeLineWrite()) {
121 // if we have already seen writes for the full block
122 // stop here, this might be a full line write followed
123 // by other compatible requests (e.g., reads)
124 return;
125 }
126
127 if (canMergeWrites) {
128 if (!pkt->isWrite()) {
129 // We won't allow further merging if this hasn't
130 // been a write
131 canMergeWrites = false;
132 return;
133 }
134
135 // Avoid merging requests with special flags (e.g.,
136 // strictly ordered)
137 const Request::FlagsType no_merge_flags =
// NOTE(review): original lines 138-140 (the OR-ed list of Request flag
// constants; the cross-references on this page name UNCACHEABLE,
// STRICT_ORDER, PRIVILEGED, LLSC, MEM_SWAP, SECURE and LOCKED_RMW) were
// elided -- restore the exact list from the repository.
141 const auto &req_flags = pkt->req->getFlags();
142 bool compat_write = !req_flags.isSet(no_merge_flags);
143 bool masked_write = pkt->isMaskedWrite();
144
145 // if this is the first write, it might be a whole
146 // line write and even if we can't merge any
147 // subsequent write requests, we still need to service
148 // it as a whole line write (e.g., SECURE whole line
149 // write)
150 bool first_write = empty();
151 if (!masked_write && (first_write || compat_write)) {
152 auto offset = pkt->getOffset(blkSize);
153 auto begin = writesBitmap.begin() + offset;
154 std::fill(begin, begin + pkt->getSize(), true);
155 }
156
157 // We won't allow further merging if this has been a
158 // special write
159 canMergeWrites &= compat_write;
160 }
161}
162
// Append a new target to the list: update the aggregate flags, and -- if
// requested -- mark the upstream MSHR found in the packet's SenderState
// stack as downstreamPending so it knows its request is buffered below.
163inline void
// NOTE(review): original line 164 (the first line of the signature:
// MSHR::TargetList::add(PacketPtr pkt, Tick readyTime, ...) was elided by
// the doxygen extraction.
165 Counter order, Target::Source source, bool markPending,
166 bool alloc_on_fill)
167{
168 updateFlags(pkt, source, alloc_on_fill);
169 if (markPending) {
170 // Iterate over the SenderState stack and see if we find
171 // an MSHR entry. If we do, set the downstreamPending
172 // flag. Otherwise, do nothing.
173 MSHR *mshr = pkt->findNextSenderState<MSHR>();
174 if (mshr != nullptr) {
175 assert(!mshr->downstreamPending);
176 mshr->downstreamPending = true;
177 } else {
178 // No need to clear downstreamPending later
179 markPending = false;
180 }
181 }
182
183 emplace_back(pkt, readyTime, order, source, markPending, alloc_on_fill);
184
185 DPRINTF(MSHR, "New target allocated: %s\n", pkt->print());
186}
187
188
// File-local helper: convert an upgrade-style command into the equivalent
// full-read command, used when the line the upgrade refers to has been
// invalidated while the request was buffered (UpgradeReq -> ReadExReq,
// SCUpgradeReq -> SCUpgradeFailReq, StoreCondReq -> StoreCondFailReq).
189static void
// NOTE(review): original line 190 (the signature, taking PacketPtr pkt)
// was elided by the doxygen extraction.
191{
192 // remember if the current packet has data allocated
193 bool has_data = pkt->hasData() || pkt->hasRespData();
194
195 if (pkt->cmd == MemCmd::UpgradeReq) {
196 pkt->cmd = MemCmd::ReadExReq;
197 DPRINTF(MSHR, "Replacing UpgradeReq with ReadExReq\n");
198 } else if (pkt->cmd == MemCmd::SCUpgradeReq) {
// NOTE(review): original line 199 (the assignment of the replacement
// command; per this page's cross-references, SCUpgradeFailReq) was elided.
200 DPRINTF(MSHR, "Replacing SCUpgradeReq with SCUpgradeFailReq\n");
201 } else if (pkt->cmd == MemCmd::StoreCondReq) {
// NOTE(review): original line 202 (assignment to StoreCondFailReq, per
// the cross-references) was elided.
203 DPRINTF(MSHR, "Replacing StoreCondReq with StoreCondFailReq\n");
204 }
205
206 if (!has_data) {
207 // there is no sensible way of setting the data field if the
208 // new command actually would carry data
209 assert(!pkt->hasData());
210
211 if (pkt->hasRespData()) {
212 // we went from a packet that had no data (neither request,
213 // nor response), to one that does, and therefore we need to
214 // actually allocate space for the data payload
215 pkt->allocate();
216 }
217 }
218}
219
220
// Apply the static replaceUpgrade() conversion to every target in the
// list, then clear hasUpgrade. No-op if the list never saw an upgrade.
221void
// NOTE(review): original line 222 (the signature,
// MSHR::TargetList::replaceUpgrades()) was elided by the doxygen
// extraction.
223{
224 if (!hasUpgrade)
225 return;
226
227 for (auto& t : *this) {
228 replaceUpgrade(t.pkt);
229 }
230
231 hasUpgrade = false;
232}
233
234
// For each marked-pending target in [begin, end), locate the upstream
// MSHR on the packet's SenderState stack and clear its downstreamPending
// flag (which recurses up through every cache the packet passed through),
// then clear the target's own markedPending.
235void
236MSHR::TargetList::clearDownstreamPending(MSHR::TargetList::iterator begin,
237 MSHR::TargetList::iterator end)
238{
239 for (auto t = begin; t != end; t++) {
240 if (t->markedPending) {
241 // Iterate over the SenderState stack and see if we find
242 // an MSHR entry. If we find one, clear the
243 // downstreamPending flag by calling
244 // clearDownstreamPending(). This recursively clears the
245 // downstreamPending flag in all caches this packet has
246 // passed through.
247 MSHR *mshr = t->pkt->findNextSenderState<MSHR>();
248 if (mshr != nullptr) {
// NOTE(review): original line 249 (the recursive call on mshr, per the
// comment above) was elided by the doxygen extraction.
250 }
251 t->markedPending = false;
252 }
253 }
254}
255
256void
261
262
// Functional-access helper: try to satisfy the probing packet against
// each buffered target's packet; returns true on the first hit, false if
// no target could satisfy it.
263bool
// NOTE(review): original line 264 (the signature, taking PacketPtr pkt)
// was elided by the doxygen extraction.
265{
266 for (auto& t : *this) {
267 if (pkt->trySatisfyFunctional(t.pkt)) {
268 return true;
269 }
270 }
271
272 return false;
273}
274
275
// Pretty-print every target, one line each, prefixed with its source
// (FromCPU / FromSnoop / FromPrefetcher) followed by the packet itself.
276void
277MSHR::TargetList::print(std::ostream &os, int verbosity,
278 const std::string &prefix) const
279{
280 for (auto& t : *this) {
281 const char *s;
282 switch (t.source) {
283 case Target::FromCPU:
284 s = "FromCPU";
285 break;
// NOTE(review): original line 286 (the case label for the snoop source)
// was elided by the doxygen extraction.
287 s = "FromSnoop";
288 break;
// NOTE(review): original line 289 (the case label for the prefetcher
// source) was likewise elided.
290 s = "FromPrefetcher";
291 break;
292 default:
293 s = "";
294 break;
295 }
296 ccprintf(os, "%s%s: ", prefix, s);
297 t.pkt->print(os, verbosity, "");
298 ccprintf(os, "\n");
299 }
300}
301
302
// Allocate this MSHR for a new miss: record the block address/size/secure
// bit and ordering info, reset all per-allocation state, and install the
// triggering packet as the first (and so far only) target.
303void
304MSHR::allocate(Addr blk_addr, unsigned blk_size, PacketPtr target,
305 Tick when_ready, Counter _order, bool alloc_on_fill)
306{
307 blkAddr = blk_addr;
308 blkSize = blk_size;
309 isSecure = target->isSecure();
310 readyTime = when_ready;
311 order = _order;
312 assert(target);
313 isForward = false;
314 wasWholeLineWrite = false;
315 _isUncacheable = target->req->isUncacheable();
316 inService = false;
317 downstreamPending = false;
318
// NOTE(review): original lines 319-320 (re-initialisation of the two
// target lists for this block; see TargetList::init in the
// cross-references) were elided by the doxygen extraction.
321
322 // Don't know of a case where we would allocate a new MSHR for a
323 // snoop (mem-side request), so set source according to request here
324 Target::Source source = (target->cmd == MemCmd::HardPFReq) ?
// NOTE(review): original line 325 (the two branches of this conditional,
// presumably FromPrefetcher : FromCPU -- confirm in the repository) was
// elided here.
326 targets.add(target, when_ready, _order, source, true, alloc_on_fill);
327
328 // All targets must refer to the same block
329 assert(target->matchBlockAddr(targets.front().pkt, blkSize));
330}
331
332
// Clear this MSHR's downstreamPending flag (it must currently be set) and
// propagate the clearing recursively to any upstream MSHRs whose
// responses we will be forwarding.
333void
// NOTE(review): original line 334 (the signature,
// MSHR::clearDownstreamPending()) was elided by the doxygen extraction.
335{
336 assert(downstreamPending);
337 downstreamPending = false;
338 // recursively clear flag on any MSHRs we will be forwarding
339 // responses to
// NOTE(review): original line 340 (the call on the targets list that
// performs the recursive clearing, per the comment above) was elided.
341}
342
// Mark this MSHR's request as sent downstream: set inService, derive
// pendingModified from whether we asked for (or were promised) a
// modified/writable response, and -- when nothing below is still
// buffering the request -- tell upstream caches it will be serviced.
343void
344MSHR::markInService(bool pending_modified_resp)
345{
346 assert(!inService);
347
348 inService = true;
349 pendingModified = targets.needsWritable || pending_modified_resp;
// NOTE(review): original line 350 (resetting the post-snoop state for the
// new in-service phase) was elided by the doxygen extraction.
351
352 if (!downstreamPending) {
353 // let upstream caches know that the request has made it to a
354 // level where it's going to get a response
// NOTE(review): original line 355 (the call that clears pending state on
// the targets, per the comment above) was elided.
356 }
357 // if the line is not considered a whole-line write when sent
358 // downstream, make sure it is also not considered a whole-line
359 // write when receiving the response, and vice versa
// NOTE(review): original line 360 (the wasWholeLineWrite update described
// by the comment above; see the wasWholeLineWrite cross-reference) was
// elided.
361}
362
363
// Return this MSHR to the free pool: both target lists must already be
// drained (and the deferred list back in its reset state) before the
// entry is marked out of service.
364void
// NOTE(review): original line 365 (the signature, MSHR::deallocate()) was
// elided by the doxygen extraction.
366{
367 assert(targets.empty());
// NOTE(review): original line 368 (between the two asserts; likely a
// reset/assert on the regular target list -- confirm in the repository)
// was elided.
369 assert(deferredTargets.isReset());
370 inService = false;
371}
372
373/*
374 * Adds a target to an MSHR
375 */
// Decide whether the new CPU-side target can join the in-flight request
// (regular targets list) or must wait for the current response to return
// first (deferredTargets); the conditions are spelled out in the block
// comment below.
376void
// NOTE(review): original line 377 (the first line of the signature:
// MSHR::allocateTarget(PacketPtr pkt, Tick whenReady, Counter _order,)
// was elided by the doxygen extraction.
378 bool alloc_on_fill)
379{
380 // assume we'd never issue a prefetch when we've got an
381 // outstanding miss
382 assert(pkt->cmd != MemCmd::HardPFReq);
383
384 // if there's a request already in service for this MSHR, we will
385 // have to defer the new target until after the response if any of
386 // the following are true:
387 // - there are other targets already deferred
388 // - there's a pending invalidate to be applied after the response
389 // comes back (but before this target is processed)
390 // - the MSHR's first (and only) non-deferred target is a cache
391 // maintenance packet
392 // - the new target is a cache maintenance packet (this is probably
393 // overly conservative but certainly safe)
394 // - this target requires a writable block and either we're not
395 // getting a writable block back or we have already snooped
396 // another read request that will downgrade our writable block
397 // to non-writable (Shared or Owned)
398 PacketPtr tgt_pkt = targets.front().pkt;
399 if (pkt->req->isCacheMaintenance() ||
400 tgt_pkt->req->isCacheMaintenance() ||
401 !deferredTargets.empty() ||
402 (inService &&
// NOTE(review): original line 403 (part of the condition: the pending-
// invalidate check described in the comment above) was elided by the
// doxygen extraction.
404 (pkt->needsWritable() &&
// NOTE(review): original line 405 (the remainder of the writable-block
// condition described above) was likewise elided.
406 // need to put on deferred list
// NOTE(review): original line 407 (the guard in front of the
// replaceUpgrade(pkt) call below) was elided.
408 replaceUpgrade(pkt);
409 deferredTargets.add(pkt, whenReady, _order, Target::FromCPU, true,
410 alloc_on_fill);
411 } else {
412 // No request outstanding, or still OK to append to
413 // outstanding request: append to regular target list. Only
414 // mark pending if current request hasn't been issued yet
415 // (isn't in service).
416 targets.add(pkt, whenReady, _order, Target::FromCPU, !inService,
417 alloc_on_fill);
418 }
419
420 DPRINTF(MSHR, "After target allocation: %s", print());
421}
422
// Handle a snoop against this MSHR. Returns true when this cache will
// respond to the snoop (will_respond), or when the block is logically
// already gone (prior invalidate). Depending on ordering, the snoop may
// be ignored, converted into buffered-upgrade replacements, or saved as a
// FromSnoop target to be replayed when the response arrives; the
// postInvalidate / postDowngrade flags record its effect on our copy.
423bool
// NOTE(review): original line 424 (the signature, taking PacketPtr pkt
// and a Counter order -- see the handleSnoop cross-reference) was elided
// by the doxygen extraction.
425{
426 DPRINTF(MSHR, "%s for %s\n", __func__, pkt->print());
427
428 // when we snoop packets the needsWritable and isInvalidate flags
429 // should always be the same, however, this assumes that we never
430 // snoop writes as they are currently not marked as invalidations
431 panic_if((pkt->needsWritable() != pkt->isInvalidate()) &&
432 !pkt->req->isCacheMaintenance(),
433 "%s got snoop %s where needsWritable, "
434 "does not match isInvalidate", name(), pkt->print());
435
436 if (!inService || (pkt->isExpressSnoop() && downstreamPending)) {
437 // Request has not been issued yet, or it's been issued
438 // locally but is buffered unissued at some downstream cache
439 // which is forwarding us this snoop. Either way, the packet
440 // we're snooping logically precedes this MSHR's request, so
441 // the snoop has no impact on the MSHR, but must be processed
442 // in the standard way by the cache. The only exception is
443 // that if we're an L2+ cache buffering an UpgradeReq from a
444 // higher-level cache, and the snoop is invalidating, then our
445 // buffered upgrades must be converted to read exclusives,
446 // since the upper-level cache no longer has a valid copy.
447 // That is, even though the upper-level cache got out on its
448 // local bus first, some other invalidating transaction
449 // reached the global bus before the upgrade did.
450 if (pkt->needsWritable() || pkt->req->isCacheInvalidate()) {
// NOTE(review): original lines 451-452 (the upgrade replacement on the
// regular target list, per the comment above) were elided by the doxygen
// extraction.
453 }
454
455 return false;
456 }
457
458 // From here on down, the request issued by this MSHR logically
459 // precedes the request we're snooping.
460 if (pkt->needsWritable() || pkt->req->isCacheInvalidate()) {
461 // snooped request still precedes the re-request we'll have to
462 // issue for deferred targets, if any...
// NOTE(review): original line 463 (the upgrade replacement on the
// deferred target list, per the comment above) was elided.
464 }
465
466 PacketPtr tgt_pkt = targets.front().pkt;
467 if (hasPostInvalidate() || tgt_pkt->req->isCacheInvalidate()) {
468 // a prior snoop has already appended an invalidation or a
469 // cache invalidation operation is in progress, so logically
470 // we don't have the block anymore; no need for further
471 // snooping.
472 return true;
473 }
474
475 // Start by determining if we will eventually respond or not,
476 // matching the conditions checked in Cache::handleSnoop
477 const bool will_respond = isPendingModified() && pkt->needsResponse() &&
478 !pkt->isClean();
479 if (isPendingModified() || pkt->isInvalidate()) {
480 // We need to save and replay the packet in two cases:
481 // 1. We're awaiting a writable copy (Modified or Exclusive),
482 // so this MSHR is the orgering point, and we need to respond
483 // after we receive data.
484 // 2. It's an invalidation (e.g., UpgradeReq), and we need
485 // to forward the snoop up the hierarchy after the current
486 // transaction completes.
487
488 // The packet we are snooping may be deleted by the time we
489 // actually process the target, and we consequently need to
490 // save a copy here. Clear flags and also allocate new data as
491 // the original packet data storage may have been deleted by
492 // the time we get to process this packet. In the cases where
493 // we are not responding after handling the snoop we also need
494 // to create a copy of the request to be on the safe side. In
495 // the latter case the cache is responsible for deleting both
496 // the packet and the request as part of handling the deferred
497 // snoop.
498 PacketPtr cp_pkt = will_respond ? new Packet(pkt, true, true) :
499 new Packet(std::make_shared<Request>(*pkt->req), pkt->cmd,
500 blkSize, pkt->id);
501
502 if (will_respond) {
503 // we are the ordering point, and will consequently
504 // respond, and depending on whether the packet
505 // needsWritable or not we either pass a Shared line or a
506 // Modified line
507 pkt->setCacheResponding();
508
509 // inform the cache hierarchy that this cache had the line
510 // in the Modified state, even if the response is passed
511 // as Shared (and thus non-writable)
// NOTE(review): original line 512 (the call described by the comment
// above; see the setResponderHadWritable cross-reference) was elided.
513
514 // in the case of an uncacheable request there is no need
515 // to set the responderHadWritable flag, but since the
516 // recipient does not care there is no harm in doing so
517 } else if (isPendingModified() && pkt->isClean()) {
518 // this cache doesn't respond to the clean request, a
519 // destination xbar will respond to this request, but to
520 // do so it needs to know if it should wait for the
521 // WriteCleanReq
522 pkt->setSatisfied();
523 }
524
525 targets.add(cp_pkt, curTick(), _order, Target::FromSnoop,
// NOTE(review): original line 526 (the remaining arguments of this
// targets.add call) was elided by the doxygen extraction.
527
528 if (pkt->needsWritable() || pkt->isInvalidate()) {
529 // This transaction will take away our pending copy
530 postInvalidate = true;
531 }
532 }
533
534 if (!pkt->needsWritable() && !pkt->req->isUncacheable()) {
535 // This transaction will get a read-shared copy, downgrading
536 // our copy if we had a writable one
537 postDowngrade = true;
538 // make sure that any downstream cache does not respond with a
539 // writable (and dirty) copy even if it has one, unless it was
540 // explicitly asked for one
541 pkt->setHasSharers();
542 }
543
544 return will_respond;
545}
546
// Extract the subset of targets that can be serviced by the received
// response into a fresh TargetList. On an invalidating response only the
// first FromCPU/FromPrefetcher target plus all FromSnoop targets are
// serviced (remaining CPU targets must re-request a fresh copy);
// otherwise targets are drained in order. In both paths a
// LockedRMWReadReq is left in place until its paired Locked Write
// arrives, and no later targets are serviced past it.
// NOTE(review): original lines 547-548 (the signature; per the
// cross-references: MSHR::TargetList
// MSHR::extractServiceableTargets(PacketPtr pkt)) were elided by the
// doxygen extraction.
549{
550 TargetList ready_targets;
551 ready_targets.init(blkAddr, blkSize);
552 // If the downstream MSHR got an invalidation request then we only
553 // service the first of the FromCPU targets and any other
554 // non-FromCPU target. This way the remaining FromCPU targets
555 // issue a new request and get a fresh copy of the block and we
556 // avoid memory consistency violations.
// NOTE(review): original line 557 (the opening if that tests for an
// invalidating response; see the ReadRespWithInvalidate cross-reference)
// was elided here.
558 auto it = targets.begin();
559 assert((it->source == Target::FromCPU) ||
560 (it->source == Target::FromPrefetcher));
561 ready_targets.push_back(*it);
562 // Leave the Locked RMW Read until the corresponding Locked Write
563 // request comes in
564 if (it->pkt->cmd != MemCmd::LockedRMWReadReq) {
565 it = targets.erase(it);
566 while (it != targets.end()) {
567 if (it->source == Target::FromCPU) {
568 it++;
569 } else {
570 assert(it->source == Target::FromSnoop);
571 ready_targets.push_back(*it);
572 it = targets.erase(it);
573 }
574 }
575 }
576 ready_targets.populateFlags();
577 } else {
578 auto it = targets.begin();
579 while (it != targets.end()) {
580 ready_targets.push_back(*it);
581 if (it->pkt->cmd == MemCmd::LockedRMWReadReq) {
582 // Leave the Locked RMW Read until the corresponding Locked
583 // Write comes in. Also don't service any later targets as the
584 // line is now "locked".
585 break;
586 }
587 it = targets.erase(it);
588 }
589 ready_targets.populateFlags();
590 }
// NOTE(review): original line 591 (refreshing the flags of the remaining
// targets list after the erases) was elided by the doxygen extraction.
592
593 return ready_targets;
594}
595
// Move deferred targets up to the regular target list so a new downstream
// request can be issued for them. Cache-maintenance targets bound the
// promotion: everything before the first CM target is promoted; a CM
// target at the head is promoted alone (and only onto an empty list).
// Returns false when there is nothing at all to promote.
596bool
// NOTE(review): original line 597 (the signature,
// MSHR::promoteDeferredTargets()) was elided by the doxygen extraction.
598{
599 if (targets.empty() && deferredTargets.empty()) {
600 // nothing to promote
601 return false;
602 }
603
604 // the deferred targets can be generally promoted unless they
605 // contain a cache maintenance request
606
607 // find the first target that is a cache maintenance request
608 auto it = std::find_if(deferredTargets.begin(), deferredTargets.end(),
609 [](MSHR::Target &t) {
610 return t.pkt->req->isCacheMaintenance();
611 });
612 if (it == deferredTargets.begin()) {
613 // if the first deferred target is a cache maintenance packet
614 // then we can promote provided the targets list is empty and
615 // we can service it on its own
616 if (targets.empty()) {
617 targets.splice(targets.end(), deferredTargets, it);
618 }
619 } else {
620 // if a cache maintenance operation exists, we promote all the
621 // deferred targets that precede it, or all deferred targets
622 // otherwise
623 targets.splice(targets.end(), deferredTargets,
624 deferredTargets.begin(), it);
625 }
626
// NOTE(review): original lines 627-628 (re-populating the flags of both
// target lists after the splice) were elided by the doxygen extraction.
629 order = targets.front().order;
630 readyTime = std::max(curTick(), targets.front().readyTime);
631
632 return true;
633}
634
// Promote the longest prefix of deferredTargets satisfying pred: clear
// their downstreamPending markings and splice them onto the end of the
// regular targets list, then refresh both lists' flags.
635void
636MSHR::promoteIf(const std::function<bool (Target &)>& pred)
637{
638 // if any of the deferred targets were upper-level cache
639 // requests marked downstreamPending, need to clear that
640 assert(!downstreamPending); // not pending here anymore
641
642 // find the first target does not satisfy the condition
643 auto last_it = std::find_if_not(deferredTargets.begin(),
644 deferredTargets.end(),
645 pred);
646
647 // for the prefix of the deferredTargets [begin(), last_it) clear
648 // the downstreamPending flag and move them to the target list
// NOTE(review): original line 649 (the first line of the
// clearDownstreamPending(begin, last_it) call that line 650 completes)
// was elided by the doxygen extraction.
650 last_it);
651 targets.splice(targets.end(), deferredTargets,
652 deferredTargets.begin(), last_it);
653 // We need to update the flags for the target lists after the
654 // modifications
// NOTE(review): original line 655 (the populateFlags() calls described by
// the comment above) was elided.
656}
657
// After a non-invalidating response, promote deferred FromCPU targets
// that neither invalidate nor need a writable copy -- they can be
// serviced with the (read-only) block we now hold.
658void
// NOTE(review): original line 659 (the signature, MSHR::promoteReadable())
// was elided by the doxygen extraction.
660{
661 if (!deferredTargets.empty() && !hasPostInvalidate()) {
662 // We got a non invalidating response, and we have the block
663 // but we have deferred targets which are waiting and they do
664 // not need writable. This can happen if the original request
665 // was for a cache clean operation and we had a copy of the
666 // block. Since we serviced the cache clean operation and we
667 // have the block, there's no need to defer the targets, so
668 // move them up to the regular target list.
669
670 auto pred = [](Target &t) {
671 assert(t.source == Target::FromCPU);
672 return !t.pkt->req->isCacheInvalidate() &&
673 !t.pkt->needsWritable();
674 };
// NOTE(review): original line 675 (the promoteIf(pred) call that applies
// the predicate above) was elided by the doxygen extraction.
676 }
677}
678
// After a writable response arrives, promote deferred targets that were
// only waiting for writability (and are not cache invalidates), marking
// the regular list as needsWritable in the process.
679void
// NOTE(review): original line 680 (the signature, MSHR::promoteWritable())
// was elided by the doxygen extraction.
681{
682 if (deferredTargets.empty()) {
683 return;
684 }
685 PacketPtr def_tgt_pkt = deferredTargets.front().pkt;
// NOTE(review): original lines 686-687 (the first part of this condition:
// the deferred list wants a writable copy and there is no post-snoop
// invalidate/downgrade pending -- confirm in the repository) were elided.
688 !def_tgt_pkt->req->isCacheInvalidate()) {
689 // We got a writable response, but we have deferred targets
690 // which are waiting to request a writable copy (not because
691 // of a pending invalidate). This can happen if the original
692 // request was for a read-only block, but we got a writable
693 // response anyway. Since we got the writable copy there's no
694 // need to defer the targets, so move them up to the regular
695 // target list.
696 assert(!targets.needsWritable);
697 targets.needsWritable = true;
698
699 auto pred = [](Target &t) {
700 assert(t.source == Target::FromCPU);
701 return !t.pkt->req->isCacheInvalidate();
702 };
703
// NOTE(review): original line 704 (the promoteIf(pred) call that applies
// the predicate above) was elided by the doxygen extraction.
705 }
706}
707
708
// Functional access entry point for the MSHR: print-probes treat the MSHR
// as a single named entity (and never "satisfy" the access); data probes
// are delegated to the individual targets, where the data actually lives.
709bool
// NOTE(review): original line 710 (the signature, taking PacketPtr pkt)
// was elided by the doxygen extraction.
711{
712 // For printing, we treat the MSHR as a whole as single entity.
713 // For other requests, we iterate over the individual targets
714 // since that's where the actual data lies.
715 if (pkt->isPrint()) {
716 pkt->trySatisfyFunctional(this, blkAddr, isSecure, blkSize, nullptr);
717 return false;
718 } else {
719 return (targets.trySatisfyFunctional(pkt) ||
// NOTE(review): original line 720 (the second operand of the ||, checking
// the deferred target list) was elided by the doxygen extraction.
721 }
722}
723
// QueueEntry hook: ask the owning cache to turn this MSHR into a
// downstream packet and send it; the return value is the cache's verdict
// on whether the send succeeded.
724bool
// NOTE(review): original line 725 (the signature; per the cross-
// references: MSHR::sendPacket(BaseCache &cache)) was elided by the
// doxygen extraction.
726{
727 return cache.sendMSHRQueuePacket(this);
728}
729
// Dump a one-line summary of the MSHR (address range, secure/non-secure,
// and every status flag), followed by the regular and deferred target
// lists when they are non-empty.
730void
731MSHR::print(std::ostream &os, int verbosity, const std::string &prefix) const
732{
733 ccprintf(os, "%s[%#llx:%#llx](%s) %s %s %s state: %s %s %s %s %s %s\n",
734 prefix, blkAddr, blkAddr + blkSize - 1,
735 isSecure ? "s" : "ns",
736 isForward ? "Forward" : "",
737 allocOnFill() ? "AllocOnFill" : "",
738 needsWritable() ? "Wrtbl" : "",
739 _isUncacheable ? "Unc" : "",
740 inService ? "InSvc" : "",
741 downstreamPending ? "DwnPend" : "",
742 postInvalidate ? "PostInv" : "",
743 postDowngrade ? "PostDowngr" : "",
744 hasFromCache() ? "HasFromCache" : "");
745
746 if (!targets.empty()) {
747 ccprintf(os, "%s Targets:\n", prefix);
748 targets.print(os, verbosity, prefix + " ");
749 }
750 if (!deferredTargets.empty()) {
751 ccprintf(os, "%s Deferred Targets:\n", prefix);
752 deferredTargets.print(os, verbosity, prefix + " ");
753 }
754}
755
// No-argument convenience wrapper: render print(std::ostream&) into a
// std::string, mainly for use inside DPRINTF format arguments.
756std::string
// NOTE(review): original line 757 (the signature, MSHR::print() const) was
// elided by the doxygen extraction.
758{
759 std::ostringstream str;
760 print(str);
761 return str.str();
762}
763
// QueueEntry interface: true when this (allocated) MSHR covers the given
// block address in the given (secure / non-secure) address space.
764bool
765MSHR::matchBlockAddr(const Addr addr, const bool is_secure) const
766{
767 assert(hasTargets());
768 return (blkAddr == addr) && (isSecure == is_secure);
769}
770
// Packet-based overload: defer to Packet::matchBlockAddr with this MSHR's
// block address, secure bit and block size.
771bool
// NOTE(review): original line 772 (the signature, taking a packet) was
// elided by the doxygen extraction.
773{
774 assert(hasTargets());
775 return pkt->matchBlockAddr(blkAddr, isSecure, blkSize);
776}
777
// True when the other queue entry targets the same block as this MSHR
// (checked via the other entry's matchBlockAddr).
778bool
// NOTE(review): original line 779 (the signature; per the cross-
// references it takes const QueueEntry *entry) was elided by the doxygen
// extraction.
780{
781 assert(hasTargets());
782 return entry->matchBlockAddr(blkAddr, isSecure);
783}
784
// Replace the front target's packet (which must be the given pkt) with a
// fresh LockedRMWReadReq carrying a copy of the request, so the MSHR
// stays allocated while waiting for the paired Locked Write.
785void
// NOTE(review): original line 786 (the signature, taking PacketPtr pkt)
// was elided by the doxygen extraction.
787{
788 assert(!targets.empty() && targets.front().pkt == pkt);
789 RequestPtr r = std::make_shared<Request>(*(pkt->req));
790 targets.front().pkt = new Packet(r, MemCmd::LockedRMWReadReq);
791}
792
// True when the front of the regular target list is a LockedRMWReadReq,
// i.e. the line is held pending the corresponding Locked Write.
793bool
// NOTE(review): original line 794 (the signature,
// MSHR::hasLockedRMWReadTarget()) was elided by the doxygen extraction.
795{
796 if (!targets.empty() &&
797 targets.front().pkt->cmd == MemCmd::LockedRMWReadReq) {
798 return true;
799 }
800 return false;
801}
802
803
804} // namespace gem5
#define DPRINTF(x,...)
Definition trace.hh:210
Defines global host-dependent types: Counter, Tick, and (indirectly) {int,uint}{8,...
A basic cache interface.
Definition base.hh:100
virtual bool sendMSHRQueuePacket(MSHR *mshr)
Take an MSHR, turn it into a suitable downstream packet, and send it out.
Definition base.cc:1882
void updateFlags(PacketPtr pkt, Target::Source source, bool alloc_on_fill)
Use the provided packet and the source to update the flags of this TargetList.
Definition mshr.cc:81
void replaceUpgrades()
Convert upgrades to the equivalent request if the cache line they refer to would have been invalid (U...
Definition mshr.cc:222
void populateFlags()
Goes through the list of targets and uses them to populate the flags of this TargetList.
Definition mshr.cc:109
bool trySatisfyFunctional(PacketPtr pkt)
Definition mshr.cc:264
bool isReset() const
Tests if the flags of this TargetList have their default values.
Definition mshr.hh:243
void add(PacketPtr pkt, Tick readyTime, Counter order, Target::Source source, bool markPending, bool alloc_on_fill)
Add the specified packet in the TargetList.
Definition mshr.cc:164
void print(std::ostream &os, int verbosity, const std::string &prefix) const
Definition mshr.cc:277
void updateWriteFlags(PacketPtr pkt)
Add the specified packet in the TargetList.
Definition mshr.cc:118
TargetList(const std::string &name=".unnamedTargetList")
Definition mshr.cc:73
void init(Addr blk_addr, Addr blk_size)
Reset state.
Definition mshr.hh:202
void clearDownstreamPending()
Definition mshr.cc:257
Miss Status and handling Register.
Definition mshr.hh:75
bool postInvalidate
Did we snoop an invalidate while waiting for data?
Definition mshr.hh:116
TargetList targets
List of all requests that match the address.
Definition mshr.hh:394
void clearDownstreamPending()
Definition mshr.cc:334
bool wasWholeLineWrite
Track if we sent this as a whole line write or not.
Definition mshr.hh:124
void updateLockedRMWReadTarget(PacketPtr pkt)
Replaces the matching packet in the Targets list with a dummy packet to ensure the MSHR remains alloc...
Definition mshr.cc:786
std::string print() const
A no-args wrapper of print(std::ostream...) meant to be invoked from DPRINTFs avoiding string overhea...
Definition mshr.cc:757
MSHR(const std::string &name)
A simple constructor.
Definition mshr.cc:62
void promoteIf(const std::function< bool(Target &)> &pred)
Promotes deferred targets that satisfy a predicate.
Definition mshr.cc:636
void markInService(bool pending_modified_resp)
Definition mshr.cc:344
bool downstreamPending
Flag set by downstream caches.
Definition mshr.hh:87
TargetList extractServiceableTargets(PacketPtr pkt)
Extracts the subset of the targets that can be serviced given a received response.
Definition mshr.cc:548
bool isPendingModified() const
Definition mshr.hh:326
bool postDowngrade
Did we snoop a read while waiting for data?
Definition mshr.hh:119
bool conflictAddr(const QueueEntry *entry) const override
Check if given entry's packets conflict with this' entries packets.
Definition mshr.cc:779
void promoteReadable()
Promotes deferred targets that do not require writable.
Definition mshr.cc:659
bool pendingModified
Here we use one flag to track both if:
Definition mshr.hh:113
void allocate(Addr blk_addr, unsigned blk_size, PacketPtr pkt, Tick when_ready, Counter _order, bool alloc_on_fill)
Allocate a miss to this MSHR.
Definition mshr.cc:304
TargetList deferredTargets
Definition mshr.hh:396
bool sendPacket(BaseCache &cache) override
Send this queue entry as a downstream packet, with the exact behaviour depending on the specific entr...
Definition mshr.cc:725
bool trySatisfyFunctional(PacketPtr pkt)
Definition mshr.cc:710
bool hasPostDowngrade() const
Definition mshr.hh:334
bool matchBlockAddr(const Addr addr, const bool is_secure) const override
Check if entry corresponds to the one being looked for.
Definition mshr.cc:765
bool handleSnoop(PacketPtr target, Counter order)
Definition mshr.cc:424
bool needsWritable() const
The pending* and post* flags are only valid if inService is true.
Definition mshr.hh:319
bool isForward
True if the entry is just a simple forward from an upper level.
Definition mshr.hh:127
bool hasLockedRMWReadTarget()
Determine if there are any LockedRMWReads in the Targets list.
Definition mshr.cc:794
bool hasFromCache() const
Determine if there are non-deferred requests from other caches.
Definition mshr.hh:349
bool promoteDeferredTargets()
Definition mshr.cc:597
bool isWholeLineWrite() const
Check if this MSHR contains only compatible writes, and if they span the entire cache line.
Definition mshr.hh:406
void allocateTarget(PacketPtr target, Tick when, Counter order, bool alloc_on_fill)
Add a request to the list of targets.
Definition mshr.cc:377
void promoteWritable()
Promotes deferred targets that do not require writable.
Definition mshr.cc:680
bool hasPostInvalidate() const
Definition mshr.hh:330
void deallocate()
Mark this MSHR as free.
Definition mshr.cc:365
bool allocOnFill() const
Definition mshr.hh:340
bool hasTargets() const
Returns true if there are targets left.
Definition mshr.hh:467
@ ReadRespWithInvalidate
Definition packet.hh:89
@ StoreCondFailReq
Definition packet.hh:114
@ LockedRMWReadReq
Definition packet.hh:116
@ SCUpgradeFailReq
Definition packet.hh:106
Interface for things with names.
Definition named.hh:39
virtual std::string name() const
Definition named.hh:47
A Packet is used to encapsulate a transfer between two objects in the memory system (e....
Definition packet.hh:295
bool isUpgrade() const
Definition packet.hh:596
bool isSecure() const
Definition packet.hh:836
const PacketId id
Definition packet.hh:374
bool needsWritable() const
Definition packet.hh:599
void print(std::ostream &o, int verbosity=0, const std::string &prefix="") const
Definition packet.cc:368
bool needsResponse() const
Definition packet.hh:608
T * findNextSenderState() const
Go through the sender state stack and return the first instance that is of type T (as determined by a...
Definition packet.hh:575
void setResponderHadWritable()
On responding to a snoop request (which only happens for Modified or Owned lines),...
Definition packet.hh:713
Addr getOffset(unsigned int blk_size) const
Definition packet.hh:826
bool hasData() const
Definition packet.hh:614
bool hasRespData() const
Definition packet.hh:615
bool fromCache() const
Definition packet.hh:612
bool isWrite() const
Definition packet.hh:594
bool trySatisfyFunctional(PacketPtr other)
Check a functional request against a memory value stored in another packet (i.e.
Definition packet.hh:1399
RequestPtr req
A pointer to the original request.
Definition packet.hh:377
bool isPrint() const
Definition packet.hh:623
unsigned getSize() const
Definition packet.hh:817
void setCacheResponding()
Snoop flags.
Definition packet.hh:653
bool isClean() const
Definition packet.hh:611
bool isExpressSnoop() const
Definition packet.hh:702
void setHasSharers()
On fills, the hasSharers flag is used by the caches in combination with the cacheResponding flag,...
Definition packet.hh:685
bool matchBlockAddr(const Addr addr, const bool is_secure, const int blk_size) const
Check if packet corresponds to a given block-aligned address and address space.
Definition packet.cc:389
void setSatisfied()
Set when a request hits in a cache and the cache is not going to respond.
Definition packet.hh:749
MemCmd cmd
The command field of the packet.
Definition packet.hh:372
bool isMaskedWrite() const
Definition packet.hh:1450
bool isInvalidate() const
Definition packet.hh:609
void allocate()
Allocate memory for the packet.
Definition packet.hh:1367
A queue entry base class, to be used by both the MSHRs and write-queue entries.
bool _isUncacheable
True if the entry is uncacheable.
unsigned blkSize
Block size of the cache.
virtual bool matchBlockAddr(const Addr addr, const bool is_secure) const =0
Check if entry corresponds to the one being looked for.
Addr blkAddr
Block aligned address.
Counter order
Order number assigned to disambiguate writes and misses.
bool inService
True if the entry has been sent downstream.
bool isSecure
True if the entry targets the secure memory space.
Tick readyTime
Tick when ready to issue.
@ SECURE
The request targets the secure memory space.
Definition request.hh:186
@ LOCKED_RMW
This request will lock or unlock the accessed memory.
Definition request.hh:154
@ STRICT_ORDER
The request is required to be strictly ordered by CPU models and is non-speculative.
Definition request.hh:135
@ UNCACHEABLE
The request is to an uncacheable address.
Definition request.hh:125
@ PRIVILEGED
This request is made in privileged mode.
Definition request.hh:137
@ MEM_SWAP
This request is for a memory swap.
Definition request.hh:158
@ LLSC
The request is a Load locked/store conditional.
Definition request.hh:156
uint64_t FlagsType
Definition request.hh:100
#define panic_if(cond,...)
Conditional panic macro that checks the supplied condition and only panics if the condition is true a...
Definition logging.hh:214
Declares a basic cache interface BaseCache.
Miss Status and Handling Register (MSHR) declaration.
Bitfield< 5 > t
Definition misc_types.hh:71
Bitfield< 4 > s
Bitfield< 23, 0 > offset
Definition types.hh:144
Bitfield< 27, 24 > pred
Definition types.hh:90
Bitfield< 17 > os
Definition misc.hh:838
Bitfield< 3 > addr
Definition types.hh:84
double Counter
All counters are of 64-bit values.
Definition types.hh:46
Copyright (c) 2024 - Pranith Kumar Copyright (c) 2020 Inria All rights reserved.
Definition binary32.hh:36
std::shared_ptr< Request > RequestPtr
Definition request.hh:94
Tick curTick()
The universal simulation clock.
Definition cur_tick.hh:46
uint64_t Addr
Address type This will probably be moved somewhere else in the near future.
Definition types.hh:147
uint64_t Tick
Tick count type.
Definition types.hh:58
static void replaceUpgrade(PacketPtr pkt)
Definition mshr.cc:190
void ccprintf(cp::Print &print)
Definition cprintf.hh:130
Declaration of a request, the overall memory request consisting of the parts of the request that are ...
const std::string & name()
Definition trace.cc:48

Generated on Tue Jun 18 2024 16:24:05 for gem5 by doxygen 1.11.0