gem5 [DEVELOP-FOR-25.1]
Loading...
Searching...
No Matches
mshr.cc
Go to the documentation of this file.
1/*
2 * Copyright (c) 2012-2013, 2015-2019 ARM Limited
3 * All rights reserved.
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Copyright (c) 2002-2005 The Regents of The University of Michigan
15 * Copyright (c) 2010 Advanced Micro Devices, Inc.
16 * All rights reserved.
17 *
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions are
20 * met: redistributions of source code must retain the above copyright
21 * notice, this list of conditions and the following disclaimer;
22 * redistributions in binary form must reproduce the above copyright
23 * notice, this list of conditions and the following disclaimer in the
24 * documentation and/or other materials provided with the distribution;
25 * neither the name of the copyright holders nor the names of its
26 * contributors may be used to endorse or promote products derived from
27 * this software without specific prior written permission.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
30 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
31 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
32 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
33 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
34 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
35 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
36 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
37 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
38 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
39 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
40 */
41
46
47#include "mem/cache/mshr.hh"
48
49#include <cassert>
50#include <string>
51
52#include "base/logging.hh"
53#include "base/trace.hh"
54#include "base/types.hh"
55#include "debug/MSHR.hh"
56#include "mem/cache/base.hh"
57#include "mem/request.hh"
58
59namespace gem5
60{
61
// MSHR constructor: clears the coherence bookkeeping flags and names the two
// target lists. NOTE(review): the leading initializer-list lines (original
// lines 63 and 65, presumably the QueueEntry base init and downstreamPending
// et al.) are elided in this dump — confirm against mshr.cc.
62MSHR::MSHR(const std::string &name)
 64 pendingModified(false), postInvalidate(false), postDowngrade(false),
 66 targets(name + ".targets"), deferredTargets(name + ".deferredTargets")
 67{
 68}
 69
// TargetList constructor (signature line elided in this dump): a fresh list
// has no flags set and allows write merging until an incompatible target
// arrives (see updateWriteFlags below).
 71 : Named(name), needsWritable(false), hasUpgrade(false), allocOnFill(false),
 72 hasFromCache(false), canMergeWrites(true)
 73{}
74
75
// MSHR::TargetList::updateFlags(PacketPtr pkt, Target::Source source,
//                               bool alloc_on_fill)
// Folds one target's properties into the list-wide flags. Snoop targets are
// ignored entirely: they never make the list need a writable copy, count as
// an upgrade, or affect allocate-on-fill. (Signature line, original line 77,
// is elided in this dump.)
76void
 78 bool alloc_on_fill)
 79{
 80 if (source != Target::FromSnoop) {
 81 if (pkt->needsWritable()) {
 82 needsWritable = true;
 83 }
 84
 85 // StoreCondReq is effectively an upgrade if it's in an MSHR
 86 // since it would have been failed already if we didn't have a
 87 // read-only copy
 88 if (pkt->isUpgrade() || pkt->cmd == MemCmd::StoreCondReq) {
 89 hasUpgrade = true;
 90 }
 91
 92 // potentially re-evaluate whether we should allocate on a fill or
 93 // not
 94 allocOnFill = allocOnFill || alloc_on_fill;
 95
 96 if (source != Target::FromPrefetcher) {
// NOTE(review): body elided in this dump (original lines 97-99); per the
// hasFromCache member doc it presumably ORs in pkt->fromCache() — confirm.
 98
 100 }
 101 }
 102}
103
// MSHR::TargetList::populateFlags()
// Recomputes all aggregate flags from scratch by resetting them and replaying
// every target through updateFlags. Used after targets are spliced/removed.
// (Signature line, original line 105, is elided in this dump.)
104void
 106{
 107 resetFlags();
 108 for (auto& t: *this) {
 109 updateFlags(t.pkt, t.source, t.allocOnFill);
 110 }
 111}
112
// MSHR::TargetList::updateWriteFlags(PacketPtr pkt)
// Tracks byte coverage of mergeable writes in writesBitmap so the MSHR can
// detect a whole-line write. Merging stops permanently at the first
// non-write or specially-flagged request. (Signature line, original line
// 114, is elided in this dump.)
113void
 115{
 116 if (isWholeLineWrite()) {
 117 // if we have already seen writes for the full block
 118 // stop here, this might be a full line write followed
 119 // by other compatible requests (e.g., reads)
 120 return;
 121 }
 122
 123 if (canMergeWrites) {
 124 if (!pkt->isWrite()) {
 125 // We won't allow further merging if this hasn't
 126 // been a write
 127 canMergeWrites = false;
 128 return;
 129 }
 130
 131 // Avoid merging requests with special flags (e.g.,
 132 // strictly ordered)
 133 const Request::FlagsType no_merge_flags =
// NOTE(review): the flag constants (original lines 134-136) are elided in
// this dump; presumably the UNCACHEABLE/STRICT_ORDER/LOCKED_RMW/LLSC/
// PRIVILEGED/MEM_SWAP family listed in the Request flag docs — confirm.
 137 const auto &req_flags = pkt->req->getFlags();
 138 bool compat_write = !req_flags.isSet(no_merge_flags);
 139 bool masked_write = pkt->isMaskedWrite();
 140
 141 // if this is the first write, it might be a whole
 142 // line write and even if we can't merge any
 143 // subsequent write requests, we still need to service
 144 // it as a whole line write (e.g., SECURE whole line
 145 // write)
 146 bool first_write = empty();
 147 if (!masked_write && (first_write || compat_write)) {
 148 auto offset = pkt->getOffset(blkSize);
 149 auto begin = writesBitmap.begin() + offset;
 150 std::fill(begin, begin + pkt->getSize(), true);
 151 }
 152
 153 // We won't allow further merging if this has been a
 154 // special write
 155 canMergeWrites &= compat_write;
 156 }
 157}
158
// MSHR::TargetList::add(PacketPtr pkt, Tick readyTime, Counter order,
//                       Target::Source source, bool markPending,
//                       bool alloc_on_fill)
// Appends a target, updating the aggregate flags first. If markPending, the
// nearest upstream MSHR found in the packet's SenderState stack is flagged
// as having a downstream response pending; if no such MSHR exists the
// pending mark is dropped so it never needs clearing. (First signature line,
// original line 160, is elided in this dump.)
159inline void
 161 Counter order, Target::Source source, bool markPending,
 162 bool alloc_on_fill)
 163{
 164 updateFlags(pkt, source, alloc_on_fill);
 165 if (markPending) {
 166 // Iterate over the SenderState stack and see if we find
 167 // an MSHR entry. If we do, set the downstreamPending
 168 // flag. Otherwise, do nothing.
 169 MSHR *mshr = pkt->findNextSenderState<MSHR>();
 170 if (mshr != nullptr) {
 171 assert(!mshr->downstreamPending);
 172 mshr->downstreamPending = true;
 173 } else {
 174 // No need to clear downstreamPending later
 175 markPending = false;
 176 }
 177 }
 178
 179 emplace_back(pkt, readyTime, order, source, markPending, alloc_on_fill);
 180
 181 DPRINTF(MSHR, "New target allocated: %s\n", pkt->print());
183
184
// static replaceUpgrade(PacketPtr pkt)
// Downgrades a buffered upgrade-style command to its invalid-copy
// equivalent (used when a snoop invalidated the copy the upgrade assumed),
// allocating response storage if the new command now carries data.
// (Signature line, original line 186, is elided in this dump; the two
// elided assignments at original lines 195 and 198 presumably set
// MemCmd::SCUpgradeFailReq and MemCmd::StoreCondFailReq respectively, per
// the adjacent DPRINTFs — confirm against mshr.cc.)
185static void
 187{
 188 // remember if the current packet has data allocated
 189 bool has_data = pkt->hasData() || pkt->hasRespData();
 190
 191 if (pkt->cmd == MemCmd::UpgradeReq) {
 192 pkt->cmd = MemCmd::ReadExReq;
 193 DPRINTF(MSHR, "Replacing UpgradeReq with ReadExReq\n");
 194 } else if (pkt->cmd == MemCmd::SCUpgradeReq) {
 196 DPRINTF(MSHR, "Replacing SCUpgradeReq with SCUpgradeFailReq\n");
 197 } else if (pkt->cmd == MemCmd::StoreCondReq) {
 199 DPRINTF(MSHR, "Replacing StoreCondReq with StoreCondFailReq\n");
 200 }
 201
 202 if (!has_data) {
 203 // there is no sensible way of setting the data field if the
 204 // new command actually would carry data
 205 assert(!pkt->hasData());
 206
 207 if (pkt->hasRespData()) {
 208 // we went from a packet that had no data (neither request,
 209 // nor response), to one that does, and therefore we need to
 210 // actually allocate space for the data payload
 211 pkt->allocate();
 212 }
 213 }
 214}
215
216
// MSHR::TargetList::replaceUpgrades()
// Applies replaceUpgrade to every target and clears hasUpgrade; a no-op when
// the list never held an upgrade. (Signature line, original line 218, is
// elided in this dump.)
217void
 219{
 220 if (!hasUpgrade)
 221 return;
 222
 223 for (auto& t : *this) {
 224 replaceUpgrade(t.pkt);
 225 }
 226
 227 hasUpgrade = false;
 228}
229
230
// Clears the markedPending flag on every target in [begin, end), propagating
// the clear upstream through each packet's SenderState MSHR chain.
// (Original line 245, presumably mshr->clearDownstreamPending() per the
// comment below, is elided in this dump — confirm against mshr.cc.)
231void
 232MSHR::TargetList::clearDownstreamPending(MSHR::TargetList::iterator begin,
 233 MSHR::TargetList::iterator end)
 234{
 235 for (auto t = begin; t != end; t++) {
 236 if (t->markedPending) {
 237 // Iterate over the SenderState stack and see if we find
 238 // an MSHR entry. If we find one, clear the
 239 // downstreamPending flag by calling
 240 // clearDownstreamPending(). This recursively clears the
 241 // downstreamPending flag in all caches this packet has
 242 // passed through.
 243 MSHR *mshr = t->pkt->findNextSenderState<MSHR>();
 244 if (mshr != nullptr) {
 246 }
 247 t->markedPending = false;
 248 }
 249 }
 250}
251
// MSHR::TargetList::clearDownstreamPending() — no-argument overload.
// NOTE(review): the entire body (original lines 253-256) is elided in this
// dump; presumably it delegates to the iterator-range overload over
// [begin(), end()) — confirm against mshr.cc line 253.
252void
 257
258
// MSHR::TargetList::trySatisfyFunctional(PacketPtr pkt)
// Returns true as soon as any buffered target's packet can satisfy the
// functional access; false if none can. (Signature line, original line 260,
// is elided in this dump.)
259bool
 261{
 262 for (auto& t : *this) {
 263 if (pkt->trySatisfyFunctional(t.pkt)) {
 264 return true;
 265 }
 266 }
 267
 268 return false;
 269}
270
271
// Prints each target with its source annotation, one per line, prefixed for
// nesting under the owning MSHR. (The case labels at original lines 282 and
// 285 — Target::FromSnoop and Target::FromPrefetcher, per the strings they
// assign — are elided in this dump.)
272void
 273MSHR::TargetList::print(std::ostream &os, int verbosity,
 274 const std::string &prefix) const
 275{
 276 for (auto& t : *this) {
 277 const char *s;
 278 switch (t.source) {
 279 case Target::FromCPU:
 280 s = "FromCPU";
 281 break;
 283 s = "FromSnoop";
 284 break;
 286 s = "FromPrefetcher";
 287 break;
 288 default:
 289 s = "";
 290 break;
 291 }
 292 ccprintf(os, "%s%s: ", prefix, s);
 293 t.pkt->print(os, verbosity, "");
 294 ccprintf(os, "\n");
 295 }
 296}
297
298
// Allocates this MSHR for a new miss: records block identity and ordering,
// clears per-allocation state, and installs the first target.
// NOTE(review): assert(target) at original line 308 fires only after target
// has already been dereferenced at line 305 — the assert is too late to
// catch a null target; upstream fix would hoist it above the first use.
// (Original lines 316 — presumably deferredTargets.init(...) — and 321 —
// presumably Target::FromPrefetcher : Target::FromCPU — are elided in this
// dump; confirm against mshr.cc.)
299void
 300MSHR::allocate(Addr blk_addr, unsigned blk_size, PacketPtr target,
 301 Tick when_ready, Counter _order, bool alloc_on_fill)
 302{
 303 blkAddr = blk_addr;
 304 blkSize = blk_size;
 305 isSecure = target->isSecure();
 306 readyTime = when_ready;
 307 order = _order;
 308 assert(target);
 309 isForward = false;
 310 wasWholeLineWrite = false;
 311 _isUncacheable = target->req->isUncacheable();
 312 inService = false;
 313 downstreamPending = false;
 314
 315 targets.init(blkAddr, blkSize);
 317
 318 // Don't know of a case where we would allocate a new MSHR for a
 319 // snoop (mem-side request), so set source according to request here
 320 Target::Source source = (target->cmd == MemCmd::HardPFReq) ?
 322 targets.add(target, when_ready, _order, source, true, alloc_on_fill);
 323
 324 // All targets must refer to the same block
 325 assert(target->matchBlockAddr(targets.front().pkt, blkSize));
 326}
327
328
// MSHR::clearDownstreamPending()
// Clears this MSHR's downstreamPending flag and recursively clears it on any
// upstream MSHRs reachable through the targets' SenderState chains.
// (Signature line, original line 330, is elided in this dump.)
329void
 331{
 332 assert(downstreamPending);
 333 downstreamPending = false;
 334 // recursively clear flag on any MSHRs we will be forwarding
 335 // responses to
 336 targets.clearDownstreamPending();
 337}
338
// Marks the MSHR as sent downstream; from here the pending*/post* flags are
// meaningful. (Original line 346 and line 356 are elided in this dump; line
// 356 presumably latches wasWholeLineWrite per the comment below — confirm
// against mshr.cc.)
339void
 340MSHR::markInService(bool pending_modified_resp)
 341{
 342 assert(!inService);
 343
 344 inService = true;
 345 pendingModified = targets.needsWritable || pending_modified_resp;
 347
 348 if (!downstreamPending) {
 349 // let upstream caches know that the request has made it to a
 350 // level where it's going to get a response
 351 targets.clearDownstreamPending();
 352 }
 353 // if the line is not considered a whole-line write when sent
 354 // downstream, make sure it is also not considered a whole-line
 355 // write when receiving the response, and vice versa
 357}
358
359
// MSHR::deallocate()
// Marks this MSHR free: both target lists must already be drained; only the
// regular list's flags need an explicit reset. (Signature line, original
// line 361, is elided in this dump.)
360void
 362{
 363 assert(targets.empty());
 364 targets.resetFlags();
 365 assert(deferredTargets.isReset());
 366 inService = false;
 367}
368
369/*
 370 * Adds a target to an MSHR
 371 */
// MSHR::allocateTarget(PacketPtr pkt, Tick whenReady, Counter _order,
//                      bool alloc_on_fill)
// Routes a new CPU-side target either to the deferred list (when it cannot
// be merged with the in-flight request) or to the regular list. (The
// signature line, original line 373, and parts of the deferral condition at
// original lines 399/401/403 are elided in this dump — the full predicate
// per the comment block below also tests hasPostInvalidate() and whether a
// writable response is still expected; confirm against mshr.cc.)
 372void
 374 bool alloc_on_fill)
 375{
 376 // assume we'd never issue a prefetch when we've got an
 377 // outstanding miss
 378 assert(pkt->cmd != MemCmd::HardPFReq);
 379
 380 // if there's a request already in service for this MSHR, we will
 381 // have to defer the new target until after the response if any of
 382 // the following are true:
 383 // - there are other targets already deferred
 384 // - there's a pending invalidate to be applied after the response
 385 // comes back (but before this target is processed)
 386 // - the MSHR's first (and only) non-deferred target is a cache
 387 // maintenance packet
 388 // - the new target is a cache maintenance packet (this is probably
 389 // overly conservative but certainly safe)
 390 // - this target requires a writable block and either we're not
 391 // getting a writable block back or we have already snooped
 392 // another read request that will downgrade our writable block
 393 // to non-writable (Shared or Owned)
 394 PacketPtr tgt_pkt = targets.front().pkt;
 395 if (pkt->req->isCacheMaintenance() ||
 396 tgt_pkt->req->isCacheMaintenance() ||
 397 !deferredTargets.empty() ||
 398 (inService &&
 400 (pkt->needsWritable() &&
 402 // need to put on deferred list
 404 replaceUpgrade(pkt);
 405 deferredTargets.add(pkt, whenReady, _order, Target::FromCPU, true,
 406 alloc_on_fill);
 407 } else {
 408 // No request outstanding, or still OK to append to
 409 // outstanding request: append to regular target list. Only
 410 // mark pending if current request hasn't been issued yet
 411 // (isn't in service).
 412 targets.add(pkt, whenReady, _order, Target::FromCPU, !inService,
 413 alloc_on_fill);
 414 }
 415
 416 DPRINTF(MSHR, "After target allocation: %s", print());
 417}
418
// MSHR::handleSnoop(PacketPtr pkt, Counter _order)
// Processes a snoop against this MSHR. Returns true when the snoop is fully
// handled here (either logically absorbed or this MSHR will respond), false
// when the cache must still process it normally. Side effects: may convert
// buffered upgrades, append a copied snoop target, and set the
// postInvalidate/postDowngrade flags. (The signature line, original line
// 420, and original line 508 — presumably pkt->setResponderHadWritable()
// per the surrounding comment — are elided in this dump; confirm against
// mshr.cc.)
419bool
 421{
 422 DPRINTF(MSHR, "%s for %s\n", __func__, pkt->print());
 423
 424 // when we snoop packets the needsWritable and isInvalidate flags
 425 // should always be the same, however, this assumes that we never
 426 // snoop writes as they are currently not marked as invalidations
 427 panic_if((pkt->needsWritable() != pkt->isInvalidate()) &&
 428 !pkt->req->isCacheMaintenance(),
 429 "%s got snoop %s where needsWritable, "
 430 "does not match isInvalidate", name(), pkt->print());
 431
 432 if (!inService || (pkt->isExpressSnoop() && downstreamPending)) {
 433 // Request has not been issued yet, or it's been issued
 434 // locally but is buffered unissued at some downstream cache
 435 // which is forwarding us this snoop. Either way, the packet
 436 // we're snooping logically precedes this MSHR's request, so
 437 // the snoop has no impact on the MSHR, but must be processed
 438 // in the standard way by the cache. The only exception is
 439 // that if we're an L2+ cache buffering an UpgradeReq from a
 440 // higher-level cache, and the snoop is invalidating, then our
 441 // buffered upgrades must be converted to read exclusives,
 442 // since the upper-level cache no longer has a valid copy.
 443 // That is, even though the upper-level cache got out on its
 444 // local bus first, some other invalidating transaction
 445 // reached the global bus before the upgrade did.
 446 if (pkt->needsWritable() || pkt->req->isCacheInvalidate()) {
 447 targets.replaceUpgrades();
 448 deferredTargets.replaceUpgrades();
 449 }
 450
 451 return false;
 452 }
 453
 454 // From here on down, the request issued by this MSHR logically
 455 // precedes the request we're snooping.
 456 if (pkt->needsWritable() || pkt->req->isCacheInvalidate()) {
 457 // snooped request still precedes the re-request we'll have to
 458 // issue for deferred targets, if any...
 459 deferredTargets.replaceUpgrades();
 460 }
 461
 462 PacketPtr tgt_pkt = targets.front().pkt;
 463 if (hasPostInvalidate() || tgt_pkt->req->isCacheInvalidate()) {
 464 // a prior snoop has already appended an invalidation or a
 465 // cache invalidation operation is in progress, so logically
 466 // we don't have the block anymore; no need for further
 467 // snooping.
 468 return true;
 469 }
 470
 471 // Start by determining if we will eventually respond or not,
 472 // matching the conditions checked in Cache::handleSnoop
 473 const bool will_respond = isPendingModified() && pkt->needsResponse() &&
 474 !pkt->isClean();
 475 if (isPendingModified() || pkt->isInvalidate()) {
 476 // We need to save and replay the packet in two cases:
 477 // 1. We're awaiting a writable copy (Modified or Exclusive),
 478 // so this MSHR is the ordering point, and we need to respond
 479 // after we receive data.
 480 // 2. It's an invalidation (e.g., UpgradeReq), and we need
 481 // to forward the snoop up the hierarchy after the current
 482 // transaction completes.
 483
 484 // The packet we are snooping may be deleted by the time we
 485 // actually process the target, and we consequently need to
 486 // save a copy here. Clear flags and also allocate new data as
 487 // the original packet data storage may have been deleted by
 488 // the time we get to process this packet. In the cases where
 489 // we are not responding after handling the snoop we also need
 490 // to create a copy of the request to be on the safe side. In
 491 // the latter case the cache is responsible for deleting both
 492 // the packet and the request as part of handling the deferred
 493 // snoop.
 494 PacketPtr cp_pkt = will_respond ? new Packet(pkt, true, true) :
 495 new Packet(std::make_shared<Request>(*pkt->req), pkt->cmd,
 496 blkSize, pkt->id);
 497
 498 if (will_respond) {
 499 // we are the ordering point, and will consequently
 500 // respond, and depending on whether the packet
 501 // needsWritable or not we either pass a Shared line or a
 502 // Modified line
 503 pkt->setCacheResponding();
 504
 505 // inform the cache hierarchy that this cache had the line
 506 // in the Modified state, even if the response is passed
 507 // as Shared (and thus non-writable)
 509
 510 // in the case of an uncacheable request there is no need
 511 // to set the responderHadWritable flag, but since the
 512 // recipient does not care there is no harm in doing so
 513 } else if (isPendingModified() && pkt->isClean()) {
 514 // this cache doesn't respond to the clean request, a
 515 // destination xbar will respond to this request, but to
 516 // do so it needs to know if it should wait for the
 517 // WriteCleanReq
 518 pkt->setSatisfied();
 519 }
 520
 521 targets.add(cp_pkt, curTick(), _order, Target::FromSnoop,
 522 downstreamPending && targets.needsWritable, false);
 523
 524 if (pkt->needsWritable() || pkt->isInvalidate()) {
 525 // This transaction will take away our pending copy
 526 postInvalidate = true;
 527 }
 528 }
 529
 530 if (!pkt->needsWritable() && !pkt->req->isUncacheable()) {
 531 // This transaction will get a read-shared copy, downgrading
 532 // our copy if we had a writable one
 533 postDowngrade = true;
 534 // make sure that any downstream cache does not respond with a
 535 // writable (and dirty) copy even if it has one, unless it was
 536 // explicitly asked for one
 537 pkt->setHasSharers();
 538 }
 539
 540 return will_respond;
 541}
542
// MSHR::TargetList MSHR::extractServiceableTargets(PacketPtr pkt)
// Extracts the subset of targets serviceable by the received response into a
// fresh TargetList, leaving the rest (and any LockedRMWRead lock-holder) in
// place. (The signature lines, original 543-544, and the branch condition at
// original line 553 — presumably testing for an invalidating response such
// as MemCmd::ReadRespWithInvalidate, per the comment below — are elided in
// this dump; confirm against mshr.cc.)
545{
 546 TargetList ready_targets;
 547 ready_targets.init(blkAddr, blkSize);
 548 // If the downstream MSHR got an invalidation request then we only
 549 // service the first of the FromCPU targets and any other
 550 // non-FromCPU target. This way the remaining FromCPU targets
 551 // issue a new request and get a fresh copy of the block and we
 552 // avoid memory consistency violations.
 554 auto it = targets.begin();
 555 assert((it->source == Target::FromCPU) ||
 556 (it->source == Target::FromPrefetcher));
 557 ready_targets.push_back(*it);
 558 // Leave the Locked RMW Read until the corresponding Locked Write
 559 // request comes in
 560 if (it->pkt->cmd != MemCmd::LockedRMWReadReq) {
 561 it = targets.erase(it);
 562 while (it != targets.end()) {
 563 if (it->source == Target::FromCPU) {
 564 it++;
 565 } else {
 566 assert(it->source == Target::FromSnoop);
 567 ready_targets.push_back(*it);
 568 it = targets.erase(it);
 569 }
 570 }
 571 ready_targets.populateFlags();
 572 } else {
 573 auto it = targets.begin();
 574 while (it != targets.end()) {
 575 ready_targets.push_back(*it);
 576 if (it->pkt->cmd == MemCmd::LockedRMWReadReq) {
 577 // Leave the Locked RMW Read until the corresponding Locked
 578 // Write comes in. Also don't service any later targets as the
 579 // line is now "locked".
 580 break;
 581 }
 582 it = targets.erase(it);
 583 }
 584 ready_targets.populateFlags();
 585 }
 586 targets.populateFlags();
 587
 588 return ready_targets;
 589}
591
// MSHR::promoteDeferredTargets()
// Moves promotable deferred targets onto the regular list — everything up to
// (but not including) the first cache-maintenance target, or a lone leading
// CMO when the regular list is empty — then refreshes both lists' flags and
// this MSHR's order/readyTime from the new front target. Returns false only
// when both lists are empty. (Signature line, original line 593, is elided
// in this dump.)
592bool
 594{
 595 if (targets.empty() && deferredTargets.empty()) {
 596 // nothing to promote
 597 return false;
 598 }
 599
 600 // the deferred targets can be generally promoted unless they
 601 // contain a cache maintenance request
 602
 603 // find the first target that is a cache maintenance request
 604 auto it = std::find_if(deferredTargets.begin(), deferredTargets.end(),
 605 [](MSHR::Target &t) {
 606 return t.pkt->req->isCacheMaintenance();
 607 });
 608 if (it == deferredTargets.begin()) {
 609 // if the first deferred target is a cache maintenance packet
 610 // then we can promote provided the targets list is empty and
 611 // we can service it on its own
 612 if (targets.empty()) {
 613 targets.splice(targets.end(), deferredTargets, it);
 614 }
 615 } else {
 616 // if a cache maintenance operation exists, we promote all the
 617 // deferred targets that precede it, or all deferred targets
 618 // otherwise
 619 targets.splice(targets.end(), deferredTargets,
 620 deferredTargets.begin(), it);
 621 }
 622
 623 deferredTargets.populateFlags();
 624 targets.populateFlags();
 625 order = targets.front().order;
 626 readyTime = std::max(curTick(), targets.front().readyTime);
 627
 628 return true;
 629}
630
631void
632MSHR::promoteIf(const std::function<bool (Target &)>& pred)
633{
634 // if any of the deferred targets were upper-level cache
635 // requests marked downstreamPending, need to clear that
636 assert(!downstreamPending); // not pending here anymore
637
638 // find the first target does not satisfy the condition
639 auto last_it = std::find_if_not(deferredTargets.begin(),
640 deferredTargets.end(),
641 pred);
642
643 // for the prefix of the deferredTargets [begin(), last_it) clear
644 // the downstreamPending flag and move them to the target list
645 deferredTargets.clearDownstreamPending(deferredTargets.begin(),
646 last_it);
647 targets.splice(targets.end(), deferredTargets,
648 deferredTargets.begin(), last_it);
649 // We need to update the flags for the target lists after the
650 // modifications
651 deferredTargets.populateFlags();
652}
653
// MSHR::promoteReadable()
// Promotes deferred targets that need neither a writable copy nor an
// invalidation, once a non-invalidating response has arrived. (The signature
// line, original 655, and original line 671 — presumably promoteIf(pred) —
// are elided in this dump; confirm against mshr.cc.)
654void
 656{
 657 if (!deferredTargets.empty() && !hasPostInvalidate()) {
 658 // We got a non invalidating response, and we have the block
 659 // but we have deferred targets which are waiting and they do
 660 // not need writable. This can happen if the original request
 661 // was for a cache clean operation and we had a copy of the
 662 // block. Since we serviced the cache clean operation and we
 663 // have the block, there's no need to defer the targets, so
 664 // move them up to the regular target list.
 665
 666 auto pred = [](Target &t) {
 667 assert(t.source == Target::FromCPU);
 668 return !t.pkt->req->isCacheInvalidate() &&
 669 !t.pkt->needsWritable();
 670 };
 672 }
 673}
674
// MSHR::promoteWritable()
// When a writable response arrives unexpectedly, promotes the deferred
// targets that were waiting only for writability (not for an invalidate).
// (The signature line, original 676, part of the guard condition at original
// line 683, and original line 700 — presumably promoteIf(pred) — are elided
// in this dump; confirm against mshr.cc.)
675void
 677{
 678 if (deferredTargets.empty()) {
 679 return;
 680 }
 681 PacketPtr def_tgt_pkt = deferredTargets.front().pkt;
 682 if (deferredTargets.needsWritable &&
 684 !def_tgt_pkt->req->isCacheInvalidate()) {
 685 // We got a writable response, but we have deferred targets
 686 // which are waiting to request a writable copy (not because
 687 // of a pending invalidate). This can happen if the original
 688 // request was for a read-only block, but we got a writable
 689 // response anyway. Since we got the writable copy there's no
 690 // need to defer the targets, so move them up to the regular
 691 // target list.
 692 assert(!targets.needsWritable);
 693 targets.needsWritable = true;
 694
 695 auto pred = [](Target &t) {
 696 assert(t.source == Target::FromCPU);
 697 return !t.pkt->req->isCacheInvalidate();
 698 };
 699
 701 }
 702}
703
704
// MSHR::trySatisfyFunctional(PacketPtr pkt)
// Functional-access hook: print requests get the MSHR summary and return
// false; data requests are tried against every buffered target in both
// lists. (Signature line, original line 706, is elided in this dump.)
705bool
 707{
 708 // For printing, we treat the MSHR as a whole as single entity.
 709 // For other requests, we iterate over the individual targets
 710 // since that's where the actual data lies.
 711 if (pkt->isPrint()) {
 712 pkt->trySatisfyFunctional(this, blkAddr, isSecure, blkSize, nullptr);
 713 return false;
 714 } else {
 715 return (targets.trySatisfyFunctional(pkt) ||
 716 deferredTargets.trySatisfyFunctional(pkt));
 717 }
 718}
719
// MSHR::sendPacket(BaseCache &cache)
// QueueEntry hook: delegates to the cache, which turns this MSHR into a
// downstream packet and sends it. (Signature line, original line 721, is
// elided in this dump.)
720bool
 722{
 723 return cache.sendMSHRQueuePacket(this);
 724}
725
// Prints a one-line state summary (address range, security space, and every
// status flag) followed by both target lists, indented one level.
726void
 727MSHR::print(std::ostream &os, int verbosity, const std::string &prefix) const
 728{
 729 ccprintf(os, "%s[%#llx:%#llx](%s) %s %s %s state: %s %s %s %s %s %s\n",
 730 prefix, blkAddr, blkAddr + blkSize - 1,
 731 isSecure ? "s" : "ns",
 732 isForward ? "Forward" : "",
 733 allocOnFill() ? "AllocOnFill" : "",
 734 needsWritable() ? "Wrtbl" : "",
 735 _isUncacheable ? "Unc" : "",
 736 inService ? "InSvc" : "",
 737 downstreamPending ? "DwnPend" : "",
 738 postInvalidate ? "PostInv" : "",
 739 postDowngrade ? "PostDowngr" : "",
 740 hasFromCache() ? "HasFromCache" : "");
 741
 742 if (!targets.empty()) {
 743 ccprintf(os, "%s Targets:\n", prefix);
 744 targets.print(os, verbosity, prefix + " ");
 745 }
 746 if (!deferredTargets.empty()) {
 747 ccprintf(os, "%s Deferred Targets:\n", prefix);
 748 deferredTargets.print(os, verbosity, prefix + " ");
 749 }
 750}
751
// MSHR::print() — no-argument wrapper returning the formatted summary as a
// std::string, for use inside DPRINTF argument lists. (Signature line,
// original line 753, is elided in this dump.)
752std::string
 754{
 755 std::ostringstream str;
 756 print(str);
 757 return str.str();
 758}
759
760bool
761MSHR::matchBlockAddr(const Addr addr, const bool is_secure) const
762{
763 assert(hasTargets());
764 return (blkAddr == addr) && (isSecure == is_secure);
765}
766
// MSHR::matchBlockAddr(const PacketPtr pkt) — packet overload; delegates the
// block-alignment and security-space comparison to the packet. (Signature
// line, original line 768, is elided in this dump.)
767bool
 769{
 770 assert(hasTargets());
 771 return pkt->matchBlockAddr(blkAddr, isSecure, blkSize);
 772}
773
// MSHR::conflictAddr(const QueueEntry *entry) — true when another queue
// entry targets this MSHR's block in the same security space. (Signature
// line, original line 775, is elided in this dump.)
774bool
 776{
 777 assert(hasTargets());
 778 return entry->matchBlockAddr(blkAddr, isSecure);
 779}
780
// MSHR::updateLockedRMWReadTarget(PacketPtr pkt)
// Swaps the front target's packet (which must be pkt) for a dummy
// LockedRMWReadReq built on a copy of its request, keeping the MSHR
// allocated until the matching Locked Write arrives. (Signature line,
// original line 782, is elided in this dump.)
781void
 783{
 784 assert(!targets.empty() && targets.front().pkt == pkt);
 785 RequestPtr r = std::make_shared<Request>(*(pkt->req));
 786 targets.front().pkt = new Packet(r, MemCmd::LockedRMWReadReq);
 787}
788
// MSHR::hasLockedRMWReadTarget()
// True when the front target is a LockedRMWReadReq, i.e. the line is held
// "locked" awaiting the corresponding Locked Write. (Signature line,
// original line 790, is elided in this dump.)
789bool
 791{
 792 if (!targets.empty() &&
 793 targets.front().pkt->cmd == MemCmd::LockedRMWReadReq) {
 794 return true;
 795 }
 796 return false;
 797}
798
799
800} // namespace gem5
#define DPRINTF(x,...)
Definition trace.hh:209
Defines global host-dependent types: Counter, Tick, and (indirectly) {int,uint}{8,...
A basic cache interface.
Definition base.hh:104
virtual bool sendMSHRQueuePacket(MSHR *mshr)
Take an MSHR, turn it into a suitable downstream packet, and send it out.
Definition base.cc:1930
void updateFlags(PacketPtr pkt, Target::Source source, bool alloc_on_fill)
Use the provided packet and the source to update the flags of this TargetList.
Definition mshr.cc:77
void replaceUpgrades()
Convert upgrades to the equivalent request if the cache line they refer to would have been invalid (U...
Definition mshr.cc:218
void populateFlags()
Goes through the list of targets and uses them to populate the flags of this TargetList.
Definition mshr.cc:105
Addr blkSize
Size of the cache block.
Definition mshr.hh:293
bool trySatisfyFunctional(PacketPtr pkt)
Definition mshr.cc:260
bool allocOnFill
Set when the response should allocate on fill.
Definition mshr.hh:176
std::vector< char > writesBitmap
Track which bytes are written by requests in this target list.
Definition mshr.hh:305
void add(PacketPtr pkt, Tick readyTime, Counter order, Target::Source source, bool markPending, bool alloc_on_fill)
Add the specified packet in the TargetList.
Definition mshr.cc:160
void print(std::ostream &os, int verbosity, const std::string &prefix) const
Definition mshr.cc:273
bool canMergeWrites
Indicates whether we can merge incoming write requests.
Definition mshr.hh:296
void updateWriteFlags(PacketPtr pkt)
Add the specified packet in the TargetList.
Definition mshr.cc:114
TargetList(const std::string &name=".unnamedTargetList")
Definition mshr.cc:70
void init(Addr blk_addr, Addr blk_size)
Reset state.
Definition mshr.hh:202
bool hasFromCache
Determine whether there was at least one non-snooping target coming from another cache.
Definition mshr.hh:181
void clearDownstreamPending()
Definition mshr.cc:253
Miss Status and handling Register.
Definition mshr.hh:75
bool postInvalidate
Did we snoop an invalidate while waiting for data?
Definition mshr.hh:116
TargetList targets
List of all requests that match the address.
Definition mshr.hh:394
void clearDownstreamPending()
Definition mshr.cc:330
bool wasWholeLineWrite
Track if we sent this as a whole line write or not.
Definition mshr.hh:124
void updateLockedRMWReadTarget(PacketPtr pkt)
Replaces the matching packet in the Targets list with a dummy packet to ensure the MSHR remains alloc...
Definition mshr.cc:782
std::string print() const
A no-args wrapper of print(std::ostream...) meant to be invoked from DPRINTFs avoiding string overhea...
Definition mshr.cc:753
MSHR(const std::string &name)
A simple constructor.
Definition mshr.cc:62
void promoteIf(const std::function< bool(Target &)> &pred)
Promotes deferred targets that satisfy a predicate.
Definition mshr.cc:632
void markInService(bool pending_modified_resp)
Definition mshr.cc:340
bool downstreamPending
Flag set by downstream caches.
Definition mshr.hh:87
TargetList extractServiceableTargets(PacketPtr pkt)
Extracts the subset of the targets that can be serviced given a received response.
Definition mshr.cc:544
bool isPendingModified() const
Definition mshr.hh:326
bool postDowngrade
Did we snoop a read while waiting for data?
Definition mshr.hh:119
bool conflictAddr(const QueueEntry *entry) const override
Check if given entry's packets conflict with this' entries packets.
Definition mshr.cc:775
void promoteReadable()
Promotes deferred targets that do not require writable.
Definition mshr.cc:655
bool pendingModified
Here we use one flag to track both if:
Definition mshr.hh:113
void allocate(Addr blk_addr, unsigned blk_size, PacketPtr pkt, Tick when_ready, Counter _order, bool alloc_on_fill)
Allocate a miss to this MSHR.
Definition mshr.cc:300
TargetList deferredTargets
Definition mshr.hh:396
bool sendPacket(BaseCache &cache) override
Send this queue entry as a downstream packet, with the exact behaviour depending on the specific entr...
Definition mshr.cc:721
bool trySatisfyFunctional(PacketPtr pkt)
Definition mshr.cc:706
bool hasPostDowngrade() const
Definition mshr.hh:334
Iterator readyIter
Pointer to this MSHR on the ready list.
Definition mshr.hh:385
bool matchBlockAddr(const Addr addr, const bool is_secure) const override
Check if entry corresponds to the one being looked for.
Definition mshr.cc:761
bool handleSnoop(PacketPtr target, Counter order)
Definition mshr.cc:420
bool needsWritable() const
The pending* and post* flags are only valid if inService is true.
Definition mshr.hh:319
bool isForward
True if the entry is just a simple forward from an upper level.
Definition mshr.hh:127
bool hasLockedRMWReadTarget()
Determine if there are any LockedRMWReads in the Targets list.
Definition mshr.cc:790
bool hasFromCache() const
Determine if there are non-deferred requests from other caches.
Definition mshr.hh:349
bool promoteDeferredTargets()
Definition mshr.cc:593
bool isWholeLineWrite() const
Check if this MSHR contains only compatible writes, and if they span the entire cache line.
Definition mshr.hh:406
void allocateTarget(PacketPtr target, Tick when, Counter order, bool alloc_on_fill)
Add a request to the list of targets.
Definition mshr.cc:373
void promoteWritable()
Promotes deferred targets that do not require writable.
Definition mshr.cc:676
bool hasPostInvalidate() const
Definition mshr.hh:330
void deallocate()
Mark this MSHR as free.
Definition mshr.cc:361
Iterator allocIter
Pointer to this MSHR on the allocated list.
Definition mshr.hh:391
bool allocOnFill() const
Definition mshr.hh:340
bool hasTargets() const
Returns true if there are targets left.
Definition mshr.hh:467
@ ReadRespWithInvalidate
Definition packet.hh:89
@ StoreCondFailReq
Definition packet.hh:114
@ LockedRMWReadReq
Definition packet.hh:116
@ SCUpgradeFailReq
Definition packet.hh:106
Named(std::string_view name_)
Definition named.hh:57
virtual std::string name() const
Definition named.hh:60
A Packet is used to encapsulate a transfer between two objects in the memory system (e....
Definition packet.hh:295
bool isUpgrade() const
Definition packet.hh:596
bool isSecure() const
Definition packet.hh:836
const PacketId id
Definition packet.hh:374
bool needsWritable() const
Definition packet.hh:599
void print(std::ostream &o, int verbosity=0, const std::string &prefix="") const
Definition packet.cc:368
bool needsResponse() const
Definition packet.hh:608
T * findNextSenderState() const
Go through the sender state stack and return the first instance that is of type T (as determined by a...
Definition packet.hh:575
void setResponderHadWritable()
On responding to a snoop request (which only happens for Modified or Owned lines),...
Definition packet.hh:713
Addr getOffset(unsigned int blk_size) const
Definition packet.hh:826
bool hasData() const
Definition packet.hh:614
bool hasRespData() const
Definition packet.hh:615
bool fromCache() const
Definition packet.hh:612
bool isWrite() const
Definition packet.hh:594
bool trySatisfyFunctional(PacketPtr other)
Check a functional request against a memory value stored in another packet (i.e.
Definition packet.hh:1399
RequestPtr req
A pointer to the original request.
Definition packet.hh:377
bool isPrint() const
Definition packet.hh:623
unsigned getSize() const
Definition packet.hh:817
void setCacheResponding()
Snoop flags.
Definition packet.hh:653
bool isClean() const
Definition packet.hh:611
bool isExpressSnoop() const
Definition packet.hh:702
void setHasSharers()
On fills, the hasSharers flag is used by the caches in combination with the cacheResponding flag,...
Definition packet.hh:685
bool matchBlockAddr(const Addr addr, const bool is_secure, const int blk_size) const
Check if packet corresponds to a given block-aligned address and address space.
Definition packet.cc:389
void setSatisfied()
Set when a request hits in a cache and the cache is not going to respond.
Definition packet.hh:749
MemCmd cmd
The command field of the packet.
Definition packet.hh:372
bool isMaskedWrite() const
Definition packet.hh:1450
bool isInvalidate() const
Definition packet.hh:609
void allocate()
Allocate memory for the packet.
Definition packet.hh:1367
bool _isUncacheable
True if the entry is uncacheable.
unsigned blkSize
Block size of the cache.
virtual bool matchBlockAddr(const Addr addr, const bool is_secure) const =0
Check if entry corresponds to the one being looked for.
Addr blkAddr
Block aligned address.
Counter order
Order number assigned to disambiguate writes and misses.
bool inService
True if the entry has been sent downstream.
QueueEntry(const std::string &name)
bool isSecure
True if the entry targets the secure memory space.
Tick readyTime
Tick when ready to issue.
uint64_t FlagsType
Definition request.hh:100
@ SECURE
The request targets the secure memory space.
Definition request.hh:186
@ LOCKED_RMW
This request will lock or unlock the accessed memory.
Definition request.hh:154
@ STRICT_ORDER
The request is required to be strictly ordered by CPU models and is non-speculative.
Definition request.hh:135
@ UNCACHEABLE
The request is to an uncacheable address.
Definition request.hh:125
@ PRIVILEGED
This request is made in privileged mode.
Definition request.hh:137
@ MEM_SWAP
This request is for a memory swap.
Definition request.hh:158
@ LLSC
The request is a Load locked/store conditional.
Definition request.hh:156
#define panic_if(cond,...)
Conditional panic macro that checks the supplied condition and only panics if the condition is true a...
Definition logging.hh:246
Declares a basic cache interface BaseCache.
Miss Status and Handling Register (MSHR) declaration.
Bitfield< 5 > t
Definition misc_types.hh:71
Bitfield< 4 > s
Bitfield< 23, 0 > offset
Definition types.hh:144
Bitfield< 27, 24 > pred
Definition types.hh:91
Bitfield< 17 > os
Definition misc.hh:838
Bitfield< 3 > addr
Definition types.hh:84
double Counter
All counters are of 64-bit values.
Definition types.hh:46
Copyright (c) 2024 Arm Limited All rights reserved.
Definition binary32.hh:36
std::shared_ptr< Request > RequestPtr
Definition request.hh:94
Tick curTick()
The universal simulation clock.
Definition cur_tick.hh:46
uint64_t Addr
Address type This will probably be moved somewhere else in the near future.
Definition types.hh:147
uint64_t Tick
Tick count type.
Definition types.hh:58
Packet * PacketPtr
static void replaceUpgrade(PacketPtr pkt)
Definition mshr.cc:186
void ccprintf(cp::Print &print)
Definition cprintf.hh:130
Declaration of a request, the overall memory request consisting of the parts of the request that are ...

Generated on Mon Oct 27 2025 04:13:03 for gem5 by doxygen 1.14.0