gem5  v20.0.0.2
base.hh
1 /*
2  * Copyright (c) 2012-2013, 2015-2016, 2018-2019 ARM Limited
3  * All rights reserved.
4  *
5  * The license below extends only to copyright in the software and shall
6  * not be construed as granting a license to any other intellectual
7  * property including but not limited to intellectual property relating
8  * to a hardware implementation of the functionality of the software
9  * licensed hereunder. You may use the software subject to the license
10  * terms below provided that you ensure that this notice is replicated
11  * unmodified and in its entirety in all distributions of the software,
12  * modified or unmodified, in source code or in binary form.
13  *
14  * Copyright (c) 2003-2005 The Regents of The University of Michigan
15  * All rights reserved.
16  *
17  * Redistribution and use in source and binary forms, with or without
18  * modification, are permitted provided that the following conditions are
19  * met: redistributions of source code must retain the above copyright
20  * notice, this list of conditions and the following disclaimer;
21  * redistributions in binary form must reproduce the above copyright
22  * notice, this list of conditions and the following disclaimer in the
23  * documentation and/or other materials provided with the distribution;
24  * neither the name of the copyright holders nor the names of its
25  * contributors may be used to endorse or promote products derived from
26  * this software without specific prior written permission.
27  *
28  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39  */
40 
41 /**
42  * @file
43  * Declares a basic cache interface BaseCache.
44  */
45 
46 #ifndef __MEM_CACHE_BASE_HH__
47 #define __MEM_CACHE_BASE_HH__
48 
49 #include <cassert>
50 #include <cstdint>
51 #include <string>
52 
53 #include "base/addr_range.hh"
54 #include "base/statistics.hh"
55 #include "base/trace.hh"
56 #include "base/types.hh"
57 #include "debug/Cache.hh"
58 #include "debug/CachePort.hh"
59 #include "enums/Clusivity.hh"
60 #include "mem/cache/cache_blk.hh"
61 #include "mem/cache/compressors/base.hh"
62 #include "mem/cache/mshr_queue.hh"
63 #include "mem/cache/tags/base.hh"
64 #include "mem/cache/write_queue.hh"
65 #include "mem/cache/write_queue_entry.hh"
66 #include "mem/packet.hh"
67 #include "mem/packet_queue.hh"
68 #include "mem/qport.hh"
69 #include "mem/request.hh"
70 #include "params/WriteAllocator.hh"
71 #include "sim/clocked_object.hh"
72 #include "sim/eventq.hh"
73 #include "sim/probe/probe.hh"
74 #include "sim/serialize.hh"
75 #include "sim/sim_exit.hh"
76 #include "sim/system.hh"
77 
78 namespace Prefetcher {
79  class Base;
80 }
81 class MSHR;
82 class MasterPort;
83 class QueueEntry;
84 struct BaseCacheParams;
85 
86 /**
87  * A basic cache interface. Implements some common functions for speed.
88  */
89 class BaseCache : public ClockedObject
90 {
91   protected:
92     /**
93      * Indexes to enumerate the MSHR queues.
94      */
95     enum MSHRQueueIndex {
96         MSHRQueue_MSHRs,
97         MSHRQueue_WriteBuffer
98     };
99 
100   public:
101     /**
102      * Reasons for caches to be blocked.
103      */
104     enum BlockedCause {
105         Blocked_NoMSHRs = MSHRQueue_MSHRs,
106         Blocked_NoWBBuffers = MSHRQueue_WriteBuffer,
107         Blocked_NoTargets,
108         NUM_BLOCKED_CAUSES
109     };
110 
111  protected:
112 
122     class CacheMasterPort : public QueuedMasterPort
123     {
124 
125  public:
126 
131  void schedSendEvent(Tick time)
132  {
133  DPRINTF(CachePort, "Scheduling send event at %llu\n", time);
134  reqQueue.schedSendEvent(time);
135  }
136 
137  protected:
138 
139  CacheMasterPort(const std::string &_name, BaseCache *_cache,
140  ReqPacketQueue &_reqQueue,
141  SnoopRespPacketQueue &_snoopRespQueue) :
142  QueuedMasterPort(_name, _cache, _reqQueue, _snoopRespQueue)
143  { }
144 
150  virtual bool isSnooping() const { return true; }
151  };
152 
159     class CacheReqPacketQueue : public ReqPacketQueue
160     {
161 
162       protected:
163 
164         BaseCache &cache;
165         SnoopRespPacketQueue &snoopRespQueue;
166 
167       public:
168 
169         CacheReqPacketQueue(BaseCache &cache, MasterPort &port,
170                             SnoopRespPacketQueue &snoop_resp_queue,
171  const std::string &label) :
172  ReqPacketQueue(cache, port, label), cache(cache),
173  snoopRespQueue(snoop_resp_queue) { }
174 
180  virtual void sendDeferredPacket();
181 
191  {
192  if (snoopRespQueue.checkConflict(pkt, cache.blkSize)) {
193  DPRINTF(CachePort, "Waiting for snoop response to be "
194  "sent\n");
195  Tick when = snoopRespQueue.deferredPacketReadyTime();
196  schedSendEvent(when);
197  return true;
198  }
199  return false;
200  }
201  };
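        // Illustrative note (assumption about the caller, not part of base.hh):
        // the intent of checkConflictingSnoop() above is that the
        // sendDeferredPacket() override consults it with the packet it is about
        // to issue, so that a pending snoop response for the same block is sent
        // downstream first, roughly:
        //
        //     PacketPtr pkt = /* next MSHR or write-queue packet */;
        //     if (!checkConflictingSnoop(pkt)) {
        //         // safe to send; otherwise we have already rescheduled
        //         // ourselves for the snoop response's ready time
        //     }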
202 
203 
208     class MemSidePort : public CacheMasterPort
209     {
210       private:
211 
212         /** The cache-specific queue. */
213         CacheReqPacketQueue _reqQueue;
214 
215         SnoopRespPacketQueue _snoopRespQueue;
216 
217         // a pointer to our specific cache implementation
218         BaseCache *cache;
219 
220  protected:
221 
222  virtual void recvTimingSnoopReq(PacketPtr pkt);
223 
224  virtual bool recvTimingResp(PacketPtr pkt);
225 
226  virtual Tick recvAtomicSnoop(PacketPtr pkt);
227 
228  virtual void recvFunctionalSnoop(PacketPtr pkt);
229 
230  public:
231 
232  MemSidePort(const std::string &_name, BaseCache *_cache,
233  const std::string &_label);
234  };
235 
244     class CacheSlavePort : public QueuedSlavePort
245     {
246 
247       public:
248 
249         /** Do not accept any new requests. */
250         void setBlocked();
251 
252         /** Return to normal operation and accept new requests. */
253         void clearBlocked();
254 
255         bool isBlocked() const { return blocked; }
256 
257       protected:
258 
259         CacheSlavePort(const std::string &_name, BaseCache *_cache,
260                        const std::string &_label);
261 
262         /** A normal packet queue used to store responses. */
263         RespPacketQueue queue;
264 
265         bool blocked;
266 
267         bool mustSendRetry;
268 
269       private:
270 
271         void processSendRetry();
272 
273         EventFunctionWrapper sendRetryEvent;
274 
275     };
276 
281     class CpuSidePort : public CacheSlavePort
282     {
283       private:
284 
285         // a pointer to our specific cache implementation
286         BaseCache *cache;
287 
288  protected:
289  virtual bool recvTimingSnoopResp(PacketPtr pkt) override;
290 
291  virtual bool tryTiming(PacketPtr pkt) override;
292 
293  virtual bool recvTimingReq(PacketPtr pkt) override;
294 
295  virtual Tick recvAtomic(PacketPtr pkt) override;
296 
297  virtual void recvFunctional(PacketPtr pkt) override;
298 
299  virtual AddrRangeList getAddrRanges() const override;
300 
301  public:
302 
303  CpuSidePort(const std::string &_name, BaseCache *_cache,
304  const std::string &_label);
305 
306  };
307 
308     CpuSidePort cpuSidePort;
309     MemSidePort memSidePort;
310 
311   protected:
312 
313     /** Miss status registers */
314     MSHRQueue mshrQueue;
315 
316     /** Write/writeback buffer */
317     WriteQueue writeBuffer;
318 
319     /** Tag and data Storage */
320     BaseTags *tags;
321 
322     /** Compression method being used. */
323     BaseCacheCompressor* compressor;
324 
325     /** Prefetcher */
326     Prefetcher::Base *prefetcher;
327 
328     /** To probe when a cache hit occurs */
329     ProbePointArg<PacketPtr> *ppHit;
330 
331     /** To probe when a cache miss occurs */
332     ProbePointArg<PacketPtr> *ppMiss;
333 
334     /** To probe when a cache fill occurs */
335     ProbePointArg<PacketPtr> *ppFill;
336 
351     WriteAllocator * const writeAllocator;
352 
359     TempCacheBlk *tempBlock;
360 
365  std::unique_ptr<Packet> pendingDelete;
366 
371  void markInService(MSHR *mshr, bool pending_modified_resp)
372  {
373  bool wasFull = mshrQueue.isFull();
374  mshrQueue.markInService(mshr, pending_modified_resp);
375 
376  if (wasFull && !mshrQueue.isFull()) {
377  clearBlocked(Blocked_NoMSHRs);
378  }
379  }
380 
381     void markInService(WriteQueueEntry *entry)
382     {
383  bool wasFull = writeBuffer.isFull();
384  writeBuffer.markInService(entry);
385 
386  if (wasFull && !writeBuffer.isFull()) {
387  clearBlocked(Blocked_NoWBBuffers);
388  }
389  }
390 
404  inline bool allocOnFill(MemCmd cmd) const
405  {
406  return clusivity == Enums::mostly_incl ||
407  cmd == MemCmd::WriteLineReq ||
408  cmd == MemCmd::ReadReq ||
409  cmd == MemCmd::WriteReq ||
410  cmd.isPrefetch() ||
411  cmd.isLLSC();
412  }
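    // Illustrative example (hypothetical command values, not part of base.hh):
    // for a mostly-exclusive cache (clusivity == mostly_excl) only the listed
    // command types allocate, e.g.
    //
    //     allocOnFill(MemCmd::ReadReq)         // true: demand read
    //     allocOnFill(MemCmd::WriteLineReq)    // true: whole-line write
    //     allocOnFill(MemCmd::ReadSharedReq)   // false when mostly_excl
    //
    // whereas with clusivity == mostly_incl every fill allocates a block.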
413 
422  Addr regenerateBlkAddr(CacheBlk* blk);
423 
432  Cycles calculateTagOnlyLatency(const uint32_t delay,
433  const Cycles lookup_lat) const;
443  Cycles calculateAccessLatency(const CacheBlk* blk, const uint32_t delay,
444  const Cycles lookup_lat) const;
445 
454  virtual bool access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat,
455  PacketList &writebacks);
456 
457  /*
458  * Handle a timing request that hit in the cache
459  *
460  * @param pkt The request packet
461  * @param blk The referenced block
462  * @param request_time The tick at which the block lookup is complete
463  */
464  virtual void handleTimingReqHit(PacketPtr pkt, CacheBlk *blk,
465  Tick request_time);
466 
467  /*
468  * Handle a timing request that missed in the cache
469  *
470  * Implementation specific handling for different cache
471  * implementations
472  *
473  * @param pkt The request packet
474  * @param blk The referenced block
475  * @param forward_time The tick at which we can process dependent requests
476  * @param request_time The tick at which the block lookup is complete
477  */
478  virtual void handleTimingReqMiss(PacketPtr pkt, CacheBlk *blk,
479  Tick forward_time,
480  Tick request_time) = 0;
481 
482  /*
483  * Handle a timing request that missed in the cache
484  *
485  * Common functionality across different cache implementations
486  *
487  * @param pkt The request packet
488  * @param blk The referenced block
489  * @param mshr Any existing mshr for the referenced cache block
490  * @param forward_time The tick at which we can process dependent requests
491  * @param request_time The tick at which the block lookup is complete
492  */
493  void handleTimingReqMiss(PacketPtr pkt, MSHR *mshr, CacheBlk *blk,
494  Tick forward_time, Tick request_time);
495 
500  virtual void recvTimingReq(PacketPtr pkt);
501 
506  void handleUncacheableWriteResp(PacketPtr pkt);
507 
518  virtual void serviceMSHRTargets(MSHR *mshr, const PacketPtr pkt,
519  CacheBlk *blk) = 0;
520 
525  virtual void recvTimingResp(PacketPtr pkt);
526 
531  virtual void recvTimingSnoopReq(PacketPtr pkt) = 0;
532 
537  virtual void recvTimingSnoopResp(PacketPtr pkt) = 0;
538 
551  virtual Cycles handleAtomicReqMiss(PacketPtr pkt, CacheBlk *&blk,
552  PacketList &writebacks) = 0;
553 
559  virtual Tick recvAtomic(PacketPtr pkt);
560 
567  virtual Tick recvAtomicSnoop(PacketPtr pkt) = 0;
568 
575  virtual void functionalAccess(PacketPtr pkt, bool from_cpu_side);
576 
580  void cmpAndSwap(CacheBlk *blk, PacketPtr pkt);
581 
588  QueueEntry* getNextQueueEntry();
589 
593  virtual void doWritebacks(PacketList& writebacks, Tick forward_time) = 0;
594 
598  virtual void doWritebacksAtomic(PacketList& writebacks) = 0;
599 
615  virtual PacketPtr createMissPacket(PacketPtr cpu_pkt, CacheBlk *blk,
616  bool needs_writable,
617  bool is_whole_line_write) const = 0;
618 
626  const bool writebackClean;
627 
635     PacketPtr tempBlockWriteback;
636 
642     void writebackTempBlockAtomic() {
643         assert(tempBlockWriteback != nullptr);
644  PacketList writebacks{tempBlockWriteback};
645  doWritebacksAtomic(writebacks);
646  tempBlockWriteback = nullptr;
647  }
648 
654     EventFunctionWrapper writebackTempBlockAtomicEvent;
655 
680  bool updateCompressionData(CacheBlk *blk, const uint64_t* data,
681  PacketList &writebacks);
682 
693  virtual void satisfyRequest(PacketPtr pkt, CacheBlk *blk,
694  bool deferred_response = false,
695  bool pending_downgrade = false);
696 
706  void maintainClusivity(bool from_cache, CacheBlk *blk);
707 
717  bool handleEvictions(std::vector<CacheBlk*> &evict_blks,
718  PacketList &writebacks);
719 
738  CacheBlk *handleFill(PacketPtr pkt, CacheBlk *blk,
739  PacketList &writebacks, bool allocate);
740 
753  CacheBlk *allocateBlock(const PacketPtr pkt, PacketList &writebacks);
762  M5_NODISCARD virtual PacketPtr evictBlock(CacheBlk *blk) = 0;
763 
772  void evictBlock(CacheBlk *blk, PacketList &writebacks);
773 
779  void invalidateBlock(CacheBlk *blk);
780 
787  PacketPtr writebackBlk(CacheBlk *blk);
788 
800  PacketPtr writecleanBlk(CacheBlk *blk, Request::Flags dest, PacketId id);
801 
805  virtual void memWriteback() override;
806 
814  virtual void memInvalidate() override;
815 
821  bool isDirty() const;
822 
831  bool inRange(Addr addr) const;
832 
836  Tick nextQueueReadyTime() const;
837 
839  const unsigned blkSize;
840 
845     const Cycles lookupLatency;
846 
851     const Cycles dataLatency;
852 
858     const Cycles forwardLatency;
859 
860     /** The latency to fill a cache block */
861     const Cycles fillLatency;
862 
868     const Cycles responseLatency;
869 
873     const bool sequentialAccess;
874 
875     /** The number of targets for each MSHR. */
876     const int numTarget;
877 
878     /** Do we forward snoops from mem side port through to cpu side port? */
879     bool forwardSnoops;
880 
886     const Enums::Clusivity clusivity;
887 
894     const bool isReadOnly;
895 
900     uint8_t blocked;
901 
902     /** Increasing order number assigned to each incoming request. */
903     uint64_t order;
904 
905     /** Stores time the cache blocked for statistics. */
906     Cycles blockedCycle;
907 
908     /** Pointer to the MSHR that has no targets. */
909     MSHR *noTargetMSHR;
910 
911     /** The number of misses to trigger an exit event. */
912     Counter missCount;
913 
917     const AddrRangeList addrRanges;
918 
919   public:
920     /** System we are currently operating in. */
921     System *system;
922 
923  struct CacheCmdStats : public Stats::Group
924  {
925  CacheCmdStats(BaseCache &c, const std::string &name);
926 
933  void regStatsFromParent();
934 
935  const BaseCache &cache;
936 
970  };
971 
972  struct CacheStats : public Stats::Group
973  {
975 
976  void regStats() override;
977 
978     CacheCmdStats &cmdStats(const PacketPtr p) {
979         return *cmd[p->cmdToIndex()];
980  }
981 
982  const BaseCache &cache;
983 
988 
993 
998 
1003 
1008 
1013 
1018 
1021 
1025 
1028 
1033 
1038 
1041 
1046 
1049 
1054 
1059 
1062 
1065 
1068 
1071  } stats;
1072 
1074  void regProbePoints() override;
1075 
1076  public:
1077  BaseCache(const BaseCacheParams *p, unsigned blk_size);
1078  ~BaseCache();
1079 
1080  void init() override;
1081 
1082  Port &getPort(const std::string &if_name,
1083  PortID idx=InvalidPortID) override;
1084 
1089     unsigned
1090     getBlockSize() const
1091     {
1092  return blkSize;
1093  }
1094 
1095  const AddrRangeList &getAddrRanges() const { return addrRanges; }
1096 
1097  MSHR *allocateMissBuffer(PacketPtr pkt, Tick time, bool sched_send = true)
1098  {
1099  MSHR *mshr = mshrQueue.allocate(pkt->getBlockAddr(blkSize), blkSize,
1100  pkt, time, order++,
1101  allocOnFill(pkt->cmd));
1102 
1103  if (mshrQueue.isFull()) {
1104  setBlocked((BlockedCause)MSHRQueue_MSHRs);
1105  }
1106 
1107  if (sched_send) {
1108  // schedule the send
1109  schedMemSideSendEvent(time);
1110  }
1111 
1112  return mshr;
1113  }
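    // Illustrative sketch (hypothetical caller, not part of base.hh): the
    // timing miss path is expected to use allocateMissBuffer() roughly like
    // this, blocking further requests once the last MSHR is taken:
    //
    //     MSHR *mshr = mshrQueue.findMatch(pkt->getBlockAddr(blkSize),
    //                                      pkt->isSecure());
    //     if (!mshr) {
    //         mshr = allocateMissBuffer(pkt, forward_time);
    //     } else {
    //         mshr->allocateTarget(pkt, forward_time, order++,
    //                              allocOnFill(pkt->cmd));
    //     }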
1114 
1115     void allocateWriteBuffer(PacketPtr pkt, Tick time)
1116     {
1117  // should only see writes or clean evicts here
1118  assert(pkt->isWrite() || pkt->cmd == MemCmd::CleanEvict);
1119 
1120  Addr blk_addr = pkt->getBlockAddr(blkSize);
1121 
1122  // If using compression, on evictions the block is decompressed and
1123  // the operation's latency is added to the payload delay. Consume
1124  // that payload delay here, meaning that the data is always stored
1125  // uncompressed in the writebuffer
1126  if (compressor) {
1127  time += pkt->payloadDelay;
1128  pkt->payloadDelay = 0;
1129  }
1130 
1131  WriteQueueEntry *wq_entry =
1132  writeBuffer.findMatch(blk_addr, pkt->isSecure());
1133  if (wq_entry && !wq_entry->inService) {
1134  DPRINTF(Cache, "Potential to merge writeback %s", pkt->print());
1135  }
1136 
1137  writeBuffer.allocate(blk_addr, blkSize, pkt, time, order++);
1138 
1139  if (writeBuffer.isFull()) {
1140  setBlocked((BlockedCause)MSHRQueue_WriteBuffer);
1141  }
1142 
1143  // schedule the send
1144  schedMemSideSendEvent(time);
1145  }
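    // Illustrative sketch (assumed flow, not part of base.hh): eviction packets
    // created by writebackBlk()/writecleanBlk() are gathered in a PacketList
    // and, in timing mode, would typically end up here via doWritebacks(), e.g.
    //
    //     for (PacketPtr wb_pkt : writebacks)
    //         allocateWriteBuffer(wb_pkt, forward_time);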
1146 
1150  bool isBlocked() const
1151  {
1152  return blocked != 0;
1153  }
1154 
1160     void setBlocked(BlockedCause cause)
1161     {
1162  uint8_t flag = 1 << cause;
1163  if (blocked == 0) {
1164  stats.blocked_causes[cause]++;
1165  blockedCycle = curCycle();
1166  cpuSidePort.setBlocked();
1167  }
1168  blocked |= flag;
1169  DPRINTF(Cache,"Blocking for cause %d, mask=%d\n", cause, blocked);
1170  }
1171 
1179     void clearBlocked(BlockedCause cause)
1180     {
1181  uint8_t flag = 1 << cause;
1182  blocked &= ~flag;
1183  DPRINTF(Cache,"Unblocking for cause %d, mask=%d\n", cause, blocked);
1184  if (blocked == 0) {
1185  stats.blocked_cycles[cause] += curCycle() - blockedCycle;
1186  cpuSidePort.clearBlocked();
1187  }
1188  }
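    // Worked example (illustrative, not part of base.hh): blocked is a bit
    // vector indexed by BlockedCause, so independent causes can overlap:
    //
    //     setBlocked(Blocked_NoMSHRs);       // blocked == 0b01, CPU side stalls
    //     setBlocked(Blocked_NoWBBuffers);   // blocked == 0b11
    //     clearBlocked(Blocked_NoMSHRs);     // blocked == 0b10, still blocked
    //     clearBlocked(Blocked_NoWBBuffers); // blocked == 0b00, retry sent upstream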
1189 
1198     void schedMemSideSendEvent(Tick time)
1199     {
1200  memSidePort.schedSendEvent(time);
1201  }
1202 
1203  bool inCache(Addr addr, bool is_secure) const {
1204  return tags->findBlock(addr, is_secure);
1205  }
1206 
1207  bool hasBeenPrefetched(Addr addr, bool is_secure) const {
1208  CacheBlk *block = tags->findBlock(addr, is_secure);
1209  if (block) {
1210  return block->wasPrefetched();
1211  } else {
1212  return false;
1213  }
1214  }
1215 
1216  bool inMissQueue(Addr addr, bool is_secure) const {
1217  return mshrQueue.findMatch(addr, is_secure);
1218  }
1219 
1220     void incMissCount(PacketPtr pkt)
1221     {
1222  assert(pkt->req->masterId() < system->maxMasters());
1223  stats.cmdStats(pkt).misses[pkt->req->masterId()]++;
1224  pkt->req->incAccessDepth();
1225  if (missCount) {
1226  --missCount;
1227  if (missCount == 0)
1228  exitSimLoop("A cache reached the maximum miss count");
1229  }
1230  }
1231     void incHitCount(PacketPtr pkt)
1232     {
1233  assert(pkt->req->masterId() < system->maxMasters());
1234  stats.cmdStats(pkt).hits[pkt->req->masterId()]++;
1235  }
1236 
1242  bool coalesce() const;
1243 
1244 
1249  void writebackVisitor(CacheBlk &blk);
1250 
1256  void invalidateVisitor(CacheBlk &blk);
1257 
1266  virtual bool sendMSHRQueuePacket(MSHR* mshr);
1267 
1276  bool sendWriteQueuePacket(WriteQueueEntry* wq_entry);
1277 
1283  void serialize(CheckpointOut &cp) const override;
1284  void unserialize(CheckpointIn &cp) override;
1285 };
1286 
1302 class WriteAllocator : public SimObject {
1303  public:
1304  WriteAllocator(const WriteAllocatorParams *p) :
1305  SimObject(p),
1306  coalesceLimit(p->coalesce_limit * p->block_size),
1307  noAllocateLimit(p->no_allocate_limit * p->block_size),
1308  delayThreshold(p->delay_threshold)
1309  {
1310  reset();
1311  }
1312 
1319  bool coalesce() const {
1320  return mode != WriteMode::ALLOCATE;
1321  }
1322 
1328  bool allocate() const {
1329  return mode != WriteMode::NO_ALLOCATE;
1330  }
1331 
1338  void reset() {
1339  mode = WriteMode::ALLOCATE;
1340  byteCount = 0;
1341  nextAddr = 0;
1342  }
1343 
1350  bool delay(Addr blk_addr) {
1351  if (delayCtr[blk_addr] > 0) {
1352  --delayCtr[blk_addr];
1353  return true;
1354  } else {
1355  return false;
1356  }
1357  }
1358 
1364  void resetDelay(Addr blk_addr) {
1365  delayCtr.erase(blk_addr);
1366  }
1367 
1378  void updateMode(Addr write_addr, unsigned write_size, Addr blk_addr);
1379 
1380  private:
1386  enum class WriteMode : char {
1387  ALLOCATE,
1388  COALESCE,
1389  NO_ALLOCATE,
1390  };
1391     WriteMode mode;
1392 
1393     /** Address to match writes against to detect streams. */
1394     Addr nextAddr;
1395 
1400  uint32_t byteCount;
1401 
1405  const uint32_t coalesceLimit;
1406  const uint32_t noAllocateLimit;
1410  const uint32_t delayThreshold;
1411 
1416  std::unordered_map<Addr, Counter> delayCtr;
1417 };
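// Illustrative sketch (hypothetical usage, not part of base.hh): a cache built
// with a write allocator would consult it on each incoming write, roughly:
//
//     if (writeAllocator && pkt->isWrite() && !pkt->req->isUncacheable()) {
//         writeAllocator->updateMode(pkt->getAddr(), pkt->getSize(),
//                                    pkt->getBlockAddr(blkSize));
//         if (!writeAllocator->allocate()) {
//             // a streaming write pattern was detected: coalesce in the write
//             // buffer and send the line downstream without filling the cache
//         }
//     }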
1418 
1419 #endif //__MEM_CACHE_BASE_HH__
