CacheMemory.cc
/*
 * Copyright (c) 2020-2021 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 1999-2012 Mark D. Hill and David A. Wood
 * Copyright (c) 2013 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "mem/ruby/structures/CacheMemory.hh"

#include "base/compiler.hh"
#include "base/intmath.hh"
#include "base/logging.hh"
#include "debug/HtmMem.hh"
#include "debug/RubyCache.hh"
#include "debug/RubyCacheTrace.hh"
#include "debug/RubyResourceStalls.hh"
#include "debug/RubyStats.hh"
#include "mem/cache/replacement_policies/weighted_lru_rp.hh"
#include "mem/ruby/protocol/AccessPermission.hh"
#include "mem/ruby/system/RubySystem.hh"

namespace gem5
{

namespace ruby
{

std::ostream&
operator<<(std::ostream& out, const CacheMemory& obj)
{
    obj.print(out);
    out << std::flush;
    return out;
}

CacheMemory::CacheMemory(const Params &p)
    : SimObject(p),
    dataArray(p.dataArrayBanks, p.dataAccessLatency,
              p.start_index_bit, p.ruby_system),
    tagArray(p.tagArrayBanks, p.tagAccessLatency,
             p.start_index_bit, p.ruby_system),
    cacheMemoryStats(this)
{
    m_cache_size = p.size;
    m_cache_assoc = p.assoc;
    m_replacementPolicy_ptr = p.replacement_policy;
    m_start_index_bit = p.start_index_bit;
    m_is_instruction_only_cache = p.is_icache;
    m_resource_stalls = p.resourceStalls;
    m_block_size = p.block_size;  // may be 0 at this point. Updated in init()
    m_use_occupancy =
        dynamic_cast<replacement_policy::WeightedLRU*>(
            m_replacementPolicy_ptr) != nullptr;
}

void
CacheMemory::init()
{
    if (m_block_size == 0) {
        m_block_size = RubySystem::getBlockSizeBytes();
    }
    m_cache_num_sets = (m_cache_size / m_cache_assoc) / m_block_size;
    assert(m_cache_num_sets > 1);
    m_cache_num_set_bits = floorLog2(m_cache_num_sets);
    assert(m_cache_num_set_bits > 0);

    m_cache.resize(m_cache_num_sets,
                   std::vector<AbstractCacheEntry*>(m_cache_assoc, nullptr));
    replacement_data.resize(m_cache_num_sets,
                            std::vector<ReplData>(m_cache_assoc, nullptr));
    // instantiate all the replacement_data here
    for (int i = 0; i < m_cache_num_sets; i++) {
        for (int j = 0; j < m_cache_assoc; j++) {
            replacement_data[i][j] =
                m_replacementPolicy_ptr->instantiateEntry();
        }
    }
}
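
// Worked example of the sizing above (illustrative numbers, not from the
// original source): a 16 KiB cache with assoc = 4 and 64-byte blocks gives
// (16384 / 4) / 64 = 64 sets, and floorLog2(64) = 6 set-index bits.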

CacheMemory::~CacheMemory()
{
    if (m_replacementPolicy_ptr)
        delete m_replacementPolicy_ptr;
    for (int i = 0; i < m_cache_num_sets; i++) {
        for (int j = 0; j < m_cache_assoc; j++) {
            delete m_cache[i][j];
        }
    }
}

// Convert an Address to its location (set index) in the cache
int64_t
CacheMemory::addressToCacheSet(Addr address) const
{
    assert(address == makeLineAddress(address));
    return bitSelect(address, m_start_index_bit,
                     m_start_index_bit + m_cache_num_set_bits - 1);
}
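
// Worked example (illustrative values): with m_start_index_bit = 6 (64-byte
// lines) and m_cache_num_set_bits = 6, bitSelect() extracts address bits
// [11:6]; line address 0x12340 then maps to set (0x12340 >> 6) & 0x3f = 0xd.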

// Given a cache index: returns the index of the tag in a set.
// returns -1 if the tag is not found.
int
CacheMemory::findTagInSet(int64_t cacheSet, Addr tag) const
{
    assert(tag == makeLineAddress(tag));
    // search the set for the tags
    auto it = m_tag_index.find(tag);
    if (it != m_tag_index.end())
        if (m_cache[cacheSet][it->second]->m_Permission !=
            AccessPermission_NotPresent)
            return it->second;
    return -1; // Not found
}

// Given a cache index: returns the index of the tag in a set.
// returns -1 if the tag is not found.
int
CacheMemory::findTagInSetIgnorePermissions(int64_t cacheSet,
                                           Addr tag) const
{
    assert(tag == makeLineAddress(tag));
    // search the set for the tags
    auto it = m_tag_index.find(tag);
    if (it != m_tag_index.end())
        return it->second;
    return -1; // Not found
}

// Given a unique cache block identifier (idx): return the valid address
// stored by the cache block. If the block is invalid/notpresent, the
// function returns the 0 address.
Addr
CacheMemory::getAddressAtIdx(int idx) const
{
    Addr tmp(0);

    int set = idx / m_cache_assoc;
    assert(set < m_cache_num_sets);

    int way = idx - set * m_cache_assoc;
    assert(way < m_cache_assoc);

    AbstractCacheEntry* entry = m_cache[set][way];
    if (entry == NULL ||
        entry->m_Permission == AccessPermission_Invalid ||
        entry->m_Permission == AccessPermission_NotPresent) {
        return tmp;
    }
    return entry->m_Address;
}
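
// Note: idx enumerates blocks row-major, idx = set * m_cache_assoc + way,
// which is what the division/remainder above inverts. For example, with
// m_cache_assoc = 4, idx = 10 decodes to set 10 / 4 = 2, way 10 - 2*4 = 2.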

bool
CacheMemory::tryCacheAccess(Addr address, RubyRequestType type,
                            DataBlock*& data_ptr)
{
    DPRINTF(RubyCache, "address: %#x\n", address);
    AbstractCacheEntry* entry = lookup(address);
    if (entry != nullptr) {
        // Do we even have a tag match?
        m_replacementPolicy_ptr->touch(entry->replacementData);
        entry->setLastAccess(curTick());
        data_ptr = &(entry->getDataBlk());

        if (entry->m_Permission == AccessPermission_Read_Write) {
            return true;
        }
        if ((entry->m_Permission == AccessPermission_Read_Only) &&
            (type == RubyRequestType_LD || type == RubyRequestType_IFETCH)) {
            return true;
        }
        // The line must not be accessible
    }
    data_ptr = NULL;
    return false;
}
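
// Summary of the check above: Read_Write permission satisfies any request
// type, Read_Only satisfies only LD and IFETCH, and every other permission
// state fails the access and leaves data_ptr NULL.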

bool
CacheMemory::testCacheAccess(Addr address, RubyRequestType type,
                             DataBlock*& data_ptr)
{
    DPRINTF(RubyCache, "address: %#x\n", address);
    AbstractCacheEntry* entry = lookup(address);
    if (entry != nullptr) {
        // Do we even have a tag match?
        m_replacementPolicy_ptr->touch(entry->replacementData);
        entry->setLastAccess(curTick());
        data_ptr = &(entry->getDataBlk());

        return entry->m_Permission != AccessPermission_NotPresent;
    }

    data_ptr = NULL;
    return false;
}

// tests to see if an address is present in the cache
bool
CacheMemory::isTagPresent(Addr address) const
{
    const AbstractCacheEntry* const entry = lookup(address);
    if (entry == nullptr) {
        // We didn't find the tag
        DPRINTF(RubyCache, "No tag match for address: %#x\n", address);
        return false;
    }
    DPRINTF(RubyCache, "address: %#x found\n", address);
    return true;
}

// Returns true if there is either:
//   a) a tag match on this address, or
//   b) an unused line in the same cache set
bool
CacheMemory::cacheAvail(Addr address) const
{
    assert(address == makeLineAddress(address));

    int64_t cacheSet = addressToCacheSet(address);

    for (int i = 0; i < m_cache_assoc; i++) {
        AbstractCacheEntry* entry = m_cache[cacheSet][i];
        if (entry != NULL) {
            if (entry->m_Address == address ||
                entry->m_Permission == AccessPermission_NotPresent) {
                // Already in the cache or we found an empty entry
                return true;
            }
        } else {
            return true;
        }
    }
    return false;
}

AbstractCacheEntry*
CacheMemory::allocate(Addr address, AbstractCacheEntry *entry)
{
    assert(address == makeLineAddress(address));
    assert(!isTagPresent(address));
    assert(cacheAvail(address));
    DPRINTF(RubyCache, "address: %#x\n", address);

    // Find the first open slot
    int64_t cacheSet = addressToCacheSet(address);
    std::vector<AbstractCacheEntry*> &set = m_cache[cacheSet];
    for (int i = 0; i < m_cache_assoc; i++) {
        if (!set[i] || set[i]->m_Permission == AccessPermission_NotPresent) {
            if (set[i] && (set[i] != entry)) {
                warn_once("This protocol contains a cache entry handling bug: "
                    "Entries in the cache should never be NotPresent! If\n"
                    "this entry (%#x) is not tracked elsewhere, it will leak "
                    "memory here. Fix your protocol to eliminate these!",
                    address);
            }
            set[i] = entry;  // Init entry
            set[i]->m_Address = address;
            set[i]->m_Permission = AccessPermission_Invalid;
            DPRINTF(RubyCache, "Allocate clearing lock for addr: %x\n",
                    address);
            set[i]->m_locked = -1;
            m_tag_index[address] = i;
            set[i]->setPosition(cacheSet, i);
            set[i]->replacementData = replacement_data[cacheSet][i];
            set[i]->setLastAccess(curTick());

            // Call reset function here to set initial value for different
            // replacement policies.
            m_replacementPolicy_ptr->reset(entry->replacementData);

            return entry;
        }
    }
    panic("Allocate didn't find an available entry");
}
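
// Typical call sequence (a sketch; the exact steps are generated from each
// SLICC protocol): a controller first checks cacheAvail(addr); when no way
// is free it uses cacheProbe(addr) (below) to pick a victim, deallocates
// that victim, and only then calls allocate(addr, new_entry).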

void
CacheMemory::deallocate(Addr address)
{
    DPRINTF(RubyCache, "address: %#x\n", address);
    AbstractCacheEntry* entry = lookup(address);
    assert(entry != nullptr);
    m_replacementPolicy_ptr->invalidate(entry->replacementData);
    uint32_t cache_set = entry->getSet();
    uint32_t way = entry->getWay();
    delete entry;
    m_cache[cache_set][way] = NULL;
    m_tag_index.erase(address);
}

// Returns the physical address of the conflicting cache line
Addr
CacheMemory::cacheProbe(Addr address) const
{
    assert(address == makeLineAddress(address));
    assert(!cacheAvail(address));

    int64_t cacheSet = addressToCacheSet(address);
    std::vector<ReplaceableEntry*> candidates;
    for (int i = 0; i < m_cache_assoc; i++) {
        candidates.push_back(static_cast<ReplaceableEntry*>(
                    m_cache[cacheSet][i]));
    }
    return m_cache[cacheSet][m_replacementPolicy_ptr->
                getVictim(candidates)->getWay()]->m_Address;
}

// looks an address up in the cache
AbstractCacheEntry*
CacheMemory::lookup(Addr address)
{
    assert(address == makeLineAddress(address));
    int64_t cacheSet = addressToCacheSet(address);
    int loc = findTagInSet(cacheSet, address);
    if (loc == -1) return NULL;
    return m_cache[cacheSet][loc];
}

// looks an address up in the cache (const version)
const AbstractCacheEntry*
CacheMemory::lookup(Addr address) const
{
    assert(address == makeLineAddress(address));
    int64_t cacheSet = addressToCacheSet(address);
    int loc = findTagInSet(cacheSet, address);
    if (loc == -1) return NULL;
    return m_cache[cacheSet][loc];
}

// Sets the most recently used bit for a cache block
void
CacheMemory::setMRU(Addr address)
{
    AbstractCacheEntry* entry = lookup(makeLineAddress(address));
    if (entry != nullptr) {
        m_replacementPolicy_ptr->touch(entry->replacementData);
        entry->setLastAccess(curTick());
    }
}

void
CacheMemory::setMRU(AbstractCacheEntry *entry)
{
    assert(entry != nullptr);
    m_replacementPolicy_ptr->touch(entry->replacementData);
    entry->setLastAccess(curTick());
}

void
CacheMemory::setMRU(Addr address, int occupancy)
{
    AbstractCacheEntry* entry = lookup(makeLineAddress(address));
    if (entry != nullptr) {
        // m_use_occupancy indicates whether the WeightedLRU replacement
        // policy is in use; that policy takes the occupancy as an extra
        // argument to touch().
        if (m_use_occupancy) {
            static_cast<replacement_policy::WeightedLRU*>(
                m_replacementPolicy_ptr)->touch(
                entry->replacementData, occupancy);
        } else {
            m_replacementPolicy_ptr->touch(entry->replacementData);
        }
        entry->setLastAccess(curTick());
    }
}

int
CacheMemory::getReplacementWeight(int64_t set, int64_t loc)
{
    assert(set < m_cache_num_sets);
    assert(loc < m_cache_assoc);
    int ret = 0;
    if (m_cache[set][loc] != NULL) {
        ret = m_cache[set][loc]->getNumValidBlocks();
        assert(ret >= 0);
    }

    return ret;
}

void
CacheMemory::recordCacheContents(int cntrl, CacheRecorder* tr) const
{
    uint64_t warmedUpBlocks = 0;
    [[maybe_unused]] uint64_t totalBlocks = (uint64_t)m_cache_num_sets *
                                            (uint64_t)m_cache_assoc;

    for (int i = 0; i < m_cache_num_sets; i++) {
        for (int j = 0; j < m_cache_assoc; j++) {
            if (m_cache[i][j] != NULL) {
                AccessPermission perm = m_cache[i][j]->m_Permission;
                RubyRequestType request_type = RubyRequestType_NULL;
                if (perm == AccessPermission_Read_Only) {
                    if (m_is_instruction_only_cache) {
                        request_type = RubyRequestType_IFETCH;
                    } else {
                        request_type = RubyRequestType_LD;
                    }
                } else if (perm == AccessPermission_Read_Write) {
                    request_type = RubyRequestType_ST;
                }

                if (request_type != RubyRequestType_NULL) {
                    Tick lastAccessTick;
                    lastAccessTick = m_cache[i][j]->getLastAccess();
                    tr->addRecord(cntrl, m_cache[i][j]->m_Address,
                                  0, request_type, lastAccessTick,
                                  m_cache[i][j]->getDataBlk());
                    warmedUpBlocks++;
                }
            }
        }
    }

    DPRINTF(RubyCacheTrace, "%s: %lli blocks of %lli total blocks "
            "recorded %.2f%%\n", name().c_str(), warmedUpBlocks,
            totalBlocks, (float(warmedUpBlocks) / float(totalBlocks)) * 100.0);
}

void
CacheMemory::print(std::ostream& out) const
{
    out << "Cache dump: " << name() << std::endl;
    for (int i = 0; i < m_cache_num_sets; i++) {
        for (int j = 0; j < m_cache_assoc; j++) {
            if (m_cache[i][j] != NULL) {
                out << "  Index: " << i
                    << " way: " << j
                    << " entry: " << *m_cache[i][j] << std::endl;
            } else {
                out << "  Index: " << i
                    << " way: " << j
                    << " entry: NULL" << std::endl;
            }
        }
    }
}

void
CacheMemory::printData(std::ostream& out) const
{
    out << "printData() not supported" << std::endl;
}

void
CacheMemory::setLocked(Addr address, int context)
{
    DPRINTF(RubyCache, "Setting Lock for addr: %#x to %d\n", address, context);
    AbstractCacheEntry* entry = lookup(address);
    assert(entry != nullptr);
    entry->setLocked(context);
}

void
CacheMemory::clearLocked(Addr address)
{
    DPRINTF(RubyCache, "Clear Lock for addr: %#x\n", address);
    AbstractCacheEntry* entry = lookup(address);
    assert(entry != nullptr);
    entry->clearLocked();
}

void
CacheMemory::clearLockedAll(int context)
{
    // iterate through every set and way to get a cache line
    for (auto i = m_cache.begin(); i != m_cache.end(); ++i) {
        std::vector<AbstractCacheEntry*> &set = *i;
        for (auto j = set.begin(); j != set.end(); ++j) {
            AbstractCacheEntry *line = *j;
            if (line && line->isLocked(context)) {
                DPRINTF(RubyCache, "Clear Lock for addr: %#x\n",
                        line->m_Address);
                line->clearLocked();
            }
        }
    }
}

bool
CacheMemory::isLocked(Addr address, int context)
{
    AbstractCacheEntry* entry = lookup(address);
    assert(entry != nullptr);
    DPRINTF(RubyCache, "Testing Lock for addr: %#llx cur %d con %d\n",
            address, entry->m_locked, context);
    return entry->isLocked(context);
}
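
// Usage sketch (protocol-dependent, not prescribed by this file): these lock
// routines back load-linked/store-conditional sequences. A protocol typically
// calls setLocked(addr, context) when the LL completes, tests
// isLocked(addr, context) when the matching SC arrives, and relies on
// clearLocked()/clearLockedAll() when the line is invalidated or the
// context is squashed.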

CacheMemory::
CacheMemoryStats::CacheMemoryStats(statistics::Group *parent)
    : statistics::Group(parent),
      ADD_STAT(numDataArrayReads, "Number of data array reads"),
      ADD_STAT(numDataArrayWrites, "Number of data array writes"),
      ADD_STAT(numTagArrayReads, "Number of tag array reads"),
      ADD_STAT(numTagArrayWrites, "Number of tag array writes"),
      ADD_STAT(numTagArrayStalls, "Number of stalls caused by tag array"),
      ADD_STAT(numDataArrayStalls, "Number of stalls caused by data array"),
      ADD_STAT(htmTransCommitReadSet, "Read set size of a committed "
                                      "transaction"),
      ADD_STAT(htmTransCommitWriteSet, "Write set size of a committed "
                                       "transaction"),
      ADD_STAT(htmTransAbortReadSet, "Read set size of an aborted "
                                     "transaction"),
      ADD_STAT(htmTransAbortWriteSet, "Write set size of an aborted "
                                      "transaction"),
      ADD_STAT(m_demand_hits, "Number of cache demand hits"),
      ADD_STAT(m_demand_misses, "Number of cache demand misses"),
      ADD_STAT(m_demand_accesses, "Number of cache demand accesses",
               m_demand_hits + m_demand_misses),
      ADD_STAT(m_prefetch_hits, "Number of cache prefetch hits"),
      ADD_STAT(m_prefetch_misses, "Number of cache prefetch misses"),
      ADD_STAT(m_prefetch_accesses, "Number of cache prefetch accesses",
               m_prefetch_hits + m_prefetch_misses),
      ADD_STAT(m_accessModeType, "")
{
    numDataArrayReads
        .flags(statistics::nozero);

    numDataArrayWrites
        .flags(statistics::nozero);

    numTagArrayReads
        .flags(statistics::nozero);

    numTagArrayWrites
        .flags(statistics::nozero);

    numTagArrayStalls
        .flags(statistics::nozero);

    numDataArrayStalls
        .flags(statistics::nozero);

    htmTransCommitReadSet
        .init(8)
        .flags(statistics::pdf | statistics::dist | statistics::nozero |
               statistics::nonan);

    htmTransCommitWriteSet
        .init(8)
        .flags(statistics::pdf | statistics::dist | statistics::nozero |
               statistics::nonan);

    htmTransAbortReadSet
        .init(8)
        .flags(statistics::pdf | statistics::dist | statistics::nozero |
               statistics::nonan);

    htmTransAbortWriteSet
        .init(8)
        .flags(statistics::pdf | statistics::dist | statistics::nozero |
               statistics::nonan);

    m_prefetch_hits
        .flags(statistics::nozero);

    m_prefetch_misses
        .flags(statistics::nozero);

    m_prefetch_accesses
        .flags(statistics::nozero);

    m_accessModeType
        .init(RubyRequestType_NUM)
        .flags(statistics::pdf | statistics::total);

    for (int i = 0; i < RubyAccessMode_NUM; i++) {
        m_accessModeType
            .subname(i, RubyAccessMode_to_string(RubyAccessMode(i)))
            .flags(statistics::nozero)
            ;
    }
}

// assumption: SLICC generated files will only call this function
// once **all** resources are granted
void
CacheMemory::recordRequestType(CacheRequestType requestType, Addr addr)
{
    DPRINTF(RubyStats, "Recorded statistic: %s\n",
            CacheRequestType_to_string(requestType));
    switch(requestType) {
    case CacheRequestType_DataArrayRead:
        if (m_resource_stalls)
            dataArray.reserve(addressToCacheSet(addr));
        cacheMemoryStats.numDataArrayReads++;
        return;
    case CacheRequestType_DataArrayWrite:
        if (m_resource_stalls)
            dataArray.reserve(addressToCacheSet(addr));
        cacheMemoryStats.numDataArrayWrites++;
        return;
    case CacheRequestType_TagArrayRead:
        if (m_resource_stalls)
            tagArray.reserve(addressToCacheSet(addr));
        cacheMemoryStats.numTagArrayReads++;
        return;
    case CacheRequestType_TagArrayWrite:
        if (m_resource_stalls)
            tagArray.reserve(addressToCacheSet(addr));
        cacheMemoryStats.numTagArrayWrites++;
        return;
    default:
        warn("CacheMemory access_type not found: %s",
             CacheRequestType_to_string(requestType));
    }
}

bool
CacheMemory::checkResourceAvailable(CacheResourceType res, Addr addr)
{
    if (!m_resource_stalls) {
        return true;
    }

    if (res == CacheResourceType_TagArray) {
        if (tagArray.tryAccess(addressToCacheSet(addr))) return true;
        else {
            DPRINTF(RubyResourceStalls,
                    "Tag array stall on addr %#x in set %d\n",
                    addr, addressToCacheSet(addr));
            cacheMemoryStats.numTagArrayStalls++;
            return false;
        }
    } else if (res == CacheResourceType_DataArray) {
        if (dataArray.tryAccess(addressToCacheSet(addr))) return true;
        else {
            DPRINTF(RubyResourceStalls,
                    "Data array stall on addr %#x in set %d\n",
                    addr, addressToCacheSet(addr));
            cacheMemoryStats.numDataArrayStalls++;
            return false;
        }
    } else {
        panic("Unrecognized cache resource type.");
    }
}
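
// Note on the banked-array model: tagArray and dataArray are BankedArray
// objects sized by the tagArrayBanks/dataArrayBanks parameters. tryAccess()
// only asks whether the bank holding the given set is free this cycle, while
// reserve() (see recordRequestType above) actually claims it, so a contended
// bank makes this function return false and the controller retries later.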

bool
CacheMemory::isBlockInvalid(int64_t cache_set, int64_t loc)
{
    return (m_cache[cache_set][loc]->m_Permission == AccessPermission_Invalid);
}

bool
CacheMemory::isBlockNotBusy(int64_t cache_set, int64_t loc)
{
    return (m_cache[cache_set][loc]->m_Permission != AccessPermission_Busy);
}

/* hardware transactional memory */

void
CacheMemory::htmAbortTransaction()
{
    uint64_t htmReadSetSize = 0;
    uint64_t htmWriteSetSize = 0;

    // iterate through every set and way to get a cache line
    for (auto i = m_cache.begin(); i != m_cache.end(); ++i)
    {
        std::vector<AbstractCacheEntry*> &set = *i;

        for (auto j = set.begin(); j != set.end(); ++j)
        {
            AbstractCacheEntry *line = *j;

            if (line != nullptr) {
                htmReadSetSize += (line->getInHtmReadSet() ? 1 : 0);
                htmWriteSetSize += (line->getInHtmWriteSet() ? 1 : 0);
                if (line->getInHtmWriteSet()) {
                    line->invalidateEntry();
                }
                line->setInHtmWriteSet(false);
                line->setInHtmReadSet(false);
                line->clearLocked();
            }
        }
    }

    cacheMemoryStats.htmTransAbortReadSet.sample(htmReadSetSize);
    cacheMemoryStats.htmTransAbortWriteSet.sample(htmWriteSetSize);
    DPRINTF(HtmMem, "htmAbortTransaction: read set=%u write set=%u\n",
            htmReadSetSize, htmWriteSetSize);
}
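
// Note: on an abort, speculatively written lines (the write set) are
// invalidated to discard transactional state, while read-set lines only have
// their HTM tracking bits cleared; commit (below) keeps all data and merely
// clears the tracking bits and any locks.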

void
CacheMemory::htmCommitTransaction()
{
    uint64_t htmReadSetSize = 0;
    uint64_t htmWriteSetSize = 0;

    // iterate through every set and way to get a cache line
    for (auto i = m_cache.begin(); i != m_cache.end(); ++i)
    {
        std::vector<AbstractCacheEntry*> &set = *i;

        for (auto j = set.begin(); j != set.end(); ++j)
        {
            AbstractCacheEntry *line = *j;
            if (line != nullptr) {
                htmReadSetSize += (line->getInHtmReadSet() ? 1 : 0);
                htmWriteSetSize += (line->getInHtmWriteSet() ? 1 : 0);
                line->setInHtmWriteSet(false);
                line->setInHtmReadSet(false);
                line->clearLocked();
            }
        }
    }

    cacheMemoryStats.htmTransCommitReadSet.sample(htmReadSetSize);
    cacheMemoryStats.htmTransCommitWriteSet.sample(htmWriteSetSize);
    DPRINTF(HtmMem, "htmCommitTransaction: read set=%u write set=%u\n",
            htmReadSetSize, htmWriteSetSize);
}

void
CacheMemory::profileDemandHit()
{
    cacheMemoryStats.m_demand_hits++;
}

void
CacheMemory::profileDemandMiss()
{
    cacheMemoryStats.m_demand_misses++;
}

void
CacheMemory::profilePrefetchHit()
{
    cacheMemoryStats.m_prefetch_hits++;
}

void
CacheMemory::profilePrefetchMiss()
{
    cacheMemoryStats.m_prefetch_misses++;
}

} // namespace ruby
} // namespace gem5