gem5 v24.1.0.1
Loading...
Searching...
No Matches
CacheMemory.cc
Go to the documentation of this file.
1/*
2 * Copyright (c) 2020-2021 ARM Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Copyright (c) 1999-2012 Mark D. Hill and David A. Wood
15 * Copyright (c) 2013 Advanced Micro Devices, Inc.
16 * All rights reserved.
17 *
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions are
20 * met: redistributions of source code must retain the above copyright
21 * notice, this list of conditions and the following disclaimer;
22 * redistributions in binary form must reproduce the above copyright
23 * notice, this list of conditions and the following disclaimer in the
24 * documentation and/or other materials provided with the distribution;
25 * neither the name of the copyright holders nor the names of its
26 * contributors may be used to endorse or promote products derived from
27 * this software without specific prior written permission.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
30 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
31 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
32 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
33 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
34 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
35 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
36 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
37 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
38 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
39 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
40 */
41
43
44#include "base/compiler.hh"
45#include "base/intmath.hh"
46#include "base/logging.hh"
47#include "debug/HtmMem.hh"
48#include "debug/RubyCache.hh"
49#include "debug/RubyCacheTrace.hh"
50#include "debug/RubyResourceStalls.hh"
51#include "debug/RubyStats.hh"
53#include "mem/ruby/protocol/AccessPermission.hh"
55
56namespace gem5
57{
58
59namespace ruby
60{
61
62std::ostream&
63operator<<(std::ostream& out, const CacheMemory& obj)
64{
65 obj.print(out);
66 out << std::flush;
67 return out;
68}
69
// CacheMemory constructor: records cache geometry (size, associativity,
// block size, set-index start bit) and the replacement policy from the
// generated Params; the banked tag/data/atomic-ALU arrays are built in the
// member-initialiser list.
// NOTE(review): the doxygen extraction dropped orig. lines 70/81/84 here
// (the constructor signature and the initialisation whose tail
// "m_replacementPolicy_ptr) ? true : false;" survives below — presumably
// the m_use_occupancy dynamic_cast). Restore from upstream before building.
71 : SimObject(p),
72 dataArray(p.dataArrayBanks, p.dataAccessLatency, p.start_index_bit),
73 tagArray(p.tagArrayBanks, p.tagAccessLatency, p.start_index_bit),
74 atomicALUArray(p.atomicALUs, p.atomicLatency),
75 cacheMemoryStats(this)
76{
77 m_cache_size = p.size;
78 m_cache_assoc = p.assoc;
79 m_replacementPolicy_ptr = p.replacement_policy;
80 m_start_index_bit = p.start_index_bit;
82 m_resource_stalls = p.resourceStalls;
83 m_block_size = p.block_size; // may be 0 at this point. Updated in init()
85 m_replacementPolicy_ptr) ? true : false;
86}
87
// Late-binding hook: propagate the RubySystem clock period to the banked
// tag/data/ALU arrays and adopt the system block size when this cache was
// configured with block_size == 0 (see the constructor comment).
// NOTE(review): extraction dropped orig. lines 89 and 100 — the function
// signature (setRubySystem(RubySystem *rs), per the declaration index) and
// one trailing statement; confirm against upstream.
88void
90{
91 dataArray.setClockPeriod(rs->clockPeriod());
92 tagArray.setClockPeriod(rs->clockPeriod());
93 atomicALUArray.setClockPeriod(rs->clockPeriod());
94 atomicALUArray.setBlockSize(rs->getBlockSizeBytes());
95
96 if (m_block_size == 0) {
97 m_block_size = rs->getBlockSizeBytes();
98 }
99
101}
102
// init(): sizes the cache arrays once the final block size is known, then
// instantiates per-(set, way) replacement_data via the replacement policy.
// NOTE(review): extraction dropped orig. lines 104/107/109/112-115/120 —
// the signature, the m_cache_num_sets / m_cache_num_set_bits computations,
// the container resize calls, and the instantiateEntry() call that feeds
// the assignment visible at "replacement_data[i][j] =". Restore upstream.
103void
105{
106 assert(m_block_size != 0);
108 assert(m_cache_num_sets > 1);
110 assert(m_cache_num_set_bits > 0);
111
116 // instantiate all the replacement_data here
117 for (int i = 0; i < m_cache_num_sets; i++) {
118 for ( int j = 0; j < m_cache_assoc; j++) {
119 replacement_data[i][j] =
121 }
122 }
123}
124
// Destructor body: every AbstractCacheEntry in the set/way grid is owned by
// this object and is deleted here.
// NOTE(review): extraction dropped orig. lines 125/127-128 (the destructor
// signature and preceding statements) — confirm against upstream.
126{
129 for (int i = 0; i < m_cache_num_sets; i++) {
130 for (int j = 0; j < m_cache_assoc; j++) {
131 delete m_cache[i][j];
132 }
133 }
134}
135
// Convert a line address to its set index by selecting the set bits
// starting at m_start_index_bit.
// NOTE(review): extraction dropped orig. lines 138 and 142 — the function
// signature and the second bitSelect() argument (presumably the upper set
// bit derived from m_cache_num_set_bits). Restore from upstream.
136// convert a Address to its location in the cache
137int64_t
139{
140 assert(address == makeLineAddress(address));
141 return bitSelect(address, m_start_index_bit,
143}
144
145// Given a cache index: returns the index of the tag in a set.
146// returns -1 if the tag is not found.
147int
148CacheMemory::findTagInSet(int64_t cacheSet, Addr tag) const
149{
150 assert(tag == makeLineAddress(tag));
151 // search the set for the tags
152 auto it = m_tag_index.find(tag);
153 if (it != m_tag_index.end())
154 if (m_cache[cacheSet][it->second]->m_Permission !=
155 AccessPermission_NotPresent)
156 return it->second;
157 return -1; // Not found
158}
159
// Like findTagInSet(), but a NotPresent entry still counts as a hit:
// any mapping in m_tag_index is returned regardless of permission.
// NOTE(review): extraction dropped orig. line 163 (first signature line,
// findTagInSetIgnorePermissions(int64_t cacheSet, per the declaration
// index) — restore from upstream.
160// Given a cache index: returns the index of the tag in a set.
161// returns -1 if the tag is not found.
162int
164 Addr tag) const
165{
166 assert(tag == makeLineAddress(tag));
167 // search the set for the tags
168 auto it = m_tag_index.find(tag);
169 if (it != m_tag_index.end())
170 return it->second;
171 return -1; // Not found
172}
173
// Decode a flat block index (set = idx / assoc, way = idx % assoc) and
// return the line address stored there, or address 0 when the slot is
// empty, Invalid, or NotPresent.
// NOTE(review): extraction dropped orig. line 178 (the signature,
// getAddressAtIdx(int idx) const per the declaration index).
174// Given an unique cache block identifier (idx): return the valid address
175// stored by the cache block. If the block is invalid/notpresent, the
176// function returns the 0 address
177Addr
179{
180 Addr tmp(0);
181
182 int set = idx / m_cache_assoc;
183 assert(set < m_cache_num_sets);
184
185 int way = idx - set * m_cache_assoc;
186 assert (way < m_cache_assoc);
187
188 AbstractCacheEntry* entry = m_cache[set][way];
189 if (entry == NULL ||
190 entry->m_Permission == AccessPermission_Invalid ||
191 entry->m_Permission == AccessPermission_NotPresent) {
192 return tmp;
193 }
194 return entry->m_Address;
195}
196
// Attempt a permission-checked access. On a tag hit the entry's data block
// is exposed through data_ptr and its last-access time is refreshed; the
// return value reports whether the current permission allows `type`
// (Read_Write allows everything; Read_Only allows only LD/IFETCH).
// On failure data_ptr is cleared.
// NOTE(review): extraction dropped orig. line 205 — presumably the
// replacement-policy touch() call that pairs with setLastAccess below;
// confirm against upstream.
197bool
198CacheMemory::tryCacheAccess(Addr address, RubyRequestType type,
199 DataBlock*& data_ptr)
200{
201 DPRINTF(RubyCache, "trying to access address: %#x\n", address);
202 AbstractCacheEntry* entry = lookup(address);
203 if (entry != nullptr) {
204 // Do we even have a tag match?
206 entry->setLastAccess(curTick());
207 data_ptr = &(entry->getDataBlk());
208
209 if (entry->m_Permission == AccessPermission_Read_Write) {
210 DPRINTF(RubyCache, "Have permission to access address: %#x\n",
211 address);
212 return true;
213 }
214 if ((entry->m_Permission == AccessPermission_Read_Only) &&
215 (type == RubyRequestType_LD || type == RubyRequestType_IFETCH)) {
216 DPRINTF(RubyCache, "Have permission to access address: %#x\n",
217 address);
218 return true;
219 }
220 // The line must not be accessible
221 }
222 DPRINTF(RubyCache, "Do not have permission to access address: %#x\n",
223 address);
224 data_ptr = NULL;
225 return false;
226}
227
// Like tryCacheAccess(), but ignores the request type: any tag hit whose
// permission is not NotPresent counts as accessible. Still refreshes the
// entry's last-access time and exposes the data block via data_ptr.
// NOTE(review): extraction dropped orig. line 236 — presumably the
// replacement-policy touch() call; confirm against upstream.
228bool
229CacheMemory::testCacheAccess(Addr address, RubyRequestType type,
230 DataBlock*& data_ptr)
231{
232 DPRINTF(RubyCache, "testing address: %#x\n", address);
233 AbstractCacheEntry* entry = lookup(address);
234 if (entry != nullptr) {
235 // Do we even have a tag match?
237 entry->setLastAccess(curTick());
238 data_ptr = &(entry->getDataBlk());
239
240 DPRINTF(RubyCache, "have permission for address %#x?: %d\n",
241 address,
242 entry->m_Permission != AccessPermission_NotPresent);
243 return entry->m_Permission != AccessPermission_NotPresent;
244 }
245
246 DPRINTF(RubyCache, "do not have permission for address %#x\n",
247 address);
248 data_ptr = NULL;
249 return false;
250}
251
// Presence check only: true iff lookup() finds an entry for this line
// address (no permission filtering beyond what lookup applies).
// NOTE(review): extraction dropped orig. line 254 (the signature,
// isTagPresent(Addr address) const per the declaration index).
252// tests to see if an address is present in the cache
253bool
255{
256 const AbstractCacheEntry* const entry = lookup(address);
257 if (entry == nullptr) {
258 // We didn't find the tag
259 DPRINTF(RubyCache, "No tag match for address: %#x\n", address);
260 return false;
261 }
262 DPRINTF(RubyCache, "address: %#x found\n", address);
263 return true;
264}
265
// True when `address` could be placed in its set without eviction: either
// the tag is already there, a way holds a NotPresent placeholder, or a way
// is empty (NULL).
// NOTE(review): extraction dropped orig. line 270 (the signature,
// cacheAvail(Addr address) const per the declaration index).
266// Returns true if there is:
267// a) a tag match on this address or there is
268// b) an unused line in the same cache "way"
269bool
271{
272 assert(address == makeLineAddress(address));
273
274 int64_t cacheSet = addressToCacheSet(address);
275
276 for (int i = 0; i < m_cache_assoc; i++) {
277 AbstractCacheEntry* entry = m_cache[cacheSet][i];
278 if (entry != NULL) {
279 if (entry->m_Address == address ||
280 entry->m_Permission == AccessPermission_NotPresent) {
281 // Already in the cache or we found an empty entry
282 return true;
283 }
284 } else {
285 return true;
286 }
287 }
288 return false;
289}
290
// Install `entry` (caller-allocated) into the first free or NotPresent way
// of the set for `address`, initialising its address, permission (Invalid),
// lock state, position, replacement data, and the m_tag_index mapping.
// Preconditions (asserted): line-aligned address, tag absent, space free.
// NOTE(review): extraction dropped orig. lines 291-292 (return type and
// signature — allocate(Addr address, AbstractCacheEntry *new_entry) per the
// declaration index), 299-300 and 304 (presumably the local `set` reference
// into m_cache[cacheSet] used below), and 327 (presumably the
// replacement-policy reset() call announced by the preceding comment).
293{
294 assert(address == makeLineAddress(address));
295 assert(!isTagPresent(address));
296 assert(cacheAvail(address));
297 DPRINTF(RubyCache, "allocating address: %#x\n", address);
298
301
302 // Find the first open slot
303 int64_t cacheSet = addressToCacheSet(address);
305 for (int i = 0; i < m_cache_assoc; i++) {
306 if (!set[i] || set[i]->m_Permission == AccessPermission_NotPresent) {
307 if (set[i] && (set[i] != entry)) {
308 warn_once("This protocol contains a cache entry handling bug: "
309 "Entries in the cache should never be NotPresent! If\n"
310 "this entry (%#x) is not tracked elsewhere, it will memory "
311 "leak here. Fix your protocol to eliminate these!",
312 address);
313 }
314 set[i] = entry; // Init entry
315 set[i]->m_Address = address;
316 set[i]->m_Permission = AccessPermission_Invalid;
317 DPRINTF(RubyCache, "Allocate clearing lock for addr: 0x%x\n",
318 address);
319 set[i]->m_locked = -1;
320 m_tag_index[address] = i;
321 set[i]->setPosition(cacheSet, i);
322 set[i]->replacementData = replacement_data[cacheSet][i];
323 set[i]->setLastAccess(curTick());
324
325 // Call reset function here to set initial value for different
326 // replacement policies.
328
329 return entry;
330 }
331 }
332 panic("Allocate didn't find an available entry");
333}
334
// Remove and destroy the entry for `address`: delete the object, NULL its
// set/way slot, and erase the tag-index mapping. The entry must exist.
// NOTE(review): extraction dropped orig. lines 336 (the signature,
// deallocate(Addr address) per the declaration index) and 341 (presumably
// the replacement-policy invalidate() of the entry's replacementData).
335void
337{
338 DPRINTF(RubyCache, "deallocating address: %#x\n", address);
339 AbstractCacheEntry* entry = lookup(address);
340 assert(entry != nullptr);
342 uint32_t cache_set = entry->getSet();
343 uint32_t way = entry->getWay();
344 delete entry;
345 m_cache[cache_set][way] = NULL;
346 m_tag_index.erase(address);
347}
348
// Pick the victim for a full set: gather all ways as ReplaceableEntry
// candidates, ask the replacement policy for the victim, and return the
// address currently held by that way. Requires !cacheAvail(address).
// NOTE(review): extraction dropped orig. lines 351 (the signature,
// cacheProbe(Addr address) const per the declaration index) and 357
// (presumably the declaration of the `candidates` vector used below).
349// Returns with the physical address of the conflicting cache line
350Addr
352{
353 assert(address == makeLineAddress(address));
354 assert(!cacheAvail(address));
355
356 int64_t cacheSet = addressToCacheSet(address);
358 for (int i = 0; i < m_cache_assoc; i++) {
359 candidates.push_back(static_cast<ReplaceableEntry*>(
360 m_cache[cacheSet][i]));
361 }
362 return m_cache[cacheSet][m_replacementPolicy_ptr->
363 getVictim(candidates)->getWay()]->m_Address;
364}
365
// Non-const lookup: map the line address to its set, find the way via
// findTagInSet(), and return the entry pointer (NULL on miss).
// NOTE(review): extraction dropped orig. lines 367-368 (return type and
// signature — lookup(Addr address) per the declaration index).
366// looks an address up in the cache
369{
370 assert(address == makeLineAddress(address));
371 int64_t cacheSet = addressToCacheSet(address);
372 int loc = findTagInSet(cacheSet, address);
373 if (loc == -1) return NULL;
374 return m_cache[cacheSet][loc];
375}
376
// Const overload of lookup(): identical set/way resolution, returning the
// entry pointer or NULL on miss.
// NOTE(review): extraction dropped orig. lines 378-379 (return type and
// signature of the const overload).
377// looks an address up in the cache
380{
381 assert(address == makeLineAddress(address));
382 int64_t cacheSet = addressToCacheSet(address);
383 int loc = findTagInSet(cacheSet, address);
384 if (loc == -1) return NULL;
385 return m_cache[cacheSet][loc];
386}
387
// setMRU(Addr): refresh the last-access tick of the entry for `address`,
// silently doing nothing on a miss.
// NOTE(review): extraction dropped orig. lines 390 (the signature,
// setMRU(Addr address) per the declaration index) and 394 (presumably the
// replacement-policy touch() that pairs with setLastAccess below).
388// Sets the most recently used bit for a cache block
389void
391{
392 AbstractCacheEntry* entry = lookup(makeLineAddress(address));
393 if (entry != nullptr) {
395 entry->setLastAccess(curTick());
396 }
397}
398
// setMRU(entry): entry-pointer variant; the caller guarantees a non-null
// entry, whose last-access tick is refreshed.
// NOTE(review): extraction dropped orig. lines 400 (the signature, taking
// AbstractCacheEntry*) and 403 (presumably the replacement-policy touch()).
399void
401{
402 assert(entry != nullptr);
404 entry->setLastAccess(curTick());
405}
406
// setMRU(Addr, occupancy): occupancy-aware variant. When m_use_occupancy
// is set (WeightedLRU-style policy, per the constructor), the policy's
// touch() takes the occupancy argument; otherwise the plain touch() is
// used. No-op on a miss.
// NOTE(review): extraction dropped orig. lines 416-417 and 420 — the two
// touch() call heads whose argument tail "(entry->replacementData,
// occupancy);" survives below; confirm against upstream.
407void
408CacheMemory::setMRU(Addr address, int occupancy)
409{
410 AbstractCacheEntry* entry = lookup(makeLineAddress(address));
411 if (entry != nullptr) {
412 // m_use_occupancy can decide whether we are using WeightedLRU
413 // replacement policy. Depending on different replacement policies,
414 // use different touch() function.
415 if (m_use_occupancy) {
418 entry->replacementData, occupancy);
419 } else {
421 }
422 entry->setLastAccess(curTick());
423 }
424}
425
// Replacement weight of the block at (set, loc): its number of valid
// sub-blocks, or 0 for an empty way.
// NOTE(review): extraction dropped orig. line 427 (the signature,
// getReplacementWeight(int64_t set, int64_t loc) per the declaration
// index).
426int
428{
429 assert(set < m_cache_num_sets);
430 assert(loc < m_cache_assoc);
431 int ret = 0;
432 if (m_cache[set][loc] != NULL) {
433 ret = m_cache[set][loc]->getNumValidBlocks();
434 assert(ret >= 0);
435 }
436
437 return ret;
438}
439
// Checkpoint support: walk every set/way and emit one CacheRecorder record
// per valid block (ST for Read_Write blocks, IFETCH/LD for Read_Only ones),
// then report what fraction of the cache was warmed up.
// NOTE(review): extraction dropped orig. lines 441 (the signature,
// recordCacheContents(int cntrl, CacheRecorder *tr) const per the
// declaration index) and 453 (the condition choosing IFETCH over LD —
// presumably an instruction-cache check; confirm against upstream). As
// scraped, the braces around the Read_Only branch do not balance without
// line 453.
440void
442{
443 uint64_t warmedUpBlocks = 0;
444 [[maybe_unused]] uint64_t totalBlocks = (uint64_t)m_cache_num_sets *
445 (uint64_t)m_cache_assoc;
446
447 for (int i = 0; i < m_cache_num_sets; i++) {
448 for (int j = 0; j < m_cache_assoc; j++) {
449 if (m_cache[i][j] != NULL) {
450 AccessPermission perm = m_cache[i][j]->m_Permission;
451 RubyRequestType request_type = RubyRequestType_NULL;
452 if (perm == AccessPermission_Read_Only) {
454 request_type = RubyRequestType_IFETCH;
455 } else {
456 request_type = RubyRequestType_LD;
457 }
458 } else if (perm == AccessPermission_Read_Write) {
459 request_type = RubyRequestType_ST;
460 }
461
462 if (request_type != RubyRequestType_NULL) {
463 Tick lastAccessTick;
464 lastAccessTick = m_cache[i][j]->getLastAccess();
465 tr->addRecord(cntrl, m_cache[i][j]->m_Address,
466 0, request_type, lastAccessTick,
467 m_cache[i][j]->getDataBlk());
468 warmedUpBlocks++;
469 }
470 }
471 }
472 }
473
474 DPRINTF(RubyCacheTrace, "%s: %lli blocks of %lli total blocks"
475 "recorded %.2f%% \n", name().c_str(), warmedUpBlocks,
476 totalBlocks, (float(warmedUpBlocks) / float(totalBlocks)) * 100.0);
477}
478
479void
480CacheMemory::print(std::ostream& out) const
481{
482 out << "Cache dump: " << name() << std::endl;
483 for (int i = 0; i < m_cache_num_sets; i++) {
484 for (int j = 0; j < m_cache_assoc; j++) {
485 if (m_cache[i][j] != NULL) {
486 out << " Index: " << i
487 << " way: " << j
488 << " entry: " << *m_cache[i][j] << std::endl;
489 } else {
490 out << " Index: " << i
491 << " way: " << j
492 << " entry: NULL" << std::endl;
493 }
494 }
495 }
496}
497
498void
499CacheMemory::printData(std::ostream& out) const
500{
501 out << "printData() not supported" << std::endl;
502}
503
504void
505CacheMemory::setLocked(Addr address, int context)
506{
507 DPRINTF(RubyCache, "Setting Lock for addr: %#x to %d\n", address, context);
508 AbstractCacheEntry* entry = lookup(address);
509 assert(entry != nullptr);
510 entry->setLocked(context);
511}
512
// Clear any LL/SC lock on the (necessarily present) line at `address`.
// NOTE(review): extraction dropped orig. line 514 (the signature,
// clearLocked(Addr addr) per the declaration index).
513void
515{
516 DPRINTF(RubyCache, "Clear Lock for addr: %#x\n", address);
517 AbstractCacheEntry* entry = lookup(address);
518 assert(entry != nullptr);
519 entry->clearLocked();
520}
521
// Clear every lock held by `context` anywhere in the cache by scanning all
// sets and ways.
// NOTE(review): extraction dropped orig. lines 523 (the signature,
// clearLockedAll(int context) per the declaration index) and 527
// (presumably the per-set reference `set` that the inner loop iterates).
522void
524{
525 // iterate through every set and way to get a cache line
526 for (auto i = m_cache.begin(); i != m_cache.end(); ++i) {
528 for (auto j = set.begin(); j != set.end(); ++j) {
529 AbstractCacheEntry *line = *j;
530 if (line && line->isLocked(context)) {
531 DPRINTF(RubyCache, "Clear Lock for addr: %#x\n",
532 line->m_Address);
533 line->clearLocked();
534 }
535 }
536 }
537}
538
539bool
540CacheMemory::isLocked(Addr address, int context)
541{
542 AbstractCacheEntry* entry = lookup(address);
543 assert(entry != nullptr);
544 DPRINTF(RubyCache, "Testing Lock for addr: %#llx cur %d con %d\n",
545 address, entry->m_locked, context);
546 return entry->isLocked(context);
547}
548
// Statistics group for CacheMemory: array read/write/stall counters, atomic
// ALU counters, HTM read/write-set histograms, demand and prefetch
// hit/miss/access scalars (accesses are formulas over hits + misses), and a
// per-RubyAccessMode vector that gets subnames/flags in the loop below.
// NOTE(review): the doxygen extraction gutted this body — orig. line 550
// (the signature, CacheMemoryStats(statistics::Group *parent)) and most of
// lines 577-633 (the per-stat .flags(...) chains; only stray .init(8) /
// .init(RubyRequestType_NUM) fragments survive). Restore from upstream —
// this span is not compilable as scraped.
551 : statistics::Group(parent),
552 ADD_STAT(numDataArrayReads, "Number of data array reads"),
553 ADD_STAT(numDataArrayWrites, "Number of data array writes"),
554 ADD_STAT(numTagArrayReads, "Number of tag array reads"),
555 ADD_STAT(numTagArrayWrites, "Number of tag array writes"),
556 ADD_STAT(numTagArrayStalls, "Number of stalls caused by tag array"),
557 ADD_STAT(numDataArrayStalls, "Number of stalls caused by data array"),
558 ADD_STAT(numAtomicALUOperations, "Number of atomic ALU operations"),
559 ADD_STAT(numAtomicALUArrayStalls, "Number of stalls caused by atomic ALU array"),
560 ADD_STAT(htmTransCommitReadSet, "Read set size of a committed "
561 "transaction"),
562 ADD_STAT(htmTransCommitWriteSet, "Write set size of a committed "
563 "transaction"),
564 ADD_STAT(htmTransAbortReadSet, "Read set size of a aborted transaction"),
565 ADD_STAT(htmTransAbortWriteSet, "Write set size of a aborted "
566 "transaction"),
567 ADD_STAT(m_demand_hits, "Number of cache demand hits"),
568 ADD_STAT(m_demand_misses, "Number of cache demand misses"),
569 ADD_STAT(m_demand_accesses, "Number of cache demand accesses",
570 m_demand_hits + m_demand_misses),
571 ADD_STAT(m_prefetch_hits, "Number of cache prefetch hits"),
572 ADD_STAT(m_prefetch_misses, "Number of cache prefetch misses"),
573 ADD_STAT(m_prefetch_accesses, "Number of cache prefetch accesses",
574 m_prefetch_hits + m_prefetch_misses),
575 ADD_STAT(m_accessModeType, "")
576{
579
582
585
588
591
594
597
600
602 .init(8)
605
607 .init(8)
610
612 .init(8)
615
617 .init(8)
620
623
626
629
631 .init(RubyRequestType_NUM)
633
634 for (int i = 0; i < RubyAccessMode_NUM; i++) {
636 .subname(i, RubyAccessMode_to_string(RubyAccessMode(i)))
637 .flags(statistics::nozero)
638 ;
639 }
640}
641
// Bookkeeping hook called by SLICC-generated code after resources are
// granted: dispatches on the request type to bump the matching array
// counter; unknown types only produce a warning.
// NOTE(review): extraction dropped the statement(s) inside each case
// (orig. lines 651-653, 656-658, 661-663, 666-668, 671-673) — presumably
// the cacheMemoryStats counter increments and/or banked-array reserve
// calls; confirm against upstream.
642// assumption: SLICC generated files will only call this function
643// once **all** resources are granted
644void
645CacheMemory::recordRequestType(CacheRequestType requestType, Addr addr)
646{
647 DPRINTF(RubyStats, "Recorded statistic: %s\n",
648 CacheRequestType_to_string(requestType));
649 switch(requestType) {
650 case CacheRequestType_DataArrayRead:
654 return;
655 case CacheRequestType_DataArrayWrite:
659 return;
660 case CacheRequestType_TagArrayRead:
664 return;
665 case CacheRequestType_TagArrayWrite:
669 return;
670 case CacheRequestType_AtomicALUOperation:
674 return;
675 default:
676 warn("CacheMemory access_type not found: %s",
677 CacheRequestType_to_string(requestType));
678 }
679}
680
// Resource-stall model: when resourceStalls is enabled, probe the banked
// tag/data/atomic-ALU array for the bank covering `addr` and report whether
// it can accept an access this cycle; a failed probe is traced. Always true
// when stall modelling is off. Unknown resource types panic.
// NOTE(review): extraction dropped orig. lines 682 (the signature,
// checkResourceAvailable(CacheResourceType res, Addr addr) per the
// declaration index) and 693-694 / 702-703 / 711-712 (the DPRINTF argument
// tails and stall-counter updates); confirm against upstream.
681bool
683{
684 if (!m_resource_stalls) {
685 return true;
686 }
687
688 if (res == CacheResourceType_TagArray) {
689 if (tagArray.tryAccess(addressToCacheSet(addr))) return true;
690 else {
691 DPRINTF(RubyResourceStalls,
692 "Tag array stall on addr %#x in set %d\n",
695 return false;
696 }
697 } else if (res == CacheResourceType_DataArray) {
698 if (dataArray.tryAccess(addressToCacheSet(addr))) return true;
699 else {
700 DPRINTF(RubyResourceStalls,
701 "Data array stall on addr %#x in set %d\n",
704 return false;
705 }
706 } else if (res == CacheResourceType_AtomicALUArray) {
707 if (atomicALUArray.tryAccess(addr)) return true;
708 else {
709 DPRINTF(RubyResourceStalls,
710 "Atomic ALU array stall on addr %#x in line address %#x\n",
713 return false;
714 }
715 } else {
716 panic("Unrecognized cache resource type.");
717 }
718}
719
720bool
721CacheMemory::isBlockInvalid(int64_t cache_set, int64_t loc)
722{
723 return (m_cache[cache_set][loc]->m_Permission == AccessPermission_Invalid);
724}
725
726bool
727CacheMemory::isBlockNotBusy(int64_t cache_set, int64_t loc)
728{
729 return (m_cache[cache_set][loc]->m_Permission != AccessPermission_Busy);
730}
731
732/* hardware transactional memory */
733
// HTM abort: walk every line, counting read/write-set membership, then
// invalidate lines that were speculatively written, and clear HTM flags and
// locks on all lines. Finally traces the set sizes.
// NOTE(review): extraction dropped orig. lines 735 (the signature), 743
// (presumably the per-set reference `set` iterated below), and 762-763
// (presumably the htmTransAbortReadSet/WriteSet histogram samples that
// precede the DPRINTF); confirm against upstream.
734void
736{
737 uint64_t htmReadSetSize = 0;
738 uint64_t htmWriteSetSize = 0;
739
740 // iterate through every set and way to get a cache line
741 for (auto i = m_cache.begin(); i != m_cache.end(); ++i)
742 {
744
745 for (auto j = set.begin(); j != set.end(); ++j)
746 {
747 AbstractCacheEntry *line = *j;
748
749 if (line != nullptr) {
750 htmReadSetSize += (line->getInHtmReadSet() ? 1 : 0);
751 htmWriteSetSize += (line->getInHtmWriteSet() ? 1 : 0);
752 if (line->getInHtmWriteSet()) {
753 line->invalidateEntry();
754 }
755 line->setInHtmWriteSet(false);
756 line->setInHtmReadSet(false);
757 line->clearLocked();
758 }
759 }
760 }
761
764 DPRINTF(HtmMem, "htmAbortTransaction: read set=%u write set=%u\n",
765 htmReadSetSize, htmWriteSetSize);
766}
767
// HTM commit: like the abort path but keeps speculatively written data —
// only counts the read/write sets and clears HTM flags and locks on every
// line, then traces the set sizes.
// NOTE(review): extraction dropped orig. lines 769 (the signature), 777
// (presumably the per-set reference `set`), and 792-793 (presumably the
// htmTransCommitReadSet/WriteSet histogram samples); confirm upstream.
768void
770{
771 uint64_t htmReadSetSize = 0;
772 uint64_t htmWriteSetSize = 0;
773
774 // iterate through every set and way to get a cache line
775 for (auto i = m_cache.begin(); i != m_cache.end(); ++i)
776 {
778
779 for (auto j = set.begin(); j != set.end(); ++j)
780 {
781 AbstractCacheEntry *line = *j;
782 if (line != nullptr) {
783 htmReadSetSize += (line->getInHtmReadSet() ? 1 : 0);
784 htmWriteSetSize += (line->getInHtmWriteSet() ? 1 : 0);
785 line->setInHtmWriteSet(false);
786 line->setInHtmReadSet(false);
787 line->clearLocked();
788 }
789 }
790 }
791
794 DPRINTF(HtmMem, "htmCommitTransaction: read set=%u write set=%u\n",
795 htmReadSetSize, htmWriteSetSize);
796}
797
// NOTE(review): the doxygen extraction destroyed the four short `void`
// member functions that occupied orig. lines 798-821 — only their return
// types and closing line numbers survive. From the surrounding stats
// (m_demand_hits/misses, m_prefetch_hits/misses) these are presumably the
// demand/prefetch hit/miss profiling hooks; restore from upstream — this
// span is not compilable as scraped.
798void
803
804void
809
810void
815
816void
821
822} // namespace ruby
823} // namespace gem5
#define DPRINTF(x,...)
Definition trace.hh:209
virtual std::string name() const
Definition named.hh:47
A replaceable entry is a basic entry in a 2d table-like structure that needs to have replacement func...
std::shared_ptr< replacement_policy::ReplacementData > replacementData
Replacement data associated to this entry.
uint32_t getWay() const
Get way number.
uint32_t getSet() const
Get set number.
Abstract superclass for simulation objects.
virtual void invalidate(const std::shared_ptr< ReplacementData > &replacement_data)=0
Invalidate replacement data to set it as the next probable victim.
virtual void reset(const std::shared_ptr< ReplacementData > &replacement_data, const PacketPtr pkt)
Reset replacement data.
Definition base.hh:89
virtual void touch(const std::shared_ptr< ReplacementData > &replacement_data, const PacketPtr pkt)
Update replacement data.
Definition base.hh:75
virtual std::shared_ptr< ReplacementData > instantiateEntry()=0
Instantiate a replacement data entry.
void setBlockSize(int block_size)
void setClockPeriod(Tick clockPeriod)
virtual void setRubySystem(RubySystem *rs)
virtual void initBlockSize(int block_size)
bool isLocked(int context) const
void reserve(int64_t idx)
bool tryAccess(int64_t idx)
void setClockPeriod(Tick _clockPeriod)
bool isTagPresent(Addr address) const
Addr getAddressAtIdx(int idx) const
void clearLockedAll(int context)
bool testCacheAccess(Addr address, RubyRequestType type, DataBlock *&data_ptr)
ALUFreeListArray atomicALUArray
bool m_use_occupancy
Set to true when using WeightedLRU replacement policy, otherwise, set to false.
void init()
init() is called after all C++ SimObjects have been created and all ports are connected.
std::vector< std::vector< ReplData > > replacement_data
We store all the ReplacementData in a 2-dimensional array.
void print(std::ostream &out) const
void recordCacheContents(int cntrl, CacheRecorder *tr) const
void setMRU(Addr address)
gem5::ruby::CacheMemory::CacheMemoryStats cacheMemoryStats
void deallocate(Addr address)
int findTagInSet(int64_t line, Addr tag) const
void setLocked(Addr addr, int context)
int findTagInSetIgnorePermissions(int64_t cacheSet, Addr tag) const
bool isBlockInvalid(int64_t cache_set, int64_t loc)
int64_t addressToCacheSet(Addr address) const
void setRubySystem(RubySystem *rs)
bool tryCacheAccess(Addr address, RubyRequestType type, DataBlock *&data_ptr)
RubyCacheParams Params
void clearLocked(Addr addr)
bool isBlockNotBusy(int64_t cache_set, int64_t loc)
Addr cacheProbe(Addr address) const
void recordRequestType(CacheRequestType requestType, Addr addr)
void printData(std::ostream &out) const
bool cacheAvail(Addr address) const
CacheMemory(const Params &p)
replacement_policy::Base * m_replacementPolicy_ptr
We use the replacement policies from the Classic memory system.
std::unordered_map< Addr, int > m_tag_index
AbstractCacheEntry * allocate(Addr address, AbstractCacheEntry *new_entry)
int getReplacementWeight(int64_t set, int64_t loc)
std::vector< std::vector< AbstractCacheEntry * > > m_cache
bool checkResourceAvailable(CacheResourceType res, Addr addr)
Addr makeLineAddress(Addr addr) const
AbstractCacheEntry * lookup(Addr address)
bool isLocked(Addr addr, int context)
void addRecord(int cntrl, Addr data_addr, Addr pc_addr, RubyRequestType type, Tick time, DataBlock &data)
Derived & subname(off_type index, const std::string &name)
Set the subfield name for the given index, and marks this stat to print at the end of simulation.
Derived & flags(Flags _flags)
Set the flags and marks this stat to print at the end of simulation.
void sample(const U &v, int n=1)
Add a value to the distribtion n times.
Statistics container.
Definition group.hh:93
Histogram & init(size_type size)
Set the parameters of this histogram.
Derived & init(size_type size)
Set this vector to have the given size.
STL vector class.
Definition stl.hh:37
#define ADD_STAT(n,...)
Convenience macro to add a stat to a statistics group.
Definition group.hh:75
static constexpr std::enable_if_t< std::is_integral_v< T >, int > floorLog2(T x)
Definition intmath.hh:59
#define panic(...)
This implements a cprintf based panic() function.
Definition logging.hh:188
#define warn(...)
Definition logging.hh:256
#define warn_once(...)
Definition logging.hh:260
Bitfield< 7 > i
Definition misc_types.hh:67
Bitfield< 9, 8 > rs
Bitfield< 12, 11 > set
Bitfield< 0 > p
Bitfield< 3, 1 > perm
Definition pagetable.hh:73
Bitfield< 3 > addr
Definition types.hh:84
Addr bitSelect(Addr addr, unsigned int small, unsigned int big)
Definition Address.cc:41
const FlagsType pdf
Print the percent of the total that this entry represents.
Definition info.hh:61
const FlagsType nonan
Don't print if this is NAN.
Definition info.hh:69
const FlagsType nozero
Don't print if this is zero.
Definition info.hh:67
const FlagsType total
Print the total.
Definition info.hh:59
const FlagsType dist
Print the distribution.
Definition info.hh:65
Copyright (c) 2024 Arm Limited All rights reserved.
Definition binary32.hh:36
Tick curTick()
The universal simulation clock.
Definition cur_tick.hh:46
uint64_t Addr
Address type This will probably be moved somewhere else in the near future.
Definition types.hh:147
uint64_t Tick
Tick count type.
Definition types.hh:58
std::ostream & operator<<(std::ostream &os, const BaseSemihosting::InPlaceArg &ipa)
CacheMemoryStats(statistics::Group *parent)

Generated on Mon Jan 13 2025 04:28:40 for gem5 by doxygen 1.9.8