#include "debug/HtmMem.hh"
#include "debug/RubyCache.hh"
#include "debug/RubyCacheTrace.hh"
#include "debug/RubyResourceStalls.hh"
#include "debug/RubyStats.hh"
#include "mem/ruby/protocol/AccessPermission.hh"
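// CacheMemory constructor: the banked data and tag arrays and the atomic ALU
// free list are sized and timed from the configuration parameters, and the
// stats group is attached to this object.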
    dataArray(p.dataArrayBanks, p.dataAccessLatency, p.start_index_bit),
    tagArray(p.tagArrayBanks, p.tagAccessLatency, p.start_index_bit),
    atomicALUArray(p.atomicALUs, p.atomicLatency),
    cacheMemoryStats(this)
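// Tag lookup: an entry found in the tag index only counts as present if its
// permission is not NotPresent.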
        if (m_cache[cacheSet][it->second]->m_Permission !=
            AccessPermission_NotPresent)
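// tryCacheAccess: Read_Write permission satisfies any request type, while
// Read_Only satisfies only loads and instruction fetches; anything else is
// reported as lacking permission.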
    DPRINTF(RubyCache, "trying to access address: %#x\n", address);
    if (entry != nullptr) {
        // ...
        if (entry->m_Permission == AccessPermission_Read_Write) {
            DPRINTF(RubyCache, "Have permission to access address: %#x\n",
                    address);
            // ...
        }
        if ((entry->m_Permission == AccessPermission_Read_Only) &&
            (type == RubyRequestType_LD || type == RubyRequestType_IFETCH)) {
            DPRINTF(RubyCache, "Have permission to access address: %#x\n",
                    address);
            // ...
        }
    }
    DPRINTF(RubyCache, "Do not have permission to access address: %#x\n",
            address);
    DPRINTF(RubyCache, "testing address: %#x\n", address);
    if (entry != nullptr) {
        // ...
        DPRINTF(RubyCache, "have permission for address %#x?: %d\n",
                address,
                entry->m_Permission != AccessPermission_NotPresent);
        return entry->m_Permission != AccessPermission_NotPresent;
    }
    DPRINTF(RubyCache, "do not have permission for address %#x\n",
            address);
    if (entry == nullptr) {
        DPRINTF(RubyCache, "No tag match for address: %#x\n", address);
        // ...
    }
    DPRINTF(RubyCache, "address: %#x found\n", address);
    DPRINTF(RubyCache, "allocating address: %#x\n", address);
    // ...
        if (!set[i] ||
            set[i]->m_Permission == AccessPermission_NotPresent) {
            // ...
                warn_once("This protocol contains a cache entry handling bug: "
                    "Entries in the cache should never be NotPresent! If\n"
                    "this entry (%#x) is not tracked elsewhere, it will memory "
                    "leak here. Fix your protocol to eliminate these!",
                    address);
            // ...
            set[i]->m_Permission = AccessPermission_Invalid;
            DPRINTF(RubyCache, "Allocate clearing lock for addr: 0x%x\n",
                    address);
            set[i]->m_locked = -1;
            // ...
            set[i]->setPosition(cacheSet, i);
            // ...
        }
    // ...
    panic("Allocate didn't find an available entry");
    DPRINTF(RubyCache, "deallocating address: %#x\n", address);
    // ...
    assert(entry != nullptr);
    // ...
    uint32_t cache_set = entry->getSet();
    uint32_t way = entry->getWay();
    // ...
    m_cache[cache_set][way] = NULL;
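// cacheProbe: ask the replacement policy to pick a victim among the
// candidate entries and return that victim line's address.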
    return m_cache[cacheSet][m_replacementPolicy_ptr->
                getVictim(candidates)->getWay()]->m_Address;
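// lookup (const and non-const): a way index of -1 from the tag search means
// the address is not present.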
    if (loc == -1) return NULL;
    // ...
    if (loc == -1) return NULL;
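// setMRU overloads: the address-based variants tolerate a lookup miss
// (nullptr), while the entry-based variant asserts it was given a valid
// entry.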
    if (entry != nullptr) {
        // ...
    }
    // ...
    assert(entry != nullptr);
    // ...
    if (entry != nullptr) {
        // ...
    }
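// recordCacheContents: walk every set and way, convert each warmed-up
// block's permission into a request type (IFETCH/LD for Read_Only, ST for
// Read_Write), hand it to the CacheRecorder, and report the fraction of
// blocks captured.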
    uint64_t warmedUpBlocks = 0;
    // ...
            RubyRequestType request_type = RubyRequestType_NULL;
            if (perm == AccessPermission_Read_Only) {
                // ...
                    request_type = RubyRequestType_IFETCH;
                // ...
                    request_type = RubyRequestType_LD;
                // ...
            } else if (perm == AccessPermission_Read_Write) {
                request_type = RubyRequestType_ST;
            }
            // ...
            if (request_type != RubyRequestType_NULL) {
                // ...
                lastAccessTick = m_cache[i][j]->getLastAccess();
                tr->addRecord(cntrl, m_cache[i][j]->m_Address,
                              0, request_type, lastAccessTick,
                              m_cache[i][j]->getDataBlk());
                // ...
            }
    // ...
    DPRINTF(RubyCacheTrace, "%s: %lli blocks of %lli total blocks "
            "recorded %.2f%%\n", name().c_str(), warmedUpBlocks,
            totalBlocks, (float(warmedUpBlocks) / float(totalBlocks)) * 100.0);
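// print/printData: print() dumps every set and way (or NULL for an empty
// way); printData() is not supported for this cache.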
    out << "Cache dump: " << name() << std::endl;
    // ...
                out << "  Index: " << i
                    << " way: " << j
                    << " entry: " << *m_cache[i][j] << std::endl;
            // ...
                out << "  Index: " << i
                    << " way: " << j
                    << " entry: NULL" << std::endl;
    // ...
    out << "printData() not supported" << std::endl;
    DPRINTF(RubyCache, "Setting Lock for addr: %#x to %d\n", address, context);
    // ...
    assert(entry != nullptr);
    // ...
    DPRINTF(RubyCache, "Clear Lock for addr: %#x\n", address);
    // ...
    assert(entry != nullptr);
    // ...
        for (auto j = set.begin(); j != set.end(); ++j) {
            // ...
            if (line && line->isLocked(context)) {
                DPRINTF(RubyCache, "Clear Lock for addr: %#x\n",
                        line->m_Address);
                // ...
            }
        }
    // ...
    assert(entry != nullptr);
    DPRINTF(RubyCache, "Testing Lock for addr: %#llx cur %d con %d\n",
            address, entry->m_locked, context);
    : statistics::Group(parent),
      ADD_STAT(numDataArrayReads, "Number of data array reads"),
      ADD_STAT(numDataArrayWrites, "Number of data array writes"),
      ADD_STAT(numTagArrayReads, "Number of tag array reads"),
      ADD_STAT(numTagArrayWrites, "Number of tag array writes"),
      ADD_STAT(numTagArrayStalls, "Number of stalls caused by tag array"),
      ADD_STAT(numDataArrayStalls, "Number of stalls caused by data array"),
      ADD_STAT(numAtomicALUOperations, "Number of atomic ALU operations"),
      ADD_STAT(numAtomicALUArrayStalls,
               "Number of stalls caused by atomic ALU array"),
      ADD_STAT(htmTransCommitReadSet,
               "Read set size of a committed transaction"),
      ADD_STAT(htmTransCommitWriteSet,
               "Write set size of a committed transaction"),
      ADD_STAT(htmTransAbortReadSet,
               "Read set size of an aborted transaction"),
      ADD_STAT(htmTransAbortWriteSet,
               "Write set size of an aborted transaction"),
      ADD_STAT(m_demand_hits, "Number of cache demand hits"),
      ADD_STAT(m_demand_misses, "Number of cache demand misses"),
      ADD_STAT(m_demand_accesses, "Number of cache demand accesses",
               m_demand_hits + m_demand_misses),
      ADD_STAT(m_prefetch_hits, "Number of cache prefetch hits"),
      ADD_STAT(m_prefetch_misses, "Number of cache prefetch misses"),
      ADD_STAT(m_prefetch_accesses, "Number of cache prefetch accesses",
               m_prefetch_hits + m_prefetch_misses),
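// The access-mode vector is sized once and its buckets are labelled with the
// RubyAccessMode names so they print readably.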
    m_accessModeType
        .init(RubyRequestType_NUM)
        /* ... */;

    for (int i = 0; i < RubyAccessMode_NUM; i++) {
        m_accessModeType
            .subname(i, RubyAccessMode_to_string(RubyAccessMode(i)))
            /* ... */;
    }
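// recordRequestType: dispatch on the CacheRequestType and update the
// matching data/tag/atomic-ALU counters; unrecognized types only produce a
// warning.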
    DPRINTF(RubyStats, "Recorded statistic: %s\n",
            CacheRequestType_to_string(requestType));
    switch(requestType) {
    case CacheRequestType_DataArrayRead:
        // ...
    case CacheRequestType_DataArrayWrite:
        // ...
    case CacheRequestType_TagArrayRead:
        // ...
    case CacheRequestType_TagArrayWrite:
        // ...
    case CacheRequestType_AtomicALUOperation:
        // ...
    default:
        warn("CacheMemory access_type not found: %s",
             CacheRequestType_to_string(requestType));
    }
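// checkResourceAvailable: model contention on the banked tag and data arrays
// and on the atomic ALU free list; a busy resource is reported under the
// RubyResourceStalls debug flag.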
    if (res == CacheResourceType_TagArray) {
        // ...
            DPRINTF(RubyResourceStalls,
                    "Tag array stall on addr %#x in set %d\n",
                    addr, addressToCacheSet(addr));
        // ...
    } else if (res == CacheResourceType_DataArray) {
        // ...
            DPRINTF(RubyResourceStalls,
                    "Data array stall on addr %#x in set %d\n",
                    addr, addressToCacheSet(addr));
        // ...
    } else if (res == CacheResourceType_AtomicALUArray) {
        // ...
            DPRINTF(RubyResourceStalls,
                    "Atomic ALU array stall on addr %#x in line address %#x\n",
                    addr, makeLineAddress(addr));
        // ...
    } else {
        panic("Unrecognized cache resource type.");
    }
    return (m_cache[cache_set][loc]->m_Permission == AccessPermission_Invalid);
    // ...
    return (m_cache[cache_set][loc]->m_Permission != AccessPermission_Busy);
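// htmAbortTransaction: sweep every set and way, accumulate the transaction's
// read and write set sizes, and log them under the HtmMem debug flag.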
    uint64_t htmReadSetSize = 0;
    uint64_t htmWriteSetSize = 0;
    // ...
        for (auto j = set.begin(); j != set.end(); ++j)
        {
            // ...
            if (line != nullptr) {
                // ...
            }
        }
    // ...
    DPRINTF(HtmMem, "htmAbortTransaction: read set=%u write set=%u\n",
            htmReadSetSize, htmWriteSetSize);
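// htmCommitTransaction: the commit-side counterpart, accumulating and
// logging the same read and write set sizes.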
    uint64_t htmReadSetSize = 0;
    uint64_t htmWriteSetSize = 0;
    // ...
        for (auto j = set.begin(); j != set.end(); ++j)
        {
            // ...
            if (line != nullptr) {
                // ...
            }
        }
    // ...
    DPRINTF(HtmMem, "htmCommitTransaction: read set=%u write set=%u\n",
            htmReadSetSize, htmWriteSetSize);