// gem5 Ruby CacheMemory.cc -- excerpted source listing
#include "debug/HtmMem.hh"
#include "debug/RubyCache.hh"
#include "debug/RubyCacheTrace.hh"
#include "debug/RubyResourceStalls.hh"
#include "debug/RubyStats.hh"
#include "mem/ruby/protocol/AccessPermission.hh"
    // CacheMemory::CacheMemory(const Params &p) -- member initializers (excerpt)
    dataArray(p.dataArrayBanks, p.dataAccessLatency,
              p.start_index_bit, p.ruby_system),
    tagArray(p.tagArrayBanks, p.tagAccessLatency,
             p.start_index_bit, p.ruby_system),
    cacheMemoryStats(this)
    // CacheMemory::findTagInSet() (excerpt): a tag only counts as present if
    // its permission is something other than NotPresent.
    if (m_cache[cacheSet][it->second]->m_Permission !=
        AccessPermission_NotPresent)
    // CacheMemory::tryCacheAccess() (excerpt)
    DPRINTF(RubyCache, "address: %#x\n", address);
    AbstractCacheEntry *entry = lookup(address);
    if (entry != nullptr) {
        // ...
        if (entry->m_Permission == AccessPermission_Read_Write) {
            return true;
        }
        if ((entry->m_Permission == AccessPermission_Read_Only) &&
            (type == RubyRequestType_LD || type == RubyRequestType_IFETCH)) {
            return true;
        }
    }
    // CacheMemory::testCacheAccess() (excerpt)
    DPRINTF(RubyCache, "address: %#x\n", address);
    AbstractCacheEntry *entry = lookup(address);
    if (entry != nullptr) {
        // ...
        return entry->m_Permission != AccessPermission_NotPresent;
    }
    // CacheMemory::isTagPresent() (excerpt)
    if (entry == nullptr) {
        DPRINTF(RubyCache, "No tag match for address: %#x\n", address);
        return false;
    }
    DPRINTF(RubyCache, "address: %#x found\n", address);
    return true;
    // CacheMemory::allocate() (excerpt): claim the first open way in the set
    DPRINTF(RubyCache, "address: %#x\n", address);
    // ...
    if (!set[i] || set[i]->m_Permission == AccessPermission_NotPresent) {
        // ...
        warn_once("This protocol contains a cache entry handling bug: "
                  "Entries in the cache should never be NotPresent! If\n"
                  "this entry (%#x) is not tracked elsewhere, it will memory "
                  "leak here. Fix your protocol to eliminate these!",
                  address);
        // ...
        set[i]->m_Address = address;
        set[i]->m_Permission = AccessPermission_Invalid;
        DPRINTF(RubyCache, "Allocate clearing lock for addr: %x\n", address);
        set[i]->m_locked = -1;
        // ...
        set[i]->setPosition(cacheSet, i);
        // ...
    }
    panic("Allocate didn't find an available entry");
    // CacheMemory::deallocate() (excerpt)
    DPRINTF(RubyCache, "address: %#x\n", address);
    AbstractCacheEntry *entry = lookup(address);
    assert(entry != nullptr);
    // ...
    uint32_t cache_set = entry->getSet();
    uint32_t way = entry->getWay();
    // ...
    m_cache[cache_set][way] = NULL;

    // CacheMemory::cacheProbe() (excerpt): the replacement policy picks the victim way
    return m_cache[cacheSet][m_replacementPolicy_ptr->
                getVictim(candidates)->getWay()]->m_Address;
    // CacheMemory::lookup() (non-const and const overloads): NULL on a tag miss
    int loc = findTagInSet(cacheSet, address);
    if (loc == -1) return NULL;
    return m_cache[cacheSet][loc];

    // CacheMemory::setMRU() overloads (excerpts): the Addr-based versions
    // tolerate a missing entry, the entry-based version asserts on it.
    if (entry != nullptr) {
        // ...
    }
    assert(entry != nullptr);
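// Illustrative usage sketch (not part of this file): the lookup()/allocate()/
// deallocate() calls above are normally driven by a SLICC-generated protocol
// controller. This minimal, hypothetical helper shows how the allocate-on-miss
// flow fits together; "getOrAllocateEntry" and "MyEntry" are made-up names,
// with MyEntry standing in for a protocol's AbstractCacheEntry subclass.
AbstractCacheEntry *
getOrAllocateEntry(CacheMemory &cache, Addr address)
{
    const Addr line_addr = makeLineAddress(address);

    // Tag hit: reuse the existing entry.
    if (AbstractCacheEntry *entry = cache.lookup(line_addr))
        return entry;

    // No free way in this set: evict the replacement policy's victim first.
    // (A real protocol would also write back or invalidate the victim.)
    if (!cache.cacheAvail(line_addr))
        cache.deallocate(cache.cacheProbe(line_addr));

    // Install a fresh entry; allocate() resets its permission and lock state.
    return cache.allocate(line_addr, new MyEntry());
}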
    // CacheMemory::recordCacheContents() (excerpt): write a trace record for
    // every block whose contents are worth replaying during warm-up.
    uint64_t warmedUpBlocks = 0;
    // ...
    RubyRequestType request_type = RubyRequestType_NULL;
    if (perm == AccessPermission_Read_Only) {
        if (m_is_instruction_only_cache) {
            request_type = RubyRequestType_IFETCH;
        } else {
            request_type = RubyRequestType_LD;
        }
    } else if (perm == AccessPermission_Read_Write) {
        request_type = RubyRequestType_ST;
    }

    if (request_type != RubyRequestType_NULL) {
        Tick lastAccessTick = m_cache[i][j]->getLastAccess();
        tr->addRecord(cntrl, m_cache[i][j]->m_Address,
                      0, request_type, lastAccessTick,
                      m_cache[i][j]->getDataBlk());
        warmedUpBlocks++;
    }
    // ...
    DPRINTF(RubyCacheTrace, "%s: %lli blocks of %lli total blocks "
            "recorded %.2f%% \n", name().c_str(), warmedUpBlocks,
            totalBlocks, (float(warmedUpBlocks) / float(totalBlocks)) * 100.0);
    // CacheMemory::print() (excerpt)
    out << "Cache dump: " << name() << std::endl;
    // ...
    out << " Index: " << i
        << " way: " << j
        << " entry: " << *m_cache[i][j] << std::endl;
    // ...
    out << " Index: " << i
        << " way: " << j
        << " entry: NULL" << std::endl;
    out << "printData() not supported" << std::endl;
    // CacheMemory::setLocked() (excerpt)
    DPRINTF(RubyCache, "Setting Lock for addr: %#x to %d\n", address, context);
    AbstractCacheEntry *entry = lookup(address);
    assert(entry != nullptr);
    entry->setLocked(context);

    // CacheMemory::clearLocked() (excerpt)
    DPRINTF(RubyCache, "Clear Lock for addr: %#x\n", address);
    AbstractCacheEntry *entry = lookup(address);
    assert(entry != nullptr);
    // CacheMemory::clearLockedAll() (excerpt): inner walk over one set
    for (auto j = set.begin(); j != set.end(); ++j) {
        AbstractCacheEntry *line = *j;
        if (line && line->isLocked(context)) {
            DPRINTF(RubyCache, "Clear Lock for addr: %#x\n",
                    line->m_Address);
            // ...
        }
    }

    // CacheMemory::isLocked() (excerpt)
    AbstractCacheEntry *entry = lookup(address);
    assert(entry != nullptr);
    DPRINTF(RubyCache, "Testing Lock for addr: %#llx cur %d con %d\n",
            address, entry->m_locked, context);
CacheMemory::CacheMemoryStats::CacheMemoryStats(statistics::Group *parent)
    : statistics::Group(parent),
      ADD_STAT(numDataArrayReads, "Number of data array reads"),
      ADD_STAT(numDataArrayWrites, "Number of data array writes"),
      ADD_STAT(numTagArrayReads, "Number of tag array reads"),
      ADD_STAT(numTagArrayWrites, "Number of tag array writes"),
      ADD_STAT(numTagArrayStalls, "Number of stalls caused by tag array"),
      ADD_STAT(numDataArrayStalls, "Number of stalls caused by data array"),
      ADD_STAT(htmTransCommitReadSet, "Read set size of a committed "
                                      "transaction"),
      ADD_STAT(htmTransCommitWriteSet, "Write set size of a committed "
                                       "transaction"),
      ADD_STAT(htmTransAbortReadSet, "Read set size of an aborted transaction"),
      ADD_STAT(htmTransAbortWriteSet, "Write set size of an aborted "
                                      "transaction"),
      ADD_STAT(m_demand_hits, "Number of cache demand hits"),
      ADD_STAT(m_demand_misses, "Number of cache demand misses"),
      ADD_STAT(m_demand_accesses, "Number of cache demand accesses",
               m_demand_hits + m_demand_misses),
      ADD_STAT(m_prefetch_hits, "Number of cache prefetch hits"),
      ADD_STAT(m_prefetch_misses, "Number of cache prefetch misses"),
      ADD_STAT(m_prefetch_accesses, "Number of cache prefetch accesses",
               m_prefetch_hits + m_prefetch_misses),
    // CacheMemoryStats constructor body (excerpt): break the access-mode
    // vector down by name.
    m_accessModeType
        .init(RubyRequestType_NUM)
        .flags(statistics::pdf | statistics::total)
        ;

    for (int i = 0; i < RubyAccessMode_NUM; i++) {
        m_accessModeType
            .subname(i, RubyAccessMode_to_string(RubyAccessMode(i)))
            ;
    }
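// For readers unfamiliar with the stats idiom above: ADD_STAT binds each
// member of a statistics::Group to its name and description, and a
// statistics::Formula (m_demand_accesses, m_prefetch_accesses) is defined
// directly from other stats. The following is a minimal, self-contained
// sketch of that pattern; "ExampleStats" and its members are hypothetical
// and not part of CacheMemory.
struct ExampleStats : public statistics::Group
{
    ExampleStats(statistics::Group *parent)
        : statistics::Group(parent),
          ADD_STAT(hits, "Number of hits"),
          ADD_STAT(misses, "Number of misses"),
          // A Formula is evaluated at dump time from the stats it references.
          ADD_STAT(accesses, "Number of accesses", hits + misses)
    {
        // nozero: omit these entries from the stats dump if they stay zero.
        hits.flags(statistics::nozero);
        misses.flags(statistics::nozero);
    }

    statistics::Scalar hits;
    statistics::Scalar misses;
    statistics::Formula accesses;
};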
    // CacheMemory::recordRequestType() (excerpt)
    DPRINTF(RubyStats, "Recorded statistic: %s\n",
            CacheRequestType_to_string(requestType));
    switch(requestType) {
      case CacheRequestType_DataArrayRead:
        // ...
      case CacheRequestType_DataArrayWrite:
        // ...
      case CacheRequestType_TagArrayRead:
        // ...
      case CacheRequestType_TagArrayWrite:
        // ...
      default:
        warn("CacheMemory access_type not found: %s",
             CacheRequestType_to_string(requestType));
    }
    // CacheMemory::checkResourceAvailable() (excerpt): the stall messages are
    // only printed when tryAccess() on the banked array fails.
    if (res == CacheResourceType_TagArray) {
        DPRINTF(RubyResourceStalls,
                "Tag array stall on addr %#x in set %d\n",
                addr, addressToCacheSet(addr));
    } else if (res == CacheResourceType_DataArray) {
        DPRINTF(RubyResourceStalls,
                "Data array stall on addr %#x in set %d\n",
                addr, addressToCacheSet(addr));
    } else {
        panic("Unrecognized cache resource type.");
    }
    // CacheMemory::isBlockInvalid()
    return (m_cache[cache_set][loc]->m_Permission == AccessPermission_Invalid);

    // CacheMemory::isBlockNotBusy()
    return (m_cache[cache_set][loc]->m_Permission != AccessPermission_Busy);
    // CacheMemory::htmAbortTransaction() (excerpt): walk every set and way
    uint64_t htmReadSetSize = 0;
    uint64_t htmWriteSetSize = 0;
    // ...
    for (auto j = set.begin(); j != set.end(); ++j) {
        AbstractCacheEntry *line = *j;
        if (line != nullptr) {
            // ... tally the line's read/write set flags, then clear them
        }
    }
    // ...
    DPRINTF(HtmMem, "htmAbortTransaction: read set=%u write set=%u\n",
            htmReadSetSize, htmWriteSetSize);
    // CacheMemory::htmCommitTransaction() (excerpt)
    uint64_t htmReadSetSize = 0;
    uint64_t htmWriteSetSize = 0;
    // ...
    for (auto j = set.begin(); j != set.end(); ++j) {
        AbstractCacheEntry *line = *j;
        if (line != nullptr) {
            // ... tally the line's read/write set flags, then clear them
        }
    }
    // ...
    DPRINTF(HtmMem, "htmCommitTransaction: read set=%u write set=%u\n",
            htmReadSetSize, htmWriteSetSize);
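// Both HTM walks above have the same shape: visit every allocated line, tally
// whether it sits in the transaction's read and/or write set, clear the
// per-line flags, and sample the totals into the matching histograms. A
// condensed sketch of one set's worth of that bookkeeping (commit shown;
// abort samples the htmTransAbort* histograms instead):
uint64_t htmReadSetSize = 0;
uint64_t htmWriteSetSize = 0;

for (AbstractCacheEntry *line : set) {
    if (line == nullptr)
        continue;

    htmReadSetSize  += line->getInHtmReadSet()  ? 1 : 0;
    htmWriteSetSize += line->getInHtmWriteSet() ? 1 : 0;

    // The transaction is over either way, so drop the speculative markings.
    line->setInHtmReadSet(false);
    line->setInHtmWriteSet(false);
}

cacheMemoryStats.htmTransCommitReadSet.sample(htmReadSetSize);
cacheMemoryStats.htmTransCommitWriteSet.sample(htmWriteSetSize);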