46#include "debug/Config.hh"
47#include "debug/Drain.hh"
48#include "debug/Ruby.hh"
49#include "mem/ruby/protocol/AccessPermission.hh"
63 m_controller(NULL), m_mandatory_q_ptr(NULL),
65 pioRequestPort(
csprintf(
"%s.pio-request-port",
name()), *this),
66 pioResponsePort(
csprintf(
"%s.pio-response-port",
name()), *this),
67 memRequestPort(
csprintf(
"%s.mem-request-port",
name()), *this),
68 memResponsePort(
csprintf(
"%s-mem-response-port",
name()), *this,
69 p.ruby_system->getAccessBackingStore(), -1,
71 gotAddrRanges(
p.port_interrupt_out_port_connection_count),
72 m_isCPUSequencer(
p.is_cpu_sequencer)
77 for (
size_t i = 0;
i <
p.port_in_ports_connection_count; ++
i) {
79 (
"%s.response_ports%d",
name(),
i), *
this,
80 p.ruby_system->getAccessBackingStore(),
81 i,
p.no_retry_on_stall));
85 for (
size_t i = 0;
i <
p.port_interrupt_out_port_connection_count; ++
i) {
87 "%s.request_ports%d",
name(),
i), *
this));
97 response_port->sendRangeChange();
106 if (if_name ==
"mem_request_port") {
108 }
else if (if_name ==
"pio_request_port") {
110 }
else if (if_name ==
"mem_response_port") {
112 }
else if (if_name ==
"pio_response_port") {
114 }
else if (if_name ==
"interrupt_out_port") {
118 panic(
"%s: unknown %s index (%d)\n", __func__, if_name, idx);
122 }
else if (if_name ==
"in_ports") {
126 panic(
"%s: unknown %s index (%d)\n", __func__, if_name, idx);
140 reqQueue(_port, *this),
141 snoopRespQueue(_port, *this)
157 reqQueue(_port, *this),
158 snoopRespQueue(_port, *this)
165 bool _access_backing_store,
PortID id,
166 bool _no_retry_on_stall):
170 access_backing_store(_access_backing_store),
171 no_retry_on_stall(_no_retry_on_stall)
183 owner.pioResponsePort.schedTimingResp(
184 pkt,
curTick() + owner.m_ruby_system->clockPeriod());
198 assert(port != NULL);
216 for (
size_t i = 0;
i < owner.request_ports.size(); ++
i) {
218 for (
auto it =
l.begin(); it !=
l.end(); ++it) {
219 if (it->contains(pkt->
getAddr())) {
222 [[maybe_unused]]
bool success =
223 owner.request_ports[
i]->sendTimingReq(pkt);
229 panic(
"Should never reach here!\n");
236 if (!owner.system->bypassCaches()) {
237 panic(
"Ruby supports atomic accesses only in noncaching mode\n");
240 for (
size_t i = 0;
i < owner.request_ports.size(); ++
i) {
242 for (
auto it =
l.begin(); it !=
l.end(); ++it) {
243 if (it->contains(pkt->
getAddr())) {
244 return owner.request_ports[
i]->sendAtomic(pkt);
248 panic(
"Could not find address in Ruby PIO address ranges!\n");
258 panic(
"RubyPort should never see request with the "
259 "cacheResponding flag set\n");
263 if (pkt->
req->isCacheMaintenance()) {
264 warn_once(
"Cache maintenance operations are not supported in Ruby.\n");
266 schedTimingResp(pkt,
curTick());
272 if (!pkt->
req->isMemMgmt() && !isPhysMemAddress(pkt)) {
273 assert(owner.memRequestPort.isConnected());
275 "pio address\n", pkt->
getAddr());
283 owner.memRequestPort.schedTimingReq(pkt,
294 RequestStatus requestStatus = owner.makeRequest(pkt);
299 if (requestStatus == RequestStatus_Issued) {
311 "Request %s for address %#x did not issue because %s\n",
313 RequestStatus_to_string(requestStatus));
325 if (!owner.system->bypassCaches()) {
326 panic(
"Ruby supports atomic accesses only in noncaching mode\n");
334 if (!isPhysMemAddress(pkt)) {
335 assert(owner.memRequestPort.isConnected());
337 "pio address\n", pkt->
getAddr());
344 Tick req_ticks = owner.memRequestPort.sendAtomic(pkt);
345 return owner.ticksToCycles(req_ticks);
349 rs->getBlockSizeBytes());
353 static int mem_interface_type = -1;
354 if (mem_interface_type == -1) {
355 if (
rs->m_abstract_controls[MachineType_Directory].size() != 0) {
356 mem_interface_type = MachineType_Directory;
358 else if (
rs->m_abstract_controls[MachineType_Memory].size() != 0) {
359 mem_interface_type = MachineType_Memory;
362 panic(
"Can't find the memory controller interface\n");
367 MachineID id = owner.m_controller->mapAddressToMachine(
368 pkt->
getAddr(), (MachineType)mem_interface_type);
370 rs->m_abstract_controls[mem_interface_type][
id.getNum()];
372 if (access_backing_store)
373 rs->getPhysMem()->access(pkt);
386 if (!no_retry_on_stall && !owner.onRetryList(
this)) {
387 owner.addToRetryList(
this);
405 if (!isPhysMemAddress(pkt)) {
407 assert(owner.pioRequestPort.isConnected());
408 owner.pioRequestPort.sendFunctional(pkt);
413 owner.makeLineAddress(pkt->
getAddr()) +
rs->getBlockSizeBytes());
415 if (pkt->
req->getGPUFuncAccess()) {
416 pkt->
req->requestorId(owner.m_controller->getRequestorId());
419 if (access_backing_store) {
424 rs->getPhysMem()->functionalAccess(pkt);
426 bool accessSucceeded =
false;
431 accessSucceeded =
rs->functionalRead(pkt);
433 accessSucceeded =
rs->functionalWrite(pkt);
441 fatal(
"Ruby functional %s failed for address %#x\n",
457 accessSucceeded ?
"successful":
"failed");
471 pkt->
req->hasNoAddr());
478 assert(port != NULL);
497 assert(port != NULL);
513 auto request = std::make_shared<Request>(
517 request->setExtraData(txnId);
527 if (port->isSnooping()) {
529 port->sendTimingSnoopReq(&pkt);
551 for (
auto i = curRetryList.begin();
i != curRetryList.end(); ++
i) {
553 "Sequencer may now be free. SendRetry to port %s\n",
555 (*i)->sendRetryReq();
566 DPRINTF(Drain,
"Drain count: %u\n", drainCount);
567 if (drainCount == 0) {
568 DPRINTF(Drain,
"RubyPort done draining, signaling drain done\n");
587 DPRINTF(Drain,
"RubyPort not drained\n");
601 bool accessPhysMem = access_backing_store;
605 if (pkt->
req->getExtraData() != 0) {
615 accessPhysMem =
false;
629 accessPhysMem =
false;
632 if (pkt->
req->isKernel()) {
633 accessPhysMem =
false;
634 needsResponse =
true;
643 if (owner.system->isDeviceMemAddr(pkt)) {
644 auto dmem = owner.system->getDeviceMemory(pkt);
646 }
else if (owner.system->isMemAddr(pkt->
getAddr())) {
649 panic(
"Packet is in neither device nor system memory!");
651 }
else if (needsResponse) {
661 schedTimingResp(pkt,
curTick());
675 for (
size_t i = 0;
i < owner.request_ports.size(); ++
i) {
676 ranges.splice(ranges.begin(),
677 owner.request_ports[
i]->getAddrRanges());
679 for ([[maybe_unused]]
const auto &
r : ranges)
689 for (
auto it = ranges.begin(); it != ranges.end(); ++it) {
690 if (it->contains(
addr)) {
702 return (owner.system->isMemAddr(
addr) && !isShadowRomAddress(
addr))
703 || owner.system->isDeviceMemAddr(pkt);
713 auto request = std::make_shared<Request>(
723 if ((*p)->isSnooping()) {
725 (*p)->sendTimingSnoopReq(&pkt);
736 r.pioResponsePort.sendRangeChange();
745 if (port->trySatisfyFunctional(func_pkt)) {
The ClockedObject class extends the SimObject with a clock and accessor functions to relate ticks to the cycles of the object's clock.
virtual std::string name() const
A Packet is used to encapsulate a transfer between two objects in the memory system (e.g., the L1 and L2 cache).
const std::string & cmdString() const
Return the string name of the cmd field (for debugging and tracing).
bool needsResponse() const
void makeResponse()
Take a request packet and modify it in place to be suitable for returning as a response to that request.
void pushSenderState(SenderState *sender_state)
Push a new sender state to the packet and make the current sender state the predecessor of the new one.
SenderState * popSenderState()
Pop the top of the state stack and return a pointer to it.
void convertScToWrite()
It has been determined that the SC packet should successfully update memory.
RequestPtr req
A pointer to the original request.
void setFunctionalResponseStatus(bool success)
void convertLlToRead()
When ruby is in use, Ruby will monitor the cache line and the phys memory should treat LL ops as normal reads.
bool cacheResponding() const
MemCmd cmd
The command field of the packet.
bool suppressFuncError() const
bool htmTransactionFailedInCache() const
Returns whether or not this packet/request has returned from the cache hierarchy in a failed transaction.
Ports are used to interface objects to each other.
const std::string name() const
Return port name (for DPRINTF).
The QueuedRequestPort combines two queues, a request queue and a snoop response queue, that both share the same port.
A queued port is a port that has an infinite queue for outgoing packets and thus decouples the module...
void schedTimingResp(PacketPtr pkt, Tick when)
Schedule the sending of a timing response.
@ TLBI_EXT_SYNC
The Request tells the CPU model that a remote TLB Sync has been requested.
@ funcRequestorId
This requestor id is used for functional requests that don't come from a particular device.
void sendRangeChange() const
Called by the owner to send a range change.
bool isMemAddr(Addr addr) const
Check if a physical address is within a range of a memory that is part of the global address map.
bool isDeviceMemAddr(const PacketPtr &pkt) const
Similar to isMemAddr but for devices.
void access(PacketPtr pkt)
Perform an untimed memory access and update all the state (e.g.
Tick recvAtomic(PacketPtr pkt)
virtual MessageBuffer * getMandatoryQueue() const =0
bool recvTimingResp(PacketPtr pkt)
Receive a timing response from the peer.
MemRequestPort(const std::string &_name, RubyPort &_port)
bool isShadowRomAddress(Addr addr) const
Tick recvAtomic(PacketPtr pkt)
Receive an atomic request packet from the peer.
MemResponsePort(const std::string &_name, RubyPort &_port, bool _access_backing_store, PortID id, bool _no_retry_on_stall)
void recvFunctional(PacketPtr pkt)
Receive a functional request packet from the peer.
bool recvTimingReq(PacketPtr pkt)
Receive a timing request from the peer.
bool isPhysMemAddress(PacketPtr pkt) const
void hitCallback(PacketPtr pkt)
void recvRangeChange()
Called to receive an address range change from the peer response port.
bool recvTimingResp(PacketPtr pkt)
Receive a timing response from the peer.
PioRequestPort(const std::string &_name, RubyPort &_port)
Tick recvAtomic(PacketPtr pkt)
Receive an atomic request packet from the peer.
bool recvTimingReq(PacketPtr pkt)
Receive a timing request from the peer.
AddrRangeList getAddrRanges() const
Get a list of the non-overlapping address ranges the owner is responsible for.
PioResponsePort(const std::string &_name, RubyPort &_port)
void ruby_hit_callback(PacketPtr pkt)
std::vector< MemResponsePort * > response_ports
Addr makeLineAddress(Addr addr) const
void ruby_unaddressed_callback(PacketPtr pkt)
virtual int outstandingCount() const =0
void ruby_stale_translation_callback(Addr txnId)
virtual void descheduleDeadlockEvent()=0
std::string printAddress(Addr addr) const
std::vector< MemResponsePort * > retryList
RubySystem * m_ruby_system
MemResponsePort memResponsePort
void init() override
init() is called after all C++ SimObjects have been created and all ports are connected.
PioResponsePort pioResponsePort
PioRequestPort pioRequestPort
RubyPort(const Params &p)
virtual bool isDeadlockEventScheduled() const =0
virtual int functionalWrite(Packet *func_pkt)
unsigned int gotAddrRanges
void addToRetryList(MemResponsePort *port)
MemRequestPort memRequestPort
Port & getPort(const std::string &if_name, PortID idx=InvalidPortID) override
Get a port with a given name and index.
AbstractController * m_controller
DrainState drain() override
Draining is the process of clearing out the states of SimObjects. These are the SimObjects that are partially executed or are partially in flight.
void ruby_eviction_callback(Addr address)
MessageBuffer * m_mandatory_q_ptr
std::vector< PioRequestPort * > request_ports
Addr getOffset(Addr addr) const
std::vector< MemResponsePort * >::iterator CpuPortIter
Vector of M5 Ports attached to this Ruby port.
uint32_t getBlockSizeBits()
uint32_t getBlockSizeBytes()
memory::SimpleMemory * getPhysMem()
void signalDrainDone() const
Signal that an object is drained.
DrainState drainState() const
Return the current drain state of an object.
DrainState
Object drain/handover states.
@ Draining
Draining buffers pending serialization/handover.
@ Drained
Buffers drained, ready for serialization/handover.
#define panic(...)
This implements a cprintf based panic() function.
#define fatal(...)
This implements a cprintf based fatal() function.
virtual Port & getPort(const std::string &if_name, PortID idx=InvalidPortID)
Get a port with a given name and index.
Addr makeLineAddress(Addr addr, int cacheLineBits)
std::string printAddress(Addr addr, int cacheLineBits)
Addr getOffset(Addr addr, int cacheLineBits)
Copyright (c) 2024 Arm Limited All rights reserved.
Tick curTick()
The universal simulation clock.
uint64_t Addr
Address type This will probably be moved somewhere else in the near future.
int16_t PortID
Port index/ID type, and a symbolic name for an invalid port id.
bool FullSystem
The FullSystem variable can be used to determine the current mode of simulation.
uint64_t Tick
Tick count type.
std::string csprintf(const char *format, const Args &...args)
SimpleMemory declaration.
const std::string & name()