46 #include "debug/Config.hh"
47 #include "debug/Drain.hh"
48 #include "debug/Ruby.hh"
49 #include "mem/ruby/protocol/AccessPermission.hh"
63     m_controller(NULL), m_mandatory_q_ptr(NULL),
65     pioRequestPort(csprintf("%s.pio-request-port", name()), *this),
66     pioResponsePort(csprintf("%s.pio-response-port", name()), *this),
67     memRequestPort(csprintf("%s.mem-request-port", name()), *this),
68     memResponsePort(csprintf("%s-mem-response-port", name()), *this,
69         p.ruby_system->getAccessBackingStore(), -1,
71     gotAddrRanges(p.port_interrupt_out_port_connection_count),
72     m_isCPUSequencer(p.is_cpu_sequencer)
77     for (size_t i = 0; i < p.port_in_ports_connection_count; ++i) {
79             ("%s.response_ports%d", name(), i), *this,
80             p.ruby_system->getAccessBackingStore(),
81             i, p.no_retry_on_stall));
85     for (size_t i = 0; i < p.port_interrupt_out_port_connection_count; ++i) {
87             "%s.request_ports%d", name(), i), *this));
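The constructor fragments above build one response port per connected in_ports peer and one PIO request port per connected interrupt_out_port peer, giving each a csprintf-generated name. A rough standalone sketch of that per-connection port-creation pattern, using a hypothetical SimplePort type in place of gem5's MemResponsePort, could look like this:

#include <cstddef>
#include <cstdio>
#include <memory>
#include <string>
#include <vector>

// Hypothetical stand-in for a gem5 response port; only the name matters here.
struct SimplePort
{
    explicit SimplePort(std::string n) : portName(std::move(n)) {}
    std::string portName;
};

// Build one port per connected peer, named "<owner>.response_ports<i>",
// mirroring the csprintf-based naming used in the constructor above.
std::vector<std::unique_ptr<SimplePort>>
makeResponsePorts(const std::string &owner, std::size_t connection_count)
{
    std::vector<std::unique_ptr<SimplePort>> ports;
    for (std::size_t i = 0; i < connection_count; ++i) {
        char buf[128];
        std::snprintf(buf, sizeof(buf), "%s.response_ports%zu",
                      owner.c_str(), i);
        ports.emplace_back(std::make_unique<SimplePort>(buf));
    }
    return ports;
}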
97 response_port->sendRangeChange();
106     if (if_name == "mem_request_port") {
108     } else if (if_name == "pio_request_port") {
110     } else if (if_name == "mem_response_port") {
112     } else if (if_name == "pio_response_port") {
114     } else if (if_name == "interrupt_out_port") {
118             panic("%s: unknown %s index (%d)\n", __func__, if_name, idx);
122     } else if (if_name == "in_ports") {
126             panic("%s: unknown %s index (%d)\n", __func__, if_name, idx);
140 reqQueue(_port, *this),
141 snoopRespQueue(_port, *this)
157 reqQueue(_port, *this),
158 snoopRespQueue(_port, *this)
165     bool _access_backing_store, PortID id,
166     bool _no_retry_on_stall):
170 access_backing_store(_access_backing_store),
171 no_retry_on_stall(_no_retry_on_stall)
183 owner.pioResponsePort.schedTimingResp(
184         pkt, curTick() + owner.m_ruby_system->clockPeriod());
198 assert(port != NULL);
216     for (size_t i = 0; i < owner.request_ports.size(); ++i) {
218         for (auto it = l.begin(); it != l.end(); ++it) {
219             if (it->contains(pkt->getAddr())) {
222                 [[maybe_unused]] bool success =
223                     owner.request_ports[i]->sendTimingReq(pkt);
229     panic("Should never reach here!\n");
236     if (!owner.system->bypassCaches()) {
237         panic("Ruby supports atomic accesses only in noncaching mode\n");
240     for (size_t i = 0; i < owner.request_ports.size(); ++i) {
242         for (auto it = l.begin(); it != l.end(); ++it) {
243             if (it->contains(pkt->getAddr())) {
244                 return owner.request_ports[i]->sendAtomic(pkt);
248     panic("Could not find address in Ruby PIO address ranges!\n");
258     panic("RubyPort should never see request with the "
259           "cacheResponding flag set\n");
263     if (pkt->req->isCacheMaintenance()) {
264         warn_once("Cache maintenance operations are not supported in Ruby.\n");
266         schedTimingResp(pkt, curTick());
272     if (!pkt->req->isMemMgmt() && !isPhysMemAddress(pkt)) {
273         assert(owner.memRequestPort.isConnected());
275                 "pio address\n", pkt->getAddr());
283         owner.memRequestPort.schedTimingReq(pkt,
294     RequestStatus requestStatus = owner.makeRequest(pkt);
299     if (requestStatus == RequestStatus_Issued) {
311             "Request %s for address %#x did not issue because %s\n",
313             RequestStatus_to_string(requestStatus));
325 if (!owner.system->bypassCaches()) {
326         panic("Ruby supports atomic accesses only in noncaching mode\n");
332 if (!isPhysMemAddress(pkt)) {
333 assert(owner.memRequestPort.isConnected());
335 "pio address\n", pkt->
getAddr());
342 Tick req_ticks = owner.memRequestPort.sendAtomic(pkt);
343 return owner.ticksToCycles(req_ticks);
352 static int mem_interface_type = -1;
353 if (mem_interface_type == -1) {
354         if (rs->m_abstract_controls[MachineType_Directory].size() != 0) {
355             mem_interface_type = MachineType_Directory;
357         else if (rs->m_abstract_controls[MachineType_Memory].size() != 0) {
358             mem_interface_type = MachineType_Memory;
361             panic("Can't find the memory controller interface\n");
366     MachineID id = owner.m_controller->mapAddressToMachine(
367         pkt->getAddr(), (MachineType)mem_interface_type);
369         rs->m_abstract_controls[mem_interface_type][id.getNum()];
371 if (access_backing_store)
372 rs->getPhysMem()->access(pkt);
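The atomic path above decides, once, whether memory is modeled behind a Directory or a Memory machine type and caches that answer in a function-local static before mapping the address to a controller. A standalone sketch of that resolve-once idiom (IfaceType and the two controller tables are hypothetical stand-ins for MachineType and m_abstract_controls):

#include <stdexcept>
#include <vector>

enum class IfaceType { Directory, Memory };

// Hypothetical stand-ins for RubySystem's per-MachineType controller tables.
std::vector<int> directory_controllers{0};
std::vector<int> memory_controllers;

IfaceType
memInterfaceType()
{
    // Resolved on the first call and reused afterwards, mirroring the
    // "static int mem_interface_type = -1" idiom in the listing above.
    static const IfaceType type = [] {
        if (!directory_controllers.empty())
            return IfaceType::Directory;
        if (!memory_controllers.empty())
            return IfaceType::Memory;
        throw std::runtime_error("can't find the memory controller interface");
    }();
    return type;
}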
385     if (!no_retry_on_stall && !owner.onRetryList(this)) {
386         owner.addToRetryList(this);
399 if (!isPhysMemAddress(pkt)) {
401 assert(owner.pioRequestPort.isConnected());
402 owner.pioRequestPort.sendFunctional(pkt);
409 if (access_backing_store) {
414 rs->getPhysMem()->functionalAccess(pkt);
416         bool accessSucceeded = false;
421             accessSucceeded = rs->functionalRead(pkt);
423             accessSucceeded = rs->functionalWrite(pkt);
431             fatal("Ruby functional %s failed for address %#x\n",
447             accessSucceeded ? "successful" : "failed");
466 assert(port != NULL);
485 assert(port != NULL);
501 auto request = std::make_shared<Request>(
505 request->setExtraData(txnId);
515 if (port->isSnooping()) {
517 port->sendTimingSnoopReq(&pkt);
539     for (auto i = curRetryList.begin(); i != curRetryList.end(); ++i) {
541 "Sequencer may now be free. SendRetry to port %s\n",
543 (*i)->sendRetryReq();
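The fragments at lines 385-386 and 539-543 form a small stall/retry protocol: a response port that had to reject a request parks itself on a retry list (unless no_retry_on_stall is set), and once the sequencer frees up every parked port receives a sendRetryReq(). A standalone sketch of that bookkeeping, with a hypothetical Port type:

#include <algorithm>
#include <vector>

struct Port
{
    void sendRetryReq() { /* peer would now re-issue its stalled request */ }
};

class RetryBookkeeping
{
    std::vector<Port *> retryList;

  public:
    bool onRetryList(Port *p) const
    {
        return std::find(retryList.begin(), retryList.end(), p)
            != retryList.end();
    }

    // Called when a port had to reject a request because the sequencer was full.
    void addToRetryList(Port *p)
    {
        if (!onRetryList(p))
            retryList.push_back(p);
    }

    // Called when resources free up; wake every stalled port exactly once.
    void retryAll()
    {
        std::vector<Port *> cur;
        cur.swap(retryList);  // ports may re-stall and re-register during retries
        for (auto *p : cur)
            p->sendRetryReq();
    }
};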
554     DPRINTF(Drain, "Drain count: %u\n", drainCount);
555     if (drainCount == 0) {
556         DPRINTF(Drain, "RubyPort done draining, signaling drain done\n");
575     DPRINTF(Drain, "RubyPort not drained\n");
589 bool accessPhysMem = access_backing_store;
593     if (pkt->req->getExtraData() != 0) {
603             accessPhysMem = false;
617         accessPhysMem = false;
620     if (pkt->req->isKernel()) {
621         accessPhysMem = false;
622         needsResponse = true;
631         if (owner.system->isDeviceMemAddr(pkt)) {
632             auto dmem = owner.system->getDeviceMemory(pkt);
634         } else if (owner.system->isMemAddr(pkt->getAddr())) {
637             panic("Packet is in neither device nor system memory!");
639     } else if (needsResponse) {
649         schedTimingResp(pkt, curTick());
663     for (size_t i = 0; i < owner.request_ports.size(); ++i) {
664         ranges.splice(ranges.begin(),
665             owner.request_ports[i]->getAddrRanges());
667     for ([[maybe_unused]] const auto &r : ranges)
677     for (auto it = ranges.begin(); it != ranges.end(); ++it) {
678         if (it->contains(addr)) {
690     return (owner.system->isMemAddr(addr) && !isShadowRomAddress(addr))
691         || owner.system->isDeviceMemAddr(pkt);
701 auto request = std::make_shared<Request>(
711 if ((*p)->isSnooping()) {
713 (*p)->sendTimingSnoopReq(&pkt);
724 r.pioResponsePort.sendRangeChange();
733 if (port->trySatisfyFunctional(func_pkt)) {