gem5 v24.1.0.1
RubyPort.cc
/*
 * Copyright (c) 2012-2013,2020-2021 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2009-2013 Advanced Micro Devices, Inc.
 * Copyright (c) 2011 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "mem/ruby/system/RubyPort.hh"

#include "base/compiler.hh"
#include "cpu/testers/rubytest/RubyTester.hh"
#include "debug/Config.hh"
#include "debug/Drain.hh"
#include "debug/Ruby.hh"
#include "mem/ruby/protocol/AccessPermission.hh"
#include "mem/ruby/slicc_interface/AbstractController.hh"
#include "mem/simple_mem.hh"
#include "sim/full_system.hh"
#include "sim/system.hh"

namespace gem5
{

namespace ruby
{

RubyPort::RubyPort(const Params &p)
    : ClockedObject(p), m_ruby_system(p.ruby_system), m_version(p.version),
      m_controller(NULL), m_mandatory_q_ptr(NULL),
      m_usingRubyTester(p.using_ruby_tester), system(p.system),
      pioRequestPort(csprintf("%s.pio-request-port", name()), *this),
      pioResponsePort(csprintf("%s.pio-response-port", name()), *this),
      memRequestPort(csprintf("%s.mem-request-port", name()), *this),
      memResponsePort(csprintf("%s-mem-response-port", name()), *this,
                      p.ruby_system->getAccessBackingStore(), -1,
                      p.no_retry_on_stall),
      gotAddrRanges(p.port_interrupt_out_port_connection_count),
      m_isCPUSequencer(p.is_cpu_sequencer)
{
    assert(m_version != -1);

    // create the response ports based on the number of connected ports
    for (size_t i = 0; i < p.port_in_ports_connection_count; ++i) {
        response_ports.push_back(new MemResponsePort(csprintf
            ("%s.response_ports%d", name(), i), *this,
            p.ruby_system->getAccessBackingStore(),
            i, p.no_retry_on_stall));
    }

    // create the request ports based on the number of connected ports
    for (size_t i = 0; i < p.port_interrupt_out_port_connection_count; ++i) {
        request_ports.push_back(new PioRequestPort(csprintf(
            "%s.request_ports%d", name(), i), *this));
    }
}

void
RubyPort::init()
{
    assert(m_controller != NULL);
    m_mandatory_q_ptr = m_controller->getMandatoryQueue();
    for (const auto &response_port : response_ports)
        response_port->sendRangeChange();
    if (gotAddrRanges == 0 && FullSystem) {
        pioResponsePort.sendRangeChange();
    }
}
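
// Illustrative Python-side wiring (a sketch under assumed config names, not
// code from this file): the port names resolved by getPort() below are what
// a configuration script binds, e.g. for a Sequencer-derived RubyPort:
//
//     seq = RubySequencer(version=0, ruby_system=ruby_system)
//     cpu.icache_port = seq.in_ports
//     cpu.dcache_port = seq.in_ports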

Port &
RubyPort::getPort(const std::string &if_name, PortID idx)
{
    if (if_name == "mem_request_port") {
        return memRequestPort;
    } else if (if_name == "pio_request_port") {
        return pioRequestPort;
    } else if (if_name == "mem_response_port") {
        return memResponsePort;
    } else if (if_name == "pio_response_port") {
        return pioResponsePort;
    } else if (if_name == "interrupt_out_port") {
        // used by the x86 CPUs to connect the interrupt PIO and interrupt
        // response port
        if (idx >= static_cast<PortID>(request_ports.size())) {
            panic("%s: unknown %s index (%d)\n", __func__, if_name, idx);
        }

        return *request_ports[idx];
    } else if (if_name == "in_ports") {
        // used by the CPUs to connect the caches to the interconnect, and
        // for the x86 case also the interrupt request port
        if (idx >= static_cast<PortID>(response_ports.size())) {
            panic("%s: unknown %s index (%d)\n", __func__, if_name, idx);
        }

        return *response_ports[idx];
    }

    // pass it along to our super class
    return ClockedObject::getPort(if_name, idx);
}
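
// The port constructors below hand their packet queues to the Queued*Port
// base classes; queued ports buffer outgoing packets, so responses scheduled
// with schedTimingResp() need no explicit retry handling in this file.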

RubyPort::PioRequestPort::PioRequestPort(const std::string &_name,
                                         RubyPort& _port) :
    QueuedRequestPort(_name, reqQueue, snoopRespQueue),
    owner{_port},
    reqQueue(_port, *this),
    snoopRespQueue(_port, *this)
{
    DPRINTF(RubyPort, "Created request pioport on sequencer %s\n", _name);
}

RubyPort::PioResponsePort::PioResponsePort(const std::string &_name,
                                           RubyPort& _port)
    : QueuedResponsePort(_name, queue), owner{_port}, queue(_port, *this)
{
    DPRINTF(RubyPort, "Created response pioport on sequencer %s\n", _name);
}

RubyPort::MemRequestPort::MemRequestPort(const std::string &_name,
                                         RubyPort& _port):
    QueuedRequestPort(_name, reqQueue, snoopRespQueue),
    owner{_port},
    reqQueue(_port, *this),
    snoopRespQueue(_port, *this)
{
    DPRINTF(RubyPort, "Created request memport on ruby sequencer %s\n", _name);
}

RubyPort::
MemResponsePort::MemResponsePort(const std::string &_name, RubyPort& _port,
                                 bool _access_backing_store, PortID id,
                                 bool _no_retry_on_stall):
    QueuedResponsePort(_name, queue, id),
    owner{_port},
    queue(_port, *this),
    access_backing_store(_access_backing_store),
    no_retry_on_stall(_no_retry_on_stall)
{
    DPRINTF(RubyPort, "Created response memport on ruby sequencer %s\n",
            _name);
}
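
// Responses arriving on a PIO request port (e.g. an x86 interrupt port) are
// reflected back to the CPU-facing pioResponsePort one Ruby clock period
// later.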

bool
RubyPort::PioRequestPort::recvTimingResp(PacketPtr pkt)
{
    DPRINTF(RubyPort, "Response for address: %#x\n", pkt->getAddr());

    // send next cycle
    owner.pioResponsePort.schedTimingResp(
        pkt, curTick() + owner.m_ruby_system->clockPeriod());
    return true;
}

bool RubyPort::MemRequestPort::recvTimingResp(PacketPtr pkt)
{
    // got a response from a device
    assert(pkt->isResponse());
    assert(!pkt->htmTransactionFailedInCache());

    // First we must retrieve the request port from the sender State
    RubyPort::SenderState *senderState =
        safe_cast<RubyPort::SenderState *>(pkt->popSenderState());
    MemResponsePort *port = senderState->port;
    assert(port != NULL);
    delete senderState;

    // In FS mode, ruby memory will receive pio responses from devices
    // and it must forward these responses back to the particular CPU.
    DPRINTF(RubyPort, "Pio response for address %#x, going to %s\n",
            pkt->getAddr(), port->name());

    // attempt to send the response in the next cycle
    port->schedTimingResp(pkt, curTick() + owner.m_ruby_system->clockPeriod());

    return true;
}
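
// Inbound PIO requests (e.g. interrupt messages in x86 full-system mode) are
// routed to whichever request port claims the target address in its
// advertised range list.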

bool
RubyPort::PioResponsePort::recvTimingReq(PacketPtr pkt)
{
    for (size_t i = 0; i < owner.request_ports.size(); ++i) {
        AddrRangeList l = owner.request_ports[i]->getAddrRanges();
        for (auto it = l.begin(); it != l.end(); ++it) {
            if (it->contains(pkt->getAddr())) {
                // generally it is not safe to assume success here as
                // the port could be blocked
                [[maybe_unused]] bool success =
                    owner.request_ports[i]->sendTimingReq(pkt);
                assert(success);
                return true;
            }
        }
    }
    panic("Should never reach here!\n");
}

Tick
RubyPort::PioResponsePort::recvAtomic(PacketPtr pkt)
{
    // Only atomic_noncaching mode supported!
    if (!owner.system->bypassCaches()) {
        panic("Ruby supports atomic accesses only in noncaching mode\n");
    }

    for (size_t i = 0; i < owner.request_ports.size(); ++i) {
        AddrRangeList l = owner.request_ports[i]->getAddrRanges();
        for (auto it = l.begin(); it != l.end(); ++it) {
            if (it->contains(pkt->getAddr())) {
                return owner.request_ports[i]->sendAtomic(pkt);
            }
        }
    }
    panic("Could not find address in Ruby PIO address ranges!\n");
}
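
// Main CPU-side timing entry point. Addresses that are not Ruby-backed
// physical memory are assumed to be PIO and sent out memRequestPort
// (typically connected to the IO bus); everything else is issued to the
// Ruby sequencer via makeRequest().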

bool
RubyPort::MemResponsePort::recvTimingReq(PacketPtr pkt)
{
    DPRINTF(RubyPort, "Timing request for address %#x on port %d\n",
            pkt->getAddr(), id);

    if (pkt->cacheResponding())
        panic("RubyPort should never see request with the "
              "cacheResponding flag set\n");

    // ruby doesn't support cache maintenance operations at the
    // moment, as a workaround, we respond right away
    if (pkt->req->isCacheMaintenance()) {
        warn_once("Cache maintenance operations are not supported in Ruby.\n");
        pkt->makeResponse();
        schedTimingResp(pkt, curTick());
        return true;
    }
    // Check for pio requests and directly send them to the dedicated
    // pio port.
    if (pkt->cmd != MemCmd::MemSyncReq && !pkt->req->hasNoAddr()) {
        if (!pkt->req->isMemMgmt() && !isPhysMemAddress(pkt)) {
            assert(owner.memRequestPort.isConnected());
            DPRINTF(RubyPort, "Request address %#x assumed to be a "
                    "pio address\n", pkt->getAddr());

            // Save the port in the sender state object to be used later to
            // route the response
            pkt->pushSenderState(new SenderState(this));

            // send next cycle
            RubySystem *rs = owner.m_ruby_system;
            owner.memRequestPort.schedTimingReq(pkt,
                curTick() + rs->clockPeriod());
            return true;
        }
    }

    // Save the port in the sender state object to be used later to
    // route the response
    pkt->pushSenderState(new SenderState(this));

    // Submit the ruby request
    RequestStatus requestStatus = owner.makeRequest(pkt);

    // If the request successfully issued then we should return true.
    // Otherwise, we need to tell the port to retry at a later point
    // and return false.
    if (requestStatus == RequestStatus_Issued) {
        DPRINTF(RubyPort, "Request %s 0x%x issued\n", pkt->cmdString(),
                pkt->getAddr());
        return true;
    }

    // pop off sender state as this request failed to issue
    SenderState *ss = safe_cast<SenderState *>(pkt->popSenderState());
    delete ss;

    if (pkt->cmd != MemCmd::MemSyncReq) {
        DPRINTF(RubyPort,
                "Request %s for address %#x did not issue because %s\n",
                pkt->cmdString(), pkt->getAddr(),
                RequestStatus_to_string(requestStatus));
    }

    addToRetryList();

    return false;
}
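
// The atomic path bypasses Ruby's timing model entirely: the packet is
// handed straight to the directory or memory controller's recvAtomic(),
// which is why only atomic_noncaching mode is supported.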

Tick
RubyPort::MemResponsePort::recvAtomic(PacketPtr pkt)
{
    // Only atomic_noncaching mode supported!
    if (!owner.system->bypassCaches()) {
        panic("Ruby supports atomic accesses only in noncaching mode\n");
    }

    RubySystem *rs = owner.m_ruby_system;

    // Check for pio requests and directly send them to the dedicated
    // pio port.
    if (pkt->cmd != MemCmd::MemSyncReq) {
        if (!isPhysMemAddress(pkt)) {
            assert(owner.memRequestPort.isConnected());
            DPRINTF(RubyPort, "Request address %#x assumed to be a "
                    "pio address\n", pkt->getAddr());

            // Save the port in the sender state object to be used later to
            // route the response
            pkt->pushSenderState(new SenderState(this));

            // send next cycle
            Tick req_ticks = owner.memRequestPort.sendAtomic(pkt);
            return owner.ticksToCycles(req_ticks);
        }

        assert(owner.getOffset(pkt->getAddr()) + pkt->getSize() <=
               rs->getBlockSizeBytes());
    }

    // Find the machine type of memory controller interface
    static int mem_interface_type = -1;
    if (mem_interface_type == -1) {
        if (rs->m_abstract_controls[MachineType_Directory].size() != 0) {
            mem_interface_type = MachineType_Directory;
        } else if (rs->m_abstract_controls[MachineType_Memory].size() != 0) {
            mem_interface_type = MachineType_Memory;
        } else {
            panic("Can't find the memory controller interface\n");
        }
    }

    // Find the controller for the target address
    MachineID id = owner.m_controller->mapAddressToMachine(
        pkt->getAddr(), (MachineType)mem_interface_type);
    AbstractController *mem_interface =
        rs->m_abstract_controls[mem_interface_type][id.getNum()];
    Tick latency = mem_interface->recvAtomic(pkt);
    if (access_backing_store)
        rs->getPhysMem()->access(pkt);
    return latency;
}

void
RubyPort::MemResponsePort::addToRetryList()
{
    //
    // Unless the request port does not want retries (e.g., the Ruby tester),
    // record the stalled M5 port for later retry when the sequencer
    // becomes free.
    //
    if (!no_retry_on_stall && !owner.onRetryList(this)) {
        owner.addToRetryList(this);
    }
}

void
RubyPort::MemResponsePort::recvFunctional(PacketPtr pkt)
{
    DPRINTF(RubyPort, "Functional access for address: %#x\n", pkt->getAddr());

    // In a CPU+dGPU system, GPU functional packets are injected into
    // the CPU network. This happens because the requestorId is automatically
    // set to that of the CPU network for these packets. Here, we set it
    // to that of the GPU RubyPort so that it uses the right network to
    // access GPU caches.
    RubySystem *rs = owner.m_ruby_system;

    // Check for pio requests and directly send them to the dedicated
    // pio port.
    if (!isPhysMemAddress(pkt)) {
        DPRINTF(RubyPort, "Pio Request for address: %#x\n", pkt->getAddr());
        assert(owner.pioRequestPort.isConnected());
        owner.pioRequestPort.sendFunctional(pkt);
        return;
    }

    assert(pkt->getAddr() + pkt->getSize() <=
           owner.makeLineAddress(pkt->getAddr()) + rs->getBlockSizeBytes());

    if (pkt->req->getGPUFuncAccess()) {
        pkt->req->requestorId(owner.m_controller->getRequestorId());
    }

    if (access_backing_store) {
        // The attached physmem contains the official version of data.
        // The following command performs the real functional access.
        // This line should be removed once Ruby supplies the official version
        // of data.
        rs->getPhysMem()->functionalAccess(pkt);
    } else {
        bool accessSucceeded = false;
        bool needsResponse = pkt->needsResponse();

        // Do the functional access on ruby memory
        if (pkt->isRead()) {
            accessSucceeded = rs->functionalRead(pkt);
        } else if (pkt->isWrite()) {
            accessSucceeded = rs->functionalWrite(pkt);
        } else {
            panic("Unsupported functional command %s\n", pkt->cmdString());
        }

        // Unless the request port explicitly said otherwise, generate an
        // error if the functional request failed
        if (!accessSucceeded && !pkt->suppressFuncError()) {
            fatal("Ruby functional %s failed for address %#x\n",
                  pkt->isWrite() ? "write" : "read", pkt->getAddr());
        }

        // turn packet around to go back to request port if response expected
        if (needsResponse) {
            // The pkt is already turned into a response if the directory
            // forwarded the request to the memory controller (see
            // AbstractController::functionalMemoryWrite and
            // AbstractMemory::functionalAccess)
            if (!pkt->isResponse())
                pkt->makeResponse();
            pkt->setFunctionalResponseStatus(accessSucceeded);
        }

        DPRINTF(RubyPort, "Functional access %s!\n",
                accessSucceeded ? "successful" : "failed");
    }
}
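
// Completion callback from the Ruby sequencer. The SenderState pushed in
// recvTimingReq() identifies which CPU-facing port the response belongs to.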

void
RubyPort::ruby_hit_callback(PacketPtr pkt)
{
    DPRINTF(RubyPort, "Hit callback for %s 0x%x\n", pkt->cmdString(),
            pkt->getAddr());

    // The packet was destined for memory and has not yet been turned
    // into a response
    assert(system->isMemAddr(pkt->getAddr()) ||
           system->isDeviceMemAddr(pkt) ||
           pkt->req->hasNoAddr());
    assert(pkt->isRequest());

    // First we must retrieve the request port from the sender State
    RubyPort::SenderState *senderState =
        safe_cast<RubyPort::SenderState *>(pkt->popSenderState());
    MemResponsePort *port = senderState->port;
    assert(port != NULL);
    delete senderState;

    port->hitCallback(pkt);

    trySendRetries();
}

void
RubyPort::ruby_unaddressed_callback(PacketPtr pkt)
{
    DPRINTF(RubyPort, "Unaddressed callback for %s\n", pkt->cmdString());

    assert(pkt->isRequest());

    // First we must retrieve the request port from the sender State
    RubyPort::SenderState *senderState =
        safe_cast<RubyPort::SenderState *>(pkt->popSenderState());
    MemResponsePort *port = senderState->port;
    assert(port != NULL);
    delete senderState;

    port->hitCallback(pkt);

    trySendRetries();
}

void
RubyPort::ruby_stale_translation_callback(Addr txnId)
{
    DPRINTF(RubyPort, "Stale Translation Callback\n");

    // Allocate the invalidate request and packet on the stack, as it is
    // assumed they will not be modified or deleted by receivers.
    // TODO: should this really be using funcRequestorId?
    auto request = std::make_shared<Request>(
        0, m_ruby_system->getBlockSizeBytes(), Request::TLBI_EXT_SYNC,
        Request::funcRequestorId);
    // Store the txnId in extraData instead of the address
    request->setExtraData(txnId);

    // Use a single packet to signal all snooping ports of the external sync.
    // This assumes that snooping ports do NOT modify the packet/request
    // TODO rename TlbiExtSync to StaleTranslation
    Packet pkt(request, MemCmd::TlbiExtSync);
    // TODO - see where response_ports is filled; it may be that we only want
    // to send to specific places
    for (auto &port : response_ports) {
        // check if the connected request port is snooping
        if (port->isSnooping()) {
            // send as a snoop request
            port->sendTimingSnoopReq(&pkt);
        }
    }
}

void
RubyPort::trySendRetries()
{
    //
    // If we had to stall the MemResponsePorts, wake them up because the
    // sequencer likely has free resources now.
    //
    if (!retryList.empty()) {
        // Record the current list of ports to retry on a temporary list
        // before calling sendRetryReq on those ports. sendRetryReq will cause
        // an immediate retry, which may result in the ports being put back on
        // the list. Therefore we want to clear the retryList before calling
        // sendRetryReq.
        std::vector<MemResponsePort *> curRetryList(retryList);

        retryList.clear();

        for (auto i = curRetryList.begin(); i != curRetryList.end(); ++i) {
            DPRINTF(RubyPort,
                    "Sequencer may now be free. SendRetry to port %s\n",
                    (*i)->name());
            (*i)->sendRetryReq();
        }
    }
}

void
RubyPort::testDrainComplete()
{
    // If we weren't able to drain before, we might be able to now.
    if (drainState() == DrainState::Draining) {
        unsigned int drainCount = outstandingCount();
        DPRINTF(Drain, "Drain count: %u\n", drainCount);
        if (drainCount == 0) {
            DPRINTF(Drain, "RubyPort done draining, signaling drain done\n");
            signalDrainDone();
        }
    }
}

DrainState
RubyPort::drain()
{
    if (isDeadlockEventScheduled()) {
        descheduleDeadlockEvent();
    }

    //
    // If the RubyPort is not empty, then it needs to clear all outstanding
    // requests before it should call signalDrainDone()
    //
    DPRINTF(Config, "outstanding count %d\n", outstandingCount());
    if (outstandingCount() > 0) {
        DPRINTF(Drain, "RubyPort not drained\n");
        return DrainState::Draining;
    } else {
        return DrainState::Drained;
    }
}
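
// Convert a completed Ruby transaction back into a gem5 response: successful
// SCs become plain writes, failed SCs and flushes skip physical memory, and
// data is copied to or from the backing store when one is attached.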

void
RubyPort::MemResponsePort::hitCallback(PacketPtr pkt)
{
    bool needsResponse = pkt->needsResponse();

    // Unless specified at configuration, all responses except failed SC
    // and Flush operations access M5 physical memory.
    bool accessPhysMem = access_backing_store;

    if (pkt->isLLSC()) {
        if (pkt->isWrite()) {
            if (pkt->req->getExtraData() != 0) {
                //
                // Successful SC packets convert to normal writes
                //
                pkt->convertScToWrite();
            } else {
                //
                // Failed SC packets don't access physical memory and thus
                // the RubyPort itself must convert it to a response.
                //
                accessPhysMem = false;
            }
        } else {
            //
            // All LL packets convert to normal loads so that M5 PhysMem does
            // not lock the blocks.
            //
            pkt->convertLlToRead();
        }
    }

    // Flush, acquire, release requests don't access physical memory
    if (pkt->isFlush() || pkt->cmd == MemCmd::MemSyncReq
        || pkt->cmd == MemCmd::WriteCompleteResp || pkt->req->hasNoAddr()) {
        accessPhysMem = false;
    }

    if (pkt->req->isKernel()) {
        accessPhysMem = false;
        needsResponse = true;
    }

    DPRINTF(RubyPort, "Hit callback needs response %d\n", needsResponse);

    RubySystem *rs = owner.m_ruby_system;
    if (accessPhysMem) {
        // We must check device memory first in case it overlaps with the
        // system memory range.
        if (owner.system->isDeviceMemAddr(pkt)) {
            auto dmem = owner.system->getDeviceMemory(pkt);
            dmem->access(pkt);
        } else if (owner.system->isMemAddr(pkt->getAddr())) {
            rs->getPhysMem()->access(pkt);
        } else {
            panic("Packet is in neither device nor system memory!");
        }
    } else if (needsResponse) {
        pkt->makeResponse();
    }

    // turn packet around to go back to request port if response expected
    if (needsResponse || pkt->isResponse()) {
        DPRINTF(RubyPort, "Sending packet back over port\n");
        // Send a response in the same cycle. There is no need to delay the
        // response because the response latency is already incurred in the
        // Ruby protocol.
        schedTimingResp(pkt, curTick());
    } else {
        delete pkt;
    }

    DPRINTF(RubyPort, "Hit callback done!\n");
}

AddrRangeList
RubyPort::PioResponsePort::getAddrRanges() const
{
    // at the moment the assumption is that the request port does not care
    AddrRangeList ranges;

    for (size_t i = 0; i < owner.request_ports.size(); ++i) {
        ranges.splice(ranges.begin(),
                      owner.request_ports[i]->getAddrRanges());
    }
    for ([[maybe_unused]] const auto &r : ranges)
        DPRINTF(RubyPort, "%s\n", r.to_string());
    return ranges;
}

bool
RubyPort::MemResponsePort::isShadowRomAddress(Addr addr) const
{
    AddrRangeList ranges = owner.system->getShadowRomRanges();

    for (auto it = ranges.begin(); it != ranges.end(); ++it) {
        if (it->contains(addr)) {
            return true;
        }
    }

    return false;
}

bool
RubyPort::MemResponsePort::isPhysMemAddress(PacketPtr pkt) const
{
    Addr addr = pkt->getAddr();
    return (owner.system->isMemAddr(addr) && !isShadowRomAddress(addr))
        || owner.system->isDeviceMemAddr(pkt);
}
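
// When Ruby evicts a block, an InvalidateReq is snooped to all snooping
// CPU-side ports so that CPU models can, for example, clear LL/SC
// reservations on that line.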

void
RubyPort::ruby_eviction_callback(Addr address)
{
    DPRINTF(RubyPort, "Sending invalidations.\n");
    // Allocate the invalidate request and packet on the stack, as it is
    // assumed they will not be modified or deleted by receivers.
    // TODO: should this really be using funcRequestorId?
    auto request = std::make_shared<Request>(
        address, m_ruby_system->getBlockSizeBytes(), 0,
        Request::funcRequestorId);

    // Use a single packet to signal all snooping ports of the invalidation.
    // This assumes that snooping ports do NOT modify the packet/request
    Packet pkt(request, MemCmd::InvalidateReq);
    for (CpuPortIter p = response_ports.begin(); p != response_ports.end();
         ++p) {
        // check if the connected request port is snooping
        if ((*p)->isSnooping()) {
            // send as a snoop request
            (*p)->sendTimingSnoopReq(&pkt);
        }
    }
}

void
RubyPort::PioRequestPort::recvRangeChange()
{
    RubyPort &r = static_cast<RubyPort &>(owner);
    r.gotAddrRanges--;
    if (r.gotAddrRanges == 0 && FullSystem) {
        r.pioResponsePort.sendRangeChange();
    }
}

int
RubyPort::functionalWrite(Packet *func_pkt)
{
    int num_written = 0;
    for (auto port : response_ports) {
        if (port->trySatisfyFunctional(func_pkt)) {
            num_written += 1;
        }
    }
    return num_written;
}
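
// These helpers delegate to the free functions in Address.hh, passing this
// port's RubySystem block-size bits rather than a global constant.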

Addr
RubyPort::getOffset(Addr addr) const
{
    return ruby::getOffset(addr, m_ruby_system->getBlockSizeBits());
}

Addr
RubyPort::makeLineAddress(Addr addr) const
{
    return ruby::makeLineAddress(addr, m_ruby_system->getBlockSizeBits());
}

std::string
RubyPort::printAddress(Addr addr) const
{
    return ruby::printAddress(addr, m_ruby_system->getBlockSizeBits());
}

} // namespace ruby
} // namespace gem5