dma_device.cc
/*
 * Copyright (c) 2012, 2015, 2017, 2019 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2006 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "dev/dma_device.hh"

#include <algorithm>
#include <cassert>
#include <cstring>
#include <utility>

#include "base/logging.hh"
#include "base/trace.hh"
#include "debug/DMA.hh"
#include "debug/Drain.hh"
#include "sim/clocked_object.hh"
#include "sim/system.hh"

namespace gem5
{

DmaPort::DmaPort(ClockedObject *dev, System *s,
                 uint32_t sid, uint32_t ssid)
    : RequestPort(dev->name() + ".dma"),
      device(dev), sys(s), requestorId(s->getRequestorId(dev)),
      sendEvent([this]{ sendDma(); }, dev->name()),
      defaultSid(sid), defaultSSid(ssid), cacheLineSize(s->cacheLineSize())
{ }

void
DmaPort::handleRespPacket(PacketPtr pkt, Tick delay)
{
    // Should always see a response with a sender state.
    assert(pkt->isResponse());

    // Get the DMA sender state.
    auto *state = dynamic_cast<DmaReqState*>(pkt->senderState);
    assert(state);

    handleResp(state, pkt->getAddr(), pkt->req->getSize(), delay);

    delete pkt;
}

void
DmaPort::handleResp(DmaReqState *state, Addr addr, Addr size, Tick delay)
{
    assert(pendingCount != 0);
    pendingCount--;
    DPRINTF(DMA, "Received response %s for addr: %#x size: %d nb: %d," \
            " tot: %d sched %d\n",
            MemCmd(state->cmd).toString(), addr, size,
            state->numBytes, state->totBytes,
            state->completionEvent ?
            state->completionEvent->scheduled() : 0);

    // Update the number of bytes received based on the request rather
    // than the packet as the latter could be rounded up to line sizes.
    state->numBytes += size;
    assert(state->totBytes >= state->numBytes);

    bool all_bytes = (state->totBytes == state->numBytes);
    if (state->aborted) {
        // If this request was aborted, check to see if its in flight accesses
        // have finished. There may be packets for more than one request in
        // flight at a time, so check for finished requests, or no more
        // packets.
        if (all_bytes || pendingCount == 0) {
            // If yes, signal its abort event (if any) and delete the state.
            if (state->abortEvent) {
                device->schedule(state->abortEvent, curTick());
            }
            delete state;
        }
    } else if (all_bytes) {
        // If we have reached the end of this DMA request, then signal the
        // completion and delete the state.
        if (state->completionEvent) {
            delay += state->delay;
            device->schedule(state->completionEvent, curTick() + delay);
        }
        delete state;
    }

    // We might be drained at this point, if so signal the drain event.
    if (pendingCount == 0)
        signalDrainDone();
}

PacketPtr
DmaPort::DmaReqState::createPacket()
{
    RequestPtr req = std::make_shared<Request>(
            gen.addr(), gen.size(), flags, id);
    req->setStreamId(sid);
    req->setSubstreamId(ssid);
    req->taskId(context_switch_task_id::DMA);

    PacketPtr pkt = new Packet(req, cmd);

    if (data)
        pkt->dataStatic(data + gen.complete());

    pkt->senderState = this;
    return pkt;
}

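// Illustrative note: DmaReqState's ChunkGenerator splits one dmaAction()
// into cache-line-sized pieces, so createPacket() above is called once per
// chunk. Assuming a 64-byte line, a hypothetical 200-byte read starting at
// 0x3f8 would be issued as:
//
//     chunk 0: addr 0x3f8, size  8    (runs up to the 0x400 line boundary)
//     chunk 1: addr 0x400, size 64
//     chunk 2: addr 0x440, size 64
//     chunk 3: addr 0x480, size 64    (8 + 3 * 64 = 200 bytes total)
//
// gen.complete() is the running byte offset into the caller's buffer, which
// is why dataStatic(data + gen.complete()) points each packet at the right
// slice of that buffer.
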
bool
DmaPort::recvTimingResp(PacketPtr pkt)
{
    // We shouldn't ever get a cacheable block in Modified state.
    assert(pkt->req->isUncacheable() ||
           !(pkt->cacheResponding() && !pkt->hasSharers()));

    handleRespPacket(pkt);

    return true;
}

DmaDevice::DmaDevice(const Params &p)
    : PioDevice(p), dmaPort(this, sys, p.sid, p.ssid)
{ }

void
DmaDevice::init()
{
    panic_if(!dmaPort.isConnected(),
             "DMA port of %s not connected to anything!", name());
    PioDevice::init();
}

DrainState
DmaPort::drain()
{
    if (pendingCount == 0) {
        return DrainState::Drained;
    } else {
        DPRINTF(Drain, "DmaPort not drained\n");
        return DrainState::Draining;
    }
}

void
DmaPort::recvReqRetry()
{
    retryPending = false;
    if (transmitList.size())
        trySendTimingReq();
}

void
DmaPort::dmaAction(Packet::Command cmd, Addr addr, int size, Event *event,
                   uint8_t *data, uint32_t sid, uint32_t ssid, Tick delay,
                   Request::Flags flag)
{
    DPRINTF(DMA, "Starting DMA for addr: %#x size: %d sched: %d\n", addr, size,
            event ? event->scheduled() : -1);

    // One DMA request sender state for every action, that is then
    // split into many requests and packets based on the block size,
    // i.e. cache line size.
    transmitList.push_back(
            new DmaReqState(cmd, addr, cacheLineSize, size,
                            data, flag, requestorId, sid, ssid, event, delay));

    // In zero time, also initiate the sending of the packets for the request
    // we have just created. For atomic this involves actually completing all
    // the requests.
    sendDma();
}

void
DmaPort::dmaAction(Packet::Command cmd, Addr addr, int size, Event *event,
                   uint8_t *data, Tick delay, Request::Flags flag)
{
    dmaAction(cmd, addr, size, event, data,
              defaultSid, defaultSSid, delay, flag);
}

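// Usage sketch (hedged; the device members named here are hypothetical): a
// DmaDevice subclass typically owns a data buffer and a completion event and
// starts a transfer through its dmaPort like this:
//
//     // EventFunctionWrapper readDone([this]{ onReadDone(); }, name());
//     // uint8_t buf[4096];
//     dmaPort.dmaAction(MemCmd::ReadReq, busAddr, sizeof(buf), &readDone,
//                       buf, /* delay */ 0);
//
// In timing mode the call only queues a DmaReqState and returns; readDone is
// scheduled once the response for the final chunk arrives. In atomic mode
// sendDma() below completes the whole transfer before dmaAction() returns.
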
void
DmaPort::abortPending()
{
    if (inRetry) {
        delete inRetry;
        inRetry = nullptr;
    }

    if (pendingCount && !transmitList.empty()) {
        auto *state = transmitList.front();
        if (state->numBytes != state->gen.complete()) {
            // In flight packets refer to the transmission at the front of the
            // list, and not a transmission whose packets have all been sent
            // but not completed. Preserve the state so the packets don't have
            // dangling pointers.
            transmitList.pop_front();
            state->aborted = true;
        }
    }

    // Get rid of requests that haven't started yet.
    while (!transmitList.empty()) {
        auto *state = transmitList.front();
        if (state->abortEvent)
            device->schedule(state->abortEvent, curTick());
        delete state;
        transmitList.pop_front();
    }

    if (sendEvent.scheduled())
        device->deschedule(sendEvent);

    if (pendingCount == 0)
        signalDrainDone();
}

void
DmaPort::trySendTimingReq()
{
    // Send the next packet for the first DMA request on the transmit list,
    // and schedule the following send if it is successful
    DmaReqState *state = transmitList.front();

    PacketPtr pkt = inRetry ? inRetry : state->createPacket();
    inRetry = nullptr;

    DPRINTF(DMA, "Trying to send %s addr %#x\n", pkt->cmdString(),
            pkt->getAddr());

    // Check if this was the last packet now, since hypothetically the packet
    // response may come immediately, and state may be deleted.
    bool last = state->gen.last();
    if (sendTimingReq(pkt)) {
        pendingCount++;
    } else {
        retryPending = true;
        inRetry = pkt;
    }
    if (!retryPending) {
        state->gen.next();
        // If that was the last packet from this request, pop it from the list.
        if (last)
            transmitList.pop_front();
        DPRINTF(DMA, "-- Done\n");
        // If there is more to do, then do so.
        if (!transmitList.empty()) {
            // This should ultimately wait for as many cycles as the device
            // needs to send the packet, but currently the port does not have
            // any known width so simply wait a single cycle.
            device->schedule(sendEvent, device->clockEdge(Cycles(1)));
        }
    } else {
        DPRINTF(DMA, "-- Failed, waiting for retry\n");
    }

    DPRINTF(DMA, "TransmitList: %d, retryPending: %d\n",
            transmitList.size(), retryPending ? 1 : 0);
}

bool
DmaPort::sendAtomicReq(DmaReqState *state)
{
    PacketPtr pkt = state->createPacket();
    DPRINTF(DMA, "Sending DMA for addr: %#x size: %d\n",
            state->gen.addr(), state->gen.size());
    pendingCount++;
    Tick lat = sendAtomic(pkt);

    // Check if we're done, since handleResp may delete state.
    bool done = !state->gen.next();
    handleRespPacket(pkt, lat);
    return done;
}

bool
DmaPort::sendAtomicBdReq(DmaReqState *state)
{
    bool done = false;
    pendingCount++;

    auto bd_it = memBackdoors.contains(state->gen.addr());
    if (bd_it == memBackdoors.end()) {
        // We don't have a backdoor for this address, so use a packet.

        PacketPtr pkt = state->createPacket();
        DPRINTF(DMA, "Sending DMA for addr: %#x size: %d\n",
                state->gen.addr(), state->gen.size());

        MemBackdoorPtr bd = nullptr;
        Tick lat = sendAtomicBackdoor(pkt, bd);

        // If we got a backdoor, record it.
        if (bd && memBackdoors.insert(bd->range(), bd) != memBackdoors.end()) {
            // Invalidation callback which finds this backdoor and removes it.
            auto callback = [this](const MemBackdoor &backdoor) {
                for (auto it = memBackdoors.begin();
                        it != memBackdoors.end(); it++) {
                    if (it->second == &backdoor) {
                        memBackdoors.erase(it);
                        return;
                    }
                }
                panic("Got invalidation for unknown memory backdoor.");
            };
            bd->addInvalidationCallback(callback);
        }

        // Check if we're done now, since handleResp may delete state.
        done = !state->gen.next();
        handleRespPacket(pkt, lat);
    } else {
        // We have a backdoor that can at least partially satisfy this request.
        DPRINTF(DMA, "Handling DMA for addr: %#x size %d through backdoor\n",
                state->gen.addr(), state->gen.size());

        const auto *bd = bd_it->second;
        // Offset of this access into the backdoor.
        const Addr offset = state->gen.addr() - bd->range().start();
        // How many bytes we still need.
        const Addr remaining = state->totBytes - state->gen.complete();
        // How many bytes this backdoor can provide, starting from offset.
        const Addr available = bd->range().size() - offset;

        // How many bytes we're going to handle through this backdoor.
        const Addr handled = std::min(remaining, available);

        // If there's a buffer for data, read/write it.
        if (state->data) {
            uint8_t *bd_data = bd->ptr() + offset;
            uint8_t *state_data = state->data + state->gen.complete();
            if (MemCmd(state->cmd).isRead())
                memcpy(state_data, bd_data, handled);
            else
                memcpy(bd_data, state_data, handled);
        }

        // Advance the chunk generator past this region of memory.
        state->gen.setNext(state->gen.addr() + handled);

        // Check if we're done now, since handleResp may delete state.
        done = !state->gen.next();
        handleResp(state, state->gen.addr(), handled);
    }

    return done;
}

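// Worked example of the backdoor arithmetic above (values are illustrative):
// suppose the recorded backdoor covers [0x1000, 0x2000), the current chunk
// starts at gen.addr() = 0x1f80, and 0x200 bytes of the request are still
// outstanding. Then:
//
//     offset    = 0x1f80 - 0x1000  = 0xf80
//     remaining = 0x200
//     available = 0x1000 - 0xf80   = 0x80
//     handled   = min(0x200, 0x80) = 0x80
//
// Only 0x80 bytes are copied through the backdoor; setNext() then advances
// the generator to 0x2000, and the next iteration either finds another
// backdoor for that address or falls back to sending a packet.
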
void
DmaPort::sendDma()
{
    // Some kind of selection between access methods. More work is going to
    // have to be done to make switching actually work.
    assert(transmitList.size());

    if (sys->isTimingMode()) {
        // If we are either waiting for a retry or are still waiting after
        // sending the last packet, then do not proceed.
        if (retryPending || sendEvent.scheduled()) {
            DPRINTF(DMA, "Can't send immediately, waiting to send\n");
            return;
        }

        trySendTimingReq();
    } else if (sys->isAtomicMode()) {
        const bool bypass = sys->bypassCaches();

        // Send everything there is to send in zero time.
        while (!transmitList.empty()) {
            DmaReqState *state = transmitList.front();
            transmitList.pop_front();

            bool done = state->gen.done();
            while (!done)
                done = bypass ? sendAtomicBdReq(state) : sendAtomicReq(state);
        }
    } else {
        panic("Unknown memory mode.");
    }
}

Port &
DmaDevice::getPort(const std::string &if_name, PortID idx)
{
    if (if_name == "dma") {
        return dmaPort;
    }
    return PioDevice::getPort(if_name, idx);
}

DmaReadFifo::DmaReadFifo(DmaPort &_port, size_t size,
                         unsigned max_req_size,
                         unsigned max_pending,
                         Request::Flags flags)
    : maxReqSize(max_req_size), fifoSize(size),
      reqFlags(flags), port(_port), cacheLineSize(port.sys->cacheLineSize()),
      buffer(size)
{
    freeRequests.resize(max_pending);
    for (auto &e : freeRequests)
        e.reset(new DmaDoneEvent(this, max_req_size));
}

DmaReadFifo::~DmaReadFifo()
{
    for (auto &p : pendingRequests) {
        DmaDoneEvent *e(p.release());

        if (e->done()) {
            delete e;
        } else {
            // We can't kill in-flight DMAs, so we'll just transfer
            // ownership to the event queue so that they get freed
            // when they are done.
            e->kill();
        }
    }
}

void
DmaReadFifo::serialize(CheckpointOut &cp) const
{
    assert(pendingRequests.empty());

    SERIALIZE_CONTAINER(buffer);
    SERIALIZE_SCALAR(endAddr);
    SERIALIZE_SCALAR(nextAddr);
}

void
DmaReadFifo::unserialize(CheckpointIn &cp)
{
    UNSERIALIZE_CONTAINER(buffer);
    UNSERIALIZE_SCALAR(endAddr);
    UNSERIALIZE_SCALAR(nextAddr);
}

bool
DmaReadFifo::tryGet(uint8_t *dst, size_t len)
{
    if (buffer.size() >= len) {
        buffer.read(dst, len);
        resumeFill();
        return true;
    } else {
        return false;
    }
}

void
DmaReadFifo::get(uint8_t *dst, size_t len)
{
    panic_if(!tryGet(dst, len), "Buffer underrun in DmaReadFifo::get()");
}

void
DmaReadFifo::startFill(Addr start, size_t size)
{
    assert(atEndOfBlock());

    nextAddr = start;
    endAddr = start + size;
    resumeFill();
}

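// Usage sketch (hedged; MyFrontend and its callbacks are hypothetical): a
// consumer typically subclasses DmaReadFifo, starts a block with startFill(),
// and drains the buffer from its own logic:
//
//     struct MyFrontend : public DmaReadFifo
//     {
//         using DmaReadFifo::DmaReadFifo;
//         void onEndOfBlock() override { /* queue the next block */ }
//         void onIdle() override { /* every outstanding DMA finished */ }
//     };
//
//     frontend.startFill(blockAddr, blockSize);
//     ...
//     uint8_t word[8];
//     if (frontend.tryGet(word, sizeof(word))) { /* consume 8 bytes */ }
//
// Because tryGet() calls resumeFill(), draining the FIFO automatically
// triggers further DMA reads until the whole block has been fetched.
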
void
DmaReadFifo::stopFill()
{
    // Prevent new DMA requests by setting the next address to the end
    // address. Pending requests will still complete.
    nextAddr = endAddr;

    // Flag in-flight accesses as canceled. This prevents their data
    // from being written to the FIFO.
    for (auto &p : pendingRequests)
        p->cancel();
}

void
DmaReadFifo::resumeFill()
{
    // Don't try to fetch more data if we are draining. This ensures
    // that the DMA engine settles down before we checkpoint it.
    if (drainState() == DrainState::Draining)
        return;

    const bool old_eob(atEndOfBlock());

    if (port.sys->bypassCaches())
        resumeFillBypass();
    else
        resumeFillTiming();

    if (!old_eob && atEndOfBlock())
        onEndOfBlock();
}

void
DmaReadFifo::resumeFillBypass()
{
    const size_t fifo_space = buffer.capacity() - buffer.size();
    if (fifo_space >= cacheLineSize || buffer.capacity() < cacheLineSize) {
        const size_t block_remaining = endAddr - nextAddr;
        const size_t xfer_size = std::min(fifo_space, block_remaining);
        std::vector<uint8_t> tmp_buffer(xfer_size);

        assert(pendingRequests.empty());
        DPRINTF(DMA, "Direct bypass startAddr=%#x xfer_size=%#x " \
                "fifo_space=%#x block_remaining=%#x\n",
                nextAddr, xfer_size, fifo_space, block_remaining);

        port.dmaAction(MemCmd::ReadReq, nextAddr, xfer_size, nullptr,
                       tmp_buffer.data(), 0, reqFlags);

        buffer.write(tmp_buffer.begin(), xfer_size);
        nextAddr += xfer_size;
    }
}

void
DmaReadFifo::resumeFillTiming()
{
    size_t size_pending(0);
    for (auto &e : pendingRequests)
        size_pending += e->requestSize();

    while (!freeRequests.empty() && !atEndOfBlock()) {
        const size_t req_size(std::min(maxReqSize, endAddr - nextAddr));
        if (buffer.size() + size_pending + req_size > fifoSize)
            break;

        DmaDoneEventUPtr event(std::move(freeRequests.front()));
        freeRequests.pop_front();
        assert(event);

        event->reset(req_size);
        port.dmaAction(MemCmd::ReadReq, nextAddr, req_size, event.get(),
                       event->data(), 0, reqFlags);
        nextAddr += req_size;
        size_pending += req_size;

        pendingRequests.emplace_back(std::move(event));
    }
}

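// Illustrative numbers for the loop above: with maxReqSize = 256 and
// fifoSize = 1024, 512 bytes already buffered, no requests outstanding and
// plenty of the block left, the first iteration issues a 256-byte read
// (512 + 0 + 256 <= 1024) and the second another (512 + 256 + 256 <= 1024);
// a third would exceed the FIFO capacity, so filling resumes only after the
// consumer drains data from the FIFO (tryGet() calls resumeFill()).
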
void
DmaReadFifo::dmaDone()
{
    const bool old_active(isActive());

    handlePending();
    resumeFill();

    if (old_active && !isActive())
        onIdle();
}

void
DmaReadFifo::handlePending()
{
    while (!pendingRequests.empty() && pendingRequests.front()->done()) {
        // Get the first finished pending request
        DmaDoneEventUPtr event(std::move(pendingRequests.front()));
        pendingRequests.pop_front();

        if (!event->canceled())
            buffer.write(event->data(), event->requestSize());

        // Move the event to the list of free requests
        freeRequests.emplace_back(std::move(event));
    }

    if (pendingRequests.empty())
        signalDrainDone();
}

DrainState
DmaReadFifo::drain()
{
    return pendingRequests.empty() ?
        DrainState::Drained : DrainState::Draining;
}


DmaReadFifo::DmaDoneEvent::DmaDoneEvent(DmaReadFifo *_parent, size_t max_size)
    : parent(_parent), _data(max_size, 0)
{
}

void
DmaReadFifo::DmaDoneEvent::kill()
{
    parent = nullptr;
    setFlags(AutoDelete);
}

void
DmaReadFifo::DmaDoneEvent::cancel()
{
    _canceled = true;
}

void
DmaReadFifo::DmaDoneEvent::reset(size_t size)
{
    assert(size <= _data.size());
    _done = false;
    _canceled = false;
    _requestSize = size;
}

void
DmaReadFifo::DmaDoneEvent::process()
{
    if (!parent)
        return;

    assert(!_done);
    _done = true;
    parent->dmaDone();
}

} // namespace gem5