tlb.cc
/*
 * Copyright (c) 2021 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its
 * contributors may be used to endorse or promote products derived from this
 * software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "arch/amdgpu/vega/tlb.hh"

#include <cmath>
#include <cstring>

#include "arch/amdgpu/common/gpu_translation_state.hh"
#include "arch/amdgpu/vega/faults.hh"
#include "arch/amdgpu/vega/pagetable_walker.hh"
#include "debug/GPUPrefetch.hh"
#include "debug/GPUTLB.hh"
#include "dev/amdgpu/amdgpu_device.hh"

namespace gem5
{
namespace VegaISA
{

// we have no limit for the number of translations we send
// downstream as we depend on the limit of the coalescer
// above us
GpuTLB::GpuTLB(const VegaGPUTLBParams &p)
    : ClockedObject(p), walker(p.walker),
      gpuDevice(p.gpu_device), size(p.size), stats(this),
      cleanupEvent([this]{ cleanup(); }, name(), false,
                   Event::Maximum_Pri)
{
    assoc = p.assoc;
    assert(assoc <= size);
    numSets = size/assoc;
    allocationPolicy = p.allocationPolicy;
    hasMemSidePort = false;

    tlb.assign(size, VegaTlbEntry());

    freeList.resize(numSets);
    entryList.resize(numSets);

    for (int set = 0; set < numSets; ++set) {
        for (int way = 0; way < assoc; ++way) {
            int x = set * assoc + way;
            freeList[set].push_back(&tlb.at(x));
        }
    }

    FA = (size == assoc);
    setMask = numSets - 1;
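
    // The mask above assumes numSets is a power of two: e.g., size = 64
    // and assoc = 8 give numSets = 8 and setMask = 0b111, so a page maps
    // to set (vaddr >> PageShift) & setMask (see lookupIt()).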

    maxCoalescedReqs = p.maxOutstandingReqs;

    outstandingReqs = 0;
    hitLatency = p.hitLatency;
    missLatency1 = p.missLatency1;
    missLatency2 = p.missLatency2;

    // create the response ports based on the number of connected ports
    for (size_t i = 0; i < p.port_cpu_side_ports_connection_count; ++i) {
        cpuSidePort.push_back(new CpuSidePort(csprintf("%s-port%d",
                              name(), i), this, i));
    }

    // create the requestor ports based on the number of connected ports
    for (size_t i = 0; i < p.port_mem_side_ports_connection_count; ++i) {
        memSidePort.push_back(new MemSidePort(csprintf("%s-port%d",
                              name(), i), this, i));
    }

    // assuming one walker per TLB, set our walker's TLB to this TLB.
    walker->setTLB(this);

    // gpuDevice should be non-null in full system only and is set by GpuTLB
    // params from the config file.
    if (gpuDevice) {
        gpuDevice->getVM().registerTLB(this);
    }
}

GpuTLB::~GpuTLB()
{
}

Port &
GpuTLB::getPort(const std::string &if_name, PortID idx)
{
    if (if_name == "cpu_side_ports") {
        if (idx >= static_cast<PortID>(cpuSidePort.size())) {
            panic("TLBCoalescer::getPort: unknown index %d\n", idx);
        }

        return *cpuSidePort[idx];
    } else if (if_name == "mem_side_ports") {
        if (idx >= static_cast<PortID>(memSidePort.size())) {
            panic("TLBCoalescer::getPort: unknown index %d\n", idx);
        }

        hasMemSidePort = true;

        return *memSidePort[idx];
    } else {
        panic("TLBCoalescer::getPort: unknown port %s\n", if_name);
    }
}

Fault
GpuTLB::createPagefault(Addr vaddr, Mode mode)
{
    DPRINTF(GPUTLB, "GPUTLB: Raising page fault.\n");
    ExceptionCode code;
    if (mode == BaseMMU::Read)
        code = ExceptionCode::LOAD_PAGE;
    else if (mode == BaseMMU::Write)
        code = ExceptionCode::STORE_PAGE;
    else
        code = ExceptionCode::INST_PAGE;
    return std::make_shared<PageFault>(vaddr, code, true, mode, true);
}

Addr
GpuTLB::pageAlign(Addr vaddr)
{
    Addr pageMask = mask(VegaISA::PageShift);
    return (vaddr & ~pageMask);
}
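
// insert() below implements per-set LRU allocation: a slot is taken from
// the set's freeList when one is available; otherwise the entry at the
// back of entryList (the least recently used) is overwritten. The new
// entry is pushed to the front, marking it most recently used.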

VegaTlbEntry*
GpuTLB::insert(Addr vpn, VegaTlbEntry &entry)
{
    VegaTlbEntry *newEntry = nullptr;

    int set = (entry.vaddr >> VegaISA::PageShift) & setMask;

    if (!freeList[set].empty()) {
        newEntry = freeList[set].front();
        freeList[set].pop_front();
    } else {
        newEntry = entryList[set].back();
        entryList[set].pop_back();
    }

    *newEntry = entry;
    entryList[set].push_front(newEntry);

    DPRINTF(GPUTLB, "Inserted %#lx -> %#lx of size %#lx into set %d\n",
            newEntry->vaddr, newEntry->paddr, entry.size(), set);

    return newEntry;
}
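
// Each set's entryList doubles as an LRU stack: the front is the most
// recently used entry. lookupIt() matches va against the half-open range
// [vaddr, vaddr + size), so entries of any page size are found, and, when
// update_lru is set, it moves the matching entry to the front of the list.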

GpuTLB::EntryList::iterator
GpuTLB::lookupIt(Addr va, bool update_lru)
{
    int set = (va >> VegaISA::PageShift) & setMask;

    if (FA) {
        assert(!set);
    }

    auto entry = entryList[set].begin();
    for (; entry != entryList[set].end(); ++entry) {
        int page_size = (*entry)->size();

        if ((*entry)->vaddr <= va && (*entry)->vaddr + page_size > va) {
            DPRINTF(GPUTLB, "Matched vaddr %#x to entry starting at %#x "
                    "with size %#x.\n", va, (*entry)->vaddr, page_size);

            if (update_lru) {
                entryList[set].push_front(*entry);
                // erase() invalidates the old iterator; the moved entry
                // now sits at the front of the list
                entryList[set].erase(entry);
                entry = entryList[set].begin();
            }

            break;
        }
    }

    return entry;
}

VegaTlbEntry*
GpuTLB::lookup(Addr va, bool update_lru)
{
    int set = (va >> VegaISA::PageShift) & setMask;

    auto entry = lookupIt(va, update_lru);

    if (entry == entryList[set].end())
        return nullptr;
    else
        return *entry;
}
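
// lookup(va, false) is a side-effect-free probe: it returns the entry (or
// nullptr) without reordering the LRU stack, which is how the hit path in
// issueTLBLookup() re-reads an entry after tlbLookup() has updated stats.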

void
GpuTLB::invalidateAll()
{
    DPRINTF(GPUTLB, "Invalidating all entries.\n");

    for (int i = 0; i < numSets; ++i) {
        while (!entryList[i].empty()) {
            VegaTlbEntry *entry = entryList[i].front();
            entryList[i].pop_front();
            freeList[i].push_back(entry);
        }
    }
}

void
GpuTLB::demapPage(Addr va, uint64_t asn)
{
    int set = (va >> VegaISA::PageShift) & setMask;
    auto entry = lookupIt(va, false);

    if (entry != entryList[set].end()) {
        freeList[set].push_back(*entry);
        entryList[set].erase(entry);
    }
}

/**
 * TLB_lookup will only perform a TLB lookup returning the TLB entry
 * on a TLB hit and nullptr on a TLB miss
 */
VegaTlbEntry *
GpuTLB::tlbLookup(const RequestPtr &req, bool update_stats)
{
    if (req->hasNoAddr()) {
        return NULL;
    }
    Addr vaddr = req->getVaddr();
    Addr alignedVaddr = pageAlign(vaddr);
    DPRINTF(GPUTLB, "TLB Lookup for vaddr %#x.\n", vaddr);

    //update LRU stack on a hit
    VegaTlbEntry *entry = lookup(alignedVaddr, true);

    if (!update_stats) {
        // functional tlb access for memory initialization
        // i.e., memory seeding or instr. seeding -> don't update
        // TLB and stats
        return entry;
    }

    stats.localNumTLBAccesses++;

    if (!entry) {
        stats.localNumTLBMisses++;
    } else {
        stats.localNumTLBHits++;
    }

    return entry;
}

Walker*
GpuTLB::getWalker()
{
    return walker;
}

void
GpuTLB::serialize(CheckpointOut &cp) const
{
}

void
GpuTLB::unserialize(CheckpointIn &cp)
{
}

/**
 * Do the TLB lookup for this coalesced request and schedule
 * another event <TLB access latency> cycles later.
 */
void
GpuTLB::issueTLBLookup(PacketPtr pkt)
{
    assert(pkt);
    assert(pkt->senderState);

    Addr virt_page_addr = roundDown(pkt->req->getVaddr(),
                                    VegaISA::PageBytes);

    GpuTranslationState *sender_state =
        safe_cast<GpuTranslationState*>(pkt->senderState);

    bool update_stats = !sender_state->isPrefetch;

    DPRINTF(GPUTLB, "Translation req. for virt. page addr %#x\n",
            virt_page_addr);

    int req_cnt = sender_state->reqCnt.back();

    if (update_stats) {
        stats.accessCycles -= (curCycle() * req_cnt);
        stats.localCycles -= curCycle();
        stats.globalNumTLBAccesses += req_cnt;
    }

    tlbOutcome lookup_outcome = TLB_MISS;
    const RequestPtr &tmp_req = pkt->req;

    // Access the TLB and figure out if it's a hit or a miss.
    auto entry = tlbLookup(tmp_req, update_stats);
    if (entry || pkt->req->hasNoAddr()) {
        // Put the entry in SenderState
        lookup_outcome = TLB_HIT;
        if (pkt->req->hasNoAddr()) {
            sender_state->tlbEntry =
                new VegaTlbEntry(1 /* VMID */, 0, 0, 0, 0);
            // set false because we shouldn't go to
            // host memory for a memtime request
            pkt->req->setSystemReq(false);
        } else {
            VegaTlbEntry *entry = lookup(virt_page_addr, false);
            assert(entry);

            // Set if this is a system request
            pkt->req->setSystemReq(entry->pte.s);

            Addr alignedPaddr = pageAlign(entry->paddr);
            sender_state->tlbEntry =
                new VegaTlbEntry(1 /* VMID */, virt_page_addr, alignedPaddr,
                                 entry->logBytes, entry->pte);
        }

        if (update_stats) {
            // the reqCnt has an entry per level, so its size tells us
            // which level we are in
            sender_state->hitLevel = sender_state->reqCnt.size();
            stats.globalNumTLBHits += req_cnt;
        }
    } else {
        if (update_stats)
            stats.globalNumTLBMisses += req_cnt;
    }

    /*
     * We now know the TLB lookup outcome (if it's a hit or a miss), as
     * well as the TLB access latency.
     *
     * We create and schedule a new TLBEvent which will help us take the
     * appropriate actions (e.g., update TLB on a hit, send request to
     * lower level TLB on a miss, or start a page walk if this was the
     * last-level TLB)
     */
    TLBEvent *tlb_event =
        new TLBEvent(this, virt_page_addr, lookup_outcome, pkt);

    if (translationReturnEvent.count(virt_page_addr)) {
        panic("Virtual Page Address %#x already has a return event\n",
              virt_page_addr);
    }

    translationReturnEvent[virt_page_addr] = tlb_event;
    assert(tlb_event);

    DPRINTF(GPUTLB, "schedule translationReturnEvent @ curTick %d\n",
            curTick() + cyclesToTicks(Cycles(hitLatency)));

    schedule(tlb_event, curTick() + cyclesToTicks(Cycles(hitLatency)));
}

GpuTLB::TLBEvent::TLBEvent(GpuTLB *_tlb, Addr _addr,
    tlbOutcome tlb_outcome, PacketPtr _pkt)
    : Event(CPU_Tick_Pri), tlb(_tlb), virtPageAddr(_addr),
      outcome(tlb_outcome), pkt(_pkt)
{
}

/**
 * Do Paging protection checks.
 */
void
GpuTLB::pagingProtectionChecks(PacketPtr pkt, VegaTlbEntry * tlb_entry,
                               Mode mode)
{
    // Do paging protection checks.
    bool badWrite = (!tlb_entry->writable());

    if (mode == BaseMMU::Write && badWrite) {
        // The page must have been present to get into the TLB in
        // the first place. We'll assume the reserved bits are
        // fine even though we're not checking them.
        fatal("Page fault on addr %lx PTE=%#lx", pkt->req->getVaddr(),
              (uint64_t)tlb_entry->pte);
    }
}
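
// As written, a write to a non-writable page ends simulation via fatal()
// rather than raising a guest-visible fault; createPagefault() above
// remains available as the hook for paths that deliver a real PageFault.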

void
GpuTLB::walkerResponse(VegaTlbEntry& entry, PacketPtr pkt)
{
    DPRINTF(GPUTLB, "WalkerResponse for %#lx. Entry: (%#lx, %#lx, %#lx)\n",
            pkt->req->getVaddr(), entry.vaddr, entry.paddr, entry.size());

    Addr virt_page_addr = roundDown(pkt->req->getVaddr(),
                                    VegaISA::PageBytes);

    Addr page_addr = entry.pte.ppn << VegaISA::PageShift;
    Addr paddr = page_addr + (entry.vaddr & mask(entry.logBytes));
    pkt->req->setPaddr(paddr);
    pkt->req->setSystemReq(entry.pte.s);

    GpuTranslationState *sender_state =
        safe_cast<GpuTranslationState*>(pkt->senderState);
    sender_state->tlbEntry = new VegaTlbEntry(entry);

    handleTranslationReturn(virt_page_addr, TLB_MISS, pkt);
}
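
// Physical address math in walkerResponse() above: pte.ppn already encodes
// the frame number, so shifting by PageShift yields the frame base, while
// vaddr & mask(logBytes) keeps the in-page offset (e.g., 12 bits for a
// 4 KiB page); their sum is the full physical address.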

/**
 * handleTranslationReturn is called on a TLB hit,
 * when a TLB miss returns or when a page fault returns.
 */
void
GpuTLB::handleTranslationReturn(Addr virt_page_addr,
    tlbOutcome tlb_outcome, PacketPtr pkt)
{
    assert(pkt);
    Addr vaddr = pkt->req->getVaddr();

    GpuTranslationState *sender_state =
        safe_cast<GpuTranslationState*>(pkt->senderState);

    Mode mode = sender_state->tlbMode;

    VegaTlbEntry *local_entry, *new_entry;

    int req_cnt = sender_state->reqCnt.back();
    bool update_stats = !sender_state->isPrefetch;

    if (update_stats) {
        stats.accessCycles += (req_cnt * curCycle());
        stats.localCycles += curCycle();
    }

    if (tlb_outcome == TLB_HIT) {
        DPRINTF(GPUTLB, "Translation Done - TLB Hit for addr %#x\n",
                vaddr);
        local_entry = safe_cast<VegaTlbEntry *>(sender_state->tlbEntry);
    } else {
        DPRINTF(GPUTLB, "Translation Done - TLB Miss for addr %#x\n",
                vaddr);

        /**
         * We are returning either from a page walk or from a hit at a
         * lower TLB level. The senderState should be "carrying" a
         * pointer to the correct TLB entry.
         */
        new_entry = safe_cast<VegaTlbEntry *>(sender_state->tlbEntry);
        assert(new_entry);
        local_entry = new_entry;

        if (allocationPolicy) {
            assert(new_entry->pte);
            DPRINTF(GPUTLB, "allocating entry w/ addr %#lx of size %#lx\n",
                    virt_page_addr, new_entry->size());

            local_entry = insert(virt_page_addr, *new_entry);
        }

        assert(local_entry);
    }

    /**
     * At this point the packet carries an up-to-date tlbEntry pointer
     * in its senderState.
     * Next step is to do the paging protection checks.
     */
    DPRINTF(GPUTLB, "Entry found with vaddr %#x, doing protection checks "
            "while paddr was %#x.\n", local_entry->vaddr,
            local_entry->paddr);

    pagingProtectionChecks(pkt, local_entry, mode);
    int page_size = local_entry->size();
    Addr paddr = local_entry->paddr + (vaddr & (page_size - 1));
    DPRINTF(GPUTLB, "Translated %#x -> %#x.\n", vaddr, paddr);

    // Since this packet will be sent through the cpu side port, it must be
    // converted to a response pkt if it is not one already
    if (pkt->isRequest()) {
        pkt->makeTimingResponse();
    }

    pkt->req->setPaddr(paddr);

    if (local_entry->uncacheable()) {
        pkt->req->setFlags(Request::UNCACHEABLE);
    }

    //send packet back to coalescer
    cpuSidePort[0]->sendTimingResp(pkt);
    //schedule cleanup event
    cleanupQueue.push(virt_page_addr);

    DPRINTF(GPUTLB, "Scheduled %#lx for cleanup\n", virt_page_addr);

    // schedule this only once per cycle.
    // The check is required because we might have multiple translations
    // returning in the same cycle.
    // This is a maximum priority event and must be on the same cycle
    // as the cleanup event in TLBCoalescer to avoid a race with
    // IssueProbeEvent caused by TLBCoalescer::MemSidePort::recvReqRetry
    if (!cleanupEvent.scheduled())
        schedule(cleanupEvent, curTick());
}

/**
 * A TLBEvent is scheduled after the TLB lookup and helps us take the
 * appropriate actions: (e.g., update TLB on a hit, send request to
 * lower level TLB on a miss, or start a page walk if this was the
 * last-level TLB).
 */
void
GpuTLB::translationReturn(Addr virtPageAddr, tlbOutcome outcome,
    PacketPtr pkt)
{
    DPRINTF(GPUTLB, "Triggered TLBEvent for addr %#x\n", virtPageAddr);

    assert(translationReturnEvent[virtPageAddr]);
    assert(pkt);

    GpuTranslationState *tmp_sender_state =
        safe_cast<GpuTranslationState*>(pkt->senderState);

    int req_cnt = tmp_sender_state->reqCnt.back();
    bool update_stats = !tmp_sender_state->isPrefetch;

    if (outcome == TLB_HIT) {
        handleTranslationReturn(virtPageAddr, TLB_HIT, pkt);

    } else if (outcome == TLB_MISS) {

        DPRINTF(GPUTLB, "This is a TLB miss\n");
        if (hasMemSidePort) {
            // the one cycle added here represents the delay from when we
            // get the reply back until we propagate it to the coalescer
            // above.

            tmp_sender_state->deviceId = 1;
            tmp_sender_state->pasId = 0;

            if (!memSidePort[0]->sendTimingReq(pkt)) {
                DPRINTF(GPUTLB, "Failed sending translation request to "
                        "lower level TLB for addr %#x\n", virtPageAddr);

                memSidePort[0]->retries.push_back(pkt);
            } else {
                DPRINTF(GPUTLB, "Sent translation request to lower level "
                        "TLB for addr %#x\n", virtPageAddr);
            }
        } else {
            //this is the last level TLB. Start a page walk
            DPRINTF(GPUTLB, "Last level TLB - start a page walk for "
                    "addr %#x\n", virtPageAddr);

            if (update_stats)
                stats.pageTableCycles -= (req_cnt*curCycle());

            TLBEvent *tlb_event = translationReturnEvent[virtPageAddr];
            assert(tlb_event);
            tlb_event->updateOutcome(PAGE_WALK);
            schedule(tlb_event,
                     curTick() + cyclesToTicks(Cycles(missLatency2)));
        }
    } else if (outcome == PAGE_WALK) {
        if (update_stats)
            stats.pageTableCycles += (req_cnt*curCycle());

        // Need to access the page table and update the TLB
        DPRINTF(GPUTLB, "Doing a page walk for address %#x\n",
                virtPageAddr);

        Addr base = gpuDevice->getVM().getPageTableBase(1);
        Addr vaddr = pkt->req->getVaddr();
        walker->setDevRequestor(gpuDevice->vramRequestorId());

        // Do page table walk
        walker->startTiming(pkt, base, vaddr, BaseMMU::Mode::Read);
    } else if (outcome == MISS_RETURN) {
        /** we add an extra cycle in the return path of the translation
         * requests in between the various TLB levels.
         */
        handleTranslationReturn(virtPageAddr, TLB_MISS, pkt);
    } else {
        panic("Unexpected TLB outcome %d", outcome);
    }
}
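
// Note that this TLB model walks and fills with a fixed VMID of 1
// throughout: getPageTableBase(1) above and the `1 /* VMID */`
// VegaTlbEntry constructions elsewhere in this file.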

void
GpuTLB::TLBEvent::process()
{
    tlb->translationReturn(virtPageAddr, outcome, pkt);
}

const char*
GpuTLB::TLBEvent::description() const
{
    return "trigger translationDoneEvent";
}

void
GpuTLB::TLBEvent::updateOutcome(tlbOutcome _outcome)
{
    outcome = _outcome;
}

Addr
GpuTLB::TLBEvent::getTLBEventVaddr()
{
    return virtPageAddr;
}

/**
 * recvTiming receives a coalesced timing request from a TLBCoalescer
 * and it calls issueTLBLookup(). It only rejects the packet if we have
 * exceeded the max number of outstanding requests for the TLB.
 */
bool
GpuTLB::CpuSidePort::recvTimingReq(PacketPtr pkt)
{
    bool ret = false;
    [[maybe_unused]] Addr virt_page_addr = roundDown(pkt->req->getVaddr(),
                                                     VegaISA::PageBytes);

    if (tlb->outstandingReqs < tlb->maxCoalescedReqs) {
        assert(!tlb->translationReturnEvent.count(virt_page_addr));
        tlb->issueTLBLookup(pkt);
        // update number of outstanding translation requests
        tlb->outstandingReqs++;
        ret = true;
    } else {
        DPRINTF(GPUTLB, "Reached maxCoalescedReqs number %d\n",
                tlb->outstandingReqs);
        tlb->stats.maxDownstreamReached++;
        ret = false;
    }

    if (tlb->outstandingReqs > tlb->stats.outstandingReqsMax.value())
        tlb->stats.outstandingReqsMax = tlb->outstandingReqs;

    return ret;
}

/**
 * handleFuncTranslationReturn is called on a TLB hit,
 * when a TLB miss returns or when a page fault returns.
 */
void
GpuTLB::handleFuncTranslationReturn(PacketPtr pkt, tlbOutcome tlb_outcome)
{
    GpuTranslationState *sender_state =
        safe_cast<GpuTranslationState*>(pkt->senderState);

    Mode mode = sender_state->tlbMode;
    Addr vaddr = pkt->req->getVaddr();

    VegaTlbEntry *local_entry, *new_entry;

    if (tlb_outcome == TLB_HIT) {
        DPRINTF(GPUTLB, "Functional Translation Done - TLB hit for addr "
                "%#x\n", vaddr);

        local_entry = safe_cast<VegaTlbEntry *>(sender_state->tlbEntry);
    } else {
        DPRINTF(GPUTLB, "Functional Translation Done - TLB miss for addr "
                "%#x\n", vaddr);

        /**
         * We are returning either from a page walk or from a hit at a
         * lower TLB level. The senderState should be "carrying" a
         * pointer to the correct TLB entry.
         */
        new_entry = safe_cast<VegaTlbEntry *>(sender_state->tlbEntry);
        assert(new_entry);
        local_entry = new_entry;

        if (allocationPolicy) {
            Addr virt_page_addr = roundDown(vaddr, VegaISA::PageBytes);

            DPRINTF(GPUTLB, "allocating entry w/ addr %#lx\n",
                    virt_page_addr);

            local_entry = insert(virt_page_addr, *new_entry);
        }

        assert(local_entry);
    }

    DPRINTF(GPUTLB, "Entry found with vaddr %#x, doing protection checks "
            "while paddr was %#x.\n", local_entry->vaddr,
            local_entry->paddr);

    /**
     * Do paging checks if it's a normal functional access. If it's for
     * a prefetch, then sometimes you can try to prefetch something that
     * won't pass protection. We don't actually want to fault because
     * there is no demand access to deem this a violation. Just put it
     * in the TLB and it will fault if indeed a future demand access
     * touches it in violation.
     */
    if (!sender_state->isPrefetch && sender_state->tlbEntry)
        pagingProtectionChecks(pkt, local_entry, mode);

    int page_size = local_entry->size();
    Addr paddr = local_entry->paddr + (vaddr & (page_size - 1));
    DPRINTF(GPUTLB, "Translated %#x -> %#x.\n", vaddr, paddr);

    pkt->req->setPaddr(paddr);

    if (local_entry->uncacheable())
        pkt->req->setFlags(Request::UNCACHEABLE);
}

// This is used for atomic translations. Need to
// make it all happen during the same cycle.
void
GpuTLB::CpuSidePort::recvFunctional(PacketPtr pkt)
{
    GpuTranslationState *sender_state =
        safe_cast<GpuTranslationState*>(pkt->senderState);

    bool update_stats = !sender_state->isPrefetch;

    Addr virt_page_addr = roundDown(pkt->req->getVaddr(),
                                    VegaISA::PageBytes);

    // do the TLB lookup without updating the stats
    bool success = tlb->tlbLookup(pkt->req, update_stats);
    tlbOutcome tlb_outcome = success ? TLB_HIT : TLB_MISS;

    // functional mode means no coalescing
    // global metrics are the same as the local metrics
    if (update_stats) {
        tlb->stats.globalNumTLBAccesses++;

        if (success) {
            sender_state->hitLevel = sender_state->reqCnt.size();
            tlb->stats.globalNumTLBHits++;
        } else {
            tlb->stats.globalNumTLBMisses++;
        }
    }

    if (!success) {
        if (tlb->hasMemSidePort) {
            // there is a TLB below -> propagate down the TLB hierarchy
            tlb->memSidePort[0]->sendFunctional(pkt);
            // If no valid translation from a prefetch, then just return
            if (sender_state->isPrefetch && !pkt->req->hasPaddr())
                return;
        } else {
            // Need to access the page table and update the TLB
            DPRINTF(GPUTLB, "Doing a page walk for address %#x\n",
                    virt_page_addr);

            Addr vaddr = pkt->req->getVaddr();
            [[maybe_unused]] Addr alignedVaddr =
                tlb->pageAlign(virt_page_addr);
            assert(alignedVaddr == virt_page_addr);

            unsigned logBytes;
            PageTableEntry pte;

            // Initialize walker state for VMID
            Addr base = tlb->gpuDevice->getVM().getPageTableBase(1);
            tlb->walker->setDevRequestor(tlb->gpuDevice->vramRequestorId());

            // Do page table walk
            Fault fault = tlb->walker->startFunctional(base, vaddr, pte,
                                                       logBytes,
                                                       BaseMMU::Mode::Read);
            if (fault != NoFault) {
                fatal("Translation fault in TLB at %d!", __LINE__);
            }

            // PPN is already shifted by fragment so we only shift by native
            // page size. Fragment is still used via logBytes to select lower
            // bits from vaddr.
            Addr page_addr = pte.ppn << PageShift;
            Addr paddr = page_addr + (vaddr & mask(logBytes));
            Addr alignedPaddr = tlb->pageAlign(paddr);
            pkt->req->setPaddr(paddr);
            pkt->req->setSystemReq(pte.s);

            if (!sender_state->isPrefetch) {
                assert(paddr);

                DPRINTF(GPUTLB, "Mapping %#x to %#x\n", vaddr, paddr);

                sender_state->tlbEntry =
                    new VegaTlbEntry(1 /* VMID */, virt_page_addr,
                                     alignedPaddr, logBytes, pte);
            } else {
                // If this was a prefetch, then do the normal thing if it
                // was a successful translation. Otherwise, send an empty
                // TLB entry back so that it can be figured out as empty
                // and handled accordingly.
                if (paddr) {
                    DPRINTF(GPUTLB, "Mapping %#x to %#x\n", vaddr, paddr);

                    sender_state->tlbEntry =
                        new VegaTlbEntry(1 /* VMID */, virt_page_addr,
                                         alignedPaddr, logBytes, pte);
                } else {
                    DPRINTF(GPUPrefetch, "Prefetch failed %#x\n", vaddr);

                    sender_state->tlbEntry = nullptr;

                    return;
                }
            }
        }
    } else {
        VegaTlbEntry *entry = tlb->lookup(virt_page_addr, update_stats);
        assert(entry);

        if (sender_state->isPrefetch) {
            DPRINTF(GPUPrefetch, "Functional Hit for vaddr %#x\n",
                    entry->vaddr);
        }

        sender_state->tlbEntry = new VegaTlbEntry(1 /* VMID */, entry->vaddr,
                                                  entry->paddr,
                                                  entry->logBytes,
                                                  entry->pte);
    }

    // This is the function that would populate pkt->req with the paddr of
    // the translation. But if no translation happens (i.e., a prefetch
    // fails) then the early returns in the above code will keep this
    // function from executing.
    tlb->handleFuncTranslationReturn(pkt, tlb_outcome);
}

void
GpuTLB::CpuSidePort::recvReqRetry()
{
    // The CPUSidePort never sends anything but replies. No retries
    // expected.
    panic("recvReqRetry called");
}

AddrRangeList
GpuTLB::CpuSidePort::getAddrRanges() const
{
    // currently not checked by the requestor
    AddrRangeList ranges;

    return ranges;
}

/**
 * MemSidePort receives the packet back.
 */
bool
GpuTLB::MemSidePort::recvTimingResp(PacketPtr pkt)
{
    Addr virt_page_addr = roundDown(pkt->req->getVaddr(),
                                    VegaISA::PageBytes);

    DPRINTF(GPUTLB, "MemSidePort recvTiming for virt_page_addr %#x\n",
            virt_page_addr);

    TLBEvent *tlb_event = tlb->translationReturnEvent[virt_page_addr];
    assert(tlb_event);
    assert(virt_page_addr == tlb_event->getTLBEventVaddr());

    tlb_event->updateOutcome(MISS_RETURN);
    tlb->schedule(tlb_event, curTick()+tlb->clockPeriod());

    return true;
}

void
GpuTLB::MemSidePort::recvReqRetry()
{
    // No retries should reach the TLB. The retries
    // should only reach the TLBCoalescer.
    panic("recvReqRetry called");
}

void
GpuTLB::cleanup()
{
    while (!cleanupQueue.empty()) {
        Addr cleanup_addr = cleanupQueue.front();
        cleanupQueue.pop();

        DPRINTF(GPUTLB, "Deleting return event for %#lx\n", cleanup_addr);

        // delete TLBEvent
        TLBEvent * old_tlb_event = translationReturnEvent[cleanup_addr];
        delete old_tlb_event;
        translationReturnEvent.erase(cleanup_addr);

        // update number of outstanding requests
        outstandingReqs--;
    }

    /** the higher level coalescer should retry if it has any pending
     * requests.
     */
    for (int i = 0; i < cpuSidePort.size(); ++i) {
        cpuSidePort[i]->sendRetryReq();
    }
}

GpuTLB::VegaTLBStats::VegaTLBStats(statistics::Group *parent)
    : statistics::Group(parent),
      ADD_STAT(maxDownstreamReached, "Number of refused translation requests"),
      ADD_STAT(outstandingReqsMax, "Maximum count in coalesced request queue"),
      ADD_STAT(localNumTLBAccesses, "Number of TLB accesses"),
      ADD_STAT(localNumTLBHits, "Number of TLB hits"),
      ADD_STAT(localNumTLBMisses, "Number of TLB misses"),
      ADD_STAT(localTLBMissRate, "TLB miss rate"),
      ADD_STAT(globalNumTLBAccesses, "Number of TLB accesses"),
      ADD_STAT(globalNumTLBHits, "Number of TLB hits"),
      ADD_STAT(globalNumTLBMisses, "Number of TLB misses"),
      ADD_STAT(globalTLBMissRate, "TLB miss rate"),
      ADD_STAT(accessCycles, "Cycles spent accessing this TLB level"),
      ADD_STAT(pageTableCycles, "Cycles spent accessing the page table"),
      ADD_STAT(localCycles, "Number of cycles spent in queue for all "
               "incoming reqs"),
      ADD_STAT(localLatency, "Avg. latency over incoming coalesced reqs")
{
    localTLBMissRate = 100 * localNumTLBMisses / localNumTLBAccesses;
    globalTLBMissRate = 100 * globalNumTLBMisses / globalNumTLBAccesses;

    localLatency = localCycles / localNumTLBAccesses;
}
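
// The formula stats above are evaluated at dump time; for example, 10
// misses out of 100 accesses would report localTLBMissRate =
// 100 * 10 / 100 = 10 (a percentage), while localLatency averages the
// queueing cycles over all incoming requests.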

} // namespace VegaISA
} // namespace gem5