gem5 v25.0.0.1
Loading...
Searching...
No Matches
tage_base.cc
Go to the documentation of this file.
1/*
2 * Copyright (c) 2014 The University of Wisconsin
3 * Copyright (c) 2024 Technical University of Munich
4 *
5 * Copyright (c) 2006 INRIA (Institut National de Recherche en
6 * Informatique et en Automatique / French National Research Institute
7 * for Computer Science and Applied Mathematics)
8 *
9 * All rights reserved.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions are
13 * met: redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer;
15 * redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution;
18 * neither the name of the copyright holders nor the names of its
19 * contributors may be used to endorse or promote products derived from
20 * this software without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
25 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
26 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
27 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
28 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
32 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35/* @file
36 * Implementation of a TAGE branch predictor
37 */
38
39#include "cpu/pred/tage_base.hh"
40
41#include "base/intmath.hh"
42#include "base/logging.hh"
43#include "debug/Fetch.hh"
44#include "debug/Tage.hh"
45
46namespace gem5
47{
48
49namespace branch_prediction
50{
81
83TAGEBase::makeBranchInfo(Addr pc, bool conditional) {
84 return new BranchInfo(*this, pc, conditional);
85}
86
87void
89{
90 if (initialized) {
91 return;
92 }
93
94 // Current method for periodically resetting the u counter bits only
95 // works for 1 or 2 bits
96 // Also make sure that it is not 0
97 assert(tagTableUBits <= 2 && (tagTableUBits > 0));
98
99 // we use int type for the path history, so it cannot be more than
100 // its size
101 assert(pathHistBits <= (sizeof(int)*8));
102
103 // initialize the counter to half of the period
104 assert(logUResetPeriod != 0);
106
107 assert(histBufferSize > maxHist * 3);
108
110
111 for (auto& history : threadHistory) {
112 history.pathHist = 0;
113 history.nonSpecPathHist = 0;
114 history.globalHist.resize(histBufferSize, 0);
115 history.ptGhist = 0;
116 }
117
118 histLengths = new int [nHistoryTables+1];
119
121
122 assert(tagTableTagWidths.size() == (nHistoryTables+1));
123 assert(logTagTableSizes.size() == (nHistoryTables+1));
124
125 // First entry is for the Bimodal table and it is untagged in this
126 // implementation
127 assert(tagTableTagWidths[0] == 0);
128
129 for (auto& history : threadHistory) {
130 history.computeIndices = new FoldedHistory[nHistoryTables+1];
131 history.computeTags[0] = new FoldedHistory[nHistoryTables+1];
132 history.computeTags[1] = new FoldedHistory[nHistoryTables+1];
133
134 initFoldedHistories(history);
135 }
136
137 const uint64_t bimodalTableSize = 1ULL << logTagTableSizes[0];
138 btablePrediction.resize(bimodalTableSize, false);
139 btableHysteresis.resize(bimodalTableSize >> logRatioBiModalHystEntries,
140 true);
141
142 gtable = new TageEntry*[nHistoryTables + 1];
144
145 tableIndices = new int [nHistoryTables+1];
146 tableTags = new int [nHistoryTables+1];
147 initialized = true;
148}
149
150void
152{
153 for (int i = 1; i <= nHistoryTables; i++) {
154 history.computeIndices[i].init(
156 history.computeTags[0][i].init(
158 history.computeTags[1][i].init(
160 DPRINTF(Tage, "HistLength:%d, TTSize:%d, TTTWidth:%d\n",
162 }
163}
164
165void
167{
168 for (int i = 1; i <= nHistoryTables; i++) {
169 gtable[i] = new TageEntry[1<<(logTagTableSizes[i])];
170 }
171}
172
173void
175{
176 histLengths[1] = minHist;
178
179 for (int i = 2; i <= nHistoryTables; i++) {
180 histLengths[i] = (int) (((double) minHist *
181 pow ((double) (maxHist) / (double) minHist,
182 (double) (i - 1) / (double) ((nHistoryTables- 1))))
183 + 0.5);
184 }
185}
186
187int
189{
190 return ((pc_in >> instShiftAmt) & ((1ULL << (logTagTableSizes[0])) - 1));
191}
192
193int
194TAGEBase::F(int A, int size, int bank) const
195{
196 int A1, A2;
197
198 A = A & ((1ULL << size) - 1);
199 A1 = (A & ((1ULL << logTagTableSizes[bank]) - 1));
200 A2 = (A >> logTagTableSizes[bank]);
201 A2 = ((A2 << bank) & ((1ULL << logTagTableSizes[bank]) - 1))
202 + (A2 >> (logTagTableSizes[bank] - bank));
203 A = A1 ^ A2;
204 A = ((A << bank) & ((1ULL << logTagTableSizes[bank]) - 1))
205 + (A >> (logTagTableSizes[bank] - bank));
206 return (A);
207}
208
209// gindex computes a full hash of pc, ghist and pathHist
210int
211TAGEBase::gindex(ThreadID tid, Addr pc, int bank) const
212{
213 int index;
214 int hlen = (histLengths[bank] > pathHistBits) ? pathHistBits :
215 histLengths[bank];
216 const unsigned int shiftedPc = pc >> instShiftAmt;
217 index =
218 shiftedPc ^
219 (shiftedPc >> ((int) abs(logTagTableSizes[bank] - bank) + 1)) ^
220 threadHistory[tid].computeIndices[bank].comp ^
221 F(threadHistory[tid].pathHist, hlen, bank);
222
223 return (index & ((1ULL << (logTagTableSizes[bank])) - 1));
224}
225
226
227// Tag computation
228uint16_t
229TAGEBase::gtag(ThreadID tid, Addr pc, int bank) const
230{
231 int tag = (pc >> instShiftAmt) ^
232 threadHistory[tid].computeTags[0][bank].comp ^
233 (threadHistory[tid].computeTags[1][bank].comp << 1);
234
235 return (tag & ((1ULL << tagTableTagWidths[bank]) - 1));
236}
237
238
239// Up-down saturating counter
240template<typename T>
241void
242TAGEBase::ctrUpdate(T & ctr, bool taken, int nbits)
243{
244 assert(nbits <= sizeof(T) << 3);
245 if (taken) {
246 if (ctr < ((1 << (nbits - 1)) - 1))
247 ctr++;
248 } else {
249 if (ctr > -(1 << (nbits - 1)))
250 ctr--;
251 }
252}
253
254// int8_t and int versions of this function may be needed
255template void TAGEBase::ctrUpdate(int8_t & ctr, bool taken, int nbits);
256template void TAGEBase::ctrUpdate(int & ctr, bool taken, int nbits);
257
258// Up-down unsigned saturating counter
259void
260TAGEBase::unsignedCtrUpdate(uint8_t & ctr, bool up, unsigned nbits)
261{
262 assert(nbits <= sizeof(uint8_t) << 3);
263 if (up) {
264 if (ctr < ((1 << nbits) - 1))
265 ctr++;
266 } else {
267 if (ctr)
268 ctr--;
269 }
270}
271
272// Bimodal prediction
273bool
275{
276 return btablePrediction[bi->bimodalIndex];
277}
278
279
280// Update the bimodal predictor: a hysteresis bit is shared among N prediction
281// bits (N = 2 ^ logRatioBiModalHystEntries)
282void
284{
285 int inter = (btablePrediction[bi->bimodalIndex] << 1)
287 if (taken) {
288 if (inter < 3)
289 inter++;
290 } else if (inter > 0) {
291 inter--;
292 }
293 const bool pred = inter >> 1;
294 const bool hyst = inter & 1;
295 btablePrediction[bi->bimodalIndex] = pred;
296 btableHysteresis[bi->bimodalIndex >> logRatioBiModalHystEntries] = hyst;
297 DPRINTF(Tage, "Updating branch %lx, pred:%d, hyst:%d\n", pc, pred, hyst);
298}
299
300// shifting the global history: we manage the history in a big table in order
301// to reduce simulation time
302void
303TAGEBase::updateGHist(ThreadID tid, uint64_t bv, uint8_t n)
304{
305 if (n == 0) return;
306
307 // Handle rollovers first.
308 ThreadHistory& tHist = threadHistory[tid];
309 if (tHist.ptGhist < n) {
310 DPRINTF(Tage, "Rolling over the histories\n");
311 // Copy beginning of globalHistoryBuffer to end, such that
312 // the last maxHist outcomes are still reachable
313 // through globalHist[0 .. maxHist - 1].
314 for (int i = 0; i < maxHist; i++) {
316 tHist.globalHist[tHist.ptGhist + i];
317 }
318
320 }
321
322 // Update the global history
323 for (int i = 0; i < n; i++) {
324 // Shift the next bit of the bit vector into the history
325 // Use `at` to check for out-of-bounds access.
326 tHist.ptGhist--;
327 tHist.globalHist.at(tHist.ptGhist) = (bv & 1) ? 1 : 0;
328 bv >>= 1;
329
330 // Update the folded histories with the new bit.
331 uint8_t *gh_ptr = &(tHist.globalHist[tHist.ptGhist]);
332
333 for (int i = 1; i <= nHistoryTables; i++) {
334 tHist.computeIndices[i].update(gh_ptr);
335 tHist.computeTags[0][i].update(gh_ptr);
336 tHist.computeTags[1][i].update(gh_ptr);
337 }
338 }
339}
340
341void
343 BranchInfo* bi)
344{
345 // computes the table addresses and the partial tags
346 for (int i = 1; i <= nHistoryTables; i++) {
347 tableIndices[i] = gindex(tid, branch_pc, i);
348 bi->tableIndices[i] = tableIndices[i];
349 tableTags[i] = gtag(tid, branch_pc, i);
350 bi->tableTags[i] = tableTags[i];
351 }
352 bi->valid = true;
353}
354
355unsigned
357{
358 // There is only 1 counter on the base TAGE implementation
359 return 0;
360}
361
362bool
364 bool cond_branch, BranchInfo* bi)
365{
366 if (!cond_branch) {
367 // Unconditional branch, predict taken
368 assert(bi->branchPC == branch_pc);
369 return true;
370 }
371
372 // TAGE prediction
373
374 calculateIndicesAndTags(tid, branch_pc, bi);
375 bi->bimodalIndex = bindex(branch_pc);
376
377 bi->hitBank = 0;
378 bi->altBank = 0;
379 //Look for the bank with longest matching history
380 for (int i = nHistoryTables; i > 0; i--) {
381 if (noSkip[i] &&
382 gtable[i][tableIndices[i]].tag == tableTags[i]) {
383 bi->hitBank = i;
384 bi->hitBankIndex = tableIndices[bi->hitBank];
385 break;
386 }
387 }
388 //Look for the alternate bank
389 for (int i = bi->hitBank - 1; i > 0; i--) {
390 if (noSkip[i] &&
391 gtable[i][tableIndices[i]].tag == tableTags[i]) {
392 bi->altBank = i;
393 bi->altBankIndex = tableIndices[bi->altBank];
394 break;
395 }
396 }
397 //computes the prediction and the alternate prediction
398 if (bi->hitBank > 0) {
399 if (bi->altBank > 0) {
400 bi->altTaken =
401 gtable[bi->altBank][tableIndices[bi->altBank]].ctr >= 0;
403 } else {
404 bi->altTaken = getBimodePred(branch_pc, bi);
405 }
406
407 bi->longestMatchPred =
408 gtable[bi->hitBank][tableIndices[bi->hitBank]].ctr >= 0;
409 bi->pseudoNewAlloc =
410 abs(2 * gtable[bi->hitBank][bi->hitBankIndex].ctr + 1) <= 1;
411
412 //if the entry is recognized as a newly allocated entry and
413 //useAltPredForNewlyAllocated is positive use the alternate
414 //prediction
415 if ((useAltPredForNewlyAllocated[getUseAltIdx(bi, branch_pc)] < 0)
416 || ! bi->pseudoNewAlloc) {
417 bi->tagePred = bi->longestMatchPred;
418 bi->provider = TAGE_LONGEST_MATCH;
419 } else {
420 bi->tagePred = bi->altTaken;
421 bi->provider = bi->altBank ? TAGE_ALT_MATCH
423 }
424 } else {
425 bi->altTaken = getBimodePred(branch_pc, bi);
426 bi->tagePred = bi->altTaken;
427 bi->longestMatchPred = bi->altTaken;
428 bi->provider = BIMODAL_ONLY;
429 }
430
431 DPRINTF(Tage, "Predict for %lx: tagePred:%d, altPred:%d\n",
432 branch_pc, bi->tagePred, bi->altTaken);
433
434 return bi->tagePred;
435}
436
437void
438TAGEBase::adjustAlloc(bool & alloc, bool taken, bool pred_taken)
439{
440 // Nothing for this base class implementation
441}
442
443void
445 int nrand)
446{
447 if (alloc) {
448 // is there some "unuseful" entry to allocate
449 uint8_t min = 1;
450 for (int i = nHistoryTables; i > bi->hitBank; i--) {
451 if (gtable[i][bi->tableIndices[i]].u < min) {
452 min = gtable[i][bi->tableIndices[i]].u;
453 }
454 }
455
456 // we allocate an entry with a longer history
457 // to avoid ping-pong, we do not choose systematically the next
458 // entry, but among the 3 next entries
459 int Y = nrand &
460 ((1ULL << (nHistoryTables - bi->hitBank - 1)) - 1);
461 int X = bi->hitBank + 1;
462 if (Y & 1) {
463 X++;
464 if (Y & 2)
465 X++;
466 }
467 // No entry available, forces one to be available
468 if (min > 0) {
469 gtable[X][bi->tableIndices[X]].u = 0;
470 }
471
472
473 //Allocate entries
474 unsigned numAllocated = 0;
475 for (int i = X; i <= nHistoryTables; i++) {
476 if (gtable[i][bi->tableIndices[i]].u == 0) {
477 gtable[i][bi->tableIndices[i]].tag = bi->tableTags[i];
478 gtable[i][bi->tableIndices[i]].ctr = (taken) ? 0 : -1;
479 ++numAllocated;
480 if (numAllocated == maxNumAlloc) {
481 break;
482 }
483 }
484 }
485 }
486
487 tCounter++;
488
489 handleUReset();
490}
491
492void
494{
495 //periodic reset of u: reset is not complete but bit by bit
496 if ((tCounter & ((1ULL << logUResetPeriod) - 1)) == 0) {
497 // reset least significant bit
498 // most significant bit becomes least significant bit
499 for (int i = 1; i <= nHistoryTables; i++) {
500 for (int j = 0; j < (1ULL << logTagTableSizes[i]); j++) {
501 resetUctr(gtable[i][j].u);
502 }
503 }
504 }
505}
506
507void
509{
510 u >>= 1;
511}
512
513void
514TAGEBase::condBranchUpdate(ThreadID tid, Addr branch_pc, bool taken,
515 BranchInfo* bi, int nrand, Addr corrTarget, bool pred, bool preAdjustAlloc)
516{
517 // TAGE UPDATE
518 // try to allocate a new entries only if prediction was wrong
519 bool alloc = (bi->tagePred != taken) && (bi->hitBank < nHistoryTables);
520
521 if (preAdjustAlloc) {
522 adjustAlloc(alloc, taken, pred);
523 }
524
525 if (bi->hitBank > 0) {
526 // Manage the selection between longest matching and alternate
527 // matching for "pseudo"-newly allocated longest matching entry
528 bool PseudoNewAlloc = bi->pseudoNewAlloc;
529 // an entry is considered as newly allocated if its prediction
530 // counter is weak
531 if (PseudoNewAlloc) {
532 if (bi->longestMatchPred == taken) {
533 alloc = false;
534 }
535 // if it was delivering the correct prediction, no need to
536 // allocate new entry even if the overall prediction was false
537 if (bi->longestMatchPred != bi->altTaken) {
538 ctrUpdate(
540 bi->altTaken == taken, useAltOnNaBits);
541 }
542 }
543 }
544
545 if (!preAdjustAlloc) {
546 adjustAlloc(alloc, taken, pred);
547 }
548
549 handleAllocAndUReset(alloc, taken, bi, nrand);
550
551 handleTAGEUpdate(branch_pc, taken, bi);
552}
553
554void
556{
557 if (bi->hitBank > 0) {
558 DPRINTF(Tage, "Updating tag table entry (%d,%d) for branch %lx\n",
559 bi->hitBank, bi->hitBankIndex, branch_pc);
560 ctrUpdate(gtable[bi->hitBank][bi->hitBankIndex].ctr, taken,
562 // if the provider entry is not certified to be useful also update
563 // the alternate prediction
564 if (gtable[bi->hitBank][bi->hitBankIndex].u == 0) {
565 if (bi->altBank > 0) {
566 ctrUpdate(gtable[bi->altBank][bi->altBankIndex].ctr, taken,
568 DPRINTF(Tage, "Updating tag table entry (%d,%d) for"
569 " branch %lx\n", bi->hitBank, bi->hitBankIndex,
570 branch_pc);
571 }
572 if (bi->altBank == 0) {
573 baseUpdate(branch_pc, taken, bi);
574 }
575 }
576
577 // update the u counter
578 if (bi->tagePred != bi->altTaken) {
579 unsignedCtrUpdate(gtable[bi->hitBank][bi->hitBankIndex].u,
580 bi->tagePred == taken, tagTableUBits);
581 }
582 } else {
583 baseUpdate(branch_pc, taken, bi);
584 }
585}
586
587void
589 Addr branch_pc, Addr target, BranchInfo* bi)
590{
591 ThreadHistory& tHist = threadHistory[tid];
592
593 // Update path history
594 tHist.pathHist =
595 calcNewPathHist(tid, branch_pc, tHist.pathHist, taken, brtype, target);
596
597 if (takenOnlyHistory) {
598 // Taken-only history is implemented after the paper:
599 // https://ieeexplore.ieee.org/document/9246215
600 //
601 // For taken-only history two bits of a hash of pc and its target
602 // is shifted into the global history in case the branch was taken.
603 // For not-taken branches no history update will happen.
604 if (taken) {
605 bi->ghist = (((branch_pc >> instShiftAmt) >> 2) ^
606 ((target >> instShiftAmt) >> 3));
607 bi->nGhist = 2;
608 }
609
610 } else {
611 // For normal direction history update the history by
612 // whether the branch was taken or not.
613 bi->ghist = taken ? 1 : 0;
614 bi->nGhist = 1;
615 }
616
617 // Update the global history
618 updateGHist(tid, bi->ghist, bi->nGhist);
619}
620
621
622void
623TAGEBase::updateHistories(ThreadID tid, Addr branch_pc, bool speculative,
624 bool taken, Addr target,
625 const StaticInstPtr & inst, BranchInfo* bi)
626{
627 if (speculative != speculativeHistUpdate) {
628 if (!speculative) {
629 // Save the speculative path history as non-speculative
630 threadHistory[tid].nonSpecPathHist =
631 calcNewPathHist(tid, branch_pc, bi->pathHist, taken,
632 branchTypeExtra(inst), target);
633 }
634 return;
635 }
636
637 // If this is the first time we see this branch record the current
638 // state of the history to be able to recover.
639 if (speculativeHistUpdate && (!bi->modified)) {
640 recordHistState(tid, bi);
641 }
642
643 // In case the branch already updated the history
644 // we need to revert the previous update first.
645 if (bi->modified) {
646 restoreHistState(tid, bi);
647 }
648
649 // Recalculate the tags and indices if needed. This can be the case
650 // as in the decoupled frontend branches can be inserted out of order
651 // (surprise branches). We can not compute the tags and indices
652 // at that point since the BPU might already speculated on other branches
653 // which updated the history. We can recalculate the tags and indices
654 // now since either the branch was correctly not taken and the history
655 // will not be updated or the branch was incorrect in which case the
656 // branches afterwards where squashed and the history was restored.
657 if (!bi->valid && bi->condBranch) {
658 calculateIndicesAndTags(tid, branch_pc, bi);
659 }
660
661 // We should have not already modified the history
662 // for this branch
663 assert(bi->nGhist == 0);
664
665 // Do the actual history update. Might be different for different
666 // TAGE implementations.
668 branch_pc, target, bi);
669 bi->modified = true;
670
671 DPRINTF(Tage,
672 "Updating global histories with branch:%lx; taken?:%d, "
673 "path Hist: %x; pointer:%d\n",
674 branch_pc, taken, threadHistory[tid].pathHist,
675 threadHistory[tid].ptGhist);
676}
677
678void
680{
681 ThreadHistory &tHist = threadHistory[tid];
682 bi->pathHist = tHist.pathHist;
683
684 for (int i = 1; i <= nHistoryTables; i++) {
685 bi->ci[i] = tHist.computeIndices[i].comp;
686 bi->ct0[i] = tHist.computeTags[0][i].comp;
687 bi->ct1[i] = tHist.computeTags[1][i].comp;
688 }
689}
690
691void
693{
695 return;
696 }
697 if (!bi->modified) {
698 return;
699 }
700
701 ThreadHistory& tHist = threadHistory[tid];
702 tHist.pathHist = bi->pathHist;
703
704 if (bi->nGhist == 0)
705 return;
706
707 // RESTORE HISTORIES
708 // Shift out the inserted bits from the folded history
709 // and the global history vector
710 for (int n = 0; n < bi->nGhist; n++) {
711 uint8_t *gh_ptr = &(tHist.globalHist[tHist.ptGhist]);
712
713 // First revert the folded history
714 for (int i = 1; i <= nHistoryTables; i++) {
715 tHist.computeIndices[i].restore(gh_ptr);
716 tHist.computeTags[0][i].restore(gh_ptr);
717 tHist.computeTags[1][i].restore(gh_ptr);
718 }
719 tHist.ptGhist++;
720 }
721 bi->nGhist = 0;
722 bi->modified = false;
723}
724
725int
726TAGEBase::calcNewPathHist(ThreadID tid, Addr pc, int cur_phist, bool taken,
727 int brtype, Addr target) const
728{
729 if (takenOnlyHistory && !taken) {
730 // For taken-only history we update only if the branch was taken
731 return cur_phist;
732 }
733 int pathbit = ((pc >> instShiftAmt) & 1);
734 cur_phist = (cur_phist << 1) + pathbit;
735 cur_phist = (cur_phist & ((1ULL << pathHistBits) - 1));
736 return cur_phist;
737}
738
739void
740TAGEBase::squash(ThreadID tid, bool taken, Addr target,
742{
743 updateHistories(tid, bi->branchPC, true, taken, target, inst, bi);
744}
745
746void
748{
749 // do nothing. This is only used in some derived classes
750 return;
751}
752
753void
755{
756 if (taken == bi->tagePred) {
757 // correct prediction
758 switch (bi->provider) {
759 case BIMODAL_ONLY: stats.bimodalProviderCorrect++; break;
760 case TAGE_LONGEST_MATCH: stats.longestMatchProviderCorrect++; break;
762 stats.bimodalAltMatchProviderCorrect++;
763 break;
764 case TAGE_ALT_MATCH: stats.altMatchProviderCorrect++; break;
765 }
766 } else {
767 // wrong prediction
768 switch (bi->provider) {
769 case BIMODAL_ONLY: stats.bimodalProviderWrong++; break;
771 stats.longestMatchProviderWrong++;
772 if (bi->altTaken == taken) {
773 stats.altMatchProviderWouldHaveHit++;
774 }
775 break;
777 stats.bimodalAltMatchProviderWrong++;
778 break;
779 case TAGE_ALT_MATCH:
780 stats.altMatchProviderWrong++;
781 break;
782 }
783
784 switch (bi->provider) {
786 case TAGE_ALT_MATCH:
787 if (bi->longestMatchPred == taken) {
788 stats.longestMatchProviderWouldHaveHit++;
789 }
790 }
791 }
792
793 switch (bi->provider) {
795 case TAGE_ALT_MATCH:
796 stats.longestMatchProvider[bi->hitBank]++;
797 stats.altMatchProvider[bi->altBank]++;
798 break;
799 }
800}
801
802unsigned
804{
805 unsigned val = 0;
806 int gh_ptr = threadHistory[tid].ptGhist;
807 for (unsigned i = 0; i < 16; i++) {
808 // Make sure we don't go out of bounds with `at`.
809 val |= ((threadHistory[tid].globalHist.at(gh_ptr + i) & 0x1) << i);
810 }
811 return val;
812}
813
814
816 statistics::Group *parent, unsigned nHistoryTables)
817 : statistics::Group(parent),
819 "Number of times TAGE Longest Match is the provider and the "
820 "prediction is correct"),
822 "Number of times TAGE Alt Match is the provider and the "
823 "prediction is correct"),
825 "Number of times TAGE Alt Match is the bimodal and it is the "
826 "provider and the prediction is correct"),
828 "Number of times there are no hits on the TAGE tables and the "
829 "bimodal prediction is correct"),
831 "Number of times TAGE Longest Match is the provider and the "
832 "prediction is wrong"),
834 "Number of times TAGE Alt Match is the provider and the "
835 "prediction is wrong"),
837 "Number of times TAGE Alt Match is the bimodal and it is the "
838 "provider and the prediction is wrong"),
840 "Number of times there are no hits on the TAGE tables and the "
841 "bimodal prediction is wrong"),
843 "Number of times TAGE Longest Match is the provider, the "
844 "prediction is wrong and Alt Match prediction was correct"),
846 "Number of times TAGE Alt Match is the provider, the "
847 "prediction is wrong and Longest Match prediction was correct"),
849 "TAGE provider for longest match"),
851 "TAGE provider for alt match")
852{
855}
856
int8_t
TAGEBase::getCtr(int hitBank, int hitBankIndex) const
{
    // Expose the raw signed prediction counter of a tagged-table entry.
    return gtable[hitBank][hitBankIndex].ctr;
}
862
863unsigned
865{
866 return tagTableCounterBits;
867}
868
869int
870TAGEBase::getPathHist(ThreadID tid, bool speculative) const
871{
872 return speculative ? threadHistory[tid].pathHist
873 : threadHistory[tid].nonSpecPathHist;
874}
875
876bool
881
882size_t
884 size_t bits = 0;
885 for (int i = 1; i <= nHistoryTables; i++) {
886 bits += (1 << logTagTableSizes[i]) *
888 }
889 uint64_t bimodalTableSize = 1ULL << logTagTableSizes[0];
891 bits += bimodalTableSize;
892 bits += (bimodalTableSize >> logRatioBiModalHystEntries);
896 return bits;
897}
898
899} // namespace branch_prediction
900} // namespace gem5
#define DPRINTF(x,...)
Definition trace.hh:209
int getPathHist(ThreadID tid, bool speculative=true) const
Definition tage_base.cc:870
TAGEBase(const TAGEBaseParams &p)
Definition tage_base.cc:51
virtual int branchTypeExtra(const StaticInstPtr &inst)
This function acts as a hook for other TAGE implementations to adjust the branch type.
Definition tage_base.hh:356
virtual BranchInfo * makeBranchInfo(Addr pc, bool conditional)
Definition tage_base.cc:83
virtual void handleAllocAndUReset(bool alloc, bool taken, BranchInfo *bi, int nrand)
Handles Allocation and U bits reset on an update.
Definition tage_base.cc:444
virtual void initFoldedHistories(ThreadHistory &history)
Initialization of the folded histories.
Definition tage_base.cc:151
static void ctrUpdate(T &ctr, bool taken, int nbits)
Updates a direction counter based on the actual branch outcome.
Definition tage_base.cc:242
void init() override
init() is called after all C++ SimObjects have been created and all ports are connected.
Definition tage_base.cc:88
virtual unsigned getUseAltIdx(BranchInfo *bi, Addr branch_pc)
Calculation of the index for useAltPredForNewlyAllocated. On this base TAGE implementation it is always 0.
Definition tage_base.cc:356
virtual void resetUctr(uint8_t &u)
Algorithm for resetting a single U counter.
Definition tage_base.cc:508
virtual void extraAltCalc(BranchInfo *bi)
Extra steps for calculating altTaken For this base TAGE class it does nothing.
Definition tage_base.cc:747
static void unsignedCtrUpdate(uint8_t &ctr, bool up, unsigned nbits)
Updates an unsigned counter based on up/down parameter.
Definition tage_base.cc:260
virtual void handleUReset()
Handles the U bits reset.
Definition tage_base.cc:493
virtual void updateStats(bool taken, BranchInfo *bi)
Update the stats.
Definition tage_base.cc:754
virtual void calculateParameters()
Calculates the history lengths and some other paramters in derived classes.
Definition tage_base.cc:174
const unsigned logRatioBiModalHystEntries
Definition tage_base.hh:484
std::vector< bool > btableHysteresis
Definition tage_base.hh:497
std::vector< bool > btablePrediction
Definition tage_base.hh:496
virtual void adjustAlloc(bool &alloc, bool taken, bool pred_taken)
Extra calculation to tell whether TAGE allocations may happen or not on an update For this base TAGE ...
Definition tage_base.cc:438
void recordHistState(ThreadID tid, BranchInfo *bi)
Records the current state of the histories to be able to restore it in case of a mispredicted specula...
Definition tage_base.cc:679
std::vector< ThreadHistory > threadHistory
Definition tage_base.hh:522
virtual void calculateIndicesAndTags(ThreadID tid, Addr branch_pc, BranchInfo *bi)
On a prediction, calculates the TAGE indices and tags for all the different history lengths.
Definition tage_base.cc:342
virtual bool getBimodePred(Addr pc, BranchInfo *bi) const
Get a branch prediction from the bimodal predictor.
Definition tage_base.cc:274
virtual int calcNewPathHist(ThreadID tid, Addr pc, int cur_phist, bool taken, int brtype, Addr target) const
Definition tage_base.cc:726
int8_t getCtr(int hitBank, int hitBankIndex) const
Definition tage_base.cc:858
virtual void squash(ThreadID tid, bool taken, Addr target, const StaticInstPtr &inst, BranchInfo *bi)
Restores speculatively updated path and direction histories.
Definition tage_base.cc:740
virtual uint16_t gtag(ThreadID tid, Addr pc, int bank) const
Computes the partial tag of a tagged table.
Definition tage_base.cc:229
std::vector< int > logTagTableSizes
Definition tage_base.hh:494
void baseUpdate(Addr pc, bool taken, BranchInfo *bi)
Updates the bimodal predictor.
Definition tage_base.cc:283
virtual int F(int phist, int size, int bank) const
Utility function to shuffle the path history depending on which tagged table we are accessing.
Definition tage_base.cc:194
bool tagePredict(ThreadID tid, Addr branch_pc, bool cond_branch, BranchInfo *bi)
TAGE prediction called from TAGE::predict.
Definition tage_base.cc:363
std::vector< int8_t > useAltPredForNewlyAllocated
Definition tage_base.hh:533
unsigned getGHR(ThreadID tid) const
Definition tage_base.cc:803
virtual void updatePathAndGlobalHistory(ThreadID tid, int brtype, bool taken, Addr branch_pc, Addr target, BranchInfo *bi)
Does the actual update of path and global history.
Definition tage_base.cc:588
const bool takenOnlyHistory
Use taken only history.
Definition tage_base.hh:541
virtual void updateHistories(ThreadID tid, Addr branch_pc, bool speculative, bool taken, Addr target, const StaticInstPtr &inst, BranchInfo *bi)
(Speculatively) updates global histories (path and direction).
Definition tage_base.cc:623
std::vector< unsigned > tagTableTagWidths
Definition tage_base.hh:493
gem5::branch_prediction::TAGEBase::TAGEBaseStats stats
virtual void condBranchUpdate(ThreadID tid, Addr branch_pc, bool taken, BranchInfo *bi, int nrand, Addr corrTarget, bool pred, bool preAdjustAlloc=false)
Update TAGE for conditional branches.
Definition tage_base.cc:514
virtual void buildTageTables()
Instantiates the TAGE table entries.
Definition tage_base.cc:166
void updateGHist(ThreadID tid, uint64_t bv, uint8_t n)
Internal history update function.
Definition tage_base.cc:303
virtual int gindex(ThreadID tid, Addr pc, int bank) const
Computes the index used to access a partially tagged table.
Definition tage_base.cc:211
virtual void handleTAGEUpdate(Addr branch_pc, bool taken, BranchInfo *bi)
Handles the update of the TAGE entries.
Definition tage_base.cc:555
virtual int bindex(Addr pc_in) const
Computes the index used to access the bimodal table.
Definition tage_base.cc:188
void restoreHistState(ThreadID tid, BranchInfo *bi)
Restore the state of the histories in case of detecting a mispredicted speculative update.
Definition tage_base.cc:692
Statistics container.
Definition group.hh:93
#define ADD_STAT(n,...)
Convenience macro to add a stat to a statistics group.
Definition group.hh:75
constexpr T bits(T val, unsigned first, unsigned last)
Extract the bitfield from position 'first' to 'last' (inclusive) from 'val' and right justify it.
Definition bitfield.hh:79
SimObject(const Params &p)
Definition sim_object.cc:58
Bitfield< 31 > n
Bitfield< 7 > i
Definition misc_types.hh:67
Bitfield< 23 > up
Definition types.hh:124
Bitfield< 22 > u
Bitfield< 4 > pc
Bitfield< 30, 0 > index
Bitfield< 0 > p
Bitfield< 20, 16 > bi
Definition types.hh:80
Bitfield< 27, 24 > pred
Definition types.hh:91
Bitfield< 15, 0 > X
Definition int.hh:58
Bitfield< 63 > val
Definition misc.hh:804
Units for Stats.
Definition units.hh:113
Copyright (c) 2024 Arm Limited All rights reserved.
Definition binary32.hh:36
int16_t ThreadID
Thread index/ID type.
Definition types.hh:235
uint64_t Addr
Address type This will probably be moved somewhere else in the near future.
Definition types.hh:147
RefCountingPtr< StaticInst > StaticInstPtr
void init(int original_length, int compressed_length)
Definition tage_base.hh:100
TAGEBaseStats(statistics::Group *parent, unsigned nHistoryTables)
Definition tage_base.cc:815

Generated on Sat Oct 18 2025 08:06:42 for gem5 by doxygen 1.14.0