gem5 [DEVELOP-FOR-25.0]
Loading...
Searching...
No Matches
tage_base.cc
Go to the documentation of this file.
1/*
2 * Copyright (c) 2014 The University of Wisconsin
3 * Copyright (c) 2024 Technical University of Munich
4 *
5 * Copyright (c) 2006 INRIA (Institut National de Recherche en
6 * Informatique et en Automatique / French National Research Institute
7 * for Computer Science and Applied Mathematics)
8 *
9 * All rights reserved.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions are
13 * met: redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer;
15 * redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution;
18 * neither the name of the copyright holders nor the names of its
19 * contributors may be used to endorse or promote products derived from
20 * this software without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
25 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
26 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
27 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
28 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
32 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35/* @file
36 * Implementation of a TAGE branch predictor
37 */
38
39#include "cpu/pred/tage_base.hh"
40
41#include "base/intmath.hh"
42#include "base/logging.hh"
43#include "debug/Fetch.hh"
44#include "debug/Tage.hh"
45
46namespace gem5
47{
48
49namespace branch_prediction
50{
51
81
// Factory hook for the per-branch bookkeeping record. Derived TAGE
// variants override this to allocate an extended BranchInfo.
// NOTE(review): the return-type line (original line 82,
// "TAGEBase::BranchInfo*") is missing from this extraction -- verify
// against the upstream tage_base.cc.
83TAGEBase::makeBranchInfo(Addr pc, bool conditional) {
84 return new BranchInfo(*this, pc, conditional);
85}
86
// One-time setup: validates parameters, allocates the per-thread global
// history buffers, the folded histories, the bimodal tables and the
// tagged-table index/tag scratch arrays.
// NOTE(review): this extraction is missing original lines 88 (signature),
// 105 (tCounter init), 109/122 (calculateParameters call and histLengths
// alloc context) and 145 (buildTageTables call) -- verify upstream.
87void
89{
// Guard against double initialization (init() may be called once only).
90 if (initialized) {
91 return;
92 }
93
94 // Current method for periodically resetting the u counter bits only
95 // works for 1 or 2 bits
96 // Also make sure that it is not 0
97 assert(tagTableUBits <= 2 && (tagTableUBits > 0));
98
99 // we use int type for the path history, so it cannot be more than
100 // its size
101 assert(pathHistBits <= (sizeof(int)*8));
102
103 // initialize the counter to half of the period
104 assert(logUResetPeriod != 0);
106
107 assert(histBufferSize > maxHist * 3);
108
110
// Per-thread direction/path history state; gHist points into the
// circular globalHistory buffer.
111 for (auto& history : threadHistory) {
112 history.pathHist = 0;
113 history.nonSpecPathHist = 0;
114 history.globalHistory = new uint8_t[histBufferSize];
115 history.gHist = history.globalHistory;
116 memset(history.gHist, 0, histBufferSize);
117 history.ptGhist = 0;
118 }
119
120 histLengths = new int [nHistoryTables+1];
121
123
124 assert(tagTableTagWidths.size() == (nHistoryTables+1));
125 assert(logTagTableSizes.size() == (nHistoryTables+1));
126
127 // First entry is for the Bimodal table and it is untagged in this
128 // implementation
129 assert(tagTableTagWidths[0] == 0);
130
// Folded (compressed) histories used for index/tag computation, two
// differently-folded tag histories per table.
131 for (auto& history : threadHistory) {
132 history.computeIndices = new FoldedHistory[nHistoryTables+1];
133 history.computeTags[0] = new FoldedHistory[nHistoryTables+1];
134 history.computeTags[1] = new FoldedHistory[nHistoryTables+1];
135
136 initFoldedHistories(history);
137 }
138
// One hysteresis bit is shared among 2^logRatioBiModalHystEntries
// prediction bits, hence the smaller hysteresis table.
139 const uint64_t bimodalTableSize = 1ULL << logTagTableSizes[0];
140 btablePrediction.resize(bimodalTableSize, false);
141 btableHysteresis.resize(bimodalTableSize >> logRatioBiModalHystEntries,
142 true);
143
144 gtable = new TageEntry*[nHistoryTables + 1];
146
147 tableIndices = new int [nHistoryTables+1];
148 tableTags = new int [nHistoryTables+1];
149 initialized = true;
150}
151
// Initializes the folded-history registers (index + two tag foldings)
// for every tagged table of one thread.
// NOTE(review): the argument lists of the init() calls and the DPRINTF
// (original lines 157/159/161/163) are missing from this extraction --
// verify against upstream tage_base.cc.
152void
154{
155 for (int i = 1; i <= nHistoryTables; i++) {
156 history.computeIndices[i].init(
158 history.computeTags[0][i].init(
160 history.computeTags[1][i].init(
162 DPRINTF(Tage, "HistLength:%d, TTSize:%d, TTTWidth:%d\n",
164 }
165}
166
// Allocates the tagged prediction tables; table i holds
// 2^logTagTableSizes[i] entries. Table 0 (bimodal) is handled separately.
// NOTE(review): signature line (original 168) missing from extraction.
167void
169{
170 for (int i = 1; i <= nHistoryTables; i++) {
171 gtable[i] = new TageEntry[1<<(logTagTableSizes[i])];
172 }
173}
174
// Computes the geometric series of history lengths between minHist and
// maxHist (the "GEometric" part of TAGE).
// NOTE(review): original lines 176 (signature) and 179 (presumably
// histLengths[nHistoryTables] = maxHist) are missing -- verify upstream.
175void
177{
178 histLengths[1] = minHist;
180
// Intermediate lengths follow minHist * (maxHist/minHist)^((i-1)/(n-1)),
// rounded to nearest integer via the +0.5.
181 for (int i = 2; i <= nHistoryTables; i++) {
182 histLengths[i] = (int) (((double) minHist *
183 pow ((double) (maxHist) / (double) minHist,
184 (double) (i - 1) / (double) ((nHistoryTables- 1))))
185 + 0.5);
186 }
187}
188
// Index into the (untagged) bimodal table: shifted PC modulo table size.
// NOTE(review): signature line (original 190) missing from extraction.
189int
191{
192 return ((pc_in >> instShiftAmt) & ((1ULL << (logTagTableSizes[0])) - 1));
193}
194
195int
196TAGEBase::F(int A, int size, int bank) const
197{
198 int A1, A2;
199
200 A = A & ((1ULL << size) - 1);
201 A1 = (A & ((1ULL << logTagTableSizes[bank]) - 1));
202 A2 = (A >> logTagTableSizes[bank]);
203 A2 = ((A2 << bank) & ((1ULL << logTagTableSizes[bank]) - 1))
204 + (A2 >> (logTagTableSizes[bank] - bank));
205 A = A1 ^ A2;
206 A = ((A << bank) & ((1ULL << logTagTableSizes[bank]) - 1))
207 + (A >> (logTagTableSizes[bank] - bank));
208 return (A);
209}
210
211// gindex computes a full hash of pc, ghist and pathHist
212int
213TAGEBase::gindex(ThreadID tid, Addr pc, int bank) const
214{
215 int index;
216 int hlen = (histLengths[bank] > pathHistBits) ? pathHistBits :
217 histLengths[bank];
218 const unsigned int shiftedPc = pc >> instShiftAmt;
219 index =
220 shiftedPc ^
221 (shiftedPc >> ((int) abs(logTagTableSizes[bank] - bank) + 1)) ^
222 threadHistory[tid].computeIndices[bank].comp ^
223 F(threadHistory[tid].pathHist, hlen, bank);
224
225 return (index & ((1ULL << (logTagTableSizes[bank])) - 1));
226}
227
228
229// Tag computation
230uint16_t
231TAGEBase::gtag(ThreadID tid, Addr pc, int bank) const
232{
233 int tag = (pc >> instShiftAmt) ^
234 threadHistory[tid].computeTags[0][bank].comp ^
235 (threadHistory[tid].computeTags[1][bank].comp << 1);
236
237 return (tag & ((1ULL << tagTableTagWidths[bank]) - 1));
238}
239
240
241// Up-down saturating counter
242template<typename T>
243void
244TAGEBase::ctrUpdate(T & ctr, bool taken, int nbits)
245{
246 assert(nbits <= sizeof(T) << 3);
247 if (taken) {
248 if (ctr < ((1 << (nbits - 1)) - 1))
249 ctr++;
250 } else {
251 if (ctr > -(1 << (nbits - 1)))
252 ctr--;
253 }
254}
255
256// int8_t and int versions of this function may be needed
257template void TAGEBase::ctrUpdate(int8_t & ctr, bool taken, int nbits);
258template void TAGEBase::ctrUpdate(int & ctr, bool taken, int nbits);
259
260// Up-down unsigned saturating counter
261void
262TAGEBase::unsignedCtrUpdate(uint8_t & ctr, bool up, unsigned nbits)
263{
264 assert(nbits <= sizeof(uint8_t) << 3);
265 if (up) {
266 if (ctr < ((1 << nbits) - 1))
267 ctr++;
268 } else {
269 if (ctr)
270 ctr--;
271 }
272}
273
// Bimodal prediction
// Returns the direction bit of the bimodal table entry selected by the
// previously-computed bi->bimodalIndex (see bindex()).
// NOTE(review): signature line (original 276) missing from extraction.
275bool
277{
278 return btablePrediction[bi->bimodalIndex];
279}
280
281
282// Update the bimodal predictor: a hysteresis bit is shared among N prediction
283// bits (N = 2 ^ logRatioBiModalHystEntries)
// The prediction and shared hysteresis bits are combined into a 2-bit
// saturating counter ("inter"), updated, then split back out.
// NOTE(review): original lines 285 (signature) and 288 (the
// "+ btableHysteresis[...]" half of the inter expression) are missing
// from this extraction -- verify against upstream tage_base.cc.
284void
286{
287 int inter = (btablePrediction[bi->bimodalIndex] << 1)
289 if (taken) {
290 if (inter < 3)
291 inter++;
292 } else if (inter > 0) {
293 inter--;
294 }
// High bit is the new direction prediction, low bit the shared hysteresis.
295 const bool pred = inter >> 1;
296 const bool hyst = inter & 1;
297 btablePrediction[bi->bimodalIndex] = pred;
298 btableHysteresis[bi->bimodalIndex >> logRatioBiModalHystEntries] = hyst;
299 DPRINTF(Tage, "Updating branch %lx, pred:%d, hyst:%d\n", pc, pred, hyst);
300}
301
302// shifting the global history: we manage the history in a big table in order
303// to reduce simulation time
// Inserts the n oldest-to-newest bits of bit-vector bv into the global
// history (gHist grows downward in the buffer) and updates every folded
// history with each inserted bit.
// NOTE(review): original lines 317 and 321 (destination of the rollover
// copy and the ptGhist reset) are missing from this extraction.
304void
305TAGEBase::updateGHist(ThreadID tid, uint64_t bv, uint8_t n)
306{
307 if (n == 0) return;
308
309 // Handle rollovers first.
310 ThreadHistory& tHist = threadHistory[tid];
311 if (tHist.ptGhist < n) {
312 DPRINTF(Tage, "Rolling over the histories\n");
313 // Copy beginning of globalHistoryBuffer to end, such that
314 // the last maxHist outcomes are still reachable
315 // through globalHistory[0 .. maxHist - 1].
316 for (int i = 0; i < maxHist; i++) {
318 = tHist.globalHistory[tHist.ptGhist + i];
319 }
320
322 tHist.gHist = &tHist.globalHistory[tHist.ptGhist];
323 }
324
325 // Update the global history
326 for (int i = 0; i < n; i++) {
327
328 // Shift the next bit of the bit vector into the history
329 tHist.ptGhist--;
330 tHist.gHist--;
331 *(tHist.gHist) = (bv & 1) ? 1 : 0;
332 bv >>= 1;
333
334 // Update the folded histories with the new bit.
335 for (int i = 1; i <= nHistoryTables; i++) {
336 tHist.computeIndices[i].update(tHist.gHist);
337 tHist.computeTags[0][i].update(tHist.gHist);
338 tHist.computeTags[1][i].update(tHist.gHist);
339 }
340 }
341}
342
// Computes the index and partial tag for every tagged table, caching
// them both in the scratch arrays and in the BranchInfo so the update
// path can reuse them without recomputation.
// NOTE(review): the first signature line (original 344) is missing.
343void
345 BranchInfo* bi)
346{
347 // computes the table addresses and the partial tags
348 for (int i = 1; i <= nHistoryTables; i++) {
349 tableIndices[i] = gindex(tid, branch_pc, i);
350 bi->tableIndices[i] = tableIndices[i];
351 tableTags[i] = gtag(tid, branch_pc, i);
352 bi->tableTags[i] = tableTags[i];
353 }
// Mark the cached indices/tags as valid for this branch.
354 bi->valid = true;
355}
356
// Index into useAltPredForNewlyAllocated; the base implementation keeps
// a single counter, derived predictors use several.
// NOTE(review): signature line (original 358) missing from extraction.
357unsigned
359{
360 // There is only 1 counter on the base TAGE implementation
361 return 0;
362}
363
// Main TAGE lookup: finds the longest- and second-longest-matching
// tagged tables, derives the provider and alternate predictions, and
// chooses between them based on the "newly allocated" heuristic.
// NOTE(review): original lines 365 (signature start), 404 and 424
// (continuations of the altBankIndex assignment and the provider
// ternary) are missing from this extraction -- verify upstream.
364bool
366 bool cond_branch, BranchInfo* bi)
367{
368 if (!cond_branch) {
369 // Unconditional branch, predict taken
370 assert(bi->branchPC == branch_pc);
371 return true;
372 }
373
374 // TAGE prediction
375
376 calculateIndicesAndTags(tid, branch_pc, bi);
377 bi->bimodalIndex = bindex(branch_pc);
378
379 bi->hitBank = 0;
380 bi->altBank = 0;
381 //Look for the bank with longest matching history
382 for (int i = nHistoryTables; i > 0; i--) {
383 if (noSkip[i] &&
384 gtable[i][tableIndices[i]].tag == tableTags[i]) {
385 bi->hitBank = i;
386 bi->hitBankIndex = tableIndices[bi->hitBank];
387 break;
388 }
389 }
390 //Look for the alternate bank
391 for (int i = bi->hitBank - 1; i > 0; i--) {
392 if (noSkip[i] &&
393 gtable[i][tableIndices[i]].tag == tableTags[i]) {
394 bi->altBank = i;
395 bi->altBankIndex = tableIndices[bi->altBank];
396 break;
397 }
398 }
399 //computes the prediction and the alternate prediction
400 if (bi->hitBank > 0) {
401 if (bi->altBank > 0) {
402 bi->altTaken =
403 gtable[bi->altBank][tableIndices[bi->altBank]].ctr >= 0;
405 } else {
406 bi->altTaken = getBimodePred(branch_pc, bi);
407 }
408
// Sign bit of the provider counter gives the direction; a counter of
// -1 or 0 (|2*ctr+1| <= 1) marks a weak, "pseudo newly allocated" entry.
409 bi->longestMatchPred =
410 gtable[bi->hitBank][tableIndices[bi->hitBank]].ctr >= 0;
411 bi->pseudoNewAlloc =
412 abs(2 * gtable[bi->hitBank][bi->hitBankIndex].ctr + 1) <= 1;
413
414 //if the entry is recognized as a newly allocated entry and
415 //useAltPredForNewlyAllocated is positive use the alternate
416 //prediction
417 if ((useAltPredForNewlyAllocated[getUseAltIdx(bi, branch_pc)] < 0)
418 || ! bi->pseudoNewAlloc) {
419 bi->tagePred = bi->longestMatchPred;
420 bi->provider = TAGE_LONGEST_MATCH;
421 } else {
422 bi->tagePred = bi->altTaken;
423 bi->provider = bi->altBank ? TAGE_ALT_MATCH
425 }
426 } else {
// No tagged table hit: fall back entirely to the bimodal predictor.
427 bi->altTaken = getBimodePred(branch_pc, bi);
428 bi->tagePred = bi->altTaken;
429 bi->longestMatchPred = bi->altTaken;
430 bi->provider = BIMODAL_ONLY;
431 }
432
433 DPRINTF(Tage, "Predict for %lx: tagePred:%d, altPred:%d\n",
434 branch_pc, bi->tagePred, bi->altTaken);
435
436 return bi->tagePred;
437}
438
439void
440TAGEBase::adjustAlloc(bool & alloc, bool taken, bool pred_taken)
441{
442 // Nothing for this base class implementation
443}
444
// On a mispredict, tries to allocate up to maxNumAlloc entries in tables
// with longer histories than the provider, then bumps the periodic
// usefulness-reset counter.
// NOTE(review): the signature start (original line 446) is missing from
// this extraction -- verify against upstream tage_base.cc.
445void
447 int nrand)
448{
449 if (alloc) {
450 // is there some "unuseful" entry to allocate
451 uint8_t min = 1;
452 for (int i = nHistoryTables; i > bi->hitBank; i--) {
453 if (gtable[i][bi->tableIndices[i]].u < min) {
454 min = gtable[i][bi->tableIndices[i]].u;
455 }
456 }
457
458 // we allocate an entry with a longer history
459 // to avoid ping-pong, we do not choose systematically the next
460 // entry, but among the 3 next entries
461 int Y = nrand &
462 ((1ULL << (nHistoryTables - bi->hitBank - 1)) - 1);
463 int X = bi->hitBank + 1;
464 if (Y & 1) {
465 X++;
466 if (Y & 2)
467 X++;
468 }
469 // No entry available, forces one to be available
470 if (min > 0) {
471 gtable[X][bi->tableIndices[X]].u = 0;
472 }
473
474
475 //Allocate entries
476 unsigned numAllocated = 0;
477 for (int i = X; i <= nHistoryTables; i++) {
478 if (gtable[i][bi->tableIndices[i]].u == 0) {
// New entries start weak: 0 (weakly taken) or -1 (weakly not-taken).
479 gtable[i][bi->tableIndices[i]].tag = bi->tableTags[i];
480 gtable[i][bi->tableIndices[i]].ctr = (taken) ? 0 : -1;
481 ++numAllocated;
482 if (numAllocated == maxNumAlloc) {
483 break;
484 }
485 }
486 }
487 }
488
489 tCounter++;
490
491 handleUReset();
492}
493
// Periodically ages the usefulness counters: every 2^logUResetPeriod
// updates, every u counter in every tagged table is aged via resetUctr.
// NOTE(review): signature line (original 495) missing from extraction.
494void
496{
497 //periodic reset of u: reset is not complete but bit by bit
498 if ((tCounter & ((1ULL << logUResetPeriod) - 1)) == 0) {
499 // reset least significant bit
500 // most significant bit becomes least significant bit
501 for (int i = 1; i <= nHistoryTables; i++) {
502 for (int j = 0; j < (1ULL << logTagTableSizes[i]); j++) {
503 resetUctr(gtable[i][j].u);
504 }
505 }
506 }
507}
508
// Ages one usefulness counter by halving it (right shift by one).
// NOTE(review): signature line (original 510) missing from extraction.
509void
511{
512 u >>= 1;
513}
514
// Resolves a conditional branch: decides whether to allocate new
// entries, trains the useAltPredForNewlyAllocated chooser, then
// delegates allocation/u-reset and table training.
// NOTE(review): original line 541 (the useAltPredForNewlyAllocated
// operand of the ctrUpdate) is missing from this extraction.
515void
516TAGEBase::condBranchUpdate(ThreadID tid, Addr branch_pc, bool taken,
517 BranchInfo* bi, int nrand, Addr corrTarget, bool pred, bool preAdjustAlloc)
518{
519 // TAGE UPDATE
520 // try to allocate a new entries only if prediction was wrong
521 bool alloc = (bi->tagePred != taken) && (bi->hitBank < nHistoryTables);
522
523 if (preAdjustAlloc) {
524 adjustAlloc(alloc, taken, pred);
525 }
526
527 if (bi->hitBank > 0) {
528 // Manage the selection between longest matching and alternate
529 // matching for "pseudo"-newly allocated longest matching entry
530 bool PseudoNewAlloc = bi->pseudoNewAlloc;
531 // an entry is considered as newly allocated if its prediction
532 // counter is weak
533 if (PseudoNewAlloc) {
534 if (bi->longestMatchPred == taken) {
535 alloc = false;
536 }
537 // if it was delivering the correct prediction, no need to
538 // allocate new entry even if the overall prediction was false
// Train the chooser only when provider and alternate disagreed,
// i.e. when the choice actually mattered.
539 if (bi->longestMatchPred != bi->altTaken) {
540 ctrUpdate(
542 bi->altTaken == taken, useAltOnNaBits);
543 }
544 }
545 }
546
547 if (!preAdjustAlloc) {
548 adjustAlloc(alloc, taken, pred);
549 }
550
551 handleAllocAndUReset(alloc, taken, bi, nrand);
552
553 handleTAGEUpdate(branch_pc, taken, bi);
554}
555
// Trains the provider entry (and, when the provider is unproven,
// the alternate entry or bimodal table), then updates the provider's
// usefulness counter when provider and alternate disagreed.
// NOTE(review): original lines 557 (signature) and 563/569 (the nbits
// arguments of the ctrUpdate calls) are missing from this extraction.
556void
558{
559 if (bi->hitBank > 0) {
560 DPRINTF(Tage, "Updating tag table entry (%d,%d) for branch %lx\n",
561 bi->hitBank, bi->hitBankIndex, branch_pc);
562 ctrUpdate(gtable[bi->hitBank][bi->hitBankIndex].ctr, taken,
564 // if the provider entry is not certified to be useful also update
565 // the alternate prediction
566 if (gtable[bi->hitBank][bi->hitBankIndex].u == 0) {
567 if (bi->altBank > 0) {
568 ctrUpdate(gtable[bi->altBank][bi->altBankIndex].ctr, taken,
570 DPRINTF(Tage, "Updating tag table entry (%d,%d) for"
571 " branch %lx\n", bi->hitBank, bi->hitBankIndex,
572 branch_pc);
573 }
574 if (bi->altBank == 0) {
575 baseUpdate(branch_pc, taken, bi);
576 }
577 }
578
579 // update the u counter
580 if (bi->tagePred != bi->altTaken) {
581 unsignedCtrUpdate(gtable[bi->hitBank][bi->hitBankIndex].u,
582 bi->tagePred == taken, tagTableUBits);
583 }
584 } else {
// No tagged hit at prediction time: only the bimodal table trains.
585 baseUpdate(branch_pc, taken, bi);
586 }
587}
588
// Shifts the branch outcome into the global direction history and the
// low PC bit into the path history, recording in BranchInfo what was
// inserted so it can be rolled back on a squash.
// NOTE(review): the signature start (original line 590) is missing.
589void
591 Addr branch_pc, Addr target, BranchInfo* bi)
592{
593 ThreadHistory& tHist = threadHistory[tid];
594
595 // Update path history
596 tHist.pathHist = calcNewPathHist(tid, branch_pc, tHist.pathHist);
597
598 // For normal direction history update the history by
599 // whether the branch was taken or not.
600 bi->ghist = taken ? 1 : 0;
601 bi->nGhist = 1;
602 // Update the global history
603 updateGHist(tid, bi->ghist, bi->nGhist);
604}
605
606
// (Speculatively) updates path and direction histories for one branch.
// Handles late/out-of-order insertion (decoupled frontend) by recording
// history state on first sight and reverting a prior update before
// re-applying it.
// NOTE(review): original line 651 (the updatePathAndGlobalHistory call
// preceding "branch_pc, target, bi") is missing from this extraction.
607void
608TAGEBase::updateHistories(ThreadID tid, Addr branch_pc, bool speculative,
609 bool taken, Addr target,
610 const StaticInstPtr & inst, BranchInfo* bi)
611{
// Only act when this call matches the configured update mode; in
// non-speculative mode the commit-time call snapshots the path history.
612 if (speculative != speculativeHistUpdate) {
613 if (!speculative) {
614 // Save the speculative path history as non-speculative
615 threadHistory[tid].nonSpecPathHist
616 = calcNewPathHist(tid, branch_pc, bi->pathHist);
617 }
618 return;
619 }
620
621 // If this is the first time we see this branch record the current
622 // state of the history to be able to recover.
623 if (speculativeHistUpdate && (!bi->modified)) {
624 recordHistState(tid, bi);
625 }
626
627 // In case the branch already updated the history
628 // we need to revert the previous update first.
629 if (bi->modified) {
630 restoreHistState(tid, bi);
631 }
632
633 // Recalculate the tags and indices if needed. This can be the case
634 // as in the decoupled frontend branches can be inserted out of order
635 // (surprise branches). We can not compute the tags and indices
636 // at that point since the BPU might already speculated on other branches
637 // which updated the history. We can recalculate the tags and indices
638 // now since either the branch was correctly not taken and the history
639 // will not be updated or the branch was incorrect in which case the
640 // branches afterwards where squashed and the history was restored.
641 if (!bi->valid && bi->condBranch) {
642 calculateIndicesAndTags(tid, branch_pc, bi);
643 }
644
645 // We should have not already modified the history
646 // for this branch
647 assert(bi->nGhist == 0);
648
649 // Do the actual history update. Might be different for different
650 // TAGE implementations.
652 branch_pc, target, bi);
653 bi->modified = true;
654
655 DPRINTF(Tage, "Updating global histories with branch:%lx; taken?:%d, "
656 "path Hist: %x; pointer:%d\n", branch_pc, taken,
657 threadHistory[tid].pathHist, threadHistory[tid].ptGhist);
658 assert(threadHistory[tid].gHist ==
659 &threadHistory[tid].globalHistory[threadHistory[tid].ptGhist]);
660}
661
// Snapshots the history pointer, path history and all folded-history
// registers into the BranchInfo so a mispredicted speculative update
// can be undone by restoreHistState().
// NOTE(review): signature line (original 663) missing from extraction.
662void
664{
665 ThreadHistory& tHist = threadHistory[tid];
666 bi->ptGhist = tHist.ptGhist;
667 bi->pathHist = tHist.pathHist;
668
669 for (int i = 1; i <= nHistoryTables; i++) {
670 bi->ci[i] = tHist.computeIndices[i].comp;
671 bi->ct0[i] = tHist.computeTags[0][i].comp;
672 bi->ct1[i] = tHist.computeTags[1][i].comp;
673 }
674}
675
// Undoes a speculative history update for one branch: restores the path
// history and shifts the inserted bits back out of both the global
// history buffer and the folded histories.
// NOTE(review): original lines 677 (signature) and 679 (presumably the
// "if (!speculativeHistUpdate) {" guard) are missing from this
// extraction -- verify against upstream tage_base.cc.
676void
678{
680 return;
681 }
682 if (!bi->modified) {
683 return;
684 }
685
686 ThreadHistory& tHist = threadHistory[tid];
687 tHist.pathHist = bi->pathHist;
688
689 if (bi->nGhist == 0)
690 return;
691
692 // RESTORE HISTORIES
693 // Shift out the inserted bits
694 // from the folded history and the global history vector
695 for (int n = 0; n < bi->nGhist; n++) {
696
697 // First revert the folded history
698 for (int i = 1; i <= nHistoryTables; i++) {
699 tHist.computeIndices[i].restore(tHist.gHist);
700 tHist.computeTags[0][i].restore(tHist.gHist);
701 tHist.computeTags[1][i].restore(tHist.gHist);
702 }
703 tHist.ptGhist++;
704 tHist.gHist++;
705 }
706 bi->nGhist = 0;
707 bi->modified = false;
708}
709
710int
711TAGEBase::calcNewPathHist(ThreadID tid, Addr pc, int cur_phist) const
712{
713 int pathbit = ((pc >> instShiftAmt) & 1);
714 cur_phist = (cur_phist << 1) + pathbit;
715 cur_phist = (cur_phist & ((1ULL << pathHistBits) - 1));
716 return cur_phist;
717}
718
// Recovery on a squash: replays the history update with the corrected
// outcome (updateHistories reverts the old state first via bi->modified).
// NOTE(review): the parameter-list continuation (original line 721,
// "const StaticInstPtr &inst, BranchInfo *bi") is missing here.
719void
720TAGEBase::squash(ThreadID tid, bool taken, Addr target,
722{
723 updateHistories(tid, bi->branchPC, true, taken, target, inst, bi);
724}
725
// Hook for derived predictors to refine the alternate prediction;
// intentionally a no-op in the base class.
// NOTE(review): signature line (original 727) missing from extraction.
726void
728{
729 // do nothing. This is only used in some derived classes
730 return;
731}
732
// Bumps the per-provider correct/wrong counters and the "would have
// hit" counters for the non-chosen prediction.
// NOTE(review): original lines 734 (signature) and several case labels
// (741, 750, 756, 765, 774 -- e.g. TAGE_ALT_MATCH / TAGE_LONGEST_MATCH)
// are missing from this extraction -- verify upstream.
733void
735{
736 if (taken == bi->tagePred) {
737 // correct prediction
738 switch (bi->provider) {
739 case BIMODAL_ONLY: stats.bimodalProviderCorrect++; break;
740 case TAGE_LONGEST_MATCH: stats.longestMatchProviderCorrect++; break;
742 stats.bimodalAltMatchProviderCorrect++;
743 break;
744 case TAGE_ALT_MATCH: stats.altMatchProviderCorrect++; break;
745 }
746 } else {
747 // wrong prediction
748 switch (bi->provider) {
749 case BIMODAL_ONLY: stats.bimodalProviderWrong++; break;
751 stats.longestMatchProviderWrong++;
752 if (bi->altTaken == taken) {
753 stats.altMatchProviderWouldHaveHit++;
754 }
755 break;
757 stats.bimodalAltMatchProviderWrong++;
758 break;
759 case TAGE_ALT_MATCH:
760 stats.altMatchProviderWrong++;
761 break;
762 }
763
// Track how often the longest match would have predicted correctly
// when the alternate was chosen instead.
764 switch (bi->provider) {
766 case TAGE_ALT_MATCH:
767 if (bi->longestMatchPred == taken) {
768 stats.longestMatchProviderWouldHaveHit++;
769 }
770 }
771 }
772
// Per-table provider distribution, independent of correctness.
773 switch (bi->provider) {
775 case TAGE_ALT_MATCH:
776 stats.longestMatchProvider[bi->hitBank]++;
777 stats.altMatchProvider[bi->altBank]++;
778 break;
779 }
780}
781
// Packs the 16 most recent global-history bits into an unsigned value
// (bit i = the i-th most recent outcome).
// NOTE(review): signature line (original 783) missing from extraction.
782unsigned
784{
785 unsigned val = 0;
786 int gh_ptr = threadHistory[tid].ptGhist;
787 for (unsigned i = 0; i < 16; i++) {
788 // Make sure we don't go out of bounds
789 assert(&(threadHistory[tid].globalHistory[gh_ptr + i]) <
790 threadHistory[tid].globalHistory + histBufferSize);
791 val |= ((threadHistory[tid].globalHistory[gh_ptr + i] & 0x1) << i);
792 }
793 return val;
794}
795
796
// Statistics group constructor: registers all provider correct/wrong
// counters plus the per-table provider vectors.
// NOTE(review): the constructor name line (original 797), the ADD_STAT
// macro lines (800/803/806/... ) and the vector init() calls in the body
// (835/836) are missing from this extraction -- verify upstream.
 798 statistics::Group *parent, unsigned nHistoryTables)
 799 : statistics::Group(parent),
 801 "Number of times TAGE Longest Match is the provider and the "
 802 "prediction is correct"),
 804 "Number of times TAGE Alt Match is the provider and the "
 805 "prediction is correct"),
 807 "Number of times TAGE Alt Match is the bimodal and it is the "
 808 "provider and the prediction is correct"),
 810 "Number of times there are no hits on the TAGE tables and the "
 811 "bimodal prediction is correct"),
 813 "Number of times TAGE Longest Match is the provider and the "
 814 "prediction is wrong"),
 816 "Number of times TAGE Alt Match is the provider and the "
 817 "prediction is wrong"),
 819 "Number of times TAGE Alt Match is the bimodal and it is the "
 820 "provider and the prediction is wrong"),
 822 "Number of times there are no hits on the TAGE tables and the "
 823 "bimodal prediction is wrong"),
 825 "Number of times TAGE Longest Match is the provider, the "
 826 "prediction is wrong and Alt Match prediction was correct"),
 828 "Number of times TAGE Alt Match is the provider, the "
 829 "prediction is wrong and Longest Match prediction was correct"),
 831 "TAGE provider for longest match"),
 833 "TAGE provider for alt match")
 834{
 837}
838
839int8_t
840TAGEBase::getCtr(int hitBank, int hitBankIndex) const
841{
842 return gtable[hitBank][hitBankIndex].ctr;
843}
844
// Accessor for the width (in bits) of the tagged-table prediction
// counters.
// NOTE(review): signature line (original 846) missing from extraction.
845unsigned
847{
848 return tagTableCounterBits;
849}
850
851int
852TAGEBase::getPathHist(ThreadID tid, bool speculative) const
853{
854 return speculative ? threadHistory[tid].pathHist
855 : threadHistory[tid].nonSpecPathHist;
856}
857
858bool
863
// Computes the storage budget of the predictor in bits: tagged tables
// (tag + ctr + u per entry), bimodal prediction bits, shared hysteresis
// bits, and assorted history state.
// NOTE(review): original lines 865 (signature), 869 (per-entry bit-width
// factor) and 875-877 (history/chooser bit terms) are missing from this
// extraction -- verify against upstream tage_base.cc.
864size_t
866 size_t bits = 0;
867 for (int i = 1; i <= nHistoryTables; i++) {
868 bits += (1 << logTagTableSizes[i]) *
870 }
871 uint64_t bimodalTableSize = 1ULL << logTagTableSizes[0];
873 bits += bimodalTableSize;
874 bits += (bimodalTableSize >> logRatioBiModalHystEntries);
878 return bits;
879}
880
881} // namespace branch_prediction
882} // namespace gem5
#define DPRINTF(x,...)
Definition trace.hh:209
int getPathHist(ThreadID tid, bool speculative=true) const
Definition tage_base.cc:852
TAGEBase(const TAGEBaseParams &p)
Definition tage_base.cc:52
virtual int branchTypeExtra(const StaticInstPtr &inst)
This function acts as a hook for other TAGE implementations to adjust the branch type.
Definition tage_base.hh:357
virtual BranchInfo * makeBranchInfo(Addr pc, bool conditional)
Definition tage_base.cc:83
virtual void handleAllocAndUReset(bool alloc, bool taken, BranchInfo *bi, int nrand)
Handles Allocation and U bits reset on an update.
Definition tage_base.cc:446
virtual void initFoldedHistories(ThreadHistory &history)
Initialization of the folded histories.
Definition tage_base.cc:153
static void ctrUpdate(T &ctr, bool taken, int nbits)
Updates a direction counter based on the actual branch outcome.
Definition tage_base.cc:244
void init() override
init() is called after all C++ SimObjects have been created and all ports are connected.
Definition tage_base.cc:88
virtual unsigned getUseAltIdx(BranchInfo *bi, Addr branch_pc)
Calculation of the index for useAltPredForNewlyAllocated On this base TAGE implementation it is alway...
Definition tage_base.cc:358
virtual void resetUctr(uint8_t &u)
Algorithm for resetting a single U counter.
Definition tage_base.cc:510
virtual void extraAltCalc(BranchInfo *bi)
Extra steps for calculating altTaken For this base TAGE class it does nothing.
Definition tage_base.cc:727
int calcNewPathHist(ThreadID tid, Addr pc, int cur_phist) const
Definition tage_base.cc:711
static void unsignedCtrUpdate(uint8_t &ctr, bool up, unsigned nbits)
Updates an unsigned counter based on up/down parameter.
Definition tage_base.cc:262
virtual void handleUReset()
Handles the U bits reset.
Definition tage_base.cc:495
virtual void updateStats(bool taken, BranchInfo *bi)
Update the stats.
Definition tage_base.cc:734
virtual void calculateParameters()
Calculates the history lengths and some other paramters in derived classes.
Definition tage_base.cc:176
const unsigned logRatioBiModalHystEntries
Definition tage_base.hh:484
std::vector< bool > btableHysteresis
Definition tage_base.hh:497
std::vector< bool > btablePrediction
Definition tage_base.hh:496
virtual void adjustAlloc(bool &alloc, bool taken, bool pred_taken)
Extra calculation to tell whether TAGE allocaitons may happen or not on an update For this base TAGE ...
Definition tage_base.cc:440
void recordHistState(ThreadID tid, BranchInfo *bi)
Records the current state of the histories to be able to restore it in case of a mispredicted specula...
Definition tage_base.cc:663
std::vector< ThreadHistory > threadHistory
Definition tage_base.hh:526
virtual void calculateIndicesAndTags(ThreadID tid, Addr branch_pc, BranchInfo *bi)
On a prediction, calculates the TAGE indices and tags for all the different history lengths.
Definition tage_base.cc:344
virtual bool getBimodePred(Addr pc, BranchInfo *bi) const
Get a branch prediction from the bimodal predictor.
Definition tage_base.cc:276
int8_t getCtr(int hitBank, int hitBankIndex) const
Definition tage_base.cc:840
virtual void squash(ThreadID tid, bool taken, Addr target, const StaticInstPtr &inst, BranchInfo *bi)
Restores speculatively updated path and direction histories.
Definition tage_base.cc:720
virtual uint16_t gtag(ThreadID tid, Addr pc, int bank) const
Computes the partial tag of a tagged table.
Definition tage_base.cc:231
std::vector< int > logTagTableSizes
Definition tage_base.hh:494
void baseUpdate(Addr pc, bool taken, BranchInfo *bi)
Updates the bimodal predictor.
Definition tage_base.cc:285
virtual int F(int phist, int size, int bank) const
Utility function to shuffle the path history depending on which tagged table we are accessing.
Definition tage_base.cc:196
bool tagePredict(ThreadID tid, Addr branch_pc, bool cond_branch, BranchInfo *bi)
TAGE prediction called from TAGE::predict.
Definition tage_base.cc:365
std::vector< int8_t > useAltPredForNewlyAllocated
Definition tage_base.hh:537
unsigned getGHR(ThreadID tid) const
Definition tage_base.cc:783
virtual void updatePathAndGlobalHistory(ThreadID tid, int brtype, bool taken, Addr branch_pc, Addr target, BranchInfo *bi)
Does the actual update of path and global history.
Definition tage_base.cc:590
virtual void updateHistories(ThreadID tid, Addr branch_pc, bool speculative, bool taken, Addr target, const StaticInstPtr &inst, BranchInfo *bi)
(Speculatively) updates global histories (path and direction).
Definition tage_base.cc:608
std::vector< unsigned > tagTableTagWidths
Definition tage_base.hh:493
gem5::branch_prediction::TAGEBase::TAGEBaseStats stats
virtual void condBranchUpdate(ThreadID tid, Addr branch_pc, bool taken, BranchInfo *bi, int nrand, Addr corrTarget, bool pred, bool preAdjustAlloc=false)
Update TAGE for conditional branches.
Definition tage_base.cc:516
virtual void buildTageTables()
Instantiates the TAGE table entries.
Definition tage_base.cc:168
void updateGHist(ThreadID tid, uint64_t bv, uint8_t n)
Internal history update function.
Definition tage_base.cc:305
virtual int gindex(ThreadID tid, Addr pc, int bank) const
Computes the index used to access a partially tagged table.
Definition tage_base.cc:213
virtual void handleTAGEUpdate(Addr branch_pc, bool taken, BranchInfo *bi)
Handles the update of the TAGE entries.
Definition tage_base.cc:557
virtual int bindex(Addr pc_in) const
Computes the index used to access the bimodal table.
Definition tage_base.cc:190
void restoreHistState(ThreadID tid, BranchInfo *bi)
Restore the state of the histories in case of detecting a mispredicted speculative update.
Definition tage_base.cc:677
Statistics container.
Definition group.hh:93
#define ADD_STAT(n,...)
Convenience macro to add a stat to a statistics group.
Definition group.hh:75
constexpr T bits(T val, unsigned first, unsigned last)
Extract the bitfield from position 'first' to 'last' (inclusive) from 'val' and right justify it.
Definition bitfield.hh:79
SimObject(const Params &p)
Definition sim_object.cc:58
Bitfield< 31 > n
Bitfield< 7 > i
Definition misc_types.hh:67
Bitfield< 23 > up
Definition types.hh:124
Bitfield< 22 > u
Bitfield< 4 > pc
Bitfield< 30, 0 > index
Bitfield< 0 > p
Bitfield< 20, 16 > bi
Definition types.hh:80
Bitfield< 27, 24 > pred
Definition types.hh:91
Bitfield< 15, 0 > X
Definition int.hh:58
Bitfield< 63 > val
Definition misc.hh:804
Units for Stats.
Definition units.hh:113
Copyright (c) 2024 Arm Limited All rights reserved.
Definition binary32.hh:36
int16_t ThreadID
Thread index/ID type.
Definition types.hh:235
uint64_t Addr
Address type This will probably be moved somewhere else in the near future.
Definition types.hh:147
RefCountingPtr< StaticInst > StaticInstPtr
void init(int original_length, int compressed_length)
Definition tage_base.hh:100
TAGEBaseStats(statistics::Group *parent, unsigned nHistoryTables)
Definition tage_base.cc:797

Generated on Mon May 26 2025 09:19:08 for gem5 by doxygen 1.13.2