tage_base.cc (gem5 v24.0.0.0)

/*
 * Copyright (c) 2014 The University of Wisconsin
 *
 * Copyright (c) 2006 INRIA (Institut National de Recherche en
 * Informatique et en Automatique / French National Research Institute
 * for Computer Science and Applied Mathematics)
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* @file
 * Implementation of a TAGE branch predictor
 */

#include "cpu/pred/tage_base.hh"

#include "base/intmath.hh"
#include "base/logging.hh"
#include "debug/Fetch.hh"
#include "debug/Tage.hh"

namespace gem5
{

namespace branch_prediction
{

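// Table numbering convention used throughout this file: component 0 is the
// untagged bimodal base predictor, and components 1..nHistoryTables are the
// partially tagged tables, indexed with increasingly long global histories.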
TAGEBase::TAGEBase(const TAGEBaseParams &p)
    : SimObject(p),
      logRatioBiModalHystEntries(p.logRatioBiModalHystEntries),
      nHistoryTables(p.nHistoryTables),
      tagTableCounterBits(p.tagTableCounterBits),
      tagTableUBits(p.tagTableUBits),
      histBufferSize(p.histBufferSize),
      minHist(p.minHist),
      maxHist(p.maxHist),
      pathHistBits(p.pathHistBits),
      tagTableTagWidths(p.tagTableTagWidths),
      logTagTableSizes(p.logTagTableSizes),
      threadHistory(p.numThreads),
      logUResetPeriod(p.logUResetPeriod),
      initialTCounterValue(p.initialTCounterValue),
      numUseAltOnNa(p.numUseAltOnNa),
      useAltOnNaBits(p.useAltOnNaBits),
      maxNumAlloc(p.maxNumAlloc),
      noSkip(p.noSkip),
      speculativeHistUpdate(p.speculativeHistUpdate),
      instShiftAmt(p.instShiftAmt),
      initialized(false),
      stats(this, nHistoryTables)
{
    if (noSkip.empty()) {
        // Set all the tables to enabled by default
        noSkip.resize(nHistoryTables + 1, true);
    }
}

TAGEBase::BranchInfo*
TAGEBase::makeBranchInfo() {
    return new BranchInfo(*this);
}

void
TAGEBase::init()
{
    if (initialized) {
        return;
    }

    // Current method for periodically resetting the u counter bits only
    // works for 1 or 2 bits
    // Also make sure that it is not 0
    assert(tagTableUBits <= 2 && (tagTableUBits > 0));

    // we use int type for the path history, so it cannot be more than
    // its size
    assert(pathHistBits <= (sizeof(int)*8));

    // initialize the counter to half of the period
    assert(logUResetPeriod != 0);
    tCounter = initialTCounterValue;

    assert(histBufferSize > maxHist * 2);

    useAltPredForNewlyAllocated.resize(numUseAltOnNa, 0);

    for (auto& history : threadHistory) {
        history.pathHist = 0;
        history.globalHistory = new uint8_t[histBufferSize];
        history.gHist = history.globalHistory;
        memset(history.gHist, 0, histBufferSize);
        history.ptGhist = 0;
    }

    histLengths = new int [nHistoryTables+1];

    calculateParameters();

    assert(tagTableTagWidths.size() == (nHistoryTables+1));
    assert(logTagTableSizes.size() == (nHistoryTables+1));

    // First entry is for the Bimodal table and it is untagged in this
    // implementation
    assert(tagTableTagWidths[0] == 0);

    for (auto& history : threadHistory) {
        history.computeIndices = new FoldedHistory[nHistoryTables+1];
        history.computeTags[0] = new FoldedHistory[nHistoryTables+1];
        history.computeTags[1] = new FoldedHistory[nHistoryTables+1];

        initFoldedHistories(history);
    }

    const uint64_t bimodalTableSize = 1ULL << logTagTableSizes[0];
    btablePrediction.resize(bimodalTableSize, false);
    btableHysteresis.resize(bimodalTableSize >> logRatioBiModalHystEntries,
                            true);

    gtable = new TageEntry*[nHistoryTables + 1];
    buildTageTables();

    tableIndices = new int [nHistoryTables+1];
    tableTags = new int [nHistoryTables+1];
    initialized = true;
}

void
TAGEBase::initFoldedHistories(ThreadHistory & history)
{
    for (int i = 1; i <= nHistoryTables; i++) {
        history.computeIndices[i].init(
            histLengths[i], (logTagTableSizes[i]));
        history.computeTags[0][i].init(
            history.computeIndices[i].origLength, tagTableTagWidths[i]);
        history.computeTags[1][i].init(
            history.computeIndices[i].origLength, tagTableTagWidths[i] - 1);
        DPRINTF(Tage, "HistLength:%d, TTSize:%d, TTTWidth:%d\n",
                histLengths[i], logTagTableSizes[i], tagTableTagWidths[i]);
    }
}

void
TAGEBase::buildTageTables()
{
    for (int i = 1; i <= nHistoryTables; i++) {
        gtable[i] = new TageEntry[1<<(logTagTableSizes[i])];
    }
}

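// The tagged tables use history lengths that form a geometric series between
// minHist and maxHist, which is the "GEometric history length" idea that
// gives TAGE its name. As a rough illustrative example (not a guarantee
// about the configured parameters): with minHist=5, maxHist=130 and 7 tagged
// tables, the computed lengths come out to about 5, 9, 15, 25, 44, 76 and
// 130 bits.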
void
TAGEBase::calculateParameters()
{
    histLengths[1] = minHist;
    histLengths[nHistoryTables] = maxHist;

    for (int i = 2; i <= nHistoryTables; i++) {
        histLengths[i] = (int) (((double) minHist *
                       pow ((double) (maxHist) / (double) minHist,
                           (double) (i - 1) / (double) ((nHistoryTables- 1))))
                       + 0.5);
    }
}

void
TAGEBase::btbUpdate(ThreadID tid, Addr branch_pc, BranchInfo* &bi)
{
    if (speculativeHistUpdate) {
        ThreadHistory& tHist = threadHistory[tid];
        DPRINTF(Tage, "BTB miss resets prediction: %lx\n", branch_pc);
        assert(tHist.gHist == &tHist.globalHistory[tHist.ptGhist]);
        tHist.gHist[0] = 0;
        for (int i = 1; i <= nHistoryTables; i++) {
            tHist.computeIndices[i].comp = bi->ci[i];
            tHist.computeTags[0][i].comp = bi->ct0[i];
            tHist.computeTags[1][i].comp = bi->ct1[i];
            tHist.computeIndices[i].update(tHist.gHist);
            tHist.computeTags[0][i].update(tHist.gHist);
            tHist.computeTags[1][i].update(tHist.gHist);
        }
    }
}

int
TAGEBase::bindex(Addr pc_in) const
{
    return ((pc_in >> instShiftAmt) & ((1ULL << (logTagTableSizes[0])) - 1));
}

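// F() below mixes the path history into an index-sized value: it keeps the
// low 'size' path-history bits, folds them down to logTagTableSizes[bank]
// bits, and applies a bank-dependent rotation so that each tagged table
// combines the same path history in a different way.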
int
TAGEBase::F(int A, int size, int bank) const
{
    int A1, A2;

    A = A & ((1ULL << size) - 1);
    A1 = (A & ((1ULL << logTagTableSizes[bank]) - 1));
    A2 = (A >> logTagTableSizes[bank]);
    A2 = ((A2 << bank) & ((1ULL << logTagTableSizes[bank]) - 1))
         + (A2 >> (logTagTableSizes[bank] - bank));
    A = A1 ^ A2;
    A = ((A << bank) & ((1ULL << logTagTableSizes[bank]) - 1))
        + (A >> (logTagTableSizes[bank] - bank));
    return (A);
}

// gindex computes a full hash of pc, ghist and pathHist
int
TAGEBase::gindex(ThreadID tid, Addr pc, int bank) const
{
    int index;
    int hlen = (histLengths[bank] > pathHistBits) ? pathHistBits :
               histLengths[bank];
    const unsigned int shiftedPc = pc >> instShiftAmt;
    index =
        shiftedPc ^
        (shiftedPc >> ((int) abs(logTagTableSizes[bank] - bank) + 1)) ^
        threadHistory[tid].computeIndices[bank].comp ^
        F(threadHistory[tid].pathHist, hlen, bank);

    return (index & ((1ULL << (logTagTableSizes[bank])) - 1));
}


// Tag computation
uint16_t
TAGEBase::gtag(ThreadID tid, Addr pc, int bank) const
{
    int tag = (pc >> instShiftAmt) ^
              threadHistory[tid].computeTags[0][bank].comp ^
              (threadHistory[tid].computeTags[1][bank].comp << 1);

    return (tag & ((1ULL << tagTableTagWidths[bank]) - 1));
}


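// The counters below are nbits-wide two's-complement saturating counters: a
// taken outcome increments, a not-taken outcome decrements, and the value
// sticks at the limits [-(2^(nbits-1)), 2^(nbits-1) - 1]. For example, a
// 3-bit counter covers -4..3, and ctr >= 0 is read as predict-taken.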
// Up-down saturating counter
template<typename T>
void
TAGEBase::ctrUpdate(T & ctr, bool taken, int nbits)
{
    assert(nbits <= sizeof(T) << 3);
    if (taken) {
        if (ctr < ((1 << (nbits - 1)) - 1))
            ctr++;
    } else {
        if (ctr > -(1 << (nbits - 1)))
            ctr--;
    }
}

// int8_t and int versions of this function may be needed
template void TAGEBase::ctrUpdate(int8_t & ctr, bool taken, int nbits);
template void TAGEBase::ctrUpdate(int & ctr, bool taken, int nbits);

// Up-down unsigned saturating counter
void
TAGEBase::unsignedCtrUpdate(uint8_t & ctr, bool up, unsigned nbits)
{
    assert(nbits <= sizeof(uint8_t) << 3);
    if (up) {
        if (ctr < ((1 << nbits) - 1))
            ctr++;
    } else {
        if (ctr)
            ctr--;
    }
}

// Bimodal prediction
bool
TAGEBase::getBimodePred(Addr pc, BranchInfo* bi) const
{
    return btablePrediction[bi->bimodalIndex];
}


// Update the bimodal predictor: a hysteresis bit is shared among N prediction
// bits (N = 2 ^ logRatioBiModalHystEntries)
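// 'inter' below rebuilds a 2-bit counter in the range 0..3 from the two
// stored bits: the MSB is the per-entry prediction bit and the LSB is the
// hysteresis bit shared by the group of entries.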
void
TAGEBase::baseUpdate(Addr pc, bool taken, BranchInfo* bi)
{
    int inter = (btablePrediction[bi->bimodalIndex] << 1)
        + btableHysteresis[bi->bimodalIndex >> logRatioBiModalHystEntries];
    if (taken) {
        if (inter < 3)
            inter++;
    } else if (inter > 0) {
        inter--;
    }
    const bool pred = inter >> 1;
    const bool hyst = inter & 1;
    btablePrediction[bi->bimodalIndex] = pred;
    btableHysteresis[bi->bimodalIndex >> logRatioBiModalHystEntries] = hyst;
    DPRINTF(Tage, "Updating branch %lx, pred:%d, hyst:%d\n", pc, pred, hyst);
}

// shifting the global history: we manage the history in a big table in order
// to reduce simulation time
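// The newest outcome always lives at gHist[0] and older outcomes at higher
// indices, so "shifting in" a new outcome is just a pointer decrement; the
// buffer is only recopied when the pointer reaches the start.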
void
TAGEBase::updateGHist(uint8_t * &h, bool dir, uint8_t * tab, int &pt)
{
    if (pt == 0) {
        DPRINTF(Tage, "Rolling over the histories\n");
        // Copy beginning of globalHistoryBuffer to end, such that
        // the last maxHist outcomes are still reachable
        // through pt[0 .. maxHist - 1].
        for (int i = 0; i < maxHist; i++)
            tab[histBufferSize - maxHist + i] = tab[i];
        pt = histBufferSize - maxHist;
        h = &tab[pt];
    }
    pt--;
    h--;
    h[0] = (dir) ? 1 : 0;
}

void
TAGEBase::calculateIndicesAndTags(ThreadID tid, Addr branch_pc,
                                  BranchInfo* bi)
{
    // computes the table addresses and the partial tags
    for (int i = 1; i <= nHistoryTables; i++) {
        tableIndices[i] = gindex(tid, branch_pc, i);
        bi->tableIndices[i] = tableIndices[i];
        tableTags[i] = gtag(tid, branch_pc, i);
        bi->tableTags[i] = tableTags[i];
    }
}

unsigned
TAGEBase::getUseAltIdx(BranchInfo* bi, Addr branch_pc)
{
    // There is only 1 counter on the base TAGE implementation
    return 0;
}

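// Prediction rule: the hit in the table with the longest matching history
// (the "provider") normally supplies the prediction; if that entry looks
// newly allocated (weak counter) and the useAltPredForNewlyAllocated counter
// says the alternate is more trustworthy, the alternate prediction (next
// longest hit, or the bimodal table) is used instead.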
bool
TAGEBase::tagePredict(ThreadID tid, Addr branch_pc,
                      bool cond_branch, BranchInfo* bi)
{
    Addr pc = branch_pc;
    bool pred_taken = true;

    if (cond_branch) {
        // TAGE prediction

        calculateIndicesAndTags(tid, pc, bi);

        bi->bimodalIndex = bindex(pc);

        bi->hitBank = 0;
        bi->altBank = 0;
        //Look for the bank with longest matching history
        for (int i = nHistoryTables; i > 0; i--) {
            if (noSkip[i] &&
                gtable[i][tableIndices[i]].tag == tableTags[i]) {
                bi->hitBank = i;
                bi->hitBankIndex = tableIndices[bi->hitBank];
                break;
            }
        }
        //Look for the alternate bank
        for (int i = bi->hitBank - 1; i > 0; i--) {
            if (noSkip[i] &&
                gtable[i][tableIndices[i]].tag == tableTags[i]) {
                bi->altBank = i;
                bi->altBankIndex = tableIndices[bi->altBank];
                break;
            }
        }
        //computes the prediction and the alternate prediction
        if (bi->hitBank > 0) {
            if (bi->altBank > 0) {
                bi->altTaken =
                    gtable[bi->altBank][tableIndices[bi->altBank]].ctr >= 0;
                extraAltCalc(bi);
            } else {
                bi->altTaken = getBimodePred(pc, bi);
            }

            bi->longestMatchPred =
                gtable[bi->hitBank][tableIndices[bi->hitBank]].ctr >= 0;
            bi->pseudoNewAlloc =
                abs(2 * gtable[bi->hitBank][bi->hitBankIndex].ctr + 1) <= 1;

            //if the entry is recognized as a newly allocated entry and
            //useAltPredForNewlyAllocated is positive use the alternate
            //prediction
            if ((useAltPredForNewlyAllocated[getUseAltIdx(bi, branch_pc)] < 0)
                || ! bi->pseudoNewAlloc) {
                bi->tagePred = bi->longestMatchPred;
                bi->provider = TAGE_LONGEST_MATCH;
            } else {
                bi->tagePred = bi->altTaken;
                bi->provider = bi->altBank ? TAGE_ALT_MATCH
                                           : BIMODAL_ALT_MATCH;
            }
        } else {
            bi->altTaken = getBimodePred(pc, bi);
            bi->tagePred = bi->altTaken;
            bi->longestMatchPred = bi->altTaken;
            bi->provider = BIMODAL_ONLY;
        }
        //end TAGE prediction

        pred_taken = (bi->tagePred);
        DPRINTF(Tage, "Predict for %lx: taken?:%d, tagePred:%d, altPred:%d\n",
                branch_pc, pred_taken, bi->tagePred, bi->altTaken);
    }
    bi->branchPC = branch_pc;
    bi->condBranch = cond_branch;
    return pred_taken;
}

void
TAGEBase::adjustAlloc(bool & alloc, bool taken, bool pred_taken)
{
    // Nothing for this base class implementation
}

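// Allocation policy on a mispredict: try to claim up to maxNumAlloc entries
// in tables using a longer history than the provider, but only entries whose
// useful (u) counter is zero; the starting table is picked among the next
// three tables using the random value nrand to avoid ping-pong between
// branches competing for the same entries.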
void
TAGEBase::handleAllocAndUReset(bool alloc, bool taken, BranchInfo* bi,
                               int nrand)
{
    if (alloc) {
        // is there some "unuseful" entry to allocate
        uint8_t min = 1;
        for (int i = nHistoryTables; i > bi->hitBank; i--) {
            if (gtable[i][bi->tableIndices[i]].u < min) {
                min = gtable[i][bi->tableIndices[i]].u;
            }
        }

        // we allocate an entry with a longer history
        // to avoid ping-pong, we do not choose systematically the next
        // entry, but among the 3 next entries
        int Y = nrand &
            ((1ULL << (nHistoryTables - bi->hitBank - 1)) - 1);
        int X = bi->hitBank + 1;
        if (Y & 1) {
            X++;
            if (Y & 2)
                X++;
        }
        // No entry available, forces one to be available
        if (min > 0) {
            gtable[X][bi->tableIndices[X]].u = 0;
        }


        //Allocate entries
        unsigned numAllocated = 0;
        for (int i = X; i <= nHistoryTables; i++) {
            if (gtable[i][bi->tableIndices[i]].u == 0) {
                gtable[i][bi->tableIndices[i]].tag = bi->tableTags[i];
                gtable[i][bi->tableIndices[i]].ctr = (taken) ? 0 : -1;
                ++numAllocated;
                if (numAllocated == maxNumAlloc) {
                    break;
                }
            }
        }
    }

    tCounter++;

    handleUReset();
}

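// tCounter counts conditional-branch updates; every 2^logUResetPeriod
// updates, the useful counters of all tagged entries are aged by one bit so
// that stale entries eventually become available for allocation again.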
void
TAGEBase::handleUReset()
{
    //periodic reset of u: reset is not complete but bit by bit
    if ((tCounter & ((1ULL << logUResetPeriod) - 1)) == 0) {
        // reset least significant bit
        // most significant bit becomes least significant bit
        for (int i = 1; i <= nHistoryTables; i++) {
            for (int j = 0; j < (1ULL << logTagTableSizes[i]); j++) {
                resetUctr(gtable[i][j].u);
            }
        }
    }
}

void
TAGEBase::resetUctr(uint8_t & u)
{
    u >>= 1;
}

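// Update path for a resolved conditional branch: possibly allocate new
// entries on a misprediction, train the counter that arbitrates between the
// longest match and the alternate prediction for weak entries, and then
// update the provider entry itself (handleTAGEUpdate).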
void
TAGEBase::condBranchUpdate(ThreadID tid, Addr branch_pc, bool taken,
    BranchInfo* bi, int nrand, Addr corrTarget, bool pred, bool preAdjustAlloc)
{
    // TAGE UPDATE
    // try to allocate new entries only if the prediction was wrong
    bool alloc = (bi->tagePred != taken) && (bi->hitBank < nHistoryTables);

    if (preAdjustAlloc) {
        adjustAlloc(alloc, taken, pred);
    }

    if (bi->hitBank > 0) {
        // Manage the selection between longest matching and alternate
        // matching for "pseudo"-newly allocated longest matching entry
        bool PseudoNewAlloc = bi->pseudoNewAlloc;
        // an entry is considered as newly allocated if its prediction
        // counter is weak
        if (PseudoNewAlloc) {
            if (bi->longestMatchPred == taken) {
                alloc = false;
            }
            // if it was delivering the correct prediction, no need to
            // allocate new entry even if the overall prediction was false
            if (bi->longestMatchPred != bi->altTaken) {
                ctrUpdate(
                    useAltPredForNewlyAllocated[getUseAltIdx(bi, branch_pc)],
                    bi->altTaken == taken, useAltOnNaBits);
            }
        }
    }

    if (!preAdjustAlloc) {
        adjustAlloc(alloc, taken, pred);
    }

    handleAllocAndUReset(alloc, taken, bi, nrand);

    handleTAGEUpdate(branch_pc, taken, bi);
}

void
TAGEBase::handleTAGEUpdate(Addr branch_pc, bool taken, BranchInfo* bi)
{
    if (bi->hitBank > 0) {
        DPRINTF(Tage, "Updating tag table entry (%d,%d) for branch %lx\n",
                bi->hitBank, bi->hitBankIndex, branch_pc);
        ctrUpdate(gtable[bi->hitBank][bi->hitBankIndex].ctr, taken,
                  tagTableCounterBits);
        // if the provider entry is not certified to be useful also update
        // the alternate prediction
        if (gtable[bi->hitBank][bi->hitBankIndex].u == 0) {
            if (bi->altBank > 0) {
                ctrUpdate(gtable[bi->altBank][bi->altBankIndex].ctr, taken,
                          tagTableCounterBits);
                DPRINTF(Tage, "Updating tag table entry (%d,%d) for"
                        " branch %lx\n", bi->hitBank, bi->hitBankIndex,
                        branch_pc);
            }
            if (bi->altBank == 0) {
                baseUpdate(branch_pc, taken, bi);
            }
        }

        // update the u counter
        if (bi->tagePred != bi->altTaken) {
            unsignedCtrUpdate(gtable[bi->hitBank][bi->hitBankIndex].u,
                              bi->tagePred == taken, tagTableUBits);
        }
    } else {
        baseUpdate(branch_pc, taken, bi);
    }
}

void
TAGEBase::updateHistories(ThreadID tid, Addr branch_pc, bool taken,
                          BranchInfo* bi, bool speculative,
                          const StaticInstPtr &inst, Addr target)
{
    if (speculative != speculativeHistUpdate) {
        return;
    }
    ThreadHistory& tHist = threadHistory[tid];
    // UPDATE HISTORIES
    bool pathbit = ((branch_pc >> instShiftAmt) & 1);
    //on a squash, return pointers to this and recompute indices.
    //update user history
    updateGHist(tHist.gHist, taken, tHist.globalHistory, tHist.ptGhist);
    tHist.pathHist = (tHist.pathHist << 1) + pathbit;
    tHist.pathHist = (tHist.pathHist & ((1ULL << pathHistBits) - 1));

    if (speculative) {
        bi->ptGhist = tHist.ptGhist;
        bi->pathHist = tHist.pathHist;
    }

    //prepare next index and tag computations for user branches
    for (int i = 1; i <= nHistoryTables; i++)
    {
        if (speculative) {
            bi->ci[i] = tHist.computeIndices[i].comp;
            bi->ct0[i] = tHist.computeTags[0][i].comp;
            bi->ct1[i] = tHist.computeTags[1][i].comp;
        }
        tHist.computeIndices[i].update(tHist.gHist);
        tHist.computeTags[0][i].update(tHist.gHist);
        tHist.computeTags[1][i].update(tHist.gHist);
    }
    DPRINTF(Tage, "Updating global histories with branch:%lx; taken?:%d, "
            "path Hist: %x; pointer:%d\n", branch_pc, taken, tHist.pathHist,
            tHist.ptGhist);
    assert(threadHistory[tid].gHist ==
           &threadHistory[tid].globalHistory[threadHistory[tid].ptGhist]);
}

void
TAGEBase::squash(ThreadID tid, bool taken, TAGEBase::BranchInfo *bi,
                 Addr target)
{
    if (!speculativeHistUpdate) {
        /* If there are no speculative updates, no actions are needed */
        return;
    }

    ThreadHistory& tHist = threadHistory[tid];
    DPRINTF(Tage, "Restoring branch info: %lx; taken? %d; PathHistory:%x, "
            "pointer:%d\n", bi->branchPC, taken, bi->pathHist, bi->ptGhist);
    tHist.pathHist = bi->pathHist;
    tHist.ptGhist = bi->ptGhist;
    tHist.gHist = &(tHist.globalHistory[tHist.ptGhist]);
    tHist.gHist[0] = (taken ? 1 : 0);
    for (int i = 1; i <= nHistoryTables; i++) {
        tHist.computeIndices[i].comp = bi->ci[i];
        tHist.computeTags[0][i].comp = bi->ct0[i];
        tHist.computeTags[1][i].comp = bi->ct1[i];
        tHist.computeIndices[i].update(tHist.gHist);
        tHist.computeTags[0][i].update(tHist.gHist);
        tHist.computeTags[1][i].update(tHist.gHist);
    }
}

void
TAGEBase::extraAltCalc(BranchInfo* bi)
{
    // do nothing. This is only used in some derived classes
    return;
}

void
TAGEBase::updateStats(bool taken, BranchInfo* bi)
{
    if (taken == bi->tagePred) {
        // correct prediction
        switch (bi->provider) {
          case BIMODAL_ONLY: stats.bimodalProviderCorrect++; break;
          case TAGE_LONGEST_MATCH: stats.longestMatchProviderCorrect++; break;
          case BIMODAL_ALT_MATCH:
            stats.bimodalAltMatchProviderCorrect++;
            break;
          case TAGE_ALT_MATCH: stats.altMatchProviderCorrect++; break;
        }
    } else {
        // wrong prediction
        switch (bi->provider) {
          case BIMODAL_ONLY: stats.bimodalProviderWrong++; break;
          case TAGE_LONGEST_MATCH:
            stats.longestMatchProviderWrong++;
            if (bi->altTaken == taken) {
                stats.altMatchProviderWouldHaveHit++;
            }
            break;
          case BIMODAL_ALT_MATCH:
            stats.bimodalAltMatchProviderWrong++;
            break;
          case TAGE_ALT_MATCH:
            stats.altMatchProviderWrong++;
            break;
        }

        switch (bi->provider) {
          case BIMODAL_ALT_MATCH:
          case TAGE_ALT_MATCH:
            if (bi->longestMatchPred == taken) {
                stats.longestMatchProviderWouldHaveHit++;
            }
        }
    }

    switch (bi->provider) {
      case TAGE_LONGEST_MATCH:
      case TAGE_ALT_MATCH:
        stats.longestMatchProvider[bi->hitBank]++;
        stats.altMatchProvider[bi->altBank]++;
        break;
    }
}

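// Returns the 32 most recent global history bits for this thread, with the
// newest outcome in bit 0.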
unsigned
TAGEBase::getGHR(ThreadID tid, BranchInfo *bi) const
{
    unsigned val = 0;
    for (unsigned i = 0; i < 32; i++) {
        // Make sure we don't go out of bounds
        int gh_offset = bi->ptGhist + i;
        assert(&(threadHistory[tid].globalHistory[gh_offset]) <
               threadHistory[tid].globalHistory + histBufferSize);
        val |= ((threadHistory[tid].globalHistory[gh_offset] & 0x1) << i);
    }

    return val;
}

TAGEBase::TAGEBaseStats::TAGEBaseStats(
    statistics::Group *parent, unsigned nHistoryTables)
    : statistics::Group(parent),
      ADD_STAT(longestMatchProviderCorrect, statistics::units::Count::get(),
               "Number of times TAGE Longest Match is the provider and the "
               "prediction is correct"),
      ADD_STAT(altMatchProviderCorrect, statistics::units::Count::get(),
               "Number of times TAGE Alt Match is the provider and the "
               "prediction is correct"),
      ADD_STAT(bimodalAltMatchProviderCorrect, statistics::units::Count::get(),
               "Number of times TAGE Alt Match is the bimodal and it is the "
               "provider and the prediction is correct"),
      ADD_STAT(bimodalProviderCorrect, statistics::units::Count::get(),
               "Number of times there are no hits on the TAGE tables and the "
               "bimodal prediction is correct"),
      ADD_STAT(longestMatchProviderWrong, statistics::units::Count::get(),
               "Number of times TAGE Longest Match is the provider and the "
               "prediction is wrong"),
      ADD_STAT(altMatchProviderWrong, statistics::units::Count::get(),
               "Number of times TAGE Alt Match is the provider and the "
               "prediction is wrong"),
      ADD_STAT(bimodalAltMatchProviderWrong, statistics::units::Count::get(),
               "Number of times TAGE Alt Match is the bimodal and it is the "
               "provider and the prediction is wrong"),
      ADD_STAT(bimodalProviderWrong, statistics::units::Count::get(),
               "Number of times there are no hits on the TAGE tables and the "
               "bimodal prediction is wrong"),
      ADD_STAT(altMatchProviderWouldHaveHit, statistics::units::Count::get(),
               "Number of times TAGE Longest Match is the provider, the "
               "prediction is wrong and Alt Match prediction was correct"),
      ADD_STAT(longestMatchProviderWouldHaveHit, statistics::units::Count::get(),
               "Number of times TAGE Alt Match is the provider, the "
               "prediction is wrong and Longest Match prediction was correct"),
      ADD_STAT(longestMatchProvider, statistics::units::Count::get(),
               "TAGE provider for longest match"),
      ADD_STAT(altMatchProvider, statistics::units::Count::get(),
               "TAGE provider for alt match")
{
    longestMatchProvider.init(nHistoryTables + 1);
    altMatchProvider.init(nHistoryTables + 1);
}

int8_t
TAGEBase::getCtr(int hitBank, int hitBankIndex) const
{
    return gtable[hitBank][hitBankIndex].ctr;
}

unsigned
TAGEBase::getTageCtrBits() const
{
    return tagTableCounterBits;
}

int
TAGEBase::getPathHist(ThreadID tid) const
{
    return threadHistory[tid].pathHist;
}

bool
TAGEBase::isSpeculativeUpdateEnabled() const
{
    return speculativeHistUpdate;
}

size_t
TAGEBase::getSizeInBits() const {
    size_t bits = 0;
    for (int i = 1; i <= nHistoryTables; i++) {
        bits += (1 << logTagTableSizes[i]) *
            (tagTableCounterBits + tagTableUBits + tagTableTagWidths[i]);
    }
    uint64_t bimodalTableSize = 1ULL << logTagTableSizes[0];
    bits += numUseAltOnNa * useAltOnNaBits;
    bits += bimodalTableSize;
    bits += (bimodalTableSize >> logRatioBiModalHystEntries);
    bits += histLengths[nHistoryTables];
    bits += pathHistBits;
    bits += logUResetPeriod;
    return bits;
}

} // namespace branch_prediction
} // namespace gem5