tage_base.cc

/*
 * Copyright (c) 2014 The University of Wisconsin
 *
 * Copyright (c) 2006 INRIA (Institut National de Recherche en
 * Informatique et en Automatique / French National Research Institute
 * for Computer Science and Applied Mathematics)
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* @file
 * Implementation of a TAGE branch predictor
 */

#include "cpu/pred/tage_base.hh"

#include "base/intmath.hh"
#include "base/logging.hh"
#include "debug/Fetch.hh"
#include "debug/Tage.hh"

TAGEBase::TAGEBase(const TAGEBaseParams *p)
   : SimObject(p),
     logRatioBiModalHystEntries(p->logRatioBiModalHystEntries),
     nHistoryTables(p->nHistoryTables),
     tagTableCounterBits(p->tagTableCounterBits),
     tagTableUBits(p->tagTableUBits),
     histBufferSize(p->histBufferSize),
     minHist(p->minHist),
     maxHist(p->maxHist),
     pathHistBits(p->pathHistBits),
     tagTableTagWidths(p->tagTableTagWidths),
     logTagTableSizes(p->logTagTableSizes),
     threadHistory(p->numThreads),
     logUResetPeriod(p->logUResetPeriod),
     initialTCounterValue(p->initialTCounterValue),
     numUseAltOnNa(p->numUseAltOnNa),
     useAltOnNaBits(p->useAltOnNaBits),
     maxNumAlloc(p->maxNumAlloc),
     noSkip(p->noSkip),
     speculativeHistUpdate(p->speculativeHistUpdate),
     instShiftAmt(p->instShiftAmt),
     initialized(false)
{
    if (noSkip.empty()) {
        // Set all the tables to enabled by default
        noSkip.resize(nHistoryTables + 1, true);
    }
}

TAGEBase::BranchInfo*
TAGEBase::makeBranchInfo() {
    return new BranchInfo(*this);
}

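// init() sizes and zero-initializes all predictor state: the per-thread
// global/path history buffers, the folded histories, the bimodal table
// (separate prediction and shared hysteresis arrays) and the tagged tables.
// Because it runs after construction, the virtual helpers it calls
// (calculateParameters, initFoldedHistories, buildTageTables) dispatch to
// derived-class overrides where they exist.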
void
TAGEBase::init()
{
    if (initialized) {
       return;
    }
    // Current method for periodically resetting the u counter bits only
    // works for 1 or 2 bits
    // Also make sure that it is not 0
    assert(tagTableUBits <= 2 && (tagTableUBits > 0));

    // we use int type for the path history, so it cannot be more than
    // its size
    assert(pathHistBits <= (sizeof(int)*8));

    // initialize the counter to half of the period
    assert(logUResetPeriod != 0);
    tCounter = initialTCounterValue;

    assert(histBufferSize > maxHist * 2);

    useAltPredForNewlyAllocated.resize(numUseAltOnNa, 0);

    for (auto& history : threadHistory) {
        history.pathHist = 0;
        history.globalHistory = new uint8_t[histBufferSize];
        history.gHist = history.globalHistory;
        memset(history.gHist, 0, histBufferSize);
        history.ptGhist = 0;
    }

    histLengths = new int [nHistoryTables+1];

    calculateParameters();

    assert(tagTableTagWidths.size() == (nHistoryTables+1));
    assert(logTagTableSizes.size() == (nHistoryTables+1));

    // First entry is for the Bimodal table and it is untagged in this
    // implementation
    assert(tagTableTagWidths[0] == 0);

    for (auto& history : threadHistory) {
        history.computeIndices = new FoldedHistory[nHistoryTables+1];
        history.computeTags[0] = new FoldedHistory[nHistoryTables+1];
        history.computeTags[1] = new FoldedHistory[nHistoryTables+1];

        initFoldedHistories(history);
    }

    const uint64_t bimodalTableSize = ULL(1) << logTagTableSizes[0];
    btablePrediction.resize(bimodalTableSize, false);
    btableHysteresis.resize(bimodalTableSize >> logRatioBiModalHystEntries,
                            true);

    gtable = new TageEntry*[nHistoryTables + 1];
    buildTageTables();

    tableIndices = new int [nHistoryTables+1];
    tableTags = new int [nHistoryTables+1];
    initialized = true;
}

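// Folded histories compress each geometric history length down to the width
// of a table index (or tag) by XOR-folding, so that indices and tags can be
// updated incrementally on every branch instead of re-hashing the full
// global history.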
void
TAGEBase::initFoldedHistories(ThreadHistory & history)
{
    for (int i = 1; i <= nHistoryTables; i++) {
        history.computeIndices[i].init(
            histLengths[i], (logTagTableSizes[i]));
        history.computeTags[0][i].init(
            history.computeIndices[i].origLength, tagTableTagWidths[i]);
        history.computeTags[1][i].init(
            history.computeIndices[i].origLength, tagTableTagWidths[i]-1);
        DPRINTF(Tage, "HistLength:%d, TTSize:%d, TTTWidth:%d\n",
                histLengths[i], logTagTableSizes[i], tagTableTagWidths[i]);
    }
}

void
TAGEBase::buildTageTables()
{
    for (int i = 1; i <= nHistoryTables; i++) {
        gtable[i] = new TageEntry[1<<(logTagTableSizes[i])];
    }
}

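// History lengths form a geometric series between minHist and maxHist:
// histLengths[i] = minHist * (maxHist/minHist)^((i-1)/(nHistoryTables-1)),
// rounded to the nearest integer. For example, with minHist = 5,
// maxHist = 130 and nHistoryTables = 7 (illustrative values), the lengths
// come out to roughly 5, 9, 15, 25, 44, 76 and 130 bits.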
void
TAGEBase::calculateParameters()
{
    histLengths[1] = minHist;
    histLengths[nHistoryTables] = maxHist;

    for (int i = 2; i <= nHistoryTables; i++) {
        histLengths[i] = (int) (((double) minHist *
                    pow ((double) (maxHist) / (double) minHist,
                        (double) (i - 1) / (double) ((nHistoryTables - 1))))
                    + 0.5);
    }
}

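// On a BTB miss the most recently inserted (speculative) history bit is
// forced to not-taken and the folded histories are rebuilt from the values
// checkpointed in BranchInfo at prediction time.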
void
TAGEBase::btbUpdate(ThreadID tid, Addr branch_pc, BranchInfo* &bi)
{
    if (speculativeHistUpdate) {
        ThreadHistory& tHist = threadHistory[tid];
        DPRINTF(Tage, "BTB miss resets prediction: %lx\n", branch_pc);
        assert(tHist.gHist == &tHist.globalHistory[tHist.ptGhist]);
        tHist.gHist[0] = 0;
        for (int i = 1; i <= nHistoryTables; i++) {
            tHist.computeIndices[i].comp = bi->ci[i];
            tHist.computeTags[0][i].comp = bi->ct0[i];
            tHist.computeTags[1][i].comp = bi->ct1[i];
            tHist.computeIndices[i].update(tHist.gHist);
            tHist.computeTags[0][i].update(tHist.gHist);
            tHist.computeTags[1][i].update(tHist.gHist);
        }
    }
}

int
TAGEBase::bindex(Addr pc_in) const
{
    return ((pc_in >> instShiftAmt) & ((ULL(1) << (logTagTableSizes[0])) - 1));
}

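// F() mixes the (truncated) path history into a table index: the path bits
// are split at logTagTableSizes[bank] bits, each part is rotated by the bank
// number within the index width, and the parts are XORed, so every tagged
// table sees a differently shuffled version of the same path history.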
int
TAGEBase::F(int A, int size, int bank) const
{
    int A1, A2;

    A = A & ((ULL(1) << size) - 1);
    A1 = (A & ((ULL(1) << logTagTableSizes[bank]) - 1));
    A2 = (A >> logTagTableSizes[bank]);
    A2 = ((A2 << bank) & ((ULL(1) << logTagTableSizes[bank]) - 1))
       + (A2 >> (logTagTableSizes[bank] - bank));
    A = A1 ^ A2;
    A = ((A << bank) & ((ULL(1) << logTagTableSizes[bank]) - 1))
      + (A >> (logTagTableSizes[bank] - bank));
    return (A);
}

// gindex computes a full hash of pc, ghist and pathHist
int
TAGEBase::gindex(ThreadID tid, Addr pc, int bank) const
{
    int index;
    int hlen = (histLengths[bank] > pathHistBits) ? pathHistBits :
               histLengths[bank];
    const unsigned int shiftedPc = pc >> instShiftAmt;
    index =
        shiftedPc ^
        (shiftedPc >> ((int) abs(logTagTableSizes[bank] - bank) + 1)) ^
        threadHistory[tid].computeIndices[bank].comp ^
        F(threadHistory[tid].pathHist, hlen, bank);

    return (index & ((ULL(1) << (logTagTableSizes[bank])) - 1));
}

// Tag computation
uint16_t
TAGEBase::gtag(ThreadID tid, Addr pc, int bank) const
{
    int tag = (pc >> instShiftAmt) ^
              threadHistory[tid].computeTags[0][bank].comp ^
              (threadHistory[tid].computeTags[1][bank].comp << 1);

    return (tag & ((ULL(1) << tagTableTagWidths[bank]) - 1));
}

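// ctrUpdate implements an nbits-wide signed saturating counter: a taken
// outcome increments it up to 2^(nbits-1) - 1, a not-taken outcome
// decrements it down to -2^(nbits-1). Elsewhere in this file a value >= 0
// is read as a taken prediction.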
// Up-down saturating counter
template<typename T>
void
TAGEBase::ctrUpdate(T & ctr, bool taken, int nbits)
{
    assert(nbits <= sizeof(T) << 3);
    if (taken) {
        if (ctr < ((1 << (nbits - 1)) - 1))
            ctr++;
    } else {
        if (ctr > -(1 << (nbits - 1)))
            ctr--;
    }
}

// int8_t and int versions of this function may be needed
template void TAGEBase::ctrUpdate(int8_t & ctr, bool taken, int nbits);
template void TAGEBase::ctrUpdate(int & ctr, bool taken, int nbits);

// Up-down unsigned saturating counter
void
TAGEBase::unsignedCtrUpdate(uint8_t & ctr, bool up, unsigned nbits)
{
    assert(nbits <= sizeof(uint8_t) << 3);
    if (up) {
        if (ctr < ((1 << nbits) - 1))
            ctr++;
    } else {
        if (ctr)
            ctr--;
    }
}

// Bimodal prediction
bool
TAGEBase::getBimodePred(Addr pc, BranchInfo* bi) const
{
    return btablePrediction[bi->bimodalIndex];
}

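// The bimodal table is effectively a 2-bit counter split across two arrays:
// a per-entry prediction (direction) bit and a hysteresis bit shared by
// 2^logRatioBiModalHystEntries neighbouring entries. baseUpdate rebuilds the
// 2-bit value, saturates it in [0, 3], and writes the two bits back.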
// Update the bimodal predictor: a hysteresis bit is shared among N prediction
// bits (N = 2 ^ logRatioBiModalHystEntries)
void
TAGEBase::baseUpdate(Addr pc, bool taken, BranchInfo* bi)
{
    int inter = (btablePrediction[bi->bimodalIndex] << 1)
        + btableHysteresis[bi->bimodalIndex >> logRatioBiModalHystEntries];
    if (taken) {
        if (inter < 3)
            inter++;
    } else if (inter > 0) {
        inter--;
    }
    const bool pred = inter >> 1;
    const bool hyst = inter & 1;
    btablePrediction[bi->bimodalIndex] = pred;
    btableHysteresis[bi->bimodalIndex >> logRatioBiModalHystEntries] = hyst;
    DPRINTF(Tage, "Updating branch %lx, pred:%d, hyst:%d\n", pc, pred, hyst);
}

// shifting the global history: we manage the history in a big table in order
// to reduce simulation time
void
TAGEBase::updateGHist(uint8_t * &h, bool dir, uint8_t * tab, int &pt)
{
    if (pt == 0) {
        DPRINTF(Tage, "Rolling over the histories\n");
        // Copy beginning of globalHistoryBuffer to end, such that
        // the last maxHist outcomes are still reachable
        // through pt[0 .. maxHist - 1].
        for (int i = 0; i < maxHist; i++)
            tab[histBufferSize - maxHist + i] = tab[i];
        pt = histBufferSize - maxHist;
        h = &tab[pt];
    }
    pt--;
    h--;
    h[0] = (dir) ? 1 : 0;
}

void
TAGEBase::calculateIndicesAndTags(ThreadID tid, Addr branch_pc,
                                  BranchInfo* bi)
{
    // computes the table addresses and the partial tags
    for (int i = 1; i <= nHistoryTables; i++) {
        tableIndices[i] = gindex(tid, branch_pc, i);
        bi->tableIndices[i] = tableIndices[i];
        tableTags[i] = gtag(tid, branch_pc, i);
        bi->tableTags[i] = tableTags[i];
    }
}

unsigned
TAGEBase::getUseAltIdx(BranchInfo* bi, Addr branch_pc)
{
    // There is only 1 counter on the base TAGE implementation
    return 0;
}

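// Prediction flow: probe every tagged table with its own index and partial
// tag, take the hitting table with the longest history as the provider, and
// the next-longest hit (or the bimodal table) as the alternate. A weak
// provider counter marks a "pseudo newly allocated" entry; in that case the
// useAltPredForNewlyAllocated counter decides whether the alternate
// prediction is used instead of the longest match.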
bool
TAGEBase::tagePredict(ThreadID tid, Addr branch_pc,
                      bool cond_branch, BranchInfo* bi)
{
    Addr pc = branch_pc;
    bool pred_taken = true;

    if (cond_branch) {
        // TAGE prediction

        calculateIndicesAndTags(tid, pc, bi);

        bi->bimodalIndex = bindex(pc);

        bi->hitBank = 0;
        bi->altBank = 0;
        //Look for the bank with longest matching history
        for (int i = nHistoryTables; i > 0; i--) {
            if (noSkip[i] &&
                gtable[i][tableIndices[i]].tag == tableTags[i]) {
                bi->hitBank = i;
                bi->hitBankIndex = tableIndices[bi->hitBank];
                break;
            }
        }
        //Look for the alternate bank
        for (int i = bi->hitBank - 1; i > 0; i--) {
            if (noSkip[i] &&
                gtable[i][tableIndices[i]].tag == tableTags[i]) {
                bi->altBank = i;
                bi->altBankIndex = tableIndices[bi->altBank];
                break;
            }
        }
        //computes the prediction and the alternate prediction
        if (bi->hitBank > 0) {
            if (bi->altBank > 0) {
                bi->altTaken =
                    gtable[bi->altBank][tableIndices[bi->altBank]].ctr >= 0;
                extraAltCalc(bi);
            } else {
                bi->altTaken = getBimodePred(pc, bi);
            }

            bi->longestMatchPred =
                gtable[bi->hitBank][tableIndices[bi->hitBank]].ctr >= 0;
            bi->pseudoNewAlloc =
                abs(2 * gtable[bi->hitBank][bi->hitBankIndex].ctr + 1) <= 1;

            //if the entry is recognized as a newly allocated entry and
            //useAltPredForNewlyAllocated is positive use the alternate
            //prediction
            if ((useAltPredForNewlyAllocated[getUseAltIdx(bi, branch_pc)] < 0)
                || ! bi->pseudoNewAlloc) {
                bi->tagePred = bi->longestMatchPred;
                bi->provider = TAGE_LONGEST_MATCH;
            } else {
                bi->tagePred = bi->altTaken;
                bi->provider = bi->altBank ? TAGE_ALT_MATCH
                                           : BIMODAL_ALT_MATCH;
            }
        } else {
            bi->altTaken = getBimodePred(pc, bi);
            bi->tagePred = bi->altTaken;
            bi->longestMatchPred = bi->altTaken;
            bi->provider = BIMODAL_ONLY;
        }
        //end TAGE prediction

        pred_taken = (bi->tagePred);
        DPRINTF(Tage, "Predict for %lx: taken?:%d, tagePred:%d, altPred:%d\n",
                branch_pc, pred_taken, bi->tagePred, bi->altTaken);
    }
    bi->branchPC = branch_pc;
    bi->condBranch = cond_branch;
    return pred_taken;
}

void
TAGEBase::adjustAlloc(bool & alloc, bool taken, bool pred_taken)
{
    // Nothing for this base class implementation
}

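// On a misprediction, try to allocate up to maxNumAlloc entries in tables
// with longer histories than the provider. The starting table is picked
// pseudo-randomly among the next few tables (using nrand) to avoid
// ping-pong allocation, and only entries whose useful counter is zero are
// taken; if none is free, one useful counter is forced to zero instead.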
void
TAGEBase::handleAllocAndUReset(bool alloc, bool taken, BranchInfo* bi,
                               int nrand)
{
    if (alloc) {
        // is there some "unuseful" entry to allocate
        uint8_t min = 1;
        for (int i = nHistoryTables; i > bi->hitBank; i--) {
            if (gtable[i][bi->tableIndices[i]].u < min) {
                min = gtable[i][bi->tableIndices[i]].u;
            }
        }

        // we allocate an entry with a longer history
        // to avoid ping-pong, we do not choose systematically the next
        // entry, but among the 3 next entries
        int Y = nrand &
            ((ULL(1) << (nHistoryTables - bi->hitBank - 1)) - 1);
        int X = bi->hitBank + 1;
        if (Y & 1) {
            X++;
            if (Y & 2)
                X++;
        }
        // No entry available, forces one to be available
        if (min > 0) {
            gtable[X][bi->tableIndices[X]].u = 0;
        }

        //Allocate entries
        unsigned numAllocated = 0;
        for (int i = X; i <= nHistoryTables; i++) {
            if ((gtable[i][bi->tableIndices[i]].u == 0)) {
                gtable[i][bi->tableIndices[i]].tag = bi->tableTags[i];
                gtable[i][bi->tableIndices[i]].ctr = (taken) ? 0 : -1;
                ++numAllocated;
                if (numAllocated == maxNumAlloc) {
                    break;
                }
            }
        }
    }

    tCounter++;

    handleUReset();
}

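// Every 2^logUResetPeriod updates (tracked by tCounter) the useful counters
// of all tagged entries are aged by a right shift rather than being cleared
// outright, so the reset is gradual.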
void
TAGEBase::handleUReset()
{
    //periodic reset of u: reset is not complete but bit by bit
    if ((tCounter & ((ULL(1) << logUResetPeriod) - 1)) == 0) {
        // reset least significant bit
        // most significant bit becomes least significant bit
        for (int i = 1; i <= nHistoryTables; i++) {
            for (int j = 0; j < (ULL(1) << logTagTableSizes[i]); j++) {
                resetUctr(gtable[i][j].u);
            }
        }
    }
}

void
TAGEBase::resetUctr(uint8_t & u)
{
    u >>= 1;
}

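// Update entry point for conditional branches: decide whether new entries
// should be allocated, train the useAltPredForNewlyAllocated chooser when
// the provider entry looked newly allocated, and then update the counters
// of the provider, alternate and bimodal entries.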
void
TAGEBase::condBranchUpdate(ThreadID tid, Addr branch_pc, bool taken,
    BranchInfo* bi, int nrand, Addr corrTarget, bool pred, bool preAdjustAlloc)
{
    // TAGE UPDATE
    // try to allocate new entries only if the prediction was wrong
    bool alloc = (bi->tagePred != taken) && (bi->hitBank < nHistoryTables);

    if (preAdjustAlloc) {
        adjustAlloc(alloc, taken, pred);
    }

    if (bi->hitBank > 0) {
        // Manage the selection between longest matching and alternate
        // matching for "pseudo"-newly allocated longest matching entry
        bool PseudoNewAlloc = bi->pseudoNewAlloc;
        // an entry is considered as newly allocated if its prediction
        // counter is weak
        if (PseudoNewAlloc) {
            if (bi->longestMatchPred == taken) {
                alloc = false;
            }
            // if it was delivering the correct prediction, no need to
            // allocate a new entry even if the overall prediction was false
            if (bi->longestMatchPred != bi->altTaken) {
                ctrUpdate(
                    useAltPredForNewlyAllocated[getUseAltIdx(bi, branch_pc)],
                    bi->altTaken == taken, useAltOnNaBits);
            }
        }
    }

    if (!preAdjustAlloc) {
        adjustAlloc(alloc, taken, pred);
    }

    handleAllocAndUReset(alloc, taken, bi, nrand);

    handleTAGEUpdate(branch_pc, taken, bi);
}

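// Train the provider entry's prediction counter; if its useful bit is zero,
// also train the alternate (tagged or bimodal) entry. The useful counter is
// only touched when provider and alternate disagreed, rewarding the provider
// when it was the one that got the branch right.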
void
TAGEBase::handleTAGEUpdate(Addr branch_pc, bool taken, BranchInfo* bi)
{
    if (bi->hitBank > 0) {
        DPRINTF(Tage, "Updating tag table entry (%d,%d) for branch %lx\n",
                bi->hitBank, bi->hitBankIndex, branch_pc);
        ctrUpdate(gtable[bi->hitBank][bi->hitBankIndex].ctr, taken,
                  tagTableCounterBits);
        // if the provider entry is not certified to be useful also update
        // the alternate prediction
        if (gtable[bi->hitBank][bi->hitBankIndex].u == 0) {
            if (bi->altBank > 0) {
                ctrUpdate(gtable[bi->altBank][bi->altBankIndex].ctr, taken,
                          tagTableCounterBits);
                DPRINTF(Tage, "Updating tag table entry (%d,%d) for"
                        " branch %lx\n", bi->hitBank, bi->hitBankIndex,
                        branch_pc);
            }
            if (bi->altBank == 0) {
                baseUpdate(branch_pc, taken, bi);
            }
        }

        // update the u counter
        if (bi->tagePred != bi->altTaken) {
            unsignedCtrUpdate(gtable[bi->hitBank][bi->hitBankIndex].u,
                              bi->tagePred == taken, tagTableUBits);
        }
    } else {
        baseUpdate(branch_pc, taken, bi);
    }
}

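// Shift one direction bit into the global history and one PC bit into the
// path history, then incrementally update every folded history. When called
// speculatively, the history pointers and folded values needed to rebuild
// this state are checkpointed in BranchInfo so it can be restored on a
// squash.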
void
TAGEBase::updateHistories(ThreadID tid, Addr branch_pc, bool taken,
                          BranchInfo* bi, bool speculative,
                          const StaticInstPtr &inst, Addr target)
{
    if (speculative != speculativeHistUpdate) {
        return;
    }
    ThreadHistory& tHist = threadHistory[tid];
    //  UPDATE HISTORIES
    bool pathbit = ((branch_pc >> instShiftAmt) & 1);
    //on a squash, return pointers to this and recompute indices.
    //update user history
    updateGHist(tHist.gHist, taken, tHist.globalHistory, tHist.ptGhist);
    tHist.pathHist = (tHist.pathHist << 1) + pathbit;
    tHist.pathHist = (tHist.pathHist & ((ULL(1) << pathHistBits) - 1));

    if (speculative) {
        bi->ptGhist = tHist.ptGhist;
        bi->pathHist = tHist.pathHist;
    }

    //prepare next index and tag computations for user branches
    for (int i = 1; i <= nHistoryTables; i++)
    {
        if (speculative) {
            bi->ci[i]  = tHist.computeIndices[i].comp;
            bi->ct0[i] = tHist.computeTags[0][i].comp;
            bi->ct1[i] = tHist.computeTags[1][i].comp;
        }
        tHist.computeIndices[i].update(tHist.gHist);
        tHist.computeTags[0][i].update(tHist.gHist);
        tHist.computeTags[1][i].update(tHist.gHist);
    }
    DPRINTF(Tage, "Updating global histories with branch:%lx; taken?:%d, "
            "path Hist: %x; pointer:%d\n", branch_pc, taken, tHist.pathHist,
            tHist.ptGhist);
    assert(threadHistory[tid].gHist ==
           &threadHistory[tid].globalHistory[threadHistory[tid].ptGhist]);
}

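// Roll the speculative history state back to the checkpoint taken at
// prediction time, then re-insert the resolved outcome and recompute the
// folded histories from the restored values.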
void
TAGEBase::squash(ThreadID tid, bool taken, TAGEBase::BranchInfo *bi,
                 Addr target)
{
    if (!speculativeHistUpdate) {
        /* If there are no speculative updates, no actions are needed */
        return;
    }

    ThreadHistory& tHist = threadHistory[tid];
    DPRINTF(Tage, "Restoring branch info: %lx; taken? %d; PathHistory:%x, "
            "pointer:%d\n", bi->branchPC, taken, bi->pathHist, bi->ptGhist);
    tHist.pathHist = bi->pathHist;
    tHist.ptGhist = bi->ptGhist;
    tHist.gHist = &(tHist.globalHistory[tHist.ptGhist]);
    tHist.gHist[0] = (taken ? 1 : 0);
    for (int i = 1; i <= nHistoryTables; i++) {
        tHist.computeIndices[i].comp = bi->ci[i];
        tHist.computeTags[0][i].comp = bi->ct0[i];
        tHist.computeTags[1][i].comp = bi->ct1[i];
        tHist.computeIndices[i].update(tHist.gHist);
        tHist.computeTags[0][i].update(tHist.gHist);
        tHist.computeTags[1][i].update(tHist.gHist);
    }
}

void
TAGEBase::extraAltCalc(BranchInfo* bi)
{
    // do nothing. This is only used in some derived classes
    return;
}

void
TAGEBase::updateStats(bool taken, BranchInfo* bi)
{
    if (taken == bi->tagePred) {
        // correct prediction
        switch (bi->provider) {
          case BIMODAL_ONLY: tageBimodalProviderCorrect++; break;
          case TAGE_LONGEST_MATCH: tageLongestMatchProviderCorrect++; break;
          case BIMODAL_ALT_MATCH: bimodalAltMatchProviderCorrect++; break;
          case TAGE_ALT_MATCH: tageAltMatchProviderCorrect++; break;
        }
    } else {
        // wrong prediction
        switch (bi->provider) {
          case BIMODAL_ONLY: tageBimodalProviderWrong++; break;
          case TAGE_LONGEST_MATCH:
            tageLongestMatchProviderWrong++;
            if (bi->altTaken == taken) {
                tageAltMatchProviderWouldHaveHit++;
            }
            break;
          case BIMODAL_ALT_MATCH:
            bimodalAltMatchProviderWrong++;
            break;
          case TAGE_ALT_MATCH:
            tageAltMatchProviderWrong++;
            break;
        }

        switch (bi->provider) {
          case BIMODAL_ALT_MATCH:
          case TAGE_ALT_MATCH:
            if (bi->longestMatchPred == taken) {
                tageLongestMatchProviderWouldHaveHit++;
            }
        }
    }

    switch (bi->provider) {
      case TAGE_LONGEST_MATCH:
      case TAGE_ALT_MATCH:
        tageLongestMatchProvider[bi->hitBank]++;
        tageAltMatchProvider[bi->altBank]++;
        break;
    }
}

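// Pack the 32 most recent global history bits into an unsigned value, newest
// outcome in bit 0 (the history buffer grows towards lower indices, so index
// ptGhist holds the most recent outcome).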
unsigned
TAGEBase::getGHR(ThreadID tid, BranchInfo *bi) const
{
    unsigned val = 0;
    for (unsigned i = 0; i < 32; i++) {
        // Make sure we don't go out of bounds
        int gh_offset = bi->ptGhist + i;
        assert(&(threadHistory[tid].globalHistory[gh_offset]) <
               threadHistory[tid].globalHistory + histBufferSize);

        val |= ((threadHistory[tid].globalHistory[gh_offset] & 0x1) << i);
    }

    return val;
}

void
TAGEBase::regStats()
{
    tageLongestMatchProviderCorrect
        .name(name() + ".tageLongestMatchProviderCorrect")
        .desc("Number of times TAGE Longest Match is the provider and "
              "the prediction is correct");

    tageAltMatchProviderCorrect
        .name(name() + ".tageAltMatchProviderCorrect")
        .desc("Number of times TAGE Alt Match is the provider and "
              "the prediction is correct");

    bimodalAltMatchProviderCorrect
        .name(name() + ".bimodalAltMatchProviderCorrect")
        .desc("Number of times TAGE Alt Match is the bimodal and it is the "
              "provider and the prediction is correct");

    tageBimodalProviderCorrect
        .name(name() + ".tageBimodalProviderCorrect")
        .desc("Number of times there are no hits on the TAGE tables "
              "and the bimodal prediction is correct");

    tageLongestMatchProviderWrong
        .name(name() + ".tageLongestMatchProviderWrong")
        .desc("Number of times TAGE Longest Match is the provider and "
              "the prediction is wrong");

    tageAltMatchProviderWrong
        .name(name() + ".tageAltMatchProviderWrong")
        .desc("Number of times TAGE Alt Match is the provider and "
              "the prediction is wrong");

    bimodalAltMatchProviderWrong
        .name(name() + ".bimodalAltMatchProviderWrong")
        .desc("Number of times TAGE Alt Match is the bimodal and it is the "
              "provider and the prediction is wrong");

    tageBimodalProviderWrong
        .name(name() + ".tageBimodalProviderWrong")
        .desc("Number of times there are no hits on the TAGE tables "
              "and the bimodal prediction is wrong");

    tageAltMatchProviderWouldHaveHit
        .name(name() + ".tageAltMatchProviderWouldHaveHit")
        .desc("Number of times TAGE Longest Match is the provider, "
              "the prediction is wrong and Alt Match prediction was correct");

    tageLongestMatchProviderWouldHaveHit
        .name(name() + ".tageLongestMatchProviderWouldHaveHit")
        .desc("Number of times TAGE Alt Match is the provider, the "
              "prediction is wrong and Longest Match prediction was correct");

    tageLongestMatchProvider
        .init(nHistoryTables + 1)
        .name(name() + ".tageLongestMatchProvider")
        .desc("TAGE provider for longest match");

    tageAltMatchProvider
        .init(nHistoryTables + 1)
        .name(name() + ".tageAltMatchProvider")
        .desc("TAGE provider for alt match");
}

int8_t
TAGEBase::getCtr(int hitBank, int hitBankIndex) const
{
    return gtable[hitBank][hitBankIndex].ctr;
}

unsigned
TAGEBase::getTageCtrBits() const
{
    return tagTableCounterBits;
}

int
TAGEBase::getPathHist(ThreadID tid) const
{
    return threadHistory[tid].pathHist;
}

bool
TAGEBase::isSpeculativeUpdateEnabled() const
{
    return speculativeHistUpdate;
}

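// Rough storage accounting for the predictor: per-entry bits of every tagged
// table, the useAltPredForNewlyAllocated counters, the bimodal prediction
// and hysteresis arrays, plus the global history, path history and the u
// reset counter.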
size_t
TAGEBase::getSizeInBits() const {
    size_t bits = 0;
    for (int i = 1; i <= nHistoryTables; i++) {
        bits += (1 << logTagTableSizes[i]) *
            (tagTableCounterBits + tagTableUBits + tagTableTagWidths[i]);
    }
    uint64_t bimodalTableSize = ULL(1) << logTagTableSizes[0];
    bits += numUseAltOnNa * useAltOnNaBits;
    bits += bimodalTableSize;
    bits += (bimodalTableSize >> logRatioBiModalHystEntries);
    bits += histLengths[nHistoryTables];
    bits += pathHistBits;
    bits += logUResetPeriod;
    return bits;
}

TAGEBase*
TAGEBaseParams::create()
{
    return new TAGEBase(this);
}