gem5  v22.1.0.0
i8254xGBe.cc
1 /*
2  * Copyright (c) 2006 The Regents of The University of Michigan
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions are
7  * met: redistributions of source code must retain the above copyright
8  * notice, this list of conditions and the following disclaimer;
9  * redistributions in binary form must reproduce the above copyright
10  * notice, this list of conditions and the following disclaimer in the
11  * documentation and/or other materials provided with the distribution;
12  * neither the name of the copyright holders nor the names of its
13  * contributors may be used to endorse or promote products derived from
14  * this software without specific prior written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27  */
28 
29 /* @file
30  * Device model for Intel's 8254x line of gigabit ethernet controllers.
31  * In particular an 82547 revision 2 (82547GI) MAC because it seems to have the
32  * fewest workarounds in the driver. It will probably work with most of the
33  * other MACs with slight modifications.
34  */
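/*
 * Overview of the model: PIO accesses to BAR0 read and write the register
 * file handled in read()/write() below. RX and TX descriptor rings are
 * mirrored in small DescCache objects that DMA descriptors in and out of
 * guest memory, while rxStateMachine(), txStateMachine() and the FIFOs move
 * packet data between the wire and memory, all driven from tick().
 */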
35 
36 #include "dev/net/i8254xGBe.hh"
37 
38 /*
39  * @todo really there are multiple dma engines.. we should implement them.
40  */
41 
42 #include <algorithm>
43 #include <memory>
44 
45 #include "base/inet.hh"
46 #include "base/trace.hh"
47 #include "debug/Drain.hh"
48 #include "debug/EthernetAll.hh"
49 #include "mem/packet.hh"
50 #include "mem/packet_access.hh"
51 #include "params/IGbE.hh"
52 #include "sim/stats.hh"
53 #include "sim/system.hh"
54 
55 namespace gem5
56 {
57 
58 using namespace igbreg;
59 using namespace networking;
60 
 61 IGbE::IGbE(const Params &p)
 62  : EtherDevice(p), etherInt(NULL),
63  rxFifo(p.rx_fifo_size), txFifo(p.tx_fifo_size), inTick(false),
64  rxTick(false), txTick(false), txFifoTick(false), rxDmaPacket(false),
65  pktOffset(0), fetchDelay(p.fetch_delay), wbDelay(p.wb_delay),
66  fetchCompDelay(p.fetch_comp_delay), wbCompDelay(p.wb_comp_delay),
67  rxWriteDelay(p.rx_write_delay), txReadDelay(p.tx_read_delay),
68  rdtrEvent([this]{ rdtrProcess(); }, name()),
69  radvEvent([this]{ radvProcess(); }, name()),
70  tadvEvent([this]{ tadvProcess(); }, name()),
71  tidvEvent([this]{ tidvProcess(); }, name()),
72  tickEvent([this]{ tick(); }, name()),
73  interEvent([this]{ delayIntEvent(); }, name()),
74  rxDescCache(this, name()+".RxDesc", p.rx_desc_cache_size),
75  txDescCache(this, name()+".TxDesc", p.tx_desc_cache_size),
76  lastInterrupt(0)
77 {
78  etherInt = new IGbEInt(name() + ".int", this);
79 
 80  // Initialize internal registers per Intel documentation
 81  // All registers initialized to 0 by each register's constructor
82  regs.ctrl.fd(1);
83  regs.ctrl.lrst(1);
84  regs.ctrl.speed(2);
85  regs.ctrl.frcspd(1);
86  regs.sts.speed(3); // Say we're 1000Mbps
87  regs.sts.fd(1); // full duplex
88  regs.sts.lu(1); // link up
89  regs.eecd.fwe(1);
90  regs.eecd.ee_type(1);
91  regs.imr = 0;
92  regs.iam = 0;
93  regs.rxdctl.gran(1);
94  regs.rxdctl.wthresh(1);
95  regs.fcrth(1);
96  regs.tdwba = 0;
97  regs.rlpml = 0;
98  regs.sw_fw_sync = 0;
99 
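 // Packet buffer allocation: the model splits a 64 KB on-chip packet buffer
 // (values are in KB), defaulting to 48 KB (0x30) for receive and 16 KB
 // (0x10) for transmit; a write to PBA below re-splits the 64 KB.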
100  regs.pba.rxa(0x30);
101  regs.pba.txa(0x10);
102 
103  eeOpBits = 0;
104  eeAddrBits = 0;
105  eeDataBits = 0;
106  eeOpcode = 0;
107 
 108  // Clear all 64 16-bit words of the EEPROM
109  memset(&flash, 0, EEPROM_SIZE * 2);
110 
111  // Set the MAC address
112  memcpy(flash, p.hardware_address.bytes(), ETH_ADDR_LEN);
113  for (int x = 0; x < ETH_ADDR_LEN / 2; x++)
114  flash[x] = htobe(flash[x]);
115 
116  uint16_t csum = 0;
117  for (int x = 0; x < EEPROM_SIZE; x++)
118  csum += htobe(flash[x]);
119 
120 
121  // Magic happy checksum value
122  flash[EEPROM_SIZE - 1] = htobe((uint16_t)(EEPROM_CSUM - csum));
123 
124  // Store the MAC address as queue ID
125  macAddr = p.hardware_address;
126 
127  rxFifo.clear();
128  txFifo.clear();
129 }
130 
 131 IGbE::~IGbE()
 132 {
133  delete etherInt;
134 }
135 
136 void
 137 IGbE::init()
 138 {
139  PciDevice::init();
140 }
141 
142 Port &
143 IGbE::getPort(const std::string &if_name, PortID idx)
144 {
145  if (if_name == "interface")
146  return *etherInt;
147  return EtherDevice::getPort(if_name, idx);
148 }
149 
150 Tick
 151 IGbE::writeConfig(PacketPtr pkt)
 152 {
 153  int offset = pkt->getAddr() & PCI_CONFIG_SIZE;
 154  if (offset < PCI_DEVICE_SPECIFIC)
 155  PciDevice::writeConfig(pkt);
 156  else
157  panic("Device specific PCI config space not implemented.\n");
158 
159  //
 160  // Some work may need to be done here based on the PCI COMMAND bits.
161  //
162 
163  return configDelay;
164 }
165 
166 // Handy macro for range-testing register access addresses
167 #define IN_RANGE(val, base, len) (val >= base && val < (base + len))
168 
169 Tick
 170 IGbE::read(PacketPtr pkt)
 171 {
172  int bar;
173  Addr daddr;
174 
175  if (!getBAR(pkt->getAddr(), bar, daddr))
176  panic("Invalid PCI memory access to unmapped memory.\n");
177 
178  // Only Memory register BAR is allowed
179  assert(bar == 0);
180 
181  // Only 32bit accesses allowed
182  assert(pkt->getSize() == 4);
183 
184  DPRINTF(Ethernet, "Read device register %#X\n", daddr);
185 
186  //
187  // Handle read of register here
188  //
189 
190 
191  switch (daddr) {
192  case REG_CTRL:
193  pkt->setLE<uint32_t>(regs.ctrl());
194  break;
195  case REG_STATUS:
196  pkt->setLE<uint32_t>(regs.sts());
197  break;
198  case REG_EECD:
199  pkt->setLE<uint32_t>(regs.eecd());
200  break;
201  case REG_EERD:
202  pkt->setLE<uint32_t>(regs.eerd());
203  break;
204  case REG_CTRL_EXT:
205  pkt->setLE<uint32_t>(regs.ctrl_ext());
206  break;
207  case REG_MDIC:
208  pkt->setLE<uint32_t>(regs.mdic());
209  break;
210  case REG_ICR:
211  DPRINTF(Ethernet, "Reading ICR. ICR=%#x IMR=%#x IAM=%#x IAME=%d\n",
212  regs.icr(), regs.imr, regs.iam, regs.ctrl_ext.iame());
213  pkt->setLE<uint32_t>(regs.icr());
214  if (regs.icr.int_assert() || regs.imr == 0) {
215  regs.icr = regs.icr() & ~mask(30);
216  DPRINTF(Ethernet, "Cleared ICR. ICR=%#x\n", regs.icr());
217  }
218  if (regs.ctrl_ext.iame() && regs.icr.int_assert())
219  regs.imr &= ~regs.iam;
220  chkInterrupt();
221  break;
222  case REG_EICR:
223  // This is only useful for MSI, but the driver reads it every time
224  // Just don't do anything
225  pkt->setLE<uint32_t>(0);
226  break;
227  case REG_ITR:
228  pkt->setLE<uint32_t>(regs.itr());
229  break;
230  case REG_RCTL:
231  pkt->setLE<uint32_t>(regs.rctl());
232  break;
233  case REG_FCTTV:
234  pkt->setLE<uint32_t>(regs.fcttv());
235  break;
236  case REG_TCTL:
237  pkt->setLE<uint32_t>(regs.tctl());
238  break;
239  case REG_PBA:
240  pkt->setLE<uint32_t>(regs.pba());
241  break;
242  case REG_WUC:
243  case REG_WUFC:
 244  case REG_WUS:
 245  case REG_LEDCTL:
 246  pkt->setLE<uint32_t>(0); // We don't care, so just return 0
247  break;
248  case REG_FCRTL:
249  pkt->setLE<uint32_t>(regs.fcrtl());
250  break;
251  case REG_FCRTH:
252  pkt->setLE<uint32_t>(regs.fcrth());
253  break;
254  case REG_RDBAL:
255  pkt->setLE<uint32_t>(regs.rdba.rdbal());
256  break;
257  case REG_RDBAH:
258  pkt->setLE<uint32_t>(regs.rdba.rdbah());
259  break;
260  case REG_RDLEN:
261  pkt->setLE<uint32_t>(regs.rdlen());
262  break;
263  case REG_SRRCTL:
264  pkt->setLE<uint32_t>(regs.srrctl());
265  break;
266  case REG_RDH:
267  pkt->setLE<uint32_t>(regs.rdh());
268  break;
269  case REG_RDT:
270  pkt->setLE<uint32_t>(regs.rdt());
271  break;
272  case REG_RDTR:
273  pkt->setLE<uint32_t>(regs.rdtr());
 274  if (regs.rdtr.fpd()) {
 275  rxDescCache.writeback(0);
 276  DPRINTF(EthernetIntr,
 277  "Posting interrupt because of RDTR.FPD write\n");
 278  postInterrupt(IT_RXT);
 279  regs.rdtr.fpd(0);
280  }
281  break;
282  case REG_RXDCTL:
283  pkt->setLE<uint32_t>(regs.rxdctl());
284  break;
285  case REG_RADV:
286  pkt->setLE<uint32_t>(regs.radv());
287  break;
288  case REG_TDBAL:
289  pkt->setLE<uint32_t>(regs.tdba.tdbal());
290  break;
291  case REG_TDBAH:
292  pkt->setLE<uint32_t>(regs.tdba.tdbah());
293  break;
294  case REG_TDLEN:
295  pkt->setLE<uint32_t>(regs.tdlen());
296  break;
297  case REG_TDH:
298  pkt->setLE<uint32_t>(regs.tdh());
299  break;
300  case REG_TXDCA_CTL:
301  pkt->setLE<uint32_t>(regs.txdca_ctl());
302  break;
303  case REG_TDT:
304  pkt->setLE<uint32_t>(regs.tdt());
305  break;
306  case REG_TIDV:
307  pkt->setLE<uint32_t>(regs.tidv());
308  break;
309  case REG_TXDCTL:
310  pkt->setLE<uint32_t>(regs.txdctl());
311  break;
312  case REG_TADV:
313  pkt->setLE<uint32_t>(regs.tadv());
314  break;
315  case REG_TDWBAL:
316  pkt->setLE<uint32_t>(regs.tdwba & mask(32));
317  break;
318  case REG_TDWBAH:
319  pkt->setLE<uint32_t>(regs.tdwba >> 32);
320  break;
321  case REG_RXCSUM:
322  pkt->setLE<uint32_t>(regs.rxcsum());
323  break;
324  case REG_RLPML:
325  pkt->setLE<uint32_t>(regs.rlpml);
326  break;
327  case REG_RFCTL:
328  pkt->setLE<uint32_t>(regs.rfctl());
329  break;
330  case REG_MANC:
331  pkt->setLE<uint32_t>(regs.manc());
332  break;
333  case REG_SWSM:
334  pkt->setLE<uint32_t>(regs.swsm());
335  regs.swsm.smbi(1);
336  break;
337  case REG_FWSM:
338  pkt->setLE<uint32_t>(regs.fwsm());
339  break;
340  case REG_SWFWSYNC:
341  pkt->setLE<uint32_t>(regs.sw_fw_sync);
342  break;
343  default:
344  if (!IN_RANGE(daddr, REG_VFTA, VLAN_FILTER_TABLE_SIZE * 4) &&
345  !IN_RANGE(daddr, REG_RAL, RCV_ADDRESS_TABLE_SIZE * 8) &&
346  !IN_RANGE(daddr, REG_MTA, MULTICAST_TABLE_SIZE * 4) &&
348  panic("Read request to unknown register number: %#x\n", daddr);
349  else
350  pkt->setLE<uint32_t>(0);
351  };
352 
353  pkt->makeAtomicResponse();
354  return pioDelay;
355 }
356 
357 Tick
 358 IGbE::write(PacketPtr pkt)
 359 {
360  int bar;
361  Addr daddr;
362 
363 
364  if (!getBAR(pkt->getAddr(), bar, daddr))
365  panic("Invalid PCI memory access to unmapped memory.\n");
366 
367  // Only Memory register BAR is allowed
368  assert(bar == 0);
369 
370  // Only 32bit accesses allowed
371  assert(pkt->getSize() == sizeof(uint32_t));
372 
373  DPRINTF(Ethernet, "Wrote device register %#X value %#X\n",
374  daddr, pkt->getLE<uint32_t>());
375 
376  //
377  // Handle write of register here
378  //
379  uint32_t val = pkt->getLE<uint32_t>();
380 
381  Regs::RCTL oldrctl;
382  Regs::TCTL oldtctl;
383 
384  switch (daddr) {
385  case REG_CTRL:
386  regs.ctrl = val;
387  if (regs.ctrl.tfce())
388  warn("TX Flow control enabled, should implement\n");
389  if (regs.ctrl.rfce())
390  warn("RX Flow control enabled, should implement\n");
391  break;
392  case REG_CTRL_EXT:
393  regs.ctrl_ext = val;
394  break;
395  case REG_STATUS:
396  regs.sts = val;
397  break;
398  case REG_EECD:
399  int oldClk;
400  oldClk = regs.eecd.sk();
401  regs.eecd = val;
 402  // See if this is an EEPROM access and emulate it accordingly
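 // Serial EEPROM emulation: on each rising edge of the EEPROM clock (sk)
 // one bit is shifted in from din. The first 8 bits form the opcode, the
 // next 8 the address for a READ, after which the selected 16-bit word is
 // shifted out MSB-first on dout (an RDSR opcode just returns 0 status bits).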
403  if (!oldClk && regs.eecd.sk()) {
404  if (eeOpBits < 8) {
405  eeOpcode = eeOpcode << 1 | regs.eecd.din();
406  eeOpBits++;
407  } else if (eeAddrBits < 8 && eeOpcode == EEPROM_READ_OPCODE_SPI) {
408  eeAddr = eeAddr << 1 | regs.eecd.din();
409  eeAddrBits++;
410  } else if (eeDataBits < 16 && eeOpcode == EEPROM_READ_OPCODE_SPI) {
411  assert(eeAddr >> 1 < EEPROM_SIZE);
412  DPRINTF(EthernetEEPROM, "EEPROM bit read: %d word: %#X\n",
413  flash[eeAddr >> 1] >> eeDataBits & 0x1,
414  flash[eeAddr >> 1]);
415  regs.eecd.dout(
416  (flash[eeAddr >> 1] >> (15 - eeDataBits)) & 0x1);
417  eeDataBits++;
418  } else if (eeDataBits < 8 && eeOpcode == EEPROM_RDSR_OPCODE_SPI) {
419  regs.eecd.dout(0);
420  eeDataBits++;
421  } else
422  panic("What's going on with eeprom interface? opcode:"
423  " %#x:%d addr: %#x:%d, data: %d\n", (uint32_t)eeOpcode,
424  (uint32_t)eeOpBits, (uint32_t)eeAddr,
425  (uint32_t)eeAddrBits, (uint32_t)eeDataBits);
426 
427  // Reset everything for the next command
428  if ((eeDataBits == 16 && eeOpcode == EEPROM_READ_OPCODE_SPI) ||
 429  (eeDataBits == 8 && eeOpcode == EEPROM_RDSR_OPCODE_SPI)) {
 430  eeOpBits = 0;
431  eeAddrBits = 0;
432  eeDataBits = 0;
433  eeOpcode = 0;
434  eeAddr = 0;
435  }
436 
437  DPRINTF(EthernetEEPROM, "EEPROM: opcode: %#X:%d addr: %#X:%d\n",
438  (uint32_t)eeOpcode, (uint32_t) eeOpBits,
439  (uint32_t)eeAddr >> 1, (uint32_t)eeAddrBits);
440  if (eeOpBits == 8 && !(eeOpcode == EEPROM_READ_OPCODE_SPI ||
442  panic("Unknown eeprom opcode: %#X:%d\n", (uint32_t)eeOpcode,
443  (uint32_t)eeOpBits);
444 
445 
446  }
 447  // If the driver requests EEPROM access, grant it immediately
448  regs.eecd.ee_gnt(regs.eecd.ee_req());
449  break;
450  case REG_EERD:
451  regs.eerd = val;
452  if (regs.eerd.start()) {
453  regs.eerd.done(1);
454  assert(regs.eerd.addr() < EEPROM_SIZE);
455  regs.eerd.data(flash[regs.eerd.addr()]);
456  regs.eerd.start(0);
457  DPRINTF(EthernetEEPROM, "EEPROM: read addr: %#X data %#x\n",
458  regs.eerd.addr(), regs.eerd.data());
459  }
460  break;
461  case REG_MDIC:
462  regs.mdic = val;
463  if (regs.mdic.i())
464  panic("No support for interrupt on mdic complete\n");
465  if (regs.mdic.phyadd() != 1)
466  panic("No support for reading anything but phy\n");
467  DPRINTF(Ethernet, "%s phy address %x\n",
468  regs.mdic.op() == 1 ? "Writing" : "Reading",
469  regs.mdic.regadd());
470  switch (regs.mdic.regadd()) {
471  case PHY_PSTATUS:
472  regs.mdic.data(0x796D); // link up
473  break;
474  case PHY_PID:
475  regs.mdic.data(params().phy_pid);
476  break;
477  case PHY_EPID:
478  regs.mdic.data(params().phy_epid);
479  break;
480  case PHY_GSTATUS:
481  regs.mdic.data(0x7C00);
482  break;
483  case PHY_EPSTATUS:
484  regs.mdic.data(0x3000);
485  break;
486  case PHY_AGC:
487  regs.mdic.data(0x180); // some random length
488  break;
489  default:
490  regs.mdic.data(0);
491  }
492  regs.mdic.r(1);
493  break;
494  case REG_ICR:
495  DPRINTF(Ethernet, "Writing ICR. ICR=%#x IMR=%#x IAM=%#x IAME=%d\n",
496  regs.icr(), regs.imr, regs.iam, regs.ctrl_ext.iame());
497  if (regs.ctrl_ext.iame())
498  regs.imr &= ~regs.iam;
499  regs.icr = ~bits(val, 30, 0) & regs.icr();
500  chkInterrupt();
501  break;
502  case REG_ITR:
503  regs.itr = val;
504  break;
505  case REG_ICS:
506  DPRINTF(EthernetIntr, "Posting interrupt because of ICS write\n");
 507  postInterrupt((IntTypes)val);
 508  break;
509  case REG_IMS:
510  regs.imr |= val;
511  chkInterrupt();
512  break;
513  case REG_IMC:
514  regs.imr &= ~val;
515  chkInterrupt();
516  break;
517  case REG_IAM:
518  regs.iam = val;
519  break;
520  case REG_RCTL:
521  oldrctl = regs.rctl;
522  regs.rctl = val;
523  if (regs.rctl.rst()) {
524  rxDescCache.reset();
525  DPRINTF(EthernetSM, "RXS: Got RESET!\n");
526  rxFifo.clear();
527  regs.rctl.rst(0);
528  }
529  if (regs.rctl.en())
530  rxTick = true;
531  restartClock();
532  break;
533  case REG_FCTTV:
534  regs.fcttv = val;
535  break;
536  case REG_TCTL:
537  regs.tctl = val;
538  oldtctl = regs.tctl;
539  regs.tctl = val;
540  if (regs.tctl.en())
541  txTick = true;
542  restartClock();
543  if (regs.tctl.en() && !oldtctl.en()) {
544  txDescCache.reset();
545  }
546  break;
547  case REG_PBA:
548  regs.pba.rxa(val);
549  regs.pba.txa(64 - regs.pba.rxa());
550  break;
551  case REG_WUC:
552  case REG_WUFC:
553  case REG_WUS:
554  case REG_LEDCTL:
555  case REG_FCAL:
556  case REG_FCAH:
557  case REG_FCT:
558  case REG_VET:
559  case REG_AIFS:
560  case REG_TIPG:
561  ; // We don't care, so don't store anything
562  break;
563  case REG_IVAR0:
564  warn("Writing to IVAR0, ignoring...\n");
565  break;
566  case REG_FCRTL:
567  regs.fcrtl = val;
568  break;
569  case REG_FCRTH:
570  regs.fcrth = val;
571  break;
572  case REG_RDBAL:
573  regs.rdba.rdbal(val & ~mask(4));
 574  rxDescCache.areaChanged();
 575  break;
576  case REG_RDBAH:
577  regs.rdba.rdbah(val);
 578  rxDescCache.areaChanged();
 579  break;
580  case REG_RDLEN:
581  regs.rdlen = val & ~mask(7);
 582  rxDescCache.areaChanged();
 583  break;
584  case REG_SRRCTL:
585  regs.srrctl = val;
586  break;
587  case REG_RDH:
588  regs.rdh = val;
 589  rxDescCache.areaChanged();
 590  break;
591  case REG_RDT:
592  regs.rdt = val;
593  DPRINTF(EthernetSM, "RXS: RDT Updated.\n");
594  if (drainState() == DrainState::Running) {
595  DPRINTF(EthernetSM, "RXS: RDT Fetching Descriptors!\n");
 596  rxDescCache.fetchDescriptors();
 597  } else {
598  DPRINTF(EthernetSM, "RXS: RDT NOT Fetching Desc b/c draining!\n");
599  }
600  break;
601  case REG_RDTR:
602  regs.rdtr = val;
603  break;
604  case REG_RADV:
605  regs.radv = val;
606  break;
607  case REG_RXDCTL:
608  regs.rxdctl = val;
609  break;
610  case REG_TDBAL:
611  regs.tdba.tdbal(val & ~mask(4));
 612  txDescCache.areaChanged();
 613  break;
614  case REG_TDBAH:
615  regs.tdba.tdbah(val);
 616  txDescCache.areaChanged();
 617  break;
618  case REG_TDLEN:
619  regs.tdlen = val & ~mask(7);
 620  txDescCache.areaChanged();
 621  break;
622  case REG_TDH:
623  regs.tdh = val;
 624  txDescCache.areaChanged();
 625  break;
626  case REG_TXDCA_CTL:
627  regs.txdca_ctl = val;
628  if (regs.txdca_ctl.enabled())
629  panic("No support for DCA\n");
630  break;
631  case REG_TDT:
632  regs.tdt = val;
633  DPRINTF(EthernetSM, "TXS: TX Tail pointer updated\n");
634  if (drainState() == DrainState::Running) {
635  DPRINTF(EthernetSM, "TXS: TDT Fetching Descriptors!\n");
 636  txDescCache.fetchDescriptors();
 637  } else {
638  DPRINTF(EthernetSM, "TXS: TDT NOT Fetching Desc b/c draining!\n");
639  }
640  break;
641  case REG_TIDV:
642  regs.tidv = val;
643  break;
644  case REG_TXDCTL:
645  regs.txdctl = val;
646  break;
647  case REG_TADV:
648  regs.tadv = val;
649  break;
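 // TDWBAL/TDWBAH together hold the 64-bit TX descriptor completion
 // write-back address; bit 0 of the combined value acts as the enable for
 // head write-back (see completionWriteback() below).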
650  case REG_TDWBAL:
651  regs.tdwba &= ~mask(32);
652  regs.tdwba |= val;
 653  txDescCache.completionWriteback(regs.tdwba & ~mask(1),
 654  regs.tdwba & mask(1));
655  break;
656  case REG_TDWBAH:
657  regs.tdwba &= mask(32);
658  regs.tdwba |= (uint64_t)val << 32;
 659  txDescCache.completionWriteback(regs.tdwba & ~mask(1),
 660  regs.tdwba & mask(1));
661  break;
662  case REG_RXCSUM:
663  regs.rxcsum = val;
664  break;
665  case REG_RLPML:
666  regs.rlpml = val;
667  break;
668  case REG_RFCTL:
669  regs.rfctl = val;
670  if (regs.rfctl.exsten())
671  panic("Extended RX descriptors not implemented\n");
672  break;
673  case REG_MANC:
674  regs.manc = val;
675  break;
676  case REG_SWSM:
677  regs.swsm = val;
678  if (regs.fwsm.eep_fw_semaphore())
679  regs.swsm.swesmbi(0);
680  break;
681  case REG_SWFWSYNC:
682  regs.sw_fw_sync = val;
683  break;
684  default:
685  if (!IN_RANGE(daddr, REG_VFTA, VLAN_FILTER_TABLE_SIZE * 4) &&
686  !IN_RANGE(daddr, REG_RAL, RCV_ADDRESS_TABLE_SIZE * 8) &&
687  !IN_RANGE(daddr, REG_MTA, MULTICAST_TABLE_SIZE * 4))
688  panic("Write request to unknown register number: %#x\n", daddr);
689  };
690 
691  pkt->makeAtomicResponse();
692  return pioDelay;
693 }
694 
695 void
 696 IGbE::postInterrupt(IntTypes t, bool now)
 697 {
698  assert(t);
699 
700  // Interrupt is already pending
701  if (t & regs.icr() && !now)
702  return;
703 
704  regs.icr = regs.icr() | t;
705 
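 // Interrupt throttling: ITR.interval is in units of 256 ns, so e.g. an
 // interval of 1000 enforces at least 256 us between interrupts. If the
 // last interrupt was closer than that, the post is deferred by scheduling
 // interEvent for lastInterrupt + itr_interval instead of firing now.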
706  Tick itr_interval = sim_clock::as_int::ns * 256 * regs.itr.interval();
707  DPRINTF(EthernetIntr,
708  "EINT: postInterrupt() curTick(): %d itr: %d interval: %d\n",
709  curTick(), regs.itr.interval(), itr_interval);
710 
711  if (regs.itr.interval() == 0 || now ||
712  lastInterrupt + itr_interval <= curTick()) {
713  if (interEvent.scheduled()) {
 714  deschedule(interEvent);
 715  }
716  cpuPostInt();
717  } else {
718  Tick int_time = lastInterrupt + itr_interval;
719  assert(int_time > 0);
720  DPRINTF(EthernetIntr, "EINT: Scheduling timer interrupt for tick %d\n",
721  int_time);
722  if (!interEvent.scheduled()) {
723  schedule(interEvent, int_time);
724  }
725  }
726 }
727 
728 void
 729 IGbE::delayIntEvent()
 730 {
731  cpuPostInt();
732 }
733 
734 
735 void
 736 IGbE::cpuPostInt()
 737 {
738 
 739  etherDeviceStats.postedInterrupts++;
 740 
741  if (!(regs.icr() & regs.imr)) {
742  DPRINTF(Ethernet, "Interrupt Masked. Not Posting\n");
743  return;
744  }
745 
746  DPRINTF(Ethernet, "Posting Interrupt\n");
747 
748 
 749  if (interEvent.scheduled()) {
 750  deschedule(interEvent);
 751  }
 752 
 753  if (rdtrEvent.scheduled()) {
 754  regs.icr.rxt0(1);
 755  deschedule(rdtrEvent);
 756  }
 757  if (radvEvent.scheduled()) {
 758  regs.icr.rxt0(1);
 759  deschedule(radvEvent);
 760  }
 761  if (tadvEvent.scheduled()) {
 762  regs.icr.txdw(1);
 763  deschedule(tadvEvent);
 764  }
 765  if (tidvEvent.scheduled()) {
 766  regs.icr.txdw(1);
 767  deschedule(tidvEvent);
 768  }
 769 
 770  regs.icr.int_assert(1);
 771  DPRINTF(EthernetIntr, "EINT: Posting interrupt to CPU now. Vector %#x\n",
 772  regs.icr());
 773 
 774  intrPost();
 775 
 776  lastInterrupt = curTick();
 777 }
778 
779 void
 780 IGbE::cpuClearInt()
 781 {
782  if (regs.icr.int_assert()) {
783  regs.icr.int_assert(0);
784  DPRINTF(EthernetIntr,
785  "EINT: Clearing interrupt to CPU now. Vector %#x\n",
786  regs.icr());
787  intrClear();
788  }
789 }
790 
791 void
 792 IGbE::chkInterrupt()
 793 {
794  DPRINTF(Ethernet, "Checking interrupts icr: %#x imr: %#x\n", regs.icr(),
795  regs.imr);
796  // Check if we need to clear the cpu interrupt
797  if (!(regs.icr() & regs.imr)) {
798  DPRINTF(Ethernet, "Mask cleaned all interrupts\n");
799  if (interEvent.scheduled())
 800  deschedule(interEvent);
 801  if (regs.icr.int_assert())
802  cpuClearInt();
803  }
804  DPRINTF(Ethernet, "ITR = %#X itr.interval = %#X\n",
805  regs.itr(), regs.itr.interval());
806 
807  if (regs.icr() & regs.imr) {
808  if (regs.itr.interval() == 0) {
809  cpuPostInt();
810  } else {
811  DPRINTF(Ethernet,
812  "Possibly scheduling interrupt because of imr write\n");
813  if (!interEvent.scheduled()) {
814  Tick t = curTick() +
815  sim_clock::as_int::ns * 256 * regs.itr.interval();
816  DPRINTF(Ethernet, "Scheduling for %d\n", t);
 817  schedule(interEvent, t);
 818  }
819  }
820  }
821 }
822 
823 
825 
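/*
 * DescCache<T> models the small on-chip cache of RX/TX descriptors. New
 * descriptors are DMAed from the host ring into unusedCache by
 * fetchDescriptors(); as packets are processed they move to usedCache and
 * are eventually DMAed back (and the ring head pointer advanced) by
 * writeback()/wbComplete().
 */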
826 template<class T>
827 IGbE::DescCache<T>::DescCache(IGbE *i, const std::string n, int s)
828  : igbe(i), _name(n), cachePnt(0), size(s), curFetching(0),
829  wbOut(0), moreToWb(false), wbAlignment(0), pktPtr(NULL),
830  wbDelayEvent([this]{ writeback1(); }, n),
831  fetchDelayEvent([this]{ fetchDescriptors1(); }, n),
832  fetchEvent([this]{ fetchComplete(); }, n),
833  wbEvent([this]{ wbComplete(); }, n)
834 {
835  fetchBuf = new T[size];
836  wbBuf = new T[size];
837 }
838 
839 template<class T>
841 {
842  reset();
843  delete[] fetchBuf;
844  delete[] wbBuf;
845 }
846 
847 template<class T>
848 void
850 {
851  if (usedCache.size() > 0 || curFetching || wbOut)
852  panic("Descriptor Address, Length or Head changed. Bad\n");
853  reset();
854 
855 }
856 
857 template<class T>
858 void
 859 IGbE::DescCache<T>::writeback(Addr aMask)
 860 {
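 // aMask is the alignment mask for this writeback (0 means write back
 // everything). If a writeback is already in flight, only remember the
 // less restrictive alignment and let wbComplete() issue the follow-up.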
861  int curHead = descHead();
862  int max_to_wb = usedCache.size();
863 
 864  // Check if this writeback is less restrictive than the previous
 865  // one and if so set up another one immediately following it
866  if (wbOut) {
867  if (aMask < wbAlignment) {
868  moreToWb = true;
869  wbAlignment = aMask;
870  }
871  DPRINTF(EthernetDesc,
872  "Writing back already in process, returning\n");
873  return;
874  }
875 
876  moreToWb = false;
877  wbAlignment = aMask;
878 
879 
880  DPRINTF(EthernetDesc, "Writing back descriptors head: %d tail: "
881  "%d len: %d cachePnt: %d max_to_wb: %d descleft: %d\n",
882  curHead, descTail(), descLen(), cachePnt, max_to_wb,
883  descLeft());
884 
885  if (max_to_wb + curHead >= descLen()) {
886  max_to_wb = descLen() - curHead;
887  moreToWb = true;
888  // this is by definition aligned correctly
889  } else if (wbAlignment != 0) {
890  // align the wb point to the mask
891  max_to_wb = max_to_wb & ~wbAlignment;
892  }
893 
894  DPRINTF(EthernetDesc, "Writing back %d descriptors\n", max_to_wb);
895 
896  if (max_to_wb <= 0)
897  return;
898 
899  wbOut = max_to_wb;
900 
901  assert(!wbDelayEvent.scheduled());
902  igbe->schedule(wbDelayEvent, curTick() + igbe->wbDelay);
903 }
904 
905 template<class T>
906 void
908 {
909  // If we're draining delay issuing this DMA
910  if (igbe->drainState() != DrainState::Running) {
911  igbe->schedule(wbDelayEvent, curTick() + igbe->wbDelay);
912  return;
913  }
914 
915  DPRINTF(EthernetDesc, "Begining DMA of %d descriptors\n", wbOut);
916 
917  for (int x = 0; x < wbOut; x++) {
918  assert(usedCache.size());
919  memcpy(&wbBuf[x], usedCache[x], sizeof(T));
920  }
921 
922 
923  assert(wbOut);
924  igbe->dmaWrite(pciToDma(descBase() + descHead() * sizeof(T)),
925  wbOut * sizeof(T), &wbEvent, (uint8_t *)wbBuf,
926  igbe->wbCompDelay);
927 }
928 
929 template<class T>
930 void
 931 IGbE::DescCache<T>::fetchDescriptors()
 932 {
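 // Fetch as many descriptors as are available between cachePnt and the
 // tail pointer (without wrapping past the end of the ring), limited by
 // the free space remaining in the cache.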
933  size_t max_to_fetch;
934 
935  if (curFetching) {
936  DPRINTF(EthernetDesc,
937  "Currently fetching %d descriptors, returning\n",
938  curFetching);
939  return;
940  }
941 
942  if (descTail() >= cachePnt)
943  max_to_fetch = descTail() - cachePnt;
944  else
945  max_to_fetch = descLen() - cachePnt;
946 
947  size_t free_cache = size - usedCache.size() - unusedCache.size();
948 
949  max_to_fetch = std::min(max_to_fetch, free_cache);
950 
951 
952  DPRINTF(EthernetDesc, "Fetching descriptors head: %d tail: "
953  "%d len: %d cachePnt: %d max_to_fetch: %d descleft: %d\n",
954  descHead(), descTail(), descLen(), cachePnt,
955  max_to_fetch, descLeft());
956 
957  // Nothing to do
958  if (max_to_fetch == 0)
959  return;
960 
961  // So we don't have two descriptor fetches going on at once
962  curFetching = max_to_fetch;
963 
964  assert(!fetchDelayEvent.scheduled());
965  igbe->schedule(fetchDelayEvent, curTick() + igbe->fetchDelay);
966 }
967 
968 template<class T>
969 void
971 {
972  // If we're draining delay issuing this DMA
973  if (igbe->drainState() != DrainState::Running) {
974  igbe->schedule(fetchDelayEvent, curTick() + igbe->fetchDelay);
975  return;
976  }
977 
978  DPRINTF(EthernetDesc, "Fetching descriptors at %#x (%#x), size: %#x\n",
979  descBase() + cachePnt * sizeof(T),
980  pciToDma(descBase() + cachePnt * sizeof(T)),
981  curFetching * sizeof(T));
982  assert(curFetching);
983  igbe->dmaRead(pciToDma(descBase() + cachePnt * sizeof(T)),
984  curFetching * sizeof(T), &fetchEvent, (uint8_t *)fetchBuf,
985  igbe->fetchCompDelay);
986 }
987 
988 template<class T>
989 void
991 {
992  T *newDesc;
993  for (int x = 0; x < curFetching; x++) {
994  newDesc = new T;
995  memcpy(newDesc, &fetchBuf[x], sizeof(T));
996  unusedCache.push_back(newDesc);
997  }
998 
999 
1000  int oldCp = cachePnt;
1001 
1002  cachePnt += curFetching;
1003  assert(cachePnt <= descLen());
1004  if (cachePnt == descLen())
1005  cachePnt = 0;
1006 
1007  curFetching = 0;
1008 
1009  DPRINTF(EthernetDesc, "Fetching complete cachePnt %d -> %d\n",
1010  oldCp, cachePnt);
1011 
1012  enableSm();
1013  igbe->checkDrain();
1014 }
1015 
1016 template<class T>
1017 void
1019 {
1020 
1021  long curHead = descHead();
1022  long oldHead = curHead;
1023 
1024  for (int x = 0; x < wbOut; x++) {
1025  assert(usedCache.size());
1026  delete usedCache[0];
1027  usedCache.pop_front();
1028  }
1029 
1030  curHead += wbOut;
1031  wbOut = 0;
1032 
1033  if (curHead >= descLen())
1034  curHead -= descLen();
1035 
1036  // Update the head
1037  updateHead(curHead);
1038 
1039  DPRINTF(EthernetDesc, "Writeback complete curHead %d -> %d\n",
1040  oldHead, curHead);
1041 
1042  // If we still have more to wb, call wb now
1043  actionAfterWb();
1044  if (moreToWb) {
1045  moreToWb = false;
1046  DPRINTF(EthernetDesc, "Writeback has more todo\n");
1047  writeback(wbAlignment);
1048  }
1049 
1050  if (!wbOut)
1051  igbe->checkDrain();
1052  fetchAfterWb();
1053 }
1054 
1055 template<class T>
1056 void
 1057 IGbE::DescCache<T>::reset()
 1058 {
 1059  DPRINTF(EthernetDesc, "Resetting descriptor cache\n");
1060  for (typename CacheType::size_type x = 0; x < usedCache.size(); x++)
1061  delete usedCache[x];
1062  for (typename CacheType::size_type x = 0; x < unusedCache.size(); x++)
1063  delete unusedCache[x];
1064 
1065  usedCache.clear();
1066  unusedCache.clear();
1067 
1068  cachePnt = 0;
1069 
1070 }
1071 
1072 template<class T>
1073 void
1075 {
1076  SERIALIZE_SCALAR(cachePnt);
1077  SERIALIZE_SCALAR(curFetching);
1078  SERIALIZE_SCALAR(wbOut);
1079  SERIALIZE_SCALAR(moreToWb);
1080  SERIALIZE_SCALAR(wbAlignment);
1081 
1082  typename CacheType::size_type usedCacheSize = usedCache.size();
1083  SERIALIZE_SCALAR(usedCacheSize);
1084  for (typename CacheType::size_type x = 0; x < usedCacheSize; x++) {
1085  arrayParamOut(cp, csprintf("usedCache_%d", x),
1086  (uint8_t *)usedCache[x], sizeof(T));
1087  }
1088 
1089  typename CacheType::size_type unusedCacheSize = unusedCache.size();
1090  SERIALIZE_SCALAR(unusedCacheSize);
1091  for (typename CacheType::size_type x = 0; x < unusedCacheSize; x++) {
1092  arrayParamOut(cp, csprintf("unusedCache_%d", x),
1093  (uint8_t *)unusedCache[x], sizeof(T));
1094  }
1095 
1096  Tick fetch_delay = 0, wb_delay = 0;
1097  if (fetchDelayEvent.scheduled())
1098  fetch_delay = fetchDelayEvent.when();
1099  SERIALIZE_SCALAR(fetch_delay);
1100  if (wbDelayEvent.scheduled())
1101  wb_delay = wbDelayEvent.when();
1102  SERIALIZE_SCALAR(wb_delay);
1103 
1104 
1105 }
1106 
1107 template<class T>
1108 void
1110 {
1111  UNSERIALIZE_SCALAR(cachePnt);
1112  UNSERIALIZE_SCALAR(curFetching);
1113  UNSERIALIZE_SCALAR(wbOut);
1114  UNSERIALIZE_SCALAR(moreToWb);
1115  UNSERIALIZE_SCALAR(wbAlignment);
1116 
1117  typename CacheType::size_type usedCacheSize;
1118  UNSERIALIZE_SCALAR(usedCacheSize);
1119  T *temp;
1120  for (typename CacheType::size_type x = 0; x < usedCacheSize; x++) {
1121  temp = new T;
1122  arrayParamIn(cp, csprintf("usedCache_%d", x),
1123  (uint8_t *)temp, sizeof(T));
1124  usedCache.push_back(temp);
1125  }
1126 
1127  typename CacheType::size_type unusedCacheSize;
1128  UNSERIALIZE_SCALAR(unusedCacheSize);
1129  for (typename CacheType::size_type x = 0; x < unusedCacheSize; x++) {
1130  temp = new T;
1131  arrayParamIn(cp, csprintf("unusedCache_%d", x),
1132  (uint8_t *)temp, sizeof(T));
1133  unusedCache.push_back(temp);
1134  }
1135  Tick fetch_delay = 0, wb_delay = 0;
1136  UNSERIALIZE_SCALAR(fetch_delay);
1137  UNSERIALIZE_SCALAR(wb_delay);
1138  if (fetch_delay)
1139  igbe->schedule(fetchDelayEvent, fetch_delay);
1140  if (wb_delay)
1141  igbe->schedule(wbDelayEvent, wb_delay);
1142 
1143 
1144 }
1145 
1147 
1148 IGbE::RxDescCache::RxDescCache(IGbE *i, const std::string n, int s)
1149  : DescCache<RxDesc>(i, n, s), pktDone(false), splitCount(0),
1150  pktEvent([this]{ pktComplete(); }, n),
1151  pktHdrEvent([this]{ pktSplitDone(); }, n),
1152  pktDataEvent([this]{ pktSplitDone(); }, n)
1153 
1154 {
1155  annSmFetch = "RX Desc Fetch";
1156  annSmWb = "RX Desc Writeback";
1157  annUnusedDescQ = "RX Unused Descriptors";
1158  annUnusedCacheQ = "RX Unused Descriptor Cache";
1159  annUsedCacheQ = "RX Used Descriptor Cache";
1160  annUsedDescQ = "RX Used Descriptors";
1161  annDescQ = "RX Descriptors";
1162 }
1163 
1164 void
 1165 IGbE::RxDescCache::pktSplitDone()
 1166 {
1167  splitCount++;
1168  DPRINTF(EthernetDesc,
1169  "Part of split packet done: splitcount now %d\n", splitCount);
1170  assert(splitCount <= 2);
1171  if (splitCount != 2)
1172  return;
1173  splitCount = 0;
1174  DPRINTF(EthernetDesc,
1175  "Part of split packet done: calling pktComplete()\n");
1176  pktComplete();
1177 }
1178 
1179 int
 1180 IGbE::RxDescCache::writePacket(EthPacketPtr packet, int pkt_offset)
 1181 {
1182  assert(unusedCache.size());
1183  //if (!unusedCache.size())
1184  // return false;
1185 
1186  pktPtr = packet;
1187  pktDone = false;
1188  unsigned buf_len, hdr_len;
1189 
1190  RxDesc *desc = unusedCache.front();
1191  switch (igbe->regs.srrctl.desctype()) {
1192  case RXDT_LEGACY:
1193  assert(pkt_offset == 0);
1194  bytesCopied = packet->length;
1195  DPRINTF(EthernetDesc, "Packet Length: %d Desc Size: %d\n",
1196  packet->length, igbe->regs.rctl.descSize());
1197  assert(packet->length < igbe->regs.rctl.descSize());
1198  igbe->dmaWrite(pciToDma(desc->legacy.buf),
1199  packet->length, &pktEvent, packet->data,
1200  igbe->rxWriteDelay);
1201  break;
1202  case RXDT_ADV_ONEBUF:
1203  assert(pkt_offset == 0);
1204  bytesCopied = packet->length;
1205  buf_len = igbe->regs.rctl.lpe() ? igbe->regs.srrctl.bufLen() :
1206  igbe->regs.rctl.descSize();
1207  DPRINTF(EthernetDesc, "Packet Length: %d srrctl: %#x Desc Size: %d\n",
1208  packet->length, igbe->regs.srrctl(), buf_len);
1209  assert(packet->length < buf_len);
1210  igbe->dmaWrite(pciToDma(desc->adv_read.pkt),
1211  packet->length, &pktEvent, packet->data,
1212  igbe->rxWriteDelay);
1213  desc->adv_wb.header_len = htole(0);
1214  desc->adv_wb.sph = htole(0);
1215  desc->adv_wb.pkt_len = htole((uint16_t)(pktPtr->length));
1216  break;
1217  case RXDT_ADV_SPLIT_A:
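 // Advanced split descriptors: hsplit() finds the end of the protocol
 // headers; the header bytes go to the descriptor's header buffer and the
 // remaining payload to its packet buffer, possibly spanning several
 // descriptors via pkt_offset.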
1218  int split_point;
1219 
1220  buf_len = igbe->regs.rctl.lpe() ? igbe->regs.srrctl.bufLen() :
1221  igbe->regs.rctl.descSize();
1222  hdr_len = igbe->regs.rctl.lpe() ? igbe->regs.srrctl.hdrLen() : 0;
1223  DPRINTF(EthernetDesc,
1224  "lpe: %d Packet Length: %d offset: %d srrctl: %#x "
1225  "hdr addr: %#x Hdr Size: %d desc addr: %#x Desc Size: %d\n",
1226  igbe->regs.rctl.lpe(), packet->length, pkt_offset,
1227  igbe->regs.srrctl(), desc->adv_read.hdr, hdr_len,
1228  desc->adv_read.pkt, buf_len);
1229 
1230  split_point = hsplit(pktPtr);
1231 
1232  if (packet->length <= hdr_len) {
1233  bytesCopied = packet->length;
1234  assert(pkt_offset == 0);
1235  DPRINTF(EthernetDesc, "Hdr split: Entire packet in header\n");
1236  igbe->dmaWrite(pciToDma(desc->adv_read.hdr),
1237  packet->length, &pktEvent, packet->data,
1238  igbe->rxWriteDelay);
1239  desc->adv_wb.header_len = htole((uint16_t)packet->length);
1240  desc->adv_wb.sph = htole(0);
1241  desc->adv_wb.pkt_len = htole(0);
1242  } else if (split_point) {
1243  if (pkt_offset) {
1244  // we are only copying some data, header/data has already been
1245  // copied
1246  int max_to_copy =
1247  std::min(packet->length - pkt_offset, buf_len);
1248  bytesCopied += max_to_copy;
1249  DPRINTF(EthernetDesc,
1250  "Hdr split: Continuing data buffer copy\n");
1251  igbe->dmaWrite(pciToDma(desc->adv_read.pkt),
1252  max_to_copy, &pktEvent,
1253  packet->data + pkt_offset, igbe->rxWriteDelay);
1254  desc->adv_wb.header_len = htole(0);
1255  desc->adv_wb.pkt_len = htole((uint16_t)max_to_copy);
1256  desc->adv_wb.sph = htole(0);
1257  } else {
1258  int max_to_copy =
1259  std::min(packet->length - split_point, buf_len);
1260  bytesCopied += max_to_copy + split_point;
1261 
1262  DPRINTF(EthernetDesc, "Hdr split: splitting at %d\n",
1263  split_point);
1264  igbe->dmaWrite(pciToDma(desc->adv_read.hdr),
1265  split_point, &pktHdrEvent,
1266  packet->data, igbe->rxWriteDelay);
1267  igbe->dmaWrite(pciToDma(desc->adv_read.pkt),
1268  max_to_copy, &pktDataEvent,
1269  packet->data + split_point, igbe->rxWriteDelay);
1270  desc->adv_wb.header_len = htole(split_point);
1271  desc->adv_wb.sph = 1;
1272  desc->adv_wb.pkt_len = htole((uint16_t)(max_to_copy));
1273  }
1274  } else {
1275  panic("Header split not fitting within header buffer or "
1276  "undecodable packet not fitting in header unsupported\n");
1277  }
1278  break;
1279  default:
1280  panic("Unimplemnted RX receive buffer type: %d\n",
1281  igbe->regs.srrctl.desctype());
1282  }
1283  return bytesCopied;
1284 
1285 }
1286 
1287 void
 1288 IGbE::RxDescCache::pktComplete()
 1289 {
1290  assert(unusedCache.size());
1291  RxDesc *desc;
1292  desc = unusedCache.front();
1293 
1294  uint16_t crcfixup = igbe->regs.rctl.secrc() ? 0 : 4 ;
1295  DPRINTF(EthernetDesc, "pktPtr->length: %d bytesCopied: %d "
1296  "stripcrc offset: %d value written: %d %d\n",
1297  pktPtr->length, bytesCopied, crcfixup,
1298  htole((uint16_t)(pktPtr->length + crcfixup)),
1299  (uint16_t)(pktPtr->length + crcfixup));
1300 
1301  // no support for anything but starting at 0
1302  assert(igbe->regs.rxcsum.pcss() == 0);
1303 
1304  DPRINTF(EthernetDesc, "Packet written to memory updating Descriptor\n");
1305 
1306  uint16_t status = RXDS_DD;
1307  uint8_t err = 0;
1308  uint16_t ext_err = 0;
1309  uint16_t csum = 0;
1310  uint16_t ptype = 0;
1311  uint16_t ip_id = 0;
1312 
1313  assert(bytesCopied <= pktPtr->length);
1314  if (bytesCopied == pktPtr->length)
1315  status |= RXDS_EOP;
1316 
1317  IpPtr ip(pktPtr);
1318  Ip6Ptr ip6(pktPtr);
1319 
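 // Receive checksum offload: when enabled in RXCSUM, verify the IP and
 // TCP/UDP checksums of the incoming frame and record the results in the
 // status/error bits written back to the descriptor below.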
1320  if (ip || ip6) {
1321  if (ip) {
1322  DPRINTF(EthernetDesc, "Proccesing Ip packet with Id=%d\n",
1323  ip->id());
1324  ptype |= RXDP_IPV4;
1325  ip_id = ip->id();
1326  }
1327  if (ip6)
1328  ptype |= RXDP_IPV6;
1329 
1330  if (ip && igbe->regs.rxcsum.ipofld()) {
1331  DPRINTF(EthernetDesc, "Checking IP checksum\n");
1332  status |= RXDS_IPCS;
1333  csum = htole(cksum(ip));
1334  igbe->etherDeviceStats.rxIpChecksums++;
1335  if (cksum(ip) != 0) {
1336  err |= RXDE_IPE;
1337  ext_err |= RXDEE_IPE;
1338  DPRINTF(EthernetDesc, "Checksum is bad!!\n");
1339  }
1340  }
1341  TcpPtr tcp = ip ? TcpPtr(ip) : TcpPtr(ip6);
1342  if (tcp && igbe->regs.rxcsum.tuofld()) {
1343  DPRINTF(EthernetDesc, "Checking TCP checksum\n");
1344  status |= RXDS_TCPCS;
1345  ptype |= RXDP_TCP;
1346  csum = htole(cksum(tcp));
1347  igbe->etherDeviceStats.rxTcpChecksums++;
1348  if (cksum(tcp) != 0) {
1349  DPRINTF(EthernetDesc, "Checksum is bad!!\n");
1350  err |= RXDE_TCPE;
1351  ext_err |= RXDEE_TCPE;
1352  }
1353  }
1354 
1355  UdpPtr udp = ip ? UdpPtr(ip) : UdpPtr(ip6);
1356  if (udp && igbe->regs.rxcsum.tuofld()) {
1357  DPRINTF(EthernetDesc, "Checking UDP checksum\n");
1358  status |= RXDS_UDPCS;
1359  ptype |= RXDP_UDP;
1360  csum = htole(cksum(udp));
1361  igbe->etherDeviceStats.rxUdpChecksums++;
1362  if (cksum(udp) != 0) {
1363  DPRINTF(EthernetDesc, "Checksum is bad!!\n");
1364  ext_err |= RXDEE_TCPE;
1365  err |= RXDE_TCPE;
1366  }
1367  }
1368  } else { // if ip
1369  DPRINTF(EthernetSM, "Proccesing Non-Ip packet\n");
1370  }
1371 
1372  switch (igbe->regs.srrctl.desctype()) {
1373  case RXDT_LEGACY:
1374  desc->legacy.len = htole((uint16_t)(pktPtr->length + crcfixup));
1375  desc->legacy.status = htole(status);
1376  desc->legacy.errors = htole(err);
1377  // No vlan support at this point... just set it to 0
1378  desc->legacy.vlan = 0;
1379  break;
1380  case RXDT_ADV_SPLIT_A:
1381  case RXDT_ADV_ONEBUF:
1382  desc->adv_wb.rss_type = htole(0);
1383  desc->adv_wb.pkt_type = htole(ptype);
1384  if (igbe->regs.rxcsum.pcsd()) {
1385  // no rss support right now
1386  desc->adv_wb.rss_hash = htole(0);
1387  } else {
1388  desc->adv_wb.id = htole(ip_id);
1389  desc->adv_wb.csum = htole(csum);
1390  }
1391  desc->adv_wb.status = htole(status);
1392  desc->adv_wb.errors = htole(ext_err);
1393  // no vlan support
1394  desc->adv_wb.vlan_tag = htole(0);
1395  break;
1396  default:
1397  panic("Unimplemnted RX receive buffer type %d\n",
1398  igbe->regs.srrctl.desctype());
1399  }
1400 
1401  DPRINTF(EthernetDesc, "Descriptor complete w0: %#x w1: %#x\n",
1402  desc->adv_read.pkt, desc->adv_read.hdr);
1403 
1404  if (bytesCopied == pktPtr->length) {
1405  DPRINTF(EthernetDesc,
1406  "Packet completely written to descriptor buffers\n");
1407  // Deal with the rx timer interrupts
1408  if (igbe->regs.rdtr.delay()) {
1409  Tick delay = igbe->regs.rdtr.delay() * igbe->intClock();
1410  DPRINTF(EthernetSM, "RXS: Scheduling DTR for %d\n", delay);
1411  igbe->reschedule(igbe->rdtrEvent, curTick() + delay);
1412  }
1413 
1414  if (igbe->regs.radv.idv()) {
1415  Tick delay = igbe->regs.radv.idv() * igbe->intClock();
1416  DPRINTF(EthernetSM, "RXS: Scheduling ADV for %d\n", delay);
1417  if (!igbe->radvEvent.scheduled()) {
1418  igbe->schedule(igbe->radvEvent, curTick() + delay);
1419  }
1420  }
1421 
1422  // if neither radv or rdtr, maybe itr is set...
1423  if (!igbe->regs.rdtr.delay() && !igbe->regs.radv.idv()) {
1424  DPRINTF(EthernetSM,
1425  "RXS: Receive interrupt delay disabled, posting IT_RXT\n");
1426  igbe->postInterrupt(IT_RXT);
1427  }
1428 
1429  // If the packet is small enough, interrupt appropriately
1430  // I wonder if this is delayed or not?!
1431  if (pktPtr->length <= igbe->regs.rsrpd.idv()) {
1432  DPRINTF(EthernetSM,
1433  "RXS: Posting IT_SRPD beacuse small packet received\n");
1434  igbe->postInterrupt(IT_SRPD);
1435  }
1436  bytesCopied = 0;
1437  }
1438 
1439  pktPtr = NULL;
1440  igbe->checkDrain();
1441  enableSm();
1442  pktDone = true;
1443 
1444  DPRINTF(EthernetDesc, "Processing of this descriptor complete\n");
1445  unusedCache.pop_front();
1446  usedCache.push_back(desc);
1447 }
1448 
1449 void
1451 {
1452  if (igbe->drainState() != DrainState::Draining) {
1453  igbe->rxTick = true;
1454  igbe->restartClock();
1455  }
1456 }
1457 
1458 bool
1460 {
1461  if (pktDone) {
1462  pktDone = false;
1463  return true;
1464  }
1465  return false;
1466 }
1467 
1468 bool
1470 {
1471  return pktEvent.scheduled() || wbEvent.scheduled() ||
1472  fetchEvent.scheduled() || pktHdrEvent.scheduled() ||
1473  pktDataEvent.scheduled();
1474 
1475 }
1476 
1477 void
1479 {
1481  SERIALIZE_SCALAR(pktDone);
1482  SERIALIZE_SCALAR(splitCount);
1483  SERIALIZE_SCALAR(bytesCopied);
1484 }
1485 
1486 void
1488 {
1490  UNSERIALIZE_SCALAR(pktDone);
1491  UNSERIALIZE_SCALAR(splitCount);
1492  UNSERIALIZE_SCALAR(bytesCopied);
1493 }
1494 
1495 
1497 
1498 IGbE::TxDescCache::TxDescCache(IGbE *i, const std::string n, int s)
1499  : DescCache<TxDesc>(i, n, s), pktDone(false), isTcp(false),
1500  pktWaiting(false), pktMultiDesc(false),
1501  completionAddress(0), completionEnabled(false),
1502  useTso(false), tsoHeaderLen(0), tsoMss(0), tsoTotalLen(0), tsoUsedLen(0),
1503  tsoPrevSeq(0), tsoPktPayloadBytes(0), tsoLoadedHeader(false),
1504  tsoPktHasHeader(false), tsoDescBytesUsed(0), tsoCopyBytes(0), tsoPkts(0),
1505  pktEvent([this]{ pktComplete(); }, n),
1506  headerEvent([this]{ headerComplete(); }, n),
1507  nullEvent([this]{ nullCallback(); }, n)
1508 {
1509  annSmFetch = "TX Desc Fetch";
1510  annSmWb = "TX Desc Writeback";
1511  annUnusedDescQ = "TX Unused Descriptors";
1512  annUnusedCacheQ = "TX Unused Descriptor Cache";
1513  annUsedCacheQ = "TX Used Descriptor Cache";
1514  annUsedDescQ = "TX Used Descriptors";
1515  annDescQ = "TX Descriptors";
1516 }
1517 
1518 void
 1519 IGbE::TxDescCache::processContextDesc()
 1520 {
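 // TSO (TCP segmentation offload) setup: a context descriptor supplies the
 // header length, MSS and total payload size. Subsequent data descriptors
 // are then carved into MSS-sized segments, each prefixed with a copy of
 // the header that headerComplete() caches in tsoHeader.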
1521  assert(unusedCache.size());
1522  TxDesc *desc;
1523 
1524  DPRINTF(EthernetDesc, "Checking and processing context descriptors\n");
1525 
1526  while (!useTso && unusedCache.size() &&
1527  txd_op::isContext(unusedCache.front())) {
1528  DPRINTF(EthernetDesc, "Got context descriptor type...\n");
1529 
1530  desc = unusedCache.front();
1531  DPRINTF(EthernetDesc, "Descriptor upper: %#x lower: %#X\n",
1532  desc->d1, desc->d2);
1533 
1534 
1535  // is this going to be a tcp or udp packet?
1536  isTcp = txd_op::tcp(desc) ? true : false;
1537 
1538  // setup all the TSO variables, they'll be ignored if we don't use
1539  // tso for this connection
1540  tsoHeaderLen = txd_op::hdrlen(desc);
1541  tsoMss = txd_op::mss(desc);
1542 
1543  if (txd_op::isType(desc, txd_op::TXD_CNXT) && txd_op::tse(desc)) {
1544  DPRINTF(EthernetDesc, "TCP offload enabled for packet hdrlen: "
1545  "%d mss: %d paylen %d\n", txd_op::hdrlen(desc),
1546  txd_op::mss(desc), txd_op::getLen(desc));
1547  useTso = true;
1548  tsoTotalLen = txd_op::getLen(desc);
1549  tsoLoadedHeader = false;
1550  tsoDescBytesUsed = 0;
1551  tsoUsedLen = 0;
1552  tsoPrevSeq = 0;
1553  tsoPktHasHeader = false;
1554  tsoPkts = 0;
1555  tsoCopyBytes = 0;
1556  }
1557 
1558  txd_op::setDd(desc);
1559  unusedCache.pop_front();
1560  usedCache.push_back(desc);
1561  }
1562 
1563  if (!unusedCache.size())
1564  return;
1565 
1566  desc = unusedCache.front();
1567  if (!useTso && txd_op::isType(desc, txd_op::TXD_ADVDATA) &&
1568  txd_op::tse(desc)) {
1569  DPRINTF(EthernetDesc, "TCP offload(adv) enabled for packet "
1570  "hdrlen: %d mss: %d paylen %d\n",
1571  tsoHeaderLen, tsoMss, txd_op::getTsoLen(desc));
1572  useTso = true;
1573  tsoTotalLen = txd_op::getTsoLen(desc);
1574  tsoLoadedHeader = false;
1575  tsoDescBytesUsed = 0;
1576  tsoUsedLen = 0;
1577  tsoPrevSeq = 0;
1578  tsoPktHasHeader = false;
1579  tsoPkts = 0;
1580  }
1581 
1582  if (useTso && !tsoLoadedHeader) {
1583  // we need to fetch a header
1584  DPRINTF(EthernetDesc, "Starting DMA of TSO header\n");
1585  assert(txd_op::isData(desc) && txd_op::getLen(desc) >= tsoHeaderLen);
1586  pktWaiting = true;
1587  assert(tsoHeaderLen <= 256);
1588  igbe->dmaRead(pciToDma(txd_op::getBuf(desc)),
1589  tsoHeaderLen, &headerEvent, tsoHeader, 0);
1590  }
1591 }
1592 
1593 void
 1594 IGbE::TxDescCache::headerComplete()
 1595 {
1596  DPRINTF(EthernetDesc, "TSO: Fetching TSO header complete\n");
1597  pktWaiting = false;
1598 
1599  assert(unusedCache.size());
1600  TxDesc *desc = unusedCache.front();
1601  DPRINTF(EthernetDesc, "TSO: len: %d tsoHeaderLen: %d\n",
1602  txd_op::getLen(desc), tsoHeaderLen);
1603 
1604  if (txd_op::getLen(desc) == tsoHeaderLen) {
1605  tsoDescBytesUsed = 0;
1606  tsoLoadedHeader = true;
1607  unusedCache.pop_front();
1608  usedCache.push_back(desc);
1609  } else {
1610  DPRINTF(EthernetDesc, "TSO: header part of larger payload\n");
1611  tsoDescBytesUsed = tsoHeaderLen;
1612  tsoLoadedHeader = true;
1613  }
1614  enableSm();
1615  igbe->checkDrain();
1616 }
1617 
1618 unsigned
 1619 IGbE::TxDescCache::getPacketSize(EthPacketPtr p)
 1620 {
1621  if (!unusedCache.size())
1622  return 0;
1623 
1624  DPRINTF(EthernetDesc, "Starting processing of descriptor\n");
1625 
1626  assert(!useTso || tsoLoadedHeader);
1627  TxDesc *desc = unusedCache.front();
1628 
1629  if (useTso) {
1630  DPRINTF(EthernetDesc, "getPacket(): TxDescriptor data "
1631  "d1: %#llx d2: %#llx\n", desc->d1, desc->d2);
1632  DPRINTF(EthernetDesc, "TSO: use: %d hdrlen: %d mss: %d total: %d "
1633  "used: %d loaded hdr: %d\n", useTso, tsoHeaderLen, tsoMss,
1634  tsoTotalLen, tsoUsedLen, tsoLoadedHeader);
1635 
1636  if (tsoPktHasHeader)
1637  tsoCopyBytes = std::min((tsoMss + tsoHeaderLen) - p->length,
1638  txd_op::getLen(desc) - tsoDescBytesUsed);
1639  else
1640  tsoCopyBytes = std::min(tsoMss,
1641  txd_op::getLen(desc) - tsoDescBytesUsed);
1642  unsigned pkt_size =
1643  tsoCopyBytes + (tsoPktHasHeader ? 0 : tsoHeaderLen);
1644 
1645  DPRINTF(EthernetDesc, "TSO: descBytesUsed: %d copyBytes: %d "
1646  "this descLen: %d\n",
1647  tsoDescBytesUsed, tsoCopyBytes, txd_op::getLen(desc));
1648  DPRINTF(EthernetDesc, "TSO: pktHasHeader: %d\n", tsoPktHasHeader);
1649  DPRINTF(EthernetDesc, "TSO: Next packet is %d bytes\n", pkt_size);
1650  return pkt_size;
1651  }
1652 
1653  DPRINTF(EthernetDesc, "Next TX packet is %d bytes\n",
1654  txd_op::getLen(unusedCache.front()));
1655  return txd_op::getLen(desc);
1656 }
1657 
1658 void
 1659 IGbE::TxDescCache::getPacketData(EthPacketPtr p)
 1660 {
1661  assert(unusedCache.size());
1662 
1663  TxDesc *desc;
1664  desc = unusedCache.front();
1665 
1666  DPRINTF(EthernetDesc, "getPacketData(): TxDescriptor data "
1667  "d1: %#llx d2: %#llx\n", desc->d1, desc->d2);
1668  assert((txd_op::isLegacy(desc) || txd_op::isData(desc)) &&
1669  txd_op::getLen(desc));
1670 
1671  pktPtr = p;
1672 
1673  pktWaiting = true;
1674 
1675  DPRINTF(EthernetDesc, "Starting DMA of packet at offset %d\n", p->length);
1676 
1677  if (useTso) {
1678  assert(tsoLoadedHeader);
1679  if (!tsoPktHasHeader) {
1680  DPRINTF(EthernetDesc,
1681  "Loading TSO header (%d bytes) into start of packet\n",
1682  tsoHeaderLen);
1683  memcpy(p->data, &tsoHeader, tsoHeaderLen);
1684  p->length +=tsoHeaderLen;
1685  tsoPktHasHeader = true;
1686  }
1687  }
1688 
1689  if (useTso) {
1690  DPRINTF(EthernetDesc,
1691  "Starting DMA of packet at offset %d length: %d\n",
1692  p->length, tsoCopyBytes);
1693  igbe->dmaRead(pciToDma(txd_op::getBuf(desc))
1694  + tsoDescBytesUsed,
1695  tsoCopyBytes, &pktEvent, p->data + p->length,
1696  igbe->txReadDelay);
1697  tsoDescBytesUsed += tsoCopyBytes;
1698  assert(tsoDescBytesUsed <= txd_op::getLen(desc));
1699  } else {
1700  igbe->dmaRead(pciToDma(txd_op::getBuf(desc)),
1701  txd_op::getLen(desc), &pktEvent, p->data + p->length,
1702  igbe->txReadDelay);
1703  }
1704 }
1705 
1706 void
 1707 IGbE::TxDescCache::pktComplete()
 1708 {
1709 
1710  TxDesc *desc;
1711  assert(unusedCache.size());
1712  assert(pktPtr);
1713 
1714  DPRINTF(EthernetDesc, "DMA of packet complete\n");
1715 
1716 
1717  desc = unusedCache.front();
1718  assert((txd_op::isLegacy(desc) || txd_op::isData(desc)) &&
1719  txd_op::getLen(desc));
1720 
1721  DPRINTF(EthernetDesc, "TxDescriptor data d1: %#llx d2: %#llx\n",
1722  desc->d1, desc->d2);
1723 
1724  // Set the length of the data in the EtherPacket
1725  if (useTso) {
1726  DPRINTF(EthernetDesc, "TSO: use: %d hdrlen: %d mss: %d total: %d "
1727  "used: %d loaded hdr: %d\n", useTso, tsoHeaderLen, tsoMss,
1728  tsoTotalLen, tsoUsedLen, tsoLoadedHeader);
1729  pktPtr->simLength += tsoCopyBytes;
1730  pktPtr->length += tsoCopyBytes;
1731  tsoUsedLen += tsoCopyBytes;
1732  DPRINTF(EthernetDesc, "TSO: descBytesUsed: %d copyBytes: %d\n",
1733  tsoDescBytesUsed, tsoCopyBytes);
1734  } else {
1735  pktPtr->simLength += txd_op::getLen(desc);
1736  pktPtr->length += txd_op::getLen(desc);
1737  }
1738 
1739 
1740 
1741  if ((!txd_op::eop(desc) && !useTso) ||
1742  (pktPtr->length < (tsoMss + tsoHeaderLen) &&
1743  tsoTotalLen != tsoUsedLen && useTso)) {
1744  assert(!useTso || (tsoDescBytesUsed == txd_op::getLen(desc)));
1745  unusedCache.pop_front();
1746  usedCache.push_back(desc);
1747 
1748  tsoDescBytesUsed = 0;
1749  pktDone = true;
1750  pktWaiting = false;
1751  pktMultiDesc = true;
1752 
1753  DPRINTF(EthernetDesc, "Partial Packet Descriptor of %d bytes Done\n",
1754  pktPtr->length);
1755  pktPtr = NULL;
1756 
1757  enableSm();
1758  igbe->checkDrain();
1759  return;
1760  }
1761 
1762 
1763  pktMultiDesc = false;
1764  // no support for vlans
1765  assert(!txd_op::vle(desc));
1766 
1767  // we only support single packet descriptors at this point
1768  if (!useTso)
1769  assert(txd_op::eop(desc));
1770 
1771  // set that this packet is done
1772  if (txd_op::rs(desc))
1773  txd_op::setDd(desc);
1774 
1775  DPRINTF(EthernetDesc, "TxDescriptor data d1: %#llx d2: %#llx\n",
1776  desc->d1, desc->d2);
1777 
1778  if (useTso) {
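 // TSO fix-ups: every emitted segment gets its own IP identification and
 // length, and its TCP sequence number advanced by the payload already
 // sent; FIN/PSH are cleared on all but the final segment.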
1779  IpPtr ip(pktPtr);
1780  Ip6Ptr ip6(pktPtr);
1781  if (ip) {
1782  DPRINTF(EthernetDesc, "TSO: Modifying IP header. Id + %d\n",
1783  tsoPkts);
1784  ip->id(ip->id() + tsoPkts++);
1785  ip->len(pktPtr->length - EthPtr(pktPtr)->size());
1786  }
1787  if (ip6)
1788  ip6->plen(pktPtr->length - EthPtr(pktPtr)->size());
1789  TcpPtr tcp = ip ? TcpPtr(ip) : TcpPtr(ip6);
1790  if (tcp) {
1791  DPRINTF(EthernetDesc,
1792  "TSO: Modifying TCP header. old seq %d + %d\n",
1793  tcp->seq(), tsoPrevSeq);
1794  tcp->seq(tcp->seq() + tsoPrevSeq);
1795  if (tsoUsedLen != tsoTotalLen)
1796  tcp->flags(tcp->flags() & ~9); // clear fin & psh
1797  }
1798  UdpPtr udp = ip ? UdpPtr(ip) : UdpPtr(ip6);
1799  if (udp) {
1800  DPRINTF(EthernetDesc, "TSO: Modifying UDP header.\n");
1801  udp->len(pktPtr->length - EthPtr(pktPtr)->size());
1802  }
1803  tsoPrevSeq = tsoUsedLen;
1804  }
1805 
1806  if (debug::EthernetDesc) {
1807  IpPtr ip(pktPtr);
1808  if (ip)
1809  DPRINTF(EthernetDesc, "Proccesing Ip packet with Id=%d\n",
1810  ip->id());
1811  else
1812  DPRINTF(EthernetSM, "Proccesing Non-Ip packet\n");
1813  }
1814 
 1815  // Checksums are only offloaded for new descriptor types
1816  if (txd_op::isData(desc) && (txd_op::ixsm(desc) || txd_op::txsm(desc))) {
1817  DPRINTF(EthernetDesc, "Calculating checksums for packet\n");
1818  IpPtr ip(pktPtr);
1819  Ip6Ptr ip6(pktPtr);
1820  assert(ip || ip6);
1821  if (ip && txd_op::ixsm(desc)) {
1822  ip->sum(0);
1823  ip->sum(cksum(ip));
1824  igbe->etherDeviceStats.txIpChecksums++;
1825  DPRINTF(EthernetDesc, "Calculated IP checksum\n");
1826  }
1827  if (txd_op::txsm(desc)) {
1828  TcpPtr tcp = ip ? TcpPtr(ip) : TcpPtr(ip6);
1829  UdpPtr udp = ip ? UdpPtr(ip) : UdpPtr(ip6);
1830  if (tcp) {
1831  tcp->sum(0);
1832  tcp->sum(cksum(tcp));
1833  igbe->etherDeviceStats.txTcpChecksums++;
1834  DPRINTF(EthernetDesc, "Calculated TCP checksum\n");
1835  } else if (udp) {
1836  assert(udp);
1837  udp->sum(0);
1838  udp->sum(cksum(udp));
1839  igbe->etherDeviceStats.txUdpChecksums++;
1840  DPRINTF(EthernetDesc, "Calculated UDP checksum\n");
1841  } else {
1842  panic("Told to checksum, but don't know how\n");
1843  }
1844  }
1845  }
1846 
1847  if (txd_op::ide(desc)) {
 1848  // Deal with the tx timer interrupts
1849  DPRINTF(EthernetDesc, "Descriptor had IDE set\n");
1850  if (igbe->regs.tidv.idv()) {
1851  Tick delay = igbe->regs.tidv.idv() * igbe->intClock();
1852  DPRINTF(EthernetDesc, "setting tidv\n");
1853  igbe->reschedule(igbe->tidvEvent, curTick() + delay, true);
1854  }
1855 
1856  if (igbe->regs.tadv.idv() && igbe->regs.tidv.idv()) {
1857  Tick delay = igbe->regs.tadv.idv() * igbe->intClock();
1858  DPRINTF(EthernetDesc, "setting tadv\n");
1859  if (!igbe->tadvEvent.scheduled()) {
1860  igbe->schedule(igbe->tadvEvent, curTick() + delay);
1861  }
1862  }
1863  }
1864 
1865 
1866  if (!useTso || txd_op::getLen(desc) == tsoDescBytesUsed) {
1867  DPRINTF(EthernetDesc, "Descriptor Done\n");
1868  unusedCache.pop_front();
1869  usedCache.push_back(desc);
1870  tsoDescBytesUsed = 0;
1871  }
1872 
1873  if (useTso && tsoUsedLen == tsoTotalLen)
1874  useTso = false;
1875 
1876 
1877  DPRINTF(EthernetDesc,
1878  "------Packet of %d bytes ready for transmission-------\n",
1879  pktPtr->length);
1880  pktDone = true;
1881  pktWaiting = false;
1882  pktPtr = NULL;
1883  tsoPktHasHeader = false;
1884 
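 // TXDCTL.WTHRESH controls how many completed descriptors may accumulate
 // before they are written back to the ring; 0 forces an immediate
 // writeback after every packet.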
1885  if (igbe->regs.txdctl.wthresh() == 0) {
1886  DPRINTF(EthernetDesc, "WTHRESH == 0, writing back descriptor\n");
1887  writeback(0);
1888  } else if (!igbe->regs.txdctl.gran() && igbe->regs.txdctl.wthresh() <=
1889  descInBlock(usedCache.size())) {
1890  DPRINTF(EthernetDesc, "used > WTHRESH, writing back descriptor\n");
1891  writeback((igbe->cacheBlockSize()-1)>>4);
1892  } else if (igbe->regs.txdctl.wthresh() <= usedCache.size()) {
1893  DPRINTF(EthernetDesc, "used > WTHRESH, writing back descriptor\n");
1894  writeback((igbe->cacheBlockSize()-1)>>4);
1895  }
1896 
1897  enableSm();
1898  igbe->checkDrain();
1899 }
1900 
1901 void
 1902 IGbE::TxDescCache::actionAfterWb()
 1903 {
1904  DPRINTF(EthernetDesc, "actionAfterWb() completionEnabled: %d\n",
1905  completionEnabled);
1906  igbe->postInterrupt(igbreg::IT_TXDW);
1907  if (completionEnabled) {
1908  descEnd = igbe->regs.tdh();
1909  DPRINTF(EthernetDesc,
1910  "Completion writing back value: %d to addr: %#x\n", descEnd,
1911  completionAddress);
1912  igbe->dmaWrite(pciToDma(mbits(completionAddress, 63, 2)),
1913  sizeof(descEnd), &nullEvent, (uint8_t *)&descEnd, 0);
1914  }
1915 }
1916 
1917 void
1919 {
1921 
1922  SERIALIZE_SCALAR(pktDone);
1923  SERIALIZE_SCALAR(isTcp);
1924  SERIALIZE_SCALAR(pktWaiting);
1925  SERIALIZE_SCALAR(pktMultiDesc);
1926 
1927  SERIALIZE_SCALAR(useTso);
1928  SERIALIZE_SCALAR(tsoHeaderLen);
1929  SERIALIZE_SCALAR(tsoMss);
1930  SERIALIZE_SCALAR(tsoTotalLen);
1931  SERIALIZE_SCALAR(tsoUsedLen);
 1932  SERIALIZE_SCALAR(tsoPrevSeq);
1933  SERIALIZE_SCALAR(tsoPktPayloadBytes);
1934  SERIALIZE_SCALAR(tsoLoadedHeader);
1935  SERIALIZE_SCALAR(tsoPktHasHeader);
1936  SERIALIZE_ARRAY(tsoHeader, 256);
1937  SERIALIZE_SCALAR(tsoDescBytesUsed);
1938  SERIALIZE_SCALAR(tsoCopyBytes);
1939  SERIALIZE_SCALAR(tsoPkts);
1940 
1941  SERIALIZE_SCALAR(completionAddress);
1942  SERIALIZE_SCALAR(completionEnabled);
1943  SERIALIZE_SCALAR(descEnd);
1944 }
1945 
1946 void
1948 {
1950 
1951  UNSERIALIZE_SCALAR(pktDone);
1952  UNSERIALIZE_SCALAR(isTcp);
1953  UNSERIALIZE_SCALAR(pktWaiting);
1954  UNSERIALIZE_SCALAR(pktMultiDesc);
1955 
1956  UNSERIALIZE_SCALAR(useTso);
1957  UNSERIALIZE_SCALAR(tsoHeaderLen);
1958  UNSERIALIZE_SCALAR(tsoMss);
1959  UNSERIALIZE_SCALAR(tsoTotalLen);
1960  UNSERIALIZE_SCALAR(tsoUsedLen);
 1961  UNSERIALIZE_SCALAR(tsoPrevSeq);
1962  UNSERIALIZE_SCALAR(tsoPktPayloadBytes);
1963  UNSERIALIZE_SCALAR(tsoLoadedHeader);
1964  UNSERIALIZE_SCALAR(tsoPktHasHeader);
1965  UNSERIALIZE_ARRAY(tsoHeader, 256);
1966  UNSERIALIZE_SCALAR(tsoDescBytesUsed);
1967  UNSERIALIZE_SCALAR(tsoCopyBytes);
1968  UNSERIALIZE_SCALAR(tsoPkts);
1969 
1970  UNSERIALIZE_SCALAR(completionAddress);
1971  UNSERIALIZE_SCALAR(completionEnabled);
1972  UNSERIALIZE_SCALAR(descEnd);
1973 }
1974 
1975 bool
1977 {
1978  if (pktDone) {
1979  pktDone = false;
1980  return true;
1981  }
1982  return false;
1983 }
1984 
1985 void
1987 {
1988  if (igbe->drainState() != DrainState::Draining) {
1989  igbe->txTick = true;
1990  igbe->restartClock();
1991  }
1992 }
1993 
1994 bool
1996 {
1997  return pktEvent.scheduled() || wbEvent.scheduled() ||
1998  fetchEvent.scheduled();
1999 }
2000 
2001 
2003 
2004 void
 2005 IGbE::restartClock()
 2006 {
 2007  if (!tickEvent.scheduled() && (rxTick || txTick || txFifoTick) &&
 2008  drainState() == DrainState::Running)
 2009  schedule(tickEvent, clockEdge(Cycles(1)));
 2010 }
2011 
2012 DrainState
 2013 IGbE::drain()
 2014 {
 2015  unsigned int count(0);
 2016  if (rxDescCache.hasOutstandingEvents() ||
 2017  txDescCache.hasOutstandingEvents()) {
 2018  count++;
2019  }
2020 
2021  txFifoTick = false;
2022  txTick = false;
2023  rxTick = false;
2024 
2025  if (tickEvent.scheduled())
 2026  deschedule(tickEvent);
 2027 
2028  if (count) {
2029  DPRINTF(Drain, "IGbE not drained\n");
2030  return DrainState::Draining;
2031  } else
2032  return DrainState::Drained;
2033 }
2034 
2035 void
 2036 IGbE::drainResume()
 2037 {
 2038  Drainable::drainResume();
 2039 
2040  txFifoTick = true;
2041  txTick = true;
2042  rxTick = true;
2043 
2044  restartClock();
2045  DPRINTF(EthernetSM, "resuming from drain");
2046 }
2047 
2048 void
 2049 IGbE::checkDrain()
 2050 {
 2051  if (drainState() != DrainState::Draining)
 2052  return;
2053 
2054  txFifoTick = false;
2055  txTick = false;
2056  rxTick = false;
2059  DPRINTF(Drain, "IGbE done draining, processing drain event\n");
2060  signalDrainDone();
2061  }
2062 }
2063 
2064 void
 2065 IGbE::txStateMachine()
 2066 {
2067  if (!regs.tctl.en()) {
2068  txTick = false;
2069  DPRINTF(EthernetSM, "TXS: TX disabled, stopping ticking\n");
2070  return;
2071  }
2072 
 2073  // If we have a packet available and its length is not 0 (meaning it's not
 2074  // a multidescriptor packet) put it in the FIFO, otherwise on the next
 2075  // iteration we'll get the rest of the data
 2076  if (txPacket && txDescCache.packetAvailable()
 2077  && !txDescCache.packetMultiDesc() && txPacket->length) {
2078  DPRINTF(EthernetSM, "TXS: packet placed in TX FIFO\n");
2079 #ifndef NDEBUG
2080  bool success =
2081 #endif
2082  txFifo.push(txPacket);
2083  txFifoTick = true && drainState() != DrainState::Draining;
2084  assert(success);
2085  txPacket = NULL;
2086  txDescCache.writeback((cacheBlockSize()-1)>>4);
2087  return;
2088  }
2089 
2090  // Only support descriptor granularity
2091  if (regs.txdctl.lwthresh() &&
2092  txDescCache.descLeft() < (regs.txdctl.lwthresh() * 8)) {
2093  DPRINTF(EthernetSM, "TXS: LWTHRESH caused posting of TXDLOW\n");
2095  }
2096 
2097  if (!txPacket) {
2098  txPacket = std::make_shared<EthPacketData>(16384);
2099  }
2100 
2101  if (!txDescCache.packetWaiting()) {
2102  if (txDescCache.descLeft() == 0) {
2103  postInterrupt(IT_TXQE);
2104  txDescCache.writeback(0);
2105  checkDrain();
2106  DPRINTF(EthernetSM, "TXS: No descriptors left in ring, forcing "
2107  "writeback stopping ticking and posting TXQE\n");
2108  txTick = false;
2109  return;
2110  }
2111 
2112 
2113  if (!(txDescCache.descUnused())) {
2114  txDescCache.fetchDescriptors();
2115  DPRINTF(EthernetSM, "TXS: No descriptors available in cache, "
2116  "fetching and stopping ticking\n");
2117  txTick = false;
2118  return;
2119  }
2120 
2121 
2122  txDescCache.processContextDesc();
2123  if (txDescCache.packetWaiting()) {
2124  DPRINTF(EthernetSM,
2125  "TXS: Fetching TSO header, stopping ticking\n");
2126  txTick = false;
2127  return;
2128  }
2129 
2130  unsigned size = txDescCache.getPacketSize(txPacket);
2131  if (size > 0 && txFifo.avail() > size) {
2132  DPRINTF(EthernetSM, "TXS: Reserving %d bytes in FIFO and "
2133  "beginning DMA of next packet\n", size);
2134  txFifo.reserve(size);
2135  txDescCache.getPacketData(txPacket);
2136  } else if (size == 0) {
2137  DPRINTF(EthernetSM, "TXS: getPacketSize returned: %d\n", size);
2138  DPRINTF(EthernetSM,
2139  "TXS: No packets to get, writing back used descriptors\n");
2141  } else {
2142  DPRINTF(EthernetSM, "TXS: FIFO full, stopping ticking until space "
2143  "available in FIFO\n");
2144  txTick = false;
2145  }
2146 
2147 
2148  return;
2149  }
2150  DPRINTF(EthernetSM, "TXS: Nothing to do, stopping ticking\n");
2151  txTick = false;
2152 }
2153 
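// Receive entry point from the wire: count the frame, drop it if RX is
// disabled or the FIFO is full (posting RXO on overflow), otherwise queue
// it in rxFifo and kick the RX state machine.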
2154 bool
2155 IGbE::ethRxPkt(EthPacketPtr pkt)
2156 {
2157  etherDeviceStats.rxBytes += pkt->length;
2158  etherDeviceStats.rxPackets++;
2159 
2160  DPRINTF(Ethernet, "RxFIFO: Receiving pcakte from wire\n");
2161 
2162 
2163  if (!regs.rctl.en()) {
2164  DPRINTF(Ethernet, "RxFIFO: RX not enabled, dropping\n");
2165  return true;
2166  }
2167 
2168  // restart the state machines if they are stopped
2169  rxTick = true && drainState() != DrainState::Draining;
2170  if ((rxTick || txTick) && !tickEvent.scheduled()) {
2171  DPRINTF(EthernetSM,
2172  "RXS: received packet into fifo, starting ticking\n");
2173  restartClock();
2174  }
2175 
2176  if (!rxFifo.push(pkt)) {
2177  DPRINTF(Ethernet, "RxFIFO: Packet won't fit in fifo... dropped\n");
2178  postInterrupt(IT_RXO, true);
2179  return false;
2180  }
2181 
2182  return true;
2183 }
2184 
2185 
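// RX state machine: DMA the frame at the head of rxFifo into the buffers
// named by the receive descriptors, then, once the DMA completes, apply the
// RXDMT low-threshold interrupt and the WTHRESH/PTHRESH/HTHRESH writeback
// and prefetch rules.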
2186 void
2187 IGbE::rxStateMachine()
2188 {
2189  if (!regs.rctl.en()) {
2190  rxTick = false;
2191  DPRINTF(EthernetSM, "RXS: RX disabled, stopping ticking\n");
2192  return;
2193  }
2194 
2195  // If the packet is done check for interrupts/descriptors/etc
2196  if (rxDescCache.packetDone()) {
2197  rxDmaPacket = false;
2198  DPRINTF(EthernetSM, "RXS: Packet completed DMA to memory\n");
2199  int descLeft = rxDescCache.descLeft();
2200  DPRINTF(EthernetSM, "RXS: descLeft: %d rdmts: %d rdlen: %d\n",
2201  descLeft, regs.rctl.rdmts(), regs.rdlen());
2202 
2203  // rdmts 2->1/8, 1->1/4, 0->1/2
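 // e.g. rdmts == 1 selects the 1/4 threshold: ratio == 4, so RXDMT is
 // posted below once descLeft <= rdlen/4.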
2204  int ratio = (1ULL << (regs.rctl.rdmts() + 1));
2205  if (descLeft * ratio <= regs.rdlen()) {
2206  DPRINTF(Ethernet, "RXS: Interrupting (RXDMT) "
2207  "because of descriptors left\n");
2209  }
2210 
2211  if (rxFifo.empty())
2212  rxDescCache.writeback(0);
2213 
2214  if (descLeft == 0) {
2215  rxDescCache.writeback(0);
2216  DPRINTF(EthernetSM, "RXS: No descriptors left in ring, forcing"
2217  " writeback and stopping ticking\n");
2218  rxTick = false;
2219  }
2220 
2221  // only support descriptor granularities
2222  assert(regs.rxdctl.gran());
2223 
2224  if (regs.rxdctl.wthresh() >= rxDescCache.descUsed()) {
2225  DPRINTF(EthernetSM,
2226  "RXS: Writing back because WTHRESH >= descUsed\n");
2227  if (regs.rxdctl.wthresh() < (cacheBlockSize()>>4))
2228  rxDescCache.writeback(regs.rxdctl.wthresh()-1);
2229  else
2230  rxDescCache.writeback((cacheBlockSize()-1)>>4);
2231  }
2232 
2233  if ((rxDescCache.descUnused() < regs.rxdctl.pthresh()) &&
2234  ((rxDescCache.descLeft() - rxDescCache.descUnused()) >
2235  regs.rxdctl.hthresh())) {
2236  DPRINTF(EthernetSM, "RXS: Fetching descriptors because "
2237  "descUnused < PTHRESH\n");
2239  }
2240 
2241  if (rxDescCache.descUnused() == 0) {
2242  rxDescCache.fetchDescriptors();
2243  DPRINTF(EthernetSM, "RXS: No descriptors available in cache, "
2244  "fetching descriptors and stopping ticking\n");
2245  rxTick = false;
2246  }
2247  return;
2248  }
2249 
2250  if (rxDmaPacket) {
2251  DPRINTF(EthernetSM,
2252  "RXS: stopping ticking until packet DMA completes\n");
2253  rxTick = false;
2254  return;
2255  }
2256 
2257  if (!rxDescCache.descUnused()) {
2258  rxDescCache.fetchDescriptors();
2259  DPRINTF(EthernetSM, "RXS: No descriptors available in cache, "
2260  "stopping ticking\n");
2261  rxTick = false;
2262  DPRINTF(EthernetSM, "RXS: No descriptors available, fetching\n");
2263  return;
2264  }
2265 
2266  if (rxFifo.empty()) {
2267  DPRINTF(EthernetSM, "RXS: RxFIFO empty, stopping ticking\n");
2268  rxTick = false;
2269  return;
2270  }
2271 
2272  EthPacketPtr pkt;
2273  pkt = rxFifo.front();
2274 
2275 
2277  DPRINTF(EthernetSM, "RXS: Writing packet into memory\n");
2278  if (pktOffset == pkt->length) {
2279  DPRINTF(EthernetSM, "RXS: Removing packet from FIFO\n");
2280  pktOffset = 0;
2281  rxFifo.pop();
2282  }
2283 
2284  DPRINTF(EthernetSM, "RXS: stopping ticking until packet DMA completes\n");
2285  rxTick = false;
2286  rxDmaPacket = true;
2287 }
2288 
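// txWire() pushes the frame at the head of txFifo to the peer; if the
// transmit succeeds the frame is popped, otherwise it stays queued until
// the peer signals completion via ethTxDone().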
2289 void
2290 IGbE::txWire()
2291 {
2292  txFifoTick = false;
2293 
2294  if (txFifo.empty())
2295  return;
2296 
2297 
2298  if (etherInt->sendPacket(txFifo.front())) {
2299  if (debug::EthernetSM) {
2300  IpPtr ip(txFifo.front());
2301  if (ip)
2302  DPRINTF(EthernetSM, "Transmitting Ip packet with Id=%d\n",
2303  ip->id());
2304  else
2305  DPRINTF(EthernetSM, "Transmitting Non-Ip packet\n");
2306  }
2307  DPRINTF(EthernetSM,
2308  "TxFIFO: Successful transmit, bytes available in fifo: %d\n",
2309  txFifo.avail());
2310 
2311  etherDeviceStats.txBytes += txFifo.front()->length;
2312  etherDeviceStats.txPackets++;
2313 
2314  txFifo.pop();
2315  }
2316 }
2317 
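// One device clock cycle: run the RX and TX state machines, drain the TX
// FIFO onto the wire, and reschedule the tick only while any of the three
// state machines still has work pending.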
2318 void
2319 IGbE::tick()
2320 {
2321  DPRINTF(EthernetSM, "IGbE: -------------- Cycle --------------\n");
2322 
2323  inTick = true;
2324 
2325  if (rxTick)
2326  rxStateMachine();
2327 
2328  if (txTick)
2329  txStateMachine();
2330 
2331  // If txWire returns and txFifoTick is still set, that means the data we
2332  // sent to the other end was already accepted and we can send another
2333  // frame right away. This is consistent with the previous behavior which
2334  // would send another frame if one was ready in ethTxDone. This version
2335  // avoids growing the stack with each frame sent which can cause stack
2336  // overflow.
2337  while (txFifoTick)
2338  txWire();
2339 
2340  if (rxTick || txTick || txFifoTick)
2341  schedule(tickEvent, curTick() + clockPeriod());
2342 
2343  inTick = false;
2344 }
2345 
2346 void
2347 IGbE::ethTxDone()
2348 {
2349  // restart the tx state machines if they are stopped
2350  // fifo to send another packet
2351  // tx sm to put more data into the fifo
2352  txFifoTick = true && drainState() != DrainState::Draining;
2353  if (txDescCache.descLeft() != 0 && drainState() != DrainState::Draining)
2354  txTick = true;
2355 
2356  if (!inTick)
2357  restartClock();
2358  DPRINTF(EthernetSM, "TxFIFO: Transmission complete\n");
2359 }
2360 
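// Checkpointing: the absolute scheduled times of the delay-timer and
// interrupt events are saved (0 when unscheduled) and rescheduled at the
// same ticks on restore; the RX/TX descriptor caches serialize into their
// own checkpoint sections.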
2361 void
2362 IGbE::serialize(CheckpointOut &cp) const
2363 {
2364  PciDevice::serialize(cp);
2365 
2366  regs.serialize(cp);
2367  SERIALIZE_SCALAR(eeOpBits);
2368  SERIALIZE_SCALAR(eeAddrBits);
2369  SERIALIZE_SCALAR(eeDataBits);
2370  SERIALIZE_SCALAR(eeOpcode);
2371  SERIALIZE_SCALAR(eeAddr);
2372  SERIALIZE_SCALAR(lastInterrupt);
2373  SERIALIZE_ARRAY(flash,igbreg::EEPROM_SIZE);
2374 
2375  rxFifo.serialize("rxfifo", cp);
2376  txFifo.serialize("txfifo", cp);
2377 
2378  bool txPktExists = txPacket != nullptr;
2379  SERIALIZE_SCALAR(txPktExists);
2380  if (txPktExists)
2381  txPacket->serialize("txpacket", cp);
2382 
2383  Tick rdtr_time = 0, radv_time = 0, tidv_time = 0, tadv_time = 0,
2384  inter_time = 0;
2385 
2386  if (rdtrEvent.scheduled())
2387  rdtr_time = rdtrEvent.when();
2388  SERIALIZE_SCALAR(rdtr_time);
2389 
2390  if (radvEvent.scheduled())
2391  radv_time = radvEvent.when();
2392  SERIALIZE_SCALAR(radv_time);
2393 
2394  if (tidvEvent.scheduled())
2395  tidv_time = tidvEvent.when();
2396  SERIALIZE_SCALAR(tidv_time);
2397 
2398  if (tadvEvent.scheduled())
2399  tadv_time = tadvEvent.when();
2400  SERIALIZE_SCALAR(tadv_time);
2401 
2402  if (interEvent.scheduled())
2403  inter_time = interEvent.when();
2404  SERIALIZE_SCALAR(inter_time);
2405 
2407 
2408  txDescCache.serializeSection(cp, "TxDescCache");
2409  rxDescCache.serializeSection(cp, "RxDescCache");
2410 }
2411 
2412 void
2413 IGbE::unserialize(CheckpointIn &cp)
2414 {
2415  PciDevice::unserialize(cp);
2416 
2417  regs.unserialize(cp);
2418  UNSERIALIZE_SCALAR(eeOpBits);
2419  UNSERIALIZE_SCALAR(eeAddrBits);
2420  UNSERIALIZE_SCALAR(eeDataBits);
2421  UNSERIALIZE_SCALAR(eeOpcode);
2422  UNSERIALIZE_SCALAR(eeAddr);
2423  UNSERIALIZE_SCALAR(lastInterrupt);
2424  UNSERIALIZE_ARRAY(flash,igbreg::EEPROM_SIZE);
2425 
2426  rxFifo.unserialize("rxfifo", cp);
2427  txFifo.unserialize("txfifo", cp);
2428 
2429  bool txPktExists;
2430  UNSERIALIZE_SCALAR(txPktExists);
2431  if (txPktExists) {
2432  txPacket = std::make_shared<EthPacketData>(16384);
2433  txPacket->unserialize("txpacket", cp);
2434  }
2435 
2436  rxTick = true;
2437  txTick = true;
2438  txFifoTick = true;
2439 
2440  Tick rdtr_time, radv_time, tidv_time, tadv_time, inter_time;
2441  UNSERIALIZE_SCALAR(rdtr_time);
2442  UNSERIALIZE_SCALAR(radv_time);
2443  UNSERIALIZE_SCALAR(tidv_time);
2444  UNSERIALIZE_SCALAR(tadv_time);
2445  UNSERIALIZE_SCALAR(inter_time);
2446 
2447  if (rdtr_time)
2448  schedule(rdtrEvent, rdtr_time);
2449 
2450  if (radv_time)
2451  schedule(radvEvent, radv_time);
2452 
2453  if (tidv_time)
2454  schedule(tidvEvent, tidv_time);
2455 
2456  if (tadv_time)
2457  schedule(tadvEvent, tadv_time);
2458 
2459  if (inter_time)
2460  schedule(interEvent, inter_time);
2461 
2462  UNSERIALIZE_SCALAR(pktOffset);
2463 
2464  txDescCache.unserializeSection(cp, "TxDescCache");
2465  rxDescCache.unserializeSection(cp, "RxDescCache");
2466 }
2467 
2468 } // namespace gem5
Generated on Wed Dec 21 2022 10:22:34 for gem5 by doxygen 1.9.1