// Doxygen page header from the gem5 v21.1.0.2 source listing of
// x86_cpu.cc; not part of the original source file.
1 /*
2  * Copyright (c) 2013 Andreas Sandberg
3  * All rights reserved
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions are
7  * met: redistributions of source code must retain the above copyright
8  * notice, this list of conditions and the following disclaimer;
9  * redistributions in binary form must reproduce the above copyright
10  * notice, this list of conditions and the following disclaimer in the
11  * documentation and/or other materials provided with the distribution;
12  * neither the name of the copyright holders nor the names of its
13  * contributors may be used to endorse or promote products derived from
14  * this software without specific prior written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27  */
28 
29 #include "cpu/kvm/x86_cpu.hh"
30 
31 #include <linux/kvm.h>
32 
33 #include <algorithm>
34 #include <cerrno>
35 #include <memory>
36 
37 #include "arch/x86/cpuid.hh"
38 #include "arch/x86/faults.hh"
39 #include "arch/x86/interrupts.hh"
40 #include "arch/x86/regs/int.hh"
41 #include "arch/x86/regs/msr.hh"
42 #include "arch/x86/utility.hh"
43 #include "base/compiler.hh"
44 #include "cpu/kvm/base.hh"
45 #include "debug/Drain.hh"
46 #include "debug/Kvm.hh"
47 #include "debug/KvmContext.hh"
48 #include "debug/KvmIO.hh"
49 #include "debug/KvmInt.hh"
50 
51 namespace gem5
52 {
53 
54 using namespace X86ISA;
55 
56 #define MSR_TSC 0x10
57 
58 #define IO_PCI_CONF_ADDR 0xCF8
59 #define IO_PCI_CONF_DATA_BASE 0xCFC
60 
61 // Task segment type of an inactive 32-bit or 64-bit task
62 #define SEG_SYS_TYPE_TSS_AVAILABLE 9
63 // Task segment type of an active 32-bit or 64-bit task
64 #define SEG_SYS_TYPE_TSS_BUSY 11
65 
66 // Non-conforming accessed code segment
67 #define SEG_CS_TYPE_ACCESSED 9
68 // Non-conforming accessed code segment that can be read
69 #define SEG_CS_TYPE_READ_ACCESSED 11
70 
71 // The lowest bit of the type field for normal segments (code and
72 // data) is used to indicate that a segment has been accessed.
73 #define SEG_TYPE_BIT_ACCESSED 1
74 
/**
 * In-memory layout of the 512-byte x86 FXSAVE area (legacy FPU/SSE
 * state), also used as the first region of a KVM XSave buffer.
 *
 * The layout must match the hardware-defined format exactly, hence
 * the packing attribute and the static_assert below. The 32-bit and
 * 64-bit encodings of the FPU instruction/data pointers share
 * storage, so they are expressed as an anonymous union.
 */
struct GEM5_PACKED FXSave
{
    uint16_t fcw;           // x87 control word
    uint16_t fsw;           // x87 status word
    uint8_t ftwx;           // abridged x87 tag word
    uint8_t pad0;
    uint16_t last_opcode;   // last executed FPU opcode
    union
    {
        struct
        {
            uint32_t fpu_ip;    // FPU instruction pointer (32-bit layout)
            uint16_t fpu_cs;
            uint16_t pad1;
            uint32_t fpu_dp;    // FPU data pointer (32-bit layout)
            uint16_t fpu_ds;
            uint16_t pad2;
        } ctrl32;

        struct
        {
            uint64_t fpu_ip;    // FPU instruction pointer (64-bit layout)
            uint64_t fpu_dp;    // FPU data pointer (64-bit layout)
        } ctrl64;
    };
    uint32_t mxcsr;         // SSE control/status register
    uint32_t mxcsr_mask;    // writable MXCSR bits

    uint8_t fpr[8][16];     // x87 stack registers (80-bit, padded to 128)
    uint8_t xmm[16][16];    // XMM registers

    uint64_t reserved[12];
};

static_assert(sizeof(FXSave) == 512, "Unexpected size of FXSave");
110 
111 #define FOREACH_IREG() \
112  do { \
113  APPLY_IREG(rax, INTREG_RAX); \
114  APPLY_IREG(rbx, INTREG_RBX); \
115  APPLY_IREG(rcx, INTREG_RCX); \
116  APPLY_IREG(rdx, INTREG_RDX); \
117  APPLY_IREG(rsi, INTREG_RSI); \
118  APPLY_IREG(rdi, INTREG_RDI); \
119  APPLY_IREG(rsp, INTREG_RSP); \
120  APPLY_IREG(rbp, INTREG_RBP); \
121  APPLY_IREG(r8, INTREG_R8); \
122  APPLY_IREG(r9, INTREG_R9); \
123  APPLY_IREG(r10, INTREG_R10); \
124  APPLY_IREG(r11, INTREG_R11); \
125  APPLY_IREG(r12, INTREG_R12); \
126  APPLY_IREG(r13, INTREG_R13); \
127  APPLY_IREG(r14, INTREG_R14); \
128  APPLY_IREG(r15, INTREG_R15); \
129  } while (0)
130 
131 #define FOREACH_SREG() \
132  do { \
133  APPLY_SREG(cr0, MISCREG_CR0); \
134  APPLY_SREG(cr2, MISCREG_CR2); \
135  APPLY_SREG(cr3, MISCREG_CR3); \
136  APPLY_SREG(cr4, MISCREG_CR4); \
137  APPLY_SREG(cr8, MISCREG_CR8); \
138  APPLY_SREG(efer, MISCREG_EFER); \
139  APPLY_SREG(apic_base, MISCREG_APIC_BASE); \
140  } while (0)
141 
142 #define FOREACH_DREG() \
143  do { \
144  APPLY_DREG(db[0], MISCREG_DR0); \
145  APPLY_DREG(db[1], MISCREG_DR1); \
146  APPLY_DREG(db[2], MISCREG_DR2); \
147  APPLY_DREG(db[3], MISCREG_DR3); \
148  APPLY_DREG(dr6, MISCREG_DR6); \
149  APPLY_DREG(dr7, MISCREG_DR7); \
150  } while (0)
151 
152 #define FOREACH_SEGMENT() \
153  do { \
154  APPLY_SEGMENT(cs, MISCREG_CS - MISCREG_SEG_SEL_BASE); \
155  APPLY_SEGMENT(ds, MISCREG_DS - MISCREG_SEG_SEL_BASE); \
156  APPLY_SEGMENT(es, MISCREG_ES - MISCREG_SEG_SEL_BASE); \
157  APPLY_SEGMENT(fs, MISCREG_FS - MISCREG_SEG_SEL_BASE); \
158  APPLY_SEGMENT(gs, MISCREG_GS - MISCREG_SEG_SEL_BASE); \
159  APPLY_SEGMENT(ss, MISCREG_SS - MISCREG_SEG_SEL_BASE); \
160  APPLY_SEGMENT(tr, MISCREG_TR - MISCREG_SEG_SEL_BASE); \
161  APPLY_SEGMENT(ldt, MISCREG_TSL - MISCREG_SEG_SEL_BASE); \
162  } while (0)
163 
164 #define FOREACH_DTABLE() \
165  do { \
166  APPLY_DTABLE(gdt, MISCREG_TSG - MISCREG_SEG_SEL_BASE); \
167  APPLY_DTABLE(idt, MISCREG_IDTR - MISCREG_SEG_SEL_BASE); \
168  } while (0)
169 
/**
 * Allocate a kernel-style struct that ends in a variable-length array.
 *
 * @tparam STRUCT Header type (e.g., struct kvm_msrs).
 * @tparam ENTRY Element type of the trailing array.
 * @param entries Number of trailing array elements to reserve space for.
 * @return Raw pointer to uninitialized storage; the caller owns it.
 *
 * NOTE(review): callers below release this through std::unique_ptr's
 * default deleter (plain delete) although the storage came from raw
 * operator new — works in practice for these trivially-destructible
 * kernel structs, but a custom deleter would be cleaner. TODO confirm.
 */
template<typename STRUCT, typename ENTRY>
static STRUCT *newVarStruct(size_t entries)
{
    return (STRUCT *)operator new(sizeof(STRUCT) + entries * sizeof(ENTRY));
}
175 
/** Pretty-print a KVM general-purpose register dump via inform(). */
static void
dumpKvm(const struct kvm_regs &regs)
{
    inform("KVM register state:\n");

    // One line per integer register, driven by the FOREACH_IREG table.
#define APPLY_IREG(kreg, mreg) \
    inform("\t" # kreg ": 0x%llx\n", regs.kreg)

    FOREACH_IREG();

#undef APPLY_IREG

    inform("\trip: 0x%llx\n", regs.rip);
    inform("\trflags: 0x%llx\n", regs.rflags);
}
191 
/** Pretty-print one KVM segment register (base, limit, selector, attrs). */
static void
dumpKvm(const char *reg_name, const struct kvm_segment &seg)
{
    inform("\t%s: @0x%llx+%x [sel: 0x%x, type: 0x%x]\n"
           "\t\tpres.: %u, dpl: %u, db: %u, s: %u, l: %u, g: %u, avl: %u, unus.: %u\n",
           reg_name,
           seg.base, seg.limit, seg.selector, seg.type,
           seg.present, seg.dpl, seg.db, seg.s, seg.l, seg.g, seg.avl, seg.unusable);
}
201 
/** Pretty-print a KVM descriptor table register (GDTR/IDTR style). */
static void
dumpKvm(const char *reg_name, const struct kvm_dtable &dtable)
{
    inform("\t%s: @0x%llx+%x\n",
           reg_name, dtable.base, dtable.limit);
}
208 
/** Pretty-print the KVM special registers: segments, control regs,
 * descriptor tables, and the pending interrupt bitmap. */
static void
dumpKvm(const struct kvm_sregs &sregs)
{
#define APPLY_SREG(kreg, mreg) \
    inform("\t" # kreg ": 0x%llx\n", sregs.kreg);
#define APPLY_SEGMENT(kreg, idx) \
    dumpKvm(# kreg, sregs.kreg);
#define APPLY_DTABLE(kreg, idx) \
    dumpKvm(# kreg, sregs.kreg);

    inform("Special registers:\n");
    FOREACH_SEGMENT();
    FOREACH_SREG();
    FOREACH_DTABLE();

    // One 64-bit word of the bitmap per iteration.
    inform("Interrupt Bitmap:");
    for (int i = 0; i < KVM_NR_INTERRUPTS; i += 64)
        inform(" 0x%.8x", sregs.interrupt_bitmap[i / 64]);

#undef APPLY_SREG
#undef APPLY_SEGMENT
#undef APPLY_DTABLE
}
232 
#ifdef KVM_GET_DEBUGREGS
/** Pretty-print KVM debug registers (only built when the host headers
 * provide the KVM_GET_DEBUGREGS API). */
static void
dumpKvm(const struct kvm_debugregs &regs)
{
    inform("KVM debug state:\n");

#define APPLY_DREG(kreg, mreg) \
    inform("\t" # kreg ": 0x%llx\n", regs.kreg)

    FOREACH_DREG();

#undef APPLY_DREG

    inform("\tflags: 0x%llx\n", regs.flags);
}
#endif
249 
/** Print the FXSave-specific FPU fields (64-bit IP/DP view, MXCSR mask). */
static void
dumpFpuSpec(const struct FXSave &xs)
{
    inform("\tlast_ip: 0x%x\n", xs.ctrl64.fpu_ip);
    inform("\tlast_dp: 0x%x\n", xs.ctrl64.fpu_dp);
    inform("\tmxcsr_mask: 0x%x\n", xs.mxcsr_mask);
}
257 
/** Print the kvm_fpu-specific FPU fields (last instruction/data ptr). */
static void
dumpFpuSpec(const struct kvm_fpu &fpu)
{
    inform("\tlast_ip: 0x%x\n", fpu.last_ip);
    inform("\tlast_dp: 0x%x\n", fpu.last_dp);
}
264 
/**
 * Print the FPU/SSE state common to the kvm_fpu and FXSave layouts.
 *
 * Decodes the status word (top-of-stack, condition codes, exception
 * flags), dumps the x87 stack relative to TOP with each register's
 * raw 80-bit hex and a double approximation, then hex-dumps the XMM
 * registers. Layout-specific fields are handled by the dumpFpuSpec
 * overloads.
 */
template<typename T>
static void
dumpFpuCommon(const T &fpu)
{
    // Top-of-stack index lives in FSW bits 11-13.
    const unsigned top((fpu.fsw >> 11) & 0x7);
    inform("\tfcw: 0x%x\n", fpu.fcw);

    inform("\tfsw: 0x%x (top: %i, "
           "conditions: %s%s%s%s, exceptions: %s%s%s%s%s%s %s%s%s)\n",
           fpu.fsw, top,

           (fpu.fsw & CC0Bit) ? "C0" : "",
           (fpu.fsw & CC1Bit) ? "C1" : "",
           (fpu.fsw & CC2Bit) ? "C2" : "",
           (fpu.fsw & CC3Bit) ? "C3" : "",

           (fpu.fsw & IEBit) ? "I" : "",
           (fpu.fsw & DEBit) ? "D" : "",
           (fpu.fsw & ZEBit) ? "Z" : "",
           (fpu.fsw & OEBit) ? "O" : "",
           (fpu.fsw & UEBit) ? "U" : "",
           (fpu.fsw & PEBit) ? "P" : "",

           (fpu.fsw & StackFaultBit) ? "SF " : "",
           (fpu.fsw & ErrSummaryBit) ? "ES " : "",
           (fpu.fsw & BusyBit) ? "BUSY " : ""
        );
    inform("\tftwx: 0x%x\n", fpu.ftwx);
    inform("\tlast_opcode: 0x%x\n", fpu.last_opcode);
    dumpFpuSpec(fpu);
    inform("\tmxcsr: 0x%x\n", fpu.mxcsr);
    inform("\tFP Stack:\n");
    for (int i = 0; i < 8; ++i) {
        const unsigned reg_idx((i + top) & 0x7);
        // A cleared bit in the abridged tag word means empty.
        const bool empty(!((fpu.ftwx >> reg_idx) & 0x1));
        const double value(X86ISA::loadFloat80(fpu.fpr[i]));
        char hex[33];
        // 10 bytes = the 80-bit x87 value; snprintf adds the NUL.
        for (int j = 0; j < 10; ++j)
            snprintf(&hex[j*2], 3, "%.2x", fpu.fpr[i][j]);
        inform("\t\tST%i/%i: 0x%s (%f)%s\n", i, reg_idx,
               hex, value, empty ? " (e)" : "");
    }
    inform("\tXMM registers:\n");
    for (int i = 0; i < 16; ++i) {
        char hex[33];
        for (int j = 0; j < 16; ++j)
            snprintf(&hex[j*2], 3, "%.2x", fpu.xmm[i][j]);
        inform("\t\t%i: 0x%s\n", i, hex);
    }
}
315 
/** Print FPU state obtained through the legacy KVM_GET_FPU layout. */
static void
dumpKvm(const struct kvm_fpu &fpu)
{
    inform("FPU registers:\n");
    dumpFpuCommon(fpu);
}
322 
/** Print FPU state stored in an XSave area (reinterpreted as FXSave). */
static void
dumpKvm(const struct kvm_xsave &xsave)
{
    inform("FPU registers (XSave):\n");
    dumpFpuCommon(*(FXSave *)xsave.region);
}
329 
/** Print every MSR entry carried in a kvm_msrs container. */
static void
dumpKvm(const struct kvm_msrs &msrs)
{
    inform("MSRs:\n");

    for (int i = 0; i < msrs.nmsrs; ++i) {
        const struct kvm_msr_entry &e(msrs.entries[i]);

        inform("\t0x%x: 0x%x\n", e.index, e.data);
    }
}
341 
/** Print the extended control registers (XCRs) reported by KVM. */
static void
dumpKvm(const struct kvm_xcrs &regs)
{
    inform("KVM XCR registers:\n");

    inform("\tFlags: 0x%x\n", regs.flags);
    for (int i = 0; i < regs.nr_xcrs; ++i) {
        inform("\tXCR[0x%x]: 0x%x\n",
               regs.xcrs[i].xcr,
               regs.xcrs[i].value);
    }
}
354 
/** Print pending vCPU event state: exception, interrupt, and NMI
 * injection status plus the SIPI vector and flags. */
static void
dumpKvm(const struct kvm_vcpu_events &events)
{
    inform("vCPU events:\n");

    inform("\tException: [inj: %i, nr: %i, has_ec: %i, ec: %i]\n",
           events.exception.injected, events.exception.nr,
           events.exception.has_error_code, events.exception.error_code);

    inform("\tInterrupt: [inj: %i, nr: %i, soft: %i]\n",
           events.interrupt.injected, events.interrupt.nr,
           events.interrupt.soft);

    inform("\tNMI: [inj: %i, pending: %i, masked: %i]\n",
           events.nmi.injected, events.nmi.pending,
           events.nmi.masked);

    inform("\tSIPI vector: 0x%x\n", events.sipi_vector);
    inform("\tFlags: 0x%x\n", events.flags);
}
375 
/**
 * Is this a canonical x86-64 virtual address?
 *
 * The declarator line was lost in the HTML extraction of this file;
 * it is restored here from the call site in checkSeg(), which invokes
 * isCanonicalAddress(seg.base) on a 64-bit segment base.
 *
 * @param addr Virtual address to test.
 * @return true if bits 63:47 are all zeros or all ones.
 */
static bool
isCanonicalAddress(uint64_t addr)
{
    // x86-64 doesn't currently use the full 64-bit virtual address
    // space, instead it uses signed 48 bit addresses that are
    // sign-extended to 64 bits. Such addresses are known as
    // "canonical".
    uint64_t upper_half(addr & 0xffff800000000000ULL);
    return upper_half == 0 || upper_half == 0xffff800000000000ULL;
}
386 
/**
 * Sanity-check a segment register against the constraints VMX places
 * on guest state. Only warns; never modifies anything.
 *
 * @param name Human-readable segment name used in warnings.
 * @param idx gem5 misc-reg index identifying which segment this is.
 * @param seg KVM segment state to validate.
 * @param sregs Full special-register file, for cross-segment checks.
 *
 * NOTE(review): the switch statements below rely on case fall-through;
 * the upstream source carries explicit fall-through markers on lines
 * the extraction dropped. They are flagged with comments here.
 */
static void
checkSeg(const char *name, const int idx, const struct kvm_segment &seg,
         struct kvm_sregs sregs)
{
    // Check the register base
    switch (idx) {
      case MISCREG_TSL:
      case MISCREG_TR:
      case MISCREG_FS:
      case MISCREG_GS:
        // These may hold 64-bit bases, but they must be canonical.
        if (!isCanonicalAddress(seg.base))
            warn("Illegal %s base: 0x%x\n", name, seg.base);
        break;

      case MISCREG_SS:
      case MISCREG_DS:
      case MISCREG_ES:
        if (seg.unusable)
            break;
        // fall through
      case MISCREG_CS:
        // CS/SS/DS/ES bases must fit in 32 bits.
        if (seg.base & 0xffffffff00000000ULL)
            warn("Illegal %s base: 0x%x\n", name, seg.base);
        break;
    }

    // Check the type
    switch (idx) {
      case MISCREG_CS:
        switch (seg.type) {
          case 3:
            if (seg.dpl != 0)
                warn("CS type is 3 but dpl != 0.\n");
            break;
          case 9:
          case 11:
            // Non-conforming code: CS.DPL must equal SS.DPL.
            if (seg.dpl != sregs.ss.dpl)
                warn("CS type is %i but CS DPL != SS DPL\n", seg.type);
            break;
          case 13:
          case 15:
            // Conforming code: CS.DPL must not exceed SS.DPL.
            if (seg.dpl > sregs.ss.dpl)
                warn("CS type is %i but CS DPL > SS DPL\n", seg.type);
            break;
          default:
            warn("Illegal CS type: %i\n", seg.type);
            break;
        }
        break;

      case MISCREG_SS:
        if (seg.unusable)
            break;
        switch (seg.type) {
          case 3:
            if (sregs.cs.type == 3 && seg.dpl != 0)
                warn("CS type is 3, but SS DPL is != 0.\n");
            // fall through
          case 7:
            // Outside protected mode SS.DPL must be zero.
            if (!(sregs.cr0 & 1) && seg.dpl != 0)
                warn("SS DPL is %i, but CR0 PE is 0\n", seg.dpl);
            break;
          default:
            warn("Illegal SS type: %i\n", seg.type);
            break;
        }
        break;

      case MISCREG_DS:
      case MISCREG_ES:
      case MISCREG_FS:
      case MISCREG_GS:
        if (seg.unusable)
            break;
        // Data/code segments must be accessed; code must be readable.
        if (!(seg.type & 0x1) ||
            ((seg.type & 0x8) && !(seg.type & 0x2)))
            warn("%s has an illegal type field: %i\n", name, seg.type);
        break;

      case MISCREG_TR:
        // TODO: We should check the CPU mode
        if (seg.type != 3 && seg.type != 11)
            warn("%s: Illegal segment type (%i)\n", name, seg.type);
        break;

      case MISCREG_TSL:
        if (seg.unusable)
            break;
        if (seg.type != 2)
            warn("%s: Illegal segment type (%i)\n", name, seg.type);
        break;
    }

    // Check the S (descriptor type) flag.
    switch (idx) {
      case MISCREG_SS:
      case MISCREG_DS:
      case MISCREG_ES:
      case MISCREG_FS:
      case MISCREG_GS:
        if (seg.unusable)
            break;
        // fall through
      case MISCREG_CS:
        if (!seg.s)
            warn("%s: S flag not set\n", name);
        break;

      case MISCREG_TSL:
        if (seg.unusable)
            break;
        // fall through
      case MISCREG_TR:
        if (seg.s)
            warn("%s: S flag is set\n", name);
        break;
    }

    // Check the P flag and the limit/granularity combination.
    switch (idx) {
      case MISCREG_SS:
      case MISCREG_DS:
      case MISCREG_ES:
      case MISCREG_FS:
      case MISCREG_GS:
      case MISCREG_TSL:
        if (seg.unusable)
            break;
        // fall through
      case MISCREG_TR:
      case MISCREG_CS:
        if (!seg.present)
            warn("%s: P flag not set\n", name);

        // With g set, the low 12 limit bits must be ones; with g
        // clear, the top limit bits must be zeros.
        if (((seg.limit & 0xFFF) == 0 && seg.g) ||
            ((seg.limit & 0xFFF00000) != 0 && !seg.g)) {
            warn("%s limit (0x%x) and g (%i) combination is illegal.\n",
                 name, seg.limit, seg.g);
        }
        break;
    }

    // TODO: Check CS DB
}
529 
/**
 * Construct an x86 KVM-accelerated CPU.
 *
 * Panics when the host KVM lacks the hard-required capabilities and
 * probes the optional ones (debug registers, XSave, XCRs). XSave-based
 * FPU synchronization may be disabled by the user via params and is
 * forced off when the host does not support it.
 */
X86KvmCPU::X86KvmCPU(const X86KvmCPUParams &params)
    : BaseKvmCPU(params),
      useXSave(params.useXSave)
{
    Kvm &kvm(*vm.kvm);

    // Hard requirements: refuse to run without these.
    if (!kvm.capSetTSSAddress())
        panic("KVM: Missing capability (KVM_CAP_SET_TSS_ADDR)\n");
    if (!kvm.capExtendedCPUID())
        panic("KVM: Missing capability (KVM_CAP_EXT_CPUID)\n");
    // Soft requirements: degrade with a warning.
    if (!kvm.capUserNMI())
        warn("KVM: Missing capability (KVM_CAP_USER_NMI)\n");
    if (!kvm.capVCPUEvents())
        warn("KVM: Missing capability (KVM_CAP_VCPU_EVENTS)\n");

    haveDebugRegs = kvm.capDebugRegs();
    haveXSave = kvm.capXSave();
    haveXCRs = kvm.capXCRs();

    if (useXSave && !haveXSave) {
        warn("KVM: XSAVE not supported by host. MXCSR synchronization might be "
             "unreliable due to kernel bugs.\n");
        useXSave = false;
    } else if (!useXSave) {
        warn("KVM: XSave FPU/SIMD synchronization disabled by user.\n");
    }
}
557 
// NOTE(review): the declarator line was lost in extraction —
// presumably the (empty) X86KvmCPU destructor; restore from upstream.
{
}
561 
// Late initialization after construction. NOTE(review): extraction
// dropped the declarator (presumably "X86KvmCPU::startup()") and one
// statement before updateCPUID() (presumably the BaseKvmCPU::startup()
// chain call); restore from upstream before compiling.
void
{

    updateCPUID();

    // TODO: Do we need to create an identity mapped TSS area? We
    // should call kvm.vm.setTSSAddress() here in that case. It should
    // only be needed for old versions of the virtualization
    // extensions. We should make sure that the identity range is
    // reserved in the e820 memory map in that case.
}
575 
// Dump the complete vCPU state for debugging. NOTE(review): the
// declarator line (presumably "X86KvmCPU::dump() const") was lost in
// extraction; restore from upstream.
void
{
    dumpIntRegs();
    // FPU/SIMD state is fetched through XSave when enabled (see the
    // constructor's note about legacy-interface MXCSR reliability).
    if (useXSave)
        dumpXSave();
    else
        dumpFpuRegs();
    dumpSpecRegs();
    dumpDebugRegs();
    dumpXCRs();
    dumpVCpuEvents();
    dumpMSRs();
}
590 
// Fetch and print the legacy FPU state. NOTE(review): declarator lost
// in extraction (presumably "X86KvmCPU::dumpFpuRegs() const").
void
{
    struct kvm_fpu fpu;
    getFPUState(fpu);
    dumpKvm(fpu);
}
598 
// Fetch and print the integer registers. NOTE(review): declarator lost
// in extraction (presumably "X86KvmCPU::dumpIntRegs() const").
void
{
    struct kvm_regs regs;
    getRegisters(regs);
    dumpKvm(regs);
}
606 
// Fetch and print the special registers. NOTE(review): declarator lost
// in extraction (presumably "X86KvmCPU::dumpSpecRegs() const").
void
{
    struct kvm_sregs sregs;
    getSpecialRegisters(sregs);
    dumpKvm(sregs);
}
614 
// Fetch and print the debug registers, if the host kernel supports
// them. NOTE(review): declarator lost in extraction (presumably
// "X86KvmCPU::dumpDebugRegs() const").
void
{
    if (haveDebugRegs) {
#ifdef KVM_GET_DEBUGREGS
        struct kvm_debugregs dregs;
        getDebugRegisters(dregs);
        dumpKvm(dregs);
#endif
    } else {
        inform("Debug registers not supported by kernel.\n");
    }
}
628 
// Fetch and print the XCRs, if supported. NOTE(review): declarator
// lost in extraction (presumably "X86KvmCPU::dumpXCRs() const").
void
{
    if (haveXCRs) {
        struct kvm_xcrs xcrs;
        getXCRs(xcrs);
        dumpKvm(xcrs);
    } else {
        inform("XCRs not supported by kernel.\n");
    }
}
640 
// Fetch and print the XSave state, if supported. NOTE(review):
// declarator lost in extraction (presumably
// "X86KvmCPU::dumpXSave() const").
void
{
    if (haveXSave) {
        struct kvm_xsave xsave;
        getXSave(xsave);
        dumpKvm(xsave);
    } else {
        inform("XSave not supported by kernel.\n");
    }
}
652 
// Fetch and print pending vCPU events. NOTE(review): declarator lost
// in extraction (presumably "X86KvmCPU::dumpVCpuEvents() const").
void
{
    struct kvm_vcpu_events events;
    getVCpuEvents(events);
    dumpKvm(events);
}
660 
// Fetch and print every MSR the host kernel supports. NOTE(review):
// declarator lost in extraction (presumably
// "X86KvmCPU::dumpMSRs() const").
void
{
    const Kvm::MSRIndexVector &supported_msrs(vm.kvm->getSupportedMSRs());
    // kvm_msrs ends in a variable-length entry array; allocate the
    // header plus one entry per supported MSR.
    std::unique_ptr<struct kvm_msrs> msrs(
        newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(
            supported_msrs.size()));

    msrs->nmsrs = supported_msrs.size();
    for (int i = 0; i < supported_msrs.size(); ++i) {
        struct kvm_msr_entry &e(msrs->entries[i]);
        e.index = supported_msrs[i];
        e.reserved = 0;
        e.data = 0;
    }
    getMSRs(*msrs.get());

    dumpKvm(*msrs.get());
}
680 
// Push the gem5 thread-context state into KVM. NOTE(review):
// extraction dropped the declarator (presumably
// "X86KvmCPU::updateKvmState()") and the body lines that sync the
// individual register classes before the debug dump; restore from
// upstream.
void
{

    DPRINTF(KvmContext, "X86KvmCPU::updateKvmState():\n");
    if (debug::KvmContext)
        dump();
}
693 
// Sync gem5's integer registers, RIP and RFLAGS into KVM.
// NOTE(review): declarator lost in extraction (presumably
// "X86KvmCPU::updateKvmStateRegs()").
void
{
    struct kvm_regs regs;

#define APPLY_IREG(kreg, mreg) regs.kreg = tc->readIntReg(mreg)
    FOREACH_IREG();
#undef APPLY_IREG

    // KVM stores RIP as an offset into CS; gem5's instAddr() is linear.
    regs.rip = tc->instAddr() - tc->readMiscReg(MISCREG_CS_BASE);

    /* You might think that setting regs.rflags to the contents
     * MISCREG_RFLAGS here would suffice. In that case you're
     * mistaken. We need to reconstruct it from a bunch of ucode
     * registers and wave a dead chicken over it (aka mask out and set
     * reserved bits) to get it to work.
     */
    regs.rflags = X86ISA::getRFlags(tc);

    setRegisters(regs);
}
715 
/**
 * Populate a kvm_segment from gem5's segment state for segment @p index.
 *
 * NOTE(review): the line declaring the local 'attr' (presumably a
 * SegAttr read from MISCREG_SEG_ATTR(index)) was lost in extraction;
 * restore it from upstream before compiling.
 */
static inline void
setKvmSegmentReg(ThreadContext *tc, struct kvm_segment &kvm_seg,
                 const int index)
{

    kvm_seg.base = tc->readMiscRegNoEffect(MISCREG_SEG_BASE(index));
    kvm_seg.limit = tc->readMiscRegNoEffect(MISCREG_SEG_LIMIT(index));
    kvm_seg.selector = tc->readMiscRegNoEffect(MISCREG_SEG_SEL(index));
    kvm_seg.type = attr.type;
    kvm_seg.present = attr.present;
    kvm_seg.dpl = attr.dpl;
    kvm_seg.db = attr.defaultSize;
    kvm_seg.s = attr.system;
    kvm_seg.l = attr.longMode;
    kvm_seg.g = attr.granularity;
    kvm_seg.avl = attr.avl;

    // A segment is normally unusable when the selector is zero. There
    // is a attr.unusable flag in gem5, but it seems unused. qemu
    // seems to set this to 0 all the time, so we just do the same and
    // hope for the best.
    kvm_seg.unusable = 0;
}
740 
741 static inline void
742 setKvmDTableReg(ThreadContext *tc, struct kvm_dtable &kvm_dtable,
743  const int index)
744 {
745  kvm_dtable.base = tc->readMiscRegNoEffect(MISCREG_SEG_BASE(index));
746  kvm_dtable.limit = tc->readMiscRegNoEffect(MISCREG_SEG_LIMIT(index));
747 }
748 
749 static void
750 forceSegAccessed(struct kvm_segment &seg)
751 {
752  // Intel's VMX requires that (some) usable segments are flagged as
753  // 'accessed' (i.e., the lowest bit in the segment type is set)
754  // when entering VMX. This wouldn't necessary be the case even if
755  // gem5 did set the access bits correctly, so we force it to one
756  // in that case.
757  if (!seg.unusable)
758  seg.type |= SEG_TYPE_BIT_ACCESSED;
759 }
760 
// Sync gem5's special registers (control regs, segments, descriptor
// tables) into KVM, fixing up state that VMX would otherwise reject.
// NOTE(review): declarator lost in extraction (presumably
// "X86KvmCPU::updateKvmStateSRegs()").
void
{
    struct kvm_sregs sregs;

#define APPLY_SREG(kreg, mreg) sregs.kreg = tc->readMiscRegNoEffect(mreg)
#define APPLY_SEGMENT(kreg, idx) setKvmSegmentReg(tc, sregs.kreg, idx)
#define APPLY_DTABLE(kreg, idx) setKvmDTableReg(tc, sregs.kreg, idx)

    FOREACH_SREG();
    FOREACH_SEGMENT();
    FOREACH_DTABLE();

#undef APPLY_SREG
#undef APPLY_SEGMENT
#undef APPLY_DTABLE

    // Clear the interrupt bitmap
    memset(&sregs.interrupt_bitmap, 0, sizeof(sregs.interrupt_bitmap));

    // VMX requires CS, SS, DS, ES, FS, and GS to have the accessed
    // bit in the type field set.
    forceSegAccessed(sregs.cs);
    forceSegAccessed(sregs.ss);
    forceSegAccessed(sregs.ds);
    forceSegAccessed(sregs.es);
    forceSegAccessed(sregs.fs);
    forceSegAccessed(sregs.gs);

    // There are currently some cases where the active task isn't
    // marked as busy. This is illegal in VMX, so we force it to busy.
    if (sregs.tr.type == SEG_SYS_TYPE_TSS_AVAILABLE) {
        hack("tr.type (%i) is not busy. Forcing the busy bit.\n",
             sregs.tr.type);
        sregs.tr.type = SEG_SYS_TYPE_TSS_BUSY;
    }

    // VMX requires the DPL of SS and CS to be the same for
    // non-conforming code segments. It seems like m5 doesn't set the
    // DPL of SS correctly when taking interrupts, so we need to fix
    // that here.
    if ((sregs.cs.type == SEG_CS_TYPE_ACCESSED ||
         sregs.cs.type == SEG_CS_TYPE_READ_ACCESSED) &&
        sregs.cs.dpl != sregs.ss.dpl) {

        hack("CS.DPL (%i) != SS.DPL (%i): Forcing SS.DPL to %i\n",
             sregs.cs.dpl, sregs.ss.dpl, sregs.cs.dpl);
        sregs.ss.dpl = sregs.cs.dpl;
    }

    // Do checks after fixing up the state to avoid getting excessive
    // amounts of warnings.
    RFLAGS rflags_nocc(tc->readMiscReg(MISCREG_RFLAGS));
    if (!rflags_nocc.vm) {
        // Do segment verification if the CPU isn't entering virtual
        // 8086 mode. We currently assume that unrestricted guest
        // mode is available.

#define APPLY_SEGMENT(kreg, idx) \
        checkSeg(# kreg, idx + MISCREG_SEG_SEL_BASE, sregs.kreg, sregs)

        FOREACH_SEGMENT();
#undef APPLY_SEGMENT
    }

    setSpecialRegisters(sregs);
}
828 
// Populate the FPU/SSE fields shared between the kvm_fpu and FXSave
// layouts from the gem5 thread context. NOTE(review): extraction
// dropped the declarator (presumably
// "updateKvmStateFPUCommon(ThreadContext *tc, T &fpu)") and the two
// tc->readFloatReg(...) lines that supply the XMM register halves in
// the final loop; restore from upstream before compiling.
template <typename T>
static void
{
    fpu.mxcsr = tc->readMiscRegNoEffect(MISCREG_MXCSR);
    fpu.fcw = tc->readMiscRegNoEffect(MISCREG_FCW);
    // No need to rebuild from MISCREG_FSW and MISCREG_TOP if we read
    // with effects.
    fpu.fsw = tc->readMiscReg(MISCREG_FSW);

    // KVM/hardware use the abridged (one bit per register) tag word.
    uint64_t ftw(tc->readMiscRegNoEffect(MISCREG_FTW));
    fpu.ftwx = X86ISA::convX87TagsToXTags(ftw);

    fpu.last_opcode = tc->readMiscRegNoEffect(MISCREG_FOP);

    // The FXSave fpr array is laid out relative to TOP (FSW bits 11-13).
    const unsigned top((fpu.fsw >> 11) & 0x7);
    for (int i = 0; i < 8; ++i) {
        const unsigned reg_idx((i + top) & 0x7);
        const double value(bitsToFloat64(
            tc->readFloatReg(FLOATREG_FPR(reg_idx))));
        DPRINTF(KvmContext, "Setting KVM FP reg %i (st[%i]) := %f\n",
                reg_idx, i, value);
        X86ISA::storeFloat80(fpu.fpr[i], value);
    }

    // TODO: We should update the MMX state

    for (int i = 0; i < 16; ++i) {
        *(uint64_t *)&fpu.xmm[i][0] =
        *(uint64_t *)&fpu.xmm[i][8] =
    }
}
863 
// Sync gem5 FPU state to KVM via the legacy KVM_SET_FPU layout.
// NOTE(review): extraction dropped the declarator (presumably
// "X86KvmCPU::updateKvmStateFPULegacy()"), the call populating the
// common fields, and the guard conditions (presumably FISEG/FOSEG
// checks) in front of the warn_once() lines; restore from upstream.
void
{
    struct kvm_fpu fpu;

    // There is some padding in the FP registers, so we'd better zero
    // the whole struct.
    memset(&fpu, 0, sizeof(fpu));


        warn_once("MISCREG_FISEG is non-zero.\n");

    fpu.last_ip = tc->readMiscRegNoEffect(MISCREG_FIOFF);

        warn_once("MISCREG_FOSEG is non-zero.\n");

    fpu.last_dp = tc->readMiscRegNoEffect(MISCREG_FOOFF);

    setFPUState(fpu);
}
887 
// Sync gem5 FPU/SIMD state to KVM through the XSave interface.
// NOTE(review): extraction dropped the declarator (presumably
// "X86KvmCPU::updateKvmStateFPUXSave()") and the guard conditions
// (presumably FISEG/FOSEG checks) in front of the warn_once() lines;
// restore from upstream.
void
{
    struct kvm_xsave kxsave;
    // The legacy FXSave region occupies the start of the XSave area.
    FXSave &xsave(*(FXSave *)kxsave.region);

    // There is some padding and reserved fields in the structure, so
    // we'd better zero the whole thing.
    memset(&kxsave, 0, sizeof(kxsave));

    updateKvmStateFPUCommon(tc, xsave);

        warn_once("MISCREG_FISEG is non-zero.\n");

    xsave.ctrl64.fpu_ip = tc->readMiscRegNoEffect(MISCREG_FIOFF);

        warn_once("MISCREG_FOSEG is non-zero.\n");

    xsave.ctrl64.fpu_dp = tc->readMiscRegNoEffect(MISCREG_FOOFF);

    setXSave(kxsave);
}
912 
// Dispatch FPU synchronization to the XSave or legacy path.
// NOTE(review): extraction dropped the declarator (presumably
// "X86KvmCPU::updateKvmStateFPU()") and both branch bodies (presumably
// the XSave/legacy sync calls); restore from upstream before compiling.
void
{
    if (useXSave)
    else
}
921 
// Copy the whitelisted MSRs from the gem5 thread context into KVM.
// NOTE(review): declarator lost in extraction (presumably
// "X86KvmCPU::updateKvmStateMSRs()").
void
{
    KvmMSRVector msrs;

    // Only transfer MSRs known to both gem5 and the host kernel.
    const Kvm::MSRIndexVector &indices(getMsrIntersection());

    for (auto it = indices.cbegin(); it != indices.cend(); ++it) {
        struct kvm_msr_entry e;

        e.index = *it;
        e.reserved = 0;
        e.data = tc->readMiscReg(msrMap.at(*it));
        DPRINTF(KvmContext, "Adding MSR: idx: 0x%x, data: 0x%x\n",
                e.index, e.data);

        msrs.push_back(e);
    }

    setMSRs(msrs);
}
943 
// Pull the full vCPU state out of KVM and apply it to the gem5 thread
// context. NOTE(review): extraction dropped the declarator (presumably
// "X86KvmCPU::updateThreadContext()") and the calls that apply the
// sregs, the XSave/FPU state, the MSRs, and the final side-effecting
// M5 reg write described by the trailing comment; restore from
// upstream before compiling.
void
{
    struct kvm_regs regs;
    struct kvm_sregs sregs;

    getRegisters(regs);
    getSpecialRegisters(sregs);

    DPRINTF(KvmContext, "X86KvmCPU::updateThreadContext():\n");
    if (debug::KvmContext)
        dump();

    updateThreadContextRegs(regs, sregs);
    if (useXSave) {
        struct kvm_xsave xsave;
        getXSave(xsave);

    } else {
        struct kvm_fpu fpu;
        getFPUState(fpu);

    }

    // The M5 misc reg caches some values from other
    // registers. Writing to it with side effects causes it to be
    // updated from its source registers.
}
977 
/**
 * Copy KVM's integer registers, RIP and RFLAGS into the gem5 thread
 * context.
 *
 * @param regs KVM general-purpose register state.
 * @param sregs KVM special registers; needed because KVM's RIP is an
 *              offset within CS while gem5's PC is linear.
 */
void
X86KvmCPU::updateThreadContextRegs(const struct kvm_regs &regs,
                                   const struct kvm_sregs &sregs)
{
#define APPLY_IREG(kreg, mreg) tc->setIntReg(mreg, regs.kreg)

    FOREACH_IREG();

#undef APPLY_IREG

    tc->pcState(PCState(regs.rip + sregs.cs.base));

    // Flags are spread out across multiple semi-magic registers so we
    // need some special care when updating them.
    X86ISA::setRFlags(tc, regs.rflags);
}
994 
995 
/**
 * Copy a KVM segment register into the gem5 thread context.
 *
 * NOTE(review): the final line that stores the assembled attribute
 * word (presumably setMiscReg(MISCREG_SEG_ATTR(index), attr)) was
 * lost in extraction; restore it from upstream, otherwise 'attr' is
 * built but never used.
 */
inline void
setContextSegment(ThreadContext *tc, const struct kvm_segment &kvm_seg,
                  const int index)
{
    SegAttr attr(0);

    attr.type = kvm_seg.type;
    attr.present = kvm_seg.present;
    attr.dpl = kvm_seg.dpl;
    attr.defaultSize = kvm_seg.db;
    attr.system = kvm_seg.s;
    attr.longMode = kvm_seg.l;
    attr.granularity = kvm_seg.g;
    attr.avl = kvm_seg.avl;
    attr.unusable = kvm_seg.unusable;

    // We need some setMiscReg magic here to keep the effective base
    // addresses in sync. We need an up-to-date version of EFER, so
    // make sure this is called after the sregs have been synced.
    tc->setMiscReg(MISCREG_SEG_BASE(index), kvm_seg.base);
    tc->setMiscReg(MISCREG_SEG_LIMIT(index), kvm_seg.limit);
    tc->setMiscReg(MISCREG_SEG_SEL(index), kvm_seg.selector);
}
1020 
1021 inline void
1022 setContextSegment(ThreadContext *tc, const struct kvm_dtable &kvm_dtable,
1023  const int index)
1024 {
1025  // We need some setMiscReg magic here to keep the effective base
1026  // addresses in sync. We need an up-to-date version of EFER, so
1027  // make sure this is called after the sregs have been synced.
1028  tc->setMiscReg(MISCREG_SEG_BASE(index), kvm_dtable.base);
1029  tc->setMiscReg(MISCREG_SEG_LIMIT(index), kvm_dtable.limit);
1030 }
1031 
/**
 * Copy KVM's special registers (control regs, segments, descriptor
 * tables) into the gem5 thread context.
 *
 * The copies of APIC base and CR8 shared through the kvm_run
 * structure must already agree with the sregs we were handed.
 */
void
X86KvmCPU::updateThreadContextSRegs(const struct kvm_sregs &sregs)
{
    assert(getKvmRunState()->apic_base == sregs.apic_base);
    assert(getKvmRunState()->cr8 == sregs.cr8);

#define APPLY_SREG(kreg, mreg) tc->setMiscRegNoEffect(mreg, sregs.kreg)
#define APPLY_SEGMENT(kreg, idx) setContextSegment(tc, sregs.kreg, idx)
#define APPLY_DTABLE(kreg, idx) setContextSegment(tc, sregs.kreg, idx)
    FOREACH_SREG();
    FOREACH_SEGMENT();
    FOREACH_DTABLE();
#undef APPLY_SREG
#undef APPLY_SEGMENT
#undef APPLY_DTABLE
}
1048 
// Update the gem5 thread context from the FPU fields shared between
// the kvm_fpu and FXSave layouts. NOTE(review): extraction dropped
// the declarator (presumably
// "updateThreadContextFPUCommon(ThreadContext *tc, const T &fpu)")
// and at least one store between the MMX TODO and the MXCSR store
// (presumably the x87 TOP update); restore from upstream.
template<typename T>
static void
{
    // Top-of-stack index from FSW bits 11-13; the fpr array is
    // TOP-relative.
    const unsigned top((fpu.fsw >> 11) & 0x7);

    for (int i = 0; i < 8; ++i) {
        const unsigned reg_idx((i + top) & 0x7);
        const double value(X86ISA::loadFloat80(fpu.fpr[i]));
        DPRINTF(KvmContext, "Setting gem5 FP reg %i (st[%i]) := %f\n",
                reg_idx, i, value);
        tc->setFloatReg(FLOATREG_FPR(reg_idx), floatToBits64(value));
    }

    // TODO: We should update the MMX state

    tc->setMiscRegNoEffect(MISCREG_MXCSR, fpu.mxcsr);
    tc->setMiscRegNoEffect(MISCREG_FCW, fpu.fcw);
    tc->setMiscRegNoEffect(MISCREG_FSW, fpu.fsw);

    // Expand the abridged hardware tag word back to gem5's full form.
    uint64_t ftw(convX87XTagsToTags(fpu.ftwx));
    // TODO: Are these registers really the same?
    tc->setMiscRegNoEffect(MISCREG_FTW, ftw);
    tc->setMiscRegNoEffect(MISCREG_FTAG, ftw);

    tc->setMiscRegNoEffect(MISCREG_FOP, fpu.last_opcode);

    for (int i = 0; i < 16; ++i) {
        tc->setFloatReg(FLOATREG_XMM_LOW(i), *(uint64_t *)&fpu.xmm[i][0]);
        tc->setFloatReg(FLOATREG_XMM_HIGH(i), *(uint64_t *)&fpu.xmm[i][8]);
    }
}
1082 
/**
 * Update the gem5 thread context from legacy kvm_fpu state.
 *
 * NOTE(review): extraction dropped the common-field update call and
 * (presumably) the FISEG/FOSEG stores that precede the IP/DP stores;
 * restore from upstream before compiling.
 */
void
X86KvmCPU::updateThreadContextFPU(const struct kvm_fpu &fpu)
{

    tc->setMiscRegNoEffect(MISCREG_FIOFF, fpu.last_ip);
    tc->setMiscRegNoEffect(MISCREG_FOOFF, fpu.last_dp);
}
1093 
/**
 * Update the gem5 thread context from an XSave area, viewed through
 * its leading legacy FXSave region.
 *
 * NOTE(review): extraction dropped the common-field update call and
 * (presumably) the FISEG/FOSEG stores that precede the IP/DP stores;
 * restore from upstream before compiling.
 */
void
X86KvmCPU::updateThreadContextXSave(const struct kvm_xsave &kxsave)
{
    const FXSave &xsave(*(const FXSave *)kxsave.region);


    tc->setMiscRegNoEffect(MISCREG_FIOFF, xsave.ctrl64.fpu_ip);
    tc->setMiscRegNoEffect(MISCREG_FOOFF, xsave.ctrl64.fpu_dp);
}
1106 
// Read the whitelisted MSRs back from KVM and store them in the gem5
// thread context. NOTE(review): declarator lost in extraction
// (presumably "X86KvmCPU::updateThreadContextMSRs()").
void
{
    const Kvm::MSRIndexVector &msrs(getMsrIntersection());

    // kvm_msrs ends in a variable-length entry array; allocate the
    // header plus one entry per MSR.
    std::unique_ptr<struct kvm_msrs> kvm_msrs(
        newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(msrs.size()));
    struct kvm_msr_entry *entry;

    // Create a list of MSRs to read
    kvm_msrs->nmsrs = msrs.size();
    entry = &kvm_msrs->entries[0];
    for (auto it = msrs.cbegin(); it != msrs.cend(); ++it, ++entry) {
        entry->index = *it;
        entry->reserved = 0;
        entry->data = 0;
    }

    getMSRs(*kvm_msrs.get());

    // Update M5's state
    entry = &kvm_msrs->entries[0];
    for (int i = 0; i < kvm_msrs->nmsrs; ++i, ++entry) {
        DPRINTF(KvmContext, "Setting M5 MSR: idx: 0x%x, data: 0x%x\n",
                entry->index, entry->data);

        tc->setMiscReg(X86ISA::msrMap.at(entry->index), entry->data);
    }
}
1136 
1137 void
1139 {
1140  Fault fault;
1141 
1143 
1144  {
1145  // Migrate to the interrupt controller's thread to get the
1146  // interrupt. Even though the individual methods are safe to
1147  // call across threads, we might still lose interrupts unless
1148  // they are getInterrupt() and updateIntrInfo() are called
1149  // atomically.
1151  fault = interrupts[0]->getInterrupt();
1152  interrupts[0]->updateIntrInfo();
1153  }
1154 
1155  X86Interrupt *x86int(dynamic_cast<X86Interrupt *>(fault.get()));
1156  if (dynamic_cast<NonMaskableInterrupt *>(fault.get())) {
1157  DPRINTF(KvmInt, "Delivering NMI\n");
1159  } else if (dynamic_cast<InitInterrupt *>(fault.get())) {
1160  DPRINTF(KvmInt, "INIT interrupt\n");
1161  fault.get()->invoke(tc);
1162  // Delay the kvm state update since we won't enter KVM on this
1163  // tick.
1164  threadContextDirty = true;
1165  // HACK: gem5 doesn't actually have any BIOS code, which means
1166  // that we need to halt the thread and wait for a startup
1167  // interrupt before restarting the thread. The simulated CPUs
1168  // use the same kind of hack using a microcode routine.
1169  thread->suspend();
1170  } else if (dynamic_cast<StartupInterrupt *>(fault.get())) {
1171  DPRINTF(KvmInt, "STARTUP interrupt\n");
1172  fault.get()->invoke(tc);
1173  // The kvm state is assumed to have been updated when entering
1174  // kvmRun(), so we need to update it manually here.
1175  updateKvmState();
1176  } else if (x86int) {
1177  struct kvm_interrupt kvm_int;
1178  kvm_int.irq = x86int->getVector();
1179 
1180  DPRINTF(KvmInt, "Delivering interrupt: %s (%u)\n",
1181  fault->name(), kvm_int.irq);
1182 
1183  kvmInterrupt(kvm_int);
1184  } else {
1185  panic("KVM: Unknown interrupt type\n");
1186  }
1187 
1188 }
1189 
1190 Tick
1192 {
1193  struct kvm_run &kvm_run(*getKvmRunState());
1194 
1195  auto *lapic = dynamic_cast<X86ISA::Interrupts *>(interrupts[0]);
1196 
1197  if (lapic->checkInterruptsRaw()) {
1198  if (lapic->hasPendingUnmaskable()) {
1199  DPRINTF(KvmInt,
1200  "Delivering unmaskable interrupt.\n");
1203  } else if (kvm_run.ready_for_interrupt_injection) {
1204  // KVM claims that it is ready for an interrupt. It might
1205  // be lying if we just updated rflags and disabled
1206  // interrupts (e.g., by doing a CPU handover). Let's sync
1207  // the thread context and check if there are /really/
1208  // interrupts that should be delivered now.
1210  if (lapic->checkInterrupts()) {
1211  DPRINTF(KvmInt,
1212  "M5 has pending interrupts, delivering interrupt.\n");
1213 
1215  } else {
1216  DPRINTF(KvmInt,
1217  "Interrupt delivery delayed due to KVM confusion.\n");
1218  kvm_run.request_interrupt_window = 1;
1219  }
1220  } else if (!kvm_run.request_interrupt_window) {
1221  DPRINTF(KvmInt,
1222  "M5 has pending interrupts, requesting interrupt "
1223  "window.\n");
1224  kvm_run.request_interrupt_window = 1;
1225  }
1226  } else {
1227  kvm_run.request_interrupt_window = 0;
1228  }
1229 
1230  // The CPU might have been suspended as a result of the INIT
1231  // interrupt delivery hack. In that case, don't enter into KVM.
1232  if (_status == Idle)
1233  return 0;
1234  else
1235  return BaseKvmCPU::kvmRun(ticks);
1236 }
1237 
1238 Tick
1240 {
1241  struct kvm_run &kvm_run(*getKvmRunState());
1242 
1243  if (!archIsDrained()) {
1244  DPRINTF(Drain, "kvmRunDrain: Architecture code isn't drained\n");
1245 
1246  // Tell KVM to find a suitable place to deliver interrupts. This
1247  // should ensure that pending interrupts have been delivered and
1248  // things are reasonably consistent (i.e., no interrupts pending
1249  // in the guest).
1250  kvm_run.request_interrupt_window = 1;
1251 
1252  // Limit the run to 1 millisecond. That is hopefully enough to
1253  // reach an interrupt window. Otherwise, we'll just try again
1254  // later.
1256  } else {
1257  DPRINTF(Drain, "kvmRunDrain: Delivering pending IO\n");
1258 
1259  return BaseKvmCPU::kvmRun(0);
1260  }
1261 }
1262 
1263 uint64_t
1265 {
1266  return getMSR(MSR_TSC);
1267 }
1268 
1269 void
1271 {
1272  struct kvm_run &kvm_run(*getKvmRunState());
1273  const uint16_t port(kvm_run.io.port);
1274 
1275  assert(kvm_run.exit_reason == KVM_EXIT_IO);
1276 
1277  if (kvm_run.io.size != 4) {
1278  panic("Unexpected IO size (%u) for address 0x%x.\n",
1279  kvm_run.io.size, port);
1280  }
1281 
1282  if (kvm_run.io.count != 1) {
1283  panic("Unexpected IO count (%u) for address 0x%x.\n",
1284  kvm_run.io.count, port);
1285  }
1286 
1287  uint32_t *data((uint32_t *)getGuestData(kvm_run.io.data_offset));
1288  if (kvm_run.io.direction == KVM_EXIT_IO_OUT)
1289  tc->setMiscReg(miscreg, *data);
1290  else
1291  *data = tc->readMiscRegNoEffect(miscreg);
1292 }
1293 
1294 Tick
1296 {
1297  struct kvm_run &kvm_run(*getKvmRunState());
1298  bool isWrite(kvm_run.io.direction == KVM_EXIT_IO_OUT);
1299  unsigned char *guestData(getGuestData(kvm_run.io.data_offset));
1300  Tick delay(0);
1301  uint16_t port(kvm_run.io.port);
1302  Addr pAddr;
1303  const int count(kvm_run.io.count);
1304 
1305  assert(kvm_run.io.direction == KVM_EXIT_IO_IN ||
1306  kvm_run.io.direction == KVM_EXIT_IO_OUT);
1307 
1308  DPRINTF(KvmIO, "KVM-x86: Handling IO instruction (%s) (port: 0x%x)\n",
1309  (isWrite ? "out" : "in"), kvm_run.io.port);
1310 
1311  /* Vanilla gem5 handles PCI discovery in the TLB(!). Since we
1312  * don't use the TLB component, we need to intercept and handle
1313  * the PCI configuration space IO ports here.
1314  *
1315  * The IO port PCI discovery mechanism uses one address register
1316  * and one data register. We map the address register to a misc
1317  * reg and use that to re-route data register accesses to the
1318  * right location in the PCI configuration space.
1319  */
1320  if (port == IO_PCI_CONF_ADDR) {
1322  return 0;
1323  } else if ((port & ~0x3) == IO_PCI_CONF_DATA_BASE) {
1325  if (pciConfigAddr & 0x80000000) {
1326  pAddr = X86ISA::x86PciConfigAddress((pciConfigAddr & 0x7ffffffc) |
1327  (port & 0x3));
1328  } else {
1329  pAddr = X86ISA::x86IOAddress(port);
1330  }
1331  } else {
1332  pAddr = X86ISA::x86IOAddress(port);
1333  }
1334 
1335  const MemCmd cmd(isWrite ? MemCmd::WriteReq : MemCmd::ReadReq);
1336  // Temporarily lock and migrate to the device event queue to
1337  // prevent races in multi-core mode.
1339  for (int i = 0; i < count; ++i) {
1340  RequestPtr io_req = std::make_shared<Request>(
1341  pAddr, kvm_run.io.size,
1343 
1344  io_req->setContext(tc->contextId());
1345 
1346  PacketPtr pkt = new Packet(io_req, cmd);
1347 
1348  pkt->dataStatic(guestData);
1349  delay += dataPort.submitIO(pkt);
1350 
1351  guestData += kvm_run.io.size;
1352  }
1353 
1354  return delay;
1355 }
1356 
1357 Tick
1359 {
1360  // We don't need to do anything here since this is caught the next
1361  // time we execute kvmRun(). We still overload the exit event to
1362  // silence the warning about an unhandled exit event.
1363  return 0;
1364 }
1365 
1366 bool
1368 {
1369  struct kvm_vcpu_events events;
1370 
1371  getVCpuEvents(events);
1372 
1373  // We could probably handle this by re-inserting interrupts
1374  // that are pending into gem5 on a drain. However, that would
1375  // probably be tricky to do reliably, so we'll just prevent a
1376  // drain if there is anything pending in the
1377  // guest. X86KvmCPU::kvmRunDrain() minimizes the amount of code
1378  // executed in the guest by requesting an interrupt window if
1379  // there are pending interrupts.
1380  const bool pending_events(events.exception.injected ||
1381  events.interrupt.injected ||
1382  events.nmi.injected || events.nmi.pending);
1383 
1384  if (pending_events) {
1385  DPRINTF(Drain, "archIsDrained: Pending events: %s %s %s %s\n",
1386  events.exception.injected ? "exception" : "",
1387  events.interrupt.injected ? "interrupt" : "",
1388  events.nmi.injected ? "nmi[i]" : "",
1389  events.nmi.pending ? "nmi[p]" : "");
1390  }
1391 
1392  return !pending_events;
1393 }
1394 
1395 void
1397 {
1398  struct kvm_run &kvm_run(*getKvmRunState());
1399 
1400  // Synchronize the APIC base and CR8 here since they are present
1401  // in the kvm_run struct, which makes the synchronization really
1402  // cheap.
1403  kvm_run.apic_base = tc->readMiscReg(MISCREG_APIC_BASE);
1404  kvm_run.cr8 = tc->readMiscReg(MISCREG_CR8);
1405 
1407 
1408  tc->setMiscReg(MISCREG_APIC_BASE, kvm_run.apic_base);
1409  kvm_run.cr8 = tc->readMiscReg(MISCREG_CR8);
1410 }
1411 
1412 static struct kvm_cpuid_entry2
1413 makeKvmCpuid(uint32_t function, uint32_t index,
1414  CpuidResult &result)
1415 {
1416  struct kvm_cpuid_entry2 e;
1417  e.function = function;
1418  e.index = index;
1419  e.flags = 0;
1420  e.eax = (uint32_t)result.rax;
1421  e.ebx = (uint32_t)result.rbx;
1422  e.ecx = (uint32_t)result.rcx;
1423  e.edx = (uint32_t)result.rdx;
1424 
1425  return e;
1426 }
1427 
1428 void
1430 {
1431  Kvm::CPUIDVector m5_supported;
1432 
1433  /* TODO: We currently don't support any of the functions that
1434  * iterate through data structures in the CPU using an index. It's
1435  * currently not a problem since M5 doesn't expose any of them at
1436  * the moment.
1437  */
1438 
1439  /* Basic features */
1440  CpuidResult func0;
1441  X86ISA::doCpuid(tc, 0x0, 0, func0);
1442  for (uint32_t function = 0; function <= func0.rax; ++function) {
1444  uint32_t idx(0);
1445 
1446  X86ISA::doCpuid(tc, function, idx, cpuid);
1447  m5_supported.push_back(makeKvmCpuid(function, idx, cpuid));
1448  }
1449 
1450  /* Extended features */
1451  CpuidResult efunc0;
1452  X86ISA::doCpuid(tc, 0x80000000, 0, efunc0);
1453  for (uint32_t function = 0x80000000; function <= efunc0.rax; ++function) {
1455  uint32_t idx(0);
1456 
1457  X86ISA::doCpuid(tc, function, idx, cpuid);
1458  m5_supported.push_back(makeKvmCpuid(function, idx, cpuid));
1459  }
1460 
1461  setCPUID(m5_supported);
1462 }
1463 
1464 void
1465 X86KvmCPU::setCPUID(const struct kvm_cpuid2 &cpuid)
1466 {
1467  if (ioctl(KVM_SET_CPUID2, (void *)&cpuid) == -1)
1468  panic("KVM: Failed to set guest CPUID2 (errno: %i)\n",
1469  errno);
1470 }
1471 
1472 void
1473 X86KvmCPU::setCPUID(const Kvm::CPUIDVector &cpuid)
1474 {
1475  std::unique_ptr<struct kvm_cpuid2> kvm_cpuid(
1476  newVarStruct<struct kvm_cpuid2, struct kvm_cpuid_entry2>(cpuid.size()));
1477 
1478  kvm_cpuid->nent = cpuid.size();
1479  std::copy(cpuid.begin(), cpuid.end(), kvm_cpuid->entries);
1480 
1481  setCPUID(*kvm_cpuid);
1482 }
1483 
1484 void
1485 X86KvmCPU::setMSRs(const struct kvm_msrs &msrs)
1486 {
1487  if (ioctl(KVM_SET_MSRS, (void *)&msrs) == -1)
1488  panic("KVM: Failed to set guest MSRs (errno: %i)\n",
1489  errno);
1490 }
1491 
1492 void
1494 {
1495  std::unique_ptr<struct kvm_msrs> kvm_msrs(
1496  newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(msrs.size()));
1497 
1498  kvm_msrs->nmsrs = msrs.size();
1499  std::copy(msrs.begin(), msrs.end(), kvm_msrs->entries);
1500 
1501  setMSRs(*kvm_msrs);
1502 }
1503 
1504 void
1505 X86KvmCPU::getMSRs(struct kvm_msrs &msrs) const
1506 {
1507  if (ioctl(KVM_GET_MSRS, (void *)&msrs) == -1)
1508  panic("KVM: Failed to get guest MSRs (errno: %i)\n",
1509  errno);
1510 }
1511 
1512 
1513 void
1514 X86KvmCPU::setMSR(uint32_t index, uint64_t value)
1515 {
1516  std::unique_ptr<struct kvm_msrs> kvm_msrs(
1517  newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(1));
1518  struct kvm_msr_entry &entry(kvm_msrs->entries[0]);
1519 
1520  kvm_msrs->nmsrs = 1;
1521  entry.index = index;
1522  entry.reserved = 0;
1523  entry.data = value;
1524 
1525  setMSRs(*kvm_msrs.get());
1526 }
1527 
1528 uint64_t
1529 X86KvmCPU::getMSR(uint32_t index) const
1530 {
1531  std::unique_ptr<struct kvm_msrs> kvm_msrs(
1532  newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(1));
1533  struct kvm_msr_entry &entry(kvm_msrs->entries[0]);
1534 
1535  kvm_msrs->nmsrs = 1;
1536  entry.index = index;
1537  entry.reserved = 0;
1538  entry.data = 0;
1539 
1540  getMSRs(*kvm_msrs.get());
1541  return entry.data;
1542 }
1543 
1544 const Kvm::MSRIndexVector &
1546 {
1547  if (cachedMsrIntersection.empty()) {
1548  const Kvm::MSRIndexVector &kvm_msrs(vm.kvm->getSupportedMSRs());
1549 
1550  DPRINTF(Kvm, "kvm-x86: Updating MSR intersection\n");
1551  for (auto it = kvm_msrs.cbegin(); it != kvm_msrs.cend(); ++it) {
1552  if (X86ISA::msrMap.find(*it) != X86ISA::msrMap.end()) {
1553  cachedMsrIntersection.push_back(*it);
1554  DPRINTF(Kvm, "kvm-x86: Adding MSR 0x%x\n", *it);
1555  } else {
1556  warn("kvm-x86: MSR (0x%x) unsupported by gem5. Skipping.\n",
1557  *it);
1558  }
1559  }
1560  }
1561 
1562  return cachedMsrIntersection;
1563 }
1564 
1565 void
1566 X86KvmCPU::getDebugRegisters(struct kvm_debugregs &regs) const
1567 {
1568 #ifdef KVM_GET_DEBUGREGS
1569  if (ioctl(KVM_GET_DEBUGREGS, &regs) == -1)
1570  panic("KVM: Failed to get guest debug registers\n");
1571 #else
1572  panic("KVM: Unsupported getDebugRegisters call.\n");
1573 #endif
1574 }
1575 
1576 void
1577 X86KvmCPU::setDebugRegisters(const struct kvm_debugregs &regs)
1578 {
1579 #ifdef KVM_SET_DEBUGREGS
1580  if (ioctl(KVM_SET_DEBUGREGS, (void *)&regs) == -1)
1581  panic("KVM: Failed to set guest debug registers\n");
1582 #else
1583  panic("KVM: Unsupported setDebugRegisters call.\n");
1584 #endif
1585 }
1586 
1587 void
1588 X86KvmCPU::getXCRs(struct kvm_xcrs &regs) const
1589 {
1590  if (ioctl(KVM_GET_XCRS, &regs) == -1)
1591  panic("KVM: Failed to get guest debug registers\n");
1592 }
1593 
1594 void
1595 X86KvmCPU::setXCRs(const struct kvm_xcrs &regs)
1596 {
1597  if (ioctl(KVM_SET_XCRS, (void *)&regs) == -1)
1598  panic("KVM: Failed to set guest debug registers\n");
1599 }
1600 
1601 void
1602 X86KvmCPU::getXSave(struct kvm_xsave &xsave) const
1603 {
1604  if (ioctl(KVM_GET_XSAVE, &xsave) == -1)
1605  panic("KVM: Failed to get guest debug registers\n");
1606 }
1607 
1608 void
1609 X86KvmCPU::setXSave(const struct kvm_xsave &xsave)
1610 {
1611  if (ioctl(KVM_SET_XSAVE, (void *)&xsave) == -1)
1612  panic("KVM: Failed to set guest debug registers\n");
1613 }
1614 
1615 
1616 void
1617 X86KvmCPU::getVCpuEvents(struct kvm_vcpu_events &events) const
1618 {
1619  if (ioctl(KVM_GET_VCPU_EVENTS, &events) == -1)
1620  panic("KVM: Failed to get guest debug registers\n");
1621 }
1622 
1623 void
1624 X86KvmCPU::setVCpuEvents(const struct kvm_vcpu_events &events)
1625 {
1626  if (ioctl(KVM_SET_VCPU_EVENTS, (void *)&events) == -1)
1627  panic("KVM: Failed to set guest debug registers\n");
1628 }
1629 
1630 } // namespace gem5
gem5::dumpKvm
static void dumpKvm(const struct kvm_regs &regs)
Definition: x86_cpu.cc:177
gem5::X86KvmCPU::dumpDebugRegs
void dumpDebugRegs() const
Definition: x86_cpu.cc:616
gem5::FXSave::fpu_cs
uint16_t fpu_cs
Definition: x86_cpu.cc:87
gem5::FXSave::last_opcode
uint16_t last_opcode
Definition: x86_cpu.cc:81
gem5::X86ISA::MISCREG_ES
@ MISCREG_ES
Definition: misc.hh:302
gem5::BaseKvmCPU::_status
Status _status
CPU run state.
Definition: base.hh:240
gem5::X86ISA::loadFloat80
double loadFloat80(const void *_mem)
Load an 80-bit float from memory and convert it to double.
Definition: utility.cc:156
gem5::Kvm::capSetTSSAddress
bool capSetTSSAddress() const
Support for KvmVM::setTSSAddress()
Definition: vm.cc:114
gem5::X86KvmCPU::setCPUID
void setCPUID(const struct kvm_cpuid2 &cpuid)
Methods to access CPUID information using the extended API.
Definition: x86_cpu.cc:1465
gem5::X86KvmCPU::updateThreadContextRegs
void updateThreadContextRegs(const struct kvm_regs &regs, const struct kvm_sregs &sregs)
Support routines to update the state of gem5's thread context from KVM's state representation.
Definition: x86_cpu.cc:979
gem5::ThreadContext::readMiscReg
virtual RegVal readMiscReg(RegIndex misc_reg)=0
gem5::X86ISA::MISCREG_M5_REG
@ MISCREG_M5_REG
Definition: misc.hh:143
gem5::Kvm::capUserNMI
bool capUserNMI() const
Support for BaseKvmCPU::kvmNonMaskableInterrupt().
Definition: vm.cc:126
gem5::FXSave::fcw
uint16_t fcw
Definition: x86_cpu.cc:77
gem5::X86ISA::MISCREG_SEG_SEL
static MiscRegIndex MISCREG_SEG_SEL(int index)
Definition: misc.hh:511
gem5::updateKvmStateFPUCommon
static void updateKvmStateFPUCommon(ThreadContext *tc, T &fpu)
Definition: x86_cpu.cc:831
gem5::X86ISA::MISCREG_DS
@ MISCREG_DS
Definition: misc.hh:305
gem5::BaseCPU::interrupts
std::vector< BaseInterrupts * > interrupts
Definition: base.hh:225
gem5::FXSave::ctrl64
struct gem5::FXSave::@24::@27 ctrl64
warn
#define warn(...)
Definition: logging.hh:245
gem5::X86ISA::MISCREG_TR
@ MISCREG_TR
Definition: misc.hh:313
gem5::X86KvmCPU::dumpSpecRegs
void dumpSpecRegs() const
Definition: x86_cpu.cc:608
gem5::X86KvmCPU::startup
void startup() override
startup() is the final initialization call before simulation.
Definition: x86_cpu.cc:563
gem5::X86KvmCPU::dumpVCpuEvents
void dumpVCpuEvents() const
Definition: x86_cpu.cc:654
gem5::X86ISA::msrMap
const MsrMap msrMap
Map between MSR addresses and their corresponding misc registers.
x86_cpu.hh
gem5::forceSegAccessed
static void forceSegAccessed(struct kvm_segment &seg)
Definition: x86_cpu.cc:750
gem5::X86KvmCPU::haveXSave
bool haveXSave
Kvm::capXSave() available?
Definition: x86_cpu.hh:254
data
const char data[]
Definition: circlebuf.test.cc:48
gem5::EventManager::eventQueue
EventQueue * eventQueue() const
Definition: eventq.hh:1010
gem5::MipsISA::cpuid
Bitfield< 28, 21 > cpuid
Definition: dt_constants.hh:95
gem5::ThreadContext::readFloatReg
virtual RegVal readFloatReg(RegIndex reg_idx) const =0
gem5::X86ISA::ZEBit
@ ZEBit
Definition: misc.hh:91
gem5::X86KvmCPU::dumpXCRs
void dumpXCRs() const
Definition: x86_cpu.cc:630
gem5::X86ISA::e
Bitfield< 11 > e
Definition: misc.hh:759
gem5::X86ISA::MISCREG_TSL
@ MISCREG_TSL
Definition: misc.hh:309
gem5::X86KvmCPU::~X86KvmCPU
virtual ~X86KvmCPU()
Definition: x86_cpu.cc:558
gem5::X86KvmCPU::dumpIntRegs
void dumpIntRegs() const
Definition: x86_cpu.cc:600
gem5::X86ISA::MISCREG_CR8
@ MISCREG_CR8
Definition: misc.hh:119
gem5::FXSave::pad1
uint16_t pad1
Definition: x86_cpu.cc:88
gem5::X86ISA::IEBit
@ IEBit
Definition: misc.hh:89
gem5::BaseKvmCPU::ioctl
int ioctl(int request, long p1) const
vCPU ioctl interface.
Definition: base.cc:1150
gem5::MipsISA::index
Bitfield< 30, 0 > index
Definition: pra_constants.hh:47
warn_once
#define warn_once(...)
Definition: logging.hh:249
gem5::X86ISA::CC2Bit
@ CC2Bit
Definition: misc.hh:101
gem5::X86ISA::MISCREG_APIC_BASE
@ MISCREG_APIC_BASE
Definition: misc.hh:399
gem5::X86KvmCPU::handleIOMiscReg32
void handleIOMiscReg32(int miscreg)
Handle a 32-bit IO access that should be mapped to a MiscReg.
Definition: x86_cpu.cc:1270
gem5::X86ISA::ErrSummaryBit
@ ErrSummaryBit
Definition: misc.hh:98
gem5::X86KvmCPU::getDebugRegisters
void getDebugRegisters(struct kvm_debugregs &regs) const
Wrappers around KVM's state transfer methods.
Definition: x86_cpu.cc:1566
gem5::BaseKvmCPU::deviceEventQueue
EventQueue * deviceEventQueue()
Get a pointer to the event queue owning devices.
Definition: base.hh:438
gem5::BaseKvmCPU::kvmInterrupt
void kvmInterrupt(const struct kvm_interrupt &interrupt)
Send a normal interrupt to the guest.
Definition: base.cc:799
gem5::BaseKvmCPU::syncThreadContext
void syncThreadContext()
Update a thread context if the KVM state is dirty with respect to the cached thread context.
Definition: base.cc:931
gem5::X86KvmCPU::updateKvmStateFPU
void updateKvmStateFPU()
Update FPU and SIMD registers.
Definition: x86_cpu.cc:914
gem5::X86KvmCPU::updateKvmStateRegs
void updateKvmStateRegs()
Support routines to update the state of the KVM CPU from gem5's state representation.
Definition: x86_cpu.cc:695
gem5::ArmISA::attr
attr
Definition: misc_types.hh:655
gem5::X86ISA::MISCREG_SEG_LIMIT
static MiscRegIndex MISCREG_SEG_LIMIT(int index)
Definition: misc.hh:532
gem5::X86KvmCPU::setMSRs
void setMSRs(const struct kvm_msrs &msrs)
Methods to access MSRs in the guest.
Definition: x86_cpu.cc:1485
gem5::Request::UNCACHEABLE
@ UNCACHEABLE
The request is to an uncacheable address.
Definition: request.hh:125
gem5::X86KvmCPU::setDebugRegisters
void setDebugRegisters(const struct kvm_debugregs &regs)
Definition: x86_cpu.cc:1577
gem5::X86ISA::MISCREG_SEG_ATTR
static MiscRegIndex MISCREG_SEG_ATTR(int index)
Definition: misc.hh:539
gem5::X86ISA::CC0Bit
@ CC0Bit
Definition: misc.hh:99
gem5::bitsToFloat64
static double bitsToFloat64(uint64_t val)
Definition: types.hh:225
gem5::newVarStruct
static STRUCT * newVarStruct(size_t entries)
Definition: x86_cpu.cc:171
gem5::X86KvmCPU::updateThreadContextXSave
void updateThreadContextXSave(const struct kvm_xsave &kxsave)
Update FPU and SIMD registers using the XSave API.
Definition: x86_cpu.cc:1095
gem5::SimObject::find
static SimObject * find(const char *name)
Find the SimObject with the given name and return a pointer to it.
Definition: sim_object.cc:176
gem5::X86ISA::InitInterrupt
Definition: faults.hh:350
gem5::X86ISA::x86PciConfigAddress
static Addr x86PciConfigAddress(const uint32_t addr)
Definition: x86_traits.hh:82
GEM5_FALLTHROUGH
#define GEM5_FALLTHROUGH
Definition: compiler.hh:61
gem5::ArmISA::e
Bitfield< 9 > e
Definition: misc_types.hh:64
gem5::X86ISA::MISCREG_FIOFF
@ MISCREG_FIOFF
Definition: misc.hh:392
interrupts.hh
gem5::BaseKvmCPU::getKvmRunState
struct kvm_run * getKvmRunState()
Get a pointer to the kvm_run structure containing all the input and output parameters from kvmRun().
Definition: base.hh:308
gem5::X86ISA::MISCREG_FISEG
@ MISCREG_FISEG
Definition: misc.hh:391
gem5::FXSave::fpu_ip
uint32_t fpu_ip
Definition: x86_cpu.cc:86
gem5::X86ISA::MISCREG_FOOFF
@ MISCREG_FOOFF
Definition: misc.hh:394
gem5::X86KvmCPU::updateCPUID
void updateCPUID()
Transfer gem5's CPUID values into the virtual CPU.
Definition: x86_cpu.cc:1429
top
Definition: test.h:61
gem5::ThreadContext::contextId
virtual ContextID contextId() const =0
gem5::X86ISA::FLOATREG_XMM_LOW
static FloatRegIndex FLOATREG_XMM_LOW(int index)
Definition: float.hh:132
gem5::X86ISA::getRFlags
uint64_t getRFlags(ThreadContext *tc)
Reconstruct the rflags register from the internal gem5 register state.
Definition: utility.cc:58
gem5::Kvm::capExtendedCPUID
bool capExtendedCPUID() const
Support for BaseKvmCPU::setCPUID2 and getSupportedCPUID().
Definition: vm.cc:120
std::vector
STL vector class.
Definition: stl.hh:37
gem5::X86ISA::MISCREG_X87_TOP
@ MISCREG_X87_TOP
Definition: misc.hh:383
IO_PCI_CONF_ADDR
#define IO_PCI_CONF_ADDR
Definition: x86_cpu.cc:58
gem5::X86KvmCPU::updateThreadContextFPU
void updateThreadContextFPU(const struct kvm_fpu &fpu)
Update FPU and SIMD registers using the legacy API.
Definition: x86_cpu.cc:1084
gem5::ThreadContext::instAddr
virtual Addr instAddr() const =0
gem5::X86ISA::UEBit
@ UEBit
Definition: misc.hh:93
gem5::X86KvmCPU::dumpXSave
void dumpXSave() const
Definition: x86_cpu.cc:642
gem5::X86KvmCPU::cachedMsrIntersection
Kvm::MSRIndexVector cachedMsrIntersection
Cached intersection of supported MSRs.
Definition: x86_cpu.hh:248
gem5::BaseKvmCPU::setFPUState
void setFPUState(const struct kvm_fpu &state)
Definition: base.cc:842
gem5::ArmISA::i
Bitfield< 7 > i
Definition: misc_types.hh:66
faults.hh
gem5::RiscvISA::xs
Bitfield< 16, 15 > xs
Definition: misc.hh:547
gem5::X86ISA::MISCREG_GS
@ MISCREG_GS
Definition: misc.hh:307
gem5::Kvm::capXCRs
bool capXCRs() const
Support for getting and setting the x86 XCRs.
Definition: vm.cc:188
gem5::X86ISA::storeFloat80
void storeFloat80(void *_mem, double value)
Convert and store a double as an 80-bit float.
Definition: utility.cc:165
gem5::Kvm::capXSave
bool capXSave() const
Support for getting and setting the kvm_xsave structure.
Definition: vm.cc:198
FOREACH_IREG
#define FOREACH_IREG()
Definition: x86_cpu.cc:111
hack
#define hack(...)
Definition: logging.hh:247
gem5::X86ISA::BusyBit
@ BusyBit
Definition: misc.hh:103
gem5::X86KvmCPU::setMSR
void setMSR(uint32_t index, uint64_t value)
Definition: x86_cpu.cc:1514
gem5::BaseKvmCPU::getFPUState
void getFPUState(struct kvm_fpu &state) const
Get/Set the guest FPU/vector state.
Definition: base.cc:835
gem5::X86KvmCPU::kvmRunDrain
Tick kvmRunDrain() override
Run the virtual CPU until draining completes.
Definition: x86_cpu.cc:1239
gem5::X86KvmCPU::ioctlRun
void ioctlRun() override
Override for synchronizing state in kvm_run.
Definition: x86_cpu.cc:1396
gem5::BaseKvmCPU::kvmNonMaskableInterrupt
void kvmNonMaskableInterrupt()
Send a non-maskable interrupt to the guest.
Definition: base.cc:791
gem5::X86ISA::DEBit
@ DEBit
Definition: misc.hh:90
gem5::FXSave::mxcsr_mask
uint32_t mxcsr_mask
Definition: x86_cpu.cc:101
FOREACH_DTABLE
#define FOREACH_DTABLE()
Definition: x86_cpu.cc:164
gem5::X86ISA::OEBit
@ OEBit
Definition: misc.hh:92
gem5::ArmISA::at
Bitfield< 35, 32 > at
Definition: misc_types.hh:154
gem5::MemCmd
Definition: packet.hh:75
gem5::X86KvmCPU::archIsDrained
bool archIsDrained() const override
Check if there are pending events in the vCPU that prevents it from being drained.
Definition: x86_cpu.cc:1367
gem5::X86ISA::CpuidResult
Definition: cpuid.hh:41
gem5::FXSave::fpu_ip
uint64_t fpu_ip
Definition: x86_cpu.cc:96
gem5::Packet::dataStatic
void dataStatic(T *p)
Set the data pointer to the following value that should not be freed.
Definition: packet.hh:1134
reserved
reserved
Definition: pcireg.h:54
gem5::X86KvmCPU::getMsrIntersection
const Kvm::MSRIndexVector & getMsrIntersection() const
Get a list of MSRs supported by both gem5 and KVM.
Definition: x86_cpu.cc:1545
gem5::X86ISA::MISCREG_CS_BASE
@ MISCREG_CS_BASE
Definition: misc.hh:319
FOREACH_DREG
#define FOREACH_DREG()
Definition: x86_cpu.cc:142
gem5::X86ISA::CC3Bit
@ CC3Bit
Definition: misc.hh:102
gem5::BaseKvmCPU::thread
SimpleThread * thread
A cached copy of a thread's state in the form of a SimpleThread object.
Definition: base.hh:153
gem5::ArmISA::j
Bitfield< 24 > j
Definition: misc_types.hh:57
gem5::FXSave::fpu_dp
uint64_t fpu_dp
Definition: x86_cpu.cc:97
gem5::X86ISA::MISCREG_FOP
@ MISCREG_FOP
Definition: misc.hh:395
gem5::X86ISA::MISCREG_FOSEG
@ MISCREG_FOSEG
Definition: misc.hh:393
gem5::X86KvmCPU::haveDebugRegs
bool haveDebugRegs
Kvm::capDebugRegs() available?
Definition: x86_cpu.hh:252
gem5::BaseKvmCPU::startup
void startup() override
startup() is the final initialization call before simulation.
Definition: base.cc:120
gem5::BaseKvmCPU::ioctlRun
virtual void ioctlRun()
Execute the KVM_RUN ioctl.
Definition: base.cc:1311
gem5::ThreadContext
ThreadContext is the external interface to all thread state for anything outside of the CPU.
Definition: thread_context.hh:93
gem5::Kvm::capVCPUEvents
bool capVCPUEvents() const
Support for getting and setting the kvm_vcpu_events structure.
Definition: vm.cc:168
gem5::X86KvmCPU::getMSR
uint64_t getMSR(uint32_t index) const
Definition: x86_cpu.cc:1529
gem5::Fault
std::shared_ptr< FaultBase > Fault
Definition: types.hh:255
gem5::X86KvmCPU::updateKvmStateSRegs
void updateKvmStateSRegs()
Update control registers (CRx, segments, etc.)
Definition: x86_cpu.cc:762
MSR_TSC
#define MSR_TSC
Definition: x86_cpu.cc:56
DPRINTF
#define DPRINTF(x,...)
Definition: trace.hh:186
int.hh
gem5::X86ISA::count
count
Definition: misc.hh:709
SEG_TYPE_BIT_ACCESSED
#define SEG_TYPE_BIT_ACCESSED
Definition: x86_cpu.cc:73
gem5::Packet
A Packet is used to encapsulate a transfer between two objects in the memory system (e....
Definition: packet.hh:283
gem5::BaseKvmCPU
Base class for KVM based CPU models.
Definition: base.hh:87
gem5::BaseKvmCPU::tc
ThreadContext * tc
ThreadContext object, provides an interface for external objects to modify this thread's state.
Definition: base.hh:158
gem5::FXSave::pad0
uint8_t pad0
Definition: x86_cpu.cc:80
msr.hh
gem5::MipsISA::PCState
GenericISA::DelaySlotPCState< 4 > PCState
Definition: pcstate.hh:40
gem5::probing::Packet
ProbePointArg< PacketInfo > Packet
Packet probe point.
Definition: mem.hh:109
gem5::Tick
uint64_t Tick
Tick count type.
Definition: types.hh:58
gem5::X86ISA::StackFaultBit
@ StackFaultBit
Definition: misc.hh:97
gem5::RequestPtr
std::shared_ptr< Request > RequestPtr
Definition: request.hh:92
gem5::setKvmSegmentReg
static void setKvmSegmentReg(ThreadContext *tc, struct kvm_segment &kvm_seg, const int index)
Definition: x86_cpu.cc:717
gem5::X86ISA::MISCREG_FTAG
@ MISCREG_FTAG
Definition: misc.hh:390
gem5::X86ISA::X86FaultBase::getVector
virtual uint8_t getVector() const
Get the vector of an interrupt.
Definition: faults.hh:86
gem5::MemCmd::ReadReq
@ ReadReq
Definition: packet.hh:86
SEG_SYS_TYPE_TSS_BUSY
#define SEG_SYS_TYPE_TSS_BUSY
Definition: x86_cpu.cc:64
gem5::FXSave::pad2
uint16_t pad2
Definition: x86_cpu.cc:91
gem5::dumpFpuCommon
static void dumpFpuCommon(const T &fpu)
Definition: x86_cpu.cc:267
gem5::FXSave::mxcsr
uint32_t mxcsr
Definition: x86_cpu.cc:100
gem5::X86ISA::FLOATREG_XMM_HIGH
static FloatRegIndex FLOATREG_XMM_HIGH(int index)
Definition: float.hh:138
gem5::BaseKvmCPU::getSpecialRegisters
void getSpecialRegisters(struct kvm_sregs &regs) const
Definition: base.cc:821
gem5::X86ISA::MISCREG_PCI_CONFIG_ADDRESS
@ MISCREG_PCI_CONFIG_ADDRESS
Definition: misc.hh:402
gem5::EventQueue::ScopedMigration
Definition: eventq.hh:672
utility.hh
gem5::X86ISA::Interrupts
Definition: interrupts.hh:77
gem5::BaseKvmCPU::vm
KvmVM & vm
Definition: base.hh:160
compiler.hh
gem5::ThreadContext::setFloatReg
virtual void setFloatReg(RegIndex reg_idx, RegVal val)=0
gem5::X86ISA::MISCREG_SEG_BASE
static MiscRegIndex MISCREG_SEG_BASE(int index)
Definition: misc.hh:518
gem5::X86KvmCPU::updateKvmStateMSRs
void updateKvmStateMSRs()
Update MSR registers.
Definition: x86_cpu.cc:923
gem5::X86ISA::MISCREG_RFLAGS
@ MISCREG_RFLAGS
Definition: misc.hh:140
gem5::X86KvmCPU::setXCRs
void setXCRs(const struct kvm_xcrs &regs)
Definition: x86_cpu.cc:1595
gem5::makeKvmCpuid
static struct kvm_cpuid_entry2 makeKvmCpuid(uint32_t function, uint32_t index, CpuidResult &result)
Definition: x86_cpu.cc:1413
gem5::setContextSegment
void setContextSegment(ThreadContext *tc, const struct kvm_segment &kvm_seg, const int index)
Definition: x86_cpu.cc:997
gem5::X86ISA::MISCREG_FS
@ MISCREG_FS
Definition: misc.hh:306
gem5::ThreadContext::pcState
virtual TheISA::PCState pcState() const =0
gem5::updateThreadContextFPUCommon
static void updateThreadContextFPUCommon(ThreadContext *tc, const T &fpu)
Definition: x86_cpu.cc:1051
gem5::ThreadContext::readMiscRegNoEffect
virtual RegVal readMiscRegNoEffect(RegIndex misc_reg) const =0
gem5::X86KvmCPU::getMSRs
void getMSRs(struct kvm_msrs &msrs) const
Definition: x86_cpu.cc:1505
IO_PCI_CONF_DATA_BASE
#define IO_PCI_CONF_DATA_BASE
Definition: x86_cpu.cc:59
gem5::X86ISA::convX87XTagsToTags
uint16_t convX87XTagsToTags(uint8_t ftwx)
Convert an x87 xtag word to normal tags format.
Definition: utility.cc:115
gem5::sim_clock::as_float::ms
double ms
millisecond
Definition: core.cc:54
gem5::X86KvmCPU::dumpFpuRegs
void dumpFpuRegs() const
Definition: x86_cpu.cc:592
SEG_CS_TYPE_ACCESSED
#define SEG_CS_TYPE_ACCESSED
Definition: x86_cpu.cc:67
gem5::X86KvmCPU::getHostCycles
uint64_t getHostCycles() const override
Get the value of the hardware cycle counter in the guest.
Definition: x86_cpu.cc:1264
SEG_CS_TYPE_READ_ACCESSED
#define SEG_CS_TYPE_READ_ACCESSED
Definition: x86_cpu.cc:69
gem5::Addr
uint64_t Addr
Address type This will probably be moved somewhere else in the near future.
Definition: types.hh:147
gem5::X86KvmCPU::handleKvmExitIO
Tick handleKvmExitIO() override
Handle x86 legacy IO (in/out)
Definition: x86_cpu.cc:1295
gem5::X86ISA::MISCREG_SS
@ MISCREG_SS
Definition: misc.hh:304
gem5::checkSeg
static void checkSeg(const char *name, const int idx, const struct kvm_segment &seg, struct kvm_sregs sregs)
Definition: x86_cpu.cc:388
gem5::X86ISA::MISCREG_MXCSR
@ MISCREG_MXCSR
Definition: misc.hh:386
gem5::X86KvmCPU::updateThreadContextMSRs
void updateThreadContextMSRs()
Update MSR registers.
Definition: x86_cpu.cc:1108
gem5::X86ISA::X86Interrupt
Definition: faults.hh:119
gem5::BaseKvmCPU::Idle
@ Idle
Context not scheduled in KVM.
Definition: base.hh:199
name
const std::string & name()
Definition: trace.cc:49
base.hh
gem5::X86ISA::setRFlags
void setRFlags(ThreadContext *tc, uint64_t val)
Set update the rflags register and internal gem5 state.
Definition: utility.cc:74
gem5::BaseKvmCPU::setRegisters
void setRegisters(const struct kvm_regs &regs)
Definition: base.cc:814
gem5::KvmVM::kvm
Kvm * kvm
Global KVM interface.
Definition: vm.hh:417
gem5::X86ISA::doCpuid
bool doCpuid(ThreadContext *tc, uint32_t function, uint32_t index, CpuidResult &result)
Definition: cpuid.cc:91
gem5::X86ISA::convX87TagsToXTags
uint8_t convX87TagsToXTags(uint16_t ftw)
Convert an x87 tag word to abridged tag format.
Definition: utility.cc:90
gem5::X86KvmCPU::updateKvmState
void updateKvmState() override
Update the KVM state from the current thread context.
Definition: x86_cpu.cc:682
gem5::X86ISA::MISCREG_FCW
@ MISCREG_FCW
Definition: misc.hh:387
gem5::X86ISA::MISCREG_CS
@ MISCREG_CS
Definition: misc.hh:303
gem5::X86ISA::FLOATREG_FPR
static FloatRegIndex FLOATREG_FPR(int index)
Definition: float.hh:126
gem5::X86KvmCPU::getVCpuEvents
void getVCpuEvents(struct kvm_vcpu_events &events) const
Definition: x86_cpu.cc:1617
gem5::X86KvmCPU::getXSave
void getXSave(struct kvm_xsave &xsave) const
Definition: x86_cpu.cc:1602
gem5::X86KvmCPU::deliverInterrupts
void deliverInterrupts()
Inject pending interrupts from gem5 into the virtual CPU.
Definition: x86_cpu.cc:1138
gem5::X86KvmCPU::dump
void dump() const override
Dump the internal state to the terminal.
Definition: x86_cpu.cc:577
gem5::X86KvmCPU::haveXCRs
bool haveXCRs
Kvm::capXCRs() available?
Definition: x86_cpu.hh:261
gem5::FXSave::fpu_dp
uint32_t fpu_dp
Definition: x86_cpu.cc:89
gem5::X86ISA::StartupInterrupt
Definition: faults.hh:361
gem5::X86KvmCPU::updateKvmStateFPUXSave
void updateKvmStateFPUXSave()
Update FPU and SIMD registers using the XSave API.
Definition: x86_cpu.cc:889
inform
#define inform(...)
Definition: logging.hh:246
FOREACH_SEGMENT
#define FOREACH_SEGMENT()
Definition: x86_cpu.cc:152
gem5::Kvm::capDebugRegs
bool capDebugRegs() const
Support for getting and setting the kvm_debugregs structure.
Definition: vm.cc:178
gem5::SimpleThread::suspend
void suspend() override
Set the status to Suspended.
Definition: simple_thread.cc:142
gem5::floatToBits64
static uint64_t floatToBits64(double val)
Definition: types.hh:198
gem5::ThreadContext::setMiscReg
virtual void setMiscReg(RegIndex misc_reg, RegVal val)=0
gem5::dumpFpuSpec
static void dumpFpuSpec(const struct FXSave &xs)
Definition: x86_cpu.cc:251
gem5::X86ISA::seg
Bitfield< 2, 0 > seg
Definition: types.hh:87
gem5::X86KvmCPU::updateKvmStateFPULegacy
void updateKvmStateFPULegacy()
Update FPU and SIMD registers using the legacy API.
Definition: x86_cpu.cc:865
gem5::MemCmd::WriteReq
@ WriteReq
Definition: packet.hh:89
gem5::FXSave::ftwx
uint8_t ftwx
Definition: x86_cpu.cc:79
gem5::BaseKvmCPU::KVMCpuPort::submitIO
Tick submitIO(PacketPtr pkt)
Interface to send Atomic or Timing IO request.
Definition: base.cc:174
gem5::X86ISA::MISCREG_FSW
@ MISCREG_FSW
Definition: misc.hh:388
gem5::X86KvmCPU::X86KvmCPU
X86KvmCPU(const X86KvmCPUParams &params)
Definition: x86_cpu.cc:530
cpuid.hh
gem5::Kvm
KVM parent interface.
Definition: vm.hh:80
gem5::X86KvmCPU::kvmRun
Tick kvmRun(Tick ticks) override
Request KVM to run the guest for a given number of ticks.
Definition: x86_cpu.cc:1191
gem5::FXSave::fsw
uint16_t fsw
Definition: x86_cpu.cc:78
gem5::MipsISA::NonMaskableInterrupt
Definition: faults.hh:149
gem5::X86KvmCPU::handleKvmExitIRQWindowOpen
Tick handleKvmExitIRQWindowOpen() override
The guest exited because an interrupt window was requested.
Definition: x86_cpu.cc:1358
gem5::X86KvmCPU::setXSave
void setXSave(const struct kvm_xsave &xsave)
Definition: x86_cpu.cc:1609
gem5::BaseKvmCPU::kvmRun
virtual Tick kvmRun(Tick ticks)
Request KVM to run the guest for a given number of ticks.
Definition: base.cc:694
gem5::BaseKvmCPU::threadContextDirty
bool threadContextDirty
Is the gem5 context dirty? Set to true to force an update of the KVM vCPU state upon the next call to...
Definition: base.hh:639
gem5::X86KvmCPU::getXCRs
void getXCRs(struct kvm_xcrs &regs) const
Definition: x86_cpu.cc:1588
gem5::X86ISA::CpuidResult::rax
uint64_t rax
Definition: cpuid.hh:43
gem5::BaseKvmCPU::getRegisters
void getRegisters(struct kvm_regs &regs) const
Get/Set the register state of the guest vCPU.
Definition: base.cc:807
gem5::BaseCPU::dataRequestorId
RequestorID dataRequestorId() const
Reads this CPU's unique data requestor ID.
Definition: base.hh:194
gem5::X86ISA::MISCREG_FTW
@ MISCREG_FTW
Definition: misc.hh:389
gem5::X86KvmCPU::updateThreadContextSRegs
void updateThreadContextSRegs(const struct kvm_sregs &sregs)
Update control registers (CRx, segments, etc.)
Definition: x86_cpu.cc:1033
gem5::FXSave::fpu_ds
uint16_t fpu_ds
Definition: x86_cpu.cc:90
gem5::isCanonicalAddress
static bool isCanonicalAddress(uint64_t addr)
Definition: x86_cpu.cc:377
gem5::BaseKvmCPU::dataPort
KVMCpuPort dataPort
Port for data requests.
Definition: base.hh:624
gem5::BaseKvmCPU::getGuestData
uint8_t * getGuestData(uint64_t offset) const
Retrieve a pointer to guest data stored at the end of the kvm_run structure.
Definition: base.hh:318
SEG_SYS_TYPE_TSS_AVAILABLE
#define SEG_SYS_TYPE_TSS_AVAILABLE
Definition: x86_cpu.cc:62
gem5
Reference material can be found at the JEDEC website: UFS standard http://www.jedec....
Definition: decoder.cc:40
gem5::X86KvmCPU::updateThreadContext
void updateThreadContext() override
Update the current thread context with the KVM state.
Definition: x86_cpu.cc:945
gem5::X86ISA::CC1Bit
@ CC1Bit
Definition: misc.hh:100
gem5::FXSave
Definition: x86_cpu.cc:75
gem5::X86ISA::x86IOAddress
static Addr x86IOAddress(const uint32_t port)
Definition: x86_traits.hh:76
gem5::X86KvmCPU::dumpMSRs
void dumpMSRs() const
Definition: x86_cpu.cc:662
FOREACH_SREG
#define FOREACH_SREG()
Definition: x86_cpu.cc:131
gem5::BaseKvmCPU::setSpecialRegisters
void setSpecialRegisters(const struct kvm_sregs &regs)
Definition: base.cc:828
gem5::X86KvmCPU::useXSave
bool useXSave
Should the XSave interface be used to sync the FPU and SIMD registers?
Definition: x86_cpu.hh:259
panic
#define panic(...)
This implements a cprintf based panic() function.
Definition: logging.hh:177
gem5::X86ISA::PEBit
@ PEBit
Definition: misc.hh:94
gem5::setKvmDTableReg
static void setKvmDTableReg(ThreadContext *tc, struct kvm_dtable &kvm_dtable, const int index)
Definition: x86_cpu.cc:742
gem5::X86ISA::addr
Bitfield< 3 > addr
Definition: types.hh:84
gem5::X86KvmCPU::setVCpuEvents
void setVCpuEvents(const struct kvm_vcpu_events &events)
Definition: x86_cpu.cc:1624
gem5::ThreadContext::setMiscRegNoEffect
virtual void setMiscRegNoEffect(RegIndex misc_reg, RegVal val)=0

Generated on Tue Sep 21 2021 12:25:00 for gem5 by doxygen 1.8.17