gem5  v21.2.1.1
All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Modules Pages
x86_cpu.cc
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2013 Andreas Sandberg
3  * All rights reserved
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions are
7  * met: redistributions of source code must retain the above copyright
8  * notice, this list of conditions and the following disclaimer;
9  * redistributions in binary form must reproduce the above copyright
10  * notice, this list of conditions and the following disclaimer in the
11  * documentation and/or other materials provided with the distribution;
12  * neither the name of the copyright holders nor the names of its
13  * contributors may be used to endorse or promote products derived from
14  * this software without specific prior written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27  */
28 
29 #include "arch/x86/kvm/x86_cpu.hh"
30 
31 #include <linux/kvm.h>
32 
33 #include <algorithm>
34 #include <cerrno>
35 #include <memory>
36 
37 #include "arch/x86/cpuid.hh"
38 #include "arch/x86/faults.hh"
39 #include "arch/x86/interrupts.hh"
40 #include "arch/x86/regs/int.hh"
41 #include "arch/x86/regs/msr.hh"
42 #include "arch/x86/utility.hh"
43 #include "base/compiler.hh"
44 #include "cpu/kvm/base.hh"
45 #include "debug/Drain.hh"
46 #include "debug/Kvm.hh"
47 #include "debug/KvmContext.hh"
48 #include "debug/KvmIO.hh"
49 #include "debug/KvmInt.hh"
50 
51 namespace gem5
52 {
53 
54 using namespace X86ISA;
55 
// MSR number of the time-stamp counter (used when syncing the TSC).
#define MSR_TSC 0x10

// Legacy PCI configuration-space I/O ports (address / data window).
#define IO_PCI_CONF_ADDR 0xCF8
#define IO_PCI_CONF_DATA_BASE 0xCFC

// Task segment type of an inactive 32-bit or 64-bit task
#define SEG_SYS_TYPE_TSS_AVAILABLE 9
// Task segment type of an active 32-bit or 64-bit task
#define SEG_SYS_TYPE_TSS_BUSY 11

// Non-conforming accessed code segment
#define SEG_CS_TYPE_ACCESSED 9
// Non-conforming accessed code segment that can be read
#define SEG_CS_TYPE_READ_ACCESSED 11

// The lowest bit of the type field for normal segments (code and
// data) is used to indicate that a segment has been accessed.
#define SEG_TYPE_BIT_ACCESSED 1
74 
/**
 * In-memory layout of the FXSAVE/XSAVE legacy region as exchanged
 * with KVM (kvm_xsave.region is reinterpreted as this struct).
 * The layout is fixed by the hardware FXSAVE instruction, hence the
 * packed attribute and the size static_assert below — do not reorder
 * or resize any field.
 */
struct GEM5_PACKED FXSave
{
    uint16_t fcw;           // x87 control word
    uint16_t fsw;           // x87 status word (TOP lives in bits 13:11)
    uint8_t ftwx;           // abridged tag word (1 bit per register)
    uint8_t pad0;
    uint16_t last_opcode;
    union
    {
        // 32-bit layout: segmented FP instruction/data pointers.
        struct
        {
            uint32_t fpu_ip;
            uint16_t fpu_cs;
            uint16_t pad1;
            uint32_t fpu_dp;
            uint16_t fpu_ds;
            uint16_t pad2;
        } ctrl32;

        // 64-bit layout: flat 64-bit FP instruction/data pointers.
        struct
        {
            uint64_t fpu_ip;
            uint64_t fpu_dp;
        } ctrl64;
    };
    uint32_t mxcsr;
    uint32_t mxcsr_mask;

    // Eight 80-bit x87 registers, each padded to 16 bytes.
    uint8_t fpr[8][16];
    // Sixteen 128-bit XMM registers.
    uint8_t xmm[16][16];

    uint64_t reserved[12];
};

// The FXSAVE area is architecturally exactly 512 bytes.
static_assert(sizeof(FXSave) == 512, "Unexpected size of FXSave");
110 
// The FOREACH_* macros below expand an APPLY_* macro (defined at each
// use site) once per register, pairing the KVM struct field name with
// the corresponding gem5 register index. They are used both for
// dumping state and for copying state between gem5 and KVM.

// General-purpose (integer) registers.
#define FOREACH_IREG() \
    do { \
        APPLY_IREG(rax, INTREG_RAX); \
        APPLY_IREG(rbx, INTREG_RBX); \
        APPLY_IREG(rcx, INTREG_RCX); \
        APPLY_IREG(rdx, INTREG_RDX); \
        APPLY_IREG(rsi, INTREG_RSI); \
        APPLY_IREG(rdi, INTREG_RDI); \
        APPLY_IREG(rsp, INTREG_RSP); \
        APPLY_IREG(rbp, INTREG_RBP); \
        APPLY_IREG(r8, INTREG_R8); \
        APPLY_IREG(r9, INTREG_R9); \
        APPLY_IREG(r10, INTREG_R10); \
        APPLY_IREG(r11, INTREG_R11); \
        APPLY_IREG(r12, INTREG_R12); \
        APPLY_IREG(r13, INTREG_R13); \
        APPLY_IREG(r14, INTREG_R14); \
        APPLY_IREG(r15, INTREG_R15); \
    } while (0)

// Control/special registers exposed in kvm_sregs.
#define FOREACH_SREG() \
    do { \
        APPLY_SREG(cr0, MISCREG_CR0); \
        APPLY_SREG(cr2, MISCREG_CR2); \
        APPLY_SREG(cr3, MISCREG_CR3); \
        APPLY_SREG(cr4, MISCREG_CR4); \
        APPLY_SREG(cr8, MISCREG_CR8); \
        APPLY_SREG(efer, MISCREG_EFER); \
        APPLY_SREG(apic_base, MISCREG_APIC_BASE); \
    } while (0)

// Debug registers (kvm_debugregs).
#define FOREACH_DREG() \
    do { \
        APPLY_DREG(db[0], MISCREG_DR0); \
        APPLY_DREG(db[1], MISCREG_DR1); \
        APPLY_DREG(db[2], MISCREG_DR2); \
        APPLY_DREG(db[3], MISCREG_DR3); \
        APPLY_DREG(dr6, MISCREG_DR6); \
        APPLY_DREG(dr7, MISCREG_DR7); \
    } while (0)

// Segment registers; the index passed to APPLY_SEGMENT is relative to
// MISCREG_SEG_SEL_BASE so it can be fed to the MISCREG_SEG_*() macros.
#define FOREACH_SEGMENT() \
    do { \
        APPLY_SEGMENT(cs, MISCREG_CS - MISCREG_SEG_SEL_BASE); \
        APPLY_SEGMENT(ds, MISCREG_DS - MISCREG_SEG_SEL_BASE); \
        APPLY_SEGMENT(es, MISCREG_ES - MISCREG_SEG_SEL_BASE); \
        APPLY_SEGMENT(fs, MISCREG_FS - MISCREG_SEG_SEL_BASE); \
        APPLY_SEGMENT(gs, MISCREG_GS - MISCREG_SEG_SEL_BASE); \
        APPLY_SEGMENT(ss, MISCREG_SS - MISCREG_SEG_SEL_BASE); \
        APPLY_SEGMENT(tr, MISCREG_TR - MISCREG_SEG_SEL_BASE); \
        APPLY_SEGMENT(ldt, MISCREG_TSL - MISCREG_SEG_SEL_BASE); \
    } while (0)

// Descriptor table registers (GDT/IDT).
#define FOREACH_DTABLE() \
    do { \
        APPLY_DTABLE(gdt, MISCREG_TSG - MISCREG_SEG_SEL_BASE); \
        APPLY_DTABLE(idt, MISCREG_IDTR - MISCREG_SEG_SEL_BASE); \
    } while (0)
169 
/**
 * Allocate a C-style variable-length structure: a Struct header
 * immediately followed by a trailing array of @p entries Entry
 * elements (e.g. kvm_msrs with its kvm_msr_entry array).
 *
 * The storage is raw (no constructors run, contents uninitialized),
 * which matches how the plain-old-data KVM structs are used here.
 *
 * @param entries Number of trailing Entry elements to reserve.
 * @return unique_ptr owning the raw allocation; the deleter releases
 *         it with operator delete.
 */
template<typename Struct, typename Entry>
static auto
newVarStruct(size_t entries)
{
    const size_t bytes = sizeof(Struct) + sizeof(Entry) * entries;
    void (*deleter)(Struct *) = [](Struct *ptr) { operator delete(ptr); };
    auto *raw = static_cast<Struct *>(operator new(bytes));
    return std::unique_ptr<Struct, void(*)(Struct *)>(raw, deleter);
}
179 
/** Dump the KVM general-purpose register file via inform(). */
static void
dumpKvm(const struct kvm_regs &regs)
{
    inform("KVM register state:\n");

#define APPLY_IREG(kreg, mreg) \
    inform("\t" # kreg ": 0x%llx\n", regs.kreg)

    FOREACH_IREG();

#undef APPLY_IREG

    inform("\trip: 0x%llx\n", regs.rip);
    inform("\trflags: 0x%llx\n", regs.rflags);
}

/** Dump one KVM segment register (base, limit, selector, attributes). */
static void
dumpKvm(const char *reg_name, const struct kvm_segment &seg)
{
    inform("\t%s: @0x%llx+%x [sel: 0x%x, type: 0x%x]\n"
           "\t\tpres.: %u, dpl: %u, db: %u, s: %u, l: %u, g: %u, avl: %u, "
           "unus.: %u\n",
           reg_name,
           seg.base, seg.limit, seg.selector, seg.type,
           seg.present, seg.dpl, seg.db, seg.s, seg.l, seg.g, seg.avl,
           seg.unusable);
}

/** Dump a descriptor table register (base and limit). */
static void
dumpKvm(const char *reg_name, const struct kvm_dtable &dtable)
{
    inform("\t%s: @0x%llx+%x\n",
           reg_name, dtable.base, dtable.limit);
}

/** Dump the KVM special registers (segments, control regs, tables). */
static void
dumpKvm(const struct kvm_sregs &sregs)
{
#define APPLY_SREG(kreg, mreg) \
    inform("\t" # kreg ": 0x%llx\n", sregs.kreg);
#define APPLY_SEGMENT(kreg, idx) \
    dumpKvm(# kreg, sregs.kreg);
#define APPLY_DTABLE(kreg, idx) \
    dumpKvm(# kreg, sregs.kreg);

    inform("Special registers:\n");
    FOREACH_SEGMENT();
    FOREACH_SREG();
    FOREACH_DTABLE();

    inform("Interrupt Bitmap:");
    // One 64-bit word of the bitmap per iteration.
    for (int i = 0; i < KVM_NR_INTERRUPTS; i += 64)
        inform(" 0x%.8x", sregs.interrupt_bitmap[i / 64]);

#undef APPLY_SREG
#undef APPLY_SEGMENT
#undef APPLY_DTABLE
}

#ifdef KVM_GET_DEBUGREGS
/** Dump the KVM debug registers (DR0-DR3, DR6, DR7, flags). */
static void
dumpKvm(const struct kvm_debugregs &regs)
{
    inform("KVM debug state:\n");

#define APPLY_DREG(kreg, mreg) \
    inform("\t" # kreg ": 0x%llx\n", regs.kreg)

    FOREACH_DREG();

#undef APPLY_DREG

    inform("\tflags: 0x%llx\n", regs.flags);
}
#endif

/** Dump the FXSave-specific fields (64-bit FP pointers, MXCSR mask). */
static void
dumpFpuSpec(const struct FXSave &xs)
{
    inform("\tlast_ip: 0x%x\n", xs.ctrl64.fpu_ip);
    inform("\tlast_dp: 0x%x\n", xs.ctrl64.fpu_dp);
    inform("\tmxcsr_mask: 0x%x\n", xs.mxcsr_mask);
}

/** Dump the kvm_fpu-specific fields (last FP instr/data pointers). */
static void
dumpFpuSpec(const struct kvm_fpu &fpu)
{
    inform("\tlast_ip: 0x%x\n", fpu.last_ip);
    inform("\tlast_dp: 0x%x\n", fpu.last_dp);
}

/**
 * Dump the FPU/SSE state common to the kvm_fpu and FXSave layouts:
 * control/status words, tag bits, the x87 stack, and XMM registers.
 * The layout-specific parts are delegated to dumpFpuSpec() overloads.
 */
template<typename T>
static void
dumpFpuCommon(const T &fpu)
{
    // TOP-of-stack index lives in FSW bits 13:11.
    const unsigned top((fpu.fsw >> 11) & 0x7);
    inform("\tfcw: 0x%x\n", fpu.fcw);

    inform("\tfsw: 0x%x (top: %i, "
           "conditions: %s%s%s%s, exceptions: %s%s%s%s%s%s %s%s%s)\n",
           fpu.fsw, top,

           (fpu.fsw & CC0Bit) ? "C0" : "",
           (fpu.fsw & CC1Bit) ? "C1" : "",
           (fpu.fsw & CC2Bit) ? "C2" : "",
           (fpu.fsw & CC3Bit) ? "C3" : "",

           (fpu.fsw & IEBit) ? "I" : "",
           (fpu.fsw & DEBit) ? "D" : "",
           (fpu.fsw & ZEBit) ? "Z" : "",
           (fpu.fsw & OEBit) ? "O" : "",
           (fpu.fsw & UEBit) ? "U" : "",
           (fpu.fsw & PEBit) ? "P" : "",

           (fpu.fsw & StackFaultBit) ? "SF " : "",
           (fpu.fsw & ErrSummaryBit) ? "ES " : "",
           (fpu.fsw & BusyBit) ? "BUSY " : ""
        );
    inform("\tftwx: 0x%x\n", fpu.ftwx);
    inform("\tlast_opcode: 0x%x\n", fpu.last_opcode);
    dumpFpuSpec(fpu);
    inform("\tmxcsr: 0x%x\n", fpu.mxcsr);
    inform("\tFP Stack:\n");
    for (int i = 0; i < 8; ++i) {
        // fpr[] is stored relative to TOP; map to the absolute index.
        const unsigned reg_idx((i + top) & 0x7);
        const bool empty(!((fpu.ftwx >> reg_idx) & 0x1));
        const double value(X86ISA::loadFloat80(fpu.fpr[i]));
        char hex[33];
        // 80-bit value = 10 bytes, printed as hex pairs.
        for (int j = 0; j < 10; ++j)
            snprintf(&hex[j*2], 3, "%.2x", fpu.fpr[i][j]);
        inform("\t\tST%i/%i: 0x%s (%f)%s\n", i, reg_idx,
               hex, value, empty ? " (e)" : "");
    }
    inform("\tXMM registers:\n");
    for (int i = 0; i < 16; ++i) {
        char hex[33];
        for (int j = 0; j < 16; ++j)
            snprintf(&hex[j*2], 3, "%.2x", fpu.xmm[i][j]);
        inform("\t\t%i: 0x%s\n", i, hex);
    }
}

/** Dump the FPU state in the legacy kvm_fpu layout. */
static void
dumpKvm(const struct kvm_fpu &fpu)
{
    inform("FPU registers:\n");
    dumpFpuCommon(fpu);
}

/** Dump the FPU state embedded in a kvm_xsave region. */
static void
dumpKvm(const struct kvm_xsave &xsave)
{
    inform("FPU registers (XSave):\n");
    dumpFpuCommon(*(FXSave *)xsave.region);
}

/** Dump a list of MSR index/value pairs. */
static void
dumpKvm(const struct kvm_msrs &msrs)
{
    inform("MSRs:\n");

    for (int i = 0; i < msrs.nmsrs; ++i) {
        const struct kvm_msr_entry &e(msrs.entries[i]);

        inform("\t0x%x: 0x%x\n", e.index, e.data);
    }
}

/** Dump the extended control registers (XCRs). */
static void
dumpKvm(const struct kvm_xcrs &regs)
{
    inform("KVM XCR registers:\n");

    inform("\tFlags: 0x%x\n", regs.flags);
    for (int i = 0; i < regs.nr_xcrs; ++i) {
        inform("\tXCR[0x%x]: 0x%x\n",
               regs.xcrs[i].xcr,
               regs.xcrs[i].value);
    }
}

/** Dump pending vCPU events (exceptions, interrupts, NMIs, SIPI). */
static void
dumpKvm(const struct kvm_vcpu_events &events)
{
    inform("vCPU events:\n");

    inform("\tException: [inj: %i, nr: %i, has_ec: %i, ec: %i]\n",
           events.exception.injected, events.exception.nr,
           events.exception.has_error_code, events.exception.error_code);

    inform("\tInterrupt: [inj: %i, nr: %i, soft: %i]\n",
           events.interrupt.injected, events.interrupt.nr,
           events.interrupt.soft);

    inform("\tNMI: [inj: %i, pending: %i, masked: %i]\n",
           events.nmi.injected, events.nmi.pending,
           events.nmi.masked);

    inform("\tSIPI vector: 0x%x\n", events.sipi_vector);
    inform("\tFlags: 0x%x\n", events.flags);
}
381 
/**
 * Is a virtual address canonical on x86-64?
 *
 * A canonical address has bits 63:47 all equal (all zero or all one),
 * i.e. it is a sign-extended 48-bit address.
 *
 * NOTE(review): the function's signature line was lost in this copy of
 * the file; the name and parameter are restored from the call site in
 * checkSeg() (isCanonicalAddress(seg.base)). The ULL suffix is also
 * added to the second comparison literal for consistency with the mask.
 *
 * @param addr Virtual address to test.
 * @return true if addr is canonical, false otherwise.
 */
static bool
isCanonicalAddress(uint64_t addr)
{
    // x86-64 doesn't currently use the full 64-bit virtual address
    // space, instead it uses signed 48 bit addresses that are
    // sign-extended to 64 bits. Such addresses are known as
    // "canonical".
    uint64_t upper_half(addr & 0xffff800000000000ULL);
    return upper_half == 0 || upper_half == 0xffff800000000000ULL;
}
392 
/**
 * Sanity-check a segment register against the constraints VMX places
 * on guest state. Only emits warn() messages; never modifies state.
 *
 * @param name Human-readable segment name used in warnings.
 * @param idx gem5 misc-register index identifying the segment.
 * @param seg KVM segment register value to validate.
 * @param sregs Full special-register file (for cross-segment checks,
 *              e.g. comparing CS.DPL against SS.DPL).
 */
static void
checkSeg(const char *name, const int idx, const struct kvm_segment &seg,
         struct kvm_sregs sregs)
{
    // Check the register base
    switch (idx) {
      case MISCREG_TSL:
      case MISCREG_TR:
      case MISCREG_FS:
      case MISCREG_GS:
        if (!isCanonicalAddress(seg.base))
            warn("Illegal %s base: 0x%x\n", name, seg.base);
        break;

      case MISCREG_SS:
      case MISCREG_DS:
      case MISCREG_ES:
        if (seg.unusable)
            break;
        [[fallthrough]];
      case MISCREG_CS:
        // CS/SS/DS/ES bases must fit in 32 bits.
        if (seg.base & 0xffffffff00000000ULL)
            warn("Illegal %s base: 0x%x\n", name, seg.base);
        break;
    }

    // Check the type
    switch (idx) {
      case MISCREG_CS:
        switch (seg.type) {
          case 3:
            if (seg.dpl != 0)
                warn("CS type is 3 but dpl != 0.\n");
            break;
          case 9:   // non-conforming code segments
          case 11:
            if (seg.dpl != sregs.ss.dpl)
                warn("CS type is %i but CS DPL != SS DPL\n", seg.type);
            break;
          case 13:  // conforming code segments
          case 15:
            if (seg.dpl > sregs.ss.dpl)
                warn("CS type is %i but CS DPL > SS DPL\n", seg.type);
            break;
          default:
            warn("Illegal CS type: %i\n", seg.type);
            break;
        }
        break;

      case MISCREG_SS:
        if (seg.unusable)
            break;
        switch (seg.type) {
          case 3:
            if (sregs.cs.type == 3 && seg.dpl != 0)
                warn("CS type is 3, but SS DPL is != 0.\n");
            [[fallthrough]];
          case 7:
            // CR0 bit 0 is PE (protected-mode enable).
            if (!(sregs.cr0 & 1) && seg.dpl != 0)
                warn("SS DPL is %i, but CR0 PE is 0\n", seg.dpl);
            break;
          default:
            warn("Illegal SS type: %i\n", seg.type);
            break;
        }
        break;

      case MISCREG_DS:
      case MISCREG_ES:
      case MISCREG_FS:
      case MISCREG_GS:
        if (seg.unusable)
            break;
        // Data segments must be accessed; code segments (bit 3 set)
        // must also be readable (bit 1 set).
        if (!(seg.type & 0x1) ||
            ((seg.type & 0x8) && !(seg.type & 0x2)))
            warn("%s has an illegal type field: %i\n", name, seg.type);
        break;

      case MISCREG_TR:
        // TODO: We should check the CPU mode
        if (seg.type != 3 && seg.type != 11)
            warn("%s: Illegal segment type (%i)\n", name, seg.type);
        break;

      case MISCREG_TSL:
        if (seg.unusable)
            break;
        if (seg.type != 2)
            warn("%s: Illegal segment type (%i)\n", name, seg.type);
        break;
    }

    // Check the S (descriptor type) flag.
    switch (idx) {
      case MISCREG_SS:
      case MISCREG_DS:
      case MISCREG_ES:
      case MISCREG_FS:
      case MISCREG_GS:
        if (seg.unusable)
            break;
        [[fallthrough]];
      case MISCREG_CS:
        if (!seg.s)
            warn("%s: S flag not set\n", name);
        break;

      case MISCREG_TSL:
        if (seg.unusable)
            break;
        [[fallthrough]];
      case MISCREG_TR:
        if (seg.s)
            warn("%s: S flag is set\n", name);
        break;
    }

    // Check present flag and limit/granularity consistency.
    switch (idx) {
      case MISCREG_SS:
      case MISCREG_DS:
      case MISCREG_ES:
      case MISCREG_FS:
      case MISCREG_GS:
      case MISCREG_TSL:
        if (seg.unusable)
            break;
        [[fallthrough]];
      case MISCREG_TR:
      case MISCREG_CS:
        if (!seg.present)
            warn("%s: P flag not set\n", name);

        // With G set the low 12 limit bits must be all ones; with G
        // clear the top limit bits must be zero.
        if (((seg.limit & 0xFFF) == 0 && seg.g) ||
            ((seg.limit & 0xFFF00000) != 0 && !seg.g)) {
            warn("%s limit (0x%x) and g (%i) combination is illegal.\n",
                 name, seg.limit, seg.g);
        }
        break;
    }

    // TODO: Check CS DB
}
535 
/**
 * Construct an x86 KVM-accelerated CPU. Verifies that the host kernel
 * provides the required KVM capabilities (panics on hard requirements,
 * warns on optional ones) and records which optional state-access
 * ioctls (debug regs, XSAVE, XCRs) are available.
 */
X86KvmCPU::X86KvmCPU(const X86KvmCPUParams &params)
    : BaseKvmCPU(params),
      useXSave(params.useXSave)
{
    Kvm &kvm(*vm.kvm);

    // Hard requirements: without these we cannot run at all.
    if (!kvm.capSetTSSAddress())
        panic("KVM: Missing capability (KVM_CAP_SET_TSS_ADDR)\n");
    if (!kvm.capExtendedCPUID())
        panic("KVM: Missing capability (KVM_CAP_EXT_CPUID)\n");
    // Soft requirements: degraded functionality but still usable.
    if (!kvm.capUserNMI())
        warn("KVM: Missing capability (KVM_CAP_USER_NMI)\n");
    if (!kvm.capVCPUEvents())
        warn("KVM: Missing capability (KVM_CAP_VCPU_EVENTS)\n");

    haveDebugRegs = kvm.capDebugRegs();
    haveXSave = kvm.capXSave();
    haveXCRs = kvm.capXCRs();

    if (useXSave && !haveXSave) {
        warn("KVM: XSAVE not supported by host. MXCSR synchronization might "
             "be unreliable due to kernel bugs.\n");
        useXSave = false;
    } else if (!useXSave) {
        warn("KVM: XSave FPU/SIMD synchronization disabled by user.\n");
    }
}
563 
// NOTE(review): several method signature lines in this region were
// lost when this copy of the file was extracted (each gap is flagged
// below). The surviving tokens are preserved unchanged; restore the
// missing lines from upstream gem5 before building.

// NOTE(review): missing signature — presumably the X86KvmCPU
// destructor (empty body). TODO confirm against upstream.
{
}

// NOTE(review): missing signature and first statement — presumably
// X86KvmCPU::startup() chaining to BaseKvmCPU::startup(); the
// updateCPUID() call below supports that reading. TODO confirm.
void
{

    updateCPUID();

    // TODO: Do we need to create an identity mapped TSS area? We
    // should call kvm.vm.setTSSAddress() here in that case. It should
    // only be needed for old versions of the virtualization
    // extensions. We should make sure that the identity range is
    // reserved in the e820 memory map in that case.
}

// NOTE(review): missing signature — a dump-everything entry point
// (updateKvmState() below calls dump()); dumps all KVM vCPU state.
void
{
    dumpIntRegs();
    if (useXSave)
        dumpXSave();
    else
        dumpFpuRegs();
    dumpSpecRegs();
    dumpDebugRegs();
    dumpXCRs();
    dumpVCpuEvents();
    dumpMSRs();
}

// NOTE(review): missing signature — fetches and dumps the legacy
// kvm_fpu state (matches the dumpFpuRegs() call above).
void
{
    struct kvm_fpu fpu;
    getFPUState(fpu);
    dumpKvm(fpu);
}

// NOTE(review): missing signature — fetches and dumps the
// general-purpose registers (matches dumpIntRegs() above).
void
{
    struct kvm_regs regs;
    getRegisters(regs);
    dumpKvm(regs);
}

// NOTE(review): missing signature — fetches and dumps the special
// registers (matches dumpSpecRegs() above).
void
{
    struct kvm_sregs sregs;
    getSpecialRegisters(sregs);
    dumpKvm(sregs);
}

// NOTE(review): missing signature — dumps debug registers when the
// kernel supports them (matches dumpDebugRegs() above).
void
{
    if (haveDebugRegs) {
#ifdef KVM_GET_DEBUGREGS
        struct kvm_debugregs dregs;
        getDebugRegisters(dregs);
        dumpKvm(dregs);
#endif
    } else {
        inform("Debug registers not supported by kernel.\n");
    }
}

// NOTE(review): missing signature — dumps XCRs when supported
// (matches dumpXCRs() above).
void
{
    if (haveXCRs) {
        struct kvm_xcrs xcrs;
        getXCRs(xcrs);
        dumpKvm(xcrs);
    } else {
        inform("XCRs not supported by kernel.\n");
    }
}

// NOTE(review): missing signature — dumps the XSave area when
// supported (matches dumpXSave() above).
void
{
    if (haveXSave) {
        struct kvm_xsave xsave;
        getXSave(xsave);
        dumpKvm(xsave);
    } else {
        inform("XSave not supported by kernel.\n");
    }
}

// NOTE(review): missing signature — dumps pending vCPU events
// (matches dumpVCpuEvents() above).
void
{
    struct kvm_vcpu_events events;
    getVCpuEvents(events);
    dumpKvm(events);
}

// NOTE(review): missing signature — dumps all host-supported MSRs
// (matches dumpMSRs() above).
void
{
    const Kvm::MSRIndexVector &supported_msrs(vm.kvm->getSupportedMSRs());
    auto msrs = newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(
        supported_msrs.size());

    msrs->nmsrs = supported_msrs.size();
    for (int i = 0; i < supported_msrs.size(); ++i) {
        struct kvm_msr_entry &e(msrs->entries[i]);
        e.index = supported_msrs[i];
        e.reserved = 0;
        e.data = 0;
    }
    getMSRs(*msrs.get());

    dumpKvm(*msrs.get());
}

// NOTE(review): missing signature and the per-category update calls —
// the DPRINTF string identifies this as X86KvmCPU::updateKvmState(),
// which pushes the full gem5 thread context into KVM. The lines that
// invoked the updateKvmState*() helpers appear to have been lost.
void
{

    DPRINTF(KvmContext, "X86KvmCPU::updateKvmState():\n");
    if (debug::KvmContext)
        dump();
}
698 
// NOTE(review): missing signature line — this body copies the gem5
// integer register file, RIP, and RFLAGS into a kvm_regs struct and
// pushes it to KVM, i.e. the regs half of updateKvmState(). TODO
// confirm the exact method name against upstream gem5.
void
{
    struct kvm_regs regs;

#define APPLY_IREG(kreg, mreg) regs.kreg = tc->readIntReg(mreg)
    FOREACH_IREG();
#undef APPLY_IREG

    // KVM wants an offset relative to the CS base, gem5's PC is linear.
    regs.rip = tc->pcState().instAddr() - tc->readMiscReg(MISCREG_CS_BASE);

    /* You might think that setting regs.rflags to the contents
     * MISCREG_RFLAGS here would suffice. In that case you're
     * mistaken. We need to reconstruct it from a bunch of ucode
     * registers and wave a dead chicken over it (aka mask out and set
     * reserved bits) to get it to work.
     */
    regs.rflags = X86ISA::getRFlags(tc);

    setRegisters(regs);
}
720 
/**
 * Copy one gem5 segment register (base/limit/selector/attributes)
 * into a kvm_segment.
 *
 * NOTE(review): the declaration of `attr` is missing from this copy —
 * the uses below (attr.type, attr.present, ...) imply a lost line that
 * read the segment's attribute misc-register. TODO restore from
 * upstream gem5.
 *
 * @param tc Thread context to read from.
 * @param kvm_seg Destination KVM segment register.
 * @param index Segment index relative to MISCREG_SEG_SEL_BASE.
 */
static inline void
setKvmSegmentReg(ThreadContext *tc, struct kvm_segment &kvm_seg,
                 const int index)
{

    kvm_seg.base = tc->readMiscRegNoEffect(MISCREG_SEG_BASE(index));
    kvm_seg.limit = tc->readMiscRegNoEffect(MISCREG_SEG_LIMIT(index));
    kvm_seg.selector = tc->readMiscRegNoEffect(MISCREG_SEG_SEL(index));
    kvm_seg.type = attr.type;
    kvm_seg.present = attr.present;
    kvm_seg.dpl = attr.dpl;
    kvm_seg.db = attr.defaultSize;
    kvm_seg.s = attr.system;
    kvm_seg.l = attr.longMode;
    kvm_seg.g = attr.granularity;
    kvm_seg.avl = attr.avl;

    // A segment is normally unusable when the selector is zero. There
    // is a attr.unusable flag in gem5, but it seems unused. qemu
    // seems to set this to 0 all the time, so we just do the same and
    // hope for the best.
    kvm_seg.unusable = 0;
}

/**
 * Copy one gem5 descriptor table register (GDT/IDT base and limit)
 * into a kvm_dtable.
 *
 * @param tc Thread context to read from.
 * @param kvm_dtable Destination KVM descriptor table register.
 * @param index Table index relative to MISCREG_SEG_SEL_BASE.
 */
static inline void
setKvmDTableReg(ThreadContext *tc, struct kvm_dtable &kvm_dtable,
                const int index)
{
    kvm_dtable.base = tc->readMiscRegNoEffect(MISCREG_SEG_BASE(index));
    kvm_dtable.limit = tc->readMiscRegNoEffect(MISCREG_SEG_LIMIT(index));
}
753 
754 static void
755 forceSegAccessed(struct kvm_segment &seg)
756 {
757  // Intel's VMX requires that (some) usable segments are flagged as
758  // 'accessed' (i.e., the lowest bit in the segment type is set)
759  // when entering VMX. This wouldn't necessary be the case even if
760  // gem5 did set the access bits correctly, so we force it to one
761  // in that case.
762  if (!seg.unusable)
763  seg.type |= SEG_TYPE_BIT_ACCESSED;
764 }
765 
// NOTE(review): missing signature line — this body builds a kvm_sregs
// struct from the gem5 thread context, massages it to satisfy VMX
// guest-state rules, optionally validates it, and pushes it to KVM
// (the sregs half of updateKvmState()). TODO confirm the method name
// against upstream gem5.
void
{
    struct kvm_sregs sregs;

#define APPLY_SREG(kreg, mreg) sregs.kreg = tc->readMiscRegNoEffect(mreg)
#define APPLY_SEGMENT(kreg, idx) setKvmSegmentReg(tc, sregs.kreg, idx)
#define APPLY_DTABLE(kreg, idx) setKvmDTableReg(tc, sregs.kreg, idx)

    FOREACH_SREG();
    FOREACH_SEGMENT();
    FOREACH_DTABLE();

#undef APPLY_SREG
#undef APPLY_SEGMENT
#undef APPLY_DTABLE

    // Clear the interrupt bitmap
    memset(&sregs.interrupt_bitmap, 0, sizeof(sregs.interrupt_bitmap));

    // VMX requires CS, SS, DS, ES, FS, and GS to have the accessed
    // bit in the type field set.
    forceSegAccessed(sregs.cs);
    forceSegAccessed(sregs.ss);
    forceSegAccessed(sregs.ds);
    forceSegAccessed(sregs.es);
    forceSegAccessed(sregs.fs);
    forceSegAccessed(sregs.gs);

    // There are currently some cases where the active task isn't
    // marked as busy. This is illegal in VMX, so we force it to busy.
    if (sregs.tr.type == SEG_SYS_TYPE_TSS_AVAILABLE) {
        hack("tr.type (%i) is not busy. Forcing the busy bit.\n",
             sregs.tr.type);
        sregs.tr.type = SEG_SYS_TYPE_TSS_BUSY;
    }

    // VMX requires the DPL of SS and CS to be the same for
    // non-conforming code segments. It seems like m5 doesn't set the
    // DPL of SS correctly when taking interrupts, so we need to fix
    // that here.
    if ((sregs.cs.type == SEG_CS_TYPE_ACCESSED ||
         sregs.cs.type == SEG_CS_TYPE_READ_ACCESSED) &&
        sregs.cs.dpl != sregs.ss.dpl) {

        hack("CS.DPL (%i) != SS.DPL (%i): Forcing SS.DPL to %i\n",
             sregs.cs.dpl, sregs.ss.dpl, sregs.cs.dpl);
        sregs.ss.dpl = sregs.cs.dpl;
    }

    // Do checks after fixing up the state to avoid getting excessive
    // amounts of warnings.
    RFLAGS rflags_nocc(tc->readMiscReg(MISCREG_RFLAGS));
    if (!rflags_nocc.vm) {
        // Do segment verification if the CPU isn't entering virtual
        // 8086 mode. We currently assume that unrestricted guest
        // mode is available.

#define APPLY_SEGMENT(kreg, idx) \
        checkSeg(# kreg, idx + MISCREG_SEG_SEL_BASE, sregs.kreg, sregs)

        FOREACH_SEGMENT();
#undef APPLY_SEGMENT
    }

    setSpecialRegisters(sregs);
}
833 
// NOTE(review): missing signature — the call below
// (updateKvmStateFPUCommon(tc, xsave)) identifies this as
// updateKvmStateFPUCommon(ThreadContext *tc, T &fpu): it copies the
// FPU/SSE state shared by the kvm_fpu and FXSave layouts from gem5
// into the destination struct. The right-hand sides of the two XMM
// assignments at the bottom were also lost (they presumably read the
// XMM low/high float registers — TODO restore from upstream).
template <typename T>
static void
{
    fpu.mxcsr = tc->readMiscRegNoEffect(MISCREG_MXCSR);
    fpu.fcw = tc->readMiscRegNoEffect(MISCREG_FCW);
    // No need to rebuild from MISCREG_FSW and MISCREG_TOP if we read
    // with effects.
    fpu.fsw = tc->readMiscReg(MISCREG_FSW);

    uint64_t ftw(tc->readMiscRegNoEffect(MISCREG_FTW));
    fpu.ftwx = X86ISA::convX87TagsToXTags(ftw);

    fpu.last_opcode = tc->readMiscRegNoEffect(MISCREG_FOP);

    // TOP-of-stack index lives in FSW bits 13:11; fpr[] is TOP-relative.
    const unsigned top((fpu.fsw >> 11) & 0x7);
    for (int i = 0; i < 8; ++i) {
        const unsigned reg_idx((i + top) & 0x7);
        const double value(bitsToFloat64(
            tc->readFloatReg(FLOATREG_FPR(reg_idx))));
        DPRINTF(KvmContext, "Setting KVM FP reg %i (st[%i]) := %f\n",
                reg_idx, i, value);
        X86ISA::storeFloat80(fpu.fpr[i], value);
    }

    // TODO: We should update the MMX state

    for (int i = 0; i < 16; ++i) {
        *(uint64_t *)&fpu.xmm[i][0] =
        *(uint64_t *)&fpu.xmm[i][8] =
    }
}

// NOTE(review): missing signature and two `if` guard lines — this is
// the legacy (kvm_fpu) FPU sync path; the indented warn_once() calls
// were presumably guarded by checks that FISEG/FOSEG are non-zero.
// TODO restore the lost lines from upstream gem5.
void
{
    struct kvm_fpu fpu;

    // There is some padding in the FP registers, so we'd better zero
    // the whole struct.
    memset(&fpu, 0, sizeof(fpu));


        warn_once("MISCREG_FISEG is non-zero.\n");

    fpu.last_ip = tc->readMiscRegNoEffect(MISCREG_FIOFF);

        warn_once("MISCREG_FOSEG is non-zero.\n");

    fpu.last_dp = tc->readMiscRegNoEffect(MISCREG_FOOFF);

    setFPUState(fpu);
}

// NOTE(review): missing signature and two `if` guard lines — this is
// the XSave FPU sync path, mirroring the legacy path above but writing
// through an FXSave view of the kvm_xsave region.
void
{
    struct kvm_xsave kxsave;
    FXSave &xsave(*(FXSave *)kxsave.region);

    // There is some padding and reserved fields in the structure, so
    // we'd better zero the whole thing.
    memset(&kxsave, 0, sizeof(kxsave));

    updateKvmStateFPUCommon(tc, xsave);

        warn_once("MISCREG_FISEG is non-zero.\n");

    xsave.ctrl64.fpu_ip = tc->readMiscRegNoEffect(MISCREG_FIOFF);

        warn_once("MISCREG_FOSEG is non-zero.\n");

    xsave.ctrl64.fpu_dp = tc->readMiscRegNoEffect(MISCREG_FOOFF);

    setXSave(kxsave);
}

// NOTE(review): missing signature and both branch bodies — this
// dispatcher presumably selects the XSave or legacy FPU sync path
// based on useXSave. TODO restore the two calls from upstream gem5.
void
{
    if (useXSave)
    else
}

// NOTE(review): missing signature — this body writes every MSR in the
// gem5/KVM intersection from the thread context into KVM.
void
{
    KvmMSRVector msrs;

    const Kvm::MSRIndexVector &indices(getMsrIntersection());

    for (auto it = indices.cbegin(); it != indices.cend(); ++it) {
        struct kvm_msr_entry e;

        e.index = *it;
        e.reserved = 0;
        e.data = tc->readMiscReg(msrMap.at(*it));
        DPRINTF(KvmContext, "Adding MSR: idx: 0x%x, data: 0x%x\n",
                e.index, e.data);

        msrs.push_back(e);
    }

    setMSRs(msrs);
}
948 
// NOTE(review): missing signature and several call lines — the
// DPRINTF string identifies this as X86KvmCPU::updateThreadContext(),
// which pulls KVM vCPU state back into the gem5 thread context. The
// lines that invoked updateThreadContextSRegs()/XSave()/FPU()/MSRs()
// appear to have been lost. TODO restore from upstream gem5.
void
{
    struct kvm_regs regs;
    struct kvm_sregs sregs;

    getRegisters(regs);
    getSpecialRegisters(sregs);

    DPRINTF(KvmContext, "X86KvmCPU::updateThreadContext():\n");
    if (debug::KvmContext)
        dump();

    updateThreadContextRegs(regs, sregs);
    if (useXSave) {
        struct kvm_xsave xsave;
        getXSave(xsave);

    } else {
        struct kvm_fpu fpu;
        getFPUState(fpu);

    }

    // The M5 misc reg caches some values from other
    // registers. Writing to it with side effects causes it to be
    // updated from its source registers.
}

/**
 * Copy KVM's general-purpose registers, RIP, and RFLAGS back into the
 * gem5 thread context.
 *
 * @param regs KVM general-purpose register state.
 * @param sregs KVM special registers (needed for the CS base when
 *              converting RIP to a linear PC).
 */
void
X86KvmCPU::updateThreadContextRegs(const struct kvm_regs &regs,
                                   const struct kvm_sregs &sregs)
{
#define APPLY_IREG(kreg, mreg) tc->setIntReg(mreg, regs.kreg)

    FOREACH_IREG();

#undef APPLY_IREG

    // gem5's PC is linear; KVM's RIP is relative to the CS base.
    tc->pcState(PCState(regs.rip + sregs.cs.base));

    // Flags are spread out across multiple semi-magic registers so we
    // need some special care when updating them.
    X86ISA::setRFlags(tc, regs.rflags);
}
999 
1000 
/**
 * Copy a KVM segment register back into the gem5 thread context.
 *
 * NOTE(review): a line appears to be missing after the selector write
 * — the `attr` value built above is never stored (presumably a lost
 * setMiscReg of the segment's attribute register). TODO restore from
 * upstream gem5.
 */
inline void
setContextSegment(ThreadContext *tc, const struct kvm_segment &kvm_seg,
                  const int index)
{
    SegAttr attr(0);

    attr.type = kvm_seg.type;
    attr.present = kvm_seg.present;
    attr.dpl = kvm_seg.dpl;
    attr.defaultSize = kvm_seg.db;
    attr.system = kvm_seg.s;
    attr.longMode = kvm_seg.l;
    attr.granularity = kvm_seg.g;
    attr.avl = kvm_seg.avl;
    attr.unusable = kvm_seg.unusable;

    // We need some setMiscReg magic here to keep the effective base
    // addresses in sync. We need an up-to-date version of EFER, so
    // make sure this is called after the sregs have been synced.
    tc->setMiscReg(MISCREG_SEG_BASE(index), kvm_seg.base);
    tc->setMiscReg(MISCREG_SEG_LIMIT(index), kvm_seg.limit);
    tc->setMiscReg(MISCREG_SEG_SEL(index), kvm_seg.selector);
}

/**
 * Copy a KVM descriptor table register (GDT/IDT) back into the gem5
 * thread context.
 */
inline void
setContextSegment(ThreadContext *tc, const struct kvm_dtable &kvm_dtable,
                  const int index)
{
    // We need some setMiscReg magic here to keep the effective base
    // addresses in sync. We need an up-to-date version of EFER, so
    // make sure this is called after the sregs have been synced.
    tc->setMiscReg(MISCREG_SEG_BASE(index), kvm_dtable.base);
    tc->setMiscReg(MISCREG_SEG_LIMIT(index), kvm_dtable.limit);
}

/**
 * Copy KVM's special registers (control regs, segments, descriptor
 * tables) back into the gem5 thread context. APIC base and CR8 are
 * asserted to already match the kvm_run shared area.
 */
void
X86KvmCPU::updateThreadContextSRegs(const struct kvm_sregs &sregs)
{
    assert(getKvmRunState()->apic_base == sregs.apic_base);
    assert(getKvmRunState()->cr8 == sregs.cr8);

#define APPLY_SREG(kreg, mreg) tc->setMiscRegNoEffect(mreg, sregs.kreg)
#define APPLY_SEGMENT(kreg, idx) setContextSegment(tc, sregs.kreg, idx)
#define APPLY_DTABLE(kreg, idx) setContextSegment(tc, sregs.kreg, idx)
    FOREACH_SREG();
    FOREACH_SEGMENT();
    FOREACH_DTABLE();
#undef APPLY_SREG
#undef APPLY_SEGMENT
#undef APPLY_DTABLE
}
1053 
// NOTE(review): missing signature — the callers below pass a kvm_fpu
// or FXSave, so this is the common copy the FPU/SSE state back into
// the gem5 thread context helper (x87 stack, control/status/tag
// words, opcode, XMM registers).
template<typename T>
static void
{
    // TOP-of-stack index lives in FSW bits 13:11; fpr[] is TOP-relative.
    const unsigned top((fpu.fsw >> 11) & 0x7);

    for (int i = 0; i < 8; ++i) {
        const unsigned reg_idx((i + top) & 0x7);
        const double value(X86ISA::loadFloat80(fpu.fpr[i]));
        DPRINTF(KvmContext, "Setting gem5 FP reg %i (st[%i]) := %f\n",
                reg_idx, i, value);
        tc->setFloatReg(FLOATREG_FPR(reg_idx), floatToBits64(value));
    }

    // TODO: We should update the MMX state

    tc->setMiscRegNoEffect(MISCREG_MXCSR, fpu.mxcsr);
    tc->setMiscRegNoEffect(MISCREG_FCW, fpu.fcw);
    tc->setMiscRegNoEffect(MISCREG_FSW, fpu.fsw);

    uint64_t ftw(convX87XTagsToTags(fpu.ftwx));
    // TODO: Are these registers really the same?
    tc->setMiscRegNoEffect(MISCREG_FTW, ftw);
    tc->setMiscRegNoEffect(MISCREG_FTAG, ftw);

    tc->setMiscRegNoEffect(MISCREG_FOP, fpu.last_opcode);

    for (int i = 0; i < 16; ++i) {
        tc->setFloatReg(FLOATREG_XMM_LOW(i), *(uint64_t *)&fpu.xmm[i][0]);
        tc->setFloatReg(FLOATREG_XMM_HIGH(i), *(uint64_t *)&fpu.xmm[i][8]);
    }
}

/**
 * Copy the legacy kvm_fpu state back into the gem5 thread context.
 *
 * NOTE(review): lines between the statements below were lost in
 * extraction — presumably the call to the common helper above and
 * writes clearing/setting FISEG/FOSEG. TODO restore from upstream.
 */
void
X86KvmCPU::updateThreadContextFPU(const struct kvm_fpu &fpu)
{

    tc->setMiscRegNoEffect(MISCREG_FIOFF, fpu.last_ip);
    tc->setMiscRegNoEffect(MISCREG_FOOFF, fpu.last_dp);
}

/**
 * Copy the XSave-format FPU state back into the gem5 thread context,
 * going through an FXSave view of the kvm_xsave region.
 *
 * NOTE(review): as above, intermediate lines (common-helper call and
 * FISEG/FOSEG handling) appear to be missing from this copy.
 */
void
X86KvmCPU::updateThreadContextXSave(const struct kvm_xsave &kxsave)
{
    const FXSave &xsave(*(const FXSave *)kxsave.region);

    tc->setMiscRegNoEffect(MISCREG_FIOFF, xsave.ctrl64.fpu_ip);
    tc->setMiscRegNoEffect(MISCREG_FOOFF, xsave.ctrl64.fpu_dp);
}

// NOTE(review): missing signature — this body reads every MSR in the
// gem5/KVM intersection from KVM and writes it back into the gem5
// thread context.
void
{
    const Kvm::MSRIndexVector &msrs(getMsrIntersection());

    auto kvm_msrs = newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(
        msrs.size());
    struct kvm_msr_entry *entry;

    // Create a list of MSRs to read
    kvm_msrs->nmsrs = msrs.size();
    entry = &kvm_msrs->entries[0];
    for (auto it = msrs.cbegin(); it != msrs.cend(); ++it, ++entry) {
        entry->index = *it;
        entry->reserved = 0;
        entry->data = 0;
    }

    getMSRs(*kvm_msrs.get());

    // Update M5's state
    entry = &kvm_msrs->entries[0];
    for (int i = 0; i < kvm_msrs->nmsrs; ++i, ++entry) {
        DPRINTF(KvmContext, "Setting M5 MSR: idx: 0x%x, data: 0x%x\n",
                entry->index, entry->data);

        tc->setMiscReg(X86ISA::msrMap.at(entry->index), entry->data);
    }
}
1141 
// NOTE(review): missing signature and at least two statement lines —
// this body fetches the next pending interrupt from the local APIC
// and delivers it to KVM, handling NMI/INIT/STARTUP specially. The
// NMI branch is empty in this copy (its delivery call was presumably
// lost), as was a statement before the scoped block. TODO restore
// from upstream gem5.
void
{
    Fault fault;


    {
        // Migrate to the interrupt controller's thread to get the
        // interrupt. Even though the individual methods are safe to
        // call across threads, we might still lose interrupts unless
        // they are getInterrupt() and updateIntrInfo() are called
        // atomically.
        EventQueue::ScopedMigration migrate(interrupts[0]->eventQueue());
        fault = interrupts[0]->getInterrupt();
        interrupts[0]->updateIntrInfo();
    }

    X86Interrupt *x86int(dynamic_cast<X86Interrupt *>(fault.get()));
    if (dynamic_cast<NonMaskableInterrupt *>(fault.get())) {
        DPRINTF(KvmInt, "Delivering NMI\n");
    } else if (dynamic_cast<InitInterrupt *>(fault.get())) {
        DPRINTF(KvmInt, "INIT interrupt\n");
        fault.get()->invoke(tc);
        // Delay the kvm state update since we won't enter KVM on this
        // tick.
        threadContextDirty = true;
        // HACK: gem5 doesn't actually have any BIOS code, which means
        // that we need to halt the thread and wait for a startup
        // interrupt before restarting the thread. The simulated CPUs
        // use the same kind of hack using a microcode routine.
        thread->suspend();
    } else if (dynamic_cast<StartupInterrupt *>(fault.get())) {
        DPRINTF(KvmInt, "STARTUP interrupt\n");
        fault.get()->invoke(tc);
        // The kvm state is assumed to have been updated when entering
        // kvmRun(), so we need to update manually it here.
        updateKvmState();
    } else if (x86int) {
        struct kvm_interrupt kvm_int;
        kvm_int.irq = x86int->getVector();

        DPRINTF(KvmInt, "Delivering interrupt: %s (%u)\n",
                fault->name(), kvm_int.irq);

        kvmInterrupt(kvm_int);
    } else {
        panic("KVM: Unknown interrupt type\n");
    }

}
1194 
1195 Tick
1197 {
1198  struct kvm_run &kvm_run(*getKvmRunState());
1199 
1200  auto *lapic = dynamic_cast<X86ISA::Interrupts *>(interrupts[0]);
1201 
1202  if (lapic->checkInterruptsRaw()) {
1203  if (lapic->hasPendingUnmaskable()) {
1204  DPRINTF(KvmInt,
1205  "Delivering unmaskable interrupt.\n");
1208  } else if (kvm_run.ready_for_interrupt_injection) {
1209  // KVM claims that it is ready for an interrupt. It might
1210  // be lying if we just updated rflags and disabled
1211  // interrupts (e.g., by doing a CPU handover). Let's sync
1212  // the thread context and check if there are /really/
1213  // interrupts that should be delivered now.
1215  if (lapic->checkInterrupts()) {
1216  DPRINTF(KvmInt,
1217  "M5 has pending interrupts, delivering interrupt.\n");
1218 
1220  } else {
1221  DPRINTF(KvmInt,
1222  "Interrupt delivery delayed due to KVM confusion.\n");
1223  kvm_run.request_interrupt_window = 1;
1224  }
1225  } else if (!kvm_run.request_interrupt_window) {
1226  DPRINTF(KvmInt,
1227  "M5 has pending interrupts, requesting interrupt "
1228  "window.\n");
1229  kvm_run.request_interrupt_window = 1;
1230  }
1231  } else {
1232  kvm_run.request_interrupt_window = 0;
1233  }
1234 
1235  // The CPU might have been suspended as a result of the INIT
1236  // interrupt delivery hack. In that case, don't enter into KVM.
1237  if (_status == Idle)
1238  return 0;
1239  else
1240  return BaseKvmCPU::kvmRun(ticks);
1241 }
1242 
1243 Tick
1245 {
1246  struct kvm_run &kvm_run(*getKvmRunState());
1247 
1248  if (!archIsDrained()) {
1249  DPRINTF(Drain, "kvmRunDrain: Architecture code isn't drained\n");
1250 
1251  // Tell KVM to find a suitable place to deliver interrupts. This
1252  // should ensure that pending interrupts have been delivered and
1253  // things are reasonably consistent (i.e., no interrupts pending
1254  // in the guest).
1255  kvm_run.request_interrupt_window = 1;
1256 
1257  // Limit the run to 1 millisecond. That is hopefully enough to
1258  // reach an interrupt window. Otherwise, we'll just try again
1259  // later.
1261  } else {
1262  DPRINTF(Drain, "kvmRunDrain: Delivering pending IO\n");
1263 
1264  return BaseKvmCPU::kvmRun(0);
1265  }
1266 }
1267 
1268 uint64_t
1270 {
1271  return getMSR(MSR_TSC);
1272 }
1273 
1274 void
1276 {
1277  struct kvm_run &kvm_run(*getKvmRunState());
1278  const uint16_t port(kvm_run.io.port);
1279 
1280  assert(kvm_run.exit_reason == KVM_EXIT_IO);
1281 
1282  if (kvm_run.io.size != 4) {
1283  panic("Unexpected IO size (%u) for address 0x%x.\n",
1284  kvm_run.io.size, port);
1285  }
1286 
1287  if (kvm_run.io.count != 1) {
1288  panic("Unexpected IO count (%u) for address 0x%x.\n",
1289  kvm_run.io.count, port);
1290  }
1291 
1292  uint32_t *data((uint32_t *)getGuestData(kvm_run.io.data_offset));
1293  if (kvm_run.io.direction == KVM_EXIT_IO_OUT)
1294  tc->setMiscReg(miscreg, *data);
1295  else
1296  *data = tc->readMiscRegNoEffect(miscreg);
1297 }
1298 
1299 Tick
1301 {
1302  struct kvm_run &kvm_run(*getKvmRunState());
1303  bool isWrite(kvm_run.io.direction == KVM_EXIT_IO_OUT);
1304  unsigned char *guestData(getGuestData(kvm_run.io.data_offset));
1305  Tick delay(0);
1306  uint16_t port(kvm_run.io.port);
1307  Addr pAddr;
1308  const int count(kvm_run.io.count);
1309 
1310  assert(kvm_run.io.direction == KVM_EXIT_IO_IN ||
1311  kvm_run.io.direction == KVM_EXIT_IO_OUT);
1312 
1313  DPRINTF(KvmIO, "KVM-x86: Handling IO instruction (%s) (port: 0x%x)\n",
1314  (isWrite ? "out" : "in"), kvm_run.io.port);
1315 
1316  /* Vanilla gem5 handles PCI discovery in the TLB(!). Since we
1317  * don't use the TLB component, we need to intercept and handle
1318  * the PCI configuration space IO ports here.
1319  *
1320  * The IO port PCI discovery mechanism uses one address register
1321  * and one data register. We map the address register to a misc
1322  * reg and use that to re-route data register accesses to the
1323  * right location in the PCI configuration space.
1324  */
1325  if (port == IO_PCI_CONF_ADDR) {
1327  return 0;
1328  } else if ((port & ~0x3) == IO_PCI_CONF_DATA_BASE) {
1329  Addr pciConfigAddr(tc->readMiscRegNoEffect(
1331  if (pciConfigAddr & 0x80000000) {
1332  pAddr = X86ISA::x86PciConfigAddress((pciConfigAddr & 0x7ffffffc) |
1333  (port & 0x3));
1334  } else {
1335  pAddr = X86ISA::x86IOAddress(port);
1336  }
1337  } else {
1338  pAddr = X86ISA::x86IOAddress(port);
1339  }
1340 
1341  const MemCmd cmd(isWrite ? MemCmd::WriteReq : MemCmd::ReadReq);
1342  // Temporarily lock and migrate to the device event queue to
1343  // prevent races in multi-core mode.
1345  for (int i = 0; i < count; ++i) {
1346  RequestPtr io_req = std::make_shared<Request>(
1347  pAddr, kvm_run.io.size,
1348  Request::UNCACHEABLE, dataRequestorId());
1349 
1350  io_req->setContext(tc->contextId());
1351 
1352  PacketPtr pkt = new Packet(io_req, cmd);
1353 
1354  pkt->dataStatic(guestData);
1355  delay += dataPort.submitIO(pkt);
1356 
1357  guestData += kvm_run.io.size;
1358  }
1359 
1360  return delay;
1361 }
1362 
1363 Tick
1365 {
1366  // We don't need to do anything here since this is caught the next
1367  // time we execute kvmRun(). We still overload the exit event to
1368  // silence the warning about an unhandled exit event.
1369  return 0;
1370 }
1371 
1372 bool
1374 {
1375  struct kvm_vcpu_events events;
1376 
1377  getVCpuEvents(events);
1378 
1379  // We could probably handle this in a by re-inserting interrupts
1380  // that are pending into gem5 on a drain. However, that would
1381  // probably be tricky to do reliably, so we'll just prevent a
1382  // drain if there is anything pending in the
1383  // guest. X86KvmCPU::kvmRunDrain() minimizes the amount of code
1384  // executed in the guest by requesting an interrupt window if
1385  // there are pending interrupts.
1386  const bool pending_events(events.exception.injected ||
1387  events.interrupt.injected ||
1388  events.nmi.injected || events.nmi.pending);
1389 
1390  if (pending_events) {
1391  DPRINTF(Drain, "archIsDrained: Pending events: %s %s %s %s\n",
1392  events.exception.injected ? "exception" : "",
1393  events.interrupt.injected ? "interrupt" : "",
1394  events.nmi.injected ? "nmi[i]" : "",
1395  events.nmi.pending ? "nmi[p]" : "");
1396  }
1397 
1398  return !pending_events;
1399 }
1400 
1401 void
1403 {
1404  struct kvm_run &kvm_run(*getKvmRunState());
1405 
1406  // Synchronize the APIC base and CR8 here since they are present
1407  // in the kvm_run struct, which makes the synchronization really
1408  // cheap.
1409  kvm_run.apic_base = tc->readMiscReg(MISCREG_APIC_BASE);
1410  kvm_run.cr8 = tc->readMiscReg(MISCREG_CR8);
1411 
1413 
1414  tc->setMiscReg(MISCREG_APIC_BASE, kvm_run.apic_base);
1415  kvm_run.cr8 = tc->readMiscReg(MISCREG_CR8);
1416 }
1417 
1418 static struct kvm_cpuid_entry2
1419 makeKvmCpuid(uint32_t function, uint32_t index,
1420  CpuidResult &result)
1421 {
1422  struct kvm_cpuid_entry2 e;
1423  e.function = function;
1424  e.index = index;
1425  e.flags = 0;
1426  e.eax = (uint32_t)result.rax;
1427  e.ebx = (uint32_t)result.rbx;
1428  e.ecx = (uint32_t)result.rcx;
1429  e.edx = (uint32_t)result.rdx;
1430 
1431  return e;
1432 }
1433 
1434 void
1436 {
1437  Kvm::CPUIDVector m5_supported;
1438 
1439  /* TODO: We currently don't support any of the functions that
1440  * iterate through data structures in the CPU using an index. It's
1441  * currently not a problem since M5 doesn't expose any of them at
1442  * the moment.
1443  */
1444 
1445  /* Basic features */
1446  CpuidResult func0;
1447  X86ISA::doCpuid(tc, 0x0, 0, func0);
1448  for (uint32_t function = 0; function <= func0.rax; ++function) {
1450  uint32_t idx(0);
1451 
1452  X86ISA::doCpuid(tc, function, idx, cpuid);
1453  m5_supported.push_back(makeKvmCpuid(function, idx, cpuid));
1454  }
1455 
1456  /* Extended features */
1457  CpuidResult efunc0;
1458  X86ISA::doCpuid(tc, 0x80000000, 0, efunc0);
1459  for (uint32_t function = 0x80000000; function <= efunc0.rax; ++function) {
1461  uint32_t idx(0);
1462 
1463  X86ISA::doCpuid(tc, function, idx, cpuid);
1464  m5_supported.push_back(makeKvmCpuid(function, idx, cpuid));
1465  }
1466 
1467  setCPUID(m5_supported);
1468 }
1469 
1470 void
1471 X86KvmCPU::setCPUID(const struct kvm_cpuid2 &cpuid)
1472 {
1473  if (ioctl(KVM_SET_CPUID2, (void *)&cpuid) == -1)
1474  panic("KVM: Failed to set guest CPUID2 (errno: %i)\n",
1475  errno);
1476 }
1477 
1478 void
1479 X86KvmCPU::setCPUID(const Kvm::CPUIDVector &cpuid)
1480 {
1481  auto kvm_cpuid = newVarStruct<struct kvm_cpuid2, struct kvm_cpuid_entry2>(
1482  cpuid.size());
1483 
1484  kvm_cpuid->nent = cpuid.size();
1485  std::copy(cpuid.begin(), cpuid.end(), kvm_cpuid->entries);
1486 
1487  setCPUID(*kvm_cpuid);
1488 }
1489 
1490 void
1491 X86KvmCPU::setMSRs(const struct kvm_msrs &msrs)
1492 {
1493  if (ioctl(KVM_SET_MSRS, (void *)&msrs) == -1)
1494  panic("KVM: Failed to set guest MSRs (errno: %i)\n",
1495  errno);
1496 }
1497 
1498 void
1500 {
1501  auto kvm_msrs = newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(
1502  msrs.size());
1503 
1504  kvm_msrs->nmsrs = msrs.size();
1505  std::copy(msrs.begin(), msrs.end(), kvm_msrs->entries);
1506 
1507  setMSRs(*kvm_msrs);
1508 }
1509 
1510 void
1511 X86KvmCPU::getMSRs(struct kvm_msrs &msrs) const
1512 {
1513  if (ioctl(KVM_GET_MSRS, (void *)&msrs) == -1)
1514  panic("KVM: Failed to get guest MSRs (errno: %i)\n",
1515  errno);
1516 }
1517 
1518 
1519 void
1520 X86KvmCPU::setMSR(uint32_t index, uint64_t value)
1521 {
1522  auto kvm_msrs = newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(1);
1523  struct kvm_msr_entry &entry(kvm_msrs->entries[0]);
1524 
1525  kvm_msrs->nmsrs = 1;
1526  entry.index = index;
1527  entry.reserved = 0;
1528  entry.data = value;
1529 
1530  setMSRs(*kvm_msrs.get());
1531 }
1532 
1533 uint64_t
1534 X86KvmCPU::getMSR(uint32_t index) const
1535 {
1536  auto kvm_msrs = newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(1);
1537  struct kvm_msr_entry &entry(kvm_msrs->entries[0]);
1538 
1539  kvm_msrs->nmsrs = 1;
1540  entry.index = index;
1541  entry.reserved = 0;
1542  entry.data = 0;
1543 
1544  getMSRs(*kvm_msrs.get());
1545  return entry.data;
1546 }
1547 
1548 const Kvm::MSRIndexVector &
1550 {
1551  if (cachedMsrIntersection.empty()) {
1552  const Kvm::MSRIndexVector &kvm_msrs(vm.kvm->getSupportedMSRs());
1553 
1554  DPRINTF(Kvm, "kvm-x86: Updating MSR intersection\n");
1555  for (auto it = kvm_msrs.cbegin(); it != kvm_msrs.cend(); ++it) {
1556  if (X86ISA::msrMap.find(*it) != X86ISA::msrMap.end()) {
1557  cachedMsrIntersection.push_back(*it);
1558  DPRINTF(Kvm, "kvm-x86: Adding MSR 0x%x\n", *it);
1559  } else {
1560  warn("kvm-x86: MSR (0x%x) unsupported by gem5. Skipping.\n",
1561  *it);
1562  }
1563  }
1564  }
1565 
1566  return cachedMsrIntersection;
1567 }
1568 
1569 void
1570 X86KvmCPU::getDebugRegisters(struct kvm_debugregs &regs) const
1571 {
1572 #ifdef KVM_GET_DEBUGREGS
1573  if (ioctl(KVM_GET_DEBUGREGS, &regs) == -1)
1574  panic("KVM: Failed to get guest debug registers\n");
1575 #else
1576  panic("KVM: Unsupported getDebugRegisters call.\n");
1577 #endif
1578 }
1579 
1580 void
1581 X86KvmCPU::setDebugRegisters(const struct kvm_debugregs &regs)
1582 {
1583 #ifdef KVM_SET_DEBUGREGS
1584  if (ioctl(KVM_SET_DEBUGREGS, (void *)&regs) == -1)
1585  panic("KVM: Failed to set guest debug registers\n");
1586 #else
1587  panic("KVM: Unsupported setDebugRegisters call.\n");
1588 #endif
1589 }
1590 
1591 void
1592 X86KvmCPU::getXCRs(struct kvm_xcrs &regs) const
1593 {
1594  if (ioctl(KVM_GET_XCRS, &regs) == -1)
1595  panic("KVM: Failed to get guest debug registers\n");
1596 }
1597 
1598 void
1599 X86KvmCPU::setXCRs(const struct kvm_xcrs &regs)
1600 {
1601  if (ioctl(KVM_SET_XCRS, (void *)&regs) == -1)
1602  panic("KVM: Failed to set guest debug registers\n");
1603 }
1604 
1605 void
1606 X86KvmCPU::getXSave(struct kvm_xsave &xsave) const
1607 {
1608  if (ioctl(KVM_GET_XSAVE, &xsave) == -1)
1609  panic("KVM: Failed to get guest debug registers\n");
1610 }
1611 
1612 void
1613 X86KvmCPU::setXSave(const struct kvm_xsave &xsave)
1614 {
1615  if (ioctl(KVM_SET_XSAVE, (void *)&xsave) == -1)
1616  panic("KVM: Failed to set guest debug registers\n");
1617 }
1618 
1619 
1620 void
1621 X86KvmCPU::getVCpuEvents(struct kvm_vcpu_events &events) const
1622 {
1623  if (ioctl(KVM_GET_VCPU_EVENTS, &events) == -1)
1624  panic("KVM: Failed to get guest debug registers\n");
1625 }
1626 
1627 void
1628 X86KvmCPU::setVCpuEvents(const struct kvm_vcpu_events &events)
1629 {
1630  if (ioctl(KVM_SET_VCPU_EVENTS, (void *)&events) == -1)
1631  panic("KVM: Failed to set guest debug registers\n");
1632 }
1633 
1634 } // namespace gem5
gem5::dumpKvm
static void dumpKvm(const struct kvm_regs &regs)
Definition: x86_cpu.cc:181
gem5::X86KvmCPU::dumpDebugRegs
void dumpDebugRegs() const
Definition: x86_cpu.cc:622
gem5::FXSave::fpu_cs
uint16_t fpu_cs
Definition: x86_cpu.cc:87
gem5::FXSave::last_opcode
uint16_t last_opcode
Definition: x86_cpu.cc:81
gem5::X86ISA::MISCREG_ES
@ MISCREG_ES
Definition: misc.hh:302
gem5::BaseKvmCPU::_status
Status _status
CPU run state.
Definition: base.hh:240
gem5::X86ISA::loadFloat80
double loadFloat80(const void *_mem)
Load an 80-bit float from memory and convert it to double.
Definition: utility.cc:156
gem5::Kvm::capSetTSSAddress
bool capSetTSSAddress() const
Support for KvmVM::setTSSAddress()
Definition: vm.cc:114
gem5::PCStateBase::instAddr
Addr instAddr() const
Returns the memory address of the instruction this PC points to.
Definition: pcstate.hh:107
gem5::X86KvmCPU::setCPUID
void setCPUID(const struct kvm_cpuid2 &cpuid)
Methods to access CPUID information using the extended API.
Definition: x86_cpu.cc:1471
gem5::X86KvmCPU::updateThreadContextRegs
void updateThreadContextRegs(const struct kvm_regs &regs, const struct kvm_sregs &sregs)
Support routines to update the state of gem5's thread context from KVM's state representation.
Definition: x86_cpu.cc:984
gem5::ThreadContext::readMiscReg
virtual RegVal readMiscReg(RegIndex misc_reg)=0
gem5::X86ISA::MISCREG_M5_REG
@ MISCREG_M5_REG
Definition: misc.hh:143
gem5::Kvm::capUserNMI
bool capUserNMI() const
Support for BaseKvmCPU::kvmNonMaskableInterrupt().
Definition: vm.cc:126
gem5::FXSave::fcw
uint16_t fcw
Definition: x86_cpu.cc:77
gem5::X86ISA::MISCREG_SEG_SEL
static MiscRegIndex MISCREG_SEG_SEL(int index)
Definition: misc.hh:511
gem5::updateKvmStateFPUCommon
static void updateKvmStateFPUCommon(ThreadContext *tc, T &fpu)
Definition: x86_cpu.cc:836
gem5::X86ISA::MISCREG_DS
@ MISCREG_DS
Definition: misc.hh:305
warn
#define warn(...)
Definition: logging.hh:246
gem5::X86ISA::MISCREG_TR
@ MISCREG_TR
Definition: misc.hh:313
gem5::X86KvmCPU::dumpSpecRegs
void dumpSpecRegs() const
Definition: x86_cpu.cc:614
gem5::X86KvmCPU::startup
void startup() override
Definition: x86_cpu.cc:569
gem5::X86KvmCPU::dumpVCpuEvents
void dumpVCpuEvents() const
Definition: x86_cpu.cc:660
gem5::X86ISA::msrMap
const MsrMap msrMap
Map between MSR addresses and their corresponding misc registers.
x86_cpu.hh
gem5::forceSegAccessed
static void forceSegAccessed(struct kvm_segment &seg)
Definition: x86_cpu.cc:755
gem5::X86KvmCPU::haveXSave
bool haveXSave
Kvm::capXSave() available?
Definition: x86_cpu.hh:260
data
const char data[]
Definition: circlebuf.test.cc:48
gem5::MipsISA::cpuid
Bitfield< 28, 21 > cpuid
Definition: dt_constants.hh:95
gem5::ThreadContext::readFloatReg
virtual RegVal readFloatReg(RegIndex reg_idx) const =0
gem5::X86ISA::ZEBit
@ ZEBit
Definition: misc.hh:91
gem5::X86KvmCPU::dumpXCRs
void dumpXCRs() const
Definition: x86_cpu.cc:636
gem5::X86ISA::e
Bitfield< 11 > e
Definition: misc.hh:759
gem5::X86ISA::MISCREG_TSL
@ MISCREG_TSL
Definition: misc.hh:309
gem5::X86KvmCPU::~X86KvmCPU
virtual ~X86KvmCPU()
Definition: x86_cpu.cc:564
gem5::X86KvmCPU::dumpIntRegs
void dumpIntRegs() const
Definition: x86_cpu.cc:606
gem5::X86ISA::MISCREG_CR8
@ MISCREG_CR8
Definition: misc.hh:119
gem5::FXSave::pad1
uint16_t pad1
Definition: x86_cpu.cc:88
gem5::X86ISA::IEBit
@ IEBit
Definition: misc.hh:89
gem5::BaseKvmCPU::ioctl
int ioctl(int request, long p1) const
vCPU ioctl interface.
Definition: base.cc:1156
gem5::MipsISA::index
Bitfield< 30, 0 > index
Definition: pra_constants.hh:47
warn_once
#define warn_once(...)
Definition: logging.hh:250
gem5::X86ISA::CC2Bit
@ CC2Bit
Definition: misc.hh:101
gem5::X86ISA::MISCREG_APIC_BASE
@ MISCREG_APIC_BASE
Definition: misc.hh:399
gem5::X86KvmCPU::handleIOMiscReg32
void handleIOMiscReg32(int miscreg)
Handle a 32-bit IO access that should be mapped to a MiscReg.
Definition: x86_cpu.cc:1275
gem5::X86ISA::ErrSummaryBit
@ ErrSummaryBit
Definition: misc.hh:98
gem5::X86KvmCPU::getDebugRegisters
void getDebugRegisters(struct kvm_debugregs &regs) const
Wrappers around KVM's state transfer methods.
Definition: x86_cpu.cc:1570
gem5::BaseKvmCPU::deviceEventQueue
EventQueue * deviceEventQueue()
Get a pointer to the event queue owning devices.
Definition: base.hh:447
gem5::BaseKvmCPU::kvmInterrupt
void kvmInterrupt(const struct kvm_interrupt &interrupt)
Send a normal interrupt to the guest.
Definition: base.cc:802
gem5::BaseKvmCPU::syncThreadContext
void syncThreadContext()
Update a thread context if the KVM state is dirty with respect to the cached thread context.
Definition: base.cc:934
gem5::X86KvmCPU::updateKvmStateFPU
void updateKvmStateFPU()
Update FPU and SIMD registers.
Definition: x86_cpu.cc:919
gem5::X86KvmCPU::updateKvmStateRegs
void updateKvmStateRegs()
Support routines to update the state of the KVM CPU from gem5's state representation.
Definition: x86_cpu.cc:700
gem5::ArmISA::attr
attr
Definition: misc_types.hh:656
gem5::X86ISA::MISCREG_SEG_LIMIT
static MiscRegIndex MISCREG_SEG_LIMIT(int index)
Definition: misc.hh:532
gem5::X86KvmCPU::setMSRs
void setMSRs(const struct kvm_msrs &msrs)
Methods to access MSRs in the guest.
Definition: x86_cpu.cc:1491
gem5::X86KvmCPU::setDebugRegisters
void setDebugRegisters(const struct kvm_debugregs &regs)
Definition: x86_cpu.cc:1581
gem5::ThreadContext::pcState
virtual const PCStateBase & pcState() const =0
gem5::X86ISA::MISCREG_SEG_ATTR
static MiscRegIndex MISCREG_SEG_ATTR(int index)
Definition: misc.hh:539
gem5::X86ISA::CC0Bit
@ CC0Bit
Definition: misc.hh:99
gem5::bitsToFloat64
static double bitsToFloat64(uint64_t val)
Definition: types.hh:225
gem5::X86KvmCPU::updateThreadContextXSave
void updateThreadContextXSave(const struct kvm_xsave &kxsave)
Update FPU and SIMD registers using the XSave API.
Definition: x86_cpu.cc:1100
gem5::X86ISA::InitInterrupt
Definition: faults.hh:350
gem5::X86ISA::x86PciConfigAddress
static Addr x86PciConfigAddress(const uint32_t addr)
Definition: x86_traits.hh:82
gem5::ArmISA::e
Bitfield< 9 > e
Definition: misc_types.hh:65
gem5::X86ISA::MISCREG_FIOFF
@ MISCREG_FIOFF
Definition: misc.hh:392
interrupts.hh
gem5::BaseKvmCPU::getKvmRunState
struct kvm_run * getKvmRunState()
Get a pointer to the kvm_run structure containing all the input and output parameters from kvmRun().
Definition: base.hh:317
gem5::X86ISA::MISCREG_FISEG
@ MISCREG_FISEG
Definition: misc.hh:391
gem5::FXSave::fpu_ip
uint32_t fpu_ip
Definition: x86_cpu.cc:86
gem5::X86ISA::MISCREG_FOOFF
@ MISCREG_FOOFF
Definition: misc.hh:394
gem5::X86KvmCPU::updateCPUID
void updateCPUID()
Transfer gem5's CPUID values into the virtual CPU.
Definition: x86_cpu.cc:1435
top
Definition: test.h:61
gem5::ThreadContext::contextId
virtual ContextID contextId() const =0
gem5::X86ISA::FLOATREG_XMM_LOW
static FloatRegIndex FLOATREG_XMM_LOW(int index)
Definition: float.hh:132
gem5::X86ISA::getRFlags
uint64_t getRFlags(ThreadContext *tc)
Reconstruct the rflags register from the internal gem5 register state.
Definition: utility.cc:58
gem5::Kvm::capExtendedCPUID
bool capExtendedCPUID() const
Support for BaseKvmCPU::setCPUID2 and getSupportedCPUID().
Definition: vm.cc:120
std::vector
STL vector class.
Definition: stl.hh:37
gem5::X86ISA::MISCREG_X87_TOP
@ MISCREG_X87_TOP
Definition: misc.hh:383
IO_PCI_CONF_ADDR
#define IO_PCI_CONF_ADDR
Definition: x86_cpu.cc:58
gem5::X86KvmCPU::updateThreadContextFPU
void updateThreadContextFPU(const struct kvm_fpu &fpu)
Update FPU and SIMD registers using the legacy API.
Definition: x86_cpu.cc:1089
gem5::X86ISA::UEBit
@ UEBit
Definition: misc.hh:93
gem5::X86KvmCPU::dumpXSave
void dumpXSave() const
Definition: x86_cpu.cc:648
gem5::X86KvmCPU::cachedMsrIntersection
Kvm::MSRIndexVector cachedMsrIntersection
Cached intersection of supported MSRs.
Definition: x86_cpu.hh:254
gem5::BaseKvmCPU::setFPUState
void setFPUState(const struct kvm_fpu &state)
Definition: base.cc:845
gem5::ArmISA::i
Bitfield< 7 > i
Definition: misc_types.hh:67
faults.hh
gem5::RiscvISA::xs
Bitfield< 16, 15 > xs
Definition: misc.hh:557
gem5::X86ISA::MISCREG_GS
@ MISCREG_GS
Definition: misc.hh:307
gem5::Kvm::capXCRs
bool capXCRs() const
Support for getting and setting the x86 XCRs.
Definition: vm.cc:188
gem5::X86ISA::storeFloat80
void storeFloat80(void *_mem, double value)
Convert and store a double as an 80-bit float.
Definition: utility.cc:165
gem5::Kvm::capXSave
bool capXSave() const
Support for getting and setting the kvm_xsave structure.
Definition: vm.cc:198
FOREACH_IREG
#define FOREACH_IREG()
Definition: x86_cpu.cc:111
hack
#define hack(...)
Definition: logging.hh:248
gem5::X86ISA::BusyBit
@ BusyBit
Definition: misc.hh:103
gem5::X86KvmCPU::setMSR
void setMSR(uint32_t index, uint64_t value)
Definition: x86_cpu.cc:1520
gem5::BaseKvmCPU::getFPUState
void getFPUState(struct kvm_fpu &state) const
Get/Set the guest FPU/vector state.
Definition: base.cc:838
gem5::X86KvmCPU::kvmRunDrain
Tick kvmRunDrain() override
Run the virtual CPU until draining completes.
Definition: x86_cpu.cc:1244
gem5::X86KvmCPU::ioctlRun
void ioctlRun() override
Override for synchronizing state in kvm_run.
Definition: x86_cpu.cc:1402
gem5::BaseKvmCPU::kvmNonMaskableInterrupt
void kvmNonMaskableInterrupt()
Send a non-maskable interrupt to the guest.
Definition: base.cc:794
gem5::X86ISA::DEBit
@ DEBit
Definition: misc.hh:90
gem5::FXSave::mxcsr_mask
uint32_t mxcsr_mask
Definition: x86_cpu.cc:101
FOREACH_DTABLE
#define FOREACH_DTABLE()
Definition: x86_cpu.cc:164
gem5::X86ISA::OEBit
@ OEBit
Definition: misc.hh:92
gem5::ArmISA::at
Bitfield< 35, 32 > at
Definition: misc_types.hh:155
gem5::MemCmd
Definition: packet.hh:75
gem5::X86KvmCPU::archIsDrained
bool archIsDrained() const override
Check if there are pending events in the vCPU that prevents it from being drained.
Definition: x86_cpu.cc:1373
gem5::X86ISA::CpuidResult
Definition: cpuid.hh:41
gem5::FXSave::fpu_ip
uint64_t fpu_ip
Definition: x86_cpu.cc:96
gem5::Packet::dataStatic
void dataStatic(T *p)
Set the data pointer to the following value that should not be freed.
Definition: packet.hh:1134
reserved
reserved
Definition: pcireg.h:54
gem5::X86KvmCPU::getMsrIntersection
const Kvm::MSRIndexVector & getMsrIntersection() const
Get a list of MSRs supported by both gem5 and KVM.
Definition: x86_cpu.cc:1549
gem5::X86ISA::MISCREG_CS_BASE
@ MISCREG_CS_BASE
Definition: misc.hh:319
FOREACH_DREG
#define FOREACH_DREG()
Definition: x86_cpu.cc:142
gem5::X86ISA::CC3Bit
@ CC3Bit
Definition: misc.hh:102
gem5::BaseKvmCPU::thread
SimpleThread * thread
A cached copy of a thread's state in the form of a SimpleThread object.
Definition: base.hh:153
gem5::ArmISA::j
Bitfield< 24 > j
Definition: misc_types.hh:57
gem5::FXSave::fpu_dp
uint64_t fpu_dp
Definition: x86_cpu.cc:97
gem5::X86ISA::MISCREG_FOP
@ MISCREG_FOP
Definition: misc.hh:395
gem5::X86ISA::MISCREG_FOSEG
@ MISCREG_FOSEG
Definition: misc.hh:393
gem5::X86KvmCPU::haveDebugRegs
bool haveDebugRegs
Kvm::capDebugRegs() available?
Definition: x86_cpu.hh:258
gem5::BaseKvmCPU::startup
void startup() override
Definition: base.cc:116
gem5::BaseKvmCPU::ioctlRun
virtual void ioctlRun()
Execute the KVM_RUN ioctl.
Definition: base.cc:1322
gem5::ThreadContext
ThreadContext is the external interface to all thread state for anything outside of the CPU.
Definition: thread_context.hh:94
gem5::Kvm::capVCPUEvents
bool capVCPUEvents() const
Support for getting and setting the kvm_vcpu_events structure.
Definition: vm.cc:168
gem5::X86KvmCPU::getMSR
uint64_t getMSR(uint32_t index) const
Definition: x86_cpu.cc:1534
gem5::Fault
std::shared_ptr< FaultBase > Fault
Definition: types.hh:255
gem5::X86KvmCPU::updateKvmStateSRegs
void updateKvmStateSRegs()
Update control registers (CRx, segments, etc.)
Definition: x86_cpu.cc:767
MSR_TSC
#define MSR_TSC
Definition: x86_cpu.cc:56
DPRINTF
#define DPRINTF(x,...)
Definition: trace.hh:186
int.hh
gem5::X86ISA::count
count
Definition: misc.hh:709
SEG_TYPE_BIT_ACCESSED
#define SEG_TYPE_BIT_ACCESSED
Definition: x86_cpu.cc:73
gem5::Packet
A Packet is used to encapsulate a transfer between two objects in the memory system (e....
Definition: packet.hh:283
gem5::BaseKvmCPU
Base class for KVM based CPU models.
Definition: base.hh:87
gem5::BaseKvmCPU::tc
ThreadContext * tc
ThreadContext object, provides an interface for external objects to modify this thread's state.
Definition: base.hh:158
gem5::FXSave::pad0
uint8_t pad0
Definition: x86_cpu.cc:80
msr.hh
gem5::MipsISA::PCState
GenericISA::DelaySlotPCState< 4 > PCState
Definition: pcstate.hh:40
gem5::probing::Packet
ProbePointArg< PacketInfo > Packet
Packet probe point.
Definition: mem.hh:109
gem5::Tick
uint64_t Tick
Tick count type.
Definition: types.hh:58
gem5::X86ISA::StackFaultBit
@ StackFaultBit
Definition: misc.hh:97
gem5::RequestPtr
std::shared_ptr< Request > RequestPtr
Definition: request.hh:92
gem5::setKvmSegmentReg
static void setKvmSegmentReg(ThreadContext *tc, struct kvm_segment &kvm_seg, const int index)
Definition: x86_cpu.cc:722
gem5::X86ISA::MISCREG_FTAG
@ MISCREG_FTAG
Definition: misc.hh:390
gem5::X86ISA::X86FaultBase::getVector
virtual uint8_t getVector() const
Get the vector of an interrupt.
Definition: faults.hh:86
gem5::MemCmd::ReadReq
@ ReadReq
Definition: packet.hh:86
SEG_SYS_TYPE_TSS_BUSY
#define SEG_SYS_TYPE_TSS_BUSY
Definition: x86_cpu.cc:64
gem5::FXSave::pad2
uint16_t pad2
Definition: x86_cpu.cc:91
gem5::dumpFpuCommon
static void dumpFpuCommon(const T &fpu)
Definition: x86_cpu.cc:273
gem5::FXSave::mxcsr
uint32_t mxcsr
Definition: x86_cpu.cc:100
gem5::X86ISA::FLOATREG_XMM_HIGH
static FloatRegIndex FLOATREG_XMM_HIGH(int index)
Definition: float.hh:138
gem5::BaseKvmCPU::getSpecialRegisters
void getSpecialRegisters(struct kvm_sregs &regs) const
Definition: base.cc:824
gem5::X86ISA::MISCREG_PCI_CONFIG_ADDRESS
@ MISCREG_PCI_CONFIG_ADDRESS
Definition: misc.hh:402
gem5::EventQueue::ScopedMigration
Definition: eventq.hh:672
utility.hh
gem5::X86ISA::Interrupts
Definition: interrupts.hh:77
gem5::BaseKvmCPU::vm
KvmVM & vm
Definition: base.hh:160
compiler.hh
gem5::ThreadContext::setFloatReg
virtual void setFloatReg(RegIndex reg_idx, RegVal val)=0
gem5::X86ISA::MISCREG_SEG_BASE
static MiscRegIndex MISCREG_SEG_BASE(int index)
Definition: misc.hh:518
gem5::X86KvmCPU::updateKvmStateMSRs
void updateKvmStateMSRs()
Update MSR registers.
Definition: x86_cpu.cc:928
gem5::X86ISA::MISCREG_RFLAGS
@ MISCREG_RFLAGS
Definition: misc.hh:140
gem5::X86KvmCPU::setXCRs
void setXCRs(const struct kvm_xcrs &regs)
Definition: x86_cpu.cc:1599
gem5::makeKvmCpuid
static struct kvm_cpuid_entry2 makeKvmCpuid(uint32_t function, uint32_t index, CpuidResult &result)
Definition: x86_cpu.cc:1419
gem5::setContextSegment
void setContextSegment(ThreadContext *tc, const struct kvm_segment &kvm_seg, const int index)
Definition: x86_cpu.cc:1002
gem5::X86ISA::MISCREG_FS
@ MISCREG_FS
Definition: misc.hh:306
gem5::updateThreadContextFPUCommon
static void updateThreadContextFPUCommon(ThreadContext *tc, const T &fpu)
Definition: x86_cpu.cc:1056
gem5::ThreadContext::readMiscRegNoEffect
virtual RegVal readMiscRegNoEffect(RegIndex misc_reg) const =0
gem5::X86KvmCPU::getMSRs
void getMSRs(struct kvm_msrs &msrs) const
Definition: x86_cpu.cc:1511
IO_PCI_CONF_DATA_BASE
#define IO_PCI_CONF_DATA_BASE
Definition: x86_cpu.cc:59
gem5::X86ISA::convX87XTagsToTags
uint16_t convX87XTagsToTags(uint8_t ftwx)
Convert an x87 xtag word to normal tags format.
Definition: utility.cc:115
gem5::sim_clock::as_float::ms
double ms
millisecond
Definition: core.cc:54
gem5::X86KvmCPU::dumpFpuRegs
void dumpFpuRegs() const
Definition: x86_cpu.cc:598
SEG_CS_TYPE_ACCESSED
#define SEG_CS_TYPE_ACCESSED
Definition: x86_cpu.cc:67
gem5::X86KvmCPU::getHostCycles
uint64_t getHostCycles() const override
Get the value of the hardware cycle counter in the guest.
Definition: x86_cpu.cc:1269
SEG_CS_TYPE_READ_ACCESSED
#define SEG_CS_TYPE_READ_ACCESSED
Definition: x86_cpu.cc:69
gem5::auxv::Entry
@ Entry
Definition: aux_vector.hh:76
gem5::Addr
uint64_t Addr
Address type This will probably be moved somewhere else in the near future.
Definition: types.hh:147
gem5::X86KvmCPU::handleKvmExitIO
Tick handleKvmExitIO() override
Handle x86 legacy IO (in/out)
Definition: x86_cpu.cc:1300
gem5::X86ISA::MISCREG_SS
@ MISCREG_SS
Definition: misc.hh:304
gem5::checkSeg
static void checkSeg(const char *name, const int idx, const struct kvm_segment &seg, struct kvm_sregs sregs)
Definition: x86_cpu.cc:394
gem5::X86ISA::MISCREG_MXCSR
@ MISCREG_MXCSR
Definition: misc.hh:386
gem5::X86KvmCPU::updateThreadContextMSRs
void updateThreadContextMSRs()
Update MSR registers.
Definition: x86_cpu.cc:1113
gem5::X86ISA::X86Interrupt
Definition: faults.hh:119
gem5::BaseKvmCPU::Idle
@ Idle
Context not scheduled in KVM.
Definition: base.hh:199
name
const std::string & name()
Definition: trace.cc:49
base.hh
gem5::X86ISA::setRFlags
void setRFlags(ThreadContext *tc, uint64_t val)
Set update the rflags register and internal gem5 state.
Definition: utility.cc:74
gem5::BaseKvmCPU::setRegisters
void setRegisters(const struct kvm_regs &regs)
Definition: base.cc:817
gem5::KvmVM::kvm
Kvm * kvm
Global KVM interface.
Definition: vm.hh:417
gem5::X86ISA::doCpuid
bool doCpuid(ThreadContext *tc, uint32_t function, uint32_t index, CpuidResult &result)
Definition: cpuid.cc:91
gem5::X86ISA::convX87TagsToXTags
uint8_t convX87TagsToXTags(uint16_t ftw)
Convert an x87 tag word to abridged tag format.
Definition: utility.cc:90
gem5::X86KvmCPU::updateKvmState
void updateKvmState() override
Update the KVM state from the current thread context.
Definition: x86_cpu.cc:687
gem5::X86ISA::MISCREG_FCW
@ MISCREG_FCW
Definition: misc.hh:387
gem5::X86ISA::MISCREG_CS
@ MISCREG_CS
Definition: misc.hh:303
gem5::X86ISA::FLOATREG_FPR
static FloatRegIndex FLOATREG_FPR(int index)
Definition: float.hh:126
gem5::X86KvmCPU::getVCpuEvents
void getVCpuEvents(struct kvm_vcpu_events &events) const
Definition: x86_cpu.cc:1621
gem5::X86KvmCPU::getXSave
void getXSave(struct kvm_xsave &xsave) const
Definition: x86_cpu.cc:1606
gem5::X86KvmCPU::deliverInterrupts
void deliverInterrupts()
Inject pending interrupts from gem5 into the virtual CPU.
Definition: x86_cpu.cc:1143
gem5::X86KvmCPU::dump
void dump() const override
Dump the internal state to the terminal.
Definition: x86_cpu.cc:583
gem5::X86KvmCPU::haveXCRs
bool haveXCRs
Kvm::capXCRs() available?
Definition: x86_cpu.hh:267
gem5::FXSave::fpu_dp
uint32_t fpu_dp
Definition: x86_cpu.cc:89
gem5::X86ISA::StartupInterrupt
Definition: faults.hh:361
gem5::X86KvmCPU::updateKvmStateFPUXSave
void updateKvmStateFPUXSave()
Update FPU and SIMD registers using the XSave API.
Definition: x86_cpu.cc:894
inform
#define inform(...)
Definition: logging.hh:247
FOREACH_SEGMENT
#define FOREACH_SEGMENT()
Definition: x86_cpu.cc:152
gem5::Kvm::capDebugRegs
bool capDebugRegs() const
Support for getting and setting the kvm_debugregs structure.
Definition: vm.cc:178
gem5::SimpleThread::suspend
void suspend() override
Set the status to Suspended.
Definition: simple_thread.cc:144
gem5::floatToBits64
static uint64_t floatToBits64(double val)
Definition: types.hh:198
gem5::ThreadContext::setMiscReg
virtual void setMiscReg(RegIndex misc_reg, RegVal val)=0
gem5::dumpFpuSpec
static void dumpFpuSpec(const struct FXSave &xs)
Definition: x86_cpu.cc:257
gem5::X86ISA::seg
Bitfield< 2, 0 > seg
Definition: types.hh:87
gem5::X86KvmCPU::updateKvmStateFPULegacy
void updateKvmStateFPULegacy()
Update FPU and SIMD registers using the legacy API.
Definition: x86_cpu.cc:870
gem5::MemCmd::WriteReq
@ WriteReq
Definition: packet.hh:89
gem5::FXSave::ftwx
uint8_t ftwx
Definition: x86_cpu.cc:79
gem5::BaseKvmCPU::KVMCpuPort::submitIO
Tick submitIO(PacketPtr pkt)
Interface to send Atomic or Timing IO request.
Definition: base.cc:170
gem5::X86ISA::MISCREG_FSW
@ MISCREG_FSW
Definition: misc.hh:388
gem5::Request::UNCACHEABLE
@ UNCACHEABLE
The request is to an uncacheable address.
Definition: request.hh:125
gem5::X86KvmCPU::X86KvmCPU
X86KvmCPU(const X86KvmCPUParams &params)
Definition: x86_cpu.cc:536
gem5::newVarStruct
static auto newVarStruct(size_t entries)
Definition: x86_cpu.cc:172
cpuid.hh
gem5::Kvm
KVM parent interface.
Definition: vm.hh:80
gem5::X86KvmCPU::kvmRun
Tick kvmRun(Tick ticks) override
Request KVM to run the guest for a given number of ticks.
Definition: x86_cpu.cc:1196
gem5::FXSave::fsw
uint16_t fsw
Definition: x86_cpu.cc:78
gem5::MipsISA::NonMaskableInterrupt
Definition: faults.hh:149
gem5::X86KvmCPU::handleKvmExitIRQWindowOpen
Tick handleKvmExitIRQWindowOpen() override
The guest exited because an interrupt window was requested.
Definition: x86_cpu.cc:1364
gem5::X86ISA::p
Bitfield< 0 > p
Definition: pagetable.hh:151
gem5::X86KvmCPU::setXSave
void setXSave(const struct kvm_xsave &xsave)
Definition: x86_cpu.cc:1613
gem5::BaseKvmCPU::kvmRun
virtual Tick kvmRun(Tick ticks)
Request KVM to run the guest for a given number of ticks.
Definition: base.cc:697
gem5::BaseKvmCPU::threadContextDirty
bool threadContextDirty
Is the gem5 context dirty? Set to true to force an update of the KVM vCPU state upon the next call to...
Definition: base.hh:648
gem5::X86KvmCPU::getXCRs
void getXCRs(struct kvm_xcrs &regs) const
Definition: x86_cpu.cc:1592
gem5::FXSave::ctrl64
struct gem5::FXSave::@18::@21 ctrl64
gem5::X86ISA::CpuidResult::rax
uint64_t rax
Definition: cpuid.hh:43
gem5::BaseKvmCPU::getRegisters
void getRegisters(struct kvm_regs &regs) const
Get/Set the register state of the guest vCPU.
Definition: base.cc:810
gem5::X86ISA::MISCREG_FTW
@ MISCREG_FTW
Definition: misc.hh:389
gem5::X86KvmCPU::updateThreadContextSRegs
void updateThreadContextSRegs(const struct kvm_sregs &sregs)
Update control registers (CRx, segments, etc.)
Definition: x86_cpu.cc:1038
gem5::FXSave::fpu_ds
uint16_t fpu_ds
Definition: x86_cpu.cc:90
gem5::isCanonicalAddress
static bool isCanonicalAddress(uint64_t addr)
Definition: x86_cpu.cc:383
gem5::BaseKvmCPU::dataPort
KVMCpuPort dataPort
Port for data requests.
Definition: base.hh:633
gem5::BaseKvmCPU::getGuestData
uint8_t * getGuestData(uint64_t offset) const
Retrieve a pointer to guest data stored at the end of the kvm_run structure.
Definition: base.hh:327
SEG_SYS_TYPE_TSS_AVAILABLE
#define SEG_SYS_TYPE_TSS_AVAILABLE
Definition: x86_cpu.cc:62
gem5
Reference material can be found at the JEDEC website: UFS standard http://www.jedec....
Definition: tlb.cc:60
gem5::X86KvmCPU::updateThreadContext
void updateThreadContext() override
Update the current thread context with the KVM state.
Definition: x86_cpu.cc:950
gem5::X86ISA::CC1Bit
@ CC1Bit
Definition: misc.hh:100
gem5::FXSave
Definition: x86_cpu.cc:75
gem5::X86ISA::x86IOAddress
static Addr x86IOAddress(const uint32_t port)
Definition: x86_traits.hh:76
gem5::X86KvmCPU::dumpMSRs
void dumpMSRs() const
Definition: x86_cpu.cc:668
FOREACH_SREG
#define FOREACH_SREG()
Definition: x86_cpu.cc:131
gem5::BaseKvmCPU::setSpecialRegisters
void setSpecialRegisters(const struct kvm_sregs &regs)
Definition: base.cc:831
gem5::X86KvmCPU::useXSave
bool useXSave
Should the XSave interface be used to sync the FPU and SIMD registers?
Definition: x86_cpu.hh:265
panic
#define panic(...)
This implements a cprintf based panic() function.
Definition: logging.hh:178
gem5::X86ISA::PEBit
@ PEBit
Definition: misc.hh:94
gem5::setKvmDTableReg
static void setKvmDTableReg(ThreadContext *tc, struct kvm_dtable &kvm_dtable, const int index)
Definition: x86_cpu.cc:747
gem5::X86ISA::addr
Bitfield< 3 > addr
Definition: types.hh:84
gem5::X86KvmCPU::setVCpuEvents
void setVCpuEvents(const struct kvm_vcpu_events &events)
Definition: x86_cpu.cc:1628
gem5::ThreadContext::setMiscRegNoEffect
virtual void setMiscRegNoEffect(RegIndex misc_reg, RegVal val)=0

Generated on Wed May 4 2022 12:13:50 for gem5 by doxygen 1.8.17