gem5  v19.0.0.0
x86_cpu.cc
1 /*
2  * Copyright (c) 2013 Andreas Sandberg
3  * All rights reserved
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions are
7  * met: redistributions of source code must retain the above copyright
8  * notice, this list of conditions and the following disclaimer;
9  * redistributions in binary form must reproduce the above copyright
10  * notice, this list of conditions and the following disclaimer in the
11  * documentation and/or other materials provided with the distribution;
12  * neither the name of the copyright holders nor the names of its
13  * contributors may be used to endorse or promote products derived from
14  * this software without specific prior written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27  *
28  * Authors: Andreas Sandberg
29  */
30 
31 #include "cpu/kvm/x86_cpu.hh"
32 
33 #include <linux/kvm.h>
34 
35 #include <algorithm>
36 #include <cerrno>
37 #include <memory>
38 
39 #include "arch/registers.hh"
40 #include "arch/x86/cpuid.hh"
41 #include "arch/x86/faults.hh"
42 #include "arch/x86/interrupts.hh"
43 #include "arch/x86/regs/msr.hh"
44 #include "arch/x86/utility.hh"
45 #include "cpu/kvm/base.hh"
46 #include "debug/Drain.hh"
47 #include "debug/Kvm.hh"
48 #include "debug/KvmContext.hh"
49 #include "debug/KvmIO.hh"
50 #include "debug/KvmInt.hh"
51 
52 using namespace X86ISA;
53 
54 #define MSR_TSC 0x10
55 
56 #define IO_PCI_CONF_ADDR 0xCF8
57 #define IO_PCI_CONF_DATA_BASE 0xCFC
58 
59 // Task segment type of an inactive 32-bit or 64-bit task
60 #define SEG_SYS_TYPE_TSS_AVAILABLE 9
61 // Task segment type of an active 32-bit or 64-bit task
62 #define SEG_SYS_TYPE_TSS_BUSY 11
63 
64 // Non-conforming accessed code segment
65 #define SEG_CS_TYPE_ACCESSED 9
66 // Non-conforming accessed code segment that can be read
67 #define SEG_CS_TYPE_READ_ACCESSED 11
68 
69 // The lowest bit of the type field for normal segments (code and
70 // data) is used to indicate that a segment has been accessed.
71 #define SEG_TYPE_BIT_ACCESSED 1
72 
73 struct FXSave
74 {
75  uint16_t fcw;
76  uint16_t fsw;
77  uint8_t ftwx;
78  uint8_t pad0;
79  uint16_t last_opcode;
80  union {
81  struct {
82  uint32_t fpu_ip;
83  uint16_t fpu_cs;
84  uint16_t pad1;
85  uint32_t fpu_dp;
86  uint16_t fpu_ds;
87  uint16_t pad2;
88  } ctrl32;
89 
90  struct {
91  uint64_t fpu_ip;
92  uint64_t fpu_dp;
93  } ctrl64;
94  };
95  uint32_t mxcsr;
96  uint32_t mxcsr_mask;
97 
98  uint8_t fpr[8][16];
99  uint8_t xmm[16][16];
100 
101  uint64_t reserved[12];
102 } M5_ATTR_PACKED;
103 
104 static_assert(sizeof(FXSave) == 512, "Unexpected size of FXSave");
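// Illustrative sketch (not part of the original file): the FXSave overlay
// is cast directly on top of KVM's XSAVE buffer, whose first 512 bytes
// hold the legacy FXSAVE image. This mirrors the casts used further down
// in this file:
//
//     struct kvm_xsave kxsave;                // filled by KVM_GET_XSAVE
//     FXSave &fx(*(FXSave *)kxsave.region);   // legacy-region overlay
//     uint32_t mxcsr(fx.mxcsr);               // SSE control/status word
//     uint8_t *xmm0(fx.xmm[0]);               // 16 bytes of %xmm0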
105 
106 #define FOREACH_IREG() \
107  do { \
108  APPLY_IREG(rax, INTREG_RAX); \
109  APPLY_IREG(rbx, INTREG_RBX); \
110  APPLY_IREG(rcx, INTREG_RCX); \
111  APPLY_IREG(rdx, INTREG_RDX); \
112  APPLY_IREG(rsi, INTREG_RSI); \
113  APPLY_IREG(rdi, INTREG_RDI); \
114  APPLY_IREG(rsp, INTREG_RSP); \
115  APPLY_IREG(rbp, INTREG_RBP); \
116  APPLY_IREG(r8, INTREG_R8); \
117  APPLY_IREG(r9, INTREG_R9); \
118  APPLY_IREG(r10, INTREG_R10); \
119  APPLY_IREG(r11, INTREG_R11); \
120  APPLY_IREG(r12, INTREG_R12); \
121  APPLY_IREG(r13, INTREG_R13); \
122  APPLY_IREG(r14, INTREG_R14); \
123  APPLY_IREG(r15, INTREG_R15); \
124  } while (0)
125 
126 #define FOREACH_SREG() \
127  do { \
128  APPLY_SREG(cr0, MISCREG_CR0); \
129  APPLY_SREG(cr2, MISCREG_CR2); \
130  APPLY_SREG(cr3, MISCREG_CR3); \
131  APPLY_SREG(cr4, MISCREG_CR4); \
132  APPLY_SREG(cr8, MISCREG_CR8); \
133  APPLY_SREG(efer, MISCREG_EFER); \
134  APPLY_SREG(apic_base, MISCREG_APIC_BASE); \
135  } while (0)
136 
137 #define FOREACH_DREG() \
138  do { \
139  APPLY_DREG(db[0], MISCREG_DR0); \
140  APPLY_DREG(db[1], MISCREG_DR1); \
141  APPLY_DREG(db[2], MISCREG_DR2); \
142  APPLY_DREG(db[3], MISCREG_DR3); \
143  APPLY_DREG(dr6, MISCREG_DR6); \
144  APPLY_DREG(dr7, MISCREG_DR7); \
145  } while (0)
146 
147 #define FOREACH_SEGMENT() \
148  do { \
149  APPLY_SEGMENT(cs, MISCREG_CS - MISCREG_SEG_SEL_BASE); \
150  APPLY_SEGMENT(ds, MISCREG_DS - MISCREG_SEG_SEL_BASE); \
151  APPLY_SEGMENT(es, MISCREG_ES - MISCREG_SEG_SEL_BASE); \
152  APPLY_SEGMENT(fs, MISCREG_FS - MISCREG_SEG_SEL_BASE); \
153  APPLY_SEGMENT(gs, MISCREG_GS - MISCREG_SEG_SEL_BASE); \
154  APPLY_SEGMENT(ss, MISCREG_SS - MISCREG_SEG_SEL_BASE); \
155  APPLY_SEGMENT(tr, MISCREG_TR - MISCREG_SEG_SEL_BASE); \
156  APPLY_SEGMENT(ldt, MISCREG_TSL - MISCREG_SEG_SEL_BASE); \
157  } while (0)
158 
159 #define FOREACH_DTABLE() \
160  do { \
161  APPLY_DTABLE(gdt, MISCREG_TSG - MISCREG_SEG_SEL_BASE); \
162  APPLY_DTABLE(idt, MISCREG_IDTR - MISCREG_SEG_SEL_BASE); \
163  } while (0)
164 
165 template<typename STRUCT, typename ENTRY>
166 static STRUCT *newVarStruct(size_t entries)
167 {
168  return (STRUCT *)operator new(sizeof(STRUCT) + entries * sizeof(ENTRY));
169 }
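// A usage sketch (mirrors the call sites below): kvm_msrs ends in a
// flexible array of kvm_msr_entry, so newVarStruct() over-allocates raw
// storage for the header plus the requested number of trailing entries:
//
//     std::unique_ptr<struct kvm_msrs> msrs(
//         newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(1));
//     msrs->nmsrs = 1;
//     msrs->entries[0].index = MSR_TSC;   // see getMSR()/setMSR() below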
170 
171 static void
172 dumpKvm(const struct kvm_regs &regs)
173 {
174  inform("KVM register state:\n");
175 
176 #define APPLY_IREG(kreg, mreg) \
177  inform("\t" # kreg ": 0x%llx\n", regs.kreg)
178 
179  FOREACH_IREG();
180 
181 #undef APPLY_IREG
182 
183  inform("\trip: 0x%llx\n", regs.rip);
184  inform("\trflags: 0x%llx\n", regs.rflags);
185 }
186 
187 static void
188 dumpKvm(const char *reg_name, const struct kvm_segment &seg)
189 {
190  inform("\t%s: @0x%llx+%x [sel: 0x%x, type: 0x%x]\n"
191  "\t\tpres.: %u, dpl: %u, db: %u, s: %u, l: %u, g: %u, avl: %u, unus.: %u\n",
192  reg_name,
193  seg.base, seg.limit, seg.selector, seg.type,
194  seg.present, seg.dpl, seg.db, seg.s, seg.l, seg.g, seg.avl, seg.unusable);
195 }
196 
197 static void
198 dumpKvm(const char *reg_name, const struct kvm_dtable &dtable)
199 {
200  inform("\t%s: @0x%llx+%x\n",
201  reg_name, dtable.base, dtable.limit);
202 }
203 
204 static void
205 dumpKvm(const struct kvm_sregs &sregs)
206 {
207 #define APPLY_SREG(kreg, mreg) \
208  inform("\t" # kreg ": 0x%llx\n", sregs.kreg);
209 #define APPLY_SEGMENT(kreg, idx) \
210  dumpKvm(# kreg, sregs.kreg);
211 #define APPLY_DTABLE(kreg, idx) \
212  dumpKvm(# kreg, sregs.kreg);
213 
214  inform("Special registers:\n");
215  FOREACH_SEGMENT();
216  FOREACH_SREG();
217  FOREACH_DTABLE();
218 
219  inform("Interrupt Bitmap:");
220  for (int i = 0; i < KVM_NR_INTERRUPTS; i += 64)
221  inform(" 0x%.8x", sregs.interrupt_bitmap[i / 64]);
222 
223 #undef APPLY_SREG
224 #undef APPLY_SEGMENT
225 #undef APPLY_DTABLE
226 }
227 
228 #ifdef KVM_GET_DEBUGREGS
229 static void
230 dumpKvm(const struct kvm_debugregs &regs)
231 {
232  inform("KVM debug state:\n");
233 
234 #define APPLY_DREG(kreg, mreg) \
235  inform("\t" # kreg ": 0x%llx\n", regs.kreg)
236 
237  FOREACH_DREG();
238 
239 #undef APPLY_DREG
240 
241  inform("\tflags: 0x%llx\n", regs.flags);
242 }
243 #endif
244 
245 static void
246 dumpFpuSpec(const struct FXSave &xs)
247 {
248  inform("\tlast_ip: 0x%x\n", xs.ctrl64.fpu_ip);
249  inform("\tlast_dp: 0x%x\n", xs.ctrl64.fpu_dp);
250  inform("\tmxcsr_mask: 0x%x\n", xs.mxcsr_mask);
251 }
252 
253 static void
254 dumpFpuSpec(const struct kvm_fpu &fpu)
255 {
256  inform("\tlast_ip: 0x%x\n", fpu.last_ip);
257  inform("\tlast_dp: 0x%x\n", fpu.last_dp);
258 }
259 
260 template<typename T>
261 static void
262 dumpFpuCommon(const T &fpu)
263 {
264  const unsigned top((fpu.fsw >> 11) & 0x7);
265  inform("\tfcw: 0x%x\n", fpu.fcw);
266 
267  inform("\tfsw: 0x%x (top: %i, "
268  "conditions: %s%s%s%s, exceptions: %s%s%s%s%s%s %s%s%s)\n",
269  fpu.fsw, top,
270 
271  (fpu.fsw & CC0Bit) ? "C0" : "",
272  (fpu.fsw & CC1Bit) ? "C1" : "",
273  (fpu.fsw & CC2Bit) ? "C2" : "",
274  (fpu.fsw & CC3Bit) ? "C3" : "",
275 
276  (fpu.fsw & IEBit) ? "I" : "",
277  (fpu.fsw & DEBit) ? "D" : "",
278  (fpu.fsw & ZEBit) ? "Z" : "",
279  (fpu.fsw & OEBit) ? "O" : "",
280  (fpu.fsw & UEBit) ? "U" : "",
281  (fpu.fsw & PEBit) ? "P" : "",
282 
283  (fpu.fsw & StackFaultBit) ? "SF " : "",
284  (fpu.fsw & ErrSummaryBit) ? "ES " : "",
285  (fpu.fsw & BusyBit) ? "BUSY " : ""
286  );
287  inform("\tftwx: 0x%x\n", fpu.ftwx);
288  inform("\tlast_opcode: 0x%x\n", fpu.last_opcode);
289  dumpFpuSpec(fpu);
290  inform("\tmxcsr: 0x%x\n", fpu.mxcsr);
291  inform("\tFP Stack:\n");
292  for (int i = 0; i < 8; ++i) {
293  const unsigned reg_idx((i + top) & 0x7);
294  const bool empty(!((fpu.ftwx >> reg_idx) & 0x1));
295  const double value(X86ISA::loadFloat80(fpu.fpr[i]));
296  char hex[33];
297  for (int j = 0; j < 10; ++j)
298  snprintf(&hex[j*2], 3, "%.2x", fpu.fpr[i][j]);
299  inform("\t\tST%i/%i: 0x%s (%f)%s\n", i, reg_idx,
300  hex, value, empty ? " (e)" : "");
301  }
302  inform("\tXMM registers:\n");
303  for (int i = 0; i < 16; ++i) {
304  char hex[33];
305  for (int j = 0; j < 16; ++j)
306  snprintf(&hex[j*2], 3, "%.2x", fpu.xmm[i][j]);
307  inform("\t\t%i: 0x%s\n", i, hex);
308  }
309 }
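// Worked example for the TOP handling above (illustrative, not part of the
// original file): the x87 top-of-stack index lives in FSW bits 13..11, so
// fsw == 0x3800 gives top == (0x3800 >> 11) & 0x7 == 7. fpr[i] holds the
// logical stack entry ST(i), whose physical x87 register index is
// (i + top) & 0x7 -- the reg_idx computed in the loop above.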
310 
311 static void
312 dumpKvm(const struct kvm_fpu &fpu)
313 {
314  inform("FPU registers:\n");
315  dumpFpuCommon(fpu);
316 }
317 
318 static void
319 dumpKvm(const struct kvm_xsave &xsave)
320 {
321  inform("FPU registers (XSave):\n");
322  dumpFpuCommon(*(FXSave *)xsave.region);
323 }
324 
325 static void
326 dumpKvm(const struct kvm_msrs &msrs)
327 {
328  inform("MSRs:\n");
329 
330  for (int i = 0; i < msrs.nmsrs; ++i) {
331  const struct kvm_msr_entry &e(msrs.entries[i]);
332 
333  inform("\t0x%x: 0x%x\n", e.index, e.data);
334  }
335 }
336 
337 static void
338 dumpKvm(const struct kvm_xcrs &regs)
339 {
340  inform("KVM XCR registers:\n");
341 
342  inform("\tFlags: 0x%x\n", regs.flags);
343  for (int i = 0; i < regs.nr_xcrs; ++i) {
344  inform("\tXCR[0x%x]: 0x%x\n",
345  regs.xcrs[i].xcr,
346  regs.xcrs[i].value);
347  }
348 }
349 
350 static void
351 dumpKvm(const struct kvm_vcpu_events &events)
352 {
353  inform("vCPU events:\n");
354 
355  inform("\tException: [inj: %i, nr: %i, has_ec: %i, ec: %i]\n",
356  events.exception.injected, events.exception.nr,
357  events.exception.has_error_code, events.exception.error_code);
358 
359  inform("\tInterrupt: [inj: %i, nr: %i, soft: %i]\n",
360  events.interrupt.injected, events.interrupt.nr,
361  events.interrupt.soft);
362 
363  inform("\tNMI: [inj: %i, pending: %i, masked: %i]\n",
364  events.nmi.injected, events.nmi.pending,
365  events.nmi.masked);
366 
367  inform("\tSIPI vector: 0x%x\n", events.sipi_vector);
368  inform("\tFlags: 0x%x\n", events.flags);
369 }
370 
371 static bool
372 isCanonicalAddress(uint64_t addr)
373 {
374  // x86-64 doesn't currently use the full 64-bit virtual address
375  // space; instead, it uses signed 48-bit addresses that are
376  // sign-extended to 64 bits. Such addresses are known as
377  // "canonical".
378  uint64_t upper_half(addr & 0xffff800000000000ULL);
379  return upper_half == 0 || upper_half == 0xffff800000000000ULL;
380 }
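// Worked examples for the check above (illustrative): bits 63..47 must be
// all zeros or all ones for an address to be canonical:
//
//     isCanonicalAddress(0x00007fffffffffffULL);  // true, upper half == 0
//     isCanonicalAddress(0xffff800000000000ULL);  // true, sign-extended
//     isCanonicalAddress(0x0000800000000000ULL);  // false, non-canonical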
381 
382 static void
383 checkSeg(const char *name, const int idx, const struct kvm_segment &seg,
384  struct kvm_sregs sregs)
385 {
386  // Check the register base
387  switch (idx) {
388  case MISCREG_TSL:
389  case MISCREG_TR:
390  case MISCREG_FS:
391  case MISCREG_GS:
392  if (!isCanonicalAddress(seg.base))
393  warn("Illegal %s base: 0x%x\n", name, seg.base);
394  break;
395 
396  case MISCREG_SS:
397  case MISCREG_DS:
398  case MISCREG_ES:
399  if (seg.unusable)
400  break;
401  M5_FALLTHROUGH;
402  case MISCREG_CS:
403  if (seg.base & 0xffffffff00000000ULL)
404  warn("Illegal %s base: 0x%x\n", name, seg.base);
405  break;
406  }
407 
408  // Check the type
409  switch (idx) {
410  case MISCREG_CS:
411  switch (seg.type) {
412  case 3:
413  if (seg.dpl != 0)
414  warn("CS type is 3 but dpl != 0.\n");
415  break;
416  case 9:
417  case 11:
418  if (seg.dpl != sregs.ss.dpl)
419  warn("CS type is %i but CS DPL != SS DPL\n", seg.type);
420  break;
421  case 13:
422  case 15:
423  if (seg.dpl > sregs.ss.dpl)
424  warn("CS type is %i but CS DPL > SS DPL\n", seg.type);
425  break;
426  default:
427  warn("Illegal CS type: %i\n", seg.type);
428  break;
429  }
430  break;
431 
432  case MISCREG_SS:
433  if (seg.unusable)
434  break;
435  switch (seg.type) {
436  case 3:
437  if (sregs.cs.type == 3 && seg.dpl != 0)
438  warn("CS type is 3, but SS DPL is != 0.\n");
439  M5_FALLTHROUGH;
440  case 7:
441  if (!(sregs.cr0 & 1) && seg.dpl != 0)
442  warn("SS DPL is %i, but CR0 PE is 0\n", seg.dpl);
443  break;
444  default:
445  warn("Illegal SS type: %i\n", seg.type);
446  break;
447  }
448  break;
449 
450  case MISCREG_DS:
451  case MISCREG_ES:
452  case MISCREG_FS:
453  case MISCREG_GS:
454  if (seg.unusable)
455  break;
456  if (!(seg.type & 0x1) ||
457  ((seg.type & 0x8) && !(seg.type & 0x2)))
458  warn("%s has an illegal type field: %i\n", name, seg.type);
459  break;
460 
461  case MISCREG_TR:
462  // TODO: We should check the CPU mode
463  if (seg.type != 3 && seg.type != 11)
464  warn("%s: Illegal segment type (%i)\n", name, seg.type);
465  break;
466 
467  case MISCREG_TSL:
468  if (seg.unusable)
469  break;
470  if (seg.type != 2)
471  warn("%s: Illegal segment type (%i)\n", name, seg.type);
472  break;
473  }
474 
475  switch (idx) {
476  case MISCREG_SS:
477  case MISCREG_DS:
478  case MISCREG_ES:
479  case MISCREG_FS:
480  case MISCREG_GS:
481  if (seg.unusable)
482  break;
483  M5_FALLTHROUGH;
484  case MISCREG_CS:
485  if (!seg.s)
486  warn("%s: S flag not set\n", name);
487  break;
488 
489  case MISCREG_TSL:
490  if (seg.unusable)
491  break;
492  M5_FALLTHROUGH;
493  case MISCREG_TR:
494  if (seg.s)
495  warn("%s: S flag is set\n", name);
496  break;
497  }
498 
499  switch (idx) {
500  case MISCREG_SS:
501  case MISCREG_DS:
502  case MISCREG_ES:
503  case MISCREG_FS:
504  case MISCREG_GS:
505  case MISCREG_TSL:
506  if (seg.unusable)
507  break;
508  M5_FALLTHROUGH;
509  case MISCREG_TR:
510  case MISCREG_CS:
511  if (!seg.present)
512  warn("%s: P flag not set\n", name);
513 
514  if (((seg.limit & 0xFFF) == 0 && seg.g) ||
515  ((seg.limit & 0xFFF00000) != 0 && !seg.g)) {
516  warn("%s limit (0x%x) and g (%i) combination is illegal.\n",
517  name, seg.limit, seg.g);
518  }
519  break;
520  }
521 
522  // TODO: Check CS DB
523 }
524 
525 X86KvmCPU::X86KvmCPU(X86KvmCPUParams *params)
526  : BaseKvmCPU(params),
527  useXSave(params->useXSave)
528 {
529  Kvm &kvm(*vm.kvm);
530 
531  if (!kvm.capSetTSSAddress())
532  panic("KVM: Missing capability (KVM_CAP_SET_TSS_ADDR)\n");
533  if (!kvm.capExtendedCPUID())
534  panic("KVM: Missing capability (KVM_CAP_EXT_CPUID)\n");
535  if (!kvm.capUserNMI())
536  warn("KVM: Missing capability (KVM_CAP_USER_NMI)\n");
537  if (!kvm.capVCPUEvents())
538  warn("KVM: Missing capability (KVM_CAP_VCPU_EVENTS)\n");
539 
540  haveDebugRegs = kvm.capDebugRegs();
541  haveXSave = kvm.capXSave();
542  haveXCRs = kvm.capXCRs();
543 
544  if (useXSave && !haveXSave) {
545  warn("KVM: XSAVE not supported by host. MXCSR synchronization might be "
546  "unreliable due to kernel bugs.\n");
547  useXSave = false;
548  } else if (!useXSave) {
549  warn("KVM: XSave FPU/SIMD synchronization disabled by user.\n");
550  }
551 }
552 
553 X86KvmCPU::~X86KvmCPU()
554 {
555 }
556 
557 void
558 X86KvmCPU::startup()
559 {
560  BaseKvmCPU::startup();
561 
562  updateCPUID();
563 
564  // TODO: Do we need to create an identity mapped TSS area? We
565  // should call kvm.vm.setTSSAddress() here in that case. It should
566  // only be needed for old versions of the virtualization
567  // extensions. We should make sure that the identity range is
568  // reserved in the e820 memory map in that case.
569 }
570 
571 void
572 X86KvmCPU::dump() const
573 {
574  dumpIntRegs();
575  if (useXSave)
576  dumpXSave();
577  else
578  dumpFpuRegs();
579  dumpSpecRegs();
580  dumpDebugRegs();
581  dumpXCRs();
582  dumpVCpuEvents();
583  dumpMSRs();
584 }
585 
586 void
587 X86KvmCPU::dumpFpuRegs() const
588 {
589  struct kvm_fpu fpu;
590  getFPUState(fpu);
591  dumpKvm(fpu);
592 }
593 
594 void
595 X86KvmCPU::dumpIntRegs() const
596 {
597  struct kvm_regs regs;
598  getRegisters(regs);
599  dumpKvm(regs);
600 }
601 
602 void
603 X86KvmCPU::dumpSpecRegs() const
604 {
605  struct kvm_sregs sregs;
606  getSpecialRegisters(sregs);
607  dumpKvm(sregs);
608 }
609 
610 void
611 X86KvmCPU::dumpDebugRegs() const
612 {
613  if (haveDebugRegs) {
614 #ifdef KVM_GET_DEBUGREGS
615  struct kvm_debugregs dregs;
616  getDebugRegisters(dregs);
617  dumpKvm(dregs);
618 #endif
619  } else {
620  inform("Debug registers not supported by kernel.\n");
621  }
622 }
623 
624 void
625 X86KvmCPU::dumpXCRs() const
626 {
627  if (haveXCRs) {
628  struct kvm_xcrs xcrs;
629  getXCRs(xcrs);
630  dumpKvm(xcrs);
631  } else {
632  inform("XCRs not supported by kernel.\n");
633  }
634 }
635 
636 void
637 X86KvmCPU::dumpXSave() const
638 {
639  if (haveXSave) {
640  struct kvm_xsave xsave;
641  getXSave(xsave);
642  dumpKvm(xsave);
643  } else {
644  inform("XSave not supported by kernel.\n");
645  }
646 }
647 
648 void
649 X86KvmCPU::dumpVCpuEvents() const
650 {
651  struct kvm_vcpu_events events;
652  getVCpuEvents(events);
653  dumpKvm(events);
654 }
655 
656 void
657 X86KvmCPU::dumpMSRs() const
658 {
659  const Kvm::MSRIndexVector &supported_msrs(vm.kvm->getSupportedMSRs());
660  std::unique_ptr<struct kvm_msrs> msrs(
661  newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(
662  supported_msrs.size()));
663 
664  msrs->nmsrs = supported_msrs.size();
665  for (int i = 0; i < supported_msrs.size(); ++i) {
666  struct kvm_msr_entry &e(msrs->entries[i]);
667  e.index = supported_msrs[i];
668  e.reserved = 0;
669  e.data = 0;
670  }
671  getMSRs(*msrs.get());
672 
673  dumpKvm(*msrs.get());
674 }
675 
676 void
677 X86KvmCPU::updateKvmState()
678 {
679  updateKvmStateRegs();
680  updateKvmStateSRegs();
681  updateKvmStateFPU();
682  updateKvmStateMSRs();
683 
684  DPRINTF(KvmContext, "X86KvmCPU::updateKvmState():\n");
685  if (DTRACE(KvmContext))
686  dump();
687 }
688 
689 void
690 X86KvmCPU::updateKvmStateRegs()
691 {
692  struct kvm_regs regs;
693 
694 #define APPLY_IREG(kreg, mreg) regs.kreg = tc->readIntReg(mreg)
695  FOREACH_IREG();
696 #undef APPLY_IREG
697 
698  regs.rip = tc->instAddr() - tc->readMiscReg(MISCREG_CS_BASE);
699 
700  /* You might think that setting regs.rflags to the contents of
701  * MISCREG_RFLAGS here would suffice. In that case you're
702  * mistaken. We need to reconstruct it from a bunch of ucode
703  * registers and wave a dead chicken over it (aka mask out and set
704  * reserved bits) to get it to work.
705  */
706  regs.rflags = X86ISA::getRFlags(tc);
707 
708  setRegisters(regs);
709 }
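// A rough sketch of the reconstruction mentioned above (assumed behavior;
// see X86ISA::getRFlags() in arch/x86/utility.cc): gem5 keeps the
// condition codes outside MISCREG_RFLAGS, so the architectural value is
// rebuilt by OR-ing the pieces together, conceptually:
//
//     uint64_t rflags(tc->readMiscRegNoEffect(MISCREG_RFLAGS));
//     rflags |= ccFlagBits | cfofBits | dfBit;  // hypothetical helpers
//     rflags |= (1ULL << 1);                    // bit 1 always reads as 1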
710 
711 static inline void
712 setKvmSegmentReg(ThreadContext *tc, struct kvm_segment &kvm_seg,
713  const int index)
714 {
715  SegAttr attr(tc->readMiscRegNoEffect(MISCREG_SEG_ATTR(index)));
716 
717  kvm_seg.base = tc->readMiscRegNoEffect(MISCREG_SEG_BASE(index));
718  kvm_seg.limit = tc->readMiscRegNoEffect(MISCREG_SEG_LIMIT(index));
719  kvm_seg.selector = tc->readMiscRegNoEffect(MISCREG_SEG_SEL(index));
720  kvm_seg.type = attr.type;
721  kvm_seg.present = attr.present;
722  kvm_seg.dpl = attr.dpl;
723  kvm_seg.db = attr.defaultSize;
724  kvm_seg.s = attr.system;
725  kvm_seg.l = attr.longMode;
726  kvm_seg.g = attr.granularity;
727  kvm_seg.avl = attr.avl;
728 
729  // A segment is normally unusable when the selector is zero. There
730  // is an attr.unusable flag in gem5, but it seems unused. QEMU
731  // seems to set this to 0 all the time, so we just do the same and
732  // hope for the best.
733  kvm_seg.unusable = 0;
734 }
735 
736 static inline void
737 setKvmDTableReg(ThreadContext *tc, struct kvm_dtable &kvm_dtable,
738  const int index)
739 {
740  kvm_dtable.base = tc->readMiscRegNoEffect(MISCREG_SEG_BASE(index));
741  kvm_dtable.limit = tc->readMiscRegNoEffect(MISCREG_SEG_LIMIT(index));
742 }
743 
744 static void
745 forceSegAccessed(struct kvm_segment &seg)
746 {
747  // Intel's VMX requires that (some) usable segments are flagged as
748  // 'accessed' (i.e., the lowest bit in the segment type is set)
749  // when entering VMX. This wouldn't necessarily be the case even if
750  // gem5 did set the access bits correctly, so we force it to one
751  // in that case.
752  if (!seg.unusable)
753  seg.type |= SEG_TYPE_BIT_ACCESSED;
754 }
755 
756 void
757 X86KvmCPU::updateKvmStateSRegs()
758 {
759  struct kvm_sregs sregs;
760 
761 #define APPLY_SREG(kreg, mreg) sregs.kreg = tc->readMiscRegNoEffect(mreg)
762 #define APPLY_SEGMENT(kreg, idx) setKvmSegmentReg(tc, sregs.kreg, idx)
763 #define APPLY_DTABLE(kreg, idx) setKvmDTableReg(tc, sregs.kreg, idx)
764 
765  FOREACH_SREG();
766  FOREACH_SEGMENT();
767  FOREACH_DTABLE();
768 
769 #undef APPLY_SREG
770 #undef APPLY_SEGMENT
771 #undef APPLY_DTABLE
772 
773  // Clear the interrupt bitmap
774  memset(&sregs.interrupt_bitmap, 0, sizeof(sregs.interrupt_bitmap));
775 
776  // VMX requires CS, SS, DS, ES, FS, and GS to have the accessed
777  // bit in the type field set.
778  forceSegAccessed(sregs.cs);
779  forceSegAccessed(sregs.ss);
780  forceSegAccessed(sregs.ds);
781  forceSegAccessed(sregs.es);
782  forceSegAccessed(sregs.fs);
783  forceSegAccessed(sregs.gs);
784 
785  // There are currently some cases where the active task isn't
786  // marked as busy. This is illegal in VMX, so we force it to busy.
787  if (sregs.tr.type == SEG_SYS_TYPE_TSS_AVAILABLE) {
788  hack("tr.type (%i) is not busy. Forcing the busy bit.\n",
789  sregs.tr.type);
790  sregs.tr.type = SEG_SYS_TYPE_TSS_BUSY;
791  }
792 
793  // VMX requires the DPL of SS and CS to be the same for
794  // non-conforming code segments. It seems like m5 doesn't set the
795  // DPL of SS correctly when taking interrupts, so we need to fix
796  // that here.
797  if ((sregs.cs.type == SEG_CS_TYPE_ACCESSED ||
798  sregs.cs.type == SEG_CS_TYPE_READ_ACCESSED) &&
799  sregs.cs.dpl != sregs.ss.dpl) {
800 
801  hack("CS.DPL (%i) != SS.DPL (%i): Forcing SS.DPL to %i\n",
802  sregs.cs.dpl, sregs.ss.dpl, sregs.cs.dpl);
803  sregs.ss.dpl = sregs.cs.dpl;
804  }
805 
806  // Do checks after fixing up the state to avoid getting excessive
807  // amounts of warnings.
808  RFLAGS rflags_nocc(tc->readMiscReg(MISCREG_RFLAGS));
809  if (!rflags_nocc.vm) {
810  // Do segment verification if the CPU isn't entering virtual
811  // 8086 mode. We currently assume that unrestricted guest
812  // mode is available.
813 
814 #define APPLY_SEGMENT(kreg, idx) \
815  checkSeg(# kreg, idx + MISCREG_SEG_SEL_BASE, sregs.kreg, sregs)
816 
817  FOREACH_SEGMENT();
818 #undef APPLY_SEGMENT
819  }
820 
821  setSpecialRegisters(sregs);
822 }
823 
824 template <typename T>
825 static void
826 updateKvmStateFPUCommon(ThreadContext *tc, T &fpu)
827 {
828  fpu.mxcsr = tc->readMiscRegNoEffect(MISCREG_MXCSR);
829  fpu.fcw = tc->readMiscRegNoEffect(MISCREG_FCW);
830  // No need to rebuild from MISCREG_FSW and MISCREG_TOP if we read
831  // with effects.
832  fpu.fsw = tc->readMiscReg(MISCREG_FSW);
833 
834  uint64_t ftw(tc->readMiscRegNoEffect(MISCREG_FTW));
835  fpu.ftwx = X86ISA::convX87TagsToXTags(ftw);
836 
837  fpu.last_opcode = tc->readMiscRegNoEffect(MISCREG_FOP);
838 
839  const unsigned top((fpu.fsw >> 11) & 0x7);
840  for (int i = 0; i < 8; ++i) {
841  const unsigned reg_idx((i + top) & 0x7);
842  const double value(bitsToFloat64(
843  tc->readFloatReg(FLOATREG_FPR(reg_idx))));
844  DPRINTF(KvmContext, "Setting KVM FP reg %i (st[%i]) := %f\n",
845  reg_idx, i, value);
846  X86ISA::storeFloat80(fpu.fpr[i], value);
847  }
848 
849  // TODO: We should update the MMX state
850 
851  for (int i = 0; i < 16; ++i) {
852  *(uint64_t *)&fpu.xmm[i][0] =
853  tc->readFloatReg(FLOATREG_XMM_LOW(i));
854  *(uint64_t *)&fpu.xmm[i][8] =
855  tc->readFloatReg(FLOATREG_XMM_HIGH(i));
856  }
857 }
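// Background for convX87TagsToXTags() above (assumed from the FXSAVE
// format): the full x87 tag word keeps two bits per register (00 valid,
// 01 zero, 10 special, 11 empty) while the abridged ftwx keeps one bit
// per register (1 == non-empty). For example, an FTW of 0xFFFE (only
// physical register 0 in use) becomes an ftwx of 0x01.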
858 
859 void
860 X86KvmCPU::updateKvmStateFPULegacy()
861 {
862  struct kvm_fpu fpu;
863 
864  // There is some padding in the FP registers, so we'd better zero
865  // the whole struct.
866  memset(&fpu, 0, sizeof(fpu));
867 
868  updateKvmStateFPUCommon(tc, fpu);
869 
870  if (tc->readMiscRegNoEffect(MISCREG_FISEG))
871  warn_once("MISCREG_FISEG is non-zero.\n");
872 
873  fpu.last_ip = tc->readMiscRegNoEffect(MISCREG_FIOFF);
874 
875  if (tc->readMiscRegNoEffect(MISCREG_FOSEG))
876  warn_once("MISCREG_FOSEG is non-zero.\n");
877 
878  fpu.last_dp = tc->readMiscRegNoEffect(MISCREG_FOOFF);
879 
880  setFPUState(fpu);
881 }
882 
883 void
884 X86KvmCPU::updateKvmStateFPUXSave()
885 {
886  struct kvm_xsave kxsave;
887  FXSave &xsave(*(FXSave *)kxsave.region);
888 
889  // There is some padding and reserved fields in the structure, so
890  // we'd better zero the whole thing.
891  memset(&kxsave, 0, sizeof(kxsave));
892 
893  updateKvmStateFPUCommon(tc, xsave);
894 
895  if (tc->readMiscRegNoEffect(MISCREG_FISEG))
896  warn_once("MISCREG_FISEG is non-zero.\n");
897 
898  xsave.ctrl64.fpu_ip = tc->readMiscRegNoEffect(MISCREG_FIOFF);
899 
900  if (tc->readMiscRegNoEffect(MISCREG_FOSEG))
901  warn_once("MISCREG_FOSEG is non-zero.\n");
902 
903  xsave.ctrl64.fpu_dp = tc->readMiscRegNoEffect(MISCREG_FOOFF);
904 
905  setXSave(kxsave);
906 }
907 
908 void
909 X86KvmCPU::updateKvmStateFPU()
910 {
911  if (useXSave)
912  updateKvmStateFPUXSave();
913  else
914  updateKvmStateFPULegacy();
915 }
916 
917 void
918 X86KvmCPU::updateKvmStateMSRs()
919 {
920  KvmMSRVector msrs;
921 
922  const Kvm::MSRIndexVector &indices(getMsrIntersection());
923 
924  for (auto it = indices.cbegin(); it != indices.cend(); ++it) {
925  struct kvm_msr_entry e;
926 
927  e.index = *it;
928  e.reserved = 0;
929  e.data = tc->readMiscReg(msrMap.at(*it));
930  DPRINTF(KvmContext, "Adding MSR: idx: 0x%x, data: 0x%x\n",
931  e.index, e.data);
932 
933  msrs.push_back(e);
934  }
935 
936  setMSRs(msrs);
937 }
938 
939 void
940 X86KvmCPU::updateThreadContext()
941 {
942  struct kvm_regs regs;
943  struct kvm_sregs sregs;
944 
945  getRegisters(regs);
946  getSpecialRegisters(sregs);
947 
948  DPRINTF(KvmContext, "X86KvmCPU::updateThreadContext():\n");
949  if (DTRACE(KvmContext))
950  dump();
951 
952  updateThreadContextRegs(regs, sregs);
953  updateThreadContextSRegs(sregs);
954  if (useXSave) {
955  struct kvm_xsave xsave;
956  getXSave(xsave);
957 
958  updateThreadContextXSave(xsave);
959  } else {
960  struct kvm_fpu fpu;
961  getFPUState(fpu);
962 
963  updateThreadContextFPU(fpu);
964  }
965  updateThreadContextMSRs();
966 
967  // The M5 misc reg caches some values from other
968  // registers. Writing to it with side effects causes it to be
969  // updated from its source registers.
970  tc->setMiscReg(MISCREG_M5_REG, 0);
971 }
972 
973 void
974 X86KvmCPU::updateThreadContextRegs(const struct kvm_regs &regs,
975  const struct kvm_sregs &sregs)
976 {
977 #define APPLY_IREG(kreg, mreg) tc->setIntReg(mreg, regs.kreg)
978 
979  FOREACH_IREG();
980 
981 #undef APPLY_IREG
982 
983  tc->pcState(PCState(regs.rip + sregs.cs.base));
984 
985  // Flags are spread out across multiple semi-magic registers so we
986  // need some special care when updating them.
987  X86ISA::setRFlags(tc, regs.rflags);
988 }
989 
990 
991 inline void
992 setContextSegment(ThreadContext *tc, const struct kvm_segment &kvm_seg,
993  const int index)
994 {
995  SegAttr attr(0);
996 
997  attr.type = kvm_seg.type;
998  attr.present = kvm_seg.present;
999  attr.dpl = kvm_seg.dpl;
1000  attr.defaultSize = kvm_seg.db;
1001  attr.system = kvm_seg.s;
1002  attr.longMode = kvm_seg.l;
1003  attr.granularity = kvm_seg.g;
1004  attr.avl = kvm_seg.avl;
1005  attr.unusable = kvm_seg.unusable;
1006 
1007  // We need some setMiscReg magic here to keep the effective base
1008  // addresses in sync. We need an up-to-date version of EFER, so
1009  // make sure this is called after the sregs have been synced.
1010  tc->setMiscReg(MISCREG_SEG_BASE(index), kvm_seg.base);
1011  tc->setMiscReg(MISCREG_SEG_LIMIT(index), kvm_seg.limit);
1012  tc->setMiscReg(MISCREG_SEG_SEL(index), kvm_seg.selector);
1013  tc->setMiscReg(MISCREG_SEG_ATTR(index), attr);
1014 }
1015 
1016 inline void
1017 setContextSegment(ThreadContext *tc, const struct kvm_dtable &kvm_dtable,
1018  const int index)
1019 {
1020  // We need some setMiscReg magic here to keep the effective base
1021  // addresses in sync. We need an up-to-date version of EFER, so
1022  // make sure this is called after the sregs have been synced.
1023  tc->setMiscReg(MISCREG_SEG_BASE(index), kvm_dtable.base);
1024  tc->setMiscReg(MISCREG_SEG_LIMIT(index), kvm_dtable.limit);
1025 }
1026 
1027 void
1028 X86KvmCPU::updateThreadContextSRegs(const struct kvm_sregs &sregs)
1029 {
1030  assert(getKvmRunState()->apic_base == sregs.apic_base);
1031  assert(getKvmRunState()->cr8 == sregs.cr8);
1032 
1033 #define APPLY_SREG(kreg, mreg) tc->setMiscRegNoEffect(mreg, sregs.kreg)
1034 #define APPLY_SEGMENT(kreg, idx) setContextSegment(tc, sregs.kreg, idx)
1035 #define APPLY_DTABLE(kreg, idx) setContextSegment(tc, sregs.kreg, idx)
1036  FOREACH_SREG();
1037  FOREACH_SEGMENT();
1038  FOREACH_DTABLE();
1039 #undef APPLY_SREG
1040 #undef APPLY_SEGMENT
1041 #undef APPLY_DTABLE
1042 }
1043 
1044 template<typename T>
1045 static void
1046 updateThreadContextFPUCommon(ThreadContext *tc, const T &fpu)
1047 {
1048  const unsigned top((fpu.fsw >> 11) & 0x7);
1049 
1050  for (int i = 0; i < 8; ++i) {
1051  const unsigned reg_idx((i + top) & 0x7);
1052  const double value(X86ISA::loadFloat80(fpu.fpr[i]));
1053  DPRINTF(KvmContext, "Setting gem5 FP reg %i (st[%i]) := %f\n",
1054  reg_idx, i, value);
1055  tc->setFloatReg(FLOATREG_FPR(reg_idx), floatToBits64(value));
1056  }
1057 
1058  // TODO: We should update the MMX state
1059 
1060  tc->setMiscRegNoEffect(MISCREG_X87_TOP, top);
1061  tc->setMiscRegNoEffect(MISCREG_MXCSR, fpu.mxcsr);
1062  tc->setMiscRegNoEffect(MISCREG_FCW, fpu.fcw);
1063  tc->setMiscRegNoEffect(MISCREG_FSW, fpu.fsw);
1064 
1065  uint64_t ftw(convX87XTagsToTags(fpu.ftwx));
1066  // TODO: Are these registers really the same?
1067  tc->setMiscRegNoEffect(MISCREG_FTW, ftw);
1068  tc->setMiscRegNoEffect(MISCREG_FTAG, ftw);
1069 
1070  tc->setMiscRegNoEffect(MISCREG_FOP, fpu.last_opcode);
1071 
1072  for (int i = 0; i < 16; ++i) {
1073  tc->setFloatReg(FLOATREG_XMM_LOW(i), *(uint64_t *)&fpu.xmm[i][0]);
1074  tc->setFloatReg(FLOATREG_XMM_HIGH(i), *(uint64_t *)&fpu.xmm[i][8]);
1075  }
1076 }
1077 
1078 void
1079 X86KvmCPU::updateThreadContextFPU(const struct kvm_fpu &fpu)
1080 {
1081  updateThreadContextFPUCommon(tc, fpu);
1082 
1083  tc->setMiscRegNoEffect(MISCREG_FISEG, 0);
1084  tc->setMiscRegNoEffect(MISCREG_FIOFF, fpu.last_ip);
1085  tc->setMiscRegNoEffect(MISCREG_FOSEG, 0);
1086  tc->setMiscRegNoEffect(MISCREG_FOOFF, fpu.last_dp);
1087 }
1088 
1089 void
1090 X86KvmCPU::updateThreadContextXSave(const struct kvm_xsave &kxsave)
1091 {
1092  const FXSave &xsave(*(const FXSave *)kxsave.region);
1093 
1094  updateThreadContextFPUCommon(tc, xsave);
1095 
1096  tc->setMiscRegNoEffect(MISCREG_FISEG, 0);
1097  tc->setMiscRegNoEffect(MISCREG_FIOFF, xsave.ctrl64.fpu_ip);
1098  tc->setMiscRegNoEffect(MISCREG_FOSEG, 0);
1099  tc->setMiscRegNoEffect(MISCREG_FOOFF, xsave.ctrl64.fpu_dp);
1100 }
1101 
1102 void
1103 X86KvmCPU::updateThreadContextMSRs()
1104 {
1105  const Kvm::MSRIndexVector &msrs(getMsrIntersection());
1106 
1107  std::unique_ptr<struct kvm_msrs> kvm_msrs(
1108  newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(msrs.size()));
1109  struct kvm_msr_entry *entry;
1110 
1111  // Create a list of MSRs to read
1112  kvm_msrs->nmsrs = msrs.size();
1113  entry = &kvm_msrs->entries[0];
1114  for (auto it = msrs.cbegin(); it != msrs.cend(); ++it, ++entry) {
1115  entry->index = *it;
1116  entry->reserved = 0;
1117  entry->data = 0;
1118  }
1119 
1120  getMSRs(*kvm_msrs.get());
1121 
1122  // Update M5's state
1123  entry = &kvm_msrs->entries[0];
1124  for (int i = 0; i < kvm_msrs->nmsrs; ++i, ++entry) {
1125  DPRINTF(KvmContext, "Setting M5 MSR: idx: 0x%x, data: 0x%x\n",
1126  entry->index, entry->data);
1127 
1128  tc->setMiscReg(X86ISA::msrMap.at(entry->index), entry->data);
1129  }
1130 }
1131 
1132 void
1133 X86KvmCPU::deliverInterrupts()
1134 {
1135  Fault fault;
1136 
1137  syncThreadContext();
1138 
1139  {
1140  // Migrate to the interrupt controller's thread to get the
1141  // interrupt. Even though the individual methods are safe to
1142  // call across threads, we might still lose interrupts unless
1143  // getInterrupt() and updateIntrInfo() are called
1144  // atomically.
1145  EventQueue::ScopedMigration migrate(interrupts[0]->eventQueue());
1146  fault = interrupts[0]->getInterrupt(tc);
1147  interrupts[0]->updateIntrInfo(tc);
1148  }
1149 
1150  X86Interrupt *x86int(dynamic_cast<X86Interrupt *>(fault.get()));
1151  if (dynamic_cast<NonMaskableInterrupt *>(fault.get())) {
1152  DPRINTF(KvmInt, "Delivering NMI\n");
1153  kvmNonMaskableInterrupt();
1154  } else if (dynamic_cast<InitInterrupt *>(fault.get())) {
1155  DPRINTF(KvmInt, "INIT interrupt\n");
1156  fault.get()->invoke(tc);
1157  // Delay the kvm state update since we won't enter KVM on this
1158  // tick.
1159  threadContextDirty = true;
1160  // HACK: gem5 doesn't actually have any BIOS code, which means
1161  // that we need to halt the thread and wait for a startup
1162  // interrupt before restarting the thread. The simulated CPUs
1163  // use the same kind of hack using a microcode routine.
1164  thread->suspend();
1165  } else if (dynamic_cast<StartupInterrupt *>(fault.get())) {
1166  DPRINTF(KvmInt, "STARTUP interrupt\n");
1167  fault.get()->invoke(tc);
1168  // The kvm state is assumed to have been updated when entering
1169  // kvmRun(), so we need to update it manually here.
1170  updateKvmState();
1171  } else if (x86int) {
1172  struct kvm_interrupt kvm_int;
1173  kvm_int.irq = x86int->getVector();
1174 
1175  DPRINTF(KvmInt, "Delivering interrupt: %s (%u)\n",
1176  fault->name(), kvm_int.irq);
1177 
1178  kvmInterrupt(kvm_int);
1179  } else {
1180  panic("KVM: Unknown interrupt type\n");
1181  }
1182 
1183 }
1184 
1185 Tick
1186 X86KvmCPU::kvmRun(Tick ticks)
1187 {
1188  struct kvm_run &kvm_run(*getKvmRunState());
1189 
1190  auto *lapic = dynamic_cast<X86ISA::Interrupts *>(interrupts[0]);
1191 
1192  if (lapic->checkInterruptsRaw()) {
1193  if (lapic->hasPendingUnmaskable()) {
1194  DPRINTF(KvmInt,
1195  "Delivering unmaskable interrupt.\n");
1196  syncThreadContext();
1197  deliverInterrupts();
1198  } else if (kvm_run.ready_for_interrupt_injection) {
1199  // KVM claims that it is ready for an interrupt. It might
1200  // be lying if we just updated rflags and disabled
1201  // interrupts (e.g., by doing a CPU handover). Let's sync
1202  // the thread context and check if there are /really/
1203  // interrupts that should be delivered now.
1204  syncThreadContext();
1205  if (lapic->checkInterrupts(tc)) {
1206  DPRINTF(KvmInt,
1207  "M5 has pending interrupts, delivering interrupt.\n");
1208 
1209  deliverInterrupts();
1210  } else {
1211  DPRINTF(KvmInt,
1212  "Interrupt delivery delayed due to KVM confusion.\n");
1213  kvm_run.request_interrupt_window = 1;
1214  }
1215  } else if (!kvm_run.request_interrupt_window) {
1216  DPRINTF(KvmInt,
1217  "M5 has pending interrupts, requesting interrupt "
1218  "window.\n");
1219  kvm_run.request_interrupt_window = 1;
1220  }
1221  } else {
1222  kvm_run.request_interrupt_window = 0;
1223  }
1224 
1225  // The CPU might have been suspended as a result of the INIT
1226  // interrupt delivery hack. In that case, don't enter into KVM.
1227  if (_status == Idle)
1228  return 0;
1229  else
1230  return kvmRunWrapper(ticks);
1231 }
1232 
1233 Tick
1234 X86KvmCPU::kvmRunDrain()
1235 {
1236  struct kvm_run &kvm_run(*getKvmRunState());
1237 
1238  if (!archIsDrained()) {
1239  DPRINTF(Drain, "kvmRunDrain: Architecture code isn't drained\n");
1240 
1241  // Tell KVM to find a suitable place to deliver interrupts. This
1242  // should ensure that pending interrupts have been delivered and
1243  // things are reasonably consistent (i.e., no interrupts pending
1244  // in the guest).
1245  kvm_run.request_interrupt_window = 1;
1246 
1247  // Limit the run to 1 millisecond. That is hopefully enough to
1248  // reach an interrupt window. Otherwise, we'll just try again
1249  // later.
1250  return kvmRunWrapper(1 * SimClock::Float::ms);
1251  } else {
1252  DPRINTF(Drain, "kvmRunDrain: Delivering pending IO\n");
1253 
1254  return kvmRunWrapper(0);
1255  }
1256 }
1257 
1258 Tick
1259 X86KvmCPU::kvmRunWrapper(Tick ticks)
1260 {
1261  struct kvm_run &kvm_run(*getKvmRunState());
1262 
1263  // Synchronize the APIC base and CR8 here since they are present
1264  // in the kvm_run struct, which makes the synchronization really
1265  // cheap.
1266  kvm_run.apic_base = tc->readMiscReg(MISCREG_APIC_BASE);
1267  kvm_run.cr8 = tc->readMiscReg(MISCREG_CR8);
1268 
1269  const Tick run_ticks(BaseKvmCPU::kvmRun(ticks));
1270 
1271  tc->setMiscReg(MISCREG_APIC_BASE, kvm_run.apic_base);
1272  kvm_run.cr8 = tc->readMiscReg(MISCREG_CR8);
1273 
1274  return run_ticks;
1275 }
1276 
1277 uint64_t
1278 X86KvmCPU::getHostCycles() const
1279 {
1280  return getMSR(MSR_TSC);
1281 }
1282 
1283 void
1284 X86KvmCPU::handleIOMiscReg32(int miscreg)
1285 {
1286  struct kvm_run &kvm_run(*getKvmRunState());
1287  const uint16_t port(kvm_run.io.port);
1288 
1289  assert(kvm_run.exit_reason == KVM_EXIT_IO);
1290 
1291  if (kvm_run.io.size != 4) {
1292  panic("Unexpected IO size (%u) for address 0x%x.\n",
1293  kvm_run.io.size, port);
1294  }
1295 
1296  if (kvm_run.io.count != 1) {
1297  panic("Unexpected IO count (%u) for address 0x%x.\n",
1298  kvm_run.io.count, port);
1299  }
1300 
1301  uint32_t *data((uint32_t *)getGuestData(kvm_run.io.data_offset));
1302  if (kvm_run.io.direction == KVM_EXIT_IO_OUT)
1303  tc->setMiscReg(miscreg, *data);
1304  else
1305  *data = tc->readMiscRegNoEffect(miscreg);
1306 }
1307 
1308 Tick
1309 X86KvmCPU::handleKvmExitIO()
1310 {
1311  struct kvm_run &kvm_run(*getKvmRunState());
1312  bool isWrite(kvm_run.io.direction == KVM_EXIT_IO_OUT);
1313  unsigned char *guestData(getGuestData(kvm_run.io.data_offset));
1314  Tick delay(0);
1315  uint16_t port(kvm_run.io.port);
1316  Addr pAddr;
1317  const int count(kvm_run.io.count);
1318 
1319  assert(kvm_run.io.direction == KVM_EXIT_IO_IN ||
1320  kvm_run.io.direction == KVM_EXIT_IO_OUT);
1321 
1322  DPRINTF(KvmIO, "KVM-x86: Handling IO instruction (%s) (port: 0x%x)\n",
1323  (isWrite ? "out" : "in"), kvm_run.io.port);
1324 
1325  /* Vanilla gem5 handles PCI discovery in the TLB(!). Since we
1326  * don't use the TLB component, we need to intercept and handle
1327  * the PCI configuration space IO ports here.
1328  *
1329  * The IO port PCI discovery mechanism uses one address register
1330  * and one data register. We map the address register to a misc
1331  * reg and use that to re-route data register accesses to the
1332  * right location in the PCI configuration space.
1333  */
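 /* Worked example (illustrative, not part of the original comment): if the
  * guest writes 0x80001008 to port 0xCF8 (enable bit set; bus 0, device 2,
  * function 0, offset 8) and then reads port 0xCFC, the enable-bit test
  * below routes the read to
  * x86PciConfigAddress((0x80001008 & 0x7ffffffc) | (0xCFC & 0x3)).
  */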
1334  if (port == IO_PCI_CONF_ADDR) {
1335  handleIOMiscReg32(MISCREG_PCI_CONFIG_ADDRESS);
1336  return 0;
1337  } else if ((port & ~0x3) == IO_PCI_CONF_DATA_BASE) {
1338  Addr pciConfigAddr(tc->readMiscRegNoEffect(MISCREG_PCI_CONFIG_ADDRESS));
1339  if (pciConfigAddr & 0x80000000) {
1340  pAddr = X86ISA::x86PciConfigAddress((pciConfigAddr & 0x7ffffffc) |
1341  (port & 0x3));
1342  } else {
1343  pAddr = X86ISA::x86IOAddress(port);
1344  }
1345  } else {
1346  pAddr = X86ISA::x86IOAddress(port);
1347  }
1348 
1349  const MemCmd cmd(isWrite ? MemCmd::WriteReq : MemCmd::ReadReq);
1350  // Temporarily lock and migrate to the device event queue to
1351  // prevent races in multi-core mode.
1352  EventQueue::ScopedMigration migrate(deviceEventQueue());
1353  for (int i = 0; i < count; ++i) {
1354  RequestPtr io_req = std::make_shared<Request>(
1355  pAddr, kvm_run.io.size,
1356  Request::UNCACHEABLE, dataMasterId());
1357 
1358  io_req->setContext(tc->contextId());
1359 
1360  PacketPtr pkt = new Packet(io_req, cmd);
1361 
1362  pkt->dataStatic(guestData);
1363  delay += dataPort.submitIO(pkt);
1364 
1365  guestData += kvm_run.io.size;
1366  }
1367 
1368  return delay;
1369 }
1370 
1371 Tick
1372 X86KvmCPU::handleKvmExitIRQWindowOpen()
1373 {
1374  // We don't need to do anything here since this is caught the next
1375  // time we execute kvmRun(). We still overload the exit event to
1376  // silence the warning about an unhandled exit event.
1377  return 0;
1378 }
1379 
1380 bool
1381 X86KvmCPU::archIsDrained() const
1382 {
1383  struct kvm_vcpu_events events;
1384 
1385  getVCpuEvents(events);
1386 
1387  // We could probably handle this by re-inserting interrupts
1388  // that are pending into gem5 on a drain. However, that would
1389  // probably be tricky to do reliably, so we'll just prevent a
1390  // drain if there is anything pending in the
1391  // guest. X86KvmCPU::kvmRunDrain() minimizes the amount of code
1392  // executed in the guest by requesting an interrupt window if
1393  // there are pending interrupts.
1394  const bool pending_events(events.exception.injected ||
1395  events.interrupt.injected ||
1396  events.nmi.injected || events.nmi.pending);
1397 
1398  if (pending_events) {
1399  DPRINTF(Drain, "archIsDrained: Pending events: %s %s %s %s\n",
1400  events.exception.injected ? "exception" : "",
1401  events.interrupt.injected ? "interrupt" : "",
1402  events.nmi.injected ? "nmi[i]" : "",
1403  events.nmi.pending ? "nmi[p]" : "");
1404  }
1405 
1406  return !pending_events;
1407 }
1408 
1409 static struct kvm_cpuid_entry2
1410 makeKvmCpuid(uint32_t function, uint32_t index,
1411  CpuidResult &result)
1412 {
1413  struct kvm_cpuid_entry2 e;
1414  e.function = function;
1415  e.index = index;
1416  e.flags = 0;
1417  e.eax = (uint32_t)result.rax;
1418  e.ebx = (uint32_t)result.rbx;
1419  e.ecx = (uint32_t)result.rcx;
1420  e.edx = (uint32_t)result.rdx;
1421 
1422  return e;
1423 }
1424 
1425 void
1426 X86KvmCPU::updateCPUID()
1427 {
1428  Kvm::CPUIDVector m5_supported;
1429 
1430  /* TODO: We currently don't support any of the functions that
1431  * iterate through data structures in the CPU using an index. It's
1432  * currently not a problem since M5 doesn't expose any of them at
1433  * the moment.
1434  */
1435 
1436  /* Basic features */
1437  CpuidResult func0;
1438  X86ISA::doCpuid(tc, 0x0, 0, func0);
1439  for (uint32_t function = 0; function <= func0.rax; ++function) {
1440  CpuidResult cpuid;
1441  uint32_t idx(0);
1442 
1443  X86ISA::doCpuid(tc, function, idx, cpuid);
1444  m5_supported.push_back(makeKvmCpuid(function, idx, cpuid));
1445  }
1446 
1447  /* Extended features */
1448  CpuidResult efunc0;
1449  X86ISA::doCpuid(tc, 0x80000000, 0, efunc0);
1450  for (uint32_t function = 0x80000000; function <= efunc0.rax; ++function) {
1451  CpuidResult cpuid;
1452  uint32_t idx(0);
1453 
1454  X86ISA::doCpuid(tc, function, idx, cpuid);
1455  m5_supported.push_back(makeKvmCpuid(function, idx, cpuid));
1456  }
1457 
1458  setCPUID(m5_supported);
1459 }
1460 
1461 void
1462 X86KvmCPU::setCPUID(const struct kvm_cpuid2 &cpuid)
1463 {
1464  if (ioctl(KVM_SET_CPUID2, (void *)&cpuid) == -1)
1465  panic("KVM: Failed to set guest CPUID2 (errno: %i)\n",
1466  errno);
1467 }
1468 
1469 void
1470 X86KvmCPU::setCPUID(const Kvm::CPUIDVector &cpuid)
1471 {
1472  std::unique_ptr<struct kvm_cpuid2> kvm_cpuid(
1473  newVarStruct<struct kvm_cpuid2, struct kvm_cpuid_entry2>(cpuid.size()));
1474 
1475  kvm_cpuid->nent = cpuid.size();
1476  std::copy(cpuid.begin(), cpuid.end(), kvm_cpuid->entries);
1477 
1478  setCPUID(*kvm_cpuid);
1479 }
1480 
1481 void
1482 X86KvmCPU::setMSRs(const struct kvm_msrs &msrs)
1483 {
1484  if (ioctl(KVM_SET_MSRS, (void *)&msrs) == -1)
1485  panic("KVM: Failed to set guest MSRs (errno: %i)\n",
1486  errno);
1487 }
1488 
1489 void
1490 X86KvmCPU::setMSRs(const KvmMSRVector &msrs)
1491 {
1492  std::unique_ptr<struct kvm_msrs> kvm_msrs(
1493  newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(msrs.size()));
1494 
1495  kvm_msrs->nmsrs = msrs.size();
1496  std::copy(msrs.begin(), msrs.end(), kvm_msrs->entries);
1497 
1498  setMSRs(*kvm_msrs);
1499 }
1500 
1501 void
1502 X86KvmCPU::getMSRs(struct kvm_msrs &msrs) const
1503 {
1504  if (ioctl(KVM_GET_MSRS, (void *)&msrs) == -1)
1505  panic("KVM: Failed to get guest MSRs (errno: %i)\n",
1506  errno);
1507 }
1508 
1509 
1510 void
1511 X86KvmCPU::setMSR(uint32_t index, uint64_t value)
1512 {
1513  std::unique_ptr<struct kvm_msrs> kvm_msrs(
1514  newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(1));
1515  struct kvm_msr_entry &entry(kvm_msrs->entries[0]);
1516 
1517  kvm_msrs->nmsrs = 1;
1518  entry.index = index;
1519  entry.reserved = 0;
1520  entry.data = value;
1521 
1522  setMSRs(*kvm_msrs.get());
1523 }
1524 
1525 uint64_t
1526 X86KvmCPU::getMSR(uint32_t index) const
1527 {
1528  std::unique_ptr<struct kvm_msrs> kvm_msrs(
1529  newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(1));
1530  struct kvm_msr_entry &entry(kvm_msrs->entries[0]);
1531 
1532  kvm_msrs->nmsrs = 1;
1533  entry.index = index;
1534  entry.reserved = 0;
1535  entry.data = 0;
1536 
1537  getMSRs(*kvm_msrs.get());
1538  return entry.data;
1539 }
1540 
1541 const Kvm::MSRIndexVector &
1542 X86KvmCPU::getMsrIntersection() const
1543 {
1544  if (cachedMsrIntersection.empty()) {
1545  const Kvm::MSRIndexVector &kvm_msrs(vm.kvm->getSupportedMSRs());
1546 
1547  DPRINTF(Kvm, "kvm-x86: Updating MSR intersection\n");
1548  for (auto it = kvm_msrs.cbegin(); it != kvm_msrs.cend(); ++it) {
1549  if (X86ISA::msrMap.find(*it) != X86ISA::msrMap.end()) {
1550  cachedMsrIntersection.push_back(*it);
1551  DPRINTF(Kvm, "kvm-x86: Adding MSR 0x%x\n", *it);
1552  } else {
1553  warn("kvm-x86: MSR (0x%x) unsupported by gem5. Skipping.\n",
1554  *it);
1555  }
1556  }
1557  }
1558 
1559  return cachedMsrIntersection;
1560 }
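// Illustrative example (hypothetical values): if KVM advertises
// { MSR_TSC, 0xC0000080 (EFER), 0x4b564d00 (a KVM paravirtual MSR) } and
// gem5's msrMap only knows the first two, the cached intersection becomes
// { MSR_TSC, 0xC0000080 } and the unknown MSR is warned about and skipped.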
1561 
1562 void
1563 X86KvmCPU::getDebugRegisters(struct kvm_debugregs &regs) const
1564 {
1565 #ifdef KVM_GET_DEBUGREGS
1566  if (ioctl(KVM_GET_DEBUGREGS, &regs) == -1)
1567  panic("KVM: Failed to get guest debug registers\n");
1568 #else
1569  panic("KVM: Unsupported getDebugRegisters call.\n");
1570 #endif
1571 }
1572 
1573 void
1574 X86KvmCPU::setDebugRegisters(const struct kvm_debugregs &regs)
1575 {
1576 #ifdef KVM_SET_DEBUGREGS
1577  if (ioctl(KVM_SET_DEBUGREGS, (void *)&regs) == -1)
1578  panic("KVM: Failed to set guest debug registers\n");
1579 #else
1580  panic("KVM: Unsupported setDebugRegisters call.\n");
1581 #endif
1582 }
1583 
1584 void
1585 X86KvmCPU::getXCRs(struct kvm_xcrs &regs) const
1586 {
1587  if (ioctl(KVM_GET_XCRS, &regs) == -1)
1588  panic("KVM: Failed to get guest XCRs\n");
1589 }
1590 
1591 void
1592 X86KvmCPU::setXCRs(const struct kvm_xcrs &regs)
1593 {
1594  if (ioctl(KVM_SET_XCRS, (void *)&regs) == -1)
1595  panic("KVM: Failed to set guest XCRs\n");
1596 }
1597 
1598 void
1599 X86KvmCPU::getXSave(struct kvm_xsave &xsave) const
1600 {
1601  if (ioctl(KVM_GET_XSAVE, &xsave) == -1)
1602  panic("KVM: Failed to get guest XSAVE state\n");
1603 }
1604 
1605 void
1606 X86KvmCPU::setXSave(const struct kvm_xsave &xsave)
1607 {
1608  if (ioctl(KVM_SET_XSAVE, (void *)&xsave) == -1)
1609  panic("KVM: Failed to set guest XSAVE state\n");
1610 }
1611 
1612 
1613 void
1614 X86KvmCPU::getVCpuEvents(struct kvm_vcpu_events &events) const
1615 {
1616  if (ioctl(KVM_GET_VCPU_EVENTS, &events) == -1)
1617  panic("KVM: Failed to get guest vCPU events\n");
1618 }
1619 
1620 void
1621 X86KvmCPU::setVCpuEvents(const struct kvm_vcpu_events &events)
1622 {
1623  if (ioctl(KVM_SET_VCPU_EVENTS, (void *)&events) == -1)
1624  panic("KVM: Failed to set guest vCPU events\n");
1625 }
1626 
1627 X86KvmCPU *
1628 X86KvmCPUParams::create()
1629 {
1630  return new X86KvmCPU(this);
1631 }