gem5  v20.1.0.0
x86_cpu.cc
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2013 Andreas Sandberg
3  * All rights reserved
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions are
7  * met: redistributions of source code must retain the above copyright
8  * notice, this list of conditions and the following disclaimer;
9  * redistributions in binary form must reproduce the above copyright
10  * notice, this list of conditions and the following disclaimer in the
11  * documentation and/or other materials provided with the distribution;
12  * neither the name of the copyright holders nor the names of its
13  * contributors may be used to endorse or promote products derived from
14  * this software without specific prior written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27  */
28 
29 #include "cpu/kvm/x86_cpu.hh"
30 
31 #include <linux/kvm.h>
32 
33 #include <algorithm>
34 #include <cerrno>
35 #include <memory>
36 
37 #include "arch/registers.hh"
38 #include "arch/x86/cpuid.hh"
39 #include "arch/x86/faults.hh"
40 #include "arch/x86/interrupts.hh"
41 #include "arch/x86/regs/msr.hh"
42 #include "arch/x86/utility.hh"
43 #include "cpu/kvm/base.hh"
44 #include "debug/Drain.hh"
45 #include "debug/Kvm.hh"
46 #include "debug/KvmContext.hh"
47 #include "debug/KvmIO.hh"
48 #include "debug/KvmInt.hh"
49 
50 using namespace X86ISA;
51 
52 #define MSR_TSC 0x10
53 
54 #define IO_PCI_CONF_ADDR 0xCF8
55 #define IO_PCI_CONF_DATA_BASE 0xCFC
56 
57 // Task segment type of an inactive 32-bit or 64-bit task
58 #define SEG_SYS_TYPE_TSS_AVAILABLE 9
59 // Task segment type of an active 32-bit or 64-bit task
60 #define SEG_SYS_TYPE_TSS_BUSY 11
61 
62 // Non-conforming accessed code segment
63 #define SEG_CS_TYPE_ACCESSED 9
64 // Non-conforming accessed code segment that can be read
65 #define SEG_CS_TYPE_READ_ACCESSED 11
66 
67 // The lowest bit of the type field for normal segments (code and
68 // data) is used to indicate that a segment has been accessed.
69 #define SEG_TYPE_BIT_ACCESSED 1
70 
/**
 * Memory layout of the 512-byte FXSAVE/FXRSTOR area (also the legacy
 * region of an XSAVE area) holding x87/MMX/SSE state.
 *
 * The 32-bit and 64-bit encodings of the last FPU instruction/data
 * pointers occupy the same bytes, hence the anonymous union.
 *
 * NOTE(review): the closing brace of this struct was missing from the
 * listing and has been restored. Upstream declares the struct packed;
 * all members are naturally aligned, so the layout is 512 bytes either
 * way, which the static_assert below verifies.
 */
struct FXSave
{
    uint16_t fcw;           // x87 control word
    uint16_t fsw;           // x87 status word (includes top-of-stack)
    uint8_t ftwx;           // abridged tag word: one valid bit per register
    uint8_t pad0;
    uint16_t last_opcode;   // last x87 opcode (FOP)
    union {
        // 32-bit format: segmented instruction/data pointers
        struct {
            uint32_t fpu_ip;
            uint16_t fpu_cs;
            uint16_t pad1;
            uint32_t fpu_dp;
            uint16_t fpu_ds;
            uint16_t pad2;
        } ctrl32;

        // 64-bit format: flat instruction/data pointers
        struct {
            uint64_t fpu_ip;
            uint64_t fpu_dp;
        } ctrl64;
    };
    uint32_t mxcsr;         // SSE control/status register
    uint32_t mxcsr_mask;    // writable bits of MXCSR

    uint8_t fpr[8][16];     // x87/MMX registers; 80 bits used per 128-bit slot
    uint8_t xmm[16][16];    // XMM0-XMM15

    uint64_t reserved[12];
};

static_assert(sizeof(FXSave) == 512, "Unexpected size of FXSave");
103 
104 #define FOREACH_IREG() \
105  do { \
106  APPLY_IREG(rax, INTREG_RAX); \
107  APPLY_IREG(rbx, INTREG_RBX); \
108  APPLY_IREG(rcx, INTREG_RCX); \
109  APPLY_IREG(rdx, INTREG_RDX); \
110  APPLY_IREG(rsi, INTREG_RSI); \
111  APPLY_IREG(rdi, INTREG_RDI); \
112  APPLY_IREG(rsp, INTREG_RSP); \
113  APPLY_IREG(rbp, INTREG_RBP); \
114  APPLY_IREG(r8, INTREG_R8); \
115  APPLY_IREG(r9, INTREG_R9); \
116  APPLY_IREG(r10, INTREG_R10); \
117  APPLY_IREG(r11, INTREG_R11); \
118  APPLY_IREG(r12, INTREG_R12); \
119  APPLY_IREG(r13, INTREG_R13); \
120  APPLY_IREG(r14, INTREG_R14); \
121  APPLY_IREG(r15, INTREG_R15); \
122  } while (0)
123 
124 #define FOREACH_SREG() \
125  do { \
126  APPLY_SREG(cr0, MISCREG_CR0); \
127  APPLY_SREG(cr2, MISCREG_CR2); \
128  APPLY_SREG(cr3, MISCREG_CR3); \
129  APPLY_SREG(cr4, MISCREG_CR4); \
130  APPLY_SREG(cr8, MISCREG_CR8); \
131  APPLY_SREG(efer, MISCREG_EFER); \
132  APPLY_SREG(apic_base, MISCREG_APIC_BASE); \
133  } while (0)
134 
135 #define FOREACH_DREG() \
136  do { \
137  APPLY_DREG(db[0], MISCREG_DR0); \
138  APPLY_DREG(db[1], MISCREG_DR1); \
139  APPLY_DREG(db[2], MISCREG_DR2); \
140  APPLY_DREG(db[3], MISCREG_DR3); \
141  APPLY_DREG(dr6, MISCREG_DR6); \
142  APPLY_DREG(dr7, MISCREG_DR7); \
143  } while (0)
144 
145 #define FOREACH_SEGMENT() \
146  do { \
147  APPLY_SEGMENT(cs, MISCREG_CS - MISCREG_SEG_SEL_BASE); \
148  APPLY_SEGMENT(ds, MISCREG_DS - MISCREG_SEG_SEL_BASE); \
149  APPLY_SEGMENT(es, MISCREG_ES - MISCREG_SEG_SEL_BASE); \
150  APPLY_SEGMENT(fs, MISCREG_FS - MISCREG_SEG_SEL_BASE); \
151  APPLY_SEGMENT(gs, MISCREG_GS - MISCREG_SEG_SEL_BASE); \
152  APPLY_SEGMENT(ss, MISCREG_SS - MISCREG_SEG_SEL_BASE); \
153  APPLY_SEGMENT(tr, MISCREG_TR - MISCREG_SEG_SEL_BASE); \
154  APPLY_SEGMENT(ldt, MISCREG_TSL - MISCREG_SEG_SEL_BASE); \
155  } while (0)
156 
157 #define FOREACH_DTABLE() \
158  do { \
159  APPLY_DTABLE(gdt, MISCREG_TSG - MISCREG_SEG_SEL_BASE); \
160  APPLY_DTABLE(idt, MISCREG_IDTR - MISCREG_SEG_SEL_BASE); \
161  } while (0)
162 
/**
 * Allocate a variable-length kernel structure: a fixed header STRUCT
 * immediately followed by a flexible array of 'entries' ENTRY items
 * (e.g., struct kvm_msrs and its kvm_msr_entry array).
 *
 * The caller owns the returned storage; it is released with operator
 * delete (e.g., through a std::unique_ptr).
 */
template<typename STRUCT, typename ENTRY>
static STRUCT *newVarStruct(size_t entries)
{
    const size_t total_size(sizeof(STRUCT) + entries * sizeof(ENTRY));
    return static_cast<STRUCT *>(operator new(total_size));
}
168 
/** Pretty-print the general-purpose register state fetched from KVM. */
static void
dumpKvm(const struct kvm_regs &regs)
{
    inform("KVM register state:\n");

    // Print each GP register; only the kvm_regs field name (kreg) is
    // used here, the gem5 index (mreg) is ignored.
#define APPLY_IREG(kreg, mreg) \
    inform("\t" # kreg ": 0x%llx\n", regs.kreg)

    FOREACH_IREG();

#undef APPLY_IREG

    inform("\trip: 0x%llx\n", regs.rip);
    inform("\trflags: 0x%llx\n", regs.rflags);
}
184 
/** Pretty-print one segment register in KVM's representation. */
static void
dumpKvm(const char *reg_name, const struct kvm_segment &seg)
{
    inform("\t%s: @0x%llx+%x [sel: 0x%x, type: 0x%x]\n"
           "\t\tpres.: %u, dpl: %u, db: %u, s: %u, l: %u, g: %u, avl: %u, unus.: %u\n",
           reg_name,
           seg.base, seg.limit, seg.selector, seg.type,
           seg.present, seg.dpl, seg.db, seg.s, seg.l, seg.g, seg.avl, seg.unusable);
}
194 
/** Pretty-print a descriptor table pointer (GDTR/IDTR): base + limit. */
static void
dumpKvm(const char *reg_name, const struct kvm_dtable &dtable)
{
    inform("\t%s: @0x%llx+%x\n",
           reg_name, dtable.base, dtable.limit);
}
201 
/**
 * Pretty-print the special register state fetched from KVM: segment
 * registers, control registers, descriptor tables, and the pending
 * interrupt bitmap.
 */
static void
dumpKvm(const struct kvm_sregs &sregs)
{
#define APPLY_SREG(kreg, mreg) \
    inform("\t" # kreg ": 0x%llx\n", sregs.kreg);
#define APPLY_SEGMENT(kreg, idx) \
    dumpKvm(# kreg, sregs.kreg);
#define APPLY_DTABLE(kreg, idx) \
    dumpKvm(# kreg, sregs.kreg);

    inform("Special registers:\n");
    FOREACH_SEGMENT();
    FOREACH_SREG();
    FOREACH_DTABLE();

    inform("Interrupt Bitmap:");
    // One 64-bit word of the bitmap is printed per iteration.
    for (int i = 0; i < KVM_NR_INTERRUPTS; i += 64)
        inform(" 0x%.8x", sregs.interrupt_bitmap[i / 64]);

#undef APPLY_SREG
#undef APPLY_SEGMENT
#undef APPLY_DTABLE
}
225 
#ifdef KVM_GET_DEBUGREGS
/**
 * Pretty-print hardware debug register state. Only compiled when the
 * host kernel headers expose the KVM_GET_DEBUGREGS ioctl.
 */
static void
dumpKvm(const struct kvm_debugregs &regs)
{
    inform("KVM debug state:\n");

#define APPLY_DREG(kreg, mreg) \
    inform("\t" # kreg ": 0x%llx\n", regs.kreg)

    FOREACH_DREG();

#undef APPLY_DREG

    inform("\tflags: 0x%llx\n", regs.flags);
}
#endif
242 
/**
 * Pretty-print the FXSave-specific FPU fields: the 64-bit encoding of
 * the last instruction/data pointers and the MXCSR write mask.
 */
static void
dumpFpuSpec(const struct FXSave &xs)
{
    inform("\tlast_ip: 0x%x\n", xs.ctrl64.fpu_ip);
    inform("\tlast_dp: 0x%x\n", xs.ctrl64.fpu_dp);
    inform("\tmxcsr_mask: 0x%x\n", xs.mxcsr_mask);
}
250 
/** Pretty-print the kvm_fpu-specific last instruction/data pointers. */
static void
dumpFpuSpec(const struct kvm_fpu &fpu)
{
    inform("\tlast_ip: 0x%x\n", fpu.last_ip);
    inform("\tlast_dp: 0x%x\n", fpu.last_dp);
}
257 
/**
 * Pretty-print the FPU/SSE state fields shared between the kvm_fpu
 * and FXSave representations. Fields that differ between the two
 * layouts are handled by the dumpFpuSpec() overloads.
 */
template<typename T>
static void
dumpFpuCommon(const T &fpu)
{
    // The top-of-stack index lives in bits 13:11 of the status word.
    const unsigned top((fpu.fsw >> 11) & 0x7);
    inform("\tfcw: 0x%x\n", fpu.fcw);

    inform("\tfsw: 0x%x (top: %i, "
           "conditions: %s%s%s%s, exceptions: %s%s%s%s%s%s %s%s%s)\n",
           fpu.fsw, top,

           (fpu.fsw & CC0Bit) ? "C0" : "",
           (fpu.fsw & CC1Bit) ? "C1" : "",
           (fpu.fsw & CC2Bit) ? "C2" : "",
           (fpu.fsw & CC3Bit) ? "C3" : "",

           (fpu.fsw & IEBit) ? "I" : "",
           (fpu.fsw & DEBit) ? "D" : "",
           (fpu.fsw & ZEBit) ? "Z" : "",
           (fpu.fsw & OEBit) ? "O" : "",
           (fpu.fsw & UEBit) ? "U" : "",
           (fpu.fsw & PEBit) ? "P" : "",

           (fpu.fsw & StackFaultBit) ? "SF " : "",
           (fpu.fsw & ErrSummaryBit) ? "ES " : "",
           (fpu.fsw & BusyBit) ? "BUSY " : ""
        );
    inform("\tftwx: 0x%x\n", fpu.ftwx);
    inform("\tlast_opcode: 0x%x\n", fpu.last_opcode);
    dumpFpuSpec(fpu);
    inform("\tmxcsr: 0x%x\n", fpu.mxcsr);
    inform("\tFP Stack:\n");
    for (int i = 0; i < 8; ++i) {
        // Physical register backing ST(i) given the current top.
        const unsigned reg_idx((i + top) & 0x7);
        // A cleared bit in the abridged tag word marks the slot empty.
        const bool empty(!((fpu.ftwx >> reg_idx) & 0x1));
        const double value(X86ISA::loadFloat80(fpu.fpr[i]));
        // Each x87 register is 10 bytes (80 bits), dumped as hex.
        char hex[33];
        for (int j = 0; j < 10; ++j)
            snprintf(&hex[j*2], 3, "%.2x", fpu.fpr[i][j]);
        inform("\t\tST%i/%i: 0x%s (%f)%s\n", i, reg_idx,
               hex, value, empty ? " (e)" : "");
    }
    inform("\tXMM registers:\n");
    for (int i = 0; i < 16; ++i) {
        char hex[33];
        for (int j = 0; j < 16; ++j)
            snprintf(&hex[j*2], 3, "%.2x", fpu.xmm[i][j]);
        inform("\t\t%i: 0x%s\n", i, hex);
    }
}
308 
/** Pretty-print FPU state in the legacy kvm_fpu representation. */
static void
dumpKvm(const struct kvm_fpu &fpu)
{
    inform("FPU registers:\n");
    dumpFpuCommon(fpu);
}
315 
/**
 * Pretty-print FPU state stored in an XSAVE area; the legacy region
 * at the start of kvm_xsave::region has the FXSave layout.
 */
static void
dumpKvm(const struct kvm_xsave &xsave)
{
    inform("FPU registers (XSave):\n");
    dumpFpuCommon(*(FXSave *)xsave.region);
}
322 
/** Pretty-print a batch of MSRs as index/value pairs. */
static void
dumpKvm(const struct kvm_msrs &msrs)
{
    inform("MSRs:\n");

    for (int i = 0; i < msrs.nmsrs; ++i) {
        const struct kvm_msr_entry &e(msrs.entries[i]);

        inform("\t0x%x: 0x%x\n", e.index, e.data);
    }
}
334 
/** Pretty-print extended control registers (XCRs) fetched from KVM. */
static void
dumpKvm(const struct kvm_xcrs &regs)
{
    inform("KVM XCR registers:\n");

    inform("\tFlags: 0x%x\n", regs.flags);
    for (int i = 0; i < regs.nr_xcrs; ++i) {
        inform("\tXCR[0x%x]: 0x%x\n",
               regs.xcrs[i].xcr,
               regs.xcrs[i].value);
    }
}
347 
/** Pretty-print pending vCPU event state (exceptions, interrupts, NMIs). */
static void
dumpKvm(const struct kvm_vcpu_events &events)
{
    inform("vCPU events:\n");

    inform("\tException: [inj: %i, nr: %i, has_ec: %i, ec: %i]\n",
           events.exception.injected, events.exception.nr,
           events.exception.has_error_code, events.exception.error_code);

    inform("\tInterrupt: [inj: %i, nr: %i, soft: %i]\n",
           events.interrupt.injected, events.interrupt.nr,
           events.interrupt.soft);

    inform("\tNMI: [inj: %i, pending: %i, masked: %i]\n",
           events.nmi.injected, events.nmi.pending,
           events.nmi.masked);

    inform("\tSIPI vector: 0x%x\n", events.sipi_vector);
    inform("\tFlags: 0x%x\n", events.flags);
}
368 
/**
 * Is this a canonical x86-64 virtual address?
 *
 * x86-64 doesn't currently use the full 64-bit virtual address
 * space, instead it uses signed 48 bit addresses that are
 * sign-extended to 64 bits. Such addresses are known as "canonical":
 * bits 63:47 are either all zeros or all ones.
 *
 * NOTE(review): the signature line was dropped from this listing and
 * has been restored from the call sites in checkSeg().
 */
static bool
isCanonicalAddress(uint64_t addr)
{
    // Mask off bits 63:47; the address is canonical iff they are
    // uniformly zero or uniformly one.
    uint64_t upper_half(addr & 0xffff800000000000ULL);
    return upper_half == 0 || upper_half == 0xffff800000000000ULL;
}
379 
/**
 * Sanity-check one segment register before the state is handed to KVM.
 *
 * Emits warnings (it never fails) when a segment would violate one of
 * the VM-entry guest-state checks that Intel VMX performs, which makes
 * a rejected KVM_SET_SREGS / failed vCPU entry easier to diagnose.
 *
 * @param name Human-readable segment name used in warnings.
 * @param idx gem5 misc-register index identifying which segment this is.
 * @param seg KVM representation of the segment being checked.
 * @param sregs Full special-register set, for cross-segment checks
 *              (e.g., CS.DPL vs. SS.DPL).
 */
static void
checkSeg(const char *name, const int idx, const struct kvm_segment &seg,
         struct kvm_sregs sregs)
{
    // Check the register base
    switch (idx) {
      case MISCREG_TSL:
      case MISCREG_TR:
      case MISCREG_FS:
      case MISCREG_GS:
        // 64-bit bases; must be canonical.
        if (!isCanonicalAddress(seg.base))
            warn("Illegal %s base: 0x%x\n", name, seg.base);
        break;

      case MISCREG_SS:
      case MISCREG_DS:
      case MISCREG_ES:
        if (seg.unusable)
            break;
        // NOTE(review): upstream marks an intentional fall-through
        // into the CS check here (M5_FALLTHROUGH); the annotation was
        // lost in this listing.
      case MISCREG_CS:
        // These bases must fit in the low 32 bits.
        if (seg.base & 0xffffffff00000000ULL)
            warn("Illegal %s base: 0x%x\n", name, seg.base);
        break;
    }

    // Check the type
    switch (idx) {
      case MISCREG_CS:
        switch (seg.type) {
          case 3:
            if (seg.dpl != 0)
                warn("CS type is 3 but dpl != 0.\n");
            break;
          case 9:
          case 11:
            // Non-conforming code: CS.DPL must equal SS.DPL.
            if (seg.dpl != sregs.ss.dpl)
                warn("CS type is %i but CS DPL != SS DPL\n", seg.type);
            break;
          case 13:
          case 15:
            // Conforming code: CS.DPL may not exceed SS.DPL.
            if (seg.dpl > sregs.ss.dpl)
                warn("CS type is %i but CS DPL > SS DPL\n", seg.type);
            break;
          default:
            warn("Illegal CS type: %i\n", seg.type);
            break;
        }
        break;

      case MISCREG_SS:
        if (seg.unusable)
            break;
        switch (seg.type) {
          case 3:
            if (sregs.cs.type == 3 && seg.dpl != 0)
                warn("CS type is 3, but SS DPL is != 0.\n");
            // NOTE(review): upstream marks an intentional fall-through
            // into case 7 here (M5_FALLTHROUGH); the annotation was
            // lost in this listing.
          case 7:
            // In real mode (CR0.PE == 0), SS.DPL must be 0.
            if (!(sregs.cr0 & 1) && seg.dpl != 0)
                warn("SS DPL is %i, but CR0 PE is 0\n", seg.dpl);
            break;
          default:
            warn("Illegal SS type: %i\n", seg.type);
            break;
        }
        break;

      case MISCREG_DS:
      case MISCREG_ES:
      case MISCREG_FS:
      case MISCREG_GS:
        if (seg.unusable)
            break;
        // Must be accessed (bit 0) and, if a code segment (bit 3),
        // must be readable (bit 1).
        if (!(seg.type & 0x1) ||
            ((seg.type & 0x8) && !(seg.type & 0x2)))
            warn("%s has an illegal type field: %i\n", name, seg.type);
        break;

      case MISCREG_TR:
        // TODO: We should check the CPU mode
        if (seg.type != 3 && seg.type != 11)
            warn("%s: Illegal segment type (%i)\n", name, seg.type);
        break;

      case MISCREG_TSL:
        if (seg.unusable)
            break;
        // An LDT descriptor must have system type 2.
        if (seg.type != 2)
            warn("%s: Illegal segment type (%i)\n", name, seg.type);
        break;
    }

    // Check the S (descriptor type) flag.
    switch (idx) {
      case MISCREG_SS:
      case MISCREG_DS:
      case MISCREG_ES:
      case MISCREG_FS:
      case MISCREG_GS:
        if (seg.unusable)
            break;
        // NOTE(review): upstream fall-through into the CS case
        // (M5_FALLTHROUGH); annotation lost in this listing.
      case MISCREG_CS:
        if (!seg.s)
            warn("%s: S flag not set\n", name);
        break;

      case MISCREG_TSL:
        if (seg.unusable)
            break;
        // NOTE(review): upstream fall-through into the TR case
        // (M5_FALLTHROUGH); annotation lost in this listing.
      case MISCREG_TR:
        if (seg.s)
            warn("%s: S flag is set\n", name);
        break;
    }

    // Check the P (present) flag and the limit/granularity pairing.
    switch (idx) {
      case MISCREG_SS:
      case MISCREG_DS:
      case MISCREG_ES:
      case MISCREG_FS:
      case MISCREG_GS:
      case MISCREG_TSL:
        if (seg.unusable)
            break;
        // NOTE(review): upstream fall-through (M5_FALLTHROUGH);
        // annotation lost in this listing.
      case MISCREG_TR:
      case MISCREG_CS:
        if (!seg.present)
            warn("%s: P flag not set\n", name);

        // With g set, the low 12 limit bits must all be ones; with g
        // clear, the high limit bits must all be zeros.
        if (((seg.limit & 0xFFF) == 0 && seg.g) ||
            ((seg.limit & 0xFFF00000) != 0 && !seg.g)) {
            warn("%s limit (0x%x) and g (%i) combination is illegal.\n",
                 name, seg.limit, seg.g);
        }
        break;
    }

    // TODO: Check CS DB
}
522 
/**
 * Construct an x86 KVM-accelerated CPU.
 *
 * Verifies that the host's KVM exposes the capabilities gem5 depends
 * on (hard requirements panic, optional ones only warn) and probes
 * the optional debug-register, XSAVE and XCR interfaces.
 */
X86KvmCPU::X86KvmCPU(X86KvmCPUParams *params)
    : BaseKvmCPU(params),
      useXSave(params->useXSave)
{
    Kvm &kvm(*vm.kvm);

    if (!kvm.capSetTSSAddress())
        panic("KVM: Missing capability (KVM_CAP_SET_TSS_ADDR)\n");
    if (!kvm.capExtendedCPUID())
        panic("KVM: Missing capability (KVM_CAP_EXT_CPUID)\n");
    if (!kvm.capUserNMI())
        warn("KVM: Missing capability (KVM_CAP_USER_NMI)\n");
    if (!kvm.capVCPUEvents())
        warn("KVM: Missing capability (KVM_CAP_VCPU_EVENTS)\n");

    haveDebugRegs = kvm.capDebugRegs();
    haveXSave = kvm.capXSave();
    haveXCRs = kvm.capXCRs();

    // Fall back to the legacy FPU interface when the user requested
    // XSAVE but the host cannot provide it.
    if (useXSave && !haveXSave) {
        warn("KVM: XSAVE not supported by host. MXCSR synchronization might be "
             "unreliable due to kernel bugs.\n");
        useXSave = false;
    } else if (!useXSave) {
        warn("KVM: XSave FPU/SIMD synchronization disabled by user.\n");
    }
}
550 
552 {
553 }
554 
555 void
557 {
559 
560  updateCPUID();
561 
562  // TODO: Do we need to create an identity mapped TSS area? We
563  // should call kvm.vm.setTSSAddress() here in that case. It should
564  // only be needed for old versions of the virtualization
565  // extensions. We should make sure that the identity range is
566  // reserved in the e820 memory map in that case.
567 }
568 
569 void
571 {
572  dumpIntRegs();
573  if (useXSave)
574  dumpXSave();
575  else
576  dumpFpuRegs();
577  dumpSpecRegs();
578  dumpDebugRegs();
579  dumpXCRs();
580  dumpVCpuEvents();
581  dumpMSRs();
582 }
583 
584 void
586 {
587  struct kvm_fpu fpu;
588  getFPUState(fpu);
589  dumpKvm(fpu);
590 }
591 
592 void
594 {
595  struct kvm_regs regs;
596  getRegisters(regs);
597  dumpKvm(regs);
598 }
599 
600 void
602 {
603  struct kvm_sregs sregs;
604  getSpecialRegisters(sregs);
605  dumpKvm(sregs);
606 }
607 
608 void
610 {
611  if (haveDebugRegs) {
612 #ifdef KVM_GET_DEBUGREGS
613  struct kvm_debugregs dregs;
614  getDebugRegisters(dregs);
615  dumpKvm(dregs);
616 #endif
617  } else {
618  inform("Debug registers not supported by kernel.\n");
619  }
620 }
621 
622 void
624 {
625  if (haveXCRs) {
626  struct kvm_xcrs xcrs;
627  getXCRs(xcrs);
628  dumpKvm(xcrs);
629  } else {
630  inform("XCRs not supported by kernel.\n");
631  }
632 }
633 
634 void
636 {
637  if (haveXSave) {
638  struct kvm_xsave xsave;
639  getXSave(xsave);
640  dumpKvm(xsave);
641  } else {
642  inform("XSave not supported by kernel.\n");
643  }
644 }
645 
646 void
648 {
649  struct kvm_vcpu_events events;
650  getVCpuEvents(events);
651  dumpKvm(events);
652 }
653 
654 void
656 {
657  const Kvm::MSRIndexVector &supported_msrs(vm.kvm->getSupportedMSRs());
658  std::unique_ptr<struct kvm_msrs> msrs(
659  newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(
660  supported_msrs.size()));
661 
662  msrs->nmsrs = supported_msrs.size();
663  for (int i = 0; i < supported_msrs.size(); ++i) {
664  struct kvm_msr_entry &e(msrs->entries[i]);
665  e.index = supported_msrs[i];
666  e.reserved = 0;
667  e.data = 0;
668  }
669  getMSRs(*msrs.get());
670 
671  dumpKvm(*msrs.get());
672 }
673 
674 void
676 {
681 
682  DPRINTF(KvmContext, "X86KvmCPU::updateKvmState():\n");
683  if (DTRACE(KvmContext))
684  dump();
685 }
686 
687 void
689 {
690  struct kvm_regs regs;
691 
692 #define APPLY_IREG(kreg, mreg) regs.kreg = tc->readIntReg(mreg)
693  FOREACH_IREG();
694 #undef APPLY_IREG
695 
696  regs.rip = tc->instAddr() - tc->readMiscReg(MISCREG_CS_BASE);
697 
698  /* You might think that setting regs.rflags to the contents
699  * MISCREG_RFLAGS here would suffice. In that case you're
700  * mistaken. We need to reconstruct it from a bunch of ucode
701  * registers and wave a dead chicken over it (aka mask out and set
702  * reserved bits) to get it to work.
703  */
704  regs.rflags = X86ISA::getRFlags(tc);
705 
706  setRegisters(regs);
707 }
708 
/**
 * Copy one segment register from the gem5 thread context into KVM's
 * representation (selector, base, limit and attribute bits).
 *
 * NOTE(review): this listing is missing a line at the top of the
 * body; upstream declares 'attr' here by reading the hidden segment
 * attributes (SegAttr attr(tc->readMiscRegNoEffect(
 * MISCREG_SEG_ATTR(index)))) — confirm against the full source.
 */
static inline void
setKvmSegmentReg(ThreadContext *tc, struct kvm_segment &kvm_seg,
                 const int index)
{

    kvm_seg.base = tc->readMiscRegNoEffect(MISCREG_SEG_BASE(index));
    kvm_seg.limit = tc->readMiscRegNoEffect(MISCREG_SEG_LIMIT(index));
    kvm_seg.selector = tc->readMiscRegNoEffect(MISCREG_SEG_SEL(index));
    kvm_seg.type = attr.type;
    kvm_seg.present = attr.present;
    kvm_seg.dpl = attr.dpl;
    kvm_seg.db = attr.defaultSize;
    kvm_seg.s = attr.system;
    kvm_seg.l = attr.longMode;
    kvm_seg.g = attr.granularity;
    kvm_seg.avl = attr.avl;

    // A segment is normally unusable when the selector is zero. There
    // is a attr.unusable flag in gem5, but it seems unused. qemu
    // seems to set this to 0 all the time, so we just do the same and
    // hope for the best.
    kvm_seg.unusable = 0;
}
733 
734 static inline void
735 setKvmDTableReg(ThreadContext *tc, struct kvm_dtable &kvm_dtable,
736  const int index)
737 {
738  kvm_dtable.base = tc->readMiscRegNoEffect(MISCREG_SEG_BASE(index));
739  kvm_dtable.limit = tc->readMiscRegNoEffect(MISCREG_SEG_LIMIT(index));
740 }
741 
742 static void
743 forceSegAccessed(struct kvm_segment &seg)
744 {
745  // Intel's VMX requires that (some) usable segments are flagged as
746  // 'accessed' (i.e., the lowest bit in the segment type is set)
747  // when entering VMX. This wouldn't necessary be the case even if
748  // gem5 did set the access bits correctly, so we force it to one
749  // in that case.
750  if (!seg.unusable)
751  seg.type |= SEG_TYPE_BIT_ACCESSED;
752 }
753 
754 void
756 {
757  struct kvm_sregs sregs;
758 
759 #define APPLY_SREG(kreg, mreg) sregs.kreg = tc->readMiscRegNoEffect(mreg)
760 #define APPLY_SEGMENT(kreg, idx) setKvmSegmentReg(tc, sregs.kreg, idx)
761 #define APPLY_DTABLE(kreg, idx) setKvmDTableReg(tc, sregs.kreg, idx)
762 
763  FOREACH_SREG();
764  FOREACH_SEGMENT();
765  FOREACH_DTABLE();
766 
767 #undef APPLY_SREG
768 #undef APPLY_SEGMENT
769 #undef APPLY_DTABLE
770 
771  // Clear the interrupt bitmap
772  memset(&sregs.interrupt_bitmap, 0, sizeof(sregs.interrupt_bitmap));
773 
774  // VMX requires CS, SS, DS, ES, FS, and GS to have the accessed
775  // bit in the type field set.
776  forceSegAccessed(sregs.cs);
777  forceSegAccessed(sregs.ss);
778  forceSegAccessed(sregs.ds);
779  forceSegAccessed(sregs.es);
780  forceSegAccessed(sregs.fs);
781  forceSegAccessed(sregs.gs);
782 
783  // There are currently some cases where the active task isn't
784  // marked as busy. This is illegal in VMX, so we force it to busy.
785  if (sregs.tr.type == SEG_SYS_TYPE_TSS_AVAILABLE) {
786  hack("tr.type (%i) is not busy. Forcing the busy bit.\n",
787  sregs.tr.type);
788  sregs.tr.type = SEG_SYS_TYPE_TSS_BUSY;
789  }
790 
791  // VMX requires the DPL of SS and CS to be the same for
792  // non-conforming code segments. It seems like m5 doesn't set the
793  // DPL of SS correctly when taking interrupts, so we need to fix
794  // that here.
795  if ((sregs.cs.type == SEG_CS_TYPE_ACCESSED ||
796  sregs.cs.type == SEG_CS_TYPE_READ_ACCESSED) &&
797  sregs.cs.dpl != sregs.ss.dpl) {
798 
799  hack("CS.DPL (%i) != SS.DPL (%i): Forcing SS.DPL to %i\n",
800  sregs.cs.dpl, sregs.ss.dpl, sregs.cs.dpl);
801  sregs.ss.dpl = sregs.cs.dpl;
802  }
803 
804  // Do checks after fixing up the state to avoid getting excessive
805  // amounts of warnings.
806  RFLAGS rflags_nocc(tc->readMiscReg(MISCREG_RFLAGS));
807  if (!rflags_nocc.vm) {
808  // Do segment verification if the CPU isn't entering virtual
809  // 8086 mode. We currently assume that unrestricted guest
810  // mode is available.
811 
812 #define APPLY_SEGMENT(kreg, idx) \
813  checkSeg(# kreg, idx + MISCREG_SEG_SEL_BASE, sregs.kreg, sregs)
814 
815  FOREACH_SEGMENT();
816 #undef APPLY_SEGMENT
817  }
818 
819  setSpecialRegisters(sregs);
820 }
821 
822 template <typename T>
823 static void
825 {
826  fpu.mxcsr = tc->readMiscRegNoEffect(MISCREG_MXCSR);
827  fpu.fcw = tc->readMiscRegNoEffect(MISCREG_FCW);
828  // No need to rebuild from MISCREG_FSW and MISCREG_TOP if we read
829  // with effects.
830  fpu.fsw = tc->readMiscReg(MISCREG_FSW);
831 
832  uint64_t ftw(tc->readMiscRegNoEffect(MISCREG_FTW));
833  fpu.ftwx = X86ISA::convX87TagsToXTags(ftw);
834 
835  fpu.last_opcode = tc->readMiscRegNoEffect(MISCREG_FOP);
836 
837  const unsigned top((fpu.fsw >> 11) & 0x7);
838  for (int i = 0; i < 8; ++i) {
839  const unsigned reg_idx((i + top) & 0x7);
840  const double value(bitsToFloat64(
841  tc->readFloatReg(FLOATREG_FPR(reg_idx))));
842  DPRINTF(KvmContext, "Setting KVM FP reg %i (st[%i]) := %f\n",
843  reg_idx, i, value);
844  X86ISA::storeFloat80(fpu.fpr[i], value);
845  }
846 
847  // TODO: We should update the MMX state
848 
849  for (int i = 0; i < 16; ++i) {
850  *(uint64_t *)&fpu.xmm[i][0] =
852  *(uint64_t *)&fpu.xmm[i][8] =
854  }
855 }
856 
857 void
859 {
860  struct kvm_fpu fpu;
861 
862  // There is some padding in the FP registers, so we'd better zero
863  // the whole struct.
864  memset(&fpu, 0, sizeof(fpu));
865 
867 
869  warn_once("MISCREG_FISEG is non-zero.\n");
870 
871  fpu.last_ip = tc->readMiscRegNoEffect(MISCREG_FIOFF);
872 
874  warn_once("MISCREG_FOSEG is non-zero.\n");
875 
876  fpu.last_dp = tc->readMiscRegNoEffect(MISCREG_FOOFF);
877 
878  setFPUState(fpu);
879 }
880 
881 void
883 {
884  struct kvm_xsave kxsave;
885  FXSave &xsave(*(FXSave *)kxsave.region);
886 
887  // There is some padding and reserved fields in the structure, so
888  // we'd better zero the whole thing.
889  memset(&kxsave, 0, sizeof(kxsave));
890 
891  updateKvmStateFPUCommon(tc, xsave);
892 
894  warn_once("MISCREG_FISEG is non-zero.\n");
895 
896  xsave.ctrl64.fpu_ip = tc->readMiscRegNoEffect(MISCREG_FIOFF);
897 
899  warn_once("MISCREG_FOSEG is non-zero.\n");
900 
901  xsave.ctrl64.fpu_dp = tc->readMiscRegNoEffect(MISCREG_FOOFF);
902 
903  setXSave(kxsave);
904 }
905 
906 void
908 {
909  if (useXSave)
911  else
913 }
914 
915 void
917 {
918  KvmMSRVector msrs;
919 
920  const Kvm::MSRIndexVector &indices(getMsrIntersection());
921 
922  for (auto it = indices.cbegin(); it != indices.cend(); ++it) {
923  struct kvm_msr_entry e;
924 
925  e.index = *it;
926  e.reserved = 0;
927  e.data = tc->readMiscReg(msrMap.at(*it));
928  DPRINTF(KvmContext, "Adding MSR: idx: 0x%x, data: 0x%x\n",
929  e.index, e.data);
930 
931  msrs.push_back(e);
932  }
933 
934  setMSRs(msrs);
935 }
936 
937 void
939 {
940  struct kvm_regs regs;
941  struct kvm_sregs sregs;
942 
943  getRegisters(regs);
944  getSpecialRegisters(sregs);
945 
946  DPRINTF(KvmContext, "X86KvmCPU::updateThreadContext():\n");
947  if (DTRACE(KvmContext))
948  dump();
949 
950  updateThreadContextRegs(regs, sregs);
952  if (useXSave) {
953  struct kvm_xsave xsave;
954  getXSave(xsave);
955 
957  } else {
958  struct kvm_fpu fpu;
959  getFPUState(fpu);
960 
962  }
964 
965  // The M5 misc reg caches some values from other
966  // registers. Writing to it with side effects causes it to be
967  // updated from its source registers.
969 }
970 
/**
 * Copy the general-purpose register state from KVM into the gem5
 * thread context.
 */
void
X86KvmCPU::updateThreadContextRegs(const struct kvm_regs &regs,
                                   const struct kvm_sregs &sregs)
{
#define APPLY_IREG(kreg, mreg) tc->setIntReg(mreg, regs.kreg)

    FOREACH_IREG();

#undef APPLY_IREG

    // KVM's RIP is relative to the CS base; gem5 tracks a linear PC.
    tc->pcState(PCState(regs.rip + sregs.cs.base));

    // Flags are spread out across multiple semi-magic registers so we
    // need some special care when updating them.
    X86ISA::setRFlags(tc, regs.rflags);
}
987 
988 
989 inline void
990 setContextSegment(ThreadContext *tc, const struct kvm_segment &kvm_seg,
991  const int index)
992 {
993  SegAttr attr(0);
994 
995  attr.type = kvm_seg.type;
996  attr.present = kvm_seg.present;
997  attr.dpl = kvm_seg.dpl;
998  attr.defaultSize = kvm_seg.db;
999  attr.system = kvm_seg.s;
1000  attr.longMode = kvm_seg.l;
1001  attr.granularity = kvm_seg.g;
1002  attr.avl = kvm_seg.avl;
1003  attr.unusable = kvm_seg.unusable;
1004 
1005  // We need some setMiscReg magic here to keep the effective base
1006  // addresses in sync. We need an up-to-date version of EFER, so
1007  // make sure this is called after the sregs have been synced.
1008  tc->setMiscReg(MISCREG_SEG_BASE(index), kvm_seg.base);
1009  tc->setMiscReg(MISCREG_SEG_LIMIT(index), kvm_seg.limit);
1010  tc->setMiscReg(MISCREG_SEG_SEL(index), kvm_seg.selector);
1012 }
1013 
/**
 * Copy a descriptor table pointer (GDTR/IDTR) from KVM into the gem5
 * thread context.
 */
inline void
setContextSegment(ThreadContext *tc, const struct kvm_dtable &kvm_dtable,
                  const int index)
{
    // We need some setMiscReg magic here to keep the effective base
    // addresses in sync. We need an up-to-date version of EFER, so
    // make sure this is called after the sregs have been synced.
    tc->setMiscReg(MISCREG_SEG_BASE(index), kvm_dtable.base);
    tc->setMiscReg(MISCREG_SEG_LIMIT(index), kvm_dtable.limit);
}
1024 
/**
 * Copy the special register state (control registers, segments,
 * descriptor tables) from KVM into the gem5 thread context.
 */
void
X86KvmCPU::updateThreadContextSRegs(const struct kvm_sregs &sregs)
{
    // APIC base and CR8 are kept in sync through the kvm_run shared
    // area, so the sregs snapshot must already agree with it.
    assert(getKvmRunState()->apic_base == sregs.apic_base);
    assert(getKvmRunState()->cr8 == sregs.cr8);

    // Plain control registers are written without side effects;
    // segments and tables go through setContextSegment(), which uses
    // setMiscReg to keep derived state in sync.
#define APPLY_SREG(kreg, mreg) tc->setMiscRegNoEffect(mreg, sregs.kreg)
#define APPLY_SEGMENT(kreg, idx) setContextSegment(tc, sregs.kreg, idx)
#define APPLY_DTABLE(kreg, idx) setContextSegment(tc, sregs.kreg, idx)
    FOREACH_SREG();
    FOREACH_SEGMENT();
    FOREACH_DTABLE();
#undef APPLY_SREG
#undef APPLY_SEGMENT
#undef APPLY_DTABLE
}
1041 
1042 template<typename T>
1043 static void
1045 {
1046  const unsigned top((fpu.fsw >> 11) & 0x7);
1047 
1048  for (int i = 0; i < 8; ++i) {
1049  const unsigned reg_idx((i + top) & 0x7);
1050  const double value(X86ISA::loadFloat80(fpu.fpr[i]));
1051  DPRINTF(KvmContext, "Setting gem5 FP reg %i (st[%i]) := %f\n",
1052  reg_idx, i, value);
1053  tc->setFloatReg(FLOATREG_FPR(reg_idx), floatToBits64(value));
1054  }
1055 
1056  // TODO: We should update the MMX state
1057 
1059  tc->setMiscRegNoEffect(MISCREG_MXCSR, fpu.mxcsr);
1060  tc->setMiscRegNoEffect(MISCREG_FCW, fpu.fcw);
1061  tc->setMiscRegNoEffect(MISCREG_FSW, fpu.fsw);
1062 
1063  uint64_t ftw(convX87XTagsToTags(fpu.ftwx));
1064  // TODO: Are these registers really the same?
1065  tc->setMiscRegNoEffect(MISCREG_FTW, ftw);
1066  tc->setMiscRegNoEffect(MISCREG_FTAG, ftw);
1067 
1068  tc->setMiscRegNoEffect(MISCREG_FOP, fpu.last_opcode);
1069 
1070  for (int i = 0; i < 16; ++i) {
1071  tc->setFloatReg(FLOATREG_XMM_LOW(i), *(uint64_t *)&fpu.xmm[i][0]);
1072  tc->setFloatReg(FLOATREG_XMM_HIGH(i), *(uint64_t *)&fpu.xmm[i][8]);
1073  }
1074 }
1075 
1076 void
1077 X86KvmCPU::updateThreadContextFPU(const struct kvm_fpu &fpu)
1078 {
1080 
1082  tc->setMiscRegNoEffect(MISCREG_FIOFF, fpu.last_ip);
1084  tc->setMiscRegNoEffect(MISCREG_FOOFF, fpu.last_dp);
1085 }
1086 
1087 void
1088 X86KvmCPU::updateThreadContextXSave(const struct kvm_xsave &kxsave)
1089 {
1090  const FXSave &xsave(*(const FXSave *)kxsave.region);
1091 
1093 
1095  tc->setMiscRegNoEffect(MISCREG_FIOFF, xsave.ctrl64.fpu_ip);
1097  tc->setMiscRegNoEffect(MISCREG_FOOFF, xsave.ctrl64.fpu_dp);
1098 }
1099 
1100 void
1102 {
1103  const Kvm::MSRIndexVector &msrs(getMsrIntersection());
1104 
1105  std::unique_ptr<struct kvm_msrs> kvm_msrs(
1106  newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(msrs.size()));
1107  struct kvm_msr_entry *entry;
1108 
1109  // Create a list of MSRs to read
1110  kvm_msrs->nmsrs = msrs.size();
1111  entry = &kvm_msrs->entries[0];
1112  for (auto it = msrs.cbegin(); it != msrs.cend(); ++it, ++entry) {
1113  entry->index = *it;
1114  entry->reserved = 0;
1115  entry->data = 0;
1116  }
1117 
1118  getMSRs(*kvm_msrs.get());
1119 
1120  // Update M5's state
1121  entry = &kvm_msrs->entries[0];
1122  for (int i = 0; i < kvm_msrs->nmsrs; ++i, ++entry) {
1123  DPRINTF(KvmContext, "Setting M5 MSR: idx: 0x%x, data: 0x%x\n",
1124  entry->index, entry->data);
1125 
1126  tc->setMiscReg(X86ISA::msrMap.at(entry->index), entry->data);
1127  }
1128 }
1129 
1130 void
1132 {
1133  Fault fault;
1134 
1136 
1137  {
1138  // Migrate to the interrupt controller's thread to get the
1139  // interrupt. Even though the individual methods are safe to
1140  // call across threads, we might still lose interrupts unless
1141  // they are getInterrupt() and updateIntrInfo() are called
1142  // atomically.
1143  EventQueue::ScopedMigration migrate(interrupts[0]->eventQueue());
1144  fault = interrupts[0]->getInterrupt();
1145  interrupts[0]->updateIntrInfo();
1146  }
1147 
1148  X86Interrupt *x86int(dynamic_cast<X86Interrupt *>(fault.get()));
1149  if (dynamic_cast<NonMaskableInterrupt *>(fault.get())) {
1150  DPRINTF(KvmInt, "Delivering NMI\n");
1152  } else if (dynamic_cast<InitInterrupt *>(fault.get())) {
1153  DPRINTF(KvmInt, "INIT interrupt\n");
1154  fault.get()->invoke(tc);
1155  // Delay the kvm state update since we won't enter KVM on this
1156  // tick.
1157  threadContextDirty = true;
1158  // HACK: gem5 doesn't actually have any BIOS code, which means
1159  // that we need to halt the thread and wait for a startup
1160  // interrupt before restarting the thread. The simulated CPUs
1161  // use the same kind of hack using a microcode routine.
1162  thread->suspend();
1163  } else if (dynamic_cast<StartupInterrupt *>(fault.get())) {
1164  DPRINTF(KvmInt, "STARTUP interrupt\n");
1165  fault.get()->invoke(tc);
1166  // The kvm state is assumed to have been updated when entering
1167  // kvmRun(), so we need to update manually it here.
1168  updateKvmState();
1169  } else if (x86int) {
1170  struct kvm_interrupt kvm_int;
1171  kvm_int.irq = x86int->getVector();
1172 
1173  DPRINTF(KvmInt, "Delivering interrupt: %s (%u)\n",
1174  fault->name(), kvm_int.irq);
1175 
1176  kvmInterrupt(kvm_int);
1177  } else {
1178  panic("KVM: Unknown interrupt type\n");
1179  }
1180 
1181 }
1182 
1183 Tick
1185 {
1186  struct kvm_run &kvm_run(*getKvmRunState());
1187 
1188  auto *lapic = dynamic_cast<X86ISA::Interrupts *>(interrupts[0]);
1189 
1190  if (lapic->checkInterruptsRaw()) {
1191  if (lapic->hasPendingUnmaskable()) {
1192  DPRINTF(KvmInt,
1193  "Delivering unmaskable interrupt.\n");
1196  } else if (kvm_run.ready_for_interrupt_injection) {
1197  // KVM claims that it is ready for an interrupt. It might
1198  // be lying if we just updated rflags and disabled
1199  // interrupts (e.g., by doing a CPU handover). Let's sync
1200  // the thread context and check if there are /really/
1201  // interrupts that should be delivered now.
1203  if (lapic->checkInterrupts()) {
1204  DPRINTF(KvmInt,
1205  "M5 has pending interrupts, delivering interrupt.\n");
1206 
1208  } else {
1209  DPRINTF(KvmInt,
1210  "Interrupt delivery delayed due to KVM confusion.\n");
1211  kvm_run.request_interrupt_window = 1;
1212  }
1213  } else if (!kvm_run.request_interrupt_window) {
1214  DPRINTF(KvmInt,
1215  "M5 has pending interrupts, requesting interrupt "
1216  "window.\n");
1217  kvm_run.request_interrupt_window = 1;
1218  }
1219  } else {
1220  kvm_run.request_interrupt_window = 0;
1221  }
1222 
1223  // The CPU might have been suspended as a result of the INIT
1224  // interrupt delivery hack. In that case, don't enter into KVM.
1225  if (_status == Idle)
1226  return 0;
1227  else
1228  return kvmRunWrapper(ticks);
1229 }
1230 
1231 Tick
1233 {
1234  struct kvm_run &kvm_run(*getKvmRunState());
1235 
1236  if (!archIsDrained()) {
1237  DPRINTF(Drain, "kvmRunDrain: Architecture code isn't drained\n");
1238 
1239  // Tell KVM to find a suitable place to deliver interrupts. This
1240  // should ensure that pending interrupts have been delivered and
1241  // things are reasonably consistent (i.e., no interrupts pending
1242  // in the guest).
1243  kvm_run.request_interrupt_window = 1;
1244 
1245  // Limit the run to 1 millisecond. That is hopefully enough to
1246  // reach an interrupt window. Otherwise, we'll just try again
1247  // later.
1248  return kvmRunWrapper(1 * SimClock::Float::ms);
1249  } else {
1250  DPRINTF(Drain, "kvmRunDrain: Delivering pending IO\n");
1251 
1252  return kvmRunWrapper(0);
1253  }
1254 }
1255 
1256 Tick
1258 {
1259  struct kvm_run &kvm_run(*getKvmRunState());
1260 
1261  // Synchronize the APIC base and CR8 here since they are present
1262  // in the kvm_run struct, which makes the synchronization really
1263  // cheap.
1264  kvm_run.apic_base = tc->readMiscReg(MISCREG_APIC_BASE);
1265  kvm_run.cr8 = tc->readMiscReg(MISCREG_CR8);
1266 
1267  const Tick run_ticks(BaseKvmCPU::kvmRun(ticks));
1268 
1269  tc->setMiscReg(MISCREG_APIC_BASE, kvm_run.apic_base);
1270  kvm_run.cr8 = tc->readMiscReg(MISCREG_CR8);
1271 
1272  return run_ticks;
1273 }
1274 
1275 uint64_t
1277 {
1278  return getMSR(MSR_TSC);
1279 }
1280 
1281 void
1283 {
1284  struct kvm_run &kvm_run(*getKvmRunState());
1285  const uint16_t port(kvm_run.io.port);
1286 
1287  assert(kvm_run.exit_reason == KVM_EXIT_IO);
1288 
1289  if (kvm_run.io.size != 4) {
1290  panic("Unexpected IO size (%u) for address 0x%x.\n",
1291  kvm_run.io.size, port);
1292  }
1293 
1294  if (kvm_run.io.count != 1) {
1295  panic("Unexpected IO count (%u) for address 0x%x.\n",
1296  kvm_run.io.count, port);
1297  }
1298 
1299  uint32_t *data((uint32_t *)getGuestData(kvm_run.io.data_offset));
1300  if (kvm_run.io.direction == KVM_EXIT_IO_OUT)
1301  tc->setMiscReg(miscreg, *data);
1302  else
1303  *data = tc->readMiscRegNoEffect(miscreg);
1304 }
1305 
1306 Tick
1308 {
1309  struct kvm_run &kvm_run(*getKvmRunState());
1310  bool isWrite(kvm_run.io.direction == KVM_EXIT_IO_OUT);
1311  unsigned char *guestData(getGuestData(kvm_run.io.data_offset));
1312  Tick delay(0);
1313  uint16_t port(kvm_run.io.port);
1314  Addr pAddr;
1315  const int count(kvm_run.io.count);
1316 
1317  assert(kvm_run.io.direction == KVM_EXIT_IO_IN ||
1318  kvm_run.io.direction == KVM_EXIT_IO_OUT);
1319 
1320  DPRINTF(KvmIO, "KVM-x86: Handling IO instruction (%s) (port: 0x%x)\n",
1321  (isWrite ? "out" : "in"), kvm_run.io.port);
1322 
1323  /* Vanilla gem5 handles PCI discovery in the TLB(!). Since we
1324  * don't use the TLB component, we need to intercept and handle
1325  * the PCI configuration space IO ports here.
1326  *
1327  * The IO port PCI discovery mechanism uses one address register
1328  * and one data register. We map the address register to a misc
1329  * reg and use that to re-route data register accesses to the
1330  * right location in the PCI configuration space.
1331  */
1332  if (port == IO_PCI_CONF_ADDR) {
1334  return 0;
1335  } else if ((port & ~0x3) == IO_PCI_CONF_DATA_BASE) {
1337  if (pciConfigAddr & 0x80000000) {
1338  pAddr = X86ISA::x86PciConfigAddress((pciConfigAddr & 0x7ffffffc) |
1339  (port & 0x3));
1340  } else {
1341  pAddr = X86ISA::x86IOAddress(port);
1342  }
1343  } else {
1344  pAddr = X86ISA::x86IOAddress(port);
1345  }
1346 
1347  const MemCmd cmd(isWrite ? MemCmd::WriteReq : MemCmd::ReadReq);
1348  // Temporarily lock and migrate to the device event queue to
1349  // prevent races in multi-core mode.
1351  for (int i = 0; i < count; ++i) {
1352  RequestPtr io_req = std::make_shared<Request>(
1353  pAddr, kvm_run.io.size,
1355 
1356  io_req->setContext(tc->contextId());
1357 
1358  PacketPtr pkt = new Packet(io_req, cmd);
1359 
1360  pkt->dataStatic(guestData);
1361  delay += dataPort.submitIO(pkt);
1362 
1363  guestData += kvm_run.io.size;
1364  }
1365 
1366  return delay;
1367 }
1368 
1369 Tick
1371 {
1372  // We don't need to do anything here since this is caught the next
1373  // time we execute kvmRun(). We still overload the exit event to
1374  // silence the warning about an unhandled exit event.
1375  return 0;
1376 }
1377 
1378 bool
1380 {
1381  struct kvm_vcpu_events events;
1382 
1383  getVCpuEvents(events);
1384 
1385  // We could probably handle this in a by re-inserting interrupts
1386  // that are pending into gem5 on a drain. However, that would
1387  // probably be tricky to do reliably, so we'll just prevent a
1388  // drain if there is anything pending in the
1389  // guest. X86KvmCPU::kvmRunDrain() minimizes the amount of code
1390  // executed in the guest by requesting an interrupt window if
1391  // there are pending interrupts.
1392  const bool pending_events(events.exception.injected ||
1393  events.interrupt.injected ||
1394  events.nmi.injected || events.nmi.pending);
1395 
1396  if (pending_events) {
1397  DPRINTF(Drain, "archIsDrained: Pending events: %s %s %s %s\n",
1398  events.exception.injected ? "exception" : "",
1399  events.interrupt.injected ? "interrupt" : "",
1400  events.nmi.injected ? "nmi[i]" : "",
1401  events.nmi.pending ? "nmi[p]" : "");
1402  }
1403 
1404  return !pending_events;
1405 }
1406 
1407 static struct kvm_cpuid_entry2
1408 makeKvmCpuid(uint32_t function, uint32_t index,
1409  CpuidResult &result)
1410 {
1411  struct kvm_cpuid_entry2 e;
1412  e.function = function;
1413  e.index = index;
1414  e.flags = 0;
1415  e.eax = (uint32_t)result.rax;
1416  e.ebx = (uint32_t)result.rbx;
1417  e.ecx = (uint32_t)result.rcx;
1418  e.edx = (uint32_t)result.rdx;
1419 
1420  return e;
1421 }
1422 
1423 void
1425 {
1426  Kvm::CPUIDVector m5_supported;
1427 
1428  /* TODO: We currently don't support any of the functions that
1429  * iterate through data structures in the CPU using an index. It's
1430  * currently not a problem since M5 doesn't expose any of them at
1431  * the moment.
1432  */
1433 
1434  /* Basic features */
1435  CpuidResult func0;
1436  X86ISA::doCpuid(tc, 0x0, 0, func0);
1437  for (uint32_t function = 0; function <= func0.rax; ++function) {
1439  uint32_t idx(0);
1440 
1441  X86ISA::doCpuid(tc, function, idx, cpuid);
1442  m5_supported.push_back(makeKvmCpuid(function, idx, cpuid));
1443  }
1444 
1445  /* Extended features */
1446  CpuidResult efunc0;
1447  X86ISA::doCpuid(tc, 0x80000000, 0, efunc0);
1448  for (uint32_t function = 0x80000000; function <= efunc0.rax; ++function) {
1450  uint32_t idx(0);
1451 
1452  X86ISA::doCpuid(tc, function, idx, cpuid);
1453  m5_supported.push_back(makeKvmCpuid(function, idx, cpuid));
1454  }
1455 
1456  setCPUID(m5_supported);
1457 }
1458 
1459 void
1460 X86KvmCPU::setCPUID(const struct kvm_cpuid2 &cpuid)
1461 {
1462  if (ioctl(KVM_SET_CPUID2, (void *)&cpuid) == -1)
1463  panic("KVM: Failed to set guest CPUID2 (errno: %i)\n",
1464  errno);
1465 }
1466 
1467 void
1468 X86KvmCPU::setCPUID(const Kvm::CPUIDVector &cpuid)
1469 {
1470  std::unique_ptr<struct kvm_cpuid2> kvm_cpuid(
1471  newVarStruct<struct kvm_cpuid2, struct kvm_cpuid_entry2>(cpuid.size()));
1472 
1473  kvm_cpuid->nent = cpuid.size();
1474  std::copy(cpuid.begin(), cpuid.end(), kvm_cpuid->entries);
1475 
1476  setCPUID(*kvm_cpuid);
1477 }
1478 
1479 void
1480 X86KvmCPU::setMSRs(const struct kvm_msrs &msrs)
1481 {
1482  if (ioctl(KVM_SET_MSRS, (void *)&msrs) == -1)
1483  panic("KVM: Failed to set guest MSRs (errno: %i)\n",
1484  errno);
1485 }
1486 
1487 void
1489 {
1490  std::unique_ptr<struct kvm_msrs> kvm_msrs(
1491  newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(msrs.size()));
1492 
1493  kvm_msrs->nmsrs = msrs.size();
1494  std::copy(msrs.begin(), msrs.end(), kvm_msrs->entries);
1495 
1496  setMSRs(*kvm_msrs);
1497 }
1498 
1499 void
1500 X86KvmCPU::getMSRs(struct kvm_msrs &msrs) const
1501 {
1502  if (ioctl(KVM_GET_MSRS, (void *)&msrs) == -1)
1503  panic("KVM: Failed to get guest MSRs (errno: %i)\n",
1504  errno);
1505 }
1506 
1507 
1508 void
1509 X86KvmCPU::setMSR(uint32_t index, uint64_t value)
1510 {
1511  std::unique_ptr<struct kvm_msrs> kvm_msrs(
1512  newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(1));
1513  struct kvm_msr_entry &entry(kvm_msrs->entries[0]);
1514 
1515  kvm_msrs->nmsrs = 1;
1516  entry.index = index;
1517  entry.reserved = 0;
1518  entry.data = value;
1519 
1520  setMSRs(*kvm_msrs.get());
1521 }
1522 
1523 uint64_t
1524 X86KvmCPU::getMSR(uint32_t index) const
1525 {
1526  std::unique_ptr<struct kvm_msrs> kvm_msrs(
1527  newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(1));
1528  struct kvm_msr_entry &entry(kvm_msrs->entries[0]);
1529 
1530  kvm_msrs->nmsrs = 1;
1531  entry.index = index;
1532  entry.reserved = 0;
1533  entry.data = 0;
1534 
1535  getMSRs(*kvm_msrs.get());
1536  return entry.data;
1537 }
1538 
1539 const Kvm::MSRIndexVector &
1541 {
1542  if (cachedMsrIntersection.empty()) {
1543  const Kvm::MSRIndexVector &kvm_msrs(vm.kvm->getSupportedMSRs());
1544 
1545  DPRINTF(Kvm, "kvm-x86: Updating MSR intersection\n");
1546  for (auto it = kvm_msrs.cbegin(); it != kvm_msrs.cend(); ++it) {
1547  if (X86ISA::msrMap.find(*it) != X86ISA::msrMap.end()) {
1548  cachedMsrIntersection.push_back(*it);
1549  DPRINTF(Kvm, "kvm-x86: Adding MSR 0x%x\n", *it);
1550  } else {
1551  warn("kvm-x86: MSR (0x%x) unsupported by gem5. Skipping.\n",
1552  *it);
1553  }
1554  }
1555  }
1556 
1557  return cachedMsrIntersection;
1558 }
1559 
1560 void
1561 X86KvmCPU::getDebugRegisters(struct kvm_debugregs &regs) const
1562 {
1563 #ifdef KVM_GET_DEBUGREGS
1564  if (ioctl(KVM_GET_DEBUGREGS, &regs) == -1)
1565  panic("KVM: Failed to get guest debug registers\n");
1566 #else
1567  panic("KVM: Unsupported getDebugRegisters call.\n");
1568 #endif
1569 }
1570 
1571 void
1572 X86KvmCPU::setDebugRegisters(const struct kvm_debugregs &regs)
1573 {
1574 #ifdef KVM_SET_DEBUGREGS
1575  if (ioctl(KVM_SET_DEBUGREGS, (void *)&regs) == -1)
1576  panic("KVM: Failed to set guest debug registers\n");
1577 #else
1578  panic("KVM: Unsupported setDebugRegisters call.\n");
1579 #endif
1580 }
1581 
1582 void
1583 X86KvmCPU::getXCRs(struct kvm_xcrs &regs) const
1584 {
1585  if (ioctl(KVM_GET_XCRS, &regs) == -1)
1586  panic("KVM: Failed to get guest debug registers\n");
1587 }
1588 
1589 void
1590 X86KvmCPU::setXCRs(const struct kvm_xcrs &regs)
1591 {
1592  if (ioctl(KVM_SET_XCRS, (void *)&regs) == -1)
1593  panic("KVM: Failed to set guest debug registers\n");
1594 }
1595 
1596 void
1597 X86KvmCPU::getXSave(struct kvm_xsave &xsave) const
1598 {
1599  if (ioctl(KVM_GET_XSAVE, &xsave) == -1)
1600  panic("KVM: Failed to get guest debug registers\n");
1601 }
1602 
1603 void
1604 X86KvmCPU::setXSave(const struct kvm_xsave &xsave)
1605 {
1606  if (ioctl(KVM_SET_XSAVE, (void *)&xsave) == -1)
1607  panic("KVM: Failed to set guest debug registers\n");
1608 }
1609 
1610 
1611 void
1612 X86KvmCPU::getVCpuEvents(struct kvm_vcpu_events &events) const
1613 {
1614  if (ioctl(KVM_GET_VCPU_EVENTS, &events) == -1)
1615  panic("KVM: Failed to get guest debug registers\n");
1616 }
1617 
1618 void
1619 X86KvmCPU::setVCpuEvents(const struct kvm_vcpu_events &events)
1620 {
1621  if (ioctl(KVM_SET_VCPU_EVENTS, (void *)&events) == -1)
1622  panic("KVM: Failed to set guest debug registers\n");
1623 }
1624 
// Factory method invoked by gem5's Python-driven configuration
// system to instantiate the CPU model from its parameter struct.
X86KvmCPU *
X86KvmCPUParams::create()
{
    return new X86KvmCPU(this);
}
KvmVM::kvm
Kvm * kvm
Global KVM interface.
Definition: vm.hh:409
X86ISA::MISCREG_FOSEG
@ MISCREG_FOSEG
Definition: misc.hh:387
X86KvmCPU
x86 implementation of a KVM-based hardware virtualized CPU.
Definition: x86_cpu.hh:39
X86ISA::MISCREG_M5_REG
@ MISCREG_M5_REG
Definition: misc.hh:137
X86KvmCPU::handleKvmExitIO
Tick handleKvmExitIO() override
Handle x86 legacy IO (in/out)
Definition: x86_cpu.cc:1307
ThreadContext::readMiscRegNoEffect
virtual RegVal readMiscRegNoEffect(RegIndex misc_reg) const =0
forceSegAccessed
static void forceSegAccessed(struct kvm_segment &seg)
Definition: x86_cpu.cc:743
X86ISA::convX87TagsToXTags
uint8_t convX87TagsToXTags(uint16_t ftw)
Convert an x87 tag word to abridged tag format.
Definition: utility.cc:142
BaseKvmCPU::kvmNonMaskableInterrupt
void kvmNonMaskableInterrupt()
Send a non-maskable interrupt to the guest.
Definition: base.cc:782
X86KvmCPU::getVCpuEvents
void getVCpuEvents(struct kvm_vcpu_events &events) const
Definition: x86_cpu.cc:1612
X86KvmCPU::updateThreadContextFPU
void updateThreadContextFPU(const struct kvm_fpu &fpu)
Update FPU and SIMD registers using the legacy API.
Definition: x86_cpu.cc:1077
makeKvmCpuid
static struct kvm_cpuid_entry2 makeKvmCpuid(uint32_t function, uint32_t index, CpuidResult &result)
Definition: x86_cpu.cc:1408
Kvm
KVM parent interface.
Definition: vm.hh:72
warn
#define warn(...)
Definition: logging.hh:239
X86KvmCPU::getMsrIntersection
const Kvm::MSRIndexVector & getMsrIntersection() const
Get a list of MSRs supported by both gem5 and KVM.
Definition: x86_cpu.cc:1540
X86ISA::StartupInterrupt
Definition: faults.hh:357
X86KvmCPU::archIsDrained
bool archIsDrained() const override
Check if there are pending events in the vCPU that prevents it from being drained.
Definition: x86_cpu.cc:1379
X86ISA::MISCREG_FOP
@ MISCREG_FOP
Definition: misc.hh:389
X86ISA::MISCREG_APIC_BASE
@ MISCREG_APIC_BASE
Definition: misc.hh:393
x86_cpu.hh
Kvm::capXCRs
bool capXCRs() const
Support for getting and setting the x86 XCRs.
Definition: vm.cc:175
X86KvmCPU::updateThreadContext
void updateThreadContext() override
Update the current thread context with the KVM state.
Definition: x86_cpu.cc:938
BaseKvmCPU::vm
KvmVM & vm
Definition: base.hh:150
BaseKvmCPU::dataPort
KVMCpuPort dataPort
Port for data requests.
Definition: base.hh:611
data
const char data[]
Definition: circlebuf.test.cc:42
X86KvmCPU::getXSave
void getXSave(struct kvm_xsave &xsave) const
Definition: x86_cpu.cc:1597
MipsISA::cpuid
Bitfield< 28, 21 > cpuid
Definition: dt_constants.hh:92
MipsISA::index
Bitfield< 30, 0 > index
Definition: pra_constants.hh:44
X86ISA::ErrSummaryBit
@ ErrSummaryBit
Definition: misc.hh:92
X86ISA::DEBit
@ DEBit
Definition: misc.hh:84
X86KvmCPU::haveXSave
bool haveXSave
Kvm::capXSave() available?
Definition: x86_cpu.hh:242
X86KvmCPU::dumpVCpuEvents
void dumpVCpuEvents() const
Definition: x86_cpu.cc:647
FXSave::fsw
uint16_t fsw
Definition: x86_cpu.cc:74
warn_once
#define warn_once(...)
Definition: logging.hh:243
ArmISA::i
Bitfield< 7 > i
Definition: miscregs_types.hh:63
X86ISA::CpuidResult::rax
uint64_t rax
Definition: cpuid.hh:40
X86KvmCPU::startup
void startup() override
Definition: x86_cpu.cc:556
FXSave::mxcsr
uint32_t mxcsr
Definition: x86_cpu.cc:93
X86ISA::MISCREG_X87_TOP
@ MISCREG_X87_TOP
Definition: misc.hh:377
X86ISA::MISCREG_TSL
@ MISCREG_TSL
Definition: misc.hh:303
X86KvmCPU::updateThreadContextMSRs
void updateThreadContextMSRs()
Update MSR registers.
Definition: x86_cpu.cc:1101
FXSave::fpu_ds
uint16_t fpu_ds
Definition: x86_cpu.cc:84
X86ISA::MISCREG_ES
@ MISCREG_ES
Definition: misc.hh:296
X86ISA::MISCREG_FISEG
@ MISCREG_FISEG
Definition: misc.hh:385
X86KvmCPU::dumpIntRegs
void dumpIntRegs() const
Definition: x86_cpu.cc:593
X86ISA::MISCREG_CS
@ MISCREG_CS
Definition: misc.hh:297
BaseKvmCPU::_status
Status _status
CPU run state.
Definition: base.hh:229
X86ISA::MISCREG_SEG_LIMIT
static MiscRegIndex MISCREG_SEG_LIMIT(int index)
Definition: misc.hh:526
BaseKvmCPU::getGuestData
uint8_t * getGuestData(uint64_t offset) const
Retrieve a pointer to guest data stored at the end of the kvm_run structure.
Definition: base.hh:307
X86KvmCPU::getMSR
uint64_t getMSR(uint32_t index) const
Definition: x86_cpu.cc:1524
M5_ATTR_PACKED
struct FXSave M5_ATTR_PACKED
MemCmd::ReadReq
@ ReadReq
Definition: packet.hh:82
X86KvmCPU::dumpFpuRegs
void dumpFpuRegs() const
Definition: x86_cpu.cc:585
X86KvmCPU::updateThreadContextRegs
void updateThreadContextRegs(const struct kvm_regs &regs, const struct kvm_sregs &sregs)
Support routines to update the state of gem5's thread context from KVM's state representation.
Definition: x86_cpu.cc:972
Tick
uint64_t Tick
Tick count type.
Definition: types.hh:63
Kvm::capDebugRegs
bool capDebugRegs() const
Support for getting and setting the kvm_debugregs structure.
Definition: vm.cc:165
BaseKvmCPU::setFPUState
void setFPUState(const struct kvm_fpu &state)
Definition: base.cc:833
interrupts.hh
BaseKvmCPU::setRegisters
void setRegisters(const struct kvm_regs &regs)
Definition: base.cc:805
SimpleThread::suspend
void suspend() override
Set the status to Suspended.
Definition: simple_thread.cc:148
X86ISA::loadFloat80
double loadFloat80(const void *_mem)
Load an 80-bit float from memory and convert it to double.
Definition: utility.cc:208
X86KvmCPU::dumpXSave
void dumpXSave() const
Definition: x86_cpu.cc:635
top
Definition: test.h:61
RequestPtr
std::shared_ptr< Request > RequestPtr
Definition: request.hh:82
X86ISA::BusyBit
@ BusyBit
Definition: misc.hh:97
DTRACE
#define DTRACE(x)
Definition: debug.hh:146
X86ISA::CC0Bit
@ CC0Bit
Definition: misc.hh:93
X86KvmCPU::setCPUID
void setCPUID(const struct kvm_cpuid2 &cpuid)
Methods to access CPUID information using the extended API.
Definition: x86_cpu.cc:1460
std::vector
STL vector class.
Definition: stl.hh:37
IO_PCI_CONF_ADDR
#define IO_PCI_CONF_ADDR
Definition: x86_cpu.cc:54
X86ISA::count
count
Definition: misc.hh:703
newVarStruct
static STRUCT * newVarStruct(size_t entries)
Definition: x86_cpu.cc:164
X86ISA::convX87XTagsToTags
uint16_t convX87XTagsToTags(uint8_t ftwx)
Convert an x87 xtag word to normal tags format.
Definition: utility.cc:167
FXSave::fcw
uint16_t fcw
Definition: x86_cpu.cc:73
X86KvmCPU::dump
void dump() const override
Dump the internal state to the terminal.
Definition: x86_cpu.cc:570
faults.hh
X86KvmCPU::kvmRunWrapper
Tick kvmRunWrapper(Tick ticks)
Wrapper that synchronizes state in kvm_run.
Definition: x86_cpu.cc:1257
FXSave::ctrl64
struct FXSave::@29::@32 ctrl64
FXSave::fpu_ip
uint32_t fpu_ip
Definition: x86_cpu.cc:80
BaseKvmCPU::KVMCpuPort::submitIO
Tick submitIO(PacketPtr pkt)
Interface to send Atomic or Timing IO request.
Definition: base.cc:171
FOREACH_IREG
#define FOREACH_IREG()
Definition: x86_cpu.cc:104
X86KvmCPU::updateKvmStateMSRs
void updateKvmStateMSRs()
Update MSR registers.
Definition: x86_cpu.cc:916
hack
#define hack(...)
Definition: logging.hh:241
X86KvmCPU::setMSR
void setMSR(uint32_t index, uint64_t value)
Definition: x86_cpu.cc:1509
Kvm::capSetTSSAddress
bool capSetTSSAddress() const
Support for KvmVM::setTSSAddress()
Definition: vm.cc:101
X86KvmCPU::handleKvmExitIRQWindowOpen
Tick handleKvmExitIRQWindowOpen() override
The guest exited because an interrupt window was requested.
Definition: x86_cpu.cc:1370
X86ISA::MISCREG_SEG_SEL
static MiscRegIndex MISCREG_SEG_SEL(int index)
Definition: misc.hh:505
FOREACH_DTABLE
#define FOREACH_DTABLE()
Definition: x86_cpu.cc:157
X86KvmCPU::setMSRs
void setMSRs(const struct kvm_msrs &msrs)
Methods to access MSRs in the guest.
Definition: x86_cpu.cc:1480
X86KvmCPU::deliverInterrupts
void deliverInterrupts()
Inject pending interrupts from gem5 into the virtual CPU.
Definition: x86_cpu.cc:1131
MemCmd::WriteReq
@ WriteReq
Definition: packet.hh:85
BaseKvmCPU::kvmInterrupt
void kvmInterrupt(const struct kvm_interrupt &interrupt)
Send a normal interrupt to the guest.
Definition: base.cc:790
ThreadContext::readFloatReg
virtual RegVal readFloatReg(RegIndex reg_idx) const =0
X86ISA::storeFloat80
void storeFloat80(void *_mem, double value)
Convert and store a double as an 80-bit float.
Definition: utility.cc:217
checkSeg
static void checkSeg(const char *name, const int idx, const struct kvm_segment &seg, struct kvm_sregs sregs)
Definition: x86_cpu.cc:381
FOREACH_DREG
#define FOREACH_DREG()
Definition: x86_cpu.cc:135
isCanonicalAddress
static bool isCanonicalAddress(uint64_t addr)
Definition: x86_cpu.cc:370
updateKvmStateFPUCommon
static void updateKvmStateFPUCommon(ThreadContext *tc, T &fpu)
Definition: x86_cpu.cc:824
X86KvmCPU::haveDebugRegs
bool haveDebugRegs
Kvm::capDebugRegs() available?
Definition: x86_cpu.hh:240
X86ISA::MISCREG_DS
@ MISCREG_DS
Definition: misc.hh:299
ArmISA::j
Bitfield< 24 > j
Definition: miscregs_types.hh:54
X86ISA::NonMaskableInterrupt
Definition: faults.hh:182
BaseKvmCPU::syncThreadContext
void syncThreadContext()
Update a thread context if the KVM state is dirty with respect to the cached thread context.
Definition: base.cc:922
BaseKvmCPU
Base class for KVM based CPU models.
Definition: base.hh:77
dumpFpuCommon
static void dumpFpuCommon(const T &fpu)
Definition: x86_cpu.cc:260
X86KvmCPU::dumpDebugRegs
void dumpDebugRegs() const
Definition: x86_cpu.cc:609
BaseKvmCPU::thread
SimpleThread * thread
A cached copy of a thread's state in the form of a SimpleThread object.
Definition: base.hh:143
ThreadContext
ThreadContext is the external interface to all thread state for anything outside of the CPU.
Definition: thread_context.hh:88
setContextSegment
void setContextSegment(ThreadContext *tc, const struct kvm_segment &kvm_seg, const int index)
Definition: x86_cpu.cc:990
X86ISA::FLOATREG_FPR
static FloatRegIndex FLOATREG_FPR(int index)
Definition: float.hh:123
X86KvmCPU::kvmRunDrain
Tick kvmRunDrain() override
Run the virtual CPU until draining completes.
Definition: x86_cpu.cc:1232
MSR_TSC
#define MSR_TSC
Definition: x86_cpu.cc:52
X86KvmCPU::getHostCycles
uint64_t getHostCycles() const override
Get the value of the hardware cycle counter in the guest.
Definition: x86_cpu.cc:1276
FXSave::fpu_ip
uint64_t fpu_ip
Definition: x86_cpu.cc:89
DPRINTF
#define DPRINTF(x,...)
Definition: trace.hh:234
X86ISA::MISCREG_RFLAGS
@ MISCREG_RFLAGS
Definition: misc.hh:134
SEG_TYPE_BIT_ACCESSED
#define SEG_TYPE_BIT_ACCESSED
Definition: x86_cpu.cc:69
MemCmd
Definition: packet.hh:71
Fault
std::shared_ptr< FaultBase > Fault
Definition: types.hh:240
msr.hh
FXSave::fpu_dp
uint32_t fpu_dp
Definition: x86_cpu.cc:83
BaseCPU::interrupts
std::vector< BaseInterrupts * > interrupts
Definition: base.hh:215
X86KvmCPU::getXCRs
void getXCRs(struct kvm_xcrs &regs) const
Definition: x86_cpu.cc:1583
X86ISA::PEBit
@ PEBit
Definition: misc.hh:88
X86ISA::IEBit
@ IEBit
Definition: misc.hh:83
X86KvmCPU::kvmRun
Tick kvmRun(Tick ticks) override
Request KVM to run the guest for a given number of ticks.
Definition: x86_cpu.cc:1184
BaseKvmCPU::startup
void startup() override
Definition: base.cc:117
SEG_SYS_TYPE_TSS_BUSY
#define SEG_SYS_TYPE_TSS_BUSY
Definition: x86_cpu.cc:60
X86ISA::OEBit
@ OEBit
Definition: misc.hh:86
X86KvmCPU::X86KvmCPU
X86KvmCPU(X86KvmCPUParams *params)
Definition: x86_cpu.cc:523
X86ISA::CC1Bit
@ CC1Bit
Definition: misc.hh:94
BaseKvmCPU::kvmRun
virtual Tick kvmRun(Tick ticks)
Request KVM to run the guest for a given number of ticks.
Definition: base.cc:684
ArmISA::attr
attr
Definition: miscregs_types.hh:649
BaseKvmCPU::getKvmRunState
struct kvm_run * getKvmRunState()
Get a pointer to the kvm_run structure containing all the input and output parameters from kvmRun().
Definition: base.hh:297
X86KvmCPU::dumpXCRs
void dumpXCRs() const
Definition: x86_cpu.cc:623
utility.hh
M5_FALLTHROUGH
#define M5_FALLTHROUGH
Definition: compiler.hh:84
ThreadContext::contextId
virtual ContextID contextId() const =0
X86ISA::MISCREG_SEG_ATTR
static MiscRegIndex MISCREG_SEG_ATTR(int index)
Definition: misc.hh:533
X86ISA::InitInterrupt
Definition: faults.hh:346
FXSave::fpu_cs
uint16_t fpu_cs
Definition: x86_cpu.cc:81
X86KvmCPU::handleIOMiscReg32
void handleIOMiscReg32(int miscreg)
Handle a 32-bit IO access that should be mapped to a MiscReg.
Definition: x86_cpu.cc:1282
X86ISA::setRFlags
void setRFlags(ThreadContext *tc, uint64_t val)
Set update the rflags register and internal gem5 state.
Definition: utility.cc:126
RiscvISA::xs
Bitfield< 16, 15 > xs
Definition: registers.hh:611
X86ISA::FLOATREG_XMM_HIGH
static FloatRegIndex FLOATREG_XMM_HIGH(int index)
Definition: float.hh:135
Request::UNCACHEABLE
@ UNCACHEABLE
The request is to an uncacheable address.
Definition: request.hh:114
setKvmDTableReg
static void setKvmDTableReg(ThreadContext *tc, struct kvm_dtable &kvm_dtable, const int index)
Definition: x86_cpu.cc:735
BaseKvmCPU::deviceEventQueue
EventQueue * deviceEventQueue()
Get a pointer to the event queue owning devices.
Definition: base.hh:427
IO_PCI_CONF_DATA_BASE
#define IO_PCI_CONF_DATA_BASE
Definition: x86_cpu.cc:55
X86KvmCPU::setDebugRegisters
void setDebugRegisters(const struct kvm_debugregs &regs)
Definition: x86_cpu.cc:1572
X86KvmCPU::updateKvmStateFPULegacy
void updateKvmStateFPULegacy()
Update FPU and SIMD registers using the legacy API.
Definition: x86_cpu.cc:858
X86ISA::X86FaultBase::getVector
virtual uint8_t getVector() const
Get the vector of an interrupt.
Definition: faults.hh:82
X86ISA::MISCREG_PCI_CONFIG_ADDRESS
@ MISCREG_PCI_CONFIG_ADDRESS
Definition: misc.hh:396
BaseKvmCPU::getFPUState
void getFPUState(struct kvm_fpu &state) const
Get/Set the guest FPU/vector state.
Definition: base.cc:826
SEG_CS_TYPE_ACCESSED
#define SEG_CS_TYPE_ACCESSED
Definition: x86_cpu.cc:63
X86ISA::X86Interrupt
Definition: faults.hh:115
dumpFpuSpec
static void dumpFpuSpec(const struct FXSave &xs)
Definition: x86_cpu.cc:244
X86ISA
This is exposed globally, independent of the ISA.
Definition: acpi.hh:55
SEG_CS_TYPE_READ_ACCESSED
#define SEG_CS_TYPE_READ_ACCESSED
Definition: x86_cpu.cc:65
bitsToFloat64
static double bitsToFloat64(uint64_t val)
Definition: types.hh:210
ProbePoints::Packet
ProbePointArg< PacketInfo > Packet
Packet probe point.
Definition: mem.hh:103
X86KvmCPU::updateCPUID
void updateCPUID()
Transfer gem5's CPUID values into the virtual CPU.
Definition: x86_cpu.cc:1424
Addr
uint64_t Addr
Address type This will probably be moved somewhere else in the near future.
Definition: types.hh:142
X86KvmCPU::getMSRs
void getMSRs(struct kvm_msrs &msrs) const
Definition: x86_cpu.cc:1500
X86KvmCPU::useXSave
bool useXSave
Should the XSave interface be used to sync the FPU and SIMD registers?
Definition: x86_cpu.hh:247
X86KvmCPU::updateThreadContextXSave
void updateThreadContextXSave(const struct kvm_xsave &kxsave)
Update FPU and SIMD registers using the XSave API.
Definition: x86_cpu.cc:1088
name
const std::string & name()
Definition: trace.cc:50
base.hh
FXSave::ftwx
uint8_t ftwx
Definition: x86_cpu.cc:75
X86ISA::MISCREG_FS
@ MISCREG_FS
Definition: misc.hh:300
X86ISA::x86PciConfigAddress
static Addr x86PciConfigAddress(const uint32_t addr)
Definition: x86_traits.hh:87
FXSave::fpu_dp
uint64_t fpu_dp
Definition: x86_cpu.cc:90
BaseKvmCPU::ioctl
int ioctl(int request, long p1) const
vCPU ioctl interface.
Definition: base.cc:1132
SimClock::Float::ms
double ms
millisecond
Definition: core.cc:50
BaseKvmCPU::threadContextDirty
bool threadContextDirty
Is the gem5 context dirty? Set to true to force an update of the KVM vCPU state upon the next call to...
Definition: base.hh:626
X86ISA::StackFaultBit
@ StackFaultBit
Definition: misc.hh:91
X86KvmCPU::updateKvmStateFPU
void updateKvmStateFPU()
Update FPU and SIMD registers.
Definition: x86_cpu.cc:907
X86ISA::MISCREG_FTW
@ MISCREG_FTW
Definition: misc.hh:383
ArmISA::e
Bitfield< 9 > e
Definition: miscregs_types.hh:61
ThreadContext::pcState
virtual TheISA::PCState pcState() const =0
FXSave::pad0
uint8_t pad0
Definition: x86_cpu.cc:76
X86ISA::getRFlags
uint64_t getRFlags(ThreadContext *tc)
Reconstruct the rflags register from the internal gem5 register state.
Definition: utility.cc:110
X86ISA::addr
Bitfield< 3 > addr
Definition: types.hh:79
X86KvmCPU::setXSave
void setXSave(const struct kvm_xsave &xsave)
Definition: x86_cpu.cc:1604
EventQueue::ScopedMigration
Definition: eventq.hh:665
X86ISA::MISCREG_SS
@ MISCREG_SS
Definition: misc.hh:298
inform
#define inform(...)
Definition: logging.hh:240
FOREACH_SEGMENT
#define FOREACH_SEGMENT()
Definition: x86_cpu.cc:145
setKvmSegmentReg
static void setKvmSegmentReg(ThreadContext *tc, struct kvm_segment &kvm_seg, const int index)
Definition: x86_cpu.cc:710
X86ISA::FLOATREG_XMM_LOW
static FloatRegIndex FLOATREG_XMM_LOW(int index)
Definition: float.hh:129
BaseKvmCPU::Idle
@ Idle
Context not scheduled in KVM.
Definition: base.hh:188
Kvm::capUserNMI
bool capUserNMI() const
Support for BaseKvmCPU::kvmNonMaskableInterrupt().
Definition: vm.cc:113
X86KvmCPU::setVCpuEvents
void setVCpuEvents(const struct kvm_vcpu_events &events)
Definition: x86_cpu.cc:1619
X86KvmCPU::~X86KvmCPU
virtual ~X86KvmCPU()
Definition: x86_cpu.cc:551
X86KvmCPU::dumpSpecRegs
void dumpSpecRegs() const
Definition: x86_cpu.cc:601
ThreadContext::setFloatReg
virtual void setFloatReg(RegIndex reg_idx, RegVal val)=0
X86KvmCPU::dumpMSRs
void dumpMSRs() const
Definition: x86_cpu.cc:655
BaseKvmCPU::tc
ThreadContext * tc
ThreadContext object, provides an interface for external objects to modify this thread's state.
Definition: base.hh:148
MipsISA::PCState
GenericISA::DelaySlotPCState< MachInst > PCState
Definition: types.hh:41
X86KvmCPU::getDebugRegisters
void getDebugRegisters(struct kvm_debugregs &regs) const
Wrappers around KVM's state transfer methods.
Definition: x86_cpu.cc:1561
X86ISA::doCpuid
bool doCpuid(ThreadContext *tc, uint32_t function, uint32_t index, CpuidResult &result)
Definition: cpuid.cc:87
Packet::dataStatic
void dataStatic(T *p)
Set the data pointer to the following value that should not be freed.
Definition: packet.hh:1107
dumpKvm
static void dumpKvm(const struct kvm_regs &regs)
Definition: x86_cpu.cc:170
Packet
A Packet is used to encapsulate a transfer between two objects in the memory system (e....
Definition: packet.hh:257
BaseKvmCPU::getSpecialRegisters
void getSpecialRegisters(struct kvm_sregs &regs) const
Definition: base.cc:812
ThreadContext::readMiscReg
virtual RegVal readMiscReg(RegIndex misc_reg)=0
X86ISA::MISCREG_FIOFF
@ MISCREG_FIOFF
Definition: misc.hh:386
X86KvmCPU::haveXCRs
bool haveXCRs
Kvm::capXCRs() available?
Definition: x86_cpu.hh:249
FXSave
Definition: x86_cpu.cc:71
ThreadContext::setMiscReg
virtual void setMiscReg(RegIndex misc_reg, RegVal val)=0
X86KvmCPU::updateKvmStateSRegs
void updateKvmStateSRegs()
Update control registers (CRx, segments, etc.)
Definition: x86_cpu.cc:755
updateThreadContextFPUCommon
static void updateThreadContextFPUCommon(ThreadContext *tc, const T &fpu)
Definition: x86_cpu.cc:1044
X86KvmCPU::updateKvmStateRegs
void updateKvmStateRegs()
Support routines to update the state of the KVM CPU from gem5's state representation.
Definition: x86_cpu.cc:688
cpuid.hh
FXSave::pad1
uint16_t pad1
Definition: x86_cpu.cc:82
BaseCPU::dataRequestorId
RequestorID dataRequestorId() const
Reads this CPU's unique data requestor ID.
Definition: base.hh:184
X86ISA::MISCREG_TR
@ MISCREG_TR
Definition: misc.hh:307
FXSave::last_opcode
uint16_t last_opcode
Definition: x86_cpu.cc:77
X86ISA::CC2Bit
@ CC2Bit
Definition: misc.hh:95
X86ISA::MISCREG_FTAG
@ MISCREG_FTAG
Definition: misc.hh:384
X86ISA::e
Bitfield< 11 > e
Definition: misc.hh:753
X86ISA::MISCREG_SEG_BASE
static MiscRegIndex MISCREG_SEG_BASE(int index)
Definition: misc.hh:512
X86ISA::UEBit
@ UEBit
Definition: misc.hh:87
ArmISA::at
Bitfield< 35, 32 > at
Definition: miscregs_types.hh:151
X86ISA::CC3Bit
@ CC3Bit
Definition: misc.hh:96
BaseKvmCPU::setSpecialRegisters
void setSpecialRegisters(const struct kvm_sregs &regs)
Definition: base.cc:819
X86ISA::MISCREG_MXCSR
@ MISCREG_MXCSR
Definition: misc.hh:380
X86KvmCPU::updateKvmState
void updateKvmState() override
Update the KVM state from the current thread context.
Definition: x86_cpu.cc:675
X86ISA::ZEBit
@ ZEBit
Definition: misc.hh:85
ThreadContext::setMiscRegNoEffect
virtual void setMiscRegNoEffect(RegIndex misc_reg, RegVal val)=0
BaseKvmCPU::getRegisters
void getRegisters(struct kvm_regs &regs) const
Get/Set the register state of the guest vCPU.
Definition: base.cc:798
X86KvmCPU::updateKvmStateFPUXSave
void updateKvmStateFPUXSave()
Update FPU and SIMD registers using the XSave API.
Definition: x86_cpu.cc:882
FXSave::mxcsr_mask
uint32_t mxcsr_mask
Definition: x86_cpu.cc:94
X86ISA::MISCREG_FSW
@ MISCREG_FSW
Definition: misc.hh:382
X86ISA::MISCREG_GS
@ MISCREG_GS
Definition: misc.hh:301
Kvm::capXSave
bool capXSave() const
Support for getting and setting the kvm_xsave structure.
Definition: vm.cc:185
SEG_SYS_TYPE_TSS_AVAILABLE
#define SEG_SYS_TYPE_TSS_AVAILABLE
Definition: x86_cpu.cc:58
X86KvmCPU::setXCRs
void setXCRs(const struct kvm_xcrs &regs)
Definition: x86_cpu.cc:1590
X86ISA::MISCREG_CS_BASE
@ MISCREG_CS_BASE
Definition: misc.hh:313
Kvm::capExtendedCPUID
bool capExtendedCPUID() const
Support for BaseKvmCPU::setCPUID2 and getSupportedCPUID().
Definition: vm.cc:107
X86ISA::x86IOAddress
static Addr x86IOAddress(const uint32_t port)
Definition: x86_traits.hh:81
X86ISA::MISCREG_CR8
@ MISCREG_CR8
Definition: misc.hh:113
X86ISA::Interrupts
Definition: interrupts.hh:74
X86ISA::MISCREG_FOOFF
@ MISCREG_FOOFF
Definition: misc.hh:388
ThreadContext::instAddr
virtual Addr instAddr() const =0
X86ISA::msrMap
const MsrMap msrMap
Map between MSR addresses and their corresponding misc registers.
FOREACH_SREG
#define FOREACH_SREG()
Definition: x86_cpu.cc:124
X86KvmCPU::updateThreadContextSRegs
void updateThreadContextSRegs(const struct kvm_sregs &sregs)
Update control registers (CRx, segments, etc.)
Definition: x86_cpu.cc:1026
FXSave::pad2
uint16_t pad2
Definition: x86_cpu.cc:85
X86ISA::seg
Bitfield< 2, 0 > seg
Definition: types.hh:82
floatToBits64
static uint64_t floatToBits64(double val)
Definition: types.hh:183
X86KvmCPU::cachedMsrIntersection
Kvm::MSRIndexVector cachedMsrIntersection
Cached intersection of supported MSRs.
Definition: x86_cpu.hh:236
X86ISA::CpuidResult
Definition: cpuid.hh:38
panic
#define panic(...)
This implements a cprintf based panic() function.
Definition: logging.hh:171
X86ISA::MISCREG_FCW
@ MISCREG_FCW
Definition: misc.hh:381
Kvm::capVCPUEvents
bool capVCPUEvents() const
Support for getting and setting the kvm_vcpu_events structure.
Definition: vm.cc:155

Generated on Wed Sep 30 2020 14:02:08 for gem5 by doxygen 1.8.17