gem5  v22.0.0.1
All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Modules Pages
x86_cpu.cc
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2013 Andreas Sandberg
3  * All rights reserved
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions are
7  * met: redistributions of source code must retain the above copyright
8  * notice, this list of conditions and the following disclaimer;
9  * redistributions in binary form must reproduce the above copyright
10  * notice, this list of conditions and the following disclaimer in the
11  * documentation and/or other materials provided with the distribution;
12  * neither the name of the copyright holders nor the names of its
13  * contributors may be used to endorse or promote products derived from
14  * this software without specific prior written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27  */
28 
29 #include "arch/x86/kvm/x86_cpu.hh"
30 
31 #include <linux/kvm.h>
32 
33 #include <algorithm>
34 #include <cerrno>
35 #include <memory>
36 
37 #include "arch/x86/cpuid.hh"
38 #include "arch/x86/faults.hh"
39 #include "arch/x86/interrupts.hh"
40 #include "arch/x86/regs/int.hh"
41 #include "arch/x86/regs/msr.hh"
42 #include "arch/x86/utility.hh"
43 #include "base/compiler.hh"
44 #include "cpu/kvm/base.hh"
45 #include "debug/Drain.hh"
46 #include "debug/Kvm.hh"
47 #include "debug/KvmContext.hh"
48 #include "debug/KvmIO.hh"
49 #include "debug/KvmInt.hh"
50 
51 namespace gem5
52 {
53 
54 using namespace X86ISA;
55 
56 #define MSR_TSC 0x10
57 
58 #define IO_PCI_CONF_ADDR 0xCF8
59 #define IO_PCI_CONF_DATA_BASE 0xCFC
60 
61 // Task segment type of an inactive 32-bit or 64-bit task
62 #define SEG_SYS_TYPE_TSS_AVAILABLE 9
63 // Task segment type of an active 32-bit or 64-bit task
64 #define SEG_SYS_TYPE_TSS_BUSY 11
65 
66 // Non-conforming accessed code segment
67 #define SEG_CS_TYPE_ACCESSED 9
68 // Non-conforming accessed code segment that can be read
69 #define SEG_CS_TYPE_READ_ACCESSED 11
70 
71 // The lowest bit of the type field for normal segments (code and
72 // data) is used to indicate that a segment has been accessed.
73 #define SEG_TYPE_BIT_ACCESSED 1
74 
76 {
77  uint16_t fcw;
78  uint16_t fsw;
79  uint8_t ftwx;
80  uint8_t pad0;
81  uint16_t last_opcode;
82  union
83  {
84  struct
85  {
86  uint32_t fpu_ip;
87  uint16_t fpu_cs;
88  uint16_t pad1;
89  uint32_t fpu_dp;
90  uint16_t fpu_ds;
91  uint16_t pad2;
92  } ctrl32;
93 
94  struct
95  {
96  uint64_t fpu_ip;
97  uint64_t fpu_dp;
98  } ctrl64;
99  };
100  uint32_t mxcsr;
101  uint32_t mxcsr_mask;
102 
103  uint8_t fpr[8][16];
104  uint8_t xmm[16][16];
105 
106  uint64_t reserved[12];
107 };
108 
109 static_assert(sizeof(FXSave) == 512, "Unexpected size of FXSave");
110 
111 #define FOREACH_IREG() \
112  do { \
113  APPLY_IREG(rax, int_reg::Rax); \
114  APPLY_IREG(rbx, int_reg::Rbx); \
115  APPLY_IREG(rcx, int_reg::Rcx); \
116  APPLY_IREG(rdx, int_reg::Rdx); \
117  APPLY_IREG(rsi, int_reg::Rsi); \
118  APPLY_IREG(rdi, int_reg::Rdi); \
119  APPLY_IREG(rsp, int_reg::Rsp); \
120  APPLY_IREG(rbp, int_reg::Rbp); \
121  APPLY_IREG(r8, int_reg::R8); \
122  APPLY_IREG(r9, int_reg::R9); \
123  APPLY_IREG(r10, int_reg::R10); \
124  APPLY_IREG(r11, int_reg::R11); \
125  APPLY_IREG(r12, int_reg::R12); \
126  APPLY_IREG(r13, int_reg::R13); \
127  APPLY_IREG(r14, int_reg::R14); \
128  APPLY_IREG(r15, int_reg::R15); \
129  } while (0)
130 
131 #define FOREACH_SREG() \
132  do { \
133  APPLY_SREG(cr0, misc_reg::Cr0); \
134  APPLY_SREG(cr2, misc_reg::Cr2); \
135  APPLY_SREG(cr3, misc_reg::Cr3); \
136  APPLY_SREG(cr4, misc_reg::Cr4); \
137  APPLY_SREG(cr8, misc_reg::Cr8); \
138  APPLY_SREG(efer, misc_reg::Efer); \
139  APPLY_SREG(apic_base, misc_reg::ApicBase); \
140  } while (0)
141 
142 #define FOREACH_DREG() \
143  do { \
144  APPLY_DREG(db[0], misc_reg::Dr0); \
145  APPLY_DREG(db[1], misc_reg::Dr1); \
146  APPLY_DREG(db[2], misc_reg::Dr2); \
147  APPLY_DREG(db[3], misc_reg::Dr3); \
148  APPLY_DREG(dr6, misc_reg::Dr6); \
149  APPLY_DREG(dr7, misc_reg::Dr7); \
150  } while (0)
151 
152 #define FOREACH_SEGMENT() \
153  do { \
154  APPLY_SEGMENT(cs, misc_reg::Cs - misc_reg::SegSelBase); \
155  APPLY_SEGMENT(ds, misc_reg::Ds - misc_reg::SegSelBase); \
156  APPLY_SEGMENT(es, misc_reg::Es - misc_reg::SegSelBase); \
157  APPLY_SEGMENT(fs, misc_reg::Fs - misc_reg::SegSelBase); \
158  APPLY_SEGMENT(gs, misc_reg::Gs - misc_reg::SegSelBase); \
159  APPLY_SEGMENT(ss, misc_reg::Ss - misc_reg::SegSelBase); \
160  APPLY_SEGMENT(tr, misc_reg::Tr - misc_reg::SegSelBase); \
161  APPLY_SEGMENT(ldt, misc_reg::Tsl - misc_reg::SegSelBase); \
162  } while (0)
163 
164 #define FOREACH_DTABLE() \
165  do { \
166  APPLY_DTABLE(gdt, misc_reg::Tsg - misc_reg::SegSelBase); \
167  APPLY_DTABLE(idt, misc_reg::Idtr - misc_reg::SegSelBase); \
168  } while (0)
169 
/**
 * Allocate a kernel-style variable-length structure: a fixed header
 * of type Struct immediately followed by a trailing array of
 * @p entries elements of type Entry (e.g., struct kvm_msrs).
 *
 * The storage is raw (no constructors run), matching the C layout
 * KVM expects, and is owned by a unique_ptr whose deleter releases
 * it with the matching operator delete.
 *
 * @param entries Number of trailing array elements to reserve.
 * @return Owning pointer to the uninitialized structure.
 */
template<typename Struct, typename Entry>
static auto
newVarStruct(size_t entries)
{
    const size_t bytes = sizeof(Struct) + entries * sizeof(Entry);
    auto release = [](Struct *ptr) { operator delete(ptr); };
    Struct *raw = static_cast<Struct *>(operator new(bytes));
    return std::unique_ptr<Struct, void (*)(Struct *)>(raw, release);
}
179 
/**
 * Pretty-print the KVM general-purpose register state (GPRs, rip,
 * rflags) using gem5's inform().
 */
static void
dumpKvm(const struct kvm_regs &regs)
{
    inform("KVM register state:\n");

    // Expand the FOREACH_IREG register table with a printing action.
#define APPLY_IREG(kreg, mreg) \
    inform("\t" # kreg ": 0x%llx\n", regs.kreg)

    FOREACH_IREG();

#undef APPLY_IREG

    inform("\trip: 0x%llx\n", regs.rip);
    inform("\trflags: 0x%llx\n", regs.rflags);
}
195 
/**
 * Pretty-print a single KVM segment register: base, limit, selector,
 * type, and all descriptor attribute bits.
 *
 * @param reg_name Human-readable register name for the output.
 * @param seg KVM segment state to dump.
 */
static void
dumpKvm(const char *reg_name, const struct kvm_segment &seg)
{
    inform("\t%s: @0x%llx+%x [sel: 0x%x, type: 0x%x]\n"
           "\t\tpres.: %u, dpl: %u, db: %u, s: %u, l: %u, g: %u, avl: %u, "
           "unus.: %u\n",
           reg_name,
           seg.base, seg.limit, seg.selector, seg.type,
           seg.present, seg.dpl, seg.db, seg.s, seg.l, seg.g, seg.avl,
           seg.unusable);
}
207 
/**
 * Pretty-print a descriptor table register (GDTR/IDTR): base and limit.
 *
 * @param reg_name Human-readable register name for the output.
 * @param dtable KVM descriptor table state to dump.
 */
static void
dumpKvm(const char *reg_name, const struct kvm_dtable &dtable)
{
    inform("\t%s: @0x%llx+%x\n",
           reg_name, dtable.base, dtable.limit);
}
214 
/**
 * Pretty-print the KVM special register file: segment registers,
 * control/system registers, descriptor tables, and the pending
 * interrupt bitmap.
 */
static void
dumpKvm(const struct kvm_sregs &sregs)
{
    // Expand the FOREACH_* tables with printing actions; the segment
    // and dtable actions dispatch to the dumpKvm overloads above.
#define APPLY_SREG(kreg, mreg) \
    inform("\t" # kreg ": 0x%llx\n", sregs.kreg);
#define APPLY_SEGMENT(kreg, idx) \
    dumpKvm(# kreg, sregs.kreg);
#define APPLY_DTABLE(kreg, idx) \
    dumpKvm(# kreg, sregs.kreg);

    inform("Special registers:\n");
    FOREACH_SEGMENT();
    FOREACH_SREG();
    FOREACH_DTABLE();

    inform("Interrupt Bitmap:");
    // One bitmap word covers 64 interrupt vectors.
    for (int i = 0; i < KVM_NR_INTERRUPTS; i += 64)
        inform(" 0x%.8x", sregs.interrupt_bitmap[i / 64]);

#undef APPLY_SREG
#undef APPLY_SEGMENT
#undef APPLY_DTABLE
}
238 
239 #ifdef KVM_GET_DEBUGREGS
/**
 * Pretty-print the KVM hardware debug register state (DR0-DR3,
 * DR6, DR7, flags). Only compiled when the host kernel headers
 * provide KVM_GET_DEBUGREGS.
 */
static void
dumpKvm(const struct kvm_debugregs &regs)
{
    inform("KVM debug state:\n");

    // Expand the FOREACH_DREG table with a printing action.
#define APPLY_DREG(kreg, mreg) \
    inform("\t" # kreg ": 0x%llx\n", regs.kreg)

    FOREACH_DREG();

#undef APPLY_DREG

    inform("\tflags: 0x%llx\n", regs.flags);
}
254 #endif
255 
/**
 * Dump the FPU fields that are specific to the FXSave layout: the
 * 64-bit instruction/data pointers and the MXCSR write mask.
 */
static void
dumpFpuSpec(const struct FXSave &xs)
{
    inform("\tlast_ip: 0x%x\n", xs.ctrl64.fpu_ip);
    inform("\tlast_dp: 0x%x\n", xs.ctrl64.fpu_dp);
    inform("\tmxcsr_mask: 0x%x\n", xs.mxcsr_mask);
}
263 
/**
 * Dump the FPU fields that are specific to the legacy kvm_fpu
 * layout: the last instruction/data pointers. (No mxcsr_mask here.)
 */
static void
dumpFpuSpec(const struct kvm_fpu &fpu)
{
    inform("\tlast_ip: 0x%x\n", fpu.last_ip);
    inform("\tlast_dp: 0x%x\n", fpu.last_dp);
}
270 
/**
 * Dump the FPU/SSE state common to the legacy kvm_fpu and FXSave
 * layouts: control/status words, the x87 register stack, and the
 * XMM registers. Layout-specific fields are printed by the
 * matching dumpFpuSpec() overload.
 */
template<typename T>
static void
dumpFpuCommon(const T &fpu)
{
    // TOP-of-stack index lives in FSW bits 13:11.
    const unsigned top((fpu.fsw >> 11) & 0x7);
    inform("\tfcw: 0x%x\n", fpu.fcw);

    inform("\tfsw: 0x%x (top: %i, "
           "conditions: %s%s%s%s, exceptions: %s%s%s%s%s%s %s%s%s)\n",
           fpu.fsw, top,

           // Condition code bits C0-C3.
           (fpu.fsw & CC0Bit) ? "C0" : "",
           (fpu.fsw & CC1Bit) ? "C1" : "",
           (fpu.fsw & CC2Bit) ? "C2" : "",
           (fpu.fsw & CC3Bit) ? "C3" : "",

           // Exception flags.
           (fpu.fsw & IEBit) ? "I" : "",
           (fpu.fsw & DEBit) ? "D" : "",
           (fpu.fsw & ZEBit) ? "Z" : "",
           (fpu.fsw & OEBit) ? "O" : "",
           (fpu.fsw & UEBit) ? "U" : "",
           (fpu.fsw & PEBit) ? "P" : "",

           (fpu.fsw & StackFaultBit) ? "SF " : "",
           (fpu.fsw & ErrSummaryBit) ? "ES " : "",
           (fpu.fsw & BusyBit) ? "BUSY " : ""
        );
    inform("\tftwx: 0x%x\n", fpu.ftwx);
    inform("\tlast_opcode: 0x%x\n", fpu.last_opcode);
    dumpFpuSpec(fpu);
    inform("\tmxcsr: 0x%x\n", fpu.mxcsr);
    inform("\tFP Stack:\n");
    for (int i = 0; i < 8; ++i) {
        // Physical slot i corresponds to logical ST(i) relative to TOP;
        // the abridged tag (ftwx) is indexed by the rotated register.
        const unsigned reg_idx((i + top) & 0x7);
        const bool empty(!((fpu.ftwx >> reg_idx) & 0x1));
        const double value(X86ISA::loadFloat80(fpu.fpr[i]));
        // 10 bytes (80 bits) per x87 register -> 20 hex chars + NUL.
        char hex[33];
        for (int j = 0; j < 10; ++j)
            snprintf(&hex[j*2], 3, "%.2x", fpu.fpr[i][j]);
        inform("\t\tST%i/%i: 0x%s (%f)%s\n", i, reg_idx,
               hex, value, empty ? " (e)" : "");
    }
    inform("\tXMM registers:\n");
    for (int i = 0; i < 16; ++i) {
        // 16 bytes per XMM register -> 32 hex chars + NUL.
        char hex[33];
        for (int j = 0; j < 16; ++j)
            snprintf(&hex[j*2], 3, "%.2x", fpu.xmm[i][j]);
        inform("\t\t%i: 0x%s\n", i, hex);
    }
}
321 
/**
 * Dump the FPU state as retrieved via the legacy KVM_GET_FPU layout.
 */
static void
dumpKvm(const struct kvm_fpu &fpu)
{
    inform("FPU registers:\n");
    dumpFpuCommon(fpu);
}
328 
/**
 * Dump the FPU state as retrieved via KVM_GET_XSAVE. The XSAVE
 * region starts with a legacy FXSave image, so it is reinterpreted
 * as such for printing.
 */
static void
dumpKvm(const struct kvm_xsave &xsave)
{
    inform("FPU registers (XSave):\n");
    dumpFpuCommon(*(FXSave *)xsave.region);
}
335 
/**
 * Dump a batch of MSRs (index/value pairs) from a kvm_msrs
 * variable-length structure.
 */
static void
dumpKvm(const struct kvm_msrs &msrs)
{
    inform("MSRs:\n");

    // entries[] is a trailing flexible array sized by nmsrs.
    for (int i = 0; i < msrs.nmsrs; ++i) {
        const struct kvm_msr_entry &e(msrs.entries[i]);

        inform("\t0x%x: 0x%x\n", e.index, e.data);
    }
}
347 
/**
 * Dump the extended control registers (XCRs) as retrieved via
 * KVM_GET_XCRS.
 */
static void
dumpKvm(const struct kvm_xcrs &regs)
{
    inform("KVM XCR registers:\n");

    inform("\tFlags: 0x%x\n", regs.flags);
    for (int i = 0; i < regs.nr_xcrs; ++i) {
        inform("\tXCR[0x%x]: 0x%x\n",
               regs.xcrs[i].xcr,
               regs.xcrs[i].value);
    }
}
360 
/**
 * Dump pending vCPU events (exception, interrupt, NMI, SIPI vector)
 * as reported by KVM_GET_VCPU_EVENTS.
 */
static void
dumpKvm(const struct kvm_vcpu_events &events)
{
    inform("vCPU events:\n");

    inform("\tException: [inj: %i, nr: %i, has_ec: %i, ec: %i]\n",
           events.exception.injected, events.exception.nr,
           events.exception.has_error_code, events.exception.error_code);

    inform("\tInterrupt: [inj: %i, nr: %i, soft: %i]\n",
           events.interrupt.injected, events.interrupt.nr,
           events.interrupt.soft);

    inform("\tNMI: [inj: %i, pending: %i, masked: %i]\n",
           events.nmi.injected, events.nmi.pending,
           events.nmi.masked);

    inform("\tSIPI vector: 0x%x\n", events.sipi_vector);
    inform("\tFlags: 0x%x\n", events.flags);
}
381 
/**
 * Check whether a virtual address is canonical.
 *
 * x86-64 doesn't currently use the full 64-bit virtual address
 * space; addresses are 48 bits sign-extended to 64 bits, so bits
 * 63:47 must be all zeros or all ones.
 *
 * @param addr Virtual address to check.
 * @return true if the address is canonical, false otherwise.
 */
static bool
isCanonicalAddress(uint64_t addr)
{
    static const uint64_t mask = 0xffff800000000000ULL;
    const uint64_t high_bits = addr & mask;
    return high_bits == 0 || high_bits == mask;
}
392 
/**
 * Sanity-check one guest segment register against the constraints
 * VMX places on guest state (base canonicality/width, type field,
 * S and P flags, limit/granularity consistency). Violations are
 * only reported with warn(); nothing is modified here.
 *
 * @param name Human-readable register name used in warnings.
 * @param idx gem5 misc register index identifying the segment.
 * @param seg KVM segment state to validate.
 * @param sregs Full special register file, needed for CS/SS
 *              cross-checks and the CR0.PE test.
 */
static void
checkSeg(const char *name, const int idx, const struct kvm_segment &seg,
         struct kvm_sregs sregs)
{
    // Check the register base
    switch (idx) {
      case misc_reg::Tsl:
      case misc_reg::Tr:
      case misc_reg::Fs:
      case misc_reg::Gs:
        // These may have a full 64-bit base, but it must be canonical.
        if (!isCanonicalAddress(seg.base))
            warn("Illegal %s base: 0x%x\n", name, seg.base);
        break;

      case misc_reg::Ss:
      case misc_reg::Ds:
      case misc_reg::Es:
        if (seg.unusable)
            break;
        [[fallthrough]];
      case misc_reg::Cs:
        // CS/SS/DS/ES bases must fit in 32 bits.
        if (seg.base & 0xffffffff00000000ULL)
            warn("Illegal %s base: 0x%x\n", name, seg.base);
        break;
    }

    // Check the type
    switch (idx) {
      case misc_reg::Cs:
        switch (seg.type) {
          case 3:
            if (seg.dpl != 0)
                warn("CS type is 3 but dpl != 0.\n");
            break;
          case 9:
          case 11:
            // Non-conforming code segment: DPL must equal SS DPL.
            if (seg.dpl != sregs.ss.dpl)
                warn("CS type is %i but CS DPL != SS DPL\n", seg.type);
            break;
          case 13:
          case 15:
            // Conforming code segment: DPL may not exceed SS DPL.
            if (seg.dpl > sregs.ss.dpl)
                warn("CS type is %i but CS DPL > SS DPL\n", seg.type);
            break;
          default:
            warn("Illegal CS type: %i\n", seg.type);
            break;
        }
        break;

      case misc_reg::Ss:
        if (seg.unusable)
            break;
        switch (seg.type) {
          case 3:
            if (sregs.cs.type == 3 && seg.dpl != 0)
                warn("CS type is 3, but SS DPL is != 0.\n");
            [[fallthrough]];
          case 7:
            // Outside protected mode (CR0.PE clear) SS DPL must be 0.
            if (!(sregs.cr0 & 1) && seg.dpl != 0)
                warn("SS DPL is %i, but CR0 PE is 0\n", seg.dpl);
            break;
          default:
            warn("Illegal SS type: %i\n", seg.type);
            break;
        }
        break;

      case misc_reg::Ds:
      case misc_reg::Es:
      case misc_reg::Fs:
      case misc_reg::Gs:
        if (seg.unusable)
            break;
        // Usable data segments must be accessed (bit 0); if the
        // code bit (bit 3) is set the segment must also be readable
        // (bit 1).
        if (!(seg.type & 0x1) ||
            ((seg.type & 0x8) && !(seg.type & 0x2)))
            warn("%s has an illegal type field: %i\n", name, seg.type);
        break;

      case misc_reg::Tr:
        // TODO: We should check the CPU mode
        if (seg.type != 3 && seg.type != 11)
            warn("%s: Illegal segment type (%i)\n", name, seg.type);
        break;

      case misc_reg::Tsl:
        if (seg.unusable)
            break;
        if (seg.type != 2)
            warn("%s: Illegal segment type (%i)\n", name, seg.type);
        break;
    }

    // S flag: set for code/data segments, clear for system segments
    // (TR, LDT).
    switch (idx) {
      case misc_reg::Ss:
      case misc_reg::Ds:
      case misc_reg::Es:
      case misc_reg::Fs:
      case misc_reg::Gs:
        if (seg.unusable)
            break;
        [[fallthrough]];
      case misc_reg::Cs:
        if (!seg.s)
            warn("%s: S flag not set\n", name);
        break;

      case misc_reg::Tsl:
        if (seg.unusable)
            break;
        [[fallthrough]];
      case misc_reg::Tr:
        if (seg.s)
            warn("%s: S flag is set\n", name);
        break;
    }

    // Present flag and limit/granularity consistency.
    switch (idx) {
      case misc_reg::Ss:
      case misc_reg::Ds:
      case misc_reg::Es:
      case misc_reg::Fs:
      case misc_reg::Gs:
      case misc_reg::Tsl:
        if (seg.unusable)
            break;
        [[fallthrough]];
      case misc_reg::Tr:
      case misc_reg::Cs:
        if (!seg.present)
            warn("%s: P flag not set\n", name);

        // With g set the limit is in 4 KiB units, so the low 12
        // bits must all be ones; with g clear the limit is in bytes
        // and must fit in 20 bits.
        if (((seg.limit & 0xFFF) == 0 && seg.g) ||
            ((seg.limit & 0xFFF00000) != 0 && !seg.g)) {
            warn("%s limit (0x%x) and g (%i) combination is illegal.\n",
                 name, seg.limit, seg.g);
        }
        break;
    }

    // TODO: Check CS DB
}
535 
/**
 * Construct an x86 KVM-based CPU.
 *
 * @param params Generated parameter struct; params.useXSave selects
 *        whether FPU/SIMD state is synchronized through the XSAVE
 *        interface (subject to host support, checked later).
 */
X86KvmCPU::X86KvmCPU(const X86KvmCPUParams &params)
    : BaseKvmCPU(params),
      useXSave(params.useXSave)
{}
540 
541 void
543 {
545 
546  Kvm &kvm = *vm->kvm;
547 
548  if (!kvm.capSetTSSAddress())
549  panic("KVM: Missing capability (KVM_CAP_SET_TSS_ADDR)\n");
550  if (!kvm.capExtendedCPUID())
551  panic("KVM: Missing capability (KVM_CAP_EXT_CPUID)\n");
552  if (!kvm.capUserNMI())
553  warn("KVM: Missing capability (KVM_CAP_USER_NMI)\n");
554  if (!kvm.capVCPUEvents())
555  warn("KVM: Missing capability (KVM_CAP_VCPU_EVENTS)\n");
556 
557  haveDebugRegs = kvm.capDebugRegs();
558  haveXSave = kvm.capXSave();
559  haveXCRs = kvm.capXCRs();
560 
561  if (useXSave && !haveXSave) {
562  warn("KVM: XSAVE not supported by host. MXCSR synchronization might "
563  "be unreliable due to kernel bugs.\n");
564  useXSave = false;
565  } else if (!useXSave) {
566  warn("KVM: XSave FPU/SIMD synchronization disabled by user.\n");
567  }
568 }
569 
571 {
572 }
573 
574 void
576 {
578 
579  updateCPUID();
580 
581  // TODO: Do we need to create an identity mapped TSS area? We
582  // should call kvm.vm.setTSSAddress() here in that case. It should
583  // only be needed for old versions of the virtualization
584  // extensions. We should make sure that the identity range is
585  // reserved in the e820 memory map in that case.
586 }
587 
588 void
590 {
591  dumpIntRegs();
592  if (useXSave)
593  dumpXSave();
594  else
595  dumpFpuRegs();
596  dumpSpecRegs();
597  dumpDebugRegs();
598  dumpXCRs();
599  dumpVCpuEvents();
600  dumpMSRs();
601 }
602 
603 void
605 {
606  struct kvm_fpu fpu;
607  getFPUState(fpu);
608  dumpKvm(fpu);
609 }
610 
611 void
613 {
614  struct kvm_regs regs;
615  getRegisters(regs);
616  dumpKvm(regs);
617 }
618 
619 void
621 {
622  struct kvm_sregs sregs;
623  getSpecialRegisters(sregs);
624  dumpKvm(sregs);
625 }
626 
627 void
629 {
630  if (haveDebugRegs) {
631 #ifdef KVM_GET_DEBUGREGS
632  struct kvm_debugregs dregs;
633  getDebugRegisters(dregs);
634  dumpKvm(dregs);
635 #endif
636  } else {
637  inform("Debug registers not supported by kernel.\n");
638  }
639 }
640 
641 void
643 {
644  if (haveXCRs) {
645  struct kvm_xcrs xcrs;
646  getXCRs(xcrs);
647  dumpKvm(xcrs);
648  } else {
649  inform("XCRs not supported by kernel.\n");
650  }
651 }
652 
653 void
655 {
656  if (haveXSave) {
657  struct kvm_xsave xsave;
658  getXSave(xsave);
659  dumpKvm(xsave);
660  } else {
661  inform("XSave not supported by kernel.\n");
662  }
663 }
664 
665 void
667 {
668  struct kvm_vcpu_events events;
669  getVCpuEvents(events);
670  dumpKvm(events);
671 }
672 
673 void
675 {
676  const Kvm::MSRIndexVector &supported_msrs = vm->kvm->getSupportedMSRs();
677  auto msrs = newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(
678  supported_msrs.size());
679 
680  msrs->nmsrs = supported_msrs.size();
681  for (int i = 0; i < supported_msrs.size(); ++i) {
682  struct kvm_msr_entry &e(msrs->entries[i]);
683  e.index = supported_msrs[i];
684  e.reserved = 0;
685  e.data = 0;
686  }
687  getMSRs(*msrs.get());
688 
689  dumpKvm(*msrs.get());
690 }
691 
692 void
694 {
699 
700  DPRINTF(KvmContext, "X86KvmCPU::updateKvmState():\n");
701  if (debug::KvmContext)
702  dump();
703 }
704 
705 void
707 {
708  struct kvm_regs regs;
709 
710 #define APPLY_IREG(kreg, mreg) regs.kreg = tc->getReg(mreg)
711  FOREACH_IREG();
712 #undef APPLY_IREG
713 
714  regs.rip = tc->pcState().instAddr() - tc->readMiscReg(misc_reg::CsBase);
715 
716  /* You might think that setting regs.rflags to the contents
717  * misc_reg::Rflags here would suffice. In that case you're
718  * mistaken. We need to reconstruct it from a bunch of ucode
719  * registers and wave a dead chicken over it (aka mask out and set
720  * reserved bits) to get it to work.
721  */
722  regs.rflags = X86ISA::getRFlags(tc);
723 
724  setRegisters(regs);
725 }
726 
727 static inline void
728 setKvmSegmentReg(ThreadContext *tc, struct kvm_segment &kvm_seg,
729  const int index)
730 {
732 
733  kvm_seg.base = tc->readMiscRegNoEffect(misc_reg::segBase(index));
734  kvm_seg.limit = tc->readMiscRegNoEffect(misc_reg::segLimit(index));
735  kvm_seg.selector = tc->readMiscRegNoEffect(misc_reg::segSel(index));
736  kvm_seg.type = attr.type;
737  kvm_seg.present = attr.present;
738  kvm_seg.dpl = attr.dpl;
739  kvm_seg.db = attr.defaultSize;
740  kvm_seg.s = attr.system;
741  kvm_seg.l = attr.longMode;
742  kvm_seg.g = attr.granularity;
743  kvm_seg.avl = attr.avl;
744  kvm_seg.unusable = attr.unusable;
745 }
746 
/**
 * Copy a descriptor table pointer (GDTR/IDTR) from the gem5 thread
 * context into a KVM dtable structure.
 *
 * @param tc Thread context to read from.
 * @param kvm_dtable KVM descriptor table structure to populate.
 * @param index gem5 misc register index of the table register.
 */
static inline void
setKvmDTableReg(ThreadContext *tc, struct kvm_dtable &kvm_dtable,
                const int index)
{
    kvm_dtable.base = tc->readMiscRegNoEffect(misc_reg::segBase(index));
    kvm_dtable.limit = tc->readMiscRegNoEffect(misc_reg::segLimit(index));
}
754 
755 static void
756 forceSegAccessed(struct kvm_segment &seg)
757 {
758  // Intel's VMX requires that (some) usable segments are flagged as
759  // 'accessed' (i.e., the lowest bit in the segment type is set)
760  // when entering VMX. This wouldn't necessary be the case even if
761  // gem5 did set the access bits correctly, so we force it to one
762  // in that case.
763  if (!seg.unusable)
764  seg.type |= SEG_TYPE_BIT_ACCESSED;
765 }
766 
767 void
769 {
770  struct kvm_sregs sregs;
771 
772 #define APPLY_SREG(kreg, mreg) sregs.kreg = tc->readMiscRegNoEffect(mreg)
773 #define APPLY_SEGMENT(kreg, idx) setKvmSegmentReg(tc, sregs.kreg, idx)
774 #define APPLY_DTABLE(kreg, idx) setKvmDTableReg(tc, sregs.kreg, idx)
775 
776  FOREACH_SREG();
777  FOREACH_SEGMENT();
778  FOREACH_DTABLE();
779 
780 #undef APPLY_SREG
781 #undef APPLY_SEGMENT
782 #undef APPLY_DTABLE
783 
784  // Clear the interrupt bitmap
785  memset(&sregs.interrupt_bitmap, 0, sizeof(sregs.interrupt_bitmap));
786 
787  // VMX requires CS, SS, DS, ES, FS, and GS to have the accessed
788  // bit in the type field set.
789  forceSegAccessed(sregs.cs);
790  forceSegAccessed(sregs.ss);
791  forceSegAccessed(sregs.ds);
792  forceSegAccessed(sregs.es);
793  forceSegAccessed(sregs.fs);
794  forceSegAccessed(sregs.gs);
795 
796  // There are currently some cases where the active task isn't
797  // marked as busy. This is illegal in VMX, so we force it to busy.
798  if (sregs.tr.type == SEG_SYS_TYPE_TSS_AVAILABLE) {
799  hack("tr.type (%i) is not busy. Forcing the busy bit.\n",
800  sregs.tr.type);
801  sregs.tr.type = SEG_SYS_TYPE_TSS_BUSY;
802  }
803 
804  // VMX requires the DPL of SS and CS to be the same for
805  // non-conforming code segments. It seems like m5 doesn't set the
806  // DPL of SS correctly when taking interrupts, so we need to fix
807  // that here.
808  if ((sregs.cs.type == SEG_CS_TYPE_ACCESSED ||
809  sregs.cs.type == SEG_CS_TYPE_READ_ACCESSED) &&
810  sregs.cs.dpl != sregs.ss.dpl) {
811 
812  hack("CS.DPL (%i) != SS.DPL (%i): Forcing SS.DPL to %i\n",
813  sregs.cs.dpl, sregs.ss.dpl, sregs.cs.dpl);
814  sregs.ss.dpl = sregs.cs.dpl;
815  }
816 
817  // Do checks after fixing up the state to avoid getting excessive
818  // amounts of warnings.
819  RFLAGS rflags_nocc(tc->readMiscReg(misc_reg::Rflags));
820  if (!rflags_nocc.vm) {
821  // Do segment verification if the CPU isn't entering virtual
822  // 8086 mode. We currently assume that unrestricted guest
823  // mode is available.
824 
825 #define APPLY_SEGMENT(kreg, idx) \
826  checkSeg(# kreg, idx + misc_reg::SegSelBase, sregs.kreg, sregs)
827 
828  FOREACH_SEGMENT();
829 #undef APPLY_SEGMENT
830  }
831 
832  setSpecialRegisters(sregs);
833 }
834 
835 template <typename T>
836 static void
838 {
839  fpu.mxcsr = tc->readMiscRegNoEffect(misc_reg::Mxcsr);
840  fpu.fcw = tc->readMiscRegNoEffect(misc_reg::Fcw);
841  // No need to rebuild from misc_reg::Fsw and misc_reg::Top if we read
842  // with effects.
843  fpu.fsw = tc->readMiscReg(misc_reg::Fsw);
844 
845  uint64_t ftw(tc->readMiscRegNoEffect(misc_reg::Ftw));
846  fpu.ftwx = X86ISA::convX87TagsToXTags(ftw);
847 
848  fpu.last_opcode = tc->readMiscRegNoEffect(misc_reg::Fop);
849 
850  const unsigned top((fpu.fsw >> 11) & 0x7);
851  for (int i = 0; i < 8; ++i) {
852  const unsigned reg_idx((i + top) & 0x7);
853  const double value(bitsToFloat64(
854  tc->getReg(float_reg::fpr(reg_idx))));
855  DPRINTF(KvmContext, "Setting KVM FP reg %i (st[%i]) := %f\n",
856  reg_idx, i, value);
857  X86ISA::storeFloat80(fpu.fpr[i], value);
858  }
859 
860  // TODO: We should update the MMX state
861 
862  for (int i = 0; i < 16; ++i) {
863  *(uint64_t *)&fpu.xmm[i][0] =
864  tc->getReg(float_reg::xmmLow(i));
865  *(uint64_t *)&fpu.xmm[i][8] =
867  }
868 }
869 
870 void
872 {
873  struct kvm_fpu fpu;
874 
875  // There is some padding in the FP registers, so we'd better zero
876  // the whole struct.
877  memset(&fpu, 0, sizeof(fpu));
878 
880 
882  warn_once("misc_reg::Fiseg is non-zero.\n");
883 
884  fpu.last_ip = tc->readMiscRegNoEffect(misc_reg::Fioff);
885 
887  warn_once("misc_reg::Foseg is non-zero.\n");
888 
889  fpu.last_dp = tc->readMiscRegNoEffect(misc_reg::Fooff);
890 
891  setFPUState(fpu);
892 }
893 
894 void
896 {
897  struct kvm_xsave kxsave;
898  FXSave &xsave(*(FXSave *)kxsave.region);
899 
900  // There is some padding and reserved fields in the structure, so
901  // we'd better zero the whole thing.
902  memset(&kxsave, 0, sizeof(kxsave));
903 
904  updateKvmStateFPUCommon(tc, xsave);
905 
907  warn_once("misc_reg::Fiseg is non-zero.\n");
908 
909  xsave.ctrl64.fpu_ip = tc->readMiscRegNoEffect(misc_reg::Fioff);
910 
912  warn_once("misc_reg::Foseg is non-zero.\n");
913 
914  xsave.ctrl64.fpu_dp = tc->readMiscRegNoEffect(misc_reg::Fooff);
915 
916  setXSave(kxsave);
917 }
918 
919 void
921 {
922  if (useXSave)
924  else
926 }
927 
928 void
930 {
931  KvmMSRVector msrs;
932 
933  const Kvm::MSRIndexVector &indices(getMsrIntersection());
934 
935  for (auto it = indices.cbegin(); it != indices.cend(); ++it) {
936  struct kvm_msr_entry e;
937 
938  e.index = *it;
939  e.reserved = 0;
940  e.data = tc->readMiscReg(msrMap.at(*it));
941  DPRINTF(KvmContext, "Adding MSR: idx: 0x%x, data: 0x%x\n",
942  e.index, e.data);
943 
944  msrs.push_back(e);
945  }
946 
947  setMSRs(msrs);
948 }
949 
950 void
952 {
953  struct kvm_regs regs;
954  struct kvm_sregs sregs;
955 
956  getRegisters(regs);
957  getSpecialRegisters(sregs);
958 
959  DPRINTF(KvmContext, "X86KvmCPU::updateThreadContext():\n");
960  if (debug::KvmContext)
961  dump();
962 
963  updateThreadContextRegs(regs, sregs);
965  if (useXSave) {
966  struct kvm_xsave xsave;
967  getXSave(xsave);
968 
970  } else {
971  struct kvm_fpu fpu;
972  getFPUState(fpu);
973 
975  }
977 
978  // The M5 misc reg caches some values from other
979  // registers. Writing to it with side effects causes it to be
980  // updated from its source registers.
982 }
983 
/**
 * Copy KVM's general-purpose register state back into the gem5
 * thread context.
 *
 * @param regs KVM GPR state (source).
 * @param sregs KVM special registers; cs.base is needed to turn
 *        the CS-relative rip back into gem5's linear PC.
 */
void
X86KvmCPU::updateThreadContextRegs(const struct kvm_regs &regs,
                                   const struct kvm_sregs &sregs)
{
    // Expand the FOREACH_IREG table with a store into the context.
#define APPLY_IREG(kreg, mreg) tc->setReg(mreg, regs.kreg)

    FOREACH_IREG();

#undef APPLY_IREG

    // KVM's rip is relative to the CS base; gem5 uses the linear PC.
    tc->pcState(PCState(regs.rip + sregs.cs.base));

    // Flags are spread out across multiple semi-magic registers so we
    // need some special care when updating them.
    X86ISA::setRFlags(tc, regs.rflags);
}
1000 
1001 
/**
 * Copy a KVM segment register back into the gem5 thread context.
 *
 * NOTE(review): attr is assembled below but no store of it appears
 * in this listing — a setMiscReg(misc_reg::segAttr(index), attr)
 * line seems to have been lost from this copy of the file; verify
 * against the upstream source before relying on this function.
 */
inline void
setContextSegment(ThreadContext *tc, const struct kvm_segment &kvm_seg,
                  const int index)
{
    SegAttr attr(0);

    attr.type = kvm_seg.type;
    attr.present = kvm_seg.present;
    attr.dpl = kvm_seg.dpl;
    attr.defaultSize = kvm_seg.db;
    attr.system = kvm_seg.s;
    attr.longMode = kvm_seg.l;
    attr.granularity = kvm_seg.g;
    attr.avl = kvm_seg.avl;
    attr.unusable = kvm_seg.unusable;

    // We need some setMiscReg magic here to keep the effective base
    // addresses in sync. We need an up-to-date version of EFER, so
    // make sure this is called after the sregs have been synced.
    tc->setMiscReg(misc_reg::segBase(index), kvm_seg.base);
    tc->setMiscReg(misc_reg::segLimit(index), kvm_seg.limit);
    tc->setMiscReg(misc_reg::segSel(index), kvm_seg.selector);
}
1026 
/**
 * Copy a KVM descriptor table register (GDTR/IDTR) back into the
 * gem5 thread context.
 */
inline void
setContextSegment(ThreadContext *tc, const struct kvm_dtable &kvm_dtable,
                  const int index)
{
    // We need some setMiscReg magic here to keep the effective base
    // addresses in sync. We need an up-to-date version of EFER, so
    // make sure this is called after the sregs have been synced.
    tc->setMiscReg(misc_reg::segBase(index), kvm_dtable.base);
    tc->setMiscReg(misc_reg::segLimit(index), kvm_dtable.limit);
}
1037 
/**
 * Copy KVM's special registers (control registers, segments,
 * descriptor tables) back into the gem5 thread context.
 */
void
X86KvmCPU::updateThreadContextSRegs(const struct kvm_sregs &sregs)
{
    // These two registers are mirrored in the shared kvm_run area
    // and must already agree with the sregs we just fetched.
    assert(getKvmRunState()->apic_base == sregs.apic_base);
    assert(getKvmRunState()->cr8 == sregs.cr8);

    // Expand the FOREACH_* tables: plain sregs are stored without
    // side effects; segments and dtables go through setContextSegment
    // so effective bases are recomputed.
#define APPLY_SREG(kreg, mreg) tc->setMiscRegNoEffect(mreg, sregs.kreg)
#define APPLY_SEGMENT(kreg, idx) setContextSegment(tc, sregs.kreg, idx)
#define APPLY_DTABLE(kreg, idx) setContextSegment(tc, sregs.kreg, idx)
    FOREACH_SREG();
    FOREACH_SEGMENT();
    FOREACH_DTABLE();
#undef APPLY_SREG
#undef APPLY_SEGMENT
#undef APPLY_DTABLE
}
1054 
1055 template<typename T>
1056 static void
1058 {
1059  const unsigned top((fpu.fsw >> 11) & 0x7);
1060 
1061  for (int i = 0; i < 8; ++i) {
1062  const unsigned reg_idx((i + top) & 0x7);
1063  const double value(X86ISA::loadFloat80(fpu.fpr[i]));
1064  DPRINTF(KvmContext, "Setting gem5 FP reg %i (st[%i]) := %f\n",
1065  reg_idx, i, value);
1066  tc->setReg(float_reg::fpr(reg_idx), floatToBits64(value));
1067  }
1068 
1069  // TODO: We should update the MMX state
1070 
1072  tc->setMiscRegNoEffect(misc_reg::Mxcsr, fpu.mxcsr);
1073  tc->setMiscRegNoEffect(misc_reg::Fcw, fpu.fcw);
1074  tc->setMiscRegNoEffect(misc_reg::Fsw, fpu.fsw);
1075 
1076  uint64_t ftw(convX87XTagsToTags(fpu.ftwx));
1077  // TODO: Are these registers really the same?
1080 
1081  tc->setMiscRegNoEffect(misc_reg::Fop, fpu.last_opcode);
1082 
1083  for (int i = 0; i < 16; ++i) {
1084  tc->setReg(float_reg::xmmLow(i), *(uint64_t *)&fpu.xmm[i][0]);
1085  tc->setReg(float_reg::xmmHigh(i), *(uint64_t *)&fpu.xmm[i][8]);
1086  }
1087 }
1088 
1089 void
1090 X86KvmCPU::updateThreadContextFPU(const struct kvm_fpu &fpu)
1091 {
1093 
1095  tc->setMiscRegNoEffect(misc_reg::Fioff, fpu.last_ip);
1097  tc->setMiscRegNoEffect(misc_reg::Fooff, fpu.last_dp);
1098 }
1099 
1100 void
1101 X86KvmCPU::updateThreadContextXSave(const struct kvm_xsave &kxsave)
1102 {
1103  const FXSave &xsave(*(const FXSave *)kxsave.region);
1104 
1106 
1108  tc->setMiscRegNoEffect(misc_reg::Fioff, xsave.ctrl64.fpu_ip);
1110  tc->setMiscRegNoEffect(misc_reg::Fooff, xsave.ctrl64.fpu_dp);
1111 }
1112 
1113 void
1115 {
1116  const Kvm::MSRIndexVector &msrs(getMsrIntersection());
1117 
1118  auto kvm_msrs = newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(
1119  msrs.size());
1120  struct kvm_msr_entry *entry;
1121 
1122  // Create a list of MSRs to read
1123  kvm_msrs->nmsrs = msrs.size();
1124  entry = &kvm_msrs->entries[0];
1125  for (auto it = msrs.cbegin(); it != msrs.cend(); ++it, ++entry) {
1126  entry->index = *it;
1127  entry->reserved = 0;
1128  entry->data = 0;
1129  }
1130 
1131  getMSRs(*kvm_msrs.get());
1132 
1133  // Update M5's state
1134  entry = &kvm_msrs->entries[0];
1135  for (int i = 0; i < kvm_msrs->nmsrs; ++i, ++entry) {
1136  DPRINTF(KvmContext, "Setting M5 MSR: idx: 0x%x, data: 0x%x\n",
1137  entry->index, entry->data);
1138 
1139  tc->setMiscReg(X86ISA::msrMap.at(entry->index), entry->data);
1140  }
1141 }
1142 
1143 void
1145 {
1146  Fault fault;
1147 
1149 
1150  {
1151  // Migrate to the interrupt controller's thread to get the
1152  // interrupt. Even though the individual methods are safe to
1153  // call across threads, we might still lose interrupts unless
1154  // they are getInterrupt() and updateIntrInfo() are called
1155  // atomically.
1156  EventQueue::ScopedMigration migrate(interrupts[0]->eventQueue());
1157  fault = interrupts[0]->getInterrupt();
1158  interrupts[0]->updateIntrInfo();
1159  }
1160 
1161  X86Interrupt *x86int(dynamic_cast<X86Interrupt *>(fault.get()));
1162  if (dynamic_cast<NonMaskableInterrupt *>(fault.get())) {
1163  DPRINTF(KvmInt, "Delivering NMI\n");
1165  } else if (dynamic_cast<InitInterrupt *>(fault.get())) {
1166  DPRINTF(KvmInt, "INIT interrupt\n");
1167  fault.get()->invoke(tc);
1168  // Delay the kvm state update since we won't enter KVM on this
1169  // tick.
1170  threadContextDirty = true;
1171  // HACK: gem5 doesn't actually have any BIOS code, which means
1172  // that we need to halt the thread and wait for a startup
1173  // interrupt before restarting the thread. The simulated CPUs
1174  // use the same kind of hack using a microcode routine.
1175  thread->suspend();
1176  } else if (dynamic_cast<StartupInterrupt *>(fault.get())) {
1177  DPRINTF(KvmInt, "STARTUP interrupt\n");
1178  fault.get()->invoke(tc);
1179  // The kvm state is assumed to have been updated when entering
1180  // kvmRun(), so we need to update manually it here.
1181  updateKvmState();
1182  } else if (x86int) {
1183  struct kvm_interrupt kvm_int;
1184  kvm_int.irq = x86int->getVector();
1185 
1186  DPRINTF(KvmInt, "Delivering interrupt: %s (%u)\n",
1187  fault->name(), kvm_int.irq);
1188 
1189  kvmInterrupt(kvm_int);
1190  } else {
1191  panic("KVM: Unknown interrupt type\n");
1192  }
1193 
1194 }
1195 
1196 Tick
1198 {
1199  struct kvm_run &kvm_run(*getKvmRunState());
1200 
1201  auto *lapic = dynamic_cast<X86ISA::Interrupts *>(interrupts[0]);
1202 
1203  if (lapic->checkInterruptsRaw()) {
1204  if (lapic->hasPendingUnmaskable()) {
1205  DPRINTF(KvmInt,
1206  "Delivering unmaskable interrupt.\n");
1209  } else if (kvm_run.ready_for_interrupt_injection) {
1210  // KVM claims that it is ready for an interrupt. It might
1211  // be lying if we just updated rflags and disabled
1212  // interrupts (e.g., by doing a CPU handover). Let's sync
1213  // the thread context and check if there are /really/
1214  // interrupts that should be delivered now.
1216  if (lapic->checkInterrupts()) {
1217  DPRINTF(KvmInt,
1218  "M5 has pending interrupts, delivering interrupt.\n");
1219 
1221  } else {
1222  DPRINTF(KvmInt,
1223  "Interrupt delivery delayed due to KVM confusion.\n");
1224  kvm_run.request_interrupt_window = 1;
1225  }
1226  } else if (!kvm_run.request_interrupt_window) {
1227  DPRINTF(KvmInt,
1228  "M5 has pending interrupts, requesting interrupt "
1229  "window.\n");
1230  kvm_run.request_interrupt_window = 1;
1231  }
1232  } else {
1233  kvm_run.request_interrupt_window = 0;
1234  }
1235 
1236  // The CPU might have been suspended as a result of the INIT
1237  // interrupt delivery hack. In that case, don't enter into KVM.
1238  if (_status == Idle)
1239  return 0;
1240  else
1241  return BaseKvmCPU::kvmRun(ticks);
1242 }
1243 
1244 Tick
1246 {
1247  struct kvm_run &kvm_run(*getKvmRunState());
1248 
1249  if (!archIsDrained()) {
1250  DPRINTF(Drain, "kvmRunDrain: Architecture code isn't drained\n");
1251 
1252  // Tell KVM to find a suitable place to deliver interrupts. This
1253  // should ensure that pending interrupts have been delivered and
1254  // things are reasonably consistent (i.e., no interrupts pending
1255  // in the guest).
1256  kvm_run.request_interrupt_window = 1;
1257 
1258  // Limit the run to 1 millisecond. That is hopefully enough to
1259  // reach an interrupt window. Otherwise, we'll just try again
1260  // later.
1262  } else {
1263  DPRINTF(Drain, "kvmRunDrain: Delivering pending IO\n");
1264 
1265  return BaseKvmCPU::kvmRun(0);
1266  }
1267 }
1268 
1269 uint64_t
1271 {
1272  return getMSR(MSR_TSC);
1273 }
1274 
1275 void
1277 {
1278  struct kvm_run &kvm_run(*getKvmRunState());
1279  const uint16_t port(kvm_run.io.port);
1280 
1281  assert(kvm_run.exit_reason == KVM_EXIT_IO);
1282 
1283  if (kvm_run.io.size != 4) {
1284  panic("Unexpected IO size (%u) for address 0x%x.\n",
1285  kvm_run.io.size, port);
1286  }
1287 
1288  if (kvm_run.io.count != 1) {
1289  panic("Unexpected IO count (%u) for address 0x%x.\n",
1290  kvm_run.io.count, port);
1291  }
1292 
1293  uint32_t *data((uint32_t *)getGuestData(kvm_run.io.data_offset));
1294  if (kvm_run.io.direction == KVM_EXIT_IO_OUT)
1295  tc->setMiscReg(miscreg, *data);
1296  else
1297  *data = tc->readMiscRegNoEffect(miscreg);
1298 }
1299 
1300 Tick
1302 {
1303  struct kvm_run &kvm_run(*getKvmRunState());
1304  bool isWrite(kvm_run.io.direction == KVM_EXIT_IO_OUT);
1305  unsigned char *guestData(getGuestData(kvm_run.io.data_offset));
1306  Tick delay(0);
1307  uint16_t port(kvm_run.io.port);
1308  Addr pAddr;
1309  const int count(kvm_run.io.count);
1310 
1311  assert(kvm_run.io.direction == KVM_EXIT_IO_IN ||
1312  kvm_run.io.direction == KVM_EXIT_IO_OUT);
1313 
1314  DPRINTF(KvmIO, "KVM-x86: Handling IO instruction (%s) (port: 0x%x)\n",
1315  (isWrite ? "out" : "in"), kvm_run.io.port);
1316 
1317  /* Vanilla gem5 handles PCI discovery in the TLB(!). Since we
1318  * don't use the TLB component, we need to intercept and handle
1319  * the PCI configuration space IO ports here.
1320  *
1321  * The IO port PCI discovery mechanism uses one address register
1322  * and one data register. We map the address register to a misc
1323  * reg and use that to re-route data register accesses to the
1324  * right location in the PCI configuration space.
1325  */
1326  if (port == IO_PCI_CONF_ADDR) {
1328  return 0;
1329  } else if ((port & ~0x3) == IO_PCI_CONF_DATA_BASE) {
1330  Addr pciConfigAddr(tc->readMiscRegNoEffect(
1332  if (pciConfigAddr & 0x80000000) {
1333  pAddr = X86ISA::x86PciConfigAddress((pciConfigAddr & 0x7ffffffc) |
1334  (port & 0x3));
1335  } else {
1336  pAddr = X86ISA::x86IOAddress(port);
1337  }
1338  } else {
1339  pAddr = X86ISA::x86IOAddress(port);
1340  }
1341 
1342  const MemCmd cmd(isWrite ? MemCmd::WriteReq : MemCmd::ReadReq);
1343  // Temporarily lock and migrate to the device event queue to
1344  // prevent races in multi-core mode.
1346  for (int i = 0; i < count; ++i) {
1347  RequestPtr io_req = std::make_shared<Request>(
1348  pAddr, kvm_run.io.size,
1349  Request::UNCACHEABLE, dataRequestorId());
1350 
1351  io_req->setContext(tc->contextId());
1352 
1353  PacketPtr pkt = new Packet(io_req, cmd);
1354 
1355  pkt->dataStatic(guestData);
1356  delay += dataPort.submitIO(pkt);
1357 
1358  guestData += kvm_run.io.size;
1359  }
1360 
1361  return delay;
1362 }
1363 
1364 Tick
1366 {
1367  // We don't need to do anything here since this is caught the next
1368  // time we execute kvmRun(). We still overload the exit event to
1369  // silence the warning about an unhandled exit event.
1370  return 0;
1371 }
1372 
1373 bool
1375 {
1376  struct kvm_vcpu_events events;
1377 
1378  getVCpuEvents(events);
1379 
1380  // We could probably handle this in a by re-inserting interrupts
1381  // that are pending into gem5 on a drain. However, that would
1382  // probably be tricky to do reliably, so we'll just prevent a
1383  // drain if there is anything pending in the
1384  // guest. X86KvmCPU::kvmRunDrain() minimizes the amount of code
1385  // executed in the guest by requesting an interrupt window if
1386  // there are pending interrupts.
1387  const bool pending_events(events.exception.injected ||
1388  events.interrupt.injected ||
1389  events.nmi.injected || events.nmi.pending);
1390 
1391  if (pending_events) {
1392  DPRINTF(Drain, "archIsDrained: Pending events: %s %s %s %s\n",
1393  events.exception.injected ? "exception" : "",
1394  events.interrupt.injected ? "interrupt" : "",
1395  events.nmi.injected ? "nmi[i]" : "",
1396  events.nmi.pending ? "nmi[p]" : "");
1397  }
1398 
1399  return !pending_events;
1400 }
1401 
1402 void
1404 {
1405  struct kvm_run &kvm_run(*getKvmRunState());
1406 
1407  // Synchronize the APIC base and CR8 here since they are present
1408  // in the kvm_run struct, which makes the synchronization really
1409  // cheap.
1410  kvm_run.apic_base = tc->readMiscReg(misc_reg::ApicBase);
1411  kvm_run.cr8 = tc->readMiscReg(misc_reg::Cr8);
1412 
1414 
1415  tc->setMiscReg(misc_reg::ApicBase, kvm_run.apic_base);
1416  kvm_run.cr8 = tc->readMiscReg(misc_reg::Cr8);
1417 }
1418 
1419 static struct kvm_cpuid_entry2
1420 makeKvmCpuid(uint32_t function, uint32_t index,
1421  CpuidResult &result)
1422 {
1423  struct kvm_cpuid_entry2 e;
1424  e.function = function;
1425  e.index = index;
1426  e.flags = 0;
1427  e.eax = (uint32_t)result.rax;
1428  e.ebx = (uint32_t)result.rbx;
1429  e.ecx = (uint32_t)result.rcx;
1430  e.edx = (uint32_t)result.rdx;
1431 
1432  return e;
1433 }
1434 
1435 void
1437 {
1438  Kvm::CPUIDVector m5_supported;
1439 
1440  /* TODO: We currently don't support any of the functions that
1441  * iterate through data structures in the CPU using an index. It's
1442  * currently not a problem since M5 doesn't expose any of them at
1443  * the moment.
1444  */
1445 
1446  /* Basic features */
1447  CpuidResult func0;
1448  X86ISA::doCpuid(tc, 0x0, 0, func0);
1449  for (uint32_t function = 0; function <= func0.rax; ++function) {
1451  uint32_t idx(0);
1452 
1453  X86ISA::doCpuid(tc, function, idx, cpuid);
1454  m5_supported.push_back(makeKvmCpuid(function, idx, cpuid));
1455  }
1456 
1457  /* Extended features */
1458  CpuidResult efunc0;
1459  X86ISA::doCpuid(tc, 0x80000000, 0, efunc0);
1460  for (uint32_t function = 0x80000000; function <= efunc0.rax; ++function) {
1462  uint32_t idx(0);
1463 
1464  X86ISA::doCpuid(tc, function, idx, cpuid);
1465  m5_supported.push_back(makeKvmCpuid(function, idx, cpuid));
1466  }
1467 
1468  setCPUID(m5_supported);
1469 }
1470 
1471 void
1472 X86KvmCPU::setCPUID(const struct kvm_cpuid2 &cpuid)
1473 {
1474  if (ioctl(KVM_SET_CPUID2, (void *)&cpuid) == -1)
1475  panic("KVM: Failed to set guest CPUID2 (errno: %i)\n",
1476  errno);
1477 }
1478 
1479 void
1480 X86KvmCPU::setCPUID(const Kvm::CPUIDVector &cpuid)
1481 {
1482  auto kvm_cpuid = newVarStruct<struct kvm_cpuid2, struct kvm_cpuid_entry2>(
1483  cpuid.size());
1484 
1485  kvm_cpuid->nent = cpuid.size();
1486  std::copy(cpuid.begin(), cpuid.end(), kvm_cpuid->entries);
1487 
1488  setCPUID(*kvm_cpuid);
1489 }
1490 
1491 void
1492 X86KvmCPU::setMSRs(const struct kvm_msrs &msrs)
1493 {
1494  if (ioctl(KVM_SET_MSRS, (void *)&msrs) == -1)
1495  panic("KVM: Failed to set guest MSRs (errno: %i)\n",
1496  errno);
1497 }
1498 
1499 void
1501 {
1502  auto kvm_msrs = newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(
1503  msrs.size());
1504 
1505  kvm_msrs->nmsrs = msrs.size();
1506  std::copy(msrs.begin(), msrs.end(), kvm_msrs->entries);
1507 
1508  setMSRs(*kvm_msrs);
1509 }
1510 
1511 void
1512 X86KvmCPU::getMSRs(struct kvm_msrs &msrs) const
1513 {
1514  if (ioctl(KVM_GET_MSRS, (void *)&msrs) == -1)
1515  panic("KVM: Failed to get guest MSRs (errno: %i)\n",
1516  errno);
1517 }
1518 
1519 
1520 void
1521 X86KvmCPU::setMSR(uint32_t index, uint64_t value)
1522 {
1523  auto kvm_msrs = newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(1);
1524  struct kvm_msr_entry &entry(kvm_msrs->entries[0]);
1525 
1526  kvm_msrs->nmsrs = 1;
1527  entry.index = index;
1528  entry.reserved = 0;
1529  entry.data = value;
1530 
1531  setMSRs(*kvm_msrs.get());
1532 }
1533 
1534 uint64_t
1535 X86KvmCPU::getMSR(uint32_t index) const
1536 {
1537  auto kvm_msrs = newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(1);
1538  struct kvm_msr_entry &entry(kvm_msrs->entries[0]);
1539 
1540  kvm_msrs->nmsrs = 1;
1541  entry.index = index;
1542  entry.reserved = 0;
1543  entry.data = 0;
1544 
1545  getMSRs(*kvm_msrs.get());
1546  return entry.data;
1547 }
1548 
1549 const Kvm::MSRIndexVector &
1551 {
1552  if (cachedMsrIntersection.empty()) {
1553  const Kvm::MSRIndexVector &kvm_msrs = vm->kvm->getSupportedMSRs();
1554 
1555  DPRINTF(Kvm, "kvm-x86: Updating MSR intersection\n");
1556  for (auto it = kvm_msrs.cbegin(); it != kvm_msrs.cend(); ++it) {
1557  if (X86ISA::msrMap.find(*it) != X86ISA::msrMap.end()) {
1558  cachedMsrIntersection.push_back(*it);
1559  DPRINTF(Kvm, "kvm-x86: Adding MSR 0x%x\n", *it);
1560  } else {
1561  warn("kvm-x86: MSR (0x%x) unsupported by gem5. Skipping.\n",
1562  *it);
1563  }
1564  }
1565  }
1566 
1567  return cachedMsrIntersection;
1568 }
1569 
1570 void
1571 X86KvmCPU::getDebugRegisters(struct kvm_debugregs &regs) const
1572 {
1573 #ifdef KVM_GET_DEBUGREGS
1574  if (ioctl(KVM_GET_DEBUGREGS, &regs) == -1)
1575  panic("KVM: Failed to get guest debug registers\n");
1576 #else
1577  panic("KVM: Unsupported getDebugRegisters call.\n");
1578 #endif
1579 }
1580 
1581 void
1582 X86KvmCPU::setDebugRegisters(const struct kvm_debugregs &regs)
1583 {
1584 #ifdef KVM_SET_DEBUGREGS
1585  if (ioctl(KVM_SET_DEBUGREGS, (void *)&regs) == -1)
1586  panic("KVM: Failed to set guest debug registers\n");
1587 #else
1588  panic("KVM: Unsupported setDebugRegisters call.\n");
1589 #endif
1590 }
1591 
1592 void
1593 X86KvmCPU::getXCRs(struct kvm_xcrs &regs) const
1594 {
1595  if (ioctl(KVM_GET_XCRS, &regs) == -1)
1596  panic("KVM: Failed to get guest debug registers\n");
1597 }
1598 
1599 void
1600 X86KvmCPU::setXCRs(const struct kvm_xcrs &regs)
1601 {
1602  if (ioctl(KVM_SET_XCRS, (void *)&regs) == -1)
1603  panic("KVM: Failed to set guest debug registers\n");
1604 }
1605 
1606 void
1607 X86KvmCPU::getXSave(struct kvm_xsave &xsave) const
1608 {
1609  if (ioctl(KVM_GET_XSAVE, &xsave) == -1)
1610  panic("KVM: Failed to get guest debug registers\n");
1611 }
1612 
1613 void
1614 X86KvmCPU::setXSave(const struct kvm_xsave &xsave)
1615 {
1616  if (ioctl(KVM_SET_XSAVE, (void *)&xsave) == -1)
1617  panic("KVM: Failed to set guest debug registers\n");
1618 }
1619 
1620 
1621 void
1622 X86KvmCPU::getVCpuEvents(struct kvm_vcpu_events &events) const
1623 {
1624  if (ioctl(KVM_GET_VCPU_EVENTS, &events) == -1)
1625  panic("KVM: Failed to get guest debug registers\n");
1626 }
1627 
1628 void
1629 X86KvmCPU::setVCpuEvents(const struct kvm_vcpu_events &events)
1630 {
1631  if (ioctl(KVM_SET_VCPU_EVENTS, (void *)&events) == -1)
1632  panic("KVM: Failed to set guest debug registers\n");
1633 }
1634 
1635 } // namespace gem5
gem5::dumpKvm
static void dumpKvm(const struct kvm_regs &regs)
Definition: x86_cpu.cc:181
gem5::X86KvmCPU::dumpDebugRegs
void dumpDebugRegs() const
Definition: x86_cpu.cc:628
gem5::FXSave::fpu_cs
uint16_t fpu_cs
Definition: x86_cpu.cc:87
gem5::FXSave::last_opcode
uint16_t last_opcode
Definition: x86_cpu.cc:81
gem5::BaseKvmCPU::_status
Status _status
CPU run state.
Definition: base.hh:240
gem5::X86ISA::loadFloat80
double loadFloat80(const void *_mem)
Load an 80-bit float from memory and convert it to double.
Definition: utility.cc:156
gem5::Kvm::capSetTSSAddress
bool capSetTSSAddress() const
Support for KvmVM::setTSSAddress()
Definition: vm.cc:114
gem5::PCStateBase::instAddr
Addr instAddr() const
Returns the memory address of the instruction this PC points to.
Definition: pcstate.hh:107
gem5::X86ISA::misc_reg::Ftag
@ Ftag
Definition: misc.hh:392
gem5::X86KvmCPU::setCPUID
void setCPUID(const struct kvm_cpuid2 &cpuid)
Methods to access CPUID information using the extended API.
Definition: x86_cpu.cc:1472
gem5::X86KvmCPU::updateThreadContextRegs
void updateThreadContextRegs(const struct kvm_regs &regs, const struct kvm_sregs &sregs)
Support routines to update the state of gem5's thread context from KVM's state representation.
Definition: x86_cpu.cc:985
gem5::ThreadContext::readMiscReg
virtual RegVal readMiscReg(RegIndex misc_reg)=0
gem5::Kvm::capUserNMI
bool capUserNMI() const
Support for BaseKvmCPU::kvmNonMaskableInterrupt().
Definition: vm.cc:126
gem5::FXSave::fcw
uint16_t fcw
Definition: x86_cpu.cc:77
gem5::updateKvmStateFPUCommon
static void updateKvmStateFPUCommon(ThreadContext *tc, T &fpu)
Definition: x86_cpu.cc:837
warn
#define warn(...)
Definition: logging.hh:246
gem5::X86KvmCPU::dumpSpecRegs
void dumpSpecRegs() const
Definition: x86_cpu.cc:620
gem5::X86KvmCPU::startup
void startup() override
Definition: x86_cpu.cc:575
gem5::X86ISA::misc_reg::Ftw
@ Ftw
Definition: misc.hh:391
gem5::X86KvmCPU::dumpVCpuEvents
void dumpVCpuEvents() const
Definition: x86_cpu.cc:666
gem5::X86ISA::misc_reg::Fioff
@ Fioff
Definition: misc.hh:394
gem5::GEM5_PACKED
PM4 packets.
Definition: pm4_defines.hh:77
gem5::X86ISA::msrMap
const MsrMap msrMap
Map between MSR addresses and their corresponding misc registers.
gem5::X86ISA::misc_reg::PciConfigAddress
@ PciConfigAddress
Definition: misc.hh:404
x86_cpu.hh
gem5::forceSegAccessed
static void forceSegAccessed(struct kvm_segment &seg)
Definition: x86_cpu.cc:756
gem5::X86KvmCPU::haveXSave
bool haveXSave
Kvm::capXSave() available?
Definition: x86_cpu.hh:261
data
const char data[]
Definition: circlebuf.test.cc:48
gem5::MipsISA::cpuid
Bitfield< 28, 21 > cpuid
Definition: dt_constants.hh:95
gem5::X86ISA::ZEBit
@ ZEBit
Definition: misc.hh:91
gem5::X86KvmCPU::dumpXCRs
void dumpXCRs() const
Definition: x86_cpu.cc:642
gem5::X86ISA::e
Bitfield< 11 > e
Definition: misc.hh:753
gem5::X86KvmCPU::~X86KvmCPU
virtual ~X86KvmCPU()
Definition: x86_cpu.cc:570
gem5::X86KvmCPU::dumpIntRegs
void dumpIntRegs() const
Definition: x86_cpu.cc:612
gem5::FXSave::pad1
uint16_t pad1
Definition: x86_cpu.cc:88
gem5::X86ISA::IEBit
@ IEBit
Definition: misc.hh:89
gem5::BaseKvmCPU::ioctl
int ioctl(int request, long p1) const
vCPU ioctl interface.
Definition: base.cc:1158
gem5::MipsISA::index
Bitfield< 30, 0 > index
Definition: pra_constants.hh:47
warn_once
#define warn_once(...)
Definition: logging.hh:250
gem5::ThreadContext::getReg
virtual RegVal getReg(const RegId &reg) const
Definition: thread_context.cc:171
gem5::X86ISA::CC2Bit
@ CC2Bit
Definition: misc.hh:101
gem5::X86KvmCPU::handleIOMiscReg32
void handleIOMiscReg32(int miscreg)
Handle a 32-bit IO access that should be mapped to a MiscReg.
Definition: x86_cpu.cc:1276
gem5::X86ISA::ErrSummaryBit
@ ErrSummaryBit
Definition: misc.hh:98
gem5::X86KvmCPU::getDebugRegisters
void getDebugRegisters(struct kvm_debugregs &regs) const
Wrappers around KVM's state transfer methods.
Definition: x86_cpu.cc:1571
gem5::BaseKvmCPU::deviceEventQueue
EventQueue * deviceEventQueue()
Get a pointer to the event queue owning devices.
Definition: base.hh:447
gem5::BaseKvmCPU::kvmInterrupt
void kvmInterrupt(const struct kvm_interrupt &interrupt)
Send a normal interrupt to the guest.
Definition: base.cc:804
gem5::BaseKvmCPU::syncThreadContext
void syncThreadContext()
Update a thread context if the KVM state is dirty with respect to the cached thread context.
Definition: base.cc:936
gem5::X86KvmCPU::updateKvmStateFPU
void updateKvmStateFPU()
Update FPU and SIMD registers.
Definition: x86_cpu.cc:920
gem5::X86ISA::float_reg::xmm
static RegId xmm(int index)
Definition: float.hh:133
gem5::X86KvmCPU::updateKvmStateRegs
void updateKvmStateRegs()
Support routines to update the state of the KVM CPU from gem5's state representation.
Definition: x86_cpu.cc:706
gem5::X86ISA::misc_reg::M5Reg
@ M5Reg
Definition: misc.hh:146
gem5::ArmISA::attr
attr
Definition: misc_types.hh:656
gem5::X86KvmCPU::setMSRs
void setMSRs(const struct kvm_msrs &msrs)
Methods to access MSRs in the guest.
Definition: x86_cpu.cc:1492
gem5::X86KvmCPU::setDebugRegisters
void setDebugRegisters(const struct kvm_debugregs &regs)
Definition: x86_cpu.cc:1582
gem5::ThreadContext::pcState
virtual const PCStateBase & pcState() const =0
gem5::X86ISA::misc_reg::Ss
@ Ss
Definition: misc.hh:307
gem5::X86ISA::CC0Bit
@ CC0Bit
Definition: misc.hh:99
gem5::bitsToFloat64
static double bitsToFloat64(uint64_t val)
Definition: types.hh:218
gem5::X86KvmCPU::updateThreadContextXSave
void updateThreadContextXSave(const struct kvm_xsave &kxsave)
Update FPU and SIMD registers using the XSave API.
Definition: x86_cpu.cc:1101
gem5::X86ISA::misc_reg::CsBase
@ CsBase
Definition: misc.hh:322
gem5::X86ISA::InitInterrupt
Definition: faults.hh:349
gem5::X86ISA::x86PciConfigAddress
static Addr x86PciConfigAddress(const uint32_t addr)
Definition: x86_traits.hh:85
gem5::ArmISA::e
Bitfield< 9 > e
Definition: misc_types.hh:65
interrupts.hh
gem5::X86ISA::misc_reg::Tr
@ Tr
Definition: misc.hh:316
gem5::BaseKvmCPU::getKvmRunState
struct kvm_run * getKvmRunState()
Get a pointer to the kvm_run structure containing all the input and output parameters from kvmRun().
Definition: base.hh:317
gem5::FXSave::fpu_ip
uint32_t fpu_ip
Definition: x86_cpu.cc:86
gem5::X86ISA::misc_reg::ApicBase
@ ApicBase
Definition: misc.hh:401
gem5::X86KvmCPU::updateCPUID
void updateCPUID()
Transfer gem5's CPUID values into the virtual CPU.
Definition: x86_cpu.cc:1436
top
Definition: test.h:61
gem5::ThreadContext::contextId
virtual ContextID contextId() const =0
gem5::X86ISA::misc_reg::segAttr
static RegIndex segAttr(int index)
Definition: misc.hh:531
gem5::X86ISA::getRFlags
uint64_t getRFlags(ThreadContext *tc)
Reconstruct the rflags register from the internal gem5 register state.
Definition: utility.cc:58
gem5::Kvm::capExtendedCPUID
bool capExtendedCPUID() const
Support for BaseKvmCPU::setCPUID2 and getSupportedCPUID().
Definition: vm.cc:120
std::vector
STL vector class.
Definition: stl.hh:37
IO_PCI_CONF_ADDR
#define IO_PCI_CONF_ADDR
Definition: x86_cpu.cc:58
gem5::X86KvmCPU::updateThreadContextFPU
void updateThreadContextFPU(const struct kvm_fpu &fpu)
Update FPU and SIMD registers using the legacy API.
Definition: x86_cpu.cc:1090
gem5::X86ISA::misc_reg::Fcw
@ Fcw
Definition: misc.hh:389
gem5::X86ISA::UEBit
@ UEBit
Definition: misc.hh:93
gem5::X86KvmCPU::dumpXSave
void dumpXSave() const
Definition: x86_cpu.cc:654
gem5::X86KvmCPU::cachedMsrIntersection
Kvm::MSRIndexVector cachedMsrIntersection
Cached intersection of supported MSRs.
Definition: x86_cpu.hh:255
gem5::BaseKvmCPU::setFPUState
void setFPUState(const struct kvm_fpu &state)
Definition: base.cc:847
gem5::ArmISA::i
Bitfield< 7 > i
Definition: misc_types.hh:67
faults.hh
gem5::RiscvISA::xs
Bitfield< 16, 15 > xs
Definition: misc.hh:557
gem5::Kvm::capXCRs
bool capXCRs() const
Support for getting and setting the x86 XCRs.
Definition: vm.cc:188
gem5::X86ISA::storeFloat80
void storeFloat80(void *_mem, double value)
Convert and store a double as an 80-bit float.
Definition: utility.cc:165
gem5::Kvm::capXSave
bool capXSave() const
Support for getting and setting the kvm_xsave structure.
Definition: vm.cc:198
FOREACH_IREG
#define FOREACH_IREG()
Definition: x86_cpu.cc:111
hack
#define hack(...)
Definition: logging.hh:248
gem5::X86ISA::BusyBit
@ BusyBit
Definition: misc.hh:103
gem5::X86KvmCPU::setMSR
void setMSR(uint32_t index, uint64_t value)
Definition: x86_cpu.cc:1521
gem5::BaseKvmCPU::getFPUState
void getFPUState(struct kvm_fpu &state) const
Get/Set the guest FPU/vector state.
Definition: base.cc:840
gem5::X86KvmCPU::kvmRunDrain
Tick kvmRunDrain() override
Run the virtual CPU until draining completes.
Definition: x86_cpu.cc:1245
gem5::X86KvmCPU::ioctlRun
void ioctlRun() override
Override for synchronizing state in kvm_run.
Definition: x86_cpu.cc:1403
gem5::BaseKvmCPU::kvmNonMaskableInterrupt
void kvmNonMaskableInterrupt()
Send a non-maskable interrupt to the guest.
Definition: base.cc:796
gem5::X86ISA::DEBit
@ DEBit
Definition: misc.hh:90
gem5::FXSave::mxcsr_mask
uint32_t mxcsr_mask
Definition: x86_cpu.cc:101
FOREACH_DTABLE
#define FOREACH_DTABLE()
Definition: x86_cpu.cc:164
gem5::X86ISA::OEBit
@ OEBit
Definition: misc.hh:92
gem5::ArmISA::at
Bitfield< 35, 32 > at
Definition: misc_types.hh:155
gem5::X86ISA::misc_reg::segSel
static RegIndex segSel(int index)
Definition: misc.hh:503
gem5::MemCmd
Definition: packet.hh:75
gem5::X86KvmCPU::archIsDrained
bool archIsDrained() const override
Check if there are pending events in the vCPU that prevents it from being drained.
Definition: x86_cpu.cc:1374
gem5::X86ISA::CpuidResult
Definition: cpuid.hh:41
gem5::X86ISA::misc_reg::segBase
static RegIndex segBase(int index)
Definition: misc.hh:510
gem5::FXSave::fpu_ip
uint64_t fpu_ip
Definition: x86_cpu.cc:96
gem5::Packet::dataStatic
void dataStatic(T *p)
Set the data pointer to the following value that should not be freed.
Definition: packet.hh:1147
reserved
reserved
Definition: pcireg.h:54
gem5::X86KvmCPU::getMsrIntersection
const Kvm::MSRIndexVector & getMsrIntersection() const
Get a list of MSRs supported by both gem5 and KVM.
Definition: x86_cpu.cc:1550
FOREACH_DREG
#define FOREACH_DREG()
Definition: x86_cpu.cc:142
gem5::X86ISA::CC3Bit
@ CC3Bit
Definition: misc.hh:102
gem5::BaseKvmCPU::thread
SimpleThread * thread
A cached copy of a thread's state in the form of a SimpleThread object.
Definition: base.hh:153
gem5::ArmISA::j
Bitfield< 24 > j
Definition: misc_types.hh:57
gem5::FXSave::fpu_dp
uint64_t fpu_dp
Definition: x86_cpu.cc:97
gem5::Request::UNCACHEABLE
@ UNCACHEABLE
The request is to an uncacheable address.
Definition: request.hh:125
gem5::X86KvmCPU::haveDebugRegs
bool haveDebugRegs
Kvm::capDebugRegs() available?
Definition: x86_cpu.hh:259
gem5::BaseKvmCPU::startup
void startup() override
Definition: base.cc:118
gem5::BaseKvmCPU::ioctlRun
virtual void ioctlRun()
Execute the KVM_RUN ioctl.
Definition: base.cc:1324
gem5::ThreadContext
ThreadContext is the external interface to all thread state for anything outside of the CPU.
Definition: thread_context.hh:94
gem5::Kvm::capVCPUEvents
bool capVCPUEvents() const
Support for getting and setting the kvm_vcpu_events structure.
Definition: vm.cc:168
gem5::X86KvmCPU::getMSR
uint64_t getMSR(uint32_t index) const
Definition: x86_cpu.cc:1535
gem5::Fault
std::shared_ptr< FaultBase > Fault
Definition: types.hh:248
gem5::X86ISA::misc_reg::Foseg
@ Foseg
Definition: misc.hh:395
gem5::X86KvmCPU::updateKvmStateSRegs
void updateKvmStateSRegs()
Update control registers (CRx, segments, etc.)
Definition: x86_cpu.cc:768
MSR_TSC
#define MSR_TSC
Definition: x86_cpu.cc:56
DPRINTF
#define DPRINTF(x,...)
Definition: trace.hh:186
int.hh
gem5::X86ISA::count
count
Definition: misc.hh:703
SEG_TYPE_BIT_ACCESSED
#define SEG_TYPE_BIT_ACCESSED
Definition: x86_cpu.cc:73
gem5::Packet
A Packet is used to encapsulate a transfer between two objects in the memory system (e....
Definition: packet.hh:291
gem5::BaseKvmCPU
Base class for KVM based CPU models.
Definition: base.hh:87
gem5::BaseKvmCPU::tc
ThreadContext * tc
ThreadContext object, provides an interface for external objects to modify this thread's state.
Definition: base.hh:158
gem5::FXSave::pad0
uint8_t pad0
Definition: x86_cpu.cc:80
msr.hh
gem5::MipsISA::PCState
GenericISA::DelaySlotPCState< 4 > PCState
Definition: pcstate.hh:40
gem5::probing::Packet
ProbePointArg< PacketInfo > Packet
Packet probe point.
Definition: mem.hh:109
gem5::X86ISA::float_reg::xmmLow
static RegId xmmLow(int index)
Definition: float.hh:139
gem5::Tick
uint64_t Tick
Tick count type.
Definition: types.hh:58
gem5::X86ISA::misc_reg::Mxcsr
@ Mxcsr
Definition: misc.hh:388
gem5::X86ISA::StackFaultBit
@ StackFaultBit
Definition: misc.hh:97
gem5::X86ISA::misc_reg::Fiseg
@ Fiseg
Definition: misc.hh:393
gem5::RequestPtr
std::shared_ptr< Request > RequestPtr
Definition: request.hh:92
gem5::setKvmSegmentReg
static void setKvmSegmentReg(ThreadContext *tc, struct kvm_segment &kvm_seg, const int index)
Definition: x86_cpu.cc:728
gem5::X86ISA::X86FaultBase::getVector
virtual uint8_t getVector() const
Get the vector of an interrupt.
Definition: faults.hh:85
gem5::MemCmd::ReadReq
@ ReadReq
Definition: packet.hh:86
gem5::X86ISA::float_reg::xmmHigh
static RegId xmmHigh(int index)
Definition: float.hh:145
gem5::X86ISA::float_reg::fpr
static RegId fpr(int index)
Definition: float.hh:127
SEG_SYS_TYPE_TSS_BUSY
#define SEG_SYS_TYPE_TSS_BUSY
Definition: x86_cpu.cc:64
gem5::FXSave::pad2
uint16_t pad2
Definition: x86_cpu.cc:91
gem5::dumpFpuCommon
static void dumpFpuCommon(const T &fpu)
Definition: x86_cpu.cc:273
gem5::FXSave::mxcsr
uint32_t mxcsr
Definition: x86_cpu.cc:100
gem5::X86ISA::misc_reg::Fop
@ Fop
Definition: misc.hh:397
gem5::BaseKvmCPU::getSpecialRegisters
void getSpecialRegisters(struct kvm_sregs &regs) const
Definition: base.cc:826
gem5::EventQueue::ScopedMigration
Definition: eventq.hh:672
utility.hh
gem5::X86ISA::Interrupts
Definition: interrupts.hh:78
gem5::X86ISA::misc_reg::Fsw
@ Fsw
Definition: misc.hh:390
compiler.hh
gem5::X86KvmCPU::updateKvmStateMSRs
void updateKvmStateMSRs()
Update MSR registers.
Definition: x86_cpu.cc:929
gem5::X86KvmCPU::setXCRs
void setXCRs(const struct kvm_xcrs &regs)
Definition: x86_cpu.cc:1600
gem5::makeKvmCpuid
static struct kvm_cpuid_entry2 makeKvmCpuid(uint32_t function, uint32_t index, CpuidResult &result)
Definition: x86_cpu.cc:1420
gem5::setContextSegment
void setContextSegment(ThreadContext *tc, const struct kvm_segment &kvm_seg, const int index)
Definition: x86_cpu.cc:1003
gem5::updateThreadContextFPUCommon
static void updateThreadContextFPUCommon(ThreadContext *tc, const T &fpu)
Definition: x86_cpu.cc:1057
gem5::X86ISA::misc_reg::Ds
@ Ds
Definition: misc.hh:308
gem5::ThreadContext::readMiscRegNoEffect
virtual RegVal readMiscRegNoEffect(RegIndex misc_reg) const =0
gem5::X86KvmCPU::getMSRs
void getMSRs(struct kvm_msrs &msrs) const
Definition: x86_cpu.cc:1512
IO_PCI_CONF_DATA_BASE
#define IO_PCI_CONF_DATA_BASE
Definition: x86_cpu.cc:59
gem5::X86ISA::convX87XTagsToTags
uint16_t convX87XTagsToTags(uint8_t ftwx)
Convert an x87 xtag word to normal tags format.
Definition: utility.cc:115
gem5::sim_clock::as_float::ms
double ms
millisecond
Definition: core.cc:54
gem5::X86KvmCPU::dumpFpuRegs
void dumpFpuRegs() const
Definition: x86_cpu.cc:604
gem5::X86ISA::misc_reg::Tsl
@ Tsl
Definition: misc.hh:312
SEG_CS_TYPE_ACCESSED
#define SEG_CS_TYPE_ACCESSED
Definition: x86_cpu.cc:67
gem5::X86KvmCPU::getHostCycles
uint64_t getHostCycles() const override
Get the value of the hardware cycle counter in the guest.
Definition: x86_cpu.cc:1270
SEG_CS_TYPE_READ_ACCESSED
#define SEG_CS_TYPE_READ_ACCESSED
Definition: x86_cpu.cc:69
gem5::auxv::Entry
@ Entry
Definition: aux_vector.hh:76
gem5::Addr
uint64_t Addr
Address type This will probably be moved somewhere else in the near future.
Definition: types.hh:147
gem5::X86KvmCPU::handleKvmExitIO
Tick handleKvmExitIO() override
Handle x86 legacy IO (in/out)
Definition: x86_cpu.cc:1301
gem5::checkSeg
static void checkSeg(const char *name, const int idx, const struct kvm_segment &seg, struct kvm_sregs sregs)
Definition: x86_cpu.cc:394
gem5::X86ISA::misc_reg::Cs
@ Cs
Definition: misc.hh:306
gem5::X86KvmCPU::updateThreadContextMSRs
void updateThreadContextMSRs()
Update MSR registers.
Definition: x86_cpu.cc:1114
gem5::X86ISA::X86Interrupt
Definition: faults.hh:118
gem5::BaseKvmCPU::Idle
@ Idle
Context not scheduled in KVM.
Definition: base.hh:199
name
const std::string & name()
Definition: trace.cc:49
base.hh
gem5::X86ISA::setRFlags
void setRFlags(ThreadContext *tc, uint64_t val)
Set update the rflags register and internal gem5 state.
Definition: utility.cc:74
gem5::BaseKvmCPU::setRegisters
void setRegisters(const struct kvm_regs &regs)
Definition: base.cc:819
gem5::KvmVM::kvm
Kvm * kvm
Global KVM interface.
Definition: vm.hh:421
gem5::FXSave::ctrl64
struct gem5::FXSave::@20::@23 ctrl64
gem5::X86ISA::doCpuid
bool doCpuid(ThreadContext *tc, uint32_t function, uint32_t index, CpuidResult &result)
Definition: cpuid.cc:91
gem5::X86ISA::convX87TagsToXTags
uint8_t convX87TagsToXTags(uint16_t ftw)
Convert an x87 tag word to abridged tag format.
Definition: utility.cc:90
gem5::X86KvmCPU::updateKvmState
void updateKvmState() override
Update the KVM state from the current thread context.
Definition: x86_cpu.cc:693
gem5::X86KvmCPU::getVCpuEvents
void getVCpuEvents(struct kvm_vcpu_events &events) const
Definition: x86_cpu.cc:1622
gem5::X86KvmCPU::getXSave
void getXSave(struct kvm_xsave &xsave) const
Definition: x86_cpu.cc:1607
gem5::X86KvmCPU::deliverInterrupts
void deliverInterrupts()
Inject pending interrupts from gem5 into the virtual CPU.
Definition: x86_cpu.cc:1144
gem5::X86KvmCPU::dump
void dump() const override
Dump the internal state to the terminal.
Definition: x86_cpu.cc:589
gem5::X86KvmCPU::haveXCRs
bool haveXCRs
Kvm::capXCRs() available?
Definition: x86_cpu.hh:268
gem5::FXSave::fpu_dp
uint32_t fpu_dp
Definition: x86_cpu.cc:89
gem5::X86ISA::StartupInterrupt
Definition: faults.hh:360
gem5::X86KvmCPU::updateKvmStateFPUXSave
void updateKvmStateFPUXSave()
Update FPU and SIMD registers using the XSave API.
Definition: x86_cpu.cc:895
inform
#define inform(...)
Definition: logging.hh:247
gem5::X86ISA::misc_reg::Gs
@ Gs
Definition: misc.hh:310
FOREACH_SEGMENT
#define FOREACH_SEGMENT()
Definition: x86_cpu.cc:152
gem5::Kvm::capDebugRegs
bool capDebugRegs() const
Support for getting and setting the kvm_debugregs structure.
Definition: vm.cc:178
gem5::SimpleThread::suspend
void suspend() override
Set the status to Suspended.
Definition: simple_thread.cc:144
gem5::floatToBits64
static uint64_t floatToBits64(double val)
Definition: types.hh:191
gem5::X86ISA::misc_reg::Fooff
@ Fooff
Definition: misc.hh:396
gem5::X86ISA::misc_reg::X87Top
@ X87Top
Definition: misc.hh:386
gem5::ThreadContext::setMiscReg
virtual void setMiscReg(RegIndex misc_reg, RegVal val)=0
gem5::dumpFpuSpec
static void dumpFpuSpec(const struct FXSave &xs)
Definition: x86_cpu.cc:257
gem5::X86ISA::seg
Bitfield< 2, 0 > seg
Definition: types.hh:87
gem5::X86KvmCPU::updateKvmStateFPULegacy
void updateKvmStateFPULegacy()
Update FPU and SIMD registers using the legacy API.
Definition: x86_cpu.cc:871
gem5::MemCmd::WriteReq
@ WriteReq
Definition: packet.hh:89
gem5::FXSave::ftwx
uint8_t ftwx
Definition: x86_cpu.cc:79
gem5::BaseKvmCPU::KVMCpuPort::submitIO
Tick submitIO(PacketPtr pkt)
Interface to send Atomic or Timing IO request.
Definition: base.cc:172
gem5::X86KvmCPU::X86KvmCPU
X86KvmCPU(const X86KvmCPUParams &params)
Definition: x86_cpu.cc:536
gem5::newVarStruct
static auto newVarStruct(size_t entries)
Definition: x86_cpu.cc:172
cpuid.hh
gem5::Kvm
KVM parent interface.
Definition: vm.hh:80
gem5::X86KvmCPU::kvmRun
Tick kvmRun(Tick ticks) override
Request KVM to run the guest for a given number of ticks.
Definition: x86_cpu.cc:1197
gem5::FXSave::fsw
uint16_t fsw
Definition: x86_cpu.cc:78
gem5::MipsISA::NonMaskableInterrupt
Definition: faults.hh:149
gem5::X86KvmCPU::handleKvmExitIRQWindowOpen
Tick handleKvmExitIRQWindowOpen() override
The guest exited because an interrupt window was requested.
Definition: x86_cpu.cc:1365
gem5::X86ISA::p
Bitfield< 0 > p
Definition: pagetable.hh:151
gem5::X86ISA::misc_reg::Rflags
@ Rflags
Definition: misc.hh:143
gem5::X86KvmCPU::setXSave
void setXSave(const struct kvm_xsave &xsave)
Definition: x86_cpu.cc:1614
gem5::BaseKvmCPU::kvmRun
virtual Tick kvmRun(Tick ticks)
Request KVM to run the guest for a given number of ticks.
Definition: base.cc:699
gem5::BaseKvmCPU::threadContextDirty
bool threadContextDirty
Is the gem5 context dirty? Set to true to force an update of the KVM vCPU state upon the next call to...
Definition: base.hh:648
gem5::X86KvmCPU::getXCRs
void getXCRs(struct kvm_xcrs &regs) const
Definition: x86_cpu.cc:1593
gem5::X86ISA::CpuidResult::rax
uint64_t rax
Definition: cpuid.hh:43
gem5::BaseKvmCPU::getRegisters
void getRegisters(struct kvm_regs &regs) const
Get/Set the register state of the guest vCPU.
Definition: base.cc:812
gem5::X86KvmCPU::updateThreadContextSRegs
void updateThreadContextSRegs(const struct kvm_sregs &sregs)
Update control registers (CRx, segments, etc.)
Definition: x86_cpu.cc:1039
gem5::X86ISA::misc_reg::Es
@ Es
Definition: misc.hh:305
gem5::BaseKvmCPU::vm
KvmVM * vm
Definition: base.hh:160
gem5::FXSave::fpu_ds
uint16_t fpu_ds
Definition: x86_cpu.cc:90
gem5::X86ISA::misc_reg::Cr8
@ Cr8
Definition: misc.hh:122
gem5::BaseKvmCPU::init
void init() override
Definition: base.cc:109
gem5::isCanonicalAddress
static bool isCanonicalAddress(uint64_t addr)
Definition: x86_cpu.cc:383
gem5::BaseKvmCPU::dataPort
KVMCpuPort dataPort
Port for data requests.
Definition: base.hh:633
gem5::X86KvmCPU::init
void init() override
Definition: x86_cpu.cc:542
gem5::BaseKvmCPU::getGuestData
uint8_t * getGuestData(uint64_t offset) const
Retrieve a pointer to guest data stored at the end of the kvm_run structure.
Definition: base.hh:327
SEG_SYS_TYPE_TSS_AVAILABLE
#define SEG_SYS_TYPE_TSS_AVAILABLE
Definition: x86_cpu.cc:62
gem5
Reference material can be found at the JEDEC website: UFS standard http://www.jedec....
Definition: gpu_translation_state.hh:37
gem5::X86KvmCPU::updateThreadContext
void updateThreadContext() override
Update the current thread context with the KVM state.
Definition: x86_cpu.cc:951
gem5::X86ISA::CC1Bit
@ CC1Bit
Definition: misc.hh:100
gem5::FXSave
Definition: x86_cpu.cc:75
gem5::X86ISA::x86IOAddress
static Addr x86IOAddress(const uint32_t port)
Definition: x86_traits.hh:79
gem5::X86KvmCPU::dumpMSRs
void dumpMSRs() const
Definition: x86_cpu.cc:674
FOREACH_SREG
#define FOREACH_SREG()
Definition: x86_cpu.cc:131
gem5::X86ISA::misc_reg::segLimit
static RegIndex segLimit(int index)
Definition: misc.hh:524
gem5::BaseKvmCPU::setSpecialRegisters
void setSpecialRegisters(const struct kvm_sregs &regs)
Definition: base.cc:833
gem5::X86ISA::misc_reg::Fs
@ Fs
Definition: misc.hh:309
gem5::X86KvmCPU::useXSave
bool useXSave
Should the XSave interface be used to sync the FPU and SIMD registers?
Definition: x86_cpu.hh:266
panic
#define panic(...)
This implements a cprintf based panic() function.
Definition: logging.hh:178
gem5::X86ISA::PEBit
@ PEBit
Definition: misc.hh:94
gem5::setKvmDTableReg
static void setKvmDTableReg(ThreadContext *tc, struct kvm_dtable &kvm_dtable, const int index)
Definition: x86_cpu.cc:748
gem5::X86ISA::addr
Bitfield< 3 > addr
Definition: types.hh:84
gem5::X86KvmCPU::setVCpuEvents
void setVCpuEvents(const struct kvm_vcpu_events &events)
Definition: x86_cpu.cc:1629
gem5::ThreadContext::setMiscRegNoEffect
virtual void setMiscRegNoEffect(RegIndex misc_reg, RegVal val)=0
gem5::ThreadContext::setReg
virtual void setReg(const RegId &reg, RegVal val)
Definition: thread_context.cc:183

Generated on Wed Jul 13 2022 10:39:13 for gem5 by doxygen 1.8.17