#include <linux/kvm.h>

#include "arch/registers.hh"

#include "debug/Drain.hh"
#include "debug/Kvm.hh"
#include "debug/KvmContext.hh"
#include "debug/KvmIO.hh"
#include "debug/KvmInt.hh"

#define IO_PCI_CONF_ADDR 0xCF8
#define IO_PCI_CONF_DATA_BASE 0xCFC

#define SEG_SYS_TYPE_TSS_AVAILABLE 9
#define SEG_SYS_TYPE_TSS_BUSY 11

#define SEG_CS_TYPE_ACCESSED 9
#define SEG_CS_TYPE_READ_ACCESSED 11

#define SEG_TYPE_BIT_ACCESSED 1
uint64_t reserved[12];

static_assert(sizeof(FXSave) == 512, "Unexpected size of FXSave");
#define FOREACH_IREG()                          \
    do {                                        \
        APPLY_IREG(rax, INTREG_RAX);            \
        APPLY_IREG(rbx, INTREG_RBX);            \
        APPLY_IREG(rcx, INTREG_RCX);            \
        APPLY_IREG(rdx, INTREG_RDX);            \
        APPLY_IREG(rsi, INTREG_RSI);            \
        APPLY_IREG(rdi, INTREG_RDI);            \
        APPLY_IREG(rsp, INTREG_RSP);            \
        APPLY_IREG(rbp, INTREG_RBP);            \
        APPLY_IREG(r8, INTREG_R8);              \
        APPLY_IREG(r9, INTREG_R9);              \
        APPLY_IREG(r10, INTREG_R10);            \
        APPLY_IREG(r11, INTREG_R11);            \
        APPLY_IREG(r12, INTREG_R12);            \
        APPLY_IREG(r13, INTREG_R13);            \
        APPLY_IREG(r14, INTREG_R14);            \
        APPLY_IREG(r15, INTREG_R15);            \
    } while (0)

#define FOREACH_SREG()                                  \
    do {                                                \
        APPLY_SREG(cr0, MISCREG_CR0);                   \
        APPLY_SREG(cr2, MISCREG_CR2);                   \
        APPLY_SREG(cr3, MISCREG_CR3);                   \
        APPLY_SREG(cr4, MISCREG_CR4);                   \
        APPLY_SREG(cr8, MISCREG_CR8);                   \
        APPLY_SREG(efer, MISCREG_EFER);                 \
        APPLY_SREG(apic_base, MISCREG_APIC_BASE);       \
    } while (0)

#define FOREACH_DREG()                          \
    do {                                        \
        APPLY_DREG(db[0], MISCREG_DR0);         \
        APPLY_DREG(db[1], MISCREG_DR1);         \
        APPLY_DREG(db[2], MISCREG_DR2);         \
        APPLY_DREG(db[3], MISCREG_DR3);         \
        APPLY_DREG(dr6, MISCREG_DR6);           \
        APPLY_DREG(dr7, MISCREG_DR7);           \
    } while (0)

#define FOREACH_SEGMENT()                                       \
    do {                                                        \
        APPLY_SEGMENT(cs, MISCREG_CS - MISCREG_SEG_SEL_BASE);   \
        APPLY_SEGMENT(ds, MISCREG_DS - MISCREG_SEG_SEL_BASE);   \
        APPLY_SEGMENT(es, MISCREG_ES - MISCREG_SEG_SEL_BASE);   \
        APPLY_SEGMENT(fs, MISCREG_FS - MISCREG_SEG_SEL_BASE);   \
        APPLY_SEGMENT(gs, MISCREG_GS - MISCREG_SEG_SEL_BASE);   \
        APPLY_SEGMENT(ss, MISCREG_SS - MISCREG_SEG_SEL_BASE);   \
        APPLY_SEGMENT(tr, MISCREG_TR - MISCREG_SEG_SEL_BASE);   \
        APPLY_SEGMENT(ldt, MISCREG_TSL - MISCREG_SEG_SEL_BASE); \
    } while (0)

#define FOREACH_DTABLE()                                        \
    do {                                                        \
        APPLY_DTABLE(gdt, MISCREG_TSG - MISCREG_SEG_SEL_BASE);  \
        APPLY_DTABLE(idt, MISCREG_IDTR - MISCREG_SEG_SEL_BASE); \
    } while (0)
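These FOREACH_*()/APPLY_*() pairs form an X-macro pattern: a caller defines the matching APPLY_* macro to the per-register action it needs, expands the FOREACH macro, and undefines APPLY_* again. A minimal sketch of that usage (the wrapper function is illustrative, but the APPLY_IREG body matches the one used further down in this file):

// Sketch: copy every KVM integer register into a gem5 thread context.
static void
exampleCopyIntRegs(ThreadContext *tc, const struct kvm_regs &regs)
{
#define APPLY_IREG(kreg, mreg) tc->setIntReg(mreg, regs.kreg)
    FOREACH_IREG();
#undef APPLY_IREG
}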
template<typename STRUCT, typename ENTRY>
static STRUCT *
newVarStruct(size_t entries)
{
    return (STRUCT *)operator new(sizeof(STRUCT) + entries * sizeof(ENTRY));
}
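newVarStruct() allocates a KVM-style header followed by a variable-length array of ENTRY records (the kvm_msrs/kvm_msr_entry layout, for example). A hedged usage sketch mirroring how this file builds MSR buffers; the element count is purely illustrative:

// Allocate a kvm_msrs header with room for three kvm_msr_entry records.
std::unique_ptr<struct kvm_msrs> msrs(
    newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(3));
msrs->nmsrs = 3;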
inform("KVM register state:\n");

#define APPLY_IREG(kreg, mreg) \
    inform("\t" # kreg ": 0x%llx\n", regs.kreg)

inform("\trip: 0x%llx\n", regs.rip);
inform("\trflags: 0x%llx\n", regs.rflags);

inform("\t%s: @0x%llx+%x [sel: 0x%x, type: 0x%x]\n"
       "\t\tpres.: %u, dpl: %u, db: %u, s: %u, l: %u, g: %u, avl: %u, unus.: %u\n",

dumpKvm(const char *reg_name, const struct kvm_dtable &dtable)
    inform("\t%s: @0x%llx+%x\n", reg_name, dtable.base, dtable.limit);

#define APPLY_SREG(kreg, mreg) \
    inform("\t" # kreg ": 0x%llx\n", sregs.kreg);
#define APPLY_SEGMENT(kreg, idx) \
    dumpKvm(# kreg, sregs.kreg);
#define APPLY_DTABLE(kreg, idx) \
    dumpKvm(# kreg, sregs.kreg);

inform("Special registers:\n");

inform("Interrupt Bitmap:");
for (int i = 0; i < KVM_NR_INTERRUPTS; i += 64)
    inform(" 0x%.8x", sregs.interrupt_bitmap[i / 64]);
#ifdef KVM_GET_DEBUGREGS
dumpKvm(const struct kvm_debugregs &regs)
    inform("KVM debug state:\n");

#define APPLY_DREG(kreg, mreg) \
    inform("\t" # kreg ": 0x%llx\n", regs.kreg)

inform("\tflags: 0x%llx\n", regs.flags);

inform("\tlast_ip: 0x%x\n", xs.ctrl64.fpu_ip);
inform("\tlast_dp: 0x%x\n", xs.ctrl64.fpu_dp);
inform("\tmxcsr_mask: 0x%x\n", xs.mxcsr_mask);

inform("\tlast_ip: 0x%x\n", fpu.last_ip);
inform("\tlast_dp: 0x%x\n", fpu.last_dp);
const unsigned top((fpu.fsw >> 11) & 0x7);
inform("\tfcw: 0x%x\n", fpu.fcw);
inform("\tfsw: 0x%x (top: %i, "
       "conditions: %s%s%s%s, exceptions: %s%s%s%s%s%s %s%s%s)\n",
       (fpu.fsw & CC0Bit) ? "C0" : "",
       (fpu.fsw & CC1Bit) ? "C1" : "",
       (fpu.fsw & CC2Bit) ? "C2" : "",
       (fpu.fsw & CC3Bit) ? "C3" : "",
       (fpu.fsw & IEBit) ? "I" : "",
       (fpu.fsw & DEBit) ? "D" : "",
       (fpu.fsw & ZEBit) ? "Z" : "",
       (fpu.fsw & OEBit) ? "O" : "",
       (fpu.fsw & UEBit) ? "U" : "",
       (fpu.fsw & PEBit) ? "P" : "",
       (fpu.fsw & BusyBit) ? "BUSY " : ""

inform("\tftwx: 0x%x\n", fpu.ftwx);
inform("\tlast_opcode: 0x%x\n", fpu.last_opcode);
inform("\tmxcsr: 0x%x\n", fpu.mxcsr);
for (int i = 0; i < 8; ++i) {
    const unsigned reg_idx((i + top) & 0x7);
    const bool empty(!((fpu.ftwx >> reg_idx) & 0x1));

    for (int j = 0; j < 10; ++j)
        snprintf(&hex[j*2], 3, "%.2x", fpu.fpr[i][j]);
    inform("\t\tST%i/%i: 0x%s (%f)%s\n", i, reg_idx,
           hex, value, empty ? " (e)" : "");

inform("\tXMM registers:\n");
for (int i = 0; i < 16; ++i) {
    for (int j = 0; j < 16; ++j)
        snprintf(&hex[j*2], 3, "%.2x", fpu.xmm[i][j]);
    inform("\t\t%i: 0x%s\n", i, hex);

inform("FPU registers:\n");

inform("FPU registers (XSave):\n");
for (int i = 0; i < msrs.nmsrs; ++i) {
    const struct kvm_msr_entry &e(msrs.entries[i]);

    inform("\t0x%x: 0x%x\n", e.index, e.data);

inform("KVM XCR registers:\n");

inform("\tFlags: 0x%x\n", regs.flags);
for (int i = 0; i < regs.nr_xcrs; ++i) {
    inform("\tXCR[0x%x]: 0x%x\n",

inform("\tException: [inj: %i, nr: %i, has_ec: %i, ec: %i]\n",
       events.exception.injected, events.exception.nr,
       events.exception.has_error_code, events.exception.error_code);

inform("\tInterrupt: [inj: %i, nr: %i, soft: %i]\n",
       events.interrupt.injected, events.interrupt.nr,
       events.interrupt.soft);

inform("\tNMI: [inj: %i, pending: %i, masked: %i]\n",
       events.nmi.injected, events.nmi.pending,

inform("\tSIPI vector: 0x%x\n", events.sipi_vector);
inform("\tFlags: 0x%x\n", events.flags);
uint64_t upper_half(addr & 0xffff800000000000ULL);
return upper_half == 0 || upper_half == 0xffff800000000000;
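isCanonicalAddress() checks the x86-64 rule that bits 63:47 of a virtual address must all be equal, i.e. the address is the sign-extension of its low 48 bits. A small sketch restating the same test, with example values chosen purely for illustration:

static bool
exampleIsCanonical(uint64_t addr)
{
    const uint64_t upper(addr & 0xffff800000000000ULL);
    return upper == 0 || upper == 0xffff800000000000ULL;
}
// exampleIsCanonical(0x00007fffffffffffULL) -> true  (top of the lower half)
// exampleIsCanonical(0xffff800000000000ULL) -> true  (start of the upper half)
// exampleIsCanonical(0x0000800000000000ULL) -> false (bit 47 set, bits 63:48 clear)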
struct kvm_sregs sregs)

if (seg.base & 0xffffffff00000000ULL)

warn("CS type is 3 but dpl != 0.\n");

if (seg.dpl != sregs.ss.dpl)
    warn("CS type is %i but CS DPL != SS DPL\n", seg.type);

if (seg.dpl > sregs.ss.dpl)
    warn("CS type is %i but CS DPL > SS DPL\n", seg.type);

warn("Illegal CS type: %i\n", seg.type);

if (sregs.cs.type == 3 && seg.dpl != 0)
    warn("CS type is 3, but SS DPL is != 0.\n");

if (!(sregs.cr0 & 1) && seg.dpl != 0)
    warn("SS DPL is %i, but CR0 PE is 0\n", seg.dpl);

warn("Illegal SS type: %i\n", seg.type);

if (!(seg.type & 0x1) ||
    ((seg.type & 0x8) && !(seg.type & 0x2)))
    warn("%s has an illegal type field: %i\n", name, seg.type);

if (seg.type != 3 && seg.type != 11)
    warn("%s: Illegal segment type (%i)\n", name, seg.type);

warn("%s: Illegal segment type (%i)\n", name, seg.type);

if (((seg.limit & 0xFFF) == 0 && seg.g) ||
    ((seg.limit & 0xFFF00000) != 0 && !seg.g)) {
    warn("%s limit (0x%x) and g (%i) combination is illegal.\n",
useXSave(params->useXSave)

panic("KVM: Missing capability (KVM_CAP_SET_TSS_ADDR)\n");
panic("KVM: Missing capability (KVM_CAP_EXT_CPUID)\n");
warn("KVM: Missing capability (KVM_CAP_USER_NMI)\n");
warn("KVM: Missing capability (KVM_CAP_VCPU_EVENTS)\n");

warn("KVM: XSAVE not supported by host. MXCSR synchronization might be "
     "unreliable due to kernel bugs.\n");

warn("KVM: XSave FPU/SIMD synchronization disabled by user.\n");
struct kvm_regs regs;

struct kvm_sregs sregs;

#ifdef KVM_GET_DEBUGREGS
struct kvm_debugregs dregs;

inform("Debug registers not supported by kernel.\n");

struct kvm_xcrs xcrs;

inform("XCRs not supported by kernel.\n");

struct kvm_xsave xsave;

inform("XSave not supported by kernel.\n");

struct kvm_vcpu_events events;

const Kvm::MSRIndexVector &supported_msrs(vm.kvm->getSupportedMSRs());
std::unique_ptr<struct kvm_msrs> msrs(
    newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(
        supported_msrs.size()));

msrs->nmsrs = supported_msrs.size();
for (int i = 0; i < supported_msrs.size(); ++i) {
    struct kvm_msr_entry &e(msrs->entries[i]);
    e.index = supported_msrs[i];
DPRINTF(KvmContext, "X86KvmCPU::updateKvmState():\n");

struct kvm_regs regs;

#define APPLY_IREG(kreg, mreg) regs.kreg = tc->readIntReg(mreg)

kvm_seg.type = attr.type;
kvm_seg.present = attr.present;
kvm_seg.dpl = attr.dpl;
kvm_seg.db = attr.defaultSize;
kvm_seg.s = attr.system;
kvm_seg.l = attr.longMode;
kvm_seg.g = attr.granularity;
kvm_seg.avl = attr.avl;

kvm_seg.unusable = 0;

struct kvm_sregs sregs;

#define APPLY_SREG(kreg, mreg) sregs.kreg = tc->readMiscRegNoEffect(mreg)
#define APPLY_SEGMENT(kreg, idx) setKvmSegmentReg(tc, sregs.kreg, idx)
#define APPLY_DTABLE(kreg, idx) setKvmDTableReg(tc, sregs.kreg, idx)

memset(&sregs.interrupt_bitmap, 0, sizeof(sregs.interrupt_bitmap));

hack("tr.type (%i) is not busy. Forcing the busy bit.\n",

sregs.cs.dpl != sregs.ss.dpl) {

hack("CS.DPL (%i) != SS.DPL (%i): Forcing SS.DPL to %i\n",
     sregs.cs.dpl, sregs.ss.dpl, sregs.cs.dpl);
sregs.ss.dpl = sregs.cs.dpl;

if (!rflags_nocc.vm) {

#define APPLY_SEGMENT(kreg, idx) \
    checkSeg(# kreg, idx + MISCREG_SEG_SEL_BASE, sregs.kreg, sregs)
template <typename T>

const unsigned top((fpu.fsw >> 11) & 0x7);
for (int i = 0; i < 8; ++i) {
    const unsigned reg_idx((i + top) & 0x7);

    DPRINTF(KvmContext, "Setting KVM FP reg %i (st[%i]) := %f\n",

for (int i = 0; i < 16; ++i) {
    *(uint64_t *)&fpu.xmm[i][0] =
    *(uint64_t *)&fpu.xmm[i][8] =

memset(&fpu, 0, sizeof(fpu));

warn_once("MISCREG_FISEG is non-zero.\n");

warn_once("MISCREG_FOSEG is non-zero.\n");

struct kvm_xsave kxsave;

memset(&kxsave, 0, sizeof(kxsave));

warn_once("MISCREG_FISEG is non-zero.\n");

warn_once("MISCREG_FOSEG is non-zero.\n");
for (auto it = indices.cbegin(); it != indices.cend(); ++it) {
    struct kvm_msr_entry e;

    DPRINTF(KvmContext, "Adding MSR: idx: 0x%x, data: 0x%x\n",

struct kvm_regs regs;
struct kvm_sregs sregs;

DPRINTF(KvmContext, "X86KvmCPU::updateThreadContext():\n");

struct kvm_xsave xsave;
const struct kvm_sregs &sregs)

#define APPLY_IREG(kreg, mreg) tc->setIntReg(mreg, regs.kreg)

attr.type = kvm_seg.type;
attr.present = kvm_seg.present;
attr.dpl = kvm_seg.dpl;
attr.defaultSize = kvm_seg.db;
attr.system = kvm_seg.s;
attr.longMode = kvm_seg.l;
attr.granularity = kvm_seg.g;
attr.avl = kvm_seg.avl;
attr.unusable = kvm_seg.unusable;

#define APPLY_SREG(kreg, mreg) tc->setMiscRegNoEffect(mreg, sregs.kreg)
#define APPLY_SEGMENT(kreg, idx) setContextSegment(tc, sregs.kreg, idx)
#define APPLY_DTABLE(kreg, idx) setContextSegment(tc, sregs.kreg, idx)

#undef APPLY_SEGMENT
template<typename T>

const unsigned top((fpu.fsw >> 11) & 0x7);

for (int i = 0; i < 8; ++i) {
    const unsigned reg_idx((i + top) & 0x7);

    DPRINTF(KvmContext, "Setting gem5 FP reg %i (st[%i]) := %f\n",

for (int i = 0; i < 16; ++i) {
std::unique_ptr<struct kvm_msrs> kvm_msrs(
    newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(msrs.size()));
struct kvm_msr_entry *entry;

kvm_msrs->nmsrs = msrs.size();
entry = &kvm_msrs->entries[0];
for (auto it = msrs.cbegin(); it != msrs.cend(); ++it, ++entry) {

entry->reserved = 0;

entry = &kvm_msrs->entries[0];
for (int i = 0; i < kvm_msrs->nmsrs; ++i, ++entry) {
    DPRINTF(KvmContext, "Setting M5 MSR: idx: 0x%x, data: 0x%x\n",
            entry->index, entry->data);
DPRINTF(KvmInt, "Delivering NMI\n");

DPRINTF(KvmInt, "INIT interrupt\n");
fault.get()->invoke(tc);

DPRINTF(KvmInt, "STARTUP interrupt\n");
fault.get()->invoke(tc);

} else if (x86int) {
    struct kvm_interrupt kvm_int;

    DPRINTF(KvmInt, "Delivering interrupt: %s (%u)\n",
            fault->name(), kvm_int.irq);

panic("KVM: Unknown interrupt type\n");

if (lapic->checkInterruptsRaw()) {
    if (lapic->hasPendingUnmaskable()) {
        "Delivering unmaskable interrupt.\n");
    } else if (kvm_run.ready_for_interrupt_injection) {
        if (lapic->checkInterrupts()) {
            "M5 has pending interrupts, delivering interrupt.\n");

            "Interrupt delivery delayed due to KVM confusion.\n");
            kvm_run.request_interrupt_window = 1;
    } else if (!kvm_run.request_interrupt_window) {
        "M5 has pending interrupts, requesting interrupt "
        kvm_run.request_interrupt_window = 1;

kvm_run.request_interrupt_window = 0;
DPRINTF(Drain, "kvmRunDrain: Architecture code isn't drained\n");

kvm_run.request_interrupt_window = 1;

DPRINTF(Drain, "kvmRunDrain: Delivering pending IO\n");
const uint16_t port(kvm_run.io.port);

assert(kvm_run.exit_reason == KVM_EXIT_IO);

if (kvm_run.io.size != 4) {
    panic("Unexpected IO size (%u) for address 0x%x.\n",
          kvm_run.io.size, port);

if (kvm_run.io.count != 1) {
    panic("Unexpected IO count (%u) for address 0x%x.\n",
          kvm_run.io.count, port);

if (kvm_run.io.direction == KVM_EXIT_IO_OUT)

bool isWrite(kvm_run.io.direction == KVM_EXIT_IO_OUT);
unsigned char *guestData(getGuestData(kvm_run.io.data_offset));

uint16_t port(kvm_run.io.port);

const int count(kvm_run.io.count);

assert(kvm_run.io.direction == KVM_EXIT_IO_IN ||
       kvm_run.io.direction == KVM_EXIT_IO_OUT);

DPRINTF(KvmIO, "KVM-x86: Handling IO instruction (%s) (port: 0x%x)\n",
        (isWrite ? "out" : "in"), kvm_run.io.port);

if (pciConfigAddr & 0x80000000) {

RequestPtr io_req = std::make_shared<Request>(pAddr, kvm_run.io.size,

guestData += kvm_run.io.size;
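On a KVM_EXIT_IO exit the transferred bytes live inside the shared kvm_run mapping itself, io.data_offset bytes from its start; getGuestData() resolves that offset, and string IO repeats io.count items of io.size bytes each. A hedged sketch of walking that buffer (the loop and variable names are illustrative, not this file's code):

// Illustration: iterate over the items of a (possibly string) IO exit.
struct kvm_run *run(getKvmRunState());
uint8_t *data(getGuestData(run->io.data_offset));
for (unsigned i = 0; i < run->io.count; ++i, data += run->io.size) {
    // KVM_EXIT_IO_OUT: 'data' holds what the guest wrote to the port.
    // KVM_EXIT_IO_IN:  the handler must fill 'data' before resuming the guest.
}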
struct kvm_vcpu_events events;

const bool pending_events(events.exception.injected ||
                          events.interrupt.injected ||
                          events.nmi.injected || events.nmi.pending);

if (pending_events) {
    DPRINTF(Drain, "archIsDrained: Pending events: %s %s %s %s\n",
            events.exception.injected ? "exception" : "",
            events.interrupt.injected ? "interrupt" : "",
            events.nmi.injected ? "nmi[i]" : "",
            events.nmi.pending ? "nmi[p]" : "");

return !pending_events;
static struct kvm_cpuid_entry2

struct kvm_cpuid_entry2 e;
e.function = function;

e.eax = (uint32_t)result.rax;
e.ebx = (uint32_t)result.rbx;
e.ecx = (uint32_t)result.rcx;
e.edx = (uint32_t)result.rdx;
Kvm::CPUIDVector m5_supported;

for (uint32_t function = 0; function <= func0.rax; ++function) {

for (uint32_t function = 0x80000000; function <= efunc0.rax; ++function) {

if (ioctl(KVM_SET_CPUID2, (void *)&cpuid) == -1)
    panic("KVM: Failed to set guest CPUID2 (errno: %i)\n",

std::unique_ptr<struct kvm_cpuid2> kvm_cpuid(
    newVarStruct<struct kvm_cpuid2, struct kvm_cpuid_entry2>(cpuid.size()));

kvm_cpuid->nent = cpuid.size();
std::copy(cpuid.begin(), cpuid.end(), kvm_cpuid->entries);
if (ioctl(KVM_SET_MSRS, (void *)&msrs) == -1)
    panic("KVM: Failed to set guest MSRs (errno: %i)\n",

std::unique_ptr<struct kvm_msrs> kvm_msrs(
    newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(msrs.size()));

kvm_msrs->nmsrs = msrs.size();
std::copy(msrs.begin(), msrs.end(), kvm_msrs->entries);

if (ioctl(KVM_GET_MSRS, (void *)&msrs) == -1)
    panic("KVM: Failed to get guest MSRs (errno: %i)\n",

std::unique_ptr<struct kvm_msrs> kvm_msrs(
    newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(1));
struct kvm_msr_entry &entry(kvm_msrs->entries[0]);

kvm_msrs->nmsrs = 1;
entry.index = index;

std::unique_ptr<struct kvm_msrs> kvm_msrs(
    newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(1));
struct kvm_msr_entry &entry(kvm_msrs->entries[0]);

kvm_msrs->nmsrs = 1;
entry.index = index;
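getMSR()/setMSR() wrap a single-entry kvm_msrs buffer around the KVM_GET_MSRS/KVM_SET_MSRS ioctls. A hedged usage sketch; MSR 0x10 (IA32_TIME_STAMP_COUNTER) is only an example index:

// Read the guest TSC and write back an adjusted value (illustrative only).
const uint32_t msr_tsc(0x10);          // IA32_TIME_STAMP_COUNTER
uint64_t tsc(getMSR(msr_tsc));
setMSR(msr_tsc, tsc + 1000);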
const Kvm::MSRIndexVector &

const Kvm::MSRIndexVector &kvm_msrs(vm.kvm->getSupportedMSRs());

DPRINTF(Kvm, "kvm-x86: Updating MSR intersection\n");
for (auto it = kvm_msrs.cbegin(); it != kvm_msrs.cend(); ++it) {

    DPRINTF(Kvm, "kvm-x86: Adding MSR 0x%x\n", *it);

    warn("kvm-x86: MSR (0x%x) unsupported by gem5. Skipping.\n",
#ifdef KVM_GET_DEBUGREGS
if (ioctl(KVM_GET_DEBUGREGS, &regs) == -1)
    panic("KVM: Failed to get guest debug registers\n");
#else
panic("KVM: Unsupported getDebugRegisters call.\n");
#endif

#ifdef KVM_SET_DEBUGREGS
if (ioctl(KVM_SET_DEBUGREGS, (void *)&regs) == -1)
    panic("KVM: Failed to set guest debug registers\n");
#else
panic("KVM: Unsupported setDebugRegisters call.\n");
#endif

if (ioctl(KVM_GET_XCRS, &regs) == -1)
    panic("KVM: Failed to get guest XCRs\n");

if (ioctl(KVM_SET_XCRS, (void *)&regs) == -1)
    panic("KVM: Failed to set guest XCRs\n");

if (ioctl(KVM_GET_XSAVE, &xsave) == -1)
    panic("KVM: Failed to get guest XSave state\n");

if (ioctl(KVM_SET_XSAVE, (void *)&xsave) == -1)
    panic("KVM: Failed to set guest XSave state\n");

if (ioctl(KVM_GET_VCPU_EVENTS, &events) == -1)
    panic("KVM: Failed to get guest vCPU events\n");

if (ioctl(KVM_SET_VCPU_EVENTS, (void *)&events) == -1)
    panic("KVM: Failed to set guest vCPU events\n");
X86KvmCPUParams::create()
Kvm * kvm
Global KVM interface.
x86 implementation of a KVM-based hardware virtualized CPU.
Tick handleKvmExitIO() override
Handle x86 legacy IO (in/out)
virtual RegVal readMiscRegNoEffect(RegIndex misc_reg) const =0
static void forceSegAccessed(struct kvm_segment &seg)
uint8_t convX87TagsToXTags(uint16_t ftw)
Convert an x87 tag word to abridged tag format.
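The full x87 tag word keeps two bits per register (0b11 meaning empty), while the FXSAVE/XSave image stores an abridged form with one bit per register, set when the register is occupied. A hedged sketch of that conversion, not necessarily the exact implementation used here:

static uint8_t
exampleConvX87TagsToXTags(uint16_t ftw)
{
    uint8_t ftwx(0);
    for (int i = 0; i < 8; ++i) {
        if (((ftw >> (2 * i)) & 0x3) != 0x3)  // register i is not empty
            ftwx |= 1 << i;
    }
    return ftwx;
}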
void kvmNonMaskableInterrupt()
Send a non-maskable interrupt to the guest.
void getVCpuEvents(struct kvm_vcpu_events &events) const
void updateThreadContextFPU(const struct kvm_fpu &fpu)
Update FPU and SIMD registers using the legacy API.
static struct kvm_cpuid_entry2 makeKvmCpuid(uint32_t function, uint32_t index, CpuidResult &result)
const Kvm::MSRIndexVector & getMsrIntersection() const
Get a list of MSRs supported by both gem5 and KVM.
bool archIsDrained() const override
Check if there are pending events in the vCPU that prevents it from being drained.
bool capXCRs() const
Support for getting and setting the x86 XCRs.
void updateThreadContext() override
Update the current thread context with the KVM state.
KVMCpuPort dataPort
Port for data requests.
void getXSave(struct kvm_xsave &xsave) const
bool haveXSave
Kvm::capXSave() available?
void dumpVCpuEvents() const
void updateThreadContextMSRs()
Update MSR registers.
Status _status
CPU run state.
static MiscRegIndex MISCREG_SEG_LIMIT(int index)
uint8_t * getGuestData(uint64_t offset) const
Retrieve a pointer to guest data stored at the end of the kvm_run structure.
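Since the IO/MMIO payload is stored inside the kvm_run mapping, such a helper is essentially pointer arithmetic on that mapping. A minimal sketch, assuming the mapping pointer is the one returned by getKvmRunState():

uint8_t *
exampleGetGuestData(struct kvm_run *run, uint64_t offset)
{
    return reinterpret_cast<uint8_t *>(run) + offset;
}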
uint64_t getMSR(uint32_t index) const
struct FXSave M5_ATTR_PACKED
void updateThreadContextRegs(const struct kvm_regs &regs, const struct kvm_sregs &sregs)
Support routines to update the state of gem5's thread context from KVM's state representation.
uint64_t Tick
Tick count type.
bool capDebugRegs() const
Support for getting and setting the kvm_debugregs structure.
void setFPUState(const struct kvm_fpu &state)
void setRegisters(const struct kvm_regs ®s)
void suspend() override
Set the status to Suspended.
double loadFloat80(const void *_mem)
Load an 80-bit float from memory and convert it to double.
std::shared_ptr< Request > RequestPtr
void setCPUID(const struct kvm_cpuid2 &cpuid)
Methods to access CPUID information using the extended API.
static STRUCT * newVarStruct(size_t entries)
uint16_t convX87XTagsToTags(uint8_t ftwx)
Convert an x87 xtag word to normal tags format.
void dump() const override
Dump the internal state to the terminal.
Tick kvmRunWrapper(Tick ticks)
Wrapper that synchronizes state in kvm_run.
struct FXSave::@29::@32 ctrl64
Tick submitIO(PacketPtr pkt)
Interface to send Atomic or Timing IO request.
void updateKvmStateMSRs()
Update MSR registers.
void setMSR(uint32_t index, uint64_t value)
bool capSetTSSAddress() const
Support for KvmVM::setTSSAddress()
Tick handleKvmExitIRQWindowOpen() override
The guest exited because an interrupt window was requested.
static MiscRegIndex MISCREG_SEG_SEL(int index)
void setMSRs(const struct kvm_msrs &msrs)
Methods to access MSRs in the guest.
void deliverInterrupts()
Inject pending interrupts from gem5 into the virtual CPU.
void kvmInterrupt(const struct kvm_interrupt &interrupt)
Send a normal interrupt to the guest.
virtual RegVal readFloatReg(RegIndex reg_idx) const =0
void storeFloat80(void *_mem, double value)
Convert and store a double as an 80-bit float.
static void checkSeg(const char *name, const int idx, const struct kvm_segment &seg, struct kvm_sregs sregs)
static bool isCanonicalAddress(uint64_t addr)
static void updateKvmStateFPUCommon(ThreadContext *tc, T &fpu)
bool haveDebugRegs
Kvm::capDebugRegs() available?
void syncThreadContext()
Update a thread context if the KVM state is dirty with respect to the cached thread context.
Base class for KVM based CPU models.
static void dumpFpuCommon(const T &fpu)
void dumpDebugRegs() const
SimpleThread * thread
A cached copy of a thread's state in the form of a SimpleThread object.
ThreadContext is the external interface to all thread state for anything outside of the CPU.
void setContextSegment(ThreadContext *tc, const struct kvm_segment &kvm_seg, const int index)
static FloatRegIndex FLOATREG_FPR(int index)
Tick kvmRunDrain() override
Run the virtual CPU until draining completes.
uint64_t getHostCycles() const override
Get the value of the hardware cycle counter in the guest.
#define SEG_TYPE_BIT_ACCESSED
std::shared_ptr< FaultBase > Fault
std::vector< BaseInterrupts * > interrupts
void getXCRs(struct kvm_xcrs &regs) const
Tick kvmRun(Tick ticks) override
Request KVM to run the guest for a given number of ticks.
#define SEG_SYS_TYPE_TSS_BUSY
X86KvmCPU(X86KvmCPUParams *params)
virtual Tick kvmRun(Tick ticks)
Request KVM to run the guest for a given number of ticks.
struct kvm_run * getKvmRunState()
Get a pointer to the kvm_run structure containing all the input and output parameters from kvmRun().
virtual ContextID contextId() const =0
static MiscRegIndex MISCREG_SEG_ATTR(int index)
void handleIOMiscReg32(int miscreg)
Handle a 32-bit IO access that should be mapped to a MiscReg.
void setRFlags(ThreadContext *tc, uint64_t val)
Update the rflags register and internal gem5 state.
static FloatRegIndex FLOATREG_XMM_HIGH(int index)
@ UNCACHEABLE
The request is to an uncacheable address.
static void setKvmDTableReg(ThreadContext *tc, struct kvm_dtable &kvm_dtable, const int index)
EventQueue * deviceEventQueue()
Get a pointer to the event queue owning devices.
#define IO_PCI_CONF_DATA_BASE
void setDebugRegisters(const struct kvm_debugregs &regs)
void updateKvmStateFPULegacy()
Update FPU and SIMD registers using the legacy API.
virtual uint8_t getVector() const
Get the vector of an interrupt.
@ MISCREG_PCI_CONFIG_ADDRESS
void getFPUState(struct kvm_fpu &state) const
Get/Set the guest FPU/vector state.
#define SEG_CS_TYPE_ACCESSED
static void dumpFpuSpec(const struct FXSave &xs)
This is exposed globally, independent of the ISA.
#define SEG_CS_TYPE_READ_ACCESSED
static double bitsToFloat64(uint64_t val)
ProbePointArg< PacketInfo > Packet
Packet probe point.
void updateCPUID()
Transfer gem5's CPUID values into the virtual CPU.
uint64_t Addr
Address type This will probably be moved somewhere else in the near future.
void getMSRs(struct kvm_msrs &msrs) const
bool useXSave
Should the XSave interface be used to sync the FPU and SIMD registers?
void updateThreadContextXSave(const struct kvm_xsave &kxsave)
Update FPU and SIMD registers using the XSave API.
const std::string & name()
static Addr x86PciConfigAddress(const uint32_t addr)
int ioctl(int request, long p1) const
vCPU ioctl interface.
bool threadContextDirty
Is the gem5 context dirty? Set to true to force an update of the KVM vCPU state upon the next call to...
void updateKvmStateFPU()
Update FPU and SIMD registers.
virtual TheISA::PCState pcState() const =0
uint64_t getRFlags(ThreadContext *tc)
Reconstruct the rflags register from the internal gem5 register state.
void setXSave(const struct kvm_xsave &xsave)
#define FOREACH_SEGMENT()
static void setKvmSegmentReg(ThreadContext *tc, struct kvm_segment &kvm_seg, const int index)
static FloatRegIndex FLOATREG_XMM_LOW(int index)
@ Idle
Context not scheduled in KVM.
bool capUserNMI() const
Support for BaseKvmCPU::kvmNonMaskableInterrupt().
void setVCpuEvents(const struct kvm_vcpu_events &events)
void dumpSpecRegs() const
virtual void setFloatReg(RegIndex reg_idx, RegVal val)=0
ThreadContext * tc
ThreadContext object, provides an interface for external objects to modify this thread's state.
GenericISA::DelaySlotPCState< MachInst > PCState
void getDebugRegisters(struct kvm_debugregs &regs) const
Wrappers around KVM's state transfer methods.
bool doCpuid(ThreadContext *tc, uint32_t function, uint32_t index, CpuidResult &result)
void dataStatic(T *p)
Set the data pointer to the following value that should not be freed.
static void dumpKvm(const struct kvm_regs &regs)
A Packet is used to encapsulate a transfer between two objects in the memory system (e....
void getSpecialRegisters(struct kvm_sregs &regs) const
virtual RegVal readMiscReg(RegIndex misc_reg)=0
bool haveXCRs
Kvm::capXCRs() available?
virtual void setMiscReg(RegIndex misc_reg, RegVal val)=0
void updateKvmStateSRegs()
Update control registers (CRx, segments, etc.)
static void updateThreadContextFPUCommon(ThreadContext *tc, const T &fpu)
void updateKvmStateRegs()
Support routines to update the state of the KVM CPU from gem5's state representation.
RequestorID dataRequestorId() const
Reads this CPU's unique data requestor ID.
static MiscRegIndex MISCREG_SEG_BASE(int index)
void setSpecialRegisters(const struct kvm_sregs &regs)
void updateKvmState() override
Update the KVM state from the current thread context.
virtual void setMiscRegNoEffect(RegIndex misc_reg, RegVal val)=0
void getRegisters(struct kvm_regs &regs) const
Get/Set the register state of the guest vCPU.
void updateKvmStateFPUXSave()
Update FPU and SIMD registers using the XSave API.
bool capXSave() const
Support for getting and setting the kvm_xsave structure.
#define SEG_SYS_TYPE_TSS_AVAILABLE
void setXCRs(const struct kvm_xcrs &regs)
bool capExtendedCPUID() const
Support for BaseKvmCPU::setCPUID2 and getSupportedCPUID().
static Addr x86IOAddress(const uint32_t port)
virtual Addr instAddr() const =0
const MsrMap msrMap
Map between MSR addresses and their corresponding misc registers.
void updateThreadContextSRegs(const struct kvm_sregs &sregs)
Update control registers (CRx, segments, etc.)
static uint64_t floatToBits64(double val)
Kvm::MSRIndexVector cachedMsrIntersection
Cached intersection of supported MSRs.
#define panic(...)
This implements a cprintf based panic() function.
bool capVCPUEvents() const
Support for getting and setting the kvm_vcpu_events structure.