#include <linux/kvm.h>

#include "debug/Drain.hh"
#include "debug/Kvm.hh"
#include "debug/KvmContext.hh"
#include "debug/KvmIO.hh"
#include "debug/KvmInt.hh"

using namespace X86ISA;

#define IO_PCI_CONF_ADDR 0xCF8
#define IO_PCI_CONF_DATA_BASE 0xCFC

#define SEG_SYS_TYPE_TSS_AVAILABLE 9
#define SEG_SYS_TYPE_TSS_BUSY 11

#define SEG_CS_TYPE_ACCESSED 9
#define SEG_CS_TYPE_READ_ACCESSED 11

#define SEG_TYPE_BIT_ACCESSED 1

static_assert(sizeof(FXSave) == 512, "Unexpected size of FXSave");
#define FOREACH_IREG() \
    APPLY_IREG(rax, int_reg::Rax); \
    APPLY_IREG(rbx, int_reg::Rbx); \
    APPLY_IREG(rcx, int_reg::Rcx); \
    APPLY_IREG(rdx, int_reg::Rdx); \
    APPLY_IREG(rsi, int_reg::Rsi); \
    APPLY_IREG(rdi, int_reg::Rdi); \
    APPLY_IREG(rsp, int_reg::Rsp); \
    APPLY_IREG(rbp, int_reg::Rbp); \
    APPLY_IREG(r8, int_reg::R8); \
    APPLY_IREG(r9, int_reg::R9); \
    APPLY_IREG(r10, int_reg::R10); \
    APPLY_IREG(r11, int_reg::R11); \
    APPLY_IREG(r12, int_reg::R12); \
    APPLY_IREG(r13, int_reg::R13); \
    APPLY_IREG(r14, int_reg::R14); \
    APPLY_IREG(r15, int_reg::R15);

#define FOREACH_SREG() \
    APPLY_SREG(cr0, misc_reg::Cr0); \
    APPLY_SREG(cr2, misc_reg::Cr2); \
    APPLY_SREG(cr3, misc_reg::Cr3); \
    APPLY_SREG(cr4, misc_reg::Cr4); \
    APPLY_SREG(cr8, misc_reg::Cr8); \
    APPLY_SREG(efer, misc_reg::Efer); \
    APPLY_SREG(apic_base, misc_reg::ApicBase);

#define FOREACH_DREG() \
    APPLY_DREG(db[0], misc_reg::Dr0); \
    APPLY_DREG(db[1], misc_reg::Dr1); \
    APPLY_DREG(db[2], misc_reg::Dr2); \
    APPLY_DREG(db[3], misc_reg::Dr3); \
    APPLY_DREG(dr6, misc_reg::Dr6); \
    APPLY_DREG(dr7, misc_reg::Dr7);

#define FOREACH_SEGMENT() \
    APPLY_SEGMENT(cs, misc_reg::Cs - misc_reg::SegSelBase); \
    APPLY_SEGMENT(ds, misc_reg::Ds - misc_reg::SegSelBase); \
    APPLY_SEGMENT(es, misc_reg::Es - misc_reg::SegSelBase); \
    APPLY_SEGMENT(fs, misc_reg::Fs - misc_reg::SegSelBase); \
    APPLY_SEGMENT(gs, misc_reg::Gs - misc_reg::SegSelBase); \
    APPLY_SEGMENT(ss, misc_reg::Ss - misc_reg::SegSelBase); \
    APPLY_SEGMENT(tr, misc_reg::Tr - misc_reg::SegSelBase); \
    APPLY_SEGMENT(ldt, misc_reg::Tsl - misc_reg::SegSelBase);

#define FOREACH_DTABLE() \
    APPLY_DTABLE(gdt, misc_reg::Tsg - misc_reg::SegSelBase); \
    APPLY_DTABLE(idt, misc_reg::Idtr - misc_reg::SegSelBase);
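The FOREACH_*/APPLY_* pairs above are a classic X-macro pattern: the FOREACH_* macro fixes the register list once, and each call site supplies its own APPLY_* definition to generate either a dump, a copy into the KVM structure, or a copy back out of it. A minimal, self-contained sketch of the same pattern; the register names and struct fields here are illustrative stand-ins, not gem5's:

#include <cstdint>
#include <cstdio>

struct DemoRegs { uint64_t rax, rbx, rcx; };

// The list is written once...
#define FOREACH_DEMO_IREG() \
    APPLY_IREG(rax);        \
    APPLY_IREG(rbx);        \
    APPLY_IREG(rcx);

static void
dumpDemo(const DemoRegs &regs)
{
    // ...and each user defines APPLY_IREG to mean something different.
#define APPLY_IREG(kreg) \
    printf("\t" #kreg ": 0x%llx\n", (unsigned long long)regs.kreg)
    FOREACH_DEMO_IREG();
#undef APPLY_IREG
}

int
main()
{
    DemoRegs regs{1, 2, 3};
    dumpDemo(regs);
    return 0;
}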
template<typename Struct, typename Entry>
static auto
newVarStruct(size_t entries)
{
    size_t size = sizeof(Struct) + entries * sizeof(Entry);
    return std::unique_ptr<Struct, void(*)(Struct *)>(
        (Struct *)operator new(size),
        [](Struct *p) { operator delete(p); });
}
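kvm_msrs, kvm_cpuid2 and similar KVM structures end in a flexible array of entries, so they cannot be allocated with a plain new expression. The helper above sizes one allocation as sizeof(header) + n * sizeof(entry) and wraps it in a unique_ptr with a custom deleter. A hedged, self-contained sketch of the same idea; DemoHeader and DemoEntry are made-up stand-ins for the KVM types:

#include <cstddef>
#include <cstdint>
#include <memory>

struct DemoEntry { uint32_t index; uint64_t data; };
// Flexible array member, mirroring the Linux UAPI style (a GNU extension in C++).
struct DemoHeader { uint32_t nmsrs; DemoEntry entries[]; };

template <typename Struct, typename Entry>
static auto
newVarStruct(size_t entries)
{
    // Allocate the header plus space for 'entries' trailing elements in one block.
    size_t size = sizeof(Struct) + entries * sizeof(Entry);
    return std::unique_ptr<Struct, void(*)(Struct *)>(
        (Struct *)operator new(size),
        [](Struct *p) { operator delete(p); });
}

int
main()
{
    auto msrs = newVarStruct<DemoHeader, DemoEntry>(4);
    msrs->nmsrs = 4;
    for (uint32_t i = 0; i < msrs->nmsrs; ++i)
        msrs->entries[i] = {i, 0};
    return 0;
}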
    inform("KVM register state:\n");

#define APPLY_IREG(kreg, mreg) \
    inform("\t" # kreg ": 0x%llx\n", regs.kreg)

    inform("\trip: 0x%llx\n", regs.rip);
    inform("\trflags: 0x%llx\n", regs.rflags);

    inform("\t%s: @0x%llx+%x [sel: 0x%x, type: 0x%x]\n"
           "\t\tpres.: %u, dpl: %u, db: %u, s: %u, l: %u, g: %u, avl: %u, "

dumpKvm(const char *reg_name, const struct kvm_dtable &dtable)
    inform("\t%s: @0x%llx+%x\n", reg_name, dtable.base, dtable.limit);

#define APPLY_SREG(kreg, mreg) \
    inform("\t" # kreg ": 0x%llx\n", sregs.kreg);
#define APPLY_SEGMENT(kreg, idx) \
    dumpKvm(# kreg, sregs.kreg);
#define APPLY_DTABLE(kreg, idx) \
    dumpKvm(# kreg, sregs.kreg);

    inform("Special registers:\n");

    inform("Interrupt Bitmap:");
    for (int i = 0; i < KVM_NR_INTERRUPTS; i += 64)
        inform(" 0x%.8x", sregs.interrupt_bitmap[i / 64]);

#ifdef KVM_GET_DEBUGREGS
dumpKvm(const struct kvm_debugregs &regs)
    inform("KVM debug state:\n");

#define APPLY_DREG(kreg, mreg) \
    inform("\t" # kreg ": 0x%llx\n", regs.kreg)

    inform("\tflags: 0x%llx\n", regs.flags);

    inform("\tlast_ip: 0x%x\n", xs.ctrl64.fpu_ip);
    inform("\tlast_dp: 0x%x\n", xs.ctrl64.fpu_dp);
    inform("\tmxcsr_mask: 0x%x\n", xs.mxcsr_mask);

    inform("\tlast_ip: 0x%x\n", fpu.last_ip);
    inform("\tlast_dp: 0x%x\n", fpu.last_dp);
    const unsigned top((fpu.fsw >> 11) & 0x7);

    inform("\tfcw: 0x%x\n", fpu.fcw);
    inform("\tfsw: 0x%x (top: %i, "
           "conditions: %s%s%s%s, exceptions: %s%s%s%s%s%s %s%s%s)\n",
           (fpu.fsw & CC0Bit) ? "C0" : "",
           (fpu.fsw & CC1Bit) ? "C1" : "",
           (fpu.fsw & CC2Bit) ? "C2" : "",
           (fpu.fsw & CC3Bit) ? "C3" : "",
           (fpu.fsw & IEBit) ? "I" : "",
           (fpu.fsw & DEBit) ? "D" : "",
           (fpu.fsw & ZEBit) ? "Z" : "",
           (fpu.fsw & OEBit) ? "O" : "",
           (fpu.fsw & UEBit) ? "U" : "",
           (fpu.fsw & PEBit) ? "P" : "",
           (fpu.fsw & BusyBit) ? "BUSY " : ""
    inform("\tftwx: 0x%x\n", fpu.ftwx);
    inform("\tlast_opcode: 0x%x\n", fpu.last_opcode);
    inform("\tmxcsr: 0x%x\n", fpu.mxcsr);
    for (int i = 0; i < 8; ++i) {
        const unsigned reg_idx((i + top) & 0x7);
        const bool empty(!((fpu.ftwx >> reg_idx) & 0x1));
        for (int j = 0; j < 10; ++j)
            snprintf(&hex[j*2], 3, "%.2x", fpu.fpr[i][j]);
        inform("\t\tST%i/%i: 0x%s (%f)%s\n", i, reg_idx,
               hex, value, empty ? " (e)" : "");
    inform("\tXMM registers:\n");
    for (int i = 0; i < 16; ++i) {
        for (int j = 0; j < 16; ++j)
            snprintf(&hex[j*2], 3, "%.2x", fpu.xmm[i][j]);
        inform("\t\t%i: 0x%s\n", i, hex);

    inform("FPU registers:\n");

    inform("FPU registers (XSave):\n");
    for (int i = 0; i < msrs.nmsrs; ++i) {
        const struct kvm_msr_entry &e(msrs.entries[i]);
        inform("\t0x%x: 0x%x\n", e.index, e.data);

    inform("KVM XCR registers:\n");
    inform("\tFlags: 0x%x\n", regs.flags);
    for (int i = 0; i < regs.nr_xcrs; ++i) {
        inform("\tXCR[0x%x]: 0x%x\n",

    inform("\tException: [inj: %i, nr: %i, has_ec: %i, ec: %i]\n",
           events.exception.injected, events.exception.nr,
           events.exception.has_error_code, events.exception.error_code);
    inform("\tInterrupt: [inj: %i, nr: %i, soft: %i]\n",
           events.interrupt.injected, events.interrupt.nr,
           events.interrupt.soft);
    inform("\tNMI: [inj: %i, pending: %i, masked: %i]\n",
           events.nmi.injected, events.nmi.pending,
    inform("\tSIPI vector: 0x%x\n", events.sipi_vector);
    inform("\tFlags: 0x%x\n", events.flags);

    uint64_t upper_half(addr & 0xffff800000000000ULL);
    return upper_half == 0 || upper_half == 0xffff800000000000;
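In 64-bit mode a virtual address is canonical when bits 63..47 are all copies of bit 47, that is, the upper 17 bits are either all zeros or all ones. The check above masks out everything below bit 47 and compares against the two legal patterns. A quick self-contained check with a few sample addresses:

#include <cassert>
#include <cstdint>

static bool
isCanonicalAddress(uint64_t addr)
{
    // x86-64 with 4-level paging: bits 63..47 must all equal bit 47.
    uint64_t upper_half = addr & 0xffff800000000000ULL;
    return upper_half == 0 || upper_half == 0xffff800000000000ULL;
}

int
main()
{
    assert(isCanonicalAddress(0x00007fffffffffffULL));   // top of the lower half
    assert(isCanonicalAddress(0xffff800000000000ULL));   // bottom of the upper half
    assert(!isCanonicalAddress(0x0000800000000000ULL));  // inside the non-canonical hole
    return 0;
}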
static void
checkSeg(const char *name, const int idx, const struct kvm_segment &seg,
         struct kvm_sregs sregs)

    if (seg.base & 0xffffffff00000000ULL)

        warn("CS type is 3 but dpl != 0.\n");

        if (seg.dpl != sregs.ss.dpl)
            warn("CS type is %i but CS DPL != SS DPL\n", seg.type);

        if (seg.dpl > sregs.ss.dpl)
            warn("CS type is %i but CS DPL > SS DPL\n", seg.type);

        warn("Illegal CS type: %i\n", seg.type);

        if (sregs.cs.type == 3 && seg.dpl != 0)
            warn("CS type is 3, but SS DPL is != 0.\n");
        if (!(sregs.cr0 & 1) && seg.dpl != 0)
            warn("SS DPL is %i, but CR0 PE is 0\n", seg.dpl);

        warn("Illegal SS type: %i\n", seg.type);

    if (!(seg.type & 0x1) ||
        ((seg.type & 0x8) && !(seg.type & 0x2)))
        warn("%s has an illegal type field: %i\n", name, seg.type);

    if (seg.type != 3 && seg.type != 11)
        warn("%s: Illegal segment type (%i)\n", name, seg.type);

        warn("%s: Illegal segment type (%i)\n", name, seg.type);

    if (((seg.limit & 0xFFF) == 0 && seg.g) ||
        ((seg.limit & 0xFFF00000) != 0 && !seg.g)) {
        warn("%s limit (0x%x) and g (%i) combination is illegal.\n",
    useXSave(params.useXSave)

        panic("KVM: Missing capability (KVM_CAP_SET_TSS_ADDR)\n");
        panic("KVM: Missing capability (KVM_CAP_EXT_CPUID)\n");
        warn("KVM: Missing capability (KVM_CAP_USER_NMI)\n");
        warn("KVM: Missing capability (KVM_CAP_VCPU_EVENTS)\n");

        warn("KVM: XSAVE not supported by host. MXCSR synchronization might "
             "be unreliable due to kernel bugs.\n");
        warn("KVM: XSave FPU/SIMD synchronization disabled by user.\n");

    struct kvm_regs regs;
    struct kvm_sregs sregs;
#ifdef KVM_GET_DEBUGREGS
    struct kvm_debugregs dregs;
    inform("Debug registers not supported by kernel.\n");
    struct kvm_xcrs xcrs;
    inform("XCRs not supported by kernel.\n");
    struct kvm_xsave xsave;
    inform("XSave not supported by kernel.\n");
    struct kvm_vcpu_events events;

    const Kvm::MSRIndexVector &supported_msrs = vm->kvm->getSupportedMSRs();
    auto msrs = newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(
        supported_msrs.size());

    msrs->nmsrs = supported_msrs.size();
    for (int i = 0; i < supported_msrs.size(); ++i) {
        struct kvm_msr_entry &e(msrs->entries[i]);
        e.index = supported_msrs[i];
    DPRINTF(KvmContext, "X86KvmCPU::updateKvmState():\n");
    if (debug::KvmContext)

    struct kvm_regs regs;

#define APPLY_IREG(kreg, mreg) regs.kreg = tc->getReg(mreg)

    kvm_seg.type = attr.type;
    kvm_seg.present = attr.present;
    kvm_seg.dpl = attr.dpl;
    kvm_seg.db = attr.defaultSize;
    kvm_seg.s = attr.system;
    kvm_seg.l = attr.longMode;
    kvm_seg.g = attr.granularity;
    kvm_seg.avl = attr.avl;
    kvm_seg.unusable = attr.unusable;

    struct kvm_sregs sregs;

#define APPLY_SREG(kreg, mreg) sregs.kreg = tc->readMiscRegNoEffect(mreg)
#define APPLY_SEGMENT(kreg, idx) setKvmSegmentReg(tc, sregs.kreg, idx)
#define APPLY_DTABLE(kreg, idx) setKvmDTableReg(tc, sregs.kreg, idx)

    memset(&sregs.interrupt_bitmap, 0, sizeof(sregs.interrupt_bitmap));

        hack("tr.type (%i) is not busy. Forcing the busy bit.\n",

        sregs.cs.dpl != sregs.ss.dpl) {
        hack("CS.DPL (%i) != SS.DPL (%i): Forcing SS.DPL to %i\n",
             sregs.cs.dpl, sregs.ss.dpl, sregs.cs.dpl);
        sregs.ss.dpl = sregs.cs.dpl;

    if (!rflags_nocc.vm) {

#define APPLY_SEGMENT(kreg, idx) \
    checkSeg(# kreg, idx + misc_reg::SegSelBase, sregs.kreg, sregs)
template <typename T>

    const unsigned top((fpu.fsw >> 11) & 0x7);
    for (int i = 0; i < 8; ++i) {
        const unsigned reg_idx((i + top) & 0x7);
        DPRINTF(KvmContext, "Setting KVM FP reg %i (st[%i]) := %f\n",

    for (int i = 0; i < 16; ++i) {
        *(uint64_t *)&fpu.xmm[i][0] =
        *(uint64_t *)&fpu.xmm[i][8] =

    memset(&fpu, 0, sizeof(fpu));

        warn_once("misc_reg::Fiseg is non-zero.\n");
        warn_once("misc_reg::Foseg is non-zero.\n");
    struct kvm_xsave kxsave;
    memset(&kxsave, 0, sizeof(kxsave));

        warn_once("misc_reg::Fiseg is non-zero.\n");
        warn_once("misc_reg::Foseg is non-zero.\n");

    for (auto it = indices.cbegin(); it != indices.cend(); ++it) {
        struct kvm_msr_entry e;
        DPRINTF(KvmContext, "Adding MSR: idx: 0x%x, data: 0x%x\n",

    struct kvm_regs regs;
    struct kvm_sregs sregs;

    DPRINTF(KvmContext, "X86KvmCPU::updateThreadContext():\n");
    if (debug::KvmContext)

    struct kvm_xsave xsave;

                         const struct kvm_sregs &sregs)

#define APPLY_IREG(kreg, mreg) tc->setReg(mreg, regs.kreg)

    attr.type = kvm_seg.type;
    attr.present = kvm_seg.present;
    attr.dpl = kvm_seg.dpl;
    attr.defaultSize = kvm_seg.db;
    attr.system = kvm_seg.s;
    attr.longMode = kvm_seg.l;
    attr.granularity = kvm_seg.g;
    attr.avl = kvm_seg.avl;
    attr.unusable = kvm_seg.unusable;

#define APPLY_SREG(kreg, mreg) tc->setMiscRegNoEffect(mreg, sregs.kreg)
#define APPLY_SEGMENT(kreg, idx) setContextSegment(tc, sregs.kreg, idx)
#define APPLY_DTABLE(kreg, idx) setContextSegment(tc, sregs.kreg, idx)
#undef APPLY_SEGMENT

template<typename T>

    const unsigned top((fpu.fsw >> 11) & 0x7);

    for (int i = 0; i < 8; ++i) {
        const unsigned reg_idx((i + top) & 0x7);
        DPRINTF(KvmContext, "Setting gem5 FP reg %i (st[%i]) := %f\n",

    for (int i = 0; i < 16; ++i) {

    auto kvm_msrs = newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(
    struct kvm_msr_entry *entry;

    kvm_msrs->nmsrs = msrs.size();
    entry = &kvm_msrs->entries[0];
    for (auto it = msrs.cbegin(); it != msrs.cend(); ++it, ++entry) {
        entry->reserved = 0;

    entry = &kvm_msrs->entries[0];
    for (int i = 0; i < kvm_msrs->nmsrs; ++i, ++entry) {
        DPRINTF(KvmContext, "Setting M5 MSR: idx: 0x%x, data: 0x%x\n",
                entry->index, entry->data);
    fault = interrupts[0]->getInterrupt();
    interrupts[0]->updateIntrInfo();

        DPRINTF(KvmInt, "Delivering NMI\n");

        DPRINTF(KvmInt, "INIT interrupt\n");
        fault.get()->invoke(tc);

        DPRINTF(KvmInt, "STARTUP interrupt\n");
        fault.get()->invoke(tc);

    } else if (x86int) {
        struct kvm_interrupt kvm_int;
        DPRINTF(KvmInt, "Delivering interrupt: %s (%u)\n",
                fault->name(), kvm_int.irq);

        panic("KVM: Unknown interrupt type\n");
    if (lapic->checkInterruptsRaw()) {
        if (lapic->hasPendingUnmaskable()) {
                    "Delivering unmaskable interrupt.\n");
        } else if (kvm_run.ready_for_interrupt_injection) {
            if (lapic->checkInterrupts()) {
                        "M5 has pending interrupts, delivering interrupt.\n");
                        "Interrupt delivery delayed due to KVM confusion.\n");
                kvm_run.request_interrupt_window = 1;
        } else if (!kvm_run.request_interrupt_window) {
                    "M5 has pending interrupts, requesting interrupt "
            kvm_run.request_interrupt_window = 1;

        kvm_run.request_interrupt_window = 0;
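The pending-interrupt handling before each KVM_RUN boils down to three cases: unmaskable events are delivered immediately, maskable ones are delivered only when KVM reports ready_for_interrupt_injection, and otherwise an interrupt window exit is requested so delivery can be retried. A condensed sketch of that decision; the kvm_run field names are the ones used above, everything else is illustrative:

#include <cstdio>

struct RunState {
    bool ready_for_interrupt_injection;
    bool request_interrupt_window;
};

// Decide what to do with a pending local APIC interrupt before entering KVM_RUN.
static void
prepareInterruptDelivery(bool has_pending, bool unmaskable, RunState &kvm_run)
{
    if (!has_pending) {
        kvm_run.request_interrupt_window = false;
        return;
    }
    if (unmaskable) {
        printf("Delivering unmaskable interrupt.\n");
        // deliverInterrupts() would run here
    } else if (kvm_run.ready_for_interrupt_injection) {
        printf("Delivering interrupt.\n");
        // deliverInterrupts() would run here
    } else {
        printf("Requesting interrupt window.\n");
        kvm_run.request_interrupt_window = true;
    }
}

int
main()
{
    RunState kvm_run{false, false};
    prepareInterruptDelivery(true, false, kvm_run);   // window requested
    kvm_run.ready_for_interrupt_injection = true;
    prepareInterruptDelivery(true, false, kvm_run);   // delivered
    return 0;
}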
        DPRINTF(Drain, "kvmRunDrain: Architecture code isn't drained\n");
        kvm_run.request_interrupt_window = 1;

        DPRINTF(Drain, "kvmRunDrain: Delivering pending IO\n");

    const uint16_t port(kvm_run.io.port);

    assert(kvm_run.exit_reason == KVM_EXIT_IO);

    if (kvm_run.io.size != 4) {
        panic("Unexpected IO size (%u) for address 0x%x.\n",
              kvm_run.io.size, port);

    if (kvm_run.io.count != 1) {
        panic("Unexpected IO count (%u) for address 0x%x.\n",
              kvm_run.io.count, port);

    if (kvm_run.io.direction == KVM_EXIT_IO_OUT)

    bool isWrite(kvm_run.io.direction == KVM_EXIT_IO_OUT);
    unsigned char *guestData(getGuestData(kvm_run.io.data_offset));
    uint16_t port(kvm_run.io.port);
    const int count(kvm_run.io.count);

    assert(kvm_run.io.direction == KVM_EXIT_IO_IN ||
           kvm_run.io.direction == KVM_EXIT_IO_OUT);

    DPRINTF(KvmIO, "KVM-x86: Handling IO instruction (%s) (port: 0x%x)\n",
            (isWrite ? "out" : "in"), kvm_run.io.port);

    if (pciConfigAddr & 0x80000000) {

        RequestPtr io_req = std::make_shared<Request>(
            pAddr, kvm_run.io.size,

        guestData += kvm_run.io.size;
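Ports 0xCF8/0xCFC are x86 PCI configuration mechanism #1: software writes an enable bit plus bus/device/function/register to IO_PCI_CONF_ADDR and then accesses the selected register through IO_PCI_CONF_DATA_BASE. Bit 31 of the latched address is the enable bit checked above. A hedged sketch of decoding such an address; the field layout follows the PCI local bus specification, not gem5 code:

#include <cstdint>
#include <cstdio>

#define IO_PCI_CONF_ADDR 0xCF8
#define IO_PCI_CONF_DATA_BASE 0xCFC

struct PciConfAddr {
    bool enabled;
    unsigned bus, device, function, reg;
};

static PciConfAddr
decodePciConfAddr(uint32_t addr)
{
    PciConfAddr out;
    out.enabled = addr & 0x80000000;    // bit 31: configuration space enable
    out.bus = (addr >> 16) & 0xFF;      // bits 23..16
    out.device = (addr >> 11) & 0x1F;   // bits 15..11
    out.function = (addr >> 8) & 0x7;   // bits 10..8
    out.reg = addr & 0xFC;              // bits 7..2, dword aligned
    return out;
}

int
main()
{
    // Example: bus 0, device 3, function 0, register 0x10 (BAR0).
    const uint32_t addr = 0x80000000 | (0 << 16) | (3 << 11) | (0 << 8) | 0x10;
    PciConfAddr d = decodePciConfAddr(addr);
    printf("en=%d bus=%u dev=%u fn=%u reg=0x%02x\n",
           d.enabled, d.bus, d.device, d.function, d.reg);
    return 0;
}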
    struct kvm_vcpu_events events;

    const bool pending_events(events.exception.injected ||
                              events.interrupt.injected ||
                              events.nmi.injected || events.nmi.pending);

    if (pending_events) {
        DPRINTF(Drain, "archIsDrained: Pending events: %s %s %s %s\n",
                events.exception.injected ? "exception" : "",
                events.interrupt.injected ? "interrupt" : "",
                events.nmi.injected ? "nmi[i]" : "",
                events.nmi.pending ? "nmi[p]" : "");

    return !pending_events;

static struct kvm_cpuid_entry2

    struct kvm_cpuid_entry2 e;
    e.function = function;

    e.eax = (uint32_t)result.rax;
    e.ebx = (uint32_t)result.rbx;
    e.ecx = (uint32_t)result.rcx;
    e.edx = (uint32_t)result.rdx;
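Each CPUID leaf gem5 knows about is packed into a struct kvm_cpuid_entry2 before the whole vector is handed to KVM_SET_CPUID2. A self-contained sketch of the packing step; DemoCpuidEntry mirrors the function/index/eax..edx fields used above, and the CPUID result struct and sample values are illustrative:

#include <cstdint>
#include <cstdio>
#include <cstring>

struct DemoCpuidResult { uint64_t rax, rbx, rcx, rdx; };

// Subset of the kvm_cpuid_entry2 fields that the code above fills in.
struct DemoCpuidEntry {
    uint32_t function, index;
    uint32_t eax, ebx, ecx, edx;
};

static DemoCpuidEntry
makeCpuidEntry(uint32_t function, uint32_t index, const DemoCpuidResult &result)
{
    DemoCpuidEntry e;
    memset(&e, 0, sizeof(e));
    e.function = function;
    e.index = index;
    // CPUID results are 32-bit; the simulator keeps them in 64-bit registers.
    e.eax = (uint32_t)result.rax;
    e.ebx = (uint32_t)result.rbx;
    e.ecx = (uint32_t)result.rcx;
    e.edx = (uint32_t)result.rdx;
    return e;
}

int
main()
{
    DemoCpuidResult res{0x0000000d, 0x756e6547, 0x6c65746e, 0x49656e69};
    DemoCpuidEntry e = makeCpuidEntry(0, 0, res);
    printf("leaf 0x%x: eax=0x%08x ebx=0x%08x\n", e.function, e.eax, e.ebx);
    return 0;
}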
    Kvm::CPUIDVector m5_supported;

    for (uint32_t function = 0; function <= func0.rax; ++function) {

    for (uint32_t function = 0x80000000; function <= efunc0.rax; ++function) {

    if (ioctl(KVM_SET_CPUID2, (void *)&cpuid) == -1)
        panic("KVM: Failed to set guest CPUID2 (errno: %i)\n",

    auto kvm_cpuid = newVarStruct<struct kvm_cpuid2, struct kvm_cpuid_entry2>(

    kvm_cpuid->nent = cpuid.size();
    std::copy(cpuid.begin(), cpuid.end(), kvm_cpuid->entries);

    if (ioctl(KVM_SET_MSRS, (void *)&msrs) == -1)
        panic("KVM: Failed to set guest MSRs (errno: %i)\n",

    auto kvm_msrs = newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(

    kvm_msrs->nmsrs = msrs.size();
    std::copy(msrs.begin(), msrs.end(), kvm_msrs->entries);

    if (ioctl(KVM_GET_MSRS, (void *)&msrs) == -1)
        panic("KVM: Failed to get guest MSRs (errno: %i)\n",

    auto kvm_msrs = newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(1);
    struct kvm_msr_entry &entry(kvm_msrs->entries[0]);

    kvm_msrs->nmsrs = 1;
    entry.index = index;

    auto kvm_msrs = newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(1);
    struct kvm_msr_entry &entry(kvm_msrs->entries[0]);

    kvm_msrs->nmsrs = 1;
    entry.index = index;

const Kvm::MSRIndexVector &

    const Kvm::MSRIndexVector &kvm_msrs = vm->kvm->getSupportedMSRs();

    DPRINTF(Kvm, "kvm-x86: Updating MSR intersection\n");
    for (auto it = kvm_msrs.cbegin(); it != kvm_msrs.cend(); ++it) {
            DPRINTF(Kvm, "kvm-x86: Adding MSR 0x%x\n", *it);
            warn("kvm-x86: MSR (0x%x) unsupported by gem5. Skipping.\n",
#ifdef KVM_GET_DEBUGREGS
    if (ioctl(KVM_GET_DEBUGREGS, &regs) == -1)
        panic("KVM: Failed to get guest debug registers\n");
    panic("KVM: Unsupported getDebugRegisters call.\n");

#ifdef KVM_SET_DEBUGREGS
    if (ioctl(KVM_SET_DEBUGREGS, (void *)&regs) == -1)
        panic("KVM: Failed to set guest debug registers\n");
    panic("KVM: Unsupported setDebugRegisters call.\n");

    if (ioctl(KVM_GET_XCRS, &regs) == -1)
        panic("KVM: Failed to get guest debug registers\n");

    if (ioctl(KVM_SET_XCRS, (void *)&regs) == -1)
        panic("KVM: Failed to set guest debug registers\n");

    if (ioctl(KVM_GET_XSAVE, &xsave) == -1)
        panic("KVM: Failed to get guest debug registers\n");

    if (ioctl(KVM_SET_XSAVE, (void *)&xsave) == -1)
        panic("KVM: Failed to set guest debug registers\n");

    if (ioctl(KVM_GET_VCPU_EVENTS, &events) == -1)
        panic("KVM: Failed to get guest debug registers\n");

    if (ioctl(KVM_SET_VCPU_EVENTS, (void *)&events) == -1)
        panic("KVM: Failed to set guest debug registers\n");
static void dumpKvm(const struct kvm_regs &regs)
void dumpDebugRegs() const
Status _status
CPU run state.
double loadFloat80(const void *_mem)
Load an 80-bit float from memory and convert it to double.
bool capSetTSSAddress() const
Support for KvmVM::setTSSAddress()
Addr instAddr() const
Returns the memory address of the instruction this PC points to.
void setCPUID(const struct kvm_cpuid2 &cpuid)
Methods to access CPUID information using the extended API.
void updateThreadContextRegs(const struct kvm_regs &regs, const struct kvm_sregs &sregs)
Support routines to update the state of gem5's thread context from KVM's state representation.
virtual RegVal readMiscReg(RegIndex misc_reg)=0
bool capUserNMI() const
Support for BaseKvmCPU::kvmNonMaskableInterrupt().
static void updateKvmStateFPUCommon(ThreadContext *tc, T &fpu)
void dumpSpecRegs() const
void dumpVCpuEvents() const
const MsrMap msrMap
Map between MSR addresses and their corresponding misc registers.
static void forceSegAccessed(struct kvm_segment &seg)
bool haveXSave
Kvm::capXSave() available?
int ioctl(int request, long p1) const
vCPU ioctl interface.
virtual RegVal getReg(const RegId ®) const
void handleIOMiscReg32(int miscreg)
Handle a 32-bit IO access that should be mapped to a MiscReg.
void getDebugRegisters(struct kvm_debugregs &regs) const
Wrappers around KVM's state transfer methods.
EventQueue * deviceEventQueue()
Get a pointer to the event queue owning devices.
void kvmInterrupt(const struct kvm_interrupt &interrupt)
Send a normal interrupt to the guest.
void syncThreadContext()
Update a thread context if the KVM state is dirty with respect to the cached thread context.
void updateKvmStateFPU()
Update FPU and SIMD registers.
static RegId xmm(int index)
void updateKvmStateRegs()
Support routines to update the state of the KVM CPU from gem5's state representation.
void setMSRs(const struct kvm_msrs &msrs)
Methods to access MSRs in the guest.
void setDebugRegisters(const struct kvm_debugregs &regs)
virtual const PCStateBase & pcState() const =0
static double bitsToFloat64(uint64_t val)
void updateThreadContextXSave(const struct kvm_xsave &kxsave)
Update FPU and SIMD registers using the XSave API.
static Addr x86PciConfigAddress(const uint32_t addr)
struct kvm_run * getKvmRunState()
Get a pointer to the kvm_run structure containing all the input and output parameters from kvmRun().
void updateCPUID()
Transfer gem5's CPUID values into the virtual CPU.
virtual ContextID contextId() const =0
static RegIndex segAttr(int index)
uint64_t getRFlags(ThreadContext *tc)
Reconstruct the rflags register from the internal gem5 register state.
bool capExtendedCPUID() const
Support for BaseKvmCPU::setCPUID2 and getSupportedCPUID().
void updateThreadContextFPU(const struct kvm_fpu &fpu)
Update FPU and SIMD registers using the legacy API.
Kvm::MSRIndexVector cachedMsrIntersection
Cached intersection of supported MSRs.
void setFPUState(const struct kvm_fpu &state)
bool capXCRs() const
Support for getting and setting the x86 XCRs.
void storeFloat80(void *_mem, double value)
Convert and store a double as an 80-bit float.
bool capXSave() const
Support for getting and setting the kvm_xsave structure.
void setMSR(uint32_t index, uint64_t value)
void getFPUState(struct kvm_fpu &state) const
Get/Set the guest FPU/vector state.
Tick kvmRunDrain() override
Run the virtual CPU until draining completes.
void ioctlRun() override
Override for synchronizing state in kvm_run.
void kvmNonMaskableInterrupt()
Send a non-maskable interrupt to the guest.
static RegIndex segSel(int index)
bool archIsDrained() const override
Check if there are pending events in the vCPU that prevents it from being drained.
static RegIndex segBase(int index)
void dataStatic(T *p)
Set the data pointer to the following value that should not be freed.
const Kvm::MSRIndexVector & getMsrIntersection() const
Get a list of MSRs supported by both gem5 and KVM.
SimpleThread * thread
A cached copy of a thread's state in the form of a SimpleThread object.
@ UNCACHEABLE
The request is to an uncacheable address.
bool haveDebugRegs
Kvm::capDebugRegs() available?
virtual void ioctlRun()
Execute the KVM_RUN ioctl.
ThreadContext is the external interface to all thread state for anything outside of the CPU.
bool capVCPUEvents() const
Support for getting and setting the kvm_vcpu_events structure.
uint64_t getMSR(uint32_t index) const
std::shared_ptr< FaultBase > Fault
void updateKvmStateSRegs()
Update control registers (CRx, segments, etc.)
#define SEG_TYPE_BIT_ACCESSED
A Packet is used to encapsulate a transfer between two objects in the memory system (e....
Base class for KVM based CPU models.
ThreadContext * tc
ThreadContext object, provides an interface for external objects to modify this thread's state.
GenericISA::DelaySlotPCState< 4 > PCState
ProbePointArg< PacketInfo > Packet
Packet probe point.
static RegId xmmLow(int index)
uint64_t Tick
Tick count type.
std::shared_ptr< Request > RequestPtr
static void setKvmSegmentReg(ThreadContext *tc, struct kvm_segment &kvm_seg, const int index)
virtual uint8_t getVector() const
Get the vector of an interrupt.
static RegId xmmHigh(int index)
static RegId fpr(int index)
#define SEG_SYS_TYPE_TSS_BUSY
static void dumpFpuCommon(const T &fpu)
void getSpecialRegisters(struct kvm_sregs &regs) const
void updateKvmStateMSRs()
Update MSR registers.
void setXCRs(const struct kvm_xcrs &regs)
static struct kvm_cpuid_entry2 makeKvmCpuid(uint32_t function, uint32_t index, CpuidResult &result)
void setContextSegment(ThreadContext *tc, const struct kvm_segment &kvm_seg, const int index)
static void updateThreadContextFPUCommon(ThreadContext *tc, const T &fpu)
virtual RegVal readMiscRegNoEffect(RegIndex misc_reg) const =0
void getMSRs(struct kvm_msrs &msrs) const
#define IO_PCI_CONF_DATA_BASE
uint16_t convX87XTagsToTags(uint8_t ftwx)
Convert an x87 xtag word to normal tags format.
#define SEG_CS_TYPE_ACCESSED
uint64_t getHostCycles() const override
Get the value of the hardware cycle counter in the guest.
#define SEG_CS_TYPE_READ_ACCESSED
uint64_t Addr
Address type This will probably be moved somewhere else in the near future.
Tick handleKvmExitIO() override
Handle x86 legacy IO (in/out)
static void checkSeg(const char *name, const int idx, const struct kvm_segment &seg, struct kvm_sregs sregs)
void updateThreadContextMSRs()
Update MSR registers.
@ Idle
Context not scheduled in KVM.
const std::string & name()
void setRFlags(ThreadContext *tc, uint64_t val)
Set update the rflags register and internal gem5 state.
void setRegisters(const struct kvm_regs &regs)
Kvm * kvm
Global KVM interface.
struct gem5::FXSave::@20::@23 ctrl64
bool doCpuid(ThreadContext *tc, uint32_t function, uint32_t index, CpuidResult &result)
uint8_t convX87TagsToXTags(uint16_t ftw)
Convert an x87 tag word to abridged tag format.
void updateKvmState() override
Update the KVM state from the current thread context.
void getVCpuEvents(struct kvm_vcpu_events &events) const
void getXSave(struct kvm_xsave &xsave) const
void deliverInterrupts()
Inject pending interrupts from gem5 into the virtual CPU.
void dump() const override
Dump the internal state to the terminal.
bool haveXCRs
Kvm::capXCRs() available?
void updateKvmStateFPUXSave()
Update FPU and SIMD registers using the XSave API.
#define FOREACH_SEGMENT()
bool capDebugRegs() const
Support for getting and setting the kvm_debugregs structure.
void suspend() override
Set the status to Suspended.
static uint64_t floatToBits64(double val)
virtual void setMiscReg(RegIndex misc_reg, RegVal val)=0
static void dumpFpuSpec(const struct FXSave &xs)
void updateKvmStateFPULegacy()
Update FPU and SIMD registers using the legacy API.
Tick submitIO(PacketPtr pkt)
Interface to send Atomic or Timing IO request.
X86KvmCPU(const X86KvmCPUParams &params)
static auto newVarStruct(size_t entries)
Tick kvmRun(Tick ticks) override
Request KVM to run the guest for a given number of ticks.
Tick handleKvmExitIRQWindowOpen() override
The guest exited because an interrupt window was requested.
void setXSave(const struct kvm_xsave &xsave)
virtual Tick kvmRun(Tick ticks)
Request KVM to run the guest for a given number of ticks.
bool threadContextDirty
Is the gem5 context dirty? Set to true to force an update of the KVM vCPU state upon the next call to...
void getXCRs(struct kvm_xcrs &regs) const
void getRegisters(struct kvm_regs &regs) const
Get/Set the register state of the guest vCPU.
void updateThreadContextSRegs(const struct kvm_sregs &sregs)
Update control registers (CRx, segments, etc.)
static bool isCanonicalAddress(uint64_t addr)
KVMCpuPort dataPort
Port for data requests.
uint8_t * getGuestData(uint64_t offset) const
Retrieve a pointer to guest data stored at the end of the kvm_run structure.
#define SEG_SYS_TYPE_TSS_AVAILABLE
Reference material can be found at the JEDEC website: UFS standard http://www.jedec....
void updateThreadContext() override
Update the current thread context with the KVM state.
static Addr x86IOAddress(const uint32_t port)
static RegIndex segLimit(int index)
void setSpecialRegisters(const struct kvm_sregs &regs)
bool useXSave
Should the XSave interface be used to sync the FPU and SIMD registers?
#define panic(...)
This implements a cprintf based panic() function.
static void setKvmDTableReg(ThreadContext *tc, struct kvm_dtable &kvm_dtable, const int index)
void setVCpuEvents(const struct kvm_vcpu_events &events)
virtual void setMiscRegNoEffect(RegIndex misc_reg, RegVal val)=0
virtual void setReg(const RegId ®, RegVal val)