#include "debug/Drain.hh"
#include "debug/Kvm.hh"
#include "debug/KvmContext.hh"
#include "debug/KvmIO.hh"
#include "debug/KvmInt.hh"

using namespace X86ISA;

// IO ports used by the PCI configuration space access mechanism.
#define IO_PCI_CONF_ADDR 0xCF8
#define IO_PCI_CONF_DATA_BASE 0xCFC

// System segment descriptor types for 64-bit TSSs.
#define SEG_SYS_TYPE_TSS_AVAILABLE 9
#define SEG_SYS_TYPE_TSS_BUSY 11

// Code segment descriptor types: execute-only/accessed and
// execute-read/accessed.
#define SEG_CS_TYPE_ACCESSED 9
#define SEG_CS_TYPE_READ_ACCESSED 11

// Mask of the accessed bit in a segment descriptor type field.
#define SEG_TYPE_BIT_ACCESSED 1
static_assert(sizeof(FXSave) == 512, "Unexpected size of FXSave");
#define FOREACH_IREG()                          \
    do {                                        \
        APPLY_IREG(rax, int_reg::Rax);          \
        APPLY_IREG(rbx, int_reg::Rbx);          \
        APPLY_IREG(rcx, int_reg::Rcx);          \
        APPLY_IREG(rdx, int_reg::Rdx);          \
        APPLY_IREG(rsi, int_reg::Rsi);          \
        APPLY_IREG(rdi, int_reg::Rdi);          \
        APPLY_IREG(rsp, int_reg::Rsp);          \
        APPLY_IREG(rbp, int_reg::Rbp);          \
        APPLY_IREG(r8, int_reg::R8);            \
        APPLY_IREG(r9, int_reg::R9);            \
        APPLY_IREG(r10, int_reg::R10);          \
        APPLY_IREG(r11, int_reg::R11);          \
        APPLY_IREG(r12, int_reg::R12);          \
        APPLY_IREG(r13, int_reg::R13);          \
        APPLY_IREG(r14, int_reg::R14);          \
        APPLY_IREG(r15, int_reg::R15);          \
    } while (0)

#define FOREACH_SREG()                                  \
    do {                                                \
        APPLY_SREG(cr0, misc_reg::Cr0);                 \
        APPLY_SREG(cr2, misc_reg::Cr2);                 \
        APPLY_SREG(cr3, misc_reg::Cr3);                 \
        APPLY_SREG(cr4, misc_reg::Cr4);                 \
        APPLY_SREG(cr8, misc_reg::Cr8);                 \
        APPLY_SREG(efer, misc_reg::Efer);               \
        APPLY_SREG(apic_base, misc_reg::ApicBase);      \
    } while (0)

#define FOREACH_DREG()                          \
    do {                                        \
        APPLY_DREG(db[0], misc_reg::Dr0);       \
        APPLY_DREG(db[1], misc_reg::Dr1);       \
        APPLY_DREG(db[2], misc_reg::Dr2);       \
        APPLY_DREG(db[3], misc_reg::Dr3);       \
        APPLY_DREG(dr6, misc_reg::Dr6);         \
        APPLY_DREG(dr7, misc_reg::Dr7);         \
    } while (0)

#define FOREACH_SEGMENT()                                               \
    do {                                                                \
        APPLY_SEGMENT(cs, misc_reg::Cs - misc_reg::SegSelBase);         \
        APPLY_SEGMENT(ds, misc_reg::Ds - misc_reg::SegSelBase);         \
        APPLY_SEGMENT(es, misc_reg::Es - misc_reg::SegSelBase);         \
        APPLY_SEGMENT(fs, misc_reg::Fs - misc_reg::SegSelBase);         \
        APPLY_SEGMENT(gs, misc_reg::Gs - misc_reg::SegSelBase);         \
        APPLY_SEGMENT(ss, misc_reg::Ss - misc_reg::SegSelBase);         \
        APPLY_SEGMENT(tr, misc_reg::Tr - misc_reg::SegSelBase);         \
        APPLY_SEGMENT(ldt, misc_reg::Tsl - misc_reg::SegSelBase);       \
    } while (0)

#define FOREACH_DTABLE()                                                \
    do {                                                                \
        APPLY_DTABLE(gdt, misc_reg::Tsg - misc_reg::SegSelBase);        \
        APPLY_DTABLE(idt, misc_reg::Idtr - misc_reg::SegSelBase);       \
    } while (0)
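// The FOREACH_*()/APPLY_*() pairs form an X-macro pattern: each call site
// defines APPLY_IREG()/APPLY_SREG()/... as the operation it needs and then
// invokes the matching FOREACH_*() to apply it to every register in the
// list. A minimal sketch of how the register dump below uses it:
//
//     #define APPLY_IREG(kreg, mreg) \
//         inform("\t" # kreg ": 0x%llx\n", regs.kreg)
//     FOREACH_IREG();
//     #undef APPLY_IREG
//
// The same lists are reused to copy state into KVM (regs.kreg =
// tc->getReg(mreg)) and back out of it (tc->setReg(mreg, regs.kreg)).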
template<typename Struct, typename Entry>
static auto
newVarStruct(size_t entries)
{
    size_t size = sizeof(Struct) + entries * sizeof(Entry);
    return std::unique_ptr<Struct, void(*)(Struct *)>(
        (Struct *)operator new(size),
        [](Struct *p) { operator delete(p); });
}
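// Several KVM ioctls take a fixed header followed by a variable number of
// trailing entries (kvm_msrs + kvm_msr_entry[], kvm_cpuid2 +
// kvm_cpuid_entry2[]). newVarStruct() allocates header and entries as one
// raw block and wraps it in a unique_ptr whose deleter frees that block.
// Typical use, as seen later in this file:
//
//     auto msrs = newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(n);
//     msrs->nmsrs = n;
//
// The storage is not value-initialized, so callers fill in every field
// they hand to KVM.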
static void
dumpKvm(const struct kvm_regs &regs)
{
    inform("KVM register state:\n");

#define APPLY_IREG(kreg, mreg) \
    inform("\t" # kreg ": 0x%llx\n", regs.kreg)

    FOREACH_IREG();

#undef APPLY_IREG

    inform("\trip: 0x%llx\n", regs.rip);
    inform("\trflags: 0x%llx\n", regs.rflags);
}

static void
dumpKvm(const char *reg_name, const struct kvm_segment &seg)
{
    inform("\t%s: @0x%llx+%x [sel: 0x%x, type: 0x%x]\n"
           "\t\tpres.: %u, dpl: %u, db: %u, s: %u, l: %u, g: %u, avl: %u, "
           "unusable: %u\n",
           reg_name, seg.base, seg.limit, seg.selector, seg.type,
           seg.present, seg.dpl, seg.db, seg.s, seg.l, seg.g,
           seg.avl, seg.unusable);
}

static void
dumpKvm(const char *reg_name, const struct kvm_dtable &dtable)
{
    inform("\t%s: @0x%llx+%x\n",
           reg_name, dtable.base, dtable.limit);
}

static void
dumpKvm(const struct kvm_sregs &sregs)
{
#define APPLY_SREG(kreg, mreg) \
    inform("\t" # kreg ": 0x%llx\n", sregs.kreg);
#define APPLY_SEGMENT(kreg, idx) \
    dumpKvm(# kreg, sregs.kreg);
#define APPLY_DTABLE(kreg, idx) \
    dumpKvm(# kreg, sregs.kreg);

    inform("Special registers:\n");
    FOREACH_SEGMENT();
    FOREACH_SREG();
    FOREACH_DTABLE();

    inform("Interrupt Bitmap:");
    for (int i = 0; i < KVM_NR_INTERRUPTS; i += 64)
        inform(" 0x%.8x", sregs.interrupt_bitmap[i / 64]);

#undef APPLY_SREG
#undef APPLY_SEGMENT
#undef APPLY_DTABLE
}
#ifdef KVM_GET_DEBUGREGS
static void
dumpKvm(const struct kvm_debugregs &regs)
{
    inform("KVM debug state:\n");

#define APPLY_DREG(kreg, mreg) \
    inform("\t" # kreg ": 0x%llx\n", regs.kreg)

    FOREACH_DREG();

#undef APPLY_DREG

    inform("\tflags: 0x%llx\n", regs.flags);
}
#endif

static void
dumpFpuSpec(const struct FXSave &xs)
{
    inform("\tlast_ip: 0x%x\n", xs.ctrl64.fpu_ip);
    inform("\tlast_dp: 0x%x\n", xs.ctrl64.fpu_dp);
    inform("\tmxcsr_mask: 0x%x\n", xs.mxcsr_mask);
}

static void
dumpFpuSpec(const struct kvm_fpu &fpu)
{
    inform("\tlast_ip: 0x%x\n", fpu.last_ip);
    inform("\tlast_dp: 0x%x\n", fpu.last_dp);
}
template<typename T>
static void
dumpFpuCommon(const T &fpu)
{
    const unsigned top((fpu.fsw >> 11) & 0x7);

    inform("\tfcw: 0x%x\n", fpu.fcw);
    inform("\tfsw: 0x%x (top: %i, "
           "conditions: %s%s%s%s, exceptions: %s%s%s%s%s%s %s%s%s)\n",
           fpu.fsw, top,
           (fpu.fsw & CC0Bit) ? "C0" : "",
           (fpu.fsw & CC1Bit) ? "C1" : "",
           (fpu.fsw & CC2Bit) ? "C2" : "",
           (fpu.fsw & CC3Bit) ? "C3" : "",
           (fpu.fsw & IEBit) ? "I" : "",
           (fpu.fsw & DEBit) ? "D" : "",
           (fpu.fsw & ZEBit) ? "Z" : "",
           (fpu.fsw & OEBit) ? "O" : "",
           (fpu.fsw & UEBit) ? "U" : "",
           (fpu.fsw & PEBit) ? "P" : "",
           (fpu.fsw & StackFaultBit) ? "SF " : "",
           (fpu.fsw & ErrSummaryBit) ? "ES " : "",
           (fpu.fsw & BusyBit) ? "BUSY " : "");
    inform("\tftwx: 0x%x\n", fpu.ftwx);
    inform("\tlast_opcode: 0x%x\n", fpu.last_opcode);
    dumpFpuSpec(fpu);
    inform("\tmxcsr: 0x%x\n", fpu.mxcsr);

    inform("\tFP registers:\n");
    for (int i = 0; i < 8; ++i) {
        const unsigned reg_idx((i + top) & 0x7);
        const bool empty(!((fpu.ftwx >> reg_idx) & 0x1));
        const double value(X86ISA::loadFloat80(fpu.fpr[i]));
        char hex[33];
        for (int j = 0; j < 10; ++j)
            snprintf(&hex[j*2], 3, "%.2x", fpu.fpr[i][j]);
        inform("\t\tST%i/%i: 0x%s (%f)%s\n",
               i, reg_idx, hex, value, empty ? " (e)" : "");
    }

    inform("\tXMM registers:\n");
    for (int i = 0; i < 16; ++i) {
        char hex[33];
        for (int j = 0; j < 16; ++j)
            snprintf(&hex[j*2], 3, "%.2x", fpu.xmm[i][j]);
        inform("\t\t%i: 0x%s\n", i, hex);
    }
}

static void
dumpKvm(const struct kvm_fpu &fpu)
{
    inform("FPU registers:\n");
    dumpFpuCommon(fpu);
}

static void
dumpKvm(const struct kvm_xsave &xsave)
{
    inform("FPU registers (XSave):\n");
    dumpFpuCommon(*(FXSave *)xsave.region);
}
static void
dumpKvm(const struct kvm_msrs &msrs)
{
    for (int i = 0; i < msrs.nmsrs; ++i) {
        const struct kvm_msr_entry &e(msrs.entries[i]);

        inform("\t0x%x: 0x%x\n", e.index, e.data);
    }
}

static void
dumpKvm(const struct kvm_xcrs &regs)
{
    inform("KVM XCR registers:\n");

    inform("\tFlags: 0x%x\n", regs.flags);
    for (int i = 0; i < regs.nr_xcrs; ++i) {
        inform("\tXCR[0x%x]: 0x%x\n",
               regs.xcrs[i].xcr, regs.xcrs[i].value);
    }
}

static void
dumpKvm(const struct kvm_vcpu_events &events)
{
    inform("\tException: [inj: %i, nr: %i, has_ec: %i, ec: %i]\n",
           events.exception.injected, events.exception.nr,
           events.exception.has_error_code, events.exception.error_code);

    inform("\tInterrupt: [inj: %i, nr: %i, soft: %i]\n",
           events.interrupt.injected, events.interrupt.nr,
           events.interrupt.soft);

    inform("\tNMI: [inj: %i, pending: %i, masked: %i]\n",
           events.nmi.injected, events.nmi.pending,
           events.nmi.masked);

    inform("\tSIPI vector: 0x%x\n", events.sipi_vector);
    inform("\tFlags: 0x%x\n", events.flags);
}
static bool
isCanonicalAddress(uint64_t addr)
{
    uint64_t upper_half(addr & 0xffff800000000000ULL);
    return upper_half == 0 || upper_half == 0xffff800000000000;
}
static void
checkSeg(const char *name, const int idx, const struct kvm_segment &seg,
         struct kvm_sregs sregs)
{
    // Check the segment base
    if (seg.base & 0xffffffff00000000ULL)
        warn("Illegal %s base: 0x%x\n", name, seg.base);

    // Check the descriptor type depending on which segment register this
    // is.
    switch (idx) {
      case misc_reg::Cs:
        switch (seg.type) {
          case 3:
            if (seg.dpl != 0)
                warn("CS type is 3 but dpl != 0.\n");
            break;
          case 9:
          case 11:
            if (seg.dpl != sregs.ss.dpl)
                warn("CS type is %i but CS DPL != SS DPL\n", seg.type);
            break;
          case 13:
          case 15:
            if (seg.dpl > sregs.ss.dpl)
                warn("CS type is %i but CS DPL > SS DPL\n", seg.type);
            break;
          default:
            warn("Illegal CS type: %i\n", seg.type);
            break;
        }
        break;

      case misc_reg::Ss:
        switch (seg.type) {
          case 3:
            if (sregs.cs.type == 3 && seg.dpl != 0)
                warn("CS type is 3, but SS DPL is != 0.\n");
            break;
          case 7:
            if (!(sregs.cr0 & 1) && seg.dpl != 0)
                warn("SS DPL is %i, but CR0 PE is 0\n", seg.dpl);
            break;
          default:
            warn("Illegal SS type: %i\n", seg.type);
            break;
        }
        break;

      case misc_reg::Ds:
      case misc_reg::Es:
        if (!(seg.type & 0x1) ||
            ((seg.type & 0x8) && !(seg.type & 0x2)))
            warn("%s has an illegal type field: %i\n", name, seg.type);
        break;

      case misc_reg::Tr:
        if (seg.type != 3 && seg.type != 11)
            warn("%s: Illegal segment type (%i)\n", name, seg.type);
        break;

      case misc_reg::Tsl:
        if (seg.type != 2)
            warn("%s: Illegal segment type (%i)\n", name, seg.type);
        break;
    }

    // The limit and the granularity bit have to be consistent; KVM rejects
    // combinations where the 4 KiB granularity flag does not match the
    // shape of the limit.
    if (((seg.limit & 0xFFF) == 0 && seg.g) ||
        ((seg.limit & 0xFFF00000) != 0 && !seg.g)) {
        warn("%s limit (0x%x) and g (%i) combination is illegal.\n",
             name, seg.limit, seg.g);
    }
}
X86KvmCPU::X86KvmCPU(const X86KvmCPUParams &params)
    : BaseKvmCPU(params), useXSave(params.useXSave)
{
}

void
X86KvmCPU::init()
{
    BaseKvmCPU::init();
    Kvm &kvm = *vm->kvm;

    if (!kvm.capSetTSSAddress())
        panic("KVM: Missing capability (KVM_CAP_SET_TSS_ADDR)\n");
    if (!kvm.capExtendedCPUID())
        panic("KVM: Missing capability (KVM_CAP_EXT_CPUID)\n");
    if (!kvm.capUserNMI())
        warn("KVM: Missing capability (KVM_CAP_USER_NMI)\n");
    if (!kvm.capVCPUEvents())
        warn("KVM: Missing capability (KVM_CAP_VCPU_EVENTS)\n");

    if (useXSave && !kvm.capXSave()) {
        warn("KVM: XSAVE not supported by host. MXCSR synchronization might "
             "be unreliable due to kernel bugs.\n");
        useXSave = false;
    } else if (!useXSave) {
        warn("KVM: XSave FPU/SIMD synchronization disabled by user.\n");
    }
}
// X86KvmCPU::dump() prints the whole vCPU state by fetching it piece by
// piece (getRegisters(), getSpecialRegisters(), ...) into the structures
// below and passing them to the dumpKvm() overloads. Optional state is
// replaced by a message when the running kernel lacks the interface.
    struct kvm_regs regs;

    struct kvm_sregs sregs;

#ifdef KVM_GET_DEBUGREGS
    struct kvm_debugregs dregs;
#endif
    inform("Debug registers not supported by kernel.\n");

    struct kvm_xcrs xcrs;
    inform("XCRs not supported by kernel.\n");

    struct kvm_xsave xsave;
    inform("XSave not supported by kernel.\n");

    struct kvm_vcpu_events events;

// dumpMSRs() queries the host for every supported MSR index, reads their
// current values back and dumps them.
    const Kvm::MSRIndexVector &supported_msrs = vm->kvm->getSupportedMSRs();
    auto msrs = newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(
        supported_msrs.size());

    msrs->nmsrs = supported_msrs.size();
    for (int i = 0; i < supported_msrs.size(); ++i) {
        struct kvm_msr_entry &e(msrs->entries[i]);
        e.index = supported_msrs[i];
    }

// X86KvmCPU::updateKvmState() copies the full gem5 thread context into the
// vCPU (integer registers, special registers, FPU/SIMD state and MSRs).
    DPRINTF(KvmContext, "X86KvmCPU::updateKvmState():\n");
    if (debug::KvmContext)
        dump();
// X86KvmCPU::updateKvmStateRegs() copies the gem5 integer registers, rip
// and rflags into a kvm_regs structure and uploads it.
    struct kvm_regs regs;

#define APPLY_IREG(kreg, mreg) regs.kreg = tc->getReg(mreg)

// setKvmSegmentReg(): translate a gem5 segment attribute register into the
// corresponding kvm_segment fields.
    kvm_seg.type = attr.type;
    kvm_seg.present = attr.present;
    kvm_seg.dpl = attr.dpl;
    kvm_seg.db = attr.defaultSize;
    kvm_seg.s = attr.system;
    kvm_seg.l = attr.longMode;
    kvm_seg.g = attr.granularity;
    kvm_seg.avl = attr.avl;
    kvm_seg.unusable = attr.unusable;

// X86KvmCPU::updateKvmStateSRegs() copies control registers, segment
// registers and descriptor tables into a kvm_sregs structure.
    struct kvm_sregs sregs;

#define APPLY_SREG(kreg, mreg) sregs.kreg = tc->readMiscRegNoEffect(mreg)
#define APPLY_SEGMENT(kreg, idx) setKvmSegmentReg(tc, sregs.kreg, idx)
#define APPLY_DTABLE(kreg, idx) setKvmDTableReg(tc, sregs.kreg, idx)

    memset(&sregs.interrupt_bitmap, 0, sizeof(sregs.interrupt_bitmap));

    hack("tr.type (%i) is not busy. Forcing the busy bit.\n",
         sregs.tr.type);

    if (sregs.cs.dpl != sregs.ss.dpl) {
        hack("CS.DPL (%i) != SS.DPL (%i): Forcing SS.DPL to %i\n",
             sregs.cs.dpl, sregs.ss.dpl, sregs.cs.dpl);
        sregs.ss.dpl = sregs.cs.dpl;
    }

    // Run the segment sanity checks only outside of virtual-8086 mode and
    // after the fixups above.
    if (!rflags_nocc.vm) {
#undef APPLY_SEGMENT
#define APPLY_SEGMENT(kreg, idx) \
    checkSeg(# kreg, idx + misc_reg::SegSelBase, sregs.kreg, sregs)
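// The hack()s above exist because KVM insists on architecturally
// consistent segment state before it will run the vCPU: TR has to describe
// a busy TSS (SEG_SYS_TYPE_TSS_BUSY) and CS/SS have to agree on their
// privilege level, even though gem5's own representation of the same state
// does not always maintain those invariants. checkSeg() is then applied to
// every segment register, outside of virtual-8086 mode, to flag anything
// else that still looks inconsistent.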
// updateKvmStateFPUCommon(): store the x87 stack and XMM registers into
// the FPU state structure shared by the legacy and XSave paths.
    const unsigned top((fpu.fsw >> 11) & 0x7);
    for (int i = 0; i < 8; ++i) {
        const unsigned reg_idx((i + top) & 0x7);
        DPRINTF(KvmContext, "Setting KVM FP reg %i (st[%i]) := %f\n",
                reg_idx, i, value);
    }

    for (int i = 0; i < 16; ++i) {
        *(uint64_t *)&fpu.xmm[i][0] =
            tc->getReg(float_reg::xmmLow(i));
        *(uint64_t *)&fpu.xmm[i][8] =
            tc->getReg(float_reg::xmmHigh(i));
    }

// updateKvmStateFPULegacy(): upload the state through KVM_SET_FPU.
    struct kvm_fpu fpu;
    memset(&fpu, 0, sizeof(fpu));

    if (tc->readMiscRegNoEffect(misc_reg::Fiseg))
        warn_once("misc_reg::Fiseg is non-zero.\n");
    if (tc->readMiscRegNoEffect(misc_reg::Foseg))
        warn_once("misc_reg::Foseg is non-zero.\n");

// updateKvmStateFPUXSave(): upload the same state through KVM_SET_XSAVE so
// that MXCSR is transferred as well.
    struct kvm_xsave kxsave;
    memset(&kxsave, 0, sizeof(kxsave));

    if (tc->readMiscRegNoEffect(misc_reg::Fiseg))
        warn_once("misc_reg::Fiseg is non-zero.\n");
    if (tc->readMiscRegNoEffect(misc_reg::Foseg))
        warn_once("misc_reg::Foseg is non-zero.\n");

// updateKvmStateMSRs(): gather the value of every MSR in the gem5/KVM
// intersection and upload them in one batch.
    for (auto it = indices.cbegin(); it != indices.cend(); ++it) {
        struct kvm_msr_entry e;

        e.index = *it;
        e.data = tc->readMiscReg(msrMap.at(*it));
        DPRINTF(KvmContext, "Adding MSR: idx: 0x%x, data: 0x%x\n",
                e.index, e.data);
    }
// X86KvmCPU::updateThreadContext() pulls the current vCPU state out of KVM
// and writes it back into gem5's thread context.
    struct kvm_regs regs;
    struct kvm_sregs sregs;

    DPRINTF(KvmContext, "X86KvmCPU::updateThreadContext():\n");
    if (debug::KvmContext)
        dump();

    struct kvm_xsave xsave;

// updateThreadContextRegs(const struct kvm_regs &regs,
//                         const struct kvm_sregs &sregs):
#define APPLY_IREG(kreg, mreg) tc->setReg(mreg, regs.kreg)

// setContextSegment(): translate a kvm_segment back into gem5's segment
// attribute representation.
    attr.type = kvm_seg.type;
    attr.present = kvm_seg.present;
    attr.dpl = kvm_seg.dpl;
    attr.defaultSize = kvm_seg.db;
    attr.system = kvm_seg.s;
    attr.longMode = kvm_seg.l;
    attr.granularity = kvm_seg.g;
    attr.avl = kvm_seg.avl;
    attr.unusable = kvm_seg.unusable;

// updateThreadContextSRegs():
#define APPLY_SREG(kreg, mreg) tc->setMiscRegNoEffect(mreg, sregs.kreg)
#define APPLY_SEGMENT(kreg, idx) setContextSegment(tc, sregs.kreg, idx)
#define APPLY_DTABLE(kreg, idx) setContextSegment(tc, sregs.kreg, idx)
// updateThreadContextFPUCommon(): the x87 registers come back in
// stack-relative order, so map each ST(i) slot to the physical register
// that backs it before storing into gem5.
    const unsigned top((fpu.fsw >> 11) & 0x7);

    for (int i = 0; i < 8; ++i) {
        const unsigned reg_idx((i + top) & 0x7);
        DPRINTF(KvmContext, "Setting gem5 FP reg %i (st[%i]) := %f\n",
                reg_idx, i, value);
    }

    for (int i = 0; i < 16; ++i) {
        tc->setReg(float_reg::xmmLow(i), *(uint64_t *)&fpu.xmm[i][0]);
        tc->setReg(float_reg::xmmHigh(i), *(uint64_t *)&fpu.xmm[i][8]);
    }

// updateThreadContextMSRs(): read back every MSR in the gem5/KVM
// intersection and store it in the thread context.
    auto kvm_msrs = newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(
        msrs.size());
    struct kvm_msr_entry *entry;

    // Create the list of MSRs to read.
    kvm_msrs->nmsrs = msrs.size();
    entry = &kvm_msrs->entries[0];
    for (auto it = msrs.cbegin(); it != msrs.cend(); ++it, ++entry) {
        entry->index = *it;
        entry->reserved = 0;
        entry->data = 0;
    }

    // Write the values KVM returned back into gem5.
    entry = &kvm_msrs->entries[0];
    for (int i = 0; i < kvm_msrs->nmsrs; ++i, ++entry) {
        DPRINTF(KvmContext, "Setting M5 MSR: idx: 0x%x, data: 0x%x\n",
                entry->index, entry->data);
    }
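// The updateThreadContext*() methods above are the mirror image of the
// updateKvmState*() methods: they are reached through syncThreadContext(),
// which only copies state out of KVM when the vCPU state is dirty with
// respect to the cached thread context, so the two register files are
// reconciled lazily rather than on every exit.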
// X86KvmCPU::deliverInterrupts(): fetch the highest-priority pending
// interrupt from gem5's interrupt controller and inject it into the vCPU.
// NMI, INIT and STARTUP are handled specially; other vectors are delivered
// as ordinary external interrupts.
        DPRINTF(KvmInt, "Delivering NMI\n");
        kvmNonMaskableInterrupt();

        DPRINTF(KvmInt, "INIT interrupt\n");
        fault.get()->invoke(tc);

        DPRINTF(KvmInt, "STARTUP interrupt\n");
        fault.get()->invoke(tc);

    } else if (x86int) {
        struct kvm_interrupt kvm_int;
        kvm_int.irq = x86int->getVector();

        DPRINTF(KvmInt, "Delivering interrupt: %s (%u)\n",
                fault->name(), kvm_int.irq);

        kvmInterrupt(kvm_int);
    } else {
        panic("KVM: Unknown interrupt type\n");
    }
// X86KvmCPU::kvmRun(): before entering the guest, try to inject any
// interrupt the gem5 local APIC has pending. Unmaskable interrupts go in
// immediately; normal interrupts are only injected when KVM reports that
// it can accept one, otherwise an interrupt window is requested.
    if (lapic->checkInterruptsRaw()) {
        if (lapic->hasPendingUnmaskable()) {
            DPRINTF(KvmInt,
                    "Delivering unmaskable interrupt.\n");
        } else if (kvm_run.ready_for_interrupt_injection) {
            if (lapic->checkInterrupts()) {
                DPRINTF(KvmInt,
                        "M5 has pending interrupts, delivering interrupt.\n");
            } else {
                DPRINTF(KvmInt,
                        "Interrupt delivery delayed due to KVM confusion.\n");
                kvm_run.request_interrupt_window = 1;
            }
        } else if (!kvm_run.request_interrupt_window) {
            DPRINTF(KvmInt,
                    "M5 has pending interrupts, requesting interrupt "
                    "window.\n");
            kvm_run.request_interrupt_window = 1;
        }
    } else {
        kvm_run.request_interrupt_window = 0;
    }

// X86KvmCPU::kvmRunDrain(): while draining, run the guest with an
// interrupt window requested so the vCPU stops once its pending events
// have been delivered.
    DPRINTF(Drain, "kvmRunDrain: Architecture code isn't drained\n");
    kvm_run.request_interrupt_window = 1;

    DPRINTF(Drain, "kvmRunDrain: Delivering pending IO\n");
// X86KvmCPU::handleIOMiscReg32(): forward a 32-bit port access to a gem5
// miscellaneous register (used for the PCI configuration address port).
    const uint16_t port(kvm_run.io.port);

    assert(kvm_run.exit_reason == KVM_EXIT_IO);

    if (kvm_run.io.size != 4) {
        panic("Unexpected IO size (%u) for address 0x%x.\n",
              kvm_run.io.size, port);
    }

    if (kvm_run.io.count != 1) {
        panic("Unexpected IO count (%u) for address 0x%x.\n",
              kvm_run.io.count, port);
    }

    uint32_t *data((uint32_t *)getGuestData(kvm_run.io.data_offset));
    if (kvm_run.io.direction == KVM_EXIT_IO_OUT)
        tc->setMiscReg(miscreg, *data);
    else
        *data = tc->readMiscRegNoEffect(miscreg);
// X86KvmCPU::handleKvmExitIO(): emulate in/out by issuing uncacheable
// requests in gem5's IO address space.
    bool isWrite(kvm_run.io.direction == KVM_EXIT_IO_OUT);
    unsigned char *guestData(getGuestData(kvm_run.io.data_offset));
    uint16_t port(kvm_run.io.port);
    const int count(kvm_run.io.count);

    assert(kvm_run.io.direction == KVM_EXIT_IO_IN ||
           kvm_run.io.direction == KVM_EXIT_IO_OUT);

    DPRINTF(KvmIO, "KVM-x86: Handling IO instruction (%s) (port: 0x%x)\n",
            (isWrite ? "out" : "in"), kvm_run.io.port);

    // PCI configuration data accesses are redirected into configuration
    // space when the address register's enable bit is set.
    if (pciConfigAddr & 0x80000000)
        pAddr = X86ISA::x86PciConfigAddress((pciConfigAddr & 0x7ffffffc) |
                                            (port & 0x3));
    else
        pAddr = X86ISA::x86IOAddress(port);

    RequestPtr io_req = std::make_shared<Request>(
        pAddr, kvm_run.io.size,
        Request::UNCACHEABLE, dataRequestorId());

    // Repeated (string) IO steps through the guest data buffer one access
    // at a time:
    guestData += kvm_run.io.size;
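// Port IO has no direct equivalent in gem5's memory system, so each in/out
// access is turned into an uncacheable memory request: x86IOAddress(port)
// maps an ordinary port into the dedicated IO physical-address range,
// while x86PciConfigAddress() is used for PCI configuration data cycles.
// The packet is then sent through dataPort (submitIO()), and the latency
// it reports contributes to the delay returned for this exit.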
bool
X86KvmCPU::archIsDrained() const
{
    struct kvm_vcpu_events events;

    getVCpuEvents(events);

    const bool pending_events(events.exception.injected ||
                              events.interrupt.injected ||
                              events.nmi.injected || events.nmi.pending);

    if (pending_events) {
        DPRINTF(Drain, "archIsDrained: Pending events: %s %s %s %s\n",
                events.exception.injected ? "exception" : "",
                events.interrupt.injected ? "interrupt" : "",
                events.nmi.injected ? "nmi[i]" : "",
                events.nmi.pending ? "nmi[p]" : "");
    }

    return !pending_events;
}
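// Draining has to wait until KVM's injection pipeline is empty: an
// exception, interrupt or NMI that has been queued for injection exists
// only inside the kernel and would be lost if the simulator serialized or
// switched CPUs at that point, so archIsDrained() keeps reporting false
// until kvm_vcpu_events shows no pending or injected events.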
static struct kvm_cpuid_entry2
makeKvmCpuid(uint32_t function, uint32_t index, CpuidResult &result)
{
    struct kvm_cpuid_entry2 e;
    e.function = function;
    e.index = index;
    e.flags = 0;
    e.eax = (uint32_t)result.rax;
    e.ebx = (uint32_t)result.rbx;
    e.ecx = (uint32_t)result.rcx;
    e.edx = (uint32_t)result.rdx;

    return e;
}

// X86KvmCPU::updateCPUID(): enumerate every standard and extended CPUID
// leaf that gem5 implements and hand the results to the virtual CPU.
    Kvm::CPUIDVector m5_supported;

    for (uint32_t function = 0; function <= func0.rax; ++function) {
        CpuidResult cpuid;
        X86ISA::doCpuid(tc, function, 0, cpuid);
        m5_supported.push_back(makeKvmCpuid(function, 0, cpuid));
    }

    for (uint32_t function = 0x80000000; function <= efunc0.rax; ++function) {
        CpuidResult cpuid;
        X86ISA::doCpuid(tc, function, 0, cpuid);
        m5_supported.push_back(makeKvmCpuid(function, 0, cpuid));
    }

    setCPUID(m5_supported);

void
X86KvmCPU::setCPUID(const struct kvm_cpuid2 &cpuid)
{
    if (ioctl(KVM_SET_CPUID2, (void *)&cpuid) == -1)
        panic("KVM: Failed to set guest CPUID2 (errno: %i)\n",
              errno);
}

// setCPUID(const Kvm::CPUIDVector &cpuid): wrap the vector in a kvm_cpuid2
// structure and issue the ioctl above.
    auto kvm_cpuid = newVarStruct<struct kvm_cpuid2, struct kvm_cpuid_entry2>(
        cpuid.size());

    kvm_cpuid->nent = cpuid.size();
    std::copy(cpuid.begin(), cpuid.end(), kvm_cpuid->entries);
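// Only CPUID leaves that gem5 itself implements (via X86ISA::doCpuid) are
// exposed to the guest. This keeps the guest from detecting host features
// that the simulated platform cannot back up, at the cost of hiding
// capabilities KVM could otherwise pass through.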
void
X86KvmCPU::setMSRs(const struct kvm_msrs &msrs)
{
    if (ioctl(KVM_SET_MSRS, (void *)&msrs) == -1)
        panic("KVM: Failed to set guest MSRs (errno: %i)\n",
              errno);
}

// setMSRs(const KvmMSRVector &msrs): wrap the vector in a kvm_msrs
// structure before issuing the ioctl above.
    auto kvm_msrs = newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(
        msrs.size());

    kvm_msrs->nmsrs = msrs.size();
    std::copy(msrs.begin(), msrs.end(), kvm_msrs->entries);

void
X86KvmCPU::getMSRs(struct kvm_msrs &msrs) const
{
    if (ioctl(KVM_GET_MSRS, (void *)&msrs) == -1)
        panic("KVM: Failed to get guest MSRs (errno: %i)\n",
              errno);
}

// setMSR(index, value): single-MSR write built on the batch interface.
    auto kvm_msrs = newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(1);
    struct kvm_msr_entry &entry(kvm_msrs->entries[0]);

    kvm_msrs->nmsrs = 1;
    entry.index = index;

// getMSR(index): single-MSR read, same pattern.
    auto kvm_msrs = newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(1);
    struct kvm_msr_entry &entry(kvm_msrs->entries[0]);

    kvm_msrs->nmsrs = 1;
    entry.index = index;
const Kvm::MSRIndexVector &
X86KvmCPU::getMsrIntersection() const
{
    if (cachedMsrIntersection.empty()) {
        const Kvm::MSRIndexVector &kvm_msrs = vm->kvm->getSupportedMSRs();

        DPRINTF(Kvm, "kvm-x86: Updating MSR intersection\n");
        for (auto it = kvm_msrs.cbegin(); it != kvm_msrs.cend(); ++it) {
            if (X86ISA::msrMap.find(*it) != X86ISA::msrMap.end()) {
                cachedMsrIntersection.push_back(*it);
                DPRINTF(Kvm, "kvm-x86: Adding MSR 0x%x\n", *it);
            } else {
                warn("kvm-x86: MSR (0x%x) unsupported by gem5. Skipping.\n",
                     *it);
            }
        }
    }

    return cachedMsrIntersection;
}
void
X86KvmCPU::getDebugRegisters(struct kvm_debugregs &regs) const
{
#ifdef KVM_GET_DEBUGREGS
    if (ioctl(KVM_GET_DEBUGREGS, &regs) == -1)
        panic("KVM: Failed to get guest debug registers\n");
#else
    panic("KVM: Unsupported getDebugRegisters call.\n");
#endif
}

void
X86KvmCPU::setDebugRegisters(const struct kvm_debugregs &regs)
{
#ifdef KVM_SET_DEBUGREGS
    if (ioctl(KVM_SET_DEBUGREGS, (void *)&regs) == -1)
        panic("KVM: Failed to set guest debug registers\n");
#else
    panic("KVM: Unsupported setDebugRegisters call.\n");
#endif
}

void
X86KvmCPU::getXCRs(struct kvm_xcrs &regs) const
{
    if (ioctl(KVM_GET_XCRS, &regs) == -1)
        panic("KVM: Failed to get guest XCRs\n");
}

void
X86KvmCPU::setXCRs(const struct kvm_xcrs &regs)
{
    if (ioctl(KVM_SET_XCRS, (void *)&regs) == -1)
        panic("KVM: Failed to set guest XCRs\n");
}

void
X86KvmCPU::getXSave(struct kvm_xsave &xsave) const
{
    if (ioctl(KVM_GET_XSAVE, &xsave) == -1)
        panic("KVM: Failed to get guest XSave state\n");
}

void
X86KvmCPU::setXSave(const struct kvm_xsave &xsave)
{
    if (ioctl(KVM_SET_XSAVE, (void *)&xsave) == -1)
        panic("KVM: Failed to set guest XSave state\n");
}

void
X86KvmCPU::getVCpuEvents(struct kvm_vcpu_events &events) const
{
    if (ioctl(KVM_GET_VCPU_EVENTS, &events) == -1)
        panic("KVM: Failed to get guest vCPU events\n");
}

void
X86KvmCPU::setVCpuEvents(const struct kvm_vcpu_events &events)
{
    if (ioctl(KVM_SET_VCPU_EVENTS, (void *)&events) == -1)
        panic("KVM: Failed to set guest vCPU events\n");
}