#include "debug/Drain.hh"
#include "debug/Kvm.hh"
#include "debug/KvmContext.hh"
#include "debug/KvmIO.hh"
#include "debug/KvmInt.hh"

using namespace X86ISA;
#define IO_PCI_CONF_ADDR 0xCF8
#define IO_PCI_CONF_DATA_BASE 0xCFC
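// Note (added): 0xCF8/0xCFC are the standard x86 PCI configuration
// mechanism #1 ports (address and data register, respectively); the IO
// exit handling below uses these constants to intercept PCI
// configuration-space accesses.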

#define SEG_SYS_TYPE_TSS_AVAILABLE 9
#define SEG_SYS_TYPE_TSS_BUSY 11

#define SEG_CS_TYPE_ACCESSED 9
#define SEG_CS_TYPE_READ_ACCESSED 11

#define SEG_TYPE_BIT_ACCESSED 1

#define BIT(nr) (1UL << (nr))
static_assert(sizeof(FXSave) == 512, "Unexpected size of FXSave");

static_assert(sizeof(XSaveHeader) == 64, "Unexpected size of XSaveHeader");
#define FOREACH_IREG()                          \
    do {                                        \
        APPLY_IREG(rax, int_reg::Rax);          \
        APPLY_IREG(rbx, int_reg::Rbx);          \
        APPLY_IREG(rcx, int_reg::Rcx);          \
        APPLY_IREG(rdx, int_reg::Rdx);          \
        APPLY_IREG(rsi, int_reg::Rsi);          \
        APPLY_IREG(rdi, int_reg::Rdi);          \
        APPLY_IREG(rsp, int_reg::Rsp);          \
        APPLY_IREG(rbp, int_reg::Rbp);          \
        APPLY_IREG(r8, int_reg::R8);            \
        APPLY_IREG(r9, int_reg::R9);            \
        APPLY_IREG(r10, int_reg::R10);          \
        APPLY_IREG(r11, int_reg::R11);          \
        APPLY_IREG(r12, int_reg::R12);          \
        APPLY_IREG(r13, int_reg::R13);          \
        APPLY_IREG(r14, int_reg::R14);          \
        APPLY_IREG(r15, int_reg::R15);          \
    } while (0)
#define FOREACH_SREG()                                  \
    do {                                                \
        APPLY_SREG(cr0, misc_reg::Cr0);                 \
        APPLY_SREG(cr2, misc_reg::Cr2);                 \
        APPLY_SREG(cr3, misc_reg::Cr3);                 \
        APPLY_SREG(cr4, misc_reg::Cr4);                 \
        APPLY_SREG(cr8, misc_reg::Cr8);                 \
        APPLY_SREG(efer, misc_reg::Efer);               \
        APPLY_SREG(apic_base, misc_reg::ApicBase);      \
    } while (0)
#define FOREACH_DREG()                          \
    do {                                        \
        APPLY_DREG(db[0], misc_reg::Dr0);       \
        APPLY_DREG(db[1], misc_reg::Dr1);       \
        APPLY_DREG(db[2], misc_reg::Dr2);       \
        APPLY_DREG(db[3], misc_reg::Dr3);       \
        APPLY_DREG(dr6, misc_reg::Dr6);         \
        APPLY_DREG(dr7, misc_reg::Dr7);         \
    } while (0)
#define FOREACH_SEGMENT()                                               \
    do {                                                                \
        APPLY_SEGMENT(cs, misc_reg::Cs - misc_reg::SegSelBase);         \
        APPLY_SEGMENT(ds, misc_reg::Ds - misc_reg::SegSelBase);         \
        APPLY_SEGMENT(es, misc_reg::Es - misc_reg::SegSelBase);         \
        APPLY_SEGMENT(fs, misc_reg::Fs - misc_reg::SegSelBase);         \
        APPLY_SEGMENT(gs, misc_reg::Gs - misc_reg::SegSelBase);         \
        APPLY_SEGMENT(ss, misc_reg::Ss - misc_reg::SegSelBase);         \
        APPLY_SEGMENT(tr, misc_reg::Tr - misc_reg::SegSelBase);         \
        APPLY_SEGMENT(ldt, misc_reg::Tsl - misc_reg::SegSelBase);       \
    } while (0)
#define FOREACH_DTABLE()                                                \
    do {                                                                \
        APPLY_DTABLE(gdt, misc_reg::Tsg - misc_reg::SegSelBase);        \
        APPLY_DTABLE(idt, misc_reg::Idtr - misc_reg::SegSelBase);       \
    } while (0)
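// Note (added): the FOREACH_* loops above are instantiated by defining the
// matching APPLY_* macro for the task at hand, expanding the loop, and then
// undefining it again, e.g.:
//
//     #define APPLY_SREG(kreg, mreg) \
//         inform("\t" # kreg ": 0x%llx\n", sregs.kreg);
//     FOREACH_SREG();
//     #undef APPLY_SREG
//
// The same pattern is reused below both for dumping state and for copying
// registers between the KVM structures and the gem5 ThreadContext.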
template<typename Struct, typename Entry>
static auto
newVarStruct(size_t entries)
{
    size_t size = sizeof(Struct) + entries * sizeof(Entry);
    return std::unique_ptr<Struct, void(*)(Struct *)>(
        (Struct *)operator new(size),
        [](Struct *p) { operator delete(p); });
}
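// Usage sketch (added, for illustration): KVM structures such as kvm_msrs
// end in a variable-length array, so allocating one with room for n entries
// looks roughly like
//
//     auto msrs(newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(n));
//     msrs->nmsrs = n;
//
// The custom deleter simply releases the raw storage obtained from
// operator new.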
static void
dumpKvm(const struct kvm_regs &regs)
{
    inform("KVM register state:\n");

#define APPLY_IREG(kreg, mreg) \
    inform("\t" # kreg ": 0x%llx\n", regs.kreg)

    FOREACH_IREG();

#undef APPLY_IREG

    inform("\trip: 0x%llx\n", regs.rip);
    inform("\trflags: 0x%llx\n", regs.rflags);
}
static void
dumpKvm(const char *reg_name, const struct kvm_segment &seg)
{
    inform("\t%s: @0x%llx+%x [sel: 0x%x, type: 0x%x]\n"
           "\t\tpres.: %u, dpl: %u, db: %u, s: %u, l: %u, g: %u, avl: %u, "
           "unusable: %u]\n",
           reg_name,
           seg.base, seg.limit, seg.selector, seg.type,
           seg.present, seg.dpl, seg.db, seg.s, seg.l, seg.g, seg.avl,
           seg.unusable);
}
static void
dumpKvm(const char *reg_name, const struct kvm_dtable &dtable)
{
    inform("\t%s: @0x%llx+%x\n",
           reg_name, dtable.base, dtable.limit);
}
static void
dumpKvm(const struct kvm_sregs &sregs)
{
#define APPLY_SREG(kreg, mreg) \
    inform("\t" # kreg ": 0x%llx\n", sregs.kreg);
#define APPLY_SEGMENT(kreg, idx) \
    dumpKvm(# kreg, sregs.kreg);
#define APPLY_DTABLE(kreg, idx) \
    dumpKvm(# kreg, sregs.kreg);

    inform("Special registers:\n");
    FOREACH_SEGMENT();
    FOREACH_SREG();
    FOREACH_DTABLE();

    inform("Interrupt Bitmap:");
    for (int i = 0; i < KVM_NR_INTERRUPTS; i += 64)
        inform(" 0x%.8x", sregs.interrupt_bitmap[i / 64]);

#undef APPLY_SREG
#undef APPLY_SEGMENT
#undef APPLY_DTABLE
}
#ifdef KVM_GET_DEBUGREGS
static void
dumpKvm(const struct kvm_debugregs &regs)
{
    inform("KVM debug state:\n");

#define APPLY_DREG(kreg, mreg) \
    inform("\t" # kreg ": 0x%llx\n", regs.kreg)

    FOREACH_DREG();

#undef APPLY_DREG

    inform("\tflags: 0x%llx\n", regs.flags);
}
#endif
static void
dumpFpuSpec(const struct FXSave &xs)
{
    inform("\tlast_ip: 0x%x\n", xs.ctrl64.fpu_ip);
    inform("\tlast_dp: 0x%x\n", xs.ctrl64.fpu_dp);
    inform("\tmxcsr_mask: 0x%x\n", xs.mxcsr_mask);
}
static void
dumpFpuSpec(const struct kvm_fpu &fpu)
{
    inform("\tlast_ip: 0x%x\n", fpu.last_ip);
    inform("\tlast_dp: 0x%x\n", fpu.last_dp);
}
template<typename T>
static void
dumpFpuCommon(const T &fpu)
{
    const unsigned top((fpu.fsw >> 11) & 0x7);
    inform("\tfcw: 0x%x\n", fpu.fcw);

    inform("\tfsw: 0x%x (top: %i, "
           "conditions: %s%s%s%s, exceptions: %s%s%s%s%s%s %s%s%s)\n",
           fpu.fsw, top,

           (fpu.fsw & CC0Bit) ? "C0" : "",
           (fpu.fsw & CC1Bit) ? "C1" : "",
           (fpu.fsw & CC2Bit) ? "C2" : "",
           (fpu.fsw & CC3Bit) ? "C3" : "",

           (fpu.fsw & IEBit) ? "I" : "",
           (fpu.fsw & DEBit) ? "D" : "",
           (fpu.fsw & ZEBit) ? "Z" : "",
           (fpu.fsw & OEBit) ? "O" : "",
           (fpu.fsw & UEBit) ? "U" : "",
           (fpu.fsw & PEBit) ? "P" : "",

           (fpu.fsw & StackFaultBit) ? "SF " : "",
           (fpu.fsw & ErrSummaryBit) ? "ES " : "",
           (fpu.fsw & BusyBit) ? "BUSY " : "");

    inform("\tftwx: 0x%x\n", fpu.ftwx);
    inform("\tlast_opcode: 0x%x\n", fpu.last_opcode);
    dumpFpuSpec(fpu);
    inform("\tmxcsr: 0x%x\n", fpu.mxcsr);

    for (int i = 0; i < 8; ++i) {
        const unsigned reg_idx((i + top) & 0x7);
        const bool empty(!((fpu.ftwx >> reg_idx) & 0x1));
        const double value(X86ISA::loadFloat80(fpu.fpr[i]));
        char hex[33];
        for (int j = 0; j < 10; ++j)
            snprintf(&hex[j*2], 3, "%.2x", fpu.fpr[i][j]);
        inform("\t\tST%i/%i: 0x%s (%f)%s\n", i, reg_idx,
               hex, value, empty ? " (e)" : "");
    }

    inform("\tXMM registers:\n");
    for (int i = 0; i < 16; ++i) {
        char hex[33];
        for (int j = 0; j < 16; ++j)
            snprintf(&hex[j*2], 3, "%.2x", fpu.xmm[i][j]);
        inform("\t\t%i: 0x%s\n", i, hex);
    }
}
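// Note (added): FSW bits 13:11 hold the x87 top-of-stack pointer, so
// physical register reg_idx = (i + top) & 7 corresponds to logical ST(i).
// The ftwx field is KVM's abridged tag word: one bit per physical register,
// set when the register holds valid data, which is why an ST entry is
// reported as empty when its bit is clear.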
static void
dumpKvm(const struct kvm_fpu &fpu)
{
    inform("FPU registers:\n");
    dumpFpuCommon(fpu);
}

static void
dumpKvm(const struct kvm_xsave &xsave)
{
    inform("FPU registers (XSave):\n");
    dumpFpuCommon(*(FXSave *)xsave.region);
}
static void
dumpKvm(const struct kvm_msrs &msrs)
{
    for (int i = 0; i < msrs.nmsrs; ++i) {
        const struct kvm_msr_entry &e(msrs.entries[i]);

        inform("\t0x%x: 0x%x\n", e.index, e.data);
    }
}
static void
dumpKvm(const struct kvm_xcrs &regs)
{
    inform("KVM XCR registers:\n");

    inform("\tFlags: 0x%x\n", regs.flags);
    for (int i = 0; i < regs.nr_xcrs; ++i) {
        inform("\tXCR[0x%x]: 0x%x\n",
               regs.xcrs[i].xcr, regs.xcrs[i].value);
    }
}
static void
dumpKvm(const struct kvm_vcpu_events &events)
{
    inform("\tException: [inj: %i, nr: %i, has_ec: %i, ec: %i]\n",
           events.exception.injected, events.exception.nr,
           events.exception.has_error_code, events.exception.error_code);

    inform("\tInterrupt: [inj: %i, nr: %i, soft: %i]\n",
           events.interrupt.injected, events.interrupt.nr,
           events.interrupt.soft);

    inform("\tNMI: [inj: %i, pending: %i, masked: %i]\n",
           events.nmi.injected, events.nmi.pending,
           events.nmi.masked);

    inform("\tSIPI vector: 0x%x\n", events.sipi_vector);
    inform("\tFlags: 0x%x\n", events.flags);
}
static bool
isCanonicalAddress(uint64_t addr)
{
    uint64_t upper_half(addr & 0xffff800000000000ULL);
    return upper_half == 0 || upper_half == 0xffff800000000000;
}
static void
checkSeg(const char *name, const int idx, const struct kvm_segment &seg,
         struct kvm_sregs sregs)
{
    // Check the register base.
    if (seg.base & 0xffffffff00000000ULL)
        warn("Illegal %s base: 0x%x\n", name, seg.base);

    // Check the type field.
    switch (idx) {
      case misc_reg::Cs:
        switch (seg.type) {
          case 3:
            if (seg.dpl != 0)
                warn("CS type is 3 but dpl != 0.\n");
            break;
          case 9:
          case 11:
            if (seg.dpl != sregs.ss.dpl)
                warn("CS type is %i but CS DPL != SS DPL\n", seg.type);
            break;
          case 13:
          case 15:
            if (seg.dpl > sregs.ss.dpl)
                warn("CS type is %i but CS DPL > SS DPL\n", seg.type);
            break;
          default:
            warn("Illegal CS type: %i\n", seg.type);
            break;
        }
        break;

      case misc_reg::Ss:
        if (seg.unusable)
            break;
        switch (seg.type) {
          case 3:
            if (sregs.cs.type == 3 && seg.dpl != 0)
                warn("CS type is 3, but SS DPL is != 0.\n");
            [[fallthrough]];
          case 7:
            if (!(sregs.cr0 & 1) && seg.dpl != 0)
                warn("SS DPL is %i, but CR0 PE is 0\n", seg.dpl);
            break;
          default:
            warn("Illegal SS type: %i\n", seg.type);
            break;
        }
        break;

      case misc_reg::Ds:
      case misc_reg::Es:
      case misc_reg::Fs:
      case misc_reg::Gs:
        if (seg.unusable)
            break;
        if (!(seg.type & 0x1) ||
            ((seg.type & 0x8) && !(seg.type & 0x2)))
            warn("%s has an illegal type field: %i\n", name, seg.type);
        break;

      case misc_reg::Tr:
        if (seg.type != 3 && seg.type != 11)
            warn("%s: Illegal segment type (%i)\n", name, seg.type);
        break;

      case misc_reg::Tsl:
        if (seg.unusable)
            break;
        if (seg.type != 2)
            warn("%s: Illegal segment type (%i)\n", name, seg.type);
        break;
    }

    // Check the limit/granularity combination.
    if (((seg.limit & 0xFFF) == 0 && seg.g) ||
        ((seg.limit & 0xFFF00000) != 0 && !seg.g)) {
        warn("%s limit (0x%x) and g (%i) combination is illegal.\n",
             name, seg.limit, seg.g);
    }
}
X86KvmCPU::X86KvmCPU(const X86KvmCPUParams &params)
    : BaseKvmCPU(params),
      useXSave(params.useXSave)
{
}

void
X86KvmCPU::init()
{
    BaseKvmCPU::init();

    Kvm &kvm(*vm->kvm);

    if (!kvm.capSetTSSAddress())
        panic("KVM: Missing capability (KVM_CAP_SET_TSS_ADDR)\n");
    if (!kvm.capExtendedCPUID())
        panic("KVM: Missing capability (KVM_CAP_EXT_CPUID)\n");
    if (!kvm.capUserNMI())
        warn("KVM: Missing capability (KVM_CAP_USER_NMI)\n");
    if (!kvm.capVCPUEvents())
        warn("KVM: Missing capability (KVM_CAP_VCPU_EVENTS)\n");

    haveDebugRegs = kvm.capDebugRegs();
    haveXSave = kvm.capXSave();
    haveXCRs = kvm.capXCRs();

    if (useXSave && !haveXSave) {
        warn("KVM: XSAVE not supported by host. MXCSR synchronization might "
             "be unreliable due to kernel bugs.\n");
        useXSave = false;
    } else if (!useXSave) {
        warn("KVM: XSave FPU/SIMD synchronization disabled by user.\n");
    }
}
    struct kvm_regs regs;

    struct kvm_sregs sregs;

#ifdef KVM_GET_DEBUGREGS
    struct kvm_debugregs dregs;
#else
    inform("Debug registers not supported by kernel.\n");
#endif

    struct kvm_xcrs xcrs;
    inform("XCRs not supported by kernel.\n");

    struct kvm_xsave xsave;
    inform("XSave not supported by kernel.\n");

    struct kvm_vcpu_events events;
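    // Note (added): the declarations above are excerpts from the
    // X86KvmCPU::dump*() helpers (dumpIntRegs(), dumpSpecRegs(),
    // dumpDebugRegs(), dumpXCRs(), dumpXSave() and dumpVCpuEvents()). Each
    // helper presumably fills its local structure with the matching getter
    // (getRegisters(), getSpecialRegisters(), getDebugRegisters(), getXCRs(),
    // getXSave(), getVCpuEvents()) and passes it to the dumpKvm() overload
    // above; the inform() calls are the fallbacks used when the kernel lacks
    // the corresponding capability.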
void
X86KvmCPU::dumpMSRs() const
{
    const Kvm::MSRIndexVector &supported_msrs = vm->kvm->getSupportedMSRs();
    auto msrs(newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(
                  supported_msrs.size()));

    msrs->nmsrs = supported_msrs.size();
    for (int i = 0; i < supported_msrs.size(); ++i) {
        struct kvm_msr_entry &e(msrs->entries[i]);
        e.index = supported_msrs[i];
    }

    getMSRs(*msrs.get());
    dumpKvm(*msrs.get());
}
    DPRINTF(KvmContext, "X86KvmCPU::updateKvmState():\n");
    if (debug::KvmContext)
        dump();
    struct kvm_regs regs;

#define APPLY_IREG(kreg, mreg) regs.kreg = tc->getReg(mreg)
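    // Note (added): in updateKvmStateRegs() this APPLY_IREG definition is
    // presumably expanded via FOREACH_IREG() (and then #undef'd) to copy
    // every general-purpose register from the gem5 ThreadContext into the
    // kvm_regs structure, which is then pushed to the vCPU with
    // setRegisters(regs).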
static void
setKvmSegmentReg(ThreadContext *tc, struct kvm_segment &kvm_seg,
                 const int index)
{
    SegAttr attr(tc->readMiscRegNoEffect(misc_reg::segAttr(index)));

    kvm_seg.base = tc->readMiscRegNoEffect(misc_reg::segBase(index));
    kvm_seg.limit = tc->readMiscRegNoEffect(misc_reg::segLimit(index));
    kvm_seg.selector = tc->readMiscRegNoEffect(misc_reg::segSel(index));
    kvm_seg.type = attr.type;
    kvm_seg.present = attr.present;
    kvm_seg.dpl = attr.dpl;
    kvm_seg.db = attr.defaultSize;
    kvm_seg.s = attr.system;
    kvm_seg.l = attr.longMode;
    kvm_seg.g = attr.granularity;
    kvm_seg.avl = attr.avl;
    kvm_seg.unusable = attr.unusable;
}
    struct kvm_sregs sregs;

#define APPLY_SREG(kreg, mreg) sregs.kreg = tc->readMiscRegNoEffect(mreg)
#define APPLY_SEGMENT(kreg, idx) setKvmSegmentReg(tc, sregs.kreg, idx)
#define APPLY_DTABLE(kreg, idx) setKvmDTableReg(tc, sregs.kreg, idx)

    FOREACH_SREG();
    FOREACH_SEGMENT();
    FOREACH_DTABLE();

#undef APPLY_SREG
#undef APPLY_SEGMENT
#undef APPLY_DTABLE

    memset(&sregs.interrupt_bitmap, 0, sizeof(sregs.interrupt_bitmap));

    if (sregs.tr.type == SEG_SYS_TYPE_TSS_AVAILABLE) {
        hack("tr.type (%i) is not busy. Forcing the busy bit.\n",
             sregs.tr.type);
        sregs.tr.type = SEG_SYS_TYPE_TSS_BUSY;
    }

    if (sregs.cs.dpl != sregs.ss.dpl) {
        hack("CS.DPL (%i) != SS.DPL (%i): Forcing SS.DPL to %i\n",
             sregs.cs.dpl, sregs.ss.dpl, sregs.cs.dpl);
        sregs.ss.dpl = sregs.cs.dpl;
    }

    if (!rflags_nocc.vm) {
#define APPLY_SEGMENT(kreg, idx) \
        checkSeg(# kreg, idx + misc_reg::SegSelBase, sregs.kreg, sregs)

        FOREACH_SEGMENT();
#undef APPLY_SEGMENT
    }
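    // Note (added): after the special registers have been assembled, the
    // same FOREACH_SEGMENT() loop is reused with APPLY_SEGMENT redefined to
    // run checkSeg() on every segment register, emitting the consistency
    // warnings defined earlier before the state is handed to KVM with
    // setSpecialRegisters(sregs).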
    const unsigned top((fpu.fsw >> 11) & 0x7);
    for (int i = 0; i < 8; ++i) {
        const unsigned reg_idx((i + top) & 0x7);
        const double value(bitsToFloat64(tc->getReg(float_reg::fpr(reg_idx))));
        DPRINTF(KvmContext, "Setting KVM FP reg %i (st[%i]) := %f\n",
                reg_idx, i, value);
        X86ISA::storeFloat80(fpu.fpr[i], value);
    }

    for (int i = 0; i < 16; ++i) {
        *(uint64_t *)&fpu.xmm[i][0] = tc->getReg(float_reg::xmmLow(i));
        *(uint64_t *)&fpu.xmm[i][8] = tc->getReg(float_reg::xmmHigh(i));
    }
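    // Note (added): gem5 stores each 128-bit XMM register as two 64-bit
    // registers, which is why the low and high halves are transferred
    // separately through float_reg::xmmLow(i) and float_reg::xmmHigh(i).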
    memset(&fpu, 0, sizeof(fpu));

    if (tc->readMiscRegNoEffect(misc_reg::Fiseg))
        warn_once("misc_reg::Fiseg is non-zero.\n");

    if (tc->readMiscRegNoEffect(misc_reg::Foseg))
        warn_once("misc_reg::Foseg is non-zero.\n");
    struct kvm_xsave kxsave;

    memset(&kxsave, 0, sizeof(kxsave));

    XSaveHeader &xsave_hdr =
        *(XSaveHeader *)((char *)&kxsave + sizeof(FXSave));
    xsave_hdr.xstate_bv.fpu = 1;
    xsave_hdr.xstate_bv.sse = 1;
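    // Note (added): the XSAVE header lives right after the legacy 512-byte
    // FXSAVE region, and XSTATE_BV bits 0 and 1 mark the x87 and SSE state
    // components as present, so KVM actually loads them from this buffer.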
    if (tc->readMiscRegNoEffect(misc_reg::Fiseg))
        warn_once("misc_reg::Fiseg is non-zero.\n");

    if (tc->readMiscRegNoEffect(misc_reg::Foseg))
        warn_once("misc_reg::Foseg is non-zero.\n");
    for (auto it = indices.cbegin(); it != indices.cend(); ++it) {
        struct kvm_msr_entry e;

        e.index = *it;
        e.data = tc->readMiscReg(msrMap.at(*it));
        DPRINTF(KvmContext, "Adding MSR: idx: 0x%x, data: 0x%x\n",
                e.index, e.data);
    }
    struct kvm_xcrs xcrs;

    for (int i = 0; i < xcrs.nr_xcrs; ++i) {
        xcrs.xcrs[i].xcr = i;
    }
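    // Note (added): each entry's value is presumably filled from the
    // corresponding gem5 XCR misc register (misc_reg::xcr(i)) before the
    // whole set is handed to the vCPU with setXCRs(xcrs).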
    struct kvm_regs regs;
    struct kvm_sregs sregs;

    DPRINTF(KvmContext, "X86KvmCPU::updateThreadContext():\n");
    if (debug::KvmContext)
        dump();

    struct kvm_xsave xsave;
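    // Note (added): updateThreadContext() presumably fetches the register
    // and special-register state with getRegisters()/getSpecialRegisters(),
    // applies it via updateThreadContextRegs() and updateThreadContextSRegs(),
    // then restores the FPU/SIMD state either through the XSave path
    // (getXSave() plus updateThreadContextXSave()) or the legacy kvm_fpu
    // path, and finally updates the MSRs and XCRs.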
void
X86KvmCPU::updateThreadContextRegs(const struct kvm_regs &regs,
                                   const struct kvm_sregs &sregs)
{
#define APPLY_IREG(kreg, mreg) tc->setReg(mreg, regs.kreg)

    FOREACH_IREG();

#undef APPLY_IREG
    attr.type = kvm_seg.type;
    attr.present = kvm_seg.present;
    attr.dpl = kvm_seg.dpl;
    attr.defaultSize = kvm_seg.db;
    attr.system = kvm_seg.s;
    attr.longMode = kvm_seg.l;
    attr.granularity = kvm_seg.g;
    attr.avl = kvm_seg.avl;
    attr.unusable = kvm_seg.unusable;
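    // Note (added): these assignments are from setContextSegment() and
    // mirror setKvmSegmentReg() in the opposite direction, packing the
    // kvm_segment attribute bits back into gem5's SegAttr misc register for
    // the segment selected by the index.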
#define APPLY_SREG(kreg, mreg) tc->setMiscRegNoEffect(mreg, sregs.kreg)
#define APPLY_SEGMENT(kreg, idx) setContextSegment(tc, sregs.kreg, idx)
#define APPLY_DTABLE(kreg, idx) setContextSegment(tc, sregs.kreg, idx)
    const unsigned top((fpu.fsw >> 11) & 0x7);

    for (int i = 0; i < 8; ++i) {
        const unsigned reg_idx((i + top) & 0x7);
        const double value(X86ISA::loadFloat80(fpu.fpr[i]));
        DPRINTF(KvmContext, "Setting gem5 FP reg %i (st[%i]) := %f\n",
                reg_idx, i, value);
        tc->setReg(float_reg::fpr(reg_idx), floatToBits64(value));
    }

    for (int i = 0; i < 16; ++i) {
        tc->setReg(float_reg::xmmLow(i), *(uint64_t *)&fpu.xmm[i][0]);
        tc->setReg(float_reg::xmmHigh(i), *(uint64_t *)&fpu.xmm[i][8]);
    }
    const Kvm::MSRIndexVector &msrs(getMsrIntersection());
    auto kvm_msrs(newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(
                      msrs.size()));
    struct kvm_msr_entry *entry;

    kvm_msrs->nmsrs = msrs.size();
    entry = &kvm_msrs->entries[0];
    for (auto it = msrs.cbegin(); it != msrs.cend(); ++it, ++entry) {
        entry->index = *it;
        entry->reserved = 0;
        entry->data = 0;
    }

    getMSRs(*kvm_msrs.get());

    entry = &kvm_msrs->entries[0];
    for (int i = 0; i < kvm_msrs->nmsrs; ++i, ++entry) {
        DPRINTF(KvmContext, "Setting M5 MSR: idx: 0x%x, data: 0x%x\n",
                entry->index, entry->data);
        tc->setMiscReg(msrMap.at(entry->index), entry->data);
    }
    struct kvm_xcrs xcrs;

    getXCRs(xcrs);

    for (int i = 0; i < xcrs.nr_xcrs; ++i) {
        tc->setMiscRegNoEffect(misc_reg::xcr(xcrs.xcrs[i].xcr),
                               xcrs.xcrs[i].value);
    }
        DPRINTF(KvmInt, "Delivering NMI\n");
        kvmNonMaskableInterrupt();

        DPRINTF(KvmInt, "INIT interrupt\n");
        fault.get()->invoke(tc);

        DPRINTF(KvmInt, "STARTUP interrupt\n");
        fault.get()->invoke(tc);
    } else if (x86int) {
        struct kvm_interrupt kvm_int;
        kvm_int.irq = x86int->getVector();

        DPRINTF(KvmInt, "Delivering interrupt: %s (%u)\n",
                fault->name(), kvm_int.irq);

        kvmInterrupt(kvm_int);
    } else {
        panic("KVM: Unknown interrupt type\n");
    }
    if (lapic->checkInterruptsRaw()) {
        if (lapic->hasPendingUnmaskable()) {
            DPRINTF(KvmInt,
                    "Delivering unmaskable interrupt.\n");
            syncThreadContext();
            deliverInterrupts();
        } else if (kvm_run.ready_for_interrupt_injection) {
            syncThreadContext();
            if (lapic->checkInterrupts()) {
                DPRINTF(KvmInt,
                        "M5 has pending interrupts, delivering interrupt.\n");
                deliverInterrupts();
            } else {
                DPRINTF(KvmInt,
                        "Interrupt delivery delayed due to KVM confusion.\n");
                kvm_run.request_interrupt_window = 1;
            }
        } else if (!kvm_run.request_interrupt_window) {
            DPRINTF(KvmInt,
                    "M5 has pending interrupts, requesting interrupt "
                    "window.\n");
            kvm_run.request_interrupt_window = 1;
        }
    } else {
        kvm_run.request_interrupt_window = 0;
    }
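    // Note (added): when gem5 has a pending interrupt that cannot be
    // injected right away, setting kvm_run.request_interrupt_window asks KVM
    // to exit as soon as the guest is able to accept an injection; that exit
    // is then handled by handleKvmExitIRQWindowOpen().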
    if (!archIsDrained()) {
        DPRINTF(Drain, "kvmRunDrain: Architecture code isn't drained\n");

        kvm_run.request_interrupt_window = 1;
    } else {
        DPRINTF(Drain, "kvmRunDrain: Delivering pending IO\n");
    }
void
X86KvmCPU::handleIOMiscReg32(int miscreg)
{
    struct kvm_run &kvm_run(*getKvmRunState());
    const uint16_t port(kvm_run.io.port);

    assert(kvm_run.exit_reason == KVM_EXIT_IO);

    if (kvm_run.io.size != 4) {
        panic("Unexpected IO size (%u) for address 0x%x.\n",
              kvm_run.io.size, port);
    }

    if (kvm_run.io.count != 1) {
        panic("Unexpected IO count (%u) for address 0x%x.\n",
              kvm_run.io.count, port);
    }

    uint32_t *data((uint32_t *)getGuestData(kvm_run.io.data_offset));
    if (kvm_run.io.direction == KVM_EXIT_IO_OUT)
        tc->setMiscReg(miscreg, *data);
    else
        *data = tc->readMiscRegNoEffect(miscreg);
}
    bool isWrite(kvm_run.io.direction == KVM_EXIT_IO_OUT);
    unsigned char *guestData(getGuestData(kvm_run.io.data_offset));
    uint16_t port(kvm_run.io.port);
    const int count(kvm_run.io.count);

    assert(kvm_run.io.direction == KVM_EXIT_IO_IN ||
           kvm_run.io.direction == KVM_EXIT_IO_OUT);

    DPRINTF(KvmIO, "KVM-x86: Handling IO instruction (%s) (port: 0x%x)\n",
            (isWrite ? "out" : "in"), kvm_run.io.port);

    if (pciConfigAddr & 0x80000000) {
        pAddr = X86ISA::x86PciConfigAddress((pciConfigAddr & 0x7ffffffc) |
                                            (port & 0x3));
    } else {
        pAddr = X86ISA::x86IOAddress(port);
    }

    RequestPtr io_req = std::make_shared<Request>(
        pAddr, kvm_run.io.size,
        Request::UNCACHEABLE, dataRequestorId());

    guestData += kvm_run.io.size;
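    // Note (added): for string IO instructions kvm_run.io.count can be
    // greater than one, so handleKvmExitIO() presumably issues one
    // uncacheable request per element through dataPort.submitIO(), advancing
    // guestData by io.size after each access as shown above.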
bool
X86KvmCPU::archIsDrained() const
{
    struct kvm_vcpu_events events;

    getVCpuEvents(events);

    const bool pending_events(events.exception.injected ||
                              events.interrupt.injected ||
                              events.nmi.injected || events.nmi.pending);

    if (pending_events) {
        DPRINTF(Drain, "archIsDrained: Pending events: %s %s %s %s\n",
                events.exception.injected ? "exception" : "",
                events.interrupt.injected ? "interrupt" : "",
                events.nmi.injected ? "nmi[i]" : "",
                events.nmi.pending ? "nmi[p]" : "");
    }

    return !pending_events;
}
static struct kvm_cpuid_entry2
makeKvmCpuid(uint32_t function, uint32_t index,
             CpuidResult &result, uint32_t flags = 0)
{
    struct kvm_cpuid_entry2 e;
    e.function = function;
    e.index = index;
    e.flags = flags;
    e.eax = (uint32_t)result.rax;
    e.ebx = (uint32_t)result.rbx;
    e.ecx = (uint32_t)result.rcx;
    e.edx = (uint32_t)result.rdx;

    return e;
}
    Kvm::CPUIDVector m5_supported;

    CpuidResult func0;
    isa->cpuid->doCpuid(tc, 0x0, 0, func0);
    for (uint32_t function = 0; function <= func0.rax; ++function) {
        if (!isa->cpuid->hasSignificantIndex(function)) {
        [[maybe_unused]] bool rv = isa->cpuid->doCpuid(
        uint32_t flag = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
        m5_supported.push_back(

    CpuidResult efunc0;
    isa->cpuid->doCpuid(tc, 0x80000000, 0, efunc0);
    for (uint32_t function = 0x80000000; function <= efunc0.rax; ++function) {
        if (!isa->cpuid->hasSignificantIndex(function)) {
        [[maybe_unused]] bool rv = isa->cpuid->doCpuid(
        uint32_t flag = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
        m5_supported.push_back(
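    // Note (added): updateCPUID() walks the standard CPUID leaves
    // (0 .. func0.rax) and the extended leaves (0x80000000 .. efunc0.rax)
    // using gem5's CPUID model, presumably converting each result into a
    // kvm_cpuid_entry2 with makeKvmCpuid(), tagging leaves that have a
    // significant index with KVM_CPUID_FLAG_SIGNIFCANT_INDEX, and finally
    // installing the vector into the vCPU via setCPUID().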
    if (ioctl(KVM_SET_CPUID2, (void *)&cpuid) == -1)
        panic("KVM: Failed to set guest CPUID2 (errno: %i)\n",
              errno);

    kvm_cpuid->nent = cpuid.size();
    std::copy(cpuid.begin(), cpuid.end(), kvm_cpuid->entries);
    if (ioctl(KVM_SET_MSRS, (void *)&msrs) == -1)
        panic("KVM: Failed to set guest MSRs (errno: %i)\n",
              errno);

    kvm_msrs->nmsrs = msrs.size();
    std::copy(msrs.begin(), msrs.end(), kvm_msrs->entries);
    if (ioctl(KVM_GET_MSRS, (void *)&msrs) == -1)
        panic("KVM: Failed to get guest MSRs (errno: %i)\n",
              errno);
    // X86KvmCPU::setMSR(index, value):
    struct kvm_msr_entry &entry(kvm_msrs->entries[0]);

    kvm_msrs->nmsrs = 1;
    entry.index = index;

    // X86KvmCPU::getMSR(index):
    struct kvm_msr_entry &entry(kvm_msrs->entries[0]);

    kvm_msrs->nmsrs = 1;
    entry.index = index;
const Kvm::MSRIndexVector &
X86KvmCPU::getMsrIntersection() const
{
    if (cachedMsrIntersection.empty()) {
        const Kvm::MSRIndexVector &kvm_msrs = vm->kvm->getSupportedMSRs();

        DPRINTF(Kvm, "kvm-x86: Updating MSR intersection\n");
        for (auto it = kvm_msrs.cbegin(); it != kvm_msrs.cend(); ++it) {
            if (msrMap.find(*it) != msrMap.end()) {
                cachedMsrIntersection.push_back(*it);
                DPRINTF(Kvm, "kvm-x86: Adding MSR 0x%x\n", *it);
            } else {
                warn("kvm-x86: MSR (0x%x) unsupported by gem5. Skipping.\n",
                     *it);
            }
        }
    }

    return cachedMsrIntersection;
}
void
X86KvmCPU::getDebugRegisters(struct kvm_debugregs &regs) const
{
#ifdef KVM_GET_DEBUGREGS
    if (ioctl(KVM_GET_DEBUGREGS, &regs) == -1)
        panic("KVM: Failed to get guest debug registers\n");
#else
    panic("KVM: Unsupported getDebugRegisters call.\n");
#endif
}

void
X86KvmCPU::setDebugRegisters(const struct kvm_debugregs &regs)
{
#ifdef KVM_SET_DEBUGREGS
    if (ioctl(KVM_SET_DEBUGREGS, (void *)&regs) == -1)
        panic("KVM: Failed to set guest debug registers\n");
#else
    panic("KVM: Unsupported setDebugRegisters call.\n");
#endif
}
void
X86KvmCPU::getXCRs(struct kvm_xcrs &regs) const
{
    if (ioctl(KVM_GET_XCRS, &regs) == -1)
        panic("KVM: Failed to get guest XCRs\n");
}

void
X86KvmCPU::setXCRs(const struct kvm_xcrs &regs)
{
    if (ioctl(KVM_SET_XCRS, (void *)&regs) == -1)
        panic("KVM: Failed to set guest XCRs\n");
}

void
X86KvmCPU::getXSave(struct kvm_xsave &xsave) const
{
    if (ioctl(KVM_GET_XSAVE, &xsave) == -1)
        panic("KVM: Failed to get guest XSave state\n");
}

void
X86KvmCPU::setXSave(const struct kvm_xsave &xsave)
{
    if (ioctl(KVM_SET_XSAVE, (void *)&xsave) == -1)
        panic("KVM: Failed to set guest XSave state\n");
}

void
X86KvmCPU::getVCpuEvents(struct kvm_vcpu_events &events) const
{
    if (ioctl(KVM_GET_VCPU_EVENTS, &events) == -1)
        panic("KVM: Failed to get guest vCPU events\n");
}

void
X86KvmCPU::setVCpuEvents(const struct kvm_vcpu_events &events)
{
    if (ioctl(KVM_SET_VCPU_EVENTS, (void *)&events) == -1)
        panic("KVM: Failed to set guest vCPU events\n");
}