x86_cpu.cc
1/*
2 * Copyright (c) 2013 Andreas Sandberg
3 * All rights reserved
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29#include "cpu/kvm/x86_cpu.hh"
30
31#include <linux/kvm.h>
32
33#include <algorithm>
34#include <cerrno>
35#include <memory>
36
37#include "arch/x86/cpuid.hh"
38#include "arch/x86/faults.hh"
39#include "arch/x86/interrupts.hh"
40#include "arch/x86/isa.hh"
41#include "arch/x86/regs/float.hh"
42#include "arch/x86/regs/int.hh"
43#include "arch/x86/regs/msr.hh"
44#include "arch/x86/utility.hh"
45#include "base/bitunion.hh"
46#include "base/compiler.hh"
47#include "cpu/kvm/base.hh"
48#include "debug/Drain.hh"
49#include "debug/Kvm.hh"
50#include "debug/KvmContext.hh"
51#include "debug/KvmIO.hh"
52#include "debug/KvmInt.hh"
53
54namespace gem5
55{
56
57using namespace X86ISA;
58
59#define MSR_TSC 0x10
60
61#define IO_PCI_CONF_ADDR 0xCF8
62#define IO_PCI_CONF_DATA_BASE 0xCFC
63
64// Task segment type of an inactive 32-bit or 64-bit task
65#define SEG_SYS_TYPE_TSS_AVAILABLE 9
66// Task segment type of an active 32-bit or 64-bit task
67#define SEG_SYS_TYPE_TSS_BUSY 11
68
69// Non-conforming accessed code segment
70#define SEG_CS_TYPE_ACCESSED 9
71// Non-conforming accessed code segment that can be read
72#define SEG_CS_TYPE_READ_ACCESSED 11
73
74// The lowest bit of the type field for normal segments (code and
75// data) is used to indicate that a segment has been accessed.
76#define SEG_TYPE_BIT_ACCESSED 1
77
78 // Some Linux distros (e.g., RHEL7) define the KVM macros using "BIT" but do
79 // not include the header where BIT is defined, so define it here in that case.
80#ifndef BIT
81#define BIT(nr) (1UL << (nr))
82#endif
83
84
85struct GEM5_PACKED FXSave
86{
87 uint16_t fcw;
88 uint16_t fsw;
89 uint8_t ftwx;
90 uint8_t pad0;
91 uint16_t last_opcode;
92 union
93 {
94 struct
95 {
96 uint32_t fpu_ip;
97 uint16_t fpu_cs;
98 uint16_t pad1;
99 uint32_t fpu_dp;
100 uint16_t fpu_ds;
101 uint16_t pad2;
102 } ctrl32;
103
104 struct
105 {
106 uint64_t fpu_ip;
107 uint64_t fpu_dp;
108 } ctrl64;
109 };
110 uint32_t mxcsr;
111 uint32_t mxcsr_mask;
112
113 uint8_t fpr[8][16];
114 uint8_t xmm[16][16];
115
116 uint64_t reserved[12];
117};
118
119static_assert(sizeof(FXSave) == 512, "Unexpected size of FXSave");
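// Note: the 512 bytes asserted here decompose as 32 bytes of
// control/status state (fcw through mxcsr_mask), 8 x 16 bytes of
// x87/MMX registers, 16 x 16 bytes of XMM registers, and 12 x 8 bytes
// of reserved space: 32 + 128 + 256 + 96 == 512, matching the layout
// of the hardware FXSAVE/FXRSTOR memory image.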
120
121BitUnion64(XStateBV)
122 Bitfield<0> fpu;
123 Bitfield<1> sse;
124 Bitfield<2> avx;
125 Bitfield<4, 3> mpx;
126 Bitfield<7, 5> avx512;
127 Bitfield<8> pt;
128 Bitfield<9> pkru;
129 Bitfield<10> pasid;
130 Bitfield<12, 11> cet;
131 Bitfield<13> hdc;
132 Bitfield<14> uintr;
133 Bitfield<15> lbr;
134 Bitfield<16> hwp;
135 Bitfield<18, 17> amx;
136 Bitfield<63, 19> reserved;
137EndBitUnion(XStateBV)
138
139struct XSaveHeader
140{
141 XStateBV xstate_bv;
142 uint64_t reserved[7];
143};
144
145static_assert(sizeof(XSaveHeader) == 64, "Unexpected size of XSaveHeader");
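// Note: in the XSAVE memory layout this 64-byte header immediately
// follows the 512-byte legacy FXSAVE region, i.e. it lives at offset
// 512 of kvm_xsave::region. That is the offset computed below in
// updateKvmStateFPUXSave() as (char *)&kxsave + sizeof(FXSave).
// xstate_bv records which components the area actually holds (bit 0 =
// x87, bit 1 = SSE, ...), mirroring the XCR0 bit assignments.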
146
147#define FOREACH_IREG() \
148 do { \
149 APPLY_IREG(rax, int_reg::Rax); \
150 APPLY_IREG(rbx, int_reg::Rbx); \
151 APPLY_IREG(rcx, int_reg::Rcx); \
152 APPLY_IREG(rdx, int_reg::Rdx); \
153 APPLY_IREG(rsi, int_reg::Rsi); \
154 APPLY_IREG(rdi, int_reg::Rdi); \
155 APPLY_IREG(rsp, int_reg::Rsp); \
156 APPLY_IREG(rbp, int_reg::Rbp); \
157 APPLY_IREG(r8, int_reg::R8); \
158 APPLY_IREG(r9, int_reg::R9); \
159 APPLY_IREG(r10, int_reg::R10); \
160 APPLY_IREG(r11, int_reg::R11); \
161 APPLY_IREG(r12, int_reg::R12); \
162 APPLY_IREG(r13, int_reg::R13); \
163 APPLY_IREG(r14, int_reg::R14); \
164 APPLY_IREG(r15, int_reg::R15); \
165 } while (0)
166
167#define FOREACH_SREG() \
168 do { \
169 APPLY_SREG(cr0, misc_reg::Cr0); \
170 APPLY_SREG(cr2, misc_reg::Cr2); \
171 APPLY_SREG(cr3, misc_reg::Cr3); \
172 APPLY_SREG(cr4, misc_reg::Cr4); \
173 APPLY_SREG(cr8, misc_reg::Cr8); \
174 APPLY_SREG(efer, misc_reg::Efer); \
175 APPLY_SREG(apic_base, misc_reg::ApicBase); \
176 } while (0)
177
178#define FOREACH_DREG() \
179 do { \
180 APPLY_DREG(db[0], misc_reg::Dr0); \
181 APPLY_DREG(db[1], misc_reg::Dr1); \
182 APPLY_DREG(db[2], misc_reg::Dr2); \
183 APPLY_DREG(db[3], misc_reg::Dr3); \
184 APPLY_DREG(dr6, misc_reg::Dr6); \
185 APPLY_DREG(dr7, misc_reg::Dr7); \
186 } while (0)
187
188#define FOREACH_SEGMENT() \
189 do { \
190 APPLY_SEGMENT(cs, misc_reg::Cs - misc_reg::SegSelBase); \
191 APPLY_SEGMENT(ds, misc_reg::Ds - misc_reg::SegSelBase); \
192 APPLY_SEGMENT(es, misc_reg::Es - misc_reg::SegSelBase); \
193 APPLY_SEGMENT(fs, misc_reg::Fs - misc_reg::SegSelBase); \
194 APPLY_SEGMENT(gs, misc_reg::Gs - misc_reg::SegSelBase); \
195 APPLY_SEGMENT(ss, misc_reg::Ss - misc_reg::SegSelBase); \
196 APPLY_SEGMENT(tr, misc_reg::Tr - misc_reg::SegSelBase); \
197 APPLY_SEGMENT(ldt, misc_reg::Tsl - misc_reg::SegSelBase); \
198 } while (0)
199
200#define FOREACH_DTABLE() \
201 do { \
202 APPLY_DTABLE(gdt, misc_reg::Tsg - misc_reg::SegSelBase); \
203 APPLY_DTABLE(idt, misc_reg::Idtr - misc_reg::SegSelBase); \
204 } while (0)
205
206template<typename Struct, typename Entry>
207static auto
208newVarStruct(size_t entries)
209{
210 size_t size = sizeof(Struct) + entries * sizeof(Entry);
211 return std::unique_ptr<Struct, void(*)(Struct *)>(
212 (Struct *)operator new(size),
213 [](Struct *p) { operator delete(p); });
214}
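// Note: structs such as kvm_msrs and kvm_cpuid2 end in a flexible
// array member, so their real size depends on the entry count and they
// cannot be declared as plain local variables. newVarStruct()
// allocates the header plus the trailing entries in one block; an
// illustrative use:
//
//   auto msrs = newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(2);
//   msrs->nmsrs = 2; // then fill in msrs->entries[0] and [1]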
215
216static void
217dumpKvm(const struct kvm_regs &regs)
218{
219 inform("KVM register state:\n");
220
221#define APPLY_IREG(kreg, mreg) \
222 inform("\t" # kreg ": 0x%llx\n", regs.kreg)
223
224 FOREACH_IREG();
225
226#undef APPLY_IREG
227
228 inform("\trip: 0x%llx\n", regs.rip);
229 inform("\trflags: 0x%llx\n", regs.rflags);
230}
231
232static void
233dumpKvm(const char *reg_name, const struct kvm_segment &seg)
234{
235 inform("\t%s: @0x%llx+%x [sel: 0x%x, type: 0x%x]\n"
236 "\t\tpres.: %u, dpl: %u, db: %u, s: %u, l: %u, g: %u, avl: %u, "
237 "unus.: %u\n",
238 reg_name,
239 seg.base, seg.limit, seg.selector, seg.type,
240 seg.present, seg.dpl, seg.db, seg.s, seg.l, seg.g, seg.avl,
241 seg.unusable);
242}
243
244static void
245dumpKvm(const char *reg_name, const struct kvm_dtable &dtable)
246{
247 inform("\t%s: @0x%llx+%x\n",
248 reg_name, dtable.base, dtable.limit);
249}
250
251static void
252dumpKvm(const struct kvm_sregs &sregs)
253{
254#define APPLY_SREG(kreg, mreg) \
255 inform("\t" # kreg ": 0x%llx\n", sregs.kreg);
256#define APPLY_SEGMENT(kreg, idx) \
257 dumpKvm(# kreg, sregs.kreg);
258#define APPLY_DTABLE(kreg, idx) \
259 dumpKvm(# kreg, sregs.kreg);
260
261 inform("Special registers:\n");
262 FOREACH_SEGMENT();
263 FOREACH_SREG();
264 FOREACH_DTABLE();
265
266 inform("Interrupt Bitmap:");
267 for (int i = 0; i < KVM_NR_INTERRUPTS; i += 64)
268 inform(" 0x%.8x", sregs.interrupt_bitmap[i / 64]);
269
270#undef APPLY_SREG
271#undef APPLY_SEGMENT
272#undef APPLY_DTABLE
273}
274
275#ifdef KVM_GET_DEBUGREGS
276static void
277dumpKvm(const struct kvm_debugregs &regs)
278{
279 inform("KVM debug state:\n");
280
281#define APPLY_DREG(kreg, mreg) \
282 inform("\t" # kreg ": 0x%llx\n", regs.kreg)
283
284 FOREACH_DREG();
285
286#undef APPLY_DREG
287
288 inform("\tflags: 0x%llx\n", regs.flags);
289}
290#endif
291
292static void
293dumpFpuSpec(const struct FXSave &xs)
294{
295 inform("\tlast_ip: 0x%x\n", xs.ctrl64.fpu_ip);
296 inform("\tlast_dp: 0x%x\n", xs.ctrl64.fpu_dp);
297 inform("\tmxcsr_mask: 0x%x\n", xs.mxcsr_mask);
298}
299
300static void
301dumpFpuSpec(const struct kvm_fpu &fpu)
302{
303 inform("\tlast_ip: 0x%x\n", fpu.last_ip);
304 inform("\tlast_dp: 0x%x\n", fpu.last_dp);
305}
306
307template<typename T>
308static void
309dumpFpuCommon(const T &fpu)
310{
311 const unsigned top((fpu.fsw >> 11) & 0x7);
312 inform("\tfcw: 0x%x\n", fpu.fcw);
313
314 inform("\tfsw: 0x%x (top: %i, "
315 "conditions: %s%s%s%s, exceptions: %s%s%s%s%s%s %s%s%s)\n",
316 fpu.fsw, top,
317
318 (fpu.fsw & CC0Bit) ? "C0" : "",
319 (fpu.fsw & CC1Bit) ? "C1" : "",
320 (fpu.fsw & CC2Bit) ? "C2" : "",
321 (fpu.fsw & CC3Bit) ? "C3" : "",
322
323 (fpu.fsw & IEBit) ? "I" : "",
324 (fpu.fsw & DEBit) ? "D" : "",
325 (fpu.fsw & ZEBit) ? "Z" : "",
326 (fpu.fsw & OEBit) ? "O" : "",
327 (fpu.fsw & UEBit) ? "U" : "",
328 (fpu.fsw & PEBit) ? "P" : "",
329
330 (fpu.fsw & StackFaultBit) ? "SF " : "",
331 (fpu.fsw & ErrSummaryBit) ? "ES " : "",
332 (fpu.fsw & BusyBit) ? "BUSY " : ""
333 );
334 inform("\tftwx: 0x%x\n", fpu.ftwx);
335 inform("\tlast_opcode: 0x%x\n", fpu.last_opcode);
336 dumpFpuSpec(fpu);
337 inform("\tmxcsr: 0x%x\n", fpu.mxcsr);
338 inform("\tFP Stack:\n");
339 for (int i = 0; i < 8; ++i) {
340 const unsigned reg_idx((i + top) & 0x7);
341 const bool empty(!((fpu.ftwx >> reg_idx) & 0x1));
342 const double value(X86ISA::loadFloat80(fpu.fpr[i]));
343 char hex[33];
344 for (int j = 0; j < 10; ++j)
345 snprintf(&hex[j*2], 3, "%.2x", fpu.fpr[i][j]);
346 inform("\t\tST%i/%i: 0x%s (%f)%s\n", i, reg_idx,
347 hex, value, empty ? " (e)" : "");
348 }
349 inform("\tXMM registers:\n");
350 for (int i = 0; i < 16; ++i) {
351 char hex[33];
352 for (int j = 0; j < 16; ++j)
353 snprintf(&hex[j*2], 3, "%.2x", fpu.xmm[i][j]);
354 inform("\t\t%i: 0x%s\n", i, hex);
355 }
356}
357
358static void
359dumpKvm(const struct kvm_fpu &fpu)
360{
361 inform("FPU registers:\n");
362 dumpFpuCommon(fpu);
363}
364
365static void
366dumpKvm(const struct kvm_xsave &xsave)
367{
368 inform("FPU registers (XSave):\n");
369 dumpFpuCommon(*(FXSave *)xsave.region);
370}
371
372static void
373dumpKvm(const struct kvm_msrs &msrs)
374{
375 inform("MSRs:\n");
376
377 for (int i = 0; i < msrs.nmsrs; ++i) {
378 const struct kvm_msr_entry &e(msrs.entries[i]);
379
380 inform("\t0x%x: 0x%x\n", e.index, e.data);
381 }
382}
383
384static void
385dumpKvm(const struct kvm_xcrs &regs)
386{
387 inform("KVM XCR registers:\n");
388
389 inform("\tFlags: 0x%x\n", regs.flags);
390 for (int i = 0; i < regs.nr_xcrs; ++i) {
391 inform("\tXCR[0x%x]: 0x%x\n",
392 regs.xcrs[i].xcr,
393 regs.xcrs[i].value);
394 }
395}
396
397static void
398dumpKvm(const struct kvm_vcpu_events &events)
399{
400 inform("vCPU events:\n");
401
402 inform("\tException: [inj: %i, nr: %i, has_ec: %i, ec: %i]\n",
403 events.exception.injected, events.exception.nr,
404 events.exception.has_error_code, events.exception.error_code);
405
406 inform("\tInterrupt: [inj: %i, nr: %i, soft: %i]\n",
407 events.interrupt.injected, events.interrupt.nr,
408 events.interrupt.soft);
409
410 inform("\tNMI: [inj: %i, pending: %i, masked: %i]\n",
411 events.nmi.injected, events.nmi.pending,
412 events.nmi.masked);
413
414 inform("\tSIPI vector: 0x%x\n", events.sipi_vector);
415 inform("\tFlags: 0x%x\n", events.flags);
416}
417
418static bool
419isCanonicalAddress(uint64_t addr)
420{
421 // x86-64 doesn't currently use the full 64-bit virtual address
422 // space; instead, it uses signed 48-bit addresses that are
423 // sign-extended to 64 bits. Such addresses are known as
424 // "canonical".
425 uint64_t upper_half(addr & 0xffff800000000000ULL);
426 return upper_half == 0 || upper_half == 0xffff800000000000;
427}
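// Worked example: the mask keeps bits 63..47, which must all equal the
// sign bit (bit 47) of a canonical 48-bit address:
//   0x00007fffffffffff -> upper_half == 0                  (canonical)
//   0xffff800000000000 -> upper_half == 0xffff800000000000 (canonical)
//   0x0000800000000000 -> upper_half == 0x0000800000000000 (rejected)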
428
429static void
430checkSeg(const char *name, const int idx, const struct kvm_segment &seg,
431 struct kvm_sregs sregs)
432{
433 // Check the register base
434 switch (idx) {
435 case misc_reg::Tsl:
436 case misc_reg::Tr:
437 case misc_reg::Fs:
438 case misc_reg::Gs:
439 if (!isCanonicalAddress(seg.base))
440 warn("Illegal %s base: 0x%x\n", name, seg.base);
441 break;
442
443 case misc_reg::Ss:
444 case misc_reg::Ds:
445 case misc_reg::Es:
446 if (seg.unusable)
447 break;
448 [[fallthrough]];
449 case misc_reg::Cs:
450 if (seg.base & 0xffffffff00000000ULL)
451 warn("Illegal %s base: 0x%x\n", name, seg.base);
452 break;
453 }
454
455 // Check the type
456 switch (idx) {
457 case misc_reg::Cs:
458 switch (seg.type) {
459 case 3:
460 if (seg.dpl != 0)
461 warn("CS type is 3 but dpl != 0.\n");
462 break;
463 case 9:
464 case 11:
465 if (seg.dpl != sregs.ss.dpl)
466 warn("CS type is %i but CS DPL != SS DPL\n", seg.type);
467 break;
468 case 13:
469 case 15:
470 if (seg.dpl > sregs.ss.dpl)
471 warn("CS type is %i but CS DPL > SS DPL\n", seg.type);
472 break;
473 default:
474 warn("Illegal CS type: %i\n", seg.type);
475 break;
476 }
477 break;
478
479 case misc_reg::Ss:
480 if (seg.unusable)
481 break;
482 switch (seg.type) {
483 case 3:
484 if (sregs.cs.type == 3 && seg.dpl != 0)
485 warn("CS type is 3, but SS DPL is != 0.\n");
486 [[fallthrough]];
487 case 7:
488 if (!(sregs.cr0 & 1) && seg.dpl != 0)
489 warn("SS DPL is %i, but CR0 PE is 0\n", seg.dpl);
490 break;
491 default:
492 warn("Illegal SS type: %i\n", seg.type);
493 break;
494 }
495 break;
496
497 case misc_reg::Ds:
498 case misc_reg::Es:
499 case misc_reg::Fs:
500 case misc_reg::Gs:
501 if (seg.unusable)
502 break;
503 if (!(seg.type & 0x1) ||
504 ((seg.type & 0x8) && !(seg.type & 0x2)))
505 warn("%s has an illegal type field: %i\n", name, seg.type);
506 break;
507
508 case misc_reg::Tr:
509 // TODO: We should check the CPU mode
510 if (seg.type != 3 && seg.type != 11)
511 warn("%s: Illegal segment type (%i)\n", name, seg.type);
512 break;
513
514 case misc_reg::Tsl:
515 if (seg.unusable)
516 break;
517 if (seg.type != 2)
518 warn("%s: Illegal segment type (%i)\n", name, seg.type);
519 break;
520 }
521
522 switch (idx) {
523 case misc_reg::Ss:
524 case misc_reg::Ds:
525 case misc_reg::Es:
526 case misc_reg::Fs:
527 case misc_reg::Gs:
528 if (seg.unusable)
529 break;
530 [[fallthrough]];
531 case misc_reg::Cs:
532 if (!seg.s)
533 warn("%s: S flag not set\n", name);
534 break;
535
536 case misc_reg::Tsl:
537 if (seg.unusable)
538 break;
539 [[fallthrough]];
540 case misc_reg::Tr:
541 if (seg.s)
542 warn("%s: S flag is set\n", name);
543 break;
544 }
545
546 switch (idx) {
547 case misc_reg::Ss:
548 case misc_reg::Ds:
549 case misc_reg::Es:
550 case misc_reg::Fs:
551 case misc_reg::Gs:
552 case misc_reg::Tsl:
553 if (seg.unusable)
554 break;
555 [[fallthrough]];
556 case misc_reg::Tr:
557 case misc_reg::Cs:
558 if (!seg.present)
559 warn("%s: P flag not set\n", name);
560
561 if (((seg.limit & 0xFFF) == 0 && seg.g) ||
562 ((seg.limit & 0xFFF00000) != 0 && !seg.g)) {
563 warn("%s limit (0x%x) and g (%i) combination is illegal.\n",
564 name, seg.limit, seg.g);
565 }
566 break;
567 }
568
569 // TODO: Check CS DB
570}
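// Note on the magic type values checked above: for code/data
// descriptors the 4-bit type field decodes as code | conforming (or
// expand-down for data) | readable (writable for data) | accessed.
// E.g. CS type 11 == 0b1011 is a non-conforming, readable, accessed
// code segment, while types 13/15 are the conforming variants, which
// is why they get the weaker CS.DPL <= SS.DPL rule.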
571
572X86KvmCPU::X86KvmCPU(const X86KvmCPUParams &params)
573 : BaseKvmCPU(params),
574 useXSave(params.useXSave)
575{}
576
577void
578X86KvmCPU::init()
579{
580 BaseKvmCPU::init();
581
582 Kvm &kvm = *vm->kvm;
583
584 if (!kvm.capSetTSSAddress())
585 panic("KVM: Missing capability (KVM_CAP_SET_TSS_ADDR)\n");
586 if (!kvm.capExtendedCPUID())
587 panic("KVM: Missing capability (KVM_CAP_EXT_CPUID)\n");
588 if (!kvm.capUserNMI())
589 warn("KVM: Missing capability (KVM_CAP_USER_NMI)\n");
590 if (!kvm.capVCPUEvents())
591 warn("KVM: Missing capability (KVM_CAP_VCPU_EVENTS)\n");
592
593 haveDebugRegs = kvm.capDebugRegs();
594 haveXSave = kvm.capXSave();
595 haveXCRs = kvm.capXCRs();
596
597 if (useXSave && !haveXSave) {
598 warn("KVM: XSAVE not supported by host. MXCSR synchronization might "
599 "be unreliable due to kernel bugs.\n");
600 useXSave = false;
601 } else if (!useXSave) {
602 warn("KVM: XSave FPU/SIMD synchronization disabled by user.\n");
603 }
604}
605
606X86KvmCPU::~X86KvmCPU()
607{
608}
609
610void
611X86KvmCPU::startup()
612{
613 BaseKvmCPU::startup();
614
615 updateCPUID();
616
617 // TODO: Do we need to create an identity mapped TSS area? We
618 // should call kvm.vm.setTSSAddress() here in that case. It should
619 // only be needed for old versions of the virtualization
620 // extensions. We should make sure that the identity range is
621 // reserved in the e820 memory map in that case.
622}
623
624void
625X86KvmCPU::dump() const
626{
627 dumpIntRegs();
628 if (useXSave)
629 dumpXSave();
630 else
631 dumpFpuRegs();
632 dumpSpecRegs();
633 dumpDebugRegs();
634 dumpXCRs();
635 dumpVCpuEvents();
636 dumpMSRs();
637}
638
639void
640X86KvmCPU::dumpFpuRegs() const
641{
642 struct kvm_fpu fpu;
643 getFPUState(fpu);
644 dumpKvm(fpu);
645}
646
647void
648X86KvmCPU::dumpIntRegs() const
649{
650 struct kvm_regs regs;
651 getRegisters(regs);
652 dumpKvm(regs);
653}
654
655void
656X86KvmCPU::dumpSpecRegs() const
657{
658 struct kvm_sregs sregs;
659 getSpecialRegisters(sregs);
660 dumpKvm(sregs);
661}
662
663void
664X86KvmCPU::dumpDebugRegs() const
665{
666 if (haveDebugRegs) {
667#ifdef KVM_GET_DEBUGREGS
668 struct kvm_debugregs dregs;
669 getDebugRegisters(dregs);
670 dumpKvm(dregs);
671#endif
672 } else {
673 inform("Debug registers not supported by kernel.\n");
674 }
675}
676
677void
678X86KvmCPU::dumpXCRs() const
679{
680 if (haveXCRs) {
681 struct kvm_xcrs xcrs;
682 getXCRs(xcrs);
683 dumpKvm(xcrs);
684 } else {
685 inform("XCRs not supported by kernel.\n");
686 }
687}
688
689void
690X86KvmCPU::dumpXSave() const
691{
692 if (haveXSave) {
693 struct kvm_xsave xsave;
694 getXSave(xsave);
695 dumpKvm(xsave);
696 } else {
697 inform("XSave not supported by kernel.\n");
698 }
699}
700
701void
702X86KvmCPU::dumpVCpuEvents() const
703{
704 struct kvm_vcpu_events events;
705 getVCpuEvents(events);
706 dumpKvm(events);
707}
708
709void
710X86KvmCPU::dumpMSRs() const
711{
712 const Kvm::MSRIndexVector &supported_msrs = vm->kvm->getSupportedMSRs();
713 auto msrs = newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(
714 supported_msrs.size());
715
716 msrs->nmsrs = supported_msrs.size();
717 for (int i = 0; i < supported_msrs.size(); ++i) {
718 struct kvm_msr_entry &e(msrs->entries[i]);
719 e.index = supported_msrs[i];
720 e.reserved = 0;
721 e.data = 0;
722 }
723 getMSRs(*msrs.get());
724
725 dumpKvm(*msrs.get());
726}
727
728void
729X86KvmCPU::updateKvmState()
730{
731 updateKvmStateRegs();
732 updateKvmStateSRegs();
733 updateKvmStateFPU();
734 updateKvmStateMSRs();
735 updateKvmStateXCRs();
736
737 DPRINTF(KvmContext, "X86KvmCPU::updateKvmState():\n");
738 if (debug::KvmContext)
739 dump();
740}
741
742void
743X86KvmCPU::updateKvmStateRegs()
744{
745 struct kvm_regs regs;
746
747#define APPLY_IREG(kreg, mreg) regs.kreg = tc->getReg(mreg)
748 FOREACH_IREG();
749#undef APPLY_IREG
750
751 regs.rip = tc->pcState().instAddr() - tc->readMiscReg(misc_reg::CsBase);
752
753 /* You might think that setting regs.rflags to the contents of
754 * misc_reg::Rflags here would suffice. In that case you're
755 * mistaken. We need to reconstruct it from a bunch of ucode
756 * registers and wave a dead chicken over it (aka mask out and set
757 * reserved bits) to get it to work.
758 */
759 regs.rflags = X86ISA::getRFlags(tc);
760
761 setRegisters(regs);
762}
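// Note: KVM wants rip as a CS-relative offset while gem5's pcState()
// holds the linear address, hence the CsBase subtraction above. In
// 64-bit mode CsBase is 0 and the two are identical; in real mode,
// e.g., a linear PC of 0xffff0 with CsBase 0xf0000 becomes rip 0xfff0.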
763
764static inline void
765setKvmSegmentReg(ThreadContext *tc, struct kvm_segment &kvm_seg,
766 const int index)
767{
768 SegAttr attr(tc->readMiscRegNoEffect(misc_reg::segAttr(index)));
769
770 kvm_seg.base = tc->readMiscRegNoEffect(misc_reg::segBase(index));
771 kvm_seg.limit = tc->readMiscRegNoEffect(misc_reg::segLimit(index));
772 kvm_seg.selector = tc->readMiscRegNoEffect(misc_reg::segSel(index));
773 kvm_seg.type = attr.type;
774 kvm_seg.present = attr.present;
775 kvm_seg.dpl = attr.dpl;
776 kvm_seg.db = attr.defaultSize;
777 kvm_seg.s = attr.system;
778 kvm_seg.l = attr.longMode;
779 kvm_seg.g = attr.granularity;
780 kvm_seg.avl = attr.avl;
781 kvm_seg.unusable = attr.unusable;
782}
783
784static inline void
785setKvmDTableReg(ThreadContext *tc, struct kvm_dtable &kvm_dtable,
786 const int index)
787{
788 kvm_dtable.base = tc->readMiscRegNoEffect(misc_reg::segBase(index));
789 kvm_dtable.limit = tc->readMiscRegNoEffect(misc_reg::segLimit(index));
790}
791
792static void
793forceSegAccessed(struct kvm_segment &seg)
794{
795 // Intel's VMX requires that (some) usable segments are flagged as
796 // 'accessed' (i.e., the lowest bit in the segment type is set)
797 // when entering VMX. This wouldn't necessarily be the case even if
798 // gem5 did set the access bits correctly, so we force it to one
799 // in that case.
800 if (!seg.unusable)
801 seg.type |= SEG_TYPE_BIT_ACCESSED;
802}
803
804void
805X86KvmCPU::updateKvmStateSRegs()
806{
807 struct kvm_sregs sregs;
808
809#define APPLY_SREG(kreg, mreg) sregs.kreg = tc->readMiscRegNoEffect(mreg)
810#define APPLY_SEGMENT(kreg, idx) setKvmSegmentReg(tc, sregs.kreg, idx)
811#define APPLY_DTABLE(kreg, idx) setKvmDTableReg(tc, sregs.kreg, idx)
812
813 FOREACH_SREG();
814 FOREACH_SEGMENT();
815 FOREACH_DTABLE();
816
817#undef APPLY_SREG
818#undef APPLY_SEGMENT
819#undef APPLY_DTABLE
820
821 // Clear the interrupt bitmap
822 memset(&sregs.interrupt_bitmap, 0, sizeof(sregs.interrupt_bitmap));
823
824 // VMX requires CS, SS, DS, ES, FS, and GS to have the accessed
825 // bit in the type field set.
826 forceSegAccessed(sregs.cs);
827 forceSegAccessed(sregs.ss);
828 forceSegAccessed(sregs.ds);
829 forceSegAccessed(sregs.es);
830 forceSegAccessed(sregs.fs);
831 forceSegAccessed(sregs.gs);
832
833 // There are currently some cases where the active task isn't
834 // marked as busy. This is illegal in VMX, so we force it to busy.
835 if (sregs.tr.type == SEG_SYS_TYPE_TSS_AVAILABLE) {
836 hack("tr.type (%i) is not busy. Forcing the busy bit.\n",
837 sregs.tr.type);
838 sregs.tr.type = SEG_SYS_TYPE_TSS_BUSY;
839 }
840
841 // VMX requires the DPL of SS and CS to be the same for
842 // non-conforming code segments. It seems like m5 doesn't set the
843 // DPL of SS correctly when taking interrupts, so we need to fix
844 // that here.
845 if ((sregs.cs.type == SEG_CS_TYPE_ACCESSED ||
846 sregs.cs.type == SEG_CS_TYPE_READ_ACCESSED) &&
847 sregs.cs.dpl != sregs.ss.dpl) {
848
849 hack("CS.DPL (%i) != SS.DPL (%i): Forcing SS.DPL to %i\n",
850 sregs.cs.dpl, sregs.ss.dpl, sregs.cs.dpl);
851 sregs.ss.dpl = sregs.cs.dpl;
852 }
853
854 // Do checks after fixing up the state to avoid getting excessive
855 // amounts of warnings.
856 RFLAGS rflags_nocc(tc->readMiscReg(misc_reg::Rflags));
857 if (!rflags_nocc.vm) {
858 // Do segment verification if the CPU isn't entering virtual
859 // 8086 mode. We currently assume that unrestricted guest
860 // mode is available.
861
862#define APPLY_SEGMENT(kreg, idx) \
863 checkSeg(# kreg, idx + misc_reg::SegSelBase, sregs.kreg, sregs)
864
865 FOREACH_SEGMENT();
866#undef APPLY_SEGMENT
867 }
868
869 setSpecialRegisters(sregs);
870}
871
872template <typename T>
873static void
874updateKvmStateFPUCommon(ThreadContext *tc, T &fpu)
875{
876 fpu.mxcsr = tc->readMiscRegNoEffect(misc_reg::Mxcsr);
877 fpu.fcw = tc->readMiscRegNoEffect(misc_reg::Fcw);
878 // No need to rebuild from misc_reg::Fsw and misc_reg::Top if we read
879 // with effects.
880 fpu.fsw = tc->readMiscReg(misc_reg::Fsw);
881
882 uint64_t ftw(tc->readMiscRegNoEffect(misc_reg::Ftw));
883 fpu.ftwx = X86ISA::convX87TagsToXTags(ftw);
884
885 fpu.last_opcode = tc->readMiscRegNoEffect(misc_reg::Fop);
886
887 const unsigned top((fpu.fsw >> 11) & 0x7);
888 for (int i = 0; i < 8; ++i) {
889 const unsigned reg_idx((i + top) & 0x7);
890 const double value(bitsToFloat64(
891 tc->getReg(float_reg::fpr(reg_idx))));
892 DPRINTF(KvmContext, "Setting KVM FP reg %i (st[%i]) := %f\n",
893 reg_idx, i, value);
894 X86ISA::storeFloat80(fpu.fpr[i], value);
895 }
896
897 // TODO: We should update the MMX state
898
899 for (int i = 0; i < 16; ++i) {
900 *(uint64_t *)&fpu.xmm[i][0] =
901 tc->getReg(float_reg::xmmLow(i));
902 *(uint64_t *)&fpu.xmm[i][8] =
903 tc->getReg(float_reg::xmmHigh(i));
904 }
905}
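// Note: fpu.fpr[i] holds ST(i), i.e. registers counted from the
// current top of the x87 stack, while gem5 stores physical registers.
// With TOP taken from fsw bits 13..11, the physical index is
// (i + top) & 7; for example, top == 5 maps ST0, ST1 and ST2 to
// physical registers 5, 6 and 7.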
906
907void
908X86KvmCPU::updateKvmStateFPULegacy()
909{
910 struct kvm_fpu fpu;
911
912 // There is some padding in the FP registers, so we'd better zero
913 // the whole struct.
914 memset(&fpu, 0, sizeof(fpu));
915
916 updateKvmStateFPUCommon(tc, fpu);
917
918 if (tc->readMiscRegNoEffect(misc_reg::Fiseg))
919 warn_once("misc_reg::Fiseg is non-zero.\n");
920
921 fpu.last_ip = tc->readMiscRegNoEffect(misc_reg::Fioff);
922
923 if (tc->readMiscRegNoEffect(misc_reg::Foseg))
924 warn_once("misc_reg::Foseg is non-zero.\n");
925
926 fpu.last_dp = tc->readMiscRegNoEffect(misc_reg::Fooff);
927
928 setFPUState(fpu);
929}
930
931void
932X86KvmCPU::updateKvmStateFPUXSave()
933{
934 struct kvm_xsave kxsave;
935 FXSave &xsave(*(FXSave *)kxsave.region);
936
937 // There is some padding and reserved fields in the structure, so
938 // we'd better zero the whole thing.
939 memset(&kxsave, 0, sizeof(kxsave));
940
941 updateKvmStateFPUCommon(tc, xsave);
942
951 XSaveHeader& xsave_hdr =
952 * (XSaveHeader *) ((char *) &kxsave + sizeof(FXSave));
953 xsave_hdr.xstate_bv.fpu = 1;
954 xsave_hdr.xstate_bv.sse = 1;
955
956 if (tc->readMiscRegNoEffect(misc_reg::Fiseg))
957 warn_once("misc_reg::Fiseg is non-zero.\n");
958
959 xsave.ctrl64.fpu_ip = tc->readMiscRegNoEffect(misc_reg::Fioff);
960
961 if (tc->readMiscRegNoEffect(misc_reg::Foseg))
962 warn_once("misc_reg::Foseg is non-zero.\n");
963
964 xsave.ctrl64.fpu_dp = tc->readMiscRegNoEffect(misc_reg::Fooff);
965
966 setXSave(kxsave);
967}
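// Note: setting xstate_bv.fpu and xstate_bv.sse above is what makes
// KVM_SET_XSAVE actually load the x87 and SSE state assembled here;
// as with XRSTOR, components whose XSTATE_BV bit is clear are put into
// their init state rather than read from the buffer.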
968
969void
970X86KvmCPU::updateKvmStateFPU()
971{
972 if (useXSave)
973 updateKvmStateFPUXSave();
974 else
975 updateKvmStateFPULegacy();
976}
977
978void
979X86KvmCPU::updateKvmStateMSRs()
980{
981 KvmMSRVector msrs;
982
983 const Kvm::MSRIndexVector &indices(getMsrIntersection());
984
985 for (auto it = indices.cbegin(); it != indices.cend(); ++it) {
986 struct kvm_msr_entry e;
987
988 e.index = *it;
989 e.reserved = 0;
990 e.data = tc->readMiscReg(msrMap.at(*it));
991 DPRINTF(KvmContext, "Adding MSR: idx: 0x%x, data: 0x%x\n",
992 e.index, e.data);
993
994 msrs.push_back(e);
995 }
996
997 setMSRs(msrs);
998}
999
1000void
1002{
1003 if (haveXCRs) {
1004 struct kvm_xcrs xcrs;
1005
1006 xcrs.nr_xcrs = NumXCRegs;
1007 xcrs.flags = 0;
1008
1009 for (int i = 0; i < xcrs.nr_xcrs; ++i) {
1010 xcrs.xcrs[i].xcr = i;
1011 xcrs.xcrs[i].value = tc->readMiscReg(misc_reg::xcr(i));
1012 }
1013
1014 setXCRs(xcrs);
1015 }
1016}
1017
1018void
1019X86KvmCPU::updateThreadContext()
1020{
1021 struct kvm_regs regs;
1022 struct kvm_sregs sregs;
1023
1024 getRegisters(regs);
1025 getSpecialRegisters(sregs);
1026
1027 DPRINTF(KvmContext, "X86KvmCPU::updateThreadContext():\n");
1028 if (debug::KvmContext)
1029 dump();
1030
1031 updateThreadContextRegs(regs, sregs);
1032 updateThreadContextSRegs(sregs);
1033 if (useXSave) {
1034 struct kvm_xsave xsave;
1035 getXSave(xsave);
1036
1037 updateThreadContextXSave(xsave);
1038 } else {
1039 struct kvm_fpu fpu;
1040 getFPUState(fpu);
1041
1042 updateThreadContextFPU(fpu);
1043 }
1044 updateThreadContextMSRs();
1045 updateThreadContextXCRs();
1046
1047 // The M5 misc reg caches some values from other
1048 // registers. Writing to it with side effects causes it to be
1049 // updated from its source registers.
1050 tc->setMiscReg(misc_reg::M5Reg, 0);
1051}
1052
1053void
1054X86KvmCPU::updateThreadContextRegs(const struct kvm_regs &regs,
1055 const struct kvm_sregs &sregs)
1056{
1057#define APPLY_IREG(kreg, mreg) tc->setReg(mreg, regs.kreg)
1058
1059 FOREACH_IREG();
1060
1061#undef APPLY_IREG
1062
1063 tc->pcState(PCState(regs.rip + sregs.cs.base));
1064
1065 // Flags are spread out across multiple semi-magic registers so we
1066 // need some special care when updating them.
1067 X86ISA::setRFlags(tc, regs.rflags);
1068}
1069
1070
1071inline void
1072setContextSegment(ThreadContext *tc, const struct kvm_segment &kvm_seg,
1073 const int index)
1074{
1075 SegAttr attr(0);
1076
1077 attr.type = kvm_seg.type;
1078 attr.present = kvm_seg.present;
1079 attr.dpl = kvm_seg.dpl;
1080 attr.defaultSize = kvm_seg.db;
1081 attr.system = kvm_seg.s;
1082 attr.longMode = kvm_seg.l;
1083 attr.granularity = kvm_seg.g;
1084 attr.avl = kvm_seg.avl;
1085 attr.unusable = kvm_seg.unusable;
1086
1087 // We need some setMiscReg magic here to keep the effective base
1088 // addresses in sync. We need an up-to-date version of EFER, so
1089 // make sure this is called after the sregs have been synced.
1090 tc->setMiscReg(misc_reg::segBase(index), kvm_seg.base);
1091 tc->setMiscReg(misc_reg::segLimit(index), kvm_seg.limit);
1092 tc->setMiscReg(misc_reg::segSel(index), kvm_seg.selector);
1093 tc->setMiscReg(misc_reg::segAttr(index), attr);
1094}
1095
1096inline void
1097setContextSegment(ThreadContext *tc, const struct kvm_dtable &kvm_dtable,
1098 const int index)
1099{
1100 // We need some setMiscReg magic here to keep the effective base
1101 // addresses in sync. We need an up-to-date version of EFER, so
1102 // make sure this is called after the sregs have been synced.
1103 tc->setMiscReg(misc_reg::segBase(index), kvm_dtable.base);
1104 tc->setMiscReg(misc_reg::segLimit(index), kvm_dtable.limit);
1105}
1106
1107void
1108X86KvmCPU::updateThreadContextSRegs(const struct kvm_sregs &sregs)
1109{
1110 assert(getKvmRunState()->apic_base == sregs.apic_base);
1111 assert(getKvmRunState()->cr8 == sregs.cr8);
1112
1113#define APPLY_SREG(kreg, mreg) tc->setMiscRegNoEffect(mreg, sregs.kreg)
1114#define APPLY_SEGMENT(kreg, idx) setContextSegment(tc, sregs.kreg, idx)
1115#define APPLY_DTABLE(kreg, idx) setContextSegment(tc, sregs.kreg, idx)
1116 FOREACH_SREG();
1117 FOREACH_SEGMENT();
1118 FOREACH_DTABLE();
1119#undef APPLY_SREG
1120#undef APPLY_SEGMENT
1121#undef APPLY_DTABLE
1122}
1123
1124template<typename T>
1125static void
1126updateThreadContextFPUCommon(ThreadContext *tc, const T &fpu)
1127{
1128 const unsigned top((fpu.fsw >> 11) & 0x7);
1129
1130 for (int i = 0; i < 8; ++i) {
1131 const unsigned reg_idx((i + top) & 0x7);
1132 const double value(X86ISA::loadFloat80(fpu.fpr[i]));
1133 DPRINTF(KvmContext, "Setting gem5 FP reg %i (st[%i]) := %f\n",
1134 reg_idx, i, value);
1135 tc->setReg(float_reg::fpr(reg_idx), floatToBits64(value));
1136 }
1137
1138 // TODO: We should update the MMX state
1139
1140 tc->setMiscRegNoEffect(misc_reg::X87Top, top);
1141 tc->setMiscRegNoEffect(misc_reg::Mxcsr, fpu.mxcsr);
1142 tc->setMiscRegNoEffect(misc_reg::Fcw, fpu.fcw);
1143 tc->setMiscRegNoEffect(misc_reg::Fsw, fpu.fsw);
1144
1145 uint64_t ftw(convX87XTagsToTags(fpu.ftwx));
1146 // TODO: Are these registers really the same?
1147 tc->setMiscRegNoEffect(misc_reg::Ftw, ftw);
1148 tc->setMiscRegNoEffect(misc_reg::Ftag, ftw);
1149
1150 tc->setMiscRegNoEffect(misc_reg::Fop, fpu.last_opcode);
1151
1152 for (int i = 0; i < 16; ++i) {
1153 tc->setReg(float_reg::xmmLow(i), *(uint64_t *)&fpu.xmm[i][0]);
1154 tc->setReg(float_reg::xmmHigh(i), *(uint64_t *)&fpu.xmm[i][8]);
1155 }
1156}
1157
1158void
1159X86KvmCPU::updateThreadContextFPU(const struct kvm_fpu &fpu)
1160{
1161 updateThreadContextFPUCommon(tc, fpu);
1162
1163 tc->setMiscRegNoEffect(misc_reg::Fiseg, 0);
1164 tc->setMiscRegNoEffect(misc_reg::Fioff, fpu.last_ip);
1165 tc->setMiscRegNoEffect(misc_reg::Foseg, 0);
1166 tc->setMiscRegNoEffect(misc_reg::Fooff, fpu.last_dp);
1167}
1168
1169void
1170X86KvmCPU::updateThreadContextXSave(const struct kvm_xsave &kxsave)
1171{
1172 const FXSave &xsave(*(const FXSave *)kxsave.region);
1173
1174 updateThreadContextFPUCommon(tc, xsave);
1175
1176 tc->setMiscRegNoEffect(misc_reg::Fiseg, 0);
1177 tc->setMiscRegNoEffect(misc_reg::Fioff, xsave.ctrl64.fpu_ip);
1178 tc->setMiscRegNoEffect(misc_reg::Foseg, 0);
1179 tc->setMiscRegNoEffect(misc_reg::Fooff, xsave.ctrl64.fpu_dp);
1180}
1181
1182void
1183X86KvmCPU::updateThreadContextMSRs()
1184{
1185 const Kvm::MSRIndexVector &msrs(getMsrIntersection());
1186
1187 auto kvm_msrs = newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(
1188 msrs.size());
1189 struct kvm_msr_entry *entry;
1190
1191 // Create a list of MSRs to read
1192 kvm_msrs->nmsrs = msrs.size();
1193 entry = &kvm_msrs->entries[0];
1194 for (auto it = msrs.cbegin(); it != msrs.cend(); ++it, ++entry) {
1195 entry->index = *it;
1196 entry->reserved = 0;
1197 entry->data = 0;
1198 }
1199
1200 getMSRs(*kvm_msrs.get());
1201
1202 // Update M5's state
1203 entry = &kvm_msrs->entries[0];
1204 for (int i = 0; i < kvm_msrs->nmsrs; ++i, ++entry) {
1205 DPRINTF(KvmContext, "Setting M5 MSR: idx: 0x%x, data: 0x%x\n",
1206 entry->index, entry->data);
1207
1208 tc->setMiscReg(X86ISA::msrMap.at(entry->index), entry->data);
1209 }
1210}
1211
1212void
1213X86KvmCPU::updateThreadContextXCRs()
1214{
1215 if (haveXCRs) {
1216 struct kvm_xcrs xcrs;
1217
1218 getXCRs(xcrs);
1219
1220 for (int i = 0; i < xcrs.nr_xcrs; ++i) {
1221 tc->setMiscReg(misc_reg::xcr(xcrs.xcrs[i].xcr),
1222 xcrs.xcrs[i].value);
1223 }
1224 }
1225}
1226
1227void
1228X86KvmCPU::deliverInterrupts()
1229{
1230 Fault fault;
1231
1232 syncThreadContext();
1233
1234 {
1235 // Migrate to the interrupt controller's thread to get the
1236 // interrupt. Even though the individual methods are safe to
1237 // call across threads, we might still lose interrupts unless
1238 // getInterrupt() and updateIntrInfo() are called
1239 // atomically.
1240 EventQueue::ScopedMigration migrate(interrupts[0]->eventQueue());
1241 fault = interrupts[0]->getInterrupt();
1242 interrupts[0]->updateIntrInfo();
1243 }
1244
1245 X86Interrupt *x86int(dynamic_cast<X86Interrupt *>(fault.get()));
1246 if (dynamic_cast<NonMaskableInterrupt *>(fault.get())) {
1247 DPRINTF(KvmInt, "Delivering NMI\n");
1248 kvmNonMaskableInterrupt();
1249 } else if (dynamic_cast<InitInterrupt *>(fault.get())) {
1250 DPRINTF(KvmInt, "INIT interrupt\n");
1251 fault.get()->invoke(tc);
1252 // Delay the kvm state update since we won't enter KVM on this
1253 // tick.
1254 threadContextDirty = true;
1255 // HACK: gem5 doesn't actually have any BIOS code, which means
1256 // that we need to halt the thread and wait for a startup
1257 // interrupt before restarting the thread. The simulated CPUs
1258 // use the same kind of hack using a microcode routine.
1259 thread->suspend();
1260 } else if (dynamic_cast<StartupInterrupt *>(fault.get())) {
1261 DPRINTF(KvmInt, "STARTUP interrupt\n");
1262 fault.get()->invoke(tc);
1263 // The kvm state is assumed to have been updated when entering
1264 // kvmRun(), so we need to update it manually here.
1265 updateKvmState();
1266 } else if (x86int) {
1267 struct kvm_interrupt kvm_int;
1268 kvm_int.irq = x86int->getVector();
1269
1270 DPRINTF(KvmInt, "Delivering interrupt: %s (%u)\n",
1271 fault->name(), kvm_int.irq);
1272
1273 kvmInterrupt(kvm_int);
1274 } else {
1275 panic("KVM: Unknown interrupt type\n");
1276 }
1277
1278}
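// Note: the delivery paths above map onto different KVM mechanisms:
// NMIs use the dedicated KVM_NMI injection (kvmNonMaskableInterrupt()),
// INIT and STARTUP are emulated on the gem5 side by invoking the fault
// and suspending or refreshing the thread, and ordinary vectored
// interrupts are handed to the guest through kvmInterrupt() with their
// 8-bit vector.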
1279
1280Tick
1281X86KvmCPU::kvmRun(Tick ticks)
1282{
1283 struct kvm_run &kvm_run(*getKvmRunState());
1284
1285 auto *lapic = dynamic_cast<X86ISA::Interrupts *>(interrupts[0]);
1286
1287 if (lapic->checkInterruptsRaw()) {
1288 if (lapic->hasPendingUnmaskable()) {
1289 DPRINTF(KvmInt,
1290 "Delivering unmaskable interrupt.\n");
1291 syncThreadContext();
1292 deliverInterrupts();
1293 } else if (kvm_run.ready_for_interrupt_injection) {
1294 // KVM claims that it is ready for an interrupt. It might
1295 // be lying if we just updated rflags and disabled
1296 // interrupts (e.g., by doing a CPU handover). Let's sync
1297 // the thread context and check if there are /really/
1298 // interrupts that should be delivered now.
1299 syncThreadContext();
1300 if (lapic->checkInterrupts()) {
1301 DPRINTF(KvmInt,
1302 "M5 has pending interrupts, delivering interrupt.\n");
1303
1304 deliverInterrupts();
1305 } else {
1306 DPRINTF(KvmInt,
1307 "Interrupt delivery delayed due to KVM confusion.\n");
1308 kvm_run.request_interrupt_window = 1;
1309 }
1310 } else if (!kvm_run.request_interrupt_window) {
1311 DPRINTF(KvmInt,
1312 "M5 has pending interrupts, requesting interrupt "
1313 "window.\n");
1314 kvm_run.request_interrupt_window = 1;
1315 }
1316 } else {
1317 kvm_run.request_interrupt_window = 0;
1318 }
1319
1320 // The CPU might have been suspended as a result of the INIT
1321 // interrupt delivery hack. In that case, don't enter into KVM.
1322 if (_status == Idle)
1323 return 0;
1324 else
1325 return BaseKvmCPU::kvmRun(ticks);
1326}
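// Note: request_interrupt_window implements the usual KVM handshake
// for injecting interrupts at a safe point: if the guest cannot take
// an interrupt right now (e.g. RFLAGS.IF is clear), the host sets
// request_interrupt_window = 1 and KVM exits with
// KVM_EXIT_IRQ_WINDOW_OPEN once injection becomes possible, so a later
// kvmRun() pass can deliver the pending gem5 interrupt.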
1327
1328Tick
1329X86KvmCPU::kvmRunDrain()
1330{
1331 struct kvm_run &kvm_run(*getKvmRunState());
1332
1333 if (!archIsDrained()) {
1334 DPRINTF(Drain, "kvmRunDrain: Architecture code isn't drained\n");
1335
1336 // Tell KVM to find a suitable place to deliver interrupts. This
1337 // should ensure that pending interrupts have been delivered and
1338 // things are reasonably consistent (i.e., no interrupts pending
1339 // in the guest).
1340 kvm_run.request_interrupt_window = 1;
1341
1342 // Limit the run to 1 millisecond. That is hopefully enough to
1343 // reach an interrupt window. Otherwise, we'll just try again
1344 // later.
1345 return BaseKvmCPU::kvmRun(1 * sim_clock::as_float::ms);
1346 } else {
1347 DPRINTF(Drain, "kvmRunDrain: Delivering pending IO\n");
1348
1349 return BaseKvmCPU::kvmRun(0);
1350 }
1351}
1352
1353uint64_t
1354X86KvmCPU::getHostCycles() const
1355{
1356 return getMSR(MSR_TSC);
1357}
1358
1359void
1360X86KvmCPU::handleIOMiscReg32(int miscreg)
1361{
1362 struct kvm_run &kvm_run(*getKvmRunState());
1363 const uint16_t port(kvm_run.io.port);
1364
1365 assert(kvm_run.exit_reason == KVM_EXIT_IO);
1366
1367 if (kvm_run.io.size != 4) {
1368 panic("Unexpected IO size (%u) for address 0x%x.\n",
1369 kvm_run.io.size, port);
1370 }
1371
1372 if (kvm_run.io.count != 1) {
1373 panic("Unexpected IO count (%u) for address 0x%x.\n",
1374 kvm_run.io.count, port);
1375 }
1376
1377 uint32_t *data((uint32_t *)getGuestData(kvm_run.io.data_offset));
1378 if (kvm_run.io.direction == KVM_EXIT_IO_OUT)
1379 tc->setMiscReg(miscreg, *data);
1380 else
1381 *data = tc->readMiscRegNoEffect(miscreg);
1382}
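// Example of the 0xCF8/0xCFC mechanism handled here and below: an OUT
// of 0x80001008 to port 0xCF8 latches enable bit 31, bus 0, device 2,
// function 0, register offset 0x08; subsequent accesses to ports
// 0xCFC-0xCFF are then routed to that dword of PCI configuration
// space, with the low two port bits selecting a byte within it.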
1383
1384Tick
1385X86KvmCPU::handleKvmExitIO()
1386{
1387 struct kvm_run &kvm_run(*getKvmRunState());
1388 bool isWrite(kvm_run.io.direction == KVM_EXIT_IO_OUT);
1389 unsigned char *guestData(getGuestData(kvm_run.io.data_offset));
1390 Tick delay(0);
1391 uint16_t port(kvm_run.io.port);
1392 Addr pAddr;
1393 const int count(kvm_run.io.count);
1394
1395 assert(kvm_run.io.direction == KVM_EXIT_IO_IN ||
1396 kvm_run.io.direction == KVM_EXIT_IO_OUT);
1397
1398 DPRINTF(KvmIO, "KVM-x86: Handling IO instruction (%s) (port: 0x%x)\n",
1399 (isWrite ? "out" : "in"), kvm_run.io.port);
1400
1401 /* Vanilla gem5 handles PCI discovery in the TLB(!). Since we
1402 * don't use the TLB component, we need to intercept and handle
1403 * the PCI configuration space IO ports here.
1404 *
1405 * The IO port PCI discovery mechanism uses one address register
1406 * and one data register. We map the address register to a misc
1407 * reg and use that to re-route data register accesses to the
1408 * right location in the PCI configuration space.
1409 */
1410 if (port == IO_PCI_CONF_ADDR) {
1411 handleIOMiscReg32(misc_reg::PciConfigAddress);
1412 return 0;
1413 } else if ((port & ~0x3) == IO_PCI_CONF_DATA_BASE) {
1414 Addr pciConfigAddr(tc->readMiscRegNoEffect(
1415 misc_reg::PciConfigAddress));
1416 if (pciConfigAddr & 0x80000000) {
1417 pAddr = X86ISA::x86PciConfigAddress((pciConfigAddr & 0x7ffffffc) |
1418 (port & 0x3));
1419 } else {
1420 pAddr = X86ISA::x86IOAddress(port);
1421 }
1422 } else {
1423 pAddr = X86ISA::x86IOAddress(port);
1424 }
1425
1426 const MemCmd cmd(isWrite ? MemCmd::WriteReq : MemCmd::ReadReq);
1427 // Temporarily lock and migrate to the device event queue to
1428 // prevent races in multi-core mode.
1429 EventQueue::ScopedMigration migrate(deviceEventQueue());
1430 for (int i = 0; i < count; ++i) {
1431 RequestPtr io_req = std::make_shared<Request>(
1432 pAddr, kvm_run.io.size,
1433 Request::UNCACHEABLE, dataRequestorId());
1434
1435 io_req->setContext(tc->contextId());
1436
1437 PacketPtr pkt = new Packet(io_req, cmd);
1438
1439 pkt->dataStatic(guestData);
1440 delay += dataPort.submitIO(pkt);
1441
1442 guestData += kvm_run.io.size;
1443 }
1444
1445 return delay;
1446}
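// Note: string IO instructions (REP INS/OUTS) show up as a single exit
// with kvm_run.io.count > 1, which is why the loop above issues one
// uncacheable request per element and advances guestData by io.size
// each iteration, accumulating the device access latency in delay.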
1447
1448Tick
1449X86KvmCPU::handleKvmExitIRQWindowOpen()
1450{
1451 // We don't need to do anything here since this is caught the next
1452 // time we execute kvmRun(). We still overload the exit event to
1453 // silence the warning about an unhandled exit event.
1454 return 0;
1455}
1456
1457bool
1458X86KvmCPU::archIsDrained() const
1459{
1460 struct kvm_vcpu_events events;
1461
1462 getVCpuEvents(events);
1463
1464 // We could probably handle this by re-inserting interrupts
1465 // that are pending into gem5 on a drain. However, that would
1466 // probably be tricky to do reliably, so we'll just prevent a
1467 // drain if there is anything pending in the
1468 // guest. X86KvmCPU::kvmRunDrain() minimizes the amount of code
1469 // executed in the guest by requesting an interrupt window if
1470 // there are pending interrupts.
1471 const bool pending_events(events.exception.injected ||
1472 events.interrupt.injected ||
1473 events.nmi.injected || events.nmi.pending);
1474
1475 if (pending_events) {
1476 DPRINTF(Drain, "archIsDrained: Pending events: %s %s %s %s\n",
1477 events.exception.injected ? "exception" : "",
1478 events.interrupt.injected ? "interrupt" : "",
1479 events.nmi.injected ? "nmi[i]" : "",
1480 events.nmi.pending ? "nmi[p]" : "");
1481 }
1482
1483 return !pending_events;
1484}
1485
1486void
1487X86KvmCPU::ioctlRun()
1488{
1489 struct kvm_run &kvm_run(*getKvmRunState());
1490
1491 // Synchronize the APIC base and CR8 here since they are present
1492 // in the kvm_run struct, which makes the synchronization really
1493 // cheap.
1494 kvm_run.apic_base = tc->readMiscReg(misc_reg::ApicBase);
1495 kvm_run.cr8 = tc->readMiscReg(misc_reg::Cr8);
1496
1497 BaseKvmCPU::ioctlRun();
1498
1499 tc->setMiscReg(misc_reg::ApicBase, kvm_run.apic_base);
1500 kvm_run.cr8 = tc->readMiscReg(misc_reg::Cr8);
1501}
1502
1503static struct kvm_cpuid_entry2
1504makeKvmCpuid(uint32_t function, uint32_t index,
1505 CpuidResult &result, uint32_t flags = 0)
1506{
1507 struct kvm_cpuid_entry2 e;
1508 e.function = function;
1509 e.index = index;
1510 e.flags = flags;
1511 e.eax = (uint32_t)result.rax;
1512 e.ebx = (uint32_t)result.rbx;
1513 e.ecx = (uint32_t)result.rcx;
1514 e.edx = (uint32_t)result.rdx;
1515
1516 return e;
1517}
1518
1519void
1520X86KvmCPU::updateCPUID()
1521{
1522 Kvm::CPUIDVector m5_supported;
1523 X86ISA::ISA *isa = dynamic_cast<X86ISA::ISA *>(tc->getIsaPtr());
1524
1525 /* Basic features */
1526 CpuidResult func0;
1527 isa->cpuid->doCpuid(tc, 0x0, 0, func0);
1528 for (uint32_t function = 0; function <= func0.rax; ++function) {
1529 CpuidResult cpuid;
1530 uint32_t idx(0);
1531
1532 if (!isa->cpuid->hasSignificantIndex(function)) {
1533 isa->cpuid->doCpuid(tc, function, idx, cpuid);
1534 m5_supported.push_back(makeKvmCpuid(function, idx, cpuid));
1535 } else {
1536 while (true) {
1537 [[maybe_unused]] bool rv = isa->cpuid->doCpuid(
1538 tc, function, idx, cpuid);
1539 assert(rv);
1540
1541 if (idx &&
1542 !cpuid.rax && !cpuid.rbx && !cpuid.rdx && !cpuid.rcx) {
1543 break;
1544 }
1545
1546 /*
1547 * For functions in family 0, this flag tells Linux to compare
1548 * the index as well as the function number rather than only
1549 * the function number. Important: Do NOT set this flag if the
1550 * function does not take an index. Doing so will break SMP.
1551 */
1552 uint32_t flag = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
1553 m5_supported.push_back(
1554 makeKvmCpuid(function, idx, cpuid, flag));
1555 idx++;
1556 }
1557 }
1558 }
1559
1560 /* Extended features */
1561 CpuidResult efunc0;
1562 isa->cpuid->doCpuid(tc, 0x80000000, 0, efunc0);
1563 for (uint32_t function = 0x80000000; function <= efunc0.rax; ++function) {
1564 CpuidResult cpuid;
1565 uint32_t idx(0);
1566
1567 if (!isa->cpuid->hasSignificantIndex(function)) {
1568 isa->cpuid->doCpuid(tc, function, idx, cpuid);
1569 m5_supported.push_back(makeKvmCpuid(function, idx, cpuid));
1570 } else {
1571 while (true) {
1572 [[maybe_unused]] bool rv = isa->cpuid->doCpuid(
1573 tc, function, idx, cpuid);
1574 assert(rv);
1575
1576 if (idx &&
1577 !cpuid.rax && !cpuid.rbx && !cpuid.rdx && !cpuid.rcx) {
1578 break;
1579 }
1580
1581 /*
1582 * For functions in family 0, this flag tells Linux to compare
1583 * the index as well as the function number rather than only
1584 * the function number. Important: Do NOT set this flag if the
1585 * function does not take an index. Doing so will break SMP.
1586 */
1587 uint32_t flag = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
1588 m5_supported.push_back(
1589 makeKvmCpuid(function, idx, cpuid, flag));
1590 idx++;
1591 }
1592 }
1593 }
1594
1595 setCPUID(m5_supported);
1596}
1597
1598void
1599X86KvmCPU::setCPUID(const struct kvm_cpuid2 &cpuid)
1600{
1601 if (ioctl(KVM_SET_CPUID2, (void *)&cpuid) == -1)
1602 panic("KVM: Failed to set guest CPUID2 (errno: %i)\n",
1603 errno);
1604}
1605
1606void
1607X86KvmCPU::setCPUID(const Kvm::CPUIDVector &cpuid)
1608{
1609 auto kvm_cpuid = newVarStruct<struct kvm_cpuid2, struct kvm_cpuid_entry2>(
1610 cpuid.size());
1611
1612 kvm_cpuid->nent = cpuid.size();
1613 std::copy(cpuid.begin(), cpuid.end(), kvm_cpuid->entries);
1614
1615 setCPUID(*kvm_cpuid);
1616}
1617
1618void
1619X86KvmCPU::setMSRs(const struct kvm_msrs &msrs)
1620{
1621 if (ioctl(KVM_SET_MSRS, (void *)&msrs) == -1)
1622 panic("KVM: Failed to set guest MSRs (errno: %i)\n",
1623 errno);
1624}
1625
1626void
1627X86KvmCPU::setMSRs(const KvmMSRVector &msrs)
1628{
1629 auto kvm_msrs = newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(
1630 msrs.size());
1631
1632 kvm_msrs->nmsrs = msrs.size();
1633 std::copy(msrs.begin(), msrs.end(), kvm_msrs->entries);
1634
1635 setMSRs(*kvm_msrs);
1636}
1637
1638void
1639X86KvmCPU::getMSRs(struct kvm_msrs &msrs) const
1640{
1641 if (ioctl(KVM_GET_MSRS, (void *)&msrs) == -1)
1642 panic("KVM: Failed to get guest MSRs (errno: %i)\n",
1643 errno);
1644}
1645
1646
1647void
1648X86KvmCPU::setMSR(uint32_t index, uint64_t value)
1649{
1650 auto kvm_msrs = newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(1);
1651 struct kvm_msr_entry &entry(kvm_msrs->entries[0]);
1652
1653 kvm_msrs->nmsrs = 1;
1654 entry.index = index;
1655 entry.reserved = 0;
1656 entry.data = value;
1657
1658 setMSRs(*kvm_msrs.get());
1659}
1660
1661uint64_t
1662X86KvmCPU::getMSR(uint32_t index) const
1663{
1664 auto kvm_msrs = newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(1);
1665 struct kvm_msr_entry &entry(kvm_msrs->entries[0]);
1666
1667 kvm_msrs->nmsrs = 1;
1668 entry.index = index;
1669 entry.reserved = 0;
1670 entry.data = 0;
1671
1672 getMSRs(*kvm_msrs.get());
1673 return entry.data;
1674}
1675
1676const Kvm::MSRIndexVector &
1677X86KvmCPU::getMsrIntersection() const
1678{
1679 if (cachedMsrIntersection.empty()) {
1680 const Kvm::MSRIndexVector &kvm_msrs = vm->kvm->getSupportedMSRs();
1681
1682 DPRINTF(Kvm, "kvm-x86: Updating MSR intersection\n");
1683 for (auto it = kvm_msrs.cbegin(); it != kvm_msrs.cend(); ++it) {
1684 if (X86ISA::msrMap.find(*it) != X86ISA::msrMap.end()) {
1685 cachedMsrIntersection.push_back(*it);
1686 DPRINTF(Kvm, "kvm-x86: Adding MSR 0x%x\n", *it);
1687 } else {
1688 warn("kvm-x86: MSR (0x%x) unsupported by gem5. Skipping.\n",
1689 *it);
1690 }
1691 }
1692 }
1693
1694 return cachedMsrIntersection;
1695}
1696
1697void
1698X86KvmCPU::getDebugRegisters(struct kvm_debugregs &regs) const
1699{
1700#ifdef KVM_GET_DEBUGREGS
1701 if (ioctl(KVM_GET_DEBUGREGS, &regs) == -1)
1702 panic("KVM: Failed to get guest debug registers\n");
1703#else
1704 panic("KVM: Unsupported getDebugRegisters call.\n");
1705#endif
1706}
1707
1708void
1709X86KvmCPU::setDebugRegisters(const struct kvm_debugregs &regs)
1710{
1711#ifdef KVM_SET_DEBUGREGS
1712 if (ioctl(KVM_SET_DEBUGREGS, (void *)&regs) == -1)
1713 panic("KVM: Failed to set guest debug registers\n");
1714#else
1715 panic("KVM: Unsupported setDebugRegisters call.\n");
1716#endif
1717}
1718
1719void
1720X86KvmCPU::getXCRs(struct kvm_xcrs &regs) const
1721{
1722 if (ioctl(KVM_GET_XCRS, &regs) == -1)
1723 panic("KVM: Failed to get guest XCRs\n");
1724}
1725
1726void
1727X86KvmCPU::setXCRs(const struct kvm_xcrs &regs)
1728{
1729 if (ioctl(KVM_SET_XCRS, (void *)&regs) == -1)
1730 panic("KVM: Failed to set guest XCRs\n");
1731}
1732
1733void
1734X86KvmCPU::getXSave(struct kvm_xsave &xsave) const
1735{
1736 if (ioctl(KVM_GET_XSAVE, &xsave) == -1)
1737 panic("KVM: Failed to get guest XSave state\n");
1738}
1739
1740void
1741X86KvmCPU::setXSave(const struct kvm_xsave &xsave)
1742{
1743 if (ioctl(KVM_SET_XSAVE, (void *)&xsave) == -1)
1744 panic("KVM: Failed to set guest XSave state\n");
1745}
1746
1747
1748void
1749X86KvmCPU::getVCpuEvents(struct kvm_vcpu_events &events) const
1750{
1751 if (ioctl(KVM_GET_VCPU_EVENTS, &events) == -1)
1752 panic("KVM: Failed to get guest vCPU events\n");
1753}
1754
1755void
1756X86KvmCPU::setVCpuEvents(const struct kvm_vcpu_events &events)
1757{
1758 if (ioctl(KVM_SET_VCPU_EVENTS, (void *)&events) == -1)
1759 panic("KVM: Failed to set guest vCPU events\n");
1760}
1761
1762} // namespace gem5