gem5 v23.0.0.1
x86_cpu.cc
1/*
2 * Copyright (c) 2013 Andreas Sandberg
3 * All rights reserved
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29#include "cpu/kvm/x86_cpu.hh"
30
31#include <linux/kvm.h>
32
33#include <algorithm>
34#include <cerrno>
35#include <memory>
36
37#include "arch/x86/cpuid.hh"
38#include "arch/x86/faults.hh"
39#include "arch/x86/interrupts.hh"
40#include "arch/x86/regs/float.hh"
41#include "arch/x86/regs/int.hh"
42#include "arch/x86/regs/msr.hh"
43#include "arch/x86/utility.hh"
44#include "base/compiler.hh"
45#include "cpu/kvm/base.hh"
46#include "debug/Drain.hh"
47#include "debug/Kvm.hh"
48#include "debug/KvmContext.hh"
49#include "debug/KvmIO.hh"
50#include "debug/KvmInt.hh"
51
52namespace gem5
53{
54
55using namespace X86ISA;
56
57#define MSR_TSC 0x10
58
59#define IO_PCI_CONF_ADDR 0xCF8
60#define IO_PCI_CONF_DATA_BASE 0xCFC
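// An illustration of the protocol behind these two ports: to access a
// PCI config register, software writes an address word of the form
// (enable << 31) | (bus << 16) | (device << 11) | (function << 8) |
// offset to IO_PCI_CONF_ADDR and then reads or writes the selected
// dword through IO_PCI_CONF_DATA_BASE..IO_PCI_CONF_DATA_BASE + 3.
// handleKvmExitIO() below decodes exactly this pattern.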
61
62// Task segment type of an inactive 32-bit or 64-bit task
63#define SEG_SYS_TYPE_TSS_AVAILABLE 9
64// Task segment type of an active 32-bit or 64-bit task
65#define SEG_SYS_TYPE_TSS_BUSY 11
66
67// Non-conforming accessed code segment
68#define SEG_CS_TYPE_ACCESSED 9
69// Non-conforming accessed code segment that can be read
70#define SEG_CS_TYPE_READ_ACCESSED 11
71
72// The lowest bit of the type field for normal segments (code and
73// data) is used to indicate that a segment has been accessed.
74#define SEG_TYPE_BIT_ACCESSED 1
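// For code and data descriptors the 4-bit type field decodes as:
// bit 3 selects code (1) vs. data (0), bit 1 marks a code segment
// readable (or a data segment writable), and bit 0 is the accessed
// bit. SEG_CS_TYPE_ACCESSED is thus 0b1001 (execute-only, accessed)
// and SEG_CS_TYPE_READ_ACCESSED is 0b1011 (execute/read, accessed);
// forceSegAccessed() below only ever sets bit 0.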
75
76struct GEM5_PACKED FXSave
77{
78 uint16_t fcw;
79 uint16_t fsw;
80 uint8_t ftwx;
81 uint8_t pad0;
82 uint16_t last_opcode;
83 union
84 {
85 struct
86 {
87 uint32_t fpu_ip;
88 uint16_t fpu_cs;
89 uint16_t pad1;
90 uint32_t fpu_dp;
91 uint16_t fpu_ds;
92 uint16_t pad2;
93 } ctrl32;
94
95 struct
96 {
97 uint64_t fpu_ip;
98 uint64_t fpu_dp;
99 } ctrl64;
100 };
101 uint32_t mxcsr;
102 uint32_t mxcsr_mask;
103
104 uint8_t fpr[8][16];
105 uint8_t xmm[16][16];
106
107 uint64_t reserved[12];
108};
109
110static_assert(sizeof(FXSave) == 512, "Unexpected size of FXSave");
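// FXSave mirrors the 512-byte memory image written by the FXSAVE
// instruction. KVM reuses the same layout for the start of the
// kvm_xsave region, which is why the XSave code below can simply cast
// kvm_xsave::region to an FXSave pointer.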
111
112#define FOREACH_IREG() \
113 do { \
114 APPLY_IREG(rax, int_reg::Rax); \
115 APPLY_IREG(rbx, int_reg::Rbx); \
116 APPLY_IREG(rcx, int_reg::Rcx); \
117 APPLY_IREG(rdx, int_reg::Rdx); \
118 APPLY_IREG(rsi, int_reg::Rsi); \
119 APPLY_IREG(rdi, int_reg::Rdi); \
120 APPLY_IREG(rsp, int_reg::Rsp); \
121 APPLY_IREG(rbp, int_reg::Rbp); \
122 APPLY_IREG(r8, int_reg::R8); \
123 APPLY_IREG(r9, int_reg::R9); \
124 APPLY_IREG(r10, int_reg::R10); \
125 APPLY_IREG(r11, int_reg::R11); \
126 APPLY_IREG(r12, int_reg::R12); \
127 APPLY_IREG(r13, int_reg::R13); \
128 APPLY_IREG(r14, int_reg::R14); \
129 APPLY_IREG(r15, int_reg::R15); \
130 } while (0)
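// The FOREACH_* macros implement a simple X-macro pattern: a caller
// defines APPLY_IREG (or APPLY_SREG, ...) to the per-register action
// it needs, invokes the FOREACH macro, and #undefs the helper again.
// Copying all GPRs from gem5 into a kvm_regs struct, for example,
// reduces to:
//
//     #define APPLY_IREG(kreg, mreg) regs.kreg = tc->getReg(mreg)
//     FOREACH_IREG();
//     #undef APPLY_IREG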
131
132#define FOREACH_SREG() \
133 do { \
134 APPLY_SREG(cr0, misc_reg::Cr0); \
135 APPLY_SREG(cr2, misc_reg::Cr2); \
136 APPLY_SREG(cr3, misc_reg::Cr3); \
137 APPLY_SREG(cr4, misc_reg::Cr4); \
138 APPLY_SREG(cr8, misc_reg::Cr8); \
139 APPLY_SREG(efer, misc_reg::Efer); \
140 APPLY_SREG(apic_base, misc_reg::ApicBase); \
141 } while (0)
142
143#define FOREACH_DREG() \
144 do { \
145 APPLY_DREG(db[0], misc_reg::Dr0); \
146 APPLY_DREG(db[1], misc_reg::Dr1); \
147 APPLY_DREG(db[2], misc_reg::Dr2); \
148 APPLY_DREG(db[3], misc_reg::Dr3); \
149 APPLY_DREG(dr6, misc_reg::Dr6); \
150 APPLY_DREG(dr7, misc_reg::Dr7); \
151 } while (0)
152
153#define FOREACH_SEGMENT() \
154 do { \
155 APPLY_SEGMENT(cs, misc_reg::Cs - misc_reg::SegSelBase); \
156 APPLY_SEGMENT(ds, misc_reg::Ds - misc_reg::SegSelBase); \
157 APPLY_SEGMENT(es, misc_reg::Es - misc_reg::SegSelBase); \
158 APPLY_SEGMENT(fs, misc_reg::Fs - misc_reg::SegSelBase); \
159 APPLY_SEGMENT(gs, misc_reg::Gs - misc_reg::SegSelBase); \
160 APPLY_SEGMENT(ss, misc_reg::Ss - misc_reg::SegSelBase); \
161 APPLY_SEGMENT(tr, misc_reg::Tr - misc_reg::SegSelBase); \
162 APPLY_SEGMENT(ldt, misc_reg::Tsl - misc_reg::SegSelBase); \
163 } while (0)
164
165#define FOREACH_DTABLE() \
166 do { \
167 APPLY_DTABLE(gdt, misc_reg::Tsg - misc_reg::SegSelBase); \
168 APPLY_DTABLE(idt, misc_reg::Idtr - misc_reg::SegSelBase); \
169 } while (0)
170
171template<typename Struct, typename Entry>
172static auto
173newVarStruct(size_t entries)
174{
175 size_t size = sizeof(Struct) + entries * sizeof(Entry);
176 return std::unique_ptr<Struct, void(*)(Struct *)>(
177 (Struct *)operator new(size),
178 [](Struct *p) { operator delete(p); });
179}
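// newVarStruct() sizes a single allocation for KVM's variable-length
// structs (kvm_msrs, kvm_cpuid2, ...), which all end in a flexible
// array member that sizeof() does not account for. A typical use,
// making room for two MSR entries:
//
//     auto msrs = newVarStruct<struct kvm_msrs,
//                              struct kvm_msr_entry>(2);
//     msrs->nmsrs = 2;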
180
181static void
182dumpKvm(const struct kvm_regs &regs)
183{
184 inform("KVM register state:\n");
185
186#define APPLY_IREG(kreg, mreg) \
187 inform("\t" # kreg ": 0x%llx\n", regs.kreg)
188
189 FOREACH_IREG();
190
191#undef APPLY_IREG
192
193 inform("\trip: 0x%llx\n", regs.rip);
194 inform("\trflags: 0x%llx\n", regs.rflags);
195}
196
197static void
198dumpKvm(const char *reg_name, const struct kvm_segment &seg)
199{
200 inform("\t%s: @0x%llx+%x [sel: 0x%x, type: 0x%x]\n"
201 "\t\tpres.: %u, dpl: %u, db: %u, s: %u, l: %u, g: %u, avl: %u, "
202 "unus.: %u\n",
203 reg_name,
204 seg.base, seg.limit, seg.selector, seg.type,
205 seg.present, seg.dpl, seg.db, seg.s, seg.l, seg.g, seg.avl,
206 seg.unusable);
207}
208
209static void
210dumpKvm(const char *reg_name, const struct kvm_dtable &dtable)
211{
212 inform("\t%s: @0x%llx+%x\n",
213 reg_name, dtable.base, dtable.limit);
214}
215
216static void
217dumpKvm(const struct kvm_sregs &sregs)
218{
219#define APPLY_SREG(kreg, mreg) \
220 inform("\t" # kreg ": 0x%llx\n", sregs.kreg);
221#define APPLY_SEGMENT(kreg, idx) \
222 dumpKvm(# kreg, sregs.kreg);
223#define APPLY_DTABLE(kreg, idx) \
224 dumpKvm(# kreg, sregs.kreg);
225
226 inform("Special registers:\n");
227 FOREACH_SEGMENT();
228 FOREACH_SREG();
229 FOREACH_DTABLE();
230
231 inform("Interrupt Bitmap:");
232 for (int i = 0; i < KVM_NR_INTERRUPTS; i += 64)
233 inform(" 0x%.8x", sregs.interrupt_bitmap[i / 64]);
234
235#undef APPLY_SREG
236#undef APPLY_SEGMENT
237#undef APPLY_DTABLE
238}
239
240#ifdef KVM_GET_DEBUGREGS
241static void
242dumpKvm(const struct kvm_debugregs &regs)
243{
244 inform("KVM debug state:\n");
245
246#define APPLY_DREG(kreg, mreg) \
247 inform("\t" # kreg ": 0x%llx\n", regs.kreg)
248
249 FOREACH_DREG();
250
251#undef APPLY_DREG
252
253 inform("\tflags: 0x%llx\n", regs.flags);
254}
255#endif
256
257static void
258dumpFpuSpec(const struct FXSave &xs)
259{
260 inform("\tlast_ip: 0x%x\n", xs.ctrl64.fpu_ip);
261 inform("\tlast_dp: 0x%x\n", xs.ctrl64.fpu_dp);
262 inform("\tmxcsr_mask: 0x%x\n", xs.mxcsr_mask);
263}
264
265static void
266dumpFpuSpec(const struct kvm_fpu &fpu)
267{
268 inform("\tlast_ip: 0x%x\n", fpu.last_ip);
269 inform("\tlast_dp: 0x%x\n", fpu.last_dp);
270}
271
272template<typename T>
273static void
274dumpFpuCommon(const T &fpu)
275{
276 const unsigned top((fpu.fsw >> 11) & 0x7);
277 inform("\tfcw: 0x%x\n", fpu.fcw);
278
279 inform("\tfsw: 0x%x (top: %i, "
280 "conditions: %s%s%s%s, exceptions: %s%s%s%s%s%s %s%s%s)\n",
281 fpu.fsw, top,
282
283 (fpu.fsw & CC0Bit) ? "C0" : "",
284 (fpu.fsw & CC1Bit) ? "C1" : "",
285 (fpu.fsw & CC2Bit) ? "C2" : "",
286 (fpu.fsw & CC3Bit) ? "C3" : "",
287
288 (fpu.fsw & IEBit) ? "I" : "",
289 (fpu.fsw & DEBit) ? "D" : "",
290 (fpu.fsw & ZEBit) ? "Z" : "",
291 (fpu.fsw & OEBit) ? "O" : "",
292 (fpu.fsw & UEBit) ? "U" : "",
293 (fpu.fsw & PEBit) ? "P" : "",
294
295 (fpu.fsw & StackFaultBit) ? "SF " : "",
296 (fpu.fsw & ErrSummaryBit) ? "ES " : "",
297 (fpu.fsw & BusyBit) ? "BUSY " : ""
298 );
299 inform("\tftwx: 0x%x\n", fpu.ftwx);
300 inform("\tlast_opcode: 0x%x\n", fpu.last_opcode);
301 dumpFpuSpec(fpu);
302 inform("\tmxcsr: 0x%x\n", fpu.mxcsr);
303 inform("\tFP Stack:\n");
304 for (int i = 0; i < 8; ++i) {
305 const unsigned reg_idx((i + top) & 0x7);
306 const bool empty(!((fpu.ftwx >> reg_idx) & 0x1));
307 const double value(X86ISA::loadFloat80(fpu.fpr[i]));
308 char hex[33];
309 for (int j = 0; j < 10; ++j)
310 snprintf(&hex[j*2], 3, "%.2x", fpu.fpr[i][j]);
311 inform("\t\tST%i/%i: 0x%s (%f)%s\n", i, reg_idx,
312 hex, value, empty ? " (e)" : "");
313 }
314 inform("\tXMM registers:\n");
315 for (int i = 0; i < 16; ++i) {
316 char hex[33];
317 for (int j = 0; j < 16; ++j)
318 snprintf(&hex[j*2], 3, "%.2x", fpu.xmm[i][j]);
319 inform("\t\t%i: 0x%s\n", i, hex);
320 }
321}
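// A note on the indexing above: fsw bits 13:11 hold TOP, the x87
// top-of-stack pointer, and fpu.fpr[] is stored in stack order, so
// stack register ST(i) corresponds to physical register
// (i + TOP) & 7. With TOP == 5, for instance, ST(0) is physical
// register 5, which is the bit position of its tag in ftwx.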
322
323static void
324dumpKvm(const struct kvm_fpu &fpu)
325{
326 inform("FPU registers:\n");
327 dumpFpuCommon(fpu);
328}
329
330static void
331dumpKvm(const struct kvm_xsave &xsave)
332{
333 inform("FPU registers (XSave):\n");
334 dumpFpuCommon(*(FXSave *)xsave.region);
335}
336
337static void
338dumpKvm(const struct kvm_msrs &msrs)
339{
340 inform("MSRs:\n");
341
342 for (int i = 0; i < msrs.nmsrs; ++i) {
343 const struct kvm_msr_entry &e(msrs.entries[i]);
344
345 inform("\t0x%x: 0x%x\n", e.index, e.data);
346 }
347}
348
349static void
350dumpKvm(const struct kvm_xcrs &regs)
351{
352 inform("KVM XCR registers:\n");
353
354 inform("\tFlags: 0x%x\n", regs.flags);
355 for (int i = 0; i < regs.nr_xcrs; ++i) {
356 inform("\tXCR[0x%x]: 0x%x\n",
357 regs.xcrs[i].xcr,
358 regs.xcrs[i].value);
359 }
360}
361
362static void
363dumpKvm(const struct kvm_vcpu_events &events)
364{
365 inform("vCPU events:\n");
366
367 inform("\tException: [inj: %i, nr: %i, has_ec: %i, ec: %i]\n",
368 events.exception.injected, events.exception.nr,
369 events.exception.has_error_code, events.exception.error_code);
370
371 inform("\tInterrupt: [inj: %i, nr: %i, soft: %i]\n",
372 events.interrupt.injected, events.interrupt.nr,
373 events.interrupt.soft);
374
375 inform("\tNMI: [inj: %i, pending: %i, masked: %i]\n",
376 events.nmi.injected, events.nmi.pending,
377 events.nmi.masked);
378
379 inform("\tSIPI vector: 0x%x\n", events.sipi_vector);
380 inform("\tFlags: 0x%x\n", events.flags);
381}
382
383static bool
384isCanonicalAddress(uint64_t addr)
385{
386 // x86-64 doesn't currently use the full 64-bit virtual address
387 // space; instead it uses signed 48-bit addresses that are
388 // sign-extended to 64 bits. Such addresses are known as
389 // "canonical".
390 uint64_t upper_half(addr & 0xffff800000000000ULL);
391 return upper_half == 0 || upper_half == 0xffff800000000000ULL;
392}
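// For example, 0x00007fffffffffff and 0xffff800000000000 are both
// canonical, while 0x0000800000000000 is not: bits 63:47 must all be
// copies of bit 47.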
393
394static void
395checkSeg(const char *name, const int idx, const struct kvm_segment &seg,
396 struct kvm_sregs sregs)
397{
398 // Check the register base
399 switch (idx) {
400 case misc_reg::Tsl:
401 case misc_reg::Tr:
402 case misc_reg::Fs:
403 case misc_reg::Gs:
404 if (!isCanonicalAddress(seg.base))
405 warn("Illegal %s base: 0x%x\n", name, seg.base);
406 break;
407
408 case misc_reg::Ss:
409 case misc_reg::Ds:
410 case misc_reg::Es:
411 if (seg.unusable)
412 break;
413 [[fallthrough]];
414 case misc_reg::Cs:
415 if (seg.base & 0xffffffff00000000ULL)
416 warn("Illegal %s base: 0x%x\n", name, seg.base);
417 break;
418 }
419
420 // Check the type
421 switch (idx) {
422 case misc_reg::Cs:
423 switch (seg.type) {
424 case 3:
425 if (seg.dpl != 0)
426 warn("CS type is 3 but dpl != 0.\n");
427 break;
428 case 9:
429 case 11:
430 if (seg.dpl != sregs.ss.dpl)
431 warn("CS type is %i but CS DPL != SS DPL\n", seg.type);
432 break;
433 case 13:
434 case 15:
435 if (seg.dpl > sregs.ss.dpl)
436 warn("CS type is %i but CS DPL > SS DPL\n", seg.type);
437 break;
438 default:
439 warn("Illegal CS type: %i\n", seg.type);
440 break;
441 }
442 break;
443
444 case misc_reg::Ss:
445 if (seg.unusable)
446 break;
447 switch (seg.type) {
448 case 3:
449 if (sregs.cs.type == 3 && seg.dpl != 0)
450 warn("CS type is 3, but SS DPL is != 0.\n");
451 [[fallthrough]];
452 case 7:
453 if (!(sregs.cr0 & 1) && seg.dpl != 0)
454 warn("SS DPL is %i, but CR0 PE is 0\n", seg.dpl);
455 break;
456 default:
457 warn("Illegal SS type: %i\n", seg.type);
458 break;
459 }
460 break;
461
462 case misc_reg::Ds:
463 case misc_reg::Es:
464 case misc_reg::Fs:
465 case misc_reg::Gs:
466 if (seg.unusable)
467 break;
468 if (!(seg.type & 0x1) ||
469 ((seg.type & 0x8) && !(seg.type & 0x2)))
470 warn("%s has an illegal type field: %i\n", name, seg.type);
471 break;
472
473 case misc_reg::Tr:
474 // TODO: We should check the CPU mode
475 if (seg.type != 3 && seg.type != 11)
476 warn("%s: Illegal segment type (%i)\n", name, seg.type);
477 break;
478
479 case misc_reg::Tsl:
480 if (seg.unusable)
481 break;
482 if (seg.type != 2)
483 warn("%s: Illegal segment type (%i)\n", name, seg.type);
484 break;
485 }
486
487 switch (idx) {
488 case misc_reg::Ss:
489 case misc_reg::Ds:
490 case misc_reg::Es:
491 case misc_reg::Fs:
492 case misc_reg::Gs:
493 if (seg.unusable)
494 break;
495 [[fallthrough]];
496 case misc_reg::Cs:
497 if (!seg.s)
498 warn("%s: S flag not set\n", name);
499 break;
500
501 case misc_reg::Tsl:
502 if (seg.unusable)
503 break;
504 [[fallthrough]];
505 case misc_reg::Tr:
506 if (seg.s)
507 warn("%s: S flag is set\n", name);
508 break;
509 }
510
511 switch (idx) {
512 case misc_reg::Ss:
513 case misc_reg::Ds:
514 case misc_reg::Es:
515 case misc_reg::Fs:
516 case misc_reg::Gs:
517 case misc_reg::Tsl:
518 if (seg.unusable)
519 break;
520 [[fallthrough]];
521 case misc_reg::Tr:
522 case misc_reg::Cs:
523 if (!seg.present)
524 warn("%s: P flag not set\n", name);
525
526 if (((seg.limit & 0xFFF) == 0 && seg.g) ||
527 ((seg.limit & 0xFFF00000) != 0 && !seg.g)) {
528 warn("%s limit (0x%x) and g (%i) combination is illegal.\n",
529 name, seg.limit, seg.g);
530 }
531 break;
532 }
533
534 // TODO: Check CS DB
535}
536
537X86KvmCPU::X86KvmCPU(const X86KvmCPUParams &params)
538 : BaseKvmCPU(params),
539 useXSave(params.useXSave)
540{}
541
542void
543X86KvmCPU::init()
544{
545 BaseKvmCPU::init();
546
547 Kvm &kvm = *vm->kvm;
548
549 if (!kvm.capSetTSSAddress())
550 panic("KVM: Missing capability (KVM_CAP_SET_TSS_ADDR)\n");
551 if (!kvm.capExtendedCPUID())
552 panic("KVM: Missing capability (KVM_CAP_EXT_CPUID)\n");
553 if (!kvm.capUserNMI())
554 warn("KVM: Missing capability (KVM_CAP_USER_NMI)\n");
555 if (!kvm.capVCPUEvents())
556 warn("KVM: Missing capability (KVM_CAP_VCPU_EVENTS)\n");
557
558 haveDebugRegs = kvm.capDebugRegs();
559 haveXSave = kvm.capXSave();
560 haveXCRs = kvm.capXCRs();
561
562 if (useXSave && !haveXSave) {
563 warn("KVM: XSAVE not supported by host. MXCSR synchronization might "
564 "be unreliable due to kernel bugs.\n");
565 useXSave = false;
566 } else if (!useXSave) {
567 warn("KVM: XSave FPU/SIMD synchronization disabled by user.\n");
568 }
569}
570
571X86KvmCPU::~X86KvmCPU()
572{
573}
574
575void
576X86KvmCPU::startup()
577{
578 BaseKvmCPU::startup();
579
580 updateCPUID();
581
582 // TODO: Do we need to create an identity mapped TSS area? We
583 // should call kvm.vm.setTSSAddress() here in that case. It should
584 // only be needed for old versions of the virtualization
585 // extensions. We should make sure that the identity range is
586 // reserved in the e820 memory map in that case.
587}
588
589void
590X86KvmCPU::dump() const
591{
592 dumpIntRegs();
593 if (useXSave)
594 dumpXSave();
595 else
596 dumpFpuRegs();
597 dumpSpecRegs();
598 dumpDebugRegs();
599 dumpXCRs();
600 dumpVCpuEvents();
601 dumpMSRs();
602}
603
604void
605X86KvmCPU::dumpFpuRegs() const
606{
607 struct kvm_fpu fpu;
608 getFPUState(fpu);
609 dumpKvm(fpu);
610}
611
612void
613X86KvmCPU::dumpIntRegs() const
614{
615 struct kvm_regs regs;
616 getRegisters(regs);
617 dumpKvm(regs);
618}
619
620void
621X86KvmCPU::dumpSpecRegs() const
622{
623 struct kvm_sregs sregs;
624 getSpecialRegisters(sregs);
625 dumpKvm(sregs);
626}
627
628void
629X86KvmCPU::dumpDebugRegs() const
630{
631 if (haveDebugRegs) {
632#ifdef KVM_GET_DEBUGREGS
633 struct kvm_debugregs dregs;
634 getDebugRegisters(dregs);
635 dumpKvm(dregs);
636#endif
637 } else {
638 inform("Debug registers not supported by kernel.\n");
639 }
640}
641
642void
643X86KvmCPU::dumpXCRs() const
644{
645 if (haveXCRs) {
646 struct kvm_xcrs xcrs;
647 getXCRs(xcrs);
648 dumpKvm(xcrs);
649 } else {
650 inform("XCRs not supported by kernel.\n");
651 }
652}
653
654void
655X86KvmCPU::dumpXSave() const
656{
657 if (haveXSave) {
658 struct kvm_xsave xsave;
659 getXSave(xsave);
660 dumpKvm(xsave);
661 } else {
662 inform("XSave not supported by kernel.\n");
663 }
664}
665
666void
667X86KvmCPU::dumpVCpuEvents() const
668{
669 struct kvm_vcpu_events events;
670 getVCpuEvents(events);
671 dumpKvm(events);
672}
673
674void
675X86KvmCPU::dumpMSRs() const
676{
677 const Kvm::MSRIndexVector &supported_msrs = vm->kvm->getSupportedMSRs();
678 auto msrs = newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(
679 supported_msrs.size());
680
681 msrs->nmsrs = supported_msrs.size();
682 for (int i = 0; i < supported_msrs.size(); ++i) {
683 struct kvm_msr_entry &e(msrs->entries[i]);
684 e.index = supported_msrs[i];
685 e.reserved = 0;
686 e.data = 0;
687 }
688 getMSRs(*msrs.get());
689
690 dumpKvm(*msrs.get());
691}
692
693void
694X86KvmCPU::updateKvmState()
695{
696 updateKvmStateRegs();
697 updateKvmStateSRegs();
698 updateKvmStateFPU();
699 updateKvmStateMSRs();
700
701 DPRINTF(KvmContext, "X86KvmCPU::updateKvmState():\n");
702 if (debug::KvmContext)
703 dump();
704}
705
706void
707X86KvmCPU::updateKvmStateRegs()
708{
709 struct kvm_regs regs;
710
711#define APPLY_IREG(kreg, mreg) regs.kreg = tc->getReg(mreg)
712 FOREACH_IREG();
713#undef APPLY_IREG
714
715 regs.rip = tc->pcState().instAddr() - tc->readMiscReg(misc_reg::CsBase);
716
717 /* You might think that setting regs.rflags to the contents of
718 * misc_reg::Rflags here would suffice. In that case you're
719 * mistaken. We need to reconstruct it from a bunch of ucode
720 * registers and wave a dead chicken over it (aka mask out and set
721 * reserved bits) to get it to work.
722 */
723 regs.rflags = X86ISA::getRFlags(tc);
724
725 setRegisters(regs);
726}
727
728static inline void
729setKvmSegmentReg(ThreadContext *tc, struct kvm_segment &kvm_seg,
730 const int index)
731{
732 SegAttr attr(tc->readMiscRegNoEffect(misc_reg::segAttr(index)));
733
734 kvm_seg.base = tc->readMiscRegNoEffect(misc_reg::segBase(index));
735 kvm_seg.limit = tc->readMiscRegNoEffect(misc_reg::segLimit(index));
736 kvm_seg.selector = tc->readMiscRegNoEffect(misc_reg::segSel(index));
737 kvm_seg.type = attr.type;
738 kvm_seg.present = attr.present;
739 kvm_seg.dpl = attr.dpl;
740 kvm_seg.db = attr.defaultSize;
741 kvm_seg.s = attr.system;
742 kvm_seg.l = attr.longMode;
743 kvm_seg.g = attr.granularity;
744 kvm_seg.avl = attr.avl;
745 kvm_seg.unusable = attr.unusable;
746}
747
748static inline void
749setKvmDTableReg(ThreadContext *tc, struct kvm_dtable &kvm_dtable,
750 const int index)
751{
752 kvm_dtable.base = tc->readMiscRegNoEffect(misc_reg::segBase(index));
753 kvm_dtable.limit = tc->readMiscRegNoEffect(misc_reg::segLimit(index));
754}
755
756static void
757forceSegAccessed(struct kvm_segment &seg)
758{
759 // Intel's VMX requires that (some) usable segments are flagged as
760 // 'accessed' (i.e., the lowest bit in the segment type is set)
761 // when entering VMX. This wouldn't necessarily be the case even if
762 // gem5 did set the access bits correctly, so we force it to one
763 // in that case.
764 if (!seg.unusable)
765 seg.type |= SEG_TYPE_BIT_ACCESSED;
766}
767
768void
769X86KvmCPU::updateKvmStateSRegs()
770{
771 struct kvm_sregs sregs;
772
773#define APPLY_SREG(kreg, mreg) sregs.kreg = tc->readMiscRegNoEffect(mreg)
774#define APPLY_SEGMENT(kreg, idx) setKvmSegmentReg(tc, sregs.kreg, idx)
775#define APPLY_DTABLE(kreg, idx) setKvmDTableReg(tc, sregs.kreg, idx)
776
777 FOREACH_SREG();
778 FOREACH_SEGMENT();
779 FOREACH_DTABLE();
780
781#undef APPLY_SREG
782#undef APPLY_SEGMENT
783#undef APPLY_DTABLE
784
785 // Clear the interrupt bitmap
786 memset(&sregs.interrupt_bitmap, 0, sizeof(sregs.interrupt_bitmap));
787
788 // VMX requires CS, SS, DS, ES, FS, and GS to have the accessed
789 // bit in the type field set.
790 forceSegAccessed(sregs.cs);
791 forceSegAccessed(sregs.ss);
792 forceSegAccessed(sregs.ds);
793 forceSegAccessed(sregs.es);
794 forceSegAccessed(sregs.fs);
795 forceSegAccessed(sregs.gs);
796
797 // There are currently some cases where the active task isn't
798 // marked as busy. This is illegal in VMX, so we force it to busy.
799 if (sregs.tr.type == SEG_SYS_TYPE_TSS_AVAILABLE) {
800 hack("tr.type (%i) is not busy. Forcing the busy bit.\n",
801 sregs.tr.type);
802 sregs.tr.type = SEG_SYS_TYPE_TSS_BUSY;
803 }
804
805 // VMX requires the DPL of SS and CS to be the same for
806 // non-conforming code segments. It seems like m5 doesn't set the
807 // DPL of SS correctly when taking interrupts, so we need to fix
808 // that here.
809 if ((sregs.cs.type == SEG_CS_TYPE_ACCESSED ||
810 sregs.cs.type == SEG_CS_TYPE_READ_ACCESSED) &&
811 sregs.cs.dpl != sregs.ss.dpl) {
812
813 hack("CS.DPL (%i) != SS.DPL (%i): Forcing SS.DPL to %i\n",
814 sregs.cs.dpl, sregs.ss.dpl, sregs.cs.dpl);
815 sregs.ss.dpl = sregs.cs.dpl;
816 }
817
818 // Do checks after fixing up the state to avoid getting excessive
819 // amounts of warnings.
820 RFLAGS rflags_nocc(tc->readMiscReg(misc_reg::Rflags));
821 if (!rflags_nocc.vm) {
822 // Do segment verification if the CPU isn't entering virtual
823 // 8086 mode. We currently assume that unrestricted guest
824 // mode is available.
825
826#define APPLY_SEGMENT(kreg, idx) \
827 checkSeg(# kreg, idx + misc_reg::SegSelBase, sregs.kreg, sregs)
828
829 FOREACH_SEGMENT();
830#undef APPLY_SEGMENT
831 }
832
833 setSpecialRegisters(sregs);
834}
835
836template <typename T>
837static void
838updateKvmStateFPUCommon(ThreadContext *tc, T &fpu)
839{
840 fpu.mxcsr = tc->readMiscRegNoEffect(misc_reg::Mxcsr);
841 fpu.fcw = tc->readMiscRegNoEffect(misc_reg::Fcw);
842 // No need to rebuild from misc_reg::Fsw and misc_reg::Top if we read
843 // with effects.
844 fpu.fsw = tc->readMiscReg(misc_reg::Fsw);
845
846 uint64_t ftw(tc->readMiscRegNoEffect(misc_reg::Ftw));
847 fpu.ftwx = X86ISA::convX87TagsToXTags(ftw);
848
849 fpu.last_opcode = tc->readMiscRegNoEffect(misc_reg::Fop);
850
851 const unsigned top((fpu.fsw >> 11) & 0x7);
852 for (int i = 0; i < 8; ++i) {
853 const unsigned reg_idx((i + top) & 0x7);
854 const double value(bitsToFloat64(
855 tc->getReg(float_reg::fpr(reg_idx))));
856 DPRINTF(KvmContext, "Setting KVM FP reg %i (st[%i]) := %f\n",
857 reg_idx, i, value);
858 X86ISA::storeFloat80(fpu.fpr[i], value);
859 }
860
861 // TODO: We should update the MMX state
862
863 for (int i = 0; i < 16; ++i) {
864 *(uint64_t *)&fpu.xmm[i][0] =
865 tc->getReg(float_reg::xmmLow(i));
866 *(uint64_t *)&fpu.xmm[i][8] =
867 tc->getReg(float_reg::xmmHigh(i));
868 }
869}
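// As in dumpFpuCommon() above, fpu.fpr[] is filled in stack order:
// entry i receives ST(i), which lives in physical x87 register
// (i + TOP) & 7. gem5's float_reg::fpr() is indexed by physical
// register number, hence the reg_idx translation in the loop.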
870
871void
872X86KvmCPU::updateKvmStateFPULegacy()
873{
874 struct kvm_fpu fpu;
875
876 // There is some padding in the FP registers, so we'd better zero
877 // the whole struct.
878 memset(&fpu, 0, sizeof(fpu));
879
880 updateKvmStateFPUCommon(tc, fpu);
881
882 if (tc->readMiscRegNoEffect(misc_reg::Fiseg))
883 warn_once("misc_reg::Fiseg is non-zero.\n");
884
885 fpu.last_ip = tc->readMiscRegNoEffect(misc_reg::Fioff);
886
887 if (tc->readMiscRegNoEffect(misc_reg::Foseg))
888 warn_once("misc_reg::Foseg is non-zero.\n");
889
890 fpu.last_dp = tc->readMiscRegNoEffect(misc_reg::Fooff);
891
892 setFPUState(fpu);
893}
894
895void
896X86KvmCPU::updateKvmStateFPUXSave()
897{
898 struct kvm_xsave kxsave;
899 FXSave &xsave(*(FXSave *)kxsave.region);
900
901 // There is some padding and reserved fields in the structure, so
902 // we'd better zero the whole thing.
903 memset(&kxsave, 0, sizeof(kxsave));
904
905 updateKvmStateFPUCommon(tc, xsave);
906
907 if (tc->readMiscRegNoEffect(misc_reg::Fiseg))
908 warn_once("misc_reg::Fiseg is non-zero.\n");
909
910 xsave.ctrl64.fpu_ip = tc->readMiscRegNoEffect(misc_reg::Fioff);
911
912 if (tc->readMiscRegNoEffect(misc_reg::Foseg))
913 warn_once("misc_reg::Foseg is non-zero.\n");
914
915 xsave.ctrl64.fpu_dp = tc->readMiscRegNoEffect(misc_reg::Fooff);
916
917 setXSave(kxsave);
918}
919
920void
921X86KvmCPU::updateKvmStateFPU()
922{
923 if (useXSave)
924 updateKvmStateFPUXSave();
925 else
926 updateKvmStateFPULegacy();
927}
928
929void
930X86KvmCPU::updateKvmStateMSRs()
931{
932 KvmMSRVector msrs;
933
934 const Kvm::MSRIndexVector &indices(getMsrIntersection());
935
936 for (auto it = indices.cbegin(); it != indices.cend(); ++it) {
937 struct kvm_msr_entry e;
938
939 e.index = *it;
940 e.reserved = 0;
941 e.data = tc->readMiscReg(msrMap.at(*it));
942 DPRINTF(KvmContext, "Adding MSR: idx: 0x%x, data: 0x%x\n",
943 e.index, e.data);
944
945 msrs.push_back(e);
946 }
947
948 setMSRs(msrs);
949}
950
951void
952X86KvmCPU::updateThreadContext()
953{
954 struct kvm_regs regs;
955 struct kvm_sregs sregs;
956
957 getRegisters(regs);
958 getSpecialRegisters(sregs);
959
960 DPRINTF(KvmContext, "X86KvmCPU::updateThreadContext():\n");
961 if (debug::KvmContext)
962 dump();
963
964 updateThreadContextRegs(regs, sregs);
965 updateThreadContextSRegs(sregs);
966 if (useXSave) {
967 struct kvm_xsave xsave;
968 getXSave(xsave);
969
970 updateThreadContextXSave(xsave);
971 } else {
972 struct kvm_fpu fpu;
973 getFPUState(fpu);
974
975 updateThreadContextFPU(fpu);
976 }
977 updateThreadContextMSRs();
978
979 // The M5 misc reg caches some values from other
980 // registers. Writing to it with side effects causes it to be
981 // updated from its source registers.
982 tc->setMiscReg(misc_reg::M5Reg, 0);
983}
984
985void
986X86KvmCPU::updateThreadContextRegs(const struct kvm_regs &regs,
987 const struct kvm_sregs &sregs)
988{
989#define APPLY_IREG(kreg, mreg) tc->setReg(mreg, regs.kreg)
990
991 FOREACH_IREG();
992
993#undef APPLY_IREG
994
995 tc->pcState(PCState(regs.rip + sregs.cs.base));
996
997 // Flags are spread out across multiple semi-magic registers so we
998 // need some special care when updating them.
999 X86ISA::setRFlags(tc, regs.rflags);
1000}
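// KVM reports RIP as an offset relative to the CS base while gem5's
// PC state holds a linear address, so the base is added back here and
// subtracted again in updateKvmStateRegs().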
1001
1002
1003inline void
1004setContextSegment(ThreadContext *tc, const struct kvm_segment &kvm_seg,
1005 const int index)
1006{
1007 SegAttr attr(0);
1008
1009 attr.type = kvm_seg.type;
1010 attr.present = kvm_seg.present;
1011 attr.dpl = kvm_seg.dpl;
1012 attr.defaultSize = kvm_seg.db;
1013 attr.system = kvm_seg.s;
1014 attr.longMode = kvm_seg.l;
1015 attr.granularity = kvm_seg.g;
1016 attr.avl = kvm_seg.avl;
1017 attr.unusable = kvm_seg.unusable;
1018
1019 // We need some setMiscReg magic here to keep the effective base
1020 // addresses in sync. We need an up-to-date version of EFER, so
1021 // make sure this is called after the sregs have been synced.
1022 tc->setMiscReg(misc_reg::segBase(index), kvm_seg.base);
1023 tc->setMiscReg(misc_reg::segLimit(index), kvm_seg.limit);
1024 tc->setMiscReg(misc_reg::segSel(index), kvm_seg.selector);
1025 tc->setMiscReg(misc_reg::segAttr(index), (RegVal)attr);
1026}
1027
1028inline void
1029setContextSegment(ThreadContext *tc, const struct kvm_dtable &kvm_dtable,
1030 const int index)
1031{
1032 // We need some setMiscReg magic here to keep the effective base
1033 // addresses in sync. We need an up-to-date version of EFER, so
1034 // make sure this is called after the sregs have been synced.
1035 tc->setMiscReg(misc_reg::segBase(index), kvm_dtable.base);
1036 tc->setMiscReg(misc_reg::segLimit(index), kvm_dtable.limit);
1037}
1038
1039void
1040X86KvmCPU::updateThreadContextSRegs(const struct kvm_sregs &sregs)
1041{
1042 assert(getKvmRunState()->apic_base == sregs.apic_base);
1043 assert(getKvmRunState()->cr8 == sregs.cr8);
1044
1045#define APPLY_SREG(kreg, mreg) tc->setMiscRegNoEffect(mreg, sregs.kreg)
1046#define APPLY_SEGMENT(kreg, idx) setContextSegment(tc, sregs.kreg, idx)
1047#define APPLY_DTABLE(kreg, idx) setContextSegment(tc, sregs.kreg, idx)
1048 FOREACH_SREG();
1049 FOREACH_SEGMENT();
1050 FOREACH_DTABLE();
1051#undef APPLY_SREG
1052#undef APPLY_SEGMENT
1053#undef APPLY_DTABLE
1054}
1055
1056template<typename T>
1057static void
1058updateThreadContextFPUCommon(ThreadContext *tc, const T &fpu)
1059{
1060 const unsigned top((fpu.fsw >> 11) & 0x7);
1061
1062 for (int i = 0; i < 8; ++i) {
1063 const unsigned reg_idx((i + top) & 0x7);
1064 const double value(X86ISA::loadFloat80(fpu.fpr[i]));
1065 DPRINTF(KvmContext, "Setting gem5 FP reg %i (st[%i]) := %f\n",
1066 reg_idx, i, value);
1067 tc->setReg(float_reg::fpr(reg_idx), floatToBits64(value));
1068 }
1069
1070 // TODO: We should update the MMX state
1071
1073 tc->setMiscRegNoEffect(misc_reg::Mxcsr, fpu.mxcsr);
1074 tc->setMiscRegNoEffect(misc_reg::Fcw, fpu.fcw);
1075 tc->setMiscRegNoEffect(misc_reg::Fsw, fpu.fsw);
1076
1077 uint64_t ftw(convX87XTagsToTags(fpu.ftwx));
1078 // TODO: Are these registers really the same?
1079 tc->setMiscRegNoEffect(misc_reg::Ftw, ftw);
1080 tc->setMiscRegNoEffect(misc_reg::Ftag, ftw);
1081
1082 tc->setMiscRegNoEffect(misc_reg::Fop, fpu.last_opcode);
1083
1084 for (int i = 0; i < 16; ++i) {
1085 tc->setReg(float_reg::xmmLow(i), *(uint64_t *)&fpu.xmm[i][0]);
1086 tc->setReg(float_reg::xmmHigh(i), *(uint64_t *)&fpu.xmm[i][8]);
1087 }
1088}
1089
1090void
1091X86KvmCPU::updateThreadContextFPU(const struct kvm_fpu &fpu)
1092{
1093 updateThreadContextFPUCommon(tc, fpu);
1094
1095 tc->setMiscRegNoEffect(misc_reg::Fiseg, 0);
1096 tc->setMiscRegNoEffect(misc_reg::Fioff, fpu.last_ip);
1097 tc->setMiscRegNoEffect(misc_reg::Foseg, 0);
1098 tc->setMiscRegNoEffect(misc_reg::Fooff, fpu.last_dp);
1099}
1100
1101void
1102X86KvmCPU::updateThreadContextXSave(const struct kvm_xsave &kxsave)
1103{
1104 const FXSave &xsave(*(const FXSave *)kxsave.region);
1105
1106 updateThreadContextFPUCommon(tc, xsave);
1107
1108 tc->setMiscRegNoEffect(misc_reg::Fiseg, 0);
1109 tc->setMiscRegNoEffect(misc_reg::Fioff, xsave.ctrl64.fpu_ip);
1110 tc->setMiscRegNoEffect(misc_reg::Foseg, 0);
1111 tc->setMiscRegNoEffect(misc_reg::Fooff, xsave.ctrl64.fpu_dp);
1112}
1113
1114void
1115X86KvmCPU::updateThreadContextMSRs()
1116{
1117 const Kvm::MSRIndexVector &msrs(getMsrIntersection());
1118
1119 auto kvm_msrs = newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(
1120 msrs.size());
1121 struct kvm_msr_entry *entry;
1122
1123 // Create a list of MSRs to read
1124 kvm_msrs->nmsrs = msrs.size();
1125 entry = &kvm_msrs->entries[0];
1126 for (auto it = msrs.cbegin(); it != msrs.cend(); ++it, ++entry) {
1127 entry->index = *it;
1128 entry->reserved = 0;
1129 entry->data = 0;
1130 }
1131
1132 getMSRs(*kvm_msrs.get());
1133
1134 // Update M5's state
1135 entry = &kvm_msrs->entries[0];
1136 for (int i = 0; i < kvm_msrs->nmsrs; ++i, ++entry) {
1137 DPRINTF(KvmContext, "Setting M5 MSR: idx: 0x%x, data: 0x%x\n",
1138 entry->index, entry->data);
1139
1140 tc->setMiscReg(X86ISA::msrMap.at(entry->index), entry->data);
1141 }
1142}
1143
1144void
1145X86KvmCPU::deliverInterrupts()
1146{
1147 Fault fault;
1148
1149 syncThreadContext();
1150
1151 {
1152 // Migrate to the interrupt controller's thread to get the
1153 // interrupt. Even though the individual methods are safe to
1154 // call across threads, we might still lose interrupts unless
1155 // getInterrupt() and updateIntrInfo() are called
1156 // atomically.
1157 EventQueue::ScopedMigration migrate(interrupts[0]->eventQueue());
1158 fault = interrupts[0]->getInterrupt();
1159 interrupts[0]->updateIntrInfo();
1160 }
1161
1162 X86Interrupt *x86int(dynamic_cast<X86Interrupt *>(fault.get()));
1163 if (dynamic_cast<NonMaskableInterrupt *>(fault.get())) {
1164 DPRINTF(KvmInt, "Delivering NMI\n");
1165 kvmNonMaskableInterrupt();
1166 } else if (dynamic_cast<InitInterrupt *>(fault.get())) {
1167 DPRINTF(KvmInt, "INIT interrupt\n");
1168 fault.get()->invoke(tc);
1169 // Delay the kvm state update since we won't enter KVM on this
1170 // tick.
1171 threadContextDirty = true;
1172 // HACK: gem5 doesn't actually have any BIOS code, which means
1173 // that we need to halt the thread and wait for a startup
1174 // interrupt before restarting the thread. The simulated CPUs
1175 // use the same kind of hack using a microcode routine.
1176 thread->suspend();
1177 } else if (dynamic_cast<StartupInterrupt *>(fault.get())) {
1178 DPRINTF(KvmInt, "STARTUP interrupt\n");
1179 fault.get()->invoke(tc);
1180 // The kvm state is assumed to have been updated when entering
1181 // kvmRun(), so we need to update it manually here.
1182 updateKvmState();
1183 } else if (x86int) {
1184 struct kvm_interrupt kvm_int;
1185 kvm_int.irq = x86int->getVector();
1186
1187 DPRINTF(KvmInt, "Delivering interrupt: %s (%u)\n",
1188 fault->name(), kvm_int.irq);
1189
1190 kvmInterrupt(kvm_int);
1191 } else {
1192 panic("KVM: Unknown interrupt type\n");
1193 }
1194
1195}
1196
1197Tick
1198X86KvmCPU::kvmRun(Tick ticks)
1199{
1200 struct kvm_run &kvm_run(*getKvmRunState());
1201
1202 auto *lapic = dynamic_cast<X86ISA::Interrupts *>(interrupts[0]);
1203
1204 if (lapic->checkInterruptsRaw()) {
1205 if (lapic->hasPendingUnmaskable()) {
1206 DPRINTF(KvmInt,
1207 "Delivering unmaskable interrupt.\n");
1210 } else if (kvm_run.ready_for_interrupt_injection) {
1211 // KVM claims that it is ready for an interrupt. It might
1212 // be lying if we just updated rflags and disabled
1213 // interrupts (e.g., by doing a CPU handover). Let's sync
1214 // the thread context and check if there are /really/
1215 // interrupts that should be delivered now.
1216 syncThreadContext();
1217 if (lapic->checkInterrupts()) {
1218 DPRINTF(KvmInt,
1219 "M5 has pending interrupts, delivering interrupt.\n");
1220
1222 } else {
1223 DPRINTF(KvmInt,
1224 "Interrupt delivery delayed due to KVM confusion.\n");
1225 kvm_run.request_interrupt_window = 1;
1226 }
1227 } else if (!kvm_run.request_interrupt_window) {
1228 DPRINTF(KvmInt,
1229 "M5 has pending interrupts, requesting interrupt "
1230 "window.\n");
1231 kvm_run.request_interrupt_window = 1;
1232 }
1233 } else {
1234 kvm_run.request_interrupt_window = 0;
1235 }
1236
1237 // The CPU might have been suspended as a result of the INIT
1238 // interrupt delivery hack. In that case, don't enter into KVM.
1239 if (_status == Idle)
1240 return 0;
1241 else
1242 return BaseKvmCPU::kvmRun(ticks);
1243}
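// The interrupt-window handshake above works in two steps: when gem5
// has a pending interrupt that cannot be injected yet (e.g., because
// guest RFLAGS.IF is clear), kvmRun() sets
// kvm_run.request_interrupt_window. KVM then exits with
// KVM_EXIT_IRQ_WINDOW_OPEN as soon as injection becomes possible, and
// the next pass through this method delivers the interrupt via
// deliverInterrupts().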
1244
1245Tick
1246X86KvmCPU::kvmRunDrain()
1247{
1248 struct kvm_run &kvm_run(*getKvmRunState());
1249
1250 if (!archIsDrained()) {
1251 DPRINTF(Drain, "kvmRunDrain: Architecture code isn't drained\n");
1252
1253 // Tell KVM to find a suitable place to deliver interrupts. This
1254 // should ensure that pending interrupts have been delivered and
1255 // things are reasonably consistent (i.e., no interrupts pending
1256 // in the guest).
1257 kvm_run.request_interrupt_window = 1;
1258
1259 // Limit the run to 1 millisecond. That is hopefully enough to
1260 // reach an interrupt window. Otherwise, we'll just try again
1261 // later.
1262 return BaseKvmCPU::kvmRun(1 * sim_clock::as_float::ms);
1263 } else {
1264 DPRINTF(Drain, "kvmRunDrain: Delivering pending IO\n");
1265
1266 return BaseKvmCPU::kvmRun(0);
1267 }
1268}
1269
1270uint64_t
1271X86KvmCPU::getHostCycles() const
1272{
1273 return getMSR(MSR_TSC);
1274}
1275
1276void
1277X86KvmCPU::handleIOMiscReg32(int miscreg)
1278{
1279 struct kvm_run &kvm_run(*getKvmRunState());
1280 const uint16_t port(kvm_run.io.port);
1281
1282 assert(kvm_run.exit_reason == KVM_EXIT_IO);
1283
1284 if (kvm_run.io.size != 4) {
1285 panic("Unexpected IO size (%u) for address 0x%x.\n",
1286 kvm_run.io.size, port);
1287 }
1288
1289 if (kvm_run.io.count != 1) {
1290 panic("Unexpected IO count (%u) for address 0x%x.\n",
1291 kvm_run.io.count, port);
1292 }
1293
1294 uint32_t *data((uint32_t *)getGuestData(kvm_run.io.data_offset));
1295 if (kvm_run.io.direction == KVM_EXIT_IO_OUT)
1296 tc->setMiscReg(miscreg, *data);
1297 else
1298 *data = tc->readMiscRegNoEffect(miscreg);
1299}
1300
1301Tick
1302X86KvmCPU::handleKvmExitIO()
1303{
1304 struct kvm_run &kvm_run(*getKvmRunState());
1305 bool isWrite(kvm_run.io.direction == KVM_EXIT_IO_OUT);
1306 unsigned char *guestData(getGuestData(kvm_run.io.data_offset));
1307 Tick delay(0);
1308 uint16_t port(kvm_run.io.port);
1309 Addr pAddr;
1310 const int count(kvm_run.io.count);
1311
1312 assert(kvm_run.io.direction == KVM_EXIT_IO_IN ||
1313 kvm_run.io.direction == KVM_EXIT_IO_OUT);
1314
1315 DPRINTF(KvmIO, "KVM-x86: Handling IO instruction (%s) (port: 0x%x)\n",
1316 (isWrite ? "out" : "in"), kvm_run.io.port);
1317
1318 /* Vanilla gem5 handles PCI discovery in the TLB(!). Since we
1319 * don't use the TLB component, we need to intercept and handle
1320 * the PCI configuration space IO ports here.
1321 *
1322 * The IO port PCI discovery mechanism uses one address register
1323 * and one data register. We map the address register to a misc
1324 * reg and use that to re-route data register accesses to the
1325 * right location in the PCI configuration space.
1326 */
1327 if (port == IO_PCI_CONF_ADDR) {
1328 handleIOMiscReg32(misc_reg::PciConfigAddress);
1329 return 0;
1330 } else if ((port & ~0x3) == IO_PCI_CONF_DATA_BASE) {
1331 Addr pciConfigAddr(tc->readMiscRegNoEffect(
1332 misc_reg::PciConfigAddress));
1333 if (pciConfigAddr & 0x80000000) {
1334 pAddr = X86ISA::x86PciConfigAddress((pciConfigAddr & 0x7ffffffc) |
1335 (port & 0x3));
1336 } else {
1337 pAddr = X86ISA::x86IOAddress(port);
1338 }
1339 } else {
1340 pAddr = X86ISA::x86IOAddress(port);
1341 }
1342
1343 const MemCmd cmd(isWrite ? MemCmd::WriteReq : MemCmd::ReadReq);
1344 // Temporarily lock and migrate to the device event queue to
1345 // prevent races in multi-core mode.
1346 EventQueue::ScopedMigration migrate(deviceEventQueue());
1347 for (int i = 0; i < count; ++i) {
1348 RequestPtr io_req = std::make_shared<Request>(
1349 pAddr, kvm_run.io.size,
1350 Request::UNCACHEABLE, dataRequestorId());
1351
1352 io_req->setContext(tc->contextId());
1353
1354 PacketPtr pkt = new Packet(io_req, cmd);
1355
1356 pkt->dataStatic(guestData);
1357 delay += dataPort.submitIO(pkt);
1358
1359 guestData += kvm_run.io.size;
1360 }
1361
1362 return delay;
1363}
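// A worked example of the PCI decode above, using illustrative
// values: if the guest writes 0x80000810 (enable=1, bus 0, device 1,
// function 0, offset 0x10) to port 0xCF8 and then executes
// 'in eax, 0xcfc', the access arrives here with port == 0xCFC and
// pAddr becomes x86PciConfigAddress((0x80000810 & 0x7ffffffc) |
// (0xCFC & 0x3)), i.e. the config-space address of that device's
// BAR0 register.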
1364
1365Tick
1366X86KvmCPU::handleKvmExitIRQWindowOpen()
1367{
1368 // We don't need to do anything here since this is caught the next
1369 // time we execute kvmRun(). We still overload the exit event to
1370 // silence the warning about an unhandled exit event.
1371 return 0;
1372}
1373
1374bool
1375X86KvmCPU::archIsDrained() const
1376{
1377 struct kvm_vcpu_events events;
1378
1379 getVCpuEvents(events);
1380
1381 // We could probably handle this by re-inserting interrupts
1382 // that are pending into gem5 on a drain. However, that would
1383 // probably be tricky to do reliably, so we'll just prevent a
1384 // drain if there is anything pending in the
1385 // guest. X86KvmCPU::kvmRunDrain() minimizes the amount of code
1386 // executed in the guest by requesting an interrupt window if
1387 // there are pending interrupts.
1388 const bool pending_events(events.exception.injected ||
1389 events.interrupt.injected ||
1390 events.nmi.injected || events.nmi.pending);
1391
1392 if (pending_events) {
1393 DPRINTF(Drain, "archIsDrained: Pending events: %s %s %s %s\n",
1394 events.exception.injected ? "exception" : "",
1395 events.interrupt.injected ? "interrupt" : "",
1396 events.nmi.injected ? "nmi[i]" : "",
1397 events.nmi.pending ? "nmi[p]" : "");
1398 }
1399
1400 return !pending_events;
1401}
1402
1403void
1404X86KvmCPU::ioctlRun()
1405{
1406 struct kvm_run &kvm_run(*getKvmRunState());
1407
1408 // Synchronize the APIC base and CR8 here since they are present
1409 // in the kvm_run struct, which makes the synchronization really
1410 // cheap.
1411 kvm_run.apic_base = tc->readMiscReg(misc_reg::ApicBase);
1412 kvm_run.cr8 = tc->readMiscReg(misc_reg::Cr8);
1413
1414 BaseKvmCPU::ioctlRun();
1415
1416 tc->setMiscReg(misc_reg::ApicBase, kvm_run.apic_base);
1417 kvm_run.cr8 = tc->readMiscReg(misc_reg::Cr8);
1418}
1419
1420static struct kvm_cpuid_entry2
1421makeKvmCpuid(uint32_t function, uint32_t index,
1422 CpuidResult &result)
1423{
1424 struct kvm_cpuid_entry2 e;
1425 e.function = function;
1426 e.index = index;
1427 e.flags = 0;
1428 e.eax = (uint32_t)result.rax;
1429 e.ebx = (uint32_t)result.rbx;
1430 e.ecx = (uint32_t)result.rcx;
1431 e.edx = (uint32_t)result.rdx;
1432
1433 return e;
1434}
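// Each kvm_cpuid_entry2 simply truncates gem5's 64-bit CpuidResult
// fields to the 32-bit EAX..EDX values the CPUID instruction would
// produce for that (function, index) pair; updateCPUID() below builds
// one such entry per function gem5 reports as supported.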
1435
1436void
1437X86KvmCPU::updateCPUID()
1438{
1439 Kvm::CPUIDVector m5_supported;
1440
1441 /* TODO: We currently don't support any of the functions that
1442 * iterate through data structures in the CPU using an index. It's
1443 * currently not a problem since M5 doesn't expose any of them at
1444 * the moment.
1445 */
1446
1447 /* Basic features */
1448 CpuidResult func0;
1449 X86ISA::doCpuid(tc, 0x0, 0, func0);
1450 for (uint32_t function = 0; function <= func0.rax; ++function) {
1451 CpuidResult cpuid;
1452 uint32_t idx(0);
1453
1454 X86ISA::doCpuid(tc, function, idx, cpuid);
1455 m5_supported.push_back(makeKvmCpuid(function, idx, cpuid));
1456 }
1457
1458 /* Extended features */
1459 CpuidResult efunc0;
1460 X86ISA::doCpuid(tc, 0x80000000, 0, efunc0);
1461 for (uint32_t function = 0x80000000; function <= efunc0.rax; ++function) {
1462 CpuidResult cpuid;
1463 uint32_t idx(0);
1464
1465 X86ISA::doCpuid(tc, function, idx, cpuid);
1466 m5_supported.push_back(makeKvmCpuid(function, idx, cpuid));
1467 }
1468
1469 setCPUID(m5_supported);
1470}
1471
1472void
1473X86KvmCPU::setCPUID(const struct kvm_cpuid2 &cpuid)
1474{
1475 if (ioctl(KVM_SET_CPUID2, (void *)&cpuid) == -1)
1476 panic("KVM: Failed to set guest CPUID2 (errno: %i)\n",
1477 errno);
1478}
1479
1480void
1481X86KvmCPU::setCPUID(const Kvm::CPUIDVector &cpuid)
1482{
1483 auto kvm_cpuid = newVarStruct<struct kvm_cpuid2, struct kvm_cpuid_entry2>(
1484 cpuid.size());
1485
1486 kvm_cpuid->nent = cpuid.size();
1487 std::copy(cpuid.begin(), cpuid.end(), kvm_cpuid->entries);
1488
1489 setCPUID(*kvm_cpuid);
1490}
1491
1492void
1493X86KvmCPU::setMSRs(const struct kvm_msrs &msrs)
1494{
1495 if (ioctl(KVM_SET_MSRS, (void *)&msrs) == -1)
1496 panic("KVM: Failed to set guest MSRs (errno: %i)\n",
1497 errno);
1498}
1499
1500void
1501X86KvmCPU::setMSRs(const KvmMSRVector &msrs)
1502{
1503 auto kvm_msrs = newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(
1504 msrs.size());
1505
1506 kvm_msrs->nmsrs = msrs.size();
1507 std::copy(msrs.begin(), msrs.end(), kvm_msrs->entries);
1508
1509 setMSRs(*kvm_msrs);
1510}
1511
1512void
1513X86KvmCPU::getMSRs(struct kvm_msrs &msrs) const
1514{
1515 if (ioctl(KVM_GET_MSRS, (void *)&msrs) == -1)
1516 panic("KVM: Failed to get guest MSRs (errno: %i)\n",
1517 errno);
1518}
1519
1520
1521void
1522X86KvmCPU::setMSR(uint32_t index, uint64_t value)
1523{
1524 auto kvm_msrs = newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(1);
1525 struct kvm_msr_entry &entry(kvm_msrs->entries[0]);
1526
1527 kvm_msrs->nmsrs = 1;
1528 entry.index = index;
1529 entry.reserved = 0;
1530 entry.data = value;
1531
1532 setMSRs(*kvm_msrs.get());
1533}
1534
1535uint64_t
1536X86KvmCPU::getMSR(uint32_t index) const
1537{
1538 auto kvm_msrs = newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(1);
1539 struct kvm_msr_entry &entry(kvm_msrs->entries[0]);
1540
1541 kvm_msrs->nmsrs = 1;
1542 entry.index = index;
1543 entry.reserved = 0;
1544 entry.data = 0;
1545
1546 getMSRs(*kvm_msrs.get());
1547 return entry.data;
1548}
1549
1550const Kvm::MSRIndexVector &
1551X86KvmCPU::getMsrIntersection() const
1552{
1553 if (cachedMsrIntersection.empty()) {
1554 const Kvm::MSRIndexVector &kvm_msrs = vm->kvm->getSupportedMSRs();
1555
1556 DPRINTF(Kvm, "kvm-x86: Updating MSR intersection\n");
1557 for (auto it = kvm_msrs.cbegin(); it != kvm_msrs.cend(); ++it) {
1558 if (X86ISA::msrMap.find(*it) != X86ISA::msrMap.end()) {
1559 cachedMsrIntersection.push_back(*it);
1560 DPRINTF(Kvm, "kvm-x86: Adding MSR 0x%x\n", *it);
1561 } else {
1562 warn("kvm-x86: MSR (0x%x) unsupported by gem5. Skipping.\n",
1563 *it);
1564 }
1565 }
1566 }
1567
1568 return cachedMsrIntersection;
1569}
1570
1571void
1572X86KvmCPU::getDebugRegisters(struct kvm_debugregs &regs) const
1573{
1574#ifdef KVM_GET_DEBUGREGS
1575 if (ioctl(KVM_GET_DEBUGREGS, &regs) == -1)
1576 panic("KVM: Failed to get guest debug registers\n");
1577#else
1578 panic("KVM: Unsupported getDebugRegisters call.\n");
1579#endif
1580}
1581
1582void
1583X86KvmCPU::setDebugRegisters(const struct kvm_debugregs &regs)
1584{
1585#ifdef KVM_SET_DEBUGREGS
1586 if (ioctl(KVM_SET_DEBUGREGS, (void *)&regs) == -1)
1587 panic("KVM: Failed to set guest debug registers\n");
1588#else
1589 panic("KVM: Unsupported setDebugRegisters call.\n");
1590#endif
1591}
1592
1593void
1594X86KvmCPU::getXCRs(struct kvm_xcrs &regs) const
1595{
1596 if (ioctl(KVM_GET_XCRS, &regs) == -1)
1597 panic("KVM: Failed to get guest XCRs\n");
1598}
1599
1600void
1601X86KvmCPU::setXCRs(const struct kvm_xcrs &regs)
1602{
1603 if (ioctl(KVM_SET_XCRS, (void *)&regs) == -1)
1604 panic("KVM: Failed to set guest XCRs\n");
1605}
1606
1607void
1608X86KvmCPU::getXSave(struct kvm_xsave &xsave) const
1609{
1610 if (ioctl(KVM_GET_XSAVE, &xsave) == -1)
1611 panic("KVM: Failed to get guest XSAVE state\n");
1612}
1613
1614void
1615X86KvmCPU::setXSave(const struct kvm_xsave &xsave)
1616{
1617 if (ioctl(KVM_SET_XSAVE, (void *)&xsave) == -1)
1618 panic("KVM: Failed to set guest XSAVE state\n");
1619}
1620
1621
1622void
1623X86KvmCPU::getVCpuEvents(struct kvm_vcpu_events &events) const
1624{
1625 if (ioctl(KVM_GET_VCPU_EVENTS, &events) == -1)
1626 panic("KVM: Failed to get guest vCPU events\n");
1627}
1628
1629void
1630X86KvmCPU::setVCpuEvents(const struct kvm_vcpu_events &events)
1631{
1632 if (ioctl(KVM_SET_VCPU_EVENTS, (void *)&events) == -1)
1633 panic("KVM: Failed to set guest vCPU events\n");
1634}
1635
1636} // namespace gem5