gem5 [DEVELOP-FOR-25.1]
Loading...
Searching...
No Matches
base.cc
Go to the documentation of this file.
1/*
2 * Copyright (c) 2012, 2015, 2017, 2021 ARM Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Redistribution and use in source and binary forms, with or without
15 * modification, are permitted provided that the following conditions are
16 * met: redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer;
18 * redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution;
21 * neither the name of the copyright holders nor the names of its
22 * contributors may be used to endorse or promote products derived from
23 * this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
28 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
29 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
30 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
31 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
32 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
33 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
34 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
35 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 */
37
38#include "cpu/kvm/base.hh"
39
40#include <linux/kvm.h>
41#include <sys/ioctl.h>
42#include <sys/mman.h>
43#include <unistd.h>
44
45#include <cerrno>
46#include <csignal>
47#include <ostream>
48
49#include "base/compiler.hh"
50#include "debug/Checkpoint.hh"
51#include "debug/Drain.hh"
52#include "debug/Kvm.hh"
53#include "debug/KvmIO.hh"
54#include "debug/KvmRun.hh"
55#include "params/BaseKvmCPU.hh"
56#include "sim/process.hh"
57#include "sim/system.hh"
58
59/* Used by some KVM macros */
60#define PAGE_SIZE pageSize
61
62namespace gem5
63{
64
65BaseKvmCPU::BaseKvmCPU(const BaseKvmCPUParams &params)
66 : BaseCPU(params),
67 vm(nullptr),
69 dataPort(name() + ".dcache_port", this),
70 instPort(name() + ".icache_port", this),
73 kvmStateDirty(false),
75 vcpuID(-1), vcpuFD(-1), vcpuMMapSize(0),
76 _kvmRun(NULL), mmioRing(NULL),
77 pageSize(sysconf(_SC_PAGE_SIZE)),
78 tickEvent([this]{ tick(); }, "BaseKvmCPU tick",
79 false, Event::CPU_Tick_Pri),
80 activeInstPeriod(0),
81 hwCycles(nullptr),
82 hwInstructions(nullptr),
83 perfControlledByTimer(params.usePerfOverflow),
84 hostFactor(params.hostFactor), stats(this),
85 ctrInsts(0)
86{
87 if (pageSize == -1)
88 panic("KVM: Failed to determine host page size (%i)\n",
89 errno);
90
91 if (FullSystem)
92 thread = new SimpleThread(this, 0, params.system, params.mmu,
93 params.isa[0], params.decoder[0]);
94 else
95 thread = new SimpleThread(this, /* thread_num */ 0, params.system,
96 params.workload[0], params.mmu,
97 params.isa[0], params.decoder[0]);
98
99 thread->setStatus(ThreadContext::Halted);
100 tc = thread->getTC();
101 threadContexts.push_back(tc);
102
103 if ((!usePerf) && perfControlledByTimer) {
104 panic("KVM: invalid combination of parameters: cannot use "
105 "perfControlledByTimer without usePerf\n");
106 }
107
108 // If we use perf, we create new PerfKVMCounters
109 if (usePerf) {
110 hwCycles = std::unique_ptr<PerfKvmCounter>(new PerfKvmCounter());
111 hwInstructions = std::unique_ptr<PerfKvmCounter>(new PerfKvmCounter());
112 } else {
113 inform("Using KVM CPU without perf. The stats related to the number "
114 "of cycles and instructions executed by the KVM CPU will not "
115 "be updated. The stats should not be used for performance "
116 "evaluation.");
117 }
118}
119
// NOTE(review): the signature line ("BaseKvmCPU::~BaseKvmCPU()") was lost
// in extraction; this is the destructor body. It unmaps the shared kvm_run
// communication page (if it was ever mapped) and closes the vCPU file
// descriptor obtained from the kernel.
121{
122 if (_kvmRun)
123 munmap(_kvmRun, vcpuMMapSize);
124 close(vcpuFD);
125}
126
127void
// NOTE(review): signature line missing from extraction; by the body this is
// BaseKvmCPU::init(). Fetches the parent KVM VM from the system and reserves
// a vCPU ID for this CPU; the vCPU itself is created later in startup().
129{
130 vm = system->getKvmVM();
131 vcpuID = vm->allocVCPUID();
// Only a single hardware thread per KVM CPU is supported.
133 fatal_if(numThreads != 1, "KVM: Multithreading not supported");
134}
135
136void
138{
139 const BaseKvmCPUParams &p =
140 dynamic_cast<const BaseKvmCPUParams &>(params());
141
142 Kvm &kvm = *vm->kvm;
143
145
146 assert(vcpuFD == -1);
147
148 // Tell the VM that a CPU is about to start.
149 vm->cpuStartup();
150
151 // We can't initialize KVM CPUs in BaseKvmCPU::init() since we are
152 // not guaranteed that the parent KVM VM has initialized at that
153 // point. Initialize virtual CPUs here instead.
154 vcpuFD = vm->createVCPU(vcpuID);
155
156 // Map the KVM run structure
158 _kvmRun = (struct kvm_run *)mmap(0, vcpuMMapSize,
159 PROT_READ | PROT_WRITE, MAP_SHARED,
160 vcpuFD, 0);
161 if (_kvmRun == MAP_FAILED)
162 panic("KVM: Failed to map run data structure\n");
163
164 // Setup a pointer to the MMIO ring buffer if coalesced MMIO is
165 // available. The offset into the KVM's communication page is
166 // provided by the coalesced MMIO capability.
167 int mmioOffset(kvm.capCoalescedMMIO());
168 if (!p.useCoalescedMMIO) {
169 inform("KVM: Coalesced MMIO disabled by config.\n");
170 } else if (mmioOffset) {
171 inform("KVM: Coalesced IO available\n");
172 mmioRing = (struct kvm_coalesced_mmio_ring *)(
173 (char *)_kvmRun + (mmioOffset * pageSize));
174 } else {
175 inform("KVM: Coalesced not supported by host OS\n");
176 }
177
180 }, name(), true), curTick());
181}
182
189
190Tick
192{
193 if (cpu->system->isAtomicMode()) {
194 Tick delay = sendAtomic(pkt);
195 delete pkt;
196 return delay;
197 } else {
198 if (pendingMMIOPkts.empty() && sendTimingReq(pkt)) {
200 } else {
201 pendingMMIOPkts.push(pkt);
202 }
203 // Return value is irrelevant for timing-mode accesses.
204 return 0;
205 }
206}
207
208bool
210{
211 DPRINTF(KvmIO, "KVM: Finished timing request\n");
212
213 delete pkt;
215
216 // We can switch back into KVM when all pending and in-flight MMIO
217 // operations have completed.
218 if (!(activeMMIOReqs || pendingMMIOPkts.size())) {
219 DPRINTF(KvmIO, "KVM: Finished all outstanding timing requests\n");
220 cpu->finishMMIOPending();
221 }
222 return true;
223}
224
225void
227{
228 DPRINTF(KvmIO, "KVM: Retry for timing request\n");
229
230 assert(pendingMMIOPkts.size());
231
232 // Assuming that we can issue infinite requests this cycle is a bit
233 // unrealistic, but it's not worth modeling something more complex in
234 // KVM.
235 while (pendingMMIOPkts.size() && sendTimingReq(pendingMMIOPkts.front())) {
236 pendingMMIOPkts.pop();
238 }
239}
240
241void
243{
244 assert(_status == RunningMMIOPending);
245 assert(!tickEvent.scheduled());
246
249}
250
251void
253{
254 // Do thread-specific initialization. We need to setup signal
255 // delivery for counters and timers from within the thread that
256 // will execute the event queue to ensure that signals are
257 // delivered to the right threads.
258 const BaseKvmCPUParams &p =
259 dynamic_cast<const BaseKvmCPUParams &>(params());
260
261 vcpuThread = pthread_self();
262
263 // Setup signal handlers. This has to be done after the vCPU is
264 // created since it manipulates the vCPU signal mask.
266
268
269 if (p.usePerfOverflow) {
270 runTimer.reset(new PerfKvmTimer(*hwCycles,
272 p.hostFactor,
273 p.hostFreq));
274 } else {
275 runTimer.reset(new PosixKvmTimer(KVM_KICK_SIGNAL, CLOCK_MONOTONIC,
276 p.hostFactor,
277 p.hostFreq));
278 }
279}
280
282 : statistics::Group(parent),
283 ADD_STAT(numVMExits, statistics::units::Count::get(),
284 "total number of KVM exits"),
286 "number of KVM entries to finalize pending operations"),
287 ADD_STAT(numExitSignal, statistics::units::Count::get(),
288 "exits due to signal delivery"),
289 ADD_STAT(numMMIO, statistics::units::Count::get(),
290 "number of VM exits due to memory mapped IO"),
292 "number of coalesced memory mapped IO requests"),
293 ADD_STAT(numIO, statistics::units::Count::get(),
294 "number of VM exits due to legacy IO"),
295 ADD_STAT(numHalt, statistics::units::Count::get(),
296 "number of VM exits due to wait for interrupt instructions"),
297 ADD_STAT(numInterrupts, statistics::units::Count::get(),
298 "number of interrupts delivered"),
299 ADD_STAT(numHypercalls, statistics::units::Count::get(), "number of hypercalls")
300{
301}
302
303void
// NOTE(review): signature line dropped by extraction; this is
// BaseKvmCPU::serializeThread(CheckpointOut &cp, ThreadID tid).
// Serializes the (single) thread context into a checkpoint. The CPU must
// be drained (Idle) first so the cached thread context is up to date.
305{
306 if (debug::Checkpoint) {
307 DPRINTF(Checkpoint, "KVM: Serializing thread %i:\n", tid);
308 dump();
309 }
310
// Only thread 0 exists, and serialization is only valid when drained.
311 assert(tid == 0);
312 assert(_status == Idle);
313 thread->serialize(cp);
314}
315
316void
// NOTE(review): signature line dropped by extraction; this is
// BaseKvmCPU::unserializeThread(CheckpointIn &cp, ThreadID tid).
// Restores the thread context from a checkpoint and flags it dirty so the
// state is pushed into KVM before the guest next runs.
318{
319 DPRINTF(Checkpoint, "KVM: Unserialize thread %i:\n", tid);
320
321 assert(tid == 0);
322 assert(_status == Idle);
323 thread->unserialize(cp);
// The restored gem5 thread context now differs from KVM's register state.
324 threadContextDirty = true;
325}
326
329{
330 if (switchedOut())
331 return DrainState::Drained;
332
333 DPRINTF(Drain, "BaseKvmCPU::drain\n");
334
335 // The event queue won't be locked when calling drain since that's
336 // not done from an event. Lock the event queue here to make sure
337 // that scoped migrations continue to work if we need to
338 // synchronize the thread context.
339 std::lock_guard<EventQueue> lock(*this->eventQueue());
340
341 switch (_status) {
342 case Running:
343 // The base KVM code is normally ready when it is in the
344 // Running state, but the architecture specific code might be
345 // of a different opinion. This may happen when the CPU been
346 // notified of an event that hasn't been accepted by the vCPU
347 // yet.
348 if (!archIsDrained())
350
351 // The state of the CPU is consistent, so we don't need to do
352 // anything special to drain it. We simply de-schedule the
353 // tick event and enter the Idle state to prevent nasty things
354 // like MMIOs from happening.
355 if (tickEvent.scheduled())
357 _status = Idle;
358
359 [[fallthrough]];
360 case Idle:
361 // Idle, no need to drain
362 assert(!tickEvent.scheduled());
363
364 // Sync the thread context here since we'll need it when we
365 // switch CPUs or checkpoint the CPU.
367
368 return DrainState::Drained;
369
371 // The CPU has just requested a service that was handled in
372 // the RunningService state, but the results have still not
373 // been reported to the CPU. Now, we /could/ probably just
374 // update the register state ourselves instead of letting KVM
375 // handle it, but that would be tricky. Instead, we enter KVM
376 // and let it do its stuff.
377 DPRINTF(Drain, "KVM CPU is waiting for service completion, "
378 "requesting drain.\n");
380
382 // We need to drain since there are in-flight timing accesses
383 DPRINTF(Drain, "KVM CPU is waiting for timing accesses to complete, "
384 "requesting drain.\n");
386
387 case RunningService:
388 // We need to drain since the CPU is waiting for service (e.g., MMIOs)
389 DPRINTF(Drain, "KVM CPU is waiting for service, requesting drain.\n");
391
392 default:
393 panic("KVM: Unhandled CPU state in drain()\n");
394 return DrainState::Drained;
395 }
396}
397
398void
400{
401 assert(!tickEvent.scheduled());
402
403 // We might have been switched out. In that case, we don't need to
404 // do anything.
405 if (switchedOut())
406 return;
407
408 DPRINTF(Kvm, "drainResume\n");
410
411 /* The simulator may have terminated the threads servicing event
412 * queues. In that case, we need to re-initialize the new
413 * threads. */
416 }, name(), true), curTick());
417
418 // The tick event is de-scheduled as a part of the draining
419 // process. Re-schedule it if the thread context is active.
420 if (tc->status() == ThreadContext::Active) {
423 } else {
424 _status = Idle;
425 }
426}
427
428void
430{
431 // We should have drained prior to forking, which means that the
432 // tick event shouldn't be scheduled and the CPU is idle.
433 assert(!tickEvent.scheduled());
434 assert(_status == Idle);
435
436 if (vcpuFD != -1) {
437 if (close(vcpuFD) == -1)
438 warn("kvm CPU: notifyFork failed to close vcpuFD\n");
439
440 if (_kvmRun)
441 munmap(_kvmRun, vcpuMMapSize);
442
443 vcpuFD = -1;
444 _kvmRun = NULL;
445
446 if (usePerf) {
447 hwInstructions->detach();
448 hwCycles->detach();
449 }
450 }
451}
452
453void
455{
456 DPRINTF(Kvm, "switchOut\n");
457
459
460 // We should have drained prior to executing a switchOut, which
461 // means that the tick event shouldn't be scheduled and the CPU is
462 // idle.
463 assert(!tickEvent.scheduled());
464 assert(_status == Idle);
465}
466
467void
469{
470 DPRINTF(Kvm, "takeOverFrom\n");
471
473
474 // We should have drained prior to executing a switchOut, which
475 // means that the tick event shouldn't be scheduled and the CPU is
476 // idle.
477 assert(!tickEvent.scheduled());
478 assert(_status == Idle);
479 assert(threadContexts.size() == 1);
480
481 // Force an update of the KVM state here instead of flagging the
482 // TC as dirty. This is not ideal from a performance point of
483 // view, but it makes debugging easier as it allows meaningful KVM
484 // state to be dumped before and after a takeover.
486 threadContextDirty = false;
487}
488
489void
// NOTE(review): signature line dropped by extraction; this is
// BaseKvmCPU::verifyMemoryMode(). KVM CPUs execute on real hardware and
// cannot model caches, so the memory system must bypass them.
491{
492 if (!(system->bypassCaches())) {
493 fatal("The KVM-based CPUs requires the memory system to be in the "
494 "'noncaching' mode.\n");
495 }
496}
497
498void
500{
501 DPRINTF(Kvm, "wakeup()\n");
502 // This method might have been called from another
503 // context. Migrate to this SimObject's event queue when
504 // delivering the wakeup signal.
506
507 // Kick the vCPU to get it to come out of KVM.
508 kick();
509
510 if (thread->status() != ThreadContext::Suspended)
511 return;
512
513 thread->activate();
514}
515
516void
518{
519 DPRINTF(Kvm, "ActivateContext %d\n", thread_num);
520
521 assert(thread_num == 0);
522 assert(thread);
523
524 assert(_status == Idle);
525 assert(!tickEvent.scheduled());
526
527 baseStats.numCycles +=
528 ticksToCycles(thread->lastActivate - thread->lastSuspend);
529
532}
533
534
535void
537{
538 DPRINTF(Kvm, "SuspendContext %d\n", thread_num);
539
540 assert(thread_num == 0);
541 assert(thread);
542
543 if (_status == Idle)
544 return;
545
547
548 // The tick event may no be scheduled if the quest has requested
549 // the monitor to wait for interrupts. The normal CPU models can
550 // get their tick events descheduled by quiesce instructions, but
551 // that can't happen here.
552 if (tickEvent.scheduled())
554
555 _status = Idle;
556}
557
558void
// NOTE(review): signature line dropped by extraction; this is
// BaseKvmCPU::deallocateContext(ThreadID thread_num), currently an alias
// for suspendContext().
560{
561 // for now, these are equivalent
562 suspendContext(thread_num);
563}
564
565void
// NOTE(review): signature line dropped by extraction; this is
// BaseKvmCPU::haltContext(ThreadID thread_num), currently an alias for
// suspendContext().
567{
568 // for now, these are equivalent
569 suspendContext(thread_num);
571}
572
// NOTE(review): return type and signature lines dropped by extraction;
// this is BaseKvmCPU::getContext(int tn), returning the single cached
// ThreadContext pointer. Only thread 0 exists.
575{
576 assert(tn == 0);
578 return tc;
579}
580
581
// NOTE(review): return type and signature lines dropped by extraction;
// this is BaseKvmCPU::totalInsts(). Returns the running instruction
// counter accumulated from the perf hardware counters in kvmRun().
584{
585 return ctrInsts;
586}
587
// NOTE(review): return type and signature lines dropped by extraction;
// this is BaseKvmCPU::totalOps(). KVM cannot count micro-ops, so this
// reuses the instruction count and warns (once) about the approximation.
590{
591 hack_once("Pretending totalOps is equivalent to totalInsts()\n");
592 return ctrInsts;
593}
594
595void
// NOTE(review): signature line dropped by extraction; this is
// BaseKvmCPU::dump(). Architecture-specific subclasses are expected to
// override this to print guest register state; the base does nothing.
597{
598 inform("State dumping not implemented.");
599}
600
601void
603{
604 Tick delay(0);
605 assert(_status != Idle && _status != RunningMMIOPending);
606
607 switch (_status) {
608 case RunningService:
609 // handleKvmExit() will determine the next state of the CPU
610 delay = handleKvmExit();
611
612 if (tryDrain())
613 _status = Idle;
614 break;
615
617 case Running: {
618 auto &queue = thread->comInstEventQueue;
619 const uint64_t nextInstEvent(
620 queue.empty() ? MaxTick : queue.nextTick());
621 // Enter into KVM and complete pending IO instructions if we
622 // have an instruction event pending.
623 const Tick ticksToExecute(
624 nextInstEvent > ctrInsts ?
625 curEventQueue()->nextTick() - curTick() : 0);
626
627 if (alwaysSyncTC)
628 threadContextDirty = true;
629
630 // We might need to update the KVM state.
631 syncKvmState();
632
633 // Setup any pending instruction count breakpoints using
634 // PerfEvent if we are going to execute more than just an IO
635 // completion.
636 if (ticksToExecute > 0)
638
639 DPRINTF(KvmRun, "Entering KVM...\n");
641 // Force an immediate exit from KVM after completing
642 // pending operations. The architecture-specific code
643 // takes care to run until it is in a state where it can
644 // safely be drained.
645 delay = kvmRunDrain();
646 } else {
647 delay = kvmRun(ticksToExecute);
648 }
649
650 // The CPU might have been suspended before entering into
651 // KVM. Assume that the CPU was suspended /before/ entering
652 // into KVM and skip the exit handling.
653 if (_status == Idle)
654 break;
655
656 // Entering into KVM implies that we'll have to reload the thread
657 // context from KVM if we want to access it. Flag the KVM state as
658 // dirty with respect to the cached thread context.
659 kvmStateDirty = true;
660
661 if (alwaysSyncTC)
663
664 // Enter into the RunningService state unless the
665 // simulation was stopped by a timer.
666 if (_kvmRun->exit_reason != KVM_EXIT_INTR) {
668 } else {
669 ++stats.numExitSignal;
671 }
672
673 // Service any pending instruction events. The vCPU should
674 // have exited in time for the event using the instruction
675 // counter configured by setupInstStop().
676 queue.serviceEvents(ctrInsts);
677
678 if (tryDrain())
679 _status = Idle;
680 } break;
681
682 default:
683 panic("BaseKvmCPU entered tick() in an illegal state (%i)\n",
684 _status);
685 }
686
687 // Schedule a new tick if we are still running
688 if (_status != Idle && _status != RunningMMIOPending) {
689 if (_kvmRun->exit_reason == KVM_EXIT_INTR && runTimer->expired())
691 curEventQueue()->nextTick() - curTick() + 1)));
692 else
694 }
695}
696
697Tick
699{
700 // By default, the only thing we need to drain is a pending IO
701 // operation which assumes that we are in the
702 // RunningServiceCompletion or RunningMMIOPending state.
705
706 // Deliver the data from the pending IO operation and immediately
707 // exit.
708 return kvmRun(0);
709}
710
711uint64_t
// NOTE(review): signature line dropped by extraction; this is
// BaseKvmCPU::getHostCycles(). Reads the host cycle perf counter when
// perf is in use, otherwise reports 0 (stats are then not meaningful).
713{
714 if (usePerf)
715 return hwCycles->read();
716 return 0;
717}
718
719Tick
721{
722 Tick ticksExecuted;
723 fatal_if(vcpuFD == -1,
724 "Trying to run a KVM CPU in a forked child process. "
725 "This is not supported.\n");
726 DPRINTF(KvmRun, "KVM: Executing for %i ticks\n", ticks);
727
728 if (ticks == 0) {
729 // Settings ticks == 0 is a special case which causes an entry
730 // into KVM that finishes pending operations (e.g., IO) and
731 // then immediately exits.
732 DPRINTF(KvmRun, "KVM: Delivering IO without full guest entry\n");
733
734 ++stats.numVMHalfEntries;
735
736 // Send a KVM_KICK_SIGNAL to the vCPU thread (i.e., this
737 // thread). The KVM control signal is masked while executing
738 // in gem5 and gets unmasked temporarily as when entering
739 // KVM. See setSignalMask() and setupSignalHandler().
740 kick();
741
742 // Start the vCPU. KVM will check for signals after completing
743 // pending operations (IO). Since the KVM_KICK_SIGNAL is
744 // pending, this forces an immediate exit to gem5 again. We
745 // don't bother to setup timers since this shouldn't actually
746 // execute any code (other than completing half-executed IO
747 // instructions) in the guest.
748 ioctlRun();
749
750 // We always execute at least one cycle to prevent the
751 // BaseKvmCPU::tick() to be rescheduled on the same tick
752 // twice.
753 ticksExecuted = clockPeriod();
754 } else {
755 // This method is executed as a result of a tick event. That
756 // means that the event queue will be locked when entering the
757 // method. We temporarily unlock the event queue to allow
758 // other threads to steal control of this thread to inject
759 // interrupts. They will typically lock the queue and then
760 // force an exit from KVM by kicking the vCPU.
762
763 if (ticks < runTimer->resolution()) {
764 DPRINTF(KvmRun, "KVM: Adjusting tick count (%i -> %i)\n",
765 ticks, runTimer->resolution());
766 ticks = runTimer->resolution();
767 }
768
769 // Get hardware statistics after synchronizing contexts. The KVM
770 // state update might affect guest cycle counters.
771 uint64_t baseCycles(getHostCycles());
772 uint64_t baseInstrs = 0;
773 if (usePerf) {
774 baseInstrs = hwInstructions->read();
775 }
776
777 // Arm the run timer and start the cycle timer if it isn't
778 // controlled by the overflow timer. Starting/stopping the cycle
779 // timer automatically starts the other perf timers as they are in
780 // the same counter group.
781 runTimer->arm(ticks);
782 if (usePerf && (!perfControlledByTimer)) {
783 hwCycles->start();
784 }
785
786 ioctlRun();
787
788 runTimer->disarm();
789 if (usePerf && (!perfControlledByTimer)) {
790 hwCycles->stop();
791 }
792
793 // The control signal may have been delivered after we exited
794 // from KVM. It will be pending in that case since it is
795 // masked when we aren't executing in KVM. Discard it to make
796 // sure we don't deliver it immediately next time we try to
797 // enter into KVM.
799
800 const uint64_t hostCyclesExecuted(getHostCycles() - baseCycles);
801 const uint64_t simCyclesExecuted(hostCyclesExecuted * hostFactor);
802 uint64_t instsExecuted = 0;
803 if (usePerf) {
804 instsExecuted = hwInstructions->read() - baseInstrs;
805 }
806 ticksExecuted = runTimer->ticksFromHostCycles(hostCyclesExecuted);
807
808 /* Update statistics */
809 baseStats.numCycles += simCyclesExecuted;
810 baseStats.numInsts += instsExecuted;
811 ctrInsts += instsExecuted;
812
813 const ThreadID tid = thread->threadId();
814 const bool in_user_mode = thread->getIsaPtr()->inUserMode();
815 commitStats[tid]->numInsts += instsExecuted;
816 if (in_user_mode) {
817 commitStats[tid]->numUserInsts += instsExecuted;
818 }
819
820 DPRINTF(KvmRun,
821 "KVM: Executed %i instructions in %i cycles "
822 "(%i ticks, sim cycles: %i).\n",
823 instsExecuted, hostCyclesExecuted, ticksExecuted, simCyclesExecuted);
824 }
825
826 ++stats.numVMExits;
827
828 return ticksExecuted + flushCoalescedMMIO();
829}
830
831void
// NOTE(review): signature line dropped by extraction; this is
// BaseKvmCPU::kvmNonMaskableInterrupt(). Injects an NMI into the guest
// via the KVM_NMI ioctl and counts it in the interrupt statistics.
833{
834 ++stats.numInterrupts;
835 if (ioctl(KVM_NMI) == -1)
836 panic("KVM: Failed to deliver NMI to virtual CPU\n");
837}
838
839void
840BaseKvmCPU::kvmInterrupt(const struct kvm_interrupt &interrupt)
841{
842 ++stats.numInterrupts;
843 if (ioctl(KVM_INTERRUPT, (void *)&interrupt) == -1)
844 panic("KVM: Failed to deliver interrupt to virtual CPU\n");
845}
846
847void
848BaseKvmCPU::getRegisters(struct kvm_regs &regs) const
849{
850 if (ioctl(KVM_GET_REGS, &regs) == -1)
851 panic("KVM: Failed to get guest registers\n");
852}
853
854void
855BaseKvmCPU::setRegisters(const struct kvm_regs &regs)
856{
857 if (ioctl(KVM_SET_REGS, (void *)&regs) == -1)
858 panic("KVM: Failed to set guest registers\n");
859}
860
861void
862BaseKvmCPU::getSpecialRegisters(struct kvm_sregs &regs) const
863{
864 if (ioctl(KVM_GET_SREGS, &regs) == -1)
865 panic("KVM: Failed to get guest special registers\n");
866}
867
868void
869BaseKvmCPU::setSpecialRegisters(const struct kvm_sregs &regs)
870{
871 if (ioctl(KVM_SET_SREGS, (void *)&regs) == -1)
872 panic("KVM: Failed to set guest special registers\n");
873}
874
875void
876BaseKvmCPU::getFPUState(struct kvm_fpu &state) const
877{
878 if (ioctl(KVM_GET_FPU, &state) == -1)
879 panic("KVM: Failed to get guest FPU state\n");
880}
881
882void
883BaseKvmCPU::setFPUState(const struct kvm_fpu &state)
884{
885 if (ioctl(KVM_SET_FPU, (void *)&state) == -1)
886 panic("KVM: Failed to set guest FPU state\n");
887}
888
889
890void
891BaseKvmCPU::setOneReg(uint64_t id, const void *addr)
892{
893#ifdef KVM_SET_ONE_REG
894 struct kvm_one_reg reg;
895 reg.id = id;
896 reg.addr = (uint64_t)addr;
897
898 if (ioctl(KVM_SET_ONE_REG, &reg) == -1) {
899 panic("KVM: Failed to set register (0x%x) value (errno: %i)\n",
900 id, errno);
901 }
902#else
903 panic("KVM_SET_ONE_REG is unsupported on this platform.\n");
904#endif
905}
906
907void
908BaseKvmCPU::getOneReg(uint64_t id, void *addr) const
909{
910#ifdef KVM_GET_ONE_REG
911 struct kvm_one_reg reg;
912 reg.id = id;
913 reg.addr = (uint64_t)addr;
914
915 if (ioctl(KVM_GET_ONE_REG, &reg) == -1) {
916 panic("KVM: Failed to get register (0x%x) value (errno: %i)\n",
917 id, errno);
918 }
919#else
920 panic("KVM_GET_ONE_REG is unsupported on this platform.\n");
921#endif
922}
923
924std::string
926{
927#ifdef KVM_GET_ONE_REG
928 std::ostringstream ss;
929
930 ss.setf(std::ios::hex, std::ios::basefield);
931 ss.setf(std::ios::showbase);
932#define HANDLE_INTTYPE(len) \
933 case KVM_REG_SIZE_U ## len: { \
934 uint ## len ## _t value; \
935 getOneReg(id, &value); \
936 ss << value; \
937 } break
938
939#define HANDLE_ARRAY(len) \
940 case KVM_REG_SIZE_U ## len: { \
941 uint8_t value[len / 8]; \
942 getOneReg(id, value); \
943 ccprintf(ss, "[0x%x", value[0]); \
944 for (int i = 1; i < len / 8; ++i) \
945 ccprintf(ss, ", 0x%x", value[i]); \
946 ccprintf(ss, "]"); \
947 } break
948
949 switch (id & KVM_REG_SIZE_MASK) {
950 HANDLE_INTTYPE(8);
951 HANDLE_INTTYPE(16);
952 HANDLE_INTTYPE(32);
953 HANDLE_INTTYPE(64);
954 HANDLE_ARRAY(128);
955 HANDLE_ARRAY(256);
956 HANDLE_ARRAY(512);
957 HANDLE_ARRAY(1024);
958 default:
959 ss << "??";
960 }
961
962#undef HANDLE_INTTYPE
963#undef HANDLE_ARRAY
964
965 return ss.str();
966#else
967 panic("KVM_GET_ONE_REG is unsupported on this platform.\n");
968#endif
969}
970
971void
973{
974 if (!kvmStateDirty)
975 return;
976
977 assert(!threadContextDirty);
978
980 kvmStateDirty = false;
981}
982
983void
985{
987 return;
988
989 assert(!kvmStateDirty);
990
992 threadContextDirty = false;
993}
994
995Tick
997{
998 DPRINTF(KvmRun, "handleKvmExit (exit_reason: %i)\n", _kvmRun->exit_reason);
999 assert(_status == RunningService);
1000
1001 // Switch into the running state by default. Individual handlers
1002 // can override this.
1003 _status = Running;
1004 switch (_kvmRun->exit_reason) {
1005 case KVM_EXIT_UNKNOWN:
1006 return handleKvmExitUnknown();
1007
1008 case KVM_EXIT_EXCEPTION:
1009 return handleKvmExitException();
1010
1011 case KVM_EXIT_IO:
1012 {
1013 ++stats.numIO;
1014 Tick ticks = handleKvmExitIO();
1015 _status = dataPort.nextIOState();
1016 return ticks;
1017 }
1018
1019 case KVM_EXIT_HYPERCALL:
1020 ++stats.numHypercalls;
1021 return handleKvmExitHypercall();
1022
1023 case KVM_EXIT_HLT:
1024 /* The guest has halted and is waiting for interrupts */
1025 DPRINTF(Kvm, "handleKvmExitHalt\n");
1026 ++stats.numHalt;
1027
1028 // Suspend the thread until the next interrupt arrives
1029 thread->suspend();
1030
1031 // This is actually ignored since the thread is suspended.
1032 return 0;
1033
1034 case KVM_EXIT_MMIO:
1035 {
1036 /* Service memory mapped IO requests */
1037 DPRINTF(KvmIO, "KVM: Handling MMIO (w: %u, addr: 0x%x, len: %u)\n",
1038 _kvmRun->mmio.is_write,
1039 _kvmRun->mmio.phys_addr, _kvmRun->mmio.len);
1040
1041 ++stats.numMMIO;
1042 Tick ticks = doMMIOAccess(_kvmRun->mmio.phys_addr, _kvmRun->mmio.data,
1043 _kvmRun->mmio.len, _kvmRun->mmio.is_write);
1044 // doMMIOAccess could have triggered a suspend, in which case we don't
1045 // want to overwrite the _status.
1046 if (_status != Idle)
1047 _status = dataPort.nextIOState();
1048 return ticks;
1049 }
1050
1051 case KVM_EXIT_IRQ_WINDOW_OPEN:
1053
1054 case KVM_EXIT_FAIL_ENTRY:
1055 return handleKvmExitFailEntry();
1056
1057 case KVM_EXIT_INTR:
1058 /* KVM was interrupted by a signal, restart it in the next
1059 * tick. */
1060 return 0;
1061
1062 case KVM_EXIT_INTERNAL_ERROR:
1063 panic("KVM: Internal error (suberror: %u)\n",
1064 _kvmRun->internal.suberror);
1065
1066 default:
1067 dump();
1068 panic("KVM: Unexpected exit (exit_reason: %u)\n", _kvmRun->exit_reason);
1069 }
1070}
1071
1072Tick
// NOTE(review): signature line dropped by extraction; this is the base
// handler for KVM_EXIT_IO. Legacy port IO must be handled by an
// architecture-specific subclass, so the base implementation aborts with
// the details of the unhandled access.
1074{
1075 panic("KVM: Unhandled guest IO (dir: %i, size: %i, port: 0x%x, count: %i)\n",
1076 _kvmRun->io.direction, _kvmRun->io.size,
1077 _kvmRun->io.port, _kvmRun->io.count);
1078}
1079
1080Tick
// NOTE(review): signature line dropped by extraction; this is the base
// handler for KVM_EXIT_HYPERCALL. Hypercalls are architecture specific,
// so the base implementation aborts.
1082{
1083 panic("KVM: Unhandled hypercall\n");
1084}
1085
1086Tick
// NOTE(review): signature line dropped by extraction; this is the base
// handler for KVM_EXIT_IRQ_WINDOW_OPEN. The base class only warns and
// resumes; subclasses that request interrupt windows should override it.
1088{
1089 warn("KVM: Unhandled IRQ window.\n");
1090 return 0;
1091}
1092
1093
1094Tick
// NOTE(review): signature line dropped by extraction; this is the base
// handler for KVM_EXIT_UNKNOWN. Dumps whatever state the subclass can
// print, then aborts with the hardware exit reason.
1096{
1097 dump();
1098 panic("KVM: Unknown error when starting vCPU (hw reason: 0x%llx)\n",
1099 _kvmRun->hw.hardware_exit_reason);
1100}
1101
1102Tick
// NOTE(review): signature line dropped by extraction; this is the base
// handler for KVM_EXIT_EXCEPTION. Dumps state and aborts with the
// exception number and error code reported by the kernel.
1104{
1105 dump();
1106 panic("KVM: Got exception when starting vCPU "
1107 "(exception: %u, error_code: %u)\n",
1108 _kvmRun->ex.exception, _kvmRun->ex.error_code);
1109}
1110
1111Tick
// NOTE(review): signature line dropped by extraction; this is the base
// handler for KVM_EXIT_FAIL_ENTRY. Dumps state and aborts with the
// hardware entry failure reason reported by the kernel.
1113{
1114 dump();
1115 panic("KVM: Failed to enter virtualized mode (hw reason: 0x%llx)\n",
1116 _kvmRun->fail_entry.hardware_entry_failure_reason);
1117}
1118
1119Tick
1120BaseKvmCPU::doMMIOAccess(Addr paddr, void *data, int size, bool write)
1121{
1122 ThreadContext *tc(thread->getTC());
1124
1125 RequestPtr mmio_req = std::make_shared<Request>(
1126 paddr, size, Request::UNCACHEABLE, dataRequestorId());
1127
1128 mmio_req->setContext(tc->contextId());
1129 // Some architectures do need to massage physical addresses a bit
1130 // before they are inserted into the memory system. This enables
1131 // APIC accesses on x86 and m5ops where supported through a MMIO
1132 // interface.
1133 BaseMMU::Mode access_type(write ? BaseMMU::Write : BaseMMU::Read);
1134 Fault fault(tc->getMMUPtr()->finalizePhysical(mmio_req, tc, access_type));
1135 if (fault != NoFault)
1136 warn("Finalization of MMIO address failed: %s\n", fault->name());
1137
1138
1139 const MemCmd cmd(write ? MemCmd::WriteReq : MemCmd::ReadReq);
1140 PacketPtr pkt = new Packet(mmio_req, cmd);
1141 pkt->dataStatic(data);
1142
1143 if (mmio_req->isLocalAccess()) {
1144 // Since the PC has already been advanced by KVM, set the next
1145 // PC to the current PC. KVM doesn't use that value, and that
1146 // way any gem5 op or syscall which needs to know what the next
1147 // PC is will be able to get a reasonable value.
1148 //
1149 // We won't be able to rewind the current PC to the "correct"
1150 // value without figuring out how big the current instruction
1151 // is, and that's probably not worth the effort
1152 std::unique_ptr<PCStateBase> pc(tc->pcState().clone());
1153 stutterPC(*pc);
1154 tc->pcState(*pc);
1155 // We currently assume that there is no need to migrate to a
1156 // different event queue when doing local accesses. Currently, they
1157 // are only used for m5ops, so it should be a valid assumption.
1158 const Cycles ipr_delay = mmio_req->localAccessor(tc, pkt);
1159 threadContextDirty = true;
1160 delete pkt;
1161 return clockPeriod() * ipr_delay;
1162 } else {
1163 // Temporarily lock and migrate to the device event queue to
1164 // prevent races in multi-core mode.
1166
1167 return dataPort.submitIO(pkt);
1168 }
1169}
1170
1171void
1173{
1174 std::unique_ptr<struct kvm_signal_mask, void(*)(void *p)>
1175 kvm_mask(nullptr, [](void *p) { operator delete(p); });
1176
1177 if (mask) {
1178 kvm_mask.reset((struct kvm_signal_mask *)operator new(
1179 sizeof(struct kvm_signal_mask) + sizeof(*mask)));
1180 // The kernel and the user-space headers have different ideas
1181 // about the size of sigset_t. This seems like a massive hack,
1182 // but is actually what qemu does.
1183 assert(sizeof(*mask) >= 8);
1184 kvm_mask->len = 8;
1185 memcpy(kvm_mask->sigset, mask, kvm_mask->len);
1186 }
1187
1188 if (ioctl(KVM_SET_SIGNAL_MASK, (void *)kvm_mask.get()) == -1)
1189 panic("KVM: Failed to set vCPU signal mask (errno: %i)\n",
1190 errno);
1191}
1192
1193int
1194BaseKvmCPU::ioctl(int request, long p1) const
1195{
1196 if (vcpuFD == -1)
1197 panic("KVM: CPU ioctl called before initialization\n");
1198
1199 return ::ioctl(vcpuFD, request, p1);
1200}
1201
1202Tick
1204{
1205 if (!mmioRing)
1206 return 0;
1207
1208 DPRINTF(KvmIO, "KVM: Flushing the coalesced MMIO ring buffer\n");
1209
1210 // TODO: We might need to do synchronization when we start to
1211 // support multiple CPUs
1212 Tick ticks(0);
1213 while (mmioRing->first != mmioRing->last) {
1214 struct kvm_coalesced_mmio &ent(
1215 mmioRing->coalesced_mmio[mmioRing->first]);
1216
1217 DPRINTF(KvmIO, "KVM: Handling coalesced MMIO (addr: 0x%x, len: %u)\n",
1218 ent.phys_addr, ent.len);
1219
1220 ++stats.numCoalescedMMIO;
1221 ticks += doMMIOAccess(ent.phys_addr, ent.data, ent.len, true);
1222
1223 mmioRing->first = (mmioRing->first + 1) % KVM_COALESCED_MMIO_MAX;
1224 }
1225
1226 return ticks;
1227}
1228
/**
 * Dummy handler for KVM kick signals.
 *
 * The body is intentionally empty: merely delivering the signal to
 * the vCPU thread forces an exit from KVM back into gem5 (see
 * setupSignalHandler()); no work is needed in the handler itself.
 */
static void
onKickSignal(int signo, siginfo_t *si, void *data)
{
}
1243
1244void
1246{
1247 struct sigaction sa;
1248
1249 memset(&sa, 0, sizeof(sa));
1250 sa.sa_sigaction = onKickSignal;
1251 sa.sa_flags = SA_SIGINFO | SA_RESTART;
1252 if (sigaction(KVM_KICK_SIGNAL, &sa, NULL) == -1)
1253 panic("KVM: Failed to setup vCPU timer signal handler\n");
1254
1255 sigset_t sigset;
1256 if (pthread_sigmask(SIG_BLOCK, NULL, &sigset) == -1)
1257 panic("KVM: Failed get signal mask\n");
1258
1259 // Request KVM to setup the same signal mask as we're currently
1260 // running with except for the KVM control signal. We'll sometimes
1261 // need to raise the KVM_KICK_SIGNAL to cause immediate exits from
1262 // KVM after servicing IO requests. See kvmRun().
1263 sigdelset(&sigset, KVM_KICK_SIGNAL);
1264 setSignalMask(&sigset);
1265
1266 // Mask our control signals so they aren't delivered unless we're
1267 // actually executing inside KVM.
1268 sigaddset(&sigset, KVM_KICK_SIGNAL);
1269 if (pthread_sigmask(SIG_SETMASK, &sigset, NULL) == -1)
1270 panic("KVM: Failed mask the KVM control signals\n");
1271}
1272
1273bool
1275{
1276 int discardedSignal;
1277
1278 // Setting the timeout to zero causes sigtimedwait to return
1279 // immediately.
1280 struct timespec timeout;
1281 timeout.tv_sec = 0;
1282 timeout.tv_nsec = 0;
1283
1284 sigset_t sigset;
1285 sigemptyset(&sigset);
1286 sigaddset(&sigset, signum);
1287
1288 do {
1289 discardedSignal = sigtimedwait(&sigset, NULL, &timeout);
1290 } while (discardedSignal == -1 && errno == EINTR);
1291
1292 if (discardedSignal == signum)
1293 return true;
1294 else if (discardedSignal == -1 && errno == EAGAIN)
1295 return false;
1296 else
1297 panic("Unexpected return value from sigtimedwait: %i (errno: %i)\n",
1298 discardedSignal, errno);
1299}
1300
1301void
1303{
1304 DPRINTF(Kvm, "Attaching cycle counter...\n");
1305 PerfKvmCounterConfig cfgCycles(PERF_TYPE_HARDWARE,
1306 PERF_COUNT_HW_CPU_CYCLES);
1307 cfgCycles.disabled(true)
1308 .pinned(true);
1309
1310 // Try to exclude the host. We set both exclude_hv and
1311 // exclude_host since different architectures use slightly
1312 // different APIs in the kernel.
1313 cfgCycles.exclude_hv(true)
1314 .exclude_host(true);
1315
1317 // We need to configure the cycles counter to send overflows
1318 // since we are going to use it to trigger timer signals that
1319 // trap back into m5 from KVM. In practice, this means that we
1320 // need to set some non-zero sample period that gets
1321 // overridden when the timer is armed.
1322 cfgCycles.wakeupEvents(1)
1323 .samplePeriod(42);
1324 }
1325
1326 // We might be re-attaching counters due threads being
1327 // re-initialised after fork.
1328 if (usePerf) {
1329 if (hwCycles->attached()) {
1330 hwCycles->detach();
1331 }
1332
1333 hwCycles->attach(cfgCycles, 0); // TID (0 => currentThread)
1335 }
1336}
1337
1338bool
1340{
1342 return false;
1343
1344 if (!archIsDrained()) {
1345 DPRINTF(Drain, "tryDrain: Architecture code is not ready.\n");
1346 return false;
1347 }
1348
1349 if (_status == Idle || _status == Running) {
1350 DPRINTF(Drain,
1351 "tryDrain: CPU transitioned into the Idle state, drain done\n");
1353 return true;
1354 } else {
1355 DPRINTF(Drain, "tryDrain: CPU not ready.\n");
1356 return false;
1357 }
1358}
1359
1360void
1362{
1363 if (ioctl(KVM_RUN) == -1) {
1364 if (errno != EINTR)
1365 panic("KVM: Failed to start virtual CPU (errno: %i)\n",
1366 errno);
1367 }
1368}
1369
1370void
1372{
1373 if (thread->comInstEventQueue.empty()) {
1375 } else {
1376 Tick next = thread->comInstEventQueue.nextTick();
1377 assert(next > ctrInsts);
1378 setupInstCounter(next - ctrInsts);
1379 }
1380}
1381
1382void
1384{
1385 // This function is for setting up instruction counter using perf
1386 if (!usePerf) {
1387 return;
1388 }
1389
1390 // No need to do anything if we aren't attaching for the first
1391 // time or the period isn't changing.
1392 if (period == activeInstPeriod && hwInstructions->attached()) {
1393 return;
1394 }
1395
1396 PerfKvmCounterConfig cfgInstructions(PERF_TYPE_HARDWARE,
1397 PERF_COUNT_HW_INSTRUCTIONS);
1398
1399 // Try to exclude the host. We set both exclude_hv and
1400 // exclude_host since different architectures use slightly
1401 // different APIs in the kernel.
1402 cfgInstructions.exclude_hv(true)
1403 .exclude_host(true);
1404
1405 if (period) {
1406 // Setup a sampling counter if that has been requested.
1407 cfgInstructions.wakeupEvents(1)
1408 .samplePeriod(period);
1409 }
1410
1411 // We need to detach and re-attach the counter to reliably change
1412 // sampling settings. See PerfKvmCounter::period() for details.
1413 if (hwInstructions->attached())
1414 hwInstructions->detach();
1415 assert(hwCycles->attached());
1416 hwInstructions->attach(cfgInstructions,
1417 0, // TID (0 => currentThread)
1418 *hwCycles);
1419
1420 if (period)
1421 hwInstructions->enableSignals(KVM_KICK_SIGNAL);
1422
1423 activeInstPeriod = period;
1424}
1425
1426} // namespace gem5
#define DPRINTF(x,...)
Definition trace.hh:209
const char data[]
RequestorID dataRequestorId() const
Reads this CPU's unique data requestor ID.
Definition base.hh:219
void init() override
init() is called after all C++ SimObjects have been created and all ports are connected.
Definition base.cc:330
System * system
Definition base.hh:416
void updateCycleCounters(CPUState state)
base method keeping track of cycle progression
Definition base.hh:585
@ CPU_STATE_SLEEP
Definition base.hh:576
std::vector< std::unique_ptr< CommitCPUStats > > commitStats
Definition base.hh:860
gem5::BaseCPU::BaseCPUStats baseStats
ThreadID numThreads
Number of threads we're actually simulating (<= SMT_MAX_THREADS).
Definition base.hh:414
void startup() override
startup() is the final initialization call before simulation.
Definition base.cc:369
virtual void switchOut()
Prepare for another CPU to take over execution.
Definition base.cc:620
virtual void takeOverFrom(BaseCPU *cpu)
Load the state of a CPU from the previous CPU object, invoked on all new CPUs that are about to be sw...
Definition base.cc:634
std::vector< ThreadContext * > threadContexts
Definition base.hh:286
bool switchedOut() const
Determine if the CPU is switched out.
Definition base.hh:397
BaseCPU(const Params &params, bool is_checker=false)
Definition base.cc:129
std::queue< PacketPtr > pendingMMIOPkts
Pending MMIO packets.
Definition base.hh:621
Status nextIOState() const
Returns next valid state after one or more IO accesses.
Definition base.cc:184
BaseKvmCPU * cpu
KVM cpu pointer for finishMMIOPending() callback.
Definition base.hh:618
Tick submitIO(PacketPtr pkt)
Interface to send Atomic or Timing IO request.
Definition base.cc:191
void recvReqRetry() override
Called by the peer if sendTimingReq was called on this peer (causing recvTimingReq to be called on th...
Definition base.cc:226
bool recvTimingResp(PacketPtr pkt) override
Receive a timing response from the peer.
Definition base.cc:209
unsigned int activeMMIOReqs
Number of MMIO requests in flight.
Definition base.hh:624
virtual void dump() const
Dump the internal state to the terminal.
Definition base.cc:596
int vcpuFD
KVM vCPU file descriptor.
Definition base.hh:708
void finishMMIOPending()
Callback from KvmCPUPort to transition the CPU out of RunningMMIOPending when all timing requests hav...
Definition base.cc:242
Status _status
CPU run state.
Definition base.hh:240
uint64_t activeInstPeriod
Currently active instruction count breakpoint.
Definition base.hh:759
virtual void updateKvmState()=0
Update the KVM state from the current thread context.
struct kvm_coalesced_mmio_ring * mmioRing
Coalesced MMIO ring buffer.
Definition base.hh:724
virtual Tick handleKvmExitHypercall()
The guest requested a monitor service using a hypercall.
Definition base.cc:1081
long vcpuID
KVM internal ID of the vCPU.
Definition base.hh:660
void drainResume() override
Resume execution after a successful drain.
Definition base.cc:399
void getSpecialRegisters(struct kvm_sregs &regs) const
Definition base.cc:862
virtual void updateThreadContext()=0
Update the current thread context with the KVM state.
void takeOverFrom(BaseCPU *cpu) override
Load the state of a CPU from the previous CPU object, invoked on all new CPUs that are about to be sw...
Definition base.cc:468
virtual uint64_t getHostCycles() const
Get the value of the hardware cycle counter in the guest.
Definition base.cc:712
Counter totalInsts() const override
Definition base.cc:583
void getOneReg(uint64_t id, void *addr) const
Definition base.cc:908
void init() override
init() is called after all C++ SimObjects have been created and all ports are connected.
Definition base.cc:128
const bool alwaysSyncTC
Be conservative and always synchronize the thread context on KVM entry/exit.
Definition base.hh:642
void suspendContext(ThreadID thread_num) override
Notify the CPU that the indicated context is now suspended.
Definition base.cc:536
virtual Tick handleKvmExitIRQWindowOpen()
The guest exited because an interrupt window was requested.
Definition base.cc:1087
void setSignalMask(const sigset_t *mask)
Set the signal mask used in kvmRun()
Definition base.cc:1172
void notifyFork() override
Notify a child process of a fork.
Definition base.cc:429
int vcpuMMapSize
Size of MMAPed kvm_run area.
Definition base.hh:710
void syncKvmState()
Update the KVM if the thread context is dirty.
Definition base.cc:984
bool tryDrain()
Try to drain the CPU if a drain is pending.
Definition base.cc:1339
void setRegisters(const struct kvm_regs &regs)
Definition base.cc:855
BaseKvmCPU(const BaseKvmCPUParams &params)
Definition base.cc:65
virtual Tick handleKvmExitException()
An unhandled virtualization exception occurred.
Definition base.cc:1103
KVMCpuPort instPort
Unused dummy port for the instruction interface.
Definition base.hh:636
void kick() const
Force an exit from KVM.
Definition base.hh:138
@ Running
Running normally.
Definition base.hh:205
@ Idle
Context not scheduled in KVM.
Definition base.hh:199
@ RunningMMIOPending
Timing MMIO request in flight or stalled.
Definition base.hh:227
@ RunningService
Requiring service at the beginning of the next cycle.
Definition base.hh:219
@ RunningServiceCompletion
Service completion in progress.
Definition base.hh:236
void deallocateContext(ThreadID thread_num)
Definition base.cc:559
void getRegisters(struct kvm_regs &regs) const
Get/Set the register state of the guest vCPU.
Definition base.cc:848
void setupSignalHandler()
Setup a signal handler to catch the timer signal used to switch back to the monitor.
Definition base.cc:1245
void setSpecialRegisters(const struct kvm_sregs &regs)
Definition base.cc:869
ThreadContext * getContext(int tn) override
Given a thread num get the thread context for it.
Definition base.cc:574
bool usePerf
True if using perf; False otherwise.
Definition base.hh:657
virtual Tick handleKvmExitIO()
The guest performed a legacy IO request (out/inp on x86)
Definition base.cc:1073
std::string getAndFormatOneReg(uint64_t id) const
Get and format one register for printout.
Definition base.cc:925
gem5::BaseKvmCPU::StatGroup stats
virtual Tick handleKvmExit()
Main kvmRun exit handler, calls the relevant handleKvmExit* depending on exit type.
Definition base.cc:996
void syncThreadContext()
Update a thread context if the KVM state is dirty with respect to the cached thread context.
Definition base.cc:972
void unserializeThread(CheckpointIn &cp, ThreadID tid) override
Unserialize one thread.
Definition base.cc:317
DrainState drain() override
Draining is the process of clearing out the states of SimObjects.These are the SimObjects that are pa...
Definition base.cc:328
Tick doMMIOAccess(Addr paddr, void *data, int size, bool write)
Inject a memory mapped IO request into gem5.
Definition base.cc:1120
void wakeup(ThreadID tid=0) override
Definition base.cc:499
void startup() override
startup() is the final initialization call before simulation.
Definition base.cc:137
Counter totalOps() const override
Definition base.cc:589
Tick flushCoalescedMMIO()
Service MMIO requests in the mmioRing.
Definition base.cc:1203
std::unique_ptr< PerfKvmCounter > hwInstructions
Guest instruction counter.
Definition base.hh:782
EventFunctionWrapper tickEvent
Definition base.hh:728
bool perfControlledByTimer
Does the runTimer control the performance counters?
Definition base.hh:791
virtual Tick kvmRun(Tick ticks)
Request KVM to run the guest for a given number of ticks.
Definition base.cc:720
void setupInstStop()
Setup an instruction break if there is one pending.
Definition base.cc:1371
KvmVM * vm
Definition base.hh:160
bool discardPendingSignal(int signum) const
Discard a (potentially) pending signal.
Definition base.cc:1274
virtual Tick kvmRunDrain()
Request the CPU to run until draining completes.
Definition base.cc:698
virtual Tick handleKvmExitFailEntry()
KVM failed to start the virtualized CPU.
Definition base.cc:1112
void switchOut() override
Prepare for another CPU to take over execution.
Definition base.cc:454
void restartEqThread()
Thread-specific initialization.
Definition base.cc:252
void setupCounters()
Setup hardware performance counters.
Definition base.cc:1302
void serializeThread(CheckpointOut &cp, ThreadID tid) const override
Serialize a single thread.
Definition base.cc:304
void setupInstCounter(uint64_t period=0)
Setup the guest instruction counter.
Definition base.cc:1383
virtual ~BaseKvmCPU()
Definition base.cc:120
void activateContext(ThreadID thread_num) override
Notify the CPU that the indicated context is now active.
Definition base.cc:517
void tick()
Execute the CPU until the next event in the main event queue or until the guest needs service from ge...
Definition base.cc:602
virtual bool archIsDrained() const
Is the architecture specific code in a state that prevents draining?
Definition base.hh:541
struct kvm_run * _kvmRun
Pointer to the kvm_run structure used to communicate parameters with KVM.
Definition base.hh:719
std::unique_ptr< PerfKvmCounter > hwCycles
Guest cycle counter.
Definition base.hh:769
void setOneReg(uint64_t id, const void *addr)
Get/Set single register using the KVM_(SET|GET)_ONE_REG API.
Definition base.cc:891
std::unique_ptr< BaseKvmTimer > runTimer
Timer used to force execution into the monitor after a specified number of simulation tick equivalent...
Definition base.hh:800
KVMCpuPort dataPort
Port for data requests.
Definition base.hh:633
SimpleThread * thread
A cached copy of a thread's state in the form of a SimpleThread object.
Definition base.hh:153
void setFPUState(const struct kvm_fpu &state)
Definition base.cc:883
virtual Tick handleKvmExitUnknown()
An unknown architecture dependent error occurred when starting the vCPU.
Definition base.cc:1095
void getFPUState(struct kvm_fpu &state) const
Get/Set the guest FPU/vector state.
Definition base.cc:876
void haltContext(ThreadID thread_num) override
Notify the CPU that the indicated context is now halted.
Definition base.cc:566
bool kvmStateDirty
Is the KVM state dirty?
Definition base.hh:654
EventQueue * deviceEventQueue()
Get a pointer to the event queue owning devices.
Definition base.hh:447
void verifyMemoryMode() const override
Verify that the system is in a memory mode supported by the CPU.
Definition base.cc:490
bool threadContextDirty
Is the gem5 context dirty?
Definition base.hh:648
const long pageSize
Cached page size of the host.
Definition base.hh:726
pthread_t vcpuThread
ID of the vCPU thread.
Definition base.hh:663
Counter ctrInsts
Number of instructions executed by the CPU.
Definition base.hh:823
float hostFactor
Host factor as specified in the configuration.
Definition base.hh:803
ThreadContext * tc
ThreadContext object, provides an interface for external objects to modify this thread's state.
Definition base.hh:158
virtual void ioctlRun()
Execute the KVM_RUN ioctl.
Definition base.cc:1361
virtual void stutterPC(PCStateBase &pc) const =0
Modify a PCStatePtr's value so that its next PC is the current PC.
Tick clockEdge(Cycles cycles=Cycles(0)) const
Determine the tick when a cycle begins, by default the current one, but the argument also enables the...
Tick nextCycle() const
Based on the clock of the object, determine the start tick of the first cycle that is at least one cy...
Tick clockPeriod() const
Cycles ticksToCycles(Tick t) const
Cycles is a wrapper class for representing cycle counts, i.e.
Definition types.hh:79
KVM parent interface.
Definition vm.hh:81
int getVCPUMMapSize() const
Get the size of the MMAPed parameter area used to communicate vCPU parameters between the kernel and ...
Definition vm.hh:96
int capCoalescedMMIO() const
Check if coalesced MMIO is supported and which page in the MMAP'ed structure it stores requests in.
Definition vm.cc:136
virtual std::string name() const
Definition named.hh:60
A Packet is used to encapsulate a transfer between two objects in the memory system (e....
Definition packet.hh:295
void dataStatic(T *p)
Set the data pointer to the following value that should not be freed.
Definition packet.hh:1175
PerfEvent counter configuration.
Definition perfevent.hh:55
PerfKvmCounterConfig & samplePeriod(uint64_t period)
Set the initial sample period (overflow count) of an event.
Definition perfevent.hh:88
PerfKvmCounterConfig & disabled(bool val)
Don't start the performance counter automatically when attaching it.
Definition perfevent.hh:113
PerfKvmCounterConfig & exclude_host(bool val)
Exclude the events from the host (i.e., only include events from the guest system).
Definition perfevent.hh:144
PerfKvmCounterConfig & exclude_hv(bool val)
Exclude the hyper visor (i.e., only include events from the guest system).
Definition perfevent.hh:159
PerfKvmCounterConfig & pinned(bool val)
Force the group to be on the active all the time (i.e., disallow multiplexing).
Definition perfevent.hh:126
PerfKvmCounterConfig & wakeupEvents(uint32_t events)
Set the number of samples that need to be triggered before reporting data as being available on the p...
Definition perfevent.hh:101
PerfEvent based timer using the host's CPU cycle counter.
Definition timer.hh:222
Timer based on standard POSIX timers.
Definition timer.hh:188
Tick sendAtomic(PacketPtr pkt)
Send an atomic request packet, where the data is moved and the state is updated in zero time,...
Definition port.hh:552
bool sendTimingReq(PacketPtr pkt)
Attempt to send a timing request to the responder port by calling its corresponding receive function.
Definition port.hh:603
@ UNCACHEABLE
The request is to an uncacheable address.
Definition request.hh:125
The SimpleThread object provides a combination of the ThreadState object and the ThreadContext interf...
ThreadContext is the external interface to all thread state for anything outside of the CPU.
@ Halted
Permanently shut down.
@ Suspended
Temporarily inactive.
Statistics container.
Definition group.hh:93
#define KVM_KICK_SIGNAL
Signal to use to trigger exits from KVM.
Definition base.hh:56
#define ADD_STAT(n,...)
Convenience macro to add a stat to a statistics group.
Definition group.hh:75
void kvmInterrupt(const struct kvm_interrupt &interrupt)
Send a normal interrupt to the guest.
Definition base.cc:840
void kvmNonMaskableInterrupt()
Send a non-maskable interrupt to the guest.
Definition base.cc:832
int ioctl(int request, long p1) const
vCPU ioctl interface.
Definition base.cc:1194
void signalDrainDone() const
Signal that an object is drained.
Definition drain.hh:310
DrainState drainState() const
Return the current drain state of an object.
Definition drain.hh:329
DrainState
Object drain/handover states.
Definition drain.hh:76
@ Draining
Draining buffers pending serialization/handover.
Definition drain.hh:78
@ Drained
Buffers drained, ready for serialization/handover.
Definition drain.hh:79
void deschedule(Event &event)
Definition eventq.hh:1021
void schedule(Event &event, Tick when)
Definition eventq.hh:1012
static const Priority CPU_Tick_Pri
CPU ticks must come after other associated CPU events (such as writebacks).
Definition eventq.hh:207
EventQueue * eventQueue() const
Definition eventq.hh:1003
#define panic(...)
This implements a cprintf based panic() function.
Definition logging.hh:220
#define fatal_if(cond,...)
Conditional fatal macro that checks the supplied condition and only causes a fatal error if the condi...
Definition logging.hh:268
#define fatal(...)
This implements a cprintf based fatal() function.
Definition logging.hh:232
#define hack_once(...)
Definition logging.hh:296
const Params & params() const
#define warn(...)
Definition logging.hh:288
#define inform(...)
Definition logging.hh:289
Bitfield< 3 > sa
Bitfield< 3, 0 > mask
Definition pcstate.hh:63
Bitfield< 6 > si
Bitfield< 33 > id
Bitfield< 21 > ss
Definition misc_types.hh:60
Bitfield< 4 > pc
Bitfield< 0 > p
Bitfield< 5, 3 > reg
Definition types.hh:92
Bitfield< 3 > addr
Definition types.hh:84
Bitfield< 5 > lock
Definition types.hh:82
Units for Stats.
Definition units.hh:113
double Counter
All counters are of 64-bit values.
Definition types.hh:46
Copyright (c) 2024 Arm Limited All rights reserved.
Definition binary32.hh:36
std::shared_ptr< FaultBase > Fault
Definition types.hh:249
int16_t ThreadID
Thread index/ID type.
Definition types.hh:235
std::shared_ptr< Request > RequestPtr
Definition request.hh:94
static void onKickSignal(int signo, siginfo_t *si, void *data)
Dummy handler for KVM kick signals.
Definition base.cc:1240
Tick curTick()
The universal simulation clock.
Definition cur_tick.hh:46
std::ostream CheckpointOut
Definition serialize.hh:66
uint64_t Addr
Address type This will probably be moved somewhere else in the near future.
Definition types.hh:147
bool FullSystem
The FullSystem variable can be used to determine the current mode of simulation.
Definition root.cc:220
uint64_t Tick
Tick count type.
Definition types.hh:58
const Tick MaxTick
Definition types.hh:60
Packet * PacketPtr
EventQueue * curEventQueue()
Definition eventq.hh:91
constexpr decltype(nullptr) NoFault
Definition types.hh:253
statistics::Scalar numExitSignal
Definition base.hh:812
StatGroup(statistics::Group *parent)
Definition base.cc:281
statistics::Scalar numIO
Definition base.hh:815
statistics::Scalar numHypercalls
Definition base.hh:818
statistics::Scalar numHalt
Definition base.hh:816
statistics::Scalar numVMHalfEntries
Definition base.hh:811
statistics::Scalar numMMIO
Definition base.hh:813
statistics::Scalar numCoalescedMMIO
Definition base.hh:814
statistics::Scalar numInterrupts
Definition base.hh:817
statistics::Scalar numVMExits
Definition base.hh:810

Generated on Mon Oct 27 2025 04:13:00 for gem5 by doxygen 1.14.0