gem5 v24.0.0.0
Loading...
Searching...
No Matches
base.cc
Go to the documentation of this file.
1/*
2 * Copyright (c) 2012, 2015, 2017, 2021 ARM Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Redistribution and use in source and binary forms, with or without
15 * modification, are permitted provided that the following conditions are
16 * met: redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer;
18 * redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution;
21 * neither the name of the copyright holders nor the names of its
22 * contributors may be used to endorse or promote products derived from
23 * this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
28 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
29 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
30 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
31 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
32 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
33 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
34 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
35 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 */
37
#include "cpu/kvm/base.hh"

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>

#include <cerrno>
#include <csignal>
#include <memory>
#include <ostream>

#include "base/compiler.hh"
#include "debug/Checkpoint.hh"
#include "debug/Drain.hh"
#include "debug/Kvm.hh"
#include "debug/KvmIO.hh"
#include "debug/KvmRun.hh"
#include "params/BaseKvmCPU.hh"
#include "sim/process.hh"
#include "sim/system.hh"
58
59/* Used by some KVM macros */
60#define PAGE_SIZE pageSize
61
62namespace gem5
63{
64
65BaseKvmCPU::BaseKvmCPU(const BaseKvmCPUParams &params)
66 : BaseCPU(params),
67 vm(nullptr),
68 _status(Idle),
69 dataPort(name() + ".dcache_port", this),
70 instPort(name() + ".icache_port", this),
71 alwaysSyncTC(params.alwaysSyncTC),
72 threadContextDirty(true),
73 kvmStateDirty(false),
74 usePerf(params.usePerf),
75 vcpuID(-1), vcpuFD(-1), vcpuMMapSize(0),
76 _kvmRun(NULL), mmioRing(NULL),
77 pageSize(sysconf(_SC_PAGE_SIZE)),
78 tickEvent([this]{ tick(); }, "BaseKvmCPU tick",
79 false, Event::CPU_Tick_Pri),
80 activeInstPeriod(0),
81 hwCycles(nullptr),
82 hwInstructions(nullptr),
83 perfControlledByTimer(params.usePerfOverflow),
84 hostFactor(params.hostFactor), stats(this),
85 ctrInsts(0)
86{
87 if (pageSize == -1)
88 panic("KVM: Failed to determine host page size (%i)\n",
89 errno);
90
91 if (FullSystem)
92 thread = new SimpleThread(this, 0, params.system, params.mmu,
93 params.isa[0], params.decoder[0]);
94 else
95 thread = new SimpleThread(this, /* thread_num */ 0, params.system,
96 params.workload[0], params.mmu,
97 params.isa[0], params.decoder[0]);
98
99 thread->setStatus(ThreadContext::Halted);
100 tc = thread->getTC();
101 threadContexts.push_back(tc);
102
103 if ((!usePerf) && perfControlledByTimer) {
104 panic("KVM: invalid combination of parameters: cannot use "
105 "perfControlledByTimer without usePerf\n");
106 }
107
108 // If we use perf, we create new PerfKVMCounters
109 if (usePerf) {
110 hwCycles = std::unique_ptr<PerfKvmCounter>(new PerfKvmCounter());
111 hwInstructions = std::unique_ptr<PerfKvmCounter>(new PerfKvmCounter());
112 } else {
113 inform("Using KVM CPU without perf. The stats related to the number "
114 "of cycles and instructions executed by the KVM CPU will not "
115 "be updated. The stats should not be used for performance "
116 "evaluation.");
117 }
118}
119
// NOTE(review): the signature line (original line 120, presumably
// "BaseKvmCPU::~BaseKvmCPU()") was elided by the Doxygen extraction --
// confirm against upstream gem5 before rebuilding this file.
// Destructor: unmap the shared kvm_run communication page if it was
// mapped, then close the vCPU file descriptor.
121{
122 if (_kvmRun)
123 munmap(_kvmRun, vcpuMMapSize);
124 close(vcpuFD);
125}
126
127void
129{
130 vm = system->getKvmVM();
131 vcpuID = vm->allocVCPUID();
133 fatal_if(numThreads != 1, "KVM: Multithreading not supported");
134}
135
136void
138{
139 const BaseKvmCPUParams &p =
140 dynamic_cast<const BaseKvmCPUParams &>(params());
141
142 Kvm &kvm = *vm->kvm;
143
145
146 assert(vcpuFD == -1);
147
148 // Tell the VM that a CPU is about to start.
149 vm->cpuStartup();
150
151 // We can't initialize KVM CPUs in BaseKvmCPU::init() since we are
152 // not guaranteed that the parent KVM VM has initialized at that
153 // point. Initialize virtual CPUs here instead.
155
156 // Map the KVM run structure
158 _kvmRun = (struct kvm_run *)mmap(0, vcpuMMapSize,
159 PROT_READ | PROT_WRITE, MAP_SHARED,
160 vcpuFD, 0);
161 if (_kvmRun == MAP_FAILED)
162 panic("KVM: Failed to map run data structure\n");
163
164 // Setup a pointer to the MMIO ring buffer if coalesced MMIO is
165 // available. The offset into the KVM's communication page is
166 // provided by the coalesced MMIO capability.
167 int mmioOffset(kvm.capCoalescedMMIO());
168 if (!p.useCoalescedMMIO) {
169 inform("KVM: Coalesced MMIO disabled by config.\n");
170 } else if (mmioOffset) {
171 inform("KVM: Coalesced IO available\n");
172 mmioRing = (struct kvm_coalesced_mmio_ring *)(
173 (char *)_kvmRun + (mmioOffset * pageSize));
174 } else {
175 inform("KVM: Coalesced not supported by host OS\n");
176 }
177
180 }, name(), true), curTick());
181}
182
189
190Tick
192{
193 if (cpu->system->isAtomicMode()) {
194 Tick delay = sendAtomic(pkt);
195 delete pkt;
196 return delay;
197 } else {
198 if (pendingMMIOPkts.empty() && sendTimingReq(pkt)) {
199 activeMMIOReqs++;
200 } else {
201 pendingMMIOPkts.push(pkt);
202 }
203 // Return value is irrelevant for timing-mode accesses.
204 return 0;
205 }
206}
207
208bool
210{
211 DPRINTF(KvmIO, "KVM: Finished timing request\n");
212
213 delete pkt;
214 activeMMIOReqs--;
215
216 // We can switch back into KVM when all pending and in-flight MMIO
217 // operations have completed.
218 if (!(activeMMIOReqs || pendingMMIOPkts.size())) {
219 DPRINTF(KvmIO, "KVM: Finished all outstanding timing requests\n");
220 cpu->finishMMIOPending();
221 }
222 return true;
223}
224
225void
227{
228 DPRINTF(KvmIO, "KVM: Retry for timing request\n");
229
230 assert(pendingMMIOPkts.size());
231
232 // Assuming that we can issue infinite requests this cycle is a bit
233 // unrealistic, but it's not worth modeling something more complex in
234 // KVM.
235 while (pendingMMIOPkts.size() && sendTimingReq(pendingMMIOPkts.front())) {
236 pendingMMIOPkts.pop();
237 activeMMIOReqs++;
238 }
239}
240
241void
250
251void
253{
254 // Do thread-specific initialization. We need to setup signal
255 // delivery for counters and timers from within the thread that
256 // will execute the event queue to ensure that signals are
257 // delivered to the right threads.
258 const BaseKvmCPUParams &p =
259 dynamic_cast<const BaseKvmCPUParams &>(params());
260
261 vcpuThread = pthread_self();
262
263 // Setup signal handlers. This has to be done after the vCPU is
264 // created since it manipulates the vCPU signal mask.
266
268
269 if (p.usePerfOverflow) {
270 runTimer.reset(new PerfKvmTimer(*hwCycles,
272 p.hostFactor,
273 p.hostFreq));
274 } else {
275 runTimer.reset(new PosixKvmTimer(KVM_KICK_SIGNAL, CLOCK_MONOTONIC,
276 p.hostFactor,
277 p.hostFreq));
278 }
279}
280
282 : statistics::Group(parent),
283 ADD_STAT(numVMExits, statistics::units::Count::get(),
284 "total number of KVM exits"),
285 ADD_STAT(numVMHalfEntries, statistics::units::Count::get(),
286 "number of KVM entries to finalize pending operations"),
287 ADD_STAT(numExitSignal, statistics::units::Count::get(),
288 "exits due to signal delivery"),
289 ADD_STAT(numMMIO, statistics::units::Count::get(),
290 "number of VM exits due to memory mapped IO"),
291 ADD_STAT(numCoalescedMMIO, statistics::units::Count::get(),
292 "number of coalesced memory mapped IO requests"),
293 ADD_STAT(numIO, statistics::units::Count::get(),
294 "number of VM exits due to legacy IO"),
295 ADD_STAT(numHalt, statistics::units::Count::get(),
296 "number of VM exits due to wait for interrupt instructions"),
297 ADD_STAT(numInterrupts, statistics::units::Count::get(),
298 "number of interrupts delivered"),
299 ADD_STAT(numHypercalls, statistics::units::Count::get(), "number of hypercalls")
300{
301}
302
303void
305{
306 if (debug::Checkpoint) {
307 DPRINTF(Checkpoint, "KVM: Serializing thread %i:\n", tid);
308 dump();
309 }
310
311 assert(tid == 0);
312 assert(_status == Idle);
313 thread->serialize(cp);
314}
315
316void
318{
319 DPRINTF(Checkpoint, "KVM: Unserialize thread %i:\n", tid);
320
321 assert(tid == 0);
322 assert(_status == Idle);
323 thread->unserialize(cp);
324 threadContextDirty = true;
325}
326
329{
330 if (switchedOut())
331 return DrainState::Drained;
332
333 DPRINTF(Drain, "BaseKvmCPU::drain\n");
334
335 // The event queue won't be locked when calling drain since that's
336 // not done from an event. Lock the event queue here to make sure
337 // that scoped migrations continue to work if we need to
338 // synchronize the thread context.
339 std::lock_guard<EventQueue> lock(*this->eventQueue());
340
341 switch (_status) {
342 case Running:
343 // The base KVM code is normally ready when it is in the
344 // Running state, but the architecture specific code might be
345 // of a different opinion. This may happen when the CPU been
346 // notified of an event that hasn't been accepted by the vCPU
347 // yet.
348 if (!archIsDrained())
350
351 // The state of the CPU is consistent, so we don't need to do
352 // anything special to drain it. We simply de-schedule the
353 // tick event and enter the Idle state to prevent nasty things
354 // like MMIOs from happening.
355 if (tickEvent.scheduled())
357 _status = Idle;
358
359 [[fallthrough]];
360 case Idle:
361 // Idle, no need to drain
362 assert(!tickEvent.scheduled());
363
364 // Sync the thread context here since we'll need it when we
365 // switch CPUs or checkpoint the CPU.
367
368 return DrainState::Drained;
369
371 // The CPU has just requested a service that was handled in
372 // the RunningService state, but the results have still not
373 // been reported to the CPU. Now, we /could/ probably just
374 // update the register state ourselves instead of letting KVM
375 // handle it, but that would be tricky. Instead, we enter KVM
376 // and let it do its stuff.
377 DPRINTF(Drain, "KVM CPU is waiting for service completion, "
378 "requesting drain.\n");
380
382 // We need to drain since there are in-flight timing accesses
383 DPRINTF(Drain, "KVM CPU is waiting for timing accesses to complete, "
384 "requesting drain.\n");
386
387 case RunningService:
388 // We need to drain since the CPU is waiting for service (e.g., MMIOs)
389 DPRINTF(Drain, "KVM CPU is waiting for service, requesting drain.\n");
391
392 default:
393 panic("KVM: Unhandled CPU state in drain()\n");
394 return DrainState::Drained;
395 }
396}
397
398void
400{
401 assert(!tickEvent.scheduled());
402
403 // We might have been switched out. In that case, we don't need to
404 // do anything.
405 if (switchedOut())
406 return;
407
408 DPRINTF(Kvm, "drainResume\n");
410
411 /* The simulator may have terminated the threads servicing event
412 * queues. In that case, we need to re-initialize the new
413 * threads. */
416 }, name(), true), curTick());
417
418 // The tick event is de-scheduled as a part of the draining
419 // process. Re-schedule it if the thread context is active.
420 if (tc->status() == ThreadContext::Active) {
423 } else {
424 _status = Idle;
425 }
426}
427
428void
430{
431 // We should have drained prior to forking, which means that the
432 // tick event shouldn't be scheduled and the CPU is idle.
433 assert(!tickEvent.scheduled());
434 assert(_status == Idle);
435
436 if (vcpuFD != -1) {
437 if (close(vcpuFD) == -1)
438 warn("kvm CPU: notifyFork failed to close vcpuFD\n");
439
440 if (_kvmRun)
441 munmap(_kvmRun, vcpuMMapSize);
442
443 vcpuFD = -1;
444 _kvmRun = NULL;
445
446 if (usePerf) {
447 hwInstructions->detach();
448 hwCycles->detach();
449 }
450 }
451}
452
453void
455{
456 DPRINTF(Kvm, "switchOut\n");
457
459
460 // We should have drained prior to executing a switchOut, which
461 // means that the tick event shouldn't be scheduled and the CPU is
462 // idle.
463 assert(!tickEvent.scheduled());
464 assert(_status == Idle);
465}
466
467void
469{
470 DPRINTF(Kvm, "takeOverFrom\n");
471
473
474 // We should have drained prior to executing a switchOut, which
475 // means that the tick event shouldn't be scheduled and the CPU is
476 // idle.
477 assert(!tickEvent.scheduled());
478 assert(_status == Idle);
479 assert(threadContexts.size() == 1);
480
481 // Force an update of the KVM state here instead of flagging the
482 // TC as dirty. This is not ideal from a performance point of
483 // view, but it makes debugging easier as it allows meaningful KVM
484 // state to be dumped before and after a takeover.
486 threadContextDirty = false;
487}
488
// NOTE(review): the signature line (original line 490) was elided by the
// extraction; presumably "BaseKvmCPU::verifyMemoryMode() const" -- confirm.
// Refuse to run unless the memory system bypasses caches: KVM executes
// on real hardware, so simulated caches cannot observe its accesses.
489void
491{
492 if (!(system->bypassCaches())) {
493 fatal("The KVM-based CPUs requires the memory system to be in the "
494 "'noncaching' mode.\n");
495 }
496}
497
498void
500{
501 DPRINTF(Kvm, "wakeup()\n");
502 // This method might have been called from another
503 // context. Migrate to this SimObject's event queue when
504 // delivering the wakeup signal.
506
507 // Kick the vCPU to get it to come out of KVM.
508 kick();
509
511 return;
512
513 thread->activate();
514}
515
516void
518{
519 DPRINTF(Kvm, "ActivateContext %d\n", thread_num);
520
521 assert(thread_num == 0);
522 assert(thread);
523
524 assert(_status == Idle);
525 assert(!tickEvent.scheduled());
526
529
532}
533
534
535void
537{
538 DPRINTF(Kvm, "SuspendContext %d\n", thread_num);
539
540 assert(thread_num == 0);
541 assert(thread);
542
543 if (_status == Idle)
544 return;
545
547
548 // The tick event may no be scheduled if the quest has requested
549 // the monitor to wait for interrupts. The normal CPU models can
550 // get their tick events descheduled by quiesce instructions, but
551 // that can't happen here.
552 if (tickEvent.scheduled())
554
555 _status = Idle;
556}
557
558void
560{
561 // for now, these are equivalent
562 suspendContext(thread_num);
563}
564
565void
567{
568 // for now, these are equivalent
569 suspendContext(thread_num);
571}
572
575{
576 assert(tn == 0);
578 return tc;
579}
580
581
// NOTE(review): signature lines were elided by the extraction for the
// three small accessors below; presumably totalInsts(), totalOps() and
// dump() -- confirm against upstream gem5.
// Total retired instructions: the running counter updated in kvmRun().
584{
585 return ctrInsts;
586}
587
// totalOps: no separate op count is kept, so this returns the
// instruction counter (see the hack_once warning below).
590{
591 hack_once("Pretending totalOps is equivalent to totalInsts()\n");
592 return ctrInsts;
593}
594
// dump: architecture-specific models may override this to dump
// register state; the base implementation only informs.
595void
597{
598 inform("State dumping not implemented.");
599}
600
601void
603{
604 Tick delay(0);
605 assert(_status != Idle && _status != RunningMMIOPending);
606
607 switch (_status) {
608 case RunningService:
609 // handleKvmExit() will determine the next state of the CPU
610 delay = handleKvmExit();
611
612 if (tryDrain())
613 _status = Idle;
614 break;
615
617 case Running: {
618 auto &queue = thread->comInstEventQueue;
619 const uint64_t nextInstEvent(
620 queue.empty() ? MaxTick : queue.nextTick());
621 // Enter into KVM and complete pending IO instructions if we
622 // have an instruction event pending.
623 const Tick ticksToExecute(
624 nextInstEvent > ctrInsts ?
625 curEventQueue()->nextTick() - curTick() : 0);
626
627 if (alwaysSyncTC)
628 threadContextDirty = true;
629
630 // We might need to update the KVM state.
631 syncKvmState();
632
633 // Setup any pending instruction count breakpoints using
634 // PerfEvent if we are going to execute more than just an IO
635 // completion.
636 if (ticksToExecute > 0)
638
639 DPRINTF(KvmRun, "Entering KVM...\n");
641 // Force an immediate exit from KVM after completing
642 // pending operations. The architecture-specific code
643 // takes care to run until it is in a state where it can
644 // safely be drained.
645 delay = kvmRunDrain();
646 } else {
647 delay = kvmRun(ticksToExecute);
648 }
649
650 // The CPU might have been suspended before entering into
651 // KVM. Assume that the CPU was suspended /before/ entering
652 // into KVM and skip the exit handling.
653 if (_status == Idle)
654 break;
655
656 // Entering into KVM implies that we'll have to reload the thread
657 // context from KVM if we want to access it. Flag the KVM state as
658 // dirty with respect to the cached thread context.
659 kvmStateDirty = true;
660
661 if (alwaysSyncTC)
663
664 // Enter into the RunningService state unless the
665 // simulation was stopped by a timer.
666 if (_kvmRun->exit_reason != KVM_EXIT_INTR) {
668 } else {
671 }
672
673 // Service any pending instruction events. The vCPU should
674 // have exited in time for the event using the instruction
675 // counter configured by setupInstStop().
676 queue.serviceEvents(ctrInsts);
677
678 if (tryDrain())
679 _status = Idle;
680 } break;
681
682 default:
683 panic("BaseKvmCPU entered tick() in an illegal state (%i)\n",
684 _status);
685 }
686
687 // Schedule a new tick if we are still running
688 if (_status != Idle && _status != RunningMMIOPending) {
689 if (_kvmRun->exit_reason == KVM_EXIT_INTR && runTimer->expired())
691 curEventQueue()->nextTick() - curTick() + 1)));
692 else
694 }
695}
696
697Tick
699{
700 // By default, the only thing we need to drain is a pending IO
701 // operation which assumes that we are in the
702 // RunningServiceCompletion or RunningMMIOPending state.
705
706 // Deliver the data from the pending IO operation and immediately
707 // exit.
708 return kvmRun(0);
709}
710
// NOTE(review): signature line (original 712, presumably
// "BaseKvmCPU::getHostCycles() const") elided by the extraction.
// Returns the raw host cycle count from the perf counter, or 0 when
// perf is disabled (stats are then not meaningful -- see constructor).
711uint64_t
713{
714 if (usePerf)
715 return hwCycles->read();
716 return 0;
717}
718
719Tick
721{
722 Tick ticksExecuted;
723 fatal_if(vcpuFD == -1,
724 "Trying to run a KVM CPU in a forked child process. "
725 "This is not supported.\n");
726 DPRINTF(KvmRun, "KVM: Executing for %i ticks\n", ticks);
727
728 if (ticks == 0) {
729 // Settings ticks == 0 is a special case which causes an entry
730 // into KVM that finishes pending operations (e.g., IO) and
731 // then immediately exits.
732 DPRINTF(KvmRun, "KVM: Delivering IO without full guest entry\n");
733
735
736 // Send a KVM_KICK_SIGNAL to the vCPU thread (i.e., this
737 // thread). The KVM control signal is masked while executing
738 // in gem5 and gets unmasked temporarily as when entering
739 // KVM. See setSignalMask() and setupSignalHandler().
740 kick();
741
742 // Start the vCPU. KVM will check for signals after completing
743 // pending operations (IO). Since the KVM_KICK_SIGNAL is
744 // pending, this forces an immediate exit to gem5 again. We
745 // don't bother to setup timers since this shouldn't actually
746 // execute any code (other than completing half-executed IO
747 // instructions) in the guest.
748 ioctlRun();
749
750 // We always execute at least one cycle to prevent the
751 // BaseKvmCPU::tick() to be rescheduled on the same tick
752 // twice.
753 ticksExecuted = clockPeriod();
754 } else {
755 // This method is executed as a result of a tick event. That
756 // means that the event queue will be locked when entering the
757 // method. We temporarily unlock the event queue to allow
758 // other threads to steal control of this thread to inject
759 // interrupts. They will typically lock the queue and then
760 // force an exit from KVM by kicking the vCPU.
762
763 if (ticks < runTimer->resolution()) {
764 DPRINTF(KvmRun, "KVM: Adjusting tick count (%i -> %i)\n",
765 ticks, runTimer->resolution());
766 ticks = runTimer->resolution();
767 }
768
769 // Get hardware statistics after synchronizing contexts. The KVM
770 // state update might affect guest cycle counters.
771 uint64_t baseCycles(getHostCycles());
772 uint64_t baseInstrs = 0;
773 if (usePerf) {
774 baseInstrs = hwInstructions->read();
775 }
776
777 // Arm the run timer and start the cycle timer if it isn't
778 // controlled by the overflow timer. Starting/stopping the cycle
779 // timer automatically starts the other perf timers as they are in
780 // the same counter group.
781 runTimer->arm(ticks);
782 if (usePerf && (!perfControlledByTimer)) {
783 hwCycles->start();
784 }
785
786 ioctlRun();
787
788 runTimer->disarm();
789 if (usePerf && (!perfControlledByTimer)) {
790 hwCycles->stop();
791 }
792
793 // The control signal may have been delivered after we exited
794 // from KVM. It will be pending in that case since it is
795 // masked when we aren't executing in KVM. Discard it to make
796 // sure we don't deliver it immediately next time we try to
797 // enter into KVM.
799
800 const uint64_t hostCyclesExecuted(getHostCycles() - baseCycles);
801 const uint64_t simCyclesExecuted(hostCyclesExecuted * hostFactor);
802 uint64_t instsExecuted = 0;
803 if (usePerf) {
804 instsExecuted = hwInstructions->read() - baseInstrs;
805 }
806 ticksExecuted = runTimer->ticksFromHostCycles(hostCyclesExecuted);
807
808 /* Update statistics */
809 baseStats.numCycles += simCyclesExecuted;;
810 commitStats[thread->threadId()]->numInsts += instsExecuted;
811 baseStats.numInsts += instsExecuted;
812 ctrInsts += instsExecuted;
813
814 DPRINTF(KvmRun,
815 "KVM: Executed %i instructions in %i cycles "
816 "(%i ticks, sim cycles: %i).\n",
817 instsExecuted, hostCyclesExecuted, ticksExecuted, simCyclesExecuted);
818 }
819
821
822 return ticksExecuted + flushCoalescedMMIO();
823}
824
// NOTE(review): original lines 826, 828 and 836 were elided by the
// extraction (the kvmNonMaskableInterrupt signature and, presumably,
// debug printouts) -- confirm against upstream gem5.
// Deliver a non-maskable interrupt to the guest via the KVM_NMI ioctl.
825void
827{
829 if (ioctl(KVM_NMI) == -1)
830 panic("KVM: Failed to deliver NMI to virtual CPU\n");
831}
832
// Deliver a normal interrupt to the guest via the KVM_INTERRUPT ioctl.
833void
834BaseKvmCPU::kvmInterrupt(const struct kvm_interrupt &interrupt)
835{
837 if (ioctl(KVM_INTERRUPT, (void *)&interrupt) == -1)
838 panic("KVM: Failed to deliver interrupt to virtual CPU\n");
839}
840
841void
842BaseKvmCPU::getRegisters(struct kvm_regs &regs) const
843{
844 if (ioctl(KVM_GET_REGS, &regs) == -1)
845 panic("KVM: Failed to get guest registers\n");
846}
847
848void
849BaseKvmCPU::setRegisters(const struct kvm_regs &regs)
850{
851 if (ioctl(KVM_SET_REGS, (void *)&regs) == -1)
852 panic("KVM: Failed to set guest registers\n");
853}
854
855void
856BaseKvmCPU::getSpecialRegisters(struct kvm_sregs &regs) const
857{
858 if (ioctl(KVM_GET_SREGS, &regs) == -1)
859 panic("KVM: Failed to get guest special registers\n");
860}
861
862void
863BaseKvmCPU::setSpecialRegisters(const struct kvm_sregs &regs)
864{
865 if (ioctl(KVM_SET_SREGS, (void *)&regs) == -1)
866 panic("KVM: Failed to set guest special registers\n");
867}
868
869void
870BaseKvmCPU::getFPUState(struct kvm_fpu &state) const
871{
872 if (ioctl(KVM_GET_FPU, &state) == -1)
873 panic("KVM: Failed to get guest FPU state\n");
874}
875
876void
877BaseKvmCPU::setFPUState(const struct kvm_fpu &state)
878{
879 if (ioctl(KVM_SET_FPU, (void *)&state) == -1)
880 panic("KVM: Failed to set guest FPU state\n");
881}
882
883
884void
885BaseKvmCPU::setOneReg(uint64_t id, const void *addr)
886{
887#ifdef KVM_SET_ONE_REG
888 struct kvm_one_reg reg;
889 reg.id = id;
890 reg.addr = (uint64_t)addr;
891
892 if (ioctl(KVM_SET_ONE_REG, &reg) == -1) {
893 panic("KVM: Failed to set register (0x%x) value (errno: %i)\n",
894 id, errno);
895 }
896#else
897 panic("KVM_SET_ONE_REG is unsupported on this platform.\n");
898#endif
899}
900
901void
902BaseKvmCPU::getOneReg(uint64_t id, void *addr) const
903{
904#ifdef KVM_GET_ONE_REG
905 struct kvm_one_reg reg;
906 reg.id = id;
907 reg.addr = (uint64_t)addr;
908
909 if (ioctl(KVM_GET_ONE_REG, &reg) == -1) {
910 panic("KVM: Failed to get register (0x%x) value (errno: %i)\n",
911 id, errno);
912 }
913#else
914 panic("KVM_GET_ONE_REG is unsupported on this platform.\n");
915#endif
916}
917
918std::string
920{
921#ifdef KVM_GET_ONE_REG
922 std::ostringstream ss;
923
924 ss.setf(std::ios::hex, std::ios::basefield);
925 ss.setf(std::ios::showbase);
926#define HANDLE_INTTYPE(len) \
927 case KVM_REG_SIZE_U ## len: { \
928 uint ## len ## _t value; \
929 getOneReg(id, &value); \
930 ss << value; \
931 } break
932
933#define HANDLE_ARRAY(len) \
934 case KVM_REG_SIZE_U ## len: { \
935 uint8_t value[len / 8]; \
936 getOneReg(id, value); \
937 ccprintf(ss, "[0x%x", value[0]); \
938 for (int i = 1; i < len / 8; ++i) \
939 ccprintf(ss, ", 0x%x", value[i]); \
940 ccprintf(ss, "]"); \
941 } break
942
943 switch (id & KVM_REG_SIZE_MASK) {
944 HANDLE_INTTYPE(8);
945 HANDLE_INTTYPE(16);
946 HANDLE_INTTYPE(32);
947 HANDLE_INTTYPE(64);
948 HANDLE_ARRAY(128);
949 HANDLE_ARRAY(256);
950 HANDLE_ARRAY(512);
951 HANDLE_ARRAY(1024);
952 default:
953 ss << "??";
954 }
955
956#undef HANDLE_INTTYPE
957#undef HANDLE_ARRAY
958
959 return ss.str();
960#else
961 panic("KVM_GET_ONE_REG is unsupported on this platform.\n");
962#endif
963}
964
965void
967{
968 if (!kvmStateDirty)
969 return;
970
971 assert(!threadContextDirty);
972
974 kvmStateDirty = false;
975}
976
977void
979{
981 return;
982
983 assert(!kvmStateDirty);
984
986 threadContextDirty = false;
987}
988
989Tick
991{
992 DPRINTF(KvmRun, "handleKvmExit (exit_reason: %i)\n", _kvmRun->exit_reason);
993 assert(_status == RunningService);
994
995 // Switch into the running state by default. Individual handlers
996 // can override this.
998 switch (_kvmRun->exit_reason) {
999 case KVM_EXIT_UNKNOWN:
1000 return handleKvmExitUnknown();
1001
1002 case KVM_EXIT_EXCEPTION:
1003 return handleKvmExitException();
1004
1005 case KVM_EXIT_IO:
1006 {
1007 ++stats.numIO;
1008 Tick ticks = handleKvmExitIO();
1010 return ticks;
1011 }
1012
1013 case KVM_EXIT_HYPERCALL:
1015 return handleKvmExitHypercall();
1016
1017 case KVM_EXIT_HLT:
1018 /* The guest has halted and is waiting for interrupts */
1019 DPRINTF(Kvm, "handleKvmExitHalt\n");
1020 ++stats.numHalt;
1021
1022 // Suspend the thread until the next interrupt arrives
1023 thread->suspend();
1024
1025 // This is actually ignored since the thread is suspended.
1026 return 0;
1027
1028 case KVM_EXIT_MMIO:
1029 {
1030 /* Service memory mapped IO requests */
1031 DPRINTF(KvmIO, "KVM: Handling MMIO (w: %u, addr: 0x%x, len: %u)\n",
1032 _kvmRun->mmio.is_write,
1033 _kvmRun->mmio.phys_addr, _kvmRun->mmio.len);
1034
1035 ++stats.numMMIO;
1036 Tick ticks = doMMIOAccess(_kvmRun->mmio.phys_addr, _kvmRun->mmio.data,
1037 _kvmRun->mmio.len, _kvmRun->mmio.is_write);
1038 // doMMIOAccess could have triggered a suspend, in which case we don't
1039 // want to overwrite the _status.
1040 if (_status != Idle)
1042 return ticks;
1043 }
1044
1045 case KVM_EXIT_IRQ_WINDOW_OPEN:
1047
1048 case KVM_EXIT_FAIL_ENTRY:
1049 return handleKvmExitFailEntry();
1050
1051 case KVM_EXIT_INTR:
1052 /* KVM was interrupted by a signal, restart it in the next
1053 * tick. */
1054 return 0;
1055
1056 case KVM_EXIT_INTERNAL_ERROR:
1057 panic("KVM: Internal error (suberror: %u)\n",
1058 _kvmRun->internal.suberror);
1059
1060 default:
1061 dump();
1062 panic("KVM: Unexpected exit (exit_reason: %u)\n", _kvmRun->exit_reason);
1063 }
1064}
1065
// NOTE(review): the signature lines of the six default exit handlers
// below (original lines 1067, 1075, 1081, 1089, 1097, 1106) were elided
// by the extraction. Their names -- handleKvmExitIO, handleKvmExitHypercall,
// handleKvmExitIRQWindowOpen, handleKvmExitUnknown, handleKvmExitException,
// handleKvmExitFailEntry -- are grounded by the call sites in handleKvmExit.
// Default handler for KVM_EXIT_IO: architectures with port IO override
// this; the base implementation treats it as fatal.
1066Tick
1068{
1069 panic("KVM: Unhandled guest IO (dir: %i, size: %i, port: 0x%x, count: %i)\n",
1070 _kvmRun->io.direction, _kvmRun->io.size,
1071 _kvmRun->io.port, _kvmRun->io.count);
1072}
1073
// Default handler for KVM_EXIT_HYPERCALL: fatal unless overridden.
1074Tick
1076{
1077 panic("KVM: Unhandled hypercall\n");
1078}
1079
// Default handler for KVM_EXIT_IRQ_WINDOW_OPEN: warn and continue.
1080Tick
1082{
1083 warn("KVM: Unhandled IRQ window.\n");
1084 return 0;
1085}
1086
1087
// Default handler for KVM_EXIT_UNKNOWN: dump state and abort.
1088Tick
1090{
1091 dump();
1092 panic("KVM: Unknown error when starting vCPU (hw reason: 0x%llx)\n",
1093 _kvmRun->hw.hardware_exit_reason);
1094}
1095
// Default handler for KVM_EXIT_EXCEPTION: dump state and abort.
1096Tick
1098{
1099 dump();
1100 panic("KVM: Got exception when starting vCPU "
1101 "(exception: %u, error_code: %u)\n",
1102 _kvmRun->ex.exception, _kvmRun->ex.error_code);
1103}
1104
// Default handler for KVM_EXIT_FAIL_ENTRY: dump state and abort.
1105Tick
1107{
1108 dump();
1109 panic("KVM: Failed to enter virtualized mode (hw reason: 0x%llx)\n",
1110 _kvmRun->fail_entry.hardware_entry_failure_reason);
1111}
1112
1113Tick
1114BaseKvmCPU::doMMIOAccess(Addr paddr, void *data, int size, bool write)
1115{
1118
1119 RequestPtr mmio_req = std::make_shared<Request>(
1120 paddr, size, Request::UNCACHEABLE, dataRequestorId());
1121
1122 mmio_req->setContext(tc->contextId());
1123 // Some architectures do need to massage physical addresses a bit
1124 // before they are inserted into the memory system. This enables
1125 // APIC accesses on x86 and m5ops where supported through a MMIO
1126 // interface.
1127 BaseMMU::Mode access_type(write ? BaseMMU::Write : BaseMMU::Read);
1128 Fault fault(tc->getMMUPtr()->finalizePhysical(mmio_req, tc, access_type));
1129 if (fault != NoFault)
1130 warn("Finalization of MMIO address failed: %s\n", fault->name());
1131
1132
1133 const MemCmd cmd(write ? MemCmd::WriteReq : MemCmd::ReadReq);
1134 PacketPtr pkt = new Packet(mmio_req, cmd);
1135 pkt->dataStatic(data);
1136
1137 if (mmio_req->isLocalAccess()) {
1138 // Since the PC has already been advanced by KVM, set the next
1139 // PC to the current PC. KVM doesn't use that value, and that
1140 // way any gem5 op or syscall which needs to know what the next
1141 // PC is will be able to get a reasonable value.
1142 //
1143 // We won't be able to rewind the current PC to the "correct"
1144 // value without figuring out how big the current instruction
1145 // is, and that's probably not worth the effort
1146 std::unique_ptr<PCStateBase> pc(tc->pcState().clone());
1147 stutterPC(*pc);
1148 tc->pcState(*pc);
1149 // We currently assume that there is no need to migrate to a
1150 // different event queue when doing local accesses. Currently, they
1151 // are only used for m5ops, so it should be a valid assumption.
1152 const Cycles ipr_delay = mmio_req->localAccessor(tc, pkt);
1153 threadContextDirty = true;
1154 delete pkt;
1155 return clockPeriod() * ipr_delay;
1156 } else {
1157 // Temporarily lock and migrate to the device event queue to
1158 // prevent races in multi-core mode.
1160
1161 return dataPort.submitIO(pkt);
1162 }
1163}
1164
// NOTE(review): the signature line (original 1166, presumably
// "BaseKvmCPU::setSignalMask(const sigset_t *mask)") was elided by the
// extraction -- confirm against upstream gem5.
// Install a signal mask for the vCPU via KVM_SET_SIGNAL_MASK. A null
// mask clears the vCPU-specific mask. The kvm_signal_mask struct has a
// trailing flexible sigset, hence the raw operator new allocation with
// a matching custom deleter.
1165void
1167{
1168 std::unique_ptr<struct kvm_signal_mask, void(*)(void *p)>
1169 kvm_mask(nullptr, [](void *p) { operator delete(p); });
1170
1171 if (mask) {
1172 kvm_mask.reset((struct kvm_signal_mask *)operator new(
1173 sizeof(struct kvm_signal_mask) + sizeof(*mask)));
1174 // The kernel and the user-space headers have different ideas
1175 // about the size of sigset_t. This seems like a massive hack,
1176 // but is actually what qemu does.
1177 assert(sizeof(*mask) >= 8);
1178 kvm_mask->len = 8;
1179 memcpy(kvm_mask->sigset, mask, kvm_mask->len);
1180 }
1181
1182 if (ioctl(KVM_SET_SIGNAL_MASK, (void *)kvm_mask.get()) == -1)
1183 panic("KVM: Failed to set vCPU signal mask (errno: %i)\n",
1184 errno);
1185}
1186
1187int
1188BaseKvmCPU::ioctl(int request, long p1) const
1189{
1190 if (vcpuFD == -1)
1191 panic("KVM: CPU ioctl called before initialization\n");
1192
1193 return ::ioctl(vcpuFD, request, p1);
1194}
1195
// NOTE(review): the signature line (original 1197, presumably
// "BaseKvmCPU::flushCoalescedMMIO()") and original line 1214 (between the
// DPRINTF and the doMMIOAccess call; presumably a stats update such as
// ++stats.numCoalescedMMIO) were elided by the extraction -- confirm.
// Drain the coalesced MMIO ring shared with KVM, replaying each entry
// as a write through doMMIOAccess(). Returns the accumulated ticks.
1196Tick
1198{
1199 if (!mmioRing)
1200 return 0;
1201
1202 DPRINTF(KvmIO, "KVM: Flushing the coalesced MMIO ring buffer\n");
1203
1204 // TODO: We might need to do synchronization when we start to
1205 // support multiple CPUs
1206 Tick ticks(0);
1207 while (mmioRing->first != mmioRing->last) {
1208 struct kvm_coalesced_mmio &ent(
1209 mmioRing->coalesced_mmio[mmioRing->first]);
1210
1211 DPRINTF(KvmIO, "KVM: Handling coalesced MMIO (addr: 0x%x, len: %u)\n",
1212 ent.phys_addr, ent.len);
1213
1215 ticks += doMMIOAccess(ent.phys_addr, ent.data, ent.len, true);
1216
1217 mmioRing->first = (mmioRing->first + 1) % KVM_COALESCED_MMIO_MAX;
1218 }
1219
1220 return ticks;
1221}
1222
/**
 * Signal handler for the vCPU kick signal (KVM_KICK_SIGNAL).
 *
 * The body is intentionally empty: merely delivering the signal is
 * enough to make a blocking KVM_RUN ioctl return to gem5 with EINTR.
 */
static void
onKickSignal(int signo, siginfo_t *si, void *data)
{
}
1237
1238void
1240{
1241 struct sigaction sa;
1242
1243 memset(&sa, 0, sizeof(sa));
1244 sa.sa_sigaction = onKickSignal;
1245 sa.sa_flags = SA_SIGINFO | SA_RESTART;
1246 if (sigaction(KVM_KICK_SIGNAL, &sa, NULL) == -1)
1247 panic("KVM: Failed to setup vCPU timer signal handler\n");
1248
1249 sigset_t sigset;
1250 if (pthread_sigmask(SIG_BLOCK, NULL, &sigset) == -1)
1251 panic("KVM: Failed get signal mask\n");
1252
1253 // Request KVM to setup the same signal mask as we're currently
1254 // running with except for the KVM control signal. We'll sometimes
1255 // need to raise the KVM_KICK_SIGNAL to cause immediate exits from
1256 // KVM after servicing IO requests. See kvmRun().
1257 sigdelset(&sigset, KVM_KICK_SIGNAL);
1258 setSignalMask(&sigset);
1259
1260 // Mask our control signals so they aren't delivered unless we're
1261 // actually executing inside KVM.
1262 sigaddset(&sigset, KVM_KICK_SIGNAL);
1263 if (pthread_sigmask(SIG_SETMASK, &sigset, NULL) == -1)
1264 panic("KVM: Failed mask the KVM control signals\n");
1265}
1266
1267bool
1269{
1270 int discardedSignal;
1271
1272 // Setting the timeout to zero causes sigtimedwait to return
1273 // immediately.
1274 struct timespec timeout;
1275 timeout.tv_sec = 0;
1276 timeout.tv_nsec = 0;
1277
1278 sigset_t sigset;
1279 sigemptyset(&sigset);
1280 sigaddset(&sigset, signum);
1281
1282 do {
1283 discardedSignal = sigtimedwait(&sigset, NULL, &timeout);
1284 } while (discardedSignal == -1 && errno == EINTR);
1285
1286 if (discardedSignal == signum)
1287 return true;
1288 else if (discardedSignal == -1 && errno == EAGAIN)
1289 return false;
1290 else
1291 panic("Unexpected return value from sigtimedwait: %i (errno: %i)\n",
1292 discardedSignal, errno);
1293}
1294
1295void
1297{
1298 DPRINTF(Kvm, "Attaching cycle counter...\n");
1299 PerfKvmCounterConfig cfgCycles(PERF_TYPE_HARDWARE,
1300 PERF_COUNT_HW_CPU_CYCLES);
1301 cfgCycles.disabled(true)
1302 .pinned(true);
1303
1304 // Try to exclude the host. We set both exclude_hv and
1305 // exclude_host since different architectures use slightly
1306 // different APIs in the kernel.
1307 cfgCycles.exclude_hv(true)
1308 .exclude_host(true);
1309
1311 // We need to configure the cycles counter to send overflows
1312 // since we are going to use it to trigger timer signals that
1313 // trap back into m5 from KVM. In practice, this means that we
1314 // need to set some non-zero sample period that gets
1315 // overridden when the timer is armed.
1316 cfgCycles.wakeupEvents(1)
1317 .samplePeriod(42);
1318 }
1319
1320 // We might be re-attaching counters due threads being
1321 // re-initialised after fork.
1322 if (usePerf) {
1323 if (hwCycles->attached()) {
1324 hwCycles->detach();
1325 }
1326
1327 hwCycles->attach(cfgCycles, 0); // TID (0 => currentThread)
1329 }
1330}
1331
1332bool
1334{
1336 return false;
1337
1338 if (!archIsDrained()) {
1339 DPRINTF(Drain, "tryDrain: Architecture code is not ready.\n");
1340 return false;
1341 }
1342
1343 if (_status == Idle || _status == Running) {
1344 DPRINTF(Drain,
1345 "tryDrain: CPU transitioned into the Idle state, drain done\n");
1347 return true;
1348 } else {
1349 DPRINTF(Drain, "tryDrain: CPU not ready.\n");
1350 return false;
1351 }
1352}
1353
1354void
1356{
1357 if (ioctl(KVM_RUN) == -1) {
1358 if (errno != EINTR)
1359 panic("KVM: Failed to start virtual CPU (errno: %i)\n",
1360 errno);
1361 }
1362}
1363
1364void
1366{
1369 } else {
1371 assert(next > ctrInsts);
1372 setupInstCounter(next - ctrInsts);
1373 }
1374}
1375
1376void
1378{
1379 // This function is for setting up instruction counter using perf
1380 if (!usePerf) {
1381 return;
1382 }
1383
1384 // No need to do anything if we aren't attaching for the first
1385 // time or the period isn't changing.
1386 if (period == activeInstPeriod && hwInstructions->attached()) {
1387 return;
1388 }
1389
1390 PerfKvmCounterConfig cfgInstructions(PERF_TYPE_HARDWARE,
1391 PERF_COUNT_HW_INSTRUCTIONS);
1392
1393 // Try to exclude the host. We set both exclude_hv and
1394 // exclude_host since different architectures use slightly
1395 // different APIs in the kernel.
1396 cfgInstructions.exclude_hv(true)
1397 .exclude_host(true);
1398
1399 if (period) {
1400 // Setup a sampling counter if that has been requested.
1401 cfgInstructions.wakeupEvents(1)
1402 .samplePeriod(period);
1403 }
1404
1405 // We need to detach and re-attach the counter to reliably change
1406 // sampling settings. See PerfKvmCounter::period() for details.
1407 if (hwInstructions->attached())
1408 hwInstructions->detach();
1409 assert(hwCycles->attached());
1410 hwInstructions->attach(cfgInstructions,
1411 0, // TID (0 => currentThread)
1412 *hwCycles);
1413
1414 if (period)
1415 hwInstructions->enableSignals(KVM_KICK_SIGNAL);
1416
1417 activeInstPeriod = period;
1418}
1419
1420} // namespace gem5
#define DPRINTF(x,...)
Definition trace.hh:210
const char data[]
RequestorID dataRequestorId() const
Reads this CPU's unique data requestor ID.
Definition base.hh:193
void init() override
init() is called after all C++ SimObjects have been created and all ports are connected.
Definition base.cc:310
System * system
Definition base.hh:392
void updateCycleCounters(CPUState state)
base method keeping track of cycle progression
Definition base.hh:561
@ CPU_STATE_SLEEP
Definition base.hh:552
std::vector< std::unique_ptr< CommitCPUStats > > commitStats
Definition base.hh:821
gem5::BaseCPU::BaseCPUStats baseStats
ThreadID numThreads
Number of threads we're actually simulating (<= SMT_MAX_THREADS).
Definition base.hh:390
void startup() override
startup() is the final initialization call before simulation.
Definition base.cc:349
virtual void switchOut()
Prepare for another CPU to take over execution.
Definition base.cc:588
virtual void takeOverFrom(BaseCPU *cpu)
Load the state of a CPU from the previous CPU object, invoked on all new CPUs that are about to be sw...
Definition base.cc:602
std::vector< ThreadContext * > threadContexts
Definition base.hh:260
bool switchedOut() const
Determine if the CPU is switched out.
Definition base.hh:373
std::queue< PacketPtr > pendingMMIOPkts
Pending MMIO packets.
Definition base.hh:621
Status nextIOState() const
Returns next valid state after one or more IO accesses.
Definition base.cc:184
Tick submitIO(PacketPtr pkt)
Interface to send Atomic or Timing IO request.
Definition base.cc:191
void recvReqRetry() override
Called by the peer if sendTimingReq was called on this peer (causing recvTimingReq to be called on th...
Definition base.cc:226
bool recvTimingResp(PacketPtr pkt) override
Receive a timing response from the peer.
Definition base.cc:209
unsigned int activeMMIOReqs
Number of MMIO requests in flight.
Definition base.hh:624
virtual void dump() const
Dump the internal state to the terminal.
Definition base.cc:596
int vcpuFD
KVM vCPU file descriptor.
Definition base.hh:708
void finishMMIOPending()
Callback from KvmCPUPort to transition the CPU out of RunningMMIOPending when all timing requests hav...
Definition base.cc:242
Status _status
CPU run state.
Definition base.hh:240
uint64_t activeInstPeriod
Currently active instruction count breakpoint.
Definition base.hh:759
virtual void updateKvmState()=0
Update the KVM state from the current thread context.
struct kvm_coalesced_mmio_ring * mmioRing
Coalesced MMIO ring buffer.
Definition base.hh:724
virtual Tick handleKvmExitHypercall()
The guest requested a monitor service using a hypercall.
Definition base.cc:1075
long vcpuID
KVM internal ID of the vCPU.
Definition base.hh:660
void drainResume() override
Resume execution after a successful drain.
Definition base.cc:399
void getSpecialRegisters(struct kvm_sregs &regs) const
Definition base.cc:856
virtual void updateThreadContext()=0
Update the current thread context with the KVM state.
void takeOverFrom(BaseCPU *cpu) override
Load the state of a CPU from the previous CPU object, invoked on all new CPUs that are about to be sw...
Definition base.cc:468
virtual uint64_t getHostCycles() const
Get the value of the hardware cycle counter in the guest.
Definition base.cc:712
Counter totalInsts() const override
Definition base.cc:583
void getOneReg(uint64_t id, void *addr) const
Definition base.cc:902
void init() override
init() is called after all C++ SimObjects have been created and all ports are connected.
Definition base.cc:128
const bool alwaysSyncTC
Be conservative and always synchronize the thread context on KVM entry/exit.
Definition base.hh:642
void suspendContext(ThreadID thread_num) override
Notify the CPU that the indicated context is now suspended.
Definition base.cc:536
virtual Tick handleKvmExitIRQWindowOpen()
The guest exited because an interrupt window was requested.
Definition base.cc:1081
void setSignalMask(const sigset_t *mask)
Set the signal mask used in kvmRun()
Definition base.cc:1166
void notifyFork() override
Notify a child process of a fork.
Definition base.cc:429
int vcpuMMapSize
Size of MMAPed kvm_run area.
Definition base.hh:710
void syncKvmState()
Update the KVM if the thread context is dirty.
Definition base.cc:978
bool tryDrain()
Try to drain the CPU if a drain is pending.
Definition base.cc:1333
void setRegisters(const struct kvm_regs &regs)
Definition base.cc:849
BaseKvmCPU(const BaseKvmCPUParams &params)
Definition base.cc:65
virtual Tick handleKvmExitException()
An unhandled virtualization exception occurred.
Definition base.cc:1097
void kick() const
Force an exit from KVM.
Definition base.hh:138
@ Running
Running normally.
Definition base.hh:205
@ Idle
Context not scheduled in KVM.
Definition base.hh:199
@ RunningMMIOPending
Timing MMIO request in flight or stalled.
Definition base.hh:227
@ RunningService
Requiring service at the beginning of the next cycle.
Definition base.hh:219
@ RunningServiceCompletion
Service completion in progress.
Definition base.hh:236
void deallocateContext(ThreadID thread_num)
Definition base.cc:559
void getRegisters(struct kvm_regs &regs) const
Get/Set the register state of the guest vCPU.
Definition base.cc:842
void setupSignalHandler()
Setup a signal handler to catch the timer signal used to switch back to the monitor.
Definition base.cc:1239
void setSpecialRegisters(const struct kvm_sregs &regs)
Definition base.cc:863
ThreadContext * getContext(int tn) override
Given a thread num get the thread context for it.
Definition base.cc:574
bool usePerf
True if using perf; False otherwise.
Definition base.hh:657
virtual Tick handleKvmExitIO()
The guest performed a legacy IO request (out/inp on x86)
Definition base.cc:1067
std::string getAndFormatOneReg(uint64_t id) const
Get and format one register for printout.
Definition base.cc:919
gem5::BaseKvmCPU::StatGroup stats
virtual Tick handleKvmExit()
Main kvmRun exit handler, calls the relevant handleKvmExit* depending on exit type.
Definition base.cc:990
void syncThreadContext()
Update a thread context if the KVM state is dirty with respect to the cached thread context.
Definition base.cc:966
void unserializeThread(CheckpointIn &cp, ThreadID tid) override
Unserialize one thread.
Definition base.cc:317
DrainState drain() override
Draining is the process of clearing out the states of SimObjects.These are the SimObjects that are pa...
Definition base.cc:328
Tick doMMIOAccess(Addr paddr, void *data, int size, bool write)
Inject a memory mapped IO request into gem5.
Definition base.cc:1114
void wakeup(ThreadID tid=0) override
Definition base.cc:499
void startup() override
startup() is the final initialization call before simulation.
Definition base.cc:137
Counter totalOps() const override
Definition base.cc:589
Tick flushCoalescedMMIO()
Service MMIO requests in the mmioRing.
Definition base.cc:1197
std::unique_ptr< PerfKvmCounter > hwInstructions
Guest instruction counter.
Definition base.hh:782
EventFunctionWrapper tickEvent
Definition base.hh:728
bool perfControlledByTimer
Does the runTimer control the performance counters?
Definition base.hh:791
virtual Tick kvmRun(Tick ticks)
Request KVM to run the guest for a given number of ticks.
Definition base.cc:720
void setupInstStop()
Setup an instruction break if there is one pending.
Definition base.cc:1365
KvmVM * vm
Definition base.hh:160
bool discardPendingSignal(int signum) const
Discard a (potentially) pending signal.
Definition base.cc:1268
virtual Tick kvmRunDrain()
Request the CPU to run until draining completes.
Definition base.cc:698
virtual Tick handleKvmExitFailEntry()
KVM failed to start the virtualized CPU.
Definition base.cc:1106
void switchOut() override
Prepare for another CPU to take over execution.
Definition base.cc:454
void restartEqThread()
Thread-specific initialization.
Definition base.cc:252
void setupCounters()
Setup hardware performance counters.
Definition base.cc:1296
void serializeThread(CheckpointOut &cp, ThreadID tid) const override
Serialize a single thread.
Definition base.cc:304
void setupInstCounter(uint64_t period=0)
Setup the guest instruction counter.
Definition base.cc:1377
virtual ~BaseKvmCPU()
Definition base.cc:120
void activateContext(ThreadID thread_num) override
Notify the CPU that the indicated context is now active.
Definition base.cc:517
void tick()
Execute the CPU until the next event in the main event queue or until the guest needs service from ge...
Definition base.cc:602
virtual bool archIsDrained() const
Is the architecture specific code in a state that prevents draining?
Definition base.hh:541
struct kvm_run * _kvmRun
Pointer to the kvm_run structure used to communicate parameters with KVM.
Definition base.hh:719
std::unique_ptr< PerfKvmCounter > hwCycles
Guest cycle counter.
Definition base.hh:769
void setOneReg(uint64_t id, const void *addr)
Get/Set single register using the KVM_(SET|GET)_ONE_REG API.
Definition base.cc:885
std::unique_ptr< BaseKvmTimer > runTimer
Timer used to force execution into the monitor after a specified number of simulation tick equivalent...
Definition base.hh:800
KVMCpuPort dataPort
Port for data requests.
Definition base.hh:633
SimpleThread * thread
A cached copy of a thread's state in the form of a SimpleThread object.
Definition base.hh:153
void setFPUState(const struct kvm_fpu &state)
Definition base.cc:877
virtual Tick handleKvmExitUnknown()
An unknown architecture dependent error occurred when starting the vCPU.
Definition base.cc:1089
void getFPUState(struct kvm_fpu &state) const
Get/Set the guest FPU/vector state.
Definition base.cc:870
void haltContext(ThreadID thread_num) override
Notify the CPU that the indicated context is now halted.
Definition base.cc:566
bool kvmStateDirty
Is the KVM state dirty? Set to true to force an update of the KVM vCPU state upon the next call to kv...
Definition base.hh:654
EventQueue * deviceEventQueue()
Get a pointer to the event queue owning devices.
Definition base.hh:447
void verifyMemoryMode() const override
Verify that the system is in a memory mode supported by the CPU.
Definition base.cc:490
bool threadContextDirty
Is the gem5 context dirty? Set to true to force an update of the KVM vCPU state upon the next call to...
Definition base.hh:648
const long pageSize
Cached page size of the host.
Definition base.hh:726
pthread_t vcpuThread
ID of the vCPU thread.
Definition base.hh:663
Counter ctrInsts
Number of instructions executed by the CPU.
Definition base.hh:823
float hostFactor
Host factor as specified in the configuration.
Definition base.hh:803
ThreadContext * tc
ThreadContext object, provides an interface for external objects to modify this thread's state.
Definition base.hh:158
virtual void ioctlRun()
Execute the KVM_RUN ioctl.
Definition base.cc:1355
virtual void stutterPC(PCStateBase &pc) const =0
Modify a PCStatePtr's value so that its next PC is the current PC.
virtual Fault finalizePhysical(const RequestPtr &req, ThreadContext *tc, Mode mode) const
Definition mmu.cc:125
Tick clockEdge(Cycles cycles=Cycles(0)) const
Determine the tick when a cycle begins, by default the current one, but the argument also enables the...
Tick nextCycle() const
Based on the clock of the object, determine the start tick of the first cycle that is at least one cy...
Tick clockPeriod() const
Cycles ticksToCycles(Tick t) const
Cycles is a wrapper class for representing cycle counts, i.e.
Definition types.hh:79
Tick nextTick() const
Definition eventq.hh:836
int createVCPU(long vcpuID)
Create a new vCPU within a VM.
Definition vm.cc:573
Kvm * kvm
Global KVM interface.
Definition vm.hh:421
long allocVCPUID()
Allocate a new vCPU ID within the VM.
Definition vm.cc:585
void cpuStartup()
VM CPU initialization code.
Definition vm.cc:358
KVM parent interface.
Definition vm.hh:81
int getVCPUMMapSize() const
Get the size of the MMAPed parameter area used to communicate vCPU parameters between the kernel and ...
Definition vm.hh:96
int capCoalescedMMIO() const
Check if coalesced MMIO is supported and which page in the MMAP'ed structure it stores requests in.
Definition vm.cc:136
virtual std::string name() const
Definition named.hh:47
virtual PCStateBase * clone() const =0
A Packet is used to encapsulate a transfer between two objects in the memory system (e....
Definition packet.hh:295
PerfEvent counter configuration.
Definition perfevent.hh:55
PerfKvmCounterConfig & samplePeriod(uint64_t period)
Set the initial sample period (overflow count) of an event.
Definition perfevent.hh:88
PerfKvmCounterConfig & disabled(bool val)
Don't start the performance counter automatically when attaching it.
Definition perfevent.hh:113
PerfKvmCounterConfig & exclude_host(bool val)
Exclude the events from the host (i.e., only include events from the guest system).
Definition perfevent.hh:144
PerfKvmCounterConfig & exclude_hv(bool val)
Exclude the hyper visor (i.e., only include events from the guest system).
Definition perfevent.hh:159
PerfKvmCounterConfig & pinned(bool val)
Force the group to be on the active all the time (i.e., disallow multiplexing).
Definition perfevent.hh:126
PerfKvmCounterConfig & wakeupEvents(uint32_t events)
Set the number of samples that need to be triggered before reporting data as being available on the p...
Definition perfevent.hh:101
PerfEvent based timer using the host's CPU cycle counter.
Definition timer.hh:222
Timer based on standard POSIX timers.
Definition timer.hh:188
@ UNCACHEABLE
The request is to an uncacheable address.
Definition request.hh:125
The SimpleThread object provides a combination of the ThreadState object and the ThreadContext interf...
int threadId() const override
void serialize(CheckpointOut &cp) const override
Serialize an object.
Status status() const override
void suspend() override
Set the status to Suspended.
void activate() override
Set the status to Active.
EventQueue comInstEventQueue
An instruction-based event queue.
void unserialize(CheckpointIn &cp) override
Unserialize an object.
ThreadContext * getTC()
Returns the pointer to this SimpleThread's ThreadContext.
KvmVM * getKvmVM() const
Get a pointer to the Kernel Virtual Machine (KVM) SimObject, if present.
Definition system.hh:333
bool bypassCaches() const
Should caches be bypassed?
Definition system.hh:279
ThreadContext is the external interface to all thread state for anything outside of the CPU.
@ Halted
Permanently shut down.
@ Suspended
Temporarily inactive.
virtual const PCStateBase & pcState() const =0
virtual Status status() const =0
virtual BaseMMU * getMMUPtr()=0
virtual ContextID contextId() const =0
Statistics container.
Definition group.hh:93
#define KVM_KICK_SIGNAL
Signal to use to trigger exits from KVM.
Definition base.hh:56
#define ADD_STAT(n,...)
Convenience macro to add a stat to a statistics group.
Definition group.hh:75
void kvmInterrupt(const struct kvm_interrupt &interrupt)
Send a normal interrupt to the guest.
Definition base.cc:834
void kvmNonMaskableInterrupt()
Send a non-maskable interrupt to the guest.
Definition base.cc:826
int ioctl(int request, long p1) const
vCPU ioctl interface.
Definition base.cc:1188
void signalDrainDone() const
Signal that an object is drained.
Definition drain.hh:305
DrainState drainState() const
Return the current drain state of an object.
Definition drain.hh:324
DrainState
Object drain/handover states.
Definition drain.hh:75
@ Draining
Draining buffers pending serialization/handover.
@ Drained
Buffers drained, ready for serialization/handover.
void deschedule(Event &event)
Definition eventq.hh:1021
bool scheduled() const
Determine if the current event is scheduled.
Definition eventq.hh:458
void schedule(Event &event, Tick when)
Definition eventq.hh:1012
bool empty() const
Returns true if no events are queued.
Definition eventq.hh:891
static const Priority CPU_Tick_Pri
CPU ticks must come after other associated CPU events (such as writebacks).
Definition eventq.hh:207
EventQueue * eventQueue() const
Definition eventq.hh:1003
#define panic(...)
This implements a cprintf based panic() function.
Definition logging.hh:188
#define fatal_if(cond,...)
Conditional fatal macro that checks the supplied condition and only causes a fatal error if the condi...
Definition logging.hh:236
#define fatal(...)
This implements a cprintf based fatal() function.
Definition logging.hh:200
#define hack_once(...)
Definition logging.hh:264
const Params & params() const
atomic_var_t state
Definition helpers.cc:211
#define warn(...)
Definition logging.hh:256
#define inform(...)
Definition logging.hh:257
Bitfield< 3 > sa
Bitfield< 3, 0 > mask
Definition pcstate.hh:63
Bitfield< 6 > si
Bitfield< 33 > id
Bitfield< 0 > vm
Bitfield< 21 > ss
Definition misc_types.hh:60
Bitfield< 4 > pc
Bitfield< 0 > p
Bitfield< 5, 3 > reg
Definition types.hh:92
Bitfield< 3 > addr
Definition types.hh:84
Bitfield< 5 > lock
Definition types.hh:82
double Counter
All counters are of 64-bit values.
Definition types.hh:46
Copyright (c) 2024 - Pranith Kumar Copyright (c) 2020 Inria All rights reserved.
Definition binary32.hh:36
std::shared_ptr< FaultBase > Fault
Definition types.hh:249
int16_t ThreadID
Thread index/ID type.
Definition types.hh:235
std::shared_ptr< Request > RequestPtr
Definition request.hh:94
static void onKickSignal(int signo, siginfo_t *si, void *data)
Dummy handler for KVM kick signals.
Definition base.cc:1234
Tick curTick()
The universal simulation clock.
Definition cur_tick.hh:46
std::ostream CheckpointOut
Definition serialize.hh:66
uint64_t Addr
Address type This will probably be moved somewhere else in the near future.
Definition types.hh:147
bool FullSystem
The FullSystem variable can be used to determine the current mode of simulation.
Definition root.cc:220
uint64_t Tick
Tick count type.
Definition types.hh:58
const Tick MaxTick
Definition types.hh:60
EventQueue * curEventQueue()
Definition eventq.hh:91
constexpr decltype(nullptr) NoFault
Definition types.hh:253
statistics::Scalar numInsts
Definition base.hh:637
statistics::Scalar numCycles
Definition base.hh:640
statistics::Scalar numExitSignal
Definition base.hh:812
StatGroup(statistics::Group *parent)
Definition base.cc:281
statistics::Scalar numIO
Definition base.hh:815
statistics::Scalar numHypercalls
Definition base.hh:818
statistics::Scalar numHalt
Definition base.hh:816
statistics::Scalar numVMHalfEntries
Definition base.hh:811
statistics::Scalar numMMIO
Definition base.hh:813
statistics::Scalar numCoalescedMMIO
Definition base.hh:814
statistics::Scalar numInterrupts
Definition base.hh:817
statistics::Scalar numVMExits
Definition base.hh:810
Tick lastSuspend
Last time suspend was called on this thread.
Tick lastActivate
Last time activate was called on this thread.
const std::string & name()
Definition trace.cc:48

Generated on Tue Jun 18 2024 16:24:01 for gem5 by doxygen 1.11.0