
Principles: KVM Low-Level Internals

hypervisor overview

A hypervisor allows multiple operating systems to share one CPU (or several CPUs, on multi-core machines) and coordinates the virtual machines running on top of it. For this reason a hypervisor is also called a Virtual Machine Monitor, or VMM for short.

  • On x86 systems, Intel and AMD each added hardware virtualization support to their products: Intel VT and AMD-V

Hypervisors come in two types:

  • Type-1: runs directly on the host hardware, controlling the hardware resources and managing the guest operating systems (the OSes installed inside the VMs)
  • Type-2: runs as an ordinary program on top of a conventional operating system, so each guest operating system runs as a process on the host (QEMU and VMware use the Type-2 model)

Hypervisors not only coordinate access to these hardware resources but also enforce isolation between the virtual machines. When the server boots and the hypervisor starts, it loads the operating system of every virtual machine client and allocates each VM an appropriate amount of memory, CPU, network, and disk.

One very important use of hypervisors on Linux is KVM.

hypervisor & KVM

KVM (Kernel-based Virtual Machine) is a loadable module of the Linux kernel; it is the kernel-resident virtualization infrastructure for Linux on x86 hardware.

  • KVM turns a Linux host into a hypervisor (virtual machine monitor); it requires x86 hardware with virtualization extensions (e.g. Intel VT or AMD-V) and is a full-virtualization architecture

KVM is the driver that manages the virtual hardware devices, and it exposes the character device /dev/kvm as its management interface:

#define KVM_DEV_PATH		"/dev/kvm"
  • The ioctl system-call path: sys_ioctl -> ksys_ioctl -> do_vfs_ioctl -> vfs_ioctl -> unlocked_ioctl -> [kvm_dev_ioctl, kvm_vcpu_ioctl, kvm_vm_ioctl] (a minimal userspace check of this interface is sketched below)
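
The hedged sketch below simply opens /dev/kvm and issues KVM_GET_API_VERSION through this ioctl path; the error handling and the comparison against KVM_API_VERSION (12 on current kernels) are additions for illustration, not part of the original post.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void) {
    int kvm = open("/dev/kvm", O_RDWR | O_CLOEXEC);   /* served by kvm_dev_ioctl on the kernel side */
    if (kvm < 0) { perror("open /dev/kvm"); return 1; }

    int version = ioctl(kvm, KVM_GET_API_VERSION, 0); /* travels sys_ioctl -> ... -> kvm_dev_ioctl */
    if (version != KVM_API_VERSION)                   /* the stable API version is 12 */
        fprintf(stderr, "unexpected KVM API version: %d\n", version);
    else
        printf("KVM API version: %d\n", version);
    return 0;
}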

The related ioctl commands are:

#define KVM_GET_API_VERSION        _IO(KVMIO,   0x00) /* get the KVM API version */
#define KVM_CREATE_VM              _IO(KVMIO,   0x01) /* create a VM */
#define KVM_CHECK_EXTENSION        _IO(KVMIO,   0x03) /* query a KVM API extension */
#define KVM_GET_VCPU_MMAP_SIZE     _IO(KVMIO,   0x04) /* get the size of the memory to map */
#define KVM_CREATE_VCPU            _IO(KVMIO,   0x41) /* create a vcpu */
#define KVM_SET_USER_MEMORY_REGION _IOW(KVMIO,  0x46, \
                                        struct kvm_userspace_memory_region) /* set up / modify the VM's memory */

#define KVM_RUN                    _IO(KVMIO,   0x80) /* run the VM */
#define KVM_GET_REGS               _IOR(KVMIO,  0x81, struct kvm_regs)  /* return the vcpu's general-purpose registers */
#define KVM_SET_REGS               _IOW(KVMIO,  0x82, struct kvm_regs)  /* set the vcpu's general-purpose registers */
#define KVM_GET_SREGS              _IOR(KVMIO,  0x83, struct kvm_sregs) /* return the vcpu's special registers */
#define KVM_SET_SREGS              _IOW(KVMIO,  0x84, struct kvm_sregs) /* set the vcpu's special registers */

A reference example:

const uint8_t code[] = {
    /* ...... */
};

int main(int argc, char *argv[]) {
    int kvm = open(KVM_FILE, O_RDWR|O_CLOEXEC);

    int ret = ioctl(kvm, KVM_GET_API_VERSION, 0); /* query the KVM API version */

    ret = ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_NR_VCPUS); /* query the recommended maximum number of vcpus */

    int vmfd = ioctl(kvm, KVM_CREATE_VM, (unsigned long)0); /* create the VM */
    char* mem = mmap(NULL, 0x1000, PROT_READ|PROT_WRITE, MAP_SHARED|MAP_ANONYMOUS, -1, 0); /* allocate memory for the virtual machine */
    memcpy(mem, code, sizeof(code));

    struct kvm_userspace_memory_region region = {
        .slot = 0,
        .guest_phys_addr = 0x1000,
        .memory_size = 0x1000,
        .userspace_addr = (uint64_t)mem, /* back the guest memory with this host buffer */
    };
    ret = ioctl(vmfd, KVM_SET_USER_MEMORY_REGION, &region); /* tell KVM that the VM has 4096 bytes of memory */

    int vcpufd = ioctl(vmfd, KVM_CREATE_VCPU, (unsigned long)0); /* create a vcpu */

    int mmap_size = ioctl(kvm, KVM_GET_VCPU_MMAP_SIZE, NULL); /* get the size of the region that must be mapped */

    struct kvm_run* run = mmap(NULL, mmap_size, PROT_READ|PROT_WRITE, MAP_SHARED, vcpufd, 0); /* map the kvm_run structure; mmap_size is usually larger than sizeof(struct kvm_run) because the kernel stores other transient data in that area */

    struct kvm_sregs sregs;
    ret = ioctl(vcpufd, KVM_GET_SREGS, &sregs); /* get the special register values */
    sregs.cs.base = 0;
    sregs.cs.selector = 0;
    ret = ioctl(vcpufd, KVM_SET_SREGS, &sregs); /* set the special register values */

    struct kvm_regs regs = {
        .rip = 0x1000,
        .rax = 2,
        .rbx = 2,
        .rflags = 0x2,
    };
    ret = ioctl(vcpufd, KVM_SET_REGS, &regs); /* set the general-purpose register values */

    while (1) {
        ret = ioctl(vcpufd, KVM_RUN, NULL); /* start running instructions on the vcpu */
        switch (run->exit_reason) { /* the kvm_run structure records why the guest stopped */
        /* Handle exit */
        }
    }

    return 0;
}
  • This snippet was extracted from an earlier KVM pwn challenge; it is for reference only and will not compile as-is (a hedged sketch of what the elided pieces usually look like follows)
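
To make the exit loop above concrete, here is a hedged sketch of what the elided pieces often look like in minimal KVM demos: demo_code is a hypothetical real-mode guest (add bx to ax, write al to port 0x3f8, halt), and handle_exit covers the matching KVM_EXIT_IO / KVM_EXIT_HLT reasons. Neither is taken from the original challenge.

#include <stdio.h>
#include <stdint.h>
#include <linux/kvm.h>

/* Hypothetical 16-bit real-mode guest code (cs.base = 0, rip = 0x1000 as above). */
const uint8_t demo_code[] = {
    0x01, 0xd8,             /* add %bx, %ax    -> with rax = rbx = 2, ax becomes 4 */
    0xba, 0xf8, 0x03,       /* mov $0x3f8, %dx */
    0xee,                   /* out %al, (%dx)  -> causes a KVM_EXIT_IO            */
    0xf4,                   /* hlt             -> causes a KVM_EXIT_HLT           */
};

/* Sketch of an exit-reason dispatch; `run` is the mmap'ed kvm_run structure. */
static void handle_exit(struct kvm_run *run, int *done)
{
    switch (run->exit_reason) {
    case KVM_EXIT_HLT:                       /* guest executed hlt */
        *done = 1;
        break;
    case KVM_EXIT_IO:                        /* port I/O trapped to userspace */
        if (run->io.direction == KVM_EXIT_IO_OUT && run->io.port == 0x3f8)
            printf("guest wrote %d\n", *((uint8_t *)run + run->io.data_offset));
        break;
    default:
        fprintf(stderr, "unhandled exit_reason = %u\n", run->exit_reason);
        *done = 1;
        break;
    }
}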

KVM ioctl

The kernel functions that back the KVM API are:

  • kvm_dev_ioctl: handles the kvm fd (/dev/kvm)
  • kvm_vm_ioctl: handles a vmfd
  • kvm_vcpu_ioctl: handles a vcpufd

kvm_dev_ioctl:

static long kvm_dev_ioctl(struct file *filp,
              unsigned int ioctl, unsigned long arg)
{
    long r = -EINVAL;

    switch (ioctl) {
    case KVM_GET_API_VERSION: /* return the KVM API version */
        if (arg)
            goto out;
        r = KVM_API_VERSION;
        break;
    case KVM_CREATE_VM: /* create a vm */
        r = kvm_dev_ioctl_create_vm(arg);
        break;
    case KVM_CHECK_EXTENSION: /* query a KVM API extension */
        r = kvm_vm_ioctl_check_extension_generic(NULL, arg);
        break;
    case KVM_GET_VCPU_MMAP_SIZE: /* size of the memory to be mapped */
        if (arg)
            goto out;
        r = PAGE_SIZE; /* struct kvm_run */
#ifdef CONFIG_X86
        r += PAGE_SIZE; /* pio data page */
#endif
#ifdef CONFIG_KVM_MMIO
        r += PAGE_SIZE; /* coalesced mmio ring page */
#endif
        break;
    case KVM_TRACE_ENABLE:
    case KVM_TRACE_PAUSE:
    case KVM_TRACE_DISABLE:
        r = -EOPNOTSUPP;
        break;
    default:
        return kvm_arch_dev_ioctl(filp, ioctl, arg);
    }
out:
    return r;
}

kvm_vm_ioctl:

static long kvm_vm_ioctl(struct file *filp,
             unsigned int ioctl, unsigned long arg)
{
    struct kvm *kvm = filp->private_data;
    void __user *argp = (void __user *)arg;
    int r;

    if (kvm->mm != current->mm)
        return -EIO;
    switch (ioctl) {
    case KVM_CREATE_VCPU: /* create a vcpu */
        r = kvm_vm_ioctl_create_vcpu(kvm, arg);
        break;
    case KVM_ENABLE_CAP: {
        ......
    }
    case KVM_SET_USER_MEMORY_REGION: {
        ......
    }
    case KVM_GET_DIRTY_LOG: {
        ......
    }
#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
    case KVM_CLEAR_DIRTY_LOG: {
        ......
    }
#endif
#ifdef CONFIG_KVM_MMIO
    case KVM_REGISTER_COALESCED_MMIO: {
        ......
    }
    case KVM_UNREGISTER_COALESCED_MMIO: {
        ......
    }
#endif
    case KVM_IRQFD: {
        ......
    }
    case KVM_IOEVENTFD: {
        ......
    }
#ifdef CONFIG_HAVE_KVM_MSI
    case KVM_SIGNAL_MSI: {
        ......
    }
#endif
#ifdef __KVM_HAVE_IRQ_LINE
    case KVM_IRQ_LINE_STATUS:
    case KVM_IRQ_LINE: {
        ......
    }
#endif
#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
    case KVM_SET_GSI_ROUTING: {
        ......
    }
#endif /* CONFIG_HAVE_KVM_IRQ_ROUTING */
    case KVM_CREATE_DEVICE: {
        ......
    }
    case KVM_CHECK_EXTENSION: {
        ......
    }
    default:
        r = kvm_arch_vm_ioctl(filp, ioctl, arg);
    }
out:
    return r;
}

kvm_vcpu_ioctl:

static long kvm_vcpu_ioctl(struct file *filp,
               unsigned int ioctl, unsigned long arg)
{
    struct kvm_vcpu *vcpu = filp->private_data;
    void __user *argp = (void __user *)arg;
    int r;
    struct kvm_fpu *fpu = NULL;
    struct kvm_sregs *kvm_sregs = NULL;

    if (vcpu->kvm->mm != current->mm)
        return -EIO;

    if (unlikely(_IOC_TYPE(ioctl) != KVMIO))
        return -EINVAL;

    /*
     * Some architectures have vcpu ioctls that are asynchronous to vcpu
     * execution; mutex_lock() would break them.
     */
    r = kvm_arch_vcpu_async_ioctl(filp, ioctl, arg);
    if (r != -ENOIOCTLCMD)
        return r;

    if (mutex_lock_killable(&vcpu->mutex))
        return -EINTR;
    switch (ioctl) {
    case KVM_RUN: { /* run the virtual machine */
        struct pid *oldpid;
        r = -EINVAL;
        if (arg)
            goto out;
        oldpid = rcu_access_pointer(vcpu->pid); /* RCU-protected read of the pid */
        if (unlikely(oldpid != task_pid(current))) {
            /* The thread running this VCPU changed. */
            struct pid *newpid;

            r = kvm_arch_vcpu_run_pid_change(vcpu);
            if (r)
                break;

            newpid = get_task_pid(current, PIDTYPE_PID);
            rcu_assign_pointer(vcpu->pid, newpid);
            if (oldpid)
                synchronize_rcu();
            put_pid(oldpid);
        }
        r = kvm_arch_vcpu_ioctl_run(vcpu);
        trace_kvm_userspace_exit(vcpu->run->exit_reason, r);
        break;
    }
    case KVM_GET_REGS: { /* get the general-purpose register values */
        struct kvm_regs *kvm_regs;

        r = -ENOMEM;
        kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL_ACCOUNT);
        if (!kvm_regs)
            goto out;
        r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
        if (r)
            goto out_free1;
        r = -EFAULT;
        if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
            goto out_free1;
        r = 0;
out_free1:
        kfree(kvm_regs);
        break;
    }
    case KVM_SET_REGS: { /* set the general-purpose register values */
        struct kvm_regs *kvm_regs;

        kvm_regs = memdup_user(argp, sizeof(*kvm_regs));
        if (IS_ERR(kvm_regs)) {
            r = PTR_ERR(kvm_regs);
            goto out;
        }
        r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
        kfree(kvm_regs);
        break;
    }
    case KVM_GET_SREGS: { /* get the special register values */
        kvm_sregs = kzalloc(sizeof(struct kvm_sregs),
                    GFP_KERNEL_ACCOUNT);
        r = -ENOMEM;
        if (!kvm_sregs)
            goto out;
        r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs);
        if (r)
            goto out;
        r = -EFAULT;
        if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs)))
            goto out;
        r = 0;
        break;
    }
    case KVM_SET_SREGS: { /* set the special register values */
        kvm_sregs = memdup_user(argp, sizeof(*kvm_sregs));
        if (IS_ERR(kvm_sregs)) {
            r = PTR_ERR(kvm_sregs);
            kvm_sregs = NULL;
            goto out;
        }
        r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
        break;
    }
    case KVM_GET_MP_STATE: {
        ......
    }
    case KVM_SET_MP_STATE: {
        ......
    }
    case KVM_TRANSLATE: {
        ......
    }
    case KVM_SET_GUEST_DEBUG: {
        ......
    }
    case KVM_SET_SIGNAL_MASK: {
        ......
    }
    case KVM_GET_FPU: {
        ......
    }
    case KVM_SET_FPU: {
        ......
    }
    default:
        r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
    }
out:
    mutex_unlock(&vcpu->mutex);
    kfree(fpu);
    kfree(kvm_sregs);
    return r;
}

These three functions back the three kinds of KVM file descriptors (in Linux everything is a file), and each one dispatches the different commands in a switch-case.

When KVM creates a virtual machine and runs code in it, the most important steps are the following:

int vmfd = ioctl(kvm, KVM_CREATE_VM, (unsigned long)0);      /* create the vm */
int vcpufd = ioctl(vmfd, KVM_CREATE_VCPU, (unsigned long)0); /* create a vcpu */
struct kvm_run* run = mmap(NULL, mmap_size, PROT_READ|PROT_WRITE, MAP_SHARED, vcpufd, 0);
ret = ioctl(vcpufd, KVM_GET_SREGS, &sregs); /* get the special registers */
ret = ioctl(vcpufd, KVM_SET_SREGS, &sregs); /* set the special registers */
ret = ioctl(vcpufd, KVM_SET_REGS, &regs);   /* set the general-purpose registers */
ret = ioctl(vcpufd, KVM_RUN, NULL);         /* run the virtual machine */

Before analyzing the individual functions, let's first look at a few important structures.

KVM-related structures

The main structure created when a vm is created:

struct kvm {
    spinlock_t mmu_lock;
    struct mutex slots_lock;
    struct mm_struct *mm; /* userspace tied to this vm */
    struct kvm_memslots __rcu *memslots[KVM_ADDRESS_SPACE_NUM];
    struct kvm_vcpu *vcpus[KVM_MAX_VCPUS]; /* one kvm can own multiple vcpus */

    /*
     * created_vcpus is protected by kvm->lock, and is incremented
     * at the beginning of KVM_CREATE_VCPU.  online_vcpus is only
     * incremented after storing the kvm_vcpu pointer in vcpus,
     * and is accessed atomically.
     */
    atomic_t online_vcpus;
    int created_vcpus;
    int last_boosted_vcpu;
    struct list_head vm_list;
    struct mutex lock;
    struct kvm_io_bus __rcu *buses[KVM_NR_BUSES];
#ifdef CONFIG_HAVE_KVM_EVENTFD
    struct {
        spinlock_t lock;
        struct list_head items;
        struct list_head resampler_list;
        struct mutex resampler_lock;
    } irqfds;
    struct list_head ioeventfds;
#endif
    struct kvm_vm_stat stat;
    struct kvm_arch arch;
    refcount_t users_count; /* reference counter for this kvm, initialized when the kvm is created */
#ifdef CONFIG_KVM_MMIO
    struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
    spinlock_t ring_lock;
    struct list_head coalesced_zones;
#endif

    struct mutex irq_lock;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
    /*
     * Update side is protected by irq_lock.
     */
    struct kvm_irq_routing_table __rcu *irq_routing;
#endif
#ifdef CONFIG_HAVE_KVM_IRQFD
    struct hlist_head irq_ack_notifier_list;
#endif

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
    struct mmu_notifier mmu_notifier;
    unsigned long mmu_notifier_seq;
    long mmu_notifier_count;
#endif
    long tlbs_dirty;
    struct list_head devices;
    u64 manual_dirty_log_protect;
    struct dentry *debugfs_dentry;
    struct kvm_stat_data **debugfs_stat_data;
    struct srcu_struct srcu;
    struct srcu_struct irq_srcu;
    pid_t userspace_pid;
    unsigned int max_halt_poll_ns;
};

The main structure created when a vcpu is created:

struct kvm_vcpu {
    struct kvm *kvm;
#ifdef CONFIG_PREEMPT_NOTIFIERS
    struct preempt_notifier preempt_notifier;
#endif
    int cpu;
    int vcpu_id; /* id given by userspace at creation */
    int vcpu_idx; /* index in kvm->vcpus array */
    int srcu_idx;
    int mode;
    u64 requests;
    unsigned long guest_debug;

    int pre_pcpu;
    struct list_head blocked_vcpu_list;

    struct mutex mutex;
    struct kvm_run *run;

    struct rcuwait wait;
    struct pid __rcu *pid;
    int sigset_active;
    sigset_t sigset;
    struct kvm_vcpu_stat stat;
    unsigned int halt_poll_ns;
    bool valid_wakeup;

#ifdef CONFIG_HAS_IOMEM
    int mmio_needed;
    int mmio_read_completed;
    int mmio_is_write;
    int mmio_cur_fragment;
    int mmio_nr_fragments;
    struct kvm_mmio_fragment mmio_fragments[KVM_MAX_MMIO_FRAGMENTS];
#endif

#ifdef CONFIG_KVM_ASYNC_PF
    struct {
        u32 queued;
        struct list_head queue;
        struct list_head done;
        spinlock_t lock;
    } async_pf;
#endif

#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
    /*
     * Cpu relax intercept or pause loop exit optimization
     * in_spin_loop: set when a vcpu does a pause loop exit
     * or cpu relax intercepted.
     * dy_eligible: indicates whether vcpu is eligible for directed yield.
     */
    struct {
        bool in_spin_loop;
        bool dy_eligible;
    } spin_loop;
#endif
    bool preempted;
    bool ready;
    struct kvm_vcpu_arch arch;
};

The structure used when the context is switched (it holds the Intel Processor Trace state):

struct pt_ctx {
    u64 ctl;
    u64 status;
    u64 output_base;
    u64 output_mask;
    u64 cr3_match;
    u64 addr_a[RTIT_ADDR_RANGE];
    u64 addr_b[RTIT_ADDR_RANGE];
};

The structure used when setting the special registers, and the one used when setting the general-purpose registers:

struct kvm_sregs {
    /* out (KVM_GET_SREGS) / in (KVM_SET_SREGS) */
    struct kvm_segment cs, ds, es, fs, gs, ss;
    struct kvm_segment tr, ldt;
    struct kvm_dtable gdt, idt;
    __u64 cr0, cr2, cr3, cr4, cr8;
    __u64 efer;
    __u64 apic_base;
    __u64 interrupt_bitmap[(KVM_NR_INTERRUPTS + 63) / 64];
};
struct kvm_regs {
    /* out (KVM_GET_REGS) / in (KVM_SET_REGS) */
    __u64 rax, rbx, rcx, rdx;
    __u64 rsi, rdi, rsp, rbp;
    __u64 r8, r9, r10, r11;
    __u64 r12, r13, r14, r15;
    __u64 rip, rflags;
};

KVM low-level code analysis

Creating the vmfd: kvm_dev_ioctl_create_vm

static int kvm_dev_ioctl_create_vm(unsigned long type)
{
    int r;
    struct kvm *kvm;
    struct file *file;

    kvm = kvm_create_vm(type); /* create and initialize a struct kvm */
    if (IS_ERR(kvm))
        return PTR_ERR(kvm);
#ifdef CONFIG_KVM_MMIO
    r = kvm_coalesced_mmio_init(kvm); /* initialize coalesced MMIO */
    if (r < 0)
        goto put_kvm;
#endif
    r = get_unused_fd_flags(O_CLOEXEC); /* get an unused fd (take a free slot from the process's open-file table, current->files, and return its index) */
    if (r < 0)
        goto put_kvm;

    file = anon_inode_getfile("kvm-vm", &kvm_vm_fops, kvm, O_RDWR); /* get an anonymous inode, create a struct file instance, and associate the inode with it */
    if (IS_ERR(file)) {
        put_unused_fd(r);
        r = PTR_ERR(file);
        goto put_kvm;
    }

    /*
     * Don't call kvm_put_kvm anymore at this point; file->f_op is
     * already set, with ->release() being kvm_vm_release(). In error
     * cases it will be called by the final fput(file) and will take
     * care of doing kvm_put_kvm(kvm).
     */
    if (kvm_create_vm_debugfs(kvm, r) < 0) {
        put_unused_fd(r);
        fput(file);
        return -ENOMEM;
    }
    kvm_uevent_notify_change(KVM_EVENT_CREATE_VM, kvm);

    fd_install(r, file); /* store the file pointer in the process's open-file table (current->files) */
    return r;

put_kvm:
    kvm_put_kvm(kvm); /* decrement kvm->users_count by 1; if it reaches 0, kvm_destroy_vm destroys the kvm */
    return r;
}
  • Every MMIO access normally forces an exit from the guest back into QEMU; when there are several MMIO operations in a row, the earlier ones can be buffered and handled together when the last one finally exits to QEMU, which is what coalesced MMIO does (a userspace sketch follows below)
  • After struct kvm has been initialized, the function requests a file descriptor FD, binds the two together, and returns the FD
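
As a hedged illustration of the userspace side (the device address 0xd0000000, the helper name coalesced_mmio_demo, and the 4 KiB page size are assumptions made for this sketch), a zone is registered on the vmfd with KVM_REGISTER_COALESCED_MMIO, and the buffered writes are later drained from the ring page inside the vcpu mapping:

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Register a coalesced-MMIO zone and drain the ring after a later exit.
 * `vmfd` and `run` (the mmap'ed vcpu region) come from the earlier example. */
static void coalesced_mmio_demo(int vmfd, struct kvm_run *run)
{
    struct kvm_coalesced_mmio_zone zone = {
        .addr = 0xd0000000,          /* hypothetical device register window */
        .size = 0x1000,
    };
    ioctl(vmfd, KVM_REGISTER_COALESCED_MMIO, &zone);

    /* On x86 the ring page is KVM_COALESCED_MMIO_PAGE_OFFSET pages into the vcpu
     * mapping (see the extra pages added for KVM_GET_VCPU_MMAP_SIZE above). */
    struct kvm_coalesced_mmio_ring *ring =
        (void *)((char *)run + KVM_COALESCED_MMIO_PAGE_OFFSET * 4096);
    unsigned int max = (4096 - sizeof(*ring)) / sizeof(struct kvm_coalesced_mmio);

    while (ring->first != ring->last) {  /* replay every write KVM buffered for us */
        struct kvm_coalesced_mmio *m = &ring->coalesced_mmio[ring->first];
        printf("deferred MMIO write: addr=0x%llx len=%u\n",
               (unsigned long long)m->phys_addr, m->len);
        ring->first = (ring->first + 1) % max;
    }
}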

Creating the vcpufd: kvm_vm_ioctl_create_vcpu

static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
{
    int r;
    struct kvm_vcpu *vcpu;
    struct page *page;

    if (id >= KVM_MAX_VCPU_ID)
        return -EINVAL;

    mutex_lock(&kvm->lock);
    if (kvm->created_vcpus == KVM_MAX_VCPUS) {
        mutex_unlock(&kvm->lock);
        return -EINVAL;
    }

    kvm->created_vcpus++;
    mutex_unlock(&kvm->lock);

    r = kvm_arch_vcpu_precreate(kvm, id); /* vcpu pre-creation (only basic checks) */
    if (r)
        goto vcpu_decrement;

    vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); /* allocate memory for the vcpu */
    if (!vcpu) {
        r = -ENOMEM;
        goto vcpu_decrement;
    }

    BUILD_BUG_ON(sizeof(struct kvm_run) > PAGE_SIZE);
    page = alloc_page(GFP_KERNEL | __GFP_ZERO); /* allocate one page from the buddy allocator */
    if (!page) {
        r = -ENOMEM;
        goto vcpu_free;
    }
    vcpu->run = page_address(page); /* convert the page to its virtual address and store it in vcpu->run */

    kvm_vcpu_init(vcpu, kvm, id); /* initialize the vcpu */

    r = kvm_arch_vcpu_create(vcpu); /* architecture-specific initialization, including the timer, pmu, vgic, etc. */
    if (r)
        goto vcpu_free_run_page;

    mutex_lock(&kvm->lock);
    if (kvm_get_vcpu_by_id(kvm, id)) { /* under the hood this is just a kvm_get_vcpu lookup */
        r = -EEXIST;
        goto unlock_vcpu_destroy;
    }

    vcpu->vcpu_idx = atomic_read(&kvm->online_vcpus);
    BUG_ON(kvm->vcpus[vcpu->vcpu_idx]);

    /* Now it's all set up, let userspace reach it */
    kvm_get_kvm(kvm); /* counterpart of kvm_put_kvm: increment kvm->users_count by 1 */
    r = create_vcpu_fd(vcpu); /* create the vcpufd and register the kvm_vcpu_fops file operations */
    if (r < 0) {
        kvm_put_kvm_no_destroy(kvm);
        goto unlock_vcpu_destroy;
    }

    kvm->vcpus[vcpu->vcpu_idx] = vcpu;

    /*
     * Pairs with smp_rmb() in kvm_get_vcpu. Write kvm->vcpus
     * before kvm->online_vcpu's incremented value.
     */
    smp_wmb(); /* write memory barrier for SMP */
    atomic_inc(&kvm->online_vcpus);

    mutex_unlock(&kvm->lock);
    kvm_arch_vcpu_postcreate(vcpu);
    kvm_create_vcpu_debugfs(vcpu);
    return r;

unlock_vcpu_destroy:
    mutex_unlock(&kvm->lock);
    kvm_arch_vcpu_destroy(vcpu);
vcpu_free_run_page:
    free_page((unsigned long)vcpu->run);
vcpu_free:
    kmem_cache_free(kvm_vcpu_cache, vcpu);
vcpu_decrement:
    mutex_lock(&kvm->lock);
    kvm->created_vcpus--;
    mutex_unlock(&kvm->lock);
    return r;
}
  • The vcpu needs one page to hold the kvm_run data; the memory obtained by mmap-ing the vcpu fd is exactly this kvm_run page (see the sketch below)
  • After struct kvm_vcpu has been initialized, the function calls create_vcpu_fd to create the vcpufd and registers the kvm_vcpu_fops operations on it
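
A hedged sketch of how userspace consumes that shared page (the helper name run_until_hlt is an assumption; vcpufd and mmap_size come from the earlier example): mmap on the vcpufd returns exactly the page stored in vcpu->run, so the exit information written by the kernel can be read directly after each KVM_RUN.

#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/kvm.h>

/* Map the vcpu's kvm_run page and run the guest until it halts. */
static int run_until_hlt(int vcpufd, int mmap_size)
{
    struct kvm_run *run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
                               MAP_SHARED, vcpufd, 0);  /* the page allocated for vcpu->run */
    if (run == MAP_FAILED)
        return -1;

    for (;;) {
        if (ioctl(vcpufd, KVM_RUN, NULL) < 0)            /* dispatched to kvm_vcpu_ioctl */
            return -1;
        if (run->exit_reason == KVM_EXIT_HLT)            /* guest executed hlt */
            return 0;
        fprintf(stderr, "exit_reason = %u\n", run->exit_reason);
    }
}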

Starting the virtual machine: kvm_arch_vcpu_ioctl_run

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
    struct kvm_run *kvm_run = vcpu->run;
    int r;

    vcpu_load(vcpu); /* load the various register values */
    kvm_sigset_activate(vcpu);
    kvm_load_guest_fpu(vcpu);

    if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
        if (kvm_run->immediate_exit) {
            r = -EINTR;
            goto out;
        }
        kvm_vcpu_block(vcpu);
        kvm_apic_accept_events(vcpu);
        kvm_clear_request(KVM_REQ_UNHALT, vcpu);
        r = -EAGAIN;
        if (signal_pending(current)) {
            r = -EINTR;
            kvm_run->exit_reason = KVM_EXIT_INTR;
            ++vcpu->stat.signal_exits;
        }
        goto out;
    }

    if (kvm_run->kvm_valid_regs & ~KVM_SYNC_X86_VALID_FIELDS) {
        r = -EINVAL;
        goto out;
    }

    if (kvm_run->kvm_dirty_regs) {
        r = sync_regs(vcpu);
        if (r != 0)
            goto out;
    }

    /* re-sync apic's tpr */
    if (!lapic_in_kernel(vcpu)) {
        if (kvm_set_cr8(vcpu, kvm_run->cr8) != 0) {
            r = -EINVAL;
            goto out;
        }
    }

    if (unlikely(vcpu->arch.complete_userspace_io)) {
        int (*cui)(struct kvm_vcpu *) = vcpu->arch.complete_userspace_io;
        vcpu->arch.complete_userspace_io = NULL;
        r = cui(vcpu);
        if (r <= 0)
            goto out;
    } else
        WARN_ON(vcpu->arch.pio.count || vcpu->mmio_needed);

    if (kvm_run->immediate_exit)
        r = -EINTR;
    else
        r = vcpu_run(vcpu); /* run the virtual machine */

out:
    kvm_put_guest_fpu(vcpu);
    if (kvm_run->kvm_valid_regs)
        store_regs(vcpu);
    post_kvm_run_save(vcpu);
    kvm_sigset_deactivate(vcpu);

    vcpu_put(vcpu);
    return r;
}
  • The vcpu ultimately has to be placed on a physical CPU to execute, so obviously a context switch is needed:
    • save the host context, switch to the guest context for execution, and restore the host context again when the guest exits
  • The core function for running the virtual machine is vcpu_run; its source is:
static int vcpu_run(struct kvm_vcpu *vcpu)
{
    int r;
    struct kvm *kvm = vcpu->kvm;

    vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
    vcpu->arch.l1tf_flush_l1d = true;

    for (;;) {
        if (kvm_vcpu_running(vcpu)) { /* is the vcpu currently runnable? */
            r = vcpu_enter_guest(vcpu); /* run the vcpu */
        } else {
            r = vcpu_block(kvm, vcpu); /* block the vcpu */
        }

        if (r <= 0)
            break;

        kvm_clear_request(KVM_REQ_PENDING_TIMER, vcpu);
        if (kvm_cpu_has_pending_timer(vcpu))
            kvm_inject_pending_timer_irqs(vcpu);

        if (dm_request_for_irq_injection(vcpu) &&
            kvm_vcpu_ready_for_interrupt_injection(vcpu)) {
            r = 0;
            vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
            ++vcpu->stat.request_irq_exits;
            break;
        }

        if (__xfer_to_guest_mode_work_pending()) {
            srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
            r = xfer_to_guest_mode_handle_work(vcpu);
            if (r)
                return r;
            vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
        }
    }

    srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);

    return r;
}
  • The core function that actually runs the vcpu is vcpu_enter_guest:
static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
{
    int r;
    bool req_int_win =
        dm_request_for_irq_injection(vcpu) &&
        kvm_cpu_accept_dm_intr(vcpu);
    fastpath_t exit_fastpath;

    bool req_immediate_exit = false;

    if (kvm_request_pending(vcpu)) {
        /* handle vcpu->requests */
        ......
    }

    if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) {
        /* handle vcpu->requests */
        ......
    }

    r = kvm_mmu_reload(vcpu);
    if (unlikely(r)) {
        goto cancel_injection;
    }

    preempt_disable();

    kvm_x86_ops.prepare_guest_switch(vcpu); /* save the host's fs and gs segment selectors in preparation for the context switch */

    local_irq_disable();
    vcpu->mode = IN_GUEST_MODE; /* switch to in-guest mode */

    srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);

    smp_mb__after_srcu_read_unlock();

    if (kvm_lapic_enabled(vcpu) && vcpu->arch.apicv_active)
        kvm_x86_ops.sync_pir_to_irr(vcpu);

    if (kvm_vcpu_exit_request(vcpu)) {
        ......
    }

    if (req_immediate_exit) {
        ......
    }

    trace_kvm_entry(vcpu->vcpu_id);

    fpregs_assert_state_consistent();
    if (test_thread_flag(TIF_NEED_FPU_LOAD))
        switch_fpu_return();

    if (unlikely(vcpu->arch.switch_db_regs)) {
        ......
    }

    exit_fastpath = kvm_x86_ops.run(vcpu); /* save host state, load the guest register values from the VMCS, and jump to the guest code */

    if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) {
        ......
    }

    if (hw_breakpoint_active())
        hw_breakpoint_restore();

    vcpu->arch.last_vmentry_cpu = vcpu->cpu;
    vcpu->arch.last_guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());

    vcpu->mode = OUTSIDE_GUEST_MODE; /* switch back to outside-guest mode */
    smp_wmb();

    kvm_x86_ops.handle_exit_irqoff(vcpu); /* handle external interrupts */

    kvm_before_interrupt(vcpu);
    local_irq_enable();
    ++vcpu->stat.exits;
    local_irq_disable();
    kvm_after_interrupt(vcpu);

    if (lapic_in_kernel(vcpu)) {
        ......
    }

    local_irq_enable();
    preempt_enable();

    vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

    if (unlikely(prof_on == KVM_PROFILING)) {
        unsigned long rip = kvm_rip_read(vcpu);
        profile_hit(KVM_PROFILING, (void *)rip);
    }

    if (unlikely(vcpu->arch.tsc_always_catchup))
        kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);

    if (vcpu->arch.apic_attention)
        kvm_lapic_sync_from_vapic(vcpu);

    r = kvm_x86_ops.handle_exit(vcpu, exit_fastpath); /* handle the various exit events */
    return r;

cancel_injection:
    if (req_immediate_exit)
        kvm_make_request(KVM_REQ_EVENT, vcpu);
    kvm_x86_ops.cancel_injection(vcpu);
    if (unlikely(vcpu->arch.apic_attention))
        kvm_lapic_sync_from_vapic(vcpu);
out:
    return r;
}
  • kvm_x86_ops.run is where the context is saved and switched; on Intel VMX the corresponding function is vmx_vcpu_run
static struct kvm_x86_ops vmx_x86_ops __initdata = {
    ......
    .run = vmx_vcpu_run,
    ......
};
  • Inside vmx_vcpu_run, pt_load_msr / pt_save_msr are eventually called to switch the Intel Processor Trace part of the context:
static inline void pt_load_msr(struct pt_ctx *ctx, u32 addr_range)
{
    u32 i;

    wrmsrl(MSR_IA32_RTIT_STATUS, ctx->status); /* write a value into a model-specific register (MSR) */
    wrmsrl(MSR_IA32_RTIT_OUTPUT_BASE, ctx->output_base);
    wrmsrl(MSR_IA32_RTIT_OUTPUT_MASK, ctx->output_mask);
    wrmsrl(MSR_IA32_RTIT_CR3_MATCH, ctx->cr3_match);
    for (i = 0; i < addr_range; i++) {
        wrmsrl(MSR_IA32_RTIT_ADDR0_A + i * 2, ctx->addr_a[i]);
        wrmsrl(MSR_IA32_RTIT_ADDR0_B + i * 2, ctx->addr_b[i]);
    }
}
static inline void pt_save_msr(struct pt_ctx *ctx, u32 addr_range)
{
    u32 i;

    rdmsrl(MSR_IA32_RTIT_STATUS, ctx->status); /* read the model-specific register's value into memory */
    rdmsrl(MSR_IA32_RTIT_OUTPUT_BASE, ctx->output_base);
    rdmsrl(MSR_IA32_RTIT_OUTPUT_MASK, ctx->output_mask);
    rdmsrl(MSR_IA32_RTIT_CR3_MATCH, ctx->cr3_match);
    for (i = 0; i < addr_range; i++) {
        rdmsrl(MSR_IA32_RTIT_ADDR0_A + i * 2, ctx->addr_a[i]);
        rdmsrl(MSR_IA32_RTIT_ADDR0_B + i * 2, ctx->addr_b[i]);
    }
}

hypervisor & Qemu

QEMU is a complete piece of software that can run on its own and emulate an entire computer, including the CPU, memory, and I/O devices; through a special "recompiler" it translates the binary code of a given processor, which gives it cross-platform portability.

  • Before hardware support for virtualization existed, QEMU's pure-software approach translated guest instructions into instructions for the host processor architecture via TCG (tiny code generator)
  • Once hypervisor technology appeared, KVM could replace the QEMU code that emulates the CPU and memory, while the NIC, display, and other devices stay in QEMU, so QEMU+KVM together form a complete virtualization platform

The relationship between QEMU and KVM, the virtualization tools on Linux, is shown in the figure below:

Some virtual machines also use native-architecture binaries to emulate the execution process of a foreign CPU; for details see this article: VM虚拟机技术简析 | Pwn进你的心 (ywhkkx.github.io)