ptmalloc堆数据结构

概述

glibc-2.3.x 之后,glibc 中集成了 ptmalloc2。

 

可以下载glibc源码查看ptmalloc

http://ftp.gnu.org/gnu/glibc/

 

查看glibc版本

millionsky@ubuntu-16:~/tmp$ ldd --version

ldd (Ubuntu GLIBC 2.23-0ubuntu9) 2.23

这里主要参考:

https://ctf-wiki.github.io/ctf-wiki/pwn/heap

扫描二维码关注公众号,回复: 526994 查看本文章

 

本文参考的glibc源码是glibc-2.25.tar.xz

ptmalloc堆数据结构

2.1 Chunk

2.1.1 Malloc_chunk

/* Boundary-tag layout of every chunk (glibc-2.25, malloc/malloc.c).
   For an in-use chunk only the two size fields are meaningful; fd/bk
   (and fd_nextsize/bk_nextsize for large-bin chunks) are valid only
   while the chunk is free -- they occupy what is otherwise user data. */
struct malloc_chunk {

 

INTERNAL_SIZE_T mchunk_prev_size; /* Size of previous chunk (if free). */

INTERNAL_SIZE_T mchunk_size; /* Size in bytes, including overhead. */

struct malloc_chunk *fd; /* double links -- used only if free. */

struct malloc_chunk *bk;

/* Only used for large blocks: pointer to next larger size. */

struct malloc_chunk *fd_nextsize; /* double links -- used only if free. */

struct malloc_chunk *bk_nextsize;

};

2.1.2 尺寸定义

#ifndef INTERNAL_SIZE_T

# define INTERNAL_SIZE_T size_t

#endif

 

/* The corresponding word size. */

#define SIZE_SZ (sizeof (INTERNAL_SIZE_T))

 

l INTERNAL_SIZE_T:即 size_t,在32/64位系统上分别为32/64位整数;

l SIZE_SZ:即 sizeof (INTERNAL_SIZE_T),在32/64位系统上分别为4/8字节;

2.1.3 Chunk的对齐

/* MALLOC_ALIGNMENT is the minimum alignment for malloc'ed chunks. It

must be a power of two at least 2 * SIZE_SZ, even on machines for

which smaller alignments would suffice. It may be defined as larger

than this though. Note however that code and data structures are

optimized for the case of 8-byte alignment. */

#ifndef MALLOC_ALIGNMENT

# define MALLOC_ALIGNMENT (2 * SIZE_SZ < __alignof__ (long double) \

? __alignof__ (long double) : 2 * SIZE_SZ)

#endif

/* The corresponding bit mask value. */

#define MALLOC_ALIGN_MASK (MALLOC_ALIGNMENT - 1)

l MALLOC_ALIGNMENT:对齐字节,8/16

注意:如果64位系统上 size_t 为4字节,则对齐为8字节

l MALLOC_ALIGN_MASK:对齐掩码,0x7/0x0f

 

l 检查分配给用户的内存是否对齐

aligned_OK(m):判断地址m是否按MALLOC_ALIGNMENT对齐;

misaligned_chunk(p):取p(或由p转换得到的user指针)的未对齐低位,结果非0表示未对齐;

/* Check if m has acceptable alignment */

 

#define aligned_OK(m) (((unsigned long)(m) & MALLOC_ALIGN_MASK) == 0)

#define misaligned_chunk(p) \

((uintptr_t)(MALLOC_ALIGNMENT == 2 * SIZE_SZ ? (p) : chunk2mem (p)) \

& MALLOC_ALIGN_MASK)

2.1.4 最小chunk

l Chunk的最小尺寸:16/32

最小的chunk需包含前面4个字段(prev_size、size、fd、bk);

/* The smallest possible chunk */

#define MIN_CHUNK_SIZE (offsetof(struct malloc_chunk, fd_nextsize))

 

注意:如果64位系统上 size_t 为4字节,则最小的chunk为24字节

 

最小malloc size

最小的malloc size是对齐后的最小chunk,大小为16/32

注意:如果64位系统上 size_t 为4字节,则MINSIZE为32。此时MIN_CHUNK_SIZE(24)和MINSIZE不一致。

/* The smallest size we can malloc is an aligned minimal chunk */

 

#define MINSIZE \

(unsigned long)(((MIN_CHUNK_SIZE+MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK))

2.1.5 Malloc header<-->user pointer

Allocated chunk的前两个字段称为 chunk header/malloc header,后面的部分称为user data。二者之间的转换只需将指针移动chunk header的大小(2*SIZE_SZ,即8/16字节)即可;

/* conversion from malloc headers to user pointers, and back */

 

#define chunk2mem(p) ((void*)((char*)(p) + 2*SIZE_SZ))

#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*SIZE_SZ))

2.1.6 用户大小检查与转换

l REQUEST_OUT_OF_RANGE:判断请求字节是否超出范围

MINSIZE为16/32;

则最大的请求大小不能超过0xFFFF FFE0/0xFFFF FFFF FFFF FFC0;

这里为了简化某些代码,边界定得比较低,即使加上MINSIZE,也不会回绕0;

/*

Check if a request is so large that it would wrap around zero when

padded and aligned. To simplify some other code, the bound is made

low enough so that adding MINSIZE will also not wrap around zero.

*/

 

#define REQUEST_OUT_OF_RANGE(req) \

((unsigned long) (req) >=            \

(unsigned long) (INTERNAL_SIZE_T) (-2 * MINSIZE))

l request2size

Request2size:将用户请求的大小转换为对齐后的chunk大小

MALLOC_ALIGNMENT至少是2*SIZE_SZ,这里会补齐chunk header然后对齐。

/* pad request bytes into a usable size -- internal version */

 

#define request2size(req) \

(((req) + SIZE_SZ + MALLOC_ALIGN_MASK < MINSIZE) ? \

MINSIZE : \

((req) + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)

/* Same, except also perform argument check */

#define checked_request2size(req, sz) \

if (REQUEST_OUT_OF_RANGE (req)) {          \

__set_errno (ENOMEM);            \

return 0;                \

}                  \

(sz) = request2size (req);

l checked_request2size:先检查范围再进行大小转换

#define checked_request2size(req, sz) \

if (REQUEST_OUT_OF_RANGE (req)) {          \

__set_errno (ENOMEM);            \

return 0;                \

}                  \

(sz) = request2size (req);

2.1.7 标记相关

P标记:

/* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */

#define PREV_INUSE 0x1

 

/* extract inuse bit of previous chunk */

#define prev_inuse(p) ((p)->mchunk_size & PREV_INUSE)

M标记:

/* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */

#define IS_MMAPPED 0x2

/* check for mmap()'ed chunk */

#define chunk_is_mmapped(p) ((p)->mchunk_size & IS_MMAPPED)

A标记

/* size field is or'ed with NON_MAIN_ARENA if the chunk was obtained

from a non-main arena. This is only set immediately before handing

the chunk to the user, if necessary. */

#define NON_MAIN_ARENA 0x4

/* Check for chunk from main arena. */

#define chunk_main_arena(p) (((p)->mchunk_size & NON_MAIN_ARENA) == 0)

/* Mark a chunk as not being on the main arena. */

#define set_non_main_arena(p) ((p)->mchunk_size |= NON_MAIN_ARENA)

SIZE_BITS:解析大小时需要清除的掩码

/*

Bits to mask off when extracting size

Note: IS_MMAPPED is intentionally not masked off from size field in

macros for which mmapped chunks should never be seen. This should

cause helpful core dumps to occur if it is tried by accident by

people extending or adapting this malloc.

*/

#define SIZE_BITS (PREV_INUSE | IS_MMAPPED | NON_MAIN_ARENA)

下一个chunk的P标记(即本chunk的inuse状态)的获取/设置/清除

/* extract p's inuse bit */

#define inuse(p)               \

((((mchunkptr) (((char *) (p)) + chunksize (p)))->mchunk_size) & PREV_INUSE)

 

/* set/clear chunk as being inuse without otherwise disturbing */

#define set_inuse(p)               \

((mchunkptr) (((char *) (p)) + chunksize (p)))->mchunk_size |= PREV_INUSE

#define clear_inuse(p)               \

((mchunkptr) (((char *) (p)) + chunksize (p)))->mchunk_size &= ~(PREV_INUSE)

指定偏移处chunkP标记的获取/设置/清除

/* check/set/clear inuse bits in known places */

#define inuse_bit_at_offset(p, s)          \

(((mchunkptr) (((char *) (p)) + (s)))->mchunk_size & PREV_INUSE)

 

#define set_inuse_bit_at_offset(p, s)          \

(((mchunkptr) (((char *) (p)) + (s)))->mchunk_size |= PREV_INUSE)

#define clear_inuse_bit_at_offset(p, s)          \

(((mchunkptr) (((char *) (p)) + (s)))->mchunk_size &= ~(PREV_INUSE))

2.1.8 chunk size相关

获取chunk size

/* Get size, ignoring use bits */

#define chunksize(p) (chunksize_nomask (p) & ~(SIZE_BITS))

 

/* Like chunksize, but do not mask SIZE_BITS. */

#define chunksize_nomask(p) ((p)->mchunk_size)

设置chunk size

/* Set size at head, without disturbing its use bit */

#define set_head_size(p, s) ((p)->mchunk_size = (((p)->mchunk_size & SIZE_BITS) | (s)))

 

/* Set size/use field */

#define set_head(p, s) ((p)->mchunk_size = (s))

/* Set size at footer (only when chunk is not in use) */

#define set_foot(p, s) (((mchunkptr) ((char *) (p) + (s)))->mchunk_prev_size = (s))

2.1.9 获取下一个物理chunk

/* Ptr to next physical malloc_chunk. */

#define next_chunk(p) ((mchunkptr) (((char *) (p)) + chunksize (p)))

2.1.10 获取前一个chunk的信息

/* Size of the chunk below P. Only valid if prev_inuse (P). */

#define prev_size(p) ((p)->mchunk_prev_size)

 

/* Set the size of the chunk below P. Only valid if prev_inuse (P). */

#define set_prev_size(p, sz) ((p)->mchunk_prev_size = (sz))

/* Ptr to previous physical malloc_chunk. Only valid if prev_inuse (P). */

#define prev_chunk(p) ((mchunkptr) (((char *) (p)) - prev_size (p)))

2.1.11 获取指定偏移的chunk

/* Treat space at ptr + offset as a chunk */

#define chunk_at_offset(p, s) ((mchunkptr) (((char *) (p)) + (s)))

2.1.12 Top chunk

初始情况下,我们可以将 unsorted chunk 作为 top chunk

/* Conveniently, the unsorted bin can be used as dummy top on first call */

#define initial_top(M) (unsorted_chunks (M))

2.2 Bins

2.2.1 Bins数组

#define NBINS 128

typedef struct malloc_chunk* mchunkptr;

 

struct malloc_state

{

/* Normal bins packed as described above */

mchunkptr bins[NBINS * 2 - 2];

2.2.2 Bin定位

l Bin_at(m, i):指定bin索引,获取bins数组中对应的bin header的地址

bin索引 Bins数组索引

0  不存在

1  2*(1-1)=0

2  2*(2-1)=2

3  2*(3-1)=4

126  2*(126-1)=250

127  2*(127-1)=252

l Next_bin(b):获取指定bin header的下一个bin header

typedef struct malloc_chunk *mbinptr;

 

/* addressing -- note that bin_at(0) does not exist */

#define bin_at(m, i) \

(mbinptr) (((char *) &((m)->bins[((i) - 1) * 2]))      \

- offsetof (struct malloc_chunk, fd))

 

/* analog of ++bin */

#define next_bin(b) ((mbinptr) ((char *) (b) + (sizeof (mchunkptr) << 1)))

 

/* Reminders about list directionality within bins */

#define first(b) ((b)->fd)

#define last(b) ((b)->bk)

l bin_index(sz)

#define bin_index(sz) \

((in_smallbin_range (sz)) ? smallbin_index (sz) : largebin_index (sz))

2.3 Fast bin

2.3.1 fastbinsY

l MAX_FAST_SIZE:80/160字节,是用户可请求的最大fast bin大小;

经request2size处理(加上chunk header并对齐)后,最大chunk大小为88/176字节;

l fastbin_index(sz):输入chunk size,获取fast bin index

Chunk sizefast bin index的对应:

Fast bin Index

Chunk size(32)

Chunk size(64)

0

16

32

1

24

48

2

32

64

3

40

80

4

48

96

5

56

112

6

64

128

7

72

144

8

80

160

9

88

176

l NFASTBINS:10

/* The maximum fastbin request size we support */

#define MAX_FAST_SIZE (80 * SIZE_SZ / 4)

 

/* offset 2 to use otherwise unindexable first 2 bins */

#define fastbin_index(sz) \

((((unsigned int) (sz)) >> (SIZE_SZ == 8 ? 4 : 3)) - 2)

 

#define NFASTBINS (fastbin_index (request2size (MAX_FAST_SIZE)) + 1)

 

typedef struct malloc_chunk *mfastbinptr;

#define fastbin(ar_ptr, idx) ((ar_ptr)->fastbinsY[idx])

struct malloc_state

{

/* Fastbins */

mfastbinptr fastbinsY[NFASTBINS];

}

2.3.2 DEFAULT_MXFAST

l 64/128,默认的最大fast chunk用户数据大小

#ifndef DEFAULT_MXFAST

#define DEFAULT_MXFAST (64 * SIZE_SZ / 4)

#endif

l MAX_FAST_SIZE:80/160,系统支持的最大fast chunk size(用户数据)

#define MAX_FAST_SIZE (80 * SIZE_SZ / 4)

l malloc_init_state会将最大的fast chunk大小(用户数据)初始化为64/128;

/*

Set value of max_fast.

Use impossibly small value if 0.

Precondition: there are no existing fastbin chunks.

Setting the value clears fastchunk bit but preserves noncontiguous bit.

*/

 

#define set_max_fast(s) \

global_max_fast = (((s) == 0)            \

? SMALLBIN_WIDTH : ((s + SIZE_SZ) & ~MALLOC_ALIGN_MASK))

#define get_max_fast() global_max_fast

static void

malloc_init_state (mstate av)

{

if (av == &main_arena)

set_max_fast (DEFAULT_MXFAST);

av->flags |= FASTCHUNKS_BIT;

//......

}

2.3.3 FASTCHUNKS_BIT

FASTCHUNKS_BIT指示可能有fastbin chunks

将一个chunk放入fastbin时,此标记被设置;

只在malloc_consolidate中被清除;

0表示真值,这样启动后have_fastchunks就为真,简化了初始化检查;

Fast chunk bit 的检测/清除/设置:

#define FASTCHUNKS_BIT (1U)

 

#define have_fastchunks(M) (((M)->flags & FASTCHUNKS_BIT) == 0)

#define clear_fastchunks(M) catomic_or (&(M)->flags, FASTCHUNKS_BIT)

#define set_fastchunks(M) catomic_and (&(M)->flags, ~FASTCHUNKS_BIT)

 

struct malloc_state

{

/* Flags (formerly in max_fast). */

int flags;

}

2.3.4 NONCONTIGUOUS_BIT

NONCONTIGUOUS_BIT指示MORECORE没有返回连续的区域;

初始值来自MORECORE_CONTIGUOUS,如果使用mmap替换sbrk,则会被动态改变;

// 主分配区中的MORECORE其实为sbrk(),默认返回连续虚拟地址空间

// 非主分配区使用mmap()分配大块虚拟内存,然后进行切分来模拟主分配区的行为

// 而默认情况下mmap映射区域是不保证虚拟地址空间连续的,所以非主分配区默认分配非连续虚拟地址空间。

取值/设置/清除

#define NONCONTIGUOUS_BIT (2U)

 

#define contiguous(M) (((M)->flags & NONCONTIGUOUS_BIT) == 0)

#define noncontiguous(M) (((M)->flags & NONCONTIGUOUS_BIT) != 0)

#define set_noncontiguous(M) ((M)->flags |= NONCONTIGUOUS_BIT)

#define set_contiguous(M) ((M)->flags &= ~NONCONTIGUOUS_BIT)

初始值

#ifndef MORECORE_CONTIGUOUS

#define MORECORE_CONTIGUOUS 1

#endif

MORECORE

MORECORE是从系统获取内存的函数的名称,默认为sbrk

/* Definition for getting more memory from the OS. */

#define MORECORE (*__morecore)

#define MORECORE_FAILURE 0

void * __default_morecore (ptrdiff_t);

void *(*__morecore)(ptrdiff_t) = __default_morecore;

 

/* Allocate INCREMENT more bytes of data space,

and return the start of data space, or NULL on errors.

If INCREMENT is negative, shrink data space. */

/* Default MORECORE implementation: a thin wrapper around sbrk that
   maps sbrk's failure value, (void *) -1, to NULL. */
void *

__default_morecore (ptrdiff_t increment)

{

void *result = (void *) __sbrk (increment);

if (result == (void *) -1)

return NULL;

 

return result;

}

2.3.5 ARENA_CORRUPTION_BIT

ARENA_CORRUPTION_BIT指示内存崩溃在arena上被检测到;

这样的arena不再用来分配chunks

检测到崩溃之前在此arena上分配的内存不会被释放;

检测/设置

/* ARENA_CORRUPTION_BIT is set if a memory corruption was detected on the

arena. Such an arena is no longer used to allocate chunks. Chunks

allocated in that arena before detecting corruption are not freed. */

 

#define ARENA_CORRUPTION_BIT (4U)

#define arena_is_corrupt(A) (((A)->flags & ARENA_CORRUPTION_BIT))

#define set_arena_corrupt(A)  ((A)->flags |= ARENA_CORRUPTION_BIT)

2.3.6 FASTBIN_CONSOLIDATION_THRESHOLD

当释放的 chunk 与该 chunk 相邻的空闲 chunk 合并后的大小大于FASTBIN_CONSOLIDATION_THRESHOLD时,内存碎片可能比较多了,我们就需要把 fast bins 中的chunk都进行合并,以减少内存碎片对系统的影响。

/*

FASTBIN_CONSOLIDATION_THRESHOLD is the size of a chunk in free()

that triggers automatic consolidation of possibly-surrounding

fastbin chunks. This is a heuristic, so the exact value should not

matter too much. It is defined at half the default trim threshold as a

compromise heuristic to only attempt consolidation if it is likely

to lead to trimming. However, it is not dynamically tunable, since

consolidation reduces fragmentation surrounding large chunks even

if trimming is not used.

*/

 

#define FASTBIN_CONSOLIDATION_THRESHOLD (65536UL)

2.4 Small bin

SMALLBIN_WIDTH8/16

SMALLBIN_CORRECTION:是否需要对small bin的下标进行纠正;

MIN_LARGE_SIZE:最小的large chunk的大小,刚好大于small chunk的最大尺寸的对齐尺寸;

Small chunk的最大尺寸为bin63的尺寸504/1008

MIN_LARGE_SIZE64*8/64*16,即512/1024

in_smallbin_range(sz):判断chunk的大小是否在small bin范围内

smallbin_index(sz) :根据chunk的大小得到small bin对应的索引

Small bin index

Chunk size(32)

Chunk size(64)

2

16

32

3

24

48

4

32

64

......

63

504

1008

#define NSMALLBINS 64

#define SMALLBIN_WIDTH MALLOC_ALIGNMENT

#define SMALLBIN_CORRECTION (MALLOC_ALIGNMENT > 2 * SIZE_SZ)

#define MIN_LARGE_SIZE ((NSMALLBINS - SMALLBIN_CORRECTION) * SMALLBIN_WIDTH)

 

#define in_smallbin_range(sz) \

((unsigned long) (sz) < (unsigned long) MIN_LARGE_SIZE)

#define smallbin_index(sz) \

((SMALLBIN_WIDTH == 16 ? (((unsigned) (sz)) >> 4) : (((unsigned) (sz)) >> 3))\

+ SMALLBIN_CORRECTION)

2.5 Large bin

large bins 中一共包括 63 bin,每个 bin 中的 chunk 的大小不一致,而是处于一定区间范围内。此外,这 63 bin 被分成了 6 组,每组 bin 中的 chunk 大小的间隔是一样的;

Larger bin近似对数分布,bin_index中的差异是由于速度上的考虑;

64 bins of size 8                   //64个bin 大小间隔为8,small bin

32 bins of size 64                  //large bin,如下

16 bins of size 512

8 bins of size 4096

4 bins of size 32768

2 bins of size 262144

1 bin of size what's left

Bin的索引和chunk size间隔(32位)

Bin count

Bin index

Chunk size

31

64

...

94

[512, +64)

...

[512+64*30, +64) [2432, 2496)

17

95

...

111

[2496, 2560)

...

[10240, +512)

9

112

...

120

[10752, 12288)

...+4096

[40960, +4096) [40960,45056)

3

120

121

122

123

[45056, 65536)

...+32768

[131072, +32768)

2

124

125

126

[163840, 262144)

[262144,+262144)

[524288,+262144)

1

126

[786432, *)

largebin_index_32:输入大小,输出large bin index

#define largebin_index_32(sz) \

(((((unsigned long) (sz)) >> 6) <= 38) ? 56 + (((unsigned long) (sz)) >> 6) :\

((((unsigned long) (sz)) >> 9) <= 20) ? 91 + (((unsigned long) (sz)) >> 9) :\

((((unsigned long) (sz)) >> 12) <= 10) ? 110 + (((unsigned long) (sz)) >> 12) :\

((((unsigned long) (sz)) >> 15) <= 4) ? 119 + (((unsigned long) (sz)) >> 15) :\

((((unsigned long) (sz)) >> 18) <= 2) ? 124 + (((unsigned long) (sz)) >> 18) :\

126)

largebin_index

#define largebin_index(sz) \

(SIZE_SZ == 8 ? largebin_index_64 (sz) \

: MALLOC_ALIGNMENT == 16 ? largebin_index_32_big (sz) \

: largebin_index_32 (sz))

2.6 unsorted bin

unsorted bin 可以视为空闲 chunk 回归其所属 bin 之前的缓冲区。

/* The otherwise unindexable 1-bin is used to hold unsorted chunks. */

#define unsorted_chunks(M) (bin_at (M, 1))

2.7 Binmap

Binmap的每一位标记对应的bin是否为空:

0 - bin为空

1 - bin非空

为了帮助补偿数目庞大的bin,一个索引结构用于bin-by-bin的搜索;

binmap是一个位向量,记录了哪些bin是空的,这样在遍历的时候可以跳过这些bin

当bin变为空的时候,对应的位不会马上被清除,只有在malloc进行遍历的时候,才会被清除;

l BINMAPSHIFT:生成map word长度所需的比特数目;这里为5

l BITSPERMAP:每个map word32比特;

l BINMAPSIZE4map word的数目,总共128bin,需要128个位;每个map word 32比特,共需要4map word

l idx2block(i):将bin的索引转换为binmapblock索引;

因为一个map word对应32bin,所以bin索引除以32后得到binmap索引;

l idx2bit(i):将bin索引转换为map word中的位索引;

首先将索引对32取模,然后移位获取对应的掩码

l mark_bin(m, i) :设置索引为ibinbinmap标记

l unmark_bin(m, i):清除索引为ibinbinmap标记

l get_binmap(m, i):获取索引为ibinbinmap标记

struct malloc_state

{

/* Bitmap of bins */

unsigned int binmap[BINMAPSIZE];

}

/*

Binmap

 

To help compensate for the large number of bins, a one-level index

structure is used for bin-by-bin searching. `binmap' is a

bitvector recording whether bins are definitely empty so they can

be skipped over during during traversals. The bits are NOT always

cleared as soon as bins are empty, but instead only

when they are noticed to be empty during traversal in malloc.

*/

/* Conservatively use 32 bits per map word, even if on 64bit system */

#define BINMAPSHIFT 5

#define BITSPERMAP (1U << BINMAPSHIFT)

#define BINMAPSIZE (NBINS / BITSPERMAP)

#define idx2block(i) ((i) >> BINMAPSHIFT)

#define idx2bit(i) ((1U << ((i) & ((1U << BINMAPSHIFT) - 1))))

#define mark_bin(m, i) ((m)->binmap[idx2block (i)] |= idx2bit (i))

#define unmark_bin(m, i) ((m)->binmap[idx2block (i)] &= ~(idx2bit (i)))

#define get_binmap(m, i) ((m)->binmap[idx2block (i)] & idx2bit (i))

多线程数据结构

3.1 Heap size

l DEFAULT_MMAP_THRESHOLD_MIN:mmap最小阈值,128K

l DEFAULT_MMAP_THRESHOLD_MAX:mmap最大阈值,512K(32位)/ 32M(64位,4M * sizeof(long))

l DEFAULT_MMAP_THRESHOLD:128K

#ifndef DEFAULT_MMAP_THRESHOLD_MIN

#define DEFAULT_MMAP_THRESHOLD_MIN (128 * 1024)

#endif

 

#ifndef DEFAULT_MMAP_THRESHOLD_MAX

/* For 32-bit platforms we cannot increase the maximum mmap

threshold much because it is also the minimum value for the

maximum heap size and its alignment. Going above 512k (i.e., 1M

for new heaps) wastes too much address space. */

# if __WORDSIZE == 32

# define DEFAULT_MMAP_THRESHOLD_MAX (512 * 1024)

# else

# define DEFAULT_MMAP_THRESHOLD_MAX (4 * 1024 * 1024 * sizeof(long))

# endif

#endif

 

#ifndef DEFAULT_MMAP_THRESHOLD

#define DEFAULT_MMAP_THRESHOLD DEFAULT_MMAP_THRESHOLD_MIN

#endif

l HEAP_MIN_SIZE32KB

l HEAP_MAX_SIZE:2倍DEFAULT_MMAP_THRESHOLD_MAX,即1M(32位)/ 64M(64位)

/* Compile-time constants. */

 

#define HEAP_MIN_SIZE (32 * 1024)

#ifndef HEAP_MAX_SIZE

# ifdef DEFAULT_MMAP_THRESHOLD_MAX

# define HEAP_MAX_SIZE (2 * DEFAULT_MMAP_THRESHOLD_MAX)

# else

# define HEAP_MAX_SIZE (1024 * 1024) /* must be a power of two */

# endif

#endif

3.2 Heap Header(heap_info)

Ar_ptr:heap所属的arena

Prev:前一个heap

Size:此heap的大小

Mprotect_size:heap中已经修改读写权限的大小

Pad: 填充,大小一般为0

填充使得sizeof (heap_info) + 2 * SIZE_SZ是MALLOC_ALIGNMENT的整数倍;

heap_info中有两个指针和2个size_t(按指针大小等于SIZE_SZ计,共4 * SIZE_SZ),再加上2 * SIZE_SZ即6 * SIZE_SZ,所以pad大小为 -6 * SIZE_SZ & MALLOC_ALIGN_MASK;

为什么对齐的是sizeof (heap_info) + 2 * SIZE_SZ?这样紧随heap_info之后的chunk的user指针(chunk地址 + 2 * SIZE_SZ)恰好按MALLOC_ALIGNMENT对齐(待确认);

/* A heap is a single contiguous memory region holding (coalesceable)

malloc_chunks. It is allocated with mmap() and always starts at an

address aligned to HEAP_MAX_SIZE. */

 

/* Per-heap header placed at the very start of every mmap'ed heap
   belonging to a non-main arena. */
typedef struct _heap_info

{

mstate ar_ptr; /* Arena for this heap. */

struct _heap_info *prev; /* Previous heap. */

size_t size; /* Current size in bytes. */

size_t mprotect_size; /* Size in bytes that has been mprotected

PROT_READ|PROT_WRITE. */

/* Make sure the following data is properly aligned, particularly

that sizeof (heap_info) + 2 * SIZE_SZ is a multiple of

MALLOC_ALIGNMENT. */

/* pad length is (-6 * SIZE_SZ) masked to the alignment: the four
   fields above plus 2 * SIZE_SZ total 6 * SIZE_SZ when pointers are
   SIZE_SZ wide, so this rounds the sum up to MALLOC_ALIGNMENT. */
char pad[-6 * SIZE_SZ & MALLOC_ALIGN_MASK];

} heap_info;

3.3 Arena header(malloc_state)

注意,main arena malloc_state 并不是 heap segment 的一部分,而是一个全局变量,存储在 libc.so 的数据段。

struct malloc_state;

typedef struct malloc_state *mstate;

/* Arena header: the complete allocator state for one arena -- lock,
   flags, fast bins, top chunk, normal bins, binmap, arena-list links
   and memory accounting. */
struct malloc_state

{

/* Serialize access. */

__libc_lock_define (, mutex);

 

/* Flags (formerly in max_fast). */

int flags;

/* Fastbins */

mfastbinptr fastbinsY[NFASTBINS];

/* Base of the topmost chunk -- not otherwise kept in a bin */

mchunkptr top;

/* The remainder from the most recent split of a small request */

mchunkptr last_remainder;

/* Normal bins packed as described above */

mchunkptr bins[NBINS * 2 - 2];

/* Bitmap of bins */

unsigned int binmap[BINMAPSIZE];

/* Linked list */

struct malloc_state *next;

/* Linked list for free arenas. Access to this field is serialized

by free_list_lock in arena.c. */

struct malloc_state *next_free;

/* Number of threads attached to this arena. 0 if the arena is on

the free list. Access to this field is serialized by

free_list_lock in arena.c. */

INTERNAL_SIZE_T attached_threads;

/* Memory allocated from the system in this arena. */

INTERNAL_SIZE_T system_mem;

INTERNAL_SIZE_T max_system_mem;

};

l __libc_lock_define (, mutex);

该变量用于控制多线程串行访问同一个arena

奇怪的是libc-lock.h中这个宏的定义是空的,真正的定义在哪里呢?

#define __libc_lock_define(CLASS,NAME)

l Flags:一些标记位

#define FASTCHUNKS_BIT (1U)

#define NONCONTIGUOUS_BIT (2U)

#define ARENA_CORRUPTION_BIT (4U)

l fastbinsYfastbins链表指针

l Top:指向top chunk

l last_remainderLast Remainder chunk

l Bins:存储 unstored bin/small bins/large bins的链表的表头

l Binmap:记录了哪些bin是空的,搜索时可跳过这些空的bin;

l Next:下一个arena header,单链表

l next_freefree arena的链表,需要使用free_list_lock串行访问;

Free arena是指没有附着线程的arena

l attached_threads:附着此arena的线程的数目,若为free arena则为0

l system_mem:此arena中从系统分配的内存;

l max_system_mem:此arena中从系统分配的最大内存;

3.4 main_arena

主线程的arena,这是一个静态变量;

/* The main thread's arena: a static object in libc's data segment,
   not part of any heap. Its .next points to itself, making it the
   initial sole member of the circular arena list. */
static struct malloc_state main_arena =

{

.mutex = _LIBC_LOCK_INITIALIZER,

.next = &main_arena,

.attached_threads = 1

};

3.5 arena_get

arena_get:请求一个arena,并对对应的mutex加锁,

首先尝试本线程最后成功加锁的arena。这是最普遍的情况。

然后遍历arena链表,如果没有可用的,则创建一个新的。

static __thread mstate thread_arena attribute_tls_model_ie;

 

#define arena_get(ptr, size) do { \

ptr = thread_arena;            \

arena_lock (ptr, size);            \

} while (0)

 

#define arena_lock(ptr, size) do {           \

if (ptr && !arena_is_corrupt (ptr))        \

__libc_lock_lock (ptr->mutex);           \

else                 \

ptr = arena_get2 ((size), NULL);         \

} while (0)

参考文档

1. https://ctf-wiki.github.io/ctf-wiki/pwn/heap/heap_structure/

2. https://ctf-wiki.github.io/ctf-wiki/pwn/heap/heap_implementation_details/


猜你喜欢

转载自blog.csdn.net/luozhaotian/article/details/80267712