linux 内存管理(8) —内存描述符(mm_struct)

  • 了解进程内存描述符mm_struct

1.概述

  内核用两个数据结构表示进程地址空间: struct mm_struct(内存描述符) 和 struct vm_area_struct(线性区描述符)。

  • 最高层次的:mm_struct,描述一个进程的整个虚拟地址空间。
    struct mm_struct 记录进程地址空间有关的全部信息,进程描述符struct task_struct中mm字段指向属于自己的mm_struct(内存描述符)。
  • 较高层次的:vm_area_struct,描述虚拟地址空间的一个区间(简称虚拟区)。
    struct vm_area_struct 记录进程使用每个线性区以及线性区的属性。内核中用两种数据结构组织每个进程所使用的线性区----链表和红黑树,这样做的好处是加快查找的速度。struct mm_struct中mmap 和 mm_rb字段分别存放其链表头和红黑树根结点

内存管理大致结构:
在这里插入图片描述

2.mm_struct 结构体

  task_struct结构体包含指向mm_struct结构的指针,mm_struct用来描述进程的虚拟地址空间。mm_struct包含装入的可执行映像信息和进程的页目录指针pgd,还包含指向vm_area_struct结构的几个指针,每一个vm_area_struct代表进程的一个虚拟地址区间。

  vm_area_struct结构含有指向vm_operations_struct结构的一个指针,vm_operations_struct描述了在这个区间上的操作。vm_operations_struct结构中包含的是函数指针,其中open、close分别用于虚拟区间的打开、关闭,而nopage用于当虚拟页面不在物理内存中而引发"缺页异常"时所调用的函数,当linux处理缺页异常时,就可以为新的虚拟内存分配实际的物理内存。

  mm_struct是对进程地址空间(虚拟内存)的描述。一个进程的虚拟空间中可能有多个虚拟区间,对这些虚拟区间的组织方式有两种:

  • 当虚拟区较少时采取单链表,由mmap指针指向这个链表;
  • 当虚拟区间多时采用红黑树进行管理,由mm_rb指向这棵树。由于程序中用到的地址常常具有局部性,最近一次用到的虚拟区间很可能下一次还会用到,因此把最近用到的虚拟区间结构放入高速缓存,这个虚拟区间由mmap_cache指向。

结构体如下图所示:
在这里插入图片描述
2.1.struct mm_struct

/*
 * Memory descriptor: describes a whole process virtual address space.
 * task_struct->mm points at one of these; the VMAs covering the address
 * space hang off mmap (linked list) and mm_rb (red-black tree).
 */
struct mm_struct {
	struct {
		struct vm_area_struct *mmap;		/* list of VMAs */
		struct rb_root mm_rb;		/* rb-tree of the same VMAs, for fast lookup */
		u64 vmacache_seqnum;                   /* per-thread vmacache */
#ifdef CONFIG_MMU
		/* arch hook that picks a free address range for a new mapping */
		unsigned long (*get_unmapped_area) (struct file *filp,
				unsigned long addr, unsigned long len,
				unsigned long pgoff, unsigned long flags);
#endif
		unsigned long mmap_base;	/* base of mmap area */
		unsigned long mmap_legacy_base;	/* base of mmap area in bottom-up allocations */
#ifdef CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES
		/* Base addresses for compatible mmap() */
		unsigned long mmap_compat_base;
		unsigned long mmap_compat_legacy_base;
#endif
		unsigned long task_size;	/* size of task vm space */
		unsigned long highest_vm_end;	/* highest vma end address */
		pgd_t * pgd;		/* page global directory: root of this mm's page tables */

#ifdef CONFIG_MEMBARRIER
		atomic_t membarrier_state;
#endif


		atomic_t mm_users;	/* users of the address space (e.g. threads sharing it) */
		atomic_t mm_count;	/* references to this struct itself; freed when it drops to 0 */

#ifdef CONFIG_MMU
		atomic_long_t pgtables_bytes;	/* PTE page table pages */
#endif
		int map_count;			/* number of VMAs */

		spinlock_t page_table_lock; /* Protects page tables and some
					     * counters
					     */
		struct rw_semaphore mmap_sem;	/* serializes changes to the VMA list/tree */

		struct list_head mmlist; /* List of maybe swapped mm's.	These
					  * are globally strung together off
					  * init_mm.mmlist, and are protected
					  * by mmlist_lock
					  */
		unsigned long hiwater_rss; /* High-watermark of RSS usage */
		unsigned long hiwater_vm;  /* High-water virtual memory usage */

		unsigned long total_vm;	   /* Total pages mapped */
		unsigned long locked_vm;   /* Pages that have PG_mlocked set */
		atomic64_t    pinned_vm;   /* Refcount permanently increased */
		unsigned long data_vm;	   /* VM_WRITE & ~VM_SHARED & ~VM_STACK */
		unsigned long exec_vm;	   /* VM_EXEC & ~VM_WRITE & ~VM_STACK */
		unsigned long stack_vm;	   /* VM_STACK */
		unsigned long def_flags;

		spinlock_t arg_lock; /* protect the below fields */
		/* segment boundaries: code, data, heap (brk) and stack */
		unsigned long start_code, end_code, start_data, end_data;
		unsigned long start_brk, brk, start_stack;
		/* command line and environment ranges */
		unsigned long arg_start, arg_end, env_start, env_end;

		unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */

		struct mm_rss_stat rss_stat;	/* per-mm RSS counters */
		struct linux_binfmt *binfmt;	/* binary format of the loaded image */
		/* Architecture-specific MM context */
		mm_context_t context;

		unsigned long flags; /* Must use atomic bitops to access */
		struct core_state *core_state; /* coredumping support */

#ifdef CONFIG_AIO
		spinlock_t			ioctx_lock;
		struct kioctx_table __rcu	*ioctx_table;
#endif
#ifdef CONFIG_MEMCG
		struct task_struct __rcu *owner;
#endif
		struct user_namespace *user_ns;

		/* store ref to file /proc/<pid>/exe symlink points to */
		struct file __rcu *exe_file;
#ifdef CONFIG_MMU_NOTIFIER
		struct mmu_notifier_mm *mmu_notifier_mm;
#endif
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
		pgtable_t pmd_huge_pte; /* protected by page_table_lock */
#endif
#ifdef CONFIG_NUMA_BALANCING
		unsigned long numa_next_scan;

		/* Restart point for scanning and setting pte_numa */
		unsigned long numa_scan_offset;

		/* numa_scan_seq prevents two threads setting pte_numa */
		int numa_scan_seq;
#endif
		atomic_t tlb_flush_pending;
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
		/* See flush_tlb_batched_pending() */
		bool tlb_flush_batched;
#endif
		struct uprobes_state uprobes_state;
#ifdef CONFIG_HUGETLB_PAGE
		atomic_long_t hugetlb_usage;
#endif
		struct work_struct async_put_work;
	} __randomize_layout;

	/* NOTE(review): appears to track CPUs that have used this mm (TLB
	 * shootdown) — confirm against the kernel's mm_types.h. */
	unsigned long cpu_bitmap[];
};

2.2.struct vm_area_struct

  虚拟地址区间vm_area_struct是虚拟内存的一部分,内存描述符mm_struct指向整个虚拟空间,而vm_area_struct只是指向虚拟空间中的一段。

  vm_area_struct是由双向链表链接起来的,它们按照虚拟地址升序排序,每一个这样的结构都对应描述一段相邻的地址空间范围。之所以这样分隔,是由于每一个虚拟区间的来源可能不同:有的来自可执行映像,有的来自共享库,有的则是动态分配的内存区,所以对每一个由vm_area_struct结构所描述的区间,其处理操作和它前后区间的处理操作可能不同。因此linux把虚拟内存分割管理,并利用虚拟内存处理例程vm_ops来抽象对不同来源虚拟内存的处理方法。linux在这里利用了面向对象的思想:把一个虚拟区间看成一个对象,用vm_area_struct描述这个对象的属性,其中的vm_operations_struct结构描述了在这个对象上的操作。

/*
 * Describes one contiguous virtual memory region [vm_start, vm_end)
 * of a process address space.  Regions are kept both on a sorted
 * doubly-linked list (vm_next/vm_prev) and in an rb-tree (vm_rb),
 * both rooted in the owning mm_struct.
 */
struct vm_area_struct {
	/* The first cache line has the info for VMA tree walking. */
	unsigned long vm_start;		/* Our start address within vm_mm. */
	unsigned long vm_end;		/* The first byte after our end address within vm_mm. */

	/* linked list of VM areas per task, sorted by address */
	struct vm_area_struct *vm_next, *vm_prev;
	struct rb_node vm_rb;		/* node in mm->mm_rb */
	unsigned long rb_subtree_gap;

	/* Second cache line starts here. */
	struct mm_struct *vm_mm;	/* The address space we belong to. */
	pgprot_t vm_page_prot;		/* Access permissions of this VMA. */
	unsigned long vm_flags;		/* Flags, see mm.h. */

	/* NOTE(review): looks like the interval-tree linkage used for
	 * file-backed reverse mapping — confirm against mm_types.h. */
	struct {
		struct rb_node rb;
		unsigned long rb_subtree_last;
	} shared;

	struct list_head anon_vma_chain; /* Serialized by mmap_sem & * page_table_lock */
	struct anon_vma *anon_vma;	/* Serialized by page_table_lock */

	/* Function pointers to deal with this struct. */
	const struct vm_operations_struct *vm_ops;

	/* Information about our backing store: */
	unsigned long vm_pgoff;		/* Offset (within vm_file) in PAGE_SIZE units */
	struct file * vm_file;		/* File we map to (can be NULL). */
	void * vm_private_data;		/* was vm_pte (shared mem) */

#ifdef CONFIG_SWAP
	atomic_long_t swap_readahead_info;
#endif
#ifndef CONFIG_MMU
	struct vm_region *vm_region;	/* NOMMU mapping region */
#endif
#ifdef CONFIG_NUMA
	struct mempolicy *vm_policy;	/* NUMA policy for the VMA */
#endif
	struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
} __randomize_layout;

2.3.vm_operations_struct

  vm_operations_struct结构中包含函数指针,其中open、close分别用于虚拟内存区间的打开、关闭,而nopage(新内核中为fault)用于当虚拟内存页面没有实际的物理内存映射而引发"缺页异常"时所调用的函数指针。

/*
 * Per-VMA operations table: how the kernel manipulates one kind of
 * memory region.  Drivers/filesystems install their own table via
 * vma->vm_ops when a region is mapped.
 */
struct vm_operations_struct {
	void (*open)(struct vm_area_struct * area);	/* region is being opened/duplicated */
	void (*close)(struct vm_area_struct * area);	/* region is being torn down */
	int (*split)(struct vm_area_struct * area, unsigned long addr);
	int (*mremap)(struct vm_area_struct * area);
	/* called on a page fault when no page is present in the region */
	vm_fault_t (*fault)(struct vm_fault *vmf);
	vm_fault_t (*huge_fault)(struct vm_fault *vmf,
			enum page_entry_size pe_size);
	void (*map_pages)(struct vm_fault *vmf,
			pgoff_t start_pgoff, pgoff_t end_pgoff);
	unsigned long (*pagesize)(struct vm_area_struct * area);

	/* notification that a previously read-only page is about to become writable */
	vm_fault_t (*page_mkwrite)(struct vm_fault *vmf);

	/* same as page_mkwrite when using VM_PFNMAP|VM_MIXEDMAP */
	vm_fault_t (*pfn_mkwrite)(struct vm_fault *vmf);

	/* access the region by addresses not mappable to userspace (e.g. ptrace) */
	int (*access)(struct vm_area_struct *vma, unsigned long addr,
		      void *buf, int len, int write);
		      
	const char *(*name)(struct vm_area_struct *vma);	/* name shown in /proc/<pid>/maps */

#ifdef CONFIG_NUMA
	int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);
	struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
					unsigned long addr);
#endif
	struct page *(*find_special_page)(struct vm_area_struct *vma,
					  unsigned long addr);
};

虚拟内存管理数据结构图:
在这里插入图片描述
在这里插入图片描述

2.4.proc/pid/maps

  显示进程映射了的内存区域和访问权限。对应内核中的操作集为proc_pid_maps_op,具体的导出函数为show_map。内核中进程的一段地址空间用一个vm_area_struct结构体表示,所有地址空间存储在task->mm->mmap链表中。

  一个文件可以映射到进程的一段内存区域中,映射的文件描述符保存在vm_area_struct->vm_file域中,这种内存区域叫做有名映射内存区域;没有映射文件的内存区域则属于匿名映射内存区域。vm_area_struct每项对应解析如下表所示:
在这里插入图片描述
例如:

cat /proc/19970/maps
001f7000-00212000 r-xp 00000000 fd:00 2719760    /lib/ld-2.5.so
00212000-00213000 r-xp 0001a000 fd:00 2719760    /lib/ld-2.5.so
00213000-00214000 rwxp 0001b000 fd:00 2719760    /lib/ld-2.5.so
00214000-0036b000 r-xp 00000000 fd:00 2719767    /lib/libc-2.5.so
0036b000-0036d000 r-xp 00157000 fd:00 2719767    /lib/libc-2.5.so
0036d000-0036e000 rwxp 00159000 fd:00 2719767    /lib/libc-2.5.so
0036e000-00371000 rwxp 0036e000 00:00 0
0054f000-00565000 r-xp 00000000 fd:00 2719791    /lib/libpthread-2.5.so
00565000-00566000 r-xp 00015000 fd:00 2719791    /lib/libpthread-2.5.so
00566000-00567000 rwxp 00016000 fd:00 2719791    /lib/libpthread-2.5.so
00567000-00569000 rwxp 00567000 00:00 0
006f5000-006f6000 r-xp 006f5000 00:00 0          [vdso]

08048000-08049000 r-xp 00000000 fd:00 3145810    /home/lijz/code/pthread
08049000-0804a000 rw-p 00000000 fd:00 3145810    /home/lijz/code/pthread

08c50000-08c71000 rw-p 08c50000 00:00 0          [heap]
b75d7000-b75d8000 ---p b75d7000 00:00 0
b75d8000-b7fda000 rw-p b75d8000 00:00 0
b7fe4000-b7fe5000 rw-p b7fe4000 00:00 0
bf987000-bf99c000 rw-p bffea000 00:00 0          [stack]
  • 第一列:08049000-0804a000-----本段内存映射的虚拟地址空间范围,对应vm_area_struct中的vm_start和vm_end。

  • 第二列:rw-p----权限 r-读,w-写 x-可执行 p-私有,对应vm_flags。

  • 第三列:00000000----针对有名映射,指本段映射地址在文件中的偏移,对应vm_pgoff。对匿名映射而言,为vm_area_struct->vm_start。

  • 第四列:fd:00----所映射的文件所属设备的设备号,对应vm_file->f_dentry->d_inode->i_sb->s_dev。匿名映射为0。其中fd为主设备号,00为次设备号。

  • 第五列:3145810----文件的索引节点号,对应vm_file->f_dentry->d_inode->i_ino,与ls –i显示的内容相符。匿名映射为0。

  • 第六列:/home/lijz/code/pthread—所映射的文件名。对有名映射而言,是映射的文件名,对匿名映射来说,是此段内存在进程中的作用。[stack]表示本段内存作为栈来使用,[heap]作为堆来使用,其他情况则为无。

3.mmap内存映射

mmap_driver.c的源代码:

/* Every kernel module needs these two headers. */
#include <linux/module.h>  
#include <linux/init.h>  

#include <linux/fs.h>  
#include <linux/cdev.h> /* struct cdev and its helpers */
  
#include <linux/types.h> /* dev_t */
#include <linux/slab.h> /* kmalloc()/kfree() */
#include <asm/io.h>		/* virt_to_phys() */
#include <linux/mm.h>	/* remap_pfn_range() */
  
#define MAJOR_NUM 100 /* statically chosen major device number */
static int dev_major = MAJOR_NUM;  

#define MM_SIZE 4096  /* one page: size of the buffer exported via mmap */
static char driver_name[] = "mmap_driver";  /* driver module name */
//static int dev_major = 0;  
static int dev_minor = 0;  
char *buf = NULL;  /* kernel buffer handed to userspace through mmap */
struct cdev *cdev = NULL;  /* our character device */
  
static int device_open(struct inode *inode, struct file *file)  
{  
    printk(KERN_ALERT"device open\n");  
    buf = (char *)kmalloc(MM_SIZE, GFP_KERNEL);//内核申请内存只能按页申请,申请该内存以便后面把它当作虚拟设备  
    printk(KERN_ALERT"albert:%s,buf=%p\n", __func__, buf);  
    return 0;  
}  
  
static int device_close(struct inode *indoe, struct file *file)  
{  
    printk("device close\n");  
    if(buf)  
    {  
        kfree(buf);  
    }  
    return 0;  
}  
  
static int device_mmap(struct file *file, struct vm_area_struct *vma)  
{  
    printk("device ap\n");  
    vma->vm_flags |= VM_IO;//表示对设备IO空间的映射  
	vma->vm_flags |= (VM_DONTEXPAND | VM_DONTDUMP);//标志该内存区不能被换出,在设备驱动中虚拟页和物理页的关系应该是长期的,应该保留起来,不能随便被别的虚拟页换出  
    if(remap_pfn_range(vma,//虚拟内存区域,即设备地址将要映射到这里  
                       vma->vm_start,//虚拟空间的起始地址  
                       virt_to_phys(buf)>>PAGE_SHIFT,//与物理内存对应的页帧号,物理地址右移12位  
                       vma->vm_end - vma->vm_start,//映射区域大小,一般是页大小的整数倍  
                       vma->vm_page_prot))//保护属性,  
    {  
        return -EAGAIN;  
    }  
    return 0;  
}  
  
/* VFS dispatch table: routes open/close/mmap on /dev/mmap_driver
 * to this driver's handlers. */
static struct file_operations device_fops =  {  
    .owner = THIS_MODULE,  
    .open  = device_open,  
    .release = device_close,  
    .mmap = device_mmap,  
};  
  
/*
 * Module init: reserve a device-number region (static major if
 * dev_major != 0, dynamic otherwise), then allocate and register the
 * character device.
 *
 * Returns 0 on success or a negative errno; on failure everything
 * acquired so far is released.
 */
static int __init char_device_init(void)  
{  
    int result;  
    dev_t dev; /* high 12 bits: major number, low 20 bits: minor number */

    printk(KERN_ALERT"module init2323\n");  

    dev = MKDEV(dev_major, dev_minor);  

    printk(KERN_ALERT"module init\n");  
    if(dev_major) {  
        result = register_chrdev_region(dev, 1, driver_name); /* static major */
        printk("result = %d\n", result);  
    }  
    else  
    {  
        result = alloc_chrdev_region(&dev, 0, 1, driver_name); /* dynamic major */
        dev_major = MAJOR(dev);  
    }  
    if(result < 0)  
    {  
        printk(KERN_WARNING"Cant't get major %d\n", dev_major);  
        return result;  
    }  

    /* cdev_alloc() returns a fully initialized cdev; calling cdev_init()
     * on it would re-init (and leak) its embedded kobject, so only the
     * ops/owner fields are filled in here. */
    cdev = cdev_alloc();
    if (!cdev) {
        result = -ENOMEM;
        goto err_unregister;
    }
    cdev->ops = &device_fops;  
    cdev->owner = THIS_MODULE;  

    result = cdev_add(cdev, dev, 1); /* make the device live */
    printk("dffd = %d\n", result);  
    if (result < 0)
        goto err_cdev;
    return 0;  

err_cdev:
    cdev_del(cdev);                     /* drops the cdev's reference */
    cdev = NULL;
err_unregister:
    unregister_chrdev_region(dev, 1);   /* give back the device numbers */
    return result;
}  
  
/*
 * Module exit: remove the character device from the kernel, then
 * release the reserved device-number region.
 */
static void __exit char_device_exit(void)  
{  
    printk(KERN_ALERT"module exit\n");  
    cdev_del(cdev);  /* unregister the char device first */
    unregister_chrdev_region(MKDEV(dev_major, dev_minor), 1);  /* free dev numbers */
}  
  
module_init(char_device_init); /* module load entry point */
module_exit(char_device_exit); /* module unload entry point */
  
MODULE_LICENSE("GPL");  
MODULE_AUTHOR("ChenShengfa");  

测试代码test_mmap.c:

#include <fcntl.h>  
#include <stdio.h>
#include <stdlib.h>  
#include <string.h>  
#include <sys/mman.h>  
#include <unistd.h>   /* sleep(), close() — previously implicitly declared */
  
/*
 * Userspace test for the mmap_driver character device.
 *
 * Opens /dev/mmap_driver, pauses so /proc/<pid>/maps can be inspected
 * before and after mmap(), then checks that data written through the
 * mapping can be read back.  Returns 0 on success, -1 on any failure.
 */
int main( void )  
{  
    int fd;  
    char *buffer;  
    char *mapBuf;  

    /* Opening the device file lets the kernel fill in its inode and
     * triggers the driver's open(), which allocates the backing buffer. */
    fd = open("/dev/mmap_driver", O_RDWR);
    if(fd<0) {  
        printf("open device is error,fd = %d\n",fd); 
        return -1;  
    }  

    /* Test 1: watch the mapping appear in the process memory map. */
    printf("before mmap\n");  
    sleep(20); /* window to run: cat /proc/<pid>/maps (before mapping) */
    buffer = (char *)malloc(4096);  
    if (!buffer) { /* malloc can fail; don't memset(NULL, ...) */
        close(fd);
        return -1;
    }
    printf("buffer=%p\n", (void *)buffer);  
    memset(buffer, 0, 4096);  
    mapBuf = mmap(NULL, 4096, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0); /* invokes the driver's mmap */
    if (mapBuf == MAP_FAILED) { /* mmap returns MAP_FAILED, not NULL, on error */
        perror("mmap");
        free(buffer);
        close(fd);
        return -1;
    }
    printf("mapbuf=%p\n", (void *)mapBuf);  
    printf("after mmap\n");  
    sleep(10); /* window to re-check /proc/<pid>/maps for the new segment */

    /* Test 2: write through the mapping and read it back. */
    strcpy(mapBuf, "albert driver test"); /* write into the mapped page */
    memset(buffer, 0, 4096);  
    strcpy(buffer, mapBuf);               /* read back through the mapping */
    printf("buf = %s\n", buffer); /* matches the written string iff mapping works */

    munmap(mapBuf, 4096); /* tear down the mapping */
    free(buffer);  
    close(fd); /* ultimately invokes the driver's release() */
    return 0;  
}  

测试步骤:

# make    //编译驱动
# insmod mmap_driver.ko    //安装驱动
# mknod /dev/mmap_driver c 100 0    //创建设备文件
# gcc test_mmap.c -o test   //编译应用程序
# ./test    //运行应用程序来测试驱动程序

查看proc/pid/maps:

执行$ ./test
before mmap
buffer=0x1345010
mapbuf=0x7f185d01d000

查看ps -aux,找到pid 为28586,然后执行cat proc/28586/maps

root 28586 0.0 0.0 4208 652 pts/7 S+ 16:21 0:00 ./test_mmap

如下所示:

00400000-00401000 r-xp 00000000 08:01 2542484                            /home/zhaoxiao/Documents/work/code/albert/linux/mmap/test_mmap
00600000-00601000 r--p 00000000 08:01 2542484                            /home/zhaoxiao/Documents/work/code/albert/linux/mmap/test_mmap
00601000-00602000 rw-p 00001000 08:01 2542484                            /home/zhaoxiao/Documents/work/code/albert/linux/mmap/test_mmap
7f185ca34000-7f185cbf2000 r-xp 00000000 08:01 1847071                    /lib/x86_64-linux-gnu/libc-2.19.so
7f185cbf2000-7f185cdf2000 ---p 001be000 08:01 1847071                    /lib/x86_64-linux-gnu/libc-2.19.so
7f185cdf2000-7f185cdf6000 r--p 001be000 08:01 1847071                    /lib/x86_64-linux-gnu/libc-2.19.so
7f185cdf6000-7f185cdf8000 rw-p 001c2000 08:01 1847071                    /lib/x86_64-linux-gnu/libc-2.19.so
7f185cdf8000-7f185cdfd000 rw-p 00000000 00:00 0 
7f185cdfd000-7f185ce20000 r-xp 00000000 08:01 1847068                    /lib/x86_64-linux-gnu/ld-2.19.so
7f185cfff000-7f185d002000 rw-p 00000000 00:00 0 
7f185d01e000-7f185d01f000 rw-p 00000000 00:00 0 
7f185d01f000-7f185d020000 r--p 00022000 08:01 1847068                    /lib/x86_64-linux-gnu/ld-2.19.so
7f185d020000-7f185d021000 rw-p 00023000 08:01 1847068                    /lib/x86_64-linux-gnu/ld-2.19.so
7f185d021000-7f185d022000 rw-p 00000000 00:00 0 
7fff1e4f6000-7fff1e517000 rw-p 00000000 00:00 0                          [stack]
7fff1e52d000-7fff1e530000 r--p 00000000 00:00 0                          [vvar]
7fff1e530000-7fff1e532000 r-xp 00000000 00:00 0                          [vdso]
ffffffffff600000-ffffffffff601000 r-xp 00000000 00:00 0                  [vsyscall]

等待出现after mmap之后,再次查看cat proc/28586/maps:

00400000-00401000 r-xp 00000000 08:01 2542484                            /home/zhaoxiao/Documents/work/code/albert/linux/mmap/test_mmap
00600000-00601000 r--p 00000000 08:01 2542484                            /home/zhaoxiao/Documents/work/code/albert/linux/mmap/test_mmap
00601000-00602000 rw-p 00001000 08:01 2542484                            /home/zhaoxiao/Documents/work/code/albert/linux/mmap/test_mmap

01345000-01367000 rw-p 00000000 00:00 0                                  [heap]

7f185ca34000-7f185cbf2000 r-xp 00000000 08:01 1847071                    /lib/x86_64-linux-gnu/libc-2.19.so
7f185cbf2000-7f185cdf2000 ---p 001be000 08:01 1847071                    /lib/x86_64-linux-gnu/libc-2.19.so
7f185cdf2000-7f185cdf6000 r--p 001be000 08:01 1847071                    /lib/x86_64-linux-gnu/libc-2.19.so
7f185cdf6000-7f185cdf8000 rw-p 001c2000 08:01 1847071                    /lib/x86_64-linux-gnu/libc-2.19.so
7f185cdf8000-7f185cdfd000 rw-p 00000000 00:00 0 
7f185cdfd000-7f185ce20000 r-xp 00000000 08:01 1847068                    /lib/x86_64-linux-gnu/ld-2.19.so
7f185cfff000-7f185d002000 rw-p 00000000 00:00 0 
7f185d01d000-7f185d01e000 rw-s 00000000 00:06 108404                     /dev/mmap_driver
7f185d01e000-7f185d01f000 rw-p 00000000 00:00 0 
7f185d01f000-7f185d020000 r--p 00022000 08:01 1847068                    /lib/x86_64-linux-gnu/ld-2.19.so
7f185d020000-7f185d021000 rw-p 00023000 08:01 1847068                    /lib/x86_64-linux-gnu/ld-2.19.so
7f185d021000-7f185d022000 rw-p 00000000 00:00 0 
7fff1e4f6000-7fff1e517000 rw-p 00000000 00:00 0                          [stack]
7fff1e52d000-7fff1e530000 r--p 00000000 00:00 0                          [vvar]
7fff1e530000-7fff1e532000 r-xp 00000000 00:00 0                          [vdso]
ffffffffff600000-ffffffffff601000 r-xp 00000000 00:00 0                  [vsyscall]

两次对比不同之处:

01345000-01367000 rw-p 00000000 00:00 0 [heap]
7f185d01d000-7f185d01e000 rw-s 00000000 00:06 108404 /dev/mmap_driver

发布了161 篇原创文章 · 获赞 15 · 访问量 2万+

猜你喜欢

转载自blog.csdn.net/weixin_41028621/article/details/104455327
今日推荐