The principle of binder from the perspective of Linux (1)

The principle of binder from the perspective of Linux (1)

As we all know, binder is a very characteristic IPC method of Android, and it can also be said to be the most important part of the Android system. The cornerstone of the Binder system is the Binder driver. Next, let's take a look at how the Binder driver provides support at the bottom. The binder driver code is not in the source code of aosp but in the source code of Linux. In my linux-5.16.8 version, its path is /drivers/android.

First imagine what parts are needed if you implement an IPC function yourself

  1. A way to know which processes are using the IPC mechanism, so that data can be delivered to them; with binder, each process registers itself before use
  2. A piece of memory is used to save the data in transit
  3. Command protocol, specifying the operations represented by different protocol codes

In this way, there are only so many basic functions of ipc, and the rest is the security and optimization part, which is not very complicated. Next, let's look at the implementation of binder. If you are not familiar with Linux drivers, you can first look at Chapter 2 of "LINUX Device Drivers".

initialization

The binder is initialized by binder_init .


/*
 * binder_init() - module initialization for the binder driver.
 *
 * Initializes the allocator shrinker, creates the debugfs hierarchy
 * under /binder, registers one misc char device per name in the
 * "binder_devices" module parameter (only when binderfs is disabled),
 * and finally initializes binderfs.  Returns 0 on success or a
 * negative errno; on failure, devices and debugfs entries created so
 * far are torn down via the goto labels at the bottom.
 */
static int __init binder_init(void)
{
    int ret;
    char *device_name, *device_tmp;
    struct binder_device *device;
    struct hlist_node *tmp;
    char *device_names = NULL;
    
    ret = binder_alloc_shrinker_init();// initialize the structures used for binder memory allocation
    if (ret)
        return ret;
    /* start both transaction-log cursors at ~0U so the first entry lands at index 0 */
    atomic_set(&binder_transaction_log.cur, ~0U);
    atomic_set(&binder_transaction_log_failed.cur, ~0U);
  
// create the debugfs directory /binder
    binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
  
    if (binder_debugfs_dir_entry_root)
        binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
                         binder_debugfs_dir_entry_root);
    if (binder_debugfs_dir_entry_root) { // everything inside this if creates the various binder-related debugfs files
        debugfs_create_file("state",
                    0444,
                    binder_debugfs_dir_entry_root,
                    NULL,
                    &binder_state_fops);
        debugfs_create_file("stats",
                    0444,
                    binder_debugfs_dir_entry_root,
                    NULL,
                    &binder_stats_fops);
        debugfs_create_file("transactions",
                    0444,
                    binder_debugfs_dir_entry_root,
                    NULL,
                    &binder_transactions_fops);
        debugfs_create_file("transaction_log",
                    0444,
                    binder_debugfs_dir_entry_root,
                    &binder_transaction_log,
                    &binder_transaction_log_fops);
        debugfs_create_file("failed_transaction_log",
                    0444,
                    binder_debugfs_dir_entry_root,
                    &binder_transaction_log_failed,
                    &binder_transaction_log_fops);
    }
    /* legacy path: only register /dev misc devices when binderfs is disabled */
    if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS) &&
        strcmp(binder_devices_param, "") != 0) {
        /*
        * Copy the module_parameter string, because we don't want to
        * tokenize it in-place.
         */
        device_names = kstrdup(binder_devices_param, GFP_KERNEL);
        if (!device_names) {
            ret = -ENOMEM;
            goto err_alloc_device_names_failed;
        }
        device_tmp = device_names;
        /* the parameter is a comma-separated list of device names */
        while ((device_name = strsep(&device_tmp, ","))) {
            ret = init_binder_device(device_name); // create one binder device per name
            if (ret)
                goto err_init_binder_device_failed;
        }
    }
    ret = init_binderfs();// initialize the binder filesystem
    if (ret)
        goto err_init_binder_device_failed;
    return ret;
/* unwind: deregister and free every device created so far */
err_init_binder_device_failed:
    hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
        misc_deregister(&device->miscdev);
        hlist_del(&device->hlist);
        kfree(device);
    }
    kfree(device_names);
err_alloc_device_names_failed:
    debugfs_remove_recursive(binder_debugfs_dir_entry_root);
    return ret;
}

4 things are done in binder_init

  1. binder_alloc_shrinker_init initializes memory allocation related structures
  2. Create a series of files to store binder-related information
  3. init_binder_device initializes the device
  4. init_binderfs initializes the filesystem

Next, let's see what init_binder_device and init_binderfs do


/*
 * init_binder_device() - allocate and register one binder misc device.
 * @name: device name from the "binder_devices" module parameter.
 *
 * Returns 0 on success or a negative errno (allocation or
 * misc_register failure).  On success the device is linked into the
 * global binder_devices list.
 */
static int __init init_binder_device(const char *name)
{
    int ret;
    struct binder_device *binder_device;
    binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);// allocate the binder_device (zeroed)
    if (!binder_device)
        return -ENOMEM;
    binder_device->miscdev.fops = &binder_fops; // route file operations for this device to binder_fops
    binder_device->miscdev.minor = MISC_DYNAMIC_MINOR; // let the misc core pick a free minor number
    binder_device->miscdev.name = name;
    refcount_set(&binder_device->ref, 1);
    binder_device->context.binder_context_mgr_uid = INVALID_UID; // no context manager registered yet
    binder_device->context.name = name;
    mutex_init(&binder_device->context.context_mgr_node_lock);
    ret = misc_register(&binder_device->miscdev);// register the misc device with the system
    if (ret < 0) {
        kfree(binder_device);
        return ret;
    }
    hlist_add_head(&binder_device->hlist, &binder_devices);// add this device to the global binder_devices list
    return ret;
}

init_binder_device code is relatively simple

  1. Create a binder_device object and assign values to its fields. The key point is that fops is set to binder_fops
  2. Register the current binder_device with the system through misc_register. At this time, the system will automatically create nodes, etc. In short, the device has been discovered by other programs. Similar to the insertion of a mouse, the binder is a virtual device without physical hardware.
  3. Add the current device to the binder_devices list
/*
 * File operations for the binder device: every userspace interaction
 * with the driver (open/mmap/ioctl/poll/close) is dispatched through
 * these callbacks.
 */
const struct file_operations binder_fops = {
    .owner = THIS_MODULE,
    .poll = binder_poll,
    .unlocked_ioctl = binder_ioctl,   /* main command entry point */
    .compat_ioctl = compat_ptr_ioctl, /* 32-bit userspace on 64-bit kernel */
    .mmap = binder_mmap,              /* sets up the transaction buffer */
    .open = binder_open,              /* creates the per-process binder_proc */
    .flush = binder_flush,
    .release = binder_release,
};

binder_fops就是对binder驱动的文件操作重定向到了自己定义的函数,和外界的交互就通过这些方法,后续的所有操作都可在这里找到。回头看看init_binderfs


/*
 * init_binderfs() - register the binderfs filesystem type.
 *
 * Validates the device names in the "binder_devices" module parameter,
 * reserves a char-device major number for binderfs, and registers the
 * filesystem.  Returns 0 on success or a negative errno.
 */
int __init init_binderfs(void)
{
    int ret;
    const char *name;
    size_t len;
    /* Verify that the default binderfs device names are valid. */
    name = binder_devices_param;
    for (len = strcspn(name, ","); len > 0; len = strcspn(name, ",")) {
        if (len > BINDERFS_MAX_NAME)
            return -E2BIG;
        name += len;
        if (*name == ',')
            name++;
    }
    /* Allocate new major number for binderfs. */
    ret = alloc_chrdev_region(&binderfs_dev, 0, BINDERFS_MAX_MINOR,
                  "binder");
    if (ret)
        return ret;
    ret = register_filesystem(&binder_fs_type);
    if (ret) {
        /* undo the chrdev reservation if registration failed */
        unregister_chrdev_region(binderfs_dev, BINDERFS_MAX_MINOR);
        return ret;
    }
    return ret;
}

最重要的就一步register_filesystem,将binder文件系统注册到系统中,在前面一些版本操作都直接放在binder_init中一个长函数,现在文件操作相关的部分都放到了binderfs.c文件中。注册的直观的作用就是可以在文件路径中找到binder了,可以open,ioctl。当然这里也涉及了挂载等功能,但是和binder的特性关系不大就不展开了。

打开设备文件

一个进程在使用Binder进程间通信机制之前,首先要调用open打开设备文件/dev/binder来获得一个文件描述符,然后才能通过这个文件描述符来和Binder驱动程序交互。open打开文件会进入系统调用,执行sys_open,在这个方法里会找到一个没有使用的fd并创建一个file,最后会执行文件系统的open操作。而binder文件系统的open又在binder_fops中指向了binder_open这个方法,所以来看下binder_open方法。


/*
 * binder_open() - called when a process opens the binder device.
 *
 * Creates and initializes a binder_proc for the opening process,
 * stashes it in filp->private_data so later file operations can find
 * it, and links it into the global binder_procs list.
 */
static int binder_open(struct inode *nodp, struct file *filp)
{
    struct binder_proc *proc, *itr;
    struct binder_device *binder_dev;
    struct binderfs_info *info;
    struct dentry *binder_binderfs_dir_entry_proc = NULL;
    bool existing_pid = false;
    binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
             current->group_leader->pid, current->pid);
    proc = kzalloc(sizeof(*proc), GFP_KERNEL);// allocate the binder_proc (zeroed)
    if (proc == NULL)
        return -ENOMEM;
  // initialize the two spinlocks
    spin_lock_init(&proc->inner_lock);
    spin_lock_init(&proc->outer_lock);
    get_task_struct(current->group_leader); // take a reference on the thread-group leader
  // initialize the proc bookkeeping
    proc->tsk = current->group_leader;
    proc->cred = get_cred(filp->f_cred);
    INIT_LIST_HEAD(&proc->todo); // initialize the todo work queue
    init_waitqueue_head(&proc->freeze_wait);
    proc->default_priority = task_nice(current);
    /* binderfs stashes devices in i_private */
    if (is_binderfs_device(nodp)) {
        binder_dev = nodp->i_private;
        info = nodp->i_sb->s_fs_info;
        binder_binderfs_dir_entry_proc = info->proc_log_dir;
    } else {
        /* legacy misc-device path: recover the device from the miscdev */
        binder_dev = container_of(filp->private_data,
                      struct binder_device, miscdev);
    }
    refcount_inc(&binder_dev->ref);
    proc->context = &binder_dev->context;
    binder_alloc_init(&proc->alloc); // set up this proc's buffer allocator
    binder_stats_created(BINDER_STAT_PROC);
    proc->pid = current->group_leader->pid;
    INIT_LIST_HEAD(&proc->delivered_death);
    INIT_LIST_HEAD(&proc->waiting_threads);
    filp->private_data = proc; // stash the binder_proc in filp->private_data
    mutex_lock(&binder_procs_lock);
    /* check whether this pid already opened the device */
    hlist_for_each_entry(itr, &binder_procs, proc_node) {
        if (itr->pid == proc->pid) {
            existing_pid = true;
            break;
        }
    }
    hlist_add_head(&proc->proc_node, &binder_procs);// add to the global binder_procs list
    mutex_unlock(&binder_procs_lock);
    /* remainder of the function (debugfs per-proc entries) elided in this excerpt */
    ...
    return 0;
}

总结来说重要的就2件事

  1. 创建binder_proc。初始化binder_proc中的各种变量,这是binder系统中最重要的结构之一,每个使用binder的进程都会对应一个binder_proc。
  2. 保存binder_proc。首先将binder_proc和file绑定起来,后面就能通过fd来找到这个binder_proc了,然后将binder_proc添加到全局的binder_procs这个hash表中

分配内存

分配内存的系统调用是mmap,在binder中会到binder_mmap。为进程分配内核缓冲区,以便它可以用来传输进程间通信数据。


/*
 * binder_mmap() - map the binder transaction buffer into the caller.
 *
 * Validates the VMA flags, marks the mapping as not-copied-on-fork and
 * read-only for userspace, then hands off to binder_alloc_mmap_handler
 * to set up the buffer bookkeeping.
 */
static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
    struct binder_proc *proc = filp->private_data;// fetch the binder_proc stored here by binder_open
    //... (some checks elided in the original excerpt)
  // validate the permissions of the memory region
    if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
        pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
               proc->pid, vma->vm_start, vma->vm_end, "bad vm_flags", -EPERM);
        return -EPERM;
    }
    /* mapping is not inherited across fork and never writable from userspace */
    vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP;
    vma->vm_flags &= ~VM_MAYWRITE;
    vma->vm_ops = &binder_vm_ops;
    vma->vm_private_data = proc;
    return binder_alloc_mmap_handler(&proc->alloc, vma);// set up the buffer allocation
}

参数filp是对应的文件结构,在open的时候把binder_proc存到了filp的private_data,在mmap中重新取了出来。vma是虚拟内存这个函数的作用就是拿到proc然后初始化vma的属性,具体的操作在binder_alloc_mmap_handler中。


// some error handling has been removed from this excerpt for readability
/*
 * binder_alloc_mmap_handler() - initialize the per-process buffer state.
 *
 * Caps the mapping at 4 MB, records the userspace start address,
 * allocates the page-tracking array and the initial free buffer, and
 * reserves half the space for asynchronous transactions.
 */
int binder_alloc_mmap_handler(struct binder_alloc *alloc,
                  struct vm_area_struct *vma)
{
    int ret;
    const char *failure_string;
    struct binder_buffer *buffer;
    alloc->buffer_size = min_t(unsigned long, vma->vm_end - vma->vm_start,
                   SZ_4M);// set the buffer size, capped at 4 MB
    /* NOTE: the matching mutex_lock was part of the removed error handling */
    mutex_unlock(&binder_alloc_mmap_lock);
    alloc->buffer = (void __user *)vma->vm_start; // userspace start of the mapping
    alloc->pages = kcalloc(alloc->buffer_size / PAGE_SIZE,
                   sizeof(alloc->pages[0]),
                   GFP_KERNEL); // array of per-page structs tracking the physical pages
    
    buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);// allocate the initial binder_buffer descriptor
    buffer->user_data = alloc->buffer; // point user_data at the start of the mapped region
    list_add(&buffer->entry, &alloc->buffers);// add the buffer to this alloc's buffers list
    buffer->free = 1;
    binder_insert_free_buffer(alloc, buffer);// insert the buffer into the free-buffers red-black tree
    alloc->free_async_space = alloc->buffer_size / 2;// reserve half the buffer space for async transactions
    binder_alloc_set_vma(alloc, vma);
    mmgrab(alloc->vma_vm_mm); // pin the mm so it outlives the mapping bookkeeping
    return 0;
}

binder_alloc_mmap_handler函数中解答了一个面试中很常见的问题,那就是为什么binder通讯有大小限制,在buffer_size设置的时候做了判断,超过4m就按照4m来截断了。这个函数的核心功能就是创建binder_buffer 这个结构体表示的就是驱动中的缓存,同时会给他分配表示物理内存的页的数量的数组表示实际占用的物理内存。

内存分配完了肯定需要管理,在binder_alloc_mmap_handler中出现了binder_insert_free_buffer将buffer插入到空闲buffer树中。在binder驱动中有两个不同的红黑树,free_buffers和allocated_buffers。需要使用缓存的时候就会从free_buffers中拿到,分配物理内存然后添加到allocated_buffers中,当使用完毕之后也就释放内存,重新回到free_buffers中。

总结

通过init,open,mmap使用binder的环境已经准备好了,后面只需要通过ioctl和binder交互就能完成通讯了。

参考

《Android系统源代码情景分析》

Guess you like

Origin juejin.im/post/7118368988005924900