Android Source Code Analysis: Binder IPC Mechanism (5) - Analyzing the IPC Principle from the Perspective of ServiceManager

Contents

  1. Binder IPC Mechanism (1) - Overview
  2. Binder IPC Mechanism (2) - The Kernel-Space Binder Driver in Detail (Basic Data Structures)
  3. Binder IPC Mechanism (3) - The Kernel-Space Binder Driver in Detail (Binder Driver Memory Management)
  4. Binder IPC Mechanism (4) - The Kernel-Space Binder Driver in Detail (The Binder Driver Library's C/C++ Interface)
  5. Binder IPC Mechanism (5) - Analyzing the IPC Principle from the Perspective of ServiceManager

As the context manager of Binder IPC, ServiceManager is responsible for registering IPC components and for handing out their proxy objects. ServiceManager itself runs in a separate process, so Service components and Client components must also use inter-process communication to call it. In that sense, ServiceManager is itself a special Service component of the Binder IPC mechanism.

The startup of ServiceManager

ServiceManager is started by the init process, i.e. when the system boots. Its initialization entry point is the main function of service_manager.c.

int main(int argc, char **argv)
{
    struct binder_state *bs;
    void *svcmgr = BINDER_SERVICE_MANAGER;

    //Step 1: open the binder device
    bs = binder_open(128*1024);
    //Step 2: register as the binder driver's context manager
    if (binder_become_context_manager(bs)) {
        LOGE("cannot become context manager (%s)\n", strerror(errno));
        return -1;
    }

    svcmgr_handle = svcmgr;
    //Step 3: loop, waiting for requests from Client components
    binder_loop(bs, svcmgr_handler);
    return 0;
}
  1. main first calls binder_open to open the /dev/binder device file and map it into the process's address space.
  2. It then calls binder_become_context_manager to register itself as the binder context manager.
  3. Finally it calls binder_loop to loop, waiting for and handling IPC requests from Client components.

binder_open returns an object bs described by the struct binder_state:

struct binder_state
{
    int fd;
    void *mapped;
    unsigned mapsize;
};

In binder_state, fd is the file descriptor returned from opening the binder device file; it is used to interact with the Binder driver. mapped and mapsize describe the address space that ServiceManager shares with the Binder driver, which lets the driver allocate a kernel buffer for it to hold IPC data.

The macro BINDER_SERVICE_MANAGER is defined as follows:

/* the one magic object */
#define BINDER_SERVICE_MANAGER ((void*) 0)

ServiceManager is a special Service component: the address of its Binder local object is 0, and the handle of its corresponding Binder reference object is also 0.
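
Conceptually, this means any process can construct a ServiceManager proxy directly around handle 0 without any lookup. A simplified sketch (the real path, analyzed later, goes through ProcessState and caches the proxy):

//simplified: handle 0 always denotes ServiceManager, so a proxy can be
//built around it directly; the real code reuses a cached BpBinder via
//ProcessState::getStrongProxyForHandle(0)
sp<IServiceManager> sm = interface_cast<IServiceManager>(new BpBinder(0));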


int main(int argc, char **argv)
{
    struct binder_state *bs;
    void *svcmgr = BINDER_SERVICE_MANAGER;
    ....
    //save this value, 0, in the global variable svcmgr_handle; it describes the Binder entity object
    svcmgr_handle = svcmgr;
    return 0;
}

Step 1: binder_open opens the Binder device file

binder_open opens the device file /dev/binder and maps it into the process's address space so the binder driver can exchange data with it. The code is as follows:

//open the binder device
struct binder_state *binder_open(unsigned mapsize)
{
    struct binder_state *bs;

    bs = malloc(sizeof(*bs));
    if (!bs) {
        errno = ENOMEM;
        return 0;
    }

    //Open the device file /dev/binder. This invokes the binder driver's binder_open,
    //which creates a binder_proc struct describing the Binder IPC state of the current process.
    bs->fd = open("/dev/binder", O_RDWR);
    if (bs->fd < 0) {
        fprintf(stderr,"binder: cannot open device (%s)\n",
                strerror(errno));
        goto fail_open;
    }

    bs->mapsize = mapsize;
    //Map the address space: the 128K passed in becomes the size of the kernel buffer.
    //The start address is stored in binder_state's mapped field, the size in mapsize.
    bs->mapped = mmap(NULL, mapsize, PROT_READ, MAP_PRIVATE, bs->fd, 0);
    if (bs->mapped == MAP_FAILED) {
        fprintf(stderr,"binder: cannot map device (%s)\n",
                strerror(errno));
        goto fail_map;
    }

        /* TODO: check version */

    return bs;

fail_map:
    close(bs->fd);
fail_open:
    free(bs);
    return 0;
}
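
Note the /* TODO: check version */ comment: this version of the code never verifies that the driver speaks the protocol the user-space headers expect. A hedged sketch of such a check, placed right after open() succeeds and using the BINDER_VERSION ioctl from the binder headers (later versions of this file do add one):

    /* hypothetical addition: verify the driver's protocol version */
    struct binder_version vers;
    if ((ioctl(bs->fd, BINDER_VERSION, &vers) == -1) ||
        (vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)) {
        fprintf(stderr, "binder: driver version mismatch\n");
        goto fail_map;   /* closes the fd and frees bs */
    }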

Step 2: binder_become_context_manager registers ServiceManager as the Binder driver's context manager

binder_become_context_manager ultimately calls ioctl with the BINDER_SET_CONTEXT_MGR command, telling the binder driver to register the caller as the context manager.

int binder_become_context_manager(struct binder_state *bs)
{
    return ioctl(bs->fd, BINDER_SET_CONTEXT_MGR, 0);
}

The ioctl call is handled by the binder driver's binder_ioctl function:

static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
    int ret;
    //proc is the binder_proc struct describing the calling process; it was created by
    //the driver's binder_open when ServiceManager opened /dev/binder
    struct binder_proc *proc = filp->private_data;
    struct binder_thread *thread;
    unsigned int size = _IOC_SIZE(cmd);
    void __user *ubuf = (void __user *)arg;
    ...
    mutex_lock(&binder_lock);
    //Fetch or create the binder_thread struct that handles transactions for the calling
    //thread; it is kept in the red-black tree rooted at the threads member of binder_proc,
    //the thread pool of this binder process.
    thread = binder_get_thread(proc);

    switch (cmd) {
    ...
    case BINDER_SET_CONTEXT_MGR:
        ...
        //validity checks
        ...
        else
            binder_context_mgr_uid = current->cred->euid;
        //binder_new_node creates the Binder entity object for ServiceManager.
        //The binder_node it returns is stored in the global variable
        //binder_context_mgr_node, which describes the Binder entity object of the
        //context manager of the Binder IPC mechanism.
        binder_context_mgr_node = binder_new_node(proc, NULL, NULL);
        //bail out if creation failed
        if (binder_context_mgr_node == NULL) {
            ret = -ENOMEM;
            goto err;
        }
        //increase the strong and weak reference counts of ServiceManager's binder_node
        binder_context_mgr_node->local_weak_refs++;
        binder_context_mgr_node->local_strong_refs++;
        //set to 1, meaning the reference counts have been taken; since ServiceManager
        //exists for as long as the system is up, these stay set
        binder_context_mgr_node->has_strong_ref = 1;
        binder_context_mgr_node->has_weak_ref = 1;
        break;
    ...
    default:
        ret = -EINVAL;
        goto err;
    }
    ret = 0;
err:
    if (thread)
        thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN;
    mutex_unlock(&binder_lock);
    ...
    return ret;
}

binder_new_node creates the Binder entity object for ServiceManager. The code is as follows:

//The binder_proc struct describes a process using Binder IPC, here the ServiceManager process.
//ptr and cookie describe the binder local object, i.e. they hold its address; both are
//NULL (0) here, because the address of the ServiceManager local object is 0.
binder_new_node(struct binder_proc *proc, void __user *ptr, void __user *cookie)
{
    struct rb_node **p = &proc->nodes.rb_node;
    struct rb_node *parent = NULL;
    struct binder_node *node;

    ...
    //Check whether a binder_node describing this binder local object already exists in
    //the host process, i.e. in the nodes member of binder_proc; if not, create it below.
    ...
    //allocate memory for a new binder_node struct, i.e. the binder entity object
    node = kzalloc(sizeof(*node), GFP_KERNEL);
    if (node == NULL)
        return NULL;
    binder_stats.obj_created[BINDER_STAT_NODE]++;
    rb_link_node(&node->rb_node, parent, p);
    rb_insert_color(&node->rb_node, &proc->nodes);
    node->debug_id = ++binder_last_id;
    //the entity object records its host process, the ServiceManager process
    node->proc = proc;
    //record the address describing the binder local object
    node->ptr = ptr;
    node->cookie = cookie;
    node->work.type = BINDER_WORK_NODE;
    INIT_LIST_HEAD(&node->work.entry);
    INIT_LIST_HEAD(&node->async_todo);
    ...
    return node;
}

This registers ServiceManager as the Binder driver's context manager.

Step 3: binder_loop keeps the Service component waiting for IPC requests from Client components

While it is running, ServiceManager must continuously serve Service and Client components, so it needs an infinite loop that keeps waiting for and handling their IPC requests; this is what the function binder_loop implements.

//bs is the binder_state struct created by binder_open
//func points to svcmgr_handler in main, the callback used to handle IPC requests
void binder_loop(struct binder_state *bs, binder_handler func)
{
    int res;
    struct binder_write_read bwr;
    unsigned readbuf[32];

    bwr.write_size = 0;
    bwr.write_consumed = 0;
    bwr.write_buffer = 0;

    readbuf[0] = BC_ENTER_LOOPER;
    //send BC_ENTER_LOOPER to the binder driver, registering this thread as a binder
    //thread so it can receive IPC requests
    binder_write(bs, readbuf, sizeof(unsigned));

    //loop, receiving IPC requests from clients
    for (;;) {
        bwr.read_size = sizeof(readbuf);
        bwr.read_consumed = 0;
        bwr.read_buffer = (unsigned) readbuf;
        //keep issuing BINDER_WRITE_READ to fetch new IPC requests;
        //when there is nothing to do, the thread sleeps inside this ioctl
        res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
        //a negative result means the ioctl itself failed
        if (res < 0) {
            LOGE("binder_loop: ioctl failed (%s)\n", strerror(errno));
            break;
        }
        //something arrived: let binder_parse handle the request
        res = binder_parse(bs, 0, readbuf, bwr.read_consumed, func);
        if (res == 0) {
            LOGE("binder_loop: unexpected reply?!\n");
            break;
        }
        if (res < 0) {
            LOGE("binder_loop: io error %d %s\n", res, strerror(errno));
            break;
        }
    }
}
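
Both binder_loop above and binder_write below talk to the driver through a struct binder_write_read. For reference, in the kernel headers of this era it looks roughly like this (later kernels use binder_size_t and friends instead of plain longs):

struct binder_write_read {
    signed long   write_size;     /* bytes available in write_buffer */
    signed long   write_consumed; /* bytes consumed by the driver */
    unsigned long write_buffer;   /* commands from user space to the driver */
    signed long   read_size;      /* bytes available in read_buffer */
    signed long   read_consumed;  /* bytes filled in by the driver */
    unsigned long read_buffer;    /* returns from the driver to user space */
};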

The binder_write function:

//binder_write registers the current thread as a Binder thread via the BC_ENTER_LOOPER
//command so it can receive work from the binder driver. data is the buffer holding the
//BC_ENTER_LOOPER command; bs is the binder_state returned by binder_open.
int binder_write(struct binder_state *bs, void *data, unsigned len)
{
    //build a binder_write_read struct bwr
    struct binder_write_read bwr;
    int res;
    bwr.write_size = len;
    bwr.write_consumed = 0;
    //use the buffer pointed to by data as its input (write) buffer
    bwr.write_buffer = (unsigned) data;
    bwr.read_size = 0;
    bwr.read_consumed = 0;
    bwr.read_buffer = 0;
    //register the current thread with the binder driver
    res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
    if (res < 0) {
        fprintf(stderr,"binder_write: ioctl failed (%s)\n",
                strerror(errno));
    }
    return res;
}

The ioctl is again handled by the binder_ioctl function, this time with the BINDER_WRITE_READ command:

static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
    int ret;
    struct binder_proc *proc = filp->private_data;
    struct binder_thread *thread;
    unsigned int size = _IOC_SIZE(cmd);
    void __user *ubuf = (void __user *)arg;
    ...
    //fetch the binder_thread struct describing the current thread
    thread = binder_get_thread(proc);
    ...
    switch (cmd) {
    case BINDER_WRITE_READ: {
        struct binder_write_read bwr;
        if (size != sizeof(struct binder_write_read)) {
            ret = -EINVAL;
            goto err;
        }
        //copy the binder_write_read struct passed in from user space into bwr
        if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
            ret = -EFAULT;
            goto err;
        }
        if (binder_debug_mask & BINDER_DEBUG_READ_WRITE)
            printk(KERN_INFO "binder: %d:%d write %ld at %08lx, read %ld at %08lx\n",
                   proc->pid, thread->pid, bwr.write_size, bwr.write_buffer, bwr.read_size, bwr.read_buffer);
        if (bwr.write_size > 0) {
            //binder_thread_write handles the BC_ENTER_LOOPER protocol; control then returns to user space
            ret = binder_thread_write(proc, thread, (void __user *)bwr.write_buffer, bwr.write_size, &bwr.write_consumed);
            if (ret < 0) {
                bwr.read_consumed = 0;
                if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
                    ret = -EFAULT;
                goto err;
            }
        }
        if (bwr.read_size > 0) {
            //check whether the ServiceManager process has new IPC requests to handle
            ret = binder_thread_read(proc, thread, (void __user *)bwr.read_buffer, bwr.read_size, &bwr.read_consumed, filp->f_flags & O_NONBLOCK);
            if (!list_empty(&proc->todo))
                wake_up_interruptible(&proc->wait);
            if (ret < 0) {
                if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
                    ret = -EFAULT;
                goto err;
            }
        }
        if (binder_debug_mask & BINDER_DEBUG_READ_WRITE)
            printk(KERN_INFO "binder: %d:%d wrote %ld of %ld, read return %ld of %ld\n",
                   proc->pid, thread->pid, bwr.write_consumed, bwr.write_size, bwr.read_consumed, bwr.read_size);
        if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
            ret = -EFAULT;
            goto err;
        }
        break;
    }
    ...
    default:
        ret = -EINVAL;
        goto err;
    }
    ret = 0;
err:
    if (thread)
        thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN;
    mutex_unlock(&binder_lock);
    wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
    if (ret && ret != -ERESTARTSYS)
        printk(KERN_INFO "binder: %d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
    return ret;
}

The binder_thread_write function handles the BC_ENTER_LOOPER command: it sets the thread state to BINDER_LOOPER_STATE_ENTERED, marking the thread as a binder thread that can handle IPC requests.

int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
            void __user *buffer, int size, signed long *consumed)
{
    uint32_t cmd;
    void __user *ptr = buffer + *consumed;
    void __user *end = buffer + size;

    while (ptr < end && thread->return_error == BR_OK) {
        //read the next command (here BC_ENTER_LOOPER) from the user-space buffer
        if (get_user(cmd, (uint32_t __user *)ptr))
            return -EFAULT;
        ptr += sizeof(uint32_t);
        switch (cmd) {
        ....
        case BC_ENTER_LOOPER:
            ...
            //mark the thread BINDER_LOOPER_STATE_ENTERED: it is a binder thread
            //and may now handle IPC requests
            thread->looper |= BINDER_LOOPER_STATE_ENTERED;
            break;
            ....
        }
        *consumed = ptr - buffer;
    }
    return 0;
}

The function binder_thread_read checks whether there are new IPC requests for ServiceManager to handle.

binder_thread_read(struct binder_proc *proc, struct binder_thread *thread,
    void  __user *buffer, int size, signed long *consumed, int non_block)
{
    void __user *ptr = buffer + *consumed;
    void __user *end = buffer + size;

    int ret = 0;
    int wait_for_proc_work;

    ...
retry:
    //check whether the queue of pending IPC work, todo, is empty and whether the
    //current thread's transaction stack is empty; if both are, wait_for_proc_work is 1
    wait_for_proc_work = thread->transaction_stack == NULL && list_empty(&thread->todo);

    ....

    //mark the current thread BINDER_LOOPER_STATE_WAITING, i.e. the thread is idle
    thread->looper |= BINDER_LOOPER_STATE_WAITING;
    if (wait_for_proc_work)
        proc->ready_threads++;
    mutex_unlock(&binder_lock);
    //if the thread is idle
    if (wait_for_proc_work) {
        ...
        //non_block records whether the device file was opened in non-blocking mode
        if (non_block) {
            //check whether the process has unhandled work items; internally this tests
            //whether the process todo queue is non-empty
            if (!binder_has_proc_work(proc, thread))
                ret = -EAGAIN;
        } else
            //in blocking mode, sleep in wait_event_interruptible_exclusive until the
            //process receives a new work item
            ret = wait_event_interruptible_exclusive(proc->wait, binder_has_proc_work(proc, thread));
    } else {
        //otherwise the thread is not idle, i.e. its own todo queue has unhandled work items
        //again check for non-blocking mode
        if (non_block) {
            //check whether the current thread has unhandled work items
            if (!binder_has_thread_work(thread))
                ret = -EAGAIN;
        } else
            //sleep until a new work item arrives for this thread
            ret = wait_event_interruptible(thread->wait, binder_has_thread_work(thread));
    }
    mutex_lock(&binder_lock);
    if (wait_for_proc_work)
        proc->ready_threads--;
    //the binder driver has found work for this thread: clear the waiting flag
    thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
    ...

    //the awakened thread processes its work items in this while loop
    while (1) {
        ....
    }

done:

    *consumed = ptr - buffer;
    if (proc->requested_threads + proc->ready_threads == 0 &&
        proc->requested_threads_started < proc->max_threads &&
        (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
         BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
         /*spawn a new thread if we leave this out */) {
        proc->requested_threads++;
        if (binder_debug_mask & BINDER_DEBUG_THREADS)
            printk(KERN_INFO "binder: %d:%d BR_SPAWN_LOOPER\n",
                   proc->pid, thread->pid);
        //when the binder driver finds a process short of idle Binder threads to execute
        //requests, it uses this protocol to ask the process to add a thread to its
        //Binder thread pool
        if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
            return -EFAULT;
    }
    return 0;
}

That is the startup process of ServiceManager. The ServiceManager process's thread pool keeps looping, receiving IPC requests from the client side, handling them when they arrive and blocking while there is nothing to do.


Obtaining the ServiceManager proxy object

Whether it is AMS or PMS, each must register itself with ServiceManager at startup; because ServiceManager runs in a separate process, this too has to go through its proxy object, i.e. through inter-process communication.

(The UML class diagram of the ServiceManager proxy classes is not reproduced in this reprint.)

IServiceManager is the interface that both the local object and the proxy object inherit; it defines the getService, checkService, addService and listServices methods, which are used mainly to register Service components and to obtain proxies for other Service components.

For ordinary Service components, a client process must obtain their handle through the binder driver and then build a binder proxy object from that handle. ServiceManager is different: as a special Service component its handle is fixed at 0, so it does not need to be fetched through the Binder driver, saving that round trip.

Other Service components such as AMS must register themselves with ServiceManager at startup, so they first need a ServiceManager object; for this they call defaultServiceManager to obtain the ServiceManager proxy object. The method is as follows:

sp<IServiceManager> defaultServiceManager()
{
    //gDefaultServiceManager is a BpServiceManager object
    if (gDefaultServiceManager != NULL) return gDefaultServiceManager;

    {
        AutoMutex _l(gDefaultServiceManagerLock);
        if (gDefaultServiceManager == NULL) {
            //create the BpServiceManager
            gDefaultServiceManager = interface_cast<IServiceManager>(
                ProcessState::self()->getContextObject(NULL));
        }
    }

    return gDefaultServiceManager;
}

The above is a singleton accessor for gDefaultServiceManager; on first use it initializes the instance by calling gDefaultServiceManager = interface_cast<IServiceManager>(ProcessState::self()->getContextObject(NULL));.

Step 1: obtain the ProcessState via ProcessState::self()

sp<ProcessState> ProcessState::self()
{
    if (gProcess != NULL) return gProcess;

    AutoMutex _l(gProcessMutex);
    //create the ProcessState object
    if (gProcess == NULL) gProcess = new ProcessState;
    return gProcess;
}

The ProcessState constructor:

ProcessState::ProcessState()
    //open_driver opens the /dev/binder device file (which invokes the driver's binder_open)
    : mDriverFD(open_driver())
    , mVMStart(MAP_FAILED)
{
    ....
}

The open_driver function:

static int open_driver()
{
    if (gSingleProcess) {
        return -1;
    }
    //open the device file and obtain a file descriptor
    int fd = open("/dev/binder", O_RDWR);
    .....
        //send the BINDER_SET_MAX_THREADS command to the binder driver: this process may
        //create at most 15 binder threads
        size_t maxThreads = 15;
        result = ioctl(fd, BINDER_SET_MAX_THREADS, &maxThreads);
       ......
    //return the file descriptor
    return fd;
}

Step 2: ProcessState::getContextObject(NULL)

sp<IBinder> ProcessState::getContextObject(const sp<IBinder>& caller)
{
    //check whether Binder IPC is supported, i.e. whether the device file /dev/binder was opened successfully
    if (supportsProcesses()) {
        //create the Binder proxy object
        return getStrongProxyForHandle(0);
    } else {
        return getContextObject(String16("default"), caller);
    }
}

getStrongProxyForHandle(0):

//here the ServiceManager proxy is being created, i.e. the handle value is 0
sp<IBinder> ProcessState::getStrongProxyForHandle(int32_t handle)
{
    sp<IBinder> result;

    AutoMutex _l(mLock);
    //handle_entry wraps a binder proxy object; entries live in the member variable
    //mHandleToObject, the list of all binder proxy objects of this process
    handle_entry* e = lookupHandleLocked(handle);

    if (e != NULL) {
        IBinder* b = e->binder;
        //on the first call the binder proxy object has not been created yet, i.e. e->binder is null
        if (b == NULL || !e->refs->attemptIncWeak(this)) {
            //create the Binder proxy object BpBinder
            b = new BpBinder(handle); 
            //store its address in the handle_entry's binder field
            e->binder = b;
            //(the elided code also stores b in result, which is returned below)
            ....
        } else {
            ....
        }
    }

    return result;
}

Step 3: wrap it into the ServiceManager proxy object via interface_cast<IServiceManager>(BpBinder)

The code is as follows:

//the template parameter INTERFACE is IServiceManager here
template<typename INTERFACE>
inline sp<INTERFACE> interface_cast(const sp<IBinder>& obj)
{
    //wrap the Binder proxy object with handle 0 into a ServiceManager proxy object
    return INTERFACE::asInterface(obj);
}

INTERFACE::asInterface(obj) is as follows:

//obj is the BpBinder binder proxy object with handle 0
android::sp<I##INTERFACE> I##INTERFACE::asInterface(                \
            const android::sp<android::IBinder>& obj)                   \
    {                                                                   \
        android::sp<I##INTERFACE> intr;                                 \
        if (obj != NULL) {                                              \
            intr = static_cast<I##INTERFACE*>(    
                //since obj is a proxy object, queryLocalInterface returns null                      
                obj->queryLocalInterface(                               \
                        I##INTERFACE::descriptor).get());               \
            if (intr == NULL) {  
                //create the BpServiceManager                                       \
                intr = new Bp##INTERFACE(obj);                          \
            }                                                           \
        }    
        //return the ServiceManager proxy object BpServiceManager                                                           \
        return intr;                                                    \
    }                 
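
The asInterface body above is generated by the IMPLEMENT_META_INTERFACE macro; for IServiceManager the AOSP source instantiates it as:

IMPLEMENT_META_INTERFACE(ServiceManager, "android.os.IServiceManager");

The string is the interface descriptor that writeInterfaceToken writes into every request header and that the remote side checks.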

The startup process of a Service component

A Service component runs in a Server process. When a Service component starts, it first registers itself with ServiceManager and then starts a Binder thread pool that loops, waiting for and handling IPC requests from client processes.

When a Server process wants to publish a Service component, it calls defaultServiceManager()->addService to register itself with ServiceManager, then calls ProcessState::startThreadPool() to start the Binder thread pool, and finally calls IPCThreadState::joinThreadPool() to have the main thread join the pool and handle IPC requests from Client components; a minimal sketch follows.
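
A minimal sketch of such a Server main function (MyService and the service name are hypothetical; error handling omitted):

#include <binder/IPCThreadState.h>
#include <binder/IServiceManager.h>
#include <binder/ProcessState.h>
using namespace android;

int main()
{
    //open /dev/binder and mmap it for this process
    sp<ProcessState> proc(ProcessState::self());
    //register the service with ServiceManager: this is the addService IPC analyzed below
    defaultServiceManager()->addService(String16("my.demo.service"),
                                        new MyService());
    //spawn the binder thread pool ...
    ProcessState::self()->startThreadPool();
    //... and let the main thread join it to serve client requests
    IPCThreadState::self()->joinThreadPool();
    return 0;
}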

Registering a Service component: addService

defaultServiceManager returns a BpServiceManager object, i.e. the ServiceManager proxy object, on which BpServiceManager::addService is then called.

//addService on the proxy object
    virtual status_t addService(const String16& name, const sp<IBinder>& service)
    {
        //pack the IPC data into Parcel objects
        Parcel data, reply;
        //write the protocol (request) header
        data.writeInterfaceToken(IServiceManager::getInterfaceDescriptor());
        data.writeString16(name);
        //flatten the Service component being passed into a binder struct
        data.writeStrongBinder(service);
        //call BpBinder's transact method with the ADD_SERVICE_TRANSACTION code
        status_t err = remote()->transact(ADD_SERVICE_TRANSACTION, data, &reply);
        return err == NO_ERROR ? reply.readExceptionCode() : err;
    }

How the request data is packed into IPC data:

1. writeInterfaceToken writes the Binder IPC request header.

2. writeStrongBinder flattens the Service component to be registered into a flat_binder_object struct, which is later handed to the binder driver.

The Client then sends the data to the Binder driver via the BC_TRANSACTION protocol.

The writeStrongBinder method is as follows:

//pass the service component towards the binder driver
status_t Parcel::writeStrongBinder(const sp<IBinder>& val)
{
    return flatten_binder(ProcessState::self(), val, this);
}

/*
* binder is the object to be passed to the binder driver
*/
status_t flatten_binder(const sp<ProcessState>& proc,
    const sp<IBinder>& binder, Parcel* out)
{
    flat_binder_object obj;

    obj.flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
    if (binder != NULL) {
        IBinder *local = binder->localBinder();
        //a Service is a Binder local object (a BBinder subclass), so local is not null
        if (!local) {
            BpBinder *proxy = binder->remoteBinder();
            if (proxy == NULL) {
                LOGE("null proxy");
            }
            const int32_t handle = proxy ? proxy->handle() : 0;
            obj.type = BINDER_TYPE_HANDLE;
            obj.handle = handle;
            obj.cookie = NULL;
        } else {
        //this branch is taken: set the flat_binder_object's binder field to the
        //local Binder object being added
            obj.type = BINDER_TYPE_BINDER;
            obj.binder = local->getWeakRefs();
            obj.cookie = local;
        }
    } else {
        obj.type = BINDER_TYPE_BINDER;
        obj.binder = NULL;
        obj.cookie = NULL;
    }

    //write the flat_binder_object struct obj into the Parcel out
    return finish_flatten_binder(binder, obj, out);
}

// binder is the local object being added
//flat is the flat_binder_object struct wrapping the binder local object
//out is the Parcel that already holds the request header
inline static status_t finish_flatten_binder(
    const sp<IBinder>& binder, const flat_binder_object& flat, Parcel* out)
{
//write all of it into the Parcel
    return out->writeObject(flat, false);
}
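
For reference, the flat_binder_object struct in the binder headers of this era is roughly:

struct flat_binder_object {
    //BINDER_TYPE_BINDER, BINDER_TYPE_WEAK_BINDER, BINDER_TYPE_HANDLE,
    //BINDER_TYPE_WEAK_HANDLE or BINDER_TYPE_FD
    unsigned long type;
    unsigned long flags;
    union {
        void        *binder;   /* local object: address of its weak refs */
        signed long  handle;   /* remote object: handle value */
    };
    void *cookie;              /* local object: address of the BBinder itself */
};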

Parcel's writeObject implementation is as follows:

//val is the object wrapping the IPC data; the second parameter is false
status_t Parcel::writeObject(const flat_binder_object& val, bool nullMetaData)
{
        //mObjectsCapacity records the capacity of the offsets array mObjects
        //the member mData is the data buffer holding the payload and the Binder objects
        //mObjects is the offsets array: it stores the offset of every Binder object
        //inside mData, from which the object can be located
        //mDataPos is the current write position in the data buffer
    const bool enoughData = (mDataPos+sizeof(val)) <= mDataCapacity;
    const bool enoughObjects = mObjectsSize < mObjectsCapacity;
    //check whether there is enough room to insert the IPC data
    if (enoughData && enoughObjects) {
restart_write:
        *reinterpret_cast<flat_binder_object*>(mData+mDataPos) = val;

        // Need to write meta-data?
        if (nullMetaData || val.binder != NULL) {
         //record in the offsets array mObjects where the Binder object described by val
         //was written into the data buffer mData
            mObjects[mObjectsSize] = mDataPos;
            acquire_object(ProcessState::self(), val, this);
            //mObjectsSize records the next write position in the offsets array mObjects
            mObjectsSize++;
        }

        // remember if it's a file descriptor
        if (val.type == BINDER_TYPE_FD) {
            mHasFds = mFdsKnown = true;
        }

        return finishWrite(sizeof(flat_binder_object));
    }

    ....

    goto restart_write;
}

That packs the data into the Parcel object. Back in addService, status_t err = remote()->transact(ADD_SERVICE_TRANSACTION, data, &reply); is then executed; this is really a call to the transact method of the BpBinder proxy object, which sends a BC_TRANSACTION command to the Binder driver.

BpBinder's transact method is shown below:

status_t BpBinder::transact(
    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    // Once a binder has died, it will never come back to life.
    if (mAlive) {
    //data is the Parcel being passed, wrapping the Service component to register;
    //reply is the out parameter that will hold the IPC result.
    //mHandle identifies the Binder proxy object handling this IPC, i.e. the
    //ServiceManager proxy whose handle is 0.
        status_t status = IPCThreadState::self()->transact(
            mHandle, code, data, reply, flags);
        if (status == DEAD_OBJECT) mAlive = 0;
        return status;
    }

    return DEAD_OBJECT;
}

IPCThreadState::transact is as follows:

status_t IPCThreadState::transact(int32_t handle,
                                  uint32_t code, const Parcel& data,
                                  Parcel* reply, uint32_t flags)
{
    ....
    //if the data checks out
    if (err == NO_ERROR) {
        //write it into the command buffer bound for the binder driver
        err = writeTransactionData(BC_TRANSACTION, flags, handle, code, data, NULL);
    }

    ....
    //a cleared TF_ONE_WAY flag means a synchronous request
    if ((flags & TF_ONE_WAY) == 0) {
        if (reply) {
            //wait for the result and write it into the Parcel object reply
            err = waitForResponse(reply);
        } else {
            Parcel fakeReply;
            err = waitForResponse(&fakeReply);
        }
       ....
    } else {
        err = waitForResponse(NULL, NULL);
    }

    return err;
}

The Server side receives the IPC data via the BR_TRANSACTION protocol

As analyzed earlier, after ServiceManager starts it loops, waiting for work items to enter its todo queue; the function that delivers those work items is binder_thread_read.

The Binder driver's binder_thread_read function is as follows:

//binder_thread_read delivers the work items pending in the todo queues
static int
binder_thread_read(struct binder_proc *proc, struct binder_thread *thread,
    void  __user *buffer, int size, signed long *consumed, int non_block)
{
    void __user *ptr = buffer + *consumed;
    void __user *end = buffer + size;

    int ret = 0;
    ....
    while (1) {
        uint32_t cmd;
        struct binder_transaction_data tr;
        struct binder_work *w;
        struct binder_transaction *t = NULL;
        //if the thread's todo queue is not empty, i.e. there are work items to handle,
        //take the first one into w
        if (!list_empty(&thread->todo))
            w = list_first_entry(&thread->todo, struct binder_work, entry);
        //otherwise check whether the process's work queue has work items; if so, take
        //the first one into w
        else if (!list_empty(&proc->todo) && wait_for_proc_work)
            w = list_first_entry(&proc->todo, struct binder_work, entry);
        else {
            if (ptr - buffer == 4 && !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN)) /* no data added */
                goto retry;
            break;
        }

        ....

        //the binder_transaction struct describes one IPC request
        //t->buffer->target_node points to ServiceManager's Binder entity object binder_context_mgr_node
        if (t->buffer->target_node) {
            struct binder_node *target_node = t->buffer->target_node;
            //copy the entity object's details into the binder_transaction_data struct tr
            //so the target thread can receive them along with the BR_TRANSACTION command
            tr.target.ptr = target_node->ptr;
            tr.cookie =  target_node->cookie;
            t->saved_priority = task_nice(current);
            if (t->priority < target_node->min_priority &&
                !(t->flags & TF_ONE_WAY))
                binder_set_nice(t->priority);
            else if (!(t->flags & TF_ONE_WAY) ||
                 t->saved_priority > target_node->min_priority)
                binder_set_nice(target_node->min_priority);
            cmd = BR_TRANSACTION;
        } else {
            tr.target.ptr = NULL;
            tr.cookie = NULL;
            cmd = BR_REPLY;
        }
        //copy the data in the binder_transaction struct t into the binder_transaction_data
        //struct tr; here the code value is ADD_SERVICE_TRANSACTION
        tr.code = t->code;
        tr.flags = t->flags;
        tr.sender_euid = t->sender_euid;

        ....
        //pack the payload into the binder_transaction_data struct tr
        tr.data_size = t->buffer->data_size;
        tr.offsets_size = t->buffer->offsets_size;
        tr.data.ptr.buffer = (void *)t->buffer->data + proc->user_buffer_offset;
        tr.data.ptr.offsets = tr.data.ptr.buffer + ALIGN(t->buffer->data_size, sizeof(void *));
        //copy tr together with the BR_TRANSACTION command into the buffer supplied by the target thread
        if (put_user(cmd, (uint32_t __user *)ptr))
            return -EFAULT;
        ptr += sizeof(uint32_t);
        if (copy_to_user(ptr, &tr, sizeof(tr)))
            return -EFAULT;
        ptr += sizeof(tr);

        binder_stat_br(proc, thread, cmd);
        if (binder_debug_mask & BINDER_DEBUG_TRANSACTION)
            printk(KERN_INFO "binder: %d:%d %s %d %d:%d, cmd %d"
                "size %zd-%zd ptr %p-%p\n",
                   proc->pid, thread->pid,
                   (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" : "BR_REPLY",
                   t->debug_id, t->from ? t->from->proc->pid : 0,
                   t->from ? t->from->pid : 0, cmd,
                   t->buffer->data_size, t->buffer->offsets_size,
                   tr.data.ptr.buffer, tr.data.ptr.offsets);
        //remove the finished work item from the todo queue
        list_del(&t->work.entry);
        t->buffer->allow_user_free = 1;
        //if the current command is BR_TRANSACTION and the call is synchronous
        if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
        //push the transaction onto the target thread's transaction stack so the target
        //thread can process the task
            t->to_parent = thread->transaction_stack;
            t->to_thread = thread;
            thread->transaction_stack = t;
        } else {
            t->buffer->transaction = NULL;
            kfree(t);
            binder_stats.obj_deleted[BINDER_STAT_TRANSACTION]++;
        }
        break;
    }

done:

    *consumed = ptr - buffer;
    if (proc->requested_threads + proc->ready_threads == 0 &&
        proc->requested_threads_started < proc->max_threads &&
        (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
         BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
         /*spawn a new thread if we leave this out */) {
        proc->requested_threads++;
        if (binder_debug_mask & BINDER_DEBUG_THREADS)
            printk(KERN_INFO "binder: %d:%d BR_SPAWN_LOOPER\n",
                   proc->pid, thread->pid);
        if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
            return -EFAULT;
    }
    return 0;
}

For reference, two driver helpers follow: binder_release_work discards work items left unprocessed when their queue is torn down, and binder_get_thread (called at the start of binder_ioctl above) looks up, or lazily creates, the binder_thread struct for the calling thread.

static void binder_release_work(struct list_head *list)
{
    struct binder_work *w;
    while (!list_empty(list)) {
        w = list_first_entry(list, struct binder_work, entry);
        list_del_init(&w->entry);
        switch (w->type) {
        case BINDER_WORK_TRANSACTION: {
            struct binder_transaction *t = container_of(w, struct binder_transaction, work);
            if (t->buffer->target_node && !(t->flags & TF_ONE_WAY))
                binder_send_failed_reply(t, BR_DEAD_REPLY);
        } break;
        case BINDER_WORK_TRANSACTION_COMPLETE: {
            kfree(w);
            binder_stats.obj_deleted[BINDER_STAT_TRANSACTION_COMPLETE]++;
        } break;
        default:
            break;
        }
    }

}

static struct binder_thread *binder_get_thread(struct binder_proc *proc)
{
    struct binder_thread *thread = NULL;
    struct rb_node *parent = NULL;
    struct rb_node **p = &proc->threads.rb_node;

    while (*p) {
        parent = *p;
        thread = rb_entry(parent, struct binder_thread, rb_node);

        if (current->pid < thread->pid)
            p = &(*p)->rb_left;
        else if (current->pid > thread->pid)
            p = &(*p)->rb_right;
        else
            break;
    }
    if (*p == NULL) {
        thread = kzalloc(sizeof(*thread), GFP_KERNEL);
        if (thread == NULL)
            return NULL;
        binder_stats.obj_created[BINDER_STAT_THREAD]++;
        thread->proc = proc;
        thread->pid = current->pid;
        init_waitqueue_head(&thread->wait);
        INIT_LIST_HEAD(&thread->todo);
        rb_link_node(&thread->rb_node, parent, p);
        rb_insert_color(&thread->rb_node, &proc->threads);
        thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
        thread->return_error = BR_OK;
        thread->return_error2 = BR_OK;
    }
    return thread;
}

As noted before, the target thread, i.e. ServiceManager's thread, loops waiting for tasks that clients send to the binder driver and that the driver then forwards to ServiceManager, and it processes them via binder_parse.

The binder_parse code is as follows:

int binder_parse(struct binder_state *bs, struct binder_io *bio,
                 uint32_t *ptr, uint32_t size, binder_handler func)
{
    int r = 1;
    uint32_t *end = ptr + (size / 4);

    while (ptr < end) {
        uint32_t cmd = *ptr++;
#if TRACE
        fprintf(stderr,"%s:\n", cmd_name(cmd));
#endif
        switch(cmd) {
        ....
        case BR_TRANSACTION: {
            //the binder_txn struct describes the IPC data
            struct binder_txn *txn = (void *) ptr;
            if ((end - ptr) * sizeof(uint32_t) < sizeof(struct binder_txn)) {
                LOGE("parse: txn too small!\n");
                return -1;
            }
            binder_dump_txn(txn);
            if (func) {
                unsigned rdata[256/4];
                //a binder_io struct points at an IPC data buffer
                //msg parses the data read from the binder driver
                //reply collects the reply data into the buffer rdata so it can be
                //returned to the binder driver
                struct binder_io msg;
                struct binder_io reply;
                int res;
                //initialize the binder_io structs
                bio_init(&reply, rdata, sizeof(rdata), 4);
                bio_init_from_txn(&msg, txn);
                //let func handle the BR_TRANSACTION data carried in msg
                res = func(bs, txn, &msg, &reply);
                binder_send_reply(bs, &reply, txn->data, res);
            }
            ptr += sizeof(*txn) / sizeof(uint32_t);
            break;
        }
        ...
    }

    return r;
}
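
For reference, the binder_txn struct used by the servicemanager source of this era mirrors the driver's binder_transaction_data; roughly:

struct binder_txn {
    void *target;        /* target object; svcmgr_handle, i.e. 0, here */
    void *cookie;
    uint32_t code;       /* e.g. SVC_MGR_ADD_SERVICE */
    uint32_t flags;
    uint32_t sender_pid;
    uint32_t sender_euid;
    uint32_t data_size;
    uint32_t offs_size;
    void *data;          /* payload buffer */
    void *offs;          /* offsets of the binder objects inside data */
};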

func is a function pointer to svcmgr_handler, the method that handles BR_TRANSACTION:

int svcmgr_handler(struct binder_state *bs,
                   struct binder_txn *txn,
                   struct binder_io *msg,
                   struct binder_io *reply)
{
    struct svcinfo *si;
    uint16_t *s;
    unsigned len;
    void *ptr;
    uint32_t strict_policy;

    //verify this is a legitimate IPC request aimed at ServiceManager
    if (txn->target != svcmgr_handle)
        return -1;

   ....
    switch(txn->code) {
   ....
    case SVC_MGR_ADD_SERVICE:
        //take the Service component's name out of the binder_io struct
        s = bio_get_string16(msg, &len);
        //fetch the handle of the binder reference object; this reference object was
        //created by the Binder driver and refers to the Service component being registered
        ptr = bio_get_ref(msg);
        //register the component with ServiceManager
        if (do_add_service(bs, s, len, ptr, txn->sender_euid))
            return -1;
        break;

   ....

    bio_put_uint32(reply, 0);
    return 0;
}

The do_add_service function is as follows:

//s is the name of the component being registered
//uid is the uid of the Service process doing the registering
int do_add_service(struct binder_state *bs,
                   uint16_t *s, unsigned len,
                   void *ptr, unsigned uid)
{
    struct svcinfo *si;
//    LOGI("add_service('%s',%p) uid=%d\n", str8(s), ptr, uid);
    ...
    //may the Service component being registered actually be registered?
    if (!svc_can_register(uid, s)) {
        LOGE("add_service('%s',%p) uid=%d - PERMISSION DENIED\n",
             str8(s), ptr, uid);
        return -1;
    }
    //check whether the component is already registered
    si = find_svc(s, len);
    if (si) {
        if (si->ptr) {
            LOGE("add_service('%s',%p) uid=%d - ALREADY REGISTERED\n",
                 str8(s), ptr, uid);
            return -1;
        }
        si->ptr = ptr;
    } else {
        //allocate a svcinfo struct describing the Service component about to be registered
        si = malloc(sizeof(*si) + (len + 1) * sizeof(uint16_t));
        if (!si) {
            LOGE("add_service('%s',%p) uid=%d - OUT OF MEMORY\n",
                 str8(s), ptr, uid);
            return -1;
        }
        si->ptr = ptr;
        si->len = len;
        memcpy(si->name, s, (len + 1) * sizeof(uint16_t));
        si->name[len] = '\0';
        si->death.func = svcinfo_death;
        si->death.ptr = si;
        si->next = svclist;
        //link this Service component into svclist; at this point the Service component
        //is registered with ServiceManager
        svclist = si;
    }

    binder_acquire(bs, ptr);
    binder_link_to_death(bs, ptr, &si->death);
    return 0;
}
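
For reference, the svcinfo struct that records each registered service looks roughly like this in the service_manager.c of this era:

struct svcinfo
{
    struct svcinfo *next;       /* singly linked list rooted at svclist */
    void *ptr;                  /* handle of the service's binder reference object */
    struct binder_death death;  /* death-notification record, see svcinfo_death */
    unsigned len;               /* name length in uint16_t units */
    uint16_t name[0];           /* UTF-16 service name stored inline */
};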

At this point the Service component has been successfully registered with ServiceManager. ServiceManager reports the success to the Binder driver with BC_REPLY, and the Binder driver then delivers the result to the requesting Service component with BR_REPLY; the Service component is now fully registered.


Obtaining a Service component's proxy object: getService

A Binder proxy object is of type BpBinder. To obtain the proxy for a Service component, the Client must first obtain from the Binder driver the handle of a Binder reference object that refers to the Service component, then create a Binder proxy object from that handle, and finally wrap the Binder proxy object into the Service component's own proxy object.

To do so, the Client calls defaultServiceManager()->getService(string describing the Service component) to obtain the Binder proxy object, and then XXXService::asInterface(the Binder proxy object) to obtain the XXXService proxy object.

BpServiceManager's getService method is as follows:

 virtual sp<IBinder> getService(const String16& name) const
    {
        unsigned n;
        for (n = 0; n < 5; n++){
            //call checkService to obtain the proxy object of the Service named name
            sp<IBinder> svc = checkService(name);
            if (svc != NULL) return svc;
            LOGI("Waiting for service %s...\n", String8(name).string());
            sleep(1);
        }
        return NULL;
    }


 virtual sp<IBinder> checkService( const String16& name) const
    {
        Parcel data, reply;
        data.writeInterfaceToken(IServiceManager::getInterfaceDescriptor());
        data.writeString16(name);
        //BpServiceManager sends the CHECK_SERVICE_TRANSACTION command
        remote()->transact(CHECK_SERVICE_TRANSACTION, data, &reply);
        return reply.readStrongBinder();
    }

As shown above, the IPC request to ServiceManager eventually reaches the svcmgr_handler method:

int svcmgr_handler(struct binder_state *bs,
                   struct binder_txn *txn,
                   struct binder_io *msg,
                   struct binder_io *reply)
{
    struct svcinfo *si;
    uint16_t *s;
    unsigned len;
    void *ptr;
    uint32_t strict_policy;

....

    switch(txn->code) {
    case SVC_MGR_CHECK_SERVICE:
        //take the Service component's name out of the binder_io struct
        s = bio_get_string16(msg, &len);
        //look up the svcinfo struct of the Service component in svclist;
        //ptr receives the handle of the Service component's Binder reference object
        ptr = do_find_service(bs, s, len);
        if (!ptr)
            break;
            //once found, write the reference object into reply so it is returned to the
            //binder driver and then to the calling process
        bio_put_ref(reply, ptr);
        return 0;

    ...
    }

    bio_put_uint32(reply, 0);
    return 0;
}

The asInterface method is as follows:

 android::sp<I##INTERFACE> I##INTERFACE::asInterface(                \
            const android::sp<android::IBinder>& obj)                   \
    {                                                                   \
        android::sp<I##INTERFACE> intr;                                 \
        if (obj != NULL) {                                              \
            intr = static_cast<I##INTERFACE*>(                          \
                obj->queryLocalInterface(                               \
                        I##INTERFACE::descriptor).get());               \
            if (intr == NULL) {      
                //create BpXXXX, the Service component's proxy object, and return it                                   \
                intr = new Bp##INTERFACE(obj);                          \
            }                                                           \
        }                                                               \
        return intr;                                                    \
    }              

With that, the Client side has obtained the Service component's proxy object.
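
Putting the pieces together, a client-side lookup might look like this (IMyService and the service name are hypothetical):

//fetch the Binder proxy object (a BpBinder) registered under "my.demo.service"
sp<IBinder> binder =
        defaultServiceManager()->getService(String16("my.demo.service"));
//wrap it into the service-specific proxy (BpMyService) via asInterface
sp<IMyService> service = interface_cast<IMyService>(binder);
//calls made on service now run as Binder IPC transactions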


Reprinted from blog.csdn.net/hfyd_/article/details/82079062