
An Analysis of the ServiceManager Startup Process



The ServiceManager entry point

The servicemanager process starts from main() in /cmds/servicemanager/Service_manager.c:

int main(int argc, char **argv)
{
    struct binder_state *bs;
    void *svcmgr = BINDER_SERVICE_MANAGER;

    bs = binder_open(128*1024);

    if (binder_become_context_manager(bs)) {
        LOGE("cannot become context manager (%s)\n", strerror(errno));
        return -1;
    }

    svcmgr_handle = svcmgr;
    binder_loop(bs, svcmgr_handler);
    return 0;
}


As the code shows, once ServiceManager starts it first opens the binder device file, then calls binder_become_context_manager to register itself as the context manager, i.e. the manager of all services, and finally enters binder_loop, an infinite loop in which it waits for clients to issue requests.

1. Opening the Binder driver file and initializing

The code lives in /cmds/servicemanager/Binder.c:

struct binder_state *binder_open(unsigned mapsize)
{
    struct binder_state *bs;

    bs = malloc(sizeof(*bs));
    if (!bs) {
        errno = ENOMEM;
        return 0;
    }

    bs->fd = open("/dev/binder", O_RDWR);
    if (bs->fd < 0) {
        fprintf(stderr,"binder: cannot open device (%s)\n",
                strerror(errno));
        goto fail_open;
    }

    bs->mapsize = mapsize;
    bs->mapped = mmap(NULL, mapsize, PROT_READ, MAP_PRIVATE, bs->fd, 0);
    if (bs->mapped == MAP_FAILED) {
        fprintf(stderr,"binder: cannot map device (%s)\n",
                strerror(errno));
        goto fail_map;
    }

    /* TODO: check version */

    return bs;

fail_map:
    close(bs->fd);
fail_open:
    free(bs);
    return 0;
}


binder_open calls open() on the device file and then mmap()s it into the process's address space. The open() library call issues the open system call, which the kernel ultimately routes to the binder driver's binder_open function, so we move to drivers/staging/android/Binder.c to look at the implementation of binder_open. The sketch below shows roughly how that routing gets registered.
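How does an open() on /dev/binder end up in the driver's binder_open? When the driver initializes, it registers a file_operations table on a misc device. The snippet below is an abridged sketch of that registration as I recall it from this era of the staging driver; treat the exact fields and order as illustrative rather than authoritative.

/* Abridged sketch: each syscall on /dev/binder is dispatched
 * through this file_operations table. */
static const struct file_operations binder_fops = {
    .owner          = THIS_MODULE,
    .poll           = binder_poll,
    .unlocked_ioctl = binder_ioctl,   /* ioctl() -> binder_ioctl */
    .mmap           = binder_mmap,    /* mmap()  -> binder_mmap  */
    .open           = binder_open,    /* open()  -> binder_open  */
    .flush          = binder_flush,
    .release        = binder_release,
};

static struct miscdevice binder_miscdev = {
    .minor = MISC_DYNAMIC_MINOR,
    .name  = "binder",                /* creates /dev/binder */
    .fops  = &binder_fops,
};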

static int binder_open(struct inode *nodp, struct file *filp)
{
    struct binder_proc *proc;

    binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
                 current->group_leader->pid, current->pid);

    proc = kzalloc(sizeof(*proc), GFP_KERNEL);
    if (proc == NULL)
        return -ENOMEM;
    get_task_struct(current);
    proc->tsk = current;
    INIT_LIST_HEAD(&proc->todo);
    init_waitqueue_head(&proc->wait);
    proc->default_priority = task_nice(current);
    mutex_lock(&binder_lock);
    binder_stats_created(BINDER_STAT_PROC);
    hlist_add_head(&proc->proc_node, &binder_procs);
    proc->pid = current->group_leader->pid;
    INIT_LIST_HEAD(&proc->delivered_death);
    filp->private_data = proc;
    mutex_unlock(&binder_lock);

    if (binder_debugfs_dir_entry_proc) {
        char strbuf[11];
        snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
        proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
                binder_debugfs_dir_entry_proc, proc, &binder_proc_fops);
    }

    return 0;
}


This function's main job is to allocate and initialize a binder_proc structure, which records the current process (here, the ServiceManager process) along with its todo list, wait queue, and so on, and to stash that binder_proc in filp->private_data. (In Linux a file is represented by a single inode structure, but one inode can be referenced by many file structures; a file structure is per-open and therefore per-process.) Once binder_open returns, user space immediately calls mmap, which by the same routing ends up in the driver's binder_mmap. An abridged view of binder_proc is sketched below.
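For reference, here is a hedged, abridged sketch of the binder_proc fields this article touches on; the real structure in drivers/staging/android/binder.c has many more members, and the exact types shown are from memory.

struct binder_proc {
    struct hlist_node proc_node;    /* link in the global binder_procs list */
    struct rb_root threads;         /* binder_thread nodes, keyed by pid */
    struct rb_root nodes;           /* binder_node objects this proc owns */
    int pid;
    struct task_struct *tsk;
    struct vm_area_struct *vma;     /* the user-space mapping from mmap */
    void *buffer;                   /* kernel address of the shared buffer */
    ptrdiff_t user_buffer_offset;   /* user address - kernel address */
    size_t buffer_size;
    struct list_head todo;          /* pending work for this process */
    wait_queue_head_t wait;         /* threads sleep here in binder_thread_read */
    /* ... buffer accounting, refs, stats, etc. elided ... */
};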

static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
    int ret;
    struct vm_struct *area;
    struct binder_proc *proc = filp->private_data;
    const char *failure_string;
    struct binder_buffer *buffer;

    if ((vma->vm_end - vma->vm_start) > SZ_4M)
        vma->vm_end = vma->vm_start + SZ_4M;

    binder_debug(BINDER_DEBUG_OPEN_CLOSE,
                 "binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
                 proc->pid, vma->vm_start, vma->vm_end,
                 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
                 (unsigned long)pgprot_val(vma->vm_page_prot));

    if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
        ret = -EPERM;
        failure_string = "bad vm_flags";
        goto err_bad_arg;
    }
    vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;

    if (proc->buffer) {
        ret = -EBUSY;
        failure_string = "already mapped";
        goto err_already_mapped;
    }

    area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
    if (area == NULL) {
        ret = -ENOMEM;
        failure_string = "get_vm_area";
        goto err_get_vm_area_failed;
    }
    proc->buffer = area->addr;
    proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;

#ifdef CONFIG_CPU_CACHE_VIPT
    if (cache_is_vipt_aliasing()) {
        while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) {
            printk(KERN_INFO "binder_mmap: %d %lx-%lx maps %p bad alignment\n",
                   proc->pid, vma->vm_start, vma->vm_end, proc->buffer);
            vma->vm_start += PAGE_SIZE;
        }
    }
#endif
    proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL);
    if (proc->pages == NULL) {
        ret = -ENOMEM;
        failure_string = "alloc page array";
        goto err_alloc_pages_failed;
    }
    proc->buffer_size = vma->vm_end - vma->vm_start;

    vma->vm_ops = &binder_vm_ops;
    vma->vm_private_data = proc;

    if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) {
        ret = -ENOMEM;
        failure_string = "alloc small buf";
        goto err_alloc_small_buf_failed;
    }
    buffer = proc->buffer;
    INIT_LIST_HEAD(&proc->buffers);
    list_add(&buffer->entry, &proc->buffers);
    buffer->free = 1;
    binder_insert_free_buffer(proc, buffer);
    proc->free_async_space = proc->buffer_size / 2;
    barrier();
    proc->files = get_files_struct(current);
    proc->vma = vma;

    /*printk(KERN_INFO "binder_mmap: %d %lx-%lx maps %p\n",
             proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/
    return 0;

err_alloc_small_buf_failed:
    kfree(proc->pages);
    proc->pages = NULL;
err_alloc_pages_failed:
    vfree(proc->buffer);
    proc->buffer = NULL;
err_get_vm_area_failed:
err_already_mapped:
err_bad_arg:
    printk(KERN_ERR "binder_mmap: %d %lx-%lx %s failed %d\n",
           proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
    return ret;
}


The main job of binder_mmap is to reserve a kernel virtual address range for the process and record it in binder_proc, so that later the binder driver can map data it has copied into physical memory straight into this process's corresponding virtual space. A minimal sketch of the resulting address arithmetic follows.
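The key trick is that proc->buffer (a kernel virtual address) and vma->vm_start (a user virtual address) end up backed by the same physical pages, so the driver only has to remember their constant offset. The helper below is hypothetical (it does not exist in the driver) and just illustrates the arithmetic implied by the fields set above.

/* Hypothetical helper illustrating the double mapping: binder_mmap
 * computed proc->user_buffer_offset = vma->vm_start - proc->buffer,
 * so any kernel address inside the buffer converts to the user-space
 * address the mapped process sees by adding that offset. */
static void __user *binder_kaddr_to_uaddr(struct binder_proc *proc, void *kaddr)
{
    return (void __user *)((uintptr_t)kaddr + proc->user_buffer_offset);
}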

At this point the connection between servicemanager and the binder driver is established, and we return to servicemanager's main function. Next, ServiceManager sets itself up as the manager, i.e. the special binder with handle 0, so that any process can communicate with ServiceManager simply by addressing a binder handle of 0 (a client-side sketch of this follows).
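To make the "handle 0" idea concrete, here is a sketch of how a client queries ServiceManager for a service, modeled on the svcmgr_lookup helper that ships alongside servicemanager (in bctest.c of the same directory). The details are reproduced from memory, so treat names and message layout as illustrative.

/* Sketch: look up a service by name through ServiceManager.
 * target is BINDER_SERVICE_MANAGER, i.e. handle 0; the driver
 * resolves it to binder_context_mgr_node. */
void *svcmgr_lookup(struct binder_state *bs, void *target, const char *name)
{
    void *ptr;
    unsigned iodata[512/4];
    struct binder_io msg, reply;

    bio_init(&msg, iodata, sizeof(iodata), 4);
    bio_put_uint32(&msg, 0);              /* strict-mode header */
    bio_put_string16_x(&msg, SVC_MGR_NAME);
    bio_put_string16_x(&msg, name);

    if (binder_call(bs, &msg, &reply, target, SVC_MGR_CHECK_SERVICE))
        return 0;

    ptr = bio_get_ref(&reply);            /* the service's binder handle */
    if (ptr)
        binder_acquire(bs, ptr);

    binder_done(bs, &msg, &reply);
    return ptr;
}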

2. Sending a command to the Binder driver to become the manager

The binder driver gives us the foundation for IPC, the carrier so to speak, but a carrier alone is not enough: for both sides to communicate smoothly and unambiguously they must agree on a protocol and abide by it, so that each can understand the other's intent and get the job done. To that end the driver exposes ioctl(struct file *filp, unsigned int cmd, unsigned long arg), where cmd names the command and arg points to whatever data that command requires. An abridged list of the command codes is shown below.
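For reference, the command numbers are defined with the standard _IO* macros in the driver's binder.h. The abridged list below is quoted from memory of this era's header, so verify against your tree.

#define BINDER_WRITE_READ        _IOWR('b', 1, struct binder_write_read)
#define BINDER_SET_MAX_THREADS   _IOW('b', 5, size_t)
#define BINDER_SET_CONTEXT_MGR   _IOW('b', 7, int)
#define BINDER_THREAD_EXIT       _IOW('b', 8, int)
#define BINDER_VERSION           _IOWR('b', 9, struct binder_version)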

After servicemanager's main has opened the binder device and finished the mmap initialization, it runs binder_become_context_manager to tell the driver it is taking on the manager role.

int binder_become_context_manager(struct binder_state *bs)
{
    return ioctl(bs->fd, BINDER_SET_CONTEXT_MGR, 0);
}


The function calls ioctl with the command BINDER_SET_CONTEXT_MGR. Because it only sets state in the driver and carries no extra data, the third argument is 0. Next we step into the driver's binder_ioctl to see how this command is parsed; the implementation is fairly long, so only the relevant excerpt is quoted.

static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
    int ret;
    struct binder_proc *proc = filp->private_data;
    struct binder_thread *thread;
    unsigned int size = _IOC_SIZE(cmd);
    void __user *ubuf = (void __user *)arg;

    ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
    if (ret)
        return ret;

    mutex_lock(&binder_lock);
    thread = binder_get_thread(proc);
    if (thread == NULL) {
        ret = -ENOMEM;
        goto err;
    }

    switch (cmd) {
    case BINDER_SET_CONTEXT_MGR:
        if (binder_context_mgr_node != NULL) {
            printk(KERN_ERR "binder: BINDER_SET_CONTEXT_MGR already set\n");
            ret = -EBUSY;
            goto err;
        }
        if (binder_context_mgr_uid != -1) {
            if (binder_context_mgr_uid != current->cred->euid) {
                printk(KERN_ERR "binder: BINDER_SET_"
                       "CONTEXT_MGR bad uid %d != %d\n",
                       current->cred->euid,
                       binder_context_mgr_uid);
                ret = -EPERM;
                goto err;
            }
        } else
            binder_context_mgr_uid = current->cred->euid;
        binder_context_mgr_node = binder_new_node(proc, NULL, NULL);
        if (binder_context_mgr_node == NULL) {
            ret = -ENOMEM;
            goto err;
        }
        binder_context_mgr_node->local_weak_refs++;
        binder_context_mgr_node->local_strong_refs++;
        binder_context_mgr_node->has_strong_ref = 1;
        binder_context_mgr_node->has_weak_ref = 1;
        break;
    /* ... other commands elided ... */
    }
    ret = 0;
    /* ... unlock and the err: cleanup path elided ... */
    return ret;
}


binder_ioctl first recovers the calling process's binder_proc object from filp->private_data ("object" is Java terminology; binder_proc is really a C struct instance, similar in spirit to an instance of a Java class, so let's loosely call it an object). It was built when the process opened the binder driver. Since binder_proc records process-level information, it also has to track the process's threads, because threads are the basic unit the CPU actually schedules; they live in binder_proc.threads, a red-black tree. binder_get_thread then either fetches the calling thread's existing binder_thread from that tree or creates and caches a new one. From this point on, the driver knows both the calling process and the calling thread, with the thread information cached; a sketch of that lookup follows.
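Here is a hedged, abridged sketch of binder_get_thread as it looks in this era of the driver; details are from memory, and the real function also initializes the thread's wait queue, todo list, and looper state.

static struct binder_thread *binder_get_thread(struct binder_proc *proc)
{
    struct binder_thread *thread = NULL;
    struct rb_node *parent = NULL;
    struct rb_node **p = &proc->threads.rb_node;

    /* Walk the red-black tree keyed by thread pid. */
    while (*p) {
        parent = *p;
        thread = rb_entry(parent, struct binder_thread, rb_node);
        if (current->pid < thread->pid)
            p = &(*p)->rb_left;
        else if (current->pid > thread->pid)
            p = &(*p)->rb_right;
        else
            break;                  /* found the calling thread */
    }
    if (*p == NULL) {
        /* First time this thread has talked to the driver: allocate
         * and insert a new binder_thread. */
        thread = kzalloc(sizeof(*thread), GFP_KERNEL);
        if (thread == NULL)
            return NULL;
        thread->pid = current->pid;
        rb_link_node(&thread->rb_node, parent, p);
        rb_insert_color(&thread->rb_node, &proc->threads);
        /* ... todo list / wait queue initialization elided ... */
    }
    return thread;
}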

Next the command is dispatched; here cmd is the BINDER_SET_CONTEXT_MGR that servicemanager passed in. Because binder_context_mgr_node holds nothing yet, the driver calls binder_new_node(proc, NULL, NULL) to create a binder_node, stores it in binder_context_mgr_node, and bumps its reference counts so it cannot be freed. Now let's look at the implementation of binder_new_node.

static struct binder_node *binder_new_node(struct binder_proc *proc, void __user *ptr,
                                           void __user *cookie)
{
    struct rb_node **p = &proc->nodes.rb_node;
    struct rb_node *parent = NULL;
    struct binder_node *node;

    while (*p) {
        parent = *p;
        node = rb_entry(parent, struct binder_node, rb_node);

        if (ptr < node->ptr)
            p = &(*p)->rb_left;
        else if (ptr > node->ptr)
            p = &(*p)->rb_right;
        else
            return NULL;
    }

    node = kzalloc(sizeof(*node), GFP_KERNEL);
    if (node == NULL)
        return NULL;
    binder_stats_created(BINDER_STAT_NODE);
    rb_link_node(&node->rb_node, parent, p);
    rb_insert_color(&node->rb_node, &proc->nodes);
    node->debug_id = ++binder_last_id;
    node->proc = proc;
    node->ptr = ptr;
    node->cookie = cookie;
    node->work.type = BINDER_WORK_NODE;
    INIT_LIST_HEAD(&node->work.entry);
    INIT_LIST_HEAD(&node->async_todo);
    binder_debug(BINDER_DEBUG_INTERNAL_REFS,
                 "binder: %d:%d node %d u%p c%p created\n",
                 proc->pid, current->pid, node->debug_id,
                 node->ptr, node->cookie);
    return node;
}


binder_node is the basic unit the binder driver uses to manage IPC endpoints. An inter-process call ultimately comes down to locating the right binder_node and using the process information and work queue it maintains to wake the target process and hand it the task. Closely tied to binder_node is another structure, binder_ref, which, as the name suggests, describes a reference to a binder_node. binder_ref.desc plays a role like an IP address in a network request: a client only needs to know the desc value, and the binder driver can resolve it to the corresponding binder_node and initiate the IPC. Abridged sketches of both structures follow.
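For orientation, here are hedged, abridged sketches of the two structures; field selection and types are from memory of this era's driver, so treat them as illustrative.

struct binder_node {
    int debug_id;
    struct binder_work work;        /* queued as BINDER_WORK_NODE */
    struct rb_node rb_node;         /* in the owning proc's nodes tree */
    struct binder_proc *proc;       /* the process that owns this node */
    void __user *ptr;               /* user-space object the node stands for */
    void __user *cookie;
    struct list_head async_todo;    /* pending one-way transactions */
    /* ... reference-count bookkeeping elided ... */
};

struct binder_ref {
    int debug_id;
    struct rb_node rb_node_desc;    /* in the holder's refs_by_desc tree */
    struct binder_proc *proc;       /* the process holding the reference */
    struct binder_node *node;       /* the node it points to */
    uint32_t desc;                  /* the handle; 0 is reserved for ServiceManager */
    /* ... strong/weak counts elided ... */
};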

3. Entering the loop and waiting for requests

After telling the binder driver it has become the manager, servicemanager calls binder_loop(bs, svcmgr_handler) and enters an infinite loop, waiting for requests to arrive; svcmgr_handler is the callback that will handle them.

void binder_loop(struct binder_state *bs, binder_handler func)
{
    int res;
    struct binder_write_read bwr;
    unsigned readbuf[32];

    bwr.write_size = 0;
    bwr.write_consumed = 0;
    bwr.write_buffer = 0;

    readbuf[0] = BC_ENTER_LOOPER;
    binder_write(bs, readbuf, sizeof(unsigned));

    for (;;) {
        bwr.read_size = sizeof(readbuf);
        bwr.read_consumed = 0;
        bwr.read_buffer = (unsigned) readbuf;

        res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);

        if (res < 0) {
            LOGE("binder_loop: ioctl failed (%s)\n", strerror(errno));
            break;
        }

        res = binder_parse(bs, 0, readbuf, bwr.read_consumed, func);
        if (res == 0) {
            LOGE("binder_loop: unexpected reply?!\n");
            break;
        }
        if (res < 0) {
            LOGE("binder_loop: io error %d %s\n", res, strerror(errno));
            break;
        }
    }
}


Internally, binder_loop also sends its commands to the binder driver through ioctl. It first calls binder_write to push data down to the driver; binder_write runs as follows:

int binder_write(struct binder_state *bs, void *data, unsigned len)
{
    struct binder_write_read bwr;
    int res;
    bwr.write_size = len;
    bwr.write_consumed = 0;
    bwr.write_buffer = (unsigned) data;
    bwr.read_size = 0;
    bwr.read_consumed = 0;
    bwr.read_buffer = 0;
    res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
    if (res < 0) {
        fprintf(stderr,"binder_write: ioctl failed (%s)\n",
                strerror(errno));
    }
    return res;
}


The command sent this time is BINDER_WRITE_READ, whose payload is a struct binder_write_read. Let's first look at that structure, defined in the driver's header drivers/staging/android/binder.h:

struct binder_write_read {
    signed long   write_size;     /* bytes to write */
    signed long   write_consumed; /* bytes consumed by driver */
    unsigned long write_buffer;
    signed long   read_size;      /* bytes to read */
    signed long   read_consumed;  /* bytes consumed by driver */
    unsigned long read_buffer;
};


In binder_write_read, write_buffer and read_buffer hold the user-space addresses of the data to be written and read. So in this ioctl call, arg is a binder_write_read whose write_buffer contains the single command BC_ENTER_LOOPER, and cmd is BINDER_WRITE_READ. We now follow it into the driver's binder_ioctl implementation.

static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
    int ret;
    struct binder_proc *proc = filp->private_data;
    struct binder_thread *thread;
    unsigned int size = _IOC_SIZE(cmd);
    void __user *ubuf = (void __user *)arg;

    ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
    if (ret)
        return ret;

    mutex_lock(&binder_lock);
    thread = binder_get_thread(proc);
    if (thread == NULL) {
        ret = -ENOMEM;
        goto err;
    }

    switch (cmd) {
    case BINDER_WRITE_READ: {
        struct binder_write_read bwr;
        if (size != sizeof(struct binder_write_read)) {
            ret = -EINVAL;
            goto err;
        }
        if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
            ret = -EFAULT;
            goto err;
        }
        binder_debug(BINDER_DEBUG_READ_WRITE,
                     "binder: %d:%d write %ld at %08lx, read %ld at %08lx\n",
                     proc->pid, thread->pid, bwr.write_size, bwr.write_buffer,
                     bwr.read_size, bwr.read_buffer);

        if (bwr.write_size > 0) {
            ret = binder_thread_write(proc, thread, (void __user *)bwr.write_buffer, bwr.write_size, &bwr.write_consumed);
            if (ret < 0) {
                bwr.read_consumed = 0;
                if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
                    ret = -EFAULT;
                goto err;
            }
        }
        if (bwr.read_size > 0) {
            ret = binder_thread_read(proc, thread, (void __user *)bwr.read_buffer, bwr.read_size, &bwr.read_consumed, filp->f_flags & O_NONBLOCK);
            if (!list_empty(&proc->todo))
                wake_up_interruptible(&proc->wait);
            if (ret < 0) {
                if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
                    ret = -EFAULT;
                goto err;
            }
        }
        binder_debug(BINDER_DEBUG_READ_WRITE,
                     "binder: %d:%d wrote %ld of %ld, read return %ld of %ld\n",
                     proc->pid, thread->pid, bwr.write_consumed, bwr.write_size,
                     bwr.read_consumed, bwr.read_size);
        if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
            ret = -EFAULT;
            goto err;
        }
        break;
    }
    /* ... other commands elided ... */
    }
    ret = 0;
    /* ... unlock and the err: cleanup path elided ... */
    return ret;
}


The part of binder_ioctl before the switch has already been analyzed above:

1. It obtains the binder_proc and binder_thread objects for the current process; since the cmd passed in is BINDER_WRITE_READ, execution enters the BINDER_WRITE_READ branch.

2. The branch first copies the incoming user-space binder_write_read object into the kernel space the binder driver runs in, referring to it as bwr.

3. A BINDER_WRITE_READ is processed by first running binder_thread_write to deliver the caller's request to the target, then running binder_thread_read to fetch the result. In this call bwr.write_size > 0 (the buffer holds BC_ENTER_LOOPER), so binder_thread_write executes.

int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
                        void __user *buffer, int size, signed long *consumed)
{
    uint32_t cmd;
    void __user *ptr = buffer + *consumed;
    void __user *end = buffer + size;

    while (ptr < end && thread->return_error == BR_OK) {
        if (get_user(cmd, (uint32_t __user *)ptr))
            return -EFAULT;
        ptr += sizeof(uint32_t);
        if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
            binder_stats.bc[_IOC_NR(cmd)]++;
            proc->stats.bc[_IOC_NR(cmd)]++;
            thread->stats.bc[_IOC_NR(cmd)]++;
        }
        switch (cmd) {
        case BC_ENTER_LOOPER:
            binder_debug(BINDER_DEBUG_THREADS,
                         "binder: %d:%d BC_ENTER_LOOPER\n",
                         proc->pid, thread->pid);
            if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
                thread->looper |= BINDER_LOOPER_STATE_INVALID;
                binder_user_error("binder: %d:%d ERROR:"
                                  " BC_ENTER_LOOPER called after "
                                  "BC_REGISTER_LOOPER\n",
                                  proc->pid, thread->pid);
            }
            thread->looper |= BINDER_LOOPER_STATE_ENTERED;
            break;
        /* ... other BC_* commands elided ... */
        }
        *consumed = ptr - buffer;
    }
    return 0;
}


binder_thread_write first reads the next unconsumed word from the user-space bwr.write_buffer; by convention that first word is the command for binder_thread_write. (get_user is a Linux kernel primitive that simply copies a single simple value, such as an int, from user space into kernel space.) Here cmd is BC_ENTER_LOOPER, so the case BC_ENTER_LOOPER branch sets the BINDER_LOOPER_STATE_ENTERED bit in thread->looper. Since the incoming binder_write_read.write_buffer carries only this one command, binder_thread_write is now done. The looper state bits it manipulates are sketched below.
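For reference, the looper states form a small flag enum in the driver; the values below are quoted from memory of this era's binder.c, so verify against your tree.

enum {
    BINDER_LOOPER_STATE_REGISTERED  = 0x01, /* BC_REGISTER_LOOPER (driver-requested thread) */
    BINDER_LOOPER_STATE_ENTERED     = 0x02, /* BC_ENTER_LOOPER (thread entered on its own) */
    BINDER_LOOPER_STATE_EXITED      = 0x04,
    BINDER_LOOPER_STATE_INVALID     = 0x08,
    BINDER_LOOPER_STATE_WAITING     = 0x10, /* sleeping in binder_thread_read */
    BINDER_LOOPER_STATE_NEED_RETURN = 0x20,
};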

4. Since bwr.read_size is 0 at this point, there is no data to read, so binder_thread_read is skipped.

5. Finally, the kernel-space binder_write_read is copied back out to the process's user space.

With that, the driver's ioctl has finished handling a BINDER_WRITE_READ command. The general flow is: read the user-space binder_write_read object, run binder_thread_write to process the request's contents, run binder_thread_read to write any results into the address space named by binder_write_read.read_buffer, and finally copy the updated binder_write_read back to user space, from which the caller can read the outcome. Throughout the driver's handling of BINDER_WRITE_READ, the binder_write_read object acts as the index for the whole exchange.

binder_loop in servicemanager has now completed its binder_write call and successfully told the binder driver that the current thread has entered the loop. Back to servicemanager's binder_loop:

void binder_loop(struct binder_state *bs, binder_handler func)
{
    int res;
    struct binder_write_read bwr;
    unsigned readbuf[32];

    bwr.write_size = 0;
    bwr.write_consumed = 0;
    bwr.write_buffer = 0;

    readbuf[0] = BC_ENTER_LOOPER;
    binder_write(bs, readbuf, sizeof(unsigned));

    for (;;) {
        bwr.read_size = sizeof(readbuf);
        bwr.read_consumed = 0;
        bwr.read_buffer = (unsigned) readbuf;

        res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);

        if (res < 0) {
            LOGE("binder_loop: ioctl failed (%s)\n", strerror(errno));
            break;
        }

        res = binder_parse(bs, 0, readbuf, bwr.read_consumed, func);
        if (res == 0) {
            LOGE("binder_loop: unexpected reply?!\n");
            break;
        }
        if (res < 0) {
            LOGE("binder_loop: io error %d %s\n", res, strerror(errno));
            break;
        }
    }
}


The function now sits in an infinite for loop, continuously exchanging data with the binder driver through ioctl. The cmd sent is again BINDER_WRITE_READ, but this time, inside the driver's BINDER_WRITE_READ branch, binder_write_read.write_size is 0, so there is no write_buffer data to consume and binder_thread_write is skipped; bwr.read_size, however, is sizeof(readbuf), which is greater than 0, so execution proceeds into binder_thread_read. Let's look at how binder_thread_read runs in the driver.

static int binder_thread_read(struct binder_proc *proc,
                              struct binder_thread *thread,
                              void __user *buffer, int size,
                              signed long *consumed, int non_block)
{
    void __user *ptr = buffer + *consumed;
    void __user *end = buffer + size;

    int ret = 0;
    int wait_for_proc_work;

    if (*consumed == 0) {
        if (put_user(BR_NOOP, (uint32_t __user *)ptr))
            return -EFAULT;
        ptr += sizeof(uint32_t);
    }

retry:
    wait_for_proc_work = thread->transaction_stack == NULL &&
                         list_empty(&thread->todo);

    if (thread->return_error != BR_OK && ptr < end) {
        if (thread->return_error2 != BR_OK) {
            if (put_user(thread->return_error2, (uint32_t __user *)ptr))
                return -EFAULT;
            ptr += sizeof(uint32_t);
            if (ptr == end)
                goto done;
            thread->return_error2 = BR_OK;
        }
        if (put_user(thread->return_error, (uint32_t __user *)ptr))
            return -EFAULT;
        ptr += sizeof(uint32_t);
        thread->return_error = BR_OK;
        goto done;
    }

    thread->looper |= BINDER_LOOPER_STATE_WAITING;
    if (wait_for_proc_work)
        proc->ready_threads++;
    mutex_unlock(&binder_lock);
    if (wait_for_proc_work) {
        if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
                                BINDER_LOOPER_STATE_ENTERED))) {
            binder_user_error("binder: %d:%d ERROR: Thread waiting "
                              "for process work before calling BC_REGISTER_"
                              "LOOPER or BC_ENTER_LOOPER (state %x)\n",
                              proc->pid, thread->pid, thread->looper);
            wait_event_interruptible(binder_user_error_wait,
                                     binder_stop_on_user_error < 2);
        }
        binder_set_nice(proc->default_priority);
        if (non_block) {
            if (!binder_has_proc_work(proc, thread))
                ret = -EAGAIN;
        } else
            ret = wait_event_interruptible_exclusive(proc->wait, binder_has_proc_work(proc, thread));
    } else {
        if (non_block) {
            if (!binder_has_thread_work(thread))
                ret = -EAGAIN;
        } else
            ret = wait_event_interruptible(thread->wait, binder_has_thread_work(thread));
    }

    /* code not relevant here is omitted for now */
}


In the first part of the code, because *consumed is 0, the kernel's put_user helper writes a BR_NOOP marker into the user-space read_buffer:

if (*consumed == 0) {
    if (put_user(BR_NOOP, (uint32_t __user *)ptr))
        return -EFAULT;
    ptr += sizeof(uint32_t);
}


Next it checks whether there is any pending work to execute. If the current thread itself has none, it must wait for work arriving at the process level, so wait_for_proc_work is true:

wait_for_proc_work = thread->transaction_stack == NULL &&
                     list_empty(&thread->todo);


The thread then goes to sleep: wait_event_interruptible_exclusive(proc->wait, binder_has_proc_work(proc, thread)) parks it on the binder_proc->wait queue until binder_has_proc_work reports work for the process (a sketch of that predicate follows the excerpt below).

if (wait_for_proc_work) {
    if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
                            BINDER_LOOPER_STATE_ENTERED))) {
        binder_user_error("binder: %d:%d ERROR: Thread waiting "
                          "for process work before calling BC_REGISTER_"
                          "LOOPER or BC_ENTER_LOOPER (state %x)\n",
                          proc->pid, thread->pid, thread->looper);
        wait_event_interruptible(binder_user_error_wait,
                                 binder_stop_on_user_error < 2);
    }
    binder_set_nice(proc->default_priority);
    if (non_block) {
        if (!binder_has_proc_work(proc, thread))
            ret = -EAGAIN;
    } else
        ret = wait_event_interruptible_exclusive(proc->wait, binder_has_proc_work(proc, thread));
}
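The wake-up predicate itself is small; the sketch below is my recollection of binder_has_proc_work from this era of the driver, so treat it as illustrative.

static int binder_has_proc_work(struct binder_proc *proc,
                                struct binder_thread *thread)
{
    /* Wake up when the process todo list gains work, or when the
     * thread has been asked to return to user space. */
    return !list_empty(&proc->todo) ||
           (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
}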


At this point servicemanager enters the waiting state, waiting for a client to issue a request; what happens after a client wakes it up is left for later analysis. The second stage of IPC setup is now complete: ServiceManager has successfully taken on the manager role and sits in its waiting loop, ready to respond to client requests at any time.