Binder (4) - ServiceManager Startup Flow

Posted by Jfson on 2017-08-05

Every step of Binder communication ends up talking to the ServiceManager, so it is well worth understanding how interaction with it works. The ServiceManager story breaks down into starting it, getting services, adding services, and registering services. We begin with the startup process: how ServiceManager becomes the Binder daemon.

Source paths:
android/system/core/rootdir/init.rc
android/frameworks/native/cmds/servicemanager/
|-- service_manager.c
|-- binder.c
Binder kernel driver (kernel/drivers/android/binder.c)

1 Starting ServiceManager

  • 1. After the init process comes up, Android starts ServiceManager through the init.rc script:

|– init.rc

service servicemanager /system/bin/servicemanager
    class core
    user system
    group system
    critical
    onrestart restart healthd
    onrestart restart zygote
    onrestart restart media
    onrestart restart surfaceflinger
    onrestart restart drm

The corresponding executable is /system/bin/servicemanager, built from service_manager.c. The critical option tells init to reboot the device into recovery if the service dies repeatedly, and the onrestart lines restart the dependent daemons (healthd, zygote, media, surfaceflinger, drm), since their cached binder handles become useless once servicemanager restarts. Next, let's trace the main() entry point in service_manager.c.

2.1 main

|– service_manager.c

int main(int argc, char **argv)
{
    struct binder_state *bs;

    // Open the Binder driver and map a 128 KB buffer
    bs = binder_open(128*1024);
    if (!bs) {
        ALOGE("failed to open binder driver\n");
        return -1;
    }

    // Become the context manager
    if (binder_become_context_manager(bs)) {
        ALOGE("cannot become context manager (%s)\n", strerror(errno));
        return -1;
    }

    // Check whether SELinux is enabled and usable
    selinux_enabled = is_selinux_enabled();
    sehandle = selinux_android_service_context_handle();
    selinux_status_open(true);
    if (selinux_enabled > 0) {
        if (sehandle == NULL) {
            // failed to acquire the sehandle
            ALOGE("SELinux: Failed to acquire sehandle. Aborting.\n");
            abort();
        }
        if (getcon(&service_manager_context) != 0) {
            // failed to acquire the service_manager context
            ALOGE("SELinux: Failed to acquire service_manager context. Aborting.\n");
            abort();
        }
    }

    union selinux_callback cb;
    cb.func_audit = audit_callback;
    selinux_set_callback(SELINUX_CB_AUDIT, cb);
    cb.func_log = selinux_log_callback;
    selinux_set_callback(SELINUX_CB_LOG, cb);

    // Enter the binder loop
    binder_loop(bs, svcmgr_handler);

    return 0;
}
  • The main() entry function does three things:
    • 1. Open the Binder driver and map the shared buffer
    • 2. Tell the Binder driver to make this process the Binder context manager (ServiceManager becomes the daemon)
    • 3. Enter the loop and handle IPC requests (wait for client requests)

2.2 binder_open

The first step in ServiceManager's main() is opening the Binder driver by calling binder_open(). What exactly does binder_open() do?

|– binder.c

struct binder_state *binder_open(size_t mapsize)
{
    struct binder_state *bs; // state holder for the opened driver
    struct binder_version vers;

    bs = malloc(sizeof(*bs));
    if (!bs) {
        errno = ENOMEM;
        return NULL;
    }

    // Open the Binder device driver
    bs->fd = open("/dev/binder", O_RDWR);
    if (bs->fd < 0) {
        fprintf(stderr,"binder: cannot open device (%s)\n",
                strerror(errno));
        goto fail_open; // open failed
    }

    // ioctl system call: query the binder version
    if ((ioctl(bs->fd, BINDER_VERSION, &vers) == -1) ||
        (vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)) {
        fprintf(stderr,
                "binder: kernel driver version (%d) differs from user space version (%d)\n",
                vers.protocol_version, BINDER_CURRENT_PROTOCOL_VERSION);
        goto fail_open; // kernel-space and user-space binder protocol versions differ
    }

    bs->mapsize = mapsize;
    // mmap system call; the size must be a multiple of the page size
    bs->mapped = mmap(NULL, mapsize, PROT_READ, MAP_PRIVATE, bs->fd, 0);
    if (bs->mapped == MAP_FAILED) {
        fprintf(stderr,"binder: cannot map device (%s)\n",
                strerror(errno));
        goto fail_map; // mapping failed
    }

    return bs;

fail_map: // mapping failed: close the driver
    close(bs->fd);
fail_open: // open failed: free the state
    free(bs);
    return NULL;
}

// state holder initialized above
struct binder_state
{
    int fd;        // file descriptor for /dev/binder from binder_open
    void *mapped;  // address returned by mmap
    size_t mapsize;// size of the mapping, 128 KB for ServiceManager
};
  • So opening the Binder driver involves:
    • allocating the binder_state holder
    • opening the Binder driver with open()
    • checking via ioctl() that the kernel-space and user-space Binder protocol versions match
    • mapping the shared buffer with mmap()

2.3 binder_become_context_manager

  • After binder_open(), main() calls binder_become_context_manager() to make ServiceManager the Binder context manager. Let's read the source to see what that involves:

|– binder.c

int binder_become_context_manager(struct binder_state *bs)
{
    // Send the BINDER_SET_CONTEXT_MGR command to the driver via ioctl
    return ioctl(bs->fd, BINDER_SET_CONTEXT_MGR, 0);
}
  • This boils down to a single ioctl() system call. The kernel source (kernel/drivers/android/binder.c) is not in this tree, so here is the call flow in outline (a sketch follows this list):
    • 1. ioctl() traps into the driver's binder_ioctl(), which for the BINDER_SET_CONTEXT_MGR command dispatches to binder_ioctl_set_ctx_mgr(), holding binder_main_lock along the way.
    • 2. binder_ioctl_set_ctx_mgr() ensures the context-manager node is created only once and records the calling thread's euid as the ServiceManager uid; it then creates the ServiceManager node via binder_new_node().
    • 3. binder_new_node() allocates a binder_node, adds it to the proc's red-black tree of nodes, and initializes the node's two work lists, async_todo and the binder_work entry.
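
To fix the idea, here is a heavily simplified sketch of binder_ioctl_set_ctx_mgr(). It is paraphrased from memory of kernel/drivers/android/binder.c: names such as binder_context_mgr_node and binder_context_mgr_uid follow that file, but treat the details as approximate, not verbatim driver source.

static int binder_ioctl_set_ctx_mgr(struct binder_proc *proc)
{
    int ret = 0;

    if (binder_context_mgr_node != NULL) {
        /* only one context manager may ever exist */
        pr_err("BINDER_SET_CONTEXT_MGR already set\n");
        ret = -EBUSY;
        goto out;
    }
    /* record the caller's euid as the context manager's uid */
    binder_context_mgr_uid = current_euid();
    /* ptr == 0 and cookie == 0: this is the special node behind handle 0 */
    binder_context_mgr_node = binder_new_node(proc, 0, 0);
    if (binder_context_mgr_node == NULL) {
        ret = -ENOMEM;
        goto out;
    }
out:
    return ret;
}

binder_new_node() then allocates the binder_node, links it into the proc's red-black tree, and initializes the node's work entry and async_todo list, matching step 3 above.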

2.4 binder_loop

After binder_become_context_manager() succeeds, main() calls binder_loop() to enter the loop that services IPC requests. Its source:

void binder_loop(struct binder_state *bs, binder_handler func)
{
    int res;
    struct binder_write_read bwr;
    uint32_t readbuf[32];

    bwr.write_size = 0;
    bwr.write_consumed = 0;
    bwr.write_buffer = 0;

    readbuf[0] = BC_ENTER_LOOPER;
    // Send BC_ENTER_LOOPER to the binder driver, putting
    // Service Manager into its loop
    binder_write(bs, readbuf, sizeof(uint32_t));

    for (;;) {
        bwr.read_size = sizeof(readbuf);
        bwr.read_consumed = 0;
        bwr.read_buffer = (uintptr_t) readbuf;

        // Loop forever, blocking in the driver to read incoming binder work
        res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);

        if (res < 0) {
            ALOGE("binder_loop: ioctl failed (%s)\n", strerror(errno));
            break;
        }

        // Parse the commands the driver wrote back
        res = binder_parse(bs, 0, (uintptr_t) readbuf, bwr.read_consumed, func);
        if (res == 0) {
            ALOGE("binder_loop: unexpected reply?!\n");
            break;
        }
        if (res < 0) {
            ALOGE("binder_loop: io error %d %s\n", res, strerror(errno));
            break;
        }
    }
}

Main steps:

  • binder_write() sends the BC_ENTER_LOOPER command to the binder driver via ioctl(), putting ServiceManager into looper mode
  • it then loops forever, reading from and writing to the driver (the bwr structure traded with the driver is shown after this list)
  • each batch of data read back is decoded by binder_parse()
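
For reference, the binder_write_read structure traded with the driver is declared in the binder UAPI header (uapi/linux/android/binder.h) essentially as follows; the write_* half describes commands user space sends, the read_* half describes the buffer the driver fills:

struct binder_write_read {
    binder_size_t    write_size;     /* bytes of commands available to the driver */
    binder_size_t    write_consumed; /* bytes the driver actually consumed */
    binder_uintptr_t write_buffer;   /* user-space address of the command buffer */
    binder_size_t    read_size;      /* capacity of the read buffer */
    binder_size_t    read_consumed;  /* bytes the driver wrote back */
    binder_uintptr_t read_buffer;    /* user-space address of the read buffer */
};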

2.4.1 binder_write

  • binder_loop() sends the BC_ENTER_LOOPER command to the binder driver, after which ServiceManager enters its loop:

|– binder.c

int binder_write(struct binder_state *bs, void *data, size_t len)
{
    struct binder_write_read bwr;
    int res;

    bwr.write_size = len;
    bwr.write_consumed = 0;
    bwr.write_buffer = (uintptr_t) data; // here data holds BC_ENTER_LOOPER
    bwr.read_size = 0;  // write-only: nothing is read back
    bwr.read_consumed = 0;
    bwr.read_buffer = 0;
    res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
    if (res < 0) {
        fprintf(stderr,"binder_write: ioctl failed (%s)\n",
                strerror(errno));
    }
    return res;
}
  • In effect this forwards the BC_ENTER_LOOPER command to the driver's binder_ioctl() via the ioctl() system call.

2.4.2 binder_ioctl

  • In the driver, binder_ioctl() calls binder_ioctl_write_read() to copy the user-space binder_write_read struct into kernel space.
  • If the write buffer contains data (write_size > 0), binder_thread_write() pulls the command out of bwr.write_buffer; for BC_ENTER_LOOPER it sets the thread's looper state to BINDER_LOOPER_STATE_ENTERED (sketched below).
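
A condensed sketch of how binder_thread_write() handles this command (again paraphrased from the kernel driver, not verbatim):

/* inside binder_thread_write(), for each command pulled from bwr.write_buffer */
case BC_ENTER_LOOPER:
    if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
        /* a thread must not both register via BC_REGISTER_LOOPER
         * and enter as a main looper */
        thread->looper |= BINDER_LOOPER_STATE_INVALID;
        binder_user_error("BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n");
    }
    thread->looper |= BINDER_LOOPER_STATE_ENTERED;
    break;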

2.5 binder_parse

  • 在循环读写过程中,对相应的binder进行解析,查看binder_parse源码:
int binder_parse(struct binder_state *bs, struct binder_io *bio,
                 uintptr_t ptr, size_t size, binder_handler func)
{
    int r = 1;
    uintptr_t end = ptr + (uintptr_t) size;

    while (ptr < end) {
        uint32_t cmd = *(uint32_t *) ptr;
        ptr += sizeof(uint32_t);
#if TRACE
        fprintf(stderr,"%s:\n", cmd_name(cmd));
#endif
        switch(cmd) {
        case BR_NOOP:
            break;
        case BR_TRANSACTION_COMPLETE:
            break;
        case BR_INCREFS:
        case BR_ACQUIRE:
        case BR_RELEASE:
        case BR_DECREFS:
#if TRACE
            fprintf(stderr,"  %p, %p\n", (void *)ptr, (void *)(ptr + sizeof(void *)));
#endif
            // reference-count commands carry a binder_ptr_cookie payload
            ptr += sizeof(struct binder_ptr_cookie);
            break;
        case BR_TRANSACTION: { // an incoming request from a client
            struct binder_transaction_data *txn = (struct binder_transaction_data *) ptr;
            if ((end - ptr) < sizeof(*txn)) {
                ALOGE("parse: txn too small!\n");
                return -1;
            }
            binder_dump_txn(txn);
            if (func) {
                unsigned rdata[256/4];
                struct binder_io msg;
                struct binder_io reply;
                int res;

                bio_init(&reply, rdata, sizeof(rdata), 4);
                bio_init_from_txn(&msg, txn);
                // dispatch to the handler (svcmgr_handler here), then reply
                res = func(bs, txn, &msg, &reply);
                binder_send_reply(bs, &reply, txn->data.ptr.buffer, res);
            }
            ptr += sizeof(*txn);
            break;
        }
        case BR_REPLY: { // a reply to a request we sent
            struct binder_transaction_data *txn = (struct binder_transaction_data *) ptr;
            if ((end - ptr) < sizeof(*txn)) {
                ALOGE("parse: reply too small!\n");
                return -1;
            }
            binder_dump_txn(txn);
            if (bio) {
                bio_init_from_txn(bio, txn);
                bio = 0;
            } else {
                /* todo FREE BUFFER */
            }
            ptr += sizeof(*txn);
            r = 0;
            break;
        }
        case BR_DEAD_BINDER: { // a watched binder died: run its death callback
            struct binder_death *death = (struct binder_death *)(uintptr_t) *(binder_uintptr_t *)ptr;
            ptr += sizeof(binder_uintptr_t);
            death->func(bs, death->ptr);
            break;
        }
        case BR_FAILED_REPLY:
            r = -1;
            break;
        case BR_DEAD_REPLY:
            r = -1;
            break;
        default:
            ALOGE("parse: OOPS %d\n", cmd);
            return -1;
        }
    }

    return r;
}
  • Recall the call made when ServiceManager enters its loop: binder_loop(bs, svcmgr_handler).
  • So here ptr points at readbuf (which binder_loop primed with BC_ENTER_LOOPER and the driver then overwrites with incoming commands), and func points to svcmgr_handler.

2.6 svcmgr_handler

|– service_manager.c

Now look at the svcmgr_handler source in ServiceManager:

int svcmgr_handler(struct binder_state *bs,
                   struct binder_transaction_data *txn,
                   struct binder_io *msg,
                   struct binder_io *reply)
{
    struct svcinfo *si;
    uint16_t *s;
    size_t len;
    uint32_t handle;
    uint32_t strict_policy;
    int allow_isolated;

    //ALOGI("target=%p code=%d pid=%d uid=%d\n",
    //  (void*) txn->target.ptr, txn->code, txn->sender_pid, txn->sender_euid);

    if (txn->target.ptr != BINDER_SERVICE_MANAGER)
        return -1;

    if (txn->code == PING_TRANSACTION)
        return 0;

    // Equivalent to Parcel::enforceInterface(), reading the RPC
    // header with the strict mode policy mask and the interface name.
    // Note that we ignore the strict_policy and don't propagate it
    // further (since we do no outbound RPCs anyway).
    strict_policy = bio_get_uint32(msg);
    s = bio_get_string16(msg, &len);
    if (s == NULL) {
        return -1;
    }

    if ((len != (sizeof(svcmgr_id) / 2)) ||
        memcmp(svcmgr_id, s, sizeof(svcmgr_id))) {
        fprintf(stderr,"invalid id %s\n", str8(s, len));
        return -1;
    }

    if (sehandle && selinux_status_updated() > 0) {
        struct selabel_handle *tmp_sehandle = selinux_android_service_context_handle();
        if (tmp_sehandle) {
            selabel_close(sehandle);
            sehandle = tmp_sehandle;
        }
    }

    switch(txn->code) {
    case SVC_MGR_GET_SERVICE:   // get a service
    case SVC_MGR_CHECK_SERVICE: // look up a service
        s = bio_get_string16(msg, &len);
        if (s == NULL) {
            return -1;
        }
        handle = do_find_service(bs, s, len, txn->sender_euid, txn->sender_pid);
        if (!handle)
            break;
        bio_put_ref(reply, handle);
        return 0;

    case SVC_MGR_ADD_SERVICE: // add (register) a service
        s = bio_get_string16(msg, &len);
        if (s == NULL) {
            return -1;
        }
        handle = bio_get_ref(msg);
        allow_isolated = bio_get_uint32(msg) ? 1 : 0;
        if (do_add_service(bs, s, len, handle, txn->sender_euid,
            allow_isolated, txn->sender_pid))
            return -1;
        break;

    case SVC_MGR_LIST_SERVICES: { // list all registered services
        uint32_t n = bio_get_uint32(msg);

        if (!svc_can_list(txn->sender_pid)) {
            ALOGE("list_service() uid=%d - PERMISSION DENIED\n",
                    txn->sender_euid);
            return -1;
        }
        si = svclist;
        while ((n-- > 0) && si)
            si = si->next;
        if (si) {
            bio_put_string16(reply, si->name);
            return 0;
        }
        return -1;
    }

    default:
        ALOGE("unknown code %d\n", txn->code);
        return -1;
    }

    bio_put_uint32(reply, 0);
    return 0;
}
  • This handler implements the look-up-service, add/register-service, and list-services operations (the registry data structure behind them is sketched below).
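
The registry behind these operations is just a singly linked list of svcinfo records headed by the global svclist. The sketch below follows service_manager.c, with the exact field order reconstructed from memory, so treat it as approximate:

struct svcinfo
{
    struct svcinfo *next;
    uint32_t handle;           // binder handle the driver assigned to the service
    struct binder_death death; // death-notification bookkeeping
    int allow_isolated;        // may isolated-uid processes look this service up?
    size_t len;                // name length, in uint16_t units
    uint16_t name[0];          // UTF-16 service name stored inline
};

// Linear lookup by UTF-16 name, as do_find_service() uses it
struct svcinfo *find_svc(const uint16_t *s16, size_t len)
{
    struct svcinfo *si;

    for (si = svclist; si; si = si->next) {
        if ((len == si->len) &&
            !memcmp(s16, si->name, len * sizeof(uint16_t)))
            return si;
    }
    return NULL;
}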

Summary

How ServiceManager, launched from the init.rc script, becomes the daemon of Android's Binder IPC mechanism:

  • 1. Open the Binder driver file /dev/binder: open("/dev/binder", O_RDWR)
  • 2. Request 128 KB of buffer space and map it: mmap(NULL, mapsize, PROT_READ, MAP_PRIVATE, bs->fd, 0)
  • 3. Tell the Binder driver to make this process the Binder context manager (making ServiceManager the daemon): binder_become_context_manager(struct binder_state *bs)
  • 4. Enter the loop and handle IPC requests (wait for client requests): binder_loop(bs, svcmgr_handler). The whole flow is condensed into a sketch below.
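
Condensed into one sketch, the whole startup is just these calls in order (the same functions walked through above, with error handling trimmed):

int main(int argc, char **argv)
{
    struct binder_state *bs;

    bs = binder_open(128*1024);             // steps 1 & 2: open /dev/binder, mmap 128 KB
    if (!bs)
        return -1;
    if (binder_become_context_manager(bs))  // step 3: claim handle 0 via BINDER_SET_CONTEXT_MGR
        return -1;
    binder_loop(bs, svcmgr_handler);        // step 4: BC_ENTER_LOOPER, then serve requests forever
    return 0;
}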
