
📄 binder.c

📁 For binder; can be set to any mode
💻 C
📖 Page 1 of 5
/* drivers/android/binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <asm/cacheflush.h>
#include <linux/binder.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/proc_fs.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>

static DEFINE_MUTEX(binder_lock);
static HLIST_HEAD(binder_procs);
static struct binder_node *binder_context_mgr_node;
static uid_t binder_context_mgr_uid = -1;
static int binder_last_id;
static struct proc_dir_entry *binder_proc_dir_entry_root;
static struct proc_dir_entry *binder_proc_dir_entry_proc;
static struct hlist_head binder_dead_nodes;

static int binder_read_proc_proc(
	char *page, char **start, off_t off, int count, int *eof, void *data);

/* This is only defined in include/asm-arm/sizes.h */
#ifndef SZ_1K
#define SZ_1K                               0x400
#endif

#ifndef SZ_4M
#define SZ_4M                               0x400000
#endif

#ifndef __i386__
#define FORBIDDEN_MMAP_FLAGS                (VM_WRITE | VM_EXEC)
#else
#define FORBIDDEN_MMAP_FLAGS                (VM_WRITE)
#endif

#define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64)

enum {
	BINDER_DEBUG_USER_ERROR             = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
	BINDER_DEBUG_READ_WRITE             = 1U << 6,
	BINDER_DEBUG_USER_REFS              = 1U << 7,
	BINDER_DEBUG_THREADS                = 1U << 8,
	BINDER_DEBUG_TRANSACTION            = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
	BINDER_DEBUG_BUFFER_ALLOC           = 1U << 13,
	BINDER_DEBUG_PRIORITY_CAP           = 1U << 14,
	BINDER_DEBUG_BUFFER_ALLOC_ASYNC     = 1U << 15,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);

static int binder_debug_no_lock;
module_param_named(proc_no_lock, binder_debug_no_lock, bool, S_IWUSR | S_IRUGO);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;
static int binder_set_stop_on_user_error(
	const char *val, struct kernel_param *kp)
{
	int ret;
	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
	param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);

#define binder_user_error(x...) \
	do { \
		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
			printk(KERN_INFO x); \
		if (binder_stop_on_user_error) \
			binder_stop_on_user_error = 2; \
	} while (0)

enum {
	BINDER_STAT_PROC,
	BINDER_STAT_THREAD,
	BINDER_STAT_NODE,
	BINDER_STAT_REF,
	BINDER_STAT_DEATH,
	BINDER_STAT_TRANSACTION,
	BINDER_STAT_TRANSACTION_COMPLETE,
	BINDER_STAT_COUNT
};

struct binder_stats {
	int br[_IOC_NR(BR_FAILED_REPLY) + 1];
	int bc[_IOC_NR(BC_DEAD_BINDER_DONE) + 1];
	int obj_created[BINDER_STAT_COUNT];
	int obj_deleted[BINDER_STAT_COUNT];
};

static struct binder_stats binder_stats;

struct binder_transaction_log_entry {
	int debug_id;
	int call_type;
	int from_proc;
	int from_thread;
	int target_handle;
	int to_proc;
	int to_thread;
	int to_node;
	int data_size;
	int offsets_size;
};

struct binder_transaction_log {
	int next;
	int full;
	struct binder_transaction_log_entry entry[32];
};

struct binder_transaction_log binder_transaction_log;
struct binder_transaction_log binder_transaction_log_failed;

static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;
	e = &log->entry[log->next];
	memset(e, 0, sizeof(*e));
	log->next++;
	if (log->next == ARRAY_SIZE(log->entry)) {
		log->next = 0;
		log->full = 1;
	}
	return e;
}

struct binder_work {
	struct list_head entry;
	enum {
		BINDER_WORK_TRANSACTION = 1,
		BINDER_WORK_TRANSACTION_COMPLETE,
		BINDER_WORK_NODE,
		BINDER_WORK_DEAD_BINDER,
		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
	} type;
};

struct binder_node {
	int debug_id;
	struct binder_work work;
	union {
		struct rb_node rb_node;
		struct hlist_node dead_node;
	};
	struct binder_proc *proc;
	struct hlist_head refs;
	int internal_strong_refs;
	int local_weak_refs;
	int local_strong_refs;
	void __user *ptr;
	void __user *cookie;
	unsigned has_strong_ref : 1;
	unsigned pending_strong_ref : 1;
	unsigned has_weak_ref : 1;
	unsigned pending_weak_ref : 1;
	unsigned has_async_transaction : 1;
	unsigned accept_fds : 1;
	int min_priority : 8;
	struct list_head async_todo;
};

struct binder_ref_death {
	struct binder_work work;
	void __user *cookie;
};

struct binder_ref {
	/* Lookups needed: */
	/*   node + proc => ref (transaction) */
	/*   desc + proc => ref (transaction, inc/dec ref) */
	/*   node => refs + procs (proc exit) */
	int debug_id;
	struct rb_node rb_node_desc;
	struct rb_node rb_node_node;
	struct hlist_node node_entry;
	struct binder_proc *proc;
	struct binder_node *node;
	uint32_t desc;
	int strong;
	int weak;
	struct binder_ref_death *death;
};

struct binder_buffer {
	struct list_head entry; /* free and allocated entries by address */
	struct rb_node rb_node; /* free entry by size or allocated entry */
				/* by address */
	unsigned free : 1;
	unsigned allow_user_free : 1;
	unsigned async_transaction : 1;
	unsigned debug_id : 29;

	struct binder_transaction *transaction;

	struct binder_node *target_node;
	size_t data_size;
	size_t offsets_size;
	uint8_t data[0];
};

struct binder_proc {
	struct hlist_node proc_node;
	struct rb_root threads;
	struct rb_root nodes;
	struct rb_root refs_by_desc;
	struct rb_root refs_by_node;
	int pid;
	struct vm_area_struct *vma;
	struct task_struct *tsk;
	void *buffer;
	size_t user_buffer_offset;

	struct list_head buffers;
	struct rb_root free_buffers;
	struct rb_root allocated_buffers;
	size_t free_async_space;

	struct page **pages;
	size_t buffer_size;
	uint32_t buffer_free;
	struct list_head todo;
	wait_queue_head_t wait;
	struct binder_stats stats;
	struct list_head delivered_death;
	int max_threads;
	int requested_threads;
	int requested_threads_started;
	int ready_threads;
	long default_priority;
};

enum {
	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
	BINDER_LOOPER_STATE_ENTERED     = 0x02,
	BINDER_LOOPER_STATE_EXITED      = 0x04,
	BINDER_LOOPER_STATE_INVALID     = 0x08,
	BINDER_LOOPER_STATE_WAITING     = 0x10,
	BINDER_LOOPER_STATE_NEED_RETURN = 0x20
};

struct binder_thread {
	struct binder_proc *proc;
	struct rb_node rb_node;
	int pid;
	int looper;
	struct binder_transaction *transaction_stack;
	struct list_head todo;
	uint32_t return_error; /* Write failed, return error code in read buf */
	uint32_t return_error2; /* Write failed, return error code in read */
		/* buffer. Used when sending a reply to a dead process that */
		/* we are also waiting on */
	wait_queue_head_t wait;
	struct binder_stats stats;
};

struct binder_transaction {
	int debug_id;
	struct binder_work work;
	struct binder_thread *from;
	struct binder_transaction *from_parent;
	struct binder_proc *to_proc;
	struct binder_thread *to_thread;
	struct binder_transaction *to_parent;
	unsigned need_reply : 1;
	/*unsigned is_dead : 1;*/ /* not used at the moment */

	struct binder_buffer *buffer;
	unsigned int	code;
	unsigned int	flags;
	long	priority;
	long	saved_priority;
	uid_t	sender_euid;
};

/*
 * copied from get_unused_fd_flags
 */
int task_get_unused_fd_flags(struct task_struct *tsk, int flags)
{
	struct files_struct *files = get_files_struct(tsk);
	int fd, error;
	struct fdtable *fdt;
	unsigned long rlim_cur;

	if (files == NULL)
		return -ESRCH;

	error = -EMFILE;
	spin_lock(&files->file_lock);

repeat:
	fdt = files_fdtable(files);
	fd = find_next_zero_bit(fdt->open_fds->fds_bits, fdt->max_fds,
				files->next_fd);

	/*
	 * N.B. For clone tasks sharing a files structure, this test
	 * will limit the total number of files that can be opened.
	 */
	rcu_read_lock();
	if (tsk->signal)
		rlim_cur = tsk->signal->rlim[RLIMIT_NOFILE].rlim_cur;
	else
		rlim_cur = 0;
	rcu_read_unlock();
	if (fd >= rlim_cur)
		goto out;

	/* Do we need to expand the fd array or fd set?  */
	error = expand_files(files, fd);
	if (error < 0)
		goto out;

	if (error) {
		/*
		 * If we needed to expand the fs array we
		 * might have blocked - try again.
		 */
		error = -EMFILE;
		goto repeat;
	}

	FD_SET(fd, fdt->open_fds);
	if (flags & O_CLOEXEC)
		FD_SET(fd, fdt->close_on_exec);
	else
		FD_CLR(fd, fdt->close_on_exec);
	files->next_fd = fd + 1;
#if 1
	/* Sanity check */
	if (fdt->fd[fd] != NULL) {
		printk(KERN_WARNING "get_unused_fd: slot %d not NULL!\n", fd);
		fdt->fd[fd] = NULL;
	}
#endif
	error = fd;

out:
	spin_unlock(&files->file_lock);
	put_files_struct(files);
	return error;
}

/*
 * copied from fd_install
 */
static void task_fd_install(
	struct task_struct *tsk, unsigned int fd, struct file *file)
{
	struct files_struct *files = get_files_struct(tsk);
	struct fdtable *fdt;

	if (files == NULL)
		return;

	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	BUG_ON(fdt->fd[fd] != NULL);
	rcu_assign_pointer(fdt->fd[fd], file);
	spin_unlock(&files->file_lock);
	put_files_struct(files);
}

/*
 * copied from __put_unused_fd in open.c
 */
static void __put_unused_fd(struct files_struct *files, unsigned int fd)
{
	struct fdtable *fdt = files_fdtable(files);
	__FD_CLR(fd, fdt->open_fds);
	if (fd < files->next_fd)
		files->next_fd = fd;
}

/*
 * copied from sys_close
 */
static long task_close_fd(struct task_struct *tsk, unsigned int fd)
{
	struct file *filp;
	struct files_struct *files = get_files_struct(tsk);
	struct fdtable *fdt;
	int retval;

	if (files == NULL)
		return -ESRCH;

	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (fd >= fdt->max_fds)
		goto out_unlock;
	filp = fdt->fd[fd];
	if (!filp)
		goto out_unlock;
	rcu_assign_pointer(fdt->fd[fd], NULL);
	FD_CLR(fd, fdt->close_on_exec);
	__put_unused_fd(files, fd);
	spin_unlock(&files->file_lock);
	retval = filp_close(filp, files);

	/* can't restart close syscall because file table entry was cleared */
	if (unlikely(retval == -ERESTARTSYS ||
		     retval == -ERESTARTNOINTR ||
		     retval == -ERESTARTNOHAND ||
		     retval == -ERESTART_RESTARTBLOCK))
		retval = -EINTR;
	put_files_struct(files);

	return retval;

out_unlock:
	spin_unlock(&files->file_lock);
	put_files_struct(files);
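Note on the transaction log: binder_transaction_log_add() above implements a small fixed-size ring. next names the slot the next call will claim, it wraps back to zero at ARRAY_SIZE(entry), and full records that the ring has wrapped, so a reader knows whether all 32 slots or only the first next slots hold valid data. The following minimal, self-contained user-space sketch shows the same bookkeeping; the struct and field names loosely mirror the driver, but the program is illustrative only and is not part of binder.c.

#include <stdio.h>
#include <string.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct log_entry {
	int debug_id;
	int call_type;
};

struct txn_log {
	int next;                     /* slot the next add will use      */
	int full;                     /* set once the ring has wrapped   */
	struct log_entry entry[32];
};

/* Same pattern as binder_transaction_log_add(): claim the slot at
 * 'next', clear it, then advance and wrap, marking the log full. */
static struct log_entry *log_add(struct txn_log *log)
{
	struct log_entry *e = &log->entry[log->next];

	memset(e, 0, sizeof(*e));
	log->next++;
	if (log->next == (int)ARRAY_SIZE(log->entry)) {
		log->next = 0;
		log->full = 1;
	}
	return e;
}

int main(void)
{
	struct txn_log log = { 0 };
	int i;

	/* Add more entries than the ring can hold to force a wrap. */
	for (i = 0; i < 40; i++) {
		struct log_entry *e = log_add(&log);
		e->debug_id = i;
	}

	/* Once 'full' is set every slot is valid; before that, only
	 * the first 'next' slots are. */
	int count = log.full ? (int)ARRAY_SIZE(log.entry) : log.next;
	printf("valid entries: %d, next slot: %d\n", count, log.next);
	return 0;
}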

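Note on the module parameters: because debug_mask is declared with module_param_named(..., uint, S_IWUSR | S_IRUGO), a kernel that exposes module parameters in sysfs will normally publish it as /sys/module/binder/parameters/debug_mask, readable by everyone and writable by root, whether the driver is built in or loaded as a module. The helper below is a hypothetical user-space sketch under that assumption, not part of the driver; it simply writes a new mask such as BINDER_DEBUG_READ_WRITE | BINDER_DEBUG_TRANSACTION, i.e. (1U << 6) | (1U << 9) from the enum above.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Hypothetical helper: set the binder debug mask via sysfs.
 * Assumes the standard module-parameter path; requires root to write. */
static int set_binder_debug_mask(unsigned int mask)
{
	const char *path = "/sys/module/binder/parameters/debug_mask";
	char buf[32];
	int fd, len, ret = 0;

	fd = open(path, O_WRONLY);
	if (fd < 0) {
		perror("open debug_mask");
		return -1;
	}
	len = snprintf(buf, sizeof(buf), "%u\n", mask);
	if (write(fd, buf, len) != len) {
		perror("write debug_mask");
		ret = -1;
	}
	close(fd);
	return ret;
}

int main(void)
{
	/* Values mirror the driver's enum: READ_WRITE is 1U << 6,
	 * TRANSACTION is 1U << 9. */
	unsigned int mask = (1U << 6) | (1U << 9);

	if (set_binder_debug_mask(mask) == 0)
		printf("binder debug_mask set to 0x%x\n", mask);
	return 0;
}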