⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 binder.c

📁 for binder and can be set as any mode
💻 C
📖 第 1 页 / 共 5 页
字号:
	return -EBADF;
}

/*
 * Set the current task's nice value, clamping to the RLIMIT_NICE ceiling
 * when can_nice() refuses the requested value; logs when capping occurs
 * (gated by BINDER_DEBUG_PRIORITY_CAP).
 */
static void binder_set_nice(long nice)
{
	long min_nice;
	if (can_nice(current, nice)) {
		set_user_nice(current, nice);
		return;
	}
	/* lowest nice value this task is allowed, derived from RLIMIT_NICE */
	min_nice = 20 - current->signal->rlim[RLIMIT_NICE].rlim_cur;
	if (binder_debug_mask & BINDER_DEBUG_PRIORITY_CAP)
		printk(KERN_INFO "binder: %d: nice value %ld not allowed use "
		       "%ld instead\n", current->pid, nice, min_nice);
	set_user_nice(current, min_nice);
	if (min_nice < 20)
		return;
	/* min_nice >= 20 means the rlimit allows nothing but the floor */
	binder_user_error("binder: %d RLIMIT_NICE not set\n", current->pid);
}

/*
 * Size of @buffer's data area: the distance from buffer->data to the start
 * of the next buffer in proc->buffers, or to the end of the proc's buffer
 * pool when @buffer is the last entry.
 */
static size_t binder_buffer_size(
	struct binder_proc *proc, struct binder_buffer *buffer)
{
	if (list_is_last(&buffer->entry, &proc->buffers))
		return proc->buffer + proc->buffer_size - (void *)buffer->data;
	else
		return (size_t)list_entry(buffer->entry.next,
			struct binder_buffer, entry) - (size_t)buffer->data;
}

/*
 * Insert @new_buffer (which must be free) into proc->free_buffers,
 * an rb-tree ordered by buffer size.  Duplicate sizes go right.
 */
static void binder_insert_free_buffer(
	struct binder_proc *proc, struct binder_buffer *new_buffer)
{
	struct rb_node **p = &proc->free_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;
	size_t buffer_size;
	size_t new_buffer_size;
	BUG_ON(!new_buffer->free);
	new_buffer_size = binder_buffer_size(proc, new_buffer);
	/* NOTE(review): %d for a size_t argument — works on 32-bit,
	 * but %zd is the portable specifier; same pattern recurs below. */
	if (binder_debug_mask & BINDER_DEBUG_BUFFER_ALLOC)
		printk(KERN_INFO "binder: %d: add free buffer, size %d, "
		       "at %p\n", proc->pid, new_buffer_size, new_buffer);
	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);
		buffer_size = binder_buffer_size(proc, buffer);
		if (new_buffer_size < buffer_size)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &proc->free_buffers);
}

/*
 * Insert @new_buffer (which must not be free) into
 * proc->allocated_buffers, an rb-tree ordered by buffer address.
 * A duplicate address is a bug.
 */
static void binder_insert_allocated_buffer(
	struct binder_proc *proc, struct binder_buffer *new_buffer)
{
	struct rb_node **p = &proc->allocated_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;
	BUG_ON(new_buffer->free);
	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);
		if (new_buffer < buffer)
			p = &parent->rb_left;
		else if (new_buffer > buffer)
			p = &parent->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &proc->allocated_buffers);
}

/*
 * Translate a userspace data pointer back to its kernel binder_buffer:
 * subtract the proc's user/kernel mapping offset and the offset of the
 * data[] member, then look the result up in proc->allocated_buffers.
 * Returns NULL when no allocated buffer matches.
 */
static struct binder_buffer *binder_buffer_lookup(
	struct binder_proc *proc, void __user *user_ptr)
{
	struct rb_node *n = proc->allocated_buffers.rb_node;
	struct binder_buffer *buffer;
	struct binder_buffer *kern_ptr;
	kern_ptr = user_ptr - proc->user_buffer_offset
		- offsetof(struct binder_buffer, data);
	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);
		if (kern_ptr < buffer)
			n = n->rb_left;
		else if (kern_ptr > buffer)
			n = n->rb_right;
		else
			return buffer;
	}
	return NULL;
}

/*
 * Allocate (@allocate != 0) or free (@allocate == 0) the physical pages
 * backing the kernel range [start, end), mapping each page both into the
 * kernel (map_vm_area) and into the process's binder vma (vm_insert_page).
 *
 * When no @vma is passed in, the proc's mm is taken (get_task_mm) and
 * proc->vma is used under mmap_sem held for write.
 *
 * Error handling: the labels inside the free loop are jump targets from
 * the allocate loop — on failure, execution falls into the tail of the
 * free loop at exactly the stage that succeeded, unwinding page by page
 * back to @start.  Statement order here is load-bearing; do not reorder.
 */
static int binder_update_page_range(struct binder_proc *proc, int allocate,
	void *start, void *end, struct vm_area_struct *vma)
{
	void *page_addr;
	unsigned long user_page_addr;
	struct vm_struct tmp_area;
	struct page **page;
	struct mm_struct *mm;
	if (binder_debug_mask & BINDER_DEBUG_BUFFER_ALLOC)
		printk(KERN_INFO "binder: %d: %s pages %p-%p\n",
		       proc->pid, allocate ? "allocate" : "free", start, end);
	if (end <= start)
		return 0;
	if (vma)
		mm = NULL;
	else
		mm = get_task_mm(proc->tsk);
	if (mm) {
		down_write(&mm->mmap_sem);
		vma = proc->vma;
	}
	if (allocate == 0)
		goto free_range;
	if (vma == NULL) {
		printk(KERN_ERR "binder: %d: binder_alloc_buf failed to "
		       "map pages in userspace, no vma\n", proc->pid);
		goto err_no_vma;
	}
	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		int ret;
		struct page **page_array_ptr;
		page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
		BUG_ON(*page);
		*page = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (*page == NULL) {
			printk(KERN_ERR "binder: %d: binder_alloc_buf failed "
			       "for page at %p\n", proc->pid, page_addr);
			goto err_alloc_page_failed;
		}
		tmp_area.addr = page_addr;
		tmp_area.size = PAGE_SIZE + PAGE_SIZE /* guard page? */;
		page_array_ptr = page;
		ret = map_vm_area(&tmp_area, PAGE_KERNEL, &page_array_ptr);
		if (ret) {
			printk(KERN_ERR "binder: %d: binder_alloc_buf failed "
			       "to map page at %p in kernel\n",
			       proc->pid, page_addr);
			goto err_map_kernel_failed;
		}
		user_page_addr = (size_t)page_addr + proc->user_buffer_offset;
		ret = vm_insert_page(vma, user_page_addr, page[0]);
		if (ret) {
			printk(KERN_ERR "binder: %d: binder_alloc_buf failed "
			       "to map page at %lx in userspace\n",
			       proc->pid, user_page_addr);
			goto err_vm_insert_page_failed;
		}
		/* vm_insert_page does not seem to increment the refcount */
	}
	if (mm) {
		up_write(&mm->mmap_sem);
		mmput(mm);
	}
	return 0;

free_range:
	for (page_addr = end - PAGE_SIZE; page_addr >= start;
	     page_addr -= PAGE_SIZE) {
		page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
		if (vma)
			zap_page_range(vma, (size_t)page_addr +
				proc->user_buffer_offset, PAGE_SIZE, NULL);
err_vm_insert_page_failed:
		unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
err_map_kernel_failed:
		__free_page(*page);
		*page = NULL;
err_alloc_page_failed:
		;
	}
err_no_vma:
	if (mm) {
		up_write(&mm->mmap_sem);
		mmput(mm);
	}
	return -ENOMEM;
}

/*
 * Best-fit allocation of a transaction buffer from proc->free_buffers.
 *
 * @data_size/@offsets_size are each pointer-aligned and summed; overflow
 * of the sum is rejected.  Async transactions are additionally bounded by
 * proc->free_async_space.  After the best-fit node is found, backing
 * pages are populated via binder_update_page_range(), the buffer moves
 * to the allocated tree, and any tail remainder large enough to hold
 * another binder_buffer (+4 slack) is split off and re-inserted as free.
 * Returns NULL on any failure.
 */
static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
	size_t data_size, size_t offsets_size, int is_async)
{
	struct rb_node *n = proc->free_buffers.rb_node;
	struct binder_buffer *buffer;
	size_t buffer_size;
	struct rb_node *best_fit = NULL;
	void *has_page_addr;
	void *end_page_addr;
	size_t size;
	if (proc->vma == NULL) {
		printk(KERN_ERR "binder: %d: binder_alloc_buf, no vma\n",
		       proc->pid);
		return NULL;
	}
	size = ALIGN(data_size, sizeof(void *)) +
		ALIGN(offsets_size, sizeof(void *));
	/* aligned sum smaller than either input means size_t overflow */
	if (size < data_size || size < offsets_size) {
		binder_user_error("binder: %d: got transaction with invalid "
			"size %d-%d\n", proc->pid, data_size, offsets_size);
		return NULL;
	}
	if (is_async &&
	    proc->free_async_space < size + sizeof(struct binder_buffer)) {
		if (binder_debug_mask & BINDER_DEBUG_BUFFER_ALLOC)
			printk(KERN_ERR "binder: %d: binder_alloc_buf size %d f"
			       "ailed, no async space left\n", proc->pid, size);
		return NULL;
	}
	/* best-fit walk of the size-ordered free tree */
	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);
		buffer_size = binder_buffer_size(proc, buffer);
		if (size < buffer_size) {
			best_fit = n;
			n = n->rb_left;
		} else if (size > buffer_size)
			n = n->rb_right;
		else {
			best_fit = n;
			break;
		}
	}
	if (best_fit == NULL) {
		printk(KERN_ERR "binder: %d: binder_alloc_buf size %d failed, "
		       "no address space\n", proc->pid, size);
		return NULL;
	}
	/* n == NULL: no exact match; use the smallest larger buffer */
	if (n == NULL) {
		buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
		buffer_size = binder_buffer_size(proc, buffer);
	}
	if (binder_debug_mask & BINDER_DEBUG_BUFFER_ALLOC)
		printk(KERN_INFO "binder: %d: binder_alloc_buf size %d got buff"
		       "er %p size %d\n", proc->pid, size, buffer, buffer_size);
	/* last page that the buffer's current extent already reaches */
	has_page_addr =
		(void *)(((size_t)buffer->data + buffer_size) & PAGE_MASK);
	if (n == NULL) {
		if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)
			buffer_size = size; /* no room for other buffers */
		else
			buffer_size = size + sizeof(struct binder_buffer);
	}
	end_page_addr = (void *)PAGE_ALIGN((size_t)buffer->data + buffer_size);
	if (end_page_addr > has_page_addr)
		end_page_addr = has_page_addr;
	if (binder_update_page_range(proc, 1,
	    (void *)PAGE_ALIGN((size_t)buffer->data), end_page_addr, NULL))
		return NULL;
	rb_erase(best_fit, &proc->free_buffers);
	buffer->free = 0;
	binder_insert_allocated_buffer(proc, buffer);
	/* split the unused tail into a new free buffer */
	if (buffer_size != size) {
		struct binder_buffer *new_buffer = (void *)buffer->data + size;
		list_add(&new_buffer->entry, &buffer->entry);
		new_buffer->free = 1;
		binder_insert_free_buffer(proc, new_buffer);
	}
	if (binder_debug_mask & BINDER_DEBUG_BUFFER_ALLOC)
		printk(KERN_INFO "binder: %d: binder_alloc_buf size %d got "
		       "%p\n", proc->pid, size, buffer);
	buffer->data_size = data_size;
	buffer->offsets_size = offsets_size;
	buffer->async_transaction = is_async;
	if (is_async) {
		proc->free_async_space -= size + sizeof(struct binder_buffer);
		if (binder_debug_mask & BINDER_DEBUG_BUFFER_ALLOC_ASYNC)
			printk(KERN_INFO "binder: %d: binder_alloc_buf size %d "
			       "async free %d\n", proc->pid, size,
			       proc->free_async_space);
	}
	return buffer;
}

/* Address of the page containing the first byte of @buffer's header. */
static void *buffer_start_page(struct binder_buffer *buffer)
{
	return (void *)((size_t)buffer & PAGE_MASK);
}

/* Address of the page containing the last byte of @buffer's header. */
static void *buffer_end_page(struct binder_buffer *buffer)
{
	return (void *)(((size_t)(buffer + 1) - 1) & PAGE_MASK);
}

/*
 * Unlink a free @buffer from proc->buffers and release the backing pages
 * of its header, except for any page shared with the (necessarily free)
 * previous buffer or with the next buffer in the list.
 */
static void binder_delete_free_buffer(
	struct binder_proc *proc, struct binder_buffer *buffer)
{
	struct binder_buffer *prev, *next = NULL;
	int free_page_end = 1;
	int free_page_start = 1;
	BUG_ON(proc->buffers.next == &buffer->entry);
	prev = list_entry(buffer->entry.prev, struct binder_buffer, entry);
	BUG_ON(!prev->free);
	if (buffer_end_page(prev) == buffer_start_page(buffer)) {
		free_page_start = 0;
		if (buffer_end_page(prev) == buffer_end_page(buffer))
			free_page_end = 0;
		if (binder_debug_mask & BINDER_DEBUG_BUFFER_ALLOC)
			printk(KERN_INFO "binder: %d: merge free, buffer %p "
			       "share page with %p\n", proc->pid, buffer, prev);
	}
	if (!list_is_last(&buffer->entry, &proc->buffers)) {
		next = list_entry(buffer->entry.next,
				  struct binder_buffer, entry);
		if (buffer_start_page(next) == buffer_end_page(buffer)) {
			free_page_end = 0;
			if (buffer_start_page(next) ==
			    buffer_start_page(buffer))
				free_page_start = 0;
			/* NOTE(review): this message prints 'prev' although
			 * the shared page found here belongs to 'next' —
			 * looks like a copy-paste slip in the debug output;
			 * confirm against upstream before changing. */
			if (binder_debug_mask & BINDER_DEBUG_BUFFER_ALLOC)
				printk(KERN_INFO "binder: %d: merge free, "
				       "buffer %p share page with %p\n",
				       proc->pid, buffer, prev);
		}
	}
	list_del(&buffer->entry);
	if (free_page_start || free_page_end) {
		/* note: "with with" typo is in the original runtime string */
		if (binder_debug_mask & BINDER_DEBUG_BUFFER_ALLOC)
			printk(KERN_INFO "binder: %d: merge free, buffer %p do "
			       "not share page%s%s with with %p or %p\n",
			       proc->pid, buffer, free_page_start ? "" : " end",
			       free_page_end ? "" : " start", prev, next);
		binder_update_page_range(proc, 0, free_page_start ?
			buffer_start_page(buffer) : buffer_end_page(buffer),
			(free_page_end ? buffer_end_page(buffer) :
			buffer_start_page(buffer)) + PAGE_SIZE, NULL);
	}
}

/*
 * Return an allocated @buffer to the free pool: release its data pages,
 * move it from the allocated tree to the free tree, refund async space
 * if applicable, and coalesce with a free next and/or previous neighbor.
 */
static void binder_free_buf(
	struct binder_proc *proc, struct binder_buffer *buffer)
{
	size_t size, buffer_size;
	buffer_size = binder_buffer_size(proc, buffer);
	size = ALIGN(buffer->data_size, sizeof(void *)) +
		ALIGN(buffer->offsets_size, sizeof(void *));
	if (binder_debug_mask & BINDER_DEBUG_BUFFER_ALLOC)
		printk(KERN_INFO "binder: %d: binder_free_buf %p size %d buffer"
		       "_size %d\n", proc->pid, buffer, size, buffer_size);
	BUG_ON(buffer->free);
	BUG_ON(size > buffer_size);
	BUG_ON(buffer->transaction != NULL);
	BUG_ON((void *)buffer < proc->buffer);
	BUG_ON((void *)buffer > proc->buffer + proc->buffer_size);
	if (buffer->async_transaction) {
		proc->free_async_space += size + sizeof(struct binder_buffer);
		if (binder_debug_mask & BINDER_DEBUG_BUFFER_ALLOC_ASYNC)
			printk(KERN_INFO "binder: %d: binder_free_buf size %d "
			       "async free %d\n", proc->pid, size,
			       proc->free_async_space);
	}
	/* free only the pages fully covered by the data area */
	binder_update_page_range(proc, 0,
		(void *)PAGE_ALIGN((size_t)buffer->data),
		(void *)(((size_t)buffer->data + buffer_size) & PAGE_MASK),
		NULL);
	rb_erase(&buffer->rb_node, &proc->allocated_buffers);
	buffer->free = 1;
	/* merge with a free successor: delete it and absorb its space */
	if (!list_is_last(&buffer->entry, &proc->buffers)) {
		struct binder_buffer *next = list_entry(buffer->entry.next,
						struct binder_buffer, entry);
		if (next->free) {
			rb_erase(&next->rb_node, &proc->free_buffers);
			binder_delete_free_buffer(proc, next);
		}
	}
	/* merge with a free predecessor: delete ourselves into it */
	if (proc->buffers.next != &buffer->entry) {
		struct binder_buffer *prev = list_entry(buffer->entry.prev,
						struct binder_buffer, entry);
		if (prev->free) {
			binder_delete_free_buffer(proc, buffer);
			rb_erase(&prev->rb_node, &proc->free_buffers);
			buffer = prev;
		}
	}
	binder_insert_free_buffer(proc, buffer);
}

/*
 * Look up the binder_node registered for userspace pointer @ptr in
 * proc->nodes (rb-tree keyed by node->ptr).  Returns NULL if absent.
 */
static struct binder_node *binder_get_node(struct binder_proc *proc, void __user *ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;
	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);
		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else
			return node;
	}
	return NULL;
}

/*
 * Create a binder_node for (@ptr, @cookie) in proc->nodes.
 * NOTE(review): truncated in this chunk — body continues past the end
 * of the visible source.
 */
static struct binder_node *binder_new_node(struct binder_proc *proc, void __user *ptr, void __user *cookie)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;
	while (*p) {
		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -