
📄 locks.c

📁 elinux JFFS initial version: for a concrete look at the JFFS filesystem!
💻 C
📖 Page 1 of 3
                return (-EINVAL);
        }

        return (posix_lock_file(filp, &file_lock, cmd == F_SETLKW));
}

/* This function is called when the file is closed. */
void locks_remove_locks(struct task_struct *task, struct file *filp)
{
        struct file_lock *fl;

        /* For POSIX locks we free all locks on this file for the given task.
         * For FLOCK we only free locks on this *open* file if it is the last
         * close on that file.
         */
        if ((fl = filp->f_inode->i_flock) != NULL) {
                if (fl->fl_flags & FL_POSIX)
                        posix_remove_locks(&filp->f_inode->i_flock, task);
                else
                        flock_remove_locks(&filp->f_inode->i_flock, filp);
        }

        return;
}

static void posix_remove_locks(struct file_lock **before, struct task_struct *task)
{
        struct file_lock *fl;

        while ((fl = *before) != NULL) {
                if (fl->fl_owner == task)
                        locks_delete_lock(before, 0);
                else
                        before = &fl->fl_next;
        }

        return;
}

static void flock_remove_locks(struct file_lock **before, struct file *filp)
{
        struct file_lock *fl;

        while ((fl = *before) != NULL) {
                if ((fl->fl_file == filp) && (filp->f_count == 1))
                        locks_delete_lock(before, 0);
                else
                        before = &fl->fl_next;
        }

        return;
}

int locks_verify_locked(struct inode *inode)
{
        /* Candidates for mandatory locking have the setgid bit set
         * but no group execute bit - an otherwise meaningless combination.
         */
        if (IS_MANDLOCK(inode) &&
            (inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID)
                return (locks_mandatory_locked(inode));
        return (0);
}

int locks_verify_area(int read_write, struct inode *inode, struct file *filp,
                      unsigned int offset, unsigned int count)
{
        /* Candidates for mandatory locking have the setgid bit set
         * but no group execute bit - an otherwise meaningless combination.
         */
        if (IS_MANDLOCK(inode) &&
            (inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID)
                return (locks_mandatory_area(read_write, inode, filp, offset,
                                             count));
        return (0);
}

int locks_mandatory_locked(struct inode *inode)
{
        struct file_lock *fl;

        /* If there are no FL_POSIX locks then go ahead. */
        if (!(fl = inode->i_flock) || !(fl->fl_flags & FL_POSIX))
                return (0);

        /* Search the lock list for this inode for any POSIX locks. */
        while (fl != NULL) {
                if (fl->fl_owner != current)
                        return (-EAGAIN);
                fl = fl->fl_next;
        }
        return (0);
}

int locks_mandatory_area(int read_write, struct inode *inode,
                         struct file *filp, unsigned int offset,
                         unsigned int count)
{
        struct file_lock *fl;
        struct file_lock tfl;

        memset(&tfl, 0, sizeof(tfl));

        tfl.fl_file = filp;
        tfl.fl_flags = FL_POSIX | FL_ACCESS;
        tfl.fl_owner = current;
        tfl.fl_type = (read_write == FLOCK_VERIFY_WRITE) ? F_WRLCK : F_RDLCK;
        tfl.fl_start = offset;
        tfl.fl_end = offset + count - 1;

repeat:
        /* If there are no FL_POSIX locks then go ahead. */
        if (!(fl = inode->i_flock) || !(fl->fl_flags & FL_POSIX))
                return (0);

        /* Search the lock list for this inode for locks that conflict with
         * the proposed read/write.
         */
        while (fl != NULL) {
                /* Block for writes against a "read" lock,
                 * and both reads and writes against a "write" lock.
                 */
                if (posix_locks_conflict(fl, &tfl)) {
                        if (filp && (filp->f_flags & O_NONBLOCK))
                                return (-EAGAIN);
                        if (current->signal & ~current->blocked)
                                return (-ERESTARTSYS);
                        if (posix_locks_deadlock(current, fl->fl_owner))
                                return (-EDEADLK);

                        locks_insert_block(fl, &tfl);
                        interruptible_sleep_on(&tfl.fl_wait);
                        locks_delete_block(fl, &tfl);

                        if (current->signal & ~current->blocked)
                                return (-ERESTARTSYS);
                        /* If we've been sleeping someone might have
                         * changed the permissions behind our back.
                         */
                        if ((inode->i_mode & (S_ISGID | S_IXGRP)) != S_ISGID)
                                break;
                        goto repeat;
                }
                fl = fl->fl_next;
        }
        return (0);
}

/* Verify a "struct flock" and copy it to a "struct file_lock" as a POSIX
 * style lock.
 */
static int posix_make_lock(struct file *filp, struct file_lock *fl,
                           struct flock *l)
{
        off_t start;

        memset(fl, 0, sizeof(*fl));

        fl->fl_flags = FL_POSIX;

        switch (l->l_type) {
        case F_RDLCK:
        case F_WRLCK:
        case F_UNLCK:
                fl->fl_type = l->l_type;
                break;
        case F_SHLCK:
                fl->fl_type = F_RDLCK;
                fl->fl_flags |= FL_BROKEN;
                break;
        case F_EXLCK:
                fl->fl_type = F_WRLCK;
                fl->fl_flags |= FL_BROKEN;
                break;
        default:
                return (0);
        }

        switch (l->l_whence) {
        case 0: /*SEEK_SET*/
                start = 0;
                break;
        case 1: /*SEEK_CUR*/
                start = filp->f_pos;
                break;
        case 2: /*SEEK_END*/
                start = filp->f_inode->i_size;
                break;
        default:
                return (0);
        }

        if (((start += l->l_start) < 0) || (l->l_len < 0))
                return (0);
        fl->fl_start = start;   /* we record the absolute position */
        if ((l->l_len == 0) || ((fl->fl_end = start + l->l_len - 1) < 0))
                fl->fl_end = OFFSET_MAX;

        fl->fl_file = filp;
        fl->fl_owner = current;

        return (1);
}

/* Verify a call to flock() and fill in a file_lock structure with
 * an appropriate FLOCK lock.
 */
static int flock_make_lock(struct file *filp, struct file_lock *fl,
                           unsigned int cmd)
{
        memset(fl, 0, sizeof(*fl));

        if (!filp->f_inode)     /* just in case */
                return (0);

        switch (cmd & ~LOCK_NB) {
        case LOCK_SH:
                fl->fl_type = F_RDLCK;
                break;
        case LOCK_EX:
                fl->fl_type = F_WRLCK;
                break;
        case LOCK_UN:
                fl->fl_type = F_UNLCK;
                break;
        default:
                return (0);
        }

        fl->fl_flags = FL_FLOCK;
        fl->fl_start = 0;
        fl->fl_end = OFFSET_MAX;
        fl->fl_file = filp;
        fl->fl_owner = NULL;

        return (1);
}

/* Determine if lock sys_fl blocks lock caller_fl. POSIX specific
 * checking before calling the locks_conflict().
 */
static int posix_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
{
        /* POSIX locks owned by the same process do not conflict with
         * each other.
         */
        if (caller_fl->fl_owner == sys_fl->fl_owner)
                return (0);

        return (locks_conflict(caller_fl, sys_fl));
}

/* Determine if lock sys_fl blocks lock caller_fl. FLOCK specific
 * checking before calling the locks_conflict().
 */
static int flock_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
{
        /* FLOCK locks referring to the same filp do not conflict with
         * each other.
         */
        if (caller_fl->fl_file == sys_fl->fl_file)
                return (0);

        return (locks_conflict(caller_fl, sys_fl));
}

/* Determine if lock sys_fl blocks lock caller_fl. Common functionality
 * checks for overlapping locks and shared/exclusive status.
 */
static int locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
{
        if (!locks_overlap(caller_fl, sys_fl))
                return (0);

        switch (caller_fl->fl_type) {
        case F_RDLCK:
                return (sys_fl->fl_type == F_WRLCK);

        case F_WRLCK:
                return (1);

        default:
                printk("locks_conflict(): impossible lock type - %d\n",
                       caller_fl->fl_type);
                break;
        }
        return (0);     /* This should never happen */
}

/* This function tests for deadlock condition before putting a process to
 * sleep. The detection scheme is no longer recursive. Recursive was neat,
 * but dangerous - we risked stack corruption if the lock data was bad, or
 * if the recursion was too deep for any other reason.
 *
 * We rely on the fact that a task can only be on one lock's wait queue
 * at a time. When we find blocked_task on a wait queue we can re-search
 * with blocked_task equal to that queue's owner, until either blocked_task
 * isn't found, or blocked_task is found on a queue owned by my_task.
 */
static int posix_locks_deadlock(struct task_struct *my_task,
                                struct task_struct *blocked_task)
{
        struct file_lock *fl;
        struct file_lock *bfl;

next_task:
        if (my_task == blocked_task)
                return (1);
        for (fl = file_lock_table; fl != NULL; fl = fl->fl_nextlink) {
                if (fl->fl_owner == NULL || fl->fl_nextblock == NULL)
                        continue;
                for (bfl = fl->fl_nextblock; bfl != fl; bfl = bfl->fl_nextblock) {
                        if (bfl->fl_owner == blocked_task) {
                                if (fl->fl_owner == my_task) {
                                        return (1);
                                }
                                blocked_task = fl->fl_owner;
                                goto next_task;
                        }
                }
        }
        return (0);
}

/* Try to create a FLOCK lock on filp. We always insert new FLOCK locks at
 * the head of the list, but that's secret knowledge known only to the next
 * two functions.
 */
static int flock_lock_file(struct file *filp, struct file_lock *caller,
                           unsigned int wait)
{
        struct file_lock *fl;
        struct file_lock *new_fl = NULL;
        struct file_lock **before;
        int error;
        int change;
        int unlock = (caller->fl_type == F_UNLCK);

        /*
         * If we need a new lock, get it in advance to avoid races.
         */
        if (!unlock) {
                error = -ENOLCK;
                new_fl = locks_alloc_lock(caller);
                if (!new_fl)
                        goto out;
        }

        error = 0;
search:
        change = 0;
        before = &filp->f_inode->i_flock;
        if ((fl = *before) && (fl->fl_flags & FL_POSIX)) {
                error = -EBUSY;
                goto out;
        }

        while ((fl = *before) != NULL) {
                if (caller->fl_file == fl->fl_file) {
                        if (caller->fl_type == fl->fl_type)
                                goto out;
                        change = 1;
                        break;
                }
                before = &fl->fl_next;
        }
        /* change means that we are changing the type of an existing lock,
         * or else unlocking it.
         */
        if (change) {
                /* N.B. What if the wait argument is false? */
                locks_delete_lock(before, !unlock);
                /*
                 * If we waited, another lock may have been added ...
                 */
                if (!unlock)
                        goto search;
        }
        if (unlock)
                goto out;

repeat:
        /* Check signals each time we start */
        error = -ERESTARTSYS;
        if (current->signal & ~current->blocked)
                goto out;
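
The listing above services three request paths: fcntl() POSIX record locks (posix_make_lock and the posix_lock_file call visible at the top), BSD flock() whole-file locks (flock_make_lock / flock_lock_file), and the mandatory-locking gate that locks_verify_locked() and locks_verify_area() apply to files whose mode has the setgid bit set but the group-execute bit clear. The user-space sketch below is not part of locks.c; it is a minimal illustration of those three paths under a modern libc. The file name demo.dat and the 100-byte lock range are arbitrary examples, and mandatory locking only takes effect when the filesystem is also mounted with mandatory locking enabled (the IS_MANDLOCK() check).

/* User-space sketch (illustrative only, not part of locks.c):
 * exercises the fcntl(), flock() and mandatory-locking paths above. */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/file.h>
#include <sys/stat.h>

int main(void)
{
        struct flock fl;
        int fd = open("demo.dat", O_RDWR | O_CREAT, 0644);     /* hypothetical file */

        if (fd < 0) {
                perror("open");
                return 1;
        }

        /* POSIX record lock on bytes 0..99. l_type/l_whence/l_start/l_len are
         * exactly the fields posix_make_lock() copies into a file_lock;
         * F_SETLKW takes the blocking path guarded by posix_locks_deadlock(). */
        memset(&fl, 0, sizeof(fl));
        fl.l_type = F_WRLCK;
        fl.l_whence = SEEK_SET;
        fl.l_start = 0;
        fl.l_len = 100;
        if (fcntl(fd, F_SETLKW, &fl) < 0)
                perror("fcntl(F_SETLKW)");

        /* Release the record lock before calling flock(): in the code above,
         * flock_lock_file() returns -EBUSY while FL_POSIX locks are present. */
        fl.l_type = F_UNLCK;
        fcntl(fd, F_SETLK, &fl);

        /* Whole-file flock() lock: LOCK_EX maps to F_WRLCK in flock_make_lock();
         * LOCK_NB selects the non-blocking error path instead of sleeping. */
        if (flock(fd, LOCK_EX | LOCK_NB) < 0)
                perror("flock(LOCK_EX|LOCK_NB)");
        flock(fd, LOCK_UN);

        /* Mark the file as a mandatory-locking candidate: setgid set, group
         * execute clear - the "otherwise meaningless combination" tested by
         * locks_verify_locked(). Enforcement also requires a mount with
         * mandatory locking enabled (IS_MANDLOCK()). */
        if (fchmod(fd, S_ISGID | 0644) < 0)
                perror("fchmod");

        close(fd);      /* the final close runs locks_remove_locks() in the kernel */
        return 0;
}

Note one design point visible in this historical code: flock() and POSIX locks on the same file exclude each other (the -EBUSY path in flock_lock_file), whereas later kernels treat the two lock families independently.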
