📄 locks.c
 * Wake up processes that are blocked waiting for this lock,
 * notify the FS that the lock has been cleared and
 * finally free the lock.
 */
static inline void _delete_lock(struct file_lock *fl, unsigned int wait)
{
        fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync);
        if (fl->fl_fasync != NULL) {
                printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync);
                fl->fl_fasync = NULL;
        }
        if (fl->fl_remove)
                fl->fl_remove(fl);

        locks_wake_up_blocks(fl, wait);
        locks_free_lock(fl);
}

/*
 * Delete a lock and then free it.
 */
static void locks_delete_lock(struct file_lock **thisfl_p, unsigned int wait)
{
        struct file_lock *fl = *thisfl_p;

        _unhash_lock(thisfl_p);
        _delete_lock(fl, wait);
}

/*
 * Call back client filesystem in order to get it to unregister a lock,
 * then delete lock. Essentially useful only in locks_remove_*().
 * Note: this must be called with the semaphore already held!
 */
static inline void locks_unlock_delete(struct file_lock **thisfl_p)
{
        struct file_lock *fl = *thisfl_p;
        int (*lock)(struct file *, int, struct file_lock *);

        _unhash_lock(thisfl_p);
        if (fl->fl_file->f_op &&
            (lock = fl->fl_file->f_op->lock) != NULL) {
                fl->fl_type = F_UNLCK;
                lock(fl->fl_file, F_SETLK, fl);
        }
        _delete_lock(fl, 0);
}

/* Determine if lock sys_fl blocks lock caller_fl. Common functionality
 * checks for shared/exclusive status of overlapping locks.
 */
static int locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
{
        switch (caller_fl->fl_type) {
        case F_RDLCK:
                return (sys_fl->fl_type == F_WRLCK);

        case F_WRLCK:
                return (1);

        default:
                printk(KERN_ERR "locks_conflict(): impossible lock type - %d\n",
                       caller_fl->fl_type);
                break;
        }
        return (0);     /* This should never happen */
}

/* Determine if lock sys_fl blocks lock caller_fl. POSIX specific
 * checking before calling the locks_conflict().
 */
static int posix_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
{
        /* POSIX locks owned by the same process do not conflict with
         * each other.
         */
        if (!(sys_fl->fl_flags & FL_POSIX) ||
            locks_same_owner(caller_fl, sys_fl))
                return (0);

        /* Check whether they overlap */
        if (!locks_overlap(caller_fl, sys_fl))
                return 0;

        return (locks_conflict(caller_fl, sys_fl));
}
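/*
 * Illustration (not part of locks.c): a minimal user-space sketch of the
 * semantics posix_locks_conflict() enforces. Record locks taken by the same
 * process never conflict with each other, so the second F_SETLK below
 * succeeds even though the requested byte ranges overlap; the overlapping
 * bytes simply end up write-locked. The path name is made up for the demo.
 */
#if 0   /* example only: build separately as a normal user-space program */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        struct flock rd = { .l_type = F_RDLCK, .l_whence = SEEK_SET,
                            .l_start = 0,  .l_len = 100 };
        struct flock wr = { .l_type = F_WRLCK, .l_whence = SEEK_SET,
                            .l_start = 50, .l_len = 100 };
        int fd = open("/tmp/posix-lock-demo", O_RDWR | O_CREAT, 0644);

        if (fd < 0)
                return 1;
        /* Same owner (same process): overlapping requests do not conflict. */
        if (fcntl(fd, F_SETLK, &rd) == 0 && fcntl(fd, F_SETLK, &wr) == 0)
                printf("both locks granted; bytes 50-149 are now write-locked\n");
        close(fd);
        return 0;
}
#endif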
/* Determine if lock sys_fl blocks lock caller_fl. FLOCK specific
 * checking before calling the locks_conflict().
 */
static int flock_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
{
        /* FLOCK locks referring to the same filp do not conflict with
         * each other.
         */
        if (!(sys_fl->fl_flags & FL_FLOCK) ||
            (caller_fl->fl_file == sys_fl->fl_file))
                return (0);
#ifdef MSNFS
        if ((caller_fl->fl_type & LOCK_MAND) || (sys_fl->fl_type & LOCK_MAND))
                return 0;
#endif

        return (locks_conflict(caller_fl, sys_fl));
}

static int interruptible_sleep_on_locked(wait_queue_head_t *fl_wait, int timeout)
{
        int result = 0;
        DECLARE_WAITQUEUE(wait, current);

        current->state = TASK_INTERRUPTIBLE;
        add_wait_queue(fl_wait, &wait);
        if (timeout == 0)
                schedule();
        else
                result = schedule_timeout(timeout);
        if (signal_pending(current))
                result = -ERESTARTSYS;
        remove_wait_queue(fl_wait, &wait);
        current->state = TASK_RUNNING;
        return result;
}

static int locks_block_on(struct file_lock *blocker, struct file_lock *waiter)
{
        int result;

        locks_insert_block(blocker, waiter);
        result = interruptible_sleep_on_locked(&waiter->fl_wait, 0);
        locks_delete_block(waiter);
        return result;
}

static int locks_block_on_timeout(struct file_lock *blocker, struct file_lock *waiter, int time)
{
        int result;

        locks_insert_block(blocker, waiter);
        result = interruptible_sleep_on_locked(&waiter->fl_wait, time);
        locks_delete_block(waiter);
        return result;
}

struct file_lock *posix_test_lock(struct file *filp, struct file_lock *fl)
{
        struct file_lock *cfl;

        lock_kernel();
        for (cfl = filp->f_dentry->d_inode->i_flock; cfl; cfl = cfl->fl_next) {
                if (!(cfl->fl_flags & FL_POSIX))
                        continue;
                if (posix_locks_conflict(cfl, fl))
                        break;
        }
        unlock_kernel();

        return (cfl);
}

/* This function tests for deadlock condition before putting a process to
 * sleep. The detection scheme is no longer recursive. Recursive was neat,
 * but dangerous - we risked stack corruption if the lock data was bad, or
 * if the recursion was too deep for any other reason.
 *
 * We rely on the fact that a task can only be on one lock's wait queue
 * at a time. When we find blocked_task on a wait queue we can re-search
 * with blocked_task equal to that queue's owner, until either blocked_task
 * isn't found, or blocked_task is found on a queue owned by my_task.
 *
 * Note: the above assumption may not be true when handling lock requests
 * from a broken NFS client. But broken NFS clients have a lot more to
 * worry about than proper deadlock detection anyway... --okir
 */
int posix_locks_deadlock(struct file_lock *caller_fl, struct file_lock *block_fl)
{
        struct list_head *tmp;
        fl_owner_t caller_owner, blocked_owner;
        unsigned int caller_pid, blocked_pid;

        caller_owner = caller_fl->fl_owner;
        caller_pid = caller_fl->fl_pid;
        blocked_owner = block_fl->fl_owner;
        blocked_pid = block_fl->fl_pid;

next_task:
        if (caller_owner == blocked_owner && caller_pid == blocked_pid)
                return 1;
        list_for_each(tmp, &blocked_list) {
                struct file_lock *fl = list_entry(tmp, struct file_lock, fl_link);
                if ((fl->fl_owner == blocked_owner) && (fl->fl_pid == blocked_pid)) {
                        fl = fl->fl_next;
                        blocked_owner = fl->fl_owner;
                        blocked_pid = fl->fl_pid;
                        goto next_task;
                }
        }
        return 0;
}
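/*
 * Illustration (not part of locks.c): what the cycle detected by
 * posix_locks_deadlock() looks like from user space. Parent and child each
 * write-lock one byte, then block with F_SETLKW on the byte the other one
 * holds; the kernel follows the blocked_list chain back to the requester
 * and fails one of the two calls with EDEADLK. The file name and the
 * sleep()-based synchronisation are only for this sketch.
 */
#if 0   /* example only: build separately as a normal user-space program */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/wait.h>
#include <unistd.h>

static int lock_byte(int fd, off_t off, int cmd)
{
        struct flock fl = { .l_type = F_WRLCK, .l_whence = SEEK_SET,
                            .l_start = off, .l_len = 1 };
        return fcntl(fd, cmd, &fl);
}

int main(void)
{
        int fd = open("/tmp/deadlock-demo", O_RDWR | O_CREAT, 0644);

        if (fd < 0)
                return 1;
        if (fork() == 0) {                      /* child: lock byte 1, then wait for byte 0 */
                lock_byte(fd, 1, F_SETLK);
                sleep(1);
                if (lock_byte(fd, 0, F_SETLKW) < 0)
                        printf("child: %s\n", strerror(errno));
                return 0;
        }
        lock_byte(fd, 0, F_SETLK);              /* parent: lock byte 0, then wait for byte 1 */
        sleep(1);
        if (lock_byte(fd, 1, F_SETLKW) < 0)
                printf("parent: %s\n", strerror(errno));  /* EDEADLK in one of the two */
        close(fd);                              /* drop the parent's locks so the child can finish */
        wait(NULL);
        return 0;
}
#endif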
int locks_mandatory_locked(struct inode *inode)
{
        fl_owner_t owner = current->files;
        struct file_lock *fl;

        /*
         * Search the lock list for this inode for any POSIX locks.
         */
        lock_kernel();
        for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
                if (!(fl->fl_flags & FL_POSIX))
                        continue;
                if (fl->fl_owner != owner)
                        break;
        }
        unlock_kernel();
        return fl ? -EAGAIN : 0;
}

int locks_mandatory_area(int read_write, struct inode *inode, struct file *filp,
                         loff_t offset, size_t count)
{
        struct file_lock *fl;
        struct file_lock *new_fl = locks_alloc_lock(0);
        int error;

        if (new_fl == NULL)
                return -ENOMEM;

        new_fl->fl_owner = current->files;
        new_fl->fl_pid = current->pid;
        new_fl->fl_file = filp;
        new_fl->fl_flags = FL_POSIX | FL_ACCESS;
        new_fl->fl_type = (read_write == FLOCK_VERIFY_WRITE) ? F_WRLCK : F_RDLCK;
        new_fl->fl_start = offset;
        new_fl->fl_end = offset + count - 1;

        error = 0;
        lock_kernel();

repeat:
        /* Search the lock list for this inode for locks that conflict with
         * the proposed read/write.
         */
        for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
                if (!(fl->fl_flags & FL_POSIX))
                        continue;
                if (fl->fl_start > new_fl->fl_end)
                        break;
                if (posix_locks_conflict(new_fl, fl)) {
                        error = -EAGAIN;
                        if (filp && (filp->f_flags & O_NONBLOCK))
                                break;
                        error = -EDEADLK;
                        if (posix_locks_deadlock(new_fl, fl))
                                break;

                        error = locks_block_on(fl, new_fl);
                        if (error != 0)
                                break;

                        /*
                         * If we've been sleeping someone might have
                         * changed the permissions behind our back.
                         */
                        if ((inode->i_mode & (S_ISGID | S_IXGRP)) != S_ISGID)
                                break;
                        goto repeat;
                }
        }
        locks_free_lock(new_fl);
        unlock_kernel();
        return error;
}

/* Try to create a FLOCK lock on filp. We always insert new FLOCK locks
 * at the head of the list, but that's secret knowledge known only to
 * flock_lock_file and posix_lock_file.
 */
static int flock_lock_file(struct file *filp, unsigned int lock_type,
                           unsigned int wait)
{
        struct file_lock *fl;
        struct file_lock *new_fl = NULL;
        struct file_lock **before;
        struct inode * inode = filp->f_dentry->d_inode;
        int error, change;
        int unlock = (lock_type == F_UNLCK);

        /*
         * If we need a new lock, get it in advance to avoid races.
         */
        if (!unlock) {
                error = -ENOLCK;
                new_fl = flock_make_lock(filp, lock_type);
                if (!new_fl)
                        return error;
        }
        error = 0;
search:
        change = 0;
        before = &inode->i_flock;
        while (((fl = *before) != NULL) && (fl->fl_flags & FL_FLOCK)) {
                if (filp == fl->fl_file) {
                        if (lock_type == fl->fl_type)
                                goto out;
                        change = 1;
                        break;
                }
                before = &fl->fl_next;
        }
        /* change means that we are changing the type of an existing lock,
         * or else unlocking it.
         */
        if (change) {
                /* N.B. What if the wait argument is false? */
                locks_delete_lock(before, !unlock);
                /*
                 * If we waited, another lock may have been added ...
                 */
                if (!unlock)
                        goto search;
        }
        if (unlock)
                goto out;

repeat:
        for (fl = inode->i_flock; (fl != NULL) && (fl->fl_flags & FL_FLOCK);
             fl = fl->fl_next) {
                if (!flock_locks_conflict(new_fl, fl))
                        continue;
                error = -EAGAIN;
                if (!wait)
                        goto out;
                error = locks_block_on(fl, new_fl);
                if (error != 0)
                        goto out;
                goto repeat;
        }
        locks_insert_lock(&inode->i_flock, new_fl);
        new_fl = NULL;
        error = 0;

out:
        if (new_fl)
                locks_free_lock(new_fl);
        return error;
}
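/*
 * Illustration (not part of locks.c): the conversion path in
 * flock_lock_file() seen from user space. A second flock() on the same
 * open file (same filp) never conflicts with the first; it just changes
 * the existing lock's type. A separate open of the same file is a
 * different filp and does conflict. The path name is made up for the demo.
 */
#if 0   /* example only: build separately as a normal user-space program */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/file.h>
#include <unistd.h>

int main(void)
{
        int fd1 = open("/tmp/flock-demo", O_RDWR | O_CREAT, 0644);
        int fd2 = open("/tmp/flock-demo", O_RDWR);

        if (fd1 < 0 || fd2 < 0)
                return 1;

        flock(fd1, LOCK_SH);                    /* take a shared lock */
        flock(fd1, LOCK_EX);                    /* same filp: converted, not blocked */

        if (flock(fd2, LOCK_SH | LOCK_NB) < 0)  /* different filp: conflicts */
                printf("fd2: %s\n", strerror(errno));   /* EWOULDBLOCK */

        close(fd1);
        close(fd2);
        return 0;
}
#endif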
/**
 * posix_lock_file:
 * @filp: The file to apply the lock to
 * @caller: The lock to be applied
 * @wait: 1 to retry automatically, 0 to return -EAGAIN
 *
 * Add a POSIX style lock to a file.
 * We merge adjacent locks whenever possible. POSIX locks are sorted by owner
 * task, then by starting address
 *
 * Kai Petzke writes:
 * To make freeing a lock much faster, we keep a pointer to the lock before the
 * actual one. But the real gain of the new coding was, that lock_it() and
 * unlock_it() became one function.
 *
 * To all purists: Yes, I use a few goto's. Just pass on to the next function.
 */
int posix_lock_file(struct file *filp, struct file_lock *caller,
                    unsigned int wait)
{
        struct file_lock *fl;
        struct file_lock *new_fl, *new_fl2;
        struct file_lock *left = NULL;
        struct file_lock *right = NULL;
        struct file_lock **before;
        struct inode * inode = filp->f_dentry->d_inode;
        int error, added = 0;

        /*
         * We may need two file_lock structures for this operation,
         * so we get them in advance to avoid races.
         */
        new_fl = locks_alloc_lock(0);
        new_fl2 = locks_alloc_lock(0);
        error = -ENOLCK; /* "no luck" */
        if (!(new_fl && new_fl2))
                goto out_nolock;

        lock_kernel();
        if (caller->fl_type != F_UNLCK) {
repeat:
                for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
                        if (!(fl->fl_flags & FL_POSIX))
                                continue;
                        if (!posix_locks_conflict(caller, fl))
                                continue;
                        error = -EAGAIN;
                        if (!wait)
                                goto out;
                        error = -EDEADLK;
                        if (posix_locks_deadlock(caller, fl))
                                goto out;
                        error = locks_block_on(fl, caller);
                        if (error != 0)
                                goto out;
                        goto repeat;
                }
        }

        /*
         * We've allocated the new locks in advance, so there are no
         * errors possible (and no blocking operations) from here on.
         *
         * Find the first old lock with the same owner as the new lock.
         */

        before = &inode->i_flock;

        /* First skip locks owned by other processes. */
        while ((fl = *before) && (!(fl->fl_flags & FL_POSIX) ||
                                  !locks_same_owner(caller, fl))) {
                before = &fl->fl_next;
        }

        /* Process locks with this owner. */
        while ((fl = *before) && locks_same_owner(caller, fl)) {
                /* Detect adjacent or overlapping regions (if same lock type) */
                if (caller->fl_type == fl->fl_type) {
                        if (fl->fl_end < caller->fl_start - 1)
                                goto next_lock;
                        /* If the next lock in the list has entirely bigger
                         * addresses than the new one, insert the lock here.
                         */
                        if (fl->fl_start > caller->fl_end + 1)
                                break;

                        /* If we come here, the new and old lock are of the
                         * same type and adjacent or overlapping. Make one
                         * lock yielding from the lower start address of both
                         * locks to the higher end address.
                         */
                        if (fl->fl_start > caller->fl_start)
                                fl->fl_start = caller->fl_start;
                        else
                                caller->fl_start = fl->fl_start;
                        if (fl->fl_end < caller->fl_end)
                                fl->fl_end = caller->fl_end;
                        else
                                caller->fl_end = fl->fl_end;
                        if (added) {
                                locks_delete_lock(before, 0);
                                continue;
                        }
                        caller = fl;
                        added = 1;
                } else {
                        /* Processing for different lock types is a bit
                         * more complex.
                         */
                        if (fl->fl_end < caller->fl_start)
                                goto next_lock;
                        if (fl->fl_start > caller->fl_end)
                                break;
                        if (caller->fl_type == F_UNLCK)
                                added = 1;
                        if (fl->fl_start < caller->fl_start)
                                left = fl;
                        /* If the next lock in the list has a higher end
                         * address than the new one, insert the new one here.
                         */
                        if (fl->fl_end > caller->fl_end) {
                                right = fl;
                                break;
                        }
                        if (fl->fl_start >= caller->fl_start) {
                                /* The new lock completely replaces an old
                                 * one (This may happen several times).
                                 */
                                if (added) {