📄 flock.c
_debug("instant readlock"); ASSERTCMP(vnode->flags & ((1 << AFS_VNODE_LOCKING) | (1 << AFS_VNODE_WRITELOCKED)), ==, 0); ASSERT(!list_empty(&vnode->granted_locks)); goto sharing_existing_lock; } /* if there's no-one else with a lock on this vnode, then we need to * ask the server for a lock */ if (list_empty(&vnode->pending_locks) && list_empty(&vnode->granted_locks)) { _debug("not locked"); ASSERTCMP(vnode->flags & ((1 << AFS_VNODE_LOCKING) | (1 << AFS_VNODE_READLOCKED) | (1 << AFS_VNODE_WRITELOCKED)), ==, 0); list_add_tail(&fl->fl_u.afs.link, &vnode->pending_locks); set_bit(AFS_VNODE_LOCKING, &vnode->flags); spin_unlock(&vnode->lock); ret = afs_vnode_set_lock(vnode, key, type); clear_bit(AFS_VNODE_LOCKING, &vnode->flags); switch (ret) { case 0: _debug("acquired"); goto acquired_server_lock; case -EWOULDBLOCK: _debug("would block"); spin_lock(&vnode->lock); ASSERT(list_empty(&vnode->granted_locks)); ASSERTCMP(vnode->pending_locks.next, ==, &fl->fl_u.afs.link); goto wait; default: spin_lock(&vnode->lock); list_del_init(&fl->fl_u.afs.link); spin_unlock(&vnode->lock); goto error; } } /* otherwise, we need to wait for a local lock to become available */ _debug("wait local"); list_add_tail(&fl->fl_u.afs.link, &vnode->pending_locks);wait: if (!(fl->fl_flags & FL_SLEEP)) { _debug("noblock"); ret = -EAGAIN; goto abort_attempt; } spin_unlock(&vnode->lock); /* now we need to sleep and wait for the lock manager thread to get the * lock from the server */ _debug("sleep"); ret = wait_event_interruptible(fl->fl_wait, fl->fl_u.afs.state <= AFS_LOCK_GRANTED); if (fl->fl_u.afs.state <= AFS_LOCK_GRANTED) { ret = fl->fl_u.afs.state; if (ret < 0) goto error; spin_lock(&vnode->lock); goto given_lock; } /* we were interrupted, but someone may still be in the throes of * giving us the lock */ _debug("intr"); ASSERTCMP(ret, ==, -ERESTARTSYS); spin_lock(&vnode->lock); if (fl->fl_u.afs.state <= AFS_LOCK_GRANTED) { ret = fl->fl_u.afs.state; if (ret < 0) { spin_unlock(&vnode->lock); goto error; } goto given_lock; }abort_attempt: /* we aren't going to get the lock, either because we're unwilling to * wait, or because some signal happened */ _debug("abort"); if (list_empty(&vnode->granted_locks) && vnode->pending_locks.next == &fl->fl_u.afs.link) { if (vnode->pending_locks.prev != &fl->fl_u.afs.link) { /* kick the next pending lock into having a go */ list_del_init(&fl->fl_u.afs.link); afs_lock_may_be_available(vnode); } } else { list_del_init(&fl->fl_u.afs.link); } spin_unlock(&vnode->lock); goto error;acquired_server_lock: /* we've acquired a server lock, but it needs to be renewed after 5 * mins */ spin_lock(&vnode->lock); afs_schedule_lock_extension(vnode); if (type == AFS_LOCK_READ) set_bit(AFS_VNODE_READLOCKED, &vnode->flags); else set_bit(AFS_VNODE_WRITELOCKED, &vnode->flags);sharing_existing_lock: /* the lock has been granted as far as we're concerned... */ fl->fl_u.afs.state = AFS_LOCK_GRANTED; list_move_tail(&fl->fl_u.afs.link, &vnode->granted_locks);given_lock: /* ... 
but we do still need to get the VFS's blessing */ ASSERT(!(vnode->flags & (1 << AFS_VNODE_LOCKING))); ASSERT((vnode->flags & ((1 << AFS_VNODE_READLOCKED) | (1 << AFS_VNODE_WRITELOCKED))) != 0); ret = posix_lock_file(file, fl, NULL); if (ret < 0) goto vfs_rejected_lock; spin_unlock(&vnode->lock); /* again, make sure we've got a callback on this file and, again, make * sure that our view of the data version is up to date (we ignore * errors incurred here and deal with the consequences elsewhere) */ afs_vnode_fetch_status(vnode, NULL, key);error: unlock_kernel(); _leave(" = %d", ret); return ret;vfs_rejected_lock: /* the VFS rejected the lock we just obtained, so we have to discard * what we just got */ _debug("vfs refused %d", ret); list_del_init(&fl->fl_u.afs.link); if (list_empty(&vnode->granted_locks)) afs_defer_unlock(vnode, key); spin_unlock(&vnode->lock); goto abort_attempt;}/* * unlock on a file on the server */static int afs_do_unlk(struct file *file, struct file_lock *fl){ struct afs_vnode *vnode = AFS_FS_I(file->f_mapping->host); struct key *key = file->private_data; int ret; _enter("{%x:%u},%u", vnode->fid.vid, vnode->fid.vnode, fl->fl_type); /* only whole-file unlocks are supported */ if (fl->fl_start != 0 || fl->fl_end != OFFSET_MAX) return -EINVAL; fl->fl_ops = &afs_lock_ops; INIT_LIST_HEAD(&fl->fl_u.afs.link); fl->fl_u.afs.state = AFS_LOCK_PENDING; spin_lock(&vnode->lock); ret = posix_lock_file(file, fl, NULL); if (ret < 0) { spin_unlock(&vnode->lock); _leave(" = %d [vfs]", ret); return ret; } /* discard the server lock only if all granted locks are gone */ if (list_empty(&vnode->granted_locks)) afs_defer_unlock(vnode, key); spin_unlock(&vnode->lock); _leave(" = 0"); return 0;}/* * return information about a lock we currently hold, if indeed we hold one */static int afs_do_getlk(struct file *file, struct file_lock *fl){ struct afs_vnode *vnode = AFS_FS_I(file->f_mapping->host); struct key *key = file->private_data; int ret, lock_count; _enter(""); fl->fl_type = F_UNLCK; mutex_lock(&vnode->vfs_inode.i_mutex); /* check local lock records first */ ret = 0; posix_test_lock(file, fl); if (fl->fl_type == F_UNLCK) { /* no local locks; consult the server */ ret = afs_vnode_fetch_status(vnode, NULL, key); if (ret < 0) goto error; lock_count = vnode->status.lock_count; if (lock_count) { if (lock_count > 0) fl->fl_type = F_RDLCK; else fl->fl_type = F_WRLCK; fl->fl_start = 0; fl->fl_end = OFFSET_MAX; } }error: mutex_unlock(&vnode->vfs_inode.i_mutex); _leave(" = %d [%hd]", ret, fl->fl_type); return ret;}/* * manage POSIX locks on a file */int afs_lock(struct file *file, int cmd, struct file_lock *fl){ struct afs_vnode *vnode = AFS_FS_I(file->f_dentry->d_inode); _enter("{%x:%u},%d,{t=%x,fl=%x,r=%Ld:%Ld}", vnode->fid.vid, vnode->fid.vnode, cmd, fl->fl_type, fl->fl_flags, (long long) fl->fl_start, (long long) fl->fl_end); /* AFS doesn't support mandatory locks */ if (__mandatory_lock(&vnode->vfs_inode) && fl->fl_type != F_UNLCK) return -ENOLCK; if (IS_GETLK(cmd)) return afs_do_getlk(file, fl); if (fl->fl_type == F_UNLCK) return afs_do_unlk(file, fl); return afs_do_setlk(file, fl);}/* * manage FLOCK locks on a file */int afs_flock(struct file *file, int cmd, struct file_lock *fl){ struct afs_vnode *vnode = AFS_FS_I(file->f_dentry->d_inode); _enter("{%x:%u},%d,{t=%x,fl=%x}", vnode->fid.vid, vnode->fid.vnode, cmd, fl->fl_type, fl->fl_flags); /* * No BSD flocks over NFS allowed. * Note: we could try to fake a POSIX lock request here by * using ((u32) filp | 0x80000000) or some such as the pid. 
* Not sure whether that would be unique, though, or whether * that would break in other places. */ if (!(fl->fl_flags & FL_FLOCK)) return -ENOLCK; /* we're simulating flock() locks using posix locks on the server */ fl->fl_owner = (fl_owner_t) file; fl->fl_start = 0; fl->fl_end = OFFSET_MAX; if (fl->fl_type == F_UNLCK) return afs_do_unlk(file, fl); return afs_do_setlk(file, fl);}/* * the POSIX lock management core VFS code copies the lock record and adds the * copy into its own list, so we need to add that copy to the vnode's lock * queue in the same place as the original (which will be deleted shortly * after) */static void afs_fl_copy_lock(struct file_lock *new, struct file_lock *fl){ _enter(""); list_add(&new->fl_u.afs.link, &fl->fl_u.afs.link);}/* * need to remove this lock from the vnode queue when it's removed from the * VFS's list */static void afs_fl_release_private(struct file_lock *fl){ _enter(""); list_del_init(&fl->fl_u.afs.link);}
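The excerpt points fl->fl_ops at afs_lock_ops and supplies afs_fl_copy_lock() and afs_fl_release_private(), but the ops table itself lies outside the lines shown. A rough sketch of how those callbacks would presumably be wired up through the standard file_lock_operations hooks follows; the table contents and placement are assumptions, not part of the excerpt:

/* Sketch only: connects the two callbacks defined above to the VFS lock
 * copy/teardown hooks via the standard file_lock_operations table. */
static const struct file_lock_operations afs_lock_ops = {
	.fl_copy_lock		= afs_fl_copy_lock,
	.fl_release_private	= afs_fl_release_private,
};

/* Likewise, afs_lock() and afs_flock() would be reached through the ->lock
 * and ->flock members of the file's file_operations, assumed to be set up
 * elsewhere in fs/afs and not shown in this excerpt. */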
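For context, here is a minimal user-space sketch of the call paths these handlers serve: a blocking fcntl() byte-range request reaches afs_lock() (and from there afs_do_setlk()/afs_do_unlk()), while a BSD-style flock() request reaches afs_flock(). The AFS path below is hypothetical.

/* Minimal user-space sketch (not part of flock.c): whole-file locking on a
 * hypothetical AFS-mounted file. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/file.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/afs/example.com/user/data", O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* whole-file write lock: l_len == 0 means "to end of file", which the
	 * VFS presents as fl_start == 0, fl_end == OFFSET_MAX */
	struct flock fl = {
		.l_type   = F_WRLCK,
		.l_whence = SEEK_SET,
		.l_start  = 0,
		.l_len    = 0,
	};
	if (fcntl(fd, F_SETLKW, &fl) < 0)	/* blocking set: FL_SLEEP path */
		perror("F_SETLKW");

	fl.l_type = F_UNLCK;			/* whole-file unlock */
	if (fcntl(fd, F_SETLK, &fl) < 0)
		perror("F_UNLCK");

	/* BSD-style shared lock: handled by afs_flock(), which simulates it
	 * with a whole-file POSIX lock on the server */
	if (flock(fd, LOCK_SH) < 0)
		perror("flock");
	flock(fd, LOCK_UN);

	close(fd);
	return 0;
}

Since afs_do_unlk() only accepts whole-file ranges, the unlock above also covers the entire file.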