glock.c
Linux kernel source code (C)
Page 1 of 4
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/jhash.h>
#include <linux/kallsyms.h>
#include <linux/gfs2_ondisk.h>
#include <linux/list.h>
#include <linux/lm_interface.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <linux/rwsem.h>
#include <asm/uaccess.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "lm.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "super.h"
#include "util.h"

struct gfs2_gl_hash_bucket {
	struct hlist_head hb_list;
};

struct glock_iter {
	int hash;                     /* hash bucket index         */
	struct gfs2_sbd *sdp;         /* incore superblock         */
	struct gfs2_glock *gl;        /* current glock struct      */
	struct seq_file *seq;         /* sequence file for debugfs */
	char string[512];             /* scratch space             */
};

typedef void (*glock_examiner) (struct gfs2_glock *gl);

static int gfs2_dump_lockstate(struct gfs2_sbd *sdp);
static int dump_glock(struct glock_iter *gi, struct gfs2_glock *gl);
static void gfs2_glock_xmote_th(struct gfs2_glock *gl, struct gfs2_holder *gh);
static void gfs2_glock_drop_th(struct gfs2_glock *gl);
static void run_queue(struct gfs2_glock *gl);

static DECLARE_RWSEM(gfs2_umount_flush_sem);
static struct dentry *gfs2_root;
static struct task_struct *scand_process;
static unsigned int scand_secs = 5;
static struct workqueue_struct *glock_workqueue;

#define GFS2_GL_HASH_SHIFT      15
#define GFS2_GL_HASH_SIZE       (1 << GFS2_GL_HASH_SHIFT)
#define GFS2_GL_HASH_MASK       (GFS2_GL_HASH_SIZE - 1)

static struct gfs2_gl_hash_bucket gl_hash_table[GFS2_GL_HASH_SIZE];

/*
 * Despite what you might think, the numbers below are not arbitrary :-)
 * They are taken from the ipv4 routing hash code, which is well tested
 * and thus should be nearly optimal. Later on we might tweak the numbers
 * but for now this should be fine.
 *
 * The reason for putting the locks in a separate array from the list heads
 * is that we can have fewer locks than list heads and save memory. We use
 * the same hash function for both, but with a different hash mask.
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
	defined(CONFIG_PROVE_LOCKING)

#ifdef CONFIG_LOCKDEP
# define GL_HASH_LOCK_SZ        256
#else
# if NR_CPUS >= 32
#  define GL_HASH_LOCK_SZ       4096
# elif NR_CPUS >= 16
#  define GL_HASH_LOCK_SZ       2048
# elif NR_CPUS >= 8
#  define GL_HASH_LOCK_SZ       1024
# elif NR_CPUS >= 4
#  define GL_HASH_LOCK_SZ       512
# else
#  define GL_HASH_LOCK_SZ       256
# endif
#endif

/* We never want more locks than chains */
#if GFS2_GL_HASH_SIZE < GL_HASH_LOCK_SZ
# undef GL_HASH_LOCK_SZ
# define GL_HASH_LOCK_SZ GFS2_GL_HASH_SIZE
#endif

static rwlock_t gl_hash_locks[GL_HASH_LOCK_SZ];

static inline rwlock_t *gl_lock_addr(unsigned int x)
{
	return &gl_hash_locks[x & (GL_HASH_LOCK_SZ-1)];
}
#else /* not SMP, so no spinlocks required */
static inline rwlock_t *gl_lock_addr(unsigned int x)
{
	return NULL;
}
#endif
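/*
 * A sketch (not part of the original file) of how the two masks work
 * together: the same jhash value indexes gl_hash_table with
 * GFS2_GL_HASH_MASK and gl_hash_locks with the smaller
 * GL_HASH_LOCK_SZ-1 mask, so several chains may share one rwlock:
 *
 *	unsigned int h = gl_hash(sdp, &name);   // chain index (see below)
 *	struct hlist_head *head = &gl_hash_table[h].hb_list;
 *	rwlock_t *lk = gl_lock_addr(h);         // h & (GL_HASH_LOCK_SZ-1)
 *
 * Since the lock index is a pure function of the chain index, a given
 * chain is always guarded by the same rwlock.
 */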
/**
 * relaxed_state_ok - is a requested lock compatible with the current lock mode?
 * @actual: the current state of the lock
 * @requested: the lock state that was requested by the caller
 * @flags: the modifier flags passed in by the caller
 *
 * Returns: 1 if the locks are compatible, 0 otherwise
 */

static inline int relaxed_state_ok(unsigned int actual, unsigned requested,
				   int flags)
{
	if (actual == requested)
		return 1;

	if (flags & GL_EXACT)
		return 0;

	if (actual == LM_ST_EXCLUSIVE && requested == LM_ST_SHARED)
		return 1;

	if (actual != LM_ST_UNLOCKED && (flags & LM_FLAG_ANY))
		return 1;

	return 0;
}
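/*
 * Worked examples for relaxed_state_ok() (a sketch, not from the original
 * source; state names abbreviated from the LM_ST_* constants):
 *
 *	actual      requested   flags         -> result
 *	EXCLUSIVE   EXCLUSIVE   0             -> 1 (exact match)
 *	EXCLUSIVE   SHARED      0             -> 1 (EX covers SH)
 *	EXCLUSIVE   SHARED      GL_EXACT      -> 0 (exact state required)
 *	SHARED      EXCLUSIVE   0             -> 0 (would need a promote)
 *	DEFERRED    SHARED      LM_FLAG_ANY   -> 1 (any held state is fine)
 *	UNLOCKED    SHARED      LM_FLAG_ANY   -> 0 (nothing is held at all)
 */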
/**
 * gl_hash() - Turn glock name into hash bucket number
 * @sdp: The GFS2 superblock
 * @name: The glock name
 *
 * Returns: The number of the corresponding hash bucket
 */

static unsigned int gl_hash(const struct gfs2_sbd *sdp,
			    const struct lm_lockname *name)
{
	unsigned int h;

	h = jhash(&name->ln_number, sizeof(u64), 0);
	h = jhash(&name->ln_type, sizeof(unsigned int), h);
	h = jhash(&sdp, sizeof(struct gfs2_sbd *), h);
	h &= GFS2_GL_HASH_MASK;

	return h;
}

/**
 * glock_free() - Perform a few checks and then release struct gfs2_glock
 * @gl: The glock to release
 *
 * Also calls lock module to release its internal structure for this glock.
 *
 */

static void glock_free(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct inode *aspace = gl->gl_aspace;

	gfs2_lm_put_lock(sdp, gl->gl_lock);

	if (aspace)
		gfs2_aspace_put(aspace);

	kmem_cache_free(gfs2_glock_cachep, gl);
}

/**
 * gfs2_glock_hold() - increment reference count on glock
 * @gl: The glock to hold
 *
 */

void gfs2_glock_hold(struct gfs2_glock *gl)
{
	atomic_inc(&gl->gl_ref);
}

/**
 * gfs2_glock_put() - Decrement reference count on glock
 * @gl: The glock to put
 *
 */

int gfs2_glock_put(struct gfs2_glock *gl)
{
	int rv = 0;
	struct gfs2_sbd *sdp = gl->gl_sbd;

	write_lock(gl_lock_addr(gl->gl_hash));
	if (atomic_dec_and_test(&gl->gl_ref)) {
		hlist_del(&gl->gl_list);
		write_unlock(gl_lock_addr(gl->gl_hash));
		BUG_ON(spin_is_locked(&gl->gl_spin));
		gfs2_assert(sdp, gl->gl_state == LM_ST_UNLOCKED);
		gfs2_assert(sdp, list_empty(&gl->gl_reclaim));
		gfs2_assert(sdp, list_empty(&gl->gl_holders));
		gfs2_assert(sdp, list_empty(&gl->gl_waiters1));
		gfs2_assert(sdp, list_empty(&gl->gl_waiters3));
		glock_free(gl);
		rv = 1;
		goto out;
	}
	write_unlock(gl_lock_addr(gl->gl_hash));
out:
	return rv;
}

/**
 * search_bucket() - Find struct gfs2_glock by lock number
 * @hash: the index of the bucket to search
 * @sdp: The GFS2 superblock
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *search_bucket(unsigned int hash,
					const struct gfs2_sbd *sdp,
					const struct lm_lockname *name)
{
	struct gfs2_glock *gl;
	struct hlist_node *h;

	hlist_for_each_entry(gl, h, &gl_hash_table[hash].hb_list, gl_list) {
		if (!lm_name_equal(&gl->gl_name, name))
			continue;
		if (gl->gl_sbd != sdp)
			continue;

		atomic_inc(&gl->gl_ref);

		return gl;
	}

	return NULL;
}

/**
 * gfs2_glock_find() - Find glock by lock number
 * @sdp: The GFS2 superblock
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *gfs2_glock_find(const struct gfs2_sbd *sdp,
					  const struct lm_lockname *name)
{
	unsigned int hash = gl_hash(sdp, name);
	struct gfs2_glock *gl;

	read_lock(gl_lock_addr(hash));
	gl = search_bucket(hash, sdp, name);
	read_unlock(gl_lock_addr(hash));

	return gl;
}

static void glock_work_func(struct work_struct *work)
{
	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);

	spin_lock(&gl->gl_spin);
	if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags))
		set_bit(GLF_DEMOTE, &gl->gl_flags);
	run_queue(gl);
	spin_unlock(&gl->gl_spin);
	gfs2_glock_put(gl);
}
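/*
 * Reference-count sketch (not from the original file): every successful
 * lookup above returns with gl_ref raised, so a caller must balance it
 * with gfs2_glock_put().  A hypothetical caller inside this file:
 *
 *	struct lm_lockname name = { .ln_number = number,
 *				    .ln_type = LM_TYPE_INODE };
 *	struct gfs2_glock *gl = gfs2_glock_find(sdp, &name);
 *	if (gl) {
 *		... inspect gl ...
 *		gfs2_glock_put(gl);     // may free the glock entirely
 *	}
 *
 * glock_work_func() follows the same rule: whoever queued the work took
 * a reference, and the function drops it when done.
 */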
/**
 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
 * @sdp: The GFS2 superblock
 * @number: the lock number
 * @glops: The glock_operations to use
 * @create: If 0, don't create the glock if it doesn't exist
 * @glp: the glock is returned here
 *
 * This does not lock a glock, just finds/creates structures for one.
 *
 * Returns: errno
 */

int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
		   const struct gfs2_glock_operations *glops, int create,
		   struct gfs2_glock **glp)
{
	struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type };
	struct gfs2_glock *gl, *tmp;
	unsigned int hash = gl_hash(sdp, &name);
	int error;

	read_lock(gl_lock_addr(hash));
	gl = search_bucket(hash, sdp, &name);
	read_unlock(gl_lock_addr(hash));

	if (gl || !create) {
		*glp = gl;
		return 0;
	}

	gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_KERNEL);
	if (!gl)
		return -ENOMEM;

	gl->gl_flags = 0;
	gl->gl_name = name;
	atomic_set(&gl->gl_ref, 1);
	gl->gl_state = LM_ST_UNLOCKED;
	gl->gl_demote_state = LM_ST_EXCLUSIVE;
	gl->gl_hash = hash;
	gl->gl_owner_pid = 0;
	gl->gl_ip = 0;
	gl->gl_ops = glops;
	gl->gl_req_gh = NULL;
	gl->gl_req_bh = NULL;
	gl->gl_vn = 0;
	gl->gl_stamp = jiffies;
	gl->gl_tchange = jiffies;
	gl->gl_object = NULL;
	gl->gl_sbd = sdp;
	gl->gl_aspace = NULL;
	lops_init_le(&gl->gl_le, &gfs2_glock_lops);
	INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);

	/* If this glock protects actual on-disk data or metadata blocks,
	   create a VFS inode to manage the pages/buffers holding them. */
	if (glops == &gfs2_inode_glops || glops == &gfs2_rgrp_glops) {
		gl->gl_aspace = gfs2_aspace_get(sdp);
		if (!gl->gl_aspace) {
			error = -ENOMEM;
			goto fail;
		}
	}

	error = gfs2_lm_get_lock(sdp, &name, &gl->gl_lock);
	if (error)
		goto fail_aspace;

	write_lock(gl_lock_addr(hash));
	tmp = search_bucket(hash, sdp, &name);
	if (tmp) {
		write_unlock(gl_lock_addr(hash));
		glock_free(gl);
		gl = tmp;
	} else {
		hlist_add_head(&gl->gl_list, &gl_hash_table[hash].hb_list);
		write_unlock(gl_lock_addr(hash));
	}

	*glp = gl;

	return 0;

fail_aspace:
	if (gl->gl_aspace)
		gfs2_aspace_put(gl->gl_aspace);
fail:
	kmem_cache_free(gfs2_glock_cachep, gl);
	return error;
}

/**
 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 */

void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
		      struct gfs2_holder *gh)
{
	INIT_LIST_HEAD(&gh->gh_list);
	gh->gh_gl = gl;
	gh->gh_ip = (unsigned long)__builtin_return_address(0);
	gh->gh_owner_pid = current->pid;
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_error = 0;
	gh->gh_iflags = 0;
	gfs2_glock_hold(gl);
}
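/*
 * Typical holder lifecycle (a sketch; gfs2_glock_nq() and gfs2_glock_dq()
 * are declared in glock.h and appear later in this file, and are assumed
 * here to queue and release a hold as their names suggest):
 *
 *	struct gfs2_holder gh;
 *	int error;
 *
 *	gfs2_holder_init(gl, LM_ST_SHARED, 0, &gh);
 *	error = gfs2_glock_nq(&gh);             // queue and wait for grant
 *	if (!error) {
 *		... access the object the glock protects ...
 *		gfs2_glock_dq(&gh);             // release the hold
 *	}
 *	gfs2_holder_uninit(&gh);                // drop the glock reference
 */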
/**
 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Don't mess with the glock.
 *
 */

void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *gh)
{
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_iflags = 0;
	gh->gh_ip = (unsigned long)__builtin_return_address(0);
}

/**
 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
 * @gh: the holder structure
 *
 */

void gfs2_holder_uninit(struct gfs2_holder *gh)
{
	gfs2_glock_put(gh->gh_gl);
	gh->gh_gl = NULL;
	gh->gh_ip = 0;
}

static void gfs2_holder_wake(struct gfs2_holder *gh)
{
	clear_bit(HIF_WAIT, &gh->gh_iflags);
	smp_mb__after_clear_bit();
	wake_up_bit(&gh->gh_iflags, HIF_WAIT);
}

static int just_schedule(void *word)
{
	schedule();
	return 0;
}

static void wait_on_holder(struct gfs2_holder *gh)
{
	might_sleep();
	wait_on_bit(&gh->gh_iflags, HIF_WAIT, just_schedule, TASK_UNINTERRUPTIBLE);
}

static void gfs2_demote_wake(struct gfs2_glock *gl)
{
	BUG_ON(!spin_is_locked(&gl->gl_spin));
	gl->gl_demote_state = LM_ST_EXCLUSIVE;
	clear_bit(GLF_DEMOTE, &gl->gl_flags);
	smp_mb__after_clear_bit();
	wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
}

static void wait_on_demote(struct gfs2_glock *gl)
{
	might_sleep();
	wait_on_bit(&gl->gl_flags, GLF_DEMOTE, just_schedule, TASK_UNINTERRUPTIBLE);
}

/**
 * rq_mutex - process a mutex request in the queue
 * @gh: the glock holder
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_mutex(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;

	list_del_init(&gh->gh_list);
	/*  gh->gh_error never examined.  */
	set_bit(GLF_LOCK, &gl->gl_flags);
	clear_bit(HIF_WAIT, &gh->gh_iflags);
	smp_mb();
	wake_up_bit(&gh->gh_iflags, HIF_WAIT);

	return 1;
}

/**
 * rq_promote - process a promote request in the queue
 * @gh: the glock holder
 *
 * Acquire a new inter-node lock, or change a lock state to more restrictive.
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_promote(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;

	if (!relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
		if (list_empty(&gl->gl_holders)) {
			gl->gl_req_gh = gh;
			set_bit(GLF_LOCK, &gl->gl_flags);
			spin_unlock(&gl->gl_spin);

			if (atomic_read(&sdp->sd_reclaim_count) >
			    gfs2_tune_get(sdp, gt_reclaim_limit) &&
			    !(gh->gh_flags & LM_FLAG_PRIORITY)) {
				gfs2_reclaim_glock(sdp);
				gfs2_reclaim_glock(sdp);
			}

			gfs2_glock_xmote_th(gh->gh_gl, gh);
			spin_lock(&gl->gl_spin);
		}
		return 1;
	}

	if (list_empty(&gl->gl_holders)) {
		set_bit(HIF_FIRST, &gh->gh_iflags);
		set_bit(GLF_LOCK, &gl->gl_flags);
	} else {
		struct gfs2_holder *next_gh;
		if (gh->gh_state == LM_ST_EXCLUSIVE)
			return 1;
		next_gh = list_entry(gl->gl_holders.next, struct gfs2_holder,
				     gh_list);
		if (next_gh->gh_state == LM_ST_EXCLUSIVE)
			return 1;
	}

	list_move_tail(&gh->gh_list, &gl->gl_holders);
	gh->gh_error = 0;
	set_bit(HIF_HOLDER, &gh->gh_iflags);

	gfs2_holder_wake(gh);

	return 0;
}

/**
 * rq_demote - process a demote request in the queue
 * @gl: the glock
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_demote(struct gfs2_glock *gl)
{
	if (!list_empty(&gl->gl_holders))
		return 1;
