
📄 dlmcommon.h

📁 Linux kernel source code
📖 Page 1 of 2
/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * dlmcommon.h
 *
 * Copyright (C) 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 *
 */

#ifndef DLMCOMMON_H
#define DLMCOMMON_H

#include <linux/kref.h>

#define DLM_HB_NODE_DOWN_PRI     (0xf000000)
#define DLM_HB_NODE_UP_PRI       (0x8000000)

#define DLM_LOCKID_NAME_MAX    32

#define DLM_DOMAIN_NAME_MAX_LEN    255
#define DLM_LOCK_RES_OWNER_UNKNOWN     O2NM_MAX_NODES
#define DLM_THREAD_SHUFFLE_INTERVAL    5     // flush everything every 5 passes
#define DLM_THREAD_MS                  200   // flush at least every 200 ms

#define DLM_HASH_SIZE_DEFAULT	(1 << 14)
#if DLM_HASH_SIZE_DEFAULT < PAGE_SIZE
# define DLM_HASH_PAGES		1
#else
# define DLM_HASH_PAGES		(DLM_HASH_SIZE_DEFAULT / PAGE_SIZE)
#endif
#define DLM_BUCKETS_PER_PAGE	(PAGE_SIZE / sizeof(struct hlist_head))
#define DLM_HASH_BUCKETS	(DLM_HASH_PAGES * DLM_BUCKETS_PER_PAGE)

/* Intended to make it easier for us to switch out hash functions */
#define dlm_lockid_hash(_n, _l) full_name_hash(_n, _l)

enum dlm_ast_type {
	DLM_AST = 0,
	DLM_BAST,
	DLM_ASTUNLOCK
};

#define LKM_VALID_FLAGS (LKM_VALBLK | LKM_CONVERT | LKM_UNLOCK | \
			 LKM_CANCEL | LKM_INVVALBLK | LKM_FORCE | \
			 LKM_RECOVERY | LKM_LOCAL | LKM_NOQUEUE)

#define DLM_RECOVERY_LOCK_NAME       "$RECOVERY"
#define DLM_RECOVERY_LOCK_NAME_LEN   9

static inline int dlm_is_recovery_lock(const char *lock_name, int name_len)
{
	if (name_len == DLM_RECOVERY_LOCK_NAME_LEN &&
	    memcmp(lock_name, DLM_RECOVERY_LOCK_NAME, name_len)==0)
		return 1;
	return 0;
}

#define DLM_RECO_STATE_ACTIVE    0x0001
#define DLM_RECO_STATE_FINALIZE  0x0002

struct dlm_recovery_ctxt
{
	struct list_head resources;
	struct list_head received;
	struct list_head node_data;
	u8  new_master;
	u8  dead_node;
	u16 state;
	unsigned long node_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	wait_queue_head_t event;
};

enum dlm_ctxt_state {
	DLM_CTXT_NEW = 0,
	DLM_CTXT_JOINED,
	DLM_CTXT_IN_SHUTDOWN,
	DLM_CTXT_LEAVING,
};

struct dlm_ctxt
{
	struct list_head list;
	struct hlist_head **lockres_hash;
	struct list_head dirty_list;
	struct list_head purge_list;
	struct list_head pending_asts;
	struct list_head pending_basts;
	unsigned int purge_count;
	spinlock_t spinlock;
	spinlock_t ast_lock;
	char *name;
	u8 node_num;
	u32 key;
	u8  joining_node;
	wait_queue_head_t dlm_join_events;
	unsigned long live_nodes_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	unsigned long domain_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	unsigned long recovery_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	struct dlm_recovery_ctxt reco;
	spinlock_t master_lock;
	struct list_head master_list;
	struct list_head mle_hb_events;

	/* these give a really vague idea of the system load */
	atomic_t local_resources;
	atomic_t remote_resources;
	atomic_t unknown_resources;

	/* NOTE: Next three are protected by dlm_domain_lock */
	struct kref dlm_refs;
	enum dlm_ctxt_state dlm_state;
	unsigned int num_joins;

	struct o2hb_callback_func dlm_hb_up;
	struct o2hb_callback_func dlm_hb_down;
	struct task_struct *dlm_thread_task;
	struct task_struct *dlm_reco_thread_task;
	struct workqueue_struct *dlm_worker;
	wait_queue_head_t dlm_thread_wq;
	wait_queue_head_t dlm_reco_thread_wq;
	wait_queue_head_t ast_wq;
	wait_queue_head_t migration_wq;

	struct work_struct dispatched_work;
	struct list_head work_list;
	spinlock_t work_lock;
	struct list_head dlm_domain_handlers;
	struct list_head dlm_eviction_callbacks;
};

static inline struct hlist_head *dlm_lockres_hash(struct dlm_ctxt *dlm, unsigned i)
{
	return dlm->lockres_hash[(i / DLM_BUCKETS_PER_PAGE) % DLM_HASH_PAGES] + (i % DLM_BUCKETS_PER_PAGE);
}

/* these keventd work queue items are for less-frequently
 * called functions that cannot be directly called from the
 * net message handlers for some reason, usually because
 * they need to send net messages of their own. */
void dlm_dispatch_work(struct work_struct *work);

struct dlm_lock_resource;
struct dlm_work_item;

typedef void (dlm_workfunc_t)(struct dlm_work_item *, void *);

struct dlm_request_all_locks_priv
{
	u8 reco_master;
	u8 dead_node;
};

struct dlm_mig_lockres_priv
{
	struct dlm_lock_resource *lockres;
	u8 real_master;
};

struct dlm_assert_master_priv
{
	struct dlm_lock_resource *lockres;
	u8 request_from;
	u32 flags;
	unsigned ignore_higher:1;
};

struct dlm_deref_lockres_priv
{
	struct dlm_lock_resource *deref_res;
	u8 deref_node;
};

struct dlm_work_item
{
	struct list_head list;
	dlm_workfunc_t *func;
	struct dlm_ctxt *dlm;
	void *data;
	union {
		struct dlm_request_all_locks_priv ral;
		struct dlm_mig_lockres_priv ml;
		struct dlm_assert_master_priv am;
		struct dlm_deref_lockres_priv dl;
	} u;
};

static inline void dlm_init_work_item(struct dlm_ctxt *dlm,
				      struct dlm_work_item *i,
				      dlm_workfunc_t *f, void *data)
{
	memset(i, 0, sizeof(*i));
	i->func = f;
	INIT_LIST_HEAD(&i->list);
	i->data = data;
	i->dlm = dlm;  /* must have already done a dlm_grab on this! */
}

static inline void __dlm_set_joining_node(struct dlm_ctxt *dlm,
					  u8 node)
{
	assert_spin_locked(&dlm->spinlock);

	dlm->joining_node = node;
	wake_up(&dlm->dlm_join_events);
}

#define DLM_LOCK_RES_UNINITED             0x00000001
#define DLM_LOCK_RES_RECOVERING           0x00000002
#define DLM_LOCK_RES_READY                0x00000004
#define DLM_LOCK_RES_DIRTY                0x00000008
#define DLM_LOCK_RES_IN_PROGRESS          0x00000010
#define DLM_LOCK_RES_MIGRATING            0x00000020
#define DLM_LOCK_RES_DROPPING_REF         0x00000040
#define DLM_LOCK_RES_BLOCK_DIRTY          0x00001000
#define DLM_LOCK_RES_SETREF_INPROG        0x00002000

/* max milliseconds to wait to sync up a network failure with a node death */
#define DLM_NODE_DEATH_WAIT_MAX (5 * 1000)

#define DLM_PURGE_INTERVAL_MS   (8 * 1000)

struct dlm_lock_resource
{
	/* WARNING: Please see the comment in dlm_init_lockres before
	 * adding fields here. */
	struct hlist_node hash_node;
	struct qstr lockname;
	struct kref      refs;

	/*
	 * Please keep granted, converting, and blocked in this order,
	 * as some funcs want to iterate over all lists.
	 *
	 * All four lists are protected by the hash's reference.
	 */
	struct list_head granted;
	struct list_head converting;
	struct list_head blocked;
	struct list_head purge;

	/*
	 * These two lists require you to hold an additional reference
	 * while they are on the list.
	 */
	struct list_head dirty;
	struct list_head recovering; // dlm_recovery_ctxt.resources list

	/* unused lock resources have their last_used stamped and are
	 * put on a list for the dlm thread to run. */
	unsigned long    last_used;

	unsigned migration_pending:1;
	atomic_t asts_reserved;
	spinlock_t spinlock;
	wait_queue_head_t wq;
	u8  owner;              //node which owns the lock resource, or unknown
	u16 state;
	char lvb[DLM_LVB_LEN];
	unsigned int inflight_locks;
	unsigned long refmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
};

struct dlm_migratable_lock
{
	__be64 cookie;

	/* these 3 are just padding for the in-memory structure, but
	 * list and flags are actually used when sent over the wire */
	__be16 pad1;
	u8 list;  // 0=granted, 1=converting, 2=blocked
	u8 flags;

	s8 type;
	s8 convert_type;
	s8 highest_blocked;
	u8 node;
};  // 16 bytes

struct dlm_lock
{
	struct dlm_migratable_lock ml;

	struct list_head list;
	struct list_head ast_list;
	struct list_head bast_list;
	struct dlm_lock_resource *lockres;
	spinlock_t spinlock;
	struct kref lock_refs;

	// ast and bast must be callable while holding a spinlock!
	dlm_astlockfunc_t *ast;
	dlm_bastlockfunc_t *bast;
	void *astdata;
	struct dlm_lockstatus *lksb;
	unsigned ast_pending:1,
		 bast_pending:1,
		 convert_pending:1,
		 lock_pending:1,
		 cancel_pending:1,
		 unlock_pending:1,
		 lksb_kernel_allocated:1;
};

#define DLM_LKSB_UNUSED1           0x01
#define DLM_LKSB_PUT_LVB           0x02
#define DLM_LKSB_GET_LVB           0x04
#define DLM_LKSB_UNUSED2           0x08
#define DLM_LKSB_UNUSED3           0x10
#define DLM_LKSB_UNUSED4           0x20
#define DLM_LKSB_UNUSED5           0x40
#define DLM_LKSB_UNUSED6           0x80

enum dlm_lockres_list {
	DLM_GRANTED_LIST = 0,
	DLM_CONVERTING_LIST,
	DLM_BLOCKED_LIST
};

static inline int dlm_lvb_is_empty(char *lvb)
{
	int i;
	for (i=0; i<DLM_LVB_LEN; i++)
		if (lvb[i])
			return 0;
	return 1;
}

static inline struct list_head *
dlm_list_idx_to_ptr(struct dlm_lock_resource *res, enum dlm_lockres_list idx)
{
	struct list_head *ret = NULL;
	if (idx == DLM_GRANTED_LIST)
		ret = &res->granted;
	else if (idx == DLM_CONVERTING_LIST)
		ret = &res->converting;
	else if (idx == DLM_BLOCKED_LIST)
		ret = &res->blocked;
	else
		BUG();
	return ret;
}

struct dlm_node_iter
{
	unsigned long node_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	int curnode;
};

enum {
	DLM_MASTER_REQUEST_MSG    = 500,
	DLM_UNUSED_MSG1,         /* 501 */
	DLM_ASSERT_MASTER_MSG,	 /* 502 */
	DLM_CREATE_LOCK_MSG,	 /* 503 */
	DLM_CONVERT_LOCK_MSG,	 /* 504 */
	DLM_PROXY_AST_MSG,	 /* 505 */
	DLM_UNLOCK_LOCK_MSG,	 /* 506 */
	DLM_DEREF_LOCKRES_MSG,	 /* 507 */
	DLM_MIGRATE_REQUEST_MSG, /* 508 */
	DLM_MIG_LOCKRES_MSG,	 /* 509 */
	DLM_QUERY_JOIN_MSG,	 /* 510 */
	DLM_ASSERT_JOINED_MSG,	 /* 511 */
	DLM_CANCEL_JOIN_MSG,	 /* 512 */
	DLM_EXIT_DOMAIN_MSG,	 /* 513 */
	DLM_MASTER_REQUERY_MSG,	 /* 514 */
	DLM_LOCK_REQUEST_MSG,	 /* 515 */
	DLM_RECO_DATA_DONE_MSG,	 /* 516 */
	DLM_BEGIN_RECO_MSG,	 /* 517 */
	DLM_FINALIZE_RECO_MSG	 /* 518 */
};

struct dlm_reco_node_data
{
	int state;
	u8 node_num;
	struct list_head list;
};

enum {
	DLM_RECO_NODE_DATA_DEAD = -1,
	DLM_RECO_NODE_DATA_INIT = 0,
	DLM_RECO_NODE_DATA_REQUESTING,
	DLM_RECO_NODE_DATA_REQUESTED,
	DLM_RECO_NODE_DATA_RECEIVING,
	DLM_RECO_NODE_DATA_DONE,
	DLM_RECO_NODE_DATA_FINALIZE_SENT,
};

enum {
	DLM_MASTER_RESP_NO = 0,
	DLM_MASTER_RESP_YES,
	DLM_MASTER_RESP_MAYBE,
	DLM_MASTER_RESP_ERROR
};

struct dlm_master_request
{
	u8 node_idx;
	u8 namelen;
	__be16 pad1;
	__be32 flags;

	u8 name[O2NM_MAX_NAME_LEN];
};

#define DLM_ASSERT_RESPONSE_REASSERT       0x00000001
#define DLM_ASSERT_RESPONSE_MASTERY_REF    0x00000002

#define DLM_ASSERT_MASTER_MLE_CLEANUP      0x00000001
#define DLM_ASSERT_MASTER_REQUERY          0x00000002
#define DLM_ASSERT_MASTER_FINISH_MIGRATION 0x00000004

struct dlm_assert_master
{
	u8 node_idx;
	u8 namelen;
	__be16 pad1;
	__be32 flags;

	u8 name[O2NM_MAX_NAME_LEN];
};

#define DLM_MIGRATE_RESPONSE_MASTERY_REF   0x00000001

struct dlm_migrate_request
{
	u8 master;
	u8 new_master;
	u8 namelen;
	u8 pad1;
	__be32 pad2;
	u8 name[O2NM_MAX_NAME_LEN];
};

struct dlm_master_requery
{
	u8 pad1;
	u8 pad2;
	u8 node_idx;
	u8 namelen;
	__be32 pad3;
	u8 name[O2NM_MAX_NAME_LEN];
};

#define DLM_MRES_RECOVERY   0x01
#define DLM_MRES_MIGRATION  0x02
#define DLM_MRES_ALL_DONE   0x04

/*
 * We would like to get one whole lockres into a single network
 * message whenever possible.  Generally speaking, there will be
 * at most one dlm_lock on a lockres for each node in the cluster,
 * plus (infrequently) any additional locks coming in from userdlm.
 *
 * struct _dlm_lockres_page
 * {
 * 	dlm_migratable_lockres mres;
 * 	dlm_migratable_lock ml[DLM_MAX_MIGRATABLE_LOCKS];
 * 	u8 pad[DLM_MIG_LOCKRES_RESERVED];
 * };
 *
 * from ../cluster/tcp.h
 *    NET_MAX_PAYLOAD_BYTES  (4096 - sizeof(net_msg))
 *    (roughly 4080 bytes)
 * and sizeof(dlm_migratable_lockres) = 112 bytes
 * and sizeof(dlm_migratable_lock) = 16 bytes
 *
 * Choosing DLM_MAX_MIGRATABLE_LOCKS=240 and
 * DLM_MIG_LOCKRES_RESERVED=128 means we have this:
 *
 *  (DLM_MAX_MIGRATABLE_LOCKS * sizeof(dlm_migratable_lock)) +
 *     sizeof(dlm_migratable_lockres) + DLM_MIG_LOCKRES_RESERVED =
 *        NET_MAX_PAYLOAD_BYTES
 *  (240 * 16) + 112 + 128 = 4080
 *
 * So a lockres would need more than 240 locks before it would
 * use more than one network packet to recover.  Not too bad.
 */
#define DLM_MAX_MIGRATABLE_LOCKS   240

struct dlm_migratable_lockres
{
	u8 master;
	u8 lockname_len;
	u8 num_locks;    // locks sent in this structure
	u8 flags;
	__be32 total_locks; // locks to be sent for this migration cookie
	__be64 mig_cookie;  // cookie for this lockres migration
			    // or zero if not needed
	// 16 bytes
	u8 lockname[DLM_LOCKID_NAME_MAX];
	// 48 bytes
	u8 lvb[DLM_LVB_LEN];
	// 112 bytes
	struct dlm_migratable_lock ml[0];  // 16 bytes each, begins at byte 112
};
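
Note on the lockres hash table: it is allocated as DLM_HASH_PAGES separate pages, each holding DLM_BUCKETS_PER_PAGE hlist heads, and dlm_lockres_hash() turns a flat bucket index into a (page, offset) pair. The user-space sketch below is not part of the kernel header; all SKETCH_* names are hypothetical, and it assumes a 4096-byte page and an 8-byte struct hlist_head, so DLM_HASH_SIZE_DEFAULT (1 << 14) corresponds to 4 pages of 512 buckets each.

/* Illustrative user-space sketch of the page/bucket index arithmetic. */
#include <stdio.h>

#define SKETCH_PAGE_SIZE        4096
#define SKETCH_HLIST_HEAD_SIZE  8	/* assumed sizeof(struct hlist_head) on 64-bit */
#define SKETCH_HASH_SIZE        (1 << 14)	/* mirrors DLM_HASH_SIZE_DEFAULT */
#define SKETCH_HASH_PAGES       (SKETCH_HASH_SIZE / SKETCH_PAGE_SIZE)
#define SKETCH_BUCKETS_PER_PAGE (SKETCH_PAGE_SIZE / SKETCH_HLIST_HEAD_SIZE)
#define SKETCH_HASH_BUCKETS     (SKETCH_HASH_PAGES * SKETCH_BUCKETS_PER_PAGE)

int main(void)
{
	unsigned i = 1234;	/* example bucket index, e.g. hash % SKETCH_HASH_BUCKETS */
	unsigned page   = (i / SKETCH_BUCKETS_PER_PAGE) % SKETCH_HASH_PAGES;
	unsigned offset = i % SKETCH_BUCKETS_PER_PAGE;

	/* with these assumptions: 4 pages x 512 buckets, bucket 1234 -> page 2, offset 210 */
	printf("%u pages x %u buckets = %u buckets total\n",
	       SKETCH_HASH_PAGES, SKETCH_BUCKETS_PER_PAGE, SKETCH_HASH_BUCKETS);
	printf("bucket %u -> page %u, offset %u\n", i, page, offset);
	return 0;
}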
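The sizing comment before DLM_MAX_MIGRATABLE_LOCKS argues that a lockres with up to 240 migratable locks fits in one roughly 4080-byte network payload. The standalone sketch below simply restates the constants quoted in that comment (it is not kernel code, and the SKETCH_* names are hypothetical) so the arithmetic 240 * 16 + 112 + 128 = 4080 can be checked directly.

/* Illustrative check of the migration message sizing arithmetic. */
#include <stdio.h>

#define SKETCH_MIG_LOCKRES_BYTES 112	/* quoted sizeof(struct dlm_migratable_lockres) */
#define SKETCH_MIG_LOCK_BYTES    16	/* quoted sizeof(struct dlm_migratable_lock) */
#define SKETCH_MAX_LOCKS         240	/* DLM_MAX_MIGRATABLE_LOCKS */
#define SKETCH_RESERVED_BYTES    128	/* DLM_MIG_LOCKRES_RESERVED */
#define SKETCH_MAX_PAYLOAD       4080	/* NET_MAX_PAYLOAD_BYTES, roughly */

int main(void)
{
	int used = SKETCH_MAX_LOCKS * SKETCH_MIG_LOCK_BYTES +
		   SKETCH_MIG_LOCKRES_BYTES + SKETCH_RESERVED_BYTES;

	/* 240 * 16 + 112 + 128 = 4080: one full lockres fits in a single message */
	printf("%d bytes used of %d available\n", used, SKETCH_MAX_PAYLOAD);
	return 0;
}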
