lustre_dlm.h
From "lustre 1.6.5 source code" · C header file · 835 lines total · page 1 of 3
H
835 行
/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * (visit-tags-table FILE)
 * vim:expandtab:shiftwidth=8:tabstop=8:
 */

/* Core definitions for the Lustre Distributed Lock Manager (LDLM):
 * status codes, per-lock flags, the lock-mode compatibility matrix, and
 * the pool/namespace data structures shared by client and server code. */

#ifndef _LUSTRE_DLM_H__
#define _LUSTRE_DLM_H__

/* Platform-specific portion of the DLM definitions. */
#if defined(__linux__)
#include <linux/lustre_dlm.h>
#elif defined(__APPLE__)
#include <darwin/lustre_dlm.h>
#elif defined(__WINNT__)
#include <winnt/lustre_dlm.h>
#else
#error Unsupported operating system.
#endif

#include <lustre_lib.h>
#include <lustre_net.h>
#include <lustre_import.h>
#include <lustre_handles.h>
#include <lustre_export.h> /* for obd_export, for LDLM_DEBUG */
#include <interval_tree.h> /* for interval_node{}, ldlm_extent */

struct obd_ops;
struct obd_device;

#define OBD_LDLM_DEVICENAME  "ldlm"

/* Default LRU size scales with the number of online CPUs. */
#define LDLM_DEFAULT_LRU_SIZE (100 * num_online_cpus())
/* 36000 seconds = 10 hours; presumably the maximum idle lifetime of an
 * unused lock -- NOTE(review): confirm against the LRU/cancel code. */
#define LDLM_DEFAULT_MAX_ALIVE (cfs_time_seconds(36000))

/* LDLM status/error codes exchanged between client and server.  The
 * explicit numeric values suggest they are wire-visible; do not renumber
 * without checking the protocol -- NOTE(review): assumption to confirm. */
typedef enum {
        ELDLM_OK = 0,

        ELDLM_LOCK_CHANGED = 300,
        ELDLM_LOCK_ABORTED = 301,
        ELDLM_LOCK_REPLACED = 302,
        ELDLM_NO_LOCK_DATA = 303,

        ELDLM_NAMESPACE_EXISTS = 400,
        ELDLM_BAD_NAMESPACE = 401
} ldlm_error_t;

/* Which side of the connection a namespace belongs to. */
typedef enum {
        LDLM_NAMESPACE_SERVER = 1 << 0,
        LDLM_NAMESPACE_CLIENT = 1 << 1
} ldlm_side_t;

#define LDLM_FL_LOCK_CHANGED   0x000001 /* extent, mode, or resource changed */

/* If the server returns one of these flags, then the lock was put on that
 * list.  If the client sends one of these flags (during recovery ONLY!), it
 * wants the lock added to the specified list, no questions asked. -p */
#define LDLM_FL_BLOCK_GRANTED  0x000002
#define LDLM_FL_BLOCK_CONV     0x000004
#define LDLM_FL_BLOCK_WAIT     0x000008

#define LDLM_FL_CBPENDING      0x000010 /* this lock is being destroyed */
#define LDLM_FL_AST_SENT       0x000020 /* blocking or cancel packet was sent */
#define LDLM_FL_WAIT_NOREPROC  0x000040 /* not a real flag, not saved in lock */
#define LDLM_FL_CANCEL         0x000080 /* cancellation callback already run */

/* Lock is being replayed.  This could probably be implied by the fact that
 * one of BLOCK_{GRANTED,CONV,WAIT} is set, but that is pretty dangerous. */
#define LDLM_FL_REPLAY         0x000100

#define LDLM_FL_INTENT_ONLY    0x000200 /* don't grant lock, just do intent */
#define LDLM_FL_LOCAL_ONLY     0x000400 /* see ldlm_cli_cancel_unused */

/* don't run the cancel callback under ldlm_cli_cancel_unused */
#define LDLM_FL_FAILED         0x000800

#define LDLM_FL_HAS_INTENT     0x001000 /* lock request has intent */
#define LDLM_FL_CANCELING      0x002000 /* lock cancel has already been sent */
#define LDLM_FL_LOCAL          0x004000 /* local lock (ie, no srv/cli split) */
#define LDLM_FL_WARN           0x008000 /* see ldlm_cli_cancel_unused */
#define LDLM_FL_DISCARD_DATA   0x010000 /* discard (no writeback) on cancel */

#define LDLM_FL_NO_TIMEOUT     0x020000 /* Blocked by group lock - wait
                                         * indefinitely */

/* file & record locking */
#define LDLM_FL_BLOCK_NOWAIT   0x040000 /* server told not to wait if blocked */
#define LDLM_FL_TEST_LOCK      0x080000 /* return blocking lock */

/* XXX FIXME: This is being added to b_size as a low-risk fix to the fact
 * that the LVB filling happens _after_ the lock has been granted, so another
 * thread can match it before the LVB has been updated.  As a dirty hack, we
 * set LDLM_FL_LVB_READY only after we've done the LVB poop.
 * This is only needed on lov/osc now, where lvb is actually used and callers
 * must set it in input flags.
 *
 * The proper fix is to do the granting inside of the completion AST, which
 * can be replaced with a LVB-aware wrapping function for OSC locks.  That
 * change is pretty high-risk, though, and would need a lot more testing. */
#define LDLM_FL_LVB_READY      0x100000

/* A lock contributes to the kms calculation until it has finished the part
 * of its cancelation that performs write back on its dirty pages.  It can
 * remain on the granted list during this whole time.  Threads racing to
 * update the kms after performing their writeback need to know to exclude
 * each other's locks from the calculation as they walk the granted list. */
#define LDLM_FL_KMS_IGNORE     0x200000

/* Don't drop lock covering mmapped file in LRU */
#define LDLM_FL_NO_LRU         0x400000

/* Immediately cancel such locks when they block some other locks.  Send
 * cancel notification to original lock holder, but expect no reply. */
#define LDLM_FL_CANCEL_ON_BLOCK 0x800000

/* Flags inherited from parent lock when doing intents. */
#define LDLM_INHERIT_FLAGS     (LDLM_FL_CANCEL_ON_BLOCK)

/* These are flags that are mapped into the flags and ASTs of blocking
 * locks */
#define LDLM_AST_DISCARD_DATA  0x80000000 /* Add FL_DISCARD to blocking ASTs */

/* Flags sent in AST lock_flags to be mapped into the receiving lock. */
#define LDLM_AST_FLAGS         (LDLM_FL_DISCARD_DATA)

/* completion ast to be executed */
#define LDLM_FL_CP_REQD        0x1000000

/* cleanup_resource has already handled the lock */
#define LDLM_FL_CLEANED        0x2000000

/* Optimization hint: LDLM can run the blocking callback from the current
 * context w/o involving a separate thread, in order to decrease the
 * cs (context-switch) rate. */
#define LDLM_FL_ATOMIC_CB      0x4000000

/* It may happen that a client initiates 2 operations, e.g. unlink and
 * mkdir, such that the server sends a blocking ast for conflicting locks to
 * this client for the 1st operation, whereas the 2nd operation has canceled
 * this lock and is waiting for rpc_lock which is taken by the 1st operation.
 * LDLM_FL_BL_AST is set by ldlm_callback_handler() on the lock to not allow
 * the ELC code to cancel it.
 * LDLM_FL_BL_DONE is set by ldlm_cancel_callback() when the lock cache is
 * dropped to let ldlm_callback_handler() return EINVAL to the server.  It
 * is used when an ELC rpc is already prepared and is waiting for rpc_lock;
 * too late to send a separate CANCEL rpc. */
#define LDLM_FL_BL_AST         0x10000000
#define LDLM_FL_BL_DONE        0x20000000

/* measure lock contention and return -EUSERS if locking contention is high */
#define LDLM_FL_DENY_ON_CONTENTION 0x40000000

/* The blocking callback is overloaded to perform two functions.  These
 * flags indicate which operation should be performed.
 */
#define LDLM_CB_BLOCKING 1
#define LDLM_CB_CANCELING 2

/* position flag of skip list pointers */
#define LDLM_SL_HEAD(skip_list) ((skip_list)->next != NULL)
#define LDLM_SL_TAIL(skip_list) ((skip_list)->prev != NULL)
#define LDLM_SL_EMPTY(skip_list) ((skip_list)->next == NULL && \
                                  (skip_list)->prev == NULL)

/* Compatibility matrix: LCK_COMPAT_<m> is the bitmask of lock modes that
 * may be granted concurrently with an existing lock of mode <m>. */
#define LCK_COMPAT_EX    LCK_NL
#define LCK_COMPAT_PW    (LCK_COMPAT_EX | LCK_CR)
#define LCK_COMPAT_PR    (LCK_COMPAT_PW | LCK_PR)
#define LCK_COMPAT_CW    (LCK_COMPAT_PW | LCK_CW)
#define LCK_COMPAT_CR    (LCK_COMPAT_CW | LCK_PR | LCK_PW)
#define LCK_COMPAT_NL    (LCK_COMPAT_CR | LCK_EX | LCK_GROUP)
#define LCK_COMPAT_GROUP (LCK_GROUP | LCK_NL)

/* Runtime lookup table built from the LCK_COMPAT_* masks above. */
extern ldlm_mode_t lck_compat_array[];

/* Assert that a lock mode is within the valid (MINMODE, MAXMODE) range. */
static inline void lockmode_verify(ldlm_mode_t mode)
{
        LASSERT(mode > LCK_MINMODE && mode < LCK_MAXMODE);
}

/* Non-zero iff mode 'new' is compatible with an already-granted lock of
 * mode 'exist', per lck_compat_array[]. */
static inline int lockmode_compat(ldlm_mode_t exist, ldlm_mode_t new)
{
        return (lck_compat_array[exist] & new);
}

/*
 *
 * cluster name spaces
 *
 */

#define DLM_OST_NAMESPACE 1
#define DLM_MDS_NAMESPACE 2

/* XXX - do we just separate this by security domains and use a prefix for
 * multiple namespaces in the same domain? - */

/*
 * Locking rules (outer lock in each pair is taken before the inner one --
 * NOTE(review): ordering inferred from this list's indentation convention):
 *
 * lr_lock
 *
 * lr_lock
 *     waiting_locks_spinlock
 *
 * lr_lock
 *     led_lock
 *
 * lr_lock
 *     ns_unused_lock
 *
 * lr_lvb_sem
 *     lr_lock
 *
 */

struct ldlm_pool;
struct ldlm_lock;
struct ldlm_resource;
struct ldlm_namespace;

/* Operations vector for an LDLM lock pool (recalc/shrink/setup hooks). */
struct ldlm_pool_ops {
        int (*po_recalc)(struct ldlm_pool *pl);
        int (*po_shrink)(struct ldlm_pool *pl, int nr,
                         unsigned int gfp_mask);
        int (*po_setup)(struct ldlm_pool *pl, int limit);
};

/* One second for pools thread check interval. */
#define LDLM_POOLS_THREAD_PERIOD (1)

/* 5% margin for modest pools.  See ldlm_pool.c for details. */
#define LDLM_POOLS_MODEST_MARGIN (5)

/* A change to SLV in % after which we want to wake up pools thread asap. */
#define LDLM_POOLS_FAST_SLV_CHANGE (50)

struct ldlm_pool {
        /* Common pool fields */
        cfs_proc_dir_entry_t *pl_proc_dir;     /* Pool proc directory. */
        char pl_name[100];                     /* Pool name, should be long
                                                * enough to contain complex
                                                * proc entry name. */
        spinlock_t pl_lock;                    /* Lock for protecting slv/clv
                                                * updates. */
        atomic_t pl_limit;                     /* Number of allowed locks in
                                                * pool, both client and
                                                * server side. */
        atomic_t pl_granted;                   /* Number of granted locks. */
        atomic_t pl_grant_rate;                /* Grant rate per T. */
        atomic_t pl_cancel_rate;               /* Cancel rate per T. */
        atomic_t pl_grant_speed;               /* Grant speed (GR-CR) per T. */
        __u64 pl_server_lock_volume;           /* Server lock volume.
                                                * Protected by pl_lock. */
        atomic_t pl_lock_volume_factor;        /* Lock volume factor. */
        time_t pl_recalc_time;                 /* Time when last slv from
                                                * server was obtained. */
        struct ldlm_pool_ops *pl_ops;          /* Recalc and shrink ops. */
        int pl_grant_plan;                     /* Planned number of granted
                                                * locks for next T. */
        int pl_grant_step;                     /* Grant plan step for next
                                                * T. */
        struct lprocfs_stats *pl_stats;        /* Pool statistics. */
};

/* Intent policy callback.  The struct ldlm_lock ** argument suggests the
 * policy may replace the lock being enqueued -- NOTE(review): confirm
 * against the enqueue path. */
typedef int (*ldlm_res_policy)(struct ldlm_namespace *, struct ldlm_lock **,
                               void *req_cookie, ldlm_mode_t mode, int flags,
                               void *data);

/* Init/update hooks for a resource's lock value block (LVB). */
struct ldlm_valblock_ops {
        int (*lvbo_init)(struct ldlm_resource *res);
        int (*lvbo_update)(struct ldlm_resource *res, struct lustre_msg *m,
                           int buf_idx, int increase);
};

/* NOTE(review): this enum continues on a later page of the file; the
 * definition is intentionally left open here. */
typedef enum {
        LDLM_NAMESPACE_GREEDY = 1 << 0,
⌨️ 快捷键说明
复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?