llite_internal.h
/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 */
#ifndef LLITE_INTERNAL_H
#define LLITE_INTERNAL_H

#include <linux/ext2_fs.h>
#ifdef CONFIG_FS_POSIX_ACL
# include <linux/fs.h>
#ifdef HAVE_XATTR_ACL
# include <linux/xattr_acl.h>
#endif
#ifdef HAVE_LINUX_POSIX_ACL_XATTR_H
# include <linux/posix_acl_xattr.h>
#endif
#endif

#include <lustre_debug.h>
#include <lustre_ver.h>
#include <linux/lustre_version.h>
#include <lustre_disk.h>  /* for s2sbi */

/*
struct lustre_intent_data {
        __u64 it_lock_handle[2];
        __u32 it_disposition;
        __u32 it_status;
        __u32 it_lock_mode;
};
*/

/* If there is no FMODE_EXEC defined, make it match nothing */
#ifndef FMODE_EXEC
#define FMODE_EXEC 0
#endif

#define LL_IT2STR(it) ((it) ? ldlm_it2str((it)->it_op) : "0")
#define LUSTRE_FPRIVATE(file) ((file)->private_data)

#ifdef HAVE_VFS_INTENT_PATCHES
static inline struct lookup_intent *ll_nd2it(struct nameidata *nd)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
        return &nd->intent;
#else
        return nd->intent;
#endif
}
#endif

struct ll_dentry_data {
        int                       lld_cwd_count;
        int                       lld_mnt_count;
        struct obd_client_handle  lld_cwd_och;
        struct obd_client_handle  lld_mnt_och;
#ifndef HAVE_VFS_INTENT_PATCHES
        struct lookup_intent     *lld_it;
#endif
        unsigned int              lld_sa_generation;
        cfs_waitq_t               lld_waitq;
};

#define ll_d2d(de) ((struct ll_dentry_data *)((de)->d_fsdata))

extern struct file_operations ll_pgcache_seq_fops;

#define LLI_INODE_MAGIC          0x111d0de5
#define LLI_INODE_DEAD           0xdeadd00d
#define LLI_F_HAVE_OST_SIZE_LOCK 0
#define LLI_F_HAVE_MDS_SIZE_LOCK 1
#define LLI_F_CONTENDED          2
#define LLI_F_SRVLOCK            3

struct ll_inode_info {
        int                     lli_inode_magic;
        struct semaphore        lli_size_sem;   /* protect open and change size */
        void                   *lli_size_sem_owner;
        struct semaphore        lli_write_sem;
        struct lov_stripe_md   *lli_smd;
        char                   *lli_symlink_name;
        __u64                   lli_maxbytes;
        __u64                   lli_io_epoch;
        unsigned long           lli_flags;
        cfs_time_t              lli_contention_time;

        /* this lock protects s_d_w and p_w_ll and mmap_cnt */
        spinlock_t              lli_lock;
#ifdef HAVE_CLOSE_THREAD
        struct list_head        lli_pending_write_llaps;
        struct list_head        lli_close_item;
        int                     lli_send_done_writing;
#endif
        atomic_t                lli_mmap_cnt;

        /* for writepage() only to communicate to fsync */
        int                     lli_async_rc;

        struct posix_acl       *lli_posix_acl;

        struct list_head        lli_dead_list;

        struct semaphore        lli_och_sem;    /* Protects access to och pointers
                                                 * and their usage counters */
        /* We need all three because every inode may be opened in different
         * modes */
        struct obd_client_handle *lli_mds_read_och;
        __u64                     lli_open_fd_read_count;
        struct obd_client_handle *lli_mds_write_och;
        __u64                     lli_open_fd_write_count;
        struct obd_client_handle *lli_mds_exec_och;
        __u64                     lli_open_fd_exec_count;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
        struct inode            lli_vfs_inode;
#endif

        /* metadata stat-ahead */
        /*
         * "opendir_pid" is the token taken at lookup/revalidate time -- it
         * marks the owner of the dir statahead.
         */
        pid_t                   lli_opendir_pid;
        /*
         * since parent and child threads can share the same @file struct,
         * "opendir_key" is the token checked at dir close: if the parent
         * exits before the child, whoever holds the key must clean up the
         * dir readahead.
         */
        void                   *lli_opendir_key;
        struct ll_statahead_info *lli_sai;
};

/*
 * Locking to guarantee consistency of non-atomic updates to long long i_size,
 * consistency between file size and KMS, and consistency within
 * ->lli_smd->lsm_oinfo[]'s.
 *
 * Implemented by ->lli_size_sem and ->lsm_sem, nested in that order.
 */
void ll_inode_size_lock(struct inode *inode, int lock_lsm);
void ll_inode_size_unlock(struct inode *inode, int unlock_lsm);
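/*
 * A minimal usage sketch (not part of the original header): a caller
 * updating i_size takes the locks above in the documented order.
 * Passing lock_lsm != 0 also takes ->lsm_sem nested inside
 * ->lli_size_sem; i_size_write() is the stock kernel helper for the
 * non-atomic long long update.  The function name is hypothetical.
 */
static inline void ll_example_set_size(struct inode *inode, loff_t size)
{
        ll_inode_size_lock(inode, 1);   /* lli_size_sem, then lsm_sem */
        i_size_write(inode, size);      /* size/KMS stay consistent */
        ll_inode_size_unlock(inode, 1); /* release in reverse order */
}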
// FIXME: replace the name of this with LL_I to conform to kernel stuff
// static inline struct ll_inode_info *LL_I(struct inode *inode)
static inline struct ll_inode_info *ll_i2info(struct inode *inode)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
        return container_of(inode, struct ll_inode_info, lli_vfs_inode);
#else
        CLASSERT(sizeof(inode->u) >= sizeof(struct ll_inode_info));
        return (struct ll_inode_info *)&(inode->u.generic_ip);
#endif
}

/* default to about 40meg of readahead on a given system.  That much tied
 * up in 512k readahead requests serviced at 40ms each is about 1GB/s. */
#define SBI_DEFAULT_READAHEAD_MAX       (40UL << (20 - CFS_PAGE_SHIFT))

/* default to read-ahead full files smaller than 2MB on the second read */
#define SBI_DEFAULT_READAHEAD_WHOLE_MAX (2UL << (20 - CFS_PAGE_SHIFT))
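/*
 * Worked numbers for the defaults above (illustrative, assuming 4k
 * pages, i.e. CFS_PAGE_SHIFT == 12): the (20 - CFS_PAGE_SHIFT) shift
 * converts megabytes to pages, so SBI_DEFAULT_READAHEAD_MAX is
 * 40UL << 8 == 10240 pages == 40MB, and SBI_DEFAULT_READAHEAD_WHOLE_MAX
 * is 2UL << 8 == 512 pages == 2MB.  The 1GB/s figure follows: 40MB of
 * 512k requests is 80 requests in flight; with each completing in 40ms
 * that moves 40MB every 40ms, i.e. about 1GB/s.
 */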
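/*
 * Sketch of the pattern behind ll_i2info() on 2.6 kernels (an
 * assumption for illustration, not the allocator this header ships):
 * the VFS inode is embedded as ->lli_vfs_inode, so the
 * super_operations ->alloc_inode hook hands the VFS the embedded
 * member and container_of() recovers the surrounding ll_inode_info.
 * Real code would use a slab cache rather than kzalloc().
 */
static inline struct inode *ll_example_alloc_inode(struct super_block *sb)
{
        struct ll_inode_info *lli = kzalloc(sizeof(*lli), GFP_KERNEL);

        if (lli == NULL)
                return NULL;
        lli->lli_inode_magic = LLI_INODE_MAGIC;
        return &lli->lli_vfs_inode;     /* ll_i2info() inverts this */
}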
enum ra_stat {
        RA_STAT_HIT = 0,
        RA_STAT_MISS,
        RA_STAT_DISTANT_READPAGE,
        RA_STAT_MISS_IN_WINDOW,
        RA_STAT_FAILED_GRAB_PAGE,
        RA_STAT_FAILED_MATCH,
        RA_STAT_DISCARDED,
        RA_STAT_ZERO_LEN,
        RA_STAT_ZERO_WINDOW,
        RA_STAT_EOF,
        RA_STAT_MAX_IN_FLIGHT,
        RA_STAT_WRONG_GRAB_PAGE,
        _NR_RA_STAT,
};

struct ll_ra_info {
        unsigned long   ra_cur_pages;
        unsigned long   ra_max_pages;
        unsigned long   ra_max_read_ahead_whole_pages;
        unsigned long   ra_stats[_NR_RA_STAT];
};

/* LL_HIST_MAX=32 causes an overflow */
#define LL_HIST_MAX 28
#define LL_HIST_START 12        /* buckets start at 2^12 = 4k */
#define LL_PROCESS_HIST_MAX 10

struct per_process_info {
        pid_t pid;
        struct obd_histogram pp_r_hist;
        struct obd_histogram pp_w_hist;
};

/* pp_extents[LL_PROCESS_HIST_MAX] will hold the combined process info */
struct ll_rw_extents_info {
        struct per_process_info pp_extents[LL_PROCESS_HIST_MAX + 1];
};

#define LL_OFFSET_HIST_MAX 100
struct ll_rw_process_info {
        pid_t         rw_pid;
        int           rw_op;
        loff_t        rw_range_start;
        loff_t        rw_range_end;
        loff_t        rw_last_file_pos;
        loff_t        rw_offset;
        size_t        rw_smallest_extent;
        size_t        rw_largest_extent;
        struct file  *rw_last_file;
};

enum stats_track_type {
        STATS_TRACK_ALL = 0,    /* track all processes */
        STATS_TRACK_PID,        /* track process with this pid */
        STATS_TRACK_PPID,       /* track processes with this ppid */
        STATS_TRACK_GID,        /* track processes with this gid */
        STATS_TRACK_LAST,
};

/* flags for sbi->ll_flags */
#define LL_SBI_NOLCK           0x01  /* DLM locking disabled (directio-only) */
#define LL_SBI_DATA_CHECKSUM   0x02  /* checksum each page on the wire */
#define LL_SBI_FLOCK           0x04
#define LL_SBI_USER_XATTR      0x08  /* support user xattr */
#define LL_SBI_ACL             0x10  /* support ACL */
#define LL_SBI_JOIN            0x20  /* support JOIN */
#define LL_SBI_LOCALFLOCK      0x40  /* Local flocks support by kernel */
#define LL_SBI_LRU_RESIZE      0x80  /* support lru resize */
#define LL_SBI_LLITE_CHECKSUM 0x100  /* checksum each page in memory */

/* default value for ll_sb_info->contention_time */
#define SBI_DEFAULT_CONTENTION_SECONDS 60

struct ll_sb_info {
        struct list_head          ll_list;
        /* this protects pglist and ra_info.  It isn't safe to
         * grab from interrupt contexts */
        spinlock_t                ll_lock;
        spinlock_t                ll_pp_extent_lock; /* lock for pp_extent entries */
        spinlock_t                ll_process_lock;   /* lock for ll_rw_process_info */
        struct obd_uuid           ll_sb_uuid;
        struct obd_export        *ll_mdc_exp;
        struct obd_export        *ll_osc_exp;
        struct proc_dir_entry    *ll_proc_root;
        obd_id                    ll_rootino;        /* number of root inode */

        int                       ll_flags;
        struct list_head          ll_conn_chain;     /* per-conn chain of SBs */
        struct lustre_client_ocd  ll_lco;

        struct list_head          ll_orphan_dentry_list; /* please don't ask -p */
        struct ll_close_queue    *ll_lcq;

        struct lprocfs_stats     *ll_stats;          /* lprocfs stats counter */

        unsigned long             ll_async_page_max;
        unsigned long             ll_async_page_count;
        unsigned long             ll_pglist_gen;
        struct list_head          ll_pglist;         /* all pages (llap_pglist_item) */

        unsigned                  ll_contention_time; /* seconds */

        struct ll_ra_info         ll_ra_info;
        unsigned int              ll_namelen;
        struct file_operations   *ll_fop;

#ifdef HAVE_EXPORT___IGET
        struct list_head          ll_deathrow;       /* inodes to be destroyed (b1443) */
        spinlock_t                ll_deathrow_lock;
#endif
        /* =0 - hold lock over whole read/write
         * >0 - max. chunk to be read/written w/o lock re-acquiring */
        unsigned long             ll_max_rw_chunk;

        /* Statistics */
        struct ll_rw_extents_info ll_rw_extents_info;
        int                       ll_extent_process_count;
        struct ll_rw_process_info ll_rw_process_info[LL_PROCESS_HIST_MAX];
        unsigned int              ll_offset_process_count;
        struct ll_rw_process_info ll_rw_offset_info[LL_OFFSET_HIST_MAX];
        unsigned int              ll_rw_offset_entry_count;
        enum stats_track_type     ll_stats_track_type;
        int                       ll_stats_track_id;
        int                       ll_rw_stats_on;
        dev_t                     ll_sdev_orig;      /* save s_dev before assigning
                                                      * for clustered nfs */

        /* metadata stat-ahead */
        unsigned int              ll_sa_max;     /* max statahead RPCs */
        unsigned int              ll_sa_wrong;   /* statahead thread stopped for
                                                  * low hit ratio */
        unsigned int              ll_sa_total;   /* statahead thread started count */
        unsigned long long        ll_sa_blocked; /* ls count waiting for statahead */
        unsigned long long        ll_sa_cached;  /* ls count got in cache */
        unsigned long long        ll_sa_hit;     /* hit count */
        unsigned long long        ll_sa_miss;    /* miss count */
};

#define LL_DEFAULT_MAX_RW_CHUNK (32 * 1024 * 1024)

struct ll_ra_read {
        pgoff_t             lrr_start;
        pgoff_t             lrr_count;
        struct task_struct *lrr_reader;
        struct list_head    lrr_linkage;
};

/*
 * per file-descriptor read-ahead data.
 */
struct ll_readahead_state {
        spinlock_t      ras_lock;
        /*
         * index of the last page that read(2) needed and that wasn't in the
         * cache.  Used by ras_update() to detect seeks.
         *
         * XXX nikita: if access seeks into cached region, Lustre doesn't see
         * this.
         */