lustre_compat25.h
/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (c) 2003 Cluster File Systems, Inc.
 *
 * This file is part of Lustre, http://www.lustre.org.
 *
 * Lustre is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Lustre is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Lustre; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */

#ifndef _LINUX_COMPAT25_H
#define _LINUX_COMPAT25_H

#ifdef __KERNEL__

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) && LINUX_VERSION_CODE < KERNEL_VERSION(2,5,69)
#error sorry, lustre requires at least 2.5.69
#endif

#include <libcfs/linux/portals_compat25.h>

#include <linux/lustre_patchless_compat.h>

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,14)
struct ll_iattr_struct {
        struct iattr    iattr;
        unsigned int    ia_attr_flags;
};
#else
#define ll_iattr_struct iattr
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,14) */

#ifndef HAVE_SET_FS_PWD
static inline void ll_set_fs_pwd(struct fs_struct *fs, struct vfsmount *mnt,
                                 struct dentry *dentry)
{
        struct dentry *old_pwd;
        struct vfsmount *old_pwdmnt;

        write_lock(&fs->lock);
        old_pwd = fs->pwd;
        old_pwdmnt = fs->pwdmnt;
        fs->pwdmnt = mntget(mnt);
        fs->pwd = dget(dentry);
        write_unlock(&fs->lock);

        if (old_pwd) {
                dput(old_pwd);
                mntput(old_pwdmnt);
        }
}
#else
#define ll_set_fs_pwd set_fs_pwd
#endif /* HAVE_SET_FS_PWD */

#ifdef HAVE_INODE_I_MUTEX
#define UNLOCK_INODE_MUTEX(inode)   do { mutex_unlock(&(inode)->i_mutex); } while(0)
#define LOCK_INODE_MUTEX(inode)     do { mutex_lock(&(inode)->i_mutex); } while(0)
#define TRYLOCK_INODE_MUTEX(inode)  mutex_trylock(&(inode)->i_mutex)
#else
#define UNLOCK_INODE_MUTEX(inode)   do { up(&(inode)->i_sem); } while(0)
#define LOCK_INODE_MUTEX(inode)     do { down(&(inode)->i_sem); } while(0)
#define TRYLOCK_INODE_MUTEX(inode)  (!down_trylock(&(inode)->i_sem))
#endif /* HAVE_INODE_I_MUTEX */

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)
#define d_child d_u.d_child
#define d_rcu   d_u.d_rcu
#endif

#ifdef HAVE_DQUOTOFF_MUTEX
#define UNLOCK_DQONOFF_MUTEX(dqopt) do { mutex_unlock(&(dqopt)->dqonoff_mutex); } while(0)
#define LOCK_DQONOFF_MUTEX(dqopt)   do { mutex_lock(&(dqopt)->dqonoff_mutex); } while(0)
#else
#define UNLOCK_DQONOFF_MUTEX(dqopt) do { up(&(dqopt)->dqonoff_sem); } while(0)
#define LOCK_DQONOFF_MUTEX(dqopt)   do { down(&(dqopt)->dqonoff_sem); } while(0)
#endif /* HAVE_DQUOTOFF_MUTEX */

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4)
#define NGROUPS_SMALL           NGROUPS
#define NGROUPS_PER_BLOCK       ((int)(EXEC_PAGESIZE / sizeof(gid_t)))

struct group_info {
        int        ngroups;
        atomic_t   usage;
        gid_t      small_block[NGROUPS_SMALL];
        int        nblocks;
        gid_t     *blocks[0];
};

#define current_ngroups current->ngroups
#define current_groups  current->groups

struct group_info *groups_alloc(int gidsetsize);
void groups_free(struct group_info *ginfo);

#else /* >= 2.6.4 */

#define current_ngroups current->group_info->ngroups
#define current_groups  current->group_info->small_block

#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) */

#ifndef page_private
#define page_private(page)        ((page)->private)
#define set_page_private(page, v) ((page)->private = (v))
#endif

#ifndef HAVE_GFP_T
#define gfp_t int
#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)

#define lock_dentry(___dentry)   spin_lock(&(___dentry)->d_lock)
#define unlock_dentry(___dentry) spin_unlock(&(___dentry)->d_lock)

#define lock_24kernel()          do {} while (0)
#define unlock_24kernel()        do {} while (0)
#define ll_kernel_locked()       kernel_locked()

/*
 * OBD need working random driver, thus all our
 * initialization routines must be called after device
 * driver initialization
 */
#ifndef MODULE
#undef module_init
#define module_init(a) late_initcall(a)
#endif

/* XXX our code should be using the 2.6 calls, not the other way around */
#define TryLockPage(page)        TestSetPageLocked(page)
#define Page_Uptodate(page)      PageUptodate(page)
#define ll_redirty_page(page)    set_page_dirty(page)

#define KDEVT_INIT(val)          (val)

#define LTIME_S(time)            (time.tv_sec)
#define ll_path_lookup           path_lookup
#define ll_permission(inode,mask,nd) permission(inode,mask,nd)

#define ll_pgcache_lock(mapping)   spin_lock(&mapping->page_lock)
#define ll_pgcache_unlock(mapping) spin_unlock(&mapping->page_lock)
#define ll_call_writepage(inode, page) \
        (inode)->i_mapping->a_ops->writepage(page, NULL)
#define ll_invalidate_inode_pages(inode) \
        invalidate_inode_pages((inode)->i_mapping)
#define ll_truncate_complete_page(page) \
        truncate_complete_page(page->mapping, page)

#define ll_vfs_create(a,b,c,d)   vfs_create(a,b,c,d)
#define ll_dev_t                 dev_t
#define kdev_t                   dev_t
#define to_kdev_t(dev)           (dev)
#define kdev_t_to_nr(dev)        (dev)
#define val_to_kdev(dev)         (dev)
#define ILOOKUP(sb, ino, test, data) ilookup5(sb, ino, test, data);

#include <linux/writeback.h>

static inline int cleanup_group_info(void)
{
        struct group_info *ginfo;

        ginfo = groups_alloc(0);
        if (!ginfo)
                return -ENOMEM;

        set_current_groups(ginfo);
        put_group_info(ginfo);

        return 0;
}

#define __set_page_ll_data(page, llap) \
        do { \
                page_cache_get(page); \
                SetPagePrivate(page); \
                set_page_private(page, (unsigned long)llap); \
        } while (0)
#define __clear_page_ll_data(page) \
        do { \
                ClearPagePrivate(page); \
                set_page_private(page, 0); \
                page_cache_release(page); \
        } while(0)

#define kiobuf bio

#include <linux/proc_fs.h>

#ifndef HAVE___D_REHASH
#define __d_rehash(dentry, lock) d_rehash_cond(dentry, lock)
#endif

#ifdef HAVE_CAN_SLEEP_ARG
#define ll_flock_lock_file_wait(file, lock, can_sleep) \
        flock_lock_file_wait(file, lock, can_sleep)
#else
#define ll_flock_lock_file_wait(file, lock, can_sleep) \
        flock_lock_file_wait(file, lock)
#endif

#define CheckWriteback(page, cmd) \
        (!(!PageWriteback(page) && cmd == OBD_BRW_WRITE))

#else /* 2.4.. */

#define ll_flock_lock_file_wait(file, lock, can_sleep) \
        do {} while(0)

#define lock_dentry(___dentry)
#define unlock_dentry(___dentry)

#define lock_24kernel()          lock_kernel()
#define unlock_24kernel()        unlock_kernel()
#define ll_kernel_locked()       (current->lock_depth >= 0)

/* 2.4 kernels have HZ=100 on i386/x86_64, this should be reasonably safe */
#define get_jiffies_64()         (__u64)jiffies

#ifdef HAVE_MM_INLINE
#include <linux/mm_inline.h>
#endif

#ifndef pgoff_t
#define pgoff_t unsigned long
#endif

#define ll_vfs_create(a,b,c,d)   vfs_create(a,b,c)
#define ll_permission(inode,mask,nd) permission(inode,mask)
#define ILOOKUP(sb, ino, test, data) ilookup4(sb, ino, test, data);
#define DCACHE_DISCONNECTED      DCACHE_NFSD_DISCONNECTED
#define ll_dev_t                 int
#define old_encode_dev(dev)      (dev)

/* 2.5 uses hlists for some things, like the d_hash.  we'll treat them
 * as 2.5 and let macros drop back.. */
#ifndef HLIST_HEAD /* until we get a kernel newer than l28 */
#define hlist_entry              list_entry
#define hlist_head               list_head
#define hlist_node               list_head
#define HLIST_HEAD               LIST_HEAD
#define INIT_HLIST_HEAD          INIT_LIST_HEAD
#define hlist_del_init           list_del_init
#define hlist_add_head           list_add
#endif

#ifndef INIT_HLIST_NODE
#define INIT_HLIST_NODE(p)       ((p)->next = NULL, (p)->prev = NULL)
#endif

#ifndef hlist_for_each
#define hlist_for_each           list_for_each
#endif

#ifndef hlist_for_each_safe
#define hlist_for_each_safe      list_for_each_safe
#endif

#define KDEVT_INIT(val)          (val)
#define ext3_xattr_set_handle    ext3_xattr_set
#define try_module_get           __MOD_INC_USE_COUNT
#define module_put               __MOD_DEC_USE_COUNT
#define LTIME_S(time)            (time)

#if !defined(CONFIG_RH_2_4_20) && !defined(cpu_online)
#define cpu_online(cpu)          test_bit(cpu, &(cpu_online_map))
#endif

static inline int ll_path_lookup(const char *path, unsigned flags,
                                 struct nameidata *nd)
{
        int error = 0;
        if (path_init(path, flags, nd))
                error = path_walk(path, nd);
        return error;
}
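Usage note (not part of the original header): a minimal caller sketch of how the LOCK_INODE_MUTEX()/UNLOCK_INODE_MUTEX() compat macros defined above let one call site build against both i_mutex-based kernels (HAVE_INODE_I_MUTEX) and older i_sem-based ones. The function name ll_example_set_size() and the include path for this header are hypothetical; i_size_write() is the stock <linux/fs.h> helper.

/* Hypothetical caller sketch, assuming this header is on the include path. */
#include <linux/fs.h>
#include <linux/lustre_compat25.h>

static void ll_example_set_size(struct inode *inode, loff_t size)
{
        /* Expands to mutex_lock(&inode->i_mutex) when HAVE_INODE_I_MUTEX
         * is defined, or down(&inode->i_sem) on older kernels. */
        LOCK_INODE_MUTEX(inode);
        i_size_write(inode, size);
        UNLOCK_INODE_MUTEX(inode);
}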