fs.h
/**
 * enum positive_aop_returns - aop return codes with specific semantics
 *
 * @AOP_WRITEPAGE_ACTIVATE: Informs the caller that page writeback has
 *                          completed, that the page is still locked, and
 *                          should be considered active.  The VM uses this
 *                          hint to return the page to the active list -- it
 *                          won't be a candidate for writeback again in the
 *                          near future.  Other callers must be careful to
 *                          unlock the page if they get this return.
 *                          Returned by writepage();
 *
 * @AOP_TRUNCATED_PAGE: The AOP method that was handed a locked page has
 *                      unlocked it and the page might have been truncated.
 *                      The caller should back up to acquiring a new page and
 *                      trying again.  The aop will be taking reasonable
 *                      precautions not to livelock.  If the caller held a
 *                      page reference, it should drop it before retrying.
 *                      Returned by readpage(), prepare_write(), and
 *                      commit_write().
 *
 * address_space_operation functions return these large constants to indicate
 * special semantics to the caller.  These are much larger than the bytes in a
 * page to allow for functions that return the number of bytes operated on in
 * a given page.
 */
enum positive_aop_returns {
        AOP_WRITEPAGE_ACTIVATE  = 0x80000,
        AOP_TRUNCATED_PAGE      = 0x80001,
};

/*
 * oh the beauties of C type declarations.
 */
struct page;
struct address_space;
struct writeback_control;

struct address_space_operations {
        int (*writepage)(struct page *page, struct writeback_control *wbc);
        int (*readpage)(struct file *, struct page *);
        void (*sync_page)(struct page *);

        /* Write back some dirty pages from this mapping. */
        int (*writepages)(struct address_space *, struct writeback_control *);

        /* Set a page dirty.  Return true if this dirtied it */
        int (*set_page_dirty)(struct page *page);

        int (*readpages)(struct file *filp, struct address_space *mapping,
                        struct list_head *pages, unsigned nr_pages);

        /*
         * ext3 requires that a successful prepare_write() call be followed
         * by a commit_write() call - they must be balanced
         */
        int (*prepare_write)(struct file *, struct page *, unsigned, unsigned);
        int (*commit_write)(struct file *, struct page *, unsigned, unsigned);
        /* Unfortunately this kludge is needed for FIBMAP. Don't use it */
        sector_t (*bmap)(struct address_space *, sector_t);
        void (*invalidatepage) (struct page *, unsigned long);
        int (*releasepage) (struct page *, gfp_t);
        ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
                        loff_t offset, unsigned long nr_segs);
        struct page* (*get_xip_page)(struct address_space *, sector_t, int);
        /* migrate the contents of a page to the specified target */
        int (*migratepage) (struct address_space *,
                        struct page *, struct page *);
        int (*launder_page) (struct page *);
};
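/*
 * Illustrative sketch (not part of this header): a minimal caller-side
 * example of the contract documented above.  Every successful
 * prepare_write() must be balanced by commit_write(), and an
 * AOP_TRUNCATED_PAGE return means the aop unlocked the page, so the
 * caller must drop its reference and retry with a fresh lookup.  The
 * function name write_one_chunk() and the -EAGAIN retry convention are
 * hypothetical; the real callers live in mm/filemap.c.
 */
static int write_one_chunk(struct file *file, struct page *page,
                           unsigned from, unsigned to)
{
        const struct address_space_operations *a_ops =
                page->mapping->a_ops;
        int ret;

        ret = a_ops->prepare_write(file, page, from, to);
        if (ret == AOP_TRUNCATED_PAGE) {
                /* The page was unlocked and may have been truncated:
                 * drop our reference and signal the caller to retry. */
                page_cache_release(page);
                return -EAGAIN;         /* hypothetical retry signal */
        }
        if (ret)
                return ret;

        /* ... copy the user data into [from, to) of the page here ... */

        /* Balance the successful prepare_write(). */
        return a_ops->commit_write(file, page, from, to);
}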
struct backing_dev_info;
struct address_space {
        struct inode            *host;          /* owner: inode, block_device */
        struct radix_tree_root  page_tree;      /* radix tree of all pages */
        rwlock_t                tree_lock;      /* and rwlock protecting it */
        unsigned int            i_mmap_writable;/* count VM_SHARED mappings */
        struct prio_tree_root   i_mmap;         /* tree of private and shared mappings */
        struct list_head        i_mmap_nonlinear;/*list VM_NONLINEAR mappings */
        spinlock_t              i_mmap_lock;    /* protect tree, count, list */
        unsigned int            truncate_count; /* Cover race condition with truncate */
        unsigned long           nrpages;        /* number of total pages */
        pgoff_t                 writeback_index;/* writeback starts here */
        const struct address_space_operations *a_ops;  /* methods */
        unsigned long           flags;          /* error bits/gfp mask */
        struct backing_dev_info *backing_dev_info; /* device readahead, etc */
        spinlock_t              private_lock;   /* for use by the address_space */
        struct list_head        private_list;   /* ditto */
        struct address_space    *assoc_mapping; /* ditto */
} __attribute__((aligned(sizeof(long))));
        /*
         * On most architectures that alignment is already the case; but
         * must be enforced here for CRIS, to let the least significant bit
         * of struct page's "mapping" pointer be used for PAGE_MAPPING_ANON.
         */

struct block_device {
        dev_t                   bd_dev;  /* not a kdev_t - it's a search key */
        struct inode *          bd_inode;       /* will die */
        int                     bd_openers;
        struct mutex            bd_mutex;       /* open/close mutex */
        struct semaphore        bd_mount_sem;
        struct list_head        bd_inodes;
        void *                  bd_holder;
        int                     bd_holders;
#ifdef CONFIG_SYSFS
        struct list_head        bd_holder_list;
#endif
        struct block_device *   bd_contains;
        unsigned                bd_block_size;
        struct hd_struct *      bd_part;
        /* number of times partitions within this device have been opened. */
        unsigned                bd_part_count;
        int                     bd_invalidated;
        struct gendisk *        bd_disk;
        struct list_head        bd_list;
        struct backing_dev_info *bd_inode_backing_dev_info;
        /*
         * Private data.  You must have bd_claim'ed the block_device
         * to use this.  NOTE:  bd_claim allows an owner to claim
         * the same device multiple times, the owner must take special
         * care to not mess up bd_private for that case.
         */
        unsigned long           bd_private;
};

/*
 * Radix-tree tags, for tagging dirty and writeback pages within the pagecache
 * radix trees
 */
#define PAGECACHE_TAG_DIRTY     0
#define PAGECACHE_TAG_WRITEBACK 1

int mapping_tagged(struct address_space *mapping, int tag);

/*
 * Might pages of this file be mapped into userspace?
 */
static inline int mapping_mapped(struct address_space *mapping)
{
        return  !prio_tree_empty(&mapping->i_mmap) ||
                !list_empty(&mapping->i_mmap_nonlinear);
}

/*
 * Might pages of this file have been modified in userspace?
 * Note that i_mmap_writable counts all VM_SHARED vmas: do_mmap_pgoff
 * marks vma as VM_SHARED if it is shared, and the file was opened for
 * writing i.e. vma may be mprotected writable even if now readonly.
 */
static inline int mapping_writably_mapped(struct address_space *mapping)
{
        return mapping->i_mmap_writable != 0;
}
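/*
 * Illustrative sketch (not part of this header): the classic consumer
 * of mapping_writably_mapped() is the file read path.  If userspace may
 * have written through a shared mapping, the kernel's view of the page
 * can be stale on virtually-indexed caches, so the page is flushed
 * before its contents are copied out.  flush_dcache_page() is a no-op
 * on cache-coherent architectures; the helper name is hypothetical.
 */
static void example_prepare_page_for_copy(struct address_space *mapping,
                                          struct page *page)
{
        if (mapping_writably_mapped(mapping))
                flush_dcache_page(page);
}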
/*
 * Use sequence counter to get consistent i_size on 32-bit processors.
 */
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
#include <linux/seqlock.h>
#define __NEED_I_SIZE_ORDERED
#define i_size_ordered_init(inode) seqcount_init(&inode->i_size_seqcount)
#else
#define i_size_ordered_init(inode) do { } while (0)
#endif

struct inode {
        struct hlist_node       i_hash;
        struct list_head        i_list;
        struct list_head        i_sb_list;
        struct list_head        i_dentry;
        unsigned long           i_ino;
        atomic_t                i_count;
        unsigned int            i_nlink;
        uid_t                   i_uid;
        gid_t                   i_gid;
        dev_t                   i_rdev;
        unsigned long           i_version;
        loff_t                  i_size;
#ifdef __NEED_I_SIZE_ORDERED
        seqcount_t              i_size_seqcount;
#endif
        struct timespec         i_atime;
        struct timespec         i_mtime;
        struct timespec         i_ctime;
        unsigned int            i_blkbits;
        blkcnt_t                i_blocks;
        unsigned short          i_bytes;
        umode_t                 i_mode;
        spinlock_t              i_lock; /* i_blocks, i_bytes, maybe i_size */
        struct mutex            i_mutex;
        struct rw_semaphore     i_alloc_sem;
        struct inode_operations *i_op;
        const struct file_operations *i_fop;    /* former ->i_op->default_file_ops */
        struct super_block      *i_sb;
        struct file_lock        *i_flock;
        struct address_space    *i_mapping;
        /* The LSB in i_data below is used for the PAGE_MAPPING_ANON flag.
         * This assumes that the address of this member isn't odd which
         * is not true for all architectures.  Force the compiler to align it.
         */
        struct address_space    i_data __attribute__ ((aligned(4)));
#ifdef CONFIG_QUOTA
        struct dquot            *i_dquot[MAXQUOTAS];
#endif
        struct list_head        i_devices;
        union {
                struct pipe_inode_info  *i_pipe;
                struct block_device     *i_bdev;
                struct cdev             *i_cdev;
        };
        int                     i_cindex;

        __u32                   i_generation;

#ifdef CONFIG_DNOTIFY
        unsigned long           i_dnotify_mask; /* Directory notify events */
        struct dnotify_struct   *i_dnotify;     /* for directory notifications */
#endif

#ifdef CONFIG_INOTIFY
        struct list_head        inotify_watches; /* watches on this inode */
        struct mutex            inotify_mutex;  /* protects the watches list */
#endif

        unsigned long           i_state;
        unsigned long           dirtied_when;   /* jiffies of first dirtying */

        unsigned int            i_flags;

        atomic_t                i_writecount;
#ifdef CONFIG_SECURITY
        void                    *i_security;
#endif
        void                    *i_private;     /* fs or device private pointer */
};

/*
 * inode->i_mutex nesting subclasses for the lock validator:
 *
 * 0: the object of the current VFS operation
 * 1: parent
 * 2: child/target
 * 3: quota file
 *
 * The locking order between these classes is
 * parent -> child -> normal -> xattr -> quota
 */
enum inode_i_mutex_lock_class
{
        I_MUTEX_NORMAL,
        I_MUTEX_PARENT,
        I_MUTEX_CHILD,
        I_MUTEX_XATTR,
        I_MUTEX_QUOTA
};

extern void inode_double_lock(struct inode *inode1, struct inode *inode2);
extern void inode_double_unlock(struct inode *inode1, struct inode *inode2);
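/*
 * Illustrative sketch (not part of this header): the subclasses above
 * are consumed via mutex_lock_nested(), which tells the lock validator
 * which nesting level an i_mutex acquisition belongs to, so that holding
 * a parent's and a child's i_mutex at once is not reported as a
 * self-deadlock.  The helper below is hypothetical.
 */
static void example_lock_parent_then_child(struct inode *dir,
                                           struct inode *child)
{
        /* Documented order: parent -> child. */
        mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
        mutex_lock_nested(&child->i_mutex, I_MUTEX_CHILD);

        /* ... operate on both inodes ... */

        mutex_unlock(&child->i_mutex);
        mutex_unlock(&dir->i_mutex);
}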
/*
 * NOTE: on a 32bit arch with a preemptible kernel and
 * a UP compile, i_size_read/write must be atomic
 * with respect to the local cpu (unlike with preempt disabled),
 * but they don't need to be atomic with respect to other cpus like in
 * true SMP (so they either need to locally disable irqs around
 * the read, or, on x86 for example, they can still be implemented as a
 * cmpxchg8b without needing the lock prefix).  For SMP compiles
 * and 64bit archs it makes no difference if preempt is enabled or not.
 */
static inline loff_t i_size_read(const struct inode *inode)
{
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
        loff_t i_size;
        unsigned int seq;

        do {
                seq = read_seqcount_begin(&inode->i_size_seqcount);
                i_size = inode->i_size;
        } while (read_seqcount_retry(&inode->i_size_seqcount, seq));
        return i_size;
#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT)
        loff_t i_size;

        preempt_disable();
        i_size = inode->i_size;
        preempt_enable();
        return i_size;
#else
        return inode->i_size;
#endif
}

/*
 * NOTE: unlike i_size_read(), i_size_write() does need locking around it
 * (normally i_mutex), otherwise on 32bit/SMP an update of i_size_seqcount
 * can be lost, resulting in subsequent i_size_read() calls spinning forever.
 */
static inline void i_size_write(struct inode *inode, loff_t i_size)
{
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
        write_seqcount_begin(&inode->i_size_seqcount);
        inode->i_size = i_size;
        write_seqcount_end(&inode->i_size_seqcount);
#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT)
        preempt_disable();
        inode->i_size = i_size;
        preempt_enable();
#else
        inode->i_size = i_size;
#endif
}

static inline unsigned iminor(const struct inode *inode)
{
        return MINOR(inode->i_rdev);
}

static inline unsigned imajor(const struct inode *inode)
{
        return MAJOR(inode->i_rdev);
}

extern struct block_device *I_BDEV(struct inode *inode);

struct fown_struct {
        rwlock_t lock;          /* protects pid, uid, euid fields */
        struct pid *pid;        /* pid or -pgrp where SIGIO should be sent */
        enum pid_type pid_type; /* Kind of process group SIGIO should be sent to */
        uid_t uid, euid;        /* uid/euid of process setting the owner */
        int signum;             /* posix.1b rt signal to be delivered on IO */
};

/*
 * Track a single file's readahead state
 */
struct file_ra_state {
        unsigned long start;            /* Current window */
        unsigned long size;
        unsigned long flags;            /* ra flags RA_FLAG_xxx */
        unsigned long cache_hit;        /* cache hit count */
        unsigned long prev_page;        /* Cache last read() position */
        unsigned long ahead_start;      /* Ahead window */
        unsigned long ahead_size;
        unsigned long ra_pages;         /* Maximum readahead window */
        unsigned long mmap_hit;         /* Cache hit stat for mmap accesses */
        unsigned long mmap_miss;        /* Cache miss stat for mmap accesses */
};
#define RA_FLAG_MISS    0x01    /* a cache miss occurred against this file */
#define RA_FLAG_INCACHE 0x02    /* file is already in cache */

struct file {
        /*
         * fu_list becomes invalid after file_free is called and queued via
         * fu_rcuhead for RCU freeing
         */
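/*
 * Illustrative sketch (not part of this header): how the i_size helpers
 * above are meant to be paired.  i_size_write() must be serialized,
 * normally under i_mutex, exactly as its comment demands, while
 * i_size_read() callers take no lock at all.  The extend helper itself
 * is hypothetical.
 */
static void example_extend_i_size(struct inode *inode, loff_t new_size)
{
        mutex_lock(&inode->i_mutex);
        if (new_size > i_size_read(inode))
                i_size_write(inode, new_size);
        mutex_unlock(&inode->i_mutex);
}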