xfs_super.c
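/*
 * Overview of this excerpt (summary added by the editor, derived from the
 * code below): the ENOSPC flush helpers, the xfssyncd worker thread, the
 * super_operations and quotactl_ops tables, and the module init/exit code
 * that registers the "xfs" filesystem type.
 */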
/*
 * Flush delayed allocate data, attempting to free up reserved space
 * from existing allocations.  At this point a new allocation attempt
 * has failed with ENOSPC and we are in the process of scratching our
 * heads, looking about for more room...
 */
STATIC void
xfs_flush_inode_work(
	struct xfs_mount	*mp,
	void			*arg)
{
	struct inode	*inode = arg;

	filemap_flush(inode->i_mapping);
	iput(inode);
}

void
xfs_flush_inode(
	xfs_inode_t	*ip)
{
	struct inode	*inode = ip->i_vnode;

	igrab(inode);
	xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_inode_work);
	delay(msecs_to_jiffies(500));
}

/*
 * This is the "bigger hammer" version of xfs_flush_inode_work...
 * (IOW, "If at first you don't succeed, use a Bigger Hammer").
 */
STATIC void
xfs_flush_device_work(
	struct xfs_mount	*mp,
	void			*arg)
{
	struct inode	*inode = arg;

	sync_blockdev(mp->m_super->s_bdev);
	iput(inode);
}

void
xfs_flush_device(
	xfs_inode_t	*ip)
{
	struct inode	*inode = vn_to_inode(XFS_ITOV(ip));

	igrab(inode);
	xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_device_work);
	delay(msecs_to_jiffies(500));
	xfs_log_force(ip->i_mount, (xfs_lsn_t)0, XFS_LOG_FORCE|XFS_LOG_SYNC);
}

STATIC void
xfs_sync_worker(
	struct xfs_mount	*mp,
	void			*unused)
{
	int	error;

	if (!(mp->m_flags & XFS_MOUNT_RDONLY))
		error = xfs_sync(mp, SYNC_FSDATA | SYNC_BDFLUSH | SYNC_ATTR |
				     SYNC_REFCACHE | SYNC_SUPER);
	mp->m_sync_seq++;
	wake_up(&mp->m_wait_single_sync_task);
}

STATIC int
xfssyncd(
	void	*arg)
{
	struct xfs_mount	*mp = arg;
	long			timeleft;
	bhv_vfs_sync_work_t	*work, *n;
	LIST_HEAD(tmp);

	set_freezable();
	timeleft = xfs_syncd_centisecs * msecs_to_jiffies(10);
	for (;;) {
		timeleft = schedule_timeout_interruptible(timeleft);
		/* swsusp */
		try_to_freeze();
		if (kthread_should_stop() && list_empty(&mp->m_sync_list))
			break;

		spin_lock(&mp->m_sync_lock);
		/*
		 * We can get woken by laptop mode, to do a sync -
		 * that's the (only!) case where the list would be
		 * empty with time remaining.
		 */
		if (!timeleft || list_empty(&mp->m_sync_list)) {
			if (!timeleft)
				timeleft = xfs_syncd_centisecs *
						msecs_to_jiffies(10);
			INIT_LIST_HEAD(&mp->m_sync_work.w_list);
			list_add_tail(&mp->m_sync_work.w_list,
					&mp->m_sync_list);
		}
		list_for_each_entry_safe(work, n, &mp->m_sync_list, w_list)
			list_move(&work->w_list, &tmp);
		spin_unlock(&mp->m_sync_lock);

		list_for_each_entry_safe(work, n, &tmp, w_list) {
			(*work->w_syncer)(mp, work->w_data);
			list_del(&work->w_list);
			if (work == &mp->m_sync_work)
				continue;
			kmem_free(work, sizeof(struct bhv_vfs_sync_work));
		}
	}

	return 0;
}
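/*
 * Editor's sketch: xfs_syncd_queue_work() is called above but not shown in
 * this excerpt.  Based on how xfssyncd() drains mp->m_sync_list under
 * m_sync_lock, the producer side plausibly looks like the following.  The
 * allocation flags and exact field layout of bhv_vfs_sync_work are
 * assumptions, not taken from this listing; the block is disabled with
 * #if 0 so it cannot clash with the real definition.
 */
#if 0	/* illustrative sketch only */
STATIC void
xfs_syncd_queue_work(
	struct xfs_mount	*mp,
	void			*data,
	void			(*syncer)(struct xfs_mount *, void *))
{
	struct bhv_vfs_sync_work	*work;

	/* Package the callback and its argument as a one-shot work item. */
	work = kmem_alloc(sizeof(struct bhv_vfs_sync_work), KM_SLEEP);
	INIT_LIST_HEAD(&work->w_list);
	work->w_syncer = syncer;
	work->w_data = data;
	work->w_mount = mp;

	/* Publish under the same lock xfssyncd() takes, then wake it. */
	spin_lock(&mp->m_sync_lock);
	list_add_tail(&work->w_list, &mp->m_sync_list);
	spin_unlock(&mp->m_sync_lock);
	wake_up_process(mp->m_sync_task);
}
#endif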
STATIC void
xfs_fs_put_super(
	struct super_block	*sb)
{
	struct xfs_mount	*mp = XFS_M(sb);
	int			error;

	kthread_stop(mp->m_sync_task);

	xfs_sync(mp, SYNC_ATTR | SYNC_DELWRI);
	error = xfs_unmount(mp, 0, NULL);
	if (error)
		printk("XFS: unmount got error=%d\n", error);
}

STATIC void
xfs_fs_write_super(
	struct super_block	*sb)
{
	if (!(sb->s_flags & MS_RDONLY))
		xfs_sync(XFS_M(sb), SYNC_FSDATA);
	sb->s_dirt = 0;
}

STATIC int
xfs_fs_sync_super(
	struct super_block	*sb,
	int			wait)
{
	struct xfs_mount	*mp = XFS_M(sb);
	int			error;
	int			flags;

	/*
	 * Treat a sync operation like a freeze.  This is to work
	 * around a race in sync_inodes() which works in two phases
	 * - an asynchronous flush, which can write out an inode
	 * without waiting for file size updates to complete, and a
	 * synchronous flush, which won't do anything because the
	 * async flush removed the inode's dirty flag.  Also
	 * sync_inodes() will not see any files that just have
	 * outstanding transactions to be flushed because we don't
	 * dirty the Linux inode until after the transaction I/O
	 * completes.
	 */
	if (wait || unlikely(sb->s_frozen == SB_FREEZE_WRITE)) {
		/*
		 * First stage of freeze - no more writers will make progress
		 * now we are here, so we flush delwri and delalloc buffers
		 * here, then wait for all I/O to complete.  Data is frozen
		 * at that point.  Metadata is not frozen, transactions can
		 * still occur here so don't bother flushing the buftarg
		 * (i.e. SYNC_QUIESCE) because it'll just get dirty again.
		 */
		flags = SYNC_DATA_QUIESCE;
	} else
		flags = SYNC_FSDATA;

	error = xfs_sync(mp, flags);
	sb->s_dirt = 0;

	if (unlikely(laptop_mode)) {
		int	prev_sync_seq = mp->m_sync_seq;

		/*
		 * The disk must be active because we're syncing.
		 * We schedule xfssyncd now (now that the disk is
		 * active) instead of later (when it might not be).
		 */
		wake_up_process(mp->m_sync_task);
		/*
		 * We have to wait for the sync iteration to complete.
		 * If we don't, the disk activity caused by the sync
		 * will come after the sync is completed, and that
		 * triggers another sync from laptop mode.
		 */
		wait_event(mp->m_wait_single_sync_task,
				mp->m_sync_seq != prev_sync_seq);
	}

	return -error;
}

STATIC int
xfs_fs_statfs(
	struct dentry		*dentry,
	struct kstatfs		*statp)
{
	return -xfs_statvfs(XFS_M(dentry->d_sb), statp,
				vn_from_inode(dentry->d_inode));
}

STATIC int
xfs_fs_remount(
	struct super_block	*sb,
	int			*flags,
	char			*options)
{
	struct xfs_mount	*mp = XFS_M(sb);
	struct xfs_mount_args	*args = xfs_args_allocate(sb, 0);
	int			error;

	error = xfs_parseargs(mp, options, args, 1);
	if (!error)
		error = xfs_mntupdate(mp, flags, args);
	kmem_free(args, sizeof(*args));
	return -error;
}

STATIC void
xfs_fs_lockfs(
	struct super_block	*sb)
{
	xfs_freeze(XFS_M(sb));
}

STATIC int
xfs_fs_show_options(
	struct seq_file		*m,
	struct vfsmount		*mnt)
{
	return -xfs_showargs(XFS_M(mnt->mnt_sb), m);
}

STATIC int
xfs_fs_quotasync(
	struct super_block	*sb,
	int			type)
{
	return -XFS_QM_QUOTACTL(XFS_M(sb), Q_XQUOTASYNC, 0, NULL);
}

STATIC int
xfs_fs_getxstate(
	struct super_block	*sb,
	struct fs_quota_stat	*fqs)
{
	return -XFS_QM_QUOTACTL(XFS_M(sb), Q_XGETQSTAT, 0, (caddr_t)fqs);
}

STATIC int
xfs_fs_setxstate(
	struct super_block	*sb,
	unsigned int		flags,
	int			op)
{
	return -XFS_QM_QUOTACTL(XFS_M(sb), op, 0, (caddr_t)&flags);
}

STATIC int
xfs_fs_getxquota(
	struct super_block	*sb,
	int			type,
	qid_t			id,
	struct fs_disk_quota	*fdq)
{
	return -XFS_QM_QUOTACTL(XFS_M(sb),
				(type == USRQUOTA) ? Q_XGETQUOTA :
				 ((type == GRPQUOTA) ? Q_XGETGQUOTA :
				  Q_XGETPQUOTA), id, (caddr_t)fdq);
}

STATIC int
xfs_fs_setxquota(
	struct super_block	*sb,
	int			type,
	qid_t			id,
	struct fs_disk_quota	*fdq)
{
	return -XFS_QM_QUOTACTL(XFS_M(sb),
				(type == USRQUOTA) ? Q_XSETQLIM :
				 ((type == GRPQUOTA) ? Q_XSETGQLIM :
				  Q_XSETPQLIM), id, (caddr_t)fdq);
}
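/*
 * Editor's note on the "return -error" / "return -XFS_QM_QUOTACTL(...)"
 * pattern in the methods above: the XFS core returns positive errno
 * values, while the Linux VFS expects negative ones, so every entry
 * point negates the result on the way out.
 */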
STATIC int
xfs_fs_fill_super(
	struct super_block	*sb,
	void			*data,
	int			silent)
{
	struct inode		*rootvp;
	struct xfs_mount	*mp = NULL;
	struct xfs_mount_args	*args = xfs_args_allocate(sb, silent);
	struct kstatfs		statvfs;
	int			error;

	mp = xfs_mount_init();

	INIT_LIST_HEAD(&mp->m_sync_list);
	spin_lock_init(&mp->m_sync_lock);
	init_waitqueue_head(&mp->m_wait_single_sync_task);

	mp->m_super = sb;
	sb->s_fs_info = mp;

	if (sb->s_flags & MS_RDONLY)
		mp->m_flags |= XFS_MOUNT_RDONLY;

	error = xfs_parseargs(mp, (char *)data, args, 0);
	if (error)
		goto fail_vfsop;

	sb_min_blocksize(sb, BBSIZE);
	sb->s_export_op = &xfs_export_operations;
	sb->s_qcop = &xfs_quotactl_operations;
	sb->s_op = &xfs_super_operations;

	error = xfs_mount(mp, args, NULL);
	if (error)
		goto fail_vfsop;

	error = xfs_statvfs(mp, &statvfs, NULL);
	if (error)
		goto fail_unmount;

	sb->s_dirt = 1;
	sb->s_magic = statvfs.f_type;
	sb->s_blocksize = statvfs.f_bsize;
	sb->s_blocksize_bits = ffs(statvfs.f_bsize) - 1;
	sb->s_maxbytes = xfs_max_file_offset(sb->s_blocksize_bits);
	sb->s_time_gran = 1;
	set_posix_acl_flag(sb);

	error = xfs_root(mp, &rootvp);
	if (error)
		goto fail_unmount;

	sb->s_root = d_alloc_root(vn_to_inode(rootvp));
	if (!sb->s_root) {
		error = ENOMEM;
		goto fail_vnrele;
	}
	if (is_bad_inode(sb->s_root->d_inode)) {
		error = EINVAL;
		goto fail_vnrele;
	}

	mp->m_sync_work.w_syncer = xfs_sync_worker;
	mp->m_sync_work.w_mount = mp;
	mp->m_sync_task = kthread_run(xfssyncd, mp, "xfssyncd");
	if (IS_ERR(mp->m_sync_task)) {
		error = -PTR_ERR(mp->m_sync_task);
		goto fail_vnrele;
	}

	vn_trace_exit(XFS_I(sb->s_root->d_inode), __FUNCTION__,
			(inst_t *)__return_address);

	kmem_free(args, sizeof(*args));
	return 0;

fail_vnrele:
	if (sb->s_root) {
		dput(sb->s_root);
		sb->s_root = NULL;
	} else {
		VN_RELE(rootvp);
	}

fail_unmount:
	xfs_unmount(mp, 0, NULL);

fail_vfsop:
	kmem_free(args, sizeof(*args));
	return -error;
}

STATIC int
xfs_fs_get_sb(
	struct file_system_type	*fs_type,
	int			flags,
	const char		*dev_name,
	void			*data,
	struct vfsmount		*mnt)
{
	return get_sb_bdev(fs_type, flags, dev_name, data, xfs_fs_fill_super,
			   mnt);
}

static struct super_operations xfs_super_operations = {
	.alloc_inode		= xfs_fs_alloc_inode,
	.destroy_inode		= xfs_fs_destroy_inode,
	.write_inode		= xfs_fs_write_inode,
	.clear_inode		= xfs_fs_clear_inode,
	.put_super		= xfs_fs_put_super,
	.write_super		= xfs_fs_write_super,
	.sync_fs		= xfs_fs_sync_super,
	.write_super_lockfs	= xfs_fs_lockfs,
	.statfs			= xfs_fs_statfs,
	.remount_fs		= xfs_fs_remount,
	.show_options		= xfs_fs_show_options,
};

static struct quotactl_ops xfs_quotactl_operations = {
	.quota_sync		= xfs_fs_quotasync,
	.get_xstate		= xfs_fs_getxstate,
	.set_xstate		= xfs_fs_setxstate,
	.get_xquota		= xfs_fs_getxquota,
	.set_xquota		= xfs_fs_setxquota,
};

static struct file_system_type xfs_fs_type = {
	.owner			= THIS_MODULE,
	.name			= "xfs",
	.get_sb			= xfs_fs_get_sb,
	.kill_sb		= kill_block_super,
	.fs_flags		= FS_REQUIRES_DEV,
};

STATIC int __init
init_xfs_fs( void )
{
	int			error;
	static char		message[] __initdata = KERN_INFO \
		XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled\n";

	printk(message);

	ktrace_init(64);

	error = xfs_init_zones();
	if (error < 0)
		goto undo_zones;

	error = xfs_buf_init();
	if (error < 0)
		goto undo_buffers;

	vn_init();
	xfs_init();
	uuid_init();
	vfs_initquota();

	error = register_filesystem(&xfs_fs_type);
	if (error)
		goto undo_register;
	return 0;

undo_register:
	xfs_buf_terminate();

undo_buffers:
	xfs_destroy_zones();

undo_zones:
	return error;
}
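/*
 * Module teardown below unwinds init_xfs_fs() in roughly reverse order:
 * the filesystem type is unregistered first so no new mounts can start,
 * then the buffer cache and zone caches are torn down.
 */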
STATIC void __exit
exit_xfs_fs( void )
{
	vfs_exitquota();
	unregister_filesystem(&xfs_fs_type);
	xfs_cleanup();
	xfs_buf_terminate();
	xfs_destroy_zones();
	ktrace_uninit();
}

module_init(init_xfs_fs);
module_exit(exit_xfs_fs);

MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
MODULE_LICENSE("GPL");