📄 xfs_vnodeops.c
字号:
	/* Zero-length symlink: nothing to copy. */
	if (!pathlen)
		goto out;

	if (ip->i_df.if_flags & XFS_IFINLINE) {
		/* Inline symlink: target string lives in the inode data fork. */
		memcpy(link, ip->i_df.if_u1.if_data, pathlen);
		link[pathlen] = '\0';
	} else {
		/* Out-of-line symlink: read the target from its data blocks. */
		error = xfs_readlink_bmap(ip, link);
	}

 out:
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	return error;
}

/*
 * xfs_fsync
 *
 * This is called to sync the inode and its data out to disk.
 * We need to hold the I/O lock while flushing the data, and
 * the inode lock while flushing the inode.  The inode lock CANNOT
 * be held while flushing the data, so acquire after we're done
 * with that.
 *
 * Returns 0 or a positive XFS error code.  'start'/'stop' bound the
 * range being synced ('stop' may be -1 for "to EOF"); note the range
 * is only asserted on here -- the data wait below covers the whole
 * mapping.
 */
int
xfs_fsync(
	xfs_inode_t	*ip,
	int		flag,
	xfs_off_t	start,
	xfs_off_t	stop)
{
	xfs_trans_t	*tp;
	int		error;
	/* changed=1 means we may have dirtied state needing a cache flush */
	int		log_flushed = 0, changed = 1;

	vn_trace_entry(ip, __FUNCTION__, (inst_t *)__return_address);

	ASSERT(start >= 0 && stop >= -1);

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return XFS_ERROR(EIO);

	/* Wait for any in-flight data writeback before touching the inode. */
	if (flag & FSYNC_DATA)
		filemap_fdatawait(vn_to_inode(XFS_ITOV(ip))->i_mapping);

	/*
	 * We always need to make sure that the required inode state
	 * is safe on disk.  The vnode might be clean but because
	 * of committed transactions that haven't hit the disk yet.
	 * Likewise, there could be unflushed non-transactional
	 * changes to the inode core that have to go to disk.
	 *
	 * The following code depends on one assumption:  that
	 * any transaction that changes an inode logs the core
	 * because it has to change some field in the inode core
	 * (typically nextents or nblocks).  That assumption
	 * implies that any transactions against an inode will
	 * catch any non-transactional updates.  If inode-altering
	 * transactions exist that violate this assumption, the
	 * code breaks.  Right now, it figures that if the involved
	 * update_* field is clear and the inode is unpinned, the
	 * inode is clean.  Either it's been flushed or it's been
	 * committed and the commit has hit the disk unpinning the inode.
	 * (Note that xfs_inode_item_format() called at commit clears
	 * the update_* fields.)
	 */
	xfs_ilock(ip, XFS_ILOCK_SHARED);

	/* If we are flushing data then we care about update_size
	 * being set, otherwise we care about update_core */
	if ((flag & FSYNC_DATA) ?
			(ip->i_update_size == 0) :
			(ip->i_update_core == 0)) {
		/*
		 * Timestamps/size haven't changed since last inode
		 * flush or inode transaction commit.  That means
		 * either nothing got written or a transaction
		 * committed which caught the updates.  If the
		 * latter happened and the transaction hasn't
		 * hit the disk yet, the inode will be still
		 * be pinned.  If it is, force the log.
		 */
		xfs_iunlock(ip, XFS_ILOCK_SHARED);

		if (xfs_ipincount(ip)) {
			_xfs_log_force(ip->i_mount, (xfs_lsn_t)0,
				      XFS_LOG_FORCE |
				      ((flag & FSYNC_WAIT)
				       ? XFS_LOG_SYNC : 0),
				      &log_flushed);
		} else {
			/*
			 * If the inode is not pinned and nothing
			 * has changed we don't need to flush the
			 * cache.
			 */
			changed = 0;
		}
		error = 0;
	} else  {
		/*
		 * Kick off a transaction to log the inode
		 * core to get the updates.  Make it
		 * sync if FSYNC_WAIT is passed in (which
		 * is done by everybody but specfs).  The
		 * sync transaction will also force the log.
		 */
		xfs_iunlock(ip, XFS_ILOCK_SHARED);
		tp = xfs_trans_alloc(ip->i_mount, XFS_TRANS_FSYNC_TS);
		if ((error = xfs_trans_reserve(tp, 0,
				XFS_FSYNC_TS_LOG_RES(ip->i_mount),
				0, 0, 0)))  {
			xfs_trans_cancel(tp, 0);
			return error;
		}
		xfs_ilock(ip, XFS_ILOCK_EXCL);

		/*
		 * Note - it's possible that we might have pushed
		 * ourselves out of the way during trans_reserve
		 * which would flush the inode.  But there's no
		 * guarantee that the inode buffer has actually
		 * gone out yet (it's delwri).  Plus the buffer
		 * could be pinned anyway if it's part of an
		 * inode in another recent transaction.  So we
		 * play it safe and fire off the transaction anyway.
*/ xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); xfs_trans_ihold(tp, ip); xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); if (flag & FSYNC_WAIT) xfs_trans_set_sync(tp); error = _xfs_trans_commit(tp, 0, &log_flushed); xfs_iunlock(ip, XFS_ILOCK_EXCL); } if ((ip->i_mount->m_flags & XFS_MOUNT_BARRIER) && changed) { /* * If the log write didn't issue an ordered tag we need * to flush the disk cache for the data device now. */ if (!log_flushed) xfs_blkdev_issue_flush(ip->i_mount->m_ddev_targp); /* * If this inode is on the RT dev we need to flush that * cache as well. */ if (ip->i_d.di_flags & XFS_DIFLAG_REALTIME) xfs_blkdev_issue_flush(ip->i_mount->m_rtdev_targp); } return error;}/* * This is called by xfs_inactive to free any blocks beyond eof * when the link count isn't zero and by xfs_dm_punch_hole() when * punching a hole to EOF. */intxfs_free_eofblocks( xfs_mount_t *mp, xfs_inode_t *ip, int flags){ xfs_trans_t *tp; int error; xfs_fileoff_t end_fsb; xfs_fileoff_t last_fsb; xfs_filblks_t map_len; int nimaps; xfs_bmbt_irec_t imap; int use_iolock = (flags & XFS_FREE_EOF_LOCK); /* * Figure out if there are any blocks beyond the end * of the file. If not, then there is nothing to do. */ end_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)ip->i_size)); last_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp)); map_len = last_fsb - end_fsb; if (map_len <= 0) return 0; nimaps = 1; xfs_ilock(ip, XFS_ILOCK_SHARED); error = XFS_BMAPI(mp, NULL, &ip->i_iocore, end_fsb, map_len, 0, NULL, 0, &imap, &nimaps, NULL, NULL); xfs_iunlock(ip, XFS_ILOCK_SHARED); if (!error && (nimaps != 0) && (imap.br_startblock != HOLESTARTBLOCK || ip->i_delayed_blks)) { /* * Attach the dquots to the inode up front. */ if ((error = XFS_QM_DQATTACH(mp, ip, 0))) return error; /* * There are blocks after the end of file. * Free them up now by truncating the file to * its current size. 
*/ tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE); /* * Do the xfs_itruncate_start() call before * reserving any log space because * itruncate_start will call into the buffer * cache and we can't * do that within a transaction. */ if (use_iolock) xfs_ilock(ip, XFS_IOLOCK_EXCL); error = xfs_itruncate_start(ip, XFS_ITRUNC_DEFINITE, ip->i_size); if (error) { xfs_trans_cancel(tp, 0); if (use_iolock) xfs_iunlock(ip, XFS_IOLOCK_EXCL); return error; } error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0, XFS_TRANS_PERM_LOG_RES, XFS_ITRUNCATE_LOG_COUNT); if (error) { ASSERT(XFS_FORCED_SHUTDOWN(mp)); xfs_trans_cancel(tp, 0); xfs_iunlock(ip, XFS_IOLOCK_EXCL); return error; } xfs_ilock(ip, XFS_ILOCK_EXCL); xfs_trans_ijoin(tp, ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL); xfs_trans_ihold(tp, ip); error = xfs_itruncate_finish(&tp, ip, ip->i_size, XFS_DATA_FORK, 0); /* * If we get an error at this point we * simply don't bother truncating the file. */ if (error) { xfs_trans_cancel(tp, (XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT)); } else { error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES); } xfs_iunlock(ip, (use_iolock ? (XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL) : XFS_ILOCK_EXCL)); } return error;}/* * Free a symlink that has blocks associated with it. */STATIC intxfs_inactive_symlink_rmt( xfs_inode_t *ip, xfs_trans_t **tpp){ xfs_buf_t *bp; int committed; int done; int error; xfs_fsblock_t first_block; xfs_bmap_free_t free_list; int i; xfs_mount_t *mp; xfs_bmbt_irec_t mval[SYMLINK_MAPS]; int nmaps; xfs_trans_t *ntp; int size; xfs_trans_t *tp; tp = *tpp; mp = ip->i_mount; ASSERT(ip->i_d.di_size > XFS_IFORK_DSIZE(ip)); /* * We're freeing a symlink that has some * blocks allocated to it. Free the * blocks here. We know that we've got * either 1 or 2 extents and that we can * free them all in one bunmapi call. 
	 */
	ASSERT(ip->i_d.di_nextents > 0 && ip->i_d.di_nextents <= 2);
	if ((error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0,
			XFS_TRANS_PERM_LOG_RES, XFS_ITRUNCATE_LOG_COUNT))) {
		ASSERT(XFS_FORCED_SHUTDOWN(mp));
		xfs_trans_cancel(tp, 0);
		*tpp = NULL;
		return error;
	}
	/*
	 * Lock the inode, fix the size, and join it to the transaction.
	 * Hold it so in the normal path, we still have it locked for
	 * the second transaction.  In the error paths we need it
	 * held so the cancel won't rele it, see below.
	 */
	xfs_ilock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
	size = (int)ip->i_d.di_size;
	ip->i_d.di_size = 0;
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
	xfs_trans_ihold(tp, ip);
	/* Size change above must be logged before the extents go away. */
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	/*
	 * Find the block(s) so we can inval and unmap them.
	 */
	done = 0;
	XFS_BMAP_INIT(&free_list, &first_block);
	nmaps = ARRAY_SIZE(mval);
	if ((error = xfs_bmapi(tp, ip, 0, XFS_B_TO_FSB(mp, size),
			XFS_BMAPI_METADATA, &first_block, 0, mval, &nmaps,
			&free_list, NULL)))
		goto error0;
	/*
	 * Invalidate the block(s).
	 */
	for (i = 0; i < nmaps; i++) {
		bp = xfs_trans_get_buf(tp, mp->m_ddev_targp,
			XFS_FSB_TO_DADDR(mp, mval[i].br_startblock),
			XFS_FSB_TO_BB(mp, mval[i].br_blockcount), 0);
		xfs_trans_binval(tp, bp);
	}
	/*
	 * Unmap the dead block(s) to the free_list.
	 */
	if ((error = xfs_bunmapi(tp, ip, 0, size, XFS_BMAPI_METADATA, nmaps,
			&first_block, &free_list, NULL, &done)))
		goto error1;
	ASSERT(done);
	/*
	 * Commit the first transaction.  This logs the EFI and the inode.
	 */
	if ((error = xfs_bmap_finish(&tp, &free_list, &committed)))
		goto error1;
	/*
	 * The transaction must have been committed, since there were
	 * actually extents freed by xfs_bunmapi.  See xfs_bmap_finish.
	 * The new tp has the extent freeing and EFDs.
	 */
	ASSERT(committed);
	/*
	 * The first xact was committed, so add the inode to the new one.
	 * Mark it dirty so it will be logged and moved forward in the log as
	 * part of every commit.
	 */
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
	xfs_trans_ihold(tp, ip);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	/*
	 * Get a new, empty transaction to return to our caller.
	 */
	ntp = xfs_trans_dup(tp);
	/*
	 * Commit the transaction containing extent freeing and EFDs.
	 * If we get an error on the commit here or on the reserve below,
	 * we need to unlock the inode since the new transaction doesn't
	 * have the inode attached.
	 */
	error = xfs_trans_commit(tp, 0);
	tp = ntp;
	if (error) {
		ASSERT(XFS_FORCED_SHUTDOWN(mp));
		goto error0;
	}
	/*
	 * Remove the memory for extent descriptions (just bookkeeping).
	 */
	if (ip->i_df.if_bytes)
		xfs_idata_realloc(ip, -ip->i_df.if_bytes, XFS_DATA_FORK);
	ASSERT(ip->i_df.if_bytes == 0);
	/*
	 * Put an itruncate log reservation in the new transaction
	 * for our caller.
	 */
	if ((error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0,
			XFS_TRANS_PERM_LOG_RES, XFS_ITRUNCATE_LOG_COUNT))) {
		ASSERT(XFS_FORCED_SHUTDOWN(mp));
		goto error0;
	}
	/*
	 * Return with the inode locked but not joined to the transaction.
	 */
	*tpp = tp;
	return 0;

 error1:
	xfs_bmap_cancel(&free_list);
 error0:
	/*
	 * Have to come here with the inode locked and either
	 * (held and in the transaction) or (not in the transaction).
	 * If the inode isn't held then cancel would iput it, but
	 * that's wrong since this is inactive and the vnode ref
	 * count is 0 already.
	 * Cancel won't do anything to the inode if held, but it still
	 * needs to be locked until the cancel is done, if it was
	 * joined to the transaction.
	 */
	xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
	xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
	*tpp = NULL;
	return error;

}

/*
 * Free a symlink whose target string is stored inline in the inode
 * data fork.  On success returns 0 with *tpp reserved and the inode
 * locked (iolock + ilock exclusive) for the caller; on error *tpp is
 * cancelled and NULLed.
 */
STATIC int
xfs_inactive_symlink_local(
	xfs_inode_t	*ip,
	xfs_trans_t	**tpp)
{
	int		error;

	ASSERT(ip->i_d.di_size <= XFS_IFORK_DSIZE(ip));
	/*
	 * We're freeing a symlink which fit into
	 * the inode.  Just free the memory used
	 * to hold the old symlink.
	 */
	error = xfs_trans_reserve(*tpp, 0,
				  XFS_ITRUNCATE_LOG_RES(ip->i_mount),
				  0, XFS_TRANS_PERM_LOG_RES,
				  XFS_ITRUNCATE_LOG_COUNT);

	if (error) {
		xfs_trans_cancel(*tpp, 0);
		*tpp = NULL;
		return error;
	}
	/* Locks stay held on return; the caller's commit drops them. */
	xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);

	/*
	 * Zero length symlinks _can_ exist.
	 */
	if (ip->i_df.if_bytes > 0) {
		xfs_idata_realloc(ip,
				  -(ip->i_df.if_bytes),
				  XFS_DATA_FORK);
		ASSERT(ip->i_df.if_bytes == 0);
	}
	return 0;
}

/*
 * Tear down the attribute fork during inactivation.  Commits the
 * caller's transaction, runs xfs_attr_inactive() with the ilock
 * dropped, then hands back a fresh reserved transaction in *tpp.
 * NOTE(review): this definition is truncated at the end of this
 * chunk; the remainder is outside the visible source.
 */
STATIC int
xfs_inactive_attrs(
	xfs_inode_t	*ip,
	xfs_trans_t	**tpp)
{
	xfs_trans_t	*tp;
	int		error;
	xfs_mount_t	*mp;

	/* Caller must hold the iolock exclusively. */
	ASSERT(ismrlocked(&ip->i_iolock, MR_UPDATE));
	tp = *tpp;
	mp = ip->i_mount;
	ASSERT(ip->i_d.di_forkoff != 0);
	xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	error = xfs_attr_inactive(ip);
	if (error) {
		*tpp = NULL;
		xfs_iunlock(ip, XFS_IOLOCK_EXCL);
		return error; /* goto out */
	}

	tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
	error = xfs_trans_reserve(tp, 0,
				  XFS_IFREE_LOG_RES(mp),
				  0, XFS_TRANS_PERM_LOG_RES,
				  XFS_INACTIVE_LOG_COUNT);
	if (error) {
		ASSERT(XFS_FORCED_SHUTDOWN(mp));
		xfs_trans_cancel(tp, 0);
		*tpp = NULL;
		xfs_iunlock(ip, XFS_IOLOCK_EXCL);
		return error;
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
	xfs_trans_ihold(tp, ip);
	xfs_idestroy_fork(ip, XFS_ATTR_FORK);

	ASSERT(ip->i_d.di_anextents == 0);
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -