📄 alloc.c
字号:
/* NOTE(review): this chunk begins mid-function. The statements below are
 * the tail of a truncate-log update helper whose opening lines are above
 * this view: it stamps a record's cluster count and journals the change. */
	tl->tl_recs[index].t_clusters = cpu_to_le32(num_clusters);
	status = ocfs2_journal_dirty(handle, tl_bh);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}
bail:
	mlog_exit(status);
	return status;
}

/* Walk the truncate log in tl_bh from the last used record down to record
 * zero, freeing each record's clusters back to the global bitmap
 * (data_alloc_inode/data_alloc_bh) and shrinking tl_used as it goes.
 * Each step journals the log dinode and extends the transaction by
 * OCFS2_TRUNCATE_LOG_FLUSH_ONE_REC credits for the free that follows.
 * Returns 0 or a negative error; on error, records not yet processed are
 * left in the log. */
static int ocfs2_replay_truncate_records(struct ocfs2_super *osb,
					 struct ocfs2_journal_handle *handle,
					 struct inode *data_alloc_inode,
					 struct buffer_head *data_alloc_bh)
{
	int status = 0;
	int i;
	unsigned int num_clusters;
	u64 start_blk;
	struct ocfs2_truncate_rec rec;
	struct ocfs2_dinode *di;
	struct ocfs2_truncate_log *tl;
	struct inode *tl_inode = osb->osb_tl_inode;
	struct buffer_head *tl_bh = osb->osb_tl_bh;

	mlog_entry_void();

	di = (struct ocfs2_dinode *) tl_bh->b_data;
	tl = &di->id2.i_dealloc;

	/* Process newest record first so tl_used can simply be set to the
	 * current index after each one is consumed. */
	i = le16_to_cpu(tl->tl_used) - 1;
	while (i >= 0) {
		/* Caller has given us at least enough credits to
		 * update the truncate log dinode */
		status = ocfs2_journal_access(handle, tl_inode, tl_bh,
					      OCFS2_JOURNAL_ACCESS_WRITE);
		if (status < 0) {
			mlog_errno(status);
			goto bail;
		}

		/* Dropping the count *before* freeing the clusters: a crash
		 * here merely leaks record i rather than double-freeing it. */
		tl->tl_used = cpu_to_le16(i);

		status = ocfs2_journal_dirty(handle, tl_bh);
		if (status < 0) {
			mlog_errno(status);
			goto bail;
		}

		/* TODO: Perhaps we can calculate the bulk of the
		 * credits up front rather than extending like
		 * this. */
		status = ocfs2_extend_trans(handle,
					    OCFS2_TRUNCATE_LOG_FLUSH_ONE_REC);
		if (status < 0) {
			mlog_errno(status);
			goto bail;
		}

		rec = tl->tl_recs[i];
		start_blk = ocfs2_clusters_to_blocks(data_alloc_inode->i_sb,
						     le32_to_cpu(rec.t_start));
		num_clusters = le32_to_cpu(rec.t_clusters);

		/* if start_blk is not set, we ignore the record as
		 * invalid. */
		if (start_blk) {
			mlog(0, "free record %d, start = %u, clusters = %u\n",
			     i, le32_to_cpu(rec.t_start), num_clusters);

			status = ocfs2_free_clusters(handle, data_alloc_inode,
						     data_alloc_bh, start_blk,
						     num_clusters);
			if (status < 0) {
				mlog_errno(status);
				goto bail;
			}
		}
		i--;
	}

bail:
	mlog_exit(status);
	return status;
}

/* Expects you to already be holding tl_inode->i_mutex */
/* Flush this node's truncate log: validate the log dinode, take a meta
 * lock on the global bitmap inode, start a journal transaction, and replay
 * (free) every record via ocfs2_replay_truncate_records().  A log with no
 * used records is a successful no-op.  Returns 0 or a negative error. */
static int __ocfs2_flush_truncate_log(struct ocfs2_super *osb)
{
	int status;
	unsigned int num_to_flush;
	struct ocfs2_journal_handle *handle = NULL;
	struct inode *tl_inode = osb->osb_tl_inode;
	struct inode *data_alloc_inode = NULL;
	struct buffer_head *tl_bh = osb->osb_tl_bh;
	struct buffer_head *data_alloc_bh = NULL;
	struct ocfs2_dinode *di;
	struct ocfs2_truncate_log *tl;

	mlog_entry_void();

	/* Assert the caller holds i_mutex: a successful (nonzero) trylock
	 * would mean the mutex was free, which is a bug here. */
	BUG_ON(mutex_trylock(&tl_inode->i_mutex));

	di = (struct ocfs2_dinode *) tl_bh->b_data;
	tl = &di->id2.i_dealloc;
	if (!OCFS2_IS_VALID_DINODE(di)) {
		/* Corrupt log dinode: flag the fs read-only and bail. */
		OCFS2_RO_ON_INVALID_DINODE(osb->sb, di);
		status = -EIO;
		goto bail;
	}

	num_to_flush = le16_to_cpu(tl->tl_used);
	mlog(0, "Flush %u records from truncate log #%"MLFu64"\n",
	     num_to_flush, OCFS2_I(tl_inode)->ip_blkno);
	if (!num_to_flush) {
		/* Empty log — nothing to do. */
		status = 0;
		goto bail;
	}

	handle = ocfs2_alloc_handle(osb);
	if (!handle) {
		status = -ENOMEM;
		mlog_errno(status);
		goto bail;
	}

	/* The freed clusters go back to the global bitmap, so we need that
	 * inode locked for the duration of the replay. */
	data_alloc_inode = ocfs2_get_system_file_inode(osb,
						       GLOBAL_BITMAP_SYSTEM_INODE,
						       OCFS2_INVALID_SLOT);
	if (!data_alloc_inode) {
		status = -EINVAL;
		mlog(ML_ERROR, "Could not get bitmap inode!\n");
		goto bail;
	}

	ocfs2_handle_add_inode(handle, data_alloc_inode);
	status = ocfs2_meta_lock(data_alloc_inode, handle, &data_alloc_bh, 1);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	handle = ocfs2_start_trans(osb, handle, OCFS2_TRUNCATE_LOG_UPDATE);
	if (IS_ERR(handle)) {
		/* NULL handle so the bail path won't commit a bad pointer. */
		status = PTR_ERR(handle);
		handle = NULL;
		mlog_errno(status);
		goto bail;
	}

	status = ocfs2_replay_truncate_records(osb, handle, data_alloc_inode,
					       data_alloc_bh);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

bail:
	if (handle)
		ocfs2_commit_trans(handle);

	if (data_alloc_inode)
		iput(data_alloc_inode);

	if (data_alloc_bh)
		brelse(data_alloc_bh);

	mlog_exit(status);
	return status;
}

/* Locked wrapper: take tl_inode->i_mutex around __ocfs2_flush_truncate_log. */
int ocfs2_flush_truncate_log(struct ocfs2_super *osb)
{
	int status;
	struct inode *tl_inode = osb->osb_tl_inode;

	mutex_lock(&tl_inode->i_mutex);
	status = __ocfs2_flush_truncate_log(osb);
	mutex_unlock(&tl_inode->i_mutex);

	return status;
}

/* Workqueue callback (old-style single void* argument — presumably the
 * pre-2.6.20 workqueue API; confirm against the kernel version in use):
 * flush this node's truncate log, logging any error. */
static void ocfs2_truncate_log_worker(void *data)
{
	int status;
	struct ocfs2_super *osb = data;

	mlog_entry_void();

	status = ocfs2_flush_truncate_log(osb);
	if (status < 0)
		mlog_errno(status);

	mlog_exit(status);
}

/* Delay between a schedule request and the actual background flush. */
#define OCFS2_TRUNCATE_LOG_FLUSH_INTERVAL (2 * HZ)
/* (Re)arm the delayed truncate-log flush.  With cancel set, an already
 * queued flush is cancelled first, pushing the deadline back out — used to
 * defer flushing while truncates are still in progress.  A no-op until
 * osb_tl_inode has been set up by ocfs2_truncate_log_init(). */
void ocfs2_schedule_truncate_log_flush(struct ocfs2_super *osb,
				       int cancel)
{
	if (osb->osb_tl_inode) {
		/* We want to push off log flushes while truncates are
		 * still running. */
		if (cancel)
			cancel_delayed_work(&osb->osb_truncate_log_wq);

		queue_delayed_work(ocfs2_wq, &osb->osb_truncate_log_wq,
				   OCFS2_TRUNCATE_LOG_FLUSH_INTERVAL);
	}
}

/* Look up slot_num's truncate log system inode and read its dinode block.
 * On success the inode reference and buffer_head are handed to the caller
 * via *tl_inode / *tl_bh (caller owns both: iput()/brelse() when done);
 * on failure neither output is touched. */
static int ocfs2_get_truncate_log_info(struct ocfs2_super *osb,
				       int slot_num,
				       struct inode **tl_inode,
				       struct buffer_head **tl_bh)
{
	int status;
	struct inode *inode = NULL;
	struct buffer_head *bh = NULL;

	inode = ocfs2_get_system_file_inode(osb,
					    TRUNCATE_LOG_SYSTEM_INODE,
					    slot_num);
	if (!inode) {
		status = -EINVAL;
		mlog(ML_ERROR, "Could not get load truncate log inode!\n");
		goto bail;
	}

	status = ocfs2_read_block(osb, OCFS2_I(inode)->ip_blkno, &bh,
				  OCFS2_BH_CACHED, inode);
	if (status < 0) {
		iput(inode);
		mlog_errno(status);
		goto bail;
	}

	*tl_inode = inode;
	*tl_bh    = bh;
bail:
	mlog_exit(status);
	return status;
}

/* called during the 1st stage of node recovery. we stamp a clean
 * truncate log and pass back a copy for processing later. if the
 * truncate log does not require processing, a *tl_copy is set to
 * NULL. */
/* On success with records present, *tl_copy is a kmalloc'd snapshot of the
 * log dinode block that the caller must process and kfree(); on any error
 * the copy is freed here and *tl_copy reset to NULL.  The on-disk log is
 * cleared (tl_used = 0) and written back synchronously before returning. */
int ocfs2_begin_truncate_log_recovery(struct ocfs2_super *osb,
				      int slot_num,
				      struct ocfs2_dinode **tl_copy)
{
	int status;
	struct inode *tl_inode = NULL;
	struct buffer_head *tl_bh = NULL;
	struct ocfs2_dinode *di;
	struct ocfs2_truncate_log *tl;

	*tl_copy = NULL;

	mlog(0, "recover truncate log from slot %d\n", slot_num);

	status = ocfs2_get_truncate_log_info(osb, slot_num, &tl_inode, &tl_bh);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	di = (struct ocfs2_dinode *) tl_bh->b_data;
	tl = &di->id2.i_dealloc;
	if (!OCFS2_IS_VALID_DINODE(di)) {
		OCFS2_RO_ON_INVALID_DINODE(tl_inode->i_sb, di);
		status = -EIO;
		goto bail;
	}

	if (le16_to_cpu(tl->tl_used)) {
		mlog(0, "We'll have %u logs to recover\n",
		     le16_to_cpu(tl->tl_used));

		*tl_copy = kmalloc(tl_bh->b_size, GFP_KERNEL);
		if (!(*tl_copy)) {
			status = -ENOMEM;
			mlog_errno(status);
			goto bail;
		}

		/* Assuming the write-out below goes well, this copy
		 * will be passed back to recovery for processing. */
		memcpy(*tl_copy, tl_bh->b_data, tl_bh->b_size);

		/* All we need to do to clear the truncate log is set
		 * tl_used. */
		tl->tl_used = 0;

		/* Direct (non-journaled) write of the cleared log. */
		status = ocfs2_write_block(osb, tl_bh, tl_inode);
		if (status < 0) {
			mlog_errno(status);
			goto bail;
		}
	}

bail:
	if (tl_inode)
		iput(tl_inode);
	if (tl_bh)
		brelse(tl_bh);

	if (status < 0 && (*tl_copy)) {
		kfree(*tl_copy);
		*tl_copy = NULL;
	}

	mlog_exit(status);
	return status;
}

/* 2nd stage of recovery: re-queue every record from a dead node's saved
 * truncate log copy (from ocfs2_begin_truncate_log_recovery) into this
 * node's own log via ocfs2_truncate_log_append(), flushing our log first
 * whenever it is getting full.  Refuses (-EINVAL) to "recover" our own
 * log.  Caller retains ownership of tl_copy. */
int ocfs2_complete_truncate_log_recovery(struct ocfs2_super *osb,
					 struct ocfs2_dinode *tl_copy)
{
	int status = 0;
	int i;
	unsigned int clusters, num_recs, start_cluster;
	u64 start_blk;
	struct ocfs2_journal_handle *handle;
	struct inode *tl_inode = osb->osb_tl_inode;
	struct ocfs2_truncate_log *tl;

	mlog_entry_void();

	if (OCFS2_I(tl_inode)->ip_blkno == le64_to_cpu(tl_copy->i_blkno)) {
		mlog(ML_ERROR, "Asked to recover my own truncate log!\n");
		return -EINVAL;
	}

	tl = &tl_copy->id2.i_dealloc;
	num_recs = le16_to_cpu(tl->tl_used);
	mlog(0, "cleanup %u records from %"MLFu64"\n", num_recs,
	     tl_copy->i_blkno);

	mutex_lock(&tl_inode->i_mutex);
	for(i = 0; i < num_recs; i++) {
		/* Make room in our own log before appending. */
		if (ocfs2_truncate_log_needs_flush(osb)) {
			status = __ocfs2_flush_truncate_log(osb);
			if (status < 0) {
				mlog_errno(status);
				goto bail_up;
			}
		}

		handle = ocfs2_start_trans(osb, NULL,
					   OCFS2_TRUNCATE_LOG_UPDATE);
		if (IS_ERR(handle)) {
			status = PTR_ERR(handle);
			mlog_errno(status);
			goto bail_up;
		}

		clusters = le32_to_cpu(tl->tl_recs[i].t_clusters);
		start_cluster = le32_to_cpu(tl->tl_recs[i].t_start);
		start_blk = ocfs2_clusters_to_blocks(osb->sb, start_cluster);

		status = ocfs2_truncate_log_append(osb, handle,
						   start_blk, clusters);
		ocfs2_commit_trans(handle);
		if (status < 0) {
			mlog_errno(status);
			goto bail_up;
		}
	}

bail_up:
	mutex_unlock(&tl_inode->i_mutex);

	mlog_exit(status);
	return status;
}

/* Unmount-time teardown: cancel/drain the delayed flush work, do one final
 * synchronous flush (errors only logged — we're shutting down regardless),
 * then drop the log buffer_head and inode references.  Safe to call when
 * init never completed (osb_tl_inode NULL). */
void ocfs2_truncate_log_shutdown(struct ocfs2_super *osb)
{
	int status;
	struct inode *tl_inode = osb->osb_tl_inode;

	mlog_entry_void();

	if (tl_inode) {
		cancel_delayed_work(&osb->osb_truncate_log_wq);
		flush_workqueue(ocfs2_wq);

		status = ocfs2_flush_truncate_log(osb);
		if (status < 0)
			mlog_errno(status);

		brelse(osb->osb_tl_bh);
		iput(osb->osb_tl_inode);
	}

	mlog_exit_void();
}

/* Mount-time setup: load this slot's truncate log inode/block and arm the
 * background flush work item.  Note the osb fields are assigned even on
 * lookup failure — tl_inode/tl_bh are then still NULL, which is exactly
 * what shutdown (above) keys on. */
int ocfs2_truncate_log_init(struct ocfs2_super *osb)
{
	int status;
	struct inode *tl_inode = NULL;
	struct buffer_head *tl_bh = NULL;

	mlog_entry_void();

	status = ocfs2_get_truncate_log_info(osb,
					     osb->slot_num,
					     &tl_inode,
					     &tl_bh);
	if (status < 0)
		mlog_errno(status);

	/* ocfs2_truncate_log_shutdown keys on the existence of
	 * osb->osb_tl_inode so we don't set any of the osb variables
	 * until we're sure all is well. */
	INIT_WORK(&osb->osb_truncate_log_wq, ocfs2_truncate_log_worker, osb);
	osb->osb_tl_bh    = tl_bh;
	osb->osb_tl_inode = tl_inode;

	mlog_exit(status);
	return status;
}

/* This function will figure out whether the currently last extent
 * block will be deleted, and if it will, what the new last extent
 * block will be so we can update his h_next_leaf_blk field, as well
 * as the dinodes i_last_eb_blk */
/* NOTE(review): this chunk ends mid-function — the cleanup/return tail of
 * this function lies beyond the visible text. */
static int ocfs2_find_new_last_ext_blk(struct ocfs2_super *osb,
				       struct inode *inode,
				       struct ocfs2_dinode *fe,
				       u32 new_i_clusters,
				       struct buffer_head *old_last_eb,
				       struct buffer_head **new_last_eb)
{
	int i, status = 0;
	u64 block = 0;
	struct ocfs2_extent_block *eb;
	struct ocfs2_extent_list *el;
	struct buffer_head *bh = NULL;

	*new_last_eb = NULL;

	if (!OCFS2_IS_VALID_DINODE(fe)) {
		OCFS2_RO_ON_INVALID_DINODE(inode->i_sb, fe);
		status = -EIO;
		goto bail;
	}

	/* we have no tree, so of course, no last_eb. */
	if (!fe->id2.i_list.l_tree_depth)
		goto bail;

	/* trunc to zero special case - this makes tree_depth = 0
	 * regardless of what it is. */
	if (!new_i_clusters)
		goto bail;

	eb = (struct ocfs2_extent_block *) old_last_eb->b_data;
	el = &(eb->h_list);
	BUG_ON(!el->l_next_free_rec);

	/* Make sure that this guy will actually be empty after we
	 * clear away the data. */
	if (le32_to_cpu(el->l_recs[0].e_cpos) < new_i_clusters)
		goto bail;

	/* Ok, at this point, we know that last_eb will definitely
	 * change, so lets traverse the tree and find the second to
	 * last extent block. */
	el = &(fe->id2.i_list);
	/* go down the tree, */
	do {
		/* Scan records right-to-left for the rightmost subtree that
		 * still starts below the new cluster count. */
		for(i = (le16_to_cpu(el->l_next_free_rec) - 1); i >= 0; i--) {
			if (le32_to_cpu(el->l_recs[i].e_cpos) <
			    new_i_clusters) {
				block = le64_to_cpu(el->l_recs[i].e_blkno);
				break;
			}
		}
		BUG_ON(i < 0);

		if (bh) {
			brelse(bh);
			bh = NULL;
		}

		status = ocfs2_read_block(osb, block, &bh, OCFS2_BH_CACHED,
					  inode);
		if (status < 0) {
			mlog_errno(status);
			goto bail;
		}
		eb = (struct ocfs2_extent_block *) bh->b_data;
		el = &eb->h_list;
		if (!OCFS2_IS_VALID_EXTENT_BLOCK(eb)) {
			OCFS2_RO_ON_INVALID_EXTENT_BLOCK(inode->i_sb, eb);
			status = -EIO;
			goto bail;
		}
	} while (el->l_tree_depth);

	*new_last_eb = bh;
	get_bh(*new_last_eb);
	mlog(0, "returning block %"MLFu64"\n", le64_to_cpu(eb->h_blkno));
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -