inode.c
			brelse(prev_epos.bh);
			get_bh(cur_epos.bh);
			prev_epos.bh = cur_epos.bh;
		}
		if (cur_epos.bh != next_epos.bh) {
			brelse(cur_epos.bh);
			get_bh(next_epos.bh);
			cur_epos.bh = next_epos.bh;
		}

		lbcount += elen;

		prev_epos.block = cur_epos.block;
		cur_epos.block = next_epos.block;

		prev_epos.offset = cur_epos.offset;
		cur_epos.offset = next_epos.offset;

		if ((etype = udf_next_aext(inode, &next_epos, &eloc, &elen, 1)) == -1)
			break;

		c = !c;

		laarr[c].extLength = (etype << 30) | elen;
		laarr[c].extLocation = eloc;

		if (etype != (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
			pgoal = eloc.logicalBlockNum +
				((elen + inode->i_sb->s_blocksize - 1) >>
				 inode->i_sb->s_blocksize_bits);

		count++;
	} while (lbcount + elen <= b_off);

	b_off -= lbcount;
	offset = b_off >> inode->i_sb->s_blocksize_bits;
	/*
	 * Move prev_epos and cur_epos into indirect extent if we are at
	 * the pointer to it
	 */
	udf_next_aext(inode, &prev_epos, &tmpeloc, &tmpelen, 0);
	udf_next_aext(inode, &cur_epos, &tmpeloc, &tmpelen, 0);

	/* if the extent is allocated and recorded, return the block;
	   if the extent is not a multiple of the blocksize, round up */
	if (etype == (EXT_RECORDED_ALLOCATED >> 30)) {
		if (elen & (inode->i_sb->s_blocksize - 1)) {
			elen = EXT_RECORDED_ALLOCATED |
				((elen + inode->i_sb->s_blocksize - 1) &
				 ~(inode->i_sb->s_blocksize - 1));
			etype = udf_write_aext(inode, &cur_epos, eloc, elen, 1);
		}
		brelse(prev_epos.bh);
		brelse(cur_epos.bh);
		brelse(next_epos.bh);
		newblock = udf_get_lb_pblock(inode->i_sb, eloc, offset);
		*phys = newblock;
		return NULL;
	}

	last_block = block;
	/* Are we beyond EOF? */
	if (etype == -1) {
		int ret;

		if (count) {
			if (c)
				laarr[0] = laarr[1];
			startnum = 1;
		} else {
			/* Create a fake extent when there's not one */
			memset(&laarr[0].extLocation, 0x00, sizeof(kernel_lb_addr));
			laarr[0].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED;
			/* Will udf_extend_file() create real extent from a fake one? */
			startnum = (offset > 0);
		}
		/* Create extents for the hole between EOF and offset */
		ret = udf_extend_file(inode, &prev_epos, laarr, offset);
		if (ret == -1) {
			brelse(prev_epos.bh);
			brelse(cur_epos.bh);
			brelse(next_epos.bh);
			/* We don't really know the error here so we just make
			 * something up */
			*err = -ENOSPC;
			return NULL;
		}
		c = 0;
		offset = 0;
		count += ret;
		/* We are not covered by a preallocated extent? */
		if ((laarr[0].extLength & UDF_EXTENT_FLAG_MASK) !=
		    EXT_NOT_RECORDED_ALLOCATED) {
			/* Is there any real extent? - otherwise we overwrite
			 * the fake one... */
			if (count)
				c = !c;
			laarr[c].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
				inode->i_sb->s_blocksize;
			memset(&laarr[c].extLocation, 0x00, sizeof(kernel_lb_addr));
			count++;
			endnum++;
		}
		endnum = c + 1;
		lastblock = 1;
	} else {
		endnum = startnum = ((count > 2) ? 2 : count);

		/* if the current extent is in position 0,
		   swap it with the previous */
		if (!c && count != 1) {
			laarr[2] = laarr[0];
			laarr[0] = laarr[1];
			laarr[1] = laarr[2];
			c = 1;
		}

		/* if the current block is located in an extent,
		   read the next extent */
		if ((etype = udf_next_aext(inode, &next_epos, &eloc, &elen, 0)) != -1) {
			laarr[c + 1].extLength = (etype << 30) | elen;
			laarr[c + 1].extLocation = eloc;
			count++;
			startnum++;
			endnum++;
		} else {
			lastblock = 1;
		}
	}

	/* if the current extent is not recorded but allocated, get the
	 * block in the extent corresponding to the requested block */
	if ((laarr[c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30)) {
		newblocknum = laarr[c].extLocation.logicalBlockNum + offset;
	} else { /* otherwise, allocate a new block */
		if (UDF_I_NEXT_ALLOC_BLOCK(inode) == block)
			goal = UDF_I_NEXT_ALLOC_GOAL(inode);

		if (!goal) {
			if (!(goal = pgoal))
				goal = UDF_I_LOCATION(inode).logicalBlockNum + 1;
		}

		if (!(newblocknum = udf_new_block(inode->i_sb, inode,
				UDF_I_LOCATION(inode).partitionReferenceNum,
				goal, err))) {
			brelse(prev_epos.bh);
			*err = -ENOSPC;
			return NULL;
		}
		UDF_I_LENEXTENTS(inode) += inode->i_sb->s_blocksize;
	}

	/* if the extent the requested block is located in contains multiple
	 * blocks, split the extent into at most three extents: blocks prior
	 * to the requested block, the requested block, and blocks after the
	 * requested block */
	udf_split_extents(inode, &c, offset, newblocknum, laarr, &endnum);

#ifdef UDF_PREALLOCATE
	/* preallocate blocks */
	udf_prealloc_extents(inode, c, lastblock, laarr, &endnum);
#endif

	/* merge any continuous blocks in laarr */
	udf_merge_extents(inode, laarr, &endnum);

	/* write back the new extents, inserting new extents if the new number
	 * of extents is greater than the old number, and deleting extents if
	 * the new number of extents is less than the old number */
	udf_update_extents(inode, laarr, startnum, endnum, &prev_epos);

	brelse(prev_epos.bh);

	if (!(newblock = udf_get_pblock(inode->i_sb, newblocknum,
			UDF_I_LOCATION(inode).partitionReferenceNum, 0))) {
		return NULL;
	}
	*phys = newblock;
	*err = 0;
	*new = 1;
	UDF_I_NEXT_ALLOC_BLOCK(inode) = block;
	UDF_I_NEXT_ALLOC_GOAL(inode) = newblocknum;
	inode->i_ctime = current_fs_time(inode->i_sb);

	if (IS_SYNC(inode))
		udf_sync_inode(inode);
	else
		mark_inode_dirty(inode);

	return result;
}

/* Split the extent holding the requested block into up to three extents:
 * the blocks before it, the (now recorded and allocated) requested block
 * itself, and the blocks after it. */
static void udf_split_extents(struct inode *inode, int *c, int offset,
			      int newblocknum,
			      kernel_long_ad laarr[EXTENT_MERGE_SIZE],
			      int *endnum)
{
	if ((laarr[*c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30) ||
	    (laarr[*c].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) {
		int curr = *c;
		int blen = ((laarr[curr].extLength & UDF_EXTENT_LENGTH_MASK) +
			    inode->i_sb->s_blocksize - 1) >>
			   inode->i_sb->s_blocksize_bits;
		int8_t etype = (laarr[curr].extLength >> 30);

		if (blen == 1) {
			;
		} else if (!offset || blen == offset + 1) {
			laarr[curr + 2] = laarr[curr + 1];
			laarr[curr + 1] = laarr[curr];
		} else {
			laarr[curr + 3] = laarr[curr + 1];
			laarr[curr + 2] = laarr[curr + 1] = laarr[curr];
		}

		if (offset) {
			if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30)) {
				udf_free_blocks(inode->i_sb, inode,
						laarr[curr].extLocation, 0, offset);
				laarr[curr].extLength =
					EXT_NOT_RECORDED_NOT_ALLOCATED |
					(offset << inode->i_sb->s_blocksize_bits);
				laarr[curr].extLocation.logicalBlockNum = 0;
				laarr[curr].extLocation.partitionReferenceNum = 0;
			} else {
				laarr[curr].extLength = (etype << 30) |
					(offset << inode->i_sb->s_blocksize_bits);
			}
			curr++;
			(*c)++;
			(*endnum)++;
		}

		laarr[curr].extLocation.logicalBlockNum = newblocknum;
		if (etype == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
			laarr[curr].extLocation.partitionReferenceNum =
				UDF_I_LOCATION(inode).partitionReferenceNum;
		laarr[curr].extLength = EXT_RECORDED_ALLOCATED |
			inode->i_sb->s_blocksize;
		curr++;

		if (blen != offset + 1) {
			if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30))
				laarr[curr].extLocation.logicalBlockNum += (offset + 1);
			laarr[curr].extLength = (etype << 30) |
				((blen - (offset + 1)) <<
				 inode->i_sb->s_blocksize_bits);
			curr++;
			(*endnum)++;
		}
	}
}

/* Preallocate blocks directly after the extent containing the requested
 * block, up to UDF_DEFAULT_PREALLOC_BLOCKS, and record them in laarr. */
static void udf_prealloc_extents(struct inode *inode, int c, int lastblock,
				 kernel_long_ad laarr[EXTENT_MERGE_SIZE],
				 int *endnum)
{
	int start, length = 0, currlength = 0, i;

	if (*endnum >= (c + 1)) {
		if (!lastblock)
			return;
		else
			start = c;
	} else {
		if ((laarr[c + 1].extLength >> 30) ==
		    (EXT_NOT_RECORDED_ALLOCATED >> 30)) {
			start = c + 1;
			length = currlength =
				(((laarr[c + 1].extLength & UDF_EXTENT_LENGTH_MASK) +
				  inode->i_sb->s_blocksize - 1) >>
				 inode->i_sb->s_blocksize_bits);
		} else {
			start = c;
		}
	}

	for (i = start + 1; i <= *endnum; i++) {
		if (i == *endnum) {
			if (lastblock)
				length += UDF_DEFAULT_PREALLOC_BLOCKS;
		} else if ((laarr[i].extLength >> 30) ==
			   (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) {
			length += (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
				    inode->i_sb->s_blocksize - 1) >>
				   inode->i_sb->s_blocksize_bits);
		} else {
			break;
		}
	}

	if (length) {
		int next = laarr[start].extLocation.logicalBlockNum +
			(((laarr[start].extLength & UDF_EXTENT_LENGTH_MASK) +
			  inode->i_sb->s_blocksize - 1) >>
			 inode->i_sb->s_blocksize_bits);
		int numalloc = udf_prealloc_blocks(inode->i_sb, inode,
				laarr[start].extLocation.partitionReferenceNum,
				next,
				(UDF_DEFAULT_PREALLOC_BLOCKS > length ?
				 length : UDF_DEFAULT_PREALLOC_BLOCKS) - currlength);

		if (numalloc) {
			if (start == (c + 1)) {
				laarr[start].extLength +=
					(numalloc << inode->i_sb->s_blocksize_bits);
			} else {
				memmove(&laarr[c + 2], &laarr[c + 1],
					sizeof(long_ad) * (*endnum - (c + 1)));
				(*endnum)++;
				laarr[c + 1].extLocation.logicalBlockNum = next;
				laarr[c + 1].extLocation.partitionReferenceNum =
					laarr[c].extLocation.partitionReferenceNum;
				laarr[c + 1].extLength =
					EXT_NOT_RECORDED_ALLOCATED |
					(numalloc << inode->i_sb->s_blocksize_bits);
				start = c + 1;
			}

			for (i = start + 1; numalloc && i < *endnum; i++) {
				int elen = ((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
					    inode->i_sb->s_blocksize - 1) >>
					   inode->i_sb->s_blocksize_bits;

				if (elen > numalloc) {
					laarr[i].extLength -=
						(numalloc << inode->i_sb->s_blocksize_bits);
					numalloc = 0;
				} else {
					numalloc -= elen;
					if (*endnum > (i + 1))
						memmove(&laarr[i], &laarr[i + 1],
							sizeof(long_ad) *
							(*endnum - (i + 1)));
					i--;
					(*endnum)--;
				}
			}
			UDF_I_LENEXTENTS(inode) +=
				numalloc << inode->i_sb->s_blocksize_bits;
		}
	}
}

/* Merge adjacent extents in laarr where their types and locations allow it. */
static void udf_merge_extents(struct inode *inode,
			      kernel_long_ad laarr[EXTENT_MERGE_SIZE],
			      int *endnum)
{
	int i;

	for (i = 0; i < (*endnum - 1); i++) {
		if ((laarr[i].extLength >> 30) == (laarr[i + 1].extLength >> 30)) {
			if (((laarr[i].extLength >> 30) ==
			     (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) ||
			    ((laarr[i + 1].extLocation.logicalBlockNum -
			      laarr[i].extLocation.logicalBlockNum) ==
			     (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
			       inode->i_sb->s_blocksize - 1) >>
			      inode->i_sb->s_blocksize_bits))) {
				if (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
				     (laarr[i + 1].extLength & UDF_EXTENT_LENGTH_MASK) +
				     inode->i_sb->s_blocksize - 1) &
				    ~UDF_EXTENT_LENGTH_MASK) {
					laarr[i + 1].extLength =
						(laarr[i + 1].extLength -
						 (laarr[i].extLength &
						  UDF_EXTENT_LENGTH_MASK) +
						 UDF_EXTENT_LENGTH_MASK) &
						~(inode->i_sb->s_blocksize - 1);
					laarr[i].extLength =
						(laarr[i].extLength &
						 UDF_EXTENT_FLAG_MASK) +
						(UDF_EXTENT_LENGTH_MASK + 1) -
						inode->i_sb->s_blocksize;
					laarr[i + 1].extLocation.logicalBlockNum =
						laarr[i].extLocation.logicalBlockNum +
						((laarr[i].extLength &
						  UDF_EXTENT_LENGTH_MASK) >>
						 inode->i_sb->s_blocksize_bits);
				} else {
					laarr[i].extLength = laarr[i + 1].extLength +
						(((laarr[i].extLength &
						   UDF_EXTENT_LENGTH_MASK) +
						  inode->i_sb->s_blocksize - 1) &
						 ~(inode->i_sb->s_blocksize - 1));
					if (*endnum > (i + 2))
						memmove(&laarr[i + 1], &laarr[i + 2],
							sizeof(long_ad) *
							(*endnum - (i + 2)));
					i--;
					(*endnum)--;
				}
			}
		} else if (((laarr[i].extLength >> 30) ==
			    (EXT_NOT_RECORDED_ALLOCATED >> 30)) &&
			   ((laarr[i + 1].extLength >> 30) ==
			    (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))) {
			udf_free_blocks(inode->i_sb, inode, laarr[i].extLocation, 0,
					((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
					 inode->i_sb->s_blocksize - 1) >>
					inode->i_sb->s_blocksize_bits);
			laarr[i].extLocation.logicalBlockNum = 0;
			laarr[i].extLocation.partitionReferenceNum = 0;

			if (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
			     (laarr[i + 1].extLength & UDF_EXTENT_LENGTH_MASK) +
			     inode->i_sb->s_blocksize - 1) &
			    ~UDF_EXTENT_LENGTH_MASK) {
				laarr[i + 1].extLength =
					(laarr[i + 1].extLength -
					 (laarr[i].extLength &
					  UDF_EXTENT_LENGTH_MASK) +
					 UDF_EXTENT_LENGTH_MASK) &
					~(inode->i_sb->s_blocksize - 1);
				laarr[i].extLength =
					(laarr[i].extLength &
					 UDF_EXTENT_FLAG_MASK) +
					(UDF_EXTENT_LENGTH_MASK + 1) -
					inode->i_sb->s_blocksize;
			} else {
				laarr[i].extLength = laarr[i + 1].extLength +
					(((laarr[i].extLength &
					   UDF_EXTENT_LENGTH_MASK) +
					  inode->i_sb->s_blocksize - 1) &
					 ~(inode->i_sb->s_blocksize - 1));
				if (*endnum > (i + 2))
					memmove(&laarr[i + 1], &laarr[i + 2],
						sizeof(long_ad) *
						(*endnum - (i + 2)));
				i--;
				(*endnum)--;
			}
		} else if ((laarr[i].extLength >> 30) ==
			   (EXT_NOT_RECORDED_ALLOCATED >> 30)) {
			udf_free_blocks(inode->i_sb, inode, laarr[i].extLocation, 0,
					((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
					 inode->i_sb->s_blocksize - 1) >>
					inode->i_sb->s_blocksize_bits);
			laarr[i].extLocation.logicalBlockNum = 0;
			laarr[i].extLocation.partitionReferenceNum = 0;
			laarr[i].extLength =
				(laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) |
				EXT_NOT_RECORDED_NOT_ALLOCATED;
		}
	}
}

/* Write the in-memory extent list back to the on-disk allocation
 * descriptors, inserting or deleting descriptors if the number of
 * extents has changed. */
static void udf_update_extents(struct inode *inode,
			       kernel_long_ad laarr[EXTENT_MERGE_SIZE],
			       int startnum, int endnum,
			       struct extent_position *epos)
{
	int start = 0, i;
	kernel_lb_addr tmploc;
	uint32_t tmplen;

	if (startnum > endnum) {
		for (i = 0; i < (startnum - endnum); i++)
			udf_delete_aext(inode, *epos, laarr[i].extLocation,
					laarr[i].extLength);
	} else if (startnum < endnum) {
		for (i = 0; i < (endnum - startnum); i++) {
			udf_insert_aext(inode, *epos, laarr[i].extLocation,
					laarr[i].extLength);
			udf_next_aext(inode, epos, &laarr[i].extLocation,
				      &laarr[i].extLength, 1);
			start++;
		}
	}

	for (i = start; i < endnum; i++) {
		udf_next_aext(inode, epos, &tmploc, &tmplen, 0);
		udf_write_aext(inode, epos, laarr[i].extLocation,
			       laarr[i].extLength, 1);
	}
}

/* Return an uptodate buffer for the given block of the file, reading it
 * from disk if necessary. */
struct buffer_head *udf_bread(struct inode *inode, int block,
			      int create, int *err)
{
	struct buffer_head *bh = NULL;

	bh = udf_getblk(inode, block, create, err);
	if (!bh)
		return NULL;

	if (buffer_uptodate(bh))
		return bh;

	ll_rw_block(READ, 1, &bh);
	wait_on_buffer(bh);

	if (buffer_uptodate(bh))
		return bh;

	brelse(bh);
	*err = -EIO;
	return NULL;
}

void udf_truncate(struct inode *inode)
{
	int offset;
	int err;

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	      S_ISLNK(inode->i_mode)))
		return;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return;

	lock_kernel();
	if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB) {
		if (inode->i_sb->s_blocksize <
		    (udf_file_entry_alloc_offset(inode) + inode->i_size)) {
			udf_expand_file_adinicb(inode, inode->i_size, &err);
			if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB) {
				inode->i_size = UDF_I_LENALLOC(inode);
				unlock_kernel();
				return;
			} else {
				udf_truncate_extents(inode);
			}
		} else {
			offset = inode->i_size & (inode->i_sb->s_blocksize - 1);
			memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode) + offset,
			       0x00, inode->i_sb->s_blocksize - offset -
			       udf_file_entry_alloc_offset(inode));
			UDF_I_LENALLOC(inode) = inode->i_size;
		}
	} else {
		block_truncate_page(inode->i_mapping, inode->i_size,
				    udf_get_block);
		udf_truncate_extents(inode);
	}

	inode->i_mtime = inode->i_ctime = current_fs_time(inode->i_sb);
	if (IS_SYNC(inode))
		udf_sync_inode(inode);
	else
		mark_inode_dirty(inode);

	unlock_kernel();
}

static void __udf_read_inode(struct inode *inode)
{
	struct buffer_head *bh = NULL;
	struct fileEntry *fe;
	uint16_t ident;