inode.c
		if (!goal)
		{
			if (!(goal = pgoal))
				goal = UDF_I_LOCATION(inode).logicalBlockNum + 1;
		}

		if (!(newblocknum = udf_new_block(inode->i_sb, inode,
			UDF_I_LOCATION(inode).partitionReferenceNum, goal, err)))
		{
			udf_release_data(pbh);
			*err = -ENOSPC;
			return NULL;
		}
		UDF_I_LENEXTENTS(inode) += inode->i_sb->s_blocksize;
	}

	/* if the extent the requested block is located in contains multiple blocks,
	   split the extent into at most three extents: blocks prior to the requested
	   block, the requested block, and blocks after the requested block */
	udf_split_extents(inode, &c, offset, newblocknum, laarr, &endnum);

#ifdef UDF_PREALLOCATE
	/* preallocate blocks */
	udf_prealloc_extents(inode, c, lastblock, laarr, &endnum);
#endif

	/* merge any continuous blocks in laarr */
	udf_merge_extents(inode, laarr, &endnum);

	/* write back the new extents, inserting new extents if the new number
	   of extents is greater than the old number, and deleting extents if
	   the new number of extents is less than the old number */
	udf_update_extents(inode, laarr, startnum, endnum, pbloc, pextoffset, &pbh);

	udf_release_data(pbh);

	if (!(newblock = udf_get_pblock(inode->i_sb, newblocknum,
		UDF_I_LOCATION(inode).partitionReferenceNum, 0)))
	{
		return NULL;
	}
	*phys = newblock;
	*err = 0;
	*new = 1;
	UDF_I_NEXT_ALLOC_BLOCK(inode) = block;
	UDF_I_NEXT_ALLOC_GOAL(inode) = newblocknum;
	inode->i_ctime = CURRENT_TIME;

	if (IS_SYNC(inode))
		udf_sync_inode(inode);
	else
		mark_inode_dirty(inode);
	return result;
}

static void udf_split_extents(struct inode *inode, int *c, int offset, int newblocknum,
	long_ad laarr[EXTENT_MERGE_SIZE], int *endnum)
{
	if ((laarr[*c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30) ||
		(laarr[*c].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
	{
		int curr = *c;
		int blen = ((laarr[curr].extLength & UDF_EXTENT_LENGTH_MASK) +
			inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits;
		int8_t etype = (laarr[curr].extLength >> 30);

		if (blen == 1)
			;
		else if (!offset || blen == offset + 1)
		{
			laarr[curr+2] = laarr[curr+1];
			laarr[curr+1] = laarr[curr];
		}
		else
		{
			laarr[curr+3] = laarr[curr+1];
			laarr[curr+2] = laarr[curr+1] = laarr[curr];
		}

		if (offset)
		{
			if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30))
			{
				udf_free_blocks(inode->i_sb, inode, laarr[curr].extLocation, 0, offset);
				laarr[curr].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
					(offset << inode->i_sb->s_blocksize_bits);
				laarr[curr].extLocation.logicalBlockNum = 0;
				laarr[curr].extLocation.partitionReferenceNum = 0;
			}
			else
				laarr[curr].extLength = (etype << 30) |
					(offset << inode->i_sb->s_blocksize_bits);
			curr ++;
			(*c) ++;
			(*endnum) ++;
		}

		laarr[curr].extLocation.logicalBlockNum = newblocknum;
		if (etype == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
			laarr[curr].extLocation.partitionReferenceNum =
				UDF_I_LOCATION(inode).partitionReferenceNum;
		laarr[curr].extLength = EXT_RECORDED_ALLOCATED |
			inode->i_sb->s_blocksize;
		curr ++;

		if (blen != offset + 1)
		{
			if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30))
				laarr[curr].extLocation.logicalBlockNum += (offset + 1);
			laarr[curr].extLength = (etype << 30) |
				((blen - (offset + 1)) << inode->i_sb->s_blocksize_bits);
			curr ++;
			(*endnum) ++;
		}
	}
}
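/*
 * udf_prealloc_extents
 *
 * Descriptive comment added to this excerpt; behaviour summarized from the
 * code below: when preallocation is compiled in, try to reserve up to
 * UDF_DEFAULT_PREALLOC_BLOCKS blocks immediately after the extent that now
 * holds the requested block, either by growing an existing
 * not-recorded-but-allocated extent or by inserting a new one into laarr,
 * and shrink or drop any following not-recorded extents that the
 * preallocated range swallows.
 */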
static void udf_prealloc_extents(struct inode *inode, int c, int lastblock,
	long_ad laarr[EXTENT_MERGE_SIZE], int *endnum)
{
	int start, length = 0, currlength = 0, i;

	if (*endnum >= (c+1))
	{
		if (!lastblock)
			return;
		else
			start = c;
	}
	else
	{
		if ((laarr[c+1].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
		{
			start = c+1;
			length = currlength = (((laarr[c+1].extLength & UDF_EXTENT_LENGTH_MASK) +
				inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
		}
		else
			start = c;
	}

	for (i=start+1; i<=*endnum; i++)
	{
		if (i == *endnum)
		{
			if (lastblock)
				length += UDF_DEFAULT_PREALLOC_BLOCKS;
		}
		else if ((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
			length += (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
				inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
		else
			break;
	}

	if (length)
	{
		int next = laarr[start].extLocation.logicalBlockNum +
			(((laarr[start].extLength & UDF_EXTENT_LENGTH_MASK) +
			inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
		int numalloc = udf_prealloc_blocks(inode->i_sb, inode,
			laarr[start].extLocation.partitionReferenceNum,
			next, (UDF_DEFAULT_PREALLOC_BLOCKS > length ? length :
				UDF_DEFAULT_PREALLOC_BLOCKS) - currlength);

		if (numalloc)
		{
			if (start == (c+1))
				laarr[start].extLength +=
					(numalloc << inode->i_sb->s_blocksize_bits);
			else
			{
				memmove(&laarr[c+2], &laarr[c+1],
					sizeof(long_ad) * (*endnum - (c+1)));
				(*endnum) ++;
				laarr[c+1].extLocation.logicalBlockNum = next;
				laarr[c+1].extLocation.partitionReferenceNum =
					laarr[c].extLocation.partitionReferenceNum;
				laarr[c+1].extLength = EXT_NOT_RECORDED_ALLOCATED |
					(numalloc << inode->i_sb->s_blocksize_bits);
				start = c+1;
			}

			for (i=start+1; numalloc && i<*endnum; i++)
			{
				int elen = ((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
					inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits;

				if (elen > numalloc)
				{
					laarr[i].extLength -=
						(numalloc << inode->i_sb->s_blocksize_bits);
					numalloc = 0;
				}
				else
				{
					numalloc -= elen;
					if (*endnum > (i+1))
						memmove(&laarr[i], &laarr[i+1],
							sizeof(long_ad) * (*endnum - (i+1)));
					i --;
					(*endnum) --;
				}
			}
			UDF_I_LENEXTENTS(inode) += numalloc << inode->i_sb->s_blocksize_bits;
		}
	}
}
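/*
 * udf_merge_extents
 *
 * Descriptive comment added to this excerpt; behaviour summarized from the
 * code below: walk laarr and coalesce neighbouring extents of the same type
 * (or a not-recorded-allocated extent followed by a not-recorded-not-allocated
 * one, whose on-disk blocks are freed first). When the combined length would
 * overflow UDF_EXTENT_LENGTH_MASK the length is rebalanced across the two
 * descriptors; otherwise the second entry is absorbed and removed from laarr.
 * A not-recorded-allocated extent that cannot be merged has its blocks freed
 * and is downgraded to not-recorded-not-allocated.
 */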
static void udf_merge_extents(struct inode *inode,
	long_ad laarr[EXTENT_MERGE_SIZE], int *endnum)
{
	int i;

	for (i=0; i<(*endnum-1); i++)
	{
		if ((laarr[i].extLength >> 30) == (laarr[i+1].extLength >> 30))
		{
			if (((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) ||
				((laarr[i+1].extLocation.logicalBlockNum - laarr[i].extLocation.logicalBlockNum) ==
				(((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
				inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits)))
			{
				if (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
					(laarr[i+1].extLength & UDF_EXTENT_LENGTH_MASK) +
					inode->i_sb->s_blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK)
				{
					laarr[i+1].extLength = (laarr[i+1].extLength -
						(laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
						UDF_EXTENT_LENGTH_MASK) & ~(inode->i_sb->s_blocksize-1);
					laarr[i].extLength = (laarr[i].extLength & UDF_EXTENT_FLAG_MASK) +
						(UDF_EXTENT_LENGTH_MASK + 1) - inode->i_sb->s_blocksize;
					laarr[i+1].extLocation.logicalBlockNum =
						laarr[i].extLocation.logicalBlockNum +
						((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) >>
							inode->i_sb->s_blocksize_bits);
				}
				else
				{
					laarr[i].extLength = laarr[i+1].extLength +
						(((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
						inode->i_sb->s_blocksize - 1) & ~(inode->i_sb->s_blocksize-1));
					if (*endnum > (i+2))
						memmove(&laarr[i+1], &laarr[i+2],
							sizeof(long_ad) * (*endnum - (i+2)));
					i --;
					(*endnum) --;
				}
			}
		}
		else if (((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30)) &&
			((laarr[i+1].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)))
		{
			udf_free_blocks(inode->i_sb, inode, laarr[i].extLocation, 0,
				((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
				inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
			laarr[i].extLocation.logicalBlockNum = 0;
			laarr[i].extLocation.partitionReferenceNum = 0;

			if (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
				(laarr[i+1].extLength & UDF_EXTENT_LENGTH_MASK) +
				inode->i_sb->s_blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK)
			{
				laarr[i+1].extLength = (laarr[i+1].extLength -
					(laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
					UDF_EXTENT_LENGTH_MASK) & ~(inode->i_sb->s_blocksize-1);
				laarr[i].extLength = (laarr[i].extLength & UDF_EXTENT_FLAG_MASK) +
					(UDF_EXTENT_LENGTH_MASK + 1) - inode->i_sb->s_blocksize;
			}
			else
			{
				laarr[i].extLength = laarr[i+1].extLength +
					(((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
					inode->i_sb->s_blocksize - 1) & ~(inode->i_sb->s_blocksize-1));
				if (*endnum > (i+2))
					memmove(&laarr[i+1], &laarr[i+2],
						sizeof(long_ad) * (*endnum - (i+2)));
				i --;
				(*endnum) --;
			}
		}
		else if ((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
		{
			udf_free_blocks(inode->i_sb, inode, laarr[i].extLocation, 0,
				((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
				inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
			laarr[i].extLocation.logicalBlockNum = 0;
			laarr[i].extLocation.partitionReferenceNum = 0;
			laarr[i].extLength = (laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) |
				EXT_NOT_RECORDED_NOT_ALLOCATED;
		}
	}
}

static void udf_update_extents(struct inode *inode,
	long_ad laarr[EXTENT_MERGE_SIZE], int startnum, int endnum,
	lb_addr pbloc, uint32_t pextoffset, struct buffer_head **pbh)
{
	int start = 0, i;
	lb_addr tmploc;
	uint32_t tmplen;

	if (startnum > endnum)
	{
		for (i=0; i<(startnum-endnum); i++)
		{
			udf_delete_aext(inode, pbloc, pextoffset, laarr[i].extLocation,
				laarr[i].extLength, *pbh);
		}
	}
	else if (startnum < endnum)
	{
		for (i=0; i<(endnum-startnum); i++)
		{
			udf_insert_aext(inode, pbloc, pextoffset, laarr[i].extLocation,
				laarr[i].extLength, *pbh);
			udf_next_aext(inode, &pbloc, &pextoffset, &laarr[i].extLocation,
				&laarr[i].extLength, pbh, 1);
			start ++;
		}
	}

	for (i=start; i<endnum; i++)
	{
		udf_next_aext(inode, &pbloc, &pextoffset, &tmploc, &tmplen, pbh, 0);
		udf_write_aext(inode, pbloc, &pextoffset, laarr[i].extLocation,
			laarr[i].extLength, *pbh, 1);
	}
}
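/*
 * udf_bread
 *
 * Descriptive comment added to this excerpt; behaviour summarized from the
 * code below: obtain a buffer for the given file block via udf_getblk(),
 * then ensure its contents are up to date, issuing a read and waiting on the
 * buffer if necessary. Returns the buffer_head on success, or NULL with *err
 * set to -EIO if the read fails.
 */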
struct buffer_head * udf_bread(struct inode * inode, int block, int create, int * err)
{
	struct buffer_head * bh = NULL;

	bh = udf_getblk(inode, block, create, err);
	if (!bh)
		return NULL;

	if (buffer_uptodate(bh))
		return bh;
	ll_rw_block(READ, 1, &bh);
	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		return bh;
	brelse(bh);
	*err = -EIO;
	return NULL;
}

void udf_truncate(struct inode * inode)
{
	int offset;
	int err;

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
			S_ISLNK(inode->i_mode)))
		return;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return;

	lock_kernel();
	if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
	{
		if (inode->i_sb->s_blocksize < (udf_file_entry_alloc_offset(inode) +
			inode->i_size))
		{
			udf_expand_file_adinicb(inode, inode->i_size, &err);
			if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
			{
				inode->i_size = UDF_I_LENALLOC(inode);
				unlock_kernel();
				return;
			}
			else
				udf_truncate_extents(inode);
		}
		else
		{
			offset = inode->i_size & (inode->i_sb->s_blocksize - 1);
			memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode) + offset, 0x00,
				inode->i_sb->s_blocksize - offset -
				udf_file_entry_alloc_offset(inode));
			UDF_I_LENALLOC(inode) = inode->i_size;
		}
	}
	else
	{
		block_truncate_page(inode->i_mapping, inode->i_size, udf_get_block);
		udf_truncate_extents(inode);
	}

	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	if (IS_SYNC(inode))
		udf_sync_inode(inode);
	else
		mark_inode_dirty(inode);
	unlock_kernel();
}

/*
 * udf_read_inode
 *
 * PURPOSE
 *	Read an inode.
 *
 * DESCRIPTION
 *	This routine is called by iget() [which is called by udf_iget()]
 *	(clean_inode() will have been called first)
 *	when an inode is first read into memory.
 *
 * HISTORY
 *	July 1, 1997 - Andrew E. Mileski
 *	Written, tested, and released.
 *
 *	12/19/98 dgb  Updated to fix size problems.
 */
void
udf_read_inode(struct inode *inode)
{
	memset(&UDF_I_LOCATION(inode), 0xFF, sizeof(lb_addr));
}

void
__udf_read_inode(struct inode *inode)
{
	struct buffer_head *bh = NULL;
	struct fileEntry *fe;
	uint16_t ident;

	/*
	 * Set defaults, but the inode is still incomplete!
	 * Note: get_new_inode() sets the following on a new inode:
	 *	i_sb = sb
	 *	i_no = ino
	 *	i_flags = sb->s_flags
	 *	i_state = 0
	 * clean_inode(): zero fills and sets
	 *	i_count = 1
	 *	i_nlink = 1
	 *	i_op = NULL;
	 */
	inode->i_blksize = PAGE_SIZE;

	bh = udf_read_ptagged(inode->i_sb, UDF_I_LOCATION(inode), 0, &ident);

	if (!bh)
	{
		printk(KERN_ERR "udf: udf_read_inode(ino %ld) failed !bh\n",
			inode->i_ino);
		make_bad_inode(inode);
		return;
	}

	if (ident != TAG_IDENT_FE && ident != TAG_IDENT_EFE &&
		ident != TAG_IDENT_USE)
	{
		printk(KERN_ERR "udf: udf_read_inode(ino %ld) failed ident=%d\n",
			inode->i_ino, ident);
		udf_release_data(bh);
		make_bad_inode(inode);
		return;
	}

	fe = (struct fileEntry *)bh->b_data;

	if (le16_to_cpu(fe->icbTag.strategyType) == 4096)
	{
		struct buffer_head *ibh = NULL, *nbh = NULL;
		struct indirectEntry *ie;

		ibh = udf_read_ptagged(inode->i_sb, UDF_I_LOCATION(inode), 1, &ident);
		if (ident == TAG_IDENT_IE)
		{
			if (ibh)
			{
				lb_addr loc;
				ie = (struct indirectEntry *)ibh->b_data;

				loc = lelb_to_cpu(ie->indirectICB.extLocation);

				if (ie->indirectICB.extLength &&
					(nbh = udf_read_ptagged(inode->i_sb, loc, 0, &ident)))
				{
					if (ident == TAG_IDENT_FE ||
						ident == TAG_IDENT_EFE)
					{
						memcpy(&UDF_I_LOCATION(inode), &loc,
							sizeof(lb_addr));
						udf_release_data(bh);
						udf_release_data(ibh);
						udf_release_data(nbh);
						__udf_read_inode(inode);
						return;
					}
					else
					{
						udf_release_data(nbh);
						udf_release_data(ibh);
					}
				}
				else
					udf_release_data(ibh);
			}
		}
		else
			udf_release_data(ibh);
	}
	else if (le16_to_cpu(fe->icbTag.strategyType) != 4)
	{
		printk(KERN_ERR "udf: unsupported strategy type: %d\n",
			le16_to_cpu(fe->icbTag.strategyType));
		udf_release_data(bh);
		make_bad_inode(inode);
		return;
	}
	udf_fill_inode(inode, bh);
	udf_release_data(bh);
}

static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)