/*
 * inode.c — SquashFS inode and superblock routines (Linux 2.4 era).
 * (Original page header "inode.c" / font-size control was web-viewer chrome,
 * not part of the source.)
 */
{ kdev_t dev = s->s_dev; squashfs_sb_info *msBlk = &s->u.squashfs_sb; squashfs_super_block *sBlk = &msBlk->sBlk; int i; TRACE("Entered squashfs_read_superblock\n"); msBlk->devblksize = get_hardsect_size(dev); if(msBlk->devblksize < BLOCK_SIZE) msBlk->devblksize = BLOCK_SIZE; msBlk->devblksize_log2 = ffz(~msBlk->devblksize); set_blocksize(dev, msBlk->devblksize); s->s_blocksize = msBlk->devblksize; s->s_blocksize_bits = msBlk->devblksize_log2; init_MUTEX(&msBlk->read_page_mutex); init_MUTEX(&msBlk->block_cache_mutex); init_MUTEX(&msBlk->fragment_mutex); init_waitqueue_head(&msBlk->waitq); init_waitqueue_head(&msBlk->fragment_wait_queue); if(!read_data(s, (char *) sBlk, SQUASHFS_START, sizeof(squashfs_super_block) | SQUASHFS_COMPRESSED_BIT_BLOCK, NULL)) { SERROR("unable to read superblock\n"); goto failed_mount; } /* Check it is a SQUASHFS superblock */ msBlk->swap = 0; if((s->s_magic = sBlk->s_magic) != SQUASHFS_MAGIC) { if(sBlk->s_magic == SQUASHFS_MAGIC_SWAP) { squashfs_super_block sblk; WARNING("Mounting a different endian SQUASHFS filesystem on %s\n", bdevname(dev)); SQUASHFS_SWAP_SUPER_BLOCK(&sblk, sBlk); memcpy(sBlk, &sblk, sizeof(squashfs_super_block)); msBlk->swap = 1; } else { SERROR("Can't find a SQUASHFS superblock on %s\n", bdevname(dev)); goto failed_mount; } } /* Check the MAJOR & MINOR versions */#ifdef SQUASHFS_1_0_COMPATIBILITY if((sBlk->s_major != 1) && (sBlk->s_major != 2 || sBlk->s_minor > SQUASHFS_MINOR)) { SERROR("Major/Minor mismatch, filesystem is (%d:%d), I support (1 : x) or (2 : <= %d)\n", sBlk->s_major, sBlk->s_minor, SQUASHFS_MINOR); goto failed_mount; } if(sBlk->s_major == 1) sBlk->block_size = sBlk->block_size_1;#else if(sBlk->s_major != SQUASHFS_MAJOR || sBlk->s_minor > SQUASHFS_MINOR) { SERROR("Major/Minor mismatch, filesystem is (%d:%d), I support (%d: <= %d)\n", sBlk->s_major, sBlk->s_minor, SQUASHFS_MAJOR, SQUASHFS_MINOR); goto failed_mount; }#endif TRACE("Found valid superblock on %s\n", bdevname(dev)); TRACE("Inodes are 
%scompressed\n", SQUASHFS_UNCOMPRESSED_INODES(sBlk->flags) ? "un" : ""); TRACE("Data is %scompressed\n", SQUASHFS_UNCOMPRESSED_DATA(sBlk->flags) ? "un" : ""); TRACE("Check data is %s present in the filesystem\n", SQUASHFS_CHECK_DATA(sBlk->flags) ? "" : "not"); TRACE("Filesystem size %d bytes\n", sBlk->bytes_used); TRACE("Block size %d\n", sBlk->block_size); TRACE("Number of inodes %d\n", sBlk->inodes); if(sBlk->s_major > 1) TRACE("Number of fragments %d\n", sBlk->fragments); TRACE("Number of uids %d\n", sBlk->no_uids); TRACE("Number of gids %d\n", sBlk->no_guids); TRACE("sBlk->inode_table_start %x\n", sBlk->inode_table_start); TRACE("sBlk->directory_table_start %x\n", sBlk->directory_table_start); if(sBlk->s_major > 1) TRACE("sBlk->fragment_table_start %x\n", sBlk->fragment_table_start); TRACE("sBlk->uid_start %x\n", sBlk->uid_start); s->s_flags |= MS_RDONLY; s->s_op = &squashfs_ops; /* Init inode_table block pointer array */ if(!(msBlk->block_cache = (squashfs_cache *) kmalloc(sizeof(squashfs_cache) * SQUASHFS_CACHED_BLKS, GFP_KERNEL))) { ERROR("Failed to allocate block cache\n"); goto failed_mount; } for(i = 0; i < SQUASHFS_CACHED_BLKS; i++) msBlk->block_cache[i].block = SQUASHFS_INVALID_BLK; msBlk->next_cache = 0; /* Allocate read_data block */ msBlk->read_size = (sBlk->block_size < SQUASHFS_METADATA_SIZE) ? 
SQUASHFS_METADATA_SIZE : sBlk->block_size; if(!(msBlk->read_data = (char *) kmalloc(msBlk->read_size, GFP_KERNEL))) { ERROR("Failed to allocate read_data block\n"); goto failed_mount1; } /* Allocate read_page block */ if(sBlk->block_size > PAGE_CACHE_SIZE) { if(!(msBlk->read_page = (char *) kmalloc(sBlk->block_size, GFP_KERNEL))) { ERROR("Failed to allocate read_page block\n"); goto failed_mount2; } } else msBlk->read_page = NULL; /* Allocate uid and gid tables */ if(!(msBlk->uid = (squashfs_uid *) kmalloc((sBlk->no_uids + sBlk->no_guids) * sizeof(squashfs_uid), GFP_KERNEL))) { ERROR("Failed to allocate uid/gid table\n"); goto failed_mount3; } msBlk->guid = msBlk->uid + sBlk->no_uids; if(msBlk->swap) { squashfs_uid suid[sBlk->no_uids + sBlk->no_guids]; if(!read_data(s, (char *) &suid, sBlk->uid_start, ((sBlk->no_uids + sBlk->no_guids) * sizeof(squashfs_uid)) | SQUASHFS_COMPRESSED_BIT_BLOCK, NULL)) { SERROR("unable to read uid/gid table\n"); goto failed_mount4; } SQUASHFS_SWAP_DATA(msBlk->uid, suid, (sBlk->no_uids + sBlk->no_guids), (sizeof(squashfs_uid) * 8)); } else if(!read_data(s, (char *) msBlk->uid, sBlk->uid_start, ((sBlk->no_uids + sBlk->no_guids) * sizeof(squashfs_uid)) | SQUASHFS_COMPRESSED_BIT_BLOCK, NULL)) { SERROR("unable to read uid/gid table\n"); goto failed_mount4; }#ifdef SQUASHFS_1_0_COMPATIBILITY if(sBlk->s_major == 1) { msBlk->iget = squashfs_iget_1; msBlk->read_blocklist = read_blocklist_1; msBlk->fragment = NULL; msBlk->fragment_index = NULL; goto allocate_root; }#endif msBlk->iget = squashfs_iget; msBlk->read_blocklist = read_blocklist; if(!(msBlk->fragment = (struct squashfs_fragment_cache *) kmalloc(sizeof(struct squashfs_fragment_cache) * SQUASHFS_CACHED_FRAGMENTS, GFP_KERNEL))) { ERROR("Failed to allocate fragment block cache\n"); goto failed_mount4; } for(i = 0; i < SQUASHFS_CACHED_FRAGMENTS; i++) { msBlk->fragment[i].locked = 0; msBlk->fragment[i].block = SQUASHFS_INVALID_BLK; msBlk->fragment[i].data = NULL; } msBlk->next_fragment = 0; 
/* Allocate fragment index table */ if(!(msBlk->fragment_index = (squashfs_fragment_index *) kmalloc(SQUASHFS_FRAGMENT_INDEX_BYTES(sBlk->fragments), GFP_KERNEL))) { ERROR("Failed to allocate uid/gid table\n"); goto failed_mount5; } if(SQUASHFS_FRAGMENT_INDEX_BYTES(sBlk->fragments) && !read_data(s, (char *) msBlk->fragment_index, sBlk->fragment_table_start, SQUASHFS_FRAGMENT_INDEX_BYTES(sBlk->fragments) | SQUASHFS_COMPRESSED_BIT_BLOCK, NULL)) { SERROR("unable to read fragment index table\n"); goto failed_mount6; } if(msBlk->swap) { int i; squashfs_fragment_index fragment; for(i = 0; i < SQUASHFS_FRAGMENT_INDEXES(sBlk->fragments); i++) { SQUASHFS_SWAP_FRAGMENT_INDEXES((&fragment), &msBlk->fragment_index[i], 1); msBlk->fragment_index[i] = fragment; } }#ifdef SQUASHFS_1_0_COMPATIBILITYallocate_root:#endif if(!(s->s_root = d_alloc_root((msBlk->iget)(s, sBlk->root_inode)))) { ERROR("Root inode create failed\n"); goto failed_mount5; } TRACE("Leaving squashfs_read_super\n"); return s;failed_mount6: kfree(msBlk->fragment_index);failed_mount5: kfree(msBlk->fragment);failed_mount4: kfree(msBlk->uid);failed_mount3: kfree(msBlk->read_page);failed_mount2: kfree(msBlk->read_data);failed_mount1: kfree(msBlk->block_cache);failed_mount: return NULL;}static int squashfs_statfs(struct super_block *s, struct statfs *buf){ squashfs_super_block *sBlk = &s->u.squashfs_sb.sBlk; TRACE("Entered squashfs_statfs\n"); buf->f_type = SQUASHFS_MAGIC; buf->f_bsize = sBlk->block_size; buf->f_blocks = ((sBlk->bytes_used - 1) >> sBlk->block_log) + 1; buf->f_bfree = buf->f_bavail = 0; buf->f_files = sBlk->inodes; buf->f_ffree = 0; buf->f_namelen = SQUASHFS_NAME_LEN; return 0;}static int squashfs_symlink_readpage(struct file *file, struct page *page){ struct inode *inode = page->mapping->host; int index = page->index << PAGE_CACHE_SHIFT, length, bytes; unsigned int block = inode->u.squashfs_i.start_block; int offset = inode->u.squashfs_i.offset; void *pageaddr = kmap(page); TRACE("Entered 
squashfs_symlink_readpage, page index %x, start block %x, offset %x\n", (unsigned int) page->index, inode->u.squashfs_i.start_block, inode->u.squashfs_i.offset); for(length = 0; length < index; length += bytes) { if(!(bytes = squashfs_get_cached_block(inode->i_sb, NULL, block, offset, PAGE_CACHE_SIZE, &block, &offset))) { ERROR("Unable to read symbolic link [%x:%x]\n", block, offset); goto skip_read; } } if(length != index) { ERROR("(squashfs_symlink_readpage) length != index\n"); bytes = 0; goto skip_read; } bytes = (inode->i_size - length) > PAGE_CACHE_SIZE ? PAGE_CACHE_SIZE : inode->i_size - length; if(!(bytes = squashfs_get_cached_block(inode->i_sb, pageaddr, block, offset, bytes, &block, &offset))) ERROR("Unable to read symbolic link [%x:%x]\n", block, offset);skip_read: memset(pageaddr + bytes, 0, PAGE_CACHE_SIZE - bytes); kunmap(page); flush_dcache_page(page); SetPageUptodate(page); UnlockPage(page); return 0;}#define SIZE 256#ifdef SQUASHFS_1_0_COMPATIBILITYstatic unsigned int read_blocklist_1(struct inode *inode, int index, int readahead_blks, char *block_list, unsigned short **block_p, unsigned int *bsize){ squashfs_sb_info *msBlk = &inode->i_sb->u.squashfs_sb; unsigned short *block_listp; int i = 0; int block_ptr = inode->u.squashfs_i.block_list_start; int offset = inode->u.squashfs_i.offset; unsigned int block = inode->u.squashfs_i.start_block; for(;;) { int blocks = (index + readahead_blks - i); if(blocks > (SIZE >> 1)) { if((index - i) <= (SIZE >> 1)) blocks = index - i; else blocks = SIZE >> 1; } if(msBlk->swap) { unsigned char sblock_list[SIZE]; if(!squashfs_get_cached_block(inode->i_sb, (char *) sblock_list, block_ptr, offset, blocks << 1, &block_ptr, &offset)) { ERROR("Unable to read block list [%d:%x]\n", block_ptr, offset); return 0; } SQUASHFS_SWAP_SHORTS(((unsigned short *)block_list), ((unsigned short *)sblock_list), blocks); } else if(!squashfs_get_cached_block(inode->i_sb, (char *) block_list, block_ptr, offset, blocks << 1, &block_ptr, 
&offset)) { ERROR("Unable to read block list [%d:%x]\n", block_ptr, offset); return 0; } for(block_listp = (unsigned short *) block_list; i < index && blocks; i ++, block_listp ++, blocks --) block += SQUASHFS_COMPRESSED_SIZE(*block_listp); if(blocks >= readahead_blks) break; } if(bsize) *bsize = SQUASHFS_COMPRESSED_SIZE(*block_listp) | (!SQUASHFS_COMPRESSED(*block_listp) ? SQUASHFS_COMPRESSED_BIT_BLOCK : 0); else *block_p = block_listp; return block;}#endifstatic unsigned int read_blocklist(struct inode *inode, int index, int readahead_blks, char *block_list, unsigned short **block_p, unsigned int *bsize){ squashfs_sb_info *msBlk = &inode->i_sb->u.squashfs_sb; unsigned int *block_listp; int i = 0; int block_ptr = inode->u.squashfs_i.block_list_start; int offset = inode->u.squashfs_i.offset; unsigned int block = inode->u.squashfs_i.start_block; for(;;) { int blocks = (index + readahead_blks - i); if(blocks > (SIZE >> 2)) { if((index - i) <= (SIZE >> 2)) blocks = index - i; else blocks = SIZE >> 2; } if(msBlk->swap) { unsigned char sblock_list[SIZE]; if(!squashfs_get_cached_block(inode->i_sb, (char *) sblock_list, block_ptr, offset, blocks << 2, &block_ptr, &offset)) { ERROR("Unable to read block list [%d:%x]\n", block_ptr, offset); return 0; } SQUASHFS_SWAP_INTS(((unsigned int *)block_list), ((unsigned int *)sblock_list), blocks); } else if(!squashfs_get_cached_block(inode->i_sb, (char *) block_list, block_ptr, offset, blocks << 2, &block_ptr, &offset)) { ERROR("Unable to read block list [%d:%x]\n", block_ptr, offset); return 0; } for(block_listp = (unsigned int *) block_list; i < index && blocks; i ++, block_listp ++, blocks --) block += SQUASHFS_COMPRESSED_SIZE_BLOCK(*block_listp); if(blocks >= readahead_blks) break; } *bsize = *block_listp; return block;}static int squashfs_readpage(struct file *file, struct page *page){ struct inode *inode = page->mapping->host; squashfs_sb_info *msBlk = &inode->i_sb->u.squashfs_sb; squashfs_super_block *sBlk = &msBlk->sBlk; 
unsigned char block_list[SIZE]; unsigned int bsize, block, i = 0, bytes = 0, byte_offset = 0; int index = page->index >> (sBlk->block_log - PAGE_CACHE_SHIFT); void *pageaddr = kmap(page); struct squashfs_fragment_cache *fragment = NULL; char *data_ptr = msBlk->read_page; int mask = (1 << (sBlk->block_log - PAGE_CACHE_SHIFT)) - 1; int start_index = page->index & ~mask; int end_index = start_index | mask; TRACE("Entered squashfs_readpage, page index %x, start block %x\n", (unsigned int) page->index, inode->u.squashfs_i.start_block); if(page->index >= ((inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT)) { goto skip_read; } if(inode->u.squashfs_i.u.s1.fragment_start_block == SQUASHFS_INVALID_BLK || index < (inode->i_size >> sBlk->block_log)) { if((block = (msBlk->read_blocklist)(inode, index, 1, block_list, NULL, &bsize)) == 0) goto skip_read; down(&msBlk->read_page_mutex); if(!(bytes = read_data(inode->i_sb, msBlk->read_page, block, bsize, NULL))) { ERROR("Unable to read page, block %x, size %x\n", block, bsize); up(&msBlk->read_page_mutex); goto skip_read; } } else { if((fragment = get_cached_fragment(inode->i_sb, inode->u.squashfs_i.u.s1.fragment_start_block, inode->u.squashfs_i.u.s1.fragment_size)) == NULL) { ERROR("Unable to read page, block %x, size %x\n", inode->u.squashfs_i.u.s1.fragment_start_block, (int) inode->u.squashfs_i.u.s1.fragment_size); goto skip_read; } bytes = inode->u.squashfs_i.u.s1.fragment_offset + (inode->i_size & (sBlk->block_size - 1)); byte_offset = inode->u.squashfs_i.u.s1.fragment_offset; data_ptr = fragment->data; } for(i = start_index; i <= end_index && byte_offset < bytes; i++, byte_offset += PAGE_CACHE_SIZE) { struct page *push_page; int available_bytes = (bytes - byte_offset) > PAGE_CACHE_SIZE ? 
PAGE_CACHE_SIZE : bytes - byte_offset; TRACE("bytes %d, i %d, byte_offset %d, available_bytes %d\n", bytes, i, byte_offset, available_bytes); if(i == page->index) { memcpy(pageaddr, data_ptr + byte_offset, available_bytes); memset(pageaddr + available_bytes, 0, PAGE_CACHE_SIZE - available_bytes); kunmap(page); flush_dcache_page(page); SetPageUptodate(page); UnlockPage(page); } else if((push_page = grab_cache_page_nowait(page->mapping, i))) { void *pageaddr = kmap(push_page); memcpy(pageaddr, data_ptr + byte_offset, available_bytes); memset(pageaddr + available_bytes, 0, PAGE_CACHE_SIZE - available_bytes); kunmap(push_page); flush_dcache_page(push_page); SetPageUptodate(push_page); UnlockPage(push_page);
/*
 * (Non-source residue from the hosting web page: a keyboard-shortcut help
 * table for the online code viewer — copy: Ctrl+C, search: Ctrl+F,
 * fullscreen: F11, theme: Ctrl+Shift+D, show shortcuts: ?, zoom: Ctrl+= /
 * Ctrl+-. Not part of inode.c.)
 */