ext2fs.c
/*
** The Sleuth Kit
**
** Brian Carrier [carrier <at> sleuthkit [dot] org]
** Copyright (c) 2006-2008 Brian Carrier, Basis Technology.  All Rights reserved
** Copyright (c) 2003-2005 Brian Carrier.  All rights reserved
**
** TASK
** Copyright (c) 2002-2003 Brian Carrier, @stake Inc.  All rights reserved
**
** Copyright (c) 1997,1998,1999, International Business Machines
** Corporation and others. All Rights Reserved.
*/

/**
 *\file ext2fs.c
 * Contains the internal TSK ext2/ext3 file system functions.
 */

/* TCT
 * LICENSE
 *      This software is distributed under the IBM Public License.
 * AUTHOR(S)
 *      Wietse Venema
 *      IBM T.J. Watson Research
 *      P.O. Box 704
 *      Yorktown Heights, NY 10598, USA
 --*/

#include "tsk_fs_i.h"
#include "tsk_ext2fs.h"

/* ext2fs_group_load - load block group descriptor into cache
 *
 * return 1 on error and 0 on success
 *
 * */
static uint8_t
ext2fs_group_load(EXT2FS_INFO * ext2fs, EXT2_GRPNUM_T grp_num)
{
    ext2fs_gd *gd;
    TSK_OFF_T offs;
    ssize_t cnt;
    TSK_FS_INFO *fs = (TSK_FS_INFO *) ext2fs;

    /*
     * Sanity check
     */
    if (grp_num < 0 || grp_num >= ext2fs->groups_count) {
        tsk_error_reset();
        tsk_errno = TSK_ERR_FS_ARG;
        snprintf(tsk_errstr, TSK_ERRSTR_L,
            "ext2fs_group_load: invalid cylinder group number: %"
            PRI_EXT2GRP "", grp_num);
        return 1;
    }

    /* Allocate the cache buffer on first use; return early if this
     * group is already cached. */
    if (ext2fs->grp_buf == NULL) {
        if ((ext2fs->grp_buf =
                (ext2fs_gd *) tsk_malloc(sizeof(ext2fs_gd))) == NULL) {
            return 1;
        }
    }
    else if (ext2fs->grp_num == grp_num) {
        return 0;
    }

    gd = ext2fs->grp_buf;

    /*
     * We're not reading group descriptors often, so it is OK to do small
     * reads instead of cacheing group descriptors in a large buffer.
     */
    offs = ext2fs->groups_offset + grp_num * sizeof(ext2fs_gd);
    cnt = tsk_fs_read(&ext2fs->fs_info, offs, (char *) gd,
        sizeof(ext2fs_gd));
    if (cnt != sizeof(ext2fs_gd)) {
        if (cnt >= 0) {
            tsk_error_reset();
            tsk_errno = TSK_ERR_FS_READ;
        }
        snprintf(tsk_errstr2, TSK_ERRSTR_L,
            "ext2fs_group_load: Group descriptor %" PRI_EXT2GRP " at %"
            PRIuOFF, grp_num, offs);
        return 1;
    }

    /* Perform a sanity check on the data to make sure offsets are in range */
    if ((tsk_getu32(fs->endian, gd->bg_block_bitmap) > fs->last_block) ||
        (tsk_getu32(fs->endian, gd->bg_inode_bitmap) > fs->last_block) ||
        (tsk_getu32(fs->endian, gd->bg_inode_table) > fs->last_block)) {
        tsk_error_reset();
        tsk_errno = TSK_ERR_FS_CORRUPT;
        snprintf(tsk_errstr, TSK_ERRSTR_L,
            "extXfs_group_load: Group %" PRI_EXT2GRP
            " descriptor block locations too large at byte offset %"
            PRIuDADDR, grp_num, offs);
        return 1;
    }

    ext2fs->grp_num = grp_num;

    if (tsk_verbose) {
        TSK_FS_INFO *fs = (TSK_FS_INFO *) & ext2fs->fs_info;
        tsk_fprintf(stderr,
            "\tgroup %" PRI_EXT2GRP ": %" PRIu16 "/%" PRIu16
            " free blocks/inodes\n", grp_num,
            tsk_getu16(fs->endian, gd->bg_free_blocks_count),
            tsk_getu16(fs->endian, gd->bg_free_inodes_count));
    }

    return 0;
}

/* ext2fs_print_map - print a bitmap */
static void
ext2fs_print_map(uint8_t * map, int len)
{
    int i;

    for (i = 0; i < len; i++) {
        if (i > 0 && i % 10 == 0)
            putc('|', stderr);
        putc(isset(map, i) ? '1' : '.', stderr);
    }
    putc('\n', stderr);
}

/* ext2fs_bmap_load - look up block bitmap & load into cache
 *
 * return 1 on error and 0 on success
 *
 */
static uint8_t
ext2fs_bmap_load(EXT2FS_INFO * ext2fs, EXT2_GRPNUM_T grp_num)
{
    TSK_FS_INFO *fs = (TSK_FS_INFO *) & ext2fs->fs_info;
    ssize_t cnt;

    /*
     * Look up the group descriptor info.  The load will do the sanity check.
     */
    if ((ext2fs->grp_buf == NULL) || (ext2fs->grp_num != grp_num)) {
        if (ext2fs_group_load(ext2fs, grp_num)) {
            return 1;
        }
    }

    /* Allocate the cache buffer and exit if the map is already loaded */
    if (ext2fs->bmap_buf == NULL) {
        if ((ext2fs->bmap_buf =
                (uint8_t *) tsk_malloc(fs->block_size)) == NULL) {
            return 1;
        }
    }
    else if (ext2fs->bmap_grp_num == grp_num)
        return 0;

    /*
     * Look up the block allocation bitmap.
     */
    if (tsk_getu32(fs->endian,
            ext2fs->grp_buf->bg_block_bitmap) > fs->last_block) {
        tsk_error_reset();
        tsk_errno = TSK_ERR_FS_BLK_NUM;
        snprintf(tsk_errstr, TSK_ERRSTR_L,
            "ext2fs_bmap_load: Block too large for image: %" PRIu32 "",
            tsk_getu32(fs->endian, ext2fs->grp_buf->bg_block_bitmap));
        return 1;
    }

    cnt = tsk_fs_read(fs,
        (TSK_DADDR_T) tsk_getu32(fs->endian,
            ext2fs->grp_buf->bg_block_bitmap) * fs->block_size,
        (char *) ext2fs->bmap_buf, ext2fs->fs_info.block_size);
    if (cnt != ext2fs->fs_info.block_size) {
        if (cnt >= 0) {
            tsk_error_reset();
            tsk_errno = TSK_ERR_FS_READ;
        }
        snprintf(tsk_errstr2, TSK_ERRSTR_L,
            "ext2fs_bmap_load: Bitmap group %" PRI_EXT2GRP " at %" PRIu32,
            grp_num, tsk_getu32(fs->endian,
                ext2fs->grp_buf->bg_block_bitmap));
        /* Bail out so a bitmap that failed to read is not marked as cached */
        return 1;
    }

    ext2fs->bmap_grp_num = grp_num;

    if (tsk_verbose > 1)
        ext2fs_print_map(ext2fs->bmap_buf,
            tsk_getu32(fs->endian, ext2fs->fs->s_blocks_per_group));

    return 0;
}
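/*
 * Illustrative note on the bitmap caches (the figures below are examples,
 * not taken from any particular image): each group stores its block bitmap
 * in a single file system block, so one read of fs->block_size bytes covers
 * the whole group.  With one bit per block, that is
 *
 *     bits per bitmap block = 8 * block_size
 *
 * e.g. 8 * 1024 = 8192 blocks per group for 1 KB blocks, or
 * 8 * 4096 = 32768 blocks per group for 4 KB blocks, which is why
 * s_blocks_per_group normally equals 8 * block_size.  The same reasoning
 * applies to the inode bitmap loaded by ext2fs_imap_load() below, except
 * that only s_inodes_per_group bits of it are meaningful.
 */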
/* ext2fs_imap_load - look up inode bitmap & load into cache
 *
 * return 0 on success and 1 on error
 *
 */
static uint8_t
ext2fs_imap_load(EXT2FS_INFO * ext2fs, EXT2_GRPNUM_T grp_num)
{
    TSK_FS_INFO *fs = (TSK_FS_INFO *) & ext2fs->fs_info;
    ssize_t cnt;

    /*
     * Look up the group descriptor info.
     */
    if ((ext2fs->grp_buf == NULL) || (ext2fs->grp_num != grp_num)) {
        if (ext2fs_group_load(ext2fs, grp_num)) {
            return 1;
        }
    }

    /* Allocate the cache buffer and exit if map is already loaded */
    if (ext2fs->imap_buf == NULL) {
        if ((ext2fs->imap_buf =
                (uint8_t *) tsk_malloc(fs->block_size)) == NULL) {
            return 1;
        }
    }
    else if (ext2fs->imap_grp_num == grp_num) {
        return 0;
    }

    /*
     * Look up the inode allocation bitmap.
     */
    if (tsk_getu32(fs->endian,
            ext2fs->grp_buf->bg_inode_bitmap) > fs->last_block) {
        tsk_error_reset();
        tsk_errno = TSK_ERR_FS_BLK_NUM;
        snprintf(tsk_errstr, TSK_ERRSTR_L,
            "ext2fs_imap_load: Block too large for image: %" PRIu32 "",
            tsk_getu32(fs->endian, ext2fs->grp_buf->bg_inode_bitmap));
        /* The bitmap location is invalid, so do not attempt the read */
        return 1;
    }

    cnt = tsk_fs_read(fs,
        (TSK_DADDR_T) tsk_getu32(fs->endian,
            ext2fs->grp_buf->bg_inode_bitmap) * fs->block_size,
        (char *) ext2fs->imap_buf, ext2fs->fs_info.block_size);
    if (cnt != ext2fs->fs_info.block_size) {
        if (cnt >= 0) {
            tsk_error_reset();
            tsk_errno = TSK_ERR_FS_READ;
        }
        snprintf(tsk_errstr2, TSK_ERRSTR_L,
            "ext2fs_imap_load: Inode bitmap %" PRI_EXT2GRP " at %" PRIu32,
            grp_num, tsk_getu32(fs->endian,
                ext2fs->grp_buf->bg_inode_bitmap));
        /* Bail out so a bitmap that failed to read is not marked as cached */
        return 1;
    }

    ext2fs->imap_grp_num = grp_num;

    if (tsk_verbose > 1)
        ext2fs_print_map(ext2fs->imap_buf,
            tsk_getu32(fs->endian, ext2fs->fs->s_inodes_per_group));

    return 0;
}

/* ext2fs_dinode_load - look up disk inode & load into cache
 *
 * return 1 on error and 0 on success
 *
 */
static uint8_t
ext2fs_dinode_load(EXT2FS_INFO * ext2fs, TSK_INUM_T inum)
{
    ext2fs_inode *dino;
    EXT2_GRPNUM_T grp_num;
    TSK_OFF_T addr;
    ssize_t cnt;
    TSK_INUM_T rel_inum;
    TSK_FS_INFO *fs = (TSK_FS_INFO *) & ext2fs->fs_info;

    /*
     * Sanity check.
     * Use last_inum-1 to account for the virtual Orphan directory in last_inum.
     */
    if ((inum < fs->first_inum) || (inum > fs->last_inum - 1)) {
        tsk_error_reset();
        tsk_errno = TSK_ERR_FS_INODE_NUM;
        snprintf(tsk_errstr, TSK_ERRSTR_L,
            "ext2fs_dinode_load: address: %" PRIuINUM, inum);
        return 1;
    }

    /* Allocate the buffer or return if already loaded */
    if (ext2fs->dino_buf == NULL) {
        if ((ext2fs->dino_buf =
                (ext2fs_inode *) tsk_malloc(ext2fs->inode_size)) == NULL) {
            return 1;
        }
    }
    else if (ext2fs->dino_inum == inum) {
        return 0;
    }

    dino = ext2fs->dino_buf;

    /*
     * Look up the group descriptor for this inode.
     */
    grp_num = (EXT2_GRPNUM_T) ((inum - fs->first_inum) /
        tsk_getu32(fs->endian, ext2fs->fs->s_inodes_per_group));
    if ((ext2fs->grp_buf == NULL) || (ext2fs->grp_num != grp_num)) {
        if (ext2fs_group_load(ext2fs, grp_num)) {
            return 1;
        }
    }

    /*
     * Look up the inode table block for this inode.
     */
    rel_inum = (inum - 1) -
        tsk_getu32(fs->endian, ext2fs->fs->s_inodes_per_group) * grp_num;
    addr = (TSK_OFF_T) tsk_getu32(fs->endian,
        ext2fs->grp_buf->bg_inode_table) * (TSK_OFF_T) fs->block_size +
        rel_inum * (TSK_OFF_T) ext2fs->inode_size;

    cnt = tsk_fs_read(fs, addr, (char *) dino, ext2fs->inode_size);
    if (cnt != ext2fs->inode_size) {
        if (cnt >= 0) {
            tsk_error_reset();
            tsk_errno = TSK_ERR_FS_READ;
        }
        snprintf(tsk_errstr2, TSK_ERRSTR_L,
            "ext2fs_dinode_load: Inode %" PRIuINUM " from %" PRIuOFF,
            inum, addr);
        return 1;
    }

    ext2fs->dino_inum = inum;

    if (tsk_verbose)
        tsk_fprintf(stderr,
            "%" PRIuINUM " m/l/s=%o/%d/%" PRIuOFF
            " u/g=%d/%d macd=%" PRIu32 "/%" PRIu32 "/%" PRIu32 "/%" PRIu32
            "\n", inum, tsk_getu16(fs->endian, dino->i_mode),
            tsk_getu16(fs->endian, dino->i_nlink),
            (tsk_getu32(fs->endian, dino->i_size) +
                (tsk_getu16(fs->endian,
                        dino->i_mode) & EXT2_IN_REG) ? (uint64_t)
                tsk_getu32(fs->endian, dino->i_size_high) << 32 : 0),
            tsk_getu16(fs->endian, dino->i_uid) +
            (tsk_getu16(fs->endian, dino->i_uid_high) << 16),
            tsk_getu16(fs->endian, dino->i_gid) +
            (tsk_getu16(fs->endian, dino->i_gid_high) << 16),
            tsk_getu32(fs->endian, dino->i_mtime),
            tsk_getu32(fs->endian, dino->i_atime),
            tsk_getu32(fs->endian, dino->i_ctime),
            tsk_getu32(fs->endian, dino->i_dtime));

    return 0;
}
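/*
 * Worked example for the inode lookup arithmetic in ext2fs_dinode_load()
 * (the numbers below are assumed for illustration only): with
 * first_inum = 1, s_inodes_per_group = 2048, a 1024-byte block size, and a
 * 128-byte on-disk inode, inode 5000 would be located as
 *
 *     grp_num  = (5000 - 1) / 2048       = 2
 *     rel_inum = (5000 - 1) - 2048 * 2   = 903
 *     addr     = bg_inode_table(group 2) * 1024 + 903 * 128
 *
 * i.e. the inode lives 903 slots into the inode table of group 2.  Both
 * calculations subtract 1 because ext2 inode numbers start at 1 while the
 * inode table slots start at 0.
 */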
/* ext2fs_dinode_copy - copy cached disk inode into generic inode
 *
 * returns 1 on error and 0 on success
 *
 */
static uint8_t
ext2fs_dinode_copy(EXT2FS_INFO * ext2fs, TSK_FS_META * fs_meta)
{
    int i;
    ext2fs_inode *in = ext2fs->dino_buf;
    TSK_FS_INFO *fs = (TSK_FS_INFO *) & ext2fs->fs_info;
    ext2fs_sb *sb = ext2fs->fs;
    EXT2_GRPNUM_T grp_num;
    TSK_INUM_T ibase = 0;
    TSK_DADDR_T *addr_ptr;

    if (ext2fs->dino_buf == NULL) {
        tsk_error_reset();
        tsk_errno = TSK_ERR_FS_ARG;
        snprintf(tsk_errstr, TSK_ERRSTR_L,
            "ext2fs_dinode_copy: dino_buf is NULL");
        return 1;
    }

    fs_meta->attr_state = TSK_FS_META_ATTR_EMPTY;
    if (fs_meta->attr) {
        tsk_fs_attrlist_markunused(fs_meta->attr);
    }

    // set the type
    switch (tsk_getu16(fs->endian, in->i_mode) & EXT2_IN_FMT) {
    case EXT2_IN_REG:
        fs_meta->type = TSK_FS_META_TYPE_REG;
        break;
    case EXT2_IN_DIR:
        fs_meta->type = TSK_FS_META_TYPE_DIR;
        break;
    case EXT2_IN_SOCK:
        fs_meta->type = TSK_FS_META_TYPE_SOCK;