
📄 write.c

📁 A powerful CD-ROM image creation tool
💻 C
📖 Page 1 of 5
	}
#endif /* SORTING */

#ifdef APPLE_HYB
	/*
	 * put this here for the time being - maybe when I've worked out how
	 * to use Eric's new system for creating/writing parts of the image it
	 * may move to its own routine
	 */
	if (apple_hyb)
		hfs_file_gen(start_extent);
#ifdef PREP_BOOT
	else if (use_prep_boot)
		gen_prepboot();
#endif	/* PREP_BOOT */
#endif	/* APPLE_HYB */
	return 0;
}

static int
dirtree_dump()
{
	if (verbose > 2) {
		dump_tree(root);
	}
	return 0;
}

static int
dirtree_fixup(starting_extent)
	int	starting_extent;
{
	if (use_RockRidge && reloc_dir)
		finish_cl_pl_entries();

	if (use_RockRidge)
		update_nlink_field(root);
	return 0;
}

static int
dirtree_size(starting_extent)
	int	starting_extent;
{
	assign_directory_addresses(root);
	return 0;
}

static int
ext_size(starting_extent)
	int	starting_extent;
{
	extern int	extension_record_size;
	struct directory_entry *s_entry;

	extension_record_extent = starting_extent;
	s_entry = root->contents;
	set_733((char *) s_entry->rr_attributes + s_entry->rr_attr_size - 24,
		extension_record_extent);
	set_733((char *) s_entry->rr_attributes + s_entry->rr_attr_size - 8,
		extension_record_size);
	last_extent++;
	return 0;
}

static int
dirtree_write(outfile)
	FILE	*outfile;
{
	generate_iso9660_directories(root, outfile);
	return 0;
}

static int
dirtree_cleanup(outfile)
	FILE	*outfile;
{
	free_directories(root);
	return 0;
}

static int
padblock_write(outfile)
	FILE	*outfile;
{
	char	buffer[SECTOR_SIZE];
	int	i;
	int	npad;

	memset(buffer, 0, sizeof(buffer));

	npad = session_start + 16 - last_extent_written;

	for (i = 0; i < npad; i++) {
		xfwrite(buffer, 1, sizeof(buffer), outfile);
	}

	last_extent_written += npad;
	return 0;
}

static int
padend_write(outfile)
	FILE	*outfile;
{
	char	buffer[SECTOR_SIZE];
	int	i;
	int	npad;

	memset(buffer, 0, sizeof(buffer));

	npad = 16;
	if ((i = last_extent_written % 16) != 0)
		npad += 16 - i;

	for (i = 0; i < npad; i++) {
		xfwrite(buffer, 1, sizeof(buffer), outfile);
	}

	last_extent_written += npad;
	return 0;
}

#ifdef APPLE_HYB
/*
 *	hfs_get_parms:	get HFS parameters from the command line
 */
static int
hfs_get_parms(key)
	char	*key;
{
	int	ret = 0;
	char	*p;

	if (hfs_parms == NULL)
		return (ret);

	if ((p = strstr(hfs_parms, key)) != NULL) {
		p += strlen(key) + 1;
		sscanf(p, "%d", &ret);
	}
	return (ret);
}

/*
 *	hfs_file_gen:	set up "fake" HFS volume using the ISO9660 tree
 */
static void
hfs_file_gen(start_extent)
	int	start_extent;
{
	int	Csize;	/* clump size for HFS vol */
	int	loop;
	int	last_extent_save = last_extent;
	char	*p;

	/* allocate memory for the libhfs/mkisofs extra info */
	hce = (hce_mem *) e_malloc(sizeof(hce_mem));
	hce->error = (char *) e_malloc(1024);

	/* mark as unallocated for use later */
	hce->hfs_ce = hce->hfs_hdr = hce->hfs_map = 0;

	/* reserve space for the label partition - if it is needed */
#ifdef PREP_BOOT
	/* a PReP bootable partition needs the map as well */
	if (gen_pt || use_prep_boot)
#else
	if (gen_pt)
#endif	/* PREP_BOOT */
		hce->hfs_map_size = HFS_MAP_SIZE;
	else
		hce->hfs_map_size = 0;

	/* set the HFS parameter string to upper case */
	if (hfs_parms) {
		for (p = hfs_parms; *p; p++)
			*p = toupper(*p);
	}

	/* set the initial factor to increase Catalog file size */
	if ((hce->ctc_size = hfs_get_parms("CTC")) == 0)
		hce->ctc_size = CTC;

	/* set the max size of the Catalog file */
	if ((hce->max_XTCsize = hfs_get_parms("MAX_XTCSIZE")) == 0)
		hce->max_XTCsize = MAX_XTCSIZE;

	/* set the number of times to try to make an HFS volume */
	if ((loop = hfs_get_parms("CTC_LOOP")) == 0)
		loop = CTC_LOOP;

	/*
	 * "create" the HFS volume (just the header, catalog/extents files).
	 * If there's a problem with the Catalog file being too small, we keep
	 * on increasing the size (up to CTC_LOOP times) and try again.
	 * Unfortunately I don't know enough about the inner workings of HFS,
	 * so I can't work out the size of the Catalog file in advance (and I
	 * don't want to "grow" it as is normally allowed), therefore this
	 * approach is a bit over the top as it involves throwing away the
	 * "volume" we have created and trying again ...
	 */
	do {
		hce->error[0] = '\0';

		/* attempt to create the Mac volume */
		Csize = make_mac_volume(root, start_extent);

		/* if we have a problem ... */
		if (Csize < 0) {
			/*
			 * we've made too many attempts, or got some other
			 * error
			 */
			if (loop == 0 || errno != HCE_ERROR) {
				/* HCE_ERROR is not a valid errno value */
				if (errno == HCE_ERROR)
					errno = 0;

				/* exit with the error */
				if (*hce->error)
					fprintf(stderr, "%s\n", hce->error);
				perr(hfs_error);
			} else {
				/* increase Catalog file size factor */
				hce->ctc_size *= CTC;

				/*
				 * reset the initial "last_extent" and try
				 * again
				 */
				last_extent = last_extent_save;
			}
		} else {
			/* everything OK - just carry on ... */
			loop = 0;
		}
	} while (loop--);

	hfs_extra = HFS_ROUND_UP(hce->hfs_tot_size) / SECTOR_SIZE;

	last_extent += hfs_extra;

	/* generate the Mac label and HFS partition maps */
	mac_boot.name = hfs_boot_file;

	/*
	 * only generate the partition tables etc. if we are making a bootable
	 * CD - or if the -part option is given
	 */
	if (gen_pt) {
		if (gen_mac_label(&mac_boot)) {
			if (*hce->error)
				fprintf(stderr, "%s\n", hce->error);
			perr(hfs_error);
		}
	}

	/* set Autostart filename if required */
	if (autoname) {
		if (autostart())
			perr("Autostart filename must be less than 12 characters");
	}

	/* finished with any HFS type errors */
	free(hce->error);
	hce->error = 0;

	/*
	 * the ISO files need to start on a multiple of the HFS allocation
	 * blocks, so find out how much padding we need
	 */

	/*
	 * take into account alignment of files wrt HFS volume start - remove
	 * any previous session as well
	 */
	start_extent -= session_start;

	hfs_pad = ROUND_UP(start_extent * SECTOR_SIZE +
			(hce->hfs_hdr_size + hce->hfs_map_size) * HFS_BLOCKSZ,
						Csize) / SECTOR_SIZE;

	hfs_pad -= (start_extent + (hce->hfs_hdr_size + hce->hfs_map_size) /
						HFS_BLK_CONV);

#ifdef PREP_BOOT
	gen_prepboot_label(hce->hfs_map);
#endif	/* PREP_BOOT */
}

#ifdef PREP_BOOT
static void
gen_prepboot()
{
	/*
	 * we need to allocate the hce struct since hce->hfs_map is used to
	 * generate the fdisk partition map required for PReP booting
	 */
	hce = (hce_mem *) e_malloc(sizeof(hce_mem));

	/* mark as unallocated for use later */
	hce->hfs_ce = hce->hfs_hdr = hce->hfs_map = 0;

	/* reserve space for the label partition - if it is needed */
	hce->hfs_map_size = HFS_MAP_SIZE;
	hce->hfs_map = (unsigned char *)
		e_malloc(hce->hfs_map_size * HFS_BLOCKSZ);

	gen_prepboot_label(hce->hfs_map);
}
#endif	/* PREP_BOOT */

/*
 *	get_adj_size:	get the adjusted size of the volume with the HFS
 *			allocation block size for each file
 */
Ulong
get_adj_size(Csize)
	int	Csize;
{
	struct deferred_write *dw;
	Ulong	size = 0;
	int	count = 0;

	/* loop through all the files finding the new total size */
	for (dw = dw_head; dw; dw = dw->next) {
		size += (ROUND_UP(dw->size, Csize) / HFS_BLOCKSZ);
		count++;
	}

	/*
	 * crude attempt to prevent overflows - HFS can only cope with a
	 * maximum of about 65536 forks (actually less) - this will trap cases
	 * when we have far too many files
	 */
	if (count >= 65536)
		return (-1);
	else
		return (size);
}

/*
 *	adj_size:	adjust the ISO record entries for all files
 *			based on the HFS allocation block size
 */
int
adj_size(Csize, start_extent, extra)
	int	Csize;
	int	start_extent;
	int	extra;
{
	struct deferred_write *dw;
	struct directory_entry *s_entry;
	int	size;

	/* get the adjusted start_extent (with padding) */
	/* take into account alignment of files wrt HFS volume start */
	start_extent -= session_start;

	start_extent = ROUND_UP(start_extent * SECTOR_SIZE + extra * HFS_BLOCKSZ,
					Csize) / SECTOR_SIZE;

	start_extent -= (extra / HFS_BLK_CONV);

	start_extent += session_start;

	/* initialise file hash */
	flush_hash();

	/*
	 * loop through all files changing their starting blocks and finding
	 * any padding needed to be written out later
	 */
	for (dw = dw_head; dw; dw = dw->next) {
		s_entry = dw->s_entry;
		s_entry->starting_block = dw->extent = start_extent;
		set_733((char *) s_entry->isorec.extent, start_extent);
		size = ROUND_UP(dw->size, Csize) / SECTOR_SIZE;
		dw->pad = size - ISO_ROUND_UP(dw->size) / SECTOR_SIZE;

		/*
		 * cache non-HFS files - as there may be multiple links to
		 * these files (HFS files can't have multiple links). We will
		 * need to change the starting extent of the other links later
		 */
		if (!s_entry->hfs_ent)
			add_hash(s_entry);

		start_extent += size;
	}

	return (start_extent);
}

/*
 *	adj_size_other:	adjust any non-HFS files that may be linked
 *			to an existing file (i.e. do not have a
 *			deferred_write entry of their own)
 */
void
adj_size_other(dpnt)
	struct directory	*dpnt;
{
	struct directory_entry *s_entry;
	struct file_hash *s_hash;

	while (dpnt) {
		s_entry = dpnt->contents;
		for (s_entry = dpnt->contents; s_entry;
					s_entry = s_entry->next) {
			/*
			 * if it's an HFS file or a directory - then ignore
			 * (we're after non-HFS files)
			 */
			if (s_entry->hfs_ent ||
			    (s_entry->isorec.flags[0] & ISO_DIRECTORY))
				continue;

			/*
			 * find any cached entry and assign new starting
			 * extent
			 */
			s_hash = find_hash(s_entry->dev, s_entry->inode);
			if (s_hash) {
				set_733((char *) s_entry->isorec.extent,
						s_hash->starting_block);
				/* not vital - but tidy */
				s_entry->starting_block =
						s_hash->starting_block;
			}
		}
		if (dpnt->subdir) {
			adj_size_other(dpnt->subdir);
		}
		dpnt = dpnt->next;
	}

	/* clear file hash */
	flush_hash();
}

/*
 *	hfs_hce_write:	write out the HFS header stuff
 */
static int
hfs_hce_write(outfile)
	FILE	*outfile;
{
	char	buffer[SECTOR_SIZE];
	int	n = 0;
	int	r;

	/* HFS hdr output */
	int	tot_size = hce->hfs_map_size + hce->hfs_hdr_size;

	memset(buffer, 0, sizeof(buffer));

	/*
	 * hack time ... if the tot_size is greater than 32Kb then it won't
	 * fit in the first 16 blank SECTORS (64 512-byte blocks); as most of
	 * this is padding, we just truncate this data to 64xHFS_BLOCKSZ ...
	 * hope this is OK ...
	 */
	if (tot_size > 64)
		tot_size = 64;

	/* get size in CD blocks == 4xHFS_BLOCKSZ == 2048 */
	n = tot_size / HFS_BLK_CONV;
	r = tot_size % HFS_BLK_CONV;

	/* write out HFS volume header info */
	xfwrite(hce->hfs_map, tot_size, HFS_BLOCKSZ, outfile);

	/* fill up to a complete CD block */
	if (r) {
		xfwrite(buffer, HFS_BLK_CONV - r, HFS_BLOCKSZ, outfile);
		n++;
	}
	last_extent_written += n;

	return 0;
}

/*
 *	insert_padding_file:	insert a dummy file to make the volume at
 *				least 800k
 */
int
insert_padding_file(size)
	int	size;
{
	struct deferred_write *dwpnt;

	/* get the size in bytes */
	size *= HFS_BLOCKSZ;

	dwpnt = (struct deferred_write *)
		e_malloc(sizeof(struct deferred_write));
	dwpnt->s_entry = 0;

	/* set the padding to zero */
	dwpnt->pad = 0;

	/* set offset to zero */
	dwpnt->off = (off_t)0;

	/*
	 * don't need to worry about the s_entry stuff as it won't be touched
	 * at this point onwards
	 */

	/* insert the entry in the list */
	if (dw_tail) {
		dw_tail->next = dwpnt;
		dw_tail = dwpnt;
	} else {
		dw_head = dwpnt;
		dw_tail = dwpnt;
	}

	/* allocate memory as a "Table" file */
	dwpnt->table = e_malloc(size);
	dwpnt->name = NULL;
	dwpnt->next = NULL;
	dwpnt->size = size;
	dwpnt->extent = last_extent;
	last_extent += ISO_BLOCKS(size);

	/* return the size in HFS blocks */
	return (ISO_ROUND_UP(size) / HFS_BLOCKSZ);
}

struct output_fragment hfs_desc       = {NULL, NULL, NULL,
				hfs_hce_write, "HFS volume header"};

#endif	/* APPLE_HYB */

struct output_fragment padblock_desc  = {NULL, padblock_size, NULL,
				padblock_write, "Initial Padblock"};
struct output_fragment voldesc_desc   = {NULL, oneblock_size, root_gen,
				pvd_write, "Primary Volume Descriptor"};
struct output_fragment end_vol        = {NULL, oneblock_size, NULL,
				evd_write, "End Volume Descriptor"};
struct output_fragment version_desc   = {NULL, oneblock_size, NULL,
				vers_write, "Version block"};
struct output_fragment pathtable_desc = {NULL, pathtab_size, generate_path_tables,
				pathtab_write, "Path table"};
struct output_fragment dirtree_desc   = {NULL, dirtr
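
The listing (cut off here at the end of page 1) finishes by registering a series of output_fragment descriptors, each pairing an optional size/generate routine with a write routine and a label. For orientation, below is a minimal, self-contained sketch of how such a fragment table can be driven in three passes (size, generate, write). The struct layout, field names, example fragment, and driver loop are illustrative assumptions for this sketch only and are not taken from write.c.

#include <stdio.h>

/*
 * Illustrative sketch only: the struct layout, field names and the driver
 * loop below are assumptions for illustration, not the mkisofs definitions.
 */
struct frag {
	struct frag *next;		/* next fragment in write order */
	int (*size)(int start_extent);	/* reserve extents, may be NULL */
	int (*generate)(void);		/* build in-memory data, may be NULL */
	int (*write)(FILE *out);	/* emit the fragment, may be NULL */
	const char *name;		/* label used in messages */
};

static int last_extent;			/* hypothetical extent counter */

static int pad_size(int start)
{
	last_extent = start + 16;	/* claim 16 sectors of padding */
	return 0;
}

static int pad_write(FILE *out)
{
	char sector[2048] = {0};
	int i;

	for (i = 0; i < 16; i++)
		fwrite(sector, 1, sizeof(sector), out);
	return 0;
}

int main(void)
{
	struct frag pad = {NULL, pad_size, NULL, pad_write, "Initial Padblock"};
	struct frag *head = &pad;
	struct frag *f;
	FILE *out = fopen("image.iso", "wb");

	if (out == NULL)
		return 1;

	/* pass 1: let each fragment reserve the extents it needs */
	for (f = head; f; f = f->next)
		if (f->size && f->size(last_extent) < 0)
			fprintf(stderr, "%s: size pass failed\n", f->name);

	/* pass 2: build any in-memory structures */
	for (f = head; f; f = f->next)
		if (f->generate && f->generate() < 0)
			fprintf(stderr, "%s: generate pass failed\n", f->name);

	/* pass 3: write each fragment to the image in list order */
	for (f = head; f; f = f->next)
		if (f->write && f->write(out) < 0)
			fprintf(stderr, "%s: write pass failed\n", f->name);

	fclose(out);
	return 0;
}

Keeping each piece of the image behind a descriptor like this lets the driver assign extents and emit sections in a fixed order while individual fragments (pad block, volume descriptors, path tables, directory tree, HFS header) stay independent of one another.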
