⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 ffs_alloc.c

📁 早期freebsd实现
💻 C
📖 第 1 页 / 共 3 页
字号:
	else
		ipref = pip->i_number;
	if (ipref >= fs->fs_ncg * fs->fs_ipg)
		ipref = 0;
	cg = ino_to_cg(fs, ipref);
	ino = (ino_t)ffs_hashalloc(pip, cg, (long)ipref, mode, ffs_nodealloccg);
	if (ino == 0)
		goto noinodes;
	error = VFS_VGET(pvp->v_mount, ino, ap->a_vpp);
	if (error) {
		VOP_VFREE(pvp, ino, mode);
		return (error);
	}
	ip = VTOI(*ap->a_vpp);
	/* A freshly allocated inode must be all-clear; a nonzero mode here
	 * means the same inode was handed out twice. */
	if (ip->i_mode) {
		printf("mode = 0%o, inum = %d, fs = %s\n",
		    ip->i_mode, ip->i_number, fs->fs_fsmnt);
		panic("ffs_valloc: dup alloc");
	}
	if (ip->i_blocks) {				/* XXX */
		printf("free inode %s/%d had %d blocks\n",
		    fs->fs_fsmnt, ino, ip->i_blocks);
		ip->i_blocks = 0;
	}
	ip->i_flags = 0;
	/*
	 * Set up a new generation number for this inode.
	 */
	if (++nextgennumber < (u_long)time.tv_sec)
		nextgennumber = time.tv_sec;
	ip->i_gen = nextgennumber;
	return (0);
noinodes:
	ffs_fserr(fs, ap->a_cred->cr_uid, "out of inodes");
	uprintf("\n%s: create/symlink failed, no inodes free\n", fs->fs_fsmnt);
	return (ENOSPC);
}

/*
 * Find a cylinder to place a directory.
 *
 * The policy implemented by this algorithm is to select from
 * among those cylinder groups with above the average number of
 * free inodes, the one with the smallest number of directories.
 */
static ino_t
ffs_dirpref(fs)
	register struct fs *fs;
{
	int cg, minndir, mincg, avgifree;

	avgifree = fs->fs_cstotal.cs_nifree / fs->fs_ncg;
	minndir = fs->fs_ipg;
	mincg = 0;
	for (cg = 0; cg < fs->fs_ncg; cg++)
		if (fs->fs_cs(fs, cg).cs_ndir < minndir &&
		    fs->fs_cs(fs, cg).cs_nifree >= avgifree) {
			mincg = cg;
			minndir = fs->fs_cs(fs, cg).cs_ndir;
		}
	/* NOTE(review): returns the first inode number of the chosen
	 * cylinder group (group index * inodes-per-group), i.e. a
	 * preference hint, not a specific free inode. */
	return ((ino_t)(fs->fs_ipg * mincg));
}

/*
 * Select the desired position for the next block in a file.  The file is
 * logically divided into sections. The first section is composed of the
 * direct blocks. Each additional section contains fs_maxbpg blocks.
 *
 * If no blocks have been allocated in the first section, the policy is to
 * request a block in the same cylinder group as the inode that describes
 * the file. 
If no blocks have been allocated in any other section, the
 * policy is to place the section in a cylinder group with a greater than
 * average number of free blocks.  An appropriate cylinder group is found
 * by using a rotor that sweeps the cylinder groups. When a new group of
 * blocks is needed, the sweep begins in the cylinder group following the
 * cylinder group from which the previous allocation was made. The sweep
 * continues until a cylinder group with greater than the average number
 * of free blocks is found. If the allocation is for the first block in an
 * indirect block, the information on the previous allocation is unavailable;
 * here a best guess is made based upon the logical block number being
 * allocated.
 *
 * If a section is already partially allocated, the policy is to
 * contiguously allocate fs_maxcontig blocks.  The end of one of these
 * contiguous blocks and the beginning of the next is physically separated
 * so that the disk head will be in transit between them for at least
 * fs_rotdelay milliseconds.  This is to allow time for the processor to
 * schedule another I/O transfer.
 */
daddr_t
ffs_blkpref(ip, lbn, indx, bap)
	struct inode *ip;
	daddr_t lbn;
	int indx;
	daddr_t *bap;
{
	register struct fs *fs;
	register int cg;
	int avgbfree, startcg;
	daddr_t nextblk;

	fs = ip->i_fs;
	/* Start of a section (indx is a multiple of fs_maxbpg) or no
	 * previous block to be contiguous with.  Note indx == 0 satisfies
	 * the first test, so bap[-1] is never read. */
	if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
		if (lbn < NDADDR) {
			cg = ino_to_cg(fs, ip->i_number);
			return (fs->fs_fpg * cg + fs->fs_frag);
		}
		/*
		 * Find a cylinder with greater than average number of
		 * unused data blocks.
		 */
		if (indx == 0 || bap[indx - 1] == 0)
			startcg =
			    ino_to_cg(fs, ip->i_number) + lbn / fs->fs_maxbpg;
		else
			startcg = dtog(fs, bap[indx - 1]) + 1;
		startcg %= fs->fs_ncg;
		avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
		/* Sweep startcg..ncg-1, then wrap 0..startcg. */
		for (cg = startcg; cg < fs->fs_ncg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				fs->fs_cgrotor = cg;
				return (fs->fs_fpg * cg + fs->fs_frag);
			}
		for (cg = 0; cg <= startcg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				fs->fs_cgrotor = cg;
				return (fs->fs_fpg * cg + fs->fs_frag);
			}
		/* NOTE(review): NULL used as a zero daddr_t sentinel
		 * ("no preference") — historic BSD idiom. */
		return (NULL);
	}
	/*
	 * One or more previous blocks have been laid out. If less
	 * than fs_maxcontig previous blocks are contiguous, the
	 * next block is requested contiguously, otherwise it is
	 * requested rotationally delayed by fs_rotdelay milliseconds.
	 */
	nextblk = bap[indx - 1] + fs->fs_frag;
	if (indx < fs->fs_maxcontig || bap[indx - fs->fs_maxcontig] +
	    blkstofrags(fs, fs->fs_maxcontig) != nextblk)
		return (nextblk);
	if (fs->fs_rotdelay != 0)
		/*
		 * Here we convert ms of delay to frags as:
		 * (frags) = (ms) * (rev/sec) * (sect/rev) /
		 *	((sect/frag) * (ms/sec))
		 * then round up to the next block.
		 */
		nextblk += roundup(fs->fs_rotdelay * fs->fs_rps * fs->fs_nsect /
		    (NSPF(fs) * 1000), fs->fs_frag);
	return (nextblk);
}

/*
 * Implement the cylinder overflow algorithm.
 *
 * The policy implemented by this algorithm is:
 *   1) allocate the block in its requested cylinder group.
 *   2) quadradically rehash on the cylinder group number.
 *   3) brute force search for a free block. 
*//*VARARGS5*/static u_longffs_hashalloc(ip, cg, pref, size, allocator)	struct inode *ip;	int cg;	long pref;	int size;	/* size for data blocks, mode for inodes */	u_long (*allocator)();{	register struct fs *fs;	long result;	int i, icg = cg;	fs = ip->i_fs;	/*	 * 1: preferred cylinder group	 */	result = (*allocator)(ip, cg, pref, size);	if (result)		return (result);	/*	 * 2: quadratic rehash	 */	for (i = 1; i < fs->fs_ncg; i *= 2) {		cg += i;		if (cg >= fs->fs_ncg)			cg -= fs->fs_ncg;		result = (*allocator)(ip, cg, 0, size);		if (result)			return (result);	}	/*	 * 3: brute force search	 * Note that we start at i == 2, since 0 was checked initially,	 * and 1 is always checked in the quadratic rehash.	 */	cg = (icg + 2) % fs->fs_ncg;	for (i = 2; i < fs->fs_ncg; i++) {		result = (*allocator)(ip, cg, 0, size);		if (result)			return (result);		cg++;		if (cg == fs->fs_ncg)			cg = 0;	}	return (NULL);}/* * Determine whether a fragment can be extended. * * Check to see if the necessary fragments are available, and  * if they are, allocate them. 
 */
static daddr_t
ffs_fragextend(ip, cg, bprev, osize, nsize)
	struct inode *ip;
	int cg;
	long bprev;	/* current fragment address to be grown in place */
	int osize, nsize;	/* old and new sizes, in bytes */
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	long bno;
	int frags, bbase;
	int i, error;

	fs = ip->i_fs;
	/* Quick reject from the per-cg summary before touching the disk. */
	if (fs->fs_cs(fs, cg).cs_nffree < numfrags(fs, nsize - osize))
		return (NULL);
	frags = numfrags(fs, nsize);
	bbase = fragnum(fs, bprev);
	if (bbase > fragnum(fs, (bprev + frags - 1))) {
		/* cannot extend across a block boundary */
		return (NULL);
	}
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
		(int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (NULL);
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp)) {
		brelse(bp);
		return (NULL);
	}
	cgp->cg_time = time.tv_sec;
	bno = dtogd(fs, bprev);
	/* Every fragment we would grow into must currently be free. */
	for (i = numfrags(fs, osize); i < frags; i++)
		if (isclr(cg_blksfree(cgp), bno + i)) {
			brelse(bp);
			return (NULL);
		}
	/*
	 * the current fragment can be extended
	 * deduct the count on fragment being extended into
	 * increase the count on the remaining fragment (if any)
	 * allocate the extended piece
	 */
	for (i = frags; i < fs->fs_frag - bbase; i++)
		if (isclr(cg_blksfree(cgp), bno + i))
			break;
	/* The free run following the old fragment had size
	 * i - numfrags(osize); drop it from the frag-size summary and,
	 * if we did not consume it all, credit the leftover run. */
	cgp->cg_frsum[i - numfrags(fs, osize)]--;
	if (i != frags)
		cgp->cg_frsum[i - frags]++;
	for (i = numfrags(fs, osize); i < frags; i++) {
		clrbit(cg_blksfree(cgp), bno + i);
		cgp->cg_cs.cs_nffree--;
		fs->fs_cstotal.cs_nffree--;
		fs->fs_cs(fs, cg).cs_nffree--;
	}
	fs->fs_fmod = 1;
	bdwrite(bp);
	return (bprev);
}

/*
 * Determine whether a block can be allocated.
 *
 * Check to see if a block of the appropriate size is available,
 * and if it is, allocate it. 
 */
static daddr_t
ffs_alloccg(ip, cg, bpref, size)
	struct inode *ip;
	int cg;
	daddr_t bpref;	/* preferred fragment address, 0 for none */
	int size;	/* requested size in bytes, <= fs_bsize */
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	register int i;
	int error, bno, frags, allocsiz;

	fs = ip->i_fs;
	if (fs->fs_cs(fs, cg).cs_nbfree == 0 && size == fs->fs_bsize)
		return (NULL);
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
		(int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (NULL);
	}
	cgp = (struct cg *)bp->b_data;
	/* Re-check against the on-disk cg: the in-core summary may lag. */
	if (!cg_chkmagic(cgp) ||
	    (cgp->cg_cs.cs_nbfree == 0 && size == fs->fs_bsize)) {
		brelse(bp);
		return (NULL);
	}
	cgp->cg_time = time.tv_sec;
	if (size == fs->fs_bsize) {
		bno = ffs_alloccgblk(fs, cgp, bpref);
		bdwrite(bp);
		return (bno);
	}
	/*
	 * check to see if any fragments are already available
	 * allocsiz is the size which will be allocated, hacking
	 * it down to a smaller size if necessary
	 */
	frags = numfrags(fs, size);
	for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++)
		if (cgp->cg_frsum[allocsiz] != 0)
			break;
	if (allocsiz == fs->fs_frag) {
		/*
		 * no fragments were available, so a block will be 
		 * allocated, and hacked up
		 */
		if (cgp->cg_cs.cs_nbfree == 0) {
			brelse(bp);
			return (NULL);
		}
		bno = ffs_alloccgblk(fs, cgp, bpref);
		bpref = dtogd(fs, bno);
		/* Return the unused tail of the block to the free map. */
		for (i = frags; i < fs->fs_frag; i++)
			setbit(cg_blksfree(cgp), bpref + i);
		i = fs->fs_frag - frags;
		cgp->cg_cs.cs_nffree += i;
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cg).cs_nffree += i;
		fs->fs_fmod = 1;
		cgp->cg_frsum[i]++;
		bdwrite(bp);
		return (bno);
	}
	bno = ffs_mapsearch(fs, cgp, bpref, allocsiz);
	if (bno < 0) {
		brelse(bp);
		return (NULL);
	}
	for (i = 0; i < frags; i++)
		clrbit(cg_blksfree(cgp), bno + i);
	cgp->cg_cs.cs_nffree -= frags;
	fs->fs_cstotal.cs_nffree -= frags;
	fs->fs_cs(fs, cg).cs_nffree -= frags;
	fs->fs_fmod = 1;
	/* We carved frags out of a free run of size allocsiz; account
	 * for the leftover run, if any. */
	cgp->cg_frsum[allocsiz]--;
	if (frags != allocsiz)
		cgp->cg_frsum[allocsiz - frags]++;
	bdwrite(bp);
	/* Convert cg-relative fragment number to a filesystem-wide one. */
	return (cg * fs->fs_fpg + bno);
}

/*
 * Allocate a block in a cylinder group. 
 *
 * This algorithm implements the following policy:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate the next available block on the block rotor for the
 *      specified cylinder group.
 * Note that this routine only allocates fs_bsize blocks; these
 * blocks may be fragmented by the routine that allocates them.
 */
static daddr_t
ffs_alloccgblk(fs, cgp, bpref)
	register struct fs *fs;
	register struct cg *cgp;
	daddr_t bpref;	/* preferred fragment address, 0 for none */
{
	daddr_t bno, blkno;
	int cylno, pos, delta;
	short *cylbp;
	register int i;

	/* No usable preference: fall back to this group's rotor, which
	 * remembers where the previous search left off. */
	if (bpref == 0 || dtog(fs, bpref) != cgp->cg_cgx) {
		bpref = cgp->cg_rotor;
		goto norot;
	}
	bpref = blknum(fs, bpref);
	bpref = dtogd(fs, bpref);
	/*
	 * if the requested block is available, use it
	 */
	if (ffs_isblock(fs, cg_blksfree(cgp), fragstoblks(fs, bpref))) {
		bno = bpref;
		goto gotit;
	}
	/*
	 * check for a block available on the same cylinder
	 */
	cylno = cbtocylno(fs, bpref);
	if (cg_blktot(cgp)[cylno] == 0)
		goto norot;
	if (fs->fs_cpc == 0) {
		/*
		 * Block layout information is not available.
		 * Leaving bpref unchanged means we take the
		 * next available free block following the one 
		 * we just allocated. Hopefully this will at
		 * least hit a track cache on drives of unknown
		 * geometry (e.g. SCSI).
		 */
		goto norot;
	}
	/*
	 * check the summary information to see if a block is 
	 * available in the requested cylinder starting at the
	 * requested rotational position and proceeding around.
	 */
	cylbp = cg_blks(fs, cgp, cylno);
	pos = cbtorpos(fs, bpref);
	for (i = pos; i < fs->fs_nrpos; i++)
		if (cylbp[i] > 0)
			break;
	if (i == fs->fs_nrpos)
		for (i = 0; i < pos; i++)
			if (cylbp[i] > 0)
				break;
	if (cylbp[i] > 0) {
		/*
		 * found a rotational position, now find the actual
		 * block. A panic if none is actually there.
		 */
		pos = cylno % fs->fs_cpc;
		bno = (cylno - pos) * fs->fs_spc / NSPB(fs);
		if (fs_postbl(fs, pos)[i] == -1) {
			printf("pos = %d, i = %d, fs = %s\n",
			    pos, i, fs->fs_fsmnt);
			panic("ffs_alloccgblk: cyl groups corrupted");
		}
		/* Walk the rotational-layout chain (fs_rotbl deltas)
		 * until a free block at this position is found. */
		for (i = fs_postbl(fs, pos)[i];; ) {
			if (ffs_isblock(fs, cg_blksfree(cgp), bno + i)) {
				bno = blkstofrags(fs, (bno + i));
				goto gotit;
			}
			delta = fs_rotbl(fs)[i];
			if (delta <= 0 ||
			    delta + i > fragstoblks(fs, fs->fs_fpg))
				break;
			i += delta;
		}
		printf("pos = %d, i = %d, fs = %s\n", pos, i, fs->fs_fsmnt);
		panic("ffs_alloccgblk: can't find blk in cyl");
	}
norot:
	/*
	 * no blocks in the requested cylinder, so take next
	 * available one in this cylinder group.
	 */
	bno = ffs_mapsearch(fs, cgp, bpref, (int)fs->fs_frag);
	if (bno < 0)
		return (NULL);
	cgp->cg_rotor = bno;
gotit:
	/* Mark the block allocated and update every level of the
	 * free-space bookkeeping: cluster map, cg counters, superblock
	 * totals, and the per-cylinder rotational summaries. */
	blkno = fragstoblks(fs, bno);
	ffs_clrblock(fs, cg_blksfree(cgp), (long)blkno);
	ffs_clusteracct(fs, cgp, blkno, -1);
	cgp->cg_cs.cs_nbfree--;
	fs->fs_cstotal.cs_nbfree--;
	fs->fs_cs(fs, cgp->cg_cgx).cs_nbfree--;
	cylno = cbtocylno(fs, bno);
	cg_blks(fs, cgp, cylno)[cbtorpos(fs, bno)]--;
	cg_blktot(cgp)[cylno]--;
	fs->fs_fmod = 1;
	return (cgp->cg_cgx * fs->fs_fpg + bno);
}

/*
 * Determine whether a cluster can be allocated.
 *
 * We do not currently check for optimal rotational layout if there
 * are multiple choices in the same cylinder group. Instead we just
 * take the first one that we find following bpref.
 */
static daddr_t
ffs_clusteralloc(ip, cg, bpref, len)
	struct inode *ip;

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -