/*
 * ufs_alloc.c -- UFS cylinder-group block and fragment allocation routines.
 * (Excerpt recovered from a web code viewer; begins mid-way through dirpref().)
 */
minndir = fs->fs_cs(fs, cg).cs_ndir; } return ((gno_t)(fs->fs_ipg * mincg));}/* * Select the desired position for the next block in a file. The file is * logically divided into sections. The first section is composed of the * direct blocks. Each additional section contains fs_maxbpg blocks. * * If no blocks have been allocated in the first section, the policy is to * request a block in the same cylinder group as the gnode that describes * the file. If no blocks have been allocated in any other section, the * policy is to place the section in a cylinder group with a greater than * average number of free blocks. An appropriate cylinder group is found * by using a rotor that sweeps the cylinder groups. When a new group of * blocks is needed, the sweep begins in the cylinder group following the * cylinder group from which the previous allocation was made. The sweep * continues until a cylinder group with greater than the average number * of free blocks is found. If the allocation is for the first block in an * indirect block, the information on the previous allocation is unavailable; * here a best guess is made based upon the logical block number being * allocated. * * If a section is already partially allocated, the policy is to * contiguously allocate fs_maxcontig blocks. The end of one of these * contiguous blocks and the beginning of the next is physically separated * so that the disk head will be in transit between them for at least * fs_rotdelay milliseconds. This is to allow time for the processor to * schedule another I/O transfer. */daddr_tblkpref(gp, lbn, indx, bap) register struct gnode *gp; daddr_t lbn; register int indx; daddr_t *bap;{ register struct fs *fs; register int cg; register int avgbfree, startcg; register daddr_t nextblk; int lookbehind = 1; extern int ufs_blkpref_lookbehind; fs = FS(gp); /* * If we are writing at least the second block, * try to locate the closest block, within * ufs_blkpref_lookbehind blocks. 
*/ if (ufs_blkpref_lookbehind > 1 && bap && indx > 0) { int floor; int ind; if (indx <= ufs_blkpref_lookbehind) floor = 0; else floor = indx - ufs_blkpref_lookbehind; /* * Start at the block immediatly preceeding the block * we want to allocate, and search until we find an * allocated block, or until ufs_blkpref_lookbehind * blocks have been searched. */ for (ind = indx - 1, lookbehind = 1; ind >= floor; ind--, lookbehind++) if (bap[ind] != 0) break; if (ind < floor) lookbehind = 1; } /* * At this point, lookbehind is either set to one, meaning that * no blocks within ufs_blkpref_lookbehind blocks preceeding * block we want to allcate are currently allocated. Or if * lookbehind > 1, bap[indx - lookbehind] is allocated, and * we should determine our preference from that block. The former * case will drop into the following if, the latter will jump * around the if. */ if (indx % fs->fs_maxbpg == 0 || bap[indx - lookbehind] == 0) { if (lbn < NDADDR) { cg = itog(fs, gp->g_number); return (fs->fs_fpg * cg + fs->fs_frag); } /* * Find a cylinder group with greater than average number of * unused data blocks. */ if (indx == 0 || bap[indx - lookbehind] == 0) startcg = itog(fs, gp->g_number) + lbn / fs->fs_maxbpg; else startcg = dtog(fs, bap[indx - lookbehind]) + 1; startcg %= fs->fs_ncg; avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg; for (cg = startcg; cg < fs->fs_ncg; cg++) if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) { fs->fs_cgrotor = cg; return (fs->fs_fpg * cg + fs->fs_frag); } for (cg = 0; cg <= startcg; cg++) if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) { fs->fs_cgrotor = cg; return (fs->fs_fpg * cg + fs->fs_frag); } return (NULL); } /* * One or more previous blocks have been laid out. If less * than fs_maxcontig previous blocks are contiguous, the * next block is requested contiguously, otherwise it is * requested rotationally delayed by fs_rotdelay milliseconds. * * lookbehind is set to the offset from indx in bap where a * block is already allocated. 
Simply add (lookbehind * number * of frags per block) to the block number of previosly allocated * block, and we will have our preference. */ nextblk = bap[indx - lookbehind] + (lookbehind * fs->fs_frag);#ifdef notdef if (indx > fs->fs_maxcontig && bap[indx - fs->fs_maxcontig] + blkstofrags(fs, fs->fs_maxcontig) != nextblk) return (nextblk);#endif if (fs->fs_rotdelay != 0 && indx != 0 && indx % fs->fs_maxcontig == 0) { /* * fs->fs_rotdelay != 0 && new block is on * fs_maxcontig boundry. * * Here we convert ms of delay to frags as: * (frags) = (ms) * (rev/sec) * (sect/rev) / * ((sect/frag) * (ms/sec)) * then round up to the next block. */ nextblk += roundup(fs->fs_rotdelay * fs->fs_rps * fs->fs_nsect /(NSPF(fs) * 1000), fs->fs_frag); } #ifdef notdef if (fs->fs_rotdelay != 0) nextblk += roundup(fs->fs_rotdelay * fs->fs_rps * fs->fs_nsect / (NSPF(fs) * 1000), fs->fs_frag);#endif return (nextblk);}/* * Implement the cylinder overflow algorithm. * * The policy implemented by this algorithm is: * 1) allocate the block in its requested cylinder group. * 2) quadradically rehash on the cylinder group number. * 3) brute force search for a free block. *//*VARARGS5*/u_longhashalloc(gp, cg, pref, size, allocator) register struct gnode *gp; register int cg; long pref; int size; /* size for data blocks, mode for gnodes */ register u_long (*allocator)();{ register struct fs *fs; register long result; register int i; int icg = cg; fs = FS(gp); /* * 1: preferred cylinder group */ result = (*allocator)(gp, cg, pref, size); if (result) return (result); /* * 2: quadratic rehash */ for (i = 1; i < fs->fs_ncg; i *= 2) { cg += i; if (cg >= fs->fs_ncg) cg -= fs->fs_ncg; result = (*allocator)(gp, cg, 0, size); if (result) return (result); } /* * 3: brute force search * Note that we start at i == 2, since 0 was checked initially, * and 1 is always checked in the quadratic rehash. 
*/ cg = (icg + 2) % fs->fs_ncg; for (i = 2; i < fs->fs_ncg; i++) { result = (*allocator)(gp, cg, 0, size); if (result) return (result); cg++; if (cg == fs->fs_ncg) cg = 0; } return (NULL);}/* * Determine whether a fragment can be extended. * * Check to see if the necessary fragments are available, and * if they are, allocate them. */daddr_tfragextend(gp, cg, bprev, osize, nsize) register struct gnode *gp; int cg; long bprev; int osize, nsize;{ register struct fs *fs; register struct buf *bp; register struct cg *cgp; register long bno; int frags, bbase; register int i; fs = FS(gp); if (fs->fs_cs(fs, cg).cs_nffree < numfrags(fs, nsize - osize)) return (NULL); frags = numfrags(fs, nsize); bbase = fragnum(fs, bprev); if (bbase > fragnum(fs, (bprev + frags - 1))) { /* cannot extend across a block boundry */ return (NULL); } bp = bread(gp->g_dev, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_cgsize, (struct gnode *) NULL); cgp = bp->b_un.b_cg; if (bp->b_flags & B_ERROR || cgp->cg_magic != CG_MAGIC) { brelse(bp); return (NULL); } cgp->cg_time = timepick->tv_sec; bno = dtogd(fs, bprev); for (i = numfrags(fs, osize); i < frags; i++) if (isclr(cgp->cg_free, bno + i)) { brelse(bp); return (NULL); } /* * the current fragment can be extended * deduct the count on fragment being extended into * increase the count on the remaining fragment (if any) * allocate the extended piece */ for (i = frags; i < fs->fs_frag - bbase; i++) if (isclr(cgp->cg_free, bno + i)) break; cgp->cg_frsum[i - numfrags(fs, osize)]--; if (i != frags) cgp->cg_frsum[i - frags]++; fs_lock(gp->g_mp); for (i = numfrags(fs, osize); i < frags; i++) { clrbit(cgp->cg_free, bno + i); cgp->cg_cs.cs_nffree--; fs->fs_cstotal.cs_nffree--; fs->fs_cs(fs, cg).cs_nffree--; } gp->g_mp->m_flags |= M_MOD; fs_unlock(gp->g_mp); if (gp->g_mp->m_flags & M_SYNC) bwrite(bp); else bdwrite(bp); return (bprev);}/* * Determine whether a block can be allocated. 
* * Check to see if a block of the apprpriate size is available, * and if it is, allocate it. */daddr_talloccg(gp, cg, bpref, size) struct gnode *gp; int cg; daddr_t bpref; int size;{ register struct fs *fs; register struct buf *bp; register struct cg *cgp; register int bno; register int allocsiz; int frags; register int i; fs = FS(gp); if (fs->fs_cs(fs, cg).cs_nbfree == 0 && size == fs->fs_bsize) return (NULL); bp = bread(gp->g_dev, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_cgsize, (struct gnode *) NULL); cgp = bp->b_un.b_cg; if (bp->b_flags & B_ERROR || cgp->cg_magic != CG_MAGIC || (cgp->cg_cs.cs_nbfree == 0 && size == fs->fs_bsize)) { brelse(bp); return (NULL); } cgp->cg_time = timepick->tv_sec; if (size == fs->fs_bsize) { bno = alloccgblk(fs, cgp, bpref, gp->g_mp); if (gp->g_mp->m_flags & M_SYNC) bwrite(bp); else bdwrite(bp); return (bno); } /* * check to see if any fragments are already available * allocsiz is the size which will be allocated, hacking * it down to a smaller size if necessary */ frags = numfrags(fs, size); for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++) if (cgp->cg_frsum[allocsiz] != 0) break; if (allocsiz == fs->fs_frag) { /* * no fragments were available, so a block will be * allocated, and hacked up */ if (cgp->cg_cs.cs_nbfree == 0) { brelse(bp); return (NULL); } bno = alloccgblk(fs, cgp, bpref, gp->g_mp); bpref = dtogd(fs, bno); for (i = frags; i < fs->fs_frag; i++) setbit(cgp->cg_free, bpref + i); i = fs->fs_frag - frags; cgp->cg_cs.cs_nffree += i; fs_lock(gp->g_mp); fs->fs_cstotal.cs_nffree += i; fs->fs_cs(fs, cg).cs_nffree += i; gp->g_mp->m_flags |= M_MOD; fs_unlock(gp->g_mp); cgp->cg_frsum[i]++; if (gp->g_mp->m_flags & M_SYNC) bwrite(bp); else bdwrite(bp); return (bno); } bno = mapsearch(fs, cgp, bpref, allocsiz); if (bno < 0) { brelse(bp); return (NULL); } for (i = 0; i < frags; i++) clrbit(cgp->cg_free, bno + i); cgp->cg_cs.cs_nffree -= frags; fs_lock(gp->g_mp); fs->fs_cstotal.cs_nffree -= frags; fs->fs_cs(fs, cg).cs_nffree 
-= frags; gp->g_mp->m_flags |= M_MOD; fs_unlock(gp->g_mp); cgp->cg_frsum[allocsiz]--; if (frags != allocsiz) cgp->cg_frsum[allocsiz - frags]++; if (gp->g_mp->m_flags & M_SYNC) bwrite(bp); else bdwrite(bp); return (cg * fs->fs_fpg + bno);}/* * Allocate a block in a cylinder group. * * This algorithm implements the following policy: * 1) allocate the requested block. * 2) allocate a rotationally optimal block in the same cylinder. * 3) allocate the next available block on the block rotor for the * specified cylinder group. * Note that this routine only allocates fs_bsize blocks; these * blocks may be fragmented by the routine that allocates them. */daddr_talloccgblk(fs, cgp, bpref, mp) register struct fs *fs; register struct cg *cgp; register daddr_t bpref; struct mount *mp;{ register daddr_t bno; int cylno, pos, delta; short *cylbp; register int i; if (bpref == 0) { bpref = cgp->cg_rotor;
/*
 * NOTE(review): excerpt is truncated here -- the remainder of alloccgblk()
 * continues beyond this point.  (Web-viewer UI chrome removed.)
 */