/*
 * lfs_segment.c
 * From an early FreeBSD (4.4BSD-derived) LFS implementation.
 * C source, 1,112 lines total; this is page 1 of 2.
 */
/*
 * Tail of lfs_updatemeta (the function header is above this view):
 * assign on-disk addresses to the gathered blocks and update the
 * metadata that references them.
 * NOTE(review): sp, fs, vp, ip, bp, sup, a and ap are declared in the
 * unseen head of the function -- confirm against the full file.
 */
	daddr_t daddr, lbn, off;
	int db_per_fsb, error, i, nblocks, num;

	vp = sp->vp;
	/* Number of block entries accumulated in the current FINFO. */
	nblocks = &sp->fip->fi_blocks[sp->fip->fi_nblocks] - sp->start_lbp;
	if (vp == NULL || nblocks == 0)
		return;

	/* Sort the blocks. */
	if (!(sp->seg_flags & SEGM_CLEAN))
		lfs_shellsort(sp->start_bpp, sp->start_lbp, nblocks);

	/*
	 * Assign disk addresses, and update references to the logical
	 * block and the segment usage information.
	 */
	fs = sp->fs;
	db_per_fsb = fsbtodb(fs, 1);
	for (i = nblocks; i--; ++sp->start_bpp) {
		lbn = *sp->start_lbp++;
		/* Next free disk address in the current partial segment. */
		(*sp->start_bpp)->b_blkno = off = fs->lfs_offset;
		fs->lfs_offset += db_per_fsb;

		/* Find the block's old address and its place in the inode. */
		if (error = ufs_bmaparray(vp, lbn, &daddr, a, &num, NULL))
			panic("lfs_updatemeta: ufs_bmaparray %d", error);
		ip = VTOI(vp);
		switch (num) {
		case 0:
			/* Direct block: update the inode slot in place. */
			ip->i_db[lbn] = off;
			break;
		case 1:
			/* One level of indirection. */
			ip->i_ib[a[0].in_off] = off;
			break;
		default:
			/* Deeper: rewrite the parent indirect block. */
			ap = &a[num - 1];
			if (bread(vp, ap->in_lbn, fs->lfs_bsize, NOCRED, &bp))
				panic("lfs_updatemeta: bread bno %d",
				    ap->in_lbn);
			/*
			 * Bread may create a new indirect block which needs
			 * to get counted for the inode.
			 */
			if (bp->b_blkno == -1 && !(bp->b_flags & B_CACHE)) {
				printf ("Updatemeta allocating indirect block: shouldn't happen\n");
				ip->i_blocks += btodb(fs->lfs_bsize);
				fs->lfs_bfree -= btodb(fs->lfs_bsize);
			}
			((daddr_t *)bp->b_data)[ap->in_off] = off;
			VOP_BWRITE(bp);
		}

		/* Update segment usage information. */
		if (daddr != UNASSIGNED &&
		    !(daddr >= fs->lfs_lastpseg && daddr <= off)) {
			/*
			 * The block lived in some other (older) segment;
			 * debit that segment's live-byte count.
			 */
			LFS_SEGENTRY(sup, fs, datosn(fs, daddr), bp);
#ifdef DIAGNOSTIC
			if (sup->su_nbytes < fs->lfs_bsize) {
				/* XXX -- Change to a panic. */
				printf("lfs: negative bytes (segment %d)\n",
				    datosn(fs, daddr));
				panic ("Negative Bytes");
			}
#endif
			sup->su_nbytes -= fs->lfs_bsize;
			error = VOP_BWRITE(bp);
		}
	}
}

/*
 * Start a new segment.
 * Returns nonzero ("repeat") when it advanced to a fresh segment,
 * so the caller knows the previous partial segment was closed out.
 */
int
lfs_initseg(fs)
	struct lfs *fs;
{
	struct segment *sp;
	SEGUSE *sup;
	SEGSUM *ssp;
	struct buf *bp;
	int repeat;

	sp = fs->lfs_sp;

	repeat = 0;
	/* Advance to the next segment. */
	if (!LFS_PARTIAL_FITS(fs)) {
		/* Wake up any cleaning procs waiting on this file system. */
		wakeup(&lfs_allclean_wakeup);
		lfs_newseg(fs);
		repeat = 1;
		fs->lfs_offset = fs->lfs_curseg;
		sp->seg_number = datosn(fs, fs->lfs_curseg);
		sp->seg_bytes_left = fs->lfs_dbpseg * DEV_BSIZE;

		/*
		 * If the segment contains a superblock, update the offset
		 * and summary address to skip over it.
		 */
		LFS_SEGENTRY(sup, fs, sp->seg_number, bp);
		if (sup->su_flags & SEGUSE_SUPERBLOCK) {
			fs->lfs_offset += LFS_SBPAD / DEV_BSIZE;
			sp->seg_bytes_left -= LFS_SBPAD;
		}
		brelse(bp);
	} else {
		/* Continue in the current segment from lfs_offset. */
		sp->seg_number = datosn(fs, fs->lfs_curseg);
		sp->seg_bytes_left = (fs->lfs_dbpseg -
		    (fs->lfs_offset - fs->lfs_curseg)) * DEV_BSIZE;
	}
	fs->lfs_lastpseg = fs->lfs_offset;

	sp->fs = fs;
	sp->ibp = NULL;
	sp->ninodes = 0;

	/* Get a new buffer for SEGSUM and enter it into the buffer list. */
	sp->cbpp = sp->bpp;
	*sp->cbpp = lfs_newbuf(VTOI(fs->lfs_ivnode)->i_devvp, fs->lfs_offset,
	    LFS_SUMMARY_SIZE);
	sp->segsum = (*sp->cbpp)->b_data;
	bzero(sp->segsum, LFS_SUMMARY_SIZE);
	sp->start_bpp = ++sp->cbpp;
	fs->lfs_offset += LFS_SUMMARY_SIZE / DEV_BSIZE;

	/* Set point to SEGSUM, initialize it. */
	ssp = sp->segsum;
	ssp->ss_next = fs->lfs_nextseg;
	ssp->ss_nfinfo = ssp->ss_ninos = 0;

	/* Set pointer to first FINFO, initialize it. */
	sp->fip = (struct finfo *)(sp->segsum + sizeof(SEGSUM));
	sp->fip->fi_nblocks = 0;
	sp->start_lbp = &sp->fip->fi_blocks[0];

	sp->seg_bytes_left -= LFS_SUMMARY_SIZE;
	sp->sum_bytes_left = LFS_SUMMARY_SIZE - sizeof(SEGSUM);

	return(repeat);
}

/*
 * Return the next segment to write.
*/
void
lfs_newseg(fs)
	struct lfs *fs;
{
	CLEANERINFO *cip;
	SEGUSE *sup;
	struct buf *bp;
	int curseg, isdirty, sn;

	/* Mark the segment we are about to write dirty and active. */
	LFS_SEGENTRY(sup, fs, datosn(fs, fs->lfs_nextseg), bp);
	sup->su_flags |= SEGUSE_DIRTY | SEGUSE_ACTIVE;
	sup->su_nbytes = 0;
	sup->su_nsums = 0;
	sup->su_ninos = 0;
	(void) VOP_BWRITE(bp);

	/* Tell the cleaner: one fewer clean segment, one more dirty. */
	LFS_CLEANERINFO(cip, fs, bp);
	--cip->clean;
	++cip->dirty;
	(void) VOP_BWRITE(bp);

	fs->lfs_lastseg = fs->lfs_curseg;
	fs->lfs_curseg = fs->lfs_nextseg;
	/* Scan forward (with wraparound) for the next clean segment. */
	for (sn = curseg = datosn(fs, fs->lfs_curseg);;) {
		sn = (sn + 1) % fs->lfs_nseg;
		if (sn == curseg)
			panic("lfs_nextseg: no clean segments");
		LFS_SEGENTRY(sup, fs, sn, bp);
		isdirty = sup->su_flags & SEGUSE_DIRTY;
		brelse(bp);
		if (!isdirty)
			break;
	}

	++fs->lfs_nactive;
	fs->lfs_nextseg = sntoda(fs, sn);
#ifdef DOSTATS
	++lfs_stats.segsused;
#endif
}

int
lfs_writeseg(fs, sp)
	struct lfs *fs;
	struct segment *sp;
{
	extern int locked_queue_count;
	struct buf **bpp, *bp, *cbp;
	SEGUSE *sup;
	SEGSUM *ssp;
	dev_t i_dev;
	size_t size;
	u_long *datap, *dp;
	int ch_per_blk, do_again, i, nblocks, num, s;
	int (*strategy)__P((struct vop_strategy_args *));
	struct vop_strategy_args vop_strategy_a;
	u_short ninos;
	char *p;

	/*
	 * If there are no buffers other than the segment summary to write
	 * and it is not a checkpoint, don't do anything.  On a checkpoint,
	 * even if there aren't any buffers, you need to write the superblock.
	 */
	if ((nblocks = sp->cbpp - sp->bpp) == 1)
		return (0);

	ssp = (SEGSUM *)sp->segsum;

	/* Update the segment usage information. */
	LFS_SEGENTRY(sup, fs, sp->seg_number, bp);
	/* Number of inode blocks, rounded up. */
	ninos = (ssp->ss_ninos + INOPB(fs) - 1) / INOPB(fs);
	/*
	 * NOTE(review): by C precedence this parses as
	 * (nblocks - 1 - ninos) << lfs_bshift, i.e. data blocks in bytes.
	 */
	sup->su_nbytes += nblocks - 1 - ninos << fs->lfs_bshift;
	sup->su_nbytes += ssp->ss_ninos * sizeof(struct dinode);
	sup->su_nbytes += LFS_SUMMARY_SIZE;
	sup->su_lastmod = time.tv_sec;
	sup->su_ninos += ninos;
	++sup->su_nsums;
	/* do_again is ORed into the return value at the end of the routine. */
	do_again = !(bp->b_flags & B_GATHERED);
	(void)VOP_BWRITE(bp);
	/*
	 * Compute checksum across data and then across summary; the first
	 * block (the summary block) is skipped.
Set the create time here
	 * so that it's guaranteed to be later than the inode mod times.
	 *
	 * XXX
	 * Fix this to do it inline, instead of malloc/copy.
	 */
	datap = dp = malloc(nblocks * sizeof(u_long), M_SEGMENT, M_WAITOK);
	/* Collect the first word of every data block (summary excluded). */
	for (bpp = sp->bpp, i = nblocks - 1; i--;) {
		if ((*++bpp)->b_flags & B_INVAL) {
			/*
			 * Fake buffer from the cleaner: its data still lives
			 * in user space, so copy the word in from there.
			 */
			if (copyin((*bpp)->b_saveaddr, dp++, sizeof(u_long)))
				panic("lfs_writeseg: copyin failed");
		} else
			*dp++ = ((u_long *)(*bpp)->b_data)[0];
	}
	ssp->ss_create = time.tv_sec;
	ssp->ss_datasum = cksum(datap, (nblocks - 1) * sizeof(u_long));
	/* Summary checksum covers everything after ss_sumsum itself. */
	ssp->ss_sumsum =
	    cksum(&ssp->ss_datasum, LFS_SUMMARY_SIZE - sizeof(ssp->ss_sumsum));
	free(datap, M_SEGMENT);
#ifdef DIAGNOSTIC
	if (fs->lfs_bfree < fsbtodb(fs, ninos) + LFS_SUMMARY_SIZE / DEV_BSIZE)
		panic("lfs_writeseg: No diskspace for summary");
#endif
	/* Charge the inode blocks and the summary against free space. */
	fs->lfs_bfree -= (fsbtodb(fs, ninos) + LFS_SUMMARY_SIZE / DEV_BSIZE);

	i_dev = VTOI(fs->lfs_ivnode)->i_dev;
	strategy = VTOI(fs->lfs_ivnode)->i_devvp->v_op[VOFFSET(vop_strategy)];

	/*
	 * When we simply write the blocks we lose a rotation for every block
	 * written.  To avoid this problem, we allocate memory in chunks, copy
	 * the buffers into the chunk and write the chunk.  MAXPHYS is the
	 * largest size I/O devices can handle.
	 * When the data is copied to the chunk, turn off the B_LOCKED bit
	 * and brelse the buffer (which will move them to the LRU list).  Add
	 * the B_CALL flag to the buffer header so we can count I/O's for the
	 * checkpoints and so we can release the allocated memory.
	 *
	 * XXX
	 * This should be removed if the new virtual memory system allows us to
	 * easily make the buffers contiguous in kernel memory and if that's
	 * fast enough.
*/
	ch_per_blk = MAXPHYS / fs->lfs_bsize;
	for (bpp = sp->bpp, i = nblocks; i;) {
		/* Take up to ch_per_blk blocks for this chunk. */
		num = ch_per_blk;
		if (num > i)
			num = i;
		i -= num;
		size = num * fs->lfs_bsize;
		cbp = lfs_newbuf(VTOI(fs->lfs_ivnode)->i_devvp,
		    (*bpp)->b_blkno, size);
		cbp->b_dev = i_dev;
		cbp->b_flags |= B_ASYNC | B_BUSY;

		/* Block I/O interrupts while consuming the source buffers. */
		s = splbio();
		++fs->lfs_iocount;
		for (p = cbp->b_data; num--;) {
			bp = *bpp++;
			/*
			 * Fake buffers from the cleaner are marked as B_INVAL.
			 * We need to copy the data from user space rather than
			 * from the buffer indicated.
			 * XXX == what do I do on an error?
			 */
			if (bp->b_flags & B_INVAL) {
				if (copyin(bp->b_saveaddr, p, bp->b_bcount))
					panic("lfs_writeseg: copyin failed");
			} else
				bcopy(bp->b_data, p, bp->b_bcount);
			p += bp->b_bcount;
			if (bp->b_flags & B_LOCKED)
				--locked_queue_count;
			bp->b_flags &= ~(B_ERROR | B_READ | B_DELWRI |
			    B_LOCKED | B_GATHERED);
			if (bp->b_flags & B_CALL) {
				/* if B_CALL, it was created with newbuf */
				brelvp(bp);
				if (!(bp->b_flags & B_INVAL))
					free(bp->b_data, M_SEGMENT);
				free(bp, M_SEGMENT);
			} else {
				/* Hand the original buffer back to the LRU. */
				bremfree(bp);
				bp->b_flags |= B_DONE;
				reassignbuf(bp, bp->b_vp);
				brelse(bp);
			}
		}
		++cbp->b_vp->v_numoutput;
		splx(s);
		cbp->b_bcount = p - (char *)cbp->b_data;
		/*
		 * XXXX This is a gross and disgusting hack.  Since these
		 * buffers are physically addressed, they hang off the
		 * device vnode (devvp).  As a result, they have no way
		 * of getting to the LFS superblock or lfs structure to
		 * keep track of the number of I/O's pending.  So, I am
		 * going to stuff the fs into the saveaddr field of
		 * the buffer (yuk).
		 */
		cbp->b_saveaddr = (caddr_t)fs;
		vop_strategy_a.a_desc = VDESC(vop_strategy);
		vop_strategy_a.a_bp = cbp;
		(strategy)(&vop_strategy_a);
	}
	/*
	 * XXX
	 * Vinvalbuf can move locked buffers off the locked queue
	 * and we have no way of knowing about this.  So, after
	 * doing a big write, we recalculate how many buffers are
	 * really still left on the locked queue.
*/
	locked_queue_count = count_lock_queue();
	wakeup(&locked_queue_count);
#ifdef DOSTATS
	++lfs_stats.psegwrites;
	lfs_stats.blocktot += nblocks - 1;
	if (fs->lfs_sp->seg_flags & SEGM_SYNC)
		++lfs_stats.psyncwrites;
	if (fs->lfs_sp->seg_flags & SEGM_CLEAN) {
		++lfs_stats.pcleanwrites;
		lfs_stats.cleanblocks += nblocks - 1;
	}
#endif
	return (lfs_initseg(fs) || do_again);
}

/*
 * Checksum the in-core superblock and write it (asynchronously) to the
 * first superblock location on the device.
 */
void
lfs_writesuper(fs)
	struct lfs *fs;
{
	struct buf *bp;
	dev_t i_dev;
	int (*strategy) __P((struct vop_strategy_args *));
	int s;
	struct vop_strategy_args vop_strategy_a;

	i_dev = VTOI(fs->lfs_ivnode)->i_dev;
	strategy = VTOI(fs->lfs_ivnode)->i_devvp->v_op[VOFFSET(vop_strategy)];

	/* Checksum the superblock and copy it into a buffer. */
	fs->lfs_cksum = cksum(fs, sizeof(struct lfs) - sizeof(fs->lfs_cksum));
	bp = lfs_newbuf(VTOI(fs->lfs_ivnode)->i_devvp, fs->lfs_sboffs[0],
	    LFS_SBPAD);
	*(struct lfs *)bp->b_data = *fs;

	/* XXX Toggle between first two superblocks; for now just write first */
	bp->b_dev = i_dev;
	bp->b_flags |= B_BUSY | B_CALL | B_ASYNC;
	bp->b_flags &= ~(B_DONE | B_ERROR | B_READ | B_DELWRI);
	bp->b_iodone = lfs_supercallback;
	vop_strategy_a.a_desc = VDESC(vop_strategy);
	vop_strategy_a.a_bp = bp;
	s = splbio();
	++bp->b_vp->v_numoutput;
	splx(s);
	(strategy)(&vop_strategy_a);
}

/*
 * Logical block number match routines used when traversing the dirty block
 * chain.
 */
int
lfs_match_data(fs, bp)
	struct lfs *fs;
	struct buf *bp;
{
	/* Data blocks have non-negative logical block numbers. */
	return (bp->b_lblkno >= 0);
}

int
lfs_match_indir(fs, bp)
	struct lfs *fs;
	struct buf *bp;
{
	int lbn;

	lbn = bp->b_lblkno;
	/* Negative lbns are metadata; remainder 0 marks single-indirects. */
	return (lbn < 0 && (-lbn - NDADDR) % NINDIR(fs) == 0);
}

int
lfs_match_dindir(fs, bp)
	struct lfs *fs;
	struct buf *bp;
{
	int lbn;

	lbn = bp->b_lblkno;
	/* Remainder 1 marks double-indirect blocks. */
	return (lbn < 0 && (-lbn - NDADDR) % NINDIR(fs) == 1);
}

int
lfs_match_tindir(fs, bp)
	struct lfs *fs;
	struct buf *bp;
{
	int lbn;

	lbn = bp->b_lblkno;
	/* Remainder 2 marks triple-indirect blocks. */
	return (lbn < 0 && (-lbn - NDADDR) % NINDIR(fs) == 2);
}

/*
 * Allocate a new buffer header.
*/
struct buf *
lfs_newbuf(vp, daddr, size)
	struct vnode *vp;
	daddr_t daddr;
	size_t size;
{
	struct buf *bp;
	size_t nbytes;

	nbytes = roundup(size, DEV_BSIZE);
	bp = malloc(sizeof(struct buf), M_SEGMENT, M_WAITOK);
	bzero(bp, sizeof(struct buf));
	/* Data area is rounded up to a device-block multiple. */
	if (nbytes)
		bp->b_data = malloc(nbytes, M_SEGMENT, M_WAITOK);
	bgetvp(vp, bp);
	bp->b_bufsize = size;
	bp->b_bcount = size;
	bp->b_lblkno = daddr;
	bp->b_blkno = daddr;
	bp->b_error = 0;
	bp->b_resid = 0;
	/* lfs_callback frees this header and its data when I/O completes. */
	bp->b_iodone = lfs_callback;
	bp->b_flags |= B_BUSY | B_CALL | B_NOCACHE;
	return (bp);
}

/*
 * I/O completion callback for buffers made by lfs_newbuf: account the
 * finished I/O and release the malloc'd buffer.
 */
void
lfs_callback(bp)
	struct buf *bp;
{
	struct lfs *fs;

	/* The fs was stashed in b_saveaddr (see the hack in lfs_writeseg). */
	fs = (struct lfs *)bp->b_saveaddr;
#ifdef DIAGNOSTIC
	if (fs->lfs_iocount == 0)
		panic("lfs_callback: zero iocount\n");
#endif
	if (--fs->lfs_iocount == 0)
		wakeup(&fs->lfs_iocount);

	brelvp(bp);
	free(bp->b_data, M_SEGMENT);
	free(bp, M_SEGMENT);
}

/* Completion callback for the superblock write: just free the buffer. */
void
lfs_supercallback(bp)
	struct buf *bp;
{
	brelvp(bp);
	free(bp->b_data, M_SEGMENT);
	free(bp, M_SEGMENT);
}

/*
 * Shellsort (diminishing increment sort) from Data Structures and
 * Algorithms, Aho, Hopcraft and Ullman, 1983 Edition, page 290;
 * see also Knuth Vol. 3, page 84.  The increments are selected from
 * formula (8), page 95.  Roughly O(N^3/2).
 */
/*
 * This is our own private copy of shellsort because we want to sort
 * two parallel arrays (the array of buffer pointers and the array of
 * logical block numbers) simultaneously.  Note that we cast the array
 * of logical block numbers to a unsigned in this routine so that the
 * negative block numbers (meta data blocks) sort AFTER the data blocks.
*/voidlfs_shellsort(bp_array, lb_array, nmemb) struct buf **bp_array; daddr_t *lb_array; register int nmemb;{ static int __rsshell_increments[] = { 4, 1, 0 }; register int incr, *incrp, t1, t2; struct buf *bp_temp; u_long lb_temp; for (incrp = __rsshell_increments; incr = *incrp++;) for (t1 = incr; t1 < nmemb; ++t1) for (t2 = t1 - incr; t2 >= 0;) if (lb_array[t2] > lb_array[t2 + incr]) { lb_temp = lb_array[t2]; lb_array[t2] = lb_array[t2 + incr]; lb_array[t2 + incr] = lb_temp; bp_temp = bp_array[t2]; bp_array[t2] = bp_array[t2 + incr]; bp_array[t2 + incr] = bp_temp; t2 -= incr; } else break;}/* * Check VXLOCK. Return 1 if the vnode is locked. Otherwise, vget it. */lfs_vref(vp) register struct vnode *vp;{ if (vp->v_flag & VXLOCK) return(1); return (vget(vp, 0));}voidlfs_vunref(vp) register struct vnode *vp;{ extern int lfs_no_inactive; /* * This is vrele except that we do not want to VOP_INACTIVE * this vnode. Rather than inline vrele here, we use a global * flag to tell lfs_inactive not to run. Yes, its gross. */ lfs_no_inactive = 1; vrele(vp); lfs_no_inactive = 0;}
/*
 * (Code-viewer page chrome, not part of the original source.)
 * Shortcut legend: copy code Ctrl+C; search Ctrl+F; full screen F11;
 * larger font Ctrl+=; smaller font Ctrl+-; show shortcuts ?.
 */