📄 vfs_cluster.c
struct buf *
cluster_newbuf(vp, bp, flags, blkno, lblkno, size, run)
	struct vnode *vp;
	struct buf *bp;
	long flags;
	daddr_t blkno;
	daddr_t lblkno;
	long size;
	int run;
{
	if (!bp) {
		bp = getblk(vp, lblkno, size, 0, 0);
		if (bp->b_flags & (B_DONE | B_DELWRI)) {
			bp->b_blkno = blkno;
			return(bp);
		}
	}
	allocbuf(bp, run * size);
	bp->b_blkno = blkno;
	bp->b_iodone = cluster_callback;
	bp->b_flags |= flags | B_CALL;
	return(bp);
}

/*
 * Cleanup after a clustered read or write.
 * This is complicated by the fact that any of the buffers might have
 * extra memory (if there were no empty buffer headers at allocbuf time)
 * that we will need to shift around.
 */
void
cluster_callback(bp)
	struct buf *bp;
{
	struct cluster_save *b_save;
	struct buf **bpp, *tbp;
	long bsize;
	caddr_t cp;
	int error = 0;

	/*
	 * Must propagate errors to all the components.
	 */
	if (bp->b_flags & B_ERROR)
		error = bp->b_error;

	b_save = (struct cluster_save *)(bp->b_saveaddr);
	bp->b_saveaddr = b_save->bs_saveaddr;

	bsize = b_save->bs_bufsize;
	cp = (char *)bp->b_data + bsize;
	/*
	 * Move memory from the large cluster buffer into the component
	 * buffers and mark IO as done on these.
	 */
	for (bpp = b_save->bs_children; b_save->bs_nchildren--; ++bpp) {
		tbp = *bpp;
		pagemove(cp, tbp->b_data, bsize);
		tbp->b_bufsize += bsize;
		tbp->b_bcount = bsize;
		if (error) {
			tbp->b_flags |= B_ERROR;
			tbp->b_error = error;
		}
		biodone(tbp);
		bp->b_bufsize -= bsize;
		cp += bsize;
	}
	/*
	 * If there was excess memory in the cluster buffer,
	 * slide it up adjacent to the remaining valid data.
	 */
	if (bp->b_bufsize != bsize) {
		if (bp->b_bufsize < bsize)
			panic("cluster_callback: too little memory");
		pagemove(cp, (char *)bp->b_data + bsize, bp->b_bufsize - bsize);
	}
	bp->b_bcount = bsize;
	bp->b_iodone = NULL;
	free(b_save, M_SEGMENT);
	if (bp->b_flags & B_ASYNC)
		brelse(bp);
	else {
		bp->b_flags &= ~B_WANTED;
		wakeup((caddr_t)bp);
	}
}

/*
 * Do clustered write for FFS.
 *
 * Three cases:
 *	1. Write is not sequential (write asynchronously)
 *	Write is sequential:
 *	2.	beginning of cluster - begin cluster
 *	3.	middle of a cluster - add to cluster
 *	4.	end of a cluster - asynchronously write cluster
 */
void
cluster_write(bp, filesize)
	struct buf *bp;
	u_quad_t filesize;
{
	struct vnode *vp;
	daddr_t lbn;
	int maxclen, cursize;

	vp = bp->b_vp;
	lbn = bp->b_lblkno;

	/* Initialize vnode to beginning of file. */
	if (lbn == 0)
		vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0;

	if (vp->v_clen == 0 || lbn != vp->v_lastw + 1 ||
	    (bp->b_blkno != vp->v_lasta + btodb(bp->b_bcount))) {
		maxclen = MAXBSIZE / vp->v_mount->mnt_stat.f_iosize - 1;
		if (vp->v_clen != 0) {
			/*
			 * Next block is not sequential.
			 *
			 * If we are not writing at end of file, the process
			 * seeked to another point in the file since its
			 * last write, or we have reached our maximum
			 * cluster size, then push the previous cluster.
			 * Otherwise try reallocating to make it sequential.
			 */
			cursize = vp->v_lastw - vp->v_cstart + 1;
			if (!doreallocblks ||
			    (lbn + 1) * bp->b_bcount != filesize ||
			    lbn != vp->v_lastw + 1 || vp->v_clen <= cursize) {
				cluster_wbuild(vp, NULL, bp->b_bcount,
				    vp->v_cstart, cursize, lbn);
			} else {
				struct buf **bpp, **endbp;
				struct cluster_save *buflist;

				buflist = cluster_collectbufs(vp, bp);
				endbp = &buflist->bs_children
				    [buflist->bs_nchildren - 1];
				if (VOP_REALLOCBLKS(vp, buflist)) {
					/*
					 * Failed, push the previous cluster.
					 */
					for (bpp = buflist->bs_children;
					     bpp < endbp; bpp++)
						brelse(*bpp);
					free(buflist, M_SEGMENT);
					cluster_wbuild(vp, NULL, bp->b_bcount,
					    vp->v_cstart, cursize, lbn);
				} else {
					/*
					 * Succeeded, keep building cluster.
					 */
					for (bpp = buflist->bs_children;
					     bpp <= endbp; bpp++)
						bdwrite(*bpp);
					free(buflist, M_SEGMENT);
					vp->v_lastw = lbn;
					vp->v_lasta = bp->b_blkno;
					return;
				}
			}
		}
		/*
		 * Consider beginning a cluster.
		 * If at end of file, make cluster as large as possible,
		 * otherwise find size of existing cluster.
		 */
		if ((lbn + 1) * bp->b_bcount != filesize &&
		    (VOP_BMAP(vp, lbn, NULL, &bp->b_blkno, &maxclen) ||
		     bp->b_blkno == -1)) {
			bawrite(bp);
			vp->v_clen = 0;
			vp->v_lasta = bp->b_blkno;
			vp->v_cstart = lbn + 1;
			vp->v_lastw = lbn;
			return;
		}
		vp->v_clen = maxclen;
		if (maxclen == 0) {		/* I/O not contiguous */
			vp->v_cstart = lbn + 1;
			bawrite(bp);
		} else {			/* Wait for rest of cluster */
			vp->v_cstart = lbn;
			bdwrite(bp);
		}
	} else if (lbn == vp->v_cstart + vp->v_clen) {
		/*
		 * At end of cluster, write it out.
		 */
		cluster_wbuild(vp, bp, bp->b_bcount, vp->v_cstart,
		    vp->v_clen + 1, lbn);
		vp->v_clen = 0;
		vp->v_cstart = lbn + 1;
	} else
		/*
		 * In the middle of a cluster, so just delay the
		 * I/O for now.
		 */
		bdwrite(bp);
	vp->v_lastw = lbn;
	vp->v_lasta = bp->b_blkno;
}

/*
 * This is an awful lot like cluster_rbuild...wish they could be combined.
 * The last lbn argument is the current block on which I/O is being
 * performed.  Check to see that it doesn't fall in the middle of
 * the current block (if last_bp == NULL).
 */
void
cluster_wbuild(vp, last_bp, size, start_lbn, len, lbn)
	struct vnode *vp;
	struct buf *last_bp;
	long size;
	daddr_t start_lbn;
	int len;
	daddr_t lbn;
{
	struct cluster_save *b_save;
	struct buf *bp, *tbp;
	caddr_t	cp;
	int i, s;

#ifdef DIAGNOSTIC
	if (size != vp->v_mount->mnt_stat.f_iosize)
		panic("cluster_wbuild: size %d != filesize %d\n",
		    size, vp->v_mount->mnt_stat.f_iosize);
#endif
redo:
	while ((!incore(vp, start_lbn) || start_lbn == lbn) && len) {
		++start_lbn;
		--len;
	}

	/* Get more memory for current buffer */
	if (len <= 1) {
		if (last_bp) {
			bawrite(last_bp);
		} else if (len) {
			bp = getblk(vp, start_lbn, size, 0, 0);
			bawrite(bp);
		}
		return;
	}

	bp = getblk(vp, start_lbn, size, 0, 0);
	if (!(bp->b_flags & B_DELWRI)) {
		++start_lbn;
		--len;
		brelse(bp);
		goto redo;
	}

	/*
	 * Extra memory in the buffer, punt on this buffer.
	 * XXX we could handle this in most cases, but we would have to
	 * push the extra memory down to after our max possible cluster
	 * size and then potentially pull it back up if the cluster was
	 * terminated prematurely--too much hassle.
	 */
	if (bp->b_bcount != bp->b_bufsize) {
		++start_lbn;
		--len;
		bawrite(bp);
		goto redo;
	}

	--len;
	b_save = malloc(sizeof(struct buf *) * len + sizeof(struct cluster_save),
	    M_SEGMENT, M_WAITOK);
	b_save->bs_bcount = bp->b_bcount;
	b_save->bs_bufsize = bp->b_bufsize;
	b_save->bs_nchildren = 0;
	b_save->bs_children = (struct buf **)(b_save + 1);
	b_save->bs_saveaddr = bp->b_saveaddr;
	bp->b_saveaddr = (caddr_t) b_save;

	bp->b_flags |= B_CALL;
	bp->b_iodone = cluster_callback;
	cp = (char *)bp->b_data + size;
	for (++start_lbn, i = 0; i < len; ++i, ++start_lbn) {
		/*
		 * Block is not in core or the non-sequential block
		 * ending our cluster was part of the cluster (in which
		 * case we don't want to write it twice).
		 */
		if (!incore(vp, start_lbn) ||
		    last_bp == NULL && start_lbn == lbn)
			break;

		/*
		 * Get the desired block buffer (unless it is the final
		 * sequential block whose buffer was passed in explicitly
		 * as last_bp).
		 */
		if (last_bp == NULL || start_lbn != lbn) {
			tbp = getblk(vp, start_lbn, size, 0, 0);
			if (!(tbp->b_flags & B_DELWRI)) {
				brelse(tbp);
				break;
			}
		} else
			tbp = last_bp;

		++b_save->bs_nchildren;

		/* Move memory from children to parent */
		if (tbp->b_blkno != (bp->b_blkno + btodb(bp->b_bufsize))) {
			printf("Clustered Block: %d addr %x bufsize: %d\n",
			    bp->b_lblkno, bp->b_blkno, bp->b_bufsize);
			printf("Child Block: %d addr: %x\n", tbp->b_lblkno,
			    tbp->b_blkno);
			panic("Clustered write to wrong blocks");
		}

		pagemove(tbp->b_data, cp, size);
		bp->b_bcount += size;
		bp->b_bufsize += size;

		tbp->b_bufsize -= size;
		tbp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
		tbp->b_flags |= (B_ASYNC | B_AGE);
		s = splbio();
		reassignbuf(tbp, tbp->b_vp);		/* put on clean list */
		++tbp->b_vp->v_numoutput;
		splx(s);
		b_save->bs_children[i] = tbp;

		cp += size;
	}

	if (i == 0) {
		/* None to cluster */
		bp->b_saveaddr = b_save->bs_saveaddr;
		bp->b_flags &= ~B_CALL;
		bp->b_iodone = NULL;
		free(b_save, M_SEGMENT);
	}
	bawrite(bp);
	if (i < len) {
		len -= i + 1;
		start_lbn += 1;
		goto redo;
	}
}

/*
 * Collect together all the buffers in a cluster.
 * Plus add one additional buffer.
 */
struct cluster_save *
cluster_collectbufs(vp, last_bp)
	struct vnode *vp;
	struct buf *last_bp;
{
	struct cluster_save *buflist;
	daddr_t lbn;
	int i, len;

	len = vp->v_lastw - vp->v_cstart + 1;
	buflist = malloc(sizeof(struct buf *) * (len + 1) + sizeof(*buflist),
	    M_SEGMENT, M_WAITOK);
	buflist->bs_nchildren = 0;
	buflist->bs_children = (struct buf **)(buflist + 1);
	for (lbn = vp->v_cstart, i = 0; i < len; lbn++, i++)
		(void)bread(vp, lbn, last_bp->b_bcount, NOCRED,
		    &buflist->bs_children[i]);
	buflist->bs_children[i] = last_bp;
	buflist->bs_nchildren = i + 1;
	return (buflist);
}
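A detail worth calling out: both cluster_wbuild() and cluster_collectbufs() allocate the cluster_save header and its bs_children pointer array with a single malloc(), then point bs_children just past the header with a cast of (b_save + 1), so one free() releases both. The sketch below is a simplified userland illustration of that idiom only, not kernel code; the struct here is a cut-down stand-in for the real cluster_save, and the length and calls are invented for the example.

/*
 * Userland sketch of the single-allocation idiom used above:
 * header and trailing pointer array come from one malloc().
 */
#include <stdio.h>
#include <stdlib.h>

struct buf;				/* opaque stand-in for the kernel struct buf */

struct cluster_save {
	long		bs_bcount;	/* saved byte count of the cluster head */
	long		bs_bufsize;	/* saved buffer size of the cluster head */
	int		bs_nchildren;	/* number of component buffers */
	struct buf	**bs_children;	/* points just past this header */
};

int
main(void)
{
	int len = 4;			/* hypothetical cluster length */
	struct cluster_save *b_save;

	/* One allocation: header immediately followed by len buffer pointers. */
	b_save = malloc(sizeof(struct buf *) * len + sizeof(struct cluster_save));
	if (b_save == NULL)
		return 1;
	b_save->bs_nchildren = 0;
	b_save->bs_children = (struct buf **)(b_save + 1);

	printf("header at %p, children array at %p\n",
	    (void *)b_save, (void *)b_save->bs_children);
	free(b_save);			/* releases header and array together */
	return 0;
}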