log_map.c
	int16_t iagfree, numinos, agno;
	struct iag *iagp;
	struct iag *iag_pg;
	int32_t next_imap_page = 1;
	struct iag_data *iag_datarec;
	int32_t maplen, inoext_arrlen;

	if (vopen[vol].status == FM_LOGREDO) {
		fsck_send_msg(lrdo_WRIMPNOTRBLDGIMAP);
		return (NOTREBUILDING_IMAP);
	}

	fsck_send_msg(lrdo_WRIMPSTART);

	imap_ctl = fsimap.fsimapctrl;
	iagp = fsimap.iag_pbuf;
	iag_pg = fsimap.iag_pbuf2;
	maplen = sizeof (uint32_t) * LPERDMAP;
	inoext_arrlen = sizeof (pxd_t) * EXTSPERIAG;

	npages = __le64_to_cpu(dp->di_size) >> L2PSIZE;

	/* the first page is the imap control page, so the number of
	 * iag pages is one less than npages.
	 */
	iagpages = npages - 1;

	/* initialize the struct dinomap page */
	imap_ctl->in_freeiag = -1;

	/* iag.iagnum is zero origin.  The iags are in order in the
	 * imap file, so in_nextiag should be the last iag page
	 * plus one.  The last iag.iagnum is (npages - 2).
	 */
	imap_ctl->in_nextiag = iagpages;
	imap_ctl->in_numinos = 0;
	imap_ctl->in_numfree = 0;

	/* init imap_ctl->in_agctl[].  Although the aggregate has only
	 * vopen[vol].numag AGs, the structure is defined with MAXAG
	 * entries, so the initialization is done for all MAXAG of them.
	 */
	for (k = 0; k < MAXAG; k++) {
		imap_ctl->in_agctl[k].inofree = -1;
		imap_ctl->in_agctl[k].extfree = -1;
		imap_ctl->in_agctl[k].numinos = 0;
		imap_ctl->in_agctl[k].numfree = 0;
	}

	/* process each iag page of the map.
	 * rebuild AG Free Inode List, AG Free Inode Extent List,
	 * and IAG Free List from scratch
	 */
	for (k = 0; k < iagpages; k++) {
		/*
		 * read in the IAG
		 */
		if (fsimap.imap_pagenum != next_imap_page) {
			rc = iMapRead(vol, next_imap_page, iagp);
			if (rc != 0) {
				return (rc);
			}
			ujfs_swap_iag(iagp);
		}
		fsimap.imap_pagenum = next_imap_page;

		/*
		 * if the bit maps and inoext array for this iag are
		 * in memory, copy the pmap and inoext into the page
		 */
		iag_datarec = fsimap.imap_wsp[next_imap_page].imap_data;
		if (iag_datarec != NULL) {
			memcpy((void *) &(iagp->pmap),
			       (void *) &(iag_datarec->pmap), maplen);
			memcpy((void *) &(iagp->inoext),
			       (void *) &(iag_datarec->inoext), inoext_arrlen);
		}
		next_imap_page++;

		iagfree = 0;
		agno = BLKNOTOAG(iagp->agstart, vopen[vol].l2agsize);
		updateImapPage(vol, iagp, &numinos, &iagfree);

		if (iagfree) {
			/* all inodes are free, so this iag should be
			 * inserted into the iag free list.
			 */
			iagp->inofreefwd = iagp->inofreeback = -1;
			iagp->extfreefwd = iagp->extfreeback = -1;
			iagp->iagfree = imap_ctl->in_freeiag;
			imap_ctl->in_freeiag = iagp->iagnum;
		} else if (iagp->nfreeinos > 0) {
			if ((next_iag = imap_ctl->in_agctl[agno].inofree) == -1)
				iagp->inofreefwd = iagp->inofreeback = -1;
			else {
				/*
				 * read in the IAG
				 */
				if (fsimap.imap_pagenum2 != (next_iag + 1)) {
					rc = iMapRead(vol, (next_iag + 1),
						      iag_pg);
					if (rc != 0) {
						return (rc);
					}
					fsimap.imap_pagenum2 = next_iag + 1;
				}
				iagp->inofreefwd = next_iag;
				iag_pg->inofreeback =
				    __cpu_to_le32(iagp->iagnum);
				iagp->inofreeback = -1;

				/*
				 * write out the IAG
				 */
				rc = iMapWrite(vol, fsimap.imap_pagenum2,
					       iag_pg);
				if (rc != 0) {
					return (rc);
				}
			}
			imap_ctl->in_agctl[agno].inofree = iagp->iagnum;
			imap_ctl->in_agctl[agno].numfree += iagp->nfreeinos;
			imap_ctl->in_numfree += iagp->nfreeinos;
		}

		if (numinos) {
			imap_ctl->in_agctl[agno].numinos += numinos;
			imap_ctl->in_numinos += numinos;
		}

		if (iagp->nfreeexts > 0 && !iagfree) {
			/* When an IAG is on the IAG free list, its nfreeexts
			 * is EXTSPERIAG, which is > 0.  But here we only
			 * consider those IAGs that are not on the IAG free
			 * list.
			 */
			if ((next_iag = imap_ctl->in_agctl[agno].extfree) == -1)
				iagp->extfreefwd = iagp->extfreeback = -1;
			else {
				/*
				 * read in the IAG
				 */
				if (fsimap.imap_pagenum2 != (next_iag + 1)) {
					rc = iMapRead(vol, (next_iag + 1),
						      iag_pg);
					if (rc != 0) {
						return (rc);
					}
					fsimap.imap_pagenum2 = next_iag + 1;
				}
				iagp->extfreefwd = next_iag;
				iag_pg->extfreeback =
				    __cpu_to_le32(iagp->iagnum);
				iagp->extfreeback = -1;

				/*
				 * write out the IAG
				 */
				rc = iMapWrite(vol, fsimap.imap_pagenum2,
					       iag_pg);
				if (rc != 0) {
					return (rc);
				}
			}
			imap_ctl->in_agctl[agno].extfree = iagp->iagnum;
		}

		/*
		 * write out the IAG
		 */
		ujfs_swap_iag(iagp);
		rc = iMapWrite(vol, fsimap.imap_pagenum, iagp);
		if (rc != 0) {
			return (rc);
		}
	}

	/*
	 * And now, write the control page to the device
	 */
	ujfs_swap_dinomap(fsimap.fsimapctrl);
	rc = iMapWrite(vol, 0, fsimap.fsimapctrl);
	if (rc != 0) {
		return (rc);
	}

	fsck_send_msg(lrdo_WRIMPDONE);
	return (0);
}

/*
 * NAME:	updateImapPage()
 *
 * FUNCTION:	copies the pmap to the wmap in each iag, since the pmap
 *		is what gets updated by the logredo process.  Also
 *		reconstructs the nfreeinos and nfreeexts fields of the iag.
 */
int updateImapPage(int32_t vol,		/* index in vopen array */
		   struct iag *p,	/* pointer to the current iag page */
		   int16_t *numinos,	/* no. of backed inodes for this iag */
		   int16_t *iagfree)	/* set on return if all inodes free */
{
	int rc = 0;
	uint i, sword, mask;
	uint16_t allfree;
	uint8_t *cp;

	/* copy the perm map to the work map. */
	p->nfreeinos = 0;
	p->nfreeexts = 0;
	allfree = 0;
	*numinos = 0;

	for (i = 0; i < EXTSPERIAG; i++) {
		p->wmap[i] = p->pmap[i];
		sword = i >> L2EXTSPERSUM;
		mask = UZBIT_32 >> (i & (EXTSPERSUM - 1));

		if (p->pmap[i] == 0) {
			/* There can be cases where p->pmap[i] == 0 but
			 * addressPXD(&p->inoext[i]) != 0.  This can happen
			 * when the log sync point has passed the last
			 * inode-free log rec for this inode extent, but the
			 * hwm has not been reached, so no NOREDOPAGE log rec
			 * was written out before the system crash.  At
			 * logredo time, we have to null out the address of
			 * p->inoext[i] if p->pmap[i] is zero.
			 */
			if (addressPXD(&p->inoext[i]) != 0) {
				rc = markBmap((struct dmap *)
					      vopen[vol].bmap_ctl,
					      p->inoext[i], 0, vol);
				if (rc != 0) {
					return (rc);
				}
				PXDaddress(&p->inoext[i], 0);
			}
			p->extsmap[sword] &= ~mask;
			p->inosmap[sword] |= mask;
			p->nfreeexts++;
			allfree++;
		} else if (p->pmap[i] == ONES) {
			if (addressPXD(&p->inoext[i]) != 0) {
				p->inosmap[sword] |= mask;
				p->extsmap[sword] |= mask;
				*numinos += INOSPEREXT;
			} else
				fsck_send_msg(lrdo_RBLDGIMAPERROR2);
		} else if (~p->pmap[i] && (addressPXD(&p->inoext[i]) != 0)) {
			/* some bits are zero: a backed extent with free inodes */
			p->extsmap[sword] |= mask;
			p->inosmap[sword] &= ~mask;
			*numinos += INOSPEREXT;
			cp = (uint8_t *) &p->pmap[i];
			p->nfreeinos += (maptab[*cp] + maptab[*(cp + 1)] +
					 maptab[*(cp + 2)] + maptab[*(cp + 3)]);
		} else
			fsck_send_msg(lrdo_RBLDGIMAPERROR1);
	}

	if (allfree == EXTSPERIAG)
		*iagfree = 1;

	return (0);
}

/*
 * NAME:	writeBmap()
 *
 * FUNCTION:	copy pmap to wmap in dmap pages,
 *		rebuild the summary trees of the dmap and dmap control pages,
 *		and rebuild the bmap control page.
 */
int writeBmap(int32_t vol,		/* index in vopen array */
	      struct dbmap *bmap,	/* pointer to the bmap control page */
	      struct dinode *dip)	/* disk inode of the map */
{
	int rc;
	int32_t i, j, k, n;
	int64_t fssize, h_fssize, nblocks;
	int32_t npages;
	char *p;
	struct dmapctl *l2ptr;
	struct dmapctl *l1ptr;
	struct dmapctl *l0ptr;
	struct dmap *dmap;
	int8_t *l0leaf, *l1leaf, *l2leaf;
	int32_t agno, l2agsize;
	int32_t actags, inactags, l2nl;
	int64_t ag_rem, actfree, inactfree, avgfree;
	int32_t next_bmap_page = 1;
	struct dmap_bitmaps *dmap_bitrec;
	int32_t bitmaplen;

	if (vopen[vol].status == FM_LOGREDO) {
		fsck_send_msg(lrdo_WRBMPNOTRBLDGBMAP);
		return (NOTREBUILDING_BMAP);
	}

	fsck_send_msg(lrdo_WRBMPSTART);

	/*
	 * set the pointers to the corresponding page buffers
	 */
	l2ptr = vopen[vol].L2_pbuf;
	l1ptr = vopen[vol].L1_pbuf;
	l0ptr = vopen[vol].L0_pbuf;
	dmap = vopen[vol].dmap_pbuf;
	bitmaplen = sizeof (uint32_t) * LPERDMAP;

	/*
	 * validate file system size and bmap file size
	 *
	 * Since di_size includes the mkfs hidden dmap page and its
	 * related control pages, when calculating l_totalpages we
	 * pretend the fs size is fssize plus BPERDMAP.  The macro
	 * gives the page index (zero origin), so the (+ 1) gives the
	 * total number of pages.
	 */
	h_fssize = vopen[vol].fssize + BPERDMAP;
	npages = BLKTODMAPN(h_fssize - 1) + 1;

	if (npages > (__le64_to_cpu(dip->di_size) >> L2PSIZE)) {
		fsck_send_msg(lrdo_WRBMPBADMAPSIZE);
		return (BMAP_WRITEERROR1);
	}

	/*
	 * reconstruct bmap extended information from the bit map
	 */
	fssize = vopen[vol].fssize;

	/*
	 * initialize the bmap control page.
	 *
	 * all the data in the bmap control page excludes the mkfs
	 * hidden dmap page.
	 */
	bmap->dn_mapsize = fssize;
	bmap->dn_maxlevel = BMAPSZTOLEV(bmap->dn_mapsize);
	bmap->dn_nfree = 0;
	bmap->dn_agl2size = vopen[vol].l2agsize;
	l2agsize = bmap->dn_agl2size;
	bmap->dn_agsize = vopen[vol].agsize;
	bmap->dn_numag = vopen[vol].numag;

	for (agno = 0; agno < bmap->dn_numag; agno++)
		bmap->dn_agfree[agno] = 0;

	/*
	 * reconstruct summary tree and control information
	 * in struct dmap pages and dmapctl pages
	 */
	nblocks = fssize;
	p = (char *) bmap + sizeof (struct dbmap);

	if (vopen[vol].L2_pagenum != next_bmap_page) {
		rc = bMapRead(vol, next_bmap_page, l2ptr);
		if (rc != 0) {
			return (rc);
		}
		ujfs_swap_dmapctl(l2ptr);
	}
	vopen[vol].L2_pagenum = next_bmap_page;
	next_bmap_page++;

	l2leaf = l2ptr->stree + CTLLEAFIND;

	/* reconstruct each L1 in L2 */
	p += SIZEOFDMAPCTL;	/* the L1.0 */
	for (k = 0; k < LPERCTL; k++) {
		if (vopen[vol].L1_pagenum != next_bmap_page) {
			rc = bMapRead(vol, next_bmap_page, l1ptr);
			if (rc != 0) {
				return (rc);
			}
			ujfs_swap_dmapctl(l1ptr);
		}
		vopen[vol].L1_pagenum = next_bmap_page;
		next_bmap_page++;

		l1leaf = l1ptr->stree + CTLLEAFIND;

		/* reconstruct each L0 in L1 */
		p += SIZEOFDMAPCTL;	/* 1st L0 of L1.k */
		for (j = 0; j < LPERCTL; j++) {
			if (vopen[vol].L0_pagenum != next_bmap_page) {
				rc = bMapRead(vol, next_bmap_page, l0ptr);
				if (rc != 0) {
					return (rc);
				}
				ujfs_swap_dmapctl(l0ptr);
			}
			vopen[vol].L0_pagenum = next_bmap_page;
			next_bmap_page++;

			if (l0ptr->leafidx != CTLLEAFIND) {
				fsck_send_msg(lrdo_WRBMPBADLFIDX0, k, j,
					      l0ptr->leafidx);
				return ILLEGAL_LEAF_IND0;
			}
			l0leaf = l0ptr->stree + l0ptr->leafidx;

			/*
			 * reconstruct each dmap in L0
			 */
			for (i = 0; i < LPERCTL; i++) {
				/*
				 * read in the dmap page
				 */
				if (vopen[vol].dmap_pagenum != next_bmap_page) {
					rc = bMapRead(vol, next_bmap_page,
						      dmap);
					if (rc != 0) {
						return (rc);
					}
					ujfs_swap_dmap(dmap);
				}
				vopen[vol].dmap_pagenum = next_bmap_page;

				/*
				 * if the bit maps for this dmap page are
				 * in memory, copy the pmap into the page
				 */
				dmap_bitrec =
				    vopen[vol].bmap_wsp[next_bmap_page].
				    dmap_bitmaps;
				if (dmap_bitrec != NULL) {
					memcpy((void *) &(dmap->pmap),
					       (void *) &(dmap_bitrec->pmap),
					       bitmaplen);
				}
				next_bmap_page++;

				/*
				 * reconstruct the dmap page, and
				 * initialize corresponding parent L0 leaf
				 */
				n = MIN(nblocks, BPERDMAP);
				rc = updDmapPage(dmap, n, l0leaf);
				if (rc != 0) {
					fsck_send_msg(lrdo_RBLDGDMAPERROR,
						      k, j, i);
					return (DMAP_UPDATEFAIL);
				}
				bmap->dn_nfree += dmap->nfree;
				agno = dmap->start >> l2agsize;
				bmap->dn_agfree[agno] += dmap->nfree;
				l0leaf++;

				/*
				 * write out the dmap page
				 */
				ujfs_swap_dmap(dmap);
				rc = bMapWrite(vol, vopen[vol].dmap_pagenum,
					       dmap);
				if (rc != 0) {
					return (rc);
				}
				vopen[vol].dmap_pagenum = -1;

				nblocks -= n;
				if (nblocks == 0)
					break;
			}	/* for each dmap in a L0 */

			/*
			 * build current L0 page from its leaves, and
			 * initialize corresponding parent L1 leaf
			 */
			*l1leaf = adjTree(l0ptr, L2LPERCTL, L2BPERDMAP);

			/*
			 * write out the L0 page
			 */
			ujfs_swap_dmapctl(l0ptr);
			rc = bMapWrite(vol, vopen[vol].L0_pagenum, l0ptr);
			if (rc != 0) {
				return (rc);
			}
			vopen[vol].L0_pagenum = -1;

			if (nblocks)
				l1leaf++;	/* continue for next L0 */
			else {
				/* more than 1 L0 ? */
				if (j > 0)
					break;	/* build L1 page */
				else {
					/* initialize global bmap page */
					bmap->dn_maxfreebud = *l1leaf;
					goto finalize;
				}
			}
		}	/* for each L0 in a L1 */

		/*
		 * build current L1 page from its leaves, and
		 * initialize corresponding parent L2 leaf
		 */
		*l2leaf = adjTree(l1ptr, L2LPERCTL, L2MAXL0SIZE);

		/*
		 * write out the L1 page to disk
		 */
		ujfs_swap_dmapctl(l1ptr);
		rc = bMapWrite(vol, vopen[vol].L1_pagenum, l1ptr);
		if (rc != 0) {
			return (rc);
		}
		vopen[vol].L1_pagenum = -1;
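
The nfreeinos arithmetic in updateImapPage() counts free inodes one byte of the pmap at a time through the maptab lookup table rather than scanning individual bits. The following is a minimal, self-contained sketch of that technique; zerobits and free_inodes_in_word are illustrative names (not part of log_map.c), and it assumes maptab maps a byte value to its number of zero bits, i.e. free inodes.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for maptab: entry b holds the number of zero
 * bits in byte value b (assumed to be the number of free inodes that
 * byte of the pmap represents). */
static uint8_t zerobits[256];

static void init_zerobits(void)
{
	int b, bit;

	for (b = 0; b < 256; b++) {
		zerobits[b] = 0;
		for (bit = 0; bit < 8; bit++)
			if (!(b & (1 << bit)))
				zerobits[b]++;
	}
}

/* Count the free inodes in one 32-bit pmap word byte by byte, the way
 * updateImapPage() accumulates p->nfreeinos. */
static int free_inodes_in_word(uint32_t pmap_word)
{
	uint8_t *cp = (uint8_t *) &pmap_word;

	return zerobits[cp[0]] + zerobits[cp[1]] +
	       zerobits[cp[2]] + zerobits[cp[3]];
}

int main(void)
{
	init_zerobits();
	/* 0xF0F0F0F0: half of the 32 inode bits are clear -> 16 free */
	printf("%d\n", free_inodes_in_word(0xF0F0F0F0u));
	return 0;
}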
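Both maps attribute free resources to an allocation group by shifting a block address right by l2agsize, the log2 of the AG size in blocks; writeBmap() does this literally with "agno = dmap->start >> l2agsize", and BLKNOTOAG() is presumably the same shift applied to iag->agstart. A tiny illustration, assuming 2^15 = 32768 blocks per AG (blkno_to_ag is an illustrative helper, not a jfsutils function):

#include <stdint.h>
#include <stdio.h>

/* Map a block number to its allocation group index by dividing by the
 * AG size, which is a power of two, so the division is a right shift. */
static int32_t blkno_to_ag(int64_t blkno, int32_t l2agsize)
{
	return (int32_t) (blkno >> l2agsize);
}

int main(void)
{
	int32_t l2agsize = 15;	/* assumed: 32768 blocks per AG */

	printf("%d\n", blkno_to_ag(0, l2agsize));	/* AG 0 */
	printf("%d\n", blkno_to_ag(32768, l2agsize));	/* AG 1 */
	printf("%d\n", blkno_to_ag(100000, l2agsize));	/* AG 3 */
	return 0;
}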