📄 initmap.c
字号:
/*
 * Tail of xtSplitRoot() — the function header is above this chunk.
 * NOTE(review): presumably the old root's entries were copied to a new
 * child page at <xaddr> spanning <xlen> blocks before this point; here the
 * root is rewritten to hold a single entry pointing at that child — confirm
 * against the full function.
 */
	xad = &rootpage->xad[XTENTRYSTART];
	XADoffset(xad, 0);	/* child entry starts at file offset 0 */
	XADlength(xad, xlen);
	XADaddress(xad, xaddr);

	/* update page header of root: it is no longer a leaf */
	rootpage->header.flag &= ~BT_LEAF;
	rootpage->header.flag |= BT_INTERNAL;
	rootpage->header.nextindex = XTENTRYSTART + 1;	/* exactly one entry */

	/* Update nblocks for inode to account for new page */
	ip->di_nblocks += xlen;

	return 0;
}

/*--------------------------------------------------------------------
 * NAME: xtSplitPage
 *
 * FUNCTION: Split non-root page of xtree
 *
 * PARAMETERS:
 *      ip - Inode of xtree splitting
 *      xpage - page to split
 *      offset - offset of new extent to add
 *      nblocks - number of blocks of new extent to add
 *      blkno - starting block number of new extent to add
 *      dev_ptr - Device handle
 *      aggr_block_size - aggregate block size
 *
 * NOTES: The split is an append-style split: the full page is flushed to
 *      disk as the "left" sibling, its in-memory buffer is then re-used as
 *      the new empty "right" sibling, and an entry for the right sibling is
 *      appended to the parent via xtAppend() (which may recurse upward).
 *
 * RETURNS: 0 for success; Other indicates failure
 */
static int xtSplitPage(struct dinode *ip, struct xtree_buf *xpage,
		       int64_t offset, int nblocks, int64_t blkno,
		       FILE *dev_ptr, int aggr_block_size)
{
	int rc = 0;
	int64_t xaddr;		/* new right page block number */
	xad_t *xad;
	int xlen;
	xtpage_t *lastpage, *newpage;
	int64_t leftbn;

	/* Allocate disk space for the new xtree page */
	xlen = 1 << control_page->dn_l2nbperpage;
	if ((rc = dbAlloc(dev_ptr, xlen, &xaddr)))
		return rc;

	/*
	 * Modify xpage's next entry to point to the new disk space,
	 * write the xpage to disk since we won't be needing it anymore.
	 */
	lastpage = xpage->page;
	lastpage->header.next = xaddr;
	leftbn = addressPXD(&(lastpage->header.self));

	/* swap if on big endian machine; swap back so the in-memory copy
	 * stays in host byte order for the re-use below */
	ujfs_swap_xtpage_t(lastpage);
	rc = ujfs_rw_diskblocks(dev_ptr, leftbn * aggr_block_size, PSIZE,
				lastpage, PUT);
	ujfs_swap_xtpage_t(lastpage);
	if (rc != 0)
		return rc;

	/*
	 * We are now done with the xpage as-is. We can now re-use this buffer
	 * for our new buffer.
	 */
	newpage = xpage->page;
	PXDlength(&(newpage->header.self), xlen);
	PXDaddress(&(newpage->header.self), xaddr);
	/* keep only the page-type bits; clears BT_ROOT among others */
	newpage->header.flag = newpage->header.flag & BT_TYPE;

	/* initialize sibling pointers of newpage */
	newpage->header.next = 0;
	newpage->header.prev = leftbn;

	/* insert entry at the first entry of the new right page */
	xad = &newpage->xad[XTENTRYSTART];
	XADoffset(xad, offset);
	XADlength(xad, nblocks);
	XADaddress(xad, blkno);
	newpage->header.nextindex = XTENTRYSTART + 1;

	/* Now append new page to parent page; this may recurse and split
	 * the parent as well */
	rc = xtAppend(dev_ptr, ip, offset, xaddr, xlen, xpage->up,
		      aggr_block_size);

	/* Update inode to account for new page */
	ip->di_nblocks += xlen;

	return rc;
}

/*--------------------------------------------------------------------
 * NAME: xtAppend
 *
 * FUNCTION: Append an extent to the specified file
 *
 * PARAMETERS:
 *      dev_ptr - Device handle
 *      di - Inode to add extent to
 *      offset - offset of extent to add
 *      blkno - block number of start of extent to add
 *      nblocks - number of blocks in extent to add
 *      xpage - xtree page to add extent to
 *      aggr_block_size - aggregate block size in bytes
 *
 * NOTES: xpage points to its parent in the xtree and its rightmost child (if it
 *      has one). It also points to the buffer for the page.
* * RETURNS: 0 for success; Other indicates failure */static int xtAppend(FILE *dev_ptr, struct dinode *di, int64_t offset, int64_t blkno, int nblocks, struct xtree_buf *xpage, int aggr_block_size){ int rc = 0; int index; xad_t *xad; xtpage_t *cur_page; cur_page = xpage->page; index = cur_page->header.nextindex; /* insert entry for new extent */ if (index == cur_page->header.maxentry) { /* * There is not room in this page to add the entry; Need to * create a new page */ if (cur_page->header.flag & BT_ROOT) { /* This is the root of the xtree; need to split root */ rc = xtSplitRoot(dev_ptr, di, xpage, offset, nblocks, blkno); } else { /* * Non-root page: add new page at this level, xtSplitPage() * calls xtAppend again to propogate up the new page entry */ rc = xtSplitPage(di, xpage, offset, nblocks, blkno, dev_ptr, aggr_block_size); } } else { /* There is room to add the entry to this page */ xad = &cur_page->xad[index]; XADoffset(xad, offset); XADlength(xad, nblocks); XADaddress(xad, blkno); /* advance next available entry index */ ++cur_page->header.nextindex; rc = 0; } return rc;}/*-------------------------------------------------------------------- * NAME: add_bad_block * * FUNCTION: Add an extent of <thisblk> to the <bb_inode> inode * * PRE CONDITIONS: badblock_pages has been initialized * * PARAMETERS: * dev_ptr - Device handle * thisblk - block number of bad block to add * aggr_block_size - Size of an aggregate block * bb_inode - Inode to add bad block to * * RETURNS: 0 for success; Other indicates failure */static int add_bad_block(FILE *dev_ptr, int64_t thisblk, int aggr_block_size, struct dinode *bb_inode){ int rc = 0; /* Mark block allocated in map */ rc = markit(thisblk, ALLOC | BADBLOCK); if (rc != 0) { return (rc); } /* Add to inode: add an extent for this block to the inode's tree */ rc = xtAppend(dev_ptr, bb_inode, bb_inode->di_size / aggr_block_size, thisblk, 1, badblock_pages, aggr_block_size); if (!rc) { /* append was successful */ 
		/* append succeeded: inode now covers one more aggregate block */
		bb_inode->di_size += aggr_block_size;
		bb_inode->di_nblocks++;
	}
	return rc;
}

/*--------------------------------------------------------------------
 * NAME: verify_last_blocks
 *
 * FUNCTION: Verify blocks in aggregate not initialized
 *
 * PARAMETERS:
 *      dev_ptr - Device handle
 *      aggr_block_size - aggregate block size in bytes
 *      bb_inode - Inode for bad blocks
 *
 * NOTES: Any bad blocks found will be added to the bad block inode.
 *      Every block from last_allocated+1 to the end of the aggregate is
 *      zero-filled; each write is read back to catch devices that do not
 *      report write errors. Failing ranges are retried block-by-block.
 *
 * RETURNS: 0 for success; Other indicates failure
 */
#define L2MEGABYTE 20
#define MEGABYTE (1 << L2MEGABYTE)

/* Define a parameter array for messages */
#define MAXPARMS 1
#define MAXSTR 128
static char *msg_parms[MAXPARMS];
static char msgstr[MAXSTR];

int verify_last_blocks(FILE *dev_ptr, int aggr_block_size,
		       struct dinode *bb_inode)
{
	int rc = 0;
	int error;
	void *buffer = NULL;
	int bufsize = PSIZE << 5;	/* start with a 32-page buffer */
	int nbufblks;			/* aggregate blocks per buffer */
	int64_t nblocks, nb;
	int64_t blkno, thisblk;
	int percent, section, index;
	bool write_inode = false;	/* true once a bad block was recorded */
	struct xtree_buf *curpage;
	long flags;

	if (badblock_pages == NULL) {
		/*
		 * Initialize list of xtree append buffers; the bottom of the
		 * list is the inode's in-memory btree root.
		 */
		badblock_pages = malloc(sizeof (struct xtree_buf));
		if (badblock_pages == NULL) {
			message_user(MSG_OSO_INSUFF_MEMORY, NULL, 0, OSO_MSG);
			return (ENOMEM);
		}
		badblock_pages->down = badblock_pages->up = NULL;
		badblock_pages->page = (xtpage_t *) & bb_inode->di_btroot;
	}

	/* Allocate and clear a buffer; halve the request on failure until it
	 * drops below one aggregate block (alignment is required for the
	 * O_DIRECT I/O below) */
	while ((bufsize >= aggr_block_size) &&
#ifdef HAVE_POSIX_MEMALIGN
	       posix_memalign(&buffer, aggr_block_size, bufsize))
#else
#ifdef HAVE_MEMALIGN
	       (buffer = memalign(aggr_block_size, bufsize)) == NULL)
#else
	       (buffer = valloc(bufsize)) == NULL)
#endif
#endif
		bufsize >>= 1;
	if (buffer == NULL) {
		message_user(MSG_OSO_INSUFF_MEMORY, NULL, 0, OSO_MSG);
		return (ENOMEM);
	}
	memset(buffer, 0, bufsize);
	nbufblks = bufsize / aggr_block_size;

#ifdef O_DIRECT
	/*
	 * Must do direct-io to avoid the page cache
	 */
	flags = fcntl(fileno(dev_ptr), F_GETFL);
	fcntl(fileno(dev_ptr), F_SETFL, flags | O_DIRECT);
#endif
	/*
	 * Starting from the last allocated block to the end of the aggregate
	 * write the empty buffer to disk.
	 */
	blkno = last_allocated + 1;
	nblocks = control_page->dn_mapsize - blkno;

	/* print a progress message roughly every `section` blocks:
	 * 1/128th of the aggregate, but at least one megabyte's worth */
	section = MAX(control_page->dn_mapsize >> 7, MEGABYTE / aggr_block_size);

	for (index = section; nblocks > 0; index += nb) {
		if (index > section) {
			percent = blkno * 100 / control_page->dn_mapsize;
			sprintf(msgstr, "%d", percent);
			msg_parms[0] = msgstr;
			message_user(MSG_OSO_PERCENT_FORMAT, msg_parms, 1,
				     OSO_MSG);
			fprintf(stdout, "\r");
			fflush(stdout);
			index = 0;
		}
		nb = MIN(nblocks, nbufblks);
		error = ujfs_rw_diskblocks(dev_ptr, blkno * aggr_block_size,
					   nb * aggr_block_size, buffer, PUT);
		/*
		 * most devices don't report an error on write, so we have to
		 * verify explicitly to be sure.
		 */
		if (error == 0) {
			error = ujfs_rw_diskblocks(dev_ptr,
						   blkno * aggr_block_size,
						   nb * aggr_block_size,
						   buffer, GET);
		}
		if (error != 0) {
			/*
			 * At least one of the blocks we just tried to write was
			 * bad. To narrow down the problem, we will write each
			 * block individually and add any bad ones to our bad
			 * block inode.
			 */
			for (thisblk = blkno; thisblk < blkno + nb; thisblk++) {
				error = ujfs_rw_diskblocks(dev_ptr,
							   thisblk * aggr_block_size,
							   aggr_block_size,
							   buffer, PUT);
				/*
				 * most devices don't report an error on write,
				 * so we have to verify explicitly to be sure.
				 */
				if (error == 0) {
					error = ujfs_rw_diskblocks(dev_ptr,
								   thisblk * aggr_block_size,
								   aggr_block_size,
								   buffer, GET);
				}
				if (error != 0) {
					/* add_bad_block may do unaligned I/O */
#ifdef O_DIRECT
					fcntl(fileno(dev_ptr), F_SETFL, flags);
#endif
					/* Add this block to bad list.
					 * NOTE(review): on failure this
					 * continues without re-setting
					 * O_DIRECT, so later iterations run
					 * without direct I/O — confirm this
					 * is intentional. rc keeps the last
					 * failure for the caller. */
					if ((rc = add_bad_block(dev_ptr,
								thisblk,
								aggr_block_size,
								bb_inode)))
						continue;
					write_inode = true;
#ifdef O_DIRECT
					fcntl(fileno(dev_ptr), F_SETFL,
					      flags | O_DIRECT);
#endif
					/*
					 * In case we allocated blocks for our
					 * addressing structure after our current
					 * bad block, we need to move our block
					 * number up so we don't overwrite any
					 * changes we have just done.
					 */
					thisblk = MAX(last_allocated, thisblk);
				}
			}
			/*
			 * In case we allocated blocks for the bad block map
			 * inode's addressing structure, skip past them so we
			 * don't wipe out our work.
			 */
			blkno += nb;
			if (blkno != thisblk) {
				/* inner loop jumped ahead: resume there and
				 * recompute the remaining block count */
				blkno = thisblk;
				nblocks = control_page->dn_mapsize - blkno;
			} else {
				nblocks -= nb;
			}
		} else {
			blkno += nb;
			nblocks -= nb;
		}
	}
#ifdef O_DIRECT
	/* restore the original file status flags */
	fcntl(fileno(dev_ptr), F_SETFL, flags);
#endif

	msg_parms[0] = "100";
	message_user(MSG_OSO_PERCENT_FORMAT, msg_parms, 1, OSO_MSG);
	fprintf(stdout, "\n");
	free(buffer);

	if (write_inode == true) {
		/* We added bad blocks, flush pages to disk: walk the append
		 * buffers from the leaf upward until the root (which lives in
		 * the inode and is written by ujfs_rwinode below) */
		curpage = badblock_pages;
		while (!(curpage->page->header.flag & BT_ROOT)) {
			blkno = addressPXD(&(curpage->page->header.self));
			/* swap if on big endian machine */
			ujfs_swap_xtpage_t(curpage->page);
			rc = ujfs_rw_diskblocks(dev_ptr,
						blkno * aggr_block_size,
						PSIZE, curpage->page, PUT);
			ujfs_swap_xtpage_t(curpage->page);
			if (rc != 0)
				return rc;
			curpage = curpage->up;
		}
		/* Write the bad block inode itself */
		rc = ujfs_rwinode(dev_ptr, bb_inode, BADBLOCK_I, PUT,
				  aggr_block_size, AGGREGATE_I, type_jfs);
	}

	return rc;
}
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -