📄 paged.java
字号:
 * writeValue writes the multi-Paged Value starting at the specified
 * Page.
 *
 * @param page The starting Page
 * @param value The Value to write
 * @throws IOException if an Exception occurs
 */
protected final void writeValue(Page page, Value value) throws IOException {
    if (value == null) {
        throw new IOException("Can't write a null value");
    }

    InputStream is = value.getInputStream();

    // Write as much as we can onto the primary page.
    PageHeader hdr = page.getPageHeader();
    hdr.setRecordLen(value.getLength());
    page.streamFrom(is);

    // Write out the rest of the value onto any needed overflow pages
    // NOTE(review): is.available() is used as an end-of-data test; that is only
    // reliable if Value.getInputStream() yields an in-memory stream (for a
    // generic InputStream, available()==0 does not mean EOF) — confirm.
    while (is.available() > 0) {
        Page lpage = page;
        PageHeader lhdr = hdr;

        // Find an overflow page to use
        long np = lhdr.getNextPage();
        if (np != NO_PAGE) {
            // Use an existing page.
            page = getPage(np);
        } else {
            // Create a new overflow page
            page = getFreePage();
            lhdr.setNextPage(page.getPageNum());
        }

        // Mark the page as an overflow page.
        hdr = page.getPageHeader();
        hdr.setStatus(OVERFLOW);

        // Write some more of the value to the overflow page.
        page.streamFrom(is);
        // Persist the previous page only after its next-page link is final.
        lpage.write();
    }

    // Cleanup any unused overflow pages. i.e. the value is smaller then the
    // last time it was written.
    long np = hdr.getNextPage();
    if (np != NO_PAGE) {
        unlinkPages(np);
    }

    // Terminate the chain at the last page actually used, then persist it.
    hdr.setNextPage(NO_PAGE);
    page.write();
}

/**
 * writeValue writes the multi-Paged Value starting at the specified
 * page number.
 *
 * @param page The starting page number
 * @param value The Value to write
 * @throws IOException if an Exception occurs
 */
protected final void writeValue(long page, Value value) throws IOException {
    // Convenience overload: resolve the page number, then delegate.
    writeValue(getPage(page), value);
}

/**
 * unlinkPages unlinks a set of pages starting at the specified Page.
 *
 * @param page The starting Page to unlink
 * @throws IOException if an Exception occurs
 */
protected final void unlinkPages(Page page) throws IOException {
    // Handle the page if it's in primary space by setting its status to
    // DELETED and freeing any overflow pages linked to it.
    if (page.pageNum < fileHeader.pageCount) {
        long nextPage = page.header.nextPage;
        page.header.setStatus(DELETED);
        page.header.setNextPage(NO_PAGE);
        page.write();

        // See if there are any chained pages from the page that was just removed
        if (nextPage == NO_PAGE) {
            // No overflow chain: nothing left to recycle below.
            page = null;
        } else {
            page = getPage(nextPage);
        }
    }

    // Add any overflow pages to the list of free pages.
    if (page != null) {
        // Get the first and last page in the chain.
        long firstPage = page.pageNum;
        while (page.header.nextPage != NO_PAGE) {
            page = getPage(page.header.nextPage);
        }
        long lastPage = page.pageNum;

        // Free the chain
        synchronized (fileHeader) {
            // If there are already some free pages, add the start of the chain
            // to the list of free pages.
            if (fileHeader.lastFreePage != NO_PAGE) {
                Page p = getPage(fileHeader.lastFreePage);
                p.header.setNextPage(firstPage);
                p.write();
            }

            // Otherwise set the chain as the list of free pages.
            if (fileHeader.firstFreePage == NO_PAGE) {
                fileHeader.setFirstFreePage(firstPage);
            }

            // Add a reference to the end of the chain.
            fileHeader.setLastFreePage(lastPage);
        }
    }
}

/**
 * unlinkPages unlinks a set of pages starting at the specified
 * page number.
 *
 * @param pageNum The starting page number to unlink
 * @throws IOException if an Exception occurs
 */
protected final void unlinkPages(long pageNum) throws IOException {
    unlinkPages(getPage(pageNum));
}

/**
 * getFreePage returns the first free Page from secondary storage.
 * If no Pages are available, the file is grown as appropriate.
* * @return The next free Page * @throws IOException if an Exception occurs */ protected final Page getFreePage() throws IOException { Page p = null; // Synchronize read and write to the fileHeader.firstFreePage synchronized (fileHeader) { if (fileHeader.firstFreePage != NO_PAGE) { // Steal a deleted page p = getPage(fileHeader.firstFreePage); fileHeader.setFirstFreePage(p.getPageHeader().nextPage); if (fileHeader.firstFreePage == NO_PAGE) { fileHeader.setLastFreePage(NO_PAGE); } } } if (p == null) { // No deleted pages, grow the file p = getPage(fileHeader.incTotalCount()); } // Initialize The Page Header (Cleanly) p.header.setNextPage(NO_PAGE); p.header.setStatus(UNUSED); return p; } /** * @throws DBException COL_COLLECTION_CLOSED if paged file is closed */ protected final void checkOpened() throws DBException { if (!opened) { throw new FilerException(FaultCodes.COL_COLLECTION_CLOSED, "Filer is closed"); } } /** * getFileHeader returns the FileHeader * * @return The FileHeader */ public FileHeader getFileHeader() { return fileHeader; } public boolean exists() { return file.exists(); } public boolean create() throws DBException { try { createFile(); fileHeader.write(); flush(); return true; } catch (Exception e) { throw new FilerException(FaultCodes.GEN_CRITICAL_ERROR, "Error creating " + file.getName(), e); } } private void createFile() throws IOException { RandomAccessFile raf = null; try { raf = getDescriptor(); long o = fileHeader.headerSize + (fileHeader.pageCount + 1) * fileHeader.pageSize - 1; raf.seek(o); raf.write(0); } finally { putDescriptor(raf); } } public boolean open() throws DBException { RandomAccessFile raf = null; try { if (exists()) { raf = getDescriptor(); fileHeader.read(); opened = true; } else { opened = false; } return opened; } catch (Exception e) { throw new FilerException(FaultCodes.GEN_CRITICAL_ERROR, "Error opening " + file.getName(), e); } finally { putDescriptor(raf); } } public synchronized boolean close() throws DBException { if 
(isOpened()) { try { // First of all, mark as closed to prevent operations opened = false; flush(); synchronized (descriptors) { final int total = descriptorsCount; // Close descriptors in cache while (!descriptors.empty()) { closeDescriptor(descriptors.pop()); } // Attempt to close descriptors in use. Max wait time = 0.5s * MAX_DESCRIPTORS int n = descriptorsCount; while (descriptorsCount > 0 && n > 0) { try { descriptors.wait(500); } catch (InterruptedException woken) { Thread.interrupted(); } if (descriptors.isEmpty()) { n--; } else { closeDescriptor(descriptors.pop()); } } if (descriptorsCount > 0) { LOG.fine(descriptorsCount + " out of " + total + " files were not closed during close."); } } } catch (Exception e) { // Failed to close, leave open opened = true; throw new FilerException(FaultCodes.GEN_CRITICAL_ERROR, "Error closing " + file.getName(), e); } } return true; } public boolean isOpened() { return opened; } public boolean drop() throws DBException { try { close(); return !exists() || getFile().delete(); } catch (Exception e) { throw new FilerException(FaultCodes.COL_CANNOT_DROP, "Can't drop " + file.getName(), e); } } void addDirty(Page page) throws IOException { synchronized (dirtyLock) { dirty.put(page.pageNum, page); if (dirty.size() > MAX_DIRTY_SIZE) { try { // Too many dirty pages... 
flush them flush(); } catch (Exception e) { throw new IOException(e.getMessage()); } } } } public void flush() throws DBException { // This method is not synchronized // Error flag/counter int error = 0; // Obtain collection of dirty pages Collection<Page> pages; synchronized (dirtyLock) { pages = dirty.values(); dirty = new HashMap<Long, Page>(); } // Flush dirty pages for (Object page : pages) { Page p = (Page) page; try { p.flush(); } catch (Exception e) { LOG.log(Level.WARNING, "Exception while flushing page", e); error++; } } // Flush header if (fileHeader.dirty) { try { fileHeader.write(); } catch (Exception e) { LOG.log(Level.WARNING, "Exception while flushing file header", e); error++; } } if (error != 0) { throw new FilerException(FaultCodes.GEN_CRITICAL_ERROR, "Error performing flush! Failed to flush " + error + " pages!"); } } /** * createFileHeader must be implemented by a Paged implementation * in order to create an appropriate subclass instance of a FileHeader. * * @return a new FileHeader */ public abstract FileHeader createFileHeader(); /** * createFileHeader must be implemented by a Paged implementation * in order to create an appropriate subclass instance of a FileHeader. * * @param read If true, reads the FileHeader from disk * @return a new FileHeader * @throws IOException if an exception occurs */ public abstract FileHeader createFileHeader(boolean read) throws IOException; /** * createFileHeader must be implemented by a Paged implementation * in order to create an appropriate subclass instance of a FileHeader. * * @param pageCount The number of pages to allocate for primary storage * @return a new FileHeader */ public abstract FileHeader createFileHeader(long pageCount); /** * createFileHeader must be implemented by a Paged implementation
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -