📄 Paged.java
/*
 * The Apache Software License, Version 1.1
 *
 *
 * Copyright (c) 1999 The Apache Software Foundation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * 3. The end-user documentation included with the redistribution,
 *    if any, must include the following acknowledgment:
 *       "This product includes software developed by the
 *        Apache Software Foundation (http://www.apache.org/)."
 *    Alternately, this acknowledgment may appear in the software itself,
 *    if and wherever such third-party acknowledgments normally appear.
 *
 * 4. The names "Xindice" and "Apache Software Foundation" must
 *    not be used to endorse or promote products derived from this
 *    software without prior written permission. For written
 *    permission, please contact apache@apache.org.
 *
 * 5. Products derived from this software may not be called "Apache",
 *    nor may "Apache" appear in their name, without prior written
 *    permission of the Apache Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE APACHE SOFTWARE FOUNDATION OR
 * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 * ====================================================================
 *
 * This software consists of voluntary contributions made by many
 * individuals on behalf of the Apache Software Foundation and was
 * originally based on software copyright (c) 1999-2001, The dbXML
 * Group, L.L.C., http://www.dbxmlgroup.com. For more
 * information on the Apache Software Foundation, please see
 * <http://www.apache.org/>.
 */

package net.jxta.impl.xindice.core.filer;

import net.jxta.impl.xindice.core.DBException;
import net.jxta.impl.xindice.core.FaultCodes;
import net.jxta.impl.xindice.core.data.Key;
import net.jxta.impl.xindice.core.data.Value;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.RandomAccessFile;
import java.lang.ref.WeakReference;
import java.util.Collection;
import java.util.EmptyStackException;
import java.util.HashMap;
import java.util.Map;
import java.util.Stack;
import java.util.WeakHashMap;
import java.util.logging.Level;
import java.util.logging.Logger;

/**
 * Paged is a paged file implementation that is the foundation for both the
 * BTree class and the HashFiler. It provides flexible paged I/O and
 * page caching functionality.
 * <p/>
 * Paged has the following configuration attributes:
 * <ul>
 * <li><strong>pagesize</strong>: Size of the page used by the paged file.
 *     The default page size is 4096 bytes. This parameter can be set only
 *     before the paged file is created. Once it is created, this parameter
 *     can not be changed.</li>
 * <li><strong>maxkeysize</strong>: Maximum allowed size of the key.
 *     The default maximum key size is 256 bytes.</li>
 * <li><strong>max-descriptors</strong>: Defines the maximum number of
 *     simultaneously opened file descriptors this paged file can have.
 *     Several descriptors are needed to provide multithreaded access
 *     to the underlying file. Too large a number will limit the number of
 *     collections you can open. The default value is 16
 *     (DEFAULT_DESCRIPTORS_MAX).</li>
 * </ul>
 * <p/>
 * <br>FIXME: Currently it seems that maxkeysize is not used anywhere.
 * <br>TODO: Introduce Paged interface, implementations.
 */
public abstract class Paged {

    /**
     * Logger
     */
    private final static Logger LOG = Logger.getLogger(Paged.class.getName());

    /**
     * The maximum number of pages that will be held in the dirty cache.
     * Once the number reaches the limit, pages are flushed to disk.
     */
    private static final int MAX_DIRTY_SIZE = 128;

    // The maximum number of open random access files we can have
    private static final int DEFAULT_DESCRIPTORS_MAX = 16;

    /**
     * Unused page status
     */
    protected static final byte UNUSED = 0;

    /**
     * Overflow page status
     */
    protected static final byte OVERFLOW = 126;

    /**
     * Deleted page status
     */
    protected static final byte DELETED = 127;

    /**
     * Page ID of non-existent page
     */
    protected static final int NO_PAGE = -1;

    /**
     * Flag whether to sync the DB on every write or not.
     */
    protected boolean sync = true;

    // TODO: This is not a cache right now, but a way to assure that only one
    // page instance at most exists in memory at all times.
    /**
     * Cache of recently read pages.
     * <p/>
     * The cache contains weak references to the Page objects; keys are page
     * numbers (Long objects). Access is synchronized by this map itself.
     */
    private final Map<Long, WeakReference<Page>> pages = new WeakHashMap<Long, WeakReference<Page>>();

    /**
     * Cache of modified pages waiting to be written out.
     * Access is synchronized by the {@link #dirtyLock}.
     */
    private Map<Long, Page> dirty = new HashMap<Long, Page>();

    /**
     * Lock for synchronizing access to the {@link #dirty} map.
     */
    private final Object dirtyLock = new Object();

    /**
     * Random access file descriptors cache.
     * Access to it and to {@link #descriptorsCount} is synchronized by itself.
     */
    private final Stack<RandomAccessFile> descriptors = new Stack<RandomAccessFile>();

    /**
     * The number of random access file objects that exist, either in the
     * cache {@link #descriptors}, or currently in use.
     */
    private int descriptorsCount;

    /**
     * The maximum number of random access file objects that can be opened
     * by this paged instance.
     */
    private int descriptorsMax;

    /**
     * Whether the file is opened or not.
     */
    private boolean opened;

    /**
     * The underlying file where the Paged object stores its pages.
     */
    private File file;

    /**
     * Header of this Paged
     */
    private final FileHeader fileHeader;

    public Paged() {
        descriptorsMax = DEFAULT_DESCRIPTORS_MAX;
        fileHeader = createFileHeader();
    }

    public Paged(File file) {
        this();
        setFile(file);
    }

    /**
     * setFile sets the file object for this Paged.
     *
     * @param file The File
     */
    protected final void setFile(final File file) {
        this.file = file;
    }

    /**
     * getFile returns the file object for this Paged.
     *
     * @return The File
     */
    protected final File getFile() {
        return file;
    }
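
    // Illustrative sketch, not part of the original class: the field javadocs
    // above say that the 'dirty' map is guarded by dirtyLock and that dirty
    // pages are flushed once MAX_DIRTY_SIZE is reached. A hypothetical helper
    // that registers a modified page would therefore look roughly like this;
    // the flush-to-disk step itself is omitted because it is not shown in this
    // part of the file.
    private boolean exampleMarkDirty(Page page) {
        synchronized (dirtyLock) {
            dirty.put(page.pageNum, page);          // keyed by page number, like 'pages'
            return dirty.size() > MAX_DIRTY_SIZE;   // caller would trigger a flush when true
        }
    }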

    /**
     * Obtains a RandomAccessFile ('descriptor') object out of the pool.
     * If no descriptors are available and the maximum amount has already
     * been allocated, the call will block.
     *
     * @return the file
     * @throws java.io.IOException if an io error occurs
     */
    protected final RandomAccessFile getDescriptor() throws IOException {
        synchronized (descriptors) {
            // If there are descriptors in the cache return one.
            if (!descriptors.empty()) {
                return descriptors.pop();
            }

            // Otherwise we need to get one some other way.
            // First try to create a new one if there's room.
            if (descriptorsCount < descriptorsMax) {
                descriptorsCount++;
                return new RandomAccessFile(file, "rw");
            }

            // Otherwise we have to wait for one to be released by another thread.
            while (true) {
                try {
                    descriptors.wait();
                    return descriptors.pop();
                } catch (InterruptedException e) {
                    // Ignore, and continue to wait
                } catch (EmptyStackException e) {
                    // Ignore, and continue to wait
                }
            }
        }
    }

    /**
     * Puts a RandomAccessFile ('descriptor') back into the descriptor pool.
     *
     * @param raf the file to add
     */
    protected final void putDescriptor(RandomAccessFile raf) {
        if (raf != null) {
            synchronized (descriptors) {
                descriptors.push(raf);
                descriptors.notify();
            }
        }
    }

    /**
     * Closes a RandomAccessFile ('descriptor') and removes it from the pool.
     *
     * @param raf the file to close
     */
    protected final void closeDescriptor(RandomAccessFile raf) {
        if (raf != null) {
            try {
                raf.close();
            } catch (IOException e) {
                // Ignore close exception
            }

            // Synchronization is necessary as the decrement operation is not atomic.
            synchronized (descriptors) {
                descriptorsCount--;
            }
        }
    }

    /**
     * getPage returns the page specified by pageNum.
     *
     * @param pageNum The Page number
     * @return The requested Page
     * @throws IOException if an Exception occurs
     */
    protected final Page getPage(long pageNum) throws IOException {
        final Long lp = pageNum;
        Page page;

        synchronized (this) {
            // Check if it's in the dirty cache.
            // No need to synchronize on dirtyLock thanks to atomic assignment.
            page = dirty.get(lp);

            // If not, check if it's already loaded in the page cache.
            if (page == null) {
                WeakReference<Page> ref = pages.get(lp);

                if (ref != null) {
                    page = ref.get();
                }
            }

            // If still not found, we need to create it and add it to the page cache.
            if (page == null) {
                page = new Page(lp);
                pages.put(page.pageNum, new WeakReference<Page>(page));
            }
        }

        // Load the page from disk if necessary.
        page.read();
        return page;
    }

    /**
     * readValue reads the multi-Paged Value starting at the specified
     * Page.
     *
     * @param page The starting Page
     * @return The Value
     * @throws IOException if an Exception occurs
     */
    protected final Value readValue(Page page) throws IOException {
        final PageHeader sph = page.getPageHeader();
        ByteArrayOutputStream bos = new ByteArrayOutputStream(sph.getRecordLen());

        // Loop until we've read all the pages into memory.
        Page p = page;

        while (true) {
            PageHeader ph = p.getPageHeader();

            // Add the contents of the page onto the stream.
            p.streamTo(bos);

            // Continue following the list of pages until we get to the end.
            long nextPage = ph.getNextPage();

            if (nextPage == NO_PAGE) {
                break;
            }
            p = getPage(nextPage);
        }

        // Return a Value with the collected contents of all pages.
        return new Value(bos.toByteArray());
    }
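
    // Illustrative sketch, not part of the original class: getDescriptor() and
    // putDescriptor() above form a blocking pool of RandomAccessFile objects,
    // so a typical caller pairs them in try/finally to guarantee the descriptor
    // is returned even when the I/O fails. The page-sized seek offset below is
    // an assumption for illustration only (the default page size is 4096 bytes);
    // real page reads go through Page.read().
    private void examplePooledRead(long pageNum, byte[] buf) throws IOException {
        RandomAccessFile raf = getDescriptor();     // may block until a descriptor is free

        try {
            raf.seek(pageNum * 4096L);              // assumed: pages laid out at fixed 4096-byte offsets
            raf.readFully(buf);
        } finally {
            putDescriptor(raf);                     // always return it; wakes one waiting thread
        }
    }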

    /**
     * readValue reads the multi-Paged Value starting at the specified
     * page number.
     *
     * @param page The starting page number
     * @return The Value
     * @throws IOException if an Exception occurs
     */
    protected final Value readValue(long page) throws IOException {
        return readValue(getPage(page));
    }

    /**