
📄 Paged.java

📁 jxta_src_2.41b: JXTA 2.41b latest source code, from www.jxta.org
💻 JAVA
📖 Page 1 of 3
package net.jxta.impl.xindice.core.filer;

/*
 * The Apache Software License, Version 1.1
 *
 *
 * Copyright (c) 1999 The Apache Software Foundation.  All rights
 * reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * 3. The end-user documentation included with the redistribution,
 *    if any, must include the following acknowledgment:
 *       "This product includes software developed by the
 *        Apache Software Foundation (http://www.apache.org/)."
 *    Alternately, this acknowledgment may appear in the software itself,
 *    if and wherever such third-party acknowledgments normally appear.
 *
 * 4. The names "Xindice" and "Apache Software Foundation" must
 *    not be used to endorse or promote products derived from this
 *    software without prior written permission. For written
 *    permission, please contact apache@apache.org.
 *
 * 5. Products derived from this software may not be called "Apache",
 *    nor may "Apache" appear in their name, without prior written
 *    permission of the Apache Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE APACHE SOFTWARE FOUNDATION OR
 * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 * ====================================================================
 *
 * This software consists of voluntary contributions made by many
 * individuals on behalf of the Apache Software Foundation and was
 * originally based on software copyright (c) 1999-2001, The dbXML
 * Group, L.L.C., http://www.dbxmlgroup.com.  For more
 * information on the Apache Software Foundation, please see
 * <http://www.apache.org/>.
 *
 * $Id: Paged.java,v 1.10 2006/05/08 20:25:42 hamada Exp $
 */

import net.jxta.impl.xindice.core.DBException;
import net.jxta.impl.xindice.core.FaultCodes;
import net.jxta.impl.xindice.core.data.Key;
import net.jxta.impl.xindice.core.data.Value;

import org.apache.log4j.Level;
import org.apache.log4j.Logger;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.RandomAccessFile;
import java.lang.ref.WeakReference;
import java.util.Collection;
import java.util.EmptyStackException;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.Stack;
import java.util.WeakHashMap;

/**
 * Paged is a paged file implementation that is the foundation for both the
 * BTree class and the HashFiler. It provides flexible paged I/O and
 * page caching functionality.
 *
 * Paged has the following configuration attributes:
 * <ul>
 * <li><strong>pagesize</strong>: Size of the page used by the paged file.
 *     The default page size is 4096 bytes. This parameter can be set only
 *     before the paged file is created. Once it is created, this parameter
 *     cannot be changed.</li>
 * <li><strong>maxkeysize</strong>: Maximum allowed size of a key.
 *     The default maximum key size is 256 bytes.</li>
 * <li><strong>max-descriptors</strong>: Defines the maximum number of
 *     simultaneously open file descriptors this paged file can have.
 *     Several descriptors are needed to provide multithreaded access
 *     to the underlying file. Too large a number will limit the number of
 *     collections you can open. The default value is 16
 *     (DEFAULT_DESCRIPTORS_MAX).</li>
 * </ul>
 *
 * <br>FIXME: Currently it seems that maxkeysize is not used anywhere.
 * <br>TODO: Introduce a Paged interface and implementations.
 */
public abstract class Paged {

    /**
     * Log4J Logger
     **/
    private final static Logger LOG = Logger.getLogger(Paged.class.getName());

    /**
     * The maximum number of pages that will be held in the dirty cache.
     * Once the number reaches this limit, pages are flushed to disk.
     */
    private static final int MAX_DIRTY_SIZE = 128;

    // The maximum number of open random access files we can have
    private static final int DEFAULT_DESCRIPTORS_MAX = 16;

    /**
     * Unused page status
     */
    protected static final byte UNUSED = 0;

    /**
     * Overflow page status
     */
    protected static final byte OVERFLOW = 126;

    /**
     * Deleted page status
     */
    protected static final byte DELETED = 127;

    /**
     * Page ID of a non-existent page
     */
    protected static final int NO_PAGE = -1;

    /**
     * Flag for whether to sync the DB on every write or not.
     */
    protected boolean sync = true;

    // TODO: This is not a cache right now, but a way to ensure that at most
    // one page instance exists in memory at all times.
    /**
     * Cache of recently read pages.
     *
     * The cache contains weak references to the Page objects; keys are page
     * numbers (Long objects). Access is synchronized by this map itself.
     */
    private final Map pages = new WeakHashMap();

    /**
     * Cache of modified pages waiting to be written out.
     * Access is synchronized by the {@link #dirtyLock}.
     */
    private Map dirty = new HashMap();

    /**
     * Lock for synchronizing access to the {@link #dirty} map.
     */
    private final Object dirtyLock = new Object();

    /**
     * Random access file descriptors cache.
     * Access to it and to {@link #descriptorsCount} is synchronized by itself.
     */
    private final Stack descriptors = new Stack();

    /**
     * The number of random access file objects that exist, either in the
     * cache {@link #descriptors}, or currently in use.
     */
    private int descriptorsCount;

    /**
     * The maximum number of random access file objects that can be opened
     * by this paged instance.
     */
    private int descriptorsMax;

    /**
     * Whether the file is opened or not.
     */
    private boolean opened;

    /**
     * The underlying file where the Paged object stores its pages.
     */
    private File file;

    /**
     * Header of this Paged
     */
    private final FileHeader fileHeader;

    public Paged() {
        descriptorsMax = DEFAULT_DESCRIPTORS_MAX;
        fileHeader = createFileHeader();
    }

    public Paged(File file) {
        this();
        setFile(file);
    }

    /**
     * setFile sets the file object for this Paged.
     *
     * @param file The File
     */
    protected final void setFile(final File file) {
        this.file = file;
    }

    /**
     * getFile returns the file object for this Paged.
     *
     * @return The File
     */
    protected final File getFile() {
        return file;
    }

    /**
     * Obtains a RandomAccessFile ('descriptor') object from the pool.
     * If no descriptors are available and the maximum number has already
     * been allocated, the call will block.
     */
    protected final RandomAccessFile getDescriptor() throws IOException {
        synchronized (descriptors) {
            // If there are descriptors in the cache return one.
            if (!descriptors.empty()) {
                return (RandomAccessFile) descriptors.pop();
            }

            // Otherwise we need to get one some other way.

            // First try to create a new one if there's room
            if (descriptorsCount < descriptorsMax) {
                descriptorsCount++;
                return new RandomAccessFile(file, "rw");
            }

            // Otherwise we have to wait for one to be released by another thread.
            while (true) {
                try {
                    descriptors.wait();
                    return (RandomAccessFile) descriptors.pop();
                } catch (InterruptedException e) {
                    // Ignore, and continue to wait
                } catch (EmptyStackException e) {
                    // Ignore, and continue to wait
                }
            }
        }
    }

    /**
     * Puts a RandomAccessFile ('descriptor') back into the descriptor pool.
     */
    protected final void putDescriptor(RandomAccessFile raf) {
        if (raf != null) {
            synchronized (descriptors) {
                descriptors.push(raf);
                descriptors.notify();
            }
        }
    }

    /**
     * Closes a RandomAccessFile ('descriptor') and removes it from the pool.
     */
    protected final void closeDescriptor(RandomAccessFile raf) {
        if (raf != null) {
            try {
                raf.close();
            } catch (IOException e) {
                // Ignore close exception
            }

            // Synchronization is necessary as the decrement operation is not atomic
            synchronized (descriptors) {
                descriptorsCount--;
            }
        }
    }

    /**
     * getPage returns the page specified by pageNum.
     *
     * @param pageNum The Page number
     * @return The requested Page
     * @throws IOException if an Exception occurs
     */
    protected final Page getPage(long pageNum) throws IOException {
        final Long lp = new Long(pageNum);
        Page p;
        synchronized (this) {
            // Check if it's in the dirty cache
            // No need to synchronize on dirtyLock thanks to atomic assignment
            p = (Page) dirty.get(lp);

            // if not check if it's already loaded in the page cache
            if (p == null) {
                WeakReference ref = (WeakReference) pages.get(lp);
                if (ref != null) {
                    p = (Page) ref.get();
                }
            }

            // if still not found we need to create it and add it to the page cache.
            if (p == null) {
                p = new Page(lp);
                pages.put(p.pageNum, new WeakReference(p));
            }
        }

        // Load the page from disk if necessary
        p.read();
        return p;
    }

    /**
     * readValue reads the multi-Paged Value starting at the specified
     * Page.
     *
     * @param page The starting Page
     * @return The Value
     * @throws IOException if an Exception occurs
     */
    protected final Value readValue(Page page) throws IOException {
        final PageHeader sph = page.getPageHeader();
        ByteArrayOutputStream bos = new ByteArrayOutputStream(sph.getRecordLen());

        // Loop until we've read all the pages into memory.
        Page p = page;
        while (true) {
            PageHeader ph = p.getPageHeader();

            // Add the contents of the page onto the stream
            p.streamTo(bos);

            // Continue following the list of pages until we get to the end.
            long nextPage = ph.getNextPage();
            if (nextPage == NO_PAGE) {
                break;
            }
            p = getPage(nextPage);
        }

        // Return a Value with the collected contents of all pages.
        return new Value(bos.toByteArray());
    }

    /**
     * readValue reads the multi-Paged Value starting at the specified
     * page number.
     *
     * @param page The starting page number
     * @return The Value
     * @throws IOException if an Exception occurs
     */
    protected final Value readValue(long page) throws IOException {
        return readValue(getPage(page));
    }

    /**
     * writeValue writes the multi-Paged Value starting at the specified
     * Page.
     *
     * @param page The starting Page
     * @param value The Value to write
     * @throws IOException if an Exception occurs
     */
    protected final void writeValue(Page page, Value value) throws IOException {
        if (value == null) {
            throw new IOException("Can't write a null value");
        }

        InputStream is = value.getInputStream();

        // Write as much as we can onto the primary page.
        PageHeader hdr = page.getPageHeader();
        hdr.setRecordLen(value.getLength());
        page.streamFrom(is);

        // Write out the rest of the value onto any needed overflow pages
        while (is.available() > 0) {
            Page lpage = page;
            PageHeader lhdr = hdr;

            // Find an overflow page to use
            long np = lhdr.getNextPage();
            if (np != NO_PAGE) {
                // Use an existing page.
                page = getPage(np);
            } else {
                // Create a new overflow page
                page = getFreePage();
                lhdr.setNextPage(page.getPageNum().longValue());
            }

            // Mark the page as an overflow page.
            hdr = page.getPageHeader();
            hdr.setStatus(OVERFLOW);

            // Write some more of the value to the overflow page.
            page.streamFrom(is);
            lpage.write();
        }

        // Clean up any unused overflow pages, i.e. the value is smaller than the
        // last time it was written.
        long np = hdr.getNextPage();
        if (np != NO_PAGE) {
            unlinkPages(np);
        }
        hdr.setNextPage(NO_PAGE);
        page.write();
    }

    /**
     * writeValue writes the multi-Paged Value starting at the specified
     * page number.
     *
     * @param page The starting page number
     * @param value The Value to write
     * @throws IOException if an Exception occurs
     */
    protected final void writeValue(long page, Value value) throws IOException {
        writeValue(getPage(page), value);
    }

    /**
     * unlinkPages unlinks a set of pages starting at the specified Page.
     *
     * @param page The starting Page to unlink
     * @throws IOException if an Exception occurs
     */
    protected final void unlinkPages(Page page) throws IOException {
        // Handle the page if it's in primary space by setting its status to
        // DELETED and freeing any overflow pages linked to it.
        if (page.pageNum.longValue() < fileHeader.pageCount) {
            long nextPage = page.header.nextPage;
            page.header.setStatus(DELETED);
            page.header.setNextPage(NO_PAGE);
            page.write();

            // See if there are any chained pages from the page that was just removed
            if (nextPage == NO_PAGE) {
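
The readValue() and writeValue() methods above treat a record that outgrows a single page as a linked chain of overflow pages: each page header stores the number of the next page in the chain, or NO_PAGE at the end. As a rough, self-contained illustration of the chain-following loop in readValue(), here is an in-memory sketch; PageChainDemo, PageRec and the sample values are hypothetical names for illustration, not part of the JXTA API.

import java.io.ByteArrayOutputStream;
import java.util.HashMap;
import java.util.Map;

// In-memory model of the page chain that readValue() walks: each page
// record holds a data fragment plus the number of the next page, or NO_PAGE.
public class PageChainDemo {
    static final long NO_PAGE = -1;

    static class PageRec {
        final byte[] data;   // fragment of the value stored on this page
        final long nextPage; // next page in the chain, or NO_PAGE
        PageRec(byte[] data, long nextPage) {
            this.data = data;
            this.nextPage = nextPage;
        }
    }

    // Concatenate the fragments by following nextPage links to the end of the chain.
    static byte[] readValue(Map<Long, PageRec> store, long startPage) {
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        long page = startPage;
        while (page != NO_PAGE) {
            PageRec rec = store.get(page);
            bos.write(rec.data, 0, rec.data.length);
            page = rec.nextPage;
        }
        return bos.toByteArray();
    }

    public static void main(String[] args) {
        Map<Long, PageRec> store = new HashMap<Long, PageRec>();
        store.put(1L, new PageRec("Hello, ".getBytes(), 2L));    // primary page
        store.put(2L, new PageRec("world".getBytes(), NO_PAGE)); // overflow page
        System.out.println(new String(readValue(store, 1L)));   // prints "Hello, world"
    }
}

writeValue() runs the same chain in the other direction: it fills the primary page, allocates or reuses overflow pages until the input stream is drained, and finally unlinks any leftover pages from a previous, longer version of the value.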

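getDescriptor() and putDescriptor() above implement a small blocking pool of RandomAccessFile handles: reuse a cached descriptor if one exists, create a new one while fewer than descriptorsMax are open, otherwise wait() until another thread pushes one back and calls notify(). Below is a minimal standalone sketch of that pattern, with a String standing in for the file handle; DescriptorPool, acquire and release are illustrative names, not JXTA API.

import java.util.Stack;

// Minimal sketch of the bounded, blocking descriptor pool used by Paged.
public class DescriptorPool {
    private final Stack<String> pool = new Stack<String>(); // cached, idle descriptors
    private int count;     // descriptors created so far, cf. descriptorsCount
    private final int max; // cap on open descriptors, cf. DEFAULT_DESCRIPTORS_MAX

    public DescriptorPool(int max) {
        this.max = max;
    }

    // Hand out a cached descriptor, create one if under the cap, else block.
    public String acquire() throws InterruptedException {
        synchronized (pool) {
            if (!pool.empty()) {
                return pool.pop();            // reuse a cached descriptor
            }
            if (count < max) {
                count++;
                return "descriptor-" + count; // create a new one (stands in for new RandomAccessFile)
            }
            while (pool.empty()) {            // wait for another thread to release one
                pool.wait();
            }
            return pool.pop();
        }
    }

    // Return a descriptor to the pool and wake one waiting thread.
    public void release(String descriptor) {
        synchronized (pool) {
            pool.push(descriptor);
            pool.notify();
        }
    }
}

The original guards the post-wait pop with an EmptyStackException catch; the sketch uses the conventional while-empty-then-wait loop instead, which also covers spurious wakeups.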