
fileexpirationservice.java

JXME-related programs and materials, mainly covering application development on mobile phones and communication between phones and computers. Building the programs requires Ant.
Java | Page 1 of 3
            throw problems;
        }
        
        // schedule a periodic action to compress the database.
        pendingCompressAction =
            ss.scheduleAction( new CleanupAction(), COMPRESS_INTERVAL );
/*
        // test action to advance clock faster than normal
        ss.scheduleAction(new TimewarpAction( TimeUtils.ASECOND * 30, TimeUtils.ADAY ), TimeUtils.ASECOND * 30 );
 */
    }
    
    /**
     *  Stop the expiration service.
     *
     *  Shuts down the expiration service. The current state is maintained in
     *  the persistent log file.
     **/
    void stop() {
        
        ss.cancelAction(pendingCompressAction);
        
        try {
            compress(true);
            
            out.flush(); outFD.sync();
            out.close();
            out = null;
            outFD = null;
        } catch (IOException e) {
            if (LOG.isEnabledFor(Priority.ERROR))
                LOG.error("Compression of log stream failed on stop()", e);
        }
        
        // cancel everything.
        for( Enumeration eachEntry = hash.elements(); eachEntry.hasMoreElements(); ) {
            Entry anEntry = (Entry) eachEntry.nextElement();
            
            synchronized ( anEntry ) {
                ss.cancelAction( anEntry.expiryAction );
                anEntry.expiryAction = null;
                anEntry.file = null;
            }
        }
        
        hash.clear();
    }
    
    /**
     *  Open the persistent log file.
     *
     *  @param  file    the file to be opened.
     *  @param  forAppend   if true, the log will be opened for append.
     **/
    private void openlog( File file, boolean forAppend ) throws IOException {
        
        FileOutputStream fos = new FileOutputStream( file.getCanonicalPath(), forAppend );
        BufferedOutputStream bos = new BufferedOutputStream( fos );
        
        out = new DataOutputStream( bos );
        outFD = fos.getFD();
    }
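
    /*
     * Illustrative sketch, not part of the original source: openlog() is how
     * this class would (re)open the persistent log, for example for append
     * after a compress pass. "db" is assumed to be the log File referenced
     * elsewhere in this class, and the exact record format written through
     * "out" is not shown in this excerpt.
     *
     *   openlog( db, true );           // reopen the log for append
     *   // ... append records via "out", then force them to disk:
     *   out.flush(); outFD.sync();
     */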
    
    /**
     *  Returns the absolute time in milliseconds at which the file will expire.
     *
     *  @param f    the file whose expiration time is desired.
     *
     *  @return the absolute time in milliseconds at which this document will
     *   expire, or -1 if the file is not recognized.
     **/
    public long getPublicationLifetime(File f) {
        Entry entry = null;
        
        try {
            entry = (Entry) hash.get(f.getCanonicalPath());
        } catch ( IOException ignored ) {}
        
        if (entry == null)
            return -1;
        else
            return entry.expiresAt;
    }
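
    /*
     * Usage sketch (illustrative, not from the original source); "expirations"
     * and "someFile" are hypothetical names. The returned value is absolute,
     * so TimeUtils.toRelativeTimeMillis() converts it back to a remaining
     * lifetime.
     *
     *   long expiresAt = expirations.getPublicationLifetime( someFile );
     *   if ( expiresAt < 0 ) {
     *       // the file is not tracked by the expiration service
     *   } else {
     *       long remaining = TimeUtils.toRelativeTimeMillis( expiresAt );
     *   }
     */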
    
    /**
     *  Returns the maximum duration in milliseconds for which this document
     *  should be cached by those other than the publisher. This value is either
     *  the cache lifetime or the remaining lifetime of the document, whichever
     *  is less.
     *
     *  @param f    the file whose expiration time is desired.
     *
     *  @return the relative time in milliseconds after which remote peers
     *  should refresh this document, or -1 if the file is not known or has
     *  already expired.
     **/
    public long getCacheLifetime( File f ) {
        Entry entry = null;
        try {
            entry = (Entry) hash.get(f.getCanonicalPath());
        } catch ( IOException ignored ) {}
        
        // if the entry is not found, return -1
        if( null == entry )
            return -1;
        
        long tillExpires = TimeUtils.toRelativeTimeMillis(entry.expiresAt);
        
        // already expired?
        if ( tillExpires <= 0 )
            return -1;
        else
            return Math.min( tillExpires, entry.cacheLifetime );
    }
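
    /*
     * Usage sketch (illustrative, not from the original source); "expirations"
     * and "someFile" are hypothetical names. The result is the smaller of the
     * remaining local lifetime and the cache lifetime, so it is the value a
     * publisher would advertise as the document's expiration to remote peers.
     *
     *   long advertiseFor = expirations.getCacheLifetime( someFile );
     *   if ( advertiseFor > 0 ) {
     *       // publish the document, advertising "advertiseFor" as its expiration
     *   }
     */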
    
    /**
     *  Schedules expiration of a file.
     *
     *  @param  file    the file whose expiration is being scheduled.
     *  @param  pubLifetime time after which the document is removed from
     *  the local cache. Expressed in milliseconds relative to now.
     *  @param  cacheLifetime  time after which the document is removed from
     *  remote caches. Expressed in relative milliseconds.
     *  @return true if the expiration was updated or false if an existing
     *  expiration superseded this request.
     */
    public boolean scheduleFileExpiration(File file,
            long pubLifetime,
            long cacheLifetime ) throws IOException {
        return scheduleFileExpiration( file, pubLifetime, cacheLifetime, false );
    }
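
    /*
     * Usage sketch (illustrative, not from the original source); "expirations"
     * and "someFile" are hypothetical names. Both lifetimes are relative to
     * now; the TimeUtils constants are the same ones used elsewhere in this
     * file.
     *
     *   boolean updated = expirations.scheduleFileExpiration( someFile,
     *       7 * TimeUtils.ADAY,     // pubLifetime: keep the local copy for a week
     *       TimeUtils.ADAY );       // cacheLifetime: remote copies expire after a day
     *   // "updated" is false if an existing, later expiration superseded this request.
     */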
    
    /**
     *  Schedules expiration of a file, optionally replacing an existing
     *  expiration.
     *
     *  @param  file    the file whose expiration is being scheduled.
     *  @param  pubLifetime time after which the document is removed from
     *  the local cache. Expressed in milliseconds relative to now.
     *  @param  cacheLifetime  time after which the document is removed from
     *  remote caches. Expressed in relative milliseconds.
     *  @param  forceUpdate if true, the new expiration replaces any existing
     *  entry even if the existing expiration is later.
     *  @return true if the expiration was updated or false if an existing
     *  expiration superseded this request.
     */
    boolean scheduleFileExpiration(File file,
            long pubLifetime,
            long cacheLifetime, boolean forceUpdate ) throws IOException {
        String  filePath = file.getCanonicalPath();
        
        if ( pubLifetime < 0 )
            throw new IllegalArgumentException( "file cannot have negative local expiry" );
        
        if ( cacheLifetime < 0 )
            throw new IllegalArgumentException( "file cannot have negative remote expiry" );
        
        // limit the cacheability to be no greater than how long the adv is published for.
        if ( cacheLifetime > pubLifetime )
            cacheLifetime = pubLifetime;
        
        while (true) {
            long expiresAt = TimeUtils.toAbsoluteTimeMillis( pubLifetime );
            
            // check for overflow
            if( expiresAt < pubLifetime )
                expiresAt = Long.MAX_VALUE;
            
            // We need a collision-handling loop here. We must release
            // the lock in order to cancel, but must hold it again once
            // the old entry has been removed. Every time we release the
            // lock the situation may change: we may have to cancel again,
            // or discover that the file re-appeared with a longer lifetime.
            Entry entry = null;
            synchronized(this) {
                entry = (Entry) hash.get(filePath);
                if (entry == null) {
                    // Bingo we can proceed.
                    
                    entry   = new Entry( file, expiresAt, cacheLifetime);
                    ExpireAction action1 = new ExpireAction(entry);
                    
                    log( SCHEDULE, file, expiresAt, cacheLifetime );
                    
                    // we have to sync on entry to prevent the completion task
                    // from firing before we put the entry into the table.
                    synchronized( entry ) {
                        entry.expiryAction = ss.scheduleAction(action1, pubLifetime );
                        
                        hash.put(filePath, entry);
                    }
                    
                    return true;
                }
                
                if ( !forceUpdate ) {
                    // if the one we already have is later, don't update it.
                    
                    if ( entry.expiresAt > expiresAt ) {
                        //                        LOG.debug( "current entry later for file : " + file.getCanonicalPath() );
                        return false;
                    }
                    
                    // If nothing is changed at all do not bother messing with it.
                    if ( (entry.expiresAt == expiresAt ) &&
                    (entry.cacheLifetime == cacheLifetime) ) {
                        //                        LOG.debug( "current entry matches for file : " + file.getCanonicalPath() );
                        return false;
                    }
                }
                
                hash.remove(filePath);
                
            // We have to cancel, and that cannot be done while holding the
            // lock. So drop the lock and cancel, then retry the whole
            // operation. Do that until we get the right of way.
            }
            // NB: cancelling an action that has already been carried out
            // and removed from the scheduler does not hurt.
            ss.cancelAction(entry.expiryAction);
            entry.expiryAction = null;
        }
    }
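
    /*
     * Illustrative note, not from the original source: the forceUpdate flag is
     * what allows an expiration to be shortened. Without it, an existing later
     * expiration wins; with it, the new values always replace the old entry.
     * "expirations" and "someFile" are hypothetical names.
     *
     *   expirations.scheduleFileExpiration( someFile, 7 * TimeUtils.ADAY, TimeUtils.ADAY );
     *   // returns false because the existing expiration is later:
     *   expirations.scheduleFileExpiration( someFile, TimeUtils.ADAY, TimeUtils.ADAY );
     *   // returns true because forceUpdate bypasses the comparison:
     *   expirations.scheduleFileExpiration( someFile, TimeUtils.ADAY, TimeUtils.ADAY, true );
     */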
    
    // There's no guarantee that a new expiration does not get scheduled
    // right after we cancel, though.
    public void cancelFileExpiration(File file) {
        
        Entry e = null;
        synchronized (this) {
            try {
                e = (Entry) hash.remove(file.getCanonicalPath());
            } catch ( IOException ignored ) {}
            
            if (e == null) return;
            
            log(CANCEL, file, 0, 0);
        }
        
        // NB: cancelling an action that has already been carried out
        // and removed from the scheduler does not hurt.
        ss.cancelAction(e.expiryAction);
        e.file = null;
        e.expiryAction = null;
    }
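
    /*
     * Usage sketch (illustrative, not from the original source); "expirations"
     * and "someFile" are hypothetical names. After cancellation the service no
     * longer tracks the file, so the caller decides whether to keep or delete
     * it.
     *
     *   expirations.cancelFileExpiration( someFile );
     *   someFile.delete();   // or keep the file; it will no longer be expired automatically
     */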
    
    /**
     *  Logs the expiration status of a file to the persistent log.
     *
     *  XXX 20020429    bondolo@jxta.org    This could be improved with a
     *  scheduler queue if the updates are frequent. When called with a file,
     *  a hash is checked for an existing entry; if there is none, a new entry
     *  is created and an event is scheduled for a few seconds later. If the
     *  entry is found, the parameters of the entry are updated, but the event
     *  is not changed. This policy has the effect of aggregating updates while
     *  still keeping a log for persistence through failures.
     *
     * @param  action  what is being done to the document.
     * @param  file    the document.
     * @param  expiresAt time at which the document is removed from
     * the local cache. Expressed in absolute milliseconds.
     * @param  cacheLifetime  time after which the document is removed from
     * remote caches. Expressed in relative milliseconds.
     **/
    void log( int action, File file, long expiresAt, long cacheLifetime ) {
        try {
            logWrite(action, file, expiresAt, cacheLifetime);
        } catch (IOException e) {
            try {
                compress( true );
                
                logWrite(action, file, expiresAt, cacheLifetime);
            } catch (IOException e2) {
                // what can we do but log it?
                // while the code is running, we'll keep
                // the expiration tables in memory.
                if (LOG.isEnabledFor(Priority.WARN))
                    LOG.warn("Cannot write to " + db + ", keeping expiration tables in memory.");
            }
        }
    }
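
    /*
     * Sketch of the batching idea described in the XXX note above; this is not
     * implemented here, and all names below are hypothetical. Pending records
     * could be collected per file and flushed by a single short-delay scheduled
     * action, so a burst of updates collapses into one write per file.
     *
     *   Map pendingWrites = new Hashtable();   // filePath -> latest pending record
     *
     *   void logLater( int action, File file, long expiresAt, long cacheLifetime ) {
     *       synchronized ( pendingWrites ) {
     *           // a later update for the same file overwrites the earlier pending one
     *           pendingWrites.put( file.getPath(), new long[] { action, expiresAt, cacheLifetime } );
     *       }
     *       // a FlushAction scheduled a few seconds out would drain the map to the log
     *   }
     */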
    
    /**
     *  Writes an entry to the log file.
     *
     *  @param  action  what is being done to the document.
     *  @param  file    the document.
     *  @param  expiresAt time at which the document is removed from
     *  the local cache. Expressed in absolute milliseconds.
     *  @param  cacheLifetime  duration for which this document should be
     *  cached, expressed in milliseconds.
     **/
    private void logWrite( int action, File file, long expiresAt, long cacheLifetime )
    throws IOException {
        
        switch( action ) {
            case SCHEDULE :
/*
                if ( (TimeUtils.timeNow() - 10) > expiresAt ) {
                    LOG.debug( "why are we logging this?" );
                }
 
                if (LOG.isEnabledFor(Priority.DEBUG))
                    LOG.debug( "+ " + TimeUtils.toRelativeTimeMillis(expiresAt) + "  " + file.getCanonicalPath() );
*/
                break;
                
            case CANCEL :
/*
                if (LOG.isEnabledFor(Priority.DEBUG))
