⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 lucenesearchprovider.java

📁 wiki建站资源 java编写的 很好用
💻 JAVA
📖 第 1 页 / 共 2 页
字号:
    public void finalize()
    {
        if( m_luceneUpdateThread != null )
        {
            m_luceneUpdateThread.
        }
    }
    */

    /**
     *  Starts the background thread which drains the "pages that need
     *  updating" queue (m_updates) into the Lucene index.  Waits first
     *  for a little while before starting to go through the queue.
     */
    private void startLuceneUpdateThread()
    {
        m_luceneUpdateThread = new Thread(new Runnable()
        {
            public void run()
            {
                // FIXME: This is a kludge - JSPWiki should somehow report
                //        that init phase is complete.
                try
                {
                    Thread.sleep( 60000L );
                }
                catch( InterruptedException e ) {}

                try
                {
                    doFullLuceneReindex();

                    // Drain the queue forever; poll every 500 ms when idle.
                    // NOTE(review): m_updates is touched by this thread and by
                    // reindexPage() with no visible synchronization - confirm
                    // at its declaration that it is a thread-safe collection
                    // (e.g. Vector), otherwise updates can be lost.
                    while( true )
                    {
                        while( m_updates.size() > 0 )
                        {
                            Object[] pair = ( Object[] ) m_updates.remove(0);
                            WikiPage page = ( WikiPage ) pair[0];
                            String text = ( String ) pair[1];
                            updateLuceneIndex(page, text);
                        }
                        try
                        {
                            Thread.sleep(500);
                        }
                        catch ( InterruptedException e ) {}
                    }
                }
                catch( Exception e )
                {
                    log.error("Problem with Lucene indexing - indexing shut down (no searching)",e);
                }
            }
        });

        m_luceneUpdateThread.start();
    }

    /**
     *  Replaces the Lucene document for the given page with a freshly
     *  indexed version of the supplied text.  Periodically optimizes the
     *  index (every LUCENE_OPTIMIZE_COUNT updates).  Synchronized because
     *  only one IndexWriter may be open on the directory at a time.
     *
     *  @param page  the page whose index entry is refreshed
     *  @param text  the page text to index
     */
    private synchronized void updateLuceneIndex( WikiPage page, String text )
    {
        IndexWriter writer = null;

        log.debug("Updating Lucene index for page '" + page.getName() + "'...");

        try
        {
            // Remove the existing version of the page first, then add
            // back the new version; Lucene has no in-place update.
            pageRemoved(page);

            writer = new IndexWriter(m_luceneDirectory, getLuceneAnalyzer(), false);
            luceneIndexPage(page, text, writer);
            m_updateCount++;
            if( m_updateCount >= LUCENE_OPTIMIZE_COUNT )
            {
                writer.optimize();
                m_updateCount = 0;
            }
        }
        catch ( IOException e )
        {
            log.error("Unable to update page '" + page.getName() + "' from Lucene index", e);
        }
        catch( Exception e )
        {
            log.error("Unexpected Lucene exception - please check configuration!",e);
        }
        finally
        {
            try
            {
                if( writer != null ) writer.close();
            }
            catch( IOException e ) {}
        }

        log.debug("Done updating Lucene index for page '" + page.getName() + "'.");
    }

    /**
     *  Instantiates the configured Lucene Analyzer (class name held in
     *  m_analyzerClass) via reflection.
     *
     *  @return a new Analyzer instance
     *  @throws ClassNotFoundException if the configured class cannot be found
     *  @throws InstantiationException if the class cannot be instantiated
     *  @throws IllegalAccessException if the no-arg constructor is not accessible
     */
    private Analyzer getLuceneAnalyzer()
        throws ClassNotFoundException,
               InstantiationException,
               IllegalAccessException
    {
        Class clazz = ClassUtil.findClass( "", m_analyzerClass );
        Analyzer analyzer = (Analyzer)clazz.newInstance();
        return analyzer;
    }

    /**
     *  Builds a Lucene Document for the page and adds it to the writer.
     *  Indexes the body text (with the page name appended so titles are
     *  searchable), the page name, the author (if any), and the names of
     *  the page's attachments.
     *
     *  @param page   the page being indexed
     *  @param text   the page body text
     *  @param writer the open IndexWriter to add the document to
     *  @throws IOException if the writer fails
     */
    private void luceneIndexPage( WikiPage page, String text, IndexWriter writer )
        throws IOException
    {
        // make a new, empty document
        Document doc = new Document();

        // Raw name is the keyword we'll use to refer to this document for updates.
        doc.add(Field.Keyword(LUCENE_ID, page.getName()));

        // Body text is indexed, but not stored in doc. We add in the
        // title text as well to make sure it gets considered.
        doc.add(Field.Text(LUCENE_PAGE_CONTENTS,
                           new StringReader(text + " " +
                                            page.getName()+" "+
                                            TextUtil.beautifyString(page.getName()))));

        // Allow searching by page name
        doc.add(Field.Text(LUCENE_PAGE_NAME, page.getName()));

        // Allow searching by authorname
        if( page.getAuthor() != null )
        {
            doc.add(Field.Text(LUCENE_AUTHOR, page.getAuthor()));
        }

        // Now add the names of the attachments of this page
        try
        {
            Collection attachments = m_engine.getAttachmentManager().listAttachments(page);

            // FIX: build the semicolon-separated list with a StringBuilder
            // instead of String += in a loop (quadratic allocation).
            StringBuilder attachmentNames = new StringBuilder();

            for( Iterator it = attachments.iterator(); it.hasNext(); )
            {
                Attachment att = (Attachment) it.next();
                attachmentNames.append( att.getName() ).append( ";" );
            }
            doc.add(Field.Text(LUCENE_ATTACHMENTS, attachmentNames.toString()));
        }
        catch(ProviderException e)
        {
            // Unable to read attachments
            log.error("Failed to get attachments for page", e);
        }

        writer.addDocument(doc);
    }

    /**
     *  Removes the page's document from the Lucene index.  Called both
     *  when a page is deleted and before re-indexing an updated page.
     *
     *  @param page the page whose index entry is removed
     */
    public void pageRemoved( WikiPage page )
    {
        IndexReader reader = null;
        try
        {
            // Must first remove existing version of page.
            reader = IndexReader.open(m_luceneDirectory);
            reader.delete(new Term(LUCENE_ID, page.getName()));
        }
        catch ( IOException e )
        {
            log.error("Unable to update page '" + page.getName() + "' from Lucene index", e);
        }
        finally
        {
            // FIX: close the reader even when delete() throws, so a failed
            // delete cannot leave the index directory locked.
            if( reader != null )
            {
                try
                {
                    reader.close();
                }
                catch( IOException e ) {}
            }
        }
    }

    /**
     *  Adds a page-text pair to the lucene update queue.  Safe to call always
     */
    public void reindexPage( WikiPage page )
    {
        if( page != null )
        {
            String text;

            // TODO: Think if this was better done in the thread itself?
            if( page instanceof Attachment )
            {
                text = getAttachmentContent( (Attachment) page );
            }
            else
            {
                text = m_engine.getPureText( page );
            }

            if( text != null )
            {
                // Add work item to m_updates queue.
                Object[] pair = new Object[2];
                pair[0] = page;
                pair[1] = text;
                m_updates.add(pair);
                log.debug("Scheduling page " + page.getName() + " for index update");
            }
        }
    }

    /**
     *  Runs a Lucene query against the index and returns the matching
     *  pages as SearchResult objects.
     *
     *  @param query the raw query string, parsed by Lucene's QueryParser
     *  @return a Collection of SearchResult, or null if the index is not
     *          yet ready or an analyzer/IO problem occurred
     *  @throws ProviderException if the query cannot be parsed by Lucene
     */
    public Collection findPages( String query )
        throws ProviderException
    {
        Searcher  searcher = null;
        ArrayList list     = null;

        try
        {
            QueryParser qp = new QueryParser( LUCENE_PAGE_CONTENTS, getLuceneAnalyzer() );
            Query luceneQuery = qp.parse( query );

            try
            {
                searcher = new IndexSearcher(m_luceneDirectory);
            }
            catch( Exception ex )
            {
                // The index directory does not exist yet (background
                // indexing has not finished its first pass).
                log.info("Lucene not yet ready; indexing not started",ex);
                return null;
            }

            Hits hits = searcher.search(luceneQuery);

            list = new ArrayList(hits.length());
            for ( int curr = 0; curr < hits.length(); curr++ )
            {
                Document doc = hits.doc(curr);
                String pageName = doc.get(LUCENE_ID);
                WikiPage page = m_engine.getPage(pageName, WikiPageProvider.LATEST_VERSION);

                if(page != null)
                {
                    if(page instanceof Attachment)
                    {
                        // Currently attachments don't look nice on the search-results page
                        // When the search-results are cleaned up this can be enabled again.
                    }

                    // Scale the Lucene relevance score (0..1) to 0..100.
                    int score = (int)(hits.score(curr) * 100);
                    SearchResult result = new SearchResultImpl( page, score );
                    list.add(result);
                }
                else
                {
                    // Stale index entry: drop it so it stops showing up.
                    log.error("Lucene found a result page '" + pageName + "' that could not be loaded, removing from Lucene cache");
                    pageRemoved(new WikiPage(pageName));
                }
            }
        }
        catch( IOException e )
        {
            log.error("Failed during lucene search",e);
        }
        catch( InstantiationException e )
        {
            log.error("Unable to get a Lucene analyzer",e);
        }
        catch( IllegalAccessException e )
        {
            log.error("Unable to get a Lucene analyzer",e);
        }
        catch( ClassNotFoundException e )
        {
            log.error("Specified Lucene analyzer does not exist",e);
        }
        catch( ParseException e )
        {
            log.info("Broken query; cannot parse",e);

            throw new ProviderException("You have entered a query Lucene cannot process: "+e.getMessage());
        }
        finally
        {
            if( searcher != null ) try { searcher.close(); } catch( IOException e ) {}
        }

        return list;
    }

    /**
     *  Returns a human-readable name for this provider.
     */
    public String getProviderInfo()
    {
        return "LuceneSearchProvider";
    }

    // FIXME: This class is dumb; needs to have a better implementation

    /**
     *  Simple value object pairing a page with its scaled search score.
     */
    private class SearchResultImpl
        implements SearchResult
    {
        private WikiPage m_page;
        private int      m_score;

        public SearchResultImpl( WikiPage page, int score )
        {
            m_page  = page;
            m_score = score;
        }

        public WikiPage getPage()
        {
            return m_page;
        }

        /* (non-Javadoc)
         * @see com.ecyrd.jspwiki.SearchResult#getScore()
         */
        public int getScore()
        {
            return m_score;
        }
    }
}

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -