⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 WikiEngine.java

📁 JSPWiki,100%Java开发的一套完整WIKI程序
💻 JAVA
📖 第 1 页 / 共 4 页
字号:
        //  NOTE(review): this is the tail of a method whose beginning lies
        //  before this chunk.  If the exact page name was not found and
        //  English-plural matching is enabled, retry once with the trailing
        //  "s" toggled (stripped if present, appended if not).
        if( !isThere && m_matchEnglishPlurals )
        {
            if( page.endsWith("s") )
            {
                page = page.substring( 0, page.length()-1 );
            }
            else
            {
                page += "s";
            }

            isThere = simplePageExists( page );
        }

        //  Resolved name on success, null when no variant exists.
        return isThere ? page : null ;
    }

    /**
     *  Just queries the existing pages directly from the page manager.
     *  We also check overridden pages from jspwiki.properties
     *
     *  @param page Page name to check.
     *  @return true if the page exists (or is a configured special page).
     *  @throws ProviderException if the page provider fails.
     */
    private boolean simplePageExists( String page )
        throws ProviderException
    {
        //  Special pages declared in the configuration always "exist".
        if( getSpecialPageReference(page) != null ) return true;

        return m_pageManager.pageExists( page );
    }

    /**
     *  Turns a WikiName into something that can be
     *  called through using an URL.
     *
     *  @param pagename Raw wiki page name.
     *  @return URL-encoded form of the name.
     *  @since 1.4.1
     */
    public String encodeName( String pagename )
    {
        try
        {
            //  Encoding follows the engine-wide m_useUTF8 setting.
            if( m_useUTF8 )
                return TextUtil.urlEncodeUTF8( pagename );
            else
                return java.net.URLEncoder.encode( pagename, "ISO-8859-1" );
        }
        catch( UnsupportedEncodingException e )
        {
            //  Every JVM is required to support ISO-8859-1, so this
            //  indicates a broken platform rather than a user error.
            throw new InternalWikiException("ISO-8859-1 not a supported encoding!?!  Your platform is borked.");
        }
    }

    /**
     *  Decodes a URL-encoded page request back into a wiki page name,
     *  using the same charset selection as encodeName().
     *
     *  @param pagerequest URL-encoded page name from the request.
     *  @return The decoded page name.
     */
    public String decodeName( String pagerequest )
    {
        try
        {
            if( m_useUTF8 )
                return TextUtil.urlDecodeUTF8( pagerequest );
            else
                return java.net.URLDecoder.decode( pagerequest, "ISO-8859-1" );
        }
        catch( UnsupportedEncodingException e )
        {
            throw new InternalWikiException("ISO-8859-1 not a supported encoding!?!  Your platform is borked.");
        }
    }

    /**
     *  Returns the IANA name of the character set encoding we're
     *  supposed to be using right now.
*     *  @since 1.5.3     */    public String getContentEncoding()    {        if( m_useUTF8 )             return "UTF-8";        return "ISO-8859-1";    }    /**     *  Returns the un-HTMLized text of the latest version of a page.     *  This method also replaces the &lt; and &amp; -characters with     *  their respective HTML entities, thus making it suitable     *  for inclusion on an HTML page.  If you want to have the     *  page text without any conversions, use getPureText().     *     *  @param page WikiName of the page to fetch.     *  @return WikiText.     */    public String getText( String page )    {        return getText( page, WikiPageProvider.LATEST_VERSION );    }    /**     *  Returns the un-HTMLized text of the given version of a page.     *  This method also replaces the &lt; and &amp; -characters with     *  their respective HTML entities, thus making it suitable     *  for inclusion on an HTML page.  If you want to have the     *  page text without any conversions, use getPureText().     *     *     * @param page WikiName of the page to fetch     * @param version  Version of the page to fetch     * @return WikiText.     */    public String getText( String page, int version )    {        String result = getPureText( page, version );        //        //  Replace ampersand first, or else all quotes and stuff        //  get replaced as well with &quot; etc.        //        /*        result = TextUtil.replaceString( result, "&", "&amp;" );        */        result = TextUtil.replaceEntities( result );        return result;    }    /**     *  Returns the un-HTMLized text of the given version of a page in     *  the given context.  USE THIS METHOD if you don't know what     *  doing.     *  <p>     *  This method also replaces the &lt; and &amp; -characters with     *  their respective HTML entities, thus making it suitable     *  for inclusion on an HTML page.  If you want to have the     *  page text without any conversions, use getPureText().     
*     *  @since 1.9.15.     */    public String getText( WikiContext context, WikiPage page )    {        return getText( page.getName(), page.getVersion() );    }    /**     *  Returns the pure text of a page, no conversions.  Use this     *  if you are writing something that depends on the parsing     *  of the page.  Note that you should always check for page     *  existence through pageExists() before attempting to fetch     *  the page contents.     *     *  @param page    The name of the page to fetch.     *  @param version If WikiPageProvider.LATEST_VERSION, then uses the      *  latest version.     *  @return The page contents.  If the page does not exist,     *          returns an empty string.     */    // FIXME: Should throw an exception on unknown page/version?    public String getPureText( String page, int version )    {        String result = null;        try        {            result = m_pageManager.getPageText( page, version );        }        catch( ProviderException e )        {            // FIXME        }        finally        {            if( result == null )                result = "";        }        return result;    }    /**     *  Returns the pure text of a page, no conversions.  Use this     *  if you are writing something that depends on the parsing     *  the page. Note that you should always check for page     *  existence through pageExists() before attempting to fetch     *  the page contents.     *       *  @param page A handle to the WikiPage     *  @return String of WikiText.     *  @since 2.1.13.     */    public String getPureText( WikiPage page )    {        return getPureText( page.getName(), page.getVersion() );    }    /**     *  Returns the converted HTML of the page using a different     *  context than the default context.     
*/
    public String getHTML( WikiContext context, WikiPage page )
    {
        //  Fetch the raw markup for this exact version and translate it.
        String markup = getPureText( page.getName(), page.getVersion() );

        return textToHTML( context, markup );
    }

    /**
     *  Returns the converted HTML of the page.
     *
     *  @param page WikiName of the page to convert.
     */
    public String getHTML( String page )
    {
        return getHTML( page, WikiPageProvider.LATEST_VERSION );
    }

    /**
     *  Returns the converted HTML of the page's specific version.
     *  The version must be a positive integer, otherwise the current
     *  version is returned.
     *
     *  @param pagename WikiName of the page to convert.
     *  @param version Version number to fetch
     *  @deprecated
     */
    public String getHTML( String pagename, int version )
    {
        WikiPage page = new WikiPage( pagename );
        page.setVersion( version );

        //  Build a fresh context for this page and render through it.
        WikiContext context = new WikiContext( this, page );

        return getHTML( context, page );
    }

    /**
     *  Converts raw page data to HTML.
     *
     *  @param pagedata Raw page data to convert to HTML
     */
    public String textToHTML( WikiContext context, String pagedata )
    {
        return textToHTML( context, pagedata, null, null );
    }

    /**
     *  Reads a WikiPageful of data from a String and returns all links
     *  internal to this Wiki in a Collection.
     */
    protected Collection scanWikiLinks( WikiPage page, String pagedata )
    {
        //  The collector doubles as both the local-link and attachment-link
        //  hook; access rules are not parsed during a link scan.
        LinkCollector localCollector = new LinkCollector();

        textToHTML( new WikiContext(this,page),
                    pagedata,
                    localCollector,
                    null,
                    localCollector,
                    false );

        return localCollector.getLinks();
    }

    /**
     *  Just convert WikiText to HTML.
*/    public String textToHTML( WikiContext context,                               String pagedata,                               StringTransmutator localLinkHook,                              StringTransmutator extLinkHook )    {        return textToHTML( context, pagedata, localLinkHook, extLinkHook, null, true );    }    /**     *  Just convert WikiText to HTML.     */    public String textToHTML( WikiContext context,                               String pagedata,                               StringTransmutator localLinkHook,                              StringTransmutator extLinkHook,                              StringTransmutator attLinkHook )    {        return textToHTML( context, pagedata, localLinkHook, extLinkHook, attLinkHook, true );    }    /**     *  Helper method for doing the HTML translation.     */    private String textToHTML( WikiContext context,                                String pagedata,                                StringTransmutator localLinkHook,                               StringTransmutator extLinkHook,                               StringTransmutator attLinkHook,                               boolean            parseAccessRules )    {        String result = "";        if( pagedata == null )         {            log.error("NULL pagedata to textToHTML()");            return null;        }        TranslatorReader in = null;        Collection links = null;        try        {            pagedata = m_filterManager.doPreTranslateFiltering( context, pagedata );            in = new TranslatorReader( context,                                       new StringReader( pagedata ) );            in.addLocalLinkHook( localLinkHook );            in.addExternalLinkHook( extLinkHook );            in.addAttachmentLinkHook( attLinkHook );            if( !parseAccessRules ) in.disableAccessRules();            result = FileUtil.readContents( in );            result = m_filterManager.doPostTranslateFiltering( context, result );        }        catch( 
IOException e )        {            log.error("Failed to scan page data: ", e);        }        catch( FilterException e )        {            // FIXME: Don't yet know what to do        }        finally        {            try            {                if( in  != null ) in.close();            }            catch( Exception e )             {                log.fatal("Closing failed",e);            }        }        return( result );    }    /**     *  Updates all references for the given page.     */    public void updateReferences( WikiPage page )    {        String pageData = getPureText( page.getName(), WikiProvider.LATEST_VERSION );        m_referenceManager.updateReferences( page.getName(),                                             scanWikiLinks( page, pageData ) );    }    /**     *  Writes the WikiText of a page into the     *  page repository.     *     *  @since 2.1.28     *  @param context The current WikiContext     *  @param text    The Wiki markup for the page.     */    public void saveText( WikiContext context, String text )        throws WikiException    {        WikiPage page = context.getPage();        if( page.getAuthor() == null )        {            UserProfile wup = context.getCurrentUser();            if( wup != null ) page.setAuthor( wup.getName() );        }        text = TextUtil.normalizePostData(text);        text = m_filterManager.doPreSaveFiltering( context, text );        // Hook into cross reference collection.                m_pageManager.putPageText( page, text );        m_filterManager.doPostSaveFiltering( context, text );    }    /**     *  Returns the number of pages in this Wiki     */    public int getPageCount()    {        return m_pageManager.getTotalPageCount();    }    /**     *  Returns the provider name     */    public String getCurrentProvider()    {        return m_pageManager.getProvider().getClass().getName();    }    /**     *  return information about current provider.     
*
 *  @since 1.6.4
 */
    public String getCurrentProviderInfo()
    {
        return m_pageManager.getProviderDescription();
    }

    /**
     *  Returns a Collection of WikiPages, sorted in time
     *  order of last change.
     */
    // FIXME: Should really get a Date object and do proper comparisons.
    //        This is terribly wasteful.
    public Collection getRecentChanges()
    {
        try
        {
            Collection pages = m_pageManager.getAllPages();
            Collection  atts = m_attachmentManager.getAllAttachments();

            //  The TreeSet merges pages and attachments into one
            //  time-ordered view.
            TreeSet sortedPages = new TreeSet( new PageTimeComparator() );

            sortedPages.addAll( pages );
            sortedPages.addAll( atts );

            return sortedPages;
        }
        catch( ProviderException e )
        {
            //  NOTE(review): returns a null collection on provider failure;
            //  callers must null-check.  An empty collection would be safer,
            //  but verify existing callers before changing this.
            log.error( "Unable to fetch all pages: ",e);
            return null;
        }
    }

    /**
     *  Parses an incoming search request, then
     *  does a search.
     *  <P>
     *  Search language is simple: prepend a word
     *  with a + to force a word to be included (all files
     *  not containing that word are automatically rejected),
     *  '-' to cause the rejection of all those files that contain
     *  that word.
     */
    // FIXME: does not support phrase searches yet, but for them
    // we need a version which reads the whole page into the memory
    // once.
    //
    // FIXME: Should also have attributes attached.
    //
    public Collection findPages( String query )
    {
        //  Query terms are separated by whitespace or commas; one QueryItem
        //  per token.  (Method continues beyond this chunk.)
        StringTokenizer st = new StringTokenizer( query, " \t," );

        QueryItem[] items = new QueryItem[st.countTokens()];

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -