
WebDBInjector.java

Improvements to the crawler data handling, plus some bug fixes.
/* Copyright (c) 2003 The Nutch Organization.  All rights reserved.   */
/* Use subject to the conditions in http://www.nutch.org/LICENSE.txt. */

package net.nutch.db;

import java.io.*;
import java.net.*;
import java.util.*;
import java.util.logging.*;
import java.net.MalformedURLException;
import java.util.regex.*;

import javax.xml.parsers.*;
import org.xml.sax.*;
import org.xml.sax.helpers.*;
import org.apache.xerces.util.XMLChar;

import net.nutch.io.*;
import net.nutch.fs.*;
import net.nutch.net.*;
import net.nutch.util.*;
import net.nutch.pagedb.*;
import net.nutch.linkdb.*;
import net.nutch.util.NutchConf;

/*********************************************
 * This class takes a flat file of URLs and adds
 * them as entries into a pagedb.  Useful for 
 * bootstrapping the system.
 *
 * @author Mike Cafarella
 * @author Doug Cutting
 *********************************************/
public class WebDBInjector {
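    // Note on input format: the "flat file of URLs" mentioned in the class
    // comment is, in the common case, just a plain-text seed list with one
    // absolute URL per line.  The url-file handling code is not shown in this
    // excerpt, so treat this as an illustrative sketch rather than a spec.
    // A made-up example seed file:
    //
    //   http://www.example.com/
    //   http://www.example.org/some/page.html
    //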
    private static final String DMOZ_PAGENAME = "http://www.dmoz.org/";

    private static final byte DEFAULT_INTERVAL =
      (byte)NutchConf.getInt("db.default.fetch.interval", 30);

    private static final float NEW_INJECTED_PAGE_SCORE =
      NutchConf.getFloat("db.score.injected", 2.0f);

    public static final Logger LOG = LogFormatter.getLogger("net.nutch.db.WebDBInjector");
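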
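    // The two constants above come from the Nutch configuration.  A sketch of
    // the corresponding entries, in the <property>/<name>/<value> form used by
    // nutch-default.xml / nutch-site.xml.  The values shown are simply the
    // hard-coded fallbacks from this file, and the "days" interpretation of
    // the interval is an assumption, not something this file states:
    //
    //   <property>
    //     <name>db.default.fetch.interval</name>
    //     <value>30</value>   <!-- presumably days between re-fetches -->
    //   </property>
    //   <property>
    //     <name>db.score.injected</name>
    //     <value>2.0</value>  <!-- initial score for newly injected pages -->
    //   </property>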

    /**
     * This filter fixes characters that might offend our parser.
     * This lets us be tolerant of errors that might appear in the input XML.
     */
    private static class XMLCharFilter extends FilterReader {
      private boolean lastBad = false;

      public XMLCharFilter(Reader reader) {
        super(reader);
      }

      public int read() throws IOException {
        int c = in.read();
        int value = c;
        if (c != -1 && !(XMLChar.isValid(c)))     // fix invalid characters
          value = 'X';
        else if (lastBad && c == '<') {           // fix mis-matched brackets
          in.mark(1);
          if (in.read() != '/')
            value = 'X';
          in.reset();
        }
        lastBad = (c == 65533);

        return value;
      }

      public int read(char[] cbuf, int off, int len)
        throws IOException {
        int n = in.read(cbuf, off, len);
        if (n != -1) {
          for (int i = 0; i < n; i++) {
            char c = cbuf[off+i];
            char value = c;
            if (!(XMLChar.isValid(c)))            // fix invalid characters
              value = 'X';
            else if (lastBad && c == '<') {       // fix mis-matched brackets
              if (i != n-1 && cbuf[off+i+1] != '/')
                value = 'X';
            }
            lastBad = (c == 65533);
            cbuf[off+i] = value;
          }
        }
        return n;
      }
    }
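
    // A sketch of how XMLCharFilter is intended to be used (the real call
    // site is not shown in this excerpt): wrap the raw DMOZ input Reader
    // before handing it to the SAX parser, so invalid characters are scrubbed
    // on the fly.  A BufferedReader underneath is assumed because read() above
    // relies on in.mark(1)/in.reset(); the UTF-8 charset is also an assumption.
    //
    //   XMLReader parser = XMLReaderFactory.createXMLReader();
    //   parser.setContentHandler(rdfProcessor);
    //   Reader in = new XMLCharFilter(
    //       new BufferedReader(new InputStreamReader(
    //           new FileInputStream(dmozFile), "UTF-8")));
    //   parser.parse(new InputSource(in));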


    /**
     * The RDFProcessor receives tag messages during a parse
     * of RDF XML data.  We build whatever structures we need
     * from these messages.
     */
    class RDFProcessor extends DefaultHandler {
        String curURL = null, curSection = null;
        boolean titlePending = false, descPending = false, insideAdultSection = false;
        Pattern topicPattern = null; 
        StringBuffer title = new StringBuffer(), desc = new StringBuffer();
        XMLReader reader;
        int subsetDenom;
        int hashSkew;
        boolean includeAdult, includeDmozDesc;
        MD5Hash srcDmozID;
        long srcDmozDomainID;
        Locator location;

        /**
         * Pass in an XMLReader, plus a flag as to whether we 
         * should include adult material.
         */
        public RDFProcessor(XMLReader reader, int subsetDenom, boolean includeAdult, boolean includeDmozDesc, int skew, Pattern topicPattern) throws IOException {
            this.reader = reader;
            this.subsetDenom = subsetDenom;
            this.includeAdult = includeAdult;
            this.includeDmozDesc = includeDmozDesc;
            this.topicPattern = topicPattern;

            // We create a Page entry for the "Dmoz" page, from
            // which all descriptive links originate.  The name
            // of this page is always the same, stored in 
            // DMOZ_PAGENAME.  The MD5 is generated over the current
            // timestamp.  Until this page is deleted, the descriptive
            // links will always be kept.
            //
            // If the DMOZ page is updated with new content, you 
            // *could* update these links, if you really wanted to.
            // Just run inject again!  This will replace the old
            // Dmoz Page, because we always keep the same name.
            // That obsolete Page will be deleted, and all its 
            // outlinks (the descriptive ones) garbage-collected.
            // 
            // Then we just proceed to add the new descriptive 
            // links, with the brand-new page's src MD5.
            //
            this.srcDmozID = MD5Hash.digest(DMOZ_PAGENAME + "_" + nextFetch);
            Page dmozPage = new Page(DMOZ_PAGENAME, srcDmozID);
            dmozPage.setNextFetchTime(Long.MAX_VALUE);
            dbWriter.addPageIfNotPresent(dmozPage);

            this.srcDmozDomainID = MD5Hash.digest(new URL(DMOZ_PAGENAME).getHost()).halfDigest();

            this.hashSkew = skew != 0 ? skew : new Random().nextInt();
        }

        //
        // Interface ContentHandler
        //

        /**
         * Start of an XML elt
         */
        public void startElement(String namespaceURI, String localName, String qName, Attributes atts) throws SAXException {
            if ("Topic".equals(qName)) {
                curSection = atts.getValue("r:id");
            } else if ("ExternalPage".equals(qName)) {
                // Porn filter
                if ((! includeAdult) && curSection.startsWith("Top/Adult")) {
                    return;
                }
          
                if (topicPattern != null && !topicPattern.matcher(curSection).matches()) {
                   return;
                }

                // Subset denominator filter.  
                // Only emit with a chance of 1/denominator.
                String url = atts.getValue("about");
                int hashValue = MD5Hash.digest(url).hashCode();
                hashValue = Math.abs(hashValue ^ hashSkew);
                if ((hashValue % subsetDenom) != 0) {
                    return;
                }

                // We actually claim the URL!
                curURL = url;
            } else if (curURL != null && "d:Title".equals(qName)) {
                titlePending = true;
            } else if (curURL != null && "d:Description".equals(qName)) {
                descPending = true;
            }
        }
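
        // For reference, the element and attribute names handled above match
        // the DMOZ/ODP RDF dump format.  A minimal hand-written fragment
        // (URL and text are made up) looks roughly like this:
        //
        //   <Topic r:id="Top/Computers/Programming">
        //     ... child elements ignored by this handler ...
        //   </Topic>
        //   <ExternalPage about="http://www.example.com/">
        //     <d:Title>Example Site</d:Title>
        //     <d:Description>A short description of the page.</d:Description>
        //   </ExternalPage>
        //
        // The subset filter above keeps an ExternalPage only when its MD5-based
        // hash is divisible by subsetDenom, so with subsetDenom = 100 roughly
        // 1% of URLs survive; hashSkew just randomizes which 1% that is.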

        /**
         * The contents of an XML elt
         */
        public void characters(char ch[], int start, int length) {
            if (titlePending) {
                title.append(ch, start, length);
            } else if (descPending) {
                desc.append(ch, start, length);
            }
        }

        /**
         * Termination of XML elt
         */
        public void endElement(String namespaceURI, String localName, String qName) throws SAXException {
            if (curURL != null) {
                if ("ExternalPage".equals(qName)) {
                    //
                    // Inc the number of pages, insert the page, and 
                    // possibly print status.
                    //
                    try {
                      // First, manufacture the Page entry for the
                      // given DMOZ listing.
                      if (addPage(curURL)) {

                        // Second, add a link from the DMOZ page TO the
                        // just-added target Page.  The anchor text should 
                        // be the merged Title and Desc that we get from 
                        // the DMOZ listing.  For testing reasons, the 
                        // caller may choose to disallow this.
                        if (includeDmozDesc) {
                          String fullDesc = title + " " + desc;
                          Link descLink = new Link(srcDmozID, srcDmozDomainID, curURL, fullDesc);
                          dbWriter.addLink(descLink);
                        }
                        pages++;
                      }

                    } catch (MalformedURLException e) {
                        LOG.fine("skipping " + curURL + ":" + e);
                    } catch (IOException ie) {
                        LOG.severe("problem adding url " + curURL + ": " + ie);
                    }
                    printStatusBar(2000, 50000);

                    //
                    // Clear out the link text.  This is what
                    // you would use for adding to the linkdb.
                    //
                    if (title.length() > 0) {
                        title.delete(0, title.length());
                    }
                    if (desc.length() > 0) {
                        desc.delete(0, desc.length());
                    }

                    // Null out the URL.
                    curURL = null;
                } else if ("d:Title".equals(qName)) {
                    titlePending = false;
                } else if ("d:Description".equals(qName)) {
                    descPending = false;
                }
            }
        }

        /**
         * When parsing begins
         */
        public void startDocument() {
            LOG.info("Begin parse");
        }

        /**
         * When parsing ends
         */
        public void endDocument() {
            LOG.info("Completed parse.  Added " + pages + " pages.");
        }

        /**
