⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 rouge.java

📁 dragontoolkit用于机器学习
💻 JAVA
📖 第 1 页 / 共 2 页
字号:
            hashGrams = computeSkipBigram(referenceList, maxSkip);
            match = matchSkipBigram(testList,maxSkip, hashGrams);
            reference=countSkipBigram(testList.size(),maxSkip)+referenceList.size();

            if (reference<=0)
                evaStat[j][0] = 0;
            else
                evaStat[j][0] = (double) match / reference;

            if(test<=0)
                evaStat[j][1] =0;
            else
                evaStat[j][1] =match/(double)test;

            evaStat[j][2] =computeFScore(evaStat[j][1],evaStat[j][0]);
        }
    }

    private void computeRougeL(String testSummary, String[] refSummaries) {
        // ROUGE-L: score testSummary against each reference summary using
        // sentence-level LCS matching; fills evaStat[j] = {recall, precision, F}.
        DocumentParser parser = tokenExtractor.getDocumentParser();
        Document testDoc = parser.parse(testSummary);
        int test = tokenize(testDoc).size();
        evaStat = new double[refSummaries.length][3];

        for (int j = 0; j < refSummaries.length; j++) {
            Document refDoc = parser.parse(refSummaries[j]);

            // Sum the LCS match of every reference sentence against the test document.
            int match = 0;
            for (Paragraph para = refDoc.getFirstParagraph(); para != null; para = para.next) {
                for (Sentence sent = para.getFirstSentence(); sent != null; sent = sent.next) {
                    match += matchLCS(sent, testDoc);
                }
            }

            int reference = tokenize(refDoc).size();

            // evaStat[j][0]: recall (match / reference length), guarded against empty reference.
            evaStat[j][0] = (reference <= 0) ? 0 : (double) match / reference;
            // evaStat[j][1]: precision (match / test length), guarded against empty test.
            evaStat[j][1] = (test <= 0) ? 0 : match / (double) test;
            // evaStat[j][2]: F-score combining precision and recall.
            evaStat[j][2] = computeFScore(evaStat[j][1], evaStat[j][0]);
        }
    }

    private HashMap computeNgrams(ArrayList wordList, int nGram) {
        // Count every n-gram of length nGram in wordList.
        // Returns a map from the tab-joined gram string to its occurrence Counter.
        HashMap hashGrams = new HashMap();
        for (int start = 0; start + nGram <= wordList.size(); start++) {
            String gramStr = getNgram(wordList, start, start + nGram);
            Counter counter = (Counter) hashGrams.get(gramStr);
            if (counter == null)
                hashGrams.put(gramStr, new Counter(1));
            else
                counter.addCount(1);
        }
        return hashGrams;
    }

    private int matchNgrams(HashMap testHash, HashMap refMap) {
        // Clipped n-gram overlap: for each gram present in both maps, count
        // min(test occurrences, reference occurrences); sum over all grams.
        int count = 0;
        for (Iterator it = testHash.keySet().iterator(); it.hasNext(); ) {
            String gramStr = (String) it.next();
            Counter refCounter = (Counter) refMap.get(gramStr);
            if (refCounter != null) {
                Counter testCounter = (Counter) testHash.get(gramStr);
                count += Math.min(testCounter.getCount(), refCounter.getCount());
            }
        }
        return count;
    }

    private String getNgram(ArrayList wordList, int start, int end){
        // Build the tab-separated key for the token n-gram wordList[start, end).
        //
        // BUG FIX: the original tested (i == 0) instead of (i == start), so any
        // n-gram not anchored at position 0 was concatenated onto a null seed
        // and came out as "null\tword1\t...". Since position-0 grams lacked that
        // prefix, identical grams at different offsets could never match each
        // other in matchNgrams, under-counting the n-gram overlap.
        if (start >= end)
            return null; // preserve the original contract for an empty range
        StringBuilder gram = new StringBuilder(((Token) wordList.get(start)).getName());
        for (int i = start + 1; i < end; i++)
            gram.append('\t').append(((Token) wordList.get(i)).getName());
        return gram.toString();
    }

    private HashSet computeSkipBigram(ArrayList list, int maxSkip){
        // Collect the skip-bigrams of list: every ordered pair of token indexes
        // whose positions are at most maxSkip+1 apart. The pair id is just the
        // insertion rank (SimplePair equality presumably ignores it -- the
        // matcher probes with id -1; TODO confirm).
        HashSet hash = new HashSet();
        int last = list.size() - 1;
        for (int start = 0; ; start++) {
            int end = Math.min(start + maxSkip + 1, last);
            if (start >= end)
                break;
            int first = ((Token) list.get(start)).getIndex();
            for (int i = start + 1; i <= end; i++) {
                int second = ((Token) list.get(i)).getIndex();
                hash.add(new SimplePair(hash.size(), first, second));
            }
        }
        return hash;
    }

    private int matchSkipBigram(ArrayList list, int maxSkip, HashSet reference){
        // Count the skip-bigrams of list (position gap <= maxSkip+1) that also
        // occur in the reference set. Probes with pair id -1, so SimplePair
        // equality must be based on (first, second) only -- TODO confirm.
        int count = 0;
        int last = list.size() - 1;
        for (int start = 0; ; start++) {
            int end = Math.min(start + maxSkip + 1, last);
            if (start >= end)
                break;
            int first = ((Token) list.get(start)).getIndex();
            for (int i = start + 1; i <= end; i++) {
                int second = ((Token) list.get(i)).getIndex();
                if (reference.contains(new SimplePair(-1, first, second)))
                    count++;
            }
        }
        return count;
    }

    private int countSkipBigram(int textLength, int maxSkip){
        // Number of ordered position pairs (i, j) with i < j <= i + maxSkip + 1
        // in a text of textLength tokens; mirrors the enumeration used by
        // computeSkipBigram/matchSkipBigram so the denominators agree.
        int count = 0;
        int last = textLength - 1;
        for (int start = 0; ; start++) {
            int end = Math.min(start + maxSkip + 1, last);
            if (start >= end)
                break;
            count += end - start;
        }
        return count;
    }

    private int matchLCS(Sentence refSent, Document testDoc){
        // Union LCS match for one reference sentence against the whole test
        // document (ROUGE-L style): computes the LCS between refSent and each
        // test sentence, pools all matched elements into a sorted collection,
        // and returns its size. SortedArray with IndexComparator presumably
        // de-duplicates/orders matches by token index so a reference token is
        // not counted twice -- TODO confirm SortedArray.add semantics.
        SortedArray list;
        ArrayList refList, testList, lcsList;
        SimpleElementList keyList;
        Sentence curSent;
        Paragraph curPara;
        int i;

        // Shared keyList ensures equal words in reference and test sentences
        // receive the same integer index (see index()).
        keyList=new SimpleElementList();
        list=new SortedArray(new IndexComparator());
        refList=index(tokenize(refSent),keyList);
        curPara=testDoc.getFirstParagraph();
        while(curPara!=null){
            curSent=curPara.getFirstSentence();
            while(curSent!=null){
                testList=index(tokenize(curSent),keyList);
                // NOTE(review): computeLCS as written below returns null,
                // which would make lcsList.size() throw a NullPointerException
                // -- the stub needs a real implementation.
                lcsList=computeLCS(refList,testList);
                for(i=0;i<lcsList.size();i++)
                    list.add(lcsList.get(i));
                curSent=curSent.next;
            }
            curPara=curPara.next;
        }
        return list.size();
    }
    private ArrayList computeLCS(ArrayList first, ArrayList second){
        // Longest common subsequence between two token lists, compared by
        // token index (equal words share an index via the keyList in index()).
        // Returns the matched tokens of `first`, in order.
        //
        // BUG FIX: the original was a stub returning null, which made the
        // caller matchLCS throw a NullPointerException at lcsList.size().
        int m = first.size();
        int n = second.size();
        // len[i][j] = LCS length of first[0..i) and second[0..j).
        int[][] len = new int[m + 1][n + 1];
        for (int i = 1; i <= m; i++) {
            int firstIdx = ((Token) first.get(i - 1)).getIndex();
            for (int j = 1; j <= n; j++) {
                if (firstIdx == ((Token) second.get(j - 1)).getIndex())
                    len[i][j] = len[i - 1][j - 1] + 1;
                else
                    len[i][j] = Math.max(len[i - 1][j], len[i][j - 1]);
            }
        }
        // Backtrack to recover the matched tokens of `first`.
        ArrayList result = new ArrayList(len[m][n]);
        int i = m, j = n;
        while (i > 0 && j > 0) {
            if (((Token) first.get(i - 1)).getIndex() == ((Token) second.get(j - 1)).getIndex()) {
                result.add(0, first.get(i - 1));
                i--;
                j--;
            } else if (len[i - 1][j] >= len[i][j - 1]) {
                i--;
            } else {
                j--;
            }
        }
        return result;
    }

    private ArrayList index(ArrayList list,SimpleElementList keyList){
        // Assign each token the integer key of its value in keyList, in place,
        // so equal words end up with equal indexes (keyList.add presumably
        // returns the existing key when the value is already known -- TODO
        // confirm). Returns the same list for chaining.
        for (int pos = 0; pos < list.size(); pos++) {
            Token tok = (Token) list.get(pos);
            tok.setIndex(keyList.add(tok.getValue()));
        }
        return list;
    }

    private ArrayList tokenize(String doc){
        // Extract the token list from a raw document string; when the
        // evaluator is case-insensitive, lower-case every token in place.
        ArrayList list = tokenExtractor.extractFromDoc(doc);
        if (caseSensitive)
            return list;
        for (int k = 0; k < list.size(); k++) {
            Token tok = (Token) list.get(k);
            tok.setValue(tok.getValue().toLowerCase());
        }
        return list;
    }

    private ArrayList tokenize(Document doc){
        // Extract the token list from a parsed Document; when the evaluator
        // is case-insensitive, lower-case every token in place.
        ArrayList list = tokenExtractor.extractFromDoc(doc);
        if (caseSensitive)
            return list;
        for (int k = 0; k < list.size(); k++) {
            Token tok = (Token) list.get(k);
            tok.setValue(tok.getValue().toLowerCase());
        }
        return list;
    }

    private ArrayList tokenize(Sentence sent){
        // Extract the token list from a single Sentence; when the evaluator
        // is case-insensitive, lower-case every token in place.
        ArrayList list = tokenExtractor.extractFromSentence(sent);
        if (caseSensitive)
            return list;
        for (int k = 0; k < list.size(); k++) {
            Token tok = (Token) list.get(k);
            tok.setValue(tok.getValue().toLowerCase());
        }
        return list;
    }

    private double computeFScore(double precision, double recall){
        // F_beta = (1 + beta^2) * P * R / (R + beta^2 * P).
        // A zero precision or recall yields 0; beta == Double.MAX_VALUE is the
        // ROUGE convention for "recall only".
        if (precision == 0 || recall == 0)
            return 0;
        if (beta == Double.MAX_VALUE)
            return recall;
        double betaSq = beta * beta;
        return (1 + betaSq) * precision * recall / (recall + betaSq * precision);
    }
}

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -