
📄 Blue.java

📁 wekaUT is a semi-supervised learning extension of Weka developed at the University of Texas at Austin; this classifier is part of that package.
💻 JAVA
📖 Page 1 of 3
/*
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; either version 2 of the License, or
 *    (at your option) any later version.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *    GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program; if not, write to the Free Software
 *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 *    Blue.java
 *    Copyright (C) 2005 Prem Melville
 *
 */

///////////////////////////////////
// WARNING: UNDER DEVELOPMENT
//////////////////////////////////

package weka.classifiers.meta;

import weka.classifiers.*;
import weka.classifiers.bayes.*;
import java.util.*;
import weka.core.*;
import weka.estimators.*;

/**
 * Budgeted Learning by Utility Estimation (BLUE).
 *
 * Exhaustively estimates the utility of acquiring each feature.
 *
 * Valid options are:<p>
 *
 * -W classname <br>
 * Specify the full class name of a weak classifier as the basis for
 * bagging (required).<p>
 *
 * @author Prem Melville (melville@cs.utexas.edu)
 */
public class Blue extends DistributionClassifier
    implements OptionHandler, BudgetedLearner {

    /** Use Naive Bayes estimates for feature-value distributions. */
    protected boolean m_UseNaiveBayes = false;

    /** Sample queries probabilistically */
    protected boolean m_UseWeightedSampling = false;

    /** The attribute estimators. */
    protected Estimator [][] m_Distributions;

    /** Costs of acquiring each feature */
    protected double [] m_FeatureCosts;

    /** The model base classifier to use */
    protected DistributionClassifier m_Classifier = new weka.classifiers.trees.j48.J48();

    /** Possible selected policies */
    public static final int EXPECTED_UTILITY = 0,
        ROUND_ROBIN = 1,
        DEFAULT_RR = 2,
        ERROR_SAMPLING = 3,
        HBL = 4,
        ERROR_SAMPLING_RR = 5,
        HBL_RR = 6,
        RANDOM = 7,
        EXPECTED_UTILITY_ENTROPY = 8,
        HBL_ENTROPY = 9,
        UNCERTAINTY_SAMPLING = 10,
        CHEAPEST = 11;

    public static final Tag[] TAGS_POLICY = {
        new Tag(EXPECTED_UTILITY, "Expected Utility"),
        new Tag(ROUND_ROBIN, "Round Robin"),
        new Tag(DEFAULT_RR, "EU-RR"),
        new Tag(ERROR_SAMPLING, "Error Sampling"),
        new Tag(HBL, "Hierarchical BL"),
        new Tag(ERROR_SAMPLING_RR, "Error Sampling + RR"),
        new Tag(HBL_RR, "Hierarchical BL + RR"),
        new Tag(RANDOM, "Random"),
        new Tag(EXPECTED_UTILITY_ENTROPY, "Expected Utility (Entropy)"),
        new Tag(HBL_ENTROPY, "Hierarchical BL (Entropy)"),
        new Tag(UNCERTAINTY_SAMPLING, "Uncertainty Sampling"),
        new Tag(CHEAPEST, "Cheapest model")
    };

    /** Policy for feature selection */
    protected int m_Policy = EXPECTED_UTILITY;

    /** Possible cheap policies for the first level of HBL */
    public static final int HBL_ERROR_SAMPLING = 0,
        HBL_UNCERTAINTY_SAMPLING = 1,
        HBL_RANDOM = 2;

    public static final Tag[] TAGS_HBL = {
        new Tag(HBL_ERROR_SAMPLING, "Error Sampling"),
        new Tag(HBL_UNCERTAINTY_SAMPLING, "Uncertainty Sampling"),
        new Tag(HBL_RANDOM, "Random")
    };

    protected int m_HBLPolicy = HBL_ERROR_SAMPLING;

    /** Multiplicative factor for HBL - determines how many queries to select using error sampling */
    protected double m_Alpha = 10;

    /** Set to true to turn on debug output */
    protected boolean m_Debug = true;

    /** Random number seed */
    protected int m_Seed = 0;

    /** Random number generator */
    protected Random m_Random = new Random(m_Seed);

    /**
     * Parses a given list of options. Valid options are:<p>
     *
     * -W classname <br>
     * Specify the full class name of a weak classifier as the basis for
     * Blue (required).<p>
     *
     * @param options the list of options as an array of strings
     * @exception Exception if an option is not supported
     */
    public void setOptions(String[] options) throws Exception {
        setUseNaiveBayes(Utils.getFlag('N', options));
        setUseWeightedSampling(Utils.getFlag('S', options));

        String policy = Utils.getOption('P', options);
        if (policy.length() != 0) {
            setPolicy(Integer.parseInt(policy));
        } else {
            setPolicy(m_Policy);
        }

        String hbl_policy = Utils.getOption('H', options);
        if (hbl_policy.length() != 0) {
            setHBLPolicy(Integer.parseInt(hbl_policy));
        } else {
            setHBLPolicy(m_HBLPolicy);
        }

        String alpha = Utils.getOption('A', options);
        if (alpha.length() != 0) {
            setAlpha(Double.parseDouble(alpha));
        } else {
            setAlpha(m_Alpha);
        }

        String classifierName = Utils.getOption('W', options);
        if (classifierName.length() == 0) {
            throw new Exception("A classifier must be specified with"
                                + " the -W option.");
        }
        setClassifier((DistributionClassifier)
                      (Classifier.forName(classifierName,
                                          Utils.partitionOptions(options))));
    }

    /**
     * Gets the current settings of the Classifier.
     *
     * @return an array of strings suitable for passing to setOptions
     */
    public String [] getOptions() {
        String [] classifierOptions = new String [0];
        if ((m_Classifier != null) &&
            (m_Classifier instanceof OptionHandler)) {
            classifierOptions = ((OptionHandler) m_Classifier).getOptions();
        }

        String [] options = new String [classifierOptions.length + 11];
        int current = 0;

        if (getUseNaiveBayes()) {
            options[current++] = "-N";
        }

        if (getUseWeightedSampling()) {
            options[current++] = "-S";
        }

        options[current++] = "-P"; options[current++] = "" + getPolicy().getSelectedTag().getID();
        options[current++] = "-H"; options[current++] = "" + getHBLPolicy().getSelectedTag().getID();
        options[current++] = "-A"; options[current++] = "" + getAlpha();

        if (getClassifier() != null) {
            options[current++] = "-W";
            options[current++] = getClassifier().getClass().getName();
        }
        options[current++] = "--";
        System.arraycopy(classifierOptions, 0, options, current,
                         classifierOptions.length);
        current += classifierOptions.length;
        while (current < options.length) {
            options[current++] = "";
        }
        return options;
    }

    /**
     * Get the value of m_UseNaiveBayes.
     * @return value of m_UseNaiveBayes.
     */
    public boolean getUseNaiveBayes() {
        return m_UseNaiveBayes;
    }

    /**
     * Set the value of m_UseNaiveBayes.
     * @param v  Value to assign to m_UseNaiveBayes.
     */
    public void setUseNaiveBayes(boolean v) {
        m_UseNaiveBayes = v;
    }

    /**
     * Get the value of m_UseWeightedSampling.
     * @return value of m_UseWeightedSampling.
     */
    public boolean getUseWeightedSampling() {
        return m_UseWeightedSampling;
    }

    /**
     * Set the value of m_UseWeightedSampling.
     * @param v  Value to assign to m_UseWeightedSampling.
     */
    public void setUseWeightedSampling(boolean v) {
        m_UseWeightedSampling = v;
    }

    /**
     * Get the value of m_Alpha.
     * @return value of m_Alpha.
     */
    public double getAlpha() {
        return m_Alpha;
    }

    /**
     * Set the value of m_Alpha.
     * @param v  Value to assign to m_Alpha.
     */
    public void setAlpha(double v) {
        this.m_Alpha = v;
    }

    /**
     * Set the value of m_Policy.
     * @param v  Value to assign to m_Policy.
     */
    public void setPolicy(SelectedTag v) {
        this.m_Policy = v.getSelectedTag().getID();
    }

    /**
     * Get the value of m_Policy.
     * @return value of m_Policy.
     */
    public SelectedTag getPolicy() {
        return new SelectedTag(m_Policy, TAGS_POLICY);
    }

    /**
     * Set the value of m_Policy.
     * @param v  Value to assign to m_Policy.
     */
    public void setPolicy(int v) {
        this.m_Policy = v;
    }

    /**
     * Set the value of m_HBLPolicy.
     * @param v  Value to assign to m_HBLPolicy.
     */
    public void setHBLPolicy(SelectedTag v) {
        this.m_HBLPolicy = v.getSelectedTag().getID();
    }

    /**
     * Get the value of m_HBLPolicy.
     * @return value of m_HBLPolicy.
     */
    public SelectedTag getHBLPolicy() {
        return new SelectedTag(m_HBLPolicy, TAGS_HBL);
    }

    /**
     * Set the value of m_HBLPolicy.
     * @param v  Value to assign to m_HBLPolicy.
     */
    public void setHBLPolicy(int v) {
        this.m_HBLPolicy = v;
    }

    /**
     * Set the classifier for bagging.
     *
     * @param newClassifier the Classifier to use.
     */
    public void setClassifier(DistributionClassifier newClassifier) {
        m_Classifier = newClassifier;
    }

    /**
     * Get the classifier used as the classifier
     *
     * @return the classifier used as the classifier
     */
    public DistributionClassifier getClassifier() {
        return m_Classifier;
    }

    // Set costs of acquiring each feature
    public void setFeatureCosts(double [] featureCosts) {
        m_FeatureCosts = featureCosts;
    }
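Only the first page of the listing is shown above: the option parsing and the bean-style accessors. The training and utility-estimation methods appear on the later pages. As a rough illustration of how this fragment is meant to be used, the sketch below configures a Blue instance through the same -N/-S/-P/-A/-W options that setOptions() parses. It is a minimal sketch, assuming the wekaUT jar (which supplies DistributionClassifier, BudgetedLearner, and the rest of Blue) is on the classpath, that buildClassifier() and distributionForInstance() are implemented on the pages not shown, and that labor.arff and the NaiveBayes base learner are placeholder choices, not anything mandated by the source.

import java.io.BufferedReader;
import java.io.FileReader;

import weka.core.Instances;
import weka.core.SelectedTag;
import weka.classifiers.meta.Blue;

public class BlueExample {
    public static void main(String[] args) throws Exception {
        // Load a dataset with the standard Weka Instances(Reader) constructor.
        // "labor.arff" is a placeholder file name.
        Instances data = new Instances(new BufferedReader(new FileReader("labor.arff")));
        data.setClassIndex(data.numAttributes() - 1);

        Blue blue = new Blue();

        // Configure through the option string handled by setOptions() above:
        // -N Naive Bayes estimates, -S weighted sampling, -P selection policy,
        // -A HBL alpha, -W base classifier (assumed here to be a
        // DistributionClassifier in this wekaUT/Weka version).
        blue.setOptions(new String[] {
            "-N", "-S",
            "-P", "" + Blue.ERROR_SAMPLING,
            "-A", "5.0",
            "-W", "weka.classifiers.bayes.NaiveBayes"
        });

        // Feature acquisition costs have no command-line flag; they are set
        // through the BudgetedLearner method shown in the listing.
        // new double[n] gives all-zero placeholder costs.
        blue.setFeatureCosts(new double[data.numAttributes()]);

        // buildClassifier() and distributionForInstance() come from the
        // Classifier/DistributionClassifier contract; Blue's implementations
        // are on the pages of the listing not shown here.
        blue.buildClassifier(data);
        double[] dist = blue.distributionForInstance(data.instance(0));
        System.out.println(java.util.Arrays.toString(dist));
    }
}

The same configuration could instead be done with the individual setters, e.g. blue.setPolicy(new SelectedTag(Blue.EXPECTED_UTILITY, Blue.TAGS_POLICY)), which the -P option ultimately routes to.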
