
📄 RandomSplitResultProducer.java

📁 A collection of data mining algorithms written in Java, including clustering, classification, preprocessing, and more
💻 JAVA
📖 Page 1 of 2
/*
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; either version 2 of the License, or
 *    (at your option) any later version.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *    GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program; if not, write to the Free Software
 *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 *    RandomSplitResultProducer.java
 *    Copyright (C) 1999 Len Trigg
 *
 */

package weka.experiment;

import weka.core.AdditionalMeasureProducer;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.Utils;

import java.io.File;
import java.util.Calendar;
import java.util.Enumeration;
import java.util.Random;
import java.util.TimeZone;
import java.util.Vector;

/**
 <!-- globalinfo-start -->
 * Generates a single train/test split and calls the appropriate SplitEvaluator to generate some results.
 * <p/>
 <!-- globalinfo-end -->
 *
 <!-- options-start -->
 * Valid options are: <p/>
 *
 * <pre> -P &lt;percent&gt;
 *  The percentage of instances to use for training.
 *  (default 66)</pre>
 *
 * <pre> -D
 *  Save raw split evaluator output.</pre>
 *
 * <pre> -O &lt;file/directory name/path&gt;
 *  The filename where raw output will be stored.
 *  If a directory name is specified then the individual
 *  outputs will be gzipped, otherwise all output will be
 *  zipped to the named file. Use in conjunction with -D.
 *  (default splitEvalutorOut.zip)</pre>
 *
 * <pre> -W &lt;class name&gt;
 *  The full class name of a SplitEvaluator.
 *  eg: weka.experiment.ClassifierSplitEvaluator</pre>
 *
 * <pre> -R
 *  Set when data is not to be randomized and the data sets' size
 *  is not to be determined via probabilistic rounding.</pre>
 *
 * <pre>
 * Options specific to split evaluator weka.experiment.ClassifierSplitEvaluator:
 * </pre>
 *
 * <pre> -W &lt;class name&gt;
 *  The full class name of the classifier.
 *  eg: weka.classifiers.bayes.NaiveBayes</pre>
 *
 * <pre> -C &lt;index&gt;
 *  The index of the class for which IR statistics
 *  are to be output. (default 1)</pre>
 *
 * <pre> -I &lt;index&gt;
 *  The index of an attribute to output in the
 *  results. This attribute should identify an
 *  instance in order to know which instances are
 *  in the test set of a cross validation. If 0,
 *  no output (default 0).</pre>
 *
 * <pre> -P
 *  Add target and prediction columns to the result
 *  for each fold.</pre>
 *
 * <pre>
 * Options specific to classifier weka.classifiers.rules.ZeroR:
 * </pre>
 *
 * <pre> -D
 *  If set, classifier is run in debug mode and
 *  may output additional info to the console</pre>
 *
 <!-- options-end -->
 *
 * All options after -- will be passed to the split evaluator.
 *
 * @author Len Trigg (trigg@cs.waikato.ac.nz)
 * @version $Revision: 1.18 $
 */
public class RandomSplitResultProducer
  implements ResultProducer, OptionHandler, AdditionalMeasureProducer {

  /** for serialization */
  static final long serialVersionUID = 1403798165056795073L;

  /** The dataset of interest */
  protected Instances m_Instances;

  /** The ResultListener to send results to */
  protected ResultListener m_ResultListener = new CSVResultListener();

  /** The percentage of instances to use for training */
  protected double m_TrainPercent = 66;

  /** Whether dataset is to be randomized */
  protected boolean m_randomize = true;

  /** The SplitEvaluator used to generate results */
  protected SplitEvaluator m_SplitEvaluator = new ClassifierSplitEvaluator();

  /** The names of any additional measures to look for in SplitEvaluators */
  protected String [] m_AdditionalMeasures = null;

  /** Save raw output of split evaluators --- for debugging purposes */
  protected boolean m_debugOutput = false;

  /** The output zipper to use for saving raw splitEvaluator output */
  protected OutputZipper m_ZipDest = null;

  /** The destination output file/directory for raw output */
  protected File m_OutputFile = new File(
                                  new File(System.getProperty("user.dir")),
                                  "splitEvalutorOut.zip");

  /** The name of the key field containing the dataset name */
  public static String DATASET_FIELD_NAME = "Dataset";

  /** The name of the key field containing the run number */
  public static String RUN_FIELD_NAME = "Run";

  /** The name of the result field containing the timestamp */
  public static String TIMESTAMP_FIELD_NAME = "Date_time";

  /**
   * Returns a string describing this result producer
   * @return a description of the result producer suitable for
   * displaying in the explorer/experimenter gui
   */
  public String globalInfo() {
    return
        "Generates a single train/test split and calls the appropriate "
      + "SplitEvaluator to generate some results.";
  }

  /**
   * Sets the dataset that results will be obtained for.
   *
   * @param instances a value of type 'Instances'.
   */
  public void setInstances(Instances instances) {

    m_Instances = instances;
  }

  /**
   * Set a list of method names for additional measures to look for
   * in SplitEvaluators. This could contain many measures (of which only a
   * subset may be produceable by the current SplitEvaluator) if an experiment
   * is the type that iterates over a set of properties.
   * @param additionalMeasures an array of measure names, null if none
   */
  public void setAdditionalMeasures(String [] additionalMeasures) {
    m_AdditionalMeasures = additionalMeasures;

    if (m_SplitEvaluator != null) {
      System.err.println("RandomSplitResultProducer: setting additional "
                         + "measures for "
                         + "split evaluator");
      m_SplitEvaluator.setAdditionalMeasures(m_AdditionalMeasures);
    }
  }

  /**
   * Returns an enumeration of any additional measure names that might be
   * in the SplitEvaluator
   * @return an enumeration of the measure names
   */
  public Enumeration enumerateMeasures() {
    Vector newVector = new Vector();
    if (m_SplitEvaluator instanceof AdditionalMeasureProducer) {
      Enumeration en = ((AdditionalMeasureProducer)m_SplitEvaluator).
        enumerateMeasures();
      while (en.hasMoreElements()) {
        String mname = (String)en.nextElement();
        newVector.addElement(mname);
      }
    }
    return newVector.elements();
  }

  /**
   * Returns the value of the named measure
   * @param additionalMeasureName the name of the measure to query for its value
   * @return the value of the named measure
   * @throws IllegalArgumentException if the named measure is not supported
   */
  public double getMeasure(String additionalMeasureName) {
    if (m_SplitEvaluator instanceof AdditionalMeasureProducer) {
      return ((AdditionalMeasureProducer)m_SplitEvaluator).
        getMeasure(additionalMeasureName);
    } else {
      throw new IllegalArgumentException("RandomSplitResultProducer: "
                          + "Can't return value for : " + additionalMeasureName
                          + ". " + m_SplitEvaluator.getClass().getName() + " "
                          + "is not an AdditionalMeasureProducer");
    }
  }

  /**
   * Sets the object to send results of each run to.
   *
   * @param listener a value of type 'ResultListener'
   */
  public void setResultListener(ResultListener listener) {
    m_ResultListener = listener;
  }

  /**
   * Gets a Double representing the current date and time.
   * eg: 1:46pm on 20/5/1999 -> 19990520.1346
   *
   * @return a value of type Double
   */
  public static Double getTimestamp() {
    Calendar now = Calendar.getInstance(TimeZone.getTimeZone("UTC"));
    double timestamp = now.get(Calendar.YEAR) * 10000
      + (now.get(Calendar.MONTH) + 1) * 100
      + now.get(Calendar.DAY_OF_MONTH)
      + now.get(Calendar.HOUR_OF_DAY) / 100.0
      + now.get(Calendar.MINUTE) / 10000.0;
    return new Double(timestamp);
  }

  /**
   * Prepare to generate results.
   *
   * @throws Exception if an error occurs during preprocessing.
   */
  public void preProcess() throws Exception {
    if (m_SplitEvaluator == null) {
      throw new Exception("No SplitEvaluator set");
    }
    if (m_ResultListener == null) {
      throw new Exception("No ResultListener set");
    }
    m_ResultListener.preProcess(this);
  }

  /**
   * Perform any postprocessing. When this method is called, it indicates
   * that no more requests to generate results for the current experiment
   * will be sent.
   *
   * @throws Exception if an error occurs
   */
  public void postProcess() throws Exception {
    m_ResultListener.postProcess(this);
    if (m_debugOutput) {
      if (m_ZipDest != null) {
        m_ZipDest.finished();
        m_ZipDest = null;
      }
    }
  }

  /**
   * Gets the keys for a specified run number. Different run
   * numbers correspond to different randomizations of the data. Keys
   * produced should be sent to the current ResultListener
   *
   * @param run the run number to get keys for.
   * @throws Exception if a problem occurs while getting the keys
   */
  public void doRunKeys(int run) throws Exception {
    if (m_Instances == null) {
      throw new Exception("No Instances set");
    }
    // Add in some fields to the key like run number, dataset name
    Object [] seKey = m_SplitEvaluator.getKey();
    Object [] key = new Object [seKey.length + 2];
    key[0] = Utils.backQuoteChars(m_Instances.relationName());
    key[1] = "" + run;
    System.arraycopy(seKey, 0, key, 2, seKey.length);
    if (m_ResultListener.isResultRequired(this, key)) {
      try {
        m_ResultListener.acceptResult(this, key, null);
      } catch (Exception ex) {
        // Save the train and test datasets for debugging purposes?
        throw ex;
      }
    }
  }

  /**
   * Gets the results for a specified run number. Different run
   * numbers correspond to different randomizations of the data. Results
   * produced should be sent to the current ResultListener
   *
   * @param run the run number to get results for.
   * @throws Exception if a problem occurs while getting the results
   */
  public void doRun(int run) throws Exception {

    if (getRawOutput()) {
      if (m_ZipDest == null) {
        m_ZipDest = new OutputZipper(m_OutputFile);
      }
    }

    if (m_Instances == null) {
      throw new Exception("No Instances set");
    }
    // Add in some fields to the key like run number, dataset name
    Object [] seKey = m_SplitEvaluator.getKey();
    Object [] key = new Object [seKey.length + 2];
    key[0] = Utils.backQuoteChars(m_Instances.relationName());
    key[1] = "" + run;
    System.arraycopy(seKey, 0, key, 2, seKey.length);
    if (m_ResultListener.isResultRequired(this, key)) {

      // Randomize on a copy of the original dataset
      Instances runInstances = new Instances(m_Instances);

      Instances train;
      Instances test;

      if (!m_randomize) {

        // Don't do any randomization
        int trainSize =
          Utils.round(runInstances.numInstances() * m_TrainPercent / 100);
        int testSize = runInstances.numInstances() - trainSize;
        train = new Instances(runInstances, 0, trainSize);
        test = new Instances(runInstances, trainSize, testSize);
      } else {
        Random rand = new Random(run);
        runInstances.randomize(rand);

        // Nominal class
        if (runInstances.classAttribute().isNominal()) {

          // create the subset for each class
          int numClasses = runInstances.numClasses();
          Instances[] subsets = new Instances[numClasses + 1];
          for (int i = 0; i < numClasses + 1; i++) {
            subsets[i] = new Instances(runInstances, 10);
          }

          // divide instances into subsets
          Enumeration e = runInstances.enumerateInstances();
          while (e.hasMoreElements()) {
            Instance inst = (Instance) e.nextElement();
            if (inst.classIsMissing()) {
              subsets[numClasses].add(inst);
            } else {
              subsets[(int) inst.classValue()].add(inst);
            }
          }

          // Compactify them
          for (int i = 0; i < numClasses + 1; i++) {
            subsets[i].compactify();
          }

          // merge into train and test sets
          train = new Instances(runInstances, runInstances.numInstances());
          test = new Instances(runInstances, runInstances.numInstances());
          for (int i = 0; i < numClasses + 1; i++) {
            int trainSize =
              Utils.probRound(subsets[i].numInstances() * m_TrainPercent / 100,
                              rand);
            for (int j = 0; j < trainSize; j++) {
              train.add(subsets[i].instance(j));
            }
            for (int j = trainSize; j < subsets[i].numInstances(); j++) {
              test.add(subsets[i].instance(j));
            }
            // free memory
            subsets[i] = null;
          }
          train.compactify();
          test.compactify();

          // randomize the final sets
          train.randomize(rand);
          test.randomize(rand);
        } else {

          // Numeric target
          int trainSize =
            Utils.probRound(runInstances.numInstances() * m_TrainPercent / 100,
                            rand);
          int testSize = runInstances.numInstances() - trainSize;
          train = new Instances(runInstances, 0, trainSize);
          test = new Instances(runInstances, trainSize, testSize);
        }
      }
      try {
        Object [] seResults = m_SplitEvaluator.getResult(train, test);
        Object [] results = new Object [seResults.length + 1];
        results[0] = getTimestamp();
        System.arraycopy(seResults, 0, results, 1,
                         seResults.length);
        if (m_debugOutput) {
          String resultName =
            ("" + run + "."
             + Utils.backQuoteChars(runInstances.relationName())
             + "."
             + m_SplitEvaluator.toString()).replace(' ', '_');
          resultName = Utils.removeSubstring(resultName,
                                             "weka.classifiers.");
          resultName = Utils.removeSubstring(resultName,
                                             "weka.filters.");
          resultName = Utils.removeSubstring(resultName,
                                             "weka.attributeSelection.");
          m_ZipDest.zipit(m_SplitEvaluator.getRawResultOutput(), resultName);
        }
        m_ResultListener.acceptResult(this, key, results);
      } catch (Exception ex) {
        // Save the train and test datasets for debugging purposes?
        throw ex;
      }
    }
  }

  /**
   * Gets the names of each of the columns produced for a single run.
   * This method should really be static.
   *
   * @return an array containing the name of each column
   */
  public String [] getKeyNames() {
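
The listing ends here; the remaining accessor methods are on page 2. For context, the following is a minimal sketch of driving RandomSplitResultProducer programmatically rather than through Weka's Experimenter GUI. It is not part of the original file: it assumes a Weka 3.x jar on the classpath, and "iris.arff" and "results.csv" are placeholder paths. It follows the preProcess/doRun/postProcess lifecycle defined above.

import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;

import weka.core.Instances;
import weka.experiment.CSVResultListener;
import weka.experiment.RandomSplitResultProducer;

public class RandomSplitDemo {
  public static void main(String[] args) throws Exception {
    // Load a dataset; "iris.arff" is a placeholder path.
    Instances data =
      new Instances(new BufferedReader(new FileReader("iris.arff")));
    data.setClassIndex(data.numAttributes() - 1); // last attribute as class

    RandomSplitResultProducer rsrp = new RandomSplitResultProducer();
    rsrp.setTrainPercent(80); // override the 66% default documented above

    // Collect results as CSV; "results.csv" is a placeholder path.
    CSVResultListener listener = new CSVResultListener();
    listener.setOutputFile(new File("results.csv"));
    rsrp.setResultListener(listener);

    rsrp.setInstances(data);
    rsrp.preProcess();           // lets the listener prepare its output
    for (int run = 1; run <= 10; run++) {
      rsrp.doRun(run);           // each run seeds Random with the run number
    }
    rsrp.postProcess();
  }
}

Because doRun seeds new Random(run) with the run number, repeating a run number reproduces the identical split, and the isResultRequired check lets the listener skip results it has already stored.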

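The class also implements OptionHandler, so the same configuration can be supplied as an option string matching the javadoc at the top of the file. A short sketch follows; the class name OptionStringDemo is illustrative, not from the source, and the evaluator and classifier are just the defaults named in the javadoc.

import weka.core.Utils;
import weka.experiment.RandomSplitResultProducer;

public class OptionStringDemo {
  public static void main(String[] args) throws Exception {
    RandomSplitResultProducer rsrp = new RandomSplitResultProducer();
    // -P sets the training percentage, -W names the SplitEvaluator, and
    // everything after "--" is passed on to that evaluator (here the
    // default ClassifierSplitEvaluator wrapping ZeroR).
    rsrp.setOptions(Utils.splitOptions(
        "-P 66 -W weka.experiment.ClassifierSplitEvaluator -- "
        + "-W weka.classifiers.rules.ZeroR"));
    System.out.println(Utils.joinOptions(rsrp.getOptions()));
  }
}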