
CSVLoader.java

Collection: Matrix QR decomposition algorithms
Language: JAVA
Page 1 of 2
/*
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; either version 2 of the License, or
 *    (at your option) any later version.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *    GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program; if not, write to the Free Software
 *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 *    CSVLoader.java
 *    Copyright (C) 2000 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.core.converters;

import weka.core.Attribute;
import weka.core.FastVector;
import weka.core.Instance;
import weka.core.Instances;

import java.io.BufferedReader;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.StreamTokenizer;
import java.io.StringReader;
import java.util.Enumeration;
import java.util.Hashtable;

/**
 <!-- globalinfo-start -->
 * Reads a source that is in comma separated or tab separated format. Assumes
 * that the first row in the file determines the number of and names of the
 * attributes.
 * <p/>
 <!-- globalinfo-end -->
 *
 * @author Mark Hall (mhall@cs.waikato.ac.nz)
 * @version $Revision: 1.16 $
 * @see Loader
 */
public class CSVLoader
  extends AbstractFileLoader
  implements BatchConverter {

  /** for serialization */
  static final long serialVersionUID = 5607529739745491340L;

  /** the file extension */
  public static String FILE_EXTENSION = ".csv";

  /**
   * A list of hash tables for accumulating nominal values during parsing.
   */
  private FastVector m_cumulativeStructure;

  /**
   * Holds instances accumulated so far
   */
  private FastVector m_cumulativeInstances;

  /** the data collected from an InputStream */
  private StringBuffer m_StreamBuffer;

  /**
   * default constructor
   */
  public CSVLoader() {
    // No instances retrieved yet
    setRetrieval(NONE);
  }

  /**
   * Get the file extension used for arff files
   *
   * @return the file extension
   */
  public String getFileExtension() {
    return FILE_EXTENSION;
  }

  /**
   * Returns a description of the file type.
   *
   * @return a short file description
   */
  public String getFileDescription() {
    return "CSV data files";
  }

  /**
   * Gets all the file extensions used for this type of file
   *
   * @return the file extensions
   */
  public String[] getFileExtensions() {
    return new String[]{getFileExtension()};
  }

  /**
   * Returns a string describing this attribute evaluator
   * @return a description of the evaluator suitable for
   * displaying in the explorer/experimenter gui
   */
  public String globalInfo() {
    return "Reads a source that is in comma separated or tab separated format. "
      + "Assumes that the first row in the file determines the number of "
      + "and names of the attributes.";
  }

  /**
   * Resets the Loader object and sets the source of the data set to be
   * the supplied Stream object.
   *
   * @param input the input stream
   * @exception IOException if an error occurs
   */
  public void setSource(InputStream input) throws IOException {
    BufferedReader reader;
    String         line;

    m_structure    = null;
    m_sourceFile   = null;
    m_File         = null;
    m_StreamBuffer = new StringBuffer();
    reader         = new BufferedReader(new InputStreamReader(input));
    while ((line = reader.readLine()) != null)
      m_StreamBuffer.append(line + "\n");
  }

  /**
   * Resets the Loader object and sets the source of the data set to be
   * the supplied File object.
   *
   * @param file the source file.
   * @exception IOException if an error occurs
   */
  public void setSource(File file) throws IOException {
    super.setSource(file);

    m_StreamBuffer = null;
  }

  /**
   * Determines and returns (if possible) the structure (internally the
   * header) of the data set as an empty set of instances.
   *
   * @return the structure of the data set as an empty set of Instances
   * @exception IOException if an error occurs
   */
  public Instances getStructure() throws IOException {
    if ((m_sourceFile == null) && (m_StreamBuffer == null)) {
      throw new IOException("No source has been specified");
    }

    if (m_structure == null) {
      try {
        BufferedReader br;
        if (m_StreamBuffer != null)
          br = new BufferedReader(new StringReader(m_StreamBuffer.toString()));
        else
          br = new BufferedReader(new FileReader(m_sourceFile));
        StreamTokenizer st = new StreamTokenizer(br);
        initTokenizer(st);
        readStructure(st);
      } catch (FileNotFoundException ex) {
      }
    }

    return m_structure;
  }

  /**
   * reads the structure
   *
   * @param st the stream tokenizer to read from
   * @throws IOException if reading fails
   */
  private void readStructure(StreamTokenizer st) throws IOException {
    readHeader(st);
  }

  /**
   * Return the full data set. If the structure hasn't yet been determined
   * by a call to getStructure then method should do so before processing
   * the rest of the data set.
   *
   * @return the structure of the data set as an empty set of Instances
   * @exception IOException if there is no source or parsing fails
   */
  public Instances getDataSet() throws IOException {
    if ((m_sourceFile == null) && (m_StreamBuffer == null)) {
      throw new IOException("No source has been specified");
    }

    BufferedReader br;
    if (m_sourceFile != null) {
      setSource(m_sourceFile);
      br = new BufferedReader(new FileReader(m_sourceFile));
    }
    else {
      br = new BufferedReader(new StringReader(m_StreamBuffer.toString()));
    }
    StreamTokenizer st = new StreamTokenizer(br);
    initTokenizer(st);
    readStructure(st);

    st.ordinaryChar(',');
    st.ordinaryChar('\t');

    m_cumulativeStructure = new FastVector(m_structure.numAttributes());
    for (int i = 0; i < m_structure.numAttributes(); i++) {
      m_cumulativeStructure.addElement(new Hashtable());
    }

    // Instances result = new Instances(m_structure);
    m_cumulativeInstances = new FastVector();
    FastVector current;
    while ((current = getInstance(st)) != null) {
      m_cumulativeInstances.addElement(current);
    }
    br.close();
    // now determine the true structure of the data set
    FastVector atts = new FastVector(m_structure.numAttributes());
    for (int i = 0; i < m_structure.numAttributes(); i++) {
      String attname = m_structure.attribute(i).name();
      Hashtable tempHash = ((Hashtable)m_cumulativeStructure.elementAt(i));
      if (tempHash.size() == 0) {
        atts.addElement(new Attribute(attname));
      } else {
        FastVector values = new FastVector(tempHash.size());
        // add dummy objects in order to make the FastVector's size == capacity
        for (int z = 0; z < tempHash.size(); z++) {
          values.addElement("dummy");
        }
        Enumeration e = tempHash.keys();
        while (e.hasMoreElements()) {
          Object ob = e.nextElement();
          //      if (ob instanceof Double) {
          int index = ((Integer)tempHash.get(ob)).intValue();
          values.setElementAt(new String(ob.toString()), index);
          //      }
        }
        atts.addElement(new Attribute(attname, values));
      }
    }

    // make the instances
    String relationName;
    if (m_sourceFile != null)
      relationName = (m_sourceFile.getName()).replaceAll("\\.[cC][sS][vV]$","");
    else
      relationName = "stream";
    Instances dataSet = new Instances(relationName,
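The listing is cut off here at the page boundary (page 1 of 2). For orientation, a minimal usage sketch of the loader follows. It relies only on the methods shown above plus standard weka.core.Instances accessors; the file name iris.csv is a hypothetical placeholder.

import java.io.File;
import weka.core.Instances;
import weka.core.converters.CSVLoader;

public class CSVLoaderDemo {
  public static void main(String[] args) throws Exception {
    CSVLoader loader = new CSVLoader();
    // Point the loader at a CSV file; the first row supplies the attribute names.
    loader.setSource(new File("iris.csv"));   // hypothetical file name
    // Parse all rows; each column's type (numeric vs. nominal) is determined from its values.
    Instances data = loader.getDataSet();
    System.out.println("Relation:   " + data.relationName());
    System.out.println("Attributes: " + data.numAttributes());
    System.out.println("Instances:  " + data.numInstances());
  }
}

Note that setSource(InputStream) above buffers the entire stream into a StringBuffer, so data read from a stream rather than a file is held completely in memory before parsing.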
