📄 dfsciotest.java
字号:
/** * Copyright 2005 The Apache Software Foundation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */package org.apache.hadoop.fs;import java.io.*;import junit.framework.TestCase;import java.util.Date;import java.util.StringTokenizer;import org.apache.commons.logging.*;import org.apache.hadoop.mapred.*;import org.apache.hadoop.io.*;import org.apache.hadoop.io.SequenceFile.CompressionType;import org.apache.hadoop.conf.*;/** * Distributed i/o benchmark. * <p> * This test writes into or reads from a specified number of files. * File size is specified as a parameter to the test. * Each file is accessed in a separate map task. 
* <p> * The reducer collects the following statistics: * <ul> * <li>number of tasks completed</li> * <li>number of bytes written/read</li> * <li>execution time</li> * <li>io rate</li> * <li>io rate squared</li> * </ul> * * Finally, the following information is appended to a local file * <ul> * <li>read or write test</li> * <li>date and time the test finished</li> * <li>number of files</li> * <li>total number of bytes processed</li> * <li>throughput in mb/sec (total number of bytes / sum of processing times)</li> * <li>average i/o rate in mb/sec per file</li> * <li>standard i/o rate deviation</li> * </ul> * * @author Konstantin Shvachko */public class DFSCIOTest extends TestCase { // Constants private static final int TEST_TYPE_READ = 0; private static final int TEST_TYPE_WRITE = 1; private static final int TEST_TYPE_CLEANUP = 2; private static final int DEFAULT_BUFFER_SIZE = 1000000; private static final String BASE_FILE_NAME = "test_io_"; private static final String DEFAULT_RES_FILE_NAME = "DFSCIOTest_results.log"; private static final Log LOG = InputFormatBase.LOG; private static Configuration fsConfig = new Configuration(); private static final long MEGA = 0x100000; private static String TEST_ROOT_DIR = System.getProperty("test.build.data","/benchmarks/DFSCIOTest"); private static Path CONTROL_DIR = new Path(TEST_ROOT_DIR, "io_control"); private static Path WRITE_DIR = new Path(TEST_ROOT_DIR, "io_write"); private static Path READ_DIR = new Path(TEST_ROOT_DIR, "io_read"); private static Path DATA_DIR = new Path(TEST_ROOT_DIR, "io_data"); private static Path HDFS_TEST_DIR = new Path("/tmp/DFSCIOTest"); private static String HDFS_LIB_VERSION = System.getProperty("libhdfs.version", "1"); private static String CHMOD = new String("chmod"); private static Path HDFS_SHLIB = new Path(HDFS_TEST_DIR + "/libhdfs.so." 
+ HDFS_LIB_VERSION ); private static Path HDFS_READ = new Path(HDFS_TEST_DIR + "/hdfs_read"); private static Path HDFS_WRITE = new Path(HDFS_TEST_DIR + "/hdfs_write"); /** * Run the test with default parameters. * * @throws Exception */ public void testIOs() throws Exception { testIOs(10, 10); } /** * Run the test with the specified parameters. * * @param fileSize file size * @param nrFiles number of files * @throws IOException */ public static void testIOs(int fileSize, int nrFiles) throws IOException { FileSystem fs = FileSystem.get(fsConfig); createControlFile(fs, fileSize, nrFiles); writeTest(fs); readTest(fs); } private static void createControlFile( FileSystem fs, int fileSize, // in MB int nrFiles ) throws IOException { LOG.info("creating control file: "+fileSize+" mega bytes, "+nrFiles+" files"); fs.delete(CONTROL_DIR); for( int i=0; i < nrFiles; i++ ) { String name = getFileName(i); Path controlFile = new Path(CONTROL_DIR, "in_file_" + name); SequenceFile.Writer writer = null; try { writer = SequenceFile.createWriter(fs, fsConfig, controlFile, UTF8.class, LongWritable.class, CompressionType.NONE); writer.append(new UTF8(name), new LongWritable(fileSize)); } catch(Exception e) { throw new IOException(e.getLocalizedMessage()); } finally { if( writer != null ) writer.close(); writer = null; } } LOG.info("created control files for: "+nrFiles+" files"); } private static String getFileName( int fIdx ) { return BASE_FILE_NAME + Integer.toString(fIdx); } /** * Write/Read mapper base class. 
* <p> * Collects the following statistics per task: * <ul> * <li>number of tasks completed</li> * <li>number of bytes written/read</li> * <li>execution time</li> * <li>i/o rate</li> * <li>i/o rate squared</li> * </ul> */ private abstract static class IOStatMapper extends IOMapperBase { IOStatMapper() { super(fsConfig); } void collectStats(OutputCollector output, String name, long execTime, Object objSize ) throws IOException { long totalSize = ((Long)objSize).longValue(); float ioRateMbSec = (float)totalSize * 1000 / (execTime * MEGA); LOG.info("Number of bytes processed = " + totalSize ); LOG.info("Exec time = " + execTime ); LOG.info("IO rate = " + ioRateMbSec ); output.collect(new UTF8("l:tasks"), new UTF8(String.valueOf(1))); output.collect(new UTF8("l:size"), new UTF8(String.valueOf(totalSize))); output.collect(new UTF8("l:time"), new UTF8(String.valueOf(execTime))); output.collect(new UTF8("f:rate"), new UTF8(String.valueOf(ioRateMbSec*1000))); output.collect(new UTF8("f:sqrate"), new UTF8(String.valueOf(ioRateMbSec*ioRateMbSec*1000))); } } /** * Write mapper class. 
*/ public static class WriteMapper extends IOStatMapper { public WriteMapper() { super(); for( int i=0; i < bufferSize; i++ ) buffer[i] = (byte)('0' + i % 50); } public Object doIO( Reporter reporter, String name, long totalSize ) throws IOException { // create file totalSize *= MEGA; // create instance of local filesystem FileSystem localFS = FileSystem.getNamed("local", fsConfig); try { // native runtime Runtime runTime = Runtime.getRuntime(); // copy the dso and executable from dfs and chmod them synchronized (this) { localFS.delete(HDFS_TEST_DIR); if (!(localFS.mkdirs(HDFS_TEST_DIR))) { throw new IOException("Failed to create " + HDFS_TEST_DIR + " on local filesystem"); } } synchronized (this) { if (!localFS.exists(HDFS_SHLIB)) { FileUtil.copy(fs, HDFS_SHLIB, localFS, HDFS_SHLIB, false, fsConfig); String chmodCmd = new String(CHMOD + " a+x " + HDFS_SHLIB); Process process = runTime.exec(chmodCmd); int exitStatus = process.waitFor(); if (exitStatus != 0) { throw new IOException(chmodCmd + ": Failed with exitStatus: " + exitStatus); } } } synchronized (this) { if (!localFS.exists(HDFS_WRITE)) { FileUtil.copy(fs, HDFS_WRITE, localFS, HDFS_WRITE, false, fsConfig); String chmodCmd = new String(CHMOD + " a+x " + HDFS_WRITE); Process process = runTime.exec(chmodCmd); int exitStatus = process.waitFor(); if (exitStatus != 0) { throw new IOException(chmodCmd + ": Failed with exitStatus: " + exitStatus); } } } // exec the C program Path outFile = new Path(DATA_DIR, name); String writeCmd = new String(HDFS_WRITE + " " + outFile + " " + totalSize + " " + bufferSize); Process process = runTime.exec(writeCmd, null, new File(HDFS_TEST_DIR.toString())); int exitStatus = process.waitFor(); if (exitStatus != 0) { throw new IOException(writeCmd + ": Failed with exitStatus: " + exitStatus); } } catch (InterruptedException interruptedException) { reporter.setStatus(interruptedException.toString()); } finally { localFS.close(); } return new Long(totalSize); } } private static void 
// Runs the distributed write benchmark: clears any previous data and
// write-phase output, then launches one map task per control file.
writeTest(FileSystem fs) throws IOException {
  fs.delete(DATA_DIR);   // remove data files from any previous run
  fs.delete(WRITE_DIR);  // remove previous write-phase statistics

  runIOTest( WriteMapper.class, WRITE_DIR );
}

// Configures and submits the MapReduce job shared by the read and write
// phases: one control file per map task, mapper chosen by the caller.
// NOTE(review): truncated in this view — job configuration and submission
// continue past the visible source.
private static void runIOTest( Class mapperClass,
                               Path outputDir
                               ) throws IOException {
  JobConf job = new JobConf( fsConfig, DFSCIOTest.class );

  job.setInputPath(CONTROL_DIR);
  job.setInputFormat(SequenceFileInputFormat.class);
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -