📄 seedreader.java
字号:
wfa[i].scanForAmps();
wfa[i].scanForBias();
}
}
return wfList;
}
/**
 * Read a local or NFS-mounted file containing SEED-format data as described by a
 * Waveform object. Populates the Waveform's segment list with one WFSegment per
 * data record parsed from the file.
 *
 * @param waveform describes the path/filename, starting byte offset, and byte
 *                 count to read; its segment list receives the parsed segments
 * @return the number of WFSegments now in the waveform's segment list
 *         (0 if the file could not be opened, skipped to, or read)
 */
// NOTE: all I/O errors are reported to stderr/stdout and swallowed here;
// no exception reaches the caller.
public static int getDataLocal (Waveform waveform)
{
    wf = waveform;
    // not sure this is always true
    wf.setAmpUnits(Units.COUNTS);
    // build file name. Path comes from 'algorithm'
    String file = wf.getPathFilename();
    int traceoff = wf.getFileOffset();
    int nbytes = wf.getByteCount();
    FileInputStream seedStream;
    // open the Seed file
    try {
        seedStream = new FileInputStream(file);
    } catch (FileNotFoundException exc) {
        System.out.println ("File not found: " + file);
        return 0;
    }
    BufferedInputStream inbuff =
        new BufferedInputStream (seedStream, READBUFFERSIZE);
    int totalBytes = 0;
    try {
        // skip to this trace's byte offset within the file
        if (traceoff > 0) {
            long bytesSkipped = inbuff.skip(traceoff);
            // skip() may legally skip fewer bytes than requested;
            // treat a short skip as a hard error (the old behavior)
            if (bytesSkipped != traceoff) {
                System.err.println ("IO error: could only skip " + bytesSkipped +
                                    " wanted to skip " + traceoff);
                return 0; // stream closed by finally below
            }
        }
        // read loop: while data remains and we haven't consumed all requested bytes
        while (inbuff.available() > 0 && totalBytes < nbytes) {
            /*
             * NOTE: SEED records can be of various sizes, although the size must
             * be a power of 2. The actual size is only known after the fixed
             * header at the front of the record has been parsed.
             */
            // <1> read one fixed-size SEED header frame
            int got = readFully(inbuff, buff, 0, FrameSize);
            totalBytes += got;
            if (got < FrameSize) break;       // truncated file: stop cleanly
            wfseg = SeedReader.createWFSegment(buff);
            header = SeedReader.getHeader();
            if (header == null || wfseg == null) {
                // buffer did not contain a readable SEED header; the old code
                // would NPE here and fall into the generic catch below
                System.err.println ("Unreadable SEED header in " + file);
                break;
            }
            // <2> read the data part now that we know how big it is (=blockSize);
            // data frames are appended in the buffer right after the header bytes
            int dataSize = header.seedBlockSize - FrameSize;
            got = readFully(inbuff, buff, FrameSize, dataSize);
            totalBytes += got;
            if (got < dataSize) break;        // truncated record: stop cleanly
            if (header.isData()) {            // a data "record"
                // decompress the timeseries into the WFSegment
                wfseg.setTimeSeries( decompress (header, buff) );
                wf.getSegmentList().add(wfseg);
            }
            // non-data records (blockette-only) are skipped
        } // end of while loop
    }
    catch (IOException exc)
    {
        System.err.println ("IO error: " + exc.toString());
        exc.printStackTrace();
    }
    catch (Exception exc)
    {
        System.err.println ("General exception: " + exc.toString());
        exc.printStackTrace();
    }
    finally {
        // closing the buffered stream also closes the underlying FileInputStream;
        // the original leaked both on the early-return and exception paths
        try { inbuff.close(); } catch (IOException ignored) { /* best effort */ }
    }
    return wf.getSegmentList().size();
}

/**
 * Read exactly {@code len} bytes into {@code b} at {@code off}, looping over
 * short reads. Returns the number of bytes actually read; the result is less
 * than {@code len} only when end-of-stream is reached first.
 */
private static int readFully (BufferedInputStream in, byte[] b, int off, int len)
    throws IOException
{
    int total = 0;
    while (total < len) {
        int n = in.read(b, off + total, len - total);
        if (n < 0) break;   // EOF
        total += n;
    }
    return total;
}
/**
* Read a remote file via FTP containing Seed format as described in a
* waveform object. Populates the Waveform's segVector vector with WFSegments
* from the file. Returns the number of WFSegments in the vector.
*/
/*
public static int getDataRemote (Waveform wf) {
// not sure this is always true
wf.setAmpUnits(Units.COUNTS);
// get the remote host name from the DataSource
//String remoteHost = DataSource.getHostName();
//String remoteHost = DataSource.getIPAddress();
String remoteHost = DataSource.getIPAddress();
// catch FTP exceptions
try {
if (remoteReader == null) remoteReader = new RemoteFileReader(remoteHost);
// use the FTP reader that was created at instantiation time
// byte[] bytes = remoteReader.getBytes (wf);
byte[] bytes = remoteReader.getBytes(wf.getPathFilename(),
wf.getFileOffset(),
wf.getByteCount());
// check for no data
if (bytes.length == 0) return 0;
// System.out.println ("SeedReader got byte[] of size: "+bytes.length);
// create a stream for reading
ByteArrayInputStream inbuff = new ByteArrayInputStream (bytes);
int buffOffset = 0;
int dataSize = 0;
// file read loop =============================
while (inbuff.available() > 0) {
buffOffset = 0;
// <1> read the header
// NOTE: Seed packets can be of various sizes, although the size must be a
// power of 2. You don't know what the size is until you read the header
// of the packet.
inbuff.read(buff, buffOffset, FrameSize); // read one Seed header
wfseg = parseSeedHeader (buff);
// <2> read the data part now that we know how big it is (=blockSize)
dataSize = seedBlockSize - FrameSize;
buffOffset = FrameSize; // start storing bytes in buffer at # 64
inbuff.read(buff, buffOffset, dataSize); // append Seed data frames
if ( headerType.equalsIgnoreCase("D") ) // a data "record"
{
// decompress the data into the WFSegment
wfseg.setTimeSeries( decompress (wfseg.samplesExpected, buff) );
wf.getSegmentList().add(wfseg);
} else if (headerType.equalsIgnoreCase("V") ) // skip non-data blocks
{
// noop
}
} // end of while loop
inbuff.close();
}
catch (IOException exc)
{
System.out.println ("IO error: " + exc.toString());
}
catch (Exception exc)
{
System.out.println ("General exception: " + exc.toString());
}
return wf.getSegmentList().size();
}
*/
/**
 * Get the timeseries for this waveform from the datasource, using the
 * waveform's own start and end times as the request window.
 *
 * @param waveform the waveform whose timeseries is requested
 * @return the result of the underlying DbBlobReader call
 */
public static int getDataFromDataSource(Waveform waveform) {
    if (dbBlobReader == null) {
        // lazily create the shared reader on first use
        dbBlobReader = new DbBlobReader();
    }
    double start = waveform.getStart().doubleValue();
    double end = waveform.getEnd().doubleValue();
    return dbBlobReader.getDataFromDataSource(waveform, start, end);
}
/**
 * Get the timeseries for this waveform over an explicit time window
 * from the datasource.
 *
 * @param waveform  the waveform whose timeseries is requested
 * @param startTime window start (epoch seconds, presumably — confirm units)
 * @param endTime   window end; must not precede startTime
 * @return the result of the underlying DbBlobReader call
 * @throws IllegalArgumentException if endTime &lt; startTime
 */
public static int getDataFromDataSource(Waveform waveform, double startTime, double endTime) {
    if (endTime < startTime) {
        throw new IllegalArgumentException("input endTime < startTime");
    }
    if (dbBlobReader == null) {
        // lazily create the shared reader on first use
        dbBlobReader = new DbBlobReader();
    }
    return dbBlobReader.getDataFromDataSource(waveform, startTime, endTime);
}
/**
 * Create a WFSegment from the information in the given SEED header.
 * The data (timeseries) portion of the segment is NOT populated here;
 * call decompress() afterwards to interpret and load the samples.
 *
 * @param h a parsed SEED header
 * @return the segment built by the header itself
 */
public static WFSegment createWFSegment (SeedHeader h) {
    WFSegment segment = h.createWFsegment();
    return segment;
}
/*
Channel ch = Channel.create();
ch.setChannelName(h.chan);
WFSegment wfseg = new WFSegment (ch);
wfseg.setStart(h.datetime);
wfseg.samplesExpected = h.sampleCount;
wfseg.setSampleInterval(1.0/h.samplesPerSecond);
// object exists, assume a default format and zero length pathname
if (wf == null) { // no valid Waveform
//wfseg.fmt = STEIM1;
wfseg.fmt = Waveform.SEED_FORMAT;
wfseg.filename = "";
}
else { // use Waveform
// wfseg.fmt = h.encodingFormat;
wfseg.fmt = wf.format.intValue(); // value from dbase
wfseg.filename = wf.getPathFilename();
}
return wfseg;
}
*/
/*
READ QUANTERRA SEED "RECORDS" (PACKETS)
Seed records are FIXED size and are always a power of 2; usually
from 512 to 4096 bytes.
SEED "records" are made up of 64 byte "frames".
There is ALWAYS a 48 byte fixed data header. The header may be
followed by any number of "blockettes" which are additional data
and information about the time series in the record. Any extra space
in the header contains garbage.
If there are enough blockettes they may extend into the next frame or frames.
Therefore, data may begin in the 2nd, 3rd or Nth frame. A non-data
record may consist of ONLY a header and some blockettes and garbage in
the time-series frames.
512 byte fixed length records are divided into 8 64-byte "frames"
The first frame contains the SEED data header and data blockettes.
For non-time-series packets the remaining 7 frames contain more
blockettes (usually 1 or 2 total) followed by garbage.
For time-series packets the remaining 7 frames contain compressed data.
c The FIXED DATA HEADER contains how many blockettes follow and each
c blockette contains the byte offset to the next blockette.
c
c SEED "D" RECORD STRUCTURE (have Time-series "1001" Blockettes)
c
c Frame#
c +-------------------------------------------------------+
c 1 | Fixed data header | 1000 blockette | 1001 blockette |
c | (48 bytes) | (8 bytes) | (8 bytes) |
c +-------------------------------------------------------+
c 2 | Data Frame |
c +-------------------------------------------------------+
c 3 | Data Frame |
c +-------------------------------------------------------+
c .
c .
c .
c +-------------------------------------------------------+
c 7 | Data Frame |
c +-------------------------------------------------------+
c
c
c SEED "other" RECORD STRUCTURE (All other Blockettes)
c
c Frame#
c +-------------------------------------------------------+
c 1 | Fixed data header | Blockette | Blockette... |
c | (48 bytes) | (8-32 bytes) | (8-32 bytes) |
c +-------------------------------------------------------+
c 2 | ... more blockette(s) |
c +-------------------------------------------------------+
c 3 | Garbage |
c +-------------------------------------------------------+
c .
c .
c .
c +-------------------------------------------------------+
c 7 | Garbage |
c +-------------------------------------------------------+
c
c Data Blockette structure:
c
c FIELD TYPE LENGTH (bytes)
c Blockette type Byte 2 (interpret as int)
c Byte of next blockette Byte 2 (interpret as int)
c Data fields depends on blockette type
c .
c .
c .
c
c Known Blockette types:
c
c NUMBER TYPE
c 100 sample rate
c 200 generic event detection
c 201 Murdoch-Hutt event detection
c 202 LOG-Z event detection
C 300 step calibration
C 310 sine calibration
C 320 pseudo-random calibration
C 390 generic calibration
C 395 calibration abort
C 400 Beam (not used by Quanterra)
C 405 Beam delay (not used by Quanterra)
c 500 time stamp
c 1000 Data format description
c 1001 Data (time-series)
*/
/**
 * Interpret this byte array as a SEED header.
 * Pass-through convenience that delegates to SeedHeader.parseSeedHeader(buff).
 *
 * @param buff raw bytes beginning with a SEED fixed data header
 * @return the parsed header; may be null when the buffer is not a readable
 *         SEED header (per the null handling in createWFSegment)
 */
public static SeedHeader parseSeedHeader (byte[] buff) {
    SeedHeader parsed = SeedHeader.parseSeedHeader(buff);
    return parsed;
}
/**
 * Create a WFSegment using the information in a raw SEED header buffer.
 * Side effect: caches the parsed header in the static 'header' field,
 * retrievable afterwards via getHeader(). The data part of the segment
 * is not populated; call decompress() to interpret and load timeseries.
 *
 * @param buffer raw bytes beginning with a SEED fixed data header
 * @return the new segment, or null if the buffer did not contain a
 *         readable SEED header
 */
public static WFSegment createWFSegment (byte[] buffer) {
    header = SeedHeader.parseSeedHeader(buffer);
    return (header == null) ? null : header.createWFsegment();
}
/** Get the SEED header object cached by the last createWFSegment(byte[]) call.
 * Will be null if nothing has been processed yet, or if the last buffer did
 * not parse as a readable SEED header.
 * NOTE(review): the old comment said null when "the data type is not D" —
 * the visible code caches the header regardless of record type; confirm
 * against SeedHeader.parseSeedHeader before relying on either claim. */
public static SeedHeader getHeader() {
return header;
}
/**
 * Decode a 10-byte SEED BTIME structure and convert it to "UNIX" epoch seconds.
 *
 * BTIME layout (FORTRAN-style, from the original notes):
 *   INTEGER*2 year
 *   INTEGER*2 day-of-year ("Julian" day)
 *   BYTE hour, BYTE minute, BYTE second, BYTE unused filler
 *   INTEGER*2 fractional seconds, in units of 0.0001 s ("ms * 10")
 *
 * The DateTime class keeps sub-millisecond precision (1e-6 s resolution),
 * whereas native Java time is only good to milliseconds (0.001 s).
 *
 * @param buff byte array containing the BTIME structure
 * @param idx  offset of the BTIME within buff
 * @return epoch time in seconds
 */
public static double seedTime (byte[] buff, int idx)
{
    int year = Bits.byteToInt2(buff, idx);
    int dayOfYear = Bits.byteToInt2(buff, idx + 2);
    int hour = buff[idx + 4];
    int minute = buff[idx + 5];
    int wholeSec = buff[idx + 6];
    int tenThousandths = Bits.byteToInt2(buff, idx + 8);
    // fold the 0.0001-second fractional field into the seconds value
    double seconds = (double) wholeSec + (double) tenThousandths / 10000.0;
    // month argument of 0 plus a day-of-year — presumably selects DateTime's
    // Julian-day constructor form; confirm against the DateTime class
    DateTime when = new DateTime(year, 0, dayOfYear, hour, minute, seconds);
    return when.getEpochSeconds();
}
/**
* Decompress one SEED packet (Only handles Steim1 compression at this time)
* Assumes you've already parsed the header because
*/
/* static float[] decompress (SeedHeader header, byte[] buff) {
return decompress (header.dataOffset, header.sampleCount,
header.framesInRecord, buff);
}
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -