// ShapefileReader.java
// ensure the proper position, regardless of read or handler behavior
buffer.position(this.toBufferOffset(record.end));
// no more data left
if (buffer.remaining() < 8)
return false;
// looks good
boolean hasNext = true;
if (checkRecno) {
// record headers in big endian
buffer.order(ByteOrder.BIG_ENDIAN);
hasNext = buffer.getInt() == record.number + 1;
}
// reset things to as they were
buffer.position(position);
return hasNext;
}
/**
* Transfer (by bytes) the data at the current record to the
* ShapefileWriter.
*
* @param writer
*            the ShapefileWriter receiving the record
* @param recordNum
*            the record number to write into the copied record header
* @param bounds
*            double array of length four for transferring the bounds
*            into
* @return The length of the record transferred in bytes
*/
public int transferTo(ShapefileWriter writer, int recordNum, double[] bounds)
throws IOException {
buffer.position(this.toBufferOffset(record.end));
buffer.order(ByteOrder.BIG_ENDIAN);
buffer.getInt(); // record number
int rl = buffer.getInt();
int mark = buffer.position();
int len = rl * 2;
buffer.order(ByteOrder.LITTLE_ENDIAN);
ShapeType recordType = ShapeType.forID(buffer.getInt());
if (recordType.isMultiPoint()) {
for (int i = 0; i < 4; i++) {
bounds[i] = buffer.getDouble();
}
} else if (recordType != ShapeType.NULL) {
bounds[0] = bounds[1] = buffer.getDouble();
bounds[2] = bounds[3] = buffer.getDouble();
}
// write header to shp and shx
headerTransfer.position(0);
headerTransfer.putInt(recordNum).putInt(rl).position(0);
writer.shpChannel.write(headerTransfer);
headerTransfer.putInt(0, writer.offset).position(0);
writer.offset += rl + 4;
writer.shxChannel.write(headerTransfer);
// reset to mark and limit at end of record, then write
buffer.position(mark).limit(mark + len);
writer.shpChannel.write(buffer);
buffer.limit(buffer.capacity());
record.end = this.toFileOffset(buffer.position());
record.number++;
return len;
}
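/*
 * Usage sketch (illustrative only): copying every record of this reader
 * into a ShapefileWriter, e.g. when rewriting a shp/shx pair. The
 * "reader", "writer" and "recordBounds" variables are hypothetical and
 * not part of this class.
 *
 *   double[] recordBounds = new double[4];
 *   int recordNum = 1;
 *   while (reader.hasNext()) {
 *       reader.transferTo(writer, recordNum++, recordBounds);
 *       // recordBounds now holds the copied record's envelope
 *   }
 */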
/**
* Fetch the next record information.
*
* @throws IOException
* @return The record instance associated with this reader.
*/
public Record nextRecord() throws IOException {
// need to update position
buffer.position(this.toBufferOffset(record.end));
// record header is big endian
buffer.order(ByteOrder.BIG_ENDIAN);
// read shape record header
int recordNumber = buffer.getInt();
// silly ESRI says contentLength is in 2-byte words
// and ByteBuffer uses bytes.
// track the record location
int recordLength = buffer.getInt() * 2;
if (!buffer.isReadOnly() && !useMemoryMappedBuffer) {
// capacity is less than required for the record
// copy the old into the newly allocated
if (buffer.capacity() < recordLength + 8) {
this.currentOffset += buffer.position();
ByteBuffer old = buffer;
// ensure enough capacity for one more record header
buffer = ensureCapacity(buffer, recordLength + 8,
useMemoryMappedBuffer);
buffer.put(old);
NIOUtilities.clean(old);
fill(buffer, channel);
buffer.position(0);
} else
// remaining is less than record length
// compact the remaining data and read again,
// allowing enough room for one more record header
if (buffer.remaining() < recordLength + 8) {
this.currentOffset += buffer.position();
buffer.compact();
fill(buffer, channel);
buffer.position(0);
}
}
// shape record is all little endian
buffer.order(ByteOrder.LITTLE_ENDIAN);
// read the type, handlers don't need it
ShapeType recordType = ShapeType.forID(buffer.getInt());
// this usually happens if the handler logic is bunk,
// but bad files could exist as well...
if (recordType != ShapeType.NULL && recordType != fileShapeType) {
throw new IllegalStateException("ShapeType changed illegally from "
+ fileShapeType + " to " + recordType);
}
// peek at bounds, then reset for handler
// many handlers may ignore bounds reading, but we don't want to
// second guess them...
buffer.mark();
if (recordType.isMultiPoint()) {
record.minX = buffer.getDouble();
record.minY = buffer.getDouble();
record.maxX = buffer.getDouble();
record.maxY = buffer.getDouble();
} else if (recordType != ShapeType.NULL) {
record.minX = record.maxX = buffer.getDouble();
record.minY = record.maxY = buffer.getDouble();
}
buffer.reset();
record.offset = record.end;
// update all the record info.
record.length = recordLength;
record.type = recordType;
record.number = recordNumber;
// remember, we read one int already...
record.end = this.toFileOffset(buffer.position()) + recordLength - 4;
// mark this position for the reader
record.start = buffer.position();
// clear any cached shape
record.shape = null;
return record;
}
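/*
 * Usage sketch (illustrative only): the usual forward-iteration pattern.
 * The "reader" variable is hypothetical; the concrete class of the object
 * returned by shape() depends on the installed ShapeHandler.
 *
 *   while (reader.hasNext()) {
 *       Record record = reader.nextRecord();
 *       Object shape = record.shape(); // parsed on demand by the handler
 *   }
 */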
/**
* Moves the reader to the given byte offset within the shp file. The file
* header is 100 bytes long, so the first record starts at offset 100;
* offsets taken from an shx index are in 16-bit words and must be doubled.
*
* @param offset
*            byte offset, relative to the start of the file, of a record header
* @throws IOException
*             if a read error occurs while repositioning
* @throws UnsupportedOperationException
*             if random access is not enabled for this reader
*/
public void goTo(int offset) throws IOException,
UnsupportedOperationException {
if (randomAccessEnabled) {
if (this.useMemoryMappedBuffer) {
buffer.position(offset);
} else {
/*
* Check to see if requested offset is already loaded; ensure
* that record header is in the buffer
*/
if (this.currentOffset <= offset
&& this.currentOffset + buffer.limit() >= offset + 8) {
buffer.position(this.toBufferOffset(offset));
} else {
FileChannel fc = (FileChannel) this.channel;
fc.position(offset);
this.currentOffset = offset;
buffer.position(0);
fill(buffer, fc);
buffer.position(0);
}
}
int oldRecordOffset = record.end;
record.end = offset;
try {
hasNext(false); // don't check for next logical record equality
} catch (IOException ioe) {
record.end = oldRecordOffset;
throw ioe;
}
} else {
throw new UnsupportedOperationException("Random Access not enabled");
}
}
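/*
 * Usage sketch (illustrative only): the shp file header is 100 bytes, so
 * goTo(100) positions the reader at the first record; this is exactly what
 * getCount does before it starts counting. "reader" is hypothetical.
 *
 *   reader.goTo(100);
 *   Record first = reader.nextRecord();
 */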
/**
* Returns the parsed shape at the given byte offset within the shp file.
* See {@link #goTo(int)} for how the offset is interpreted.
*
* @param offset
*            byte offset, relative to the start of the file, of a record header
* @return the shape stored in the record at that offset
* @throws IOException
*             if a read error occurs
* @throws UnsupportedOperationException
*             if random access is not enabled for this reader
*/
public Object shapeAt(int offset) throws IOException,
UnsupportedOperationException {
if (randomAccessEnabled) {
this.goTo(offset);
return nextRecord().shape();
}
throw new UnsupportedOperationException("Random Access not enabled");
}
/**
* Sets the current location of the byteStream to offset and returns the
* next record. Usually used in conjunction with the shx file or some other
* index file.
*
* @param offset
*            If using an shx file the offset would be: 2 *
*            (index.getOffset(i))
* @return The record after the offset location in the bytestream
* @throws IOException
*             thrown if a read error occurs
* @throws UnsupportedOperationException
*             thrown if not a random access file
*/
public Record recordAt(int offset) throws IOException,
UnsupportedOperationException {
if (randomAccessEnabled) {
this.goTo(offset);
return nextRecord();
}
throw new UnsupportedOperationException("Random Access not enabled");
}
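/*
 * Usage sketch (illustrative only): random access driven by an shx index.
 * "reader" and "index" are hypothetical stand-ins for whatever index
 * reader is in use; as noted above, shx offsets are 16-bit words, hence
 * the factor of 2.
 *
 *   int byteOffset = 2 * index.getOffset(i);
 *   Record record = reader.recordAt(byteOffset);
 */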
/**
* Converts file offset to buffer offset
*
* @param offset
*            The offset relative to the whole file
* @return The offset relative to the current loaded portion of the file
*/
private int toBufferOffset(int offset) {
return (int) (offset - this.currentOffset);
}
/**
* Converts buffer offset to file offset
*
* @param offset
*            The offset relative to the buffer
* @return The offset relative to the whole file
*/
private int toFileOffset(int offset) {
return (int) (this.currentOffset + offset);
}
/**
* Parses the shp file, counting the records.
*
* @param count
*            ignored; the count is always recomputed by scanning the file
* @return the number of non-null records in the shapefile, or -1 if the
*         count cannot be determined
* @throws DataSourceException
*             if a read error occurs while scanning the file
*/
public int getCount(int count) throws DataSourceException {
try {
if (channel == null)
return -1;
count = 0;
long offset = this.currentOffset;
try {
goTo(100);
} catch (UnsupportedOperationException e) {
return -1;
}
while (hasNext()) {
count++;
nextRecord();
}
goTo((int) offset);
} catch (IOException ioe) {
count = -1;
// What now? This seems arbitrarily appropriate!
throw new DataSourceException("Problem reading shapefile record",
ioe);
}
return count;
}
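/*
 * Usage sketch (illustrative only): the argument is overwritten
 * internally, so any value can be passed; the whole file is scanned and
 * -1 is returned when the count cannot be determined. "reader" is
 * hypothetical.
 *
 *   int records = reader.getCount(0);
 */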
/**
* @param handler
*            The handler to set.
*/
public void setHandler(ShapeHandler handler) {
this.handler = handler;
}
public String id() {
return getClass().getName();
}
}