// Drain any record deferred from the previous call before reading new input.
if (redoRecord != null) {
// Try writing the record that didn't fit into the last RecordBatch
Object deSerializedValue = serde.deserialize((Writable) redoRecord);
boolean status = readHiveRecordAndInsertIntoRecordBatch(deSerializedValue, recordCount);
if (!status) {
// The deferred record is inserted at the start of a batch; if it still does not
// fit, no batch can ever hold it, so fail rather than loop forever.
throw new DrillRuntimeException("Current record is too big to fit into allocated ValueVector buffer");
}
redoRecord = null;
recordCount++;
}
// Fill the batch: pull records from the Hive reader until the batch reaches
// TARGET_RECORD_COUNT or the underlying input is exhausted.
while (recordCount < TARGET_RECORD_COUNT && reader.next(key, value)) {
Object deSerializedValue = serde.deserialize((Writable) value);
boolean status = readHiveRecordAndInsertIntoRecordBatch(deSerializedValue, recordCount);
if (!status) {
// Record did not fit into the current batch; save it so the next call retries it first.
// NOTE(review): this stores a reference to 'value', which Hadoop RecordReaders
// typically reuse across next() calls — presumably safe only because it is
// re-deserialized before reader.next() runs again; confirm against the reader impl.
redoRecord = value;
setValueCountAndPopulatePartitionVectors(recordCount);
return recordCount;
}
recordCount++;
}
// Input exhausted or batch full: finalize vector counts and partition columns.
setValueCountAndPopulatePartitionVectors(recordCount);
return recordCount;
} catch (IOException | SerDeException e) {
// Wrap checked Hadoop/Hive exceptions in Drill's unchecked exception, preserving the cause.
throw new DrillRuntimeException(e);
}
}