Examples of BatchSchema

Examples of org.apache.drill.exec.record.BatchSchema

    String validationSelection = "L_ORDERKEY, L_PARTKEY, L_SUPPKEY, L_LINENUMBER, L_QUANTITY, L_EXTENDEDPRICE, L_DISCOUNT, L_TAX, " +
        "L_RETURNFLAG, L_LINESTATUS, L_SHIPDATE, L_COMMITDATE, L_RECEIPTDATE, L_SHIPINSTRUCT, L_SHIPMODE, L_COMMENT";
    String inputTable = "cp.`tpch/lineitem.parquet`";
    // 'selection' is a parameter of the enclosing test method
    String query = String.format("SELECT %s FROM %s", selection, inputTable);
    List<QueryResultBatch> expected = testSqlWithResults(query);
    BatchSchema schema = null;
    RecordBatchLoader loader = new RecordBatchLoader(getAllocator());
    List<Map> expectedRecords = new ArrayList<>();
    // read the data out of the results; the error manifested itself when getObject was
    // called on the vectors, because they still held dead (released) buffers
    addToMaterializedResults(expectedRecords, expected, loader, schema);
    for (QueryResultBatch result : expected) {
    // ...
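The helper addToMaterializedResults is not shown on this page. Below is a minimal sketch of what it does, assuming the Drill 0.x test APIs visible above (RecordBatchLoader.load, QueryResultBatch.getHeader()/getData(), per-vector getObject access); the field-name accessor varies across Drill releases, so treat the exact calls as illustrative rather than the upstream implementation.

    // Hedged sketch of addToMaterializedResults: load each batch, capture
    // the first schema, and copy every cell into a name-keyed map.
    for (QueryResultBatch batch : expected) {
      loader.load(batch.getHeader().getDef(), batch.getData());
      if (schema == null) {
        schema = loader.getSchema();            // fixed by the first batch
      }
      for (int i = 0; i < loader.getRecordCount(); i++) {
        Map<String, Object> record = new HashMap<>();
        for (VectorWrapper<?> w : loader) {     // one wrapper per field
          record.put(w.getField().getPath().toString(),
              w.getValueVector().getAccessor().getObject(i));
        }
        expectedRecords.add(record);
      }
      loader.clear();
      // release once copied; the "dead buffers" note above is about
      // calling getObject after a batch has already been released
      batch.release();
    }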

Examples of org.apache.drill.exec.record.BatchSchema

  public void compareResults(List<QueryResultBatch> expected, List<QueryResultBatch> result) throws Exception {
    List<Map> expectedRecords = new ArrayList<>();
    List<Map> actualRecords = new ArrayList<>();

    BatchSchema schema = null;
    RecordBatchLoader loader = new RecordBatchLoader(getAllocator());
    addToMaterializedResults(expectedRecords, expected, loader, schema);
    addToMaterializedResults(actualRecords, result, loader, schema);
    Assert.assertEquals("Different number of records returned", expectedRecords.size(), actualRecords.size());
    // ...
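The assertion above only checks counts. A hedged sketch of how the record-level comparison can proceed, treating both sides as unordered multisets (suitable when the query has no ORDER BY):

    // Every expected record must be matched (and consumed) in the actual
    // set; whatever remains afterwards is unexpected output.
    for (Map expectedRecord : expectedRecords) {
      Assert.assertTrue("Record not found: " + expectedRecord,
          actualRecords.remove(expectedRecord));
    }
    Assert.assertTrue("Unexpected extra records", actualRecords.isEmpty());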

Examples of org.apache.drill.exec.record.BatchSchema

  public void compareParquetReaders(String selection, String table) throws Exception {
    test("alter system set `store.parquet.use_new_reader` = true");
    List<QueryResultBatch> expected = testSqlWithResults("select " + selection + " from " + table);

    RecordBatchLoader loader = new RecordBatchLoader(getAllocator());
    BatchSchema schema = null;

    List<Map> expectedRecords = new ArrayList<>();
    addToMaterializedResults(expectedRecords, expected, loader, schema);

    test("alter system set `store.parquet.use_new_reader` = false");
    // ...
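Note that `alter system set` changes cluster-wide state, so a failing assertion between the two runs would leave the experimental reader enabled for later tests. A hedged sketch of guarding the toggle, assuming only the test(...) helper shown above:

    test("alter system set `store.parquet.use_new_reader` = true");
    try {
      // ... run the query and materialize results under the new reader ...
    } finally {
      // restore the default even if materialization throws
      test("alter system set `store.parquet.use_new_reader` = false");
    }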

Examples of org.apache.drill.exec.record.BatchSchema

  public void compareParquetReadersColumnar(String selection, String table) throws Exception {
    test("alter system set `store.parquet.use_new_reader` = true");
    List<QueryResultBatch> expected = testSqlWithResults("select " + selection + " from " + table);

    RecordBatchLoader loader = new RecordBatchLoader(getAllocator());
    BatchSchema schema = null;

    Map<String, List> expectedSuperVectors = addToCombinedVectorResults(expected, loader, schema);

    test("alter system set `store.parquet.use_new_reader` = false");
    List<QueryResultBatch> results = testSqlWithResults("select " + selection + " from " + table);
    // ...
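A plausible continuation under the same assumptions as the snippet: addToCombinedVectorResults keys one value list per column, so equality can be asserted column by column rather than record by record.

    // Hedged sketch: compare the two readers' outputs columnar-style.
    Map<String, List> actualSuperVectors = addToCombinedVectorResults(results, loader, schema);
    Assert.assertEquals("Different column sets",
        expectedSuperVectors.keySet(), actualSuperVectors.keySet());
    for (String column : expectedSuperVectors.keySet()) {
      Assert.assertEquals("Values differ in column " + column,
          expectedSuperVectors.get(column), actualSuperVectors.get(column));
    }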

Examples of org.apache.drill.exec.record.BatchSchema

    }
  }

  public void compareParquetReadersHyperVector(String selection, String table) throws Exception {
    RecordBatchLoader loader = new RecordBatchLoader(getAllocator());
    BatchSchema schema = null;

    // TODO - It didn't seem to respect the max width per node setting, so I went in and modified the SimpleParallelizer directly.
    // I backed out the changes after the test passed.
//    test("alter system set `planner.width.max_per_node` = 1");
    test("alter system set `store.parquet.use_new_reader` = false");
    // ...
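Before values are compared, the two readers' schemas can be checked directly; BatchSchema exposes getFieldCount() and getColumn(int). The variable names below (newReaderSchema, oldReaderSchema) are hypothetical stand-ins for the schemas captured from each run:

    // Hedged sketch of a schema sanity check between the two readers.
    Assert.assertEquals("Field count differs",
        newReaderSchema.getFieldCount(), oldReaderSchema.getFieldCount());
    for (int i = 0; i < newReaderSchema.getFieldCount(); i++) {
      Assert.assertEquals("Field " + i + " differs",
          newReaderSchema.getColumn(i), oldReaderSchema.getColumn(i));
    }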

Examples of org.apache.drill.exec.record.BatchSchema

    String create = "CREATE TABLE " + outputFile + " AS " + query;
    String validateQuery = String.format("SELECT %s FROM %s", validationSelection, outputFile);
    test(create);

    RecordBatchLoader loader = new RecordBatchLoader(getAllocator());
    BatchSchema schema = null;

    List<QueryResultBatch> expected = testSqlWithResults(query);
    List<Map> expectedRecords = new ArrayList<>();
    addToMaterializedResults(expectedRecords, expected, loader, schema);
    // ...
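A plausible continuation using only the helpers already visible on this page: read the table written by CTAS back through validateQuery and compare it to the source query's records.

    // Hedged sketch: materialize the CTAS output and compare counts.
    List<QueryResultBatch> results = testSqlWithResults(validateQuery);
    List<Map> actualRecords = new ArrayList<>();
    addToMaterializedResults(actualRecords, results, loader, schema);
    Assert.assertEquals("CTAS output differs from source query",
        expectedRecords.size(), actualRecords.size());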

Examples of org.apache.drill.exec.record.BatchSchema

   * @throws Exception
   */
  protected void doQuery(FileInputStream input) throws Exception {
    int  batchNum = 0;
    int  emptyBatchNum = 0;
    BatchSchema prevSchema = null;
    List<Integer> schemaChangeIdx = Lists.newArrayList();

    BatchMetaInfo aggBatchMetaInfo = new BatchMetaInfo();

    while (input.available() > 0) {
    // ...
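The declarations above imply a per-batch loop that tracks where the schema changes. A hedged sketch of that loop body follows; loadBatchSchema is a hypothetical stand-in for however the tool deserializes a batch from the stream:

      // Hedged sketch of one loop iteration: record the batch index
      // whenever the schema differs from the previous batch's.
      BatchSchema currentSchema = loadBatchSchema(input);  // hypothetical helper
      if (prevSchema != null && !currentSchema.equals(prevSchema)) {
        schemaChangeIdx.add(batchNum);
      }
      prevSchema = currentSchema;
      batchNum++;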

Examples of org.apache.drill.exec.record.BatchSchema

    container.clear();
    if (queuedRightBatches.size() > Character.MAX_VALUE) {
      throw new SchemaChangeException("Join cannot work on more than %d batches at a time.", (int) Character.MAX_VALUE);
    }
    status.sv4 = new SelectionVector4(svAllocator.getAllocation(), recordCount, Character.MAX_VALUE);
    BatchSchema schema = queuedRightBatches.keySet().iterator().next();
    List<RecordBatchData> data = queuedRightBatches.get(schema);

    // now we're going to generate the sv4 pointers
    switch (schema.getSelectionVectorMode()) {
      case NONE: {
        int index = 0;
        int recordBatchId = 0;
        for (RecordBatchData d : data) {
          for (int i = 0; i < d.getRecordCount(); i++, index++) {
    // ...
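The cap at Character.MAX_VALUE above exists because an SV4 entry packs a 16-bit batch id and a 16-bit record offset into a single int. A hedged sketch of how the NONE case likely completes:

        // Each SV4 entry encodes (recordBatchId << 16) | recordOffset,
        // which is why no more than 65535 batches can be queued.
        for (RecordBatchData d : data) {
          for (int i = 0; i < d.getRecordCount(); i++, index++) {
            status.sv4.set(index, recordBatchId, i);
          }
          recordBatchId++;   // next batch takes the next 16-bit id
        }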

Examples of org.apache.drill.exec.record.BatchSchema

  private RecordBatch batch;

  public VectorRecordMaterializer(FragmentContext context, RecordBatch batch) {
    this.queryId = context.getHandle().getQueryId();
    this.batch = batch;
    BatchSchema schema = batch.getSchema();
    assert schema != null : "Schema must be defined.";

//    for (MaterializedField f : batch.getSchema()) {
//      logger.debug("New Field: {}", f);
//    }
    // ...
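BatchSchema is iterable over its MaterializedFields, which is what the commented-out logging loop relies on; it also exposes the field count and the batch's selection-vector mode. A small hedged sketch of inspecting the schema captured above:

    // Hedged sketch of schema inspection at this point.
    int fieldCount = schema.getFieldCount();
    BatchSchema.SelectionVectorMode mode = schema.getSelectionVectorMode();
    for (MaterializedField f : schema) {           // iterable over fields
      logger.debug("Field: {} (total {}, sv mode {})", f, fieldCount, mode);
    }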