Package org.apache.drill.exec.proto.UserBitShared

Examples of org.apache.drill.exec.proto.UserBitShared.RecordBatchDef
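RecordBatchDef is the protobuf message that travels with every record batch: it carries a SerializedField entry for each value vector plus the record count (and, optionally, a selection-vector flag). The snippets on this page build it through the generated builder and read it back through the generated getters. As a minimal, self-contained sketch of that round trip, assuming nothing beyond the builder and getter calls that appear in the examples below (the empty metadata list stands in for real SerializedField entries):

  import java.util.List;

  import org.apache.drill.exec.proto.UserBitShared.RecordBatchDef;
  import org.apache.drill.exec.proto.UserBitShared.SerializedField;

  import com.google.common.collect.Lists;

  public class RecordBatchDefSketch {
    public static void main(String[] args) {
      // Normally filled with MaterializedField.getAsBuilder().build() for each vector.
      List<SerializedField> metadata = Lists.newArrayList();

      // Assemble the definition: field metadata plus the number of records in the batch.
      RecordBatchDef def = RecordBatchDef.newBuilder()
          .addAllField(metadata)
          .setRecordCount(0)
          .build();

      // The generated getters mirror the builder.
      System.out.println(def.getFieldCount() + " fields, " + def.getRecordCount() + " records");
    }
  }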


      }
      // remove vv access to buffers.
      vv.clear();
    }

    // Describe the batch: the serialized field metadata gathered above plus the record count.
    RecordBatchDef batchDef = RecordBatchDef.newBuilder()
        .addAllField(metadata)
        .setRecordCount(recordCount)
        .build();
    WritableBatch b = new WritableBatch(batchDef, buffers);
    return b;
  }
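Because RecordBatchDef is an ordinary generated protobuf message, the definition built above can also be written to bytes and parsed back with the standard protobuf-java methods; a minimal sketch, assuming only toByteArray()/parseFrom() from the generated class (batchDef refers to the definition built in the snippet above):

    byte[] wire = batchDef.toByteArray();
    try {
      // Recover the definition on the other side of the wire.
      RecordBatchDef parsed = RecordBatchDef.parseFrom(wire);
      assert parsed.getRecordCount() == batchDef.getRecordCount();
    } catch (com.google.protobuf.InvalidProtocolBufferException e) {
      throw new RuntimeException("corrupt batch definition", e);
    }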


    try {
      if (batch == null) return IterOutcome.NONE;

      logger.debug("Next received batch {}", batch);

      // Pull the batch definition from the header, then load the body into the loader;
      // load() reports whether the incoming schema differs from the previous one.
      RecordBatchDef rbd = batch.getHeader().getDef();
      boolean schemaChanged = batchLoader.load(rbd, batch.getBody());
      batch.release();
      if (schemaChanged) {
        this.schema = batchLoader.getSchema();
        return IterOutcome.OK_NEW_SCHEMA;

      }


//      logger.debug("Next received batch {}", batch);

      RecordBatchDef rbd = batch.getHeader().getDef();
      boolean schemaChanged = batchLoader.load(rbd, batch.getBody());
      // Track how many bytes and records this receiver has consumed.
      stats.addLongStat(Metric.BYTES_RECEIVED, batch.getByteCount());

      batch.release();
      if (schemaChanged) {
        this.schema = batchLoader.getSchema();
        stats.batchReceived(0, rbd.getRecordCount(), true);
        return IterOutcome.OK_NEW_SCHEMA;
      } else {
        stats.batchReceived(0, rbd.getRecordCount(), false);
        return IterOutcome.OK;
      }
    } catch (SchemaChangeException | IOException ex) {
      context.fail(ex);
      return IterOutcome.STOP;

    // Convert each materialized field in the schema to its serialized protobuf form.
    List<SerializedField> fields = Lists.newArrayList();
    for (MaterializedField field : schema) {
      fields.add(field.getAsBuilder().build());
    }
    // A definition with fields but no record data: it still carries the full schema.
    RecordBatchDef def = RecordBatchDef.newBuilder().addAllField(fields).build();
    return new FragmentWritableBatch(true, queryId, sendMajorFragmentId, sendMinorFragmentId,
        receiveMajorFragmentId, receiveMinorFragmentId, def);
  }
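A receiver holding such a definition can walk its schema without touching any data; a short sketch, assuming the repeated-field accessor generated by protobuf (getFieldList) and the MajorType getters exercised by the test further down (def refers to the definition built in the snippet above):

    for (SerializedField field : def.getFieldList()) {
      // Each entry describes one column: its minor type and its data mode.
      System.out.println(field.getMajorType().getMinorType()
          + " (" + field.getMajorType().getMode() + ")");
    }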

      }
      // remove vv access to buffers.
      vv.clear();
    }

    // Same as above, plus a flag telling the reader that the batch carries
    // a two-byte selection vector (SV2).
    RecordBatchDef batchDef = RecordBatchDef.newBuilder().addAllField(metadata).setRecordCount(recordCount)
        .setCarriesTwoByteSelectionVector(isSV2).build();
    WritableBatch b = new WritableBatch(batchDef, buffers);
    return b;
  }
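The selection-vector flag set above is visible to the consumer through the matching generated getter; a minimal sketch, assuming only that getter and a RecordBatchDef such as the one built above:

    // Check whether the batch carries a two-byte selection vector (SV2)
    // before deciding how to read its buffers.
    if (batchDef.getCarriesTwoByteSelectionVector()) {
      // read the SV2 alongside the data buffers
    }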

  public static QueryWritableBatch getEmptyBatchWithSchema(QueryId queryId, int rowCount, boolean isLastChunk, BatchSchema schema) {
    List<SerializedField> fields = Lists.newArrayList();
    for (MaterializedField field : schema) {
      fields.add(field.getAsBuilder().build());
    }
    RecordBatchDef def = RecordBatchDef.newBuilder().addAllField(fields).build();
    // Attach the schema-only definition to the QueryResult header that goes back to the client.
    QueryResult header = QueryResult.newBuilder() //
            .setQueryId(queryId) //
            .setRowCount(rowCount) //
            .setDef(def) //
            .setIsLastChunk(isLastChunk) //

    client = new DrillClient(config, serviceSet.getCoordinator());
    client.setSupportComplexTypes(false);
    client.connect();
    results = testSqlWithResults("select * from dfs_test.`[WORKING_PATH]/src/test/resources/store/text/data/regions.csv`");
    loader.load(results.get(0).getHeader().getDef(), results.get(0).getData());
    RecordBatchDef def = results.get(0).getHeader().getDef();
    // the entire row is returned as a single column
    assertEquals(1, def.getFieldCount());
    // with setSupportComplexTypes == false, the column mode should be REQUIRED
    assertTrue(def.getField(0).getMajorType().getMode() == DataMode.REQUIRED);
    loader.clear();
    for (QueryResultBatch result : results) {
      result.release();
    }
    client.close();

    client = new DrillClient(config, serviceSet.getCoordinator());
    client.setSupportComplexTypes(true);
    client.connect();
    results = testSqlWithResults("select * from dfs_test.`[WORKING_PATH]/src/test/resources/store/text/data/regions.csv`");
    loader.load(results.get(0).getHeader().getDef(), results.get(0).getData());
    def = results.get(0).getHeader().getDef();
    // the entire row is returned as a single column
    assertEquals(1, def.getFieldCount());
    // with setSupportComplexTypes == true, the column mode should be REPEATED
    assertTrue(def.getField(0).getMajorType().getMode() == DataMode.REPEATED);
    loader.clear();
    for (QueryResultBatch result : results) {
      result.release();
    }
    client.close();
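Beyond the per-column checks above, the QueryResult headers of the returned batches can also be tallied; a small sketch, assuming only the getters that mirror the setters shown earlier (setRowCount/setDef) and that it runs before the batches are released:

    long totalRows = 0;
    for (QueryResultBatch result : results) {
      // Each header reports the row count its batch carries.
      totalRows += result.getHeader().getRowCount();
    }
    System.out.println("total rows: " + totalRows);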