Package org.apache.drill.exec.record

Examples of org.apache.drill.exec.record.RecordBatchLoader


          Files.toString(FileUtils.getResourceAsFile("/physical_json_scan_test1.json"), Charsets.UTF_8)
              .replace("#{TEST_FILE}", FileUtils.getResourceAsFile("/scan_json_test_1.json").toURI().toString())
      );

      // look at records
      RecordBatchLoader batchLoader = new RecordBatchLoader(bit.getContext().getAllocator());
      int recordCount = 0;

      //int expectedBatchCount = 2;

      //assertEquals(expectedBatchCount, results.size());

      for (int i = 0; i < results.size(); ++i) {
        QueryResultBatch batch = results.get(i);
        if (i == 0) {
          assertTrue(batch.hasData());
        } else {
          assertFalse(batch.hasData());
          return;
        }

        assertTrue(batchLoader.load(batch.getHeader().getDef(), batch.getData()));
        boolean firstColumn = true;

        // print headers.
        System.out.println("\n\n========NEW SCHEMA=========\n\n");
        for (VectorWrapper<?> v : batchLoader) {

          if (firstColumn) {
            firstColumn = false;
          } else {
            System.out.print("\t");
          }
          System.out.print(v.getField().getName());
          System.out.print("[");
          System.out.print(v.getField().getType().getMinorType());
          System.out.print("]");
        }

        System.out.println();


        for (int r = 0; r < batchLoader.getRecordCount(); r++) {
          boolean first = true;
          recordCount++;
          for (VectorWrapper<?> v : batchLoader) {
            if (first) {
              first = false;
View Full Code Here


      v.getMutator().generateTestData();
      v.getMutator().setValueCount(100);
    }

    WritableBatch writableBatch = WritableBatch.getBatchNoSV(100, vectors);
    RecordBatchLoader batchLoader = new RecordBatchLoader(allocator);
    ByteBuf[] byteBufs = writableBatch.getBuffers();
    int bytes = 0;
    for (int i = 0; i < byteBufs.length; i++) {
      bytes += byteBufs[i].writerIndex();
    }
    ByteBuf byteBuf = allocator.buffer(bytes);
    int index = 0;
    for (int i = 0; i < byteBufs.length; i++) {
      byteBufs[i].readBytes(byteBuf, index, byteBufs[i].writerIndex());
      index += byteBufs[i].writerIndex();
    }
    byteBuf.writerIndex(bytes);
    try {
      batchLoader.load(writableBatch.getDef(), byteBuf);
      boolean firstColumn = true;
      int recordCount = 0;
      for (VectorWrapper<?> v : batchLoader) {
        if (firstColumn) {
          firstColumn = false;
        } else {
          System.out.print("\t");
        }
        System.out.print(v.getField().getName());
        System.out.print("[");
        System.out.print(v.getField().getType().getMinorType());
        System.out.print("]");
      }

      System.out.println();
      for (int r = 0; r < batchLoader.getRecordCount(); r++) {
        boolean first = true;
        recordCount++;
        for (VectorWrapper<?> v : batchLoader) {
          if (first) {
            first = false;
View Full Code Here

    DrillConfig config = DrillConfig.create();

    try(Drillbit bit1 = new Drillbit(config, serviceSet); DrillClient client = new DrillClient(config, serviceSet.getCoordinator());){
      bit1.run();
      client.connect();
      RecordBatchLoader batchLoader = new RecordBatchLoader(bit1.getContext().getAllocator());
      ParquetResultListener resultListener = new ParquetResultListener(batchLoader, props);
      long C = System.nanoTime();
      if (readEntries != null){
        client.runQuery(UserProtos.QueryType.LOGICAL, (Files.toString(FileUtils.getResourceAsFile(plan), Charsets.UTF_8).replaceFirst( "&REPLACED_IN_PARQUET_TEST&", readEntries)), resultListener);
      }
View Full Code Here

      System.out.println(String.format("Took %f s to connect", (float)(C-B) / 1E9));
      System.out.println(String.format("Took %f s to run query", (float)(D-C) / 1E9));
      //List<QueryResultBatch> results = client.runQuery(UserProtos.QueryType.PHYSICAL, Files.toString(FileUtils.getResourceAsFile("/parquet_scan_union_screen_physical.json"), Charsets.UTF_8));
      int count = 0;
//      RecordBatchLoader batchLoader = new RecordBatchLoader(new BootStrapContext(config).getAllocator());
      RecordBatchLoader batchLoader = new RecordBatchLoader(bit1.getContext().getAllocator());
      byte[] bytes;

      int batchCounter = 1;
      int columnValCounter = 0;
      int i = 0;
      FieldInfo currentField;
      HashMap<String, Integer> valuesChecked = new HashMap();
      for(QueryResultBatch b : results){

        count += b.getHeader().getRowCount();
        boolean schemaChanged = batchLoader.load(b.getHeader().getDef(), b.getData());

        int recordCount = 0;
        // print headers.
        if (schemaChanged) {
        } // do not believe any change is needed for when the schema changes, with the current mock scan use case

        for (VectorWrapper vw : batchLoader) {
          ValueVector vv = vw.getValueVector();
          currentField = props.fields.get(vv.getField().getName());
          if (VERBOSE_DEBUG){
            System.out.println("\n" + (String) currentField.name);
          }
          if ( ! valuesChecked.containsKey(vv.getField().getName())){
            valuesChecked.put(vv.getField().getName(), 0);
            columnValCounter = 0;
          } else {
            columnValCounter = valuesChecked.get(vv.getField().getName());
          }
          for (int j = 0; j < vv.getAccessor().getValueCount(); j++) {
            if (VERBOSE_DEBUG){
              System.out.print(vv.getAccessor().getObject(j) + ", " + (j % 25 == 0 ? "\n batch:" + batchCounter + " v:" + j + " - " : ""));
            }
            assertField(vv, j, currentField.type,
                currentField.values[columnValCounter % 3], currentField.name + "/");
            columnValCounter++;
          }
          if (VERBOSE_DEBUG){
            System.out.println("\n" + vv.getAccessor().getValueCount());
          }
          valuesChecked.remove(vv.getField().getName());
          valuesChecked.put(vv.getField().getName(), columnValCounter);
        }

        if (VERBOSE_DEBUG){
          for (i = 0; i < batchLoader.getRecordCount(); i++) {
            recordCount++;
            if (i % 50 == 0){
              System.out.println();
              for (VectorWrapper vw : batchLoader) {
                ValueVector v = vw.getValueVector();
View Full Code Here

  public BatchLoaderMap(List<String> requestedFields, BatchListener listener, DrillClient client) {
    this.listener = listener;
    this.requestedFields = requestedFields;
    this.objArr = new Object[requestedFields.size()];
    this.loader = new RecordBatchLoader(client.getAllocator());
    this.helper = new JsonHelper(client.getConfig());
  }
View Full Code Here

 
  public WireRecordBatch(FragmentContext context, RawFragmentBatchProvider fragProvider) {
    this.fragProvider = fragProvider;
    this.context = context;
    this.batchLoader = new RecordBatchLoader(context.getAllocator());
  }
View Full Code Here

  @Test
  public void testRead() throws Exception {
    List<QueryResultBatch> results = testSqlWithResults(
        String.format("SELECT count(*) FROM dfs_test.`default`.`%s`", dataFile.getPath()));

    RecordBatchLoader batchLoader = new RecordBatchLoader(getAllocator());

    for(QueryResultBatch batch : results) {
      batchLoader.load(batch.getHeader().getDef(), batch.getData());

      if (batchLoader.getRecordCount() <= 0) {
        continue;
      }

      BigIntVector countV = (BigIntVector) batchLoader.getValueAccessorById(BigIntVector.class, 0).getValueVector();
      assertTrue("Total of "+ NUM_RECORDS + " records expected in count", countV.getAccessor().get(0) == NUM_RECORDS);

      batchLoader.clear();
      batch.release();
    }
  }
View Full Code Here

            bit.run();
            client.connect();
            List<QueryResultBatch> results = client.runQuery(org.apache.drill.exec.proto.UserBitShared.QueryType.PHYSICAL,
                    Files.toString(FileUtils.getResourceAsFile("/functions/multi_input_add_test.json"), Charsets.UTF_8));

            RecordBatchLoader batchLoader = new RecordBatchLoader(bit.getContext().getAllocator());

            QueryResultBatch batch = results.get(0);
            assertTrue(batchLoader.load(batch.getHeader().getDef(), batch.getData()));

            for (VectorWrapper<?> v : batchLoader) {

                ValueVector.Accessor accessor = v.getValueVector().getAccessor();

                assertTrue((accessor.getObject(0)).equals(10));
            }

            batchLoader.clear();
            for(QueryResultBatch b : results){
                b.release();
            }
        }
    }
View Full Code Here

            client.connect();
            List<QueryResultBatch> results = client.runQuery(org.apache.drill.exec.proto.UserBitShared.QueryType.PHYSICAL,
                    Files.toString(FileUtils.getResourceAsFile("/record/vector/test_date.json"), Charsets.UTF_8)
                            .replace("#{TEST_FILE}", "/test_simple_date.json"));

            RecordBatchLoader batchLoader = new RecordBatchLoader(bit.getContext().getAllocator());

            QueryResultBatch batch = results.get(0);
            assertTrue(batchLoader.load(batch.getHeader().getDef(), batch.getData()));

            for (VectorWrapper<?> v : batchLoader) {

                ValueVector.Accessor accessor = v.getValueVector().getAccessor();

                assertEquals((accessor.getObject(0).toString()), ("1970-01-02"));
                assertEquals((accessor.getObject(1).toString()), ("2008-12-28"));
                assertEquals((accessor.getObject(2).toString()), ("2000-02-27"));
            }

            batchLoader.clear();
            for(QueryResultBatch b : results){
              b.release();
            }
        }
    }
View Full Code Here

            client.connect();
            List<QueryResultBatch> results = client.runQuery(org.apache.drill.exec.proto.UserBitShared.QueryType.PHYSICAL,
                    Files.toString(FileUtils.getResourceAsFile("/record/vector/test_sort_date.json"), Charsets.UTF_8)
                            .replace("#{TEST_FILE}", "/test_simple_date.json"));

            RecordBatchLoader batchLoader = new RecordBatchLoader(bit.getContext().getAllocator());

            QueryResultBatch batch = results.get(0);
            assertTrue(batchLoader.load(batch.getHeader().getDef(), batch.getData()));

            for (VectorWrapper<?> v : batchLoader) {

                ValueVector.Accessor accessor = v.getValueVector().getAccessor();

                assertEquals((accessor.getObject(0).toString()), new String("1970-01-02"));
                assertEquals((accessor.getObject(1).toString()), new String("2000-02-27"));
                assertEquals((accessor.getObject(2).toString()), new String("2008-12-28"));
            }

            batchLoader.clear();
            for(QueryResultBatch b : results){
              b.release();
            }
        }
    }
View Full Code Here

TOP

Related Classes of org.apache.drill.exec.record.RecordBatchLoader

Copyright © 2018 www.massapicom. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc., which is owned by Oracle Corporation. Contact coftware#gmail.com.