Package org.apache.drill.exec.record

Examples of org.apache.drill.exec.record.ExpandableHyperContainer
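As the snippets below show, ExpandableHyperContainer is a VectorContainer that stacks the value vectors of several record batches into hyper vectors, so a four-byte selection vector (batch index plus offset within the batch) can address records across all of them. The usual pattern is to construct it from the first batch and call addBatch() for every subsequent batch with the same schema.

A minimal sketch of that pattern; the incoming RecordBatch, the operator field hyperContainer, and the accumulate() helper are assumptions for illustration, not taken from the examples:

    private ExpandableHyperContainer hyperContainer;   // accumulates vectors from every batch seen so far

    private void accumulate(RecordBatch incoming) {
      // RecordBatchData transfers the incoming batch's vectors so they survive the next call to next()
      RecordBatchData batchData = new RecordBatchData(incoming);
      if (hyperContainer == null) {
        // the first batch defines the schema and seeds the hyper container
        hyperContainer = new ExpandableHyperContainer(batchData.getContainer());
      } else {
        // later batches with the same schema are appended as additional hyper-vector entries
        hyperContainer.addBatch(batchData.getContainer());
      }
    }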


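The first snippet rebuilds a hyper batch from the vectors of an existing container: each field's hyper vectors are copied into a new container, the schema is built in FOUR_BYTE (SV4) mode, and a SelectionVector4 is pre-allocated at four bytes per record up to the limit. This appears to be the purge/rebuild step of a TopN-style priority queue.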
    // copy each field's hyper vectors out of the old container into a fresh one
    for (MaterializedField field : schema) {
      int[] ids = container.getValueVectorId(field.getPath()).getFieldIds();
      newContainer.add(container.getValueAccessorById(field.getValueClass(), ids).getValueVectors());
    }
    // the hyper batch is addressed through a four-byte selection vector (batch index plus offset)
    newContainer.buildSchema(BatchSchema.SelectionVectorMode.FOUR_BYTE);
    this.hyperBatch = new ExpandableHyperContainer(newContainer);
    this.batchCount = hyperBatch.iterator().next().getValueVectors().length;
    // pre-allocate four bytes per record (limit + 1 records) for the new selection vector
    BufferAllocator.PreAllocator preAlloc = allocator.getNewPreAllocator();
    preAlloc.preAllocate(4 * (limit + 1));
    this.heapSv4 = new SelectionVector4(preAlloc.getAllocation(), limit, Character.MAX_VALUE);
    for (int i = 0; i < v4.getTotalCount(); i++) {


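A priority queue's add() method: the hyper batch is seeded from the first RecordBatchData and grown with addBatch() for every later one, after which the generated setup code is re-run against the enlarged container.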
  @Override
  public void add(FragmentContext context, RecordBatchData batch) throws SchemaChangeException {
    Stopwatch watch = new Stopwatch();
    watch.start();
    if (hyperBatch == null) {
      hyperBatch = new ExpandableHyperContainer(batch.getContainer());
    } else {
      hyperBatch.addBatch(batch.getContainer());
    }

    doSetup(context, hyperBatch, null); // may not need to do this every time

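A hash-join build phase: each right-side (build-side) batch is detached into a RecordBatchData and appended to the hyper container, so records with matching keys can later be retrieved from the probe side.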
                     * to the hyper vector container. Will be used when we want to retrieve
                     * records that have matching keys on the probe side.
                     */
                    RecordBatchData nextBatch = new RecordBatchData(right);
                    if (hyperContainer == null) {
                        hyperContainer = new ExpandableHyperContainer(nextBatch.getContainer());
                    } else {
                        hyperContainer.addBatch(nextBatch.getContainer());
                    }

                    // completed processing a batch, increment batch index

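A TopN-style operator: each incoming batch is canonicalized, the generated priority queue is created lazily around a fresh hyper container, and the queue is purged once the record count and batch count exceed their thresholds.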
          countSincePurge += incoming.getRecordCount();
          batchCount++;
          RecordBatchData batch = new RecordBatchData(incoming);
          batch.canonicalize();
          if (priorityQueue == null) {
            priorityQueue = createNewPriorityQueue(context, config.getOrderings(), new ExpandableHyperContainer(batch.getContainer()), MAIN_MAPPING, LEFT_MAPPING, RIGHT_MAPPING);
          }
          priorityQueue.add(context, batch);
          if (countSincePurge > config.getLimit() && batchCount > batchPurgeThreshold) {
            purge();
            countSincePurge = 0;

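A merging receiver generating its comparison code: one RecordBatchLoader per incoming stream is folded into a single hyper container, which is then handed to generateComparisons().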
    try {
      CodeGenerator<MergingReceiverGeneratorBase> cg = CodeGenerator.get(MergingReceiverGeneratorBase.TEMPLATE_DEFINITION, context.getFunctionRegistry());
      ClassGenerator<MergingReceiverGeneratorBase> g = cg.getRoot();

      ExpandableHyperContainer batch = null;
      boolean first = true;
      for (RecordBatchLoader loader : batchLoaders) {
        if (first) {
          batch = new ExpandableHyperContainer(loader);
          first = false;
        } else {
          batch.addBatch(loader);
        }
      }

      generateComparisons(g, batch);

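Hash-join setup for what is likely the empty build side case: a container is populated with the right-side schema, its record count is set to zero, it is wrapped in a hyper container, and the hash table and probe are still initialized so the probe side can proceed.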
      for (MaterializedField field : rightSchema) {
        c.addOrGet(field);
      }
      c.buildSchema(SelectionVectorMode.NONE);
      c.setRecordCount(0);
      hyperContainer = new ExpandableHyperContainer(c);
      hjHelper.addNewBatch(0);
      buildBatchIndex++;
      setupHashTable();
      hashJoinProbe = setupHashJoinProbe();
      // Build the container schema and set the counts
