Package org.apache.drill.exec.record

Examples of org.apache.drill.exec.record.RawFragmentBatch



  private RawFragmentBatch getNext(RawFragmentBatchProvider provider) throws IOException{
    stats.startWait();
    try {
      RawFragmentBatch b = provider.getNext();
      if (b != null) {
        stats.addLongStat(Metric.BYTES_RECEIVED, b.getByteCount());
        stats.batchReceived(0, b.getHeader().getDef().getRecordCount(), false);
      }
      return b;
    } finally {
      stats.stopWait();
    }
  }
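The helper wraps RawFragmentBatchProvider.getNext(), which returns null once a sender's stream is exhausted, and records wait time and received bytes on the operator stats. A minimal usage sketch under that assumption; process() is a hypothetical consumer:

  // Hypothetical usage sketch: drain one provider with the stats-tracking
  // helper above, skipping and freeing empty batches along the way.
  RawFragmentBatch b;
  while ((b = getNext(provider)) != null) {
    if (b.getHeader().getDef().getRecordCount() == 0) {
      b.release();      // free the buffer behind an empty batch
      continue;
    }
    process(b);         // hypothetical consumer of non-empty batches
  }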


      // set up each (non-empty) incoming record batch
      List<RawFragmentBatch> rawBatches = Lists.newArrayList();
      boolean firstBatch = true;
      for (RawFragmentBatchProvider provider : fragProviders) {
        RawFragmentBatch rawBatch = null;
        try {
          rawBatch = getNext(provider);
          if (rawBatch == null) {
            // null means either cancellation or an exhausted provider; both
            // cases end the merge rather than risk an NPE on the header below
            return IterOutcome.STOP;
          }
        } catch (IOException e) {
          context.fail(e);
          return IterOutcome.STOP;
        }
        if (rawBatch.getHeader().getDef().getRecordCount() != 0) {
          rawBatches.add(rawBatch);
        } else {
          if (emptyBatch == null) {
            emptyBatch = rawBatch;
          }
          try {
            // fast-forward past any additional empty batches from this provider
            while ((rawBatch = getNext(provider)) != null
                && rawBatch.getHeader().getDef().getRecordCount() == 0) {
              // skip empty batch
            }
            if (rawBatch == null && context.isCancelled()) {
              return IterOutcome.STOP;
            }
          } catch (IOException e) {
            context.fail(e);
            return IterOutcome.STOP;
          }
          if (rawBatch != null) {
            rawBatches.add(rawBatch);
          } else {
            rawBatches.add(emptyBatch);
          }
        }
      }

      // allocate the incoming record batch loaders
      senderCount = rawBatches.size();
      incomingBatches = new RawFragmentBatch[senderCount];
      batchOffsets = new int[senderCount];
      batchLoaders = new RecordBatchLoader[senderCount];
      for (int i = 0; i < senderCount; ++i) {
        incomingBatches[i] = rawBatches.get(i);
        batchLoaders[i] = new RecordBatchLoader(oContext.getAllocator());
      }

      int i = 0;
      for (RawFragmentBatch batch : incomingBatches) {
        // initialize the incoming batchLoaders
        UserBitShared.RecordBatchDef rbd = batch.getHeader().getDef();
        try {
          batchLoaders[i].load(rbd, batch.getBody());
        } catch(SchemaChangeException e) {
          logger.error("MergingReceiver failed to load record batch from remote host.  {}", e);
          context.fail(e);
          return IterOutcome.STOP;
        }
        batch.release();
        ++batchOffsets[i];
        ++i;
      }

      // Canonicalize each incoming batch, so that vectors are alphabetically sorted based on SchemaPath.
      for (RecordBatchLoader loader : batchLoaders) {
        loader.canonicalize();
      }

      // Ensure all the incoming batches have the identical schema.
      if (!isSameSchemaAmongBatches(batchLoaders)) {
        logger.error("Incoming batches for merging receiver have diffferent schemas!");
        context.fail(new SchemaChangeException("Incoming batches for merging receiver have diffferent schemas!"));
        return IterOutcome.STOP;
      }

      // create the outgoing schema and vector container, and allocate the initial batch
      SchemaBuilder bldr = BatchSchema.newBuilder().setSelectionVectorMode(BatchSchema.SelectionVectorMode.NONE);
      int vectorCount = 0;
      for (VectorWrapper<?> v : batchLoaders[0]) {

        // add field to the output schema
        bldr.addField(v.getField());

        // allocate a new value vector
        ValueVector outgoingVector = outgoingContainer.addOrGet(v.getField());
        outgoingVector.allocateNew();
        ++vectorCount;
      }


      BatchSchema newSchema = bldr.build();
      if (schema != null && !schema.equals(newSchema)) {
        // TODO: handle case where one or more batches implicitly indicate schema change
        logger.debug("Initial state has incoming batches with different schemas");
      }
      schema = newSchema;
      outgoingContainer.buildSchema(BatchSchema.SelectionVectorMode.NONE);

      // generate code for merge operations (copy and compare)
      try {
        merger = createMerger();
      } catch (SchemaChangeException e) {
        logger.error("Failed to generate code for MergingReceiver.  {}", e);
        context.fail(e);
        return IterOutcome.STOP;
      }

      // allocate the priority queue with the generated comparator
      this.pqueue = new PriorityQueue<Node>(fragProviders.length, new Comparator<Node>() {
        public int compare(Node node1, Node node2) {
          int leftIndex = (node1.batchId << 16) + node1.valueIndex;
          int rightIndex = (node2.batchId << 16) + node2.valueIndex;
          return merger.doEval(leftIndex, rightIndex);
        }
      });

      // populate the priority queue with initial values
      for (int b = 0; b < senderCount; ++b) {
        while (batchLoaders[b] != null && batchLoaders[b].getRecordCount() == 0) {
          try {
            RawFragmentBatch batch = getNext(fragProviders[b]);
            incomingBatches[b] = batch;
            if (batch != null) {
              batchLoaders[b].load(batch.getHeader().getDef(), batch.getBody());
            } else {
              batchLoaders[b].clear();
              batchLoaders[b] = null;
              if (context.isCancelled()) {
                return IterOutcome.STOP;
              }
            }
          } catch (IOException | SchemaChangeException e) {
            context.fail(e);
            return IterOutcome.STOP;
          }
        }
        if (batchLoaders[b] != null) {
          pqueue.add(new Node(b, 0));
        }
      }

      hasRun = true;
      // finished lazy initialization
    }

    while (!pqueue.isEmpty()) {
      // pop next value from pq and copy to outgoing batch
      Node node = pqueue.peek();
      if (!copyRecordToOutgoingBatch(node)) {
        logger.debug("Outgoing vectors space is full; breaking");
        prevBatchWasFull = true;
        break;
      }
      pqueue.poll();

//      if (isOutgoingFull()) {
//        // set a flag so that we reallocate on the next iteration
//        logger.debug("Outgoing vectors record batch size reached; breaking");
//        prevBatchWasFull = true;
//      }

      if (node.valueIndex == batchLoaders[node.batchId].getRecordCount() - 1) {
        // reached the end of an incoming record batch
        RawFragmentBatch nextBatch = null;
        try {
          nextBatch = getNext(fragProviders[node.batchId]);

          while (nextBatch != null && nextBatch.getHeader().getDef().getRecordCount() == 0) {
            nextBatch = getNext(fragProviders[node.batchId]);
          }
          if (nextBatch == null && context.isCancelled()) {
            return IterOutcome.STOP;
          }
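The initialization above is a k-way merge: one value per sender is seeded into a priority queue, and each poll is followed by a refill from the source that was just consumed. A self-contained sketch of the same pattern over plain sorted iterators (pure JDK, not Drill API):

import java.util.ArrayList;
import java.util.Comparator;
import java.util.Iterator;
import java.util.List;
import java.util.PriorityQueue;

// Simplified k-way merge mirroring the priority-queue pattern used by the
// MergingReceiver above: seed one head value per source, then poll and refill.
public class KWayMergeSketch {

  private static class Node {
    final int sourceId;
    final int value;
    Node(int sourceId, int value) { this.sourceId = sourceId; this.value = value; }
  }

  public static List<Integer> merge(List<Iterator<Integer>> sources) {
    PriorityQueue<Node> pq = new PriorityQueue<Node>(Math.max(1, sources.size()),
        new Comparator<Node>() {
          @Override
          public int compare(Node a, Node b) { return Integer.compare(a.value, b.value); }
        });
    for (int i = 0; i < sources.size(); i++) {
      if (sources.get(i).hasNext()) {
        pq.add(new Node(i, sources.get(i).next()));   // seed one value per source
      }
    }
    List<Integer> out = new ArrayList<Integer>();
    while (!pq.isEmpty()) {
      Node n = pq.poll();                             // smallest head across all sources
      out.add(n.value);
      Iterator<Integer> src = sources.get(n.sourceId);
      if (src.hasNext()) {
        pq.add(new Node(n.sourceId, src.next()));     // refill from the consumed source
      }
    }
    return out;
  }
}

The Drill version does the same with generated comparator code (merger.doEval) over value vectors, packing batch id and row index into a single int key via (batchId << 16) + valueIndex.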

  @Override
  public IterOutcome buildSchema() throws SchemaChangeException {
    stats.startProcessing();
    try {
      RawFragmentBatch batch = getNext(fragProviders[0]);
      for (SerializedField field : batch.getHeader().getDef().getFieldList()) {
        outgoingContainer.addOrGet(MaterializedField.create(field));
      }
    } catch (IOException e) {
      throw new SchemaChangeException(e);
    } finally {
      stats.stopProcessing();              // assumption: counterpart to startProcessing() above
    }
    return IterOutcome.OK_NEW_SCHEMA;      // assumption: reached only when the schema was built
  }

      Stopwatch watch = new Stopwatch();
      watch.start();
      BitData.FragmentRecordBatch header = BitData.FragmentRecordBatch.parseDelimitedFrom(stream);
      DrillBuf buf = allocator.buffer(bodyLength);
      buf.writeBytes(stream, bodyLength);
      batch = new RawFragmentBatch(null, header, buf, null);
      buf.release();
      available = true;
      latch.countDown();
      long t = Math.max(watch.elapsed(TimeUnit.MICROSECONDS), 1);  // guard against divide-by-zero for sub-microsecond reads
      logger.debug("Took {} us to read {} bytes from disk. Rate {} mb/s", t, bodyLength, bodyLength / t);
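Newer Guava releases deprecate the Stopwatch constructor in favor of factory methods. A minimal, self-contained sketch of the same timing idiom; copySomething() is a hypothetical stand-in for the disk read:

import java.util.concurrent.TimeUnit;
import com.google.common.base.Stopwatch;

// Timing sketch: Stopwatch.createStarted() replaces new Stopwatch() in newer
// Guava versions; max(t, 1) avoids dividing by zero for sub-microsecond reads.
public class TimingSketch {
  public static void main(String[] args) {
    Stopwatch watch = Stopwatch.createStarted();
    long bytes = copySomething();
    long t = Math.max(watch.elapsed(TimeUnit.MICROSECONDS), 1);
    System.out.printf("Took %d us to read %d bytes. Rate %d mb/s%n", t, bytes, bytes / t);
  }

  private static long copySomething() {
    return 1L << 20;   // hypothetical 1 MiB payload
  }
}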

    if (finished) {
      throw new RuntimeException("Attempted to enqueue batch after finished");
    }
    if (batch.getHeader().getIsOutOfMemory()) {
      logger.debug("Setting autoread false");
      RawFragmentBatch firstBatch = buffer.peekFirst();
      FragmentRecordBatch header = firstBatch == null ? null : firstBatch.getHeader();
      if (!outOfMemory.get() && header != null && header.getIsOutOfMemory()) {
        buffer.addFirst(batch);
      }
      outOfMemory.set(true);
      return;

    if (!buffer.isEmpty()) {
      if (!context.isFailed() && !context.isCancelled()) {
        context.fail(new IllegalStateException("Batches still in queue during cleanup"));
        logger.error("{} batches in queue.", buffer.size());
      }
      // drain once, logging and releasing in the same pass (polling in two
      // separate loops would leave the batches consumed by the first loop unreleased)
      RawFragmentBatch batch;
      while ((batch = buffer.poll()) != null) {
        logger.error("Batch left in queue: {}", batch);
        if (batch.getBody() != null) {
          batch.getBody().release();
        }
      }
    }
  }
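The drain-and-release loop above reappears in kill() below; a hypothetical shared helper (the method name and placement are an assumption, not Drill API):

  // Hypothetical helper: drain a queue of batches and free each backing buffer.
  private void drainAndRelease(java.util.Queue<RawFragmentBatch> buffer) {
    RawFragmentBatch batch;
    while ((batch = buffer.poll()) != null) {
      if (batch.getBody() != null) {
        batch.getBody().release();   // return the DrillBuf to the allocator
      }
    }
  }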


  @Override
  public void kill(FragmentContext context) {
    RawFragmentBatch batch;
    while ((batch = buffer.poll()) != null) {
      if (batch.getBody() != null) {   // guard: a batch may carry no body
        batch.getBody().release();
      }
    }
  }

      logger.debug("Setting autoread true");
      outOfMemory.set(false);
      readController.flushResponses();
    }

    RawFragmentBatch b = buffer.poll();

    // if we didn't get a batch, block waiting for one (unless the stream is finished)
    if (b == null && (!finished || !buffer.isEmpty())) {
      try {
        b = buffer.take();
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();  // preserve the interrupt for callers
        return null;
      }
    }

    if (b != null && b.getHeader().getIsOutOfMemory()) {
      outOfMemory.set(true);
      return b;
    }


    // If we are in the overlimit condition and aren't finished, check whether we've
    // drained back down to the start limit. If so, clear the overlimit condition and
    // re-enable auto read (start reading from the socket again).
    if (!finished && overlimit.get()) {
      if (buffer.size() == startlimit) {
        overlimit.set(false);
        readController.flushResponses();
      }
    }

    if (b != null && b.getHeader().getIsLastBatch()) {
      streamCounter--;
      if (streamCounter == 0) {
        finished();
      }
    }
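The startlimit check above is one half of a high/low-water-mark scheme: reading stops once the queue grows past a soft limit and resumes once it drains back down. A generic, self-contained sketch of that hysteresis (the class and method names are hypothetical; only the softlimit/startlimit idea follows the snippet):

import java.util.concurrent.LinkedBlockingDeque;
import java.util.concurrent.atomic.AtomicBoolean;

// Generic high/low-water-mark flow control: stop accepting above softlimit,
// resume once the queue drains back to startlimit.
public class FlowControlSketch<T> {
  private final LinkedBlockingDeque<T> buffer = new LinkedBlockingDeque<T>();
  private final AtomicBoolean overlimit = new AtomicBoolean(false);
  private final int softlimit;    // pause socket reads above this size
  private final int startlimit;   // resume socket reads at or below this size

  public FlowControlSketch(int softlimit, int startlimit) {
    this.softlimit = softlimit;
    this.startlimit = startlimit;
  }

  public void enqueue(T item) {
    buffer.add(item);
    if (buffer.size() > softlimit) {
      overlimit.set(true);        // caller should stop reading from the socket
    }
  }

  public T poll() {
    T item = buffer.poll();
    if (overlimit.get() && buffer.size() <= startlimit) {
      overlimit.set(false);       // caller may resume reading from the socket
    }
    return item;
  }

  public boolean isOverlimit() {
    return overlimit.get();
  }
}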

      fragmentManager.setAutoRead(true);
      logger.debug("Setting autoRead true");
    }
    boolean spool = spooling.get();
    RawFragmentBatchWrapper w = buffer.poll();
    RawFragmentBatch batch;
    if (w == null && !finished) {
      try {
        w = buffer.take();
        batch = w.get();
        if (batch.getHeader().getIsOutOfMemory()) {
          outOfMemory = true;
          return batch;
        }
        queueSize -= w.getBodySize();
        return batch;
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();  // preserve the interrupt for callers
        return null;
      }
    }
    }
    if (w == null) {
      return null;
    }

    batch = w.get();
    if (batch.getHeader().getIsOutOfMemory()) {
      outOfMemory = true;
      return batch;
    }
    queueSize -= w.getBodySize();
//    assert queueSize >= 0;
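The take() and poll() paths above duplicate the unwrap-and-account logic. A hypothetical consolidation, assuming the same fields as the snippet (whether RawFragmentBatchWrapper.get() throws checked exceptions varies by Drill version, so treat this as a sketch):

  // Hypothetical helper keeping the two unwrap paths in sync: propagate the
  // out-of-memory flag, otherwise subtract the body size from the queue total.
  private RawFragmentBatch unwrap(RawFragmentBatchWrapper w) {
    RawFragmentBatch batch = w.get();
    if (batch.getHeader().getIsOutOfMemory()) {
      outOfMemory = true;
      return batch;
    }
    queueSize -= w.getBodySize();
    return batch;
  }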
