Package: org.apache.tajo.storage

Examples of org.apache.tajo.storage.Tuple


  /**
   * Returns the join plan node backing this operator.
   *
   * @return the {@link JoinNode} held in {@code this.joinNode}
   */
  public JoinNode getPlan(){
    return this.joinNode;
  }

  public Tuple next() throws IOException {
    Tuple previous;

    for (;;) {
      if (!outerIterator.hasNext() && !innerIterator.hasNext()) {
        if(end){
          return null;
View Full Code Here


  }

  @Override
  public Tuple next() throws IOException {   
    Target [] targets = plan.getTargets();
    Tuple t = new VTuple(targets.length);
    for (int i = 0; i < targets.length; i++) {
      t.put(i, targets[i].getEvalTree().eval(inSchema, null));
    }
    return t;
  }
View Full Code Here

    contexts = new FunctionContext[plan.getAggFunctions().length];
  }

  @Override
  public Tuple next() throws IOException {
    Tuple currentKey;
    Tuple tuple;
    Tuple outputTuple = null;

    while(!context.isStopped() && (tuple = child.next()) != null) {

      // get a key tuple
      currentKey = new VTuple(groupingKeyIds.length);
      for(int i = 0; i < groupingKeyIds.length; i++) {
        currentKey.put(i, tuple.get(groupingKeyIds[i]));
      }

      /** Aggregation State */
      if (lastKey == null || lastKey.equals(currentKey)) {
        if (lastKey == null) {
          for(int i = 0; i < aggFunctionsNum; i++) {
            contexts[i] = aggFunctions[i].newContext();
            aggFunctions[i].merge(contexts[i], inSchema, tuple);
          }
          lastKey = currentKey;
        } else {
          // aggregate
          for (int i = 0; i < aggFunctionsNum; i++) {
            aggFunctions[i].merge(contexts[i], inSchema, tuple);
          }
        }

      } else { /** Finalization State */
        // finalize aggregate and return
        outputTuple = new VTuple(outSchema.size());
        int tupleIdx = 0;

        for(; tupleIdx < groupingKeyNum; tupleIdx++) {
          outputTuple.put(tupleIdx, lastKey.get(tupleIdx));
        }
        for(int aggFuncIdx = 0; aggFuncIdx < aggFunctionsNum; tupleIdx++, aggFuncIdx++) {
          outputTuple.put(tupleIdx, aggFunctions[aggFuncIdx].terminate(contexts[aggFuncIdx]));
        }

        for(int evalIdx = 0; evalIdx < aggFunctionsNum; evalIdx++) {
          contexts[evalIdx] = aggFunctions[evalIdx].newContext();
          aggFunctions[evalIdx].merge(contexts[evalIdx], inSchema, tuple);
        }

        lastKey = currentKey;
        return outputTuple;
      }
    } // while loop

    if (!finished) {
      outputTuple = new VTuple(outSchema.size());
      int tupleIdx = 0;
      for(; tupleIdx < groupingKeyNum; tupleIdx++) {
        outputTuple.put(tupleIdx, lastKey.get(tupleIdx));
      }
      for(int aggFuncIdx = 0; aggFuncIdx < aggFunctionsNum; tupleIdx++, aggFuncIdx++) {
        outputTuple.put(tupleIdx, aggFunctions[aggFuncIdx].terminate(contexts[aggFuncIdx]));
      }
      finished = true;
    }
    return outputTuple;
  }
View Full Code Here

  public FileChunk get(Map<String, List<String>> kvs) throws IOException {
    // nothing to verify the file because AdvancedDataRetriever checks
    // its validity of the file.
    File data = new File(this.file, "data/data");
    byte [] startBytes = Base64.decodeBase64(kvs.get("start").get(0));
    Tuple start = decoder.toTuple(startBytes);
    byte [] endBytes;
    Tuple end;
    endBytes = Base64.decodeBase64(kvs.get("end").get(0));
    end = decoder.toTuple(endBytes);
    boolean last = kvs.containsKey("final");

    if(!comp.isAscendingFirstKey()) {
      Tuple tmpKey = start;
      start = end;
      end = tmpKey;
    }

    LOG.info("GET Request for " + data.getAbsolutePath() + " (start="+start+", end="+ end +
View Full Code Here

    for (Column col : target.getColumns()) {
      Preconditions.checkState(statSet.containsKey(col),
          "ERROR: Invalid Column Stats (column stats: " + colStats + ", there exists not target " + col);
    }

    Tuple startTuple = new VTuple(target.size());
    Tuple endTuple = new VTuple(target.size());
    int i = 0;

    // In outer join, empty table could be searched.
    // As a result, min value and max value would be null.
    // So, we should put NullDatum for this case.
    for (Column col : target.getColumns()) {
      if (sortSpecs[i].isAscending()) {
        if (statSet.get(col).getMinValue() != null)
          startTuple.put(i, statSet.get(col).getMinValue());
        else
          startTuple.put(i, DatumFactory.createNullDatum());

        if (statSet.get(col).getMaxValue() != null)
          endTuple.put(i, statSet.get(col).getMaxValue());
        else
          endTuple.put(i, DatumFactory.createNullDatum());
      } else {
        if (statSet.get(col).getMaxValue() != null)
          startTuple.put(i, statSet.get(col).getMaxValue());
        else
          startTuple.put(i, DatumFactory.createNullDatum());

        if (statSet.get(col).getMinValue() != null)
          endTuple.put(i, statSet.get(col).getMinValue());
        else
          endTuple.put(i, DatumFactory.createNullDatum());
      }
      i++;
    }
    return new TupleRange(sortSpecs, startTuple, endTuple);
  }
View Full Code Here

    // true means this is a file.
    if (beNullIfFile && partitionColumnSchema.size() < columnValues.length) {
      return null;
    }

    Tuple tuple = new VTuple(partitionColumnSchema.size());
    int i = 0;
    for (; i < columnValues.length && i < partitionColumnSchema.size(); i++) {
      String [] parts = columnValues[i].split("=");
      if (parts.length != 2) {
        return null;
      }
      int columnId = partitionColumnSchema.getColumnIdByName(parts[0]);
      Column keyColumn = partitionColumnSchema.getColumn(columnId);
      tuple.put(columnId, DatumFactory.createFromString(keyColumn.getDataType(), parts[1]));
    }
    for (; i < partitionColumnSchema.size(); i++) {
      tuple.put(i, NullDatum.get());
    }
    return tuple;
  }
View Full Code Here

    }

    public List<Tuple> nextBlock() {
      List<Tuple> results = Lists.newArrayList();

      Tuple tuple;
      while (iterator.hasNext()) {
        tuple = iterator.next();
        if (qual.eval(schema, tuple).isTrue()) {
          results.add(tuple);
        }
View Full Code Here

    for (int i = 0; i < 100; i++) {
      Datum[] datums = new Datum[5];
      for (int j = 0; j < 5; j++) {
        datums[j] = new TextDatum(i + "_" + j);
      }
      Tuple tuple = new VTuple(datums);
      tupleData.add(tuple);
    }

    ExecutionBlockId ebId = QueryIdFactory.newExecutionBlockId(
        QueryIdFactory.newQueryId(System.currentTimeMillis(), 0));

    TupleCacheKey cacheKey = new TupleCacheKey(ebId.toString(), "TestTable");
    TupleCache tupleCache = TupleCache.getInstance();

    assertFalse(tupleCache.isBroadcastCacheReady(cacheKey));
    assertTrue(tupleCache.lockBroadcastScan(cacheKey));
    assertFalse(tupleCache.lockBroadcastScan(cacheKey));

    tupleCache.addBroadcastCache(cacheKey, tupleData);
    assertTrue(tupleCache.isBroadcastCacheReady(cacheKey));

    Scanner scanner = tupleCache.openCacheScanner(cacheKey, null);
    assertNotNull(scanner);

    int count = 0;

    while (true) {
      Tuple tuple = scanner.next();
      if (tuple == null) {
        break;
      }

      assertEquals(tupleData.get(count), tuple);
View Full Code Here

  /**
   * It computes the value cardinality of a tuple range.
   * @return
   */
  public static BigDecimal computeCardinalityForAllColumns(SortSpec[] sortSpecs, TupleRange range, boolean inclusive) {
    Tuple start = range.getStart();
    Tuple end = range.getEnd();
    Column col;

    BigDecimal cardinality = new BigDecimal(1);
    BigDecimal columnCard;
    for (int i = 0; i < sortSpecs.length; i++) {
      columnCard = computeCardinality(sortSpecs[i].getSortKey().getDataType(), start.get(i), end.get(i), inclusive,
          sortSpecs[i].isAscending());

      if (new BigDecimal(0).compareTo(columnCard) < 0) {
        cardinality = cardinality.multiply(columnCard);
      }
View Full Code Here

  @Test
  public void testAll() throws Exception {
    Path file = createTmpFile();
    Schema schema = createAllTypesSchema();
    Tuple tuple = new VTuple(schema.size());
    tuple.put(0, DatumFactory.createBool(true));
    tuple.put(1, DatumFactory.createBit((byte)128));
    tuple.put(2, DatumFactory.createChar('t'));
    tuple.put(3, DatumFactory.createInt2((short)2048));
    tuple.put(4, DatumFactory.createInt4(4096));
    tuple.put(5, DatumFactory.createInt8(8192L));
    tuple.put(6, DatumFactory.createFloat4(0.2f));
    tuple.put(7, DatumFactory.createFloat8(4.1));
    tuple.put(8, DatumFactory.createText(HELLO));
    tuple.put(9, DatumFactory.createBlob(HELLO.getBytes(Charsets.UTF_8)));
    tuple.put(10, NullDatum.get());

    TajoParquetWriter writer = new TajoParquetWriter(file, schema);
    writer.write(tuple);
    writer.close();

    TajoParquetReader reader = new TajoParquetReader(file, schema);
    tuple = reader.read();

    assertNotNull(tuple);
    assertEquals(true, tuple.getBool(0));
    assertEquals((byte)128, tuple.getByte(1));
    assertTrue(String.valueOf('t').equals(String.valueOf(tuple.getChar(2))));
    assertEquals((short)2048, tuple.getInt2(3));
    assertEquals(4096, tuple.getInt4(4));
    assertEquals(8192L, tuple.getInt8(5));
    assertEquals(new Float(0.2f), new Float(tuple.getFloat4(6)));
    assertEquals(new Double(4.1), new Double(tuple.getFloat8(7)));
    assertTrue(HELLO.equals(tuple.getText(8)));
    assertArrayEquals(HELLO.getBytes(Charsets.UTF_8), tuple.getBytes(9));
    assertEquals(NullDatum.get(), tuple.get(10));
  }
View Full Code Here

TOP

Related Classes of org.apache.tajo.storage.Tuple

Copyright © 2018 www.massapi.com. All rights reserved.
All source code are property of their respective owners. Java is a trademark of Sun Microsystems, Inc and owned by ORACLE Inc. Contact coftware#gmail.com.