Package org.apache.phoenix.schema.tuple

Examples of org.apache.phoenix.schema.tuple.Tuple


                // Build a map from each row's evaluated hash key to the list of rows
                // sharing that key. Each serialized row is length-prefixed with a vint.
                for (int i = 0; i < nRows; i++) {
                    int resultSize = (int) Bytes.readVLong(hashCacheByteArray, offset);
                    offset += WritableUtils.decodeVIntSize(hashCacheByteArray[offset]);
                    ImmutableBytesWritable value = new ImmutableBytesWritable(hashCacheByteArray, offset, resultSize);
                    Tuple result = new ResultTuple(new Result(value));
                    ImmutableBytesPtr key = TupleUtil.getConcatenatedValue(result, onExpressions);
                    List<Tuple> tuples = hashCacheMap.get(key);
                    if (tuples == null) {
                        tuples = new LinkedList<Tuple>();
                        hashCacheMap.put(key, tuples);
                    }
                    tuples.add(result);
                    offset += resultSize;
                }
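
The map above is keyed by ImmutableBytesPtr, a subclass of ImmutableBytesWritable that hashes and compares by content and caches its hash code. A minimal sketch of that bucketing pattern, assuming Phoenix 4.x-era packages; the class name HashKeyBucketSketch is hypothetical:

    import java.util.HashMap;
    import java.util.LinkedList;
    import java.util.List;
    import java.util.Map;

    import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
    import org.apache.phoenix.schema.tuple.Tuple;

    public class HashKeyBucketSketch {
        // Buckets of cached rows, keyed by the concatenated join-key bytes.
        private final Map<ImmutableBytesPtr, List<Tuple>> hashCacheMap =
                new HashMap<ImmutableBytesPtr, List<Tuple>>();

        // Add a row under its evaluated key. Equal key bytes share a bucket
        // because ImmutableBytesPtr compares by content.
        void put(ImmutableBytesPtr key, Tuple row) {
            List<Tuple> bucket = hashCacheMap.get(key);
            if (bucket == null) {
                bucket = new LinkedList<Tuple>();
                hashCacheMap.put(key, bucket);
            }
            bucket.add(row);
        }

        // Probe with raw key bytes, as the join scanner later does.
        List<Tuple> probe(byte[] keyBytes) {
            return hashCacheMap.get(new ImmutableBytesPtr(keyBytes));
        }
    }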


            // Reassemble a KeyValue from the vint-length-prefixed key/value bytes,
            // wrap it as a Tuple, and re-read the aggregate value from it.
            // Note: getVIntSize(keyLength) assumes the key- and value-length vints
            // have the same encoded size; getVIntSize(valueLength) is the general form.
            int vIntValLength = WritableUtils.getVIntSize(keyLength);
            KeyValue keyValue =
                    KeyValueUtil.newKeyValue(ptr.get(), ptr.getOffset(), ptr.getLength(),
                        QueryConstants.SINGLE_COLUMN_FAMILY, QueryConstants.SINGLE_COLUMN,
                        QueryConstants.AGG_TIMESTAMP, data, vIntKeyLength + keyLength + vIntValLength, valueLength);
            Tuple result = new SingleKeyValueTuple(keyValue);
            TupleUtil.getAggregateValue(result, ptr);
            // Rebuild the value bit set (which fields are present) from the serialized aggregate.
            KeyValueSchema schema = aggregators.getValueSchema();
            ValueBitSet tempValueSet = ValueBitSet.newInstance(schema);
            tempValueSet.clear();
            tempValueSet.or(ptr);
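
For reference, a self-contained sketch of constructing and reading a SingleKeyValueTuple, assuming the pre-Cell Phoenix API used throughout these snippets (SingleKeyValueTuple wraps a single HBase KeyValue); the class name is hypothetical:

    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
    import org.apache.hadoop.hbase.util.Bytes;
    import org.apache.phoenix.schema.tuple.SingleKeyValueTuple;
    import org.apache.phoenix.schema.tuple.Tuple;

    public class SingleKeyValueTupleDemo {
        public static void main(String[] args) {
            // Wrap one KeyValue as a Tuple, as the snippet above does.
            KeyValue kv = new KeyValue(Bytes.toBytes("row1"),
                    Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v"));
            Tuple tuple = new SingleKeyValueTuple(kv);

            // The row key is exposed through getKey(ptr) without copying bytes.
            ImmutableBytesWritable ptr = new ImmutableBytesWritable();
            tuple.getKey(ptr);
            System.out.println(Bytes.toString(ptr.get(), ptr.getOffset(), ptr.getLength())); // row1
            System.out.println(tuple.size()); // 1
        }
    }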

        }

        // Ordered comparison: results must arrive in exactly the expected order.
        @Override
        public void assertNext(Tuple result) throws Exception {
            assertTrue(expectedResults.hasNext());
            Tuple expected = expectedResults.next();
            TestUtil.compareTuples(expected, result);
        }

        }

        // Unordered comparison: look the expected tuple up by its row key instead.
        @Override
        public void assertNext(Tuple result) throws Exception {
            result.getKey(tempPtr);
            Tuple expected = expectedResults.remove(tempPtr);
            TestUtil.compareTuples(expected, result);
        }
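
The two assertNext variants above compare results in arrival order and by row key, respectively. A hedged sketch of a row-key equality check in the spirit of TestUtil.compareTuples (the class and helper names are hypothetical):

    import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
    import org.apache.phoenix.schema.tuple.Tuple;

    public final class TupleKeys {
        // Hypothetical helper: true when both tuples have byte-identical row keys.
        public static boolean sameRowKey(Tuple a, Tuple b) {
            ImmutableBytesWritable keyA = new ImmutableBytesWritable();
            ImmutableBytesWritable keyB = new ImmutableBytesWritable();
            a.getKey(keyA);
            b.getKey(keyB);
            return keyA.compareTo(keyB) == 0;
        }

        private TupleKeys() {}
    }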

            CompilableStatement compilableStmt = getStatement();
            final StatementPlan plan = compilableStmt.compilePlan(stmt, Sequence.ValueOp.VALIDATE_SEQUENCE);
            List<String> planSteps = plan.getExplainPlan().getPlanSteps();
            List<Tuple> tuples = Lists.newArrayListWithExpectedSize(planSteps.size());
            for (String planStep : planSteps) {
                Tuple tuple = new SingleKeyValueTuple(KeyValueUtil.newKeyValue(
                        PDataType.VARCHAR.toBytes(planStep),
                        EXPLAIN_PLAN_FAMILY, EXPLAIN_PLAN_COLUMN,
                        MetaDataProtocol.MIN_TABLE_TIMESTAMP, ByteUtil.EMPTY_BYTE_ARRAY));
                tuples.add(tuple);
            }
            final ResultIterator iterator = new MaterializedResultIterator(tuples);
            return new QueryPlan() {
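
MaterializedResultIterator, used above to serve an EXPLAIN plan as rows, simply replays an in-memory collection of tuples and returns null when exhausted. A minimal drain sketch under the same API assumptions (hypothetical class name):

    import java.sql.SQLException;
    import java.util.ArrayList;
    import java.util.List;

    import org.apache.phoenix.iterate.MaterializedResultIterator;
    import org.apache.phoenix.iterate.ResultIterator;
    import org.apache.phoenix.schema.tuple.Tuple;

    public class DrainIteratorSketch {
        // Drain a materialized iterator; next() returns null once exhausted.
        static List<Tuple> drain(List<Tuple> tuples) throws SQLException {
            ResultIterator iterator = new MaterializedResultIterator(tuples);
            List<Tuple> out = new ArrayList<Tuple>();
            try {
                for (Tuple t = iterator.next(); t != null; t = iterator.next()) {
                    out.add(t);
                }
            } finally {
                iterator.close();
            }
            return out;
        }
    }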

            state = new MutationState(maxSize, connection, totalRowCount);
        }
        final MutationState finalState = state;
        // Encode the total row count as a single LONG KeyValue and wrap it as the
        // one tuple this iterator will ever return.
        byte[] value = PDataType.LONG.toBytes(totalRowCount);
        KeyValue keyValue = KeyValueUtil.newKeyValue(UNGROUPED_AGG_ROW_KEY, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, value, 0, value.length);
        final Tuple tuple = new SingleKeyValueTuple(keyValue);
        return new PeekingResultIterator() {
            private boolean done = false;

            @Override
            public Tuple next() throws SQLException {
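
The anonymous iterator above is cut off at next(). A hedged, self-contained sketch of the single-row pattern it is building, assuming the next/peek/close/explain methods of this Phoenix line's iterator interfaces (the class name is hypothetical): one precomputed tuple, handed out exactly once.

    import java.sql.SQLException;
    import java.util.List;

    import org.apache.phoenix.iterate.PeekingResultIterator;
    import org.apache.phoenix.schema.tuple.Tuple;

    public class SingleTupleIterator implements PeekingResultIterator {
        private final Tuple tuple;
        private boolean done = false;

        public SingleTupleIterator(Tuple tuple) {
            this.tuple = tuple;
        }

        @Override
        public Tuple next() throws SQLException {
            if (done) {
                return null;
            }
            done = true;
            return tuple;
        }

        @Override
        public Tuple peek() throws SQLException {
            return done ? null : tuple;
        }

        @Override
        public void close() throws SQLException {
            // Nothing to release: the tuple lives in memory.
        }

        @Override
        public void explain(List<String> planSteps) {
        }
    }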

    /**
     *  We only need to call startRegionOperation and closeRegionOperation when
     *  getting the first Tuple (which forces running through the entire region),
     *  since after that everything is held in memory.
     */
    private RegionScanner getTopNScanner(final ObserverContext<RegionCoprocessorEnvironment> c,
            final RegionScanner s, final OrderedResultIterator iterator,
            ImmutableBytesWritable tenantId) throws Throwable {
        final Tuple firstTuple;
        TenantCache tenantCache = GlobalCache.getTenantCache(c.getEnvironment(), tenantId);
        long estSize = iterator.getEstimatedByteSize();
        final MemoryChunk chunk = tenantCache.getMemoryManager().allocate(estSize);
        final HRegion region = c.getEnvironment().getRegion();
        region.startRegionOperation();
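
A hedged sketch of how the method plausibly continues, following directly from the comment above: the region operation is held only while the first next() call materializes the topN rows.

        try {
            // The first next() call runs through the region and caches the topN
            // rows, so the operation can be released immediately afterwards.
            firstTuple = iterator.next();
        } finally {
            region.closeRegionOperation();
        }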

                            byte[] uuidValue = cache.getId();
                            context.getScan().setAttribute(PhoenixIndexCodec.INDEX_UUID, uuidValue);
                        }
                        ResultIterator iterator = aggPlan.iterator();
                        try {
                            Tuple row = iterator.next();
                            final long mutationCount = (Long)projector.getColumnProjector(0).getValue(row, PDataType.LONG, ptr);
                            return new MutationState(maxSize, connection) {
                                @Override
                                public long getUpdateCount() {
                                    return mutationCount;
                                }
                            };
                        } finally {
                            iterator.close();
                        }
                    } finally {
                        if (cache != null) {
                            cache.close();
                        }
                    }
                }

                @Override
                public ExplainPlan getExplainPlan() throws SQLException {
                    List<String> queryPlanSteps = aggPlan.getExplainPlan().getPlanSteps();
                    List<String> planSteps = Lists.newArrayListWithExpectedSize(queryPlanSteps.size()+1);
                    planSteps.add("DELETE ROWS");
                    planSteps.addAll(queryPlanSteps);
                    return new ExplainPlan(planSteps);
                }
            };
        } else {
            if (parallelIteratorFactory != null) {
                parallelIteratorFactory.setRowProjector(plan.getProjector());
            }
            return new MutationPlan() {

                @Override
                public PhoenixConnection getConnection() {
                    return connection;
                }

                @Override
                public ParameterMetaData getParameterMetaData() {
                    return context.getBindManager().getParameterMetaData();
                }

                @Override
                public StatementContext getContext() {
                    return context;
                }

                @Override
                public MutationState execute() throws SQLException {
                    ResultIterator iterator = plan.iterator();
                    if (!hasLimit) {
                        Tuple tuple;
                        long totalRowCount = 0;
                        while ((tuple = iterator.next()) != null) { // Runs query
                            KeyValue kv = tuple.getValue(0);
                            totalRowCount += PDataType.LONG.getCodec().decodeLong(kv.getBuffer(), kv.getValueOffset(), SortOrder.getDefault());
                        }
                        // Return the total number of rows that have been deleted. When auto-commit
                        // is off, the mutations all remain in the current connection's mutation state.
                        return new MutationState(maxSize, connection, totalRowCount);
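
A hypothetical helper isolating the decode step from execute() above: the ungrouped-aggregate coprocessor hands back the affected-row count as a single LONG-encoded KeyValue.

    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.phoenix.schema.PDataType;
    import org.apache.phoenix.schema.SortOrder;
    import org.apache.phoenix.schema.tuple.Tuple;

    public final class RowCountDecoder {
        // Mirrors the decode in execute() above: first KeyValue of the row
        // carries the LONG-encoded count in its value bytes.
        public static long decodeRowCount(Tuple row) {
            KeyValue kv = row.getValue(0);
            return PDataType.LONG.getCodec().decodeLong(
                    kv.getBuffer(), kv.getValueOffset(), SortOrder.getDefault());
        }

        private RowCountDecoder() {}
    }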

   
    private void processResults(List<KeyValue> result, boolean hasBatchLimit) throws IOException {
        if (result.isEmpty())
            return;

        // Wrap the raw scan result as a Tuple so join expressions can evaluate against it.
        Tuple tuple = new ResultTuple(new Result(result));
        if (joinInfo == null || joinInfo.forceProjection()) {
            tuple = projector.projectResults(tuple);
        }
        if (joinInfo == null) {
            resultQueue.offer(tuple);
            return;
        }
       
        if (hasBatchLimit)
            throw new UnsupportedOperationException("Cannot support join operations in scans with limit");

        // Probe each hash cache whose join key can be evaluated from this row alone;
        // a missing match on an inner join short-circuits the row entirely.
        int count = joinInfo.getJoinIds().length;
        boolean cont = true;
        for (int i = 0; i < count; i++) {
            if (!(joinInfo.earlyEvaluation()[i]))
                continue;
            ImmutableBytesPtr key = TupleUtil.getConcatenatedValue(tuple, joinInfo.getJoinExpressions()[i]);
            tempTuples[i] = hashCaches[i].get(key);
            JoinType type = joinInfo.getJoinTypes()[i];
            if (type == JoinType.Inner && tempTuples[i] == null) {
                cont = false;
                break;
            }
        }
        if (cont) {
            if (projector == null) {
                // No projector: emit the bare row once per combination of matches.
                int dup = 1;
                for (int i = 0; i < count; i++) {
                    dup *= (tempTuples[i] == null ? 1 : tempTuples[i].size());
                }
                for (int i = 0; i < dup; i++) {
                    resultQueue.offer(tuple);
                }
            } else {
                KeyValueSchema schema = joinInfo.getJoinedSchema();
                if (!joinInfo.forceProjection()) {
                    tuple = projector.projectResults(tuple);
                }
                resultQueue.offer(tuple);
                for (int i = 0; i < count; i++) {
                    boolean earlyEvaluation = joinInfo.earlyEvaluation()[i];
                    if (earlyEvaluation && tempTuples[i] == null)
                        continue;
                    // Expand the queue in place: re-offer each buffered tuple once
                    // per matching right-side row (a cartesian product per join id).
                    int j = resultQueue.size();
                    while (j-- > 0) {
                        Tuple lhs = resultQueue.poll();
                        if (!earlyEvaluation) {
                            ImmutableBytesPtr key = TupleUtil.getConcatenatedValue(lhs, joinInfo.getJoinExpressions()[i]);
                            tempTuples[i] = hashCaches[i].get(key);                         
                            if (tempTuples[i] == null) {
                                if (joinInfo.getJoinTypes()[i] != JoinType.Inner) {
                                    resultQueue.offer(lhs);
                                }
                                continue;
                            }
                        }
                        for (Tuple t : tempTuples[i]) {
                            Tuple joined = tempSrcBitSet[i] == ValueBitSet.EMPTY_VALUE_BITSET ?
                                    lhs : TupleProjector.mergeProjectedValue(
                                            (ProjectedValueTuple) lhs, schema, tempDestBitSet,
                                            t, joinInfo.getSchemas()[i], tempSrcBitSet[i],
                                            joinInfo.getFieldPositions()[i]);
                            resultQueue.offer(joined);
                        }
                    }
                }
            }
            // apply post-join filter
            Expression postFilter = joinInfo.getPostJoinFilterExpression();
            if (postFilter != null) {
                for (Iterator<Tuple> iter = resultQueue.iterator(); iter.hasNext();) {
                    Tuple t = iter.next();
                    ImmutableBytesWritable tempPtr = new ImmutableBytesWritable();
                    try {
                        if (!postFilter.evaluate(t, tempPtr)) {
                            iter.remove();
                            continue;
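
The inner while (j-- > 0) loop above expands the result queue in place: every buffered tuple is polled once and re-offered once per matching right-side row. A plain-Java illustration of that expansion, with strings standing in for tuples (purely illustrative, no Phoenix types):

    import java.util.ArrayDeque;
    import java.util.Arrays;
    import java.util.Deque;
    import java.util.List;

    public class QueueExpansionSketch {
        public static void main(String[] args) {
            Deque<String> queue = new ArrayDeque<String>(Arrays.asList("lhs"));
            List<List<String>> matchesPerJoin = Arrays.asList(
                    Arrays.asList("a1", "a2"),   // matches from the first hash cache
                    Arrays.asList("b1"));        // matches from the second hash cache

            for (List<String> matches : matchesPerJoin) {
                int j = queue.size();
                while (j-- > 0) {
                    String lhs = queue.poll();
                    for (String rhs : matches) {
                        queue.offer(lhs + "+" + rhs); // stands in for mergeProjectedValue
                    }
                }
            }
            System.out.println(queue); // [lhs+a1+b1, lhs+a2+b1]
        }
    }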

   
    private boolean nextInQueue(List<KeyValue> results) {
        if (resultQueue.isEmpty())
            return false;

        // Flatten the next joined tuple back into the KeyValue list that the
        // region scanner contract expects.
        Tuple tuple = resultQueue.poll();
        for (int i = 0; i < tuple.size(); i++) {
            results.add(tuple.getValue(i));
        }
        // More rows are available while under the limit and either the queue
        // still holds buffered tuples or the underlying scanner has more.
        return (count++ < limit) && (resultQueue.isEmpty() ? hasMore : true);
    }
