Package org.apache.phoenix.schema.tuple

Examples of org.apache.phoenix.schema.tuple.Tuple
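
The fragments below are collected from Phoenix's compilers, iterators, and coprocessors, and most share a single pattern: pull a Tuple off a ResultIterator, fetch a KeyValue from it with getValue(), and decode the bytes with a PDataType codec. Here is a minimal sketch of that pattern, assuming the same KeyValue-era Phoenix APIs the fragments use; the helper name is made up for illustration:

    import java.sql.SQLException;

    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.phoenix.iterate.ResultIterator;
    import org.apache.phoenix.schema.PDataType;
    import org.apache.phoenix.schema.tuple.Tuple;

    // Hypothetical helper, a sketch rather than actual Phoenix source: sums the
    // LONG value carried in the first KeyValue of each Tuple the iterator
    // returns (the shape of the aggregated, row-counting scans below).
    private static long sumFirstColumn(ResultIterator iterator) throws SQLException {
        long total = 0;
        try {
            Tuple tuple;
            while ((tuple = iterator.next()) != null) { // null marks end of results
                KeyValue kv = tuple.getValue(0);        // first KeyValue of the row
                total += PDataType.LONG.getCodec().decodeLong(
                        kv.getBuffer(), kv.getValueOffset(), null);
            }
        } finally {
            iterator.close();
        }
        return total;
    }

The first fragment is from the DELETE compilation path: when the delete can run entirely server-side, an aggregate plan is executed and the single Tuple it returns carries the mutation count; otherwise the query runs client-side and the per-scan counts are summed from the first KeyValue of each returned Tuple.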


                            context.getScan().setAttribute(PhoenixIndexCodec.INDEX_UUID, uuidValue);
                        }
                        Scanner scanner = aggPlan.getScanner();
                        ResultIterator iterator = scanner.iterator();
                        try {
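                            // The aggregate plan returns a single Tuple whose first column is the mutation count.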
                            Tuple row = iterator.next();
                            final long mutationCount = (Long)projector.getColumnProjector(0).getValue(row, PDataType.LONG, ptr);
                            return new MutationState(maxSize, connection) {
                                @Override
                                public long getUpdateCount() {
                                    return mutationCount;
                                }
                            };
                        } finally {
                            iterator.close();
                        }
                    } finally {
                        if (cache != null) {
                            cache.close();
                        }
                    }
                }

                @Override
                public ExplainPlan getExplainPlan() throws SQLException {
                    List<String> queryPlanSteps =  aggPlan.getExplainPlan().getPlanSteps();
                    List<String> planSteps = Lists.newArrayListWithExpectedSize(queryPlanSteps.size()+1);
                    planSteps.add("DELETE ROWS");
                    planSteps.addAll(queryPlanSteps);
                    return new ExplainPlan(planSteps);
                }
            };
        } else {
            if (parallelIteratorFactory != null) {
                parallelIteratorFactory.setRowProjector(plan.getProjector());
            }
            return new MutationPlan() {

                @Override
                public PhoenixConnection getConnection() {
                    return connection;
                }

                @Override
                public ParameterMetaData getParameterMetaData() {
                    return context.getBindManager().getParameterMetaData();
                }

                @Override
                public MutationState execute() throws SQLException {
                    Scanner scanner = plan.getScanner();
                    ResultIterator iterator = scanner.iterator();
                    if (!hasLimit) {
                        Tuple tuple;
                        long totalRowCount = 0;
                        while ((tuple = iterator.next()) != null) { // Runs the query
                            KeyValue kv = tuple.getValue(0);
                            totalRowCount += PDataType.LONG.getCodec().decodeLong(kv.getBuffer(), kv.getValueOffset(), null);
                        }
                        // Return the total number of rows that have been deleted. When auto commit is off,
                        // the mutations are all held in the mutation state of the current connection.
                        return new MutationState(maxSize, connection, totalRowCount);
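
The UPSERT SELECT path follows the same shape: when the upsert can run entirely server-side, an aggregate plan returns a single Tuple carrying the mutation count; otherwise the query runs client-side and the per-scan counts are decoded from the first KeyValue of each Tuple.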


                                    scan.setAttribute(PhoenixIndexCodec.INDEX_UUID, uuidValue);
                                }
                                Scanner scanner = aggPlan.getScanner();
                                ResultIterator iterator = scanner.iterator();
                                try {
                                    Tuple row = iterator.next();
                                    final long mutationCount = (Long)aggProjector.getColumnProjector(0).getValue(row, PDataType.LONG, ptr);
                                    return new MutationState(maxSize, connection) {
                                        @Override
                                        public long getUpdateCount() {
                                            return mutationCount;
                                        }
                                    };
                                } finally {
                                    iterator.close();
                                }
                            } finally {
                                if (cache != null) {
                                    cache.close();
                                }
                            }
                        }
   
                        @Override
                        public ExplainPlan getExplainPlan() throws SQLException {
                            List<String> queryPlanSteps =  aggPlan.getExplainPlan().getPlanSteps();
                            List<String> planSteps = Lists.newArrayListWithExpectedSize(queryPlanSteps.size()+1);
                            planSteps.add("UPSERT ROWS");
                            planSteps.addAll(queryPlanSteps);
                            return new ExplainPlan(planSteps);
                        }
                    };
                }
            }

            ////////////////////////////////////////////////////////////////////
            // UPSERT SELECT run client-side
            ////////////////////////////////////////////////////////////////////
            return new MutationPlan() {

                @Override
                public PhoenixConnection getConnection() {
                    return connection;
                }
               
                @Override
                public ParameterMetaData getParameterMetaData() {
                    return queryPlan.getContext().getBindManager().getParameterMetaData();
                }

                @Override
                public MutationState execute() throws SQLException {
                    Scanner scanner = queryPlan.getScanner();
                    ResultIterator iterator = scanner.iterator();
                    if (upsertParallelIteratorFactory == null) {
                        return upsertSelect(statement, tableRef, projector, iterator, columnIndexes, pkSlotIndexes);
                    }
                    upsertParallelIteratorFactory.setRowProjector(projector);
                    upsertParallelIteratorFactory.setColumnIndexes(columnIndexes);
                    upsertParallelIteratorFactory.setPkSlotIndexes(pkSlotIndexes);
                    Tuple tuple;
                    long totalRowCount = 0;
                    while ((tuple = iterator.next()) != null) { // Runs the query
                        KeyValue kv = tuple.getValue(0);
                        totalRowCount += PDataType.LONG.getCodec().decodeLong(kv.getBuffer(), kv.getValueOffset(), null);
                    }
                    // Return the total number of rows that have been updated. When auto commit is off,
                    // the mutations are all held in the mutation state of the current connection.
                    return new MutationState(maxSize, statement.getConnection(), totalRowCount);
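
Counting rows with a COUNT(1) AggregatePlan: the iterator yields a single Tuple, and the row projector decodes the count from it.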

                            }
                            QueryPlan plan = new AggregatePlan(context, SelectStatement.COUNT_ONE, tableRef, projector, null, OrderBy.EMPTY_ORDER_BY, null, GroupBy.EMPTY_GROUP_BY, null);
                            Scanner scanner = plan.getScanner();
                            ResultIterator iterator = scanner.iterator();
                            try {
                                Tuple row = iterator.next();
                                ImmutableBytesWritable ptr = context.getTempPtr();
                                totalMutationCount += (Long)projector.getColumnProjector(0).getValue(row, PDataType.LONG, ptr);
                            } catch (SQLException e) {
                                sqlE = e;
                            } finally {
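
Probing a server-side hash join: the concatenated join-key value is evaluated against the current Tuple, then used to look up the matching cached tuples for each join id.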

            if (hasLimit)
                throw new UnsupportedOperationException("Cannot support join operations in scans with limit");
           
            int count = joinInfo.getJoinIds().length;
            List<Tuple>[] tuples = new List[count];
            Tuple tuple = new ResultTuple(new Result(result));
            boolean cont = true;
            for (int i = 0; i < count; i++) {
                ImmutableBytesPtr key = TupleUtil.getConcatenatedValue(tuple, joinInfo.getJoinExpressions()[i]);
                HashCache hashCache = (HashCache)cache.getServerCache(joinInfo.getJoinIds()[i]);
                tuples[i] = hashCache.get(key);
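
Building the hash cache itself: each serialized row is wrapped in a ResultTuple and keyed in a map by its evaluated join expressions.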

                // Build Map with evaluated hash key as key and row as value
                for (int i = 0; i < nRows; i++) {
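                    // Each serialized row is prefixed with its byte length as a vint.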
                    int resultSize = (int)Bytes.readVLong(hashCacheByteArray, offset);
                    offset += WritableUtils.decodeVIntSize(hashCacheByteArray[offset]);
                    ImmutableBytesWritable value = new ImmutableBytesWritable(hashCacheByteArray,offset,resultSize);
                    Tuple result = new ResultTuple(new Result(value));
                    ImmutableBytesPtr key = new ImmutableBytesPtr(TupleUtil.getConcatenatedValue(result, onExpressions));
                    List<Tuple> tuples = hashCacheMap.get(key);
                    if (tuples == null) {
                        tuples = new ArrayList<Tuple>(1);
                        hashCacheMap.put(key, tuples);
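
A coprocessor-side TopN scanner; the comment explains why startRegionOperation and closeRegionOperation only need to bracket retrieval of the first Tuple.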

     *  We only need to call startRegionOperation and closeRegionOperation when
     *  getting the first Tuple (which forces running through the entire region)
     *  since after this everything is held in memory.
     */
    private RegionScanner getTopNScanner(final ObserverContext<RegionCoprocessorEnvironment> c, final RegionScanner s, final OrderedResultIterator iterator, ImmutableBytesWritable tenantId) throws Throwable {
        final Tuple firstTuple;
        TenantCache tenantCache = GlobalCache.getTenantCache(c.getEnvironment(), tenantId);
        long estSize = iterator.getEstimatedByteSize();
        final MemoryChunk chunk = tenantCache.getMemoryManager().allocate(estSize);
        final HRegion region = c.getEnvironment().getRegion();
        region.startRegionOperation();
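
After mutations are applied, the total row count is encoded into a KeyValue, wrapped in a SingleKeyValueTuple, and served back through a one-row PeekingResultIterator.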

            state = new MutationState(maxSize, connection, totalRowCount);
        }
        final MutationState finalState = state;
        byte[] value = PDataType.LONG.toBytes(totalRowCount);
        KeyValue keyValue = KeyValueUtil.newKeyValue(UNGROUPED_AGG_ROW_KEY, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, value, 0, value.length);
        final Tuple tuple = new SingleKeyValueTuple(keyValue);
        return new PeekingResultIterator() {
            private boolean done = false;
           
            @Override
            public Tuple next() throws SQLException {
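
A typical iterator implementation of next(): return the current Tuple and advance, translating any IOException into a SQLException.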

   
        @Override
        public synchronized Tuple next() throws SQLException {
            try {
                init();
                Tuple current = next;
                advance();
                return current;
            } catch (IOException e) {
                throw ServerUtil.parseServerException(e);
            }
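
Flattening a Tuple back into its underlying KeyValues: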

                return 0;
            return totalResultSize;
        }

        private List<KeyValue> toKeyValues(ResultEntry entry) {
            Tuple result = entry.getResult();
            int size = result.size();
            List<KeyValue> kvs = new ArrayList<KeyValue>(size);
            for (int i = 0; i < size; i++) {
                kvs.add(result.getValue(i));
            }
            return kvs;
        }
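
And the reverse direction, deserializing a Tuple from spooled bytes: read a vint length prefix, wrap the row bytes in an ImmutableBytesWritable, and construct a ResultTuple.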

            }
            int resultSize = ByteUtil.vintFromBytes(bytes, offset);
            offset += WritableUtils.getVIntSize(resultSize);
            ImmutableBytesWritable value = new ImmutableBytesWritable(bytes,offset,resultSize);
            offset += resultSize;
            Tuple result = new ResultTuple(new Result(value));
            return next = result;
        }
