Package org.apache.phoenix.hbase.index.util

Examples of org.apache.phoenix.hbase.index.util.ImmutableBytesPtr
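ImmutableBytesPtr is Phoenix's subclass of HBase's ImmutableBytesWritable that caches its hash code, so a byte range can be used cheaply as a HashMap or HashSet key. Before the excerpts below, here is a minimal, self-contained sketch of that pattern; the class name ImmutableBytesPtrExample and the sample values are illustrative only, not taken from Phoenix.

import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;

public class ImmutableBytesPtrExample {
    public static void main(String[] args) {
        // Keys are byte ranges; ImmutableBytesPtr precomputes the hash code once.
        Map<ImmutableBytesPtr, Integer> counts = new HashMap<ImmutableBytesPtr, Integer>();

        byte[] row = Bytes.toBytes("row-1");
        counts.put(new ImmutableBytesPtr(row), 1);

        // A pointer over the same bytes, even as a sub-range of a larger buffer,
        // is equal to the original key.
        byte[] buffer = Bytes.toBytes("prefix:row-1");
        ImmutableBytesPtr sameKey = new ImmutableBytesPtr(buffer, 7, 5); // wraps "row-1"
        System.out.println(counts.get(sameKey)); // prints 1
    }
}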


    // Lazily wrap the qualifier bytes in an ImmutableBytesPtr and cache it (double-checked locking).
    public ImmutableBytesWritable getQualifierWritable() {
        if (this.qualifierPtr == null) {
            synchronized (this.qualifier) {
                if (this.qualifierPtr == null) {
                    this.qualifierPtr = new ImmutableBytesPtr(this.qualifier);
                }
            }
        }
        return this.qualifierPtr;
    }


                int mapSize = WritableUtils.readVInt(in);
                for (int i = 0; i < mapSize; i++) {
                    int keyLen = WritableUtils.readVInt(in);
                    byte[] keyBytes = new byte[keyLen];
                    in.readFully(keyBytes, 0, keyLen); // read() may return fewer bytes; readFully reads exactly keyLen
                    ImmutableBytesPtr key = new ImmutableBytesPtr(keyBytes);
                    int value = WritableUtils.readVInt(in);
                    Integer curCount = valueVsCount.get(key);
                    if (curCount == null) {
                        valueVsCount.put(key, value);
                    } else {
                        valueVsCount.put(key, curCount + value); // merge counts when the key was already seen
                    }
                }

        valueVsCount = clientAgg.valueVsCount;
    }

    // Count occurrences of each distinct value, keyed by an ImmutableBytesPtr wrapping the value's bytes.
    @Override
    public void aggregate(Tuple tuple, ImmutableBytesWritable ptr) {
        ImmutableBytesPtr key = new ImmutableBytesPtr(ptr.get(), ptr.getOffset(), ptr.getLength());
        Integer count = this.valueVsCount.get(key);
        if (count == null) {
            this.valueVsCount.put(key, 1);
        } else {
            this.valueVsCount.put(key, count + 1);
        }
    }

        int serializationSize = countMapSerializationSize();
        buffer = new byte[serializationSize];
        int offset = 1;
        offset += ByteUtil.vintToBytes(buffer, offset, this.valueVsCount.size());
        for (Entry<ImmutableBytesPtr, Integer> entry : this.valueVsCount.entrySet()) {
            ImmutableBytesPtr key = entry.getKey();
            offset += ByteUtil.vintToBytes(buffer, offset, key.getLength());
            System.arraycopy(key.get(), key.getOffset(), buffer, offset, key.getLength());
            offset += key.getLength();
            offset += ByteUtil.vintToBytes(buffer, offset, entry.getValue().intValue());
        }
        if (serializationSize > compressThreshold) {
            // The serialized map exceeds the threshold, so compress it with Snappy.
            ByteArrayOutputStream compressedByteStream = new ByteArrayOutputStream();
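The excerpt above is cut off just as the compressed stream is being prepared. The sketch below only illustrates the compress-when-above-a-threshold idea; it is not the Phoenix implementation, it substitutes the JDK's Deflater for Snappy, and the names ThresholdCompressionSketch, COMPRESS_THRESHOLD, and maybeCompress are invented for this example.

import java.io.ByteArrayOutputStream;
import java.util.zip.Deflater;

public class ThresholdCompressionSketch {
    // Invented constant; in the original code the threshold comes from configuration.
    private static final int COMPRESS_THRESHOLD = 1024;

    // Return the payload unchanged when it is small, otherwise a compressed copy.
    static byte[] maybeCompress(byte[] serialized) {
        if (serialized.length <= COMPRESS_THRESHOLD) {
            return serialized;
        }
        Deflater deflater = new Deflater();
        deflater.setInput(serialized);
        deflater.finish();
        ByteArrayOutputStream compressed = new ByteArrayOutputStream(serialized.length);
        byte[] chunk = new byte[4096];
        while (!deflater.finished()) {
            int n = deflater.deflate(chunk);
            compressed.write(chunk, 0, n);
        }
        deflater.end();
        return compressed.toByteArray();
    }
}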

        this.keyExpressions = keyExpressions.subList(1, keyExpressions.size());
        Set<ImmutableBytesPtr> values = Sets.newHashSetWithExpectedSize(keyExpressions.size()-1);
        int fixedWidth = -1;
        boolean isFixedLength = true;
        for (int i = 1; i < keyExpressions.size(); i++) {
            ImmutableBytesPtr ptr = new ImmutableBytesPtr();
            Expression child = keyExpressions.get(i);
            child.evaluate(null, ptr);
            if (ptr.getLength() > 0) { // filter null as it has no impact
                if (values.add(ptr)) {
                    int length = ptr.getLength();
                    if (fixedWidth == -1) {
                        fixedWidth = length;
                    } else {
                        isFixedLength &= fixedWidth == length;
                    }
                   
                    valuesByteLength += ptr.getLength();
                }
            }
        }
        this.fixedWidth = isFixedLength ? fixedWidth : -1;
        // Sort values by byte value so we can get min/max easily
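The comment above notes that the distinct values are sorted by byte value so the minimum and maximum can be read back directly (see the readFields excerpt below). A comparator doing that byte-wise ordering might look like the following sketch; ByteRangeOrder is an illustrative name, not necessarily the comparator Phoenix uses.

import java.util.Comparator;

import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;

public class ByteRangeOrder implements Comparator<ImmutableBytesPtr> {
    @Override
    public int compare(ImmutableBytesPtr a, ImmutableBytesPtr b) {
        // Compare only the wrapped ranges, not the whole backing arrays.
        return Bytes.compareTo(a.get(), a.getOffset(), a.getLength(),
                               b.get(), b.getOffset(), b.getLength());
    }
}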

        return PDataType.BOOLEAN;
    }

    private int readValue(DataInput input, byte[] valuesBytes, int offset, ImmutableBytesPtr ptr) throws IOException {
        int valueLen = fixedWidth == -1 ? WritableUtils.readVInt(input) : fixedWidth;
        // Point the caller-supplied pointer (e.g. minValue/maxValue below) at the value's range and add it to the set.
        ptr.set(valuesBytes, offset, valueLen);
        values.add(ptr);
        return offset + valueLen;
    }

        // TODO: consider using a regular HashSet as we never serialize from the server-side
        values = Sets.newLinkedHashSetWithExpectedSize(len);
        int offset = 0;
        int i = 0;
        // Values were serialized in sorted order, so the first value read is the min and the last is the max.
        if (i < len) {
            offset = readValue(input, valuesBytes, offset, minValue = new ImmutableBytesPtr());
            while (++i < len-1) {
                offset = readValue(input, valuesBytes, offset, new ImmutableBytesPtr());
            }
            if (i < len) {
                offset = readValue(input, valuesBytes, offset, maxValue = new ImmutableBytesPtr());
            } else {
                maxValue = minValue;
            }
        } else {
            minValue = maxValue = new ImmutableBytesPtr(ByteUtil.EMPTY_BYTE_ARRAY);
        }
    }

  // Collect the distinct column families referenced by the given columns, wrapped as ImmutableBytesPtr keys.
  private Set<ImmutableBytesPtr>
      getAllFamilies(Collection<? extends ColumnReference> columns) {
    Set<ImmutableBytesPtr> families = new HashSet<ImmutableBytesPtr>();
    for (ColumnReference ref : columns) {
      families.add(new ImmutableBytesPtr(ref.getFamily()));
    }
    return families;
  }

                }
                if (!starJoinVector[i]) {
                    needsProject = true;
                }
                ColumnResolver leftResolver = (!forceProjection && starJoinVector[i]) ? joinTable.getOriginalResolver() : projectedTable.createColumnResolver();
                joinIds[i] = new ImmutableBytesPtr(emptyByteArray); // place-holder
                Pair<List<Expression>, List<Expression>> joinConditions = joinSpec.compileJoinConditions(context, leftResolver, resolver);
                joinExpressions[i] = joinConditions.getFirst();
                hashExpressions[i] = joinConditions.getSecond();
                joinTypes[i] = joinSpec.getType();
                if (i < count - 1) {
                    fieldPositions[i + 1] = fieldPositions[i] + (tables[i] == null ? 0 : (tables[i].getColumns().size() - tables[i].getPKColumns().size()));
                }
            }
            if (needsProject) {
                TupleProjector.serializeProjectorIntoScan(context.getScan(), initialProjectedTable.createTupleProjector());
            }
            context.setCurrentTable(tableRef);
            context.setResolver(needsProject ? projectedTable.createColumnResolver() : joinTable.getOriginalResolver());
            BasicQueryPlan plan = compileSingleQuery(context, query, binds, asSubquery, joinTable.isAllLeftJoin());
            Expression postJoinFilterExpression = joinTable.compilePostFilterExpression(context);
            Integer limit = null;
            if (query.getLimit() != null && !query.isAggregate() && !query.isDistinct() && query.getOrderBy().isEmpty()) {
                limit = LimitCompiler.compile(context, query);
            }
            HashJoinInfo joinInfo = new HashJoinInfo(projectedTable.getTable(), joinIds, joinExpressions, joinTypes, starJoinVector, tables, fieldPositions, postJoinFilterExpression, limit, forceProjection);
            return new HashJoinPlan(joinTable.getStatement(), plan, joinInfo, hashExpressions, joinPlans, clientProjectors);
        }
       
        JoinSpec lastJoinSpec = joinSpecs.get(joinSpecs.size() - 1);
        JoinType type = lastJoinSpec.getType();
        if (type == JoinType.Full)
            throw new SQLFeatureNotSupportedException("Full joins not supported.");
       
        if (type == JoinType.Right || type == JoinType.Inner) {
            if (!lastJoinSpec.getJoinTable().getJoinSpecs().isEmpty())
                throw new SQLFeatureNotSupportedException("Right join followed by sub-join is not supported.");
           
            JoinTable rhsJoinTable = lastJoinSpec.getJoinTable();
            Table rhsTable = rhsJoinTable.getTable();
            JoinTable lhsJoin = joinTable.getSubJoinTableWithoutPostFilters();
            Scan subScan = ScanUtil.newScan(originalScan);
            StatementContext lhsCtx = new StatementContext(statement, context.getResolver(), subScan, new SequenceManager(statement));
            QueryPlan lhsPlan = compileJoinQuery(lhsCtx, binds, lhsJoin, true);
            ColumnResolver lhsResolver = lhsCtx.getResolver();
            TupleProjector clientProjector = lhsCtx.getClientTupleProjector();
            PTableWrapper lhsProjTable = ((JoinedTableColumnResolver) (lhsResolver)).getPTableWrapper();
            ProjectedPTableWrapper rhsProjTable;
            TableRef rhsTableRef;
            SelectStatement rhs;
            if (!rhsTable.isSubselect()) {
                rhsProjTable = rhsTable.createProjectedTable(!asSubquery);
                rhsTableRef = rhsTable.getTableRef();
                rhsTable.projectColumns(context.getScan());
                rhs = rhsJoinTable.getAsSingleSubquery(rhsTable.getAsSubquery(), asSubquery);
            } else {
                SelectStatement subquery = rhsTable.getAsSubquery();
                QueryPlan plan = compileSubquery(subquery);
                rhsProjTable = rhsTable.createProjectedTable(plan.getProjector());
                rhsTableRef = plan.getTableRef();
                context.getScan().setFamilyMap(plan.getContext().getScan().getFamilyMap());
                rhs = rhsJoinTable.getAsSingleSubquery((SelectStatement) plan.getStatement(), asSubquery);
            }
            boolean forceProjection = rhsTable.isSubselect();
            ColumnResolver rhsResolver = forceProjection ? rhsProjTable.createColumnResolver() : joinTable.getOriginalResolver();
            ImmutableBytesPtr[] joinIds = new ImmutableBytesPtr[] {new ImmutableBytesPtr(emptyByteArray)};
            Pair<List<Expression>, List<Expression>> joinConditions = lastJoinSpec.compileJoinConditions(context, lhsResolver, rhsResolver);
            List<Expression> joinExpressions = joinConditions.getSecond();
            List<Expression> hashExpressions = joinConditions.getFirst();
            int fieldPosition = rhsProjTable.getTable().getColumns().size() - rhsProjTable.getTable().getPKColumns().size();
            PTableWrapper projectedTable = rhsProjTable.mergeProjectedTables(lhsProjTable, type == JoinType.Inner);

public class ServerCachingEndpointImpl extends BaseEndpointCoprocessor implements ServerCachingProtocol {

    // Register the serialized cache payload in the per-tenant cache, keyed by an ImmutableBytesPtr over the cache id.
    @Override
    public boolean addServerCache(byte[] tenantId, byte[] cacheId, ImmutableBytesWritable cachePtr, ServerCacheFactory cacheFactory) throws SQLException {
        TenantCache tenantCache = GlobalCache.getTenantCache((RegionCoprocessorEnvironment)this.getEnvironment(), tenantId == null ? null : new ImmutableBytesPtr(tenantId));
        tenantCache.addServerCache(new ImmutableBytesPtr(cacheId), cachePtr, cacheFactory);
        return true;
    }
