Package org.apache.phoenix.hbase.index.covered.update

Examples of org.apache.phoenix.hbase.index.covered.update.ColumnReference
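The excerpts below are drawn from the Apache Phoenix index code and its integration tests. As an orientation, here is a minimal sketch, not taken from the Phoenix sources (the ColumnReferenceSketch class and the CF/CQ names are made up), of the two patterns the excerpts rely on: constructing a ColumnReference from family and qualifier bytes, and using ColumnReference.ALL_QUALIFIERS to refer to a whole family. It also assumes value-based equals/hashCode, which the map and set lookups in the excerpts depend on.

    import java.util.HashMap;
    import java.util.Map;

    import org.apache.hadoop.hbase.util.Bytes;
    import org.apache.phoenix.hbase.index.covered.update.ColumnReference;

    public class ColumnReferenceSketch {
        public static void main(String[] args) {
            byte[] family = Bytes.toBytes("CF");    // hypothetical column family
            byte[] qualifier = Bytes.toBytes("CQ"); // hypothetical column qualifier

            // Reference a single column by its family and qualifier bytes.
            ColumnReference ref = new ColumnReference(family, qualifier);

            // Reference every qualifier in a family, as the test excerpts below do.
            ColumnReference familyRef =
                new ColumnReference(family, ColumnReference.ALL_QUALIFIERS);

            // The excerpts key maps and sets by ColumnReference, so equality is
            // assumed here to be value-based over the family/qualifier bytes.
            Map<ColumnReference, byte[]> latest = new HashMap<ColumnReference, byte[]>();
            latest.put(ref, Bytes.toBytes("v1"));
            System.out.println(latest.containsKey(new ColumnReference(family, qualifier)));
            System.out.println(familyRef.equals(ref)); // false: different qualifiers
        }
    }

The first excerpt, from the EndToEndCoveredColumnsIndexBuilderIT test, collects the KeyValues of a single Put and registers verifiers against a family-wide reference: the cleanup pass should see nothing in the table, and the put pass should see the Put's own KeyValues.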


    final List<KeyValue> allKvs = new ArrayList<KeyValue>();
    allKvs.addAll(p.getFamilyMap().get(family));

    // setup the verifier for the data we expect to write
    // first call shouldn't have anything in the table
    final ColumnReference familyRef =
        new ColumnReference(EndToEndCoveredColumnsIndexBuilderIT.family, ColumnReference.ALL_QUALIFIERS);

    VerifyingIndexCodec codec = state.codec;
    codec.verifiers.add(new ListMatchingVerifier("cleanup state 1", expectedKvs, familyRef));
    codec.verifiers.add(new ListMatchingVerifier("put state 1", allKvs, familyRef));


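The next excerpt, from the same test, covers two Puts indexed as a single batch: the cleanup verifier expects no previous state in the table, and the put verifier expects the KeyValues of the first Put.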
    allKvs.addAll(p2.getFamilyMap().get(family));
    allKvs.addAll(p1.getFamilyMap().get(family));

    // setup the verifier for the data we expect to write
    // both puts should be put into a single batch
    final ColumnReference familyRef =
        new ColumnReference(EndToEndCoveredColumnsIndexBuilderIT.family, ColumnReference.ALL_QUALIFIERS);
    VerifyingIndexCodec codec = state.codec;
    // no previous state in the table
    codec.verifiers.add(new ListMatchingVerifier("cleanup state 1", Collections
        .<KeyValue> emptyList(), familyRef));
    codec.verifiers.add(new ListMatchingVerifier("put state 1", p1.getFamilyMap().get(family),


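Building an index maintainer: each indexed or covered data-table column is registered as a ColumnReference constructed from the column's family-name and column-name bytes, alongside its data type and row-key position bookkeeping.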
                int dataPkPos = dataTable.getPKColumns().indexOf(column) - (dataTable.getBucketNum() == null ? 0 : 1) - (maintainer.isMultiTenant ? 1 : 0);
                rowKeyMetaData.setIndexPkPosition(dataPkPos, indexPos);
            } else {
                indexColByteSize += column.getDataType().isFixedWidth() ? SchemaUtil.getFixedByteSize(column) : ValueSchema.ESTIMATED_VARIABLE_LENGTH_SIZE;
                maintainer.getIndexedColumnTypes().add(column.getDataType());
                maintainer.getIndexedColumns().add(new ColumnReference(column.getFamilyName().getBytes(), column.getName().getBytes()));
            }
            if (indexColumn.getSortOrder() == SortOrder.DESC) {
                rowKeyMetaData.getDescIndexColumnBitSet().set(indexPos);
            }
        }
        for (int i = 0; i < index.getColumnFamilies().size(); i++) {
            PColumnFamily family = index.getColumnFamilies().get(i);
            for (PColumn indexColumn : family.getColumns()) {
                PColumn column = IndexUtil.getDataColumn(dataTable, indexColumn.getName().getString());
                maintainer.getCoverededColumns().add(new ColumnReference(column.getFamilyName().getBytes(), column.getName().getBytes()));
            }
        }
        maintainer.estimatedIndexRowKeyBytes = maintainer.estimateIndexRowKeyByteSize(indexColByteSize);
        maintainer.initCachedState();
        return maintainer;

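Detecting a change to an indexed column: the pending updates are first keyed by ColumnReference so that each indexed reference can be looked up directly against the new state and compared with the old value.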
        if (pendingUpdates.isEmpty()) {
            return false;
        }
        Map<ColumnReference,KeyValue> newState = Maps.newHashMapWithExpectedSize(pendingUpdates.size());
        for (KeyValue kv : pendingUpdates) {
            newState.put(new ColumnReference(kv.getFamily(), kv.getQualifier()), kv);
        }
        for (ColumnReference ref : indexedColumns) {
            KeyValue newValue = newState.get(ref);
            if (newValue != null) { // Indexed column was potentially changed
                ImmutableBytesPtr oldValue = oldState.getLatestValue(ref);

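Deleting covered index columns: when a pending KeyValue is not a Put and its ColumnReference is in the covered set, the corresponding index column name (derived from the reference's family and qualifier) is deleted at the given timestamp.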
        }
        Delete delete = null;
        // Delete columns for missing key values
        for (KeyValue kv : pendingUpdates) {
            if (kv.getType() != KeyValue.Type.Put.getCode()) {
                ColumnReference ref = new ColumnReference(kv.getFamily(), kv.getQualifier());
                if (coveredColumns.contains(ref)) {
                    if (delete == null) {
                        delete = new Delete(indexRowKey);                   
                        delete.setWriteToWAL(!indexWALDisabled);
                    }
                    delete.deleteColumns(ref.getFamily(), IndexUtil.getIndexColumnName(ref.getFamily(), ref.getQualifier()), ts);
                }
            }
        }
        return delete;
  }

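Deserializing a maintainer: the indexed and covered ColumnReferences are read back as pairs of family and qualifier byte arrays, followed by the indexed column types and the index table metadata.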
        int nIndexedColumns = Math.abs(encodedIndexedColumnsAndViewId) - 1;
        indexedColumns = Sets.newLinkedHashSetWithExpectedSize(nIndexedColumns);
        for (int i = 0; i < nIndexedColumns; i++) {
            byte[] cf = Bytes.readByteArray(input);
            byte[] cq = Bytes.readByteArray(input);
            indexedColumns.add(new ColumnReference(cf,cq));
        }
        indexedColumnTypes = Lists.newArrayListWithExpectedSize(nIndexedColumns);
        for (int i = 0; i < nIndexedColumns; i++) {
            PDataType type = PDataType.values()[WritableUtils.readVInt(input)];
            indexedColumnTypes.add(type);
        }
        int nCoveredColumns = WritableUtils.readVInt(input);
        coveredColumns = Sets.newLinkedHashSetWithExpectedSize(nCoveredColumns);
        for (int i = 0; i < nCoveredColumns; i++) {
            byte[] cf = Bytes.readByteArray(input);
            byte[] cq = Bytes.readByteArray(input);
            coveredColumns.add(new ColumnReference(cf,cq));
        }
        indexTableName = Bytes.readByteArray(input);
        dataEmptyKeyValueCF = Bytes.readByteArray(input);
        emptyKeyValueCFPtr = new ImmutableBytesPtr(Bytes.readByteArray(input));
       

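Initializing cached state: a ColumnReference is built for the empty key value column, and an index qualifier is precomputed for each covered column.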
    /**
     * Initialize calculated state when reading or creating the maintainer.
     */
    private void initCachedState() {
        dataEmptyKeyValueRef =
                new ColumnReference(emptyKeyValueCFPtr.copyBytesIfNecessary(),
                        QueryConstants.EMPTY_COLUMN_BYTES);

        indexQualifiers = Lists.newArrayListWithExpectedSize(this.coveredColumns.size());
        for (ColumnReference ref : coveredColumns) {
            indexQualifiers.add(new ImmutableBytesPtr(IndexUtil.getIndexColumnName(
                ref.getFamily(), ref.getQualifier())));

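Generating index updates from data-table KeyValues: a value map keyed by ColumnReference backs the ValueGetter that is used to build the index mutations for each row.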
            List<KeyValue> dataKeyValues = iterator.next().getSecond();
            Map<ColumnReference,byte[]> valueMap = Maps.newHashMapWithExpectedSize(dataKeyValues.size());
            ImmutableBytesWritable rowKeyPtr = new ImmutableBytesWritable(dataKeyValues.get(0).getRow());
            Put dataMutation = new Put(rowKeyPtr.copyBytes());
            for (KeyValue kv : dataKeyValues) {
                valueMap.put(new ColumnReference(kv.getFamily(),kv.getQualifier()), kv.getValue());
                dataMutation.add(kv);
            }
            ValueGetter valueGetter = newValueGetter(valueMap);
           
            List<Mutation> indexMutations =
