Examples of PhoenixConnection


Examples of org.apache.phoenix.jdbc.PhoenixConnection

     * @see #decodePK(Connection, String, byte[]) to decode the byte[] back to the
     *  values
     */
    public static byte[] encodePK(Connection conn, String fullTableName, Object[] values) throws SQLException {
        PTable table = getTable(conn, fullTableName);
        PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
        // Skip the salt byte (if the table is salted) and the implicit tenant id
        // column (if the table is multi-tenant and this is a tenant-specific connection).
        int offset = (table.getBucketNum() == null ? 0 : 1) + (table.isMultiTenant() && pconn.getTenantId() != null ? 1 : 0);
        List<PColumn> pkColumns = table.getPKColumns();
        if (pkColumns.size() - offset != values.length) {
            throw new SQLException("Expected " + (pkColumns.size() - offset) + " but got " + values.length);
        }
        PDataType type = null;
View Full Code Here
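
The helper above only needs a standard JDBC Connection because it reaches the Phoenix-specific API through the JDBC unwrap call. A minimal sketch of that pattern, assuming a hypothetical local Phoenix URL:

import java.sql.Connection;
import java.sql.DriverManager;

import org.apache.phoenix.jdbc.PhoenixConnection;

public class UnwrapSketch {
    public static void main(String[] args) throws Exception {
        // Hypothetical URL; any Connection handed out by the Phoenix driver can be
        // unwrapped to the Phoenix-specific implementation.
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
            PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
            // The tenant id is non-null only for tenant-specific connections; the
            // encodePK helper uses it to decide whether to skip the implicit
            // tenant id column in the primary key.
            System.out.println("tenant id: " + pconn.getTenantId());
        }
    }
}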

Examples of org.apache.phoenix.jdbc.PhoenixConnection

     * @return the Object values encoded in the byte array value
     * @throws SQLException
     */
    public static Object[] decodePK(Connection conn, String name, byte[] value) throws SQLException {
        PTable table = getTable(conn, name);
        PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
        // Same offset as in encodePK: skip the salt byte and tenant id column if present.
        int offset = (table.getBucketNum() == null ? 0 : 1) + (table.isMultiTenant() && pconn.getTenantId() != null ? 1 : 0);
        int nValues = table.getPKColumns().size() - offset;
        RowKeySchema schema = table.getRowKeySchema();
        Object[] values = new Object[nValues];
        ImmutableBytesWritable ptr = new ImmutableBytesWritable();
        schema.iterator(value, ptr);
View Full Code Here
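
Taken together, encodePK and decodePK round-trip a composite primary key through its row-key bytes. A minimal usage sketch with a hypothetical two-column VARCHAR key, assuming the helpers are the static PhoenixRuntime methods of the same names (their home in Phoenix):

import java.sql.Connection;
import java.sql.DriverManager;
import java.util.Arrays;

import org.apache.phoenix.util.PhoenixRuntime;

public class PkRoundTripSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
            // Values must be supplied in primary-key order, excluding the salt byte
            // and tenant id column that the helpers skip via 'offset'.
            Object[] pk = new Object[] { "org1", "entity1" };
            byte[] rowKey = PhoenixRuntime.encodePK(conn, "MY_SCHEMA.MY_TABLE", pk);
            Object[] decoded = PhoenixRuntime.decodePK(conn, "MY_SCHEMA.MY_TABLE", rowKey);
            System.out.println(Arrays.equals(pk, decoded)); // expected: true
        }
    }
}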

Examples of org.apache.phoenix.jdbc.PhoenixConnection

        if (cacheNode != null) {
            validateNodeIsStateless(sequence, cacheNode,
                SQLExceptionCode.CACHE_MUST_BE_NON_NEGATIVE_CONSTANT);
        }

        final PhoenixConnection connection = statement.getConnection();
        final StatementContext context = new StatementContext(statement);
       
        // add param meta data if required
        if (startsWithNode instanceof BindParseNode) {
            context.getBindManager().addParamMetaData((BindParseNode) startsWithNode, LONG_DATUM);
        }
        if (incrementByNode instanceof BindParseNode) {
            context.getBindManager().addParamMetaData((BindParseNode) incrementByNode, LONG_DATUM);
        }
        if (maxValueNode instanceof BindParseNode) {
            context.getBindManager().addParamMetaData((BindParseNode) maxValueNode, LONG_DATUM);
        }
        if (minValueNode instanceof BindParseNode) {
            context.getBindManager().addParamMetaData((BindParseNode) minValueNode, LONG_DATUM);
        }
        if (cacheNode instanceof BindParseNode) {
            context.getBindManager().addParamMetaData((BindParseNode) cacheNode, INTEGER_DATUM);
        }
       
        ExpressionCompiler expressionCompiler = new ExpressionCompiler(context);       
        final long incrementBy =
                evalExpression(sequence, context, incrementByNode.accept(expressionCompiler),
                    SQLExceptionCode.INCREMENT_BY_MUST_BE_CONSTANT);
        if (incrementBy == 0) {
            throw SequenceUtil.getException(sequence.getSequenceName().getSchemaName(), sequence
                    .getSequenceName().getTableName(),
                SQLExceptionCode.INCREMENT_BY_MUST_NOT_BE_ZERO);
        }
        final long maxValue =
                evalExpression(sequence, context, maxValueNode.accept(expressionCompiler),
                    SQLExceptionCode.MAXVALUE_MUST_BE_CONSTANT);
        final long minValue =
                evalExpression(sequence, context, minValueNode.accept(expressionCompiler),
                    SQLExceptionCode.MINVALUE_MUST_BE_CONSTANT);
        if (minValue > maxValue) {
            TableName sequenceName = sequence.getSequenceName();
            throw SequenceUtil.getException(sequenceName.getSchemaName(),
                sequenceName.getTableName(),
                SQLExceptionCode.MINVALUE_MUST_BE_LESS_THAN_OR_EQUAL_TO_MAXVALUE);
        }
       
        long startsWithValue;
        if (startsWithNode == null) {
            startsWithValue = incrementBy > 0 ? minValue : maxValue;
        } else {
            startsWithValue =
                    evalExpression(sequence, context, startsWithNode.accept(expressionCompiler),
                        SQLExceptionCode.START_WITH_MUST_BE_CONSTANT);
            if (startsWithValue < minValue || startsWithValue > maxValue) {
                TableName sequenceName = sequence.getSequenceName();
                throw SequenceUtil.getException(sequenceName.getSchemaName(),
                    sequenceName.getTableName(),
                    SQLExceptionCode.STARTS_WITH_MUST_BE_BETWEEN_MIN_MAX_VALUE);
            }
        }
        final long startsWith = startsWithValue;

        long cacheSizeValue;
        if (cacheNode == null) {
            cacheSizeValue =
                    connection
                            .getQueryServices()
                            .getProps()
                            .getLong(QueryServices.SEQUENCE_CACHE_SIZE_ATTRIB,
                                QueryServicesOptions.DEFAULT_SEQUENCE_CACHE_SIZE);
        }
View Full Code Here
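
The compiler above backs Phoenix's CREATE SEQUENCE statement, and each parse node it validates corresponds to one clause of that DDL. A minimal sketch of the statement it compiles, issued over plain JDBC with hypothetical names:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class SequenceSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
             Statement stmt = conn.createStatement()) {
            // START WITH must fall between MINVALUE and MAXVALUE, INCREMENT BY must be
            // a non-zero constant, and CACHE a non-negative constant, per the checks above.
            stmt.execute("CREATE SEQUENCE my_schema.my_seq"
                    + " START WITH 1 INCREMENT BY 1"
                    + " MINVALUE 1 MAXVALUE 1000000 CACHE 50");
            try (ResultSet rs = stmt.executeQuery("SELECT NEXT VALUE FOR my_schema.my_seq")) {
                while (rs.next()) {
                    System.out.println(rs.getLong(1)); // 1 on the first call
                }
            }
        }
    }
}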

Examples of org.apache.phoenix.jdbc.PhoenixConnection

     */
    abstract protected MutationState mutate(StatementContext context, ResultIterator iterator, PhoenixConnection connection) throws SQLException;
   
    @Override
    public PeekingResultIterator newIterator(StatementContext context, ResultIterator iterator, Scan scan) throws SQLException {
        final PhoenixConnection connection = new PhoenixConnection(this.connection);
        MutationState state = mutate(context, iterator, connection);
        long totalRowCount = state.getUpdateCount();
        if (connection.getAutoCommit()) {
            connection.getMutationState().join(state);
            connection.commit();
            ConnectionQueryServices services = connection.getQueryServices();
            int maxSize = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_ATTRIB, QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE);
            state = new MutationState(maxSize, connection, totalRowCount);
        }
        final MutationState finalState = state;
        byte[] value = PDataType.LONG.toBytes(totalRowCount);
        KeyValue keyValue = KeyValueUtil.newKeyValue(UNGROUPED_AGG_ROW_KEY, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, value, 0, value.length);
        final Tuple tuple = new SingleKeyValueTuple(keyValue);
        return new PeekingResultIterator() {
            private boolean done = false;
           
            @Override
            public Tuple next() throws SQLException {
                if (done) {
                    return null;
                }
                done = true;
                return tuple;
            }

            @Override
            public void explain(List<String> planSteps) {
            }

            @Override
            public void close() throws SQLException {
                try {
                    // Join the child mutation states in close, since this is called in a single threaded manner
                    // after the parallel results have been processed.
                    if (!connection.getAutoCommit()) {
                        MutatingParallelIteratorFactory.this.connection.getMutationState().join(finalState);
                    }
                } finally {
                    connection.close();
                }
            }

            @Override
            public Tuple peek() throws SQLException {
View Full Code Here
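
The iterator factory above is internal plumbing for mutating queries such as UPSERT SELECT and DELETE: with auto-commit on it commits as it goes, otherwise it joins its MutationState into the caller's connection on close. A caller-side sketch of that distinction, with hypothetical table names:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class AutoCommitSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
             Statement stmt = conn.createStatement()) {
            // Auto-commit on: mutations are committed as the query runs, matching the
            // getAutoCommit() branch above; the update count is the total row count.
            conn.setAutoCommit(true);
            int copied = stmt.executeUpdate("UPSERT INTO target_table SELECT * FROM source_table");
            System.out.println(copied + " rows written");

            // Auto-commit off: mutations accumulate in the connection's MutationState
            // until commit() is called explicitly.
            conn.setAutoCommit(false);
            stmt.executeUpdate("DELETE FROM target_table WHERE id < 100");
            conn.commit();
        }
    }
}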

Examples of org.apache.phoenix.jdbc.PhoenixConnection

        }
      }
    }

    public static void clearMetaDataCache(Connection conn) throws Throwable {
        PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
        HTableInterface htable = pconn.getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
        htable.coprocessorExec(MetaDataProtocol.class, HConstants.EMPTY_START_ROW,
                HConstants.EMPTY_END_ROW, new Batch.Call<MetaDataProtocol, Void>() {
            @Override
            public Void call(MetaDataProtocol instance) throws IOException {
              instance.clearCache();
View Full Code Here
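
Before issuing the coprocessor call, the helper above reaches the underlying SYSTEM.CATALOG HBase table through the connection's query services. A sketch of just that lookup, reusing only calls that appear above:

import java.sql.Connection;
import java.sql.DriverManager;

import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;

public class SystemCatalogSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
            PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
            // Same pattern the helper uses to get an HTableInterface for SYSTEM.CATALOG
            // before invoking MetaDataProtocol.clearCache() on every region.
            HTableInterface htable = pconn.getQueryServices()
                    .getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
            try {
                System.out.println(new String(htable.getTableName(), "UTF-8"));
            } finally {
                htable.close();
            }
        }
    }
}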

Examples of org.apache.phoenix.jdbc.PhoenixConnection

        ensureTableCreated(getUrl(), JOIN_SUPPLIER_TABLE_FULL_NAME);
        ensureTableCreated(getUrl(), TABLE_WITH_ARRAY);
        Properties props = new Properties();
        //props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(MetaDataProtocol.MIN_TABLE_TIMESTAMP));
        props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(HConstants.LATEST_TIMESTAMP));
        PhoenixConnection conn = DriverManager.getConnection(PHOENIX_CONNECTIONLESS_JDBC_URL, props).unwrap(PhoenixConnection.class);
        try {
            PTable table = conn.getMetaDataCache().getTable(new PTableKey(null, ATABLE_NAME));
            ATABLE = table;
            ORGANIZATION_ID = new ColumnRef(new TableRef(table), table.getColumn("ORGANIZATION_ID").getPosition()).newColumnExpression();
            ENTITY_ID = new ColumnRef(new TableRef(table), table.getColumn("ENTITY_ID").getPosition()).newColumnExpression();
            A_INTEGER = new ColumnRef(new TableRef(table), table.getColumn("A_INTEGER").getPosition()).newColumnExpression();
            A_STRING = new ColumnRef(new TableRef(table), table.getColumn("A_STRING").getPosition()).newColumnExpression();
            B_STRING = new ColumnRef(new TableRef(table), table.getColumn("B_STRING").getPosition()).newColumnExpression();
            A_DATE = new ColumnRef(new TableRef(table), table.getColumn("A_DATE").getPosition()).newColumnExpression();
            A_TIME = new ColumnRef(new TableRef(table), table.getColumn("A_TIME").getPosition()).newColumnExpression();
            A_TIMESTAMP = new ColumnRef(new TableRef(table), table.getColumn("A_TIMESTAMP").getPosition()).newColumnExpression();
            X_DECIMAL = new ColumnRef(new TableRef(table), table.getColumn("X_DECIMAL").getPosition()).newColumnExpression();
        } finally {
            conn.close();
        }
    }
View Full Code Here
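
The setup above resolves tables from the client-side metadata cache of a connectionless Phoenix connection and turns their columns into expressions. The same lookup, condensed into a small helper that uses only the calls shown above (URL, table, and column names are whatever the caller supplies):

import java.sql.DriverManager;
import java.util.Properties;

import org.apache.hadoop.hbase.HConstants;
import org.apache.phoenix.expression.Expression;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.schema.ColumnRef;
import org.apache.phoenix.schema.PTable;
import org.apache.phoenix.schema.PTableKey;
import org.apache.phoenix.schema.TableRef;
import org.apache.phoenix.util.PhoenixRuntime;

public class MetaDataCacheSketch {
    static Expression columnExpression(String url, String tableName, String columnName) throws Exception {
        Properties props = new Properties();
        props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(HConstants.LATEST_TIMESTAMP));
        PhoenixConnection conn = DriverManager.getConnection(url, props).unwrap(PhoenixConnection.class);
        try {
            // Resolve the table from the connection's metadata cache and build an
            // expression over one of its columns, as the test setup above does.
            PTable table = conn.getMetaDataCache().getTable(new PTableKey(null, tableName));
            return new ColumnRef(new TableRef(table), table.getColumn(columnName).getPosition()).newColumnExpression();
        } finally {
            conn.close();
        }
    }
}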

Examples of org.apache.phoenix.jdbc.PhoenixConnection

    private static StatementContext compileStatement(String query) throws SQLException {
        return compileStatement(query, Collections.emptyList(), null);
    }

    private static StatementContext compileStatement(String query, List<Object> binds, Integer limit) throws SQLException {
        PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class);
        PhoenixPreparedStatement pstmt = new PhoenixPreparedStatement(pconn, query);
        TestUtil.bindParams(pstmt, binds);
        QueryPlan plan = pstmt.compileQuery();
        assertEquals(limit, plan.getLimit());
        return plan.getContext();
View Full Code Here
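
A sketch of how a test would use the helper above to check that a LIMIT survives compilation; it is meant to sit in the same test class, and the query is a hypothetical one against the ATABLE test schema:

    // In the same test class as the compileStatement(query, binds, limit) helper above:
    private void assertLimitIsCompiled() throws Exception {
        // Compiles through a PhoenixPreparedStatement and asserts that the resulting
        // QueryPlan carries the expected LIMIT, exactly as the helper does internally.
        compileStatement("SELECT * FROM atable WHERE organization_id = 'o1' LIMIT 5",
                java.util.Collections.<Object>emptyList(), 5);
    }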

Examples of org.apache.phoenix.jdbc.PhoenixConnection


public class LimitCompilerTest extends BaseConnectionlessQueryTest {
   
    private static QueryPlan compileStatement(String query, List<Object> binds) throws SQLException {
        PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class);
        PhoenixPreparedStatement pstmt = new PhoenixPreparedStatement(pconn, query);
        TestUtil.bindParams(pstmt, binds);
        return pstmt.compileQuery();
    }
View Full Code Here

Examples of org.apache.phoenix.jdbc.PhoenixConnection



public class SelectStatementRewriterTest extends BaseConnectionlessQueryTest {
    private static Filter compileStatement(String query) throws SQLException {
        PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class);
        PhoenixPreparedStatement pstmt = new PhoenixPreparedStatement(pconn, query);
        QueryPlan plan = pstmt.compileQuery();
        return plan.getContext().getScan().getFilter();
    }
View Full Code Here

Examples of org.apache.phoenix.jdbc.PhoenixConnection

    public CreateTableCompiler(PhoenixStatement statement) {
        this.statement = statement;
    }

    public MutationPlan compile(final CreateTableStatement create) throws SQLException {
        final PhoenixConnection connection = statement.getConnection();
        ColumnResolver resolver = FromCompiler.getResolverForCreation(create, connection);
        PTableType type = create.getTableType();
        PhoenixConnection connectionToBe = connection;
        PTable parentToBe = null;
        ViewType viewTypeToBe = null;
        Scan scan = new Scan();
        final StatementContext context = new StatementContext(statement, resolver, scan, new SequenceManager(statement));
        // TODO: support any statement for a VIEW instead of just a WHERE clause
        ParseNode whereNode = create.getWhereClause();
        String viewStatementToBe = null;
        byte[][] viewColumnConstantsToBe = null;
        BitSet isViewColumnReferencedToBe = null;
        if (type == PTableType.VIEW) {
            TableRef tableRef = resolver.getTables().get(0);
            int nColumns = tableRef.getTable().getColumns().size();
            isViewColumnReferencedToBe = new BitSet(nColumns);
            // Used to track column references in a view
            ExpressionCompiler expressionCompiler = new ColumnTrackingExpressionCompiler(context, isViewColumnReferencedToBe);
            parentToBe = tableRef.getTable();
            viewTypeToBe = parentToBe.getViewType() == ViewType.MAPPED ? ViewType.MAPPED : ViewType.UPDATABLE;
            if (whereNode == null) {
                viewStatementToBe = parentToBe.getViewStatement();
            } else {
                whereNode = StatementNormalizer.normalize(whereNode, resolver);
                if (whereNode.isStateless()) {
                    throw new SQLExceptionInfo.Builder(SQLExceptionCode.VIEW_WHERE_IS_CONSTANT)
                        .build().buildException();
                }
                // If our parent has a VIEW statement, combine it with this one
                if (parentToBe.getViewStatement() != null) {
                    SelectStatement select = new SQLParser(parentToBe.getViewStatement()).parseQuery().combine(whereNode);
                    whereNode = select.getWhere();
                }
                Expression where = whereNode.accept(expressionCompiler);
                if (where != null && !LiteralExpression.isTrue(where)) {
                    TableName baseTableName = create.getBaseTableName();
                    String schemaName = baseTableName.getSchemaName();
                    // Only form we currently support for VIEWs: SELECT * FROM t WHERE ...
                    viewStatementToBe = SELECT + " " + WildcardParseNode.NAME + " " + FROM + " " +
                            (schemaName == null ? "" : "\"" + schemaName + "\".") +
                            ("\"" + baseTableName.getTableName() + "\" ") +
                            (WHERE + " " + where.toString());
                }
                if (viewTypeToBe != ViewType.MAPPED) {
                    Long scn = connection.getSCN();
                    connectionToBe = scn != null ? connection :
                        // If we have no SCN on our connection, freeze the SCN at the time
                        // the base table was resolved to prevent any race condition on
                        // the error checking we do for the base table. The only potential
                        // issue is if the base table lives on a different region server
                        // than the new table will, then we're relying here on the system
                        // clocks being in sync.
                        new PhoenixConnection(
                            // When the new table is created, we still want to cache it
                            // on our connection.
                            new DelegateConnectionQueryServices(connection.getQueryServices()) {
                                @Override
                                public PMetaData addTable(PTable table) throws SQLException {
View Full Code Here
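
The compiler above is what runs when a view is created over a base table: the WHERE clause must not be constant, it is merged with the parent's view statement if one exists, and it becomes the stored SELECT * FROM <base table> WHERE ... view statement. A minimal DDL sketch of the statements it handles, issued over plain JDBC with hypothetical table, view, and column names:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class ViewDdlSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
             Statement stmt = conn.createStatement()) {
            stmt.execute("CREATE TABLE IF NOT EXISTS base_table"
                    + " (id VARCHAR PRIMARY KEY, kind VARCHAR, val INTEGER)");
            // The WHERE clause below is compiled by the code above into the view's
            // stored statement; a stateless (constant) WHERE such as 1 = 1 would be
            // rejected with VIEW_WHERE_IS_CONSTANT.
            stmt.execute("CREATE VIEW kind_a_view AS SELECT * FROM base_table WHERE kind = 'a'");
        }
    }
}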