Package org.apache.phoenix.query

Examples of org.apache.phoenix.query.ConnectionQueryServices
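
The excerpts below come from the Phoenix source tree and its integration tests. Together they cover the main ways client code obtains ConnectionQueryServices (by unwrapping a PhoenixConnection or by asking the driver directly) and what it is used for: sequence validation and increment, raw HTableInterface access, the shared executor and configuration properties, the region location cache, and HBaseAdmin operations.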


        }
        int offset = nextSequences.size();
        for (int i = 0; i < currentSequences.size(); i++) {
            sequencePosition[i+offset] = sequenceMap.get(currentSequences.get(i)).getIndex();
        }
        ConnectionQueryServices services = this.statement.getConnection().getQueryServices();
        Long scn = statement.getConnection().getSCN();
        long timestamp = scn == null ? HConstants.LATEST_TIMESTAMP : scn;
        services.validateSequences(nextSequences, timestamp, srcSequenceValues, sqlExceptions, action);
        setSequenceValues(srcSequenceValues, dstSequenceValues, sqlExceptions);
    }
View Full Code Here
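
This fragment validates a batch of sequences in a single call, resolving the read timestamp from the connection's SCN. Below is a minimal sketch of how the same services and timestamp are reached from a plain JDBC connection; the URL and class name are illustrative, not part of the excerpt:

import java.sql.Connection;
import java.sql.DriverManager;

import org.apache.hadoop.hbase.HConstants;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.query.ConnectionQueryServices;

public class QueryServicesAccessExample {
    public static void main(String[] args) throws Exception {
        // Illustrative URL; point it at your own cluster.
        Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
        try {
            // Unwrap the JDBC connection to reach the Phoenix-level services.
            PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
            ConnectionQueryServices services = pconn.getQueryServices();

            // Same timestamp resolution the sequence code uses: honor the
            // connection's SCN if one was set, otherwise read at latest.
            Long scn = pconn.getSCN();
            long timestamp = scn == null ? HConstants.LATEST_TIMESTAMP : scn;

            System.out.println("Resolved timestamp: " + timestamp
                    + ", services: " + services.getClass().getSimpleName());
        } finally {
            conn.close();
        }
    }
}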


            if (sequenceMap == null) {
                return;
            }
            Long scn = statement.getConnection().getSCN();
            long timestamp = scn == null ? HConstants.LATEST_TIMESTAMP : scn;
            ConnectionQueryServices services = statement.getConnection().getQueryServices();
            services.incrementSequences(nextSequences, timestamp, srcSequenceValues, sqlExceptions);
            setSequenceValues(srcSequenceValues, dstSequenceValues, sqlExceptions);
            int offset = nextSequences.size();
            for (int i = 0; i < currentSequences.size(); i++) {
                dstSequenceValues[sequencePosition[offset+i]] = services.currentSequenceValue(currentSequences.get(i), timestamp);
            }
        }
View Full Code Here
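
These two fragments are the internals behind Phoenix's sequence SQL: NEXT VALUE FOR expressions go through incrementSequences(), while CURRENT VALUE FOR positions are filled from currentSequenceValue(). A short client-side sketch of that SQL surface, with illustrative sequence and table names:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class SequenceUsageExample {
    public static void main(String[] args) throws Exception {
        // Illustrative URL, sequence and table names.
        Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
        try {
            Statement stmt = conn.createStatement();
            stmt.execute("CREATE SEQUENCE IF NOT EXISTS my_schema.event_id_seq");
            stmt.execute("CREATE TABLE IF NOT EXISTS my_schema.events ("
                    + "id BIGINT NOT NULL PRIMARY KEY, payload VARCHAR)");

            // NEXT VALUE FOR is resolved through incrementSequences() in the
            // fragments above.
            stmt.executeUpdate("UPSERT INTO my_schema.events (id, payload) "
                    + "VALUES (NEXT VALUE FOR my_schema.event_id_seq, 'hello')");
            conn.commit();

            // CURRENT VALUE FOR is answered from the connection's cached
            // sequence state (currentSequenceValue() above), so NEXT VALUE FOR
            // must have been called on this connection first.
            ResultSet rs = stmt.executeQuery(
                    "SELECT CURRENT VALUE FOR my_schema.event_id_seq "
                    + "FROM my_schema.events LIMIT 1");
            while (rs.next()) {
                System.out.println("current sequence value: " + rs.getLong(1));
            }
        } finally {
            conn.close();
        }
    }
}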

        } catch (TableNotFoundException e) {
            // Used for mapped VIEW, since we won't be able to resolve that.
            // Instead, we create a table with just the dynamic columns.
            // A tenant-specific connection may not create a mapped VIEW.
            if (connection.getTenantId() == null && statement.getTableType() == PTableType.VIEW) {
                ConnectionQueryServices services = connection.getQueryServices();
                byte[] fullTableName = SchemaUtil.getTableNameAsBytes(baseTable.getSchemaName(), baseTable.getTableName());
                HTableInterface htable = null;
                try {
                    htable = services.getTable(fullTableName);
                } catch (UnsupportedOperationException ignore) {
                    throw e; // For Connectionless
                } finally {
                    if (htable != null) Closeables.closeQuietly(htable);
                }
View Full Code Here
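
Here getTable() is used only to check that the physical HBase table behind a mapped VIEW is reachable; the returned HTableInterface must always be closed, and a connectionless deployment signals itself with UnsupportedOperationException. A minimal sketch of opening and closing that raw handle, with an illustrative helper name:

import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.query.ConnectionQueryServices;
import org.apache.phoenix.util.SchemaUtil;

public class RawTableAccessExample {
    // Illustrative helper: open the raw HTableInterface behind a Phoenix table
    // and always close it again. A connectionless ConnectionQueryServices
    // implementation throws UnsupportedOperationException here, which is why
    // the fragment above rethrows the original TableNotFoundException in that case.
    public static void touchPhysicalTable(ConnectionQueryServices services,
            String schemaName, String tableName) throws Exception {
        byte[] fullTableName = SchemaUtil.getTableNameAsBytes(schemaName, tableName);
        HTableInterface htable = null;
        try {
            htable = services.getTable(fullTableName);
            System.out.println("Opened " + Bytes.toString(fullTableName));
        } finally {
            if (htable != null) {
                htable.close();
            }
        }
    }
}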

     * @return the result iterators for the scan of each region
     */
    @Override
    public List<PeekingResultIterator> getIterators() throws SQLException {
        boolean success = false;
        final ConnectionQueryServices services = context.getConnection().getQueryServices();
        ReadOnlyProps props = services.getProps();
        int numSplits = splits.size();
        List<PeekingResultIterator> iterators = new ArrayList<PeekingResultIterator>(numSplits);
        List<Pair<KeyRange,Future<PeekingResultIterator>>> futures = new ArrayList<Pair<KeyRange,Future<PeekingResultIterator>>>(numSplits);
        final UUID scanId = UUID.randomUUID();
        try {
            submitWork(scanId, splits, futures);
            int timeoutMs = props.getInt(QueryServices.THREAD_TIMEOUT_MS_ATTRIB, DEFAULT_THREAD_TIMEOUT_MS);
            final int factor = ScanUtil.isReversed(this.context.getScan()) ? -1 : 1;
            // Sort futures by row key so that we have a predictable order we're getting rows back for scans.
            // We're going to wait here until they're finished anyway and this makes testing much easier.
            Collections.sort(futures, new Comparator<Pair<KeyRange,Future<PeekingResultIterator>>>() {
                @Override
                public int compare(Pair<KeyRange, Future<PeekingResultIterator>> o1, Pair<KeyRange, Future<PeekingResultIterator>> o2) {
                    return factor * Bytes.compareTo(o1.getFirst().getLowerRange(), o2.getFirst().getLowerRange());
                }
            });
            boolean clearedCache = false;
            byte[] tableName = tableRef.getTable().getPhysicalName().getBytes();
            for (Pair<KeyRange,Future<PeekingResultIterator>> future : futures) {
                try {
                    PeekingResultIterator iterator = future.getSecond().get(timeoutMs, TimeUnit.MILLISECONDS);
                    iterators.add(iterator);
                } catch (ExecutionException e) {
                    try { // Rethrow as SQLException
                        throw ServerUtil.parseServerException(e);
                    } catch (StaleRegionBoundaryCacheException e2) { // Catch only to try to recover from region boundary cache being out of date
                        List<Pair<KeyRange,Future<PeekingResultIterator>>> newFutures = new ArrayList<Pair<KeyRange,Future<PeekingResultIterator>>>(2);
                        if (!clearedCache) { // Clear cache once so that we rejigger job based on new boundaries
                            services.clearTableRegionCache(tableName);
                            clearedCache = true;
                        }
                        List<KeyRange> allSplits = toKeyRanges(services.getAllTableRegions(tableName));
                        // Intersect what was the expected boundary with all new region boundaries and
                        // resubmit just this portion of work again
                        List<KeyRange> newSubSplits = KeyRange.intersect(Collections.singletonList(future.getFirst()), allSplits);
                        submitWork(scanId, newSubSplits, newFutures);
                        for (Pair<KeyRange,Future<PeekingResultIterator>> newFuture : newFutures) {
View Full Code Here
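
The catch block above is the standard recovery for a stale region boundary cache: clear the cached boundaries once, re-read the region list, and resubmit only the affected key range. A stripped-down sketch of the cache-refresh step (the helper name is illustrative and the re-planning of splits is omitted):

import java.util.List;

import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.query.ConnectionQueryServices;

public class RegionCacheRefreshExample {
    // Illustrative helper: drop the cached region boundaries for a table and
    // return the freshly fetched region list.
    public static List<HRegionLocation> refreshRegions(ConnectionQueryServices services,
            String physicalTableName) throws Exception {
        byte[] tableName = Bytes.toBytes(physicalTableName);
        services.clearTableRegionCache(tableName);
        List<HRegionLocation> regions = services.getAllTableRegions(tableName);
        System.out.println(physicalTableName + " now has " + regions.size() + " regions");
        return regions;
    }
}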

        }
    }
   
    private void submitWork(final UUID scanId, List<KeyRange> splits,
            List<Pair<KeyRange,Future<PeekingResultIterator>>> futures) {
        final ConnectionQueryServices services = context.getConnection().getQueryServices();
        ExecutorService executor = services.getExecutor();
        for (final KeyRange split : splits) {
            final Scan splitScan = ScanUtil.newScan(context.getScan());
            // Intersect with existing start/stop key if the table is salted
            // If not salted, we've already intersected it. If salted, we need
            // to wait until now to intersect, as we're running parallel scans
View Full Code Here
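
submitWork() relies on two more pieces of ConnectionQueryServices: the configuration snapshot from getProps() and the shared thread pool from getExecutor(). A minimal sketch of reading a property and submitting a task to that pool; the task itself and the 60-second fallback are placeholders:

import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;

import org.apache.phoenix.query.ConnectionQueryServices;
import org.apache.phoenix.query.QueryServices;
import org.apache.phoenix.util.ReadOnlyProps;

public class ExecutorAndPropsExample {
    public static void runPlaceholderTask(ConnectionQueryServices services) throws Exception {
        // Read the same thread-timeout setting the iterator code uses,
        // falling back to an illustrative default of 60s.
        ReadOnlyProps props = services.getProps();
        final int timeoutMs = props.getInt(QueryServices.THREAD_TIMEOUT_MS_ATTRIB, 60000);

        // Submit a placeholder task to the shared Phoenix executor.
        ExecutorService executor = services.getExecutor();
        Future<String> future = executor.submit(new Callable<String>() {
            @Override
            public String call() {
                return "ran with timeout " + timeoutMs + " ms";
            }
        });
        System.out.println(future.get());
    }
}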

        conn.close();
        initTableValues();
    }

    private static void initTableValues() throws Exception {
        ConnectionQueryServices services = driver.getConnectionQueryServices(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES));
        HTableInterface hTable = services.getTable(SchemaUtil.getTableNameAsBytes(WEB_STATS_SCHEMA_NAME,WEB_STATS));
        try {
            // Insert rows using standard HBase mechanism with standard HBase "types"
            Put put;
            List<Row> mutations = new ArrayList<Row>();
            put = new Put(Bytes.toBytes("entry1"));
View Full Code Here
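
In tests, getTable() is also used to write cells with plain HBase Puts, bypassing Phoenix's type system, as this fragment does. A compact sketch of that write path under the same HTableInterface-era client API; the row key, column family, and qualifier are illustrative:

import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Row;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.query.ConnectionQueryServices;
import org.apache.phoenix.util.SchemaUtil;

public class RawPutExample {
    public static void writeRawRow(ConnectionQueryServices services,
            String schemaName, String tableName) throws Exception {
        HTableInterface hTable = services.getTable(
                SchemaUtil.getTableNameAsBytes(schemaName, tableName));
        try {
            // Build a Put against an illustrative column family "0" and column "VAL".
            List<Row> mutations = new ArrayList<Row>();
            Put put = new Put(Bytes.toBytes("entry1"));
            put.add(Bytes.toBytes("0"), Bytes.toBytes("VAL"), Bytes.toBytes(42L));
            mutations.add(put);

            // batch() applies the mutations directly, with no Phoenix
            // serialization or MutationState involved.
            hTable.batch(mutations);
        } finally {
            hTable.close();
        }
    }
}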

                    conn.commit();
                }
            }
        }
        conn.commit();
        ConnectionQueryServices services = conn.unwrap(PhoenixConnection.class).getQueryServices();
        HBaseAdmin admin = services.getAdmin();
        try {
            admin.flush(TABLE_NAME);
        } finally {
            admin.close();
        }
View Full Code Here
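
getAdmin() hands back the cluster's HBaseAdmin, which the tests use for flushes, splits, and teardown; the admin handle must be closed afterwards. A minimal sketch of the flush case mirroring the fragment above, with an illustrative helper name:

import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.phoenix.query.ConnectionQueryServices;

public class AdminFlushExample {
    public static void flushTable(ConnectionQueryServices services, String tableName)
            throws Exception {
        HBaseAdmin admin = services.getAdmin();
        try {
            // Force a flush of the table's memstores to disk before checking results.
            admin.flush(tableName);
        } finally {
            admin.close();
        }
    }
}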

   
    @Test
    public void testManualSplit() throws Exception {
        initTable();
        Connection conn = DriverManager.getConnection(getUrl());
        ConnectionQueryServices services = conn.unwrap(PhoenixConnection.class).getQueryServices();
        int nRegions = services.getAllTableRegions(TABLE_NAME_BYTES).size();
        int nInitialRegions = nRegions;
        HBaseAdmin admin = services.getAdmin();
        try {
            admin.split(TABLE_NAME);
            int nTries = 0;
            while (nRegions == nInitialRegions && nTries < 10) {
                Thread.sleep(1000);
                nRegions = services.getAllTableRegions(TABLE_NAME_BYTES).size();
                nTries++;
            }
            // Split finished by this time, but cache isn't updated until
            // table is accessed
            assertEquals(nRegions, nInitialRegions);
           
            int nRows = 2;
            String query = "SELECT /*+ NO_INTRA_REGION_PARALLELIZATION */ count(*) FROM S WHERE a IN ('tl','jt')";
            ResultSet rs1 = conn.createStatement().executeQuery(query);
            assertTrue(rs1.next());
            nRegions = services.getAllTableRegions(TABLE_NAME_BYTES).size();
            // Region cache has been updated, as there are more regions now
            assertNotEquals(nRegions, nInitialRegions);
            if (nRows != rs1.getInt(1)) {
                // Run the same query again and it always passes now
                // (as region cache is up-to-date)
View Full Code Here

    }
   
    private static void destroyTable() throws Exception {
        // Physically delete HBase table so that splits occur as expected for each test
        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
        ConnectionQueryServices services = DriverManager.getConnection(getUrl(), props).unwrap(PhoenixConnection.class).getQueryServices();
        HBaseAdmin admin = services.getAdmin();
        try {
            try {
                admin.disableTable(PRODUCT_METRICS_NAME);
                admin.deleteTable(PRODUCT_METRICS_NAME);
            } catch (TableNotFoundException e) {
View Full Code Here

            admin.close();
        }
    }
   
    private static void initTableValues() throws Exception {
        ConnectionQueryServices services = driver.getConnectionQueryServices(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES));
        HTableInterface hTable = services.getTable(SchemaUtil.getTableNameAsBytes(HBASE_NATIVE_SCHEMA_NAME, HBASE_NATIVE));
        try {
            // Insert rows using standard HBase mechanism with standard HBase "types"
            List<Row> mutations = new ArrayList<Row>();
            byte[] family = Bytes.toBytes("1");
            byte[] uintCol = Bytes.toBytes("UINT_COL");
View Full Code Here
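
The two initTableValues() helpers show the second way to reach ConnectionQueryServices: rather than unwrapping an open connection, the Phoenix driver hands out the services for a given URL and property set (here, the test harness's driver instance). A compact comparison of the two routes; the URL, the empty properties, and the use of PhoenixDriver.INSTANCE outside a test are assumptions:

import java.sql.Connection;
import java.sql.DriverManager;
import java.util.Properties;

import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.jdbc.PhoenixDriver;
import org.apache.phoenix.query.ConnectionQueryServices;

public class TwoAccessPathsExample {
    public static void main(String[] args) throws Exception {
        String url = "jdbc:phoenix:localhost"; // illustrative
        Properties props = new Properties();

        // Route 1: unwrap an open JDBC connection (as in the split/flush tests above).
        Connection conn = DriverManager.getConnection(url, props);
        ConnectionQueryServices fromConnection =
                conn.unwrap(PhoenixConnection.class).getQueryServices();

        // Route 2: ask the driver directly (as initTableValues() does with the test driver).
        ConnectionQueryServices fromDriver =
                PhoenixDriver.INSTANCE.getConnectionQueryServices(url, props);

        System.out.println(fromConnection == fromDriver
                ? "Both routes share the same services instance"
                : "Different services instances");
        conn.close();
    }
}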
