Class com.salesforce.phoenix.cache.ServerCacheClient (package com.salesforce.phoenix.cache)

Examples of com.salesforce.phoenix.cache.ServerCacheClient.ServerCache
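
ServerCache is the handle a Phoenix client gets back when it pushes a payload, such as serialized index maintainers or a hash-join table, into the region server cache through ServerCacheClient or its IndexMetaDataCacheClient wrapper. The examples below all follow the same lifecycle: add the payload to the cache, tag the outgoing scan or mutations with the cache's UUID from getId(), and close() the handle in a finally block so the server-side memory is released.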


                @Override
                public MutationState execute() throws SQLException {
                    // TODO: share this block of code with UPSERT SELECT
                    ImmutableBytesWritable ptr = context.getTempPtr();
                    tableRef.getTable().getIndexMaintainers(ptr); // serializes the table's index maintainers into ptr
                    ServerCache cache = null;
                    try {
                        if (ptr.getLength() > 0) { // table has mutable indexes: ship the metadata to the region servers
                            IndexMetaDataCacheClient client = new IndexMetaDataCacheClient(connection, tableRef);
                            cache = client.addIndexMetadataCache(context.getScanRanges(), ptr);
                            byte[] uuidValue = cache.getId();
                            context.getScan().setAttribute(PhoenixIndexCodec.INDEX_UUID, uuidValue);
                        }
                        ResultIterator iterator = aggPlan.iterator();
                        try {
                            Tuple row = iterator.next();
                            final long mutationCount = (Long)projector.getColumnProjector(0).getValue(row, PDataType.LONG, ptr);
                            return new MutationState(maxSize, connection) {
                                @Override
                                public long getUpdateCount() {
                                    return mutationCount;
                                }
                            };
                        } finally {
                            iterator.close();
                        }
                    } finally {
                        if (cache != null) {
                            cache.close();
                        }
                    }
                }

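The execute() implementation above (and the nearly identical one further down for the server-side UPSERT SELECT path the TODO refers to) reduces to a single pattern. Below is a minimal sketch of that lifecycle; the method name runWithIndexMetaData and the Runnable parameter are illustrative, not Phoenix API, and imports are omitted since only com.salesforce.phoenix.cache.ServerCacheClient is named explicitly on this page.

    // Minimal sketch of the cache lifecycle shown above. runWithIndexMetaData and
    // the Runnable parameter are illustrative; the remaining identifiers come from
    // the snippet.
    static void runWithIndexMetaData(PhoenixConnection connection, TableRef tableRef,
            StatementContext context, Scan scan, Runnable work) throws SQLException {
        ImmutableBytesWritable ptr = context.getTempPtr();
        tableRef.getTable().getIndexMaintainers(ptr); // serialize the index maintainers into ptr
        ServerCache cache = null;
        try {
            if (ptr.getLength() > 0) { // only needed when the table has indexes
                IndexMetaDataCacheClient client = new IndexMetaDataCacheClient(connection, tableRef);
                cache = client.addIndexMetadataCache(context.getScanRanges(), ptr);
                scan.setAttribute(PhoenixIndexCodec.INDEX_UUID, cache.getId());
            }
            work.run(); // whatever consumes the tagged scan
        } finally {
            if (cache != null) {
                cache.close(); // release the cached metadata on every region server it reached
            }
        }
    }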


                        StatementContext context = new StatementContext(new PhoenixStatement(connection), resolver, Collections.<Object>emptyList(), scan);
                        ScanUtil.setTimeRange(scan, timestamp);
                        if (emptyCF != null) {
                            scan.setAttribute(UngroupedAggregateRegionObserver.EMPTY_CF, emptyCF);
                        }
                        ServerCache cache = null;
                        try {
                            if (deleteList != null) {
                                if (deleteList.isEmpty()) {
                                    scan.setAttribute(UngroupedAggregateRegionObserver.DELETE_AGG, QueryConstants.TRUE);
                                    // In the case of a row deletion, add index metadata so mutable secondary indexing works
                                    /* TODO
                                    ImmutableBytesWritable ptr = context.getTempPtr();
                                    tableRef.getTable().getIndexMaintainers(ptr);
                                    if (ptr.getLength() > 0) {
                                        IndexMetaDataCacheClient client = new IndexMetaDataCacheClient(connection, tableRef);
                                        cache = client.addIndexMetadataCache(context.getScanRanges(), ptr);
                                        byte[] uuidValue = cache.getId();
                                        scan.setAttribute(PhoenixIndexCodec.INDEX_UUID, uuidValue);
                                    }
                                    */
                                } else {
                                    // In the case of the empty key value column family changing, do not send the index
                                    // metadata, as we're currently managing this from the client. It's possible for the
                                    // data empty column family to stay the same, while the index empty column family
                                    // changes.
                                    PColumn column = deleteList.get(0);
                                    if (emptyCF == null) {
                                        scan.addColumn(column.getFamilyName().getBytes(), column.getName().getBytes());
                                    }
                                    scan.setAttribute(UngroupedAggregateRegionObserver.DELETE_CF, column.getFamilyName().getBytes());
                                    scan.setAttribute(UngroupedAggregateRegionObserver.DELETE_CQ, column.getName().getBytes());
                                }
                            }
                            List<byte[]> columnFamilies = Lists.newArrayListWithExpectedSize(tableRef.getTable().getColumnFamilies().size());
                            if (projectCF == null) {
                                for (PColumnFamily family : tableRef.getTable().getColumnFamilies()) {
                                    columnFamilies.add(family.getName().getBytes());
                                }
                            } else {
                                columnFamilies.add(projectCF);
                            }
                            RowProjector projector = ProjectionCompiler.compile(context, SelectStatement.COUNT_ONE, GroupBy.EMPTY_GROUP_BY);
                            // Project these column families explicitly and skip the empty key value,
                            // since at this point the empty key value hasn't been added everywhere yet.
                            if (columnFamilies != null) {
                                scan.getFamilyMap().clear();
                                for (byte[] family : columnFamilies) {
                                    scan.addFamily(family);
                                }
                                projector = new RowProjector(projector, false);
                            }
                            // Ignore exceptions due to not being able to resolve any view columns,
                            // as this just means the view is invalid. Continue on and try to perform
                            // any other Post DDL operations.
                            try {
                                WhereCompiler.compile(context, select); // Push where clause into scan
                            } catch (ColumnFamilyNotFoundException | ColumnNotFoundException | AmbiguousColumnException e) {
                                continue;
                            }
                            QueryPlan plan = new AggregatePlan(context, select, tableRef, projector, null, OrderBy.EMPTY_ORDER_BY, null, GroupBy.EMPTY_GROUP_BY, null);
                            ResultIterator iterator = plan.iterator();
                            try {
                                Tuple row = iterator.next();
                                ImmutableBytesWritable ptr = context.getTempPtr();
                                totalMutationCount += (Long)projector.getColumnProjector(0).getValue(row, PDataType.LONG, ptr);
                            } catch (SQLException e) {
                                sqlE = e;
                            } finally {
                                try {
                                    iterator.close();
                                } catch (SQLException e) {
                                    if (sqlE == null) {
                                        sqlE = e;
                                    } else {
                                        sqlE.setNextException(e);
                                    }
                                } finally {
                                    if (sqlE != null) {
                                        throw sqlE;
                                    }
                                }
                            }
                        } finally {
                            if (cache != null) { // Remove server cache if there is one
                                cache.close();
                            }
                        }
                       
                    }
                    final long count = totalMutationCount;
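Beyond the cache handling, this example shows the close-and-chain idiom used around ResultIterator: the first SQLException stays primary, and a failure from close() is attached with setNextException rather than masking it. Distilled as a fragment, with process() as a hypothetical consumer of the row:

    // Keep the first SQLException primary; chain any close() failure; rethrow at the end.
    SQLException sqlE = null;
    try {
        process(iterator.next()); // process() is hypothetical: consume the aggregate row
    } catch (SQLException e) {
        sqlE = e;
    } finally {
        try {
            iterator.close();
        } catch (SQLException e) {
            if (sqlE == null) {
                sqlE = e;
            } else {
                sqlE.setNextException(e); // don't mask the original failure
            }
        } finally {
            if (sqlE != null) {
                throw sqlE;
            }
        }
    }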

   
                        @Override
                        public MutationState execute() throws SQLException {
                            ImmutableBytesWritable ptr = context.getTempPtr();
                            tableRef.getTable().getIndexMaintainers(ptr);
                            ServerCache cache = null;
                            try {
                                if (ptr.getLength() > 0) {
                                    IndexMetaDataCacheClient client = new IndexMetaDataCacheClient(connection, tableRef);
                                    cache = client.addIndexMetadataCache(context.getScanRanges(), ptr);
                                    byte[] uuidValue = cache.getId();
                                    scan.setAttribute(PhoenixIndexCodec.INDEX_UUID, uuidValue);
                                }
                                ResultIterator iterator = aggPlan.iterator();
                                try {
                                    Tuple row = iterator.next();
                                    final long mutationCount = (Long)aggProjector.getColumnProjector(0).getValue(row, PDataType.LONG, ptr);
                                    return new MutationState(maxSize, connection) {
                                        @Override
                                        public long getUpdateCount() {
                                            return mutationCount;
                                        }
                                    };
                                } finally {
                                    iterator.close();
                                }
                            } finally {
                                if (cache != null) {
                                    cache.close();
                                }
                            }
                        }
   
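All of the execute() examples release the cache through the same null-checked finally block. If ServerCache implements AutoCloseable (an assumption; these snippets only ever show close() being called manually), the lifecycle collapses to try-with-resources:

    // Sketch only: assumes ServerCache implements AutoCloseable, which these
    // snippets do not confirm (they only show close() being called in finally).
    ImmutableBytesWritable ptr = context.getTempPtr();
    tableRef.getTable().getIndexMaintainers(ptr);
    if (ptr.getLength() > 0) {
        IndexMetaDataCacheClient client = new IndexMetaDataCacheClient(connection, tableRef);
        try (ServerCache cache = client.addIndexMetadataCache(context.getScanRanges(), ptr)) {
            scan.setAttribute(PhoenixIndexCodec.INDEX_UUID, cache.getId());
            // ... iterate the aggregate plan while the cache is live ...
        }
    }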

                List<Mutation> mutations = pair.getSecond();
               
                int retryCount = 0;
                boolean shouldRetry = false;
                do {
                    ServerCache cache = null;
                    if (hasIndexMaintainers && isDataTable) {
                        byte[] attribValue = null;
                        byte[] uuidValue;
                        if (IndexMetaDataCacheClient.useIndexMetadataCache(connection, mutations, tempPtr.getLength())) {
                            IndexMetaDataCacheClient client = new IndexMetaDataCacheClient(connection, tableRef);
                            cache = client.addIndexMetadataCache(mutations, tempPtr);
                            uuidValue = cache.getId();
                            // If we haven't retried yet, retry for this case only, as it's possible that
                            // a split will occur after we send the index metadata cache to all known
                            // region servers.
                            shouldRetry = true;
                        } else {
                            attribValue = ByteUtil.copyKeyBytesIfNecessary(tempPtr);
                            uuidValue = ServerCacheClient.generateId();
                        }
                        // Either set the UUID to be able to access the index metadata from the cache
                        // or set the index metadata directly on the Mutation
                        for (Mutation mutation : mutations) {
                            if (tenantId != null) {
                                mutation.setAttribute(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId);
                            }
                            mutation.setAttribute(PhoenixIndexCodec.INDEX_UUID, uuidValue);
                            if (attribValue != null) {
                                mutation.setAttribute(PhoenixIndexCodec.INDEX_MD, attribValue);
                            }
                        }
                    }
                   
                    SQLException sqlE = null;
                    HTableInterface hTable = connection.getQueryServices().getTable(htableName);
                    try {
                        if (logger.isDebugEnabled()) logMutationSize(hTable, mutations);
                        long startTime = System.currentTimeMillis();
                        hTable.batch(mutations);
                        shouldRetry = false;
                        if (logger.isDebugEnabled()) logger.debug("Total time for batch call of " + mutations.size() + " mutations into " + table.getName().getString() + ": " + (System.currentTimeMillis() - startTime) + " ms");
                        committedList.add(entry);
                    } catch (Exception e) {
                        SQLException inferredE = ServerUtil.parseServerExceptionOrNull(e);
                        if (inferredE != null) {
                            if (shouldRetry && retryCount == 0 && inferredE.getErrorCode() == SQLExceptionCode.INDEX_METADATA_NOT_FOUND.getErrorCode()) {
                                // Swallow this exception once, as it's possible that we split after sending the index metadata
                                // and one of the region servers doesn't have it; the retry gives that server a chance to load it.
                                // If it fails again, we don't retry.
                                logger.warn("Swallowing exception and retrying after clearing meta cache on connection. " + inferredE);
                                connection.getQueryServices().clearTableRegionCache(htableName);
                                continue;
                            }
                            e = inferredE;
                        }
                        // Throw to client with both what was committed so far and what is left to be committed.
                        // That way, client can either undo what was done or try again with what was not done.
                        sqlE = new CommitException(e, this, new MutationState(committedList, this.sizeOffset, this.maxSize, this.connection));
                    } finally {
                        try {
                            hTable.close();
                        } catch (IOException e) {
                            if (sqlE != null) {
                                sqlE.setNextException(ServerUtil.parseServerException(e));
                            } else {
                                sqlE = ServerUtil.parseServerException(e);
                            }
                        } finally {
                            try {
                                if (cache != null) {
                                    cache.close();
                                }
                            } finally {
                                if (sqlE != null) {
                                    throw sqlE;
                                }
                            }
                        }
                    }
                } while (shouldRetry && retryCount++ == 0); // retry at most once
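Note which branch arms the retry: only metadata sent through the server cache can hit the INDEX_METADATA_NOT_FOUND race, because a region split after the broadcast can leave a new server without the cache entry. Metadata small enough to ride inline on each Mutation never needs the retry. The choice, distilled as a fragment reusing the same identifiers as the block above:

    // Choose the transport for the index metadata.
    if (IndexMetaDataCacheClient.useIndexMetadataCache(connection, mutations, tempPtr.getLength())) {
        // Large metadata: broadcast once to the region server cache and reference it by UUID.
        IndexMetaDataCacheClient client = new IndexMetaDataCacheClient(connection, tableRef);
        cache = client.addIndexMetadataCache(mutations, tempPtr);
        uuidValue = cache.getId();
        shouldRetry = true; // a split after the broadcast can strand a server without the entry
    } else {
        // Small metadata: attach it inline to every Mutation (INDEX_MD) under a fresh UUID.
        attribValue = ByteUtil.copyKeyBytesIfNecessary(tempPtr);
        uuidValue = ServerCacheClient.generateId();
    }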

                // (Submission of the hash cache builders elided; each submitted task returns a ServerCache.)
        for (int i = 0; i < count; i++) {
            try {
                ServerCache cache = futures.get(i).get();
                joinIds[i].set(cache.getId());
                dependencies.add(cache);
            } catch (InterruptedException e) {
                throw new SQLException("Hash join execution interrupted.", e);
            } catch (ExecutionException e) {
                throw new SQLException("Encountered exception in hash plan execution.",
                        e.getCause());
            }
        }
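In the hash join path, each future uploads one hash cache to the region servers: joinIds hands the cache UUID to the join query, while the ServerCache handle itself goes into dependencies, which is what keeps the server-side hash table alive. A plausible cleanup sketch (assumed; the snippet ends before any cleanup is shown):

    // Assumed cleanup: release each hash cache once the join no longer needs it.
    for (ServerCache dependency : dependencies) {
        dependency.close(); // frees the hash table memory on the region servers
    }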
