Examples of MemoryChunk


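MemoryChunk is the accounting unit handed out by Phoenix's MemoryManager: allocate() reserves bytes against a global or per-tenant budget, resize() adjusts the reservation once the real data size is known, and close() returns the bytes to the pool. Every snippet below follows that lifecycle. A minimal sketch, assuming a GlobalMemoryManager with a fixed byte budget (the constructor arguments are illustrative and have varied across Phoenix versions):

    import org.apache.phoenix.memory.GlobalMemoryManager;
    import org.apache.phoenix.memory.MemoryManager;
    import org.apache.phoenix.memory.MemoryManager.MemoryChunk;

    public class MemoryChunkLifecycle {
        public static void main(String[] args) {
            // Hypothetical 1 MB budget; the second argument (max ms to block waiting
            // for memory) exists only in older Phoenix versions.
            MemoryManager mm = new GlobalMemoryManager(1024 * 1024, 10000);
            MemoryChunk chunk = mm.allocate(4096); // throws InsufficientMemoryException if over budget
            try {
                chunk.resize(8192); // grow (or shrink) the reservation in place
                System.out.println("reserved bytes: " + chunk.getSize());
            } finally {
                chunk.close(); // return the bytes to the pool
            }
        }
    }
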
Examples of org.apache.phoenix.memory.MemoryManager.MemoryChunk

        return getServerCaches().getIfPresent(cacheId);
    }
   
    @Override
    public Closeable addServerCache(ImmutableBytesPtr cacheId, ImmutableBytesWritable cachePtr, ServerCacheFactory cacheFactory) throws SQLException {
        // Reserve the payload's size against this tenant's memory budget; allocate()
        // fails with InsufficientMemoryException if the pool cannot cover it.
        MemoryChunk chunk = this.getMemoryManager().allocate(cachePtr.getLength());
        // The factory couples the bytes and the chunk, so closing the returned cache
        // releases the reservation back to the MemoryManager.
        Closeable element = cacheFactory.newCache(cachePtr, chunk);
        getServerCaches().put(cacheId, element);
        return element;
    }
View Full Code Here
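
A hedged usage sketch for the snippet above: the returned Closeable owns the MemoryChunk (the factory couples them), so closing the cache object is what actually releases the reserved bytes. The tenantCache, cacheId, cachePtr, and cacheFactory names here are placeholders:

    Closeable cache = tenantCache.addServerCache(cacheId, cachePtr, cacheFactory);
    try {
        // ... serve queries from the cached bytes ...
    } finally {
        cache.close(); // releases the backing MemoryChunk
    }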

Examples of org.apache.phoenix.memory.MemoryManager.MemoryChunk

            estDistVals = Math.max(MIN_DISTINCT_VALUES, (int)(Bytes.toInt(estDistValsBytes) * 1.5f)); // Allocate 1.5x estimation, floored at MIN_DISTINCT_VALUES
        }
       
        TenantCache tenantCache = GlobalCache.getTenantCache(c.getEnvironment(), ScanUtil.getTenantId(scan));
        int estSize = sizeOfUnorderedGroupByMap(estDistVals, aggregators.getSize());
        final MemoryChunk chunk = tenantCache.getMemoryManager().allocate(estSize);
        boolean success = false;
        try {
            // TODO: spool map to disk if map becomes too big
            boolean hasMore;
            int estValueSize = aggregators.getSize();
            MultiKeyValueTuple result = new MultiKeyValueTuple();
            Map<ImmutableBytesWritable, Aggregator[]> aggregateMap = new HashMap<ImmutableBytesWritable, Aggregator[]>(estDistVals);
            HRegion region = c.getEnvironment().getRegion();
            MultiVersionConsistencyControl.setThreadReadPoint(s.getMvccReadPoint());
            region.startRegionOperation();
            try {
                do {
                    List<KeyValue> results = new ArrayList<KeyValue>();
                    // s.nextRaw may return results even when it returns false; the
                    // return value only signals whether more rows remain after the
                    // ones just returned.
                    hasMore = s.nextRaw(results, null);
                    if (!results.isEmpty()) {
                        result.setKeyValues(results);
                        ImmutableBytesWritable key = TupleUtil.getConcatenatedValue(result, expressions);
                        Aggregator[] rowAggregators = aggregateMap.get(key);
                        if (rowAggregators == null) {
                            // If Aggregators not found for this distinct value, clone our original one (we need one per distinct value)
                            if (logger.isDebugEnabled()) {
                                logger.debug("Adding new aggregate bucket for row key " + Bytes.toStringBinary(key.get(),key.getOffset(),key.getLength()));
                            }
                            rowAggregators = aggregators.newAggregators(c.getEnvironment().getConfiguration());
                            aggregateMap.put(key, rowAggregators);
                        }
                        // Aggregate values here
                        aggregators.aggregate(rowAggregators, result);
                        if (logger.isDebugEnabled()) {
                            logger.debug("Row passed filters: " + results + ", aggregated values: " + Arrays.asList(rowAggregators));
                        }
                           
                        if (aggregateMap.size() > estDistVals) { // increase allocation
                            estDistVals *= 1.5f;
                            estSize = sizeOfUnorderedGroupByMap(estDistVals, estValueSize);
                            chunk.resize(estSize);
                        }
                    }
                } while (hasMore);
            } finally {
                region.closeRegionOperation();
            }
   
            // Compute final allocation
            estSize = sizeOfUnorderedGroupByMap(aggregateMap.size(), estValueSize);
            chunk.resize(estSize);
           
            // TODO: spool list to disk if too big and free memory?
            final List<KeyValue> aggResults = new ArrayList<KeyValue>(aggregateMap.size());
            for (Map.Entry<ImmutableBytesWritable, Aggregator[]> entry : aggregateMap.entrySet()) {
                ImmutableBytesWritable key = entry.getKey();
                Aggregator[] rowAggregators = entry.getValue();
                // Generate byte array of Aggregators and set as value of row
                byte[] value = aggregators.toBytes(rowAggregators);
               
                if (logger.isDebugEnabled()) {
                    logger.debug("Adding new distinct group: " + Bytes.toStringBinary(key.get(),key.getOffset(), key.getLength()) +
                            " with aggregators " + Arrays.asList(rowAggregators).toString() +
                            " value = " + Bytes.toStringBinary(value));
                }
                KeyValue keyValue = KeyValueUtil.newKeyValue(key.get(),key.getOffset(), key.getLength(),SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, value, 0, value.length);
                aggResults.add(keyValue);
            }
            // Do not sort here, but sort back on the client instead
            // The reason is that if the scan ever extends beyond a region (which can happen
            // if we're basing our parallelization split points on old metadata), we'll get
            // incorrect query results.
            RegionScanner scanner = new BaseRegionScanner() {
                private int index = 0;
   
                @Override
                public HRegionInfo getRegionInfo() {
                    return s.getRegionInfo();
                }
   
                @Override
                public void close() throws IOException {
                    try {
                        s.close();
                    } finally {
                        chunk.close();
                    }
                }
   
                @Override
                public boolean next(List<KeyValue> results) throws IOException {
                    if (index >= aggResults.size()) return false;
                    results.add(aggResults.get(index));
                    index++;
                    return index < aggResults.size();
                }
            };
            success = true;
            return scanner;
        } finally {
            if (!success)
                chunk.close();
        }
    }
View Full Code Here
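
The coprocessor above shows the estimate-then-correct idiom for MemoryChunk: allocate from an estimate, grow by 1.5x whenever the aggregate map outgrows it, shrink to the exact footprint once the input is exhausted, and close the chunk on any failure path (on success the returned scanner takes ownership and closes it). Distilled into a sketch, with sizeOf(), moreInput(), and addEntry() as hypothetical stand-ins:

    int estEntries = 1000; // initial guess
    MemoryChunk chunk = mm.allocate(sizeOf(estEntries));
    boolean success = false;
    try {
        int count = 0;
        while (moreInput()) {
            addEntry();
            if (++count > estEntries) {
                estEntries *= 1.5;                // grow the estimate
                chunk.resize(sizeOf(estEntries)); // may throw if over budget
            }
        }
        chunk.resize(sizeOf(count)); // settle on the exact footprint
        success = true;              // ownership passes to the consumer
    } finally {
        if (!success) {
            chunk.close(); // release on failure
        }
    }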

Examples of org.apache.phoenix.memory.MemoryManager.MemoryChunk

    }
   
    public ServerCache addServerCache(ScanRanges keyRanges, final ImmutableBytesWritable cachePtr, final ServerCacheFactory cacheFactory) throws SQLException {
        ConnectionQueryServices services = connection.getQueryServices();
        MemoryChunk chunk = services.getMemoryManager().allocate(cachePtr.getLength());
        List<Closeable> closeables = new ArrayList<Closeable>();
        closeables.add(chunk);
        ServerCache hashCacheSpec = null;
        SQLException firstException = null;
        final byte[] cacheId = generateId();
View Full Code Here

Examples of org.apache.phoenix.memory.MemoryManager.MemoryChunk

     */
    private RegionScanner getTopNScanner(final ObserverContext<RegionCoprocessorEnvironment> c, final RegionScanner s, final OrderedResultIterator iterator, ImmutableBytesWritable tenantId) throws Throwable {
        final Tuple firstTuple;
        TenantCache tenantCache = GlobalCache.getTenantCache(c.getEnvironment(), tenantId);
        long estSize = iterator.getEstimatedByteSize();
        final MemoryChunk chunk = tenantCache.getMemoryManager().allocate(estSize);
        final HRegion region = c.getEnvironment().getRegion();
        region.startRegionOperation();
        try {
            // Once we return from the first call to next, we've run through and cached
            // the topN rows, so we no longer need to start/stop a region operation.
            firstTuple = iterator.next();
            // Now that the topN are cached, we can resize based on the real size
            long actualSize = iterator.getByteSize();
            chunk.resize(actualSize);
        } catch (Throwable t) {
            ServerUtil.throwIOException(region.getRegionNameAsString(), t);
            return null;
        } finally {
            region.closeRegionOperation();
        }
        return new BaseRegionScanner() {
            private Tuple tuple = firstTuple;
           
            @Override
            public boolean isFilterDone() {
                return tuple == null;
            }

            @Override
            public HRegionInfo getRegionInfo() {
                return s.getRegionInfo();
            }

            @Override
            public boolean next(List<KeyValue> results) throws IOException {
                try {
                    if (isFilterDone()) {
                        return false;
                    }
                   
                    for (int i = 0; i < tuple.size(); i++) {
                        results.add(tuple.getValue(i));
                    }
                   
                    tuple = iterator.next();
                    return !isFilterDone();
                } catch (Throwable t) {
                    ServerUtil.throwIOException(region.getRegionNameAsString(), t);
                    return false;
                }
            }

            @Override
            public void close() throws IOException {
                try {
                    s.close();
                } finally {
                    chunk.close();
                }
            }
        };
    }
View Full Code Here

Examples of org.apache.phoenix.memory.MemoryManager.MemoryChunk

    * @throws SQLException
    */
    SpoolingResultIterator(ResultIterator scanner, MemoryManager mm, final int thresholdBytes, final long maxSpoolToDisk) throws SQLException {
        boolean success = false;
        boolean usedOnDiskIterator = false;
        final MemoryChunk chunk = mm.allocate(0, thresholdBytes);
        File tempFile = null;
        try {
            // The granted size can't exceed thresholdBytes (an int), the maximum requested above, so this cast is safe
            int size = (int)chunk.getSize();
            tempFile = File.createTempFile("ResultSpooler",".bin");
            DeferredFileOutputStream spoolTo = new DeferredFileOutputStream(size, tempFile) {
                @Override
                protected void thresholdReached() throws IOException {
                    super.thresholdReached();
                    chunk.close();
                }
            };
            DataOutputStream out = new DataOutputStream(spoolTo);
            final long maxBytesAllowed = maxSpoolToDisk == -1 ?
                Long.MAX_VALUE : thresholdBytes + maxSpoolToDisk;
            long bytesWritten = 0L;
            int maxSize = 0;
            for (Tuple result = scanner.next(); result != null; result = scanner.next()) {
                int length = TupleUtil.write(result, out);
                bytesWritten += length;
                if (bytesWritten > maxBytesAllowed) {
                    throw new SpoolTooBigToDiskException("result too big, max allowed(bytes): " + maxBytesAllowed);
                }
                maxSize = Math.max(length, maxSize);
            }
            spoolTo.close();
            if (spoolTo.isInMemory()) {
                byte[] data = spoolTo.getData();
                chunk.resize(data.length);
                spoolFrom = new InMemoryResultIterator(data, chunk);
            } else {
                spoolFrom = new OnDiskResultIterator(maxSize, spoolTo.getFile());
                usedOnDiskIterator = true;
            }
            success = true;
        } catch (IOException e) {
            throw ServerUtil.parseServerException(e);
        } finally {
            try {
                scanner.close();
            } finally {
                try {
                    if (tempFile != null && !usedOnDiskIterator) {
                        tempFile.delete();
                    }
                } finally {
                    if (!success) {
                        chunk.close();
                    }
                }
            }
        }
    }
View Full Code Here
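
Note the two-argument allocate(0, thresholdBytes) above: unlike the single-argument form, it accepts any grant between the minimum and the requested size, so the in-memory spool threshold adapts to whatever the pool can spare. A minimal sketch of the pattern (the 128 KB request is an arbitrary example):

    // Ask for up to 128 KB, but accept anything down to 0 bytes.
    final MemoryChunk chunk = mm.allocate(0, 128 * 1024);
    int inMemoryThreshold = (int) chunk.getSize(); // safe cast: bounded by the int-sized request
    // Buffer results in memory up to inMemoryThreshold bytes; past that, spill
    // to disk and close the chunk, as thresholdReached() does above.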

Examples of org.apache.phoenix.memory.MemoryManager.MemoryChunk

    }
   
    public ServerCache addServerCache(ScanRanges keyRanges, final ImmutableBytesWritable cachePtr, final ServerCacheFactory cacheFactory, final TableRef cacheUsingTableRef) throws SQLException {
        ConnectionQueryServices services = connection.getQueryServices();
        MemoryChunk chunk = services.getMemoryManager().allocate(cachePtr.getLength());
        List<Closeable> closeables = new ArrayList<Closeable>();
        closeables.add(chunk);
        ServerCache hashCacheSpec = null;
        SQLException firstException = null;
        final byte[] cacheId = generateId();
View Full Code Here

Examples of org.apache.phoenix.memory.MemoryManager.MemoryChunk

     */
    private RegionScanner getTopNScanner(final ObserverContext<RegionCoprocessorEnvironment> c, final RegionScanner s, final OrderedResultIterator iterator, ImmutableBytesWritable tenantId) throws Throwable {
        final Tuple firstTuple;
        TenantCache tenantCache = GlobalCache.getTenantCache(c.getEnvironment(), tenantId);
        long estSize = iterator.getEstimatedByteSize();
        final MemoryChunk chunk = tenantCache.getMemoryManager().allocate(estSize);
        final HRegion region = c.getEnvironment().getRegion();
        region.startRegionOperation();
        try {
            // Once we return from the first call to next, we've run through and cached
            // the topN rows, so we no longer need to start/stop a region operation.
            firstTuple = iterator.next();
            // Now that the topN are cached, we can resize based on the real size
            long actualSize = iterator.getByteSize();
            chunk.resize(actualSize);
        } catch (Throwable t) {
            ServerUtil.throwIOException(region.getRegionNameAsString(), t);
            return null;
        } finally {
            region.closeRegionOperation();
        }
        return new BaseRegionScanner() {
            private Tuple tuple = firstTuple;
           
            @Override
            public boolean isFilterDone() {
                return tuple == null;
            }

            @Override
            public HRegionInfo getRegionInfo() {
                return s.getRegionInfo();
            }

            @Override
            public boolean next(List<Cell> results) throws IOException {
                try {
                    if (isFilterDone()) {
                        return false;
                    }
                   
                    for (int i = 0; i < tuple.size(); i++) {
                        results.add(tuple.getValue(i));
                    }
                   
                    tuple = iterator.next();
                    return !isFilterDone();
                } catch (Throwable t) {
                    ServerUtil.throwIOException(region.getRegionNameAsString(), t);
                    return false;
                }
            }

            @Override
            public void close() throws IOException {
                try {
                    s.close();
                } finally {
                    chunk.close();
                }
            }
           
            @Override
            public long getMaxResultSize() {
                return s.getMaxResultSize();
View Full Code Here