Package com.facebook.presto.spi

Examples of com.facebook.presto.spi.FixedSplitSource
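
FixedSplitSource is the simplest ConnectorSplitSource: it wraps a pre-computed, in-memory list of splits and serves them back to the engine in batches. Every example below follows the same pattern: validate the incoming partitions, build an ImmutableList of splits (one per unit of work, addressed to the nodes that should read it), and return the list wrapped in a FixedSplitSource. The sketch below is only an illustration of that pattern, not code from Presto itself; ExampleSplit is a hypothetical split class, and the connectorId and nodeManager fields are assumed to be supplied by the connector, as in the real examples that follow.

    @Override
    public ConnectorSplitSource getPartitionSplits(ConnectorTableHandle table, List<ConnectorPartition> partitions)
    {
        checkNotNull(partitions, "partitions is null");
        if (partitions.isEmpty()) {
            // no partitions means no data: return an empty, already-finished split source
            return new FixedSplitSource(connectorId, ImmutableList.<ConnectorSplit>of());
        }

        ImmutableList.Builder<ConnectorSplit> splits = ImmutableList.builder();
        for (ConnectorPartition partition : partitions) {
            for (Node node : nodeManager.getActiveDatasourceNodes(connectorId)) {
                // one split per (partition, node) pair, addressed to the node that should read it
                // ExampleSplit is a placeholder for a connector-specific ConnectorSplit implementation
                splits.add(new ExampleSplit(partition.getPartitionId(), ImmutableList.of(node.getHostAndPort())));
            }
        }

        // FixedSplitSource simply hands this fixed list back to the engine; no lazy split discovery is involved
        return new FixedSplitSource(connectorId, splits.build());
    }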


    @Override
    public ConnectorSplitSource getPartitionSplits(ConnectorTableHandle table, List<ConnectorPartition> partitions)
    {
        checkNotNull(partitions, "partitions is null");
        if (partitions.isEmpty()) {
            return new FixedSplitSource(connectorId, ImmutableList.<ConnectorSplit>of());
        }

        ConnectorPartition partition = Iterables.getOnlyElement(partitions);
        checkArgument(partition instanceof TpchPartition, "Partition must be a tpch partition");
        TpchTableHandle tableHandle = ((TpchPartition) partition).getTable();

        Set<Node> nodes = nodeManager.getActiveDatasourceNodes(connectorId);
        checkState(!nodes.isEmpty(), "No TPCH nodes available");

        int totalParts = nodes.size() * splitsPerNode;
        int partNumber = 0;

        // Split the data into (nodes * splitsPerNode) parts, assigning splitsPerNode consecutive part numbers to each node.
        ImmutableList.Builder<ConnectorSplit> splits = ImmutableList.builder();
        for (Node node : nodes) {
            for (int i = 0; i < splitsPerNode; i++) {
                splits.add(new TpchSplit(tableHandle, partNumber++, totalParts, ImmutableList.of(node.getHostAndPort())));
            }
        }
        return new FixedSplitSource(connectorId, splits.build());
    }
View Full Code Here


        checkArgument(tableHandle instanceof CassandraTableHandle, "tableHandle is not an instance of CassandraTableHandle");
        CassandraTableHandle cassandraTableHandle = (CassandraTableHandle) tableHandle;

        checkNotNull(partitions, "partitions is null");
        if (partitions.isEmpty()) {
            return new FixedSplitSource(connectorId, ImmutableList.<Split>of());
        }

        // if this is an unpartitioned table, split into equal ranges
        if (partitions.size() == 1) {
            Partition partition = partitions.get(0);
            checkArgument(partition instanceof CassandraPartition, "partition is not a CassandraPartition");
            CassandraPartition cassandraPartition = (CassandraPartition) partition;

            if (cassandraPartition.isUnpartitioned()) {
                CassandraTable table = schemaProvider.getTable(cassandraTableHandle);
                List<Split> splits = getSplitsByTokenRange(table, cassandraPartition.getPartitionId());
                return new FixedSplitSource(connectorId, splits);
            }
        }

        return new FixedSplitSource(connectorId, getSplitsForPartitions(cassandraTableHandle, partitions));
    }
View Full Code Here

        checkNotNull(partitions, "partitions is null");

        ConnectorPartition partition = Iterables.getFirst(partitions, null);
        if (partition == null) {
            return new FixedSplitSource(connectorId, ImmutableList.<ConnectorSplit>of());
        }
        checkArgument(partition instanceof HivePartition, "Partition must be a hive partition");
        SchemaTableName tableName = ((HivePartition) partition).getTableName();
        Optional<HiveBucket> bucket = ((HivePartition) partition).getBucket();
View Full Code Here

        ImmutableList.Builder<Split> splits = ImmutableList.builder();
        for (int i = 0; i < splitCount; i++) {
            splits.add(new DualSplit(HostAddress.fromString("127.0.0.1")));
        }
        SplitSource splitSource = new FixedSplitSource(null, splits.build());

        return new StageExecutionPlan(testFragment,
                Optional.of(splitSource),
                ImmutableList.<StageExecutionPlan>of(),
                ImmutableMap.<PlanNodeId, OutputReceiver>of());
View Full Code Here

    {
        Stopwatch splitTimer = Stopwatch.createStarted();

        checkNotNull(partitions, "partitions is null");
        if (partitions.isEmpty()) {
            return new FixedSplitSource(connectorId, ImmutableList.<ConnectorSplit>of());
        }

        Map<String, Node> nodesById = uniqueIndex(nodeManager.getActiveNodes(), getIdentifierFunction());

        List<ConnectorSplit> splits = new ArrayList<>();

        Multimap<Long, Entry<UUID, String>> partitionShardNodes = shardManager.getShardNodesByPartition(tableHandle);

        for (ConnectorPartition partition : partitions) {
            RaptorPartition raptorPartition = checkType(partition, RaptorPartition.class, "partition");

            ImmutableMultimap.Builder<UUID, String> shardNodes = ImmutableMultimap.builder();
            for (Entry<UUID, String> shardNode : partitionShardNodes.get(raptorPartition.getRaptorPartitionId())) {
                shardNodes.put(shardNode.getKey(), shardNode.getValue());
            }

            for (Map.Entry<UUID, Collection<String>> entry : shardNodes.build().asMap().entrySet()) {
                List<HostAddress> addresses = getAddressesForNodes(nodesById, entry.getValue());
                checkState(!addresses.isEmpty(), "no host for shard %s found: %s", entry.getKey(), entry.getValue());
                ConnectorSplit split = new RaptorSplit(entry.getKey(), addresses);
                splits.add(split);
            }
        }

        log.debug("Split retrieval for %d partitions (%d splits): %dms", partitions.size(), splits.size(), splitTimer.elapsed(TimeUnit.MILLISECONDS));

        // The query engine assumes that splits are returned in a somewhat random fashion. The Raptor split manager,
        // because it loads the data from a database table, will return the splits somewhat ordered by node ID,
        // so only a subset of nodes are fired up. Shuffle the splits to ensure random distribution.
        Collections.shuffle(splits);

        return new FixedSplitSource(connectorId, splits);
    }
View Full Code Here

    {
        checkNotNull(partitions, "partitions is null");

        Partition partition = Iterables.getFirst(partitions, null);
        if (partition == null) {
            return new FixedSplitSource(connectorId, ImmutableList.<Split>of());
        }
        checkArgument(partition instanceof HivePartition, "Partition must be a hive partition");
        SchemaTableName tableName = ((HivePartition) partition).getTableName();
        Optional<HiveBucket> bucket = ((HivePartition) partition).getBucket();
View Full Code Here

        ImmutableList.Builder<ConnectorSplit> splits = ImmutableList.builder();

        for (int i = 0; i < splitCount; i++) {
            splits.add(new DualSplit(HostAddress.fromString("127.0.0.1")));
        }
        SplitSource splitSource = new ConnectorAwareSplitSource(DualConnector.CONNECTOR_ID, new FixedSplitSource(null, splits.build()));

        return new StageExecutionPlan(testFragment,
                Optional.of(splitSource),
                ImmutableList.<StageExecutionPlan>of()
        );
View Full Code Here

        checkNotNull(partitions, "partitions is null");

        ConnectorPartition partition = Iterables.getFirst(partitions, null);
        if (partition == null) {
            return new FixedSplitSource(connectorId, ImmutableList.<ConnectorSplit>of());
        }
        HivePartition hivePartition = checkType(partition, HivePartition.class, "partition");
        SchemaTableName tableName = hivePartition.getTableName();
        Optional<HiveBucket> bucket = hivePartition.getBucket();
View Full Code Here

    @Override
    public ConnectorSplitSource getPartitionSplits(ConnectorTableHandle table, List<ConnectorPartition> partitions)
    {
        checkNotNull(partitions, "partitions is null");
        if (partitions.isEmpty()) {
            return new FixedSplitSource(null, ImmutableList.<ConnectorSplit>of());
        }

        ConnectorPartition partition = Iterables.getOnlyElement(partitions);
        checkArgument(partition instanceof DualPartition, "Partition must be a dual partition");

        ConnectorSplit split = new DualSplit(nodeManager.getCurrentNode().getHostAndPort());

        return new FixedSplitSource(null, ImmutableList.of(split));
    }
View Full Code Here

                ImmutableMap.<Symbol, Type>of(symbol, Type.VARCHAR),
                PlanDistribution.SOURCE,
                tableScanNodeId,
                OutputPartitioning.NONE,
                ImmutableList.<Symbol>of());
        SplitSource splitSource = new FixedSplitSource(null, ImmutableList.copyOf(Collections.nCopies(splitCount, split)));

        return new StageExecutionPlan(testFragment,
                Optional.<SplitSource>of(splitSource),
                ImmutableList.<StageExecutionPlan>of(),
                ImmutableMap.<PlanNodeId, OutputReceiver>of());
View Full Code Here
