Examples of ExecutionEnvironment


Examples of com.intellij.execution.runners.ExecutionEnvironment


        runManager.setSelectedConfiguration(runConfigurationSetting);
        ProgramRunner programRunner = RunnerRegistry.getInstance().findRunnerById(DBProgramRunner.RUNNER_ID);
        try {
            ExecutionEnvironment executionEnvironment = new ExecutionEnvironment(
                    DefaultDebugExecutor.getDebugExecutorInstance(), programRunner, runConfigurationSetting, getProject());
            programRunner.execute(executionEnvironment);
        } catch (ExecutionException e) {
            MessageUtil.showErrorDialog(
                    "Could not start debugger for " + method.getQualifiedName() + ". \n" +
                            "Reason: " + e.getMessage());
View Full Code Here
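Note: this ExecutionEnvironment is IntelliJ IDEA's run/debug abstraction. It bundles an Executor (here the debug executor), the ProgramRunner that launches the process, the selected run configuration, and the project. It is unrelated to the identically named Flink/Stratosphere class shown in the sections below.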

Examples of eu.stratosphere.api.java.ExecutionEnvironment

 
  public static void main(String[] args) throws Exception {
   
    // set up the execution environment
    final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
   
    // get input data
    DataSet<String> text = env.fromElements(
        "To be, or not to be,--that is the question:--",
        "Whether 'tis nobler in the mind to suffer",
        "The slings and arrows of outrageous fortune",
        "Or to take arms against a sea of troubles,"
        );
   
    DataSet<Tuple2<String, Integer>> counts =
        // split up the lines in pairs (2-tuples) containing: (word,1)
        text.flatMap(new LineSplitter())
        // group by the tuple field "0" and sum up tuple field "1"
        .groupBy(0)
        .aggregate(Aggregations.SUM, 1);

    // emit result
    counts.print();
   
    // execute program
    env.execute("WordCount Example");
  }
View Full Code Here
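The LineSplitter referenced above is not shown in the snippet. A minimal sketch along the lines of the classic WordCount example (assuming eu.stratosphere.api.java.functions.FlatMapFunction and eu.stratosphere.util.Collector; the original may differ) is:

  // Splits each line into lowercase words and emits a (word, 1) pair per word.
  public static final class LineSplitter extends FlatMapFunction<String, Tuple2<String, Integer>> {
    @Override
    public void flatMap(String value, Collector<Tuple2<String, Integer>> out) {
      for (String token : value.toLowerCase().split("\\W+")) {
        if (token.length() > 0) {
          out.collect(new Tuple2<String, Integer>(token, 1));
        }
      }
    }
  }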

Examples of org.apache.flink.api.java.ExecutionEnvironment

public class WorksetIterationCornerCasesTest extends CompilerTestBase {

  @Test
  public void testWorksetIterationNotDependingOnSolutionSet() {
    try {
      ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
     
      DataSet<Tuple2<Long, Long>> input = env.generateSequence(1, 100).map(new Duplicator<Long>());
     
      DeltaIteration<Tuple2<Long, Long>, Tuple2<Long, Long>> iteration = input.iterateDelta(input, 100, 1);
     
      DataSet<Tuple2<Long, Long>> iterEnd = iteration.getWorkset().map(new TestMapper<Tuple2<Long,Long>>());
      iteration.closeWith(iterEnd, iterEnd).print();
     
      Plan p = env.createProgramPlan();
      OptimizedPlan op = compileNoStats(p);
     
      WorksetIterationPlanNode wipn = (WorksetIterationPlanNode) op.getDataSinks().iterator().next().getInput().getSource();
      assertTrue(wipn.getSolutionSetPlanNode().getOutgoingChannels().isEmpty());
     
View Full Code Here
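The Duplicator and TestMapper helpers are not part of the snippet. Plausible minimal versions, assumed only from the types used above, are:

  // Assumed helper: turns every value into a (value, value) pair.
  public static class Duplicator<T> implements MapFunction<T, Tuple2<T, T>> {
    @Override
    public Tuple2<T, T> map(T value) {
      return new Tuple2<T, T>(value, value);
    }
  }

  // Assumed helper: an identity map that merely adds an operator to the plan.
  public static class TestMapper<T> implements MapFunction<T, T> {
    @Override
    public T map(T value) {
      return value;
    }
  }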

Examples of org.apache.flink.api.java.ExecutionEnvironment

 
  private Plan getTestPlanRightStatic(String strategy) {
   
    ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
    env.setDegreeOfParallelism(DEFAULT_PARALLELISM);
   
    DataSet<Tuple3<Long, Long, Long>> bigInput = env.readCsvFile("file://bigFile").types(Long.class, Long.class, Long.class).name("bigFile");
   
    DataSet<Tuple3<Long, Long, Long>> smallInput = env.readCsvFile("file://smallFile").types(Long.class, Long.class, Long.class).name("smallFile");
   
    IterativeDataSet<Tuple3<Long, Long, Long>> iteration = bigInput.iterate(10);
   
    Configuration joinStrategy = new Configuration();
    joinStrategy.setString(PactCompiler.HINT_SHIP_STRATEGY, PactCompiler.HINT_SHIP_STRATEGY_REPARTITION_HASH);
   
    if (!strategy.isEmpty()) {
      joinStrategy.setString(PactCompiler.HINT_LOCAL_STRATEGY, strategy);
    }
   
    DataSet<Tuple3<Long, Long, Long>> inner = iteration.join(smallInput).where(0).equalTo(0).with(new DummyJoiner()).name("DummyJoiner").withParameters(joinStrategy);

    DataSet<Tuple3<Long, Long, Long>> output = iteration.closeWith(inner);
   
    output.print();
   
    return env.createProgramPlan();
   
  }
View Full Code Here
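DummyJoiner is not defined in the snippet (it is also used by the companion plan in the next section). A minimal assumed version matching the declared types simply forwards the first input:

  // Assumed helper: a join that forwards the record from the first input.
  public static class DummyJoiner implements JoinFunction<Tuple3<Long, Long, Long>, Tuple3<Long, Long, Long>, Tuple3<Long, Long, Long>> {
    @Override
    public Tuple3<Long, Long, Long> join(Tuple3<Long, Long, Long> first, Tuple3<Long, Long, Long> second) {
      return first;
    }
  }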

Examples of org.apache.flink.api.java.ExecutionEnvironment

   
 
  private Plan getTestPlanLeftStatic(String strategy) {
   
    ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
    env.setDegreeOfParallelism(DEFAULT_PARALLELISM);
   
    @SuppressWarnings("unchecked")
    DataSet<Tuple3<Long, Long, Long>> bigInput = env.fromElements(new Tuple3<Long, Long, Long>(1L, 2L, 3L),
        new Tuple3<Long, Long, Long>(1L, 2L, 3L),new Tuple3<Long, Long, Long>(1L, 2L, 3L)).name("Big");
   
    @SuppressWarnings("unchecked")
    DataSet<Tuple3<Long, Long, Long>> smallInput = env.fromElements(new Tuple3<Long, Long, Long>(1L, 2L, 3L)).name("Small");
   
    IterativeDataSet<Tuple3<Long, Long, Long>> iteration = bigInput.iterate(10);
   
    Configuration joinStrategy = new Configuration();
    joinStrategy.setString(PactCompiler.HINT_LOCAL_STRATEGY, strategy);
   
    DataSet<Tuple3<Long, Long, Long>> inner = smallInput.join(iteration).where(0).equalTo(0).with(new DummyJoiner()).name("DummyJoiner").withParameters(joinStrategy);

    DataSet<Tuple3<Long, Long, Long>> output = iteration.closeWith(inner);
   
    output.print();
   
    return env.createProgramPlan();
   
  }
View Full Code Here
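A caller would pass one of the optimizer's local-strategy hints, for example (assuming the hint constants defined on PactCompiler):

  // Build plans that pin the join to a specific local strategy.
  Plan hashPlan = getTestPlanLeftStatic(PactCompiler.HINT_LOCAL_STRATEGY_HASH_BUILD_FIRST);
  Plan sortPlan = getTestPlanLeftStatic(PactCompiler.HINT_LOCAL_STRATEGY_SORT_BOTH_MERGE);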

Examples of org.apache.flink.api.java.ExecutionEnvironment

 
  public static void main(String[] args) throws Exception {
    final int numVertices = 100;
   
    ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
   
    // assign an initial uniform probability (rank) to every vertex
    DataSet<Tuple2<Long, Double>> initialRanks = env.generateSequence(1, numVertices)
                .map(new MapFunction<Long, Tuple2<Long, Double>>() {
                  public Tuple2<Long, Double> map(Long value) {
                    return new Tuple2<Long, Double>(value, 1.0/numVertices);
                  }
                });
   
    // generate some random edges. the transition probability on each edge is 1/num-out-edges of the source vertex
    DataSet<Tuple3<Long, Long, Double>> edgesWithProbability = env.generateSequence(1, numVertices)
                .flatMap(new FlatMapFunction<Long, Tuple3<Long, Long, Double>>() {
                  public void flatMap(Long value, Collector<Tuple3<Long, Long, Double>> out) {
                    int numOutEdges = (int) (Math.random() * (numVertices / 2));
                    for (int i = 0; i < numOutEdges; i++) {
                      long target = (long) (Math.random() * numVertices) + 1;
                      out.collect(new Tuple3<Long, Long, Double>(value, target, 1.0/numOutEdges));
                    }
                  }
                });
   
    DataSet<Tuple2<Long, Double>> result = initialRanks.runOperation(
      VertexCentricIteration.withValuedEdges(edgesWithProbability,
            new VertexRankUpdater(numVertices, BETA), new RankMessenger(), 20));
   
    result.print();
    env.execute("Spargel PageRank");
  }
View Full Code Here
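VertexRankUpdater and RankMessenger are not shown. A sketch following the standard Spargel PageRank example (signatures assumed from the Spargel API; the original may differ) is:

  // Sums the incoming partial ranks and applies the random-jump dampening.
  public static final class VertexRankUpdater extends VertexUpdateFunction<Long, Double, Double> {
    private final long numVertices;
    private final double beta;

    public VertexRankUpdater(long numVertices, double beta) {
      this.numVertices = numVertices;
      this.beta = beta;
    }

    @Override
    public void updateVertex(Long vertexKey, Double vertexValue, MessageIterator<Double> inMessages) {
      double rankSum = 0.0;
      for (double msg : inMessages) {
        rankSum += msg;
      }
      setNewVertexValue(beta * rankSum + (1.0 - beta) / numVertices);
    }
  }

  // Sends a vertex's rank along each outgoing edge, weighted by the
  // transition probability stored as the edge value.
  public static final class RankMessenger extends MessagingFunction<Long, Double, Double, Double> {
    @Override
    public void sendMessages(Long vertexId, Double newRank) {
      for (OutgoingEdge<Long, Double> edge : getOutgoingEdges()) {
        sendMessageTo(edge.target(), newRank * edge.edgeValue());
      }
    }
  }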

Examples of org.apache.flink.api.java.ExecutionEnvironment

@SuppressWarnings({"serial", "unchecked"})
public class SpargelConnectedComponents {

  public static void main(String[] args) throws Exception {
    ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
   
    DataSet<Long> vertexIds = env.generateSequence(0, 10);
    DataSet<Tuple2<Long, Long>> edges = env.fromElements(new Tuple2<Long, Long>(0L, 2L), new Tuple2<Long, Long>(2L, 4L), new Tuple2<Long, Long>(4L, 8L),
                              new Tuple2<Long, Long>(1L, 5L), new Tuple2<Long, Long>(3L, 7L), new Tuple2<Long, Long>(3L, 9L));
   
    DataSet<Tuple2<Long, Long>> initialVertices = vertexIds.map(new IdAssigner());
   
    DataSet<Tuple2<Long, Long>> result = initialVertices.runOperation(VertexCentricIteration.withPlainEdges(edges, new CCUpdater(), new CCMessager(), 100));
   
    result.print();
    env.execute("Spargel Connected Components");
  }
View Full Code Here
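The helpers follow the standard Spargel connected-components pattern; assumed sketches (the originals may differ):

  // Initializes every vertex with its own id as its component id.
  public static final class IdAssigner implements MapFunction<Long, Tuple2<Long, Long>> {
    @Override
    public Tuple2<Long, Long> map(Long value) {
      return new Tuple2<Long, Long>(value, value);
    }
  }

  // Adopts the smallest component id received from any neighbor.
  public static final class CCUpdater extends VertexUpdateFunction<Long, Long, Long> {
    @Override
    public void updateVertex(Long vertexKey, Long vertexValue, MessageIterator<Long> inMessages) {
      long min = Long.MAX_VALUE;
      for (long msg : inMessages) {
        min = Math.min(min, msg);
      }
      if (min < vertexValue) {
        setNewVertexValue(min);
      }
    }
  }

  // Propagates the current component id to all neighbors (edges are plain,
  // so the edge-value type is NullValue).
  public static final class CCMessager extends MessagingFunction<Long, Long, Long, NullValue> {
    @Override
    public void sendMessages(Long vertexId, Long componentId) {
      sendMessageToAllNeighbors(componentId);
    }
  }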

Examples of org.apache.flink.api.java.ExecutionEnvironment

 
  public static void main(String[] args) throws Exception {
    final int NUM_VERTICES = 100;
   
    ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
   
    // a list of vertices
    DataSet<Long> vertices = env.generateSequence(1, NUM_VERTICES);
   
    // generate some random edges. the transition probability on each edge is 1/num-out-edges of the source vertex
    DataSet<Tuple3<Long, Long, Double>> edgesWithProbability = env.generateSequence(1, NUM_VERTICES)
                .flatMap(new FlatMapFunction<Long, Tuple3<Long, Long, Double>>() {
                  public void flatMap(Long value, Collector<Tuple3<Long, Long, Double>> out) {
                    int numOutEdges = (int) (Math.random() * (NUM_VERTICES / 2));
                    for (int i = 0; i < numOutEdges; i++) {
                      long target = (long) (Math.random() * NUM_VERTICES) + 1;
                      out.collect(new Tuple3<Long, Long, Double>(value, target, 1.0/numOutEdges));
                    }
                  }
                });
   
    // ---------- start of the algorithm ---------------
   
    // count the number of vertices
    DataSet<Long> count = vertices
      .map(new MapFunction<Long, Long>() {
        public Long map(Long value) {
          return 1L;
        }
      })
      .reduce(new ReduceFunction<Long>() {
        public Long reduce(Long value1, Long value2) {
          return value1 + value2;
        }
      });
   
    // assign an initial uniform probability (rank) to every vertex,
    // reading the vertex count from the broadcast variable
    DataSet<Tuple2<Long, Double>> initialRanks = vertices
      .map(new RichMapFunction<Long, Tuple2<Long, Double>>() {
       
        private long numVertices;
       
        @Override
        public void open(Configuration parameters) {
          numVertices = getRuntimeContext().<Long>getBroadcastVariable("count").iterator().next();
        }
       
        public Tuple2<Long, Double> map(Long value) {
          return new Tuple2<Long, Double>(value, 1.0/numVertices);
        }
      }).withBroadcastSet(count, "count");
   

    VertexCentricIteration<Long, Double, Double, Double> iteration = VertexCentricIteration.withValuedEdges(edgesWithProbability,
        new VertexRankUpdater(BETA), new RankMessenger(), 20);
    iteration.addBroadcastSetForUpdateFunction("count", count);
   
   
    DataSet<Tuple2<Long, Double>> result = initialRanks.runOperation(iteration);
   
    result.print();
    env.execute("Spargel PageRank");
  }
View Full Code Here
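In this variant VertexRankUpdater receives only BETA and must read the vertex count from the broadcast set registered via addBroadcastSetForUpdateFunction. A sketch, assuming the update function exposes broadcast sets through a preSuperstep()/getBroadcastSet() hook (an assumption; the exact Spargel API may differ):

  public static final class VertexRankUpdater extends VertexUpdateFunction<Long, Double, Double> {
    private final double beta;
    private long numVertices;

    public VertexRankUpdater(double beta) {
      this.beta = beta;
    }

    @Override
    public void preSuperstep() {
      // read the vertex count from the "count" broadcast set (assumed API)
      numVertices = this.<Long>getBroadcastSet("count").iterator().next();
    }

    @Override
    public void updateVertex(Long vertexKey, Double vertexValue, MessageIterator<Double> inMessages) {
      double rankSum = 0.0;
      for (double msg : inMessages) {
        rankSum += msg;
      }
      setNewVertexValue(beta * rankSum + (1.0 - beta) / numVertices);
    }
  }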

Examples of org.apache.flink.api.java.ExecutionEnvironment

  public void testUnionNewApiAssembly() {
    final int NUM_INPUTS = 4;
   
    // construct the plan: several flat maps whose results are unioned,
    // and the unioned DataSet is then grouped and aggregated
    final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
   
    DataSet<String> source = env.readTextFile(IN_FILE);
    DataSet<Tuple2<String, Integer>> lastUnion = source.flatMap(new DummyFlatMap());
 
    for (int i = 1; i < NUM_INPUTS; i++) {
      lastUnion = lastUnion.union(source.flatMap(new DummyFlatMap()));
    }
   
    DataSet<Tuple2<String, Integer>> result = lastUnion.groupBy(0).aggregate(Aggregations.SUM, 1);
    result.writeAsText(OUT_FILE);
 
    // create the program plan
    Plan plan = env.createProgramPlan("Test union on new java-api");
    OptimizedPlan oPlan = compileNoStats(plan);
    NepheleJobGraphGenerator jobGen = new NepheleJobGraphGenerator();
   
    // Compile plan to verify that no error is thrown
    jobGen.compileJobGraph(oPlan);
View Full Code Here
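DummyFlatMap is not shown; its input and output types are fixed by the snippet (String in, Tuple2<String, Integer> out). A hypothetical stand-in:

  // Hypothetical stand-in: emit each line once with a count of one.
  public static class DummyFlatMap implements FlatMapFunction<String, Tuple2<String, Integer>> {
    @Override
    public void flatMap(String value, Collector<Tuple2<String, Integer>> out) {
      out.collect(new Tuple2<String, Integer>(value, 1));
    }
  }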

Examples of org.apache.flink.api.java.ExecutionEnvironment


  @Test
  public void testCoGroupSolutionSet() {
    ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
    DataSet<Tuple1<Integer>> raw = env.readCsvFile(IN_FILE).types(Integer.class);

    DeltaIteration<Tuple1<Integer>, Tuple1<Integer>> iteration = raw.iterateDelta(raw, 1000, 0);

    DataSet<Tuple1<Integer>> test = iteration.getWorkset().map(new SimpleMap());
    DataSet<Tuple1<Integer>> delta = iteration.getSolutionSet().coGroup(test).where(0).equalTo(0).with(new SimpleCGroup());
    DataSet<Tuple1<Integer>> feedback = iteration.getWorkset().map(new SimpleMap());
    DataSet<Tuple1<Integer>> result = iteration.closeWith(delta, feedback);

    result.print();

    Plan plan = env.createProgramPlan();
    OptimizedPlan oPlan = null;
    try {
      oPlan = compileNoStats(plan);
    } catch(CompilerException e) {
      Assert.fail(e.getMessage());
View Full Code Here
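SimpleMap and SimpleCGroup are not part of the snippet. Assumed minimal versions that satisfy the types above: an identity map, and a co-group that forwards the solution-set side as the delta:

  public static class SimpleMap implements MapFunction<Tuple1<Integer>, Tuple1<Integer>> {
    @Override
    public Tuple1<Integer> map(Tuple1<Integer> value) {
      return value;
    }
  }

  public static class SimpleCGroup implements CoGroupFunction<Tuple1<Integer>, Tuple1<Integer>, Tuple1<Integer>> {
    @Override
    public void coGroup(Iterable<Tuple1<Integer>> first, Iterable<Tuple1<Integer>> second, Collector<Tuple1<Integer>> out) {
      for (Tuple1<Integer> value : first) {
        out.collect(value);
      }
    }
  }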