Examples of parallelize()


Examples of org.apache.spark.api.java.JavaSparkContext.parallelize()

                return pair._1() + ":" + pair._2().size();
              }
            }).collect().toString();
    System.out.print(s);

    final JavaRDD<Integer> rdd = sc.parallelize(
        new AbstractList<Integer>() {
          final Random random = new Random();
          @Override
          public Integer get(int index) {
            System.out.println("get(" + index + ")");
View Full Code Here

Examples of org.apache.spark.api.java.JavaSparkContext.parallelize()

                return pair._1() + ":" + pair._2().size();
              }
            }).collect().toString();
    System.out.print(s);

    final JavaRDD<Integer> rdd = sc.parallelize(
        new AbstractList<Integer>() {
          final Random random = new Random();
          @Override
          public Integer get(int index) {
            System.out.println("get(" + index + ")");
View Full Code Here

Examples of org.apache.spark.api.java.JavaSparkContext.parallelize()

    }

    JavaSparkContext jsc = new JavaSparkContext(args[0], "JavaLogQuery",
      System.getenv("SPARK_HOME"), JavaSparkContext.jarOfClass(JavaLogQuery.class));

    JavaRDD<String> dataSet = (args.length == 2) ? jsc.textFile(args[1]) : jsc.parallelize(exampleApacheLogs);

    JavaPairRDD<Tuple3<String, String, String>, Stats> extracted = dataSet.mapToPair(new PairFunction<String, Tuple3<String, String, String>, Stats>() {
      @Override
      public Tuple2<Tuple3<String, String, String>, Stats> call(String s) {
        return new Tuple2<Tuple3<String, String, String>, Stats>(extractKey(s), extractStats(s));
View Full Code Here

Examples of org.apache.spark.api.java.JavaSparkContext.parallelize()

    List<Integer> l = new ArrayList<Integer>(n);
    for (int i = 0; i < n; i++) {
      l.add(i);
    }

    JavaRDD<Integer> dataSet = jsc.parallelize(l, slices);

    int count = dataSet.map(new Function<Integer, Integer>() {
      @Override
      public Integer call(Integer integer) {
        double x = Math.random() * 2 - 1;
View Full Code Here

Examples of org.apache.spark.api.java.JavaSparkContext.parallelize()

      List<SampleVector> trainList = new ArrayList<SampleVector>();
      List<SampleVector> testList = new ArrayList<SampleVector>();
      DataInput.splitList(samples, trainList, testList, 0.8);
     
      JavaSparkContext context = SparkContextBuild.getContext(args);
      JavaRDD<SampleVector> rdds = context.parallelize(trainList);
      rdds.count();
      logger.info("RDD ok.");
     
      LR lr = new LR(x_feature, y_feature);
            SGDTrainConfig config = new SGDTrainConfig();
View Full Code Here

Examples of org.apache.spark.api.java.JavaSparkContext.parallelize()

      List<SampleVector> trainList = new ArrayList<SampleVector>();
      List<SampleVector> testList = new ArrayList<SampleVector>();
      DataInput.splitList(samples, trainList, testList, 0.7);
     
      JavaSparkContext context = SparkContextBuild.getContext(args);
      JavaRDD<SampleVector> rdds = context.parallelize(trainList);
      rdds.count();
      logger.info("RDD ok.");
     
      AutoEncoder da = new AutoEncoder(x_feature, n_hidden);
            SGDTrainConfig config = new SGDTrainConfig();
View Full Code Here

Examples of org.apache.spark.api.java.JavaSparkContext.parallelize()

      List<SampleVector> trainList = new ArrayList<SampleVector>();
      List<SampleVector> testList = new ArrayList<SampleVector>();
      DataInput.splitList(samples, trainList, testList, 0.7);
     
      JavaSparkContext context = SparkContextBuild.getContext(args);
      JavaRDD<SampleVector> rdds = context.parallelize(trainList);
      rdds.count();
      logger.info("RDD ok.");
     
      RBM rbm = new RBM(x_feature, n_hidden);
            SGDTrainConfig config = new SGDTrainConfig();
View Full Code Here

Examples of org.apache.spark.api.java.JavaSparkContext.parallelize()

    List<Integer> l = new ArrayList<Integer>(n);
    for (int i = 0; i < n; i++) {
      l.add(i);
    }

    JavaRDD<Integer> dataSet = jsc.parallelize(l, slices);

    int count = dataSet.map(new Function<Integer, Integer>() {
      @Override
      public Integer call(Integer integer) {
        double x = Math.random() * 2 - 1;
View Full Code Here

Examples of org.apache.spark.api.java.JavaSparkContext.parallelize()

  public static void main(String[] args) throws Exception {
    SparkConf sparkConf = new SparkConf().setAppName(APP_NAME);
    final JavaSparkContext sc = new JavaSparkContext(sparkConf);

    // Example of implementing a progress reporter for a simple job.
    JavaRDD<Integer> rdd = sc.parallelize(Arrays.asList(1, 2, 3, 4, 5), 5).map(
        new IdentityWithDelay<Integer>());
    JavaFutureAction<List<Integer>> jobFuture = rdd.collectAsync();
    while (!jobFuture.isDone()) {
      Thread.sleep(1000); // 1 second
      List<Integer> jobIds = jobFuture.jobIds();
View Full Code Here

Examples of org.apache.spark.api.java.JavaSparkContext.parallelize()

  public static void main(String[] args) {

    SparkConf sparkConf = new SparkConf().setAppName("JavaLogQuery");
    JavaSparkContext jsc = new JavaSparkContext(sparkConf);

    JavaRDD<String> dataSet = (args.length == 1) ? jsc.textFile(args[0]) : jsc.parallelize(exampleApacheLogs);

    JavaPairRDD<Tuple3<String, String, String>, Stats> extracted = dataSet.mapToPair(new PairFunction<String, Tuple3<String, String, String>, Stats>() {
      @Override
      public Tuple2<Tuple3<String, String, String>, Stats> call(String s) {
        return new Tuple2<Tuple3<String, String, String>, Stats>(extractKey(s), extractStats(s));
View Full Code Here
TOP
Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc. and is owned by Oracle, Inc. Contact coftware#gmail.com.