Examples of JavaSparkContext


Examples of org.apache.spark.api.java.JavaSparkContext

  // Lazily initialized singleton JavaSparkContext shared across the application.
  private static JavaSparkContext sc = null;
  private static SparkConf sparkConf = null;

  public static synchronized JavaSparkContext getSparkContext() {
    if (sc == null) {
      sc = new JavaSparkContext(getSparkConf());
    }
    return sc;
  }
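
The snippet leans on a getSparkConf() helper that is not shown. A minimal sketch of what it might look like; the master URL and app name here are assumptions, not the original project's values:

  // Hedged sketch of the assumed companion helper.
  private static synchronized SparkConf getSparkConf() {
    if (sparkConf == null) {
      sparkConf = new SparkConf()
          .setMaster("local[*]")      // assumption: run locally by default
          .setAppName("example-app"); // assumption: placeholder app name
    }
    return sparkConf;
  }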

Examples of org.apache.spark.api.java.JavaSparkContext

    // The flags below are parsed with the same arg(...) helper; host and config
    // come from earlier (truncated) lines of this method.
    String outFile = arg(args, "-out", null);
    String sparkhome = arg(args, "-spark", System.getenv("SPARK_HOME"));
    String[] jars = arg(args, "-jars", "AR.jar:ARApp.jar:ARExt.jar").split(":");
    boolean partition = Boolean.parseBoolean(arg(args, "-partitions", "true"));

    JavaSparkContext ctx = new JavaSparkContext(host, "Abstract-Rendering", sparkhome, jars);

    // Look up the named dataset configuration via reflection on OptionDataset's fields.
    OptionDataset<G, I> dataset;
    try {
      dataset = (OptionDataset) OptionDataset.class.getField(config).get(null);
    } catch (IllegalAccessException | IllegalArgumentException
        | NoSuchFieldException | NullPointerException | SecurityException e) {
      throw new IllegalArgumentException("Could not find -config indicated: " + config);
    }

    JavaRDD<Indexed> base;
    File sourceFile = dataset.sourceFile;
    if (!sourceFile.getName().endsWith(".csv")) {
      // Binary input: read with a custom Hadoop input format, then keep only the values.
      JavaPairRDD<LongWritable, DataInputRecord> source =
          ctx.hadoopFile(sourceFile.getPath(), HBINInputFormat.class, LongWritable.class, DataInputRecord.class);
      base = (JavaRDD<Indexed>) (JavaRDD) source.map(
          new Function<Tuple2<LongWritable, DataInputRecord>, DataInputRecord>() {
            public DataInputRecord call(Tuple2<LongWritable, DataInputRecord> pair) throws Exception { return pair._2; }
          });
    } else {
      // CSV input: parse each comma-separated line into an Indexed record.
      JavaRDD<String> source = ctx.textFile(sourceFile.getCanonicalPath());
      base = source.map(new StringToIndexed("\\s*,\\s*"));
    }

    Glypher<G, I> glypher = new Glypher<G, I>(dataset.shaper, dataset.valuer);
    GlyphsetRDD<G, I> glyphs = new GlyphsetRDD<>(base.map(glypher), true, partition);
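
The (JavaRDD<Indexed>) (JavaRDD) double cast in the binary branch only works because DataInputRecord is assignable to Indexed. Under that same assumption, mapping straight to Indexed avoids the raw cast entirely; a minimal sketch:

      // Sketch: declare the target element type so no unchecked cast is needed
      // (assumes DataInputRecord implements Indexed, which the original cast implies).
      base = source.map(
          new Function<Tuple2<LongWritable, DataInputRecord>, Indexed>() {
            public Indexed call(Tuple2<LongWritable, DataInputRecord> pair) { return pair._2; }
          });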

Examples of org.apache.spark.api.java.JavaSparkContext

    // host is parsed from args earlier in the (truncated) method.
    String inFile = arg(args, "-in", "../data/circlepoints.csv");
    String outFile = arg(args, "-out", null);
    String sparkhome = arg(args, "-spark", System.getenv("SPARK_HOME"));
    String[] jars = arg(args, "-jars", "AR.jar:ARApp.jar:ARExt.jar").split(":");

    JavaSparkContext ctx = new JavaSparkContext(host, "Abstract-Rendering", sparkhome, jars);
    JavaRDD<String> source = ctx.textFile(inFile);
    JavaRDD<Indexed> base = source.map(new StringToIndexed("\\s*,\\s*"));

    // Shape every record as a small rectangle and give each a constant value of 1.
    Shaper<Indexed, Rectangle2D> shaper = new ToRect(.1, .1, false, 2, 3);
    Valuer<Indexed, Integer> valuer = new Valuer.Constant<Indexed, Integer>(1);

    GlyphsetRDD<Rectangle2D, Integer> glyphs = new GlyphsetRDD<>(base.map(new Glypher<>(shaper, valuer)));

Examples of org.apache.spark.api.java.JavaSparkContext

    } else if (opts.pipelineType == PipelineType.spark) {
      SparkConf sconf = new SparkConf();
      // Use a short class name unless an explicit app name was already configured.
      if (!sconf.contains("spark.app.name") || sconf.get("spark.app.name").equals(getClass().getName())) {
        sconf.setAppName(Utils.getShortClassName(getClass()));
      }
      JavaSparkContext sparkContext = new JavaSparkContext(sconf);
      pipeline = new SparkPipeline(sparkContext, sparkContext.appName());
      pipeline.setConfiguration(getConf());
    } else {
      throw new IllegalArgumentException("Unsupported --pipeline-type: " + opts.pipelineType);
    }
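
Once built, the SparkPipeline behaves like any other Crunch Pipeline. A hedged usage sketch, with placeholder paths:

      // Sketch: run a trivial read/write through the pipeline (paths are placeholders).
      PCollection<String> lines = pipeline.readTextFile("/tmp/input.txt");
      pipeline.writeTextFile(lines, "/tmp/output");
      PipelineResult result = pipeline.done(); // triggers the underlying Spark job(s)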

Examples of org.apache.spark.api.java.JavaSparkContext

     
      // Split the samples 80/20 into training and test sets.
      List<SampleVector> trainList = new ArrayList<SampleVector>();
      List<SampleVector> testList = new ArrayList<SampleVector>();
      DataInput.splitList(samples, trainList, testList, 0.8);

      JavaSparkContext context = SparkContextBuild.getContext(args);
      JavaRDD<SampleVector> rdds = context.parallelize(trainList);
      rdds.count(); // an action, so the RDD is computed and load errors surface here
      logger.info("RDD ok.");

      // Logistic regression trained with SGD.
      LR lr = new LR(x_feature, y_feature);
      SGDTrainConfig config = new SGDTrainConfig();
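
This and the two snippets below all obtain their context from a SparkContextBuild helper that is not shown. A hypothetical sketch of such a factory; the argument handling, master URL, and app name are all assumptions:

      // Hedged sketch of the assumed helper, not the project's actual code.
      public final class SparkContextBuild {
        public static JavaSparkContext getContext(String[] args) {
          // Assumption: the first argument, if present, is the master URL.
          String master = args.length > 0 ? args[0] : "local[*]";
          SparkConf conf = new SparkConf().setMaster(master).setAppName("sgd-training");
          return new JavaSparkContext(conf);
        }
      }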

Examples of org.apache.spark.api.java.JavaSparkContext

     
      // Split the samples 70/30 into training and test sets.
      List<SampleVector> trainList = new ArrayList<SampleVector>();
      List<SampleVector> testList = new ArrayList<SampleVector>();
      DataInput.splitList(samples, trainList, testList, 0.7);

      JavaSparkContext context = SparkContextBuild.getContext(args);
      JavaRDD<SampleVector> rdds = context.parallelize(trainList);
      rdds.count(); // an action, so the RDD is computed and load errors surface here
      logger.info("RDD ok.");

      // Auto-encoder with x_feature inputs and n_hidden hidden units.
      AutoEncoder da = new AutoEncoder(x_feature, n_hidden);
      SGDTrainConfig config = new SGDTrainConfig();

Examples of org.apache.spark.api.java.JavaSparkContext

     
      // Split the samples 70/30 into training and test sets.
      List<SampleVector> trainList = new ArrayList<SampleVector>();
      List<SampleVector> testList = new ArrayList<SampleVector>();
      DataInput.splitList(samples, trainList, testList, 0.7);

      JavaSparkContext context = SparkContextBuild.getContext(args);
      JavaRDD<SampleVector> rdds = context.parallelize(trainList);
      rdds.count(); // an action, so the RDD is computed and load errors surface here
      logger.info("RDD ok.");

      // Restricted Boltzmann Machine with x_feature visible and n_hidden hidden units.
      RBM rbm = new RBM(x_feature, n_hidden);
      SGDTrainConfig config = new SGDTrainConfig();

Examples of org.apache.spark.api.java.JavaSparkContext

        // Copy the job's jar paths into the array handed to the context.
        for (int i = 0; i < sparkJobJarList.length; i++) {
            sparkJobJarList[i] = jars.get(i);
        }

        // Keep Spark's scratch space under the configured Spark home.
        System.setProperty("spark.local.dir", cmd.getOptionValue("sparkHomePath") + File.separator + "temp");
        return new JavaSparkContext(cmd.getOptionValue("sparkMasterURL"),
            cmd.getOptionValue("sparkJobName", "spark-job-" + System.currentTimeMillis()),
            cmd.getOptionValue("sparkHomePath"),
            sparkJobJarList,
            new HashMap<String, String>());
    }

Examples of org.apache.spark.api.java.JavaSparkContext

    SparkConf sparkConf = new SparkConf()
        .setAppName("Correlate Events")
        // Configure Kryo serialization, including our Avro registrator.
        .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
        .set("spark.kryo.registrator", "org.kitesdk.examples.spark.AvroKyroRegistrator");
    JavaSparkContext sparkContext = new JavaSparkContext(sparkConf);

    // Load StandardEvent records through Kite's Hadoop input format.
    JavaPairRDD<StandardEvent, Void> events = sparkContext.newAPIHadoopRDD(conf,
        DatasetKeyInputFormat.class, StandardEvent.class, Void.class);

    // Map each event to two correlation keys: one pairing the IP address with
    // the nearest five-minute interval before the event, and one pairing it
    // with the nearest five-minute interval after the event.
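
The registrator named in spark.kryo.registrator lives in the example project and is not shown. A minimal sketch of such a class, assuming StandardEvent is the only Avro type that needs registering:

    // Hedged sketch of a Spark KryoRegistrator for the Avro-generated event class;
    // the registration choices are assumptions, not the project's exact code.
    public class AvroKyroRegistrator implements org.apache.spark.serializer.KryoRegistrator {
      @Override
      public void registerClasses(com.esotericsoftware.kryo.Kryo kryo) {
        kryo.register(StandardEvent.class);
      }
    }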

Examples of org.apache.spark.api.java.JavaSparkContext

  private transient JavaSparkContext sc;
  private transient JavaSQLContext sqlContext;

  @Before
  public void setUp() {
    // Fresh local context for each test; JavaSQLContext is the Spark 1.0-era SQL entry point.
    sc = new JavaSparkContext("local", "JavaAPISuite");
    sqlContext = new JavaSQLContext(sc);
  }
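
A matching teardown (not shown in the snippet) should stop the context so tests do not leak it; a minimal sketch, with the method name assumed:

  @After
  public void tearDown() {
    if (sc != null) {
      sc.stop(); // releases the SparkContext and its resources
      sc = null;
    }
    sqlContext = null;
  }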