Examples of HashPartitioner


Examples of org.apache.hadoop.mapred.lib.HashPartitioner

      public void configure(JobConf job) {
        // 'key' == sortInput for sort-input; key == sortOutput for sort-output
        key = deduceInputFile(job);

        if (key == sortOutput) {
          partitioner = new HashPartitioner();

          // Figure out the 'current' partition and the number of reduces of the 'sort'
          try {
            URI inputURI = new URI(job.get("map.input.file"));
            String inputFile = inputURI.getPath();
            // ... (the original snippet is truncated here; the closing below is
            // sketched in only to keep the excerpt syntactically balanced)
          } catch (Exception e) {
            throw new RuntimeException(e);
          }
        }
      }
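
The configure() above only sets up the partitioner; its job at runtime is mapping each map output key to a reduce partition. A minimal standalone sketch of that call (the class name, key, value type, and reducer count below are illustrative, not taken from the snippet):

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.lib.HashPartitioner;

public class HashPartitionerSketch {
  public static void main(String[] args) {
    HashPartitioner<Text, IntWritable> partitioner = new HashPartitioner<Text, IntWritable>();

    // getPartition() is (key.hashCode() & Integer.MAX_VALUE) % numReduceTasks,
    // so the same key is always routed to the same reduce partition.
    Text key = new Text("http://example.com/");  // illustrative key
    int numReduceTasks = 4;                       // illustrative reducer count
    int partition = partitioner.getPartition(key, new IntWritable(1), numReduceTasks);

    System.out.println(key + " -> partition " + partition);
  }
}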

Examples of org.apache.hadoop.mapred.lib.HashPartitioner

 
  /** Looks up the CrawlDatum stored for a single URL in the crawldb. */
  public CrawlDatum get(String crawlDb, String url, Configuration config) throws IOException {
    Text key = new Text(url);
    CrawlDatum val = new CrawlDatum();
    openReaders(crawlDb, config);
    // The HashPartitioner picks the one part file that could hold this key,
    // so only a single MapFile lookup is needed.
    CrawlDatum res = (CrawlDatum) MapFileOutputFormat.getEntry(readers, new HashPartitioner(), key, val);
    return res;
  }
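
getEntry() works because the crawldb was written with the same HashPartitioner: it hashes the key to find the one part file that could contain it, then performs a single MapFile lookup there. A hedged sketch of that selection logic (the class and method names below are illustrative, not the verbatim Hadoop source):

import java.io.IOException;
import org.apache.hadoop.io.MapFile;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.lib.HashPartitioner;
import org.apache.nutch.crawl.CrawlDatum;

class CrawlDbLookupSketch {
  // Roughly what getEntry(readers, new HashPartitioner(), key, val) does:
  static CrawlDatum lookup(MapFile.Reader[] readers, Text key, CrawlDatum val)
      throws IOException {
    // hash the key to the index of the part file that would have received it
    int part = new HashPartitioner<Text, CrawlDatum>().getPartition(key, val, readers.length);
    // a single sorted-file lookup in just that part file
    return (CrawlDatum) readers[part].get(key, val);
  }
}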

Examples of org.apache.hadoop.mapred.lib.HashPartitioner

 
  public CrawlDatum get(String crawlDb, String url, Configuration config) throws IOException {
    // older variant of the lookup above: the key is the since-deprecated UTF8 type rather than Text
    UTF8 key = new UTF8(url);
    CrawlDatum val = new CrawlDatum();
    openReaders(crawlDb, config);
    CrawlDatum res = (CrawlDatum) MapFileOutputFormat.getEntry(readers, new HashPartitioner(), key, val);
    return res;
  }

Examples of org.apache.spark.HashPartitioner

    JavaDStream<Tuple2<String, Integer>> stream = JavaTestUtils.attachTestInputStream(
      ssc, inputData, 1);
    JavaPairDStream<String, Integer> pairStream = JavaPairDStream.fromJavaDStream(stream);

    // combineByKey(createCombiner, mergeValue, mergeCombiners, partitioner):
    // per-key sums, spread over two partitions by HashPartitioner
    JavaPairDStream<String, Integer> combined = pairStream.<Integer>combineByKey(i -> i,
      (x, y) -> x + y, (x, y) -> x + y, new HashPartitioner(2));

    JavaTestUtils.attachTestOutputStream(combined);
    List<List<Tuple2<String, Integer>>> result = JavaTestUtils.runStreams(ssc, 2, 2);

    Assert.assertEquals(expected, result);
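
Here new HashPartitioner(2) tells combineByKey to shuffle the per-key results into two partitions by key hash. A minimal sketch of what the partitioner itself computes (the class name and keys below are illustrative):

import org.apache.spark.HashPartitioner;

public class SparkHashPartitionerSketch {
  public static void main(String[] args) {
    // same partitioner the tests pass to combineByKey
    HashPartitioner partitioner = new HashPartitioner(2);

    // getPartition() is a non-negative key.hashCode() modulo numPartitions,
    // so equal keys always end up in the same partition
    for (String key : new String[] {"a", "b", "c"}) {
      System.out.println(key + " -> partition " + partitioner.getPartition(key));
    }
    System.out.println("numPartitions = " + partitioner.numPartitions());
  }
}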

Examples of org.apache.spark.HashPartitioner

    // same call as the lambda-based example above, written with anonymous Function classes
    JavaPairDStream<String, Integer> combined = pairStream.<Integer>combineByKey(
        new Function<Integer, Integer>() {
          @Override
          public Integer call(Integer i) throws Exception {
            return i;
          }
        }, new IntegerSum(), new IntegerSum(), new HashPartitioner(2));

    JavaTestUtils.attachTestOutputStream(combined);
    List<List<Tuple2<String, Integer>>> result = JavaTestUtils.runStreams(ssc, 2, 2);

    Assert.assertEquals(expected, result);
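
The same four-argument combineByKey is also available on plain JavaPairRDDs outside streaming; a minimal batch-mode sketch, assuming a local SparkContext and made-up sample data (everything below is illustrative, not part of the test above):

import java.util.Arrays;
import org.apache.spark.HashPartitioner;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaSparkContext;
import scala.Tuple2;

public class CombineByKeyBatchSketch {
  public static void main(String[] args) {
    JavaSparkContext sc = new JavaSparkContext("local[2]", "combine-by-key-sketch");

    JavaPairRDD<String, Integer> pairs = sc.parallelizePairs(Arrays.asList(
        new Tuple2<>("a", 1), new Tuple2<>("b", 2), new Tuple2<>("a", 3)));

    // createCombiner, mergeValue, mergeCombiners, and a 2-way HashPartitioner,
    // mirroring the streaming calls above
    JavaPairRDD<String, Integer> sums = pairs.<Integer>combineByKey(
        i -> i, (x, y) -> x + y, (x, y) -> x + y, new HashPartitioner(2));

    System.out.println(sums.collectAsMap());  // per-key sums: a -> 4, b -> 2
    sc.stop();
  }
}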
