Package org.apache.hadoop.mapred

Examples of org.apache.hadoop.mapred.InputSplit
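
The snippets below are excerpts from open-source projects that use the old-style org.apache.hadoop.mapred API, in which an InputFormat carves its input into InputSplit instances and getRecordReader(split, job, reporter) opens a typed reader over each split. Every excerpt follows that core pattern. As a minimal, self-contained sketch of the pattern (the class name and the command-line input path are illustrative, not taken from any of the excerpts):

import java.io.IOException;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.TextInputFormat;

public class SplitReadExample {
  public static void main(String[] args) throws IOException {
    JobConf job = new JobConf();
    FileInputFormat.setInputPaths(job, new Path(args[0]));

    // TextInputFormat implements JobConfigurable, so configure it first.
    TextInputFormat input = new TextInputFormat();
    input.configure(job);

    // Ask the format for a hint of 4 splits; the actual count may differ.
    InputSplit[] splits = input.getSplits(job, 4);

    for (InputSplit split : splits) {
      RecordReader<LongWritable, Text> reader =
          input.getRecordReader(split, job, Reporter.NULL);
      LongWritable key = reader.createKey();
      Text value = reader.createValue();
      try {
        while (reader.next(key, value)) {
          System.out.println(key + "\t" + value);
        }
      } finally {
        reader.close();
      }
    }
  }
}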


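The first excerpt (likely from Apache Mahout's partial decision-forest implementation, judging by PartialOutputCollector) loops over a precomputed array of splits, opening a reader per partition while timing the slowest map: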
    secondOutput = new PartialOutputCollector(total);
    Reporter reporter = Reporter.NULL;
    long slowest = 0; // duration of slowest map

    for (int partition = 0; partition < splits.length; partition++) {
      InputSplit split = splits[partition];
      RecordReader<LongWritable, Text> reader = input.getRecordReader(split,
          job, reporter);

      // The reader supplies key/value instances of the right concrete types.
      LongWritable key = reader.createKey();
      Text value = reader.createValue();


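The same loop appears with a Step0OutputCollector, this time over splits that have been sorted into a deterministic order: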
    Step0OutputCollector collector = new Step0OutputCollector(numMaps);
    Reporter reporter = Reporter.NULL;

    for (int p = 0; p < numMaps; p++) {
      InputSplit split = sorted[p];
      RecordReader<LongWritable, Text> reader = input.getRecordReader(split, job, reporter);

      LongWritable key = reader.createKey();
      Text value = reader.createValue();

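A closely related excerpt allocates a Step0Output value and an expected id for each map before replaying the sorted splits: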
    Step0Output[] values = new Step0Output[numMaps];
   
    int[] expectedIds = new int[numMaps];
   
    for (int p = 0; p < numMaps; p++) {
      InputSplit split = sorted[p];
      RecordReader<LongWritable, Text> reader = input.getRecordReader(split, job, reporter);

      LongWritable key = reader.createKey();
      Text value = reader.createValue();


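Hive's SymlinkTextInputFormat stores the split it actually points at inside its own split type and delegates reading to a freshly configured TextInputFormat: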
  @Override
  public RecordReader<LongWritable, Text> getRecordReader(
      InputSplit split, JobConf job, Reporter reporter) throws IOException {
    InputSplit targetSplit = ((SymlinkTextInputSplit)split).getTargetSplit();

    // The target data is in TextInputFormat.
    TextInputFormat inputFormat = new TextInputFormat();
    inputFormat.configure(job);
    return inputFormat.getRecordReader(targetSplit, job, reporter);

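The next excerpt is the tail of a getSplits-style method, apparently from Hive: it wraps the FileSplits it has collected into an InputSplit array and logs how many splits went in and came out: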
      } catch (HiveException e) {
        throw new RuntimeException(
            "Unable to get metadata for input table split " + split.getPath(), e);
      }
    }
    InputSplit[] retA = newSplits.toArray(new FileSplit[newSplits.size()]);
    l4j.info("Number of input splits: " + splits.length + " new input splits: "
        + retA.length);
    return retA;
  }

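HiveInputFormat.getRecordReader unwraps a HiveInputSplit to recover both the underlying split and the name of the input format that should read it, resolving that class through the job's class loader: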
  public RecordReader getRecordReader(InputSplit split, JobConf job,
      Reporter reporter) throws IOException {

    HiveInputSplit hsplit = (HiveInputSplit) split;

    InputSplit inputSplit = hsplit.getInputSplit();
    String inputFormatClassName = null;
    Class inputFormatClass = null;
    try {
      inputFormatClassName = hsplit.inputFormatClassName();
      inputFormatClass = job.getClassByName(inputFormatClassName);

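A variant of the same method resolves the class with Class.forName instead, which uses the caller's class loader rather than the one attached to the JobConf: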
  public RecordReader getRecordReader(InputSplit split, JobConf job,
      Reporter reporter) throws IOException {

    HiveInputSplit hsplit = (HiveInputSplit)split;

    InputSplit inputSplit = hsplit.getInputSplit();
    String inputFormatClassName = null;
    Class inputFormatClass = null;
    try {
      inputFormatClassName = hsplit.inputFormatClassName();
      inputFormatClass = Class.forName(inputFormatClassName);


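This excerpt, apparently from an Accumulo input-format test, asserts that getSplits returns exactly one RangeInputSplit and that the split carries the expected principal and table name: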
    InputSplit[] splits = aif.getSplits(job, 1);

    Assert.assertEquals(1, splits.length);

    InputSplit split = splits[0];

    Assert.assertEquals(RangeInputSplit.class, split.getClass());

    RangeInputSplit risplit = (RangeInputSplit) split;

    Assert.assertEquals(username, risplit.getPrincipal());
    Assert.assertEquals(table, risplit.getTable());

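Finally, an excerpt that appears to come from the Terrier IR platform builds a BitPostingIndexInputSplit by hand from command-line arguments and opens a record reader with an anonymous stub Reporter: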
    {
      JobConf job = HadoopPlugin.getJobFactory(BitPostingIndexInputFormat.class.getSimpleName()).newJob();
      setStructures(job, "linksin", "linksin-lookup");
      HadoopUtility.toHConfiguration(index, job);
      index.close();
      InputSplit s = new BitPostingIndexInputSplit(
          new Path(args[3]), Long.parseLong(args[4]), Long.parseLong(args[5]),
          new String[0], Integer.parseInt(args[6]), Integer.parseInt(args[7]));
      RecordReader<IntWritable, IntObjectWrapper<IterablePosting>> rr = new BitPostingIndexInputFormat().getRecordReader(s, job, new Reporter(){
        public InputSplit getInputSplit() throws UnsupportedOperationException {return null;}
        @SuppressWarnings("unchecked")
