Package org.apache.hadoop.mapreduce

Examples of org.apache.hadoop.mapreduce.TaskAttemptID
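
TaskAttemptID identifies one attempt of a map or reduce task within a job. The snippets below, drawn from Apache Pig, Crunch, Howl/HCatalog, Cassandra, and Hive, all call the no-argument constructor new TaskAttemptID() to obtain a placeholder id for local or test execution. For orientation, here is a minimal sketch of building and parsing a fully-specified id (assuming Hadoop 0.21+, where the TaskType-based constructor is available):

    import org.apache.hadoop.mapreduce.TaskAttemptID;
    import org.apache.hadoop.mapreduce.TaskType;

    // jobtracker id, job number, task type, task number, attempt number
    TaskAttemptID id = new TaskAttemptID("200707121733", 3, TaskType.MAP, 5, 0);
    System.out.println(id);            // attempt_200707121733_0003_m_000005_0
    System.out.println(id.getJobID()); // job_200707121733_0003

    // Round-trip from the string form.
    TaskAttemptID parsed = TaskAttemptID.forName("attempt_200707121733_0003_m_000005_0");

    // The no-argument constructor, used throughout the examples below,
    // yields a placeholder id that is sufficient for local or test contexts.
    TaskAttemptID dummy = new TaskAttemptID();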


        private boolean init = false;

        public IllustratorContext(Configuration conf, DataBag input,
              List<Pair<PigNullableWritable, Writable>> output,
              InputSplit split) throws IOException, InterruptedException {
              super(conf, new TaskAttemptID(), null, null, null, null, split);
              if (output == null)
                  throw new IOException("Null output can not be used");
              this.input = input;
              this.output = output;
        }
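
These first two fragments are from Apache Pig's ILLUSTRATE support. The map-side IllustratorContext above feeds sample tuples through a real Mapper.Context, with a dummy TaskAttemptID satisfying the superclass constructor in the absence of a live task. Its reduce-side counterpart, next, does the same for Reducer.Context, additionally faking the shuffle input: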


            public IllustratorContext(Job job,
                  List<Pair<PigNullableWritable, Writable>> input,
                  POPackage pkg
                  ) throws IOException, InterruptedException {
                // Reducer.Context's constructor expects a raw key/value iterator from the
                // shuffle; a fake one wraps the prepared input, and a dummy TaskAttemptID
                // fills the id slot.
                super(job.getJobConf(), new TaskAttemptID(), new FakeRawKeyValueIterator(input.iterator().hasNext()),
                    null, null, null, null, null, null, PigNullableWritable.class, NullableTuple.class);
                bos = new ByteArrayOutputStream();
                dos = new DataOutputStream(bos);
                org.apache.hadoop.mapreduce.Job nwJob = new org.apache.hadoop.mapreduce.Job(job.getJobConf());
                sortComparator = nwJob.getSortComparator();
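
The next two fragments use Javassist's ProxyFactory to fabricate a TaskInputOutputContext around a dummy TaskAttemptID, a pattern that matches Apache Crunch's in-memory pipeline; the first fragment picks up partway through a method signature: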

      final Accumulator<Map<String, Long>> counters) {
    ProxyFactory factory = new ProxyFactory();
    Class<TaskInputOutputContext> superType = TaskInputOutputContext.class;
    Class[] types = new Class[0];
    Object[] args = new Object[0];
    final TaskAttemptID taskAttemptId = new TaskAttemptID();
    if (superType.isInterface()) {
      factory.setInterfaces(new Class[] { superType });
    } else {
      types = new Class[] { Configuration.class, TaskAttemptID.class, RecordWriter.class, OutputCommitter.class,
          StatusReporter.class };

  private static TaskInputOutputContext<?, ?, ?, ?> getInMemoryContext(final Configuration conf) {
    ProxyFactory factory = new ProxyFactory();
    Class<TaskInputOutputContext> superType = TaskInputOutputContext.class;
    Class[] types = new Class[0];
    Object[] args = new Object[0];
    final TaskAttemptID taskAttemptId = new TaskAttemptID();
    if (superType.isInterface()) {
      factory.setInterfaces(new Class[] { superType });
    } else {
      types = new Class[] { Configuration.class, TaskAttemptID.class, RecordWriter.class, OutputCommitter.class,
          StatusReporter.class };
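
Both fragments stop just before the proxy is created. Here is a self-contained sketch of the same Javassist technique; it proxies a hypothetical FakeContext class rather than the real TaskInputOutputContext so that it runs stand-alone (ProxyContextSketch and FakeContext are illustrative names, not from the original source):

    import java.lang.reflect.Method;
    import javassist.util.proxy.MethodHandler;
    import javassist.util.proxy.ProxyFactory;
    import org.apache.hadoop.mapreduce.TaskAttemptID;

    public class ProxyContextSketch {
      // Stand-in for the abstract context class being proxied.
      public abstract static class FakeContext {
        public abstract TaskAttemptID getTaskAttemptID();
      }

      public static void main(String[] args) throws Exception {
        final TaskAttemptID taskAttemptId = new TaskAttemptID();
        ProxyFactory factory = new ProxyFactory();
        factory.setSuperclass(FakeContext.class);
        MethodHandler handler = new MethodHandler() {
          @Override
          public Object invoke(Object self, Method m, Method proceed, Object[] a) {
            // Answer getTaskAttemptID() with the dummy id; ignore everything else.
            return "getTaskAttemptID".equals(m.getName()) ? taskAttemptId : null;
          }
        };
        FakeContext ctx =
            (FakeContext) factory.create(new Class[0], new Object[0], handler);
        System.out.println(ctx.getTaskAttemptID());
      }
    }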

    InputSplit split = iF.getSplits(jc).get(0);
    sd.setOriginalSchema(jc, schema);
    sd.setOutputSchema(jc, schema);
    sd.initialize(jc, getProps());

    TaskAttemptContext tac = new TaskAttemptContext(conf, new TaskAttemptID());
    RecordReader<?,?> rr = iF.createRecordReader(split,tac);
    rr.initialize(split, tac);
    HowlRecord[] tuples = getExpectedRecords();
    for(int j=0; j < 2; j++){
      Assert.assertTrue(rr.nextKeyValue());

    sd.setOriginalSchema(jc, buildHiveSchema());
    sd.setOutputSchema(jc, buildPrunedSchema());

    sd.initialize(jc, getProps());
    conf.set(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR,jc.getConfiguration().get(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR));
    TaskAttemptContext tac = new TaskAttemptContext(conf, new TaskAttemptID());
    RecordReader<?,?> rr = iF.createRecordReader(split,tac);
    rr.initialize(split, tac);
    HowlRecord[] tuples = getPrunedRecords();
    for(int j=0; j < 2; j++){
      Assert.assertTrue(rr.nextKeyValue());

    sd.initialize(jc, getProps());
    Map<String,String> map = new HashMap<String,String>(1);
    map.put("part1", "first-part");
    sd.setPartitionValues(jc, map);
    conf.set(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR,jc.getConfiguration().get(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR));
    TaskAttemptContext tac = new TaskAttemptContext(conf, new TaskAttemptID());
    RecordReader<?,?> rr = iF.createRecordReader(split,tac);
    rr.initialize(split, tac);
    HowlRecord[] tuples = getReorderedCols();
    for(int j=0; j < 2; j++){
      Assert.assertTrue(rr.nextKeyValue());
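
The three Howl (later HCatalog) test fragments above share one read pattern: build a TaskAttemptContext from the configuration and a dummy TaskAttemptID, create a RecordReader for a split, initialize it, and iterate. Note that new TaskAttemptContext(conf, new TaskAttemptID()) is the Hadoop 1.x API; in Hadoop 2.x TaskAttemptContext became an interface (see the shim-based fragment at the end of this page). The skeleton, with inputFormat, split, and conf standing for objects set up earlier in each test:

    TaskAttemptContext tac = new TaskAttemptContext(conf, new TaskAttemptID());
    RecordReader<?, ?> rr = inputFormat.createRecordReader(split, tac);
    rr.initialize(split, tac);
    while (rr.nextKeyValue()) {
      Object value = rr.getCurrentValue(); // compare against the expected records
    }
    rr.close();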

      context.getConfiguration().setLong("mapred.max.split.size",maxSplitSize);
      List<InputSplit> splits = inputFormat.getSplits(context);
      assertEquals("splits length should be " + splitNumber, splits.size(), splitNumber);
      int readCount = 0;
      for (int i = 0; i < splits.size(); i++) {
        TaskAttemptContext tac = new TaskAttemptContext(jonconf, new TaskAttemptID());
        RecordReader<LongWritable, BytesRefArrayWritable> rr = inputFormat.createRecordReader(splits.get(i), tac);
        rr.initialize(splits.get(i), tac);
        while (rr.nextKeyValue()) {
          readCount++;
        }
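
The RCFile test above controls how many splits come back by capping mapred.max.split.size before calling getSplits(). The same knob in isolation (the 64 MB value is illustrative):

    Configuration conf = new Configuration();
    // Cap each split at 64 MB; a splittable file of length L then yields
    // roughly ceil(L / 64 MB) splits.
    conf.setLong("mapred.max.split.size", 64L * 1024 * 1024);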

    //
    // Old Hadoop API
    //
    public org.apache.hadoop.mapred.InputSplit[] getSplits(JobConf jobConf, int numSplits) throws IOException
    {
        TaskAttemptContext tac = new TaskAttemptContext(jobConf, new TaskAttemptID());
        List<org.apache.hadoop.mapreduce.InputSplit> newInputSplits = this.getSplits(tac);
        org.apache.hadoop.mapred.InputSplit[] oldInputSplits = new org.apache.hadoop.mapred.InputSplit[newInputSplits.size()];
        for (int i = 0; i < newInputSplits.size(); i++)
            oldInputSplits[i] = (ColumnFamilySplit)newInputSplits.get(i);
        return oldInputSplits;
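
This getSplits() bridge is from Cassandra's ColumnFamilyInputFormat: it wraps the new-API split list into the old org.apache.hadoop.mapred interface. A dummy TaskAttemptID suffices here, presumably because split computation consults only the job configuration, never a task identity.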

    context.getConfiguration().setLong("mapred.max.split.size", maxSplitSize);
    List<InputSplit> splits = inputFormat.getSplits(context);
    assertEquals("splits length should be " + splitNumber, splits.size(), splitNumber);
    int readCount = 0;
    for (int i = 0; i < splits.size(); i++) {
      TaskAttemptContext tac = ShimLoader.getHadoopShims().getHCatShim().createTaskAttemptContext(jonconf, new TaskAttemptID());
      RecordReader<LongWritable, BytesRefArrayWritable> rr = inputFormat.createRecordReader(splits.get(i), tac);
      rr.initialize(splits.get(i), tac);
      while (rr.nextKeyValue()) {
        readCount++;
      }
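
This last fragment repeats the RCFile split test but obtains its context through ShimLoader, so the same code compiles against both Hadoop 1.x (where TaskAttemptContext is a class) and Hadoop 2.x (where it is an interface). On Hadoop 2.x, the shim amounts to roughly this sketch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.TaskAttemptContext;
    import org.apache.hadoop.mapreduce.TaskAttemptID;
    import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;

    Configuration conf = new Configuration();
    // Hadoop 2.x: instantiate the concrete class behind the interface.
    TaskAttemptContext tac = new TaskAttemptContextImpl(conf, new TaskAttemptID());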
