Package org.apache.hadoop.io

Examples of org.apache.hadoop.io.DataInputBuffer.reset()

DataInputBuffer.reset() repositions an in-memory input buffer over a byte array, either reset(byte[] input, int length) for a prefix of the array or reset(byte[] input, int start, int length) for a sub-range, so that Writable objects and other DataInput-based readers can deserialize directly from memory without copying.
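As a self-contained starting point, here is a minimal sketch of the usual round-trip pattern: serialize a Writable into a DataOutputBuffer, then reset() a DataInputBuffer over those bytes to read it back. The class name and the choice of Text as the Writable are illustrative assumptions, not taken from any snippet below.

    import java.io.IOException;
    import org.apache.hadoop.io.DataInputBuffer;
    import org.apache.hadoop.io.DataOutputBuffer;
    import org.apache.hadoop.io.Text;

    public class DataInputBufferResetDemo {
      public static void main(String[] args) throws IOException {
        // Serialize a Writable into an in-memory output buffer
        DataOutputBuffer out = new DataOutputBuffer();
        new Text("hello").write(out);

        // reset(byte[], int, int) points the input buffer at the written
        // bytes; getData() can be longer than getLength(), so pass the
        // valid length explicitly
        DataInputBuffer in = new DataInputBuffer();
        in.reset(out.getData(), 0, out.getLength());

        Text roundTripped = new Text();
        roundTripped.readFields(in);
        System.out.println(roundTripped);  // prints "hello"
      }
    }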


Sanity-check logging of shuffled map output (Hadoop MapReduce shuffle):

        // TODO: Remove this after a 'fix' for HADOOP-3647
        if (mapOutputLength > 0) {
          DataInputBuffer dib = new DataInputBuffer();
          // Point the buffer at the entire shuffled byte array
          dib.reset(shuffleData, 0, shuffleData.length);
          // Log the first two VInts of the data (the first record's
          // key and value lengths) as a sanity check
          LOG.info("Rec #1 from " + mapOutputLoc.getTaskAttemptId() + " -> (" +
                   WritableUtils.readVInt(dib) + ", " +
                   WritableUtils.readVInt(dib) + ") from " +
                   mapOutputLoc.getHost());
        }


Rebuilding an old-API InputSplit from its serialized bytes (Hadoop MapReduce):

    DataInputBuffer splitBuffer = new DataInputBuffer();
    // Wrap the raw split bytes so the InputSplit can deserialize itself
    splitBuffer.reset(split.getBytes(), 0, split.getLength());
    inputSplit.readFields(splitBuffer);

    updateJobWithSplit(job, inputSplit);
    reporter.setInputSplit(inputSplit);

Rebuilding a new-API (org.apache.hadoop.mapreduce) InputSplit via a SerializationFactory:

      (org.apache.hadoop.mapreduce.InputFormat<INKEY,INVALUE>)
        ReflectionUtils.newInstance(taskContext.getInputFormatClass(), job);
    // rebuild the input split
    org.apache.hadoop.mapreduce.InputSplit split = null;
    DataInputBuffer splitBuffer = new DataInputBuffer();
    splitBuffer.reset(rawSplit.getBytes(), 0, rawSplit.getLength());
    // Look up a Deserializer for the split's concrete class; the buffer
    // is then handed to it as the byte source
    SerializationFactory factory = new SerializationFactory(job);
    Deserializer<? extends org.apache.hadoop.mapreduce.InputSplit>
      deserializer =
        (Deserializer<? extends org.apache.hadoop.mapreduce.InputSplit>)
        factory.getDeserializer(job.getClassByName(splitClass));
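The excerpt ends before the deserializer is actually used. A minimal sketch of the typical continuation, assuming only the standard Hadoop Deserializer contract (open/deserialize/close) and the variables declared above:

    // DataInputBuffer is an InputStream, so it can be passed directly
    deserializer.open(splitBuffer);
    split = deserializer.deserialize(null);  // null: allocate a fresh split
    deserializer.close();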

Decoding a serialized log entry with the two-argument reset(byte[], int) (Apache Accumulo):

      return Arrays.copyOf(out.getData(), out.getLength());
    }

    public void fromBytes(byte bytes[]) throws IOException {
      DataInputBuffer inp = new DataInputBuffer();
      // Two-argument form: read from offset 0 up to bytes.length
      inp.reset(bytes, bytes.length);
      extent = new KeyExtent();
      extent.readFields(inp);
      timestamp = inp.readLong();
      server = inp.readUTF();
      filename = inp.readUTF();

Reading merge state (a MergeInfo Writable) from a ZooKeeper node (Apache Accumulo):

        String path = ZooUtil.getRoot(instance.getInstanceID()) + Constants.ZTABLES + "/" + tableId.toString() + "/merge";
        if (!ZooReaderWriter.getInstance().exists(path))
          return new MergeInfo();
        byte[] data = ZooReaderWriter.getInstance().getData(path, new Stat());
        DataInputBuffer in = new DataInputBuffer();
        // Deserialize the Writable straight from the node's bytes
        in.reset(data, data.length);
        MergeInfo info = new MergeInfo();
        info.readFields(in);
        return info;
      } catch (KeeperException.NoNodeException ex) {
        log.info("Error reading merge state, it probably just finished");

A generic helper that populates an existing Writable from a byte array:

    if (w == null) {
      throw new IllegalArgumentException("Writable cannot be null");
    }
    DataInputBuffer in = new DataInputBuffer();
    try {
      // reset(byte[], int) positions the buffer over the whole array
      in.reset(bytes, bytes.length);
      w.readFields(in);
      return w;
    } finally {
      in.close();
    }
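For symmetry, a minimal sketch of the complementary serialization helper built on DataOutputBuffer. The method name toBytes is an assumption for illustration; it is not part of the snippet above.

    import java.io.IOException;
    import java.util.Arrays;
    import org.apache.hadoop.io.DataOutputBuffer;
    import org.apache.hadoop.io.Writable;

    // Hypothetical companion helper: Writable -> byte[]
    public static byte[] toBytes(Writable w) throws IOException {
      DataOutputBuffer out = new DataOutputBuffer();
      try {
        w.write(out);
        // getData() returns the backing array, which may be longer than
        // the written data, so copy only the first getLength() bytes
        return Arrays.copyOf(out.getData(), out.getLength());
      } finally {
        out.close();
      }
    }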

Pointing a DataInputBuffer at a key inside the map-side spill buffer (Hadoop MapTask):

              while (spindex < endPosition &&
                  kvindices[kvoffsets[spindex % kvoffsets.length]
                            + PARTITION] == i) {
                final int kvoff = kvoffsets[spindex % kvoffsets.length];
                getVBytesForOffset(kvoff, value);
                // No copy is made: reset() just points the buffer at the
                // key's byte range inside kvbuffer, whose length is the
                // distance from the key's start to the value's start
                key.reset(kvbuffer, kvindices[kvoff + KEYSTART],
                          (kvindices[kvoff + VALSTART] -
                           kvindices[kvoff + KEYSTART]));
                writer.append(key, value);
                ++spindex;
              }


Round-tripping a Writable: write to a DataOutputBuffer, read back via reset():

    queryWritable.write(out);
    byte[] data = out.getData();
    int length = out.getLength();

    DataInputBuffer in = new DataInputBuffer();
    // Only the first 'length' bytes of the backing array are valid
    in.reset(data, length);

    QueryWritable newQueryWritable = new QueryWritable();
    newQueryWritable.readFields(in);

    Query termQuery = newQueryWritable.getQuery();

Rebuilding an InputSplit, then inspecting it as a FileSplit (Hadoop MapReduce):

    DataInputBuffer splitBuffer = new DataInputBuffer();
    // Wrap the raw split bytes so the InputSplit can deserialize itself
    splitBuffer.reset(split.getBytes(), 0, split.getLength());
    instantiatedSplit.readFields(splitBuffer);

    // if it is a file split, we can give more details
    if (instantiatedSplit instanceof FileSplit) {
      FileSplit fileSplit = (FileSplit) instantiatedSplit;
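The "more details" the comment refers to are typically the standard FileSplit accessors. A hedged sketch of what might follow, assuming a LOG field is in scope (the log message itself is illustrative):

    LOG.info("Processing split: " + fileSplit.getPath() +
             " start=" + fileSplit.getStart() +
             " length=" + fileSplit.getLength());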
