Examples of readFields()

Examples of org.apache.giraph.utils.io.DataInputOutput.readFields()

    // Deserialize the in-memory message store: each entry is a vertex id
    // followed by that vertex's serialized messages.
    int mapSize = in.readInt();
    for (int m = 0; m < mapSize; m++) {
      I vertexId = config.createVertexId();
      vertexId.readFields(in);
      DataInputOutput dataInputOutput = config.createMessagesInputOutput();
      dataInputOutput.readFields(in);
      inMemoryMessages.put(vertexId, dataInputOutput);
    }

    // read file stores
    int numFileStores = in.readInt();
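
The read loop above implies a mirror-image write path. A minimal sketch of what that side could look like (variable names here are assumptions, not the actual Giraph source):

    // Hypothetical write counterpart: emit the map size, then each vertex id
    // and its messages in exactly the order readFields() consumes them.
    out.writeInt(inMemoryMessages.size());
    for (Map.Entry<I, DataInputOutput> entry : inMemoryMessages.entrySet()) {
      entry.getKey().write(out);
      entry.getValue().write(out);
    }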

Examples of org.apache.giraph.worker.WorkerInfo.readFields()

    // Rebuild the worker list: a count followed by one serialized WorkerInfo each.
    int workerInfosSize = input.readInt();
    workerInfos = Lists.newArrayListWithCapacity(workerInfosSize);
    for (int i = 0; i < workerInfosSize; i++) {
      WorkerInfo workerInfo = new WorkerInfo();
      workerInfo.readFields(input);
      workerInfos.add(workerInfo);
    }

    Map<Integer, WorkerInfo> workerInfoMap = getAsWorkerInfoMap(workerInfos);
    int additionalWorkerInfos = input.readInt();

Examples of org.apache.hadoop.conf.Configuration.readFields()

    Preconditions.checkNotNull(byteString, "ByteString must be specified");
//    SnappyInputStream uncompressIs = new SnappyInputStream(byteString.newInput());
    // Inflate the DEFLATE-compressed payload, then deserialize it into a
    // fresh Configuration (loadDefaults = false) via readFields().
    InflaterInputStream uncompressIs = new InflaterInputStream(byteString.newInput());
    DataInputStream dataInputStream = new DataInputStream(uncompressIs);
    Configuration conf = new Configuration(false);
    conf.readFields(dataInputStream);
    return conf;
  }
 
  public static Configuration createConfFromUserPayload(byte[] bb)
      throws IOException {
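
A plausible sketch of the opposite direction, compressing a Configuration into a byte array (the helper name and the use of ByteArrayOutputStream are assumptions; the original presumably writes into a protobuf ByteString):

    // Hypothetical inverse of the snippet above: Configuration.write() pairs
    // with readFields(), and DeflaterOutputStream pairs with InflaterInputStream.
    public static byte[] serializeConf(Configuration conf) throws IOException {
      ByteArrayOutputStream bos = new ByteArrayOutputStream();
      DataOutputStream dataOut = new DataOutputStream(new DeflaterOutputStream(bos));
      conf.write(dataOut);
      dataOut.close();  // flushes and finishes the deflater
      return bos.toByteArray();
    }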

Examples of org.apache.hadoop.fs.FSDataNodeReadProfilingData.readFields()

  }
 
  // When client-side profiling is enabled, pull the datanode's read-profiling
  // counters off the wire and attach them to the client's profiling data.
  private void readDataNodeProfilingData() throws IOException {
    if (cliData != null) {
      FSDataNodeReadProfilingData dnData = new FSDataNodeReadProfilingData();
      dnData.readFields(in);
      cliData.addDataNodeReadProfilingData(dnData);
    }
  }

  @Override

Examples of org.apache.hadoop.fs.FileStatus.readFields()

    // Round-trip through the Writable contract: serialize with write(),
    // then rehydrate a second instance with readFields().
    buffer.reset();
    DataOutputStream out = new DataOutputStream(buffer);
    fileStatus.write(out);

    in.reset(buffer.toByteArray(), 0, buffer.size());
    status.readFields(in);
    return status;
  }
}
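
The same pattern works for any Writable; a self-contained sketch using Hadoop's DataOutputBuffer and DataInputBuffer (the helper name is made up for illustration):

    // Copy any Writable by serializing it and reading the bytes back into a
    // caller-supplied empty instance.
    public static <T extends Writable> T roundTrip(T src, T dst) throws IOException {
      DataOutputBuffer out = new DataOutputBuffer();
      src.write(out);
      DataInputBuffer in = new DataInputBuffer();
      in.reset(out.getData(), 0, out.getLength());
      dst.readFields(in);
      return dst;
    }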

Examples of org.apache.hadoop.fs.MD5MD5CRC32FileChecksum.readFields()

        checksum = new MD5MD5CRC32CastagnoliFileChecksum();
        break;
      default:
        throw new IOException("Unknown algorithm: " + algorithm);
    }
    // Populate the concrete checksum type chosen above from the stream.
    checksum.readFields(in);

    //check algorithm name
    if (!checksum.getAlgorithmName().equals(algorithm)) {
      throw new IOException("Algorithm not matched. Expected " + algorithm
          + ", Received " + checksum.getAlgorithmName());

Examples of org.apache.hadoop.hbase.BloomFilterDescriptor.readFields()

      // If a bloomFilter is enabled and the column descriptor is less than
      // version 5, we need to skip over it to read the rest of the column
      // descriptor. There are no BloomFilterDescriptors written to disk for
      // column descriptors with a version number >= 5
      BloomFilterDescriptor junk = new BloomFilterDescriptor();
      junk.readFields(in);
    }
    if (versionNumber > 1) {
      this.blockCacheEnabled = in.readBoolean();
    }
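
The snippet shows a common Writable versioning idiom: a version number written ahead of the payload lets readFields() skip or reinterpret obsolete fields. A generic sketch of the idea (the surrounding fields are illustrative, not the actual HBase code):

    // Illustrative versioned readFields(): branch on a leading version byte
    // so old serialized forms remain readable.
    public void readFields(DataInput in) throws IOException {
      byte versionNumber = in.readByte();
      boolean bloomFilterEnabled = in.readBoolean();
      if (bloomFilterEnabled && versionNumber < 5) {
        // Old format carried a descriptor that newer versions omit; read and
        // discard it so the stream stays aligned for the fields that follow.
        new BloomFilterDescriptor().readFields(in);
      }
      if (versionNumber > 1) {
        this.blockCacheEnabled = in.readBoolean();
      }
    }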

Examples of org.apache.hadoop.hbase.HColumnDescriptor.readFields()

      hcolBytes = Hex.decodeHex(hcolStr.toCharArray());
    } catch (DecoderException e) {
      throw new AssertionError("Bad hex string: " + hcolStr);
    }
    // Rehydrate the HColumnDescriptor from its hex-decoded Writable bytes.
    HColumnDescriptor hcol = new HColumnDescriptor();
    hcol.readFields(new DataInputStream(new ByteArrayInputStream(hcolBytes)));
    LOG.info("Output path: " + outputPath);
    LOG.info("HColumnDescriptor: " + hcol.toString());
    final HFile.Writer writer = HFile.getWriterFactoryNoCache(conf)
        .withPath(fs, outputPath)
        .withBlockSize(hcol.getBlocksize())
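
The hex string decoded above can be produced by the inverse steps; a sketch using commons-codec (the method name is hypothetical):

    // Hypothetical encoder: serialize the descriptor with write(), then
    // hex-encode the bytes so they can ride along in a job configuration.
    public static String toHexString(HColumnDescriptor hcol) throws IOException {
      ByteArrayOutputStream bos = new ByteArrayOutputStream();
      hcol.write(new DataOutputStream(bos));
      return Hex.encodeHexString(bos.toByteArray());
    }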

Examples of org.apache.hadoop.hbase.HRegionInfo.readFields()

    // Read the serialized HRegionInfo from the region's .regioninfo file.
    Path regioninfo = new Path(regionDir, HRegion.REGIONINFO_FILE);
    FileSystem fs = regioninfo.getFileSystem(getConf());

    FSDataInputStream in = fs.open(regioninfo);
    HRegionInfo hri = new HRegionInfo();
    hri.readFields(in);
    in.close();
    LOG.debug("HRegionInfo read: " + hri.toString());
    hbi.hdfsEntry.hri = hri;
  }

Examples of org.apache.hadoop.hbase.HServerAddress.readFields()

    // Rebuild the region-to-server map: a count followed by alternating
    // serialized HRegionInfo / HServerAddress pairs.
    int regionsCount = in.readInt();
    for (int i = 0; i < regionsCount; ++i) {
      HRegionInfo hri = new HRegionInfo();
      hri.readFields(in);
      HServerAddress hsa = new HServerAddress();
      hsa.readFields(in);
      allRegions.put(hri, hsa);
    }
    return allRegions;
  }
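
As with the Giraph map at the top of the page, the write side presumably emits the count and then each key/value pair in readFields() order; a minimal sketch (method name assumed):

    // Hypothetical writer matching the read loop above.
    void writeRegions(DataOutput out, Map<HRegionInfo, HServerAddress> allRegions)
        throws IOException {
      out.writeInt(allRegions.size());
      for (Map.Entry<HRegionInfo, HServerAddress> e : allRegions.entrySet()) {
        e.getKey().write(out);
        e.getValue().write(out);
      }
    }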