Package org.apache.hadoop.util

Examples of org.apache.hadoop.util.DataChecksum
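DataChecksum pairs a checksum algorithm (CRC32, CRC32C, or NULL) with a fixed bytes-per-checksum chunk size, and knows how to serialize both its header and its checksum values. The snippets below show how HDFS uses it to write, truncate, transfer, and verify block metadata files. As a warm-up, a minimal self-contained sketch of the core API, using the older int-constant factory that these snippets use (newer Hadoop versions take a DataChecksum.Type enum instead):

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import org.apache.hadoop.util.DataChecksum;

public class DataChecksumSketch {
  public static void main(String[] args) throws Exception {
    // One CRC32 value per 512-byte chunk of data.
    DataChecksum sum =
        DataChecksum.newDataChecksum(DataChecksum.CHECKSUM_CRC32, 512);

    byte[] data = new byte[512];       // one full chunk of (zero) data
    sum.update(data, 0, data.length);  // accumulate the chunk

    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(bos);
    sum.writeHeader(out);       // checksum type + bytesPerChecksum
    sum.writeValue(out, true);  // the CRC bytes; 'true' also resets the sum

    System.out.println("header + one checksum = " + bos.size() + " bytes");
  }
}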


Computing a whole-block checksum as the MD5 digest of the per-chunk CRCs stored in the block's metadata file:

    final DataInputStream checksumIn = new DataInputStream(
        new BufferedInputStream(metadataIn, BUFFER_SIZE));

    try {
      //read metadata file
      final BlockMetadataHeader header = BlockMetadataHeader.readHeader(checksumIn);
      final DataChecksum checksum = header.getChecksum();
      final int bytesPerCRC = checksum.getBytesPerChecksum();
      final long crcPerBlock = (metadataIn.getLength()
          - BlockMetadataHeader.getHeaderSize())/checksum.getChecksumSize();
     
      //compute block checksum
      final MD5Hash md5 = MD5Hash.digest(checksumIn);

      if (LOG.isDebugEnabled()) {
        LOG.debug("block=" + block + ", bytesPerCRC=" + bytesPerCRC
            + ", crcPerBlock=" + crcPerBlock + ", md5=" + md5);
      }
      // ... reply to the client with (bytesPerCRC, crcPerBlock, md5)
    } finally {
      IOUtils.closeStream(checksumIn);
      IOUtils.closeStream(metadataIn);
    }
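The chunk geometry is derived from the metadata file's length alone. A small worked example with hypothetical numbers, assuming CRC32 (4-byte values), 512 bytes per chunk, and the usual 7-byte BlockMetadataHeader (2-byte version plus 5-byte checksum header):

    long headerSize = 7;                           // BlockMetadataHeader.getHeaderSize()
    long metaLen = headerSize + 4096 * 4;          // metadata file holding 4096 CRCs
    long crcPerBlock = (metaLen - headerSize) / 4; // = 4096 chunks
    long coveredBytes = crcPerBlock * 512;         // covers up to 2 MiB of block data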


Truncating a block and its metadata file: trim the block, then recompute and rewrite the checksum of the new (partial) last chunk:

    if (newlen > oldlen) {
      throw new IOException("Cannot truncate block from oldlen (=" + oldlen
          + ") to newlen (=" + newlen + ")");
    }

    DataChecksum dcs = BlockMetadataHeader.readHeader(metaFile).getChecksum();
    int checksumsize = dcs.getChecksumSize();
    int bpc = dcs.getBytesPerChecksum();
    long n = (newlen - 1)/bpc + 1;
    long newmetalen = BlockMetadataHeader.getHeaderSize() + n*checksumsize;
    long lastchunkoffset = (n - 1)*bpc;
    int lastchunksize = (int)(newlen - lastchunkoffset);
    byte[] b = new byte[Math.max(lastchunksize, checksumsize)];

    RandomAccessFile blockRAF = new RandomAccessFile(blockFile, "rw");
    try {
      //truncate blockFile
      blockRAF.setLength(newlen);
      //read last chunk
      blockRAF.seek(lastchunkoffset);
      blockRAF.readFully(b, 0, lastchunksize);
    } finally {
      blockRAF.close();
    }

    //compute checksum
    dcs.update(b, 0, lastchunksize);
    dcs.writeValue(b, 0, false);

    //update metaFile
    RandomAccessFile metaRAF = new RandomAccessFile(metaFile, "rw");
    try {
      metaRAF.setLength(newmetalen);
      // overwrite the checksum of the new last chunk
      metaRAF.seek(newmetalen - checksumsize);
      metaRAF.write(b, 0, checksumsize);
    } finally {
      metaRAF.close();
    }
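The index arithmetic above is worth tracing once. A worked example with hypothetical values bpc = 512, checksumsize = 4, newlen = 1000:

    long newlen = 1000;
    int bpc = 512, checksumsize = 4;
    long n = (newlen - 1) / bpc + 1;               // = 2, i.e. ceiling(newlen / bpc)
    long lastchunkoffset = (n - 1) * bpc;          // = 512, start of the last chunk
    int lastchunksize = (int) (newlen - lastchunkoffset); // = 488 partial bytes
    long newmetalen = 7 + n * checksumsize;        // header + 2 CRCs = 15 bytes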

Generating a block metadata file from a data stream: write the version, then the DataChecksum header, then one CRC per chunk of input:

      // First, write out the version.
      mdOut.writeShort(BlockMetadataHeader.VERSION);
     
      // Create a summer and write out its header.
      int bytesPerChecksum = conf.getInt("io.bytes.per.checksum", 512);
      DataChecksum sum =
        DataChecksum.newDataChecksum(DataChecksum.CHECKSUM_CRC32,
                                     bytesPerChecksum);
      sum.writeHeader(mdOut);
     
      // Buffer to read in a chunk of data.
      byte[] buf = new byte[bytesPerChecksum];
      // Buffer to store the checksum bytes.
      byte[] chk = new byte[sum.getChecksumSize()];
     
      // Read data till we reach the end of the input stream.
      int bytesSinceFlush = 0;
      while (true) {
        // Read some bytes.
        int bytesRead = dataStream.read(buf, bytesSinceFlush,
                                        bytesPerChecksum-bytesSinceFlush);
        if (bytesRead == -1) {
          if (bytesSinceFlush > 0) {
            boolean reset = true;
            sum.writeValue(chk, 0, reset); // This also resets the sum.
            // Write the checksum to the stream.
            mdOut.write(chk, 0, chk.length);
            bytesSinceFlush = 0;
          }
          break;
        }
        // Update the checksum.
        sum.update(buf, bytesSinceFlush, bytesRead);
        bytesSinceFlush += bytesRead;
       
        // Flush the checksum if necessary.
        if (bytesSinceFlush == bytesPerChecksum) {
          boolean reset = true;
          sum.writeValue(chk, 0, reset); // This also resets the sum.
          // Write the checksum to the stream.
          mdOut.write(chk, 0, chk.length);
          bytesSinceFlush = 0;
        }
      }
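Verification is the mirror image of generation: recompute each chunk's CRC and compare it with the stored bytes. A minimal sketch, assuming a hypothetical ChunkVerifier helper that is not part of the Hadoop API:

import org.apache.hadoop.util.DataChecksum;

public class ChunkVerifier {
  /** Recompute one chunk's checksum and compare it with the stored value. */
  public static boolean verifyChunk(DataChecksum sum, byte[] chunk, int len,
                                    byte[] stored, int storedOff) {
    sum.reset();                // discard any previous state
    sum.update(chunk, 0, len);  // checksum the data chunk
    byte[] actual = new byte[sum.getChecksumSize()];
    sum.writeValue(actual, 0, true);  // serialize, then reset for the next chunk
    for (int i = 0; i < actual.length; i++) {
      if (actual[i] != stored[storedOff + i]) {
        return false;           // corruption detected
      }
    }
    return true;
  }
}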


Copying a block from another datanode: the copy response carries the source's checksum parameters, which the receiving side adopts for its own BlockReceiver:

        throw new IOException("Copy block " + block + " from "
            + proxySock.getRemoteSocketAddress() + " failed");
      }
     
      // get checksum info about the block we're copying
      ReadOpChecksumInfoProto checksumInfo = copyResponse.getReadOpChecksumInfo();
      DataChecksum remoteChecksum = DataTransferProtoUtil.fromProto(
          checksumInfo.getChecksum());
      // open a block receiver and check if the block does not exist
      blockReceiver = new BlockReceiver(
          block, proxyReply, proxySock.getRemoteSocketAddress().toString(),
          proxySock.getLocalSocketAddress().toString(),
          // ... remaining BlockReceiver constructor arguments elided
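DataTransferProtoUtil.fromProto rebuilds a DataChecksum from the checksum proto carried in the copy response, so the receiving datanode chunks and checksums the copied block exactly as the source datanode did; a mismatch here would make every subsequent verification of the replica fail.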


Constructing a BlockSender: whether checksums are verified depends on corruptChecksumOk and on whether the block's metadata file exists:

      /*
       * (corruptChecksumOk, metadata file exists): operation
       * True,   True: will verify checksum
       * True,  False: No verify, e.g., need to read data from a corrupted file
       * False,  True: will verify checksum
       * False, False: throws IOException file not found
       */
      DataChecksum csum;
      final InputStream metaIn = datanode.data.getMetaDataInputStream(block);
      if (!corruptChecksumOk || metaIn != null) {
        if (metaIn == null) {
          //need checksum but meta-data not found
          throw new FileNotFoundException("Meta-data not found for " + block);
        }
       
        checksumIn = new DataInputStream(
            new BufferedInputStream(metaIn, HdfsConstants.IO_FILE_BUFFER_SIZE));

        // read and handle the common header here. For now just a version
        BlockMetadataHeader header = BlockMetadataHeader.readHeader(checksumIn);
        short version = header.getVersion();
        if (version != BlockMetadataHeader.VERSION) {
          LOG.warn("Wrong version (" + version + ") for metadata file for "
              + block + " ignoring ...");
        }
        csum = header.getChecksum();
      } else {
        LOG.warn("Could not find metadata file for " + block);
        // This only decides the buffer size. Use BUFFER_SIZE?
        csum = DataChecksum.newDataChecksum(DataChecksum.CHECKSUM_NULL,
            16 * 1024);
      }

      /*
       * If chunkSize is very large, then the metadata file is most likely
       * corrupted. For now just truncate bytesPerChecksum to blockLength.
       */      
      int size = csum.getBytesPerChecksum();
      if (size > 10*1024*1024 && size > replicaVisibleLength) {
        csum = DataChecksum.newDataChecksum(csum.getChecksumType(),
            Math.max((int)replicaVisibleLength, 10*1024*1024));
        size = csum.getBytesPerChecksum();       
      }
      chunkSize = size;
      checksum = csum;
      checksumSize = checksum.getChecksumSize();
      length = length < 0 ? replicaVisibleLength : length;
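Two defensive paths stand out above: when the metadata file is missing but corruptChecksumOk allows it, a CHECKSUM_NULL instance stands in so the rest of the sender logic stays uniform; and an absurd bytesPerChecksum from a corrupt header is capped before it can size buffers. The null checksum is essentially free:

    // CHECKSUM_NULL computes nothing and getChecksumSize() is 0, so no
    // checksum bytes are ever written or compared; the 16 KB chunk size
    // only influences buffering, as the snippet's comment notes.
    DataChecksum nullSum =
        DataChecksum.newDataChecksum(DataChecksum.CHECKSUM_NULL, 16 * 1024);
    assert nullSum.getChecksumSize() == 0;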

Unit-testing the datanode's write path with a deliberately invalid checksum: a Mockito spy forces getBytesPerChecksum() to return -1:

    sendRecvData("Wrong Op Code", true);
   
    /* Test OP_WRITE_BLOCK */
    sendBuf.reset();
   
    DataChecksum badChecksum = Mockito.spy(DEFAULT_CHECKSUM);
    Mockito.doReturn(-1).when(badChecksum).getBytesPerChecksum();

    sender.writeBlock(new ExtendedBlock(poolId, newBlockId),
        BlockTokenSecretManager.DUMMY_TOKEN, "cl",
        new DatanodeInfo[1], null,
        // ... remaining writeBlock arguments, including badChecksum, elided
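Because the spy's bogus bytesPerChecksum is serialized into the operation's checksum header, the malformed value travels over the wire exactly as a buggy client would send it, and the test can assert that the datanode rejects the write cleanly rather than failing somewhere deep in the pipeline.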

On the client side, DFSClient builds the DataChecksum that a new output stream will use, from the configured checksum type and bytes-per-checksum:

      throws IOException, UnresolvedLinkException {
    checkOpen();
    CreateFlag.validate(flag);
    DFSOutputStream result = primitiveAppend(src, flag, buffersize, progress);
    if (result == null) {
      DataChecksum checksum = DataChecksum.newDataChecksum(
          dfsClientConf.checksumType,
          bytesPerChecksum);
      result = DFSOutputStream.newStreamForCreate(this, src, absPermission,
          flag, createParent, replication, blockSize, progress, buffersize,
          checksum);
    }
    // ... remainder of the method elided
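The checksum type and chunk size come from client configuration. A hypothetical sketch of resolving them with the newer enum-based factory (dfsClientConf.checksumType in the snippet is such a DataChecksum.Type; the property names are those of Hadoop 2's hdfs-default.xml):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.util.DataChecksum;

    public class ClientChecksumSketch {
      static DataChecksum clientChecksum(Configuration conf) {
        // Defaults mirror hdfs-default.xml: CRC32C over 512-byte chunks.
        DataChecksum.Type type = DataChecksum.Type.valueOf(
            conf.get("dfs.checksum.type", "CRC32C"));
        int bpc = conf.getInt("dfs.bytes-per-checksum", 512);
        return DataChecksum.newDataChecksum(type, bpc);
      }
    }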

Reading a replica directly from local disk (short-circuit read): the start offset is aligned down to a checksum-chunk boundary so verification can begin at a chunk edge:

        short version = header.getVersion();
        if (version != BlockMetadataHeader.VERSION) {
          LOG.warn("Wrong version (" + version + ") for metadata file for "
              + blk + " ignoring ...");
        }
        DataChecksum checksum = header.getChecksum();
        long firstChunkOffset = startOffset
            - (startOffset % checksum.getBytesPerChecksum());
        localBlockReader = new BlockReaderLocal(conf, file, blk, token,
            startOffset, length, pathinfo, checksum, true, dataIn,
            firstChunkOffset, checksumIn);
      } else {
        localBlockReader = new BlockReaderLocal(conf, file, blk, token,
            startOffset, length, pathinfo, dataIn);
      }
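The alignment uses the standard round-down-modulo trick. With hypothetical values startOffset = 1300 and 512-byte chunks:

    long startOffset = 1300;
    int bytesPerChecksum = 512;
    long firstChunkOffset = startOffset - (startOffset % bytesPerChecksum);
    // firstChunkOffset == 1024: the read starts at the third chunk, and the
    // first 276 bytes (1300 - 1024) are checksummed, then discarded.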
