Package: com.amazonaws.services.glacier.internal

Usage examples of com.amazonaws.services.glacier.internal.TreeHashInputStream


     * Amazon Glacier.
     */
    private void downloadOneChunk(String accountId, String vaultName,
            String jobId, RandomAccessFile output, long currentPosition,
            long endPosition) {
        TreeHashInputStream  input;
        int retries = 0;
        while (true) {
            try {
                GetJobOutputResult  jobOutputResult = glacier.getJobOutput(new GetJobOutputRequest()
                  .withAccountId(accountId)
                  .withVaultName(vaultName)
                  .withRange("bytes=" + Long.toString(currentPosition) + "-" + Long.toString(endPosition))
                  .withJobId(jobId));

                try {
                    input = new TreeHashInputStream(new BufferedInputStream(jobOutputResult.getBody()));
                } catch (NoSuchAlgorithmException e) {
                    throw new AmazonClientException("Unable to compute hash for data integrity: " + e.getMessage(), e);
                }

                appendToFile(output, input);

                // Only do tree-hash check when the output checksum is returned from Glacier
                if (null != jobOutputResult.getChecksum()) {
                    // Checksum does not match
                    if (!input.getTreeHash().equalsIgnoreCase(jobOutputResult.getChecksum())) {
                        throw new IOException("Client side computed hash doesn't match server side hash; possible data corruption");
                    }
                } else {
                    log.warn("Cannot validate the downloaded output since no tree-hash checksum is returned from Glacier. "
                            + "Make sure the InitiateJob and GetJobOutput requests use tree-hash-aligned ranges.");
View Full Code Here


      downloadJobOutput(jobOutputResult, file);
    }

    private void downloadJobOutput(GetJobOutputResult jobOutputResult, File file) {
        TreeHashInputStream input;
        OutputStream output = null;
        byte[] buffer = new byte[1024 * 1024];
    try {
      input = new TreeHashInputStream(new BufferedInputStream(jobOutputResult.getBody()));
    } catch (NoSuchAlgorithmException e) {
      throw new AmazonClientException("Unable to compute hash for data integrity", e);
    }

        try {
            output = new BufferedOutputStream(new FileOutputStream(file));

            int bytesRead = 0;
            do {
              bytesRead = input.read(buffer);
              if (bytesRead <= 0) break;
              output.write(buffer, 0, bytesRead);
            } while (bytesRead > 0);
        } catch (IOException e) {
            throw new AmazonClientException("Unable to save archive to disk", e);
        } finally {
            try {input.close();catch (Exception e) {}
            try {output.close();} catch (Exception e) {}

            try {
        String clientSideTreeHash = input.getTreeHash();
        String serverSideTreeHash = jobOutputResult.getChecksum();
        if (!clientSideTreeHash.equalsIgnoreCase(serverSideTreeHash)) {
          throw new AmazonClientException("Client side computed hash doesn't match server side hash; possible data corruption");
        }
      } catch (IOException e) {
View Full Code Here

   *             If problems were encountered reading the data or calculating
   *             the hash.
   */
    public static String calculateTreeHash(InputStream input) throws AmazonClientException {
    try {
      TreeHashInputStream treeHashInputStream = new TreeHashInputStream(input);
            byte[] buffer = new byte[1024];
            while (treeHashInputStream.read(buffer, 0, buffer.length) != -1);
      treeHashInputStream.close();
      return calculateTreeHash(treeHashInputStream.getChecksums());
    } catch (Exception e) {
      throw new AmazonClientException("Unable to compute hash", e);
    }
    }
View Full Code Here

      downloadJobOutput(jobOutputResult, file);
    }

    private void downloadJobOutput(GetJobOutputResult jobOutputResult, File file) {
        TreeHashInputStream input;
        OutputStream output = null;
        byte[] buffer = new byte[1024 * 1024];
    try {
      input = new TreeHashInputStream(new BufferedInputStream(jobOutputResult.getBody()));
    } catch (NoSuchAlgorithmException e) {
      throw new AmazonClientException("Unable to compute hash for data integrity", e);
    }

        try {
            output = new BufferedOutputStream(new FileOutputStream(file));

            int bytesRead = 0;
            do {
              bytesRead = input.read(buffer);
              if (bytesRead <= 0) break;
              output.write(buffer, 0, bytesRead);
            } while (bytesRead > 0);
        } catch (IOException e) {
            throw new AmazonClientException("Unable to save archive to disk", e);
        } finally {
            try {input.close();catch (Exception e) {}
            try {output.close();} catch (Exception e) {}

            try {
        String clientSideTreeHash = input.getTreeHash();
        String serverSideTreeHash = jobOutputResult.getChecksum();
        if (!clientSideTreeHash.equalsIgnoreCase(serverSideTreeHash)) {
          throw new AmazonClientException("Client side computed hash doesn't match server side hash; possible data corruption");
        }
      } catch (IOException e) {
View Full Code Here

   *             the hash.
   */
    public static String calculateTreeHash(InputStream input) throws AmazonClientException {
    try {
      input.mark(0);
      TreeHashInputStream treeHashInputStream = new TreeHashInputStream(input);

            byte[] buffer = new byte[1024];
            while (treeHashInputStream.read(buffer, 0, buffer.length) != -1);
      treeHashInputStream.close();
      input.reset();
      return calculateTreeHash(treeHashInputStream.getChecksums());
    } catch (Exception e) {
      throw new AmazonClientException("Unable to compute hash", e);
    }
    }
View Full Code Here

     
      downloadJobOutput(jobOutputResult, file);
    }
   
    private void downloadJobOutput(GetJobOutputResult jobOutputResult, File file) {
        TreeHashInputStream input;
        OutputStream output = null;
        byte[] buffer = new byte[1024 * 1024];
    try {
      input = new TreeHashInputStream(new BufferedInputStream(jobOutputResult.getBody()));
    } catch (NoSuchAlgorithmException e) {
      throw new AmazonClientException("Unable to compute hash for data integrity", e);
    }

        try {
            output = new BufferedOutputStream(new FileOutputStream(file));

            int bytesRead = 0;
            do {
              bytesRead = input.read(buffer);
              if (bytesRead <= 0) break;
              output.write(buffer, 0, bytesRead);
            } while (bytesRead > 0);
        } catch (IOException e) {
            throw new AmazonClientException("Unable to save archive to disk", e);
        } finally {
            try {input.close();catch (Exception e) {}
            try {output.close();} catch (Exception e) {}

            try {
        String clientSideTreeHash = input.getTreeHash();
        String serverSideTreeHash = jobOutputResult.getChecksum();
        if (!clientSideTreeHash.equalsIgnoreCase(serverSideTreeHash)) {
          throw new AmazonClientException("Client side computed hash doesn't match server side hash; possible data corruption");
        }
      } catch (IOException e) {
View Full Code Here

    /**
     * Download one chunk from Amazon Glacier. It will do the retry if any errors are encountered while streaming the data from
     * Amazon Glacier.
     */
    private void downloadOneChunk(String accountId, String vaultName, String jobId, RandomAccessFile output, long currentPosition, long endPosition) {
        TreeHashInputStream  input;
        int retries = 0;
        while (true) {
            try {
                GetJobOutputResult  jobOutputResult = glacier.getJobOutput(new GetJobOutputRequest()
                  .withAccountId(accountId)
                  .withVaultName(vaultName)
                  .withRange("bytes=" + Long.toString(currentPosition) + "-" + Long.toString(endPosition))
                  .withJobId(jobId));

                try {
                    input = new TreeHashInputStream(new BufferedInputStream(jobOutputResult.getBody()));
                } catch (NoSuchAlgorithmException e) {
                    throw new AmazonClientException("Unable to compute hash for data integrity: " + e.getMessage(), e);
                }

                appendToFile(output, input);

                // Only do tree-hash check when the output checksum is returned from Glacier
                if (null != jobOutputResult.getChecksum()) {
                    // Checksum does not match
                    if (!input.getTreeHash().equalsIgnoreCase(jobOutputResult.getChecksum())) {
                        throw new IOException("Client side computed hash doesn't match server side hash; possible data corruption");
                    }
                } else {
                    log.warn("Cannot validate the downloaded output since no tree-hash checksum is returned from Glacier. "
                            + "Make sure the InitiateJob and GetJobOutput requests use tree-hash-aligned ranges.");
View Full Code Here

     */
    private void downloadOneChunk(String accountId, String vaultName,
            String jobId, RandomAccessFile output, long currentPosition,
            long endPosition, ProgressListener progressListener) {
        final long chunkSize = endPosition - currentPosition + 1;
        TreeHashInputStream input = null;
        int retries = 0;
        while (true) {
            try {
                GetJobOutputRequest req = new GetJobOutputRequest()
                    .withAccountId(accountId)
                    .withVaultName(vaultName)
                    .withRange("bytes=" + currentPosition + "-" + endPosition)
                    .withJobId(jobId)
                    .withGeneralProgressListener(progressListener)
                    ;
                GetJobOutputResult jobOutputResult = glacier.getJobOutput(req);
                try {
                    input = new TreeHashInputStream(new BufferedInputStream(jobOutputResult.getBody()));
                    appendToFile(output, input);
                } catch (NoSuchAlgorithmException e) {
                    throw failure(e, "Unable to compute hash for data integrity");
                } finally {
                    closeQuietly(input, log);
                }

                // Only do tree-hash check when the output checksum is returned from Glacier
                if (null != jobOutputResult.getChecksum()) {
                    // Checksum does not match
                    if (!input.getTreeHash().equalsIgnoreCase(jobOutputResult.getChecksum())) {
                        // Discard the chunk of bytes received
                        publishResponseBytesDiscarded(progressListener, chunkSize);
                        if (log.isDebugEnabled())
                            log.debug("reverting " + chunkSize);
                        throw new IOException("Client side computed hash doesn't match server side hash; possible data corruption");
View Full Code Here

   *             the hash.
   */
    public static String calculateTreeHash(InputStream input)
            throws AmazonClientException {
        try {
            TreeHashInputStream treeHashInputStream =
                new TreeHashInputStream(input);
            byte[] buffer = new byte[1024];
            while (treeHashInputStream.read(buffer, 0, buffer.length) != -1);
            // closing is currently required to compute the checksum
            treeHashInputStream.close();
            return calculateTreeHash(treeHashInputStream.getChecksums());
        } catch (Exception e) {
            throw new AmazonClientException("Unable to compute hash", e);
        }
    }
View Full Code Here

    /**
     * Download one chunk from Amazon Glacier. It will do the retry if any errors are encountered while streaming the data from
     * Amazon Glacier.
     */
    private void downloadOneChunk(String accountId, String vaultName, String jobId, RandomAccessFile output, long currentPosition, long endPosition) {
        TreeHashInputStream  input;
        int retries = 0;
        while (true) {
            try {
                GetJobOutputResult  jobOutputResult = glacier.getJobOutput(new GetJobOutputRequest()
                  .withAccountId(accountId)
                  .withVaultName(vaultName)
                  .withRange("bytes=" + Long.toString(currentPosition) + "-" + Long.toString(endPosition))
                  .withJobId(jobId));

                try {
                    input = new TreeHashInputStream(new BufferedInputStream(jobOutputResult.getBody()));
                } catch (NoSuchAlgorithmException e) {
                    throw new AmazonClientException("Unable to compute hash for data integrity: " + e.getMessage(), e);
                }

                appendToFile(output, input);

                // Checksum does not match
                if (!input.getTreeHash().equalsIgnoreCase(jobOutputResult.getChecksum())) {
                    throw new IOException("Client side computed hash doesn't match server side hash; possible data corruption");
                }

                // Successfully download
                return;
View Full Code Here

TOP

Related Classes of com.amazonaws.services.glacier.internal.TreeHashInputStream

Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc., now owned by Oracle Inc. Contact: coftware#gmail.com.