Package com.amazonaws

Examples of com.amazonaws.AmazonClientException
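AmazonClientException is the SDK's base unchecked exception for problems that occur on the client side (network failures, unreadable responses, failed integrity checks); AmazonServiceException, its subclass, represents errors returned by the service itself. A minimal caller-side sketch of how the two are usually distinguished (bucket and key names are placeholders, and the client uses the default credentials chain):

import com.amazonaws.AmazonClientException;
import com.amazonaws.AmazonServiceException;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3Client;
import com.amazonaws.services.s3.model.S3Object;

public class ClientExceptionDemo {
    public static void main(String[] args) {
        AmazonS3 s3 = new AmazonS3Client();   // default credentials provider chain
        try {
            S3Object object = s3.getObject("my-example-bucket", "my-example-key");
            System.out.println("Content type: " + object.getObjectMetadata().getContentType());
        } catch (AmazonServiceException ase) {
            // The request reached Amazon S3 but was rejected (e.g. 403 or 404).
            System.err.println("Service error " + ase.getStatusCode() + ": " + ase.getErrorCode());
        } catch (AmazonClientException ace) {
            // The SDK could not complete the call at all: network failure,
            // unreadable response, failed client-side integrity check, etc.
            System.err.println("Client error: " + ace.getMessage());
        }
    }
}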


        while (!allInstancesStateEqual(instances, InstanceStateName.Running)) {

            // instance status should not be Terminated
            if (anyInstancesStateEqual(instances, InstanceStateName.Terminated)) {
                throw new AmazonClientException("Some Instance is terminated before running a job");
            }

            // notify the status
            for (Instance ins: instances) {
                jobExecutionContext.getNotificationService().publish(new EC2ProviderEvent("EC2 Instance " +
View Full Code Here
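The fragment above polls EC2 instance states and fails fast with an AmazonClientException if any instance terminates before the job starts. The helpers anyInstancesStateEqual and allInstancesStateEqual are not shown; a hypothetical reconstruction (the project's real implementations may differ) could look like:

import java.util.List;

import com.amazonaws.services.ec2.model.Instance;
import com.amazonaws.services.ec2.model.InstanceStateName;

public class InstanceStateChecks {

    // Hypothetical reconstruction of the helper used above.
    static boolean anyInstancesStateEqual(List<Instance> instances, InstanceStateName state) {
        for (Instance instance : instances) {
            if (state.toString().equals(instance.getState().getName())) {
                return true;
            }
        }
        return false;
    }

    // Hypothetical counterpart used as the loop condition above.
    static boolean allInstancesStateEqual(List<Instance> instances, InstanceStateName state) {
        for (Instance instance : instances) {
            if (!state.toString().equals(instance.getState().getName())) {
                return false;
            }
        }
        return true;
    }
}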


                    }
                }
            } catch (IOException ioe) {
                log.error("Unable to execute HTTP request: " + ioe.getMessage());

                throw new AmazonClientException("Unable to execute HTTP request: "
                        + ioe.getMessage(), ioe);
            } finally {
                /*
                 * Some response handlers need to manually manage the HTTP
                 * connection and will take care of releasing the connection on
View Full Code Here
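This fragment shows the SDK's standard wrap-and-rethrow pattern: low-level IOExceptions from the HTTP layer are logged and resurfaced as AmazonClientException so callers deal with a single exception hierarchy. A self-contained sketch of the same pattern, using plain HttpURLConnection instead of the SDK's internal HTTP client (the URL is a placeholder):

import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.URL;

import com.amazonaws.AmazonClientException;

public class WrapIoExceptionDemo {

    static int headRequest(String url) {
        HttpURLConnection connection = null;
        try {
            connection = (HttpURLConnection) new URL(url).openConnection();
            connection.setRequestMethod("HEAD");
            return connection.getResponseCode();
        } catch (IOException ioe) {
            // Low-level I/O problems reach callers as a single exception type.
            throw new AmazonClientException("Unable to execute HTTP request: "
                    + ioe.getMessage(), ioe);
        } finally {
            if (connection != null) {
                connection.disconnect();
            }
        }
    }

    public static void main(String[] args) {
        System.out.println(headRequest("https://s3.amazonaws.com/"));
    }
}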

        } else if (request.getMethodName() == HttpMethodName.HEAD) {
            HeadMethod headMethod = new HeadMethod(uri);
            if (nameValuePairs != null) headMethod.setQueryString(nameValuePairs);
            method = headMethod;
        } else {
            throw new AmazonClientException("Unknown HTTP method name: " + request.getMethodName());
        }

        // No matter what type of HTTP method we're creating, we need to copy
        // all the headers from the request.
        for (Entry<String, String> entry : request.getHeaders().entrySet()) {
View Full Code Here
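Unknown HTTP method names are rejected with an AmazonClientException, and the header-copying loop is cut off by the excerpt. A hypothetical helper showing how such a copy typically looks with Commons HttpClient 3.x (the real SDK code may skip or special-case some headers):

import java.util.Map;
import java.util.Map.Entry;

import org.apache.commons.httpclient.HttpMethod;

public class HeaderCopy {

    // Hypothetical helper: append every request header to the HttpClient method.
    static void copyHeaders(Map<String, String> headers, HttpMethod method) {
        for (Entry<String, String> entry : headers.entrySet()) {
            method.addRequestHeader(entry.getKey(), entry.getValue());
        }
    }
}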

            return awsResponse.getResult();
        } catch (Exception e) {
            String errorMessage = "Unable to unmarshall response (" + e.getMessage() + "): "
                                + method.getResponseBodyAsString();
            log.error(errorMessage, e);
            throw new AmazonClientException(errorMessage, e);
        }
    }
View Full Code Here
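Failures while unmarshalling a successful response are reported the same way: the raw response body is appended to the message and the original exception is chained as the cause. A self-contained sketch of the pattern with a plain XML parse standing in for the SDK's unmarshallers (parseResponseBody is a made-up helper name):

import java.io.ByteArrayInputStream;

import javax.xml.parsers.DocumentBuilderFactory;

import org.w3c.dom.Document;

import com.amazonaws.AmazonClientException;

public class UnmarshallFailureDemo {

    static Document parseResponseBody(String responseBody) {
        try {
            return DocumentBuilderFactory.newInstance()
                    .newDocumentBuilder()
                    .parse(new ByteArrayInputStream(responseBody.getBytes("UTF-8")));
        } catch (Exception e) {
            // Include the offending body in the message, chain the cause.
            String errorMessage = "Unable to unmarshall response ("
                    + e.getMessage() + "): " + responseBody;
            throw new AmazonClientException(errorMessage, e);
        }
    }
}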

            requestLog.info("Received error response: " + exception.toString());
        } catch (Exception e) {
            String errorMessage = "Unable to unmarshall error response (" + e.getMessage() + "): "
                                + method.getResponseBodyAsString();
            log.error(errorMessage, e);
            throw new AmazonClientException(errorMessage, e);
        }

        exception.setStatusCode(status);
        exception.setServiceName(request.getServiceName());
        exception.fillInStackTrace();
View Full Code Here
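From the caller's perspective, the status code and service name set above end up on the AmazonServiceException that is eventually thrown. A minimal sketch, assuming a placeholder bucket the caller is not allowed to read:

import com.amazonaws.AmazonServiceException;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3Client;

public class ErrorMetadataDemo {
    public static void main(String[] args) {
        AmazonS3 s3 = new AmazonS3Client();
        try {
            // Placeholder bucket the caller is assumed not to be able to read.
            s3.getObjectMetadata("some-forbidden-bucket", "some-key");
        } catch (AmazonServiceException ase) {
            System.err.println("Status code:  " + ase.getStatusCode());
            System.err.println("Service name: " + ase.getServiceName());
            System.err.println("Error code:   " + ase.getErrorCode());
        }
    }
}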

            int bytesRead;
            while ((bytesRead = s3Object.getObjectContent().read(buffer)) > -1) {
                outputStream.write(buffer, 0, bytesRead);
            }
        } catch (IOException e) {
            throw new AmazonClientException(
                    "Unable to store object contents to disk: " + e.getMessage(), e);
        } finally {
            try {outputStream.close();} catch (Exception e) {}
            try {s3Object.getObjectContent().close();} catch (Exception e) {}
        }

        try {
            byte[] clientSideHash = ServiceUtils.computeMD5Hash(new FileInputStream(destinationFile));
            byte[] serverSideHash = ServiceUtils.fromHex(s3Object.getObjectMetadata().getETag());

            if (!Arrays.equals(clientSideHash, serverSideHash)) {
                throw new AmazonClientException("Unable to verify integrity of data download.  " +
                        "Client calculated content hash didn't match hash calculated by Amazon S3.  " +
                        "The data stored in '" + destinationFile.getAbsolutePath() + "' may be corrupt.");
            }
        } catch (Exception e) {
            log.warn("Unable to calculate MD5 hash to validate download: " + e.getMessage(), e);
View Full Code Here
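Roughly this download-and-verify flow backs the file-based getObject overload, so a hash mismatch reaches the caller as an AmazonClientException rather than as silently corrupt data. A caller-side sketch with placeholder bucket, key, and file names:

import java.io.File;

import com.amazonaws.AmazonClientException;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3Client;
import com.amazonaws.services.s3.model.GetObjectRequest;

public class DownloadDemo {
    public static void main(String[] args) {
        AmazonS3 s3 = new AmazonS3Client();
        File destination = new File("downloaded-object.dat");
        try {
            s3.getObject(new GetObjectRequest("my-example-bucket", "my-example-key"), destination);
            System.out.println("Downloaded to " + destination.getAbsolutePath());
        } catch (AmazonClientException ace) {
            // Covers both transport failures and the hash-mismatch case above.
            System.err.println("Download failed: " + ace.getMessage());
        }
    }
}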

            try {
                byte[] md5Hash = ServiceUtils.computeMD5Hash(new FileInputStream(file));
                metadata.setContentMD5(ServiceUtils.toBase64(md5Hash));
            } catch (Exception e) {
                throw new AmazonClientException(
                        "Unable to calculate MD5 hash: " + e.getMessage(), e);
            }

            try {
                input = new RepeatableFileInputStream(file);
            } catch (FileNotFoundException fnfe) {
                throw new AmazonClientException("Unable to find file to upload", fnfe);
            }
        }

        Request<Void> request = createRequest(bucketName, key, putObjectRequest);

        if (putObjectRequest.getCannedAcl() != null) {
            request.addHeader(Headers.S3_CANNED_ACL, putObjectRequest.getCannedAcl().toString());
        }

        if (putObjectRequest.getStorageClass() != null) {
            request.addHeader(Headers.STORAGE_CLASS, putObjectRequest.getStorageClass());
        }

        if (metadata.getContentLength() <= 0) {
            /*
             * There's nothing we can do except let the HTTP client buffer the
             * input stream contents if the caller doesn't tell us how much data
             * to expect in the stream, since we have to explicitly tell
             * Amazon S3 how much we're sending before we start sending any of
             * it.
             */
            log.warn("No content length specified for stream data.  " +
                     "Stream contents will be buffered in memory and could result in " +
                     "out of memory errors.");
        }

        if (!input.markSupported()) {
            input = new RepeatableInputStream(input, Constants.DEFAULT_STREAM_BUFFER_SIZE);
        }

        if (metadata.getContentMD5() == null) {
            /*
             * If the user hasn't set the content MD5, then we don't want to
             * buffer the whole stream in memory just to calculate it. Instead,
             * we can calculate it on the fly and validate it with the returned
             * ETag from the object upload.
             */
            try {
                input = new MD5DigestCalculatingInputStream(input);
            } catch (NoSuchAlgorithmException e) {
                log.warn("No MD5 digest algorithm available.  Unable to calculate " +
                         "checksum and verify data integrity.", e);
            }
        }

        if (metadata.getContentType() == null) {
            /*
             * Default to the "application/octet-stream" if the user hasn't
             * specified a content type.
             */
            metadata.setContentType(Mimetypes.MIMETYPE_OCTET_STREAM);
        }

        populateRequestMetadata(request, metadata);
        signRequest(request, HttpMethodName.PUT, bucketName, key);
        HttpRequest httpRequest = convertToHttpRequest(request, HttpMethodName.PUT);
        httpRequest.setContent(input);

        ObjectMetadata returnedMetadata = null;
        try {
            S3MetadataResponseHandler responseHandler = new S3MetadataResponseHandler();
            returnedMetadata = (ObjectMetadata)client.execute(httpRequest, responseHandler, errorResponseHandler);
        } finally {
            try {input.close();} catch (Exception e) {
                log.warn("Unable to cleanly close input stream: " + e.getMessage(), e);
            }
        }

        String contentMd5 = metadata.getContentMD5();
        if (input instanceof MD5DigestCalculatingInputStream) {
            MD5DigestCalculatingInputStream md5DigestInputStream = (MD5DigestCalculatingInputStream)input;
            contentMd5 = ServiceUtils.toBase64(md5DigestInputStream.getMd5Digest());
        }

        if (returnedMetadata != null && contentMd5 != null) {
            byte[] clientSideHash = ServiceUtils.fromBase64(contentMd5);
            byte[] serverSideHash = ServiceUtils.fromHex(returnedMetadata.getETag());

            if (!Arrays.equals(clientSideHash, serverSideHash)) {
                throw new AmazonClientException("Unable to verify integrity of data upload.  " +
                        "Client calculated content hash didn't match hash calculated by Amazon S3.  " +
                        "You may need to delete the data stored in Amazon S3.");
            }
        }

View Full Code Here
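The same end-to-end check applies to uploads: if the MD5 computed while streaming does not match the ETag returned by Amazon S3, the putObject call throws. A caller-side sketch with placeholder names:

import java.io.File;

import com.amazonaws.AmazonClientException;
import com.amazonaws.AmazonServiceException;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3Client;
import com.amazonaws.services.s3.model.PutObjectRequest;
import com.amazonaws.services.s3.model.PutObjectResult;

public class UploadDemo {
    public static void main(String[] args) {
        AmazonS3 s3 = new AmazonS3Client();
        try {
            PutObjectResult result = s3.putObject(
                    new PutObjectRequest("my-example-bucket", "report.csv", new File("report.csv")));
            System.out.println("ETag: " + result.getETag());
        } catch (AmazonServiceException ase) {
            System.err.println("Rejected by Amazon S3: " + ase.getErrorCode());
        } catch (AmazonClientException ace) {
            // Client-side failures, including the MD5/ETag mismatch described above.
            System.err.println("Upload failed on the client side: " + ace.getMessage());
        }
    }
}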

                "/" +
                ((bucketName != null) ? bucketName + "/" : "") +
                ((key != null) ? ServiceUtils.urlEncode(key) : "");
            new S3Signer(awsCredentials, methodName.toString(), resourcePath).sign(request, null, null);
        } catch (SignatureException e) {
            throw new AmazonClientException("Unable to sign request: " + e.getMessage(), e);
        }
    }
View Full Code Here

                ((bucketName != null) ? bucketName + "/" : "") +
                ((key != null) ? ServiceUtils.urlEncode(key) : "") +
                ((subResource != null) ? "?" + subResource : "");
            new S3QueryStringSigner<T>(awsCredentials, methodName.toString(), resourcePath, expiration).sign(request);
        } catch (SignatureException e) {
            throw new AmazonClientException("Unable to sign request: " + e.getMessage(), e);
        }

        // The Amazon S3 DevPay token header is a special exception and can be safely moved
        // from the request's headers into the query string to ensure that it travels along
        // with the pre-signed URL when it's sent back to Amazon S3.
View Full Code Here
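The two signing fragments above build the canonical S3 resource path and delegate to S3Signer or S3QueryStringSigner; the query-string variant is what backs pre-signed URLs, where a SignatureException again surfaces as an AmazonClientException. A caller-side sketch generating a pre-signed GET URL (bucket and key are placeholders):

import java.net.URL;
import java.util.Date;

import com.amazonaws.AmazonClientException;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3Client;

public class PresignDemo {
    public static void main(String[] args) {
        AmazonS3 s3 = new AmazonS3Client();
        Date expiration = new Date(System.currentTimeMillis() + 15 * 60 * 1000); // valid for 15 minutes
        try {
            URL url = s3.generatePresignedUrl("my-example-bucket", "my-example-key", expiration);
            System.out.println("Pre-signed GET URL: " + url);
        } catch (AmazonClientException ace) {
            System.err.println("Unable to sign request: " + ace.getMessage());
        }
    }
}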

     */
    private URI convertToVirtualHostEndpoint(String bucketName) {
        try {
            return new URI(endpoint.getScheme() + "://" + bucketName + "." + endpoint.getAuthority());
        } catch (URISyntaxException e) {
            throw new AmazonClientException("Can't turn bucket name into a URI: " + e.getMessage(), e);
        }
    }
View Full Code Here
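The conversion above only rewrites the authority of the configured endpoint. A self-contained sketch with a hard-coded endpoint shows the resulting virtual-hosted-style URI:

import java.net.URI;
import java.net.URISyntaxException;

import com.amazonaws.AmazonClientException;

public class VirtualHostDemo {
    public static void main(String[] args) {
        URI endpoint = URI.create("https://s3.amazonaws.com"); // stand-in for the client's configured endpoint
        String bucketName = "my-example-bucket";
        try {
            URI virtualHosted = new URI(endpoint.getScheme() + "://" + bucketName + "." + endpoint.getAuthority());
            System.out.println(virtualHosted); // https://my-example-bucket.s3.amazonaws.com
        } catch (URISyntaxException e) {
            throw new AmazonClientException("Can't turn bucket name into a URI: " + e.getMessage(), e);
        }
    }
}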


