Package org.jets3t.service.impl.rest.httpclient

Examples of org.jets3t.service.impl.rest.httpclient.RestS3Service


                "s3service.s3-endpoint", Constants.GS_DEFAULT_HOSTNAME);
        } else if (originalTargetEndpoint != null) {
            cockpitProperties.setProperty(
                "s3service.s3-endpoint", originalTargetEndpoint);
        }
        return new RestS3Service(credentials, APPLICATION_DESCRIPTION,
            this, cockpitProperties);
    }
View Full Code Here


        Jets3tProperties properties = new Jets3tProperties();
        properties.setProperty(
            "storage-service.request-signature-version",
            this.requestSignatureVersion);

        RestS3Service service = new RestS3Service(
            this.testCredentials, null, null, properties);

        service.listAllBuckets();
    }
View Full Code Here

        Jets3tProperties properties = new Jets3tProperties();
        properties.setProperty(
            "storage-service.request-signature-version",
            this.requestSignatureVersion);

        RestS3Service service = new RestS3Service(
            this.testCredentials, null, null, properties);

        String bucketName =
            "test-" + testCredentials.getAccessKey().toLowerCase()
            + "-testwithservicecreatebucket-"
            + System.currentTimeMillis();
        service.createBucket(bucketName);
        service.deleteBucket(bucketName);
    }
View Full Code Here

    // Test signed PUT requests (with payloads) and DELETE requests for bucket in "eu-central-1"
    // using service that is *not* configured to use AWS4-HMAC-SHA256 signatures by default.
    @Test
    public void testWithServiceCreateAndDeleteBucketAndCreateGetAndDeleteObject() throws Exception {
        RestS3Service service = new RestS3Service(this.testCredentials);

        String bucketName =
            "test-" + testCredentials.getAccessKey().toLowerCase()
            + System.currentTimeMillis();
        String objectData = "Just some simple text data";
        S3Object object = new S3Object(
            "text data object : îüøæç : テストオブジェクト",
            objectData);
        object.addMetadata("my-test-metadata", "my-value");


        service.getOrCreateBucket(bucketName, "eu-central-1");

        service.putObject(bucketName, object);

        // After request targeted at our bucket, we should have a cache entry
        // mapping our bucket name to the correct region.
        assertEquals(
            "eu-central-1", service.getRegionEndpointCache().get(bucketName));

        // With a cached mapping to the correct region, a HEAD request to
        // non-default region bucket using a service that is not aware of the
        // region will succeed.
        S3Object headObject = (S3Object)service.getObjectDetails(
            bucketName, object.getKey());
        assertEquals("my-value", headObject.getMetadata("my-test-metadata"));

        // The same HEAD request to non-default region bucket using a service
        // that is not aware of the region would fail if it wasn't for the
        // cached mapping from bucket to region.
        service.getRegionEndpointCache().remove(bucketName);
        try {
            service.getObjectDetails(bucketName, object.getKey());
            fail("Expected HEAD request to fail with no");
        } catch (ServiceException e) {
        }

        // A GET request to non-default region bucket using a service that is not
        // aware of the region can succeed because we get an error from S3 with
        // the expected region, and can correct the request then retry.
        S3Object retrievedObject = service.getObject(bucketName, object.getKey());
        assertEquals(objectData,
            ServiceUtils.readInputStreamToString(
                retrievedObject.getDataInputStream(),
                Constants.DEFAULT_ENCODING));

        // The above GET request targeted at our bucket could be made to succeed
        // using the error data returned by S3, which also re-populates our
        // bucket name to region cache.
        assertEquals(
            "eu-central-1", service.getRegionEndpointCache().get(bucketName));

        service.deleteObject(bucketName, object.getKey());

        service.deleteBucket(bucketName);
    }
View Full Code Here

    @Override
    protected RestStorageService getStorageService(ProviderCredentials credentials) throws ServiceException {
        Jets3tProperties properties = new Jets3tProperties();
        properties.setProperty("s3service.s3-endpoint", Constants.GS_DEFAULT_HOSTNAME);
        return new RestS3Service(credentials, null, null, properties);
    }
View Full Code Here

    }

    protected RestStorageService getStorageService(ProviderCredentials credentials,
        Jets3tProperties properties) throws ServiceException
    {
        return new RestS3Service(credentials, null, null, properties);
    }
View Full Code Here

    {
        return new RestS3Service(credentials, null, null, properties);
    }

    public void testUrlSigning() throws Exception {
        RestS3Service service = (RestS3Service) getStorageService(getCredentials());
        StorageBucket bucket = createBucketForTest("testUrlSigning");
        String bucketName = bucket.getName();

        try {
            // Create test object, with private ACL
            String dataString = "Text for the URL Signing test object...";
            S3Object object = new S3Object("Testing URL Signing", dataString);
            object.setContentType("text/html");
            object.addMetadata(service.getRestMetadataPrefix() + "example-header", "example-value");
            object.setAcl(AccessControlList.REST_CANNED_PRIVATE);

            // Determine what the time will be in 5 minutes.
            Calendar cal = Calendar.getInstance();
            cal.add(Calendar.MINUTE, 5);
            Date expiryDate = cal.getTime();

            // Create a signed HTTP PUT URL.
            String signedPutUrl = service.createSignedPutUrl(bucket.getName(), object.getKey(),
                object.getMetadataMap(), expiryDate, false);

            // Put the object in S3 using the signed URL (no AWS credentials required)
            RestS3Service restS3Service = new RestS3Service(null);
            restS3Service.putObjectWithSignedUrl(signedPutUrl, object);

            // Ensure the object was created.
            StorageObject objects[] = service.listObjects(bucketName, object.getKey(), null);
            assertEquals("Signed PUT URL failed to put/create object", objects.length, 1);

            // Change the object's content-type and ensure the signed PUT URL disallows the put.
            object.setContentType("application/octet-stream");
            try {
                restS3Service.putObjectWithSignedUrl(signedPutUrl, object);
                fail("Should not be able to use a signed URL for an object with a changed content-type");
            } catch (ServiceException e) {
                object.setContentType("text/html");
            }

            // Add an object header and ensure the signed PUT URL disallows the put.
            object.addMetadata(service.getRestMetadataPrefix() + "example-header-2", "example-value");
            try {
                restS3Service.putObjectWithSignedUrl(signedPutUrl, object);
                fail("Should not be able to use a signed URL for an object with changed metadata");
            } catch (ServiceException e) {
                object.removeMetadata(service.getRestMetadataPrefix() + "example-header-2");
            }

            // Change the object's name and ensure the signed PUT URL uses the signed name, not the object name.
            String originalName = object.getKey();
            object.setKey("Testing URL Signing 2");
            object.setDataInputStream(new ByteArrayInputStream(dataString.getBytes()));
            object = restS3Service.putObjectWithSignedUrl(signedPutUrl, object);
            assertEquals("Ensure returned object key is renamed based on signed PUT URL",
                originalName, object.getKey());

            // Test last-resort MD5 sanity-check for uploaded object when ETag is missing.
            S3Object objectWithoutETag = new S3Object("Object Without ETag");
            objectWithoutETag.setContentType("text/html");
            String objectWithoutETagSignedPutURL = service.createSignedPutUrl(
                bucket.getName(), objectWithoutETag.getKey(), objectWithoutETag.getMetadataMap(),
                expiryDate, false);
            objectWithoutETag.setDataInputStream(new ByteArrayInputStream(dataString.getBytes()));
            objectWithoutETag.setContentLength(dataString.getBytes().length);
            restS3Service.putObjectWithSignedUrl(objectWithoutETagSignedPutURL, objectWithoutETag);
            service.deleteObject(bucketName, objectWithoutETag.getKey());

            // Ensure we can't get the object with a normal URL.
            String s3Url = "https://s3.amazonaws.com";
            URL url = new URL(s3Url + "/" + bucket.getName() + "/" + RestUtils.encodeUrlString(object.getKey()));
View Full Code Here

            cleanupBucketForTest("testUrlSigning");
        }
    }

    public void testMultipartUtils() throws Exception {
        RestS3Service service = (RestS3Service) getStorageService(getCredentials());
        StorageBucket bucket = createBucketForTest("testMultipartUtilities");
        String bucketName = bucket.getName();

        try {
            // Ensure constructor enforces sanity constraints
            try {
                new MultipartUtils(MultipartUtils.MIN_PART_SIZE - 1);
                fail("Expected failure creating MultipartUtils with illegally small part size");
            } catch (IllegalArgumentException e) {}

            try {
                new MultipartUtils(MultipartUtils.MAX_OBJECT_SIZE + 1);
                fail("Expected failure creating MultipartUtils with illegally large part size");
            } catch (IllegalArgumentException e) {}

            // Default part size is maximum possible
            MultipartUtils multipartUtils = new MultipartUtils();
            assertEquals("Unexpected default part size",
                MultipartUtils.MAX_OBJECT_SIZE, multipartUtils.getMaxPartSize());

            // Create a util with the minimum part size, for quicker testing
            multipartUtils = new MultipartUtils(MultipartUtils.MIN_PART_SIZE);
            assertEquals("Unexpected default part size",
                MultipartUtils.MIN_PART_SIZE, multipartUtils.getMaxPartSize());

            // Create a large (11 MB) file
            File largeFile = File.createTempFile("JetS3t-testMultipartUtils-large", ".txt");
            BufferedOutputStream bos = new BufferedOutputStream(new FileOutputStream(largeFile));
            int offset = 0;
            while (offset < 11 * 1024 * 1024) {
                bos.write((offset++ % 256));
            }
            bos.close();

            // Create a medium (6 MB) file
            File mediumFile = File.createTempFile("JetS3t-testMultipartUtils-medium", ".txt");
            bos = new BufferedOutputStream(new FileOutputStream(mediumFile));
            offset = 0;
            while (offset < 6 * 1024 * 1024) {
                bos.write((offset++ % 256));
            }
            bos.close();

            // Create a small (5 MB) file
            File smallFile = File.createTempFile("JetS3t-testMultipartUtils-small", ".txt");
            bos = new BufferedOutputStream(new FileOutputStream(smallFile));
            offset = 0;
            while (offset < 5 * 1024 * 1024) {
                bos.write((offset++ % 256));
            }
            bos.close();

            assertFalse("Expected small file to be <= 5MB",
                multipartUtils.isFileLargerThanMaxPartSize(smallFile));
            assertTrue("Expected medium file to be > 5MB",
                multipartUtils.isFileLargerThanMaxPartSize(mediumFile));
            assertTrue("Expected large file to be > 5MB",
                multipartUtils.isFileLargerThanMaxPartSize(largeFile));

            // Split small file into 5MB object parts
            List<S3Object> parts = multipartUtils.splitFileIntoObjectsByMaxPartSize(
                smallFile.getName(), smallFile);
            assertEquals(1, parts.size());

            // Split medium file into 5MB object parts
            parts = multipartUtils.splitFileIntoObjectsByMaxPartSize(
                mediumFile.getName(), mediumFile);
            assertEquals(2, parts.size());

            // Split large file into 5MB object parts
            parts = multipartUtils.splitFileIntoObjectsByMaxPartSize(
                largeFile.getName(), largeFile);
            assertEquals(3, parts.size());

            /*
             * Upload medium-sized file as object in multiple parts
             */
            List<StorageObject> objects = new ArrayList<StorageObject>();
            objects.add(
                ObjectUtils.createObjectForUpload(
                    mediumFile.getName(),
                    mediumFile,
                    null, // encryptionUtil
                    false // gzipFile
                ));

            multipartUtils.uploadObjects(bucketName, service, objects, null);

            S3Object completedObject = (S3Object) service.getObjectDetails(
                bucketName, mediumFile.getName());
            assertEquals(mediumFile.length(), completedObject.getContentLength());
            // Confirm object's mimetype metadata was applied
            assertEquals("text/plain", completedObject.getContentType());

            /*
             * Upload large-sized file as object in multiple parts
             */
            objects = new ArrayList<StorageObject>();
            objects.add(
                ObjectUtils.createObjectForUpload(
                    largeFile.getName(),
                    largeFile,
                    null, // encryptionUtil
                    false // gzipFile
                ));

            multipartUtils.uploadObjects(bucketName, service, objects, null);

            completedObject = (S3Object) service.getObjectDetails(
                bucketName, largeFile.getName());
            assertEquals(largeFile.length(), completedObject.getContentLength());
        } finally {
            cleanupBucketForTest("testMultipartUtilities");
        }
View Full Code Here

            cleanupBucketForTest("testMultipartUtilities");
        }
    }

    public void testMultipartUploads() throws Exception {
        RestS3Service service = (RestS3Service) getStorageService(getCredentials());
        StorageBucket bucket = createBucketForTest("testMultipartUploads");
        String bucketName = bucket.getName();

        try {
            // Check stripping of double-quote characters from etag
            MultipartPart testEtagSanitized = new MultipartPart(
                1, new Date(), "\"fakeEtagWithDoubleQuotes\"", 0l);
            assertEquals("fakeEtagWithDoubleQuotes", testEtagSanitized.getEtag());

            // Create 5MB of test data
            int fiveMB = 5 * 1024 * 1024;
            byte[] fiveMBTestData = new byte[fiveMB];
            for (int offset = 0; offset < fiveMBTestData.length; offset++) {
                fiveMBTestData[offset] = (byte) (offset % 256);
            }

            // Define name and String metadata values for multipart upload object
            String objectKey = "multipart-object.txt";
            Map<String, Object> metadata = new HashMap<String, Object>();
            metadata.put("test-md-value", "testing, testing, 123");
            metadata.put("test-timestamp-value", System.currentTimeMillis());

            // Start a multipart upload
            MultipartUpload testMultipartUpload = service.multipartStartUpload(
                bucketName, objectKey, metadata,
                AccessControlList.REST_CANNED_AUTHENTICATED_READ, null);

            assertEquals(bucketName, testMultipartUpload.getBucketName());
            assertEquals(objectKey, testMultipartUpload.getObjectKey());

            // List all ongoing multipart uploads
            List<MultipartUpload> uploads = service.multipartListUploads(bucketName);

            assertTrue("Expected at least one ongoing upload", uploads.size() >= 1);

            // Confirm our newly-created multipart upload is present in listing
            boolean foundNewUpload = false;
            for (MultipartUpload upload: uploads) {
                if (upload.getUploadId().equals(testMultipartUpload.getUploadId())) {
                    foundNewUpload = true;
                }
            }
            assertTrue("Expected to find the new upload in listing", foundNewUpload);

            // Start a second, encrypted multipart upload
            S3Object encryptedMultiPartObject = new S3Object(objectKey + "2");
            encryptedMultiPartObject.setServerSideEncryptionAlgorithm(
                S3Object.SERVER_SIDE_ENCRYPTION__AES256);
            MultipartUpload testMultipartUpload2 =
                service.multipartStartUpload(bucketName, encryptedMultiPartObject);
            assertEquals("AES256",
                testMultipartUpload2.getMetadata().get("x-amz-server-side-encryption"));

            // List multipart uploads with markers -- Find second upload only
            uploads = service.multipartListUploads(bucketName,
                "multipart-object.txt",
                testMultipartUpload.getUploadId(),
                10);
            assertEquals(1, uploads.size());
            assertEquals(objectKey + "2", uploads.get(0).getObjectKey());

            // List multipart uploads with prefix/delimiter constraints
            MultipartUpload testMultipartUpload3 =
                service.multipartStartUpload(bucketName, objectKey + "/delimited", metadata);

            MultipartUploadChunk chunk = service.multipartListUploadsChunked(bucketName,
                "multipart-object", // prefix
                null, // delimiter
                null, null, 1000, true);
            assertEquals("multipart-object", chunk.getPrefix());
            assertEquals(null, chunk.getDelimiter());
            assertEquals(3, chunk.getUploads().length);

            chunk = service.multipartListUploadsChunked(bucketName,
                "multipart-object.txt2", // prefix
                null, // delimiter
                null, null, 1000, true);
            assertEquals("multipart-object.txt2", chunk.getPrefix());
            assertEquals(null, chunk.getDelimiter());
            assertEquals(1, chunk.getUploads().length);

            chunk = service.multipartListUploadsChunked(bucketName,
                "multipart-object", // prefix
                "/", // delimiter
                null, null, 1000, true);
            assertEquals("multipart-object", chunk.getPrefix());
            assertEquals("/", chunk.getDelimiter());
            assertEquals(2, chunk.getUploads().length);
            assertEquals(1, chunk.getCommonPrefixes().length);
            assertEquals("multipart-object.txt/", chunk.getCommonPrefixes()[0]);

            chunk = service.multipartListUploadsChunked(bucketName,
                "multipart-object", // prefix
                null, // delimiter
                null, null,
                1, // Max number of uploads to return per LIST request
                false // Do *not* complete listing, just get first chunk
                );
            assertEquals(1, chunk.getUploads().length);
            assertEquals(0, chunk.getCommonPrefixes().length);

            chunk = service.multipartListUploadsChunked(bucketName,
                "multipart-object", // prefix
                null, // delimiter
                null, null,
                1, // Max number of uploads to return per LIST request
                true // *Do* complete listing, 1 item at a time
                );
            assertEquals(3, chunk.getUploads().length);
            assertEquals(0, chunk.getCommonPrefixes().length);

            // Delete incomplete/unwanted multipart uploads
            service.multipartAbortUpload(testMultipartUpload2);
            service.multipartAbortUpload(testMultipartUpload3);

            // Ensure the incomplete multipart upload has been deleted
            uploads = service.multipartListUploads(bucketName);
            for (MultipartUpload upload: uploads) {
                if (upload.getUploadId().equals(testMultipartUpload2.getUploadId()))
                {
                    fail("Expected multipart upload " + upload.getUploadId()
                        + " to be deleted");
                }
            }

            int partNumber = 0;

            // Upload a first part, must be 5MB+
            S3Object partObject = new S3Object(
                testMultipartUpload.getObjectKey(), fiveMBTestData);
            MultipartPart uploadedPart = service.multipartUploadPart(
                testMultipartUpload, ++partNumber, partObject);
            assertEquals(uploadedPart.getPartNumber().longValue(), partNumber);
            assertEquals(uploadedPart.getEtag(), partObject.getETag());
            assertEquals(uploadedPart.getSize().longValue(), partObject.getContentLength());

            // List multipart parts that have been received by the service
            List<MultipartPart> listedParts = service.multipartListParts(testMultipartUpload);
            assertEquals(listedParts.size(), 1);
            assertEquals(listedParts.get(0).getSize().longValue(), partObject.getContentLength());

            // Upload a second part by copying an object already in S3, must be >= 5 MB
            S3Object objectToCopy = service.putObject(bucketName,
                new S3Object("objectToCopy.txt", fiveMBTestData));
            MultipartPart copiedPart = service.multipartUploadPartCopy(testMultipartUpload,
                ++partNumber, bucketName, objectToCopy.getKey());
            assertEquals(copiedPart.getPartNumber().longValue(), partNumber);
            assertEquals(copiedPart.getEtag(), objectToCopy.getETag());
            // Note: result part from copy operation does *not* include correct part size, due
            // to lack of this info in the CopyPartResult XML response.
            // assertEquals(copiedPart.getSize().longValue(), partObject.getContentLength());

            // List multipart parts that have been received by the service
            listedParts = service.multipartListParts(testMultipartUpload);
            assertEquals(listedParts.size(), 2);
            assertEquals(listedParts.get(1).getSize().longValue(), objectToCopy.getContentLength());

            // TODO Test multipart upload copy with version ID
            // TODO Test multipart upload copy with byte range (need object >= 5 GB !)
            // TODO Test multipart upload copy with ETag (mis)match test
            // TODO Test multipart upload copy with (un)modified since test

            // Upload a third and final part, can be as small as 1 byte
            partObject = new S3Object(
                testMultipartUpload.getObjectKey(), new byte[] {fiveMBTestData[0]});
            uploadedPart = service.multipartUploadPart(
                testMultipartUpload, ++partNumber, partObject);
            assertEquals(uploadedPart.getPartNumber().longValue(), partNumber);
            assertEquals(uploadedPart.getEtag(), partObject.getETag());
            assertEquals(uploadedPart.getSize().longValue(), partObject.getContentLength());

            // List multipart parts that have been received by the service
            listedParts = service.multipartListParts(testMultipartUpload);
            assertEquals(listedParts.size(), 3);
            assertEquals(listedParts.get(2).getSize().longValue(), partObject.getContentLength());

            // Reverse order of parts to ensure multipartCompleteUpload corrects the problem
            Collections.reverse(listedParts);

            // Complete multipart upload, despite badly ordered parts.
            MultipartCompleted multipartCompleted = service.multipartCompleteUpload(
                testMultipartUpload, listedParts);
            assertEquals(multipartCompleted.getBucketName(), testMultipartUpload.getBucketName());
            assertEquals(multipartCompleted.getObjectKey(), testMultipartUpload.getObjectKey());

            // Confirm completed object exists and has expected size, metadata
            S3Object completedObject = (S3Object) service.getObjectDetails(
                bucketName, testMultipartUpload.getObjectKey());
            assertEquals(completedObject.getContentLength(), fiveMBTestData.length * 2 + 1);
            assertEquals(
                metadata.get("test-md-value"),
                completedObject.getMetadata("test-md-value"));
            assertEquals(
                metadata.get("test-timestamp-value").toString(),
                completedObject.getMetadata("test-timestamp-value").toString());
            // Confirm completed object has expected canned ACL settings
            AccessControlList completedObjectACL =
                service.getObjectAcl(bucketName, testMultipartUpload.getObjectKey());
            assertTrue(completedObjectACL.hasGranteeAndPermission(
                GroupGrantee.AUTHENTICATED_USERS, Permission.PERMISSION_READ));
        } finally {
            cleanupBucketForTest("testMultipartUploads");
        }
View Full Code Here

            cleanupBucketForTest("testMultipartUploads");
        }
    }

    public void testMultipartUploadWithConvenienceMethod() throws Exception {
        RestS3Service service = (RestS3Service) getStorageService(getCredentials());
        StorageBucket bucket = createBucketForTest("testMultipartUploadWithConvenienceMethod");
        String bucketName = bucket.getName();

        try {
            int fiveMB = 5 * 1024 * 1024;

            byte[] testDataOverLimit = new byte[fiveMB + 100];
            for (int i = 0; i < testDataOverLimit.length; i++) {
                testDataOverLimit[i] = (byte) (i % 256);
            }

            // Confirm that non-file-based objects are not accepted
            try {
                StorageObject myObject = new StorageObject();
                service.putObjectMaybeAsMultipart(bucketName, myObject, fiveMB);
                fail("");
            } catch (ServiceException se) {
            }

            // Create file for testing
            File testDataFile = File.createTempFile("JetS3t-testMultipartUploadWithConvenienceMethod", ".txt");
            BufferedOutputStream bos = new BufferedOutputStream(new FileOutputStream(testDataFile));
            bos.write(testDataOverLimit);
            bos.close();
            testDataOverLimit = null; // Free up a some memory

            // Setup non-canned ACL
            AccessControlList testACL = buildAccessControlList();
            testACL.setOwner(service.getAccountOwner());
            testACL.grantPermission(GroupGrantee.AUTHENTICATED_USERS, Permission.PERMISSION_READ);

            // Setup file-based object
            StorageObject objectViaConvenienceMethod = new StorageObject(testDataFile);
            objectViaConvenienceMethod.setKey("multipart-object-via-convenience-method.txt");
            objectViaConvenienceMethod.addMetadata("my-metadata", "convenient? yes!");
            objectViaConvenienceMethod.setAcl(testACL);
            objectViaConvenienceMethod.setStorageClass(S3Object.STORAGE_CLASS_REDUCED_REDUNDANCY);

            // Upload object
            service.putObjectMaybeAsMultipart(bucketName, objectViaConvenienceMethod, fiveMB);

            // Confirm completed object exists and has expected metadata
            objectViaConvenienceMethod = service.getObjectDetails(
                bucketName, objectViaConvenienceMethod.getKey());
            assertEquals(
                "convenient? yes!",
                objectViaConvenienceMethod.getMetadata("my-metadata"));

            // Confirm custom ACL was applied automatically
            AccessControlList aclViaConvenienceMethod = service.getObjectAcl(
                bucketName, objectViaConvenienceMethod.getKey());
            assertEquals(
                testACL.getPermissionsForGrantee(GroupGrantee.AUTHENTICATED_USERS),
                aclViaConvenienceMethod.getPermissionsForGrantee(GroupGrantee.AUTHENTICATED_USERS));

            // Confirm completed object was indeed uploaded as a multipart upload,
            // not a standard PUT (ETag is not a valid MD5 hash in this case)
            assertFalse(ServiceUtils.isEtagAlsoAnMD5Hash(
                objectViaConvenienceMethod.getETag()));

            /*
             * Perform a threaded multipart upload
             */
            String objectKeyForThreaded = "threaded-multipart-object.txt";
            Map<String, Object> metadataForThreaded = new HashMap<String, Object>();

            // Start threaded upload using normal service.
            MultipartUpload threadedMultipartUpload =
                service.multipartStartUpload(bucketName, objectKeyForThreaded, metadataForThreaded);

            // Create 5MB of test data
            byte[] fiveMBTestData = new byte[fiveMB];
            for (int offset = 0; offset < fiveMBTestData.length; offset++) {
                fiveMBTestData[offset] = (byte) (offset % 256);
            }

            // Prepare objects for upload (2 * 5MB, and 1 * 1 byte)
            S3Object[] objectsForThreadedUpload = new S3Object[] {
                new S3Object(threadedMultipartUpload.getObjectKey(), fiveMBTestData),
                new S3Object(threadedMultipartUpload.getObjectKey(), fiveMBTestData),
                new S3Object(threadedMultipartUpload.getObjectKey(), new byte[] {fiveMBTestData[0]}),
            };

            // Create threaded service and perform upload in multiple threads
            ThreadedS3Service threadedS3Service = new ThreadedS3Service(service,
                new S3ServiceEventAdaptor());
            List<MultipartUploadAndParts> uploadAndParts = new ArrayList<MultipartUploadAndParts>();
            uploadAndParts.add(new MultipartUploadAndParts(
                threadedMultipartUpload, Arrays.asList(objectsForThreadedUpload)));
            threadedS3Service.multipartUploadParts(uploadAndParts);

            // Complete threaded multipart upload using automatic part listing and normal service.
            MultipartCompleted threadedMultipartCompleted = service.multipartCompleteUpload(
                threadedMultipartUpload);

            // Confirm completed object exists and has expected size
            S3Object finalObjectForThreaded = (S3Object) service.getObjectDetails(
                bucketName, threadedMultipartUpload.getObjectKey());
            assertEquals(fiveMB * 2 + 1, finalObjectForThreaded.getContentLength());
        } finally {
            cleanupBucketForTest("testMultipartUploadWithConvenienceMethod");
        }
View Full Code Here

TOP

Related Classes of org.jets3t.service.impl.rest.httpclient.RestS3Service

Copyright © 2018 www.massapi.com. All rights reserved.
All source code are property of their respective owners. Java is a trademark of Sun Microsystems, Inc and owned by ORACLE Inc. Contact coftware#gmail.com.