Package com.cloud.bridge.service

Examples of com.cloud.bridge.service.S3BucketAdapter
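
The snippets below all exercise one adapter surface. As a quick orientation, here is a sketch of that surface reconstructed purely from the call sites on this page; parameter names, parameter order, and the DataHandler return type are assumptions, not the authoritative interface.

    import java.io.InputStream;
    import java.io.OutputStream;
    import javax.activation.DataHandler;

    // Reconstructed sketch -- the method set is taken from the call sites below
    public interface S3BucketAdapter {
        void createContainer(String mountedRoot, String bucket);
        void deleteContainer(String mountedRoot, String bucket);

        // Persists the stream and returns its MD5 checksum (used as the ETag)
        String saveObject(InputStream is, String mountedRoot, String bucket, String fileName);

        DataHandler loadObject(String mountedRoot, String bucket, String fileName);
        DataHandler loadObjectRange(String mountedRoot, String bucket, String fileName,
                                    long startPos, long endPos);

        void deleteObject(String mountedRoot, String bucket, String fileName);

        // Re-assembles uploaded parts into one object, streaming the result to 'client';
        // returns the final (ETag, length) pair
        Tuple<String, Long> concatentateObjects(String mountedRoot, String bucket, String fileName,
                                                String sourceDir, S3MultipartPart[] parts, OutputStream client);
    }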


        success = true;
      }
      finally
      {
        // Roll back: if creation failed part-way, remove the container from the storage host
        if (!success && shostTuple != null) {
          S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(shostTuple.getFirst());
          bucketAdapter.deleteContainer(shostTuple.getSecond(), request.getBucketName());
        }
        // Always release the named lock taken for bucket creation
        PersistContext.releaseNamedLock("bucket.creation");
      }

    } else {


       }

      
       // -> delete the file
       Tuple<SHost, String> tupleBucketHost = getBucketStorageHost(sbucket);
       S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(tupleBucketHost.getFirst());
       bucketAdapter.deleteContainer(tupleBucketHost.getSecond(), request.getBucketName());

       // -> cascade-deleting removes related SObject/SObjectItem rows, but not SAcl, SMeta
       //    and policy objects, so those must be deleted manually:
       //    (1) get all the objects in the bucket, (2) then all the items in each object,
       //    (3) then all meta & ACL data for each item
       Set<SObject> objectsInBucket = sbucket.getObjectsInBucket();
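The comment above outlines the manual cleanup that cascade-delete cannot do. A minimal sketch of the loop it describes, assuming SObject exposes its items via getItems() and that deleteMetaData()/deleteObjectAcls() helpers exist (the names are illustrative, not confirmed by this listing):

    // Walk every object in the bucket, then every item of each object, removing
    // the SMeta and SAcl rows that cascade-delete does not reach
    for (SObject oneObject : objectsInBucket) {
        for (SObjectItem oneItem : oneObject.getItems()) {
            deleteMetaData(oneItem.getId());                  // SMeta rows keyed by the item
            deleteObjectAcls("SObjectItem", oneItem.getId()); // SAcl rows keyed by the item
        }
    }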

      logger.error( "initiateMultipartUpload failed since " + bucketName + " does not exist" );
      return 404;
    }
 
    Tuple<SHost, String> tupleBucketHost = getBucketStorageHost(bucket);   
    S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(tupleBucketHost.getFirst());

    try {
      MultipartLoadDao uploadDao = new MultipartLoadDao();
      Tuple<String,String> exists = uploadDao.multipartExits( uploadId );
      if (null == exists) {
        logger.error( "initiateMultipartUpload failed since multipart upload " + uploadId + " does not exist" );
        return 404;
      }

      // -> the multipart initiator or bucket owner can do this action by default
      if (verifyPermission)
      {
        String initiator = uploadDao.getInitiator( uploadId );
        if (null == initiator || !initiator.equals( UserContext.current().getAccessKey()))
        {
          // -> write permission on a bucket allows a PutObject / DeleteObject action on any object in the bucket
          S3PolicyContext context = new S3PolicyContext( PolicyActions.AbortMultipartUpload, bucketName );
          context.setKeyName( exists.getSecond());
          verifyAccess( context, "SBucket", bucket.getId(), SAcl.PERMISSION_WRITE );
        }
      }

      // -> get the list of uploaded parts and delete them one by one
      //    (S3 caps a multipart upload at 10,000 parts, so a single query covers them all)
      S3MultipartPart[] parts = uploadDao.getParts( uploadId, 10000, 0 );
      for( int i=0; i < parts.length; i++ )
      {
        bucketAdapter.deleteObject( tupleBucketHost.getSecond(), ServiceProvider.getInstance().getMultipartDir(), parts[i].getPath());
      }

      uploadDao.deleteUpload( uploadId );
      return 204;

    S3PolicyContext context = new S3PolicyContext( PolicyActions.PutObject, bucketName );
    context.setKeyName( request.getKey());
    verifyAccess( context, "SBucket", bucket.getId(), SAcl.PERMISSION_WRITE );
   
    Tuple<SHost, String> tupleBucketHost = getBucketStorageHost(bucket);
    S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(tupleBucketHost.getFirst());
    // Each uploaded part is stored under a "<uploadId>-<partNumber>" file name
    String itemFileName = uploadId + "-" + partNumber;
    InputStream is = null;

    try {
      is = request.getDataInputStream();
      String md5Checksum = bucketAdapter.saveObject(is, tupleBucketHost.getSecond(), ServiceProvider.getInstance().getMultipartDir(), itemFileName);
      response.setETag(md5Checksum);

      MultipartLoadDao uploadDao = new MultipartLoadDao();
      uploadDao.savePart( uploadId, partNumber, md5Checksum, itemFileName, (int)request.getContentLength());
      response.setResultCode(200);

    // [B] Now we need to create the final re-assembled object
    //  -> the allocObjectItem checks for the bucket policy PutObject permissions
    Tuple<SObject, SObjectItem> tupleObjectItem = allocObjectItem(bucket, key, meta, null, request.getCannedAccess());
    Tuple<SHost, String> tupleBucketHost = getBucketStorageHost(bucket);   
   
    S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(tupleBucketHost.getFirst());
    String itemFileName = tupleObjectItem.getSecond().getStoredPath();
   
    // -> Amazon requires that we return a 200 response immediately to the client, but
    // -> we don't know the version header until we get here
    httpResp.setStatus(200);
    httpResp.setContentType("text/xml; charset=UTF-8");
    String version = tupleObjectItem.getSecond().getVersion();
    if (null != version) httpResp.addHeader( "x-amz-version-id", version );
    httpResp.flushBuffer();
   

    // [C] Re-assemble the object from its uploaded file parts
    try {
      // explicit transaction control to avoid holding transaction during long file concatenation process
      PersistContext.commitTransaction();
     
      Tuple<String, Long> result = bucketAdapter.concatentateObjects( tupleBucketHost.getSecond(), bucket.getName(), itemFileName, ServiceProvider.getInstance().getMultipartDir(), parts, os );
      response.setETag(result.getFirst());
      response.setLastModified(DateHelper.toCalendar( tupleObjectItem.getSecond().getLastModifiedTime()));
   
      SObjectItemDao itemDao = new SObjectItemDao();
      SObjectItem item = itemDao.get( tupleObjectItem.getSecond().getId());
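Since the 200 status and headers are flushed before concatenation starts, the XML result body has to be written to the already-open output stream once the final ETag is known. A sketch of that last step, using a hypothetical helper; the element layout follows Amazon's documented CompleteMultipartUploadResult, not code shown in this listing:

    import java.io.IOException;
    import java.io.OutputStream;

    // Hypothetical helper: emit the CompleteMultipartUploadResult body on the
    // response stream that was flushed earlier with the 200 status line
    public final class MultipartResultWriter {
        public static void write(OutputStream os, String location, String bucket,
                                 String key, String eTag) throws IOException {
            StringBuilder xml = new StringBuilder();
            xml.append("<CompleteMultipartUploadResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">");
            xml.append("<Location>").append(location).append("</Location>");
            xml.append("<Bucket>").append(bucket).append("</Bucket>");
            xml.append("<Key>").append(key).append("</Key>");
            xml.append("<ETag>\"").append(eTag).append("\"</ETag>");
            xml.append("</CompleteMultipartUploadResult>");
            os.write(xml.toString().getBytes("UTF-8"));
            os.flush();
        }
    }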

    // -> is the caller allowed to write the object?
    //  -> the allocObjectItem checks for the bucket policy PutObject permissions
    Tuple<SObject, SObjectItem> tupleObjectItem = allocObjectItem(bucket, key, meta, acl, request.getCannedAccess());
    Tuple<SHost, String> tupleBucketHost = getBucketStorageHost(bucket);   
   
    S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(tupleBucketHost.getFirst());
    String itemFileName = tupleObjectItem.getSecond().getStoredPath();
    InputStream is = null;

    try {
      // explicit transaction control to avoid holding transaction during file-copy process
      PersistContext.commitTransaction();
     
      is = request.getDataInputStream();
      String md5Checksum = bucketAdapter.saveObject(is, tupleBucketHost.getSecond(), bucket.getName(), itemFileName);
      response.setETag(md5Checksum);
      response.setLastModified(DateHelper.toCalendar( tupleObjectItem.getSecond().getLastModifiedTime()));
      response.setVersion( tupleObjectItem.getSecond().getVersion());
   
      SObjectItemDao itemDao = new SObjectItemDao();

    // -> is the caller allowed to write the object? 
    //  -> the allocObjectItem checks for the bucket policy PutObject permissions
    Tuple<SObject, SObjectItem> tupleObjectItem = allocObjectItem(bucket, key, meta, acl, null);
    Tuple<SHost, String> tupleBucketHost = getBucketStorageHost(bucket);
     
    S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(tupleBucketHost.getFirst());
    String itemFileName = tupleObjectItem.getSecond().getStoredPath();
    InputStream is = null;
    try {
      // explicit transaction control to avoid holding transaction during file-copy process
      PersistContext.commitTransaction();
     
      is = request.getInputStream();
      String md5Checksum = bucketAdapter.saveObject(is, tupleBucketHost.getSecond(), bucket.getName(), itemFileName);
      response.setETag(md5Checksum);
      response.setLastModified(DateHelper.toCalendar( tupleObjectItem.getSecond().getLastModifiedTime()));
     
      SObjectItemDao itemDao = new SObjectItemDao();
      SObjectItem item = itemDao.get( tupleObjectItem.getSecond().getId());

        response.setLastModified(DateHelper.toCalendar( item.getLastModifiedTime()));
        response.setVersion( item.getVersion());
        if (request.isInlineData())
        {
          Tuple<SHost, String> tupleSHostInfo = getBucketStorageHost(sbucket);
          S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(tupleSHostInfo.getFirst());

          // A byte range was requested -> return just that slice, otherwise the whole object
          if ( 0 <= bytesStart && 0 <= bytesEnd )
               response.setData(bucketAdapter.loadObjectRange(tupleSHostInfo.getSecond(),
                   request.getBucketName(), item.getStoredPath(), bytesStart, bytesEnd ));
          else response.setData(bucketAdapter.loadObject(tupleSHostInfo.getSecond(), request.getBucketName(), item.getStoredPath()));
        }
      }
     
      response.setResultCode( resultCode );
      response.setResultDescription("OK");
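The bytesStart/bytesEnd pair passed to loadObjectRange() above comes from the request's Range header, whose parsing is not shown in this listing. A minimal, self-contained sketch of the simple "bytes=start-end" case (suffix and open-ended ranges are deliberately left out):

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    // Illustrative only: turn "bytes=0-499" into the {start, end} pair the
    // handler feeds to loadObjectRange(); returns null when the whole object
    // should be served instead
    public final class RangeHeader {
        private static final Pattern RANGE = Pattern.compile("bytes=(\\d+)-(\\d+)");

        public static long[] parse(String header) {
            if (header == null) return null;          // no Range header
            Matcher m = RANGE.matcher(header.trim());
            if (!m.matches()) return null;            // unsupported range form
            long start = Long.parseLong(m.group(1));
            long end = Long.parseLong(m.group(2));
            return (start <= end) ? new long[] { start, end } : null;
        }
    }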

   
    // -> delete the file holding the object
    if (null != storedPath)
    {
       Tuple<SHost, String> tupleBucketHost = getBucketStorageHost( sbucket );
       S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter( tupleBucketHost.getFirst());
       bucketAdapter.deleteObject( tupleBucketHost.getSecond(), bucketName, storedPath );
    }
   
    response.setResultCode(204);
    return response;
    }

     
    if(mhost.getMounts().size() > 0) {
      // Pick one of the host's mounts at random to spread buckets across mounts.
      // Use the typed toArray overload: casting the raw toArray() result (Object[])
      // to MHostMount[] would throw ClassCastException at runtime
      Random random = new Random();
      MHostMount[] mounts = (MHostMount[])mhost.getMounts().toArray(new MHostMount[0]);
      MHostMount mount = mounts[random.nextInt(mounts.length)];
      S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(mount.getShost());
      bucketAdapter.createContainer(mount.getMountPath(), (null != overrideName ? overrideName : bucketName));
      return new Tuple<SHost, String>(mount.getShost(), mount.getMountPath());
    }
    }
   
    // To make things simple, only allow one local mounted storage root
    String localStorageRoot = ServiceProvider.getInstance().getStartupProperties().getProperty("storage.root");
    if(localStorageRoot != null) {
      SHost localSHost = shostDao.getLocalStorageHost(mhost.getId(), localStorageRoot);
      if(localSHost == null)
        throw new InternalErrorException("storage.root is configured but not initialized");
     
      S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(localSHost);
      bucketAdapter.createContainer(localSHost.getExportRoot(), (null != overrideName ? overrideName : bucketName));
      return new Tuple<SHost, String>(localSHost, localStorageRoot);
    }
   
    throw new OutOfStorageException("No storage host is available");
  }
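A note on the toArray call above: the no-argument Set.toArray() returns Object[], and casting that result to MHostMount[] fails at runtime, which is why the typed overload is used. A small stand-alone demonstration (the Item class here is a stand-in, not part of the bridge code):

    import java.util.HashSet;
    import java.util.Set;

    public class ToArrayDemo {
        static class Item { }

        public static void main(String[] args) {
            Set<Item> items = new HashSet<Item>();
            items.add(new Item());

            // Throws ClassCastException: toArray() returns Object[], not Item[]
            // Item[] bad = (Item[]) items.toArray();

            // Safe: the typed overload yields an array whose runtime type is Item[]
            Item[] ok = items.toArray(new Item[0]);
            System.out.println(ok.length); // prints 1
        }
    }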
