Package com.netflix.astyanax.recipes.storage

Examples of com.netflix.astyanax.recipes.storage.ObjectMetadata
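ObjectMetadata is the descriptor returned by the ChunkedStorage builders (newWriter, newInfoReader, newReader). It reports the stored object's total size and chunk count, which callers typically use to size read buffers or to verify that a write succeeded. A minimal sketch of querying it, assuming provider is an already-configured ChunkedStorageProvider (for example a CassandraChunkedStorageProvider) and an object named "MyObject" has already been stored:

    ObjectMetadata meta = ChunkedStorage.newInfoReader(provider, "MyObject").call();
    long size   = meta.getObjectSize();     // total size of the stored object, in bytes
    int  chunks = meta.getChunkCount();     // number of chunks the object was split into

The first example below pulls a chunked object into a local index.zip file, unzips it, and logs the elapsed time together with the size reported by ObjectMetadata.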


    if (!outDir.isDirectory())
    {
      throw new ISE("outDir[%s] must be a directory.", outDir);
    }

    long startTime = System.currentTimeMillis();
    ObjectMetadata meta = null;
    final File outFile = new File(outDir, "index.zip");
    try
    {
      try
      {
        log.info("Writing to [%s]", outFile.getAbsolutePath());
        OutputStream os = Files.newOutputStreamSupplier(outFile).getOutput();
        meta = ChunkedStorage
            .newReader(indexStorage, key, os)
            .withBatchSize(BATCH_SIZE)
            .withConcurrencyLevel(CONCURRENCY)
            .call();
        os.close();
        CompressionUtils.unzip(outFile, outDir);
      } catch (Exception e)
      {
        FileUtils.deleteDirectory(outDir);
        throw e;   // clean up the partial download before surfacing the failure
      }
    } catch (Exception e)
    {
      throw new SegmentLoadingException(e, e.getMessage());
    }
    log.info("Pull of file[%s] completed in %,d millis (%s bytes)", key, System.currentTimeMillis() - startTime,
        meta.getObjectSize());
  }
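The next two examples write a classpath resource to chunked storage and use the ObjectMetadata returned by the writer to size a buffer for reading the resource back; the first stores a binary file, the second a text file.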


    InputStream fis = null;
    InputStream bis = null;
    try {
      fis = this.getClass().getClassLoader().getResource(FILEBIN)
          .openStream();
      ObjectMetadata meta = ChunkedStorage
          .newWriter(provider, OBJBIN, fis)
          .withChunkSize(0x1000)
          .withConcurrencyLevel(8)
          .withTtl(60)              // optional TTL for the entire object
          .call();
      Long writesize = meta.getObjectSize();
      // Long readsize = readChunked("myks","chunks","test1");
      byte[] written = new byte[writesize.intValue()];
      bis = this.getClass().getClassLoader().getResource(FILEBIN).openStream();
      int i1 = bis.read(written, 0, writesize.intValue());   // assumed completion: read back as many bytes as were written

    InputStream fis = null;
    InputStream bis = null;
    try {
      fis = this.getClass().getClassLoader().getResource(FILEASC)
          .openStream();
      ObjectMetadata meta = ChunkedStorage
          .newWriter(provider, OBJASC, fis)
          .withChunkSize(0x1000)
          .withConcurrencyLevel(8)
          .withTtl(60)              // optional TTL for the entire object
          .call();
      Long writesize = meta.getObjectSize();
      // Long readsize = readChunked("myks","chunks","test1");
      byte[] written = new byte[writesize.intValue()];
      bis = this.getClass().getClassLoader().getResource("chunktest.html").openStream();
      int i1 = bis.read(written, 0, writesize.intValue());   // assumed completion: read back as many bytes as were written
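Below, a read helper fetches the ObjectMetadata first to size an in-memory buffer, then streams the whole object into it.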

  public byte[] readChunked(String db, String table, String objName)
      throws Exception {
    ChunkedStorageProvider provider = new CassandraChunkedStorageProvider(
        keyspace, table);
    ObjectMetadata meta = ChunkedStorage.newInfoReader(provider, objName)
        .call();
    ByteArrayOutputStream os = new ByteArrayOutputStream(meta.getObjectSize().intValue());
    meta = ChunkedStorage.newReader(provider, objName, os)
        .withBatchSize(10)
        .call();
    return os.toByteArray();   // os is always non-null at this point
  }
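The matching write helper below stores an InputStream under objectName and uses the returned ObjectMetadata to check that a non-empty object was written.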

  public String writeChunked(String db, String table, String objectName,
      InputStream is) {
    ChunkedStorageProvider provider = new CassandraChunkedStorageProvider(
        keyspace, table);
    ObjectMetadata meta;
    try {
//      if (is!=null) is.reset();
      meta = ChunkedStorage.newWriter(provider, objectName, is)
          .withChunkSize(0x40000).withConcurrencyLevel(8)
          .withMaxWaitTime(10).call();
      if (meta != null && meta.getObjectSize() <= 0)
        throw new RuntimeException("Object does not exist");
    } catch (Exception e) {
      e.printStackTrace();
      throw new RuntimeException(e);   // wrap the original exception so the cause is not lost
    }
    return objectName;   // assumed: the original return value is not shown here
  }
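A variant of the read helper follows, returning the ByteArrayOutputStream itself rather than a byte array.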

  public ByteArrayOutputStream readChunked(String db, String table, String objName) {
    ChunkedStorageProvider provider = new CassandraChunkedStorageProvider(
        keyspace, table);
    ObjectMetadata meta;
    ByteArrayOutputStream os = null;
    try {
      meta = ChunkedStorage.newInfoReader(provider, objName).call();
      os = new ByteArrayOutputStream(meta.getObjectSize().intValue());
      meta = ChunkedStorage.newReader(provider, objName, os)
          .withConcurrencyLevel(8).withMaxWaitTime(10)
          .withBatchSize(10).call();
    } catch (Exception e) {
      e.printStackTrace();
    }
    return os;   // assumed: return the populated stream (still null if the metadata read failed)
  }
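The last example is a complete round trip: write an object, inspect its metadata, read it back and verify the contents, then delete it and confirm that every chunk row has been removed.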

    String input = sb.toString();

    ByteArrayInputStream in = new ByteArrayInputStream(input.getBytes());

    ObjectMetadata meta = ChunkedStorage.newWriter(provider, "MyObject", in)
        .withChunkSize(100)
        .call();

    meta = ChunkedStorage.newInfoReader(provider, "MyObject").call();
    System.out.println("Obj size: " + meta.getObjectSize().intValue());
    System.out.println("Chunk count: " + meta.getChunkCount());

    ByteArrayOutputStream os = new ByteArrayOutputStream(meta.getObjectSize().intValue());

    meta = ChunkedStorage.newReader(provider, "MyObject", os)
        .withBatchSize(11)       // Randomize fetching blocks within a batch.
        .withConcurrencyLevel(3)
        .call();

    String output = os.toString();

    Assert.assertEquals(input, output);

    ChunkedStorage.newDeleter(provider, "MyObject").call();

    for (int i=0; i<meta.getChunkCount(); i++) {
      ColumnList<String> result = keyspace.prepareQuery(CF_CHUNK).getKey("MyObject$" + i).execute().getResult();
      Assert.assertTrue(result.isEmpty());
    }
   
  }

   
