Examples of org.apache.accumulo.core.cli.BatchWriterOpts
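
Every snippet below follows the same basic pattern: a BatchWriterOpts instance is parsed alongside the client options, and the resulting bwOpts.getBatchWriterConfig() is handed to createBatchWriter(), so the command line controls the writer's tuning (typically maximum memory, maximum latency, and number of write threads). Here is a minimal, self-contained sketch of that pattern; it is not taken from any one example, the class name BatchWriterOptsSketch and the row/column values are placeholders, and it assumes the Accumulo 1.x client API used throughout these snippets.

  import org.apache.accumulo.core.cli.BatchWriterOpts;
  import org.apache.accumulo.core.cli.ClientOnRequiredTable;
  import org.apache.accumulo.core.client.BatchWriter;
  import org.apache.accumulo.core.client.Connector;
  import org.apache.accumulo.core.data.Mutation;
  import org.apache.accumulo.core.data.Value;
  import org.apache.hadoop.io.Text;

  public class BatchWriterOptsSketch {
    public static void main(String[] args) throws Exception {
      // ClientOnRequiredTable supplies the connection options plus a required table name;
      // BatchWriterOpts adds the batch writer tuning options parsed from the same command line.
      ClientOnRequiredTable opts = new ClientOnRequiredTable();
      BatchWriterOpts bwOpts = new BatchWriterOpts();
      opts.parseArgs(BatchWriterOptsSketch.class.getName(), args, bwOpts);

      Connector conn = opts.getConnector();
      // The parsed options become a BatchWriterConfig for this writer.
      BatchWriter bw = conn.createBatchWriter(opts.tableName, bwOpts.getBatchWriterConfig());
      try {
        Mutation m = new Mutation(new Text("row1"));
        m.put(new Text("column"), new Text("1"), new Value("value".getBytes()));
        bw.addMutation(m);
      } finally {
        bw.close(); // flushes any buffered mutations
      }
    }
  }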

Example of BatchWriterOpts in FileCount.main (parsed together with ScannerOpts)

  public static void main(String[] args) throws Exception {
    Opts opts = new Opts();
    ScannerOpts scanOpts = new ScannerOpts();
    BatchWriterOpts bwOpts = new BatchWriterOpts();
    String programName = FileCount.class.getName();
    opts.parseArgs(programName, args, scanOpts, bwOpts);

    FileCount fileCount = new FileCount(opts, scanOpts, bwOpts);
    fileCount.run();

Example of BatchWriterOpts in Ingest.main (one configuration shared by three BatchWriters)

  public static void main(String[] args) throws Exception {
    Opts opts = new Opts();
    BatchWriterOpts bwOpts = new BatchWriterOpts();
    opts.parseArgs(Ingest.class.getName(), args, bwOpts);
   
    Connector conn = opts.getConnector();
    if (!conn.tableOperations().exists(opts.nameTable))
      conn.tableOperations().create(opts.nameTable);
    if (!conn.tableOperations().exists(opts.indexTable))
      conn.tableOperations().create(opts.indexTable);
    if (!conn.tableOperations().exists(opts.dataTable)) {
      conn.tableOperations().create(opts.dataTable);
      conn.tableOperations().attachIterator(opts.dataTable, new IteratorSetting(1, ChunkCombiner.class));
    }
   
    BatchWriter dirBW = conn.createBatchWriter(opts.nameTable, bwOpts.getBatchWriterConfig());
    BatchWriter indexBW = conn.createBatchWriter(opts.indexTable, bwOpts.getBatchWriterConfig());
    BatchWriter dataBW = conn.createBatchWriter(opts.dataTable, bwOpts.getBatchWriterConfig());
    FileDataIngest fdi = new FileDataIngest(opts.chunkSize, opts.visibility);
    for (String dir : opts.directories) {
      recurse(new File(dir), opts.visibility, dirBW, indexBW, fdi, dataBW);
     
      // fill in parent directory info

Example of BatchWriterOpts in SequentialBatchWriter.main (a sketch of one generated entry follows the snippet)

  /**
   * Writes a specified number of entries to Accumulo using a {@link BatchWriter}. The rows of the entries will be sequential, starting at a specified number.
   * The column family will be "foo" and the column qualifier will be "1". The values will be random byte arrays of a specified size.
   */
  public static void main(String[] args) throws AccumuloException, AccumuloSecurityException, TableNotFoundException, MutationsRejectedException {
    Opts opts = new Opts();
    BatchWriterOpts bwOpts = new BatchWriterOpts();
    opts.parseArgs(SequentialBatchWriter.class.getName(), args, bwOpts);
    Connector connector = opts.getConnector();
    BatchWriter bw = connector.createBatchWriter(opts.tableName, bwOpts.getBatchWriterConfig());
   
    long end = opts.start + opts.num;
   
    for (long i = opts.start; i < end; i++) {
      Mutation m = RandomBatchWriter.createMutation(i, opts.valueSize, opts.vis);
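
The loop is truncated here; per the Javadoc, each generated entry uses column family "foo", qualifier "1", and a random value of opts.valueSize bytes. Below is a hedged sketch of building one such mutation by hand. It is illustrative only, not the actual RandomBatchWriter.createMutation implementation, the row formatting is an assumption, and it presumes the same Mutation, Text, and Value imports used above.

  // Illustrative only: roughly the entry shape described in the Javadoc above
  // (family "foo", qualifier "1", random payload). Not the real createMutation.
  static Mutation exampleMutation(long row, int valueSize) {
    byte[] value = new byte[valueSize];
    new java.util.Random().nextBytes(value);                              // random payload of the requested size
    Mutation m = new Mutation(new Text(String.format("row_%010d", row))); // row format is an assumption
    m.put(new Text("foo"), new Text("1"), new Value(value));
    return m;
  }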

Example of BatchWriterOpts in InsertWithBatchWriter (used to configure a MultiTableBatchWriter)

public class InsertWithBatchWriter {
 
  public static void main(String[] args) throws AccumuloException, AccumuloSecurityException, MutationsRejectedException, TableExistsException,
      TableNotFoundException {
    ClientOnRequiredTable opts = new ClientOnRequiredTable();
    BatchWriterOpts bwOpts = new BatchWriterOpts();
    opts.parseArgs(InsertWithBatchWriter.class.getName(), args, bwOpts);
   
    Connector connector = opts.getConnector();
    MultiTableBatchWriter mtbw = connector.createMultiTableBatchWriter(bwOpts.getBatchWriterConfig());
   
    if (!connector.tableOperations().exists(opts.tableName))
      connector.tableOperations().create(opts.tableName);
    BatchWriter bw = mtbw.getBatchWriter(opts.tableName);
   

Example of BatchWriterOpts in FileDataIngest.main

  public static void main(String[] args) throws Exception {
    Opts opts = new Opts();
    BatchWriterOpts bwOpts = new BatchWriterOpts();
    opts.parseArgs(FileDataIngest.class.getName(), args, bwOpts);
   
    Connector conn = opts.getConnector();
    if (!conn.tableOperations().exists(opts.tableName)) {
      conn.tableOperations().create(opts.tableName);
      conn.tableOperations().attachIterator(opts.tableName, new IteratorSetting(1, ChunkCombiner.class));
    }
    BatchWriter bw = conn.createBatchWriter(opts.tableName, bwOpts.getBatchWriterConfig());
    FileDataIngest fdi = new FileDataIngest(opts.chunkSize, opts.visibility);
    for (String filename : opts.files) {
      fdi.insertFileData(filename, bw);
    }
    bw.close();

Example of BatchWriterOpts in RowOperations.main (parsed together with ScannerOpts)

  public static void main(String[] args) throws AccumuloException, AccumuloSecurityException, TableExistsException, TableNotFoundException,
      MutationsRejectedException {
   
    ClientOpts opts = new ClientOpts();
    ScannerOpts scanOpts = new ScannerOpts();
    BatchWriterOpts bwOpts = new BatchWriterOpts();
    opts.parseArgs(RowOperations.class.getName(), args, scanOpts, bwOpts);
   
    // First the setup work
    connector = opts.getConnector();
   
    // lets create an example table
    connector.tableOperations().create(table);
   
    // lets create 3 rows of information
    Text row1 = new Text("row1");
    Text row2 = new Text("row2");
    Text row3 = new Text("row3");
   
    // Which means 3 different mutations
    Mutation mut1 = new Mutation(row1);
    Mutation mut2 = new Mutation(row2);
    Mutation mut3 = new Mutation(row3);
   
    // And we'll put 4 columns in each row
    Text col1 = new Text("1");
    Text col2 = new Text("2");
    Text col3 = new Text("3");
    Text col4 = new Text("4");
   
    // Now we'll add them to the mutations
    mut1.put(new Text("column"), col1, System.currentTimeMillis(), new Value("This is the value for this key".getBytes()));
    mut1.put(new Text("column"), col2, System.currentTimeMillis(), new Value("This is the value for this key".getBytes()));
    mut1.put(new Text("column"), col3, System.currentTimeMillis(), new Value("This is the value for this key".getBytes()));
    mut1.put(new Text("column"), col4, System.currentTimeMillis(), new Value("This is the value for this key".getBytes()));
   
    mut2.put(new Text("column"), col1, System.currentTimeMillis(), new Value("This is the value for this key".getBytes()));
    mut2.put(new Text("column"), col2, System.currentTimeMillis(), new Value("This is the value for this key".getBytes()));
    mut2.put(new Text("column"), col3, System.currentTimeMillis(), new Value("This is the value for this key".getBytes()));
    mut2.put(new Text("column"), col4, System.currentTimeMillis(), new Value("This is the value for this key".getBytes()));
   
    mut3.put(new Text("column"), col1, System.currentTimeMillis(), new Value("This is the value for this key".getBytes()));
    mut3.put(new Text("column"), col2, System.currentTimeMillis(), new Value("This is the value for this key".getBytes()));
    mut3.put(new Text("column"), col3, System.currentTimeMillis(), new Value("This is the value for this key".getBytes()));
    mut3.put(new Text("column"), col4, System.currentTimeMillis(), new Value("This is the value for this key".getBytes()));
   
    // Now we'll make a Batch Writer
    bw = connector.createBatchWriter(table, bwOpts.getBatchWriterConfig());
   
    // And add the mutations
    bw.addMutation(mut1);
    bw.addMutation(mut2);
    bw.addMutation(mut3);

Example of BatchWriterOpts in TestRandomDeletes.main (parsed together with ScannerOpts)

 
  public static void main(String[] args) {
   
    ClientOnDefaultTable opts = new ClientOnDefaultTable("test_ingest");
    ScannerOpts scanOpts = new ScannerOpts();
    BatchWriterOpts bwOpts = new BatchWriterOpts();
    opts.parseArgs(TestRandomDeletes.class.getName(), args, scanOpts, bwOpts);
   
    log.info("starting random delete test");

   

Example of BatchWriterOpts in TestIngest.main

  public static void main(String[] args) throws Exception {
   
    Opts opts = new Opts();
    BatchWriterOpts bwOpts = new BatchWriterOpts();
    opts.parseArgs(TestIngest.class.getName(), args, bwOpts);
   
    Instance instance = opts.getInstance();
   
    String name = TestIngest.class.getSimpleName();

Example of BatchWriterOpts in a functional test that ingests with TestIngest and verifies the table's data files

    TableOperations to = c.tableOperations();
    String tableName = getUniqueNames(1)[0];
    to.create(tableName);
    TestIngest.Opts opts = new TestIngest.Opts();
    opts.tableName = tableName;
    TestIngest.ingest(c, opts, new BatchWriterOpts());
    to.flush(tableName, null, null, true);
    VerifyIngest.Opts vopts = new VerifyIngest.Opts();
    vopts.tableName = tableName;
    VerifyIngest.verifyIngest(c, vopts, new ScannerOpts());
    String id = to.tableIdMap().get(tableName);
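    // The metadata table should list at least one data file entry for the freshly flushed table.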
    Scanner s = c.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
    s.setRange(new KeyExtent(new Text(id), null, null).toMetadataRange());
    s.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
    assertTrue(FunctionalTestUtils.count(s) > 0);
    FileSystem fs = FileSystem.get(CachedConfiguration.getInstance());
    assertTrue(fs.listStatus(new Path(rootPath() + "/accumulo/tables/" + id)).length > 0);
    to.delete(tableName);
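    // After the delete, the metadata entries disappear and the table's files (or its whole directory) are removed from disk.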
    assertEquals(0, FunctionalTestUtils.count(s));
    try {
      assertEquals(0, fs.listStatus(new Path(rootPath() + "/accumulo/tables/" + id)).length);
    } catch (FileNotFoundException ex) {
      // that's fine, too
    }
    assertNull(to.tableIdMap().get(tableName));
    to.create(tableName);
    TestIngest.ingest(c, opts, new BatchWriterOpts());
    VerifyIngest.verifyIngest(c, vopts, new ScannerOpts());
    to.delete(tableName);
  }

Example of BatchWriterOpts in a functional test that ingests repeatedly and checks RFile counts after each flush

      opts.timestamp = i;
      opts.dataSize = 50;
      opts.rows = NUM_TO_INGEST;
      opts.cols = 1;
      opts.random = i;
      TestIngest.ingest(c, opts, new BatchWriterOpts());
     
      c.tableOperations().flush("test_ingest", null, null, true);
      FunctionalTestUtils.checkRFiles(c, "test_ingest", NUM_TABLETS, NUM_TABLETS, i + 1, i + 1);
    }
   