Package org.apache.hadoop.io.file.tfile, class TFile

Examples of org.apache.hadoop.io.file.tfile.TFile.Writer
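
The snippets below are excerpted from Hadoop's TFile test code, so they rely on test fields such as conf, fs, ROOT, and minBlockSize, and on helpers like createFSOutput. As a self-contained starting point, here is a minimal sketch of the basic write path; the path, block size, codec, and key format are illustrative choices, not values taken from the snippets.

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FSDataOutputStream;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.io.file.tfile.TFile;

  public class TFileWriteSketch {
    public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration();
      Path file = new Path("/tmp/example.tfile");      // illustrative path
      FileSystem fs = file.getFileSystem(conf);
      FSDataOutputStream out = fs.create(file);
      // "memcmp" compares keys as raw bytes, so keys must be appended in that order
      TFile.Writer writer =
          new TFile.Writer(out, 1024 * 1024, "gz", "memcmp", conf);
      for (int i = 0; i < 100; i++) {
        byte[] key = String.format("key%06d", i).getBytes("UTF-8");
        byte[] value = ("value" + i).getBytes("UTF-8");
        writer.append(key, value);
      }
      writer.close();   // close the writer before the underlying stream
      out.close();
    }
  }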


  // unsorted with some codec
  void unsortedWithSomeCodec(String codec) throws IOException {
    Path uTfile = new Path(ROOT, "unsorted.tfile");
    FSDataOutputStream fout = createFSOutput(uTfile);
    Writer writer = new Writer(fout, minBlockSize, codec, null, conf);
    writeRecords(writer);
    writer.close();
    fout.close();
    FSDataInputStream fin = fs.open(uTfile);
    Reader reader =
        new Reader(fin, fs.getFileStatus(uTfile).getLen(), conf);
    // ... read the records back via reader.createScanner(), then clean up
    reader.close();
    fin.close();
  }


  // test meta blocks for tfiles
  public void testMetaBlocks() throws IOException {
    Path mFile = new Path(ROOT, "meta.tfile");
    FSDataOutputStream fout = createFSOutput(mFile);
    Writer writer = new Writer(fout, minBlockSize, "none", null, conf);
    someTestingWithMetaBlock(writer, "none");
    writer.close();
    fout.close();
    FSDataInputStream fin = fs.open(mFile);
    Reader reader = new Reader(fin, fs.getFileStatus(mFile).getLen(), conf);
    someReadingWithMetaBlock(reader);
    fs.delete(mFile, true);
    reader.close();
    fin.close();
  }
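
The helpers someTestingWithMetaBlock and someReadingWithMetaBlock are not shown on this page. A plausible sketch of the meta-block round trip they exercise is below; the block name "testMeta" and its payload are made up for illustration, the writer and reader are assumed to come from the snippet above, and the usual java.io imports are omitted.

  // writer side: a named meta block is exposed as a DataOutputStream
  static void writeMetaBlock(TFile.Writer writer) throws IOException {
    DataOutputStream metaOut = writer.prepareMetaBlock("testMeta");
    metaOut.write("some meta data".getBytes("UTF-8"));
    metaOut.close();
  }

  // reader side: fetch the block back by the same name
  static void readMetaBlock(TFile.Reader reader) throws IOException {
    DataInputStream metaIn = reader.getMetaBlock("testMeta");
    byte[] buf = new byte[64];
    int n = metaIn.read(buf);
    System.out.println(new String(buf, 0, n, "UTF-8"));
    metaIn.close();
  }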

  private void createTFile() throws IOException {
    long totalBytes = 0;
    FSDataOutputStream fout = createFSOutput(path, fs);
    try {
      Writer writer =
          new Writer(fout, options.minBlockSize, options.compress, "memcmp",
              conf);
      try {
        BytesWritable key = new BytesWritable();
        BytesWritable val = new BytesWritable();
        timer.start();
        for (long i = 0; true; ++i) {
          if (i % 1000 == 0) { // test the size for every 1000 rows.
            if (fs.getFileStatus(path).getLen() >= options.fileSize) {
              break;
            }
          }
          kvGen.next(key, val, false);
          writer.append(key.get(), 0, key.getSize(), val.get(), 0, val
              .getSize());
          totalBytes += key.getSize();
          totalBytes += val.getSize();
        }
        timer.stop();
      }
      finally {
        writer.close();
      }
    }
    finally {
      fout.close();
    }
  }
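
Besides the byte-range append(key, koff, klen, value, voff, vlen) used above, TFile.Writer also supports a streaming form of append through prepareAppendKey and prepareAppendValue, which helps when a value is large or its length is not known up front. A brief sketch, assuming a writer constructed as in the snippets above:

  // streaming append: close the key stream before opening the value stream
  byte[] key = "row-000000".getBytes("UTF-8");
  DataOutputStream keyOut = writer.prepareAppendKey(key.length);
  keyOut.write(key);
  keyOut.close();

  DataOutputStream valueOut = writer.prepareAppendValue(-1);  // -1: length not known
  valueOut.write("a value of arbitrary length".getBytes("UTF-8"));
  valueOut.close();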

  @Before
  public void setUp() throws IOException {
    path = new Path(ROOT, outputFile);
    fs = path.getFileSystem(conf);
    out = fs.create(path);
    writer = new Writer(out, BLOCK_SIZE, compression, comparator, conf);
  }

    // the Writer constructor should reject an unknown compression codec name
    if (skip)
      return;
    closeOutput();
    out = fs.create(path);
    try {
      writer = new Writer(out, BLOCK_SIZE, "BAD", comparator, conf);
      Assert.fail("Error on handling invalid compression codecs.");
    } catch (Exception e) {
      // noop, expecting exceptions
      // e.printStackTrace();
    }

    // the Writer constructor requires the output stream to be at position 0
    closeOutput();
    out = fs.create(path);
    out.write(123);

    try {
      writer = new Writer(out, BLOCK_SIZE, compression, comparator, conf);
      Assert.fail("Failed to catch file write not at position 0.");
    } catch (Exception e) {
      // noop, expecting exceptions
    }
    closeOutput();

  void createFile(int count, String compress) throws IOException {
    conf = new Configuration();
    path = new Path(ROOT, outputFile + "." + compress);
    fs = path.getFileSystem(conf);
    FSDataOutputStream out = fs.create(path);
    Writer writer = new Writer(out, BLOCK_SIZE, compress, comparator, conf);

    int nx;
    for (nx = 0; nx < count; nx++) {
      byte[] key = composeSortedKey(KEY, count, nx).getBytes();
      byte[] value = (VALUE + nx).getBytes();
      writer.append(key, value);
    }
    writer.close();
    out.close();
  }
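
For completeness, here is a hedged sketch of reading such a file back with TFile.Reader and its Scanner; it reuses fs, path, and conf from createFile above and walks the entries in key order.

  FSDataInputStream in = fs.open(path);
  TFile.Reader reader =
      new TFile.Reader(in, fs.getFileStatus(path).getLen(), conf);
  TFile.Reader.Scanner scanner = reader.createScanner();
  BytesWritable key = new BytesWritable();
  BytesWritable value = new BytesWritable();
  while (!scanner.atEnd()) {
    scanner.entry().get(key, value);   // copy out the current key and value
    // ... use key and value ...
    scanner.advance();
  }
  scanner.close();
  reader.close();
  in.close();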
