Package org.apache.hadoop.hdfs

Examples of org.apache.hadoop.hdfs.MiniDFSCluster
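Before the version-specific snippets, a minimal sketch of starting and stopping a cluster with the builder API (the class name and paths here are illustrative, not taken from the examples below):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class MiniDFSClusterSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
            .numDataNodes(1)                        // one datanode is enough for most tests
            .build();
        try {
          cluster.waitActive();                     // block until the NN and DNs are up
          FileSystem fs = cluster.getFileSystem();  // conf's fs.defaultFS now points at the mini cluster
          fs.mkdirs(new Path("/tmp/example"));
        } finally {
          cluster.shutdown();                       // always tear the cluster down
        }
      }
    }

The examples that follow show both this builder style and the older, deprecated constructors that some projects keep for backward compatibility.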


    // The old default was System.getProperty("test.cache.data", "build/test/cache"),
    // which is deprecated; point it at our cluster test directory instead.
    System.setProperty("test.cache.data", this.clusterTestDir.toString());

    // Ok, now we can start. This is the deprecated multi-argument constructor:
    // nameNodePort=0 picks a free port; the three booleans enable formatting and
    // managed name/data dirs; racks and simulated capacities are left null.
    this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true,
      true, null, null, hosts, null);

    // Set this just-started cluster as our filesystem.
    FileSystem fs = this.dfsCluster.getFileSystem();
    this.conf.set("fs.defaultFS", fs.getUri().toString());


    Configuration conf = testUtil.getConfiguration();
    conf.setInt("dfs.blocksize", 1024 * 1024);
    conf.setInt("dfs.client.read.prefetch.size", 2 * 1024 * 1024);

    testUtil.startMiniDFSCluster(1);
    MiniDFSCluster cluster = testUtil.getDFSCluster();
    FileSystem fs = cluster.getFileSystem();
    assertEquals("hdfs", fs.getUri().getScheme());

    try {
      testLinkReadDuringRename(fs, testUtil.getDefaultRootDirPath());
    } finally {

    Configuration conf = testUtil.getConfiguration();
    conf.setInt("dfs.blocksize", 1024 * 1024);
    conf.setInt("dfs.client.read.prefetch.size", 2 * 1024 * 1024);

    testUtil.startMiniDFSCluster(1);
    MiniDFSCluster cluster = testUtil.getDFSCluster();
    FileSystem fs = cluster.getFileSystem();
    assertEquals("hdfs", fs.getUri().getScheme());

    try {
      List<Path> files = new ArrayList<Path>();
      for (int i = 0; i < 3; i++) {

  private HRegion r;
  private HRegionIncommon region;

  @Override
  public void setUp() throws Exception {
    // Two datanodes, format on startup, no rack topology.
    cluster = new MiniDFSCluster(conf, 2, true, (String[]) null);
    // Set the hbase.rootdir to be the home directory in mini dfs.
    this.conf.set(HConstants.HBASE_DIR,
      this.cluster.getFileSystem().getHomeDirectory().toString());
    super.setUp();
   

  @Test
  public void testMiniClusterStore()
      throws EventDeliveryException, IOException {
    // setup a minicluster
    MiniDFSCluster cluster = new MiniDFSCluster
        .Builder(new Configuration())
        .build();

    FileSystem dfs = cluster.getFileSystem();
    Configuration conf = dfs.getConf();

    URI hdfsUri = URI.create(
        "dataset:" + conf.get("fs.defaultFS") + "/tmp/repo" + DATASET_NAME);
    try {
      // create a repository and dataset in HDFS
      Datasets.create(hdfsUri, DESCRIPTOR);

      // update the config to use the HDFS repository
      config.put(DatasetSinkConstants.CONFIG_KITE_DATASET_URI, hdfsUri.toString());

      DatasetSink sink = sink(in, config);

      // run the sink
      sink.start();
      sink.process();
      sink.stop();

      Assert.assertEquals(
          Sets.newHashSet(expected),
          read(Datasets.load(hdfsUri)));
      Assert.assertEquals("Should have committed", 0, remaining(in));

    } finally {
      if (Datasets.exists(hdfsUri)) {
        Datasets.delete(hdfsUri);
      }
      cluster.shutdown();
    }
  }

      // The DFS cluster updates the config that is passed in.
      // Newer versions of MiniDFSCluster use the builder pattern,
      // but we need to support older versions from the same code;
      // there are ramifications if the deprecated constructors are
      // removed in a future release.
      dfsCluster = new MiniDFSCluster(config, nodes, true, null);

      // We need to ask the MR cluster for its config after init
      // returns; it is not guaranteed that the config we passed in
      // has been updated.
      // We do all this via a compatibility class which uses
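A sketch of the builder-pattern equivalent of the deprecated call above, assuming a Hadoop version that ships MiniDFSCluster.Builder (like the old constructors, the builder updates the config passed to it):

      // Hypothetical builder-based replacement for
      // new MiniDFSCluster(config, nodes, true, null):
      dfsCluster = new MiniDFSCluster.Builder(config)
          .numDataNodes(nodes)   // the 'nodes' argument
          .format(true)          // the 'format' flag
          .build();              // racks default to null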

import org.junit.Test;

public class TestFileStreamInputFormat {
 
   private MiniDFSCluster newDFSCluster(JobConf conf) throws Exception {
     return new MiniDFSCluster(conf, 4, true,
         new String[] {"/rack0", "/rack0", "/rack1", "/rack1"},
         new String[] {"host0", "host1", "host2", "host3"});
   }
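For comparison, a sketch of the same four-node, two-rack topology built with the builder API (assuming a Hadoop version where Builder.racks() and Builder.hosts() exist; the method name is illustrative):

   private MiniDFSCluster newDFSClusterViaBuilder(JobConf conf) throws Exception {
     return new MiniDFSCluster.Builder(conf)
         .numDataNodes(4)
         .racks(new String[] {"/rack0", "/rack0", "/rack1", "/rack1"})
         .hosts(new String[] {"host0", "host1", "host2", "host3"})
         .build();
   }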

   @Test
   public void testNumInputs() throws Exception {
     Configuration conf = new Configuration();
     JobConf job = new JobConf(conf);
     MiniDFSCluster dfs = newDFSCluster(job);
     FileSystem fs = dfs.getFileSystem();
     System.out.println("FileSystem " + fs.getUri());
     Path inputDir = new Path("/foo/");
     final int numFiles = 10;
     String fileNameBase = "part-0000";

  /**
   * This is a very basic test that writes one event to HDFS and reads it back.
   */
  @Test
  public void simpleHDFSTest() throws EventDeliveryException, IOException {
    cluster = new MiniDFSCluster(new Configuration(), 1, true, null);
    cluster.waitActive();

    String outputDir = "/flume/simpleHDFSTest";
    Path outputDirPath = new Path(outputDir);
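
The truncated body presumably drives a Flume sink against the cluster; as a plain-HDFS illustration of the same write-then-read round trip, a sketch using only the FileSystem API (file name and payload are illustrative, and the usual org.apache.hadoop.fs imports are assumed):

    // Write one "event" to the mini cluster and read it back.
    FileSystem fs = cluster.getFileSystem();
    Path file = new Path(outputDirPath, "event-0");
    try (FSDataOutputStream out = fs.create(file)) {
      out.writeUTF("hello hdfs");
    }
    try (FSDataInputStream in = fs.open(file)) {
      Assert.assertEquals("hello hdfs", in.readUTF());
    }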


  /**
   * Writes two events to HDFS using a GZIP-compressed serializer.
   */
  @Test
  public void simpleHDFSGZipCompressedTest() throws EventDeliveryException, IOException {
    cluster = new MiniDFSCluster(new Configuration(), 1, true, null);
    cluster.waitActive();

    String outputDir = "/flume/simpleHDFSGZipCompressedTest";
    Path outputDirPath = new Path(outputDir);
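
The heart of the compressed variant is wrapping the output stream in a GZIP codec. A sketch using Hadoop's codec machinery (illustrative only; the real test configures compression through the sink's serializer, and imports for GzipCodec, CompressionCodec, ReflectionUtils, and StandardCharsets are assumed):

    // Hypothetical direct GZIP write against the mini cluster.
    FileSystem fs = cluster.getFileSystem();
    CompressionCodec codec = ReflectionUtils.newInstance(GzipCodec.class, fs.getConf());
    Path file = new Path(outputDirPath, "events.gz");
    try (OutputStream out = codec.createOutputStream(fs.create(file))) {
      out.write("event-1\nevent-2\n".getBytes(StandardCharsets.UTF_8));
    }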

