Examples of MiniDFSCluster


Examples of org.apache.hadoop.hdfs.MiniDFSCluster

  /**
   * Writes two events in GZIP-compressed serialized form.
   */
  @Test
  public void simpleHDFSGZipCompressedTest() throws EventDeliveryException, IOException {
    cluster = new MiniDFSCluster(new Configuration(), 1, true, null);
    cluster.waitActive();

    String outputDir = "/flume/simpleHDFSGZipCompressedTest";
    Path outputDirPath = new Path(outputDir);

View Full Code Here
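The snippet above is cut off before the sink writes anything. As a point of reference, a minimal sketch of the same MiniDFSCluster lifecycle (start, write through the cluster's FileSystem, verify, shut down) could look like the following. The file path and contents are illustrative, not part of the original Flume test, and the usual imports (org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.*, org.apache.hadoop.hdfs.MiniDFSCluster, JUnit asserts) are assumed.

  @Test
  public void miniDfsLifecycleSketch() throws IOException {
    // Start a one-DataNode cluster backed by a freshly formatted NameNode.
    Configuration conf = new Configuration();
    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
    try {
      cluster.waitActive();
      FileSystem fs = cluster.getFileSystem();

      // Write a small file through the cluster and check it is visible.
      Path file = new Path("/flume/simpleHDFSGZipCompressedTest/part-0");
      FSDataOutputStream out = fs.create(file);
      out.writeBytes("event one\nevent two\n");
      out.close();
      assertTrue(fs.exists(file));
    } finally {
      // Always tear the cluster down, even if the assertions fail.
      cluster.shutdown();
    }
  }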

Examples of org.apache.hadoop.hdfs.MiniDFSCluster

  @Test
  public void underReplicationTest() throws EventDeliveryException,
      IOException {
    Configuration conf = new Configuration();
    conf.set("dfs.replication", String.valueOf(3));
    cluster = new MiniDFSCluster(conf, 3, true, null);
    cluster.waitActive();

    String outputDir = "/flume/underReplicationTest";
    Path outputDirPath = new Path(outputDir);
View Full Code Here

Examples of org.apache.hadoop.hdfs.MiniDFSCluster

  @Test
  public void maxUnderReplicationTest() throws EventDeliveryException,
      IOException {
    Configuration conf = new Configuration();
    conf.set("dfs.replication", String.valueOf(3));
    cluster = new MiniDFSCluster(conf, 3, true, null);
    cluster.waitActive();

    String outputDir = "/flume/underReplicationTest";
    Path outputDirPath = new Path(outputDir);
View Full Code Here
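Both replication tests above start a three-DataNode cluster with dfs.replication set to 3. A hedged sketch of how such a test could check the replication factor the NameNode actually reports for a written file; the file name is illustrative, and cluster and outputDirPath are the variables from the snippets above:

    FileSystem fs = cluster.getFileSystem();
    Path written = new Path(outputDirPath, "part-0");
    FileStatus status = fs.getFileStatus(written);
    // dfs.replication was set to 3, so a fully replicated file should report 3.
    assertEquals(3, status.getReplication());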

Examples of org.apache.hadoop.hdfs.MiniDFSCluster

      for (String user : getHadoopUsers()) {
        String[] groups = getHadoopUserGroups(user);
        UserGroupInformation.createUserForTesting(user, groups);
      }

      DFS_CLUSTER = new MiniDFSCluster(conf, dataNodes, true, null);
      FileSystem fileSystem = DFS_CLUSTER.getFileSystem();
      fileSystem.mkdirs(new Path("/tmp"));
      fileSystem.mkdirs(new Path("/user"));
      fileSystem.mkdirs(new Path("/hadoop/mapred/system"));
      fileSystem.setPermission(new Path("/tmp"), FsPermission.valueOf("-rwxrwxrwx"));
View Full Code Here
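After the mini cluster and its base directories are in place, client-side configurations usually need to be pointed at the in-process NameNode. A short sketch, reusing the fileSystem and conf names from the snippet above:

      // Point client configurations at the mini cluster's NameNode URI
      // (fileSystem and conf are the variables from the snippet above).
      conf.set("fs.default.name", fileSystem.getUri().toString()); // pre-Hadoop 2 key
      // On Hadoop 2.x+, "fs.defaultFS" is the equivalent key.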

Examples of org.apache.hadoop.hdfs.MiniDFSCluster

  /**
   * Test {@link JobTracker}'s safe mode.
   */
  public void testJobTrackerSafeMode() throws IOException {
    String namenode = null;
    MiniDFSCluster dfs = null;
    MiniMRCluster mr = null;
    FileSystem fileSys = null;

    try {
      Configuration conf = new Configuration();
      conf.setBoolean("dfs.replication.considerLoad", false);
      dfs = new MiniDFSCluster(conf, 1, true, null, null);
      dfs.waitActive();
      fileSys = dfs.getFileSystem();
     
      // clean up
      fileSys.delete(testDir, true);
     
      if (!fileSys.mkdirs(inDir)) {
        throw new IOException("Mkdirs failed to create " + inDir.toString());
      }

      // Write the input file
      UtilsForTests.writeFile(dfs.getNameNode(), conf,
                              new Path(inDir + "/file"), (short)1);

      dfs.startDataNodes(conf, 1, true, null, null, null, null);
      dfs.waitActive();

      namenode = (dfs.getFileSystem()).getUri().getHost() + ":"
                 + (dfs.getFileSystem()).getUri().getPort();

      // Make sure that jobhistory leads to a proper job restart
      // So keep the blocksize and the buffer size small
      JobConf jtConf = new JobConf();
      jtConf.set("mapred.jobtracker.job.history.block.size", "512");
      jtConf.set("mapred.jobtracker.job.history.buffer.size", "512");
      jtConf.setInt("mapred.tasktracker.map.tasks.maximum", 1);
      jtConf.setInt("mapred.tasktracker.reduce.tasks.maximum", 1);
      jtConf.setLong("mapred.tasktracker.expiry.interval", 5000);
      jtConf.setInt("mapred.reduce.copy.backoff", 4);
      jtConf.setLong("mapred.job.reuse.jvm.num.tasks", -1);
     
      mr = new MiniMRCluster(numTrackers, namenode, numDir, null, null, jtConf);
     
      // Test Lost tracker case
      testSafeMode(dfs, mr);
    } finally {
      if (mr != null) {
        try {
          mr.shutdown();
        } catch (Exception e) {}
      }
      if (dfs != null) {
        try {
          dfs.shutdown();
        } catch (Exception e) {}
      }
    }
  }
View Full Code Here

Examples of org.apache.hadoop.hdfs.MiniDFSCluster

   * 8. verify that all the image and edits files are the same.
   */
  public void testStorageRestore() throws Exception {
    int numDatanodes = 2;
    //Collection<String> dirs = config.getStringCollection("dfs.name.dir");
    cluster = new MiniDFSCluster(0, config, numDatanodes, true, false, true,  null, null, null, null);
    cluster.waitActive();
   
    SecondaryNameNode secondary = new SecondaryNameNode(config);
    System.out.println("****testStorageRestore: Cluster and SNN started");
    printStorages(cluster.getNameNode().getFSImage());
View Full Code Here
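The snippet is truncated before any cleanup. A hedged sketch of the usual teardown for a SecondaryNameNode plus MiniDFSCluster pair, typically placed in a finally block like the other examples on this page:

    // Stop the checkpointing SecondaryNameNode first, then the cluster.
    secondary.shutdown();
    cluster.shutdown();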

Examples of org.apache.hadoop.hdfs.MiniDFSCluster

    return new Path(TEST_ROOT_DIR_PATH, fileName);
  }
 
  @Override
  protected void setUp() throws Exception {
    cluster = new MiniDFSCluster(CONF, DATANODE_COUNT, true, null);
    cluster.waitActive();
    namesystem = cluster.getNameNode().getNamesystem();
    fs = (DistributedFileSystem) cluster.getFileSystem();
    metrics = namesystem.getFSNamesystemMetrics();
    nn = cluster.getNameNode();
View Full Code Here
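All of the examples on this page use the MiniDFSCluster(Configuration, int, boolean, String[]) constructor, which Hadoop 2.x deprecates in favour of MiniDFSCluster.Builder. A sketch of an equivalent setUp using the builder, with CONF and DATANODE_COUNT as in the snippet above:

  @Override
  protected void setUp() throws Exception {
    // Builder-based equivalent of new MiniDFSCluster(CONF, DATANODE_COUNT, true, null).
    cluster = new MiniDFSCluster.Builder(CONF)
        .numDataNodes(DATANODE_COUNT)
        .format(true)
        .build();
    cluster.waitActive();
  }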

Examples of org.apache.hadoop.hdfs.MiniDFSCluster

      //set the done folder location
      String doneFolder = "history_done";
      conf.set("mapred.job.tracker.history.completed.location", doneFolder);

      MiniDFSCluster dfsCluster = new MiniDFSCluster(conf, 2, true, null);
      mr = new MiniMRCluster(2, dfsCluster.getFileSystem().getUri().toString(),
          3, null, null, conf);

      // run the TCs
      conf = mr.createJobConf();
View Full Code Here
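With the DFS and MR mini clusters wired together, the job configuration returned by mr.createJobConf() can be used to submit real jobs whose history ends up under the configured history_done folder. A hedged sketch using the old org.apache.hadoop.mapred API with the default identity map/reduce and illustrative paths:

      // Write a one-line input file into the mini DFS.
      FileSystem fs = dfsCluster.getFileSystem();
      Path in = new Path("/tmp/history-in");
      Path out = new Path("/tmp/history-out");
      FSDataOutputStream o = fs.create(new Path(in, "part-0"));
      o.writeBytes("key\tvalue\n");
      o.close();

      // Submit a trivial job through the MiniMRCluster and wait for it.
      JobConf job = mr.createJobConf();
      FileInputFormat.setInputPaths(job, in);
      FileOutputFormat.setOutputPath(job, out);
      JobClient.runJob(job);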

Examples of org.apache.hadoop.hdfs.MiniDFSCluster

   * Tests rolling edit logs while transactions are ongoing.
   */
  public void testEditLogRolling() throws Exception {
   // start a cluster
    Configuration conf = getConf();
    MiniDFSCluster cluster = null;
    FileSystem fileSys = null;


    AtomicReference<Throwable> caughtErr = new AtomicReference<Throwable>();
    try {
      cluster = new MiniDFSCluster(conf, NUM_DATA_NODES, true, null);
      cluster.waitActive();
      fileSys = cluster.getFileSystem();
      final FSNamesystem namesystem = cluster.getNameNode().namesystem;

      FSImage fsimage = namesystem.getFSImage();
      FSEditLog editLog = fsimage.getEditLog();

      // set small size of flush buffer
      FSEditLog.setBufferCapacity(2048);
      editLog.close();
      editLog.open();

      startTransactionWorkers(namesystem, caughtErr);

      for (int i = 0; i < NUM_ROLLS && caughtErr.get() == null; i++) {
        try {
          Thread.sleep(20);
        } catch (InterruptedException e) {}

        LOG.info("Starting roll " + i + ".");
        editLog.rollEditLog();
        LOG.info("Roll complete " + i + ".");

        verifyEditLogs(fsimage);

        LOG.info("Starting purge " + i + ".");
        editLog.purgeEditLog();
        LOG.info("Complete purge " + i + ".");
      }
    } finally {
      stopTransactionWorkers();
      if (caughtErr.get() != null) {
        throw new RuntimeException(caughtErr.get());
      }

      if(fileSys != null) fileSys.close();
      if(cluster != null) cluster.shutdown();
    }
  }
View Full Code Here

Examples of org.apache.hadoop.hdfs.MiniDFSCluster

   * Tests saving fs image while transactions are ongoing.
   */
  public void testSaveNamespace() throws Exception {
    // start a cluster
    Configuration conf = getConf();
    MiniDFSCluster cluster = null;
    FileSystem fileSys = null;


    AtomicReference<Throwable> caughtErr = new AtomicReference<Throwable>();
    try {
      cluster = new MiniDFSCluster(conf, NUM_DATA_NODES, true, null);
      cluster.waitActive();
      fileSys = cluster.getFileSystem();
      final FSNamesystem namesystem = FSNamesystem.getFSNamesystem();

      FSImage fsimage = namesystem.getFSImage();
      FSEditLog editLog = fsimage.getEditLog();

      // set small size of flush buffer
      FSEditLog.setBufferCapacity(2048);
      editLog.close();
      editLog.open();

      assertTrue(editLog.getEditStreams().size() > 0);
      startTransactionWorkers(namesystem, caughtErr);

      for (int i = 0; i < NUM_SAVE_IMAGE; i++) {
        try {
          Thread.sleep(20);
        } catch (InterruptedException e) {}

        LOG.info("Save " + i + ": entering safe mode");
        namesystem.enterSafeMode();

        // Verify edit logs before the save
        verifyEditLogs(fsimage);

        LOG.info("Save " + i + ": saving namespace");
        namesystem.saveNamespace();
        LOG.info("Save " + i + ": leaving safemode");

        // Verify that edit logs post save are also not corrupt
        verifyEditLogs(fsimage);

        namesystem.leaveSafeMode(false);
        LOG.info("Save " + i + ": complete");

      }
    } finally {
      stopTransactionWorkers();
      if (caughtErr.get() != null) {
        throw new RuntimeException(caughtErr.get());
      }

      if(fileSys != null) fileSys.close();
      if(cluster != null) cluster.shutdown();
    }
  }
View Full Code Here