Examples of MiniDFSCluster
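
MiniDFSCluster starts an in-process HDFS cluster (a NameNode plus one or more DataNodes) for unit tests. The examples below are excerpts from Hadoop test code of the 0.20/0.21 era and use the now-deprecated constructor MiniDFSCluster(Configuration conf, int numDataNodes, boolean format, String[] racks). As orientation, here is a minimal, self-contained sketch of the typical lifecycle (the path and file contents are illustrative):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FSDataOutputStream;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.hdfs.MiniDFSCluster;

  public class MiniDFSClusterLifecycle {
    public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration();
      // format=true wipes any previous name/data directories; racks=null uses defaults.
      MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
      try {
        cluster.waitActive();                     // block until the NN and DNs are up
        FileSystem fs = cluster.getFileSystem();  // a client bound to the mini cluster
        FSDataOutputStream out = fs.create(new Path("/smoke.dat"));
        out.writeBytes("hello");
        out.close();
        System.out.println(fs.exists(new Path("/smoke.dat")));  // prints true
      } finally {
        cluster.shutdown();                       // always stop the NN/DN threads
      }
    }
  }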


Examples of org.apache.hadoop.hdfs.MiniDFSCluster

  @SuppressWarnings("deprecation")
  void testNameNodeImageSendFail(Configuration conf)
    throws IOException {
    System.out.println("Starting testNameNodeImageSendFail");
    Path file1 = new Path("checkpointww.dat");
    MiniDFSCluster cluster = new MiniDFSCluster(conf, numDatanodes,
                                                false, null);
    cluster.waitActive();
    FileSystem fileSys = cluster.getFileSystem();
    try {
      assertTrue(!fileSys.exists(file1));
      //
      // Make the checkpoint fail after rolling the edit log.
      //
      SecondaryNameNode secondary = startSecondaryNameNode(conf);
      ErrorSimulator.setErrorSimulation(3);

      try {
        secondary.doCheckpoint(); // this should fail
        fail("Did not get expected exception");
      } catch (IOException e) {
        // The simulated error sends only part of the image, so the size check must fail.
        assertTrue(e.getMessage().contains("is not of the advertised size"));
      }
      ErrorSimulator.clearErrorSimulation(3);
      secondary.shutdown(); // simulate a secondary NameNode crash

      // Start a new instance of the secondary and verify that
      // a new rollEditLog succeeds in spite of the fact that
      // edits.new already exists.
      //
      secondary = startSecondaryNameNode(conf);
      secondary.doCheckpoint(); // this should work correctly
      secondary.shutdown();

      //
      // Create a new file
      //
      writeFile(fileSys, file1, replication);
      checkFile(fileSys, file1, replication);
    } finally {
      fileSys.close();
      cluster.shutdown();
    }
  }
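
This test (and the checkpoint tests below) call a startSecondaryNameNode(conf) helper that the excerpts omit. In the TestCheckpoint class these snippets come from, it is roughly the following (a sketch; binding the HTTP server to an ephemeral port keeps concurrent tests from colliding):

  private SecondaryNameNode startSecondaryNameNode(Configuration conf)
      throws IOException {
    // Bind the secondary's HTTP server to an ephemeral port.
    conf.set("dfs.secondary.http.address", "0.0.0.0:0");
    return new SecondaryNameNode(conf);
  }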

Examples of org.apache.hadoop.hdfs.MiniDFSCluster

      assertFalse(new File(dir, "lastcheckpoint.tmp").exists());
    }
    nn.stop(); nn = null;
   
    // Check that everything starts ok now.
    MiniDFSCluster cluster = new MiniDFSCluster(conf, numDatanodes, false, null);
    cluster.waitActive();
    cluster.shutdown();
  }

Examples of org.apache.hadoop.hdfs.MiniDFSCluster

    Collection<File> namedirs = null;

    Configuration conf = new Configuration();
    conf.set("dfs.secondary.http.address", "0.0.0.0:0");
    replication = (short)conf.getInt("dfs.replication", 3);
    MiniDFSCluster cluster = new MiniDFSCluster(conf, numDatanodes, true, null);
    cluster.waitActive();
    FileSystem fileSys = cluster.getFileSystem();

    try {
      //
      // verify that 'format' really blew away all pre-existing files
      //
      assertTrue(!fileSys.exists(file1));
      assertTrue(!fileSys.exists(file2));
      namedirs = cluster.getNameDirs();

      //
      // Create file1
      //
      writeFile(fileSys, file1, replication);
      checkFile(fileSys, file1, replication);

      //
      // Take a checkpoint
      //
      SecondaryNameNode secondary = startSecondaryNameNode(conf);
      ErrorSimulator.initializeErrorSimulationEvent(4);
      secondary.doCheckpoint();
      secondary.shutdown();
    } finally {
      fileSys.close();
      cluster.shutdown();
    }

    //
    // Restart the cluster and verify that file1 still exists.
    //
    cluster = new MiniDFSCluster(conf, numDatanodes, false, null);
    cluster.waitActive();
    fileSys = cluster.getFileSystem();
    try {
      // check that file1 still exists
      checkFile(fileSys, file1, replication);
      cleanupFile(fileSys, file1);

      // create new file file2
      writeFile(fileSys, file2, replication);
      checkFile(fileSys, file2, replication);

      //
      // Take a checkpoint
      //
      SecondaryNameNode secondary = startSecondaryNameNode(conf);
      secondary.doCheckpoint();
      secondary.shutdown();
    } finally {
      fileSys.close();
      cluster.shutdown();
    }

    //
    // Restart cluster and verify that file2 exists and
    // file1 does not exist.
    //
    cluster = new MiniDFSCluster(conf, numDatanodes, false, null);
    cluster.waitActive();
    fileSys = cluster.getFileSystem();

    assertTrue(!fileSys.exists(file1));

    try {
      // verify that file2 exists
      checkFile(fileSys, file2, replication);
    } finally {
      fileSys.close();
      cluster.shutdown();
    }

    // file2 is left behind.

    testNameNodeImageSendFail(conf);

Examples of org.apache.hadoop.hdfs.MiniDFSCluster

  /**
   * Tests the saveNamespace operation.
   */
  public void testSaveNamespace() throws IOException {
    MiniDFSCluster cluster = null;
    DistributedFileSystem fs = null;
    try {
      Configuration conf = new Configuration();
      cluster = new MiniDFSCluster(conf, numDatanodes, false, null);
      cluster.waitActive();
      fs = (DistributedFileSystem)(cluster.getFileSystem());

      // Saving image without safe mode should fail
      DFSAdmin admin = new DFSAdmin(conf);
      String[] args = new String[]{"-saveNamespace"};
      try {
        admin.run(args);
      } catch(IOException eIO) {
        assertTrue(eIO.getLocalizedMessage().contains("Safe mode should be turned ON"));
      } catch(Exception e) {
        throw new IOException(e);
      }
      // create new file
      Path file = new Path("namespace.dat");
      writeFile(fs, file, replication);
      checkFile(fs, file, replication);
      // verify that the edits file is NOT empty
      Collection<File> editsDirs = cluster.getNameEditsDirs();
      for(File ed : editsDirs) {
        assertTrue(new File(ed, "current/edits").length() > Integer.SIZE/Byte.SIZE);
      }

      // Saving image in safe mode should succeed
      fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
      try {
        admin.run(args);
      } catch(Exception e) {
        throw new IOException(e);
      }
      // verify that the edits file is empty
      for(File ed : editsDirs) {
        assertTrue(new File(ed, "current/edits").length() == Integer.SIZE/Byte.SIZE);
      }

      // restart cluster and verify file exists
      cluster.shutdown();
      cluster = null;

      cluster = new MiniDFSCluster(conf, numDatanodes, false, null);
      cluster.waitActive();
      fs = (DistributedFileSystem)(cluster.getFileSystem());
      checkFile(fs, file, replication);
    } finally {
      if(fs != null) fs.close();
      if(cluster!= null) cluster.shutdown();
    }
  }
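
The same save can also be driven through the client API rather than DFSAdmin; a sketch, reusing the fs handle above and assuming the era's DistributedFileSystem.saveNamespace():

  // Equivalent to "dfsadmin -safemode enter" followed by "dfsadmin -saveNamespace".
  fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);  // the image can only be saved in safe mode
  fs.saveNamespace();                             // merge the edit log into a fresh fsimage
  fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);  // resume normal operation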

Examples of org.apache.hadoop.hdfs.MiniDFSCluster

  public void testMultipleCachefiles() throws Exception
  {
    boolean mayExit = false;
    MiniMRCluster mr = null;
    MiniDFSCluster dfs = null;
    try{
      Configuration conf = new Configuration();
      dfs = new MiniDFSCluster(conf, 1, true, null);
      FileSystem fileSys = dfs.getFileSystem();
      String namenode = fileSys.getName();
      mr  = new MiniMRCluster(1, namenode, 3);
      // During tests, the default Configuration will use a local mapred
      // So don't specify -config or -cluster
      String strJobtracker = "mapred.job.tracker=" + "localhost:" + mr.getJobTrackerPort();
      String strNamenode = "fs.default.name=" + namenode;
      String argv[] = new String[] {
        "-input", INPUT_FILE,
        "-output", OUTPUT_DIR,
        "-mapper", map,
        "-reducer", reduce,
        //"-verbose",
        //"-jobconf", "stream.debug=set"
        "-jobconf", strNamenode,
        "-jobconf", strJobtracker,
        "-jobconf", "stream.tmpdir="+System.getProperty("test.build.data","/tmp"),
        "-jobconf",
          JobConf.MAPRED_MAP_TASK_JAVA_OPTS + "=" +
            "-Dcontrib.name=" + System.getProperty("contrib.name") + " " +
            "-Dbuild.test=" + System.getProperty("build.test") + " " +
            conf.get(JobConf.MAPRED_MAP_TASK_JAVA_OPTS,
                     conf.get(JobConf.MAPRED_TASK_JAVA_OPTS, "")),
        "-jobconf",
          JobConf.MAPRED_REDUCE_TASK_JAVA_OPTS + "=" +
            "-Dcontrib.name=" + System.getProperty("contrib.name") + " " +
            "-Dbuild.test=" + System.getProperty("build.test") + " " +
            conf.get(JobConf.MAPRED_REDUCE_TASK_JAVA_OPTS,
                     conf.get(JobConf.MAPRED_TASK_JAVA_OPTS, "")),
        "-cacheFile", "hdfs://"+fileSys.getName()+CACHE_FILE + "#" + mapString,
        "-cacheFile", "hdfs://"+fileSys.getName()+CACHE_FILE_2 + "#" + mapString2
      };

      fileSys.delete(new Path(OUTPUT_DIR), true); // recursive; the single-argument delete is deprecated
     
      DataOutputStream file = fileSys.create(new Path(INPUT_FILE));
      file.writeBytes(mapString + "\n");
      file.writeBytes(mapString2 + "\n");
      file.close();
      file = fileSys.create(new Path(CACHE_FILE));
      file.writeBytes(cacheString);
      file.close();
      file = fileSys.create(new Path(CACHE_FILE_2));
      file.writeBytes(cacheString2);
      file.close();
       
      job = new StreamJob(argv, mayExit);    
      job.go();

      fileSys = dfs.getFileSystem();
      String line = null;
      String line2 = null;
      Path[] fileList = FileUtil.stat2Paths(fileSys.listStatus(
                                   new Path(OUTPUT_DIR),
                                   new Utils.OutputFileUtils
                                     .OutputFilesFilter()));
      for (int i = 0; i < fileList.length; i++){
        System.out.println(fileList[i].toString());
        BufferedReader bread =
          new BufferedReader(new InputStreamReader(fileSys.open(fileList[i])));
        line = bread.readLine();
        System.out.println(line);
        line2 = bread.readLine();
        System.out.println(line2);
      }
      assertEquals(cacheString + "\t", line);
      assertEquals(cacheString2 + "\t", line2);
    } finally{
      if (dfs != null) { dfs.shutdown(); }
      if (mr != null) { mr.shutdown();}
    }
   
  }
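
The -cacheFile arguments above are Streaming's command-line front end to the DistributedCache: the fragment after "#" becomes a symlink name in each task's working directory. The Java-API equivalent is roughly the following sketch (the URI and names are illustrative):

  import java.net.URI;
  import org.apache.hadoop.filecache.DistributedCache;
  import org.apache.hadoop.mapred.JobConf;

  public class CacheFileSetup {
    public static JobConf withCacheFile() throws Exception {
      JobConf jobConf = new JobConf();
      // The "#cachedname" fragment makes the task see the file as ./cachedname.
      DistributedCache.addCacheFile(
          new URI("hdfs://localhost:8020/cache/file.txt#cachedname"), jobConf);
      DistributedCache.createSymlink(jobConf);  // actually create the symlinks
      return jobConf;
    }
  }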

Examples of org.apache.hadoop.hdfs.MiniDFSCluster

  public void testSymLink() throws Exception
  {
    boolean mayExit = false;
    MiniMRCluster mr = null;
    MiniDFSCluster dfs = null;
    try{
      Configuration conf = new Configuration();
      dfs = new MiniDFSCluster(conf, 1, true, null);
      FileSystem fileSys = dfs.getFileSystem();
      String namenode = fileSys.getName();
      mr  = new MiniMRCluster(1, namenode, 3);
      // During tests, the default Configuration will use a local mapred
      // So don't specify -config or -cluster
      String strJobtracker = "mapred.job.tracker=" + "localhost:" + mr.getJobTrackerPort();
      String strNamenode = "fs.default.name=" + namenode;
      String argv[] = new String[] {
        "-input", INPUT_FILE,
        "-output", OUTPUT_DIR,
        "-mapper", map,
        "-reducer", reduce,
        //"-verbose",
        //"-jobconf", "stream.debug=set"
        "-jobconf", strNamenode,
        "-jobconf", strJobtracker,
        "-jobconf", "stream.tmpdir="+System.getProperty("test.build.data","/tmp"),
        "-jobconf",
          JobConf.MAPRED_MAP_TASK_JAVA_OPTS + "=" +
            "-Dcontrib.name=" + System.getProperty("contrib.name") + " " +
            "-Dbuild.test=" + System.getProperty("build.test") + " " +
            conf.get(JobConf.MAPRED_MAP_TASK_JAVA_OPTS,
                     conf.get(JobConf.MAPRED_TASK_JAVA_OPTS, "")),
        "-jobconf",
          JobConf.MAPRED_REDUCE_TASK_JAVA_OPTS + "=" +
            "-Dcontrib.name=" + System.getProperty("contrib.name") + " " +
            "-Dbuild.test=" + System.getProperty("build.test") + " " +
            conf.get(JobConf.MAPRED_REDUCE_TASK_JAVA_OPTS,
                     conf.get(JobConf.MAPRED_TASK_JAVA_OPTS, "")),
        "-cacheFile", "hdfs://"+fileSys.getName()+CACHE_FILE + "#testlink"
      };

      fileSys.delete(new Path(OUTPUT_DIR), true);
     
      DataOutputStream file = fileSys.create(new Path(INPUT_FILE));
      file.writeBytes(mapString);
      file.close();
      file = fileSys.create(new Path(CACHE_FILE));
      file.writeBytes(cacheString);
      file.close();
       
      job = new StreamJob(argv, mayExit);     
      job.go();

      fileSys = dfs.getFileSystem();
      String line = null;
      Path[] fileList = FileUtil.stat2Paths(fileSys.listStatus(
                                              new Path(OUTPUT_DIR),
                                              new Utils.OutputFileUtils
                                              .OutputFilesFilter()));
      for (int i = 0; i < fileList.length; i++){
        System.out.println(fileList[i].toString());
        BufferedReader bread =
          new BufferedReader(new InputStreamReader(fileSys.open(fileList[i])));
        line = bread.readLine();
        System.out.println(line);
      }
      assertEquals(cacheString + "\t", line);
    } finally{
      if (dfs != null) { dfs.shutdown(); }
      if (mr != null) { mr.shutdown();}
    }
  }

Examples of org.apache.hadoop.hdfs.MiniDFSCluster

  public TestFileArgs() throws IOException {
    super();
    outputExpect = "job.jar\t\nsidefile\t\ntmp\t\n";

    conf = new Configuration();
    dfs = new MiniDFSCluster(conf, 1, true, null);
    fileSys = dfs.getFileSystem();
    namenode = fileSys.getUri().getAuthority();
    mr  = new MiniMRCluster(1, namenode, 1);
    strJobTracker = "mapred.job.tracker=" + "localhost:" + mr.getJobTrackerPort();
    strNamenode = "fs.default.name=" + namenode;

Examples of org.apache.hadoop.hdfs.MiniDFSCluster

    config.set("hadoop.security.auth_to_local",
        "RULE:[2:$1@$0](JobTracker@.*FOO.COM)s/@.*//" +
        "DEFAULT");

    FileSystem.setDefaultUri(config, "hdfs://localhost:" + "0");
    cluster = new MiniDFSCluster(0, config, 1, true, true, true,  null, null, null, null);
    cluster.waitActive();
    cluster.getNameNode().getNamesystem().getDelegationTokenSecretManager()
        .startThreads();
  }
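
With the secret manager's threads started, a client can request a delegation token from the mini cluster's NameNode. A sketch, assuming the 0.21-era DistributedFileSystem API (the renewer name matches the auth_to_local rule configured above):

  // Assumed imports: org.apache.hadoop.hdfs.DistributedFileSystem,
  // org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier,
  // org.apache.hadoop.io.Text, org.apache.hadoop.security.token.Token.
  private Token<DelegationTokenIdentifier> fetchToken() throws IOException {
    DistributedFileSystem dfs = (DistributedFileSystem) cluster.getFileSystem();
    return dfs.getDelegationToken(new Text("JobTracker"));
  }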

Examples of org.apache.hadoop.hdfs.MiniDFSCluster

  private static Path p2;
 
  @BeforeClass
  public static void setUp() throws Exception {
    Configuration conf = new Configuration();
    dfsCluster = new MiniDFSCluster(conf, numSlaves, true, null);
    jConf = new JobConf(conf);
    mrCluster = new MiniMRCluster(0, 0, numSlaves,
        dfsCluster.getFileSystem().getUri().toString(), 1, null, null, null,
        jConf);
   

Examples of org.apache.hadoop.hdfs.MiniDFSCluster

  }

  // Create a populated namespace for later testing.  Save its contents to a
  // data structure and store its fsimage location.
  private File initFsimage() {
    MiniDFSCluster cluster = null;
    File orig = null;
    try {
      Configuration conf = new Configuration();
      cluster = new MiniDFSCluster(conf, 4, true, null);
      FileSystem hdfs = cluster.getFileSystem();
     
      int filesize = 256;
     
      // Create a reasonable namespace
      for(int i = 0; i < NUM_DIRS; i++)  {
        Path dir = new Path("/dir" + i);
        hdfs.mkdirs(dir);
        writtenFiles.put(dir.toString(), pathToFileEntry(hdfs, dir.toString()));
        for(int j = 0; j < FILES_PER_DIR; j++) {
          Path file = new Path(dir, "file" + j);
          FSDataOutputStream o = hdfs.create(file);
          o.write(new byte[ filesize++ ]);
          o.close();
         
          writtenFiles.put(file.toString(), pathToFileEntry(hdfs, file.toString()));
        }
      }

      // Write results to the fsimage file
      cluster.getNameNode().setSafeMode(SafeModeAction.SAFEMODE_ENTER);
      cluster.getNameNode().saveNamespace();
     
      // Determine location of fsimage file
      File [] files = cluster.getNameDirs().toArray(new File[0]);
      orig =  new File(files[0], "current/fsimage");
     
      if(!orig.exists())
        fail("Didn't generate or can't find fsimage.");

    } catch (IOException e) {
      fail("Failed trying to generate fsimage file: " + e.getMessage());
    } finally {
      if(cluster != null)
        cluster.shutdown();
    }
    return orig;
  }
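
The fsimage file returned by initFsimage() is what the offline image viewer consumes. A sketch of feeding it to the viewer, assuming the same era's org.apache.hadoop.hdfs.tools.offlineImageViewer API:

  // Dump the saved fsimage as an ls-style listing (constructor signatures
  // assumed from the 0.21-era offline image viewer).
  File orig = initFsimage();
  ImageVisitor visitor = new LsImageVisitor("fsimage.ls");
  OfflineImageViewer oiv = new OfflineImageViewer(orig.getPath(), visitor, false);
  oiv.go();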