Package org.apache.hadoop.dfs

Examples of org.apache.hadoop.dfs.MiniDFSCluster
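The snippets on this page are drawn from Hadoop and HBase test suites that exercise MiniDFSCluster, an in-process HDFS cluster used for testing. As orientation, here is a minimal sketch of the lifecycle every example below follows (construct, waitActive, use the returned FileSystem, shutdown), written in the same JUnit-3 test style as the snippets; the method name and the /demo path are invented, but the constructor form MiniDFSCluster(conf, numDataNodes, format, racks) is the 0.x-era API used throughout this page:

  public void basicLifecycle() throws Exception {
    Configuration conf = new Configuration();
    // 1 datanode, format the storage dirs, default rack assignment
    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
    try {
      cluster.waitActive();                     // wait for NN and DNs to register
      FileSystem fs = cluster.getFileSystem();  // DFS bound to this mini cluster
      Path demo = new Path("/demo");
      fs.mkdirs(demo);
      assertTrue(fs.exists(demo));
    } finally {
      cluster.shutdown();                       // always release ports and dirs
    }
  }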


  static void mkdir(FileSystem fs, String dir) throws IOException {
    // signature reconstructed for this truncated helper; body as shown in the listing
    Path p = new Path(dir);
    fs.mkdirs(p);
    fs.setPermission(p, new FsPermission((short)0777));
  }

  public void testDistinctUsers() throws Exception {
    MiniDFSCluster dfs = null;
    MiniMRCluster mr = null;
    try {
      Configuration conf = new Configuration();
      UnixUserGroupInformation.saveToConf(conf,
          UnixUserGroupInformation.UGI_PROPERTY_NAME, DFS_UGI);
      dfs = new MiniDFSCluster(conf, 4, true, null);
      FileSystem fs = dfs.getFileSystem();
      mkdir(fs, "/user");
      mkdir(fs, "/mapred");

      UnixUserGroupInformation MR_UGI = createUGI(
          UnixUserGroupInformation.login().getUserName(), false);
      mr = new MiniMRCluster(0, 0, 4, dfs.getFileSystem().getUri().toString(),
          false, 1, MR_UGI);

      JobConf pi = createJobConf(mr, PI_UGI);
      TestMiniMRWithDFS.runPI(mr, pi);

      JobConf wc = createJobConf(mr, WC_UGI);
      TestMiniMRWithDFS.runWordCount(mr, wc);
    } finally {
      if (dfs != null) { dfs.shutdown(); }
      if (mr != null) { mr.shutdown(); }
    }
  }
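The testDistinctUsers example calls createUGI and createJobConf helpers (and the DFS_UGI, PI_UGI, and WC_UGI constants presumably built with them) that the listing truncates away. A plausible reconstruction of the two helpers, with the "supergroup" naming convention assumed rather than verbatim:

  // Plausible reconstruction of the truncated helpers; the "supergroup"
  // convention is an assumption, not verbatim source.
  static UnixUserGroupInformation createUGI(String name, boolean issuper) {
    String group = issuper ? "supergroup" : name;
    return UnixUserGroupInformation.createImmutable(
        new String[]{name, group});
  }

  static JobConf createJobConf(MiniMRCluster mr, UnixUserGroupInformation ugi) {
    JobConf jobconf = mr.createJobConf();
    UnixUserGroupInformation.saveToConf(jobconf,
        UnixUserGroupInformation.UGI_PROPERTY_NAME, ugi);
    return jobconf;
  }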


  public void testCreate() throws Exception {
    Configuration conf = new Configuration();
    conf.setBoolean("dfs.permissions", true);
    conf.setInt(FsPermission.UMASK_LABEL, 0);
    MiniDFSCluster cluster = new MiniDFSCluster(conf, 3, true, null);
    cluster.waitActive();
    FileSystem fs = FileSystem.get(conf);

    try {
      FsPermission rootPerm = checkPermission(fs, "/", null);
      FsPermission inheritPerm = FsPermission.createImmutable(
          (short)(rootPerm.toShort() | 0300));

      FsPermission dirPerm = new FsPermission((short)0777);
      fs.mkdirs(new Path("/a1/a2/a3"), dirPerm);
      checkPermission(fs, "/a1", inheritPerm);
      checkPermission(fs, "/a1/a2", inheritPerm);
      checkPermission(fs, "/a1/a2/a3", dirPerm);

      FsPermission filePerm = new FsPermission((short)0444);
      FSDataOutputStream out = fs.create(new Path("/b1/b2/b3.txt"), filePerm,
          true, conf.getInt("io.file.buffer.size", 4096),
          fs.getDefaultReplication(), fs.getDefaultBlockSize(), null);
      out.write(123);
      out.close();
      checkPermission(fs, "/b1", inheritPerm);
      checkPermission(fs, "/b1/b2", inheritPerm);
      checkPermission(fs, "/b1/b2/b3.txt", filePerm);
     
      conf.setInt(FsPermission.UMASK_LABEL, 0022);
      FsPermission permission =
        FsPermission.createImmutable((short)0666);
      FileSystem.mkdirs(fs, new Path("/c1"), new FsPermission(permission));
      FileSystem.create(fs, new Path("/c1/c2.txt"),
          new FsPermission(permission));
      checkPermission(fs, "/c1", permission);
      checkPermission(fs, "/c1/c2.txt", permission);
    }
    finally {
      try { fs.close(); } catch (Exception e) { /* ignore during cleanup */ }
      try { cluster.shutdown(); } catch (Exception e) { /* ignore during cleanup */ }
    }
  }
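testCreate leans on a checkPermission helper that is not shown. A minimal sketch of what it plausibly does, inferred from its call sites: fetch the actual permission, assert it when an expectation is supplied, and return it either way:

  // Minimal sketch of the omitted helper (behavior inferred from its call sites).
  static FsPermission checkPermission(FileSystem fs, String path,
      FsPermission expected) throws IOException {
    FsPermission actual = fs.getFileStatus(new Path(path)).getPermission();
    if (expected != null) {
      assertEquals(expected, actual);
    }
    return actual;
  }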


  public void testFilePermision() throws Exception {
    Configuration conf = new Configuration();
    conf.setBoolean("dfs.permissions", true);
    MiniDFSCluster cluster = new MiniDFSCluster(conf, 3, true, null);
    cluster.waitActive();

    try {
      FileSystem nnfs = FileSystem.get(conf);
      // test permissions on files that do not exist
      assertFalse(nnfs.exists(CHILD_FILE1));
      try {
        nnfs.setOwner(CHILD_FILE1, "foo", "bar");
        fail("setOwner on a non-existent file should throw FileNotFoundException");
      }
      catch(java.io.FileNotFoundException e) {
        LOG.info("GOOD: got " + e);
      }
      try {
        nnfs.setPermission(CHILD_FILE1, new FsPermission((short)0777));
        fail("setPermission on a non-existent file should throw FileNotFoundException");
      }
      catch(java.io.FileNotFoundException e) {
        LOG.info("GOOD: got " + e);
      }
      // following dir/file creations are legal
      nnfs.mkdirs(CHILD_DIR1);
      FSDataOutputStream out = nnfs.create(CHILD_FILE1);
      byte data[] = new byte[FILE_LEN];
      RAN.nextBytes(data);
      out.write(data);
      out.close();
      nnfs.setPermission(CHILD_FILE1, new FsPermission((short)0700));

      // following read is legal
      byte dataIn[] = new byte[FILE_LEN];
      FSDataInputStream fin = nnfs.open(CHILD_FILE1);
      int bytesRead = fin.read(dataIn);
      assertEquals(FILE_LEN, bytesRead);
      for(int i=0; i<FILE_LEN; i++) {
        assertEquals(data[i], dataIn[i]);
      }

      ////////////////////////////////////////////////////////////////
      // test illegal file/dir creation
      UnixUserGroupInformation userGroupInfo = new UnixUserGroupInformation(
          USER_NAME, GROUP_NAMES );
      UnixUserGroupInformation.saveToConf(conf,
          UnixUserGroupInformation.UGI_PROPERTY_NAME, userGroupInfo);
      FileSystem userfs = FileSystem.get(conf);

      // make sure mkdir of an existing directory that is not owned by
      // this user does not throw an exception.
      userfs.mkdirs(CHILD_DIR1);
     
      // illegal mkdir
      assertFalse(canMkdirs(userfs, CHILD_DIR2));

      // illegal file creation
      assertFalse(canCreate(userfs, CHILD_FILE2));

      // illegal file open
      assertFalse(canOpen(userfs, CHILD_FILE1));

      nnfs.setPermission(ROOT_PATH, new FsPermission((short)0755));
      nnfs.setPermission(CHILD_DIR1, new FsPermission((short)0777));
      nnfs.setPermission(new Path("/"), new FsPermission((short)0777));
      final Path RENAME_PATH = new Path("/foo/bar");
      userfs.mkdirs(RENAME_PATH);
      assertTrue(canRename(userfs, RENAME_PATH, CHILD_DIR1));
    } finally {
      if(cluster != null) cluster.shutdown();
    }
  }
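testFilePermision likewise relies on canMkdirs, canCreate, canOpen, and canRename helpers that the listing omits. Sketches of their likely shape, inferred from the call sites: attempt the operation and report whether the namenode permitted it:

  // Sketches inferred from the call sites: try the operation, return false
  // if the namenode rejects it on permission grounds.
  static boolean canMkdirs(FileSystem fs, Path p) throws IOException {
    try { fs.mkdirs(p); return true; }
    catch (AccessControlException e) { return false; }
  }

  static boolean canCreate(FileSystem fs, Path p) throws IOException {
    try { fs.create(p).close(); return true; }
    catch (AccessControlException e) { return false; }
  }

  static boolean canOpen(FileSystem fs, Path p) throws IOException {
    try { fs.open(p).close(); return true; }
    catch (AccessControlException e) { return false; }
  }

  static boolean canRename(FileSystem fs, Path src, Path dst) throws IOException {
    try { return fs.rename(src, dst); }
    catch (AccessControlException e) { return false; }
  }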

 
  public void testWithDFS() throws IOException {
      String namenode = null;
      MiniDFSCluster dfs = null;
      MiniMRCluster mr = null;
      FileSystem fileSys = null;
      try {
          final int taskTrackers = 4;
          final int jobTrackerPort = 50050;
          final String jobTrackerName = "localhost:" + jobTrackerPort;
          Configuration conf = new Configuration();
          dfs = new MiniDFSCluster(65314, conf, true);
          fileSys = dfs.getFileSystem();
          namenode = fileSys.getName();
          mr = new MiniMRCluster(jobTrackerPort, 50060, taskTrackers,
                                 namenode, true);
          double estimate = PiEstimator.launch(NUM_MAPS, NUM_SAMPLES,
                                               jobTrackerName, namenode);
          double error = Math.abs(Math.PI - estimate);
          assertTrue("Error in PI estimation "+error+" exceeds 0.01", (error < 0.01));
          checkTaskDirectories(mr, new String[]{});
         
          // Run a word count example
          JobConf jobConf = new JobConf();
          // Keeping tasks that match this pattern
          jobConf.setKeepTaskFilesPattern("task_[0-9]*_m_000001_.*");
          String result;
          result = launchWordCount(namenode, jobTrackerName, jobConf,
                                   "The quick brown fox\nhas many silly\n" +
                                   "red fox sox\n",
                                   3, 1);
          assertEquals("The\t1\nbrown\t1\nfox\t2\nhas\t1\nmany\t1\n" +
                       "quick\t1\nred\t1\nsilly\t1\nsox\t1\n", result);
          checkTaskDirectories(mr, new String[]{"task_0002_m_000001_0"});
         
      } finally {
          if (fileSys != null) { fileSys.close(); }
          if (dfs != null) { dfs.shutdown(); }
          if (mr != null) { mr.shutdown(); }
      }
  }
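launchWordCount and checkTaskDirectories are defined elsewhere in the test class. A condensed sketch of the usual launchWordCount pattern: stage the input text in DFS, configure the classic WordCount example against the mini clusters, run it, and read back the reducer output. The paths, the readOutput helper, and the configuration details here are assumptions, not the verbatim test code:

  // Condensed sketch; names, paths, and details are assumptions.
  static String launchWordCount(String fileSys, String jobTracker, JobConf conf,
                                String input, int numMaps, int numReduces)
      throws IOException {
    final Path inDir = new Path("/testing/wc/input");
    final Path outDir = new Path("/testing/wc/output");
    conf.set("fs.default.name", fileSys);        // point at the mini DFS
    conf.set("mapred.job.tracker", jobTracker);  // point at the mini MR cluster
    FileSystem fs = FileSystem.get(conf);
    fs.delete(outDir, true);
    fs.mkdirs(inDir);
    // stage the input text as a single part file
    DataOutputStream file = fs.create(new Path(inDir, "part-0"));
    file.writeBytes(input);
    file.close();
    // configure the classic WordCount example job
    conf.setJobName("wordcount");
    conf.setOutputKeyClass(Text.class);
    conf.setOutputValueClass(IntWritable.class);
    conf.setMapperClass(WordCount.MapClass.class);
    conf.setCombinerClass(WordCount.Reduce.class);
    conf.setReducerClass(WordCount.Reduce.class);
    conf.setInputPath(inDir);        // old JobConf API of this Hadoop era
    conf.setOutputPath(outDir);
    conf.setNumMapTasks(numMaps);
    conf.setNumReduceTasks(numReduces);
    JobClient.runJob(conf);
    return readOutput(outDir, conf); // helper (not shown) concatenates part files
  }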


  @Override
  protected void setUp() throws Exception {
    super.setUp();
    this.miniHdfs = new MiniDFSCluster(this.conf, 1, true, null);
    // Set the hbase.rootdir to be the home directory in mini dfs.
    this.conf.set(HConstants.HBASE_DIR,
      this.miniHdfs.getFileSystem().getHomeDirectory().toString());
  }
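This setUp has a natural counterpart that the listing omits. A sketch of a matching tearDown, assuming only the miniHdfs field shown above:

  @Override
  protected void tearDown() throws Exception {
    super.tearDown();
    if (this.miniHdfs != null) {
      this.miniHdfs.shutdown(); // free ports and delete the test dirs
    }
  }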

 
  /** {@inheritDoc} */
  @Override
  public void setUp() throws Exception {
    try {
      this.cluster = new MiniDFSCluster(this.conf, 2, true, (String[])null);
      // Set the hbase.rootdir to be the home directory in mini dfs.
      this.conf.set(HConstants.HBASE_DIR,
        this.cluster.getFileSystem().getHomeDirectory().toString());
      this.dir = new Path(DIR, getName());
    } catch (IOException e) {
      // handler truncated in the original listing; a plausible completion:
      // shut down the cluster so a failed setUp does not leak it, then rethrow
      if (this.cluster != null) {
        this.cluster.shutdown();
      }
      throw e;
    }
  }

 
  /** {@inheritDoc} */
  @Override
  public void setUp() throws Exception {
    this.cluster = new MiniDFSCluster(conf, 2, true, (String[])null);
    // Make the hbase rootdir match the minidfs we just span up
    this.conf.set(HConstants.HBASE_DIR,
      this.cluster.getFileSystem().getHomeDirectory().toString());
    super.setUp();
    HTableDescriptor htd = createTableDescriptor(getName());
    // ... remainder of this setUp truncated in the original listing
  }

  public void testShutdown() throws Exception {
    // bring up a cluster of 3
    Configuration conf = new Configuration();
    conf.setLong("dfs.block.size", 512L);
    conf.setInt("dfs.dataXceiver.timeoutInMS", 1000);
    MiniDFSCluster cluster = new MiniDFSCluster(conf, 3, true, null);
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();
    final int dnIndex = 0;

    FileSystem localFs = FileSystem.getLocal(conf);
    Path dataDir = new Path(
      System.getProperty("test.build.data", "build/test/data"), "dfs");
    dataDir = new Path(dataDir, "data");
    Path dir1 = new Path(new Path(dataDir, "data"+(2*dnIndex+1)), "tmp");
    Path dir2 = new Path(new Path(dataDir, "data"+(2*dnIndex+2)), "tmp");
    FsPermission oldPerm1 = localFs.getFileStatus(dir1).getPermission();
    FsPermission oldPerm2 = localFs.getFileStatus(dir2).getPermission();
    try {
      // make the data directories of the first datanode read-only
      final FsPermission readPermission =
        new FsPermission(FsAction.READ, FsAction.READ, FsAction.READ);
      localFs.setPermission(dir1, readPermission);
      localFs.setPermission(dir2, readPermission);

      // create files and make sure that first datanode will be down
      DataNode dn = cluster.getDataNodes().get(dnIndex);
      for (int i=0; DataNode.isDatanodeUp(dn); i++) {
        Path fileName = new Path("/test.txt"+i);
        DFSTestUtil.createFile(fs, fileName, 1024, (short)2, 1L);
        DFSTestUtil.waitReplication(fs, fileName, (short)2);
        fs.delete(fileName, true);
      }
    } finally {
      // restore its old permission
      localFs.setPermission(dir1, oldPerm1);
      localFs.setPermission(dir2, oldPerm2);
      cluster.shutdown();
    }
  }
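Both testShutdown above and testReplicationError below locate a datanode's on-disk storage by the same convention. A small helper making that convention explicit; the assumption, taken from the snippets themselves, is that MiniDFSCluster gives datanode i the two directories data(2*i+1) and data(2*i+2) under ${test.build.data}/dfs/data:

  // Assumed layout convention, as relied on by the surrounding tests.
  static File[] dataNodeStorageDirs(int dnIndex) {
    File dataDir = new File(
        System.getProperty("test.build.data", "build/test/data"), "dfs/data");
    return new File[] {
        new File(dataDir, "data" + (2 * dnIndex + 1)),
        new File(dataDir, "data" + (2 * dnIndex + 2))
    };
  }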

 
  public void testReplicationError() throws Exception {
    // bring up a cluster of 1
    Configuration conf = new Configuration();
    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();
   
    try {
      // create a file of replication factor of 1
      final Path fileName = new Path("/test.txt");
      final int fileLen = 1;
      DFSTestUtil.createFile(fs, fileName, 1, (short)1, 1L);
      DFSTestUtil.waitReplication(fs, fileName, (short)1);

      // get the block belonging to the created file
      LocatedBlocks blocks = cluster.getNameNode().namesystem.getBlockLocations(
          fileName.toString(), 0, (long)fileLen);
      assertEquals(1, blocks.locatedBlockCount());
      LocatedBlock block = blocks.get(0);
     
      // bring up a second datanode
      cluster.startDataNodes(conf, 1, true, null, null);
      cluster.waitActive();
      final int sndNode = 1;
      DataNode datanode = cluster.getDataNodes().get(sndNode);
     
      // replicate the block to the second datanode
      InetSocketAddress target = datanode.getSelfAddr();
      Socket s = new Socket(target.getAddress(), target.getPort());
      // hand-craft an OP_WRITE_BLOCK header on a raw socket to the datanode
      DataOutputStream out = new DataOutputStream(s.getOutputStream());

      out.writeShort(FSConstants.DATA_TRANSFER_VERSION);
      out.write(FSConstants.OP_WRITE_BLOCK);
      out.writeLong(block.getBlock().getBlockId());
      out.writeLong(block.getBlock().getGenerationStamp());
      out.writeInt(1);                  // pipeline size
      out.writeBoolean(false);          // recovery flag
      Text.writeString(out, "");        // client name
      out.writeBoolean(false);          // not sending source node information
      out.writeInt(0);                  // number of downstream targets

      // checksum header: type and bytes per checksum
      out.writeByte(1);
      out.writeInt(512);

      out.flush();

      // close the connection before sending the content of the block
      out.close();
     
      // the temporary block & meta files should be deleted
      String dataDir = new File(
         System.getProperty("test.build.data", "build/test/data"),
         "dfs").toString() + "/data";
      File dir1 = new File(new File(dataDir, "data"+(2*sndNode+1)), "tmp");
      File dir2 = new File(new File(dataDir, "data"+(2*sndNode+2)), "tmp");
      while (dir1.listFiles().length != 0 || dir2.listFiles().length != 0) {
        Thread.sleep(100);
      }
     
      // then increase the file's replication factor
      fs.setReplication(fileName, (short)2);
      // replication to the new factor should now succeed
      DFSTestUtil.waitReplication(fs, fileName, (short)2);
     
      // clean up the file
      fs.delete(fileName, false);
    } finally {
      cluster.shutdown();
    }
  }

  public void testProcesOverReplicateBlock() throws IOException {
    Configuration conf = new Configuration();
    conf.setLong("dfs.blockreport.intervalMsec", 1000L);
    conf.set("dfs.replication.pending.timeout.sec", Integer.toString(2));
    MiniDFSCluster cluster = new MiniDFSCluster(conf, 3, true, null);
    FileSystem fs = cluster.getFileSystem();

    try {
      final Path fileName = new Path("/foo1");
      DFSTestUtil.createFile(fs, fileName, 2, (short)3, 0L);
      DFSTestUtil.waitReplication(fs, fileName, (short)3);
     
      // corrupt the block on datanode 0
      Block block = DFSTestUtil.getFirstBlock(fs, fileName);
      TestDatanodeBlockScanner.corruptReplica(block.getBlockName(), 0);
      File scanLog = new File(System.getProperty("test.build.data"),
          "dfs/data/data1/current/dncp_block_verification.log.curr");
      assertTrue(scanLog.delete());
      // restart the datanode so the corrupt replica will be detected
      cluster.restartDataNode(0);
      DFSTestUtil.waitReplication(fs, fileName, (short)2);
     
      final DatanodeID corruptDataNode =
        cluster.getDataNodes().get(2).dnRegistration;
      final FSNamesystem namesystem = FSNamesystem.getFSNamesystem();
      synchronized (namesystem.heartbeats) {
        // set live datanode's remaining space to be 0
        // so they will be chosen to be deleted when over-replication occurs
        for (DatanodeDescriptor datanode : namesystem.heartbeats) {
          if (!corruptDataNode.equals(datanode)) {
            datanode.updateHeartbeat(100L, 100L, 0L, 0);
          }
        }
       
        // decrease the replication factor to 1;
        namesystem.setReplication(fileName.toString(), (short)1);

        // the corrupt replica won't be chosen as the excess one to delete;
        // without HADOOP-4910 the live replica count would drop to 0 and
        // the block would be lost
        assertEquals(1, namesystem.countNodes(block).liveReplicas());
      }
    } finally {
      cluster.shutdown();
    }
  }
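testProcesOverReplicateBlock corrupts a replica through TestDatanodeBlockScanner.corruptReplica, which is not shown here. A plausible sketch of that helper under the same data-directory convention as above: locate the replica's block file on local disk and clobber its first bytes so the block scanner flags it:

  // Plausible sketch of the omitted helper (the layout and the overwrite
  // strategy are assumptions): find the replica's block file and corrupt it.
  static boolean corruptReplica(String blockName, int replica) throws IOException {
    boolean corrupted = false;
    File baseDir = new File(
        System.getProperty("test.build.data", "build/test/data"), "dfs/data");
    for (int i = replica * 2 + 1; i <= replica * 2 + 2; i++) {
      File blockFile = new File(baseDir, "data" + i + "/current/" + blockName);
      if (blockFile.exists()) {
        RandomAccessFile raFile = new RandomAccessFile(blockFile, "rw");
        raFile.seek(0);
        raFile.write("BADBAD".getBytes()); // overwrite the leading bytes
        raFile.close();
        corrupted = true;
      }
    }
    return corrupted;
  }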
