Package org.apache.hadoop.dfs

Examples of org.apache.hadoop.dfs.MiniDFSCluster
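All of the examples below follow the same basic shape: build a Configuration, start an in-process HDFS cluster, obtain a FileSystem backed by it, do the real work inside a try block, and shut everything down in a finally block. As a quick orientation, here is a minimal, self-contained sketch of that pattern built only from calls that appear in the snippets; the class name MiniDFSClusterSketch, the plain main() entry point, and the /demo path are illustrative, while the constructor arguments are the ones used throughout (a Configuration, the number of DataNodes, whether to format storage, and optional rack names).

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.dfs.MiniDFSCluster;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;

  public class MiniDFSClusterSketch {
    public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration();
      // Start a 2-DataNode in-process cluster, formatting name/data dirs first;
      // the last argument (per-DataNode rack names) is left null as in the examples.
      MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, (String[]) null);
      FileSystem fs = null;
      try {
        cluster.waitActive();             // wait until the NameNode and DataNodes are up
        fs = cluster.getFileSystem();     // FileSystem rooted in the mini cluster
        fs.mkdirs(new Path("/demo"));     // use it like any other HDFS instance
        System.out.println("created /demo: " + fs.exists(new Path("/demo")));
      } finally {
        // Best-effort cleanup so a failure never leaks the cluster between tests.
        try { if (fs != null) fs.close(); } catch (Exception ignored) {}
        try { cluster.shutdown(); } catch (Exception ignored) {}
      }
    }
  }

The examples vary this skeleton: the permission tests enable dfs.permissions in the Configuration, the HBase tests layer HLog and HRegion on top of the cluster's FileSystem, and the MapReduce and streaming tests pair the DFS cluster with a MiniMRCluster pointed at fileSys.getName().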


  public void testCreate() throws Exception {
    Configuration conf = new Configuration();
    conf.setBoolean("dfs.permissions", true);
    conf.setInt(FsPermission.UMASK_LABEL, 0);
    MiniDFSCluster cluster = new MiniDFSCluster(conf, 3, true, null);
    cluster.waitActive();
    FileSystem fs = FileSystem.get(conf);

    try {
      FsPermission rootPerm = checkPermission(fs, "/", null);
      // Parent directories created implicitly on the way to a requested path
      // are expected to carry the root's mode plus owner write+execute (0300).
      FsPermission inheritPerm = FsPermission.createImmutable(
          (short)(rootPerm.toShort() | 0300));

      // mkdirs: the leaf directory gets the requested mode; the intermediate
      // directories /a1 and /a1/a2 get the inherited one.
      FsPermission dirPerm = new FsPermission((short)0777);
      fs.mkdirs(new Path("/a1/a2/a3"), dirPerm);
      checkPermission(fs, "/a1", inheritPerm);
      checkPermission(fs, "/a1/a2", inheritPerm);
      checkPermission(fs, "/a1/a2/a3", dirPerm);

      // create: the new file gets the requested mode; its parent directories
      // are created implicitly and get the inherited one.
      FsPermission filePerm = new FsPermission((short)0444);
      FSDataOutputStream out = fs.create(new Path("/b1/b2/b3.txt"), filePerm,
          true, conf.getInt("io.file.buffer.size", 4096),
          fs.getDefaultReplication(), fs.getDefaultBlockSize(), null);
      out.write(123);
      out.close();
      checkPermission(fs, "/b1", inheritPerm);
      checkPermission(fs, "/b1/b2", inheritPerm);
      checkPermission(fs, "/b1/b2/b3.txt", filePerm);
     
      // With a umask of 022 now in effect, the static FileSystem.mkdirs/create
      // helpers are still expected to leave the entries with the exact
      // requested mode (0666).
      conf.setInt(FsPermission.UMASK_LABEL, 0022);
      FsPermission permission =
        FsPermission.createImmutable((short)0666);
      FileSystem.mkdirs(fs, new Path("/c1"), new FsPermission(permission));
      FileSystem.create(fs, new Path("/c1/c2.txt"),
          new FsPermission(permission));
      checkPermission(fs, "/c1", permission);
      checkPermission(fs, "/c1/c2.txt", permission);
    }
    finally {
      try{fs.close();} catch(Exception e) {}
      try{cluster.shutdown();} catch(Exception e) {}
    }
  }


  }

  public void testFilePermision() throws Exception {
    Configuration conf = new Configuration();
    conf.setBoolean("dfs.permissions", true);
    MiniDFSCluster cluster = new MiniDFSCluster(conf, 3, true, null);
    cluster.waitActive();
    FileSystem fs = FileSystem.get(conf);

    try {
      // test permissions on files that do not exist
      assertFalse(fs.exists(CHILD_FILE1));
      try {
        fs.setOwner(CHILD_FILE1, "foo", "bar");
        assertTrue(false);
      }
      catch(java.io.FileNotFoundException e) {
        LOG.info("GOOD: got " + e);
      }
      try {
        fs.setPermission(CHILD_FILE1, new FsPermission((short)0777));
        assertTrue(false);
      }
      catch(java.io.FileNotFoundException e) {
        LOG.info("GOOD: got " + e);
      }
      // following dir/file creations are legal
      fs.mkdirs(CHILD_DIR1);
      FSDataOutputStream out = fs.create(CHILD_FILE1);
      byte data[] = new byte[FILE_LEN];
      Random r = new Random();
      r.nextBytes(data);
      out.write(data);
      out.close();
      fs.setPermission(CHILD_FILE1, new FsPermission((short)0700));

      // following read is legal
      byte dataIn[] = new byte[FILE_LEN];
      FSDataInputStream fin = fs.open(CHILD_FILE1);
      fin.read(dataIn);
      for(int i=0; i<FILE_LEN; i++) {
        assertEquals(data[i], dataIn[i]);
      }
      fs.close();

      // test illegal file/dir creation
      UnixUserGroupInformation userGroupInfo = new UnixUserGroupInformation(
          USER_NAME, GROUP_NAMES );
      conf.set(UnixUserGroupInformation.UGI_PROPERTY_NAME,
          userGroupInfo.toString());
      fs = FileSystem.get(conf);

      // illegal mkdir
      assertTrue(!canMkdirs(fs, CHILD_DIR2));

      // illegal file creation
      assertTrue(!canCreate(fs, CHILD_FILE2));

      // illegal file open
      assertTrue(!canOpen(fs, CHILD_FILE1));
    }
    finally {
      try{fs.close();} catch(Exception e) {}
      try{cluster.shutdown();} catch(Exception e) {}
    }
  }

          System.setProperty("test.build.data", dir);
        }

        if (miniHdfsFilesystem) {
          this.cluster =
            new MiniDFSCluster(this.conf, 2, true, (String[])null);
        }
        this.fs = FileSystem.get(conf);
        this.parentdir = new Path(conf.get(HREGION_DIR, DEFAULT_HREGION_DIR));
        fs.mkdirs(parentdir);

  }
  /** The test! */
  @SuppressWarnings("unchecked")
  public void testScanner() throws IOException {
    MiniDFSCluster cluster = null;
    FileSystem fs = null;
   
    try {
     
      // Initialization
     
      if(System.getProperty("test.build.data") == null) {
        String dir = new File(new File("").getAbsolutePath(), "build/contrib/hbase/test").getAbsolutePath();
        System.out.println(dir);
        System.setProperty("test.build.data", dir);
      }
      Configuration conf = new HBaseConfiguration();
   
      Environment.getenv();
      if(Environment.debugging) {
        Logger rootLogger = Logger.getRootLogger();
        rootLogger.setLevel(Level.WARN);

        ConsoleAppender consoleAppender = null;
        for(Enumeration<Appender> e = (Enumeration<Appender>)rootLogger.getAllAppenders();
            e.hasMoreElements();) {
       
          Appender a = e.nextElement();
          if(a instanceof ConsoleAppender) {
            consoleAppender = (ConsoleAppender)a;
            break;
          }
        }
        if(consoleAppender != null) {
          Layout layout = consoleAppender.getLayout();
          if(layout instanceof PatternLayout) {
            PatternLayout consoleLayout = (PatternLayout)layout;
            consoleLayout.setConversionPattern("%d %-5p [%t] %l: %m%n");
          }
        }
        Logger.getLogger("org.apache.hadoop.hbase").setLevel(Environment.logLevel);
      }
      cluster = new MiniDFSCluster(conf, 2, true, (String[])null);
      fs = cluster.getFileSystem();
      Path dir = new Path("/hbase");
      fs.mkdirs(dir);
     
      Path regionDir = HStoreFile.getHRegionDir(dir, REGION_INFO.regionName);
      fs.mkdirs(regionDir);
     
      HLog log = new HLog(fs, new Path(regionDir, "log"), conf);

      region = new HRegion(dir, log, fs, conf, REGION_INFO, null, null);
     
      // Write information to the meta table
     
      long lockid = region.startUpdate(ROW_KEY);

      ByteArrayOutputStream byteStream = new ByteArrayOutputStream();
      DataOutputStream s = new DataOutputStream(byteStream);
      HGlobals.rootRegionInfo.write(s);
      region.put(lockid, HConstants.COL_REGIONINFO,
          new BytesWritable(byteStream.toByteArray()));
      region.commit(lockid);

      // What we just committed is in the memcache. Verify that we can get
      // it back both with scanning and get
     
      scan(false, null);
      getRegionInfo();
     
      // Close and re-open
     
      region.close();
      log.rollWriter();
      region = new HRegion(dir, log, fs, conf, REGION_INFO, null, null);

      // Verify we can get the data back now that it is on disk.
     
      scan(false, null);
      getRegionInfo();
     
      // Store some new information
      HServerAddress address = new HServerAddress("foo.bar.com:1234");

      lockid = region.startUpdate(ROW_KEY);

      region.put(lockid, HConstants.COL_SERVER,
          new BytesWritable(address.toString().getBytes(HConstants.UTF8_ENCODING)));

      region.put(lockid, HConstants.COL_STARTCODE,
          new BytesWritable(
              String.valueOf(START_CODE).getBytes(HConstants.UTF8_ENCODING)));

      region.commit(lockid);
     
      // Validate that we can still get the HRegionInfo, even though it is in
      // an older row on disk and there is a newer row in the memcache
     
      scan(true, address.toString());
      getRegionInfo();
     
      // flush cache

      region.flushcache(false);

      // Validate again
     
      scan(true, address.toString());
      getRegionInfo();

      // Close and reopen
     
      region.close();
      log.rollWriter();
      region = new HRegion(dir, log, fs, conf, REGION_INFO, null, null);

      // Validate again
     
      scan(true, address.toString());
      getRegionInfo();

      // Now update the information again

      address = new HServerAddress("bar.foo.com:4321");
     
      lockid = region.startUpdate(ROW_KEY);

      region.put(lockid, HConstants.COL_SERVER,
          new BytesWritable(address.toString().getBytes(HConstants.UTF8_ENCODING)));

      region.commit(lockid);
     
      // Validate again
     
      scan(true, address.toString());
      getRegionInfo();

      // flush cache

      region.flushcache(false);

      // Validate again
     
      scan(true, address.toString());
      getRegionInfo();

      // Close and reopen
     
      region.close();
      log.rollWriter();
      region = new HRegion(dir, log, fs, conf, REGION_INFO, null, null);

      // Validate again
     
      scan(true, address.toString());
      getRegionInfo();

    } catch(IOException e) {
      e.printStackTrace();
      throw e;
     
    } finally {
      if(fs != null) {
        fs.close();
      }
      if(cluster != null) {
        cluster.shutdown();
      }
    }
  }

          }
        }
        Logger.getLogger("org.apache.hadoop.hbase").setLevel(Environment.logLevel);
      }
     
      cluster = new MiniDFSCluster(conf, 2, true, (String[])null);
      fs = cluster.getFileSystem();
      parentdir = new Path("/hbase");
      fs.mkdirs(parentdir);
      newlogdir = new Path(parentdir, "log");
      oldlogfile = new Path(parentdir, "oldlogfile");

    super.setUp();
    if (localFS) {
      fileSystem = FileSystem.getLocal(new JobConf());
    }
    else {
      dfsCluster = new MiniDFSCluster(new JobConf(), dataNodes, true, null);
      fileSystem = dfsCluster.getFileSystem();
    }
    if (localMR) {
    }
    else {

*/
public class TestMiniMRDFSCaching extends TestCase {

  public void testWithDFS() throws IOException {
    MiniMRCluster mr = null;
    MiniDFSCluster dfs = null;
    FileSystem fileSys = null;
    try {
      JobConf conf = new JobConf();
      dfs = new MiniDFSCluster(conf, 1, true, null);
      fileSys = dfs.getFileSystem();
      mr = new MiniMRCluster(2, fileSys.getName(), 4);
      // run the wordcount example with caching
      boolean ret = MRCaching.launchMRCache("/testing/wc/input",
                                            "/testing/wc/output",
                                            "/cachedir",
                                            mr.createJobConf(),
                                            "The quick brown fox\nhas many silly\n"
                                            + "red fox sox\n");
      assertTrue("Archives not matching", ret);
    } finally {
      if (fileSys != null) {
        fileSys.close();
      }
      if (dfs != null) {
        dfs.shutdown();
      }
      if (mr != null) {
        mr.shutdown();
      }
    }

    return result.toString();
  }
  
  public void testClassPath() throws IOException {
    String namenode = null;
    MiniDFSCluster dfs = null;
    MiniMRCluster mr = null;
    FileSystem fileSys = null;
    try {
      final int taskTrackers = 4;
      final int jobTrackerPort = 60050;

      Configuration conf = new Configuration();
      dfs = new MiniDFSCluster(conf, 1, true, null);
      fileSys = dfs.getFileSystem();
      namenode = fileSys.getName();
      mr = new MiniMRCluster(taskTrackers, namenode, 3);
      JobConf jobConf = new JobConf();
      String result;
      final String jobTrackerName = "localhost:" + mr.getJobTrackerPort();
      result = launchWordCount(namenode, jobTrackerName, jobConf,
                               "The quick brown fox\nhas many silly\n" +
                               "red fox sox\n",
                               3, 1);
      assertEquals("The\t1\nbrown\t1\nfox\t2\nhas\t1\nmany\t1\n" +
                   "quick\t1\nred\t1\nsilly\t1\nsox\t1\n", result);
         
    } finally {
      if (fileSys != null) { fileSys.close(); }
      if (dfs != null) { dfs.shutdown(); }
      if (mr != null) { mr.shutdown(); }
    }
  }

 
  public void testExternalWritable()
    throws IOException {
    String namenode = null;
    MiniDFSCluster dfs = null;
    MiniMRCluster mr = null;
    FileSystem fileSys = null;

    try {
     
      final int taskTrackers = 4;

      Configuration conf = new Configuration();
      dfs = new MiniDFSCluster(conf, 1, true, null);
      fileSys = dfs.getFileSystem();
      namenode = fileSys.getName();
      mr = new MiniMRCluster(taskTrackers, namenode, 3);     
      JobConf jobConf = new JobConf();
      String result;
      final String jobTrackerName = "localhost:" + mr.getJobTrackerPort();
     
      result = launchExternal(namenode, jobTrackerName, jobConf,
                              "Dennis was here!\nDennis again!",
                              3, 1);
      assertEquals("Dennis again!\t1\nDennis was here!\t1\n", result);
     
    }
    finally {
      if (fileSys != null) { fileSys.close(); }
      if (dfs != null) { dfs.shutdown(); }
      if (mr != null) { mr.shutdown(); }
    }
  }

  public void testSymLink()
  {
    try {
      boolean mayExit = false;
      MiniMRCluster mr = null;
      MiniDFSCluster dfs = null;
      FileSystem fileSys = null;
      try{
        Configuration conf = new Configuration();
        dfs = new MiniDFSCluster(conf, 1, true, null);
        fileSys = dfs.getFileSystem();
        String namenode = fileSys.getName();
        mr  = new MiniMRCluster(1, namenode, 3);
        // During tests, the default Configuration will use a local mapred
        // So don't specify -config or -cluster
        String strJobtracker = "mapred.job.tracker=" + "localhost:" + mr.getJobTrackerPort();
        String strNamenode = "fs.default.name=" + namenode;
        String argv[] = new String[] {
          "-input", INPUT_FILE,
          "-output", OUTPUT_DIR,
          "-mapper", map,
          "-reducer", reduce,
          //"-verbose",
          //"-jobconf", "stream.debug=set"
          "-jobconf", strNamenode,
          "-jobconf", strJobtracker,
          "-jobconf", "stream.tmpdir="+System.getProperty("test.build.data","/tmp"),
          "-cacheFile", "hdfs://"+fileSys.getName()+CACHE_FILE + "#testlink"
        };

        fileSys.delete(new Path(OUTPUT_DIR));
       
        DataOutputStream file = fileSys.create(new Path(INPUT_FILE));
        file.writeBytes(mapString);
        file.close();
        file = fileSys.create(new Path(CACHE_FILE));
        file.writeBytes(cacheString);
        file.close();
         
        job = new StreamJob(argv, mayExit);     
        job.go();
        String line = null;
        Path[] fileList = fileSys.listPaths(new Path(OUTPUT_DIR));
        for (int i = 0; i < fileList.length; i++){
          System.out.println(fileList[i].toString());
          BufferedReader bread =
            new BufferedReader(new InputStreamReader(fileSys.open(fileList[i])));
          line = bread.readLine();
          System.out.println(line);
        }
        assertEquals(cacheString + "\t", line);
      } finally{
        if (fileSys != null) { fileSys.close(); }
        if (dfs != null) { dfs.shutdown(); }
        if (mr != null) { mr.shutdown(); }
      }
     
    } catch(Exception e) {
      failTrace(e);
