Package org.apache.hadoop.dfs

Examples of org.apache.hadoop.dfs.MiniDFSCluster
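
MiniDFSCluster runs an entire HDFS cluster (a namenode plus datanodes) inside a
single JVM, and is how Hadoop and HBase tests get a real FileSystem to work
against. The excerpts below all follow the same lifecycle: construct the
cluster from a Configuration, obtain its FileSystem, run the test, and shut
the cluster down in a finally block.

A minimal sketch of that lifecycle (the class name is hypothetical; the
constructor arguments are the datanode count, whether to format the namenode,
and an optional rack assignment, as in the excerpts below):

  import java.io.IOException;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.dfs.MiniDFSCluster;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;

  public class MiniDFSClusterSketch {
    public static void main(String[] args) throws IOException {
      MiniDFSCluster cluster = null;
      try {
        Configuration conf = new Configuration();
        // 2 datanodes, format the namenode, no explicit rack topology
        cluster = new MiniDFSCluster(conf, 2, true, (String[]) null);
        FileSystem fs = cluster.getFileSystem();
        fs.mkdirs(new Path("/demo"));  // use it like any other FileSystem
      } finally {
        if (cluster != null) { cluster.shutdown(); }
      }
    }
  }

The first excerpt is from a MapReduce security test: it starts a four-datanode
cluster under a dedicated DFS user identity, creates /user and /mapred (the
opening lines are the tail of a permission-setting mkdir helper), then starts
a MiniMRCluster against it and runs PI and word-count jobs under distinct
user identities: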


    fs.mkdirs(p);
    fs.setPermission(p, new FsPermission((short)0777));
  }

  public void testDistinctUsers() throws Exception {
    MiniDFSCluster dfs = null;
    MiniMRCluster mr = null;
    try {
      Configuration conf = new Configuration();
      UnixUserGroupInformation.saveToConf(conf,
          UnixUserGroupInformation.UGI_PROPERTY_NAME, DFS_UGI);
      dfs = new MiniDFSCluster(conf, 4, true, null);
      FileSystem fs = dfs.getFileSystem();
      mkdir(fs, "/user");
      mkdir(fs, "/mapred");

      UnixUserGroupInformation MR_UGI = createUGI(
          UnixUserGroupInformation.login().getUserName(), true);
      mr = new MiniMRCluster(0, 0, 4, dfs.getFileSystem().getUri().toString(),
          false, 1, MR_UGI);

      JobConf pi = createJobConf(mr, PI_UGI);
      TestMiniMRWithDFS.runPI(mr, pi);

      JobConf wc = createJobConf(mr, WC_UGI);
      TestMiniMRWithDFS.runWordCount(mr, wc);
    } finally {
      if (dfs != null) { dfs.shutdown(); }
      if (mr != null) { mr.shutdown(); }
    }
  }
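
An HBase test setUp that starts a two-datanode cluster and points
hbase.rootdir at the mini DFS home directory before creating a table
descriptor: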


  }
 
  /** {@inheritDoc} */
  @Override
  public void setUp() throws Exception {
    this.cluster = new MiniDFSCluster(conf, 2, true, (String[])null);
    // Make the hbase rootdir match the mini dfs we just spun up
    this.conf.set(HConstants.HBASE_DIR,
      this.cluster.getFileSystem().getHomeDirectory().toString());
    super.setUp();
    HTableDescriptor htd = createTableDescriptor(getName());
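
A test-harness constructor that optionally backs the tests with a mini HDFS,
shutting the cluster back down if obtaining its FileSystem fails: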

    this.conf = conf;
    this.deleteOnExit = deleteOnExit;
    this.shutdownDFS = false;
    if (miniHdfsFilesystem) {
      try {
        this.cluster = new MiniDFSCluster(this.conf, 2, format, (String[])null);
        this.fs = cluster.getFileSystem();
        this.shutdownDFS = true;
      } catch (IOException e) {
        StaticTestEnvironment.shutdownDfs(cluster);
        throw e;
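
Another setUp in the same style; note the trailing comment that super.setUp
must only be called after the mini cluster is running: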

  }
 
  /** {@inheritDoc} */
  @Override
  public void setUp() throws Exception {
    dfsCluster = new MiniDFSCluster(conf, 2, true, (String[])null);
    // Set the hbase.rootdir to be the home directory in mini dfs.
    this.conf.set(HConstants.HBASE_DIR,
      this.dfsCluster.getFileSystem().getHomeDirectory().toString());
   
    // Note: we must call super.setUp after starting the mini cluster or
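
A region-split test: it brings up a cluster, creates a region on it, splits
twice, and verifies getting from each of the split regions: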

  /**
   * Splits twice and verifies getting from each of the split regions.
   * @throws Exception
   */
  public void testBasicSplit() throws Exception {
    MiniDFSCluster cluster = null;
    HRegion region = null;
    try {
      cluster = new MiniDFSCluster(conf, 2, true, (String[])null);
      // Set the hbase.rootdir to be the home directory in mini dfs.
      this.conf.set(HConstants.HBASE_DIR,
        cluster.getFileSystem().getHomeDirectory().toString());
      HTableDescriptor htd = createTableDescriptor(getName());
      region = createNewHRegion(htd, null, null);
      basicSplit(region);
    } finally {
      if (region != null) {
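
The same setUp pattern with the cluster held in a private field and a
per-test working directory under /hbase: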

  private MiniDFSCluster cluster;

  /** {@inheritDoc} */
  @Override
  public void setUp() throws Exception {
    cluster = new MiniDFSCluster(conf, 2, true, (String[])null);
    // Set the hbase.rootdir to be the home directory in mini dfs.
    this.conf.set(HConstants.HBASE_DIR,
      this.cluster.getFileSystem().getHomeDirectory().toString());
    super.setUp();
    this.dir = new Path("/hbase", getName());
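
A variant of the harness constructor that falls back to FileSystem.get(conf),
the default (typically local) file system, when no mini HDFS is requested: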

    this.conf = conf;
    this.deleteOnExit = deleteOnExit;
    this.shutdownDFS = false;
    if (miniHdfsFilesystem) {
      this.cluster = new MiniDFSCluster(this.conf, 2, format, (String[])null);
      this.fs = cluster.getFileSystem();
      this.shutdownDFS = true;
    } else {
      this.cluster = null;
      this.fs = FileSystem.get(conf);
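
A scanner test that writes the root region's metadata into a region backed by
the mini DFS, then checks that scans and gets keep working across memcache
flushes, log rolls, and region reopens: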

  /** Exercises scan and get against the region across memcache flushes,
   * log rolls, and region reopens.
   * @throws IOException
   */
  public void testScanner() throws IOException {
    MiniDFSCluster cluster = null;
    FileSystem fs = null;
   
    try {
     
      // Initialization
     
      Configuration conf = new HBaseConfiguration();
      cluster = new MiniDFSCluster(conf, 2, true, (String[])null);
      fs = cluster.getFileSystem();
      Path dir = new Path("/hbase");
      fs.mkdirs(dir);
     
      Path regionDir = HRegion.getRegionDir(dir, REGION_INFO.regionName);
      fs.mkdirs(regionDir);
     
      HLog log = new HLog(fs, new Path(regionDir, "log"), conf);

      region = new HRegion(dir, log, fs, conf, REGION_INFO, null);
     
      // Write information to the meta table
     
      long lockid = region.startUpdate(ROW_KEY);

      ByteArrayOutputStream byteStream = new ByteArrayOutputStream();
      DataOutputStream s = new DataOutputStream(byteStream);
      HGlobals.rootRegionInfo.write(s);
      region.put(lockid, HConstants.COL_REGIONINFO, byteStream.toByteArray());
      region.commit(lockid, System.currentTimeMillis());

      // What we just committed is in the memcache. Verify that we can get
      // it back both with scanning and get
     
      scan(false, null);
      getRegionInfo();
     
      // Close and re-open
     
      region.close();
      log.rollWriter();
      region = new HRegion(dir, log, fs, conf, REGION_INFO, null);

      // Verify we can get the data back now that it is on disk.
     
      scan(false, null);
      getRegionInfo();
     
      // Store some new information
      HServerAddress address = new HServerAddress("foo.bar.com:1234");

      lockid = region.startUpdate(ROW_KEY);

      region.put(lockid, HConstants.COL_SERVER,
        Writables.stringToBytes(address.toString()));

      region.put(lockid, HConstants.COL_STARTCODE,
          Writables.longToBytes(START_CODE));

      region.commit(lockid, System.currentTimeMillis());
     
      // Validate that we can still get the HRegionInfo, even though it is in
      // an older row on disk and there is a newer row in the memcache
     
      scan(true, address.toString());
      getRegionInfo();
     
      // flush cache

      region.flushcache(false);

      // Validate again
     
      scan(true, address.toString());
      getRegionInfo();

      // Close and reopen
     
      region.close();
      log.rollWriter();
      region = new HRegion(dir, log, fs, conf, REGION_INFO, null);

      // Validate again
     
      scan(true, address.toString());
      getRegionInfo();

      // Now update the information again

      address = new HServerAddress("bar.foo.com:4321");
     
      lockid = region.startUpdate(ROW_KEY);

      region.put(lockid, HConstants.COL_SERVER,
        Writables.stringToBytes(address.toString()));

      region.commit(lockid, System.currentTimeMillis());
     
      // Validate again
     
      scan(true, address.toString());
      getRegionInfo();

      // flush cache

      region.flushcache(false);

      // Validate again
     
      scan(true, address.toString());
      getRegionInfo();

      // Close and reopen
     
      region.close();
      log.rollWriter();
      region = new HRegion(dir, log, fs, conf, REGION_INFO, null);

      // Validate again
     
      scan(true, address.toString());
      getRegionInfo();
     
      // clean up
     
      region.close();
      log.closeAndDelete();
     
    } finally {
      if(cluster != null) {
        cluster.shutdown();
      }
    }
  }
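
A CopyFiles (distcp) test that copies a directory tree between two paths on
the same mini cluster, then verifies the destination files and the log
directory: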

  }
 
  /** Copy files from one DFS directory to another on the same file system. */
  public void testCopyFromDfsToDfs() throws Exception {
    String namenode = null;
    MiniDFSCluster cluster = null;
    try {
      Configuration conf = new Configuration();
      cluster = new MiniDFSCluster(conf, 2, true, null);
      namenode = conf.get("fs.default.name", "local");
      if (!"local".equals(namenode)) {
        MyFile[] files = createFiles(namenode, "/srcdat");
        ToolRunner.run(new CopyFiles(conf), new String[] {
                                         "-log",
                                         "hdfs://"+namenode+"/logs",
                                         "hdfs://"+namenode+"/srcdat",
                                         "hdfs://"+namenode+"/destdat"});
        assertTrue("Source and destination directories do not match.",
                   checkFiles(namenode, "/destdat", files));
        FileSystem fs = FileSystem.get(URI.create("hdfs://"+namenode+"/logs"), conf);
        assertTrue("Log directory does not exist.",
                    fs.exists(new Path("hdfs://"+namenode+"/logs")));
        deldir(namenode, "/destdat");
        deldir(namenode, "/srcdat");
        deldir(namenode, "/logs");
      }
    } finally {
      if (cluster != null) { cluster.shutdown(); }
    }
  }
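
The companion test, copying a directory tree from the local file system into
the mini cluster's HDFS: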

  }
 
  /** Copy files from the local file system to a DFS file system. */
  public void testCopyFromLocalToDfs() throws Exception {
    String namenode = null;
    MiniDFSCluster cluster = null;
    try {
      Configuration conf = new Configuration();
      cluster = new MiniDFSCluster(conf, 1, true, null);
      namenode = conf.get("fs.default.name", "local");
      if (!"local".equals(namenode)) {
        MyFile[] files = createFiles("local", TEST_ROOT_DIR+"/srcdat");
        ToolRunner.run(new CopyFiles(conf), new String[] {
                                         "-log",
                                         "hdfs://"+namenode+"/logs",
                                         "file:///"+TEST_ROOT_DIR+"/srcdat",
                                         "hdfs://"+namenode+"/destdat"});
        assertTrue("Source and destination directories do not match.",
                   checkFiles(namenode, "/destdat", files));
        FileSystem fs = FileSystem.get(URI.create("hdfs://"+namenode+"/logs"), conf);
        assertTrue("Log directory does not exist.",
                    fs.exists(new Path("hdfs://"+namenode+"/logs")));
        deldir(namenode, "/destdat");
        deldir(namenode, "/logs");
        deldir("local", TEST_ROOT_DIR+"/srcdat");
      }
    } finally {
      if (cluster != null) { cluster.shutdown(); }
    }
  }
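
The setUp excerpts above imply a matching tearDown; a minimal JUnit 3 style
sketch of the pairing (class and field names are hypothetical):

  import junit.framework.TestCase;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.dfs.MiniDFSCluster;

  public class MiniDFSClusterLifecycleSketch extends TestCase {
    private final Configuration conf = new Configuration();
    private MiniDFSCluster cluster;

    @Override
    public void setUp() throws Exception {
      super.setUp();
      cluster = new MiniDFSCluster(conf, 2, true, (String[]) null);
    }

    @Override
    public void tearDown() throws Exception {
      // Releases the namenode and datanode threads started in setUp.
      if (cluster != null) { cluster.shutdown(); }
      super.tearDown();
    }
  }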
