Package org.apache.hadoop.dfs

Examples of org.apache.hadoop.dfs.MiniDFSCluster
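
The snippets below all follow the same pattern: build a MiniDFSCluster(conf, numDataNodes, format, racks) inside a try block, take its FileSystem, run the test body against that in-process DFS, and shut everything down in a finally block. As a reading aid, here is a minimal self-contained sketch of that lifecycle; the test class name and the /tmp/minidfs-smoke path are illustrative only and are not taken from any of the snippets on this page.

import java.io.IOException;

import junit.framework.TestCase;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.dfs.MiniDFSCluster;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TestMiniDFSClusterSmoke extends TestCase {

  public void testStartAndShutdown() throws IOException {
    MiniDFSCluster cluster = null;
    FileSystem fs = null;
    try {
      Configuration conf = new Configuration();
      // Same constructor used throughout the examples below:
      // (conf, number of datanodes, format the namespace, rack names or null).
      cluster = new MiniDFSCluster(conf, 1, true, null);
      fs = cluster.getFileSystem();

      // Exercise the in-process DFS with a trivial round trip.
      Path dir = new Path("/tmp/minidfs-smoke");
      assertTrue("mkdirs failed", fs.mkdirs(dir));
      assertTrue("directory not visible", fs.exists(dir));
    } finally {
      // Close the client and stop the mini cluster, as every example does.
      if (fs != null) { fs.close(); }
      if (cluster != null) { cluster.shutdown(); }
    }
  }
}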


      assertTrue("Directory " + taskDirs[i] + " not found", found[i]);
    }
  }
 
  public void testWithDFS() throws IOException {
    MiniDFSCluster dfs = null;
    MiniMRCluster mr = null;
    FileSystem fileSys = null;
    try {
      final int taskTrackers = 4;

      Configuration conf = new Configuration();
      dfs = new MiniDFSCluster(conf, 4, true, null);
      fileSys = dfs.getFileSystem();
      mr = new MiniMRCluster(taskTrackers, fileSys.getName(), 1);
      double estimate = PiEstimator.launch(NUM_MAPS, NUM_SAMPLES,
                                           mr.createJobConf());
      double error = Math.abs(Math.PI - estimate);
      assertTrue("Error in PI estimation "+error+" exceeds 0.01", (error < 0.01));
      checkTaskDirectories(mr, new String[]{}, new String[]{});
         
      // Run a word count example
      JobConf jobConf = mr.createJobConf();
      // Keeping tasks that match this pattern
      jobConf.setKeepTaskFilesPattern("task_[0-9]*_m_000001_.*");
      String result;
      result = launchWordCount(jobConf,
                               "The quick brown fox\nhas many silly\n" +
                               "red fox sox\n",
                               3, 1);
      assertEquals("The\t1\nbrown\t1\nfox\t2\nhas\t1\nmany\t1\n" +
                   "quick\t1\nred\t1\nsilly\t1\nsox\t1\n", result);
      checkTaskDirectories(mr, new String[]{"job_0002"}, new String[]{"task_0002_m_000001_0"});
      // test with maps=0
      jobConf = mr.createJobConf();
      result = launchWordCount(jobConf, "owen is oom", 0, 1);
      assertEquals("is\t1\noom\t1\nowen\t1\n", result);
    } finally {
      if (fileSys != null) { fileSys.close(); }
      if (dfs != null) { dfs.shutdown(); }
      if (mr != null) { mr.shutdown(); }
    }
  }


    boolean success = false;
    String base = new File(".").getAbsolutePath();
    System.setProperty("hadoop.log.dir", base + "/logs");
    conf_ = new Configuration();
    String overrideFS = StreamUtil.getBoundAntProperty("fs.default.name", null);
    MiniDFSCluster cluster = null;
    try {
      if (overrideFS == null) {
        cluster = new MiniDFSCluster(conf_, 1, true, null);
        fs_ = cluster.getFileSystem();
      } else {
        System.out.println("overrideFS: " + overrideFS);
        conf_.set("fs.default.name", overrideFS);
        fs_ = FileSystem.get(conf_);
      }
      doAllTestJobs();
      success = true;
    } catch (IOException io) {
      io.printStackTrace();
    } finally {
      try {
        if (fs_ != null) { fs_.close(); }
      } catch (IOException io) {
        // ignore close failures during cleanup
      }
      if (cluster != null) {
        cluster.shutdown();
        System.out.println("cluster.shutdown(); DONE");
      }
      System.out.println(getClass().getName() + ": success=" + success);
    }
  }

  private Path[] path = new Path[NUM_OF_PATHS];
 
  protected void setUp() throws Exception {
    try {
      Configuration conf = new Configuration();
      dfsCluster = new MiniDFSCluster(conf, 1, true, null);
      fs = FileSystem.get(conf);
    } catch (IOException e) {
      e.printStackTrace();
      throw e;    // fail setUp loudly instead of running the tests with a null fs
    }
  }

    }
  }
 
  @SuppressWarnings("unchecked")
  public void testGet() throws IOException {
    MiniDFSCluster cluster = null;

    try {
     
      // Initialization
     
      if(System.getProperty("test.build.data") == null) {
        String dir = new File(new File("").getAbsolutePath(), "build/contrib/hbase/test").getAbsolutePath();
        System.out.println(dir);
        System.setProperty("test.build.data", dir);
      }
      Configuration conf = new HBaseConfiguration();
   
      Environment.getenv();
      if(Environment.debugging) {
        Logger rootLogger = Logger.getRootLogger();
        rootLogger.setLevel(Level.WARN);

        ConsoleAppender consoleAppender = null;
        for(Enumeration<Appender> e = (Enumeration<Appender>)rootLogger.getAllAppenders();
            e.hasMoreElements();) {
       
          Appender a = e.nextElement();
          if(a instanceof ConsoleAppender) {
            consoleAppender = (ConsoleAppender)a;
            break;
          }
        }
        if(consoleAppender != null) {
          Layout layout = consoleAppender.getLayout();
          if(layout instanceof PatternLayout) {
            PatternLayout consoleLayout = (PatternLayout)layout;
            consoleLayout.setConversionPattern("%d %-5p [%t] %l: %m%n");
          }
        }
        Logger.getLogger("org.apache.hadoop.hbase").setLevel(Environment.logLevel);
      }
      cluster = new MiniDFSCluster(conf, 2, true, (String[])null);
      FileSystem fs = cluster.getFileSystem();
      Path dir = new Path("/hbase");
      fs.mkdirs(dir);
     
      HTableDescriptor desc = new HTableDescriptor("test", 1);
      desc.addFamily(CONTENTS);
      desc.addFamily(HConstants.COLUMN_FAMILY);
     
      HRegionInfo info = new HRegionInfo(0L, desc, null, null);
      Path regionDir = HStoreFile.getHRegionDir(dir, info.regionName);
      fs.mkdirs(regionDir);
     
      HLog log = new HLog(fs, new Path(regionDir, "log"), conf);

      HRegion r = new HRegion(dir, log, fs, conf, info, null, null);
     
      // Write information to the table
     
      long lockid = r.startUpdate(ROW_KEY);
      ByteArrayOutputStream bytes = new ByteArrayOutputStream();
      DataOutputStream s = new DataOutputStream(bytes);
      CONTENTS.write(s);
      r.put(lockid, CONTENTS, new BytesWritable(bytes.toByteArray()));

      bytes.reset();
      HGlobals.rootRegionInfo.write(s);
     
      r.put(lockid, HConstants.COL_REGIONINFO, new BytesWritable(bytes.toByteArray()));
     
      r.commit(lockid);
     
      lockid = r.startUpdate(ROW_KEY);

      r.put(lockid, HConstants.COL_SERVER,
          new BytesWritable(
              new HServerAddress("foo.bar.com:1234").toString().getBytes(HConstants.UTF8_ENCODING)
              )
      );
     
      r.put(lockid, HConstants.COL_STARTCODE,
          new BytesWritable(
              String.valueOf(lockid).getBytes(HConstants.UTF8_ENCODING)
              )
      );
     
      r.put(lockid, new Text(HConstants.COLUMN_FAMILY + "region"),
          new BytesWritable("region".getBytes(HConstants.UTF8_ENCODING)));

      r.commit(lockid);
     
      // Verify that get works the same from memcache as when reading from disk
      // NOTE dumpRegion won't work here because it only reads from disk.
     
      verifyGet(r);
     
      // Close and re-open region, forcing updates to disk
     
      r.close();
      log.rollWriter();
      r = new HRegion(dir, log, fs, conf, info, null, null);
     
      // Read it back
     
      verifyGet(r);
     
      // Update one family member and add a new one
     
      lockid = r.startUpdate(ROW_KEY);

      r.put(lockid, new Text(HConstants.COLUMN_FAMILY + "region"),
          new BytesWritable("region2".getBytes()));

      r.put(lockid, HConstants.COL_SERVER,
          new BytesWritable(
              new HServerAddress("bar.foo.com:4321").toString().getBytes(HConstants.UTF8_ENCODING)
              )
      );
     
      r.put(lockid, new Text(HConstants.COLUMN_FAMILY + "junk"),
          new BytesWritable("junk".getBytes()));
     
      r.commit(lockid);

      verifyGet(r);
     
      // Close region and re-open it
     
      r.close();
      log.rollWriter();
      r = new HRegion(dir, log, fs, conf, info, null, null);

      // Read it back
     
      verifyGet(r);

      // Close region once and for all
     
      r.close();
     
    } catch(IOException e) {
      e.printStackTrace();
      throw e;
     
    } finally {
      if(cluster != null) {
        cluster.shutdown();
      }
    }
  }

public class TestDFSShellGenericOptions extends TestCase {

  public void testDFSCommand() throws IOException {
    String namenode = null;
    MiniDFSCluster cluster = null;
    try {
      Configuration conf = new Configuration();
      cluster = new MiniDFSCluster(conf, 1, true, null);
      namenode = conf.get("fs.default.name", "local");
      // args[0] and args[1] are left for each helper below to fill in with
      // the generic option being tested and its value.
      String[] args = new String[4];
      args[2] = "-mkdir";
      args[3] = "/data";
      testFsOption(args, namenode);
      testConfOption(args, namenode);
      testPropertyOption(args, namenode);
    } finally {
      if (cluster != null) { cluster.shutdown(); }
    }
  }

  }
 
  /** copy files from dfs file system to dfs file system */
  public void testCopyFromDfsToDfs() throws Exception {
    String namenode = null;
    MiniDFSCluster cluster = null;
    try {
      Configuration conf = new Configuration();
      cluster = new MiniDFSCluster(conf, 2, true, null);
      namenode = conf.get("fs.default.name", "local");
      if (!"local".equals(namenode)) {
        MyFile[] files = createFiles(namenode, "/srcdat");
        new CopyFiles().doMain(conf, new String[] {"hdfs://"+namenode+"/srcdat",
                                                   "hdfs://"+namenode+"/destdat"});
        assertTrue("Source and destination directories do not match.",
                   checkFiles(namenode, "/destdat", files));
        deldir(namenode, "/destdat");
        deldir(namenode, "/srcdat");
      }
    } finally {
      if (cluster != null) { cluster.shutdown(); }
    }
  }

  }
 
  /** copy files from local file system to dfs file system */
  public void testCopyFromLocalToDfs() throws Exception {
    String namenode = null;
    MiniDFSCluster cluster = null;
    try {
      Configuration conf = new Configuration();
      cluster = new MiniDFSCluster(conf, 1, true, null);
      namenode = conf.get("fs.default.name", "local");
      if (!"local".equals(namenode)) {
        MyFile[] files = createFiles("local", TEST_ROOT_DIR+"/srcdat");
        new CopyFiles().doMain(conf, new String[] {"file://"+TEST_ROOT_DIR+"/srcdat",
                                                   "hdfs://"+namenode+"/destdat"});
        assertTrue("Source and destination directories do not match.",
                   checkFiles(namenode, "/destdat", files));
        deldir(namenode, "/destdat");
        deldir("local", TEST_ROOT_DIR+"/srcdat");
      }
    } finally {
      if (cluster != null) { cluster.shutdown(); }
    }
  }

  }

  /** copy files from dfs file system to local file system */
  public void testCopyFromDfsToLocal() throws Exception {
    String namenode = null;
    MiniDFSCluster cluster = null;
    try {
      Configuration conf = new Configuration();
      cluster = new MiniDFSCluster(conf, 1, true, null);
      namenode = conf.get("fs.default.name", "local");
      if (!"local".equals(namenode)) {
        MyFile[] files = createFiles(namenode, "/srcdat");
        new CopyFiles().doMain(conf, new String[] {"hdfs://"+namenode+"/srcdat",
                                                   "file://"+TEST_ROOT_DIR+"/destdat"});
        assertTrue("Source and destination directories do not match.",
                   checkFiles("local", TEST_ROOT_DIR+"/destdat", files));
        deldir("local", TEST_ROOT_DIR+"/destdat");
        deldir(namenode, "/srcdat");
      }
    } finally {
      if (cluster != null) { cluster.shutdown(); }
    }
  }

  }
 
  /** copy files from dfs file system to dfs file system */
  public void testCopyFromDfsToDfs() throws Exception {
    String namenode = null;
    MiniDFSCluster cluster = null;
    try {
      Configuration conf = new Configuration();
      cluster = new MiniDFSCluster(conf, 2, true, null);
      namenode = conf.get("fs.default.name", "local");
      if (!"local".equals(namenode)) {
        MyFile[] files = createFiles(URI.create("hdfs://"+namenode), "/srcdat");
        ToolRunner.run(new CopyFiles(conf), new String[] {
                                         "-log",
                                         "hdfs://"+namenode+"/logs",
                                         "hdfs://"+namenode+"/srcdat",
                                         "hdfs://"+namenode+"/destdat"});
        assertTrue("Source and destination directories do not match.",
                   checkFiles(namenode, "/destdat", files));
        FileSystem fs = FileSystem.get(URI.create("hdfs://"+namenode+"/logs"), conf);
        assertTrue("Log directory does not exist.",
                    fs.exists(new Path("hdfs://"+namenode+"/logs")));
        deldir(namenode, "/destdat");
        deldir(namenode, "/srcdat");
        deldir(namenode, "/logs");
      }
    } finally {
      if (cluster != null) { cluster.shutdown(); }
    }
  }

  }
 
  /** copy files from local file system to dfs file system */
  public void testCopyFromLocalToDfs() throws Exception {
    String namenode = null;
    MiniDFSCluster cluster = null;
    try {
      Configuration conf = new Configuration();
      cluster = new MiniDFSCluster(conf, 1, true, null);
      namenode = conf.get("fs.default.name", "local");
      if (!"local".equals(namenode)) {
        MyFile[] files = createFiles(LOCAL_FS, TEST_ROOT_DIR+"/srcdat");
        ToolRunner.run(new CopyFiles(conf), new String[] {
                                         "-log",
                                         "hdfs://"+namenode+"/logs",
                                         "file:///"+TEST_ROOT_DIR+"/srcdat",
                                         "hdfs://"+namenode+"/destdat"});
        assertTrue("Source and destination directories do not match.",
                   checkFiles(namenode, "/destdat", files));
        FileSystem fs = FileSystem.get(URI.create("hdfs://"+namenode+"/logs"), conf);
        assertTrue("Log directory does not exist.",
                    fs.exists(new Path("hdfs://"+namenode+"/logs")));
        deldir(namenode, "/destdat");
        deldir(namenode, "/logs");
        deldir("local", TEST_ROOT_DIR+"/srcdat");
      }
    } finally {
      if (cluster != null) { cluster.shutdown(); }
    }
  }
