Package org.apache.hadoop.dfs

Examples of org.apache.hadoop.dfs.MiniDFSCluster
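The snippets below, collected from Hadoop and HBase test code, all follow the same lifecycle: construct a MiniDFSCluster to get an in-process HDFS, run the code under test against its file system, and shut the cluster down in a finally block. As a minimal sketch of that pattern (the class and method names here are illustrative, not taken from any of the quoted projects):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.dfs.MiniDFSCluster;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class MiniDfsLifecycleSketch {
  public void runAgainstMiniDfs() throws Exception {
    Configuration conf = new Configuration();
    MiniDFSCluster cluster = null;
    try {
      // Start an in-process HDFS: one DataNode, format the name node
      // (third argument), default rack assignments (fourth argument).
      cluster = new MiniDFSCluster(conf, 1, true, null);
      FileSystem fs = cluster.getFileSystem();
      fs.mkdirs(new Path("/demo"));
      // ... exercise the code under test against fs ...
    } finally {
      // Shut down even if the body throws, so DataNode threads and
      // temporary storage directories do not leak between tests.
      if (cluster != null) { cluster.shutdown(); }
    }
  }
}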


  /** Copy files from the DFS file system to the local file system. */
  public void testCopyFromDfsToLocal() throws Exception {
    String namenode = null;
    MiniDFSCluster cluster = null;
    try {
      Configuration conf = new Configuration();
      cluster = new MiniDFSCluster(conf, 1, true, null);
      namenode = conf.get("fs.default.name", "local");
      if (!"local".equals(namenode)) {
        MyFile[] files = createFiles(namenode, "/srcdat");
        ToolRunner.run(new CopyFiles(conf), new String[] {
                                         "-log",
                                         "/logs",
                                         "hdfs://"+namenode+"/srcdat",
                                         "file:///"+TEST_ROOT_DIR+"/destdat"});
        assertTrue("Source and destination directories do not match.",
                   checkFiles("local", TEST_ROOT_DIR+"/destdat", files));
        FileSystem fs = FileSystem.get(URI.create("hdfs://"+namenode+"/logs"), conf);
        assertTrue("Log directory does not exist.",
                    fs.exists(new Path("/logs")));
        deldir("local", TEST_ROOT_DIR+"/destdat");
        deldir(namenode, "/logs");
        deldir(namenode, "/srcdat");
      }
    } finally {
      if (cluster != null) { cluster.shutdown(); }
    }
  }
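Note the asymmetry in this test: the destination is the local file system (the file:/// URI), so the copied tree is verified with checkFiles("local", ...), while the distcp log directory /logs still lands on HDFS and is checked through a FileSystem obtained for the hdfs:// URI.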

  public void testCopyDfsToDfsUpdateOverwrite() throws Exception {
    String namenode = null;
    MiniDFSCluster cluster = null;
    try {
      Configuration conf = new Configuration();
      cluster = new MiniDFSCluster(conf, 2, true, null);
      namenode = conf.get("fs.default.name", "local");
      if (!"local".equals(namenode)) {
        MyFile[] files = createFiles(namenode, "/srcdat");
        // First pass: plain copy of /srcdat to /destdat, preserving file
        // status (-p) and logging to hdfs://<namenode>/logs.
        ToolRunner.run(new CopyFiles(conf), new String[] {
                                         "-p",
                                         "-log",
                                         "hdfs://"+namenode+"/logs",
                                         "hdfs://"+namenode+"/srcdat",
                                         "hdfs://"+namenode+"/destdat"});
        assertTrue("Source and destination directories do not match.",
                   checkFiles(namenode, "/destdat", files));
        FileSystem fs = FileSystem.get(URI.create("hdfs://"+namenode+"/logs"), conf);
        assertTrue("Log directory does not exist.",
                    fs.exists(new Path("hdfs://"+namenode+"/logs")));

        // Snapshot the destination's file status, then modify a quarter of
        // the source files so the next pass has something to update.
        FileStatus[] dchkpoint = getFileStatus(namenode, "/destdat", files);
        final int nupdate = NFILES >> 2;
        updateFiles(namenode, "/srcdat", files, nupdate);
        deldir(namenode, "/logs");

        // Second pass: -update should re-copy only the changed files.
        ToolRunner.run(new CopyFiles(conf), new String[] {
                                         "-p",
                                         "-update",
                                         "-log",
                                         "hdfs://"+namenode+"/logs",
                                         "hdfs://"+namenode+"/srcdat",
                                         "hdfs://"+namenode+"/destdat"});
        assertTrue("Source and destination directories do not match.",
                   checkFiles(namenode, "/destdat", files));
        assertTrue("Update failed to replicate all changes in src",
                 checkUpdate(dchkpoint, namenode, "/destdat", files, nupdate));

        deldir(namenode, "/logs");
        // Third pass: -overwrite should re-copy every file unconditionally.
        ToolRunner.run(new CopyFiles(conf), new String[] {
                                         "-p",
                                         "-overwrite",
                                         "-log",
                                         "hdfs://"+namenode+"/logs",
                                         "hdfs://"+namenode+"/srcdat",
                                         "hdfs://"+namenode+"/destdat"});
        assertTrue("Source and destination directories do not match.",
                   checkFiles(namenode, "/destdat", files));
        assertTrue("-overwrite failed to re-copy all files.",
                 checkUpdate(dchkpoint, namenode, "/destdat", files, NFILES));

        deldir(namenode, "/destdat");
        deldir(namenode, "/srcdat");
        deldir(namenode, "/logs");
      }
    } finally {
      if (cluster != null) { cluster.shutdown(); }
    }
  }
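Taken together, the three ToolRunner.run calls exercise CopyFiles in each of its modes against the same source tree: a plain first copy, then -update (asserted to re-copy exactly the nupdate files changed in between), then -overwrite (asserted, via checkUpdate with NFILES, to re-copy every file).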

    desc = new HTableDescriptor(TABLE_NAME);
    desc.addFamily(new HColumnDescriptor(INPUT_COLUMN));
    desc.addFamily(new HColumnDescriptor(OUTPUT_COLUMN));

    dfsCluster = new MiniDFSCluster(conf, 1, true, (String[]) null);
    try {
      fs = dfsCluster.getFileSystem();

      dir = new Path("/hbase");
      fs.mkdirs(dir);

 
  /** {@inheritDoc} */
  @Override
  public void setUp() throws Exception {
    super.setUp();
    this.cluster = new MiniDFSCluster(this.conf, 2, true, (String[])null);
    this.fs = cluster.getFileSystem();
    this.dir = new Path(DIR, getName());
  }
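
The snippet shows only setUp; a matching tearDown (a sketch of the usual counterpart, not taken from the quoted source) would shut the cluster down again:

  /** {@inheritDoc} */
  @Override
  public void tearDown() throws Exception {
    // Mirror setUp: stop the mini cluster before superclass cleanup runs.
    if (this.cluster != null) {
      this.cluster.shutdown();
    }
    super.tearDown();
  }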

  /**
   * Verifies that get returns the same results whether the data is read
   * from the memcache or from disk, across region close/re-open cycles.
   * @throws IOException
   */
  public void testGet() throws IOException {
    MiniDFSCluster cluster = null;

    try {
     
      // Initialization
     
      cluster = new MiniDFSCluster(conf, 2, true, (String[])null);
      FileSystem fs = cluster.getFileSystem();
      Path dir = new Path("/hbase");
      fs.mkdirs(dir);
     
      HTableDescriptor desc = new HTableDescriptor("test");
      desc.addFamily(new HColumnDescriptor(CONTENTS.toString()));
      desc.addFamily(new HColumnDescriptor(HConstants.COLUMN_FAMILY.toString()));
     
      HRegionInfo info = new HRegionInfo(0L, desc, null, null);
      Path regionDir = HRegion.getRegionDir(dir, info.regionName);
      fs.mkdirs(regionDir);
     
      HLog log = new HLog(fs, new Path(regionDir, "log"), conf);

      HRegion r = new HRegion(dir, log, fs, conf, info, null);
     
      // Write information to the table
     
      long lockid = r.startUpdate(ROW_KEY);
      ByteArrayOutputStream bytes = new ByteArrayOutputStream();
      DataOutputStream s = new DataOutputStream(bytes);
      CONTENTS.write(s);
      r.put(lockid, CONTENTS, bytes.toByteArray());

      // Note: this serialization into the local stream goes unused; the put
      // below serializes rootRegionInfo independently via Writables.getBytes.
      bytes.reset();
      HGlobals.rootRegionInfo.write(s);
     
      r.put(lockid, HConstants.COL_REGIONINFO,
          Writables.getBytes(HGlobals.rootRegionInfo));
     
      r.commit(lockid, System.currentTimeMillis());
     
      lockid = r.startUpdate(ROW_KEY);

      r.put(lockid, HConstants.COL_SERVER,
        Writables.stringToBytes(new HServerAddress(SERVER_ADDRESS).toString()));
     
      r.put(lockid, HConstants.COL_STARTCODE, Writables.longToBytes(lockid));
     
      r.put(lockid, new Text(HConstants.COLUMN_FAMILY + "region"),
        "region".getBytes(HConstants.UTF8_ENCODING));

      r.commit(lockid, System.currentTimeMillis());
     
      // Verify that get works the same from memcache as when reading from disk
      // NOTE dumpRegion won't work here because it only reads from disk.
     
      verifyGet(r, SERVER_ADDRESS);
     
      // Close and re-open region, forcing updates to disk
     
      r.close();
      log.rollWriter();
      r = new HRegion(dir, log, fs, conf, info, null);
     
      // Read it back
     
      verifyGet(r, SERVER_ADDRESS);
     
      // Update one family member and add a new one
     
      lockid = r.startUpdate(ROW_KEY);

      r.put(lockid, new Text(HConstants.COLUMN_FAMILY + "region"),
        "region2".getBytes(HConstants.UTF8_ENCODING));

      String otherServerName = "bar.foo.com:4321";
      r.put(lockid, HConstants.COL_SERVER,
        Writables.stringToBytes(new HServerAddress(otherServerName).toString()));
     
      r.put(lockid, new Text(HConstants.COLUMN_FAMILY + "junk"),
        "junk".getBytes(HConstants.UTF8_ENCODING));
     
      r.commit(lockid, System.currentTimeMillis());

      verifyGet(r, otherServerName);
     
      // Close region and re-open it
     
      r.close();
      log.rollWriter();
      r = new HRegion(dir, log, fs, conf, info, null);

      // Read it back
     
      verifyGet(r, otherServerName);

      // Close region once and for all
     
      r.close();
      log.closeAndDelete();
     
    } finally {
      if(cluster != null) {
        cluster.shutdown();
      }
    }
  }
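verifyGet runs after each stage of the test — fresh writes sitting in the memcache, a close/re-open that forces them to disk, another round of updates, and a final re-open — so the same reads are checked against both the in-memory and on-disk paths.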

  // Create directories, start mini cluster, etc.
 
  private void setup() throws IOException {

    cluster = new MiniDFSCluster(conf, 2, true, (String[])null);
    fs = cluster.getFileSystem();
    parentdir = new Path("/hbase");
    fs.mkdirs(parentdir);
    newlogdir = new Path(parentdir, "log");

  private Path[] path = new Path[NUM_OF_PATHS];
 
  protected void setUp() throws Exception {
    Configuration conf = new Configuration();
    dfsCluster = new MiniDFSCluster(conf, 1, true, null);
    // Let any IOException propagate and fail the test up front; catching it
    // here and printing the stack trace would leave fs null for every test.
    fs = FileSystem.get(conf);
  }
   
    value = new ImmutableBytesWritable(
        val.toString().getBytes(HConstants.UTF8_ENCODING));

    dfsCluster = new MiniDFSCluster(conf, 2, true, (String[])null);
     
    // We create three data regions: the first is too large to merge, since
    // it will be > 64 MB in size. The other two will be smaller and will be
    // selected for merging.
   

  /** {@inheritDoc} */
  @Override
  public void setUp() throws Exception {
    try {
      super.setUp();
      dfs = new MiniDFSCluster(conf, 2, true, (String[]) null);
    } catch (Exception e) {
      LOG.fatal("error during setUp: ", e);
      throw e;
    }
  }

  public void setUp() throws Exception {
    super.setUp();
    // This size is picked so the table is split into two
    // after addContent in testMultiRegionTableMapReduce.
    conf.setLong("hbase.hregion.max.filesize", 256 * 1024);
    dfsCluster = new MiniDFSCluster(conf, 1, true, (String[])null);
    try {
      fs = dfsCluster.getFileSystem();
      dir = new Path("/hbase");
      fs.mkdirs(dir);
      // Start up HBase cluster
