Package org.apache.hadoop.hdfs

Examples of org.apache.hadoop.hdfs.MiniDFSCluster
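MiniDFSCluster starts an in-process HDFS cluster (a NameNode plus a configurable number of DataNodes) for unit tests. The snippets below are collected from Hadoop test code and show common setup patterns.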


  private void init() throws IOException {
    init(new Configuration());
  }

  private void init(Configuration conf) throws IOException {
    cluster = new MiniDFSCluster(conf, 3, true, new String[]{"/rack1", "/rack2", "/rack1"});
    cluster.waitClusterUp();
    fs = cluster.getFileSystem();
  }
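The multi-argument MiniDFSCluster constructor used above is deprecated in later Hadoop releases in favor of MiniDFSCluster.Builder. A minimal sketch of the equivalent Builder-based setup, assuming the same cluster and fs fields and the same three-node, two-rack topology:

  // Equivalent setup via the Builder API (preferred in newer Hadoop releases).
  private void init(Configuration conf) throws IOException {
    cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(3)
        .format(true)
        .racks(new String[]{"/rack1", "/rack2", "/rack1"})
        .build();
    cluster.waitClusterUp();
    fs = cluster.getFileSystem();
  }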


  /**
   * Test lease recovery over multiple paths, exercising
   * the internalReleaseOne method.
   */
  public void testMultiPathLeaseRecovery()
    throws IOException, InterruptedException {
    Configuration conf = new Configuration();
    MiniDFSCluster cluster = new MiniDFSCluster(conf, 3, true, null);
    NameNode namenode = cluster.getNameNode();
    FSNamesystem spyNamesystem = spy(namenode.getNamesystem());
    LeaseManager leaseManager = new LeaseManager(spyNamesystem);
   
    spyNamesystem.leaseManager = leaseManager;
    spyNamesystem.lmthread.interrupt();
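The excerpt ends before cleanup, but tests like this one normally guard the cluster with try/finally so the in-process daemons are torn down even when an assertion fails. A sketch of the usual pattern:

    try {
      // ... drive lease recovery against the spied FSNamesystem ...
    } finally {
      cluster.shutdown();  // stops the NameNode and all DataNodes
    }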


  /** verify hdfsproxy implements the hftp interface */
  public void testHdfsProxyInterface() throws Exception {
    MiniDFSCluster cluster = null;
    HdfsProxy proxy = null;
    try {
      final UserGroupInformation CLIENT_UGI = UserGroupInformation.getCurrentUser();
      final String testUser = CLIENT_UGI.getShortUserName();
      final String testGroup = CLIENT_UGI.getGroupNames()[0];

      final Configuration dfsConf = new Configuration();
      dfsConf.set("hadoop.proxyuser." + testUser + ".groups", testGroup);
      dfsConf.set("hadoop.proxyuser." + testGroup + ".hosts",
          "127.0.0.1,localhost");
      dfsConf.set("hadoop.proxyuser." + testUser + ".hosts",
          "127.0.0.1,localhost");
      dfsConf.set("hadoop.security.authentication", "simple");
      cluster = new MiniDFSCluster(dfsConf, 2, true, null);
      cluster.waitActive();

      final FileSystem localfs = FileSystem.get(LOCAL_FS, dfsConf);
      final FileSystem hdfs = cluster.getFileSystem();
      final Configuration proxyConf = new Configuration(false);
      proxyConf.set("hdfsproxy.dfs.namenode.address", hdfs.getUri().getHost() + ":"
          + hdfs.getUri().getPort());
      proxyConf.set("hdfsproxy.https.address", "localhost:0");
      final String namenode = hdfs.getUri().toString();
      if (namenode.startsWith("hdfs://")) {
        MyFile[] files = createFiles(LOCAL_FS, TEST_ROOT_DIR + "/srcdat");
        hdfs.copyFromLocalFile(new Path("file:///" + TEST_ROOT_DIR + "/srcdat"),
            new Path(namenode + "/destdat"));
        assertTrue("Source and destination directories do not match.",
            checkFiles(hdfs, "/destdat", files));

        proxyConf.set("proxy.http.test.listener.addr", "localhost:0");
        proxy = new HdfsProxy(proxyConf);
        proxy.start();
        InetSocketAddress proxyAddr = NetUtils.createSocketAddr("localhost:0");
        final String realProxyAddr = proxyAddr.getHostName() + ":"
            + proxy.getPort();
        final Path proxyUrl = new Path("hftp://" + realProxyAddr);
        final FileSystem hftp = proxyUrl.getFileSystem(dfsConf);
        FileUtil.copy(hftp, new Path(proxyUrl, "/destdat"),
                      hdfs, new Path(namenode + "/copied1"),
                      false, true, proxyConf);
       
        assertTrue("Source and copied directories do not match.", checkFiles(
            hdfs, "/copied1", files));

        FileUtil.copy(hftp, new Path(proxyUrl, "/destdat"),
                      localfs, new Path(TEST_ROOT_DIR + "/copied2"),
                      false, true, proxyConf);
        assertTrue("Source and copied directories do not match.", checkFiles(
            localfs, TEST_ROOT_DIR + "/copied2", files));

        deldir(hdfs, "/destdat");
        deldir(hdfs, "/logs");
        deldir(hdfs, "/copied1");
        deldir(localfs, TEST_ROOT_DIR + "/srcdat");
        deldir(localfs, TEST_ROOT_DIR + "/copied2");
      }
    } finally {
      if (cluster != null) {
        cluster.shutdown();
      }
      if (proxy != null) {
        proxy.stop();
      }
    }
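The helpers createFiles, checkFiles, and deldir are defined elsewhere in the test class and are not part of this excerpt. Judging from the call sites, deldir is presumably a thin wrapper around a recursive delete, roughly:

  // Hypothetical reconstruction of the deldir helper used above.
  private static void deldir(FileSystem fs, String topdir) throws IOException {
    fs.delete(new Path(topdir), true);  // true = recursive
  }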

    conf.set("raid.server.address", "localhost:0");
    conf.setInt("hdfs.raid.stripeLength", stripeLength);
    conf.set("xor".equals(erasureCode) ? RaidNode.RAID_LOCATION_KEY :
             RaidNode.RAIDRS_LOCATION_KEY, "/destraid");

    dfs = new MiniDFSCluster(conf, NUM_DATANODES, true, null);
    dfs.waitActive();
    fileSys = dfs.getFileSystem();
    namenode = fileSys.getUri().toString();
    hftp = "hftp://localhost.localdomain:" + dfs.getNameNodePort();
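Once the hftp URI is known, files written to HDFS can be read back through HFTP, the read-only HTTP protocol served by the NameNode. A sketch of turning that URI into a FileSystem (the file path is hypothetical):

    // Sketch: open a file through the HFTP gateway.
    FileSystem hftpFs = new Path(hftp).getFileSystem(conf);
    FSDataInputStream in = hftpFs.open(new Path("/destraid/part-0"));  // hypothetical path
    in.close();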

    conf.set("raid.classname", "org.apache.hadoop.raid.LocalRaidNode");
    conf.set("raid.server.address", "localhost:0");
    conf.setInt("hdfs.raid.stripeLength", stripeLength);
    conf.set("hdfs.raid.locations", "/destraid");

    dfs = new MiniDFSCluster(conf, NUM_DATANODES, true, null);
    dfs.waitActive();
    fileSys = dfs.getFileSystem();
    namenode = fileSys.getUri().toString();

    FileSystem.setDefaultUri(conf, namenode);
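FileSystem.setDefaultUri records the cluster's URI under fs.default.name (fs.defaultFS in newer releases), so any later FileSystem.get(conf) resolves to the mini cluster instead of the local filesystem:

    // After setDefaultUri, a plain get() returns the mini cluster's filesystem.
    FileSystem defaultFs = FileSystem.get(conf);  // same filesystem as dfs.getFileSystem()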

    // make all deletions not go through Trash
    conf.set("fs.shell.delete.classname", "org.apache.hadoop.hdfs.DFSClient");

    conf.setBoolean("dfs.permissions", false);

    dfs = new MiniDFSCluster(conf, NUM_DATANODES, true, null);
    dfs.waitActive();
    fileSys = dfs.getFileSystem();
    String namenode = fileSys.getUri().toString();
    FileSystem.setDefaultUri(conf, namenode);
  }
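With dfs.permissions disabled and Trash bypassed, shell-style deletes take effect immediately. A sketch of such a delete done programmatically; the path is hypothetical, and FsShell is the class behind the hadoop fs command:

    // Sketch: an immediate recursive delete that skips Trash.
    FsShell shell = new FsShell(conf);
    shell.run(new String[]{"-rmr", "/some/test/path"});  // hypothetical path; run() throws Exception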

    conf.set("raid.classname", "org.apache.hadoop.raid.LocalRaidNode");
    conf.set("raid.server.address", "localhost:0");
    conf.setInt("hdfs.raid.stripeLength", stripeLength);
    conf.set("hdfs.raid.locs", "/destraid");

    dfs = new MiniDFSCluster(conf, NUM_DATANODES, true, null);
    dfs.waitActive();
    fileSys = dfs.getFileSystem();
    namenode = fileSys.getUri().toString();

    FileSystem.setDefaultUri(conf, namenode);
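These setUp-style fragments all allocate the same resources; the matching tearDown is not shown in the excerpts, but it would typically look like this sketch:

    // Typical tearDown counterpart for the fragments above (a sketch).
    if (fileSys != null) { fileSys.close(); }
    if (dfs != null) { dfs.shutdown(); }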

    // create a dfs and map-reduce cluster
    final int taskTrackers = 4;
    final int jobTrackerPort = 60050;

    dfs = new MiniDFSCluster(conf, 6, true, null);
    dfs.waitActive();
    fileSys = dfs.getFileSystem();
    namenode = fileSys.getUri().toString();
    mr = new MiniMRCluster(taskTrackers, namenode, 3);
    jobTrackerName = "localhost:" + mr.getJobTrackerPort();
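With both mini clusters up, a job is pointed at them through the job tracker address and the namenode URI. A sketch, assuming the conf, namenode, and jobTrackerName variables from the snippet:

    // Sketch: wire a classic-MapReduce JobConf against the two mini clusters.
    JobConf jobConf = new JobConf(conf);
    jobConf.set("mapred.job.tracker", jobTrackerName);
    FileSystem.setDefaultUri(jobConf, namenode);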

  @Test
  public void testAuditLogger() throws IOException {
    Configuration conf = new HdfsConfiguration();
    conf.set(DFS_NAMENODE_AUDIT_LOGGERS_KEY,
        DummyAuditLogger.class.getName());
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();

    try {
      cluster.waitClusterUp();
      assertTrue(DummyAuditLogger.initialized);
      DummyAuditLogger.resetLogCount();

      FileSystem fs = cluster.getFileSystem();
      long time = System.currentTimeMillis();
      fs.setTimes(new Path("/"), time, time);
      assertEquals(1, DummyAuditLogger.logCount);
    } finally {
      cluster.shutdown();
    }
  }
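DummyAuditLogger is a test-local class not shown in this excerpt. A minimal sketch of what the assertions imply, implementing org.apache.hadoop.hdfs.server.namenode.AuditLogger (the real class may record more detail):

  static class DummyAuditLogger implements AuditLogger {
    static boolean initialized;
    static int logCount;
    static String remoteAddr;

    public void initialize(Configuration conf) {
      initialized = true;  // proves the NameNode loaded the logger
    }

    public void logAuditEvent(boolean succeeded, String userName,
        InetAddress addr, String cmd, String src, String dst,
        FileStatus stat) {
      remoteAddr = addr.getHostAddress();
      logCount++;
    }

    static void resetLogCount() {
      logCount = 0;
    }
  }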

  @Test
  public void testWebHdfsAuditLogger() throws IOException, URISyntaxException {
    Configuration conf = new HdfsConfiguration();
    conf.set(DFS_NAMENODE_AUDIT_LOGGERS_KEY,
        DummyAuditLogger.class.getName());
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
   
    GetOpParam.Op op = GetOpParam.Op.GETFILESTATUS;
    try {
      cluster.waitClusterUp();
      assertTrue(DummyAuditLogger.initialized);     
      URI uri = new URI(
          "http",
          NetUtils.getHostPortString(cluster.getNameNode().getHttpAddress()),
          "/webhdfs/v1/", op.toQueryString(), null);
     
      // non-proxy request
      HttpURLConnection conn = (HttpURLConnection) uri.toURL().openConnection();
      conn.setRequestMethod(op.getType().toString());
      conn.connect();
      assertEquals(200, conn.getResponseCode());
      conn.disconnect();
      assertEquals(1, DummyAuditLogger.logCount);
      assertEquals("127.0.0.1", DummyAuditLogger.remoteAddr);
     
      // non-trusted proxied request
      conn = (HttpURLConnection) uri.toURL().openConnection();
      conn.setRequestMethod(op.getType().toString());
      conn.setRequestProperty("X-Forwarded-For", "1.1.1.1");
      conn.connect();
      assertEquals(200, conn.getResponseCode());
      conn.disconnect();
      assertEquals(2, DummyAuditLogger.logCount);
      assertEquals("127.0.0.1", DummyAuditLogger.remoteAddr);
     
      // trusted proxied request
      conf.set(ProxyServers.CONF_HADOOP_PROXYSERVERS, "127.0.0.1");
      ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
      conn = (HttpURLConnection) uri.toURL().openConnection();
      conn.setRequestMethod(op.getType().toString());
      conn.setRequestProperty("X-Forwarded-For", "1.1.1.1");
      conn.connect();
      assertEquals(200, conn.getResponseCode());
      conn.disconnect();
      assertEquals(3, DummyAuditLogger.logCount);
      assertEquals("1.1.1.1", DummyAuditLogger.remoteAddr);
    } finally {
      cluster.shutdown();
    }
  }
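The three requests above capture the audit logger's view of client addresses: a direct request logs the socket peer (127.0.0.1); an X-Forwarded-For header from an untrusted client is ignored; and only after 127.0.0.1 is registered as a trusted proxy server does the forwarded address (1.1.1.1) reach the audit trail.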
