Examples of DistributedFileSystem
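
DistributedFileSystem is the HDFS-specific implementation of Hadoop's abstract FileSystem API; the snippets below, drawn from Hadoop and downstream project test code, exercise HDFS-only features such as safe mode, short-circuit reads, delegation tokens, snapshots, XAttrs, and ACLs. As a minimal sketch of how the class is usually obtained (assuming a Hadoop 2.x client; the class name and NameNode address are placeholders), a caller gets a FileSystem for an hdfs:// URI and casts when HDFS-specific methods are needed:

    import java.net.URI;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;

    public class DfsSafeModeProbe {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Placeholder address; point this at a real NameNode.
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:8020"), conf);
        try {
          if (fs instanceof DistributedFileSystem) {
            DistributedFileSystem dfs = (DistributedFileSystem) fs;
            // SAFEMODE_GET only queries safe mode; it does not change it.
            boolean inSafeMode = dfs.setSafeMode(SafeModeAction.SAFEMODE_GET);
            System.out.println("NameNode in safe mode: " + inSafeMode);
          }
        } finally {
          fs.close();
        }
      }
    }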


Examples of org.apache.hadoop.hdfs.DistributedFileSystem
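
This fragment, apparently from a write-ahead-log test, casts the MiniDFSCluster's FileSystem to DistributedFileSystem, forces the NameNode into safe mode via the legacy FSConstants API, and shuts the cluster down; the subsequent wal.close() is expected to fail, but is still called because it stops the LogSyncer thread first.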

    final Path walPath = wal.computeFilename();
   

    // Stop the cluster and ensure a later restart, since the MiniDFSCluster is shared.
    try {
      DistributedFileSystem dfs = (DistributedFileSystem) cluster.getFileSystem();
      dfs.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_ENTER);
      cluster.shutdown();
      try {
        // wal.writer.close() will throw an exception,
        // but still call this since it closes the LogSyncer thread first
        wal.close();

Examples of org.apache.hadoop.hdfs.DistributedFileSystem
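
A test helper that verifies file contents through a client with short-circuit reads enabled: it reads the tail of the file with readFully and compares it against the expected bytes, then re-reads the same range through skipFully plus small sequential reads that cross a chunk boundary.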

  static void checkFileContent(URI uri, Path name, byte[] expected,
      int readOffset, String readingUser, Configuration conf,
      boolean legacyShortCircuitFails)
      throws IOException, InterruptedException {
    // Ensure short circuit is enabled
    DistributedFileSystem fs = getFileSystem(readingUser, uri, conf);
    ClientContext clientContext = ClientContext.getFromConf(conf);
    if (legacyShortCircuitFails) {
      assertFalse(clientContext.getDisableLegacyBlockReaderLocal());
    }
   
    FSDataInputStream stm = fs.open(name);
    byte[] actual = new byte[expected.length - readOffset];
    stm.readFully(readOffset, actual);
    checkData(actual, readOffset, expected, "Read 2");
    stm.close();
    // Now read using a different API.
    actual = new byte[expected.length - readOffset];
    stm = fs.open(name);
    IOUtils.skipFully(stm, readOffset);
    // Read a small number of bytes first.
    int nread = stm.read(actual, 0, 3);
    nread += stm.read(actual, nread, 2);
    // Read across a chunk boundary.

Examples of org.apache.hadoop.hdfs.DistributedFileSystem
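
The direct-buffer variant of the same check: it opens the file as an HdfsDataInputStream so it can read into an off-heap ByteBuffer, and asserts that the legacy local block reader has been disabled when the test expects legacy short-circuit reads to fail.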

  static void checkFileContentDirect(URI uri, Path name, byte[] expected,
      int readOffset, String readingUser, Configuration conf,
      boolean legacyShortCircuitFails)
      throws IOException, InterruptedException {
    // Ensure short circuit is enabled
    DistributedFileSystem fs = getFileSystem(readingUser, uri, conf);
    ClientContext clientContext = ClientContext.getFromConf(conf);
    if (legacyShortCircuitFails) {
      assertTrue(clientContext.getDisableLegacyBlockReaderLocal());
    }
   
    HdfsDataInputStream stm = (HdfsDataInputStream) fs.open(name);

    ByteBuffer actual = ByteBuffer.allocateDirect(expected.length - readOffset);

    IOUtils.skipFully(stm, readOffset);

Examples of org.apache.hadoop.hdfs.DistributedFileSystem
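
Setup for a shared-memory slot allocation test: a single-DataNode MiniDFSCluster is started, the client's ShortCircuitCache is fetched from the DistributedFileSystem, and a visitor inspects the DfsClientShmManager, which starts off with no per-DataNode segments.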

    TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
    Configuration conf = createShortCircuitConf("testAllocShm", sockDir);
    MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    DistributedFileSystem fs = cluster.getFileSystem();
    final ShortCircuitCache cache =
        fs.getClient().getClientContext().getShortCircuitCache();
    cache.getDfsClientShmManager().visit(new Visitor() {
      @Override
      public void visit(HashMap<DatanodeInfo, PerDatanodeVisitorInfo> info)
          throws IOException {
        // The ClientShmManager starts off empty

Examples of org.apache.hadoop.hdfs.DistributedFileSystem
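
Setup for a shared-memory staleness test: the test writes an 8193-byte file with a fixed seed, reads its first byte to force a block reader to be created, looks up the file's first block, and then walks the ShortCircuitCache with a CacheVisitor.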

    TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
    Configuration conf = createShortCircuitConf("testShmBasedStaleness", sockDir);
    MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    DistributedFileSystem fs = cluster.getFileSystem();
    final ShortCircuitCache cache =
        fs.getClient().getClientContext().getShortCircuitCache();
    final String TEST_FILE = "/test_file";
    final int TEST_FILE_LEN = 8193;
    final int SEED = 0xFADED;
    DFSTestUtil.createFile(fs, new Path(TEST_FILE), TEST_FILE_LEN,
        (short)1, SEED);
    FSDataInputStream fis = fs.open(new Path(TEST_FILE));
    int first = fis.read();
    final ExtendedBlock block =
        DFSTestUtil.getFirstBlock(fs, new Path(TEST_FILE));
    Assert.assertTrue(first != -1);
    cache.accept(new CacheVisitor() {

Examples of org.apache.hadoop.hdfs.DistributedFileSystem
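
Namespace setup for an offline image viewer test: with delegation tokens forced on and an auth_to_local rule configured, the test builds directories and small files, fetches delegation tokens so the token op is logged, exercises snapshot and XAttr operations (including an empty-valued XAttr, which the viewer must handle), then enters safe mode and calls saveNamespace so the resulting fsimage contains all of these ops.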

      conf.setBoolean(
          DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
      conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTH_TO_LOCAL,
          "RULE:[2:$1@$0](JobTracker@.*FOO.COM)s/@.*//" + "DEFAULT");
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
      cluster.waitActive();
      DistributedFileSystem hdfs = cluster.getFileSystem();

      // Create a reasonable namespace
      for (int i = 0; i < NUM_DIRS; i++) {
        Path dir = new Path("/dir" + i);
        hdfs.mkdirs(dir);
        writtenFiles.put(dir.toString(), pathToFileEntry(hdfs, dir.toString()));
        for (int j = 0; j < FILES_PER_DIR; j++) {
          Path file = new Path(dir, "file" + j);
          FSDataOutputStream o = hdfs.create(file);
          o.write(23);
          o.close();

          writtenFiles.put(file.toString(),
              pathToFileEntry(hdfs, file.toString()));
        }
      }

      // Create an empty directory
      Path emptydir = new Path("/emptydir");
      hdfs.mkdirs(emptydir);
      writtenFiles.put(emptydir.toString(), hdfs.getFileStatus(emptydir));

      // Get delegation tokens so we log the delegation token op
      Token<?>[] delegationTokens = hdfs
          .addDelegationTokens(TEST_RENEWER, null);
      for (Token<?> t : delegationTokens) {
        LOG.debug("got token " + t);
      }

      final Path snapshot = new Path("/snapshot");
      hdfs.mkdirs(snapshot);
      hdfs.allowSnapshot(snapshot);
      hdfs.mkdirs(new Path("/snapshot/1"));
      hdfs.delete(snapshot, true);

      // Set XAttrs so the fsimage contains XAttr ops
      final Path xattr = new Path("/xattr");
      hdfs.mkdirs(xattr);
      hdfs.setXAttr(xattr, "user.a1", new byte[]{ 0x31, 0x32, 0x33 });
      hdfs.setXAttr(xattr, "user.a2", new byte[]{ 0x37, 0x38, 0x39 });
      // OIV should be able to handle empty value XAttrs
      hdfs.setXAttr(xattr, "user.a3", null);
      writtenFiles.put(xattr.toString(), hdfs.getFileStatus(xattr));

      // Write results to the fsimage file
      hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
      hdfs.saveNamespace();

      // Determine location of fsimage file
      originalFsimage = FSImageTestUtil.findLatestImageFile(FSImageTestUtil
          .getFSImage(cluster.getNameNode()).getStorage().getStorageDir(0));
      if (originalFsimage == null) {

Examples of org.apache.hadoop.hdfs.DistributedFileSystem
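
A delegation-token API test: the first addDelegationTokens call returns one new token and stores it in the Credentials; the second call returns an empty array because the credentials already hold a token for that renewer.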

  }
 
  @Test
  public void testAddDelegationTokensDFSApi() throws Exception {
    UserGroupInformation ugi = UserGroupInformation.createRemoteUser("JobTracker");
    DistributedFileSystem dfs = cluster.getFileSystem();
    Credentials creds = new Credentials();
    final Token<?> tokens[] = dfs.addDelegationTokens("JobTracker", creds);
    Assert.assertEquals(1, tokens.length);
    Assert.assertEquals(1, creds.numberOfTokens());
    checkTokenIdentifier(ugi, tokens[0]);

    final Token<?> tokens2[] = dfs.addDelegationTokens("JobTracker", creds);
    Assert.assertEquals(0, tokens2.length); // already have token
    Assert.assertEquals(1, creds.numberOfTokens());
  }

Examples of org.apache.hadoop.hdfs.DistributedFileSystem
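
Setup for a doAs delegation-token test: a token is obtained through the DistributedFileSystem and cast to Token&lt;DelegationTokenIdentifier&gt; so it can later be used from a proxy UserGroupInformation.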

    }
  }

  @Test
  public void testDelegationTokenWithDoAs() throws Exception {
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final Credentials creds = new Credentials();
    final Token<?> tokens[] = dfs.addDelegationTokens("JobTracker", creds);
    Assert.assertEquals(1, tokens.length);
    @SuppressWarnings("unchecked")
    final Token<DelegationTokenIdentifier> token =
        (Token<DelegationTokenIdentifier>) tokens[0];
    final UserGroupInformation longUgi = UserGroupInformation

Examples of org.apache.hadoop.hdfs.DistributedFileSystem
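
Namespace setup for an ACL-aware offline image viewer test: with ACLs enabled, the test creates directories and files carrying no ACL, a default ACL, a simple access ACL, and several access ACL entries, records the expected AclStatus for each path, then saves the namespace and locates the resulting fsimage file.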

    try {
      Configuration conf = new Configuration();
      conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
      cluster = new MiniDFSCluster.Builder(conf).build();
      cluster.waitActive();
      DistributedFileSystem hdfs = cluster.getFileSystem();

      // Create a reasonable namespace with ACLs
      Path dir = new Path("/dirWithNoAcl");
      hdfs.mkdirs(dir);
      writtenAcls.put(dir.toString(), hdfs.getAclStatus(dir));

      dir = new Path("/dirWithDefaultAcl");
      hdfs.mkdirs(dir);
      hdfs.setAcl(dir, Lists.newArrayList(
          aclEntry(DEFAULT, USER, ALL),
          aclEntry(DEFAULT, USER, "foo", ALL),
          aclEntry(DEFAULT, GROUP, READ_EXECUTE),
          aclEntry(DEFAULT, OTHER, NONE)));
      writtenAcls.put(dir.toString(), hdfs.getAclStatus(dir));

      Path file = new Path("/noAcl");
      FSDataOutputStream o = hdfs.create(file);
      o.write(23);
      o.close();
      writtenAcls.put(file.toString(), hdfs.getAclStatus(file));

      file = new Path("/withAcl");
      o = hdfs.create(file);
      o.write(23);
      o.close();
      hdfs.setAcl(file, Lists.newArrayList(
          aclEntry(ACCESS, USER, READ_WRITE),
          aclEntry(ACCESS, USER, "foo", READ),
          aclEntry(ACCESS, GROUP, READ),
          aclEntry(ACCESS, OTHER, NONE)));
      writtenAcls.put(file.toString(), hdfs.getAclStatus(file));

      file = new Path("/withSeveralAcls");
      o = hdfs.create(file);
      o.write(23);
      o.close();
      hdfs.setAcl(file, Lists.newArrayList(
          aclEntry(ACCESS, USER, READ_WRITE),
          aclEntry(ACCESS, USER, "foo", READ_WRITE),
          aclEntry(ACCESS, USER, "bar", READ),
          aclEntry(ACCESS, GROUP, READ),
          aclEntry(ACCESS, GROUP, "group", READ),
          aclEntry(ACCESS, OTHER, NONE)));
      writtenAcls.put(file.toString(), hdfs.getAclStatus(file));

      // Write results to the fsimage file
      hdfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER, false);
      hdfs.saveNamespace();

      // Determine the location of the fsimage file
      originalFsimage = FSImageTestUtil.findLatestImageFile(FSImageTestUtil
          .getFSImage(cluster.getNameNode()).getStorage().getStorageDir(0));
      if (originalFsimage == null) {

Examples of org.apache.hadoop.hdfs.DistributedFileSystem
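
A filesystem availability check (the truncated signature appears to belong to a helper like HBase's FSUtils.checkFileSystemAvailable): non-HDFS filesystems are ignored, and for HDFS the probe is an exists() call on the root path, with any IOException unwrapped via RemoteExceptionHandler.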

  throws IOException {
    if (!(fs instanceof DistributedFileSystem)) {
      return;
    }
    IOException exception = null;
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    try {
      if (dfs.exists(new Path("/"))) {
        return;
      }
    } catch (IOException e) {
      exception = RemoteExceptionHandler.checkIOException(e);
    }