Package: org.apache.hadoop.hdfs

Examples of org.apache.hadoop.hdfs.DistributedFileSystem.create()


    cluster.waitActive();
    DistributedFileSystem fs = (DistributedFileSystem) cluster.getFileSystem();
    OutputStream out = null;
    try {
      fs.mkdirs(new Path("/test-target"));
      out = fs.create(new Path("/test-source/foo")); // don't close
      fs.rename(new Path("/test-source/"), new Path("/test-target/"));

      fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
      cluster.getNameNodeRpc().saveNamespace();
      fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
View Full Code Here


      final Path dir = new Path("/abc/def");
      final Path file1 = new Path(dir, "f1");
      final Path file2 = new Path(dir, "f2");

      // create an empty file f1
      fs.create(file1).close();

      // create an under-construction file f2
      FSDataOutputStream out = fs.create(file2);
      out.writeBytes("hello");
      ((DFSOutputStream) out.getWrappedStream()).hsync(EnumSet
View Full Code Here

      // create an empty file f1
      fs.create(file1).close();

      // create an under-construction file f2
      FSDataOutputStream out = fs.create(file2);
      out.writeBytes("hello");
      ((DFSOutputStream) out.getWrappedStream()).hsync(EnumSet
          .of(SyncFlag.UPDATE_LENGTH));

      // checkpoint
View Full Code Here

      //create a file, write some data and leave it open.
      final Path p = new Path("/foo");
      final int size = (1 << 16) + RAN.nextInt(1 << 16);
      LOG.info("size = " + size);
      final FSDataOutputStream out = fs.create(p, REPLICATION);
      final byte[] bytes = new byte[1024];
      for(int remaining = size; remaining > 0; ) {
        RAN.nextBytes(bytes);
        final int len = bytes.length < remaining? bytes.length: remaining;
        out.write(bytes, 0, len);
View Full Code Here

          namesystem.getInServiceXceiverAverage(), EPSILON);
     
      // create streams and hsync to force datastreamers to start
      DFSOutputStream[] streams = new DFSOutputStream[fileCount];
      for (int i=0; i < fileCount; i++) {
        streams[i] = (DFSOutputStream)fs.create(new Path("/f"+i), fileRepl)
            .getWrappedStream();
        streams[i].write("1".getBytes());
        streams[i].hsync();
        // the load for writers is 2 because both the write xceiver & packet
        // responder threads are counted in the load
View Full Code Here

        Path dir = new Path("/dir" + i);
        hdfs.mkdirs(dir);
        writtenFiles.put(dir.toString(), pathToFileEntry(hdfs, dir.toString()));
        for (int j = 0; j < FILES_PER_DIR; j++) {
          Path file = new Path(dir, "file" + j);
          FSDataOutputStream o = hdfs.create(file);
          o.write(23);
          o.close();

          writtenFiles.put(file.toString(),
              pathToFileEntry(hdfs, file.toString()));
View Full Code Here

          aclEntry(DEFAULT, GROUP, READ_EXECUTE),
          aclEntry(DEFAULT, OTHER, NONE)));
      writtenAcls.put(dir.toString(), hdfs.getAclStatus(dir));

      Path file = new Path("/noAcl");
      FSDataOutputStream o = hdfs.create(file);
      o.write(23);
      o.close();
      writtenAcls.put(file.toString(), hdfs.getAclStatus(file));

      file = new Path("/withAcl");
View Full Code Here

      o.write(23);
      o.close();
      writtenAcls.put(file.toString(), hdfs.getAclStatus(file));

      file = new Path("/withAcl");
      o = hdfs.create(file);
      o.write(23);
      o.close();
      hdfs.setAcl(file, Lists.newArrayList(
          aclEntry(ACCESS, USER, READ_WRITE),
          aclEntry(ACCESS, USER, "foo", READ),
View Full Code Here

          aclEntry(ACCESS, GROUP, READ),
          aclEntry(ACCESS, OTHER, NONE)));
      writtenAcls.put(file.toString(), hdfs.getAclStatus(file));

      file = new Path("/withSeveralAcls");
      o = hdfs.create(file);
      o.write(23);
      o.close();
      hdfs.setAcl(file, Lists.newArrayList(
          aclEntry(ACCESS, USER, READ_WRITE),
          aclEntry(ACCESS, USER, "foo", READ_WRITE),
View Full Code Here

  @Test
  public void testAbandon() throws Exception {
    NameNode nn = cluster.getNameNode();
    DistributedFileSystem fs = (DistributedFileSystem) cluster.getFileSystem();
    String fileName = "/testAbandon";
    fs.create(new Path(fileName));
    LocatedBlock lbk = nn.addBlock(fileName, fs.getClient().getClientName());
    INodeFileUnderConstruction cons = (INodeFileUnderConstruction) nn.namesystem.dir
        .getInode(fileName);
    cons.setTargets(null);
    nn.abandonBlock(lbk.getBlock(), fileName, fs.getClient().getClientName());
View Full Code Here

TOP
Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc. and is owned by Oracle Inc. Contact: coftware#gmail.com.