Examples of hflush()
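
FSDataOutputStream.hflush() flushes the client-side buffer and pushes the outstanding bytes to the DataNodes in the write pipeline, making them visible to new readers; unlike hsync(), it does not guarantee the bytes have been persisted to disk, and the file stays open for further writes until close(). As a minimal, self-contained sketch of typical usage (the configuration, path, and payload here are illustrative assumptions, not taken from the snippets below):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class HflushExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);            // HDFS if fs.defaultFS points at it
    Path path = new Path("/tmp/hflush-example.txt"); // illustrative path

    try (FSDataOutputStream out = fs.create(path, true)) {
      out.write("first record\n".getBytes("UTF-8"));
      // Push the buffered bytes to the DataNode pipeline so a concurrent
      // reader can already see them; the file remains open for more writes.
      out.hflush();
      out.write("second record\n".getBytes("UTF-8"));
    } // close() flushes the rest and completes the file
  }
}

The test snippets below rely on hflush() in the same way: to make partially written files visible (for checking leases, block reports, or replicas being written) without closing them.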


Examples of org.apache.hadoop.fs.FSDataOutputStream.hflush()

      // Write 'size' random bytes in 1 KB chunks, calling hflush() after
      // each write so the data is pushed to the DataNode pipeline.
      final byte[] bytes = new byte[1024];
      for (int remaining = size; remaining > 0; ) {
        RAN.nextBytes(bytes);
        final int len = bytes.length < remaining ? bytes.length : remaining;
        out.write(bytes, 0, len);
        out.hflush();
        remaining -= len;
      }

      // get the replica being written (RBW)
      final ReplicaBeingWritten oldrbw;

Examples of org.apache.hadoop.fs.FSDataOutputStream.hflush()

      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
          .format(true).build();
      FileSystem fs = cluster.getFileSystem();
      fos = fs.create(new Path("tmpfile"));
      fos.write(new byte[] { 0, 1, 2, 3 });
      // hflush() pushes the bytes to the DataNodes but leaves the file open,
      // so the NameNode still holds a lease for it.
      fos.hflush();
      assertEquals(1, cluster.getNamesystem().getLeaseManager().countLease());

      secondary = startSecondaryNameNode(conf);
      assertEquals(0, secondary.getFSNamesystem().getLeaseManager().countLease());

Examples of org.apache.hadoop.fs.FSDataOutputStream.hflush()

    NameNode nn = cluster.getNameNode();

    FSDataOutputStream out = fs.create(filePath, REPL_FACTOR);
    try {
      AppendTestUtil.write(out, 0, 10);
      out.hflush();

      // Set up a spy so that we can delay the block report coming
      // from this node.
      DataNode dn = cluster.getDataNodes().get(0);
      DatanodeProtocolClientSideTranslatorPB spy =

Examples of org.apache.hadoop.fs.FSDataOutputStream.hflush()

    // Start writing a file but do not close it
    FSDataOutputStream fout = fs.create(new Path(src), true, 4096, (short)1, 512L);
    for (int i = 0; i < 1024; i++) {
      fout.write(123);
    }
    // Flush the written bytes out to the DataNode; the file stays open (under construction).
    fout.hflush();
    long fileId = ((DFSOutputStream) fout.getWrappedStream()).getFileId();

    // Now abandon the last block
    DFSClient dfsclient = DFSClientAdapter.getDFSClient(fs);
    LocatedBlocks blocks =

Examples of org.apache.hadoop.fs.FSDataOutputStream.hflush()

      // create a new file.
      final String f = DIR + "testFsCloseAfterClusterShutdown";
      final Path fpath = new Path(f);
      FSDataOutputStream out = TestFileCreation.createFile(dfs, fpath, DATANODE_NUM);
      out.write("something_test".getBytes());
      out.hflush();    // ensure that a block is allocated

      // shut down the last datanode in the pipeline.
      cluster.stopDataNode(2);

      // close file. Since we have set the minReplication to 3 but have killed one

Examples of org.apache.hadoop.fs.FSDataOutputStream.hflush()

          out.close();

          // append flushedBytes1 bytes to the file
          out = fs.append(p);
          out.write(contents, oldFileLen, flushedBytes1);
          out.hflush();

          // write another flushedBytes2 bytes to the file
          out.write(contents, oldFileLen + flushedBytes1, flushedBytes2);
          out.close();

Examples of org.apache.hadoop.fs.FSDataOutputStream.hflush()

      stm.close();
      // open the file again for append
      stm = fs.append(fileToAppend);
      int mid = rawData.length - 1;
      stm.write(rawData, 1, mid - 1);
      stm.hflush();

      /*
       * wait till token used in stm expires
       */
      Token<BlockTokenIdentifier> token = DFSTestUtil.getBlockToken(stm);

Examples of org.apache.hadoop.fs.FSDataOutputStream.hflush()

      FSDataOutputStream stm = writeFile(fs, fileToWrite, (short) numDataNodes,
          BLOCK_SIZE);
      // write a partial block
      int mid = rawData.length - 1;
      stm.write(rawData, 0, mid);
      stm.hflush();

      /*
       * wait till token used in stm expires
       */
      Token<BlockTokenIdentifier> token = DFSTestUtil.getBlockToken(stm);

Examples of org.apache.hadoop.fs.FSDataOutputStream.hflush()

    try {
      // Create five files, write one byte to each, and hflush() without
      // closing them, so every file stays open (under construction).
      for (int i = 0; i < 5; i++) {
        FSDataOutputStream stm = fs.create(new Path("/test-uc-" + i));
        stms.add(stm);
        stm.write(1);
        stm.hflush();
      }
      // Roll edit log so that, when the SBN restarts, it will load
      // the namespace during startup and enter safemode.
      nn0.getRpcServer().rollEditLog();
    } finally {

Examples of org.apache.hadoop.fs.FSDataOutputStream.hflush()

      Path filePath = new Path(pathString);
      FSDataOutputStream create = dfs.create(filePath,
          FsPermission.getDefault(), true, 1024, (short) 3, testData.length(),
          null);
      create.write(testData.getBytes());
      create.hflush();
      long fileId = ((DFSOutputStream) create.getWrappedStream()).getFileId();
      FileStatus fileStatus = dfs.getFileStatus(filePath);
      DFSClient client = DFSClientAdapter.getClient(dfs);
      // add one dummy block at the NN, but do not write it to a DataNode