Examples of DFSOutputStream


Examples of org.apache.hadoop.hdfs.DFSOutputStream

    // Grab the DFSClient that backs the DistributedFileSystem.
    DFSClient dfsclient = ((DistributedFileSystem) fs).dfs;

    String src = "/testNameNodeFingerprintSent1.txt";
    // Path f = new Path(src);

    // Create the file directly through the DFSClient, with replication 1
    // and a 512-byte block size.
    DFSOutputStream dos = (DFSOutputStream) dfsclient.create(src, true,
        (short) 1, 512L);

    // Wrap the raw DFSOutputStream so it can be used as an FSDataOutputStream.
    FSDataOutputStream a_out = new FSDataOutputStream(dos); // fs.create(f);

    for (int i = 0; i < 512; i++) {
View Full Code Here
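
The excerpt above reaches the DFSOutputStream through DistributedFileSystem internals (the package-private dfs field), which is only practical from test code living in the org.apache.hadoop.hdfs package. Below is a minimal self-contained sketch of an alternative that stays on the public FileSystem API and unwraps the stream, the same pattern used in a later excerpt on this page. It assumes a MiniDFSCluster from the Hadoop test jars and a release where org.apache.hadoop.hdfs.DFSOutputStream is a top-level class; the class name and path are illustrative.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DFSOutputStream;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class DFSOutputStreamSketch {            // hypothetical class name
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
        try {
          FileSystem fs = cluster.getFileSystem();
          Path src = new Path("/dfsOutputStreamSketch.txt");  // illustrative path

          // FileSystem.create() returns an FSDataOutputStream; the raw stream
          // underneath it is the DFSOutputStream.
          FSDataOutputStream out = fs.create(src);
          DFSOutputStream dos = (DFSOutputStream) out.getWrappedStream();

          // Write a block's worth of data through the wrapper, flush it to the
          // datanode pipeline via the raw stream, then close.
          for (int i = 0; i < 512; i++) {
            out.write(i & 0xFF);
          }
          dos.hflush();
          out.close();
        } finally {
          cluster.shutdown();
        }
      }
    }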

Examples of org.apache.hadoop.hdfs.DFSOutputStream

  /** This optional operation is not yet supported. */
  public FSDataOutputStream append(Path f, int bufferSize,
      Progressable progress) throws IOException {

    DFSOutputStream op = (DFSOutputStream)dfs.append(getPathName(f), bufferSize, progress);
    return new FSDataOutputStream(op, statistics, op.getInitialLen());
  }
View Full Code Here
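
The method above is DistributedFileSystem's override of append(); from client code the same path is reached through the public FileSystem API, and the returned FSDataOutputStream is positioned at the file's current length (which is what op.getInitialLen() supplies in the override). A hedged, self-contained sketch, assuming a MiniDFSCluster; the class name, path, and payload are illustrative, and the dfs.support.append flag only matters on older releases.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class AppendSketch {                      // hypothetical class name
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.setBoolean("dfs.support.append", true); // only needed on older releases
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
        try {
          FileSystem fs = cluster.getFileSystem();
          Path f = new Path("/appendSketch.txt");    // illustrative path

          // Create the file and write an initial payload.
          FSDataOutputStream out = fs.create(f);
          out.write("first half ".getBytes("UTF-8"));
          out.close();

          // Re-open for append; on HDFS this goes through the override above
          // and hands back a stream positioned at the current file length.
          FSDataOutputStream appendOut = fs.append(f);
          appendOut.write("second half".getBytes("UTF-8"));
          appendOut.close();

          System.out.println("length after append = " + fs.getFileStatus(f).getLen());
        } finally {
          cluster.shutdown();
        }
      }
    }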

Examples of org.apache.hadoop.hdfs.DFSOutputStream

    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
   
    try {
      FileSystem fs = cluster.getFileSystem();
      NamenodeProtocols namenode = cluster.getNameNodeRpc();
      DFSOutputStream out = null;
      try {
        // Create a file and make sure a block is allocated for it.
        out = (DFSOutputStream)(fs.create(file).
            getWrappedStream());
        out.write(1);
        out.hflush();
       
        // Create a snapshot that includes the file.
        SnapshotTestHelper.createSnapshot((DistributedFileSystem) fs,
            new Path("/"), "s1");
       
View Full Code Here
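
The hflush() call in the excerpt is what makes the single written byte visible before the snapshot is taken: once hflush() returns, the data has been pushed to the datanode pipeline and a new reader can see it even though the file is still open. A hedged fragment illustrating that, reusing the fs handle from the excerpt; the path name is an assumption and the usual Path/FSDataInputStream imports are implied.

      Path p = new Path("/hflushVisibility.txt");  // illustrative path
      FSDataOutputStream out2 = fs.create(p);
      out2.write(1);
      out2.hflush();               // pushed to the pipeline, visible to new readers

      // A reader opened after hflush() should see the flushed byte.
      FSDataInputStream in = fs.open(p);
      int firstByte = in.read();   // expected to be 1
      in.close();
      out2.close();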

Examples of org.apache.hadoop.hdfs.DFSOutputStream

      cluster.waitActive();
      FileSystem fs = cluster.getFileSystem();
      // Open a file and get the head of the pipeline
      Path testFile = new Path("/testRoundTripAckMetric.txt");
      FSDataOutputStream fsout = fs.create(testFile, (short) datanodeCount);
      DFSOutputStream dout = (DFSOutputStream) fsout.getWrappedStream();
      // Slow down the writes to catch the write pipeline
      dout.setChunksPerPacket(5);
      dout.setArtificialSlowdown(3000);
      fsout.write(new byte[10000]);
      DatanodeInfo[] pipeline = null;
      int count = 0;
      while (pipeline == null && count < 5) {
        pipeline = dout.getPipeline();
        System.out.println("Waiting for pipeline to be created.");
        Thread.sleep(1000);
        count++;
      }
      // Get the head node that should be receiving downstream acks
View Full Code Here
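
Once the loop above observes a non-null pipeline, element 0 of the array is the head datanode, the one the client streams packets to and the one that receives the downstream acks the test is interested in; the remaining entries are the downstream replicas in order. A hedged continuation of the excerpt:

      if (pipeline != null) {
        // pipeline[0] is the head of the write pipeline; the rest follow in order.
        for (int i = 0; i < pipeline.length; i++) {
          System.out.println("pipeline[" + i + "] = " + pipeline[i]);
        }
        DatanodeInfo headNode = pipeline[0];
        System.out.println("head of pipeline: " + headNode);
      }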

Examples of org.apache.hadoop.hdfs.DFSOutputStream

          }
        }
      }
    });

    DFSOutputStream dos = (DFSOutputStream) dfsclient.create(src, true,
        (short) 1, 512L);

    FSDataOutputStream a_out = new FSDataOutputStream(dos); // fs.create(f);

    // Writing two blocks.
View Full Code Here
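
The excerpt's comment announces that two blocks are written; with the 512-byte block size passed to create(), that means at least 1024 bytes. A purely illustrative continuation (the payload and the close are assumptions, not the original test body):

    byte[] buf = new byte[1024];        // 2 * 512-byte blocks
    for (int i = 0; i < buf.length; i++) {
      buf[i] = (byte) i;                // illustrative payload
    }
    a_out.write(buf);
    a_out.close();                      // completes the file and the second block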