Examples of FSDataOutputStream
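FSDataOutputStream is the stream handed back by FileSystem.create() and FileSystem.append() in Hadoop-style filesystem APIs; Hadoop, Flink, and the older Stratosphere each ship their own variant, and the excerpts below all follow the same basic pattern: obtain a FileSystem, create the stream, write, and close it. A minimal, self-contained sketch of that pattern against the Hadoop API (the path and the written content are purely illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class FSDataOutputStreamSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Path path = new Path("/tmp/fsdos-example.txt");   // illustrative path
        FileSystem fs = path.getFileSystem(conf);

        // create() returns an FSDataOutputStream; overwrite=false fails if the file exists
        FSDataOutputStream out = fs.create(path, false);
        try {
          out.writeUTF("hello");   // DataOutput methods come from DataOutputStream
          out.write('\n');
        } finally {
          out.close();             // flush and release the handle
        }
      }
    }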


Examples of eu.stratosphere.core.fs.FSDataOutputStream

    // Copies the file at filePath into a job-local temp location, unless a
    // cached copy is already present.
    public Path call() {
      Path tmp = getTempDir(jobID, name);
      try {
        if (!lfs.exists(tmp)) {
          FSDataOutputStream lfsOutput = lfs.create(tmp, false);
          Path distributedPath = new Path(filePath);
          FileSystem fs = distributedPath.getFileSystem();
          FSDataInputStream fsInput = fs.open(distributedPath);
          // stream the remote file into the local copy
          IOUtils.copyBytes(fsInput, lfsOutput);
        }

Examples of org.apache.flink.core.fs.FSDataOutputStream

    // Create test bucket
    fs.mkdirs(bucketPath);

    // Write test file to S3
    final FSDataOutputStream outputStream = fs.create(objectPath, false);
    generateTestData(outputStream, fileSize);
    outputStream.close();

    // Now read the same file back from S3
    final FSDataInputStream inputStream = fs.open(objectPath);
    testReceivedData(inputStream, fileSize);
    inputStream.close();
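The same write-then-read round trip can be expressed with try-with-resources, so both streams are closed even if an assertion fails midway; a sketch reusing the fs, objectPath, fileSize and helper methods from the excerpt above:

    // Sketch: same S3 round trip, with the streams closed automatically.
    try (FSDataOutputStream outputStream = fs.create(objectPath, false)) {
      generateTestData(outputStream, fileSize);
    }
    try (FSDataInputStream inputStream = fs.open(objectPath)) {
      testReceivedData(inputStream, fileSize);
    }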

Examples of org.apache.hadoop.fs.FSDataOutputStream

  // Writes the serialized HTableDescriptor and, after it, its human-readable
  // toString() form to the given path.
  private static void writeHTD(final FileSystem fs, final Path p,
      final HTableDescriptor htd) throws IOException {
    FSDataOutputStream out = fs.create(p, false);
    try {
      htd.write(out);
      out.write('\n');
      out.write('\n');
      out.write(Bytes.toBytes(htd.toString()));
    } finally {
      out.close();
    }
  }

Examples of org.apache.hadoop.fs.FSDataOutputStream

      HLog.Writer nextWriter = this.createWriterInstance(fs, newPath, conf);
      // Can we get at the DFSClient output stream? If the writer is a
      // SequenceFileLogWriter, it has already done the reflection needed to
      // reach the protected field that holds it.
      FSDataOutputStream nextHdfsOut = null;
      if (nextWriter instanceof SequenceFileLogWriter) {
        nextHdfsOut = ((SequenceFileLogWriter)nextWriter).getWriterFSDataOutputStream();
      }

      synchronized (updateLock) {

Examples of org.apache.hadoop.fs.FSDataOutputStream

        context.getCounter(Counter.BYTES_EXPECTED).increment(inputStat.getLen());

        // Ensure that the output folder is there and copy the file
        outputFs.mkdirs(outputPath.getParent());
        FSDataOutputStream out = outputFs.create(outputPath, true);
        try {
          if (!copyData(context, inputPath, in, outputPath, out, inputStat.getLen()))
            return false;
        } finally {
          out.close();
        }

        // Preserve attributes
        return preserveAttributes(outputPath, inputStat);
      } finally {
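copyData itself is not shown in the excerpt; a minimal buffered copy between an FSDataInputStream and an FSDataOutputStream looks roughly like the sketch below (a generic loop, not the project's copyData):

        // Generic buffered copy from 'in' to 'out' (names as in the excerpt above).
        byte[] buffer = new byte[64 * 1024];
        int n;
        while ((n = in.read(buffer)) != -1) {
          out.write(buffer, 0, n);
        }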

Examples of org.apache.hadoop.fs.FSDataOutputStream

      throws IOException {
    FsPermission perms = FSUtils.getFilePermissions(fs, fs.getConf(),
      HConstants.DATA_FILE_UMASK_KEY);
    Path snapshotInfo = new Path(workingDir, SnapshotDescriptionUtils.SNAPSHOTINFO_FILE);
    try {
      FSDataOutputStream out = FSUtils.create(fs, snapshotInfo, perms, true);
      try {
        snapshot.writeTo(out);
      } finally {
        out.close();
      }
    } catch (IOException e) {
      // if we get an exception, try to remove the snapshot info
      if (!fs.delete(snapshotInfo, false)) {
        String msg = "Couldn't delete snapshot info file: " + snapshotInfo;

Examples of org.apache.hadoop.fs.FSDataOutputStream

    Path dstPath = new Path(filePath);
    FileSystem hdfs = dstPath.getFileSystem(conf);

    if (conf.getBoolean("hdfs.append.support", false) && hdfs.isFile(dstPath)) {
      FSDataOutputStream outStream = hdfs.append(dstPath);
      writer = SequenceFile.createWriter(conf, outStream, fmt.getKeyClass(),
          fmt.getValueClass(), compType, codeC);
    } else {
      writer = SequenceFile.createWriter(hdfs, conf, dstPath,
          fmt.getKeyClass(), fmt.getValueClass(), compType, codeC);
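When only a raw stream is needed rather than a SequenceFile writer, the same append-or-create decision condenses to a single expression; a sketch using the conf, hdfs and dstPath names from the excerpt above:

    // Sketch: append to an existing file when the cluster supports it, otherwise create.
    FSDataOutputStream outStream =
        conf.getBoolean("hdfs.append.support", false) && hdfs.isFile(dstPath)
            ? hdfs.append(dstPath)    // keep writing at the end of the existing file
            : hdfs.create(dstPath);   // start a new file (overwrites by default)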

Examples of org.apache.hadoop.fs.FSDataOutputStream

  public static void setVersion(FileSystem fs, Path rootdir, String version,
      int wait, int retries) throws IOException {
    Path versionFile = new Path(rootdir, HConstants.VERSION_FILE_NAME);
    while (true) {
      try {
        FSDataOutputStream s = fs.create(versionFile);
        s.writeUTF(version);
        LOG.debug("Created version file at " + rootdir.toString() +
            " set its version at:" + version);
        s.close();
        return;
      } catch (IOException e) {
        if (retries > 0) {
          LOG.warn("Unable to create version file at " + rootdir.toString() +
              ", retrying: " + e.getMessage());

Examples of org.apache.hadoop.fs.FSDataOutputStream

  public static void setClusterId(FileSystem fs, Path rootdir, String clusterId,
      int wait) throws IOException {
    while (true) {
      try {
        Path filePath = new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME);
        FSDataOutputStream s = fs.create(filePath);
        s.writeUTF(clusterId);
        s.close();
        if (LOG.isDebugEnabled()) {
          LOG.debug("Created cluster ID file at " + filePath.toString() +
              " with ID: " + clusterId);
        }
        return;
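Because the ID is written with writeUTF, reading it back is the mirror image; a sketch using the same fs, rootdir and file-name constant:

    // Sketch: read the cluster ID back; mirrors the writeUTF call above.
    Path filePath = new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME);
    FSDataInputStream in = fs.open(filePath);
    String clusterId;
    try {
      clusterId = in.readUTF();
    } finally {
      in.close();
    }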

Examples of org.apache.hadoop.fs.FSDataOutputStream

    // Delete and re-create the file if it already exists.
    if (FSUtils.isExists(fs, tmpPath)) {
      FSUtils.delete(fs, tmpPath, true);
    }

    FSDataOutputStream out = FSUtils.create(fs, tmpPath, perms);

    try {
      regionInfo.write(out);
      out.write('\n');
      out.write('\n');
      out.write(Bytes.toBytes(regionInfo.toString()));
    } finally {
      out.close();
    }
    if (!fs.rename(tmpPath, regioninfoPath)) {
      throw new IOException("Unable to rename " + tmpPath + " to " +
        regioninfoPath);
    }