Package org.apache.hadoop.fs

Examples of org.apache.hadoop.fs.FSDataInputStream.readFully()
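FSDataInputStream exposes two families of readFully: the DataInputStream forms readFully(byte[]) and readFully(byte[], int, int), which read from the current stream position and advance it, and the PositionedReadable forms readFully(long, byte[]) and readFully(long, byte[], int, int), which read at an absolute file offset and leave the stream position unchanged. All of them either fill the requested range completely or throw EOFException. The snippets below illustrate both families.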


 
  void readFile(FileSystem fs, Path path, int fileLen) throws IOException {
    byte[] arr = new byte[fileLen];
    FSDataInputStream in = fs.open(path);
    // Fills arr completely or throws EOFException if the file is shorter than fileLen.
    in.readFully(arr);
    in.close();
  }
 
  public void testDataTransferProtocol() throws IOException {
    Random random = new Random();
    int oneMil = 1024*1024;
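Note that readFully(byte[]) reads from the current stream position and blocks until the whole array is filled, where a plain read() may return fewer bytes. A minimal sketch of the same helper using try-with-resources, so the stream is closed even when readFully throws (names are illustrative, not from the snippet above):

  void readWholeFile(FileSystem fs, Path path, int fileLen) throws IOException {
    byte[] arr = new byte[fileLen];
    try (FSDataInputStream in = fs.open(path)) {   // closed even if readFully throws
      in.readFully(arr);
    }
  }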
 


    out.sync();
   
    FSDataInputStream in = fileSystem.open(file1);
   
    byte[] buf = new byte[4096];
    // Positional read: fills buf from file offset 0 without moving the stream position.
    in.readFully(0, buf);
    in.close();

    waitForBlocks(fileSystem, file1, 1, writeSize);
   
    int blockMapSize = cluster.getDataNodes().get(0).blockScanner.blockMap.size();
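The two-argument readFully(long, byte[]) above is a positional read: it fills the whole buffer starting at the given file offset and, unlike seek() followed by readFully(byte[]), does not move the stream's read pointer. A small sketch of that property, reusing fileSystem and file1 from the snippet (the getPos() observation follows from Hadoop's PositionedReadable contract):

    FSDataInputStream in = fileSystem.open(file1);
    byte[] buf = new byte[4096];
    in.readFully(0, buf);              // absolute read at offset 0
    long pos = in.getPos();            // still 0: the positional read did not seek
    in.close();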

      throw new IOException("Split metadata size exceeded " +
          maxMetaInfoSize + ". Aborting job " + jobId);
    }
    FSDataInputStream in = fs.open(metaSplitFile);
    byte[] header = new byte[JobSplit.META_SPLIT_FILE_HEADER.length];
    // Read exactly header.length bytes; a truncated file fails fast with EOFException.
    in.readFully(header);
    if (!Arrays.equals(JobSplit.META_SPLIT_FILE_HEADER, header)) {
      throw new IOException("Invalid header on split file");
    }
    int vers = WritableUtils.readVInt(in);
    if (vers != JobSplit.META_SPLIT_VERSION) {
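This reader assumes a fixed-length magic header followed by a vint version, so readFully(header) consumes exactly the header bytes. A sketch of what the matching writer side would look like (an assumed layout, mirroring the checks above rather than code from this page):

    FSDataOutputStream out = fs.create(metaSplitFile);
    out.write(JobSplit.META_SPLIT_FILE_HEADER);                 // fixed-length magic bytes
    WritableUtils.writeVInt(out, JobSplit.META_SPLIT_VERSION);  // version as a vint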

                 dfs.getFileStatus(filepath).getLen() == size);

      // verify that there is enough data to read.
      System.out.println("File size is good. Now validating sizes from datanodes...");
      FSDataInputStream stmin = dfs.open(filepath);
      // Positional read: size bytes from file offset 0 into actual[0..size).
      stmin.readFully(0, actual, 0, size);
      stmin.close();
    }
    finally {
      try {
        if (cluster != null) {cluster.shutdown();}
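The four-argument overload readFully(long position, byte[] buffer, int offset, int length) used above reads exactly length bytes from the given file offset into buffer starting at offset, again without moving the stream position. A compact hypothetical helper built on it:

  // Hypothetical helper: read an arbitrary [pos, pos + len) window of a file.
  static byte[] readWindow(FileSystem fs, Path path, long pos, int len) throws IOException {
    byte[] window = new byte[len];
    try (FSDataInputStream in = fs.open(path)) {
      in.readFully(pos, window, 0, len);
    }
    return window;
  }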

    assertTrue("Exists", fs.exists(path));
    assertEquals("Length", len, fs.getFileStatus(path).getLen());

    FSDataInputStream in = fs.open(path);
    byte[] buf = new byte[len];
    in.readFully(0, buf);
    in.close();

    assertEquals(len, buf.length);
    for (int i = 0; i < buf.length; i++) {
      assertEquals("Position " + i, data[i], buf[i]);

      FSDataInputStream in = fs.open(fPath);
      byte[] toRead = new byte[files[idx].getSize()];
      byte[] toCompare = new byte[files[idx].getSize()];
      Random rb = new Random(files[idx].getSeed());
      rb.nextBytes(toCompare);   // regenerate the expected bytes from the recorded seed
      in.readFully(0, toRead);
      in.close();
      for (int i = 0; i < toRead.length; i++) {
        if (toRead[i] != toCompare[i]) {
          return false;
        }
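The seeded-Random trick above regenerates the expected contents instead of storing them: the same seed always produces the same byte sequence. A presumed writer side of this pattern (not shown on this page):

      byte[] toWrite = new byte[files[idx].getSize()];
      new Random(files[idx].getSeed()).nextBytes(toWrite);  // identical to toCompare above
      FSDataOutputStream out = fs.create(fPath);
      out.write(toWrite);
      out.close();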


  private void checkFile(FileSystem fs, Path name, int len) throws IOException {
    FSDataInputStream stm = fs.open(name);
    byte[] actual = new byte[len];
    stm.readFully(0, actual);
    checkData(actual, 0, fileContents, "Read 2");
    stm.close();
  }

  private void checkFullFile(FileSystem fs, Path name) throws IOException {
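checkData is not shown in this excerpt; a minimal hypothetical version consistent with the call above, comparing the bytes read against the expected fileContents:

  private void checkData(byte[] actual, int from, byte[] expected, String message) {
    for (int i = 0; i < actual.length; i++) {
      assertEquals(message + ": byte " + (from + i), expected[from + i], actual[i]);
    }
  }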

      FileStatus stat = raidfs.getFileStatus(file);
      byte[] filebytes = new byte[(int)stat.getLen()];
      FSDataInputStream stm = raidfs.open(file);
      // Test that readFully(byte[], int, int) returns; it reads from the current
      // position and leaves the stream at EOF.
      stm.readFully(filebytes, 0, (int)stat.getLen());

      stm = raidfs.open(file);
      // Test that readFully(byte[]) returns; the re-open resets the position to 0.
      stm.readFully(filebytes);
    } finally {
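The file is reopened because the first readFully call reads from the current position and leaves the stream at EOF. Rewinding the existing stream would work equally well, for example:

      stm.seek(0);              // rewind instead of reopening
      stm.readFully(filebytes); // reads the whole file again from offset 0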


    for (int idx: idxs) {
      long offset = idx * blockSize;
      LOG.info("Reporting corrupt block " + file + ":" + offset);
      in.seek(offset);
      try {
        in.readFully(new byte[(int)blockSize]);
        fail("Expected exception not thrown for " + file + ":" + offset);
      } catch (org.apache.hadoop.fs.ChecksumException e) {
        // expected: checksum verification detected the corrupt block
      } catch (org.apache.hadoop.hdfs.BlockMissingException bme) {
        // expected: every replica of the block is unavailable
      }
    }
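Both catch blocks are deliberately tolerant: either a ChecksumException (corruption detected during checksum verification) or a BlockMissingException (all replicas gone) proves the corrupt block was noticed. With Java 7 multi-catch the same expectation reads more compactly (a sketch, assuming the exception classes and JUnit's fail() are imported):

      try {
        in.readFully(new byte[(int)blockSize]);
        fail("Expected exception not thrown for " + file + ":" + offset);
      } catch (ChecksumException | BlockMissingException expected) {
        // either exception means the corruption was detected
      }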
