Examples of DFSInputStream
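
DFSInputStream is the stream a DFSClient hands back from open(); the occurrences below read from it either directly or wrapped in an FSDataInputStream. As orientation, a minimal, hedged sketch of the shared pattern (the NameNode address and the path are placeholders; the int cast assumes a small file):

    import java.io.IOException;
    import java.net.InetSocketAddress;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSClient;
    import org.apache.hadoop.hdfs.DFSInputStream;
    import org.apache.hadoop.io.IOUtils;

    public class DFSInputStreamSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        // Placeholder NameNode address; substitute the real host and RPC port.
        DFSClient client = new DFSClient(
            new InetSocketAddress("localhost", 8020), conf);
        DFSInputStream in = null;
        try {
          in = client.open("/tmp/example.txt");            // hypothetical path
          byte[] buf = new byte[(int) in.getFileLength()]; // assumes a small file
          IOUtils.readFully(in, buf, 0, buf.length);
        } finally {
          IOUtils.closeStream(in); // null-safe close
          client.close();
        }
      }
    }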


Examples of org.apache.hadoop.hdfs.DFSInputStream

    public void checkSalvagedRemains() throws IOException {
      int chainIdx = 0;
      HdfsFileStatus status = dfsClient.getFileInfo(name);
      long length = status.getLen();
      // Ceiling division: number of blocks needed to cover the file.
      int numBlocks = (int)((length + blockSize - 1) / blockSize);
      DFSInputStream in = null;
      byte[] blockBuffer = new byte[blockSize];

      try {
        for (int blockIdx = 0; blockIdx < numBlocks; blockIdx++) {
          if (blocksToCorrupt.contains(blockIdx)) {
            // A corrupted block ends the current salvage chain.
            if (in != null) {
              in.close();
              in = null;
            }
            continue;
          }
          if (in == null) {
            in = dfsClient.open("/lost+found" + name + "/" + chainIdx);
            chainIdx++;
          }
          int len = blockBuffer.length;
          if (blockIdx == (numBlocks - 1)) {
            // The last block might not be full-length.
            len = (int)(in.getFileLength() % blockSize);
            if (len == 0) len = blockBuffer.length;
          }
          IOUtils.readFully(in, blockBuffer, 0, len);
          int startIdx = blockIdx * blockSize;
          for (int i = 0; i < len; i++) {
            // ... byte-by-byte verification elided in the original listing
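
The ceiling division and the modulo in checkSalvagedRemains determine how many chained lost+found files to read and how long the final block is. A small worked illustration with made-up numbers:

    // Worked example of the size math above; all numbers are hypothetical.
    public class BlockMathSketch {
      public static void main(String[] args) {
        long length = 10_000_000L;               // file length in bytes
        int blockSize = 4 * 1024 * 1024;         // 4 MB block size
        int numBlocks = (int)((length + blockSize - 1) / blockSize); // ceiling -> 3
        int lastLen = (int)(length % blockSize); // 1611392 bytes in the last block
        if (lastLen == 0) lastLen = blockSize;   // an exact multiple fills it completely
        System.out.println(numBlocks + " blocks, last block holds " + lastLen + " bytes");
      }
    }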

Examples of org.apache.hadoop.hdfs.DFSInputStream

   
    try {
      // Cap the transfer at the NFS gateway's maximum read size.
      int buffSize = Math.min(MAX_READ_TRANSFER_SIZE, count);
      byte[] readbuffer = new byte[buffSize];

      DFSInputStream is = dfsClient.open(Nfs3Utils.getFileIdPath(handle));
      FSDataInputStream fis = new FSDataInputStream(is);

      // Positional read; request at most buffSize bytes so the read can
      // never overrun readbuffer when count exceeds MAX_READ_TRANSFER_SIZE.
      int readCount = fis.read(offset, readbuffer, 0, buffSize);
      fis.close();
      // ... remainder of the handler elided in the original listing
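
The handler wraps the raw DFSInputStream in an FSDataInputStream to get the positional read(position, buffer, offset, length) overload, which does not move the stream's own offset. A minimal, hedged sketch of that pattern (NameNode address and path are placeholders):

    import java.io.IOException;
    import java.net.InetSocketAddress;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.hdfs.DFSClient;

    public class PositionalReadSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        DFSClient client = new DFSClient(
            new InetSocketAddress("localhost", 8020), conf); // placeholder address
        byte[] buf = new byte[4096];
        try (FSDataInputStream fis =
                 new FSDataInputStream(client.open("/tmp/example.txt"))) { // hypothetical path
          // Read up to 4 KB starting at byte 1024; may return fewer bytes.
          int n = fis.read(1024L, buf, 0, buf.length);
          System.out.println("read " + n + " bytes");
        } finally {
          client.close();
        }
      }
    }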

Examples of org.apache.hadoop.hdfs.DFSInputStream

      setUpForDoGetTest(cluster, testFile);

      // Force the servlet's output stream to fail.
      Mockito.doThrow(new IOException()).when(mockHttpServletResponse)
          .getOutputStream();
      DFSInputStream fsMock = Mockito.mock(DFSInputStream.class);

      // Hand the mocked stream to the client under test.
      Mockito.doReturn(fsMock).when(clientMock).open(testFile.toString());

      Mockito.doReturn(Long.valueOf(4)).when(fsMock).getFileLength();
      // ... remainder of the test elided in the original listing
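
A self-contained sketch of the mocking pattern above, assuming Mockito and hadoop-hdfs on the classpath (the class name is made up; the stubbed value mirrors the test):

    import org.apache.hadoop.hdfs.DFSInputStream;
    import org.mockito.Mockito;

    public class DFSInputStreamMockSketch {
      public static void main(String[] args) throws Exception {
        // Mock the stream and stub getFileLength(), as the test does.
        DFSInputStream fsMock = Mockito.mock(DFSInputStream.class);
        Mockito.doReturn(Long.valueOf(4)).when(fsMock).getFileLength();
        System.out.println("mocked length = " + fsMock.getFileLength()); // prints 4
      }
    }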

Examples of org.apache.hadoop.hdfs.DFSInputStream

    private byte[] cacheInitialContents() throws IOException {
      HdfsFileStatus status = dfsClient.getFileInfo(name);
      byte[] content = new byte[(int)status.getLen()];
      DFSInputStream in = null;
      try {
        in = dfsClient.open(name);
        IOUtils.readFully(in, content, 0, content.length);
      } finally {
        // Guard against a null stream if open() itself failed.
        IOUtils.closeStream(in);
      }
      return content;
    }
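
Because DFSInputStream is an ordinary InputStream, try-with-resources removes the need for the null-safe close in the finally block. A hedged variant of the helper above (the method name readAll is made up):

    // Try-with-resources variant of cacheInitialContents(); the stream is
    // closed automatically even if readFully() throws.
    private static byte[] readAll(DFSClient dfsClient, String name) throws IOException {
      HdfsFileStatus status = dfsClient.getFileInfo(name);
      byte[] content = new byte[(int) status.getLen()];
      try (DFSInputStream in = dfsClient.open(name)) {
        IOUtils.readFully(in, content, 0, content.length);
      }
      return content;
    }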

Examples of org.apache.hadoop.hdfs.DFSInputStream

  @SuppressWarnings("unchecked")
  public void testReadFromOneDN() throws IOException {
    LOG.info("Starting testReadFromOneDN()");
    DFSClient client = new DFSClient(
        new InetSocketAddress("localhost", cluster.getNameNodePort()), conf);
    DFSInputStream in = spy(client.open(testFile.toString()));
    LOG.info("opened " + testFile.toString());

    byte[] dataBuf = new byte[BLOCK_SIZE];

    MockGetBlockReader answer = new MockGetBlockReader();
    Mockito.doAnswer(answer).when(in).getBlockReader(
                           (InetSocketAddress) Matchers.anyObject(),
                           (DatanodeInfo) Matchers.anyObject(),
                           Matchers.anyString(),
                           (ExtendedBlock) Matchers.anyObject(),
                           (Token<BlockTokenIdentifier>) Matchers.anyObject(),
                           Matchers.anyLong(),
                           Matchers.anyLong(),
                           Matchers.anyInt(),
                           Matchers.anyBoolean(),
                           Matchers.anyString());

    // Initial read
    pread(in, 0, dataBuf, 0, dataBuf.length);
    // Read again and verify that the socket is the same
    pread(in, FILE_SIZE - dataBuf.length, dataBuf, 0, dataBuf.length);
    pread(in, 1024, dataBuf, 0, dataBuf.length);
    pread(in, -1, dataBuf, 0, dataBuf.length);            // No seek; just read
    pread(in, 64, dataBuf, 0, dataBuf.length / 2);

    in.close();
  }
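
The test calls a pread helper that is defined elsewhere in the test class. A plausible, hedged sketch consistent with the calls above: per the inline comment, a negative position means a plain sequential read, otherwise the positional read API is used (the real helper may additionally verify the bytes returned):

    // Hypothetical reconstruction of the pread helper used by the test.
    private static void pread(DFSInputStream in, long pos,
        byte[] buf, int off, int len) throws IOException {
      if (pos < 0) {
        IOUtils.readFully(in, buf, off, len);  // no seek; read at the current offset
      } else {
        in.read(pos, buf, off, len);           // positional read; may return short
      }
    }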