Examples of DFSInputStream
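
The snippets below all follow the same pattern: obtain a DFSClient, call open() to get a DFSInputStream, read, then close both. A minimal self-contained sketch of that pattern (the namenode URI and file path are illustrative, and DFSClient is HDFS-internal API, so treat this as a mirror of the snippets rather than recommended client code):

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSClient;
    import org.apache.hadoop.hdfs.DFSInputStream;
    import org.apache.hadoop.io.IOUtils;

    public class DFSInputStreamExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Illustrative namenode address; point this at a running cluster.
        DFSClient client = new DFSClient(new URI("hdfs://localhost:8020"), conf);
        DFSInputStream in = null;
        try {
          in = client.open("/tmp/example.txt");      // illustrative path
          byte[] buf = new byte[(int) in.getFileLength()];
          IOUtils.readFully(in, buf, 0, buf.length); // read the whole file
          System.out.write(buf);
        } finally {
          IOUtils.closeStream(in);                   // null-safe close
          client.close();
        }
      }
    }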


Examples of org.apache.hadoop.hdfs.DFSInputStream

    } catch (InterruptedException e) {
      response.sendError(400, e.getMessage());
      return;
    }
   
    DFSInputStream in = null;
    OutputStream out = null;

    try {
      in = dfs.open(filename);
      out = response.getOutputStream();
      final long fileLen = in.getFileLength();
      if (reqRanges != null) {
        List<InclusiveByteRange> ranges =
          InclusiveByteRange.satisfiableRanges(reqRanges, fileLen);
        StreamFile.sendPartialData(in, out, response, fileLen, ranges);
      } else {
        // No ranges, so send entire file
        response.setHeader("Content-Disposition", "attachment; filename=\"" +
                           rawFilename + "\"");
        response.setContentType("application/octet-stream");
        response.setHeader(CONTENT_LENGTH, "" + fileLen);
        StreamFile.copyFromOffset(in, out, 0L, fileLen);
      }
      in.close();
      in = null;
      out.close();
      out = null;
      dfs.close();
      dfs = null;
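In the no-ranges branch above, StreamFile.copyFromOffset streams the whole file to the servlet's output stream. A minimal sketch of what such a helper does, assuming only that DFSInputStream is seekable (the buffer size is arbitrary):

    static void copyFromOffset(DFSInputStream in, OutputStream out,
        long offset, long count) throws IOException {
      in.seek(offset);                  // position at the start of the range
      byte[] buf = new byte[4096];
      long remaining = count;
      while (remaining > 0) {
        int n = in.read(buf, 0, (int) Math.min(buf.length, remaining));
        if (n < 0) break;               // premature EOF
        out.write(buf, 0, n);
        remaining -= n;
      }
    }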

Examples of org.apache.hadoop.hdfs.DFSInputStream

      setUpForDoGetTest(cluster, testFile);

      Mockito.doThrow(new IOException()).when(mockHttpServletResponse)
          .getOutputStream();
      DFSInputStream fsMock = Mockito.mock(DFSInputStream.class);

      Mockito.doReturn(fsMock).when(clientMock).open(testFile.toString());

      Mockito.doReturn(Long.valueOf(4)).when(fsMock).getFileLength();

Examples of org.apache.hadoop.hdfs.DFSInputStream

    }

    private byte[] cacheInitialContents() throws IOException {
      HdfsFileStatus status = dfsClient.getFileInfo(name);
      byte[] content = new byte[(int)status.getLen()];
      DFSInputStream in = null;
      try {
        in = dfsClient.open(name);
        IOUtils.readFully(in, content, 0, content.length);
      } finally {
        IOUtils.closeStream(in); // null-safe: open() may have thrown above
      }
      return content;
    }
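The finally block above has to tolerate a failed open(), hence the null-safe close. On Java 7+ the same read can be written with try-with-resources, as a sketch (dfsClient and name as in the snippet):

      private byte[] cacheInitialContents() throws IOException {
        HdfsFileStatus status = dfsClient.getFileInfo(name);
        byte[] content = new byte[(int) status.getLen()];
        // DFSInputStream is Closeable, so the stream closes automatically
        try (DFSInputStream in = dfsClient.open(name)) {
          IOUtils.readFully(in, content, 0, content.length);
        }
        return content;
      }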

Examples of org.apache.hadoop.hdfs.DFSInputStream

    public void checkSalvagedRemains() throws IOException {
      int chainIdx = 0;
      HdfsFileStatus status = dfsClient.getFileInfo(name);
      long length = status.getLen();
      int numBlocks = (int)((length + blockSize - 1) / blockSize);
      DFSInputStream in = null;
      byte[] blockBuffer = new byte[blockSize];

      try {
        for (int blockIdx = 0; blockIdx < numBlocks; blockIdx++) {
          if (blocksToCorrupt.contains(blockIdx)) {
            if (in != null) {
              in.close();
              in = null;
            }
            continue;
          }
          if (in == null) {
            in = dfsClient.open("/lost+found" + name + "/" + chainIdx);
            chainIdx++;
          }
          int len = blockBuffer.length;
          if (blockIdx == (numBlocks - 1)) {
            // The last block might not be full-length
            len = (int)(in.getFileLength() % blockSize);
            if (len == 0) len = blockBuffer.length;
          }
          IOUtils.readFully(in, blockBuffer, 0, len);
          int startIdx = blockIdx * blockSize;
          for (int i = 0; i < len; i++) {

Examples of org.apache.hadoop.hdfs.DFSInputStream

    // The client's name-node proxy should stay the same as long as the
    // namenode sends the same fingerprint.
    //
    ClientProtocol namenode1 = client.namenode;
    cluster.getNameNode().setClientProtocolMethodsFingerprint(oldFingerprint);
    DFSInputStream dis = client.open("/testClientUpdateMethodList.txt");
    int val = dis.read();
    TestCase.assertEquals(66, val);
    dis.close();
    TestCase.assertSame(namenode1, client.namenode);

    // The namenode's fingerprint will now differ from the client's, so the
    // client is supposed to get a new proxy.
    //
    cluster.getNameNode().setClientProtocolMethodsFingerprint(888);
    dis = client.open("/testClientUpdateMethodList1.txt");
    val = dis.read();
    TestCase.assertEquals(88, val);
    dis.close();
    // Since we didn't change the name-node's method list, the fingerprint
    // obtained through the new proxy should be the same as before.
    TestCase.assertNotSame(namenode1, client.namenode);
  }

Examples of org.apache.hadoop.hdfs.DFSInputStream

    // Current system has foo deleted and bar with length 2
    // test snapshot has foo with length 1 and bar with length 1

    // Checking current file system
    assertTrue(!dfs.exists(foo));
    DFSInputStream in = client.open("/bar");
    assertTrue(in.getFileLength() == 2);
    assertTrue(in.read() == 1);
    assertTrue(in.read() == 2);
    assertTrue(in.read() == -1); //eof

    // Checking test snapshot
    in = ssClient.open("test", "/foo");
    assertTrue(in.getFileLength() == 1);
    assertTrue(in.read() == 0);
    assertTrue(in.read() == -1); //eof
    in = ssClient.open("test", "/bar");
    assertTrue(in.getFileLength() == 1);
    assertTrue(in.read() == 1);
    assertTrue(in.read() == -1); //eof
  }

Examples of org.apache.hadoop.hdfs.DFSInputStream

               path);

      // Pessimistically update last block length from DataNode.
      // File could have been renamed, and a new file created in its place.
      try {
        DFSInputStream stm = client.open(path);
        DFSLocatedBlocks locBlks = stm.fetchLocatedBlocks();

        if (locBlks.locatedBlockCount() >= blks.length) {
          if (blks[index] != null && locBlks.get(index) != null) {
            if (blks[index].getBlockId() == locBlks.get(index).getBlock().getBlockId()) {
              blks[index].setNumBytes(locBlks.get(index).getBlock().getNumBytes());
              stm.close();
              client.close(); // close before the early return, too
              return;
            }
          }
        }

        stm.close();
        client.close(); // close dfs client
      }
      catch (IOException e) {
        LOG.error(StringUtils.stringifyException(e));
      }

Examples of org.apache.hadoop.hdfs.DFSInputStream

      for (FileStatus child: children) {
        if (!child.isDir()) { // get block ids for file
          Path path = child.getPath(); // paths will be unique
          fileMap.put(path, new ArrayList<Long>());

          DFSInputStream stm = client.open(child.getPath().toUri().getPath());
          LocatedBlocks blocks = stm.fetchLocatedBlocks();
          stm.close();

          for (int i = 0; i < blocks.locatedBlockCount(); i++) {
            Long blockId = blocks.get(i).getBlock().getBlockId();
            fileMap.get(path).add(blockId); // add to file block list
            blockRefMap.put(blockId, null); // mark as unreferenced
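A self-contained version of the per-file block-id enumeration above, sketched around the same fetchLocatedBlocks() accessor (client is an open DFSClient; java.util.List/ArrayList and org.apache.hadoop.hdfs.protocol.LocatedBlocks are assumed imported):

    static List<Long> blockIdsFor(DFSClient client, String path) throws IOException {
      DFSInputStream stm = client.open(path);
      try {
        LocatedBlocks blocks = stm.fetchLocatedBlocks();
        List<Long> ids = new ArrayList<Long>();
        for (int i = 0; i < blocks.locatedBlockCount(); i++) {
          ids.add(blocks.get(i).getBlock().getBlockId());
        }
        return ids;
      } finally {
        IOUtils.closeStream(stm); // close even if fetching blocks fails
      }
    }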

Examples of org.apache.hadoop.hdfs.DFSInputStream

      stm.close();

      in = fs.open(file1);
      in.readByte();

      DFSInputStream dfsClientIn = findDFSClientInputStream(in);     
      Field blockReaderField = DFSInputStream.class.getDeclaredField("blockReader");
      blockReaderField.setAccessible(true);
      BlockReader blockReader = (BlockReader) blockReaderField.get(dfsClientIn);

      blockReader.setArtificialSlowdown(1000);
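The blockReader lookup above is plain Java reflection; the same dance can be factored into a generic helper, as a sketch (the helper name is illustrative):

    // Reads a private field via reflection; declaringClass must be the class
    // that actually declares the field (here, DFSInputStream.class).
    static Object getPrivateField(Class<?> declaringClass, Object target,
        String fieldName) throws ReflectiveOperationException {
      java.lang.reflect.Field f = declaringClass.getDeclaredField(fieldName);
      f.setAccessible(true); // lift the private access check
      return f.get(target);
    }

With it, the lookup collapses to (BlockReader) getPrivateField(DFSInputStream.class, dfsClientIn, "blockReader").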