Examples of DFSClient


Examples of org.apache.hadoop.hdfs.DFSClient

    switch(op.getValue()) {
    case OPEN:
    {
      final int b = bufferSize.getValue(conf);
      final DFSClient dfsclient = newDfsClient(nnId, conf);
      HdfsDataInputStream in = null;
      try {
        in = new HdfsDataInputStream(dfsclient.open(fullpath, b, true));
        in.seek(offset.getValue());
      } catch(IOException ioe) {
        IOUtils.cleanup(LOG, in);
        IOUtils.cleanup(LOG, dfsclient);
        throw ioe;
      }
     
      final long n = length.getValue() != null ?
        Math.min(length.getValue(), in.getVisibleLength() - offset.getValue()) :
        in.getVisibleLength() - offset.getValue();

      // jetty 6 reserves 12 bytes in the out buffer for chunked responses
      // (file length > 2GB) which causes extremely poor performance when
      // 12 bytes of the output spill into another buffer which results
      // in a big and little write
      int outBufferSize = response.getBufferSize();
      if (n > Integer.MAX_VALUE) {
        outBufferSize -= 12;
      }
      /**
       * Allow the Web UI to perform an AJAX request to get the data.
       */
      return Response.ok(new OpenEntity(in, n, outBufferSize, dfsclient))
          .type(MediaType.APPLICATION_OCTET_STREAM)
          .header("Access-Control-Allow-Methods", "GET")
          .header("Access-Control-Allow-Origin", "*")
          .build();
    }
    case GETFILECHECKSUM:
    {
      MD5MD5CRC32FileChecksum checksum = null;
      DFSClient dfsclient = newDfsClient(nnId, conf);
      try {
        checksum = dfsclient.getFileChecksum(fullpath, Long.MAX_VALUE);
        dfsclient.close();
        dfsclient = null;
      } finally {
        IOUtils.cleanup(LOG, dfsclient);
      }
      final String js = JsonUtil.toJsonString(checksum);
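The OPEN branch above can be distilled into a standalone read helper. The following is only a minimal sketch under assumed inputs (a reachable NameNode at nnAddr, an existing HDFS path); the class and method names are illustrative and not part of the snippet above.

import java.io.IOException;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
import org.apache.hadoop.io.IOUtils;

public class DFSClientReadSketch {
  /** Reads len bytes starting at offset from an HDFS file through a DFSClient. */
  public static byte[] readRange(String nnAddr, String path, long offset, int len)
      throws IOException {
    Configuration conf = new Configuration();
    DFSClient dfsclient = new DFSClient(URI.create("hdfs://" + nnAddr), conf);
    HdfsDataInputStream in = null;
    try {
      // open() returns a DFSInputStream; wrapping it yields the HdfsDataInputStream
      // used in the OPEN case above (seek, getVisibleLength, ...).
      in = new HdfsDataInputStream(dfsclient.open(path, 4096, true));
      in.seek(offset);
      byte[] buf = new byte[len];
      in.readFully(buf);
      return buf;
    } finally {
      // Close the stream before the client, as in the servlet's error path.
      IOUtils.cleanup(null, in);
      IOUtils.cleanup(null, dfsclient);
    }
  }
}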

Examples of org.apache.hadoop.hdfs.DFSClient

  }

  private static DFSClient newDfsClient(String nnId,
                                        Configuration conf) throws IOException {
    URI uri = URI.create(HdfsConstants.HDFS_URI_SCHEME + "://" + nnId);
    return new DFSClient(uri, conf);
  }
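The helper above is typically paired with the close-then-null idiom from the GETFILECHECKSUM branch, so the finally block only cleans up on the error path. A minimal sketch, assuming a valid nnId and fullpath (the newDfsClient call is inlined so the example stands alone):

import java.io.IOException;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.io.IOUtils;

public class ChecksumSketch {
  static MD5MD5CRC32FileChecksum checksumOf(String nnId, String fullpath,
      Configuration conf) throws IOException {
    // Same as newDfsClient(nnId, conf) above.
    DFSClient dfsclient = new DFSClient(URI.create("hdfs://" + nnId), conf);
    try {
      MD5MD5CRC32FileChecksum checksum =
          dfsclient.getFileChecksum(fullpath, Long.MAX_VALUE);
      dfsclient.close();   // normal path: close explicitly...
      dfsclient = null;    // ...and null out so the finally block is a no-op
      return checksum;
    } finally {
      IOUtils.cleanup(null, dfsclient);   // only closes if an exception occurred
    }
  }
}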

Examples of org.apache.hadoop.hdfs.DFSClient

      assertTrue(checkFile2(in3));

      /*
       * testing READ interface on DN using a BlockReader
       */
      DFSClient client = null;
      try {
        client = new DFSClient(new InetSocketAddress("localhost",
          cluster.getNameNodePort()), conf);
      } finally {
        if (client != null) client.close();
      }
      List<LocatedBlock> locatedBlocks = nnProto.getBlockLocations(
          FILE_TO_READ, 0, FILE_SIZE).getLocatedBlocks();
      LocatedBlock lblock = locatedBlocks.get(0); // first block
      Token<BlockTokenIdentifier> myToken = lblock.getBlockToken();
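The same block-level metadata can also be fetched through the DFSClient itself rather than a raw ClientProtocol handle. A hedged sketch, assuming the file exists and the caller knows its size:

import java.io.IOException;
import java.net.InetSocketAddress;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.security.token.Token;

public class BlockTokenSketch {
  static Token<BlockTokenIdentifier> firstBlockToken(String host, int port,
      String path, long fileSize, Configuration conf) throws IOException {
    DFSClient client = new DFSClient(new InetSocketAddress(host, port), conf);
    try {
      // Ask the NameNode for the block list covering [0, fileSize).
      LocatedBlock first =
          client.getLocatedBlocks(path, 0, fileSize).getLocatedBlocks().get(0);
      return first.getBlockToken();   // the access token a BlockReader presents to the DN
    } finally {
      client.close();
    }
  }
}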

Examples of org.apache.hadoop.hdfs.DFSClient

            DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_DEFAULT));
    ClientProtocol proxy = (ClientProtocol) Proxy.newProxyInstance(
        failoverProxyProvider.getInterface().getClassLoader(),
        new Class[] { ClientProtocol.class }, dummyHandler);
   
    DFSClient client = new DFSClient(null, proxy, conf, null);
    return client;
  }
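The dummyHandler injected above is defined elsewhere in the test; the sketch below only illustrates that style of delegating InvocationHandler (here, one that counts NameNode RPCs) and is not the actual implementation used by the retry-cache tests.

import java.lang.reflect.InvocationHandler;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.lang.reflect.Proxy;
import java.util.concurrent.atomic.AtomicInteger;

import org.apache.hadoop.hdfs.protocol.ClientProtocol;

public class CountingHandlerSketch implements InvocationHandler {
  private final ClientProtocol delegate;           // the real (or failover) proxy
  final AtomicInteger calls = new AtomicInteger();

  CountingHandlerSketch(ClientProtocol delegate) {
    this.delegate = delegate;
  }

  @Override
  public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
    calls.incrementAndGet();                       // observe every NameNode RPC
    try {
      return method.invoke(delegate, args);        // then forward it unchanged
    } catch (InvocationTargetException e) {
      throw e.getCause();                          // rethrow the underlying exception
    }
  }

  static ClientProtocol wrap(ClientProtocol delegate) {
    return (ClientProtocol) Proxy.newProxyInstance(
        ClientProtocol.class.getClassLoader(),
        new Class<?>[] { ClientProtocol.class },
        new CountingHandlerSketch(delegate));
  }
}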

Examples of org.apache.hadoop.hdfs.DFSClient

    }
  }
 
  @Test (timeout=60000)
  public void testCreateSnapshot() throws Exception {
    final DFSClient client = genClientWithDummyHandler();
    AtMostOnceOp op = new CreateSnapshotOp(client, "/test", "s1");
    testClientRetryWithFailover(op);
  }
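CreateSnapshotOp, and the snapshot ops in the next snippets, wrap the corresponding DFSClient calls. For reference, a hedged sketch of driving the snapshot API directly, assuming sufficient privileges and an existing directory:

import java.io.IOException;

import org.apache.hadoop.hdfs.DFSClient;

public class SnapshotSketch {
  static void snapshotRoundTrip(DFSClient client, String dir) throws IOException {
    client.allowSnapshot(dir);              // mark the directory snapshottable
    client.createSnapshot(dir, "s1");       // returns the snapshot path under dir/.snapshot
    client.renameSnapshot(dir, "s1", "s2");
    client.deleteSnapshot(dir, "s2");
    client.disallowSnapshot(dir);
  }
}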

Examples of org.apache.hadoop.hdfs.DFSClient

    testClientRetryWithFailover(op);
  }
 
  @Test (timeout=60000)
  public void testDeleteSnapshot() throws Exception {
    final DFSClient client = genClientWithDummyHandler();
    AtMostOnceOp op = new DeleteSnapshotOp(client, "/test", "s1");
    testClientRetryWithFailover(op);
  }

Examples of org.apache.hadoop.hdfs.DFSClient

    testClientRetryWithFailover(op);
  }
 
  @Test (timeout=60000)
  public void testRenameSnapshot() throws Exception {
    final DFSClient client = genClientWithDummyHandler();
    AtMostOnceOp op = new RenameSnapshotOp(client, "/test", "s1", "s2");
    testClientRetryWithFailover(op);
  }

Examples of org.apache.hadoop.hdfs.DFSClient

    testClientRetryWithFailover(op);
  }
 
  @Test (timeout=60000)
  public void testCreate() throws Exception {
    final DFSClient client = genClientWithDummyHandler();
    AtMostOnceOp op = new CreateOp(client, "/testfile");
    testClientRetryWithFailover(op);
  }

Examples of org.apache.hadoop.hdfs.DFSClient

    testClientRetryWithFailover(op);
  }
 
  @Test (timeout=60000)
  public void testAppend() throws Exception {
    final DFSClient client = genClientWithDummyHandler();
    AtMostOnceOp op = new AppendOp(client, "/testfile");
    testClientRetryWithFailover(op);
  }

Examples of org.apache.hadoop.hdfs.DFSClient

    testClientRetryWithFailover(op);
  }
 
  @Test (timeout=60000)
  public void testRename() throws Exception {
    final DFSClient client = genClientWithDummyHandler();
    AtMostOnceOp op = new RenameOp(client, "/file1", "/file2");
    testClientRetryWithFailover(op);
  }
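The CreateOp, AppendOp and RenameOp cases exercise the corresponding at-most-once file operations. A minimal hedged sketch of the underlying create and rename calls, using the simple DFSClient overloads and illustrative paths:

import java.io.IOException;
import java.io.OutputStream;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.fs.Options;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.io.IOUtils;

public class FileOpsSketch {
  static void createAndRename(DFSClient client) throws IOException {
    OutputStream out = client.create("/testfile", true);   // overwrite if it exists
    try {
      out.write("hello".getBytes(StandardCharsets.UTF_8));
    } finally {
      IOUtils.cleanup(null, out);
    }
    // Rename.NONE means the call fails if the destination already exists.
    client.rename("/testfile", "/file2", Options.Rename.NONE);
  }
}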