Examples of ClientProtocol


Examples of org.apache.hadoop.hdfs.protocol.ClientProtocol

    try {
      ugi.doAs(new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws IOException {
          ClientProtocol nn = createNameNodeProxy();
          final String path = ServletUtil.getDecodedPath(request, "/data");
          final String encodedPath = ServletUtil.getRawPath(request, "/data");
          String delegationToken = request
              .getParameter(JspHelper.DELEGATION_PARAMETER_NAME);

          HdfsFileStatus info = nn.getFileInfo(path);
          if (info != null && !info.isDir()) {
            response.sendRedirect(createRedirectURL(path, encodedPath,
                info, ugi, nn, request, delegationToken).toString());
          } else if (info == null) {
            response.sendError(400, "File not found " + path);
View Full Code Here

Examples of org.apache.hadoop.hdfs.protocol.ClientProtocol

        failoverProxyProvider, RetryPolicies
        .failoverOnNetworkException(RetryPolicies.TRY_ONCE_THEN_FAIL,
            Integer.MAX_VALUE,
            DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_DEFAULT,
            DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_DEFAULT));
    ClientProtocol proxy = (ClientProtocol) Proxy.newProxyInstance(
        failoverProxyProvider.getInterface().getClassLoader(),
        new Class[] { ClientProtocol.class }, dummyHandler);
   
    DFSClient client = new DFSClient(null, proxy, conf, null);
    return client;
View Full Code Here

Examples of org.apache.hadoop.hdfs.protocol.ClientProtocol

 
  /* check that there are at least two nodes on the same rack */
  private void checkFile(FileSystem fileSys, Path name, int repl)
    throws IOException {
    Configuration conf = fileSys.getConf();
    ClientProtocol namenode = NameNodeProxies.createProxy(conf, fileSys.getUri(),
        ClientProtocol.class).getProxy();
     
    waitForBlockReplication(name.toString(), namenode,
                            Math.min(numDatanodes, repl), -1);
   
    LocatedBlocks locations = namenode.getBlockLocations(name.toString(),0,
                                                         Long.MAX_VALUE);
    FileStatus stat = fileSys.getFileStatus(name);
    BlockLocation[] blockLocations = fileSys.getFileBlockLocations(stat,0L,
                                                         Long.MAX_VALUE);
    // verify that rack locations match
View Full Code Here

Examples of org.apache.hadoop.hdfs.protocol.ClientProtocol

      String nsId = dfsUri.getHost();
      List<ProxyAndInfo<ClientProtocol>> proxies =
          HAUtil.getProxiesForAllNameNodesInNameservice(
          dfsConf, nsId, ClientProtocol.class);
      for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
        ClientProtocol haNn = proxy.getProxy();
        boolean inSafeMode = haNn.setSafeMode(action, false);
        if (waitExitSafe) {
          inSafeMode = waitExitSafeMode(haNn, inSafeMode);
        }
        System.out.println("Safe mode is " + (inSafeMode ? "ON" : "OFF")
            + " in " + proxy.getAddress());
View Full Code Here

Examples of org.apache.hadoop.hdfs.protocol.ClientProtocol

    @SuppressWarnings("unchecked")
    @Override
    public long renew(Token<?> token, Configuration conf) throws IOException {
      Token<DelegationTokenIdentifier> delToken =
        (Token<DelegationTokenIdentifier>) token;
      ClientProtocol nn = getNNProxy(delToken, conf);
      try {
        return nn.renewDelegationToken(delToken);
      } catch (RemoteException re) {
        throw re.unwrapRemoteException(InvalidToken.class,
                                       AccessControlException.class);
      }
    }
View Full Code Here

Examples of org.apache.hadoop.hdfs.protocol.ClientProtocol

    public void cancel(Token<?> token, Configuration conf) throws IOException {
      Token<DelegationTokenIdentifier> delToken =
          (Token<DelegationTokenIdentifier>) token;
      LOG.info("Cancelling " +
               DelegationTokenIdentifier.stringifyToken(delToken));
      ClientProtocol nn = getNNProxy(delToken, conf);
      try {
        nn.cancelDelegationToken(delToken);
      } catch (RemoteException re) {
        throw re.unwrapRemoteException(InvalidToken.class,
            AccessControlException.class);
      }
    }
View Full Code Here

Examples of org.apache.hadoop.hdfs.protocol.ClientProtocol

 
  /* check that there are at least two nodes on the same rack */
  private void checkFile(FileSystem fileSys, Path name, int repl)
    throws IOException {
    Configuration conf = fileSys.getConf();
    ClientProtocol namenode = NameNodeProxies.createProxy(conf, fileSys.getUri(),
        ClientProtocol.class).getProxy();
     
    waitForBlockReplication(name.toString(), namenode,
                            Math.min(numDatanodes, repl), -1);
   
    LocatedBlocks locations = namenode.getBlockLocations(name.toString(),0,
                                                         Long.MAX_VALUE);
    FileStatus stat = fileSys.getFileStatus(name);
    BlockLocation[] blockLocations = fileSys.getFileBlockLocations(stat,0L,
                                                         Long.MAX_VALUE);
    // verify that rack locations match
View Full Code Here

Examples of org.apache.hadoop.hdfs.protocol.ClientProtocol

    try {
      ugi.doAs(new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws IOException {
              ClientProtocol nn = createNameNodeProxy();
              final String path =
                request.getPathInfo() != null ? request.getPathInfo() : "/";
             
              String delegationToken =
                request.getParameter(JspHelper.DELEGATION_PARAMETER_NAME);
             
              HdfsFileStatus info = nn.getFileInfo(path);
              if ((info != null) && !info.isDir()) {
                try {
                  response.sendRedirect(createUri(path, info, ugi, nn,
                        request, delegationToken).toURL().toString());
                } catch (URISyntaxException e) {
View Full Code Here

Examples of org.apache.hadoop.hdfs.protocol.ClientProtocol

      nf.setAccessible(true);
      Field modifiersField = Field.class.getDeclaredField("modifiers");
      modifiersField.setAccessible(true);
      modifiersField.setInt(nf, nf.getModifiers() & ~Modifier.FINAL);

      ClientProtocol namenode = (ClientProtocol) nf.get(dfsc);
      if (namenode == null) {
        LOG.warn("The DFSClient is not linked to a namenode. Can't add the location block" +
            " reordering interceptor. Continuing, but this is unexpected."
        );
        return false;
      }

      ClientProtocol cp1 = createReorderingProxy(namenode, lrb, conf);
      nf.set(dfsc, cp1);
      LOG.info("Added intercepting call to namenode#getBlockLocations so can do block reordering" +
        " using class " + lrb.getClass());
    } catch (NoSuchFieldException e) {
      LOG.warn("Can't modify the DFSClient#namenode field to add the location reorder.", e);
View Full Code Here

Examples of org.apache.hadoop.hdfs.protocol.ClientProtocol

      nf.setAccessible(true);
      Field modifiersField = Field.class.getDeclaredField("modifiers");
      modifiersField.setAccessible(true);
      modifiersField.setInt(nf, nf.getModifiers() & ~Modifier.FINAL);

      ClientProtocol namenode = (ClientProtocol) nf.get(dfsc);
      if (namenode == null) {
        LOG.warn("The DFSClient is not linked to a namenode. Can't add the location block" +
            " reordering interceptor. Continuing, but this is unexpected."
        );
        return false;
      }

      ClientProtocol cp1 = createReorderingProxy(namenode, lrb, conf);
      nf.set(dfsc, cp1);
      LOG.info("Added intercepting call to namenode#getBlockLocations so can do block reordering" +
        " using class " + lrb.getClass());
    } catch (NoSuchFieldException e) {
      LOG.warn("Can't modify the DFSClient#namenode field to add the location reorder.", e);
View Full Code Here
TOP
Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc and owned by ORACLE Inc. Contact coftware#gmail.com.