Examples of ClientProtocol


Examples of org.apache.hadoop.hdfs.protocol.ClientProtocol

      // 'nf' is the DFSClient#namenode Field, obtained via reflection earlier
      // in this method (not shown in the excerpt). Make it accessible, then
      // clear its 'final' modifier by rewriting Field#modifiers so the field
      // can be reassigned below.
      nf.setAccessible(true);
      Field modifiersField = Field.class.getDeclaredField("modifiers");
      modifiersField.setAccessible(true);
      modifiersField.setInt(nf, nf.getModifiers() & ~Modifier.FINAL);

      ClientProtocol namenode = (ClientProtocol) nf.get(dfsc);
      if (namenode == null) {
        LOG.warn("The DFSClient is not linked to a namenode. Can't add the location block" +
            " reordering interceptor. Continuing, but this is unexpected."
        );
        return false;
      }

      ClientProtocol cp1 = createReorderingProxy(namenode, lrb, conf);
      nf.set(dfsc, cp1);
      LOG.info("Added intercepting call to namenode#getBlockLocations so can do block reordering" +
        " using class " + lrb.getClass());
    } catch (NoSuchFieldException e) {
      LOG.warn("Can't modify the DFSClient#namenode field to add the location reorder.", e);
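The createReorderingProxy(...) helper itself is not part of this excerpt. Below is a minimal sketch of what such an interceptor could look like, built with java.lang.reflect.Proxy; the ReorderBlocks callback and its method name are assumptions for illustration, not the exact implementation:

      import java.lang.reflect.InvocationHandler;
      import java.lang.reflect.InvocationTargetException;
      import java.lang.reflect.Method;
      import java.lang.reflect.Proxy;
      import org.apache.hadoop.conf.Configuration;
      import org.apache.hadoop.hdfs.protocol.ClientProtocol;
      import org.apache.hadoop.hdfs.protocol.LocatedBlocks;

      // Hypothetical callback assumed by this sketch:
      //   interface ReorderBlocks {
      //     void reorderBlocks(Configuration conf, LocatedBlocks lbs, String src);
      //   }
      static ClientProtocol createReorderingProxy(final ClientProtocol cp,
          final ReorderBlocks lrb, final Configuration conf) {
        return (ClientProtocol) Proxy.newProxyInstance(
            ClientProtocol.class.getClassLoader(),
            new Class[] { ClientProtocol.class },
            new InvocationHandler() {
              @Override
              public Object invoke(Object proxy, Method m, Object[] args) throws Throwable {
                try {
                  Object res = m.invoke(cp, args);
                  // Let the callback reorder the replica list before the client
                  // sees it; args[0] is the source path parameter of
                  // ClientProtocol#getBlockLocations(String, long, long).
                  if ("getBlockLocations".equals(m.getName()) && res instanceof LocatedBlocks) {
                    lrb.reorderBlocks(conf, (LocatedBlocks) res, (String) args[0]);
                  }
                  return res;
                } catch (InvocationTargetException ite) {
                  throw ite.getCause(); // unwrap to rethrow the real cause
                }
              }
            });
      }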


Examples of org.apache.hadoop.hdfs.protocol.ClientProtocol

      // Build a client-side copy of the DataNode's configuration and prepare
      // the socket timeout and factory used later in this method (not shown
      // in the excerpt).
      final Configuration conf = new Configuration(DataNode.getDataNode().getConf());
      final int socketTimeout = conf.getInt("dfs.socket.timeout", HdfsConstants.READ_TIMEOUT);
      final SocketFactory socketFactory = NetUtils.getSocketFactory(conf, ClientProtocol.class);

      try {
        // Create the namenode RPC proxy as the authenticated request user.
        ClientProtocol nnproxy = getUGI(request, conf).doAs(
            new PrivilegedExceptionAction<ClientProtocol>() {
              @Override
              public ClientProtocol run() throws IOException {
                return DFSClient.createNamenode(conf);
              }
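DFSClient.createNamenode(conf), used above, hides the RPC plumbing. Very roughly, and assuming the classic pre-2.x Hadoop IPC API, building such a proxy by hand would look like the sketch below; real versions additionally wrap it in retry policies:

      import java.net.InetSocketAddress;
      import org.apache.hadoop.hdfs.server.namenode.NameNode;
      import org.apache.hadoop.ipc.RPC;
      import org.apache.hadoop.net.NetUtils;

      // Sketch only: resolve the namenode address from the configuration and
      // open a versioned IPC proxy speaking ClientProtocol to it.
      InetSocketAddress nnAddr = NameNode.getAddress(conf);
      ClientProtocol namenode = (ClientProtocol) RPC.getProxy(
          ClientProtocol.class, ClientProtocol.versionID, nnAddr, conf,
          NetUtils.getSocketFactory(conf, ClientProtocol.class));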

Examples of org.apache.hadoop.hdfs.protocol.ClientProtocol

    try {
      ugi.doAs(new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws IOException {
          ClientProtocol nn = createNameNodeProxy();
          final String path = ServletUtil.getDecodedPath(request, "/data");
          final String encodedPath = ServletUtil.getRawPath(request, "/data");
          String delegationToken = request
              .getParameter(JspHelper.DELEGATION_PARAMETER_NAME);

          // Only plain files can be streamed; redirect the client to a
          // datanode that can serve the file's data.
          HdfsFileStatus info = nn.getFileInfo(path);
          if (info != null && !info.isDir()) {
            response.sendRedirect(createRedirectURL(path, encodedPath,
                info, ugi, nn, request, delegationToken).toString());
          } else if (info == null) {
            response.sendError(400, "File not found " + path);

Examples of org.apache.hadoop.hdfs.protocol.ClientProtocol

    @SuppressWarnings("unchecked")
    @Override
    public long renew(Token<?> token, Configuration conf) throws IOException {
      Token<DelegationTokenIdentifier> delToken =
        (Token<DelegationTokenIdentifier>) token;
      // Forward the renewal to the namenode that issued the token.
      ClientProtocol nn = getNNProxy(delToken, conf);
      try {
        return nn.renewDelegationToken(delToken);
      } catch (RemoteException re) {
        throw re.unwrapRemoteException(InvalidToken.class,
                                       AccessControlException.class);
      }
    }

Examples of org.apache.hadoop.hdfs.protocol.ClientProtocol

    @SuppressWarnings("unchecked")
    @Override
    public void cancel(Token<?> token, Configuration conf) throws IOException {
      Token<DelegationTokenIdentifier> delToken =
          (Token<DelegationTokenIdentifier>) token;
      LOG.info("Cancelling " +
               DelegationTokenIdentifier.stringifyToken(delToken));
      ClientProtocol nn = getNNProxy(delToken, conf);
      try {
        nn.cancelDelegationToken(delToken);
      } catch (RemoteException re) {
        throw re.unwrapRemoteException(InvalidToken.class,
            AccessControlException.class);
      }
    }
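These TokenRenewer methods are not normally called directly: Token#renew(Configuration) and Token#cancel(Configuration) locate the matching renewer through a service loader and delegate to it. A minimal, hypothetical usage sketch ('fs' is assumed to be an HDFS FileSystem and "yarn" an illustrative renewer name):

      import org.apache.hadoop.security.Credentials;
      import org.apache.hadoop.security.token.Token;

      // Obtain delegation tokens, renew them while work is in flight, and
      // cancel them afterwards; renew/cancel dispatch to a TokenRenewer
      // implementation like the one shown above.
      Credentials creds = new Credentials();
      Token<?>[] tokens = fs.addDelegationTokens("yarn", creds);
      for (Token<?> t : tokens) {
        long newExpiry = t.renew(conf);   // throws IOException, InterruptedException
        // ... do work against HDFS ...
        t.cancel(conf);
      }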

Examples of org.apache.hadoop.hdfs.protocol.ClientProtocol

        failoverProxyProvider, RetryPolicies
        .failoverOnNetworkException(RetryPolicies.TRY_ONCE_THEN_FAIL,
            Integer.MAX_VALUE,
            DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_DEFAULT,
            DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_DEFAULT));
    ClientProtocol proxy = (ClientProtocol) Proxy.newProxyInstance(
        failoverProxyProvider.getInterface().getClassLoader(),
        new Class[] { ClientProtocol.class }, dummyHandler);
   
    DFSClient client = new DFSClient(null, proxy, conf, null);
    return client;
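Note the layering in this example: the retry handler wraps a FailoverProxyProvider, and RetryPolicies.failoverOnNetworkException makes each single attempt TRY_ONCE_THEN_FAIL while permitting up to Integer.MAX_VALUE failovers, sleeping with bounded exponential backoff between them. Proxy.newProxyInstance then presents the whole stack as a plain ClientProtocol, so the DFSClient built on top of it is unaware of the HA wiring.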

Examples of org.apache.hadoop.hdfs.protocol.ClientProtocol

      // Parse the listing request: path, recursion flag, and the client's
      // include/exclude filename patterns.
      final Map<String, String> root = buildRoot(request, doc);
      final String path = root.get("path");
      final boolean recur = "yes".equals(root.get("recursive"));
      final Pattern filter = Pattern.compile(root.get("filter"));
      final Pattern exclude = Pattern.compile(root.get("exclude"));
      ClientProtocol nnproxy = createNameNodeProxy(ugi);

      doc.declaration();
      doc.startTag("listing");
      for (Map.Entry<String,String> m : root.entrySet()) {
        doc.attribute(m.getKey(), m.getValue());
      }

      FileStatus base = nnproxy.getFileInfo(path);
      if ((base != null) && base.isDir()) {
        writeInfo(base, doc);
      }

      // Iterative depth-first traversal of the namespace rooted at 'path'.
      Stack<String> pathstack = new Stack<String>();
      pathstack.push(path);
      while (!pathstack.empty()) {
        String p = pathstack.pop();
        try {
          for (FileStatus i : nnproxy.getListing(p)) {
            if (exclude.matcher(i.getPath().getName()).matches()
                || !filter.matcher(i.getPath().getName()).matches()) {
              continue;
            }
            if (recur && i.isDir()) {

Examples of org.apache.hadoop.hdfs.protocol.ClientProtocol

  public void doGet(HttpServletRequest request, HttpServletResponse response)
    throws IOException {
    final UnixUserGroupInformation ugi = getUGI(request);
    final ClientProtocol nnproxy = createNameNodeProxy(ugi);

    try {
      final String path = request.getPathInfo() != null
        ? request.getPathInfo() : "/";
      FileStatus info = nnproxy.getFileInfo(path);
      if ((info != null) && !info.isDir()) {
        response.sendRedirect(createUri(info, ugi, nnproxy,
              request).toURL().toString());
      } else if (info == null) {
        response.sendError(400, "cat: File not found " + path);

Examples of org.apache.hadoop.hdfs.protocol.ClientProtocol

      final Configuration conf = new Configuration(DataNode.getDataNode().getConf());
      final int socketTimeout = conf.getInt("dfs.socket.timeout", HdfsConstants.READ_TIMEOUT);
      final SocketFactory socketFactory = NetUtils.getSocketFactory(conf, ClientProtocol.class);
      // Pre-security-refactor API: the caller's identity travels inside the
      // Configuration, and the namenode proxy is created directly from it.
      UnixUserGroupInformation.saveToConf(conf,
          UnixUserGroupInformation.UGI_PROPERTY_NAME, ugi);
      final ClientProtocol nnproxy = DFSClient.createNamenode(conf);

      try {
        final MD5MD5CRC32FileChecksum checksum = DFSClient.getFileChecksum(
            filename, nnproxy, socketFactory, socketTimeout);
        MD5MD5CRC32FileChecksum.write(xml, checksum);
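For comparison, the same checksum is reachable through the public API rather than servlet internals. A minimal usage sketch (the path is illustrative):

      import org.apache.hadoop.fs.FileChecksum;
      import org.apache.hadoop.fs.FileSystem;
      import org.apache.hadoop.fs.Path;

      // DistributedFileSystem gathers per-block CRCs from the datanodes and
      // combines them into an MD5-of-MD5-of-CRC32 file checksum.
      FileSystem fs = FileSystem.get(conf);
      FileChecksum checksum = fs.getFileChecksum(new Path("/user/alice/data.txt"));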