Package org.apache.hadoop.hdfs

Examples of org.apache.hadoop.hdfs.HDFSPolicyProvider
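
HDFSPolicyProvider is the PolicyProvider implementation for HDFS. It maps each HDFS RPC protocol to the ACL key under which that protocol's access list is configured in hadoop-policy.xml, and the HDFS daemons hand an instance of it to Server.refreshServiceAcl(...) whenever service-level authorization (hadoop.security.authorization) is enabled. For orientation before the excerpts, here is a condensed sketch of the class itself; the exact service list varies across Hadoop versions, so treat it as illustrative rather than exhaustive:

package org.apache.hadoop.hdfs;

import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
import org.apache.hadoop.security.authorize.PolicyProvider;
import org.apache.hadoop.security.authorize.Service;

/** Maps each HDFS RPC protocol to its ACL key in hadoop-policy.xml. */
public class HDFSPolicyProvider extends PolicyProvider {
  private static final Service[] hdfsServices = new Service[] {
    new Service("security.client.protocol.acl", ClientProtocol.class),
    new Service("security.datanode.protocol.acl", DatanodeProtocol.class),
    new Service("security.namenode.protocol.acl", NamenodeProtocol.class),
    // ...the real class lists several more protocol/ACL-key pairs
  };

  @Override
  public Service[] getServices() {
    return hdfsServices;
  }
}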


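The first excerpt shows the most common usage pattern: the excerpt opens with the tail of the call that constructs an RPC server, then the daemon checks hadoop.security.authorization and, only if it is true, loads the service-level policy into the freshly built server.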
      .build();

    // set service-level authorization security policy
    if (confCopy.getBoolean(
      CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) {
          server.refreshServiceAcl(confCopy, new HDFSPolicyProvider());
    }
  }
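
Because this guard recurs in several daemons, its shared shape is easy to factor out. ServiceAclUtil below is a hypothetical helper, not part of Hadoop; only the Server.refreshServiceAcl(Configuration, PolicyProvider) call and the configuration key are the real Hadoop API:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.hdfs.HDFSPolicyProvider;
import org.apache.hadoop.ipc.Server;

/** Hypothetical helper capturing the guard pattern shown above. */
public final class ServiceAclUtil {
  private ServiceAclUtil() {}

  /**
   * Loads the HDFS service-level policy into the server if and only if
   * hadoop.security.authorization is set; returns whether it was enabled
   * so callers can remember the flag, as the NameNode does below.
   */
  public static boolean applyHdfsPolicy(Server server, Configuration conf) {
    boolean enabled = conf.getBoolean(
        CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false);
    if (enabled) {
      server.refreshServiceAcl(conf, new HDFSPolicyProvider());
    }
    return enabled;
  }
}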

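From the NameNode's initialization: the configuration check is both evaluated and remembered (note the deliberate assignment to serviceAuthEnabled inside the if condition), and the policy is pushed to the main RPC server as well as to the optional, separate service RPC server. The listener address is re-read afterwards because the configured port may have been ephemeral (port 0).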
    // set service-level authorization security policy
    if (serviceAuthEnabled =
          conf.getBoolean(
            CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) {
      this.server.refreshServiceAcl(conf, new HDFSPolicyProvider());
      if (this.serviceRpcServer != null) {
        this.serviceRpcServer.refreshServiceAcl(conf, new HDFSPolicyProvider());
      }
    }

    // The rpc-server port can be ephemeral... ensure we have the correct info
    this.rpcAddress = this.server.getListenerAddress();

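The flag saved above guards the NameNode's runtime refresh entry point, which an administrator reaches through RefreshAuthorizationPolicyProtocol (for example via hadoop dfsadmin -refreshServiceAcl). It refuses to run when service-level authorization was never enabled, and it builds a fresh Configuration rather than reusing the startup one so that the current hadoop-policy.xml is re-read: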
  public void refreshServiceAcl() throws IOException {
    if (!serviceAuthEnabled) {
      throw new AuthorizationException("Service Level Authorization not enabled!");
    }

    this.server.refreshServiceAcl(new Configuration(), new HDFSPolicyProvider());
    if (this.serviceRpcServer != null) {
      this.serviceRpcServer.refreshServiceAcl(new Configuration(), new HDFSPolicyProvider());
    }
  }

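A later revision of the same NameNode initialization, after the client-facing server was renamed clientRpcServer: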
    // set service-level authorization security policy
    if (serviceAuthEnabled =
          conf.getBoolean(
            CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) {
      this.clientRpcServer.refreshServiceAcl(conf, new HDFSPolicyProvider());
      if (this.serviceRpcServer != null) {
        this.serviceRpcServer.refreshServiceAcl(conf, new HDFSPolicyProvider());
      }
    }

    // The rpc-server port can be ephemeral... ensure we have the correct info
    this.clientRpcAddress = this.clientRpcServer.getListenerAddress();

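And the matching revision of the refresh method: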
  public void refreshServiceAcl() throws IOException {
    if (!serviceAuthEnabled) {
      throw new AuthorizationException("Service Level Authorization not enabled!");
    }

    this.clientRpcServer.refreshServiceAcl(new Configuration(), new HDFSPolicyProvider());
    if (this.serviceRpcServer != null) {
      this.serviceRpcServer.refreshServiceAcl(new Configuration(), new HDFSPolicyProvider());
    }
  }

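The same guard appears after constructing a server with no secret manager (the null /*secretManager*/ argument):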
            null /*secretManager*/);

    // set service-level authorization security policy
    if (confCopy.getBoolean(
      CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) {
          server.refreshServiceAcl(confCopy, new HDFSPolicyProvider());
    }
  }

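On the DataNode, the ipcServer (created with the default handler count, DFS_DATANODE_HANDLER_COUNT_DEFAULT) gets the identical treatment: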
                                          DFS_DATANODE_HANDLER_COUNT_DEFAULT),
                              false, conf, blockPoolTokenSecretManager);
    // set service-level authorization security policy
    if (conf.getBoolean(
        CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) {
      ipcServer.refreshServiceAcl(conf, new HDFSPolicyProvider());
    }
  }

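HDFSPolicyProvider also shows up in tests. This one starts a MiniDFSCluster and asserts that the NameNode's ServiceAuthorizationManager holds an ACL entry for exactly the protocols HDFSPolicyProvider advertises: the loop catches missing entries and the size comparison catches extras. The check is then repeated after bringing up a MiniMRCluster, to prove that starting the MapReduce daemons did not alter the NameNode's ACL map: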
      dfs = new MiniDFSCluster(conf, slaves, true, null);

      // Ensure that the protocols authorized on the name node are only the HDFS protocols.
      Set<Class<?>> protocolsWithAcls = NameNodeAdapter.getRpcServer(dfs.getNameNode())
          .getServiceAuthorizationManager().getProtocolsWithAcls();
      Service[] hdfsServices = new HDFSPolicyProvider().getServices();
      for (Service service : hdfsServices) {
        if (!protocolsWithAcls.contains(service.getProtocol()))
          fail("service authorization manager has no entry for protocol " + service.getProtocol());
      }
      if (hdfsServices.length != protocolsWithAcls.size())
        fail("there should be an entry for every HDFS service in the protocols with ACLs map");

      fileSys = dfs.getFileSystem();
      JobConf mrConf = new JobConf(conf);
      mr = new MiniMRCluster(slaves, fileSys.getUri().toString(), 1,
                             null, null, mrConf);

      // Ensure that the protocols configured for the name node did not change
      // when the MR cluster was started.
      protocolsWithAcls = NameNodeAdapter.getRpcServer(dfs.getNameNode())
          .getServiceAuthorizationManager().getProtocolsWithAcls();
      hdfsServices = new HDFSPolicyProvider().getServices();
      for (Service service : hdfsServices) {
        if (!protocolsWithAcls.contains(service.getProtocol()))
          fail("service authorization manager has no entry for protocol " + service.getProtocol());
      }
      if (hdfsServices.length != protocolsWithAcls.size())
        fail("there should be an entry for every HDFS service in the protocols with ACLs map");

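Finally, a test helper that regenerates hadoop-policy.xml from the provider itself: every service key gets the wildcard ACL except security.refresh.policy.protocol.acl, which is pinned to a dummy principal, presumably so the test can verify that unauthorized refresh requests are rejected. The method writes one <property> element per service and then closes the document: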
  private static final String DUMMY_ACL = "nouser nogroup";
  private static final String UNKNOWN_USER = "dev,null";
 
  private void rewriteHadoopPolicyFile(File policyFile) throws IOException {
    FileWriter fos = new FileWriter(policyFile);
    PolicyProvider policyProvider = new HDFSPolicyProvider();
    fos.write("<configuration>\n");
    for (Service service : policyProvider.getServices()) {
      String key = service.getServiceKey();
      String value ="*";
      if (key.equals("security.refresh.policy.protocol.acl")) {
        value = DUMMY_ACL;
      }
      fos.write("<property><name>" + key + "</name><value>" + value +
                "</value></property>\n");
    }
    fos.write("</configuration>\n");
    fos.close();
  }
