Package org.apache.hadoop.hive.shims

Examples of org.apache.hadoop.hive.shims.HadoopShims$MiniMrShim
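
None of the snippets below touches the MiniMrShim inner interface directly; they use the surrounding HadoopShims facade. For orientation, here is a minimal sketch of how Hive's own test harness typically obtains a mini MapReduce cluster through the shim. The getMiniMrCluster arguments, the name-node URI and the HiveConf setup are illustrative assumptions and may differ between Hive versions.

import java.io.IOException;

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.shims.HadoopShims;
import org.apache.hadoop.hive.shims.ShimLoader;

public class MiniMrShimSketch {
  public static void main(String[] args) throws IOException {
    HiveConf conf = new HiveConf();
    HadoopShims shims = ShimLoader.getHadoopShims();

    // Start a small in-process MapReduce cluster for tests. The task-tracker count,
    // directory count and name-node URI below are placeholder values.
    HadoopShims.MiniMrShim mr =
        shims.getMiniMrCluster(conf, 1, "hdfs://localhost:8020", 1);
    try {
      // Point the job configuration at the mini cluster before submitting test jobs.
      mr.setupConfiguration(conf);
      // ... run MapReduce-backed test queries here ...
    } finally {
      mr.shutdown();
    }
  }
}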


      // }

      // Set the HADOOP_USER_NAME env variable for the child process so that
      // it also runs with the Hadoop permissions of the user the job is running as.
      // This is honoured by Hadoop only in insecure (non-Kerberos) mode.
      HadoopShims shim = ShimLoader.getHadoopShims();
      String endUserName = shim.getShortUserName(shim.getUGIForConf(job));
      LOG.debug("setting HADOOP_USER_NAME\t" + endUserName);
      variables.put("HADOOP_USER_NAME", endUserName);

      if (variables.containsKey(HADOOP_OPTS_KEY)) {
        variables.put(HADOOP_OPTS_KEY, variables.get(HADOOP_OPTS_KEY) + hadoopOpts);
View Full Code Here
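
For context, the variables map built above is ultimately applied to the environment of the child hadoop process that Hive launches. The sketch below shows the general mechanism with a plain ProcessBuilder; the command line and the user name are invented for illustration and are not the surrounding Hive code.

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

public class ChildEnvSketch {
  public static void main(String[] args) throws IOException, InterruptedException {
    Map<String, String> variables = new HashMap<String, String>();
    variables.put("HADOOP_USER_NAME", "some_end_user"); // placeholder value

    // Apply the extra variables to the child's environment before launching it.
    ProcessBuilder pb = new ProcessBuilder("hadoop", "version"); // assumes 'hadoop' is on PATH
    pb.environment().putAll(variables);
    pb.inheritIO();
    int exitCode = pb.start().waitFor();
    System.out.println("child exited with " + exitCode);
    // In insecure mode, Hadoop reads HADOOP_USER_NAME from the child's environment
    // and runs the job as that user.
  }
}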


  }

  private void open() throws MetaException {
    isConnected = false;
    TTransportException tte = null;
    HadoopShims shim = ShimLoader.getHadoopShims();
    boolean useSasl = conf.getBoolVar(ConfVars.METASTORE_USE_THRIFT_SASL);
    boolean useFramedTransport = conf.getBoolVar(ConfVars.METASTORE_USE_THRIFT_FRAMED_TRANSPORT);
    int clientSocketTimeout = conf.getIntVar(ConfVars.METASTORE_CLIENT_SOCKET_TIMEOUT);

    for (int attempt = 0; !isConnected && attempt < retries; ++attempt) {
      for (URI store : metastoreUris) {
        LOG.info("Trying to connect to metastore with URI " + store);
        try {
          transport = new TSocket(store.getHost(), store.getPort(), 1000 * clientSocketTimeout);
          if (useSasl) {
            // Wrap thrift connection with SASL for secure connection.
            try {
              HadoopThriftAuthBridge.Client authBridge =
                ShimLoader.getHadoopThriftAuthBridge().createClient();

              // Check whether we should use delegation tokens to authenticate.
              // The call below gets hold of the tokens if they were set up by Hadoop;
              // this should happen on the map/reduce tasks if the client added the
              // tokens into Hadoop's credential store on the front end during job
              // submission.
              String tokenSig = conf.get("hive.metastore.token.signature");
              // tokenSig could be null
              tokenStrForm = shim.getTokenStrForm(tokenSig);
              if (tokenStrForm != null) {
                // authenticate using delegation tokens via the "DIGEST" mechanism
                transport = authBridge.createClientTransport(null, store.getHost(),
                    "DIGEST", tokenStrForm, transport,
                        MetaStoreUtils.getMetaStoreSaslProperties(conf));
              } else {
                String principalConfig =
                    conf.getVar(HiveConf.ConfVars.METASTORE_KERBEROS_PRINCIPAL);
                transport = authBridge.createClientTransport(
                    principalConfig, store.getHost(), "KERBEROS", null,
                    transport, MetaStoreUtils.getMetaStoreSaslProperties(conf));
              }
            } catch (IOException ioe) {
              LOG.error("Couldn't create client transport", ioe);
              throw new MetaException(ioe.toString());
            }
          } else if (useFramedTransport) {
            transport = new TFramedTransport(transport);
          }

          client = new ThriftHiveMetastore.Client(new TBinaryProtocol(transport));
          try {
            transport.open();
            isConnected = true;
          } catch (TTransportException e) {
            tte = e;
            if (LOG.isDebugEnabled()) {
              LOG.warn("Failed to connect to the MetaStore Server...", e);
            } else {
              // Don't print full exception trace if DEBUG is not on.
              LOG.warn("Failed to connect to the MetaStore Server...");
            }
          }

          if (isConnected && !useSasl && conf.getBoolVar(ConfVars.METASTORE_EXECUTE_SET_UGI)) {
            // Call set_ugi only in insecure mode.
            try {
              UserGroupInformation ugi = shim.getUGIForConf(conf);
              client.set_ugi(ugi.getUserName(), Arrays.asList(ugi.getGroupNames()));
            } catch (LoginException e) {
              LOG.warn("Failed to do login. set_ugi() is not successful, " +
                       "Continuing without it.", e);
            } catch (IOException e) {
View Full Code Here

    // Create an archived version of the partition in a directory ending in
    // ARCHIVE_INTERMEDIATE_DIR_SUFFIX at the same level as the partition,
    // if it does not already exist. If it does exist, we assume the dir is good
    // to use, since the move operation that created it is atomic.
    HadoopShims shim = ShimLoader.getHadoopShims();
    if (!pathExists(intermediateArchivedDir) &&
        !pathExists(intermediateOriginalDir)) {

      // First create the archive in a tmp dir so that if the job fails, the
      // bad files don't pollute the filesystem
      Path tmpPath = new Path(driverContext.getCtx()
                    .getExternalTmpFileURI(originalDir.toUri()), "partlevel");

      console.printInfo("Creating " + archiveName +
                        " for " + originalDir.toString());
      console.printInfo("in " + tmpPath);
      console.printInfo("Please wait... (this may take a while)");

      // Create the Hadoop archive
      int ret = 0;
      try {
        int maxJobNameLen = conf.getIntVar(HiveConf.ConfVars.HIVEJOBNAMELENGTH);
        String jobname = String.format("Archiving %s@%s",
          tbl.getTableName(), partSpecInfo.getName());
        jobname = Utilities.abbreviate(jobname, maxJobNameLen - 6);
        conf.setVar(HiveConf.ConfVars.HADOOPJOBNAME, jobname);
        ret = shim.createHadoopArchive(conf, originalDir, tmpPath, archiveName);
      } catch (Exception e) {
        throw new HiveException(e);
      }
      if (ret != 0) {
        throw new HiveException("Error while creating HAR");
View Full Code Here
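
A compact, hedged illustration of the create-in-scratch-then-move flow that the comments above describe. The paths and archive name are made-up placeholders, and the single rename stands in for Hive's handling of the intermediate directories; only createHadoopArchive comes from the shim API shown in the snippet.

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.shims.HadoopShims;
import org.apache.hadoop.hive.shims.ShimLoader;

public class HarCreateSketch {
  public static void main(String[] args) throws Exception {
    HiveConf conf = new HiveConf();
    HadoopShims shim = ShimLoader.getHadoopShims();

    // Placeholder locations; in Hive these come from the partition and the scratch dir.
    Path originalDir = new Path("/warehouse/tbl/ds=2024-01-01");
    Path tmpPath = new Path("/tmp/hive-scratch/partlevel");
    Path intermediateArchivedDir = new Path("/warehouse/tbl/ds=2024-01-01_INTERMEDIATE_ARCHIVED");

    // 1. Build the HAR in a scratch location so a failed job cannot pollute the table directory.
    int ret = shim.createHadoopArchive(conf, originalDir, tmpPath, "data.har");
    if (ret != 0) {
      throw new RuntimeException("Error while creating HAR, return code " + ret);
    }

    // 2. Move the finished archive into place. An HDFS rename is atomic, which is why
    //    an already existing intermediate directory can be trusted in the snippet above.
    FileSystem fs = tmpPath.getFileSystem(conf);
    if (!fs.rename(tmpPath, intermediateArchivedDir)) {
      throw new RuntimeException("Failed to move the archive into place");
    }
  }
}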

    // assume the archive is in the original dir, check if it exists
    Path archivePath = new Path(originalDir, archiveName);
    URI archiveUri = archivePath.toUri();
    ArchiveUtils.HarPathHelper harHelper = new ArchiveUtils.HarPathHelper(conf,
        archiveUri, originalUri);
    HadoopShims shim = ShimLoader.getHadoopShims();
    URI sourceUri = harHelper.getHarUri(originalUri, shim);
    Path sourceDir = new Path(sourceUri.getScheme(), sourceUri.getAuthority(), sourceUri.getPath());

    if (!pathExists(intermediateArchivedDir) && !pathExists(archivePath)) {
      throw new HiveException("Haven't found any archive where it should be");
View Full Code Here
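
For reference, a Hadoop archive is accessed through the har:// filesystem, whose URIs embed the scheme and authority of the underlying filesystem followed by the path to the .har directory and the path inside it. ArchiveUtils.HarPathHelper derives this URI properly in the snippet above; the hand-built path below is only a hedged illustration with an invented host and paths.

import org.apache.hadoop.fs.Path;

public class HarUriSketch {
  public static void main(String[] args) {
    // General shape: har://<underlying-scheme>-<host>:<port>/<path-to-archive>.har/<path-inside-archive>
    // Host, port and paths below are made up for illustration.
    Path fileInsideArchive = new Path(
        "har://hdfs-namenode.example.com:8020/warehouse/tbl/ds=2024-01-01/data.har/000000_0");
    System.out.println(fileInsideArchive);
  }
}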


    }

    private void open() throws MetaException {
        isConnected = false;
        TTransportException tte = null;
        HadoopShims shim = ShimLoader.getHadoopShims();

        boolean useFramedTransport = conf.getBoolVar(ConfVars.METASTORE_USE_THRIFT_FRAMED_TRANSPORT);
        int clientSocketTimeout = conf.getIntVar(ConfVars.METASTORE_CLIENT_SOCKET_TIMEOUT);

        for (int attempt = 0; !isConnected && attempt < retries; ++attempt) {
            for (HostAndPort store : metastoreHosts) {
                logger.info("Trying to connect to metastore at %s", store);
                try {
                    final HiveMetastoreClientConfig metastoreConfig = new HiveMetastoreClientConfig()
                        .setHost(store.getHostText())
                        .setPort(store.getPort())
                        .setFramed(useFramedTransport);

                    final ThriftClientConfig clientConfig = new ThriftClientConfig()
                        .setConnectTimeout(new Duration(clientSocketTimeout, TimeUnit.SECONDS));

                    final HiveMetastoreFactory factory = new SimpleHiveMetastoreFactory(thriftClientManager, clientConfig, metastoreConfig);

                    try {
                        client = closer.register(ThriftHiveMetastore.Client.forHiveMetastore(factory.getDefaultClient()));
                        isConnected = true;
                    }
                    catch (TTransportException e) {
                        tte = e;
                        if (logger.isDebugEnabled()) {
                            logger.warn(e, "Failed to connect to the MetaStore Server...");
                        } else {
                            // Don't print full exception trace if DEBUG is not on.
                            logger.warn("Failed to connect to the MetaStore Server...");
                        }
                    }
                    catch (InterruptedException ie) {
                        Thread.currentThread().interrupt();
                        break; // stop trying to connect
                    }

                    if (isConnected && conf.getBoolVar(ConfVars.METASTORE_EXECUTE_SET_UGI)) {
                        // Call set_ugi only in insecure mode.
                        try {
                            UserGroupInformation ugi = shim.getUGIForConf(conf);
                            client.set_ugi(ugi.getUserName(), Arrays.asList(ugi.getGroupNames()));
                        } catch (LoginException e) {
                            logger.warn(e, "Failed to do login. set_ugi() is not successful, Continuing without it.");
                        } catch (IOException e) {
                            logger.warn(e, "Failed to find ugi of client set_ugi() is not successful, Continuing without it.");
View Full Code Here

    }

    private void open() throws MetaException {
        isConnected = false;
        TTransportException tte = null;
        HadoopShims shim = ShimLoader.getHadoopShims();

        boolean useFramedTransport = conf.getBoolVar(ConfVars.METASTORE_USE_THRIFT_FRAMED_TRANSPORT);
        int clientSocketTimeout = conf.getIntVar(ConfVars.METASTORE_CLIENT_SOCKET_TIMEOUT);

        for (int attempt = 0; !isConnected && attempt < retries; ++attempt) {
            for (HostAndPort store : metastoreHosts) {
                logger.info("Trying to connect to metastore at %s", store);
                try {
                    final HiveMetastoreClientConfig metastoreConfig = new HiveMetastoreClientConfig()
                        .setHost(store.getHostText())
                        .setPort(store.getPort())
                        .setFramed(useFramedTransport);

                    final ThriftClientConfig clientConfig = new ThriftClientConfig()
                        .setConnectTimeout(new Duration(clientSocketTimeout, TimeUnit.SECONDS));

                    final HiveMetastoreFactory factory = new SimpleHiveMetastoreFactory(thriftClientManager, clientConfig, metastoreConfig);

                    client = closer.register(ThriftHiveMetastore.Client.forHiveMetastore(factory.getDefaultClient()));
                    isConnected = true;

                    if (isConnected && conf.getBoolVar(ConfVars.METASTORE_EXECUTE_SET_UGI)) {
                        // Call set_ugi only in insecure mode.
                        try {
                            UserGroupInformation ugi = shim.getUGIForConf(conf);
                            client.set_ugi(ugi.getUserName(), Arrays.asList(ugi.getGroupNames()));
                        } catch (LoginException e) {
                            logger.warn(e, "Failed to do login. set_ugi() is not successful, Continuing without it.");
                        } catch (IOException e) {
                            logger.warn(e, "Failed to find ugi of client set_ugi() is not successful, Continuing without it.");
View Full Code Here

  }

  private void open() throws MetaException {
    isConnected = false;
    TTransportException tte = null;
    HadoopShims shim = ShimLoader.getHadoopShims();
    boolean useSasl = conf.getBoolVar(ConfVars.METASTORE_USE_THRIFT_SASL);
    boolean useFramedTransport = conf.getBoolVar(ConfVars.METASTORE_USE_THRIFT_FRAMED_TRANSPORT);
    int clientSocketTimeout = conf.getIntVar(ConfVars.METASTORE_CLIENT_SOCKET_TIMEOUT);

    for (int attempt = 0; !isConnected && attempt < retries; ++attempt) {
      for (URI store : metastoreUris) {
        LOG.info("Trying to connect to metastore with URI " + store);
        try {
          transport = new TSocket(store.getHost(), store.getPort(), 1000 * clientSocketTimeout);
          if (useSasl) {
            // Wrap thrift connection with SASL for secure connection.
            try {
              HadoopThriftAuthBridge.Client authBridge =
                ShimLoader.getHadoopThriftAuthBridge().createClient();

              // Check whether we should use delegation tokens to authenticate.
              // The call below gets hold of the tokens if they were set up by Hadoop;
              // this should happen on the map/reduce tasks if the client added the
              // tokens into Hadoop's credential store on the front end during job
              // submission.
              String tokenSig = conf.get("hive.metastore.token.signature");
              // tokenSig could be null
              tokenStrForm = shim.getTokenStrForm(tokenSig);

              if (tokenStrForm != null) {
                // authenticate using delegation tokens via the "DIGEST" mechanism
                transport = authBridge.createClientTransport(null, store.getHost(),
                    "DIGEST", tokenStrForm, transport);
              } else {
                String principalConfig =
                    conf.getVar(HiveConf.ConfVars.METASTORE_KERBEROS_PRINCIPAL);
                transport = authBridge.createClientTransport(
                    principalConfig, store.getHost(), "KERBEROS", null,
                    transport);
              }
            } catch (IOException ioe) {
              LOG.error("Couldn't create client transport", ioe);
              throw new MetaException(ioe.toString());
            }
          } else if (useFramedTransport) {
            transport = new TFramedTransport(transport);
          }

          client = new ThriftHiveMetastore.Client(new TBinaryProtocol(transport));
          try {
            transport.open();
            isConnected = true;
          } catch (TTransportException e) {
            tte = e;
            if (LOG.isDebugEnabled()) {
              LOG.warn("Failed to connect to the MetaStore Server...", e);
            } else {
              // Don't print full exception trace if DEBUG is not on.
              LOG.warn("Failed to connect to the MetaStore Server...");
            }
          }

          if (isConnected && !useSasl && conf.getBoolVar(ConfVars.METASTORE_EXECUTE_SET_UGI)) {
            // Call set_ugi only in insecure mode.
            try {
              UserGroupInformation ugi = shim.getUGIForConf(conf);
              client.set_ugi(ugi.getUserName(), Arrays.asList(ugi.getGroupNames()));
            } catch (LoginException e) {
              LOG.warn("Failed to do login. set_ugi() is not successful, " +
                       "Continuing without it.", e);
            } catch (IOException e) {
View Full Code Here

      console.printInfo("Creating " + archiveName + " for " + originalDir.toString());
      console.printInfo("in " + tmpDir);
      console.printInfo("Please wait... (this may take a while)");

      // Create the Hadoop archive
      HadoopShims shim = ShimLoader.getHadoopShims();
      int ret = 0;
      try {
        ret = shim.createHadoopArchive(conf, originalDir, tmpDir, archiveName);
      } catch (Exception e) {
        throw new HiveException(e);
      }
      if (ret != 0) {
        throw new HiveException("Error while creating HAR");
View Full Code Here
