Package org.apache.hadoop.hive.conf

Examples of org.apache.hadoop.hive.conf.HiveConf
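
HiveConf extends Hadoop's Configuration and adds typed accessors keyed by the HiveConf.ConfVars enum. Before the excerpts below, here is a minimal, self-contained sketch of constructing a HiveConf and reading and writing a few variables; the metastore URI and scratch directory values are placeholders, not recommendations.

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;

public class HiveConfBasics {
  public static void main(String[] args) {
    // Loads hive-default.xml / hive-site.xml from the classpath when present.
    HiveConf conf = new HiveConf(HiveConfBasics.class);

    // Typed accessors keyed by the ConfVars enum.
    conf.setVar(ConfVars.METASTOREURIS, "thrift://localhost:9083");   // placeholder URI
    conf.setIntVar(ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3);
    conf.setBoolVar(ConfVars.HIVE_SUPPORT_CONCURRENCY, false);

    // Plain Configuration-style access also works, since HiveConf extends Configuration.
    conf.set("hive.exec.scratchdir", "/tmp/hive-scratch");            // placeholder path

    System.out.println(conf.getVar(ConfVars.METASTOREURIS));
    System.out.println(conf.getIntVar(ConfVars.METASTORETHRIFTCONNECTIONRETRIES));
  }
}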



  private String buildHcatDelegationToken(String user)
    throws IOException, InterruptedException, MetaException, TException {
    HiveConf c = new HiveConf();
    final HiveMetaStoreClient client = new HiveMetaStoreClient(c);
    LOG.info("user: " + user + " loginUser: " + UserGroupInformation.getLoginUser().getUserName());
    final TokenWrapper twrapper = new TokenWrapper();
    final UserGroupInformation ugi = UgiFactory.getUgi(user);
    String s = ugi.doAs(new PrivilegedExceptionAction<String>() {
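The excerpt above stops at the doAs call, so the privileged action body is not shown. A hedged sketch of what such a body could look like, reusing the client and ugi from the excerpt and assuming the goal is to fetch a metastore delegation token on behalf of the proxied user (the owner/renewer arguments are illustrative, not taken from the original):

    // Sketch only: illustrative doAs body, not the elided original.
    String tokenStrForm = ugi.doAs(new PrivilegedExceptionAction<String>() {
      @Override
      public String run() throws IOException, TException, InterruptedException {
        // Fetch a delegation token owned by the proxied user; the renewer
        // principal shown here is a placeholder.
        return client.getDelegationToken(ugi.getUserName(), ugi.getUserName());
      }
    });
    return tokenStrForm;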


    isServerRunning = true;

    securityManager = System.getSecurityManager();
    System.setSecurityManager(new NoExitSecurityManager());

    hcatConf = new HiveConf(this.getClass());
    hcatConf.set("hive.metastore.local", "false");
    hcatConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://127.0.0.1:" + msPort);
    hcatConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3);
    hcatConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTFAILURERETRIES, 3);
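With a HiveConf pointed at a remote metastore as above, a thrift client can be opened directly from it. A minimal sketch, assuming a metastore is actually listening on the configured METASTOREURIS:

    // Sketch: connect to the metastore configured above and list its databases.
    HiveMetaStoreClient client = new HiveMetaStoreClient(hcatConf);
    try {
      for (String db : client.getAllDatabases()) {
        System.out.println(db);
      }
    } finally {
      client.close();
    }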

  private Driver hcatDriver;

  @Override
  protected void setUp() throws Exception {

    HiveConf hcatConf = new HiveConf(this.getClass());
    hcatConf.set(ConfVars.PREEXECHOOKS.varname, "");
    hcatConf.set(ConfVars.POSTEXECHOOKS.varname, "");
    hcatConf.set(ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");

    hcatConf.set(ConfVars.SEMANTIC_ANALYZER_HOOK.varname, HCatSemanticAnalyzer.class.getName());
    hcatDriver = new Driver(hcatConf);
    SessionState.start(new CliSessionState(hcatConf));
  }
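Once SessionState has been started and the Driver built from the same HiveConf, statements can be run against it. A hedged sketch of the usual pattern with the hcatDriver field above; the statement is only illustrative and the exact Driver.run/getResults signatures vary between Hive versions:

    // Sketch: run a statement through the driver configured in setUp().
    CommandProcessorResponse resp = hcatDriver.run("show tables");
    assertEquals(0, resp.getResponseCode());

    ArrayList<String> results = new ArrayList<String>();
    hcatDriver.getResults(results);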

  @BeforeClass
  public static void setUpOneTime() throws Exception {
    fs = new LocalFileSystem();
    fs.initialize(fs.getWorkingDirectory().toUri(), new Configuration());

    HiveConf hiveConf = new HiveConf();
    hiveConf.setInt(HCatConstants.HCAT_HIVE_CLIENT_EXPIRY_TIME, 0);
    // Hack: initialize the cache with a 0 expiry time so that it returns a new hive client every time.
    // Otherwise the cache doesn't play well with the second test method, where the client gets closed()
    // in the tearDown() of the previous test.
    HCatUtil.getHiveClient(hiveConf);
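HCatUtil.getHiveClient(hiveConf) hands back a (possibly cached) HiveMetaStoreClient, and the zero expiry time set above forces a fresh client on every call. A hedged sketch of the usual acquire/release pattern around that helper:

    // Sketch: acquire a client through the HCatUtil cache and release it when done.
    // With HCAT_HIVE_CLIENT_EXPIRY_TIME = 0 every call returns a new client.
    HiveMetaStoreClient client = HCatUtil.getHiveClient(hiveConf);
    try {
      System.out.println(client.getAllDatabases());
    } finally {
      HCatUtil.closeHiveClientQuietly(client);
    }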

  }

  public static HiveConf getHiveConf(Configuration conf)
    throws IOException {

    HiveConf hiveConf = new HiveConf(conf, HCatUtil.class);

    //copy the hive conf into the job conf and restore it
    //in the backend context
    if (conf.get(HCatConstants.HCAT_KEY_HIVE_CONF) == null) {
      conf.set(HCatConstants.HCAT_KEY_HIVE_CONF,
        HCatUtil.serialize(hiveConf.getAllProperties()));
    } else {
      //Copy configuration properties into the hive conf
      Properties properties = (Properties) HCatUtil.deserialize(
        conf.get(HCatConstants.HCAT_KEY_HIVE_CONF));

      for (Map.Entry<Object, Object> prop : properties.entrySet()) {
        if (prop.getValue() instanceof String) {
          hiveConf.set((String) prop.getKey(), (String) prop.getValue());
        } else if (prop.getValue() instanceof Integer) {
          hiveConf.setInt((String) prop.getKey(),
            (Integer) prop.getValue());
        } else if (prop.getValue() instanceof Boolean) {
          hiveConf.setBoolean((String) prop.getKey(),
            (Boolean) prop.getValue());
        } else if (prop.getValue() instanceof Long) {
          hiveConf.setLong((String) prop.getKey(), (Long) prop.getValue());
        } else if (prop.getValue() instanceof Float) {
          hiveConf.setFloat((String) prop.getKey(),
            (Float) prop.getValue());
        }
      }
    }

    if (conf.get(HCatConstants.HCAT_KEY_TOKEN_SIGNATURE) != null) {
      hiveConf.set("hive.metastore.token.signature",
        conf.get(HCatConstants.HCAT_KEY_TOKEN_SIGNATURE));
    }

    return hiveConf;
  }
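The method above is a round trip: the first call serializes the HiveConf properties into the job configuration under HCAT_KEY_HIVE_CONF, and a later call on the same configuration (for example on the task side) restores them into a fresh HiveConf. A sketch of the two halves, derived from the method body above:

    // Frontend (job submission): first call stashes the hive properties
    // into the job conf under HCAT_KEY_HIVE_CONF.
    Configuration jobConf = new Configuration();
    HiveConf submitted = HCatUtil.getHiveConf(jobConf);

    // Backend (task side): the serialized properties travel with the job conf,
    // so the same call deserializes them and copies them into a new HiveConf.
    HiveConf restored = HCatUtil.getHiveConf(jobConf);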

    HiveMetaStoreClient client = null;
    HCatTableInfo tableInfo = jobInfo.getTableInfo();
    List<Partition> partitionsAdded = new ArrayList<Partition>();
    try {
      HiveConf hiveConf = HCatUtil.getHiveConf(conf);
      client = HCatUtil.getHiveClient(hiveConf);
      StorerInfo storer = InternalUtil.extractStorerInfo(table.getTTable().getSd(), table.getParameters());

      FileStatus tblStat = fs.getFileStatus(tblPath);
      String grpName = tblStat.getGroup();

  private void cancelDelegationTokens(JobContext context) throws IOException{
    LOG.info("Cancelling deletgation token for the job.");
    HiveMetaStoreClient client = null;
    try {
      HiveConf hiveConf = HCatUtil
          .getHiveConf(context.getConfiguration());
      client = HCatUtil.getHiveClient(hiveConf);
      // cancel the delegation tokens that were acquired for this job now that
      // we are done - we should cancel only if the tokens were acquired by
      // HCatOutputFormat, and not if they were supplied by Oozie.
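The cancellation logic itself is cut off above. A hedged sketch of how the remainder of the try block might look, assuming the client exposes the token string via getTokenStrForm() and that the presence of HCAT_KEY_TOKEN_SIGNATURE in the job configuration marks tokens that HCatOutputFormat acquired itself:

      // Sketch only: cancel the token only when it was acquired by
      // HCatOutputFormat (signalled by HCAT_KEY_TOKEN_SIGNATURE being set),
      // not when it was supplied externally, e.g. by Oozie.
      String tokenStrForm = client.getTokenStrForm();
      if (tokenStrForm != null
          && context.getConfiguration().get(HCatConstants.HCAT_KEY_TOKEN_SIGNATURE) != null) {
        client.cancelDelegationToken(tokenStrForm);
      }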

    t.start();
    Thread.sleep(40000);

    securityManager = System.getSecurityManager();
    System.setSecurityManager(new NoExitSecurityManager());
    hcatConf = new HiveConf(TestHCatClient.class);
    hcatConf.set("hive.metastore.local", "false");
    hcatConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:"
      + msPort);
    hcatConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3);
    hcatConf.set(HiveConf.ConfVars.SEMANTIC_ANALYZER_HOOK.varname,

    Thread.sleep(10000);
    isServerRunning = true;
    securityManager = System.getSecurityManager();
    System.setSecurityManager(new NoExitSecurityManager());

    hcatConf = new HiveConf(TestHCatPartitionPublish.class);
    hcatConf.set("hive.metastore.local", "false");
    hcatConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:"
        + msPort);
    hcatConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3);
    hcatConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTFAILURERETRIES, 3);

      HMSHandler.LOG.info(msg);
      if (cli.isVerbose()) {
        System.err.println(msg);
      }

      HiveConf conf = new HiveConf(HMSHandler.class);

      // set all properties specified on the command line
      for (Map.Entry<Object, Object> item : hiveconf.entrySet()) {
        conf.set((String) item.getKey(), (String) item.getValue());
      }

      // Add shutdown hook.
      Runtime.getRuntime().addShutdownHook(new Thread() {
        @Override
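The excerpt is cut inside the shutdown hook. A hedged sketch of the pattern (the hook body shown here is illustrative; the real metastore cleanup is not part of this excerpt):

      // Sketch only: illustrative shutdown hook body.
      Runtime.getRuntime().addShutdownHook(new Thread() {
        @Override
        public void run() {
          HMSHandler.LOG.info("Shutting down hive metastore.");
        }
      });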
