Examples of HiveMetaStoreClient


Examples of org.apache.hadoop.hive.metastore.HiveMetaStoreClient

    howlConf.set(HiveConf.ConfVars.SEMANTIC_ANALYZER_HOOK.varname, HowlSemanticAnalyzer.class.getName());
    howlConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
    howlConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
    howlConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
    clientWH = new Warehouse(howlConf);
    msc = new HiveMetaStoreClient(howlConf, null);
    System.setProperty(HiveConf.ConfVars.PREEXECHOOKS.varname, " ");
    System.setProperty(HiveConf.ConfVars.POSTEXECHOOKS.varname, " ");
  }
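The fragment above configures a HiveConf for testing and then opens a metastore client. For reference, a minimal self-contained sketch of the same construction pattern might look like the following; it assumes a metastore reachable through the hive-site.xml on the classpath, and the class and database names are only illustrative.

    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
    import org.apache.hadoop.hive.metastore.api.Table;

    public class MetaStoreClientSketch {
      public static void main(String[] args) throws Exception {
        // Picks up metastore settings (e.g. hive.metastore.uris) from hive-site.xml.
        HiveConf conf = new HiveConf(MetaStoreClientSketch.class);

        // The second argument is an optional HiveMetaHookLoader; null means no hooks.
        HiveMetaStoreClient client = new HiveMetaStoreClient(conf, null);
        try {
          // List the tables in the default database and print each table's location.
          for (String name : client.getAllTables("default")) {
            Table table = client.getTable("default", name);
            System.out.println(name + " -> " + table.getSd().getLocation());
          }
        } finally {
          client.close();
        }
      }
    }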

Examples of org.apache.hadoop.hive.metastore.HiveMetaStoreClient

  @Override
  protected void setUp() throws Exception {
    super.setUp();
    securityManager = System.getSecurityManager();
    System.setSecurityManager(new NoExitSecurityManager());
    msc = new HiveMetaStoreClient(conf);
    msc.dropTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, "testNoPartTbl", true, true);
    System.setProperty(HiveConf.ConfVars.PREEXECHOOKS.varname, " ");
    System.setProperty(HiveConf.ConfVars.POSTEXECHOOKS.varname, " ");
    msc.dropTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, "testPartTbl", true, true);
    pig = new PigServer(ExecType.LOCAL, conf.getAllProperties());

Examples of org.apache.hadoop.hive.metastore.HiveMetaStoreClient

    hiveDriver = new Driver(hiveConf);

    howlConf.set(ConfVars.SEMANTIC_ANALYZER_HOOK.varname, HowlSemanticAnalyzer.class.getName());
    howlDriver = new Driver(howlConf);

    msc = new HiveMetaStoreClient(howlConf);
    SessionState.start(new CliSessionState(howlConf));
  }

Examples of org.apache.hadoop.hive.metastore.HiveMetaStoreClient

    howlConf.set(ConfVars.PREEXECHOOKS.varname, "");
    howlConf.set(ConfVars.POSTEXECHOOKS.varname, "");
    howlConf.set(ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
    howlConf.set(ConfVars.SEMANTIC_ANALYZER_HOOK.varname, HowlSemanticAnalyzer.class.getName());
    howlDriver = new Driver(howlConf);
    msc = new HiveMetaStoreClient(howlConf);
    SessionState.start(new CliSessionState(howlConf));
    super.setUp();
  }
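The two fragments above pair a Hive Driver (with HowlSemanticAnalyzer installed as the semantic analyzer hook) with a HiveMetaStoreClient, so DDL executed through the driver can be checked against the metastore. A hedged sketch of that pattern, reusing only the classes already shown above; the table name example_tbl and the exact DDL are illustrative, and an embedded/local metastore is assumed.

    HiveConf conf = new HiveConf(this.getClass());
    conf.set(HiveConf.ConfVars.SEMANTIC_ANALYZER_HOOK.varname, HowlSemanticAnalyzer.class.getName());
    conf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");

    Driver driver = new Driver(conf);
    HiveMetaStoreClient msc = new HiveMetaStoreClient(conf);
    SessionState.start(new CliSessionState(conf));

    // DDL goes through the driver (and the Howl semantic analyzer hook) ...
    driver.run("create table example_tbl (id int) stored as RCFILE");

    // ... and the resulting metadata can be read back through the metastore client.
    Table created = msc.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, "example_tbl");
    System.out.println("table location: " + created.getSd().getLocation());
    msc.close();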

Examples of org.apache.hadoop.hive.metastore.HiveMetaStoreClient

    fs = new LocalFileSystem();
    fs.initialize(fs.getWorkingDirectory().toUri(), new Configuration());

    initialize();

    client = new HiveMetaStoreClient(hiveConf, null);
    initTable();
  }

Examples of org.apache.hadoop.hive.metastore.HiveMetaStoreClient

  protected void setUp() throws Exception {
    super.setUp();
    hiveConf = new HiveConf(this.getClass());

    try {
      client = new HiveMetaStoreClient(hiveConf, null);

      initTable();
    } catch (Throwable e) {
      System.err.println("Unable to open the metastore");
      System.err.println(StringUtils.stringifyException(e));

Examples of org.apache.hadoop.hive.metastore.HiveMetaStoreClient

            throw new MetaException(
              "Failed to load storage handler:  " + ex.getMessage());
          }
        }
      };
    return new HiveMetaStoreClient(conf, hookLoader);
  }
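The fragment above comes from a factory method that passes a HiveMetaHookLoader into the client constructor so that storage-handler hooks can participate in metastore calls. A minimal hedged sketch of such a loader, here one that simply returns no hook; the real loader above resolves the table's storage handler and throws MetaException when that fails.

    HiveMetaHookLoader hookLoader = new HiveMetaHookLoader() {
      @Override
      public HiveMetaHook getHook(org.apache.hadoop.hive.metastore.api.Table tbl)
          throws MetaException {
        // A real loader would look up the table's storage handler and return its hook;
        // returning null means no hook is invoked for this table.
        return null;
      }
    };
    HiveMetaStoreClient client = new HiveMetaStoreClient(conf, hookLoader);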

Examples of org.apache.hadoop.hive.metastore.HiveMetaStoreClient

    HiveConf hiveConf = new HiveConf(this.getClass());
    hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
    hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
    hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
    driver = new Driver(hiveConf);
    client = new HiveMetaStoreClient(hiveConf);
    SessionState.start(new CliSessionState(hiveConf));
    props = new Properties();
    props.setProperty("fs.default.name", cluster.getProperties().getProperty("fs.default.name"));
    fullFileName = cluster.getProperties().getProperty("fs.default.name") + fileName;
   

Examples of org.apache.hadoop.hive.metastore.HiveMetaStoreClient

        baseCommitter.abortJob(jobContext, state);
      }
      OutputJobInfo jobInfo = HowlOutputFormat.getJobInfo(jobContext);

      try {
        HiveMetaStoreClient client = HowlOutputFormat.createHiveClient(
            jobInfo.getTableInfo().getServerUri(), jobContext.getConfiguration());
        // Cancel the delegation tokens that were acquired for this job now that
        // we are done. We should only cancel tokens acquired by HowlOutputFormat,
        // not tokens supplied by Oozie; in the latter case the
        // HOWL_KEY_TOKEN_SIGNATURE property will not be set in the conf.
        String tokenStrForm = client.getTokenStrForm();
        if (tokenStrForm != null
            && jobContext.getConfiguration().get(HowlOutputFormat.HOWL_KEY_TOKEN_SIGNATURE) != null) {
          client.cancelDelegationToken(tokenStrForm);
        }
      } catch(Exception e) {
        if( e instanceof HowlException ) {
          throw (HowlException) e;
        } else {

Examples of org.apache.hadoop.hive.metastore.HiveMetaStoreClient

        moveTaskOutputs(fs, src, src, tblPath);
        fs.delete(src, true);
        return;
      }

      HiveMetaStoreClient client = null;
      List<String> values = null;
      boolean partitionAdded = false;
      HowlTableInfo tableInfo = jobInfo.getTableInfo();

      try {
        client = HowlOutputFormat.createHiveClient(tableInfo.getServerUri(), conf);

        StorerInfo storer = InitializeInput.extractStorerInfo(table.getSd(), table.getParameters());

        Partition partition = new Partition();
        partition.setDbName(tableInfo.getDatabaseName());
        partition.setTableName(tableInfo.getTableName());
        partition.setSd(new StorageDescriptor(tblSD));
        partition.getSd().setLocation(jobInfo.getLocation());

        updateTableSchema(client, table, jobInfo.getOutputSchema());

        List<FieldSchema> fields = new ArrayList<FieldSchema>();
        for(HowlFieldSchema fieldSchema : jobInfo.getOutputSchema().getFields()) {
          fields.add(HowlSchemaUtils.getFieldSchema(fieldSchema));
        }

        partition.getSd().setCols(fields);

        Map<String,String> partKVs = tableInfo.getPartitionValues();
        // Get the partition value list
        partition.setValues(getPartitionValueList(table, partKVs));

        Map<String, String> params = new HashMap<String, String>();
        params.put(HowlConstants.HOWL_ISD_CLASS, storer.getInputSDClass());
        params.put(HowlConstants.HOWL_OSD_CLASS, storer.getOutputSDClass());

        //Copy table level howl.* keys to the partition
        for(Map.Entry<Object, Object> entry : storer.getProperties().entrySet()) {
          params.put(entry.getKey().toString(), entry.getValue().toString());
        }

        partition.setParameters(params);

        // Sets permissions and group name on partition dirs.
        FileStatus tblStat = fs.getFileStatus(tblPath);
        String grpName = tblStat.getGroup();
        FsPermission perms = tblStat.getPermission();
        Path partPath = tblPath;
        for(FieldSchema partKey : table.getPartitionKeys()){
          partPath = constructPartialPartPath(partPath, partKey.getName().toLowerCase(), partKVs);
          fs.setPermission(partPath, perms);
          try {
            fs.setOwner(partPath, null, grpName);
          } catch (AccessControlException ace) {
            // Ideally we would log this before ignoring it; logging is not yet wired up in Howl.
          }
        }

        // Publish the new partition
        client.add_partition(partition);
        partitionAdded = true; // publish to the metastore is done

        if( baseCommitter != null ) {
          baseCommitter.cleanupJob(context);
        }
        // Cancel the delegation tokens that were acquired for this job now that
        // we are done. We should only cancel tokens acquired by HowlOutputFormat,
        // not tokens supplied by Oozie; in the latter case the
        // HOWL_KEY_TOKEN_SIGNATURE property will not be set in the conf.
        String tokenStrForm = client.getTokenStrForm();
        if (tokenStrForm != null
            && context.getConfiguration().get(HowlOutputFormat.HOWL_KEY_TOKEN_SIGNATURE) != null) {
          client.cancelDelegationToken(tokenStrForm);
        }
      } catch (Exception e) {

        if( partitionAdded ) {
          try {
            // A step after publishing failed; remove the partition from the metastore.
            client.dropPartition(tableInfo.getDatabaseName(),
                    tableInfo.getTableName(), values);
          } catch (Exception te) {
            // Keep the original exception as the cause.
            throw new HowlException(ErrorType.ERROR_PUBLISHING_PARTITION, e);
          }
        }

        if( e instanceof HowlException ) {
          throw (HowlException) e;
        } else {
          throw new HowlException(ErrorType.ERROR_PUBLISHING_PARTITION, e);
        }
      } finally {
        if( client != null ) {
          client.close();
        }
      }
    }
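Distilled to just the metastore interaction, the publish-and-roll-back pattern in the fragment above is roughly the following sketch; dbName, tableName, partitionValues and partitionLocation stand in for the values the fragment derives from its job and table info, and error handling is trimmed.

    Partition partition = new Partition();
    partition.setDbName(dbName);
    partition.setTableName(tableName);
    partition.setSd(new StorageDescriptor(table.getSd())); // copy the table's descriptor
    partition.getSd().setLocation(partitionLocation);
    partition.setValues(partitionValues);                  // one value per partition key

    boolean published = false;
    try {
      client.add_partition(partition);
      published = true;
      // ... any work that must succeed after publishing goes here ...
    } catch (Exception e) {
      if (published) {
        // Roll back the metastore entry if a later step failed.
        client.dropPartition(dbName, tableName, partitionValues, false);
      }
      throw e;
    }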