Package org.apache.hadoop.hive.metastore

Examples of org.apache.hadoop.hive.metastore.HiveMetaStoreClient


        Callable<HiveMetaStoreClient> worker1 = new GetHiveClient();
        Callable<HiveMetaStoreClient> worker2 = new GetHiveClient();
        Future<HiveMetaStoreClient> clientFuture1 = executor.submit(worker1);
        Future<HiveMetaStoreClient> clientFuture2 = executor.submit(worker2);
        HiveMetaStoreClient client1 = clientFuture1.get();
        HiveMetaStoreClient client2 = clientFuture2.get();
        assertNotNull(client1);
        assertNotNull(client2);
        assertNotSame(client1, client2);
    }
View Full Code Here
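
The GetHiveClient callable is not shown on this page. A minimal sketch of what it plausibly looks like (an assumption, not the original source): each call builds a fresh client over a fresh HiveConf, which is why the test can assert that the two futures return distinct instances.

    import java.util.concurrent.Callable;
    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;

    // Hypothetical sketch of the GetHiveClient callable used in the test above.
    class GetHiveClient implements Callable<HiveMetaStoreClient> {
        @Override
        public HiveMetaStoreClient call() throws Exception {
            // Each invocation opens its own metastore connection.
            return new HiveMetaStoreClient(new HiveConf());
        }
    }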


            moveTaskOutputs(fs, src, src, tblPath, false);
            fs.delete(src, true);
            return;
        }

        HiveMetaStoreClient client = null;
        HCatTableInfo tableInfo = jobInfo.getTableInfo();
        List<Partition> partitionsAdded = new ArrayList<Partition>();
        try {
            HiveConf hiveConf = HCatUtil.getHiveConf(conf);
            client = HCatUtil.getHiveClient(hiveConf);
            StorerInfo storer = InternalUtil.extractStorerInfo(table.getTTable().getSd(), table.getParameters());

            FileStatus tblStat = fs.getFileStatus(tblPath);
            String grpName = tblStat.getGroup();
            FsPermission perms = tblStat.getPermission();

            List<Partition> partitionsToAdd = new ArrayList<Partition>();
            if (!dynamicPartitioningUsed) {
                partitionsToAdd.add(
                        constructPartition(
                                context, jobInfo,
                                tblPath.toString(), jobInfo.getPartitionValues(),
                                jobInfo.getOutputSchema(), getStorerParameterMap(storer),
                                table, fs,
                                grpName, perms));
            } else {
                for (Entry<String, Map<String, String>> entry : partitionsDiscoveredByPath.entrySet()) {
                    partitionsToAdd.add(
                            constructPartition(
                                    context, jobInfo,
                                    getPartitionRootLocation(entry.getKey(), entry.getValue().size()), entry.getValue(),
                                    jobInfo.getOutputSchema(), getStorerParameterMap(storer),
                                    table, fs,
                                    grpName, perms));
                }
            }

            ArrayList<Map<String, String>> ptnInfos = new ArrayList<Map<String, String>>();
            for (Partition ptn : partitionsToAdd) {
                ptnInfos.add(InternalUtil.createPtnKeyValueMap(new Table(tableInfo.getTable()), ptn));
            }

            // Publish the new partition(s)
            if (dynamicPartitioningUsed && harProcessor.isEnabled() && (!partitionsToAdd.isEmpty())) {

                Path src = new Path(ptnRootLocation);
                // check here for each dir we're copying out, to see if it
                // already exists, error out if so
                moveTaskOutputs(fs, src, src, tblPath, true);
                moveTaskOutputs(fs, src, src, tblPath, false);
                fs.delete(src, true);
                try {
                    updateTableSchema(client, table, jobInfo.getOutputSchema());
                    LOG.info("HAR is being used. The table {} has new partitions {}.", table.getTableName(), ptnInfos);
                    client.add_partitions(partitionsToAdd);
                    partitionsAdded = partitionsToAdd;
                } catch (Exception e){
                    // There was an error adding partitions: roll back the fs copy and rethrow
                    for (Partition p : partitionsToAdd){
                        Path ptnPath = new Path(harProcessor.getParentFSPath(new Path(p.getSd().getLocation())));
                        if (fs.exists(ptnPath)){
                            fs.delete(ptnPath,true);
                        }
                    }
                    throw e;
                }

            } else {
                // no harProcessor, regular operation
                updateTableSchema(client, table, jobInfo.getOutputSchema());
                LOG.info("HAR not is not being used. The table {} has new partitions {}.", table.getTableName(), ptnInfos);
                partitionsAdded = partitionsToAdd;
                if (dynamicPartitioningUsed && (partitionsAdded.size()>0)){
                    Path src = new Path(ptnRootLocation);
                    moveTaskOutputs(fs, src, src, tblPath, true);
                    moveTaskOutputs(fs, src, src, tblPath, false);
                    fs.delete(src, true);
                }
                client.add_partitions(partitionsToAdd);
            }
        } catch (Exception e) {
            if (partitionsAdded.size() > 0) {
                try {
                    // baseCommitter.cleanupJob failed, try to clean up the
                    // metastore
                    for (Partition p : partitionsAdded) {
                        client.dropPartition(tableInfo.getDatabaseName(),
                                tableInfo.getTableName(), p.getValues());
                    }
                } catch (Exception te) {
                    // Keep cause as the original exception
                    throw new HCatException(
View Full Code Here

        }
    }
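
The publish step above follows an add-then-roll-back pattern: partitions are registered through the metastore client, and if a later step fails, everything just added is dropped again so the metastore stays consistent with the filesystem. A stripped-down sketch of the same pattern, mirroring the calls in the snippet (dbName and tableName are hypothetical placeholders):

    List<Partition> added = new ArrayList<Partition>();
    try {
        client.add_partitions(partitionsToAdd);
        added = partitionsToAdd;
        // ... further commit work that may still fail ...
    } catch (Exception e) {
        // Roll back: drop whatever this job managed to register.
        for (Partition p : added) {
            client.dropPartition(dbName, tableName, p.getValues());
        }
        throw e;
    }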

    private void cancelDelegationTokens(JobContext context) throws IOException {
        LOG.info("Cancelling delegation tokens for the job.");
        HiveMetaStoreClient client = null;
        try {
            HiveConf hiveConf = HCatUtil
                    .getHiveConf(context.getConfiguration());
            client = HCatUtil.getHiveClient(hiveConf);
            // Cancel the delegation tokens that were acquired for this job now
            // that we are done. We should cancel only if the tokens were acquired
            // by HCatOutputFormat, not if they were supplied by Oozie; in the
            // latter case the HCAT_KEY_TOKEN_SIGNATURE property in the conf
            // will not be set.
            String tokenStrForm = client.getTokenStrForm();
            if (tokenStrForm != null
                    && context.getConfiguration().get(
                            HCatConstants.HCAT_KEY_TOKEN_SIGNATURE) != null) {
                client.cancelDelegationToken(tokenStrForm);
            }
        } catch (MetaException e) {
            LOG.warn("MetaException while cancelling delegation token.", e);
        } catch (TException e) {
            LOG.warn("TException while cancelling delegation token.", e);
View Full Code Here
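
The cancel side shown above implies an acquire side that this page does not include. A plausible counterpart (an assumption; the "owner" and "renewer" values are placeholders) obtains the token in string form, which is what getTokenStrForm() later hands back for cancellation:

    // Acquire a delegation token; the string form is what
    // cancelDelegationToken(...) expects at cleanup time.
    String tokenStrForm = client.getDelegationToken("owner", "renewer");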

    @Before
    public void setUp() throws Exception {
        if (driver == null) {
            setUpHiveConf();
            driver = new Driver(hiveConf);
            client = new HiveMetaStoreClient(hiveConf);
            SessionState.start(new CliSessionState(hiveConf));
        }
    }
View Full Code Here
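
Because setUp initializes the Driver and client only once (guarded by the null check), the matching cleanup belongs at the end of the suite rather than after each test. A sketch, assuming a shared static client field, which this page does not show:

    @AfterClass
    public static void shutdown() {
        if (client != null) {
            // Release the metastore connection once the whole suite is done.
            client.close();
        }
    }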

                HCatSemanticAnalyzer.class.getName());
        hcatConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
        hcatConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
        hcatConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname,
                "false");
        msc = new HiveMetaStoreClient(hcatConf, null);
        System.setProperty(HiveConf.ConfVars.PREEXECHOOKS.varname, " ");
        System.setProperty(HiveConf.ConfVars.POSTEXECHOOKS.varname, " ");
    }
View Full Code Here
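
The two-argument constructor used here takes a HiveMetaHookLoader (from the same org.apache.hadoop.hive.metastore package); passing null, as above, simply skips storage-handler hooks. The explicit no-op equivalent would look like this (a sketch, not from this page):

    HiveMetaHookLoader noHooks = new HiveMetaHookLoader() {
        @Override
        public HiveMetaHook getHook(org.apache.hadoop.hive.metastore.api.Table tbl) {
            return null; // no storage handler, so no hook
        }
    };
    HiveMetaStoreClient msc = new HiveMetaStoreClient(hcatConf, noHooks);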

        System.setProperty(HiveConf.ConfVars.PREEXECHOOKS.varname, " ");
        System.setProperty(HiveConf.ConfVars.POSTEXECHOOKS.varname, " ");

        hiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, warehousedir.toString());
        try {
            hmsc = new HiveMetaStoreClient(hiveConf, null);
            initalizeTables();
        } catch (Throwable e) {
            LOG.error("Exception encountered while setting up testcase", e);
            throw new Exception(e);
        } finally {
View Full Code Here
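
Once hmsc is connected, the warehouse setting applied above can be sanity-checked through the metastore itself, for example via the default database's location (a sketch reusing the snippet's hmsc and LOG; Database is org.apache.hadoop.hive.metastore.api.Database):

    Database db = hmsc.getDatabase("default");
    LOG.info("Default database location: {}", db.getLocationUri());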

    clientHiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
    clientHiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");


    SessionState.start(new CliSessionState(clientHiveConf));
    msc = new HiveMetaStoreClient(clientHiveConf, null);
    driver = new Driver(clientHiveConf);
  }
View Full Code Here
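
A typical use of the pair created above is to run DDL through the Driver and then verify the result through the metastore client (a sketch with a made-up table name, using JUnit's assertEquals):

    driver.run("create table example_tbl (id int)");
    org.apache.hadoop.hive.metastore.api.Table tbl = msc.getTable("default", "example_tbl");
    assertEquals("example_tbl", tbl.getTableName());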


//    Driver driver = new Driver(hiveConf);
//    SessionState.start(new CliSessionState(hiveConf));

        hiveMetaStoreClient = new HiveMetaStoreClient(hiveConf);
    }
View Full Code Here
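
The single-argument constructor used here behaves like the two-argument form with a null HiveMetaHookLoader, so the line above is effectively equivalent to:

    hiveMetaStoreClient = new HiveMetaStoreClient(hiveConf, null);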

   * @throws IOException
   */
  public static InputJobInfo getInputJobInfo(
      Configuration conf, InputJobInfo inputJobInfo)
    throws IOException {
    HiveMetaStoreClient client = null;
    HiveConf hiveConf;
    try {
      if (conf != null) {
        hiveConf = HCatUtil.getHiveConf(conf);
      } else {
        hiveConf = new HiveConf(GiraphHCatInputFormat.class);
      }
      client = HCatUtil.getHiveClient(hiveConf);
      Table table = HCatUtil.getTable(client, inputJobInfo.getDatabaseName(),
          inputJobInfo.getTableName());

      List<PartInfo> partInfoList = new ArrayList<PartInfo>();

      inputJobInfo.setTableInfo(HCatTableInfo.valueOf(table.getTTable()));
      if (table.getPartitionKeys().size() != 0) {
        // Partitioned table
        List<Partition> parts = client.listPartitionsByFilter(
            inputJobInfo.getDatabaseName(),
            inputJobInfo.getTableName(),
            inputJobInfo.getFilter(),
            (short) -1);

View Full Code Here
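
listPartitionsByFilter takes a filter expression over partition columns plus a maximum number of partitions to return, where (short) -1 means no limit. A standalone call shape (the database, table, and filter values are hypothetical):

    List<Partition> parts = client.listPartitionsByFilter(
            "default", "clicks", "ds > \"2012-01-01\"", (short) -1);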

  }

  private IMetaStoreClient getMetaStoreClient() throws ExploreException {
    if (metastoreClientLocal.get() == null) {
      try {
        IMetaStoreClient client = new HiveMetaStoreClient(new HiveConf());
        Supplier<IMetaStoreClient> supplier = Suppliers.ofInstance(client);
        metastoreClientLocal.set(supplier);

        // We use GC of the supplier as a signal for us to know that a thread is gone
        // The supplier is set into the thread local, which will get GC'ed when the thread is gone.
View Full Code Here
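
The snippet stores the client in a thread-local behind a Guava Supplier so that, per the comment, garbage collection of the supplier can signal that its owning thread has died. A plausible declaration for that field (an assumption; the page truncates before it is shown):

    // Hypothetical field declaration backing the code above.
    private final ThreadLocal<Supplier<IMetaStoreClient>> metastoreClientLocal =
            new ThreadLocal<Supplier<IMetaStoreClient>>();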
