Package org.apache.hadoop.hive.metastore

Examples of org.apache.hadoop.hive.metastore.HiveMetaStoreClient
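Before the snippets below, here is a minimal, self-contained sketch of the typical client lifecycle: build a HiveMetaStoreClient from a HiveConf, issue a couple of read calls, and close it. It assumes hive-site.xml (with hive.metastore.uris) is on the classpath; the database and table names are placeholders, not taken from any of the examples on this page.

import java.util.List;

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.thrift.TException;

public class MetaStoreClientSketch {
  public static void main(String[] args) throws TException {
    // Reads hive-site.xml from the classpath; hive.metastore.uris selects the remote metastore.
    HiveConf conf = new HiveConf();

    HiveMetaStoreClient client = new HiveMetaStoreClient(conf);
    try {
      // List the databases visible through this metastore.
      List<String> databases = client.getAllDatabases();
      System.out.println("Databases: " + databases);

      // "default" and "my_table" are placeholder names.
      Table table = client.getTable("default", "my_table");
      System.out.println("Table location: " + table.getSd().getLocation());
    } finally {
      client.close();
    }
  }
}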


   * support is removed
   * @return a metastore client
   * @throws MetaException if a working client can't be created
   */
  private IMetaStoreClient createMetaStoreClient() throws MetaException {
    return new HiveMetaStoreClient(this.conf);
  }


    //add the token to the clientUgi for securely talking to the metastore
    clientUgi.addToken(t);
    //Create the metastore client as the clientUgi. Doing it this way
    //gives the client access to the token that was added to the
    //clientUgi above
    HiveMetaStoreClient hiveClient =
      clientUgi.doAs(new PrivilegedExceptionAction<HiveMetaStoreClient>() {
        public HiveMetaStoreClient run() throws Exception {
          HiveMetaStoreClient hiveClient =
            new HiveMetaStoreClient(conf);
          return hiveClient;
        }
      });

    assertTrue("Couldn't connect to metastore", hiveClient != null);

    //try out some metastore operations
    createDBAndVerifyExistence(hiveClient);

    //check that getDelegationToken fails since we are not authenticating
    //over kerberos
    boolean pass = false;
    try {
      hiveClient.getDelegationToken(clientUgi.getUserName());
    } catch (MetaException ex) {
      pass = true;
    }
    assertTrue("Expected the getDelegationToken call to fail", pass == true);
    hiveClient.close();

    //Now cancel the delegation token
    HiveMetaStore.cancelDelegationToken(tokenStrForm);

    //now metastore connection should fail
    hiveClient =
      clientUgi.doAs(new PrivilegedExceptionAction<HiveMetaStoreClient>() {
        public HiveMetaStoreClient run() {
          try {
            HiveMetaStoreClient hiveClient =
              new HiveMetaStoreClient(conf);
            return hiveClient;
          } catch (MetaException e) {
            return null;
          }
        }

            throw new MetaException(
              "Failed to load storage handler:  " + ex.getMessage());
          }
        }
      };
    return new HiveMetaStoreClient(conf, hookLoader);
  }

   * @throws MetaException
   */
  private IMetaStoreClient createMetaStoreClient() throws MetaException {
    boolean useFileStore = conf.getBoolean("hive.metastore.usefilestore", false);
    if (!useFileStore) {
      return new HiveMetaStoreClient(this.conf);
    }
    return new MetaStoreClient(this.conf);
  }
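The factory above chooses between the Thrift-backed HiveMetaStoreClient and a file-based MetaStoreClient based on a boolean property. A minimal sketch of driving that switch from configuration follows; the property name comes from the snippet itself, while the surrounding class is purely illustrative.

import org.apache.hadoop.conf.Configuration;

public class ClientSelectionSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // false (the default in the factory above) selects HiveMetaStoreClient;
    // true selects the file-based MetaStoreClient.
    conf.setBoolean("hive.metastore.usefilestore", true);
    System.out.println("usefilestore = " + conf.getBoolean("hive.metastore.usefilestore", false));
  }
}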

     * @param server - Server URI for HCat server
     * @throws Exception
     */
    public void set(JobConf launcherJobConf, String principal, String server) throws Exception {
        try {
            HiveMetaStoreClient client = getHCatClient(principal, server);
            XLog.getLog(getClass()).debug(
                    "HCatCredentialHelper: set: User name for which token will be asked from HCat: "
                            + launcherJobConf.get(USER_NAME));
            String tokenStrForm = client.getDelegationToken(launcherJobConf.get(USER_NAME), UserGroupInformation
                    .getLoginUser().getShortUserName());
            Token<DelegationTokenIdentifier> hcatToken = new Token<DelegationTokenIdentifier>();
            hcatToken.decodeFromUrlString(tokenStrForm);
            launcherJobConf.getCredentials().addToken(new Text("HCat Token"), hcatToken);
            XLog.getLog(getClass()).debug("Added the HCat token in job conf");

     * @return HiveMetaStoreClient
     * @throws MetaException
     */
    public HiveMetaStoreClient getHCatClient(String principal, String server) throws MetaException {
        HiveConf hiveConf = new HiveConf();
        XLog.getLog(getClass()).debug("getHCatClient: Principal: " + principal + " Server: " + server);
        // The server argument is the metastore Thrift URI (hive.metastore.uris)
        hiveConf.set(HIVE_METASTORE_SASL_ENABLED, "true");
        hiveConf.set(HIVE_METASTORE_KERBEROS_PRINCIPAL, principal);
        hiveConf.set(HIVE_METASTORE_LOCAL, "false");
        hiveConf.set(HiveConf.ConfVars.METASTOREURIS.varname, server);
        return new HiveMetaStoreClient(hiveConf);
    }
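For context, a hedged sketch of how the helper above might be driven from a launcher job. The set(JobConf, String, String) signature comes from the snippet; the no-argument constructor is assumed, the conf is assumed to already carry the user name under whatever key USER_NAME resolves to, and the principal and server values are placeholders.

import org.apache.hadoop.mapred.JobConf;

public class HCatCredentialExample {
  public static void main(String[] args) throws Exception {
    JobConf launcherJobConf = new JobConf();

    // Assumed no-arg construction of the helper shown above.
    HCatCredentialHelper helper = new HCatCredentialHelper();
    helper.set(launcherJobConf,
        "hive/metastore.example.com@EXAMPLE.COM", // metastore Kerberos principal (placeholder)
        "thrift://metastore.example.com:9083");   // metastore Thrift URI (placeholder)
  }
}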

    HiveConf hiveConf = new HiveConf(conf, HiveApiOutputCommitter.class);

    String dbName = outputDesc.getDbName();
    String tableName = outputDesc.getTableName();

    HiveMetaStoreClient client;
    Table hiveTable;
    try {
      client = new HiveMetaStoreClient(hiveConf);
      hiveTable = client.getTable(dbName, tableName);
      // CHECKSTYLE: stop IllegalCatch
    } catch (Exception e) {
      // CHECKSTYLE: resume IllegalCatch
      throw new IOException(e);
    }

    Partition partition = new Partition();
    partition.setDbName(dbName);
    partition.setTableName(tableName);
    partition.setParameters(outputInfo.getTableParams());
    List<String> partitionValues = HiveUtils
        .orderedPartitionValues(hiveTable.getPartitionKeys(),
            outputDesc.getPartitionValues());
    partition.setValues(partitionValues);

    StorageDescriptor sd = new StorageDescriptor(hiveTable.getSd());
    sd.setParameters(outputInfo.getSerializerParams());
    sd.setLocation(outputInfo.getFinalOutputPath());
    sd.setCols(outputInfo.getColumnInfo());
    partition.setSd(sd);

    try {
      client.add_partition(partition);
      // CHECKSTYLE: stop IllegalCatch
    } catch (Exception e) {
      // CHECKSTYLE: resume IllegalCatch
      throw new IOException(e);
    }
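As a follow-up to the commit logic above, a hedged sketch of reading the partition back to confirm it was registered; the database, table, and partition values are placeholders, and this verification step is illustrative rather than part of the committer shown here.

import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.thrift.TException;

public class PartitionCheckSketch {
  public static void main(String[] args) throws TException {
    HiveConf hiveConf = new HiveConf();
    HiveMetaStoreClient client = new HiveMetaStoreClient(hiveConf);
    try {
      // Placeholder partition values, in partition-key order.
      List<String> partitionValues = Arrays.asList("2018-01-01");
      Partition partition = client.getPartition("default", "my_table", partitionValues);
      System.out.println("Partition location: " + partition.getSd().getLocation());
    } finally {
      client.close();
    }
  }
}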

    throws TException {
    String dbName = outputDesc.getDbName();
    String tableName = outputDesc.getTableName();

    HiveConf hiveConf = new HiveConf(conf, HiveApiInputFormat.class);
    HiveMetaStoreClient client = new HiveMetaStoreClient(hiveConf);

    Table table = client.getTable(dbName, tableName);
    sanityCheck(table, outputDesc);

    OutputInfo oti = new OutputInfo(table);

    String partitionPiece;

   */
  private void checkPartitionDoesntExist(Configuration conf,
    HiveOutputDescription description, OutputInfo oti)
    throws IOException {
    HiveConf hiveConf = new HiveConf(conf, HiveApiInputFormat.class);
    HiveMetaStoreClient client;
    try {
      client = new HiveMetaStoreClient(hiveConf);
    } catch (MetaException e) {
      throw new IOException(e);
    }

    String db = description.getDbName();
