Package org.apache.hadoop.hive.metastore

Examples of org.apache.hadoop.hive.metastore.HiveMetaStoreClient
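The excerpts below, drawn from HCatalog, show typical patterns for constructing, caching, and using a HiveMetaStoreClient.

First, test-harness setup: the HiveConf is pointed at the HCat semantic analyzer, pre/post-execution hooks are cleared, concurrency support is disabled, and a Warehouse plus a HiveMetaStoreClient are built from that configuration.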


        hcatConf.set(HiveConf.ConfVars.SEMANTIC_ANALYZER_HOOK.varname, HCatSemanticAnalyzer.class.getName());
        hcatConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
        hcatConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
        hcatConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
        clientWH = new Warehouse(hcatConf);
        msc = new HiveMetaStoreClient(hcatConf, null);
        System.setProperty(HiveConf.ConfVars.PREEXECHOOKS.varname, " ");
        System.setProperty(HiveConf.ConfVars.POSTEXECHOOKS.varname, " ");
    }
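In checkOutputSpecs, a client is obtained through HCatUtil.getHiveClient and handed to handleDuplicatePublish which, as the name suggests, guards against publishing the same output twice.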



    @Override
    public void checkOutputSpecs(JobContext context) throws IOException, InterruptedException {
        OutputJobInfo jobInfo = HCatOutputFormat.getJobInfo(context);
        HiveMetaStoreClient client = null;
        try {
            HiveConf hiveConf = HCatUtil.getHiveConf(context.getConfiguration());
            client = HCatUtil.getHiveClient(hiveConf);
            handleDuplicatePublish(context,
                jobInfo,
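Resolving a Pig load/store location: the location string is split into database and table names, and the table is fetched through the metastore client; a NoSuchObjectException is rethrown as a PigException for a friendlier frontend error.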

        Pair<String, String> dbTablePair = PigHCatUtil.getDBTableNames(location);
        String dbName = dbTablePair.first;
        String tableName = dbTablePair.second;
        Table table = null;
        HiveMetaStoreClient client = null;
        try {
            client = getHiveMetaClient(hcatServerUri, hcatServerPrincipal, PigHCatUtil.class);
            table = HCatUtil.getTable(client, dbName, tableName);
        } catch (NoSuchObjectException nsoe) {
            throw new PigException("Table not found : " + nsoe.getMessage(), PIG_EXCEPTION_CODE); // prettier error messages to frontend
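setOutput validates the output table before a job runs: it lists the table's indexes and rejects any table with a non-deferred-rebuild (automatic) index, since storing into such a table from Pig/MapReduce is unsupported, then reads the table's StorageDescriptor.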

     * @throws IOException if there is an error communicating with the metadata server
     */
    @SuppressWarnings("unchecked")
    public static void setOutput(Configuration conf, Credentials credentials,
                                 OutputJobInfo outputJobInfo) throws IOException {
        HiveMetaStoreClient client = null;

        try {

            HiveConf hiveConf = HCatUtil.getHiveConf(conf);
            client = HCatUtil.getHiveClient(hiveConf);
            Table table = HCatUtil.getTable(client, outputJobInfo.getDatabaseName(),
                outputJobInfo.getTableName());

            List<String> indexList = client.listIndexNames(outputJobInfo.getDatabaseName(), outputJobInfo.getTableName(), Short.MAX_VALUE);

            for (String indexName : indexList) {
                Index index = client.getIndex(outputJobInfo.getDatabaseName(), outputJobInfo.getTableName(), indexName);
                if (!index.isDeferredRebuild()) {
                    throw new HCatException(ErrorType.ERROR_NOT_SUPPORTED, "Store into a table with an automatic index from Pig/Mapreduce is not supported");
                }
            }
            StorageDescriptor sd = table.getTTable().getSd();
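cleanupJob: after delegating to the underlying output committer, the metastore delegation token is cancelled (when a token was issued and a token signature is set in the job configuration), and the client is closed quietly in the finally block.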

    @Override
    public void cleanupJob(JobContext context) throws IOException {
        getBaseOutputCommitter().cleanupJob(HCatMapRedUtil.createJobContext(context));

        //Cancel HCat and JobTracker tokens
        HiveMetaStoreClient client = null;
        try {
            HiveConf hiveConf = HCatUtil.getHiveConf(context.getConfiguration());
            client = HCatUtil.getHiveClient(hiveConf);
            String tokenStrForm = client.getTokenStrForm();
            if (tokenStrForm != null && context.getConfiguration().get(HCatConstants.HCAT_KEY_TOKEN_SIGNATURE) != null) {
                client.cancelDelegationToken(tokenStrForm);
            }
        } catch (Exception e) {
            LOG.warn("Failed to cancel delegation token", e);
        } finally {
            HCatUtil.closeHiveClientQuietly(client);
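getInputJobInfo builds a HiveConf (from the supplied Configuration, or fresh if none is given), fetches the table, and for partitioned tables lists the matching partitions with listPartitionsByFilter, passing -1 to request all of them.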

    /**
     * Returns the given InputJobInfo after populating with data queried from the metadata service.
     */
    private static InputJobInfo getInputJobInfo(
        Configuration conf, InputJobInfo inputJobInfo, String locationFilter) throws Exception {
        HiveMetaStoreClient client = null;
        HiveConf hiveConf = null;
        try {
            if (conf != null) {
                hiveConf = HCatUtil.getHiveConf(conf);
            } else {
                hiveConf = new HiveConf(HCatInputFormat.class);
            }
            client = HCatUtil.getHiveClient(hiveConf);
            Table table = HCatUtil.getTable(client, inputJobInfo.getDatabaseName(),
                inputJobInfo.getTableName());

            List<PartInfo> partInfoList = new ArrayList<PartInfo>();

            inputJobInfo.setTableInfo(HCatTableInfo.valueOf(table.getTTable()));
            if (table.getPartitionKeys().size() != 0) {
                //Partitioned table
                List<Partition> parts = client.listPartitionsByFilter(inputJobInfo.getDatabaseName(),
                    inputJobInfo.getTableName(),
                    inputJobInfo.getFilter(),
                    (short) -1);

                // Default to 100,000 partitions if hive.metastore.maxpartition is not defined
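buildHcatDelegationToken creates a client as the login user, then obtains a metastore delegation token while impersonating the target user via ugi.doAs.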


    private String buildHcatDelegationToken(String user)
        throws IOException, InterruptedException, MetaException, TException {
        HiveConf c = new HiveConf();
        final HiveMetaStoreClient client = new HiveMetaStoreClient(c);
        LOG.info("user: " + user + " loginUser: " + UserGroupInformation.getLoginUser().getUserName());
        final TokenWrapper twrapper = new TokenWrapper();
        final UserGroupInformation ugi = UgiFactory.getUgi(user);
        String s = ugi.doAs(new PrivilegedExceptionAction<String>() {
            public String run()
                throws IOException, MetaException, TException {
                String u = ugi.getUserName();
                return client.getDelegationToken(u);
            }
        });
        return s;
    }
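A cache hit: with an equivalent HiveConf, HiveClientCache.get() returns the same client instance; changing a configuration value that is not part of the cache key (here DYNAMICPARTITIONMAXPARTS) does not invalidate the entry, and an earlier close() does not prevent reuse.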

    @Test
    public void testCacheHit() throws IOException, MetaException, LoginException {

        HiveClientCache cache = new HiveClientCache(1000);
        HiveMetaStoreClient client = cache.get(hiveConf);
        assertNotNull(client);
        client.close(); // close shouldn't matter

        // Setting a configuration value that is not part of the cache key should still return the same client
        hiveConf.setIntVar(HiveConf.ConfVars.DYNAMICPARTITIONMAXPARTS, 10);
        HiveMetaStoreClient client2 = cache.get(hiveConf);
        assertNotNull(client2);
        assertEquals(client, client2);
        client2.close();
    }
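A cache miss: the metastore URI is part of the cache key, and URIs are compared as strings, so setting hive.metastore.uris to a value that differs even by whitespace yields a different client.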


    @Test
    public void testCacheMiss() throws IOException, MetaException, LoginException {
        HiveClientCache cache = new HiveClientCache(1000);
        HiveMetaStoreClient client = cache.get(hiveConf);
        assertNotNull(client);

        // Set a different URI, since the URI is one of the criteria deciding whether to return the same client or not
        hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, " "); // URIs are checked for string equivalence, even spaces make them different
        HiveMetaStoreClient client2 = cache.get(hiveConf);
        assertNotNull(client2);
        assertNotSame(client, client2);
    }
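Expiry behavior: with an expiry of one second, a get() after the timeout returns a new instance, and close() on the expired client actually tears it down, as isClosed() confirms.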

        HiveClientCache cache = new HiveClientCache(1);
        HiveClientCache.CacheableHiveMetaStoreClient client = (HiveClientCache.CacheableHiveMetaStoreClient) cache.get(hiveConf);
        assertNotNull(client);

        Thread.sleep(2500);
        HiveMetaStoreClient client2 = cache.get(hiveConf);
        client.close();
        assertTrue(client.isClosed()); // close() after *expiry time* and *a cache access* should have torn down the client

        assertNotNull(client2);
        assertNotSame(client, client2);
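Finally, a minimal self-contained sketch of direct HiveMetaStoreClient usage, for comparison with the HCatUtil-mediated patterns above. It assumes a metastore reachable through hive.metastore.uris (or an embedded metastore configured in hive-site.xml); the method name listDefaultTables is illustrative and not from any of the excerpts, and the exact checked exceptions vary across Hive versions:

    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;

    // List every table in the "default" database, then release the client.
    public static void listDefaultTables() throws Exception { // checked exceptions vary by Hive version
        HiveConf conf = new HiveConf(); // reads hive-site.xml, including hive.metastore.uris
        HiveMetaStoreClient client = null;
        try {
            client = new HiveMetaStoreClient(conf);
            for (String tableName : client.getAllTables("default")) {
                System.out.println(tableName);
            }
        } finally {
            if (client != null) {
                client.close(); // always close to release the underlying connection
            }
        }
    }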
