Examples of org.apache.hadoop.hive.conf.HiveConf


   * @param bridge the Hadoop Thrift authentication bridge backing the metastore's Thrift server
   * @throws Throwable
   */
  public static void startMetaStore(int port, HadoopThriftAuthBridge bridge)
      throws Throwable {
    startMetaStore(port, bridge, new HiveConf(HMSHandler.class));
  }
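The overload above just fills in a default HiveConf built against HMSHandler. As a rough sketch of how a test might drive it, assuming the enclosing class is HiveMetaStore (the port value and the thread wrapper are assumptions, not part of the original source):

  // Hypothetical caller: run the metastore on a fixed test port in a daemon
  // thread; ShimLoader.getHadoopThriftAuthBridge() is the usual way to obtain
  // the auth bridge.
  final int msPort = 20101; // assumed free port
  Thread t = new Thread(new Runnable() {
    @Override
    public void run() {
      try {
        HiveMetaStore.startMetaStore(msPort, ShimLoader.getHadoopThriftAuthBridge());
      } catch (Throwable e) {
        throw new RuntimeException("metastore failed to start", e);
      }
    }
  });
  t.setDaemon(true);
  t.start();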


    HiveMetaStoreClient client = null;
    HCatTableInfo tableInfo = jobInfo.getTableInfo();
    List<Partition> partitionsAdded = new ArrayList<Partition>();
    try {
      HiveConf hiveConf = HCatUtil.getHiveConf(conf);
      client = HCatUtil.getHiveClient(hiveConf);
      StorerInfo storer = InternalUtil.extractStorerInfo(table.getTTable().getSd(),table.getParameters());

      FileStatus tblStat = fs.getFileStatus(tblPath);
      String grpName = tblStat.getGroup();

  private void cancelDelegationTokens(JobContext context) throws IOException{
    LOG.info("Cancelling deletgation token for the job.");
    HiveMetaStoreClient client = null;
    try {
      HiveConf hiveConf = HCatUtil
          .getHiveConf(context.getConfiguration());
      client = HCatUtil.getHiveClient(hiveConf);
      // cancel the delegation tokens that were acquired for this job now that
      // we are done - we should cancel if the tokens were acquired by
      // HCatOutputFormat and not if they were supplied by Oozie.
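The snippet follows HCatalog's usual acquire/release pattern around the metastore client. A minimal sketch of the complete shape, with the finally clause the excerpt truncates (the quiet-close helper is assumed from HCatUtil; the token cancellation itself is elided):

  HiveMetaStoreClient client = null;
  try {
    HiveConf hiveConf = HCatUtil.getHiveConf(context.getConfiguration());
    client = HCatUtil.getHiveClient(hiveConf);
    // ... cancel the delegation tokens acquired by HCatOutputFormat ...
  } finally {
    // tolerates a null client and swallows exceptions during close
    HCatUtil.closeHiveClientQuietly(client);
  }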

  private static final String TEST_DB1_NAME = "testdb1";
  private static final String TEST_DB2_NAME = "testdb2";

  @Override
  protected void setUp() throws Exception {
    hiveConf = new HiveConf(this.getClass());
    warehouse = new Warehouse(hiveConf);

    // set some values to use for getting conf. vars
    hiveConf.set("hive.metastore.metrics.enabled","true");
    hiveConf.set("hive.key1", "value1");

    initializeSetup();
  }

  private static void initializeSetup() throws Exception {

    hiveConf = new HiveConf(mrConf, TestHCatMultiOutputFormat.class);
    hiveConf.set("hive.metastore.local", "false");
    hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + msPort);
    hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3);
    hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTFAILURERETRIES, 3);
    hiveConf.set(HiveConf.ConfVars.SEMANTIC_ANALYZER_HOOK.varname,
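The ConfVars-based setters used above are the type-checked way to point a client at a remote metastore, while plain set() works on any string key. A condensed sketch (the class literal and port are placeholders):

  HiveConf hiveConf = new HiveConf(SomeTest.class);  // placeholder class for conf resolution
  hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + msPort);
  hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3);
  hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTFAILURERETRIES, 3);
  hiveConf.set("hive.metastore.local", "false");     // plain string keys work too
  HiveMetaStoreClient client = new HiveMetaStoreClient(hiveConf);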

   * @param database database
   * @return list of columns in comma-separated form
   * @throws Exception if any error occurs
   */
  private List<String> getTableData(String table, String database) throws Exception {
    HiveConf conf = new HiveConf();
    conf.addResource("hive-site.xml");
    ArrayList<String> results = new ArrayList<String>();
    ArrayList<String> temp = new ArrayList<String>();
    Hive hive = Hive.get(conf);
    org.apache.hadoop.hive.ql.metadata.Table tbl = hive.getTable(database, table);
    FetchWork work;
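Hive.get(conf) above returns a cached, thread-local Hive session object rather than a new connection on each call. A short sketch of that lifecycle (database and table names are placeholders; the calls throw HiveException):

  HiveConf conf = new HiveConf();
  conf.addResource("hive-site.xml");  // merge site-specific overrides
  Hive hive = Hive.get(conf);         // cached per thread
  org.apache.hadoop.hive.ql.metadata.Table tbl = hive.getTable("testdb1", "mytable");
  Hive.closeCurrent();                // release the thread-local instance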

                 OutputJobInfo outputJobInfo) throws IOException {
    HiveMetaStoreClient client = null;

    try {

      HiveConf hiveConf = HCatUtil.getHiveConf(conf);
      client = HCatUtil.getHiveClient(hiveConf);
      Table table = HCatUtil.getTable(client, outputJobInfo.getDatabaseName(),
        outputJobInfo.getTableName());

      List<String> indexList = client.listIndexNames(outputJobInfo.getDatabaseName(), outputJobInfo.getTableName(), Short.MAX_VALUE);

    t.start();
    Thread.sleep(40000); // give the metastore thread time to come up

    securityManager = System.getSecurityManager();
    // keep System.exit calls from killing the test JVM
    System.setSecurityManager(new NoExitSecurityManager());
    hcatConf = new HiveConf(TestHCatClient.class);
    hcatConf.set("hive.metastore.local", "false");
    hcatConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:"
      + msPort);
    hcatConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3);
    hcatConf.set(HiveConf.ConfVars.SEMANTIC_ANALYZER_HOOK.varname,

  @Override
  public ParseContext transform(ParseContext pctx) throws SemanticException {

    Map<Rule, NodeProcessor> opRules = new LinkedHashMap<Rule, NodeProcessor>();
    HiveConf conf = pctx.getConf();

    if (!HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVEGROUPBYSKEW)) {
      // process group-by pattern
      opRules.put(new RuleRegExp("R1",
          GroupByOperator.getOperatorName() + "%" +
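The static HiveConf.getBoolVar used above reads a typed variable from any Hadoop Configuration, falling back to the variable's compiled-in default when unset. A one-line sketch:

  HiveConf conf = pctx.getConf();
  // true when hive.groupby.skewindata is enabled
  boolean skewedGroupBy = HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVEGROUPBYSKEW);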

    protected void processGroupBy(GroupByOptimizerContext ctx,
        Stack<Node> stack,
        GroupByOperator groupByOp,
        int depth) throws SemanticException {
      HiveConf hiveConf = ctx.getConf();
      GroupByOptimizerSortMatch match = checkSortGroupBy(stack, groupByOp);
      boolean useMapperSort =
          HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_MAP_GROUPBY_SORT);
      GroupByDesc groupByOpDesc = groupByOp.getConf();
