Package org.apache.hadoop.hive.ql

Examples of org.apache.hadoop.hive.ql.Driver$QueryState


      // Copy hbase.* properties from the surrounding configuration into the HCatalog conf
      if (el.getKey().startsWith("hbase.")) {
        hcatConf.set(el.getKey(), el.getValue());
      }
    }

    driver = new Driver(hcatConf);
    SessionState.start(new CliSessionState(hcatConf));

  }
View Full Code Here
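
Pulled out of the test fixture above, the same bootstrap pattern looks roughly like the sketch below. It is only a sketch: it assumes the older Hive API where Driver is constructed directly from a HiveConf (as in every snippet on this page), that a hive-site.xml is visible on the classpath, and the class name DriverSetupSketch is made up for illustration.

import org.apache.hadoop.hive.cli.CliSessionState;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.Driver;
import org.apache.hadoop.hive.ql.session.SessionState;

public class DriverSetupSketch {
  public static Driver newDriver() {
    // Load hive-site.xml and the other Hive resources found on the classpath.
    HiveConf conf = new HiveConf(DriverSetupSketch.class);

    // A SessionState must be started before the Driver can compile or run anything.
    SessionState.start(new CliSessionState(conf));

    return new Driver(conf);
  }
}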


    }

    public static void execHiveDDL(String ddl) throws Exception {
        System.out.println("Executing ddl = " + ddl);

        Driver hiveDriver = new Driver();
        CommandProcessorResponse response = hiveDriver.run(ddl);

        System.out.println("response = " + response);
        System.out.println("response.getResponseCode() = " + response.getResponseCode());
        System.out.println("response.getErrorMessage() = " + response.getErrorMessage());
        System.out.println("response.getSQLState() = " + response.getSQLState());
View Full Code Here
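
The snippet above only prints the fields of the CommandProcessorResponse. A variant that actually fails on a non-zero response code might look like the following sketch (the method name is made up, and it assumes a SessionState has already been started so the no-argument Driver constructor can pick up the session configuration):

    public static void execHiveDDLChecked(String ddl) throws Exception {
        Driver hiveDriver = new Driver();
        try {
            CommandProcessorResponse response = hiveDriver.run(ddl);
            if (response.getResponseCode() != 0) {
                // Turn the error fields into an exception instead of just printing them.
                throw new Exception("DDL failed [" + response.getSQLState() + "]: "
                    + response.getErrorMessage());
            }
        } finally {
            hiveDriver.close();
        }
    }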

    ugi = ShimLoader.getHadoopShims().getUGIForConf(clientHiveConf);

    SessionState.start(new CliSessionState(clientHiveConf));
    msc = new HiveMetaStoreClient(clientHiveConf, null);
    driver = new Driver(clientHiveConf);
  }
View Full Code Here
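
Since the Driver and the HiveMetaStoreClient above share the same clientHiveConf, DDL run through the Driver is immediately visible through the metastore client. A rough sketch of that round trip, reusing the driver and msc fields from the setup (the table name sketch_tbl is made up for illustration):

  private void createAndVerify() throws Exception {
    CommandProcessorResponse resp = driver.run("create table sketch_tbl (id int)");
    if (resp.getResponseCode() != 0) {
      throw new Exception("create table failed: " + resp.getErrorMessage());
    }
    // The table created through the Driver can now be read back from the metastore.
    org.apache.hadoop.hive.metastore.api.Table tbl = msc.getTable("default", "sketch_tbl");
    System.out.println("created " + tbl.getDbName() + "." + tbl.getTableName());
  }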

    // generate tasks from index query string
    LOG.info("Generating tasks for re-entrant QL query: " + qlCommand.toString());
    HiveConf queryConf = new HiveConf(pctx.getConf(), CompactIndexHandler.class);
    HiveConf.setBoolVar(queryConf, HiveConf.ConfVars.COMPRESSRESULT, false);
    Driver driver = new Driver(queryConf);
    driver.compile(qlCommand.toString(), false);

    if (pctx.getConf().getBoolVar(ConfVars.HIVE_INDEX_COMPACT_BINARY_SEARCH) && useSorted) {
      // For now, only works if the predicate is a single condition
      MapWork work = null;
      String originalInputFormat = null;
      for (Task task : driver.getPlan().getRootTasks()) {
        // The index query should have one and only one map reduce task in the root tasks
        // Otherwise something is wrong, log the problem and continue using the default format
        if (task.getWork() instanceof MapredWork) {
          if (work != null) {
            LOG.error("Tried to use a binary search on a compact index but there were an " +
                      "unexpected number (>1) of root level map reduce tasks in the " +
                      "reentrant query plan.");
            work.setInputformat(null);
            work.setInputFormatSorted(false);
            break;
          }
          if (task.getWork() != null) {
            work = ((MapredWork)task.getWork()).getMapWork();
          }
          String inputFormat = work.getInputformat();
          originalInputFormat = inputFormat;
          if (inputFormat == null) {
            inputFormat = HiveConf.getVar(pctx.getConf(), HiveConf.ConfVars.HIVEINPUTFORMAT);
          }

          // We can only perform a binary search with HiveInputFormat and CombineHiveInputFormat
          // and BucketizedHiveInputFormat
          try {
            if (!HiveInputFormat.class.isAssignableFrom(Class.forName(inputFormat))) {
              work = null;
              break;
            }
          } catch (ClassNotFoundException e) {
            LOG.error("Map reduce work's input format class: " + inputFormat + " was not found. " +
                       "Cannot use the fact the compact index is sorted.");
            work = null;
            break;
          }

          work.setInputFormatSorted(true);
        }
      }

      if (work != null) {
        // Find the filter operator and expr node which act on the index column and mark them
        if (!findIndexColumnFilter(work.getAliasToWork().values())) {
          LOG.error("Could not locate the index column's filter operator and expr node. Cannot " +
                    "use the fact the compact index is sorted.");
          work.setInputformat(originalInputFormat);
          work.setInputFormatSorted(false);
        }
      }
    }


    queryContext.addAdditionalSemanticInputs(driver.getPlan().getInputs());
    queryContext.setQueryTasks(driver.getPlan().getRootTasks());
    return;
  }
View Full Code Here
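
Stripped of the index-specific branching, the core pattern in this handler is: compile the generated SQL on a throw-away Driver, walk the QueryPlan's root tasks looking for MapredWork, and then hand the compiled tasks and inputs to the outer query. A condensed sketch, reusing pctx, qlCommand and queryContext from the snippet above:

    HiveConf queryConf = new HiveConf(pctx.getConf(), CompactIndexHandler.class);
    Driver driver = new Driver(queryConf);
    driver.compile(qlCommand.toString(), false);   // compile only, never execute

    QueryPlan plan = driver.getPlan();
    for (Task task : plan.getRootTasks()) {
      if (task.getWork() instanceof MapredWork) {
        MapWork mapWork = ((MapredWork) task.getWork()).getMapWork();
        // Inspect or adjust the map-side plan here (input format, sortedness, ...).
      }
    }

    // Attach the compiled sub-query to the enclosing index query context.
    queryContext.addAdditionalSemanticInputs(plan.getInputs());
    queryContext.setQueryTasks(plan.getRootTasks());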


   
    HiveConf hiveConf = new HiveConf(TestSymlinkTextInputFormat.class);
   
    HiveConf.setBoolVar(hiveConf, HiveConf.ConfVars.HIVE_REWORK_MAPREDWORK, true);
    HiveConf.setBoolVar(hiveConf, HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, false);
    Driver drv = new Driver(hiveConf);
    drv.init();
    String tblName = "text_symlink_text";

    String createSymlinkTableCmd = "create table " + tblName + " (key int) stored as " +
        " inputformat 'org.apache.hadoop.hive.ql.io.SymlinkTextInputFormat' " +
        " outputformat 'org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat'";
   
    SessionState.start(hiveConf);
   
    boolean tblCreated = false;
    try {
      int ecode = 0;
      ecode = drv.run(createSymlinkTableCmd).getResponseCode();
      if (ecode != 0) {
        throw new Exception("Create table command: " + createSymlinkTableCmd
            + " failed with exit code= " + ecode);
      }

      tblCreated = true;
      String loadFileCommand = "LOAD DATA LOCAL INPATH '" +
        new Path(symlinkDir, "symlink_file").toString() + "' INTO TABLE " + tblName;
     
      ecode = drv.run(loadFileCommand).getResponseCode();
      if (ecode != 0) {
        throw new Exception("Load data command: " + loadFileCommand
            + " failed with exit code= " + ecode);
      }
     
      String cmd = "select key from " + tblName;
      drv.compile(cmd);

      //create scratch dir
      String emptyScratchDirStr;
      Path emptyScratchDir;
      Context ctx = new Context(newJob);
      emptyScratchDirStr = ctx.getMRTmpFileURI();
      emptyScratchDir = new Path(emptyScratchDirStr);
      FileSystem fileSys = emptyScratchDir.getFileSystem(newJob);
      fileSys.mkdirs(emptyScratchDir);
     
      QueryPlan plan = drv.getPlan();
      MapRedTask selectTask = (MapRedTask)plan.getRootTasks().get(0);

      List<Path> inputPaths = Utilities.getInputPaths(newJob, selectTask.getWork().getMapWork(), emptyScratchDir.toString(), ctx);
      Utilities.setInputPaths(newJob, inputPaths);

      Utilities.setMapRedWork(newJob, selectTask.getWork(), ctx.getMRTmpFileURI());
     
      CombineHiveInputFormat combineInputFormat = ReflectionUtils.newInstance(
          CombineHiveInputFormat.class, newJob);
     
      combineInputFormat.validateInput(newJob);
     
      InputSplit[] retSplits = combineInputFormat.getSplits(newJob, 1);
      assertEquals(1, retSplits.length);
    } catch (Exception e) {
      e.printStackTrace();
      fail("Caught exception " + e);
    } finally {
      if (tblCreated) {
        drv.run("drop table text_symlink_text").getResponseCode();
      }
    }
  }
View Full Code Here

  private LogHelper console;

  public CliDriver() {
    SessionState ss = SessionState.get();
    sp = new SetProcessor();
    qp = new Driver();
    dfs = new FsShell(ss != null ? ss.getConf() : new Configuration ());
    Log LOG = LogFactory.getLog("CliDriver");
    console = new LogHelper(LOG);
  }
View Full Code Here

    hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
    hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
    hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
    SessionState.start(new CliSessionState(hiveConf));
    msc = new HiveMetaStoreClient(hiveConf, null);
    driver = new Driver(hiveConf);
  }
View Full Code Here

          cmd.append(HiveUtils.escapeString(entry.getValue()));
          cmd.append("'");
        }
        cmd.append(")");
      }
      Driver driver = new Driver(conf);
      int rc = driver.compile(cmd.toString());
      if (rc != 0) {
        throw new SemanticException(ErrorMsg.NO_VALID_PARTN.getMsg());
      }
      inputs.addAll(driver.getPlan().getInputs());
    }
  }
View Full Code Here
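
The same trick, compiling a generated statement purely to harvest the entities it would read, can be isolated into a small helper. A rough sketch with a hypothetical method name, assuming the int-returning Driver.compile(String) and QueryPlan.getInputs() used above:

      // Hypothetical helper: compile 'sql' and return the tables/partitions it reads
      // (ReadEntity is org.apache.hadoop.hive.ql.hooks.ReadEntity).
      private Set<ReadEntity> readEntitiesOf(HiveConf conf, String sql) throws SemanticException {
        Driver driver = new Driver(conf);
        if (driver.compile(sql) != 0) {
          throw new SemanticException("Failed to compile: " + sql);
        }
        return driver.getPlan().getInputs();
      }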

      do {
        try {
          needRetry = false;
          if (proc != null) {
            if (proc instanceof Driver) {
              Driver qp = (Driver) proc;
              PrintStream out = ss.out;
              long start = System.currentTimeMillis();
              if (ss.getIsVerbose()) {
                out.println(cmd);
              }

              qp.setTryCount(tryCount);
              ret = qp.run(cmd).getResponseCode();
              if (ret != 0) {
                qp.close();
                return ret;
              }

              ArrayList<String> res = new ArrayList<String>();

              if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_CLI_PRINT_HEADER)) {
                // Print the column names
                boolean first_col = true;
                Schema sc = qp.getSchema();
                for (FieldSchema fs : sc.getFieldSchemas()) {
                  if (!first_col) {
                    out.print('\t');
                  }
                  out.print(fs.getName());
                  first_col = false;
                }
                out.println();
              }

              try {
                while (qp.getResults(res)) {
                  for (String r : res) {
                    out.println(r);
                  }
                  res.clear();
                  if (out.checkError()) {
                    break;
                  }
                }
              } catch (IOException e) {
                console.printError("Failed with exception " + e.getClass().getName() + ":"
                    + e.getMessage(), "\n"
                    + org.apache.hadoop.util.StringUtils.stringifyException(e));
                ret = 1;
              }

              int cret = qp.close();
              if (ret == 0) {
                ret = cret;
              }

              long end = System.currentTimeMillis();
View Full Code Here
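
Condensed, the CLI loop above boils down to: run the command, optionally print a header row from the result schema, then drain the results in batches until getResults() returns false. A minimal sketch of that flow (the method is illustrative, and it assumes the classic Driver.getResults(ArrayList) API and an already-started SessionState):

  static int runAndPrint(HiveConf conf, String cmd) throws Exception {
    Driver qp = new Driver(conf);
    try {
      int ret = qp.run(cmd).getResponseCode();
      if (ret != 0) {
        return ret;                                  // compilation or execution failed
      }

      // Optional header: one tab-separated line of column names.
      Schema schema = qp.getSchema();
      if (schema != null && schema.getFieldSchemas() != null) {
        StringBuilder header = new StringBuilder();
        for (FieldSchema fs : schema.getFieldSchemas()) {
          if (header.length() > 0) {
            header.append('\t');
          }
          header.append(fs.getName());
        }
        System.out.println(header);
      }

      // Drain the result rows in batches.
      ArrayList<String> batch = new ArrayList<String>();
      while (qp.getResults(batch)) {
        for (String row : batch) {
          System.out.println(row);
        }
        batch.clear();
      }
      return 0;
    } finally {
      qp.close();
    }
  }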
