Package org.apache.hadoop.hive.ql.metadata

Examples of org.apache.hadoop.hive.ql.metadata.Hive
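All of the snippets below revolve around the same entry point: obtaining a Hive client (a thread-local wrapper around the metastore client) from a HiveConf via Hive.get(conf), or from a hook context via context.getHive(), and then reading or mutating metadata through it. A minimal self-contained sketch of that pattern, assuming placeholder database and table names ("default"/"src") that are not taken from the examples:

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.Table;

public class HiveClientSketch {
  public static void main(String[] args) throws Exception {
    HiveConf conf = new HiveConf();
    Hive db = Hive.get(conf);                      // thread-local instance
    try {
      Table tbl = db.getTable("default", "src");   // placeholder db/table names
      System.out.println(tbl.getTableName() + " @ " + tbl.getPath());
    } finally {
      Hive.closeCurrent();                         // release the thread-local client
    }
  }
}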


  /**
   * Check that every partition read by the given table scan also exists in the
   * index table, and return the set of partitions used by the query.
   */
  public static Set<Partition> checkPartitionsCoveredByIndex(TableScanOperator tableScan,
      ParseContext pctx,
      Map<Table, List<Index>> indexes)
    throws HiveException {
    Hive hive = Hive.get(pctx.getConf());
    Set<Partition> queryPartitions = null;
    // make sure each partition exists on the index table
    PrunedPartitionList queryPartitionList = pctx.getOpToPartList().get(tableScan);
    if (queryPartitionList.getConfirmedPartns() != null
        && !queryPartitionList.getConfirmedPartns().isEmpty()) {
    // ...


   
    @Override
    public ASTNode preAnalyze(HiveSemanticAnalyzerHookContext context,
            ASTNode ast) throws SemanticException {

        Hive db;
        try {
            db = context.getHive();
        } catch (HiveException e) {
            throw new SemanticException(
                    "Couldn't get Hive DB instance in semantic analysis phase.",
                    e);
        }

        // Analyze and create tbl properties object
        int numCh = ast.getChildCount();

        String inputFormat = null, outputFormat = null;
        tableName = BaseSemanticAnalyzer.getUnescapedName((ASTNode) ast
                .getChild(0));
        boolean likeTable = false;

        for (int num = 1; num < numCh; num++) {
            ASTNode child = (ASTNode) ast.getChild(num);

            switch (child.getToken().getType()) {

                case HiveParser.TOK_QUERY: // CTAS
                    throw new SemanticException(
                            "Operation not supported. Create table as " +
                            "Select is not a valid operation.");

                case HiveParser.TOK_TABLEBUCKETS:
                    break;

                case HiveParser.TOK_TBLSEQUENCEFILE:
                    inputFormat = HCatConstants.SEQUENCEFILE_INPUT;
                    outputFormat = HCatConstants.SEQUENCEFILE_OUTPUT;
                    break;

                case HiveParser.TOK_TBLTEXTFILE:
                    inputFormat = org.apache.hadoop.mapred.TextInputFormat.class.getName();
                    outputFormat = org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat.class.getName();
                    break;

                case HiveParser.TOK_LIKETABLE:
                    likeTable = true;
                    break;

                case HiveParser.TOK_IFNOTEXISTS:
                    try {
                        List<String> tables = db.getTablesByPattern(tableName);
                        if (tables != null && tables.size() > 0) { // table exists
                            return ast;
                        }
                    } catch (HiveException e) {
                    // ...
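The hook above short-circuits CREATE TABLE ... IF NOT EXISTS by returning the AST untouched when the table already exists. A compileable sketch of just that existence check, assuming the AbstractSemanticAnalyzerHook base class, a hypothetical hook class name, and a hard-coded pattern in place of the AST-derived table name:

import java.util.List;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.parse.ASTNode;
import org.apache.hadoop.hive.ql.parse.AbstractSemanticAnalyzerHook;
import org.apache.hadoop.hive.ql.parse.HiveSemanticAnalyzerHookContext;
import org.apache.hadoop.hive.ql.parse.SemanticException;

public class ExistingTableShortCircuitHook extends AbstractSemanticAnalyzerHook {
  @Override
  public ASTNode preAnalyze(HiveSemanticAnalyzerHookContext context, ASTNode ast)
      throws SemanticException {
    Hive db;
    try {
      db = context.getHive();
    } catch (HiveException e) {
      throw new SemanticException("Couldn't get Hive DB instance.", e);
    }
    try {
      // "my_table" is a placeholder; real hooks read the name from the AST.
      List<String> tables = db.getTablesByPattern("my_table");
      if (tables != null && !tables.isEmpty()) {
        return ast; // table already exists; nothing more to check
      }
    } catch (HiveException e) {
      throw new SemanticException(e);
    }
    return ast;
  }
}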

    // Skip all authorization checks when authorization is disabled.
    if (!HiveConf.getBoolVar(context.getConf(),
        HiveConf.ConfVars.HIVE_AUTHORIZATION_ENABLED)) {
      return;
    }
   
    Hive hive;
    try {
      hive = context.getHive();
   
      for (Task<? extends Serializable> task : rootTasks) {
        if (task.getWork() instanceof DDLWork) {
          // ...

  @Override
  public ASTNode preAnalyze(HiveSemanticAnalyzerHookContext context, ASTNode ast)
  throws SemanticException {

    Hive db;
    try {
      db = context.getHive();
    } catch (HiveException e) {
      throw new SemanticException("Couldn't get Hive DB instance in semantic analysis phase.", e);
    }

    // Analyze and create tbl properties object
    int numCh = ast.getChildCount();

    databaseName = BaseSemanticAnalyzer.getUnescapedName((ASTNode)ast.getChild(0));

    for (int num = 1; num < numCh; num++) {
      ASTNode child = (ASTNode) ast.getChild(num);

      switch (child.getToken().getType()) {

      case HiveParser.TOK_IFNOTEXISTS:
        try {
          List<String> dbs = db.getDatabasesByPattern(databaseName);
          if (dbs != null && dbs.size() > 0) { // db exists
            return ast;
          }
        } catch (HiveException e) {
          throw new SemanticException(e);
          // ...

      throws HiveException, AuthorizationException {
    HashSet<ReadEntity> inputs = sem.getInputs();
    HashSet<WriteEntity> outputs = sem.getOutputs();
    SessionState ss = SessionState.get();
    HiveOperation op = ss.getHiveOperation();
    Hive db = sem.getDb();
    if (op != null) {
      // CREATE TABLE and CTAS are both authorized as output operations
      // against the current database.
      if (op.equals(HiveOperation.CREATETABLE_AS_SELECT)
          || op.equals(HiveOperation.CREATETABLE)) {
        ss.getAuthorizer().authorize(
            db.getDatabase(db.getCurrentDatabase()), null,
            HiveOperation.CREATETABLE_AS_SELECT.getOutputRequiredPrivileges());
      } else {
        if (op.equals(HiveOperation.IMPORT)) {
          ImportSemanticAnalyzer isa = (ImportSemanticAnalyzer) sem;
          if (!isa.existsTable()) {
            ss.getAuthorizer().authorize(
                db.getDatabase(db.getCurrentDatabase()), null,
                HiveOperation.CREATETABLE_AS_SELECT.getOutputRequiredPrivileges());
          }
        }
      }
      if (outputs != null && outputs.size() > 0) {
        for (WriteEntity write : outputs) {

          if (write.getType() == WriteEntity.Type.PARTITION) {
            Partition part = db.getPartition(write.getTable(),
                write.getPartition().getSpec(), false);
            if (part != null) {
              ss.getAuthorizer().authorize(write.getPartition(), null,
                      op.getOutputRequiredPrivileges());
              continue;
              // ...

                               "IMPLICIT",
                               plan.getQueryStr());

      // Lock the database also
      try {
        Hive db = Hive.get(conf);
        lockObjects.add(new HiveLockObj(
                                        new HiveLockObject(db.getCurrentDatabase(), lockData),
                                        HiveLockMode.SHARED));
      } catch (HiveException e) {
        throw new SemanticException(e.getMessage(), e);
      }
      // ...

  @Override
  protected int execute(DriverContext driverContext) {

    try {
      Hive db = Hive.get(conf);
      IndexMetadataChangeWork work = this.getWork();
      String tblName = work.getIndexTbl();
      Table tbl = db.getTable(work.getDbName(), tblName);
      if (tbl == null) {
        console.printError("Index table cannot be null.");
        return 1;
      }

      if (!tbl.getTableType().equals(TableType.INDEX_TABLE)) {
        console.printError("Table " + tbl.getTableName() + " is not an index table.");
        return 1;
      }

      if (tbl.isPartitioned() && work.getPartSpec() == null) {
        console.printError("Index table is partitioned, but no partition specified.");
        return 1;
      }

      if (work.getPartSpec() != null) {
        Partition part = db.getPartition(tbl, work.getPartSpec(), false);
        if (part == null) {
          console.printError("Partition " +
              Warehouse.makePartName(work.getPartSpec(), false)
              + " does not exist.");
          return 1;
        }

        // Record the partition data's modification time and persist it
        // as the index creation time.
        Path url = part.getPartitionPath();
        FileSystem fs = url.getFileSystem(conf);
        FileStatus fstat = fs.getFileStatus(url);

        part.getParameters().put(HiveIndex.INDEX_TABLE_CREATETIME, Long.toString(fstat.getModificationTime()));
        db.alterPartition(tbl.getTableName(), part);
      } else {
        Path url = tbl.getPath();
        FileSystem fs = url.getFileSystem(conf);
        FileStatus fstat = fs.getFileStatus(url);
        tbl.getParameters().put(HiveIndex.INDEX_TABLE_CREATETIME, Long.toString(fstat.getModificationTime()));
        db.alterTable(tbl.getTableName(), tbl);
      }
    } catch (Exception e) {
      e.printStackTrace();
      console.printError("Error changing index table/partition metadata "
          + e.getMessage());
          // ...
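The task above stamps the data's modification time into the table or partition parameters and writes the change back with alterPartition()/alterTable(). A stripped-down sketch of the table branch; the table name and parameter key here are placeholders, not the ones the task uses:

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.Table;

public class StampTableParameter {
  public static void main(String[] args) throws Exception {
    HiveConf conf = new HiveConf();
    Hive db = Hive.get(conf);
    Table tbl = db.getTable("default", "example_table"); // placeholder names
    // Mutate the in-memory Table object, then push the change to the metastore.
    tbl.getParameters().put("last_touched", Long.toString(System.currentTimeMillis()));
    db.alterTable(tbl.getTableName(), tbl);
  }
}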

  @Override
  public int execute(DriverContext driverContext) {

    // Create the db
    Hive db;
    try {
      db = Hive.get(conf);

      CreateDatabaseDesc createDatabaseDesc = work.getCreateDatabaseDesc();
      if (null != createDatabaseDesc) {
      // ...
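The DDLTask above dispatches on the work object and, when a CreateDatabaseDesc is present, creates the database. A minimal sketch of the same operation performed directly against the Hive client, assuming placeholder database name and description:

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.ql.metadata.Hive;

public class CreateDatabaseExample {
  public static void main(String[] args) throws Exception {
    HiveConf conf = new HiveConf();
    Hive db = Hive.get(conf);
    Database database = new Database();
    database.setName("example_db");                  // placeholder name
    database.setDescription("created via Hive client");
    db.createDatabase(database, true);               // true = ignore if it exists
  }
}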

      HiveTableDef def) throws WindowingException
  {
    def = def == null ? new HiveTableDef() : def;
    HiveMetaStoreClient hiveMSC = qDef.getTranslationInfo()
        .getHiveMSClient();
    Hive hive = qDef.getTranslationInfo().getHive();

    def.setSpec(spec);

    if (spec.getDbName() == null)
    {
      spec.setDbName(hive.getCurrentDatabase());
    }

    try
    {
      Table t = hiveMSC.getTable(spec.getDbName(), spec.getTableName());
      // ...

 
  private static HiveQueryDef translate(QueryDef qDef, HiveQuerySpec spec) throws WindowingException
  {
    HiveQueryDef def = new HiveQueryDef();
    HiveQueryExecutor hiveQryExec = qDef.getTranslationInfo().getHiveQueryExecutor();
    Hive hive = qDef.getTranslationInfo().getHive();
   
    String tableName = hiveQryExec.createTableAsQuery(spec.getHiveQuery());
    HiveTableSpec tSpec = new HiveTableSpec();
    tSpec.setDbName(hive.getCurrentDatabase());
    tSpec.setTableName(tableName);
    tSpec.setPartition(spec.getPartition());
    tSpec.setOrder(spec.getOrder());
    def = (HiveQueryDef) InputTranslation.translate(qDef, tSpec, (HiveTableDef) def);
    return def;
    // ...
