Package org.apache.hadoop.hive.ql.parse

Examples of org.apache.hadoop.hive.ql.parse.SemanticException
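
SemanticException is the checked exception Hive's query compiler throws when a statement parses successfully but fails semantic analysis: unsupported operations, invalid column types, missing metadata, and so on. The snippets below, drawn from HCatalog's semantic-analyzer hooks and Hive's map-join optimizer, show the two common usage patterns: throwing it directly with a message, and wrapping a lower-level exception as the cause.

A minimal sketch of both patterns (every name here except SemanticException itself is hypothetical, used only for illustration):

import org.apache.hadoop.hive.ql.parse.SemanticException;

public class ExampleAnalyzerHook {

  // Pattern 1: reject an unsupported construct with a direct message.
  public void checkSupported(boolean supported) throws SemanticException {
    if (!supported) {
      throw new SemanticException("Operation not supported.");
    }
  }

  // Pattern 2: rewrap a lower-level failure so callers see a uniform
  // compile-time error. SemanticException inherits the (String, Throwable)
  // constructor from HiveException, so the cause is preserved.
  public void analyzeTable(String tableName) throws SemanticException {
    try {
      loadTableMetadata(tableName); // hypothetical metastore lookup
    } catch (Exception e) {
      throw new SemanticException("Failed to analyze table " + tableName, e);
    }
  }

  private void loadTableMetadata(String tableName) throws Exception {
    // placeholder for a real metastore call
  }
}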


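The first example is from HCatalog's DDL semantic-analyzer hook: a white-list switch over Hive parser tokens that returns the AST for allowed operations and throws SemanticException for everything else.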
    case HiveParser.TOK_ALTERTABLE_PARTITION:
      // Only the merge-files variant (ALTER TABLE ... PARTITION ... CONCATENATE)
      // is unsupported; every other partition-level alteration passes through.
      if (((ASTNode) ast.getChild(1)).getToken().getType()
          == HiveParser.TOK_ALTERTABLE_ALTERPARTS_MERGEFILES) {
        throw new SemanticException("Operation not supported.");
      }
      return ast;

      // HCat will allow these operations to be performed.
      // Database DDL
    case HiveParser.TOK_SHOWDATABASES:
    case HiveParser.TOK_DROPDATABASE:
    case HiveParser.TOK_SWITCHDATABASE:
    case HiveParser.TOK_DESCDATABASE:
    case HiveParser.TOK_ALTERDATABASE_PROPERTIES:

      // Index DDL
    case HiveParser.TOK_ALTERINDEX_PROPERTIES:
    case HiveParser.TOK_CREATEINDEX:
    case HiveParser.TOK_DROPINDEX:
    case HiveParser.TOK_SHOWINDEXES:

      // View DDL
      // "alter view add partition" does not work because of the nature of implementation
      // of the DDL in hive. Hive will internally invoke another Driver on the select statement,
      // and HCat does not let "select" statement through. I cannot find a way to get around it
      // without modifying hive code. So just leave it unsupported.
      //case HiveParser.TOK_ALTERVIEW_ADDPARTS:
    case HiveParser.TOK_ALTERVIEW_DROPPARTS:
    case HiveParser.TOK_ALTERVIEW_PROPERTIES:
    case HiveParser.TOK_ALTERVIEW_RENAME:
    case HiveParser.TOK_CREATEVIEW:
    case HiveParser.TOK_DROPVIEW:

      // Authorization DDL
    case HiveParser.TOK_CREATEROLE:
    case HiveParser.TOK_DROPROLE:
    case HiveParser.TOK_GRANT_ROLE:
    case HiveParser.TOK_GRANT_WITH_OPTION:
    case HiveParser.TOK_GRANT:
    case HiveParser.TOK_REVOKE_ROLE:
    case HiveParser.TOK_REVOKE:
    case HiveParser.TOK_SHOW_GRANT:
    case HiveParser.TOK_SHOW_ROLE_GRANT:

      // Misc DDL
    case HiveParser.TOK_LOCKTABLE:
    case HiveParser.TOK_UNLOCKTABLE:
    case HiveParser.TOK_SHOWLOCKS:
    case HiveParser.TOK_DESCFUNCTION:
    case HiveParser.TOK_SHOWFUNCTIONS:
    case HiveParser.TOK_EXPLAIN:

      // Table DDL
    case HiveParser.TOK_ALTERTABLE_ADDPARTS:
    case HiveParser.TOK_ALTERTABLE_ADDCOLS:
    case HiveParser.TOK_ALTERTABLE_CHANGECOL_AFTER_POSITION:
    case HiveParser.TOK_ALTERTABLE_SERDEPROPERTIES:
    case HiveParser.TOK_ALTERTABLE_CLUSTER_SORT:
    case HiveParser.TOK_ALTERTABLE_DROPPARTS:
    case HiveParser.TOK_ALTERTABLE_PROPERTIES:
    case HiveParser.TOK_ALTERTABLE_RENAME:
    case HiveParser.TOK_ALTERTABLE_RENAMECOL:
    case HiveParser.TOK_ALTERTABLE_REPLACECOLS:
    case HiveParser.TOK_ALTERTABLE_SERIALIZER:
    case HiveParser.TOK_ALTERTABLE_TOUCH:
    case HiveParser.TOK_DESCTABLE:
    case HiveParser.TOK_DROPTABLE:
    case HiveParser.TOK_SHOW_TABLESTATUS:
    case HiveParser.TOK_SHOWPARTITIONS:
    case HiveParser.TOK_SHOWTABLES:
      return ast;

    // Throw an exception in all other cases: this switch is a white-list of
    // allowed operations.
    default:
      throw new SemanticException("Operation not supported.");

    }
  }

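This fragment from a post-analyze hook shows the rewrap pattern in context: checked HCatException and HiveException failures from DDL authorization are converted to SemanticException so the compiler reports them uniformly.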

      }

      authorizeDDL(context, rootTasks);

    } catch (HCatException e) {
      throw new SemanticException(e);
    } catch (HiveException e) {
      throw new SemanticException(e);
    }

    if (hook != null) {
      hook.postAnalyze(context, rootTasks);
    }

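The next example, from HCatalog's CREATE TABLE hook, walks the children of the CREATE TABLE AST node and rejects what HCatalog cannot handle: CTAS queries, non-string partition columns, and incomplete file-format specifications.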
    Hive db;
    try {
      db = context.getHive();
    } catch (HiveException e) {
      throw new SemanticException(
        "Couldn't get Hive DB instance in semantic analysis phase.",
        e);
    }

    // Analyze and create tbl properties object
    int numCh = ast.getChildCount();

    String inputFormat = null, outputFormat = null;
    tableName = BaseSemanticAnalyzer.getUnescapedName((ASTNode) ast
      .getChild(0));
    boolean likeTable = false;

    for (int num = 1; num < numCh; num++) {
      ASTNode child = (ASTNode) ast.getChild(num);

      switch (child.getToken().getType()) {

      case HiveParser.TOK_QUERY: // CTAS
        throw new SemanticException(
          "Operation not supported. Create Table As Select " +
            "(CTAS) is not a valid operation.");

      case HiveParser.TOK_TABLEBUCKETS:
        break;

      case HiveParser.TOK_TBLSEQUENCEFILE:
        inputFormat = HCatConstants.SEQUENCEFILE_INPUT;
        outputFormat = HCatConstants.SEQUENCEFILE_OUTPUT;
        break;

      case HiveParser.TOK_TBLTEXTFILE:
        inputFormat = org.apache.hadoop.mapred.TextInputFormat.class.getName();
        outputFormat = org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat.class.getName();

        break;

      case HiveParser.TOK_LIKETABLE:
        likeTable = true;
        break;

      case HiveParser.TOK_IFNOTEXISTS:
        try {
          List<String> tables = db.getTablesByPattern(tableName);
          if (tables != null && !tables.isEmpty()) { // table already exists
            return ast;
          }
        } catch (HiveException e) {
          throw new SemanticException(e);
        }
        break;

      case HiveParser.TOK_TABLEPARTCOLS:
        List<FieldSchema> partCols = BaseSemanticAnalyzer
          .getColumns((ASTNode) child.getChild(0), false);
        for (FieldSchema fs : partCols) {
          if (!fs.getType().equalsIgnoreCase("string")) {
            throw new SemanticException(
              "Operation not supported. HCatalog only " +
                "supports partition columns of type string. " +
                "Column " + fs.getName() + " has type: " + fs.getType());
          }
        }
        break;

      case HiveParser.TOK_STORAGEHANDLER:
        String storageHandler = BaseSemanticAnalyzer
          .unescapeSQLString(child.getChild(0).getText());
        if (org.apache.commons.lang.StringUtils
          .isNotEmpty(storageHandler)) {
          return ast;
        }

        break;

      case HiveParser.TOK_TABLEFILEFORMAT:
        if (child.getChildCount() < 2) {
          throw new SemanticException(
            "Incomplete specification of File Format. " +
              "You must provide both InputFormat and OutputFormat.");
        }
        inputFormat = BaseSemanticAnalyzer.unescapeSQLString(child
          .getChild(0).getText());
        outputFormat = BaseSemanticAnalyzer.unescapeSQLString(child
          .getChild(1).getText());
        break;

      case HiveParser.TOK_TBLRCFILE:
        inputFormat = RCFileInputFormat.class.getName();
        outputFormat = RCFileOutputFormat.class.getName();
        break;

      }
    }

    if (!likeTable && (inputFormat == null || outputFormat == null)) {
      throw new SemanticException(
        "STORED AS specification is either incomplete or incorrect.");
    }


    return ast;

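Its post-analyze counterpart authorizes the new table, including its storage location, and rewraps IOException and HiveException failures: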
            desc.getInputFormat(),
            desc.getOutputFormat());
        // Authorization checks are performed by storageHandler.getAuthorizationProvider()
        // if StorageDelegationAuthorizationProvider is used.
      } catch (IOException e) {
        throw new SemanticException(e);
      }
    }

    if (desc != null) {
      try {
        Table table = context.getHive().newTable(desc.getTableName());
        if (desc.getLocation() != null) {
          table.setDataLocation(new Path(desc.getLocation()).toUri());
        }
        if (desc.getStorageHandler() != null) {
          table.setProperty(
            org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_STORAGE,
            desc.getStorageHandler());
        }
        for (Map.Entry<String, String> prop : tblProps.entrySet()) {
          table.setProperty(prop.getKey(), prop.getValue());
        }
        for (Map.Entry<String, String> prop : desc.getSerdeProps().entrySet()) {
          table.setSerdeParam(prop.getKey(), prop.getValue());
        }
        //TODO: set other Table properties as needed

        //authorize against the table operation so that location permissions can be checked if any

        if (HiveConf.getBoolVar(context.getConf(),
          HiveConf.ConfVars.HIVE_AUTHORIZATION_ENABLED)) {
          authorize(table, Privilege.CREATE);
        }
      } catch (HiveException ex) {
        throw new SemanticException(ex);
      }
      // desc is known to be non-null here, so setting the table properties
      // cannot throw a NullPointerException.
      desc.setTblProps(tblProps);
    }

    context.getConf().set(HCatConstants.HCAT_CREATE_TBL_NAME, tableName);

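The remaining examples come from Hive's map-join optimizer. This one moves each small-table alias of a join into fetch work for the map-side local task, throwing SemanticException if the join operator cannot be found or no big-table alias was identified.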
        parentOp = childOp;
        assert parentOp.getChildOperators().size() == 1;
        childOp = parentOp.getChildOperators().get(0);
      }
      if (childOp == null) {
        throw new SemanticException(
            "Cannot find join op by tracing down the table scan operator tree");
      }
      // skip the big table pos
      int i = childOp.getParentOperators().indexOf(parentOp);
      if (i == bigTablePos) {
        bigTableAlias = alias;
        continue;
      }
      // set alias to work and put into smallTableAliasList
      newLocalWork.getAliasToWork().put(alias, op);
      smallTableAliasList.add(alias);
      // get input path and remove this alias from pathToAlias
      // because this file will be fetched by fetch operator
      LinkedHashMap<String, ArrayList<String>> pathToAliases = newWork.getMapWork().getPathToAliases();

      // keep record all the input path for this alias
      HashSet<String> pathSet = new HashSet<String>();
      HashSet<String> emptyPath = new HashSet<String>();
      for (Map.Entry<String, ArrayList<String>> entry2 : pathToAliases.entrySet()) {
        String path = entry2.getKey();
        ArrayList<String> list = entry2.getValue();
        if (list.contains(alias)) {
          // add to path set (a HashSet ignores duplicates)
          pathSet.add(path);
          // remove this alias from the alias list
          list.remove(alias);
          if (list.isEmpty()) {
            emptyPath.add(path);
          }
        }
      }
      // remove any path that no longer has an associated alias
      for (String path : emptyPath) {
        pathToAliases.remove(path);
      }

      // create fetch work
      FetchWork fetchWork = null;
      List<String> partDir = new ArrayList<String>();
      List<PartitionDesc> partDesc = new ArrayList<PartitionDesc>();

      for (String tablePath : pathSet) {
        PartitionDesc partitionDesc = newWork.getMapWork().getPathToPartitionInfo().get(tablePath);
        // create fetch work for a non-partitioned table
        if (partitionDesc.getPartSpec() == null || partitionDesc.getPartSpec().size() == 0) {
          fetchWork = new FetchWork(tablePath, partitionDesc.getTableDesc());
          break;
        }
        // if the table is partitioned, add partDir and partitionDesc
        partDir.add(tablePath);
        partDesc.add(partitionDesc);
      }
      // create fetch work for a partitioned table
      if (fetchWork == null) {
        TableDesc table = newWork.getMapWork().getAliasToPartnInfo().get(alias).getTableDesc();
        fetchWork = new FetchWork(partDir, partDesc, table);
      }
      // set alias to fetch work
      newLocalWork.getAliasToFetchWork().put(alias, fetchWork);
    }
    // remove small table aliases from aliasToWork; done outside the loop
    // above to avoid concurrent modification
    for (String alias : smallTableAliasList) {
      newWork.getMapWork().getAliasToWork().remove(alias);
    }

    // set up local work
    newWork.getMapWork().setMapLocalWork(newLocalWork);
    // remove reducer
    newWork.setReduceWork(null);
    // return the big table alias
    if (bigTableAlias == null) {
      throw new SemanticException("Big Table Alias is null");
    }
    return bigTableAlias;
  }

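When the map-join conversion itself fails, the whole failure is wrapped in a SemanticException; chaining the cause keeps the original stack trace available: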
      return bigTableAlias;

    } catch (Exception e) {
      // Chain the original exception as the cause instead of printing the
      // stack trace and discarding it.
      throw new SemanticException("Failed to generate new map join operator: "
          + e.getMessage(), e);
    }
  }

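Two small recursive guards check the operator tree around a prospective map join. The first walks the parent operators: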
  }

  private static void checkParentOperatorType(Operator<? extends OperatorDesc> op)
      throws SemanticException {
    if (!op.opAllowedBeforeMapJoin()) {
      throw new SemanticException(ErrorMsg.OPERATOR_NOT_ALLOWED_WITH_MAPJOIN.getMsg());
    }
    if (op.getParentOperators() != null) {
      for (Operator<? extends OperatorDesc> parentOp : op.getParentOperators()) {
        checkParentOperatorType(parentOp);
      }
    }
  }

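The second is its mirror image over the child operators: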
  }

  private static void checkChildOperatorType(Operator<? extends OperatorDesc> op)
      throws SemanticException {
    if (!op.opAllowedAfterMapJoin()) {
      throw new SemanticException(ErrorMsg.OPERATOR_NOT_ALLOWED_WITH_MAPJOIN.getMsg());
    }
    if (op.getChildOperators() != null) {
      for (Operator<? extends OperatorDesc> childOp : op.getChildOperators()) {
        checkChildOperatorType(childOp);
      }
    }
  }

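Before converting a common join, the optimizer also verifies that the chosen big-table position is compatible with any outer-join conditions: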
    JoinCondDesc[] condns = desc.getConds();
    Byte[] tagOrder = desc.getTagOrder();

    if (!noCheckOuterJoin) {
      if (checkMapJoin(mapJoinPos, condns) < 0) {
        throw new SemanticException(ErrorMsg.NO_OUTER_MAPJOIN.getMsg());
      }
    }

    RowResolver outputRS = opParseCtxMap.get(op).getRowResolver();
    Map<Byte, List<ExprNodeDesc>> keyExprMap = new HashMap<Byte, List<ExprNodeDesc>>();

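Finally, a MAPJOIN hint that asks for every table to be cached leaves no valid big-table position, so the hint is reported as invalid: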
      // All tables are to be cached - this is not possible. In the future we
      // could support this by randomly leaving some table out of the set of
      // tables to be cached.
      if (mapJoinPos == -1) {
        throw new SemanticException(ErrorMsg.INVALID_MAPJOIN_HINT.getMsg(
            Arrays.toString(joinTree.getBaseSrc())));
      }
    }

    return mapJoinPos;
