Package org.apache.hadoop.hive.ql.parse

Examples of org.apache.hadoop.hive.ql.parse.SemanticException
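SemanticException extends HiveException and signals an error detected while a Hive statement is being analyzed, before any job runs. The excerpts below show its two recurring usages: failing analysis with a descriptive message, and wrapping a lower-level checked exception as the cause. A minimal sketch of both patterns (the class and helper names here are invented for illustration):

  import org.apache.hadoop.hive.metastore.api.MetaException;
  import org.apache.hadoop.hive.ql.parse.SemanticException;

  public class SemanticExceptionPatterns {

    // Pattern 1: reject the statement with a descriptive message.
    static void requireTableName(String tblName) throws SemanticException {
      if (tblName == null || tblName.isEmpty()) {
        throw new SemanticException("Table name is missing in the statement");
      }
    }

    // Pattern 2: wrap a lower-level checked exception as the cause so
    // callers only need to handle SemanticException.
    static void analyzeTable(String tblName) throws SemanticException {
      try {
        fetchFromMetastore(tblName);   // hypothetical helper
      } catch (MetaException e) {
        throw new SemanticException(e);
      }
    }

    private static void fetchFromMetastore(String tblName) throws MetaException {
      // placeholder: a real implementation would consult the metastore
    }
  }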


      // Verify write access to the table directory. Authorization failures,
      // metastore errors, and Hive errors all surface as SemanticException.
      try {
        AuthUtils.authorize(tblDir, FsAction.WRITE, conf);
      } catch (HCatException e) {
        throw new SemanticException(e);
      }
    } catch (MetaException e) {
      throw new SemanticException(e);
    } catch (HiveException e) {
      throw new SemanticException(e);
    }
  }
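Since every branch above wraps the cause identically, on Java 7 and later the outer catch clauses can be collapsed with multi-catch. A sketch under that assumption (lookupAndAuthorize is a stand-in for the metastore lookup and the AuthUtils.authorize call):

  import org.apache.hadoop.hive.metastore.api.MetaException;
  import org.apache.hadoop.hive.ql.metadata.HiveException;
  import org.apache.hadoop.hive.ql.parse.SemanticException;

  public class AuthorizeSketch {

    // Stand-in for the metastore lookup plus AuthUtils.authorize(...) above.
    static void lookupAndAuthorize() throws MetaException, HiveException {
    }

    static void authorizeTableDir() throws SemanticException {
      try {
        lookupAndAuthorize();
      } catch (MetaException | HiveException e) {
        // One clause replaces the identical wrap-and-rethrow branches.
        throw new SemanticException(e);
      }
    }
  }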


      // Explicit INPUT/OUTPUT storage drivers: pick up the driver class
      // names from the AST children.
      inDriver     = BaseSemanticAnalyzer.unescapeSQLString(((ASTNode) child.getChild(2)).getToken().getText());
      outDriver    = BaseSemanticAnalyzer.unescapeSQLString(((ASTNode) child.getChild(3)).getToken().getText());
      break;

    case HiveParser.TOK_TBLSEQUENCEFILE:
      throw new SemanticException("Operation not supported. HCatalog doesn't support Sequence File by default yet. " +
          "You may specify it through INPUT/OUTPUT storage drivers.");

    case HiveParser.TOK_TBLTEXTFILE:
      throw new SemanticException("Operation not supported. HCatalog doesn't support Text File by default yet. " +
          "You may specify it through INPUT/OUTPUT storage drivers.");

    case HiveParser.TOK_TBLRCFILE:
      // RCFile is the one format HCatalog supports out of the box.
      inputFormat = RCFileInputFormat.class.getName();
      outputFormat = RCFileOutputFormat.class.getName();
      inDriver = RCFileInputDriver.class.getName();
      outDriver = RCFileOutputDriver.class.getName();
      break;
    }

    if (inputFormat == null || outputFormat == null || inDriver == null || outDriver == null) {
      throw new SemanticException("File format specification in command Alter Table file format is incorrect.");
    }
    return ast;
  }
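The trailing null check generalizes to any set of storage settings the switch must resolve; pulled out on its own it looks like this (a sketch, not HCatalog's actual helper):

  import org.apache.hadoop.hive.ql.parse.SemanticException;

  public class FormatSpecCheck {

    // Every storage setting must have been resolved by the switch above;
    // otherwise the ALTER TABLE statement is rejected during analysis.
    static void requireResolved(String inputFormat, String outputFormat,
        String inDriver, String outDriver) throws SemanticException {
      if (inputFormat == null || outputFormat == null
          || inDriver == null || outDriver == null) {
        throw new SemanticException(
            "File format specification in command Alter Table file format is incorrect.");
      }
    }
  }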

        // Merge the HCatalog properties into the partition parameters and
        // write them back through the metastore.
        partParams.putAll(hcatProps);
        part.getTPartition().setParameters(partParams);
        db.alterPartition(tableName, part);
      }
    } catch (HiveException he) {
      throw new SemanticException(he);
    } catch (InvalidOperationException e) {
      throw new SemanticException(e);
    }
  }

    Map<String, String> tblProps;
    tblName = ast.getChild(0).getText();
    try {
      tblProps = context.getHive().getTable(tblName).getParameters();
    } catch (HiveException he) {
      throw new SemanticException(he);
    }

    // Tables created through HCatalog record their storage driver classes
    // in the table parameters; reject the operation if they are absent.
    inDriver = tblProps.get(HCatConstants.HCAT_ISD_CLASS);
    outDriver = tblProps.get(HCatConstants.HCAT_OSD_CLASS);

    if (inDriver == null || outDriver == null) {
      throw new SemanticException("Operation not supported. Partitions can be added only in a table created through HCatalog. " +
          "It seems table " + tblName + " was not created through HCatalog.");
    }
    return ast;
  }
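The same parameter lookup can be factored into a reusable guard; requireProperty below is a hypothetical helper, not part of HCatalog:

  import java.util.Map;
  import org.apache.hadoop.hive.ql.parse.SemanticException;

  public class TablePropsGuard {

    // Hypothetical helper: fetch a required table parameter or fail analysis.
    static String requireProperty(Map<String, String> tblProps, String key,
        String tblName) throws SemanticException {
      String value = tblProps.get(key);
      if (value == null) {
        throw new SemanticException("Table " + tblName + " is missing required property "
            + key + "; it was probably not created through HCatalog.");
      }
      return value;
    }
  }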

        // If the "strict" mode is on, we have to provide partition pruner for
        // each table.
        if ("strict".equalsIgnoreCase(HiveConf.getVar(conf,
            HiveConf.ConfVars.HIVEMAPREDMODE))) {
          if (!hasColumnExpr(prunerExpr)) {
            throw new SemanticException(ErrorMsg.NO_PARTITION_PREDICATE
                .getMsg("for Alias \"" + alias + "\" Table \""
                    + tab.getTableName() + "\""));
          }
        }
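The strict-mode guard can be exercised on its own: read hive.mapred.mode from the configuration and fail when a partitioned table is scanned without a partition predicate. A sketch (hasPartitionPredicate stands in for the hasColumnExpr(prunerExpr) test above):

  import org.apache.hadoop.hive.conf.HiveConf;
  import org.apache.hadoop.hive.ql.parse.SemanticException;

  public class StrictModeCheck {

    static void check(HiveConf conf, boolean hasPartitionPredicate,
        String alias, String tableName) throws SemanticException {
      String mode = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEMAPREDMODE);
      if ("strict".equalsIgnoreCase(mode) && !hasPartitionPredicate) {
        // Mirrors ErrorMsg.NO_PARTITION_PREDICATE in the excerpt above.
        throw new SemanticException("No partition predicate found for Alias \""
            + alias + "\" Table \"" + tableName + "\"");
      }
    }
  }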

      throw new HCatException(ErrorType.ERROR_ACCESS_CONTROL, ace);
    } catch (org.apache.hadoop.fs.permission.AccessControlException ace) {
      // Older Hadoop versions throw this deprecated exception instead.
      throw new HCatException(ErrorType.ERROR_ACCESS_CONTROL, ace);
    } catch (IOException ioe){
      throw new SemanticException(ioe);
    }

    final UserGroupInformation ugi;
    try {
      ugi = ShimLoader.getHadoopShims().getUGIForConf(conf);
    } catch (LoginException le) {
      throw new HCatException(ErrorType.ERROR_ACCESS_CONTROL, le);
    } catch (IOException ioe) {
      throw new SemanticException(ioe);
    }

    final FsPermission dirPerms = stat.getPermission();

    final String user = HiveConf.getBoolVar(conf, ConfVars.METASTORE_USE_THRIFT_SASL) ?

          indexBuilderTasks.add(indexBuilder);
        }
      }
      return indexBuilderTasks;
    } catch (Exception e) {
      // Any failure while assembling the index builder tasks is reported
      // as a SemanticException.
      throw new SemanticException(e);
    }
  }

        parentOp = childOp;
        assert parentOp.getChildOperators().size() == 1;
        childOp = parentOp.getChildOperators().get(0);
      }
      if (childOp == null) {
        throw new SemanticException(
            "Cannot find join op by tracing down the table scan operator tree");
      }
      // skip the big table pos
      int i = childOp.getParentOperators().indexOf(parentOp);
      if (i == bigTablePos) {
        bigTableAlias = alias;
        continue;
      }
      // set alias to work and put into smallTableAliasList
      newLocalWork.getAliasToWork().put(alias, op);
      smallTableAliasList.add(alias);
      // get input path and remove this alias from pathToAlias
      // because this file will be fetched by fetch operator
      LinkedHashMap<String, ArrayList<String>> pathToAliases = newWork.getPathToAliases();

      // record all the input paths for this alias
      HashSet<String> pathSet = new HashSet<String>();
      HashSet<String> emptyPath = new HashSet<String>();
      for (Map.Entry<String, ArrayList<String>> entry2 : pathToAliases.entrySet()) {
        String path = entry2.getKey();
        ArrayList<String> list = entry2.getValue();
        if (list.contains(alias)) {
          // add to path set
          if (!pathSet.contains(path)) {
            pathSet.add(path);
          }
          // remove this alias from the alias list
          list.remove(alias);
          if (list.size() == 0) {
            emptyPath.add(path);
          }
        }
      }
      // remove paths that no longer have any associated alias
      for (String path : emptyPath) {
        pathToAliases.remove(path);
      }

      // create fetch work
      FetchWork fetchWork = null;
      List<String> partDir = new ArrayList<String>();
      List<PartitionDesc> partDesc = new ArrayList<PartitionDesc>();

      for (String tablePath : pathSet) {
        PartitionDesc partitionDesc = newWork.getPathToPartitionInfo().get(tablePath);
        // create FetchWork for a non-partitioned table
        if (partitionDesc.getPartSpec() == null || partitionDesc.getPartSpec().size() == 0) {
          fetchWork = new FetchWork(tablePath, partitionDesc.getTableDesc());
          break;
        }
        // if the table is partitioned, add the path to partDir and its descriptor to partDesc
        partDir.add(tablePath);
        partDesc.add(partitionDesc);
      }
      // create FetchWork for a partitioned table
      if (fetchWork == null) {
        fetchWork = new FetchWork(partDir, partDesc);
      }
      // set alias to fetch work
      newLocalWork.getAliasToFetchWork().put(alias, fetchWork);
    }
    // remove small-table aliases from aliasToWork here to avoid concurrent modification
    for (String alias : smallTableAliasList) {
      newWork.getAliasToWork().remove(alias);
    }

    // set up local work
    newWork.setMapLocalWork(newLocalWork);
    // remove reducer
    newWork.setReducer(null);
    // return the big table alias
    if (bigTableAlias == null) {
      throw new SemanticException("Big Table Alias is null");
    }
    return bigTableAlias;
  }
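The FetchWork construction in the middle of that method is the part worth isolating: one FetchWork per small-table alias, shaped by whether the underlying table is partitioned. A sketch against the same (older) plan API, using the two FetchWork constructors the excerpt itself relies on:

  import java.util.ArrayList;
  import java.util.List;
  import java.util.Map;
  import java.util.Set;
  import org.apache.hadoop.hive.ql.plan.FetchWork;
  import org.apache.hadoop.hive.ql.plan.PartitionDesc;

  public class FetchWorkFactory {

    static FetchWork build(Set<String> pathSet,
        Map<String, PartitionDesc> pathToPartitionInfo) {
      List<String> partDir = new ArrayList<String>();
      List<PartitionDesc> partDesc = new ArrayList<PartitionDesc>();
      for (String path : pathSet) {
        PartitionDesc pd = pathToPartitionInfo.get(path);
        if (pd.getPartSpec() == null || pd.getPartSpec().isEmpty()) {
          // non-partitioned table: one directory plus its table descriptor
          return new FetchWork(path, pd.getTableDesc());
        }
        partDir.add(path);
        partDesc.add(pd);
      }
      // partitioned table: parallel lists of partition dirs and descriptors
      return new FetchWork(partDir, partDesc);
    }
  }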

      return bigTableAlias;

    } catch (Exception e) {
      // Chain the cause instead of printing the stack trace to stderr.
      throw new SemanticException("Failed to generate new MapJoin operator: " + e.getMessage(), e);
    }

  }

          // Remap the column to the name it carries in the parent reduce
          // sink operator's column mapping.
          ExprNodeColumnDesc tmpDesc = (ExprNodeColumnDesc) map.get(column);
          if (tmpDesc != null) {
            newColumn = tmpDesc.getColumn();
          }
          if (newColumn == null) {
            throw new SemanticException("No column name found in parent reduce sink op");
          }
          columnDesc.setColumn(newColumn);
        }
      }
    }
