Examples of exprNodeColumnDesc


Examples of org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc
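
A minimal, self-contained sketch before the collected snippets: constructing a column reference expression by hand. The type, internal column name and table alias below are illustrative values, not taken from any example on this page.

    import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
    import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
    import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;

    // A column reference: type, internal column name, table alias, and
    // whether the column is a virtual/partition column.
    ExprNodeDesc colExpr = new ExprNodeColumnDesc(
        TypeInfoFactory.stringTypeInfo,  // column type
        "_col0",                         // internal column name (illustrative)
        "t1",                            // table alias (illustrative)
        false);                          // not a virtual column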

    ArrayList<ColumnInfo> colInfoList = inputRR.getColumnInfos();
    RowResolver rsNewRR = new RowResolver();
    int pos = 0;
    for (ColumnInfo colInfo : colInfoList) {
      // Wrap each input column in an ExprNodeColumnDesc so it can be carried
      // through the ReduceSink as a value column.
      ExprNodeDesc valueColExpr = new ExprNodeColumnDesc(colInfo.getType(),
          colInfo.getInternalName(), colInfo.getTabAlias(), colInfo.getIsVirtualCol());
      valueCols.add(valueColExpr);
      colExprMap.put(colInfo.getInternalName(), valueColExpr);
      String outColName = SemanticAnalyzer.getColumnInternalName(pos++);
      outputColumnNames.add(outColName);

      // Re-register the column in the new RowResolver under its output name.
      String[] alias = inputRR.reverseLookup(colInfo.getInternalName());
      ColumnInfo newColInfo = new ColumnInfo(
          outColName, colInfo.getType(), alias[0],
          colInfo.getIsVirtualCol(), colInfo.isHiddenVirtualCol());
      rsNewRR.put(alias[0], alias[1], newColInfo);
    }

    // Create the ReduceSink operator and record the map from output column
    // names back to the value expressions.
    input = putOpInsertMap(OperatorFactory.getAndMakeChild(
        PlanUtils.getReduceSinkDesc(orderCols, valueCols, outputColumnNames,
            false, -1, partCols, orderString.toString(), -1),
        new RowSchema(rsNewRR.getColumnInfos()), input), rsNewRR);
    input.setColumnExprMap(colExprMap);


    /*
     * a. Construct the RowResolver (RR) for the Extract operator.
     */
    RowResolver extractRR = new RowResolver();
    LinkedHashMap<String[], ColumnInfo> colsAddedByHaving =
        new LinkedHashMap<String[], ColumnInfo>();
    pos = 0;

    for (ColumnInfo colInfo : colInfoList) {
      String[] alias = inputRR.reverseLookup(colInfo.getInternalName());
      /*
       * Skip an internalName we have already encountered: it comes up again
       * because it also had to be added for the Having clause. Those entries
       * are added at the end, in a loop over colsAddedByHaving (see below).
       */
      if (colsAddedByHaving.containsKey(alias)) {
        continue;
      }
      ASTNode astNode = PTFTranslator.getASTNode(colInfo, inputRR);
      ColumnInfo eColInfo = new ColumnInfo(
          SemanticAnalyzer.getColumnInternalName(pos++), colInfo.getType(), alias[0],
          colInfo.getIsVirtualCol(), colInfo.isHiddenVirtualCol());

      if (astNode == null) {
        extractRR.put(alias[0], alias[1], eColInfo);
      } else {
        /*
         * If the Having clause refers to this column, it may have been added
         * twice: once with ASTNode.toStringTree() as the alias and once with
         * the real alias. Remember it so the real alias is re-added below.
         */
        extractRR.putExpression(astNode, eColInfo);
        if (!astNode.toStringTree().toLowerCase().equals(alias[1])) {
          colsAddedByHaving.put(alias, eColInfo);
        }
      }
    }

    for (Map.Entry<String[], ColumnInfo> columnAddedByHaving : colsAddedByHaving.entrySet()) {
      String[] alias = columnAddedByHaving.getKey();
      ColumnInfo eColInfo = columnAddedByHaving.getValue();
      extractRR.put(alias[0], alias[1], eColInfo);
    }

    /*
     * b. Construct Extract Operator.
     */
    input = putOpInsertMap(OperatorFactory.getAndMakeChild(
        new ExtractDesc(
            // A single column expression that refers to the entire
            // reduce-side VALUE struct.
            new ExprNodeColumnDesc(TypeInfoFactory.stringTypeInfo,
                Utilities.ReduceField.VALUE.toString(), "", false)),
        new RowSchema(inputRR.getColumnInfos()),
        input), extractRR);
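
Newer Hive versions also provide a convenience constructor that copies the type, internal name, table alias and virtual-column flag straight from a ColumnInfo; assuming it is available in your version, the per-column wrapping in the first loop above condenses to:

    // Assumes the single-argument ExprNodeColumnDesc(ColumnInfo) constructor
    // exists in your Hive version.
    ExprNodeDesc valueColExpr = new ExprNodeColumnDesc(colInfo);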


Examples of org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc

        // Skip columns whose (unqualified) name does not match the regex.
        if (!regex.matcher(tmp[1]).matches()) {
          continue;
        }

        // Note the five-argument constructor, which additionally records
        // whether the column is a skewed column.
        ExprNodeColumnDesc expr = new ExprNodeColumnDesc(colInfo.getType(),
            name, colInfo.getTabAlias(), colInfo.getIsVirtualCol(), colInfo.isSkewedCol());
        if (subQuery) {
          output.checkColumn(tmp[0], tmp[1]);
        }
        col_list.add(expr);
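
For context, a sketch of how such a regex-driven column selection might look in isolation, using java.util.regex.Pattern; inputRR and col_list are assumed as in the snippet above, and the pattern itself is hypothetical:

    // Select only the columns whose unqualified name matches the regex.
    Pattern regex = Pattern.compile("col_.*", Pattern.CASE_INSENSITIVE);
    for (ColumnInfo colInfo : inputRR.getColumnInfos()) {
      String[] qualified = inputRR.reverseLookup(colInfo.getInternalName());
      if (!regex.matcher(qualified[1]).matches()) {
        continue;  // unqualified name does not match
      }
      col_list.add(new ExprNodeColumnDesc(colInfo.getType(),
          colInfo.getInternalName(), colInfo.getTabAlias(),
          colInfo.getIsVirtualCol()));
    }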

Examples of org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc

      if (exprInfo == null) {
        throw new SemanticException(ErrorMsg.INVALID_COLUMN.getMsg(grpbyExpr));
      }

      // Reference the group-by key by its internal name; the table alias is
      // empty at this stage.
      groupByKeys.add(new ExprNodeColumnDesc(exprInfo.getType(),
          exprInfo.getInternalName(), "", false));
      String field = getColumnInternalName(i);
      outputColumnNames.add(field);
      groupByOutputRowResolver.putExpression(grpbyExpr,
          new ColumnInfo(field, exprInfo.getType(), null, false));
      colExprMap.put(field, groupByKeys.get(groupByKeys.size() - 1));
    }
    // For each aggregation
    HashMap<String, ASTNode> aggregationTrees = parseInfo
        .getAggregationExprsForClause(dest);
    assert (aggregationTrees != null);
    // Get the last column name of the reduce KEY; it is the column
    // corresponding to the distinct aggregation, if any.
    String lastKeyColName = null;
    List<String> inputKeyCols = ((ReduceSinkDesc) rs.getConf()).getOutputKeyColumnNames();
    if (inputKeyCols.size() > 0) {
      lastKeyColName = inputKeyCols.get(inputKeyCols.size() - 1);
    }
    List<ExprNodeDesc> reduceValues = ((ReduceSinkDesc) rs.getConf()).getValueCols();
    int numDistinctUDFs = 0;
    for (Map.Entry<String, ASTNode> entry : aggregationTrees.entrySet()) {
      ASTNode value = entry.getValue();

      // This is the GenericUDAF name
      String aggName = unescapeIdentifier(value.getChild(0).getText());
      boolean isDistinct = value.getType() == HiveParser.TOK_FUNCTIONDI;
      boolean isAllColumns = value.getType() == HiveParser.TOK_FUNCTIONSTAR;

      // Convert children to aggParameters
      ArrayList<ExprNodeDesc> aggParameters = new ArrayList<ExprNodeDesc>();
      // Child 0 is the function name, so the parameters start at index 1.
      for (int i = 1; i < value.getChildCount(); i++) {
        ASTNode paraExpr = (ASTNode) value.getChild(i);
        ColumnInfo paraExprInfo =
            groupByInputRowResolver.getExpression(paraExpr);
        if (paraExprInfo == null) {
          throw new SemanticException(ErrorMsg.INVALID_COLUMN.getMsg(paraExpr));
        }

        String paraExpression = paraExprInfo.getInternalName();
        assert (paraExpression != null);
        if (isDistinct && lastKeyColName != null) {
          // If the aggregation is distinct, the parameter name is constructed
          // as KEY.lastKeyColName:<tag>._colx
          paraExpression = Utilities.ReduceField.KEY.name() + "." +
              lastKeyColName + ":" + numDistinctUDFs + "." +
              getColumnInternalName(i - 1);
        }

        ExprNodeDesc expr = new ExprNodeColumnDesc(paraExprInfo.getType(),
            paraExpression, paraExprInfo.getTabAlias(),
            paraExprInfo.getIsVirtualCol());
        ExprNodeDesc reduceValue = isConstantParameterInAggregationParameters(
            paraExprInfo.getInternalName(), reduceValues);
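
To make the distinct-parameter naming concrete, a tiny worked example with hypothetical values (lastKeyColName "_col3", tag 0 for the first distinct UDF, parameter index i = 1, so getColumnInternalName(0) yields "_col0"):

    // The constructed parameter name is "KEY._col3:0._col0".
    String paraExpression = Utilities.ReduceField.KEY.name()
        + "." + "_col3" + ":" + 0 + "." + "_col0";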

Examples of org.apache.hadoop.hive.ql.plan.exprNodeColumnDesc

    exprNodeDesc desc = null;

    // If the current subexpression was pre-calculated (as in Group-By etc.),
    // reuse the column it was materialized into.
    ColumnInfo colInfo = input.get("", expr.toStringTree());
    if (colInfo != null) {
      desc = new exprNodeColumnDesc(colInfo.getType(), colInfo.getInternalName());
      return desc;
    }
    return desc;
  }

Examples of org.apache.hadoop.hive.ql.plan.exprNodeColumnDesc

      } else if (colInfo == null) {
        ctx.setError(ErrorMsg.INVALID_COLUMN.getMsg(tabAlias == null ? expr.getChild(0) : expr.getChild(1)));
        return null;
      }

      return new exprNodeColumnDesc(colInfo.getType(), colInfo.getInternalName());
    }

Examples of org.apache.hadoop.hive.ql.plan.exprNodeColumnDesc

          tabAlias = SemanticAnalyzer.getTabAliasForCol(this.metaData, colName, (ASTNode)expr.getChild(0));
        }

        // Set value to null if it's not a partition column
        if (tabAlias.equalsIgnoreCase(tableAlias) && tab.isPartitionKey(colName)) {
          desc = new exprNodeColumnDesc(String.class, colName);
        } else {
          try {
            // might be a column from another table
            Table t = this.metaData.getTableForAlias(tabAlias);
            if (t.isPartitionKey(colName)) {

Examples of org.apache.hadoop.hive.ql.plan.exprNodeColumnDesc

      String internalName = colNames.get(pos);
      String[] colName = inputRR.reverseLookup(internalName);
      ColumnInfo in = inputRR.get(colName[0], colName[1]);
      // Expose each selected column under its position number in the output.
      outputRR.put(colName[0], colName[1],
                   new ColumnInfo(String.valueOf(pos), in.getType()));
      col_list.add(new exprNodeColumnDesc(in.getType(), internalName));
    }

    Operator output = putOpInsertMap(OperatorFactory.getAndMakeChild(
      new selectDesc(col_list), new RowSchema(outputRR.getColumnInfos()), input), outputRR);

Examples of org.apache.hadoop.hive.ql.plan.exprNodeColumnDesc

  }

  public void testExprNodeColumnEvaluator() throws Throwable {
    try {
      // get an evaluator for a simple field expression
      exprNodeDesc exprDesc = new exprNodeColumnDesc(colaType, "cola");
      ExprNodeEvaluator eval = ExprNodeEvaluatorFactory.get(exprDesc);

      // evaluate on row
      InspectableObject result = new InspectableObject();
      eval.evaluate(r.o, r.oi, result);
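
The three-argument evaluate(Object, ObjectInspector, InspectableObject) call above is the old evaluator API. A sketch of the same check against the renamed, current API, with row and rowOI assumed to come from the test fixture and colaType as above:

      ExprNodeDesc exprDesc = new ExprNodeColumnDesc(colaType, "cola", "", false);
      ExprNodeEvaluator eval = ExprNodeEvaluatorFactory.get(exprDesc);
      // initialize returns the output ObjectInspector; evaluate returns the value.
      ObjectInspector outputOI = eval.initialize(rowOI);
      Object result = eval.evaluate(row);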

Examples of org.apache.hadoop.hive.ql.plan.exprNodeColumnDesc

  }

  public void testExprNodeFuncEvaluator() throws Throwable {
    try {
      // get an evaluator for a string concatenation expression
      exprNodeDesc col1desc = new exprNodeColumnDesc(col1Type, "col1");
      exprNodeDesc coladesc = new exprNodeColumnDesc(colaType, "cola");
      exprNodeDesc col11desc = new exprNodeIndexDesc(col1desc, new exprNodeConstantDesc(new Integer(1)));
      exprNodeDesc cola0desc = new exprNodeIndexDesc(coladesc, new exprNodeConstantDesc(new Integer(0)));
      exprNodeDesc func1 = TypeCheckProcFactory.DefaultExprProcessor.getFuncExprNodeDesc("concat", col11desc, cola0desc);
      ExprNodeEvaluator eval = ExprNodeEvaluatorFactory.get(func1);

Examples of org.apache.hadoop.hive.ql.plan.exprNodeColumnDesc

  }

  public void testExprNodeConversionEvaluator() throws Throwable {
    try {
      // get an evaluator for a type conversion expression
      exprNodeDesc col1desc = new exprNodeColumnDesc(col1Type, "col1");
      exprNodeDesc col11desc = new exprNodeIndexDesc(col1desc, new exprNodeConstantDesc(new Integer(1)));
      exprNodeDesc func1 = TypeCheckProcFactory.DefaultExprProcessor.getFuncExprNodeDesc(Double.class.getName(), col11desc);
      ExprNodeEvaluator eval = ExprNodeEvaluatorFactory.get(func1);

      // evaluate on row