/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hive.ql.optimizer.physical;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.Stack;
import java.util.regex.Pattern;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.exec.*;
import org.apache.hadoop.hive.ql.exec.mr.MapRedTask;
import org.apache.hadoop.hive.ql.exec.tez.TezTask;
import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
import org.apache.hadoop.hive.ql.exec.vector.VectorizationContext;
import org.apache.hadoop.hive.ql.exec.vector.VectorizationContextRegion;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedInputFormatInterface;
import org.apache.hadoop.hive.ql.lib.DefaultGraphWalker;
import org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher;
import org.apache.hadoop.hive.ql.lib.Dispatcher;
import org.apache.hadoop.hive.ql.lib.GraphWalker;
import org.apache.hadoop.hive.ql.lib.Node;
import org.apache.hadoop.hive.ql.lib.NodeProcessor;
import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
import org.apache.hadoop.hive.ql.lib.PreOrderWalker;
import org.apache.hadoop.hive.ql.lib.Rule;
import org.apache.hadoop.hive.ql.lib.RuleRegExp;
import org.apache.hadoop.hive.ql.lib.TaskGraphWalker;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.metadata.VirtualColumn;
import org.apache.hadoop.hive.ql.parse.SemanticException;
import org.apache.hadoop.hive.ql.plan.AbstractOperatorDesc;
import org.apache.hadoop.hive.ql.plan.AggregationDesc;
import org.apache.hadoop.hive.ql.plan.BaseWork;
import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
import org.apache.hadoop.hive.ql.plan.MapJoinDesc;
import org.apache.hadoop.hive.ql.plan.MapWork;
import org.apache.hadoop.hive.ql.plan.OperatorDesc;
import org.apache.hadoop.hive.ql.plan.PartitionDesc;
import org.apache.hadoop.hive.ql.plan.SMBJoinDesc;
import org.apache.hadoop.hive.ql.plan.TableScanDesc;
import org.apache.hadoop.hive.ql.plan.TezWork;
import org.apache.hadoop.hive.ql.plan.api.OperatorType;
import org.apache.hadoop.hive.ql.udf.UDFAcos;
import org.apache.hadoop.hive.ql.udf.UDFAsin;
import org.apache.hadoop.hive.ql.udf.UDFAtan;
import org.apache.hadoop.hive.ql.udf.UDFBin;
import org.apache.hadoop.hive.ql.udf.UDFConv;
import org.apache.hadoop.hive.ql.udf.UDFCos;
import org.apache.hadoop.hive.ql.udf.UDFDayOfMonth;
import org.apache.hadoop.hive.ql.udf.UDFDegrees;
import org.apache.hadoop.hive.ql.udf.UDFExp;
import org.apache.hadoop.hive.ql.udf.UDFHex;
import org.apache.hadoop.hive.ql.udf.UDFHour;
import org.apache.hadoop.hive.ql.udf.UDFLength;
import org.apache.hadoop.hive.ql.udf.UDFLike;
import org.apache.hadoop.hive.ql.udf.UDFLn;
import org.apache.hadoop.hive.ql.udf.UDFLog;
import org.apache.hadoop.hive.ql.udf.UDFLog10;
import org.apache.hadoop.hive.ql.udf.UDFLog2;
import org.apache.hadoop.hive.ql.udf.UDFMinute;
import org.apache.hadoop.hive.ql.udf.UDFMonth;
import org.apache.hadoop.hive.ql.udf.UDFRadians;
import org.apache.hadoop.hive.ql.udf.UDFRand;
import org.apache.hadoop.hive.ql.udf.UDFRegExp;
import org.apache.hadoop.hive.ql.udf.UDFSecond;
import org.apache.hadoop.hive.ql.udf.UDFSign;
import org.apache.hadoop.hive.ql.udf.UDFSin;
import org.apache.hadoop.hive.ql.udf.UDFSqrt;
import org.apache.hadoop.hive.ql.udf.UDFSubstr;
import org.apache.hadoop.hive.ql.udf.UDFTan;
import org.apache.hadoop.hive.ql.udf.UDFToBoolean;
import org.apache.hadoop.hive.ql.udf.UDFToByte;
import org.apache.hadoop.hive.ql.udf.UDFToDouble;
import org.apache.hadoop.hive.ql.udf.UDFToFloat;
import org.apache.hadoop.hive.ql.udf.UDFToInteger;
import org.apache.hadoop.hive.ql.udf.UDFToLong;
import org.apache.hadoop.hive.ql.udf.UDFToShort;
import org.apache.hadoop.hive.ql.udf.UDFToString;
import org.apache.hadoop.hive.ql.udf.UDFWeekOfYear;
import org.apache.hadoop.hive.ql.udf.UDFYear;
import org.apache.hadoop.hive.ql.udf.generic.*;
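/**
 * Vectorizer is a physical plan resolver that rewrites eligible map-side
 * operator trees to use vectorized (batch-at-a-time) execution. A plan is
 * rewritten only after every operator, expression, data type, and UDF on
 * its table-scan-to-sink paths has been validated as supported.
 */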
public class Vectorizer implements PhysicalPlanResolver {
protected static final Log LOG = LogFactory.getLog(Vectorizer.class);
Pattern supportedDataTypesPattern;
List<Task<? extends Serializable>> vectorizableTasks =
new ArrayList<Task<? extends Serializable>>();
Set<Class<?>> supportedGenericUDFs = new HashSet<Class<?>>();
Set<String> supportedAggregationUdfs = new HashSet<String>();
private PhysicalContext physicalContext = null;
public Vectorizer() {
StringBuilder patternBuilder = new StringBuilder();
patternBuilder.append("int");
patternBuilder.append("|smallint");
patternBuilder.append("|tinyint");
patternBuilder.append("|bigint");
patternBuilder.append("|integer");
patternBuilder.append("|long");
patternBuilder.append("|short");
patternBuilder.append("|timestamp");
patternBuilder.append("|boolean");
patternBuilder.append("|string");
patternBuilder.append("|byte");
patternBuilder.append("|float");
patternBuilder.append("|double");
patternBuilder.append("|date");
// Decimal types are parameterized with a precision and scale, e.g. decimal(10,5),
// so unlike the other types they cannot be matched by a fixed string.
// The pattern therefore accepts anything starting with "decimal".
patternBuilder.append("|decimal.*");
supportedDataTypesPattern = Pattern.compile(patternBuilder.toString());
supportedGenericUDFs.add(GenericUDFOPPlus.class);
supportedGenericUDFs.add(GenericUDFOPMinus.class);
supportedGenericUDFs.add(GenericUDFOPMultiply.class);
supportedGenericUDFs.add(GenericUDFOPDivide.class);
supportedGenericUDFs.add(GenericUDFOPMod.class);
supportedGenericUDFs.add(GenericUDFOPNegative.class);
supportedGenericUDFs.add(GenericUDFOPPositive.class);
supportedGenericUDFs.add(GenericUDFOPEqualOrLessThan.class);
supportedGenericUDFs.add(GenericUDFOPEqualOrGreaterThan.class);
supportedGenericUDFs.add(GenericUDFOPGreaterThan.class);
supportedGenericUDFs.add(GenericUDFOPLessThan.class);
supportedGenericUDFs.add(GenericUDFOPNot.class);
supportedGenericUDFs.add(GenericUDFOPNotEqual.class);
supportedGenericUDFs.add(GenericUDFOPNotNull.class);
supportedGenericUDFs.add(GenericUDFOPNull.class);
supportedGenericUDFs.add(GenericUDFOPOr.class);
supportedGenericUDFs.add(GenericUDFOPAnd.class);
supportedGenericUDFs.add(GenericUDFOPEqual.class);
supportedGenericUDFs.add(UDFLength.class);
supportedGenericUDFs.add(UDFYear.class);
supportedGenericUDFs.add(UDFMonth.class);
supportedGenericUDFs.add(UDFDayOfMonth.class);
supportedGenericUDFs.add(UDFHour.class);
supportedGenericUDFs.add(UDFMinute.class);
supportedGenericUDFs.add(UDFSecond.class);
supportedGenericUDFs.add(UDFWeekOfYear.class);
supportedGenericUDFs.add(GenericUDFToUnixTimeStamp.class);
supportedGenericUDFs.add(GenericUDFDateAdd.class);
supportedGenericUDFs.add(GenericUDFDateSub.class);
supportedGenericUDFs.add(GenericUDFDate.class);
supportedGenericUDFs.add(GenericUDFDateDiff.class);
supportedGenericUDFs.add(UDFLike.class);
supportedGenericUDFs.add(UDFRegExp.class);
supportedGenericUDFs.add(UDFSubstr.class);
supportedGenericUDFs.add(GenericUDFLTrim.class);
supportedGenericUDFs.add(GenericUDFRTrim.class);
supportedGenericUDFs.add(GenericUDFTrim.class);
supportedGenericUDFs.add(UDFSin.class);
supportedGenericUDFs.add(UDFCos.class);
supportedGenericUDFs.add(UDFTan.class);
supportedGenericUDFs.add(UDFAsin.class);
supportedGenericUDFs.add(UDFAcos.class);
supportedGenericUDFs.add(UDFAtan.class);
supportedGenericUDFs.add(UDFDegrees.class);
supportedGenericUDFs.add(UDFRadians.class);
supportedGenericUDFs.add(GenericUDFFloor.class);
supportedGenericUDFs.add(GenericUDFCeil.class);
supportedGenericUDFs.add(UDFExp.class);
supportedGenericUDFs.add(UDFLn.class);
supportedGenericUDFs.add(UDFLog2.class);
supportedGenericUDFs.add(UDFLog10.class);
supportedGenericUDFs.add(UDFLog.class);
supportedGenericUDFs.add(GenericUDFPower.class);
supportedGenericUDFs.add(GenericUDFRound.class);
supportedGenericUDFs.add(GenericUDFPosMod.class);
supportedGenericUDFs.add(UDFSqrt.class);
supportedGenericUDFs.add(UDFSign.class);
supportedGenericUDFs.add(UDFRand.class);
supportedGenericUDFs.add(UDFBin.class);
supportedGenericUDFs.add(UDFHex.class);
supportedGenericUDFs.add(UDFConv.class);
supportedGenericUDFs.add(GenericUDFLower.class);
supportedGenericUDFs.add(GenericUDFUpper.class);
supportedGenericUDFs.add(GenericUDFConcat.class);
supportedGenericUDFs.add(GenericUDFAbs.class);
supportedGenericUDFs.add(GenericUDFBetween.class);
supportedGenericUDFs.add(GenericUDFIn.class);
supportedGenericUDFs.add(GenericUDFCase.class);
supportedGenericUDFs.add(GenericUDFWhen.class);
supportedGenericUDFs.add(GenericUDFCoalesce.class);
// For type casts
supportedGenericUDFs.add(UDFToLong.class);
supportedGenericUDFs.add(UDFToInteger.class);
supportedGenericUDFs.add(UDFToShort.class);
supportedGenericUDFs.add(UDFToByte.class);
supportedGenericUDFs.add(UDFToBoolean.class);
supportedGenericUDFs.add(UDFToFloat.class);
supportedGenericUDFs.add(UDFToDouble.class);
supportedGenericUDFs.add(UDFToString.class);
supportedGenericUDFs.add(GenericUDFTimestamp.class);
supportedGenericUDFs.add(GenericUDFToDecimal.class);
supportedGenericUDFs.add(GenericUDFToDate.class);
// For conditional expressions
supportedGenericUDFs.add(GenericUDFIf.class);
supportedAggregationUdfs.add("min");
supportedAggregationUdfs.add("max");
supportedAggregationUdfs.add("count");
supportedAggregationUdfs.add("sum");
supportedAggregationUdfs.add("avg");
supportedAggregationUdfs.add("variance");
supportedAggregationUdfs.add("var_pop");
supportedAggregationUdfs.add("var_samp");
supportedAggregationUdfs.add("std");
supportedAggregationUdfs.add("stddev");
supportedAggregationUdfs.add("stddev_pop");
supportedAggregationUdfs.add("stddev_samp");
}
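/**
 * Dispatched once per task: for a MapRedTask, or for each MapWork vertex of
 * a TezTask, the map-side operator tree is validated and, if eligible,
 * vectorized.
 */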
class VectorizationDispatcher implements Dispatcher {
public VectorizationDispatcher(PhysicalContext pctx) {
}
@Override
public Object dispatch(Node nd, Stack<Node> stack, Object... nodeOutputs)
throws SemanticException {
Task<? extends Serializable> currTask = (Task<? extends Serializable>) nd;
if (currTask instanceof MapRedTask) {
convertMapWork(((MapRedTask) currTask).getWork().getMapWork());
} else if (currTask instanceof TezTask) {
TezWork work = ((TezTask) currTask).getWork();
for (BaseWork w: work.getAllWork()) {
if (w instanceof MapWork) {
convertMapWork((MapWork)w);
}
}
}
return null;
}
private void convertMapWork(MapWork mapWork) throws SemanticException {
boolean ret = validateMapWork(mapWork);
if (ret) {
vectorizeMapWork(mapWork);
}
}
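// Validation runs in two phases: every partition's input format must
// directly implement VectorizedInputFormatInterface, and every operator on
// the TableScan-to-FileSink and TableScan-to-ReduceSink paths must pass
// ValidationNodeProcessor.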
private boolean validateMapWork(MapWork mapWork) throws SemanticException {
// Validate the input format
for (PartitionDesc pd : mapWork.getPathToPartitionInfo().values()) {
List<Class<?>> interfaceList =
Arrays.asList(pd.getInputFileFormatClass().getInterfaces());
if (!interfaceList.contains(VectorizedInputFormatInterface.class)) {
LOG.info("Input format " + pd.getInputFileFormatClassName()
+ " does not provide vectorized input");
return false;
}
}
Map<Rule, NodeProcessor> opRules = new LinkedHashMap<Rule, NodeProcessor>();
ValidationNodeProcessor vnp = new ValidationNodeProcessor();
opRules.put(new RuleRegExp("R1", TableScanOperator.getOperatorName() + ".*"
+ FileSinkOperator.getOperatorName()), vnp);
opRules.put(new RuleRegExp("R2", TableScanOperator.getOperatorName() + ".*"
+ ReduceSinkOperator.getOperatorName()), vnp);
Dispatcher disp = new DefaultRuleDispatcher(vnp, opRules, null);
GraphWalker ogw = new DefaultGraphWalker(disp);
// iterate over the mapper operator tree
ArrayList<Node> topNodes = new ArrayList<Node>();
topNodes.addAll(mapWork.getAliasToWork().values());
HashMap<Node, Object> nodeOutput = new HashMap<Node, Object>();
ogw.startWalking(topNodes, nodeOutput);
for (Map.Entry<Node, Object> entry : nodeOutput.entrySet()) {
Object value = entry.getValue();
if (value != null && !((Boolean) value).booleanValue()) {
return false;
}
}
return true;
}
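// Vectorization walks the same TableScan-to-sink paths, but pre-order, so
// each operator is replaced before its children; the resulting scratch
// column layout is recorded back into the MapWork.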
private void vectorizeMapWork(MapWork mapWork) throws SemanticException {
LOG.info("Vectorizing task...");
mapWork.setVectorMode(true);
Map<Rule, NodeProcessor> opRules = new LinkedHashMap<Rule, NodeProcessor>();
VectorizationNodeProcessor vnp = new VectorizationNodeProcessor(mapWork);
opRules.put(new RuleRegExp("R1", TableScanOperator.getOperatorName() + ".*" +
ReduceSinkOperator.getOperatorName()), vnp);
opRules.put(new RuleRegExp("R2", TableScanOperator.getOperatorName() + ".*"
+ FileSinkOperator.getOperatorName()), vnp);
Dispatcher disp = new DefaultRuleDispatcher(vnp, opRules, null);
GraphWalker ogw = new PreOrderWalker(disp);
// iterate over the mapper operator tree
ArrayList<Node> topNodes = new ArrayList<Node>();
topNodes.addAll(mapWork.getAliasToWork().values());
HashMap<Node, Object> nodeOutput = new HashMap<Node, Object>();
ogw.startWalking(topNodes, nodeOutput);
Map<String, Map<Integer, String>> columnVectorTypes = vnp.getScratchColumnVectorTypes();
mapWork.setScratchColumnVectorTypes(columnVectorTypes);
Map<String, Map<String, Integer>> columnMap = vnp.getScratchColumnMap();
mapWork.setScratchColumnMap(columnMap);
if (LOG.isDebugEnabled()) {
LOG.debug(String.format("vectorTypes: %s", columnVectorTypes.toString()));
LOG.debug(String.format("columnMap: %s", columnMap.toString()));
}
}
}
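/**
 * Validates every operator on the current walk stack; a single unsupported
 * operator or expression rejects the whole path.
 */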
class ValidationNodeProcessor implements NodeProcessor {
@Override
public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
Object... nodeOutputs) throws SemanticException {
for (Node n : stack) {
Operator<? extends OperatorDesc> op = (Operator<? extends OperatorDesc>) n;
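// A ReduceSink or FileSink whose direct parent is a GroupBy is left in row
// mode; accept it here and skip it during vectorization below.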
if ((op.getType().equals(OperatorType.REDUCESINK) || op.getType().equals(OperatorType.FILESINK)) &&
op.getParentOperators().get(0).getType().equals(OperatorType.GROUPBY)) {
return Boolean.TRUE;
}
boolean ret = validateOperator(op);
if (!ret) {
LOG.info("Operator: " + op.getName() + " could not be vectorized.");
return Boolean.FALSE;
}
}
return Boolean.TRUE;
}
}
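/**
 * Replaces validated operators with vectorized ones, threading a
 * VectorizationContext from each TableScan down to its descendants and
 * collecting per-file scratch column metadata along the way.
 */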
class VectorizationNodeProcessor implements NodeProcessor {
private final MapWork mWork;
// This is used to extract scratch column types for each file key
private final Map<String, VectorizationContext> scratchColumnContext =
new HashMap<String, VectorizationContext>();
private final Map<Operator<? extends OperatorDesc>, VectorizationContext> vContextsByTSOp =
new HashMap<Operator<? extends OperatorDesc>, VectorizationContext>();
private final Set<Operator<? extends OperatorDesc>> opsDone =
new HashSet<Operator<? extends OperatorDesc>>();
public VectorizationNodeProcessor(MapWork mWork) {
this.mWork = mWork;
}
public Map<String, Map<Integer, String>> getScratchColumnVectorTypes() {
Map<String, Map<Integer, String>> scratchColumnVectorTypes =
new HashMap<String, Map<Integer, String>>();
for (Map.Entry<String, VectorizationContext> entry : scratchColumnContext.entrySet()) {
Map<Integer, String> cmap = entry.getValue().getOutputColumnTypeMap();
scratchColumnVectorTypes.put(entry.getKey(), cmap);
}
return scratchColumnVectorTypes;
}
public Map<String, Map<String, Integer>> getScratchColumnMap() {
Map<String, Map<String, Integer>> scratchColumnMap =
new HashMap<String, Map<String, Integer>>();
for (Map.Entry<String, VectorizationContext> entry : scratchColumnContext.entrySet()) {
Map<String, Integer> cmap = entry.getValue().getColumnMap();
scratchColumnMap.put(entry.getKey(), cmap);
}
return scratchColumnMap;
}
@Override
public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
Object... nodeOutputs) throws SemanticException {
Operator<? extends OperatorDesc> op = (Operator<? extends OperatorDesc>) nd;
VectorizationContext vContext = null;
if (op instanceof TableScanOperator) {
vContext = getVectorizationContext((TableScanOperator) op, physicalContext);
for (String onefile : mWork.getPathToAliases().keySet()) {
List<String> aliases = mWork.getPathToAliases().get(onefile);
for (String alias : aliases) {
Operator<? extends OperatorDesc> opRoot = mWork.getAliasToWork().get(alias);
if (op == opRoot) {
// The same vectorization context is registered under every path
// (partition) that routes to this table scan, so each partition key
// in the MapWork scratch column map resolves to it.
vContext.setFileKey(onefile);
scratchColumnContext.put(onefile, vContext);
break;
}
}
}
vContextsByTSOp.put(op, vContext);
} else {
assert stack.size() > 1;
// Walk down the stack of operators until we find one that already has a
// context; at the bottom sits the TableScan operator, which is guaranteed
// to have one.
int i = stack.size() - 2;
while (vContext == null) {
Operator<? extends OperatorDesc> opParent = (Operator<? extends OperatorDesc>) stack.get(i);
vContext = vContextsByTSOp.get(opParent);
--i;
}
}
assert vContext != null;
if ((op.getType().equals(OperatorType.REDUCESINK) || op.getType().equals(OperatorType.FILESINK)) &&
op.getParentOperators().get(0).getType().equals(OperatorType.GROUPBY)) {
// A sink that directly follows a group-by stays in row mode; just mark it done.
if (!opsDone.contains(op)) {
opsDone.add(op);
}
} else {
try {
if (!opsDone.contains(op)) {
Operator<? extends OperatorDesc> vectorOp =
vectorizeOperator(op, vContext);
opsDone.add(op);
if (vectorOp != op) {
opsDone.add(vectorOp);
}
if (vectorOp instanceof VectorizationContextRegion) {
VectorizationContextRegion vcRegion = (VectorizationContextRegion) vectorOp;
VectorizationContext vOutContext = vcRegion.getOuputVectorizationContext();
vContextsByTSOp.put(op, vOutContext);
scratchColumnContext.put(vOutContext.getFileKey(), vOutContext);
}
}
} catch (HiveException e) {
throw new SemanticException(e);
}
}
return null;
}
}
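/**
 * Minimal context used only during validation: every column resolves to
 * index 0, which is enough for getVectorExpression to decide whether an
 * expression is vectorizable at all.
 */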
private static class ValidatorVectorizationContext extends VectorizationContext {
private ValidatorVectorizationContext() {
super(null, -1);
}
@Override
protected int getInputColumnIndex(String name) {
return 0;
}
@Override
protected int getInputColumnIndex(ExprNodeColumnDesc colExpr) {
return 0;
}
}
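/**
 * Entry point from the physical optimizer: when vectorization is enabled,
 * walks all root tasks with a TaskGraphWalker and dispatches each task to
 * the VectorizationDispatcher.
 */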
@Override
public PhysicalContext resolve(PhysicalContext pctx) throws SemanticException {
this.physicalContext = pctx;
boolean vectorPath = HiveConf.getBoolVar(pctx.getConf(),
HiveConf.ConfVars.HIVE_VECTORIZATION_ENABLED);
if (!vectorPath) {
LOG.info("Vectorization is disabled");
return pctx;
}
// create dispatcher and graph walker
Dispatcher disp = new VectorizationDispatcher(pctx);
TaskGraphWalker ogw = new TaskGraphWalker(disp);
// collect all the root tasks as starting nodes
ArrayList<Node> topNodes = new ArrayList<Node>();
topNodes.addAll(pctx.getRootTasks());
// begin to walk through the task tree.
ogw.startWalking(topNodes, null);
return pctx;
}
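/**
 * Dispatches validation by operator type. FileSink and Limit always pass;
 * any operator type not listed here rejects vectorization of the path.
 */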
boolean validateOperator(Operator<? extends OperatorDesc> op) {
boolean ret = false;
switch (op.getType()) {
case MAPJOIN:
if (op instanceof MapJoinOperator) {
ret = validateMapJoinOperator((MapJoinOperator) op);
} else if (op instanceof SMBMapJoinOperator) {
ret = validateSMBMapJoinOperator((SMBMapJoinOperator) op);
}
break;
case GROUPBY:
ret = validateGroupByOperator((GroupByOperator) op);
break;
case FILTER:
ret = validateFilterOperator((FilterOperator) op);
break;
case SELECT:
ret = validateSelectOperator((SelectOperator) op);
break;
case REDUCESINK:
ret = validateReduceSinkOperator((ReduceSinkOperator) op);
break;
case TABLESCAN:
ret = validateTableScanOperator((TableScanOperator) op);
break;
case FILESINK:
case LIMIT:
ret = true;
break;
default:
ret = false;
break;
}
return ret;
}
private boolean validateSMBMapJoinOperator(SMBMapJoinOperator op) {
SMBJoinDesc desc = op.getConf();
// Validation is the same as for map join, since the 'small' tables are not vectorized
return validateMapJoinDesc(desc);
}
private boolean validateTableScanOperator(TableScanOperator op) {
TableScanDesc desc = op.getConf();
return !desc.isGatherStats();
}
private boolean validateMapJoinOperator(MapJoinOperator op) {
MapJoinDesc desc = op.getConf();
return validateMapJoinDesc(desc);
}
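// Only the big table side of the join runs vectorized, so only its filter,
// key, and value expressions need to validate.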
private boolean validateMapJoinDesc(MapJoinDesc desc) {
byte posBigTable = (byte) desc.getPosBigTable();
List<ExprNodeDesc> filterExprs = desc.getFilters().get(posBigTable);
List<ExprNodeDesc> keyExprs = desc.getKeys().get(posBigTable);
List<ExprNodeDesc> valueExprs = desc.getExprs().get(posBigTable);
return validateExprNodeDesc(filterExprs, VectorExpressionDescriptor.Mode.FILTER) &&
validateExprNodeDesc(keyExprs) &&
validateExprNodeDesc(valueExprs);
}
private boolean validateReduceSinkOperator(ReduceSinkOperator op) {
List<ExprNodeDesc> keyDescs = op.getConf().getKeyCols();
List<ExprNodeDesc> partitionDescs = op.getConf().getPartitionCols();
List<ExprNodeDesc> valueDesc = op.getConf().getValueCols();
return validateExprNodeDesc(keyDescs) && validateExprNodeDesc(partitionDescs) &&
validateExprNodeDesc(valueDesc);
}
private boolean validateSelectOperator(SelectOperator op) {
List<ExprNodeDesc> descList = op.getConf().getColList();
for (ExprNodeDesc desc : descList) {
boolean ret = validateExprNodeDesc(desc);
if (!ret) {
return false;
}
}
return true;
}
private boolean validateFilterOperator(FilterOperator op) {
ExprNodeDesc desc = op.getConf().getPredicate();
return validateExprNodeDesc(desc, VectorExpressionDescriptor.Mode.FILTER);
}
private boolean validateGroupByOperator(GroupByOperator op) {
if (op.getConf().isGroupingSetsPresent()) {
LOG.warn("Grouping sets not supported in vector mode");
return false;
}
boolean ret = validateExprNodeDesc(op.getConf().getKeys());
if (!ret) {
return false;
}
return validateAggregationDesc(op.getConf().getAggregators());
}
private boolean validateExprNodeDesc(List<ExprNodeDesc> descs) {
return validateExprNodeDesc(descs, VectorExpressionDescriptor.Mode.PROJECTION);
}
private boolean validateExprNodeDesc(List<ExprNodeDesc> descs, VectorExpressionDescriptor.Mode mode) {
for (ExprNodeDesc d : descs) {
boolean ret = validateExprNodeDesc(d, mode);
if (!ret) {
return false;
}
}
return true;
}
private boolean validateAggregationDesc(List<AggregationDesc> descs) {
for (AggregationDesc d : descs) {
boolean ret = validateAggregationDesc(d);
if (!ret) {
return false;
}
}
return true;
}
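// Recursive expression check: the node's output type must match the
// supported-type pattern, any generic UDF must be on the supported list,
// and all children must pass the same checks.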
private boolean validateExprNodeDescRecursive(ExprNodeDesc desc) {
String typeName = desc.getTypeInfo().getTypeName();
boolean ret = validateDataType(typeName);
if (!ret) {
if (LOG.isDebugEnabled()) {
LOG.debug("Cannot vectorize " + desc.toString() + " of type " + typeName);
}
return false;
}
if (desc instanceof ExprNodeGenericFuncDesc) {
ExprNodeGenericFuncDesc d = (ExprNodeGenericFuncDesc) desc;
boolean r = validateGenericUdf(d);
if (!r) {
return false;
}
}
if (desc.getChildren() != null) {
for (ExprNodeDesc d: desc.getChildren()) {
boolean r = validateExprNodeDescRecursive(d);
if (!r) {
return false;
}
}
}
return true;
}
private boolean validateExprNodeDesc(ExprNodeDesc desc) {
return validateExprNodeDesc(desc, VectorExpressionDescriptor.Mode.PROJECTION);
}
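// Full expression check: the recursive structural walk above, followed by
// a trial translation through ValidatorVectorizationContext; an exception
// from getVectorExpression means the expression cannot be vectorized.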
boolean validateExprNodeDesc(ExprNodeDesc desc, VectorExpressionDescriptor.Mode mode) {
if (!validateExprNodeDescRecursive(desc)) {
return false;
}
try {
VectorizationContext vc = new ValidatorVectorizationContext();
if (vc.getVectorExpression(desc, mode) == null) {
// TODO: this cannot happen - VectorizationContext throws in such cases.
return false;
}
} catch (Exception e) {
if (LOG.isDebugEnabled()) {
LOG.debug("Failed to vectorize", e);
}
return false;
}
return true;
}
private boolean validateGenericUdf(ExprNodeGenericFuncDesc genericUDFExpr) {
if (VectorizationContext.isCustomUDF(genericUDFExpr)) {
return true;
}
GenericUDF genericUDF = genericUDFExpr.getGenericUDF();
if (genericUDF instanceof GenericUDFBridge) {
Class<? extends UDF> udf = ((GenericUDFBridge) genericUDF).getUdfClass();
return supportedGenericUDFs.contains(udf);
} else {
return supportedGenericUDFs.contains(genericUDF.getClass());
}
}
private boolean validateAggregationDesc(AggregationDesc aggDesc) {
if (!supportedAggregationUdfs.contains(aggDesc.getGenericUDAFName().toLowerCase())) {
return false;
}
if (aggDesc.getParameters() != null) {
return validateExprNodeDesc(aggDesc.getParameters());
}
return true;
}
private boolean validateDataType(String type) {
return supportedDataTypesPattern.matcher(type.toLowerCase()).matches();
}
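// Builds the initial column-name-to-index map from the table scan's row
// schema, skipping virtual columns, which have no vectorized counterpart.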
private VectorizationContext getVectorizationContext(TableScanOperator op,
PhysicalContext pctx) {
RowSchema rs = op.getSchema();
Map<String, Integer> cmap = new HashMap<String, Integer>();
int columnCount = 0;
for (ColumnInfo c : rs.getSignature()) {
if (!isVirtualColumn(c)) {
cmap.put(c.getInternalName(), columnCount++);
}
}
VectorizationContext vc = new VectorizationContext(cmap, columnCount);
return vc;
}
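// Replaces a supported operator with its vectorized counterpart and rewires
// the parent/child links; unsupported operator types pass through unchanged.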
Operator<? extends OperatorDesc> vectorizeOperator(Operator<? extends OperatorDesc> op,
VectorizationContext vContext) throws HiveException {
Operator<? extends OperatorDesc> vectorOp = null;
switch (op.getType()) {
case MAPJOIN:
case GROUPBY:
case FILTER:
case SELECT:
case FILESINK:
case REDUCESINK:
case LIMIT:
vectorOp = OperatorFactory.getVectorOperator(op.getConf(), vContext);
break;
default:
vectorOp = op;
break;
}
if (vectorOp != op) {
if (op.getParentOperators() != null) {
vectorOp.setParentOperators(op.getParentOperators());
for (Operator<? extends OperatorDesc> p : op.getParentOperators()) {
p.replaceChild(op, vectorOp);
}
}
if (op.getChildOperators() != null) {
vectorOp.setChildOperators(op.getChildOperators());
for (Operator<? extends OperatorDesc> c : op.getChildOperators()) {
c.replaceParent(op, vectorOp);
}
}
((AbstractOperatorDesc) vectorOp.getConf()).setVectorMode(true);
}
return vectorOp;
}
private boolean isVirtualColumn(ColumnInfo column) {
// Not using method column.getIsVirtualCol() because partitioning columns are also
// treated as virtual columns in ColumnInfo.
for (VirtualColumn vc : VirtualColumn.VIRTUAL_COLUMNS) {
if (column.getInternalName().equals(vc.getName())) {
return true;
}
}
return false;
}
}