
Examples of org.apache.hadoop.hive.ql.parse.PrunedPartitionList
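PrunedPartitionList is what Hive's partition pruner produces for a table scan: the set of partitions that survive the scan's predicate, plus a flag, hasUnknownPartitions(), reporting whether some partitions were kept only because the predicate could not be fully evaluated for them. The fragments below come from Hive's query compiler and optimizers; they obtain the list in one of two ways, both visible here: ParseContext.getPrunedPartitions(alias, ts) or PartitionPruner.prune(ts, pctx, alias). Here is a minimal sketch of the recurring shape (the helper class and method names are ours, not Hive's; only calls the fragments themselves make are used):

import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hive.ql.exec.TableScanOperator;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.parse.ParseContext;
import org.apache.hadoop.hive.ql.parse.PrunedPartitionList;
import org.apache.hadoop.hive.ql.parse.SemanticException;

public final class PrunedPartitionListSketch {

  // Fetch the pruned partition list for one table scan and copy its
  // partitions into a List, wrapping HiveException in SemanticException
  // the same way the fragments below do.
  static List<Partition> collectPartitions(ParseContext pctx, String alias,
      TableScanOperator ts) throws SemanticException {
    PrunedPartitionList pruned;
    try {
      pruned = pctx.getPrunedPartitions(alias, ts);
    } catch (HiveException e) {
      throw new SemanticException(e.getMessage(), e);
    }
    // getPartitions() holds every partition that survived pruning;
    // getNotDeniedPartns() and hasUnknownPartitions() refine that view
    // and show up in the optimizer fragments further down.
    return new ArrayList<Partition>(pruned.getPartitions());
  }
}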




      ParseContext pctx = owc.getParseContext();
      PrunedPartitionList prunedPartList;
      try {
        String alias = (String) owc.getParseContext().getTopOps().keySet().toArray()[0];
        prunedPartList = pctx.getPrunedPartitions(alias, top);
      } catch (HiveException e) {
        // Has to use full name to make sure it does not conflict with
        // com.facebook.presto.hive.shaded.org.apache.commons.lang.StringUtils
        LOG.error(org.apache.hadoop.util.StringUtils.stringifyException(e));
        throw new SemanticException(e.getMessage(), e);
      }

      // Otherwise this is not a sampling predicate. We need to process it.
      ExprNodeDesc predicate = fop.getConf().getPredicate();
      String alias = top.getConf().getAlias();

      ArrayList<Partition> partitions = new ArrayList<Partition>();
      if (prunedPartList == null) {
        return null;
      }

      for (Partition p : prunedPartList.getPartitions()) {
        if (!p.getTable().isPartitioned()) {
          return null;
        }
      }

      partitions.addAll(prunedPartList.getPartitions());

      PcrExprProcFactory.NodeInfoWrapper wrapper = PcrExprProcFactory.walkExprTree(
          alias, partitions, top.getConf().getVirtualCols(), predicate);

      if (wrapper.state == PcrExprProcFactory.WalkState.TRUE) {



      Table tbl = topToTable.get(tso);
      if (tbl.isPartitioned()) {
        PrunedPartitionList prunedParts;
        try {
          prunedParts = pGraphContext.getPrunedPartitions(alias, tso);
        } catch (HiveException e) {
          // Has to use full name to make sure it does not conflict with
          // com.facebook.presto.hive.shaded.org.apache.commons.lang.StringUtils
          LOG.error(org.apache.hadoop.util.StringUtils.stringifyException(e));
          throw new SemanticException(e.getMessage(), e);
        }
        List<Partition> partitions = prunedParts.getNotDeniedPartns();
        // construct a mapping of (Partition->bucket file names) and (Partition -> bucket number)
        if (partitions.isEmpty()) {
          if (!alias.equals(baseBigAlias)) {
            tblAliasToNumberOfBucketsInEachPartition.put(alias, Arrays.<Integer> asList());
            tblAliasToBucketedFilePathsInEachPartition.put(alias, new ArrayList<List<String>>());


    Table tbl = topToTable.get(tso);
    if (tbl.isPartitioned()) {
      PrunedPartitionList prunedParts;
      try {
        prunedParts = pGraphContext.getPrunedPartitions(alias, tso);
      } catch (HiveException e) {
        LOG.error(org.apache.hadoop.util.StringUtils.stringifyException(e));
        throw new SemanticException(e.getMessage(), e);
      }
      List<Partition> partitions = prunedParts.getNotDeniedPartns();
      // Populate the names and order of columns for the first partition of the
      // first table
      if ((pos == 0) && (partitions != null) && (!partitions.isEmpty())) {
        Partition firstPartition = partitions.get(0);
        sortColumnsFirstTable.addAll(firstPartition.getSortCols());
      }

      for (Partition partition : prunedParts.getNotDeniedPartns()) {
        if (!checkSortColsAndJoinCols(partition.getSortCols(),
          joinCols,
          sortColumnsFirstTable)) {
          return false;
        }
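Both join checks above iterate getNotDeniedPartns(), by its name the partitions the pruner did not definitively rule out, and test a per-partition property. A condensed, hypothetical version of the sort-column check, assuming getSortCols() returns the metastore Order list as the fragments suggest:

import java.util.List;

import org.apache.hadoop.hive.metastore.api.Order;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.parse.PrunedPartitionList;

final class SortColsCheckSketch {

  // True when every not-denied partition reports the same sort columns
  // as the first one; the real optimizer also compares them to the join
  // keys before allowing a sort-merge join.
  static boolean allPartitionsSortedAlike(PrunedPartitionList pruned) {
    List<Order> first = null;
    for (Partition partition : pruned.getNotDeniedPartns()) {
      if (first == null) {
        first = partition.getSortCols();
      } else if (!first.equals(partition.getSortCols())) {
        return false; // partitions disagree on sort order
      }
    }
    return true;
  }
}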

          // check if the pruner only contains partition columns
          if (PartitionPruner.onlyContainsPartnCols(tab,
              opToPartPruner.get(ts))) {

            PrunedPartitionList partsList;
            try {
              String alias = (String) topOps.keySet().toArray()[0];
              partsList = PartitionPruner.prune(ts, pctx, alias);
            } catch (HiveException e) {
              // Has to use full name to make sure it does not conflict with
              // com.facebook.presto.hive.shaded.org.apache.commons.lang.StringUtils
              LOG.error(org.apache.hadoop.util.StringUtils.stringifyException(e));
              throw new SemanticException(e.getMessage(), e);
            }

            // If there is any unknown partition, create a map-reduce job for
            // the filter to prune correctly
            if (!partsList.hasUnknownPartitions()) {
              globalLimitCtx.enableOpt(tempGlobalLimit);
            }
          }
        }
        if (globalLimitCtx.isEnable()) {

    @Override
    public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
        Object... nodeOutputs) throws SemanticException {
      TableScanOperator tsop = (TableScanOperator) nd;
      AnnotateStatsProcCtx aspCtx = (AnnotateStatsProcCtx) procCtx;
      PrunedPartitionList partList = null;
      try {
        partList = aspCtx.getParseContext().getPrunedPartitions(tsop.getName(), tsop);
      } catch (HiveException e1) {
        throw new SemanticException(e1);
      }

      ParseContext pctx,
      Map<Table, List<Index>> indexes)
    throws HiveException {
    Hive hive = Hive.get(pctx.getConf());
    // make sure each partition exists on the index table
    PrunedPartitionList queryPartitionList = pctx.getOpToPartList().get(tableScan);
    if (queryPartitionList == null) {
      // guard before dereferencing: no pruned list was recorded for this scan
      return null;
    }
    Set<Partition> queryPartitions = queryPartitionList.getPartitions();
    if (queryPartitions == null || queryPartitions.isEmpty()) {
      return null;
    }

    for (Partition part : queryPartitions) {

    if (HiveConf.getBoolVar(pctx.getConf(), HiveConf.ConfVars.HIVEOPTPPD)) {
      ExprNodeDesc pruner = pctx.getOpToPartPruner().get(ts);
      bypassFilter = PartitionPruner.onlyContainsPartnCols(table, pruner);
    }
    if (aggressive || bypassFilter) {
      PrunedPartitionList pruned = pctx.getPrunedPartitions(alias, ts);
      if (aggressive || !pruned.hasUnknownPartitions()) {
        bypassFilter &= !pruned.hasUnknownPartitions();
        return checkOperators(new FetchData(parent, table, pruned, splitSample), ts,
            aggressive, bypassFilter);
      }
    }
    return null;
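The global-LIMIT fragment and the fetch-optimizer fragment above hinge on the same test: a filter that references nothing but partition columns can be bypassed only when hasUnknownPartitions() is false, i.e. when the pruner decided membership for every partition at compile time. A tiny sketch of that decision, with the method and its parameters invented for illustration:

import org.apache.hadoop.hive.ql.parse.PrunedPartitionList;

final class FetchDecisionSketch {

  // The scan's filter is fully answered by pruning only if it touches
  // partition columns alone AND no partition was left undecided.
  static boolean canDropFilter(boolean onlyPartitionCols, PrunedPartitionList pruned) {
    return onlyPartitionCols && !pruned.hasUnknownPartitions();
  }
}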

            // if it's null, the partition probably doesn't exist, so fall back to table permission
            if (tbl.isPartitioned() &&
                tableUsePartLevelAuth.get(tbl.getTableName()) == Boolean.TRUE) {
              String alias_id = topOpMap.getKey();

              PrunedPartitionList partsList = PartitionPruner.prune(tableScanOp,
                  parseCtx, alias_id);
              Set<Partition> parts = partsList.getPartitions();
              for (Partition part : parts) {
                List<String> existingCols = part2Cols.get(part);
                if (existingCols == null) {
                  existingCols = new ArrayList<String>();
                }

        TableScanOperator top) throws SemanticException, UDFArgumentException {
      LBOpPartitionWalkerCtx owc = (LBOpPartitionWalkerCtx) procCtx;

      // Run the partition pruner to get the partitions
      ParseContext parseCtx = owc.getParseContext();
      PrunedPartitionList prunedPartList;
      try {
        String alias = (String) parseCtx.getTopOps().keySet().toArray()[0];
        prunedPartList = PartitionPruner.prune(top, parseCtx, alias);
      } catch (HiveException e) {
        // Has to use full name to make sure it does not conflict with
        // com.facebook.presto.hive.shaded.org.apache.commons.lang.StringUtils
        LOG.error(org.apache.hadoop.util.StringUtils.stringifyException(e));
        throw new SemanticException(e.getMessage(), e);
      }

    // Retrieve all partitions generated from partition pruner and partition column pruner
    PrunerUtils.walkOperatorTree(pctx, opPartWalkerCtx, LBPartitionProcFactory.getFilterProc(),
        LBPartitionProcFactory.getDefaultProc());

    PrunedPartitionList partsList = ((LBOpPartitionWalkerCtx) opPartWalkerCtx).getPartitions();
    if (partsList != null) {
      Set<Partition> parts = partsList.getPartitions();
      if ((parts != null) && (parts.size() > 0)) {
        for (Partition part : parts) {
          // only process partitions that are skewed and list-bucketed
          if (ListBucketingPrunerUtils.isListBucketingPart(part)) {
            // create the context for walking operators
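As a last sketch, the list-bucketing walk above condenses to a filter over getPartitions(). The helper name is ours, and the import path for ListBucketingPrunerUtils is assumed from Hive's optimizer packages:

import java.util.ArrayList;
import java.util.List;
import java.util.Set;

import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.optimizer.listbucketingpruner.ListBucketingPrunerUtils;
import org.apache.hadoop.hive.ql.parse.PrunedPartitionList;

final class ListBucketingSketch {

  // Collect only the partitions the guard in the fragment above accepts:
  // skewed, list-bucketed partitions.
  static List<Partition> listBucketedPartitions(PrunedPartitionList partsList) {
    List<Partition> result = new ArrayList<Partition>();
    if (partsList == null) {
      return result;
    }
    Set<Partition> parts = partsList.getPartitions();
    if (parts != null) {
      for (Partition part : parts) {
        if (ListBucketingPrunerUtils.isListBucketingPart(part)) {
          result.add(part);
        }
      }
    }
    return result;
  }
}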
