Package org.apache.hadoop.hive.ql.parse

Examples of org.apache.hadoop.hive.ql.parse.PrunedPartitionList
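
All of the examples on this page exercise a small part of the PrunedPartitionList API: getPartitions(), getNotDeniedPartns(), hasUnknownPartitions(), and the (table, partitions, hasUnknownPartitions) constructor. As orientation, here is a minimal sketch of that surface, assuming the Hive 0.13-era classes shown in the excerpts below; the PrunedPartitionListDemo class and its describe() helper are illustrative only, not part of Hive.

import java.util.List;
import java.util.Set;

import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.parse.PrunedPartitionList;

public class PrunedPartitionListDemo {
  // Prints a one-line summary of a pruning result, using only the accessors
  // that appear in the excerpts below.
  static void describe(PrunedPartitionList partsList) {
    // All partitions that survived pruning, possibly including false positives.
    Set<Partition> parts = partsList.getPartitions();
    // Partitions that are definitely selected (not denied by the pruner).
    List<Partition> confirmed = partsList.getNotDeniedPartns();
    // True when a partial or client-side filter may have let extra partitions through.
    boolean unknown = partsList.hasUnknownPartitions();
    System.out.println(parts.size() + " pruned, " + confirmed.size()
        + " confirmed, unknown=" + unknown);
  }
}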


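Deciding how well group-by columns match a table's bucketing and sorting: a non-partitioned table is checked directly against its bucket and sort columns, while a partitioned table is checked through its pruned partition list; a single surviving partition can still be a complete match, several at best a partial one: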
      if (!table.isPartitioned()) {
        List<String> sortCols = Utilities.getColumnNamesFromSortCols(table.getSortCols());
        List<String> bucketCols = table.getBucketCols();
        return matchBucketSortCols(groupByCols, bucketCols, sortCols);
      } else {
        PrunedPartitionList partsList;
        try {
          partsList = pGraphContext.getPrunedPartitions(table.getTableName(), tableScanOp);
        } catch (HiveException e) {
          LOG.error(StringUtils.stringifyException(e));
          throw new SemanticException(e.getMessage(), e);
        }

        List<Partition> notDeniedPartns = partsList.getNotDeniedPartns();

        GroupByOptimizerSortMatch currentMatch =
            notDeniedPartns.isEmpty() ? GroupByOptimizerSortMatch.NO_MATCH :
                notDeniedPartns.size() > 1 ? GroupByOptimizerSortMatch.PARTIAL_MATCH :
                    GroupByOptimizerSortMatch.COMPLETE_MATCH;


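Collecting partitions via an operator-tree walk for the list-bucketing pruner, then visiting only those partitions that are both skewed and list-bucketed: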
    // Retrieve all partitions generated from partition pruner and partition column pruner
    PrunerUtils.walkOperatorTree(pctx, opPartWalkerCtx, LBPartitionProcFactory.getFilterProc(),
        LBPartitionProcFactory.getDefaultProc());

    PrunedPartitionList partsList = ((LBOpPartitionWalkerCtx) opPartWalkerCtx).getPartitions();
    if (partsList != null) {
      Set<Partition> parts = partsList.getPartitions();
      if ((parts != null) && (parts.size() > 0)) {
        for (Partition part : parts) {
          // only process partitions that are skewed and list-bucketed
          if (ListBucketingPrunerUtils.isListBucketingPart(part)) {
            // create the context for walking operators

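Running the partition pruner from a list-bucketing walker context to obtain the pruned partition list for the topmost table scan (the opening of the method signature is truncated in the original excerpt):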
        TableScanOperator top) throws SemanticException, UDFArgumentException {
      LBOpPartitionWalkerCtx owc = (LBOpPartitionWalkerCtx) procCtx;

      // Run partition pruner to get partitions
      ParseContext parseCtx = owc.getParseContext();
      PrunedPartitionList prunedPartList;
      try {
        String alias = (String) parseCtx.getTopOps().keySet().toArray()[0];
        prunedPartList = PartitionPruner.prune(top, parseCtx, alias);
      } catch (HiveException e) {
        // Has to use full name to make sure it does not conflict with
        // org.apache.commons.lang.StringUtils

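A statistics-annotation rule fetching the pruned partition list for a TableScanOperator so that per-partition statistics can be gathered: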
    @Override
    public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
        Object... nodeOutputs) throws SemanticException {
      TableScanOperator tsop = (TableScanOperator) nd;
      AnnotateStatsProcCtx aspCtx = (AnnotateStatsProcCtx) procCtx;
      PrunedPartitionList partList = null;
      try {
        partList = aspCtx.getParseContext().getPrunedPartitions(tsop.getName(), tsop);
      } catch (HiveException e1) {
        throw new SemanticException(e1);
      }


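Verifying that join columns line up with sort columns on every non-denied partition; the sort columns of the first partition of the first table serve as the reference (the excerpt begins mid-method):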
    Table tbl = topToTable.get(tso);
    if (tbl.isPartitioned()) {
      PrunedPartitionList prunedParts;
      try {
        prunedParts = pGraphContext.getPrunedPartitions(alias, tso);
      } catch (HiveException e) {
        LOG.error(org.apache.hadoop.util.StringUtils.stringifyException(e));
        throw new SemanticException(e.getMessage(), e);
      }
      List<Partition> partitions = prunedParts.getNotDeniedPartns();
      // Populate the names and order of columns for the first partition of the
      // first table
      if ((pos == 0) && (partitions != null) && (!partitions.isEmpty())) {
        Partition firstPartition = partitions.get(0);
        sortColumnsFirstTable.addAll(firstPartition.getSortCols());
      }

      for (Partition partition : prunedParts.getNotDeniedPartns()) {
        if (!checkSortColsAndJoinCols(partition.getSortCols(),
          joinCols,
          sortColumnsFirstTable)) {
          return false;
        }

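Memoizing pruning results in PartitionPruner: the cache key is the fully qualified table name plus the pruner expression's string form, so the same (table, filter) pair is only evaluated once: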
    String key = tab.getDbName() + "." + tab.getTableName() + ";";

    if (prunerExpr != null) {
      key = key + prunerExpr.getExprString();
    }
    PrunedPartitionList ret = prunedPartitionsMap.get(key);
    if (ret != null) {
      return ret;
    }

    ret = getPartitionsFromServer(tab, prunerExpr, conf, alias);
    // Cache the result so repeated lookups for the same key hit the map above.
    prunedPartitionsMap.put(key, ret);
    return ret;

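The core pruning routine. Unpartitioned tables short-circuit to "everything"; strict mode rejects queries that lack a partition predicate; otherwise the predicate is compacted down to its partition-column part and evaluated in the metastore where possible, falling back to a client-side sequential scan for user functions or older metastores: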
  private static PrunedPartitionList getPartitionsFromServer(Table tab,
      ExprNodeDesc prunerExpr, HiveConf conf, String alias) throws HiveException {
    try {
      if (!tab.isPartitioned()) {
        // If the table is not partitioned, return everything.
        return new PrunedPartitionList(tab, getAllPartitions(tab), false);
      }
      LOG.debug("tabname = " + tab.getTableName() + " is partitioned");

      if ("strict".equalsIgnoreCase(HiveConf.getVar(conf, HiveConf.ConfVars.HIVEMAPREDMODE))
          && !hasColumnExpr(prunerExpr)) {
        // If the "strict" mode is on, we have to provide partition pruner for each table.
        throw new SemanticException(ErrorMsg.NO_PARTITION_PREDICATE
            .getMsg("for Alias \"" + alias + "\" Table \"" + tab.getTableName() + "\""));
      }

      if (prunerExpr == null) {
        // Non-strict mode, and there are no predicates at all - get everything.
        return new PrunedPartitionList(tab, getAllPartitions(tab), false);
      }

      // Replace references to non-partition columns with nulls. See javadoc for details.
      prunerExpr = removeNonPartCols(prunerExpr, extractPartColNames(tab));
      // Compact the expression so that only predicates on partition columns remain.
      // See javadoc for details.
      ExprNodeGenericFuncDesc compactExpr =
          (ExprNodeGenericFuncDesc) compactExpr(prunerExpr.clone());
      String oldFilter = prunerExpr.getExprString();
      if (compactExpr == null) {
        // Non-strict mode, and all the predicates are on non-partition columns - get everything.
        LOG.debug("Filter " + oldFilter + " was null after compacting");
        return new PrunedPartitionList(tab, getAllPartitions(tab), true);
      }

      LOG.debug("Filter w/ compacting: " + compactExpr.getExprString()
        + "; filter w/o compacting: " + oldFilter);

      // Finally, check the filter for non-built-in UDFs. If these are present, we cannot
      // do the filtering on the server, and have to fall back to the client path.
      boolean doEvalClientSide = hasUserFunctions(compactExpr);

      // Now filter.
      List<Partition> partitions = new ArrayList<Partition>();
      boolean hasUnknownPartitions = false;
      PerfLogger perfLogger = PerfLogger.getPerfLogger();
      if (!doEvalClientSide) {
        perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.PARTITION_RETRIEVING);
        try {
          hasUnknownPartitions = Hive.get().getPartitionsByExpr(
              tab, compactExpr, conf, partitions);
        } catch (IMetaStoreClient.IncompatibleMetastoreException ime) {
          // TODO: backward compat for Hive <= 0.12. Can be removed later.
          LOG.warn("Metastore doesn't support getPartitionsByExpr", ime);
          doEvalClientSide = true;
        } finally {
          perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.PARTITION_RETRIEVING);
        }
      }
      if (doEvalClientSide) {
        // Either we have user functions, or the metastore is an old version - filter names locally.
        hasUnknownPartitions = pruneBySequentialScan(tab, partitions, compactExpr, conf);
      }
      // The partitions are "unknown" if the call says so due to the expression
      // evaluator returning null for a partition, or if we sent a partial expression to
      // metastore and so some partitions may have no data based on other filters.
      boolean isPruningByExactFilter = oldFilter.equals(compactExpr.getExprString());
      return new PrunedPartitionList(tab, new LinkedHashSet<Partition>(partitions),
          hasUnknownPartitions || !isPruningByExactFilter);
    } catch (HiveException e) {
      throw e;
    } catch (Exception e) {
      throw new HiveException(e);
    }
  }
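
Note how the unknown flag is derived: the resulting list is marked as containing unknown partitions when the expression evaluator could not decide a partition, or when only part of the original filter survived compaction, so consumers such as the global-limit optimizer below can tell whether pruning was exact.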

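The global-limit optimization is enabled only when pruning was exact; any unknown partition forces a map-reduce job so the filter can prune correctly: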
        } else {
          // check if the pruner only contains partition columns
          if (PartitionPruner.onlyContainsPartnCols(tab,
              opToPartPruner.get(ts))) {

            PrunedPartitionList partsList;
            try {
              String alias = (String) topOps.keySet().toArray()[0];
              partsList = PartitionPruner.prune(ts, pctx, alias);
            } catch (HiveException e) {
              // Has to use full name to make sure it does not conflict with
              // org.apache.commons.lang.StringUtils
              LOG.error(org.apache.hadoop.util.StringUtils.stringifyException(e));
              throw new SemanticException(e.getMessage(), e);
            }

            // If there is any unknown partition, create a map-reduce job for
            // the filter to prune correctly
            if (!partsList.hasUnknownPartitions()) {
              globalLimitCtx.enableOpt(tempGlobalLimit);
            }
          }
        }
        if (globalLimitCtx.isEnable()) {

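Constructing a PrunedPartitionList directly from a set of confirmed partitions while generating the task plan; a non-partitioned table takes the setTaskPlan overload without a partition list: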
            // and pass it to setTaskPlan as the last parameter
            Set<Partition> confirmedPartns = GenMapRedUtils
                .getConfirmedPartitionsForScan(parseInfo);
            if (confirmedPartns.size() > 0) {
              Table source = parseCtx.getQB().getMetaData().getTableForAlias(alias);
              PrunedPartitionList partList = new PrunedPartitionList(source, confirmedPartns, false);
              GenMapRedUtils.setTaskPlan(currAliasId, currTopOp, currTask, false, ctx, partList);
            } else { // non-partitioned table
              GenMapRedUtils.setTaskPlan(currAliasId, currTopOp, currTask, false, ctx);
            }
          }

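Index rewriting: every partition touched by the query must also exist on the index table, so the pruned partition list drives a per-partition check (the opening of the method signature is truncated in the original excerpt):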
      ParseContext pctx,
      Map<Table, List<Index>> indexes)
    throws HiveException {
    Hive hive = Hive.get(pctx.getConf());
    // make sure each partition exists on the index table
    PrunedPartitionList queryPartitionList = pctx.getOpToPartList().get(tableScan);
    Set<Partition> queryPartitions = queryPartitionList.getPartitions();
    if (queryPartitions == null || queryPartitions.isEmpty()) {
      return null;
    }

    for (Partition part : queryPartitions) {
