Examples of PrunedPartitionList


Examples of org.apache.hadoop.hive.ql.parse.PrunedPartitionList

    ArrayList<PartitionDesc> partDesc = new ArrayList<PartitionDesc>();

    Path tblDir = null;
    TableDesc tblDesc = null;

    PrunedPartitionList partsList = pList;

    plan.setNameToSplitSample(parseCtx.getNameToSplitSample());

    if (partsList == null) {
      try {
        TableScanOperator tsOp = (TableScanOperator) topOp;
        partsList = PartitionPruner.prune(tsOp, parseCtx, alias_id);
      } catch (SemanticException e) {
        throw e;
      } catch (HiveException e) {
        LOG.error(org.apache.hadoop.util.StringUtils.stringifyException(e));
        throw new SemanticException(e.getMessage(), e);
      }
    }

    // Generate the map work for this alias_id
    // pass both confirmed and unknown partitions through the map-reduce
    // framework
    Set<Partition> parts = partsList.getPartitions();
    PartitionDesc aliasPartnDesc = null;
    try {
      if (!parts.isEmpty()) {
        aliasPartnDesc = Utilities.getPartitionDesc(parts.iterator().next());
      }
    } catch (HiveException e) {
      LOG.error(org.apache.hadoop.util.StringUtils.stringifyException(e));
      throw new SemanticException(e.getMessage(), e);
    }

    // The table does not have any partitions
    if (aliasPartnDesc == null) {
      aliasPartnDesc = new PartitionDesc(Utilities.getTableDesc(parseCtx
          .getTopToTable().get(topOp)), null);

    }

    Map<String, String> props = parseCtx.getTopToProps().get(topOp);
    if (props != null) {
      Properties target = aliasPartnDesc.getProperties();
      if (target == null) {
        aliasPartnDesc.setProperties(target = new Properties());
      }
      target.putAll(props);
    }

    plan.getAliasToPartnInfo().put(alias_id, aliasPartnDesc);

    long sizeNeeded = Integer.MAX_VALUE;
    int fileLimit = -1;
    if (parseCtx.getGlobalLimitCtx().isEnable()) {
      long sizePerRow = HiveConf.getLongVar(parseCtx.getConf(),
          HiveConf.ConfVars.HIVELIMITMAXROWSIZE);
      sizeNeeded = parseCtx.getGlobalLimitCtx().getGlobalLimit() * sizePerRow;
      // For the optimization that reduces the number of input files, we limit the
      // number of files allowed. If more than the specified number of files would
      // have to be selected, we skip this optimization, since having too many
      // files as inputs can cause unpredictable latency and is not necessarily
      // cheaper.
      fileLimit =
          HiveConf.getIntVar(parseCtx.getConf(), HiveConf.ConfVars.HIVELIMITOPTLIMITFILE);

      if (sizePerRow <= 0 || fileLimit <= 0) {
        LOG.info("Skip optimization to reduce input size of 'limit'");
        parseCtx.getGlobalLimitCtx().disableOpt();
      } else if (parts.isEmpty()) {
        LOG.info("Empty input: skip limit optimiztion");
      } else {
        LOG.info("Try to reduce input size for 'limit' " +
            "sizeNeeded: " + sizeNeeded +
            "  file limit : " + fileLimit);
      }
    }
    boolean isFirstPart = true;
    boolean emptyInput = true;
    boolean singlePartition = (parts.size() == 1);

    // Track the dependencies for the view. Consider a query like: select * from V;
    // where V is a view of the form: select * from T
    // The dependencies should include V at depth 0, and T at depth 1 (inferred).
    ReadEntity parentViewInfo = getParentViewInfo(alias_id, parseCtx.getViewAliasToInput());

    // The table should also be considered a part of inputs, even if the table is
    // partitioned and regardless of whether any partition is selected
    PlanUtils.addInput(inputs,
        new ReadEntity(parseCtx.getTopToTable().get(topOp), parentViewInfo));

    for (Partition part : parts) {
      if (part.getTable().isPartitioned()) {
        PlanUtils.addInput(inputs, new ReadEntity(part, parentViewInfo));
      } else {
        PlanUtils.addInput(inputs, new ReadEntity(part.getTable(), parentViewInfo));
      }

      // Later the properties have to come from the partition rather than from
      // the table in order to support versioning.
      Path[] paths = null;
      sampleDesc sampleDescr = parseCtx.getOpToSamplePruner().get(topOp);

      // Lookup list bucketing pruner
      Map<String, ExprNodeDesc> partToPruner = parseCtx.getOpToPartToSkewedPruner().get(topOp);
      ExprNodeDesc listBucketingPruner = (partToPruner != null) ? partToPruner.get(part.getName())
          : null;

      if (sampleDescr != null) {
        assert (listBucketingPruner == null) : "Sampling and list bucketing can't coexist.";
        paths = SamplePruner.prune(part, sampleDescr);
        parseCtx.getGlobalLimitCtx().disableOpt();
      } else if (listBucketingPruner != null) {
        assert (sampleDescr == null) : "Sampling and list bucketing can't coexist.";
        /* Use the list bucketing pruner's path. */
        paths = ListBucketingPruner.prune(parseCtx, part, listBucketingPruner);
      } else {
        // We only try the first partition; if the first partition doesn't
        // contain enough data, we fall back to normal mode.
        if (parseCtx.getGlobalLimitCtx().isEnable()) {
          if (isFirstPart) {
            long sizeLeft = sizeNeeded;
            ArrayList<Path> retPathList = new ArrayList<Path>();
            SamplePruner.LimitPruneRetStatus status = SamplePruner.limitPrune(part, sizeLeft,
                fileLimit, retPathList);
            if (status.equals(SamplePruner.LimitPruneRetStatus.NoFile)) {
              continue;
            } else if (status.equals(SamplePruner.LimitPruneRetStatus.NotQualify)) {
              LOG.info("Use full input -- first " + fileLimit + " files are more than "
                  + sizeNeeded
                  + " bytes");

              parseCtx.getGlobalLimitCtx().disableOpt();

            } else {
              emptyInput = false;
              paths = new Path[retPathList.size()];
              int index = 0;
              for (Path path : retPathList) {
                paths[index++] = path;
              }
              if (status.equals(SamplePruner.LimitPruneRetStatus.NeedAllFiles) && singlePartition) {
                // If all files are needed to meet the size limit, we disable the
                // optimization. This usually happens for an empty table/partition or
                // a table/partition with only one file. By disabling this
                // optimization, we can avoid retrying the query if there are
                // not enough rows.
                parseCtx.getGlobalLimitCtx().disableOpt();
              }
            }
            isFirstPart = false;
          } else {
            paths = new Path[0];
          }
        }
        if (!parseCtx.getGlobalLimitCtx().isEnable()) {
          paths = part.getPath();
        }
      }

      // Is it a partitioned table?
      if (!part.getTable().isPartitioned()) {
        assert ((tblDir == null) && (tblDesc == null));

        tblDir = paths[0];
        tblDesc = Utilities.getTableDesc(part.getTable());
      } else if (tblDesc == null) {
        tblDesc = Utilities.getTableDesc(part.getTable());
      }

      if (props != null) {
        Properties target = tblDesc.getProperties();
        if (target == null) {
          tblDesc.setProperties(target = new Properties());
        }
        target.putAll(props);
      }

      for (Path p : paths) {
        if (p == null) {
          continue;
        }
        String path = p.toString();
        if (LOG.isDebugEnabled()) {
          LOG.debug("Adding " + path + " of table" + alias_id);
        }

        partDir.add(p);
        try {
          if (part.getTable().isPartitioned()) {
            partDesc.add(Utilities.getPartitionDesc(part));
          } else {
            partDesc.add(Utilities.getPartitionDescFromTableDesc(tblDesc, part));
          }
        } catch (HiveException e) {
          LOG.error(org.apache.hadoop.util.StringUtils.stringifyException(e));
          throw new SemanticException(e.getMessage(), e);
        }
      }
    }
    if (emptyInput) {
      parseCtx.getGlobalLimitCtx().disableOpt();
    }

    Iterator<Path> iterPath = partDir.iterator();
    Iterator<PartitionDesc> iterPartnDesc = partDesc.iterator();

    if (!local) {
      while (iterPath.hasNext()) {
        assert iterPartnDesc.hasNext();
        String path = iterPath.next().toString();

        PartitionDesc prtDesc = iterPartnDesc.next();

        // Add the path to alias mapping
        if (plan.getPathToAliases().get(path) == null) {
          plan.getPathToAliases().put(path, new ArrayList<String>());
        }
        plan.getPathToAliases().get(path).add(alias_id);
        plan.getPathToPartitionInfo().put(path, prtDesc);
        if (LOG.isDebugEnabled()) {
          LOG.debug("Information added for path " + path);
        }
      }

      assert plan.getAliasToWork().get(alias_id) == null;
      plan.getAliasToWork().put(alias_id, topOp);
    } else {
      // populate local work if needed
      MapredLocalWork localPlan = plan.getMapLocalWork();
      if (localPlan == null) {
        localPlan = new MapredLocalWork(
            new LinkedHashMap<String, Operator<? extends OperatorDesc>>(),
            new LinkedHashMap<String, FetchWork>());
      }

      assert localPlan.getAliasToWork().get(alias_id) == null;
      assert localPlan.getAliasToFetchWork().get(alias_id) == null;
      localPlan.getAliasToWork().put(alias_id, topOp);
      if (tblDir == null) {
        tblDesc = Utilities.getTableDesc(partsList.getSourceTable());
        localPlan.getAliasToFetchWork().put(
            alias_id,
            new FetchWork(FetchWork.convertPathToStringArray(partDir), partDesc, tblDesc));
      } else {
        localPlan.getAliasToFetchWork().put(alias_id,
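The snippet above comes from map-reduce plan generation: it resolves the PrunedPartitionList for a table scan, registers each surviving partition path under the alias, and records a PartitionDesc per path. Below is a condensed sketch of that core pattern using the same names as the snippet (parseCtx, plan, alias_id, topOp); the MapredWork type for plan and the helper method name are assumptions for illustration, and the non-partitioned-table, sampling, limit, and local-work branches are omitted.

    // Condensed sketch only -- the plan type (MapredWork) and the helper name
    // are assumed for illustration; the full snippet additionally wraps
    // HiveException into SemanticException.
    private void addPrunedPathsToPlan(MapredWork plan, ParseContext parseCtx,
        String alias_id, TableScanOperator topOp, PrunedPartitionList pList)
        throws SemanticException, HiveException {
      PrunedPartitionList partsList = pList;
      if (partsList == null) {
        // Compute (or fetch from the pruner cache) the partitions that survive
        // the partition predicate of this table scan.
        partsList = PartitionPruner.prune(topOp, parseCtx, alias_id);
      }
      for (Partition part : partsList.getPartitions()) {
        PartitionDesc partnDesc = Utilities.getPartitionDesc(part);
        for (Path p : part.getPath()) {
          String path = p.toString();
          // Register each input path under the alias and record its descriptor.
          if (plan.getPathToAliases().get(path) == null) {
            plan.getPathToAliases().put(path, new ArrayList<String>());
          }
          plan.getPathToAliases().get(path).add(alias_id);
          plan.getPathToPartitionInfo().put(path, partnDesc);
        }
      }
      // The table scan operator itself is the root of the map-side work.
      plan.getAliasToWork().put(alias_id, topOp);
    }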

Examples of org.apache.hadoop.hive.ql.parse.PrunedPartitionList

            //if it's null then the partition probably doesn't exist so let's use table permission
            if (tbl.isPartitioned() &&
                tableUsePartLevelAuth.get(tbl.getTableName()) == Boolean.TRUE) {
              String alias_id = topOpMap.getKey();

              PrunedPartitionList partsList = PartitionPruner.prune(tableScanOp,
                  parseCtx, alias_id);
              Set<Partition> parts = partsList.getPartitions();
              for (Partition part : parts) {
                List<String> existingCols = part2Cols.get(part);
                if (existingCols == null) {
                  existingCols = new ArrayList<String>();
                }

Examples of org.apache.hadoop.hive.ql.parse.PrunedPartitionList

    String key = tab.getDbName() + "." + tab.getTableName() + ";";

    if (prunerExpr != null) {
      key = key + prunerExpr.getExprString();
    }
    PrunedPartitionList ret = prunedPartitionsMap.get(key);
    if (ret != null) {
      return ret;
    }

    LinkedHashSet<Partition> true_parts = new LinkedHashSet<Partition>();
    LinkedHashSet<Partition> unkn_parts = new LinkedHashSet<Partition>();
    LinkedHashSet<Partition> denied_parts = new LinkedHashSet<Partition>();

    try {
      StructObjectInspector rowObjectInspector = (StructObjectInspector) tab
          .getDeserializer().getObjectInspector();
      Object[] rowWithPart = new Object[2];

      if (tab.isPartitioned()) {
        // If the "strict" mode is on, we have to provide partition pruner for
        // each table.
        if ("strict".equalsIgnoreCase(HiveConf.getVar(conf,
            HiveConf.ConfVars.HIVEMAPREDMODE))) {
          if (!hasColumnExpr(prunerExpr)) {
            throw new SemanticException(ErrorMsg.NO_PARTITION_PREDICATE
                .getMsg("for Alias \"" + alias + "\" Table \""
                    + tab.getTableName() + "\""));
          }
        }

        if (prunerExpr == null) {
          // This can happen when hive.mapred.mode=nonstrict and there are no predicates at all.
          // Add all partitions so that a MR job is generated.
          true_parts.addAll(Hive.get().getPartitions(tab));
        } else {
          // remove non-partition columns
          ExprNodeDesc compactExpr = prunerExpr.clone();
          compactExpr = compactExpr(compactExpr);
          LOG.debug("Filter w/ compacting: " +
              ((compactExpr != null) ? compactExpr.getExprString(): "null") +
              "; filter w/o compacting: " +
              ((prunerExpr != null) ? prunerExpr.getExprString(): "null"));
          if (compactExpr == null) {
            // This could happen when hive.mapred.mode=nonstrict and all the predicates
            // are on non-partition columns.
            unkn_parts.addAll(Hive.get().getPartitions(tab));
          } else if (Utilities.checkJDOPushDown(tab, compactExpr)) {
            String filter = compactExpr.getExprString();
            String oldFilter = prunerExpr.getExprString();

            if (filter.equals(oldFilter)) {
              // prunerExpr contains only partition columns
              pruneByPushDown(tab, true_parts, filter);
            } else {
              // prunerExpr contains non-partition columns
              pruneByPushDown(tab, unkn_parts, filter);
            }
          } else {
            pruneBySequentialScan(tab, true_parts, unkn_parts, denied_parts, prunerExpr, rowObjectInspector);
          }
        }
        LOG.debug("tabname = " + tab.getTableName() + " is partitioned");
      } else {
        true_parts.addAll(Hive.get().getPartitions(tab));
      }
    } catch (HiveException e) {
      throw e;
    } catch (Exception e) {
      throw new HiveException(e);
    }

    // Now return the set of partitions
    ret = new PrunedPartitionList(true_parts, unkn_parts, denied_parts);
    prunedPartitionsMap.put(key, ret);
    return ret;
  }
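This version of PartitionPruner.prune() classifies partitions into three sets: partitions known to satisfy the predicate, partitions whose status cannot be decided at compile time, and partitions known to fail it. The following is a minimal sketch of how those sets are packaged and later consumed, assuming the constructor and accessors used by the surrounding examples (which come from slightly different Hive versions).

    // Minimal sketch, assuming the PrunedPartitionList constructor and
    // accessors shown in the surrounding examples.
    LinkedHashSet<Partition> true_parts = new LinkedHashSet<Partition>();   // predicate known true
    LinkedHashSet<Partition> unkn_parts = new LinkedHashSet<Partition>();   // undecidable at compile time
    LinkedHashSet<Partition> denied_parts = new LinkedHashSet<Partition>(); // predicate known false

    PrunedPartitionList ret = new PrunedPartitionList(true_parts, unkn_parts, denied_parts);

    // Consumers either collect confirmed + unknown partitions explicitly ...
    Set<Partition> parts = new HashSet<Partition>();
    parts.addAll(ret.getConfirmedPartns());
    parts.addAll(ret.getUnknownPartns());

    // ... or simply take everything that was not pruned away.
    List<Partition> notDenied = ret.getNotDeniedPartns();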

Examples of org.apache.hadoop.hive.ql.parse.PrunedPartitionList

                cols.add(columns.get(i).getName());
              }
            }
            if (tbl.isPartitioned() && tableUsePartLevelAuth.get(tbl.getTableName())) {
              String alias_id = topOpMap.getKey();
              PrunedPartitionList partsList = PartitionPruner.prune(parseCtx
                  .getTopToTable().get(topOp), parseCtx.getOpToPartPruner()
                  .get(topOp), parseCtx.getConf(), alias_id, parseCtx
                  .getPrunedPartitions());
              Set<Partition> parts = new HashSet<Partition>();
              parts.addAll(partsList.getConfirmedPartns());
              parts.addAll(partsList.getUnknownPartns());
              for (Partition part : parts) {
                List<String> existingCols = part2Cols.get(part);
                if (existingCols == null) {
                  existingCols = new ArrayList<String>();
                }

Examples of org.apache.hadoop.hive.ql.parse.PrunedPartitionList

  private Set<Partition> checkPartitionsCoveredByIndex(TableScanOperator tableScan, ParseContext pctx)
    throws HiveException {
    Hive hive = Hive.get(pctx.getConf());

    // make sure each partition exists on the index table
    PrunedPartitionList queryPartitionList = pctx.getOpToPartList().get(tableScan);
    Set<Partition> queryPartitions = queryPartitionList.getConfirmedPartns();

    for (Partition part : queryPartitions) {
      List<Table> sourceIndexTables = getIndexTables(hive, part);
      if (!containsPartition(hive, sourceIndexTables, part)) {
        return null; // problem if it doesn't contain the partition

Examples of org.apache.hadoop.hive.ql.parse.PrunedPartitionList

    LOG.trace("prune Expression = " + prunerExpr);
   
    String key = tab.getName() + ";";
    if (prunerExpr != null)
      key = key + prunerExpr.getExprString();
    PrunedPartitionList ret = prunedPartitionsMap.get(key);
    if(ret !=null)
      return ret;
   
    LinkedHashSet<Partition> true_parts = new LinkedHashSet<Partition>();
    LinkedHashSet<Partition> unkn_parts = new LinkedHashSet<Partition>();
    LinkedHashSet<Partition> denied_parts = new LinkedHashSet<Partition>();

    try {
      StructObjectInspector rowObjectInspector = (StructObjectInspector)tab.getDeserializer().getObjectInspector();
      Object[] rowWithPart = new Object[2];

      if(tab.isPartitioned()) {
        for(String partName: Hive.get().getPartitionNames(tab.getDbName(), tab.getName(), (short) -1)) {
          // Set all the variables here
          LinkedHashMap<String, String> partSpec = Warehouse.makeSpecFromName(partName);
          // Create the row object
          ArrayList<String> partNames = new ArrayList<String>();
          ArrayList<String> partValues = new ArrayList<String>();
          ArrayList<ObjectInspector> partObjectInspectors = new ArrayList<ObjectInspector>();
          for(Map.Entry<String,String>entry : partSpec.entrySet()) {
            partNames.add(entry.getKey());
            partValues.add(entry.getValue());
            partObjectInspectors.add(PrimitiveObjectInspectorFactory.javaStringObjectInspector);
          }
          StructObjectInspector partObjectInspector = ObjectInspectorFactory.getStandardStructObjectInspector(partNames, partObjectInspectors);

          rowWithPart[1] = partValues;
          ArrayList<StructObjectInspector> ois = new ArrayList<StructObjectInspector>(2);
          ois.add(rowObjectInspector);
          ois.add(partObjectInspector);
          StructObjectInspector rowWithPartObjectInspector = ObjectInspectorFactory.getUnionStructObjectInspector(ois);

          // If the "strict" mode is on, we have to provide partition pruner for each table.
          if ("strict".equalsIgnoreCase(HiveConf.getVar(conf, HiveConf.ConfVars.HIVEMAPREDMODE))) {
            if (!hasColumnExpr(prunerExpr)) {
              throw new SemanticException(ErrorMsg.NO_PARTITION_PREDICATE.getMsg(
                  "for Alias \"" + alias + "\" Table \"" + tab.getName() + "\""));
            }
          }

          // evaluate the expression tree
          if (prunerExpr != null) {
            ExprNodeEvaluator evaluator = ExprNodeEvaluatorFactory.get(prunerExpr);
            ObjectInspector evaluateResultOI = evaluator.initialize(rowWithPartObjectInspector);
            Object evaluateResultO = evaluator.evaluate(rowWithPart);
            Boolean r = (Boolean) ((PrimitiveObjectInspector)evaluateResultOI).getPrimitiveJavaObject(evaluateResultO);
            LOG.trace("prune result for partition " + partSpec + ": " + r);
            if (Boolean.FALSE.equals(r)) {
              if (denied_parts.isEmpty()) {
                Partition part = Hive.get().getPartition(tab, partSpec, Boolean.FALSE);
                denied_parts.add(part);
              }
              LOG.trace("pruned partition: " + partSpec);
            } else {
              Partition part = Hive.get().getPartition(tab, partSpec, Boolean.FALSE);
              if (Boolean.TRUE.equals(r)) {
                LOG.debug("retained partition: " + partSpec);
                true_parts.add(part);
              } else {
                LOG.debug("unknown partition: " + partSpec);
                unkn_parts.add(part);
              }
            }
          } else {
            // if there is no partition pruning, all of them are needed
            true_parts.add(Hive.get().getPartition(tab, partSpec, Boolean.FALSE));
          }
        }
      } else {
        true_parts.addAll(Hive.get().getPartitions(tab));
      }
    } catch (HiveException e) {
      throw e;
    } catch (Exception e) {
      throw new HiveException(e);
    }

    // Now return the set of partitions
    ret = new PrunedPartitionList(true_parts, unkn_parts, denied_parts);
    prunedPartitionsMap.put(key, ret);
    return ret;
  }
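The older prune() implementation above builds a union object inspector of the row and its partition columns, then evaluates the pruner expression once per partition name. The classification of each partition reduces to the three-valued result of that evaluation; here is a condensed sketch of just that step, using the same locals as the snippet, and ignoring the detail that the full code keeps only one representative partition in denied_parts.

    // Condensed sketch of the per-partition classification above; prunerExpr,
    // rowWithPart, rowWithPartObjectInspector, tab and partSpec are the same
    // locals as in the full snippet.
    ExprNodeEvaluator evaluator = ExprNodeEvaluatorFactory.get(prunerExpr);
    ObjectInspector resultOI = evaluator.initialize(rowWithPartObjectInspector);
    Object resultO = evaluator.evaluate(rowWithPart);
    Boolean r = (Boolean) ((PrimitiveObjectInspector) resultOI).getPrimitiveJavaObject(resultO);

    Partition part = Hive.get().getPartition(tab, partSpec, Boolean.FALSE);
    if (Boolean.TRUE.equals(r)) {
      true_parts.add(part);      // predicate is definitely true for this partition
    } else if (Boolean.FALSE.equals(r)) {
      denied_parts.add(part);    // predicate is definitely false -- pruned
    } else {
      unkn_parts.add(part);      // NULL result: cannot be decided at compile time
    }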

Examples of org.apache.hadoop.hive.ql.parse.PrunedPartitionList

          List<String> sortCols = Utilities.getColumnNamesFromSortCols(destTable.getSortCols());
          bucketGroupBy = matchBucketOrSortedColumns(groupByCols, bucketCols, sortCols);
          if (!bucketGroupBy)
            return;
        } else {
          PrunedPartitionList partsList = null;
          try {
            partsList = PartitionPruner.prune(destTable, this.pGraphContext
                .getOpToPartPruner().get(ts), this.pGraphContext.getConf(),
                table, this.pGraphContext.getPrunedPartitions());
          } catch (HiveException e) {
            // Has to use full name to make sure it does not conflict with
            // org.apache.commons.lang.StringUtils
            LOG.error(org.apache.hadoop.util.StringUtils.stringifyException(e));
            throw new SemanticException(e.getMessage(), e);
          }
          List<Partition> parts = new ArrayList<Partition>();
          parts.addAll(partsList.getConfirmedPartns());
          parts.addAll(partsList.getUnknownPartns());
          for (Partition part : parts) {
            List<String> bucketCols = part.getBucketCols();
            List<String> sortCols = Utilities.getColumnNamesFromSortCols(part.getTPartition().getSd().getSortCols());
            bucketGroupBy = matchBucketOrSortedColumns(groupByCols, bucketCols, sortCols);
            if (!bucketGroupBy)

Examples of org.apache.hadoop.hive.ql.parse.PrunedPartitionList

            // or non-existent, though such sources may still be referenced by the TableScanOperator
            //if it's null then the partition probably doesn't exist so let's use table permission
            if (tbl.isPartitioned() &&
                tableUsePartLevelAuth.get(tbl.getTableName()) == Boolean.TRUE) {
              String alias_id = topOpMap.getKey();
              PrunedPartitionList partsList = PartitionPruner.prune(parseCtx
                  .getTopToTable().get(topOp), parseCtx.getOpToPartPruner()
                  .get(topOp), parseCtx.getConf(), alias_id, parseCtx
                  .getPrunedPartitions());
              Set<Partition> parts = new HashSet<Partition>();
              parts.addAll(partsList.getConfirmedPartns());
              parts.addAll(partsList.getUnknownPartns());
              for (Partition part : parts) {
                List<String> existingCols = part2Cols.get(part);
                if (existingCols == null) {
                  existingCols = new ArrayList<String>();
                }

Examples of org.apache.hadoop.hive.ql.parse.PrunedPartitionList

          orders = new Integer[keys.size()];
        }

        Table tbl = topToTable.get(tso);
        if(tbl.isPartitioned()) {
          PrunedPartitionList prunedParts;
          try {
            prunedParts = pGraphContext.getOpToPartList().get(tso);
            if (prunedParts == null) {
              prunedParts = PartitionPruner.prune(tbl, pGraphContext.getOpToPartPruner().get(tso), pGraphContext.getConf(), alias,
                pGraphContext.getPrunedPartitions());
              pGraphContext.getOpToPartList().put(tso, prunedParts);
            }
          } catch (HiveException e) {
            // Has to use full name to make sure it does not conflict with
            // org.apache.commons.lang.StringUtils
            LOG.error(org.apache.hadoop.util.StringUtils.stringifyException(e));
            throw new SemanticException(e.getMessage(), e);
          }
          List<Partition> partitions = prunedParts.getNotDeniedPartns();
          // construct a mapping of (Partition->bucket file names) and (Partition -> bucket number)
          if (partitions.isEmpty()) {
            if (!alias.equals(baseBigAlias)) {
              aliasToPartitionBucketNumberMapping.put(alias, Arrays.<Integer>asList());
              aliasToPartitionBucketFileNamesMapping.put(alias, new ArrayList<List<String>>());

Examples of org.apache.hadoop.hive.ql.parse.PrunedPartitionList

      if (!table.isPartitioned()) {
        List<String> sortCols = Utilities.getColumnNamesFromSortCols(table.getSortCols());
        List<String> bucketCols = table.getBucketCols();
        return matchBucketSortCols(groupByCols, bucketCols, sortCols);
      } else {
        PrunedPartitionList partsList = null;
        try {
          partsList = pGraphContext.getOpToPartList().get(tableScanOp);
          if (partsList == null) {
            partsList = PartitionPruner.prune(table,
                pGraphContext.getOpToPartPruner().get(tableScanOp),
                pGraphContext.getConf(),
                table.getTableName(),
                pGraphContext.getPrunedPartitions());
            pGraphContext.getOpToPartList().put(tableScanOp, partsList);
          }
        } catch (HiveException e) {
          LOG.error(StringUtils.stringifyException(e));
          throw new SemanticException(e.getMessage(), e);
        }

        GroupByOptimizerSortMatch currentMatch = GroupByOptimizerSortMatch.COMPLETE_MATCH;
        for (Partition part : partsList.getNotDeniedPartns()) {
          List<String> sortCols = part.getSortColNames();
          List<String> bucketCols = part.getBucketCols();
          GroupByOptimizerSortMatch match = matchBucketSortCols(groupByCols, bucketCols, sortCols);
          if (match == GroupByOptimizerSortMatch.NO_MATCH) {
            return match;
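The last two examples share the same lookup-or-prune-and-cache pattern: the parse/graph context keeps a per-TableScanOperator cache of pruned partition lists, and optimizer rules consult it before invoking the pruner. A condensed sketch of that pattern, reusing the names from those snippets (pGraphContext, tso, tbl, alias):

    // Condensed sketch of the caching pattern used by the two optimizer rules
    // above; all names come from those snippets.
    PrunedPartitionList prunedParts = pGraphContext.getOpToPartList().get(tso);
    if (prunedParts == null) {
      prunedParts = PartitionPruner.prune(tbl,
          pGraphContext.getOpToPartPruner().get(tso),
          pGraphContext.getConf(), alias,
          pGraphContext.getPrunedPartitions());
      // Cache the result so later rules do not re-prune the same table scan.
      pGraphContext.getOpToPartList().put(tso, prunedParts);
    }
    // Only partitions that were not pruned away are inspected further.
    for (Partition part : prunedParts.getNotDeniedPartns()) {
      List<String> bucketCols = part.getBucketCols();
      List<String> sortCols = part.getSortColNames();
      // ... bucket/sort metadata checks as in the snippets above ...
    }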