Examples of FetchWork


Examples of org.apache.hadoop.hive.ql.plan.FetchWork

      assert localPlan.getAliasToFetchWork().get(alias_id) == null;
      localPlan.getAliasToWork().put(alias_id, topOp);
      if (tblDir == null) {
        localPlan.getAliasToFetchWork().put(
            alias_id,
            new FetchWork(FetchWork.convertPathToStringArray(partDir), partDesc));
      } else {
        localPlan.getAliasToFetchWork().put(alias_id,
            new FetchWork(tblDir.toString(), tblDesc));
      }
      plan.setMapLocalWork(localPlan);
    }
  }
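The snippet above builds a map-side local plan: each aliased input gets both its operator tree (aliasToWork) and a FetchWork describing where to read its rows. A partitioned source is described by parallel lists of partition directories and partitionDescs; an unpartitioned source needs only its single table directory and tableDesc. A minimal sketch of the two constructor shapes, with all paths, descriptors, and variable names illustrative rather than taken from the source:

      // Hypothetical partition directories for a table partitioned by ds.
      ArrayList<Path> partDirs = new ArrayList<Path>();
      partDirs.add(new Path("/warehouse/src/ds=2009-01-01"));
      partDirs.add(new Path("/warehouse/src/ds=2009-01-02"));

      // Partitioned input: parallel partition-directory and descriptor lists.
      FetchWork partitionedFetch =
          new FetchWork(FetchWork.convertPathToStringArray(partDirs), partDescs);

      // Unpartitioned input: one table directory plus its table descriptor.
      FetchWork tableFetch =
          new FetchWork(new Path("/warehouse/dst").toString(), tblDesc);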

Examples of org.apache.hadoop.hive.ql.plan.FetchWork

      }

      assert localPlan.getAliasToWork().get(alias) == null;
      assert localPlan.getAliasToFetchWork().get(alias) == null;
      localPlan.getAliasToWork().put(alias, topOp);
      localPlan.getAliasToFetchWork().put(alias, new FetchWork(alias, tt_desc));
      plan.setMapLocalWork(localPlan);
    }
  }
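Here the FetchWork is keyed by the alias itself instead of a filesystem path: tt_desc describes an intermediate (temporary) table whose location is resolved when the plan runs. A hedged sketch of how the enclosing local plan is assembled, with the MapredLocalWork constructor arguments paraphrased from the later snippets on this page and all names illustrative:

      MapredLocalWork localPlan = plan.getMapLocalWork();
      if (localPlan == null) {
        localPlan = new MapredLocalWork(
            new LinkedHashMap<String, Operator<? extends Serializable>>(),
            new LinkedHashMap<String, FetchWork>());
      }
      localPlan.getAliasToWork().put(alias, topOp);
      localPlan.getAliasToFetchWork().put(alias, new FetchWork(alias, ttDesc));
      plan.setMapLocalWork(localPlan);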

Examples of org.apache.hadoop.hive.ql.plan.fetchWork

    prop.setProperty(Constants.SERIALIZATION_NULL_FORMAT, " ");
    // the schema string arrives as "columnNames#columnTypes"
    String[] colTypes = schema.split("#");
    prop.setProperty("columns", colTypes[0]);
    prop.setProperty("columns.types", colTypes[1]);

    // read the query's result file as a text table; -1 means no row limit
    fetchWork fetch = new fetchWork(
      ctx.getResFile(),
      new tableDesc(LazySimpleSerDe.class, TextInputFormat.class, IgnoreKeyTextOutputFormat.class, prop),
      -1
    );
    fetch.setSerializationNullFormat(" ");
    return TaskFactory.get(fetch, this.conf);
  }
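This helper turns the session's result file into a fetch task: the incoming schema string has the form "columnNames#columnTypes", and the three-argument constructor takes the source path, a text-format table descriptor, and a row limit, where -1 means unlimited. Note that this and the following snippets use Hive's original lowercase plan-class names (fetchWork, tableDesc, partitionDesc); later releases renamed them to FetchWork, TableDesc, and PartitionDesc. The constructor shapes seen across this page, as a sketch with paraphrased argument names:

      new fetchWork(tblDir.toString(), tblDesc);          // one table directory
      new fetchWork(resFile, resultTableDesc, -1);        // same, with a row limit (-1 = none)
      new fetchWork(fetchWork.convertPathToStringArray(partDir), partDesc);
      new fetchWork(listP, partP, outerQueryLimit);       // partition lists, with a limit
      new fetchWork(alias, tt_desc);                      // alias in place of a path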

Examples of org.apache.hadoop.hive.ql.plan.fetchWork

    prop.setProperty(Constants.SERIALIZATION_NULL_FORMAT, " ");
    String[] colTypes = schema.split("#");
    prop.setProperty("columns", colTypes[0]);
    prop.setProperty("columns.types", colTypes[1]);

    // as above, but from a revision where getResFile() no longer returns a
    // String, hence the explicit toString()
    fetchWork fetch = new fetchWork(
      ctx.getResFile().toString(),
      new tableDesc(LazySimpleSerDe.class, TextInputFormat.class, IgnoreKeyTextOutputFormat.class, prop),
      -1
    );
    fetch.setSerializationNullFormat(" ");
    return TaskFactory.get(fetch, this.conf);
  }

Examples of org.apache.hadoop.hive.ql.plan.fetchWork

      assert localPlan.getAliasToWork().get(alias_id) == null;
      assert localPlan.getAliasToFetchWork().get(alias_id) == null;
      localPlan.getAliasToWork().put(alias_id, topOp);
      if (tblDir == null) {
        localPlan.getAliasToFetchWork().put(alias_id,
            new fetchWork(fetchWork.convertPathToStringArray(partDir), partDesc));
      } else {
        localPlan.getAliasToFetchWork().put(alias_id,
            new fetchWork(tblDir.toString(), tblDesc));
      }
      plan.setMapLocalWork(localPlan);
    }
  }

Examples of org.apache.hadoop.hive.ql.plan.fetchWork

                                        new LinkedHashMap<String, fetchWork>());

      assert localPlan.getAliasToWork().get(alias) == null;
      assert localPlan.getAliasToFetchWork().get(alias) == null;
      localPlan.getAliasToWork().put(alias, topOp);
      localPlan.getAliasToFetchWork().put(alias, new fetchWork(alias, tt_desc));
      plan.setMapLocalWork(localPlan);
    }
  }

Examples of org.apache.hadoop.hive.ql.plan.fetchWork

    return null;
  }

  @SuppressWarnings("nls")
  private void genMapRedTasks(QB qb) throws SemanticException {
    fetchWork fetch = null;
    List<Task<? extends Serializable>> mvTask = new ArrayList<Task<? extends Serializable>>();
    Task<? extends Serializable> fetchTask = null;

    QBParseInfo qbParseInfo = qb.getParseInfo();

    // Does this query need a reduce job?
    if (qb.isSelectStarQuery()
        && qbParseInfo.getDestToClusterBy().isEmpty()
        && qbParseInfo.getDestToDistributeBy().isEmpty()
        && qbParseInfo.getDestToOrderBy().isEmpty()
        && qbParseInfo.getDestToSortBy().isEmpty()) {
      boolean noMapRed = false;

      Iterator<Map.Entry<String, Table>> iter = qb.getMetaData().getAliasToTable().entrySet().iterator();
      Table tab = ((Map.Entry<String, Table>)iter.next()).getValue();
      if (!tab.isPartitioned()) {
        if (qbParseInfo.getDestToWhereExpr().isEmpty()) {
          fetch = new fetchWork(tab.getPath().toString(), Utilities.getTableDesc(tab), qb.getParseInfo().getOuterQueryLimit());
          noMapRed = true;
          inputs.add(new ReadEntity(tab));
        }
      }
      else {

        if (topOps.size() == 1) {
          TableScanOperator ts = (TableScanOperator)topOps.values().toArray()[0];

          // check if the pruner only contains partition columns
          if (PartitionPruner.onlyContainsPartnCols(topToTable.get(ts), opToPartPruner.get(ts))) {

            PrunedPartitionList partsList = null;
            try {
              partsList = PartitionPruner.prune(topToTable.get(ts), opToPartPruner.get(ts), conf, (String)topOps.keySet().toArray()[0], prunedPartitions);
            } catch (HiveException e) {
              // Has to use full name to make sure it does not conflict with org.apache.commons.lang.StringUtils
              LOG.error(org.apache.hadoop.util.StringUtils.stringifyException(e));
              throw new SemanticException(e.getMessage(), e);
            }

            // If there is any unknown partition, create a map-reduce job for the filter to prune correctly
            if (partsList.getUnknownPartns().size() == 0) {
              List<String> listP = new ArrayList<String>();
              List<partitionDesc> partP = new ArrayList<partitionDesc>();

              Set<Partition> parts = partsList.getConfirmedPartns();
              Iterator<Partition> iterParts = parts.iterator();
              while (iterParts.hasNext()) {
                Partition part = iterParts.next();
                listP.add(part.getPartitionPath().toString());
                try {
                  partP.add(Utilities.getPartitionDesc(part));
                } catch (HiveException e) {
                  throw new SemanticException(e.getMessage(), e);
                }
                inputs.add(new ReadEntity(part));
              }

              fetch = new fetchWork(listP, partP, qb.getParseInfo().getOuterQueryLimit());
              noMapRed = true;
            }
          }
        }
      }

      if (noMapRed) {
        fetchTask = TaskFactory.get(fetch, this.conf);
        setFetchTask(fetchTask);

        // remove root tasks if any
        rootTasks.clear();
        return;
      }
    }

    // In case of a select, use a fetch task instead of a move task
    if (qb.getIsQuery()) {
      if ((!loadTableWork.isEmpty()) || (loadFileWork.size() != 1)) {
        throw new SemanticException(ErrorMsg.GENERIC_ERROR.getMsg());
      }
      String cols = loadFileWork.get(0).getColumns();
      String colTypes = loadFileWork.get(0).getColumnTypes();

      fetch = new fetchWork(new Path(loadFileWork.get(0).getSourceDir()).toString(),
                            new tableDesc(LazySimpleSerDe.class, TextInputFormat.class,
                                           IgnoreKeyTextOutputFormat.class,
                                           Utilities.makeProperties(
                                            org.apache.hadoop.hive.serde.Constants.SERIALIZATION_FORMAT, "" + Utilities.ctrlaCode,
                                            org.apache.hadoop.hive.serde.Constants.LIST_COLUMNS, cols,
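Taken together, the guards above decide when Hive can answer a query with a bare fetch task instead of launching MapReduce. Some illustrative cases, assuming a hypothetical table t partitioned by ds (these queries are not from the source):

      // SELECT * FROM t;                       -- fetch task: select *, no extra clauses
      // SELECT * FROM t WHERE ds = '2009-01';  -- fetch task, if the pruner leaves no
      //                                        --   "unknown" partitions
      // SELECT * FROM t WHERE key > 10;        -- map-reduce: filter on a non-partition column
      // SELECT * FROM t ORDER BY key;          -- map-reduce: order by present
      // SELECT key FROM t;                     -- map-reduce: not a select * query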

Examples of org.apache.hadoop.hive.ql.plan.fetchWork

    return null;
  }

  @SuppressWarnings("nls")
  private void genMapRedTasks(QB qb) throws SemanticException {
    fetchWork fetch = null;
    List<Task<? extends Serializable>> mvTask = new ArrayList<Task<? extends Serializable>>();
    Task<? extends Serializable> fetchTask = null;

    QBParseInfo qbParseInfo = qb.getParseInfo();
    if (qb.isSelectStarQuery()
        && qbParseInfo.getDestToClusterBy().isEmpty()
        && qbParseInfo.getDestToDistributeBy().isEmpty()
        && qbParseInfo.getDestToOrderBy().isEmpty()
        && qbParseInfo.getDestToSortBy().isEmpty()) {
      Iterator<Map.Entry<String, Table>> iter = qb.getMetaData().getAliasToTable().entrySet().iterator();
      Table tab = ((Map.Entry<String, Table>)iter.next()).getValue();
      if (!tab.isPartitioned()) {
        if (qbParseInfo.getDestToWhereExpr().isEmpty())
          fetch = new fetchWork(tab.getPath().toString(), Utilities.getTableDesc(tab), qb.getParseInfo().getOuterQueryLimit());
        inputs.add(new ReadEntity(tab));
      }
      else {
        if (aliasToPruner.size() == 1) {
          Iterator<Map.Entry<String, org.apache.hadoop.hive.ql.parse.ASTPartitionPruner>> iterP =
            aliasToPruner.entrySet().iterator();
          org.apache.hadoop.hive.ql.parse.ASTPartitionPruner pr =
            ((Map.Entry<String, org.apache.hadoop.hive.ql.parse.ASTPartitionPruner>)iterP.next()).getValue();
          if (pr.onlyContainsPartitionCols()) {
            List<String> listP = new ArrayList<String>();
            List<partitionDesc> partP = new ArrayList<partitionDesc>();
            PrunedPartitionList partsList = null;
            Set<Partition> parts = null;
            try {
              partsList = pr.prune();
              // If there is any unknown partition, create a map-reduce job for the filter to prune correctly
              if (partsList.getUnknownPartns().size() == 0) {
                parts = partsList.getConfirmedPartns();
                Iterator<Partition> iterParts = parts.iterator();
                while (iterParts.hasNext()) {
                  Partition part = iterParts.next();
                  listP.add(part.getPartitionPath().toString());
                  partP.add(Utilities.getPartitionDesc(part));
                  inputs.add(new ReadEntity(part));
                }
                fetch = new fetchWork(listP, partP, qb.getParseInfo().getOuterQueryLimit());
              }
            } catch (HiveException e) {
              // Has to use full name to make sure it does not conflict with org.apache.commons.lang.StringUtils
              LOG.error(org.apache.hadoop.util.StringUtils.stringifyException(e));
              throw new SemanticException(e.getMessage(), e);
            }
          }
        }
      }
      if (fetch != null) {
        fetchTask = TaskFactory.get(fetch, this.conf);
        setFetchTask(fetchTask);
        return;
      }
    }

    // In case of a select, use a fetch task instead of a move task
    if (qb.getIsQuery()) {
      if ((!loadTableWork.isEmpty()) || (loadFileWork.size() != 1)) {
        throw new SemanticException(ErrorMsg.GENERIC_ERROR.getMsg());
      }
      String cols = loadFileWork.get(0).getColumns();
      String colTypes = loadFileWork.get(0).getColumnTypes();
   
      fetch = new fetchWork(new Path(loadFileWork.get(0).getSourceDir()).toString(),
                            new tableDesc(LazySimpleSerDe.class, TextInputFormat.class,
                                           IgnoreKeyTextOutputFormat.class,
                                           Utilities.makeProperties(
                                            org.apache.hadoop.hive.serde.Constants.SERIALIZATION_FORMAT, "" + Utilities.ctrlaCode,
                                            org.apache.hadoop.hive.serde.Constants.LIST_COLUMNS, cols,
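This earlier revision reaches the same decision through a per-alias ASTPartitionPruner rather than the operator-keyed PartitionPruner of the previous snippet; the safety test is identical and hinges on how PrunedPartitionList buckets partitions (method names taken from the snippets):

      // partsList.getConfirmedPartns() - partitions proven to satisfy the predicate
      // partsList.getUnknownPartns()   - partitions the compile-time pruner could not decide
      // The fetch-only shortcut is taken only when getUnknownPartns() is empty; any
      // unknown partition forces a map-reduce job to apply the filter at run time.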
