Package org.apache.hadoop.hive.ql.hooks

Examples of org.apache.hadoop.hive.ql.hooks.ReadEntity

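A ReadEntity represents something a query reads, a table or a partition; its counterpart WriteEntity represents something the query writes. Hive's semantic analyzers and DDL tasks register these objects on the plan's input and output sets so that execution hooks and the authorization layer can see exactly which tables and partitions a statement touches. The snippets below all follow the same pattern, sketched here in minimal form (the class and method names are illustrative scaffolding; ReadEntity, WriteEntity, Hive, and Table are the real Hive classes):

    import java.util.Set;

    import org.apache.hadoop.hive.ql.hooks.ReadEntity;
    import org.apache.hadoop.hive.ql.hooks.WriteEntity;
    import org.apache.hadoop.hive.ql.metadata.Hive;
    import org.apache.hadoop.hive.ql.metadata.HiveException;
    import org.apache.hadoop.hive.ql.metadata.Table;

    public class LineageSketch {
      // Record that a DDL statement both reads and rewrites a table.
      static void recordTableLineage(Hive db, String tableName,
          Set<ReadEntity> inputs, Set<WriteEntity> outputs) throws HiveException {
        Table tab = db.getTable(tableName);
        // The old definition is read...
        inputs.add(new ReadEntity(tab));
        // ...and the new definition is written; DDL_NO_LOCK signals that
        // the DDL layer has already acquired any locks it needs.
        outputs.add(new WriteEntity(tab, WriteEntity.WriteType.DDL_NO_LOCK));
      }
    }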

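From the DDL task's handling of ALTER TABLE ... TOUCH: whether a whole table or a single partition is touched, the altered object is registered as both a ReadEntity input and a DDL_NO_LOCK WriteEntity output.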
      try {
        db.alterTable(tblName, tbl);
      } catch (InvalidOperationException e) {
        throw new HiveException("Unable to update table", e);
      }
      work.getInputs().add(new ReadEntity(tbl));
      work.getOutputs().add(new WriteEntity(tbl, WriteEntity.WriteType.DDL_NO_LOCK));
    } else {
      Partition part = db.getPartition(tbl, touchDesc.getPartSpec(), false);
      if (part == null) {
        throw new HiveException("Specified partition does not exist");
      }
      try {
        db.alterPartition(tblName, part);
      } catch (InvalidOperationException e) {
        throw new HiveException(e);
      }
      work.getInputs().add(new ReadEntity(part));
      work.getOutputs().add(new WriteEntity(part, WriteEntity.WriteType.DDL_NO_LOCK));
    }
    return 0;
  }


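Completing an alter-table operation: a single partition, a list of partitions, or the table itself may have been altered, and each affected object is paired as a ReadEntity input and a DDL_NO_LOCK WriteEntity output.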
    // This is kind of hacky - the read entity contains the old table, whereas the write entity
    // contains the new table. This is needed for rename - both the old and the new table names
    // are passed.
    // Don't acquire locks for any of these; we have already asked for them in DDLSemanticAnalyzer.
    if(part != null) {
      work.getInputs().add(new ReadEntity(part));
      work.getOutputs().add(new WriteEntity(part, WriteEntity.WriteType.DDL_NO_LOCK));
    } else if (allPartitions != null) {
      for (Partition tmpPart : allPartitions) {
        work.getInputs().add(new ReadEntity(tmpPart));
        work.getOutputs().add(new WriteEntity(tmpPart, WriteEntity.WriteType.DDL_NO_LOCK));
      }
    } else {
      work.getInputs().add(new ReadEntity(oldTbl));
      work.getOutputs().add(new WriteEntity(tbl, WriteEntity.WriteType.DDL_NO_LOCK));
    }
    return 0;
  }

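Export planning: a CopyWork task copies each partition (or, for an unpartitioned table, its data directory) to the export location, each copied source is recorded as a ReadEntity input, and the target directory becomes the WriteEntity output.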
        Path toPartPath = new Path(parentPath, partition.getName());
        Task<? extends Serializable> rTask = TaskFactory.get(
            new CopyWork(fromPath, toPartPath, false),
            conf);
        rootTasks.add(rTask);
        inputs.add(new ReadEntity(partition));
      }
    } else {
      Path fromPath = ts.tableHandle.getDataLocation();
      Path toDataPath = new Path(parentPath, "data");
      Task<? extends Serializable> rTask = TaskFactory.get(new CopyWork(
          fromPath, toDataPath, false), conf);
      rootTasks.add(rTask);
      inputs.add(new ReadEntity(ts.tableHandle));
    }
    outputs.add(new WriteEntity(parentPath, toURI.getScheme().equals("hdfs")));
  }

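Map-reduce plan generation: every partition read by a table scan is added as a ReadEntity; if the table is unpartitioned, the table itself is added instead.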
    plan.getAliasToPartnInfo().put(alias_id, aliasPartnDesc);

    for (Partition part : parts) {
      if (part.getTable().isPartitioned()) {
        inputs.add(new ReadEntity(part));
      } else {
        inputs.add(new ReadEntity(part.getTable()));
      }

      // Later the properties have to come from the partition as opposed
      // to from the table in order to support versioning.
      Path[] paths;
      sampleDesc sampleDescr = parseCtx.getOpToSamplePruner().get(topOp);

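The same rename bookkeeping in an earlier form: the ReadEntity carries the old table while the WriteEntity carries the new one.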
    }

    // This is kind of hacky - the read entity contains the old table, whereas the write entity
    // contains the new table. This is needed for rename - both the old and the new table names
    // are passed.
    work.getInputs().add(new ReadEntity(oldTbl));
    work.getOutputs().add(new WriteEntity(tbl));
    return 0;
  }

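The fetch-task optimization: queries simple enough to bypass map-reduce still register their inputs, either the unpartitioned table itself or each confirmed partition that survives partition pruning.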
      Table tab = ((Map.Entry<String, Table>)iter.next()).getValue();
      if (!tab.isPartitioned()) {
        if (qbParseInfo.getDestToWhereExpr().isEmpty()) {
          fetch = new fetchWork(tab.getPath().toString(), Utilities.getTableDesc(tab), qb.getParseInfo().getOuterQueryLimit());
          noMapRed = true;
          inputs.add(new ReadEntity(tab));
        }
      } else {

        if (topOps.size() == 1) {
          TableScanOperator ts = (TableScanOperator)topOps.values().toArray()[0];

          // check if the pruner only contains partition columns
          if (PartitionPruner.onlyContainsPartnCols(topToTable.get(ts), opToPartPruner.get(ts))) {

            PrunedPartitionList partsList = null;
            try {
              partsList = PartitionPruner.prune(topToTable.get(ts), opToPartPruner.get(ts), conf, (String)topOps.keySet().toArray()[0], prunedPartitions);
            } catch (HiveException e) {
              // Has to use full name to make sure it does not conflict with org.apache.commons.lang.StringUtils
              LOG.error(org.apache.hadoop.util.StringUtils.stringifyException(e));
              throw new SemanticException(e.getMessage(), e);
            }

            // If there is any unknown partition, create a map-reduce job for the filter to prune correctly
            if (partsList.getUnknownPartns().isEmpty()) {
              List<String> listP = new ArrayList<String>();
              List<partitionDesc> partP = new ArrayList<partitionDesc>();

              for (Partition part : partsList.getConfirmedPartns()) {
                listP.add(part.getPartitionPath().toString());
                try {
                  partP.add(Utilities.getPartitionDesc(part));
                } catch (HiveException e) {
                  throw new SemanticException(e.getMessage(), e);
                }
                inputs.add(new ReadEntity(part));
              }

              fetch = new fetchWork(listP, partP, qb.getParseInfo().getOuterQueryLimit());
              noMapRed = true;
            }

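Analyzing DROP TABLE: if the table resolves, it is added to both inputs and outputs; whether a missing table raises an error depends on IF EXISTS and the DROPIGNORESNONEXISTENT setting.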
    boolean throwException =
        !ifExists && !HiveConf.getBoolVar(conf, ConfVars.DROPIGNORESNONEXISTENT);
    try {
      Table tab = db.getTable(db.getCurrentDatabase(), tableName, throwException);
      if (tab != null) {
        inputs.add(new ReadEntity(tab));
        outputs.add(new WriteEntity(tab));
      }
    } catch (HiveException e) {
      throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tableName));
    }

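Analyzing an alter-table statement with an optional partition spec: the table is always a ReadEntity input, but it becomes a table-level WriteEntity output only when no partition spec is given.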
      tab = db.getTable(db.getCurrentDatabase(), tableName, true);
    } catch (HiveException e) {
      throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tableName));
    }

    inputs.add(new ReadEntity(tab));

    if ((partSpec == null) || (partSpec.isEmpty())) {
      outputs.add(new WriteEntity(tab));
    } else {

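Analyzing ALTER TABLE ... CLUSTERED BY: the table is registered as both input and output before the operation type is validated.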
      tab = db.getTable(db.getCurrentDatabase(), tableName, true);
    } catch (HiveException e) {
      throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tableName));
    }

    inputs.add(new ReadEntity(tab));
    outputs.add(new WriteEntity(tab));

    validateAlterTableType(tab, AlterTableTypes.ADDCLUSTERSORTCOLUMN);

    if (ast.getChildCount() == 1) {

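Analyzing SHOW CREATE TABLE: index tables are rejected, and any other table is recorded as a ReadEntity input.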
      Table tab = db.getTable(tableName, true);
      if (tab.getTableType() == org.apache.hadoop.hive.metastore.TableType.INDEX_TABLE) {
        throw new SemanticException(ErrorMsg.SHOW_CREATETABLE_INDEX.getMsg(tableName
            + " has table type INDEX_TABLE"));
      }
      inputs.add(new ReadEntity(tab));
    } catch (SemanticException e) {
      throw e;
    } catch (HiveException e) {
      throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tableName));
    }
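
Finally, the consumer side: a pre-execution hook receives the inputs and outputs collected above. A minimal sketch against the classic PreExecute hook interface (the class name and message text are illustrative; register the hook by setting hive.exec.pre.hooks to its fully qualified class name):

    import java.util.Set;

    import org.apache.hadoop.hive.ql.hooks.PreExecute;
    import org.apache.hadoop.hive.ql.hooks.ReadEntity;
    import org.apache.hadoop.hive.ql.hooks.WriteEntity;
    import org.apache.hadoop.hive.ql.session.SessionState;
    import org.apache.hadoop.security.UserGroupInformation;

    public class LogInputsHook implements PreExecute {
      public void run(SessionState sess, Set<ReadEntity> inputs,
          Set<WriteEntity> outputs, UserGroupInformation ugi) throws Exception {
        for (ReadEntity input : inputs) {
          // Entity.getName() identifies the table or partition that was read.
          SessionState.getConsole().printInfo("Read: " + input.getName());
        }
      }
    }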