Package org.apache.hadoop.hive.ql.metadata

Examples of org.apache.hadoop.hive.ql.metadata.Table
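The fragments below come from Hive and HCatalog sources and show the main ways the Table wrapper is used: building a new table definition, reading an existing one from the metastore, committing partitioned job output, and validating tables during DDL analysis. As an orientation, here is a minimal sketch (not one of the fragments; the database and table names are placeholders) that wraps the Thrift table returned by a HiveMetaStoreClient and reads a few commonly used fields:

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.ql.metadata.Table;

public class TableLookupSketch {
  // Wrap the Thrift table in ql.metadata.Table and print its location and partition keys.
  public static void describe(HiveMetaStoreClient client) throws Exception {
    Table table = new Table(client.getTable("default", "my_table")); // placeholder names
    // The storage descriptor carries the physical data location of the table.
    Path location = new Path(table.getTTable().getSd().getLocation());
    System.out.println("location: " + location);
    // Partition keys come back as FieldSchema objects (name + type).
    for (FieldSchema key : table.getPartitionKeys()) {
      System.out.println("partition key: " + key.getName() + " : " + key.getType());
    }
  }
}

The first fragment below builds a new Table from a create-table descriptor: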


      }
    }

    if (desc != null) {
      try {
        Table table = context.getHive().newTable(desc.getTableName());
        if (desc.getLocation() != null) {
          table.setDataLocation(new Path(desc.getLocation()).toUri());
        }
        if (desc.getStorageHandler() != null) {
          table.setProperty(
            org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_STORAGE,
            desc.getStorageHandler());
        }
        for (Map.Entry<String, String> prop : tblProps.entrySet()) {
          table.setProperty(prop.getKey(), prop.getValue());
        }
        for (Map.Entry<String, String> prop : desc.getSerdeProps().entrySet()) {
          table.setSerdeParam(prop.getKey(), prop.getValue());
        }
        //TODO: set other Table properties as needed

        // authorize against the table operation so that location permissions, if any, can be checked
View Full Code Here
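The fragment above sets the data location on the new Table, records the storage handler class under the META_TABLE_STORAGE table property, and copies table properties and SerDe parameters onto the object before the table is authorized and created. A condensed sketch of the same pattern follows; the property values and the final createTable call are illustrative assumptions, not part of the fragment:

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.Table;

public class NewTableSketch {
  // Build a table definition through the Hive session handle and register it.
  public static void create(Hive hive) throws Exception {
    Table table = hive.newTable("my_table");                        // placeholder name
    table.setDataLocation(new Path("/warehouse/my_table").toUri()); // placeholder location
    table.setProperty("created.by", "example");                     // arbitrary table property
    table.setSerdeParam("field.delim", ",");                        // arbitrary SerDe parameter
    hive.createTable(table, true);                                  // assumed ifNotExists overload
  }
}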


    }
  }

  public static Table getTable(HiveMetaStoreClient client, String dbName, String tableName)
    throws NoSuchObjectException, TException, MetaException {
    return new Table(client.getTable(dbName, tableName));
  }
View Full Code Here
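This small helper simply wraps the Thrift object returned by the metastore client in the richer ql.metadata.Table API; further down on this page the same signature is invoked as HCatUtil.getTable(client, dbName, tableName). Typical usage is a one-liner such as Table t = getTable(client, "default", "clicks"); (placeholder names).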

    if (dynamicPartitioningUsed){
      discoverPartitions(context);
    }
    OutputJobInfo jobInfo = HCatOutputFormat.getJobInfo(context);
    Configuration conf = context.getConfiguration();
    Table table = new Table(jobInfo.getTableInfo().getTable());
    Path tblPath = new Path(table.getTTable().getSd().getLocation());
    FileSystem fs = tblPath.getFileSystem(conf);

    if (table.getPartitionKeys().size() == 0) {
      // Move data from the temp directory to the actual table directory.
      // No metastore operation is required.
      Path src = new Path(jobInfo.getLocation());
      moveTaskOutputs(fs, src, src, tblPath, false);
      fs.delete(src, true);
      return;
    }

    HiveMetaStoreClient client = null;
    HCatTableInfo tableInfo = jobInfo.getTableInfo();
    List<Partition> partitionsAdded = new ArrayList<Partition>();
    try {
      HiveConf hiveConf = HCatUtil.getHiveConf(conf);
      client = HCatUtil.getHiveClient(hiveConf);
      StorerInfo storer = InternalUtil.extractStorerInfo(table.getTTable().getSd(),table.getParameters());

      FileStatus tblStat = fs.getFileStatus(tblPath);
      String grpName = tblStat.getGroup();
      FsPermission perms = tblStat.getPermission();

      List<Partition> partitionsToAdd = new ArrayList<Partition>();
      if (!dynamicPartitioningUsed) {
        partitionsToAdd.add(
            constructPartition(
                context, jobInfo,
                tblPath.toString(), jobInfo.getPartitionValues(),
                jobInfo.getOutputSchema(), getStorerParameterMap(storer),
                table, fs,
                grpName, perms));
      } else {
        for (Entry<String, Map<String, String>> entry : partitionsDiscoveredByPath.entrySet()) {
          partitionsToAdd.add(
              constructPartition(
                  context, jobInfo,
                  getPartitionRootLocation(entry.getKey(), entry.getValue().size()), entry.getValue(),
                  jobInfo.getOutputSchema(), getStorerParameterMap(storer),
                  table, fs,
                  grpName, perms));
        }
      }

      ArrayList<Map<String,String>> ptnInfos = new ArrayList<Map<String,String>>();
      for(Partition ptn : partitionsToAdd){
        ptnInfos.add(InternalUtil.createPtnKeyValueMap(new Table(tableInfo.getTable()), ptn));
      }

      //Publish the new partition(s)
      if (dynamicPartitioningUsed && harProcessor.isEnabled() && (!partitionsToAdd.isEmpty())){

        Path src = new Path(ptnRootLocation);
        // First pass checks each directory we are about to copy out and errors out if it
        // already exists; the second pass performs the actual move.
        moveTaskOutputs(fs, src, src, tblPath, true);
        moveTaskOutputs(fs, src, src, tblPath, false);
        fs.delete(src, true);
        try {
          updateTableSchema(client, table, jobInfo.getOutputSchema());
          LOG.info("HAR is being used. The table {} has new partitions {}.", table.getTableName(), ptnInfos);
          client.add_partitions(partitionsToAdd);
          partitionsAdded = partitionsToAdd;
        } catch (Exception e){
          // There was an error adding partitions: roll back the filesystem copy and rethrow.
          for (Partition p : partitionsToAdd){
            Path ptnPath = new Path(harProcessor.getParentFSPath(new Path(p.getSd().getLocation())));
            if (fs.exists(ptnPath)){
              fs.delete(ptnPath,true);
            }
          }
          throw e;
        }

      }else{
        // no harProcessor, regular operation
        updateTableSchema(client, table, jobInfo.getOutputSchema());
        LOG.info("HAR is not being used. The table {} has new partitions {}.", table.getTableName(), ptnInfos);
        if (dynamicPartitioningUsed && (partitionsToAdd.size() > 0)){
          Path src = new Path(ptnRootLocation);
          moveTaskOutputs(fs, src, src, tblPath, true);
          moveTaskOutputs(fs, src, src, tblPath, false);
          fs.delete(src, true);
View Full Code Here
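The fragment above is the commit path of an HCatalog output committer: an unpartitioned table simply has its temporary job output moved under the table directory, while a partitioned write builds one Partition per output directory, optionally archives the data (the harProcessor branch), publishes everything with client.add_partitions, and deletes the copied files again if the metastore call fails so that the filesystem and the metastore stay consistent. A stripped-down sketch of that publish-then-rollback pattern (ignoring the HAR-specific parent-path handling in the fragment):

import java.util.List;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.Partition;

public class PartitionPublishSketch {
  // Publish new partitions, deleting their data directories again if the metastore call fails.
  public static void publish(HiveMetaStoreClient client, FileSystem fs,
      List<Partition> partitionsToAdd) throws Exception {
    try {
      client.add_partitions(partitionsToAdd);
    } catch (Exception e) {
      for (Partition p : partitionsToAdd) {
        Path ptnPath = new Path(p.getSd().getLocation());
        if (fs.exists(ptnPath)) {
          fs.delete(ptnPath, true);
        }
      }
      throw e;
    }
  }
}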

    try {

      HiveConf hiveConf = HCatUtil.getHiveConf(conf);
      client = HCatUtil.getHiveClient(hiveConf);
      Table table = HCatUtil.getTable(client, outputJobInfo.getDatabaseName(),
        outputJobInfo.getTableName());

      List<String> indexList = client.listIndexNames(outputJobInfo.getDatabaseName(), outputJobInfo.getTableName(), Short.MAX_VALUE);

      for (String indexName : indexList) {
        Index index = client.getIndex(outputJobInfo.getDatabaseName(), outputJobInfo.getTableName(), indexName);
        if (!index.isDeferredRebuild()) {
          throw new HCatException(ErrorType.ERROR_NOT_SUPPORTED, "Store into a table with an automatic index from Pig/Mapreduce is not supported");
        }
      }
      StorageDescriptor sd = table.getTTable().getSd();

      if (sd.isCompressed()) {
        throw new HCatException(ErrorType.ERROR_NOT_SUPPORTED, "Store into a compressed partition from Pig/Mapreduce is not supported");
      }

      if (sd.getBucketCols() != null && !sd.getBucketCols().isEmpty()) {
        throw new HCatException(ErrorType.ERROR_NOT_SUPPORTED, "Store into a partition with bucket definition from Pig/Mapreduce is not supported");
      }

      if (sd.getSortCols() != null && !sd.getSortCols().isEmpty()) {
        throw new HCatException(ErrorType.ERROR_NOT_SUPPORTED, "Store into a partition with sorted column definition from Pig/Mapreduce is not supported");
      }

      if (table.getTTable().getPartitionKeysSize() == 0) {
        if ((outputJobInfo.getPartitionValues() != null) && (!outputJobInfo.getPartitionValues().isEmpty())) {
          // attempt made to save partition values in non-partitioned table - throw error.
          throw new HCatException(ErrorType.ERROR_INVALID_PARTITION_VALUES,
            "Partition values specified for non-partitioned table");
        }
        // non-partitioned table
        outputJobInfo.setPartitionValues(new HashMap<String, String>());

      } else {
        // partitioned table, we expect partition values
        // convert user specified map to have lower case key names
        Map<String, String> valueMap = new HashMap<String, String>();
        if (outputJobInfo.getPartitionValues() != null) {
          for (Map.Entry<String, String> entry : outputJobInfo.getPartitionValues().entrySet()) {
            valueMap.put(entry.getKey().toLowerCase(), entry.getValue());
          }
        }

        if ((outputJobInfo.getPartitionValues() == null)
          || (outputJobInfo.getPartitionValues().size() < table.getTTable().getPartitionKeysSize())) {
          // Dynamic partitioning use case: partition values were null or not all of them
          // were specified, so figure out which keys are missing.
          List<String> dynamicPartitioningKeys = new ArrayList<String>();
          for (FieldSchema fs : table.getPartitionKeys()) {
            if (!valueMap.containsKey(fs.getName().toLowerCase())) {
              dynamicPartitioningKeys.add(fs.getName().toLowerCase());
            }
          }

          if (valueMap.size() + dynamicPartitioningKeys.size() != table.getTTable().getPartitionKeysSize()) {
            // If this isn't equal, then bogus key values have been inserted, error out.
            throw new HCatException(ErrorType.ERROR_INVALID_PARTITION_VALUES, "Invalid partition keys specified");
          }

          outputJobInfo.setDynamicPartitioningKeys(dynamicPartitioningKeys);
          // Assign a random job id for dynamic partitioning if one has not been set yet.
          String dynHash;
          if ((dynHash = conf.get(HCatConstants.HCAT_DYNAMIC_PTN_JOBID)) == null) {
            dynHash = String.valueOf(Math.random());
          }
          conf.set(HCatConstants.HCAT_DYNAMIC_PTN_JOBID, dynHash);

        }

        outputJobInfo.setPartitionValues(valueMap);
      }

      // To get around hbase failure on single node, see BUG-4383
      conf.set("dfs.client.read.shortcircuit", "false");
      HCatSchema tableSchema = HCatUtil.extractSchema(table);
      StorerInfo storerInfo =
        InternalUtil.extractStorerInfo(table.getTTable().getSd(), table.getParameters());

      List<String> partitionCols = new ArrayList<String>();
      for (FieldSchema schema : table.getPartitionKeys()) {
        partitionCols.add(schema.getName());
      }

      HiveStorageHandler storageHandler = HCatUtil.getStorageHandler(conf, storerInfo);

      //Serialize the output info into the configuration
      outputJobInfo.setTableInfo(HCatTableInfo.valueOf(table.getTTable()));
      outputJobInfo.setOutputSchema(tableSchema);
      harRequested = getHarRequested(hiveConf);
      outputJobInfo.setHarRequested(harRequested);
      maxDynamicPartitions = getMaxDynamicPartitions(hiveConf);
      outputJobInfo.setMaximumDynamicPartitions(maxDynamicPartitions);

      HCatUtil.configureOutputStorageHandler(storageHandler, conf, outputJobInfo);

      Path tblPath = new Path(table.getTTable().getSd().getLocation());

      /* Set the umask in conf so that files/dirs get created with table-dir
       * permissions. The following three assumptions are made:
       * 1. Actual file/dir creation is done by the RecordWriter of the underlying
       *    output format, which is assumed to use default permissions during creation.
View Full Code Here
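The fragment above runs at output-setup time: it refuses tables with automatic (non-deferred-rebuild) indexes and with compressed, bucketed, or sorted storage descriptors, then reconciles the caller's partition values with the table's partition keys (treating missing keys as dynamic partitions) and serializes the table schema and storer information into the job configuration. The storage-descriptor checks distill to something like the following sketch (a sketch, not the HCatalog implementation itself):

import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.ql.metadata.Table;

public class WriteChecksSketch {
  // True if the table's storage is plain enough to be written from MapReduce/Pig:
  // not compressed, not bucketed, and not sorted.
  public static boolean isPlainStorage(Table table) {
    StorageDescriptor sd = table.getTTable().getSd();
    if (sd.isCompressed()) {
      return false;
    }
    if (sd.getBucketCols() != null && !sd.getBucketCols().isEmpty()) {
      return false;
    }
    return sd.getSortCols() == null || sd.getSortCols().isEmpty();
  }
}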

          // CTAS
          // make the movetask's destination directory the table's destination.
          String location = qb.getTableDesc().getLocation();
          if (location == null) {
            // get the table's default location
            Table dumpTable;
            Path targetPath;
            try {
              dumpTable = db.newTable(qb.getTableDesc().getTableName());
              if (!db.databaseExists(dumpTable.getDbName())) {
                throw new SemanticException("ERROR: The database " + dumpTable.getDbName()
                    + " does not exist.");
              }
              Warehouse wh = new Warehouse(conf);
              targetPath = wh.getTablePath(db.getDatabase(dumpTable.getDbName()), dumpTable
                  .getTableName());
            } catch (HiveException e) {
              throw new SemanticException(e);
            } catch (MetaException e) {
              throw new SemanticException(e);
View Full Code Here
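In the CTAS branch above, a table created without an explicit LOCATION gets its destination from the warehouse layout: a throw-away Table object is built just to resolve the database and table names, and Warehouse.getTablePath computes the default path. A condensed sketch of that lookup (assuming, as in the fragment, that db is the session's Hive handle and conf a HiveConf):

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.Warehouse;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.Table;

public class DefaultLocationSketch {
  // Resolve the default warehouse path for a table that has not been created yet.
  public static Path defaultTablePath(Hive db, HiveConf conf, String tableName) throws Exception {
    Table dumpTable = db.newTable(tableName);
    Warehouse wh = new Warehouse(conf);
    return wh.getTablePath(db.getDatabase(dumpTable.getDbName()), dumpTable.getTableName());
  }
}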

        }
      }
    }

    if (subject.getTable()) {
      Table tbl = getTable(subject.getObject(), true);
      if (subject.getPartSpec() != null) {
        Partition part = getPartition(tbl, subject.getPartSpec(), true);
        outputs.add(new WriteEntity(part));
      } else {
        outputs.add(new WriteEntity(tbl));
View Full Code Here
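Here the privilege/authorization subject is resolved into a Table (and, when a partition spec is given, a Partition), and the result is recorded as a WriteEntity among the operation's outputs.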

        conf));

  }

  private void analyzeExchangePartition(ASTNode ast) throws SemanticException {
    Table sourceTable =  getTable(getUnescapedName((ASTNode)ast.getChild(0)));
    Table destTable = getTable(getUnescapedName((ASTNode)ast.getChild(2)));

    // Get the partition specs
    Map<String, String> partSpecs = getPartSpec((ASTNode) ast.getChild(1));
    validatePartitionValues(partSpecs);
    boolean sameColumns = MetaStoreUtils.compareFieldColumns(
        sourceTable.getAllCols(), destTable.getAllCols());
    boolean samePartitions = MetaStoreUtils.compareFieldColumns(
        sourceTable.getPartitionKeys(), destTable.getPartitionKeys());
    if (!sameColumns || !samePartitions) {
      throw new SemanticException(ErrorMsg.TABLES_INCOMPATIBLE_SCHEMAS.getMsg());
    }
    List<Partition> partitions = getPartitions(sourceTable, partSpecs, true);
View Full Code Here
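ALTER TABLE ... EXCHANGE PARTITION above only proceeds when the two tables agree on both the regular columns and the partition keys: MetaStoreUtils.compareFieldColumns is applied to getAllCols() and getPartitionKeys() of each Table, and a mismatch surfaces as TABLES_INCOMPATIBLE_SCHEMAS.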

    boolean ifExists = (ast.getFirstChildWithType(HiveParser.TOK_IFEXISTS) != null);
    // we want to signal an error if the table/view doesn't exist and we're
    // configured not to fail silently
    boolean throwException =
        !ifExists && !HiveConf.getBoolVar(conf, ConfVars.DROPIGNORESNONEXISTENT);
    Table tab = getTable(tableName, throwException);
    if (tab != null) {
      inputs.add(new ReadEntity(tab));
      outputs.add(new WriteEntity(tab));
    }
View Full Code Here
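In the DROP TABLE/VIEW analysis above, the lookup only throws when neither IF EXISTS was given nor ConfVars.DROPIGNORESNONEXISTENT is enabled; a table that is found is registered as both a ReadEntity and a WriteEntity so that authorization and locking see the operation.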

  private void analyzeTruncateTable(ASTNode ast) throws SemanticException {
    ASTNode root = (ASTNode) ast.getChild(0); // TOK_TABLE_PARTITION
    String tableName = getUnescapedName((ASTNode) root.getChild(0));

    Table table = getTable(tableName, true);
    if (table.getTableType() != TableType.MANAGED_TABLE) {
      throw new SemanticException(ErrorMsg.TRUNCATE_FOR_NON_MANAGED_TABLE.format(tableName));
    }
    if (table.isNonNative()) {
      throw new SemanticException(ErrorMsg.TRUNCATE_FOR_NON_NATIVE_TABLE.format(tableName)); //TODO
    }
    if (!table.isPartitioned() && root.getChildCount() > 1) {
      throw new SemanticException(ErrorMsg.PARTSPEC_FOR_NON_PARTITIONED_TABLE.format(tableName));
    }
    Map<String, String> partSpec = getPartSpec((ASTNode) root.getChild(1));
    if (partSpec == null) {
      if (!table.isPartitioned()) {
        outputs.add(new WriteEntity(table));
      } else {
        for (Partition partition : getPartitions(table, null, false)) {
          outputs.add(new WriteEntity(partition));
        }
      }
    } else {
      if (isFullSpec(table, partSpec)) {
        Partition partition = getPartition(table, partSpec, true);
        outputs.add(new WriteEntity(partition));
      } else {
        for (Partition partition : getPartitions(table, partSpec, false)) {
          outputs.add(new WriteEntity(partition));
        }
      }
    }

    TruncateTableDesc truncateTblDesc = new TruncateTableDesc(tableName, partSpec);

    DDLWork ddlWork = new DDLWork(getInputs(), getOutputs(), truncateTblDesc);
    Task<? extends Serializable> truncateTask = TaskFactory.get(ddlWork, conf);

    // Is this a truncate column command
    List<String> columnNames = null;
    if (ast.getChildCount() == 2) {
      try {
        columnNames = getColumnNames((ASTNode)ast.getChild(1));

        // Throw an error if the table is indexed
        List<Index> indexes = db.getIndexes(table.getDbName(), tableName, (short)1);
        if (indexes != null && indexes.size() > 0) {
          throw new SemanticException(ErrorMsg.TRUNCATE_COLUMN_INDEXED_TABLE.getMsg());
        }

        List<String> bucketCols = null;
        Class<? extends InputFormat> inputFormatClass = null;
        boolean isArchived = false;
        Path newTblPartLoc = null;
        Path oldTblPartLoc = null;
        List<FieldSchema> cols = null;
        ListBucketingCtx lbCtx = null;
        boolean isListBucketed = false;
        List<String> listBucketColNames = null;

        if (table.isPartitioned()) {
          Partition part = db.getPartition(table, partSpec, false);

          Path tabPath = table.getPath();
          Path partPath = part.getPartitionPath();

          // if the table is in a different dfs than the partition,
          // replace the partition's dfs with the table's dfs.
          newTblPartLoc = new Path(tabPath.toUri().getScheme(), tabPath.toUri()
              .getAuthority(), partPath.toUri().getPath());

          oldTblPartLoc = partPath;

          cols = part.getCols();
          bucketCols = part.getBucketCols();
          inputFormatClass = part.getInputFormatClass();
          isArchived = ArchiveUtils.isArchived(part);
          lbCtx = constructListBucketingCtx(part.getSkewedColNames(), part.getSkewedColValues(),
              part.getSkewedColValueLocationMaps(), part.isStoredAsSubDirectories(), conf);
          isListBucketed = part.isStoredAsSubDirectories();
          listBucketColNames = part.getSkewedColNames();
        } else {
          // input and output are the same
          oldTblPartLoc = table.getPath();
          newTblPartLoc = table.getPath();
          cols  = table.getCols();
          bucketCols = table.getBucketCols();
          inputFormatClass = table.getInputFormatClass();
          lbCtx = constructListBucketingCtx(table.getSkewedColNames(), table.getSkewedColValues(),
              table.getSkewedColValueLocationMaps(), table.isStoredAsSubDirectories(), conf);
          isListBucketed = table.isStoredAsSubDirectories();
          listBucketColNames = table.getSkewedColNames();
        }

        // throw a SemanticException for non-RCFile tables; truncating columns is only supported for RCFile.
        if (!inputFormatClass.equals(RCFileInputFormat.class)) {
          throw new SemanticException(ErrorMsg.TRUNCATE_COLUMN_NOT_RC.getMsg());
View Full Code Here
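TRUNCATE TABLE, as analyzed above, is restricted to managed, native tables; the affected table or partitions are collected as write entities, and the column-truncation variant additionally rejects indexed tables and any table or partition whose input format is not RCFileInputFormat.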

  private List<Task<?>> getIndexBuilderMapRed(String baseTableName, String indexName,
      HashMap<String, String> partSpec) throws SemanticException {
    try {
      String dbName = SessionState.get().getCurrentDatabase();
      Index index = db.getIndex(dbName, baseTableName, indexName);
      Table indexTbl = getTable(index.getIndexTableName());
      String baseTblName = index.getOrigTableName();
      Table baseTbl = getTable(baseTblName);

      String handlerCls = index.getIndexHandlerClass();
      HiveIndexHandler handler = HiveUtils.getIndexHandler(conf, handlerCls);

      List<Partition> indexTblPartitions = null;
View Full Code Here
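The final fragment resolves an index together with its index table and base table as Table objects, then hands them to the configured HiveIndexHandler so it can generate the tasks that rebuild the index.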
