Package org.apache.hadoop.hive.ql.plan

Examples of org.apache.hadoop.hive.ql.plan.CreateTableDesc
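
The excerpts below show how CreateTableDesc is used across Hive: in query analysis for CREATE TABLE, CTAS, and IMPORT statements, in DDLTask execution, and in an HCatalog create-table hook. The recurring pattern is that a CreateTableDesc is constructed, populated from the parsed statement, validated, and handed to a DDLWork/DDLTask. As a minimal sketch of the population step, using only the no-argument constructor and setters that appear in the first excerpt (the delimiter and SerDe values here are illustrative placeholders, not Hive defaults):

  CreateTableDesc desc = new CreateTableDesc();
  desc.setFieldDelim(",");     // column delimiter (illustrative)
  desc.setCollItemDelim(":");  // collection item delimiter (illustrative)
  desc.setMapKeyDelim("=");    // map key delimiter (illustrative)
  desc.setLineDelim("\n");     // row delimiter (illustrative)
  desc.setNullFormat("\\N");   // text used to represent NULL (illustrative)
  desc.setSerName("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"); // SerDe class name (illustrative)

In the analyzer excerpts, the fully populated descriptor is then checked with validate(conf) and wrapped in a DDLWork that TaskFactory.get(...) turns into the DDL task for the statement.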


            }
          }
          qb.getMetaData().setDestForAlias(name, fname,
              (ast.getToken().getType() == HiveParser.TOK_DIR));

          CreateTableDesc localDirectoryDesc = new CreateTableDesc();
          boolean localDirectoryDescIsSet = false;
          int numCh = ast.getChildCount();
          for (int num = 1; num < numCh; num++) {
            ASTNode child = (ASTNode) ast.getChild(num);
            if (ast.getChild(num) != null) {
              switch (child.getToken().getType()) {
                case HiveParser.TOK_TABLEROWFORMAT:
                  rowFormatParams.analyzeRowFormat(shared, child);
                  localDirectoryDesc.setFieldDelim(rowFormatParams.fieldDelim);
                  localDirectoryDesc.setLineDelim(rowFormatParams.lineDelim);
                  localDirectoryDesc.setCollItemDelim(rowFormatParams.collItemDelim);
                  localDirectoryDesc.setMapKeyDelim(rowFormatParams.mapKeyDelim);
                  localDirectoryDesc.setFieldEscape(rowFormatParams.fieldEscape);
                  localDirectoryDesc.setNullFormat(rowFormatParams.nullFormat);
                  localDirectoryDescIsSet = true;
                  break;
                case HiveParser.TOK_TABLESERIALIZER:
                  ASTNode serdeChild = (ASTNode) child.getChild(0);
                  shared.serde = unescapeSQLString(serdeChild.getChild(0).getText());
                  localDirectoryDesc.setSerName(shared.serde);
                  localDirectoryDescIsSet = true;
                  break;
                case HiveParser.TOK_TBLSEQUENCEFILE:
                case HiveParser.TOK_TBLTEXTFILE:
                case HiveParser.TOK_TBLRCFILE:
                case HiveParser.TOK_TBLORCFILE:
                case HiveParser.TOK_TABLEFILEFORMAT:
                  storageFormat.fillStorageFormat(child, shared);
                  localDirectoryDesc.setOutputFormat(storageFormat.outputFormat);
                  localDirectoryDesc.setSerName(shared.serde);
                  localDirectoryDescIsSet = true;
                  break;
              }
            }
          }
View Full Code Here


      // CTAS case: the file output format and serde are defined by the
      // CREATE TABLE command rather than taking the default values.
      List<FieldSchema> field_schemas = null;
      CreateTableDesc tblDesc = qb.getTableDesc();
      if (tblDesc != null) {
        field_schemas = new ArrayList<FieldSchema>();
      }

      boolean first = true;
      for (ColumnInfo colInfo : colInfos) {
        String[] nm = inputRR.reverseLookup(colInfo.getInternalName());

        if (nm[1] != null) { // non-null column alias
          colInfo.setAlias(nm[1]);
        }

        String colName = colInfo.getInternalName(); // default column name
        if (field_schemas != null) {
          FieldSchema col = new FieldSchema();
          if (!("".equals(nm[0])) && nm[1] != null) {
            colName = unescapeIdentifier(colInfo.getAlias()).toLowerCase(); // remove ``
          }
          col.setName(colName);
          col.setType(colInfo.getType().getTypeName());
          field_schemas.add(col);
        }

        if (!first) {
          cols = cols.concat(",");
          colTypes = colTypes.concat(":");
        }

        first = false;
        cols = cols.concat(colName);

        // Replace VOID type with string when the output is a temp table or
        // local files.
        // A VOID type can be generated under the query:
        //
        // select NULL from tt;
        // or
        // insert overwrite local directory "abc" select NULL from tt;
        //
        // where there is no column type to which the NULL value should be
        // converted.
        //
        String tName = colInfo.getType().getTypeName();
        if (tName.equals(serdeConstants.VOID_TYPE_NAME)) {
          colTypes = colTypes.concat(serdeConstants.STRING_TYPE_NAME);
        } else {
          colTypes = colTypes.concat(tName);
        }
      }

      // update the create table descriptor with the resulting schema.
      if (tblDesc != null) {
        tblDesc.setCols(new ArrayList<FieldSchema>(field_schemas));
      }

      boolean isDestTempFile = true;
      if (!ctx.isMRTmpFileURI(dest_path.toUri().toString())) {
        idToTableNameMap.put(String.valueOf(destTableId), dest_path.toUri().toString());
View Full Code Here

    String[] qualified = Hive.getQualifiedNames(tableName);
    String dbName = qualified.length == 1 ? SessionState.get().getCurrentDatabase() : qualified[0];
    Database database = getDatabase(dbName);
    outputs.add(new WriteEntity(database, WriteEntity.WriteType.DDL_SHARED));
    // Handle different types of CREATE TABLE command
    CreateTableDesc crtTblDesc = null;
    switch (command_type) {

    case CREATE_TABLE: // REGULAR CREATE TABLE DDL
      tblProps = addDefaultProperties(tblProps);

      crtTblDesc = new CreateTableDesc(tableName, isExt, cols, partCols,
          bucketCols, sortCols, numBuckets, rowFormatParams.fieldDelim,
          rowFormatParams.fieldEscape,
          rowFormatParams.collItemDelim, rowFormatParams.mapKeyDelim, rowFormatParams.lineDelim,
          comment,
          storageFormat.inputFormat, storageFormat.outputFormat, location, shared.serde,
          storageFormat.storageHandler, shared.serdeProps, tblProps, ifNotExists, skewedColNames,
          skewedValues);
      crtTblDesc.setStoredAsSubDirectories(storedAsDirs);
      crtTblDesc.setNullFormat(rowFormatParams.nullFormat);

      crtTblDesc.validate(conf);
      // outputs is empty, which means this create table happens in the current
      // database.
      SessionState.get().setCommandType(HiveOperation.CREATETABLE);
      rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
          crtTblDesc), conf));
      break;

    case CTLT: // create table like <tbl_name>
      tblProps = addDefaultProperties(tblProps);

      CreateTableLikeDesc crtTblLikeDesc = new CreateTableLikeDesc(tableName, isExt,
          storageFormat.inputFormat, storageFormat.outputFormat, location,
          shared.serde, shared.serdeProps, tblProps, ifNotExists, likeTableName);
      SessionState.get().setCommandType(HiveOperation.CREATETABLE);
      rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
          crtTblLikeDesc), conf));
      break;

    case CTAS: // create table as select

      // Verify that the table does not already exist
      try {
        Table dumpTable = db.newTable(tableName);
        if (null != db.getTable(dumpTable.getDbName(), dumpTable.getTableName(), false)) {
          throw new SemanticException(ErrorMsg.TABLE_ALREADY_EXISTS.getMsg(tableName));
        }
      } catch (HiveException e) {
        throw new SemanticException(e);
      }

      tblProps = addDefaultProperties(tblProps);

      crtTblDesc = new CreateTableDesc(dbName, tableName, isExt, cols, partCols,
          bucketCols, sortCols, numBuckets, rowFormatParams.fieldDelim,
          rowFormatParams.fieldEscape,
          rowFormatParams.collItemDelim, rowFormatParams.mapKeyDelim, rowFormatParams.lineDelim,
          comment, storageFormat.inputFormat,
          storageFormat.outputFormat, location, shared.serde, storageFormat.storageHandler,
          shared.serdeProps,
          tblProps, ifNotExists, skewedColNames, skewedValues);
      crtTblDesc.setStoredAsSubDirectories(storedAsDirs);
      crtTblDesc.setNullFormat(rowFormatParams.nullFormat);
      qb.setTableDesc(crtTblDesc);

      SessionState.get().setCommandType(HiveOperation.CREATETABLE_AS_SELECT);

      return selectStmt;
View Full Code Here

    decideExecMode(rootTasks, ctx, globalLimitCtx);

    if (qb.isCTAS()) {
      // generate a DDL task and make it a dependent task of the leaf
      CreateTableDesc crtTblDesc = qb.getTableDesc();

      crtTblDesc.validate(conf);

      // Clear the output for CTAS since we don't need the output from the
      // mapredWork; the DDLWork at the tail of the chain will have the output.
      outputs.clear();
View Full Code Here

      String tmpPath = stripQuotes(fromTree.getText());
      URI fromURI = EximUtil.getValidatedURI(conf, tmpPath);

      FileSystem fs = FileSystem.get(fromURI, conf);
      String dbname = null;
      CreateTableDesc tblDesc = null;
      List<AddPartitionDesc> partitionDescs = new ArrayList<AddPartitionDesc>();
      Path fromPath = new Path(fromURI.getScheme(), fromURI.getAuthority(),
          fromURI.getPath());
      boolean isLocal = FileUtils.isLocalFile(conf, fromURI);
      inputs.add(new ReadEntity(fromPath, isLocal));
      try {
        Path metadataPath = new Path(fromPath, METADATA_NAME);
        Map.Entry<org.apache.hadoop.hive.metastore.api.Table,
        List<Partition>> rv =  EximUtil.readMetaData(fs, metadataPath);
        dbname = SessionState.get().getCurrentDatabase();
        org.apache.hadoop.hive.metastore.api.Table table = rv.getKey();
        tblDesc =  new CreateTableDesc(
            table.getTableName(),
            false, // isExternal: set to false here, can be overwritten by the
                   // IMPORT stmt
            table.getSd().getCols(),
            table.getPartitionKeys(),
            table.getSd().getBucketCols(),
            table.getSd().getSortCols(),
            table.getSd().getNumBuckets(),
            null, null, null, null, null, // these 5 delims passed as serde params
            null, // comment passed as table params
            table.getSd().getInputFormat(),
            table.getSd().getOutputFormat(),
            null, // location: set to null here, can be
                  // overwritten by the IMPORT stmt
            table.getSd().getSerdeInfo().getSerializationLib(),
            null, // storagehandler passed as table params
            table.getSd().getSerdeInfo().getParameters(),
            table.getParameters(), false,
            (null == table.getSd().getSkewedInfo()) ? null : table.getSd().getSkewedInfo()
                .getSkewedColNames(),
            (null == table.getSd().getSkewedInfo()) ? null : table.getSd().getSkewedInfo()
                .getSkewedColValues());
        tblDesc.setStoredAsSubDirectories(table.getSd().isStoredAsSubDirectories());

        List<FieldSchema> partCols = tblDesc.getPartCols();
        List<String> partColNames = new ArrayList<String>(partCols.size());
        for (FieldSchema fsc : partCols) {
          partColNames.add(fsc.getName());
        }
        List<Partition> partitions = rv.getValue();
        for (Partition partition : partitions) {
          // TODO: this should not create AddPartitionDesc per partition
          AddPartitionDesc partsDesc = new AddPartitionDesc(dbname, tblDesc.getTableName(),
              EximUtil.makePartSpec(tblDesc.getPartCols(), partition.getValues()),
              partition.getSd().getLocation(), partition.getParameters());
          AddPartitionDesc.OnePartitionDesc partDesc = partsDesc.getPartition(0);
          partDesc.setInputFormat(partition.getSd().getInputFormat());
          partDesc.setOutputFormat(partition.getSd().getOutputFormat());
          partDesc.setNumBuckets(partition.getSd().getNumBuckets());
          partDesc.setCols(partition.getSd().getCols());
          partDesc.setSerializationLib(partition.getSd().getSerdeInfo().getSerializationLib());
          partDesc.setSerdeParams(partition.getSd().getSerdeInfo().getParameters());
          partDesc.setBucketCols(partition.getSd().getBucketCols());
          partDesc.setSortCols(partition.getSd().getSortCols());
          partDesc.setLocation(new Path(fromPath,
              Warehouse.makePartName(tblDesc.getPartCols(), partition.getValues())).toString());
          partitionDescs.add(partsDesc);
        }
      } catch (IOException e) {
        throw new SemanticException(ErrorMsg.INVALID_PATH.getMsg(), e);
      }
      LOG.debug("metadata read and parsed");
      for (int i = 1; i < ast.getChildCount(); ++i) {
        ASTNode child = (ASTNode) ast.getChild(i);
        switch (child.getToken().getType()) {
        case HiveParser.KW_EXTERNAL:
          tblDesc.setExternal(true);
          break;
        case HiveParser.TOK_TABLELOCATION:
          String location = unescapeSQLString(child.getChild(0).getText());
          location = EximUtil.relativeToAbsolutePath(conf, location);
          tblDesc.setLocation(location);
          break;
        case HiveParser.TOK_TAB:
          Tree tableTree = child.getChild(0);
          // initialize destination table/partition
          String tableName = getUnescapedName((ASTNode)tableTree);
          tblDesc.setTableName(tableName);
          // get partition metadata if partition specified
          LinkedHashMap<String, String> partSpec = new LinkedHashMap<String, String>();
          if (child.getChildCount() == 2) {
            ASTNode partspec = (ASTNode) child.getChild(1);
            // partSpec is a mapping from partition column name to its value.
            for (int j = 0; j < partspec.getChildCount(); ++j) {
              ASTNode partspec_val = (ASTNode) partspec.getChild(j);
              String val = null;
              String colName = unescapeIdentifier(partspec_val.getChild(0)
                    .getText().toLowerCase());
              if (partspec_val.getChildCount() < 2) { // DP in the form of T
                                                      // partition (ds, hr)
                throw new SemanticException(
                      ErrorMsg.INVALID_PARTITION
                          .getMsg(" - Dynamic partitions not allowed"));
              } else { // in the form of T partition (ds="2010-03-03")
                val = stripQuotes(partspec_val.getChild(1).getText());
              }
              partSpec.put(colName, val);
            }
            boolean found = false;
            for (Iterator<AddPartitionDesc> partnIter = partitionDescs
                  .listIterator(); partnIter.hasNext();) {
              AddPartitionDesc addPartitionDesc = partnIter.next();
              if (!found && addPartitionDesc.getPartition(0).getPartSpec().equals(partSpec)) {
                found = true;
              } else {
                partnIter.remove();
              }
            }
            if (!found) {
              throw new SemanticException(
                    ErrorMsg.INVALID_PARTITION
                        .getMsg(" - Specified partition not found in import directory"));
            }
          }
        }
      }
      if (tblDesc.getTableName() == null) {
        throw new SemanticException(ErrorMsg.NEED_TABLE_SPECIFICATION.getMsg());
      } else {
        conf.set("import.destination.table", tblDesc.getTableName());
        for (AddPartitionDesc addPartitionDesc : partitionDescs) {
          addPartitionDesc.setTableName(tblDesc.getTableName());
        }
      }
      Warehouse wh = new Warehouse(conf);
      try {
        Table table = db.getTable(tblDesc.getTableName());
        checkTable(table, tblDesc);
        LOG.debug("table " + tblDesc.getTableName()
            + " exists: metadata checked");
        tableExists = true;
        conf.set("import.destination.dir", table.getDataLocation().toString());
        if (table.isPartitioned()) {
          LOG.debug("table partitioned");
          for (AddPartitionDesc addPartitionDesc : partitionDescs) {
            Map<String, String> partSpec = addPartitionDesc.getPartition(0).getPartSpec();
            if (db.getPartition(table, partSpec, false) == null) {
              rootTasks.add(addSinglePartition(fromURI, fs, tblDesc, table, wh, addPartitionDesc));
            } else {
              throw new SemanticException(
                  ErrorMsg.PARTITION_EXISTS.getMsg(partSpecToString(partSpec)));
            }
          }
        } else {
          LOG.debug("table non-partitioned");
          checkTargetLocationEmpty(fs, new Path(table.getDataLocation()
              .toString()));
          loadTable(fromURI, table);
        }
        // Set this to read because we can't overwrite any existing partitions
        outputs.add(new WriteEntity(table, WriteEntity.WriteType.DDL_NO_LOCK));
      } catch (InvalidTableException e) {
        LOG.debug("table " + tblDesc.getTableName() + " does not exist");

        Task<?> t = TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
            tblDesc), conf);
        Table table = new Table(dbname, tblDesc.getTableName());
        String currentDb = SessionState.get().getCurrentDatabase();
        conf.set("import.destination.dir",
            wh.getTablePath(db.getDatabaseCurrent(),
                tblDesc.getTableName()).toString());
        if ((tblDesc.getPartCols() != null) && (tblDesc.getPartCols().size() != 0)) {
          for (AddPartitionDesc addPartitionDesc : partitionDescs) {
            t.addDependentTask(
                addSinglePartition(fromURI, fs, tblDesc, table, wh, addPartitionDesc));
          }
        } else {
          LOG.debug("adding dependent CopyWork/MoveWork for table");
          if (tblDesc.isExternal() && (tblDesc.getLocation() == null)) {
            LOG.debug("Importing in place, no emptiness check, no copying/loading");
            Path dataPath = new Path(fromURI.toString(), "data");
            tblDesc.setLocation(dataPath.toString());
          } else {
            Path tablePath = null;
            if (tblDesc.getLocation() != null) {
              tablePath = new Path(tblDesc.getLocation());
            } else {
              tablePath = wh.getTablePath(db.getDatabaseCurrent(), tblDesc.getTableName());
            }
            checkTargetLocationEmpty(fs, tablePath);
            t.addDependentTask(loadTable(fromURI, table));
          }
        }
View Full Code Here

    }

    QueryPlan plan = driver.getPlan();
    DDLTask task = (DDLTask) plan.getRootTasks().get(0);
    DDLWork work = task.getWork();
    CreateTableDesc spec = work.getCreateTblDesc();
    FieldSchema fs = spec.getCols().get(0);
    return fs.getType();
  }
View Full Code Here

      AlterDatabaseDesc alterDatabaseDesc = work.getAlterDatabaseDesc();
      if (alterDatabaseDesc != null) {
        return alterDatabase(alterDatabaseDesc);
      }

      CreateTableDesc crtTbl = work.getCreateTblDesc();
      if (crtTbl != null) {
        return createTable(db, crtTbl);
      }

      CreateIndexDesc crtIndex = work.getCreateIndexDesc();
View Full Code Here

    decideExecMode(rootTasks, ctx, globalLimitCtx);

    if (qb.isCTAS()) {
      // generate a DDL task and make it a dependent task of the leaf
      CreateTableDesc crtTblDesc = qb.getTableDesc();

      crtTblDesc.validate(conf);

      // Clear the mapredWork output file from outputs for CTAS;
      // the DDLWork at the tail of the chain will have the output.
      Iterator<WriteEntity> outIter = outputs.iterator();
      while (outIter.hasNext()) {
View Full Code Here

      String tmpPath = stripQuotes(fromTree.getText());
      URI fromURI = EximUtil.getValidatedURI(conf, tmpPath);

      FileSystem fs = FileSystem.get(fromURI, conf);
      String dbname = null;
      CreateTableDesc tblDesc = null;
      List<AddPartitionDesc> partitionDescs = new ArrayList<AddPartitionDesc>();
      Path fromPath = new Path(fromURI.getScheme(), fromURI.getAuthority(),
          fromURI.getPath());
      boolean isLocal = FileUtils.isLocalFile(conf, fromURI);
      inputs.add(new ReadEntity(fromPath, isLocal));
      try {
        Path metadataPath = new Path(fromPath, METADATA_NAME);
        Map.Entry<org.apache.hadoop.hive.metastore.api.Table,
        List<Partition>> rv =  EximUtil.readMetaData(fs, metadataPath);
        dbname = SessionState.get().getCurrentDatabase();
        org.apache.hadoop.hive.metastore.api.Table table = rv.getKey();
        tblDesc =  new CreateTableDesc(
            table.getTableName(),
            false, // isExternal: set to false here, can be overwritten by the
                   // IMPORT stmt
            table.isTemporary(),
            table.getSd().getCols(),
            table.getPartitionKeys(),
            table.getSd().getBucketCols(),
            table.getSd().getSortCols(),
            table.getSd().getNumBuckets(),
            null, null, null, null, null, // these 5 delims passed as serde params
            null, // comment passed as table params
            table.getSd().getInputFormat(),
            table.getSd().getOutputFormat(),
            null, // location: set to null here, can be
                  // overwritten by the IMPORT stmt
            table.getSd().getSerdeInfo().getSerializationLib(),
            null, // storagehandler passed as table params
            table.getSd().getSerdeInfo().getParameters(),
            table.getParameters(), false,
            (null == table.getSd().getSkewedInfo()) ? null : table.getSd().getSkewedInfo()
                .getSkewedColNames(),
            (null == table.getSd().getSkewedInfo()) ? null : table.getSd().getSkewedInfo()
                .getSkewedColValues());
        tblDesc.setStoredAsSubDirectories(table.getSd().isStoredAsSubDirectories());

        List<FieldSchema> partCols = tblDesc.getPartCols();
        List<String> partColNames = new ArrayList<String>(partCols.size());
        for (FieldSchema fsc : partCols) {
          partColNames.add(fsc.getName());
        }
        List<Partition> partitions = rv.getValue();
        for (Partition partition : partitions) {
          // TODO: this should not create AddPartitionDesc per partition
          AddPartitionDesc partsDesc = new AddPartitionDesc(dbname, tblDesc.getTableName(),
              EximUtil.makePartSpec(tblDesc.getPartCols(), partition.getValues()),
              partition.getSd().getLocation(), partition.getParameters());
          AddPartitionDesc.OnePartitionDesc partDesc = partsDesc.getPartition(0);
          partDesc.setInputFormat(partition.getSd().getInputFormat());
          partDesc.setOutputFormat(partition.getSd().getOutputFormat());
          partDesc.setNumBuckets(partition.getSd().getNumBuckets());
          partDesc.setCols(partition.getSd().getCols());
          partDesc.setSerializationLib(partition.getSd().getSerdeInfo().getSerializationLib());
          partDesc.setSerdeParams(partition.getSd().getSerdeInfo().getParameters());
          partDesc.setBucketCols(partition.getSd().getBucketCols());
          partDesc.setSortCols(partition.getSd().getSortCols());
          partDesc.setLocation(new Path(fromPath,
              Warehouse.makePartName(tblDesc.getPartCols(), partition.getValues())).toString());
          partitionDescs.add(partsDesc);
        }
      } catch (IOException e) {
        throw new SemanticException(ErrorMsg.INVALID_PATH.getMsg(), e);
      }
      LOG.debug("metadata read and parsed");
      for (int i = 1; i < ast.getChildCount(); ++i) {
        ASTNode child = (ASTNode) ast.getChild(i);
        switch (child.getToken().getType()) {
        case HiveParser.KW_EXTERNAL:
          tblDesc.setExternal(true);
          break;
        case HiveParser.TOK_TABLELOCATION:
          String location = unescapeSQLString(child.getChild(0).getText());
          location = EximUtil.relativeToAbsolutePath(conf, location);
          tblDesc.setLocation(location);
          break;
        case HiveParser.TOK_TAB:
          Tree tableTree = child.getChild(0);
          // initialize destination table/partition
          String tableName = getUnescapedName((ASTNode)tableTree);
          tblDesc.setTableName(tableName);
          // get partition metadata if partition specified
          LinkedHashMap<String, String> partSpec = new LinkedHashMap<String, String>();
          if (child.getChildCount() == 2) {
            ASTNode partspec = (ASTNode) child.getChild(1);
            // partSpec is a mapping from partition column name to its value.
            for (int j = 0; j < partspec.getChildCount(); ++j) {
              ASTNode partspec_val = (ASTNode) partspec.getChild(j);
              String val = null;
              String colName = unescapeIdentifier(partspec_val.getChild(0)
                    .getText().toLowerCase());
              if (partspec_val.getChildCount() < 2) { // DP in the form of T
                                                      // partition (ds, hr)
                throw new SemanticException(
                      ErrorMsg.INVALID_PARTITION
                          .getMsg(" - Dynamic partitions not allowed"));
              } else { // in the form of T partition (ds="2010-03-03")
                val = stripQuotes(partspec_val.getChild(1).getText());
              }
              partSpec.put(colName, val);
            }
            boolean found = false;
            for (Iterator<AddPartitionDesc> partnIter = partitionDescs
                  .listIterator(); partnIter.hasNext();) {
              AddPartitionDesc addPartitionDesc = partnIter.next();
              if (!found && addPartitionDesc.getPartition(0).getPartSpec().equals(partSpec)) {
                found = true;
              } else {
                partnIter.remove();
              }
            }
            if (!found) {
              throw new SemanticException(
                    ErrorMsg.INVALID_PARTITION
                        .getMsg(" - Specified partition not found in import directory"));
            }
          }
        }
      }
      if (tblDesc.getTableName() == null) {
        throw new SemanticException(ErrorMsg.NEED_TABLE_SPECIFICATION.getMsg());
      } else {
        conf.set("import.destination.table", tblDesc.getTableName());
        for (AddPartitionDesc addPartitionDesc : partitionDescs) {
          addPartitionDesc.setTableName(tblDesc.getTableName());
        }
      }
      Warehouse wh = new Warehouse(conf);
      try {
        Table table = db.getTable(tblDesc.getTableName());
        checkTable(table, tblDesc);
        LOG.debug("table " + tblDesc.getTableName()
            + " exists: metadata checked");
        tableExists = true;
        conf.set("import.destination.dir", table.getDataLocation().toString());
        if (table.isPartitioned()) {
          LOG.debug("table partitioned");
          for (AddPartitionDesc addPartitionDesc : partitionDescs) {
            Map<String, String> partSpec = addPartitionDesc.getPartition(0).getPartSpec();
            if (db.getPartition(table, partSpec, false) == null) {
              rootTasks.add(addSinglePartition(fromURI, fs, tblDesc, table, wh, addPartitionDesc));
            } else {
              throw new SemanticException(
                  ErrorMsg.PARTITION_EXISTS.getMsg(partSpecToString(partSpec)));
            }
          }
        } else {
          LOG.debug("table non-partitioned");
          checkTargetLocationEmpty(fs, new Path(table.getDataLocation()
              .toString()));
          loadTable(fromURI, table);
        }
        // Set this to read because we can't overwrite any existing partitions
        outputs.add(new WriteEntity(table, WriteEntity.WriteType.DDL_NO_LOCK));
      } catch (InvalidTableException e) {
        LOG.debug("table " + tblDesc.getTableName() + " does not exist");

        Task<?> t = TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
            tblDesc), conf);
        Table table = new Table(dbname, tblDesc.getTableName());
        String currentDb = SessionState.get().getCurrentDatabase();
        conf.set("import.destination.dir",
            wh.getTablePath(db.getDatabaseCurrent(),
                tblDesc.getTableName()).toString());
        if ((tblDesc.getPartCols() != null) && (tblDesc.getPartCols().size() != 0)) {
          for (AddPartitionDesc addPartitionDesc : partitionDescs) {
            t.addDependentTask(
                addSinglePartition(fromURI, fs, tblDesc, table, wh, addPartitionDesc));
          }
        } else {
          LOG.debug("adding dependent CopyWork/MoveWork for table");
          if (tblDesc.isExternal() && (tblDesc.getLocation() == null)) {
            LOG.debug("Importing in place, no emptiness check, no copying/loading");
            Path dataPath = new Path(fromURI.toString(), "data");
            tblDesc.setLocation(dataPath.toString());
          } else {
            Path tablePath = null;
            if (tblDesc.getLocation() != null) {
              tablePath = new Path(tblDesc.getLocation());
            } else {
              tablePath = wh.getTablePath(db.getDatabaseCurrent(), tblDesc.getTableName());
            }
            checkTargetLocationEmpty(fs, tablePath);
            t.addDependentTask(loadTable(fromURI, table));
          }
        }
View Full Code Here

    if (rootTasks.size() == 0) {
      // There will be no DDL task created in the case of CREATE TABLE IF
      // NOT EXISTS
      return;
    }
    CreateTableDesc desc = ((DDLTask) rootTasks.get(rootTasks.size() - 1))
      .getWork().getCreateTblDesc();
    if (desc == null) {
      // Desc will be null if it is CREATE TABLE LIKE; the desc is then
      // contained in a CreateTableLikeDesc. Currently, HCat disallows CTLT in
      // the pre-hook, so desc can never be null here.
      return;
    }
    Map<String, String> tblProps = desc.getTblProps();
    if (tblProps == null) {
      // tblProps will be null if the user did not specify TBLPROPERTIES in
      // the CREATE TABLE command.
      tblProps = new HashMap<String, String>();

    }

    // First, check whether we will allow the user to create the table.
    String storageHandler = desc.getStorageHandler();
    if (!StringUtils.isEmpty(storageHandler)) {
      try {
        HiveStorageHandler storageHandlerInst = HCatUtil
          .getStorageHandler(context.getConf(),
            desc.getStorageHandler(),
            desc.getSerName(),
            desc.getInputFormat(),
            desc.getOutputFormat());
        // Authorization checks are performed by storageHandler.getAuthorizationProvider()
        // if StorageDelegationAuthorizationProvider is used.
      } catch (IOException e) {
        throw new SemanticException(e);
      }
    }

    if (desc != null) {
      try {
        Table table = context.getHive().newTable(desc.getTableName());
        if (desc.getLocation() != null) {
          table.setDataLocation(new Path(desc.getLocation()));
        }
        if (desc.getStorageHandler() != null) {
          table.setProperty(
            org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_STORAGE,
            desc.getStorageHandler());
        }
        for (Map.Entry<String, String> prop : tblProps.entrySet()) {
          table.setProperty(prop.getKey(), prop.getValue());
        }
        for (Map.Entry<String, String> prop : desc.getSerdeProps().entrySet()) {
          table.setSerdeParam(prop.getKey(), prop.getValue());
        }
        // TODO: set other Table properties as needed

        // Authorize against the table operation so that location permissions, if any, can be checked.

        if (HCatAuthUtil.isAuthorizationEnabled(context.getConf())) {
          authorize(table, Privilege.CREATE);
        }
      } catch (HiveException ex) {
        throw new SemanticException(ex);
      }
    }

    desc.setTblProps(tblProps);
    context.getConf().set(HCatConstants.HCAT_CREATE_TBL_NAME, tableName);
  }
View Full Code Here
