Examples of TruncateTableDesc


Examples of org.apache.hadoop.hive.ql.plan.TruncateTableDesc

      AlterTableAlterPartDesc alterPartDesc = work.getAlterTableAlterPartDesc();
      if (alterPartDesc != null) {
        return alterTableAlterPart(db, alterPartDesc);
      }

      TruncateTableDesc truncateTableDesc = work.getTruncateTblDesc();
      if (truncateTableDesc != null) {
        return truncateTable(db, truncateTableDesc);
      }

      AlterTableExchangePartition alterTableExchangePartition =
          work.getAlterTableExchangePartition();
      if (alterTableExchangePartition != null) {
        return exchangeTablePartition(db, alterTableExchangePartition);
      }
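The snippet above is the dispatch side of the operation: a DDLWork carries at most one non-null descriptor, and the executor probes each getter in turn until it finds the work to run. Below is a minimal sketch of the planning side, using only the TruncateTableDesc API visible in these examples; the table name, the partition spec, and the getTableName()/getPartSpec() accessors paired with the constructor are assumptions for illustration, not taken from any real deployment.

import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.hive.ql.plan.TruncateTableDesc;

public class TruncateDescSketch {
  public static void main(String[] args) {
    // Hypothetical table and partition spec, for illustration only.
    Map<String, String> partSpec = new HashMap<String, String>();
    partSpec.put("ds", "2014-01-01");

    // Same constructor the analyzer snippets below use.
    TruncateTableDesc desc = new TruncateTableDesc("sales", partSpec);

    // The executor above would receive this via work.getTruncateTblDesc()
    // and route it to truncateTable(db, desc).
    System.out.println("truncate " + desc.getTableName() + " " + desc.getPartSpec());
  }
}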

Examples of org.apache.hadoop.hive.ql.plan.TruncateTableDesc

          outputs.add(new WriteEntity(partition));
        }
      }
    }

    TruncateTableDesc truncateTblDesc = new TruncateTableDesc(tableName, partSpec);

    DDLWork ddlWork = new DDLWork(getInputs(), getOutputs(), truncateTblDesc);
    Task<? extends Serializable> truncateTask = TaskFactory.get(ddlWork, conf);

    // Is this a truncate column command
    List<String> columnNames = null;
    if (ast.getChildCount() == 2) {
      try {
        columnNames = getColumnNames((ASTNode)ast.getChild(1));

        // Throw an error if the table is indexed
        List<Index> indexes = db.getIndexes(table.getDbName(), tableName, (short)1);
        if (indexes != null && indexes.size() > 0) {
          throw new SemanticException(ErrorMsg.TRUNCATE_COLUMN_INDEXED_TABLE.getMsg());
        }

        List<String> bucketCols = null;
        Class<? extends InputFormat> inputFormatClass = null;
        boolean isArchived = false;
        Path newTblPartLoc = null;
        Path oldTblPartLoc = null;
        List<FieldSchema> cols = null;
        ListBucketingCtx lbCtx = null;
        boolean isListBucketed = false;
        List<String> listBucketColNames = null;

        if (table.isPartitioned()) {
          Partition part = db.getPartition(table, partSpec, false);

          Path tabPath = table.getPath();
          Path partPath = part.getPartitionPath();

          // if the table is in a different dfs than the partition,
          // replace the partition's dfs with the table's dfs.
          newTblPartLoc = new Path(tabPath.toUri().getScheme(), tabPath.toUri()
              .getAuthority(), partPath.toUri().getPath());

          oldTblPartLoc = partPath;

          cols = part.getCols();
          bucketCols = part.getBucketCols();
          inputFormatClass = part.getInputFormatClass();
          isArchived = ArchiveUtils.isArchived(part);
          lbCtx = constructListBucketingCtx(part.getSkewedColNames(), part.getSkewedColValues(),
              part.getSkewedColValueLocationMaps(), part.isStoredAsSubDirectories(), conf);
          isListBucketed = part.isStoredAsSubDirectories();
          listBucketColNames = part.getSkewedColNames();
        } else {
          // input and output are the same
          oldTblPartLoc = table.getPath();
          newTblPartLoc = table.getPath();
          cols  = table.getCols();
          bucketCols = table.getBucketCols();
          inputFormatClass = table.getInputFormatClass();
          lbCtx = constructListBucketingCtx(table.getSkewedColNames(), table.getSkewedColValues(),
              table.getSkewedColValueLocationMaps(), table.isStoredAsSubDirectories(), conf);
          isListBucketed = table.isStoredAsSubDirectories();
          listBucketColNames = table.getSkewedColNames();
        }

        // throw a HiveException for non-rcfile.
        if (!inputFormatClass.equals(RCFileInputFormat.class)) {
          throw new SemanticException(ErrorMsg.TRUNCATE_COLUMN_NOT_RC.getMsg());
        }

        // throw a HiveException if the table/partition is archived
        if (isArchived) {
          throw new SemanticException(ErrorMsg.TRUNCATE_COLUMN_ARCHIVED.getMsg());
        }

        Set<Integer> columnIndexes = new HashSet<Integer>();
        for (String columnName : columnNames) {
          boolean found = false;
          for (int columnIndex = 0; columnIndex < cols.size(); columnIndex++) {
            if (columnName.equalsIgnoreCase(cols.get(columnIndex).getName())) {
              columnIndexes.add(columnIndex);
              found = true;
              break;
            }
          }
          // Throw an exception if the user is trying to truncate a column which doesn't exist
          if (!found) {
            throw new SemanticException(ErrorMsg.INVALID_COLUMN.getMsg(columnName));
          }
          // Throw an exception if the table/partition is bucketed on one of the columns
          for (String bucketCol : bucketCols) {
            if (bucketCol.equalsIgnoreCase(columnName)) {
              throw new SemanticException(ErrorMsg.TRUNCATE_BUCKETED_COLUMN.getMsg(columnName));
            }
          }
          if (isListBucketed) {
            for (String listBucketCol : listBucketColNames) {
              if (listBucketCol.equalsIgnoreCase(columnName)) {
                throw new SemanticException(
                    ErrorMsg.TRUNCATE_LIST_BUCKETED_COLUMN.getMsg(columnName));
              }
            }
          }
        }

        truncateTblDesc.setColumnIndexes(new ArrayList<Integer>(columnIndexes));

        truncateTblDesc.setInputDir(oldTblPartLoc.toString());
        addInputsOutputsAlterTable(tableName, partSpec);

        truncateTblDesc.setLbCtx(lbCtx);

        addInputsOutputsAlterTable(tableName, partSpec);
        ddlWork.setNeedLock(true);
        TableDesc tblDesc = Utilities.getTableDesc(table);
        // Write the output to temporary directory and move it to the final location at the end
        // so the operation is atomic.
        String queryTmpdir = ctx.getExternalTmpFileURI(newTblPartLoc.toUri());
        truncateTblDesc.setOutputDir(queryTmpdir);
        LoadTableDesc ltd = new LoadTableDesc(queryTmpdir, queryTmpdir, tblDesc,
            partSpec == null ? new HashMap<String, String>() : partSpec);
        ltd.setLbCtx(lbCtx);
        Task<MoveWork> moveTsk = TaskFactory.get(new MoveWork(null, null, ltd, null, false),
            conf);
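The column-lookup loop above is worth seeing in isolation: it maps each requested column name to its schema position case-insensitively, deduplicates through a Set, and fails fast on unknown names before any task is scheduled. Here is a plain-Java sketch of that same logic, decoupled from Hive's FieldSchema and ErrorMsg types; all names are illustrative.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class ColumnIndexSketch {
  // Mirrors the loop above: case-insensitive name -> index, first match wins,
  // unknown names rejected before any work is scheduled.
  static List<Integer> resolveColumnIndexes(List<String> requested, List<String> schema) {
    Set<Integer> indexes = new HashSet<Integer>();
    for (String name : requested) {
      boolean found = false;
      for (int i = 0; i < schema.size(); i++) {
        if (name.equalsIgnoreCase(schema.get(i))) {
          indexes.add(i);
          found = true;
          break;
        }
      }
      if (!found) {
        throw new IllegalArgumentException("Invalid column: " + name);
      }
    }
    return new ArrayList<Integer>(indexes);
  }

  public static void main(String[] args) {
    List<String> schema = Arrays.asList("id", "name", "payload");
    System.out.println(resolveColumnIndexes(Arrays.asList("PAYLOAD"), schema)); // prints [2]
  }
}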

Examples of org.apache.hadoop.hive.ql.plan.TruncateTableDesc

          outputs.add(new WriteEntity(partition, WriteEntity.WriteType.DDL_EXCLUSIVE));
        }
      }
    }

    TruncateTableDesc truncateTblDesc = new TruncateTableDesc(tableName, partSpec);

    DDLWork ddlWork = new DDLWork(getInputs(), getOutputs(), truncateTblDesc);
    Task<? extends Serializable> truncateTask = TaskFactory.get(ddlWork, conf);

    // Is this a truncate column command
    List<String> columnNames = null;
    if (ast.getChildCount() == 2) {
      try {
        columnNames = getColumnNames((ASTNode)ast.getChild(1));

        // Throw an error if the table is indexed
        List<Index> indexes = db.getIndexes(table.getDbName(), tableName, (short)1);
        if (indexes != null && indexes.size() > 0) {
          throw new SemanticException(ErrorMsg.TRUNCATE_COLUMN_INDEXED_TABLE.getMsg());
        }

        List<String> bucketCols = null;
        Class<? extends InputFormat> inputFormatClass = null;
        boolean isArchived = false;
        Path newTblPartLoc = null;
        Path oldTblPartLoc = null;
        List<FieldSchema> cols = null;
        ListBucketingCtx lbCtx = null;
        boolean isListBucketed = false;
        List<String> listBucketColNames = null;

        if (table.isPartitioned()) {
          Partition part = db.getPartition(table, partSpec, false);

          Path tabPath = table.getPath();
          Path partPath = part.getDataLocation();

          // if the table is in a different dfs than the partition,
          // replace the partition's dfs with the table's dfs.
          newTblPartLoc = new Path(tabPath.toUri().getScheme(), tabPath.toUri()
              .getAuthority(), partPath.toUri().getPath());

          oldTblPartLoc = partPath;

          cols = part.getCols();
          bucketCols = part.getBucketCols();
          inputFormatClass = part.getInputFormatClass();
          isArchived = ArchiveUtils.isArchived(part);
          lbCtx = constructListBucketingCtx(part.getSkewedColNames(), part.getSkewedColValues(),
              part.getSkewedColValueLocationMaps(), part.isStoredAsSubDirectories(), conf);
          isListBucketed = part.isStoredAsSubDirectories();
          listBucketColNames = part.getSkewedColNames();
        } else {
          // input and output are the same
          oldTblPartLoc = table.getPath();
          newTblPartLoc = table.getPath();
          cols  = table.getCols();
          bucketCols = table.getBucketCols();
          inputFormatClass = table.getInputFormatClass();
          lbCtx = constructListBucketingCtx(table.getSkewedColNames(), table.getSkewedColValues(),
              table.getSkewedColValueLocationMaps(), table.isStoredAsSubDirectories(), conf);
          isListBucketed = table.isStoredAsSubDirectories();
          listBucketColNames = table.getSkewedColNames();
        }

        // throw a HiveException for non-rcfile.
        if (!inputFormatClass.equals(RCFileInputFormat.class)) {
          throw new SemanticException(ErrorMsg.TRUNCATE_COLUMN_NOT_RC.getMsg());
        }

        // throw a HiveException if the table/partition is archived
        if (isArchived) {
          throw new SemanticException(ErrorMsg.TRUNCATE_COLUMN_ARCHIVED.getMsg());
        }

        Set<Integer> columnIndexes = new HashSet<Integer>();
        for (String columnName : columnNames) {
          boolean found = false;
          for (int columnIndex = 0; columnIndex < cols.size(); columnIndex++) {
            if (columnName.equalsIgnoreCase(cols.get(columnIndex).getName())) {
              columnIndexes.add(columnIndex);
              found = true;
              break;
            }
          }
          // Throw an exception if the user is trying to truncate a column which doesn't exist
          if (!found) {
            throw new SemanticException(ErrorMsg.INVALID_COLUMN.getMsg(columnName));
          }
          // Throw an exception if the table/partition is bucketed on one of the columns
          for (String bucketCol : bucketCols) {
            if (bucketCol.equalsIgnoreCase(columnName)) {
              throw new SemanticException(ErrorMsg.TRUNCATE_BUCKETED_COLUMN.getMsg(columnName));
            }
          }
          if (isListBucketed) {
            for (String listBucketCol : listBucketColNames) {
              if (listBucketCol.equalsIgnoreCase(columnName)) {
                throw new SemanticException(
                    ErrorMsg.TRUNCATE_LIST_BUCKETED_COLUMN.getMsg(columnName));
              }
            }
          }
        }

        truncateTblDesc.setColumnIndexes(new ArrayList<Integer>(columnIndexes));

        truncateTblDesc.setInputDir(oldTblPartLoc);
        addInputsOutputsAlterTable(tableName, partSpec);

        truncateTblDesc.setLbCtx(lbCtx);

        addInputsOutputsAlterTable(tableName, partSpec);
        ddlWork.setNeedLock(true);
        TableDesc tblDesc = Utilities.getTableDesc(table);
        // Write the output to temporary directory and move it to the final location at the end
        // so the operation is atomic.
        Path queryTmpdir = ctx.getExternalTmpPath(newTblPartLoc.toUri());
        truncateTblDesc.setOutputDir(queryTmpdir);
        LoadTableDesc ltd = new LoadTableDesc(queryTmpdir, tblDesc,
            partSpec == null ? new HashMap<String, String>() : partSpec);
        ltd.setLbCtx(lbCtx);
        Task<MoveWork> moveTsk = TaskFactory.get(new MoveWork(null, null, ltd, null, false),
            conf);
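Both analyzer examples finish the same way: the rewritten files are produced under a scratch location (getExternalTmpFileURI in the older API above, getExternalTmpPath in this newer one) and a trailing MoveWork publishes them, so readers never observe a half-truncated table. The same write-then-rename idea, sketched with plain java.nio.file on a single local filesystem so the rename can be atomic; the paths are illustrative, and Hive itself does this through MoveTask on the Hadoop FileSystem instead.

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

public class AtomicPublishSketch {
  public static void main(String[] args) throws IOException {
    // Scratch directory playing the role of Hive's external tmp path.
    Path scratch = Files.createTempDirectory("truncate-scratch");

    // Step 1: write the rewritten data under the scratch directory.
    Path staged = scratch.resolve("part-00000.tmp");
    Files.write(staged, "rewritten rows".getBytes(StandardCharsets.UTF_8));

    // Step 2: publish with one atomic rename (same filesystem, same directory),
    // so readers see either the old file or the new one, never a partial write.
    Path finalLoc = scratch.resolve("part-00000");
    Files.move(staged, finalLoc, StandardCopyOption.ATOMIC_MOVE);

    System.out.println("published " + finalLoc);
  }
}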
