Examples of Hive
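
The snippets below are taken from the Hive query-layer source and show typical uses of org.apache.hadoop.hive.ql.metadata.Hive, the session-level facade over the metastore client. DDL tasks, the partition pruner, the lock managers, and the authorization checks all go through this class to read and modify database, table, and partition metadata; an instance is normally obtained from the static factory Hive.get(conf) rather than constructed directly.

Before the extracts, a minimal self-contained sketch of the basic pattern; it assumes a default HiveConf and uses the placeholder table name "src":

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.metadata.Table;

public class HiveMetadataSketch {
  public static void main(String[] args) throws HiveException {
    HiveConf conf = new HiveConf();
    // Obtain the thread-local metastore facade used throughout the ql layer.
    Hive db = Hive.get(conf);

    // List all tables in the current database.
    for (String name : db.getAllTables()) {
      System.out.println(name);
    }

    // Look up a single table; "src" is a placeholder name.
    Table tbl = db.getTable("src");
    System.out.println(tbl.getTableName() + " partitioned: " + tbl.isPartitioned());
  }
}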


Examples of org.apache.hadoop.hive.ql.metadata.Hive

  @Override
  public int execute(DriverContext driverContext) {

    // Create the db
    Hive db;
    try {
      db = Hive.get(conf);

      CreateDatabaseDesc createDatabaseDesc = work.getCreateDatabaseDesc();
      if (null != createDatabaseDesc) {

Examples of org.apache.hadoop.hive.ql.metadata.Hive

  }

  public int execute() {

    // Create the db
    Hive db;
    try {
      db = Hive.get(conf);

      createTableDesc crtTbl = work.getCreateTblDesc();
      if (crtTbl != null) {

Examples of org.apache.hadoop.hive.ql.metadata.Hive

   * @throws NoSuchObjectException
   * @throws TException
   */
  static private void pruneByPushDown(Table tab, Set<Partition> true_parts, String filter)
      throws HiveException, MetaException, NoSuchObjectException, TException {
    Hive db = Hive.get();
    List<Partition> parts = db.getPartitionsByFilter(tab, filter);
    true_parts.addAll(parts);
    return;
  }
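
For reference, a minimal self-contained sketch of calling getPartitionsByFilter directly; the table name and the filter string are placeholders, and the filter has to follow the metastore's partition-filter expression syntax:

import java.util.List;

import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.metadata.Table;

public class PartitionFilterSketch {
  public static void main(String[] args) throws Exception {
    // Hive.get() with no arguments uses the current session's configuration.
    Hive db = Hive.get();
    Table tab = db.getTable("page_view");   // placeholder table name

    // Filter over a string partition column "ds"; the exact grammar is defined by the metastore.
    List<Partition> parts = db.getPartitionsByFilter(tab, "ds = \"2008-08-08\"");
    for (Partition p : parts) {
      System.out.println(p.getName());
    }
  }
}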

Examples of org.apache.hadoop.hive.ql.metadata.Hive

   */
  public static Set<Partition> checkPartitionsCoveredByIndex(TableScanOperator tableScan,
      ParseContext pctx,
      Map<Table, List<Index>> indexes)
    throws HiveException {
    Hive hive = Hive.get(pctx.getConf());
    Set<Partition> queryPartitions = null;
    // make sure each partition exists on the index table
    PrunedPartitionList queryPartitionList = pctx.getOpToPartList().get(tableScan);
    if(queryPartitionList.getConfirmedPartns() != null
        && !queryPartitionList.getConfirmedPartns().isEmpty()){

Examples of org.apache.hadoop.hive.ql.metadata.Hive

  private static HiveLockObject getLockObject(HiveConf conf, String path,
    HiveLockMode mode, HiveLockObjectData data,
    String parent, boolean verifyTablePartition)
      throws LockException {
    try {
      Hive db = Hive.get(conf);
      int indx = path.lastIndexOf("LOCK-" + mode.toString());
      String objName = path.substring(("/" + parent + "/").length(), indx-1);
      String[] names = objName.split("/");

      if (names.length < 2) {
        return null;
      }

      if (!verifyTablePartition) {
        return new HiveLockObject(names, data);
      }

      // do not throw exception if table does not exist
      Table tab = db.getTable(names[0], names[1], false);
      if (tab == null) {
        return null;
      }

      if (names.length == 2) {
        return new HiveLockObject(tab, data);
      }

      Map<String, String> partSpec = new HashMap<String, String>();
      for (indx = 2; indx < names.length; indx++) {
        String[] partVals = names[indx].split("=");
        partSpec.put(partVals[0], partVals[1]);
      }

      Partition partn;
      try {
        partn = db.getPartition(tab, partSpec, false);
      } catch (HiveException e) {
        partn = null;
      }

      if (partn == null) {
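
To make the path handling above concrete, here is a small self-contained sketch that runs the same splitting logic on a hypothetical lock node path; the actual namespace and path layout are produced by the lock manager and are assumptions here:

public class LockPathParseSketch {
  public static void main(String[] args) {
    String parent = "hive_locks";   // placeholder lock namespace
    String path = "/" + parent + "/default/page_view/ds=2008-08-08/LOCK-SHARED-0000000001";

    int indx = path.lastIndexOf("LOCK-SHARED");
    // Drop the leading "/<parent>/" prefix and the trailing "/LOCK-SHARED-..." component.
    String objName = path.substring(("/" + parent + "/").length(), indx - 1);

    // Prints "default", "page_view", "ds=2008-08-08": database, table, partition key=value.
    for (String name : objName.split("/")) {
      System.out.println(name);
    }
  }
}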

Examples of org.apache.hadoop.hive.ql.metadata.Hive

  @Override
  protected int execute(DriverContext driverContext) {

    try {
      Hive db = Hive.get(conf);
      IndexMetadataChangeWork work = this.getWork();
      String tblName = work.getIndexTbl();
      Table tbl = db.getTable(work.getDbName(), tblName);
      if (tbl == null ) {
        console.printError("Index table can not be null.");
        return 1;
      }

      if (!tbl.getTableType().equals(TableType.INDEX_TABLE)) {
        console.printError("Table " + tbl.getTableName() + " not specified.");
        return 1;
      }

      if (tbl.isPartitioned() && work.getPartSpec() == null) {
        console.printError("Index table is partitioned, but no partition specified.");
        return 1;
      }

      if (work.getPartSpec() != null) {
        Partition part = db.getPartition(tbl, work.getPartSpec(), false);
        if (part == null) {
          console.printError("Partition " +
              Warehouse.makePartName(work.getPartSpec(), false).toString()
              + " does not exist.");
          return 1;
        }

        Path url = new Path(part.getDataLocation().toString());
        FileSystem fs = url.getFileSystem(conf);
        FileStatus fstat = fs.getFileStatus(url);

        part.getParameters().put(HiveIndex.INDEX_TABLE_CREATETIME, Long.toString(fstat.getModificationTime()));
        db.alterPartition(tbl.getTableName(), part);
      } else {
        Path url = new Path(tbl.getDataLocation().toString());
        FileSystem fs = url.getFileSystem(conf);
        FileStatus fstat = fs.getFileStatus(url);
        tbl.getParameters().put(HiveIndex.INDEX_TABLE_CREATETIME, Long.toString(fstat.getModificationTime()));
        db.alterTable(tbl.getTableName(), tbl);
      }
    } catch (Exception e) {
      e.printStackTrace();
      console.printError("Error changing index table/partition metadata "
          + e.getMessage());

Examples of org.apache.hadoop.hive.ql.metadata.Hive

      throws HiveException, AuthorizationException {
    HashSet<ReadEntity> inputs = sem.getInputs();
    HashSet<WriteEntity> outputs = sem.getOutputs();
    SessionState ss = SessionState.get();
    HiveOperation op = ss.getHiveOperation();
    Hive db = sem.getDb();
    if (op != null) {
      if (op.equals(HiveOperation.CREATETABLE_AS_SELECT)
          || op.equals(HiveOperation.CREATETABLE)) {
        ss.getAuthorizer().authorize(
            db.getDatabase(db.getCurrentDatabase()), null,
            HiveOperation.CREATETABLE_AS_SELECT.getOutputRequiredPrivileges());
      } else {
        if (op.equals(HiveOperation.IMPORT)) {
          ImportSemanticAnalyzer isa = (ImportSemanticAnalyzer) sem;
          if (!isa.existsTable()) {
            ss.getAuthorizer().authorize(
                db.getDatabase(db.getCurrentDatabase()), null,
                HiveOperation.CREATETABLE_AS_SELECT.getOutputRequiredPrivileges());
          }
        }
      }
      if (outputs != null && outputs.size() > 0) {
        for (WriteEntity write : outputs) {

          if (write.getType() == WriteEntity.Type.PARTITION) {
            Partition part = db.getPartition(write.getTable(), write
                .getPartition().getSpec(), false);
            if (part != null) {
              ss.getAuthorizer().authorize(write.getPartition(), null,
                      op.getOutputRequiredPrivileges());
              continue;

Examples of org.apache.hadoop.hive.ql.metadata.Hive

                               "IMPLICIT",
                               plan.getQueryStr());

      // Lock the database also
      try {
        Hive db = Hive.get(conf);
        lockObjects.add(new HiveLockObj(
                                        new HiveLockObject(db.getCurrentDatabase(), lockData),
                                        HiveLockMode.SHARED));
      } catch (HiveException e) {
        throw new SemanticException(e.getMessage());
      }

Examples of org.apache.hadoop.hive.ql.metadata.Hive

   * @return
   * @throws SemanticException
   */
  Map<Index, Set<String>> getIndexToKeysMap(List<Index> indexTables) throws SemanticException{
    Index index = null;
    Hive hiveInstance = hiveDb;
    Map<Index, Set<String>> indexToKeysMap = new LinkedHashMap<Index, Set<String>>();
     for (int idxCtr = 0; idxCtr < indexTables.size(); idxCtr++)  {
      final Set<String> indexKeyNames = new LinkedHashSet<String>();
      index = indexTables.get(idxCtr);
       //Getting index key columns
      StorageDescriptor sd = index.getSd();
      List<FieldSchema> idxColList = sd.getCols();
      for (FieldSchema fieldSchema : idxColList) {
        indexKeyNames.add(fieldSchema.getName());
      }
      assert indexKeyNames.size()==1;
      // Check that the index schema is as expected. This block should catch cases where
      // this rewrite breaks because the AggregateIndexHandler index layout has changed.
      List<String> idxTblColNames = new ArrayList<String>();
      try {
        Table idxTbl = hiveInstance.getTable(index.getDbName(),
            index.getIndexTableName());
        for (FieldSchema idxTblCol : idxTbl.getCols()) {
          idxTblColNames.add(idxTblCol.getName());
        }
      } catch (HiveException e) {

Examples of org.apache.hadoop.hive.ql.metadata.Hive

  }

  public int execute() {

    // Create the db
    Hive db;
    FileSystem fs;
    try {
      db = Hive.get(conf);
      fs = FileSystem.get(conf);

      createTableDesc crtTbl = work.getCreateTblDesc();
      if (crtTbl != null) {

        // create the table
        Table tbl = new Table(crtTbl.getTableName());
        tbl.setFields(crtTbl.getCols());
        StorageDescriptor tblStorDesc = tbl.getTTable().getSd();
        if (crtTbl.getBucketCols() != null)
          tblStorDesc.setBucketCols(crtTbl.getBucketCols());
        if (crtTbl.getSortCols() != null)
          tbl.setSortCols(crtTbl.getSortCols());
        if (crtTbl.getPartCols() != null)
          tbl.setPartCols(crtTbl.getPartCols());
        if (crtTbl.getNumBuckets() != -1)
          tblStorDesc.setNumBuckets(crtTbl.getNumBuckets());

        if (crtTbl.getSerName() != null) {
          tbl.setSerializationLib(crtTbl.getSerName());
          if (crtTbl.getMapProp() != null) {
            Iterator<Map.Entry<String, String>> iter = crtTbl.getMapProp().entrySet().iterator();
            while (iter.hasNext()) {
              Map.Entry<String, String> m = iter.next();
              tbl.setSerdeParam(m.getKey(), m.getValue());
            }
          }
        }
        else
        {
          if (crtTbl.getFieldDelim() != null)
          {
            tbl.setSerdeParam(Constants.FIELD_DELIM, crtTbl.getFieldDelim());
            tbl.setSerdeParam(Constants.SERIALIZATION_FORMAT, crtTbl.getFieldDelim());
          }
       
          if (crtTbl.getCollItemDelim() != null)
            tbl.setSerdeParam(Constants.COLLECTION_DELIM, crtTbl.getCollItemDelim());
          if (crtTbl.getMapKeyDelim() != null)
            tbl.setSerdeParam(Constants.MAPKEY_DELIM, crtTbl.getMapKeyDelim());
          if (crtTbl.getLineDelim() != null)
            tbl.setSerdeParam(Constants.LINE_DELIM, crtTbl.getLineDelim());
        }
       
        /**
         * For now, if the user specifies either the map-key or the collection-item delimiter,
         * we switch the table's SerDe to DynamicSerDe with TCTLSeparatedProtocol.
         * In the future, we should do this for any delimiter specified, but that would break
         * older Hive tables, so not yet.
         */
        if (crtTbl.getCollItemDelim() != null || crtTbl.getMapKeyDelim() != null) {
          tbl.setSerializationLib(org.apache.hadoop.hive.serde2.dynamic_type.DynamicSerDe.class.getName());
          tbl.setSerdeParam(org.apache.hadoop.hive.serde.Constants.SERIALIZATION_FORMAT, org.apache.hadoop.hive.serde2.thrift.TCTLSeparatedProtocol.class.getName());
        }


        if (crtTbl.getComment() != null)
          tbl.setProperty("comment", crtTbl.getComment());
        if (crtTbl.getLocation() != null)
          tblStorDesc.setLocation(crtTbl.getLocation());

        if (crtTbl.isSequenceFile()) {
          tbl.setInputFormatClass(SequenceFileInputFormat.class);
          tbl.setOutputFormatClass(SequenceFileOutputFormat.class);
        }
        else {
          tbl.setOutputFormatClass(IgnoreKeyTextOutputFormat.class);
          tbl.setInputFormatClass(TextInputFormat.class);
        }

        if (crtTbl.isExternal())
          tbl.setProperty("EXTERNAL", "TRUE");

        // If the sort columns are a superset of the bucketed columns, store this fact; it can later be
        // used to optimize some group-by queries. Note that the order does not matter as long as each
        // bucketed column appears among the first 'n' sort columns, where 'n' is the number of bucketed columns.
        if ((tbl.getBucketCols() != null) && (tbl.getSortCols() != null))
        {
          List<String> bucketCols = tbl.getBucketCols();
          List<Order> sortCols = tbl.getSortCols();

          if (sortCols.size() >= bucketCols.size())
          {
            boolean found = true;

            Iterator<String> iterBucketCols = bucketCols.iterator();
            while (iterBucketCols.hasNext())
            {
              String bucketCol = iterBucketCols.next();
              boolean colFound = false;
              for (int i = 0; i < bucketCols.size(); i++)
              {
                if (bucketCol.equals(sortCols.get(i).getCol())) {
                  colFound = true;
                  break;
                }
              }
              if (colFound == false)
              {
                found = false;
                break;
              }
            }
            if (found)
              tbl.setProperty("SORTBUCKETCOLSPREFIX", "TRUE");
          }
        }
       
        // set owner, create_time etc
        tbl.setOwner(System.getProperty("user.name"));
        // set create time
        tbl.getTTable().setCreateTime((int) (System.currentTimeMillis()/1000));

        // create the table
        db.createTable(tbl);
        return 0;
      }

      dropTableDesc dropTbl = work.getDropTblDesc();
      if (dropTbl != null) {
        if(dropTbl.getPartSpecs() == null) {
          // drop the table
          db.dropTable(dropTbl.getTableName());
        } else {
          // drop partitions in the list
          Table tbl  = db.getTable(dropTbl.getTableName());
          List<Partition> parts = new ArrayList<Partition>();
          for(HashMap<String, String> partSpec : dropTbl.getPartSpecs()) {
            Partition part = db.getPartition(tbl, partSpec, false);
            if(part == null) {
              console.printInfo("Partition " + partSpec + " does not exist.");
            } else {
              parts.add(part);
            }
          }
          // drop all existing partitions from the list
          for (Partition partition : parts) {
            console.printInfo("Dropping the partition " + partition.getName());
            db.dropPartition(MetaStoreUtils.DEFAULT_DATABASE_NAME,
                dropTbl.getTableName(),
                partition.getValues(),
                true); //drop data for the partition
          }
        }
        return 0;
      }

      alterTableDesc alterTbl = work.getAlterTblDesc();
      if (alterTbl != null) {
        // alter the table
        Table tbl = db.getTable(alterTbl.getOldName());
        if (alterTbl.getOp() == alterTableDesc.alterTableTypes.RENAME)
          tbl.getTTable().setTableName(alterTbl.getNewName());
        else if(alterTbl.getOp() == alterTableDesc.alterTableTypes.ADDCOLS) {
          List<FieldSchema> newCols = alterTbl.getNewCols();
          List<FieldSchema> oldCols = tbl.getCols();
          if(tbl.getSerializationLib().equals(columnsetSerDe.class.getName())) {
            console.printInfo("Replacing columns for columnsetSerDe and changing to typed SerDe");
            tbl.setSerializationLib(MetadataTypedColumnsetSerDe.class.getName());
            tbl.getTTable().getSd().setCols(newCols);
          }
          else {
            // make sure the new columns do not already exist
            Iterator<FieldSchema> iterNewCols = newCols.iterator();
            while (iterNewCols.hasNext()) {
              FieldSchema newCol = iterNewCols.next();
              String newColName  = newCol.getName();
              Iterator<FieldSchema> iterOldCols = oldCols.iterator();
              while (iterOldCols.hasNext()) {
                String oldColName = iterOldCols.next().getName();
                if (oldColName.equalsIgnoreCase(newColName)) {
                  console.printError("Column '" + newColName + "' exists");
                  return 1;
                }
              }
              oldCols.add(newCol);
            }
            tbl.getTTable().getSd().setCols(oldCols);
          }
        }
        else if(alterTbl.getOp() == alterTableDesc.alterTableTypes.REPLACECOLS) {
          // change SerDe to MetadataTypedColumnsetSerDe if it is columnsetSerDe
          if(tbl.getSerializationLib().equals(columnsetSerDe.class.getName())) {
            console.printInfo("Replacing columns for columnsetSerDe and changing to typed SerDe");
            tbl.setSerializationLib(MetadataTypedColumnsetSerDe.class.getName());
          }
          else if(!tbl.getSerializationLib().equals(MetadataTypedColumnsetSerDe.class.getName())) {
            console.printError("Replace columns is not supported for this table. SerDe may be incompatible.");
            return 1;
          }
          tbl.getTTable().getSd().setCols(alterTbl.getNewCols());
        }
        else {
          console.printError("Unsupported Alter commnad");
          return 1;
        }

        // set last modified by properties
        tbl.setProperty("last_modified_by", System.getProperty("user.name"));
        tbl.setProperty("last_modified_time", Long.toString(System.currentTimeMillis()/1000));

        try {
          db.alterTable(alterTbl.getOldName(), tbl);
        } catch (InvalidOperationException e) {
          LOG.info("alter table: " + StringUtils.stringifyException(e));
          return 1;
        } catch (MetaException e) {
          return 1;
        } catch (TException e) {
          return 1;
        }
        return 0;
      }

      descTableDesc descTbl = work.getDescTblDesc();
      if (descTbl != null) {
        // describe the table - populate the output stream
        Table tbl = db.getTable(descTbl.getTableName(), false);
        Partition part = null;
        try {
          if(tbl == null) {
            DataOutput outStream = (DataOutput)fs.create(descTbl.getResFile());
            String errMsg = "Table " + descTbl.getTableName() + " does not exist";
            outStream.write(errMsg.getBytes("UTF-8"));
            ((FSDataOutputStream)outStream).close();
            return 0;
          }
          if(descTbl.getPartSpec() != null) {
            part = db.getPartition(tbl, descTbl.getPartSpec(), false);
            if(part == null) {
              DataOutput outStream = (DataOutput)fs.create(descTbl.getResFile());
              String errMsg = "Partition " + descTbl.getPartSpec() + " for table " + descTbl.getTableName() + " does not exist";
              outStream.write(errMsg.getBytes("UTF-8"));
              ((FSDataOutputStream)outStream).close();
              return 0;
            }
          }
        } catch (FileNotFoundException e) {
          LOG.info("describe table: " + StringUtils.stringifyException(e));
          return 1;
        }
        catch (IOException e) {
          LOG.info("describe table: " + StringUtils.stringifyException(e));
          return 1;
        }
       
        try {

          LOG.info("DDLTask: got data for " +  tbl.getName());
         
          // write the results in the file
          DataOutput os = (DataOutput)fs.create(descTbl.getResFile());
          List<FieldSchema> cols = tbl.getCols();
          if(part != null) {
            cols = part.getTPartition().getSd().getCols();
          }
          Iterator<FieldSchema> iterCols = cols.iterator();
          boolean firstCol = true;
          while (iterCols.hasNext())
          {
            if (!firstCol)
              os.write(terminator);
            FieldSchema col = iterCols.next();
            os.write(col.getName().getBytes("UTF-8"));
            os.write(separator);
            os.write(col.getType().getBytes("UTF-8"));
            if (col.getComment() != null)
            {
              os.write(separator);
              os.write(col.getComment().getBytes("UTF-8"));
            }
            firstCol = false;
          }

          // also return the partitioning columns
          List<FieldSchema> partCols = tbl.getPartCols();
          Iterator<FieldSchema> iterPartCols = partCols.iterator();
          while (iterPartCols.hasNext())
          {
            os.write(terminator);
            FieldSchema col = iterPartCols.next();
            os.write(col.getName().getBytes("UTF-8"));
            os.write(separator);
            os.write(col.getType().getBytes("UTF-8"));
            if (col.getComment() != null)
            {
              os.write(separator);
              os.write(col.getComment().getBytes("UTF-8"));
            }
          }
         
          // if extended desc table then show the complete details of the table
          if(descTbl.isExt()) {
            if(part != null) {
              // show partition information
              os.write("\n\nDetailed Partition Information:\n".getBytes("UTF-8"));
              os.write(part.getTPartition().toString().getBytes("UTF-8"));
            } else {
              os.write("\nDetailed Table Information:\n".getBytes("UTF-8"));
              os.write(tbl.getTTable().toString().getBytes("UTF-8"));
            }
          }
         
          LOG.info("DDLTask: written data for " +  tbl.getName());
          ((FSDataOutputStream)os).close();
         
        } catch (FileNotFoundException e) {
          LOG.info("describe table: " + StringUtils.stringifyException(e));
          return 1;
        }
        catch (IOException e) {
          LOG.info("describe table: " + StringUtils.stringifyException(e));
          return 1;
        }
        return 0;
      }

      showTablesDesc showTbls = work.getShowTblsDesc();
      if (showTbls != null) {
        // get the tables for the desired pattern - populate the output stream
        List<String> tbls = null;
        if (showTbls.getPattern() != null)
        {
          LOG.info("pattern: " + showTbls.getPattern());
          tbls = db.getTablesByPattern(showTbls.getPattern());
          LOG.info("results : " + tbls.size());
        }
        else
          tbls = db.getAllTables();
       
        // write the results in the file
        try {
          DataOutput outStream = (DataOutput)fs.create(showTbls.getResFile());
          SortedSet<String> sortedTbls = new TreeSet<String>(tbls);
          Iterator<String> iterTbls = sortedTbls.iterator();
          boolean firstCol = true;
          while (iterTbls.hasNext())
          {
            if (!firstCol)
              outStream.write(separator);
            outStream.write(iterTbls.next().getBytes("UTF-8"));
            firstCol = false;
          }
          ((FSDataOutputStream)outStream).close();
        } catch (FileNotFoundException e) {
          LOG.info("show table: " + StringUtils.stringifyException(e));
          return 1;
        } catch (IOException e) {
          LOG.info("show table: " + StringUtils.stringifyException(e));
          return 1;
        }
        return 0;
      }

      showPartitionsDesc showParts = work.getShowPartsDesc();
      if (showParts != null) {
        // get the partitions for the table and populate the output
        String tabName = showParts.getTabName();
        Table tbl = null;
        List<String> parts = null;

        tbl = db.getTable(tabName);

        if (!tbl.isPartitioned()) {
          console.printError("Table " + tabName + " is not a partitioned table");
          return 1;
        }

        parts = db.getPartitionNames(MetaStoreUtils.DEFAULT_DATABASE_NAME, tbl.getName(), Short.MAX_VALUE);

        // write the results in the file
        try {
          DataOutput outStream = (DataOutput)fs.create(showParts.getResFile());
          Iterator<String> iterParts = parts.iterator();