Package org.apache.hadoop.hive.ql.metadata

Examples of org.apache.hadoop.hive.ql.metadata.Hive
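The Hive class is the query layer's facade over the metastore: Hive.get(conf) returns a thread-local instance backed by an IMetaStoreClient, and its methods (getTable, getPartition, getDatabase, alterTable, getMSC, setMetaConf, and so on) wrap metastore calls. Before the excerpts, a minimal sketch, not taken from any of them, of obtaining an instance and looking up a table; it assumes a hive-site.xml on the classpath and uses a made-up table default.src:

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.metadata.Table;

public class HiveFacadeSketch {
  public static void main(String[] args) throws HiveException {
    HiveConf conf = new HiveConf();             // reads hive-site.xml from the classpath
    Hive db = Hive.get(conf);                   // thread-local instance; reused by later get() calls
    Table tbl = db.getTable("default", "src");  // metastore lookup; throws HiveException if the table is absent
    System.out.println(tbl.getDataLocation());  // warehouse location of the table's data
    Hive.closeCurrent();                        // release this thread's metastore connection
  }
}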


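The first excerpt, apparently from the query driver's authorization routine, pulls the Hive object off the semantic analyzer, authorizes CREATE TABLE and CREATE TABLE AS SELECT against the current database, and then checks each output partition: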
  public static void doAuthorization(BaseSemanticAnalyzer sem) // signature inferred; the excerpt begins mid-declaration
      throws HiveException, AuthorizationException {
    HashSet<ReadEntity> inputs = sem.getInputs();
    HashSet<WriteEntity> outputs = sem.getOutputs();
    SessionState ss = SessionState.get();
    HiveOperation op = ss.getHiveOperation();
    Hive db = sem.getDb();
    if (op != null) {
      if (op.equals(HiveOperation.CREATETABLE_AS_SELECT)
          || op.equals(HiveOperation.CREATETABLE)) {
        ss.getAuthorizer().authorize(
            db.getDatabase(db.getCurrentDatabase()), null,
            HiveOperation.CREATETABLE_AS_SELECT.getOutputRequiredPrivileges());
      }
      if (outputs != null && outputs.size() > 0) {
        for (WriteEntity write : outputs) {

          if (write.getType() == WriteEntity.Type.PARTITION) {
            Partition part = db.getPartition(write.getTable(), write
                .getPartition().getSpec(), false);
            if (part != null) {
              ss.getAuthorizer().authorize(write.getPartition(), null,
                      op.getOutputRequiredPrivileges());
              continue;
              // ... (excerpt truncated)


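Next, an implicit locking path: Hive.get(conf) supplies the current database name so a SHARED database-level lock can be added alongside the table locks: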
                               String.valueOf(System.currentTimeMillis()),
                               "IMPLICIT");

      // Lock the database also
      try {
        Hive db = Hive.get(conf);
        lockObjects.add(new HiveLockObj(
                                        new HiveLockObject(db.getCurrentDatabase(), lockData),
                                        HiveLockMode.SHARED));
      } catch (HiveException e) {
        throw new SemanticException(e.getMessage(), e); // keep the cause attached
      }
      // ... (excerpt truncated)

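A DDL task's execute() method starts by obtaining the Hive object before dispatching on its work descriptor, here a CreateDatabaseDesc: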
  @Override
  public int execute(DriverContext driverContext) {

    // Create the db
    Hive db;
    try {
      db = Hive.get(conf);

      CreateDatabaseDesc createDatabaseDesc = work.getCreateDatabaseDesc();
      if (null != createDatabaseDesc) {
        // ... (excerpt truncated)

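An index utility resolves Hive from the parse context's configuration to verify that every partition the query touches also exists in the index table: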
   * @return the partitions used by the query, or null if they do not exist in the index table
   * @throws HiveException
   */
  public static Set<Partition> checkPartitionsCoveredByIndex(TableScanOperator tableScan,
      ParseContext pctx, List<Index> indexes) throws HiveException {
    Hive hive = Hive.get(pctx.getConf());
    // make sure each partition exists on the index table
    PrunedPartitionList queryPartitionList = pctx.getOpToPartList().get(tableScan);
    Set<Partition> queryPartitions = queryPartitionList.getPartitions();
    if (queryPartitions == null || queryPartitions.isEmpty()) {
      return null;
      // ... (excerpt truncated)

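A lock manager reaches the raw metastore client through db.getMSC(), mapping both MetaException and HiveException onto a LockException: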
      if (conf == null) {
        throw new RuntimeException("Must call setHiveConf before any other " +
            "methods.");
      }
      try {
        Hive db = Hive.get(conf);
        client = db.getMSC();
      } catch (MetaException | HiveException e) {
        throw new LockException(ErrorMsg.METASTORE_COULD_NOT_INITIATE.getMsg(), e);
      }
      // ... (excerpt truncated)

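A longer excerpt, an index-metadata task (its work type is IndexMetadataChangeWork), validates the index table and then stamps the table or one of its partitions with the modification time of its data location, persisting the change through alterPartition or alterTable: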
  @Override
  protected int execute(DriverContext driverContext) {

    try {
      Hive db = Hive.get(conf);
      IndexMetadataChangeWork work = this.getWork();
      String tblName = work.getIndexTbl();
      Table tbl = db.getTable(work.getDbName(), tblName);
      if (tbl == null ) {
        console.printError("Index table can not be null.");
        return 1;
      }

      if (tbl.getTableType() != TableType.INDEX_TABLE) {
        console.printError("Table " + tbl.getTableName() + " is not an index table.");
        return 1;
      }

      if (tbl.isPartitioned() && work.getPartSpec() == null) {
        console.printError("Index table is partitioned, but no partition specified.");
        return 1;
      }

      if (work.getPartSpec() != null) {
        Partition part = db.getPartition(tbl, work.getPartSpec(), false);
        if (part == null) {
          console.printError("Partition " +
              Warehouse.makePartName(work.getPartSpec(), false).toString()
              + " does not exist.");
          return 1;
        }

        Path path = part.getDataLocation();
        FileSystem fs = path.getFileSystem(conf);
        FileStatus fstat = fs.getFileStatus(path);

        part.getParameters().put(HiveIndex.INDEX_TABLE_CREATETIME, Long.toString(fstat.getModificationTime()));
        db.alterPartition(tbl.getTableName(), part);
      } else {
        Path path = tbl.getPath();
        FileSystem fs = path.getFileSystem(conf);
        FileStatus fstat = fs.getFileStatus(path);
        tbl.getParameters().put(HiveIndex.INDEX_TABLE_CREATETIME, Long.toString(fstat.getModificationTime()));
        db.alterTable(tbl.getDbName() + "." + tbl.getTableName(), tbl);
      }
    } catch (Exception e) {
      e.printStackTrace();
      console.printError("Error changing index table/partition metadata "
          + e.getMessage());
      // ... (excerpt truncated)

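SET-command handling: hivevar: assignments go through VariableSubstitution into the session, while metaconf: assignments are forwarded to the metastore via Hive.setMetaConf: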
    } else if (varname.startsWith(HIVEVAR_PREFIX)) {
      String propName = varname.substring(HIVEVAR_PREFIX.length());
      ss.getHiveVariables().put(propName, new VariableSubstitution().substitute(ss.getConf(),varvalue));
    } else if (varname.startsWith(METACONF_PREFIX)) {
      String propName = varname.substring(METACONF_PREFIX.length());
      Hive hive = Hive.get(ss.getConf());
      hive.setMetaConf(propName, new VariableSubstitution().substitute(ss.getConf(), varvalue));
    } else {
      setConf(varname, varname, varvalue, true);
    }
    return 0;
  }
  // ... (excerpt truncated)

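The matching read path fetches a metaconf: value back through Hive.getMetaConf: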
        ss.out.println(varname + " is undefined as a hive variable");
        return new CommandProcessorResponse(1);
      }
    } else if (varname.startsWith(METACONF_PREFIX)) {
      String var = varname.substring(METACONF_PREFIX.length());
      Hive hive = Hive.get(ss.getConf());
      String value = hive.getMetaConf(var);
      if (value != null) {
        ss.out.println(METACONF_PREFIX + var + "=" + value);
        return createProcessorSuccessResponse();
      } else {
        ss.out.println(varname + " is undefined as a hive meta variable");
        // ... (excerpt truncated)
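For illustration, a hypothetical CLI session exercising both paths; the first form is routed to Hive.setMetaConf, the second is read back through Hive.getMetaConf and echoed in the format printed above (hive.metastore.try.direct.sql is one real metastore setting, but any metastore property would do):

  hive> SET metaconf:hive.metastore.try.direct.sql=false;
  hive> SET metaconf:hive.metastore.try.direct.sql;
  metaconf:hive.metastore.try.direct.sql=false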

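A later revision of the authorization routine from the first excerpt: SQL standard-based (V2) authorization short-circuits with column-access information, while the legacy path below it still resolves databases and partitions through the Hive object: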
  public static void doAuthorization(BaseSemanticAnalyzer sem, String command) // signature inferred; the excerpt begins mid-declaration
      throws HiveException, AuthorizationException {
    HashSet<ReadEntity> inputs = sem.getInputs();
    HashSet<WriteEntity> outputs = sem.getOutputs();
    SessionState ss = SessionState.get();
    HiveOperation op = ss.getHiveOperation();
    Hive db = sem.getDb();

    if (ss.isAuthorizationModeV2()) {
      // get mapping of tables to columns used
      ColumnAccessInfo colAccessInfo = sem.getColumnAccessInfo();
      // colAccessInfo is set only in case of SemanticAnalyzer
      Map<String, List<String>> selectTab2Cols = colAccessInfo != null ? colAccessInfo
          .getTableToColumnAccessMap() : null;
      Map<String, List<String>> updateTab2Cols = sem.getUpdateColumnAccessInfo() != null ?
          sem.getUpdateColumnAccessInfo().getTableToColumnAccessMap() : null;
      doAuthorizationV2(ss, op, inputs, outputs, command, selectTab2Cols, updateTab2Cols);
      return;
    }
    if (op == null) {
      throw new HiveException("Operation should not be null");
    }
    HiveAuthorizationProvider authorizer = ss.getAuthorizer();
    if (op.equals(HiveOperation.CREATEDATABASE)) {
      authorizer.authorize(
          op.getInputRequiredPrivileges(), op.getOutputRequiredPrivileges());
    } else if (op.equals(HiveOperation.CREATETABLE_AS_SELECT)
        || op.equals(HiveOperation.CREATETABLE)) {
      authorizer.authorize(
          db.getDatabase(SessionState.get().getCurrentDatabase()), null,
          HiveOperation.CREATETABLE_AS_SELECT.getOutputRequiredPrivileges());
    } else {
      if (op.equals(HiveOperation.IMPORT)) {
        ImportSemanticAnalyzer isa = (ImportSemanticAnalyzer) sem;
        if (!isa.existsTable()) {
          authorizer.authorize(
              db.getDatabase(SessionState.get().getCurrentDatabase()), null,
              HiveOperation.CREATETABLE_AS_SELECT.getOutputRequiredPrivileges());
        }
      }
    }
    if (outputs != null && outputs.size() > 0) {
      for (WriteEntity write : outputs) {
        if (write.isDummy()) {
          continue;
        }
        if (write.getType() == Entity.Type.DATABASE) {
          authorizer.authorize(write.getDatabase(),
              null, op.getOutputRequiredPrivileges());
          continue;
        }

        if (write.getType() == WriteEntity.Type.PARTITION) {
          Partition part = db.getPartition(write.getTable(), write
              .getPartition().getSpec(), false);
          if (part != null) {
            authorizer.authorize(write.getPartition(), null,
                    op.getOutputRequiredPrivileges());
            continue;
            // ... (excerpt truncated)

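Finally, a test helper resolves a Table through Hive and, when the table is partitioned, gathers each Partition's descriptor and data location to build a FetchWork: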
  private List<String> getTableData(String table, String database) throws Exception {
    HiveConf conf = new HiveConf();
    conf.addResource("hive-site.xml");
    ArrayList<String> results = new ArrayList<String>();
    ArrayList<String> temp = new ArrayList<String>();
    Hive hive = Hive.get(conf);
    org.apache.hadoop.hive.ql.metadata.Table tbl = hive.getTable(database, table);
    FetchWork work;
    if (!tbl.getPartCols().isEmpty()) {
      List<Partition> partitions = hive.getPartitions(tbl);
      List<PartitionDesc> partDesc = new ArrayList<PartitionDesc>();
      List<Path> partLocs = new ArrayList<Path>();
      for (Partition part : partitions) {
        partLocs.add(part.getDataLocation());
        partDesc.add(Utilities.getPartitionDesc(part));
        // ... (excerpt truncated)
