Package org.apache.hadoop.hive.ql.metadata

Examples of org.apache.hadoop.hive.ql.metadata.HiveException


   
    try {

      if (privSubjectDesc != null) {
        if (privSubjectDesc.getPartSpec() != null && isGrant) {
          throw new HiveException("Grant does not support partition level.");
        }
        String obj = privSubjectDesc.getObject();
        boolean notFound = true;
        if (privSubjectDesc.getTable()) {
          String[] dbTab = obj.split("\\.");
          if (dbTab.length == 2) {
            dbName = dbTab[0];
            tableName = dbTab[1];
          } else {
            dbName = db.getCurrentDatabase();
            tableName = obj;
          }
          dbObj = db.getDatabase(dbName);
          tableObj = db.getTable(dbName, tableName);
          notFound = (dbObj == null || tableObj == null);
        } else {
          dbName = privSubjectDesc.getObject();
          dbObj = db.getDatabase(dbName);
          notFound = (dbObj == null);
        }
        if (notFound) {
          throw new HiveException(obj + " cannot be found");
        }
      }

      PrivilegeBag privBag = new PrivilegeBag();
      if (privSubjectDesc == null) {
        for (int idx = 0; idx < privileges.size(); idx++) {
          Privilege priv = privileges.get(idx).getPrivilege();
          if (privileges.get(idx).getColumns() != null
              && privileges.get(idx).getColumns().size() > 0) {
            throw new HiveException(
                "For user-level privileges, column sets should be null. columns="
                    + privileges.get(idx).getColumns().toString());
          }

          privBag.addToPrivileges(new HiveObjectPrivilege(new HiveObjectRef(
              HiveObjectType.GLOBAL, null, null, null, null), null, null,
              new PrivilegeGrantInfo(priv.getPriv(), 0, grantor, grantorType,
                  grantOption)));
        }
      } else {
        org.apache.hadoop.hive.metastore.api.Partition partObj = null;
        List<String> partValues = null;
        if (tableObj != null) {
          if ((!tableObj.isPartitioned())
              && privSubjectDesc.getPartSpec() != null) {
            throw new HiveException(
                "Table is not partitioned, but partition name is present: partSpec="
                    + privSubjectDesc.getPartSpec().toString());
          }

          if (privSubjectDesc.getPartSpec() != null) {
            partObj = db.getPartition(tableObj, privSubjectDesc.getPartSpec(),
                false).getTPartition();
            partValues = partObj.getValues();
          }
        }

        for (PrivilegeDesc privDesc : privileges) {
          List<String> columns = privDesc.getColumns();
          Privilege priv = privDesc.getPrivilege();
          if (columns != null && columns.size() > 0) {
            if (!priv.supportColumnLevel()) {
              throw new HiveException(priv.getPriv()
                  + " does not support column level.");
            }
            if (privSubjectDesc == null || tableName == null) {
              throw new HiveException(
                  "For user-level/database-level privileges, column sets should be null. columns="
                      + columns);
            }
            for (int i = 0; i < columns.size(); i++) {
              privBag.addToPrivileges(new HiveObjectPrivilege(
              // ... snippet truncated
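
The loop above is cut off mid-call, but for comparison with the GLOBAL case, a table-level entry would use a TABLE object reference. A minimal sketch, assuming the same thrift constructors used above ("default" and "src" are placeholder database/table names, not from the original code):

          // Hypothetical table-level entry, mirroring the GLOBAL case above.
          HiveObjectRef tableRef = new HiveObjectRef(HiveObjectType.TABLE,
              "default", "src", null /* partValues */, null /* columnName */);
          privBag.addToPrivileges(new HiveObjectPrivilege(tableRef, null, null,
              new PrivilegeGrantInfo(priv.getPriv(), 0, grantor, grantorType,
                  grantOption)));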


            outStream.write(terminator);
          }
          ((FSDataOutputStream) outStream).close();
        }
      } else {
        throw new HiveException("Unkown role operation "
            + operation.getOperationName());
      }
    } catch (HiveException e) {
      console.printError("Error in role operation "
          + operation.getOperationName() + " on role name "
      // ... snippet truncated

      } else { // if one of them is null, replace the old params with the new one
        database.setParameters(newParams);
      }
      db.alterDatabase(database.getName(), database);
    } else {
      throw new HiveException("ERROR: The database " + dbName + " does not exist.");
    }
    return 0;
  }
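
The else branch above replaces the parameter map outright when one side is null; when both maps are non-null, they would be merged first so that new values win. A minimal sketch of such a merge (assumed, not taken from the surrounding source; uses java.util.HashMap):

      // Assumed merge step: copy the existing parameters, overlay the new
      // ones, and write the combined map back before alterDatabase().
      Map<String, String> merged =
          new HashMap<String, String>(database.getParameters());
      merged.putAll(newParams);
      database.setParameters(merged);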

    if (touchDesc.getPartSpec() == null) {
      try {
        db.alterTable(tblName, tbl);
      } catch (InvalidOperationException e) {
        throw new HiveException("Uable to update table");
      }
      work.getInputs().add(new ReadEntity(tbl));
      work.getOutputs().add(new WriteEntity(tbl));
    } else {
      Partition part = db.getPartition(tbl, touchDesc.getPartSpec(), false);
      if (part == null) {
        throw new HiveException("Specified partition does not exist");
      }
      try {
        db.alterPartition(tblName, part);
      } catch (InvalidOperationException e) {
        throw new HiveException(e);
      }
      work.getInputs().add(new ReadEntity(part));
      work.getOutputs().add(new WriteEntity(part));
    }
    return 0;

  private boolean pathExists(Path p) throws HiveException {
    try {
      FileSystem fs = p.getFileSystem(conf);
      return fs.exists(p);
    } catch (IOException e) {
      throw new HiveException(e);
    }
  }
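
pathExists() converts the checked IOException from FileSystem.exists() into a HiveException, so callers can guard destructive renames with a one-line check. A hypothetical usage (the path below is a placeholder):

    // Fail early with a HiveException instead of clobbering existing data.
    Path dest = new Path("/warehouse/mytable/ds=1-intermediate-archived");
    if (pathExists(dest)) {
      throw new HiveException("Destination " + dest + " already exists");
    }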

  private void moveDir(FileSystem fs, Path from, Path to) throws HiveException {
    try {
      if (!fs.rename(from, to)) {
        throw new HiveException("Moving " + from + " to " + to + " failed!");
      }
    } catch (IOException e) {
      throw new HiveException(e);
    }
  }

  private void deleteDir(Path dir) throws HiveException {
    try {
      Warehouse wh = new Warehouse(conf);
      wh.deleteDir(dir, true);
    } catch (MetaException e) {
      throw new HiveException(e);
    }
  }
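
moveDir() and deleteDir() follow the same convention as pathExists(): do the filesystem or metastore work, and rethrow any checked exception as a HiveException so callers only handle one exception type. A sketch of another helper in the same style (hypothetical, not part of the original class):

  // Hypothetical helper in the same style: compute a directory's total
  // size, converting the checked IOException into a HiveException.
  private long dirSize(Path dir) throws HiveException {
    try {
      FileSystem fs = dir.getFileSystem(conf);
      return fs.getContentSummary(dir).getLength();
    } catch (IOException e) {
      throw new HiveException(e);
    }
  }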

    Map<String, String> partSpec = simpleDesc.getPartSpec();
    Partition p = db.getPartition(tbl, partSpec, false);

    if (tbl.getTableType() != TableType.MANAGED_TABLE) {
      throw new HiveException("ARCHIVE can only be performed on managed tables");
    }

    if (p == null) {
      throw new HiveException("Specified partition does not exist");
    }

    if (isArchived(p)) {
      // If there were a failure right after the metadata was updated in an
      // archiving operation, it's possible that the original, unarchived files
      // weren't deleted.
      Path originalDir = new Path(getOriginalLocation(p));
      Path leftOverIntermediateOriginal = new Path(originalDir.getParent(),
          originalDir.getName() + INTERMEDIATE_ORIGINAL_DIR_SUFFIX);

      if (pathExists(leftOverIntermediateOriginal)) {
        console.printInfo("Deleting " + leftOverIntermediateOriginal +
        " left over from a previous archiving operation");
        deleteDir(leftOverIntermediateOriginal);
      }

      throw new HiveException("Specified partition is already archived");
    }

    Path originalDir = p.getPartitionPath();
    Path intermediateArchivedDir = new Path(originalDir.getParent(),
        originalDir.getName() + INTERMEDIATE_ARCHIVED_DIR_SUFFIX);
    Path intermediateOriginalDir = new Path(originalDir.getParent(),
        originalDir.getName() + INTERMEDIATE_ORIGINAL_DIR_SUFFIX);
    String archiveName = "data.har";
    FileSystem fs = null;
    try {
      fs = originalDir.getFileSystem(conf);
    } catch (IOException e) {
      throw new HiveException(e);
    }

    // The following steps seem roundabout, but they are meant to aid in
    // recovery if a failure occurs and to keep a consistent state in the FS

    // Steps:
    // 1. Create the archive in a temporary folder
    // 2. Move the archive dir to an intermediate dir at the same level as
    //    the original partition dir. Call the new dir
    //    intermediate-archive.
    // 3. Rename the original partition dir to an intermediate dir. Call the
    //    renamed dir intermediate-original
    // 4. Rename intermediate-archive to the original partition dir
    // 5. Change the metadata
    // 6. Delete the original partition files in intermediate-original

    // The original partition files are deleted after the metadata change
    // because the presence of those files is used to indicate whether
    // the original partition directory contains archived or unarchived files.

    // Create an archived version of the partition in a directory ending in
    // ARCHIVE_INTERMEDIATE_DIR_SUFFIX at the same level as the partition,
    // if it does not already exist. If it does exist, we assume the dir is good
    // to use, as the move operation that created it is atomic.
    if (!pathExists(intermediateArchivedDir) &&
        !pathExists(intermediateOriginalDir)) {

      // First create the archive in a tmp dir so that if the job fails, the
      // bad files don't pollute the filesystem
      Path tmpDir = new Path(driverContext.getCtx().getExternalTmpFileURI(originalDir.toUri()), "partlevel");

      console.printInfo("Creating " + archiveName + " for " + originalDir.toString());
      console.printInfo("in " + tmpDir);
      console.printInfo("Please wait... (this may take a while)");

      // Create the Hadoop archive
      HadoopShims shim = ShimLoader.getHadoopShims();
      int ret = 0;
      try {
        ret = shim.createHadoopArchive(conf, originalDir, tmpDir, archiveName);
      } catch (Exception e) {
        throw new HiveException(e);
      }
      if (ret != 0) {
        throw new HiveException("Error while creating HAR");
      }
      // Move from the tmp dir to an intermediate directory at the same level
      // as the partition directory. e.g. .../hr=12-intermediate-archived
      try {
        console.printInfo("Moving " + tmpDir + " to " + intermediateArchivedDir);
        if (pathExists(intermediateArchivedDir)) {
          throw new HiveException("The intermediate archive directory already exists.");
        }
        fs.rename(tmpDir, intermediateArchivedDir);
      } catch (IOException e) {
        throw new HiveException("Error while moving tmp directory");
      }
    } else {
      if (pathExists(intermediateArchivedDir)) {
        console.printInfo("Intermediate archive directory " + intermediateArchivedDir +
        " already exists. Assuming it contains an archived version of the partition");
      }
    }

    // If we get to here, we know that we've archived the partition files, but
    // they may be in the original partition location, or in the intermediate
    // original dir.

    // Move the original parent directory to the intermediate original directory
    // if the move hasn't been made already
    if (!pathExists(intermediateOriginalDir)) {
      console.printInfo("Moving " + originalDir + " to " +
          intermediateOriginalDir);
      moveDir(fs, originalDir, intermediateOriginalDir);
    } else {
      console.printInfo(intermediateOriginalDir + " already exists. " +
          "Assuming it contains the original files in the partition");
    }

    // If there's a failure from here to when the metadata is updated,
    // there will be no data in the partition, or an error while trying to read
    // the partition (if the archive files have been moved to the original
    // partition directory). But re-running the archive command will allow
    // recovery.

    // Move the intermediate archived directory to the original parent directory
    if (!pathExists(originalDir)) {
      console.printInfo("Moving " + intermediateArchivedDir + " to " +
          originalDir);
      moveDir(fs, intermediateArchivedDir, originalDir);
    } else {
      console.printInfo(originalDir + " already exists. " +
          "Assuming it contains the archived version of the partition");
    }

    // Record this change in the metastore
    try {
      boolean parentSettable =
        conf.getBoolVar(HiveConf.ConfVars.HIVEHARPARENTDIRSETTABLE);

      // dirInArchive is the directory within the archive that has all the files
      // for this partition. With older versions of Hadoop, archiving a
      // directory would produce the same directory structure
      // in the archive. So if you created myArchive.har of /tmp/myDir, the
      // files in /tmp/myDir would be located under myArchive.har/tmp/myDir/*
      // In this case, dirInArchive should be tmp/myDir

      // With newer versions of Hadoop, the parent directory could be specified.
      // Assuming the parent directory was set to /tmp/myDir when creating the
      // archive, the files can be found under myArchive.har/*
      // In this case, dirInArchive should be empty

      String dirInArchive = "";
      if (!parentSettable) {
        dirInArchive = originalDir.toUri().getPath();
        if (dirInArchive.length() > 1 && dirInArchive.charAt(0) == '/') {
          dirInArchive = dirInArchive.substring(1);
        }
      }
      setArchived(p, originalDir, dirInArchive, archiveName);
      db.alterPartition(tblName, p);
    } catch (Exception e) {
      throw new HiveException("Unable to change the partition info for HAR", e);
    }

    // If a failure occurs here, the directory containing the original files
    // will not be deleted. The user will run ARCHIVE again to clear this up
    deleteDir(intermediateOriginalDir);
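
The six steps above amount to a commit protocol built on the atomicity of FileSystem.rename(): each state transition is a single rename, and each step is skipped if its destination already exists, so a failed ARCHIVE can simply be re-run. A condensed sketch of the same idea (the method and its name are illustrative, not from the original source):

  // Illustrative condensation of the archive commit protocol using the
  // helpers shown earlier. Every transition is one atomic rename, and the
  // pathExists() guards make each step idempotent across retries.
  private void swapInArchive(FileSystem fs, Path tmpArchive,
      Path intermediateArchived, Path intermediateOriginal, Path original)
      throws HiveException {
    if (!pathExists(intermediateArchived) && !pathExists(intermediateOriginal)) {
      moveDir(fs, tmpArchive, intermediateArchived);   // stage the archive
    }
    if (!pathExists(intermediateOriginal)) {
      moveDir(fs, original, intermediateOriginal);     // park the originals
    }
    if (!pathExists(original)) {
      moveDir(fs, intermediateArchived, original);     // publish the archive
    }
    // ...then update the metastore and deleteDir(intermediateOriginal)
  }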

    Table tbl = db.getTable(dbName, tblName);
    validateAlterTableType(tbl, AlterTableDesc.AlterTableTypes.UNARCHIVE);

    // The user specified a table, not a partition
    if (simpleDesc.getPartSpec() == null) {
      throw new HiveException("ARCHIVE is for partitions only");
    }

    Map<String, String> partSpec = simpleDesc.getPartSpec();
    Partition p = db.getPartition(tbl, partSpec, false);

    if (tbl.getTableType() != TableType.MANAGED_TABLE) {
      throw new HiveException("UNARCHIVE can only be performed on managed tables");
    }

    if (p == null) {
      throw new HiveException("Specified partition does not exist");
    }

    if (!isArchived(p)) {
      Path location = new Path(p.getLocation());
      Path leftOverArchiveDir = new Path(location.getParent(),
          location.getName() + INTERMEDIATE_ARCHIVED_DIR_SUFFIX);

      if (pathExists(leftOverArchiveDir)) {
        console.printInfo("Deleting " + leftOverArchiveDir + " left over " +
        "from a previous unarchiving operation");
        deleteDir(leftOverArchiveDir);
      }

      throw new HiveException("Specified partition is not archived");
    }

    Path originalLocation = new Path(getOriginalLocation(p));
    Path sourceDir = new Path(p.getLocation());
    Path intermediateArchiveDir = new Path(originalLocation.getParent(),
        originalLocation.getName() + INTERMEDIATE_ARCHIVED_DIR_SUFFIX);
    Path intermediateExtractedDir = new Path(originalLocation.getParent(),
        originalLocation.getName() + INTERMEDIATE_EXTRACTED_DIR_SUFFIX);

    Path tmpDir = new Path(driverContext
          .getCtx()
          .getExternalTmpFileURI(originalLocation.toUri()));

    FileSystem fs = null;
    try {
      fs = tmpDir.getFileSystem(conf);
      // Verify that there are no files in the tmp dir, because if there are,
      // they would be copied to the partition
      FileStatus [] filesInTmpDir = fs.listStatus(tmpDir);
      if (filesInTmpDir != null && filesInTmpDir.length != 0) {
        for (FileStatus file : filesInTmpDir) {
          console.printInfo(file.getPath().toString());
        }
        throw new HiveException("Temporary directory " + tmpDir + " is not empty");
      }

    } catch (IOException e) {
      throw new HiveException(e);
    }

    // Some sanity checks
    if (originalLocation == null) {
      throw new HiveException("Missing archive data in the partition");
    }
    if (!"har".equals(sourceDir.toUri().getScheme())) {
      throw new HiveException("Location should refer to a HAR");
    }

    // Clarification of terms:
    // - The originalLocation directory represents the original directory of the
    //   partition's files. It now contains an archived version of those files,
    //   e.g. hdfs:/warehouse/myTable/ds=1/
    // - The source directory is the directory containing all the files that
    //   should be in the partition. e.g. har:/warehouse/myTable/ds=1/myTable.har/
    //   Note the har:/ scheme

    // Steps:
    // 1. Extract the archive in a temporary folder
    // 2. Move the archive dir to an intermediate dir at the same level as
    //    originalLocation. Call the new dir intermediate-extracted.
    // 3. Rename the original partition dir to an intermediate dir. Call the
    //    renamed dir intermediate-archive
    // 4. Rename intermediate-extracted to the original partition dir
    // 5. Change the metadata
    // 6. Delete the archived partition files in intermediate-archive

    if (!pathExists(intermediateExtractedDir) &&
        !pathExists(intermediateArchiveDir)) {
      try {

        // Copy the files out of the archive into the temporary directory
        String copySource = (new Path(sourceDir, "*")).toString();
        String copyDest = tmpDir.toString();
        List<String> args = new ArrayList<String>();
        args.add("-cp");
        args.add(copySource);
        args.add(copyDest);

        console.printInfo("Copying " + copySource + " to " + copyDest);
        FsShell fss = new FsShell(conf);
        int ret = 0;
        try {
          ret = ToolRunner.run(fss, args.toArray(new String[0]));
        } catch (Exception e) {
          throw new HiveException(e);
        }
        if (ret != 0) {
          throw new HiveException("Error while copying files from archive");
        }

        console.printInfo("Moving " + tmpDir + " to " + intermediateExtractedDir);
        if (fs.exists(intermediateExtractedDir)) {
          throw new HiveException("Invalid state: the intermediate extracted " +
              "directory already exists.");
        }
        fs.rename(tmpDir, intermediateExtractedDir);
      } catch (Exception e) {
        throw new HiveException(e);
      }
    }

    // At this point, we know that the extracted files are in the intermediate
    // extracted dir, or in the original directory.

    if (!pathExists(intermediateArchiveDir)) {
      try {
        console.printInfo("Moving " + originalLocation + " to " + intermediateArchiveDir);
        fs.rename(originalLocation, intermediateArchiveDir);
      } catch (IOException e) {
        throw new HiveException(e);
      }
    } else {
      console.printInfo(intermediateArchiveDir + " already exists. " +
          "Assuming it contains the archived version of the partition");
    }

    // If there is a failure from here until the metadata is changed,
    // the partition will be empty or throw errors on read.

    // If the original location exists here, then it must be the extracted files
    // because in the previous step, we moved the previous original location
    // (containing the archived version of the files) to intermediateArchiveDir
    if (!pathExists(originalLocation)) {
      try {
        console.printInfo("Moving " + intermediateExtractedDir + " to " + originalLocation);
        fs.rename(intermediateExtractedDir, originalLocation);
      } catch (IOException e) {
        throw new HiveException(e);
      }
    } else {
      console.printInfo(originalLocation + " already exists. " +
          "Assuming it contains the extracted files in the partition");
    }

    setUnArchived(p);
    try {
      db.alterPartition(tblName, p);
    } catch (InvalidOperationException e) {
      throw new HiveException(e);
    }
    // If a failure happens here, the intermediate archive files won't be
    // deleted. The user will need to call unarchive again to clear those up.
    deleteDir(intermediateArchiveDir);

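
The scheme check earlier ("har") matters because an archived partition's location points inside the archive, which Hadoop resolves through its HAR filesystem. A hypothetical sketch of listing an archived partition's files (the URI is a placeholder):

    // Hypothetical: list the contents of an archived partition via the
    // har:// filesystem; the URI below is a placeholder.
    try {
      Path harDir = new Path("har:///warehouse/myTable/ds=1/data.har");
      FileSystem harFs = harDir.getFileSystem(conf);
      for (FileStatus stat : harFs.listStatus(harDir)) {
        console.printInfo(stat.getPath().toString());
      }
    } catch (IOException e) {
      throw new HiveException(e);
    }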

      switch (alterType) {
      case ADDPROPS:
        // allow this form
        break;
      default:
        throw new HiveException(
          "Cannot use this form of ALTER TABLE on a view");
      }
    }

    if (tbl.isNonNative()) {
      throw new HiveException("Cannot use ALTER TABLE on a non-native table");
    }
  }
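
Validation helpers like validateAlterTableType() signal problems by throwing HiveException rather than returning error codes; a typical caller converts the exception into a console error and a non-zero exit status, as in the role-operation snippet earlier. A minimal sketch of that calling pattern (assumed shape, not from the original source):

    // Assumed calling pattern: validate, attempt the change, and report
    // any exception as a failed DDL operation.
    try {
      validateAlterTableType(tbl, AlterTableDesc.AlterTableTypes.ADDPROPS);
      db.alterTable(tblName, tbl);
    } catch (InvalidOperationException e) {
      console.printError("ALTER TABLE failed: " + e.getMessage());
      return 1;
    } catch (HiveException e) {
      console.printError("ALTER TABLE failed: " + e.getMessage());
      return 1;
    }
    return 0;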
