Examples of Warehouse
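
The snippets below, collected from the Hive and HCatalog code bases, show the common uses of org.apache.hadoop.hive.metastore.Warehouse: it is constructed from a Configuration (or HiveConf), resolves the default locations of databases and tables under the warehouse root, and performs filesystem work such as recursive directory deletion. As a minimal, self-contained sketch (not taken from the excerpts below; it follows the old Hive 0.x/1.x method names the excerpts use, such as getWhRoot and getTablePath):

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.metastore.Warehouse;
    import org.apache.hadoop.hive.metastore.api.MetaException;

    public class WarehousePathDemo {
      public static void main(String[] args) throws MetaException {
        HiveConf conf = new HiveConf();      // picks up hive-site.xml from the classpath
        Warehouse wh = new Warehouse(conf);  // throws MetaException if the warehouse dir is unusable
        // hive.metastore.warehouse.dir, resolved to a fully-qualified Path
        Path root = wh.getWhRoot();
        System.out.println("warehouse root: " + root);
      }
    }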


Examples of org.apache.hadoop.hive.metastore.Warehouse
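
This HCatalog test fixture routes DDL through HCatSemanticAnalyzer, disables the execution hooks and concurrency support, and then builds a Warehouse and a HiveMetaStoreClient from the same HiveConf: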

    // Route DDL through HCatalog's semantic analyzer, and disable the
    // execution hooks and concurrency support that would interfere with tests.
    hcatConf.set(HiveConf.ConfVars.SEMANTIC_ANALYZER_HOOK.varname, HCatSemanticAnalyzer.class.getName());
    hcatConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
    hcatConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
    hcatConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
    // The warehouse helper and the metastore client share one configuration.
    clientWH = new Warehouse(hcatConf);
    msc = new HiveMetaStoreClient(hcatConf, null);
    System.setProperty(HiveConf.ConfVars.PREEXECHOOKS.varname, " ");
    System.setProperty(HiveConf.ConfVars.POSTEXECHOOKS.varname, " ");
  }

Examples of org.apache.hadoop.hive.metastore.Warehouse
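
Here a semantic analyzer computes a table's default location: the Database object is fetched through the Hive client and handed to Warehouse.getTablePath, with both checked exception types rewrapped as SemanticException: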

            // Get the table's default location under the warehouse.
            Table dumpTable;
            Path targetPath;
            try {
              dumpTable = db.newTable(qb.getTableDesc().getTableName());
              Warehouse wh = new Warehouse(conf);
              targetPath = wh.getTablePath(db.getDatabase(dumpTable.getDbName()),
                  dumpTable.getTableName());
            } catch (HiveException e) {
              throw new SemanticException(e);
            } catch (MetaException e) {
              throw new SemanticException(e);
            }

Examples of org.apache.hadoop.hive.metastore.Warehouse
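
A small helper that routes directory removal through the Warehouse and converts the checked MetaException into a HiveException: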

  // Remove a directory through the Warehouse (true requests a recursive
  // delete), rewrapping the checked MetaException as a HiveException.
  private void deleteDir(Path dir) throws HiveException {
    try {
      Warehouse wh = new Warehouse(conf);
      wh.deleteDir(dir, true);
    } catch (MetaException e) {
      throw new HiveException(e);
    }
  }

Examples of org.apache.hadoop.hive.metastore.Warehouse
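
This longer excerpt, from table-import analysis, uses a single Warehouse instance for two cases: when the target table already exists, its actual data location becomes the import destination and only brand-new partitions are accepted; when it does not exist, getTablePath computes the default location in the current database and the load tasks are chained onto the create-table task: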

        conf.set("import.destination.table", tblDesc.getTableName());
        for (AddPartitionDesc addPartitionDesc : partitionDescs) {
          addPartitionDesc.setTableName(tblDesc.getTableName());
        }
      }
      Warehouse wh = new Warehouse(conf);
      try {
        Table table = db.getTable(tblDesc.getTableName());
        checkTable(table, tblDesc);
        LOG.debug("table " + tblDesc.getTableName()
            + " exists: metadata checked");
        tableExists = true;
        conf.set("import.destination.dir", table.getDataLocation().toString());
        if (table.isPartitioned()) {
          LOG.debug("table partitioned");
          for (AddPartitionDesc addPartitionDesc : partitionDescs) {
            Map<String, String> partSpec = addPartitionDesc.getPartition(0).getPartSpec();
            if (db.getPartition(table, partSpec, false) == null) {
              rootTasks.add(addSinglePartition(fromURI, fs, tblDesc, table, wh, addPartitionDesc));
            } else {
              throw new SemanticException(
                  ErrorMsg.PARTITION_EXISTS.getMsg(partSpecToString(partSpec)));
            }
          }
        } else {
          LOG.debug("table non-partitioned");
          checkTargetLocationEmpty(fs, new Path(table.getDataLocation()
              .toString()));
          loadTable(fromURI, table);
        }
        // Set this to read because we can't overwrite any existing partitions
        outputs.add(new WriteEntity(table, WriteEntity.WriteType.DDL_NO_LOCK));
      } catch (InvalidTableException e) {
        LOG.debug("table " + tblDesc.getTableName() + " does not exist");

        Task<?> t = TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
            tblDesc), conf);
        Table table = new Table(dbname, tblDesc.getTableName());
        String currentDb = SessionState.get().getCurrentDatabase();
        conf.set("import.destination.dir",
            wh.getTablePath(db.getDatabaseCurrent(),
                tblDesc.getTableName()).toString());
        if ((tblDesc.getPartCols() != null) && (tblDesc.getPartCols().size() != 0)) {
          for (AddPartitionDesc addPartitionDesc : partitionDescs) {
            t.addDependentTask(
                addSinglePartition(fromURI, fs, tblDesc, table, wh, addPartitionDesc));
          }
        } else {
          LOG.debug("adding dependent CopyWork/MoveWork for table");
          if (tblDesc.isExternal() && (tblDesc.getLocation() == null)) {
            LOG.debug("Importing in place, no emptiness check, no copying/loading");
            Path dataPath = new Path(fromURI.toString(), "data");
            tblDesc.setLocation(dataPath.toString());
          } else {
            Path tablePath = null;
            if (tblDesc.getLocation() != null) {
              tablePath = new Path(tblDesc.getLocation());
            } else {
              tablePath = wh.getTablePath(db.getDatabaseCurrent(), tblDesc.getTableName());
            }
            checkTargetLocationEmpty(fs, tablePath);
            t.addDependentTask(loadTable(fromURI, table));
          }
        }


Examples of org.apache.hadoop.hive.metastore.Warehouse
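
Another test setup: it resolves the short user name, opens the FileSystem behind the warehouse directory, and creates a Warehouse plus a Hive client before clearing leftover state out of the HiveMetaStore (whDir, ugi and perm700 are fields of the surrounding test class):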

        // whDir, ugi and perm700 are fields of the surrounding test class.
        String username = ShimLoader.getHadoopShims().getShortUserName(ugi);

        whPath = new Path(whDir);
        whFs = whPath.getFileSystem(conf);

        wh = new Warehouse(conf);
        hive = Hive.get(conf);

        // Clean up anything earlier runs left behind in the HiveMetaStore.
        HcatTestUtils.cleanupHMS(hive, wh, perm700);


Examples of org.apache.hadoop.hive.metastore.Warehouse
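
Since the Warehouse constructor throws the checked MetaException, a Configurable implementation that builds one inside setConf has to rewrap it as an unchecked exception: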

    @Override
    public void setConf(Configuration conf) {
        super.setConf(conf);
        try {
            this.wh = new Warehouse(conf);
        } catch (MetaException ex) {
            // setConf cannot declare checked exceptions, so rewrap.
            throw new RuntimeException(ex);
        }
    }

Examples of org.apache.hadoop.hive.metastore.Warehouse
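
Finally, an excerpt from a command-line tool that applies permissions and a group to a database directory; getDatabasePath maps the Database object to its location under the warehouse root. The original excerpt breaks off inside the group branch, so the closing lines below are a hedged reconstruction using FileSystem.setOwner: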

                // We don't set perms or group for the default dir.
                return 0;
            } else {
                try {
                    Hive db = Hive.get();
                    // Map the database to its directory under the warehouse root.
                    Path dbPath = new Warehouse(conf).getDatabasePath(db.getDatabase(dbName));
                    FileSystem fs = dbPath.getFileSystem(conf);
                    if (perms != null) {
                        fs.setPermission(dbPath, perms);
                    }
                    if (null != grp) {
                        // Assumed completion: the original excerpt ends at this branch.
                        fs.setOwner(dbPath, null, grp);
                    }
